git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge remote-tracking branch 'usb-chipidea-next/ci-for-usb-next'
author Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 11 Feb 2016 03:04:48 +0000 (14:04 +1100)
committer Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 11 Feb 2016 03:04:48 +0000 (14:04 +1100)
3277 files changed:
.mailmap
Documentation/ABI/obsolete/sysfs-class-rfkill
Documentation/ABI/removed/sysfs-class-rfkill [new file with mode: 0644]
Documentation/ABI/stable/sysfs-fs-orangefs [new file with mode: 0644]
Documentation/ABI/testing/sysfs-driver-toshiba_acpi
Documentation/ABI/testing/sysfs-firmware-qemu_fw_cfg [new file with mode: 0644]
Documentation/ABI/testing/sysfs-fs-f2fs
Documentation/CodingStyle
Documentation/DocBook/crypto-API.tmpl
Documentation/DocBook/device-drivers.tmpl
Documentation/DocBook/gpu.tmpl
Documentation/DocBook/media/v4l/media-ioc-g-topology.xml
Documentation/DocBook/media/v4l/media-types.xml
Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml [deleted file]
Documentation/DocBook/media/v4l/pixfmt.xml
Documentation/DocBook/media/v4l/vidioc-query-dv-timings.xml
Documentation/DocBook/media/v4l/vidioc-querystd.xml
Documentation/HOWTO
Documentation/Intel-IOMMU.txt
Documentation/RCU/Design/Data-Structures/BigTreeClassicRCU.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/Data-Structures.html [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/Data-Structures.htmlx [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/HugeTreeClassicRCU.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/TreeLevel.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/TreeMapping.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/TreeMappingLevel.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/blkd_task.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/nxtlist.svg [new file with mode: 0644]
Documentation/RCU/Design/Requirements/Requirements.html
Documentation/RCU/Design/Requirements/Requirements.htmlx
Documentation/RCU/trace.txt
Documentation/SubmittingPatches
Documentation/arm/sunxi/README
Documentation/arm64/booting.txt
Documentation/cgroup-v2.txt
Documentation/crypto/api-intro.txt
Documentation/devicetree/bindings/arm/cpus.txt
Documentation/devicetree/bindings/arm/fw-cfg.txt
Documentation/devicetree/bindings/arm/keystone/keystone.txt
Documentation/devicetree/bindings/arm/marvell,kirkwood.txt
Documentation/devicetree/bindings/arm/samsung/exynos-srom.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/sunxi.txt
Documentation/devicetree/bindings/clock/sunxi.txt
Documentation/devicetree/bindings/display/arm,hdlcd.txt [new file with mode: 0644]
Documentation/devicetree/bindings/dma/arm-pl330.txt
Documentation/devicetree/bindings/edac/apm-xgene-edac.txt
Documentation/devicetree/bindings/firmware/qcom,scm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hsi/nokia-modem.txt
Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/interrupt-controller/microchip,pic32-evic.txt [new file with mode: 0644]
Documentation/devicetree/bindings/leds/leds-sn3218.txt [new file with mode: 0644]
Documentation/devicetree/bindings/media/i2c/mt9v032.txt
Documentation/devicetree/bindings/media/i2c/tvp5150.txt [new file with mode: 0644]
Documentation/devicetree/bindings/media/rcar_vin.txt
Documentation/devicetree/bindings/media/renesas,jpu.txt
Documentation/devicetree/bindings/media/ti-cal.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mfd/tps65086.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mips/pic32/microchip,pic32mzda.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/brcm,sdhci-iproc.txt
Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
Documentation/devicetree/bindings/mtd/qcom_nandc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
Documentation/devicetree/bindings/net/hisilicon-hns-nic.txt
Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
Documentation/devicetree/bindings/net/mdio-mux.txt
Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/phy.txt
Documentation/devicetree/bindings/net/ralink,rt2880-net.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
Documentation/devicetree/bindings/pci/rcar-pci.txt
Documentation/devicetree/bindings/phy/phy-ath79-usb.txt [new file with mode: 0644]
Documentation/devicetree/bindings/pinctrl/marvell,armada-370-pinctrl.txt
Documentation/devicetree/bindings/regmap/regmap.txt
Documentation/devicetree/bindings/regulator/act8945a-regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/regulator/lp872x.txt
Documentation/devicetree/bindings/regulator/max77802.txt
Documentation/devicetree/bindings/regulator/qcom,saw-regulator.txt [new file with mode: 0644]
Documentation/devicetree/bindings/rng/brcm,bcm6368.txt [new file with mode: 0644]
Documentation/devicetree/bindings/serial/brcm,bcm2835-aux-uart.txt [new file with mode: 0644]
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
Documentation/devicetree/bindings/soc/rockchip/power_domain.txt
Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
Documentation/devicetree/bindings/sound/max98926.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/pcm179x.txt
Documentation/devicetree/bindings/sound/rt5514.txt [new file with mode: 0644]
Documentation/devicetree/bindings/sound/rt5616.txt
Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt [new file with mode: 0644]
Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt [new file with mode: 0644]
Documentation/devicetree/bindings/thermal/rcar-thermal.txt
Documentation/devicetree/bindings/thermal/rockchip-thermal.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/dma-buf-sharing.txt
Documentation/driver-model/platform.txt
Documentation/dvb/README.dvb-usb
Documentation/efi-stub.txt
Documentation/features/list-arch.sh
Documentation/features/vm/huge-vmap/arch-support.txt
Documentation/filesystems/orangefs.txt [new file with mode: 0644]
Documentation/filesystems/proc.txt
Documentation/hwmon/ltc2990 [new file with mode: 0644]
Documentation/hwmon/max34440
Documentation/i2c/dev-interface
Documentation/i2c/slave-eeprom-backend
Documentation/ja_JP/HOWTO
Documentation/kernel-parameters.txt
Documentation/memory-barriers.txt
Documentation/networking/batman-adv.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/mac80211-injection.txt
Documentation/serial/tty.txt
Documentation/sound/alsa/ALSA-Configuration.txt
Documentation/sound/alsa/HD-Audio-DP-MST-audio.txt [new file with mode: 0644]
Documentation/sysctl/kernel.txt
Documentation/virtual/kvm/api.txt
Documentation/virtual/kvm/devices/s390_flic.txt
Documentation/virtual/kvm/devices/vm.txt
Documentation/watchdog/watchdog-parameters.txt
Documentation/x86/early-microcode.txt
Documentation/x86/x86_64/mm.txt
MAINTAINERS
Makefile
README
REPORTING-BUGS
arch/alpha/include/asm/pci.h
arch/alpha/include/asm/serial.h
arch/arc/Kconfig
arch/arc/configs/vdk_hs38_smp_defconfig
arch/arc/include/asm/arcregs.h
arch/arc/include/asm/irqflags-arcv2.h
arch/arc/include/asm/mcip.h
arch/arc/kernel/entry-arcv2.S
arch/arc/kernel/intc-arcv2.c
arch/arc/kernel/mcip.c
arch/arc/kernel/setup.c
arch/arc/kernel/time.c
arch/arm/Kconfig
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/arm-soc-for-next-contents.txt [new file with mode: 0644]
arch/arm/boot/compressed/Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-cm-t335.dts
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/am437x-gp-evm.dts
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/am57xx-cl-som-am57x.dts
arch/arm/boot/dts/am57xx-sbc-am57x.dts
arch/arm/boot/dts/armada-370-mirabox.dts
arch/arm/boot/dts/armada-370-netgear-rn104.dts
arch/arm/boot/dts/armada-370-synology-ds213j.dts
arch/arm/boot/dts/armada-385-db-ap.dts
arch/arm/boot/dts/armada-388-gp.dts
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts
arch/arm/boot/dts/at91-sama5d2_xplained.dts
arch/arm/boot/dts/at91-sama5d4_xplained.dts
arch/arm/boot/dts/at91-sama5d4ek.dts
arch/arm/boot/dts/at91sam9n12ek.dts
arch/arm/boot/dts/bcm2835-rpi-a.dts [new file with mode: 0644]
arch/arm/boot/dts/bcm2835-rpi.dtsi
arch/arm/boot/dts/bcm283x.dtsi
arch/arm/boot/dts/emev2.dtsi
arch/arm/boot/dts/exynos3250-monk.dts
arch/arm/boot/dts/exynos3250-rinato.dts
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/exynos4210-origen.dts
arch/arm/boot/dts/exynos4210-smdkv310.dts
arch/arm/boot/dts/exynos4210-trats.dts
arch/arm/boot/dts/exynos4210-universal_c210.dts
arch/arm/boot/dts/exynos4412-odroid-common.dtsi
arch/arm/boot/dts/exynos4412-odroidx.dts
arch/arm/boot/dts/exynos4412-origen.dts
arch/arm/boot/dts/exynos4412-smdk4412.dts
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/exynos5.dtsi
arch/arm/boot/dts/exynos5250-arndale.dts
arch/arm/boot/dts/exynos5250-snow-common.dtsi
arch/arm/boot/dts/exynos5250-spring.dts
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/exynos5410-pinctrl.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos5410-smdk5410.dts
arch/arm/boot/dts/exynos5410.dtsi
arch/arm/boot/dts/exynos5420-arndale-octa.dts
arch/arm/boot/dts/exynos5420-cpus.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos5420-peach-pit.dts
arch/arm/boot/dts/exynos5420-smdk5420.dts
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/exynos5422-cpus.dtsi
arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
arch/arm/boot/dts/exynos5800-peach-pi.dts
arch/arm/boot/dts/exynos5800.dtsi
arch/arm/boot/dts/imx25-eukrea-mbimxsd25-baseboard.dts
arch/arm/boot/dts/imx28-apf28dev.dts
arch/arm/boot/dts/imx28-eukrea-mbmx28lc.dtsi
arch/arm/boot/dts/imx28-tx28.dts
arch/arm/boot/dts/imx35-eukrea-mbimxsd35-baseboard.dts
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/imx51-digi-connectcore-som.dtsi
arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
arch/arm/boot/dts/imx53-ard.dts
arch/arm/boot/dts/imx53-qsb-common.dtsi
arch/arm/boot/dts/imx53-tx53-x03x.dts
arch/arm/boot/dts/imx53-tx53-x13x.dts
arch/arm/boot/dts/imx53-tx53.dtsi
arch/arm/boot/dts/imx6dl-tx6u-811x.dts
arch/arm/boot/dts/imx6q-gk802.dts
arch/arm/boot/dts/imx6q-icore-rqs.dts [new file with mode: 0644]
arch/arm/boot/dts/imx6q-tx6q-1110.dts
arch/arm/boot/dts/imx6qdl-apf6dev.dtsi
arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
arch/arm/boot/dts/imx6qdl-gw552x.dtsi
arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi [new file with mode: 0644]
arch/arm/boot/dts/imx6qdl-microsom.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
arch/arm/boot/dts/imx6qdl-sabresd.dtsi
arch/arm/boot/dts/imx6qdl-tx6.dtsi
arch/arm/boot/dts/imx6qdl-udoo.dtsi
arch/arm/boot/dts/imx6qdl.dtsi
arch/arm/boot/dts/imx6sl-warp.dts
arch/arm/boot/dts/imx6sx-sabreauto.dts
arch/arm/boot/dts/imx6sx-sdb.dtsi
arch/arm/boot/dts/imx6ul-14x14-evk.dts
arch/arm/boot/dts/imx7d-sbc-imx7.dts
arch/arm/boot/dts/imx7d-sdb.dts
arch/arm/boot/dts/imx7d.dtsi
arch/arm/boot/dts/k2g-evm.dts [new file with mode: 0644]
arch/arm/boot/dts/k2g.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-ds112.dts
arch/arm/boot/dts/kirkwood-linkstation-6282.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-duo-6281.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lsqvl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lsvl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lswsxl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lswvl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation-lswxl.dts [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-linkstation.dtsi [new file with mode: 0644]
arch/arm/boot/dts/kirkwood-lswvl.dts [deleted file]
arch/arm/boot/dts/kirkwood-lswxl.dts [deleted file]
arch/arm/boot/dts/kirkwood-openrd-client.dts
arch/arm/boot/dts/kirkwood-openrd.dtsi
arch/arm/boot/dts/kirkwood-pogoplug-series-4.dts
arch/arm/boot/dts/kirkwood.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/mvebu-linkstation-fan.dtsi [new file with mode: 0644]
arch/arm/boot/dts/mvebu-linkstation-gpio-simple.dtsi [new file with mode: 0644]
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/omap3-n950.dts
arch/arm/boot/dts/omap34xx.dtsi
arch/arm/boot/dts/omap36xx.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/orion5x-linkstation-lsgl.dts [new file with mode: 0644]
arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
arch/arm/boot/dts/orion5x-linkstation.dtsi [new file with mode: 0644]
arch/arm/boot/dts/qcom-apq8064.dtsi
arch/arm/boot/dts/qcom-apq8084.dtsi
arch/arm/boot/dts/qcom-ipq8064.dtsi
arch/arm/boot/dts/qcom-msm8660.dtsi
arch/arm/boot/dts/qcom-msm8974.dtsi
arch/arm/boot/dts/r7s72100.dtsi
arch/arm/boot/dts/r8a73a4.dtsi
arch/arm/boot/dts/r8a7740.dtsi
arch/arm/boot/dts/r8a7778-bockw.dts
arch/arm/boot/dts/r8a7778.dtsi
arch/arm/boot/dts/r8a7779-marzen.dts
arch/arm/boot/dts/r8a7779.dtsi
arch/arm/boot/dts/r8a7790-lager.dts
arch/arm/boot/dts/r8a7790.dtsi
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/r8a7791-porter.dts
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/r8a7793-gose.dts
arch/arm/boot/dts/r8a7793.dtsi
arch/arm/boot/dts/r8a7794-alt.dts
arch/arm/boot/dts/r8a7794-silk.dts
arch/arm/boot/dts/r8a7794.dtsi
arch/arm/boot/dts/rk3036-kylin.dts
arch/arm/boot/dts/rk3036.dtsi
arch/arm/boot/dts/rk3066a-bqcurie2.dts
arch/arm/boot/dts/rk3066a-marsboard.dts
arch/arm/boot/dts/rk3066a-rayeager.dts
arch/arm/boot/dts/rk3066a.dtsi
arch/arm/boot/dts/rk3188-radxarock.dts
arch/arm/boot/dts/rk3188.dtsi
arch/arm/boot/dts/rk3288-evb.dtsi
arch/arm/boot/dts/rk3288-firefly.dtsi
arch/arm/boot/dts/rk3288-popmetal.dts
arch/arm/boot/dts/rk3288-r89.dts
arch/arm/boot/dts/rk3288-rock2-som.dtsi
arch/arm/boot/dts/rk3288-rock2-square.dts
arch/arm/boot/dts/rk3288-veyron-chromebook.dtsi
arch/arm/boot/dts/rk3288-veyron.dtsi
arch/arm/boot/dts/rk3288.dtsi
arch/arm/boot/dts/rk3xxx.dtsi
arch/arm/boot/dts/s5pv210-aquila.dts
arch/arm/boot/dts/s5pv210-goni.dts
arch/arm/boot/dts/s5pv210-smdkv210.dts
arch/arm/boot/dts/sama5d4.dtsi
arch/arm/boot/dts/sh73a0.dtsi
arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
arch/arm/boot/dts/sun4i-a10-chuwi-v7-cw0825.dts
arch/arm/boot/dts/sun4i-a10-inet97fv2.dts
arch/arm/boot/dts/sun4i-a10-inet9f-rev03.dts
arch/arm/boot/dts/sun4i-a10-itead-iteaduino-plus.dts
arch/arm/boot/dts/sun5i-r8-chip.dts
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun6i-a31s-sina31s-core.dtsi
arch/arm/boot/dts/sun7i-a20-itead-ibox.dts [new file with mode: 0644]
arch/arm/boot/dts/sun8i-a23-a33.dtsi
arch/arm/boot/dts/sun8i-a23.dtsi
arch/arm/boot/dts/sun8i-a33-sinlinx-sina33.dts
arch/arm/boot/dts/sun8i-a33.dtsi
arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts [new file with mode: 0644]
arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts [new file with mode: 0644]
arch/arm/boot/dts/sun8i-a83t.dtsi [new file with mode: 0644]
arch/arm/boot/dts/sun8i-h3.dtsi
arch/arm/boot/dts/sun9i-a80-cubieboard4.dts
arch/arm/boot/dts/sun9i-a80-optimus.dts
arch/arm/boot/dts/sun9i-a80.dtsi
arch/arm/boot/dts/sunxi-itead-core-common.dtsi [new file with mode: 0644]
arch/arm/boot/dts/tegra114.dtsi
arch/arm/boot/dts/tegra124-jetson-tk1.dts
arch/arm/boot/dts/tegra124.dtsi
arch/arm/boot/dts/tegra20.dtsi
arch/arm/boot/dts/tegra30.dtsi
arch/arm/boot/dts/vf-colibri.dtsi
arch/arm/boot/dts/vfxxx.dtsi
arch/arm/common/icst.c
arch/arm/configs/exynos_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/mvebu_v5_defconfig
arch/arm/configs/mvebu_v7_defconfig
arch/arm/configs/mxs_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/shmobile_defconfig
arch/arm/include/asm/div64.h
arch/arm/include/asm/kvm_asm.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/pci.h
arch/arm/include/debug/imx.S
arch/arm/include/debug/palmchip.S [new file with mode: 0644]
arch/arm/include/uapi/asm/unistd.h
arch/arm/kernel/calls.S
arch/arm/kernel/entry-armv.S
arch/arm/kernel/hibernate.c
arch/arm/kernel/irq.c
arch/arm/kernel/machine_kexec.c
arch/arm/kernel/reboot.c
arch/arm/kernel/setup.c
arch/arm/kernel/topology.c
arch/arm/kernel/vmlinux-xip.lds.S [new file with mode: 0644]
arch/arm/kernel/vmlinux.lds.S
arch/arm/kvm/arm.c
arch/arm/mach-cns3xxx/Makefile.boot [deleted file]
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/Makefile
arch/arm/mach-exynos/Makefile.boot [deleted file]
arch/arm/mach-exynos/exynos.c
arch/arm/mach-exynos/include/mach/map.h
arch/arm/mach-exynos/mcpm-exynos.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-exynos/pm.c
arch/arm/mach-exynos/pmu.c [deleted file]
arch/arm/mach-exynos/regs-srom.h [deleted file]
arch/arm/mach-exynos/suspend.c
arch/arm/mach-imx/3ds_debugboard.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/anatop.c
arch/arm/mach-imx/avic.c
arch/arm/mach-imx/cpu-imx27.c
arch/arm/mach-imx/cpu-imx31.c
arch/arm/mach-imx/cpu-imx35.c
arch/arm/mach-imx/cpu.c
arch/arm/mach-imx/epit.c
arch/arm/mach-imx/headsmp.S
arch/arm/mach-imx/iomux-imx31.c
arch/arm/mach-imx/iomux-v1.c
arch/arm/mach-imx/iomux-v3.c
arch/arm/mach-imx/mach-armadillo5x0.c
arch/arm/mach-imx/mach-imx51.c
arch/arm/mach-imx/mach-mx27ads.c
arch/arm/mach-imx/mach-mx31ads.c
arch/arm/mach-imx/mach-mx31moboard.c
arch/arm/mach-imx/mach-qong.c
arch/arm/mach-imx/mxc.h
arch/arm/mach-imx/pm-imx27.c
arch/arm/mach-imx/pm-imx3.c
arch/arm/mach-imx/pm-imx5.c
arch/arm/mach-imx/pm-imx6.c
arch/arm/mach-imx/system.c
arch/arm/mach-imx/tzic.c
arch/arm/mach-integrator/Kconfig
arch/arm/mach-integrator/Makefile.boot [deleted file]
arch/arm/mach-keystone/Makefile.boot [deleted file]
arch/arm/mach-keystone/keystone.c
arch/arm/mach-mmp/Makefile.boot [deleted file]
arch/arm/mach-mv78xx0/Kconfig
arch/arm/mach-mv78xx0/Makefile.boot [deleted file]
arch/arm/mach-mvebu/platsmp.c
arch/arm/mach-netx/Kconfig
arch/arm/mach-nspire/Makefile.boot [deleted file]
arch/arm/mach-omap2/Makefile.boot [deleted file]
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-omap2/serial.c
arch/arm/mach-omap2/sleep34xx.S
arch/arm/mach-omap2/sleep44xx.S
arch/arm/mach-orion5x/Makefile.boot [deleted file]
arch/arm/mach-prima2/Makefile.boot [deleted file]
arch/arm/mach-qcom/Kconfig
arch/arm/mach-realview/Kconfig
arch/arm/mach-realview/Makefile.boot [deleted file]
arch/arm/mach-realview/platsmp-dt.c
arch/arm/mach-s3c64xx/Kconfig
arch/arm/mach-s3c64xx/Makefile.boot [deleted file]
arch/arm/mach-shmobile/common.h
arch/arm/mach-shmobile/cpufreq.c
arch/arm/mach-shmobile/emev2.h [new file with mode: 0644]
arch/arm/mach-shmobile/headsmp-scu.S
arch/arm/mach-shmobile/platsmp-scu.c
arch/arm/mach-shmobile/setup-emev2.c
arch/arm/mach-shmobile/setup-r8a7740.c
arch/arm/mach-shmobile/setup-rcar-gen2.c
arch/arm/mach-shmobile/smp-emev2.c
arch/arm/mach-shmobile/smp-r8a7779.c
arch/arm/mach-shmobile/smp-sh73a0.c
arch/arm/mach-shmobile/suspend.c
arch/arm/mach-shmobile/timer.c
arch/arm/mach-spear/Makefile.boot [deleted file]
arch/arm/mach-sunxi/sunxi.c
arch/arm/mach-tango/Kconfig
arch/arm/mach-tango/platsmp.c
arch/arm/mach-u300/Makefile.boot [deleted file]
arch/arm/mach-ux500/Makefile.boot [deleted file]
arch/arm/mach-zynq/Makefile.boot [deleted file]
arch/arm/mm/Kconfig
arch/arm/mm/cache-tauros2.c
arch/arm/mm/idmap.c
arch/arm/mm/init.c
arch/arm/plat-orion/time.c
arch/arm/plat-samsung/adc.c
arch/arm/plat-samsung/devs.c
arch/arm/plat-samsung/include/plat/map-s5p.h
arch/arm/plat-samsung/pm-check.c
arch/arm64/Kconfig
arch/arm64/Kconfig.platforms
arch/arm64/Makefile
arch/arm64/boot/dts/amd/Makefile
arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts [new file with mode: 0644]
arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts [new file with mode: 0644]
arch/arm64/boot/dts/amd/amd-seattle-soc.dtsi
arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/amd/husky.dts [new file with mode: 0644]
arch/arm64/boot/dts/apm/apm-storm.dtsi
arch/arm64/boot/dts/arm/juno-base.dtsi
arch/arm64/boot/dts/hisilicon/hip05_hns.dtsi
arch/arm64/boot/dts/nvidia/tegra132-norrin.dts
arch/arm64/boot/dts/nvidia/tegra132.dtsi
arch/arm64/boot/dts/nvidia/tegra210.dtsi
arch/arm64/boot/dts/qcom/Makefile
arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
arch/arm64/boot/dts/qcom/msm8916.dtsi
arch/arm64/boot/dts/qcom/msm8996-mtp.dts [new file with mode: 0644]
arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/qcom/msm8996.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/qcom/pm8004.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/qcom/pm8994.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/qcom/pmi8994.dtsi [new file with mode: 0644]
arch/arm64/boot/dts/renesas/r8a7795-salvator-x.dts
arch/arm64/boot/dts/renesas/r8a7795.dtsi
arch/arm64/boot/dts/rockchip/rk3368-evb.dtsi
arch/arm64/boot/dts/rockchip/rk3368-r88.dts
arch/arm64/boot/dts/rockchip/rk3368.dtsi
arch/arm64/configs/defconfig
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/acpi.h
arch/arm64/include/asm/boot.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/fixmap.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/hardirq.h
arch/arm64/include/asm/kasan.h
arch/arm64/include/asm/kernel-pgtable.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/pci.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/smp.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/acpi_parking_protocol.c [new file with mode: 0644]
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpu_ops.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/head.S
arch/arm64/kernel/image.h
arch/arm64/kernel/pci.c
arch/arm64/kernel/psci.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/hyp.S
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/inject_fault.c
arch/arm64/kvm/sys_regs.c
arch/arm64/lib/copy_page.S
arch/arm64/mm/dump.c
arch/arm64/mm/init.c
arch/arm64/mm/kasan_init.c
arch/arm64/mm/mmu.c
arch/arm64/mm/pageattr.c
arch/arm64/mm/proc-macros.S
arch/arm64/mm/proc.S
arch/avr32/kernel/setup.c
arch/c6x/kernel/setup.c
arch/frv/include/asm/serial.h
arch/h8300/boot/dts/edosk2674.dts
arch/h8300/boot/dts/h8300h_sim.dts
arch/h8300/boot/dts/h8s_sim.dts
arch/ia64/kernel/efi.c
arch/ia64/kernel/setup.c
arch/ia64/kernel/unaligned.c
arch/m32r/Kconfig
arch/m32r/kernel/setup.c
arch/m68k/coldfire/device.c
arch/m68k/configs/amiga_defconfig
arch/m68k/configs/apollo_defconfig
arch/m68k/configs/atari_defconfig
arch/m68k/configs/bvme6000_defconfig
arch/m68k/configs/hp300_defconfig
arch/m68k/configs/mac_defconfig
arch/m68k/configs/multi_defconfig
arch/m68k/configs/mvme147_defconfig
arch/m68k/configs/mvme16x_defconfig
arch/m68k/configs/q40_defconfig
arch/m68k/configs/sun3_defconfig
arch/m68k/configs/sun3x_defconfig
arch/m68k/include/asm/serial.h
arch/m68k/include/asm/unistd.h
arch/m68k/include/uapi/asm/unistd.h
arch/m68k/kernel/syscalltable.S
arch/metag/kernel/ftrace.c
arch/microblaze/include/asm/unistd.h
arch/microblaze/include/uapi/asm/unistd.h
arch/microblaze/kernel/setup.c
arch/microblaze/kernel/syscall_table.S
arch/mips/Kbuild.platforms
arch/mips/Kconfig
arch/mips/Makefile
arch/mips/alchemy/common/gpiolib.c
arch/mips/ar7/gpio.c
arch/mips/ath79/Kconfig
arch/mips/ath79/common.h
arch/mips/ath79/irq.c
arch/mips/ath79/setup.c
arch/mips/bcm47xx/sprom.c
arch/mips/bcm63xx/nvram.c
arch/mips/bmips/irq.c
arch/mips/bmips/setup.c
arch/mips/boot/compressed/Makefile
arch/mips/boot/compressed/uart-prom.c [new file with mode: 0644]
arch/mips/boot/dts/Makefile
arch/mips/boot/dts/brcm/bcm6328.dtsi
arch/mips/boot/dts/brcm/bcm6368.dtsi
arch/mips/boot/dts/brcm/bcm7125.dtsi
arch/mips/boot/dts/brcm/bcm7346.dtsi
arch/mips/boot/dts/brcm/bcm7358.dtsi
arch/mips/boot/dts/brcm/bcm7360.dtsi
arch/mips/boot/dts/brcm/bcm7362.dtsi
arch/mips/boot/dts/brcm/bcm7420.dtsi
arch/mips/boot/dts/brcm/bcm7425.dtsi
arch/mips/boot/dts/brcm/bcm7435.dtsi
arch/mips/boot/dts/ingenic/ci20.dts
arch/mips/boot/dts/ingenic/jz4780.dtsi
arch/mips/boot/dts/pic32/Makefile [new file with mode: 0644]
arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi [new file with mode: 0644]
arch/mips/boot/dts/pic32/pic32mzda.dtsi [new file with mode: 0644]
arch/mips/boot/dts/pic32/pic32mzda_sk.dts [new file with mode: 0644]
arch/mips/boot/dts/qca/Makefile
arch/mips/boot/dts/qca/ar9132.dtsi
arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
arch/mips/cavium-octeon/smp.c
arch/mips/configs/pic32mzda_defconfig [new file with mode: 0644]
arch/mips/include/asm/cacheops.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/fpu_emulator.h
arch/mips/include/asm/highmem.h
arch/mips/include/asm/io.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/mach-ath79/ath79.h
arch/mips/include/asm/mach-jz4740/jz4740_nand.h
arch/mips/include/asm/mach-pic32/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mach-pic32/irq.h [new file with mode: 0644]
arch/mips/include/asm/mach-pic32/pic32.h [new file with mode: 0644]
arch/mips/include/asm/mach-pic32/spaces.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/irq.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/mt7621.h [new file with mode: 0644]
arch/mips/include/asm/mach-ralink/mt7621/cpu-feature-overrides.h [new file with mode: 0644]
arch/mips/include/asm/mips-cm.h
arch/mips/include/asm/mips-r2-to-r6-emul.h
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/page.h
arch/mips/include/asm/pci.h
arch/mips/include/asm/pgtable.h
arch/mips/include/uapi/asm/inst.h
arch/mips/kernel/cpu-bugs64.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/elf.c
arch/mips/kernel/gpio_txx9.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/setup.c
arch/mips/kernel/smp-cps.c
arch/mips/kernel/sync-r4k.c
arch/mips/kernel/traps.c
arch/mips/kvm/callback.c
arch/mips/kvm/dyntrans.c
arch/mips/kvm/emulate.c
arch/mips/kvm/interrupt.c
arch/mips/kvm/locore.S
arch/mips/kvm/mips.c
arch/mips/kvm/opcode.h [deleted file]
arch/mips/kvm/tlb.c
arch/mips/kvm/trap_emul.c
arch/mips/lib/mips-atomic.c
arch/mips/loongson64/Platform
arch/mips/loongson64/loongson-3/hpet.c
arch/mips/loongson64/loongson-3/smp.c
arch/mips/math-emu/cp1emu.c
arch/mips/math-emu/dp_simple.c
arch/mips/math-emu/dp_tint.c
arch/mips/math-emu/dp_tlong.c
arch/mips/math-emu/dsemul.c
arch/mips/math-emu/ieee754.c
arch/mips/math-emu/ieee754.h
arch/mips/math-emu/ieee754dp.c
arch/mips/math-emu/ieee754int.h
arch/mips/math-emu/ieee754sp.c
arch/mips/math-emu/sp_fdp.c
arch/mips/math-emu/sp_simple.c
arch/mips/math-emu/sp_tint.c
arch/mips/math-emu/sp_tlong.c
arch/mips/mm/tlbex.c
arch/mips/pci/Makefile
arch/mips/pci/pci-mt7620.c [new file with mode: 0644]
arch/mips/pic32/Kconfig [new file with mode: 0644]
arch/mips/pic32/Makefile [new file with mode: 0644]
arch/mips/pic32/Platform [new file with mode: 0644]
arch/mips/pic32/common/Makefile [new file with mode: 0644]
arch/mips/pic32/common/irq.c [new file with mode: 0644]
arch/mips/pic32/common/reset.c [new file with mode: 0644]
arch/mips/pic32/pic32mzda/Makefile [new file with mode: 0644]
arch/mips/pic32/pic32mzda/config.c [new file with mode: 0644]
arch/mips/pic32/pic32mzda/early_clk.c [new file with mode: 0644]
arch/mips/pic32/pic32mzda/early_console.c [new file with mode: 0644]
arch/mips/pic32/pic32mzda/early_pin.c [new file with mode: 0644]
arch/mips/pic32/pic32mzda/early_pin.h [new file with mode: 0644]
arch/mips/pic32/pic32mzda/init.c [new file with mode: 0644]
arch/mips/pic32/pic32mzda/pic32mzda.h [new file with mode: 0644]
arch/mips/pic32/pic32mzda/time.c [new file with mode: 0644]
arch/mips/pmcs-msp71xx/msp_serial.c
arch/mips/pmcs-msp71xx/msp_setup.c
arch/mips/ralink/Kconfig
arch/mips/ralink/Makefile
arch/mips/ralink/Platform
arch/mips/ralink/irq-gic.c [new file with mode: 0644]
arch/mips/ralink/mt7620.c
arch/mips/ralink/mt7621.c [new file with mode: 0644]
arch/mips/ralink/rt288x.c
arch/mips/ralink/rt305x.c
arch/mips/ralink/rt3883.c
arch/mips/ralink/timer-gic.c [new file with mode: 0644]
arch/mips/rb532/gpio.c
arch/mips/txx9/generic/setup.c
arch/mips/vr41xx/common/pmu.c
arch/mn10300/include/asm/serial.h
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/book3s/64/pgtable.h
arch/powerpc/include/asm/eeh.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/pci-bridge.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/trace.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/include/uapi/asm/unistd.h
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/eeh_pe.c
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kvm/book3s_64_mmu.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/powerpc.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/perf/power8-pmu.c
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/pci.c
arch/powerpc/platforms/powernv/pci.h
arch/s390/include/asm/irqflags.h
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/pci_io.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/ptrace.h
arch/s390/include/uapi/asm/kvm.h
arch/s390/include/uapi/asm/unistd.h
arch/s390/kernel/compat_wrapper.c
arch/s390/kernel/crash_dump.c
arch/s390/kernel/debug.c
arch/s390/kernel/dumpstack.c
arch/s390/kernel/early.c
arch/s390/kernel/ftrace.c
arch/s390/kernel/ipl.c
arch/s390/kernel/kprobes.c
arch/s390/kernel/perf_event.c
arch/s390/kernel/process.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/signal.c
arch/s390/kernel/smp.c
arch/s390/kernel/stacktrace.c
arch/s390/kernel/syscalls.S
arch/s390/kernel/traps.c
arch/s390/kvm/Kconfig
arch/s390/kvm/Makefile
arch/s390/kvm/gaccess.c
arch/s390/kvm/gaccess.h
arch/s390/kvm/guestdbg.c
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/priv.c
arch/s390/mm/fault.c
arch/s390/mm/init.c
arch/s390/mm/mmap.c
arch/s390/mm/pgtable.c
arch/s390/numa/numa.c
arch/s390/oprofile/backtrace.c
arch/s390/pci/pci.c
arch/s390/pci/pci_event.c
arch/score/kernel/setup.c
arch/sh/include/asm/barrier.h
arch/sh/kernel/setup.c
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/entry.S
arch/sparc/kernel/head_64.S
arch/sparc/kernel/hvcalls.S
arch/sparc/kernel/sparc_ksyms_64.c
arch/sparc/kernel/syscalls.S
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/mm/init_64.c
arch/tile/kernel/kgdb.c
arch/tile/kernel/setup.c
arch/um/include/asm/page.h
arch/unicore32/include/asm/pci.h
arch/unicore32/kernel/setup.c
arch/x86/Kbuild
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/boot/cpuflags.h
arch/x86/boot/mkcpustr.c
arch/x86/crypto/chacha20-ssse3-x86_64.S
arch/x86/crypto/crc32-pclmul_glue.c
arch/x86/crypto/crc32c-intel_glue.c
arch/x86/crypto/crct10dif-pclmul_glue.c
arch/x86/crypto/sha-mb/sha1_mb.c
arch/x86/crypto/sha-mb/sha1_mb_mgr_submit_avx2.S
arch/x86/entry/calling.h
arch/x86/entry/common.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/entry/syscall_32.c
arch/x86/entry/syscall_64.c
arch/x86/entry/syscalls/syscall_64.tbl
arch/x86/entry/syscalls/syscalltbl.sh
arch/x86/entry/vdso/vdso2c.h
arch/x86/entry/vdso/vdso32-setup.c
arch/x86/entry/vdso/vdso32/system_call.S
arch/x86/entry/vdso/vma.c
arch/x86/entry/vsyscall/vsyscall_gtod.c
arch/x86/events/Makefile [new file with mode: 0644]
arch/x86/events/amd/core.c [moved from arch/x86/kernel/cpu/perf_event_amd.c with 99% similarity]
arch/x86/events/amd/ibs.c [moved from arch/x86/kernel/cpu/perf_event_amd_ibs.c with 98% similarity]
arch/x86/events/amd/iommu.c [moved from arch/x86/kernel/cpu/perf_event_amd_iommu.c with 99% similarity]
arch/x86/events/amd/iommu.h [moved from arch/x86/kernel/cpu/perf_event_amd_iommu.h with 100% similarity]
arch/x86/events/amd/uncore.c [moved from arch/x86/kernel/cpu/perf_event_amd_uncore.c with 99% similarity]
arch/x86/events/core.c [moved from arch/x86/kernel/cpu/perf_event.c with 99% similarity]
arch/x86/include/asm/alternative.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/arch_hweight.h
arch/x86/include/asm/barrier.h
arch/x86/include/asm/bitops.h
arch/x86/include/asm/clocksource.h
arch/x86/include/asm/cmpxchg.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/cpufeatures.h [new file with mode: 0644]
arch/x86/include/asm/dmi.h
arch/x86/include/asm/efi.h
arch/x86/include/asm/elf.h
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/fpu/internal.h
arch/x86/include/asm/frame.h
arch/x86/include/asm/irq.h
arch/x86/include/asm/irq_work.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/microcode.h
arch/x86/include/asm/mmu.h
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/smap.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/tlbflush.h
arch/x86/include/asm/uaccess_64.h
arch/x86/include/asm/vdso.h
arch/x86/include/asm/vgtod.h
arch/x86/kernel/apic/apic_numachip.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs_64.c
arch/x86/kernel/cpu/centaur.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/cyrix.c
arch/x86/kernel/cpu/hypervisor.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/match.c
arch/x86/kernel/cpu/mcheck/mce-inject.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/p5.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/cpu/mcheck/threshold.c
arch/x86/kernel/cpu/mcheck/winchip.c
arch/x86/kernel/cpu/microcode/amd.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/mkcapflags.sh
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/mtrr/centaur.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
arch/x86/kernel/cpu/rdrand.c
arch/x86/kernel/cpu/topology.c
arch/x86/kernel/cpu/transmeta.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/crash.c
arch/x86/kernel/e820.c
arch/x86/kernel/fpu/core.c
arch/x86/kernel/fpu/init.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/hpet.c
arch/x86/kernel/irq.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/msr.c
arch/x86/kernel/pmem.c
arch/x86/kernel/process.c
arch/x86/kernel/setup.c
arch/x86/kernel/signal.c
arch/x86/kernel/traps.c
arch/x86/kernel/verify_cpu.S
arch/x86/kernel/vm86_32.c
arch/x86/kernel/vmlinux.lds.S
arch/x86/kvm/irq.c
arch/x86/kvm/irq.h
arch/x86/kvm/irq_comm.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/pmu.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/lguest/boot.c
arch/x86/lib/clear_page_64.S
arch/x86/lib/cmdline.c
arch/x86/lib/copy_page_64.S
arch/x86/lib/copy_user_64.S
arch/x86/lib/memcpy_64.S
arch/x86/lib/memmove_64.S
arch/x86/lib/memset_64.S
arch/x86/mm/hugetlbpage.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/kasan_init_64.c
arch/x86/mm/numa.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/mm/setup_nx.c
arch/x86/oprofile/op_model_amd.c
arch/x86/pci/common.c
arch/x86/platform/efi/efi-bgrt.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_32.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/efi_stub_64.S
arch/x86/platform/efi/quirks.c
arch/x86/platform/intel-mid/intel-mid.c
arch/x86/platform/intel-quark/imr.c
arch/x86/um/asm/barrier.h
arch/x86/um/os-Linux/task_size.c
arch/x86/um/sys_call_table_32.c
arch/x86/um/sys_call_table_64.c
arch/x86/um/user-offsets.c
arch/x86/xen/enlighten.c
arch/xtensa/Kconfig
arch/xtensa/include/asm/io.h
arch/xtensa/include/asm/processor.h
arch/xtensa/include/asm/timex.h
arch/xtensa/kernel/traps.c
arch/xtensa/mm/Makefile
arch/xtensa/mm/ioremap.c [new file with mode: 0644]
arch/xtensa/platforms/iss/console.c
arch/xtensa/platforms/xt2000/setup.c
block/blk-core.c
block/blk-merge.c
block/blk-mq-sysfs.c
block/blk-mq.c
block/blk-mq.h
block/cfq-iosched.c
block/ioctl.c
block/partition-generic.c
crypto/Kconfig
crypto/Makefile
crypto/ahash.c
crypto/algapi.c
crypto/algif_hash.c
crypto/algif_skcipher.c
crypto/asymmetric_keys/pkcs7_parser.c
crypto/crc32_generic.c [moved from crypto/crc32.c with 98% similarity]
crypto/crypto_engine.c [new file with mode: 0644]
crypto/crypto_user.c
crypto/drbg.c
crypto/internal.h
crypto/keywrap.c
crypto/mcryptd.c
crypto/pcompress.c [deleted file]
crypto/shash.c
crypto/skcipher.c
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
crypto/zlib.c [deleted file]
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_platform.c
drivers/acpi/apei/einj.c
drivers/acpi/video_detect.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_brcmstb.c
drivers/ata/ahci_xgene.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-sff.c
drivers/ata/pata_macio.c
drivers/atm/firestream.c
drivers/base/bus.c
drivers/base/component.c
drivers/base/core.c
drivers/base/dma-coherent.c
drivers/base/platform-msi.c
drivers/base/platform.c
drivers/base/power/common.c
drivers/base/power/domain.c
drivers/base/power/opp/core.c
drivers/base/power/opp/opp.h
drivers/base/regmap/regcache.c
drivers/base/regmap/regmap-irq.c
drivers/base/regmap/regmap-mmio.c
drivers/base/regmap/regmap.c
drivers/bcma/bcma_private.h
drivers/bcma/driver_chipcommon.c
drivers/bcma/driver_chipcommon_pmu.c
drivers/bcma/driver_chipcommon_sflash.c
drivers/bcma/driver_gpio.c
drivers/bcma/driver_mips.c
drivers/bcma/host_pci.c
drivers/bcma/scan.c
drivers/block/cryptoloop.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_worker.c
drivers/block/rbd.c
drivers/bluetooth/ath3k.c
drivers/bus/Kconfig
drivers/bus/sunxi-rsb.c
drivers/bus/vexpress-config.c
drivers/char/agp/intel-gtt.c
drivers/char/agp/uninorth-agp.c
drivers/char/hw_random/Kconfig
drivers/char/hw_random/bcm63xx-rng.c
drivers/char/hw_random/n2-drv.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
drivers/char/pcmcia/synclink_cs.c
drivers/char/ttyprintk.c
drivers/clk/rockchip/clk-rk3036.c
drivers/clk/rockchip/clk-rk3188.c
drivers/clk/rockchip/clk-rk3228.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/rockchip/clk-rk3368.c
drivers/clk/rockchip/clk.c
drivers/clk/rockchip/clk.h
drivers/clk/sunxi/clk-a10-hosc.c
drivers/clk/sunxi/clk-a20-gmac.c
drivers/clk/sunxi/clk-factors.c
drivers/clk/sunxi/clk-factors.h
drivers/clk/sunxi/clk-mod0.c
drivers/clk/sunxi/clk-simple-gates.c
drivers/clk/sunxi/clk-sun6i-apb0-gates.c
drivers/clk/sunxi/clk-sun6i-ar100.c
drivers/clk/sunxi/clk-sun8i-bus-gates.c
drivers/clk/sunxi/clk-sun8i-mbus.c
drivers/clk/sunxi/clk-sun9i-core.c
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/sunxi/clk-usb.c
drivers/clk/tegra/clk-emc.c
drivers/clk/tegra/clk-id.h
drivers/clk/tegra/clk-pll.c
drivers/clk/tegra/clk-tegra-periph.c
drivers/clk/tegra/clk-tegra-super-gen4.c
drivers/clk/tegra/clk-tegra210.c
drivers/clocksource/Kconfig
drivers/clocksource/tcb_clksrc.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_governor.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/cpufreq/cpufreq_performance.c
drivers/cpufreq/cpufreq_powersave.c
drivers/cpufreq/cpufreq_userspace.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/powernv-cpufreq.c
drivers/cpufreq/pxa2xx-cpufreq.c
drivers/cpufreq/s5pv210-cpufreq.c
drivers/cpuidle/coupled.c
drivers/cpuidle/cpuidle.c
drivers/crypto/Kconfig
drivers/crypto/atmel-aes.c
drivers/crypto/atmel-sha-regs.h
drivers/crypto/atmel-sha.c
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/regs.h
drivers/crypto/ccp/ccp-crypto-aes-cmac.c
drivers/crypto/ccp/ccp-crypto-sha.c
drivers/crypto/ccp/ccp-crypto.h
drivers/crypto/ixp4xx_crypto.c
drivers/crypto/marvell/cesa.c
drivers/crypto/nx/nx-842.c
drivers/crypto/omap-aes.c
drivers/crypto/qat/qat_common/adf_accel_devices.h
drivers/crypto/qat/qat_common/adf_aer.c
drivers/crypto/qat/qat_common/adf_cfg_user.h
drivers/crypto/qat/qat_common/qat_algs.c
drivers/crypto/s5p-sss.c
drivers/crypto/sahara.c
drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
drivers/dma-buf/dma-buf.c
drivers/dma/dmaengine.c
drivers/dma/dw/core.c
drivers/dma/dw/pci.c
drivers/dma/dw/regs.h
drivers/dma/edma.c
drivers/dma/ep93xx_dma.c
drivers/dma/ioat/dma.c
drivers/dma/ioat/init.c
drivers/dma/ioat/prep.c
drivers/dma/pl330.c
drivers/dma/sh/Kconfig
drivers/edac/amd64_edac.c
drivers/edac/debugfs.c
drivers/edac/edac_mc.c
drivers/edac/edac_pci.c
drivers/edac/mpc85xx_edac.c
drivers/edac/xgene_edac.c
drivers/firmware/Kconfig
drivers/firmware/Makefile
drivers/firmware/efi/efi.c
drivers/firmware/efi/efivars.c
drivers/firmware/efi/esrt.c
drivers/firmware/efi/runtime-wrappers.c
drivers/firmware/efi/vars.c
drivers/firmware/psci.c
drivers/firmware/qcom_scm-32.c
drivers/firmware/qcom_scm-64.c
drivers/firmware/qcom_scm.c
drivers/firmware/qcom_scm.h
drivers/firmware/qemu_fw_cfg.c [new file with mode: 0644]
drivers/gpu/drm/Kconfig
drivers/gpu/drm/Makefile
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
drivers/gpu/drm/amd/amdgpu/iceland_smc.c
drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
drivers/gpu/drm/arm/Kconfig [new file with mode: 0644]
drivers/gpu/drm/arm/Makefile [new file with mode: 0644]
drivers/gpu/drm/arm/hdlcd_crtc.c [new file with mode: 0644]
drivers/gpu/drm/arm/hdlcd_drv.c [new file with mode: 0644]
drivers/gpu/drm/arm/hdlcd_drv.h [new file with mode: 0644]
drivers/gpu/drm/arm/hdlcd_regs.h [new file with mode: 0644]
drivers/gpu/drm/armada/armada_drv.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_crtc.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_encoder_slave.c
drivers/gpu/drm/drm_fb_cma_helper.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/drm_prime.c
drivers/gpu/drm/etnaviv/common.xml.h
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_dump.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem.h
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/state_hi.xml.h
drivers/gpu/drm/exynos/exynos_dp_core.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_ipp.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/intel_gmbus.c
drivers/gpu/drm/gma500/mdfld_dsi_output.c
drivers/gpu/drm/gma500/psb_drv.c
drivers/gpu/drm/i2c/adv7511.c
drivers/gpu/drm/i2c/adv7511.h
drivers/gpu/drm/i2c/ch7006_drv.c
drivers/gpu/drm/i915/Kconfig
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_fence.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gem_shrinker.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_guc_reg.h
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_params.c
drivers/gpu/drm/i915/i915_params.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/intel_atomic.c
drivers/gpu/drm/i915/intel_atomic_plane.c
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_csr.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi.h
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
drivers/gpu/drm/i915/intel_dsi_pll.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_guc.h
drivers/gpu/drm/i915/intel_guc_fwif.h
drivers/gpu/drm/i915/intel_guc_loader.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_psr.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/i915/intel_sdvo_regs.h
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/imx/imx-drm-core.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/nouveau/nouveau_acpi.c
drivers/gpu/drm/nouveau/nouveau_connector.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/omapdrm/omap_crtc.c
drivers/gpu/drm/omapdrm/omap_drv.c
drivers/gpu/drm/omapdrm/omap_drv.h
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/radeon/dce6_afmt.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_audio.h
drivers/gpu/drm/radeon/radeon_combios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/vce_v1_0.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.c
drivers/gpu/drm/rcar-du/rcar_du_crtc.h
drivers/gpu/drm/rcar-du/rcar_du_drv.c
drivers/gpu/drm/rockchip/Makefile
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
drivers/gpu/drm/rockchip/rockchip_drm_drv.c
drivers/gpu/drm/rockchip/rockchip_drm_fb.c
drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
drivers/gpu/drm/rockchip/rockchip_drm_gem.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.c
drivers/gpu/drm/shmobile/shmob_drm_crtc.h
drivers/gpu/drm/shmobile/shmob_drm_drv.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tilcdc/tilcdc_crtc.c
drivers/gpu/drm/tilcdc/tilcdc_drv.c
drivers/gpu/drm/tilcdc/tilcdc_drv.h
drivers/gpu/drm/udl/udl_fb.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_drv.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_v3d.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_plane.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
drivers/gpu/host1x/bus.c
drivers/gpu/host1x/job.c
drivers/gpu/vga/vga_switcheroo.c
drivers/hid/hid-core.c
drivers/hid/hid-dr.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-multitouch.c
drivers/hid/hid-rmi.c
drivers/hid/hid-sony.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.c
drivers/hsi/clients/nokia-modem.c
drivers/hsi/clients/ssi_protocol.c
drivers/hwmon/Kconfig
drivers/hwmon/Makefile
drivers/hwmon/dell-smm-hwmon.c
drivers/hwmon/emc2103.c
drivers/hwmon/fam15h_power.c
drivers/hwmon/ltc2990.c [new file with mode: 0644]
drivers/hwmon/vexpress-hwmon.c [moved from drivers/hwmon/vexpress.c with 100% similarity]
drivers/hwspinlock/hwspinlock_core.c
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-piix4.c
drivers/ide/pdc202xx_new.c
drivers/ide/pmac.c
drivers/iio/accel/Kconfig
drivers/iio/adc/Kconfig
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/dac/mcp4725.c
drivers/iio/humidity/dht11.c
drivers/iio/imu/adis_buffer.c
drivers/iio/imu/inv_mpu6050/Kconfig
drivers/iio/inkern.c
drivers/iio/light/acpi-als.c
drivers/iio/light/ltr501.c
drivers/iio/pressure/mpl115.c
drivers/iio/proximity/pulsedlight-lidar-lite-v2.c
drivers/infiniband/core/ud_header.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
drivers/input/joystick/xpad.c
drivers/input/keyboard/adp5589-keys.c
drivers/input/keyboard/cap11xx.c
drivers/input/misc/Kconfig
drivers/input/misc/sirfsoc-onkey.c
drivers/input/mouse/Kconfig
drivers/input/mouse/Makefile
drivers/input/mouse/byd.c [new file with mode: 0644]
drivers/input/mouse/byd.h [new file with mode: 0644]
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/psmouse.h
drivers/input/mouse/vmmouse.c
drivers/input/serio/serio.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/colibri-vf50-ts.c
drivers/input/touchscreen/cyttsp_core.c
drivers/input/touchscreen/cyttsp_core.h
drivers/input/touchscreen/cyttsp_i2c.c
drivers/input/touchscreen/cyttsp_spi.c
drivers/input/touchscreen/edt-ft5x06.c
drivers/input/touchscreen/wdt87xx_i2c.c
drivers/iommu/amd_iommu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel-svm.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-atmel-aic-common.c
drivers/irqchip/irq-atmel-aic-common.h
drivers/irqchip/irq-atmel-aic.c
drivers/irqchip/irq-atmel-aic5.c
drivers/irqchip/irq-bcm6345-l1.c [new file with mode: 0644]
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-mxs.c
drivers/irqchip/irq-pic32-evic.c [new file with mode: 0644]
drivers/irqchip/irq-s3c24xx.c
drivers/irqchip/irq-sun4i.c
drivers/isdn/i4l/isdn_tty.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/led-class.c
drivers/leds/led-core.c
drivers/leds/leds-gpio.c
drivers/leds/leds-lp3944.c
drivers/leds/leds-sn3218.c [new file with mode: 0644]
drivers/macintosh/macio_asic.c
drivers/mailbox/Kconfig
drivers/mailbox/pcc.c
drivers/md/bitmap.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-delay.c
drivers/md/dm-flakey.c
drivers/md/dm-ioctl.c
drivers/md/dm-log-writes.c
drivers/md/dm-mpath.c
drivers/md/dm-raid1.c
drivers/md/dm-snap.c
drivers/md/dm-table.c
drivers/md/dm-target.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-thin.c
drivers/md/dm-verity-fec.c
drivers/md/dm-verity-target.c
drivers/md/dm.c
drivers/md/dm.h
drivers/md/faulty.c
drivers/md/md-cluster.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/media/common/b2c2/flexcop-fe-tuner.c
drivers/media/common/b2c2/flexcop.c
drivers/media/common/cypress_firmware.c
drivers/media/common/cypress_firmware.h
drivers/media/dvb-core/dvb-usb-ids.h
drivers/media/dvb-core/dvb_frontend.c
drivers/media/dvb-core/dvbdev.c
drivers/media/dvb-frontends/af9013.c
drivers/media/dvb-frontends/af9033.c
drivers/media/dvb-frontends/bcm3510.c
drivers/media/dvb-frontends/bcm3510.h
drivers/media/dvb-frontends/bcm3510_priv.h
drivers/media/dvb-frontends/cxd2820r_core.c
drivers/media/dvb-frontends/dib0070.c
drivers/media/dvb-frontends/dib0090.c
drivers/media/dvb-frontends/dib3000.h
drivers/media/dvb-frontends/dib3000mb.c
drivers/media/dvb-frontends/dib3000mb_priv.h
drivers/media/dvb-frontends/dib3000mc.c
drivers/media/dvb-frontends/dib3000mc.h
drivers/media/dvb-frontends/dib7000m.c
drivers/media/dvb-frontends/dib7000p.c
drivers/media/dvb-frontends/dib8000.c
drivers/media/dvb-frontends/dib9000.c
drivers/media/dvb-frontends/dibx000_common.c
drivers/media/dvb-frontends/rtl2830.c
drivers/media/dvb-frontends/si2165.c
drivers/media/dvb-frontends/stv0299.c
drivers/media/dvb-frontends/stv6110x.c
drivers/media/dvb-frontends/stv6110x.h
drivers/media/dvb-frontends/stv6110x_priv.h
drivers/media/dvb-frontends/tda1004x.c
drivers/media/dvb-frontends/ts2020.c
drivers/media/i2c/adv7604.c
drivers/media/i2c/ir-kbd-i2c.c
drivers/media/i2c/msp3400-driver.c
drivers/media/i2c/msp3400-driver.h
drivers/media/i2c/mt9v011.c
drivers/media/i2c/mt9v032.c
drivers/media/i2c/ov2659.c
drivers/media/i2c/s5c73m3/s5c73m3-core.c
drivers/media/i2c/s5c73m3/s5c73m3-spi.c
drivers/media/i2c/s5k5baf.c
drivers/media/i2c/s5k6a3.c
drivers/media/i2c/saa7115.c
drivers/media/i2c/soc_camera/mt9m001.c
drivers/media/i2c/soc_camera/mt9t031.c
drivers/media/i2c/soc_camera/mt9v022.c
drivers/media/i2c/tc358743.c
drivers/media/i2c/tvp514x.c
drivers/media/i2c/tvp5150.c
drivers/media/i2c/tvp7002.c
drivers/media/i2c/vpx3220.c
drivers/media/media-device.c
drivers/media/media-devnode.c
drivers/media/media-entity.c
drivers/media/pci/b2c2/flexcop-pci.c
drivers/media/pci/bt8xx/bttv-driver.c
drivers/media/pci/bt8xx/dvb-bt8xx.c
drivers/media/pci/ddbridge/ddbridge-core.c
drivers/media/pci/netup_unidvb/netup_unidvb_core.c
drivers/media/pci/ngene/ngene-cards.c
drivers/media/pci/saa7134/saa7134-alsa.c
drivers/media/pci/saa7134/saa7134-empress.c
drivers/media/pci/saa7134/saa7134-go7007.c
drivers/media/pci/saa7134/saa7134-video.c
drivers/media/pci/ttpci/av7110.c
drivers/media/pci/ttpci/budget.c
drivers/media/platform/Kconfig
drivers/media/platform/Makefile
drivers/media/platform/coda/coda-bit.c
drivers/media/platform/davinci/dm644x_ccdc.c
drivers/media/platform/exynos-gsc/gsc-m2m.c
drivers/media/platform/exynos4-is/Kconfig
drivers/media/platform/exynos4-is/fimc-is.c
drivers/media/platform/exynos4-is/fimc-isp-video.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/exynos4-is/mipi-csis.c
drivers/media/platform/omap3isp/isp.c
drivers/media/platform/omap3isp/ispccdc.c
drivers/media/platform/omap3isp/isppreview.c
drivers/media/platform/omap3isp/ispvideo.c
drivers/media/platform/omap3isp/omap3isp.h
drivers/media/platform/rcar_jpu.c
drivers/media/platform/soc_camera/atmel-isi.c
drivers/media/platform/soc_camera/rcar_vin.c
drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
drivers/media/platform/soc_camera/soc_camera.c
drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
drivers/media/platform/ti-vpe/Makefile
drivers/media/platform/ti-vpe/cal.c [new file with mode: 0644]
drivers/media/platform/ti-vpe/cal_regs.h [new file with mode: 0644]
drivers/media/platform/vim2m.c
drivers/media/platform/vivid/vivid-tpg.h
drivers/media/platform/vsp1/vsp1_drv.c
drivers/media/platform/vsp1/vsp1_video.c
drivers/media/radio/radio-si476x.c
drivers/media/radio/wl128x/fmdrv_common.c
drivers/media/rc/nuvoton-cir.c
drivers/media/rc/nuvoton-cir.h
drivers/media/rc/rc-core-priv.h
drivers/media/rc/rc-ir-raw.c
drivers/media/tuners/m88rs6000t.c
drivers/media/tuners/r820t.c
drivers/media/tuners/si2157.c
drivers/media/tuners/tuner-xc2028.c
drivers/media/usb/as102/as102_drv.h
drivers/media/usb/as102/as102_usb_drv.c
drivers/media/usb/au0828/au0828-core.c
drivers/media/usb/au0828/au0828-dvb.c
drivers/media/usb/b2c2/flexcop-usb.c
drivers/media/usb/cx231xx/cx231xx-417.c
drivers/media/usb/cx231xx/cx231xx-audio.c
drivers/media/usb/cx231xx/cx231xx-cards.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/dvb-usb-v2/af9035.h
drivers/media/usb/dvb-usb-v2/dvb_usb.h
drivers/media/usb/dvb-usb-v2/dvb_usb_common.h
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
drivers/media/usb/dvb-usb-v2/dvb_usb_urb.c
drivers/media/usb/dvb-usb-v2/dvbsky.c
drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
drivers/media/usb/dvb-usb-v2/mxl111sf.c
drivers/media/usb/dvb-usb-v2/rtl28xxu.c
drivers/media/usb/dvb-usb-v2/usb_urb.c
drivers/media/usb/dvb-usb/a800.c
drivers/media/usb/dvb-usb/cxusb.c
drivers/media/usb/dvb-usb/dib0700_core.c
drivers/media/usb/dvb-usb/dib0700_devices.c
drivers/media/usb/dvb-usb/dibusb-common.c
drivers/media/usb/dvb-usb/dibusb-mb.c
drivers/media/usb/dvb-usb/dibusb-mc.c
drivers/media/usb/dvb-usb/dibusb.h
drivers/media/usb/dvb-usb/digitv.c
drivers/media/usb/dvb-usb/dtt200u-fe.c
drivers/media/usb/dvb-usb/dtt200u.c
drivers/media/usb/dvb-usb/dtt200u.h
drivers/media/usb/dvb-usb/dvb-usb-common.h
drivers/media/usb/dvb-usb/dvb-usb-dvb.c
drivers/media/usb/dvb-usb/dvb-usb-firmware.c
drivers/media/usb/dvb-usb/dvb-usb-i2c.c
drivers/media/usb/dvb-usb/dvb-usb-init.c
drivers/media/usb/dvb-usb/dvb-usb-remote.c
drivers/media/usb/dvb-usb/dvb-usb-urb.c
drivers/media/usb/dvb-usb/dvb-usb.h
drivers/media/usb/dvb-usb/dw2102.c
drivers/media/usb/dvb-usb/nova-t-usb2.c
drivers/media/usb/dvb-usb/technisat-usb2.c
drivers/media/usb/dvb-usb/ttusb2.c
drivers/media/usb/dvb-usb/umt-010.c
drivers/media/usb/dvb-usb/usb-urb.c
drivers/media/usb/dvb-usb/vp702x-fe.c
drivers/media/usb/dvb-usb/vp702x.c
drivers/media/usb/dvb-usb/vp7045-fe.c
drivers/media/usb/dvb-usb/vp7045.c
drivers/media/usb/dvb-usb/vp7045.h
drivers/media/usb/em28xx/em28xx-camera.c
drivers/media/usb/em28xx/em28xx-cards.c
drivers/media/usb/em28xx/em28xx-dvb.c
drivers/media/usb/em28xx/em28xx-video.c
drivers/media/usb/em28xx/em28xx.h
drivers/media/usb/go7007/go7007-priv.h
drivers/media/usb/go7007/go7007-usb.c
drivers/media/usb/hdpvr/hdpvr-core.c
drivers/media/usb/hdpvr/hdpvr-video.c
drivers/media/usb/msi2500/msi2500.c
drivers/media/usb/pwc/pwc-if.c
drivers/media/usb/stk1160/stk1160-video.c
drivers/media/usb/usbtv/usbtv-video.c
drivers/media/usb/usbtv/usbtv.h
drivers/media/usb/usbvision/usbvision-video.c
drivers/media/v4l2-core/Kconfig
drivers/media/v4l2-core/tuner-core.c
drivers/media/v4l2-core/v4l2-compat-ioctl32.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/media/v4l2-core/v4l2-of.c
drivers/media/v4l2-core/videobuf2-core.c
drivers/media/v4l2-core/videobuf2-v4l2.c
drivers/memory/fsl_ifc.c
drivers/mfd/Kconfig
drivers/mfd/Makefile
drivers/mfd/ab8500-core.c
drivers/mfd/db8500-prcmu.c
drivers/mfd/intel-lpss-pci.c
drivers/mfd/intel-lpss.c
drivers/mfd/lpc_ich.c
drivers/mfd/syscon.c
drivers/mfd/tps65010.c
drivers/mfd/tps65086.c [new file with mode: 0644]
drivers/misc/cxl/pci.c
drivers/misc/mei/main.c
drivers/mmc/card/block.c
drivers/mmc/card/mmc_test.c
drivers/mmc/card/sdio_uart.c
drivers/mmc/core/core.c
drivers/mmc/core/debugfs.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/core/mmc_ops.c
drivers/mmc/core/pwrseq_simple.c
drivers/mmc/core/sd_ops.c
drivers/mmc/core/sdio_ops.c
drivers/mmc/host/Kconfig
drivers/mmc/host/Makefile
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/bfin_sdh.c
drivers/mmc/host/davinci_mmc.c
drivers/mmc/host/dw_mmc-exynos.c
drivers/mmc/host/dw_mmc-pltfm.c
drivers/mmc/host/dw_mmc-rockchip.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/dw_mmc.h
drivers/mmc/host/jz4740_mmc.c
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/omap_hsmmc.c
drivers/mmc/host/pxamci.c
drivers/mmc/host/s3cmci.c
drivers/mmc/host/sdhci-acpi.c
drivers/mmc/host/sdhci-iproc.c
drivers/mmc/host/sdhci-of-arasan.c
drivers/mmc/host/sdhci-of-at91.c
drivers/mmc/host/sdhci-pic32.c [new file with mode: 0644]
drivers/mmc/host/sdricoh_cs.c
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/sh_mobile_sdhi.c
drivers/mmc/host/sunxi-mmc.c
drivers/mmc/host/tmio_mmc_dma.c
drivers/mmc/host/tmio_mmc_pio.c
drivers/mmc/host/usdhi6rol0.c
drivers/mtd/bcm47xxpart.c
drivers/mtd/bcm63xxpart.c
drivers/mtd/nand/Kconfig
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
drivers/mtd/nand/jz4740_nand.c
drivers/mtd/nand/lpc32xx_mlc.c
drivers/mtd/nand/mpc5121_nfc.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/nand_ids.c
drivers/mtd/nand/nuc900_nand.c
drivers/mtd/nand/plat_nand.c
drivers/mtd/nand/pxa3xx_nand.c
drivers/mtd/nand/s3c2410.c
drivers/mtd/nand/sunxi_nand.c
drivers/mtd/nand/vf610_nfc.c
drivers/mtd/onenand/onenand_bbt.c
drivers/mtd/spi-nor/Kconfig
drivers/mtd/spi-nor/mtk-quadspi.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_options.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/aurora/nb8800.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/vnic_dev.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ezchip/Kconfig
drivers/net/ethernet/freescale/Makefile
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
drivers/net/ethernet/hisilicon/hns/hnae.c
drivers/net/ethernet/hisilicon/hns/hnae.h
drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
drivers/net/ethernet/hisilicon/hns/hns_enet.c
drivers/net/ethernet/hisilicon/hns/hns_enet.h
drivers/net/ethernet/hp/hp100.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_dcb.c
drivers/net/ethernet/intel/i40e/i40e_devids.h
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40evf.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlxsw/reg.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum.h
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/moxa/moxart_ether.c
drivers/net/ethernet/moxa/moxart_ether.h
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/nuvoton/w90p910_ether.c
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/renesas/ravb_ptp.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/samsung/sxgbe/Makefile
drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c [deleted file]
drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h [deleted file]
drivers/net/ethernet/sun/sungem.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/synopsys/dwc_eth_qos.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/ethernet/toshiba/spider_net.c
drivers/net/fddi/defxx.c
drivers/net/geneve.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/irda/bfin_sir.h
drivers/net/irda/irtty-sir.c
drivers/net/macvlan.c
drivers/net/phy/Kconfig
drivers/net/phy/dp83640.c
drivers/net/phy/phy.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_mppe.c
drivers/net/ppp/pptp.c
drivers/net/team/team.c
drivers/net/usb/lan78xx.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath10k/mac.c
drivers/net/wireless/ath/ath9k/eeprom.c
drivers/net/wireless/ath/ath9k/htc_drv_main.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/carl9170/main.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wil6210/cfg80211.c
drivers/net/wireless/broadcom/b43/main.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/intel/iwlegacy/4965-mac.c
drivers/net/wireless/intel/iwlegacy/4965.h
drivers/net/wireless/intel/iwlwifi/Kconfig
drivers/net/wireless/intel/iwlwifi/dvm/led.c
drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/iwl-7000.c
drivers/net/wireless/intel/iwlwifi/iwl-9000.c
drivers/net/wireless/intel/iwlwifi/iwl-config.h
drivers/net/wireless/intel/iwlwifi/iwl-fh.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
drivers/net/wireless/intel/iwlwifi/iwl-trans.h
drivers/net/wireless/intel/iwlwifi/mvm/constants.h
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
drivers/net/wireless/intel/iwlwifi/mvm/ops.c
drivers/net/wireless/intel/iwlwifi/mvm/power.c
drivers/net/wireless/intel/iwlwifi/mvm/quota.c
drivers/net/wireless/intel/iwlwifi/mvm/rs.c
drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.h
drivers/net/wireless/intel/iwlwifi/mvm/tt.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/mvm/utils.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/internal.h
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intel/iwlwifi/pcie/tx.c
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/intersil/orinoco/mic.c
drivers/net/wireless/intersil/orinoco/mic.h
drivers/net/wireless/intersil/orinoco/orinoco.h
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/marvell/libertas/cfg.c
drivers/net/wireless/marvell/libertas/cmd.c
drivers/net/wireless/marvell/libertas/cmdresp.c
drivers/net/wireless/marvell/libertas/dev.h
drivers/net/wireless/marvell/libertas/if_sdio.c
drivers/net/wireless/marvell/libertas/if_usb.c
drivers/net/wireless/marvell/libertas/main.c
drivers/net/wireless/marvell/mwifiex/README
drivers/net/wireless/marvell/mwifiex/cfg80211.c
drivers/net/wireless/marvell/mwifiex/cmdevt.c
drivers/net/wireless/marvell/mwifiex/debugfs.c
drivers/net/wireless/marvell/mwifiex/decl.h
drivers/net/wireless/marvell/mwifiex/fw.h
drivers/net/wireless/marvell/mwifiex/init.c
drivers/net/wireless/marvell/mwifiex/ioctl.h
drivers/net/wireless/marvell/mwifiex/join.c
drivers/net/wireless/marvell/mwifiex/main.c
drivers/net/wireless/marvell/mwifiex/main.h
drivers/net/wireless/marvell/mwifiex/pcie.c
drivers/net/wireless/marvell/mwifiex/pcie.h
drivers/net/wireless/marvell/mwifiex/scan.c
drivers/net/wireless/marvell/mwifiex/sdio.c
drivers/net/wireless/marvell/mwifiex/sta_cmd.c
drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
drivers/net/wireless/marvell/mwifiex/sta_event.c
drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
drivers/net/wireless/marvell/mwifiex/wmm.c
drivers/net/wireless/marvell/mwl8k.c
drivers/net/wireless/mediatek/mt7601u/main.c
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
drivers/net/wireless/ralink/rt2x00/rt2800lib.h
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00config.c
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
drivers/net/wireless/ralink/rt2x00/rt61pci.c
drivers/net/wireless/ralink/rt2x00/rt61pci.h
drivers/net/wireless/ralink/rt2x00/rt73usb.c
drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/net/wireless/realtek/rtlwifi/rc.c
drivers/net/wireless/realtek/rtlwifi/regd.c
drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
drivers/net/wireless/rsi/rsi_91x_mac80211.c
drivers/net/wireless/st/cw1200/sta.c
drivers/net/wireless/st/cw1200/sta.h
drivers/net/wireless/ti/wlcore/Kconfig
drivers/net/wireless/ti/wlcore/event.c
drivers/net/wireless/ti/wlcore/main.c
drivers/net/wireless/ti/wlcore/spi.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nfc/s3fwrn5/firmware.c
drivers/nvdimm/e820.c
drivers/nvdimm/namespace_devs.c
drivers/nvdimm/pfn_devs.c
drivers/nvme/host/Kconfig
drivers/nvme/host/Makefile
drivers/nvme/host/core.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvmem/core.c
drivers/nvmem/qfprom.c
drivers/of/fdt.c
drivers/of/fdt_address.c
drivers/of/irq.c
drivers/of/of_mdio.c
drivers/of/of_pci.c
drivers/parisc/eisa_enumerator.c
drivers/pci/bus.c
drivers/pci/host/pci-imx6.c
drivers/pci/host/pci-layerscape.c
drivers/pci/host/pci-mvebu.c
drivers/pci/host/pcie-designware.c
drivers/pci/host/pcie-iproc.c
drivers/pci/host/pcie-rcar.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/pci.c
drivers/pci/pcie/aer/aer_inject.c
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/pcie/aer/aerdrv.h
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/pme.c
drivers/pci/probe.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/phy/phy-core.c
drivers/platform/x86/alienware-wmi.c
drivers/platform/x86/apple-gmux.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel_telemetry_debugfs.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/pnp/quirks.c
drivers/ptp/ptp_ixp46x.c
drivers/rapidio/rio.c
drivers/regulator/Kconfig
drivers/regulator/Makefile
drivers/regulator/act8945a-regulator.c [new file with mode: 0644]
drivers/regulator/axp20x-regulator.c
drivers/regulator/core.c
drivers/regulator/da9210-regulator.c
drivers/regulator/fan53555.c
drivers/regulator/lp872x.c
drivers/regulator/mt6397-regulator.c
drivers/regulator/pbias-regulator.c
drivers/regulator/qcom_saw-regulator.c [new file with mode: 0644]
drivers/rtc/Kconfig
drivers/rtc/Makefile
drivers/rtc/rtc-max77686.c
drivers/rtc/rtc-max77802.c [deleted file]
drivers/rtc/rtc-rx8025.c
drivers/s390/char/con3215.c
drivers/s390/cio/chp.c
drivers/s390/cio/chp.h
drivers/s390/cio/chsc.c
drivers/s390/crypto/zcrypt_error.h
drivers/s390/crypto/zcrypt_msgtype50.c
drivers/s390/crypto/zcrypt_msgtype6.c
drivers/scsi/hisi_sas/Kconfig
drivers/scsi/iscsi_tcp.c
drivers/scsi/iscsi_tcp.h
drivers/scsi/libiscsi_tcp.c
drivers/scsi/mac53c94.c
drivers/scsi/mesh.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/sr.c
drivers/scsi/ufs/ufshcd.c
drivers/sh/superhyway/superhyway.c
drivers/soc/Kconfig
drivers/soc/Makefile
drivers/soc/qcom/smd.c
drivers/soc/qcom/spm.c
drivers/soc/rockchip/pm_domains.c
drivers/soc/samsung/Kconfig [new file with mode: 0644]
drivers/soc/samsung/Makefile [new file with mode: 0644]
drivers/soc/samsung/exynos-pmu.c [new file with mode: 0644]
drivers/soc/samsung/exynos-pmu.h [new file with mode: 0644]
drivers/soc/samsung/exynos-srom.c [new file with mode: 0644]
drivers/soc/samsung/exynos-srom.h [new file with mode: 0644]
drivers/soc/samsung/exynos3250-pmu.c [new file with mode: 0644]
drivers/soc/samsung/exynos4-pmu.c [new file with mode: 0644]
drivers/soc/samsung/exynos5250-pmu.c [new file with mode: 0644]
drivers/soc/samsung/exynos5420-pmu.c [new file with mode: 0644]
drivers/soc/sunxi/sunxi_sram.c
drivers/soc/tegra/Kconfig
drivers/soc/tegra/pmc.c
drivers/spi/Kconfig
drivers/spi/Makefile
drivers/spi/spi-atmel.c
drivers/spi/spi-axi-spi-engine.c [new file with mode: 0644]
drivers/spi/spi-bcm2835aux.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-dw-mmio.c
drivers/spi/spi-fsl-espi.c
drivers/spi/spi-imx.c
drivers/spi/spi-loopback-test.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-pxa2xx.h
drivers/spi/spi-rockchip.c
drivers/spi/spi-ti-qspi.c
drivers/spi/spi.c
drivers/spmi/spmi-pmic-arb.c
drivers/ssb/main.c
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_test.c
drivers/staging/dgap/dgap.c
drivers/staging/dgnc/dgnc_tty.c
drivers/staging/iio/adc/Kconfig
drivers/staging/iio/meter/ade7753.c
drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
drivers/staging/media/davinci_vpfe/davinci_vpfe_user.h
drivers/staging/media/lirc/lirc_parallel.c
drivers/staging/mt29f_spinand/mt29f_spinand.h
drivers/staging/panel/panel.c
drivers/staging/rdma/Kconfig
drivers/staging/rdma/Makefile
drivers/staging/rdma/amso1100/Kbuild [deleted file]
drivers/staging/rdma/amso1100/Kconfig [deleted file]
drivers/staging/rdma/amso1100/TODO [deleted file]
drivers/staging/rdma/amso1100/c2.c [deleted file]
drivers/staging/rdma/amso1100/c2.h [deleted file]
drivers/staging/rdma/amso1100/c2_ae.c [deleted file]
drivers/staging/rdma/amso1100/c2_ae.h [deleted file]
drivers/staging/rdma/amso1100/c2_alloc.c [deleted file]
drivers/staging/rdma/amso1100/c2_cm.c [deleted file]
drivers/staging/rdma/amso1100/c2_cq.c [deleted file]
drivers/staging/rdma/amso1100/c2_intr.c [deleted file]
drivers/staging/rdma/amso1100/c2_mm.c [deleted file]
drivers/staging/rdma/amso1100/c2_mq.c [deleted file]
drivers/staging/rdma/amso1100/c2_mq.h [deleted file]
drivers/staging/rdma/amso1100/c2_pd.c [deleted file]
drivers/staging/rdma/amso1100/c2_provider.c [deleted file]
drivers/staging/rdma/amso1100/c2_provider.h [deleted file]
drivers/staging/rdma/amso1100/c2_qp.c [deleted file]
drivers/staging/rdma/amso1100/c2_rnic.c [deleted file]
drivers/staging/rdma/amso1100/c2_status.h [deleted file]
drivers/staging/rdma/amso1100/c2_user.h [deleted file]
drivers/staging/rdma/amso1100/c2_vq.c [deleted file]
drivers/staging/rdma/amso1100/c2_vq.h [deleted file]
drivers/staging/rdma/amso1100/c2_wr.h [deleted file]
drivers/staging/rdma/ehca/Kconfig [deleted file]
drivers/staging/rdma/ehca/Makefile [deleted file]
drivers/staging/rdma/ehca/TODO [deleted file]
drivers/staging/rdma/ehca/ehca_av.c [deleted file]
drivers/staging/rdma/ehca/ehca_classes.h [deleted file]
drivers/staging/rdma/ehca/ehca_classes_pSeries.h [deleted file]
drivers/staging/rdma/ehca/ehca_cq.c [deleted file]
drivers/staging/rdma/ehca/ehca_eq.c [deleted file]
drivers/staging/rdma/ehca/ehca_hca.c [deleted file]
drivers/staging/rdma/ehca/ehca_irq.c [deleted file]
drivers/staging/rdma/ehca/ehca_irq.h [deleted file]
drivers/staging/rdma/ehca/ehca_iverbs.h [deleted file]
drivers/staging/rdma/ehca/ehca_main.c [deleted file]
drivers/staging/rdma/ehca/ehca_mcast.c [deleted file]
drivers/staging/rdma/ehca/ehca_mrmw.c [deleted file]
drivers/staging/rdma/ehca/ehca_mrmw.h [deleted file]
drivers/staging/rdma/ehca/ehca_pd.c [deleted file]
drivers/staging/rdma/ehca/ehca_qes.h [deleted file]
drivers/staging/rdma/ehca/ehca_qp.c [deleted file]
drivers/staging/rdma/ehca/ehca_reqs.c [deleted file]
drivers/staging/rdma/ehca/ehca_sqp.c [deleted file]
drivers/staging/rdma/ehca/ehca_tools.h [deleted file]
drivers/staging/rdma/ehca/ehca_uverbs.c [deleted file]
drivers/staging/rdma/ehca/hcp_if.c [deleted file]
drivers/staging/rdma/ehca/hcp_if.h [deleted file]
drivers/staging/rdma/ehca/hcp_phyp.c [deleted file]
drivers/staging/rdma/ehca/hcp_phyp.h [deleted file]
drivers/staging/rdma/ehca/hipz_fns.h [deleted file]
drivers/staging/rdma/ehca/hipz_fns_core.h [deleted file]
drivers/staging/rdma/ehca/hipz_hw.h [deleted file]
drivers/staging/rdma/ehca/ipz_pt_fn.c [deleted file]
drivers/staging/rdma/ehca/ipz_pt_fn.h [deleted file]
drivers/staging/rdma/ipath/Kconfig [deleted file]
drivers/staging/rdma/ipath/Makefile [deleted file]
drivers/staging/rdma/ipath/TODO [deleted file]
drivers/staging/rdma/ipath/ipath_common.h [deleted file]
drivers/staging/rdma/ipath/ipath_cq.c [deleted file]
drivers/staging/rdma/ipath/ipath_debug.h [deleted file]
drivers/staging/rdma/ipath/ipath_diag.c [deleted file]
drivers/staging/rdma/ipath/ipath_dma.c [deleted file]
drivers/staging/rdma/ipath/ipath_driver.c [deleted file]
drivers/staging/rdma/ipath/ipath_eeprom.c [deleted file]
drivers/staging/rdma/ipath/ipath_file_ops.c [deleted file]
drivers/staging/rdma/ipath/ipath_fs.c [deleted file]
drivers/staging/rdma/ipath/ipath_iba6110.c [deleted file]
drivers/staging/rdma/ipath/ipath_init_chip.c [deleted file]
drivers/staging/rdma/ipath/ipath_intr.c [deleted file]
drivers/staging/rdma/ipath/ipath_kernel.h [deleted file]
drivers/staging/rdma/ipath/ipath_keys.c [deleted file]
drivers/staging/rdma/ipath/ipath_mad.c [deleted file]
drivers/staging/rdma/ipath/ipath_mmap.c [deleted file]
drivers/staging/rdma/ipath/ipath_mr.c [deleted file]
drivers/staging/rdma/ipath/ipath_qp.c [deleted file]
drivers/staging/rdma/ipath/ipath_rc.c [deleted file]
drivers/staging/rdma/ipath/ipath_registers.h [deleted file]
drivers/staging/rdma/ipath/ipath_ruc.c [deleted file]
drivers/staging/rdma/ipath/ipath_sdma.c [deleted file]
drivers/staging/rdma/ipath/ipath_srq.c [deleted file]
drivers/staging/rdma/ipath/ipath_stats.c [deleted file]
drivers/staging/rdma/ipath/ipath_sysfs.c [deleted file]
drivers/staging/rdma/ipath/ipath_uc.c [deleted file]
drivers/staging/rdma/ipath/ipath_ud.c [deleted file]
drivers/staging/rdma/ipath/ipath_user_pages.c [deleted file]
drivers/staging/rdma/ipath/ipath_user_sdma.c [deleted file]
drivers/staging/rdma/ipath/ipath_user_sdma.h [deleted file]
drivers/staging/rdma/ipath/ipath_verbs.c [deleted file]
drivers/staging/rdma/ipath/ipath_verbs.h [deleted file]
drivers/staging/rdma/ipath/ipath_verbs_mcast.c [deleted file]
drivers/staging/rdma/ipath/ipath_wc_ppc64.c [deleted file]
drivers/staging/rdma/ipath/ipath_wc_x86_64.c [deleted file]
drivers/staging/rtl8192e/rtllib_crypt_tkip.c
drivers/staging/rtl8192e/rtllib_crypt_wep.c
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_wep.c
drivers/staging/speakup/Kconfig
drivers/staging/speakup/main.c
drivers/staging/speakup/selection.c
drivers/staging/speakup/serialio.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_login.c
drivers/thermal/Kconfig
drivers/thermal/int340x_thermal/processor_thermal_device.c
drivers/thermal/intel_pch_thermal.c
drivers/thermal/of-thermal.c
drivers/thermal/rcar_thermal.c
drivers/thermal/rockchip_thermal.c
drivers/thermal/spear_thermal.c
drivers/thermal/step_wise.c
drivers/thermal/thermal_core.c
drivers/thermal/thermal_core.h
drivers/thunderbolt/ctl.c
drivers/thunderbolt/nhi.c
drivers/thunderbolt/path.c
drivers/thunderbolt/tb.h
drivers/tty/Kconfig
drivers/tty/amiserial.c
drivers/tty/cyclades.c
drivers/tty/ehv_bytechan.c
drivers/tty/goldfish.c
drivers/tty/hvc/hvc_vio.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/isicom.c
drivers/tty/mxser.c
drivers/tty/n_gsm.c
drivers/tty/n_hdlc.c
drivers/tty/n_tty.c
drivers/tty/nozomi.c
drivers/tty/pty.c
drivers/tty/rocket.c
drivers/tty/rocket_int.h
drivers/tty/serial/68328serial.c [deleted file]
drivers/tty/serial/8250/8250.h
drivers/tty/serial/8250/8250_accent.c
drivers/tty/serial/8250/8250_acorn.c
drivers/tty/serial/8250/8250_bcm2835aux.c [new file with mode: 0644]
drivers/tty/serial/8250/8250_boca.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_dw.c
drivers/tty/serial/8250/8250_early.c
drivers/tty/serial/8250/8250_exar_st16c554.c
drivers/tty/serial/8250/8250_fourport.c
drivers/tty/serial/8250/8250_gsc.c
drivers/tty/serial/8250/8250_hp300.c
drivers/tty/serial/8250/8250_hub6.c
drivers/tty/serial/8250/8250_ingenic.c
drivers/tty/serial/8250/8250_of.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/8250/8250_pci.c
drivers/tty/serial/8250/8250_pnp.c
drivers/tty/serial/8250/8250_port.c
drivers/tty/serial/8250/Kconfig
drivers/tty/serial/8250/Makefile
drivers/tty/serial/8250/serial_cs.c
drivers/tty/serial/Kconfig
drivers/tty/serial/Makefile
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/arc_uart.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/crisv10.c
drivers/tty/serial/earlycon.c
drivers/tty/serial/imx.c
drivers/tty/serial/jsm/jsm_tty.c
drivers/tty/serial/m32r_sio.c
drivers/tty/serial/m32r_sio.h [deleted file]
drivers/tty/serial/mpc52xx_uart.c
drivers/tty/serial/mpsc.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/omap-serial.c
drivers/tty/serial/samsung.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/serial_ks8695.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/sh-sci.h
drivers/tty/serial/sprd_serial.c
drivers/tty/serial/uartlite.c
drivers/tty/serial/xilinx_uartps.c
drivers/tty/serial/zs.c
drivers/tty/synclink.c
drivers/tty/synclink_gt.c
drivers/tty/synclinkmp.c
drivers/tty/tty_audit.c
drivers/tty/tty_buffer.c
drivers/tty/tty_io.c
drivers/tty/tty_ioctl.c
drivers/tty/tty_ldisc.c
drivers/tty/tty_mutex.c
drivers/tty/tty_port.c
drivers/tty/vt/keyboard.c
drivers/tty/vt/selection.c
drivers/tty/vt/vt.c
drivers/usb/atm/cxacru.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/class/usbtmc.c
drivers/usb/common/common.c
drivers/usb/core/config.c
drivers/usb/core/devices.c
drivers/usb/core/devio.c
drivers/usb/core/file.c
drivers/usb/core/hcd-pci.c
drivers/usb/core/hcd.c
drivers/usb/core/hub.c
drivers/usb/core/hub.h
drivers/usb/core/sysfs.c
drivers/usb/core/urb.c
drivers/usb/core/usb.c
drivers/usb/core/usb.h
drivers/usb/dwc2/core.c
drivers/usb/dwc2/platform.c
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/rndis.c
drivers/usb/gadget/legacy/Kconfig
drivers/usb/host/bcma-hcd.c
drivers/usb/host/ehci-dbg.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-msm.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-platform.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ehci-sched.c
drivers/usb/host/ehci-st.c
drivers/usb/host/ehci-timer.c
drivers/usb/host/ehci.h
drivers/usb/host/fotg210-hcd.c
drivers/usb/host/fsl-mph-dr-of.c
drivers/usb/host/max3421-hcd.c
drivers/usb/host/ohci-nxp.c
drivers/usb/host/ohci-platform.c
drivers/usb/host/ohci-st.c
drivers/usb/host/oxu210hp-hcd.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/host/u132-hcd.c
drivers/usb/host/uhci-q.c
drivers/usb/host/xhci-ext-caps.h
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-mtk-sch.c
drivers/usb/host/xhci-mtk.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/sisusbvga/sisusb.c
drivers/usb/mon/mon_main.c
drivers/usb/musb/ux500.c
drivers/usb/phy/phy-msm-usb.c
drivers/usb/phy/phy-mxs-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/fifo.h
drivers/usb/renesas_usbhs/pipe.h
drivers/usb/serial/console.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/mct_u232.c
drivers/usb/serial/mos7720.c
drivers/usb/serial/mos7840.c
drivers/usb/serial/mxu11x0.c
drivers/usb/serial/option.c
drivers/usb/serial/visor.c
drivers/usb/storage/ene_ub6250.c
drivers/usb/storage/uas.c
drivers/usb/usbip/usbip_event.c
drivers/usb/usbip/vhci_hcd.c
drivers/usb/usbip/vhci_rx.c
drivers/usb/usbip/vhci_sysfs.c
drivers/usb/usbip/vhci_tx.c
drivers/usb/wusbcore/crypto.c
drivers/usb/wusbcore/wusbhc.h
drivers/vfio/vfio.c
drivers/video/fbdev/aty/aty128fb.c
drivers/video/fbdev/aty/radeon_base.c
drivers/video/fbdev/imsttfb.c
drivers/video/fbdev/matrox/matroxfb_base.h
drivers/video/fbdev/offb.c
drivers/virtio/virtio_pci_common.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/max63xx_wdt.c
drivers/watchdog/pcwd_usb.c
drivers/watchdog/sp805_wdt.c
drivers/watchdog/sun4v_wdt.c [new file with mode: 0644]
drivers/xen/balloon.c
drivers/xen/tmem.c
drivers/zorro/zorro-sysfs.c
fs/Kconfig
fs/Makefile
fs/binfmt_elf.c
fs/block_dev.c
fs/btrfs/async-thread.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/file-item.c
fs/btrfs/file.c
fs/btrfs/free-space-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/reada.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/sysfs.c
fs/btrfs/sysfs.h
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tree-log.c
fs/ceph/addr.c
fs/ceph/cache.c
fs/ceph/file.c
fs/ceph/inode.c
fs/cifs/cifs_debug.c
fs/cifs/cifs_debug.h
fs/cifs/cifsencrypt.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/inode.c
fs/cifs/misc.c
fs/cifs/readdir.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2pdu.h
fs/cifs/smb2proto.h
fs/cifs/smb2transport.c
fs/cifs/smbencrypt.c
fs/cifs/transport.c
fs/compat_ioctl.c
fs/dax.c
fs/devpts/inode.c
fs/direct-io.c
fs/ecryptfs/crypto.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/ecryptfs/mmap.c
fs/ecryptfs/super.c
fs/eventpoll.c
fs/ext4/crypto.c
fs/ext4/crypto_fname.c
fs/ext4/crypto_key.c
fs/ext4/dir.c
fs/ext4/ext4.h
fs/ext4/ext4_crypto.h
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/super.c
fs/f2fs/checkpoint.c
fs/f2fs/crypto.c
fs/f2fs/crypto_fname.c
fs/f2fs/crypto_key.c
fs/f2fs/data.c
fs/f2fs/dir.c
fs/f2fs/extent_cache.c
fs/f2fs/f2fs.h
fs/f2fs/f2fs_crypto.h
fs/f2fs/file.c
fs/f2fs/gc.c
fs/f2fs/inline.c
fs/f2fs/inode.c
fs/f2fs/node.c
fs/f2fs/node.h
fs/f2fs/recovery.c
fs/f2fs/segment.c
fs/f2fs/segment.h
fs/f2fs/super.c
fs/f2fs/xattr.c
fs/gfs2/aops.c
fs/gfs2/dir.c
fs/gfs2/glock.c
fs/gfs2/incore.h
fs/gfs2/inode.c
fs/gfs2/super.c
fs/kernfs/dir.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/flexfilelayout/flexfilelayoutdev.c
fs/nfs/nfs4proc.c
fs/nfs/pnfs.c
fs/nfs/pnfs.h
fs/nfsd/nfs4recover.c
fs/ocfs2/aops.c
fs/ocfs2/cluster/heartbeat.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/ocfs2_trace.h
fs/ocfs2/quota_global.c
fs/orangefs/Kconfig [new file with mode: 0644]
fs/orangefs/Makefile [new file with mode: 0644]
fs/orangefs/acl.c [new file with mode: 0644]
fs/orangefs/dcache.c [new file with mode: 0644]
fs/orangefs/devorangefs-req.c [new file with mode: 0644]
fs/orangefs/dir.c [new file with mode: 0644]
fs/orangefs/downcall.h [new file with mode: 0644]
fs/orangefs/file.c [new file with mode: 0644]
fs/orangefs/inode.c [new file with mode: 0644]
fs/orangefs/namei.c [new file with mode: 0644]
fs/orangefs/orangefs-bufmap.c [new file with mode: 0644]
fs/orangefs/orangefs-bufmap.h [new file with mode: 0644]
fs/orangefs/orangefs-cache.c [new file with mode: 0644]
fs/orangefs/orangefs-debug.h [new file with mode: 0644]
fs/orangefs/orangefs-debugfs.c [new file with mode: 0644]
fs/orangefs/orangefs-debugfs.h [new file with mode: 0644]
fs/orangefs/orangefs-dev-proto.h [new file with mode: 0644]
fs/orangefs/orangefs-kernel.h [new file with mode: 0644]
fs/orangefs/orangefs-mod.c [new file with mode: 0644]
fs/orangefs/orangefs-sysfs.c [new file with mode: 0644]
fs/orangefs/orangefs-sysfs.h [new file with mode: 0644]
fs/orangefs/orangefs-utils.c [new file with mode: 0644]
fs/orangefs/protocol.h [new file with mode: 0644]
fs/orangefs/super.c [new file with mode: 0644]
fs/orangefs/symlink.c [new file with mode: 0644]
fs/orangefs/upcall.h [new file with mode: 0644]
fs/orangefs/waitqueue.c [new file with mode: 0644]
fs/orangefs/xattr.c [new file with mode: 0644]
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/quota/dquot.c
fs/quota/quota.c
fs/quota/quota_tree.c
fs/quota/quota_v2.c
fs/reiserfs/super.c
fs/timerfd.c
fs/udf/dir.c
fs/udf/namei.c
fs/udf/super.c
fs/udf/udfdecl.h
fs/udf/unicode.c
fs/xfs/libxfs/xfs_alloc_btree.c
fs/xfs/libxfs/xfs_attr_sf.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap_btree.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/libxfs/xfs_da_format.h
fs/xfs/libxfs/xfs_dir2.c
fs/xfs/libxfs/xfs_ialloc_btree.c
fs/xfs/libxfs/xfs_inode_buf.c
fs/xfs/libxfs/xfs_inode_buf.h
fs/xfs/libxfs/xfs_inode_fork.c
fs/xfs/libxfs/xfs_log_format.h
fs/xfs/libxfs/xfs_quota_defs.h
fs/xfs/libxfs/xfs_rtbitmap.c
fs/xfs/libxfs/xfs_sb.h
fs/xfs/libxfs/xfs_shared.h
fs/xfs/xfs_aops.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_buf.h
fs/xfs/xfs_buf_item.c
fs/xfs/xfs_dir2_readdir.c
fs/xfs/xfs_dquot.c
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_filestream.c
fs/xfs/xfs_fsops.h
fs/xfs/xfs_icache.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_qm.c
fs/xfs/xfs_qm.h
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_quotaops.c
fs/xfs/xfs_rtalloc.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_trans_ail.c
fs/xfs/xfs_trans_buf.c
fs/xfs/xfs_trans_dquot.c
fs/xfs/xfs_trans_inode.c
include/acpi/cppc_acpi.h
include/asm-generic/cputime_nsecs.h
include/asm-generic/fixmap.h
include/asm-generic/pci-bridge.h [deleted file]
include/asm-generic/pgtable.h
include/asm-generic/vmlinux.lds.h
include/crypto/algapi.h
include/crypto/compress.h [deleted file]
include/crypto/drbg.h
include/crypto/hash.h
include/crypto/internal/aead.h
include/crypto/internal/compress.h [deleted file]
include/crypto/internal/hash.h
include/crypto/skcipher.h
include/drm/drmP.h
include/drm/drm_atomic_helper.h
include/drm/drm_cache.h
include/drm/drm_crtc.h
include/drm/drm_dp_mst_helper.h
include/drm/drm_fixed.h
include/drm/i915_pciids.h
include/dt-bindings/clock/r8a7793-clock.h
include/dt-bindings/clock/rk3188-cru-common.h
include/dt-bindings/clock/tegra210-car.h
include/dt-bindings/power/rk3368-power.h [new file with mode: 0644]
include/linux/apple-gmux.h [new file with mode: 0644]
include/linux/audit.h
include/linux/bcm963xx_nvram.h [new file with mode: 0644]
include/linux/bcm963xx_tag.h [moved from arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h with 87% similarity]
include/linux/bcma/bcma.h
include/linux/bcma/bcma_driver_chipcommon.h
include/linux/blk-mq.h
include/linux/bpf.h
include/linux/ceph/ceph_features.h
include/linux/ceph/ceph_frag.h
include/linux/ceph/messenger.h
include/linux/cgroup-defs.h
include/linux/cleancache.h
include/linux/clockchips.h
include/linux/compiler-clang.h
include/linux/compiler.h
include/linux/cpufreq.h
include/linux/cpuset.h
include/linux/crush/crush.h
include/linux/crypto.h
include/linux/dax.h
include/linux/debugfs.h
include/linux/device-mapper.h
include/linux/device.h
include/linux/devpts_fs.h
include/linux/dma-buf.h
include/linux/dmaengine.h
include/linux/dqblk_qtree.h
include/linux/efi.h
include/linux/f2fs_fs.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/gfp.h
include/linux/hrtimer.h
include/linux/ieee80211.h
include/linux/if_team.h
include/linux/input/cyttsp.h
include/linux/iommu.h
include/linux/ioport.h
include/linux/irq.h
include/linux/irqdomain.h
include/linux/isdn.h
include/linux/latencytop.h
include/linux/leds.h
include/linux/libata.h
include/linux/lockdep.h
include/linux/memcontrol.h
include/linux/mfd/core.h
include/linux/mfd/tmio.h
include/linux/mfd/tps65086.h [new file with mode: 0644]
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmc/core.h
include/linux/mmc/dw_mmc.h
include/linux/mmc/tmio.h
include/linux/mmzone.h
include/linux/module.h
include/linux/mtd/bbm.h
include/linux/mtd/inftl.h
include/linux/mtd/nand.h
include/linux/mtd/nftl.h
include/linux/netdevice.h
include/linux/nfs_fs.h
include/linux/of.h
include/linux/of_fdt.h
include/linux/pci.h
include/linux/perf_event.h
include/linux/pfn_t.h
include/linux/platform_data/mtd-nand-s3c2410.h
include/linux/platform_data/sdhci-pic32.h [new file with mode: 0644]
include/linux/platform_data/serial-omap.h
include/linux/pm_opp.h
include/linux/psci.h
include/linux/pxa2xx_ssp.h
include/linux/qcom_scm.h
include/linux/quota.h
include/linux/quotaops.h
include/linux/radix-tree.h
include/linux/raid/pq.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/regmap.h
include/linux/regulator/lp872x.h
include/linux/rfkill.h
include/linux/rmap.h
include/linux/sched.h
include/linux/sched/sysctl.h
include/linux/serial_8250.h
include/linux/serial_core.h
include/linux/skbuff.h
include/linux/soc/qcom/smd.h
include/linux/soc/qcom/smem_state.h
include/linux/soc/samsung/exynos-pmu.h [moved from arch/arm/mach-exynos/exynos-pmu.h with 81% similarity]
include/linux/soc/samsung/exynos-regs-pmu.h [moved from arch/arm/mach-exynos/regs-pmu.h with 99% similarity]
include/linux/spi/spi.h
include/linux/srcu.h
include/linux/sunrpc/gss_krb5.h
include/linux/swiotlb.h
include/linux/tcp.h
include/linux/thermal.h
include/linux/tty.h
include/linux/tty_ldisc.h
include/linux/usb.h
include/linux/usb/hcd.h
include/linux/usb/msm_hsusb_hw.h
include/linux/usb/storage.h
include/linux/vga_switcheroo.h
include/linux/workqueue.h
include/media/tuner.h
include/media/v4l2-mc.h [new file with mode: 0644]
include/media/videobuf2-core.h
include/net/af_unix.h
include/net/bluetooth/hci_core.h
include/net/bluetooth/l2cap.h
include/net/bond_3ad.h
include/net/cfg80211.h
include/net/dst_metadata.h
include/net/ip6_route.h
include/net/ip_tunnels.h
include/net/iw_handler.h
include/net/mac80211.h
include/net/netfilter/nf_conntrack_core.h
include/net/netns/ipv4.h
include/net/scm.h
include/net/sctp/auth.h
include/net/sctp/structs.h
include/net/sock.h
include/net/sock_reuseport.h
include/net/tcp.h
include/net/vxlan.h
include/scsi/libiscsi_tcp.h
include/sound/pcm.h
include/sound/rawmidi.h
include/target/iscsi/iscsi_target_core.h
include/trace/events/fence.h
include/trace/events/power.h
include/trace/events/rcu.h
include/trace/events/sunvnet.h [new file with mode: 0644]
include/uapi/drm/drm.h
include/uapi/drm/etnaviv_drm.h
include/uapi/drm/i915_drm.h
include/uapi/drm/msm_drm.h
include/uapi/linux/audit.h
include/uapi/linux/bpf.h
include/uapi/linux/dqblk_xfs.h
include/uapi/linux/ethtool.h
include/uapi/linux/fs.h
include/uapi/linux/if_bridge.h
include/uapi/linux/if_link.h
include/uapi/linux/kvm.h
include/uapi/linux/media.h
include/uapi/linux/nl80211.h
include/uapi/linux/quota.h
include/uapi/linux/rfkill.h
include/uapi/linux/usb/ch11.h
include/uapi/linux/usb/ch9.h
include/uapi/linux/usb/tmc.h
include/uapi/linux/v4l2-common.h
init/main.c
kernel/audit.c
kernel/audit_watch.c
kernel/auditfilter.c
kernel/bpf/arraymap.c
kernel/bpf/hashtab.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/cpuset.c
kernel/debug/kdb/kdb_io.c
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/events/ring_buffer.c
kernel/futex.c
kernel/irq/handle.c
kernel/irq/internals.h
kernel/irq/irqdesc.c
kernel/irq/irqdomain.c
kernel/kexec_core.c
kernel/kexec_file.c
kernel/latencytop.c
kernel/locking/lockdep.c
kernel/locking/mutex.c
kernel/locking/rtmutex.c
kernel/memremap.c
kernel/module.c
kernel/pid.c
kernel/power/Kconfig
kernel/profile.c
kernel/rcu/Makefile
kernel/rcu/rcuperf.c [new file with mode: 0644]
kernel/rcu/rcutorture.c
kernel/rcu/tiny_plugin.h
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c
kernel/rcu/update.c
kernel/resource.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/sched.h
kernel/sched/stats.h
kernel/seccomp.c
kernel/signal.c
kernel/sysctl.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/itimer.c
kernel/time/ntp.c
kernel/time/posix-timers.c
kernel/time/tick-sched.c
kernel/time/timekeeping.c
kernel/time/timer_list.c
kernel/trace/bpf_trace.c
kernel/trace/power-traces.c
kernel/trace/trace.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_stack.c
kernel/workqueue.c
lib/842/842_decompress.c
lib/Kconfig.debug
lib/atomic64_test.c
lib/debugobjects.c
lib/dump_stack.c
lib/flex_proportions.c
lib/klist.c
lib/kobject.c
lib/percpu-refcount.c
lib/radix-tree.c
lib/scatterlist.c
lib/test-string_helpers.c
lib/test_static_keys.c
mm/Kconfig
mm/backing-dev.c
mm/balloon_compaction.c
mm/cleancache.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/internal.h
mm/memblock.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/mmap.c
mm/page_alloc.c
mm/swapfile.c
mm/util.c
mm/vmpressure.c
mm/vmscan.c
mm/vmstat.c
net/9p/trans_fd.c
net/9p/trans_virtio.c
net/batman-adv/Kconfig
net/batman-adv/Makefile
net/batman-adv/bat_algo.h
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bitarray.c
net/batman-adv/bitarray.h
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/bridge_loop_avoidance.h
net/batman-adv/debugfs.c
net/batman-adv/debugfs.h
net/batman-adv/distributed-arp-table.c
net/batman-adv/distributed-arp-table.h
net/batman-adv/fragmentation.c
net/batman-adv/fragmentation.h
net/batman-adv/gateway_client.c
net/batman-adv/gateway_client.h
net/batman-adv/gateway_common.c
net/batman-adv/gateway_common.h
net/batman-adv/hard-interface.c
net/batman-adv/hard-interface.h
net/batman-adv/hash.c
net/batman-adv/hash.h
net/batman-adv/icmp_socket.c
net/batman-adv/icmp_socket.h
net/batman-adv/main.c
net/batman-adv/main.h
net/batman-adv/multicast.c
net/batman-adv/multicast.h
net/batman-adv/network-coding.c
net/batman-adv/network-coding.h
net/batman-adv/originator.c
net/batman-adv/originator.h
net/batman-adv/packet.h
net/batman-adv/routing.c
net/batman-adv/routing.h
net/batman-adv/send.c
net/batman-adv/send.h
net/batman-adv/soft-interface.c
net/batman-adv/soft-interface.h
net/batman-adv/sysfs.c
net/batman-adv/sysfs.h
net/batman-adv/translation-table.c
net/batman-adv/translation-table.h
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/Kconfig
net/bluetooth/Makefile
net/bluetooth/hci_core.c
net/bluetooth/hci_request.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/leds.c [new file with mode: 0644]
net/bluetooth/leds.h [new file with mode: 0644]
net/bluetooth/smp.c
net/bridge/br.c
net/bridge/br_mdb.c
net/bridge/br_multicast.c
net/bridge/br_private.h
net/ceph/auth_x.c
net/ceph/auth_x.h
net/ceph/crush/mapper.c
net/ceph/crypto.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/ceph/osdmap.c
net/core/dev.c
net/core/ethtool.c
net/core/flow_dissector.c
net/core/net-sysfs.c
net/core/rtnetlink.c
net/core/scm.c
net/core/skbuff.c
net/core/sock_reuseport.c
net/core/sysctl_net_core.c
net/ipv4/Kconfig
net/ipv4/fib_trie.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/ip_fragment.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_tunnel.c
net/ipv4/ipconfig.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/syncookies.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv6/Kconfig
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ip6_output.c
net/ipv6/route.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/irda/ircomm/ircomm_param.c
net/irda/ircomm/ircomm_tty.c
net/irda/ircomm/ircomm_tty_ioctl.c
net/iucv/af_iucv.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/chan.c
net/mac80211/debugfs.c
net/mac80211/driver-ops.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/main.c
net/mac80211/mesh.c
net/mac80211/mesh.h
net/mac80211/mesh_pathtbl.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/offchannel.c
net/mac80211/rc80211_minstrel.c
net/mac80211/rc80211_minstrel_ht.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/sta_info.c
net/mac80211/sta_info.h
net/mac80211/status.c
net/mac80211/trace.h
net/mac80211/tx.c
net/mac80211/util.c
net/mac80211/vht.c
net/mac802154/llsec.c
net/mac802154/llsec.h
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_pe_sip.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_tables_netdev.c
net/netfilter/nfnetlink_cttimeout.c
net/netfilter/nft_byteorder.c
net/netfilter/nft_ct.c
net/netfilter/xt_TCPMSS.c
net/netlink/af_netlink.c
net/nfc/nci/uart.c
net/openvswitch/vport-geneve.c
net/openvswitch/vport-vxlan.c
net/packet/af_packet.c
net/rfkill/core.c
net/rxrpc/ar-internal.h
net/rxrpc/ar-key.c
net/rxrpc/rxkad.c
net/sched/sch_drr.c
net/sctp/auth.c
net/sctp/endpointola.c
net/sctp/input.c
net/sctp/proc.c
net/sctp/sm_make_chunk.c
net/sctp/sm_sideeffect.c
net/sctp/socket.c
net/sctp/transport.c
net/sunrpc/auth_gss/gss_krb5_crypto.c
net/sunrpc/auth_gss/gss_krb5_keys.c
net/sunrpc/auth_gss/gss_krb5_mech.c
net/sunrpc/auth_gss/gss_krb5_seqnum.c
net/sunrpc/auth_gss/gss_krb5_wrap.c
net/switchdev/switchdev.c
net/tipc/link.c
net/tipc/link.h
net/tipc/name_table.c
net/tipc/node.c
net/tipc/server.c
net/tipc/subscr.c
net/tipc/subscr.h
net/unix/af_unix.c
net/unix/garbage.c
net/wireless/Kconfig
net/wireless/core.c
net/wireless/lib80211_crypt_tkip.c
net/wireless/lib80211_crypt_wep.c
net/wireless/mlme.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/sme.c
net/wireless/util.c
net/wireless/wext-core.c
net/xfrm/xfrm_algo.c
samples/bpf/test_maps.c
samples/bpf/tracex2_kern.c
samples/bpf/tracex2_user.c
samples/bpf/tracex3_kern.c
samples/bpf/tracex3_user.c
scripts/Makefile.dtbinst
scripts/checkpatch.pl
scripts/dtc/dtx_diff [new file with mode: 0755]
scripts/kconfig/Makefile
scripts/kconfig/confdata.c
scripts/link-vmlinux.sh
scripts/mod/modpost.c
scripts/prune-kernel [new file with mode: 0755]
security/keys/encrypted-keys/encrypted.c
security/keys/key.c
security/selinux/Makefile
security/selinux/hooks.c
security/selinux/nlmsgtab.c
sound/core/Kconfig
sound/core/compress_offload.c
sound/core/oss/pcm_oss.c
sound/core/pcm_misc.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_init.c
sound/core/seq/oss/seq_oss_synth.c
sound/core/seq/seq_clientmgr.c
sound/core/seq/seq_ports.c
sound/core/seq/seq_timer.c
sound/core/seq/seq_virmidi.c
sound/core/timer.c
sound/drivers/dummy.c
sound/firewire/bebob/bebob_stream.c
sound/firewire/dice/dice-midi.c
sound/firewire/dice/dice-pcm.c
sound/firewire/dice/dice-stream.c
sound/firewire/dice/dice-transaction.c
sound/firewire/dice/dice.c
sound/firewire/dice/dice.h
sound/firewire/digi00x/amdtp-dot.c
sound/firewire/tascam/tascam-transaction.c
sound/firewire/tascam/tascam.c
sound/firewire/tascam/tascam.h
sound/isa/Kconfig
sound/mips/Kconfig
sound/mips/Makefile
sound/mips/au1x00.c [deleted file]
sound/pci/Kconfig
sound/pci/emu10k1/emu10k1_main.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_jack.c
sound/pci/hda/hda_jack.h
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/ppc/pmac.c
sound/soc/amd/acp-pcm-dma.c
sound/soc/bcm/bcm2835-i2s.c
sound/soc/codecs/Kconfig
sound/soc/codecs/Makefile
sound/soc/codecs/ab8500-codec.c
sound/soc/codecs/adau1761.c
sound/soc/codecs/arizona.c
sound/soc/codecs/arizona.h
sound/soc/codecs/cs42xx8.c
sound/soc/codecs/cs47l24.c
sound/soc/codecs/max98926.c [new file with mode: 0644]
sound/soc/codecs/max98926.h [new file with mode: 0644]
sound/soc/codecs/pcm179x-i2c.c [new file with mode: 0644]
sound/soc/codecs/pcm179x-spi.c [new file with mode: 0644]
sound/soc/codecs/pcm179x.c
sound/soc/codecs/pcm179x.h
sound/soc/codecs/pcm3168a.c
sound/soc/codecs/rt286.c
sound/soc/codecs/rt5514.c [new file with mode: 0644]
sound/soc/codecs/rt5514.h [new file with mode: 0644]
sound/soc/codecs/rt5616.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt5659.c
sound/soc/codecs/rt5659.h
sound/soc/codecs/sigmadsp-i2c.c
sound/soc/codecs/ssm4567.c
sound/soc/codecs/wm5102.c
sound/soc/codecs/wm5110.c
sound/soc/codecs/wm8960.c
sound/soc/codecs/wm8974.c
sound/soc/codecs/wm8997.c
sound/soc/codecs/wm8998.c
sound/soc/codecs/wm_adsp.c
sound/soc/codecs/wm_adsp.h
sound/soc/davinci/davinci-mcasp.c
sound/soc/dwc/designware_i2s.c
sound/soc/fsl/Kconfig
sound/soc/fsl/fsl-asoc-card.c
sound/soc/fsl/fsl_sai.c
sound/soc/fsl/fsl_ssi.c
sound/soc/fsl/imx-spdif.c
sound/soc/fsl/mpc5200_psc_ac97.c
sound/soc/generic/simple-card.c
sound/soc/intel/Kconfig
sound/soc/intel/atom/sst-mfld-platform-pcm.c
sound/soc/intel/boards/cht_bsw_max98090_ti.c
sound/soc/intel/boards/cht_bsw_rt5645.c
sound/soc/intel/boards/skl_rt286.c
sound/soc/intel/common/Makefile
sound/soc/intel/common/sst-acpi.c
sound/soc/intel/common/sst-match-acpi.c
sound/soc/intel/skylake/skl-messages.c
sound/soc/intel/skylake/skl-pcm.c
sound/soc/intel/skylake/skl-topology.c
sound/soc/intel/skylake/skl-topology.h
sound/soc/intel/skylake/skl-tplg-interface.h
sound/soc/intel/skylake/skl.c
sound/soc/mediatek/Kconfig
sound/soc/mediatek/mtk-afe-common.h
sound/soc/mediatek/mtk-afe-pcm.c
sound/soc/mxs/mxs-saif.c
sound/soc/qcom/lpass-platform.c
sound/soc/samsung/s3c-i2s-v2.c
sound/soc/samsung/s3c-i2s-v2.h
sound/soc/sh/rcar/adg.c
sound/soc/sh/rcar/cmd.c
sound/soc/sh/rcar/core.c
sound/soc/sh/rcar/ctu.c
sound/soc/sh/rcar/dma.c
sound/soc/sh/rcar/dvc.c
sound/soc/sh/rcar/gen.c
sound/soc/sh/rcar/mix.c
sound/soc/sh/rcar/rsnd.h
sound/soc/sh/rcar/src.c
sound/soc/sh/rcar/ssi.c
sound/soc/sh/rcar/ssiu.c
sound/soc/soc-dapm.c
sound/soc/soc-pcm.c
sound/sparc/Kconfig
sound/usb/card.c
sound/usb/midi.c
sound/usb/midi.h
sound/usb/quirks.c
sound/usb/quirks.h
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-compile.c
tools/build/feature/test-libcrypto.c [new file with mode: 0644]
tools/lib/bpf/libbpf.c
tools/lib/lockdep/common.c
tools/lib/lockdep/include/liblockdep/common.h
tools/lib/lockdep/preload.c
tools/perf/Documentation/perf-config.txt
tools/perf/Documentation/perf-inject.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perfconfig.example
tools/perf/Makefile
tools/perf/Makefile.perf
tools/perf/arch/powerpc/Makefile
tools/perf/arch/powerpc/util/Build
tools/perf/arch/powerpc/util/book3s_hcalls.h [new file with mode: 0644]
tools/perf/arch/powerpc/util/book3s_hv_exits.h [new file with mode: 0644]
tools/perf/arch/powerpc/util/kvm-stat.c [new file with mode: 0644]
tools/perf/arch/s390/util/kvm-stat.c
tools/perf/arch/x86/tests/intel-cqm.c
tools/perf/arch/x86/util/intel-bts.c
tools/perf/arch/x86/util/intel-pt.c
tools/perf/arch/x86/util/kvm-stat.c
tools/perf/builtin-annotate.c
tools/perf/builtin-buildid-cache.c
tools/perf/builtin-inject.c
tools/perf/builtin-kvm.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-top.c
tools/perf/config/Makefile
tools/perf/jvmti/Makefile [new file with mode: 0644]
tools/perf/jvmti/jvmti_agent.c [new file with mode: 0644]
tools/perf/jvmti/jvmti_agent.h [new file with mode: 0644]
tools/perf/jvmti/libjvmti.c [new file with mode: 0644]
tools/perf/tests/.gitignore
tools/perf/tests/Build
tools/perf/tests/bp_signal.c
tools/perf/tests/bpf-script-test-relocation.c [new file with mode: 0644]
tools/perf/tests/bpf.c
tools/perf/tests/hists_cumulate.c
tools/perf/tests/hists_filter.c
tools/perf/tests/hists_output.c
tools/perf/tests/llvm.c
tools/perf/tests/llvm.h
tools/perf/tests/make
tools/perf/tests/vmlinux-kallsyms.c
tools/perf/ui/browser.c
tools/perf/ui/browser.h
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/gtk/hists.c
tools/perf/ui/hist.c
tools/perf/ui/stdio/hist.c
tools/perf/util/Build
tools/perf/util/auxtrace.c
tools/perf/util/auxtrace.h
tools/perf/util/build-id.c
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/demangle-java.c [new file with mode: 0644]
tools/perf/util/demangle-java.h [new file with mode: 0644]
tools/perf/util/dso.c
tools/perf/util/event.c
tools/perf/util/evsel.c
tools/perf/util/genelf.c [new file with mode: 0644]
tools/perf/util/genelf.h [new file with mode: 0644]
tools/perf/util/genelf_debug.c [new file with mode: 0644]
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/intel-pt.c
tools/perf/util/jit.h [new file with mode: 0644]
tools/perf/util/jitdump.c [new file with mode: 0644]
tools/perf/util/jitdump.h [new file with mode: 0644]
tools/perf/util/kvm-stat.h
tools/perf/util/machine.h
tools/perf/util/parse-events.c
tools/perf/util/pmu.c
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/session.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.c
tools/perf/util/util.c
tools/perf/util/util.h
tools/testing/nvdimm/test/iomap.c
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/bin/parse-console.sh
tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcuperf/TREE [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh [new file with mode: 0644]
tools/testing/selftests/timers/alarmtimer-suspend.c
tools/testing/selftests/timers/valid-adjtimex.c
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/check_initial_reg_state.c [new file with mode: 0644]
tools/virtio/asm/barrier.h
tools/virtio/linux/compiler.h [new file with mode: 0644]
tools/virtio/linux/kernel.h
tools/virtio/ringtest/Makefile [new file with mode: 0644]
tools/virtio/ringtest/README [new file with mode: 0644]
tools/virtio/ringtest/main.c [new file with mode: 0644]
tools/virtio/ringtest/main.h [new file with mode: 0644]
tools/virtio/ringtest/ring.c [new file with mode: 0644]
tools/virtio/ringtest/run-on-all.sh [new file with mode: 0755]
tools/virtio/ringtest/virtio_ring_0_9.c [new file with mode: 0644]
tools/virtio/ringtest/virtio_ring_poll.c [new file with mode: 0644]
virt/kvm/arm/arch_timer.c

index b1e9a97653dc64853775a97db377a8f269cc8d95..7e6c5334c337ae0c792a36ec5a2e9309b4128061 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
 Arnd Bergmann <arnd@arndb.de>
index ff60ad9eca4c7ba390e22bd0eda0378bfbe236f1..e736d145085f302b031f4aa40ff3ca0e0c5fe060 100644 (file)
@@ -18,12 +18,3 @@ Values:      A numeric value.
                2: RFKILL_STATE_HARD_BLOCKED
                        transmitter is forced off by something outside of
                        the driver's control.
-
-What:          /sys/class/rfkill/rfkill[0-9]+/claim
-Date:          09-Jul-2007
-KernelVersion  v2.6.22
-Contact:       linux-wireless@vger.kernel.org
-Description:   This file is deprecated because there no longer is a way to
-               claim just control over a single rfkill instance.
-               This file is scheduled to be removed in 2012.
-Values:        0: Kernel handles events
diff --git a/Documentation/ABI/removed/sysfs-class-rfkill b/Documentation/ABI/removed/sysfs-class-rfkill
new file mode 100644 (file)
index 0000000..3ce6231
--- /dev/null
@@ -0,0 +1,13 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details to this subsystem look at Documentation/rfkill.txt.
+
+What:          /sys/class/rfkill/rfkill[0-9]+/claim
+Date:          09-Jul-2007
+KernelVersion  v2.6.22
+Contact:       linux-wireless@vger.kernel.org
+Description:   This file was deprecated because there no longer was a way to
+               claim just control over a single rfkill instance.
+               This file was scheduled to be removed in 2012, and was removed
+               in 2016.
+Values:        0: Kernel handles events
diff --git a/Documentation/ABI/stable/sysfs-fs-orangefs b/Documentation/ABI/stable/sysfs-fs-orangefs
new file mode 100644 (file)
index 0000000..affdb11
--- /dev/null
@@ -0,0 +1,87 @@
+What:                  /sys/fs/orangefs/perf_counters/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Counters and settings for various caches.
+                       Read only.
+
+
+What:                  /sys/fs/orangefs/perf_counter_reset
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       echo a 0 or a 1 into perf_counter_reset to
+                       reset all the counters in
+                       /sys/fs/orangefs/perf_counters
+                       except ones with PINT_PERF_PRESERVE set.
+
+
+What:                  /sys/fs/orangefs/perf_time_interval_secs
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Length of perf counter intervals in
+                       seconds.
+
+
+What:                  /sys/fs/orangefs/perf_history_size
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       The perf_counters cache statistics have N, or
+                       perf_history_size, samples. The default is
+                       one.
+
+                       Every perf_time_interval_secs the (first)
+                       samples are reset.
+
+                       If N is greater than one, the "current" set
+                       of samples is reset, and the samples from the
+                       other N-1 intervals remain available.
+
+
+What:                  /sys/fs/orangefs/op_timeout_secs
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Service operation timeout in seconds.
+
+
+What:                  /sys/fs/orangefs/slot_timeout_secs
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       "Slot" timeout in seconds. A "slot"
+                       is an indexed buffer in the shared
+                       memory segment used for communication
+                       between the kernel module and userspace.
+                       Slots are requested and waited for;
+                       the wait times out after slot_timeout_secs.
+
+
+What:                  /sys/fs/orangefs/acache/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Attribute cache configurable settings.
+
+
+What:                  /sys/fs/orangefs/ncache/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Name cache configurable settings.
+
+
+What:                  /sys/fs/orangefs/capcache/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Capability cache configurable settings.
+
+
+What:                  /sys/fs/orangefs/ccache/*
+Date:                  Jun 2015
+Contact:               Mike Marshall <hubcap@omnibond.com>
+Description:
+                       Credential cache configurable settings.
index eed922ef42e533ae9bf16da6d224a50d2b46e16b..f34221b52b14b796377893207692c48ddaabf7fb 100644 (file)
@@ -179,3 +179,19 @@ Description:       This file controls the USB 3 functionality, valid values are:
                Note that toggling this value requires a reboot for changes to
                take effect.
 Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/cooling_method
+Date:          2016
+KernelVersion: 4.6
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the Cooling Method feature.
+               Reading this file prints two values: the first is the actual cooling method
+               and the second is the maximum cooling method supported.
+               When the maximum cooling method is ONE, valid values are:
+                       * 0 -> Maximum Performance
+                       * 1 -> Battery Optimized
+               When the maximum cooling method is TWO, valid values are:
+                       * 0 -> Maximum Performance
+                       * 1 -> Performance
+                       * 2 -> Battery Optimized
+Users:         KToshiba
diff --git a/Documentation/ABI/testing/sysfs-firmware-qemu_fw_cfg b/Documentation/ABI/testing/sysfs-firmware-qemu_fw_cfg
new file mode 100644 (file)
index 0000000..011dda4
--- /dev/null
@@ -0,0 +1,100 @@
+What:          /sys/firmware/qemu_fw_cfg/
+Date:          August 2015
+Contact:       Gabriel Somlo <somlo@cmu.edu>
+Description:
+               Several different architectures supported by QEMU (x86, arm,
+               sun4*, ppc/mac) are provisioned with a firmware configuration
+               (fw_cfg) device, originally intended as a way for the host to
+               provide configuration data to the guest firmware. Starting
+               with QEMU v2.4, arbitrary fw_cfg file entries may be specified
+               by the user on the command line, which makes fw_cfg additionally
+               useful as an out-of-band, asynchronous mechanism for providing
+               configuration data to the guest userspace.
+
+               The authoritative guest-side hardware interface documentation
+               to the fw_cfg device can be found in "docs/specs/fw_cfg.txt"
+               in the QEMU source tree.
+
+               === SysFS fw_cfg Interface ===
+
+               The fw_cfg sysfs interface described in this document is only
+               intended to display discoverable blobs (i.e., those registered
+               with the file directory), as there is no way to determine the
+               presence or size of "legacy" blobs (with selector keys between
+               0x0002 and 0x0018) programmatically.
+
+               All fw_cfg information is shown under:
+
+                       /sys/firmware/qemu_fw_cfg/
+
+               The only legacy blob displayed is the fw_cfg device revision:
+
+                       /sys/firmware/qemu_fw_cfg/rev
+
+               --- Discoverable fw_cfg blobs by selector key ---
+
+               All discoverable blobs listed in the fw_cfg file directory are
+               displayed as entries named after their unique selector key
+               value, e.g.:
+
+                       /sys/firmware/qemu_fw_cfg/by_key/32
+                       /sys/firmware/qemu_fw_cfg/by_key/33
+                       /sys/firmware/qemu_fw_cfg/by_key/34
+                       ...
+
+               Each such fw_cfg sysfs entry has the following values exported
+               as attributes:
+
+               name    : The 56-byte nul-terminated ASCII string used as the
+                         blob's 'file name' in the fw_cfg directory.
+               size    : The length of the blob, as given in the fw_cfg
+                         directory.
+               key     : The value of the blob's selector key as given in the
+                         fw_cfg directory. This value is the same as used in
+                         the parent directory name.
+               raw     : The raw bytes of the blob, obtained by selecting the
+                         entry via the control register, and reading a number
+                         of bytes equal to the blob size from the data
+                         register.
+
+               --- Listing fw_cfg blobs by file name ---
+
+               While the fw_cfg device does not impose any specific naming
+               convention on the blobs registered in the file directory,
+               QEMU developers have traditionally used path name semantics
+               to give each blob a descriptive name. For example:
+
+                       "bootorder"
+                       "genroms/kvmvapic.bin"
+                       "etc/e820"
+                       "etc/boot-fail-wait"
+                       "etc/system-states"
+                       "etc/table-loader"
+                       "etc/acpi/rsdp"
+                       "etc/acpi/tables"
+                       "etc/smbios/smbios-tables"
+                       "etc/smbios/smbios-anchor"
+                       ...
+
+               In addition to the listing by unique selector key described
+               above, the fw_cfg sysfs driver also attempts to build a tree
+               of directories matching the path name components of fw_cfg
+               blob names, ending in symlinks to the by_key entry for each
+               "basename", as illustrated below (assume current directory is
+               /sys/firmware):
+
+                   qemu_fw_cfg/by_name/bootorder -> ../by_key/38
+                   qemu_fw_cfg/by_name/etc/e820 -> ../../by_key/35
+                   qemu_fw_cfg/by_name/etc/acpi/rsdp -> ../../../by_key/41
+                   ...
+
+               Construction of the directory tree and symlinks is done on a
+               "best-effort" basis, as there is no guarantee that components
+               of fw_cfg blob names are always "well behaved". I.e., there is
+               the possibility that a symlink (basename) will conflict with
+               a dirname component of another fw_cfg blob, in which case the
+               creation of the offending /sys/firmware/qemu_fw_cfg/by_name
+               entry will be skipped.
+
+               The authoritative list of entries will continue to be found
+               under the /sys/firmware/qemu_fw_cfg/by_key directory.
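
               A minimal user-space sketch (illustrative only) of reading this
               interface: it prints the fw_cfg revision and the size attribute
               of a hypothetical "bootorder" blob. The blob path and the
               read_long() helper are assumptions made for the example.

#include <stdio.h>
#include <stdlib.h>

/* Read a single decimal value from a sysfs attribute (example helper). */
static long read_long(const char *path)
{
	FILE *f = fopen(path, "r");
	long val = -1;

	if (!f)
		return -1;
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* The only legacy blob exposed: the fw_cfg device revision. */
	long rev = read_long("/sys/firmware/qemu_fw_cfg/rev");
	/* Hypothetical blob; present only if listed in the file directory. */
	long size = read_long("/sys/firmware/qemu_fw_cfg/by_name/bootorder/size");

	printf("fw_cfg revision: %ld\n", rev);
	printf("bootorder size:  %ld bytes\n", size);
	return (rev < 0) ? EXIT_FAILURE : EXIT_SUCCESS;
}
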
index e5200f354abfe933d3fbe69f919da2dcc0e585a6..a809f6005f1464ed7c86133db7b4b95d4a3c7388 100644 (file)
@@ -98,3 +98,17 @@ Date:                October 2015
 Contact:       "Chao Yu" <chao2.yu@samsung.com>
 Description:
                 Controls the count of nid pages to be readaheaded.
+
+What:          /sys/fs/f2fs/<disk>/dirty_nats_ratio
+Date:          January 2016
+Contact:       "Chao Yu" <chao2.yu@samsung.com>
+Description:
+                Controls the dirty nat entries ratio threshold. If the
+                current ratio exceeds the configured threshold, a
+                checkpoint will be triggered to flush dirty nat entries.
+
+What:          /sys/fs/f2fs/<disk>/lifetime_write_kbytes
+Date:          January 2016
+Contact:       "Shuoran Liu" <liushuoran@huawei.com>
+Description:
+                Shows total written kbytes issued to disk.
index db653774c0b74fe5f3a98598b5b68d6c10a20e34..9a70ddd16584bd92b1f834286adf6706ced4f0dc 100644 (file)
@@ -640,7 +640,7 @@ Things to avoid when using macros:
                do {                                    \
                        if (blah(x) < 0)                \
                                return -EBUGGERED;      \
-               } while(0)
+               } while (0)
 
 is a _very_ bad idea.  It looks like a function call but exits the "calling"
 function; don't break the internal parsers of those who will read the code.
index 07df23ea06e4936d6de435ba4c862ffdb4b299d1..866ff082272b42b460e51308111410e7bb7db821 100644 (file)
@@ -1761,19 +1761,6 @@ read(opfd, out, outlen);
 !Finclude/linux/crypto.h crypto_cipher_setkey
 !Finclude/linux/crypto.h crypto_cipher_encrypt_one
 !Finclude/linux/crypto.h crypto_cipher_decrypt_one
-   </sect1>
-   <sect1><title>Synchronous Message Digest API</title>
-!Pinclude/linux/crypto.h Synchronous Message Digest API
-!Finclude/linux/crypto.h crypto_alloc_hash
-!Finclude/linux/crypto.h crypto_free_hash
-!Finclude/linux/crypto.h crypto_has_hash
-!Finclude/linux/crypto.h crypto_hash_blocksize
-!Finclude/linux/crypto.h crypto_hash_digestsize
-!Finclude/linux/crypto.h crypto_hash_init
-!Finclude/linux/crypto.h crypto_hash_update
-!Finclude/linux/crypto.h crypto_hash_final
-!Finclude/linux/crypto.h crypto_hash_digest
-!Finclude/linux/crypto.h crypto_hash_setkey
    </sect1>
    <sect1><title>Message Digest Algorithm Definitions</title>
 !Pinclude/crypto/hash.h Message Digest Algorithm Definitions
index cdd8b24db68d3efc7f9a8d85130c8b851b2f963b..184f3c7b5145e3129cd7a26456cc10705f872ab1 100644 (file)
@@ -229,6 +229,7 @@ X!Isound/sound_firmware.c
 !Iinclude/media/v4l2-dv-timings.h
 !Iinclude/media/v4l2-event.h
 !Iinclude/media/v4l2-flash-led-class.h
+!Iinclude/media/v4l2-mc.h
 !Iinclude/media/v4l2-mediabus.h
 !Iinclude/media/v4l2-mem2mem.h
 !Iinclude/media/v4l2-of.h
@@ -368,7 +369,7 @@ X!Ilib/fonts/fonts.c
 !Iinclude/linux/input-polldev.h
 !Edrivers/input/input-polldev.c
      </sect1>
-     <sect1><title>Matrix keyboars/keypads</title>
+     <sect1><title>Matrix keyboards/keypads</title>
 !Iinclude/linux/input/matrix_keypad.h
      </sect1>
      <sect1><title>Sparse keymap support</title>
index a8669330b456eff026895af70af1121559998d2e..fe6b36a2fd9882950f3a8bc578c4a2e0c2d85fb0 100644 (file)
@@ -2886,52 +2886,8 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
     </sect2>
     <sect2>
       <title>File Operations</title>
-      <synopsis>const struct file_operations *fops</synopsis>
-      <abstract>File operations for the DRM device node.</abstract>
-      <para>
-        Drivers must define the file operations structure that forms the DRM
-       userspace API entry point, even though most of those operations are
-       implemented in the DRM core. The <methodname>open</methodname>,
-       <methodname>release</methodname> and <methodname>ioctl</methodname>
-       operations are handled by
-       <programlisting>
-       .owner = THIS_MODULE,
-       .open = drm_open,
-       .release = drm_release,
-       .unlocked_ioctl = drm_ioctl,
-  #ifdef CONFIG_COMPAT
-       .compat_ioctl = drm_compat_ioctl,
-  #endif
-        </programlisting>
-      </para>
-      <para>
-        Drivers that implement private ioctls that requires 32/64bit
-       compatibility support must provide their own
-       <methodname>compat_ioctl</methodname> handler that processes private
-       ioctls and calls <function>drm_compat_ioctl</function> for core ioctls.
-      </para>
-      <para>
-        The <methodname>read</methodname> and <methodname>poll</methodname>
-       operations provide support for reading DRM events and polling them. They
-       are implemented by
-       <programlisting>
-       .poll = drm_poll,
-       .read = drm_read,
-       .llseek = no_llseek,
-       </programlisting>
-      </para>
-      <para>
-        The memory mapping implementation varies depending on how the driver
-       manages memory. Pre-GEM drivers will use <function>drm_mmap</function>,
-       while GEM-aware drivers will use <function>drm_gem_mmap</function>. See
-       <xref linkend="drm-gem"/>.
-       <programlisting>
-       .mmap = drm_gem_mmap,
-       </programlisting>
-      </para>
-      <para>
-        No other file operation is supported by the DRM API.
-      </para>
+!Pdrivers/gpu/drm/drm_fops.c file operations
+!Edrivers/gpu/drm/drm_fops.c
     </sect2>
     <sect2>
       <title>IOCTLs</title>
@@ -3319,6 +3275,12 @@ int num_ioctls;</synopsis>
 !Pdrivers/gpu/drm/i915/intel_csr.c csr support for dmc
 !Idrivers/gpu/drm/i915/intel_csr.c
       </sect2>
+      <sect2>
+       <title>Video BIOS Table (VBT)</title>
+!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
+!Idrivers/gpu/drm/i915/intel_bios.c
+!Idrivers/gpu/drm/i915/intel_bios.h
+      </sect2>
     </sect1>
 
     <sect1>
@@ -3460,6 +3422,7 @@ int num_ioctls;</synopsis>
     </sect1>
     <sect1>
       <title>Public constants</title>
+!Finclude/linux/vga_switcheroo.h vga_switcheroo_handler_flags_t
 !Finclude/linux/vga_switcheroo.h vga_switcheroo_client_id
 !Finclude/linux/vga_switcheroo.h vga_switcheroo_state
     </sect1>
@@ -3488,6 +3451,10 @@ int num_ioctls;</synopsis>
         <title>Backlight control</title>
 !Pdrivers/platform/x86/apple-gmux.c Backlight control
       </sect2>
+      <sect2>
+        <title>Public functions</title>
+!Iinclude/linux/apple-gmux.h
+      </sect2>
     </sect1>
   </chapter>
 
index 63152ab9efbad0b10c59105effc55c49cc02d488..e0d49fa329f095bdc35ff643e4cac206232f2e03 100644 (file)
@@ -48,9 +48,6 @@
 
   <refsect1>
     <title>Description</title>
-
-    <para><emphasis role="bold">NOTE:</emphasis> This new ioctl is programmed to be added on Kernel 4.6. Its definition/arguments may change until its final version.</para>
-
     <para>The typical usage of this ioctl is to call it twice.
     On the first call, the structure defined at &media-v2-topology; should
     be zeroed. At return, if no errors happen, this ioctl will return the
index 1af3842509105ea45fd815ad953781d901b03bfe..751c3d0271036544dae44457fdab0f8767830ef7 100644 (file)
          </row>
          <row>
            <entry><constant>MEDIA_ENT_F_TUNER</constant></entry>
-           <entry>Digital TV, analog TV, radio and/or software radio tuner.</entry>
+           <entry>Digital TV, analog TV, radio and/or software radio tuner,
+                  which consists of a PLL tuning stage that converts a radio
+                  frequency (RF) signal into an Intermediate Frequency (IF).
+                  Modern tuners have internal IF-PLL decoders for audio
+                  and video, but older models have those stages implemented
+                  as separate entities.
+           </entry>
+         </row>
+         <row>
+           <entry><constant>MEDIA_ENT_F_IF_VID_DECODER</constant></entry>
+           <entry>IF-PLL video decoder. It receives the IF from a PLL
+                  and decodes the analog TV video signal. This is commonly
+                  found on some very old analog tuners, like Philips MK3
+                  designs. They all contain a tda9887 (or some
+                  software-compatible chip, like the tda9885). Those devices
+                  use a different I2C address than the tuner PLL.
+           </entry>
+         </row>
+         <row>
+           <entry><constant>MEDIA_ENT_F_IF_AUD_DECODER</constant></entry>
+           <entry>IF-PLL sound decoder. It receives the IF from a PLL
+                  and decodes the analog TV audio signal. This is commonly
+                  found on some very old analog hardware, like Micronas
+                  msp3400, Philips tda9840, tda985x, etc. Those devices
+                  use a different I2C address than the tuner PLL and
+                  should be controlled together with the IF-PLL video
+                  decoder.
+           </entry>
          </row>
        </tbody>
       </tgroup>
index e781cc61786c52b93f3f2973110f6040c56dac0e..7d13fe96657d6fef3ab8fa51bf54ec60a7f697fa 100644 (file)
@@ -1,35 +1,43 @@
-    <refentry id="V4L2-PIX-FMT-YUV420M">
+    <refentry>
       <refmeta>
-       <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12')</refentrytitle>
+       <refentrytitle>V4L2_PIX_FMT_YUV420M ('YM12'), V4L2_PIX_FMT_YVU420M ('YM21')</refentrytitle>
        &manvol;
       </refmeta>
       <refnamediv>
-       <refname> <constant>V4L2_PIX_FMT_YUV420M</constant></refname>
-       <refpurpose>Variation of <constant>V4L2_PIX_FMT_YUV420</constant>
-         with planes non contiguous in memory. </refpurpose>
+       <refname id="V4L2-PIX-FMT-YUV420M"><constant>V4L2_PIX_FMT_YUV420M</constant></refname>
+       <refname id="V4L2-PIX-FMT-YVU420M"><constant>V4L2_PIX_FMT_YVU420M</constant></refname>
+       <refpurpose>Variation of <constant>V4L2_PIX_FMT_YUV420</constant> and
+         <constant>V4L2_PIX_FMT_YVU420</constant> with planes non contiguous
+         in memory.</refpurpose>
       </refnamediv>
 
       <refsect1>
        <title>Description</title>
 
        <para>This is a multi-planar format, as opposed to a packed format.
-The three components are separated into three sub- images or planes.
+The three components are separated into three sub-images or planes.</para>
 
-The Y plane is first. The Y plane has one byte per pixel. The Cb data
+       <para>The Y plane is first. The Y plane has one byte per pixel.
+For <constant>V4L2_PIX_FMT_YUV420M</constant> the Cb data
 constitutes the second plane which is half the width and half
 the height of the Y plane (and of the image). Each Cb belongs to four
 pixels, a two-by-two square of the image. For example,
 Cb<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
 Y'<subscript>01</subscript>, Y'<subscript>10</subscript>, and
 Y'<subscript>11</subscript>. The Cr data, just like the Cb plane, is
-in the third plane. </para>
+in the third plane.</para>
+
+       <para><constant>V4L2_PIX_FMT_YVU420M</constant> is the same except
+the Cr data is stored in the second plane and the Cb data in the third plane.
+</para>
 
        <para>If the Y plane has pad bytes after each row, then the Cb
 and Cr planes have half as many pad bytes after their rows. In other
 words, two Cx rows (including padding) is exactly as long as one Y row
 (including padding).</para>
 
-       <para><constant>V4L2_PIX_FMT_YUV420M</constant> is intended to be
+       <para><constant>V4L2_PIX_FMT_YUV420M</constant> and
+<constant>V4L2_PIX_FMT_YVU420M</constant> are intended to be
 used only in drivers and applications that support the multi-planar API,
 described in <xref linkend="planar-apis"/>. </para>
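
For illustration, a minimal C sketch of the per-plane sizes implied by the
layout above, assuming an arbitrary 640x480 frame and no row padding:

#include <stdio.h>

int main(void)
{
	unsigned int width = 640, height = 480;	/* arbitrary example frame */

	/* Plane 0: Y, one byte per pixel. */
	unsigned int y_size = width * height;
	/* Planes 1 and 2: Cb then Cr (YUV420M) or Cr then Cb (YVU420M),
	 * each subsampled 2x2, i.e. half the width and half the height. */
	unsigned int c_size = (width / 2) * (height / 2);

	printf("Y plane:      %u bytes\n", y_size);
	printf("each C plane: %u bytes\n", c_size);
	return 0;
}
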
 
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yvu420m.xml
deleted file mode 100644 (file)
index 2330667..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-    <refentry id="V4L2-PIX-FMT-YVU420M">
-      <refmeta>
-       <refentrytitle>V4L2_PIX_FMT_YVU420M ('YM21')</refentrytitle>
-       &manvol;
-      </refmeta>
-      <refnamediv>
-       <refname> <constant>V4L2_PIX_FMT_YVU420M</constant></refname>
-       <refpurpose>Variation of <constant>V4L2_PIX_FMT_YVU420</constant>
-         with planes non contiguous in memory. </refpurpose>
-      </refnamediv>
-
-      <refsect1>
-       <title>Description</title>
-
-       <para>This is a multi-planar format, as opposed to a packed format.
-The three components are separated into three sub-images or planes.
-
-The Y plane is first. The Y plane has one byte per pixel. The Cr data
-constitutes the second plane which is half the width and half
-the height of the Y plane (and of the image). Each Cr belongs to four
-pixels, a two-by-two square of the image. For example,
-Cr<subscript>0</subscript> belongs to Y'<subscript>00</subscript>,
-Y'<subscript>01</subscript>, Y'<subscript>10</subscript>, and
-Y'<subscript>11</subscript>. The Cb data, just like the Cr plane, constitutes
-the third plane. </para>
-
-       <para>If the Y plane has pad bytes after each row, then the Cr
-and Cb planes have half as many pad bytes after their rows. In other
-words, two Cx rows (including padding) is exactly as long as one Y row
-(including padding).</para>
-
-       <para><constant>V4L2_PIX_FMT_YVU420M</constant> is intended to be
-used only in drivers and applications that support the multi-planar API,
-described in <xref linkend="planar-apis"/>. </para>
-
-       <example>
-         <title><constant>V4L2_PIX_FMT_YVU420M</constant> 4 &times; 4
-pixel image</title>
-
-         <formalpara>
-           <title>Byte Order.</title>
-           <para>Each cell is one byte.
-               <informaltable frame="none">
-               <tgroup cols="5" align="center">
-                 <colspec align="left" colwidth="2*" />
-                 <tbody valign="top">
-                   <row>
-                     <entry>start0&nbsp;+&nbsp;0:</entry>
-                     <entry>Y'<subscript>00</subscript></entry>
-                     <entry>Y'<subscript>01</subscript></entry>
-                     <entry>Y'<subscript>02</subscript></entry>
-                     <entry>Y'<subscript>03</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start0&nbsp;+&nbsp;4:</entry>
-                     <entry>Y'<subscript>10</subscript></entry>
-                     <entry>Y'<subscript>11</subscript></entry>
-                     <entry>Y'<subscript>12</subscript></entry>
-                     <entry>Y'<subscript>13</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start0&nbsp;+&nbsp;8:</entry>
-                     <entry>Y'<subscript>20</subscript></entry>
-                     <entry>Y'<subscript>21</subscript></entry>
-                     <entry>Y'<subscript>22</subscript></entry>
-                     <entry>Y'<subscript>23</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start0&nbsp;+&nbsp;12:</entry>
-                     <entry>Y'<subscript>30</subscript></entry>
-                     <entry>Y'<subscript>31</subscript></entry>
-                     <entry>Y'<subscript>32</subscript></entry>
-                     <entry>Y'<subscript>33</subscript></entry>
-                   </row>
-                   <row><entry></entry></row>
-                   <row>
-                     <entry>start1&nbsp;+&nbsp;0:</entry>
-                     <entry>Cr<subscript>00</subscript></entry>
-                     <entry>Cr<subscript>01</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start1&nbsp;+&nbsp;2:</entry>
-                     <entry>Cr<subscript>10</subscript></entry>
-                     <entry>Cr<subscript>11</subscript></entry>
-                   </row>
-                   <row><entry></entry></row>
-                   <row>
-                     <entry>start2&nbsp;+&nbsp;0:</entry>
-                     <entry>Cb<subscript>00</subscript></entry>
-                     <entry>Cb<subscript>01</subscript></entry>
-                   </row>
-                   <row>
-                     <entry>start2&nbsp;+&nbsp;2:</entry>
-                     <entry>Cb<subscript>10</subscript></entry>
-                     <entry>Cb<subscript>11</subscript></entry>
-                   </row>
-                 </tbody>
-               </tgroup>
-               </informaltable>
-             </para>
-         </formalpara>
-
-         <formalpara>
-           <title>Color Sample Location.</title>
-           <para>
-               <informaltable frame="none">
-               <tgroup cols="7" align="center">
-                 <tbody valign="top">
-                   <row>
-                     <entry></entry>
-                     <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
-                     <entry>2</entry><entry></entry><entry>3</entry>
-                   </row>
-                   <row>
-                     <entry>0</entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry>
-                   </row>
-                   <row>
-                     <entry></entry>
-                     <entry></entry><entry>C</entry><entry></entry><entry></entry>
-                     <entry></entry><entry>C</entry><entry></entry>
-                   </row>
-                   <row>
-                     <entry>1</entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry>
-                   </row>
-                   <row>
-                     <entry></entry>
-                   </row>
-                   <row>
-                     <entry>2</entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry>
-                   </row>
-                   <row>
-                     <entry></entry>
-                     <entry></entry><entry>C</entry><entry></entry><entry></entry>
-                     <entry></entry><entry>C</entry><entry></entry>
-                   </row>
-                   <row>
-                     <entry>3</entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
-                     <entry>Y</entry><entry></entry><entry>Y</entry>
-                   </row>
-                 </tbody>
-               </tgroup>
-               </informaltable>
-             </para>
-         </formalpara>
-       </example>
-      </refsect1>
-    </refentry>
index d871245d29735a0084ea422539a0dc23f106ca10..9e77ff353febe87d1e8ba66b6159c78e656f716a 100644 (file)
@@ -1628,7 +1628,6 @@ information.</para>
     &sub-y41p;
     &sub-yuv420;
     &sub-yuv420m;
-    &sub-yvu420m;
     &sub-yuv410;
     &sub-yuv422p;
     &sub-yuv411p;
index e9c70a8f34762b878ee67aa51a57d568ce5d6791..0c93677d16b449b931f5b9cb7591bd184be39fa2 100644 (file)
@@ -60,9 +60,19 @@ input</refpurpose>
 automatically, similar to sensing the video standard. To do so, applications
 call <constant>VIDIOC_QUERY_DV_TIMINGS</constant> with a pointer to a
 &v4l2-dv-timings;. Once the hardware detects the timings, it will fill in the
-timings structure.
+timings structure.</para>
 
-If the timings could not be detected because there was no signal, then
+<para>Please note that drivers shall <emphasis>not</emphasis> switch timings automatically
+if new timings are detected. Instead, drivers should send the
+<constant>V4L2_EVENT_SOURCE_CHANGE</constant> event (if they support this) and expect
+that userspace will take action by calling <constant>VIDIOC_QUERY_DV_TIMINGS</constant>.
+The reason is that new timings usually mean different buffer sizes as well, and you
+cannot change buffer sizes on the fly. In general, applications that receive the
+Source Change event will have to call <constant>VIDIOC_QUERY_DV_TIMINGS</constant>,
+and if the detected timings are valid they will have to stop streaming, set the new
+timings, allocate new buffers and start streaming again.</para>
+
+<para>If the timings could not be detected because there was no signal, then
 <errorcode>ENOLINK</errorcode> is returned. If a signal was detected, but
 it was unstable and the receiver could not lock to the signal, then
 <errorcode>ENOLCK</errorcode> is returned. If the receiver could lock to the signal,
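
The recovery flow described above (query the new timings, stop streaming,
apply them, reallocate buffers, restart) could look roughly like the following
user-space C sketch; the /dev/video0 node and the buffer count of 4 are
assumptions, and buffer mmap()ing and queueing are omitted for brevity. The
same pattern applies to VIDIOC_QUERYSTD together with VIDIOC_S_STD.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);	/* assumed receiver node */
	int type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	struct v4l2_dv_timings timings;
	struct v4l2_requestbuffers reqbufs;

	if (fd < 0)
		return 1;

	/* After V4L2_EVENT_SOURCE_CHANGE: ask the receiver what it detects. */
	memset(&timings, 0, sizeof(timings));
	if (ioctl(fd, VIDIOC_QUERY_DV_TIMINGS, &timings) < 0) {
		close(fd);	/* e.g. ENOLINK or ENOLCK: no usable signal yet */
		return 1;
	}

	/* Buffer sizes cannot change on the fly, so stop streaming first. */
	ioctl(fd, VIDIOC_STREAMOFF, &type);

	/* Apply the timings that were just detected. */
	ioctl(fd, VIDIOC_S_DV_TIMINGS, &timings);

	/* Release the old buffers and allocate new ones for the new format. */
	memset(&reqbufs, 0, sizeof(reqbufs));
	reqbufs.type = type;
	reqbufs.memory = V4L2_MEMORY_MMAP;
	reqbufs.count = 0;			/* free the old buffers */
	ioctl(fd, VIDIOC_REQBUFS, &reqbufs);
	reqbufs.count = 4;			/* assumed buffer count */
	ioctl(fd, VIDIOC_REQBUFS, &reqbufs);

	/* mmap() and queue the new buffers here, then restart streaming. */
	ioctl(fd, VIDIOC_STREAMON, &type);

	close(fd);
	return 0;
}
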
index 222348542182bfb06b5019bbac2ea023d06bab02..3ceae35fab0317e22fe891ce8c895346f527d223 100644 (file)
@@ -59,6 +59,16 @@ then the driver will return V4L2_STD_UNKNOWN. When detection is not
 possible or fails, the set must contain all standards supported by the
 current video input or output.</para>
 
+<para>Please note that drivers shall <emphasis>not</emphasis> switch the video standard
+automatically if a new video standard is detected. Instead, drivers should send the
+<constant>V4L2_EVENT_SOURCE_CHANGE</constant> event (if they support this) and expect
+that userspace will take action by calling <constant>VIDIOC_QUERYSTD</constant>.
+The reason is that a new video standard can mean different buffer sizes as well, and you
+cannot change buffer sizes on the fly. In general, applications that receive the
+Source Change event will have to call <constant>VIDIOC_QUERYSTD</constant>,
+and if the detected video standard is valid they will have to stop streaming, set the new
+standard, allocate new buffers and start streaming again.</para>
+
   </refsect1>
 
   <refsect1>
index d5a699d5a55181a3b585ae9c7ae2c047a38bdf59..ef2ff1e9d3e0a0ce6d90afcb88df501b2892f1ba 100644 (file)
@@ -187,7 +187,7 @@ apply a patch.
 If you do not know where you want to start, but you want to look for
 some task to start doing to join into the kernel development community,
 go to the Linux Kernel Janitor's project:
-       http://kernelnewbies.org/KernelJanitors 
+       http://kernelnewbies.org/KernelJanitors
 It is a great place to start.  It describes a list of relatively simple
 problems that need to be cleaned up and fixed within the Linux kernel
 source tree.  Working with the developers in charge of this project, you
@@ -250,11 +250,6 @@ process is as follows:
     release a new -rc kernel every week.
   - Process continues until the kernel is considered "ready", the
     process should last around 6 weeks.
-  - Known regressions in each release are periodically posted to the 
-    linux-kernel mailing list.  The goal is to reduce the length of 
-    that list to zero before declaring the kernel to be "ready," but, in
-    the real world, a small number of regressions often remain at 
-    release time.
 
 It is worth mentioning what Andrew Morton wrote on the linux-kernel
 mailing list about kernel releases:
index 7b57fc087088f49756eeb8eaabf403bfbbd92b93..49585b6e1ea24c951ea58090b762740aa36b066d 100644 (file)
@@ -3,7 +3,7 @@ Linux IOMMU Support
 
 The architecture spec can be obtained from the below location.
 
-http://www.intel.com/technology/virtualization/
+http://www.intel.com/content/dam/www/public/us/en/documents/product-specifications/vt-directed-io-spec.pdf
 
 This guide gives a quick cheat sheet for some basic understanding.
 
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCU.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCU.svg
new file mode 100644 (file)
index 0000000..727e270
--- /dev/null
@@ -0,0 +1,474 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:28:20 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="9.1in"
+   height="8.9in"
+   viewBox="-66 -66 10932 10707"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreeClassicRCU.fig">
+  <metadata
+     id="metadata106">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs104">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3864"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="973"
+     inkscape:window-height="1137"
+     id="namedview102"
+     showgrid="false"
+     inkscape:zoom="0.9743589"
+     inkscape:cx="409.50003"
+     inkscape:cy="400.49997"
+     inkscape:window-x="915"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="10800"
+       height="5625"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="1125"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="3825"
+       y="900"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="6525"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line -->
+    <polyline
+       points="3375,6525 3375,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline14" />
+    <!-- Arrowhead on XXXpoint 3375 6525 - 3375 4860-->
+    <!-- Circle -->
+    <circle
+       cx="7425"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle18" />
+    <!-- Circle -->
+    <circle
+       cx="7875"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle20" />
+    <!-- Circle -->
+    <circle
+       cx="8325"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle22" />
+    <!-- Circle -->
+    <circle
+       cx="2025"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle24" />
+    <!-- Circle -->
+    <circle
+       cx="2475"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="2925"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="4725"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle30" />
+    <!-- Circle -->
+    <circle
+       cx="5175"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle32" />
+    <!-- Circle -->
+    <circle
+       cx="5625"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle34" />
+    <!-- Line: box -->
+    <rect
+       x="2025"
+       y="6525"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect36" />
+    <!-- Line -->
+    <polyline
+       points="2475,3600 3975,2310 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 2475 3600 - 4116 2190-->
+    <!-- Line -->
+    <polyline
+       points="7875,3600 6372,2310 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline42" />
+    <!-- Arrowhead on XXXpoint 7875 3600 - 6231 2190-->
+    <!-- Line -->
+    <polyline
+       points="6975,8775 6975,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 6975 8775 - 6975 4860-->
+    <!-- Line -->
+    <polyline
+       points="1575,8775 1575,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 1575 8775 - 1575 4860-->
+    <!-- Line -->
+    <polyline
+       points="8775,6525 8775,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 8775 6525 - 8775 4860-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1575"
+       y="9225"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text58">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1575"
+       y="9675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text60">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1575"
+       y="10350"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text62">CPU 0</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3375"
+       y="6975"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text64">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3375"
+       y="7425"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text66">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3375"
+       y="8100"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text68">CPU 15</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6975"
+       y="9225"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text70">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6975"
+       y="9675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text72">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6975"
+       y="10350"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text74">CPU 1007</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="6930"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text76">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="7380"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text78">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="8055"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text80">CPU 1023</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="start"
+       id="text82">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2475"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text84">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2475"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text86">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7875"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text88">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7875"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text90">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5175"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text92">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5175"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text94">rcu_node</text>
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="8775"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect96" />
+    <!-- Line: box -->
+    <rect
+       x="5625"
+       y="8775"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect98" />
+    <!-- Line: box -->
+    <rect
+       x="7380"
+       y="6480"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect100" />
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg
new file mode 100644 (file)
index 0000000..9bbb194
--- /dev/null
@@ -0,0 +1,499 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:26:09 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="5.7in"
+   height="6.6in"
+   viewBox="-44 -44 6838 7888"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreeClassicRCUBH.fig">
+  <metadata
+     id="metadata110">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs108">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3868"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Mend"
+       style="overflow:visible;">
+      <path
+         id="path3886"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(0.6) rotate(180) translate(0,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="878"
+     inkscape:window-height="1148"
+     id="namedview106"
+     showgrid="false"
+     inkscape:zoom="1.3547758"
+     inkscape:cx="256.5"
+     inkscape:cy="297"
+     inkscape:window-x="45"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect14" />
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle16" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle18" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle20" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle22" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle24" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle30" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle32" />
+    <!-- Line -->
+    <polyline
+       points="1350,3450 2350,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+    <!-- Line -->
+    <polyline
+       points="4950,3450 3948,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect42" />
+    <!-- Line -->
+    <polyline
+       points="2250,5400 2250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect48" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect50" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect52" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect54" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect56" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="1650"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect58" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text60">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="1950"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text62">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text64">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text66">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text68">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text70">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text72">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text74">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text76">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text78">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text80">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text82">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text84">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text86">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text88">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text90">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text92">rcu_sched</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5400 5250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline94" />
+    <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+    <!-- Line -->
+    <polyline
+       points="4050,6600 4050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline98" />
+    <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+    <!-- Line -->
+    <polyline
+       points="1050,6600 1050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline102" />
+    <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg
new file mode 100644 (file)
index 0000000..21ba782
--- /dev/null
@@ -0,0 +1,695 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:20:02 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="5.7in"
+   height="8.6in"
+   viewBox="-44 -44 6838 10288"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreeClassicRCUBHdyntick.fig">
+  <metadata
+     id="metadata166">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs164">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3924"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;">
+      <path
+         id="path3936"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="845"
+     inkscape:window-height="988"
+     id="namedview162"
+     showgrid="false"
+     inkscape:zoom="1.0452196"
+     inkscape:cx="256.5"
+     inkscape:cy="387.00003"
+     inkscape:window-x="356"
+     inkscape:window-y="61"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect10" />
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5688,5912 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline12" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+    <polyline
+       points="5714 6068 5704 5822 5598 6044 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline14" />
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4486,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline16" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+    <polyline
+       points="4514 7418 4506 7172 4396 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline18" />
+    <!-- Line -->
+    <polyline
+       points="1040,9300 1476,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+    <polyline
+       points="1504 7418 1496 7172 1386 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline22" />
+    <!-- Line -->
+    <polyline
+       points="2240,8100 2676,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline24" />
+    <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+    <polyline
+       points="2704 6218 2696 5972 2586 6194 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline26" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect30" />
+    <!-- Line -->
+    <polyline
+       points="1350,3450 2350,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+    <!-- Line -->
+    <polyline
+       points="4950,3450 3948,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+    <!-- Line -->
+    <polyline
+       points="4050,6600 4050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+    <!-- Line -->
+    <polyline
+       points="1050,6600 1050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,5400 2250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline48" />
+    <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,8100 2250,6364 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline52" />
+    <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+    <!-- Line -->
+    <polyline
+       points="1050,9300 1050,7564 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline56" />
+    <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4050,7564 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5250,6364 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline64" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle80" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle82" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle84" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect86" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect88" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect90" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect92" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect94" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="1650"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect96" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect98" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect100" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect102" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect104" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect106" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text108">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="1950"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text110">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text112">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text114">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text116">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text118">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text120">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text122">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text124">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text126">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text128">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text130">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text132">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text134">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text136">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text138">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text152">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text154">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text156">rcu_sched</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5400 5250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline158" />
+    <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg
new file mode 100644 (file)
index 0000000..15adcac
--- /dev/null
@@ -0,0 +1,741 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:32:59 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="6.1in"
+   height="8.9in"
+   viewBox="-44 -44 7288 10738"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreePreemptRCUBHdyntick.fig">
+  <metadata
+     id="metadata182">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs180">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3940"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="874"
+     inkscape:window-height="1148"
+     id="namedview178"
+     showgrid="false"
+     inkscape:zoom="1.2097379"
+     inkscape:cx="274.5"
+     inkscape:cy="400.49997"
+     inkscape:window-x="946"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="900"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="1200"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="5400"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect16" />
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5688,6362 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline18" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240-->
+    <polyline
+       points="5714 6518 5704 6272 5598 6494 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline20" />
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4486,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590-->
+    <polyline
+       points="4514 7868 4506 7622 4396 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline24" />
+    <!-- Line -->
+    <polyline
+       points="1040,9750 1476,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline26" />
+    <!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590-->
+    <polyline
+       points="1504 7868 1496 7622 1386 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline28" />
+    <!-- Line -->
+    <polyline
+       points="2240,8550 2676,6512 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline30" />
+    <!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390-->
+    <polyline
+       points="2704 6668 2696 6422 2586 6644 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline32" />
+    <!-- Line -->
+    <polyline
+       points="4050,9750 5682,6360 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 5736 6246-->
+    <polyline
+       points="5672 6518 5722 6276 5562 6466 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline36" />
+    <!-- Line -->
+    <polyline
+       points="1010,9750 2642,6360 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 1010 9750 - 2696 6246-->
+    <polyline
+       points="2632 6518 2682 6276 2522 6466 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline40" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="900"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect42" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1500"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect44" />
+    <!-- Line -->
+    <polyline
+       points="1350,3900 2350,3040 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960-->
+    <!-- Line -->
+    <polyline
+       points="4950,3900 3948,3040 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960-->
+    <!-- Line -->
+    <polyline
+       points="4050,7050 4050,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740-->
+    <!-- Line -->
+    <polyline
+       points="1050,7050 1050,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline58" />
+    <!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,5850 2250,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline62" />
+    <!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,8550 2250,6814 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline66" />
+    <!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690-->
+    <!-- Line -->
+    <polyline
+       points="1050,9750 1050,8014 "
+       style="stroke:#00ff00;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline70" />
+    <!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890-->
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4050,8014 "
+       style="stroke:#00ff00;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline74" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890-->
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5250,6814 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline78" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle82" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle84" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle86" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle88" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle90" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle92" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle94" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle96" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle98" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect100" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect102" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect104" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect106" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect108" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="2100"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect110" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect112" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect114" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect116" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect118" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect120" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text122">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text124">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text126">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text128">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text130">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text132">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text134">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text136">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text138">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text152">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text154">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text156">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text158">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text160">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text164">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text166">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text168">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6900"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text170">rcu_preempt</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="1200"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text172">rcu_sched</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5850 5250,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline174" />
+    <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg
new file mode 100644 (file)
index 0000000..bbc3801
--- /dev/null
@@ -0,0 +1,858 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:29:48 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="7.4in"
+   height="9.9in"
+   viewBox="-44 -44 8938 11938"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreePreemptRCUBHdyntickCB.svg">
+  <metadata
+     id="metadata212">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs210">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3970"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="881"
+     inkscape:window-height="1128"
+     id="namedview208"
+     showgrid="false"
+     inkscape:zoom="1.0195195"
+     inkscape:cx="333"
+     inkscape:cy="445.49997"
+     inkscape:window-x="936"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="900"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="1200"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="5400"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect16" />
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5688,6362 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline18" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240-->
+    <polyline
+       points="5714 6518 5704 6272 5598 6494 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline20" />
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4486,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590-->
+    <polyline
+       points="4514 7868 4506 7622 4396 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline24" />
+    <!-- Line -->
+    <polyline
+       points="1040,9750 1476,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline26" />
+    <!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590-->
+    <polyline
+       points="1504 7868 1496 7622 1386 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline28" />
+    <!-- Line -->
+    <polyline
+       points="2240,8550 2676,6512 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline30" />
+    <!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390-->
+    <polyline
+       points="2704 6668 2696 6422 2586 6644 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline32" />
+    <!-- Line -->
+    <polyline
+       points="4050,9600 5692,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 4050 9600 - 5744 5948-->
+    <polyline
+       points="5682 6220 5730 5978 5574 6170 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline36" />
+    <!-- Line -->
+    <polyline
+       points="1086,9600 2728,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 1086 9600 - 2780 5948-->
+    <polyline
+       points="2718 6220 2766 5978 2610 6170 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline40" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="900"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect42" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1500"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect44" />
+    <!-- Line -->
+    <polyline
+       points="1350,3900 2350,3040 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960-->
+    <!-- Line -->
+    <polyline
+       points="4950,3900 3948,3040 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960-->
+    <!-- Line -->
+    <polyline
+       points="4050,7050 4050,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740-->
+    <!-- Line -->
+    <polyline
+       points="1050,7050 1050,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline58" />
+    <!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,5850 2250,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline62" />
+    <!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,8550 2250,6814 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline66" />
+    <!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690-->
+    <!-- Line -->
+    <polyline
+       points="1050,9750 1050,8014 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline70" />
+    <!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890-->
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4050,8014 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline74" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890-->
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5250,6814 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline78" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690-->
+    <!-- Line -->
+    <polyline
+       points="6000,6300 8048,7910 "
+       style="stroke:#87cfff;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline82" />
+    <!-- Arrowhead on XXXpoint 6000 6300 - 8146 7986-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle86" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle88" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle90" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle92" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle94" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle96" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle98" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle100" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle102" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="7950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect104" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="9450"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect106" />
+    <!-- Line -->
+    <polyline
+       points="8100,8850 8100,9384 "
+       style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline108" />
+    <!-- Arrowhead on XXXpoint 8100 8850 - 8100 9510-->
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="10950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect112" />
+    <!-- Line -->
+    <polyline
+       points="8100,10350 8100,10884 "
+       style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline114" />
+    <!-- Arrowhead on XXXpoint 8100 10350 - 8100 11010-->
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect118" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect120" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect122" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect124" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect126" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="2100"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect128" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect130" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect132" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect134" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect136" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect138" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="8250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="8550"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_head</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="9750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_head</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="11250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="11550"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_head</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="1200"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text152">rcu_sched</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text154">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text156">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text158">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text160">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text162">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text164">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text166">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text168">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text170">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text172">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text174">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text176">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text178">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text180">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text182">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text184">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text186">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text188">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text190">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text192">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text194">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text196">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text198">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text200">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6900"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text202">rcu_preempt</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5850 5250,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline204" />
+    <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
new file mode 100644 (file)
index 0000000..ba9fbb5
--- /dev/null
@@ -0,0 +1,1372 @@
+<!-- DO NOT HAND EDIT. -->
+<!-- Instead, edit Data-Structures.htmlx and run 'sh htmlqqz.sh Data-Structures' -->
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        "http://www.w3.org/TR/html4/loose.dtd">
+        <html>
+        <head><title>A Tour Through TREE_RCU's Data Structures [LWN.net]</title>
+        <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+
+           <p>January 27, 2016</p>
+           <p>This article was contributed by Paul E.&nbsp;McKenney</p>
+
+<h3>Introduction</h3>
+
+This document describes RCU's major data structures and their relationship
+to each other.
+
+<ol>
+<li>   <a href="#Data-Structure Relationships">
+       Data-Structure Relationships</a>
+<li>   <a href="#The rcu_state Structure">
+       The <tt>rcu_state</tt> Structure</a>
+<li>   <a href="#The rcu_node Structure">
+       The <tt>rcu_node</tt> Structure</a>
+<li>   <a href="#The rcu_data Structure">
+       The <tt>rcu_data</tt> Structure</a>
+<li>   <a href="#The rcu_dynticks Structure">
+       The <tt>rcu_dynticks</tt> Structure</a>
+<li>   <a href="#The rcu_head Structure">
+       The <tt>rcu_head</tt> Structure</a>
+<li>   <a href="#RCU-Specific Fields in the task_struct Structure">
+       RCU-Specific Fields in the <tt>task_struct</tt> Structure</a>
+<li>   <a href="#Accessor Functions">
+       Accessor Functions</a>
+</ol>
+
+At the end we have the
+<a href="#Answers to Quick Quizzes">answers to the quick quizzes</a>.
+
+<h3><a name="Data-Structure Relationships">Data-Structure Relationships</a></h3>
+
+<p>RCU is for all intents and purposes a large state machine, and its
+data structures maintain the state in such a way as to allow RCU readers
+to execute extremely quickly, while also processing the RCU grace periods
+requested by updaters in an efficient and extremely scalable fashion.
+The efficiency and scalability of RCU updaters is provided primarily
+by a combining tree, as shown below:
+
+</p><p><img src="BigTreeClassicRCU.svg" alt="BigTreeClassicRCU.svg" width="30%">
+
+</p><p>This diagram shows an enclosing <tt>rcu_state</tt> structure
+containing a tree of <tt>rcu_node</tt> structures.
+Each leaf node of the <tt>rcu_node</tt> tree has up to 16
+<tt>rcu_data</tt> structures associated with it, so that there
+are <tt>NR_CPUS</tt> <tt>rcu_data</tt> structures,
+one for each possible CPU.
+This structure is adjusted at boot time, if needed, to handle the
+common case where <tt>nr_cpu_ids</tt> is much less than
+<tt>NR_CPUS</tt>.
+For example, a number of Linux distributions set <tt>NR_CPUS=4096</tt>,
+which results in a three-level <tt>rcu_node</tt> tree.
+If the actual hardware has only 16 CPUs, RCU will adjust itself
+at boot time, resulting in an <tt>rcu_node</tt> tree with only a single node.
+
+</p><p>The purpose of this combining tree is to allow per-CPU events
+such as quiescent states, dyntick-idle transitions,
+and CPU hotplug operations to be processed efficiently
+and scalably.
+Quiescent states are recorded by the per-CPU <tt>rcu_data</tt> structures,
+and other events are recorded by the leaf-level <tt>rcu_node</tt>
+structures.
+All of these events are combined at each level of the tree until finally
+grace periods are completed at the tree's root <tt>rcu_node</tt>
+structure.
+A grace period can be completed at the root once every CPU
+(or, in the case of <tt>CONFIG_TREE_PREEMPT_RCU</tt>, task)
+has passed through a quiescent state.
+Once a grace period has completed, record of that fact is propagated
+back down the tree.
+
+</p><p>As can be seen from the diagram, on a 64-bit system
+a two-level tree with 64 leaves can accommodate 1,024 CPUs, with a fanout
+of 64 at the root and a fanout of 16 at the leaves.
+
+<p><a name="Quick Quiz 1"><b>Quick Quiz 1</b>:</a>
+Why isn't the fanout at the leaves also 64?
+<br><a href="#qq1answer">Answer</a>
+
+</p><p>If your system has more than 1,024 CPUs (or more than 512 CPUs on
+a 32-bit system), then RCU will automatically add more levels to the
+tree.
+For example, if you are crazy enough to build a 64-bit system with 65,536
+CPUs, RCU would configure the <tt>rcu_node</tt> tree as follows:
+
+</p><p><img src="HugeTreeClassicRCU.svg" alt="HugeTreeClassicRCU.svg" width="50%">
+
+</p><p>RCU currently permits up to a four-level tree, which on a 64-bit system
+accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for
+32-bit systems.
+On the other hand, you can set <tt>CONFIG_RCU_FANOUT</tt> to be
+as small as 2 if you wish, which would permit only 16 CPUs, which
+is useful for testing.
+
+</p><p>The Linux kernel actually supports multiple flavors of RCU
+running concurrently, so RCU builds separate data structures for each
+flavor.
+For example, for <tt>CONFIG_TREE_RCU=y</tt> kernels, RCU provides
+rcu_sched and rcu_bh, as shown below:
+
+</p><p><img src="BigTreeClassicRCUBH.svg" alt="BigTreeClassicRCUBH.svg" width="33%">
+
+</p><p>Energy efficiency is increasingly important, and for that
+reason the Linux kernel provides <tt>CONFIG_NO_HZ_IDLE</tt>, which
+turns off the scheduling-clock interrupts on idle CPUs, which in
+turn allows those CPUs to attain deeper sleep states and to consume
+less energy.
+CPUs whose scheduling-clock interrupts have been turned off are
+said to be in <i>dyntick-idle mode</i>.
+RCU must handle dyntick-idle CPUs specially
+because RCU would otherwise wake up each CPU on every grace period,
+which would defeat the whole purpose of <tt>CONFIG_NO_HZ_IDLE</tt>.
+RCU uses the <tt>rcu_dynticks</tt> structure to track
+which CPUs are in dyntick idle mode, as shown below:
+
+</p><p><img src="BigTreeClassicRCUBHdyntick.svg" alt="BigTreeClassicRCUBHdyntick.svg" width="33%">
+
+</p><p>However, if a CPU is in dyntick-idle mode, it is in that mode
+for all flavors of RCU.
+Therefore, a single <tt>rcu_dynticks</tt> structure is allocated per
+CPU, and all of a given CPU's <tt>rcu_data</tt> structures share
+that <tt>rcu_dynticks</tt>, as shown in the figure.
+
+</p><p>Kernels built with <tt>CONFIG_TREE_PREEMPT_RCU</tt> support
+rcu_preempt in addition to rcu_sched and rcu_bh, as shown below:
+
+</p><p><img src="BigTreePreemptRCUBHdyntick.svg" alt="BigTreePreemptRCUBHdyntick.svg" width="35%">
+
+</p><p>RCU updaters wait for normal grace periods by registering
+RCU callbacks, either directly via <tt>call_rcu()</tt> and
+friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>,
+there being a separate interface per flavor of RCU)
+or indirectly via <tt>synchronize_rcu()</tt> and friends.
+RCU callbacks are represented by <tt>rcu_head</tt> structures,
+which are queued on <tt>rcu_data</tt> structures while they are
+waiting for a grace period to elapse, as shown in the following figure:
+
+</p><p><img src="BigTreePreemptRCUBHdyntickCB.svg" alt="BigTreePreemptRCUBHdyntickCB.svg" width="40%">
+
+</p><p>This figure shows how <tt>TREE_RCU</tt>'s and
+<tt>TREE_PREEMPT_RCU</tt>'s major data structures are related.
+Lesser data structures will be introduced with the algorithms that
+make use of them.
+
+</p><p>Note that each of the data structures in the above figure has
+its own synchronization:
+
+<p><ol>
+<li>   Each <tt>rcu_state</tt> structure has a lock and a mutex,
+       and some fields are protected by the corresponding root
+       <tt>rcu_node</tt> structure's lock.
+<li>   Each <tt>rcu_node</tt> structure has a spinlock.
+<li>   The fields in <tt>rcu_data</tt> are private to the corresponding
+       CPU, although a few can be read and written by other CPUs.
+<li>   Similarly, the fields in <tt>rcu_dynticks</tt> are private
+       to the corresponding CPU, although a few can be read by
+       other CPUs.
+</ol>
+
+<p>It is important to note that different data structures can have
+very different ideas about the state of RCU at any given time.
+For but one example, awareness of the start or end of a given RCU
+grace period propagates slowly through the data structures.
+This slow propagation is absolutely necessary for RCU to have good
+read-side performance.
+If this balkanized implementation seems foreign to you, one useful
+trick is to consider each instance of these data structures to be
+a different person, each having the usual slightly different
+view of reality.
+
+</p><p>The general role of each of these data structures is as
+follows:
+
+</p><ol>
+<li>   <tt>rcu_state</tt>:
+       This structure forms the interconnection between the
+       <tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
+       tracks grace periods, serves as a short-term repository
+       for callbacks orphaned by CPU-hotplug events,
+       maintains <tt>rcu_barrier()</tt> state,
+       tracks expedited grace-period state,
+       and maintains state used to force quiescent states when
+       grace periods extend too long.
+<li>   <tt>rcu_node</tt>: This structure forms the combining
+       tree that propagates quiescent-state
+       information from the leaves to the root, and also propagates
+       grace-period information from the root to the leaves.
+       It provides local copies of the grace-period state in order
+       to allow this information to be accessed in a synchronized
+       manner without suffering the scalability limitations that
+       would otherwise be imposed by global locking.
+       In <tt>CONFIG_TREE_PREEMPT_RCU</tt> kernels, it manages the lists
+       of tasks that have blocked while in their current
+       RCU read-side critical section.
+       In <tt>CONFIG_TREE_PREEMPT_RCU</tt> with
+       <tt>CONFIG_RCU_BOOST</tt>, it manages the
+       per-<tt>rcu_node</tt> priority-boosting
+       kernel threads (kthreads) and state.
+       Finally, it records CPU-hotplug state in order to determine
+       which CPUs should be ignored during a given grace period.
+<li>   <tt>rcu_data</tt>: This per-CPU structure is the
+       focus of quiescent-state detection and RCU callback queuing.
+       It also tracks its relationship to the corresponding leaf
+       <tt>rcu_node</tt> structure to allow more-efficient
+       propagation of quiescent states up the <tt>rcu_node</tt>
+       combining tree.
+       Like the <tt>rcu_node</tt> structure, it provides a local
+       copy of the grace-period information to allow synchronized
+       access to this information from the corresponding CPU
+       essentially for free.
+       Finally, this structure records past dyntick-idle state
+       for the corresponding CPU and also tracks statistics.
+<li>   <tt>rcu_dynticks</tt>:
+       This per-CPU structure tracks the current dyntick-idle
+       state for the corresponding CPU.
+       Unlike the other three structures, the <tt>rcu_dynticks</tt>
+       structure is not replicated per RCU flavor.
+<li>   <tt>rcu_head</tt>:
+       This structure represents RCU callbacks, and is the
+       only structure allocated and managed by RCU users.
+       The <tt>rcu_head</tt> structure is normally embedded
+       within the RCU-protected data structure.
+</ol>
+
+<p>If all you wanted from this article was a general notion of how
+RCU's data structures are related, you are done.
+Otherwise, each of the following sections gives more details on
+the <tt>rcu_state</tt>, <tt>rcu_node</tt>, <tt>rcu_data</tt>,
+and <tt>rcu_dynticks</tt> data structures.
+
+<h3><a name="The rcu_state Structure">
+The <tt>rcu_state</tt> Structure</a></h3>
+
+<p>The <tt>rcu_state</tt> structure is the base structure that
+represents a flavor of RCU.
+This structure forms the interconnection between the
+<tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
+tracks grace periods, contains the lock used to
+synchronize with CPU-hotplug events,
+and maintains state used to force quiescent states when
+grace periods extend too long.
+
+</p><p>A few of the <tt>rcu_state</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+The more specialized fields are covered in the discussion of their
+use.
+
+<h5>Relationship to rcu_node and rcu_data Structures</h5>
+
+This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   struct rcu_node node[NUM_RCU_NODES];
+  2   struct rcu_node *level[NUM_RCU_LVLS + 1];
+  3   struct rcu_data __percpu *rda;
+</pre>
+
+<p><a name="Quick Quiz 2"><b>Quick Quiz 2</b>:</a>
+Wait a minute!
+You said that the <tt>rcu_node</tt> structures formed a tree,
+but they are declared as a flat array!
+What gives?
+<br><a href="#qq2answer">Answer</a>
+
+</p><p>The <tt>rcu_node</tt> tree is embedded into the
+<tt>-&gt;node[]</tt> array as shown in the following figure:
+
+</p><p><img src="TreeMapping.svg" alt="TreeMapping.svg" width="40%">
+
+</p><p>One interesting consequence of this mapping is that a
+breadth-first traversal of the tree is implemented as a simple
+linear scan of the array, which is in fact what the
+<tt>rcu_for_each_node_breadth_first()</tt> macro does.
+This macro is used at the beginnings and ends of grace periods.
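+For illustration only, a sketch of how such a macro might be implemented
+in terms of the <tt>-&gt;node[]</tt> array is shown below; the in-kernel
+macro may differ in detail, for example, by using the boot-time-adjusted
+node count rather than <tt>NUM_RCU_NODES</tt>:
+
+<pre>
+  1   #define rcu_for_each_node_breadth_first(rsp, rnp) \
+  2           for ((rnp) = &amp;(rsp)->node[0]; \
+  3                (rnp) &lt; &amp;(rsp)->node[NUM_RCU_NODES]; (rnp)++)
+</pre>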
+
+</p><p>Each entry of the <tt>-&gt;level</tt> array references
+the first <tt>rcu_node</tt> structure on the corresponding level
+of the tree, for example, as shown below:
+
+</p><p><img src="TreeMappingLevel.svg" alt="TreeMappingLevel.svg" width="40%">
+
+</p><p>The zero<sup>th</sup> element of the array references the root
+<tt>rcu_node</tt> structure, the first element references the
+first child of the root <tt>rcu_node</tt>, and finally the second
+element references the first leaf <tt>rcu_node</tt> structure.
+
+<p><a name="Quick Quiz 3"><b>Quick Quiz 3</b>:</a>
+Given that this array represents a tree, why can't the diagram that
+includes the <tt>-&gt;level</tt> array be planar?
+<br><a href="#qq3answer">Answer</a>
+
+</p><p>Finally, the <tt>-&gt;rda</tt> field references a per-CPU
+pointer to the corresponding CPU's <tt>rcu_data</tt> structure.
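+For example, code needing a given CPU's <tt>rcu_data</tt> structure
+might obtain it roughly as follows (a sketch only, with illustrative
+variable names):
+
+<pre>
+  1   struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+</pre>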
+
+</p><p>All of these fields are constant once initialization is complete,
+and therefore need no protection.
+
+<h5>Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gpnum;
+  2   unsigned long completed;
+</pre>
+
+<p>RCU grace periods are numbered, and
+the <tt>-&gt;gpnum</tt> field contains the number of the grace
+period that started most recently.
+The <tt>-&gt;completed</tt> field contains the number of the
+grace period that completed most recently.
+If the two fields are equal, the RCU grace period that most recently
+started has already completed, and therefore the corresponding
+flavor of RCU is idle.
+If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
+then <tt>-&gt;gpnum</tt> gives the number of the current RCU
+grace period, which has not yet completed.
+Any other combination of values indicates that something is broken.
+These two fields are protected by the root <tt>rcu_node</tt>'s
+<tt>-&gt;lock</tt> field.
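+For example, a minimal sketch of a &ldquo;grace period in progress?&rdquo;
+test based on these two fields might look as follows (the function name
+is hypothetical):
+
+<pre>
+  1   static bool gp_in_progress(struct rcu_state *rsp)
+  2   {
+  3           return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
+  4   }
+</pre>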
+
+</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
+as well.
+The fields in the <tt>rcu_state</tt> structure represent the
+most current values, and those of the other structures are compared
+in order to detect the start of a new grace period in a distributed
+fashion.
+The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
+(down the tree from the root to the leaves) to <tt>rcu_data</tt>.
+
+<h5>Miscellaneous</h5>
+
+<p>This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gp_max;
+  2   char abbr;
+  3   char *name;
+</pre>
+
+<p>The <tt>-&gt;gp_max</tt> field tracks the duration of the longest
+grace period in jiffies.
+It is protected by the root <tt>rcu_node</tt>'s <tt>-&gt;lock</tt>.
+
+<p>The <tt>-&gt;name</tt> field points to the name of the RCU flavor
+(for example, &ldquo;rcu_sched&rdquo;), and is constant.
+The <tt>-&gt;abbr</tt> field contains a one-character abbreviation,
+for example, &ldquo;s&rdquo; for RCU-sched.
+
+<h3><a name="The rcu_node Structure">
+The <tt>rcu_node</tt> Structure</a></h3>
+
+<p>The <tt>rcu_node</tt> structures form the combining
+tree that propagates quiescent-state
+information from the leaves to the root and also that propagates
+grace-period information from the root down to the leaves.
+They provide local copies of the grace-period state in order
+to allow this information to be accessed in a synchronized
+manner without suffering the scalability limitations that
+would otherwise be imposed by global locking.
+In <tt>CONFIG_TREE_PREEMPT_RCU</tt> kernels, they manage the lists
+of tasks that have blocked while in their current
+RCU read-side critical section.
+In <tt>CONFIG_TREE_PREEMPT_RCU</tt> with
+<tt>CONFIG_RCU_BOOST</tt>, they manage the
+per-<tt>rcu_node</tt> priority-boosting
+kernel threads (kthreads) and state.
+Finally, they record CPU-hotplug state in order to determine
+which CPUs should be ignored during a given grace period.
+
+</p><p>The <tt>rcu_node</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+
+<h5>Connection to Combining Tree</h5>
+
+<p>This portion of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   struct rcu_node *parent;
+  2   u8 level;
+  3   u8 grpnum;
+  4   unsigned long grpmask;
+  5   int grplo;
+  6   int grphi;
+</pre>
+
+<p>The <tt>-&gt;parent</tt> pointer references the <tt>rcu_node</tt>
+one level up in the tree, and is <tt>NULL</tt> for the root
+<tt>rcu_node</tt>.
+The RCU implementation makes heavy use of this field to push quiescent
+states up the tree.
+The <tt>-&gt;level</tt> field gives the level in the tree, with
+the root being at level zero, its children at level one, and so on.
+The <tt>-&gt;grpnum</tt> field gives this node's position within
+the children of its parent, so this number can range between 0 and 31
+on 32-bit systems and between 0 and 63 on 64-bit systems.
+The <tt>-&gt;level</tt> and <tt>-&gt;grpnum</tt> fields are
+used only during initialization and for tracing.
+The <tt>-&gt;grpmask</tt> field is the bitmask counterpart of
+<tt>-&gt;grpnum</tt>, and therefore always has exactly one bit set.
+This mask is used to clear the bit corresponding to this <tt>rcu_node</tt>
+structure in its parent's bitmasks, which are described later.
+Finally, the <tt>-&gt;grplo</tt> and <tt>-&gt;grphi</tt> fields
+contain the lowest and highest numbered CPU served by this
+<tt>rcu_node</tt> structure, respectively.
+
+</p><p>All of these fields are constant, and thus do not require any
+synchronization.
+
+<h5>Synchronization</h5>
+
+<p>This field of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   raw_spinlock_t lock;
+</pre>
+
+<p>This field is used to protect the remaining fields in this structure,
+unless otherwise stated.
+That said, all of the fields in this structure can be accessed without
+locking for tracing purposes.
+Yes, this can result in confusing traces, but better some tracing confusion
+than to be heisenbugged out of existence.
+
+<h5>Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gpnum;
+  2   unsigned long completed;
+</pre>
+
+<p>These fields are the counterparts of the fields of the same name in
+the <tt>rcu_state</tt> structure.
+They each may lag up to one behind their <tt>rcu_state</tt>
+counterparts.
+If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
+<tt>-&gt;completed</tt> fields are equal, then this <tt>rcu_node</tt>
+structure believes that RCU is idle.
+Otherwise, as with the <tt>rcu_state</tt> structure,
+the <tt>-&gt;gpnum</tt> field will be one greater than the
+<tt>-&gt;completed</tt> field, with <tt>-&gt;gpnum</tt>
+indicating which grace period this <tt>rcu_node</tt> believes
+is still being waited for.
+
+</p><p>The <tt>-&gt;gpnum</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning
+of each grace period, and the <tt>-&gt;completed</tt> fields are
+updated at the end of each grace period.
+
+<h5>Quiescent-State Tracking</h5>
+
+<p>These fields manage the propagation of quiescent states up the
+combining tree.
+
+</p><p>This portion of the <tt>rcu_node</tt> structure has fields
+as follows:
+
+<pre>
+  1   unsigned long qsmask;
+  2   unsigned long expmask;
+  3   unsigned long qsmaskinit;
+  4   unsigned long expmaskinit;
+</pre>
+
+<p>The <tt>-&gt;qsmask</tt> field tracks which of this
+<tt>rcu_node</tt> structure's children still need to report
+quiescent states for the current normal grace period.
+Such children will have a value of 1 in their corresponding bit.
+Note that the leaf <tt>rcu_node</tt> structures should be
+thought of as having <tt>rcu_data</tt> structures as their
+children.
+Similarly, the <tt>-&gt;expmask</tt> field tracks which
+of this <tt>rcu_node</tt> structure's children still need to report
+quiescent states for the current expedited grace period.
+An expedited grace period has
+the same conceptual properties as a normal grace period, but the
+expedited implementation accepts extreme CPU overhead to obtain
+much lower grace-period latency, for example, consuming a few
+tens of microseconds worth of CPU time to reduce grace-period
+duration from milliseconds to tens of microseconds.
+The <tt>-&gt;qsmaskinit</tt> field tracks which of this
+<tt>rcu_node</tt> structure's children cover for at least
+one online CPU.
+This mask is used to initialize <tt>-&gt;qsmask</tt>,
+and <tt>-&gt;expmaskinit</tt> is used to initialize
+<tt>-&gt;expmask</tt>, at the beginning of the
+normal and expedited grace periods, respectively.
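+As a simplified sketch of how these masks are used (with hypothetical
+local-variable names), a child reporting a quiescent state clears its bit
+in its parent's <tt>-&gt;qsmask</tt> while holding that parent's
+<tt>-&gt;lock</tt>:
+
+<pre>
+  1   unsigned long flags;
+  2
+  3   raw_spin_lock_irqsave(&amp;rnp->lock, flags);
+  4   rnp->qsmask &amp;= ~mask;  /* "mask" is the reporting child's ->grpmask. */
+  5   if (!rnp->qsmask) {
+  6           /* All children have now reported; propagate up the tree. */
+  7   }
+  8   raw_spin_unlock_irqrestore(&amp;rnp->lock, flags);
+</pre>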
+
+<p><a name="Quick Quiz 4"><b>Quick Quiz 4</b>:</a>
+Why are these bitmasks protected by locking?
+Come on, haven't you heard of atomic instructions???
+<br><a href="#qq4answer">Answer</a>
+
+<h5>Blocked-Task Management</h5>
+
+<p><tt>TREE_PREEMPT_RCU</tt> allows tasks to be preempted in the
+midst of their RCU read-side critical sections, and these tasks
+must be tracked explicitly.
+The details of exactly why and how they are tracked will be covered
+in a separate article on RCU read-side processing.
+For now, it is enough to know that the <tt>rcu_node</tt>
+structure tracks them.
+
+<pre>
+  1   struct list_head blkd_tasks;
+  2   struct list_head *gp_tasks;
+  3   struct list_head *exp_tasks;
+  4   bool wait_blkd_tasks;
+</pre>
+
+<p>The <tt>-&gt;blkd_tasks</tt> field is a list header for
+the list of blocked and preempted tasks.
+As tasks undergo context switches within RCU read-side critical
+sections, their <tt>task_struct</tt> structures are enqueued
+(via the <tt>task_struct</tt>'s <tt>-&gt;rcu_node_entry</tt>
+field) onto the head of the <tt>-&gt;blkd_tasks</tt> list for the
+leaf <tt>rcu_node</tt> structure corresponding to the CPU
+on which the outgoing context switch executed.
+As these tasks later exit their RCU read-side critical sections,
+they remove themselves from the list.
+This list is therefore in reverse time order, so that if one of the tasks
+is blocking the current grace period, all subsequent tasks must
+also be blocking that same grace period.
+Therefore, a single pointer into this list suffices to track
+all tasks blocking a given grace period.
+That pointer is stored in <tt>-&gt;gp_tasks</tt> for normal
+grace periods and in <tt>-&gt;exp_tasks</tt> for expedited
+grace periods.
+These last two fields are <tt>NULL</tt> if either there is
+no grace period in flight or if there are no blocked tasks
+preventing that grace period from completing.
+If either of these two pointers is referencing a task that
+removes itself from the <tt>-&gt;blkd_tasks</tt> list,
+then that task must advance the pointer to the next task on
+the list, or set the pointer to <tt>NULL</tt> if there
+are no subsequent tasks on the list.
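+A sketch of this pointer-advancement step (simplified, with hypothetical
+local-variable names) might look as follows:
+
+<pre>
+  1   struct list_head *np = t->rcu_node_entry.next;
+  2
+  3   if (np == &amp;rnp->blkd_tasks)
+  4           np = NULL;  /* No tasks follow t on the list. */
+  5   if (rnp->gp_tasks == &amp;t->rcu_node_entry)
+  6           rnp->gp_tasks = np;
+  7   if (rnp->exp_tasks == &amp;t->rcu_node_entry)
+  8           rnp->exp_tasks = np;
+  9   list_del_init(&amp;t->rcu_node_entry);
+</pre>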
+
+</p><p>For example, suppose that tasks&nbsp;T1, T2, and&nbsp;T3 are
+all hard-affinitied to the largest-numbered CPU in the system.
+Then if task&nbsp;T1 blocked in an RCU read-side
+critical section, then an expedited grace period started,
+then task&nbsp;T2 blocked in an RCU read-side critical section,
+then a normal grace period started, and finally task&nbsp;T3 blocked
+in an RCU read-side critical section, then the state of the
+last leaf <tt>rcu_node</tt> structure's blocked-task list
+would be as shown below:
+
+</p><p><img src="blkd_task.svg" alt="blkd_task.svg" width="60%">
+
+</p><p>Task&nbsp;T1 is blocking both grace periods, task&nbsp;T2 is
+blocking only the normal grace period, and task&nbsp;T3 is blocking
+neither grace period.
+Note that these tasks will not remove themselves from this list
+immediately upon resuming execution.
+They will instead remain on the list until they execute the outermost
+<tt>rcu_read_unlock()</tt> that ends their RCU read-side critical
+section.
+
+<p>
+The <tt>-&gt;wait_blkd_tasks</tt> field indicates whether or not
+the current grace period is waiting on a blocked task.
+
+<h5>Sizing the <tt>rcu_node</tt> Array</h5>
+
+<p>The <tt>rcu_node</tt> array is sized via a series of
+C-preprocessor expressions as follows:
+
+<pre>
+ 1 #ifdef CONFIG_RCU_FANOUT
+ 2 #define RCU_FANOUT CONFIG_RCU_FANOUT
+ 3 #else
+ 4 # ifdef CONFIG_64BIT
+ 5 # define RCU_FANOUT 64
+ 6 # else
+ 7 # define RCU_FANOUT 32
+ 8 # endif
+ 9 #endif
+10
+11 #ifdef CONFIG_RCU_FANOUT_LEAF
+12 #define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
+13 #else
+14 # ifdef CONFIG_64BIT
+15 # define RCU_FANOUT_LEAF 64
+16 # else
+17 # define RCU_FANOUT_LEAF 32
+18 # endif
+19 #endif
+20
+21 #define RCU_FANOUT_1        (RCU_FANOUT_LEAF)
+22 #define RCU_FANOUT_2        (RCU_FANOUT_1 * RCU_FANOUT)
+23 #define RCU_FANOUT_3        (RCU_FANOUT_2 * RCU_FANOUT)
+24 #define RCU_FANOUT_4        (RCU_FANOUT_3 * RCU_FANOUT)
+25
+26 #if NR_CPUS &lt;= RCU_FANOUT_1
+27 #  define RCU_NUM_LVLS        1
+28 #  define NUM_RCU_LVL_0        1
+29 #  define NUM_RCU_NODES        NUM_RCU_LVL_0
+30 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
+31 #  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
+32 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
+33 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
+34 #elif NR_CPUS &lt;= RCU_FANOUT_2
+35 #  define RCU_NUM_LVLS        2
+36 #  define NUM_RCU_LVL_0        1
+37 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+38 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
+39 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
+40 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
+41 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
+42 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
+43 #elif NR_CPUS &lt;= RCU_FANOUT_3
+44 #  define RCU_NUM_LVLS        3
+45 #  define NUM_RCU_LVL_0        1
+46 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+47 #  define NUM_RCU_LVL_2        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+48 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
+49 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
+50 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
+51 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
+52 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
+53 #elif NR_CPUS &lt;= RCU_FANOUT_4
+54 #  define RCU_NUM_LVLS        4
+55 #  define NUM_RCU_LVL_0        1
+56 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
+57 #  define NUM_RCU_LVL_2        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+58 #  define NUM_RCU_LVL_3        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+59 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+60 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
+61 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
+62 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
+63 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
+64 #else
+65 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+66 #endif
+</pre>
+
+<p>The maximum number of levels in the <tt>rcu_node</tt> structure
+is currently limited to four, as specified by lines&nbsp;21-24
+and the structure of the subsequent &ldquo;if&rdquo; statement.
+For 32-bit systems, this allows 16*32*32*32=524,288 CPUs, which
+should be sufficient for the next few years at least.
+For 64-bit systems, this allows 16*64*64*64=4,194,304 CPUs, which
+should see us through the next decade or so.
+This four-level tree also allows kernels built with
+<tt>CONFIG_RCU_FANOUT=8</tt> to support up to 4096 CPUs,
+which might be useful in very large systems having eight CPUs per
+socket (but please note that no one has yet shown any measurable
+performance degradation due to misaligned socket and <tt>rcu_node</tt>
+boundaries).
+In addition, building kernels with a full four levels of <tt>rcu_node</tt>
+tree permits better testing of RCU's combining-tree code.
+
+</p><p>The <tt>RCU_FANOUT</tt> symbol controls how many children
+are permitted at each non-leaf level of the <tt>rcu_node</tt> tree.
+If the <tt>CONFIG_RCU_FANOUT</tt> Kconfig option is not specified,
+it is set based on the word size of the system, which is also
+the Kconfig default.
+
+</p><p>The <tt>RCU_FANOUT_LEAF</tt> symbol controls how many CPUs are
+handled by each leaf <tt>rcu_node</tt> structure.
+Experience has shown that allowing a given leaf <tt>rcu_node</tt>
+structure to handle 64 CPUs, as permitted by the number of bits in
+the <tt>-&gt;qsmask</tt> field on a 64-bit system, results in
+excessive contention for the leaf <tt>rcu_node</tt> structures'
+<tt>-&gt;lock</tt> fields.
+The number of CPUs per leaf <tt>rcu_node</tt> structure is therefore
+limited to 16 given the default value of <tt>CONFIG_RCU_FANOUT_LEAF</tt>.
+If <tt>CONFIG_RCU_FANOUT_LEAF</tt> is unspecified, the value
+selected is based on the word size of the system, just as for
+<tt>CONFIG_RCU_FANOUT</tt>.
+Lines&nbsp;11-19 perform this computation.
+
+</p><p>Lines&nbsp;21-24 compute the maximum number of CPUs supported by
+a single-level (which contains a single <tt>rcu_node</tt> structure),
+two-level, three-level, and four-level <tt>rcu_node</tt> tree,
+respectively, given the fanout specified by <tt>RCU_FANOUT</tt>
+and <tt>RCU_FANOUT_LEAF</tt>.
+These numbers of CPUs are retained in the
+<tt>RCU_FANOUT_1</tt>,
+<tt>RCU_FANOUT_2</tt>,
+<tt>RCU_FANOUT_3</tt>, and
+<tt>RCU_FANOUT_4</tt>
+C-preprocessor variables, respectively.
+
+</p><p>These variables are used to control the C-preprocessor <tt>#if</tt>
+statement spanning lines&nbsp;26-66 that computes the number of
+<tt>rcu_node</tt> structures required for each level of the tree,
+as well as the number of levels required.
+The number of levels is placed in the <tt>NUM_RCU_LVLS</tt>
+C-preprocessor variable by lines&nbsp;27, 35, 44, and&nbsp;54.
+The number of <tt>rcu_node</tt> structures for the topmost level
+of the tree is always exactly one, and this value is unconditionally
+placed into <tt>NUM_RCU_LVL_0</tt> by lines&nbsp;28, 36, 45, and&nbsp;55.
+The rest of the levels (if any) of the <tt>rcu_node</tt> tree
+are computed by dividing the maximum number of CPUs by the
+fanout supported by the number of levels from the current level down,
+rounding up.  This computation is performed by lines&nbsp;37,
+46-47, and&nbsp;56-58.
+Lines&nbsp;31-33, 40-42, 50-52, and&nbsp;62-63 create initializers
+for lockdep lock-class names.
+Finally, lines&nbsp;64-66 produce an error if the maximum number of
+CPUs is too large for the specified fanout.
+
+<h3><a name="The rcu_data Structure">
+The <tt>rcu_data</tt> Structure</a></h3>
+
+<p>The <tt>rcu_data</tt> structure maintains the per-CPU state for the
+corresponding flavor of RCU.
+The fields in this structure may be accessed only from the corresponding
+CPU (and from tracing) unless otherwise stated.
+This structure is the
+focus of quiescent-state detection and RCU callback queuing.
+It also tracks its relationship to the corresponding leaf
+<tt>rcu_node</tt> structure to allow more-efficient
+propagation of quiescent states up the <tt>rcu_node</tt>
+combining tree.
+Like the <tt>rcu_node</tt> structure, it provides a local
+copy of the grace-period information to allow synchronized
+access to this information from the corresponding CPU
+essentially for free.
+Finally, this structure records past dyntick-idle state
+for the corresponding CPU and also tracks statistics.
+
+</p><p>The <tt>rcu_data</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+
+<h5>Connection to Other Data Structures</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   int cpu;
+  2   struct rcu_state *rsp;
+  3   struct rcu_node *mynode;
+  4   struct rcu_dynticks *dynticks;
+  5   unsigned long grpmask;
+  6   bool beenonline;
+</pre>
+
+<p>The <tt>-&gt;cpu</tt> field contains the number of the
+corresponding CPU, the <tt>-&gt;rsp</tt> pointer references
+the corresponding <tt>rcu_state</tt> structure (and is most frequently
+used to locate the name of the corresponding flavor of RCU for tracing),
+and the <tt>-&gt;mynode</tt> field references the corresponding
+<tt>rcu_node</tt> structure.
+The <tt>-&gt;mynode</tt> field is used to propagate quiescent states
+up the combining tree.
+<p>The <tt>-&gt;dynticks</tt> pointer references the
+<tt>rcu_dynticks</tt> structure corresponding to this
+CPU.
+Recall that a single per-CPU instance of the <tt>rcu_dynticks</tt>
+structure is shared among all flavors of RCU.
+These first four fields are constant and therefore require no
+synchronization.
+
+</p><p>The <tt>-&gt;grpmask</tt> field indicates the bit in
+the <tt>-&gt;mynode-&gt;qsmask</tt> corresponding to this
+<tt>rcu_data</tt> structure, and is also used when propagating
+quiescent states.
+The <tt>-&gt;beenonline</tt> flag is set whenever the corresponding
+CPU comes online, which means that the debugfs tracing need not dump
+out any <tt>rcu_data</tt> structure for which this flag is not set.
+
+<h5>Quiescent-State and Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long completed;
+  2   unsigned long gpnum;
+  3   bool cpu_no_qs;
+  4   bool core_needs_qs;
+  5   bool gpwrap;
+  6   unsigned long rcu_qs_ctr_snap;
+</pre>
+
+<p>The <tt>completed</tt> and <tt>gpnum</tt>
+fields are the counterparts of the fields of the same name
+in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
+They may each lag up to one behind their <tt>rcu_node</tt>
+counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
+<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
+arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
+will catch up upon exit from dyntick-idle mode).
+If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
+<tt>-&gt;completed</tt> fields are equal, then this <tt>rcu_data</tt>
+structure believes that RCU is idle.
+Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
+structures,
+the <tt>-&gt;gpnum</tt> field will be one greater than the
+<tt>-&gt;completed</tt> field, with <tt>-&gt;gpnum</tt>
+indicating which grace period this <tt>rcu_data</tt> believes
+is still being waited for.
+
+<p><a name="Quick Quiz 5"><b>Quick Quiz 5</b>:</a>
+All this replication of the grace period numbers can only cause
+massive confusion.
+Why not just keep a global pair of counters and be done with it???
+<br><a href="#qq5answer">Answer</a>
+
+</p><p>The <tt>-&gt;cpu_no_qs</tt> flag indicates that the
+CPU has not yet passed through a quiescent state,
+while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
+RCU core needs a quiescent state from the corresponding CPU.
+The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
+CPU has remained idle for so long that the <tt>completed</tt>
+and <tt>gpnum</tt> counters are in danger of overflow, which
+will cause the CPU to disregard the values of its counters on
+its next exit from idle.
+Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
+cases where a given operation has resulted in a quiescent state
+for all flavors of RCU, for example, <tt>cond_resched_rcu_qs()</tt>.
+
+<h5>RCU Callback Handling</h5>
+
+<p>In the absence of CPU-hotplug events, RCU callbacks are invoked by
+the same CPU that registered them.
+This is strictly a cache-locality optimization: callbacks can and
+do get invoked on CPUs other than the one that registered them.
+After all, if the CPU that registered a given callback has gone
+offline before the callback can be invoked, there really is no other
+choice.
+
+</p><p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+ 1 struct rcu_head *nxtlist;
+ 2 struct rcu_head **nxttail[RCU_NEXT_SIZE];
+ 3 unsigned long nxtcompleted[RCU_NEXT_SIZE];
+ 4 long qlen_lazy;
+ 5 long qlen;
+ 6 long qlen_last_fqs_check;
+ 7 unsigned long n_force_qs_snap;
+ 8 unsigned long n_cbs_invoked;
+ 9 unsigned long n_cbs_orphaned;
+10 unsigned long n_cbs_adopted;
+11 long blimit;
+</pre>
+
+<p>The <tt>-&gt;nxtlist</tt> pointer and the
+<tt>-&gt;nxttail[]</tt> array form a four-segment list with
+older callbacks near the head and newer ones near the tail.
+Each segment contains callbacks with the corresponding relationship
+to the current grace period.
+The pointer out of the end of each of the four segments is referenced
+by the element of the <tt>-&gt;nxttail[]</tt> array indexed by
+<tt>RCU_DONE_TAIL</tt> (for callbacks handled by a prior grace period),
+<tt>RCU_WAIT_TAIL</tt> (for callbacks waiting on the current grace period),
+<tt>RCU_NEXT_READY_TAIL</tt> (for callbacks that will wait on the next
+grace period), and
+<tt>RCU_NEXT_TAIL</tt> (for callbacks that are not yet associated
+with a specific grace period)
+respectively, as shown in the following figure.
+
+</p><p><img src="nxtlist.svg" alt="nxtlist.svg" width="40%">
+
+</p><p>In this figure, the <tt>-&gt;nxtlist</tt> pointer references the
+first
+RCU callback in the list.
+The <tt>-&gt;nxttail[RCU_DONE_TAIL]</tt> array element references
+the <tt>-&gt;nxtlist</tt> pointer itself, indicating that none
+of the callbacks is ready to invoke.
+The <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt> array element references callback
+CB&nbsp;2's <tt>-&gt;next</tt> pointer, which indicates that
+CB&nbsp;1 and CB&nbsp;2 are both waiting on the current grace period.
+The <tt>-&gt;nxttail[RCU_NEXT_READY_TAIL]</tt> array element
+references the same RCU callback that <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt>
+does, which indicates that there are no callbacks waiting on the next
+RCU grace period.
+The <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element references
+CB&nbsp;4's <tt>-&gt;next</tt> pointer, indicating that all the
+remaining RCU callbacks have not yet been assigned to an RCU grace
+period.
+Note that the <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element
+always references the last RCU callback's <tt>-&gt;next</tt> pointer
+unless the callback list is empty, in which case it references
+the <tt>-&gt;nxtlist</tt> pointer.
+
+</p><p>CPUs advance their callbacks from the
+<tt>RCU_NEXT_TAIL</tt> to the <tt>RCU_NEXT_READY_TAIL</tt> to the
+<tt>RCU_WAIT_TAIL</tt> to the <tt>RCU_DONE_TAIL</tt> list segments
+as grace periods advance.
+The CPU advances the callbacks in its <tt>rcu_data</tt> structure
+whenever it notices that another RCU grace period has completed.
+The CPU detects the completion of an RCU grace period by noticing
+that the value of its <tt>rcu_data</tt> structure's
+<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>rcu_node</tt> structure.
+Recall that each <tt>rcu_node</tt> structure's
+<tt>-&gt;completed</tt> field is updated at the end of each
+grace period.
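+A sketch of this check (with hypothetical variable names, and ignoring
+locking and memory-ordering details) might look as follows:
+
+<pre>
+  1   if (rdp->completed != READ_ONCE(rnp->completed)) {
+  2           /* A grace period has ended: advance this CPU's callbacks. */
+  3   }
+</pre>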
+
+</p><p>The <tt>-&gt;nxtcompleted[]</tt> array records grace-period
+numbers corresponding to the list segments.
+This allows CPUs that go idle for extended periods to determine
+which of their callbacks are ready to be invoked after reawakening.
+
+</p><p>The <tt>-&gt;qlen</tt> counter contains the number of
+callbacks in <tt>-&gt;nxtlist</tt>, and the
+<tt>-&gt;qlen_lazy</tt> counter contains the number of those callbacks that
+are known to only free memory, and whose invocation can therefore
+be safely deferred.
+The <tt>-&gt;qlen_last_fqs_check</tt> and
+<tt>-&gt;n_force_qs_snap</tt> coordinate the forcing of quiescent
+states from <tt>call_rcu()</tt> and friends when callback
+lists grow excessively long.
+
+</p><p>The <tt>-&gt;n_cbs_invoked</tt>,
+<tt>-&gt;n_cbs_orphaned</tt>, and <tt>-&gt;n_cbs_adopted</tt>
+fields count the number of callbacks invoked,
+sent to other CPUs when this CPU goes offline,
+and received from other CPUs when those other CPUs go offline, respectively.
+Finally, the <tt>-&gt;blimit</tt> counter is the maximum number of
+RCU callbacks that may be invoked at a given time.
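+
+</p><p>Continuing the user-space sketch above, the following (again a
+simplified model rather than the kernel's actual callback-invocation
+code) shows how a batch limit such as <tt>-&gt;blimit</tt> is applied:
+the <tt>RCU_DONE_TAIL</tt> segment is detached from the front of the
+list and at most <tt>blimit</tt> of its callbacks are invoked:
+
+<pre>
+/* Detach the RCU_DONE_TAIL segment and invoke at most blimit of its
+   callbacks.  A complete implementation would requeue any leftovers
+   at the front of the list; that step is omitted here. */
+static void cblist_invoke_done(struct rcu_data *rdp, long blimit)
+{
+        struct rcu_head *list, *next;
+        struct rcu_head **donetail = rdp-&gt;nxttail[RCU_DONE_TAIL];
+        long count = 0;
+        int i;
+
+        /* Nothing to do unless some callbacks are ready to invoke. */
+        if (donetail == &amp;rdp-&gt;nxtlist)
+                return;
+
+        /* Detach the ready callbacks from the front of the list. */
+        list = rdp-&gt;nxtlist;
+        rdp-&gt;nxtlist = *donetail;
+        *donetail = NULL;
+        for (i = RCU_NEXT_SIZE - 1; i &gt;= 0; i--)
+                if (rdp-&gt;nxttail[i] == donetail)
+                        rdp-&gt;nxttail[i] = &amp;rdp-&gt;nxtlist;
+
+        /* Invoke the detached callbacks, honoring the batch limit. */
+        while (list != NULL &amp;&amp; count &lt; blimit) {
+                next = list-&gt;next;
+                list-&gt;func(list);
+                rdp-&gt;qlen--;
+                count++;
+                list = next;
+        }
+}
+</pre>
+
+</p><p>The kernel's real code also requeues leftover callbacks, adjusts
+<tt>-&gt;blimit</tt> when the list has grown excessively long, and updates
+the statistics described above, but the overall detach-and-invoke
+pattern is similar.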
+
+<h5>Dyntick-Idle Handling</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   int dynticks_snap;
+  2   unsigned long dynticks_fqs;
+</pre>
+
+The <tt>-&gt;dynticks_snap</tt> field is used to take a snapshot
+of the corresponding CPU's dyntick-idle state when forcing
+quiescent states, and is therefore accessed from other CPUs.
+Finally, the <tt>-&gt;dynticks_fqs</tt> field is used to
+count the number of times this CPU is determined to be in
+dyntick-idle state, and is used for tracing and debugging purposes.
+
+<h3><a name="The rcu_dynticks Structure">
+The <tt>rcu_dynticks</tt> Structure</a></h3>
+
+<p>The <tt>rcu_dynticks</tt> structure maintains the per-CPU dyntick-idle state
+for the corresponding CPU.
+Unlike the other structures, <tt>rcu_dynticks</tt> is not
+replicated over the different flavors of RCU.
+The fields in this structure may be accessed only from the corresponding
+CPU (and from tracing) unless otherwise stated.
+Its fields are as follows:
+
+<pre>
+  1   int dynticks_nesting;
+  2   int dynticks_nmi_nesting;
+  3   atomic_t dynticks;
+</pre>
+
+<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
+nesting depth of normal interrupts.
+In addition, this counter is incremented when exiting dyntick-idle
+mode and decremented when entering it.
+This counter can therefore be thought of as counting the number
+of reasons why this CPU cannot be permitted to enter dyntick-idle
+mode, aside from non-maskable interrupts (NMIs).
+NMIs are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
+field, except that NMIs that interrupt non-dyntick-idle execution
+are not counted.
+
+</p><p>Finally, the <tt>-&gt;dynticks</tt> field counts the corresponding
+CPU's transitions to and from dyntick-idle mode, so that this counter
+has an even value when the CPU is in dyntick-idle mode and an odd
+value otherwise.
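+
+</p><p>For example, a hedged sketch of the checks that this even/odd
+convention enables (not the kernel's exact code, which also needs
+careful memory ordering), tying in the <tt>-&gt;dynticks_snap</tt>
+field described earlier:
+
+<pre>
+/* Sketch: an even -&gt;dynticks value means the CPU is currently in
+   dyntick-idle mode. */
+static bool sketch_in_dyntick_idle(struct rcu_dynticks *rdtp)
+{
+        return (atomic_read(&amp;rdtp-&gt;dynticks) &amp; 0x1) == 0;
+}
+
+/* Sketch: given a -&gt;dynticks_snap value taken earlier while the CPU
+   was not idle (and thus odd), the CPU has passed through an extended
+   quiescent state if its counter is now even (currently idle) or has
+   changed (it was idle at some point since the snapshot). */
+static bool sketch_passed_dyntick_qs(struct rcu_dynticks *rdtp, int snap)
+{
+        int curr = atomic_read(&amp;rdtp-&gt;dynticks);
+
+        return (curr &amp; 0x1) == 0 || curr != snap;
+}
+</pre>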
+
+<p><a name="Quick Quiz 6"><b>Quick Quiz 6</b>:</a>
+Why not just count all NMIs?
+Wouldn't that be simpler and less error prone?
+<br><a href="#qq6answer">Answer</a>
+
+</p><p>Additional fields are present for some special-purpose
+builds, and are discussed separately.
+
+<h3><a name="The rcu_head Structure">
+The <tt>rcu_head</tt> Structure</a></h3>
+
+<p>Each <tt>rcu_head</tt> structure represents an RCU callback.
+These structures are normally embedded within RCU-protected data
+structures whose algorithms use asynchronous grace periods.
+In contrast, when using algorithms that block waiting for RCU grace periods,
+RCU users need not provide <tt>rcu_head</tt> structures.
+
+</p><p>The <tt>rcu_head</tt> structure has fields as follows:
+
+<pre>
+  1   struct rcu_head *next;
+  2   void (*func)(struct rcu_head *head);
+</pre>
+
+<p>The <tt>-&gt;next</tt> field is used
+to link the <tt>rcu_head</tt> structures together in the
+lists within the <tt>rcu_data</tt> structures.
+The <tt>-&gt;func</tt> field is a pointer to the function
+to be called when the callback is ready to be invoked, and
+this function is passed a pointer to the <tt>rcu_head</tt>
+structure.
+However, <tt>kfree_rcu()</tt> uses the <tt>-&gt;func</tt>
+field to record the offset of the <tt>rcu_head</tt>
+structure within the enclosing RCU-protected data structure.
+
+</p><p>Both of these fields are used internally by RCU.
+From the viewpoint of RCU users, this structure is an
+opaque &ldquo;cookie&rdquo;.
+
+<p><a name="Quick Quiz 7"><b>Quick Quiz 7</b>:</a>
+Given that the callback function <tt>-&gt;func</tt>
+is passed a pointer to the <tt>rcu_head</tt> structure,
+how is that function supposed to find the beginning of the
+enclosing RCU-protected data structure?
+<br><a href="#qq7answer">Answer</a>
+
+<h3><a name="RCU-Specific Fields in the task_struct Structure">
+RCU-Specific Fields in the <tt>task_struct</tt> Structure</a></h3>
+
+<p>The <tt>CONFIG_TREE_PREEMPT_RCU</tt> and <tt>CONFIG_TASKS_RCU</tt>
+implementations use some additional fields in the
+<tt>task_struct</tt> structure:
+
+<pre>
+ 1 #ifdef CONFIG_PREEMPT_RCU
+ 2   int rcu_read_lock_nesting;
+ 3   union rcu_special rcu_read_unlock_special;
+ 4   struct list_head rcu_node_entry;
+ 5   struct rcu_node *rcu_blocked_node;
+ 6 #endif /* #ifdef CONFIG_PREEMPT_RCU */
+ 7 #ifdef CONFIG_TASKS_RCU
+ 8   unsigned long rcu_tasks_nvcsw;
+ 9   bool rcu_tasks_holdout;
+10   struct list_head rcu_tasks_holdout_list;
+11   int rcu_tasks_idle_cpu;
+12 #endif /* #ifdef CONFIG_TASKS_RCU */
+</pre>
+
+<p>The <tt>-&gt;rcu_read_lock_nesting</tt> field records the
+nesting level for RCU read-side critical sections, and
+the <tt>-&gt;rcu_read_unlock_special</tt> field is a bitmask
+that records special conditions that require <tt>rcu_read_unlock()</tt>
+to do additional work.
+The <tt>-&gt;rcu_node_entry</tt> field is used to form lists of
+tasks that have blocked within preemptible-RCU read-side critical
+sections and the <tt>-&gt;rcu_blocked_node</tt> field references
+the <tt>rcu_node</tt> structure whose list this task is a member of,
+or <tt>NULL</tt> if it is not blocked within a preemptible-RCU
+read-side critical section.
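+
+</p><p>For example, in a kernel built with preemptible RCU, a nested pair
+of read-side critical sections manipulates these fields roughly as
+follows (a sketch; <tt>gp</tt>, <tt>p</tt>, and
+<tt>do_something_with()</tt> are hypothetical):
+
+<pre>
+rcu_read_lock();            /* -&gt;rcu_read_lock_nesting: 0 to 1 */
+p = rcu_dereference(gp);
+rcu_read_lock();            /* nested: 1 to 2 */
+do_something_with(p);
+rcu_read_unlock();          /* back to 1 */
+rcu_read_unlock();          /* outermost unlock: back to 0; if this task
+                               was preempted within the critical section,
+                               -&gt;rcu_read_unlock_special directs the
+                               extra cleanup, such as removing the task
+                               from the blocked-tasks list */
+</pre>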
+
+<p>The <tt>-&gt;rcu_tasks_nvcsw</tt> field tracks the number of
+voluntary context switches that this task had undergone at the
+beginning of the current tasks-RCU grace period,
+<tt>-&gt;rcu_tasks_holdout</tt> is set if the current tasks-RCU
+grace period is waiting on this task, <tt>-&gt;rcu_tasks_holdout_list</tt>
+is a list element enqueuing this task on the holdout list,
+and <tt>-&gt;rcu_tasks_idle_cpu</tt> records the CPU on which this
+idle task is running, but only if the task is currently running,
+that is, if the CPU is currently idle.
+
+<p><a name="Quick Quiz 8"><b>Quick Quiz 8</b>:</a>
+Why is <tt>-&gt;rcu_boosted</tt> required, given that there is
+a <tt>RCU_READ_UNLOCK_BOOSTED</tt> bit in
+<tt>-&gt;rcu_read_unlock_special</tt>?
+<br><a href="#qq8answer">Answer</a>
+
+<h3><a name="Accessor Functions">
+Accessor Functions</a></h3>
+
+<p>The following listing shows the
+<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first()</tt>,
+<tt>rcu_for_each_nonleaf_node_breadth_first()</tt>, and
+<tt>rcu_for_each_leaf_node()</tt> function and macros:
+
+<pre>
+  1 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+  2 {
+  3   return &amp;rsp-&gt;node[0];
+  4 }
+  5
+  6 #define rcu_for_each_node_breadth_first(rsp, rnp) \
+  7   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
+  8        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+  9
+ 10 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
+ 11   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
+ 12        (rnp) &lt; (rsp)-&gt;level[NUM_RCU_LVLS - 1]; (rnp)++)
+ 13
+ 14 #define rcu_for_each_leaf_node(rsp, rnp) \
+ 15   for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
+ 16        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+</pre>
+
+<p>The <tt>rcu_get_root()</tt> function simply returns a pointer to the
+first element of the specified <tt>rcu_state</tt> structure's
+<tt>-&gt;node[]</tt> array, which is the root <tt>rcu_node</tt>
+structure.
+
+</p><p>As noted earlier, the <tt>rcu_for_each_node_breadth_first()</tt>
+macro takes advantage of the layout of the <tt>rcu_node</tt>
+structures in the <tt>rcu_state</tt> structure's
+<tt>-&gt;node[]</tt> array, performing a breadth-first traversal by
+simply traversing the array in order.
+The <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> macro operates
+similarly, but traverses only the first part of the array, thus excluding
+the leaf <tt>rcu_node</tt> structures.
+Finally, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
+the last part of the array, thus traversing only the leaf
+<tt>rcu_node</tt> structures.
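+
+</p><p>For example, a hypothetical debugging loop might combine
+<tt>rcu_for_each_leaf_node()</tt> with the <tt>-&gt;grplo</tt> and
+<tt>-&gt;grphi</tt> fields described in the <tt>rcu_node</tt> section
+(a sketch, not code from the kernel):
+
+<pre>
+struct rcu_node *rnp;
+
+rcu_for_each_leaf_node(rsp, rnp)
+        pr_info("leaf rcu_node covers CPUs %d-%d\n",
+                rnp-&gt;grplo, rnp-&gt;grphi);
+</pre>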
+
+<p><a name="Quick Quiz 9"><b>Quick Quiz 9</b>:</a>
+What do <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> and
+<tt>rcu_for_each_leaf_node()</tt> do if the <tt>rcu_node</tt> tree
+contains only a single node?
+<br><a href="#qq9answer">Answer</a>
+
+<h3><a name="Summary">
+Summary</a></h3>
+
+<p>So each flavor of RCU is represented by an <tt>rcu_state</tt> structure,
+which contains a combining tree of <tt>rcu_node</tt> and
+<tt>rcu_data</tt> structures.
+Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle
+state is tracked by an <tt>rcu_dynticks</tt> structure.
+
+</p><p>If you made it this far, you are well prepared to read the code
+walkthroughs in the other articles in this series.
+
+<h3><a name="Acknowledgments">
+Acknowledgments</a></h3>
+
+<p>I owe thanks to Cyrill Gorcunov, Mathieu Desnoyers, Dhaval Giani, Paul
+Turner, Abhishek Srivastava, Matt Kowalczyk, and Serge Hallyn
+for helping me get this document into a more human-readable state.
+
+<h3><a name="Legal Statement">
+Legal Statement</a></h3>
+
+<p>This work represents the view of the author and does not necessarily
+represent the view of IBM.
+
+</p><p>Linux is a registered trademark of Linus Torvalds.
+
+</p><p>Other company, product, and service names may be trademarks or
+service marks of others.
+
+
+<h3><a name="Answers to Quick Quizzes">
+Answers to Quick Quizzes</a></h3>
+
+<a name="qq1answer"></a>
+<p><b>Quick Quiz 1</b>:
+Why isn't the fanout at the leaves also 64?
+
+
+</p><p><b>Answer</b>:
+Because there are more types of events that affect the leaf-level
+<tt>rcu_node</tt> structures than further up the tree.
+Therefore, if the leaf <tt>rcu_node</tt> structures have a
+fanout of 64, the contention on these structures'
+<tt>-&gt;lock</tt> fields becomes excessive.
+Experimentation on a wide variety of systems has shown that a fanout
+of 16 works well for the leaves of the <tt>rcu_node</tt> tree.
+
+</p><p>Of course, further experience with systems having hundreds or
+thousands of CPUs may demonstrate that the fanout for the non-leaf
+<tt>rcu_node</tt> structures must also be reduced.
+Such reduction can be easily carried out when and if it proves necessary.
+In the meantime, if you are using such a system and running into
+contention problems on the non-leaf <tt>rcu_node</tt> structures,
+you may use the <tt>CONFIG_RCU_FANOUT</tt> kernel configuration
+parameter to reduce the non-leaf fanout as needed.
+
+</p><p>Kernels built for systems with strong NUMA characteristics might
+also need to adjust <tt>CONFIG_RCU_FANOUT</tt> so that the
+domains of the <tt>rcu_node</tt> structures align with hardware
+boundaries.
+However, there has thus far been no need for this.
+
+
+</p><p><a href="#Quick%20Quiz%201"><b>Back to Quick Quiz 1</b>.</a>
+
+<a name="qq2answer"></a>
+<p><b>Quick Quiz 2</b>:
+Wait a minute!
+You said that the <tt>rcu_node</tt> structures formed a tree,
+but they are declared as a flat array!
+What gives?
+
+
+</p><p><b>Answer</b>:
+The tree is laid out in the array.
+The first node in the array is the head, the next set of nodes in the
+array are children of the head node, and so on until the last set of
+nodes in the array are the leaves.
+
+</p><p>See the following diagrams to see how this works.
+
+
+</p><p><a href="#Quick%20Quiz%202"><b>Back to Quick Quiz 2</b>.</a>
+
+<a name="qq3answer"></a>
+<p><b>Quick Quiz 3</b>:
+Given that this array represents a tree, why can't the diagram that
+includes the <tt>-&gt;level</tt> array be planar?
+
+
+</p><p><b>Answer</b>:
+It can be planar; it just looks uglier that way.
+But don't take my word for it, draw it yourself!
+
+</p><p>But if you draw the tree to be tree-shaped rather than
+array-shaped, it is easy to draw a planar representation:
+
+</p><p><img src="TreeLevel.svg" alt="TreeLevel.svg" width="60%">
+
+
+</p><p><a href="#Quick%20Quiz%203"><b>Back to Quick Quiz 3</b>.</a>
+
+<a name="qq4answer"></a>
+<p><b>Quick Quiz 4</b>:
+Why are these bitmasks protected by locking?
+Come on, haven't you heard of atomic instructions???
+
+
+</p><p><b>Answer</b>:
+Lockless grace-period computation!  Such a tantalizing possibility!
+
+</p><p>But consider the following sequence of events:
+
+<ol>
+<li>   CPU&nbsp;0 has been in dyntick-idle mode for quite
+       some time.
+       When it wakes up, it notices that the current RCU
+       grace period needs it to report in, so it sets a
+       flag where the scheduling clock interrupt will find it.
+<li>   Meanwhile, CPU&nbsp;1 is running <tt>force_quiescent_state()</tt>,
+       and notices that CPU&nbsp;0 has been in dyntick idle mode,
+       which qualifies as an extended quiescent state.
+<li>   CPU&nbsp;0's scheduling clock interrupt fires in the
+       middle of an RCU read-side critical section, and notices
+       that the RCU core needs something, so commences RCU softirq
+       processing.
+<li>   CPU&nbsp;0's softirq handler executes and is just about ready
+       to report its quiescent state up the <tt>rcu_node</tt>
+       tree.
+<li>   But CPU&nbsp;1 beats it to the punch, completing the current
+       grace period and starting a new one.
+<li>   CPU&nbsp;0 now reports its quiescent state for the wrong
+       grace period.
+       That grace period might now end before the RCU read-side
+       critical section.
+       If that happens, disaster will ensue.
+</ol>
+
+<p>So the locking is absolutely required in order to coordinate clearing
+of the bits with the grace-period numbers in <tt>-&gt;gpnum</tt>
+and <tt>-&gt;completed</tt>.
+
+
+</p><p><a href="#Quick%20Quiz%204"><b>Back to Quick Quiz 4</b>.</a>
+
+<a name="qq5answer"></a>
+<p><b>Quick Quiz 5</b>:
+All this replication of the grace period numbers can only cause
+massive confusion.
+Why not just keep a global pair of counters and be done with it???
+
+
+</p><p><b>Answer</b>:
+Because if there was only a single global pair of grace-period numbers,
+there would need to be a single global lock to allow safely accessing
+and updating them.
+And if we are not going to have a single global lock, we need to carefully
+manage the numbers on a per-node basis.
+Recall from the answer to a previous Quick Quiz that the consequences
+of applying a previously sampled quiescent state to the wrong
+grace period are quite severe.
+
+
+</p><p><a href="#Quick%20Quiz%205"><b>Back to Quick Quiz 5</b>.</a>
+
+<a name="qq6answer"></a>
+<p><b>Quick Quiz 6</b>:
+Why not just count all NMIs?
+Wouldn't that be simpler and less error prone?
+
+
+</p><p><b>Answer</b>:
+It seems simpler only until you think hard about how to go about
+updating the <tt>rcu_dynticks</tt> structure's
+<tt>-&gt;dynticks</tt> field.
+
+
+</p><p><a href="#Quick%20Quiz%206"><b>Back to Quick Quiz 6</b>.</a>
+
+<a name="qq7answer"></a>
+<p><b>Quick Quiz 7</b>:
+Given that the callback function <tt>-&gt;func</tt>
+is passed a pointer to the <tt>rcu_head</tt> structure,
+how is that function supposed to find the beginning of the
+enclosing RCU-protected data structure?
+
+
+</p><p><b>Answer</b>:
+In actual practice, there is a separate callback function per
+type of RCU-protected data structure.
+The callback function can therefore use the <tt>container_of()</tt>
+macro in the Linux kernel (or other pointer-manipulation facilities
+in other software environments) to find the beginning of the
+enclosing structure.
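+
+</p><p>For example (a minimal sketch in which <tt>struct foo</tt>,
+<tt>foo_release()</tt>, and <tt>foo_free_deferred()</tt> are hypothetical):
+
+<pre>
+struct foo {
+        int a;
+        struct rcu_head rh;
+};
+
+static void foo_release(struct rcu_head *head)
+{
+        struct foo *fp = container_of(head, struct foo, rh);
+
+        kfree(fp);
+}
+
+/* Free an already-unlinked structure once a grace period has elapsed. */
+static void foo_free_deferred(struct foo *fp)
+{
+        call_rcu(&amp;fp-&gt;rh, foo_release);
+}
+</pre>
+
+</p><p>In this particular case, where the callback does nothing but
+<tt>kfree()</tt> the enclosing structure, <tt>kfree_rcu(fp, rh)</tt>
+could be used instead, as noted in the discussion of the
+<tt>-&gt;func</tt> field.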
+
+
+</p><p><a href="#Quick%20Quiz%207"><b>Back to Quick Quiz 7</b>.</a>
+
+<a name="qq8answer"></a>
+<p><b>Quick Quiz 8</b>:
+Why is <tt>-&gt;rcu_boosted</tt> required, given that there is
+a <tt>RCU_READ_UNLOCK_BOOSTED</tt> bit in
+<tt>-&gt;rcu_read_unlock_special</tt>?
+
+
+</p><p><b>Answer</b>:
+The <tt>-&gt;rcu_read_unlock_special</tt> field may only be
+updated by the task itself.
+By definition, RCU priority boosting must be carried out by some
+other task.
+This other task cannot safely update the boosted task's
+<tt>-&gt;rcu_read_unlock_special</tt> field without the use of
+expensive atomic instructions.
+The <tt>-&gt;rcu_boosted</tt> field is therefore used by the
+boosting task to let the boosted task know that it has been boosted.
+The boosted task makes use of the
+<tt>RCU_READ_UNLOCK_BOOSTED</tt> bit in
+<tt>-&gt;rcu_read_unlock_special</tt>
+when deboosting itself.
+
+
+</p><p><a href="#Quick%20Quiz%208"><b>Back to Quick Quiz 8</b>.</a>
+
+<a name="qq9answer"></a>
+<p><b>Quick Quiz 9</b>:
+What do <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> and
+<tt>rcu_for_each_leaf_node()</tt> do if the <tt>rcu_node</tt> tree
+contains only a single node?
+
+
+</p><p><b>Answer</b>:
+In the single-node case,
+<tt>rcu_for_each_nonleaf_node_breadth_first()</tt> is a no-op
+and <tt>rcu_for_each_leaf_node()</tt> traverses the single node.
+
+
+</p><p><a href="#Quick%20Quiz%209"><b>Back to Quick Quiz 9</b>.</a>
+
+
+
+</body></html>
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.htmlx b/Documentation/RCU/Design/Data-Structures/Data-Structures.htmlx
new file mode 100644 (file)
index 0000000..c08fd8e
--- /dev/null
@@ -0,0 +1,1272 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        "http://www.w3.org/TR/html4/loose.dtd">
+        <html>
+        <head><title>A Tour Through TREE_RCU's Data Structures [LWN.net]</title>
+        <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1">
+
+           <p>January 27, 2016</p>
+           <p>This article was contributed by Paul E.&nbsp;McKenney</p>
+
+<h3>Introduction</h3>
+
+This document describes RCU's major data structures and their relationship
+to each other.
+
+<ol>
+<li>   <a href="#Data-Structure Relationships">
+       Data-Structure Relationships</a>
+<li>   <a href="#The rcu_state Structure">
+       The <tt>rcu_state</tt> Structure</a>
+<li>   <a href="#The rcu_node Structure">
+       The <tt>rcu_node</tt> Structure</a>
+<li>   <a href="#The rcu_data Structure">
+       The <tt>rcu_data</tt> Structure</a>
+<li>   <a href="#The rcu_dynticks Structure">
+       The <tt>rcu_dynticks</tt> Structure</a>
+<li>   <a href="#The rcu_head Structure">
+       The <tt>rcu_head</tt> Structure</a>
+<li>   <a href="#RCU-Specific Fields in the task_struct Structure">
+       RCU-Specific Fields in the <tt>task_struct</tt> Structure</a>
+<li>   <a href="#Accessor Functions">
+       Accessor Functions</a>
+</ol>
+
+At the end we have the
+<a href="#Answers to Quick Quizzes">answers to the quick quizzes</a>.
+
+<h3><a name="Data-Structure Relationships">Data-Structure Relationships</a></h3>
+
+<p>RCU is for all intents and purposes a large state machine, and its
+data structures maintain the state in such a way as to allow RCU readers
+to execute extremely quickly, while also processing the RCU grace periods
+requested by updaters in an efficient and extremely scalable fashion.
+The efficiency and scalability of RCU updaters is provided primarily
+by a combining tree, as shown below:
+
+</p><p><img src="BigTreeClassicRCU.svg" alt="BigTreeClassicRCU.svg" width="30%">
+
+</p><p>This diagram shows an enclosing <tt>rcu_state</tt> structure
+containing a tree of <tt>rcu_node</tt> structures.
+Each leaf node of the <tt>rcu_node</tt> tree has up to 16
+<tt>rcu_data</tt> structures associated with it, so that there
+are <tt>NR_CPUS</tt> <tt>rcu_data</tt> structures in all,
+one for each possible CPU.
+This structure is adjusted at boot time, if needed, to handle the
+common case where <tt>nr_cpu_ids</tt> is much less than
+<tt>NR_CPUS</tt>.
+For example, a number of Linux distributions set <tt>NR_CPUS=4096</tt>,
+which results in a three-level <tt>rcu_node</tt> tree.
+If the actual hardware has only 16 CPUs, RCU will adjust itself
+at boot time, resulting in an <tt>rcu_node</tt> tree with only a single node.
+
+</p><p>The purpose of this combining tree is to allow per-CPU events
+such as quiescent states, dyntick-idle transitions,
+and CPU hotplug operations to be processed efficiently
+and scalably.
+Quiescent states are recorded by the per-CPU <tt>rcu_data</tt> structures,
+and other events are recorded by the leaf-level <tt>rcu_node</tt>
+structures.
+All of these events are combined at each level of the tree until finally
+grace periods are completed at the tree's root <tt>rcu_node</tt>
+structure.
+A grace period can be completed at the root once every CPU
+(or, in the case of <tt>CONFIG_TREE_PREEMPT_RCU</tt>, task)
+has passed through a quiescent state.
+Once a grace period has completed, record of that fact is propagated
+back down the tree.
+
+</p><p>As can be seen from the diagram, on a 64-bit system
+a two-level tree with 64 leaves can accommodate 1,024 CPUs, with a fanout
+of 64 at the root and a fanout of 16 at the leaves.
+
+<p>@@QQ@@
+Why isn't the fanout at the leaves also 64?
+<p>@@QQA@@
+Because there are more types of events that affect the leaf-level
+<tt>rcu_node</tt> structures than further up the tree.
+Therefore, if the leaf <tt>rcu_node</tt> structures have a
+fanout of 64, the contention on these structures'
+<tt>-&gt;lock</tt> fields becomes excessive.
+Experimentation on a wide variety of systems has shown that a fanout
+of 16 works well for the leaves of the <tt>rcu_node</tt> tree.
+
+</p><p>Of course, further experience with systems having hundreds or
+thousands of CPUs may demonstrate that the fanout for the non-leaf
+<tt>rcu_node</tt> structures must also be reduced.
+Such reduction can be easily carried out when and if it proves necessary.
+In the meantime, if you are using such a system and running into
+contention problems on the non-leaf <tt>rcu_node</tt> structures,
+you may use the <tt>CONFIG_RCU_FANOUT</tt> kernel configuration
+parameter to reduce the non-leaf fanout as needed.
+
+</p><p>Kernels built for systems with strong NUMA characteristics might
+also need to adjust <tt>CONFIG_RCU_FANOUT</tt> so that the
+domains of the <tt>rcu_node</tt> structures align with hardware
+boundaries.
+However, there has thus far been no need for this.
+<p>@@QQE@@
+
+</p><p>If your system has more than 1,024 CPUs (or more than 512 CPUs on
+a 32-bit system), then RCU will automatically add more levels to the
+tree.
+For example, if you are crazy enough to build a 64-bit system with 65,536
+CPUs, RCU would configure the <tt>rcu_node</tt> tree as follows:
+
+</p><p><img src="HugeTreeClassicRCU.svg" alt="HugeTreeClassicRCU.svg" width="50%">
+
+</p><p>RCU currently permits up to a four-level tree, which on a 64-bit system
+accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for
+32-bit systems.
+On the other hand, you can set <tt>CONFIG_RCU_FANOUT</tt> to be
+as small as 2 if you wish, which would permit only 16 CPUs, which
+is useful for testing.
+
+</p><p>The Linux kernel actually supports multiple flavors of RCU
+running concurrently, so RCU builds separate data structures for each
+flavor.
+For example, for <tt>CONFIG_TREE_RCU=y</tt> kernels, RCU provides
+rcu_sched and rcu_bh, as shown below:
+
+</p><p><img src="BigTreeClassicRCUBH.svg" alt="BigTreeClassicRCUBH.svg" width="33%">
+
+</p><p>Energy efficiency is increasingly important, and for that
+reason the Linux kernel provides <tt>CONFIG_NO_HZ_IDLE</tt>, which
+turns off the scheduling-clock interrupts on idle CPUs, which in
+turn allows those CPUs to attain deeper sleep states and to consume
+less energy.
+CPUs whose scheduling-clock interrupts have been turned off are
+said to be in <i>dyntick-idle mode</i>.
+RCU must handle dyntick-idle CPUs specially
+because RCU would otherwise wake up each CPU on every grace period,
+which would defeat the whole purpose of <tt>CONFIG_NO_HZ_IDLE</tt>.
+RCU uses the <tt>rcu_dynticks</tt> structure to track
+which CPUs are in dyntick idle mode, as shown below:
+
+</p><p><img src="BigTreeClassicRCUBHdyntick.svg" alt="BigTreeClassicRCUBHdyntick.svg" width="33%">
+
+</p><p>However, if a CPU is in dyntick-idle mode, it is in that mode
+for all flavors of RCU.
+Therefore, a single <tt>rcu_dynticks</tt> structure is allocated per
+CPU, and all of a given CPU's <tt>rcu_data</tt> structures share
+that <tt>rcu_dynticks</tt>, as shown in the figure.
+
+</p><p>Kernels built with <tt>CONFIG_TREE_PREEMPT_RCU</tt> support
+rcu_preempt in addition to rcu_sched and rcu_bh, as shown below:
+
+</p><p><img src="BigTreePreemptRCUBHdyntick.svg" alt="BigTreePreemptRCUBHdyntick.svg" width="35%">
+
+</p><p>RCU updaters wait for normal grace periods by registering
+RCU callbacks, either directly via <tt>call_rcu()</tt> and
+friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>,
+there being a separate interface per flavor of RCU)
+or indirectly via <tt>synchronize_rcu()</tt> and friends.
+RCU callbacks are represented by <tt>rcu_head</tt> structures,
+which are queued on <tt>rcu_data</tt> structures while they are
+waiting for a grace period to elapse, as shown in the following figure:
+
+</p><p><img src="BigTreePreemptRCUBHdyntickCB.svg" alt="BigTreePreemptRCUBHdyntickCB.svg" width="40%">
+
+</p><p>This figure shows how <tt>TREE_RCU</tt>'s and
+<tt>TREE_PREEMPT_RCU</tt>'s major data structures are related.
+Lesser data structures will be introduced with the algorithms that
+make use of them.
+
+</p><p>Note that each of the data structures in the above figure has
+its own synchronization:
+
+<p><ol>
+<li>	Each <tt>rcu_state</tt> structure has a lock and a mutex,
+       and some fields are protected by the corresponding root
+       <tt>rcu_node</tt> structure's lock.
+<li>   Each <tt>rcu_node</tt> structure has a spinlock.
+<li>   The fields in <tt>rcu_data</tt> are private to the corresponding
+       CPU, although a few can be read and written by other CPUs.
+<li>   Similarly, the fields in <tt>rcu_dynticks</tt> are private
+       to the corresponding CPU, although a few can be read by
+       other CPUs.
+</ol>
+
+<p>It is important to note that different data structures can have
+very different ideas about the state of RCU at any given time.
+For but one example, awareness of the start or end of a given RCU
+grace period propagates slowly through the data structures.
+This slow propagation is absolutely necessary for RCU to have good
+read-side performance.
+If this balkanized implementation seems foreign to you, one useful
+trick is to consider each instance of these data structures to be
+a different person, each having the usual slightly different
+view of reality.
+
+</p><p>The general role of each of these data structures is as
+follows:
+
+</p><ol>
+<li>   <tt>rcu_state</tt>:
+       This structure forms the interconnection between the
+       <tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
+       tracks grace periods, serves as short-term repository
+       for callbacks orphaned by CPU-hotplug events,
+       maintains <tt>rcu_barrier()</tt> state,
+       tracks expedited grace-period state,
+       and maintains state used to force quiescent states when
+	grace periods extend too long.
+<li>   <tt>rcu_node</tt>: This structure forms the combining
+       tree that propagates quiescent-state
+       information from the leaves to the root, and also propagates
+       grace-period information from the root to the leaves.
+       It provides local copies of the grace-period state in order
+       to allow this information to be accessed in a synchronized
+       manner without suffering the scalability limitations that
+       would otherwise be imposed by global locking.
+       In <tt>CONFIG_TREE_PREEMPT_RCU</tt> kernels, it manages the lists
+       of tasks that have blocked while in their current
+       RCU read-side critical section.
+       In <tt>CONFIG_TREE_PREEMPT_RCU</tt> with
+       <tt>CONFIG_RCU_BOOST</tt>, it manages the
+       per-<tt>rcu_node</tt> priority-boosting
+       kernel threads (kthreads) and state.
+       Finally, it records CPU-hotplug state in order to determine
+       which CPUs should be ignored during a given grace period.
+<li>   <tt>rcu_data</tt>: This per-CPU structure is the
+       focus of quiescent-state detection and RCU callback queuing.
+       It also tracks its relationship to the corresponding leaf
+       <tt>rcu_node</tt> structure to allow more-efficient
+       propagation of quiescent states up the <tt>rcu_node</tt>
+       combining tree.
+	Like the <tt>rcu_node</tt> structure, it provides a local
+	copy of the grace-period information to allow synchronized
+	access to this information from the corresponding CPU
+	essentially for free.
+       Finally, this structure records past dyntick-idle state
+       for the corresponding CPU and also tracks statistics.
+<li>   <tt>rcu_dynticks</tt>:
+       This per-CPU structure tracks the current dyntick-idle
+       state for the corresponding CPU.
+       Unlike the other three structures, the <tt>rcu_dynticks</tt>
+       structure is not replicated per RCU flavor.
+<li>   <tt>rcu_head</tt>:
+       This structure represents RCU callbacks, and is the
+       only structure allocated and managed by RCU users.
+       The <tt>rcu_head</tt> structure is normally embedded
+       within the RCU-protected data structure.
+</ol>
+
+<p>If all you wanted from this article was a general notion of how
+RCU's data structures are related, you are done.
+Otherwise, each of the following sections give more details on
+the <tt>rcu_state</tt>, <tt>rcu_node</tt>, <tt>rcu_data</tt>,
+and <tt>rcu_dynticks</tt> data structures.
+
+<h3><a name="The rcu_state Structure">
+The <tt>rcu_state</tt> Structure</a></h3>
+
+<p>The <tt>rcu_state</tt> structure is the base structure that
+represents a flavor of RCU.
+This structure forms the interconnection between the
+<tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
+tracks grace periods, contains the lock used to
+synchronize with CPU-hotplug events,
+and maintains state used to force quiescent states when
+grace periods extend too long.
+
+</p><p>A few of the <tt>rcu_state</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+The more specialized fields are covered in the discussion of their
+use.
+
+<h5>Relationship to rcu_node and rcu_data Structures</h5>
+
+This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   struct rcu_node node[NUM_RCU_NODES];
+  2   struct rcu_node *level[NUM_RCU_LVLS + 1];
+  3   struct rcu_data __percpu *rda;
+</pre>
+
+<p>@@QQ@@
+Wait a minute!
+You said that the <tt>rcu_node</tt> structures formed a tree,
+but they are declared as a flat array!
+What gives?
+<p>@@QQA@@
+The tree is laid out in the array.
+The first node in the array is the head, the next set of nodes in the
+array are children of the head node, and so on until the last set of
+nodes in the array are the leaves.
+
+</p><p>See the following diagrams to see how this works.
+<p>@@QQE@@
+
+</p><p>The <tt>rcu_node</tt> tree is embedded into the
+<tt>-&gt;node[]</tt> array as shown in the following figure:
+
+</p><p><img src="TreeMapping.svg" alt="TreeMapping.svg" width="40%">
+
+</p><p>One interesting consequence of this mapping is that a
+breadth-first traversal of the tree is implemented as a simple
+linear scan of the array, which is in fact what the
+<tt>rcu_for_each_node_breadth_first()</tt> macro does.
+This macro is used at the beginning and ends of grace periods.
+
+</p><p>Each entry of the <tt>-&gt;level</tt> array references
+the first <tt>rcu_node</tt> structure on the corresponding level
+of the tree, for example, as shown below:
+
+</p><p><img src="TreeMappingLevel.svg" alt="TreeMappingLevel.svg" width="40%">
+
+</p><p>The zero<sup>th</sup> element of the array references the root
+<tt>rcu_node</tt> structure, the first element references the
+first child of the root <tt>rcu_node</tt>, and finally the second
+element references the first leaf <tt>rcu_node</tt> structure.
+
+<p>@@QQ@@
+Given that this array represents a tree, why can't the diagram that
+includes the <tt>-&gt;level</tt> array be planar?
+<p>@@QQA@@
+It can be planar; it just looks uglier that way.
+But don't take my word for it, draw it yourself!
+
+</p><p>But if you draw the tree to be tree-shaped rather than
+array-shaped, it is easy to draw a planar representation:
+
+</p><p><img src="TreeLevel.svg" alt="TreeLevel.svg" width="60%">
+<p>@@QQE@@
+
+</p><p>Finally, the <tt>-&gt;rda</tt> field references a per-CPU
+pointer to the corresponding CPU's <tt>rcu_data</tt> structure.
+
+</p><p>All of these fields are constant once initialization is complete,
+and therefore need no protection.
+
+<h5>Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gpnum;
+  2   unsigned long completed;
+</pre>
+
+<p>RCU grace periods are numbered, and
+the <tt>-&gt;gpnum</tt> field contains the number of the grace
+period that started most recently.
+The <tt>-&gt;completed</tt> field contains the number of the
+grace period that completed most recently.
+If the two fields are equal, the RCU grace period that most recently
+started has already completed, and therefore the corresponding
+flavor of RCU is idle.
+If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
+then <tt>-&gt;gpnum</tt> gives the number of the current RCU
+grace period, which has not yet completed.
+Any other combination of values indicates that something is broken.
+These two fields are protected by the root <tt>rcu_node</tt>'s
+<tt>-&gt;lock</tt> field.
+
+</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
+as well.
+The fields in the <tt>rcu_state</tt> structure represent the
+most current values, and those of the other structures are compared
+in order to detect the start of a new grace period in a distributed
+fashion.
+The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
+(down the tree from the root to the leaves) to <tt>rcu_data</tt>.
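+
+</p><p>A hedged sketch of the checks that these comparisons enable
+(not the kernel's exact code):
+
+<pre>
+/* Sketch: this flavor of RCU is idle if its most recently started
+   grace period has already completed. */
+static bool sketch_rcu_gp_idle(struct rcu_state *rsp)
+{
+        return rsp-&gt;completed == rsp-&gt;gpnum;
+}
+
+/* Sketch: a CPU notices that a grace period has ended when its own
+   -&gt;completed value no longer matches that of its leaf rcu_node
+   structure. */
+static bool sketch_gp_end_seen(struct rcu_data *rdp, struct rcu_node *rnp)
+{
+        return rdp-&gt;completed != rnp-&gt;completed;
+}
+</pre>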
+
+<h5>Miscellaneous</h5>
+
+<p>This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gp_max;
+  2   char abbr;
+  3   char *name;
+</pre>
+
+<p>The <tt>-&gt;gp_max</tt> field tracks the duration of the longest
+grace period in jiffies.
+It is protected by the root <tt>rcu_node</tt>'s <tt>-&gt;lock</tt>.
+
+<p>The <tt>-&gt;name</tt> field points to the name of the RCU flavor
+(for example, &ldquo;rcu_sched&rdquo;), and is constant.
+The <tt>-&gt;abbr</tt> field contains a one-character abbreviation,
+for example, &ldquo;s&rdquo; for RCU-sched.
+
+<h3><a name="The rcu_node Structure">
+The <tt>rcu_node</tt> Structure</a></h3>
+
+<p>The <tt>rcu_node</tt> structures form the combining
+tree that propagates quiescent-state
+information from the leaves to the root and that also propagates
+grace-period information from the root down to the leaves.
+They provide local copies of the grace-period state in order
+to allow this information to be accessed in a synchronized
+manner without suffering the scalability limitations that
+would otherwise be imposed by global locking.
+In <tt>CONFIG_TREE_PREEMPT_RCU</tt> kernels, they manage the lists
+of tasks that have blocked while in their current
+RCU read-side critical section.
+In <tt>CONFIG_TREE_PREEMPT_RCU</tt> with
+<tt>CONFIG_RCU_BOOST</tt>, they manage the
+per-<tt>rcu_node</tt> priority-boosting
+kernel threads (kthreads) and state.
+Finally, they record CPU-hotplug state in order to determine
+which CPUs should be ignored during a given grace period.
+
+</p><p>The <tt>rcu_node</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+
+<h5>Connection to Combining Tree</h5>
+
+<p>This portion of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   struct rcu_node *parent;
+  2   u8 level;
+  3   u8 grpnum;
+  4   unsigned long grpmask;
+  5   int grplo;
+  6   int grphi;
+</pre>
+
+<p>The <tt>-&gt;parent</tt> pointer references the <tt>rcu_node</tt>
+one level up in the tree, and is <tt>NULL</tt> for the root
+<tt>rcu_node</tt>.
+The RCU implementation makes heavy use of this field to push quiescent
+states up the tree.
+The <tt>-&gt;level</tt> field gives the level in the tree, with
+the root being at level zero, its children at level one, and so on.
+The <tt>-&gt;grpnum</tt> field gives this node's position within
+the children of its parent, so this number can range between 0 and 31
+on 32-bit systems and between 0 and 63 on 64-bit systems.
+The <tt>-&gt;level</tt> and <tt>-&gt;grpnum</tt> fields are
+used only during initialization and for tracing.
+The <tt>-&gt;grpmask</tt> field is the bitmask counterpart of
+<tt>-&gt;grpnum</tt>, and therefore always has exactly one bit set.
+This mask is used to clear the bit corresponding to this <tt>rcu_node</tt>
+structure in its parent's bitmasks, which are described later.
+Finally, the <tt>-&gt;grplo</tt> and <tt>-&gt;grphi</tt> fields
+contain the lowest and highest numbered CPU served by this
+<tt>rcu_node</tt> structure, respectively.
+
+</p><p>All of these fields are constant, and thus do not require any
+synchronization.
+
+<h5>Synchronization</h5>
+
+<p>This field of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   raw_spinlock_t lock;
+</pre>
+
+<p>This field is used to protect the remaining fields in this structure,
+unless otherwise stated.
+That said, all of the fields in this structure can be accessed without
+locking for tracing purposes.
+Yes, this can result in confusing traces, but better some tracing confusion
+than to be heisenbugged out of existence.
+
+<h5>Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gpnum;
+  2   unsigned long completed;
+</pre>
+
+<p>These fields are the counterparts of the fields of the same name in
+the <tt>rcu_state</tt> structure.
+They each may lag up to one behind their <tt>rcu_state</tt>
+counterparts.
+If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
+<tt>-&gt;completed</tt> fields are equal, then this <tt>rcu_node</tt>
+structure believes that RCU is idle.
+Otherwise, as with the <tt>rcu_state</tt> structure,
+the <tt>-&gt;gpnum</tt> field will be one greater than the
+<tt>-&gt;completed</tt> field, with <tt>-&gt;gpnum</tt>
+indicating which grace period this <tt>rcu_node</tt> believes
+is still being waited for.
+
+</p><p>The <tt>-&gt;gpnum</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning
+of each grace period, and the <tt>-&gt;completed</tt> fields are
+updated at the end of each grace period.
+
+<h5>Quiescent-State Tracking</h5>
+
+<p>These fields manage the propagation of quiescent states up the
+combining tree.
+
+</p><p>This portion of the <tt>rcu_node</tt> structure has fields
+as follows:
+
+<pre>
+  1   unsigned long qsmask;
+  2   unsigned long expmask;
+  3   unsigned long qsmaskinit;
+  4   unsigned long expmaskinit;
+</pre>
+
+<p>The <tt>-&gt;qsmask</tt> field tracks which of this
+<tt>rcu_node</tt> structure's children still need to report
+quiescent states for the current normal grace period.
+Such children will have a value of 1 in their corresponding bit.
+Note that the leaf <tt>rcu_node</tt> structures should be
+thought of as having <tt>rcu_data</tt> structures as their
+children.
+Similarly, the <tt>-&gt;expmask</tt> field tracks which
+of this <tt>rcu_node</tt> structure's children still need to report
+quiescent states for the current expedited grace period.
+An expedited grace period has
+the same conceptual properties as a normal grace period, but the
+expedited implementation accepts extreme CPU overhead to obtain
+much lower grace-period latency, for example, consuming a few
+tens of microseconds worth of CPU time to reduce grace-period
+duration from milliseconds to tens of microseconds.
+The <tt>-&gt;qsmaskinit</tt> field tracks which of this
+<tt>rcu_node</tt> structure's children cover at least
+one online CPU.
+This mask is used to initialize <tt>-&gt;qsmask</tt>,
+and <tt>-&gt;expmaskinit</tt> is used to initialize
+<tt>-&gt;expmask</tt>, at the beginning of the
+normal and expedited grace periods, respectively.
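+
+</p><p>A hedged sketch of how a quiescent state is reported into these
+masks, holding the <tt>rcu_node</tt> structure's <tt>-&gt;lock</tt> for
+the reasons discussed in the following Quick Quiz:
+
+<pre>
+/* Sketch: a child (rcu_node or rcu_data) reports a quiescent state for
+   the current grace period by clearing its bit, identified by grpmask,
+   in its parent's -&gt;qsmask. */
+static void sketch_report_qs(struct rcu_node *rnp, unsigned long grpmask)
+{
+        unsigned long flags;
+
+        raw_spin_lock_irqsave(&amp;rnp-&gt;lock, flags);
+        rnp-&gt;qsmask &amp;= ~grpmask;
+        if (!rnp-&gt;qsmask) {
+                /* All of this node's children have now reported, so the
+                   quiescent state must be propagated to rnp-&gt;parent,
+                   or, at the root, the grace period may end (omitted). */
+        }
+        raw_spin_unlock_irqrestore(&amp;rnp-&gt;lock, flags);
+}
+</pre>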
+
+<p>@@QQ@@
+Why are these bitmasks protected by locking?
+Come on, haven't you heard of atomic instructions???
+<p>@@QQA@@
+Lockless grace-period computation!  Such a tantalizing possibility!
+
+</p><p>But consider the following sequence of events:
+
+<ol>
+<li>   CPU&nbsp;0 has been in dyntick-idle mode for quite
+       some time.
+       When it wakes up, it notices that the current RCU
+       grace period needs it to report in, so it sets a
+       flag where the scheduling clock interrupt will find it.
+<li>   Meanwhile, CPU&nbsp;1 is running <tt>force_quiescent_state()</tt>,
+       and notices that CPU&nbsp;0 has been in dyntick idle mode,
+       which qualifies as an extended quiescent state.
+<li>   CPU&nbsp;0's scheduling clock interrupt fires in the
+       middle of an RCU read-side critical section, and notices
+       that the RCU core needs something, so commences RCU softirq
+       processing.
+<li>   CPU&nbsp;0's softirq handler executes and is just about ready
+       to report its quiescent state up the <tt>rcu_node</tt>
+       tree.
+<li>   But CPU&nbsp;1 beats it to the punch, completing the current
+       grace period and starting a new one.
+<li>   CPU&nbsp;0 now reports its quiescent state for the wrong
+       grace period.
+       That grace period might now end before the RCU read-side
+       critical section.
+       If that happens, disaster will ensue.
+</ol>
+
+<p>So the locking is absolutely required in order to coordinate clearing
+of the bits with the grace-period numbers in <tt>-&gt;gpnum</tt>
+and <tt>-&gt;completed</tt>.
+<p>@@QQE@@
+
+<h5>Blocked-Task Management</h5>
+
+<p><tt>TREE_PREEMPT_RCU</tt> allows tasks to be preempted in the
+midst of their RCU read-side critical sections, and these tasks
+must be tracked explicitly.
+The details of exactly why and how they are tracked will be covered
+in a separate article on RCU read-side processing.
+For now, it is enough to know that the <tt>rcu_node</tt>
+structure tracks them.
+
+<pre>
+  1   struct list_head blkd_tasks;
+  2   struct list_head *gp_tasks;
+  3   struct list_head *exp_tasks;
+  4   bool wait_blkd_tasks;
+</pre>
+
+<p>The <tt>-&gt;blkd_tasks</tt> field is a list header for
+the list of blocked and preempted tasks.
+As tasks undergo context switches within RCU read-side critical
+sections, their <tt>task_struct</tt> structures are enqueued
+(via the <tt>task_struct</tt>'s <tt>-&gt;rcu_node_entry</tt>
+field) onto the head of the <tt>-&gt;blkd_tasks</tt> list for the
+leaf <tt>rcu_node</tt> structure corresponding to the CPU
+on which the outgoing context switch executed.
+As these tasks later exit their RCU read-side critical sections,
+they remove themselves from the list.
+This list is therefore in reverse time order, so that if one of the tasks
+is blocking the current grace period, all subsequent tasks must
+also be blocking that same grace period.
+Therefore, a single pointer into this list suffices to track
+all tasks blocking a given grace period.
+That pointer is stored in <tt>-&gt;gp_tasks</tt> for normal
+grace periods and in <tt>-&gt;exp_tasks</tt> for expedited
+grace periods.
+These last two fields are <tt>NULL</tt> if either there is
+no grace period in flight or if there are no blocked tasks
+preventing that grace period from completing.
+If either of these two pointers is referencing a task that
+removes itself from the <tt>-&gt;blkd_tasks</tt> list,
+then that task must advance the pointer to the next task on
+the list, or set the pointer to <tt>NULL</tt> if there
+are no subsequent tasks on the list.
+
+</p><p>For example, suppose that tasks&nbsp;T1, T2, and&nbsp;T3 are
+all hard-affinitied to the largest-numbered CPU in the system.
+Then if task&nbsp;T1 blocked in an RCU read-side
+critical section, then an expedited grace period started,
+then task&nbsp;T2 blocked in an RCU read-side critical section,
+then a normal grace period started, and finally task&nbsp;T3 blocked
+in an RCU read-side critical section, then the state of the
+last leaf <tt>rcu_node</tt> structure's blocked-task list
+would be as shown below:
+
+</p><p><img src="blkd_task.svg" alt="blkd_task.svg" width="60%">
+
+</p><p>Task&nbsp;T1 is blocking both grace periods, task&nbsp;T2 is
+blocking only the normal grace period, and task&nbsp;T3 is blocking
+neither grace period.
+Note that these tasks will not remove themselves from this list
+immediately upon resuming execution.
+They will instead remain on the list until they execute the outermost
+<tt>rcu_read_unlock()</tt> that ends their RCU read-side critical
+section.
+
+<p>
+The <tt>-&gt;wait_blkd_tasks</tt> field indicates whether or not
+the current grace period is waiting on a blocked task.
+
+<h5>Sizing the <tt>rcu_node</tt> Array</h5>
+
+<p>The <tt>rcu_node</tt> array is sized via a series of
+C-preprocessor expressions as follows:
+
+<pre>
+ 1 #ifdef CONFIG_RCU_FANOUT
+ 2 #define RCU_FANOUT CONFIG_RCU_FANOUT
+ 3 #else
+ 4 # ifdef CONFIG_64BIT
+ 5 # define RCU_FANOUT 64
+ 6 # else
+ 7 # define RCU_FANOUT 32
+ 8 # endif
+ 9 #endif
+10
+11 #ifdef CONFIG_RCU_FANOUT_LEAF
+12 #define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
+13 #else
+14 # ifdef CONFIG_64BIT
+15 # define RCU_FANOUT_LEAF 64
+16 # else
+17 # define RCU_FANOUT_LEAF 32
+18 # endif
+19 #endif
+20
+21 #define RCU_FANOUT_1        (RCU_FANOUT_LEAF)
+22 #define RCU_FANOUT_2        (RCU_FANOUT_1 * RCU_FANOUT)
+23 #define RCU_FANOUT_3        (RCU_FANOUT_2 * RCU_FANOUT)
+24 #define RCU_FANOUT_4        (RCU_FANOUT_3 * RCU_FANOUT)
+25
+26 #if NR_CPUS &lt;= RCU_FANOUT_1
+27 #  define RCU_NUM_LVLS        1
+28 #  define NUM_RCU_LVL_0        1
+29 #  define NUM_RCU_NODES        NUM_RCU_LVL_0
+30 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
+31 #  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
+32 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
+33 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
+34 #elif NR_CPUS &lt;= RCU_FANOUT_2
+35 #  define RCU_NUM_LVLS        2
+36 #  define NUM_RCU_LVL_0        1
+37 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+38 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
+39 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
+40 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
+41 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
+42 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
+43 #elif NR_CPUS &lt;= RCU_FANOUT_3
+44 #  define RCU_NUM_LVLS        3
+45 #  define NUM_RCU_LVL_0        1
+46 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+47 #  define NUM_RCU_LVL_2        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+48 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
+49 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
+50 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
+51 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
+52 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
+53 #elif NR_CPUS &lt;= RCU_FANOUT_4
+54 #  define RCU_NUM_LVLS        4
+55 #  define NUM_RCU_LVL_0        1
+56 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
+57 #  define NUM_RCU_LVL_2        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+58 #  define NUM_RCU_LVL_3        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+59 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+60 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
+61 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
+62 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
+63 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
+64 #else
+65 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+66 #endif
+</pre>
+
+<p>The maximum number of levels in the <tt>rcu_node</tt> structure
+is currently limited to four, as specified by lines&nbsp;21-24
+and the structure of the subsequent &ldquo;if&rdquo; statement.
+For 32-bit systems, this allows 16*32*32*32=524,288 CPUs, which
+should be sufficient for the next few years at least.
+For 64-bit systems, 16*64*64*64=4,194,304 CPUs is allowed, which
+should see us through the next decade or so.
+This four-level tree also allows kernels built with
+<tt>CONFIG_RCU_FANOUT=8</tt> to support up to 4096 CPUs,
+which might be useful in very large systems having eight CPUs per
+socket (but please note that no one has yet shown any measurable
+performance degradation due to misaligned socket and <tt>rcu_node</tt>
+boundaries).
+In addition, building kernels with a full four levels of <tt>rcu_node</tt>
+tree permits better testing of RCU's combining-tree code.
+
+</p><p>The <tt>RCU_FANOUT</tt> symbol controls how many children
+are permitted at each non-leaf level of the <tt>rcu_node</tt> tree.
+If the <tt>CONFIG_RCU_FANOUT</tt> Kconfig option is not specified,
+it is set based on the word size of the system, which is also
+the Kconfig default.
+
+</p><p>The <tt>RCU_FANOUT_LEAF</tt> symbol controls how many CPUs are
+handled by each leaf <tt>rcu_node</tt> structure.
+Experience has shown that allowing a given leaf <tt>rcu_node</tt>
+structure to handle 64 CPUs, as permitted by the number of bits in
+the <tt>-&gt;qsmask</tt> field on a 64-bit system, results in
+excessive contention for the leaf <tt>rcu_node</tt> structures'
+<tt>-&gt;lock</tt> fields.
+The number of CPUs per leaf <tt>rcu_node</tt> structure is therefore
+limited to 16 given the default value of <tt>CONFIG_RCU_FANOUT_LEAF</tt>.
+If <tt>CONFIG_RCU_FANOUT_LEAF</tt> is unspecified, the value
+selected is based on the word size of the system, just as for
+<tt>CONFIG_RCU_FANOUT</tt>.
+Lines&nbsp;11-19 perform this computation.
+
+</p><p>Lines&nbsp;21-24 compute the maximum number of CPUs supported by
+a single-level (which contains a single <tt>rcu_node</tt> structure),
+two-level, three-level, and four-level <tt>rcu_node</tt> tree,
+respectively, given the fanout specified by <tt>RCU_FANOUT</tt>
+and <tt>RCU_FANOUT_LEAF</tt>.
+These numbers of CPUs are retained in the
+<tt>RCU_FANOUT_1</tt>,
+<tt>RCU_FANOUT_2</tt>,
+<tt>RCU_FANOUT_3</tt>, and
+<tt>RCU_FANOUT_4</tt>
+C-preprocessor variables, respectively.
+
+</p><p>These variables are used to control the C-preprocessor <tt>#if</tt>
+statement spanning lines&nbsp;26-66 that computes the number of
+<tt>rcu_node</tt> structures required for each level of the tree,
+as well as the number of levels required.
+The number of levels is placed in the <tt>RCU_NUM_LVLS</tt>
+C-preprocessor variable by lines&nbsp;27, 35, 44, and&nbsp;54.
+The number of <tt>rcu_node</tt> structures for the topmost level
+of the tree is always exactly one, and this value is unconditionally
+placed into <tt>NUM_RCU_LVL_0</tt> by lines&nbsp;28, 36, 45, and&nbsp;55.
+The rest of the levels (if any) of the <tt>rcu_node</tt> tree
+are computed by dividing the maximum number of CPUs by the
+fanout supported by the number of levels from the current level down,
+rounding up.  This computation is performed by lines&nbsp;37,
+46-47, and&nbsp;56-58.
+Lines&nbsp;31-33, 40-42, 50-52, and&nbsp;61-63 create initializers
+for lockdep lock-class names.
+Finally, lines&nbsp;64-66 produce an error if the maximum number of
+CPUs is too large for the specified fanout.
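+
+</p><p>As a worked example, consider a 64-bit kernel built with the
+default <tt>CONFIG_RCU_FANOUT=64</tt> and <tt>CONFIG_RCU_FANOUT_LEAF=16</tt>
+and with <tt>NR_CPUS=4096</tt>, the distribution configuration mentioned
+earlier.
+Then <tt>RCU_FANOUT_1=16</tt>, <tt>RCU_FANOUT_2=1024</tt>, and
+<tt>RCU_FANOUT_3=65536</tt>, so the <tt>NR_CPUS&nbsp;&lt;=&nbsp;RCU_FANOUT_3</tt>
+branch is taken and the tree has three levels:
+
+<pre>
+NUM_RCU_LVL_0 = 1
+NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 1024) =   4
+NUM_RCU_LVL_2 = DIV_ROUND_UP(4096,   16) = 256
+NUM_RCU_NODES = 1 + 4 + 256              = 261
+</pre>
+
+</p><p>This gives the three-level tree mentioned in the overview:
+a root, four non-leaf <tt>rcu_node</tt> structures below it, and
+256 leaves, each handling up to 16 CPUs.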
+
+<h3><a name="The rcu_data Structure">
+The <tt>rcu_data</tt> Structure</a></h3>
+
+<p>The <tt>rcu_data</tt> structure maintains the per-CPU state for the
+corresponding flavor of RCU.
+The fields in this structure may be accessed only from the corresponding
+CPU (and from tracing) unless otherwise stated.
+This structure is the
+focus of quiescent-state detection and RCU callback queuing.
+It also tracks its relationship to the corresponding leaf
+<tt>rcu_node</tt> structure to allow more-efficient
+propagation of quiescent states up the <tt>rcu_node</tt>
+combining tree.
+Like the <tt>rcu_node</tt> structure, it provides a local
+copy of the grace-period information to allow synchronized
+access to this information from the corresponding CPU
+essentially for free.
+Finally, this structure records past dyntick-idle state
+for the corresponding CPU and also tracks statistics.
+
+</p><p>The <tt>rcu_data</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+
+<h5>Connection to Other Data Structures</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   int cpu;
+  2   struct rcu_state *rsp;
+  3   struct rcu_node *mynode;
+  4   struct rcu_dynticks *dynticks;
+  5   unsigned long grpmask;
+  6   bool beenonline;
+</pre>
+
+<p>The <tt>-&gt;cpu</tt> field contains the number of the
+corresponding CPU, the <tt>-&gt;rsp</tt> pointer references
+the corresponding <tt>rcu_state</tt> structure (and is most frequently
+used to locate the name of the corresponding flavor of RCU for tracing),
+and the <tt>-&gt;mynode</tt> field references the corresponding
+<tt>rcu_node</tt> structure.
+The <tt>-&gt;mynode</tt> field is used to propagate quiescent states
+up the combining tree.
+<p>The <tt>-&gt;dynticks</tt> pointer references the
+<tt>rcu_dynticks</tt> structure corresponding to this
+CPU.
+Recall that a single per-CPU instance of the <tt>rcu_dynticks</tt>
+structure is shared among all flavors of RCU.
+These first four fields are constant and therefore require no
+synchronization.
+
+</p><p>The <tt>-&gt;grpmask</tt> field indicates the bit in
+the <tt>-&gt;mynode-&gt;qsmask</tt> corresponding to this
+<tt>rcu_data</tt> structure, and is also used when propagating
+quiescent states.
+The <tt>-&gt;beenonline</tt> flag is set whenever the corresponding
+CPU comes online, which means that the debugfs tracing need not dump
+out any <tt>rcu_data</tt> structure for which this flag is not set.
+
+<h5>Quiescent-State and Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long completed;
+  2   unsigned long gpnum;
+  3   bool cpu_no_qs;
+  4   bool core_needs_qs;
+  5   bool gpwrap;
+  6   unsigned long rcu_qs_ctr_snap;
+</pre>
+
+<p>The <tt>completed</tt> and <tt>gpnum</tt>
+fields are the counterparts of the fields of the same name
+in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
+They may each lag up to one behind their <tt>rcu_node</tt>
+counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
+<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
+arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
+will catch up upon exit from dyntick-idle mode).
+If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
+<tt>-&gt;completed</tt> fields are equal, then this <tt>rcu_data</tt>
+structure believes that RCU is idle.
+Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
+structures,
+the <tt>-&gt;gpnum</tt> field will be one greater than the
+<tt>-&gt;completed</tt> field, with <tt>-&gt;gpnum</tt>
+indicating which grace period this <tt>rcu_data</tt> believes
+is still being waited for.
+
+<p>@@QQ@@
+All this replication of the grace period numbers can only cause
+massive confusion.
+Why not just keep a global pair of counters and be done with it???
+<p>@@QQA@@
+Because if there was only a single global pair of grace-period numbers,
+there would need to be a single global lock to allow safely accessing
+and updating them.
+And if we are not going to have a single global lock, we need to carefully
+manage the numbers on a per-node basis.
+Recall from the answer to a previous Quick Quiz that the consequences
+of applying a previously sampled quiescent state to the wrong
+grace period are quite severe.
+<p>@@QQE@@
+
+</p><p>The <tt>-&gt;cpu_no_qs</tt> flag indicates that the
+CPU has not yet passed through a quiescent state,
+while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
+RCU core needs a quiescent state from the corresponding CPU.
+The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
+CPU has remained idle for so long that the <tt>completed</tt>
+and <tt>gpnum</tt> counters are in danger of overflow, which
+will cause the CPU to disregard the values of its counters on
+its next exit from idle.
+Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
+cases where a given operation has resulted in a quiescent state
+for all flavors of RCU, for example, <tt>cond_resched_rcu_qs()</tt>.
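+
+</p><p>In sketch form, the interplay of the first two flags might look
+as follows; this helper is illustrative only and is not actual kernel
+code:
+
+<pre>
+/* Hypothetical sketch, not the kernel's implementation. */
+static bool sketch_should_report_qs(struct rcu_data *rdp)
+{
+  /* Report only if RCU needs a QS and this CPU has passed through one. */
+  return rdp-&gt;core_needs_qs &amp;&amp; !rdp-&gt;cpu_no_qs;
+}
+</pre>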
+
+<h5>RCU Callback Handling</h5>
+
+<p>In the absence of CPU-hotplug events, RCU callbacks are invoked by
+the same CPU that registered them.
+This is strictly a cache-locality optimization: callbacks can and
+do get invoked on CPUs other than the one that registered them.
+After all, if the CPU that registered a given callback has gone
+offline before the callback can be invoked, there really is no other
+choice.
+
+</p><p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+ 1 struct rcu_head *nxtlist;
+ 2 struct rcu_head **nxttail[RCU_NEXT_SIZE];
+ 3 unsigned long nxtcompleted[RCU_NEXT_SIZE];
+ 4 long qlen_lazy;
+ 5 long qlen;
+ 6 long qlen_last_fqs_check;
+ 7 unsigned long n_force_qs_snap;
+ 8 unsigned long n_cbs_invoked;
+ 9 unsigned long n_cbs_orphaned;
+10 unsigned long n_cbs_adopted;
+11 long blimit;
+</pre>
+
+<p>The <tt>-&gt;nxtlist</tt> pointer and the
+<tt>-&gt;nxttail[]</tt> array form a four-segment list with
+older callbacks near the head and newer ones near the tail.
+Each segment contains callbacks with the corresponding relationship
+to the current grace period.
+The pointer out of the end of each of the four segments is referenced
+by the element of the <tt>-&gt;nxttail[]</tt> array indexed by
+<tt>RCU_DONE_TAIL</tt> (for callbacks handled by a prior grace period),
+<tt>RCU_WAIT_TAIL</tt> (for callbacks waiting on the current grace period),
+<tt>RCU_NEXT_READY_TAIL</tt> (for callbacks that will wait on the next
+grace period), and
+<tt>RCU_NEXT_TAIL</tt> (for callbacks that are not yet associated
+with a specific grace period)
+respectively, as shown in the following figure.
+
+</p><p><img src="nxtlist.svg" alt="nxtlist.svg" width="40%">
+
+</p><p>In this figure, the <tt>-&gt;nxtlist</tt> pointer references the
+first
+RCU callback in the list.
+The <tt>-&gt;nxttail[RCU_DONE_TAIL]</tt> array element references
+the <tt>-&gt;nxtlist</tt> pointer itself, indicating that none
+of the callbacks is ready to invoke.
+The <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt> array element references callback
+CB&nbsp;2's <tt>-&gt;next</tt> pointer, which indicates that
+CB&nbsp;1 and CB&nbsp;2 are both waiting on the current grace period.
+The <tt>-&gt;nxttail[RCU_NEXT_READY_TAIL]</tt> array element
+references the same RCU callback that <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt>
+does, which indicates that there are no callbacks waiting on the next
+RCU grace period.
+The <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element references
+CB&nbsp;4's <tt>-&gt;next</tt> pointer, indicating that all the
+remaining RCU callbacks have not yet been assigned to an RCU grace
+period.
+Note that the <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element
+always references the last RCU callback's <tt>-&gt;next</tt> pointer
+unless the callback list is empty, in which case it references
+the <tt>-&gt;nxtlist</tt> pointer.
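+
+</p><p>In sketch form, enqueuing a new callback appends it at the
+<tt>RCU_NEXT_TAIL</tt> end of this list; the helper below is
+illustrative only, not the kernel's <tt>call_rcu()</tt> implementation
+(which also disables interrupts and handles lazy accounting):
+
+<pre>
+/* Hypothetical sketch, not the kernel's implementation. */
+static void sketch_enqueue_cb(struct rcu_data *rdp, struct rcu_head *rhp)
+{
+  rhp-&gt;next = NULL;
+  *rdp-&gt;nxttail[RCU_NEXT_TAIL] = rhp;        /* link after current last CB */
+  rdp-&gt;nxttail[RCU_NEXT_TAIL] = &amp;rhp-&gt;next;  /* tail now references new CB */
+  rdp-&gt;qlen++;
+}
+</pre>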
+
+</p><p>CPUs advance their callbacks from the
+<tt>RCU_NEXT_TAIL</tt> to the <tt>RCU_NEXT_READY_TAIL</tt> to the
+<tt>RCU_WAIT_TAIL</tt> to the <tt>RCU_DONE_TAIL</tt> list segments
+as grace periods advance.
+The CPU advances the callbacks in its <tt>rcu_data</tt> structure
+whenever it notices that another RCU grace period has completed.
+The CPU detects the completion of an RCU grace period by noticing
+that the value of its <tt>rcu_data</tt> structure's
+<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>rcu_node</tt> structure.
+Recall that each <tt>rcu_node</tt> structure's
+<tt>-&gt;completed</tt> field is updated at the end of each
+grace period.
+
+</p><p>The <tt>-&gt;nxtcompleted[]</tt> array records grace-period
+numbers corresponding to the list segments.
+This allows CPUs that go idle for extended periods to determine
+which of their callbacks are ready to be invoked after reawakening.
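+
+</p><p>A simplified sketch of advancing callbacks after a grace period
+completes might look as follows; the real kernel code also collapses
+tail pointers and handles several corner cases, so this is
+illustrative only:
+
+<pre>
+/* Hypothetical sketch, not the kernel's implementation. */
+static void sketch_advance_cbs(struct rcu_data *rdp, struct rcu_node *rnp)
+{
+  int i;
+
+  if (rdp-&gt;completed == rnp-&gt;completed)
+    return;  /* No new grace period has completed. */
+  /* Segments whose grace period has completed become "done". */
+  for (i = RCU_WAIT_TAIL; i &lt; RCU_NEXT_TAIL; i++) {
+    /* ULONG_CMP_LT() is the kernel's wrap-safe counter comparison. */
+    if (ULONG_CMP_LT(rnp-&gt;completed, rdp-&gt;nxtcompleted[i]))
+      break;  /* This segment's grace period has not yet completed. */
+    rdp-&gt;nxttail[RCU_DONE_TAIL] = rdp-&gt;nxttail[i];
+  }
+  rdp-&gt;completed = rnp-&gt;completed;
+}
+</pre>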
+
+</p><p>The <tt>-&gt;qlen</tt> counter contains the number of
+callbacks in <tt>-&gt;nxtlist</tt>, and the
+<tt>-&gt;qlen_lazy</tt> counter contains the number of those callbacks that
+are known to only free memory, and whose invocation can therefore
+be safely deferred.
+The <tt>-&gt;qlen_last_fqs_check</tt> and
+<tt>-&gt;n_force_qs_snap</tt> fields coordinate the forcing of quiescent
+states from <tt>call_rcu()</tt> and friends when callback
+lists grow excessively long.
+
+</p><p>The <tt>-&gt;n_cbs_invoked</tt>,
+<tt>-&gt;n_cbs_orphaned</tt>, and <tt>-&gt;n_cbs_adopted</tt>
+fields count the number of callbacks invoked,
+sent to other CPUs when this CPU goes offline,
+and received from other CPUs when those other CPUs go offline.
+Finally, the <tt>-&gt;blimit</tt> counter is the maximum number of
+RCU callbacks that may be invoked at a given time.
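+
+</p><p>In sketch form, callback invocation walks the &ldquo;done&rdquo;
+segment of the list and stops after <tt>-&gt;blimit</tt> callbacks;
+this helper is illustrative only and is not the kernel's
+<tt>rcu_do_batch()</tt> (which also fixes up the <tt>-&gt;nxttail[]</tt>
+pointers and adjusts <tt>-&gt;qlen_lazy</tt>):
+
+<pre>
+/* Hypothetical sketch, not the kernel's implementation. */
+static void sketch_invoke_cbs(struct rcu_data *rdp)
+{
+  struct rcu_head *rhp = rdp-&gt;nxtlist;
+  struct rcu_head *done_end = *rdp-&gt;nxttail[RCU_DONE_TAIL];
+  long count = 0;
+
+  /* Walk only the "done" segment, honoring the -&gt;blimit batch limit. */
+  while (rhp != done_end &amp;&amp; count &lt; rdp-&gt;blimit) {
+    struct rcu_head *next = rhp-&gt;next;
+
+    rhp-&gt;func(rhp);        /* invoke the callback */
+    rdp-&gt;n_cbs_invoked++;
+    rdp-&gt;qlen--;
+    count++;
+    rhp = next;
+  }
+  rdp-&gt;nxtlist = rhp;      /* remaining callbacks stay queued */
+}
+</pre>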
+
+<h5>Dyntick-Idle Handling</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   int dynticks_snap;
+  2   unsigned long dynticks_fqs;
+</pre>
+
+The <tt>-&gt;dynticks_snap</tt> field is used to take a snapshot
+of the corresponding CPU's dyntick-idle state when forcing
+quiescent states, and is therefore accessed from other CPUs.
+Finally, the <tt>-&gt;dynticks_fqs</tt> field is used to
+count the number of times this CPU is determined to be in
+dyntick-idle state, and is used for tracing and debugging purposes.
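+
+</p><p>In sketch form, forcing quiescent states snapshots each CPU's
+<tt>-&gt;dynticks</tt> counter and later rechecks it; the helper below
+is illustrative only and omits the memory ordering and wrap handling
+of the real code:
+
+<pre>
+/* Hypothetical sketch, not the kernel's implementation. */
+static bool sketch_cpu_was_idle(struct rcu_data *rdp)
+{
+  int cur = atomic_read(&amp;rdp-&gt;dynticks-&gt;dynticks);
+
+  /* Even snapshot ==&gt; idle then; changed counter ==&gt; idle since. */
+  if (!(rdp-&gt;dynticks_snap &amp; 0x1) || cur != rdp-&gt;dynticks_snap) {
+    rdp-&gt;dynticks_fqs++;
+    return true;
+  }
+  return false;
+}
+</pre>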
+
+<h3><a name="The rcu_dynticks Structure">
+The <tt>rcu_dynticks</tt> Structure</a></h3>
+
+<p>The <tt>rcu_dynticks</tt> structure maintains the per-CPU dyntick-idle state
+for the corresponding CPU.
+Unlike the other structures, <tt>rcu_dynticks</tt> is not
+replicated over the different flavors of RCU.
+The fields in this structure may be accessed only from the corresponding
+CPU (and from tracing) unless otherwise stated.
+Its fields are as follows:
+
+<pre>
+  1   int dynticks_nesting;
+  2   int dynticks_nmi_nesting;
+  3   atomic_t dynticks;
+</pre>
+
+<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
+nesting depth of normal interrupts.
+In addition, this counter is incremented when exiting dyntick-idle
+mode and decremented when entering it.
+This counter can therefore be thought of as counting the number
+of reasons why this CPU cannot be permitted to enter dyntick-idle
+mode, aside from non-maskable interrupts (NMIs).
+NMIs are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
+field, except that NMIs that interrupt non-dyntick-idle execution
+are not counted.
+
+</p><p>Finally, the <tt>-&gt;dynticks</tt> field counts the corresponding
+CPU's transitions to and from dyntick-idle mode, so that this counter
+has an even value when the CPU is in dyntick-idle mode and an odd
+value otherwise.
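+
+</p><p>For example, entering dyntick-idle mode might look as follows in
+sketch form; this is illustrative only, and the real code adds memory
+barriers and additional sanity checks:
+
+<pre>
+/* Hypothetical sketch, not the kernel's implementation. */
+static void sketch_enter_idle(struct rcu_dynticks *rdtp)
+{
+  if (--rdtp-&gt;dynticks_nesting == 0) {
+    atomic_inc(&amp;rdtp-&gt;dynticks);  /* even value ==&gt; dyntick idle */
+    WARN_ON_ONCE(atomic_read(&amp;rdtp-&gt;dynticks) &amp; 0x1);
+  }
+}
+</pre>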
+
+<p>@@QQ@@
+Why not just count all NMIs?
+Wouldn't that be simpler and less error prone?
+<p>@@QQA@@
+It seems simpler only until you think hard about how to go about
+updating the <tt>rcu_dynticks</tt> structure's
+<tt>-&gt;dynticks</tt> field.
+<p>@@QQE@@
+
+</p><p>Additional fields are present for some special-purpose
+builds, and are discussed separately.
+
+<h3><a name="The rcu_head Structure">
+The <tt>rcu_head</tt> Structure</a></h3>
+
+<p>Each <tt>rcu_head</tt> structure represents an RCU callback.
+These structures are normally embedded within RCU-protected data
+structures whose algorithms use asynchronous grace periods.
+In contrast, when using algorithms that block waiting for RCU grace periods,
+RCU users need not provide <tt>rcu_head</tt> structures.
+
+</p><p>The <tt>rcu_head</tt> structure has fields as follows:
+
+<pre>
+  1   struct rcu_head *next;
+  2   void (*func)(struct rcu_head *head);
+</pre>
+
+<p>The <tt>-&gt;next</tt> field is used
+to link the <tt>rcu_head</tt> structures together in the
+lists within the <tt>rcu_data</tt> structures.
+The <tt>-&gt;func</tt> field is a pointer to the function
+to be called when the callback is ready to be invoked, and
+this function is passed a pointer to the <tt>rcu_head</tt>
+structure.
+However, <tt>kfree_rcu()</tt> uses the <tt>-&gt;func</tt>
+field to record the offset of the <tt>rcu_head</tt>
+structure within the enclosing RCU-protected data structure.
+
+</p><p>Both of these fields are used internally by RCU.
+From the viewpoint of RCU users, this structure is an
+opaque &ldquo;cookie&rdquo;.
+
+<p>@@QQ@@
+Given that the callback function <tt>-&gt;func</tt>
+is passed a pointer to the <tt>rcu_head</tt> structure,
+how is that function supposed to find the beginning of the
+enclosing RCU-protected data structure?
+<p>@@QQA@@
+In actual practice, there is a separate callback function per
+type of RCU-protected data structure.
+The callback function can therefore use the <tt>container_of()</tt>
+macro in the Linux kernel (or other pointer-manipulation facilities
+in other software environments) to find the beginning of the
+enclosing structure.
+<p>@@QQE@@
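+
+<p>For example, a sketch of such a callback might look as follows;
+the <tt>struct foo</tt> type and <tt>foo_reclaim()</tt> function are
+hypothetical, shown only to illustrate the pattern:
+
+<pre>
+/* Hypothetical sketch illustrating the container_of() pattern. */
+struct foo {
+  int data;
+  struct rcu_head rh;
+};
+
+static void foo_reclaim(struct rcu_head *rhp)
+{
+  struct foo *fp = container_of(rhp, struct foo, rh);
+
+  kfree(fp);
+}
+
+/* The callback is registered via call_rcu(&amp;fp-&gt;rh, foo_reclaim); */
+</pre>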
+
+<h3><a name="RCU-Specific Fields in the task_struct Structure">
+RCU-Specific Fields in the <tt>task_struct</tt> Structure</a></h3>
+
+<p>The <tt>CONFIG_PREEMPT_RCU</tt> and <tt>CONFIG_TASKS_RCU</tt>
+implementations use some
+additional fields in the <tt>task_struct</tt> structure:
+
+<pre>
+ 1 #ifdef CONFIG_PREEMPT_RCU
+ 2   int rcu_read_lock_nesting;
+ 3   union rcu_special rcu_read_unlock_special;
+ 4   struct list_head rcu_node_entry;
+ 5   struct rcu_node *rcu_blocked_node;
+ 6 #endif /* #ifdef CONFIG_PREEMPT_RCU */
+ 7 #ifdef CONFIG_TASKS_RCU
+ 8   unsigned long rcu_tasks_nvcsw;
+ 9   bool rcu_tasks_holdout;
+10   struct list_head rcu_tasks_holdout_list;
+11   int rcu_tasks_idle_cpu;
+12 #endif /* #ifdef CONFIG_TASKS_RCU */
+</pre>
+
+<p>The <tt>-&gt;rcu_read_lock_nesting</tt> field records the
+nesting level for RCU read-side critical sections, and
+the <tt>-&gt;rcu_read_unlock_special</tt> field is a bitmask
+that records special conditions that require <tt>rcu_read_unlock()</tt>
+to do additional work.
+The <tt>-&gt;rcu_node_entry</tt> field is used to form lists of
+tasks that have blocked within preemptible-RCU read-side critical
+sections and the <tt>-&gt;rcu_blocked_node</tt> field references
+the <tt>rcu_node</tt> structure whose list this task is a member of,
+or <tt>NULL</tt> if it is not blocked within a preemptible-RCU
+read-side critical section.
+
+<p>The <tt>-&gt;rcu_tasks_nvcsw</tt> field tracks the number of
+voluntary context switches that this task had undergone at the
+beginning of the current tasks-RCU grace period,
+<tt>-&gt;rcu_tasks_holdout</tt> is set if the current tasks-RCU
+grace period is waiting on this task, <tt>-&gt;rcu_tasks_holdout_list</tt>
+is a list element enqueuing this task on the holdout list,
+and <tt>-&gt;rcu_tasks_idle_cpu</tt> tracks which CPU this
+idle task is running on, but only if the task is currently running,
+that is, if the CPU is currently idle.
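+
+<p>In sketch form, preemptible RCU's read-side primitives manipulate
+the first two of these fields roughly as follows; this is illustrative
+only, and the real implementation adds compiler barriers, ordering,
+and special-case handling:
+
+<pre>
+/* Hypothetical sketch, not the kernel's implementation. */
+static void sketch_rcu_read_lock(void)
+{
+  current-&gt;rcu_read_lock_nesting++;
+}
+
+static void sketch_rcu_read_unlock(void)
+{
+  if (--current-&gt;rcu_read_lock_nesting == 0 &amp;&amp;
+      READ_ONCE(current-&gt;rcu_read_unlock_special.s)) {
+    /* Outermost rcu_read_unlock() with special conditions pending:   */
+    /* the real code removes the task from its rcu_node blocked list, */
+    /* deboosts, and so on.                                           */
+  }
+}
+</pre>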
+
+<p>@@QQ@@
+Why is <tt>-&gt;rcu_boosted</tt> required, given that there is
+a <tt>RCU_READ_UNLOCK_BOOSTED</tt> bit in
+<tt>-&gt;rcu_read_unlock_special</tt>?
+<p>@@QQA@@
+The <tt>-&gt;rcu_read_unlock_special</tt> field may only be
+updated by the task itself.
+By definition, RCU priority boosting must be carried out by some
+other task.
+This other task cannot safely update the boosted task's
+<tt>-&gt;rcu_read_unlock_special</tt> field without the use of
+expensive atomic instructions.
+The <tt>-&gt;rcu_boosted</tt> field is therefore used by the
+boosting task to let the boosted task know that it has been boosted.
+The boosted task makes use of the
+<tt>RCU_READ_UNLOCK_BOOSTED</tt> bit in
+<tt>-&gt;rcu_read_unlock_special</tt>
+when deboosting itself.
+<p>@@QQE@@
+
+<h3><a name="Accessor Functions">
+Accessor Functions</a></h3>
+
+<p>The following listing shows the
+<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first()</tt>,
+<tt>rcu_for_each_nonleaf_node_breadth_first()</tt>, and
+<tt>rcu_for_each_leaf_node()</tt> function and macros:
+
+<pre>
+  1 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+  2 {
+  3   return &amp;rsp-&gt;node[0];
+  4 }
+  5
+  6 #define rcu_for_each_node_breadth_first(rsp, rnp) \
+  7   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
+  8        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+  9
+ 10 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
+ 11   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
+ 12        (rnp) &lt; (rsp)-&gt;level[NUM_RCU_LVLS - 1]; (rnp)++)
+ 13
+ 14 #define rcu_for_each_leaf_node(rsp, rnp) \
+ 15   for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
+ 16        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+</pre>
+
+<p>The <tt>rcu_get_root()</tt> function simply returns a pointer to the
+first element of the specified <tt>rcu_state</tt> structure's
+<tt>-&gt;node[]</tt> array, which is the root <tt>rcu_node</tt>
+structure.
+
+</p><p>As noted earlier, the <tt>rcu_for_each_node_breadth_first()</tt>
+macro takes advantage of the layout of the <tt>rcu_node</tt>
+structures in the <tt>rcu_state</tt> structure's
+<tt>-&gt;node[]</tt> array, performing a breadth-first traversal by
+simply traversing the array in order.
+The <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> macro operates
+similarly, but traverses only the first part of the array, thus excluding
+the leaf <tt>rcu_node</tt> structures.
+Finally, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
+the last part of the array, thus traversing only the leaf
+<tt>rcu_node</tt> structures.
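+
+</p><p>For example, a sketch of a scan over the leaf <tt>rcu_node</tt>
+structures might look as follows; the
+<tt>sketch_count_blocked_leaves()</tt> helper is hypothetical:
+
+<pre>
+/* Hypothetical sketch, not actual kernel code. */
+static int sketch_count_blocked_leaves(struct rcu_state *rsp)
+{
+  struct rcu_node *rnp;
+  int n = 0;
+
+  rcu_for_each_leaf_node(rsp, rnp)
+    if (rnp-&gt;qsmask)      /* some CPU has not yet reported a QS */
+      n++;
+  return n;
+}
+</pre>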
+
+<p>@@QQ@@
+What do <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> and
+<tt>rcu_for_each_leaf_node()</tt> do if the <tt>rcu_node</tt> tree
+contains only a single node?
+<p>@@QQA@@
+In the single-node case,
+<tt>rcu_for_each_nonleaf_node_breadth_first()</tt> is a no-op
+and <tt>rcu_for_each_leaf_node()</tt> traverses the single node.
+<p>@@QQE@@
+
+<h3><a name="Summary">
+Summary</a></h3>
+
+<p>So each flavor of RCU is represented by an <tt>rcu_state</tt> structure,
+which contains a combining tree of <tt>rcu_node</tt> and
+<tt>rcu_data</tt> structures.
+Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle
+state is tracked by an <tt>rcu_dynticks</tt> structure.
+
+</p><p>If you made it this far, you are well prepared to read the code
+walkthroughs in the other articles in this series.
+
+<h3><a name="Acknowledgments">
+Acknowledgments</a></h3>
+
+<p>I owe thanks to Cyrill Gorcunov, Mathieu Desnoyers, Dhaval Giani, Paul
+Turner, Abhishek Srivastava, Matt Kowalczyk, and Serge Hallyn
+for helping me get this document into a more human-readable state.
+
+<h3><a name="Legal Statement">
+Legal Statement</a></h3>
+
+<p>This work represents the view of the author and does not necessarily
+represent the view of IBM.
+
+</p><p>Linux is a registered trademark of Linus Torvalds.
+
+</p><p>Other company, product, and service names may be trademarks or
+service marks of others.
+
+
+<p>@@QQAL@@
+
+
+</body></html>
diff --git a/Documentation/RCU/Design/Data-Structures/HugeTreeClassicRCU.svg b/Documentation/RCU/Design/Data-Structures/HugeTreeClassicRCU.svg
new file mode 100644 (file)
index 0000000..2bf12b4
--- /dev/null
@@ -0,0 +1,939 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:37:22 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="15.1in"
+   height="11.2in"
+   viewBox="-66 -66 18087 13407"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="HugeTreeClassicRCU.fig">
+  <metadata
+     id="metadata224">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs222">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3982"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1134"
+     inkscape:window-height="789"
+     id="namedview220"
+     showgrid="false"
+     inkscape:zoom="0.60515873"
+     inkscape:cx="679.5"
+     inkscape:cy="504"
+     inkscape:window-x="786"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="17100"
+       height="8325"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="11025"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="4275"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="5400"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="9900"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="14400"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="900"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect18" />
+    <!-- Line: box -->
+    <rect
+       x="7650"
+       y="900"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect20" />
+    <!-- Line -->
+    <polyline
+       points="3150,9225 3150,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 3150 9225 - 3150 7560-->
+    <!-- Circle -->
+    <circle
+       cx="8550"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="9000"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="9450"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle30" />
+    <!-- Line -->
+    <polyline
+       points="6750,6300 8250,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 6750 6300 - 8391 4890-->
+    <!-- Line -->
+    <polyline
+       points="11250,6300 9747,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 11250 6300 - 9606 4890-->
+    <!-- Circle -->
+    <circle
+       cx="13950"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle40" />
+    <!-- Circle -->
+    <circle
+       cx="13500"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle42" />
+    <!-- Circle -->
+    <circle
+       cx="13050"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle44" />
+    <!-- Circle -->
+    <circle
+       cx="9450"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle46" />
+    <!-- Circle -->
+    <circle
+       cx="9000"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle48" />
+    <!-- Circle -->
+    <circle
+       cx="8550"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle50" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle52" />
+    <!-- Circle -->
+    <circle
+       cx="4500"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle54" />
+    <!-- Circle -->
+    <circle
+       cx="4050"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle56" />
+    <!-- Circle -->
+    <circle
+       cx="1800"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle58" />
+    <!-- Circle -->
+    <circle
+       cx="2250"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle60" />
+    <!-- Circle -->
+    <circle
+       cx="2700"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle62" />
+    <!-- Circle -->
+    <circle
+       cx="15300"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle64" />
+    <!-- Circle -->
+    <circle
+       cx="15750"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle66" />
+    <!-- Circle -->
+    <circle
+       cx="16200"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="10800"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="11250"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="11700"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="6300"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="6750"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="7200"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle80" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect82" />
+    <!-- Line: box -->
+    <rect
+       x="1800"
+       y="9225"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect84" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect86" />
+    <!-- Line: box -->
+    <rect
+       x="6300"
+       y="9270"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect88" />
+    <!-- Line: box -->
+    <rect
+       x="8955"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect90" />
+    <!-- Line: box -->
+    <rect
+       x="10755"
+       y="9270"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect92" />
+    <!-- Line: box -->
+    <rect
+       x="13455"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect94" />
+    <!-- Line: box -->
+    <rect
+       x="15255"
+       y="9270"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect96" />
+    <!-- Line -->
+    <polyline
+       points="11700,3600 10197,2310 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline98" />
+    <!-- Arrowhead on XXXpoint 11700 3600 - 10056 2190-->
+    <!-- Line -->
+    <polyline
+       points="6300,3600 7800,2310 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline102" />
+    <!-- Arrowhead on XXXpoint 6300 3600 - 7941 2190-->
+    <!-- Line -->
+    <polyline
+       points="3150,6300 4650,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline106" />
+    <!-- Arrowhead on XXXpoint 3150 6300 - 4791 4890-->
+    <!-- Line -->
+    <polyline
+       points="14850,6300 13347,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline110" />
+    <!-- Arrowhead on XXXpoint 14850 6300 - 13206 4890-->
+    <!-- Line -->
+    <polyline
+       points="1350,11475 1350,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline114" />
+    <!-- Arrowhead on XXXpoint 1350 11475 - 1350 7560-->
+    <!-- Line -->
+    <polyline
+       points="16650,9225 16650,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline118" />
+    <!-- Arrowhead on XXXpoint 16650 9225 - 16650 7560-->
+    <!-- Line -->
+    <polyline
+       points="14850,11475 14850,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline122" />
+    <!-- Arrowhead on XXXpoint 14850 11475 - 14850 7560-->
+    <!-- Line -->
+    <polyline
+       points="12150,9225 12150,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline126" />
+    <!-- Arrowhead on XXXpoint 12150 9225 - 12150 7560-->
+    <!-- Line -->
+    <polyline
+       points="10350,11475 10350,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline130" />
+    <!-- Arrowhead on XXXpoint 10350 11475 - 10350 7560-->
+    <!-- Line -->
+    <polyline
+       points="7650,9225 7650,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline134" />
+    <!-- Arrowhead on XXXpoint 7650 9225 - 7650 7560-->
+    <!-- Line -->
+    <polyline
+       points="5850,11475 5850,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline138" />
+    <!-- Arrowhead on XXXpoint 5850 11475 - 5850 7560-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12375"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text142">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12375"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5625"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text146">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5625"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text148">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6750"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text150">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6750"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text152">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11250"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text154">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11250"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text156">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15750"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text158">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15750"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text160">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text164">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text166">CPU 0</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text168">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text170">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="10800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text172">CPU 15</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="9675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text174">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="10125"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text176">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5850"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text178">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5850"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text180">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5850"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text182">CPU 21823</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7650"
+       y="10845"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text184">CPU 21839</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7650"
+       y="10170"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text186">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7650"
+       y="9720"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text188">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10305"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text190">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10305"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text192">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10305"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text194">CPU 43679</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="10845"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text196">CPU 43695</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="10170"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text198">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="9720"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text200">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14805"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text202">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14805"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text204">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14805"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text206">CPU 65519</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="16605"
+       y="10845"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text208">CPU 65535</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="16605"
+       y="10170"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text210">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="16605"
+       y="9720"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text212">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="675"
+       y="450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="start"
+       id="text214">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9000"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text216">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9000"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text218">rcu_node</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/TreeLevel.svg b/Documentation/RCU/Design/Data-Structures/TreeLevel.svg
new file mode 100644 (file)
index 0000000..7a7eb3b
--- /dev/null
@@ -0,0 +1,828 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:41:29 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="17.7in"
+   height="10.4in"
+   viewBox="-66 -66 21237 12507"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="TreeLevel.fig">
+  <metadata
+     id="metadata216">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs214">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3974"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1023"
+     inkscape:window-height="1148"
+     id="namedview212"
+     showgrid="false"
+     inkscape:zoom="0.55869424"
+     inkscape:cx="796.50006"
+     inkscape:cy="467.99997"
+     inkscape:window-x="897"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="20655"
+       height="8325"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="14130"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="7380"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="8505"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="13005"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="17505"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="4005"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect18" />
+    <!-- Line: box -->
+    <rect
+       x="10755"
+       y="900"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect20" />
+    <!-- Line -->
+    <polyline
+       points="6255,9225 6255,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 6255 9225 - 6255 7560-->
+    <!-- Circle -->
+    <circle
+       cx="11655"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="12105"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="12555"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle30" />
+    <!-- Line -->
+    <polyline
+       points="9855,6300 11355,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 9855 6300 - 11496 4890-->
+    <!-- Line -->
+    <polyline
+       points="14355,6300 12852,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 14355 6300 - 12711 4890-->
+    <!-- Circle -->
+    <circle
+       cx="17055"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle40" />
+    <!-- Circle -->
+    <circle
+       cx="16605"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle42" />
+    <!-- Circle -->
+    <circle
+       cx="16155"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle44" />
+    <!-- Circle -->
+    <circle
+       cx="12555"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle46" />
+    <!-- Circle -->
+    <circle
+       cx="12105"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle48" />
+    <!-- Circle -->
+    <circle
+       cx="11655"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle50" />
+    <!-- Circle -->
+    <circle
+       cx="8055"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle52" />
+    <!-- Circle -->
+    <circle
+       cx="7605"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle54" />
+    <!-- Circle -->
+    <circle
+       cx="7155"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle56" />
+    <!-- Circle -->
+    <circle
+       cx="4905"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle58" />
+    <!-- Circle -->
+    <circle
+       cx="5355"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle60" />
+    <!-- Circle -->
+    <circle
+       cx="5805"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle62" />
+    <!-- Circle -->
+    <circle
+       cx="18405"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle64" />
+    <!-- Circle -->
+    <circle
+       cx="18855"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle66" />
+    <!-- Circle -->
+    <circle
+       cx="19305"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="13905"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="14355"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="14805"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="9405"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="9855"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="10305"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle80" />
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="1125"
+       width="3150"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:21; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect82" />
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="2250"
+       width="3150"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:21; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect84" />
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="3375"
+       width="3150"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:21; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect86" />
+    <!-- Line -->
+    <polyline
+       points="14805,3600 13302,2310 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline88" />
+    <!-- Arrowhead on XXXpoint 14805 3600 - 13161 2190-->
+    <!-- Line -->
+    <polyline
+       points="9405,3600 10905,2310 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline92" />
+    <!-- Arrowhead on XXXpoint 9405 3600 - 11046 2190-->
+    <!-- Line -->
+    <polyline
+       points="6255,6300 7755,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline96" />
+    <!-- Arrowhead on XXXpoint 6255 6300 - 7896 4890-->
+    <!-- Line -->
+    <polyline
+       points="17955,6300 16452,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline100" />
+    <!-- Arrowhead on XXXpoint 17955 6300 - 16311 4890-->
+    <!-- Line -->
+    <polyline
+       points="4455,11025 4455,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline104" />
+    <!-- Arrowhead on XXXpoint 4455 11025 - 4455 7560-->
+    <!-- Line -->
+    <polyline
+       points="19755,9225 19755,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline108" />
+    <!-- Arrowhead on XXXpoint 19755 9225 - 19755 7560-->
+    <!-- Line -->
+    <polyline
+       points="17955,11025 17955,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline112" />
+    <!-- Arrowhead on XXXpoint 17955 11025 - 17955 7560-->
+    <!-- Line -->
+    <polyline
+       points="15255,9225 15255,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline116" />
+    <!-- Arrowhead on XXXpoint 15255 9225 - 15255 7560-->
+    <!-- Line -->
+    <polyline
+       points="13455,11025 13455,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline120" />
+    <!-- Arrowhead on XXXpoint 13455 11025 - 13455 7560-->
+    <!-- Line -->
+    <polyline
+       points="10755,9225 10755,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline124" />
+    <!-- Arrowhead on XXXpoint 10755 9225 - 10755 7560-->
+    <!-- Line -->
+    <polyline
+       points="8955,11025 8955,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline128" />
+    <!-- Arrowhead on XXXpoint 8955 11025 - 8955 7560-->
+    <!-- Line: box -->
+    <rect
+       x="12105"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect132" />
+    <!-- Line: box -->
+    <rect
+       x="13905"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect134" />
+    <!-- Line: box -->
+    <rect
+       x="16605"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect136" />
+    <!-- Line: box -->
+    <rect
+       x="18405"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect138" />
+    <!-- Line: box -->
+    <rect
+       x="9405"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect140" />
+    <!-- Line: box -->
+    <rect
+       x="7605"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect142" />
+    <!-- Line: box -->
+    <rect
+       x="4905"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect144" />
+    <!-- Line: box -->
+    <rect
+       x="3105"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect146" />
+    <!-- Line -->
+    <polyline
+       points="3375,1575 10701,1575 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline148" />
+    <!-- Arrowhead on XXXpoint 3375 1575 - 10890 1575-->
+    <!-- Line -->
+    <polyline
+       points="3375,3825 4050,3825 4050,5400 2700,5400 2700,6975 3951,6975 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline152" />
+    <!-- Arrowhead on XXXpoint 2700 6975 - 4140 6975-->
+    <!-- Line -->
+    <polyline
+       points="3375,2700 5175,2700 5175,4275 7326,4275 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline156" />
+    <!-- Arrowhead on XXXpoint 5175 4275 - 7515 4275-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15480"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text160">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15480"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text164">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text166">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9855"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text168">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9855"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text170">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14355"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text172">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14355"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text174">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="18855"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text176">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="18855"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text178">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5355"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text180">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5355"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text182">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text184">-&gt;level[0]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="2925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text186">-&gt;level[1]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text188">-&gt;level[2]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text190">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text192">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6255"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text194">CPU 15</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4455"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text196">CPU 0</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="19755"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text198">CPU 65535</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="17955"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text200">CPU 65519</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15255"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text202">CPU 43695</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="13455"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text204">CPU 43679</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10755"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text206">CPU 21839</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8955"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text208">CPU 21823</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="start"
+       id="text210">struct rcu_state</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/TreeMapping.svg b/Documentation/RCU/Design/Data-Structures/TreeMapping.svg
new file mode 100644 (file)
index 0000000..729cfa9
--- /dev/null
@@ -0,0 +1,305 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:43:22 2015 -->
+
+<!-- Magnification: 1.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="3.1in"
+   height="0.9in"
+   viewBox="-12 -12 3699 1074"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="TreeMapping.fig">
+  <metadata
+     id="metadata66">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs64">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;">
+      <path
+         id="path3836"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Mend"
+       style="overflow:visible;">
+      <path
+         id="path3842"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(0.6) rotate(180) translate(0,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3824"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="991"
+     inkscape:window-height="606"
+     id="namedview62"
+     showgrid="false"
+     inkscape:zoom="3.0752688"
+     inkscape:cx="139.5"
+     inkscape:cy="40.5"
+     inkscape:window-x="891"
+     inkscape:window-y="177"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="3675"
+       height="1050"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="600"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="1125"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="1650"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="2175"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="3225"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect18" />
+    <!-- Line -->
+    <polyline
+       points="675,375 675,150 300,150 300,358 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 300 150 - 300 390-->
+    <!-- Line -->
+    <polyline
+       points="1200,675 1200,900 300,900 300,691 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline24" />
+    <!-- Arrowhead on XXXpoint 300 900 - 300 660-->
+    <!-- Line -->
+    <polyline
+       points="1725,375 1725,150 900,150 900,358 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline28" />
+    <!-- Arrowhead on XXXpoint 900 150 - 900 390-->
+    <!-- Line -->
+    <polyline
+       points="2250,375 2250,75 825,75 825,358 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 825 75 - 825 390-->
+    <!-- Line -->
+    <polyline
+       points="2775,675 2775,900 1425,900 1425,691 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 1425 900 - 1425 660-->
+    <!-- Line -->
+    <polyline
+       points="3300,675 3300,975 1350,975 1350,691 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 1350 975 - 1350 660-->
+    <!-- Line: box -->
+    <rect
+       x="2700"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect44" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="300"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text46">0:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text48">4:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1875"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text50">0:1  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text52">2:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2925"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text54">4:5  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3450"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text56">6:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="825"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text58">0:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3600"
+       y="150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="end"
+       id="text60">struct rcu_state</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/TreeMappingLevel.svg b/Documentation/RCU/Design/Data-Structures/TreeMappingLevel.svg
new file mode 100644 (file)
index 0000000..5b416a4
--- /dev/null
@@ -0,0 +1,380 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:45:19 2015 -->
+
+<!-- Magnification: 1.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="3.1in"
+   height="1.8in"
+   viewBox="-12 -12 3699 2124"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="TreeMappingLevel.svg">
+  <metadata
+     id="metadata98">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs96">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;">
+      <path
+         id="path3868"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1598"
+     inkscape:window-height="1211"
+     id="namedview94"
+     showgrid="false"
+     inkscape:zoom="5.2508961"
+     inkscape:cx="139.5"
+     inkscape:cy="81"
+     inkscape:window-x="840"
+     inkscape:window-y="122"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="3675"
+       height="2100"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="1350"
+       width="750"
+       height="225"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="1575"
+       width="750"
+       height="225"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="1800"
+       width="750"
+       height="225"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Arc -->
+    <path
+       style="stroke:#000000;stroke-width:7;stroke-linecap:butt;"
+       d="M 1800,900 A 118 118  0  0  0  1800  1125 "
+       id="path14" />
+    <!-- Arc -->
+    <path
+       style="stroke:#000000;stroke-width:7;stroke-linecap:butt;"
+       d="M 750,900 A 75 75  0  0  0  750  1050 "
+       id="path16" />
+    <!-- Line -->
+    <polyline
+       points="750,900 750,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline18" />
+    <!-- Arrowhead on XXXpoint 750 900 - 750 660-->
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect22" />
+    <!-- Line: box -->
+    <rect
+       x="600"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect24" />
+    <!-- Line: box -->
+    <rect
+       x="1650"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect26" />
+    <!-- Line: box -->
+    <rect
+       x="2175"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="3225"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect30" />
+    <!-- Line -->
+    <polyline
+       points="675,375 675,150 300,150 300,358 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 300 150 - 300 390-->
+    <!-- Line -->
+    <polyline
+       points="1725,375 1725,150 900,150 900,358 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 900 150 - 900 390-->
+    <!-- Line -->
+    <polyline
+       points="2250,375 2250,75 825,75 825,358 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 825 75 - 825 390-->
+    <!-- Line -->
+    <polyline
+       points="2775,675 2775,975 1425,975 1425,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 1425 975 - 1425 660-->
+    <!-- Line: box -->
+    <rect
+       x="2700"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect48" />
+    <!-- Line: box -->
+    <rect
+       x="1125"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect50" />
+    <!-- Line -->
+    <polyline
+       points="3300,675 3300,1050 1350,1050 1350,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline52" />
+    <!-- Arrowhead on XXXpoint 1350 1050 - 1350 660-->
+    <!-- Line -->
+    <polyline
+       points="825,1425 975,1425 975,1200 225,1200 225,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline56" />
+    <!-- Arrowhead on XXXpoint 225 1200 - 225 660-->
+    <!-- Line -->
+    <polyline
+       points="1200,675 1200,975 300,975 300,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 300 975 - 300 660-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="150"
+       y="1500"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="108"
+       text-anchor="start"
+       id="text64">-&gt;level[0]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="150"
+       y="1725"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="108"
+       text-anchor="start"
+       id="text66">-&gt;level[1]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="150"
+       y="1950"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="108"
+       text-anchor="start"
+       id="text68">-&gt;level[2]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="300"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text70">0:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text72">4:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1875"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text74">0:1  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text76">2:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2925"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text78">4:5  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3450"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text80">6:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="825"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text82">0:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3600"
+       y="150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="end"
+       id="text84">struct rcu_state</text>
+    <!-- Line -->
+    <polyline
+       points="825,1875 1800,1875 1800,1125 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:none"
+       id="polyline86" />
+    <!-- Line -->
+    <polyline
+       points="1800,900 1800,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline88" />
+    <!-- Arrowhead on XXXpoint 1800 900 - 1800 660-->
+    <!-- Line -->
+    <polyline
+       points="825,1650 1200,1650 1200,1125 750,1125 750,1050 "
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline92" />
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/blkd_task.svg b/Documentation/RCU/Design/Data-Structures/blkd_task.svg
new file mode 100644 (file)
index 0000000..00e810b
--- /dev/null
@@ -0,0 +1,843 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="10.1in"
+   height="8.6in"
+   viewBox="-44 -44 12088 10288"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="blkd_task.fig">
+  <metadata
+     id="metadata212">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs210">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3970"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1087"
+     inkscape:window-height="1144"
+     id="namedview208"
+     showgrid="false"
+     inkscape:zoom="1.0495049"
+     inkscape:cx="454.50003"
+     inkscape:cy="387.00003"
+     inkscape:window-x="833"
+     inkscape:window-y="28"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect10" />
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5688,5912 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline12" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+    <polyline
+       points="5714 6068 5704 5822 5598 6044 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline14" />
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4486,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline16" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+    <polyline
+       points="4514 7418 4506 7172 4396 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline18" />
+    <!-- Line -->
+    <polyline
+       points="1040,9300 1476,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+    <polyline
+       points="1504 7418 1496 7172 1386 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline22" />
+    <!-- Line -->
+    <polyline
+       points="2240,8100 2676,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline24" />
+    <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+    <polyline
+       points="2704 6218 2696 5972 2586 6194 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline26" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect30" />
+    <!-- Line -->
+    <polyline
+       points="1350,3450 2350,2590 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+    <!-- Line -->
+    <polyline
+       points="4950,3450 3948,2590 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+    <!-- Line -->
+    <polyline
+       points="4050,6600 4050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+    <!-- Line -->
+    <polyline
+       points="1050,6600 1050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,5400 2250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline48" />
+    <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,8100 2250,6364 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline52" />
+    <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+    <!-- Line -->
+    <polyline
+       points="1050,9300 1050,7564 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline56" />
+    <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4050,7564 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5250,6364 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline64" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle80" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle82" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle84" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect86" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect88" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect90" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect92" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="1650"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect94" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect96" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect98" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect100" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect102" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect104" />
+    <!-- Line -->
+    <polygon
+       points="5550,3450 7350,2850 7350,5100 5550,4350 5550,3450 "
+       style="stroke:#000000;stroke-width:14; stroke-linejoin:miter; stroke-linecap:butt; stroke-dasharray:120 120;fill:#ffbfbf; "
+       id="polygon106" />
+    <!-- Line -->
+    <polyline
+       points="9300,3150 10734,3150 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline108" />
+    <!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150-->
+    <!-- Line: box -->
+    <rect
+       x="10800"
+       y="2850"
+       width="1200"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect112" />
+    <!-- Line -->
+    <polyline
+       points="11400,3600 11400,4284 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline114" />
+    <!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410-->
+    <!-- Line: box -->
+    <rect
+       x="10800"
+       y="4350"
+       width="1200"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect118" />
+    <!-- Line -->
+    <polyline
+       points="11400,5100 11400,5784 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline120" />
+    <!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910-->
+    <!-- Line: box -->
+    <rect
+       x="10800"
+       y="5850"
+       width="1200"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect124" />
+    <!-- Line -->
+    <polyline
+       points="9300,3900 9900,3900 9900,4650 10734,4650 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline126" />
+    <!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650-->
+    <!-- Line -->
+    <polyline
+       points="9300,4650 9600,4650 9600,6150 10734,6150 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline130" />
+    <!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text134">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="1950"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text136">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text138">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text152">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text154">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text156">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text158">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text160">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text164">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text166">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text168">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text170">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text172">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text174">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text176">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text178">rcu_sched</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11400"
+       y="3300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="216"
+       text-anchor="middle"
+       id="text180">T3</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11400"
+       y="4800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="216"
+       text-anchor="middle"
+       id="text182">T2</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11400"
+       y="6300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="216"
+       text-anchor="middle"
+       id="text184">T1</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5400 5250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline186" />
+    <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect190" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="2850"
+       width="1950"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect192" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="3600"
+       width="1950"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect194" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="4350"
+       width="1950"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect196" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text198">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text200">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7500"
+       y="3300"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text202">blkd_tasks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7500"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text204">gp_tasks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7500"
+       y="4800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text206">exp_tasks</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/nxtlist.svg b/Documentation/RCU/Design/Data-Structures/nxtlist.svg
new file mode 100644 (file)
index 0000000..abc4cc7
--- /dev/null
@@ -0,0 +1,396 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:39:46 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="10.4in"
+   height="10.4in"
+   viewBox="-66 -66 12507 12507"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="nxtlist.fig">
+  <metadata
+     id="metadata94">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs92">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3852"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="925"
+     inkscape:window-height="928"
+     id="namedview90"
+     showgrid="false"
+     inkscape:zoom="0.80021373"
+     inkscape:cx="467.99997"
+     inkscape:cy="467.99997"
+     inkscape:window-x="948"
+     inkscape:window-y="73"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="1125"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="2250"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="3375"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="4500"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="0"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="1125"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect18" />
+    <!-- Line -->
+    <polyline
+       points="11475,2250 11475,3276 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 11475 2250 - 11475 3465-->
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="6750"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect24" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="7875"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect26" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="10125"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="11250"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect30" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="3375"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect32" />
+    <!-- Line -->
+    <polyline
+       points="11475,5625 11475,6651 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 11475 5625 - 11475 6840-->
+    <!-- Line -->
+    <polyline
+       points="7875,225 10476,225 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 7875 225 - 10665 225-->
+    <!-- Line -->
+    <polyline
+       points="7875,1350 9675,1350 9675,675 7971,675 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline42" />
+    <!-- Arrowhead on XXXpoint 9675 675 - 7785 675-->
+    <!-- Line -->
+    <polyline
+       points="7875,2475 9675,2475 9675,4725 10476,4725 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 9675 4725 - 10665 4725-->
+    <!-- Line -->
+    <polyline
+       points="7875,3600 9225,3600 9225,5175 10476,5175 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 9225 5175 - 10665 5175-->
+    <!-- Line -->
+    <polyline
+       points="7875,4725 8775,4725 8775,11475 10476,11475 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 8775 11475 - 10665 11475-->
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="4500"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect58" />
+    <!-- Line -->
+    <polyline
+       points="11475,9000 11475,10026 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 11475 9000 - 11475 10215-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text64">nxtlist</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text66">nxttail[RCU_DONE_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="2925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text68">nxttail[RCU_WAIT_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text70">nxttail[RCU_NEXT_READY_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="5175"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text72">nxttail[RCU_NEXT_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="675"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text74">CB 1</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="1800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text76">next</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="7425"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text78">CB 3</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="8550"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text80">next</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="10800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text82">CB 4</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text84">next</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="4050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text86">CB 2</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="5175"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text88">next</text>
+  </g>
+</svg>
index a725f9900ec8962a43dfb888f71f1a2b05efabf0..59acd82e67d4a2bd7e5bcc3efa366d23bf4c1846 100644 (file)
@@ -1,5 +1,5 @@
 <!-- DO NOT HAND EDIT. -->
-<!-- Instead, edit Documentation/RCU/Design/Requirements/Requirements.htmlx and run 'sh htmlqqz.sh Documentation/RCU/Design/Requirements/Requirements' -->
+<!-- Instead, edit Requirements.htmlx and run 'sh htmlqqz.sh Requirements' -->
 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
         "http://www.w3.org/TR/html4/loose.dtd">
         <html>
@@ -2170,6 +2170,14 @@ up to and including systems with 4096 CPUs.
 This real-time requirement motivated the grace-period kthread, which
 also simplified handling of a number of race conditions.
 
+<p>
+RCU must avoid degrading real-time response for CPU-bound threads, whether
+executing in usermode (which is one use case for
+<tt>CONFIG_NO_HZ_FULL=y</tt>) or in the kernel.
+That said, CPU-bound loops in the kernel must execute
+<tt>cond_resched_rcu_qs()</tt> at least once per few tens of milliseconds
+in order to avoid receiving an IPI from RCU.
+
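A minimal sketch of such a CPU-bound kernel loop is shown below; my_scan_thread() and do_scan_step() are illustrative names only, not part of any kernel API:

    #include <linux/kthread.h>
    #include <linux/rcupdate.h>

    static void do_scan_step(void)
    {
            /* Stand-in for one unit of CPU-bound work (hypothetical). */
    }

    static int my_scan_thread(void *unused)
    {
            while (!kthread_should_stop()) {
                    do_scan_step();
                    cond_resched_rcu_qs();  /* report a quiescent state to RCU */
            }
            return 0;
    }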
 <p>
 Finally, RCU's status as a synchronization primitive means that
 any RCU failure can result in arbitrary memory corruption that can be
@@ -2223,6 +2231,8 @@ described in a separate section.
 <li>   <a href="#Sched Flavor">Sched Flavor</a>
 <li>   <a href="#Sleepable RCU">Sleepable RCU</a>
 <li>   <a href="#Tasks RCU">Tasks RCU</a>
+<li>   <a href="#Waiting for Multiple Grace Periods">
+       Waiting for Multiple Grace Periods</a>
 </ol>
 
 <h3><a name="Bottom-Half Flavor">Bottom-Half Flavor</a></h3>
@@ -2472,6 +2482,81 @@ The tasks-RCU API is quite compact, consisting only of
 <tt>synchronize_rcu_tasks()</tt>, and
 <tt>rcu_barrier_tasks()</tt>.
 
+<h3><a name="Waiting for Multiple Grace Periods">
+Waiting for Multiple Grace Periods</a></h3>
+
+<p>
+Perhaps you have an RCU-protected data structure that is accessed from
+RCU read-side critical sections, from softirq handlers, and from
+hardware interrupt handlers.
+That is three flavors of RCU: the normal flavor, the bottom-half flavor,
+and the sched flavor.
+How can you wait for a compound grace period?
+
+<p>
+The best approach is usually to &ldquo;just say no!&rdquo; and
+insert <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
+around each RCU read-side critical section, regardless of what
+environment it happens to be in.
+But suppose that some of the RCU read-side critical sections are
+on extremely hot code paths, and that use of <tt>CONFIG_PREEMPT=n</tt>
+is not a viable option, so that <tt>rcu_read_lock()</tt> and
+<tt>rcu_read_unlock()</tt> are not free.
+What then?
+
+<p>
+You <i>could</i> wait on all three grace periods in succession, as follows:
+
+<blockquote>
+<pre>
+ 1 synchronize_rcu();
+ 2 synchronize_rcu_bh();
+ 3 synchronize_sched();
+</pre>
+</blockquote>
+
+<p>
+This works, but triples the update-side latency penalty.
+In cases where this is not acceptable, <tt>synchronize_rcu_mult()</tt>
+may be used to wait on all three flavors of grace period concurrently:
+
+<blockquote>
+<pre>
+ 1 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched);
+</pre>
+</blockquote>
+
+<p>
+But what if it is necessary to also wait on SRCU?
+This can be done as follows:
+
+<blockquote>
+<pre>
+ 1 static void call_my_srcu(struct rcu_head *head,
+ 2        void (*func)(struct rcu_head *head))
+ 3 {
+ 4   call_srcu(&amp;my_srcu, head, func);
+ 5 }
+ 6
+ 7 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched, call_my_srcu);
+</pre>
+</blockquote>
+
+<p>
+If you needed to wait on multiple different flavors of SRCU
+(but why???), you would need to create a wrapper function resembling
+<tt>call_my_srcu()</tt> for each SRCU flavor.
+
+<p><a name="Quick Quiz 15"><b>Quick Quiz 15</b>:</a>
+But what if I need to wait for multiple RCU flavors, but I also need
+the grace periods to be expedited?
+<br><a href="#qq15answer">Answer</a>
+
+<p>
+Again, it is usually better to adjust the RCU read-side critical sections
+to use a single flavor of RCU, but when this is not feasible, you can use
+<tt>synchronize_rcu_mult()</tt>.
+
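For concreteness, here is a minimal update-side sketch built around synchronize_rcu_mult() as described above; struct foo, my_list, and my_lock are illustrative names, not taken from this document:

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct foo {                        /* illustrative element type */
            struct list_head list;
            int data;
    };

    static LIST_HEAD(my_list);          /* read under rcu, rcu_bh, and rcu_sched */
    static DEFINE_SPINLOCK(my_lock);

    static void remove_foo(struct foo *p)
    {
            spin_lock(&my_lock);
            list_del_rcu(&p->list);
            spin_unlock(&my_lock);

            /* Wait for all three flavors of grace period concurrently. */
            synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched);
            kfree(p);
    }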
 <h2><a name="Possible Future Changes">Possible Future Changes</a></h2>
 
 <p>
@@ -2893,5 +2978,20 @@ during scheduler initialization.
 
 </p><p><a href="#Quick%20Quiz%2014"><b>Back to Quick Quiz 14</b>.</a>
 
+<a name="qq15answer"></a>
+<p><b>Quick Quiz 15</b>:
+But what if I need to wait for multiple RCU flavors, but I also need
+the grace periods to be expedited?
+
+
+</p><p><b>Answer</b>:
+If you are using expedited grace periods, there should be less penalty
+for waiting on them in succession.
+But if that is nevertheless a problem, you can use workqueues or multiple
+kthreads to wait on the various expedited grace periods concurrently.
+
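A rough sketch of the workqueue approach mentioned in this answer (function and variable names are illustrative, not from this document):

    #include <linux/rcupdate.h>
    #include <linux/workqueue.h>

    static void exp_bh_sched_wait(struct work_struct *unused)
    {
            synchronize_rcu_bh_expedited();
            synchronize_sched_expedited();
    }

    static DECLARE_WORK(exp_bh_sched_work, exp_bh_sched_wait);

    static void wait_all_expedited(void)
    {
            schedule_work(&exp_bh_sched_work);
            synchronize_rcu_expedited();    /* overlaps with the work item */
            flush_work(&exp_bh_sched_work); /* wait for the other two */
    }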
+
+</p><p><a href="#Quick%20Quiz%2015"><b>Back to Quick Quiz 15</b>.</a>
+
 
 </body></html>
index 3a97ba490c42b03d606c5fe36e5afe1a9b213e28..6ff4966672e2eb3a99a2644db4a1dcf85ffc9246 100644 (file)
@@ -2337,6 +2337,14 @@ up to and including systems with 4096 CPUs.
 This real-time requirement motivated the grace-period kthread, which
 also simplified handling of a number of race conditions.
 
+<p>
+RCU must avoid degrading real-time response for CPU-bound threads, whether
+executing in usermode (which is one use case for
+<tt>CONFIG_NO_HZ_FULL=y</tt>) or in the kernel.
+That said, CPU-bound loops in the kernel must execute
+<tt>cond_resched_rcu_qs()</tt> at least once per few tens of milliseconds
+in order to avoid receiving an IPI from RCU.
+
 <p>
 Finally, RCU's status as a synchronization primitive means that
 any RCU failure can result in arbitrary memory corruption that can be
@@ -2390,6 +2398,8 @@ described in a separate section.
 <li>   <a href="#Sched Flavor">Sched Flavor</a>
 <li>   <a href="#Sleepable RCU">Sleepable RCU</a>
 <li>   <a href="#Tasks RCU">Tasks RCU</a>
+<li>   <a href="#Waiting for Multiple Grace Periods">
+       Waiting for Multiple Grace Periods</a>
 </ol>
 
 <h3><a name="Bottom-Half Flavor">Bottom-Half Flavor</a></h3>
@@ -2639,6 +2649,86 @@ The tasks-RCU API is quite compact, consisting only of
 <tt>synchronize_rcu_tasks()</tt>, and
 <tt>rcu_barrier_tasks()</tt>.
 
+<h3><a name="Waiting for Multiple Grace Periods">
+Waiting for Multiple Grace Periods</a></h3>
+
+<p>
+Perhaps you have an RCU-protected data structure that is accessed from
+RCU read-side critical sections, from softirq handlers, and from
+hardware interrupt handlers.
+That is three flavors of RCU: the normal flavor, the bottom-half flavor,
+and the sched flavor.
+How can you wait for a compound grace period?
+
+<p>
+The best approach is usually to &ldquo;just say no!&rdquo; and
+insert <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
+around each RCU read-side critical section, regardless of what
+environment it happens to be in.
+But suppose that some of the RCU read-side critical sections are
+on extremely hot code paths, and that use of <tt>CONFIG_PREEMPT=n</tt>
+is not a viable option, so that <tt>rcu_read_lock()</tt> and
+<tt>rcu_read_unlock()</tt> are not free.
+What then?
+
+<p>
+You <i>could</i> wait on all three grace periods in succession, as follows:
+
+<blockquote>
+<pre>
+ 1 synchronize_rcu();
+ 2 synchronize_rcu_bh();
+ 3 synchronize_sched();
+</pre>
+</blockquote>
+
+<p>
+This works, but triples the update-side latency penalty.
+In cases where this is not acceptable, <tt>synchronize_rcu_mult()</tt>
+may be used to wait on all three flavors of grace period concurrently:
+
+<blockquote>
+<pre>
+ 1 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched);
+</pre>
+</blockquote>
+
+<p>
+But what if it is necessary to also wait on SRCU?
+This can be done as follows:
+
+<blockquote>
+<pre>
+ 1 static void call_my_srcu(struct rcu_head *head,
+ 2        void (*func)(struct rcu_head *head))
+ 3 {
+ 4   call_srcu(&amp;my_srcu, head, func);
+ 5 }
+ 6
+ 7 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched, call_my_srcu);
+</pre>
+</blockquote>
+
+<p>
+If you needed to wait on multiple different flavors of SRCU
+(but why???), you would need to create a wrapper function resembling
+<tt>call_my_srcu()</tt> for each SRCU flavor.
+
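A sketch of how the my_srcu domain assumed by call_my_srcu() might be defined and combined with the other flavors; this is illustrative only, with DEFINE_STATIC_SRCU() being the usual way to define a file-scope SRCU domain:

    #include <linux/rcupdate.h>
    #include <linux/srcu.h>

    DEFINE_STATIC_SRCU(my_srcu);        /* the SRCU domain assumed above */

    static void call_my_srcu(struct rcu_head *head,
                             void (*func)(struct rcu_head *head))
    {
            call_srcu(&my_srcu, head, func);
    }

    static void wait_all_flavors(void)
    {
            synchronize_rcu_mult(call_rcu, call_rcu_bh,
                                 call_rcu_sched, call_my_srcu);
    }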
+<p>@@QQ@@
+But what if I need to wait for multiple RCU flavors, but I also need
+the grace periods to be expedited?
+<p>@@QQA@@
+If you are using expedited grace periods, there should be less penalty
+for waiting on them in succession.
+But if that is nevertheless a problem, you can use workqueues or multiple
+kthreads to wait on the various expedited grace periods concurrently.
+<p>@@QQE@@
+
+<p>
+Again, it is usually better to adjust the RCU read-side critical sections
+to use a single flavor of RCU, but when this is not feasible, you can use
+<tt>synchronize_rcu_mult()</tt>.
+
 <h2><a name="Possible Future Changes">Possible Future Changes</a></h2>
 
 <p>
index ec6998b1b6d04f3139ed6c066537cc059c89838d..00a3a38b375ae9946425fc2ea94fa0c2383e867c 100644 (file)
@@ -237,17 +237,17 @@ o "ktl" is the low-order 16 bits (in hexadecimal) of the count of
 
 The output of "cat rcu/rcu_preempt/rcuexp" looks as follows:
 
-s=21872 wd0=0 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
+s=21872 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
 
 These fields are as follows:
 
 o      "s" is the sequence number, with an odd number indicating that
        an expedited grace period is in progress.
 
-o      "wd0", "wd1", "wd2", and "wd3" are the number of times that an
-       attempt to start an expedited grace period found that someone
-       else had completed an expedited grace period that satisfies the
-       attempted request.  "Our work is done."
+o      "wd1", "wd2", and "wd3" are the number of times that an attempt
+       to start an expedited grace period found that someone else had
+       completed an expedited grace period that satisfies the attempted
+       request.  "Our work is done."
 
 o      "n" is number of times that a concurrent CPU-hotplug operation
        forced a fallback to a normal grace period.
index d603fa07823598b87dc48d0e8362a6fcb5a57a32..8c79f1d537313a9ca68c6b59b3203808ec8df5e8 100644 (file)
@@ -722,7 +722,7 @@ references.
 --------------------------------
 
 It can be helpful to manually add In-Reply-To: headers to a patch
-(e.g., when using "git send email") to associate the patch with
+(e.g., when using "git send-email") to associate the patch with
 previous relevant discussion, e.g. to link a bug fix to the email with
 the bug report.  However, for a multi-patch series, it is generally
 best to avoid using In-Reply-To: to link to older versions of the
index 430d279a8df374883f5038d0f86961843928f2a7..e5a115f244717aa93122a07d8bc3dc968833f588 100644 (file)
@@ -72,6 +72,5 @@ SunXi family
 
     * Octa ARM Cortex-A7 based SoCs
       - Allwinner A83T
-        + Not Supported
         + Datasheet
           http://dl.linux-sunxi.org/A83T/A83T_datasheet_Revision_1.1.pdf
index 701d39d3171a74d8c2eb670c0b1be2931f326ee8..56d6d8b796db6dd3aadd252a85cc4b691f9e20f7 100644 (file)
@@ -109,7 +109,13 @@ Header notes:
                        1 - 4K
                        2 - 16K
                        3 - 64K
-  Bits 3-63:   Reserved.
+  Bit 3:       Kernel physical placement
+                       0 - 2MB aligned base should be as close as possible
+                           to the base of DRAM, since memory below it is not
+                           accessible via the linear mapping
+                       1 - 2MB aligned base may be anywhere in physical
+                           memory
+  Bits 4-63:   Reserved.
 
 - When image_size is zero, a bootloader should attempt to keep as much
   memory as possible free for use by the kernel immediately after the
@@ -117,14 +123,14 @@ Header notes:
   depending on selected features, and is effectively unbound.
 
 The Image must be placed text_offset bytes from a 2MB aligned base
-address near the start of usable system RAM and called there. Memory
-below that base address is currently unusable by Linux, and therefore it
-is strongly recommended that this location is the start of system RAM.
-The region between the 2 MB aligned base address and the start of the
-image has no special significance to the kernel, and may be used for
-other purposes.
+address anywhere in usable system RAM and called there. The region
+between the 2 MB aligned base address and the start of the image has no
+special significance to the kernel, and may be used for other purposes.
 At least image_size bytes from the start of the image must be free for
 use by the kernel.
+NOTE: versions prior to v4.6 cannot make use of memory below the
+physical offset of the Image, so it is recommended that the Image be
+placed as close as possible to the start of system RAM.
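As an illustration of the placement rule above, a bootloader might compute the load address roughly as follows; this is only a sketch, dram_base and the rounding policy are the loader's choice, and the names are illustrative:

    #include <stdint.h>

    #define SZ_2M   0x200000ULL

    static uint64_t image_load_addr(uint64_t dram_base, uint64_t text_offset)
    {
            /* Round up to a 2 MB boundary, then apply text_offset. */
            uint64_t base = (dram_base + SZ_2M - 1) & ~(SZ_2M - 1);

            return base + text_offset;  /* place the Image here and branch to it */
    }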
 
 Any memory described to the kernel (even that below the start of the
 image) which is not marked as reserved from the kernel (e.g., with a
index 65b3eac8856cf7ec26b341b217046a2fb9b7bf26..ff49cf901148d895b765800ec6ddb79c0e38ed53 100644 (file)
@@ -7,7 +7,7 @@ This is the authoritative documentation on the design, interface and
 conventions of cgroup v2.  It describes all userland-visible aspects
 of cgroup including core and specific controller behaviors.  All
 future changes must be reflected in this document.  Documentation for
-v1 is available under Documentation/cgroup-legacy/.
+v1 is available under Documentation/cgroup-v1/.
 
 CONTENTS
 
@@ -843,6 +843,10 @@ PAGE_SIZE multiple when read back.
                Amount of memory used to cache filesystem data,
                including tmpfs and shared memory.
 
+         sock
+
+               Amount of memory used in network transmission buffers
+
          file_mapped
 
                Amount of cached filesystem data mapped with mmap()
index 8b49302712a890365ff3edaa2571971608ccd381..beda682e8d7750492a182380737aee06441cf432 100644 (file)
@@ -49,28 +49,33 @@ under development.
 
 Here's an example of how to use the API:
 
-       #include <linux/crypto.h>
+       #include <crypto/ahash.h>
        #include <linux/err.h>
        #include <linux/scatterlist.h>
        
        struct scatterlist sg[2];
        char result[128];
-       struct crypto_hash *tfm;
-       struct hash_desc desc;
+       struct crypto_ahash *tfm;
+       struct ahash_request *req;
        
-       tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                fail();
                
        /* ... set up the scatterlists ... */
 
-       desc.tfm = tfm;
-       desc.flags = 0;
-       
-       if (crypto_hash_digest(&desc, sg, 2, result))
+       req = ahash_request_alloc(tfm, GFP_ATOMIC);
+       if (!req)
                fail();
+
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, result, 2);
        
-       crypto_free_hash(tfm);
+       if (crypto_ahash_digest(req))
+               fail();
+
+       ahash_request_free(req);
+       crypto_free_ahash(tfm);
 
     
 Many real examples are available in the regression test module (tcrypt.c).
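The example above elides the scatterlist setup; one way it might look (buffer names and sizes are purely illustrative) is:

    #include <linux/scatterlist.h>

    static char buf1[64], buf2[64];         /* data to be hashed */
    static struct scatterlist sg[2];

    static void setup_sg(void)
    {
            sg_init_table(sg, 2);
            sg_set_buf(&sg[0], buf1, sizeof(buf1));
            sg_set_buf(&sg[1], buf2, sizeof(buf2));
    }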
index ae9be074d09f66d503adccc539c892e6cf13af90..a0884b85abf2cfccab76126f335352a9c6c44979 100644 (file)
@@ -178,6 +178,7 @@ nodes to be present and contain the properties described below.
                            "marvell,sheeva-v5"
                            "nvidia,tegra132-denver"
                            "qcom,krait"
+                           "qcom,kryo"
                            "qcom,scorpion"
        - enable-method
                Value type: <stringlist>
index 953fb640d9c461596a43c81d6a6aaff08d2e9a78..fd54e1db215652e73606fd899856727f08f8e9e8 100644 (file)
@@ -11,43 +11,9 @@ QEMU exposes the control and data register to ARM guests as memory mapped
 registers; their location is communicated to the guest's UEFI firmware in the
 DTB that QEMU places at the bottom of the guest's DRAM.
 
-The guest writes a selector value (a key) to the selector register, and then
-can read the corresponding data (produced by QEMU) via the data register. If
-the selected entry is writable, the guest can rewrite it through the data
-register.
+The authoritative guest-side hardware interface documentation to the fw_cfg
+device can be found in "docs/specs/fw_cfg.txt" in the QEMU source tree.
 
-The selector register takes keys in big endian byte order.
-
-The data register allows accesses with 8, 16, 32 and 64-bit width (only at
-offset 0 of the register). Accesses larger than a byte are interpreted as
-arrays, bundled together only for better performance. The bytes constituting
-such a word, in increasing address order, correspond to the bytes that would
-have been transferred by byte-wide accesses in chronological order.
-
-The interface allows guest firmware to download various parameters and blobs
-that affect how the firmware works and what tables it installs for the guest
-OS. For example, boot order of devices, ACPI tables, SMBIOS tables, kernel and
-initrd images for direct kernel booting, virtual machine UUID, SMP information,
-virtual NUMA topology, and so on.
-
-The authoritative registry of the valid selector values and their meanings is
-the QEMU source code; the structure of the data blobs corresponding to the
-individual key values is also defined in the QEMU source code.
-
-The presence of the registers can be verified by selecting the "signature" blob
-with key 0x0000, and reading four bytes from the data register. The returned
-signature is "QEMU".
-
-The outermost protocol (involving the write / read sequences of the control and
-data registers) is expected to be versioned, and/or described by feature bits.
-The interface revision / feature bitmap can be retrieved with key 0x0001. The
-blob to be read from the data register has size 4, and it is to be interpreted
-as a uint32_t value in little endian byte order. The current value
-(corresponding to the above outer protocol) is zero.
-
-The guest kernel is not expected to use these registers (although it is
-certainly allowed to); the device tree bindings are documented here because
-this is where device tree bindings reside in general.
 
 Required properties:
 
index 3090a8a008c03ed61cfb42b94d2cf36f3b5756ed..48f6703a28c8f50108a1d9f3bdda62462d9b099e 100644 (file)
@@ -22,6 +22,8 @@ SoCs:
    compatible = "ti,k2l", "ti,keystone"
 - Keystone 2 Edison
    compatible = "ti,k2e", "ti,keystone"
+- K2G
+   compatible = "ti,k2g", "ti,keystone"
 
 Boards:
 -  Keystone 2 Hawking/Kepler EVM
@@ -32,3 +34,6 @@ Boards:
 
 -  Keystone 2 Edison EVM
    compatible = "ti,k2e-evm", "ti,k2e", "ti,keystone"
+
+-  K2G EVM
+   compatible = "ti,k2g-evm", "ti,k2g", "ti,keystone"
index ab0c9cdf388e9e5769bf7ecfa88f5bbc869b58fb..7d28fe4bf654e8d1fa9c1a3cc06a8fab0631ef9a 100644 (file)
@@ -19,9 +19,12 @@ SoC. Currently known SoC compatibles are:
 And in addition, the compatible shall be extended with the specific
 board. Currently known boards are:
 
+"buffalo,linkstation-lsqvl"
+"buffalo,linkstation-lsvl"
+"buffalo,linkstation-lswsxl"
+"buffalo,linkstation-lswxl"
+"buffalo,linkstation-lswvl"
 "buffalo,lschlv2"
-"buffalo,lswvl"
-"buffalo,lswxl"
 "buffalo,lsxhl"
 "buffalo,lsxl"
 "cloudengines,pogo02"
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-srom.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-srom.txt
new file mode 100644 (file)
index 0000000..e5c18df
--- /dev/null
@@ -0,0 +1,81 @@
+SAMSUNG Exynos SoCs SROM Controller driver.
+
+Required properties:
+- compatible : Should contain "samsung,exynos-srom".
+
+- reg: offset and length of the register set
+
+Optional properties:
+The SROM controller can be used to attach external peripherals. In this case
+extra properties, describing the bus behind it, should be specified as below:
+
+- #address-cells: Must be set to 2 to allow device address translation.
+                 Address is specified as (bank#, offset).
+
+- #size-cells: Must be set to 1 to allow device size passing
+
+- ranges: Must be set up to reflect the memory layout with four integer values
+         per bank:
+               <bank-number> 0 <parent address of bank> <size>
+
+Sub-nodes:
+The actual device nodes should be added as subnodes to the SROMc node. These
+subnodes, in addition to the regular device specification, should contain the
+following properties, describing the configuration of the relevant SROM bank:
+
+Required properties:
+- reg: bank number, base address (relative to start of the bank) and size of
+       the memory mapped for the device. Note that base address will be
+       typically 0 as this is the start of the bank.
+
+- samsung,srom-timing : array of 6 integers, specifying bank timings in the
+                        following order: Tacp, Tcah, Tcoh, Tacc, Tcos, Tacs.
+                        Each value is specified in cycles and has the following
+                        meaning and valid range:
+                        Tacp : Page mode access cycle at Page mode (0 - 15)
+                        Tcah : Address holding time after CSn (0 - 15)
+                        Tcoh : Chip selection hold on OEn (0 - 15)
+                        Tacc : Access cycle (0 - 31, the actual time is N + 1)
+                        Tcos : Chip selection set-up before OEn (0 - 15)
+                        Tacs : Address set-up before CSn (0 - 15)
+
+Optional properties:
+- reg-io-width : data width in bytes (1 or 2). If omitted, default of 1 is used.
+
+- samsung,srom-page-mode : page mode configuration for the bank:
+                          0 - normal (one data)
+                          1 - four data
+                          If omitted, default of 0 is used.
+
+Example: basic definition, no banks are configured
+       sromc@12570000 {
+               compatible = "samsung,exynos-srom";
+               reg = <0x12570000 0x14>;
+       };
+
+Example: SROMc with SMSC911x ethernet chip on bank 3
+       sromc@12570000 {
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <0 0 0x04000000 0x20000   // Bank0
+                         1 0 0x05000000 0x20000   // Bank1
+                         2 0 0x06000000 0x20000   // Bank2
+                         3 0 0x07000000 0x20000>; // Bank3
+
+               compatible = "samsung,exynos-srom";
+               reg = <0x12570000 0x14>;
+
+               ethernet@3,0 {
+                       compatible = "smsc,lan9115";
+                       reg = <3 0 0x10000>;       // Bank 3, offset = 0
+                       phy-mode = "mii";
+                       interrupt-parent = <&gpx0>;
+                       interrupts = <5 8>;
+                       reg-io-width = <2>;
+                       smsc,irq-push-pull;
+                       smsc,force-internal-phy;
+
+                       samsung,srom-page-mode = <1>;
+                       samsung,srom-timing = <9 12 1 9 1 1>;
+               };
+       };
index bb9b0faa919d098309c9eb22289f8cef3b995d9b..7e79fcc36b0db60c8b147bf7a498d8013c120d16 100644 (file)
@@ -11,5 +11,6 @@ using one of the following compatible strings:
   allwinner,sun7i-a20
   allwinner,sun8i-a23
   allwinner,sun8i-a33
+  allwinner,sun8i-a83t
   allwinner,sun8i-h3
   allwinner,sun9i-a80
index e59f57b24777c473c5c92faefbc0bec1ec17ecb9..966dcaffcb9cbb09d288a968cd7cb3faae3019eb 100644 (file)
@@ -39,6 +39,7 @@ Required properties:
        "allwinner,sun6i-a31-apb0-clk" - for the APB0 clock on A31
        "allwinner,sun8i-a23-apb0-clk" - for the APB0 clock on A23
        "allwinner,sun9i-a80-apb0-clk" - for the APB0 bus clock on A80
+       "allwinner,sun8i-a83t-apb0-gates-clk" - for the APB0 gates on A83T
        "allwinner,sun4i-a10-apb0-gates-clk" - for the APB0 gates on A10
        "allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13
        "allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s
@@ -57,6 +58,7 @@ Required properties:
        "allwinner,sun9i-a80-apb1-gates-clk" - for the APB1 gates on A80
        "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
        "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
+       "allwinner,sun8i-a83t-bus-gates-clk" - for the bus gates on A83T
        "allwinner,sun8i-h3-bus-gates-clk" - for the bus gates on H3
        "allwinner,sun9i-a80-apbs-gates-clk" - for the APBS gates on A80
        "allwinner,sun4i-a10-dram-gates-clk" - for the DRAM gates on A10
@@ -86,7 +88,7 @@ Required properties for all clocks:
 - #clock-cells : from common clock binding; shall be set to 0 except for
        the following compatibles where it shall be set to 1:
        "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
-       "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk",
+       "allwinner,sun4i-pll6-clk",
        "allwinner,*-usb-clk", "allwinner,*-mmc-clk",
        "allwinner,*-mmc-config-clk"
 - clock-output-names : shall be the corresponding names of the outputs.
diff --git a/Documentation/devicetree/bindings/display/arm,hdlcd.txt b/Documentation/devicetree/bindings/display/arm,hdlcd.txt
new file mode 100644 (file)
index 0000000..78bc242
--- /dev/null
@@ -0,0 +1,79 @@
+ARM HDLCD
+
+This is a display controller found on several development platforms produced
+by ARM Ltd and in the more modern of its Fast Models. The HDLCD is an RGB
+streamer that reads the data from a framebuffer and sends it to a single
+digital encoder (DVI or HDMI).
+
+Required properties:
+  - compatible: "arm,hdlcd"
+  - reg: Physical base address and length of the controller's registers.
+  - interrupts: One interrupt used by the display controller to notify the
+    interrupt controller when any of the interrupt sources programmed in
+    the interrupt mask register have activated.
+  - clocks: A list of phandle + clock-specifier pairs, one for each
+    entry in 'clock-names'.
+  - clock-names: A list of clock names. For HDLCD it should contain:
+      - "pxlclk" for the clock feeding the output PLL of the controller.
+
+Required sub-nodes:
+  - port: The HDLCD connection to an encoder chip. The connection is modeled
+    using the OF graph bindings specified in
+    Documentation/devicetree/bindings/graph.txt.
+
+Optional properties:
+  - memory-region: phandle to a node describing memory (see
+    Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt) to be
+    used for the framebuffer; if not present, the framebuffer may be located
+    anywhere in memory.
+
+
+Example:
+
+/ {
+       ...
+
+       hdlcd@2b000000 {
+               compatible = "arm,hdlcd";
+               reg = <0 0x2b000000 0 0x1000>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&oscclk5>;
+               clock-names = "pxlclk";
+               port {
+                       hdlcd_output: endpoint@0 {
+                               remote-endpoint = <&hdmi_enc_input>;
+                       };
+               };
+       };
+
+       /* HDMI encoder on I2C bus */
+       i2c@7ffa0000 {
+               ....
+               hdmi-transmitter@70 {
+                       compatible = ".....";
+                       reg = <0x70>;
+                       port@0 {
+                               hdmi_enc_input: endpoint {
+                                       remote-endpoint = <&hdlcd_output>;
+                               };
+
+                               hdmi_enc_output: endpoint {
+                                       remote-endpoint = <&hdmi_1_port>;
+                               };
+                       };
+               };
+
+       };
+
+       hdmi1: connector@1 {
+               compatible = "hdmi-connector";
+               type = "a";
+               port {
+                       hdmi_1_port: endpoint {
+                               remote-endpoint = <&hdmi_enc_output>;
+                       };
+               };
+       };
+
+       ...
+};
index 267565894db929013c88d75be65093f693546b8b..db7e2260f9c5749e510a19e6d275e7bbfdaff467 100644 (file)
@@ -15,6 +15,7 @@ Optional properties:
     cells in the dmas property of client device.
   - dma-channels: contains the total number of DMA channels supported by the DMAC
   - dma-requests: contains the total number of DMA requests supported by the DMAC
+  - arm,pl330-broken-no-flushp: quirk for avoiding to execute DMAFLUSHP
 
 Example:
 
index 78e2a31c58d0735b40254e592e1fa7c73983d571..1006b04894642685a984081121c4b9229181a55c 100644 (file)
@@ -16,6 +16,10 @@ Required properties:
 - regmap-mcba          : Regmap of the MCB-A (memory bridge) resource.
 - regmap-mcbb          : Regmap of the MCB-B (memory bridge) resource.
 - regmap-efuse         : Regmap of the PMD efuse resource.
+- regmap-rb            : Regmap of the register bus resource. This property
+                         is optional only for compatibility. If the RB
+                         error conditions are not cleared, it will
+                         continuously generate interrupts.
 - reg                  : First resource shall be the CPU bus (PCP) resource.
 - interrupts            : Interrupt-specifier for MCU, PMD, L3, or SoC error
                          IRQ(s).
@@ -64,6 +68,11 @@ Example:
                reg = <0x0 0x1054a000 0x0 0x20>;
        };
 
+       rb: rb@7e000000 {
+               compatible = "apm,xgene-rb", "syscon";
+               reg = <0x0 0x7e000000 0x0 0x10>;
+       };
+
        edac@78800000 {
                compatible = "apm,xgene-edac";
                #address-cells = <2>;
@@ -73,6 +82,7 @@ Example:
                regmap-mcba = <&mcba>;
                regmap-mcbb = <&mcbb>;
                regmap-efuse = <&efuse>;
+               regmap-rb = <&rb>;
                reg = <0x0 0x78800000 0x0 0x100>;
                interrupts = <0x0 0x20 0x4>,
                             <0x0 0x21 0x4>,
diff --git a/Documentation/devicetree/bindings/firmware/qcom,scm.txt b/Documentation/devicetree/bindings/firmware/qcom,scm.txt
new file mode 100644 (file)
index 0000000..debcd32
--- /dev/null
@@ -0,0 +1,25 @@
+QCOM Secure Channel Manager (SCM)
+
+Qualcomm processors include an interface to communicate with the secure firmware.
+This interface allows clients to request different types of actions.  These
+can include CPU power up/down, HDCP requests, loading of firmware, and other
+assorted actions.
+
+Required properties:
+- compatible: must contain "qcom,scm"
+- clocks: Should contain the core, iface, and bus clocks.
+- clock-names: Must contain "core" for the core clock, "iface" for the interface
+  clock and "bus" for the bus clock.
+
+Example:
+
+       firmware {
+               compatible = "simple-bus";
+
+               scm {
+                       compatible = "qcom,scm";
+                       clocks = <&gcc GCC_CE1_CLK> , <&gcc GCC_CE1_AXI_CLK>, <&gcc GCC_CE1_AHB_CLK>;
+                       clock-names = "core", "bus", "iface";
+               };
+       };
+
index 8a979780452bdbf12994aae6e6c2e0272172baae..53de1d9d0b958c57dc79129569dab03d843a61a3 100644 (file)
@@ -7,6 +7,8 @@ properties are needed by the Nokia modem HSI client:
 Required properties:
 - compatible:          Should be one of
       "nokia,n900-modem"
+      "nokia,n950-modem"
+      "nokia,n9-modem"
 - hsi-channel-names:   Should contain the following strings
       "mcsaab-control"
       "speech-control"
@@ -15,11 +17,11 @@ Required properties:
 - gpios:               Should provide a GPIO handler for each GPIO listed in
                         gpio-names
 - gpio-names:          Should contain the following strings
-      "cmt_apeslpx"
-      "cmt_rst_rq"
-      "cmt_en"
-      "cmt_rst"
-      "cmt_bsi"
+      "cmt_apeslpx" (for n900, n950, n9)
+      "cmt_rst_rq"  (for n900, n950, n9)
+      "cmt_en"      (for n900, n950, n9)
+      "cmt_rst"     (for n900)
+      "cmt_bsi"     (for n900)
 - interrupts:          Should be IRQ handle for modem's reset indication
 
 Example:
diff --git a/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt b/Documentation/devicetree/bindings/input/touchscreen/cyttsp.txt
new file mode 100644 (file)
index 0000000..b75d4cf
--- /dev/null
@@ -0,0 +1,95 @@
+* Cypress cyttsp touchscreen controller
+
+Required properties:
+ - compatible          : must be "cypress,cyttsp-i2c" or "cypress,cyttsp-spi"
+ - reg                 : Device I2C address or SPI chip select number
+ - spi-max-frequency   : Maximum SPI clocking speed of the device (for cyttsp-spi)
+ - interrupt-parent    : the phandle for the gpio controller
+                         (see interrupt binding[0]).
+ - interrupts          : (gpio) interrupt to which the chip is connected
+                         (see interrupt binding[0]).
+ - bootloader-key      : the 8-byte bootloader key that is required to switch
+                         the chip from bootloader mode (default mode) to
+                         application mode.
+                         This property has to be specified as an array of 8
+                         '/bits/ 8' values.
+
+Optional properties:
+ - reset-gpios         : the reset gpio the chip is connected to
+                         (see GPIO binding[1] for more details).
+ - touchscreen-size-x  : horizontal resolution of touchscreen (in pixels)
+ - touchscreen-size-y  : vertical resolution of touchscreen (in pixels)
+ - touchscreen-fuzz-x  : horizontal noise value of the absolute input device
+                         (in pixels)
+ - touchscreen-fuzz-y  : vertical noise value of the absolute input device
+                         (in pixels)
+ - active-distance     : the distance in pixels beyond which a touch must move
+                         before movement is detected and reported by the device.
+                         Valid values: 0-15.
+ - active-interval-ms  : the minimum period in ms between consecutive
+                         scanning/processing cycles when the chip is in active mode.
+                         Valid values: 0-255.
+ - lowpower-interval-ms        : the minimum period in ms between consecutive
+                         scanning/processing cycles when the chip is in low-power mode.
+                         Valid values: 0-2550
+ - touch-timeout-ms    : minimum time in ms spent in the active power state while no
+                         touches are detected before entering low-power mode.
+                         Valid values: 0-2550
+ - use-handshake       : enable register-based handshake (boolean). This should
+                         only be used if the chip is configured to use 'blocking
+                         communication with timeout' (in this case the device
+                         generates an interrupt at the end of every
+                         scanning/processing cycle).
+
+[0]: Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+[1]: Documentation/devicetree/bindings/gpio/gpio.txt
+
+Example:
+       &i2c1 {
+               /* ... */
+               cyttsp@a {
+                       compatible = "cypress,cyttsp-i2c";
+                       reg = <0xa>;
+                       interrupt-parent = <&gpio0>;
+                       interrupts = <28 0>;
+                       reset-gpios = <&gpio3 4 GPIO_ACTIVE_LOW>;
+
+                       touchscreen-size-x = <800>;
+                       touchscreen-size-y = <480>;
+                       touchscreen-fuzz-x = <4>;
+                       touchscreen-fuzz-y = <7>;
+
+                       bootloader-key = /bits/ 8 <0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08>;
+                       active-distance = <8>;
+                       active-interval-ms = <0>;
+                       lowpower-interval-ms = <200>;
+                       touch-timeout-ms = <100>;
+               };
+
+               /* ... */
+       };
+
+       &mcspi1 {
+               /* ... */
+               cyttsp@0 {
+                       compatible = "cypress,cyttsp-spi";
+                       spi-max-frequency = <6000000>;
+                       reg = <0>;
+                       interrupt-parent = <&gpio0>;
+                       interrupts = <28 0>;
+                       reset-gpios = <&gpio3 4 GPIO_ACTIVE_LOW>;
+
+                       touchscreen-size-x = <800>;
+                       touchscreen-size-y = <480>;
+                       touchscreen-fuzz-x = <4>;
+                       touchscreen-fuzz-y = <7>;
+
+                       bootloader-key = /bits/ 8 <0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08>;
+                       active-distance = <8>;
+                       active-interval-ms = <0>;
+                       lowpower-interval-ms = <200>;
+                       touch-timeout-ms = <100>;
+               };
+
+               /* ... */
+       };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/microchip,pic32-evic.txt b/Documentation/devicetree/bindings/interrupt-controller/microchip,pic32-evic.txt
new file mode 100644 (file)
index 0000000..c3a1b37
--- /dev/null
@@ -0,0 +1,67 @@
+Microchip PIC32 Interrupt Controller
+====================================
+
+The Microchip PIC32 contains an Enhanced Vectored Interrupt Controller (EVIC).
+It handles all internal and external interrupts. This controller exists outside
+of the CPU and is the arbitrator of all interrupts (including interrupts from
+the CPU itself) before they are presented to the CPU.
+
+External interrupts have a software-configurable edge polarity. Non-external
+interrupts have a type and polarity that is determined by the source of the
+interrupt.
+
+Required properties
+-------------------
+
+- compatible: Should be "microchip,pic32mzda-evic"
+- reg: Specifies physical base address and size of register range.
+- interrupt-controller: Identifies the node as an interrupt controller.
+- #interrupt-cells: Specifies the number of cells used to encode an interrupt
+  source connected to this controller. The value shall be 2 and interrupt
+  descriptor shall have the following format:
+
+       <hw_irq irq_type>
+
+  hw_irq - represents the hardware interrupt number as in the data sheet.
+  irq_type - is used to describe the type and polarity of an interrupt. For
+  internal interrupts use IRQ_TYPE_EDGE_RISING for non-persistent interrupts and
+  IRQ_TYPE_LEVEL_HIGH for persistent interrupts. For external interrupts use
+  IRQ_TYPE_EDGE_RISING or IRQ_TYPE_EDGE_FALLING to select the desired polarity.
+
+Optional properties
+-------------------
+- microchip,external-irqs: u32 array of external interrupts with software
+  polarity configuration. This array corresponds to the bits in the INTCON
+  SFR.
+
+Example
+-------
+
+evic: interrupt-controller@1f810000 {
+       compatible = "microchip,pic32mzda-evic";
+       interrupt-controller;
+       #interrupt-cells = <2>;
+       reg = <0x1f810000 0x1000>;
+       microchip,external-irqs = <3 8 13 18 23>;
+};
+
+Each device/peripheral must request its interrupt line with the associated type
+and polarity.
+
+Internal interrupt DTS snippet
+------------------------------
+
+device@1f800000 {
+       ...
+       interrupts = <113 IRQ_TYPE_LEVEL_HIGH>;
+       ...
+};
+
+External interrupt DTS snippet
+------------------------------
+
+device@1f800000 {
+       ...
+       interrupts = <3 IRQ_TYPE_EDGE_RISING>;
+       ...
+};
diff --git a/Documentation/devicetree/bindings/leds/leds-sn3218.txt b/Documentation/devicetree/bindings/leds/leds-sn3218.txt
new file mode 100644 (file)
index 0000000..19cbf57
--- /dev/null
@@ -0,0 +1,41 @@
+* Si-En Technology - SN3218 18-Channel LED Driver
+
+Required properties:
+- compatible :
+       "si-en,sn3218"
+- #address-cells : must be 1
+- #size-cells : must be 0
+- reg : I2C slave address, typically 0x54
+
+There must be at least 1 LED which is represented as a sub-node
+of the sn3218 device.
+
+LED sub-node properties:
+- label : (optional) see Documentation/devicetree/bindings/leds/common.txt
+- reg : number of LED line (could be from 0 to 17)
+- linux,default-trigger : (optional)
+   see Documentation/devicetree/bindings/leds/common.txt
+
+Example:
+
+sn3218: led-controller@54 {
+       compatible = "si-en,sn3218";
+       #address-cells = <1>;
+       #size-cells = <0>;
+       reg = <0x54>;
+
+       led@0 {
+               label = "led1";
+               reg = <0x0>;
+       };
+
+       led@1 {
+               label = "led2";
+               reg = <0x1>;
+       };
+
+       led@2 {
+               label = "led3";
+               reg = <0x2>;
+       };
+};
index 202565313e825fe897e03a05849fa54765efe2fe..100f0ae432691c36ff54609c86a8cc984b9a5b7d 100644 (file)
@@ -20,6 +20,8 @@ Optional Properties:
 
 - link-frequencies: List of allowed link frequencies in Hz. Each frequency is
        expressed as a 64-bit big-endian integer.
+- reset-gpios: GPIO handle which is connected to the reset pin of the chip.
+- standby-gpios: GPIO handle which is connected to the standby pin of the chip.
 
 For further reading on port node refer to
 Documentation/devicetree/bindings/media/video-interfaces.txt.
diff --git a/Documentation/devicetree/bindings/media/i2c/tvp5150.txt b/Documentation/devicetree/bindings/media/i2c/tvp5150.txt
new file mode 100644 (file)
index 0000000..8c0fc1a
--- /dev/null
@@ -0,0 +1,45 @@
+* Texas Instruments TVP5150 and TVP5151 video decoders
+
+The TVP5150 and TVP5151 are video decoders that convert baseband NTSC and PAL
+(and also SECAM in the TVP5151 case) video signals to either 8-bit 4:2:2 YUV
+with discrete syncs or 8-bit ITU-R BT.656 with embedded syncs output formats.
+
+Required Properties:
+- compatible: value must be "ti,tvp5150"
+- reg: I2C slave address
+
+Optional Properties:
+- pdn-gpios: phandle for the GPIO connected to the PDN pin, if any.
+- reset-gpios: phandle for the GPIO connected to the RESETB pin, if any.
+
+The device node must contain one 'port' child node for its digital output
+video port, in accordance with the video interface bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Required Endpoint Properties for parallel synchronization:
+
+- hsync-active: active state of the HSYNC signal. Must be <1> (HIGH).
+- vsync-active: active state of the VSYNC signal. Must be <1> (HIGH).
+- field-even-active: field signal level during the even field data
+  transmission. Must be <0>.
+
+If none of hsync-active, vsync-active and field-even-active is specified,
+the endpoint is assumed to use embedded BT.656 synchronization.
+
+Example:
+
+&i2c2 {
+       ...
+       tvp5150@5c {
+               compatible = "ti,tvp5150";
+               reg = <0x5c>;
+               pdn-gpios = <&gpio4 30 GPIO_ACTIVE_LOW>;
+               reset-gpios = <&gpio6 7 GPIO_ACTIVE_LOW>;
+
+               port {
+                       tvp5150_1: endpoint {
+                               remote-endpoint = <&ccdc_ep>;
+                       };
+               };
+       };
+};
index 9dafe6b06cd286a053a5db66cbbf8a5288faba2a..619193ccf7ff831d876c151f9689d8074c20070d 100644 (file)
 family of devices. The current blocks are always slaves and support one input
 channel which can be either RGB, YUYV or BT656.
 
  - compatible: Must be one of the following
+   - "renesas,vin-r8a7795" for the R8A7795 device
    - "renesas,vin-r8a7794" for the R8A7794 device
    - "renesas,vin-r8a7793" for the R8A7793 device
    - "renesas,vin-r8a7791" for the R8A7791 device
index 0cb94201bf921a191c617c4c84a4a4637fc9c8d0..d3436e5190f9196a64f42fd0872a218a893cc229 100644 (file)
@@ -5,11 +5,12 @@ and decoding function conforming to the JPEG baseline process, so that the JPU
 can encode image data and decode JPEG data quickly.
 
 Required properties:
-  - compatible: should containg one of the following:
-                       - "renesas,jpu-r8a7790" for R-Car H2
-                       - "renesas,jpu-r8a7791" for R-Car M2-W
-                       - "renesas,jpu-r8a7792" for R-Car V2H
-                       - "renesas,jpu-r8a7793" for R-Car M2-N
+- compatible: "renesas,jpu-<soctype>", "renesas,rcar-gen2-jpu" as fallback.
+       Examples with soctypes are:
+         - "renesas,jpu-r8a7790" for R-Car H2
+         - "renesas,jpu-r8a7791" for R-Car M2-W
+         - "renesas,jpu-r8a7792" for R-Car V2H
+         - "renesas,jpu-r8a7793" for R-Car M2-N
 
   - reg: Base address and length of the registers block for the JPU.
   - interrupts: JPU interrupt specifier.
@@ -17,7 +18,7 @@ Required properties:
 
 Example: R8A7790 (R-Car H2) JPU node
        jpeg-codec@fe980000 {
-               compatible = "renesas,jpu-r8a7790";
+               compatible = "renesas,jpu-r8a7790", "renesas,rcar-gen2-jpu";
                reg = <0 0xfe980000 0 0x10300>;
                interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_JPU>;
diff --git a/Documentation/devicetree/bindings/media/ti-cal.txt b/Documentation/devicetree/bindings/media/ti-cal.txt
new file mode 100644 (file)
index 0000000..ae9b52f
--- /dev/null
@@ -0,0 +1,72 @@
+Texas Instruments DRA72x CAMERA ADAPTATION LAYER (CAL)
+------------------------------------------------------
+
+The Camera Adaptation Layer (CAL) is a key component for image capture
+applications. The capture module provides the system interface and the
+processing capability to connect CSI2 image-sensor modules to the
+DRA72x device.
+
+Required properties:
+- compatible: must be "ti,dra72-cal"
+- reg: CAL Top level, Receiver Core #0, Receiver Core #1 and Camera RX
+       control address space
+- reg-names: cal_top, cal_rx_core0, cal_rx_core1, and camerrx_control
+            registers
+- interrupts: should contain the IRQ line for the CAL
+
+CAL supports 2 camera port nodes on the MIPI bus. Each CSI2 camera port node
+should contain a 'port' child node with a child 'endpoint' node. Please
+refer to the bindings defined in
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+       cal: cal@4845b000 {
+               compatible = "ti,dra72-cal";
+               ti,hwmods = "cal";
+               reg = <0x4845B000 0x400>,
+                     <0x4845B800 0x40>,
+                     <0x4845B900 0x40>,
+                     <0x4A002e94 0x4>;
+               reg-names = "cal_top",
+                           "cal_rx_core0",
+                           "cal_rx_core1",
+                           "camerrx_control";
+               interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       csi2_0: port@0 {
+                               reg = <0>;
+                               endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ar0330_1>;
+                               };
+                       };
+                       csi2_1: port@1 {
+                               reg = <1>;
+                       };
+               };
+       };
+
+       i2c5: i2c@4807c000 {
+               ar0330@10 {
+                       compatible = "ti,ar0330";
+                       reg = <0x10>;
+
+                       port {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               ar0330_1: endpoint {
+                                       reg = <0>;
+                                       clock-lanes = <1>;
+                                       data-lanes = <0 2 3 4>;
+                                       remote-endpoint = <&csi2_0>;
+                               };
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/mfd/tps65086.txt b/Documentation/devicetree/bindings/mfd/tps65086.txt
new file mode 100644 (file)
index 0000000..d370561
--- /dev/null
@@ -0,0 +1,55 @@
+* TPS65086 Power Management Integrated Circuit (PMIC) bindings
+
+Required properties:
+ - compatible          : Should be "ti,tps65086".
+ - reg                 : I2C slave address.
+ - interrupt-parent    : Phandle to the parent interrupt controller.
+ - interrupts          : The interrupt line the device is connected to.
+ - interrupt-controller        : Marks the device node as an interrupt controller.
+ - #interrupt-cells    : The number of cells to describe an IRQ, should be 2.
+                           The first cell is the IRQ number.
+                           The second cell is the flags, encoded as trigger
+                           masks from ../interrupt-controller/interrupts.txt.
+ - gpio-controller      : Marks the device node as a GPIO Controller.
+ - #gpio-cells          : Should be two.  The first cell is the pin number and
+                            the second cell is used to specify flags.
+                            See ../gpio/gpio.txt for more information.
+ - regulators           : List of child nodes that specify the regulator
+                            initialization data. Child nodes must be named
+                            after their hardware counterparts: buck[1-6],
+                            ldoa[1-3], swa1, swb[1-2], and vtt. Each child
+                            node is defined using the standard binding for
+                            regulators and the optional regulator properties
+                            defined below.
+
+Optional regulator properties:
+ - ti,regulator-step-size-25mv : This is applicable for buck[1,2,6]; set this
+                                   if the regulator is factory set with a 25mV
+                                   step voltage mapping.
+ - ti,regulator-decay          : This is applicable for buck[1-6]; set this if
+                                   the output needs to decay; the default is
+                                   for the output to slew down.
+
+Example:
+
+       pmic: tps65086@5e {
+               compatible = "ti,tps65086";
+               reg = <0x5e>;
+               interrupt-parent = <&gpio1>;
+               interrupts = <28 IRQ_TYPE_LEVEL_LOW>;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               regulators {
+                       buck1 {
+                               regulator-name = "vcc1";
+                               regulator-min-microvolt = <1600000>;
+                               regulator-max-microvolt = <1600000>;
+                               regulator-boot-on;
+                               ti,regulator-decay;
+                               ti,regulator-step-size-25mv;
+                       };
+               };
+       };
diff --git a/Documentation/devicetree/bindings/mips/pic32/microchip,pic32mzda.txt b/Documentation/devicetree/bindings/mips/pic32/microchip,pic32mzda.txt
new file mode 100644 (file)
index 0000000..1c8dbc4
--- /dev/null
@@ -0,0 +1,31 @@
+* Microchip PIC32MZDA Platforms
+
+PIC32MZDA Starter Kit
+Required root node properties:
+    - compatible = "microchip,pic32mzda-sk", "microchip,pic32mzda"
+
+CPU nodes:
+----------
+A "cpus" node is required.  Required properties:
+ - #address-cells: Must be 1.
+ - #size-cells: Must be 0.
+A CPU sub-node is also required.  Required properties:
+ - device_type: Must be "cpu".
+ - compatible: Must be "mti,mips14KEc".
+Example:
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "mti,mips14KEc";
+               };
+       };
+
+Boot protocol
+--------------
+In accordance with Unified Hosting Interface Reference Manual (MD01069), the
+bootloader must pass the following arguments to the kernel:
+ - $a0: -2.
+ - $a1: KSEG0 address of the flattened device-tree blob.
index 72cc9cc95880ad8cd3915c8d1648c20ee0e8e363..be56d2bd474a0c309729128c97c4a3a91bc7b918 100644 (file)
@@ -4,7 +4,10 @@ This file documents differences between the core properties described
 by mmc.txt and the properties that represent the IPROC SDHCI controller.
 
 Required properties:
-- compatible : Should be "brcm,sdhci-iproc-cygnus".
+- compatible : Should be one of the following
+              "brcm,bcm2835-sdhci"
+              "brcm,sdhci-iproc-cygnus"
+
 - clocks : The clock feeding the SDHCI controller.
 
 Optional properties:
diff --git a/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt b/Documentation/devicetree/bindings/mmc/microchip,sdhci-pic32.txt
new file mode 100644 (file)
index 0000000..71ad57e
--- /dev/null
@@ -0,0 +1,29 @@
+* Microchip PIC32 SDHCI Controller
+
+This file documents differences between the core properties in mmc.txt
+and the properties used by the sdhci-pic32 driver.
+
+Required properties:
+- compatible: Should be "microchip,pic32mzda-sdhci"
+- interrupts: Should contain the SDHCI interrupt
+- clock-names: Should be "base_clk", "sys_clk".
+               See: Documentation/devicetree/bindings/resource-names.txt
+- clocks: Phandles to the clocks listed in clock-names.
+          See: Documentation/devicetree/bindings/clock/clock-bindings.txt
+- pinctrl-names: A pinctrl state named "default" must be defined.
+- pinctrl-0: Phandle referencing pin configuration of the SDHCI controller.
+             See: Documentation/devicetree/bindings/pinctrl/pinctrl-binding.txt
+
+Example:
+
+       sdhci@1f8ec000 {
+               compatible = "microchip,pic32mzda-sdhci";
+               reg = <0x1f8ec000 0x100>;
+               interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&REFCLKO4>, <&PBCLK5>;
+               clock-names = "base_clk", "sys_clk";
+               bus-width = <4>;
+               cap-sd-highspeed;
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_sdhc1>;
+       };
index 3dc13b68fc3ffb6dd136038f9116a96fbde996c7..ea5614b6f6130f32219e63115541f1bea328d47d 100644 (file)
@@ -13,6 +13,8 @@ Required Properties:
        - "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
                                                        before RK3288
        - "rockchip,rk3288-dw-mshc": for Rockchip RK3288
+       - "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
+       - "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
 
 Optional Properties:
 * clocks: from common clock binding: if ciu_drive and ciu_sample are
diff --git a/Documentation/devicetree/bindings/mtd/qcom_nandc.txt b/Documentation/devicetree/bindings/mtd/qcom_nandc.txt
new file mode 100644 (file)
index 0000000..70dd511
--- /dev/null
@@ -0,0 +1,86 @@
+* Qualcomm NAND controller
+
+Required properties:
+- compatible:          should be "qcom,ipq806x-nand"
+- reg:                 MMIO address range
+- clocks:              must contain core clock and always on clock
+- clock-names:         must contain "core" for the core clock and "aon" for the
+                       always on clock
+- dmas:                        DMA specifier, consisting of a phandle to the ADM DMA
+                       controller node and the channel number to be used for
+                       NAND. Refer to dma.txt and qcom_adm.txt for more details
+- dma-names:           must be "rxtx"
+- qcom,cmd-crci:       must contain the ADM command type CRCI block instance
+                       number specified for the NAND controller on the given
+                       platform
+- qcom,data-crci:      must contain the ADM data type CRCI block instance
+                       number specified for the NAND controller on the given
+                       platform
+- #address-cells:      <1> - subnodes give the chip-select number
+- #size-cells:         <0>
+
+* NAND chip-select
+
+Each controller may contain one or more subnodes to represent enabled
+chip-selects which (may) contain NAND flash chips. Their properties are as
+follows.
+
+Required properties:
+- compatible:          should contain "qcom,nandcs"
+- reg:                 a single integer representing the chip-select
+                       number (e.g., 0, 1, 2, etc.)
+- #address-cells:      see partition.txt
+- #size-cells:         see partition.txt
+- nand-ecc-strength:   see nand.txt
+- nand-ecc-step-size:  must be 512. see nand.txt for more details.
+
+Optional properties:
+- nand-bus-width:      see nand.txt
+
+Each nandcs device node may optionally contain a 'partitions' sub-node, which
+further contains sub-nodes describing the flash partition mapping. See
+partition.txt for more detail.
+
+Example:
+
+nand@1ac00000 {
+       compatible = "qcom,ipq806x-nand";
+       reg = <0x1ac00000 0x800>;
+
+       clocks = <&gcc EBI2_CLK>,
+                <&gcc EBI2_AON_CLK>;
+       clock-names = "core", "aon";
+
+       dmas = <&adm_dma 3>;
+       dma-names = "rxtx";
+       qcom,cmd-crci = <15>;
+       qcom,data-crci = <3>;
+
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       nandcs@0 {
+               compatible = "qcom,nandcs";
+               reg = <0>;
+
+               nand-ecc-strength = <4>;
+               nand-ecc-step-size = <512>;
+               nand-bus-width = <8>;
+
+               partitions {
+                       compatible = "fixed-partitions";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       partition@0 {
+                               label = "boot-nand";
+                               reg = <0 0x58a0000>;
+                       };
+
+                       partition@58a0000 {
+                               label = "fs-nand";
+                               reg = <0x58a0000 0x4000000>;
+                       };
+               };
+       };
+};
index 451fef26b4dfaf05b6782099e54816d52a52baa8..10587bdadbbe5a8e8fe703e23c194420e3701f9f 100644 (file)
@@ -68,7 +68,7 @@ ethernet@f0b60000 {
                phy1: ethernet-phy@1 {
                        max-speed = <1000>;
                        reg = <0x1>;
-                       compatible = "brcm,28nm-gphy", "ethernet-phy-ieee802.3-c22";
+                       compatible = "ethernet-phy-ieee802.3-c22";
                };
        };
 };
@@ -115,7 +115,7 @@ ethernet@f0ba0000 {
                phy0: ethernet-phy@0 {
                        max-speed = <1000>;
                        reg = <0x0>;
-                       compatible = "brcm,bcm53125", "ethernet-phy-ieee802.3-c22";
+                       compatible = "ethernet-phy-ieee802.3-c22";
                };
        };
 };
index 80411b2f04906bc065513cb8a968f1a57f132906..ecacfa44b1eb9603d8b77c712dcff565ef4f470d 100644 (file)
@@ -4,8 +4,6 @@ Required properties:
 - compatible: should be "hisilicon,hns-dsaf-v1" or "hisilicon,hns-dsaf-v2".
   "hisilicon,hns-dsaf-v1" is for hip05.
   "hisilicon,hns-dsaf-v2" is for Hi1610 and Hi1612.
-- dsa-name: dsa fabric name who provide this interface.
-  should be "dsafX", X is the dsaf id.
 - mode: dsa fabric mode string. only support one of dsaf modes like these:
                "2port-64vf",
                "6port-16rss",
@@ -26,9 +24,8 @@ Required properties:
 
 Example:
 
-dsa: dsa@c7000000 {
+dsaf0: dsa@c7000000 {
        compatible = "hisilicon,hns-dsaf-v1";
-       dsa_name = "dsaf0";
        mode = "6port-16rss";
        interrupt-parent = <&mbigen_dsa>;
        reg = <0x0 0xC0000000 0x0 0x420000
index 41d19be7011ecd543af6c1226d2f332b8306a01e..e6a9d1c30878f89ff948d5fcd8cbe308e53327df 100644 (file)
@@ -4,8 +4,9 @@ Required properties:
 - compatible: "hisilicon,hns-nic-v1" or "hisilicon,hns-nic-v2".
   "hisilicon,hns-nic-v1" is for hip05.
   "hisilicon,hns-nic-v2" is for Hi1610 and Hi1612.
-- ae-name: accelerator name who provides this interface,
-  is simply a name referring to the name of name in the accelerator node.
+- ae-handle: accelerator engine handle for hns,
+  specifies a reference to the associated hardware driver node.
+  see Documentation/devicetree/bindings/net/hisilicon-hns-dsaf.txt
 - port-id: is the index of port provided by DSAF (the accelerator). DSAF can
  connect to 8 PHYs. Ports 0 and 1 are both used for administration purposes. They
   are called debug ports.
@@ -41,7 +42,7 @@ Example:
 
        ethernet@0{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <0>;
                local-mac-address = [a2 14 e4 4b 56 76];
        };
index aeea50c84e921fb301fc7fe9fd2cd6f295c7139c..d0cb8693963b51f447afbb9b6e6c5bd42e87f34e 100644 (file)
@@ -6,12 +6,17 @@ Required properties:
 - interrupts: interrupt for the device
 - phy: See ethernet.txt file in the same directory.
 - phy-mode: See ethernet.txt file in the same directory
-- clocks: a pointer to the reference clock for this device.
+- clocks: List of clocks for this device. At least the core clock is
+  mandatory. If several clocks are given, then the clock-names property
+  must be used to identify them.
 
 Optional properties:
 - tx-csum-limit: maximum mtu supported by port that allow TX checksum.
   Value is presented in bytes. If not used, by default 1600B is set for
   "marvell,armada-370-neta" and 9800B for others.
+- clock-names: List of names corresponding to clocks property; shall be
+  "core" for core clock and "bus" for the optional bus clock.
+
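+For instance, with both clocks provided (the clock phandles below are
+placeholders rather than values from a real board):
+
+       clocks = <&core_clk>, <&bus_clk>;
+       clock-names = "core", "bus";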
 
 Example:
 
index 79384113c2b060a4068413d427d3509cdcaa2d5b..694987d3c17a1085ba9fce589d81cc9ee99a7717 100644 (file)
@@ -38,7 +38,6 @@ Example :
 
                        phy11: ethernet-phy@1 {
                                reg = <1>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -48,7 +47,6 @@ Example :
                        };
                        phy12: ethernet-phy@2 {
                                reg = <2>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -58,7 +56,6 @@ Example :
                        };
                        phy13: ethernet-phy@3 {
                                reg = <3>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -68,7 +65,6 @@ Example :
                        };
                        phy14: ethernet-phy@4 {
                                reg = <4>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -85,7 +81,6 @@ Example :
 
                        phy21: ethernet-phy@1 {
                                reg = <1>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -95,7 +90,6 @@ Example :
                        };
                        phy22: ethernet-phy@2 {
                                reg = <2>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -105,7 +99,6 @@ Example :
                        };
                        phy23: ethernet-phy@3 {
                                reg = <3>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -115,7 +108,6 @@ Example :
                        };
                        phy24: ethernet-phy@4 {
                                reg = <4>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
index f65606f8d632b86bab2af28eb4a198d426a378ab..491f5bd55203b31c62b63ed2bcc624a701fd1580 100644 (file)
@@ -47,7 +47,6 @@ Example :
 
                        phy11: ethernet-phy@1 {
                                reg = <1>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -57,7 +56,6 @@ Example :
                        };
                        phy12: ethernet-phy@2 {
                                reg = <2>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -67,7 +65,6 @@ Example :
                        };
                        phy13: ethernet-phy@3 {
                                reg = <3>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -77,7 +74,6 @@ Example :
                        };
                        phy14: ethernet-phy@4 {
                                reg = <4>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -94,7 +90,6 @@ Example :
 
                        phy21: ethernet-phy@1 {
                                reg = <1>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -104,7 +99,6 @@ Example :
                        };
                        phy22: ethernet-phy@2 {
                                reg = <2>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -114,7 +108,6 @@ Example :
                        };
                        phy23: ethernet-phy@3 {
                                reg = <3>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
@@ -124,7 +117,6 @@ Example :
                        };
                        phy24: ethernet-phy@4 {
                                reg = <4>;
-                               compatible = "marvell,88e1149r";
                                marvell,reg-init = <3 0x10 0 0x5777>,
                                        <3 0x11 0 0x00aa>,
                                        <3 0x12 0 0x4105>,
diff --git a/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt b/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt
new file mode 100644 (file)
index 0000000..aa63130
--- /dev/null
@@ -0,0 +1,26 @@
+Mediatek Gigabit Switch
+=======================
+
+The mediatek gigabit switch can be found on Mediatek SoCs (mt7620, mt7621).
+
+Required properties:
+- compatible: Should be "mediatek,mt7620-gsw" or "mediatek,mt7621-gsw"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the gigabit switch's interrupt
+- resets: Should contain the gigabit switch's resets
+- reset-names: Should contain the reset names "gsw"
+
+Example:
+
+gsw@10110000 {
+       compatible = "mediatek,mt7620-gsw";
+       reg = <0x10110000 8000>;
+
+       resets = <&rstctrl 23>;
+       reset-names = "gsw";
+
+       interrupt-parent = <&intc>;
+       interrupts = <17>;
+};
index 525e1658f2da5ca9ed22594fda97a25646ac2e20..bc1c3c8bf8fa37fa7e08dcabc65f1752a8a79749 100644 (file)
@@ -17,8 +17,7 @@ Optional Properties:
   "ethernet-phy-ieee802.3-c22" or "ethernet-phy-ieee802.3-c45" for
   PHYs that implement IEEE802.3 clause 22 or IEEE802.3 clause 45
   specifications. If neither of these are specified, the default is to
-  assume clause 22. The compatible list may also contain other
-  elements.
+  assume clause 22.
 
   If the phy's identifier is known then the list may contain an entry
   of the form: "ethernet-phy-idAAAA.BBBB" where
@@ -28,6 +27,9 @@ Optional Properties:
             4 hex digits. This is the chip vendor OUI bits 19:24,
             followed by 10 bits of a vendor specific ID.
 
+  The compatible list should not contain other values than those
+  listed here.
+
 - max-speed: Maximum PHY supported speed (10, 100, 1000...)
 
 - broken-turn-around: If set, indicates the PHY device does not correctly
diff --git a/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt b/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt
new file mode 100644 (file)
index 0000000..88b095d
--- /dev/null
@@ -0,0 +1,61 @@
+Ralink Frame Engine Ethernet controller
+=======================================
+
+The Ralink frame engine ethernet controller can be found on Ralink and
+Mediatek SoCs (RT288x, RT3x5x, RT366x, RT388x, rt5350, mt7620, mt7621, mt76x8).
+
+Depending on the SoC, there are a number of ports connected to the CPU port
+directly and/or via a (gigabit-)switch.
+
+* Ethernet controller node
+
+Required properties:
+- compatible: Should be one of "ralink,rt2880-eth", "ralink,rt3050-eth",
+  "ralink,rt3050-eth", "ralink,rt3883-eth", "ralink,rt5350-eth",
+  "mediatek,mt7620-eth", "mediatek,mt7621-eth"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the frame engine's interrupt
+- resets: Should contain the frame engine's resets
+- reset-names: Should contain the reset names "fe". If a switch is present,
+  "esw" is also required.
+
+
+* Ethernet port node
+
+Required properties:
+- compatible: Should be "ralink,eth-port"
+- reg: The number of the physical port
+- phy-handle: reference to the node describing the phy
+
+Example:
+
+mdio-bus {
+       ...
+       phy0: ethernet-phy@0 {
+               phy-mode = "mii";
+               reg = <0>;
+       };
+};
+
+ethernet@400000 {
+       compatible = "ralink,rt2880-eth";
+       reg = <0x00400000 10000>;
+
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       resets = <&rstctrl 18>;
+       reset-names = "fe";
+
+       interrupt-parent = <&cpuintc>;
+       interrupts = <5>;
+
+       port@0 {
+               compatible = "ralink,eth-port";
+               reg = <0>;
+               phy-handle = <&phy0>;
+       };
+
+};
diff --git a/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt b/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt
new file mode 100644 (file)
index 0000000..2e79bd3
--- /dev/null
@@ -0,0 +1,32 @@
+Ralink Fast Ethernet Embedded Switch
+====================================
+
+The ralink fast ethernet embedded switch can be found on Ralink and Mediatek
+SoCs (RT3x5x, RT5350, MT76x8).
+
+Required properties:
+- compatible: Should be "ralink,rt3050-esw"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the embedded switch's interrupt
+- resets: Should contain the embedded switch's resets
+- reset-names: Should contain the reset names "esw"
+
+Optional properties:
+- ralink,portmap: can be used to choose if the default switch setup is
+  llllw or wllll
+- ralink,led_polarity: override the active high/low settings of the LEDs
+
+Example:
+
+esw@10110000 {
+       compatible = "ralink,rt3050-esw";
+       reg = <0x10110000 8000>;
+
+       resets = <&rstctrl 23>;
+       reset-names = "esw";
+
+       interrupt-parent = <&intc>;
+       interrupts = <17>;
+};
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt b/Documentation/devicetree/bindings/net/wireless/ti,wlcore,spi.txt
new file mode 100644 (file)
index 0000000..9180724
--- /dev/null
@@ -0,0 +1,36 @@
+* Texas Instruments wl1271 wireless lan controller
+
+The wl1271 chip can be connected via SPI or via SDIO. This
+document describes the binding for the SPI connected chip.
+
+Required properties:
+- compatible :          Should be "ti,wl1271"
+- reg :                 Chip select address of device
+- spi-max-frequency :   Maximum SPI clocking speed of device in Hz
+- ref-clock-frequency : Reference clock frequency
+- interrupt-parent, interrupts :
+                        Should contain parameters for 1 interrupt line.
+                        Interrupt parameters: parent, line number, type.
+- vwlan-supply :        Phandle of the regulator node that powers/enables the wl1271 chip
+
+Optional properties:
+- clock-xtal :          boolean, clock is generated from XTAL
+
+- Please consult Documentation/devicetree/bindings/spi/spi-bus.txt
+  for optional SPI connection related properties.
+
+Examples:
+
+&spi1 {
+       wl1271@1 {
+               compatible = "ti,wl1271";
+
+               reg = <1>;
+               spi-max-frequency = <48000000>;
+               clock-xtal;
+               ref-clock-frequency = <38400000>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+               vwlan-supply = <&vwlan_fixed>;
+       };
+};
index 4e8b90e43dd83c72d81df6d644317ba60b489559..07a75094c5a8ed07e927c0641584d5e8348e3dc6 100644 (file)
@@ -8,6 +8,7 @@ OHCI and EHCI controllers.
 Required properties:
 - compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
              "renesas,pci-r8a7791" for the R8A7791 SoC;
+             "renesas,pci-r8a7793" for the R8A7793 SoC;
              "renesas,pci-r8a7794" for the R8A7794 SoC;
              "renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device
 
index 558fe528ae1951104b6266a2f5ed4849017c6b35..6cf99690eef94fa4ed033fb91e08c2aa010395bf 100644 (file)
@@ -4,6 +4,7 @@ Required properties:
 compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
            "renesas,pcie-r8a7790" for the R8A7790 SoC;
            "renesas,pcie-r8a7791" for the R8A7791 SoC;
+           "renesas,pcie-r8a7793" for the R8A7793 SoC;
            "renesas,pcie-r8a7795" for the R8A7795 SoC;
            "renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.
 
diff --git a/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt b/Documentation/devicetree/bindings/phy/phy-ath79-usb.txt
new file mode 100644 (file)
index 0000000..cafe219
--- /dev/null
@@ -0,0 +1,18 @@
+* Atheros AR71XX/9XXX USB PHY
+
+Required properties:
+- compatible: "qca,ar7100-usb-phy"
+- #phy-cells: should be 0
+- reset-names: "usb-phy"[, "usb-suspend-override"]
+- resets: references to the reset controllers
+
+Example:
+
+       usb-phy {
+               compatible = "qca,ar7100-usb-phy";
+
+               reset-names = "usb-phy", "usb-suspend-override";
+               resets = <&rst 4>, <&rst 3>;
+
+               #phy-cells = <0>;
+       };
index add7c38ec7d888b958df828139c1847ddc61c7e4..8662f3aaf312d9484a8e74373a5971a47d6b908a 100644 (file)
@@ -91,6 +91,9 @@ mpp60         60       gpio, dev(ale1), uart1(rxd), sata0(prsnt), pcie(rstout),
 mpp61         61       gpo, dev(we1), uart1(txd), audio(lrclk)
 mpp62         62       gpio, dev(a2), uart1(cts), tdm(drx), pcie(clkreq0),
                        audio(mclk), uart0(cts)
-mpp63         63       gpo, spi0(sck), tclk
+mpp63         63       gpio, spi0(sck), tclk
 mpp64         64       gpio, spi0(miso), spi0(cs1)
 mpp65         65       gpio, spi0(mosi), spi0(cs2)
+
+Note: According to the datasheet, mpp63 is a gpo, but there is at least
+one example of gpio usage on the D-Link DNS-327L board.
index b494f8b8ef72d8869756c7b18181649e41ef8164..e98a9652ccc8c4d3a2263fe5a67b9064b27d1f04 100644 (file)
@@ -5,15 +5,18 @@ Index     Device     Endianness properties
 ---------------------------------------------------
 1         BE         'big-endian'
 2         LE         'little-endian'
+3        Native     'native-endian'
 
 For one device driver, which will run in different scenarios above
 on different SoCs using the devicetree, we need one way to simplify
 this.
 
-Required properties:
-- {big,little}-endian: these are boolean properties, if absent
-  meaning that the CPU and the Device are in the same endianness mode,
-  these properties are for register values and all the buffers only.
+Optional properties:
+- {big,little,native}-endian: these are boolean properties; if absent,
+  the implementation will choose a default based on the device
+  being controlled.  These properties apply to register values and all
+  the buffers only.  Native endian means that the CPU and the device have
+  the same endianness.
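+
+As a hypothetical sketch, a device that shares the CPU's endianness could
+simply carry the new flag (node name and addresses are illustrative only):
+
+       dev: device@40031000 {
+               compatible = "name";
+               reg = <0x40031000 0x1000>;
+               native-endian;
+       };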
 
 Examples:
 Scenario 1 : CPU in LE mode & device in LE mode.
diff --git a/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt b/Documentation/devicetree/bindings/regulator/act8945a-regulator.txt
new file mode 100644 (file)
index 0000000..5c80a77
--- /dev/null
@@ -0,0 +1,80 @@
+Device-Tree bindings for regulators of Active-semi ACT8945A Multi-Function Device
+
+Required properties:
+ - compatible: "active-semi,act8945a", please refer to ../mfd/act8945a.txt.
+
+Optional properties:
+- active-semi,vsel-high: Indicates if the VSEL pin is set to logic-high.
+  If this property is missing, assume the VSEL pin is set to logic-low.
+
+Optional input supply properties:
+  - vp1-supply: The input supply for REG_DCDC1
+  - vp2-supply: The input supply for REG_DCDC2
+  - vp3-supply: The input supply for REG_DCDC3
+  - inl45-supply: The input supply for REG_LDO1 and REG_LDO2
+  - inl67-supply: The input supply for REG_LDO3 and REG_LDO4
+
+Any standard regulator properties can be used to configure each regulator.
+
+The valid names for regulators are:
+       REG_DCDC1, REG_DCDC2, REG_DCDC3, REG_LDO1, REG_LDO2, REG_LDO3, REG_LDO4.
+
+Example:
+       pmic@5b {
+               compatible = "active-semi,act8945a";
+               reg = <0x5b>;
+               status = "okay";
+
+               active-semi,vsel-high;
+
+               regulators {
+                       vdd_1v35_reg: REG_DCDC1 {
+                               regulator-name = "VDD_1V35";
+                               regulator-min-microvolt = <1350000>;
+                               regulator-max-microvolt = <1350000>;
+                               regulator-always-on;
+                       };
+
+                       vdd_1v2_reg: REG_DCDC2 {
+                               regulator-name = "VDD_1V2";
+                               regulator-min-microvolt = <1100000>;
+                               regulator-max-microvolt = <1300000>;
+                               regulator-always-on;
+                       };
+
+                       vdd_3v3_reg: REG_DCDC3 {
+                               regulator-name = "VDD_3V3";
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vdd_fuse_reg: REG_LDO1 {
+                               regulator-name = "VDD_FUSE";
+                               regulator-min-microvolt = <2500000>;
+                               regulator-max-microvolt = <2500000>;
+                               regulator-always-on;
+                       };
+
+                       vdd_3v3_lp_reg: REG_LDO2 {
+                               regulator-name = "VDD_3V3_LP";
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vdd_led_reg: REG_LDO3 {
+                               regulator-name = "VDD_LED";
+                               regulator-min-microvolt = <3300000>;
+                               regulator-max-microvolt = <3300000>;
+                               regulator-always-on;
+                       };
+
+                       vdd_sdhc_1v8_reg: REG_LDO4 {
+                               regulator-name = "VDD_SDHC_1V8";
+                               regulator-min-microvolt = <1800000>;
+                               regulator-max-microvolt = <1800000>;
+                               regulator-always-on;
+                       };
+               };
+       };
index 78183182dad9e04cd9d33d7d20309e8f1a35371b..ca58a68ffdf1c1f830a2a6daa3149d28c6d9bf89 100644 (file)
@@ -28,6 +28,7 @@ Optional properties:
   - ti,dvs-gpio: GPIO specifier for external DVS pin control of LP872x devices.
   - ti,dvs-vsel: DVS selector. 0 = SEL_V1, 1 = SEL_V2.
   - ti,dvs-state: initial DVS pin state. 0 = DVS_LOW, 1 = DVS_HIGH.
+  - enable-gpios: GPIO specifier for EN pin control of LP872x devices.
 
   Sub nodes for regulator_init_data
     LP8720 has maximum 6 nodes. (child name: ldo1 ~ 5 and buck)
index 09d796ed48be4243005e464add1b266678eba4a0..879e98d3b9aa9c8a56492d724e889e64fc2e50fd 100644 (file)
@@ -60,7 +60,7 @@ The possible values for "regulator-initial-mode" and "regulator-mode" are:
        1: Normal regulator voltage output mode.
        3: Low Power which reduces the quiescent current down to only 1uA
 
-The list of valid modes are defined in the dt-bindings/clock/maxim,max77802.h
+The valid modes list is defined in the dt-bindings/regulator/maxim,max77802.h
 header and can be included by device tree source files.
 
 The standard "regulator-mode" property can only be used for regulators that
diff --git a/Documentation/devicetree/bindings/regulator/qcom,saw-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,saw-regulator.txt
new file mode 100644 (file)
index 0000000..977fec0
--- /dev/null
@@ -0,0 +1,31 @@
+Qualcomm SAW Regulators
+
+SAW (Subsystem Power Manager and Adaptive Voltage Scaling Wrapper) is a hardware
+block in the Qualcomm chipsets that regulates the power to the CPU cores on devices
+such as APQ8064, MSM8974, APQ8084 and others.
+
+- compatible:
+       Usage: required
+       Value type: <string>
+       Definition: must be one of:
+                       "qcom,apq8064-saw2-v1.1-regulator"
+
+Example:
+                saw0: power-controller@2089000 {
+                       compatible = "qcom,apq8064-saw2-v1.1-cpu", "qcom,saw2", "syscon", "simple-mfd";
+                       reg = <0x02089000 0x1000>, <0x02009000 0x1000>;
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       saw0_regulator: regulator@2089000 {
+                               compatible = "qcom,apq8064-saw2-v1.1-regulator";
+                               regulator-always-on;
+                               regulator-min-microvolt = <825000>;
+                               regulator-max-microvolt = <1250000>;
+                       };
+               };
+
+
+               &CPU0 {
+                       cpu-supply = <&saw0_regulator>;
+               };
diff --git a/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt b/Documentation/devicetree/bindings/rng/brcm,bcm6368.txt
new file mode 100644 (file)
index 0000000..4b5ac60
--- /dev/null
@@ -0,0 +1,17 @@
+BCM6368 Random number generator
+
+Required properties:
+
+- compatible : should be "brcm,bcm6368-rng"
+- reg : Specifies base physical address and size of the registers
+- clocks : phandle to clock-controller plus clock-specifier pair
+- clock-names : "ipsec" as a clock name
+
+Example:
+       random: rng@10004180 {
+               compatible = "brcm,bcm6368-rng";
+               reg = <0x10004180 0x14>;
+
+               clocks = <&periph_clk 18>;
+               clock-names = "ipsec";
+       };
diff --git a/Documentation/devicetree/bindings/serial/brcm,bcm2835-aux-uart.txt b/Documentation/devicetree/bindings/serial/brcm,bcm2835-aux-uart.txt
new file mode 100644 (file)
index 0000000..b5cc629
--- /dev/null
@@ -0,0 +1,18 @@
+* BCM2835 AUXILIARY UART
+
+Required properties:
+
+- compatible: "brcm,bcm2835-aux-uart"
+- reg: The base address of the UART register bank.
+- interrupts: A single interrupt specifier.
+- clocks: Clock driving the hardware; used to figure out the baud rate
+  divisor.
+
+Example:
+
+       uart1: serial@7e215040 {
+               compatible = "brcm,bcm2835-aux-uart";
+               reg = <0x7e215040 0x40>;
+               interrupts = <1 29>;
+               clocks = <&aux BCM2835_AUX_CLOCK_UART>;
+       };
index 401b1b33c2c406520b2dd881e506cf75dc9d3042..528c3b90f23cb04b931be9637f850e7e315e0415 100644 (file)
@@ -19,6 +19,8 @@ Required properties:
     - "renesas,scifa-r8a7791" for R8A7791 (R-Car M2-W) SCIFA compatible UART.
     - "renesas,scifb-r8a7791" for R8A7791 (R-Car M2-W) SCIFB compatible UART.
     - "renesas,hscif-r8a7791" for R8A7791 (R-Car M2-W) HSCIF compatible UART.
+    - "renesas,scif-r8a7792" for R8A7792 (R-Car V2H) SCIF compatible UART.
+    - "renesas,hscif-r8a7792" for R8A7792 (R-Car V2H) HSCIF compatible UART.
     - "renesas,scif-r8a7793" for R8A7793 (R-Car M2-N) SCIF compatible UART.
     - "renesas,scifa-r8a7793" for R8A7793 (R-Car M2-N) SCIFA compatible UART.
     - "renesas,scifb-r8a7793" for R8A7793 (R-Car M2-N) SCIFB compatible UART.
index 112756e11802c7ccfbaccca4462e0987f88e0053..13dc6a3fdb4a1b239a38f1cd19dd0bb1ff2a681d 100644 (file)
@@ -6,6 +6,7 @@ powered up/down by software based on different application scenes to save power.
 Required properties for power domain controller:
 - compatible: Should be one of the following.
        "rockchip,rk3288-power-controller" - for RK3288 SoCs.
+       "rockchip,rk3368-power-controller" - for RK3368 SoCs.
 - #power-domain-cells: Number of cells in a power-domain specifier.
        Should be 1 for multiple PM domains.
 - #address-cells: Should be 1.
@@ -14,6 +15,7 @@ Required properties for power domain controller:
 Required properties for power domain sub nodes:
 - reg: index of the power domain, should use macros in:
        "include/dt-bindings/power/rk3288-power.h" - for RK3288 type power domain.
+       "include/dt-bindings/power/rk3368-power.h" - for RK3368 type power domain.
 - clocks (optional): phandles to clocks which need to be enabled while power domain
        switches state.
 
@@ -31,11 +33,24 @@ Example:
                };
        };
 
+        power: power-controller {
+                compatible = "rockchip,rk3368-power-controller";
+                #power-domain-cells = <1>;
+                #address-cells = <1>;
+                #size-cells = <0>;
+
+                pd_gpu_1 {
+                        reg = <RK3368_PD_GPU_1>;
+                        clocks = <&cru ACLK_GPU_CFG>;
+                };
+        };
+
 Node of a device using power domains must have a power-domains property,
 containing a phandle to the power device node and an index specifying which
 power domain to use.
 The index should use macros in:
        "include/dt-bindings/power/rk3288-power.h" - for rk3288 type power domain.
+       "include/dt-bindings/power/rk3368-power.h" - for rk3368 type power domain.
 
 Example of the node using power domain:
 
@@ -44,3 +59,9 @@ Example of the node using power domain:
                power-domains = <&power RK3288_PD_GPU>;
                /* ... */
        };
+
+       node {
+                /* ... */
+                power-domains = <&power RK3368_PD_GPU_1>;
+                /* ... */
+        };
index ce55c0a6f7578ee192a207a6b247095d605072b8..ceaef512698967ff16677b3771d2fd8c5227aaac 100644 (file)
@@ -24,12 +24,17 @@ The compatible list for this generic sound card currently:
 
  "fsl,imx-audio-cs42888"
 
+ "fsl,imx-audio-cs427x"
+ (compatible with CS4271 and CS4272)
+
  "fsl,imx-audio-wm8962"
  (compatible with Documentation/devicetree/bindings/sound/imx-audio-wm8962.txt)
 
  "fsl,imx-audio-sgtl5000"
  (compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)
 
+ "fsl,imx-audio-wm8960"
+
 Required properties:
 
   - compatible         : Contains one of entries in the compatible list.
@@ -61,6 +66,12 @@ Optional properties:
   - audio-asrc         : The phandle of ASRC. It can be absent if there's no
                          need to add ASRC support via DPCM.
 
+Optional unless SSI is selected as a CPU DAI:
+
+  - mux-int-port       : The internal port of the i.MX audio muxer (AUDMUX)
+
+  - mux-ext-port       : The external port of the i.MX audio muxer
+
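+When the SSI is used as the CPU DAI, the muxer ports might be wired up as in
+this sketch (the port numbers below are board specific and purely
+illustrative):
+
+       mux-int-port = <2>;
+       mux-ext-port = <3>;
+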
 Example:
 sound-cs42888 {
        compatible = "fsl,imx-audio-cs42888";
diff --git a/Documentation/devicetree/bindings/sound/max98926.txt b/Documentation/devicetree/bindings/sound/max98926.txt
new file mode 100644 (file)
index 0000000..0b7f4e4
--- /dev/null
@@ -0,0 +1,32 @@
+max98926 audio CODEC
+
+This device supports I2C.
+
+Required properties:
+
+  - compatible : "maxim,max98926"
+
+  - vmon-slot-no : slot number used to send voltage information
+                   or in inteleave mode this will be used as
+                   interleave slot.
+
+  - imon-slot-no : slot number used to send current information
+
+  - interleave-mode : When using two MAX98926 devices in a system it is
+                      possible to create ADC data that will
+                      overflow the frame size. Digital Audio Interleave
+                      mode provides a means to output VMON and IMON data
+                      from two devices on a single DOUT line when running
+                      smaller frame sizes such as 32 BCLKS per LRCLK or
+                      48 BCLKS per LRCLK.
+
+  - reg : the I2C address of the device for I2C
+
+Example:
+
+codec: max98926@1a {
+   compatible = "maxim,max98926";
+   vmon-slot-no = <0>;
+   imon-slot-no = <2>;
+   reg = <0x1a>;
+};
index 4ae70d3462d65e6539278f4036e3fe26dc73f0ca..436c2b247693f5a9a60a58b28c365aecd1741644 100644 (file)
@@ -1,6 +1,6 @@
 Texas Instruments pcm179x DT bindings
 
-This driver supports the SPI bus.
+This driver supports both the I2C and SPI bus.
 
 Required properties:
 
@@ -9,6 +9,11 @@ Required properties:
 For required properties on SPI, please consult
 Documentation/devicetree/bindings/spi/spi-bus.txt
 
+Required properties on I2C:
+
+ - reg: the I2C address
+
+
 Examples:
 
        codec_spi: 1792a@0 {
@@ -16,3 +21,7 @@ Examples:
                spi-max-frequency = <600000>;
        };
 
+       codec_i2c: 1792a@4c {
+               compatible = "ti,pcm1792a";
+               reg = <0x4c>;
+       };
diff --git a/Documentation/devicetree/bindings/sound/rt5514.txt b/Documentation/devicetree/bindings/sound/rt5514.txt
new file mode 100644 (file)
index 0000000..e24436f
--- /dev/null
@@ -0,0 +1,25 @@
+RT5514 audio CODEC
+
+This device supports I2C only.
+
+Required properties:
+
+- compatible : "realtek,rt5514".
+
+- reg : The I2C address of the device.
+
+Pins on the device (for linking into audio routes) for RT5514:
+
+  * DMIC1L
+  * DMIC1R
+  * DMIC2L
+  * DMIC2R
+  * AMICL
+  * AMICR
+
+Example:
+
+codec: rt5514@57 {
+       compatible = "realtek,rt5514";
+       reg = <0x57>;
+};
index efc48c65198db044c10c2516d9988ebcf8c91f76..e410858185596a0fb74ec5812cef3dd3e6282762 100644 (file)
@@ -8,6 +8,12 @@ Required properties:
 
 - reg : The I2C address of the device.
 
+Optional properties:
+
+- clocks: The phandle of the master clock to the CODEC.
+
+- clock-names: Should be "mclk".
+
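+For instance (the I2C address and the clock provider phandle below are only
+illustrative):
+
+       codec: rt5616@1b {
+               compatible = "realtek,rt5616";
+               reg = <0x1b>;
+               clocks = <&i2s_mclk>;
+               clock-names = "mclk";
+       };
+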
 Pins on the device (for linking into audio routes) for RT5616:
 
   * IN1P
diff --git a/Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt b/Documentation/devicetree/bindings/sparc_sun_oracle_rng.txt
new file mode 100644 (file)
index 0000000..b0b2111
--- /dev/null
@@ -0,0 +1,30 @@
+HWRNG support for the n2_rng driver
+
+Required properties:
+- reg          : base address to sample from
+- compatible   : should contain one of the following
+       RNG versions:
+       - 'SUNW,n2-rng' for Niagara 2 Platform (SUN UltraSPARC T2 CPU)
+       - 'SUNW,vf-rng' for Victoria Falls Platform (SUN UltraSPARC T2 Plus CPU)
+       - 'SUNW,kt-rng' for Rainbow/Yosemite Falls Platform (SUN SPARC T3/T4), (UltraSPARC KT/Niagara 3 - development names)
+       more recent systems (after Oracle acquisition of SUN)
+       - 'ORCL,m4-rng' for SPARC T5/M5
+       - 'ORCL,m7-rng' for SPARC T7/M7
+
+Examples:
+/* linux LDOM on SPARC T5-2 */
+Node 0xf029a4f4
+       .node:  f029a4f4
+       rng-#units:  00000002
+       compatible: 'ORCL,m4-rng'
+       reg:  0000000e
+       name: 'random-number-generator'
+
+/* solaris on SPARC M7-8 */
+Node 0xf028c08c
+       rng-#units:  00000003
+       compatible: 'ORCL,m7-rng'
+       reg:  0000000e
+       name:  'random-number-generator'
+
+PS: see also prtconfs.git by DaveM
diff --git a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt
new file mode 100644 (file)
index 0000000..8a18d71
--- /dev/null
@@ -0,0 +1,31 @@
+Analog Devices AXI SPI Engine controller Device Tree Bindings
+
+Required properties:
+- compatible           : Must be "adi,axi-spi-engine-1.00.a"
+- reg                  : Physical base address and size of the register map.
+- interrupts           : Property with a value describing the interrupt
+                         number.
+- clock-names          : List of input clock names - "s_axi_aclk", "spi_clk"
+- clocks               : Clock phandles and specifiers (See clock bindings for
+                         details on clock-names and clocks).
+- #address-cells       : Must be <1>
+- #size-cells          : Must be <0>
+
+Optional subnodes:
+       Subnodes are used to represent the SPI slave devices connected to the SPI
+       master. They follow the generic SPI bindings as outlined in spi-bus.txt.
+
+Example:
+
+    spi@44a00000 {
+               compatible = "adi,axi-spi-engine-1.00.a";
+               reg = <0x44a00000 0x1000>;
+               interrupts = <0 56 4>;
+               clocks = <&clkc 15 &clkc 15>;
+               clock-names = "s_axi_aclk", "spi_clk";
+
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               /* SPI devices */
+    };
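+
+A connected slave device is described with the usual SPI device bindings
+from spi-bus.txt; a minimal sketch (the compatible string, chip select and
+frequency below are purely illustrative):
+
+    spi@44a00000 {
+               /* controller properties as in the example above */
+
+               adc@0 {
+                       compatible = "some,spi-adc";
+                       reg = <0>;
+                       spi-max-frequency = <1000000>;
+               };
+    };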
index 332e625f6ed01cb4e442c0ea530ab29eb92f2a77..e5ee3f15989337f37b9d5e43036c31a2b4c9ff11 100644 (file)
@@ -1,8 +1,9 @@
 * Renesas R-Car Thermal
 
 Required properties:
-- compatible           : "renesas,thermal-<soctype>", "renesas,rcar-thermal"
-                         as fallback.
+- compatible           : "renesas,thermal-<soctype>",
+                          "renesas,rcar-gen2-thermal" (with thermal-zone) or
+                          "renesas,rcar-thermal" (without thermal-zone) as fallback.
                          Examples with soctypes are:
                            - "renesas,thermal-r8a73a4" (R-Mobile APE6)
                            - "renesas,thermal-r8a7779" (R-Car H1)
@@ -36,3 +37,35 @@ thermal@e61f0000 {
                0xe61f0300 0x38>;
        interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
 };
+
+Example (with thermal-zone):
+
+thermal-zones {
+       cpu_thermal: cpu-thermal {
+               polling-delay-passive   = <1000>;
+               polling-delay           = <5000>;
+
+               thermal-sensors = <&thermal>;
+
+               trips {
+                       cpu-crit {
+                               temperature     = <115000>;
+                               hysteresis      = <0>;
+                               type            = "critical";
+                       };
+               };
+               cooling-maps {
+               };
+       };
+};
+
+thermal: thermal@e61f0000 {
+       compatible =    "renesas,thermal-r8a7790",
+                       "renesas,rcar-gen2-thermal",
+                       "renesas,rcar-thermal";
+       reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
+       interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+       clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
+       power-domains = <&cpg_clocks>;
+       #thermal-sensor-cells = <0>;
+};
index 0dfa60d88dd3b4bb4ce2a42d2c0b3d15f4835e45..08efe6bc21935fa36785612043489faab5ba33c7 100644 (file)
@@ -2,8 +2,10 @@
 
 Required properties:
 - compatible : should be "rockchip,<name>-tsadc"
+   "rockchip,rk3228-tsadc": found on RK3228 SoCs
    "rockchip,rk3288-tsadc": found on RK3288 SoCs
    "rockchip,rk3368-tsadc": found on RK3368 SoCs
+   "rockchip,rk3399-tsadc": found on RK3399 SoCs
 - reg : physical base address of the controller and length of memory mapped
        region.
 - interrupts : The interrupt number to the cpu. The interrupt specifier format
index 72e2c5a2b3278facb20378383cbb63baa6485e0f..9c548515fc54047a5b3fb0e4af9ac46f2a8f5aa7 100644 (file)
@@ -170,6 +170,7 @@ opencores   OpenCores.org
 option Option NV
 ortustech      Ortus Technology Co., Ltd.
 ovti   OmniVision Technologies
+ORCL   Oracle Corporation
 panasonic      Panasonic Corporation
 parade Parade Technologies Inc.
 pericom        Pericom Technology Inc.
@@ -204,6 +205,7 @@ seagate     Seagate Technology PLC
 semtech        Semtech Corporation
 sgx    SGX Sensortech
 sharp  Sharp Corporation
+si-en  Si-En Technology Ltd.
 sigma  Sigma Designs, Inc.
 sil    Silicon Image
 silabs Silicon Laboratories
@@ -227,6 +229,7 @@ startek     Startek
 ste    ST-Ericsson
 stericsson     ST-Ericsson
 synology       Synology, Inc.
+SUNW   Sun Microsystems, Inc
 tbs    TBS Technologies
 tcl    Toby Churchill Ltd.
 technologic    Technologic Systems
index 480c8de3c2c44786174e112795f61b2381d3b09f..4f4a84b6903a64acbebe38ffcb67bbb25d6815e9 100644 (file)
@@ -257,17 +257,15 @@ Access to a dma_buf from the kernel context involves three steps:
 
    Interface:
       int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-                                  size_t start, size_t len,
                                   enum dma_data_direction direction)
 
    This allows the exporter to ensure that the memory is actually available for
    cpu access - the exporter might need to allocate or swap-in and pin the
    backing storage. The exporter also needs to ensure that cpu access is
-   coherent for the given range and access direction. The range and access
-   direction can be used by the exporter to optimize the cache flushing, i.e.
-   access outside of the range or with a different direction (read instead of
-   write) might return stale or even bogus data (e.g. when the exporter needs to
-   copy the data to temporary storage).
+   coherent for the access direction. The direction can be used by the exporter
+   to optimize the cache flushing, i.e. access with a different direction (read
+   instead of write) might return stale or even bogus data (e.g. when the
+   exporter needs to copy the data to temporary storage).
 
    This step might fail, e.g. in oom conditions.
 
@@ -322,14 +320,13 @@ Access to a dma_buf from the kernel context involves three steps:
 
 3. Finish access
 
-   When the importer is done accessing the range specified in begin_cpu_access,
-   it needs to announce this to the exporter (to facilitate cache flushing and
-   unpinning of any pinned resources). The result of any dma_buf kmap calls
-   after end_cpu_access is undefined.
+   When the importer is done accessing the CPU, it needs to announce this to
+   the exporter (to facilitate cache flushing and unpinning of any pinned
+   resources). The result of any dma_buf kmap calls after end_cpu_access is
+   undefined.
 
    Interface:
       void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
-                                 size_t start, size_t len,
                                  enum dma_data_direction dir);
 
 
index e456696cfef29bf49ba180ece3bab00b31699789..9d9e47dfc0138a331b504b49a885f2678de4e13a 100644 (file)
@@ -75,7 +75,7 @@ If one of the drivers fails to register, all drivers registered up to that
 point will be unregistered in reverse order. Note that there is a convenience
 macro that passes THIS_MODULE as owner parameter:
 
-       #define platform_register_driver(drivers, count)
+       #define platform_register_drivers(drivers, count)
 
 
 Device Enumeration
index 669dc6ce43305e17f29ea95066bec2a80b59b70d..6f4b12f7b8444cc121ece7f6de89ebb217e64461 100644 (file)
@@ -190,7 +190,7 @@ and watch another one.
 Patches, comments and suggestions are very very welcome.
 
 3. Acknowledgements
-   Amaury Demol (ademol@dibcom.fr) and Francois Kanounnikoff from DiBcom for
+   Amaury Demol (Amaury.Demol@parrot.com) and Francois Kanounnikoff from DiBcom for
     providing specs, code and help, on which the dvb-dibusb, dib3000mb and
     dib3000mc are based.
 
index 7747024d3bb70023fbff500cd3fc44546b31511b..e157469882614ae96ddced101a5fc77315d49043 100644 (file)
@@ -10,12 +10,12 @@ arch/x86/boot/header.S and arch/x86/boot/compressed/eboot.c,
 respectively. For ARM the EFI stub is implemented in
 arch/arm/boot/compressed/efi-header.S and
 arch/arm/boot/compressed/efi-stub.c. EFI stub code that is shared
-between architectures is in drivers/firmware/efi/efi-stub-helper.c.
+between architectures is in drivers/firmware/efi/libstub.
 
 For arm64, there is no compressed kernel support, so the Image itself
 masquerades as a PE/COFF image and the EFI stub is linked into the
 kernel. The arm64 EFI stub lives in arch/arm64/kernel/efi-entry.S
-and arch/arm64/kernel/efi-stub.c.
+and drivers/firmware/efi/libstub/arm64-stub.c.
 
 By using the EFI boot stub it's possible to boot a Linux kernel
 without the use of a conventional EFI boot loader, such as grub or
index 6065124a072f9b0229e77489462a001e884a01a1..c16b5b59568892767354d12397446590572c8a3e 100755 (executable)
@@ -5,7 +5,7 @@
 # (If no arguments are given then it will print the host architecture's status.)
 #
 
-ARCH=${1:-$(arch | sed 's/x86_64/x86/' | sed 's/i386/x86/')}
+ARCH=${1:-$(uname -m | sed 's/x86_64/x86/' | sed 's/i386/x86/')}
 
 cd $(dirname $0)
 echo "#"
index af6816bccb439d76a7f80d43f97f4a95e7d604c2..df1d1f3c9af290aa6ffaa1584f71ba6025e54c4e 100644 (file)
@@ -9,7 +9,7 @@
     |       alpha: | TODO |
     |         arc: | TODO |
     |         arm: | TODO |
-    |       arm64: | TODO |
+    |       arm64: |  ok  |
     |       avr32: | TODO |
     |    blackfin: | TODO |
     |         c6x: | TODO |
diff --git a/Documentation/filesystems/orangefs.txt b/Documentation/filesystems/orangefs.txt
new file mode 100644 (file)
index 0000000..925a53e
--- /dev/null
@@ -0,0 +1,353 @@
+ORANGEFS
+========
+
+OrangeFS is an LGPL userspace scale-out parallel storage system. It is ideal
+for large storage problems faced by HPC, BigData, Streaming Video,
+Genomics, and Bioinformatics.
+
+Orangefs, originally called PVFS, was first developed in 1993 by
+Walt Ligon and Eric Blumer as a parallel file system for Parallel
+Virtual Machine (PVM) as part of a NASA grant to study the I/O patterns
+of parallel programs.
+
+Orangefs features include:
+
+  * Distributes file data among multiple file servers
+  * Supports simultaneous access by multiple clients
+  * Stores file data and metadata on servers using local file system
+    and access methods
+  * Userspace implementation is easy to install and maintain
+  * Direct MPI support
+  * Stateless
+
+
+MAILING LIST
+============
+
+http://beowulf-underground.org/mailman/listinfo/pvfs2-users
+
+
+DOCUMENTATION
+=============
+
+http://www.orangefs.org/documentation/
+
+
+USERSPACE FILESYSTEM SOURCE
+===========================
+
+http://www.orangefs.org/download
+
+Orangefs versions prior to 2.9.3 are not compatible with the
+upstream version of the kernel client.
+
+
+BUILDING THE USERSPACE FILESYSTEM ON A SINGLE SERVER
+====================================================
+
+When Orangefs is upstream, "--with-kernel" shouldn't be needed, but
+until then the path to where the kernel with the Orangefs kernel client
+patch was built is needed to ensure that pvfs2-client-core (the bridge
+between kernel space and user space) will build properly. You can omit
+--prefix if you don't care that things are sprinkled around in
+/usr/local.
+
+./configure --prefix=/opt/ofs --with-kernel=/path/to/orangefs/kernel
+
+make
+
+make install
+
+Create an orangefs config file:
+/opt/ofs/bin/pvfs2-genconfig /etc/pvfs2.conf
+
+  for "Enter hostnames", use the hostname, don't let it default to
+  localhost.
+
+create a pvfs2tab file in /etc:
+cat /etc/pvfs2tab
+tcp://myhostname:3334/orangefs /mymountpoint pvfs2 defaults,noauto 0 0
+
+create the mount point you specified in the tab file if needed:
+mkdir /mymountpoint
+
+bootstrap the server:
+/opt/ofs/sbin/pvfs2-server /etc/pvfs2.conf -f
+
+start the server:
+/opt/ofs/sbin/pvfs2-server /etc/pvfs2.conf
+
+Now the server is running. At this point you might like to
+prove things are working with:
+
+/opt/ofs/bin/pvfs2-ls /mymountpoint
+
+You might not want to enforce SELinux; it doesn't seem to matter as of
+Linux 3.11...
+
+If stuff seems to be working, turn on the client core:
+/opt/ofs/sbin/pvfs2-client -p /opt/ofs/sbin/pvfs2-client-core
+
+Mount your filesystem.
+mount -t pvfs2 tcp://myhostname:3334/orangefs /mymountpoint
+
+
+OPTIONS
+=======
+
+The following mount options are accepted:
+
+  acl
+    Allow the use of Access Control Lists on files and directories.
+
+  intr
+    Some operations between the kernel client and the user space
+    filesystem can be interruptible, such as changes in debug levels
+    and the setting of tunable parameters.
+
+  local_lock
+    Enable posix locking from the perspective of "this" kernel. The
+    default file_operations lock action is to return ENOSYS. Posix
+    locking kicks in if the filesystem is mounted with -o local_lock.
+    Distributed locking is being worked on for the future.
+
+
+DEBUGGING
+=========
+
+If you want the debug (GOSSIP) statements in a particular
+source file (inode.c for example) go to syslog:
+
+  echo inode > /sys/kernel/debug/orangefs/kernel-debug
+
+No debugging (the default):
+
+  echo none > /sys/kernel/debug/orangefs/kernel-debug
+
+Debugging from several source files:
+
+  echo inode,dir > /sys/kernel/debug/orangefs/kernel-debug
+
+All debugging:
+
+  echo all > /sys/kernel/debug/orangefs/kernel-debug
+
+Get a list of all debugging keywords:
+
+  cat /sys/kernel/debug/orangefs/debug-help
+
+
+PROTOCOL BETWEEN KERNEL MODULE AND USERSPACE
+============================================
+
+Orangefs is a user space filesystem and an associated kernel module.
+We'll just refer to the user space part of Orangefs as "userspace"
+from here on out. Orangefs descends from PVFS, and userspace code
+still uses PVFS for function and variable names. Userspace typedefs
+many of the important structures. Function and variable names in
+the kernel module have been transitioned to "orangefs", and The Linux
+Coding Style avoids typedefs, so kernel module structures that
+correspond to userspace structures are not typedefed.
+
+The kernel module implements a pseudo device that userspace
+can read from and write to. Userspace can also manipulate the
+kernel module through the pseudo device with ioctl.
+
+THE BUFMAP:
+
+At startup userspace allocates two page-size-aligned (posix_memalign)
+mlocked memory buffers: one is used for IO and one is used for readdir
+operations. The IO buffer is 41943040 bytes and the readdir buffer is
+4194304 bytes. Each buffer contains logical chunks, or partitions, and
+a pointer to each buffer is added to its own PVFS_dev_map_desc structure
+which also describes its total size, as well as the size and number of
+the partitions.
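+
+As a sketch, based purely on the description above (the real userspace
+typedef may differ in exact member types), each descriptor looks roughly
+like:
+
+  struct PVFS_dev_map_desc {
+          void    *ptr;         /* start of the mlocked buffer          */
+          int32_t total_size;   /* whole buffer size in bytes           */
+          int32_t size;         /* size of one partition                */
+          int32_t count;        /* number of partitions                 */
+  };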
+
+A pointer to the IO buffer's PVFS_dev_map_desc structure is sent to a
+mapping routine in the kernel module with an ioctl. The structure is
+copied from user space to kernel space with copy_from_user and is used
+to initialize the kernel module's "bufmap" (struct orangefs_bufmap), which
+then contains:
+
+  * refcnt - a reference counter
+  * desc_size - PVFS2_BUFMAP_DEFAULT_DESC_SIZE (4194304) - the IO buffer's
+    partition size, which represents the filesystem's block size and
+    is used for s_blocksize in super blocks.
+  * desc_count - PVFS2_BUFMAP_DEFAULT_DESC_COUNT (10) - the number of
+    partitions in the IO buffer.
+  * desc_shift - log2(desc_size), used for s_blocksize_bits in super blocks.
+  * total_size - the total size of the IO buffer.
+  * page_count - the number of 4096 byte pages in the IO buffer.
+  * page_array - a pointer to page_count * (sizeof(struct page*)) bytes
+    of kcalloced memory. This memory is used as an array of pointers
+    to each of the pages in the IO buffer through a call to get_user_pages.
+  * desc_array - a pointer to desc_count * (sizeof(struct orangefs_bufmap_desc))
+    bytes of kcalloced memory. This memory is further initialized:
+
+      user_desc is the kernel's copy of the IO buffer's ORANGEFS_dev_map_desc
+      structure. user_desc->ptr points to the IO buffer.
+
+      pages_per_desc = bufmap->desc_size / PAGE_SIZE
+      offset = 0
+
+        bufmap->desc_array[0].page_array = &bufmap->page_array[offset]
+        bufmap->desc_array[0].array_count = pages_per_desc = 1024
+        bufmap->desc_array[0].uaddr = (user_desc->ptr) + (0 * 1024 * 4096)
+        offset += 1024
+                           .
+                           .
+                           .
+        bufmap->desc_array[9].page_array = &bufmap->page_array[offset]
+        bufmap->desc_array[9].array_count = pages_per_desc = 1024
+        bufmap->desc_array[9].uaddr = (user_desc->ptr) +
+                                               (9 * 1024 * 4096)
+        offset += 1024
+
+  * buffer_index_array - a desc_count sized array of ints, used to
+    indicate which of the IO buffer's partitions are available to use.
+  * buffer_index_lock - a spinlock to protect buffer_index_array during update.
+  * readdir_index_array - a five (ORANGEFS_READDIR_DEFAULT_DESC_COUNT) element
+    int array used to indicate which of the readdir buffer's partitions are
+    available to use.
+  * readdir_index_lock - a spinlock to protect readdir_index_array during
+    update.
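+
+Putting the members above together, the kernel module's bufmap looks
+roughly like this (a sketch that mirrors the description; the exact
+types in fs/orangefs/ may differ slightly):
+
+  struct orangefs_bufmap {
+          atomic_t   refcnt;
+          int        desc_size;
+          int        desc_shift;
+          int        desc_count;
+          int        total_size;
+          int        page_count;
+          struct page **page_array;
+          struct orangefs_bufmap_desc *desc_array;
+          int        *buffer_index_array;
+          spinlock_t buffer_index_lock;
+          int        *readdir_index_array;
+          spinlock_t readdir_index_lock;
+  };
+
+  /* The per-descriptor initialization spelled out above is just: */
+  for (i = 0; i < bufmap->desc_count; i++) {
+          bufmap->desc_array[i].page_array  =
+                  &bufmap->page_array[i * pages_per_desc];
+          bufmap->desc_array[i].array_count = pages_per_desc;
+          bufmap->desc_array[i].uaddr       =
+                  user_desc->ptr + i * bufmap->desc_size;
+  }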
+
+OPERATIONS:
+
+The kernel module builds an "op" (struct orangefs_kernel_op_s) when it
+needs to communicate with userspace. Part of the op contains the "upcall"
+which expresses the request to userspace. Part of the op eventually
+contains the "downcall" which expresses the results of the request.
+
+The slab allocator is used to keep a cache of op structures handy.
+
+The life cycle of a typical op goes like this:
+
+  - obtain and initialize an op structure from the op_cache.
+
+  - queue the op to the pvfs device so that its upcall data can be
+    read by userspace.
+
+  - wait for userspace to write downcall data back to the pvfs device.
+
+  - consume the downcall and return the op struct to the op_cache.
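+
+Expressed as heavily simplified code (fill_upcall() and consume_downcall()
+are hypothetical stand-ins for the request-specific details, and the other
+helper names are only approximations of the ones in fs/orangefs/):
+
+  struct orangefs_kernel_op_s *op;
+  int ret;
+
+  op = op_alloc(ORANGEFS_VFS_OP_GETATTR);     /* from the op_cache        */
+  if (!op)
+          return -ENOMEM;
+
+  fill_upcall(&op->upcall);                   /* express the request      */
+
+  /* queue the op to the pvfs device and sleep until userspace has
+   * written the downcall back
+   */
+  ret = service_operation(op, __func__, 0);
+  if (ret == 0)
+          consume_downcall(&op->downcall);    /* use the results          */
+
+  op_release(op);                             /* back to the op_cache     */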
+
+Some ops are atypical with respect to their payloads: readdir and io ops.
+
+  - readdir ops use the smaller of the two pre-allocated pre-partitioned
+    memory buffers. The readdir buffer is only available to userspace.
+    The kernel module obtains an index to a free partition before launching
+    a readdir op. Userspace deposits the results into the indexed partition
+    and then writes them back to the pvfs device.
+
+  - io (read and write) ops use the larger of the two pre-allocated
+    pre-partitioned memory buffers. The IO buffer is accessible from
+    both userspace and the kernel module. The kernel module obtains an
+    index to a free partition before launching an io op. The kernel module
+    deposits write data into the indexed partition, to be consumed
+    directly by userspace. Userspace deposits the results of read
+    requests into the indexed partition, to be consumed directly
+    by the kernel module.
+
+Responses to kernel requests are all packaged in pvfs2_downcall_t
+structs. Besides a few other members, pvfs2_downcall_t contains a
+union of structs, each of which is associated with a particular
+response type.
+
+The several members outside of the union are:
+ - int32_t type - type of operation.
+ - int32_t status - return code for the operation.
+ - int64_t trailer_size - 0 unless readdir operation.
+ - char *trailer_buf - initialized to NULL, used during readdir operations.
+
+The appropriate member inside the union is filled out for any
+particular response.
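+
+In other words, the downcall has roughly this shape (a sketch; the union
+member shown and the name "resp" are illustrative):
+
+  typedef struct {
+          int32_t type;              /* which operation this answers      */
+          int32_t status;            /* return code for the operation     */
+          int64_t trailer_size;      /* 0 unless readdir                  */
+          char    *trailer_buf;      /* NULL unless readdir               */
+          union {
+                  pvfs2_io_response_t io;  /* PVFS2_VFS_OP_FILE_IO        */
+                  /* ... one member per response type listed below ...    */
+          } resp;
+  } pvfs2_downcall_t;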
+
+  PVFS2_VFS_OP_FILE_IO
+    fill a pvfs2_io_response_t
+
+  PVFS2_VFS_OP_LOOKUP
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_CREATE
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_SYMLINK
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_GETATTR
+    fill in a PVFS_sys_attr_s (tons of stuff the kernel doesn't need)
+    fill in a string with the link target when the object is a symlink.
+
+  PVFS2_VFS_OP_MKDIR
+    fill a PVFS_object_kref
+
+  PVFS2_VFS_OP_STATFS
+    fill a pvfs2_statfs_response_t with useless info <g>. It is hard for
+    us to know, in a timely fashion, these statistics about our
+    distributed network filesystem. 
+
+  PVFS2_VFS_OP_FS_MOUNT
+    fill a pvfs2_fs_mount_response_t which is just like a PVFS_object_kref
+    except its members are in a different order and "__pad1" is replaced
+    with "id".
+
+  PVFS2_VFS_OP_GETXATTR
+    fill a pvfs2_getxattr_response_t
+
+  PVFS2_VFS_OP_LISTXATTR
+    fill a pvfs2_listxattr_response_t
+
+  PVFS2_VFS_OP_PARAM
+    fill a pvfs2_param_response_t
+
+  PVFS2_VFS_OP_PERF_COUNT
+    fill a pvfs2_perf_count_response_t
+
+  PVFS2_VFS_OP_FSKEY
+    fill a pvfs2_fs_key_response_t
+
+  PVFS2_VFS_OP_READDIR
+    jam everything needed to represent a pvfs2_readdir_response_t into
+    the readdir buffer descriptor specified in the upcall.
+
+writev() on /dev/pvfs2-req is used to pass responses to the requests
+made by the kernel side.
+
+A buffer_list containing:
+  - a pointer to the prepared response to the request from the
+    kernel (struct pvfs2_downcall_t).
+  - and also, in the case of a readdir request, a pointer to a
+    buffer containing descriptors for the objects in the target
+    directory.
+... is sent to the function (PINT_dev_write_list) which performs
+the writev.
+
+PINT_dev_write_list has a local iovec array: struct iovec io_array[10];
+
+The first four elements of io_array are initialized like this for all
+responses:
+
+  io_array[0].iov_base = address of local variable "proto_ver" (int32_t)
+  io_array[0].iov_len = sizeof(int32_t)
+
+  io_array[1].iov_base = address of global variable "pdev_magic" (int32_t)
+  io_array[1].iov_len = sizeof(int32_t)
+  
+  io_array[2].iov_base = address of parameter "tag" (PVFS_id_gen_t)
+  io_array[2].iov_len = sizeof(int64_t)
+
+  io_array[3].iov_base = address of out_downcall member (pvfs2_downcall_t)
+                         of global variable vfs_request (vfs_request_t)
+  io_array[3].iov_len = sizeof(pvfs2_downcall_t)
+
+Readdir responses initialize the fifth element io_array like this:
+
+  io_array[4].iov_base = contents of member trailer_buf (char *)
+                         from out_downcall member of global variable
+                         vfs_request
+  io_array[4].iov_len = contents of member trailer_size (PVFS_size)
+                        from out_downcall member of global variable
+                        vfs_request
+  
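+A condensed sketch of that write-back path (dev_fd is assumed to be the
+open /dev/pvfs2-req descriptor; proto_ver, pdev_magic, tag and vfs_request
+are the variables named above):
+
+  struct iovec io_array[10];
+  int iovcnt = 4;
+
+  io_array[0].iov_base = &proto_ver;
+  io_array[0].iov_len  = sizeof(int32_t);
+  io_array[1].iov_base = &pdev_magic;
+  io_array[1].iov_len  = sizeof(int32_t);
+  io_array[2].iov_base = &tag;
+  io_array[2].iov_len  = sizeof(int64_t);
+  io_array[3].iov_base = &vfs_request.out_downcall;
+  io_array[3].iov_len  = sizeof(pvfs2_downcall_t);
+
+  if (vfs_request.out_downcall.trailer_size > 0) {
+          /* readdir responses carry their descriptors as a fifth element */
+          io_array[4].iov_base = vfs_request.out_downcall.trailer_buf;
+          io_array[4].iov_len  = vfs_request.out_downcall.trailer_size;
+          iovcnt = 5;
+  }
+
+  writev(dev_fd, io_array, iovcnt);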
+
index fde9fd06fa988b2eac60f10880973351e783fe37..843b045b4069a62c154bdf6c2723b3061bd81abd 100644 (file)
@@ -240,8 +240,8 @@ Table 1-2: Contents of the status files (as of 4.1)
  RssFile                     size of resident file mappings
  RssShmem                    size of resident shmem memory (includes SysV shm,
                              mapping of tmpfs and shared anonymous mappings)
- VmData                      size of data, stack, and text segments
- VmStk                       size of data, stack, and text segments
+ VmData                      size of private data segments
+ VmStk                       size of stack segments
  VmExe                       size of text segment
  VmLib                       size of shared library code
  VmPTE                       size of page table entries
@@ -356,7 +356,7 @@ address           perms offset  dev   inode      pathname
 a7cb1000-a7cb2000 ---p 00000000 00:00 0
 a7cb2000-a7eb2000 rw-p 00000000 00:00 0
 a7eb2000-a7eb3000 ---p 00000000 00:00 0
-a7eb3000-a7ed5000 rw-p 00000000 00:00 0          [stack:1001]
+a7eb3000-a7ed5000 rw-p 00000000 00:00 0
 a7ed5000-a8008000 r-xp 00000000 03:00 4222       /lib/libc.so.6
 a8008000-a800a000 r--p 00133000 03:00 4222       /lib/libc.so.6
 a800a000-a800b000 rw-p 00135000 03:00 4222       /lib/libc.so.6
@@ -388,7 +388,6 @@ is not associated with a file:
 
  [heap]                   = the heap of the program
  [stack]                  = the stack of the main process
- [stack:1001]             = the stack of the thread with tid 1001
  [vdso]                   = the "virtual dynamic shared object",
                             the kernel system call handler
 
@@ -396,10 +395,8 @@ is not associated with a file:
 
 The /proc/PID/task/TID/maps is a view of the virtual memory from the viewpoint
 of the individual tasks of a process. In this file you will see a mapping marked
-as [stack] if that task sees it as a stack. This is a key difference from the
-content of /proc/PID/maps, where you will see all mappings that are being used
-as stack by all of those tasks. Hence, for the example above, the task-level
-map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
+as [stack] if that task sees it as a stack. Hence, for the example above, the
+task-level map, i.e. /proc/PID/task/TID/maps for thread 1001 will look like this:
 
 08048000-08049000 r-xp 00000000 03:00 8312       /opt/test
 08049000-0804a000 rw-p 00001000 03:00 8312       /opt/test
diff --git a/Documentation/hwmon/ltc2990 b/Documentation/hwmon/ltc2990
new file mode 100644 (file)
index 0000000..c25211e
--- /dev/null
@@ -0,0 +1,43 @@
+Kernel driver ltc2990
+=====================
+
+Supported chips:
+  * Linear Technology LTC2990
+    Prefix: 'ltc2990'
+    Addresses scanned: -
+    Datasheet: http://www.linear.com/product/ltc2990
+
+Author: Mike Looijmans <mike.looijmans@topic.nl>
+
+
+Description
+-----------
+
+LTC2990 is a Quad I2C Voltage, Current and Temperature Monitor.
+The chip's inputs can measure 4 voltages, or two inputs together (1+2 and 3+4)
+can be combined to measure a differential voltage, which is typically used to
+measure current through a series resistor, or a temperature.
+
+This driver currently uses the 2x differential mode only. In order to support
+other modes, the driver will need to be expanded.
+
+
+Usage Notes
+-----------
+
+This driver does not probe for PMBus devices. You will have to instantiate
+devices explicitly.
+
+
+Sysfs attributes
+----------------
+
+The "curr*_input" measurements actually report the voltage drop across the
+input pins in microvolts. This is equivalent to the current through a 1mOhm
+sense resistor. Divide the reported value by the actual sense resistor value
+in mOhm to get the actual value.
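+
+A sketch of that conversion (reported is the value read from a
+curr*_input attribute, r_mohm the real sense resistor value in milliohm):
+
+  /* reported microvolts across a sense resistor of r_mohm milliohm */
+  static long ltc2990_real_current_ma(long reported, long r_mohm)
+  {
+          return reported / r_mohm;       /* I = V / R, result in mA */
+  }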
+
+in0_input     Voltage at Vcc pin in millivolt (range 2.5V to 5V)
+temp1_input   Internal chip temperature in millidegrees Celsius
+curr1_input   Current in mA across v1-v2 assuming a 1mOhm sense resistor.
+curr2_input   Current in mA across v3-v4 assuming a 1mOhm sense resistor.
index 37cbf472a19d19aef7aea0217d008943eb28909f..027633246bd8a3f052264b770a8115e22e3db194 100644 (file)
@@ -5,17 +5,17 @@ Supported chips:
   * Maxim MAX34440
     Prefixes: 'max34440'
     Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34440.pdf
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34440.pdf
   * Maxim MAX34441
     PMBus 5-Channel Power-Supply Manager and Intelligent Fan Controller
     Prefixes: 'max34441'
     Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34441.pdf
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34441.pdf
   * Maxim MAX34446
     PMBus Power-Supply Data Logger
     Prefixes: 'max34446'
     Addresses scanned: -
-    Datasheet: http://datasheets.maxim-ic.com/en/ds/MAX34446.pdf
+    Datasheet: http://datasheets.maximintegrated.com/en/ds/MAX34446.pdf
   * Maxim MAX34460
     PMBus 12-Channel Voltage Monitor & Sequencer
     Prefix: 'max34460'
index 2ac78ae1039de5c6bf44b4b32e41de7d742be1d6..bcf919d8625ceb058cccbaa9e4e82607e7ab1760 100644 (file)
@@ -4,7 +4,7 @@ the /dev interface. You need to load module i2c-dev for this.
 
 Each registered i2c adapter gets a number, counting from 0. You can
 examine /sys/class/i2c-dev/ to see what number corresponds to which adapter.
-Alternatively, you can run "i2cdetect -l" to obtain a formated list of all
+Alternatively, you can run "i2cdetect -l" to obtain a formatted list of all
 i2c adapters present on your system at a given time. i2cdetect is part of
 the i2c-tools package.
 
index c8444ef82acfa3a07d3dc876f08d38839bf33bd9..04f8d8a9b817dbfb1d693c0796bd918ddda9af35 100644 (file)
@@ -7,8 +7,8 @@ This is a proof-of-concept backend which acts like an EEPROM on the connected
 I2C bus. The memory contents can be modified from userspace via this file
 located in sysfs:
 
-       /sys/bus/i2c/devices/<device-direcory>/slave-eeprom
+       /sys/bus/i2c/devices/<device-directory>/slave-eeprom
 
 As of 2015, Linux doesn't support poll on binary sysfs files, so there is no
-notfication when another master changed the content.
+notification when another master changed the content.
 
index 8d5465d3fdef59cce2f0eeafcd797cfd78e4aadd..52ef02b33da9cda980494ed0d26bb3fcc9c1c3f5 100644 (file)
@@ -440,7 +440,7 @@ MAINTAINERS ファイルにリストがありますので参照してくださ
 てこの状態を変えようとしないように。人々はそのようなことは好みません。
 
 今までのメールでのやりとりとその間のあなたの発言はそのまま残し、
-"John Kernlehacker wrote ...:" の行をあなたのリプライの先頭行にして、
+"John Kernelhacker wrote ...:" の行をあなたのリプライの先頭行にして、
 メールの先頭でなく、各引用行の間にあなたの言いたいことを追加するべきで
 す。
 
index cfb2c0f1a4a89237ce131999d7b1cc03aff178b4..5688b69c26bb6c93f7575e59f482ac4317f500b9 100644 (file)
@@ -666,7 +666,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        clearcpuid=BITNUM [X86]
                        Disable CPUID feature X for the kernel. See
-                       arch/x86/include/asm/cpufeature.h for the valid bit
+                       arch/x86/include/asm/cpufeatures.h for the valid bit
                        numbers. Note the Linux specific bits are not necessarily
                        stable over kernel options, but the vendor specific
                        ones should be.
@@ -1454,6 +1454,41 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        In such case C2/C3 won't be used again.
                        idle=nomwait: Disable mwait for CPU C-states
 
+       ieee754=        [MIPS] Select IEEE Std 754 conformance mode
+                       Format: { strict | legacy | 2008 | relaxed }
+                       Default: strict
+
+                       Choose which programs will be accepted for execution
+                       based on the IEEE 754 NaN encoding(s) supported by
+                       the FPU and the NaN encoding requested with the value
+                       of an ELF file header flag individually set by each
+                       binary.  Hardware implementations are permitted to
+                       support either or both of the legacy and the 2008 NaN
+                       encoding mode.
+
+                       Available settings are as follows:
+                       strict  accept binaries that request a NaN encoding
+                               supported by the FPU
+                       legacy  only accept legacy-NaN binaries, if supported
+                               by the FPU
+                       2008    only accept 2008-NaN binaries, if supported
+                               by the FPU
+                       relaxed accept any binaries regardless of whether
+                               supported by the FPU
+
+                       The FPU emulator is always able to support both NaN
+                       encodings, so if no FPU hardware is present or it has
+                       been disabled with 'nofpu', then the settings of
+                       'legacy' and '2008' strap the emulator accordingly,
+                       'relaxed' straps the emulator for both legacy-NaN and
+                       2008-NaN, whereas 'strict' enables legacy-NaN only on
+                       legacy processors and both NaN encodings on MIPS32 or
+                       MIPS64 CPUs.
+
+                       The setting for ABS.fmt/NEG.fmt instruction execution
+                       mode generally follows that for the NaN encoding,
+                       except where unsupported by hardware.
+
        ignore_loglevel [KNL]
                        Ignore loglevel setting - this will print /all/
                        kernel messages to the console. Useful for debugging.
@@ -1461,6 +1496,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        could change it dynamically, usually by
                        /sys/module/printk/parameters/ignore_loglevel.
 
+       ignore_rlimit_data
+                       Ignore RLIMIT_DATA setting for data mappings,
+                       print warning at first misuse.  Can be changed via
+                       /sys/module/kernel/parameters/ignore_rlimit_data.
+
        ihash_entries=  [KNL]
                        Set number of hash buckets for inode cache.
 
@@ -1647,6 +1687,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        ip=             [IP_PNP]
                        See Documentation/filesystems/nfs/nfsroot.txt.
 
+       irqaffinity=    [SMP] Set the default irq affinity mask
+                       Format:
+                       <cpu number>,...,<cpu number>
+                       or
+                       <cpu number>-<cpu number>
+                       (must be a positive range in ascending order)
+                       or a mixture
+                       <cpu number>,...,<cpu number>-<cpu number>
+
        irqfixup        [HW]
                        When an interrupt is not handled search all handlers
                        for it. Intended to get systems with badly broken
@@ -2526,6 +2575,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        nointroute      [IA-64]
 
+       noinvpcid       [X86] Disable the INVPCID cpu feature.
+
        nojitter        [IA-64] Disables jitter checking for ITC timers.
 
        no-kvmclock     [X86,KVM] Disable paravirtualized KVM clock driver
@@ -3198,6 +3249,38 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Lazy RCU callbacks are those which RCU can
                        prove do nothing more than free memory.
 
+       rcuperf.gp_exp= [KNL]
+                       Measure performance of expedited synchronous
+                       grace-period primitives.
+
+       rcuperf.nreaders= [KNL]
+                       Set number of RCU readers.  The value -1 selects
+                       N, where N is the number of CPUs.  A value
+                       "n" less than -1 selects N-n+1, where N is again
+                       the number of CPUs.  For example, -2 selects N
+                       (the number of CPUs), -3 selects N+1, and so on.
+                       A value of "n" less than or equal to -N selects
+                       a single reader.
+
+       rcuperf.nwriters= [KNL]
+                       Set number of RCU writers.  The values operate
+                       the same as for rcuperf.nreaders: -1 selects N,
+                       where N is the number of CPUs.
+
+       rcuperf.perf_runnable= [BOOT]
+                       Start rcuperf running at boot time.
+
+       rcuperf.shutdown= [KNL]
+                       Shut the system down after performance tests
+                       complete.  This is useful for hands-off automated
+                       testing.
+
+       rcuperf.perf_type= [KNL]
+                       Specify the RCU implementation to test.
+
+       rcuperf.verbose= [KNL]
+                       Enable additional printk() statements.
+
        rcutorture.cbflood_inter_holdoff= [KNL]
                        Set holdoff time (jiffies) between successive
                        callback-flood tests.
@@ -3488,6 +3571,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        sched_debug     [KNL] Enables verbose scheduler debug messages.
 
+       schedstats=     [KNL,X86] Enable or disable scheduler statistics.
+                       Allowed values are enable and disable. This feature
+                       incurs a small amount of overhead in the scheduler
+                       but is useful for debugging and performance tuning.
+
        skew_tick=      [KNL] Offset the periodic timer tick per cpu to mitigate
                        xtime_lock contention on larger systems, and/or RCU lock
                        contention on all systems with CONFIG_MAXSMP set.
@@ -4195,6 +4283,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        The default value of this parameter is determined by
                        the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
 
+       workqueue.debug_force_rr_cpu
+                       Workqueues used to implicitly guarantee that work
+                       items queued without an explicit CPU are put on
+                       the local CPU.  This guarantee no longer holds:
+                       while the local CPU is still preferred, work items
+                       may be put on foreign CPUs.  This debug option
+                       forces round-robin CPU selection to flush out
+                       usages which depend on the now-broken guarantee.
+                       When enabled, memory and cache locality will be
+                       impacted.
+
        x2apic_phys     [X86-64,APIC] Use x2apic physical mode instead of
                        default x2apic cluster mode on platforms
                        supporting x2apic.
index 904ee42d078e51d8b43fe6548611219c2c0444c0..57e4a4b053c5b02e4dfe537e1879d9a86bceaf7b 100644 (file)
@@ -232,7 +232,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
      with memory references that are not protected by READ_ONCE() and
      WRITE_ONCE().  Without them, the compiler is within its rights to
      do all sorts of "creative" transformations, which are covered in
-     the Compiler Barrier section.
+     the COMPILER BARRIER section.
 
  (*) It _must_not_ be assumed that independent loads and stores will be issued
      in the order given.  This means that for:
@@ -555,6 +555,30 @@ between the address load and the data load:
 This enforces the occurrence of one of the two implications, and prevents the
 third possibility from arising.
 
+A data-dependency barrier must also order against dependent writes:
+
+       CPU 1                 CPU 2
+       ===============       ===============
+       { A == 1, B == 2, C = 3, P == &A, Q == &C }
+       B = 4;
+       <write barrier>
+       WRITE_ONCE(P, &B);
+                             Q = READ_ONCE(P);
+                             <data dependency barrier>
+                             *Q = 5;
+
+The data-dependency barrier must order the read into Q with the store
+into *Q.  This prohibits this outcome:
+
+       (Q == B) && (B == 4)
+
+Please note that this pattern should be rare.  After all, the whole point
+of dependency ordering is to -prevent- writes to the data structure, along
+with the expensive cache misses associated with those writes.  This pattern
+can be used to record rare error conditions and the like, and the ordering
+prevents such records from being lost.
+
+
 [!] Note that this extremely counterintuitive situation arises most easily on
 machines with split caches, so that, for example, one cache bank processes
 even-numbered cache lines and the other bank processes odd-numbered cache
@@ -565,21 +589,6 @@ odd-numbered bank is idle, one can see the new value of the pointer P (&B),
 but the old value of the variable B (2).
 
 
-Another example of where data dependency barriers might be required is where a
-number is read from memory and then used to calculate the index for an array
-access:
-
-       CPU 1                 CPU 2
-       ===============       ===============
-       { M[0] == 1, M[1] == 2, M[3] = 3, P == 0, Q == 3 }
-       M[1] = 4;
-       <write barrier>
-       WRITE_ONCE(P, 1);
-                             Q = READ_ONCE(P);
-                             <data dependency barrier>
-                             D = M[Q];
-
-
 The data dependency barrier is very important to the RCU system,
 for example.  See rcu_assign_pointer() and rcu_dereference() in
 include/linux/rcupdate.h.  This permits the current target of an RCU'd
@@ -800,9 +809,13 @@ In summary:
       use smp_rmb(), smp_wmb(), or, in the case of prior stores and
       later loads, smp_mb().
 
-  (*) If both legs of the "if" statement begin with identical stores
-      to the same variable, a barrier() statement is required at the
-      beginning of each leg of the "if" statement.
+  (*) If both legs of the "if" statement begin with identical stores to
+      the same variable, then those stores must be ordered, either by
+      preceding both of them with smp_mb() or by using smp_store_release()
+      to carry out the stores.  Please note that it is -not- sufficient
+      to use barrier() at beginning of each leg of the "if" statement,
+      as optimizing compilers do not necessarily respect barrier()
+      in this case.
 
   (*) Control dependencies require at least one run-time conditional
       between the prior load and the subsequent store, and this
@@ -814,7 +827,7 @@ In summary:
   (*) Control dependencies require that the compiler avoid reordering the
       dependency into nonexistence.  Careful use of READ_ONCE() or
       atomic{,64}_read() can help to preserve your control dependency.
-      Please see the Compiler Barrier section for more information.
+      Please see the COMPILER BARRIER section for more information.
 
   (*) Control dependencies pair normally with other types of barriers.
 
@@ -1305,8 +1318,86 @@ or a level of cache, CPU 2 might have early access to CPU 1's writes.
 General barriers are therefore required to ensure that all CPUs agree
 on the combined order of CPU 1's and CPU 2's accesses.
 
-To reiterate, if your code requires transitivity, use general barriers
-throughout.
+General barriers provide "global transitivity", so that all CPUs will
+agree on the order of operations.  In contrast, a chain of release-acquire
+pairs provides only "local transitivity", so that only those CPUs on
+the chain are guaranteed to agree on the combined order of the accesses.
+For example, switching to C code in deference to Herman Hollerith:
+
+       int u, v, x, y, z;
+
+       void cpu0(void)
+       {
+               r0 = smp_load_acquire(&x);
+               WRITE_ONCE(u, 1);
+               smp_store_release(&y, 1);
+       }
+
+       void cpu1(void)
+       {
+               r1 = smp_load_acquire(&y);
+               r4 = READ_ONCE(v);
+               r5 = READ_ONCE(u);
+               smp_store_release(&z, 1);
+       }
+
+       void cpu2(void)
+       {
+               r2 = smp_load_acquire(&z);
+               smp_store_release(&x, 1);
+       }
+
+       void cpu3(void)
+       {
+               WRITE_ONCE(v, 1);
+               smp_mb();
+               r3 = READ_ONCE(u);
+       }
+
+Because cpu0(), cpu1(), and cpu2() participate in a local transitive
+chain of smp_store_release()/smp_load_acquire() pairs, the following
+outcome is prohibited:
+
+       r0 == 1 && r1 == 1 && r2 == 1
+
+Furthermore, because of the release-acquire relationship between cpu0()
+and cpu1(), cpu1() must see cpu0()'s writes, so that the following
+outcome is prohibited:
+
+       r1 == 1 && r5 == 0
+
+However, the transitivity of release-acquire is local to the participating
+CPUs and does not apply to cpu3().  Therefore, the following outcome
+is possible:
+
+       r0 == 0 && r1 == 1 && r2 == 1 && r3 == 0 && r4 == 0
+
+As an aside, the following outcome is also possible:
+
+       r0 == 0 && r1 == 1 && r2 == 1 && r3 == 0 && r4 == 0 && r5 == 1
+
+Although cpu0(), cpu1(), and cpu2() will see their respective reads and
+writes in order, CPUs not involved in the release-acquire chain might
+well disagree on the order.  This disagreement stems from the fact that
+the weak memory-barrier instructions used to implement smp_load_acquire()
+and smp_store_release() are not required to order prior stores against
+subsequent loads in all cases.  This means that cpu3() can see cpu0()'s
+store to u as happening -after- cpu1()'s load from v, even though
+both cpu0() and cpu1() agree that these two operations occurred in the
+intended order.
+
+However, please keep in mind that smp_load_acquire() is not magic.
+In particular, it simply reads from its argument with ordering.  It does
+-not- ensure that any particular value will be read.  Therefore, the
+following outcome is possible:
+
+       r0 == 0 && r1 == 0 && r2 == 0 && r5 == 0
+
+Note that this outcome can happen even on a mythical sequentially
+consistent system where nothing is ever reordered.
+
+To reiterate, if your code requires global transitivity, use general
+barriers throughout.
 
 
 ========================
index ff23b755f5e45cc8b6d367f2e55564cdf3a1782d..1b5e7a7f2185be117ba99d3c4303d14671cb9840 100644 (file)
@@ -187,7 +187,7 @@ interfaces to the kernel module settings.
 
 For more information, please see the manpage (man batctl).
 
-batctl is available on http://www.open-mesh.org/
+batctl is available on https://www.open-mesh.org/
 
 
 CONTACT
index ceb44a095a27ba98804442c1f7aa91ed616b7d42..73b36d7c7b0d67309cce7bf0bd8a46f0c6ee7ea1 100644 (file)
@@ -594,7 +594,7 @@ tcp_fastopen - INTEGER
 
 tcp_syn_retries - INTEGER
        Number of times initial SYNs for an active TCP connection attempt
-       will be retransmitted. Should not be higher than 255. Default value
+       will be retransmitted. Should not be higher than 127. Default value
 is 6, which corresponds to 63 seconds till the last retransmission
 with the current initial RTO of 1 second. With this the final timeout
 for an active TCP connection attempt will happen after 127 seconds.
index 3a930072b161fd51b3b1d007daac673526609dc2..ec8f934c2eb240f93a54b0044faadd913e7855f8 100644 (file)
@@ -28,6 +28,23 @@ radiotap headers and used to control injection:
    IEEE80211_RADIOTAP_F_TX_NOACK: frame should be sent without waiting for
                                  an ACK even if it is a unicast frame
 
+ * IEEE80211_RADIOTAP_RATE
+
+   legacy rate for the transmission (only for devices without own rate control)
+
+ * IEEE80211_RADIOTAP_MCS
+
+   HT rate for the transmission (only for devices without own rate control).
+   Also some flags are parsed
+
+   IEEE80211_TX_RC_SHORT_GI: use short guard interval
+   IEEE80211_TX_RC_40_MHZ_WIDTH: send in HT40 mode
+
+ * IEEE80211_RADIOTAP_DATA_RETRIES
+
+   number of retries when either IEEE80211_RADIOTAP_RATE or
+   IEEE80211_RADIOTAP_MCS was used
+
 The injection code can also skip all other currently defined radiotap fields
 facilitating replay of captured radiotap headers directly.
 
index bc3842dc323a629fae6ec4cf5de0f6d1d6d6f37d..798cba82c762ef85e9aa8c617a8c458f9d5b2e0a 100644 (file)
@@ -72,9 +72,6 @@ flush_buffer()        -       (optional) May be called at any point between
                        open and close, and instructs the line discipline
                        to empty its input buffer.
 
-chars_in_buffer() -    (optional) Report the number of bytes in the input
-                       buffer.
-
 set_termios()  -       (optional) Called on termios structure changes.
                        The caller passes the old termios data and the
                        current data is in the tty. Called under the
index 48148d6d930786849c97cf79329b92fec755e946..fc53ccd9a6293c542b60cb62d3466dd7cc70e07b 100644 (file)
@@ -1910,6 +1910,12 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
                     - Default: 0x0000 
     ignore_ctl_error - Ignore any errors from the USB controller regarding
                       the mixer interface (default: no)
+    autoclock      - Enable auto-clock selection for UAC2 devices
+                     (default: yes)
+    quirk_alias            - Quirk alias list, pass strings like
+                     "0123abcd:5678beef", which applies the existing
+                     quirk for the device 5678:beef to a new device
+                     0123:abcd.
 
     This module supports multiple devices, autoprobe and hotplugging.
 
@@ -1919,6 +1925,9 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
     NB: ignore_ctl_error=1 may help when you get an error at accessing
         the mixer element such as URB error -22.  This happens on some
         buggy USB device or the controller.
+    NB: the quirk_alias option is provided only for testing / development.
+        If you want proper support, contact upstream to have the matching
+        quirk added to the driver code statically.
 
   Module snd-usb-caiaq
   --------------------
diff --git a/Documentation/sound/alsa/HD-Audio-DP-MST-audio.txt b/Documentation/sound/alsa/HD-Audio-DP-MST-audio.txt
new file mode 100644 (file)
index 0000000..82744ac
--- /dev/null
@@ -0,0 +1,74 @@
+To support DP MST audio, the HD Audio hdmi codec driver introduces virtual
+pins and dynamic pcm assignment.
+
+A virtual pin is an extension of per_pin. The main difference between DP MST
+and legacy is that DP MST introduces device entries. Each pin can contain
+several device entries, and each device entry behaves as a pin.
+
+As each pin may contain several device entries and each codec may contain
+several pins, if we use one pcm per per_pin, there will be many PCMs.
+The new solution is to create a few PCMs and to dynamically bind a pcm to
+per_pin. The driver uses the spec->dyn_pcm_assign flag to indicate whether
+to use the new solution.
+
+PCM
+===
+To be added
+
+
+Jack
+====
+
+Presume:
+ - MST must be dyn_pcm_assign, and it is acomp (for Intel scenario);
+ - NON-MST may or may not be dyn_pcm_assign, it can be acomp or !acomp;
+
+So there are the following scenarios:
+ a. MST (&& dyn_pcm_assign && acomp)
+ b. NON-MST && dyn_pcm_assign && acomp
+ c. NON-MST && !dyn_pcm_assign && !acomp
+
+The discussion below will ignore the MST vs. NON-MST difference, as it
+doesn't impact jack handling much.
+
+The driver uses a struct hdmi_pcm pcm[] array in hdmi_spec, and snd_jack is
+a member of hdmi_pcm. Each pin has one struct hdmi_pcm *pcm pointer.
+
+For !dyn_pcm_assign, per_pin->pcm will be assigned to spec->pcm[n] statically.
+
+For dyn_pcm_assign, per_pin->pcm will be assigned to spec->pcm[n]
+when a monitor is hotplugged.
+
+
+Build Jack
+----------
+
+- dyn_pcm_assign
+Will not use hda_jack but use snd_jack in spec->pcm_rec[pcm_idx].jack directly.
+
+- !dyn_pcm_assign
+Use hda_jack and assign spec->pcm_rec[pcm_idx].jack = jack->jack statically.
+
+
+Unsolicited Event Enabling
+--------------------------
+Enable unsolicited event if !acomp.
+
+
+Monitor Hotplug Event Handling
+------------------------------
+- acomp
+pin_eld_notify() -> check_presence_and_report() -> hdmi_present_sense() ->
+sync_eld_via_acomp().
+Use snd_jack_report() directly on spec->pcm_rec[pcm_idx].jack for
+both dyn_pcm_assign and !dyn_pcm_assign.
+
+- !acomp
+hdmi_unsol_event() -> hdmi_intrinsic_event() -> check_presence_and_report() ->
+hdmi_present_sense() -> hdmi_present_sense_via_verbs().
+Use snd_jack_report() directly on spec->pcm_rec[pcm_idx].jack for dyn_pcm_assign.
+Use the hda_jack mechanism to handle jack events.
+
+
+Others to be added later
+========================
index a93b414672a71ac6fa9bac1e848215804bde139c..f4444c94ff285db493961c54a1527a189da20fd2 100644 (file)
@@ -58,6 +58,8 @@ show up in /proc/sys/kernel:
 - panic_on_stackoverflow
 - panic_on_unrecovered_nmi
 - panic_on_warn
+- perf_cpu_time_max_percent
+- perf_event_paranoid
 - pid_max
 - powersave-nap               [ PPC only ]
 - printk
@@ -639,6 +641,17 @@ allowed to execute.
 
 ==============================================================
 
+perf_event_paranoid:
+
+Controls use of the performance events system by unprivileged
+users (without CAP_SYS_ADMIN).  The default value is 1.
+
+ -1: Allow use of (almost) all events by all users
+>=0: Disallow raw tracepoint access by users without CAP_SYS_ADMIN
+>=1: Disallow CPU event access by users without CAP_SYS_ADMIN
+>=2: Disallow kernel profiling by users without CAP_SYS_ADMIN
+
+==============================================================
 
 pid_max:
 
@@ -760,6 +773,14 @@ rtsig-nr shows the number of RT signals currently queued.
 
 ==============================================================
 
+sched_schedstats:
+
+Enables/disables scheduler statistics. Enabling this feature
+incurs a small amount of overhead in the scheduler but is
+useful for debugging and performance tuning.
+
+==============================================================
+
 sg-big-buff:
 
 This file shows the size of the generic SCSI (sg) buffer.
index 053f613fc9a913af132da7f794742c45d6727dfc..07e4cdf024073033e2a52213bcbed4d562111a33 100644 (file)
@@ -3025,7 +3025,7 @@ len must be a multiple of sizeof(struct kvm_s390_irq). It must be > 0
 and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
 which is the maximum number of possibly pending cpu-local interrupts.
 
-4.90 KVM_SMI
+4.96 KVM_SMI
 
 Capability: KVM_CAP_X86_SMM
 Architectures: x86
index d1ad9d5cae467ceb2c1169ce8b53d70078aedf27..e3e314cb83e83fd08f99fc0f3859727b22eb0cc1 100644 (file)
@@ -88,6 +88,8 @@ struct kvm_s390_io_adapter_req {
       perform a gmap translation for the guest address provided in addr,
       pin a userspace page for the translated address and add it to the
       list of mappings
+      Note: A new mapping will be created unconditionally; therefore,
+            the calling code should avoid making duplicate mappings.
 
     KVM_S390_IO_ADAPTER_UNMAP
       release a userspace page for the translated address specified in addr
index f083a168eb350b80895f25a23621b87a131be84b..a9ea8774a45feb50e463c8ff7c1f20dccd7f2052 100644 (file)
@@ -84,3 +84,55 @@ Returns:    -EBUSY in case 1 or more vcpus are already activated (only in write
            -EFAULT if the given address is not accessible from kernel space
            -ENOMEM if not enough memory is available to process the ioctl
            0 in case of success
+
+3. GROUP: KVM_S390_VM_TOD
+Architectures: s390
+
+3.1. ATTRIBUTE: KVM_S390_VM_TOD_HIGH
+
+Allows user space to set/get the TOD clock extension (u8).
+
+Parameters: address of a buffer in user space to store the data (u8) to
+Returns:    -EFAULT if the given address is not accessible from kernel space
+           -EINVAL if setting the TOD clock extension to != 0 is not supported
+
+3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW
+
+Allows user space to set/get bits 0-63 of the TOD clock register as defined in
+the POP (u64).
+
+Parameters: address of a buffer in user space to store the data (u64) to
+Returns:    -EFAULT if the given address is not accessible from kernel space
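+
+For illustration, user space could set these attributes with the generic
+KVM_SET_DEVICE_ATTR ioctl on the VM file descriptor; a sketch (vmfd is
+assumed to be an open VM file descriptor, error handling is omitted):
+
+    __u64 tod_low = 0;                       /* desired bits 0-63 */
+    struct kvm_device_attr attr = {
+            .group = KVM_S390_VM_TOD,
+            .attr  = KVM_S390_VM_TOD_LOW,
+            .addr  = (__u64)&tod_low,
+    };
+
+    ioctl(vmfd, KVM_SET_DEVICE_ATTR, &attr);
+
+The same pattern applies to the other groups; for the crypto attributes
+below, which take no parameters, addr can be left unused.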
+
+4. GROUP: KVM_S390_VM_CRYPTO
+Architectures: s390
+
+4.1. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_AES_KW (w/o)
+
+Allows user space to enable aes key wrapping, including generating a new
+wrapping key.
+
+Parameters: none
+Returns:    0
+
+4.2. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_DEA_KW (w/o)
+
+Allows user space to enable dea key wrapping, including generating a new
+wrapping key.
+
+Parameters: none
+Returns:    0
+
+4.3. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_AES_KW (w/o)
+
+Allows user space to disable aes key wrapping, clearing the wrapping key.
+
+Parameters: none
+Returns:    0
+
+4.4. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_DEA_KW (w/o)
+
+Allows user space to disable dea key wrapping, clearing the wrapping key.
+
+Parameters: none
+Returns:    0
index 9f9ec9f76039404a15114c97fe67535fe41fa88c..4e4b6f10d8410f84d8ea64ac51c68a6932bf2158 100644 (file)
@@ -400,3 +400,7 @@ wm8350_wdt:
 nowayout: Watchdog cannot be stopped once started
        (default=kernel config parameter)
 -------------------------------------------------
+sun4v_wdt:
+timeout_ms: Watchdog timeout in milliseconds (1..180000, default=60000)
+nowayout: Watchdog cannot be stopped once started
+-------------------------------------------------
index d62bea6796dad967a2fb651ecb4974d6e4ad2886..c956d99cf1de40a7b2ff49c7e02c9e5fcfa8f472 100644 (file)
@@ -40,3 +40,28 @@ cp ../microcode.bin kernel/x86/microcode/GenuineIntel.bin (or AuthenticAMD.bin)
 find . | cpio -o -H newc >../ucode.cpio
 cd ..
 cat ucode.cpio /boot/initrd-3.5.0.img >/boot/initrd-3.5.0.ucode.img
+
+Builtin microcode
+=================
+
+We can also load builtin microcode supplied through the regular firmware
+builtin method CONFIG_FIRMWARE_IN_KERNEL. Here's an example:
+
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin"
+CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"
+
+This basically means you have the following tree structure locally:
+
+/lib/firmware/
+|-- amd-ucode
+...
+|   |-- microcode_amd_fam15h.bin
+...
+|-- intel-ucode
+...
+|   |-- 06-3a-09
+...
+
+so that the build system can find those files and integrate them into
+the final kernel image. The early loader finds them and applies them.
index 05712ac83e3826bfe7802e01932bde593128b129..c518dce7da4d62da22b192cbc3595ebf62ae3037 100644 (file)
@@ -16,6 +16,8 @@ ffffec0000000000 - fffffc0000000000 (=44 bits) kasan shadow memory (16TB)
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
+ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
+... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB)  kernel text mapping, from phys 0
 ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
@@ -32,11 +34,9 @@ reference.
 Current X86-64 implementations only support 40 bits of address space,
 but we support up to 46 bits. This expands into MBZ space in the page tables.
 
-->trampoline_pgd:
-
-We map EFI runtime services in the aforementioned PGD in the virtual
-range of 64Gb (arbitrarily set, can be raised if needed)
-
-0xffffffef00000000 - 0xffffffff00000000
+We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
+memory window (this size is arbitrary, it can be raised later if needed).
+The mappings are not part of any other kernel PGD and are only available
+during EFI runtime calls.
 
 -Andi Kleen, Jul 2004
index 59373e5bb09ee1c9d5804c968440ea57087c4564..750f24c94fc0d94abed800521f41c5cc7895ee1a 100644 (file)
@@ -223,9 +223,7 @@ F:  drivers/scsi/aacraid/
 
 ABI/API
 L:     linux-api@vger.kernel.org
-F:     Documentation/ABI/
 F:     include/linux/syscalls.h
-F:     include/uapi/
 F:     kernel/sys_ni.c
 
 ABIT UGURU 1,2 HARDWARE MONITOR DRIVER
@@ -675,24 +673,25 @@ F:        drivers/gpu/drm/radeon/radeon_kfd.c
 F:     drivers/gpu/drm/radeon/radeon_kfd.h
 F:     include/uapi/linux/kfd_ioctl.h
 
+AMD SEATTLE DEVICE TREE SUPPORT
+M:     Brijesh Singh <brijeshkumar.singh@amd.com>
+M:     Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
+M:     Tom Lendacky <thomas.lendacky@amd.com>
+S:     Supported
+F:     arch/arm64/boot/dts/amd/
+
 AMD XGBE DRIVER
 M:     Tom Lendacky <thomas.lendacky@amd.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/amd/xgbe/
+F:     arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi
 
 AMS (Apple Motion Sensor) DRIVER
 M:     Michael Hanselmann <linux-kernel@hansmi.ch>
 S:     Supported
 F:     drivers/macintosh/ams/
 
-AMSO1100 RNIC DRIVER
-M:     Tom Tucker <tom@opengridcomputing.com>
-M:     Steve Wise <swise@opengridcomputing.com>
-L:     linux-rdma@vger.kernel.org
-S:     Maintained
-F:     drivers/infiniband/hw/amso1100/
-
 ANALOG DEVICES INC AD9389B DRIVER
 M:     Hans Verkuil <hans.verkuil@cisco.com>
 L:     linux-media@vger.kernel.org
@@ -836,6 +835,12 @@ S: Maintained
 F:     drivers/net/arcnet/
 F:     include/uapi/linux/if_arcnet.h
 
+ARM HDLCD DRM DRIVER
+M:     Liviu Dudau <liviu.dudau@arm.com>
+S:     Supported
+F:     drivers/gpu/drm/arm/
+F:     Documentation/devicetree/bindings/display/arm,hdlcd.txt
+
 ARM MFM AND FLOPPY DRIVERS
 M:     Ian Molton <spyro@f2s.com>
 S:     Maintained
@@ -967,6 +972,8 @@ M:  Rob Herring <robh@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-highbank/
+F:     arch/arm/boot/dts/highbank.dts
+F:     arch/arm/boot/dts/ecx-*.dts*
 
 ARM/CAVIUM NETWORKS CNS3XXX MACHINE SUPPORT
 M:     Krzysztof Halasa <khalasa@piap.pl>
@@ -1042,6 +1049,7 @@ M:        Barry Song <baohua@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/baohua/linux.git
 S:     Maintained
+F:     arch/arm/boot/dts/prima2*
 F:     arch/arm/mach-prima2/
 F:     drivers/clk/sirf/
 F:     drivers/clocksource/timer-prima2.c
@@ -1143,6 +1151,10 @@ W:       http://www.hisilicon.com
 S:     Supported
 T:     git git://github.com/hisilicon/linux-hisi.git
 F:     arch/arm/mach-hisi/
+F:     arch/arm/boot/dts/hi3*
+F:     arch/arm/boot/dts/hip*
+F:     arch/arm/boot/dts/hisi*
+F:     arch/arm64/boot/dts/hisilicon/
 
 ARM/HP JORNADA 7XX MACHINE SUPPORT
 M:     Kristoffer Ericson <kristoffer.ericson@gmail.com>
@@ -1219,6 +1231,7 @@ M:        Santosh Shilimkar <ssantosh@kernel.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-keystone/
+F:     arch/arm/boot/dts/k2*
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
 ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
@@ -1287,6 +1300,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-berlin/
 F:     arch/arm/boot/dts/berlin*
+F:     arch/arm64/boot/dts/marvell/berlin*
 
 
 ARM/Marvell Dove/MV78xx0/Orion SOC support
@@ -1425,7 +1439,10 @@ S:       Maintained
 F:     arch/arm/boot/dts/qcom-*.dts
 F:     arch/arm/boot/dts/qcom-*.dtsi
 F:     arch/arm/mach-qcom/
+F:     arch/arm64/boot/dts/qcom/*
+F:     drivers/i2c/busses/i2c-qup.c
 F:     drivers/soc/qcom/
+F:     drivers/spi/spi-qup.c
 F:     drivers/tty/serial/msm_serial.h
 F:     drivers/tty/serial/msm_serial.c
 F:     drivers/*/pm8???-*
@@ -1484,6 +1501,8 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/s3c*
+F:     arch/arm/boot/dts/s5p*
+F:     arch/arm/boot/dts/samsung*
 F:     arch/arm/boot/dts/exynos*
 F:     arch/arm64/boot/dts/exynos/
 F:     arch/arm/plat-samsung/
@@ -1493,6 +1512,7 @@ F:        arch/arm/mach-s5p*/
 F:     arch/arm/mach-exynos*/
 F:     drivers/*/*s3c2410*
 F:     drivers/*/*/*s3c2410*
+F:     drivers/soc/samsung/*
 F:     drivers/spi/spi-s3c*
 F:     sound/soc/samsung/*
 F:     Documentation/arm/Samsung/
@@ -1563,6 +1583,7 @@ S:        Maintained
 F:     arch/arm/mach-socfpga/
 F:     arch/arm/boot/dts/socfpga*
 F:     arch/arm/configs/socfpga_defconfig
+F:     arch/arm64/boot/dts/altera/
 W:     http://www.rocketboards.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 
@@ -1716,7 +1737,7 @@ M:        Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/boot/dts/vexpress*
-F:     arch/arm64/boot/dts/arm/vexpress*
+F:     arch/arm64/boot/dts/arm/
 F:     arch/arm/mach-vexpress/
 F:     */*/vexpress*
 F:     */*/*/vexpress*
@@ -2147,7 +2168,7 @@ M:        Marek Lindner <mareklindner@neomailbox.ch>
 M:     Simon Wunderlich <sw@simonwunderlich.de>
 M:     Antonio Quartulli <a@unstable.cc>
 L:     b.a.t.m.a.n@lists.open-mesh.org
-W:     http://www.open-mesh.org/
+W:     https://www.open-mesh.org/
 S:     Maintained
 F:     net/batman-adv/
 
@@ -2343,6 +2364,7 @@ F:        arch/arm/mach-bcm/
 F:     arch/arm/boot/dts/bcm113*
 F:     arch/arm/boot/dts/bcm216*
 F:     arch/arm/boot/dts/bcm281*
+F:     arch/arm64/boot/dts/broadcom/
 F:     arch/arm/configs/bcm_defconfig
 F:     drivers/mmc/host/sdhci-bcm-kona.c
 F:     drivers/clocksource/bcm_kona_timer.c
@@ -2418,8 +2440,11 @@ F:       arch/mips/bmips/*
 F:     arch/mips/include/asm/mach-bmips/*
 F:     arch/mips/kernel/*bmips*
 F:     arch/mips/boot/dts/brcm/bcm*.dts*
+F:     drivers/irqchip/irq-bcm63*
 F:     drivers/irqchip/irq-bcm7*
 F:     drivers/irqchip/irq-brcmstb*
+F:     include/linux/bcm963xx_nvram.h
+F:     include/linux/bcm963xx_tag.h
 
 BROADCOM TG3 GIGABIT ETHERNET DRIVER
 M:     Prashant Sreedharan <prashant@broadcom.com>
@@ -3443,7 +3468,7 @@ S:        Maintained
 F:     drivers/usb/dwc2/
 
 DESIGNWARE USB3 DRD IP DRIVER
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 L:     linux-omap@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -4182,13 +4207,6 @@ W:       http://aeschi.ch.eu.org/efs/
 S:     Orphan
 F:     fs/efs/
 
-EHCA (IBM GX bus InfiniBand adapter) DRIVER
-M:     Hoang-Nam Nguyen <hnguyen@de.ibm.com>
-M:     Christoph Raisch <raisch@de.ibm.com>
-L:     linux-rdma@vger.kernel.org
-S:     Supported
-F:     drivers/infiniband/hw/ehca/
-
 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
 M:     Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 L:     netdev@vger.kernel.org
@@ -5780,10 +5798,8 @@ INTEL TELEMETRY DRIVER
 M:     Souvik Kumar Chakravarty <souvik.k.chakravarty@intel.com>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
-F:     drivers/platform/x86/intel_telemetry_core.c
 F:     arch/x86/include/asm/intel_telemetry.h
-F:     drivers/platform/x86/intel_telemetry_pltdrv.c
-F:     drivers/platform/x86/intel_telemetry_debugfs.c
+F:     drivers/platform/x86/intel_telemetry*
 
 IOC3 ETHERNET DRIVER
 M:     Ralf Baechle <ralf@linux-mips.org>
@@ -5809,12 +5825,6 @@ M:       Juanjo Ciarlante <jjciarla@raiz.uncu.edu.ar>
 S:     Maintained
 F:     net/ipv4/netfilter/ipt_MASQUERADE.c
 
-IPATH DRIVER
-M:     Mike Marciniszyn <infinipath@intel.com>
-L:     linux-rdma@vger.kernel.org
-S:     Maintained
-F:     drivers/staging/rdma/ipath/
-
 IPMI SUBSYSTEM
 M:     Corey Minyard <minyard@acm.org>
 L:     openipmi-developer@lists.sourceforge.net (moderated for non-subscribers)
@@ -6218,6 +6228,14 @@ F:       arch/arm64/include/uapi/asm/kvm*
 F:     arch/arm64/include/asm/kvm*
 F:     arch/arm64/kvm/
 
+KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
+M:     James Hogan <james.hogan@imgtec.com>
+L:     linux-mips@linux-mips.org
+S:     Supported
+F:     arch/mips/include/uapi/asm/kvm*
+F:     arch/mips/include/asm/kvm*
+F:     arch/mips/kvm/
+
 KEXEC
 M:     Eric Biederman <ebiederm@xmission.com>
 W:     http://kernel.org/pub/linux/utils/kernel/kexec/
@@ -6315,6 +6333,12 @@ S:       Maintained
 F:     net/l3mdev
 F:     include/net/l3mdev.h
 
+LANTIQ MIPS ARCHITECTURE
+M:     John Crispin <blogic@openwrt.org>
+L:     linux-mips@linux-mips.org
+S:     Maintained
+F:     arch/mips/lantiq
+
 LAPB module
 L:     linux-x25@vger.kernel.org
 S:     Orphan
@@ -7356,7 +7380,7 @@ F:        drivers/tty/isicom.c
 F:     include/linux/isicom.h
 
 MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:     Maintained
@@ -7925,7 +7949,7 @@ F:        drivers/media/platform/omap3isp/
 F:     drivers/staging/media/omap4iss/
 
 OMAP USB SUPPORT
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 L:     linux-omap@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -8113,6 +8137,14 @@ S:       Supported
 F:     fs/overlayfs/
 F:     Documentation/filesystems/overlayfs.txt
 
+ORANGEFS FILESYSTEM
+M:     Mike Marshall <hubcap@omnibond.com>
+L:     pvfs2-developers@beowulf-underground.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/hubcap/linux.git
+S:     Supported
+F:     fs/orangefs/
+F:     Documentation/filesystems/orangefs.txt
+
 P54 WIRELESS DRIVER
 M:     Christian Lamparter <chunkeey@googlemail.com>
 L:     linux-wireless@vger.kernel.org
@@ -8804,6 +8836,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://github.com/hzhuang1/linux.git
 T:     git git://github.com/rjarzmik/linux.git
 S:     Maintained
+F:     arch/arm/boot/dts/pxa*
 F:     arch/arm/mach-pxa/
 F:     drivers/dma/pxa*
 F:     drivers/pcmcia/pxa2xx*
@@ -8833,6 +8866,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://github.com/hzhuang1/linux.git
 T:     git git://git.linaro.org/people/ycmiao/pxa-linux.git
 S:     Maintained
+F:     arch/arm/boot/dts/mmp*
 F:     arch/arm/mach-mmp/
 
 PXA MMCI DRIVER
@@ -8999,6 +9033,12 @@ L:       linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/fbdev/aty/aty128fb.c
 
+RALINK MIPS ARCHITECTURE
+M:     John Crispin <blogic@openwrt.org>
+L:     linux-mips@linux-mips.org
+S:     Maintained
+F:     arch/mips/ralink
+
 RALINK RT2X00 WIRELESS LAN DRIVER
 P:     rt2x00 project
 M:     Stanislaw Gruszka <sgruszka@redhat.com>
@@ -9773,10 +9813,11 @@ S:      Supported
 F:     drivers/scsi/be2iscsi/
 
 Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
-M:     Sathya Perla <sathya.perla@avagotech.com>
-M:     Ajit Khaparde <ajit.khaparde@avagotech.com>
-M:     Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
-M:     Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+M:     Sathya Perla <sathya.perla@broadcom.com>
+M:     Ajit Khaparde <ajit.khaparde@broadcom.com>
+M:     Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
+M:     Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+M:     Somnath Kotur <somnath.kotur@broadcom.com>
 L:     netdev@vger.kernel.org
 W:     http://www.emulex.com
 S:     Supported
@@ -10138,6 +10179,7 @@ S:      Supported
 F:     drivers/media/pci/solo6x10/
 
 SOFTWARE RAID (Multiple Disks) SUPPORT
+M:     Shaohua Li <shli@kernel.org>
 L:     linux-raid@vger.kernel.org
 T:     git git://neil.brown.name/md
 S:     Supported
@@ -10153,7 +10195,7 @@ F:      drivers/net/ethernet/natsemi/sonic.*
 
 SONICS SILICON BACKPLANE DRIVER (SSB)
 M:     Michael Buesch <m@bues.ch>
-L:     netdev@vger.kernel.org
+L:     linux-wireless@vger.kernel.org
 S:     Maintained
 F:     drivers/ssb/
 F:     include/linux/ssb/
@@ -10271,6 +10313,7 @@ L:      spear-devel@list.st.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.st.com/spear
 S:     Maintained
+F:     arch/arm/boot/dts/spear*
 F:     arch/arm/mach-spear/
 
 SPEAR CLOCK FRAMEWORK SUPPORT
@@ -10833,6 +10876,14 @@ L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
+TI VPE/CAL DRIVERS
+M:     Benoit Parrot <bparrot@ti.com>
+L:     linux-media@vger.kernel.org
+W:     http://linuxtv.org/
+Q:     http://patchwork.linuxtv.org/project/linux-media/list/
+S:     Maintained
+F:     drivers/media/platform/ti-vpe/
+
 TI CDCE706 CLOCK DRIVER
 M:     Max Filippov <jcmvbkbc@gmail.com>
 S:     Maintained
@@ -11298,7 +11349,7 @@ F:      Documentation/usb/ehci.txt
 F:     drivers/usb/host/ehci*
 
 USB GADGET/PERIPHERAL SUBSYSTEM
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 W:     http://www.linux-usb.org/gadget
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -11374,7 +11425,7 @@ S:      Maintained
 F:     drivers/net/usb/pegasus.*
 
 USB PHY LAYER
-M:     Felipe Balbi <balbi@ti.com>
+M:     Felipe Balbi <balbi@kernel.org>
 L:     linux-usb@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
 S:     Maintained
@@ -12113,7 +12164,7 @@ F:      drivers/net/hamradio/*scc.c
 F:     drivers/net/hamradio/z8530.h
 
 ZBUD COMPRESSED PAGE ALLOCATOR
-M:     Seth Jennings <sjennings@variantweb.net>
+M:     Seth Jennings <sjenning@redhat.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zbud.c
@@ -12168,7 +12219,7 @@ F:      include/linux/zsmalloc.h
 F:     Documentation/vm/zsmalloc.txt
 
 ZSWAP COMPRESSED SWAP CACHING
-M:     Seth Jennings <sjennings@variantweb.net>
+M:     Seth Jennings <sjenning@redhat.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index abfb3e8eb0b14e680290f3800bd331e870cbb8c6..682840850f580d6c5361d0e6db4084694e90a1c4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
-PATCHLEVEL = 4
+PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc3
 NAME = Blurry Fish Butt
 
 # *DOCUMENTATION*
diff --git a/README b/README
index f4756ee1c918eada0f011f198c10cd8b9942e184..afc4f0d81ee1798d400a173b287709e6ea07babc 100644 (file)
--- a/README
+++ b/README
@@ -59,7 +59,7 @@ DOCUMENTATION:
 INSTALLING the kernel source:
 
  - If you install the full sources, put the kernel tarball in a
-   directory where you have permissions (eg. your home directory) and
+   directory where you have permissions (e.g. your home directory) and
    unpack it:
 
      xz -cd linux-4.X.tar.xz | tar xvf -
@@ -125,7 +125,7 @@ BUILD directory for the kernel:
 
    When compiling the kernel, all output files will per default be
    stored together with the kernel source code.
-   Using the option "make O=output/dir" allow you to specify an alternate
+   Using the option "make O=output/dir" allows you to specify an alternate
    place for the output files (including .config).
    Example:
 
@@ -159,9 +159,9 @@ CONFIGURING the kernel:
 
      "make nconfig"     Enhanced text based color menus.
 
-     "make xconfig"     X windows (Qt) based configuration tool.
+     "make xconfig"     Qt based configuration tool.
 
-     "make gconfig"     X windows (GTK+) based configuration tool.
+     "make gconfig"     GTK+ based configuration tool.
 
      "make oldconfig"   Default all questions based on the contents of
                         your existing ./.config file and asking about
@@ -268,8 +268,8 @@ COMPILING the kernel:
    Normally, the kernel build system runs in a fairly quiet mode (but not
    totally silent).  However, sometimes you or other kernel developers need
    to see compile, link, or other commands exactly as they are executed.
-   For this, use "verbose" build mode.  This is done by inserting
-   "V=1" in the "make" command.  E.g.:
+   For this, use "verbose" build mode.  This is done by passing
+   "V=1" to the "make" command, e.g.
 
      make V=1 all
 
@@ -300,7 +300,7 @@ COMPILING the kernel:
    kernel image file is usually /vmlinuz, /boot/vmlinuz, /bzImage or
    /boot/bzImage.  To use the new kernel, save a copy of the old image
    and copy the new image over the old one.  Then, you MUST RERUN LILO
-   to update the loading map!! If you don't, you won't be able to boot
+   to update the loading map! If you don't, you won't be able to boot
    the new kernel image.
 
    Reinstalling LILO is usually a matter of running /sbin/lilo. 
index 0cb8cdfa63bcf0837582d5db2a443d511e5ddf48..914baf9cf5fa1cf2faaadfd910fdb6760d9b74f0 100644 (file)
@@ -9,7 +9,7 @@ Please see https://www.kernel.org/ for a list of supported kernels.  Any
 kernel marked with [EOL] is "end of life" and will not have any fixes
 backported to it.
 
-If you've found a bug on a kernel version isn't listed on kernel.org,
+If you've found a bug on a kernel version that isn't listed on kernel.org,
 contact your Linux distribution or embedded vendor for support.
 Alternatively, you can attempt to run one of the supported stable or -rc
 kernels, and see if you can reproduce the bug on that.  It's preferable
index 98f2eeee8f681117908a3958a4698de514388bd9..75d8865d763dc7517382d0d84d52e4d8c92a4d6c 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/scatterlist.h>
 #include <asm/machvec.h>
-#include <asm-generic/pci-bridge.h>
 
 /*
  * The following structure is used to manage multiple PCI busses.
index 22909b83f4734d183b8fcf563f21040ab3433e78..e31557fc06cc1056511dc15d81583ee84ec85fc7 100644 (file)
 
 /* Standard COM flags (except for COM4, because of the 8514 problem) */
 #ifdef CONFIG_SERIAL_8250_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
+#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
+#define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ)
 #else
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
+#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
+#define STD_COM4_FLAGS UPF_BOOT_AUTOCONF
 #endif
 
 #define SERIAL_PORT_DFNS                       \
index 76dde9db79349d0977687623307c934de91e314a..01392bcdfd23a94b99087f7bea2e236426e1e653 100644 (file)
@@ -410,7 +410,7 @@ config ARC_HAS_RTC
        default n
        depends on !SMP
 
-config ARC_HAS_GRTC
+config ARC_HAS_GFRC
        bool "SMP synchronized 64-bit cycle counter"
        default y
        depends on SMP
index f36c047b33cad0c469bd2368bb3e5fa4260535e5..735985974a3136d2a1b1d6184ff754efd0a02743 100644 (file)
@@ -16,7 +16,7 @@ CONFIG_ARC_PLAT_AXS10X=y
 CONFIG_AXS103=y
 CONFIG_ISA_ARCV2=y
 CONFIG_SMP=y
-# CONFIG_ARC_HAS_GRTC is not set
+# CONFIG_ARC_HAS_GFRC is not set
 CONFIG_ARC_UBOOT_SUPPORT=y
 CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
 CONFIG_PREEMPT=y
index 7fac7d85ed6a32bb1abaa4f8e36c5d243cec06c0..fdc5be5b10295d612e0b1b70765c74641d55814c 100644 (file)
@@ -349,14 +349,13 @@ struct cpuinfo_arc {
        struct cpuinfo_arc_bpu bpu;
        struct bcr_identity core;
        struct bcr_isa isa;
-       struct bcr_timer timers;
        unsigned int vec_base;
        struct cpuinfo_arc_ccm iccm, dccm;
        struct {
                unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
                             fpu_sp:1, fpu_dp:1, pad2:6,
                             debug:1, ap:1, smart:1, rtt:1, pad3:4,
-                            pad4:8;
+                            timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
        } extn;
        struct bcr_mpy extn_mpy;
        struct bcr_extn_xymem extn_xymem;
index 258b0e5ad3329a614e59204df55d91873e969623..1fc18ee06cf2df7f3dc971241f38f846a857133b 100644 (file)
 /* Was Intr taken in User Mode */
 #define AUX_IRQ_ACT_BIT_U      31
 
-/* 0 is highest level, but taken by FIRQs, if present in design */
-#define ARCV2_IRQ_DEF_PRIO             0
+/*
+ * User space should be interruptible even by the lowest prio interrupt
+ * Safe even if the actual number of interrupt priorities is lower, or even one
+ */
+#define ARCV2_IRQ_DEF_PRIO     15
 
 /* seed value for status register */
 #define ISA_INIT_STATUS_BITS   (STATUS_IE_MASK | STATUS_AD_MASK | \
index 46f4e5351b2a56e96d440a27201fc612d2e9c195..847e3bbe387fc92f9b4433bf7e08fc0b11e3ec70 100644 (file)
@@ -39,8 +39,8 @@ struct mcip_cmd {
 #define CMD_DEBUG_SET_MASK             0x34
 #define CMD_DEBUG_SET_SELECT           0x36
 
-#define CMD_GRTC_READ_LO               0x42
-#define CMD_GRTC_READ_HI               0x43
+#define CMD_GFRC_READ_LO               0x42
+#define CMD_GFRC_READ_HI               0x43
 
 #define CMD_IDU_ENABLE                 0x71
 #define CMD_IDU_DISABLE                        0x72
index cbfec79137bf77735fa675d0eb2be57da217ba63..b178302947065660bc4765d86f66276045e29055 100644 (file)
@@ -211,7 +211,11 @@ debug_marker_syscall:
 ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
 ; entry was via Exception in DS which got preempted in kernel).
 ;
-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs a workaround
+;
+; Solution is to return from the interrupt w/o any delay slot quirks into a kernel trampoline
+; and, from pure kernel mode, return to the delay slot, which handles the DS bit/BTA correctly
+
 .Lintr_ret_to_delay_slot:
 debug_marker_ds:
 
@@ -222,18 +226,23 @@ debug_marker_ds:
        ld      r2, [sp, PT_ret]
        ld      r3, [sp, PT_status32]
 
+       ; STAT32 for Int return created from scratch
+       ; (No delay slot, disable further intr in trampoline)
+
        bic     r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
        st      r0, [sp, PT_status32]
 
        mov     r1, .Lintr_ret_to_delay_slot_2
        st      r1, [sp, PT_ret]
 
+       ; Orig exception PC/STAT32 kept safe in @orig_r0 and @event stack slots
        st      r2, [sp, 0]
        st      r3, [sp, 4]
 
        b       .Lisr_ret_fast_path
 
 .Lintr_ret_to_delay_slot_2:
+       ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
        sub     sp, sp, SZ_PT_REGS
        st      r9, [sp, -4]
 
@@ -243,11 +252,19 @@ debug_marker_ds:
        ld      r9, [sp, 4]
        sr      r9, [erstatus]
 
+       ; restore AUX_USER_SP if returning to U mode
+       bbit0   r9, STATUS_U_BIT, 1f
+       ld      r9, [sp, PT_sp]
+       sr      r9, [AUX_USER_SP]
+
+1:
        ld      r9, [sp, 8]
        sr      r9, [erbta]
 
        ld      r9, [sp, -4]
        add     sp, sp, SZ_PT_REGS
+
+       ; return from pure kernel mode to delay slot
        rtie
 
 END(ret_from_exception)
index 0394f9f61b466dea018af3ec371c65a8dbc17acb..942526322ae7125cb1e7910adb0523ed16ae7435 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/irqchip.h>
 #include <asm/irq.h>
 
+static int irq_prio;
+
 /*
  * Early Hardware specific Interrupt setup
  * -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,6 +26,14 @@ void arc_init_IRQ(void)
 {
        unsigned int tmp;
 
+       struct irq_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+               unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
+#else
+               unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
+#endif
+       } irq_bcr;
+
        struct aux_irq_ctrl {
 #ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int res3:18, save_idx_regs:1, res2:1,
@@ -46,28 +56,25 @@ void arc_init_IRQ(void)
 
        WRITE_AUX(AUX_IRQ_CTRL, ictrl);
 
-       /* setup status32, don't enable intr yet as kernel doesn't want */
-       tmp = read_aux_reg(0xa);
-       tmp |= ISA_INIT_STATUS_BITS;
-       tmp &= ~STATUS_IE_MASK;
-       asm volatile("flag %0   \n"::"r"(tmp));
-
        /*
         * ARCv2 core intc provides multiple interrupt priorities (upto 16).
         * Typical builds though have only two levels (0-high, 1-low)
         * Linux by default uses lower prio 1 for most irqs, reserving 0 for
         * NMI style interrupts in future (say perf)
-        *
-        * Read the intc BCR to confirm that Linux default priority is avail
-        * in h/w
-        *
-        * Note:
-        *  IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level
-        *  is 0 based.
         */
-       tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF;
-       if (ARCV2_IRQ_DEF_PRIO > tmp)
-               panic("Linux default irq prio incorrect\n");
+
+       READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
+
+       irq_prio = irq_bcr.prio;        /* Encoded as N-1 for N levels */
+       pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
+               irq_prio + 1, irq_prio,
+               irq_bcr.firq ? " FIRQ (not used)":"");
+
+       /* setup status32, don't enable intr yet as kernel doesn't want */
+       tmp = read_aux_reg(0xa);
+       tmp |= STATUS_AD_MASK | (irq_prio << 1);
+       tmp &= ~STATUS_IE_MASK;
+       asm volatile("flag %0   \n"::"r"(tmp));
 }
 
 static void arcv2_irq_mask(struct irq_data *data)
@@ -86,7 +93,7 @@ void arcv2_irq_enable(struct irq_data *data)
 {
        /* set default priority */
        write_aux_reg(AUX_IRQ_SELECT, data->irq);
-       write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+       write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
 
        /*
         * hw auto enables (linux unmask) all by default
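The hunk above drops the hard-coded ARCV2_IRQ_DEF_PRIO sanity check and instead decodes the IRQ build-config register: the prio field holds N-1 for N priority levels, and that lowest priority becomes the default for ordinary interrupts. Below is a minimal stand-alone C sketch of the same decode, not kernel code: the raw value is hypothetical, memcpy() stands in for READ_BCR(), and a little-endian bitfield layout is assumed.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the little-endian branch of struct irq_build in the hunk */
struct irq_build {
	unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
};

int main(void)
{
	/* Hypothetical BCR contents: 16 priority levels (prio = 15), 32 irqs, no FIRQ */
	uint32_t raw = (15u << 24) | (32u << 8) | 0x02u;
	struct irq_build irq_bcr;
	int irq_prio;

	/* READ_BCR() in the kernel copies an aux register straight into the struct */
	memcpy(&irq_bcr, &raw, sizeof(irq_bcr));

	irq_prio = irq_bcr.prio;	/* encoded as N-1 for N levels */
	printf("archs-intc\t: %d priority levels (default %d)%s\n",
	       irq_prio + 1, irq_prio,
	       irq_bcr.firq ? " FIRQ (not used)" : "");
	return 0;
}
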
index bd237acdf4f2f9601efbf4c25c45067ddd2b69d9..bc771f58fefb4a9c67c90781bf520d4004f61fa2 100644 (file)
@@ -96,13 +96,13 @@ static void mcip_probe_n_setup(void)
 #ifdef CONFIG_CPU_BIG_ENDIAN
                unsigned int pad3:8,
                             idu:1, llm:1, num_cores:6,
-                            iocoh:1,  grtc:1, dbg:1, pad2:1,
+                            iocoh:1,  gfrc:1, dbg:1, pad2:1,
                             msg:1, sem:1, ipi:1, pad:1,
                             ver:8;
 #else
                unsigned int ver:8,
                             pad:1, ipi:1, sem:1, msg:1,
-                            pad2:1, dbg:1, grtc:1, iocoh:1,
+                            pad2:1, dbg:1, gfrc:1, iocoh:1,
                             num_cores:6, llm:1, idu:1,
                             pad3:8;
 #endif
@@ -116,7 +116,7 @@ static void mcip_probe_n_setup(void)
                IS_AVAIL1(mp.ipi, "IPI "),
                IS_AVAIL1(mp.idu, "IDU "),
                IS_AVAIL1(mp.dbg, "DEBUG "),
-               IS_AVAIL1(mp.grtc, "GRTC"));
+               IS_AVAIL1(mp.gfrc, "GFRC"));
 
        idu_detected = mp.idu;
 
@@ -125,8 +125,8 @@ static void mcip_probe_n_setup(void)
                __mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
        }
 
-       if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
-               panic("kernel trying to use non-existent GRTC\n");
+       if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
+               panic("kernel trying to use non-existent GFRC\n");
 }
 
 struct plat_smp_ops plat_smp_ops = {
index e1b87444ea9a0740b9651ad6b68fd39f701121a0..a7edceba5f8447db5c2a2a28be9e87c78ef0a511 100644 (file)
@@ -45,6 +45,7 @@ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
 static void read_arc_build_cfg_regs(void)
 {
        struct bcr_perip uncached_space;
+       struct bcr_timer timer;
        struct bcr_generic bcr;
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
        unsigned long perip_space;
@@ -53,7 +54,11 @@ static void read_arc_build_cfg_regs(void)
        READ_BCR(AUX_IDENTITY, cpu->core);
        READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
 
-       READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
+       READ_BCR(ARC_REG_TIMERS_BCR, timer);
+       cpu->extn.timer0 = timer.t0;
+       cpu->extn.timer1 = timer.t1;
+       cpu->extn.rtc = timer.rtc;
+
        cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
 
        READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
@@ -208,9 +213,9 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
                       (unsigned int)(arc_get_core_freq() / 10000) % 100);
 
        n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
-                      IS_AVAIL1(cpu->timers.t0, "Timer0 "),
-                      IS_AVAIL1(cpu->timers.t1, "Timer1 "),
-                      IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ",
+                      IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
+                      IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
+                      IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
                                 CONFIG_ARC_HAS_RTC));
 
        n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
@@ -293,13 +298,13 @@ static void arc_chk_core_config(void)
        struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
        int fpu_enabled;
 
-       if (!cpu->timers.t0)
+       if (!cpu->extn.timer0)
                panic("Timer0 is not present!\n");
 
-       if (!cpu->timers.t1)
+       if (!cpu->extn.timer1)
                panic("Timer1 is not present!\n");
 
-       if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc)
+       if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
                panic("RTC is not present\n");
 
 #ifdef CONFIG_ARC_HAS_DCCM
@@ -334,6 +339,7 @@ static void arc_chk_core_config(void)
                panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
 
        if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+           IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
            !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
                panic("llock/scond livelock workaround missing\n");
 }
index dfad287f1db1c6b55b86faacc0b40d2472636795..156d9833ff84b5c77b7a7bd95d2be15c81d7cbf9 100644 (file)
@@ -62,7 +62,7 @@
 
 /********** Clock Source Device *********/
 
-#ifdef CONFIG_ARC_HAS_GRTC
+#ifdef CONFIG_ARC_HAS_GFRC
 
 static int arc_counter_setup(void)
 {
@@ -83,10 +83,10 @@ static cycle_t arc_counter_read(struct clocksource *cs)
 
        local_irq_save(flags);
 
-       __mcip_cmd(CMD_GRTC_READ_LO, 0);
+       __mcip_cmd(CMD_GFRC_READ_LO, 0);
        stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
 
-       __mcip_cmd(CMD_GRTC_READ_HI, 0);
+       __mcip_cmd(CMD_GFRC_READ_HI, 0);
        stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
 
        local_irq_restore(flags);
@@ -95,7 +95,7 @@ static cycle_t arc_counter_read(struct clocksource *cs)
 }
 
 static struct clocksource arc_counter = {
-       .name   = "ARConnect GRTC",
+       .name   = "ARConnect GFRC",
        .rating = 400,
        .read   = arc_counter_read,
        .mask   = CLOCKSOURCE_MASK(64),
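For reference, here is a minimal user-space sketch of the LO/HI read pattern used by arc_counter_read() above. It is not kernel code: read_gfrc_lo()/read_gfrc_hi() are stand-ins for the two MCIP readback commands, sim_gfrc is a simulated counter, and the stub simply captures the value on the LO read so the pair stays consistent; in the driver, consistency of the two halves is ensured by the local_irq_save()/restore() around the command sequence, as the hunk shows. The union layout assumes a little-endian build.

#include <stdint.h>
#include <stdio.h>

static uint64_t sim_gfrc = 0x0123456789abcdefULL;	/* simulated free-running counter */
static uint64_t latched;				/* value captured on the LO read */

static uint32_t read_gfrc_lo(void)
{
	latched = sim_gfrc;		/* stub: capture both halves together */
	return (uint32_t)latched;
}

static uint32_t read_gfrc_hi(void)
{
	return (uint32_t)(latched >> 32);
}

static uint64_t gfrc_read(void)
{
	union {
		struct { uint32_t l, h; } s;	/* low word first on little-endian */
		uint64_t full;
	} stamp;

	stamp.s.l = read_gfrc_lo();
	stamp.s.h = read_gfrc_hi();
	return stamp.full;
}

int main(void)
{
	printf("GFRC stamp: 0x%016llx\n", (unsigned long long)gfrc_read());
	return 0;
}
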
index 4f799e567fc870502ae147f3c26c5bb402315a9d..cc95ff8f07cbce92ea93398a57605f0b531169e7 100644 (file)
@@ -1337,7 +1337,6 @@ config BIG_LITTLE
 config BL_SWITCHER
        bool "big.LITTLE switcher support"
        depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
-       select ARM_CPU_SUSPEND
        select CPU_PM
        help
          The big.LITTLE "switcher" provides the core functionality to
@@ -2111,7 +2110,8 @@ config ARCH_SUSPEND_POSSIBLE
        def_bool y
 
 config ARM_CPU_SUSPEND
-       def_bool PM_SLEEP
+       def_bool PM_SLEEP || BL_SWITCHER || ARM_PSCI_FW
+       depends on ARCH_SUSPEND_POSSIBLE
 
 config ARCH_HIBERNATION_POSSIBLE
        bool
index c6b6175d020329ac74eeefb0eebf1a6c353d6ea8..1098e91d6d3f34ff5ca9abd4784dd1793897115c 100644 (file)
@@ -1368,6 +1368,7 @@ config DEBUG_SIRFSOC_UART
 config DEBUG_LL_INCLUDE
        string
        default "debug/sa1100.S" if DEBUG_SA1100
+       default "debug/palmchip.S" if DEBUG_UART_8250_PALMCHIP
        default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
        default "debug/at91.S" if DEBUG_AT91_UART
        default "debug/asm9260.S" if DEBUG_ASM9260_UART
@@ -1656,6 +1657,14 @@ config DEBUG_UART_8250_WORD
                DEBUG_BCM_KONA_UART || DEBUG_RK32_UART2 || \
                DEBUG_BRCMSTB_UART
 
+config DEBUG_UART_8250_PALMCHIP
+       bool "8250 UART is Palmchip BK-310x"
+       depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
+       help
+         Palmchip provides a UART implementation compatible with 16550
+         except for having a different register layout.  Say Y here if
+         the debug UART is of this type.
+
 config DEBUG_UART_8250_FLOW_CONTROL
        bool "Enable flow control for 8250 UART"
        depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
index fe254108d1d92c88bb05b90221ca314edcaceedb..c2aedce3fea389332d750950b8ef1937c496cfcf 100644 (file)
@@ -147,8 +147,7 @@ textofs-$(CONFIG_PM_H1940)      := 0x00108000
 ifeq ($(CONFIG_ARCH_SA1100),y)
 textofs-$(CONFIG_SA1111) := 0x00208000
 endif
-textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000
-textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
+textofs-$(CONFIG_ARCH_QCOM_A_FAMILY) := 0x00208000
 textofs-$(CONFIG_ARCH_AXXIA) := 0x00308000
 
 # Machine directory name.  This list is sorted alphanumerically
diff --git a/arch/arm/arm-soc-for-next-contents.txt b/arch/arm/arm-soc-for-next-contents.txt
new file mode 100644 (file)
index 0000000..983e07b
--- /dev/null
@@ -0,0 +1,51 @@
+fixes
+
+next/fixes-non-critical
+
+next/cleanup
+       patch
+               ARM: drop unused Makefile.boot of Multiplatform SoCs
+               ARM: integrator: remove redundant select in Kconfig
+               ARM: netx: remove redundant "depends on ARCH_NETX"
+       renesas/cleanup
+               git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas tags/renesas-cleanup-for-v4.6
+       mvebu/cleanup
+               git://git.infradead.org/linux-mvebu tags/mvebu-cleanup-4.6-1
+
+next/soc
+       patch
+               ARM: debug: add support for Palmchip BK-310x UART
+       mvebu/drivers
+               git://git.infradead.org/linux-mvebu tags/mvebu-drivers-4.6-1
+
+next/arm64
+       patch
+               arm64: defconfig: add spmi and usb related configs
+
+next/dt
+
+next/dt64
+       patch
+               MAINTAINERS: Adding Maintainers for AMD Seattle Device Tree
+               dtb: amd: Fix GICv2 hypervisor and virtual interface sizes
+               dtb: amd: Fix DMA ranges in device tree
+               dtb: amd: Fix typo in SPI device nodes
+               dtb: amd: Misc changes for I2C device nodes
+               dtb: amd: Misc changes for SATA device tree nodes
+               dtb: amd: Misc changes for GPIO devices
+               dtb: amd: Add PERF CCN-504 device tree node
+               dtb: amd: Add KCS device tree node
+               dtb: amd: Add AMD XGBE device tree file
+               dtb: amd: Add support for new AMD Overdrive boards
+               dtb: amd: Add support for AMD/Linaro 96Boards Enterprise Edition Server board
+       renesas/dt64
+               git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas tags/renesas-arm64-dt-for-v4.6
+
+next/defconfig
+       renesas/defconfig
+               git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas tags/renesas-defconfig-for-v4.6
+       mvebu/defconfig
+               git://git.infradead.org/linux-mvebu tags/mvebu-defconfig-4.6-1
+
+next/drivers
+
index 4c23a68a0917049e4c7e9e864dcebca38d6b75b2..7a6a58ef8aaf815728fd59b0ca7af752495595b0 100644 (file)
@@ -106,6 +106,15 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
+# -fstack-protector-strong triggers protection checks in this code,
+# but it is being used too early to link to meaningful stack_chk logic.
+nossp_flags := $(call cc-option, -fno-stack-protector)
+CFLAGS_atags_to_fdt.o := $(nossp_flags)
+CFLAGS_fdt.o := $(nossp_flags)
+CFLAGS_fdt_ro.o := $(nossp_flags)
+CFLAGS_fdt_rw.o := $(nossp_flags)
+CFLAGS_fdt_wip.o := $(nossp_flags)
+
 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
 asflags-y := -DZIMAGE
 
index a4a6d70e8b26ceaf2d9dc7b3af37e4e4bb32eba1..b0726b5e3f8fd7d3c74609d7426dc642dc8610f9 100644 (file)
@@ -60,6 +60,7 @@ dtb-$(CONFIG_ARCH_AXXIA) += \
        axm5516-amarillo.dtb
 dtb-$(CONFIG_ARCH_BCM2835) += \
        bcm2835-rpi-b.dtb \
+       bcm2835-rpi-a.dtb \
        bcm2835-rpi-b-rev2.dtb \
        bcm2835-rpi-b-plus.dtb \
        bcm2835-rpi-a-plus.dtb \
@@ -156,7 +157,8 @@ dtb-$(CONFIG_ARCH_INTEGRATOR) += \
 dtb-$(CONFIG_ARCH_KEYSTONE) += \
        k2hk-evm.dtb \
        k2l-evm.dtb \
-       k2e-evm.dtb
+       k2e-evm.dtb \
+       k2g-evm.dtb
 dtb-$(CONFIG_MACH_KIRKWOOD) += \
        kirkwood-b3.dtb \
        kirkwood-blackarmor-nas220.dtb \
@@ -189,9 +191,12 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += \
        kirkwood-is2.dtb \
        kirkwood-km_kirkwood.dtb \
        kirkwood-laplug.dtb \
+       kirkwood-linkstation-lsqvl.dtb \
+       kirkwood-linkstation-lsvl.dtb \
+       kirkwood-linkstation-lswsxl.dtb \
+       kirkwood-linkstation-lswvl.dtb \
+       kirkwood-linkstation-lswxl.dtb \
        kirkwood-lschlv2.dtb \
-       kirkwood-lswvl.dtb \
-       kirkwood-lswxl.dtb \
        kirkwood-lsxhl.dtb \
        kirkwood-mplcec4.dtb \
        kirkwood-mv88f6281gtw-ge.dtb \
@@ -332,6 +337,7 @@ dtb-$(CONFIG_SOC_IMX6Q) += \
        imx6q-gw551x.dtb \
        imx6q-gw552x.dtb \
        imx6q-hummingboard.dtb \
+       imx6q-icore-rqs.dtb \
        imx6q-nitrogen6x.dtb \
        imx6q-nitrogen6_max.dtb \
        imx6q-novena.dtb \
@@ -514,6 +520,7 @@ dtb-$(CONFIG_SOC_DRA7XX) += \
 dtb-$(CONFIG_ARCH_ORION5X) += \
        orion5x-lacie-d2-network.dtb \
        orion5x-lacie-ethernet-disk-mini-v2.dtb \
+       orion5x-linkstation-lsgl.dtb \
        orion5x-linkstation-lswtgl.dtb \
        orion5x-lswsgl.dtb \
        orion5x-maxtor-shared-storage-2.dtb \
@@ -666,6 +673,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
        sun7i-a20-cubieboard2.dtb \
        sun7i-a20-cubietruck.dtb \
        sun7i-a20-hummingbird.dtb \
+       sun7i-a20-itead-ibox.dtb \
        sun7i-a20-i12-tvbox.dtb \
        sun7i-a20-icnova-swac.dtb \
        sun7i-a20-m3.dtb \
@@ -691,6 +699,8 @@ dtb-$(CONFIG_MACH_SUN8I) += \
        sun8i-a33-ippo-q8h-v1.2.dtb \
        sun8i-a33-q8-tablet.dtb \
        sun8i-a33-sinlinx-sina33.dtb \
+       sun8i-a83t-allwinner-h8homlet-v2.dtb \
+       sun8i-a83t-cubietruck-plus.dtb \
        sun8i-h3-orangepi-plus.dtb
 dtb-$(CONFIG_MACH_SUN9I) += \
        sun9i-a80-optimus.dtb \
index 42e9b665582ab2672f6b112549d4178e5dc63d38..5d5fb62d8fbe783c4ccd54fae36f1c0fc9f8ba2f 100644 (file)
@@ -11,6 +11,7 @@
 /dts-v1/;
 
 #include "am33xx.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        model = "CompuLab CM-T335";
                regulator-max-microvolt = <3300000>;
        };
 
+       /* Regulator for WiFi */
+       vwlan_fixed: fixedregulator@2 {
+               compatible = "regulator-fixed";
+               regulator-name = "vwlan_fixed";
+               gpio = <&gpio0 20 GPIO_ACTIVE_HIGH>; /* gpio0_20 */
+               enable-active-high;
+               regulator-boot-off;
+       };
+
        backlight {
                compatible = "pwm-backlight";
                pwms = <&ecap0 0 50000 0>;
                brightness-levels = <0 51 53 56 62 75 101 152 255>;
                default-brightness-level = <8>;
        };
+
+       sound {
+               compatible = "simple-audio-card";
+               simple-audio-card,name = "cm-t335";
+
+               simple-audio-card,widgets =
+                       "Microphone", "Mic Jack",
+                       "Line", "Line In",
+                       "Headphone", "Headphone Jack";
+
+               simple-audio-card,routing =
+                       "Headphone Jack", "LHPOUT",
+                       "Headphone Jack", "RHPOUT",
+                       "LLINEIN", "Line In",
+                       "RLINEIN", "Line In",
+                       "MICIN", "Mic Jack";
+
+               simple-audio-card,format = "i2s";
+               simple-audio-card,bitclock-master = <&sound_master>;
+               simple-audio-card,frame-master = <&sound_master>;
+
+               simple-audio-card,cpu {
+                       sound-dai = <&mcasp1>;
+               };
+
+               sound_master: simple-audio-card,codec {
+                       sound-dai = <&tlv320aic23>;
+                       system-clock-frequency = <12000000>;
+               };
+       };
 };
 
 &am33xx_pinmux {
                >;
        };
 
+       dcan0_pins: pinmux_dcan0_pins {
+               pinctrl-single,pins = <
+                       /* uart1_ctsn.dcan0_tx */
+                       AM33XX_IOPAD(0x978, PIN_OUTPUT | MUX_MODE2)
+                       /* uart1_rtsn.dcan0_rx */
+                       AM33XX_IOPAD(0x97C, PIN_INPUT | MUX_MODE2)
+               >;
+       };
+
+       dcan1_pins: pinmux_dcan1_pins {
+               pinctrl-single,pins = <
+                       /* uart1_rxd.dcan1_tx */
+                       AM33XX_IOPAD(0x980, PIN_OUTPUT | MUX_MODE2)
+                       /* uart1_txd.dcan1_rx */
+                       AM33XX_IOPAD(0x984, PIN_INPUT | MUX_MODE2)
+               >;
+       };
+
        ecap0_pins: pinmux_ecap0_pins {
                pinctrl-single,pins = <
                        /* eCAP0_in_PWM0_out.eCAP0_in_PWM0_out MODE0 */
                >;
        };
 
+       spi0_pins: pinmux_spi0_pins {
+               pinctrl-single,pins = <
+                       /* spi0_sclk.spi0_sclk */
+                       AM33XX_IOPAD(0x950, PIN_INPUT | MUX_MODE0)
+                       /* spi0_d0.spi0_d0 */
+                       AM33XX_IOPAD(0x954, PIN_OUTPUT_PULLUP | MUX_MODE0)
+                       /* spi0_d1.spi0_d1 */
+                       AM33XX_IOPAD(0x958, PIN_INPUT | MUX_MODE0)
+                       /* spi0_cs0.spi0_cs0 */
+                       AM33XX_IOPAD(0x95C, PIN_OUTPUT | MUX_MODE0)
+                       /* spi0_cs1.spi0_cs1 */
+                       AM33XX_IOPAD(0x960, PIN_OUTPUT | MUX_MODE0)
+               >;
+       };
+
        /* wl1271 bluetooth */
        bluetooth_pins: pinmux_bluetooth_pins {
                pinctrl-single,pins = <
                        AM33XX_IOPAD(0x9b0, PIN_OUTPUT_PULLUP | MUX_MODE7)
                >;
        };
+
+       /* TLV320AIC23B codec */
+       mcasp1_pins: pinmux_mcasp1_pins {
+               pinctrl-single,pins = <
+                       /* MII1_CRS.mcasp1_aclkx */
+                       AM33XX_IOPAD(0x90c, PIN_INPUT_PULLDOWN | MUX_MODE4)
+                       /* MII1_RX_ER.mcasp1_fsx */
+                       AM33XX_IOPAD(0x910, PIN_INPUT_PULLDOWN | MUX_MODE4)
+                       /* MII1_COL.mcasp1_axr2 */
+                       AM33XX_IOPAD(0x908, PIN_INPUT_PULLDOWN | MUX_MODE4)
+                       /* RMII1_REF_CLK.mcasp1_axr3 */
+                       AM33XX_IOPAD(0x944, PIN_INPUT_PULLDOWN | MUX_MODE4)
+               >;
+       };
+
+       /* wl1271 WiFi */
+       wifi_pins: pinmux_wifi_pins {
+               pinctrl-single,pins = <
+                       /* EMU1.gpio3_8 - WiFi IRQ */
+                       AM33XX_IOPAD(0x9e8, PIN_INPUT_PULLUP | MUX_MODE7)
+                       /* XDMA_EVENT_INTR1.gpio0_20 - WiFi enable */
+                       AM33XX_IOPAD(0x9b4, PIN_OUTPUT | MUX_MODE7)
+               >;
+       };
 };
 
 &uart0 {
@@ -264,6 +361,13 @@ status = "okay";
                compatible = "emmicro,em3027";
                reg = <0x56>;
        };
+       /* Audio codec */
+       tlv320aic23: codec@1a {
+               compatible = "ti,tlv320aic23";
+               reg = <0x1a>;
+               #sound-dai-cells= <0>;
+               status = "okay";
+       };
 };
 
 &usb {
@@ -394,3 +498,70 @@ status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&mmc1_pins>;
 };
+
+&dcan0 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&dcan0_pins>;
+};
+
+&dcan1 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&dcan1_pins>;
+};
+
+/* Touchscreen and analog-to-digital converter */
+&tscadc {
+       status = "okay";
+       tsc {
+               ti,wires = <4>;
+               ti,x-plate-resistance = <200>;
+               ti,coordinate-readouts = <5>;
+               ti,wire-config = <0x01 0x10 0x23 0x32>;
+               ti,charge-delay = <0x400>;
+       };
+
+       adc {
+               ti,adc-channels = <4 5 6 7>;
+       };
+};
+
+/* CPU audio */
+&mcasp1 {
+               pinctrl-names = "default";
+               pinctrl-0 = <&mcasp1_pins>;
+
+               op-mode = <0>;          /* MCASP_IIS_MODE */
+               tdm-slots = <2>;
+               /* 16 serializers */
+               num-serializer = <16>;
+               serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
+                       0 0 2 1 0 0 0 0 0 0 0 0 0 0 0 0
+               >;
+               tx-num-evt = <1>;
+               rx-num-evt = <1>;
+
+               #sound-dai-cells= <0>;
+               status = "okay";
+};
+
+&spi0 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&spi0_pins>;
+       ti,pindir-d0-out-d1-in = <1>;
+       /* WLS1271 WiFi */
+       wlcore: wlcore@1 {
+               compatible = "ti,wl1271";
+               pinctrl-names = "default";
+               pinctrl-0 = <&wifi_pins>;
+               reg = <1>;
+               spi-max-frequency = <48000000>;
+               clock-xtal;
+               ref-clock-frequency = <38400000>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+               vwlan-supply = <&vwlan_fixed>;
+       };
+};
index 04885f9f959e21fcffb388940d64be0b6eb32869..1fafaad516ba0481de34cc12320fb9e51c2884ce 100644 (file)
                        ti,mbox-num-users = <4>;
                        ti,mbox-num-fifos = <8>;
                        mbox_wkupm3: wkup_m3 {
+                               ti,mbox-send-noirq;
                                ti,mbox-tx = <0 0 0>;
                                ti,mbox-rx = <0 0 3>;
                        };
index df955ba4dc6203273ff795cc1492e698764eda0d..92068fbf8b577440409c8e029d7cfba28da43cf8 100644 (file)
@@ -73,7 +73,7 @@
        global_timer: timer@48240200 {
                compatible = "arm,cortex-a9-global-timer";
                reg = <0x48240200 0x100>;
-               interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
                interrupt-parent = <&gic>;
                clocks = <&mpu_periphclk>;
        };
@@ -81,7 +81,7 @@
        local_timer: timer@48240600 {
                compatible = "arm,cortex-a9-twd-timer";
                reg = <0x48240600 0x100>;
-               interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
                interrupt-parent = <&gic>;
                clocks = <&mpu_periphclk>;
        };
                        ti,mbox-num-users = <4>;
                        ti,mbox-num-fifos = <8>;
                        mbox_wkupm3: wkup_m3 {
+                               ti,mbox-send-noirq;
                                ti,mbox-tx = <0 0 0>;
                                ti,mbox-rx = <0 0 3>;
                        };
index 64d43325bcbc73aa08bf1da025c255883737f233..ecd09ab6d581bf95aa0b8bee8d596c3da8b9ee22 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&pixcir_ts_pins>;
                reg = <0x5c>;
-               interrupt-parent = <&gpio3>;
-               interrupts = <22 0>;
 
                attb-gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
 
                 * 0x264 represents the offset of padconf register of
                 * gpio3_22 from am43xx_pinmux base.
                 */
-               interrupts-extended = <&gpio3 22 IRQ_TYPE_NONE>,
+               interrupts-extended = <&gpio3 22 IRQ_TYPE_EDGE_FALLING>,
                                      <&am43xx_pinmux 0x264>;
                interrupt-names = "tsc", "wakeup";
 
index 746fd2b179587fe4522724f5b42b16202e97edef..d580e2b70f9a65f7dc078799add6d56628ae0169 100644 (file)
                pinctrl-0 = <&pixcir_ts_pins>;
                reg = <0x5c>;
                interrupt-parent = <&gpio1>;
-               interrupts = <17 0>;
+               interrupts = <17 IRQ_TYPE_EDGE_FALLING>;
 
                attb-gpio = <&gpio1 17 GPIO_ACTIVE_HIGH>;
 
index c53882643ae96b6da8b15c6a26e5fd93ba3300cb..8d93882dc8d541a77c870e635813c32a7c77b79d 100644 (file)
                        DRA7XX_CORE_IOPAD(0x35b8, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d20.rgmii1_rd3 */
                        DRA7XX_CORE_IOPAD(0x35bc, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d21.rgmii1_rd2 */
                        DRA7XX_CORE_IOPAD(0x35c0, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d22.rgmii1_rd1 */
-                       DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT_PULLUP | MUX_MODE3) /* vin2a_d23.rgmii1_rd0 */
+                       DRA7XX_CORE_IOPAD(0x35c4, PIN_INPUT_PULLDOWN | MUX_MODE3) /* vin2a_d23.rgmii1_rd0 */
                >;
        };
 
        pinctrl-names = "default";
        pinctrl-0 = <&qspi1_pins>;
 
-       spi-max-frequency = <20000000>;
+       spi-max-frequency = <48000000>;
 
        spi_flash: spi_flash@0 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "spansion,m25p80", "jedec,spi-nor";
                reg = <0>;                              /* CS0 */
-               spi-max-frequency = <20000000>;
+               spi-max-frequency = <48000000>;
 
                partition@0 {
                        label = "uboot";
 
 &cpsw_emac0 {
        phy_id = <&davinci_mdio>, <0>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-txid";
        dual_emac_res_vlan = <0>;
 };
 
 &cpsw_emac1 {
        phy_id = <&davinci_mdio>, <1>;
-       phy-mode = "rgmii";
+       phy-mode = "rgmii-txid";
        dual_emac_res_vlan = <1>;
 };
 
 };
 
 &usb2 {
-       dr_mode = "peripheral";
+       dr_mode = "host";
 };
 
 &mcasp3 {
index 77bb8e17401a26cad24dcfd2ab5a7c9ddbd4d312..988e99632d49953d012b72e674f8fa040883a1c4 100644 (file)
@@ -25,8 +25,8 @@
 &dra7_pmx_core {
        uart3_pins_default: uart3_pins_default {
                pinctrl-single,pins = <
-                       DRA7XX_CORE_IOPAD(0x37f8, PIN_INPUT_SLEW | MUX_MODE2)   /* uart2_ctsn.uart3_rxd */
-                       DRA7XX_CORE_IOPAD(0x37fc, PIN_INPUT_SLEW | MUX_MODE1)   /* uart2_rtsn.uart3_txd */
+                       DRA7XX_CORE_IOPAD(0x3648, PIN_INPUT_SLEW | MUX_MODE0)   /* uart3_rxd */
+                       DRA7XX_CORE_IOPAD(0x364c, PIN_INPUT_SLEW | MUX_MODE0)   /* uart3_txd */
                >;
        };
 
        pinctrl-0 = <&i2c5_pins_default>;
        clock-frequency = <400000>;
 
-       eeprom_base: atmel@50 {
+       eeprom_base: atmel@54 {
                compatible = "atmel,24c08";
-               reg = <0x50>;
+               reg = <0x54>;
                pagesize = <16>;
        };
 
index 3aa980ad64f0c47f7603d5144ffa04211578b2e1..d5e19cd4d256bc1e8fd51b21201ca576278ee782 100644 (file)
 &pinctrl {
        pwr_led_pin: pwr-led-pin {
                marvell,pins = "mpp63";
-               marvell,function = "gpo";
+               marvell,function = "gpio";
        };
 
        stat_led_pins: stat-led-pins {
index faa474874cb8e5c4436154cf6dcbf38f7daa60ef..11565752b9f6b7f83ea56187ebcac73deaf91a36 100644 (file)
 
        backup_led_pin: backup-led-pin {
                marvell,pins = "mpp63";
-               marvell,function = "gpo";
+               marvell,function = "gpio";
        };
 
        power_led_pin: power-led-pin {
index 836bcc07afc5babe199baaea184b3149d6020dbd..8ca7a4340c0ff8301a80b1d6e75b52770cfdeebd 100644 (file)
 
        fan_ctrl_high_pin: fan-ctrl-high-pin {
                marvell,pins = "mpp63";
-               marvell,function = "gpo";
+               marvell,function = "gpio";
        };
 
        fan_alarm_pin: fan-alarm-pin {
index acd5b1519edb2be2f4cd58246fba337a7059ffa1..86fde0bf552b0b1596353e7351ab9f13b91dbd46 100644 (file)
                                };
                        };
 
+                       /* CON3 */
                        ethernet@30000 {
                                status = "okay";
                                phy = <&phy2>;
                                phy-mode = "sgmii";
                        };
 
+                       /* CON2 */
                        ethernet@34000 {
                                status = "okay";
                                phy = <&phy1>;
                                phy-mode = "sgmii";
                        };
 
+                       /* CON4 */
                        ethernet@70000 {
                                pinctrl-names = "default";
 
index cd316021d6ce2b72af13fa6fc3ccb7b833d423f0..51943598d8585609c5c06a068b7077271ccba45a 100644 (file)
@@ -44,8 +44,8 @@
 #include <dt-bindings/gpio/gpio.h>
 
 / {
-       model = "Marvell Armada 38GP";
-       compatible = "marvell,a385-gp", "marvell,armada388", "marvell,armada380";
+       model = "Marvell Armada 388 DB-88F6820-GP";
+       compatible = "marvell,a388-gp", "marvell,armada388", "marvell,armada380";
 
        chosen {
                stdout-path = "serial0:115200n8";
 
                        /* CON5 */
                        usb3@f0000 {
-                               vcc-supply = <&reg_usb2_1_vbus>;
+                               usb-phy = <&usb2_1_phy>;
                                status = "okay";
                        };
 
                        /* CON7 */
                        usb3@f8000 {
-                               vcc-supply = <&reg_usb3_vbus>;
+                               usb-phy = <&usb3_phy>;
                                status = "okay";
                        };
                };
                };
        };
 
+       usb2_1_phy: usb2_1_phy {
+               compatible = "usb-nop-xceiv";
+               vcc-supply = <&reg_usb2_1_vbus>;
+       };
+
+       usb3_phy: usb3_phy {
+               compatible = "usb-nop-xceiv";
+               vcc-supply = <&reg_usb3_vbus>;
+       };
+
        reg_usb3_vbus: usb3-vbus {
                compatible = "regulator-fixed";
                regulator-name = "usb3-vbus";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
                enable-active-high;
-               regulator-always-on;
                gpio = <&expander1 15 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
                enable-active-high;
-               regulator-always-on;
                gpio = <&expander0 4 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
                enable-active-high;
-               regulator-always-on;
+               regulator-boot-on;
                gpio = <&expander0 2 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-name = "v5.0-sata0";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata0>;
        };
 
                regulator-name = "v12.0-sata0";
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata0>;
        };
 
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
                enable-active-high;
-               regulator-always-on;
+               regulator-boot-on;
                gpio = <&expander0 3 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-name = "v5.0-sata1";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata1>;
        };
 
                regulator-name = "v12.0-sata1";
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata1>;
        };
 
                compatible = "regulator-fixed";
                regulator-name = "pwr_en_sata2";
                enable-active-high;
-               regulator-always-on;
+               regulator-boot-on;
                gpio = <&expander0 11 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-name = "v5.0-sata2";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata2>;
        };
 
                regulator-name = "v12.0-sata2";
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata2>;
        };
 
                compatible = "regulator-fixed";
                regulator-name = "pwr_en_sata3";
                enable-active-high;
-               regulator-always-on;
+               regulator-boot-on;
                gpio = <&expander0 12 GPIO_ACTIVE_HIGH>;
        };
 
                regulator-name = "v5.0-sata3";
                regulator-min-microvolt = <5000000>;
                regulator-max-microvolt = <5000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata3>;
        };
 
                regulator-name = "v12.0-sata3";
                regulator-min-microvolt = <12000000>;
                regulator-max-microvolt = <12000000>;
-               regulator-always-on;
                vin-supply = <&reg_sata3>;
        };
 };
index e8b7f67267723241730c8279902b82b8340d8118..b50784de86e80d5ee75f9154bf00a86f1100922b 100644 (file)
                                reg = <0x22000 0x1000>;
                        };
 
+                       /*
+                        * As a special exception to the "order by
+                        * register address" rule, the eth0 node is
+                        * placed here to ensure that it gets
+                        * registered as the first interface, since
+                        * the network subsystem doesn't allow naming
+                        * interfaces using DT aliases. Without this,
+                        * the ordering of interfaces is different
+                        * from the one used in U-Boot and the
+                        * labeling of interfaces on the boards, which
+                        * is very confusing for users.
+                        */
+                       eth0: ethernet@70000 {
+                               compatible = "marvell,armada-370-neta";
+                               reg = <0x70000 0x4000>;
+                               interrupts-extended = <&mpic 8>;
+                               clocks = <&gateclk 4>;
+                               tx-csum-limit = <9800>;
+                               status = "disabled";
+                       };
+
                        eth1: ethernet@30000 {
                                compatible = "marvell,armada-370-neta";
                                reg = <0x30000 0x4000>;
                                };
                        };
 
-                       eth0: ethernet@70000 {
-                               compatible = "marvell,armada-370-neta";
-                               reg = <0x70000 0x4000>;
-                               interrupts-extended = <&mpic 8>;
-                               clocks = <&gateclk 4>;
-                               tx-csum-limit = <9800>;
-                               status = "disabled";
-                       };
-
                        mdio: mdio@72004 {
                                #address-cells = <1>;
                                #size-cells = <0>;
index 13cf69a8d0fb392b264910f014cf7ff8434f9e2b..fb9e1bbf23385b85b0b82ddb153b922b2d2e0178 100644 (file)
                                nand-on-flash-bbt;
 
                                partitions {
+                                       compatible = "fixed-partitions";
                                        #address-cells = <1>;
                                        #size-cells = <1>;
 
index 77ddff036409f7cb84d28342f8b973ce9792d09e..e683856c507c8bedacb5f8746a43cf9e4aef2207 100644 (file)
 
                        macb0: ethernet@f8008000 {
                                pinctrl-names = "default";
-                               pinctrl-0 = <&pinctrl_macb0_default>;
+                               pinctrl-0 = <&pinctrl_macb0_default &pinctrl_macb0_phy_irq>;
                                phy-mode = "rmii";
                                status = "okay";
+
+                               ethernet-phy@1 {
+                                       reg = <0x1>;
+                                       interrupt-parent = <&pioA>;
+                                       interrupts = <73 IRQ_TYPE_LEVEL_LOW>;
+                               };
                        };
 
                        pdmic@f8018000 {
                                        bias-disable;
                                };
 
+                               pinctrl_macb0_phy_irq: macb0_phy_irq {
+                                       pinmux = <PIN_PC9__GPIO>;
+                               };
+
                                pinctrl_pdmic_default: pdmic_default {
                                        pinmux = <PIN_PB26__PDMIC_DAT>,
                                                <PIN_PB27__PDMIC_CLK>;
index 131614f28e758653e34cc2b993bb6a5a28f20bbb..569026e8f96cadaf25eeb10ca207c02c7f175121 100644 (file)
                        macb0: ethernet@f8020000 {
                                phy-mode = "rmii";
                                status = "okay";
+                               pinctrl-names = "default";
+                               pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
 
                                phy0: ethernet-phy@1 {
                                        interrupt-parent = <&pioE>;
-                                       interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+                                       interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
                                        reg = <1>;
                                };
                        };
                                                atmel,pins =
                                                        <AT91_PIOE 8 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
                                        };
+                                       pinctrl_macb0_phy_irq: macb0_phy_irq_0 {
+                                               atmel,pins =
+                                                       <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+                                       };
                                };
                        };
                };
index 2d4a33100af6bdc4fcd4a3500467456673f54325..4e98cda974032221dbf5a0917de97807653cf943 100644 (file)
                        };
 
                        macb0: ethernet@f8020000 {
+                               pinctrl-0 = <&pinctrl_macb0_rmii &pinctrl_macb0_phy_irq>;
                                phy-mode = "rmii";
                                status = "okay";
+
+                               ethernet-phy@1 {
+                                       reg = <0x1>;
+                                       interrupt-parent = <&pioE>;
+                                       interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+                               };
                        };
 
                        mmc1: mmc@fc000000 {
 
                        pinctrl@fc06a000 {
                                board {
+                                       pinctrl_macb0_phy_irq: macb0_phy_irq {
+                                               atmel,pins =
+                                                       <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
+                                       };
                                        pinctrl_mmc0_cd: mmc0_cd {
                                                atmel,pins =
                                                        <AT91_PIOE 5 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
index ca4ddf86817ab64dc6a8a31193191460bfaf8d4b..626c67d666269d5e878c650b76e8babc424bd48a 100644 (file)
        };
 
        panel: panel {
-               compatible = "qd,qd43003c0-40", "simple-panel";
+               compatible = "qiaodian,qd43003c0-40", "simple-panel";
                backlight = <&backlight>;
                power-supply = <&panel_reg>;
                #address-cells = <1>;
diff --git a/arch/arm/boot/dts/bcm2835-rpi-a.dts b/arch/arm/boot/dts/bcm2835-rpi-a.dts
new file mode 100644 (file)
index 0000000..ddbbbbd
--- /dev/null
@@ -0,0 +1,24 @@
+/dts-v1/;
+#include "bcm2835.dtsi"
+#include "bcm2835-rpi.dtsi"
+
+/ {
+       compatible = "raspberrypi,model-a", "brcm,bcm2835";
+       model = "Raspberry Pi Model A";
+
+       leds {
+               act {
+                       gpios = <&gpio 16 1>;
+               };
+       };
+};
+
+&gpio {
+       pinctrl-0 = <&gpioout &alt0 &i2s_alt2 &alt3>;
+
+       /* I2S interface */
+       i2s_alt2: i2s_alt2 {
+               brcm,pins = <28 29 30 31>;
+               brcm,function = <BCM2835_FSEL_ALT2>;
+       };
+};
index 3afb9fefe2d1fb7cd109e54cbb47856c9d9b520a..76bdbcafab18e8ea7c4e06decd315540aac7ef33 100644 (file)
@@ -1,3 +1,5 @@
+#include <dt-bindings/power/raspberrypi-power.h>
+
 / {
        memory {
                reg = <0 0x10000000>;
                        compatible = "raspberrypi,bcm2835-firmware";
                        mboxes = <&mailbox>;
                };
+
+               power: power {
+                       compatible = "raspberrypi,bcm2835-power";
+                       firmware = <&firmware>;
+                       #power-domain-cells = <1>;
+               };
        };
 };
 
        status = "okay";
        bus-width = <4>;
 };
+
+&pwm {
+       status = "okay";
+};
+
+&usb {
+       power-domains = <&power RPI_POWER_DOMAIN_USB>;
+};
index 971e741e5467b24337f712f5b85e8a4816cdaa4e..05ff5ce91c957786794b68c7e0eae9b83c667545 100644 (file)
@@ -1,5 +1,6 @@
 #include <dt-bindings/pinctrl/bcm2835.h>
 #include <dt-bindings/clock/bcm2835.h>
+#include <dt-bindings/clock/bcm2835-aux.h>
 #include "skeleton.dtsi"
 
 /* This include file covers the common peripherals and configuration between
                        #interrupt-cells = <2>;
                };
 
-               uart0: uart@7e201000 {
+               uart0: serial@7e201000 {
                        compatible = "brcm,bcm2835-pl011", "arm,pl011", "arm,primecell";
                        reg = <0x7e201000 0x1000>;
                        interrupts = <2 25>;
                        clocks = <&clocks BCM2835_CLOCK_VPU>;
                };
 
+               spi1: spi@7e215080 {
+                       compatible = "brcm,bcm2835-aux-spi";
+                       reg = <0x7e215080 0x40>;
+                       interrupts = <1 29>;
+                       clocks = <&aux BCM2835_AUX_CLOCK_SPI1>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+               };
+
+               spi2: spi@7e2150c0 {
+                       compatible = "brcm,bcm2835-aux-spi";
+                       reg = <0x7e2150c0 0x40>;
+                       interrupts = <1 29>;
+                       clocks = <&aux BCM2835_AUX_CLOCK_SPI2>;
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       status = "disabled";
+               };
+
+               pwm: pwm@7e20c000 {
+                       compatible = "brcm,bcm2835-pwm";
+                       reg = <0x7e20c000 0x28>;
+                       clocks = <&clocks BCM2835_CLOCK_PWM>;
+                       assigned-clocks = <&clocks BCM2835_CLOCK_PWM>;
+                       assigned-clock-rates = <10000000>;
+                       #pwm-cells = <2>;
+                       status = "disabled";
+               };
+
                sdhci: sdhci@7e300000 {
                        compatible = "brcm,bcm2835-sdhci";
                        reg = <0x7e300000 0x100>;
                        status = "disabled";
                };
 
-               usb@7e980000 {
+               usb: usb@7e980000 {
                        compatible = "brcm,bcm2835-usb";
                        reg = <0x7e980000 0x10000>;
                        interrupts = <1 9>;
index 57795da616cb40850428861c7756164ace137937..bcce6f50c93d9d2579b7e9b524f65ba8a11af40c 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include "skeleton.dtsi"
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
@@ -53,8 +54,8 @@
 
        pmu {
                compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        clocks@e0110000 {
        timer@e0180000 {
                compatible = "renesas,em-sti";
                reg = <0xe0180000 0x54>;
-               interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&sti_sclk>;
                clock-names = "sclk";
        };
        uart0: serial@e1020000 {
                compatible = "renesas,em-uart";
                reg = <0xe1020000 0x38>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&usia_u0_sclk>;
                clock-names = "sclk";
        };
        uart1: serial@e1030000 {
                compatible = "renesas,em-uart";
                reg = <0xe1030000 0x38>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&usib_u1_sclk>;
                clock-names = "sclk";
        };
        uart2: serial@e1040000 {
                compatible = "renesas,em-uart";
                reg = <0xe1040000 0x38>;
-               interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&usib_u2_sclk>;
                clock-names = "sclk";
        };
        uart3: serial@e1050000 {
                compatible = "renesas,em-uart";
                reg = <0xe1050000 0x38>;
-               interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&usib_u3_sclk>;
                clock-names = "sclk";
        };
        gpio0: gpio@e0050000 {
                compatible = "renesas,em-gio";
                reg = <0xe0050000 0x2c>, <0xe0050040 0x20>;
-               interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 68 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
                #gpio-cells = <2>;
        gpio1: gpio@e0050080 {
                compatible = "renesas,em-gio";
                reg = <0xe0050080 0x2c>, <0xe00500c0 0x20>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 70 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 32>;
                #gpio-cells = <2>;
        gpio2: gpio@e0050100 {
                compatible = "renesas,em-gio";
                reg = <0xe0050100 0x2c>, <0xe0050140 0x20>;
-               interrupts = <0 71 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 72 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
                #gpio-cells = <2>;
        gpio3: gpio@e0050180 {
                compatible = "renesas,em-gio";
                reg = <0xe0050180 0x2c>, <0xe00501c0 0x20>;
-               interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 74 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
                #gpio-cells = <2>;
        gpio4: gpio@e0050200 {
                compatible = "renesas,em-gio";
                reg = <0xe0050200 0x2c>, <0xe0050240 0x20>;
-               interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 76 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 31>;
                #gpio-cells = <2>;
                #size-cells = <0>;
                compatible = "renesas,iic-emev2";
                reg = <0xe0070000 0x28>;
-               interrupts = <0 32 IRQ_TYPE_EDGE_RISING>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_EDGE_RISING>;
                clocks = <&iic0_sclk>;
                clock-names = "sclk";
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-emev2";
                reg = <0xe10a0000 0x28>;
-               interrupts = <0 33 IRQ_TYPE_EDGE_RISING>;
+               interrupts = <GIC_SPI 33 IRQ_TYPE_EDGE_RISING>;
                clocks = <&iic1_sclk>;
                clock-names = "sclk";
                status = "disabled";
index 443a350858460692d18bbe9f9470b0b3778d27a8..9e2840b59ae8520582717eb85369780090de7416 100644 (file)
@@ -43,7 +43,7 @@
                        linux,code = <KEY_POWER>;
                        label = "power key";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
@@ -67,7 +67,7 @@
                        interrupt-parent = <&gpx1>;
                        interrupts = <5 0>;
                        reg = <0x25>;
-                       wakeup;
+                       wakeup-source;
 
                        muic: max77836-muic {
                                compatible = "maxim,max77836-muic";
                interrupt-parent = <&gpx0>;
                interrupts = <7 0>;
                reg = <0x66>;
-               wakeup;
+               wakeup-source;
 
                s2mps14_osc: clocks {
                        compatible = "samsung,s2mps14-clk";
index 3e64d5dcdd60c6abfc9a0a9249f71bb54cd77eb9..1f102f3a1ab1117fd6dae8974878ca4188d03de7 100644 (file)
@@ -43,7 +43,7 @@
                        linux,code = <KEY_POWER>;
                        label = "power key";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
@@ -58,7 +58,7 @@
                        interrupt-parent = <&gpx1>;
                        interrupts = <5 0>;
                        reg = <0x25>;
-                       wakeup;
+                       wakeup-source;
 
                        muic: max77836-muic {
                                compatible = "maxim,max77836-muic";
                interrupt-parent = <&gpx0>;
                interrupts = <7 0>;
                reg = <0x66>;
-               wakeup;
+               wakeup-source;
 
                s2mps14_osc: clocks {
                        compatible = "samsung,s2mps14-clk";
index 045785c44c048b3b15c923b07f059744c717cfa4..ca621a92319e6e2cfb62a607b6a8b1b0998c6e67 100644 (file)
                reg = <0x10000000 0x100>;
        };
 
+       sromc@12570000 {
+               compatible = "samsung,exynos-srom";
+               reg = <0x12570000 0x14>;
+       };
+
        mipi_phy: video-phy@10020710 {
                compatible = "samsung,s5pv210-mipi-video-phy";
                #phy-cells = <1>;
index 5821ad87e32c73298469493de5b6d723eb74ab16..ad7394c1d67a92acc2d64315cde287fed691884c 100644 (file)
                        label = "Up";
                        gpios = <&gpx2 0 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_UP>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                down {
                        label = "Down";
                        gpios = <&gpx2 1 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_DOWN>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                back {
                        label = "Back";
                        gpios = <&gpx1 7 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_BACK>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                home {
                        label = "Home";
                        gpios = <&gpx1 6 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_HOME>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                menu {
                        label = "Menu";
                        gpios = <&gpx1 5 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_MENU>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 104cbb33d2bb0f088b41c943a63bc8d10a523bea..94ca7d36ab3788bd2e80b3294aeb896e20eb8d4a 100644 (file)
@@ -66,7 +66,7 @@
        samsung,keypad-num-rows = <2>;
        samsung,keypad-num-columns = <8>;
        linux,keypad-no-autorepeat;
-       linux,keypad-wakeup;
+       wakeup-source;
        pinctrl-names = "default";
        pinctrl-0 = <&keypad_rows &keypad_cols>;
        status = "okay";
index a50be640f1b03dd0422e6ad8556dcf60b6d5b707..1df2f0bc1d76d81022f1300b6502901df28f8c1f 100644 (file)
                        linux,code = <116>;
                        label = "power";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                ok-key {
index 4f5d37920c8db0375fd3a8c09c8c093987037484..9a75e3effbc98acdbea66e5eedbe3ba04cd1b56b 100644 (file)
@@ -92,7 +92,7 @@
                        linux,code = <171>;
                        label = "config";
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                camera-key {
                        linux,code = <116>;
                        label = "power";
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                ok-key {
index 395c3ca9601eac317e8325bdf857fb6b294aeb75..5e5d3fecb04cd660d1335e9a26691787d754f51e 100644 (file)
@@ -35,7 +35,7 @@
                        linux,code = <KEY_POWER>;
                        label = "power key";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index b44bb682e976705aa23824f421c7cdf419d92151..bf7b21b817e48567d164a91268f5b448d15f1a4f 100644 (file)
@@ -48,7 +48,7 @@
                        linux,code = <KEY_HOME>;
                        label = "home key";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 9e2e24c6177a6bde547b28460958a254ab8c048a..8bca699b7f208366e5c25f911e9ded05bf34ac74 100644 (file)
        samsung,keypad-num-rows = <3>;
        samsung,keypad-num-columns = <2>;
        linux,keypad-no-autorepeat;
-       linux,keypad-wakeup;
+       wakeup-source;
        pinctrl-0 = <&keypad_rows &keypad_cols>;
        pinctrl-names = "default";
        status = "okay";
index a130ab39fa7759763cdfa1d277820cdee5801c4e..a51069f3c03b7abb24a481fc714c44c3f8b3d30f 100644 (file)
@@ -45,7 +45,7 @@
        samsung,keypad-num-rows = <3>;
        samsung,keypad-num-columns = <8>;
        linux,keypad-no-autorepeat;
-       linux,keypad-wakeup;
+       wakeup-source;
        pinctrl-0 = <&keypad_rows &keypad_cols>;
        pinctrl-names = "default";
        status = "okay";
index a6f78c3da935023e27db331c02f1fca1a8826ee8..ed017cc7b14fcf083bd0b89094a6c250fb809e47 100644 (file)
                        linux,code = <116>;
                        label = "power";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                key-ok {
                        linux,code = <139>;
                        label = "ok";
                        debounce-interval = <10>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index e2439e87ee4ab4b807b2692829f58a4823a73e53..b61d1f6375104ba4ca7ac9e4ad1061aa040fdf03 100644 (file)
                reg = <0x10000000 0x100>;
        };
 
+       sromc@12250000 {
+               compatible = "samsung,exynos-srom";
+               reg = <0x12250000 0x14>;
+       };
+
        combiner: interrupt-controller@10440000 {
                compatible = "samsung,exynos4210-combiner";
                #interrupt-cells = <2>;
index c000532c14446db9cbcb6a942cd7c117a9d5e0aa..8b2acc74aa76fac7bcf73d28ad653e7897f8f7ee 100644 (file)
                        label = "SW-TACT2";
                        gpios = <&gpx1 4 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_MENU>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                home {
                        label = "SW-TACT3";
                        gpios = <&gpx1 5 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_HOME>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                up {
                        label = "SW-TACT4";
                        gpios = <&gpx1 6 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_UP>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                down {
                        label = "SW-TACT5";
                        gpios = <&gpx1 7 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_DOWN>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                back {
                        label = "SW-TACT6";
                        gpios = <&gpx2 0 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_BACK>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                wakeup {
                        label = "SW-TACT7";
                        gpios = <&gpx2 1 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_WAKEUP>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 5cb33ba5e296258e0b5104f5f2c0284815d8b75b..95210ef6a6b51ef129cb75a0ef53fb8d5a9f46b8 100644 (file)
@@ -37,7 +37,7 @@
                        label = "Power";
                        gpios = <&gpx1 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                lid-switch {
@@ -46,7 +46,7 @@
                        linux,input-type = <5>; /* EV_SW */
                        linux,code = <0>; /* SW_LID */
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index c1edd6d038a905dffd68e895cdafae25c967f160..0f500cb1eb2ddb0e1f81e2ed58323220d313c4aa 100644 (file)
@@ -37,7 +37,7 @@
                        label = "Power";
                        gpios = <&gpx1 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                lid-switch {
@@ -46,7 +46,7 @@
                        linux,input-type = <5>; /* EV_SW */
                        linux,code = <0>; /* SW_LID */
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 33e2d5f7315b5265fdfc5b74b4a3e38788737f00..234403422c6f4fceb1a8492a470a2ea28fe10a73 100644 (file)
 
        sss@10830000 {
                compatible = "samsung,exynos4210-secss";
-               reg = <0x10830000 0x10000>;
+               reg = <0x10830000 0x300>;
                interrupts = <0 112 0>;
                clocks = <&clock CLK_SSS>;
                clock-names = "secss";
diff --git a/arch/arm/boot/dts/exynos5410-pinctrl.dtsi b/arch/arm/boot/dts/exynos5410-pinctrl.dtsi
new file mode 100644 (file)
index 0000000..f9aa6bb
--- /dev/null
@@ -0,0 +1,406 @@
+/*
+ * Exynos5410 SoC pin-mux and pin-config device tree source
+ *
+ * Copyright (c) 2013 Hardkernel Co., Ltd.
+ *              http://www.hardkernel.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+&pinctrl_0 {
+       gpa0: gpa0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpa1: gpa1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpa2: gpa2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpb0: gpb0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpb1: gpb1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpb2: gpb2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpb3: gpb3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpc0: gpc0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpc3: gpc3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpc1: gpc1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpc2: gpc2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpm5: gpm5 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpd1: gpd1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpe0: gpe0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpe1: gpe1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpf0: gpf0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpf1: gpf1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpg0: gpg0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpg1: gpg1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpg2: gpg2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gph0: gph0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gph1: gph1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpm7: gpm7 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy0: gpy0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy1: gpy1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy2: gpy2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy3: gpy3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy4: gpy4 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy5: gpy5 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy6: gpy6 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpy7: gpy7 {
+               gpio-controller;
+               #gpio-cells = <2>;
+       };
+
+       gpx0: gpx0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               interrupt-parent = <&combiner>;
+               #interrupt-cells = <2>;
+               interrupts = <23 0>,
+                            <24 0>,
+                            <25 0>,
+                            <25 1>,
+                            <26 0>,
+                            <26 1>,
+                            <27 0>,
+                            <27 1>;
+       };
+
+       gpx1: gpx1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               interrupt-parent = <&combiner>;
+               #interrupt-cells = <2>;
+               interrupts = <28 0>,
+                            <28 1>,
+                            <29 0>,
+                            <29 1>,
+                            <30 0>,
+                            <30 1>,
+                            <31 0>,
+                            <31 1>;
+       };
+
+       gpx2: gpx2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpx3: gpx3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+};
+
+&pinctrl_1 {
+       gpj0: gpj0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpj1: gpj1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpj2: gpj2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpj3: gpj3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpj4: gpj4 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpk0: gpk0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpk1: gpk1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpk2: gpk2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpk3: gpk3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+};
+
+&pinctrl_2 {
+       gpv0: gpv0 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpv1: gpv1 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpv2: gpv2 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpv3: gpv3 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+
+       gpv4: gpv4 {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+};
+
+&pinctrl_3 {
+       gpz: gpz {
+               gpio-controller;
+               #gpio-cells = <2>;
+
+               interrupt-controller;
+               #interrupt-cells = <2>;
+       };
+};
index cebeaab3abecd4177566e45d614ef6da6d8405ab..a731fbe28ebc01bc816f589e46dbceb54b7bad36 100644 (file)
@@ -11,6 +11,7 @@
 
 /dts-v1/;
 #include "exynos5410.dtsi"
+#include <dt-bindings/interrupt-controller/irq.h>
 / {
        model = "Samsung SMDK5410 board based on EXYNOS5410";
        compatible = "samsung,smdk5410", "samsung,exynos5410", "samsung,exynos5";
        disable-wp;
 };
 
+&pinctrl_0 {
+       srom_ctl: srom-ctl {
+               samsung,pins = "gpy0-3", "gpy0-4", "gpy0-5",
+                              "gpy1-0", "gpy1-1", "gpy1-2", "gpy1-3";
+               samsung,pin-function = <2>;
+               samsung,pin-drv = <0>;
+       };
+
+       srom_ebi: srom-ebi {
+               samsung,pins = "gpy3-0", "gpy3-1", "gpy3-2", "gpy3-3",
+                              "gpy3-4", "gpy3-5", "gpy3-6", "gpy3-7",
+                              "gpy5-0", "gpy5-1", "gpy5-2", "gpy5-3",
+                              "gpy5-4", "gpy5-5", "gpy5-6", "gpy5-7",
+                              "gpy6-0", "gpy6-1", "gpy6-2", "gpy6-3",
+                              "gpy6-4", "gpy6-5", "gpy6-6", "gpy6-7";
+               samsung,pin-function = <2>;
+               samsung,pin-pud = <3>;
+               samsung,pin-drv = <0>;
+       };
+};
+
+&sromc {
+       pinctrl-names = "default";
+       pinctrl-0 = <&srom_ctl>, <&srom_ebi>;
+
+       ethernet@3,0 {
+               compatible = "smsc,lan9115";
+               reg = <3 0 0x10000>;
+               phy-mode = "mii";
+               interrupt-parent = <&gpx0>;
+               interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+               reg-io-width = <2>;
+               smsc,irq-push-pull;
+               smsc,force-internal-phy;
+
+               samsung,srom-page-mode = <1>;
+               samsung,srom-timing = <9 12 1 9 1 1>;
+       };
+};
+
 &uart0 {
        status = "okay";
 };
index fad0779b1b6e86d887a926a4afdee929ca33a1d7..f3490f5673449440b353ef9644d5fc944c545c4c 100644 (file)
        interrupt-parent = <&gic>;
 
        aliases {
+               pinctrl0 = &pinctrl_0;
+               pinctrl1 = &pinctrl_1;
+               pinctrl2 = &pinctrl_2;
+               pinctrl3 = &pinctrl_3;
                serial0 = &uart0;
                serial1 = &uart1;
                serial2 = &uart2;
                        reg = <0x10000000 0x100>;
                };
 
+               sromc: sromc@12250000 {
+                       compatible = "samsung,exynos-srom";
+                       reg = <0x12250000 0x14>;
+                       #address-cells = <2>;
+                       #size-cells = <1>;
+                       ranges = <0 0 0x04000000 0x20000
+                                 1 0 0x05000000 0x20000
+                                 2 0 0x06000000 0x20000
+                                 3 0 0x07000000 0x20000>;
+               };
+
                pmu_system_controller: system-controller@10040000 {
                        compatible = "samsung,exynos5410-pmu", "syscon";
                        reg = <0x10040000 0x5000>;
                        status = "disabled";
                };
 
+               pinctrl_0: pinctrl@13400000 {
+                       compatible = "samsung,exynos5410-pinctrl";
+                       reg = <0x13400000 0x1000>;
+                       interrupts = <0 45 0>;
+
+                       wakeup-interrupt-controller {
+                               compatible = "samsung,exynos4210-wakeup-eint";
+                               interrupt-parent = <&gic>;
+                               interrupts = <0 32 0>;
+                       };
+               };
+
+               pinctrl_1: pinctrl@14000000 {
+                       compatible = "samsung,exynos5410-pinctrl";
+                       reg = <0x14000000 0x1000>;
+                       interrupts = <0 46 0>;
+               };
+
+               pinctrl_2: pinctrl@10d10000 {
+                       compatible = "samsung,exynos5410-pinctrl";
+                       reg = <0x10d10000 0x1000>;
+                       interrupts = <0 50 0>;
+               };
+
+               pinctrl_3: pinctrl@03860000 {
+                       compatible = "samsung,exynos5410-pinctrl";
+                       reg = <0x03860000 0x1000>;
+                       interrupts = <0 47 0>;
+               };
+
                uart0: serial@12C00000 {
                        compatible = "samsung,exynos4210-uart";
                        reg = <0x12C00000 0x100>;
                };
        };
 };
+
+#include "exynos5410-pinctrl.dtsi"
index 4ecef6981d5c4c7dd1332324e405fcd0d34ce4f0..a103ce8c398586f857a1c90c9696d40b357a5056 100644 (file)
@@ -11,6 +11,7 @@
 
 /dts-v1/;
 #include "exynos5420.dtsi"
+#include "exynos5420-cpus.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 #include <dt-bindings/input/input.h>
                        label = "SW-TACT1";
                        gpios = <&gpx2 7 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_WAKEUP>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
 
+&cpu0 {
+       cpu-supply = <&buck2_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck6_reg>;
+};
+
 &usbdrd_dwc3_1 {
        dr_mode = "host";
 };
diff --git a/arch/arm/boot/dts/exynos5420-cpus.dtsi b/arch/arm/boot/dts/exynos5420-cpus.dtsi
new file mode 100644 (file)
index 0000000..261d251
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * SAMSUNG EXYNOS5420 SoC cpu device tree source
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * This file provides desired ordering for Exynos5420 and Exynos5800
+ * boards: CPU[0123] being the A15.
+ *
+ * The Exynos5420, 5422 and 5800 actually share the same CPU configuration
+ * but particular boards choose different booting order.
+ *
+ * Exynos5420 and Exynos5800 always boot from Cortex-A15. On Exynos5422
+ * booting cluster (big or LITTLE) is chosen by IROM code by reading
+ * the gpg2-1 GPIO. By default all Exynos5422 based boards choose booting
+ * from the LITTLE: Cortex-A7.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x0>;
+                       clocks = <&clock CLK_ARM_CLK>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu1: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x1>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu2: cpu@2 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x2>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu3: cpu@3 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x3>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu4: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x100>;
+                       clocks = <&clock CLK_KFC_CLK>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
+
+               cpu5: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x101>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
+
+               cpu6: cpu@102 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x102>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
+
+               cpu7: cpu@103 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x103>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
+       };
+};
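
The comment at the top of this new file describes the intent; in practice a 5420/5800 board picks the A15-first ordering simply by including the file and then wiring a regulator to the first CPU of each cluster, as the board diffs elsewhere in this merge do. A minimal sketch (the board fragment and regulator labels are illustrative, mirroring the smdk5420/peach-pit hunks, not an additional change in this merge):

	/* hypothetical board .dts fragment */
	#include "exynos5420.dtsi"
	#include "exynos5420-cpus.dtsi"	/* cpu0..cpu3 = Cortex-A15, cpu4..cpu7 = Cortex-A7 */

	&cpu0 {
		cpu-supply = <&buck2_reg>;	/* big (A15) cluster supply */
	};

	&cpu4 {
		cpu-supply = <&buck6_reg>;	/* LITTLE (A7) cluster supply */
	};
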
index 35cfb07dc4bb76073d0458983ad0fe95a8c430a9..3981ddb25036af95090f28d0afac7eaf59271451 100644 (file)
@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/maxim,max77802.h>
 #include <dt-bindings/regulator/maxim,max77802.h>
 #include "exynos5420.dtsi"
+#include "exynos5420-cpus.dtsi"
 
 / {
        model = "Google Peach Pit Rev 6+";
@@ -64,7 +65,7 @@
                        label = "Power";
                        gpios = <&gpx1 2 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                lid-switch {
@@ -73,7 +74,7 @@
                        linux,input-type = <5>; /* EV_SW */
                        linux,code = <0>; /* SW_LID */
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
        vdd-supply = <&ldo9_reg>;
 };
 
+&cpu0 {
+       cpu-supply = <&buck2_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck6_reg>;
+};
+
 &dp {
        status = "okay";
        pinctrl-names = "default";
index ac35aefd320ff0acc0998f11b96c3375dc2454c5..0785fedf441e8001aa33ea198060477c5c644836 100644 (file)
@@ -11,6 +11,7 @@
 
 /dts-v1/;
 #include "exynos5420.dtsi"
+#include "exynos5420-cpus.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 
 / {
 
 };
 
+&cpu0 {
+       cpu-supply = <&buck2_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck6_reg>;
+};
+
 &dp {
        pinctrl-names = "default";
        pinctrl-0 = <&dp_hpd>;
index 48a0a55314f5d184b03d36f1fb52374ef8cf794a..bb559d0cd956223a7a12bd3152d9ee31a0e46512 100644 (file)
                usbdrdphy1 = &usbdrd_phy1;
        };
 
-       cpus {
-               #address-cells = <1>;
-               #size-cells = <0>;
-
-               cpu0: cpu@0 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a15";
-                       reg = <0x0>;
-                       clock-frequency = <1800000000>;
-                       cci-control-port = <&cci_control1>;
+       cluster_a15_opp_table: opp_table0 {
+               compatible = "operating-points-v2";
+               opp-shared;
+               opp@1800000000 {
+                       opp-hz = /bits/ 64 <1800000000>;
+                       opp-microvolt = <1250000>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu1: cpu@1 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a15";
-                       reg = <0x1>;
-                       clock-frequency = <1800000000>;
-                       cci-control-port = <&cci_control1>;
+               opp@1700000000 {
+                       opp-hz = /bits/ 64 <1700000000>;
+                       opp-microvolt = <1212500>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu2: cpu@2 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a15";
-                       reg = <0x2>;
-                       clock-frequency = <1800000000>;
-                       cci-control-port = <&cci_control1>;
+               opp@1600000000 {
+                       opp-hz = /bits/ 64 <1600000000>;
+                       opp-microvolt = <1175000>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu3: cpu@3 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a15";
-                       reg = <0x3>;
-                       clock-frequency = <1800000000>;
-                       cci-control-port = <&cci_control1>;
+               opp@1500000000 {
+                       opp-hz = /bits/ 64 <1500000000>;
+                       opp-microvolt = <1137500>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu4: cpu@100 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a7";
-                       reg = <0x100>;
-                       clock-frequency = <1000000000>;
-                       cci-control-port = <&cci_control0>;
+               opp@1400000000 {
+                       opp-hz = /bits/ 64 <1400000000>;
+                       opp-microvolt = <1112500>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu5: cpu@101 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a7";
-                       reg = <0x101>;
-                       clock-frequency = <1000000000>;
-                       cci-control-port = <&cci_control0>;
+               opp@1300000000 {
+                       opp-hz = /bits/ 64 <1300000000>;
+                       opp-microvolt = <1062500>;
+                       clock-latency-ns = <140000>;
                };
-
-               cpu6: cpu@102 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a7";
-                       reg = <0x102>;
-                       clock-frequency = <1000000000>;
-                       cci-control-port = <&cci_control0>;
+               opp@1200000000 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <1037500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1100000000 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <1012500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1000000000 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = < 987500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@900000000 {
+                       opp-hz = /bits/ 64 <900000000>;
+                       opp-microvolt = < 962500>;
+                       clock-latency-ns = <140000>;
                };
+               opp@800000000 {
+                       opp-hz = /bits/ 64 <800000000>;
+                       opp-microvolt = < 937500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@700000000 {
+                       opp-hz = /bits/ 64 <700000000>;
+                       opp-microvolt = < 912500>;
+                       clock-latency-ns = <140000>;
+               };
+       };
 
-               cpu7: cpu@103 {
-                       device_type = "cpu";
-                       compatible = "arm,cortex-a7";
-                       reg = <0x103>;
-                       clock-frequency = <1000000000>;
-                       cci-control-port = <&cci_control0>;
+       cluster_a7_opp_table: opp_table1 {
+               compatible = "operating-points-v2";
+               opp-shared;
+               opp@1300000000 {
+                       opp-hz = /bits/ 64 <1300000000>;
+                       opp-microvolt = <1275000>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1200000000 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <1212500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1100000000 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <1162500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@1000000000 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <1112500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@900000000 {
+                       opp-hz = /bits/ 64 <900000000>;
+                       opp-microvolt = <1062500>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@800000000 {
+                       opp-hz = /bits/ 64 <800000000>;
+                       opp-microvolt = <1025000>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@700000000 {
+                       opp-hz = /bits/ 64 <700000000>;
+                       opp-microvolt = <975000>;
+                       clock-latency-ns = <140000>;
+               };
+               opp@600000000 {
+                       opp-hz = /bits/ 64 <600000000>;
+                       opp-microvolt = <937500>;
+                       clock-latency-ns = <140000>;
                };
        };
 
+       /*
+        * The 'cpus' node is not present here but instead it is provided
+        * by exynos5420-cpus.dtsi or exynos5422-cpus.dtsi.
+        */
+
        cci: cci@10d20000 {
                compatible = "arm,cci-400";
                #address-cells = <1>;
                compatible = "samsung,exynos4210-pd";
                reg = <0x10044000 0x20>;
                #power-domain-cells = <0>;
-               clocks = <&clock CLK_GSCL0>, <&clock CLK_GSCL1>;
-               clock-names = "asb0", "asb1";
+               clocks = <&clock CLK_FIN_PLL>,
+                        <&clock CLK_MOUT_USER_ACLK300_GSCL>,
+                        <&clock CLK_GSCL0>, <&clock CLK_GSCL1>;
+               clock-names = "oscclk", "clk0", "asb0", "asb1";
        };
 
        isp_pd: power-domain@10044020 {
 
        sss: sss@10830000 {
                compatible = "samsung,exynos4210-secss";
-               reg = <0x10830000 0x10000>;
+               reg = <0x10830000 0x300>;
                interrupts = <0 112 0>;
                clocks = <&clock CLK_SSS>;
                clock-names = "secss";
index b7f60c855459126bf63bb3ffe120ab6f87f37dfe..9b46b9fbac4e774b1542cde204f949bcc791b730 100644 (file)
@@ -4,78 +4,98 @@
  * Copyright (c) 2015 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com
  *
- * The only difference between EXYNOS5422 and EXYNOS5800 is cpu ordering. The
- * EXYNOS5422 is booting from Cortex-A7 core while the EXYNOS5800 is booting
- * from Cortex-A15 core.
+ * This file provides desired ordering for Exynos5422: CPU[0123] being the A7.
  *
- * EXYNOS5422 based board files can include this file to provide cpu ordering
- * which could boot a cortex-a7 from cpu0.
+ * The Exynos5420, 5422 and 5800 actually share the same CPU configuration
+ * but particular boards choose different booting order.
+ *
+ * Exynos5420 and Exynos5800 always boot from Cortex-A15. On Exynos5422
+ * booting cluster (big or LITTLE) is chosen by IROM code by reading
+ * the gpg2-1 GPIO. By default all Exynos5422 based boards choose booting
+ * from the LITTLE: Cortex-A7.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
 
-&cpu0 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a7";
-       reg = <0x100>;
-       clock-frequency = <1000000000>;
-       cci-control-port = <&cci_control0>;
-};
+/ {
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
 
-&cpu1 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a7";
-       reg = <0x101>;
-       clock-frequency = <1000000000>;
-       cci-control-port = <&cci_control0>;
-};
+               cpu0: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x100>;
+                       clocks = <&clock CLK_KFC_CLK>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
 
-&cpu2 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a7";
-       reg = <0x102>;
-       clock-frequency = <1000000000>;
-       cci-control-port = <&cci_control0>;
-};
+               cpu1: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x101>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
 
-&cpu3 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a7";
-       reg = <0x103>;
-       clock-frequency = <1000000000>;
-       cci-control-port = <&cci_control0>;
-};
+               cpu2: cpu@102 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x102>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
 
-&cpu4 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a15";
-       reg = <0x0>;
-       clock-frequency = <1800000000>;
-       cci-control-port = <&cci_control1>;
-};
+               cpu3: cpu@103 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a7";
+                       reg = <0x103>;
+                       clock-frequency = <1000000000>;
+                       cci-control-port = <&cci_control0>;
+                       operating-points-v2 = <&cluster_a7_opp_table>;
+               };
 
-&cpu5 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a15";
-       reg = <0x1>;
-       clock-frequency = <1800000000>;
-       cci-control-port = <&cci_control1>;
-};
+               cpu4: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       clocks = <&clock CLK_ARM_CLK>;
+                       reg = <0x0>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
 
-&cpu6 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a15";
-       reg = <0x2>;
-       clock-frequency = <1800000000>;
-       cci-control-port = <&cci_control1>;
-};
+               cpu5: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x1>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+
+               cpu6: cpu@2 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x2>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
 
-&cpu7 {
-       device_type = "cpu";
-       compatible = "arm,cortex-a15";
-       reg = <0x3>;
-       clock-frequency = <1800000000>;
-       cci-control-port = <&cci_control1>;
+               cpu7: cpu@3 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x3>;
+                       clock-frequency = <1800000000>;
+                       cci-control-port = <&cci_control1>;
+                       operating-points-v2 = <&cluster_a15_opp_table>;
+               };
+       };
 };
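
On Exynos5422 the rewritten comment above means the logical numbering is reversed: cpu0 is now the Cortex-A7, so a board that includes this file gives the LITTLE-cluster regulator to cpu0 and the big-cluster one to cpu4, mirroring the board hunk that follows. Sketch (regulator labels assumed from the surrounding diffs):

	&cpu0 {
		cpu-supply = <&buck6_reg>;	/* LITTLE (A7) cluster supply */
	};

	&cpu4 {
		cpu-supply = <&buck2_reg>;	/* big (A15) cluster supply */
	};
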
index 9134217446b8e2c321ecea78571b0cc3cdaed683..1bd507bfa750155579259b14195e08dab8adbb00 100644 (file)
                        <19200000>;
 };
 
+&cpu0 {
+       cpu-supply = <&buck6_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck2_reg>;
+};
+
 &hdmi {
        status = "okay";
        hpd-gpio = <&gpx3 7 GPIO_ACTIVE_HIGH>;
index 064176f201e74da75c5f2b581602e12a7ab73b6b..6e9edc1610c4990777a516fbde1d8ef9a0a4cdab 100644 (file)
@@ -15,6 +15,7 @@
 #include <dt-bindings/clock/maxim,max77802.h>
 #include <dt-bindings/regulator/maxim,max77802.h>
 #include "exynos5800.dtsi"
+#include "exynos5420-cpus.dtsi"
 
 / {
        model = "Google Peach Pi Rev 10+";
@@ -63,7 +64,7 @@
                        label = "Power";
                        gpios = <&gpx1 2 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                lid-switch {
@@ -72,7 +73,7 @@
                        linux,input-type = <5>; /* EV_SW */
                        linux,code = <0>; /* SW_LID */
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
        };
        vdd-supply = <&ldo9_reg>;
 };
 
+&cpu0 {
+       cpu-supply = <&buck2_reg>;
+};
+
+&cpu4 {
+       cpu-supply = <&buck6_reg>;
+};
+
 &dp {
        status = "okay";
        pinctrl-names = "default";
index c0bb3563cac14cc5dd5e8c8666c3902a3068cb98..8213016803e5d4b1465e7c6742926c6734de10b8 100644 (file)
        compatible = "samsung,exynos5800-clock";
 };
 
+&cluster_a15_opp_table {
+       opp@1700000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1600000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1500000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@1400000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@1300000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@1200000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@1100000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@1000000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@900000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@800000000 {
+               opp-microvolt = <900000>;
+       };
+       opp@700000000 {
+               opp-microvolt = <900000>;
+       };
+       opp@600000000 {
+               opp-hz = /bits/ 64 <600000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@500000000 {
+               opp-hz = /bits/ 64 <500000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@400000000 {
+               opp-hz = /bits/ 64 <400000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@300000000 {
+               opp-hz = /bits/ 64 <300000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@200000000 {
+               opp-hz = /bits/ 64 <200000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+};
+
+&cluster_a7_opp_table {
+       opp@1300000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1200000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1100000000 {
+               opp-microvolt = <1250000>;
+       };
+       opp@1000000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@900000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@800000000 {
+               opp-microvolt = <1100000>;
+       };
+       opp@700000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@600000000 {
+               opp-microvolt = <1000000>;
+       };
+       opp@500000000 {
+               opp-hz = /bits/ 64 <500000000>;
+               opp-microvolt = <1000000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@400000000 {
+               opp-hz = /bits/ 64 <400000000>;
+               opp-microvolt = <1000000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@300000000 {
+               opp-hz = /bits/ 64 <300000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+       opp@200000000 {
+               opp-hz = /bits/ 64 <200000000>;
+               opp-microvolt = <900000>;
+               clock-latency-ns = <140000>;
+       };
+};
+
 &mfc {
        compatible = "samsung,mfc-v8";
 };
index ed1d0b4578ef99402f22941b818451ac366518cf..cda6907a27b9bf18da633088849398634ef07218 100644 (file)
@@ -30,7 +30,7 @@
                        label = "BP1";
                        gpios = <&gpio3 18 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 7ac4f1af16ac856d243674c630b9207da71d8335..1eaa131e2d1809fb38525dc4d1105bc10c9cfef8 100644 (file)
                        label = "User button";
                        gpios = <&gpio0 17 GPIO_ACTIVE_LOW>;
                        linux,code = <0x100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
index 927b391d2058d588eeddc93f7c5acd4e29cafe5f..88594747f454fede06968b152c6a7da76bb3e6b7 100644 (file)
@@ -36,7 +36,7 @@
                        label = "SW3";
                        gpios = <&gpio1 21 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
@@ -49,7 +49,7 @@
                        label = "SW4";
                        gpios = <&gpio1 20 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 4ea89344a5fff51115160fad9e34b16a4a4377eb..fd20e99c777e9764c9c1e298029ea6db0b652ad6 100644 (file)
                        compatible = "fixed-clock";
                        reg = <0>;
                        #clock-cells = <0>;
-                       clock-frequency = <27000000>;
+                       clock-frequency = <26000000>;
                };
        };
 
                        0x02020049 /* row 2, col 2, KEY_KP9 */
                >;
                gpio-activelow;
-               linux,wakeup;
+               wakeup-source;
                debounce-delay-ms = <100>;
                col-scan-delay-us = <5000>;
                linux,no-autorepeat;
index 75b036700d314cefa0040e88b92bc54e2f595a22..4727bbb804e1266a423203118dd22770a853d97f 100644 (file)
@@ -30,7 +30,7 @@
                        label = "BP1";
                        gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,input-type = <1>;
                };
        };
index 649befeb2cf96ef4b968f809a98d5d718227b002..018d24eb9965f8060050555bab1e1b8663bf1f37 100644 (file)
                        label = "Power Button";
                        gpios = <&gpio2 21 GPIO_ACTIVE_HIGH>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 321662f53e3309b835cfdc607681d8b0cc76aeb7..16fc69c69ab2e60a4cbd847aa98825fe8577e318 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc2>;
        cap-sdio-irq;
-       enable-sdio-wakeup;
+       wakeup-source;
        keep-power-in-suspend;
        max-frequency = <50000000>;
        no-1-8-v;
index 34599c547459a2d00756cb42bc05cca3f2fa2798..d270df3e58917aab23f21eb23ef04e667158bf4c 100644 (file)
@@ -41,7 +41,7 @@
                        label = "BP1";
                        gpios = <&gpio3 31 GPIO_ACTIVE_LOW>;
                        linux,code = <256>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,input-type = <1>;
                };
        };
index 3bc18835fb4bbbe0e388922689a70395bb532cd6..4486bc47d14009d96e039d33664a72204f8376c1 100644 (file)
                        label = "Home";
                        gpios = <&gpio5 10 0>;
                        linux,code = <102>; /* KEY_HOME */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                back {
                        label = "Back";
                        gpios = <&gpio5 11 0>;
                        linux,code = <158>; /* KEY_BACK */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                program {
                        label = "Program";
                        gpios = <&gpio5 12 0>;
                        linux,code = <362>; /* KEY_PROGRAM */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                volume-up {
index 53fd75c8ffcfd34c19f43d407bb2f8e6a4573495..368ccd0a053ee6dee4eef00f106e6cf7ede5630c 100644 (file)
                        label = "Volume Up";
                        gpios = <&gpio2 14 0>;
                        linux,code = <115>; /* KEY_VOLUMEUP */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                volume-down {
                        label = "Volume Down";
                        gpios = <&gpio2 15 0>;
                        linux,code = <114>; /* KEY_VOLUMEDOWN */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 13e842b0c7857b1050c73319842113d6bf08dbb3..0ecb43d88522318701304fad35e3c214cfcb276e 100644 (file)
                interrupts = <26 0>;
                gpios = <&gpio3 26 GPIO_ACTIVE_LOW>;
                ti,x-plate-ohms = <660>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
index 64804719f0f442de2225613f54518f68293fc190..3cf682a681f40a986e8eb3142f3621ec5d6736b0 100644 (file)
                interrupt-parent = <&gpio3>;
                interrupts = <23 0>;
                wakeup-gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
                interrupt-parent = <&gpio3>;
                interrupts = <22 0>;
                wakeup-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
        status = "okay";
 
        lvds0: lvds-channel@0 {
-               fsl,data-mapping = "jeida";
-               fsl,data-width = <24>;
+               fsl,data-mapping = "spwg";
+               fsl,data-width = <18>;
                status = "okay";
 
                display-timings {
-                       native-mode = <&lvds_timing0>;
-                       lvds_timing0: hsd100pxn1 {
+                       native-mode = <&lvds0_timing0>;
+
+                       lvds0_timing0: hsd100pxn1 {
                                clock-frequency = <65000000>;
                                hactive = <1024>;
                                vactive = <768>;
                                hsync-active = <0>;
                                vsync-active = <0>;
                                de-active = <1>;
-                               pixelclk-active = <0>;
+                               pixelclk-active = <1>;
+                       };
+
+                       lvds0_timing1: nl12880bc20 {
+                               clock-frequency = <71000000>;
+                               hactive = <1280>;
+                               vactive = <800>;
+                               hback-porch = <50>;
+                               hsync-len = <60>;
+                               hfront-porch = <50>;
+                               vback-porch = <5>;
+                               vsync-len = <13>;
+                               vfront-porch = <5>;
+                               hsync-active = <0>;
+                               vsync-active = <0>;
+                               de-active = <1>;
+                               pixelclk-active = <1>;
                        };
                };
        };
 
        lvds1: lvds-channel@1 {
-               fsl,data-mapping = "jeida";
-               fsl,data-width = <24>;
+               fsl,data-mapping = "spwg";
+               fsl,data-width = <18>;
                status = "okay";
 
                display-timings {
-                       native-mode = <&lvds_timing1>;
-                       lvds_timing1: hsd100pxn1 {
+                       native-mode = <&lvds1_timing0>;
+
+                       lvds1_timing0: hsd100pxn1 {
                                clock-frequency = <65000000>;
                                hactive = <1024>;
                                vactive = <768>;
                                hsync-active = <0>;
                                vsync-active = <0>;
                                de-active = <1>;
-                               pixelclk-active = <0>;
+                               pixelclk-active = <1>;
                        };
                };
        };
index d3e50b22064f28777bbd2a3b60f512d844ff9418..bd3dfefa5778acbcc1124279f02517fc8aecaa3e 100644 (file)
@@ -37,7 +37,7 @@
                        compatible = "fixed-clock";
                        reg = <0>;
                        #clock-cells = <0>;
-                       clock-frequency = <27000000>;
+                       clock-frequency = <26000000>;
                };
        };
 
@@ -50,7 +50,7 @@
                        label = "Power Button";
                        gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
                        linux,code = <116>; /* KEY_POWER */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index c275eecc9472006de381c2a450025badea821f2c..d35a5cdc3229638445b4aacf4414da1bf90b8210 100644 (file)
@@ -77,7 +77,7 @@
                interrupt-parent = <&gpio3>;
                interrupts = <22 0>;
                wakeup-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
index 00bd63e63d0cdd47f4ffa012f572f10323a2d14f..b715deb4ea4666e755ddece8ce198f38187fec15 100644 (file)
@@ -44,7 +44,7 @@
                        label = "recovery";
                        gpios = <&gpio3 16 1>;
                        linux,code = <0x198>; /* KEY_RESTART */
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
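The recurring one-line changes above and below follow a single pattern: the legacy, per-subsystem wakeup properties (gpio-key,wakeup for gpio-keys, linux,wakeup for keypad and touchscreen nodes, enable-sdio-wakeup for SDHC hosts) are replaced by the generic wakeup-source property now preferred by the respective bindings. As a sketch, a gpio-keys button converts like this (node contents taken from one of the hunks in this diff):

	/* before */
	power {
		label = "Power Button";
		gpios = <&gpio2 21 GPIO_ACTIVE_HIGH>;
		linux,code = <KEY_POWER>;
		gpio-key,wakeup;
	};

	/* after */
	power {
		label = "Power Button";
		gpios = <&gpio2 21 GPIO_ACTIVE_HIGH>;
		linux,code = <KEY_POWER>;
		wakeup-source;
	};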
diff --git a/arch/arm/boot/dts/imx6q-icore-rqs.dts b/arch/arm/boot/dts/imx6q-icore-rqs.dts
new file mode 100644 (file)
index 0000000..0053188
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2015 Amarula Solutions B.V.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "imx6q.dtsi"
+#include "imx6qdl-icore-rqs.dtsi"
+
+/ {
+       model = "Engicam i.CoreM6 Quad SOM";
+       compatible = "engicam,imx6-icore-rqs", "fsl,imx6q";
+
+       sound {
+               compatible = "fsl,imx-audio-sgtl5000";
+               model = "imx-audio-sgtl5000";
+               ssi-controller = <&ssi1>;
+               audio-codec = <&codec>;
+               audio-routing =
+                       "MIC_IN", "Mic Jack",
+                       "Mic Jack", "Mic Bias",
+                       "Headphone Jack", "HP_OUT";
+               mux-int-port = <1>;
+               mux-ext-port = <4>;
+       };
+};
+
+&i2c3 {
+       codec: sgtl5000@0a {
+               compatible = "fsl,sgtl5000";
+               reg = <0x0a>;
+               clocks = <&clks IMX6QDL_CLK_CKO>;
+               VDDA-supply = <&reg_2p5v>;
+               VDDIO-supply = <&reg_3p3v>;
+               VDDD-supply = <&reg_1p8v>;
+       };
+};
+
+&sata {
+       status = "okay";
+};
index 88aa1e4c792d2c2c5d56d8a819cac4640bb46369..2792da93db1fc7f86d869ac3259f3b19d4e14156 100644 (file)
@@ -77,7 +77,7 @@
                interrupt-parent = <&gpio3>;
                interrupts = <22 0>;
                wakeup-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
index e26ebeb5b45c055a17919cbbc7751fa4588800f8..a8f3500ee522b3999a31d3c1dff2946e1481ba5d 100644 (file)
@@ -94,7 +94,7 @@
                        label = "User button";
                        gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
                        linux,code = <BTN_MISC>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
index 5cd16f2178b80fd955e8be48b776fb601afe606b..9d7ab6cdc9a60b3a0b4a76c853851d8b87700737 100644 (file)
 
                pinctrl_pwm3: pwm3grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT1__PWM3_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_DAT1__PWM3_OUT           0x1b0b1
                        >;
                };
 
                pinctrl_pwm4: pwm4grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT2__PWM4_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_CMD__PWM4_OUT            0x1b0b1
                        >;
                };
 
index 9fa8a10c7cc8892266a87be766974b03fcfff326..8dd74e98ffd69b35f1dd8349e55231db25592148 100644 (file)
 
                pinctrl_pwm3: pwm3grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT1__PWM3_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_DAT1__PWM3_OUT           0x1b0b1
                        >;
                };
 
index e8375e173873edc5dc39ef011321cbbac9378f00..ec3fe7444e154c4d39f28a5a4a6f8afef05eeb1f 100644 (file)
 
                pinctrl_pwm3: pwm3grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT1__PWM3_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_DAT1__PWM3_OUT           0x1b0b1
                        >;
                };
 
index 66983dc5cbdae8868b2f20ade01b3df1be44575d..367cc49eea0d69cd63c3d3a86121f260b91f05c2 100644 (file)
 };
 
 &pwm4 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_pwm4>;
+       pinctrl-names = "default", "state_dio";
+       pinctrl-0 = <&pinctrl_pwm4_backlight>;
+       pinctrl-1 = <&pinctrl_pwm4_dio>;
        status = "okay";
 };
 
                        >;
                };
 
-               pinctrl_pwm4: pwm4grp {
+               pinctrl_pwm4_backlight: pwm4grpbacklight {
                        fsl,pins = <
+                               /* LVDS_PWM J6.5 */
                                MX6QDL_PAD_SD1_CMD__PWM4_OUT            0x1b0b1
                        >;
                };
 
+               pinctrl_pwm4_dio: pwm4grpdio {
+                       fsl,pins = <
+                               /* DIO3 J16.4 */
+                               MX6QDL_PAD_SD4_DAT2__PWM4_OUT           0x1b0b1
+                       >;
+               };
+
                pinctrl_uart1: uart1grp {
                        fsl,pins = <
                                MX6QDL_PAD_SD3_DAT7__UART1_TX_DATA      0x1b0b1
index cca39f194017c262d6c850172f76c584a73f099f..f27f184558fb4daa43ad525afd06750eea5fd946 100644 (file)
 
                pinctrl_pwm3: pwm3grp {
                        fsl,pins = <
-                               MX6QDL_PAD_SD4_DAT1__PWM3_OUT           0x1b0b1
+                               MX6QDL_PAD_SD1_DAT1__PWM3_OUT           0x1b0b1
                        >;
                };
 
index 6dd0b764e036d1c51cdde3015c1a2cc56178a999..d6c2358ffad4490af869a099e1884bc3122415ac 100644 (file)
@@ -48,7 +48,7 @@
 
        ir_recv: ir-receiver {
                compatible = "gpio-ir-receiver";
-               gpios = <&gpio3 5 1>;
+               gpios = <&gpio3 5 GPIO_ACTIVE_LOW>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_hummingboard_gpio3_5>;
        };
@@ -67,7 +67,7 @@
                reg_usbh1_vbus: usb-h1-vbus {
                        compatible = "regulator-fixed";
                        enable-active-high;
-                       gpio = <&gpio1 0 0>;
+                       gpio = <&gpio1 0 GPIO_ACTIVE_HIGH>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_hummingboard_usbh1_vbus>;
                        regulator-name = "usb_h1_vbus";
@@ -78,7 +78,7 @@
                reg_usbotg_vbus: usb-otg-vbus {
                        compatible = "regulator-fixed";
                        enable-active-high;
-                       gpio = <&gpio3 22 0>;
+                       gpio = <&gpio3 22 GPIO_ACTIVE_HIGH>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&pinctrl_hummingboard_usbotg_vbus>;
                        regulator-name = "usb_otg_vbus";
 &pcie {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_hummingboard_pcie_reset>;
-       reset-gpio = <&gpio3 4 0>;
+       reset-gpio = <&gpio3 4 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi b/arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
new file mode 100644 (file)
index 0000000..f8d945a
--- /dev/null
@@ -0,0 +1,411 @@
+/*
+ * Copyright (C) 2015 Amarula Solutions B.V.
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     version 2 as published by the Free Software Foundation.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/imx6qdl-clock.h>
+
+/ {
+       memory {
+               reg = <0x10000000 0x80000000>;
+       };
+
+       reg_1p8v: regulator-1p8v {
+               compatible = "regulator-fixed";
+               regulator-name = "1P8V";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_2p5v: regulator-2p5v {
+               compatible = "regulator-fixed";
+               regulator-name = "2P5V";
+               regulator-min-microvolt = <2500000>;
+               regulator-max-microvolt = <2500000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_3p3v: regulator-3p3v {
+               compatible = "regulator-fixed";
+               regulator-name = "3P3V";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_sd3_vmmc: regulator-sd3-vmmc {
+               compatible = "regulator-fixed";
+               regulator-name = "P3V3_SD3_SWITCHED";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               gpio = <&gpio1 4 GPIO_ACTIVE_LOW>;
+               enable-active-high;
+       };
+
+       reg_sd4_vmmc: regulator-sd4-vmmc {
+               compatible = "regulator-fixed";
+               regulator-name = "P3V3_SD4_SWITCHED";
+               regulator-min-microvolt = <3300000>;
+               regulator-max-microvolt = <3300000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_usb_h1_vbus: regulator-usb-h1-vbus {
+               compatible = "regulator-fixed";
+               regulator-name = "usb_h1_vbus";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       reg_usb_otg_vbus: regulator-usb-otg-vbus {
+               compatible = "regulator-fixed";
+               regulator-name = "usb_otg_vbus";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+               regulator-boot-on;
+               regulator-always-on;
+       };
+
+       usb_hub: usb-hub {
+               compatible = "smsc,usb3503a";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_usbhub>;
+               reset-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+               clocks = <&clks IMX6QDL_CLK_LVDS2_GATE>;
+               clock-names = "refclk";
+       };
+};
+
+&clks {
+       assigned-clocks = <&clks IMX6QDL_CLK_LVDS2_SEL>;
+       assigned-clock-parents = <&clks IMX6QDL_CLK_OSC>;
+};
+
+&audmux {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_audmux>;
+       status = "okay";
+};
+
+&fec {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_enet>;
+       phy-handle = <&eth_phy>;
+       phy-mode = "rgmii";
+       status = "okay";
+
+       mdio {
+               eth_phy: ethernet-phy {
+                       rxc-skew-ps = <1140>;
+                       txc-skew-ps = <1140>;
+                       txen-skew-ps = <600>;
+                       rxdv-skew-ps = <240>;
+                       rxd0-skew-ps = <420>;
+                       rxd1-skew-ps = <600>;
+                       rxd2-skew-ps = <420>;
+                       rxd3-skew-ps = <240>;
+                       txd0-skew-ps = <60>;
+                       txd1-skew-ps = <60>;
+                       txd2-skew-ps = <60>;
+                       txd3-skew-ps = <240>;
+               };
+       };
+};
+
+&i2c1 {
+       clock-frequency = <100000>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c1>;
+       status = "okay";
+};
+
+&i2c2 {
+       clock-frequency = <100000>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c2>;
+       status = "okay";
+};
+
+&i2c3 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c3>;
+       status = "okay";
+};
+
+&pcie {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_pcie>;
+       reset-gpio = <&gpio3 29 GPIO_ACTIVE_LOW>;
+       status = "okay";
+};
+
+&ssi1 {
+       fsl,mode = "i2s-slave";
+       status = "okay";
+};
+
+&uart4 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart4>;
+       status = "okay";
+};
+
+&usbh1 {
+       vbus-supply = <&reg_usb_h1_vbus>;
+       disable-over-current;
+       clocks = <&clks IMX6QDL_CLK_USBOH3>;
+       status = "okay";
+};
+
+&usbotg {
+       vbus-supply = <&reg_usb_otg_vbus>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usbotg>;
+       disable-over-current;
+       status = "okay";
+};
+
+&usdhc1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_usdhc1>;
+       no-1-8-v;
+       status = "okay";
+};
+
+&usdhc3 {
+       pinctrl-names = "default", "state_100mhz", "state_200mhz";
+       pinctrl-0 = <&pinctrl_usdhc3>;
+       pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
+       pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
+       vmmc-supply = <&reg_sd3_vmmc>;
+       cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+       bus-width = <4>;
+       no-1-8-v;
+       status = "okay";
+};
+
+&usdhc4 {
+       pinctrl-names = "default", "state_100mhz", "state_200mhz";
+       pinctrl-0 = <&pinctrl_usdhc4>;
+       pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
+       pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
+       vmmc-supply = <&reg_sd4_vmmc>;
+       bus-width = <8>;
+       no-1-8-v;
+       non-removable;
+       status = "okay";
+};
+
+&iomuxc {
+       pinctrl_audmux: audmux {
+               fsl,pins = <
+                       MX6QDL_PAD_DISP0_DAT20__AUD4_TXC  0x130b0
+                       MX6QDL_PAD_DISP0_DAT21__AUD4_TXD  0x110b0
+                       MX6QDL_PAD_DISP0_DAT22__AUD4_TXFS 0x130b0
+                       MX6QDL_PAD_DISP0_DAT23__AUD4_RXD  0x130b0
+               >;
+       };
+
+       pinctrl_enet: enetgrp {
+               fsl,pins = <
+                       MX6QDL_PAD_ENET_MDIO__ENET_MDIO       0x1b0b0
+                       MX6QDL_PAD_ENET_MDC__ENET_MDC         0x1b0b0
+                       MX6QDL_PAD_RGMII_TXC__RGMII_TXC       0x1b0b0
+                       MX6QDL_PAD_RGMII_TD0__RGMII_TD0       0x1b0b0
+                       MX6QDL_PAD_RGMII_TD1__RGMII_TD1       0x1b0b0
+                       MX6QDL_PAD_RGMII_TD2__RGMII_TD2       0x1b0b0
+                       MX6QDL_PAD_RGMII_TD3__RGMII_TD3       0x1b0b0
+                       MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b0b0
+                       MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK  0x1b0b0
+                       MX6QDL_PAD_RGMII_RXC__RGMII_RXC       0x1b0b0
+                       MX6QDL_PAD_RGMII_RD0__RGMII_RD0       0x1b0b0
+                       MX6QDL_PAD_RGMII_RD1__RGMII_RD1       0x1b0b0
+                       MX6QDL_PAD_RGMII_RD2__RGMII_RD2       0x1b0b0
+                       MX6QDL_PAD_RGMII_RD3__RGMII_RD3       0x1b0b0
+                       MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b0b0
+                       MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN     0x1b0b0
+               >;
+       };
+
+       pinctrl_i2c1: i2c1grp {
+               fsl,pins = <
+                       MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1
+                       MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1
+               >;
+       };
+
+       pinctrl_i2c2: i2c2grp {
+               fsl,pins = <
+                       MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1
+                       MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1
+               >;
+       };
+
+       pinctrl_i2c3: i2c3grp {
+               fsl,pins = <
+                       MX6QDL_PAD_GPIO_5__I2C3_SCL  0x4001b8b1
+                       MX6QDL_PAD_EIM_D18__I2C3_SDA 0x4001b8b1
+                       MX6QDL_PAD_GPIO_0__CCM_CLKO1    0x130b0
+               >;
+       };
+
+       pinctrl_pcie: pciegrp {
+               fsl,pins = <
+                       MX6QDL_PAD_EIM_D29__GPIO3_IO29 0x1f059  /* PCIe Reset */
+               >;
+       };
+
+       pinctrl_uart4: uart4grp {
+               fsl,pins = <
+                       MX6QDL_PAD_KEY_COL0__UART4_TX_DATA 0x1b0b1
+                       MX6QDL_PAD_KEY_ROW0__UART4_RX_DATA 0x1b0b1
+               >;
+       };
+
+       pinctrl_usbhub: usbhubgrp {
+               fsl,pins = <
+                       MX6QDL_PAD_GPIO_6__GPIO1_IO06  0x1f059  /* HUB USB Reset */
+               >;
+       };
+
+       pinctrl_usbotg: usbotggrp {
+               fsl,pins = <
+                       MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
+               >;
+       };
+
+       pinctrl_usdhc1: usdhc1grp {
+               fsl,pins = <
+                       MX6QDL_PAD_SD1_CMD__SD1_CMD    0x17071
+                       MX6QDL_PAD_SD1_CLK__SD1_CLK    0x10071
+                       MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17071
+                       MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17071
+                       MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17071
+                       MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17071
+               >;
+       };
+
+       pinctrl_usdhc3: usdhc3grp {
+               fsl,pins = <
+                       MX6QDL_PAD_SD3_CMD__SD3_CMD    0x17070
+                       MX6QDL_PAD_SD3_CLK__SD3_CLK    0x10070
+                       MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17070
+                       MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17070
+                       MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17070
+                       MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17070
+                       MX6QDL_PAD_GPIO_1__GPIO1_IO01  0x1f059  /* CD */
+                       MX6QDL_PAD_GPIO_4__GPIO1_IO04  0x1f059  /* PWR */
+               >;
+       };
+
+       pinctrl_usdhc3_100mhz: usdhc3grp_100mhz {
+               fsl,pins = <
+                       MX6QDL_PAD_SD3_CMD__SD3_CMD    0x170B1
+                       MX6QDL_PAD_SD3_CLK__SD3_CLK    0x100B1
+                       MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170B1
+                       MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170B1
+                       MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170B1
+                       MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170B1
+               >;
+       };
+
+       pinctrl_usdhc3_200mhz: usdhc3grp_200mhz {
+               fsl,pins = <
+                       MX6QDL_PAD_SD3_CMD__SD3_CMD    0x170F9
+                       MX6QDL_PAD_SD3_CLK__SD3_CLK    0x100F9
+                       MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x170F9
+                       MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x170F9
+                       MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x170F9
+                       MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x170F9
+               >;
+       };
+
+       pinctrl_usdhc4: usdhc4grp {
+               fsl,pins = <
+                       MX6QDL_PAD_SD4_CMD__SD4_CMD    0x17070
+                       MX6QDL_PAD_SD4_CLK__SD4_CLK    0x10070
+                       MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17070
+                       MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17070
+                       MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17070
+                       MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17070
+                       MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x17070
+                       MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x17070
+                       MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x17070
+                       MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x17070
+               >;
+       };
+
+       pinctrl_usdhc4_100mhz: usdhc4grp_100mhz {
+               fsl,pins = <
+                       MX6QDL_PAD_SD4_CMD__SD4_CMD    0x170B1
+                       MX6QDL_PAD_SD4_CLK__SD4_CLK    0x100B1
+                       MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x170B1
+                       MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x170B1
+                       MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x170B1
+                       MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x170B1
+                       MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x170B1
+                       MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x170B1
+                       MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x170B1
+                       MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x170B1
+               >;
+       };
+
+       pinctrl_usdhc4_200mhz: usdhc4grp_200mhz {
+               fsl,pins = <
+                       MX6QDL_PAD_SD4_CMD__SD4_CMD    0x170F9
+                       MX6QDL_PAD_SD4_CLK__SD4_CLK    0x100F9
+                       MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x170F9
+                       MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x170F9
+                       MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x170F9
+                       MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x170F9
+                       MX6QDL_PAD_SD4_DAT4__SD4_DATA4 0x170F9
+                       MX6QDL_PAD_SD4_DAT5__SD4_DATA5 0x170F9
+                       MX6QDL_PAD_SD4_DAT6__SD4_DATA6 0x170F9
+                       MX6QDL_PAD_SD4_DAT7__SD4_DATA7 0x170F9
+               >;
+       };
+};
index 6d4069cc9419eae3361fece8cd52a1e29b25b853..86460e46d0559b770a16bcecfad8e2522395f226 100644 (file)
        bus-width = <4>;
        mmc-pwrseq = <&usdhc1_pwrseq>;
        keep-power-in-suspend;
+       no-1-8-v;
        non-removable;
        vmmc-supply = <&reg_brcm>;
        status = "okay";
index a35d54fd9cd320eea5d5890c174161588f47e823..dc74aa395ff5473d4cbdb36f28bfef16a1adf916 100644 (file)
                        label = "Power Button";
                        gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                menu {
index caeed56b74a391ae996b877dea9b0a325a14ff42..c6c590d1e94096e2f95e2e539d09acc10b2bce5a 100644 (file)
                        label = "Power Button";
                        gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                menu {
index 1a69a3420ac8d14a8977a7f8d862decb546f87bd..0f1aca450fe6abf931e6eb207b3f6f490e57cfc4 100644 (file)
                        label = "Power Button";
                        gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
 
                menu {
index a6d445c17779cf5c39c26ff41170bb38eda31624..26b7a20a0abbe796f6c92f8e53ec0caebf0e730e 100644 (file)
                power {
                        label = "Power Button";
                        gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,code = <KEY_POWER>;
                };
 
                volume-up {
                        label = "Volume Up";
                        gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,code = <KEY_VOLUMEUP>;
                };
 
                volume-down {
                        label = "Volume Down";
                        gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                        linux,code = <KEY_VOLUMEDOWN>;
                };
        };
index 13cb7ccfea44dd653e8100669d415b88699c7831..efd06b576f1da445b991a279a0af48a687a084dc 100644 (file)
@@ -41,7 +41,7 @@
                        compatible = "fixed-clock";
                        reg = <0>;
                        #clock-cells = <0>;
-                       clock-frequency = <27000000>;
+                       clock-frequency = <26000000>;
                };
        };
 
@@ -52,7 +52,7 @@
                        label = "Power Button";
                        gpios = <&gpio5 2 GPIO_ACTIVE_HIGH>;
                        linux,code = <KEY_POWER>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
 &fec {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_enet>;
+       clocks = <&clks IMX6QDL_CLK_ENET>,
+                <&clks IMX6QDL_CLK_ENET>,
+                <&clks IMX6QDL_CLK_ENET_REF>,
+                <&clks IMX6QDL_CLK_ENET_REF>;
+       clock-names = "ipg", "ahb", "ptp", "enet_out";
        phy-mode = "rmii";
        phy-reset-gpios = <&gpio7 6 GPIO_ACTIVE_HIGH>;
        phy-supply = <&reg_3v3_etn>;
                interrupts = <15 IRQ_TYPE_EDGE_FALLING>;
                reset-gpios = <&gpio2 22 GPIO_ACTIVE_LOW>;
                wake-gpios = <&gpio2 21 GPIO_ACTIVE_HIGH>;
-               linux,wakeup;
+               wakeup-source;
        };
 
        touchscreen: tsc2007@48 {
                interrupts = <26 0>;
                gpios = <&gpio3 26 GPIO_ACTIVE_LOW>;
                ti,x-plate-ohms = <660>;
-               linux,wakeup;
+               wakeup-source;
        };
 };
 
index 1211da894ee9993e630aa0744e6e2478d326e5be..d3e54e40a017264400e6a787453f421e8aac2f21 100644 (file)
                        gpio = <&gpio7 12 0>;
                };
        };
+
+       sound {
+               compatible = "fsl,imx6q-udoo-ac97",
+                            "fsl,imx-audio-ac97";
+               model = "fsl,imx6q-udoo-ac97";
+               audio-cpu = <&ssi1>;
+               audio-routing =
+                       "RX", "Mic Jack",
+                       "Headphone Jack", "TX";
+               mux-int-port = <1>;
+               mux-ext-port = <6>;
+       };
 };
 
 &fec {
                                MX6QDL_PAD_SD3_DAT3__SD3_DATA3          0x17059
                        >;
                };
+
+               pinctrl_ac97_running: ac97running {
+                       fsl,pins = <
+                               MX6QDL_PAD_DI0_PIN2__AUD6_TXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN3__AUD6_TXFS          0x1b0b0
+                               MX6QDL_PAD_DI0_PIN4__AUD6_RXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN15__AUD6_TXC          0x1b0b0
+                               MX6QDL_PAD_EIM_EB2__GPIO2_IO30          0x1b0b0
+                       >;
+               };
+
+               pinctrl_ac97_warm_reset: ac97warmreset {
+                       fsl,pins = <
+                               MX6QDL_PAD_DI0_PIN2__AUD6_TXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN3__GPIO4_IO19         0x1b0b0
+                               MX6QDL_PAD_DI0_PIN4__AUD6_RXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN15__AUD6_TXC          0x1b0b0
+                               MX6QDL_PAD_EIM_EB2__GPIO2_IO30          0x1b0b0
+                       >;
+               };
+
+               pinctrl_ac97_reset: ac97reset {
+                       fsl,pins = <
+                               MX6QDL_PAD_DI0_PIN2__GPIO4_IO18         0x1b0b0
+                               MX6QDL_PAD_DI0_PIN3__GPIO4_IO19         0x1b0b0
+                               MX6QDL_PAD_DI0_PIN4__AUD6_RXD           0x1b0b0
+                               MX6QDL_PAD_DI0_PIN15__AUD6_TXC          0x1b0b0
+                               MX6QDL_PAD_EIM_EB2__GPIO2_IO30          0x1b0b0
+                       >;
+               };
        };
 };
 
        non-removable;
        status = "okay";
 };
+
+&audmux {
+       status = "okay";
+};
+
+&ssi1 {
+       cell-index = <0>;
+       fsl,mode = "ac97-slave";
+       pinctrl-names = "ac97-running", "ac97-reset", "ac97-warm-reset";
+       pinctrl-0 = <&pinctrl_ac97_running>;
+       pinctrl-1 = <&pinctrl_ac97_reset>;
+       pinctrl-2 = <&pinctrl_ac97_warm_reset>;
+       ac97-gpios = <&gpio4 19 0 &gpio4 18 0 &gpio2 30 0>;
+       status = "okay";
+};
index 4f6ae921656f16dfd3814d7a70972be798e0fce9..f74d3db4846dacacd54b37e9cb1d35d707689368 100644 (file)
                                #size-cells = <1>;
                                reg = <0x2100000 0x10000>;
                                ranges = <0 0x2100000 0x10000>;
-                               interrupt-parent = <&intc>;
                                clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
                                         <&clks IMX6QDL_CLK_CAAM_ACLK>,
                                         <&clks IMX6QDL_CLK_CAAM_IPG>,
index 10c69963100fab7e972069a117a625175c21a72c..058bcdceb81aa1874ebbd1826d18c8a8b2743d4b 100644 (file)
        bus-width = <4>;
        non-removable;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        mmc-pwrseq = <&usdhc3_pwrseq>;
        status = "okay";
 };
index 115f3fd78971868ad7025fbb0f46c17c4252e574..96ea936eeeb0a1dce450696b38893ed3b341ed48 100644 (file)
@@ -52,7 +52,7 @@
        cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        vmmc-supply = <&vcc_sd3>;
        status = "okay";
 };
index 94ac4005d9cd3904a9d0148d8f3e0214835fb067..f1d37306e8bf4865c40a8923d76710654e0a9b0c 100644 (file)
        status = "okay";
 };
 
+&i2c3 {
+       clock-frequency = <100000>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_i2c3>;
+       status = "okay";
+};
+
 &i2c4 {
         clock-frequency = <100000>;
         pinctrl-names = "default";
        non-removable;
        no-1-8-v;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        status = "okay";
 };
 
        cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        vmmc-supply = <&vcc_sd3>;
        status = "okay";
 };
                        >;
                };
 
+               pinctrl_i2c3: i2c3grp {
+                       fsl,pins = <
+                               MX6SX_PAD_KEY_ROW4__I2C3_SDA            0x4001b8b1
+                               MX6SX_PAD_KEY_COL4__I2C3_SCL            0x4001b8b1
+                       >;
+               };
+
                pinctrl_i2c4: i2c4grp {
                        fsl,pins = <
                                MX6SX_PAD_CSI_DATA07__I2C4_SDA          0x4001b8b1
index 6aaa5ec3d846eae6019e4414d4c925a5ebc2adc5..e06fe978ae3b57b389afa7adf2a094b14dd5aa05 100644 (file)
        pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
        cd-gpios = <&gpio1 19 GPIO_ACTIVE_LOW>;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        vmmc-supply = <&reg_sd1_vmmc>;
        status = "okay";
 };
        pinctrl-0 = <&pinctrl_usdhc2>;
        no-1-8-v;
        keep-power-in-suspend;
-       enable-sdio-wakeup;
+       wakeup-source;
        status = "okay";
 };
 
index d63c597c07835595521ae00862343134f71967cf..f8a868552707124b3e1a86d074c62718927c3805 100644 (file)
@@ -22,7 +22,7 @@
        pinctrl-0 = <&pinctrl_usdhc1>;
        cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
-       enable-sdio-wakeup;
+       wakeup-source;
        status = "okay";
 };
 
index b2c453662905e313626a79a93c1543d1bfb301ef..b267f79e30590bba28c50ab9daaaf887d4bb1d76 100644 (file)
        pinctrl-0 = <&pinctrl_usdhc1>;
        cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
-       enable-sdio-wakeup;
+       wakeup-source;
        keep-power-in-suspend;
        status = "okay";
 };
index 25ad3097874016ab3f1281e6a46d7824c7a91fc8..b5a50e0e7ff1980523e6567934135b7aad7a73d9 100644 (file)
                clock-output-names = "osc";
        };
 
+       timer {
+               compatible = "arm,armv7-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupt-parent = <&intc>;
+       };
+
        etr@30086000 {
                compatible = "arm,coresight-tmc", "arm,primecell";
                reg = <0x30086000 0x1000>;
diff --git a/arch/arm/boot/dts/k2g-evm.dts b/arch/arm/boot/dts/k2g-evm.dts
new file mode 100644 (file)
index 0000000..99663b3
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Device Tree Source for K2G EVM
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+/dts-v1/;
+
+#include "k2g.dtsi"
+
+/ {
+       compatible =  "ti,k2g-evm", "ti,k2g", "ti,keystone";
+       model = "Texas Instruments K2G General Purpose EVM";
+
+       memory {
+               device_type = "memory";
+               reg = <0x00000008 0x00000000 0x00000000 0x80000000>;
+       };
+
+};
+
+&uart0 {
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/k2g.dtsi b/arch/arm/boot/dts/k2g.dtsi
new file mode 100644 (file)
index 0000000..7ff2796
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Device Tree Source for K2G SOC
+ *
+ * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include "skeleton.dtsi"
+
+/ {
+       compatible = "ti,k2g","ti,keystone";
+       model = "Texas Instruments K2G SoC";
+       #address-cells = <2>;
+       #size-cells = <2>;
+       interrupt-parent = <&gic>;
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a15";
+                       device_type = "cpu";
+                       reg = <0>;
+               };
+       };
+
+       gic: interrupt-controller@02561000 {
+               compatible = "arm,cortex-a15-gic";
+               #interrupt-cells = <3>;
+               interrupt-controller;
+               reg = <0x0 0x02561000 0x0 0x1000>,
+                     <0x0 0x02562000 0x0 0x2000>,
+                     <0x0 0x02564000 0x0 0x1000>,
+                     <0x0 0x02566000 0x0 0x2000>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) |
+                               IRQ_TYPE_LEVEL_HIGH)>;
+       };
+
+       timer {
+               compatible = "arm,armv7-timer";
+               interrupts =
+                       <GIC_PPI 13
+                               (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 14
+                               (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 11
+                               (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                       <GIC_PPI 10
+                               (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+       };
+
+       pmu {
+               compatible = "arm,cortex-a15-pmu";
+               interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
+       };
+
+       soc {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "ti,keystone","simple-bus";
+               ranges = <0x0 0x0 0x0 0xc0000000>;
+               dma-ranges = <0x80000000 0x8 0x00000000 0x80000000>;
+
+               uart0: serial@02530c00 {
+                       compatible = "ns16550a";
+                       current-speed = <115200>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       reg = <0x02530c00 0x100>;
+                       interrupts = <GIC_SPI 164 IRQ_TYPE_EDGE_RISING>;
+                       clock-frequency = <200000000>;
+                       status = "disabled";
+               };
+       };
+};
index bf4143c6cb8f10991d4abbaae126966f9f9c753c..b84af3da8c84d596b2b950489541c4efd9216783 100644 (file)
@@ -14,7 +14,7 @@
 #include "kirkwood-synology.dtsi"
 
 / {
-       model = "Synology DS111";
+       model = "Synology DS112";
        compatible = "synology,ds111", "marvell,kirkwood";
 
        memory {
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-6282.dtsi b/arch/arm/boot/dts/kirkwood-linkstation-6282.dtsi
new file mode 100644 (file)
index 0000000..6548e68
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ * Device Tree common file for kirkwood-6282 based Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kirkwood.dtsi"
+#include "kirkwood-6282.dtsi"
+#include "kirkwood-linkstation.dtsi"
+
+/ {
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd0: pmx-power-hdd0 {
+                               marvell,pins = "mpp8";
+                               marvell,function = "gpio";
+                       };
+                       pmx_usb_vbus: pmx-usb-vbus {
+                               marvell,pins = "mpp12";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_high: pmx-fan-high {
+                               marvell,pins = "mpp16";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_low: pmx-fan-low {
+                               marvell,pins = "mpp17";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_alarm: pmx-led-alarm {
+                               marvell,pins = "mpp36";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_red: pmx-led-function-red {
+                               marvell,pins = "mpp37";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_info: pmx-led-info {
+                               marvell,pins = "mpp38";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_blue: pmx-led-function-blue {
+                               marvell,pins = "mpp39";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_power: pmx-led-power {
+                               marvell,pins = "mpp40";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_lock: pmx-fan-lock {
+                               marvell,pins = "mpp43";
+                               marvell,function = "gpio";
+                       };
+                       pmx_button_function: pmx-button-function {
+                               marvell,pins = "mpp45";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_switch: pmx-power-switch {
+                               marvell,pins = "mpp46";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_auto_switch: pmx-power-auto-switch {
+                               marvell,pins = "mpp47";
+                               marvell,function = "gpio";
+                       };
+               };
+       };
+
+       gpio_keys {
+               function-button {
+                       gpios = <&gpio1 13 GPIO_ACTIVE_LOW>;
+               };
+
+               power-on-switch {
+                       gpios = <&gpio1 14 GPIO_ACTIVE_LOW>;
+               };
+
+               power-auto-switch {
+                       gpios = <&gpio1 15 GPIO_ACTIVE_LOW>;
+               };
+       };
+
+       gpio_leds {
+               red-alarm-led {
+                       label = "linkstation:red:alarm";
+                       gpios = <&gpio1 4 GPIO_ACTIVE_HIGH>;
+               };
+
+               red-function-led {
+                       label = "linkstation:red:function";
+                       gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+               };
+
+               amber-info-led {
+                       label = "linkstation:amber:info";
+                       gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+               };
+
+               blue-function-led {
+                       label = "linkstation:blue:function";
+                       gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+               };
+
+               blue-power-led {
+                       label = "linkstation:blue:power";
+                       gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+                       default-state = "keep";
+               };
+       };
+
+       gpio_fan {
+               compatible = "gpio-fan";
+               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+               pinctrl-names = "default";
+
+               gpios = <&gpio0 17 GPIO_ACTIVE_LOW
+                        &gpio0 16 GPIO_ACTIVE_LOW>;
+
+               gpio-fan,speed-map = <0 3
+                               1500 2
+                               3250 1
+                               5000 0>;
+
+               alarm-gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>;
+       };
+
+       regulators {
+               usb_power: regulator@1 {
+                       gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
+               };
+
+               hdd_power0: regulator@2 {
+                       gpio = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
+
+&mdio {
+       status = "okay";
+
+       ethphy0: ethernet-phy@0 {
+               device_type = "ethernet-phy";
+               reg = <0>;
+       };
+};
+
+&eth0 {
+       status = "okay";
+
+       ethernet0-port@0 {
+               phy-handle = <&ethphy0>;
+       };
+};
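
A note on the gpio_fan node above: the gpio-fan binding takes gpio-fan,speed-map as <RPM control-value> pairs with RPM ascending, and the control value's bits are driven onto the listed gpios (bit 0 goes to the first entry). A minimal sketch reusing the values from this hunk; the RPM figures and polarities come from the file itself, not from separate hardware documentation:

    gpio_fan {
            compatible = "gpio-fan";
            /* bit 0 of the control value -> mpp17 ("fan low"),
             * bit 1 -> mpp16 ("fan high"), both active-low */
            gpios = <&gpio0 17 GPIO_ACTIVE_LOW
                     &gpio0 16 GPIO_ACTIVE_LOW>;
            /* per this map, control value 3 means 0 RPM (stopped)
             * and control value 0 means 5000 RPM (full speed) */
            gpio-fan,speed-map = <0 3
                            1500 2
                            3250 1
                            5000 0>;
            /* fan-lock detect input, mpp43 on the 6282 boards */
            alarm-gpios = <&gpio1 11 GPIO_ACTIVE_HIGH>;
    };
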
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-duo-6281.dtsi b/arch/arm/boot/dts/kirkwood-linkstation-duo-6281.dtsi
new file mode 100644 (file)
index 0000000..cf2e69f
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Device Tree common file for kirkwood-6281 based 2-Bay Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "kirkwood.dtsi"
+#include "kirkwood-6281.dtsi"
+#include "kirkwood-linkstation.dtsi"
+
+/ {
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd0: pmx-power-hdd0 {
+                               marvell,pins = "mpp28";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_hdd1: pmx-power-hdd1 {
+                               marvell,pins = "mpp29";
+                               marvell,function = "gpio";
+                       };
+                       pmx_usb_vbus: pmx-usb-vbus {
+                               marvell,pins = "mpp37";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_alarm: pmx-led-alarm {
+                               marvell,pins = "mpp49";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_red: pmx-led-function-red {
+                               marvell,pins = "mpp34";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_blue: pmx-led-function-blue {
+                               marvell,pins = "mpp36";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_info: pmx-led-info {
+                               marvell,pins = "mpp38";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_power: pmx-led-power {
+                               marvell,pins = "mpp39";
+                               marvell,function = "gpio";
+                       };
+                       pmx_button_function: pmx-button-function {
+                               marvell,pins = "mpp41";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_switch: pmx-power-switch {
+                               marvell,pins = "mpp42";
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_auto_switch: pmx-power-auto-switch {
+                               marvell,pins = "mpp43";
+                               marvell,function = "gpio";
+                       };
+               };
+
+               sata@80000 {
+                       nr-ports = <2>;
+               };
+       };
+
+       gpio_keys {
+               function-button {
+                       gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
+               };
+
+               power-on-switch {
+                       gpios = <&gpio1 10 GPIO_ACTIVE_LOW>;
+               };
+
+               power-auto-switch {
+                       gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
+               };
+       };
+
+       gpio_leds {
+               red-alarm-led {
+                       label = "linkstation:red:alarm";
+                       gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+               };
+
+               red-function-led {
+                       label = "linkstation:red:function";
+                       gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+               };
+
+               amber-info-led {
+                       label = "linkstation:amber:info";
+                       gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
+               };
+
+               blue-function-led {
+                       label = "linkstation:blue:function";
+                       gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+               };
+
+               blue-power-led {
+                       label = "linkstation:blue:power";
+                       gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
+                       default-state = "keep";
+               };
+       };
+
+       regulators {
+               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
+
+               usb_power: regulator@1 {
+                       gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;
+               };
+
+               hdd_power0: regulator@2 {
+                       gpio = <&gpio0 28 GPIO_ACTIVE_HIGH>;
+               };
+
+               hdd_power1: regulator@3 {
+                       compatible = "regulator-fixed";
+                       reg = <3>;
+                       regulator-name = "HDD1 Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+                       gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
+
+&mdio {
+       status = "okay";
+
+       ethphy1: ethernet-phy@8 {
+               device_type = "ethernet-phy";
+               reg = <8>;
+       };
+};
+
+&eth1 {
+       status = "okay";
+
+       ethernet1-port@0 {
+               phy-handle = <&ethphy1>;
+       };
+};
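
The regulators block above supplies only the control GPIOs; the rest of each node (fixed 5 V range, enable polarity, always-on flags) comes from the shared kirkwood-linkstation.dtsi added later in this merge, and the two halves merge when the board file is compiled. A sketch of the combined result for the 6281 USB rail, with the GPIO value taken from this hunk (mpp37 = bank 1, bit 5):

    usb_power: regulator@1 {
            compatible = "regulator-fixed";
            reg = <1>;
            regulator-name = "USB Power";
            regulator-min-microvolt = <5000000>;    /* fixed 5 V rail */
            regulator-max-microvolt = <5000000>;
            enable-active-high;                     /* drive the GPIO high to enable */
            regulator-always-on;
            regulator-boot-on;
            gpio = <&gpio1 5 GPIO_ACTIVE_HIGH>;     /* mpp37 -> bank 1, bit 37 - 32 = 5 */
    };
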
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lsqvl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lsqvl.dts
new file mode 100644 (file)
index 0000000..6dc0df2
--- /dev/null
@@ -0,0 +1,135 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-QVL
+ *
+ * Copyright (C) 2016, Mario Lange <mario_lange@gmx.net>
+ *
+ * Based on kirkwood-linkstation-lswvl.dts,
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-6282.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-QVL";
+       compatible = "buffalo,lsqvl", "marvell,kirkwood-88f6282", "marvell,kirkwood";
+
+       memory { /* 256 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x10000000>;
+       };
+
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd1: pmx-power-hdd1 {
+                               marvell,pins = "mpp9";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr0: pmx-led-hdderr0 {
+                               marvell,pins = "mpp34";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr1: pmx-led-hdderr1 {
+                               marvell,pins = "mpp35";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr2: pmx-led-hdderr2 {
+                               marvell,pins = "mpp24";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr3: pmx-led-hdderr3 {
+                               marvell,pins = "mpp25";
+                               marvell,function = "gpio";
+                       };
+               };
+
+               sata@80000 {
+                       nr-ports = <2>;
+               };
+       };
+
+       gpio_leds {
+               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+                            &pmx_led_info &pmx_led_power
+                            &pmx_led_function_blue
+                            &pmx_led_hdderr0
+                            &pmx_led_hdderr1
+                            &pmx_led_hdderr2
+                            &pmx_led_hdderr3>;
+
+               red-hdderr0-led {
+                       label = "linkstation:red:hdderr0";
+                       gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+               };
+
+               red-hdderr1-led {
+                       label = "linkstation:red:hdderr1";
+                       gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+               };
+
+               red-hdderr2-led {
+                       label = "linkstation:red:hdderr2";
+                       gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
+               };
+
+               red-hdderr3-led {
+                       label = "linkstation:red:hdderr3";
+                       gpios = <&gpio0 25 GPIO_ACTIVE_LOW>;
+               };
+       };
+
+       regulators {
+               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
+
+               hdd_power1: regulator@3 {
+                       compatible = "regulator-fixed";
+                       reg = <3>;
+                       regulator-name = "HDD1 Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+                       gpio = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
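
The board file above relies on dtc's node merging: it includes kirkwood-linkstation-6282.dtsi and re-opens nodes at the same path (pin-controller@10000, sata@80000, gpio_leds, regulators) to add board-specific properties or override inherited ones. A minimal sketch of the mechanism, with hypothetical file names:

    /* common.dtsi (hypothetical) */
    / {
            ocp@f1000000 {
                    sata@80000 {
                            status = "okay";
                            nr-ports = <1>;
                    };
            };
    };

    /* board.dts (hypothetical) */
    #include "common.dtsi"
    / {
            ocp@f1000000 {
                    sata@80000 {
                            nr-ports = <2>;  /* overrides the value inherited from common.dtsi */
                    };
            };
    };
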
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lsvl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lsvl.dts
new file mode 100644 (file)
index 0000000..edcba5c
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-VL
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-6282.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-VL";
+       compatible = "buffalo,lsvl", "marvell,kirkwood-88f6282", "marvell,kirkwood";
+
+       memory { /* 256 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x10000000>;
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lswsxl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lswsxl.dts
new file mode 100644 (file)
index 0000000..4b64501
--- /dev/null
@@ -0,0 +1,57 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-WSXL
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-duo-6281.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-WSXL";
+       compatible = "buffalo,lswsxl", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+       memory { /* 128 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x8000000>;
+       };
+};
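
The memory sizes noted in the comments match the reg lengths: the second cell is the size in bytes, so 0x10000000 is 256 MiB and 0x8000000 is 128 MiB. For example, the LS-WSXL node above decodes as:

    memory { /* 128 MB */
            device_type = "memory";
            /* <base size>: starts at 0, length 0x8000000 = 128 * 1024 * 1024 bytes */
            reg = <0x00000000 0x8000000>;
    };
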
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lswvl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lswvl.dts
new file mode 100644 (file)
index 0000000..954ec1d
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-WVL
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-6282.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-WVL";
+       compatible = "buffalo,lswvl", "marvell,kirkwood-88f6282", "marvell,kirkwood";
+
+       memory { /* 256 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x10000000>;
+       };
+
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd1: pmx-power-hdd1 {
+                               marvell,pins = "mpp9";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr0: pmx-led-hdderr0 {
+                               marvell,pins = "mpp34";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr1: pmx-led-hdderr1 {
+                               marvell,pins = "mpp35";
+                               marvell,function = "gpio";
+                       };
+               };
+
+               sata@80000 {
+                       nr-ports = <2>;
+               };
+       };
+
+       gpio_leds {
+               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+                            &pmx_led_info &pmx_led_power
+                            &pmx_led_function_blue
+                            &pmx_led_hdderr0
+                            &pmx_led_hdderr1>;
+
+               red-hdderr0-led {
+                       label = "linkstation:red:hdderr0";
+                       gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+               };
+
+               red-hdderr1-led {
+                       label = "linkstation:red:hdderr1";
+                       gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
+               };
+       };
+
+       regulators {
+               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
+
+               hdd_power1: regulator@3 {
+                       compatible = "regulator-fixed";
+                       reg = <3>;
+                       regulator-name = "HDD1 Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+                       gpio = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+               };
+       };
+};
diff --git a/arch/arm/boot/dts/kirkwood-linkstation-lswxl.dts b/arch/arm/boot/dts/kirkwood-linkstation-lswxl.dts
new file mode 100644 (file)
index 0000000..ecd5c12
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-WXL
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "kirkwood-linkstation-duo-6281.dtsi"
+
+/ {
+       model = "Buffalo Linkstation LS-WXL";
+       compatible = "buffalo,lswxl", "marvell,kirkwood-88f6281", "marvell,kirkwood";
+
+       memory { /* 128 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x8000000>;
+       };
+
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_led_hdderr0: pmx-led-hdderr0 {
+                               marvell,pins = "mpp8";
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_hdderr1: pmx-led-hdderr1 {
+                               marvell,pins = "mpp46";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_lock: pmx-fan-lock {
+                               marvell,pins = "mpp40";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_high: pmx-fan-high {
+                               marvell,pins = "mpp47";
+                               marvell,function = "gpio";
+                       };
+                       pmx_fan_low: pmx-fan-low {
+                               marvell,pins = "mpp48";
+                               marvell,function = "gpio";
+                       };
+               };
+       };
+
+       gpio_leds {
+               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+                            &pmx_led_info &pmx_led_power
+                            &pmx_led_function_blue
+                            &pmx_led_hdderr0
+                            &pmx_led_hdderr1>;
+
+               red-hdderr0-led {
+                       label = "linkstation:red:hdderr0";
+                       gpios = <&gpio0 8 GPIO_ACTIVE_HIGH>;
+               };
+
+               red-hdderr1-led {
+                       label = "linkstation:red:hdderr1";
+                       gpios = <&gpio1 14 GPIO_ACTIVE_HIGH>;
+               };
+       };
+
+       gpio_fan {
+               compatible = "gpio-fan";
+               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+               pinctrl-names = "default";
+
+               gpios = <&gpio1 16 GPIO_ACTIVE_LOW
+                        &gpio1 15 GPIO_ACTIVE_LOW>;
+
+               gpio-fan,speed-map = <0 3
+                               1500 2
+                               3250 1
+                               5000 0>;
+
+               alarm-gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+       };
+};
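
A recurring pattern in these files: on Kirkwood, a GPIO-capable MPP pin N maps to GPIO bank N/32, bit N%32, which is how the pinmux entries line up with the gpios references. Checking the LS-WXL fan above:

    /* mpp48 (pmx_fan_low)  -> &gpio1, bit 48 - 32 = 16 */
    /* mpp47 (pmx_fan_high) -> &gpio1, bit 47 - 32 = 15 */
    /* mpp40 (pmx_fan_lock) -> &gpio1, bit 40 - 32 = 8  */
    gpios = <&gpio1 16 GPIO_ACTIVE_LOW
             &gpio1 15 GPIO_ACTIVE_LOW>;
    alarm-gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
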
diff --git a/arch/arm/boot/dts/kirkwood-linkstation.dtsi b/arch/arm/boot/dts/kirkwood-linkstation.dtsi
new file mode 100644 (file)
index 0000000..69061b6
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * Device Tree common file for kirkwood based Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+       chosen {
+               bootargs = "console=ttyS0,115200n8 earlyprintk";
+               stdout-path = &uart0;
+       };
+
+       mbus {
+               pcie-controller {
+                       status = "okay";
+                       pcie@1,0 {
+                               status = "okay";
+                       };
+               };
+       };
+
+       ocp@f1000000 {
+               pinctrl: pin-controller@10000 {
+                       pmx_power_hdd0: pmx-power-hdd0 {
+                               marvell,function = "gpio";
+                       };
+                       pmx_usb_vbus: pmx-usb-vbus {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_alarm: pmx-led-alarm {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_red: pmx-led-function-red {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_function_blue: pmx-led-function-blue {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_info: pmx-led-info {
+                               marvell,function = "gpio";
+                       };
+                       pmx_led_power: pmx-led-power {
+                               marvell,function = "gpio";
+                       };
+                       pmx_button_function: pmx-button-function {
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_switch: pmx-power-switch {
+                               marvell,function = "gpio";
+                       };
+                       pmx_power_auto_switch: pmx-power-auto-switch {
+                               marvell,function = "gpio";
+                       };
+               };
+
+               serial@12000 {
+                       status = "okay";
+               };
+
+               sata@80000 {
+                       status = "okay";
+                       nr-ports = <1>;
+               };
+
+               spi@10600 {
+                       status = "okay";
+
+                       m25p40@0 {
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               compatible = "st,m25p40", "jedec,spi-nor";
+                               reg = <0>;
+                               spi-max-frequency = <25000000>;
+                               mode = <0>;
+
+                               partition@0 {
+                                       reg = <0x0 0x60000>;
+                                       label = "uboot";
+                                       read-only;
+                               };
+
+                               partition@60000 {
+                                       reg = <0x60000 0x10000>;
+                                       label = "dtb";
+                                       read-only;
+                               };
+
+                               partition@70000 {
+                                       reg = <0x70000 0x10000>;
+                                       label = "uboot_env";
+                               };
+                       };
+               };
+       };
+
+       gpio_keys {
+               compatible = "gpio-keys";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-0 = <&pmx_button_function &pmx_power_switch
+                            &pmx_power_auto_switch>;
+               pinctrl-names = "default";
+
+               function-button {
+                       label = "Function Button";
+                       linux,code = <KEY_OPTION>;
+               };
+
+               power-on-switch {
+                       label = "Power-on Switch";
+                       linux,code = <KEY_RESERVED>;
+                       linux,input-type = <5>;
+               };
+
+               power-auto-switch {
+                       label = "Power-auto Switch";
+                       linux,code = <KEY_ESC>;
+                       linux,input-type = <5>;
+               };
+       };
+
+       gpio_leds {
+               compatible = "gpio-leds";
+               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
+                            &pmx_led_info &pmx_led_power
+                            &pmx_led_function_blue>;
+               pinctrl-names = "default";
+       };
+
+       restart_poweroff {
+               compatible = "restart-poweroff";
+       };
+
+       regulators {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-0 = <&pmx_power_hdd0 &pmx_usb_vbus>;
+               pinctrl-names = "default";
+
+               usb_power: regulator@1 {
+                       compatible = "regulator-fixed";
+                       reg = <1>;
+                       regulator-name = "USB Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+               };
+
+               hdd_power0: regulator@2 {
+                       compatible = "regulator-fixed";
+                       reg = <2>;
+                       regulator-name = "HDD0 Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+               };
+       };
+};
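
In the shared gpio_keys node above, linux,input-type = <5> selects EV_SW, so the two power sliders are reported as switch state changes rather than key presses, while the function button keeps the default EV_KEY type. The common file deliberately omits the gpios properties, which each board variant supplies; combined here for illustration, with the GPIO taken from the duo-6281 file (mpp43 -> bank 1, bit 11):

    power-auto-switch {
            label = "Power-auto Switch";
            linux,code = <KEY_ESC>;
            linux,input-type = <5>;                 /* EV_SW: switch event, not a key */
            gpios = <&gpio1 11 GPIO_ACTIVE_LOW>;
    };
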
diff --git a/arch/arm/boot/dts/kirkwood-lswvl.dts b/arch/arm/boot/dts/kirkwood-lswvl.dts
deleted file mode 100644 (file)
index 09eed3c..0000000
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Device Tree file for Buffalo Linkstation LS-WVL/VL
- *
- * Copyright (C) 2015, rogershimizu@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/dts-v1/;
-
-#include "kirkwood.dtsi"
-#include "kirkwood-6282.dtsi"
-
-/ {
-       model = "Buffalo Linkstation LS-WVL/VL";
-       compatible = "buffalo,lswvl", "buffalo,lsvl", "marvell,kirkwood-88f6282", "marvell,kirkwood";
-
-       memory { /* 256 MB */
-               device_type = "memory";
-               reg = <0x00000000 0x10000000>;
-       };
-
-       chosen {
-               bootargs = "console=ttyS0,115200n8 earlyprintk";
-               stdout-path = &uart0;
-       };
-
-       mbus {
-               pcie-controller {
-                       status = "okay";
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
-       };
-
-       ocp@f1000000 {
-               pinctrl: pin-controller@10000 {
-                       pmx_power_hdd0: pmx-power-hdd0 {
-                               marvell,pins = "mpp8";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_hdd1: pmx-power-hdd1 {
-                               marvell,pins = "mpp9";
-                               marvell,function = "gpio";
-                       };
-                       pmx_usb_vbus: pmx-usb-vbus {
-                               marvell,pins = "mpp12";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_high: pmx-fan-high {
-                               marvell,pins = "mpp16";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_low: pmx-fan-low {
-                               marvell,pins = "mpp17";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_hdderr0: pmx-led-hdderr0 {
-                               marvell,pins = "mpp34";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_hdderr1: pmx-led-hdderr1 {
-                               marvell,pins = "mpp35";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_alarm: pmx-led-alarm {
-                               marvell,pins = "mpp36";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_function_red: pmx-led-function-red {
-                               marvell,pins = "mpp37";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_info: pmx-led-info {
-                               marvell,pins = "mpp38";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_function_blue: pmx-led-function-blue {
-                               marvell,pins = "mpp39";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_power: pmx-led-power {
-                               marvell,pins = "mpp40";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_lock: pmx-fan-lock {
-                               marvell,pins = "mpp43";
-                               marvell,function = "gpio";
-                       };
-                       pmx_button_function: pmx-button-function {
-                               marvell,pins = "mpp45";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_switch: pmx-power-switch {
-                               marvell,pins = "mpp46";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_auto_switch: pmx-power-auto-switch {
-                               marvell,pins = "mpp47";
-                               marvell,function = "gpio";
-                       };
-               };
-
-               serial@12000 {
-                       status = "okay";
-               };
-
-               sata@80000 {
-                       status = "okay";
-                       nr-ports = <2>;
-               };
-
-               spi@10600 {
-                       status = "okay";
-
-                       m25p40@0 {
-                               #address-cells = <1>;
-                               #size-cells = <1>;
-                               compatible = "st,m25p40", "jedec,spi-nor";
-                               reg = <0>;
-                               spi-max-frequency = <25000000>;
-                               mode = <0>;
-
-                               partition@0 {
-                                       reg = <0x0 0x60000>;
-                                       label = "uboot";
-                                       read-only;
-                               };
-
-                               partition@60000 {
-                                       reg = <0x60000 0x10000>;
-                                       label = "dtb";
-                                       read-only;
-                               };
-
-                               partition@70000 {
-                                       reg = <0x70000 0x10000>;
-                                       label = "uboot_env";
-                               };
-                       };
-               };
-       };
-
-       gpio_keys {
-               compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_button_function &pmx_power_switch
-                            &pmx_power_auto_switch>;
-               pinctrl-names = "default";
-
-               button@1 {
-                       label = "Function Button";
-                       linux,code = <KEY_OPTION>;
-                       gpios = <&gpio0 45 GPIO_ACTIVE_LOW>;
-               };
-
-               button@2 {
-                       label = "Power-on Switch";
-                       linux,code = <KEY_RESERVED>;
-                       linux,input-type = <5>;
-                       gpios = <&gpio0 46 GPIO_ACTIVE_LOW>;
-               };
-
-               button@3 {
-                       label = "Power-auto Switch";
-                       linux,code = <KEY_ESC>;
-                       linux,input-type = <5>;
-                       gpios = <&gpio0 47 GPIO_ACTIVE_LOW>;
-               };
-       };
-
-       gpio_leds {
-               compatible = "gpio-leds";
-               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
-                            &pmx_led_info &pmx_led_power
-                            &pmx_led_function_blue
-                            &pmx_led_hdderr0
-                            &pmx_led_hdderr1>;
-               pinctrl-names = "default";
-
-               led@1 {
-                       label = "lswvl:red:alarm";
-                       gpios = <&gpio0 36 GPIO_ACTIVE_LOW>;
-               };
-
-               led@2 {
-                       label = "lswvl:red:func";
-                       gpios = <&gpio0 37 GPIO_ACTIVE_LOW>;
-               };
-
-               led@3 {
-                       label = "lswvl:amber:info";
-                       gpios = <&gpio0 38 GPIO_ACTIVE_LOW>;
-               };
-
-               led@4 {
-                       label = "lswvl:blue:func";
-                       gpios = <&gpio0 39 GPIO_ACTIVE_LOW>;
-               };
-
-               led@5 {
-                       label = "lswvl:blue:power";
-                       gpios = <&gpio0 40 GPIO_ACTIVE_LOW>;
-                       default-state = "keep";
-               };
-
-               led@6 {
-                       label = "lswvl:red:hdderr0";
-                       gpios = <&gpio0 34 GPIO_ACTIVE_LOW>;
-               };
-
-               led@7 {
-                       label = "lswvl:red:hdderr1";
-                       gpios = <&gpio0 35 GPIO_ACTIVE_LOW>;
-               };
-       };
-
-       gpio_fan {
-               compatible = "gpio-fan";
-               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
-               pinctrl-names = "default";
-
-               gpios = <&gpio0 17 GPIO_ACTIVE_LOW
-                        &gpio0 16 GPIO_ACTIVE_LOW>;
-
-               gpio-fan,speed-map = <0 3
-                               1500 2
-                               3250 1
-                               5000 0>;
-
-               alarm-gpios = <&gpio0 43 GPIO_ACTIVE_HIGH>;
-       };
-
-       restart_poweroff {
-               compatible = "restart-poweroff";
-       };
-
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
-               pinctrl-names = "default";
-
-               usb_power: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "USB Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 12 GPIO_ACTIVE_HIGH>;
-               };
-               hdd_power0: regulator@2 {
-                       compatible = "regulator-fixed";
-                       reg = <2>;
-                       regulator-name = "HDD0 Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 8 GPIO_ACTIVE_HIGH>;
-               };
-               hdd_power1: regulator@3 {
-                       compatible = "regulator-fixed";
-                       reg = <3>;
-                       regulator-name = "HDD1 Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 9 GPIO_ACTIVE_HIGH>;
-               };
-       };
-};
-
-&mdio {
-       status = "okay";
-
-       ethphy0: ethernet-phy@0 {
-               device_type = "ethernet-phy";
-               reg = <0>;
-       };
-};
-
-&eth0 {
-       status = "okay";
-
-       ethernet0-port@0 {
-               phy-handle = <&ethphy0>;
-       };
-};
diff --git a/arch/arm/boot/dts/kirkwood-lswxl.dts b/arch/arm/boot/dts/kirkwood-lswxl.dts
deleted file mode 100644 (file)
index f5db16a..0000000
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Device Tree file for Buffalo Linkstation LS-WXL/WSXL
- *
- * Copyright (C) 2015, rogershimizu@gmail.com
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-/dts-v1/;
-
-#include "kirkwood.dtsi"
-#include "kirkwood-6281.dtsi"
-
-/ {
-       model = "Buffalo Linkstation LS-WXL/WSXL";
-       compatible = "buffalo,lswxl", "marvell,kirkwood-88f6281", "marvell,kirkwood";
-
-       memory { /* 128 MB */
-               device_type = "memory";
-               reg = <0x00000000 0x8000000>;
-       };
-
-       chosen {
-               bootargs = "console=ttyS0,115200n8 earlyprintk";
-               stdout-path = &uart0;
-       };
-
-       mbus {
-               pcie-controller {
-                       status = "okay";
-                       pcie@1,0 {
-                               status = "okay";
-                       };
-               };
-       };
-
-       ocp@f1000000 {
-               pinctrl: pin-controller@10000 {
-                       pmx_power_hdd0: pmx-power-hdd0 {
-                               marvell,pins = "mpp28";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_hdd1: pmx-power-hdd1 {
-                               marvell,pins = "mpp29";
-                               marvell,function = "gpio";
-                       };
-                       pmx_usb_vbus: pmx-usb-vbus {
-                               marvell,pins = "mpp37";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_high: pmx-fan-high {
-                               marvell,pins = "mpp47";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_low: pmx-fan-low {
-                               marvell,pins = "mpp48";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_hdderr0: pmx-led-hdderr0 {
-                               marvell,pins = "mpp8";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_hdderr1: pmx-led-hdderr1 {
-                               marvell,pins = "mpp46";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_alarm: pmx-led-alarm {
-                               marvell,pins = "mpp49";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_function_red: pmx-led-function-red {
-                               marvell,pins = "mpp34";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_function_blue: pmx-led-function-blue {
-                               marvell,pins = "mpp36";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_info: pmx-led-info {
-                               marvell,pins = "mpp38";
-                               marvell,function = "gpio";
-                       };
-                       pmx_led_power: pmx-led-power {
-                               marvell,pins = "mpp39";
-                               marvell,function = "gpio";
-                       };
-                       pmx_fan_lock: pmx-fan-lock {
-                               marvell,pins = "mpp40";
-                               marvell,function = "gpio";
-                       };
-                       pmx_button_function: pmx-button-function {
-                               marvell,pins = "mpp41";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_switch: pmx-power-switch {
-                               marvell,pins = "mpp42";
-                               marvell,function = "gpio";
-                       };
-                       pmx_power_auto_switch: pmx-power-auto-switch {
-                               marvell,pins = "mpp43";
-                               marvell,function = "gpio";
-                       };
-               };
-
-               serial@12000 {
-                       status = "okay";
-               };
-
-               sata@80000 {
-                       status = "okay";
-                       nr-ports = <2>;
-               };
-
-               spi@10600 {
-                       status = "okay";
-
-                       m25p40@0 {
-                               #address-cells = <1>;
-                               #size-cells = <1>;
-                               compatible = "st,m25p40", "jedec,spi-nor";
-                               reg = <0>;
-                               spi-max-frequency = <25000000>;
-                               mode = <0>;
-
-                               partition@0 {
-                                       reg = <0x0 0x60000>;
-                                       label = "uboot";
-                                       read-only;
-                               };
-
-                               partition@60000 {
-                                       reg = <0x60000 0x10000>;
-                                       label = "dtb";
-                                       read-only;
-                               };
-
-                               partition@70000 {
-                                       reg = <0x70000 0x10000>;
-                                       label = "uboot_env";
-                               };
-                       };
-               };
-       };
-
-       gpio_keys {
-               compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_button_function &pmx_power_switch
-                            &pmx_power_auto_switch>;
-               pinctrl-names = "default";
-
-               button@1 {
-                       label = "Function Button";
-                       linux,code = <KEY_OPTION>;
-                       gpios = <&gpio1 41 GPIO_ACTIVE_LOW>;
-               };
-
-               button@2 {
-                       label = "Power-on Switch";
-                       linux,code = <KEY_RESERVED>;
-                       linux,input-type = <5>;
-                       gpios = <&gpio1 42 GPIO_ACTIVE_LOW>;
-               };
-
-               button@3 {
-                       label = "Power-auto Switch";
-                       linux,code = <KEY_ESC>;
-                       linux,input-type = <5>;
-                       gpios = <&gpio1 43 GPIO_ACTIVE_LOW>;
-               };
-       };
-
-       gpio_leds {
-               compatible = "gpio-leds";
-               pinctrl-0 = <&pmx_led_function_red &pmx_led_alarm
-                            &pmx_led_info &pmx_led_power
-                            &pmx_led_function_blue
-                            &pmx_led_hdderr0
-                            &pmx_led_hdderr1>;
-               pinctrl-names = "default";
-
-               led@1 {
-                       label = "lswxl:blue:func";
-                       gpios = <&gpio1 36 GPIO_ACTIVE_LOW>;
-               };
-
-               led@2 {
-                       label = "lswxl:red:alarm";
-                       gpios = <&gpio1 49 GPIO_ACTIVE_LOW>;
-               };
-
-               led@3 {
-                       label = "lswxl:amber:info";
-                       gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
-               };
-
-               led@4 {
-                       label = "lswxl:blue:power";
-                       gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
-               };
-
-               led@5 {
-                       label = "lswxl:red:func";
-                       gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
-                       default-state = "keep";
-               };
-
-               led@6 {
-                       label = "lswxl:red:hdderr0";
-                       gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
-               };
-
-               led@7 {
-                       label = "lswxl:red:hdderr1";
-                       gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
-               };
-       };
-
-       gpio_fan {
-               compatible = "gpio-fan";
-               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
-               pinctrl-names = "default";
-
-               gpios = <&gpio0 47 GPIO_ACTIVE_LOW
-                        &gpio0 48 GPIO_ACTIVE_LOW>;
-
-               gpio-fan,speed-map = <0 3
-                               1500 2
-                               3250 1
-                               5000 0>;
-
-               alarm-gpios = <&gpio1 49 GPIO_ACTIVE_HIGH>;
-       };
-
-       restart_poweroff {
-               compatible = "restart-poweroff";
-       };
-
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_power_hdd0 &pmx_power_hdd1 &pmx_usb_vbus>;
-               pinctrl-names = "default";
-
-               usb_power: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "USB Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 37 GPIO_ACTIVE_HIGH>;
-               };
-               hdd_power0: regulator@2 {
-                       compatible = "regulator-fixed";
-                       reg = <2>;
-                       regulator-name = "HDD0 Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 28 GPIO_ACTIVE_HIGH>;
-               };
-               hdd_power1: regulator@3 {
-                       compatible = "regulator-fixed";
-                       reg = <3>;
-                       regulator-name = "HDD1 Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>;
-               };
-       };
-};
-
-&mdio {
-       status = "okay";
-
-       ethphy1: ethernet-phy@8 {
-               device_type = "ethernet-phy";
-               reg = <8>;
-       };
-};
-
-&eth1 {
-       status = "okay";
-
-       ethernet1-port@0 {
-               phy-handle = <&ethphy1>;
-       };
-};
index 887b9c1fee43869a2b09336eb283116d3623e2af..96ff59d68f445f45a5182a12d6d42a3883ede881 100644 (file)
@@ -20,6 +20,9 @@
        compatible = "marvell,openrd-client", "marvell,openrd", "marvell,kirkwood-88f6281", "marvell,kirkwood";
 
        ocp@f1000000 {
+               audio-controller@a0000 {
+                       status = "okay";
+               };
                i2c@11000 {
                        status = "okay";
                        clock-frequency = <400000>;
@@ -27,6 +30,7 @@
                        cs42l51: cs42l51@4a {
                                compatible = "cirrus,cs42l51";
                                reg = <0x4a>;
+                               #sound-dai-cells = <0>;
                        };
                };
        };
@@ -37,7 +41,7 @@
                simple-audio-card,mclk-fs = <256>;
 
                simple-audio-card,cpu {
-                       sound-dai = <&audio0>;
+                       sound-dai = <&audio0 0>;
                };
 
                simple-audio-card,codec {
index d3330dadf7edd5da9ae9d35f10019a3b44555942..24f1d30970a0650b535bcd67395c0cea8ace7935 100644 (file)
@@ -40,7 +40,7 @@
                        pinctrl-0 = <&pmx_select28 &pmx_sdio_cd &pmx_select34>;
                        pinctrl-names = "default";
 
-                       pmx_select28: pmx-select-uart-sd {
+                       pmx_select28: pmx-select-rs232-rs485 {
                                marvell,pins = "mpp28";
                                marvell,function = "gpio";
                        };
@@ -48,7 +48,7 @@
                                marvell,pins = "mpp29";
                                marvell,function = "gpio";
                        };
-                       pmx_select34: pmx-select-rs232-rs484 {
+                       pmx_select34: pmx-select-uart-sd {
                                marvell,pins = "mpp34";
                                marvell,function = "gpio";
                        };
                        status = "okay";
                        cd-gpios = <&gpio0 29 9>;
                };
+               gpio@10100 {
+                       p28 {
+                               gpio-hog;
+                               gpios = <28 GPIO_ACTIVE_HIGH>;
+                               /*
+                                * SelRS232or485 selects between RS-232 or RS-485
+                                * mode for the second UART.
+                                *
+                                * Low: RS-232
+                                * High: RS-485
+                                *
+                                * To use the second UART, you also need to change
+                                * SelUARTorSD.
+                                */
+                               output-low;
+                               line-name = "SelRS232or485";
+                       };
+               };
+               gpio@10140 {
+                       p2 {
+                               gpio-hog;
+                               gpios = <2 GPIO_ACTIVE_HIGH>;
+                               /*
+                                * SelUARTorSD selects between the second UART
+                                * (serial@12100) and SD (mvsdio@90000).
+                                *
+                                * Low: UART
+                                * High: SD
+                                *
+                                * When changing this line make sure the newly
+                                * selected device node is enabled and the
+                                * previously selected device node is disabled.
+                                */
+                               output-high; /* Select SD by default */
+                               line-name = "SelUARTorSD";
+                       };
+               };
        };
 };
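
The two GPIO hogs added above pin the board's shared lines at boot: SelRS232or485 picks the transceiver mode of the second UART, and SelUARTorSD routes the shared pins either to the SD controller or to that UART. Since hogs are claimed when the GPIO controller probes, the choice is effectively fixed for the life of the system. A minimal sketch of what the same nodes might look like when the second UART is wanted instead of SD (illustrative only; it assumes the serial@12100 and mvsdio@90000 nodes named in the comments exist in this board file):

		gpio@10140 {
			p2 {
				gpio-hog;
				gpios = <2 GPIO_ACTIVE_HIGH>;
				output-low;		/* Low selects the UART */
				line-name = "SelUARTorSD";
			};
		};
		serial@12100 {
			status = "okay";	/* newly selected device enabled */
		};
		mvsdio@90000 {
			status = "disabled";	/* previously selected device disabled */
		};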
 
index 1db6f2c506cce3209e64418493039046c79fd0ec..8082d64266a37c33c8fa2e7e722d3683519c3d0a 100644 (file)
        chip-delay = <40>;
        status = "okay";
        partitions {
+               compatible = "fixed-partitions";
                #address-cells = <1>;
                #size-cells = <1>;
 
index 7b5a4a18f49cb6981064c213805652df0911cb90..7445a15e259d05c3a51666694e2858bf3b6fb06c 100644 (file)
 
                audio0: audio-controller@a0000 {
                        compatible = "marvell,kirkwood-audio";
-                       #sound-dai-cells = <0>;
+                       #sound-dai-cells = <1>;
                        reg = <0xa0000 0x2210>;
                        interrupts = <24>;
                        clocks = <&gate_clk 9>;
index 7fed0bd4f3deea46281881ead81b2ebd29c5fa4b..00805322367e7eb73bcc4ba5bda124eee5372467 100644 (file)
        clock-frequency = <400000>;
 };
 
-&i2c2 {
-       clock-frequency = <400000>;
-};
-
-&i2c3 {
-       clock-frequency = <400000>;
-};
-
 /*
  * Only found on the wireless SOM. For the SOM without wireless, the pins for
  * MMC3 can be routed with jumpers to the second MMC slot on the devkit and
                interrupt-parent = <&gpio5>;
                interrupts = <24 IRQ_TYPE_LEVEL_HIGH>; /* gpio 152 */
                ref-clock-frequency = <26000000>;
+               tcxo-clock-frequency = <26000000>;
        };
 };
 
diff --git a/arch/arm/boot/dts/mvebu-linkstation-fan.dtsi b/arch/arm/boot/dts/mvebu-linkstation-fan.dtsi
new file mode 100644 (file)
index 0000000..e211a3c
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Device Tree common file for gpio-fan on Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/ {
+       gpio_fan {
+               compatible = "gpio-fan";
+               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
+               pinctrl-names = "default";
+
+               gpio-fan,speed-map =
+                       <0              3
+                       1500    2
+                       3250    1
+                       5000    0>;
+       };
+};
+
+&pinctrl {
+       pmx_fan_low: pmx-fan-low {
+               marvell,function = "gpio";
+       };
+
+       pmx_fan_high: pmx-fan-high {
+               marvell,function = "gpio";
+       };
+
+       pmx_fan_lock: pmx-fan-lock {
+               marvell,function = "gpio";
+       };
+};
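
This shared fan include intentionally leaves the board-specific pieces out: gpio-fan,speed-map only pairs a fan speed in RPM with the control value to drive onto the (still unspecified) GPIO lines, so <0 3 ... 5000 0> means control state 3 stops the fan and state 0 runs it at 5000 RPM. A board that includes the file is expected to add the control GPIOs, the alarm input, and the marvell,pins behind the pmx handles, as the LS-WTGL conversion further down in this diff does. A minimal board-side sketch, borrowing the LS-WTGL numbers purely as an illustration:

/ {
	gpio_fan {
		gpios = <&gpio0 14 GPIO_ACTIVE_LOW
			 &gpio0 17 GPIO_ACTIVE_LOW>;
		alarm-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
	};
};

&pinctrl {
	pmx_fan_high: pmx-fan-high {
		marvell,pins = "mpp14";
	};

	pmx_fan_low: pmx-fan-low {
		marvell,pins = "mpp17";
	};

	pmx_fan_lock: pmx-fan-lock {
		marvell,pins = "mpp6";
	};
};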
diff --git a/arch/arm/boot/dts/mvebu-linkstation-gpio-simple.dtsi b/arch/arm/boot/dts/mvebu-linkstation-gpio-simple.dtsi
new file mode 100644 (file)
index 0000000..68d75e7
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Device Tree common file for gpio-{keys,leds} on Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/input/input.h>
+
+/ {
+       gpio_keys {
+               compatible = "gpio-keys";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-0 = <&pmx_power_switch>;
+               pinctrl-names = "default";
+
+               power-on-switch {
+                       label = "Power-on Switch";
+                       linux,code = <KEY_RESERVED>;
+                       linux,input-type = <5>;
+               };
+
+               power-auto-switch {
+                       label = "Power-auto Switch";
+                       linux,code = <KEY_ESC>;
+                       linux,input-type = <5>;
+               };
+       };
+
+       gpio_leds {
+               compatible = "gpio-leds";
+               pinctrl-0 = <&pmx_led_power &pmx_led_alarm &pmx_led_info>;
+               pinctrl-names = "default";
+
+               blue-power-led {
+                       label = "linkstation:blue:power";
+                       default-state = "keep";
+               };
+
+               red-alarm-led {
+                       label = "linkstation:red:alarm";
+               };
+
+               amber-info-led {
+                       label = "linkstation:amber:info";
+               };
+       };
+};
+
+&pinctrl {
+       pmx_power_switch: pmx-power-switch {
+               marvell,function = "gpio";
+       };
+
+       pmx_led_power: pmx-leds {
+               marvell,function = "gpio";
+       };
+
+       pmx_led_alarm: pmx-leds {
+               marvell,function = "gpio";
+       };
+
+       pmx_led_info: pmx-leds {
+               marvell,function = "gpio";
+       };
+};
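
Like the fan include, this file carries only what the Linkstation variants have in common (key codes, input types, LED labels); the per-board GPIO assignments and the marvell,pins for the pmx handles come from whichever board file includes it, as the LS-WTGL conversion later in this diff shows. A minimal board-side sketch, reusing the LS-WTGL values purely as an example:

/ {
	gpio_keys {
		power-on-switch {
			gpios = <&gpio0 8 GPIO_ACTIVE_LOW>;
		};

		power-auto-switch {
			gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
		};
	};

	gpio_leds {
		blue-power-led {
			gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
		};
	};
};

&pinctrl {
	pmx_power_switch: pmx-power-switch {
		marvell,pins = "mpp8", "mpp10";
	};

	pmx_led_power: pmx-leds {
		marvell,pins = "mpp0";
	};
};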
index 74d8f7eb556399e489eaebc7e754a4515a96f57f..20603f3f9bd07e273523398bca74256a3fd67150 100644 (file)
                amstaos,cover-comp-gain = <16>;
        };
 
+       adp1653: led-controller@30 {
+               compatible = "adi,adp1653";
+               reg = <0x30>;
+               enable-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>; /* 88 */
+
+               flash {
+                       flash-timeout-us = <500000>;
+                       flash-max-microamp = <320000>;
+                       led-max-microamp = <50000>;
+               };
+               indicator {
+                       led-max-microamp = <17500>;
+               };
+       };
+
        lp5523: lp5523@32 {
                compatible = "national,lp5523";
                reg = <0x32>;
index a2c2b8d8dd2c70a2ab5a8007b0cc2e24faeaafd0..ab1174bb409e3c9631a32d66d7fe7d6a2ab7319a 100644 (file)
                startup-delay-us = <150>;
                enable-active-high;
        };
+
+       vwlan_fixed: fixedregulator@2 {
+               compatible = "regulator-fixed";
+               regulator-name = "VWLAN";
+               gpio = <&gpio2 3 GPIO_ACTIVE_HIGH>; /* gpio 35 */
+               enable-active-high;
+               regulator-boot-off;
+       };
 };
 
 &omap3_pmx_core {
                        OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3 */
                >;
        };
+
+       wlan_pins: pinmux_wlan_pins {
+               pinctrl-single,pins = <
+                       OMAP3_CORE1_IOPAD(0x207c, PIN_OUTPUT | MUX_MODE4) /* gpio 35 - wlan enable */
+                       OMAP3_CORE1_IOPAD(0x208a, PIN_INPUT | MUX_MODE4) /* gpio 42 - wlan irq */
+               >;
+       };
 };
 
 &i2c1 {
index 0885b34d5d7da252953c3aaef287a1e20b9dda63..e5967262f28bdffc240255affed510d47129f444 100644 (file)
        compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
 };
 
+&omap3_pmx_core {
+       spi4_pins: pinmux_spi4_pins {
+               pinctrl-single,pins = <
+                       OMAP3_CORE1_IOPAD(0x218c, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mcspi4_clk */
+                       OMAP3_CORE1_IOPAD(0x2190, PIN_OUTPUT | MUX_MODE1) /* mcspi4_simo */
+                       OMAP3_CORE1_IOPAD(0x2192, PIN_INPUT_PULLDOWN | MUX_MODE1) /* mcspi4_somi */
+                       OMAP3_CORE1_IOPAD(0x2196, PIN_OUTPUT | MUX_MODE1) /* mcspi4_cs0 */
+               >;
+       };
+};
+
 &i2c2 {
        smia_1: camera@10 {
                compatible = "nokia,smia";
                };
        };
 };
+
+&mcspi4 {
+       status = "okay";
+       pinctrl-names = "default";
+       pinctrl-0 = <&spi4_pins>;
+
+       wlcore: wlcore@0 {
+               compatible = "ti,wl1271";
+               pinctrl-names = "default";
+               pinctrl-0 = <&wlan_pins>;
+               reg = <0>;
+               spi-max-frequency = <48000000>;
+               clock-xtal;
+               ref-clock-frequency = <38400000>;
+               interrupts-extended = <&gpio2 10 IRQ_TYPE_LEVEL_HIGH>; /* gpio 42 */
+               vwlan-supply = <&vwlan_fixed>;
+       };
+};
index 4f6b2d5b1902e96114f12b1f8bd7d15ba0b9819e..387dc31822fe9c8b2729d28da389d410ffb55fe7 100644 (file)
                                #size-cells = <0>;
                        };
                };
+
+               bandgap {
+                       reg = <0x48002524 0x4>;
+                       compatible = "ti,omap34xx-bandgap";
+                       #thermal-sensor-cells = <0>;
+               };
        };
 };
 
index 86253de5a97a99ddb12ea5f41c72f3c59279fced..f19c87bd6bf35b249543495810df3be423b5cd55 100644 (file)
                                #size-cells = <0>;
                        };
                };
+
+               bandgap {
+                       reg = <0x48002524 0x4>;
+                       compatible = "ti,omap36xx-bandgap";
+                       #thermal-sensor-cells = <0>;
+               };
        };
 };
 
index 888412c63f97492ef445368c3208434d748912f2..902657d6713b073df82d33158d3d1cd0fac5ef16 100644 (file)
        };
 };
 
+&gpio8 {
+       /* TI trees use GPIO instead of msecure, see also muxing */
+       p234 {
+               gpio-hog;
+               gpios = <10 GPIO_ACTIVE_HIGH>;
+               output-high;
+               line-name = "gpio8_234/msecure";
+       };
+};
+
 &omap5_pmx_core {
        pinctrl-names = "default";
        pinctrl-0 = <
                >;
        };
 
+       /* TI trees use GPIO mode; msecure mode does not work reliably? */
+       palmas_msecure_pins: palmas_msecure_pins {
+               pinctrl-single,pins = <
+                       OMAP5_IOPAD(0x180, PIN_OUTPUT | MUX_MODE6) /* gpio8_234 */
+               >;
+       };
+
        usbhost_pins: pinmux_usbhost_pins {
                pinctrl-single,pins = <
                        OMAP5_IOPAD(0x0c4, PIN_INPUT | MUX_MODE0) /* usbb2_hsic_strobe */
                        &usbhost_wkup_pins
        >;
 
+       palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+               pinctrl-single,pins = <
+                       OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+               >;
+       };
+
        usbhost_wkup_pins: pinmux_usbhost_wkup_pins {
                pinctrl-single,pins = <
                        OMAP5_IOPAD(0x05a, PIN_OUTPUT | MUX_MODE0) /* fref_clk1_out, USB hub clk */
                interrupt-controller;
                #interrupt-cells = <2>;
                ti,system-power-controller;
+               pinctrl-names = "default";
+               pinctrl-0 = <&palmas_sys_nirq_pins &palmas_msecure_pins>;
 
                extcon_usb3: palmas_usb {
                        compatible = "ti,palmas-usb-vid";
                        #clock-cells = <0>;
                };
 
+               rtc {
+                       compatible = "ti,palmas-rtc";
+                       interrupt-parent = <&palmas>;
+                       interrupts = <8 IRQ_TYPE_NONE>;
+                       ti,backup-battery-chargeable;
+                       ti,backup-battery-charge-high-current;
+               };
+
                palmas_pmic {
                        compatible = "ti,palmas-pmic";
                        interrupt-parent = <&palmas>;
diff --git a/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lsgl.dts
new file mode 100644 (file)
index 0000000..1cf644b
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Device Tree file for Buffalo Linkstation LS-GL
+ *       (also known as Buffalo Linkstation Pro/Live)
+ *
+ * Copyright (C) 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * Based on the board file arch/arm/mach-orion5x/kurobox_pro-setup.c
+ * Copyright (C) Ronen Shitrit <rshitrit@marvell.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+
+#include "orion5x-linkstation.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+       model = "Buffalo Linkstation Pro/Live";
+       compatible = "buffalo,lsgl", "marvell,orion5x-88f5182", "marvell,orion5x";
+
+       memory { /* 128 MB */
+               device_type = "memory";
+               reg = <0x00000000 0x8000000>;
+       };
+};
+
+&pinctrl {
+       pmx_power_hdd: pmx-power-hdd {
+               marvell,pins = "mpp1";
+               marvell,function = "gpio";
+       };
+
+       pmx_power_usb: pmx-power-usb {
+               marvell,pins = "mpp9";
+               marvell,function = "gpio";
+       };
+};
+
+&hdd_power {
+       gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
+};
+
+&usb_power {
+       gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
+};
+
+&ehci1 {
+       status = "okay";
+};
index 3daec912b4bf118edba55ef54fdf633413e696e5..0eead400f42770e46f44250ef1629cdb295a1422 100644 (file)
@@ -1,7 +1,8 @@
 /*
  * Device Tree file for Buffalo Linkstation LS-WTGL
  *
- * Copyright (C) 2015, Roger Shimizu <rogershimizu@gmail.com>
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
 
 /dts-v1/;
 
+#include "orion5x-linkstation.dtsi"
+#include "mvebu-linkstation-gpio-simple.dtsi"
+#include "mvebu-linkstation-fan.dtsi"
 #include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/input/input.h>
-#include "orion5x-mv88f5182.dtsi"
 
 / {
        model = "Buffalo Linkstation LS-WTGL";
                reg = <0x00000000 0x4000000>;
        };
 
-       chosen {
-               bootargs = "console=ttyS0,115200n8 earlyprintk";
-               linux,stdout-path = &uart0;
-       };
-
-       soc {
-               ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>,
-                        <MBUS_ID(0x09, 0x00) 0 0xf2200000 0x800>,
-                        <MBUS_ID(0x01, 0x0f) 0 0xf4000000 0x40000>;
-
-               internal-regs {
-                       pinctrl: pinctrl@10000 {
-                               pinctrl-0 = <&pmx_usb_power &pmx_power_hdd
-                                       &pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
-                               pinctrl-names = "default";
-
-                               pmx_led_power: pmx-leds {
-                                       marvell,pins = "mpp0";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_led_alarm: pmx-leds {
-                                       marvell,pins = "mpp2";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_led_info: pmx-leds {
-                                       marvell,pins = "mpp3";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_power_hdd: pmx-power-hdd {
-                                       marvell,pins = "mpp1";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_usb_power: pmx-usb-power {
-                                       marvell,pins = "mpp9";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_sata0: pmx-sata0 {
-                                       marvell,pins = "mpp12";
-                                       marvell,function = "sata0";
-                               };
-
-                               pmx_sata1: pmx-sata1 {
-                                       marvell,pins = "mpp13";
-                                       marvell,function = "sata1";
-                               };
-
-                               pmx_fan_high: pmx-fan-high {
-                                       marvell,pins = "mpp14";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_fan_low: pmx-fan-low {
-                                       marvell,pins = "mpp17";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_fan_lock: pmx-fan-lock {
-                                       marvell,pins = "mpp6";
-                                       marvell,function = "gpio";
-                               };
-
-                               pmx_power_switch: pmx-power-switch {
-                                       marvell,pins = "mpp8", "mpp10";
-                                       marvell,function = "gpio";
-                               };
-                       };
-               };
-       };
-
        gpio_keys {
-               compatible = "gpio-keys";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_power_switch>;
-               pinctrl-names = "default";
-
-               button@1 {
-                       label = "Power-on Switch";
-                       linux,code = <KEY_RESERVED>;
-                       linux,input-type = <5>;
+               power-on-switch {
                        gpios = <&gpio0 8 GPIO_ACTIVE_LOW>;
                };
 
-               button@2 {
-                       label = "Power-auto Switch";
-                       linux,code = <KEY_ESC>;
-                       linux,input-type = <5>;
+               power-auto-switch {
                        gpios = <&gpio0 10 GPIO_ACTIVE_LOW>;
                };
        };
 
        gpio_leds {
-               compatible = "gpio-leds";
-               pinctrl-0 = <&pmx_led_power &pmx_led_alarm
-                            &pmx_led_info>;
-               pinctrl-names = "default";
-
-               led@1 {
-                       label = "lswtgl:blue:power";
+               blue-power-led {
                        gpios = <&gpio0 0 GPIO_ACTIVE_LOW>;
                };
 
-               led@2 {
-                       label = "lswtgl:red:alarm";
+               red-alarm-led {
                        gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
                };
 
-               led@3 {
-                       label = "lswtgl:amber:info";
+               amber-info-led {
                        gpios = <&gpio0 3 GPIO_ACTIVE_LOW>;
                };
        };
 
        gpio_fan {
-               compatible = "gpio-fan";
-               pinctrl-0 = <&pmx_fan_low &pmx_fan_high &pmx_fan_lock>;
-               pinctrl-names = "default";
-
                gpios = <&gpio0 14 GPIO_ACTIVE_LOW
                         &gpio0 17 GPIO_ACTIVE_LOW>;
 
-               gpio-fan,speed-map = <0 3
-                               1500 2
-                               3250 1
-                               5000 0>;
-
-               alarm-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>;
+               alarm-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
        };
+};
 
-       restart_poweroff {
-               compatible = "restart-poweroff";
+&pinctrl {
+       pmx_led_power: pmx-leds {
+               marvell,pins = "mpp0";
+               marvell,function = "gpio";
        };
 
-       regulators {
-               compatible = "simple-bus";
-               #address-cells = <1>;
-               #size-cells = <0>;
-               pinctrl-0 = <&pmx_power_hdd &pmx_usb_power>;
-               pinctrl-names = "default";
+       pmx_power_hdd: pmx-power-hdd {
+               marvell,pins = "mpp1";
+               marvell,function = "gpio";
+       };
 
-               usb_power: regulator@1 {
-                       compatible = "regulator-fixed";
-                       reg = <1>;
-                       regulator-name = "USB Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
-               };
+       pmx_led_alarm: pmx-leds {
+               marvell,pins = "mpp2";
+               marvell,function = "gpio";
+       };
 
-               hdd_power: regulator@2 {
-                       compatible = "regulator-fixed";
-                       reg = <2>;
-                       regulator-name = "HDD Power";
-                       regulator-min-microvolt = <5000000>;
-                       regulator-max-microvolt = <5000000>;
-                       enable-active-high;
-                       regulator-always-on;
-                       regulator-boot-on;
-                       gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
-               };
+       pmx_led_info: pmx-leds {
+               marvell,pins = "mpp3";
+               marvell,function = "gpio";
        };
-};
 
-&mdio {
-       status = "okay";
+       pmx_fan_lock: pmx-fan-lock {
+               marvell,pins = "mpp6";
+               marvell,function = "gpio";
+       };
 
-       ethphy: ethernet-phy {
-               reg = <8>;
+       pmx_power_switch: pmx-power-switch {
+               marvell,pins = "mpp8", "mpp10";
+               marvell,function = "gpio";
        };
-};
 
-&eth {
-       status = "okay";
+       pmx_power_usb: pmx-power-usb {
+               marvell,pins = "mpp9";
+               marvell,function = "gpio";
+       };
 
-       ethernet-port@0 {
-               phy-handle = <&ethphy>;
+       pmx_fan_high: pmx-fan-high {
+               marvell,pins = "mpp14";
+               marvell,function = "gpio";
        };
-};
 
-&ehci0 {
-       status = "okay";
+       pmx_fan_low: pmx-fan-low {
+               marvell,pins = "mpp17";
+               marvell,function = "gpio";
+       };
 };
 
-&i2c {
-       status = "okay";
-
-       rtc {
-               compatible = "ricoh,rs5c372a";
-               reg = <0x32>;
-       };
+&hdd_power {
+       gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>;
 };
 
-&wdt {
-       status = "disabled";
+&usb_power {
+       gpios = <&gpio0 9 GPIO_ACTIVE_HIGH>;
 };
 
 &sata {
-       pinctrl-0 = <&pmx_sata0 &pmx_sata1>;
-       pinctrl-names = "default";
-       status = "okay";
        nr-ports = <2>;
 };
-
-&uart0 {
-       status = "okay";
-};
diff --git a/arch/arm/boot/dts/orion5x-linkstation.dtsi b/arch/arm/boot/dts/orion5x-linkstation.dtsi
new file mode 100644 (file)
index 0000000..ed456ab
--- /dev/null
@@ -0,0 +1,180 @@
+/*
+ * Device Tree common file for orion5x based Buffalo Linkstation
+ *
+ * Copyright (C) 2015, 2016
+ * Roger Shimizu <rogershimizu@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "orion5x-mv88f5182.dtsi"
+
+/ {
+       chosen {
+               bootargs = "console=ttyS0,115200n8 earlyprintk";
+               linux,stdout-path = &uart0;
+       };
+
+       soc {
+               ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000>,
+                                <MBUS_ID(0x09, 0x00) 0 0xf2200000 0x800>,
+                                <MBUS_ID(0x01, 0x0f) 0 0xf4000000 0x40000>;
+       };
+
+       restart_poweroff {
+               compatible = "restart-poweroff";
+       };
+
+       regulators {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <0>;
+               pinctrl-0 = <&pmx_power_usb &pmx_power_hdd>;
+               pinctrl-names = "default";
+
+               usb_power: regulator@1 {
+                       compatible = "regulator-fixed";
+                       reg = <1>;
+                       regulator-name = "USB Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+               };
+
+               hdd_power: regulator@2 {
+                       compatible = "regulator-fixed";
+                       reg = <2>;
+                       regulator-name = "HDD Power";
+                       regulator-min-microvolt = <5000000>;
+                       regulator-max-microvolt = <5000000>;
+                       enable-active-high;
+                       regulator-always-on;
+                       regulator-boot-on;
+               };
+       };
+};
+
+&pinctrl {
+       pmx_power_hdd: pmx-power-hdd {
+               marvell,function = "gpio";
+       };
+
+       pmx_power_usb: pmx-power-usb {
+               marvell,function = "gpio";
+       };
+};
+
+&devbus_bootcs {
+       status = "okay";
+       devbus,keep-config;
+
+       flash@0 {
+               compatible = "jedec-flash";
+               reg = <0 0x40000>;
+               bank-width = <1>;
+
+               partitions {
+                       compatible = "fixed-partitions";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       header@0 {
+                               reg = <0 0x30000>;
+                               read-only;
+                       };
+
+                       uboot@30000 {
+                               reg = <0x30000 0xF000>;
+                               read-only;
+                       };
+
+                       uboot_env@3F000 {
+                               reg = <0x3F000 0x1000>;
+                       };
+               };
+       };
+};
+
+&mdio {
+       status = "okay";
+
+       ethphy: ethernet-phy {
+               reg = <8>;
+       };
+};
+
+&eth {
+       status = "okay";
+
+       ethernet-port@0 {
+               phy-handle = <&ethphy>;
+       };
+};
+
+&ehci0 {
+       status = "okay";
+};
+
+&i2c {
+       status = "okay";
+
+       rtc {
+               compatible = "ricoh,rs5c372a";
+               reg = <0x32>;
+       };
+};
+
+&wdt {
+       status = "disabled";
+};
+
+&sata {
+       status = "okay";
+       nr-ports = <1>;
+};
+
+&uart0 {
+       status = "okay";
+};
+
+&uart1 {
+       status = "okay";
+};
index ed521e85e208e72bd7e7afd96ac8acb1a5ab77a3..394c43bf0ae7a6db4ea3a5e190801f34269f844a 100644 (file)
                                          <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>;
                        interrupt-names = "ack", "err", "wakeup";
 
+                       rpmcc: clock-controller {
+                               compatible      = "qcom,rpmcc-apq8064", "qcom,rpmcc";
+                               #clock-cells = <1>;
+                       };
+
                        regulators {
                                compatible = "qcom,rpm-pm8921-regulators";
 
index 08214cbae16da84c0f191661528d40bc132e09b3..a33a09f6821edb883da87e0c37d837bd9a468b8d 100644 (file)
                interrupts = <1 7 0xf04>;
        };
 
+       clocks {
+               xo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+               };
+
+               sleep_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32768>;
+               };
+       };
+
        timer {
                compatible = "arm,armv7-timer";
                interrupts = <1 2 0xf08>,
index fa698635eea0d1f859d57c766c679438a416b55d..2601a907947b97f4e58c8e07d1524afa2aad02d1 100644 (file)
        };
 
        clocks {
+               cxo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+               };
+
+               pxo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <27000000>;
+               };
+
                sleep_clk: sleep_clk {
                        compatible = "fixed-clock";
                        clock-frequency = <32768>;
index e5f7f33aa4677739f9bc1e6d57d969db9b856cf9..a4b184db21d08a5c7f38f4edd20912425023f025 100644 (file)
                interrupts = <1 9 0x304>;
        };
 
+       clocks {
+               cxo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+               };
+
+               pxo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <27000000>;
+               };
+
+               sleep_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32768>;
+               };
+       };
+
        soc: soc {
                #address-cells = <1>;
                #size-cells = <1>;
index dfdafdcb8aae99711507bc4abfbbd7374ba39205..c7bd2032a95dbd863b20d54b34df59c85ee9e843 100644 (file)
                #size-cells = <1>;
                ranges;
 
+               mpss@08000000 {
+                       reg = <0x08000000 0x5100000>;
+                       no-map;
+               };
+
+               mba@00d100000 {
+                       reg = <0x0d100000 0x100000>;
+                       no-map;
+               };
+
+               reserved@0d200000 {
+                       reg = <0x0d200000 0xa00000>;
+                       no-map;
+               };
+
+               adsp@0dc00000 {
+                       reg = <0x0dc00000 0x1900000>;
+                       no-map;
+               };
+
+               venus@0f500000 {
+                       reg = <0x0f500000 0x500000>;
+                       no-map;
+               };
+
                smem_region: smem@fa00000 {
                        reg = <0xfa00000 0x200000>;
                        no-map;
                };
+
+               tz@0fc00000 {
+                       reg = <0x0fc00000 0x160000>;
+                       no-map;
+               };
+
+               efs@0fd600000 {
+                       reg = <0x0fd60000 0x1a0000>;
+                       no-map;
+               };
+
+               unused@0ff00000 {
+                       reg = <0x0ff00000 0x10100000>;
+                       no-map;
+               };
+       };
+
+       firmware {
+               compatible = "simple-bus";
+
+               scm {
+                       compatible = "qcom,scm";
+                       clocks = <&gcc GCC_CE1_CLK> , <&gcc GCC_CE1_AXI_CLK>,
+                                <&gcc GCC_CE1_AHB_CLK>;
+                       clock-names = "core", "bus", "iface";
+               };
        };
 
        cpus {
                interrupts = <1 7 0xf04>;
        };
 
+       clocks {
+               xo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+               };
+
+               sleep_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32768>;
+               };
+       };
+
        timer {
                compatible = "arm,armv7-timer";
                interrupts = <1 2 0xf08>,
                hwlocks = <&tcsr_mutex 3>;
        };
 
+       smp2p-wcnss {
+               compatible = "qcom,smp2p";
+               qcom,smem = <451>, <431>;
+
+               interrupt-parent = <&intc>;
+               interrupts = <0 143 IRQ_TYPE_EDGE_RISING>;
+
+               qcom,ipc = <&apcs 8 18>;
+
+               qcom,local-pid = <0>;
+               qcom,remote-pid = <4>;
+
+               wcnss_smp2p_out: master-kernel {
+                       qcom,entry-name = "master-kernel";
+
+                       #qcom,state-cells = <1>;
+               };
+
+               wcnss_smp2p_in: slave-kernel {
+                       qcom,entry-name = "slave-kernel";
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+       };
+
+       smsm {
+               compatible = "qcom,smsm";
+
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               qcom,ipc-1 = <&apcs 8 13>;
+               qcom,ipc-2 = <&apcs 8 9>;
+               qcom,ipc-3 = <&apcs 8 19>;
+
+               apps_smsm: apps@0 {
+                       reg = <0>;
+
+                       #qcom,state-cells = <1>;
+               };
+
+               modem_smsm: modem@1 {
+                       reg = <1>;
+                       interrupts = <0 26 IRQ_TYPE_EDGE_RISING>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               adsp_smsm: adsp@2 {
+                       reg = <2>;
+                       interrupts = <0 157 IRQ_TYPE_EDGE_RISING>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               wcnss_smsm: wcnss@7 {
+                       reg = <7>;
+                       interrupts = <0 144 IRQ_TYPE_EDGE_RISING>;
+
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+       };
+
        soc: soc {
                #address-cells = <1>;
                #size-cells = <1>;
index 4657d7fb5bceede5ea747359257fc68ee9b434ad..89e46ebef1bca7ce3dd02399a2a264f0a903a687 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <dt-bindings/clock/r7s72100-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        scif0: serial@e8007000 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8007000 64>;
-               interrupts = <0 190 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 191 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 192 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 189 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
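
Both changes repeated through these serial nodes are mechanical. GIC_SPI comes from the dt-bindings/interrupt-controller/arm-gic.h header added above and expands to 0 (GIC_PPI is 1), so the new interrupt specifiers encode exactly the same cells as before, only more readably; the clock-names switch from "sci_ick" to "fck" matches the name the current Renesas SCIF binding uses for the functional clock. A one-line sketch of the equivalence (macro values taken from that header):

		/* GIC_SPI = 0, GIC_PPI = 1 */
		interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;	/* same cells as <0 190 IRQ_TYPE_LEVEL_HIGH> */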
        scif1: serial@e8007800 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8007800 64>;
-               interrupts = <0 194 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 195 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 196 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 193 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 196 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif2: serial@e8008000 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8008000 64>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 200 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 197 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif3: serial@e8008800 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8008800 64>;
-               interrupts = <0 202 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 203 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 204 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 201 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif4: serial@e8009000 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8009000 64>;
-               interrupts = <0 206 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 207 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 208 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 205 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif5: serial@e8009800 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe8009800 64>;
-               interrupts = <0 210 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 211 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 212 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 209 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif6: serial@e800a000 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe800a000 64>;
-               interrupts = <0 214 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 215 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 216 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 213 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF6>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        scif7: serial@e800a800 {
                compatible = "renesas,scif-r7s72100", "renesas,scif";
                reg = <0xe800a800 64>;
-               interrupts = <0 218 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 219 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 220 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 217 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R7S72100_CLK_SCIF7>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        spi0: spi@e800c800 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800c800 0x24>;
-               interrupts = <0 238 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 239 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 240 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 238 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 240 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI0>;
                power-domains = <&cpg_clocks>;
        spi1: spi@e800d000 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800d000 0x24>;
-               interrupts = <0 241 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 242 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 243 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 241 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 242 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 243 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI1>;
                power-domains = <&cpg_clocks>;
        spi2: spi@e800d800 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800d800 0x24>;
-               interrupts = <0 244 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 245 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 246 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 245 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI2>;
                power-domains = <&cpg_clocks>;
        spi3: spi@e800e000 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800e000 0x24>;
-               interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 248 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 249 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 248 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 249 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI3>;
                power-domains = <&cpg_clocks>;
        spi4: spi@e800e800 {
                compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
                reg = <0xe800e800 0x24>;
-               interrupts = <0 250 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 251 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 252 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 250 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 251 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 252 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error", "rx", "tx";
                clocks = <&mstp10_clks R7S72100_CLK_SPI4>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
                reg = <0xfcfee000 0x44>;
-               interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 158 IRQ_TYPE_EDGE_RISING>,
-                            <0 159 IRQ_TYPE_EDGE_RISING>,
-                            <0 160 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 161 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 162 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 163 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 164 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 159 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R7S72100_CLK_I2C0>;
                clock-frequency = <100000>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
                reg = <0xfcfee400 0x44>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 166 IRQ_TYPE_EDGE_RISING>,
-                            <0 167 IRQ_TYPE_EDGE_RISING>,
-                            <0 168 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 169 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 170 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 171 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 172 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 167 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R7S72100_CLK_I2C1>;
                clock-frequency = <100000>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
                reg = <0xfcfee800 0x44>;
-               interrupts = <0 173 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 174 IRQ_TYPE_EDGE_RISING>,
-                            <0 175 IRQ_TYPE_EDGE_RISING>,
-                            <0 176 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 177 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 178 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 179 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 180 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 174 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 175 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R7S72100_CLK_I2C2>;
                clock-frequency = <100000>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,riic-r7s72100", "renesas,riic-rz";
                reg = <0xfcfeec00 0x44>;
-               interrupts = <0 181 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 182 IRQ_TYPE_EDGE_RISING>,
-                            <0 183 IRQ_TYPE_EDGE_RISING>,
-                            <0 184 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 185 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 186 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 187 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 188 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 182 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 183 IRQ_TYPE_EDGE_RISING>,
+                            <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R7S72100_CLK_I2C3>;
                clock-frequency = <100000>;
                power-domains = <&cpg_clocks>;
        mtu2: timer@fcff0000 {
                compatible = "renesas,mtu2-r7s72100", "renesas,mtu2";
                reg = <0xfcff0000 0x400>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "tgi0a";
                clocks = <&mstp3_clks R7S72100_CLK_MTU2>;
                clock-names = "fck";
index cb4f7b2798fe23be13facdffaef64430cf168ab1..138414a7d17037814aac69a3327d7107ccb24eb5 100644
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        dbsc1: memory-controller@e6790000 {
                dma0: dma-controller@e6700020 {
                        compatible = "renesas,shdma-r8a73a4";
                        reg = <0 0xe6700020 0 0x89e0>;
-                       interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                                       0 200 IRQ_TYPE_LEVEL_HIGH
-                                       0 201 IRQ_TYPE_LEVEL_HIGH
-                                       0 202 IRQ_TYPE_LEVEL_HIGH
-                                       0 203 IRQ_TYPE_LEVEL_HIGH
-                                       0 204 IRQ_TYPE_LEVEL_HIGH
-                                       0 205 IRQ_TYPE_LEVEL_HIGH
-                                       0 206 IRQ_TYPE_LEVEL_HIGH
-                                       0 207 IRQ_TYPE_LEVEL_HIGH
-                                       0 208 IRQ_TYPE_LEVEL_HIGH
-                                       0 209 IRQ_TYPE_LEVEL_HIGH
-                                       0 210 IRQ_TYPE_LEVEL_HIGH
-                                       0 211 IRQ_TYPE_LEVEL_HIGH
-                                       0 212 IRQ_TYPE_LEVEL_HIGH
-                                       0 213 IRQ_TYPE_LEVEL_HIGH
-                                       0 214 IRQ_TYPE_LEVEL_HIGH
-                                       0 215 IRQ_TYPE_LEVEL_HIGH
-                                       0 216 IRQ_TYPE_LEVEL_HIGH
-                                       0 217 IRQ_TYPE_LEVEL_HIGH
-                                       0 218 IRQ_TYPE_LEVEL_HIGH
-                                       0 219 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                                       GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH>;
                        interrupt-names = "error",
                                        "ch0", "ch1", "ch2", "ch3",
                                        "ch4", "ch5", "ch6", "ch7",
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe60b0000 0 0x428>;
-               interrupts = <0 179 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IIC5>;
                power-domains = <&pd_a3sp>;
 
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-r8a73a4", "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&pd_c5>;
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 4 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 5 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 6 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 7 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 8 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 9 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 10 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 11 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 12 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 13 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 14 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 15 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 16 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 17 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 18 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 19 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 20 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 21 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 22 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 23 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 24 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 25 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 26 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 27 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 28 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 29 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 30 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IRQC>;
                power-domains = <&pd_c4>;
        };
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0200 0 0x200>;
-               interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 33 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 34 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 35 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 36 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 37 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 38 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 39 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 40 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 41 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 42 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 43 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 44 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 45 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 46 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 47 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 48 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 49 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 50 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 51 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 52 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 53 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 54 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 55 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 56 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 57 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IRQC>;
                power-domains = <&pd_c4>;
        };
                compatible = "renesas,thermal-r8a73a4", "renesas,rcar-thermal";
                reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>,
                         <0 0xe61f0200 0 0x38>, <0 0xe61f0300 0 0x38>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A73A4_CLK_THERMAL>;
                power-domains = <&pd_c5>;
        };
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6500000 0 0x428>;
-               interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC0>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6510000 0 0x428>;
-               interrupts = <0 175 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC1>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6520000 0 0x428>;
-               interrupts = <0 176 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC2>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6530000 0 0x428>;
-               interrupts = <0 177 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IIC3>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6540000 0 0x428>;
-               interrupts = <0 178 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A73A4_CLK_IIC4>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6550000 0 0x428>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC6>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6560000 0 0x428>;
-               interrupts = <0 185 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_IIC7>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a73a4", "renesas,rmobile-iic";
                reg = <0 0xe6570000 0 0x428>;
-               interrupts = <0 173 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A73A4_CLK_IIC8>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
        scifb0: serial@e6c20000 {
                compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
                reg = <0 0xe6c20000 0 0x100>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb1: serial@e6c30000 {
                compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
                reg = <0 0xe6c30000 0 0x100>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa0: serial@e6c40000 {
                compatible = "renesas,scifa-r8a73a4", "renesas,scifa";
                reg = <0 0xe6c40000 0 0x100>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa1: serial@e6c50000 {
                compatible = "renesas,scifa-r8a73a4", "renesas,scifa";
                reg = <0 0xe6c50000 0 0x100>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb2: serial@e6ce0000 {
                compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
                reg = <0 0xe6ce0000 0 0x100>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb3: serial@e6cf0000 {
                compatible = "renesas,scifb-r8a73a4", "renesas,scifb";
                reg = <0 0xe6cf0000 0 0x100>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A73A4_CLK_SCIFB3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_c4>;
                status = "disabled";
        };
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-r8a73a4";
                reg = <0 0xee100000 0 0x100>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_SDHI0>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi1: sd@ee120000 {
                compatible = "renesas,sdhi-r8a73a4";
                reg = <0 0xee120000 0 0x100>;
-               interrupts = <0 166 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_SDHI1>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi2: sd@ee140000 {
                compatible = "renesas,sdhi-r8a73a4";
                reg = <0 0xee140000 0 0x100>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_SDHI2>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        mmcif0: mmc@ee200000 {
                compatible = "renesas,sh-mmcif";
                reg = <0 0xee200000 0 0x80>;
-               interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_MMCIF0>;
                power-domains = <&pd_a3sp>;
                reg-io-width = <4>;
        mmcif1: mmc@ee220000 {
                compatible = "renesas,sh-mmcif";
                reg = <0 0xee220000 0 0x80>;
-               interrupts = <0 170 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A73A4_CLK_MMCIF1>;
                power-domains = <&pd_a3sp>;
                reg-io-width = <4>;
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        bsc: bus@fec10000 {
index 6ef954766eef740f7ef439103fb4e90e46030444..995fbda74b7a057e57577f3a622f6f058fe3667c 100644
@@ -11,6 +11,7 @@
 /include/ "skeleton.dtsi"
 
 #include <dt-bindings/clock/r8a7740-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
@@ -41,7 +42,7 @@
        L2: cache-controller {
                compatible = "arm,pl310-cache";
                reg = <0xf0100000 0x1000>;
-               interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
                power-domains = <&pd_a3sm>;
                arm,data-latency = <3 3 3>;
                arm,tag-latency = <2 2 2>;
@@ -58,7 +59,7 @@
 
        pmu {
                compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        ptm {
@@ -69,7 +70,7 @@
        cmt1: timer@e6138000 {
                compatible = "renesas,cmt-48-r8a7740", "renesas,cmt-48";
                reg = <0xe6138000 0x170>;
-               interrupts = <0 58 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&pd_c5>;
                        <0xe6900020 1>,
                        <0xe6900040 1>,
                        <0xe6900060 1>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
                power-domains = <&pd_a4s>;
        };
                        <0xe6900024 1>,
                        <0xe6900044 1>,
                        <0xe6900064 1>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
                power-domains = <&pd_a4s>;
        };
                        <0xe6900028 1>,
                        <0xe6900048 1>,
                        <0xe6900068 1>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
                power-domains = <&pd_a4s>;
        };
                        <0xe690002c 1>,
                        <0xe690004c 1>,
                        <0xe690006c 1>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH
-                             0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_INTCA>;
                power-domains = <&pd_a4s>;
        };
                compatible = "renesas,gether-r8a7740";
                reg = <0xe9a00000 0x800>,
                      <0xe9a01800 0x800>;
-               interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_GETHER>;
                power-domains = <&pd_a4s>;
                phy-mode = "mii";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7740", "renesas,rmobile-iic";
                reg = <0xfff20000 0x425>;
-               interrupts = <0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7740_CLK_IIC0>;
                power-domains = <&pd_a4r>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7740", "renesas,rmobile-iic";
                reg = <0xe6c20000 0x425>;
-               interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH
-                             0 71 IRQ_TYPE_LEVEL_HIGH
-                             0 72 IRQ_TYPE_LEVEL_HIGH
-                             0 73 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_IIC1>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
        scifa0: serial@e6c40000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c40000 0x100>;
-               interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa1: serial@e6c50000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c50000 0x100>;
-               interrupts = <0 101 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa2: serial@e6c60000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c60000 0x100>;
-               interrupts = <0 102 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa3: serial@e6c70000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c70000 0x100>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa4: serial@e6c80000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6c80000 0x100>;
-               interrupts = <0 104 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa5: serial@e6cb0000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6cb0000 0x100>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa6: serial@e6cc0000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6cc0000 0x100>;
-               interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA6>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa7: serial@e6cd0000 {
                compatible = "renesas,scifa-r8a7740", "renesas,scifa";
                reg = <0xe6cd0000 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFA7>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb: serial@e6c30000 {
                compatible = "renesas,scifb-r8a7740", "renesas,scifb";
                reg = <0xe6c30000 0x100>;
-               interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7740_CLK_SCIFB>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        mmcif0: mmc@e6bd0000 {
                compatible = "renesas,mmcif-r8a7740", "renesas,sh-mmcif";
                reg = <0xe6bd0000 0x100>;
-               interrupts = <0 56 IRQ_TYPE_LEVEL_HIGH
-                             0 57 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_MMC>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
        sdhi0: sd@e6850000 {
                compatible = "renesas,sdhi-r8a7740";
                reg = <0xe6850000 0x100>;
-               interrupts = <0 117 IRQ_TYPE_LEVEL_HIGH
-                             0 118 IRQ_TYPE_LEVEL_HIGH
-                             0 119 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_SDHI0>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi1: sd@e6860000 {
                compatible = "renesas,sdhi-r8a7740";
                reg = <0xe6860000 0x100>;
-               interrupts = <0 121 IRQ_TYPE_LEVEL_HIGH
-                             0 122 IRQ_TYPE_LEVEL_HIGH
-                             0 123 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7740_CLK_SDHI1>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi2: sd@e6870000 {
                compatible = "renesas,sdhi-r8a7740";
                reg = <0xe6870000 0x100>;
-               interrupts = <0 125 IRQ_TYPE_LEVEL_HIGH
-                             0 126 IRQ_TYPE_LEVEL_HIGH
-                             0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7740_CLK_SDHI2>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
                #sound-dai-cells = <1>;
                compatible = "renesas,fsi2-r8a7740", "renesas,sh_fsi2";
                reg = <0xfe1f0000 0x400>;
-               interrupts = <0 9 0x4>;
+               interrupts = <GIC_SPI 9 0x4>;
                clocks = <&mstp3_clks R8A7740_CLK_FSI>;
                power-domains = <&pd_a4mp>;
                status = "disabled";
        tmu0: timer@fff80000 {
                compatible = "renesas,tmu-r8a7740", "renesas,tmu";
                reg = <0xfff80000 0x2c>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 200 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7740_CLK_TMU0>;
                clock-names = "fck";
                power-domains = <&pd_a4r>;
        tmu1: timer@fff90000 {
                compatible = "renesas,tmu-r8a7740", "renesas,tmu";
                reg = <0xfff90000 0x2c>;
-               interrupts = <0 170 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 171 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 172 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7740_CLK_TMU1>;
                clock-names = "fck";
                power-domains = <&pd_a4r>;
index a52b359e2ae24a300e9cc5f5de946d3005ad2afe..21e3b9dda2dabf5e8d44de253b7cfaad5d11b50a 100644
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        scif0_pins: serial0 {
                renesas,groups = "scif0_data_a", "scif0_ctrl";
                renesas,function = "scif0";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        mmc_pins: mmc {
                renesas,groups = "mmc_data8", "mmc_ctrl";
                renesas,function = "mmc";
 
        status = "okay";
 };
+
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
index 791aafd310a5d78bf0ac92f379b7a77ef2d9a03f..f83a348fc07a49e37f6bef9cbdd33cbc76f6d192 100644
@@ -17,6 +17,7 @@
 /include/ "skeleton.dtsi"
 
 #include <dt-bindings/clock/r8a7778-clock.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/interrupt-controller/irq.h>
 
 / {
@@ -51,7 +52,7 @@
        ether: ethernet@fde00000 {
                compatible = "renesas,ether-r8a7778";
                reg = <0xfde00000 0x400>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7778_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
                        <0xfe780024 4>,
                        <0xfe780044 4>,
                        <0xfe780064 4>;
-               interrupts =   <0 27 IRQ_TYPE_LEVEL_HIGH
-                               0 28 IRQ_TYPE_LEVEL_HIGH
-                               0 29 IRQ_TYPE_LEVEL_HIGH
-                               0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =   <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH
+                               GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH
+                               GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH
+                               GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                sense-bitfield-width = <2>;
        };
 
        gpio0: gpio@ffc40000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc40000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
        gpio1: gpio@ffc41000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc41000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 32>;
        gpio2: gpio@ffc42000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc42000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@ffc43000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc43000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@ffc44000 {
                compatible = "renesas,gpio-r8a7778", "renesas,gpio-rcar";
                reg = <0xffc44000 0x2c>;
-               interrupts = <0 103 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 27>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7778";
                reg = <0xffc70000 0x1000>;
-               interrupts = <0 67 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7778";
                reg = <0xffc71000 0x1000>;
-               interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7778";
                reg = <0xffc72000 0x1000>;
-               interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7778";
                reg = <0xffc73000 0x1000>;
-               interrupts = <0 77 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        tmu0: timer@ffd80000 {
                compatible = "renesas,tmu-r8a7778", "renesas,tmu";
                reg = <0xffd80000 0x30>;
-               interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 33 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 34 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_TMU0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        tmu1: timer@ffd81000 {
                compatible = "renesas,tmu-r8a7778", "renesas,tmu";
                reg = <0xffd81000 0x30>;
-               interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 37 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 38 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_TMU1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        tmu2: timer@ffd82000 {
                compatible = "renesas,tmu-r8a7778", "renesas,tmu";
                reg = <0xffd82000 0x30>;
-               interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 41 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 42 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_TMU2>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
                };
 
                rcar_sound,ssi {
-                       ssi3: ssi@3 { interrupts = <0 0x85 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi4: ssi@4 { interrupts = <0 0x85 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi5: ssi@5 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi6: ssi@6 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi7: ssi@7 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi8: ssi@8 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
-                       ssi9: ssi@9 { interrupts = <0 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi3: ssi@3 { interrupts = <GIC_SPI 0x85 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi4: ssi@4 { interrupts = <GIC_SPI 0x85 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi5: ssi@5 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi6: ssi@6 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi7: ssi@7 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi8: ssi@8 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
+                       ssi9: ssi@9 { interrupts = <GIC_SPI 0x86 IRQ_TYPE_LEVEL_HIGH>; };
                };
        };
 
        scif0: serial@ffe40000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe40000 0x100>;
-               interrupts = <0 70 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF0>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif1: serial@ffe41000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe41000 0x100>;
-               interrupts = <0 71 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF1>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif2: serial@ffe42000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe42000 0x100>;
-               interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF2>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif3: serial@ffe43000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe43000 0x100>;
-               interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF3>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif4: serial@ffe44000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe44000 0x100>;
-               interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF4>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif5: serial@ffe45000 {
-               compatible = "renesas,scif-r8a7778", "renesas,scif";
+               compatible = "renesas,scif-r8a7778", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe45000 0x100>;
-               interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7778_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7778_CLK_SCIF5>,
+                        <&cpg_clocks R8A7778_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        mmcif: mmc@ffe4e000 {
                compatible = "renesas,sh-mmcif";
                reg = <0xffe4e000 0x100>;
-               interrupts = <0 61 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7778_CLK_MMC>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi0: sd@ffe4c000 {
                compatible = "renesas,sdhi-r8a7778";
                reg = <0xffe4c000 0x100>;
-               interrupts = <0 87 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7778_CLK_SDHI0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi1: sd@ffe4d000 {
                compatible = "renesas,sdhi-r8a7778";
                reg = <0xffe4d000 0x100>;
-               interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7778_CLK_SDHI1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi2: sd@ffe4f000 {
                compatible = "renesas,sdhi-r8a7778";
                reg = <0xffe4f000 0x100>;
-               interrupts = <0 86 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7778_CLK_SDHI2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        hspi0: spi@fffc7000 {
                compatible = "renesas,hspi-r8a7778", "renesas,hspi";
                reg = <0xfffc7000 0x18>;
-               interrupts = <0 63 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        hspi1: spi@fffc8000 {
                compatible = "renesas,hspi-r8a7778", "renesas,hspi";
                reg = <0xfffc8000 0x18>;
-               interrupts = <0 84 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        hspi2: spi@fffc6000 {
                compatible = "renesas,hspi-r8a7778", "renesas,hspi";
                reg = <0xfffc6000 0x18>;
-               interrupts = <0 85 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7778_CLK_HSPI>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
                        clock-output-names = "extal";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
+
                /* Special CPG clocks */
                cpg_clocks: cpg_clocks@ffc80000 {
                        compatible = "renesas,r8a7778-cpg-clocks";
index fe396c8d58db798637a5fabaa74fe1f069c8089f..e111d35d02aebe19a8c9ae3bce5e10fc9686665b 100644
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        du_pins: du {
                du0 {
                        renesas,groups = "du0_rgb888", "du0_sync_1", "du0_clk_out_0";
                };
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk_b";
+               renesas,function = "scif_clk";
+       };
+
        ethernet_pins: ethernet {
                intc {
                        renesas,groups = "intc_irq1_b";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &sdhi0 {
        pinctrl-0 = <&sdhi0_pins>;
        pinctrl-names = "default";
index 6afa909865b52b71c970087e90dc77860ea177e6..a0cc08e6295b03968b026e2f4b46e49b7c9d6f16 100644
@@ -74,7 +74,7 @@
        gpio0: gpio@ffc40000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc40000 0x2c>;
-               interrupts = <0 141 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
@@ -85,7 +85,7 @@
        gpio1: gpio@ffc41000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc41000 0x2c>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 32>;
@@ -96,7 +96,7 @@
        gpio2: gpio@ffc42000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc42000 0x2c>;
-               interrupts = <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@ffc43000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc43000 0x2c>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@ffc44000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc44000 0x2c>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@ffc45000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc45000 0x2c>;
-               interrupts = <0 146 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 32>;
        gpio6: gpio@ffc46000 {
                compatible = "renesas,gpio-r8a7779", "renesas,gpio-rcar";
                reg = <0xffc46000 0x2c>;
-               interrupts = <0 147 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 192 9>;
                        <0xfe780044 4>,
                        <0xfe780064 4>,
                        <0xfe780000 4>;
-               interrupts = <0 27 IRQ_TYPE_LEVEL_HIGH
-                             0 28 IRQ_TYPE_LEVEL_HIGH
-                             0 29 IRQ_TYPE_LEVEL_HIGH
-                             0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                sense-bitfield-width = <2>;
        };
 
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7779";
                reg = <0xffc70000 0x1000>;
-               interrupts = <0 79 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7779";
                reg = <0xffc71000 0x1000>;
-               interrupts = <0 82 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7779";
                reg = <0xffc72000 0x1000>;
-               interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7779";
                reg = <0xffc73000 0x1000>;
-               interrupts = <0 81 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif0: serial@ffe40000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe40000 0x100>;
-               interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF0>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif1: serial@ffe41000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe41000 0x100>;
-               interrupts = <0 89 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF1>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif2: serial@ffe42000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe42000 0x100>;
-               interrupts = <0 90 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF2>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif3: serial@ffe43000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe43000 0x100>;
-               interrupts = <0 91 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF3>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif4: serial@ffe44000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe44000 0x100>;
-               interrupts = <0 92 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF4>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        scif5: serial@ffe45000 {
-               compatible = "renesas,scif-r8a7779", "renesas,scif";
+               compatible = "renesas,scif-r8a7779", "renesas,rcar-gen1-scif",
+                            "renesas,scif";
                reg = <0xffe45000 0x100>;
-               interrupts = <0 93 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp0_clks R8A7779_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp0_clks R8A7779_CLK_SCIF5>,
+                        <&cpg_clocks R8A7779_CLK_S1>, <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
        tmu0: timer@ffd80000 {
                compatible = "renesas,tmu-r8a7779", "renesas,tmu";
                reg = <0xffd80000 0x30>;
-               interrupts = <0 32 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 33 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 34 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_TMU0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        tmu1: timer@ffd81000 {
                compatible = "renesas,tmu-r8a7779", "renesas,tmu";
                reg = <0xffd81000 0x30>;
-               interrupts = <0 36 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 37 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 38 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_TMU1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        tmu2: timer@ffd82000 {
                compatible = "renesas,tmu-r8a7779", "renesas,tmu";
                reg = <0xffd82000 0x30>;
-               interrupts = <0 40 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 41 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 42 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7779_CLK_TMU2>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        sata: sata@fc600000 {
                compatible = "renesas,sata-r8a7779", "renesas,rcar-sata";
                reg = <0xfc600000 0x2000>;
-               interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7779_CLK_SATA>;
                power-domains = <&cpg_clocks>;
        };
        sdhi0: sd@ffe4c000 {
                compatible = "renesas,sdhi-r8a7779";
                reg = <0xffe4c000 0x100>;
-               interrupts = <0 104 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7779_CLK_SDHI0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi1: sd@ffe4d000 {
                compatible = "renesas,sdhi-r8a7779";
                reg = <0xffe4d000 0x100>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7779_CLK_SDHI1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi2: sd@ffe4e000 {
                compatible = "renesas,sdhi-r8a7779";
                reg = <0xffe4e000 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7779_CLK_SDHI2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi3: sd@ffe4f000 {
                compatible = "renesas,sdhi-r8a7779";
                reg = <0xffe4f000 0x100>;
-               interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7779_CLK_SDHI3>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        hspi0: spi@fffc7000 {
                compatible = "renesas,hspi-r8a7779", "renesas,hspi";
                reg = <0xfffc7000 0x18>;
-               interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
        hspi1: spi@fffc8000 {
                compatible = "renesas,hspi-r8a7779", "renesas,hspi";
                reg = <0xfffc8000 0x18>;
-               interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
        hspi2: spi@fffc6000 {
                compatible = "renesas,hspi-r8a7779", "renesas,hspi";
                reg = <0xfffc6000 0x18>;
-               interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
                clocks = <&mstp0_clks R8A7779_CLK_HSPI>;
        du: display@fff80000 {
                compatible = "renesas,du-r8a7779";
                reg = <0 0xfff80000 0 0x40000>;
-               interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7779_CLK_DU>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                        clock-output-names = "extal";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
+
                /* Special CPG clocks */
                cpg_clocks: clocks@ffc80000 {
                        compatible = "renesas,r8a7779-cpg-clocks";
index 052dcee4790dd298d2c84b07b4574e2cb462b5bc..cdc0414f5f0716dde7bc454cb656173dc418f012 100644
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        du_pins: du {
                renesas,groups = "du_rgb666", "du_sync_1", "du_clk_out_0";
                renesas,function = "du";
                renesas,function = "scif0";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &msiof1 {
        pinctrl-0 = <&msiof1_pins>;
        pinctrl-names = "default";
index 7dfd393bfc7e7a5b52826139c9d4ad16a4de3841..c9583fa6cae7139f82387230765c08f6fbb5e70e 100644
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        gpio0: gpio@e6050000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6050000 0 0x50>;
-               interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
        gpio1: gpio@e6051000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6051000 0 0x50>;
-               interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 30>;
        gpio2: gpio@e6052000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6052000 0 0x50>;
-               interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 30>;
        gpio3: gpio@e6053000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6053000 0 0x50>;
-               interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@e6054000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6054000 0 0x50>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@e6055000 {
                compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
                reg = <0 0xe6055000 0 0x50>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 32>;
        thermal@e61f0000 {
                compatible = "renesas,thermal-r8a7790", "renesas,rcar-thermal";
                reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
                power-domains = <&cpg_clocks>;
        };
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        cmt0: timer@ffca0000 {
                compatible = "renesas,cmt-48-r8a7790", "renesas,cmt-48-gen2";
                reg = <0 0xffca0000 0 0x1004>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_CMT0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-r8a7790", "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 122 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 123 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 124 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 125 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 126 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7790_CLK_IRQC>;
                power-domains = <&cpg_clocks>;
        };
        dmac0: dma-controller@e6700000 {
                compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
                reg = <0 0xe6700000 0 0x20000>;
-               interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
-                             0 200 IRQ_TYPE_LEVEL_HIGH
-                             0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH
-                             0 205 IRQ_TYPE_LEVEL_HIGH
-                             0 206 IRQ_TYPE_LEVEL_HIGH
-                             0 207 IRQ_TYPE_LEVEL_HIGH
-                             0 208 IRQ_TYPE_LEVEL_HIGH
-                             0 209 IRQ_TYPE_LEVEL_HIGH
-                             0 210 IRQ_TYPE_LEVEL_HIGH
-                             0 211 IRQ_TYPE_LEVEL_HIGH
-                             0 212 IRQ_TYPE_LEVEL_HIGH
-                             0 213 IRQ_TYPE_LEVEL_HIGH
-                             0 214 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        dmac1: dma-controller@e6720000 {
                compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
                reg = <0 0xe6720000 0 0x20000>;
-               interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                             0 216 IRQ_TYPE_LEVEL_HIGH
-                             0 217 IRQ_TYPE_LEVEL_HIGH
-                             0 218 IRQ_TYPE_LEVEL_HIGH
-                             0 219 IRQ_TYPE_LEVEL_HIGH
-                             0 308 IRQ_TYPE_LEVEL_HIGH
-                             0 309 IRQ_TYPE_LEVEL_HIGH
-                             0 310 IRQ_TYPE_LEVEL_HIGH
-                             0 311 IRQ_TYPE_LEVEL_HIGH
-                             0 312 IRQ_TYPE_LEVEL_HIGH
-                             0 313 IRQ_TYPE_LEVEL_HIGH
-                             0 314 IRQ_TYPE_LEVEL_HIGH
-                             0 315 IRQ_TYPE_LEVEL_HIGH
-                             0 316 IRQ_TYPE_LEVEL_HIGH
-                             0 317 IRQ_TYPE_LEVEL_HIGH
-                             0 318 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        audma0: dma-controller@ec700000 {
                compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
                reg = <0 0xec700000 0 0x10000>;
-               interrupts =    <0 346 IRQ_TYPE_LEVEL_HIGH
-                                0 320 IRQ_TYPE_LEVEL_HIGH
-                                0 321 IRQ_TYPE_LEVEL_HIGH
-                                0 322 IRQ_TYPE_LEVEL_HIGH
-                                0 323 IRQ_TYPE_LEVEL_HIGH
-                                0 324 IRQ_TYPE_LEVEL_HIGH
-                                0 325 IRQ_TYPE_LEVEL_HIGH
-                                0 326 IRQ_TYPE_LEVEL_HIGH
-                                0 327 IRQ_TYPE_LEVEL_HIGH
-                                0 328 IRQ_TYPE_LEVEL_HIGH
-                                0 329 IRQ_TYPE_LEVEL_HIGH
-                                0 330 IRQ_TYPE_LEVEL_HIGH
-                                0 331 IRQ_TYPE_LEVEL_HIGH
-                                0 332 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =    <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        audma1: dma-controller@ec720000 {
                compatible = "renesas,dmac-r8a7790", "renesas,rcar-dmac";
                reg = <0 0xec720000 0 0x10000>;
-               interrupts =    <0 347 IRQ_TYPE_LEVEL_HIGH
-                                0 333 IRQ_TYPE_LEVEL_HIGH
-                                0 334 IRQ_TYPE_LEVEL_HIGH
-                                0 335 IRQ_TYPE_LEVEL_HIGH
-                                0 336 IRQ_TYPE_LEVEL_HIGH
-                                0 337 IRQ_TYPE_LEVEL_HIGH
-                                0 338 IRQ_TYPE_LEVEL_HIGH
-                                0 339 IRQ_TYPE_LEVEL_HIGH
-                                0 340 IRQ_TYPE_LEVEL_HIGH
-                                0 341 IRQ_TYPE_LEVEL_HIGH
-                                0 342 IRQ_TYPE_LEVEL_HIGH
-                                0 343 IRQ_TYPE_LEVEL_HIGH
-                                0 344 IRQ_TYPE_LEVEL_HIGH
-                                0 345 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =    <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        usb_dmac0: dma-controller@e65a0000 {
                compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac";
                reg = <0 0xe65a0000 0 0x100>;
-               interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH
-                             0 109 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "ch0", "ch1";
                clocks = <&mstp3_clks R8A7790_CLK_USBDMAC0>;
                power-domains = <&cpg_clocks>;
        usb_dmac1: dma-controller@e65b0000 {
                compatible = "renesas,r8a7790-usb-dmac", "renesas,usb-dmac";
                reg = <0 0xe65b0000 0 0x100>;
-               interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH
-                             0 110 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "ch0", "ch1";
                clocks = <&mstp3_clks R8A7790_CLK_USBDMAC1>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7790";
                reg = <0 0xe6508000 0 0x40>;
-               interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <110>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7790";
                reg = <0 0xe6518000 0 0x40>;
-               interrupts = <0 288 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7790";
                reg = <0 0xe6530000 0 0x40>;
-               interrupts = <0 286 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7790";
                reg = <0 0xe6540000 0 0x40>;
-               interrupts = <0 290 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <110>;
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
                reg = <0 0xe6500000 0 0x425>;
-               interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_IIC0>;
                dmas = <&dmac0 0x61>, <&dmac0 0x62>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
                reg = <0 0xe6510000 0 0x425>;
-               interrupts = <0 175 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_IIC1>;
                dmas = <&dmac0 0x65>, <&dmac0 0x66>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
                reg = <0 0xe6520000 0 0x425>;
-               interrupts = <0 176 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_IIC2>;
                dmas = <&dmac0 0x69>, <&dmac0 0x6a>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7790", "renesas,rmobile-iic";
                reg = <0 0xe60b0000 0 0x425>;
-               interrupts = <0 173 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_IICDVFS>;
                dmas = <&dmac0 0x77>, <&dmac0 0x78>;
                dma-names = "tx", "rx";
        mmcif0: mmc@ee200000 {
                compatible = "renesas,mmcif-r8a7790", "renesas,sh-mmcif";
                reg = <0 0xee200000 0 0x80>;
-               interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_MMCIF0>;
                dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
                dma-names = "tx", "rx";
        mmcif1: mmc@ee220000 {
                compatible = "renesas,mmcif-r8a7790", "renesas,sh-mmcif";
                reg = <0 0xee220000 0 0x80>;
-               interrupts = <0 170 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_MMCIF1>;
                dmas = <&dmac0 0xe1>, <&dmac0 0xe2>;
                dma-names = "tx", "rx";
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee100000 0 0x328>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SDHI0>;
                dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
                dma-names = "tx", "rx";
        sdhi1: sd@ee120000 {
                compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee120000 0 0x328>;
-               interrupts = <0 166 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SDHI1>;
                dmas = <&dmac1 0xc9>, <&dmac1 0xca>;
                dma-names = "tx", "rx";
        sdhi2: sd@ee140000 {
                compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee140000 0 0x100>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SDHI2>;
                dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
                dma-names = "tx", "rx";
        sdhi3: sd@ee160000 {
                compatible = "renesas,sdhi-r8a7790";
                reg = <0 0xee160000 0 0x100>;
-               interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SDHI3>;
                dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
                dma-names = "tx", "rx";
        };
 
        scifa0: serial@e6c40000 {
-               compatible = "renesas,scifa-r8a7790", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7790",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c40000 0 64>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x21>, <&dmac0 0x22>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa1: serial@e6c50000 {
-               compatible = "renesas,scifa-r8a7790", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7790",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c50000 0 64>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x25>, <&dmac0 0x26>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa2: serial@e6c60000 {
-               compatible = "renesas,scifa-r8a7790", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7790",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c60000 0 64>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x27>, <&dmac0 0x28>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb0: serial@e6c20000 {
-               compatible = "renesas,scifb-r8a7790", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7790",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c20000 0 64>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb1: serial@e6c30000 {
-               compatible = "renesas,scifb-r8a7790", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7790",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c30000 0 64>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb2: serial@e6ce0000 {
-               compatible = "renesas,scifb-r8a7790", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7790",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6ce0000 0 64>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif0: serial@e6e60000 {
-               compatible = "renesas,scif-r8a7790", "renesas,scif";
+               compatible = "renesas,scif-r8a7790", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e60000 0 64>;
-               interrupts = <0 152 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7790_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7790_CLK_SCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif1: serial@e6e68000 {
-               compatible = "renesas,scif-r8a7790", "renesas,scif";
+               compatible = "renesas,scif-r8a7790", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e68000 0 64>;
-               interrupts = <0 153 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7790_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7790_CLK_SCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif0: serial@e62c0000 {
-               compatible = "renesas,hscif-r8a7790", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7790",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c0000 0 96>;
-               interrupts = <0 154 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7790_CLK_HSCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7790_CLK_HSCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif1: serial@e62c8000 {
-               compatible = "renesas,hscif-r8a7790", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7790",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c8000 0 96>;
-               interrupts = <0 155 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7790_CLK_HSCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7790_CLK_HSCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        ether: ethernet@ee700000 {
                compatible = "renesas,ether-r8a7790";
                reg = <0 0xee700000 0 0x400>;
-               interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
        avb: ethernet@e6800000 {
                compatible = "renesas,etheravb-r8a7790";
                reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
-               interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_ETHERAVB>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        sata0: sata@ee300000 {
                compatible = "renesas,sata-r8a7790";
                reg = <0 0xee300000 0 0x2000>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_SATA0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sata1: sata@ee500000 {
                compatible = "renesas,sata-r8a7790";
                reg = <0 0xee500000 0 0x2000>;
-               interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_SATA1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        hsusb: usb@e6590000 {
-               compatible = "renesas,usbhs-r8a7790";
+               compatible = "renesas,usbhs-r8a7790", "renesas,rcar-gen2-usbhs";
                reg = <0 0xe6590000 0 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7790_CLK_HSUSB>;
                dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
                       <&usb_dmac1 0>, <&usb_dmac1 1>;
        vin0: video@e6ef0000 {
                compatible = "renesas,vin-r8a7790";
                reg = <0 0xe6ef0000 0 0x1000>;
-               interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_VIN0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin1: video@e6ef1000 {
                compatible = "renesas,vin-r8a7790";
                reg = <0 0xe6ef1000 0 0x1000>;
-               interrupts = <0 189 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_VIN1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin2: video@e6ef2000 {
                compatible = "renesas,vin-r8a7790";
                reg = <0 0xe6ef2000 0 0x1000>;
-               interrupts = <0 190 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_VIN2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin3: video@e6ef3000 {
                compatible = "renesas,vin-r8a7790";
                reg = <0 0xe6ef3000 0 0x1000>;
-               interrupts = <0 191 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7790_CLK_VIN3>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vsp1@fe920000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe920000 0 0x8000>;
-               interrupts = <0 266 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 266 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_VSP1_R>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe928000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe928000 0 0x8000>;
-               interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_VSP1_S>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe930000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe930000 0 0x8000>;
-               interrupts = <0 246 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU0>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe938000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe938000 0 0x8000>;
-               interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_VSP1_DU1>;
                power-domains = <&cpg_clocks>;
 
                      <0 0xfeb90000 0 0x1c>,
                      <0 0xfeb94000 0 0x1c>;
                reg-names = "du", "lvds.0", "lvds.1";
-               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 268 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 269 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 269 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7790_CLK_DU0>,
                         <&mstp7_clks R8A7790_CLK_DU1>,
                         <&mstp7_clks R8A7790_CLK_DU2>,
        can0: can@e6e80000 {
                compatible = "renesas,can-r8a7790";
                reg = <0 0xe6e80000 0 0x1000>;
-               interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_RCAN0>,
                         <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
                clock-names = "clkp1", "clkp2", "can_clk";
        can1: can@e6e88000 {
                compatible = "renesas,can-r8a7790";
                reg = <0 0xe6e88000 0 0x1000>;
-               interrupts = <0 187 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_RCAN1>,
                         <&cpg_clocks R8A7790_CLK_RCAN>, <&can_clk>;
                clock-names = "clkp1", "clkp2", "can_clk";
        jpu: jpeg-codec@fe980000 {
                compatible = "renesas,jpu-r8a7790";
                reg = <0 0xfe980000 0 0x10300>;
-               interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7790_CLK_JPU>;
                power-domains = <&cpg_clocks>;
        };
                        clock-output-names = "audio_clk_c";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
+
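The new node is deliberately left disabled with a zero placeholder frequency; a
board that actually wires up the SCIF_CLK pin overrides both properties, as the
board updates further down in this diff do:

        &scif_clk {
                clock-frequency = <14745600>;
                status = "okay";
        };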
                /* External USB clock - can be overridden by the board */
                usb_extal_clk: usb_extal_clk {
                        compatible = "fixed-clock";
        qspi: spi@e6b10000 {
                compatible = "renesas,qspi-r8a7790", "renesas,qspi";
                reg = <0 0xe6b10000 0 0x2c>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7790_CLK_QSPI_MOD>;
                dmas = <&dmac0 0x17>, <&dmac0 0x18>;
                dma-names = "tx", "rx";
        msiof0: spi@e6e20000 {
                compatible = "renesas,msiof-r8a7790";
                reg = <0 0xe6e20000 0 0x0064>;
-               interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7790_CLK_MSIOF0>;
                dmas = <&dmac0 0x51>, <&dmac0 0x52>;
                dma-names = "tx", "rx";
        msiof1: spi@e6e10000 {
                compatible = "renesas,msiof-r8a7790";
                reg = <0 0xe6e10000 0 0x0064>;
-               interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_MSIOF1>;
                dmas = <&dmac0 0x55>, <&dmac0 0x56>;
                dma-names = "tx", "rx";
        msiof2: spi@e6e00000 {
                compatible = "renesas,msiof-r8a7790";
                reg = <0 0xe6e00000 0 0x0064>;
-               interrupts = <0 158 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_MSIOF2>;
                dmas = <&dmac0 0x41>, <&dmac0 0x42>;
                dma-names = "tx", "rx";
        msiof3: spi@e6c90000 {
                compatible = "renesas,msiof-r8a7790";
                reg = <0 0xe6c90000 0 0x0064>;
-               interrupts = <0 159 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7790_CLK_MSIOF3>;
                dmas = <&dmac0 0x45>, <&dmac0 0x46>;
                dma-names = "tx", "rx";
        xhci: usb@ee000000 {
                compatible = "renesas,xhci-r8a7790";
                reg = <0 0xee000000 0 0xc00>;
-               interrupts = <0 101 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_SSUSB>;
                power-domains = <&cpg_clocks>;
                phys = <&usb2 1>;
                device_type = "pci";
                reg = <0 0xee090000 0 0xc00>,
                      <0 0xee080000 0 0x1100>;
-               interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                device_type = "pci";
                reg = <0 0xee0b0000 0 0xc00>,
                      <0 0xee0a0000 0 0x1100>;
-               interrupts = <0 112 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7790_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee0a0000 0 0xee0a0000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 112 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 112 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 112 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        pci2: pci@ee0d0000 {
                power-domains = <&cpg_clocks>;
                reg = <0 0xee0d0000 0 0xc00>,
                      <0 0xee0c0000 0 0x1100>;
-               interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
 
                bus-range = <2 2>;
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                /* Map all possible DDR as inbound ranges */
                dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000
                              0x43000000 1 0x80000000 1 0x80000000 0 0x80000000>;
-               interrupts = <0 116 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 117 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 118 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 116 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7790_CLK_PCIEC>, <&pcie_bus_clk>;
                clock-names = "pcie", "pcie_bus";
                power-domains = <&cpg_clocks>;
 
                rcar_sound,src {
                        src0: src@0 {
-                               interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x85>, <&audma1 0x9a>;
                                dma-names = "rx", "tx";
                        };
                        src1: src@1 {
-                               interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x87>, <&audma1 0x9c>;
                                dma-names = "rx", "tx";
                        };
                        src2: src@2 {
-                               interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x89>, <&audma1 0x9e>;
                                dma-names = "rx", "tx";
                        };
                        src3: src@3 {
-                               interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8b>, <&audma1 0xa0>;
                                dma-names = "rx", "tx";
                        };
                        src4: src@4 {
-                               interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8d>, <&audma1 0xb0>;
                                dma-names = "rx", "tx";
                        };
                        src5: src@5 {
-                               interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8f>, <&audma1 0xb2>;
                                dma-names = "rx", "tx";
                        };
                        src6: src@6 {
-                               interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x91>, <&audma1 0xb4>;
                                dma-names = "rx", "tx";
                        };
                        src7: src@7 {
-                               interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x93>, <&audma1 0xb6>;
                                dma-names = "rx", "tx";
                        };
                        src8: src@8 {
-                               interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x95>, <&audma1 0xb8>;
                                dma-names = "rx", "tx";
                        };
                        src9: src@9 {
-                               interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x97>, <&audma1 0xba>;
                                dma-names = "rx", "tx";
                        };
 
                rcar_sound,ssi {
                        ssi0: ssi@0 {
-                               interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi1: ssi@1 {
-                                interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>;
+                                interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi2: ssi@2 {
-                               interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi3: ssi@3 {
-                               interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi4: ssi@4 {
-                               interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi5: ssi@5 {
-                               interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi6: ssi@6 {
-                               interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi7: ssi@7 {
-                               interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi8: ssi@8 {
-                               interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi9: ssi@9 {
-                               interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
        ipmmu_sy0: mmu@e6280000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xe6280000 0 0x1000>;
-               interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 224 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_sy1: mmu@e6290000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xe6290000 0 0x1000>;
-               interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_ds: mmu@e6740000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xe6740000 0 0x1000>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mp: mmu@ec680000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xec680000 0 0x1000>;
-               interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mx: mmu@fe951000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xfe951000 0 0x1000>;
-               interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 221 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_rt: mmu@ffc80000 {
                compatible = "renesas,ipmmu-r8a7790", "renesas,ipmmu-vmsa";
                reg = <0 0xffc80000 0 0x1000>;
-               interrupts = <0 307 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
index 45256f3cc83560a80fa91b6313b46e0bed4f5a23..0ad71b81d3a25f59aeaa3e329de0ac9f55c0cdad 100644 (file)
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        i2c2_pins: i2c2 {
                renesas,groups = "i2c2";
                renesas,function = "i2c2";
                renesas,function = "scif1";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &sdhi0 {
        pinctrl-0 = <&sdhi0_pins>;
        pinctrl-names = "default";
index 6713b1ea732b0b1d2282368ce5b50e42a9ae6043..ed1f6f884e2b216885efb98dd9460d76df7dc768 100644 (file)
@@ -8,6 +8,17 @@
  * kind, whether express or implied.
  */
 
+/*
+ * SSI-AK4642
+ *
+ * SW3: 1: AK4642
+ *      3: ADV7511
+ *
+ * This command is required before playback/capture:
+ *
+ *     amixer set "LINEOUT Mixer DACL" on
+ */
+
 /dts-v1/;
 #include "r8a7791.dtsi"
 #include <dt-bindings/gpio/gpio.h>
                states = <3300000 1
                          1800000 0>;
        };
+
+       hdmi-out {
+               compatible = "hdmi-connector";
+               type = "a";
+
+               port {
+                       hdmi_con: endpoint {
+                               remote-endpoint = <&adv7511_out>;
+                       };
+               };
+       };
+
+       x3_clk: x3-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <148500000>;
+       };
+
+       x16_clk: x16-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <74250000>;
+       };
+
+       x14_clk: x14-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <11289600>;
+               clock-output-names = "audio_clock";
+       };
+
+       sound {
+               compatible = "simple-audio-card";
+
+               simple-audio-card,format = "left_j";
+               simple-audio-card,bitclock-master = <&soundcodec>;
+               simple-audio-card,frame-master = <&soundcodec>;
+
+               simple-audio-card,cpu {
+                       sound-dai = <&rcar_sound>;
+               };
+
+               soundcodec: simple-audio-card,codec {
+                       sound-dai = <&ak4642>;
+                       clocks = <&x14_clk>;
+               };
+       };
 };
 
 &extal_clk {
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        scif0_pins: serial0 {
                renesas,groups = "scif0_data_d";
                renesas,function = "scif0";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
                renesas,groups = "can0_data";
                renesas,function = "can0";
        };
+
+       du_pins: du {
+               renesas,groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
+               renesas,function = "du";
+       };
+
+       ssi_pins: sound {
+               renesas,groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
+               renesas,function = "ssi";
+       };
+
+       audio_clk_pins: audio_clk {
+               renesas,groups = "audio_clk_a";
+               renesas,function = "audio_clk";
+       };
 };
 
 &scif0 {
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &ether {
        pinctrl-0 = <&ether_pins &phy1_pins>;
        pinctrl-names = "default";
        status = "okay";
        clock-frequency = <400000>;
 
+       ak4642: codec@12 {
+               compatible = "asahi-kasei,ak4642";
+               #sound-dai-cells = <0>;
+               reg = <0x12>;
+       };
+
        composite-in@20 {
                compatible = "adi,adv7180";
                reg = <0x20>;
                        };
                };
        };
+
+       hdmi@39 {
+               compatible = "adi,adv7511w";
+               reg = <0x39>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
+
+               adi,input-depth = <8>;
+               adi,input-colorspace = "rgb";
+               adi,input-clock = "1x";
+               adi,input-style = <1>;
+               adi,input-justification = "evenly";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7511_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               adv7511_out: endpoint {
+                                       remote-endpoint = <&hdmi_con>;
+                               };
+                       };
+               };
+       };
 };
 
 &sata0 {
 
        status = "okay";
 };
+
+&du {
+       pinctrl-0 = <&du_pins>;
+       pinctrl-names = "default";
+       status = "okay";
+
+       clocks = <&mstp7_clks R8A7791_CLK_DU0>,
+                <&mstp7_clks R8A7791_CLK_DU1>,
+                <&mstp7_clks R8A7791_CLK_LVDS0>,
+                <&x3_clk>, <&x16_clk>;
+       clock-names = "du.0", "du.1", "lvds.0",
+                     "dclkin.0", "dclkin.1";
+
+       ports {
+               port@1 {
+                       endpoint {
+                               remote-endpoint = <&adv7511_in>;
+                       };
+               };
+       };
+};
+
+&rcar_sound {
+       pinctrl-0 = <&ssi_pins &audio_clk_pins>;
+       pinctrl-names = "default";
+       status = "okay";
+
+       /* Single DAI */
+       #sound-dai-cells = <0>;
+
+       rcar_sound,dai {
+               dai0 {
+                       playback = <&ssi0>;
+                       capture  = <&ssi1>;
+               };
+       };
+};
+
+&ssi1 {
+       shared-pin;
+};
index 2a369ddcb6fd8dff8ce997993710d2f2592cb3f3..14aa62539ff297693c9510124266d6cb2c5f88cf 100644 (file)
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        gpio0: gpio@e6050000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6050000 0 0x50>;
-               interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
@@ -97,7 +97,7 @@
        gpio1: gpio@e6051000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6051000 0 0x50>;
-               interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 26>;
        gpio2: gpio@e6052000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6052000 0 0x50>;
-               interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@e6053000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6053000 0 0x50>;
-               interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@e6054000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6054000 0 0x50>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@e6055000 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6055000 0 0x50>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 32>;
        gpio6: gpio@e6055400 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6055400 0 0x50>;
-               interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 192 32>;
        gpio7: gpio@e6055800 {
                compatible = "renesas,gpio-r8a7791", "renesas,gpio-rcar";
                reg = <0 0xe6055800 0 0x50>;
-               interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 224 26>;
        thermal@e61f0000 {
                compatible = "renesas,thermal-r8a7791", "renesas,rcar-thermal";
                reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A7791_CLK_THERMAL>;
                power-domains = <&cpg_clocks>;
        };
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
        };
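For the PPI entries (the architected timer above and the GIC node's own PPI
earlier in this file) the third cell additionally ORs in a CPU targeting mask via
GIC_CPU_MASK_SIMPLE(2); that part is untouched, only the leading 1 becomes
GIC_PPI. For reference, from the same arm-gic.h header:

        #define GIC_CPU_MASK_RAW(x)             ((x) << 8)
        #define GIC_CPU_MASK_SIMPLE(num)        GIC_CPU_MASK_RAW((1 << (num)) - 1)

so GIC_CPU_MASK_SIMPLE(2) targets the two CPU interfaces present on this SoC.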
 
        cmt0: timer@ffca0000 {
                compatible = "renesas,cmt-48-r8a7791", "renesas,cmt-48-gen2";
                reg = <0 0xffca0000 0 0x1004>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_CMT0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-r8a7791", "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 122 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 123 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 124 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 125 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 126 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 12 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 13 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 14 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 15 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 16 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 17 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7791_CLK_IRQC>;
                power-domains = <&cpg_clocks>;
        };
        dmac0: dma-controller@e6700000 {
                compatible = "renesas,dmac-r8a7791", "renesas,rcar-dmac";
                reg = <0 0xe6700000 0 0x20000>;
-               interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
-                             0 200 IRQ_TYPE_LEVEL_HIGH
-                             0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH
-                             0 205 IRQ_TYPE_LEVEL_HIGH
-                             0 206 IRQ_TYPE_LEVEL_HIGH
-                             0 207 IRQ_TYPE_LEVEL_HIGH
-                             0 208 IRQ_TYPE_LEVEL_HIGH
-                             0 209 IRQ_TYPE_LEVEL_HIGH
-                             0 210 IRQ_TYPE_LEVEL_HIGH
-                             0 211 IRQ_TYPE_LEVEL_HIGH
-                             0 212 IRQ_TYPE_LEVEL_HIGH
-                             0 213 IRQ_TYPE_LEVEL_HIGH
-                             0 214 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        dmac1: dma-controller@e6720000 {
                compatible = "renesas,dmac-r8a7791", "renesas,rcar-dmac";
                reg = <0 0xe6720000 0 0x20000>;
-               interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                             0 216 IRQ_TYPE_LEVEL_HIGH
-                             0 217 IRQ_TYPE_LEVEL_HIGH
-                             0 218 IRQ_TYPE_LEVEL_HIGH
-                             0 219 IRQ_TYPE_LEVEL_HIGH
-                             0 308 IRQ_TYPE_LEVEL_HIGH
-                             0 309 IRQ_TYPE_LEVEL_HIGH
-                             0 310 IRQ_TYPE_LEVEL_HIGH
-                             0 311 IRQ_TYPE_LEVEL_HIGH
-                             0 312 IRQ_TYPE_LEVEL_HIGH
-                             0 313 IRQ_TYPE_LEVEL_HIGH
-                             0 314 IRQ_TYPE_LEVEL_HIGH
-                             0 315 IRQ_TYPE_LEVEL_HIGH
-                             0 316 IRQ_TYPE_LEVEL_HIGH
-                             0 317 IRQ_TYPE_LEVEL_HIGH
-                             0 318 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        audma0: dma-controller@ec700000 {
                compatible = "renesas,dmac-r8a7791", "renesas,rcar-dmac";
                reg = <0 0xec700000 0 0x10000>;
-               interrupts =    <0 346 IRQ_TYPE_LEVEL_HIGH
-                                0 320 IRQ_TYPE_LEVEL_HIGH
-                                0 321 IRQ_TYPE_LEVEL_HIGH
-                                0 322 IRQ_TYPE_LEVEL_HIGH
-                                0 323 IRQ_TYPE_LEVEL_HIGH
-                                0 324 IRQ_TYPE_LEVEL_HIGH
-                                0 325 IRQ_TYPE_LEVEL_HIGH
-                                0 326 IRQ_TYPE_LEVEL_HIGH
-                                0 327 IRQ_TYPE_LEVEL_HIGH
-                                0 328 IRQ_TYPE_LEVEL_HIGH
-                                0 329 IRQ_TYPE_LEVEL_HIGH
-                                0 330 IRQ_TYPE_LEVEL_HIGH
-                                0 331 IRQ_TYPE_LEVEL_HIGH
-                                0 332 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =    <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        audma1: dma-controller@ec720000 {
                compatible = "renesas,dmac-r8a7791", "renesas,rcar-dmac";
                reg = <0 0xec720000 0 0x10000>;
-               interrupts =    <0 347 IRQ_TYPE_LEVEL_HIGH
-                                0 333 IRQ_TYPE_LEVEL_HIGH
-                                0 334 IRQ_TYPE_LEVEL_HIGH
-                                0 335 IRQ_TYPE_LEVEL_HIGH
-                                0 336 IRQ_TYPE_LEVEL_HIGH
-                                0 337 IRQ_TYPE_LEVEL_HIGH
-                                0 338 IRQ_TYPE_LEVEL_HIGH
-                                0 339 IRQ_TYPE_LEVEL_HIGH
-                                0 340 IRQ_TYPE_LEVEL_HIGH
-                                0 341 IRQ_TYPE_LEVEL_HIGH
-                                0 342 IRQ_TYPE_LEVEL_HIGH
-                                0 343 IRQ_TYPE_LEVEL_HIGH
-                                0 344 IRQ_TYPE_LEVEL_HIGH
-                                0 345 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts =    <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
+                                GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        usb_dmac0: dma-controller@e65a0000 {
                compatible = "renesas,r8a7791-usb-dmac", "renesas,usb-dmac";
                reg = <0 0xe65a0000 0 0x100>;
-               interrupts = <0 109 IRQ_TYPE_LEVEL_HIGH
-                             0 109 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "ch0", "ch1";
                clocks = <&mstp3_clks R8A7791_CLK_USBDMAC0>;
                power-domains = <&cpg_clocks>;
        usb_dmac1: dma-controller@e65b0000 {
                compatible = "renesas,r8a7791-usb-dmac", "renesas,usb-dmac";
                reg = <0 0xe65b0000 0 0x100>;
-               interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH
-                             0 110 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "ch0", "ch1";
                clocks = <&mstp3_clks R8A7791_CLK_USBDMAC1>;
                power-domains = <&cpg_clocks>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6508000 0 0x40>;
-               interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6518000 0 0x40>;
-               interrupts = <0 288 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6530000 0 0x40>;
-               interrupts = <0 286 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6540000 0 0x40>;
-               interrupts = <0 290 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6520000 0 0x40>;
-               interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C4>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <6>;
                #size-cells = <0>;
                compatible = "renesas,i2c-r8a7791";
                reg = <0 0xe6528000 0 0x40>;
-               interrupts = <0 20 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_I2C5>;
                power-domains = <&cpg_clocks>;
                i2c-scl-internal-delay-ns = <110>;
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7791", "renesas,rmobile-iic";
                reg = <0 0xe60b0000 0 0x425>;
-               interrupts = <0 173 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_IICDVFS>;
                dmas = <&dmac0 0x77>, <&dmac0 0x78>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7791", "renesas,rmobile-iic";
                reg = <0 0xe6500000 0 0x425>;
-               interrupts = <0 174 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_IIC0>;
                dmas = <&dmac0 0x61>, <&dmac0 0x62>;
                dma-names = "tx", "rx";
                #size-cells = <0>;
                compatible = "renesas,iic-r8a7791", "renesas,rmobile-iic";
                reg = <0 0xe6510000 0 0x425>;
-               interrupts = <0 175 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_IIC1>;
                dmas = <&dmac0 0x65>, <&dmac0 0x66>;
                dma-names = "tx", "rx";
        mmcif0: mmc@ee200000 {
                compatible = "renesas,mmcif-r8a7791", "renesas,sh-mmcif";
                reg = <0 0xee200000 0 0x80>;
-               interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_MMCIF0>;
                dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
                dma-names = "tx", "rx";
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-r8a7791";
                reg = <0 0xee100000 0 0x328>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_SDHI0>;
                dmas = <&dmac1 0xcd>, <&dmac1 0xce>;
                dma-names = "tx", "rx";
        sdhi1: sd@ee140000 {
                compatible = "renesas,sdhi-r8a7791";
                reg = <0 0xee140000 0 0x100>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_SDHI1>;
                dmas = <&dmac1 0xc1>, <&dmac1 0xc2>;
                dma-names = "tx", "rx";
        sdhi2: sd@ee160000 {
                compatible = "renesas,sdhi-r8a7791";
                reg = <0 0xee160000 0 0x100>;
-               interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_SDHI2>;
                dmas = <&dmac1 0xd3>, <&dmac1 0xd4>;
                dma-names = "tx", "rx";
        };
 
        scifa0: serial@e6c40000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c40000 0 64>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x21>, <&dmac0 0x22>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa1: serial@e6c50000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c50000 0 64>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x25>, <&dmac0 0x26>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa2: serial@e6c60000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c60000 0 64>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x27>, <&dmac0 0x28>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa3: serial@e6c70000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c70000 0 64>;
-               interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7791_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa4: serial@e6c78000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c78000 0 64>;
-               interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7791_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa5: serial@e6c80000 {
-               compatible = "renesas,scifa-r8a7791", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7791",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c80000 0 64>;
-               interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7791_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x23>, <&dmac0 0x24>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb0: serial@e6c20000 {
-               compatible = "renesas,scifb-r8a7791", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7791",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c20000 0 64>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb1: serial@e6c30000 {
-               compatible = "renesas,scifb-r8a7791", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7791",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c30000 0 64>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb2: serial@e6ce0000 {
-               compatible = "renesas,scifb-r8a7791", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7791",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6ce0000 0 64>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif0: serial@e6e60000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e60000 0 64>;
-               interrupts = <0 152 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
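
The (H)SCIF(A/B) hunks in this file all follow the same pattern: each node gains the family-level "renesas,rcar-gen2-*" fallback compatible, renames its functional clock from "sci_ick" to "fck", and the SCIF/HSCIF variants additionally pick up the "brg_int" and "scif_clk" baud-rate-generator clock inputs. For readability, the scif0 node above reads as follows once the diff markers are folded in (a restatement of the hunk above, not new binding content):

	scif0: serial@e6e60000 {
		compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
			     "renesas,scif";
		reg = <0 0xe6e60000 0 64>;
		interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
		/* "fck" is the module clock; "brg_int" and "scif_clk" feed the BRG */
		clocks = <&mstp7_clks R8A7791_CLK_SCIF0>, <&zs_clk>,
			 <&scif_clk>;
		clock-names = "fck", "brg_int", "scif_clk";
		dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
		dma-names = "tx", "rx";
		power-domains = <&cpg_clocks>;
	};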
 
        scif1: serial@e6e68000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e68000 0 64>;
-               interrupts = <0 153 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif2: serial@e6e58000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e58000 0 64>;
-               interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif3: serial@e6ea8000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ea8000 0 64>;
-               interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF3>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif4: serial@e6ee0000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee0000 0 64>;
-               interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF4>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif5: serial@e6ee8000 {
-               compatible = "renesas,scif-r8a7791", "renesas,scif";
+               compatible = "renesas,scif-r8a7791", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee8000 0 64>;
-               interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_SCIF5>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif0: serial@e62c0000 {
-               compatible = "renesas,hscif-r8a7791", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7791",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c0000 0 96>;
-               interrupts = <0 154 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_HSCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_HSCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif1: serial@e62c8000 {
-               compatible = "renesas,hscif-r8a7791", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7791",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c8000 0 96>;
-               interrupts = <0 155 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_HSCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_HSCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif2: serial@e62d0000 {
-               compatible = "renesas,hscif-r8a7791", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7791",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62d0000 0 96>;
-               interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7791_CLK_HSCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7791_CLK_HSCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        ether: ethernet@ee700000 {
                compatible = "renesas,ether-r8a7791";
                reg = <0 0xee700000 0 0x400>;
-               interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
                compatible = "renesas,etheravb-r8a7791",
                             "renesas,etheravb-rcar-gen2";
                reg = <0 0xe6800000 0 0x800>, <0 0xee0e8000 0 0x4000>;
-               interrupts = <0 163 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_ETHERAVB>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        sata0: sata@ee300000 {
                compatible = "renesas,sata-r8a7791";
                reg = <0 0xee300000 0 0x2000>;
-               interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_SATA0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sata1: sata@ee500000 {
                compatible = "renesas,sata-r8a7791";
                reg = <0 0xee500000 0 0x2000>;
-               interrupts = <0 106 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_SATA1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        };
 
        hsusb: usb@e6590000 {
-               compatible = "renesas,usbhs-r8a7791";
+               compatible = "renesas,usbhs-r8a7791", "renesas,rcar-gen2-usbhs";
                reg = <0 0xe6590000 0 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7791_CLK_HSUSB>;
                dmas = <&usb_dmac0 0>, <&usb_dmac0 1>,
                       <&usb_dmac1 0>, <&usb_dmac1 1>;
        vin0: video@e6ef0000 {
                compatible = "renesas,vin-r8a7791";
                reg = <0 0xe6ef0000 0 0x1000>;
-               interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_VIN0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin1: video@e6ef1000 {
                compatible = "renesas,vin-r8a7791";
                reg = <0 0xe6ef1000 0 0x1000>;
-               interrupts = <0 189 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_VIN1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin2: video@e6ef2000 {
                compatible = "renesas,vin-r8a7791";
                reg = <0 0xe6ef2000 0 0x1000>;
-               interrupts = <0 190 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7791_CLK_VIN2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vsp1@fe928000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe928000 0 0x8000>;
-               interrupts = <0 267 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 267 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_VSP1_S>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe930000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe930000 0 0x8000>;
-               interrupts = <0 246 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 246 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU0>;
                power-domains = <&cpg_clocks>;
 
        vsp1@fe938000 {
                compatible = "renesas,vsp1";
                reg = <0 0xfe938000 0 0x8000>;
-               interrupts = <0 247 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 247 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_VSP1_DU1>;
                power-domains = <&cpg_clocks>;
 
                reg = <0 0xfeb00000 0 0x40000>,
                      <0 0xfeb90000 0 0x1c>;
                reg-names = "du", "lvds.0";
-               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 268 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7791_CLK_DU0>,
                         <&mstp7_clks R8A7791_CLK_DU1>,
                         <&mstp7_clks R8A7791_CLK_LVDS0>;
        can0: can@e6e80000 {
                compatible = "renesas,can-r8a7791";
                reg = <0 0xe6e80000 0 0x1000>;
-               interrupts = <0 186 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_RCAN0>,
                         <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
                clock-names = "clkp1", "clkp2", "can_clk";
        can1: can@e6e88000 {
                compatible = "renesas,can-r8a7791";
                reg = <0 0xe6e88000 0 0x1000>;
-               interrupts = <0 187 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_RCAN1>,
                         <&cpg_clocks R8A7791_CLK_RCAN>, <&can_clk>;
                clock-names = "clkp1", "clkp2", "can_clk";
        jpu: jpeg-codec@fe980000 {
                compatible = "renesas,jpu-r8a7791";
                reg = <0 0xfe980000 0 0x10300>;
-               interrupts = <0 272 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 272 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7791_CLK_JPU>;
                power-domains = <&cpg_clocks>;
        };
                        status = "disabled";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
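
The external SCIF clock is deliberately declared with a zero frequency and left disabled here; a board .dts is expected to supply the actual crystal frequency and enable the node (the Gose board later in this diff does exactly that for r8a7793). A minimal sketch of such a board-side override, assuming the 14.7456 MHz crystal used on Gose:

&scif_clk {
	clock-frequency = <14745600>;
	status = "okay";
};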
+
                /* External USB clock - can be overridden by the board */
                usb_extal_clk: usb_extal_clk {
                        compatible = "fixed-clock";
        qspi: spi@e6b10000 {
                compatible = "renesas,qspi-r8a7791", "renesas,qspi";
                reg = <0 0xe6b10000 0 0x2c>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7791_CLK_QSPI_MOD>;
                dmas = <&dmac0 0x17>, <&dmac0 0x18>;
                dma-names = "tx", "rx";
        msiof0: spi@e6e20000 {
                compatible = "renesas,msiof-r8a7791";
                reg = <0 0xe6e20000 0 0x0064>;
-               interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
                dmas = <&dmac0 0x51>, <&dmac0 0x52>;
                dma-names = "tx", "rx";
        msiof1: spi@e6e10000 {
                compatible = "renesas,msiof-r8a7791";
                reg = <0 0xe6e10000 0 0x0064>;
-               interrupts = <0 157 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_MSIOF1>;
                dmas = <&dmac0 0x55>, <&dmac0 0x56>;
                dma-names = "tx", "rx";
        msiof2: spi@e6e00000 {
                compatible = "renesas,msiof-r8a7791";
                reg = <0 0xe6e00000 0 0x0064>;
-               interrupts = <0 158 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7791_CLK_MSIOF2>;
                dmas = <&dmac0 0x41>, <&dmac0 0x42>;
                dma-names = "tx", "rx";
        xhci: usb@ee000000 {
                compatible = "renesas,xhci-r8a7791";
                reg = <0 0xee000000 0 0xc00>;
-               interrupts = <0 101 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_SSUSB>;
                power-domains = <&cpg_clocks>;
                phys = <&usb2 1>;
                device_type = "pci";
                reg = <0 0xee090000 0 0xc00>,
                      <0 0xee080000 0 0x1100>;
-               interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
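
With interrupt-map-mask = <0xff00 0 0 0x7>, only the device/function bits of the child unit address plus the legacy INTx pin select a map entry, and all entries here route to the same GIC SPI. Each entry is read as three child address cells, one child interrupt cell (INTA = 1 ... INTD = 4), the parent phandle, and the GIC's three specifier cells. An annotated restatement of one entry from the hunk above (comment added for illustration only):

	/* <child-addr[3]  INTx  parent  parent-specifier[3]> */
	interrupt-map = <0x0800 0 0  1  &gic  GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;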
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                device_type = "pci";
                reg = <0 0xee0d0000 0 0xc00>,
                      <0 0xee0c0000 0 0x1100>;
-               interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7791_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                /* Map all possible DDR as inbound ranges */
                dma-ranges = <0x42000000 0 0x40000000 0 0x40000000 0 0x80000000
                              0x43000000 2 0x00000000 2 0x00000000 1 0x00000000>;
-               interrupts = <0 116 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 117 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 118 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
                #interrupt-cells = <1>;
                interrupt-map-mask = <0 0 0 0>;
-               interrupt-map = <0 0 0 0 &gic 0 116 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0 0 0 0 &gic GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7791_CLK_PCIEC>, <&pcie_bus_clk>;
                clock-names = "pcie", "pcie_bus";
                power-domains = <&cpg_clocks>;
        ipmmu_sy0: mmu@e6280000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xe6280000 0 0x1000>;
-               interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 224 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_sy1: mmu@e6290000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xe6290000 0 0x1000>;
-               interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_ds: mmu@e6740000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xe6740000 0 0x1000>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mp: mmu@ec680000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xec680000 0 0x1000>;
-               interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mx: mmu@fe951000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xfe951000 0 0x1000>;
-               interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 221 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_rt: mmu@ffc80000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xffc80000 0 0x1000>;
-               interrupts = <0 307 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_gp: mmu@e62a0000 {
                compatible = "renesas,ipmmu-r8a7791", "renesas,ipmmu-vmsa";
                reg = <0 0xe62a0000 0 0x1000>;
-               interrupts = <0 260 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 261 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
 
                rcar_sound,src {
                        src0: src@0 {
-                               interrupts = <0 352 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x85>, <&audma1 0x9a>;
                                dma-names = "rx", "tx";
                        };
                        src1: src@1 {
-                               interrupts = <0 353 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x87>, <&audma1 0x9c>;
                                dma-names = "rx", "tx";
                        };
                        src2: src@2 {
-                               interrupts = <0 354 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x89>, <&audma1 0x9e>;
                                dma-names = "rx", "tx";
                        };
                        src3: src@3 {
-                               interrupts = <0 355 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8b>, <&audma1 0xa0>;
                                dma-names = "rx", "tx";
                        };
                        src4: src@4 {
-                               interrupts = <0 356 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8d>, <&audma1 0xb0>;
                                dma-names = "rx", "tx";
                        };
                        src5: src@5 {
-                               interrupts = <0 357 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x8f>, <&audma1 0xb2>;
                                dma-names = "rx", "tx";
                        };
                        src6: src@6 {
-                               interrupts = <0 358 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x91>, <&audma1 0xb4>;
                                dma-names = "rx", "tx";
                        };
                        src7: src@7 {
-                               interrupts = <0 359 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x93>, <&audma1 0xb6>;
                                dma-names = "rx", "tx";
                        };
                        src8: src@8 {
-                               interrupts = <0 360 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x95>, <&audma1 0xb8>;
                                dma-names = "rx", "tx";
                        };
                        src9: src@9 {
-                               interrupts = <0 361 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x97>, <&audma1 0xba>;
                                dma-names = "rx", "tx";
                        };
 
                rcar_sound,ssi {
                        ssi0: ssi@0 {
-                               interrupts = <0 370 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi1: ssi@1 {
-                                interrupts = <0 371 IRQ_TYPE_LEVEL_HIGH>;
+                                interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi2: ssi@2 {
-                               interrupts = <0 372 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi3: ssi@3 {
-                               interrupts = <0 373 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi4: ssi@4 {
-                               interrupts = <0 374 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi5: ssi@5 {
-                               interrupts = <0 375 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi6: ssi@6 {
-                               interrupts = <0 376 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi7: ssi@7 {
-                               interrupts = <0 377 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi8: ssi@8 {
-                               interrupts = <0 378 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
                        ssi9: ssi@9 {
-                               interrupts = <0 379 IRQ_TYPE_LEVEL_HIGH>;
+                               interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
                                dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
                                dma-names = "rx", "tx", "rxu", "txu";
                        };
index baa59fe8429869ac2bd975e5e7da5a443df122d6..cfe142c2ba3890316977021b1a64c788669e118b 100644 (file)
@@ -8,6 +8,34 @@
  * kind, whether express or implied.
  */
 
+/*
+ * SSI-AK4643
+ *
+ * SW1: 1: AK4643
+ *      2: CN22
+ *      3: ADV7511
+ *
+ * These commands are required for Playback/Capture:
+ *
+ *     amixer set "LINEOUT Mixer DACL" on
+ *     amixer set "DVC Out" 100%
+ *     amixer set "DVC In" 100%
+ *
+ * Mute can be enabled with:
+ *
+ *     amixer set "DVC Out Mute" on
+ *     amixer set "DVC In Mute" on
+ *
+ * Volume Ramp can be used:
+ *
+ *     amixer set "DVC Out Ramp Up Rate"   "0.125 dB/64 steps"
+ *     amixer set "DVC Out Ramp Down Rate" "0.125 dB/512 steps"
+ *     amixer set "DVC Out Ramp" on
+ *     aplay xxx.wav &
+ *     amixer set "DVC Out"  80%  // Volume Down
+ *     amixer set "DVC Out" 100%  // Volume Up
+ */
+
 /dts-v1/;
 #include "r8a7793.dtsi"
 #include <dt-bindings/gpio/gpio.h>
                device_type = "memory";
                reg = <0 0x40000000 0 0x40000000>;
        };
+
+       gpio-keys {
+               compatible = "gpio-keys";
+
+               key-1 {
+                       gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_1>;
+                       label = "SW2-1";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-2 {
+                       gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_2>;
+                       label = "SW2-2";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-3 {
+                       gpios = <&gpio5 2 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_3>;
+                       label = "SW2-3";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-4 {
+                       gpios = <&gpio5 3 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_4>;
+                       label = "SW2-4";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-a {
+                       gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_A>;
+                       label = "SW30";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-b {
+                       gpios = <&gpio7 1 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_B>;
+                       label = "SW31";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-c {
+                       gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_C>;
+                       label = "SW32";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-d {
+                       gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_D>;
+                       label = "SW33";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-e {
+                       gpios = <&gpio7 4 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_E>;
+                       label = "SW34";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-f {
+                       gpios = <&gpio7 5 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_F>;
+                       label = "SW35";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+               key-g {
+                       gpios = <&gpio7 6 GPIO_ACTIVE_LOW>;
+                       linux,code = <KEY_G>;
+                       label = "SW36";
+                       gpio-key,wakeup;
+                       debounce-interval = <20>;
+               };
+       };
+
+       leds {
+               compatible = "gpio-leds";
+               led6 {
+                       gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
+                       label = "LED6";
+               };
+               led7 {
+                       gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>;
+                       label = "LED7";
+               };
+               led8 {
+                       gpios = <&gpio2 21 GPIO_ACTIVE_HIGH>;
+                       label = "LED8";
+               };
+       };
+
+       audio_clock: clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <11289600>;
+               clock-output-names = "audio_clock";
+       };
+
+       rsnd_ak4643: sound {
+               compatible = "simple-audio-card";
+
+               simple-audio-card,format = "left_j";
+               simple-audio-card,bitclock-master = <&sndcodec>;
+               simple-audio-card,frame-master = <&sndcodec>;
+
+               sndcpu: simple-audio-card,cpu {
+                       sound-dai = <&rcar_sound>;
+               };
+
+               sndcodec: simple-audio-card,codec {
+                       sound-dai = <&ak4643>;
+                       clocks = <&audio_clock>;
+               };
+       };
+
+       hdmi-out {
+               compatible = "hdmi-connector";
+               type = "a";
+
+               port {
+                       hdmi_con: endpoint {
+                               remote-endpoint = <&adv7511_out>;
+                       };
+               };
+       };
+
+       x2_clk: x2-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <74250000>;
+       };
+
+       x13_clk: x13-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <148500000>;
+       };
+};
+
+&du {
+       pinctrl-0 = <&du_pins>;
+       pinctrl-names = "default";
+       status = "okay";
+
+       clocks = <&mstp7_clks R8A7793_CLK_DU0>,
+                <&mstp7_clks R8A7793_CLK_DU1>,
+                <&mstp7_clks R8A7793_CLK_LVDS0>,
+                <&x13_clk>, <&x2_clk>;
+       clock-names = "du.0", "du.1", "lvds.0",
+                     "dclkin.0", "dclkin.1";
+
+       ports {
+               port@0 {
+                       endpoint {
+                               remote-endpoint = <&adv7511_in>;
+                       };
+               };
+               port@1 {
+                       lvds_connector: endpoint {
+                       };
+               };
+       };
 };
 
 &extal_clk {
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
+       i2c2_pins: i2c2 {
+               renesas,groups = "i2c2";
+               renesas,function = "i2c2";
+       };
+
+       du_pins: du {
+               renesas,groups = "du_rgb888", "du_sync", "du_disp", "du_clk_out_0";
+               renesas,function = "du";
+       };
+
        scif0_pins: serial0 {
                renesas,groups = "scif0_data_d";
                renesas,function = "scif0";
                renesas,function = "scif1";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
                renesas,groups = "qspi_ctrl", "qspi_data4";
                renesas,function = "qspi";
        };
+
+       sound_pins: sound {
+               renesas,groups = "ssi0129_ctrl", "ssi0_data", "ssi1_data";
+               renesas,function = "ssi";
+       };
+
+       sound_clk_pins: sound_clk {
+               renesas,groups = "audio_clk_a";
+               renesas,function = "audio_clk";
+       };
 };
 
 &ether {
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &qspi {
        pinctrl-0 = <&qspi_pins>;
        pinctrl-names = "default";
                };
        };
 };
+
+&i2c2 {
+       pinctrl-0 = <&i2c2_pins>;
+       pinctrl-names = "default";
+
+       status = "okay";
+       clock-frequency = <100000>;
+
+       ak4643: codec@12 {
+               compatible = "asahi-kasei,ak4643";
+               #sound-dai-cells = <0>;
+               reg = <0x12>;
+       };
+
+       hdmi@39 {
+               compatible = "adi,adv7511w";
+               reg = <0x39>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
+
+               adi,input-depth = <8>;
+               adi,input-colorspace = "rgb";
+               adi,input-clock = "1x";
+               adi,input-style = <1>;
+               adi,input-justification = "evenly";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7511_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               adv7511_out: endpoint {
+                                       remote-endpoint = <&hdmi_con>;
+                               };
+                       };
+               };
+       };
+
+       eeprom@50 {
+               compatible = "renesas,r1ex24002", "atmel,24c02";
+               reg = <0x50>;
+               pagesize = <16>;
+       };
+};
+
+&rcar_sound {
+       pinctrl-0 = <&sound_pins &sound_clk_pins>;
+       pinctrl-names = "default";
+
+       /* Single DAI */
+       #sound-dai-cells = <0>;
+
+       status = "okay";
+
+       rcar_sound,dai {
+               dai0 {
+                       playback = <&ssi0 &src2 &dvc0>;
+                       capture  = <&ssi1 &src3 &dvc1>;
+               };
+       };
+};
+
+&ssi1 {
+       shared-pin;
+};
index aef9e69d6c26ae7cfab58fb5418e6771e0f226ba..45dba1c79a43c28782726685874811b8f7a90650 100644 (file)
        #size-cells = <2>;
 
        aliases {
+               i2c0 = &i2c0;
+               i2c1 = &i2c1;
+               i2c2 = &i2c2;
+               i2c3 = &i2c3;
+               i2c4 = &i2c4;
+               i2c5 = &i2c5;
+               i2c6 = &i2c6;
+               i2c7 = &i2c7;
+               i2c8 = &i2c8;
                spi0 = &qspi;
        };
 
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        gpio0: gpio@e6050000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6050000 0 0x50>;
-               interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
@@ -73,7 +82,7 @@
        gpio1: gpio@e6051000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6051000 0 0x50>;
-               interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 26>;
@@ -86,7 +95,7 @@
        gpio2: gpio@e6052000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6052000 0 0x50>;
-               interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@e6053000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6053000 0 0x50>;
-               interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@e6054000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6054000 0 0x50>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@e6055000 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6055000 0 0x50>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 32>;
        gpio6: gpio@e6055400 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6055400 0 0x50>;
-               interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 192 32>;
        gpio7: gpio@e6055800 {
                compatible = "renesas,gpio-r8a7793", "renesas,gpio-rcar";
                reg = <0 0xe6055800 0 0x50>;
-               interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 224 26>;
        thermal@e61f0000 {
                compatible = "renesas,thermal-r8a7793", "renesas,rcar-thermal";
                reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
-               interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks R8A7793_CLK_THERMAL>;
                power-domains = <&cpg_clocks>;
        };
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        cmt0: timer@ffca0000 {
                compatible = "renesas,cmt-48-r8a7793", "renesas,cmt-48-gen2";
                reg = <0 0xffca0000 0 0x1004>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7793_CLK_CMT0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-r8a7793", "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 122 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 123 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 124 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 125 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 126 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7793_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 12 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 13 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 14 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 15 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 16 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 17 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7793_CLK_IRQC>;
                power-domains = <&cpg_clocks>;
        };
 
-       pfc: pfc@e6060000 {
-               compatible = "renesas,pfc-r8a7793";
-               reg = <0 0xe6060000 0 0x250>;
-       };
-
        dmac0: dma-controller@e6700000 {
                compatible = "renesas,dmac-r8a7793", "renesas,rcar-dmac";
                reg = <0 0xe6700000 0 0x20000>;
-               interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
-                             0 200 IRQ_TYPE_LEVEL_HIGH
-                             0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH
-                             0 205 IRQ_TYPE_LEVEL_HIGH
-                             0 206 IRQ_TYPE_LEVEL_HIGH
-                             0 207 IRQ_TYPE_LEVEL_HIGH
-                             0 208 IRQ_TYPE_LEVEL_HIGH
-                             0 209 IRQ_TYPE_LEVEL_HIGH
-                             0 210 IRQ_TYPE_LEVEL_HIGH
-                             0 211 IRQ_TYPE_LEVEL_HIGH
-                             0 212 IRQ_TYPE_LEVEL_HIGH
-                             0 213 IRQ_TYPE_LEVEL_HIGH
-                             0 214 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        dmac1: dma-controller@e6720000 {
                compatible = "renesas,dmac-r8a7793", "renesas,rcar-dmac";
                reg = <0 0xe6720000 0 0x20000>;
-               interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                             0 216 IRQ_TYPE_LEVEL_HIGH
-                             0 217 IRQ_TYPE_LEVEL_HIGH
-                             0 218 IRQ_TYPE_LEVEL_HIGH
-                             0 219 IRQ_TYPE_LEVEL_HIGH
-                             0 308 IRQ_TYPE_LEVEL_HIGH
-                             0 309 IRQ_TYPE_LEVEL_HIGH
-                             0 310 IRQ_TYPE_LEVEL_HIGH
-                             0 311 IRQ_TYPE_LEVEL_HIGH
-                             0 312 IRQ_TYPE_LEVEL_HIGH
-                             0 313 IRQ_TYPE_LEVEL_HIGH
-                             0 314 IRQ_TYPE_LEVEL_HIGH
-                             0 315 IRQ_TYPE_LEVEL_HIGH
-                             0 316 IRQ_TYPE_LEVEL_HIGH
-                             0 317 IRQ_TYPE_LEVEL_HIGH
-                             0 318 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
                dma-channels = <15>;
        };
 
+       audma0: dma-controller@ec700000 {
+               compatible = "renesas,dmac-r8a7793", "renesas,rcar-dmac";
+               reg = <0 0xec700000 0 0x10000>;
+               interrupts = <GIC_SPI 346 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "error",
+                               "ch0", "ch1", "ch2", "ch3",
+                               "ch4", "ch5", "ch6", "ch7",
+                               "ch8", "ch9", "ch10", "ch11",
+                               "ch12";
+               clocks = <&mstp5_clks R8A7793_CLK_AUDIO_DMAC0>;
+               clock-names = "fck";
+               power-domains = <&cpg_clocks>;
+               #dma-cells = <1>;
+               dma-channels = <13>;
+       };
+
+       audma1: dma-controller@ec720000 {
+               compatible = "renesas,dmac-r8a7793", "renesas,rcar-dmac";
+               reg = <0 0xec720000 0 0x10000>;
+               interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 344 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 345 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-names = "error",
+                               "ch0", "ch1", "ch2", "ch3",
+                               "ch4", "ch5", "ch6", "ch7",
+                               "ch8", "ch9", "ch10", "ch11",
+                               "ch12";
+               clocks = <&mstp5_clks R8A7793_CLK_AUDIO_DMAC1>;
+               clock-names = "fck";
+               power-domains = <&cpg_clocks>;
+               #dma-cells = <1>;
+               dma-channels = <13>;
+       };
+
+       /* The memory map in the User's Manual maps the cores to bus numbers */
+       i2c0: i2c@e6508000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6508000 0 0x40>;
+               interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C0>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c1: i2c@e6518000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6518000 0 0x40>;
+               interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C1>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c2: i2c@e6530000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6530000 0 0x40>;
+               interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C2>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c3: i2c@e6540000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6540000 0 0x40>;
+               interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C3>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c4: i2c@e6520000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6520000 0 0x40>;
+               interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C4>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <6>;
+               status = "disabled";
+       };
+
+       i2c5: i2c@e6528000 {
+               /* doesn't need pinmux */
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,i2c-r8a7793";
+               reg = <0 0xe6528000 0 0x40>;
+               interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_I2C5>;
+               power-domains = <&cpg_clocks>;
+               i2c-scl-internal-delay-ns = <110>;
+               status = "disabled";
+       };
+
+       i2c6: i2c@e60b0000 {
+               /* doesn't need pinmux */
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,iic-r8a7793", "renesas,rmobile-iic";
+               reg = <0 0xe60b0000 0 0x425>;
+               interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp9_clks R8A7793_CLK_IICDVFS>;
+               dmas = <&dmac0 0x77>, <&dmac0 0x78>;
+               dma-names = "tx", "rx";
+               power-domains = <&cpg_clocks>;
+               status = "disabled";
+       };
+
+       i2c7: i2c@e6500000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,iic-r8a7793", "renesas,rmobile-iic";
+               reg = <0 0xe6500000 0 0x425>;
+               interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp3_clks R8A7793_CLK_IIC0>;
+               dmas = <&dmac0 0x61>, <&dmac0 0x62>;
+               dma-names = "tx", "rx";
+               power-domains = <&cpg_clocks>;
+               status = "disabled";
+       };
+
+       i2c8: i2c@e6510000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "renesas,iic-r8a7793", "renesas,rmobile-iic";
+               reg = <0 0xe6510000 0 0x425>;
+               interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp3_clks R8A7793_CLK_IIC1>;
+               dmas = <&dmac0 0x65>, <&dmac0 0x66>;
+               dma-names = "tx", "rx";
+               power-domains = <&cpg_clocks>;
+               status = "disabled";
+       };
+
+       pfc: pfc@e6060000 {
+               compatible = "renesas,pfc-r8a7793";
+               reg = <0 0xe6060000 0 0x250>;
+       };
+
        scifa0: serial@e6c40000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c40000 0 64>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x21>, <&dmac0 0x22>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa1: serial@e6c50000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c50000 0 64>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x25>, <&dmac0 0x26>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa2: serial@e6c60000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c60000 0 64>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x27>, <&dmac0 0x28>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa3: serial@e6c70000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c70000 0 64>;
-               interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7793_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa4: serial@e6c78000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c78000 0 64>;
-               interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7793_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa5: serial@e6c80000 {
-               compatible = "renesas,scifa-r8a7793", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7793",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c80000 0 64>;
-               interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7793_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x23>, <&dmac0 0x24>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb0: serial@e6c20000 {
-               compatible = "renesas,scifb-r8a7793", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7793",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c20000 0 64>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb1: serial@e6c30000 {
-               compatible = "renesas,scifb-r8a7793", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7793",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c30000 0 64>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb2: serial@e6ce0000 {
-               compatible = "renesas,scifb-r8a7793", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7793",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6ce0000 0 64>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7793_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif0: serial@e6e60000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e60000 0 64>;
-               interrupts = <0 152 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif1: serial@e6e68000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e68000 0 64>;
-               interrupts = <0 153 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif2: serial@e6e58000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e58000 0 64>;
-               interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif3: serial@e6ea8000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ea8000 0 64>;
-               interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF3>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif4: serial@e6ee0000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee0000 0 64>;
-               interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF4>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif5: serial@e6ee8000 {
-               compatible = "renesas,scif-r8a7793", "renesas,scif";
+               compatible = "renesas,scif-r8a7793", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee8000 0 64>;
-               interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_SCIF5>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif0: serial@e62c0000 {
-               compatible = "renesas,hscif-r8a7793", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7793",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c0000 0 96>;
-               interrupts = <0 154 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_HSCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_HSCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif1: serial@e62c8000 {
-               compatible = "renesas,hscif-r8a7793", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7793",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c8000 0 96>;
-               interrupts = <0 155 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_HSCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_HSCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif2: serial@e62d0000 {
-               compatible = "renesas,hscif-r8a7793", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7793",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62d0000 0 96>;
-               interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7793_CLK_HSCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7793_CLK_HSCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        ether: ethernet@ee700000 {
                compatible = "renesas,ether-r8a7793";
                reg = <0 0xee700000 0 0x400>;
-               interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7793_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
        qspi: spi@e6b10000 {
                compatible = "renesas,qspi-r8a7793", "renesas,qspi";
                reg = <0 0xe6b10000 0 0x2c>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7793_CLK_QSPI_MOD>;
                dmas = <&dmac0 0x17>, <&dmac0 0x18>;
                dma-names = "tx", "rx";
                reg = <0 0xfeb00000 0 0x40000>,
                      <0 0xfeb90000 0 0x1c>;
                reg-names = "du", "lvds.0";
-               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 268 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7793_CLK_DU0>,
                         <&mstp7_clks R8A7793_CLK_DU1>,
                         <&mstp7_clks R8A7793_CLK_LVDS0>;
                        clock-output-names = "extal";
                };
 
+               /*
+                * The external audio clocks are configured as 0 Hz fixed frequency clocks by
+                * default. Boards that provide audio clocks should override them.
+                */
+               audio_clk_a: audio_clk_a {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <0>;
+                       clock-output-names = "audio_clk_a";
+               };
+               audio_clk_b: audio_clk_b {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <0>;
+                       clock-output-names = "audio_clk_b";
+               };
+               audio_clk_c: audio_clk_c {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <0>;
+                       clock-output-names = "audio_clk_c";
+               };
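+
+               /*
+                * Example override (illustrative value): a board wired to a
+                * 22.5792 MHz audio oscillator would set, in its own .dts:
+                *
+                *      &audio_clk_a {
+                *              clock-frequency = <22579200>;
+                *      };
+                */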
+
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
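+
+               /*
+                * Boards supply the real rate and enable the clock, as the board
+                * .dts changes further below do, e.g.:
+                *
+                *      &scif_clk {
+                *              clock-frequency = <14745600>;
+                *              status = "okay";
+                *      };
+                */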
+
                /* Special CPG clocks */
                cpg_clocks: cpg_clocks@e6150000 {
                        compatible = "renesas,r8a7793-cpg-clocks",
                        clock-mult = <1>;
                        clock-output-names = "p";
                };
+               m2_clk: m2_clk {
+                       compatible = "fixed-factor-clock";
+                       clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
+                       #clock-cells = <0>;
+                       clock-div = <8>;
+                       clock-mult = <1>;
+                       clock-output-names = "m2";
+               };
                rclk_clk: rclk_clk {
                        compatible = "fixed-factor-clock";
                        clocks = <&cpg_clocks R8A7793_CLK_PLL1>;
                mstp5_clks: mstp5_clks@e6150144 {
                        compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
                        reg = <0 0xe6150144 0 4>, <0 0xe615003c 0 4>;
-                       clocks = <&extal_clk>;
+                       clocks = <&hp_clk>, <&hp_clk>, <&extal_clk>;
                        #clock-cells = <1>;
-                       clock-indices = <R8A7793_CLK_THERMAL>;
-                       clock-output-names = "thermal";
+                       clock-indices = <R8A7793_CLK_AUDIO_DMAC0 R8A7793_CLK_AUDIO_DMAC1
+                                        R8A7793_CLK_THERMAL>;
+                       clock-output-names = "audmac0", "audmac1", "thermal";
                };
                mstp7_clks: mstp7_clks@e615014c {
                        compatible = "renesas,r8a7793-mstp-clocks",
                        reg = <0 0xe6150994 0 4>, <0 0xe61509a4 0 4>;
                        clocks = <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
                                 <&cp_clk>, <&cp_clk>, <&cp_clk>, <&cp_clk>,
-                                <&cpg_clocks R8A7793_CLK_QSPI>;
+                                <&cpg_clocks R8A7793_CLK_QSPI>, <&hp_clk>,
+                                <&cp_clk>, <&hp_clk>, <&hp_clk>, <&hp_clk>,
+                                <&hp_clk>, <&hp_clk>;
                        #clock-cells = <1>;
                        clock-indices = <
                                R8A7793_CLK_GPIO7 R8A7793_CLK_GPIO6
                                R8A7793_CLK_GPIO5 R8A7793_CLK_GPIO4
                                R8A7793_CLK_GPIO3 R8A7793_CLK_GPIO2
                                R8A7793_CLK_GPIO1 R8A7793_CLK_GPIO0
-                               R8A7793_CLK_QSPI_MOD
+                               R8A7793_CLK_QSPI_MOD R8A7793_CLK_I2C5
+                               R8A7793_CLK_IICDVFS R8A7793_CLK_I2C4
+                               R8A7793_CLK_I2C3 R8A7793_CLK_I2C2
+                               R8A7793_CLK_I2C1 R8A7793_CLK_I2C0
                        >;
                        clock-output-names =
                                "gpio7", "gpio6", "gpio5", "gpio4",
                                "gpio3", "gpio2", "gpio1", "gpio0",
-                               "qspi_mod";
+                               "qspi_mod", "i2c5", "i2c6", "i2c4",
+                               "i2c3", "i2c2", "i2c1", "i2c0";
+               };
+               mstp10_clks: mstp10_clks@e6150998 {
+                       compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
+                       reg = <0 0xe6150998 0 4>, <0 0xe61509a8 0 4>;
+                       clocks = <&p_clk>,
+                               <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+                               <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>, <&p_clk>,
+                               <&p_clk>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>,
+                               <&mstp10_clks R8A7793_CLK_SCU_ALL>, <&mstp10_clks R8A7793_CLK_SCU_ALL>;
+
+                       #clock-cells = <1>;
+                       clock-indices = <
+                               R8A7793_CLK_SSI_ALL
+                               R8A7793_CLK_SSI9 R8A7793_CLK_SSI8 R8A7793_CLK_SSI7 R8A7793_CLK_SSI6 R8A7793_CLK_SSI5
+                               R8A7793_CLK_SSI4 R8A7793_CLK_SSI3 R8A7793_CLK_SSI2 R8A7793_CLK_SSI1 R8A7793_CLK_SSI0
+                               R8A7793_CLK_SCU_ALL
+                               R8A7793_CLK_SCU_DVC1 R8A7793_CLK_SCU_DVC0
+                               R8A7793_CLK_SCU_CTU1_MIX1 R8A7793_CLK_SCU_CTU0_MIX0
+                               R8A7793_CLK_SCU_SRC9 R8A7793_CLK_SCU_SRC8 R8A7793_CLK_SCU_SRC7 R8A7793_CLK_SCU_SRC6 R8A7793_CLK_SCU_SRC5
+                               R8A7793_CLK_SCU_SRC4 R8A7793_CLK_SCU_SRC3 R8A7793_CLK_SCU_SRC2 R8A7793_CLK_SCU_SRC1 R8A7793_CLK_SCU_SRC0
+                       >;
+                       clock-output-names =
+                               "ssi-all",
+                               "ssi9", "ssi8", "ssi7", "ssi6", "ssi5",
+                               "ssi4", "ssi3", "ssi2", "ssi1", "ssi0",
+                               "scu-all",
+                               "scu-dvc1", "scu-dvc0",
+                               "scu-ctu1-mix1", "scu-ctu0-mix0",
+                               "scu-src9", "scu-src8", "scu-src7", "scu-src6", "scu-src5",
+                               "scu-src4", "scu-src3", "scu-src2", "scu-src1", "scu-src0";
                };
                mstp11_clks: mstp11_clks@e615099c {
                        compatible = "renesas,r8a7793-mstp-clocks", "renesas,cpg-mstp-clocks";
        ipmmu_sy0: mmu@e6280000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xe6280000 0 0x1000>;
-               interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 224 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_sy1: mmu@e6290000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xe6290000 0 0x1000>;
-               interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_ds: mmu@e6740000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xe6740000 0 0x1000>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mp: mmu@ec680000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xec680000 0 0x1000>;
-               interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mx: mmu@fe951000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xfe951000 0 0x1000>;
-               interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 221 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_rt: mmu@ffc80000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xffc80000 0 0x1000>;
-               interrupts = <0 307 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 307 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_gp: mmu@e62a0000 {
                compatible = "renesas,ipmmu-r8a7793", "renesas,ipmmu-vmsa";
                reg = <0 0xe62a0000 0 0x1000>;
-               interrupts = <0 260 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 261 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
+
+       rcar_sound: sound@ec500000 {
+               /*
+                * #sound-dai-cells is required
+                *
+                * Single DAI : #sound-dai-cells = <0>;         <&rcar_sound>;
+                * Multi  DAI : #sound-dai-cells = <1>;         <&rcar_sound N>;
+                */
+               compatible = "renesas,rcar_sound-r8a7793", "renesas,rcar_sound-gen2";
+               reg =   <0 0xec500000 0 0x1000>, /* SCU */
+                       <0 0xec5a0000 0 0x100>,  /* ADG */
+                       <0 0xec540000 0 0x1000>, /* SSIU */
+                       <0 0xec541000 0 0x280>,  /* SSI */
+                       <0 0xec740000 0 0x200>;  /* Audio DMAC peri peri */
+               reg-names = "scu", "adg", "ssiu", "ssi", "audmapp";
+
+               clocks = <&mstp10_clks R8A7793_CLK_SSI_ALL>,
+                       <&mstp10_clks R8A7793_CLK_SSI9>, <&mstp10_clks R8A7793_CLK_SSI8>,
+                       <&mstp10_clks R8A7793_CLK_SSI7>, <&mstp10_clks R8A7793_CLK_SSI6>,
+                       <&mstp10_clks R8A7793_CLK_SSI5>, <&mstp10_clks R8A7793_CLK_SSI4>,
+                       <&mstp10_clks R8A7793_CLK_SSI3>, <&mstp10_clks R8A7793_CLK_SSI2>,
+                       <&mstp10_clks R8A7793_CLK_SSI1>, <&mstp10_clks R8A7793_CLK_SSI0>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC9>, <&mstp10_clks R8A7793_CLK_SCU_SRC8>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC7>, <&mstp10_clks R8A7793_CLK_SCU_SRC6>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC5>, <&mstp10_clks R8A7793_CLK_SCU_SRC4>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC3>, <&mstp10_clks R8A7793_CLK_SCU_SRC2>,
+                       <&mstp10_clks R8A7793_CLK_SCU_SRC1>, <&mstp10_clks R8A7793_CLK_SCU_SRC0>,
+                       <&mstp10_clks R8A7793_CLK_SCU_DVC0>, <&mstp10_clks R8A7793_CLK_SCU_DVC1>,
+                       <&audio_clk_a>, <&audio_clk_b>, <&audio_clk_c>, <&m2_clk>;
+               clock-names = "ssi-all",
+                               "ssi.9", "ssi.8", "ssi.7", "ssi.6", "ssi.5",
+                               "ssi.4", "ssi.3", "ssi.2", "ssi.1", "ssi.0",
+                               "src.9", "src.8", "src.7", "src.6", "src.5",
+                               "src.4", "src.3", "src.2", "src.1", "src.0",
+                               "dvc.0", "dvc.1",
+                               "clk_a", "clk_b", "clk_c", "clk_i";
+               power-domains = <&cpg_clocks>;
+
+               status = "disabled";
+
+               rcar_sound,dvc {
+                       dvc0: dvc@0 {
+                               dmas = <&audma0 0xbc>;
+                               dma-names = "tx";
+                       };
+                       dvc1: dvc@1 {
+                               dmas = <&audma0 0xbe>;
+                               dma-names = "tx";
+                       };
+               };
+
+               rcar_sound,src {
+                       src0: src@0 {
+                               interrupts = <GIC_SPI 352 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x85>, <&audma1 0x9a>;
+                               dma-names = "rx", "tx";
+                       };
+                       src1: src@1 {
+                               interrupts = <GIC_SPI 353 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x87>, <&audma1 0x9c>;
+                               dma-names = "rx", "tx";
+                       };
+                       src2: src@2 {
+                               interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x89>, <&audma1 0x9e>;
+                               dma-names = "rx", "tx";
+                       };
+                       src3: src@3 {
+                               interrupts = <GIC_SPI 355 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x8b>, <&audma1 0xa0>;
+                               dma-names = "rx", "tx";
+                       };
+                       src4: src@4 {
+                               interrupts = <GIC_SPI 356 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x8d>, <&audma1 0xb0>;
+                               dma-names = "rx", "tx";
+                       };
+                       src5: src@5 {
+                               interrupts = <GIC_SPI 357 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x8f>, <&audma1 0xb2>;
+                               dma-names = "rx", "tx";
+                       };
+                       src6: src@6 {
+                               interrupts = <GIC_SPI 358 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x91>, <&audma1 0xb4>;
+                               dma-names = "rx", "tx";
+                       };
+                       src7: src@7 {
+                               interrupts = <GIC_SPI 359 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x93>, <&audma1 0xb6>;
+                               dma-names = "rx", "tx";
+                       };
+                       src8: src@8 {
+                               interrupts = <GIC_SPI 360 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x95>, <&audma1 0xb8>;
+                               dma-names = "rx", "tx";
+                       };
+                       src9: src@9 {
+                               interrupts = <GIC_SPI 361 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x97>, <&audma1 0xba>;
+                               dma-names = "rx", "tx";
+                       };
+               };
+
+               rcar_sound,ssi {
+                       ssi0: ssi@0 {
+                               interrupts = <GIC_SPI 370 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x01>, <&audma1 0x02>, <&audma0 0x15>, <&audma1 0x16>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi1: ssi@1 {
+                               interrupts = <GIC_SPI 371 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x03>, <&audma1 0x04>, <&audma0 0x49>, <&audma1 0x4a>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi2: ssi@2 {
+                               interrupts = <GIC_SPI 372 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x05>, <&audma1 0x06>, <&audma0 0x63>, <&audma1 0x64>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi3: ssi@3 {
+                               interrupts = <GIC_SPI 373 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x07>, <&audma1 0x08>, <&audma0 0x6f>, <&audma1 0x70>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi4: ssi@4 {
+                               interrupts = <GIC_SPI 374 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x09>, <&audma1 0x0a>, <&audma0 0x71>, <&audma1 0x72>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi5: ssi@5 {
+                               interrupts = <GIC_SPI 375 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x0b>, <&audma1 0x0c>, <&audma0 0x73>, <&audma1 0x74>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi6: ssi@6 {
+                               interrupts = <GIC_SPI 376 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x0d>, <&audma1 0x0e>, <&audma0 0x75>, <&audma1 0x76>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi7: ssi@7 {
+                               interrupts = <GIC_SPI 377 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x0f>, <&audma1 0x10>, <&audma0 0x79>, <&audma1 0x7a>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi8: ssi@8 {
+                               interrupts = <GIC_SPI 378 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x11>, <&audma1 0x12>, <&audma0 0x7b>, <&audma1 0x7c>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+                       ssi9: ssi@9 {
+                               interrupts = <GIC_SPI 379 IRQ_TYPE_LEVEL_HIGH>;
+                               dmas = <&audma0 0x13>, <&audma1 0x14>, <&audma0 0x7d>, <&audma1 0x7e>;
+                               dma-names = "rx", "tx", "rxu", "txu";
+                       };
+               };
+       };
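+
+       /*
+        * Consumer sketch (illustrative): with the multi-DAI form
+        * (#sound-dai-cells = <1>) a board sound card references one DAI as
+        * <&rcar_sound N>, e.g.:
+        *
+        *      sound-dai = <&rcar_sound 0>;
+        */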
 };
index 2394e4883786f13eb0a4b005fb6a81e456f9a3f0..ca9bc4fff28751ea9e2b11b6afd23c7920449759 100644 (file)
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        du_pins: du {
                renesas,groups = "du1_rgb666", "du1_sync", "du1_disp", "du1_dotclkout0";
                renesas,function = "du";
                renesas,function = "scif2";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
+&pfc {
+       qspi_pins: spi0 {
+               renesas,groups = "qspi_ctrl", "qspi_data4";
+               renesas,function = "qspi";
+       };
+};
+
 &ether {
        pinctrl-0 = <&ether_pins &phy1_pins>;
        pinctrl-names = "default";
 
        status = "okay";
 };
+
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
+&qspi {
+       pinctrl-0 = <&qspi_pins>;
+       pinctrl-names = "default";
+
+       status = "okay";
+
+       flash@0 {
+               compatible = "spansion,s25fl512s", "jedec,spi-nor";
+               reg = <0>;
+               spi-max-frequency = <30000000>;
+               spi-tx-bus-width = <4>;
+               spi-rx-bus-width = <4>;
+               spi-cpol;
+               spi-cpha;
+               m25p,fast-read;
+
+               partitions {
+                       compatible = "fixed-partitions";
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+
+                       partition@0 {
+                               label = "loader";
+                               reg = <0x00000000 0x00040000>;
+                               read-only;
+                       };
+                       partition@40000 {
+                               label = "system";
+                               reg = <0x00040000 0x00040000>;
+                               read-only;
+                       };
+                       partition@80000 {
+                               label = "user";
+                               reg = <0x00080000 0x03f80000>;
+                       };
+               };
+       };
+};
index 5153e3af25d94c0f8f14a93b5f894fa60e217fce..66f077a3ca41d1b06a367ca265ee5aac48f71bf2 100644 (file)
                states = <3300000 1
                          1800000 0>;
        };
+
+       vga-encoder {
+               compatible = "adi,adv7123";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7123_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb1>;
+                               };
+                       };
+                       port@1 {
+                               reg = <1>;
+                               adv7123_out: endpoint {
+                                       remote-endpoint = <&vga_in>;
+                               };
+                       };
+               };
+       };
+
+       hdmi-out {
+               compatible = "hdmi-connector";
+               type = "a";
+
+               port {
+                       hdmi_con: endpoint {
+                               remote-endpoint = <&adv7511_out>;
+                       };
+               };
+       };
+
+       vga {
+               compatible = "vga-connector";
+
+               port {
+                       vga_in: endpoint {
+                               remote-endpoint = <&adv7123_out>;
+                       };
+               };
+       };
+
+       x2_clk: x2-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <148500000>;
+       };
+
+       x3_clk: x3-clock {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <74250000>;
+       };
 };
 
 &extal_clk {
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        scif2_pins: serial2 {
                renesas,groups = "scif2_data";
                renesas,function = "scif2";
        };
 
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk";
+               renesas,function = "scif_clk";
+       };
+
        ether_pins: ether {
                renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
                renesas,function = "eth";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &ether {
        pinctrl-0 = <&ether_pins &phy1_pins>;
        pinctrl-names = "default";
                        };
                };
        };
+
+       hdmi@39 {
+               compatible = "adi,adv7511w";
+               reg = <0x39>;
+               interrupt-parent = <&gpio5>;
+               interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+
+               adi,input-depth = <8>;
+               adi,input-colorspace = "rgb";
+               adi,input-clock = "1x";
+               adi,input-style = <1>;
+               adi,input-justification = "evenly";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+                               adv7511_in: endpoint {
+                                       remote-endpoint = <&du_out_rgb0>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               adv7511_out: endpoint {
+                                       remote-endpoint = <&hdmi_con>;
+                               };
+                       };
+               };
+       };
 };
 
 &mmcif0 {
 &usbphy {
        status = "okay";
 };
+
+&du {
+       status = "okay";
+
+       clocks = <&mstp7_clks R8A7794_CLK_DU0>,
+                <&mstp7_clks R8A7794_CLK_DU0>,
+                <&x2_clk>, <&x3_clk>;
+       clock-names = "du.0", "du.1", "dclkin.0", "dclkin.1";
+
+       ports {
+               port@0 {
+                       endpoint {
+                               remote-endpoint = <&adv7511_in>;
+                       };
+               };
+               port@1 {
+                       endpoint {
+                               remote-endpoint = <&adv7123_in>;
+                       };
+               };
+       };
+};
index 6c78f1fae90f7ae43b42f6a8baea078a73fd30f4..7d4c5597af5b54bd76d55f766324f8ff7985ea4e 100644 (file)
                        <0 0xf1002000 0 0x1000>,
                        <0 0xf1004000 0 0x2000>,
                        <0 0xf1006000 0 0x2000>;
-               interrupts = <1 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
+               interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_HIGH)>;
        };
 
        gpio0: gpio@e6050000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6050000 0 0x50>;
-               interrupts = <0 4 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 0 32>;
@@ -78,7 +78,7 @@
        gpio1: gpio@e6051000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6051000 0 0x50>;
-               interrupts = <0 5 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 32 26>;
@@ -91,7 +91,7 @@
        gpio2: gpio@e6052000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6052000 0 0x50>;
-               interrupts = <0 6 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 64 32>;
        gpio3: gpio@e6053000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6053000 0 0x50>;
-               interrupts = <0 7 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 96 32>;
        gpio4: gpio@e6054000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6054000 0 0x50>;
-               interrupts = <0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 128 32>;
        gpio5: gpio@e6055000 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6055000 0 0x50>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 160 28>;
        gpio6: gpio@e6055400 {
                compatible = "renesas,gpio-r8a7794", "renesas,gpio-rcar";
                reg = <0 0xe6055400 0 0x50>;
-               interrupts = <0 10 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
                #gpio-cells = <2>;
                gpio-controller;
                gpio-ranges = <&pfc 0 192 26>;
        cmt0: timer@ffca0000 {
                compatible = "renesas,cmt-48-gen2";
                reg = <0 0xffca0000 0 0x1004>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks R8A7794_CLK_CMT0>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
        cmt1: timer@e6130000 {
                compatible = "renesas,cmt-48-gen2";
                reg = <0 0xe6130000 0 0x1004>;
-               interrupts = <0 120 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 121 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 122 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 123 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 124 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 125 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 126 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 127 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 126 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&cpg_clocks>;
 
        timer {
                compatible = "arm,armv7-timer";
-               interrupts = <1 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
-                            <1 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_LEVEL_LOW)>;
        };
 
        irqc0: interrupt-controller@e61c0000 {
                #interrupt-cells = <2>;
                interrupt-controller;
                reg = <0 0xe61c0000 0 0x200>;
-               interrupts = <0 0 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 1 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 2 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 3 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 12 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 13 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 14 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 15 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 16 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 17 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks R8A7794_CLK_IRQC>;
                power-domains = <&cpg_clocks>;
        };
        dmac0: dma-controller@e6700000 {
                compatible = "renesas,dmac-r8a7794", "renesas,rcar-dmac";
                reg = <0 0xe6700000 0 0x20000>;
-               interrupts = <0 197 IRQ_TYPE_LEVEL_HIGH
-                             0 200 IRQ_TYPE_LEVEL_HIGH
-                             0 201 IRQ_TYPE_LEVEL_HIGH
-                             0 202 IRQ_TYPE_LEVEL_HIGH
-                             0 203 IRQ_TYPE_LEVEL_HIGH
-                             0 204 IRQ_TYPE_LEVEL_HIGH
-                             0 205 IRQ_TYPE_LEVEL_HIGH
-                             0 206 IRQ_TYPE_LEVEL_HIGH
-                             0 207 IRQ_TYPE_LEVEL_HIGH
-                             0 208 IRQ_TYPE_LEVEL_HIGH
-                             0 209 IRQ_TYPE_LEVEL_HIGH
-                             0 210 IRQ_TYPE_LEVEL_HIGH
-                             0 211 IRQ_TYPE_LEVEL_HIGH
-                             0 212 IRQ_TYPE_LEVEL_HIGH
-                             0 213 IRQ_TYPE_LEVEL_HIGH
-                             0 214 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        dmac1: dma-controller@e6720000 {
                compatible = "renesas,dmac-r8a7794", "renesas,rcar-dmac";
                reg = <0 0xe6720000 0 0x20000>;
-               interrupts = <0 220 IRQ_TYPE_LEVEL_HIGH
-                             0 216 IRQ_TYPE_LEVEL_HIGH
-                             0 217 IRQ_TYPE_LEVEL_HIGH
-                             0 218 IRQ_TYPE_LEVEL_HIGH
-                             0 219 IRQ_TYPE_LEVEL_HIGH
-                             0 308 IRQ_TYPE_LEVEL_HIGH
-                             0 309 IRQ_TYPE_LEVEL_HIGH
-                             0 310 IRQ_TYPE_LEVEL_HIGH
-                             0 311 IRQ_TYPE_LEVEL_HIGH
-                             0 312 IRQ_TYPE_LEVEL_HIGH
-                             0 313 IRQ_TYPE_LEVEL_HIGH
-                             0 314 IRQ_TYPE_LEVEL_HIGH
-                             0 315 IRQ_TYPE_LEVEL_HIGH
-                             0 316 IRQ_TYPE_LEVEL_HIGH
-                             0 317 IRQ_TYPE_LEVEL_HIGH
-                             0 318 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "error",
                                "ch0", "ch1", "ch2", "ch3",
                                "ch4", "ch5", "ch6", "ch7",
        };
 
        scifa0: serial@e6c40000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c40000 0 64>;
-               interrupts = <0 144 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x21>, <&dmac0 0x22>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa1: serial@e6c50000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c50000 0 64>;
-               interrupts = <0 145 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x25>, <&dmac0 0x26>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa2: serial@e6c60000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c60000 0 64>;
-               interrupts = <0 151 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x27>, <&dmac0 0x28>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa3: serial@e6c70000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c70000 0 64>;
-               interrupts = <0 29 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7794_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1b>, <&dmac0 0x1c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa4: serial@e6c78000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c78000 0 64>;
-               interrupts = <0 30 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7794_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1f>, <&dmac0 0x20>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifa5: serial@e6c80000 {
-               compatible = "renesas,scifa-r8a7794", "renesas,scifa";
+               compatible = "renesas,scifa-r8a7794",
+                            "renesas,rcar-gen2-scifa", "renesas,scifa";
                reg = <0 0xe6c80000 0 64>;
-               interrupts = <0 31 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp11_clks R8A7794_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x23>, <&dmac0 0x24>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb0: serial@e6c20000 {
-               compatible = "renesas,scifb-r8a7794", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7794",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c20000 0 64>;
-               interrupts = <0 148 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFB0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x3d>, <&dmac0 0x3e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb1: serial@e6c30000 {
-               compatible = "renesas,scifb-r8a7794", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7794",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6c30000 0 64>;
-               interrupts = <0 149 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFB1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x19>, <&dmac0 0x1a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scifb2: serial@e6ce0000 {
-               compatible = "renesas,scifb-r8a7794", "renesas,scifb";
+               compatible = "renesas,scifb-r8a7794",
+                            "renesas,rcar-gen2-scifb", "renesas,scifb";
                reg = <0 0xe6ce0000 0 64>;
-               interrupts = <0 150 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks R8A7794_CLK_SCIFB2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                dmas = <&dmac0 0x1d>, <&dmac0 0x1e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif0: serial@e6e60000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e60000 0 64>;
-               interrupts = <0 152 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x29>, <&dmac0 0x2a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif1: serial@e6e68000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e68000 0 64>;
-               interrupts = <0 153 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2d>, <&dmac0 0x2e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif2: serial@e6e58000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6e58000 0 64>;
-               interrupts = <0 22 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2b>, <&dmac0 0x2c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif3: serial@e6ea8000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ea8000 0 64>;
-               interrupts = <0 23 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF3>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF3>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x2f>, <&dmac0 0x30>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif4: serial@e6ee0000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee0000 0 64>;
-               interrupts = <0 24 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF4>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF4>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfb>, <&dmac0 0xfc>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        scif5: serial@e6ee8000 {
-               compatible = "renesas,scif-r8a7794", "renesas,scif";
+               compatible = "renesas,scif-r8a7794", "renesas,rcar-gen2-scif",
+                            "renesas,scif";
                reg = <0 0xe6ee8000 0 64>;
-               interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_SCIF5>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_SCIF5>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0xfd>, <&dmac0 0xfe>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif0: serial@e62c0000 {
-               compatible = "renesas,hscif-r8a7794", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7794",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c0000 0 96>;
-               interrupts = <0 154 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_HSCIF0>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_HSCIF0>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x39>, <&dmac0 0x3a>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif1: serial@e62c8000 {
-               compatible = "renesas,hscif-r8a7794", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7794",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62c8000 0 96>;
-               interrupts = <0 155 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_HSCIF1>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_HSCIF1>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x4d>, <&dmac0 0x4e>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        };
 
        hscif2: serial@e62d0000 {
-               compatible = "renesas,hscif-r8a7794", "renesas,hscif";
+               compatible = "renesas,hscif-r8a7794",
+                            "renesas,rcar-gen2-hscif", "renesas,hscif";
                reg = <0 0xe62d0000 0 96>;
-               interrupts = <0 21 IRQ_TYPE_LEVEL_HIGH>;
-               clocks = <&mstp7_clks R8A7794_CLK_HSCIF2>;
-               clock-names = "sci_ick";
+               interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&mstp7_clks R8A7794_CLK_HSCIF2>, <&zs_clk>,
+                        <&scif_clk>;
+               clock-names = "fck", "brg_int", "scif_clk";
                dmas = <&dmac0 0x3b>, <&dmac0 0x3c>;
                dma-names = "tx", "rx";
                power-domains = <&cpg_clocks>;
        ether: ethernet@ee700000 {
                compatible = "renesas,ether-r8a7794";
                reg = <0 0xee700000 0 0x400>;
-               interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7794_CLK_ETHER>;
                power-domains = <&cpg_clocks>;
                phy-mode = "rmii";
        i2c0: i2c@e6508000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6508000 0 0x40>;
-               interrupts = <0 287 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 287 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C0>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c1: i2c@e6518000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6518000 0 0x40>;
-               interrupts = <0 288 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 288 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C1>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c2: i2c@e6530000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6530000 0 0x40>;
-               interrupts = <0 286 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 286 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C2>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c3: i2c@e6540000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6540000 0 0x40>;
-               interrupts = <0 290 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 290 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C3>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c4: i2c@e6520000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6520000 0 0x40>;
-               interrupts = <0 19 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C4>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        i2c5: i2c@e6528000 {
                compatible = "renesas,i2c-r8a7794";
                reg = <0 0xe6528000 0 0x40>;
-               interrupts = <0 20 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_I2C5>;
                power-domains = <&cpg_clocks>;
                #address-cells = <1>;
        mmcif0: mmc@ee200000 {
                compatible = "renesas,mmcif-r8a7794", "renesas,sh-mmcif";
                reg = <0 0xee200000 0 0x80>;
-               interrupts = <0 169 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_MMCIF0>;
                dmas = <&dmac0 0xd1>, <&dmac0 0xd2>;
                dma-names = "tx", "rx";
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-r8a7794";
                reg = <0 0xee100000 0 0x200>;
-               interrupts = <0 165 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 165 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_SDHI0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi1: sd@ee140000 {
                compatible = "renesas,sdhi-r8a7794";
                reg = <0 0xee140000 0 0x100>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_SDHI1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        sdhi2: sd@ee160000 {
                compatible = "renesas,sdhi-r8a7794";
                reg = <0 0xee160000 0 0x100>;
-               interrupts = <0 168 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks R8A7794_CLK_SDHI2>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        qspi: spi@e6b10000 {
                compatible = "renesas,qspi-r8a7794", "renesas,qspi";
                reg = <0 0xe6b10000 0 0x2c>;
-               interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp9_clks R8A7794_CLK_QSPI_MOD>;
                dmas = <&dmac0 0x17>, <&dmac0 0x18>;
                dma-names = "tx", "rx";
        vin0: video@e6ef0000 {
                compatible = "renesas,vin-r8a7794";
                reg = <0 0xe6ef0000 0 0x1000>;
-               interrupts = <0 188 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7794_CLK_VIN0>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
        vin1: video@e6ef1000 {
                compatible = "renesas,vin-r8a7794";
                reg = <0 0xe6ef1000 0 0x1000>;
-               interrupts = <0 189 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp8_clks R8A7794_CLK_VIN1>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                device_type = "pci";
                reg = <0 0xee090000 0 0xc00>,
                      <0 0xee080000 0 0x1100>;
-               interrupts = <0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7794_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee080000 0 0xee080000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 108 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 108 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
                device_type = "pci";
                reg = <0 0xee0d0000 0 0xc00>,
                      <0 0xee0c0000 0 0x1100>;
-               interrupts = <0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7794_CLK_EHCI>;
                power-domains = <&cpg_clocks>;
                status = "disabled";
                #interrupt-cells = <1>;
                ranges = <0x02000000 0 0xee0c0000 0 0xee0c0000 0 0x00010000>;
                interrupt-map-mask = <0xff00 0 0 0x7>;
-               interrupt-map = <0x0000 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x0800 0 0 1 &gic 0 113 IRQ_TYPE_LEVEL_HIGH
-                                0x1000 0 0 2 &gic 0 113 IRQ_TYPE_LEVEL_HIGH>;
+               interrupt-map = <0x0000 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x0800 0 0 1 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH
+                                0x1000 0 0 2 &gic GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
 
                usb@0,1 {
                        reg = <0x800 0 0 0 0>;
        };
 
        hsusb: usb@e6590000 {
-               compatible = "renesas,usbhs-r8a7794";
+               compatible = "renesas,usbhs-r8a7794", "renesas,rcar-gen2-usbhs";
                reg = <0 0xe6590000 0 0x100>;
-               interrupts = <0 107 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7794_CLK_HSUSB>;
                power-domains = <&cpg_clocks>;
                renesas,buswait = <4>;
                compatible = "renesas,du-r8a7794";
                reg = <0 0xfeb00000 0 0x40000>;
                reg-names = "du";
-               interrupts = <0 256 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 268 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp7_clks R8A7794_CLK_DU0>,
                         <&mstp7_clks R8A7794_CLK_DU0>;
                clock-names = "du.0", "du.1";
                        clock-output-names = "extal";
                };
 
+               /* External SCIF clock */
+               scif_clk: scif {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       /* This value must be overridden by the board. */
+                       clock-frequency = <0>;
+                       status = "disabled";
+               };
+
                /* Special CPG clocks */
                cpg_clocks: cpg_clocks@e6150000 {
                        compatible = "renesas,r8a7794-cpg-clocks",
        ipmmu_sy0: mmu@e6280000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xe6280000 0 0x1000>;
-               interrupts = <0 223 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 224 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_sy1: mmu@e6290000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xe6290000 0 0x1000>;
-               interrupts = <0 225 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_ds: mmu@e6740000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xe6740000 0 0x1000>;
-               interrupts = <0 198 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 199 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 198 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mp: mmu@ec680000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xec680000 0 0x1000>;
-               interrupts = <0 226 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_mx: mmu@fe951000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xfe951000 0 0x1000>;
-               interrupts = <0 222 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 221 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 222 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 221 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
        ipmmu_gp: mmu@e62a0000 {
                compatible = "renesas,ipmmu-r8a7794", "renesas,ipmmu-vmsa";
                reg = <0 0xe62a0000 0 0x1000>;
-               interrupts = <0 260 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 261 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 260 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 261 IRQ_TYPE_LEVEL_HIGH>;
                #iommu-cells = <1>;
                status = "disabled";
        };
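Note on the r8a7794.dtsi hunks above: bare GIC specifiers such as <0 144 ...> are rewritten with the GIC_SPI macro from <dt-bindings/interrupt-controller/arm-gic.h>, the serial nodes gain the R-Car Gen2 family compatibles, the SCIFA/SCIFB nodes rename their clock to "fck", and the SCIF/HSCIF nodes additionally gain the "brg_int" and "scif_clk" baud-rate-generator clocks. The new scif_clk fixed-clock is added disabled with a zero frequency because, as its comment says, the board has to supply the real value. A minimal board-level override might look like the sketch below; the 14.7456 MHz figure is an assumed example, not taken from this commit.

	&scif_clk {
		clock-frequency = <14745600>;
		status = "okay";
	};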
index 992f9cadbc04e1fa5534b35c2c4966abcaff42b4..d5913fe128eed0e7b39a8a8ff650bf73f57b63ac 100644 (file)
        model = "Rockchip RK3036 KylinBoard";
        compatible = "rockchip,rk3036-kylin", "rockchip,rk3036";
 
+       leds: gpio-leds {
+               compatible = "gpio-leds";
+
+               work {
+                       gpios = <&gpio2 30 GPIO_ACTIVE_HIGH>;
+                       label = "kylin:red:led";
+                       pinctrl-names = "default";
+                       pinctrl-0 = <&led_ctl>;
+               };
+       };
+
+       sdio_pwrseq: sdio-pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               pinctrl-names = "default";
+               pinctrl-0 = <&bt_wake_h>;
+
+               /*
+                * On the module itself this is one of these (depending
+                * on the actual card populated):
+                * - SDIO_RESET_L_WL_REG_ON
+                * - SDIO_RESET_L_WL_RST
+                * - SDIO_RESET_L_BT_EN
+                */
+               reset-gpios = <&gpio0 26 GPIO_ACTIVE_LOW>, /* WL_REG_ON */
+                             <&gpio0 27 GPIO_ACTIVE_LOW>, /* WL_RST */
+                             <&gpio2 9  GPIO_ACTIVE_LOW>; /* BT_EN */
+       };
+
+       sound {
+               compatible = "simple-audio-card";
+               simple-audio-card,format = "i2s";
+               simple-audio-card,name = "rockchip,rt5616-codec";
+               simple-audio-card,mclk-fs = <512>;
+               simple-audio-card,widgets =
+                       "Microphone", "Microphone Jack",
+                       "Headphone", "Headphone Jack";
+               simple-audio-card,routing =
+                       "MIC1", "Microphone Jack",
+                       "MIC2", "Microphone Jack",
+                       "Microphone Jack", "micbias1",
+                       "Headphone Jack", "HPOL",
+                       "Headphone Jack", "HPOR";
+
+               simple-audio-card,cpu {
+                       sound-dai = <&i2s>;
+               };
+
+               simple-audio-card,codec {
+                       sound-dai = <&rt5616>;
+               };
+       };
+
        vcc_sys: vsys-regulator {
                compatible = "regulator-fixed";
                regulator-name = "vcc_sys";
 
 &i2c2 {
        status = "okay";
+
+       rt5616: rt5616@1b {
+               compatible = "rt5616";
+               reg = <0x1b>;
+               clocks = <&cru SCLK_I2S_OUT>;
+               clock-names = "mclk";
+               #sound-dai-cells = <0>;
+       };
+};
+
+&i2s {
+       #sound-dai-cells = <0>;
+       status = "okay";
 };
 
 &sdio {
 
        broken-cd;
        bus-width = <4>;
+       cap-sd-highspeed;
        cap-sdio-irq;
        default-sample-phase = <90>;
        keep-power-in-suspend;
+       mmc-pwrseq = <&sdio_pwrseq>;
        non-removable;
        num-slots = <1>;
        pinctrl-names = "default";
        pinctrl-0 = <&sdio_clk &sdio_cmd &sdio_bus4>;
+       sd-uhs-sdr12;
+       sd-uhs-sdr25;
+       sd-uhs-sdr50;
+       sd-uhs-sdr104;
+};
+
+&sdmmc {
+       bus-width = <4>;
+       cap-mmc-highspeed;
+       cap-sd-highspeed;
+       card-detect-delay = <200>;
+       disable-wp;
+       num-slots = <1>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&sdmmc_clk>, <&sdmmc_cmd>, <&sdmmc_cd>, <&sdmmc_bus4>;
+};
+
+&uart0 {
+       status = "okay";
 };
 
 &uart2 {
 };
 
 &pinctrl {
+       leds {
+               led_ctl: led-ctl {
+                       rockchip,pins = <2 30 RK_FUNC_GPIO &pcfg_pull_none>;
+               };
+       };
+
        pmic {
                pmic_int: pmic-int {
                        rockchip,pins = <2 2 RK_FUNC_GPIO &pcfg_pull_default>;
                };
        };
 
+       sdio {
+               bt_wake_h: bt-wake-h {
+                       rockchip,pins = <2 8 RK_FUNC_GPIO &pcfg_pull_default>;
+               };
+       };
+
+       sdmmc {
+               sdmmc_pwr: sdmmc-pwr {
+                       rockchip,pins = <2 28 RK_FUNC_GPIO &pcfg_pull_none>;
+               };
+       };
+
        sleep {
                global_pwroff: global-pwroff {
                        rockchip,pins = <2 7 RK_FUNC_1 &pcfg_pull_none>;
index b9567c1e068771ba2d822d4c2fae79173a8a29e6..3864fa142df045e6be19fa2480d1c97d6870f0c2 100644 (file)
@@ -60,6 +60,7 @@
                serial0 = &uart0;
                serial1 = &uart1;
                serial2 = &uart2;
+               spi = &spi;
        };
 
        memory {
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMAC2>;
                        clock-names = "apb_pclk";
                };
        };
 
        usb_otg: usb@10180000 {
-               compatible = "rockchip,rk3288-usb", "rockchip,rk3066-usb",
+               compatible = "rockchip,rk3036-usb", "rockchip,rk3066-usb",
                                "snps,dwc2";
                reg = <0x10180000 0x40000>;
                interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        usb_host: usb@101c0000 {
-               compatible = "rockchip,rk3288-usb", "rockchip,rk3066-usb",
+               compatible = "rockchip,rk3036-usb", "rockchip,rk3066-usb",
                                "snps,dwc2";
                reg = <0x101c0000 0x40000>;
                interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        emmc: dwmmc@1021c000 {
-               compatible = "rockchip,rk3288-dw-mshc";
+               compatible = "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc";
                reg = <0x1021c000 0x4000>;
                interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
                broken-cd;
                interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                #size-cells = <0>;
-               clock-names = "i2s_hclk", "i2s_clk";
-               clocks = <&cru HCLK_I2S>, <&cru SCLK_I2S>;
+               clock-names = "i2s_clk", "i2s_hclk";
+               clocks = <&cru SCLK_I2S>, <&cru HCLK_I2S>;
                dmas = <&pdma 0>, <&pdma 1>;
                dma-names = "tx", "rx";
                pinctrl-names = "default";
        };
 
        i2c1: i2c@20056000 {
-               compatible = "rockchip,rk3288-i2c";
+               compatible = "rockchip,rk3036-i2c", "rockchip,rk3288-i2c";
                reg = <0x20056000 0x1000>;
                interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
        };
 
        i2c2: i2c@2005a000 {
-               compatible = "rockchip,rk3288-i2c";
+               compatible = "rockchip,rk3036-i2c", "rockchip,rk3288-i2c";
                reg = <0x2005a000 0x1000>;
                interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
        };
 
        i2c0: i2c@20072000 {
-               compatible = "rockchip,rk3288-i2c";
+               compatible = "rockchip,rk3036-i2c", "rockchip,rk3288-i2c";
                reg = <0x20072000 0x1000>;
                interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
                #address-cells = <1>;
                status = "disabled";
        };
 
+       spi: spi@20074000 {
+               compatible = "rockchip,rockchip-spi";
+               reg = <0x20074000 0x1000>;
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&cru PCLK_SPI>, <&cru SCLK_SPI>;
+               clock-names = "apb-pclk", "spi_pclk";
+               dmas = <&pdma 8>, <&pdma 9>;
+               dma-names = "tx", "rx";
+               pinctrl-names = "default";
+               pinctrl-0 = <&spi_txd &spi_rxd &spi_clk &spi_cs0>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disabled";
+       };
+
        pinctrl: pinctrl {
                compatible = "rockchip,rk3036-pinctrl";
                rockchip,grf = <&grf>;
 
                i2s {
                        i2s_bus: i2s-bus {
-                               rockchip,pins = <1 0 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 1 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 2 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 3 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 4 RK_FUNC_1 &pcfg_pull_none>,
-                                               <1 5 RK_FUNC_1 &pcfg_pull_none>;
+                               rockchip,pins = <1 0 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 1 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 2 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 3 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 4 RK_FUNC_1 &pcfg_pull_default>,
+                                               <1 5 RK_FUNC_1 &pcfg_pull_default>;
                        };
                };
 
                        };
                        /* no rts / cts for uart2 */
                };
+
+               spi {
+                       spi_txd: spi-txd {
+                               rockchip,pins = <1 29 RK_FUNC_3 &pcfg_pull_default>;
+                       };
+
+                       spi_rxd: spi-rxd {
+                               rockchip,pins = <1 28 RK_FUNC_3 &pcfg_pull_default>;
+                       };
+
+                       spi_clk: spi-clk {
+                               rockchip,pins = <2 0 RK_FUNC_2 &pcfg_pull_default>;
+                       };
+
+                       spi_cs0: spi-cs0 {
+                               rockchip,pins = <1 30 RK_FUNC_3 &pcfg_pull_default>;
+                       };
+
+                       spi_cs1: spi-cs1 {
+                               rockchip,pins = <1 31 RK_FUNC_3 &pcfg_pull_default>;
+                       };
+               };
        };
 };
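Note on the rk3036.dtsi hunks above: the new spi@20074000 controller is registered with status = "disabled" and exposed through the added "spi" alias, so a board .dts has to enable it and describe its slave devices. An illustrative board fragment is sketched below; the SPI NOR flash node is an assumption for the example and not part of this commit.

	&spi {
		status = "okay";

		flash@0 {
			compatible = "jedec,spi-nor";
			reg = <0>;			/* chip select 0 (cf. the spi_cs0 pinctrl group) */
			spi-max-frequency = <25000000>;
		};
	};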
index 38c91a839795f3cf77103f56fdb7bb0185ad2854..6d2a5b3a84a86635f65b8d43aae475040dc54c4e 100644 (file)
                reg = <0x60000000 0x40000000>;
        };
 
+       vdd_log: vdd-log {
+               compatible = "pwm-regulator";
+               pwms = <&pwm3 0 1000>;
+               regulator-name = "vdd_log";
+               regulator-min-microvolt = <1200000>;
+               regulator-max-microvolt = <1200000>;
+               regulator-always-on;
+               voltage-table = <1000000 100>,
+                               <1200000 42>;
+               status = "okay";
+       };
+
        vcc_sd0: fixed-regulator {
                compatible = "regulator-fixed";
                regulator-name = "sdmmc-supply";
@@ -74,7 +86,7 @@
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
                button@1 {
@@ -82,7 +94,6 @@
                        linux,code = <104>;
                        label = "GPIO Key Vol-";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <0>;
                        debounce-interval = <100>;
                };
                /* VOL+ comes somehow thru the ADC */
        disable-wp;
 };
 
+&pwm3 {
+       status = "okay";
+};
+
 &uart0 {
        status = "okay";
 };
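Note on the vdd_log pwm-regulator added above (the same node is added to the next two board files as well): each voltage-table entry pairs an output voltage in microvolts with a PWM duty cycle in percent, and the identical min/max constraints pin the rail at 1.2 V. Annotated with the values from the diff:

	voltage-table = <1000000 100>,	/* 1.000 V <-> 100 % duty on pwm3 */
			<1200000  42>;	/* 1.200 V <->  42 % duty (the level actually used) */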
index 7cdc308bfac54c1ec6e286f29c4ad0c846df41aa..a2b763e949b4648f60e3929d0afba5afc711a61b 100644 (file)
                reg = <0x60000000 0x40000000>;
        };
 
+       vdd_log: vdd-log {
+               compatible = "pwm-regulator";
+               pwms = <&pwm3 0 1000>;
+               regulator-name = "vdd_log";
+               regulator-min-microvolt = <1200000>;
+               regulator-max-microvolt = <1200000>;
+               regulator-always-on;
+               voltage-table = <1000000 100>,
+                               <1200000 42>;
+               status = "okay";
+       };
+
        vcc_sd0: sdmmc-regulator {
                compatible = "regulator-fixed";
                regulator-name = "sdmmc-supply";
        };
 };
 
+&pwm3 {
+       status = "okay";
+};
+
 &uart0 {
        status = "okay";
 };
index 341c1f87936a7d20114175802e79f0a3196e9a25..05533005a809cef3d40731a2f3b4d1477a531d26 100644 (file)
@@ -65,7 +65,7 @@
                #size-cells = <0>;
 
                button@0 {
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        gpios = <&gpio6 2 GPIO_ACTIVE_LOW>;
                        label = "GPIO Power";
                        linux,code = <116>;
                };
        };
 
+       vdd_log: vdd-log {
+               compatible = "pwm-regulator";
+               pwms = <&pwm3 0 1000>;
+               regulator-name = "vdd_log";
+               regulator-min-microvolt = <1200000>;
+               regulator-max-microvolt = <1200000>;
+               regulator-always-on;
+               voltage-table = <1000000 100>,
+                               <1200000 42>;
+               status = "okay";
+       };
+
        vsys: vsys-regulator {
                compatible = "regulator-fixed";
                regulator-name = "vsys";
        status = "okay";
 };
 
+&pwm3 {
+       status = "okay";
+};
+
 &saradc {
        vref-supply = <&vcc_25>;
        status = "okay";
index 58bac5053858bc95bcc8f089fd32deb9fdfa0b32..cb0a552e0b181674365aa0285ae7b92d73dc746b 100644 (file)
                        reg = <0x0>;
                        operating-points = <
                                /* kHz    uV */
-                               1008000 1075000
-                                816000 1025000
-                                600000 1025000
-                                504000 1000000
-                                312000  975000
+                               1416000 1300000
+                               1200000 1175000
+                               1008000 1125000
+                               816000  1125000
+                               600000  1100000
+                               504000  1100000
+                               312000  1075000
                        >;
                        clock-latency = <40000>;
                        clocks = <&cru ARMCLK>;
                clock-names = "timer", "pclk";
        };
 
+       tsadc: tsadc@20060000 {
+               compatible = "rockchip,rk3066-tsadc";
+               reg = <0x20060000 0x100>;
+               clocks = <&cru SCLK_TSADC>, <&cru PCLK_TSADC>;
+               clock-names = "saradc", "apb_pclk";
+               interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+               #io-channel-cells = <1>;
+               status = "disabled";
+       };
+
        usbphy: phy {
                compatible = "rockchip,rk3066a-usb-phy", "rockchip,rk3288-usb-phy";
                rockchip,grf = <&grf>;
                        reg = <0x17c>;
                        clocks = <&cru SCLK_OTGPHY0>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
 
                usbphy1: usb-phy1 {
                        reg = <0x188>;
                        clocks = <&cru SCLK_OTGPHY1>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
        };
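Note on the tsadc node added above: it is created with status = "disabled" and advertises its channels through #io-channel-cells = <1>, so nothing uses it until a board enables it and a consumer references a channel through the generic IIO bindings. A sketch under those assumptions (the consumer node and channel number are purely illustrative):

	&tsadc {
		status = "okay";
	};

	/ {
		some-consumer {
			io-channels = <&tsadc 1>;	/* hypothetical consumer of ADC channel 1 */
		};
	};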
 
index 66fa87d1e2c2492f247b703fce10d3b34f2076e9..0b6924c97b6bf0fd40e6062ac0b279e2edaea7a2 100644 (file)
@@ -63,7 +63,7 @@
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
        };
index 348d46b7ada5a07ebdefe4dafc1630ddd079c0b7..9271833958f9dcd506d2b0365671414c4ec72460 100644 (file)
                        reg = <0x10c>;
                        clocks = <&cru SCLK_OTGPHY0>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
 
                usbphy1: usb-phy1 {
                        reg = <0x11c>;
                        clocks = <&cru SCLK_OTGPHY1>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
        };
 
index 4faabdb65868e38c0e4f96143d4a3cd5370afe5e..78d47f7d2938532d435954c731ba7e99063939c2 100644 (file)
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
        };
index 4e3fd9aefe3497e464bc8fecdb10a688372460a6..98c586a43c73011f3aba567c784f573b8ab57dab 100644 (file)
@@ -91,7 +91,7 @@
                #size-cells = <0>;
 
                button@0 {
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
                        label = "GPIO Power";
                        linux,code = <116>;
                output-low;
        };
 
+       pcfg_pull_up_drv_12ma: pcfg-pull-up-drv-12ma {
+               bias-pull-up;
+               drive-strength = <12>;
+       };
+
        act8846 {
                pwr_hold: pwr-hold {
                        rockchip,pins = <0 1 RK_FUNC_GPIO &pcfg_output_high>;
        };
 
        sdmmc {
+               /*
+                * Default drive strength isn't enough to achieve even
+                * high-speed mode on firefly board so bump up to 12ma.
+                */
+               sdmmc_bus4: sdmmc-bus4 {
+                       rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+                                       <6 17 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+                                       <6 18 RK_FUNC_1 &pcfg_pull_up_drv_12ma>,
+                                       <6 19 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+               };
+
+               sdmmc_clk: sdmmc-clk {
+                       rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_12ma>;
+               };
+
+               sdmmc_cmd: sdmmc-cmd {
+                       rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_12ma>;
+               };
+
                sdmmc_pwr: sdmmc-pwr {
                        rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
                };
index 65c475642d5a79196925a50498a567aad75f72ef..2ff9689d2e1b831c26f8e5660ee910ad2c18e6c6 100644 (file)
@@ -74,7 +74,7 @@
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
        };
index 17f13c73fe5e078e1adebbd1932615d41b89bbcb..510a1d0d7abb5b4022c4e2b7500d9049540a1ec3 100644 (file)
@@ -73,7 +73,7 @@
                        linux,code = <116>;
                        label = "GPIO Key Power";
                        linux,input-type = <1>;
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        debounce-interval = <100>;
                };
        };
index 1ece66f3e162786e839c26e5ef89c04b4bc71fec..e1ee9f949035cae2a2a1b84fad09b35feebe6440 100644 (file)
                clock-output-names = "ext_gmac";
        };
 
+       io_domains: io-domains {
+               compatible = "rockchip,rk3288-io-voltage-domain";
+               rockchip,grf = <&grf>;
+
+               audio-supply = <&vcc_io>;
+               bb-supply = <&vcc_io>;
+               dvp-supply = <&vcc_18>;
+               flash0-supply = <&vcc_flash>;
+               flash1-supply = <&vccio_pmu>;
+               gpio30-supply = <&vccio_pmu>;
+               gpio1830 = <&vcc_io>;
+               lcdc-supply = <&vcc_io>;
+               sdcard-supply = <&vccio_sd>;
+               wifi-supply = <&vcc_18>;
+       };
+
+       vcc_flash: flash-regulator {
+               compatible = "regulator-fixed";
+               regulator-name = "vcc_sys";
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+               startup-delay-us = <150>;
+               vin-supply = <&vcc_io>;
+       };
+
        vcc_sys: vsys-regulator {
                compatible = "regulator-fixed";
                regulator-name = "vcc_sys";
        pinctrl-names = "default";
        pinctrl-0 = <&emmc_clk &emmc_cmd &emmc_bus8>;
        vmmc-supply = <&vcc_io>;
+       vqmmc-supply = <&vcc_flash>;
        status = "okay";
 };
 
index c5453a0b07fca940227c8f24a92b838cbe43219d..dd3ad2e93a6d538dcd29c5dbc17ca2feb8d82d5d 100644 (file)
                stdout-path = "serial2:115200n8";
        };
 
+       gpio-leds {
+               compatible = "gpio-leds";
+
+               heartbeat {
+                       gpios = <&gpio7 15 GPIO_ACTIVE_LOW>;
+                       label = "rock2:green:state1";
+                       linux,default-trigger = "heartbeat";
+               };
+
+               mmc {
+                       gpios = <&gpio0 11 GPIO_ACTIVE_LOW>;
+                       label = "rock2:blue:state2";
+                       linux,default-trigger = "mmc0";
+               };
+       };
+
        ir: ir-receiver {
                compatible = "gpio-ir-receiver";
                gpios = <&gpio8 1 GPIO_ACTIVE_LOW>;
                #sound-dai-cells = <0>;
        };
 
+       sdio_pwrseq: sdio-pwrseq {
+               compatible = "mmc-pwrseq-simple";
+               clocks = <&hym8563>;
+               clock-names = "ext_clock";
+               pinctrl-names = "default";
+               pinctrl-0 = <&wifi_enable>;
+               reset-gpios = <&gpio4 28 GPIO_ACTIVE_LOW>;
+       };
+
        vcc_usb_host: vcc-host-regulator {
                compatible = "regulator-fixed";
                enable-active-high;
        };
 };
 
+&sdio0 {
+       bus-width = <4>;
+       cap-sd-highspeed;
+       cap-sdio-irq;
+       disable-wp;
+       mmc-pwrseq = <&sdio_pwrseq>;
+       non-removable;
+       num-slots = <1>;
+       pinctrl-names = "default";
+       pinctrl-0 = <&sdio0_bus4 &sdio0_cmd &sdio0_clk &sdio0_int>;
+       vmmc-supply = <&vcc_io>;
+       vqmmc-supply = <&vcc_18>;
+       status = "okay";
+};
+
 &sdmmc {
        bus-width = <4>;
        cap-mmc-highspeed;
 };
 
 &i2c0 {
-       hym8563@51 {
+       hym8563: hym8563@51 {
                compatible = "haoyu,hym8563";
                reg = <0x51>;
                #clock-cells = <0>;
                        rockchip,pins = <7 11 RK_FUNC_GPIO &pcfg_pull_none>;
                };
        };
+
+       sdio {
+               wifi_enable: wifi-enable {
+                       rockchip,pins = <4 28 RK_FUNC_GPIO &pcfg_pull_none>;
+               };
+       };
 };
 
 &spdif {
index 136d650dd05faf28b5656ea980b2b799edc06ba8..610769d99522ea47e1765282469b797d378dc5c8 100644 (file)
        lid {
                label = "Lid";
                gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
-               gpio-key,wakeup;
+               wakeup-source;
                linux,code = <0>; /* SW_LID */
                linux,input-type = <5>; /* EV_SW */
                debounce-interval = <1>;
index 9fce91ffff6fd89b1f39ba960dfeecd71ff8ae24..412809c60d01901087f67ed1107f2538ae3a8f1b 100644 (file)
@@ -64,7 +64,7 @@
                        gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
                        linux,code = <KEY_POWER>;
                        debounce-interval = <100>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 
        i2c-scl-rising-time-ns = <1000>;
 };
 
-&power {
-       assigned-clocks = <&cru SCLK_EDP_24M>;
-       assigned-clock-parents = <&xin24m>;
-};
-
 &pwm1 {
        status = "okay";
 };
        status = "okay";
 
        assigned-clocks = <&cru SCLK_USBPHY480M_SRC>;
-       assigned-clock-parents = <&cru SCLK_OTGPHY0>;
+       assigned-clock-parents = <&usbphy0>;
        dr_mode = "host";
 };
 
index 8ac49f3efc178ddc05ead42d5fec2178280275c2..0934b6abcaab59cc29bc9ef8f4ab573a46123291 100644 (file)
                        interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMAC2>;
                        clock-names = "apb_pclk";
                };
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMAC1>;
                        clock-names = "apb_pclk";
                        status = "disabled";
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMAC1>;
                        clock-names = "apb_pclk";
                };
                        #address-cells = <1>;
                        #size-cells = <0>;
 
+                       assigned-clocks = <&cru SCLK_EDP_24M>;
+                       assigned-clock-parents = <&xin24m>;
+
                        /*
                         * Note: Although SCLK_* are the working clocks
                         * of device without including on the NOC, needed for
                                reg = <0>;
                                remote-endpoint = <&hdmi_in_vopb>;
                        };
+                       vopb_out_mipi: endpoint@2 {
+                               reg = <2>;
+                               remote-endpoint = <&mipi_in_vopb>;
+                       };
                };
        };
 
                                reg = <0>;
                                remote-endpoint = <&hdmi_in_vopl>;
                        };
+                       vopl_out_mipi: endpoint@2 {
+                               reg = <2>;
+                               remote-endpoint = <&mipi_in_vopl>;
+                       };
                };
        };
 
                status = "disabled";
        };
 
+       mipi_dsi: mipi@ff960000 {
+               compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi";
+               reg = <0xff960000 0x4000>;
+               interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&cru SCLK_MIPIDSI_24M>, <&cru PCLK_MIPI_DSI0>;
+               clock-names = "ref", "pclk";
+               rockchip,grf = <&grf>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               status = "disabled";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       reg = <1>;
+
+                       mipi_in: port {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               mipi_in_vopb: endpoint@0 {
+                                       reg = <0>;
+                                       remote-endpoint = <&vopb_out_mipi>;
+                               };
+                               mipi_in_vopl: endpoint@1 {
+                                       reg = <1>;
+                                       remote-endpoint = <&vopl_out_mipi>;
+                               };
+                       };
+               };
+       };
+
        hdmi: hdmi@ff980000 {
                compatible = "rockchip,rk3288-dw-hdmi";
                reg = <0xff980000 0x20000>;
                        reg = <0x320>;
                        clocks = <&cru SCLK_OTGPHY0>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
 
                usbphy1: usb-phy1 {
                        reg = <0x334>;
                        clocks = <&cru SCLK_OTGPHY1>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
 
                usbphy2: usb-phy2 {
                        reg = <0x348>;
                        clocks = <&cru SCLK_OTGPHY2>;
                        clock-names = "phyclk";
+                       #clock-cells = <0>;
                };
        };
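Note on the rk3288.dtsi display hunks above: the new mipi_dsi controller is wired into the display graph by cross-linking mipi_in_vopb/mipi_in_vopl with the vopb_out_mipi/vopl_out_mipi endpoints added to the two VOPs, and the node itself is left disabled. A board driving a DSI panel would enable it and put the panel on the DSI bus; a minimal, assumed sketch is below (the panel compatible is a placeholder, not something defined by this commit).

	&mipi_dsi {
		status = "okay";

		panel@0 {
			compatible = "vendor,example-dsi-panel";	/* placeholder */
			reg = <0>;
		};
	};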
 
index 99eeea70223b94c9492a9df2fb8f7e4ffa373613..f1581f9d050f54037f24e5029d1814e9f759b520 100644 (file)
@@ -78,6 +78,7 @@
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMA1>;
                        clock-names = "apb_pclk";
                };
@@ -88,6 +89,7 @@
                        interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMA1>;
                        clock-names = "apb_pclk";
                        status = "disabled";
                        interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
                        #dma-cells = <1>;
+                       arm,pl330-broken-no-flushp;
                        clocks = <&cru ACLK_DMA2>;
                        clock-names = "apb_pclk";
                };
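Note on the arm,pl330-broken-no-flushp lines added to the PL330 DMA controller nodes in the Rockchip .dtsi files above: this is a boolean quirk property that tells the pl330 driver not to issue the DMAFLUSHP instruction on this controller revision. Being boolean, its presence alone enables the workaround:

	/* boolean quirk: presence alone requests the no-FLUSHP workaround */
	arm,pl330-broken-no-flushp;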
index aa64faa72970113a887c22836f9ddb5da5aa6e68..da24ab570b0e8c4b278af7d4844eec2c6e2d777b 100644 (file)
                        linux,code = <KEY_POWER>;
                        label = "power";
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
 
 &keypad {
        linux,input-no-autorepeat;
-       linux,input-wakeup;
+       wakeup-source;
        samsung,keypad-num-rows = <3>;
        samsung,keypad-num-columns = <3>;
        pinctrl-names = "default";
index 3b76eeeb8410a66a31ff33018df38e2ba2ce990e..0a33d402138e14fb054bc32beadf748b4f9c20cd 100644 (file)
                        linux,code = <KEY_POWER>;
                        label = "power";
                        debounce-interval = <1>;
-                       gpio-key,wakeup;
+                       wakeup-source;
                };
        };
 };
 
 &keypad {
        linux,input-no-autorepeat;
-       linux,input-wakeup;
+       wakeup-source;
        samsung,keypad-num-rows = <3>;
        samsung,keypad-num-columns = <3>;
        pinctrl-names = "default";
index da7d210df6704d4304809e290114f3b05bb7506a..54fcc3fc82e20c2c8dba67186f92a213645dcaa6 100644 (file)
@@ -59,7 +59,7 @@
 
 &keypad {
        linux,input-no-autorepeat;
-       linux,input-wakeup;
+       wakeup-source;
        samsung,keypad-num-rows = <8>;
        samsung,keypad-num-columns = <8>;
        pinctrl-names = "default";
index b8032bca462152e6904d299f84514c483dde0165..db1151c18466c3ac530be0ba39ba2f00be3a06bc 100644 (file)
                        dbgu: serial@fc069000 {
                                compatible = "atmel,at91sam9260-dbgu", "atmel,at91sam9260-usart";
                                reg = <0xfc069000 0x200>;
-                               interrupts = <2 IRQ_TYPE_LEVEL_HIGH 7>;
+                               interrupts = <45 IRQ_TYPE_LEVEL_HIGH 7>;
                                pinctrl-names = "default";
                                pinctrl-0 = <&pinctrl_dbgu>;
                                clocks = <&dbgu_clk>;
index 3a6056f9f0d23f17edb87674882e3b98f31fee49..bf825ca4f6f7912a6407abe9b3b3bc65a26e3637 100644 (file)
@@ -58,7 +58,7 @@
        L2: cache-controller {
                compatible = "arm,pl310-cache";
                reg = <0xf0100000 0x1000>;
-               interrupts = <0 44 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>;
                power-domains = <&pd_a3sm>;
                arm,data-latency = <3 3 3>;
                arm,tag-latency = <2 2 2>;
@@ -70,8 +70,8 @@
        sbsc2: memory-controller@fb400000 {
                compatible = "renesas,sbsc-sh73a0";
                reg = <0xfb400000 0x400>;
-               interrupts = <0 37 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 38 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "sec", "temp";
                power-domains = <&pd_a4bc1>;
        };
        sbsc1: memory-controller@fe400000 {
                compatible = "renesas,sbsc-sh73a0";
                reg = <0xfe400000 0x400>;
-               interrupts = <0 35 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 36 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
                interrupt-names = "sec", "temp";
                power-domains = <&pd_a4bc0>;
        };
 
        pmu {
                compatible = "arm,cortex-a9-pmu";
-               interrupts = <0 55 IRQ_TYPE_LEVEL_HIGH>,
-                            <0 56 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>;
        };
 
        cmt1: timer@e6138000 {
                compatible = "renesas,cmt-48-sh73a0", "renesas,cmt-48";
                reg = <0xe6138000 0x200>;
-               interrupts = <0 65 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_CMT1>;
                clock-names = "fck";
                power-domains = <&pd_c5>;
                        <0xe6900020 1>,
                        <0xe6900040 1>,
                        <0xe6900060 1>;
-               interrupts = <0 1 IRQ_TYPE_LEVEL_HIGH
-                             0 2 IRQ_TYPE_LEVEL_HIGH
-                             0 3 IRQ_TYPE_LEVEL_HIGH
-                             0 4 IRQ_TYPE_LEVEL_HIGH
-                             0 5 IRQ_TYPE_LEVEL_HIGH
-                             0 6 IRQ_TYPE_LEVEL_HIGH
-                             0 7 IRQ_TYPE_LEVEL_HIGH
-                             0 8 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
                power-domains = <&pd_a4s>;
                control-parent;
                        <0xe6900024 1>,
                        <0xe6900044 1>,
                        <0xe6900064 1>;
-               interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH
-                             0 10 IRQ_TYPE_LEVEL_HIGH
-                             0 11 IRQ_TYPE_LEVEL_HIGH
-                             0 12 IRQ_TYPE_LEVEL_HIGH
-                             0 13 IRQ_TYPE_LEVEL_HIGH
-                             0 14 IRQ_TYPE_LEVEL_HIGH
-                             0 15 IRQ_TYPE_LEVEL_HIGH
-                             0 16 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
                power-domains = <&pd_a4s>;
                control-parent;
                        <0xe6900028 1>,
                        <0xe6900048 1>,
                        <0xe6900068 1>;
-               interrupts = <0 17 IRQ_TYPE_LEVEL_HIGH
-                             0 18 IRQ_TYPE_LEVEL_HIGH
-                             0 19 IRQ_TYPE_LEVEL_HIGH
-                             0 20 IRQ_TYPE_LEVEL_HIGH
-                             0 21 IRQ_TYPE_LEVEL_HIGH
-                             0 22 IRQ_TYPE_LEVEL_HIGH
-                             0 23 IRQ_TYPE_LEVEL_HIGH
-                             0 24 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
                power-domains = <&pd_a4s>;
                control-parent;
                        <0xe690002c 1>,
                        <0xe690004c 1>,
                        <0xe690006c 1>;
-               interrupts = <0 25 IRQ_TYPE_LEVEL_HIGH
-                             0 26 IRQ_TYPE_LEVEL_HIGH
-                             0 27 IRQ_TYPE_LEVEL_HIGH
-                             0 28 IRQ_TYPE_LEVEL_HIGH
-                             0 29 IRQ_TYPE_LEVEL_HIGH
-                             0 30 IRQ_TYPE_LEVEL_HIGH
-                             0 31 IRQ_TYPE_LEVEL_HIGH
-                             0 32 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp5_clks SH73A0_CLK_INTCA0>;
                power-domains = <&pd_a4s>;
                control-parent;
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6820000 0x425>;
-               interrupts = <0 167 IRQ_TYPE_LEVEL_HIGH
-                             0 168 IRQ_TYPE_LEVEL_HIGH
-                             0 169 IRQ_TYPE_LEVEL_HIGH
-                             0 170 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp1_clks SH73A0_CLK_IIC0>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6822000 0x425>;
-               interrupts = <0 51 IRQ_TYPE_LEVEL_HIGH
-                             0 52 IRQ_TYPE_LEVEL_HIGH
-                             0 53 IRQ_TYPE_LEVEL_HIGH
-                             0 54 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_IIC1>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6824000 0x425>;
-               interrupts = <0 171 IRQ_TYPE_LEVEL_HIGH
-                             0 172 IRQ_TYPE_LEVEL_HIGH
-                             0 173 IRQ_TYPE_LEVEL_HIGH
-                             0 174 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 172 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks SH73A0_CLK_IIC2>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6826000 0x425>;
-               interrupts = <0 183 IRQ_TYPE_LEVEL_HIGH
-                             0 184 IRQ_TYPE_LEVEL_HIGH
-                             0 185 IRQ_TYPE_LEVEL_HIGH
-                             0 186 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks SH73A0_CLK_IIC3>;
                power-domains = <&pd_a3sp>;
                status = "disabled";
                #size-cells = <0>;
                compatible = "renesas,iic-sh73a0", "renesas,rmobile-iic";
                reg = <0xe6828000 0x425>;
-               interrupts = <0 187 IRQ_TYPE_LEVEL_HIGH
-                             0 188 IRQ_TYPE_LEVEL_HIGH
-                             0 189 IRQ_TYPE_LEVEL_HIGH
-                             0 190 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp4_clks SH73A0_CLK_IIC4>;
                power-domains = <&pd_c5>;
                status = "disabled";
        mmcif: mmc@e6bd0000 {
                compatible = "renesas,sh-mmcif";
                reg = <0xe6bd0000 0x100>;
-               interrupts = <0 140 IRQ_TYPE_LEVEL_HIGH
-                             0 141 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_MMCIF0>;
                power-domains = <&pd_a3sp>;
                reg-io-width = <4>;
        msiof0: spi@e6e20000 {
                compatible = "renesas,msiof-sh73a0", "renesas,sh-mobile-msiof";
                reg = <0xe6e20000 0x0064>;
-               interrupts = <0 142 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp0_clks SH73A0_CLK_MSIOF0>;
                power-domains = <&pd_a3sp>;
                #address-cells = <1>;
        msiof1: spi@e6e10000 {
                compatible = "renesas,msiof-sh73a0", "renesas,sh-mobile-msiof";
                reg = <0xe6e10000 0x0064>;
-               interrupts = <0 77 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_MSIOF1>;
                power-domains = <&pd_a3sp>;
                #address-cells = <1>;
        msiof2: spi@e6e00000 {
                compatible = "renesas,msiof-sh73a0", "renesas,sh-mobile-msiof";
                reg = <0xe6e00000 0x0064>;
-               interrupts = <0 76 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_MSIOF2>;
                power-domains = <&pd_a3sp>;
                #address-cells = <1>;
        msiof3: spi@e6c90000 {
                compatible = "renesas,msiof-sh73a0", "renesas,sh-mobile-msiof";
                reg = <0xe6c90000 0x0064>;
-               interrupts = <0 59 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_MSIOF3>;
                power-domains = <&pd_a3sp>;
                #address-cells = <1>;
        sdhi0: sd@ee100000 {
                compatible = "renesas,sdhi-sh73a0";
                reg = <0xee100000 0x100>;
-               interrupts = <0 83 IRQ_TYPE_LEVEL_HIGH
-                             0 84 IRQ_TYPE_LEVEL_HIGH
-                             0 85 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_SDHI0>;
                power-domains = <&pd_a3sp>;
                cap-sd-highspeed;
        sdhi1: sd@ee120000 {
                compatible = "renesas,sdhi-sh73a0";
                reg = <0xee120000 0x100>;
-               interrupts = <0 88 IRQ_TYPE_LEVEL_HIGH
-                             0 89 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_SDHI1>;
                power-domains = <&pd_a3sp>;
                toshiba,mmc-wrprotect-disable;
        sdhi2: sd@ee140000 {
                compatible = "renesas,sdhi-sh73a0";
                reg = <0xee140000 0x100>;
-               interrupts = <0 104 IRQ_TYPE_LEVEL_HIGH
-                             0 105 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH
+                             GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_SDHI2>;
                power-domains = <&pd_a3sp>;
                toshiba,mmc-wrprotect-disable;
        scifa0: serial@e6c40000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c40000 0x100>;
-               interrupts = <0 72 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA0>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa1: serial@e6c50000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c50000 0x100>;
-               interrupts = <0 73 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA1>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa2: serial@e6c60000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c60000 0x100>;
-               interrupts = <0 74 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA2>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa3: serial@e6c70000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c70000 0x100>;
-               interrupts = <0 75 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA3>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa4: serial@e6c80000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6c80000 0x100>;
-               interrupts = <0 78 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA4>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa5: serial@e6cb0000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6cb0000 0x100>;
-               interrupts = <0 79 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA5>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa6: serial@e6cc0000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6cc0000 0x100>;
-               interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp3_clks SH73A0_CLK_SCIFA6>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifa7: serial@e6cd0000 {
                compatible = "renesas,scifa-sh73a0", "renesas,scifa";
                reg = <0xe6cd0000 0x100>;
-               interrupts = <0 143 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFA7>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
        scifb: serial@e6c30000 {
                compatible = "renesas,scifb-sh73a0", "renesas,scifb";
                reg = <0xe6c30000 0x100>;
-               interrupts = <0 80 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&mstp2_clks SH73A0_CLK_SCIFB>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
                power-domains = <&pd_a3sp>;
                status = "disabled";
        };
                #sound-dai-cells = <1>;
                compatible = "renesas,fsi2-sh73a0", "renesas,sh_fsi2";
                reg = <0xec230000 0x400>;
-               interrupts = <0 146 0x4>;
+               interrupts = <GIC_SPI 146 0x4>;
                power-domains = <&pd_a4mp>;
                status = "disabled";
        };
                #size-cells = <1>;
                ranges = <0 0 0x20000000>;
                reg = <0xfec10000 0x400>;
-               interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+               interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
                clocks = <&zb_clk>;
                power-domains = <&pd_a4s>;
        };
index d0c74385331803383d5296d7157516aa63c63758..27a333eb89870167b82a170d3553bd5078fafdfb 100644 (file)
                        };
                        mmcsd_default_mode: mmcsd_default {
                                mmcsd_default_cfg1 {
-                                       /* MCCLK */
-                                       pins = "GPIO8_B10";
-                                       ste,output = <0>;
-                               };
-                               mmcsd_default_cfg2 {
-                                       /* MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2 */
-                                       pins = "GPIO10_C11", "GPIO15_A12",
-                                       "GPIO16_C13", "GPIO23_D15";
-                                       ste,output = <1>;
-                               };
-                               mmcsd_default_cfg3 {
-                                       /* MCCMD, MCDAT3-0, MCMSFBCLK */
-                                       pins = "GPIO9_A10", "GPIO11_B11",
-                                       "GPIO12_A11", "GPIO13_C12",
-                                       "GPIO14_B12", "GPIO24_C15";
-                                       ste,input = <1>;
+                                       /*
+                                        * MCCLK, MCCMDDIR, MCDAT0DIR, MCDAT31DIR, MCDATDIR2
+                                        * MCCMD, MCDAT3-0, MCMSFBCLK
+                                        */
+                                       pins = "GPIO8_B10", "GPIO9_A10", "GPIO10_C11", "GPIO11_B11",
+                                              "GPIO12_A11", "GPIO13_C12", "GPIO14_B12", "GPIO15_A12",
+                                              "GPIO16_C13", "GPIO23_D15", "GPIO24_C15";
+                                       ste,output = <2>;
                                };
                        };
                };
                        clock-names = "mclk", "apb_pclk";
                        interrupt-parent = <&vica>;
                        interrupts = <22>;
-                       max-frequency = <48000000>;
+                       max-frequency = <400000>;
                        bus-width = <4>;
                        cap-mmc-highspeed;
                        cap-sd-highspeed;
+                       full-pwr-cycle;
+                       /*
+                        * The STw4811 circuit used with the Nomadik strictly
+                        * requires that all of these signal direction pins be
+                        * routed and used for its 4-bit levelshifter.
+                        */
+                       st,sig-dir-dat0;
+                       st,sig-dir-dat2;
+                       st,sig-dir-dat31;
+                       st,sig-dir-cmd;
+                       st,sig-pin-fbclk;
                        pinctrl-names = "default";
                        pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
                        vmmc-supply = <&vmmc_regulator>;
index 53660894ea95ebb953446caf650f43526e0f7a8a..023b03efa5fff7cd978b5820b9b8b5e3f2015052 100644 (file)
@@ -45,6 +45,7 @@
 #include "sunxi-common-regulators.dtsi"
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        model = "Chuwi V7 CW0825";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c2_pins_a>;
        status = "okay";
+
+       ft5306de4: touchscreen@38 {
+               compatible = "edt,edt-ft5406";
+               reg = <0x38>;
+               interrupt-parent = <&pio>;
+               interrupts = <7 21 IRQ_TYPE_EDGE_FALLING>;
+               touchscreen-size-x = <1024>;
+               touchscreen-size-y = <768>;
+       };
 };
 
 &lradc {
index 77c31dab86b137d44fac84aec557587b0b594fe7..04b0d2d1ae6c1e2890f28abe0043f3ebcdbd9e95 100644 (file)
@@ -48,6 +48,7 @@
 
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/interrupt-controller/irq.h>
 
 / {
        model = "INet-97F Rev 02";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c2_pins_a>;
        status = "okay";
+
+       ft5406ee8: touchscreen@38 {
+               compatible = "edt,edt-ft5406";
+               reg = <0x38>;
+               interrupt-parent = <&pio>;
+               interrupts = <7 21 IRQ_TYPE_EDGE_FALLING>;
+               touchscreen-size-x = <800>;
+               touchscreen-size-y = <480>;
+       };
 };
 
 &lradc {
index ca49b0d0ce1e0bad3a3de2552acb04f66ee8b952..bba4f9cf9bf5d6c2f51faa05ea0ac90deb48bacc 100644 (file)
        pinctrl-names = "default";
        pinctrl-0 = <&i2c2_pins_a>;
        status = "okay";
+
+       ft5406ee8: touchscreen@38 {
+               compatible = "edt,edt-ft5406";
+               reg = <0x38>;
+               interrupt-parent = <&pio>;
+               interrupts = <7 21 IRQ_TYPE_EDGE_FALLING>;
+               touchscreen-size-x = <800>;
+               touchscreen-size-y = <480>;
+       };
 };
 
 &lradc {
index 985e15503378f818299eed0110f1678fa2c016cc..4e798f014c992c949fdf186facae567c1c8b1de2 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright 2015 Josef Gajdusek <atx@atx.name>
+ * Copyright 2015 - Marcus Cooper <codekipper@gmail.com>
  *
  * This file is dual-licensed: you can use it either under the terms
  * of the GPL or the X11 license, at your option. Note that this dual
 
 /dts-v1/;
 #include "sun4i-a10.dtsi"
-#include "sunxi-common-regulators.dtsi"
-
-#include <dt-bindings/gpio/gpio.h>
-#include <dt-bindings/pinctrl/sun4i-a10.h>
+#include "sunxi-itead-core-common.dtsi"
 
 / {
        model = "Iteaduino Plus A10";
        compatible = "itead,iteaduino-plus-a10", "allwinner,sun4i-a10";
-
-       aliases {
-               serial0 = &uart0;
-       };
-
-       chosen {
-               stdout-path = "serial0:115200n8";
-       };
 };
 
 &ahci {
        status = "okay";
 };
 
-&cpu0 {
-       cpu-supply = <&reg_dcdc2>;
-};
-
-&ehci0 {
-       status = "okay";
-};
-
-&ehci1 {
-       status = "okay";
-};
-
 &emac {
        pinctrl-names = "default";
        pinctrl-0 = <&emac_pins_a>;
 };
 
 &i2c0 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&i2c0_pins_a>;
-       status = "okay";
-
        axp209: pmic@34 {
-               reg = <0x34>;
                interrupts = <0>;
        };
 };
        status = "okay";
 };
 
-&ohci0 {
-       status = "okay";
-};
-
-&ohci1 {
-       status = "okay";
-};
-
 &reg_ahci_5v {
        status = "okay";
 };
 
-#include "axp209.dtsi"
-
-&reg_dcdc2 {
-       regulator-always-on;
-       regulator-min-microvolt = <1000000>;
-       regulator-max-microvolt = <1450000>;
-       regulator-name = "vdd-cpu";
-};
-
-&reg_dcdc3 {
-       regulator-always-on;
-       regulator-min-microvolt = <1000000>;
-       regulator-max-microvolt = <1400000>;
-       regulator-name = "vdd-int-dll";
-};
-
-&reg_ldo1 {
-       regulator-name = "vdd-rtc";
-};
-
-&reg_ldo2 {
-       regulator-always-on;
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
-       regulator-name = "avcc";
-};
-
-&reg_usb1_vbus {
-       status = "okay";
-};
-
-&reg_usb2_vbus {
-       status = "okay";
-};
-
 &spi0 {
        pinctrl-names = "default";
        pinctrl-0 = <&spi0_pins_a>,
                    <&spi0_cs0_pins_a>;
        status = "okay";
 };
-
-&uart0 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&uart0_pins_a>;
-       status = "okay";
-};
-
-&usbphy {
-       usb1_vbus-supply = <&reg_usb1_vbus>;
-       usb2_vbus-supply = <&reg_usb2_vbus>;
-       status = "okay";
-};
index 530ab28e9ca239b2da64d046a1c0c1da7d94b9d0..f6898c6b84d426bdf45df86495bc69cad91bb064 100644 (file)
        status = "okay";
 };
 
+&cpu0 {
+       cpu-supply = <&reg_dcdc2>;
+};
+
 &ehci0 {
        status = "okay";
 };
index b6ad7850fac6931ee1eb6f1716418c16b849eae4..7a198dcd4ae81264ce0ff37b504b7fc43aab5338 100644 (file)
@@ -65,7 +65,7 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0-hdmi";
-                       clocks = <&pll6 0>;
+                       clocks = <&pll6>;
                        status = "disabled";
                };
 
@@ -73,7 +73,7 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll6 0>;
+                       clocks = <&pll6>;
                        status = "disabled";
                };
        };
                };
 
                pll6: clk@01c20028 {
-                       #clock-cells = <1>;
+                       #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-pll6-clk";
                        reg = <0x01c20028 0x4>;
                        clocks = <&osc24M>;
-                       clock-output-names = "pll6", "pll6x2";
+                       clock-output-names = "pll6";
                };
 
                cpu: cpu@01c20050 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
                        clock-output-names = "ahb1";
 
                        /*
                         * controller requires AHB1 clocked from PLL6.
                         */
                        assigned-clocks = <&ahb1>;
-                       assigned-clock-parents = <&pll6 0>;
+                       assigned-clock-parents = <&pll6>;
                };
 
                ahb1_gates: clk@01c20060 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
                        clock-output-names = "apb2";
                };
 
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc0",
                                             "mmc0_output",
                                             "mmc0_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc1",
                                             "mmc1_output",
                                             "mmc1_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc2",
                                             "mmc2_output",
                                             "mmc2_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc3",
                                             "mmc3_output",
                                             "mmc3_sample";
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c2009c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "ss";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c200a0 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "spi0";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c200a4 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "spi1";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c200a8 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "spi2";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c200ac 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "spi3";
                };
 
                                allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
                        };
 
+                       mmc3_8bit_emmc_pins: mmc3@1 {
+                               allwinner,pins = "PC6", "PC7", "PC8", "PC9",
+                                                "PC10", "PC11", "PC12",
+                                                "PC13", "PC14", "PC15",
+                                                "PC24";
+                               allwinner,function = "mmc3";
+                               allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+                               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+                       };
+
                        gmac_pins_mii_a: gmac_mii@0 {
                                allwinner,pins = "PA0", "PA1", "PA2", "PA3",
                                                "PA8", "PA9", "PA11",
                        ar100: ar100_clk {
                                compatible = "allwinner,sun6i-a31-ar100-clk";
                                #clock-cells = <0>;
-                               clocks = <&osc32k>, <&osc24M>, <&pll6 0>,
-                                        <&pll6 0>;
+                               clocks = <&osc32k>, <&osc24M>, <&pll6>,
+                                        <&pll6>;
                                clock-output-names = "ar100";
                        };
 
index ea69fb8ad4d80aab599c5c8e313429706a23bf87..4ec0c8679b2e20b9a15b28160639daf264abd0d6 100644 (file)
 };
 
 /* eMMC on core board */
-&mmc2 {
+&mmc3 {
        pinctrl-names = "default";
-       pinctrl-0 = <&mmc2_8bit_emmc_pins>;
+       pinctrl-0 = <&mmc3_8bit_emmc_pins>;
        vmmc-supply = <&reg_dcdc1>;
+       vqmmc-supply = <&reg_dcdc1>;
        bus-width = <8>;
        non-removable;
+       cap-mmc-hw-reset;
        status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts b/arch/arm/boot/dts/sun7i-a20-itead-ibox.dts
new file mode 100644 (file)
index 0000000..661c21d
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2015 - Marcus Cooper <codekipper@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun7i-a20.dtsi"
+#include "sunxi-itead-core-common.dtsi"
+
+/ {
+       model = "Itead Ibox A20";
+       compatible = "itead,itead-ibox-a20", "allwinner,sun7i-a20";
+
+       leds {
+               compatible = "gpio-leds";
+               pinctrl-names = "default";
+               pinctrl-0 = <&led_pins_itead_core>;
+
+               green {
+                       label = "itead_core:green:usr";
+                       gpios = <&pio 7 20 GPIO_ACTIVE_HIGH>;
+                       default-state = "on";
+               };
+
+               blue {
+                       label = "itead_core:blue:usr";
+                       gpios = <&pio 7 21 GPIO_ACTIVE_HIGH>;
+                       default-state = "on";
+               };
+       };
+};
+
+&ahci {
+       target-supply = <&reg_ahci_5v>;
+       status = "okay";
+};
+
+&codec {
+       status = "okay";
+};
+
+&gmac {
+       pinctrl-names = "default";
+       pinctrl-0 = <&gmac_pins_mii_a>;
+       phy = <&phy1>;
+       phy-mode = "mii";
+       status = "okay";
+
+       phy1: ethernet-phy@1 {
+               reg = <1>;
+       };
+};
+
+&i2c0 {
+       axp209: pmic@34 {
+               interrupt-parent = <&nmi_intc>;
+               interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+       };
+};
+
+&ir0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&ir0_rx_pins_a>;
+       status = "okay";
+};
+
+&mmc0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&mmc0_pins_a>, <&mmc0_cd_pin_reference_design>;
+       vmmc-supply = <&reg_vcc3v3>;
+       bus-width = <4>;
+       cd-gpios = <&pio 7 1 GPIO_ACTIVE_HIGH>; /* PH1 */
+       cd-inverted;
+       status = "okay";
+};
+
+&pio {
+       led_pins_itead_core: led_pins@0 {
+               allwinner,pins = "PH20","PH21";
+               allwinner,function = "gpio_out";
+               allwinner,drive = <SUN4I_PINCTRL_20_MA>;
+               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+       };
+};
+
+&reg_ahci_5v {
+       status = "okay";
+};
index 6f88fb0ddbc7d13a1f2ee83befbdd111ee36dfb4..783b4b8b7c189e20f38199ce6e6eba1aafb65004 100644 (file)
@@ -60,7 +60,7 @@
                        compatible = "allwinner,simple-framebuffer",
                                     "simple-framebuffer";
                        allwinner,pipeline = "de_be0-lcd0";
-                       clocks = <&pll6 0>;
+                       clocks = <&pll6>;
                        status = "disabled";
                };
        };
                };
 
                pll6: clk@01c20028 {
-                       #clock-cells = <1>;
+                       #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-pll6-clk";
                        reg = <0x01c20028 0x4>;
                        clocks = <&osc24M>;
-                       clock-output-names = "pll6", "pll6x2";
+                       clock-output-names = "pll6";
+               };
+
+                pll6x2: pll6x2_clk {
+                       compatible = "fixed-factor-clock";
+                       #clock-cells = <0>;
+                       clock-div = <1>;
+                       clock-mult = <2>;
+                       clocks = <&pll6>;
+                       clock-output-names = "pll6-2x";
                };
 
                cpu: cpu_clk@01c20050 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
                        clock-output-names = "ahb1";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
                        clock-output-names = "apb2";
                };
 
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc0",
                                             "mmc0_output",
                                             "mmc0_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc1",
                                             "mmc1_output",
                                             "mmc1_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "mmc2",
                                             "mmc2_output",
                                             "mmc2_sample";
                                allwinner,pins = "PC5", "PC6", "PC8",
                                                 "PC9", "PC10", "PC11",
                                                 "PC12", "PC13", "PC14",
-                                                "PC15";
+                                                "PC15", "PC16";
                                allwinner,function = "mmc2";
                                allwinner,drive = <SUN4I_PINCTRL_30_MA>;
                                allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
index 92e6616979ea42868b3a6bcb77f76ed7ea8083bd..5e589c1ddda93883f8b53852f96d683447ce9b30 100644 (file)
@@ -79,7 +79,7 @@
                        #clock-cells = <0>;
                        compatible = "allwinner,sun8i-a23-mbus-clk";
                        reg = <0x01c2015c 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&pll5>;
+                       clocks = <&osc24M>, <&pll6x2>, <&pll5>;
                        clock-output-names = "mbus";
                };
        };
index 13ce68f06dd6e0ab7c7b9a5ba6e05562e4df3868..bd2a3beb4629201443e0ce30072988b25353bcca 100644 (file)
        vmmc-supply = <&reg_vcc3v0>;
        bus-width = <8>;
        non-removable;
+       cap-mmc-hw-reset;
        status = "okay";
 };
 
 &mmc2_8bit_pins {
+       /* Increase drive strength for DDR modes */
+       allwinner,drive = <SUN4I_PINCTRL_40_MA>;
        /* eMMC is missing pull-ups */
        allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
 };
index 001d8402ca1845bca126adab131d69d439ceab6a..f3eb618bcfa745934d7519b072167d4810f9ce4f 100644 (file)
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-mod0-clk";
                        reg = <0x01c2009c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>;
+                       clocks = <&osc24M>, <&pll6>;
                        clock-output-names = "ss";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun8i-a23-mbus-clk";
                        reg = <0x01c2015c 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&pll5>, <&pll11>;
+                       clocks = <&osc24M>, <&pll6x2>, <&pll5>, <&pll11>;
                        clock-output-names = "mbus";
                };
        };
diff --git a/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts b/arch/arm/boot/dts/sun8i-a83t-allwinner-h8homlet-v2.dts
new file mode 100644 (file)
index 0000000..342e1d3
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2015 Vishnu Patekar
+ * Vishnu Patekar <vishnupatekar0510@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a83t.dtsi"
+
+/ {
+       model = "Allwinner A83T H8Homlet Proto Dev Board v2.0";
+       compatible = "allwinner,h8homlet-v2", "allwinner,sun8i-a83t";
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+};
+
+&uart0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&uart0_pins_b>;
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts b/arch/arm/boot/dts/sun8i-a83t-cubietruck-plus.dts
new file mode 100644 (file)
index 0000000..88b1e09
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2015 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/dts-v1/;
+#include "sun8i-a83t.dtsi"
+
+/ {
+       model = "Cubietech Cubietruck Plus";
+       compatible = "cubietech,cubietruck-plus", "allwinner,sun8i-a83t";
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+};
+
+&uart0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&uart0_pins_b>;
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/sun8i-a83t.dtsi b/arch/arm/boot/dts/sun8i-a83t.dtsi
new file mode 100644 (file)
index 0000000..d3473f8
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * Copyright 2015 Vishnu Patekar
+ *
+ * Vishnu Patekar <vishnupatekar0510@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+
+ */
+
+#include "skeleton.dtsi"
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#include <dt-bindings/pinctrl/sun4i-a10.h>
+
+/ {
+       interrupt-parent = <&gic>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0>;
+               };
+
+               cpu@1 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <1>;
+               };
+
+               cpu@2 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <2>;
+               };
+
+               cpu@3 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <3>;
+               };
+
+               cpu@100 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0x100>;
+               };
+
+               cpu@101 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0x101>;
+               };
+
+               cpu@102 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0x102>;
+               };
+
+               cpu@103 {
+                       compatible = "arm,cortex-a7";
+                       device_type = "cpu";
+                       reg = <0x103>;
+               };
+       };
+
+       timer {
+               compatible = "arm,armv7-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW)>;
+       };
+
+       clocks {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               /* TODO: PRCM block has a mux for this. */
+               osc24M: osc24M_clk {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <24000000>;
+                       clock-output-names = "osc24M";
+               };
+
+               /*
+                * This is called "internal OSC" in some places.
+                * It is an internal RC-based oscillator.
+                * TODO: Its controls are in the PRCM block.
+                */
+               osc16M: osc16M_clk {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <16000000>;
+                       clock-output-names = "osc16M";
+               };
+
+               osc16Md512: osc16Md512_clk {
+                       #clock-cells = <0>;
+                       compatible = "fixed-factor-clock";
+                       clock-div = <512>;
+                       clock-mult = <1>;
+                       clocks = <&osc16M>;
+                       clock-output-names = "osc16M-d512";
+               };
+       };
+
+       soc {
+               compatible = "simple-bus";
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges;
+
+               pio: pinctrl@01c20800 {
+                       compatible = "allwinner,sun8i-a83t-pinctrl";
+                       interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+                       reg = <0x01c20800 0x400>;
+                       clocks = <&osc24M>;
+                       gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <3>;
+                       #gpio-cells = <3>;
+
+                       mmc0_pins_a: mmc0@0 {
+                               allwinner,pins = "PF0", "PF1", "PF2",
+                                                "PF3", "PF4", "PF5";
+                               allwinner,function = "mmc0";
+                               allwinner,drive = <SUN4I_PINCTRL_30_MA>;
+                               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+                       };
+
+                       uart0_pins_a: uart0@0 {
+                               allwinner,pins = "PF2", "PF4";
+                               allwinner,function = "uart0";
+                               allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+                               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+                       };
+
+                       uart0_pins_b: uart0@1 {
+                               allwinner,pins = "PB9", "PB10";
+                               allwinner,function = "uart0";
+                               allwinner,drive = <SUN4I_PINCTRL_10_MA>;
+                               allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
+                       };
+               };
+
+               timer@01c20c00 {
+                       compatible = "allwinner,sun4i-a10-timer";
+                       reg = <0x01c20c00 0xa0>;
+                       interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
+                                    <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&osc24M>;
+               };
+
+               watchdog@01c20ca0 {
+                       compatible = "allwinner,sun6i-a31-wdt";
+                       reg = <0x01c20ca0 0x20>;
+                       interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&osc24M>;
+               };
+
+               uart0: serial@01c28000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x01c28000 0x400>;
+                       interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+                       reg-shift = <2>;
+                       reg-io-width = <4>;
+                       clocks = <&osc24M>;
+                       status = "disabled";
+               };
+
+               gic: interrupt-controller@01c81000 {
+                       compatible = "arm,cortex-a7-gic", "arm,cortex-a15-gic";
+                       reg = <0x01c81000 0x1000>,
+                             <0x01c82000 0x1000>,
+                             <0x01c84000 0x2000>,
+                             <0x01c86000 0x2000>;
+                       interrupt-controller;
+                       #interrupt-cells = <3>;
+                       interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_HIGH)>;
+               };
+       };
+};
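
The timer and GIC lines in the new sun8i-a83t dtsi above pack both a CPU-targeting mask and the trigger type into the third interrupt cell. A small standalone sketch of that arithmetic, assuming the helper definitions from include/dt-bindings/interrupt-controller/arm-gic.h and irq.h (reproduced from memory, so treat them as illustrative rather than authoritative):

    /*
     * Illustrative reconstruction of the dt-binding helpers used above;
     * the canonical definitions live under include/dt-bindings/.
     */
    #include <stdio.h>

    #define GIC_CPU_MASK_RAW(x)      ((x) << 8)
    #define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1)
    #define IRQ_TYPE_LEVEL_LOW       8

    int main(void)
    {
            /* Third interrupt cell used for the timer PPIs above:
             * route to all eight CPUs, active-low level trigger. */
            unsigned int flags = GIC_CPU_MASK_SIMPLE(8) | IRQ_TYPE_LEVEL_LOW;

            printf("0x%x\n", flags); /* prints 0xff08 */
            return 0;
    }
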
index 1524130e43c94c97097b7389dd3951459a1af7ee..6f6b4e469ac933e48cfcea4e27e742edcbb0125e 100644 (file)
                };
 
                pll6: clk@01c20028 {
-                       #clock-cells = <1>;
+                       #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-pll6-clk";
                        reg = <0x01c20028 0x4>;
                        clocks = <&osc24M>;
-                       clock-output-names = "pll6", "pll6x2";
+                       clock-output-names = "pll6";
                };
 
                pll6d2: pll6d2_clk {
                        compatible = "fixed-factor-clock";
                        clock-div = <2>;
                        clock-mult = <1>;
-                       clocks = <&pll6 0>;
-                       clock-output-names = "pll6d2";
+                       clocks = <&pll6>;
+                       clock-output-names = "pll6-d2";
                };
 
-               /* dummy clock until pll6 can be reused */
-               pll8: pll8_clk {
+               pll6x2: pll6x2_clk {
                        #clock-cells = <0>;
-                       compatible = "fixed-clock";
-                       clock-frequency = <1>;
+                       compatible = "fixed-factor-clock";
+                       clock-div = <1>;
+                       clock-mult = <2>;
+                       clocks = <&pll6>;
+                       clock-output-names = "pll6-2x";
+               };
+
+               pll8: clk@01c20044 {
+                       #clock-cells = <0>;
+                       compatible = "allwinner,sun6i-a31-pll6-clk";
+                       reg = <0x01c20044 0x4>;
+                       clocks = <&osc24M>;
                        clock-output-names = "pll8";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
                        clock-output-names = "ahb1";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
+                       clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
                        clock-output-names = "apb2";
                };
 
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>, <&pll8>;
+                       clocks = <&osc24M>, <&pll6>, <&pll8>;
                        clock-output-names = "mmc0",
                                             "mmc0_output",
                                             "mmc0_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>, <&pll8>;
+                       clocks = <&osc24M>, <&pll6>, <&pll8>;
                        clock-output-names = "mmc1",
                                             "mmc1_output",
                                             "mmc1_sample";
                        #clock-cells = <1>;
                        compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
-                       clocks = <&osc24M>, <&pll6 0>, <&pll8>;
+                       clocks = <&osc24M>, <&pll6>, <&pll8>;
                        clock-output-names = "mmc2",
                                             "mmc2_output",
                                             "mmc2_sample";
                        #clock-cells = <0>;
                        compatible = "allwinner,sun8i-a23-mbus-clk";
                        reg = <0x01c2015c 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&pll5>;
+                       clocks = <&osc24M>, <&pll6x2>, <&pll5>;
                        clock-output-names = "mbus";
                };
        };
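
The rework above turns pll6 into a single-output clock and derives the doubled and halved rates through fixed-factor-clock nodes instead of extra pll6 outputs. A rough sketch of the rate math those nodes imply (the function is illustrative; the in-kernel implementation is drivers/clk/clk-fixed-factor.c):

    /* Fixed-factor clocks simply scale their parent: rate = parent * mult / div. */
    static unsigned long fixed_factor_rate(unsigned long parent_rate,
                                           unsigned int mult, unsigned int div)
    {
            return (unsigned long)((unsigned long long)parent_rate * mult / div);
    }

    /*
     * pll6x2 above: mult = 2, div = 1  -> twice the pll6 rate
     * pll6d2 above: mult = 1, div = 2  -> half the pll6 rate
     */
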
index 382bd9fc5647abbe4e23eb0d8ac3ed855cd1cd4d..eb2ccd0a3bd5d4217f7a2a91a7383d87594ef074 100644 (file)
        vmmc-supply = <&reg_vcc3v0>;
        bus-width = <8>;
        non-removable;
+       cap-mmc-hw-reset;
        status = "okay";
 };
 
+&mmc2_8bit_pins {
+       /* Increase drive strength for DDR modes */
+       allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+};
+
 &r_ir {
        status = "okay";
 };
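
The cap-mmc-hw-reset property added above only takes effect once the host driver lets the MMC core parse its DT node; mmc_of_parse() translates it into MMC_CAP_HW_RESET. A hedged sketch of that consumer path, with a made-up driver name:

    #include <linux/mmc/host.h>
    #include <linux/platform_device.h>

    static int demo_mmc_probe(struct platform_device *pdev)
    {
            struct mmc_host *mmc = mmc_alloc_host(0, &pdev->dev);
            int ret;

            if (!mmc)
                    return -ENOMEM;

            /* Reads bus-width, non-removable, cap-mmc-hw-reset, ... from DT */
            ret = mmc_of_parse(mmc);
            if (ret)
                    goto err_free;

            if (mmc->caps & MMC_CAP_HW_RESET)
                    dev_info(&pdev->dev, "eMMC hardware reset line is usable\n");

            /* A real driver would now set mmc->ops and call mmc_add_host() */
            return 0;

    err_free:
            mmc_free_host(mmc);
            return ret;
    }
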
index c0060e4f7379fe775d78f6dfb9d5db4c169f7a05..d7a20d92b1143b1a7713d998cec753a2dfada507 100644 (file)
        status = "okay";
 };
 
-&i2c3 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&i2c3_pins_a>;
-       status = "okay";
-};
-
-&i2c3_pins_a {
-       /* Enable internal pull-up */
-       allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
-};
-
 &ohci0 {
        status = "okay";
 };
        vmmc-supply = <&reg_vcc3v0>;
        bus-width = <8>;
        non-removable;
+       cap-mmc-hw-reset;
        status = "okay";
 };
 
+&mmc2_8bit_pins {
+       /* Increase drive strength for DDR modes */
+       allwinner,drive = <SUN4I_PINCTRL_40_MA>;
+};
+
 &reg_usb1_vbus {
        pinctrl-0 = <&usb1_vbus_pin_optimus>;
        gpio = <&pio 7 4 GPIO_ACTIVE_HIGH>; /* PH4 */
        status = "okay";
 };
 
-&uart4 {
-       pinctrl-names = "default";
-       pinctrl-0 = <&uart4_pins_a>;
-       status = "okay";
-};
-
-&uart4_pins_a {
-       /* Enable internal pull-up */
-       allwinner,pull = <SUN4I_PINCTRL_PULL_UP>;
-};
-
 &usbphy1 {
        phy-supply = <&reg_usb1_vbus>;
        status = "okay";
index e838f206f2a0f34f361bf858d00a6762b4d5739c..f68b3242b33a09b0ff0c197c817b75525c9d9bb9 100644 (file)
                };
 
                mmc0: mmc@01c0f000 {
-                       compatible = "allwinner,sun5i-a13-mmc";
+                       compatible = "allwinner,sun9i-a80-mmc";
                        reg = <0x01c0f000 0x1000>;
                        clocks = <&mmc_config_clk 0>, <&mmc0_clk 0>,
                                 <&mmc0_clk 1>, <&mmc0_clk 2>;
                };
 
                mmc1: mmc@01c10000 {
-                       compatible = "allwinner,sun5i-a13-mmc";
+                       compatible = "allwinner,sun9i-a80-mmc";
                        reg = <0x01c10000 0x1000>;
                        clocks = <&mmc_config_clk 1>, <&mmc1_clk 0>,
                                 <&mmc1_clk 1>, <&mmc1_clk 2>;
                };
 
                mmc2: mmc@01c11000 {
-                       compatible = "allwinner,sun5i-a13-mmc";
+                       compatible = "allwinner,sun9i-a80-mmc";
                        reg = <0x01c11000 0x1000>;
                        clocks = <&mmc_config_clk 2>, <&mmc2_clk 0>,
                                 <&mmc2_clk 1>, <&mmc2_clk 2>;
                };
 
                mmc3: mmc@01c12000 {
-                       compatible = "allwinner,sun5i-a13-mmc";
+                       compatible = "allwinner,sun9i-a80-mmc";
                        reg = <0x01c12000 0x1000>;
                        clocks = <&mmc_config_clk 3>, <&mmc3_clk 0>,
                                 <&mmc3_clk 1>, <&mmc3_clk 2>;
                        mmc2_8bit_pins: mmc2_8bit {
                                allwinner,pins = "PC6", "PC7", "PC8", "PC9",
                                                 "PC10", "PC11", "PC12",
-                                                "PC13", "PC14", "PC15";
+                                                "PC13", "PC14", "PC15",
+                                                "PC16";
                                allwinner,function = "mmc2";
                                allwinner,drive = <SUN4I_PINCTRL_30_MA>;
                                allwinner,pull = <SUN4I_PINCTRL_NO_PULL>;
diff --git a/arch/arm/boot/dts/sunxi-itead-core-common.dtsi b/arch/arm/boot/dts/sunxi-itead-core-common.dtsi
new file mode 100644 (file)
index 0000000..2565d51
--- /dev/null
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2015 - Marcus Cooper <codekipper@gmail.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ *  a) This file is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License as
+ *     published by the Free Software Foundation; either version 2 of the
+ *     License, or (at your option) any later version.
+ *
+ *     This file is distributed in the hope that it will be useful,
+ *     but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *     GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ *  b) Permission is hereby granted, free of charge, to any person
+ *     obtaining a copy of this software and associated documentation
+ *     files (the "Software"), to deal in the Software without
+ *     restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or
+ *     sell copies of the Software, and to permit persons to whom the
+ *     Software is furnished to do so, subject to the following
+ *     conditions:
+ *
+ *     The above copyright notice and this permission notice shall be
+ *     included in all copies or substantial portions of the Software.
+ *
+ *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ *     OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "sunxi-common-regulators.dtsi"
+
+/ {
+       aliases {
+               serial0 = &uart0;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+};
+
+&cpu0 {
+       cpu-supply = <&reg_dcdc2>;
+};
+
+&ehci0 {
+       status = "okay";
+};
+
+&ehci1 {
+       status = "okay";
+};
+
+&i2c0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&i2c0_pins_a>;
+       status = "okay";
+
+       axp209: pmic@34 {
+               reg = <0x34>;
+       };
+};
+
+&i2c1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&i2c1_pins_a>;
+       status = "okay";
+};
+
+&ohci0 {
+       status = "okay";
+};
+
+&ohci1 {
+       status = "okay";
+};
+
+#include "axp209.dtsi"
+
+&reg_dcdc2 {
+       regulator-always-on;
+       regulator-min-microvolt = <1000000>;
+       regulator-max-microvolt = <1400000>;
+       regulator-name = "vdd-cpu";
+};
+
+&reg_dcdc3 {
+       regulator-always-on;
+       regulator-min-microvolt = <1000000>;
+       regulator-max-microvolt = <1400000>;
+       regulator-name = "vdd-int-dll";
+};
+
+&reg_ldo1 {
+       regulator-name = "vdd-rtc";
+};
+
+&reg_ldo2 {
+       regulator-always-on;
+       regulator-min-microvolt = <3000000>;
+       regulator-max-microvolt = <3000000>;
+       regulator-name = "avcc";
+};
+
+&reg_usb1_vbus {
+       status = "okay";
+};
+
+&reg_usb2_vbus {
+       status = "okay";
+};
+
+&uart0 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&uart0_pins_a>;
+       status = "okay";
+};
+
+&usbphy {
+       usb1_vbus-supply = <&reg_usb1_vbus>;
+       usb2_vbus-supply = <&reg_usb2_vbus>;
+       status = "okay";
+};
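
In the new sunxi-itead-core-common.dtsi above, &cpu0 gains a cpu-supply handle whose voltage range is bounded by the reg_dcdc2 constraints. A rough sketch of the consumer side of such a *-supply property (function and variable names are illustrative):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/regulator/consumer.h>

    /* "cpu-supply" in the DT resolves to a regulator lookup named "cpu";
     * requested voltages are clamped to regulator-min/max-microvolt above. */
    static int demo_set_cpu_voltage(struct device *dev, int microvolts)
    {
            struct regulator *vdd_cpu = devm_regulator_get(dev, "cpu");

            if (IS_ERR(vdd_cpu))
                    return PTR_ERR(vdd_cpu);

            return regulator_set_voltage(vdd_cpu, microvolts, microvolts);
    }
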
index d845bd1448b5459f9a6e4ab52c538c541b8832ef..5017ed8ad5c4244b07956ce3243f033e76f98d5d 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performance. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra114-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra114-hsuart", "nvidia,tegra30-hsuart".
         */
        uarta: serial@70006000 {
index 66b4451eb2ca1b98bfbc33263eb31d18df37d1b0..4ee2e63e11d04ec4a7dfa00b6925fc56d08deb50 100644 (file)
        aliases {
                rtc0 = "/i2c@0,7000d000/pmic@40";
                rtc1 = "/rtc@0,7000e000";
+
+               /* This order keeps the mapping DB9 connector <-> ttyS0 */
                serial0 = &uartd;
+               serial1 = &uarta;
+               serial2 = &uartb;
        };
 
        memory {
                };
        };
 
+       /*
+        * First high speed UART, exposed on the expansion connector J3A2
+        *   Pin 41: BR_UART1_TXD
+        *   Pin 44: BR_UART1_RXD
+        */
+       serial@0,70006000 {
+               compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
+               status = "okay";
+       };
+
+       /*
+        * Second high speed UART, exposed on the expansion connector J3A2
+        *   Pin 65: UART2_RXD
+        *   Pin 68: UART2_TXD
+        *   Pin 71: UART2_CTS_L
+        *   Pin 74: UART2_RTS_L
+        */
+       serial@0,70006040 {
+               compatible = "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart";
+               status = "okay";
+       };
+
        /* DB9 serial port */
        serial@0,70006300 {
                status = "okay";
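
The comment in the alias block above explains why serial0 stays pinned to &uartd: the DB9 connector keeps showing up as ttyS0 even with the two expansion-header UARTs enabled. Drivers typically derive that numbering from the aliases with of_alias_get_id(); a minimal sketch (illustrative function name):

    #include <linux/of.h>

    /* Maps a UART's device node to the ttyS<N> line implied by the
     * serialN aliases above (0 for &uartd, 1 for &uarta, 2 for &uartb). */
    static int demo_serial_line(struct device_node *np)
    {
            int id = of_alias_get_id(np, "serial");

            return id < 0 ? 0 : id;
    }
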
index 68669f791c8baa5ec9fd9d27a37e6ef716006861..995289b59e11c7bc5a9ac06c4465839dccf67109 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performance. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra124-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
         */
        uarta: serial@0,70006000 {
index 33173e1bace9cd289eda8b67679ab0df7d777c9d..8fb61b93c226a255a3ddae7c7c767b54c425f34e 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performance. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra20-uart" and to enable the APB DMA based serial
-        * driver, the comptible is "nvidia,tegra20-hsuart".
+        * driver, the compatible is "nvidia,tegra20-hsuart".
         */
        uarta: serial@70006000 {
                compatible = "nvidia,tegra20-uart";
index 313e260529a31283a4e0c01e0b4ac92b8f102112..c6edc8cea34ed79f77c377e819733d783b409191 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performance. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra30-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra30-hsuart", "nvidia,tegra20-hsuart".
         */
        uarta: serial@70006000 {
index 6e556be42ccdca53e1b109d8c664bb43dbfa0635..c4312c4a37678997b06211924c778df6abcc1b91 100644 (file)
 / {
        bl: backlight {
                compatible = "pwm-backlight";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_gpio_bl_on>;
                pwms = <&pwm0 0 5000000 0>;
+               enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
                status = "disabled";
        };
 };
                        >;
                };
 
+               pinctrl_gpio_bl_on: gpio_bl_on {
+                       fsl,pins = <
+                               VF610_PAD_PTC0__GPIO_45         0x22ef
+                       >;
+               };
+
                pinctrl_i2c0: i2c0grp {
                        fsl,pins = <
                                VF610_PAD_PTB14__I2C0_SCL               0x37ff
index a9ceb5bac40ef244dc6ca18602eef4072b87a57d..a5f07e3664da5a25ad86a376c74ef0869b272ba8 100644 (file)
@@ -16,6 +16,8 @@
        aliases {
                can0 = &can0;
                can1 = &can1;
+               ethernet0 = &fec0;
+               ethernet1 = &fec1;
                serial0 = &uart0;
                serial1 = &uart1;
                serial2 = &uart2;
                                status = "disabled";
                        };
 
+                       sai0: sai@4002f000 {
+                               compatible = "fsl,vf610-sai";
+                               reg = <0x4002f000 0x1000>;
+                               interrupts = <84 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks VF610_CLK_SAI0>,
+                                       <&clks VF610_CLK_SAI0_DIV>,
+                                       <&clks 0>, <&clks 0>;
+                               clock-names = "bus", "mclk1", "mclk2", "mclk3";
+                               dma-names = "tx", "rx";
+                               dmas = <&edma0 0 17>,
+                                       <&edma0 0 16>;
+                               status = "disabled";
+                       };
+
+                       sai1: sai@40030000 {
+                               compatible = "fsl,vf610-sai";
+                               reg = <0x40030000 0x1000>;
+                               interrupts = <85 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks VF610_CLK_SAI1>,
+                                       <&clks VF610_CLK_SAI1_DIV>,
+                                       <&clks 0>, <&clks 0>;
+                               clock-names = "bus", "mclk1", "mclk2", "mclk3";
+                               dma-names = "tx", "rx";
+                               dmas = <&edma0 0 19>,
+                                       <&edma0 0 18>;
+                               status = "disabled";
+                       };
+
                        sai2: sai@40031000 {
                                compatible = "fsl,vf610-sai";
                                reg = <0x40031000 0x1000>;
                                status = "disabled";
                        };
 
+                       sai3: sai@40032000 {
+                               compatible = "fsl,vf610-sai";
+                               reg = <0x40032000 0x1000>;
+                               interrupts = <87 IRQ_TYPE_LEVEL_HIGH>;
+                               clocks = <&clks VF610_CLK_SAI3>,
+                                       <&clks VF610_CLK_SAI3_DIV>,
+                                       <&clks 0>, <&clks 0>;
+                               clock-names = "bus", "mclk1", "mclk2", "mclk3";
+                               dma-names = "tx", "rx";
+                               dmas = <&edma0 1 9>,
+                                       <&edma0 1 8>;
+                               status = "disabled";
+                       };
+
                        pit: pit@40037000 {
                                compatible = "fsl,vf610-pit";
                                reg = <0x40037000 0x1000>;
index 2dc6da70ae598af4a43f3b8a9bde05d26d386c77..d3c0e69df2591fc8597325919972da4361079ce4 100644 (file)
@@ -16,7 +16,7 @@
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
-
+#include <asm/div64.h>
 #include <asm/hardware/icst.h>
 
 /*
@@ -29,7 +29,11 @@ EXPORT_SYMBOL(icst525_s2div);
 
 unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
 {
-       return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+       u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+       u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
+       do_div(dividend, divisor);
+       return (unsigned long)dividend;
 }
 
 EXPORT_SYMBOL(icst_hz);
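
The icst_hz() change above moves the VCO math to 64 bits because ref * 2 * (v + 8) can overflow 32 bits at high multiplier settings, and do_div() then performs the 64-by-32 division that a plain '/' cannot do on 32-bit ARM. A small sketch of the do_div() calling convention, with made-up parameter values:

    #include <linux/types.h>
    #include <asm/div64.h>

    static unsigned long demo_icst_style_div(void)
    {
            /* 24 MHz reference, v = 150, r = 1, s2div entry = 8: the product
             * is about 7.6e9, well past what a 32-bit multiply can hold. */
            u64 dividend = 24000000ULL * 2 * (150 + 8);
            u32 divisor = (1 + 2) * 8;

            do_div(dividend, divisor);      /* quotient is left in dividend */
            return (unsigned long)dividend;
    }
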
index 24dcd2bb1215dd9b600fdeea5e59e5077a1de8b4..6ffd7e76f3ce0d96fc72b635d2615d4e38eaaf87 100644 (file)
@@ -26,12 +26,14 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT_DETAILS=y
 CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPUFREQ_DT=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_EXYNOS_CPUIDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
+CONFIG_KERNEL_MODE_NEON=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -193,7 +195,6 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_MAX8997=y
 CONFIG_RTC_DRV_MAX77686=y
-CONFIG_RTC_DRV_MAX77802=y
 CONFIG_RTC_DRV_S5M=y
 CONFIG_RTC_DRV_S3C=y
 CONFIG_DMADEVICES=y
@@ -238,7 +239,12 @@ CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_SPINLOCK=y
 CONFIG_DEBUG_MUTEXES=y
 CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_DEV_S5P=y
+CONFIG_ARM_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM_NEON=m
+CONFIG_CRYPTO_SHA256_ARM=m
+CONFIG_CRYPTO_SHA512_ARM=m
+CONFIG_CRYPTO_AES_ARM_BS=m
 CONFIG_CRC_CCITT=y
 CONFIG_FONTS=y
 CONFIG_FONT_7x14=y
index 2d5253dcc2266174550d709771a28fb8b4161c70..25a6066493e46baf2ac8a99ae77c6515bdbf9aa8 100644 (file)
@@ -47,6 +47,7 @@ CONFIG_PCI=y
 CONFIG_PCI_MSI=y
 CONFIG_PCI_IMX6=y
 CONFIG_SMP=y
+CONFIG_ARM_PSCI=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_AEABI=y
 CONFIG_HIGHMEM=y
@@ -320,6 +321,8 @@ CONFIG_IIO=y
 CONFIG_VF610_ADC=y
 CONFIG_PWM=y
 CONFIG_PWM_IMX=y
+CONFIG_NVMEM=y
+CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
index 314f6be2dca2b4aec6aaa82cc8cfb2ba06c2bb29..e28c660c35e9c0ca1fb97fcc68da828d45b83a36 100644 (file)
@@ -426,6 +426,7 @@ CONFIG_SUNXI_WATCHDOG=y
 CONFIG_IMX2_WDT=y
 CONFIG_TEGRA_WATCHDOG=m
 CONFIG_MESON_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
 CONFIG_DIGICOLOR_WATCHDOG=y
 CONFIG_MFD_AS3711=y
 CONFIG_MFD_AS3722=y
@@ -577,6 +578,7 @@ CONFIG_SND_SOC_WM8978=m
 CONFIG_USB=y
 CONFIG_USB_XHCI_HCD=y
 CONFIG_USB_XHCI_MVEBU=y
+CONFIG_USB_XHCI_RCAR=m
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_MSM=m
 CONFIG_USB_EHCI_EXYNOS=y
@@ -664,7 +666,6 @@ CONFIG_RTC_DRV_MAX8907=y
 CONFIG_RTC_DRV_MAX8997=m
 CONFIG_RTC_DRV_MAX77686=y
 CONFIG_RTC_DRV_RK808=m
-CONFIG_RTC_DRV_MAX77802=m
 CONFIG_RTC_DRV_RS5C372=m
 CONFIG_RTC_DRV_PALMAS=y
 CONFIG_RTC_DRV_ST_LPC=y
index af29780accdc680e8ba9c6d3741fb8a940d318fa..9317e5a5b730c6f506918cb949ffe07de6add073 100644 (file)
@@ -137,6 +137,7 @@ CONFIG_SND=y
 CONFIG_SND_SOC=y
 CONFIG_SND_KIRKWOOD_SOC=y
 CONFIG_SND_SOC_ALC5623=y
+CONFIG_SND_SOC_CS42L51_I2C=y
 CONFIG_SND_SIMPLE_CARD=y
 CONFIG_HID_DRAGONRISE=y
 CONFIG_HID_GYRATION=y
index c6729bf0a8ddb5e272ee97690cd58c68b013b5fa..cf363abd974ec429b9d82d22c739ec080fa3e3ae 100644 (file)
@@ -109,6 +109,7 @@ CONFIG_USB_XHCI_MVEBU=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
 CONFIG_USB_STORAGE=y
+CONFIG_NOP_USB_XCEIV=y
 CONFIG_MMC=y
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
index b47e7c6628c9f8883322cffba9e97c12f6963098..1b2d9b377976820f802f01455da261d2d9b6876a 100644 (file)
@@ -141,6 +141,8 @@ CONFIG_IIO=y
 CONFIG_IIO_SYSFS_TRIGGER=y
 CONFIG_PWM=y
 CONFIG_PWM_MXS=y
+CONFIG_NVMEM=y
+CONFIG_NVMEM_MXS_OCOTP=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
index c5e1943e5427db037b98ce779566aeab1b1c9310..a7151744b85c17d8c7041f33141e7c4cf7809fec 100644 (file)
@@ -50,6 +50,7 @@ CONFIG_SOC_AM33XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
 CONFIG_ARM_THUMBEE=y
+CONFIG_ARM_KERNMEM_PERMS=y
 CONFIG_ARM_ERRATA_411920=y
 CONFIG_ARM_ERRATA_430973=y
 CONFIG_SMP=y
@@ -177,6 +178,7 @@ CONFIG_TI_CPTS=y
 CONFIG_AT803X_PHY=y
 CONFIG_SMSC_PHY=y
 CONFIG_USB_USBNET=m
+CONFIG_USB_NET_SMSC75XX=m
 CONFIG_USB_NET_SMSC95XX=m
 CONFIG_USB_ALI_M5632=y
 CONFIG_USB_AN2720=y
@@ -354,6 +356,11 @@ CONFIG_USB_MUSB_DSPS=m
 CONFIG_USB_INVENTRA_DMA=y
 CONFIG_USB_TI_CPPI41_DMA=y
 CONFIG_USB_DWC3=m
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_PL2303=m
 CONFIG_USB_TEST=m
 CONFIG_AM335X_PHY_USB=y
 CONFIG_USB_GADGET=m
@@ -387,6 +394,7 @@ CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_PWM=m
+CONFIG_LEDS_PCA963X=m
 CONFIG_LEDS_TRIGGERS=y
 CONFIG_LEDS_TRIGGER_TIMER=m
 CONFIG_LEDS_TRIGGER_ONESHOT=m
@@ -449,6 +457,8 @@ CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_SPLIT=y
+CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_SCHEDSTATS=y
 CONFIG_TIMER_STATS=y
index 969738324a5d5f815263a12cf2f580098603cf4a..b7b714c3958c2fdad9bb8658680b7a31ee6d04e8 100644 (file)
@@ -20,7 +20,6 @@ CONFIG_ARCH_R8A7791=y
 CONFIG_ARCH_R8A7793=y
 CONFIG_ARCH_R8A7794=y
 CONFIG_ARCH_SH73A0=y
-CONFIG_CPU_BPREDICT_DISABLE=y
 CONFIG_PL310_ERRATA_588369=y
 CONFIG_ARM_ERRATA_754322=y
 CONFIG_PCI=y
@@ -163,6 +162,8 @@ CONFIG_SND_SOC_RSRC_CARD=y
 CONFIG_SND_SOC_AK4642=y
 CONFIG_SND_SOC_WM8978=y
 CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_RCAR=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_R8A66597_HCD=y
index e1f07764b0d6bf99e8ed9386a24a086fa82be1e1..7d919a9b32e5f6e251e1a42d2ccd8f7d33d62483 100644 (file)
@@ -74,7 +74,7 @@ static inline uint32_t __div64_32(uint64_t *n, uint32_t base)
 static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
 {
        unsigned long long res;
-       unsigned int tmp = 0;
+       register unsigned int tmp asm("ip") = 0;
 
        if (!bias) {
                asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"
@@ -90,12 +90,12 @@ static inline uint64_t __arch_xprod_64(uint64_t m, uint64_t n, bool bias)
                        : "r" (m), "r" (n)
                        : "cc");
        } else {
-               asm (   "umull  %Q0, %R0, %Q1, %Q2\n\t"
-                       "cmn    %Q0, %Q1\n\t"
-                       "adcs   %R0, %R0, %R1\n\t"
-                       "adc    %Q0, %3, #0"
-                       : "=&r" (res)
-                       : "r" (m), "r" (n), "r" (tmp)
+               asm (   "umull  %Q0, %R0, %Q2, %Q3\n\t"
+                       "cmn    %Q0, %Q2\n\t"
+                       "adcs   %R0, %R0, %R2\n\t"
+                       "adc    %Q0, %1, #0"
+                       : "=&r" (res), "+&r" (tmp)
+                       : "r" (m), "r" (n)
                        : "cc");
        }
 
index 194c91b610ffecfd4071da89d16b923c614bf68d..c35c349da06983b5eee05bc8bca52e526ed1bc52 100644 (file)
@@ -79,6 +79,8 @@
 #define rr_lo_hi(a1, a2) a1, a2
 #endif
 
+#define kvm_ksym_ref(kva)      (kva)
+
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
index c79b57bf71c40d1fc2083f67b198377936e0002f..49bf6b1e2177d6dc049baf014dc42a3971f6b6b0 100644 (file)
@@ -273,14 +273,14 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define __va(x)                        ((void *)__phys_to_virt((phys_addr_t)(x)))
 #define pfn_to_kaddr(pfn)      __va((phys_addr_t)(pfn) << PAGE_SHIFT)
 
-extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
+extern unsigned long (*arch_virt_to_idmap)(unsigned long x);
 
 /*
  * These are for systems that have a hardware interconnect supported alias of
  * physical memory for idmap purposes.  Most cases should leave these
- * untouched.
+ * untouched.  Note: this can only return addresses less than 4GiB.
  */
-static inline phys_addr_t __virt_to_idmap(unsigned long x)
+static inline unsigned long __virt_to_idmap(unsigned long x)
 {
        if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
                return arch_virt_to_idmap(x);
index a5635444ca410b49b5109a65535321d9c3df8d5c..d7de19a77d51186b7a673febfe0b0fc8752d2643 100644 (file)
@@ -3,8 +3,6 @@
 
 #ifdef __KERNEL__
 #include <asm-generic/pci-dma-compat.h>
-#include <asm-generic/pci-bridge.h>
-
 #include <asm/mach/pci.h> /* for pci_sys_data */
 
 extern unsigned long pcibios_min_io;
@@ -41,5 +39,4 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }
 
 #endif /* __KERNEL__ */
 #endif
index 619d8cc1ac125f6c6038daef7a814e146cebebb1..92c44760d6569181d2dd6fd191d995b453cee572 100644 (file)
@@ -11,6 +11,7 @@
  *
  */
 
+#include <asm/assembler.h>
 #include "imx-uart.h"
 
 /*
@@ -34,6 +35,7 @@
                .endm
 
                .macro  senduart,rd,rx
+               ARM_BE8(rev \rd, \rd)
                str     \rd, [\rx, #0x40]       @ TXDATA
                .endm
 
@@ -42,6 +44,7 @@
 
                .macro  busyuart,rd,rx
 1002:          ldr     \rd, [\rx, #0x98]       @ SR2
+               ARM_BE8(rev \rd, \rd)
                tst     \rd, #1 << 3            @ TXDC
                beq     1002b                   @ wait until transmit done
                .endm
diff --git a/arch/arm/include/debug/palmchip.S b/arch/arm/include/debug/palmchip.S
new file mode 100644 (file)
index 0000000..6824b2d
--- /dev/null
@@ -0,0 +1,11 @@
+#include <linux/serial_reg.h>
+
+#undef UART_TX
+#undef UART_LSR
+#undef UART_MSR
+
+#define UART_TX 1
+#define UART_LSR 7
+#define UART_MSR 8
+
+#include <debug/8250.S>
index ede692ffa32ed14958f81eec0d7caac8f7efe853..5dd2528e9e45e369d0879185291e6981dbea15e4 100644 (file)
 #define __NR_userfaultfd               (__NR_SYSCALL_BASE+388)
 #define __NR_membarrier                        (__NR_SYSCALL_BASE+389)
 #define __NR_mlock2                    (__NR_SYSCALL_BASE+390)
+#define __NR_copy_file_range           (__NR_SYSCALL_BASE+391)
 
 /*
  * The following SWIs are ARM private.
index ac368bb068d1409af37a8e2585bb837b1ef1f02b..dfc7cd6851ad4bbd09b11ae94ee6056f691b1fcb 100644 (file)
                CALL(sys_userfaultfd)
                CALL(sys_membarrier)
                CALL(sys_mlock2)
+               CALL(sys_copy_file_range)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
index 3ce377f7251f3429668c2e2b563fcd8062c991ae..788e40c1254f65dfd1d942497e163f88ce40f2ec 100644 (file)
@@ -1064,7 +1064,6 @@ ENDPROC(vector_\name)
        .endm
 
        .section .stubs, "ax", %progbits
-__stubs_start:
        @ This must be the first word
        .word   vector_swi
 
@@ -1206,10 +1205,10 @@ vector_addrexcptn:
        .equ    vector_fiq_offset, vector_fiq
 
        .section .vectors, "ax", %progbits
-__vectors_start:
+.L__vectors_start:
        W(b)    vector_rst
        W(b)    vector_und
-       W(ldr)  pc, __vectors_start + 0x1000
+       W(ldr)  pc, .L__vectors_start + 0x1000
        W(b)    vector_pabt
        W(b)    vector_dabt
        W(b)    vector_addrexcptn
index a71501ff6f1877fc9813fd4c3e10c51ae36f04e7..b09561a6d06a00eb9029fc9916ba1f654ca7e1c2 100644 (file)
@@ -62,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
 
        ret = swsusp_save();
        if (ret == 0)
-               _soft_restart(virt_to_phys(cpu_resume), false);
+               _soft_restart(virt_to_idmap(cpu_resume), false);
        return ret;
 }
 
@@ -87,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
        for (pbe = restore_pblist; pbe; pbe = pbe->next)
                copy_page(pbe->orig_address, pbe->address);
 
-       _soft_restart(virt_to_phys(cpu_resume), false);
+       _soft_restart(virt_to_idmap(cpu_resume), false);
 }
 
 static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
index 1d45320ee125d572b108d8e40bb6e0150fea8b9a..ece04a457486c5998d312bce4f3c69b97e0e7b64 100644 (file)
@@ -95,7 +95,7 @@ void __init init_IRQ(void)
                        outer_cache.write_sec = machine_desc->l2c_write_sec;
                ret = l2x0_of_init(machine_desc->l2c_aux_val,
                                   machine_desc->l2c_aux_mask);
-               if (ret)
+               if (ret && ret != -ENODEV)
                        pr_err("L2C: failed to init: %d\n", ret);
        }
 
index 8bf3b7c098881b951df038575c6baf14e84df7b9..59fd0e24c56b150a1f22ab21983d72022fe52701 100644 (file)
@@ -143,10 +143,8 @@ void (*kexec_reinit)(void);
 
 void machine_kexec(struct kimage *image)
 {
-       unsigned long page_list;
-       unsigned long reboot_code_buffer_phys;
-       unsigned long reboot_entry = (unsigned long)relocate_new_kernel;
-       unsigned long reboot_entry_phys;
+       unsigned long page_list, reboot_entry_phys;
+       void (*reboot_entry)(void);
        void *reboot_code_buffer;
 
        /*
@@ -159,9 +157,6 @@ void machine_kexec(struct kimage *image)
 
        page_list = image->head & PAGE_MASK;
 
-       /* we need both effective and real address here */
-       reboot_code_buffer_phys =
-           page_to_pfn(image->control_code_page) << PAGE_SHIFT;
        reboot_code_buffer = page_address(image->control_code_page);
 
        /* Prepare parameters for reboot_code_buffer*/
@@ -174,10 +169,11 @@ void machine_kexec(struct kimage *image)
 
        /* copy our kernel relocation code to the control code page */
        reboot_entry = fncpy(reboot_code_buffer,
-                            reboot_entry,
+                            &relocate_new_kernel,
                             relocate_new_kernel_size);
-       reboot_entry_phys = (unsigned long)reboot_entry +
-               (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
+
+       /* get the identity mapping physical address for the reboot code */
+       reboot_entry_phys = virt_to_idmap(reboot_entry);
 
        pr_info("Bye!\n");
 
index 38269358fd252c6bb93fd58a0478319c436cdfd3..71a2ff9ec4900c58677f12114c85c82e8cfaa575 100644 (file)
@@ -50,7 +50,7 @@ static void __soft_restart(void *addr)
        flush_cache_all();
 
        /* Switch to the identity mapping. */
-       phys_reset = (phys_reset_t)(unsigned long)virt_to_idmap(cpu_reset);
+       phys_reset = (phys_reset_t)virt_to_idmap(cpu_reset);
        phys_reset((unsigned long)addr);
 
        /* Should never get here. */
index 7d0cba6f1cc5efadff31fd0cde2aca3279e120da..139791ed473d5264682c004ea2ea7af8ddd28f5d 100644 (file)
@@ -176,13 +176,13 @@ static struct resource mem_res[] = {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
-               .flags = IORESOURCE_MEM
+               .flags = IORESOURCE_SYSTEM_RAM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
-               .flags = IORESOURCE_MEM
+               .flags = IORESOURCE_SYSTEM_RAM
        }
 };
 
@@ -851,7 +851,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
-               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
                request_resource(&iomem_resource, res);
 
index 08b7847bf9124f004d7214c0e9c4dae23c0fffd2..ec279d161b3287e1df5b51702c499a2ac0187054 100644 (file)
@@ -40,7 +40,7 @@
  * to run the rebalance_domains for all idle cores and the cpu_capacity can be
  * updated during this sequence.
  */
-static DEFINE_PER_CPU(unsigned long, cpu_scale);
+static DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
 
 unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
 {
@@ -306,8 +306,6 @@ void __init init_cpu_topology(void)
                cpu_topo->socket_id = -1;
                cpumask_clear(&cpu_topo->core_sibling);
                cpumask_clear(&cpu_topo->thread_sibling);
-
-               set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
        }
        smp_wmb();
 
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
new file mode 100644 (file)
index 0000000..6f59ef2
--- /dev/null
@@ -0,0 +1,322 @@
+/* ld script to make ARM Linux kernel
+ * taken from the i386 version by Russell King
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#ifdef CONFIG_DEBUG_RODATA
+#include <asm/pgtable.h>
+#endif
+
+#define PROC_INFO                                                      \
+       . = ALIGN(4);                                                   \
+       VMLINUX_SYMBOL(__proc_info_begin) = .;                          \
+       *(.proc.info.init)                                              \
+       VMLINUX_SYMBOL(__proc_info_end) = .;
+
+#define IDMAP_TEXT                                                     \
+       ALIGN_FUNCTION();                                               \
+       VMLINUX_SYMBOL(__idmap_text_start) = .;                         \
+       *(.idmap.text)                                                  \
+       VMLINUX_SYMBOL(__idmap_text_end) = .;                           \
+       . = ALIGN(PAGE_SIZE);                                           \
+       VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;                     \
+       *(.hyp.idmap.text)                                              \
+       VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define ARM_CPU_DISCARD(x)
+#define ARM_CPU_KEEP(x)                x
+#else
+#define ARM_CPU_DISCARD(x)     x
+#define ARM_CPU_KEEP(x)
+#endif
+
+#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
+       defined(CONFIG_GENERIC_BUG)
+#define ARM_EXIT_KEEP(x)       x
+#define ARM_EXIT_DISCARD(x)
+#else
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x)    x
+#endif
+
+OUTPUT_ARCH(arm)
+ENTRY(stext)
+
+#ifndef __ARMEB__
+jiffies = jiffies_64;
+#else
+jiffies = jiffies_64 + 4;
+#endif
+
+SECTIONS
+{
+       /*
+        * XXX: The linker does not define how output sections are
+        * assigned to input sections when there are multiple statements
+        * matching the same input section name.  There is no documented
+        * order of matching.
+        *
+        * unwind exit sections must be discarded before the rest of the
+        * unwind sections get included.
+        */
+       /DISCARD/ : {
+               *(.ARM.exidx.exit.text)
+               *(.ARM.extab.exit.text)
+               ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
+               ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
+               ARM_EXIT_DISCARD(EXIT_TEXT)
+               ARM_EXIT_DISCARD(EXIT_DATA)
+               EXIT_CALL
+#ifndef CONFIG_MMU
+               *(.text.fixup)
+               *(__ex_table)
+#endif
+#ifndef CONFIG_SMP_ON_UP
+               *(.alt.smp.init)
+#endif
+               *(.discard)
+               *(.discard.*)
+       }
+
+       . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
+
+       .head.text : {
+               _text = .;
+               HEAD_TEXT
+       }
+
+#ifdef CONFIG_DEBUG_RODATA
+       . = ALIGN(1<<SECTION_SHIFT);
+#endif
+
+       .text : {                       /* Real text segment            */
+               _stext = .;             /* Text and read-only data      */
+                       IDMAP_TEXT
+                       __exception_text_start = .;
+                       *(.exception.text)
+                       __exception_text_end = .;
+                       IRQENTRY_TEXT
+                       TEXT_TEXT
+                       SCHED_TEXT
+                       LOCK_TEXT
+                       KPROBES_TEXT
+                       *(.gnu.warning)
+                       *(.glue_7)
+                       *(.glue_7t)
+               . = ALIGN(4);
+               *(.got)                 /* Global offset table          */
+                       ARM_CPU_KEEP(PROC_INFO)
+       }
+
+#ifdef CONFIG_DEBUG_RODATA
+       . = ALIGN(1<<SECTION_SHIFT);
+#endif
+       RO_DATA(PAGE_SIZE)
+
+       . = ALIGN(4);
+       __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
+               __start___ex_table = .;
+#ifdef CONFIG_MMU
+               *(__ex_table)
+#endif
+               __stop___ex_table = .;
+       }
+
+#ifdef CONFIG_ARM_UNWIND
+       /*
+        * Stack unwinding tables
+        */
+       . = ALIGN(8);
+       .ARM.unwind_idx : {
+               __start_unwind_idx = .;
+               *(.ARM.exidx*)
+               __stop_unwind_idx = .;
+       }
+       .ARM.unwind_tab : {
+               __start_unwind_tab = .;
+               *(.ARM.extab*)
+               __stop_unwind_tab = .;
+       }
+#endif
+
+       NOTES
+
+       _etext = .;                     /* End of text and rodata section */
+
+       /*
+        * The vectors and stubs are relocatable code, and the
+        * only thing that matters is their relative offsets
+        */
+       __vectors_start = .;
+       .vectors 0 : AT(__vectors_start) {
+               *(.vectors)
+       }
+       . = __vectors_start + SIZEOF(.vectors);
+       __vectors_end = .;
+
+       __stubs_start = .;
+       .stubs 0x1000 : AT(__stubs_start) {
+               *(.stubs)
+       }
+       . = __stubs_start + SIZEOF(.stubs);
+       __stubs_end = .;
+
+       INIT_TEXT_SECTION(8)
+       .exit.text : {
+               ARM_EXIT_KEEP(EXIT_TEXT)
+       }
+       .init.proc.info : {
+               ARM_CPU_DISCARD(PROC_INFO)
+       }
+       .init.arch.info : {
+               __arch_info_begin = .;
+               *(.arch.info.init)
+               __arch_info_end = .;
+       }
+       .init.tagtable : {
+               __tagtable_begin = .;
+               *(.taglist.init)
+               __tagtable_end = .;
+       }
+#ifdef CONFIG_SMP_ON_UP
+       .init.smpalt : {
+               __smpalt_begin = .;
+               *(.alt.smp.init)
+               __smpalt_end = .;
+       }
+#endif
+       .init.pv_table : {
+               __pv_table_begin = .;
+               *(.pv_table)
+               __pv_table_end = .;
+       }
+       .init.data : {
+               INIT_SETUP(16)
+               INIT_CALLS
+               CON_INITCALL
+               SECURITY_INITCALL
+               INIT_RAM_FS
+       }
+
+#ifdef CONFIG_SMP
+       PERCPU_SECTION(L1_CACHE_BYTES)
+#endif
+
+       __data_loc = ALIGN(4);          /* location in binary */
+       . = PAGE_OFFSET + TEXT_OFFSET;
+
+       .data : AT(__data_loc) {
+               _data = .;              /* address in memory */
+               _sdata = .;
+
+               /*
+                * first, the init task union, aligned
+                * to an 8192 byte boundary.
+                */
+               INIT_TASK_DATA(THREAD_SIZE)
+
+               . = ALIGN(PAGE_SIZE);
+               __init_begin = .;
+               INIT_DATA
+               ARM_EXIT_KEEP(EXIT_DATA)
+               . = ALIGN(PAGE_SIZE);
+               __init_end = .;
+
+               NOSAVE_DATA
+               CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
+               READ_MOSTLY_DATA(L1_CACHE_BYTES)
+
+               /*
+                * and the usual data section
+                */
+               DATA_DATA
+               CONSTRUCTORS
+
+               _edata = .;
+       }
+       _edata_loc = __data_loc + SIZEOF(.data);
+
+#ifdef CONFIG_HAVE_TCM
+        /*
+        * We align everything to a page boundary so we can
+        * free it after init has commenced and TCM contents have
+        * been copied to its destination.
+        */
+       .tcm_start : {
+               . = ALIGN(PAGE_SIZE);
+               __tcm_start = .;
+               __itcm_start = .;
+       }
+
+       /*
+        * Link these to the ITCM RAM
+        * Put VMA to the TCM address and LMA to the common RAM
+        * and we'll upload the contents from RAM to TCM and free
+        * the used RAM after that.
+        */
+       .text_itcm ITCM_OFFSET : AT(__itcm_start)
+       {
+               __sitcm_text = .;
+               *(.tcm.text)
+               *(.tcm.rodata)
+               . = ALIGN(4);
+               __eitcm_text = .;
+       }
+
+       /*
+        * Reset the dot pointer, this is needed to create the
+        * relative __dtcm_start below (to be used as extern in code).
+        */
+       . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_itcm);
+
+       .dtcm_start : {
+               __dtcm_start = .;
+       }
+
+       /* TODO: add remainder of ITCM as well, that can be used for data! */
+       .data_dtcm DTCM_OFFSET : AT(__dtcm_start)
+       {
+               . = ALIGN(4);
+               __sdtcm_data = .;
+               *(.tcm.data)
+               . = ALIGN(4);
+               __edtcm_data = .;
+       }
+
+       /* Reset the dot pointer or the linker gets confused */
+       . = ADDR(.dtcm_start) + SIZEOF(.data_dtcm);
+
+       /* End marker for freeing TCM copy in linked object */
+       .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_dtcm)){
+               . = ALIGN(PAGE_SIZE);
+               __tcm_end = .;
+       }
+#endif
+
+       BSS_SECTION(0, 0, 0)
+       _end = .;
+
+       STABS_DEBUG
+}
+
+/*
+ * These must never be empty
+ * If you have to comment these two assert statements out, your
+ * binutils is too old (for other reasons as well)
+ */
+ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
+ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
+
+/*
+ * The HYP init code can't be more than a page long,
+ * and should not cross a page boundary.
+ * The above comment applies as well.
+ */
+ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
+       "HYP init code too big or misaligned")
index 8b60fde5ce48a628e5d1f2c682d2aa0684ed6642..cdc84693091b82cde1f0c0c539c848dfcaea6738 100644 (file)
@@ -3,12 +3,16 @@
  * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
  */
 
+#ifdef CONFIG_XIP_KERNEL
+#include "vmlinux-xip.lds.S"
+#else
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
 #include <asm/pgtable.h>
 #endif
 
@@ -84,17 +88,13 @@ SECTIONS
                *(.discard.*)
        }
 
-#ifdef CONFIG_XIP_KERNEL
-       . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
-#else
        . = PAGE_OFFSET + TEXT_OFFSET;
-#endif
        .head.text : {
                _text = .;
                HEAD_TEXT
        }
 
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
 #endif
 
@@ -117,7 +117,7 @@ SECTIONS
                        ARM_CPU_KEEP(PROC_INFO)
        }
 
-#ifdef CONFIG_DEBUG_RODATA
+#ifdef CONFIG_DEBUG_ALIGN_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
 #endif
        RO_DATA(PAGE_SIZE)
@@ -152,14 +152,13 @@ SECTIONS
 
        _etext = .;                     /* End of text and rodata section */
 
-#ifndef CONFIG_XIP_KERNEL
-# ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
-# else
+#else
        . = ALIGN(PAGE_SIZE);
-# endif
-       __init_begin = .;
 #endif
+       __init_begin = .;
+
        /*
         * The vectors and stubs are relocatable code, and the
         * only thing that matters is their relative offsets
@@ -208,37 +207,28 @@ SECTIONS
                __pv_table_end = .;
        }
        .init.data : {
-#ifndef CONFIG_XIP_KERNEL
                INIT_DATA
-#endif
                INIT_SETUP(16)
                INIT_CALLS
                CON_INITCALL
                SECURITY_INITCALL
                INIT_RAM_FS
        }
-#ifndef CONFIG_XIP_KERNEL
        .exit.data : {
                ARM_EXIT_KEEP(EXIT_DATA)
        }
-#endif
 
 #ifdef CONFIG_SMP
        PERCPU_SECTION(L1_CACHE_BYTES)
 #endif
 
-#ifdef CONFIG_XIP_KERNEL
-       __data_loc = ALIGN(4);          /* location in binary */
-       . = PAGE_OFFSET + TEXT_OFFSET;
-#else
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
        . = ALIGN(1<<SECTION_SHIFT);
 #else
        . = ALIGN(THREAD_SIZE);
 #endif
        __init_end = .;
        __data_loc = .;
-#endif
 
        .data : AT(__data_loc) {
                _data = .;              /* address in memory */
@@ -250,15 +240,6 @@ SECTIONS
                 */
                INIT_TASK_DATA(THREAD_SIZE)
 
-#ifdef CONFIG_XIP_KERNEL
-               . = ALIGN(PAGE_SIZE);
-               __init_begin = .;
-               INIT_DATA
-               ARM_EXIT_KEEP(EXIT_DATA)
-               . = ALIGN(PAGE_SIZE);
-               __init_end = .;
-#endif
-
                NOSAVE_DATA
                CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
                READ_MOSTLY_DATA(L1_CACHE_BYTES)
@@ -351,3 +332,5 @@ ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")
  */
 ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
        "HYP init code too big or misaligned")
+
+#endif /* CONFIG_XIP_KERNEL */
index dda1959f0ddeb947e8a8020d7da0b02bb19f89cc..975da6cfbf5917e604b576027189469d95cd4a52 100644 (file)
@@ -982,7 +982,7 @@ static void cpu_init_hyp_mode(void *dummy)
        pgd_ptr = kvm_mmu_get_httbr();
        stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
        hyp_stack_ptr = stack_page + PAGE_SIZE;
-       vector_ptr = (unsigned long)__kvm_hyp_vector;
+       vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 
        __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
 
@@ -1074,13 +1074,15 @@ static int init_hyp_mode(void)
        /*
         * Map the Hyp-code called directly from the host
         */
-       err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+       err = create_hyp_mappings(kvm_ksym_ref(__kvm_hyp_code_start),
+                                 kvm_ksym_ref(__kvm_hyp_code_end));
        if (err) {
                kvm_err("Cannot map world-switch code\n");
                goto out_free_mappings;
        }
 
-       err = create_hyp_mappings(__start_rodata, __end_rodata);
+       err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
+                                 kvm_ksym_ref(__end_rodata));
        if (err) {
                kvm_err("Cannot map rodata section\n");
                goto out_free_mappings;
diff --git a/arch/arm/mach-cns3xxx/Makefile.boot b/arch/arm/mach-cns3xxx/Makefile.boot
deleted file mode 100644 (file)
index d079de0..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00C00000
index 652a0bb11578927fc0bbc58983bf5aaafd7eb0d3..aeadd2aa12cf6ca9ddab697f3afc8d61ec4c135f 100644 (file)
@@ -17,6 +17,8 @@ menuconfig ARCH_EXYNOS
        select ARM_GIC
        select COMMON_CLK_SAMSUNG
        select EXYNOS_THERMAL
+       select EXYNOS_PMU
+       select EXYNOS_SROM
        select HAVE_ARM_SCU if SMP
        select HAVE_S3C2410_I2C if I2C
        select HAVE_S3C2410_WATCHDOG if WATCHDOG
@@ -25,6 +27,7 @@ menuconfig ARCH_EXYNOS
        select PINCTRL_EXYNOS
        select PM_GENERIC_DOMAINS if PM
        select S5P_DEV_MFC
+       select SOC_SAMSUNG
        select SRAM
        select THERMAL
        select MFD_SYSCON
index 2f306767cdfe425b8f522a20fd756310bc5e2fad..34d29df3e0062342af5510d6a46addbe4fb21083 100644 (file)
@@ -9,7 +9,7 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) += -I$(srctree)/$(src)/include -I$(srctree)
 
 # Core
 
-obj-$(CONFIG_ARCH_EXYNOS)      += exynos.o pmu.o exynos-smc.o firmware.o
+obj-$(CONFIG_ARCH_EXYNOS)      += exynos.o exynos-smc.o firmware.o
 
 obj-$(CONFIG_EXYNOS_CPU_SUSPEND) += pm.o sleep.o
 obj-$(CONFIG_PM_SLEEP)         += suspend.o
diff --git a/arch/arm/mach-exynos/Makefile.boot b/arch/arm/mach-exynos/Makefile.boot
deleted file mode 100644 (file)
index b9862e2..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-   zreladdr-y  += 0x40008000
-params_phys-y  := 0x40000100
index 1c47aee31e9cc60aeabc8c504b41c76c2379a435..99947ad53ddfd983052d140c00bbcebbbf82848f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/platform_device.h>
 #include <linux/pm_domain.h>
 #include <linux/irqchip.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
 
 #include "common.h"
 #include "mfc.h"
-#include "regs-pmu.h"
-
-void __iomem *pmu_base_addr;
 
 static struct map_desc exynos4_iodesc[] __initdata = {
        {
-               .virtual        = (unsigned long)S5P_VA_SROMC,
-               .pfn            = __phys_to_pfn(EXYNOS4_PA_SROMC),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
                .virtual        = (unsigned long)S5P_VA_CMU,
                .pfn            = __phys_to_pfn(EXYNOS4_PA_CMU),
                .length         = SZ_128K,
@@ -64,20 +57,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
        },
 };
 
-static struct map_desc exynos5_iodesc[] __initdata = {
-       {
-               .virtual        = (unsigned long)S5P_VA_SROMC,
-               .pfn            = __phys_to_pfn(EXYNOS5_PA_SROMC),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
-               .virtual        = (unsigned long)S5P_VA_CMU,
-               .pfn            = __phys_to_pfn(EXYNOS5_PA_CMU),
-               .length         = 144 * SZ_1K,
-               .type           = MT_DEVICE,
-       },
-};
-
 static struct platform_device exynos_cpuidle = {
        .name              = "exynos_cpuidle",
 #ifdef CONFIG_ARM_EXYNOS_CPUIDLE
@@ -149,9 +128,6 @@ static void __init exynos_map_io(void)
 {
        if (soc_is_exynos4())
                iotable_init(exynos4_iodesc, ARRAY_SIZE(exynos4_iodesc));
-
-       if (soc_is_exynos5())
-               iotable_init(exynos5_iodesc, ARRAY_SIZE(exynos5_iodesc));
 }
 
 static void __init exynos_init_io(void)
@@ -230,6 +206,10 @@ static const struct of_device_id exynos_cpufreq_matches[] = {
        { .compatible = "samsung,exynos4212", .data = "cpufreq-dt" },
        { .compatible = "samsung,exynos4412", .data = "cpufreq-dt" },
        { .compatible = "samsung,exynos5250", .data = "cpufreq-dt" },
+#ifndef CONFIG_BL_SWITCHER
+       { .compatible = "samsung,exynos5420", .data = "cpufreq-dt" },
+       { .compatible = "samsung,exynos5800", .data = "cpufreq-dt" },
+#endif
        { /* sentinel */ }
 };
 
index de3ae59e1cfbbb4d4d8248224af62243cb23b548..351e839fcb040174adc72bcf3af44957eb75225f 100644 (file)
@@ -25,7 +25,6 @@
 #define EXYNOS_PA_CHIPID               0x10000000
 
 #define EXYNOS4_PA_CMU                 0x10030000
-#define EXYNOS5_PA_CMU                 0x10010000
 
 #define EXYNOS4_PA_DMC0                        0x10400000
 #define EXYNOS4_PA_DMC1                        0x10410000
 #define EXYNOS4_PA_COREPERI            0x10500000
 #define EXYNOS4_PA_L2CC                        0x10502000
 
-#define EXYNOS4_PA_SROMC               0x12570000
-#define EXYNOS5_PA_SROMC               0x12250000
-
-/* Compatibility UART */
-
-#define EXYNOS5440_PA_UART0            0x000B0000
-
 #endif /* __ASM_ARCH_MAP_H */
index 56978199c4798fa236394c232e50d58a61e4fd3d..f086bf615b2972ee640e61256cba3438c9c9ae2a 100644 (file)
 #include <linux/io.h>
 #include <linux/of_address.h>
 #include <linux/syscore_ops.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
 
 #include <asm/cputype.h>
 #include <asm/cp15.h>
 #include <asm/mcpm.h>
 #include <asm/smp_plat.h>
 
-#include "regs-pmu.h"
 #include "common.h"
 
 #define EXYNOS5420_CPUS_PER_CLUSTER    4
index 5bd9559786ba77d2eade9c29ab1983e8746f4e54..da46c639f62128024503583225e312de96fc71fb 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/smp.h>
 #include <linux/io.h>
 #include <linux/of_address.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cp15.h>
@@ -30,7 +31,6 @@
 #include <mach/map.h>
 
 #include "common.h"
-#include "regs-pmu.h"
 
 extern void exynos4_secondary_startup(void);
 
index 9c1506b499bca6f4599a20ece67494078a95d4d0..b9b9186f878148d604616eea5344e0f88e3d3b5a 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/cpu_pm.h>
 #include <linux/io.h>
 #include <linux/err.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
 
 #include <asm/firmware.h>
 #include <asm/smp_scu.h>
@@ -29,8 +31,6 @@
 #include <plat/pm-common.h>
 
 #include "common.h"
-#include "exynos-pmu.h"
-#include "regs-pmu.h"
 
 static inline void __iomem *exynos_boot_vector_addr(void)
 {
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
deleted file mode 100644 (file)
index dbf9fe9..0000000
+++ /dev/null
@@ -1,967 +0,0 @@
-/*
- * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com/
- *
- * EXYNOS - CPU PMU(Power Management Unit) support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-
-#include <asm/cputype.h>
-
-#include "exynos-pmu.h"
-#include "regs-pmu.h"
-
-#define PMU_TABLE_END  (-1U)
-
-struct exynos_pmu_conf {
-       unsigned int offset;
-       u8 val[NUM_SYS_POWERDOWN];
-};
-
-struct exynos_pmu_data {
-       const struct exynos_pmu_conf *pmu_config;
-       const struct exynos_pmu_conf *pmu_config_extra;
-
-       void (*pmu_init)(void);
-       void (*powerdown_conf)(enum sys_powerdown);
-       void (*powerdown_conf_extra)(enum sys_powerdown);
-};
-
-struct exynos_pmu_context {
-       struct device *dev;
-       const struct exynos_pmu_data *pmu_data;
-};
-
-static void __iomem *pmu_base_addr;
-static struct exynos_pmu_context *pmu_context;
-
-static inline void pmu_raw_writel(u32 val, u32 offset)
-{
-       writel_relaxed(val, pmu_base_addr + offset);
-}
-
-static inline u32 pmu_raw_readl(u32 offset)
-{
-       return readl_relaxed(pmu_base_addr + offset);
-}
-
-static struct exynos_pmu_conf exynos3250_pmu_config[] = {
-       /* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } */
-       { EXYNOS3_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
-       { EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
-       { EXYNOS3_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
-       { EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
-       { EXYNOS3_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
-       { EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
-       { EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS3_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
-       { EXYNOS3_ARM_L2_SYS_PWR_REG,                   { 0x0, 0x0, 0x3} },
-       { EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x1, 0x0} },
-       { EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x1, 0x0} },
-       { EXYNOS3_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
-       { EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
-       { EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
-       { EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG,       { 0x1, 0x1, 0x1} },
-       { EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
-       { EXYNOS3_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x1} },
-       { EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS3_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
-       { EXYNOS3_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
-       { EXYNOS3_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x3, 0x3} },
-       { EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG,          { 0x3, 0x0, 0x0} },
-       { EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x1} },
-       { EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG,          { 0x3, 0x3, 0x3} },
-       { EXYNOS3_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS3_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x1, 0x1} },
-       { EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG,      { 0x1, 0x1, 0x0} },
-       { EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
-       { EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG,     { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS3_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x0} },
-       { EXYNOS3_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
-       { EXYNOS3_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
-       { EXYNOS3_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
-       { EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
-       { EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
-       { EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
-       { EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
-       { EXYNOS3_CAM_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS3_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS3_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS3_LCD0_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
-       { EXYNOS3_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS3_MAUDIO_SYS_PWR_REG,                   { 0x7, 0x0, 0x0} },
-       { EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos4210_pmu_config[] = {
-       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } */
-       { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_CORE1_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE1,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL1,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_COMMON_LOWPWR,                { 0x0, 0x0, 0x2 } },
-       { S5P_L2_0_LOWPWR,                      { 0x2, 0x2, 0x3 } },
-       { S5P_L2_1_LOWPWR,                      { 0x2, 0x2, 0x3 } },
-       { S5P_CMU_ACLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_SCLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_LOWPWR,                 { 0x1, 0x1, 0x0 } },
-       { S5P_APLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_MPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_VPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_EPLL_SYSCLK_LOWPWR,               { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR,     { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_GPSALIVE_LOWPWR,        { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_CAM_LOWPWR,           { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_TV_LOWPWR,            { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_MFC_LOWPWR,           { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_G3D_LOWPWR,           { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_LCD0_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_LCD1_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR,        { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_CLKSTOP_GPS_LOWPWR,           { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_CAM_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_TV_LOWPWR,              { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_MFC_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_G3D_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_LCD0_LOWPWR,            { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_LCD1_LOWPWR,            { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_GPS_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_TOP_BUS_LOWPWR,                   { 0x3, 0x0, 0x0 } },
-       { S5P_TOP_RETENTION_LOWPWR,             { 0x1, 0x0, 0x1 } },
-       { S5P_TOP_PWR_LOWPWR,                   { 0x3, 0x0, 0x3 } },
-       { S5P_LOGIC_RESET_LOWPWR,               { 0x1, 0x1, 0x0 } },
-       { S5P_ONENAND_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_MODIMIF_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_G2D_ACP_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_USBOTG_MEM_LOWPWR,                { 0x3, 0x0, 0x0 } },
-       { S5P_HSMMC_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_CSSYS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_SECSS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_PCIE_MEM_LOWPWR,                  { 0x3, 0x0, 0x0 } },
-       { S5P_SATA_MEM_LOWPWR,                  { 0x3, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_DRAM_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MAUDIO_LOWPWR,      { 0x1, 0x1, 0x0 } },
-       { S5P_PAD_RETENTION_GPIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_UART_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MMCA_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MMCB_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_EBIA_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_EBIB_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_ISOLATION_LOWPWR,   { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_ALV_SEL_LOWPWR,     { 0x1, 0x0, 0x0 } },
-       { S5P_XUSBXTI_LOWPWR,                   { 0x1, 0x1, 0x0 } },
-       { S5P_XXTI_LOWPWR,                      { 0x1, 0x1, 0x0 } },
-       { S5P_EXT_REGULATOR_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_GPIO_MODE_LOWPWR,                 { 0x1, 0x0, 0x0 } },
-       { S5P_GPIO_MODE_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CAM_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_TV_LOWPWR,                        { 0x7, 0x0, 0x0 } },
-       { S5P_MFC_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_G3D_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_LCD0_LOWPWR,                      { 0x7, 0x0, 0x0 } },
-       { S5P_LCD1_LOWPWR,                      { 0x7, 0x0, 0x0 } },
-       { S5P_MAUDIO_LOWPWR,                    { 0x7, 0x7, 0x0 } },
-       { S5P_GPS_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_GPS_ALIVE_LOWPWR,                 { 0x7, 0x0, 0x0 } },
-       { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
-       { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_CORE1_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE1,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL1,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ISP_ARM_LOWPWR,                   { 0x1, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR,     { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR,   { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_COMMON_LOWPWR,                { 0x0, 0x0, 0x2 } },
-       { S5P_L2_0_LOWPWR,                      { 0x0, 0x0, 0x3 } },
-       /* XXX_OPTION register should be set other field */
-       { S5P_ARM_L2_0_OPTION,                  { 0x10, 0x10, 0x0 } },
-       { S5P_L2_1_LOWPWR,                      { 0x0, 0x0, 0x3 } },
-       { S5P_ARM_L2_1_OPTION,                  { 0x10, 0x10, 0x0 } },
-       { S5P_CMU_ACLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_SCLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_LOWPWR,                 { 0x1, 0x1, 0x0 } },
-       { S5P_DRAM_FREQ_DOWN_LOWPWR,            { 0x1, 0x1, 0x1 } },
-       { S5P_DDRPHY_DLLOFF_LOWPWR,             { 0x1, 0x1, 0x1 } },
-       { S5P_LPDDR_PHY_DLL_LOCK_LOWPWR,        { 0x1, 0x1, 0x1 } },
-       { S5P_CMU_ACLKSTOP_COREBLK_LOWPWR,      { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_SCLKSTOP_COREBLK_LOWPWR,      { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_COREBLK_LOWPWR,         { 0x1, 0x1, 0x0 } },
-       { S5P_APLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_MPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_VPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
-       { S5P_EPLL_SYSCLK_LOWPWR,               { 0x1, 0x1, 0x0 } },
-       { S5P_MPLLUSER_SYSCLK_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR,     { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_GPSALIVE_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_CAM_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_TV_LOWPWR,            { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_MFC_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_G3D_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_LCD0_LOWPWR,          { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_ISP_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_CLKSTOP_GPS_LOWPWR,           { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_CAM_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_TV_LOWPWR,              { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_MFC_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_G3D_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_LCD0_LOWPWR,            { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_ISP_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_RESET_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_CMU_RESET_GPS_LOWPWR,             { 0x1, 0x0, 0x0 } },
-       { S5P_TOP_BUS_LOWPWR,                   { 0x3, 0x0, 0x0 } },
-       { S5P_TOP_RETENTION_LOWPWR,             { 0x1, 0x0, 0x1 } },
-       { S5P_TOP_PWR_LOWPWR,                   { 0x3, 0x0, 0x3 } },
-       { S5P_TOP_BUS_COREBLK_LOWPWR,           { 0x3, 0x0, 0x0 } },
-       { S5P_TOP_RETENTION_COREBLK_LOWPWR,     { 0x1, 0x0, 0x1 } },
-       { S5P_TOP_PWR_COREBLK_LOWPWR,           { 0x3, 0x0, 0x3 } },
-       { S5P_LOGIC_RESET_LOWPWR,               { 0x1, 0x1, 0x0 } },
-       { S5P_OSCCLK_GATE_LOWPWR,               { 0x1, 0x0, 0x1 } },
-       { S5P_LOGIC_RESET_COREBLK_LOWPWR,       { 0x1, 0x1, 0x0 } },
-       { S5P_OSCCLK_GATE_COREBLK_LOWPWR,       { 0x1, 0x0, 0x1 } },
-       { S5P_ONENAND_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_ONENAND_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
-       { S5P_HSI_MEM_LOWPWR,                   { 0x3, 0x0, 0x0 } },
-       { S5P_HSI_MEM_OPTION,                   { 0x10, 0x10, 0x0 } },
-       { S5P_G2D_ACP_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_G2D_ACP_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
-       { S5P_USBOTG_MEM_LOWPWR,                { 0x3, 0x0, 0x0 } },
-       { S5P_USBOTG_MEM_OPTION,                { 0x10, 0x10, 0x0 } },
-       { S5P_HSMMC_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_HSMMC_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
-       { S5P_CSSYS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_CSSYS_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
-       { S5P_SECSS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
-       { S5P_SECSS_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
-       { S5P_ROTATOR_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
-       { S5P_ROTATOR_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
-       { S5P_PAD_RETENTION_DRAM_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MAUDIO_LOWPWR,      { 0x1, 0x1, 0x0 } },
-       { S5P_PAD_RETENTION_GPIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_UART_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MMCA_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_MMCB_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_EBIA_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_EBIB_LOWPWR,        { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR,{ 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_ISOLATION_LOWPWR,   { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_ISOLATION_COREBLK_LOWPWR,     { 0x1, 0x0, 0x0 } },
-       { S5P_PAD_RETENTION_ALV_SEL_LOWPWR,     { 0x1, 0x0, 0x0 } },
-       { S5P_XUSBXTI_LOWPWR,                   { 0x1, 0x1, 0x0 } },
-       { S5P_XXTI_LOWPWR,                      { 0x1, 0x1, 0x0 } },
-       { S5P_EXT_REGULATOR_LOWPWR,             { 0x1, 0x1, 0x0 } },
-       { S5P_GPIO_MODE_LOWPWR,                 { 0x1, 0x0, 0x0 } },
-       { S5P_GPIO_MODE_COREBLK_LOWPWR,         { 0x1, 0x0, 0x0 } },
-       { S5P_GPIO_MODE_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
-       { S5P_TOP_ASB_RESET_LOWPWR,             { 0x1, 0x1, 0x1 } },
-       { S5P_TOP_ASB_ISOLATION_LOWPWR,         { 0x1, 0x0, 0x1 } },
-       { S5P_CAM_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_TV_LOWPWR,                        { 0x7, 0x0, 0x0 } },
-       { S5P_MFC_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_G3D_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_LCD0_LOWPWR,                      { 0x7, 0x0, 0x0 } },
-       { S5P_ISP_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_MAUDIO_LOWPWR,                    { 0x7, 0x7, 0x0 } },
-       { S5P_GPS_LOWPWR,                       { 0x7, 0x0, 0x0 } },
-       { S5P_GPS_ALIVE_LOWPWR,                 { 0x7, 0x0, 0x0 } },
-       { S5P_CMU_SYSCLK_ISP_LOWPWR,            { 0x1, 0x0, 0x0 } },
-       { S5P_CMU_SYSCLK_GPS_LOWPWR,            { 0x1, 0x0, 0x0 } },
-       { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
-       { S5P_ARM_CORE2_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE2,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL2,                 { 0x0, 0x0, 0x0 } },
-       { S5P_ARM_CORE3_LOWPWR,                 { 0x0, 0x0, 0x2 } },
-       { S5P_DIS_IRQ_CORE3,                    { 0x0, 0x0, 0x0 } },
-       { S5P_DIS_IRQ_CENTRAL3,                 { 0x0, 0x0, 0x0 } },
-       { PMU_TABLE_END,},
-};
-
-static const struct exynos_pmu_conf exynos5250_pmu_config[] = {
-       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } */
-       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_FSYS_ARM_SYS_PWR_REG,                 { 0x1, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
-       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
-       { EXYNOS5_ARM_L2_SYS_PWR_REG,                   { 0x3, 0x3, 0x3} },
-       { EXYNOS5_ARM_L2_OPTION,                        { 0x10, 0x10, 0x0 } },
-       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
-       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
-       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
-       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
-       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
-       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
-       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x0, 0x1} },
-       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x0, 0x3} },
-       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
-       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x1} },
-       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x3} },
-       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
-       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
-       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
-       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x1} },
-       { EXYNOS5_USBOTG_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_G2D_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
-       { EXYNOS5_USBDRD_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_SDMMC_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
-       { EXYNOS5_CSSYS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
-       { EXYNOS5_SECSS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
-       { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG,              { 0x3, 0x0, 0x0} },
-       { EXYNOS5_INTRAM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_INTROM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_JPEG_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
-       { EXYNOS5_JPEG_MEM_OPTION,                      { 0x10, 0x10, 0x0} },
-       { EXYNOS5_HSI_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
-       { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
-       { EXYNOS5_SATA_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
-       { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x1} },
-       { EXYNOS5_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
-       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
-       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
-       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x0, 0x1} },
-       { EXYNOS5_GSCL_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
-       { EXYNOS5_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS5_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS5_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
-       { EXYNOS5_DISP1_SYS_PWR_REG,                    { 0x7, 0x0, 0x0} },
-       { EXYNOS5_MAU_SYS_PWR_REG,                      { 0x7, 0x7, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG,          { 0x1, 0x1, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG,           { 0x1, 0x1, 0x0} },
-       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { PMU_TABLE_END,},
-};
-
-static struct exynos_pmu_conf exynos5420_pmu_config[] = {
-       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } */
-       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_ARM_CORE2_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_ARM_CORE3_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_CORE0_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_CORE1_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_CORE2_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_CORE3_SYS_PWR_REG,                     { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                          { 0x1, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_ARM_COMMON_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_COMMON_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5_ARM_L2_SYS_PWR_REG,                           { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_KFC_L2_SYS_PWR_REG,                        { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,                     { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,                     { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                        { 0x1, 0x1, 0x0} },
-       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
-       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,                 { 0x1, 0x1, 0x0} },
-       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,                   { 0x1, 0x0, 0x1} },
-       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,                    { 0x1, 0x1, 0x1} },
-       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,                   { 0x1, 0x0, 0x1} },
-       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x1, 0x0} },
-       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
-       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG,                   { 0x1, 0x0, 0x0} },
-       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                          { 0x3, 0x0, 0x0} },
-       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,                    { 0x1, 0x1, 0x1} },
-       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                          { 0x3, 0x3, 0x0} },
-       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,                   { 0x3, 0x0, 0x0} },
-       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
-       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,                   { 0x3, 0x0, 0x0} },
-       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,                      { 0x1, 0x1, 0x0} },
-       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,                      { 0x1, 0x0, 0x1} },
-       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,               { 0x1, 0x0, 0x0} },
-       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,               { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_INTRAM_MEM_SYS_PWR_REG,                    { 0x3, 0x0, 0x3} },
-       { EXYNOS5420_INTROM_MEM_SYS_PWR_REG,                    { 0x3, 0x0, 0x3} },
-       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,               { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG,             { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG,             { 0x1, 0x0, 0x0} },
-       { EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,                    { 0x1, 0x1, 0x0} },
-       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,             { 0x1, 0x0, 0x0} },
-       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,                      { 0x1, 0x0, 0x0} },
-       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                          { 0x1, 0x1, 0x0} },
-       { EXYNOS5_XXTI_SYS_PWR_REG,                             { 0x1, 0x1, 0x0} },
-       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,                    { 0x1, 0x1, 0x0} },
-       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                        { 0x1, 0x0, 0x0} },
-       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,                 { 0x1, 0x1, 0x0} },
-       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,                    { 0x1, 0x1, 0x0} },
-       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,                    { 0x1, 0x1, 0x0} },
-       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
-       { EXYNOS5_GSCL_SYS_PWR_REG,                             { 0x7, 0x0, 0x0} },
-       { EXYNOS5_ISP_SYS_PWR_REG,                              { 0x7, 0x0, 0x0} },
-       { EXYNOS5_MFC_SYS_PWR_REG,                              { 0x7, 0x0, 0x0} },
-       { EXYNOS5_G3D_SYS_PWR_REG,                              { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_DISP1_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_MAU_SYS_PWR_REG,                           { 0x7, 0x7, 0x0} },
-       { EXYNOS5420_G2D_SYS_PWR_REG,                           { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_MSC_SYS_PWR_REG,                           { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_FSYS_SYS_PWR_REG,                          { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_FSYS2_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_PSGEN_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_PERIC_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5420_WCORE_SYS_PWR_REG,                         { 0x7, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,                 { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,                  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,                  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,                  { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,                  { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,              { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,                    { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,               { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,                 { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,                 { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,                 { 0x0, 0x0, 0x0} },
-       { EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
-       { PMU_TABLE_END,},
-};
-
-static unsigned int const exynos3250_list_feed[] = {
-       EXYNOS3_ARM_CORE_OPTION(0),
-       EXYNOS3_ARM_CORE_OPTION(1),
-       EXYNOS3_ARM_CORE_OPTION(2),
-       EXYNOS3_ARM_CORE_OPTION(3),
-       EXYNOS3_ARM_COMMON_OPTION,
-       EXYNOS3_TOP_PWR_OPTION,
-       EXYNOS3_CORE_TOP_PWR_OPTION,
-       S5P_CAM_OPTION,
-       S5P_MFC_OPTION,
-       S5P_G3D_OPTION,
-       S5P_LCD0_OPTION,
-       S5P_ISP_OPTION,
-};
-
-static void exynos3250_powerdown_conf_extra(enum sys_powerdown mode)
-{
-       unsigned int i;
-       unsigned int tmp;
-
-       /* Enable only SC_FEEDBACK */
-       for (i = 0; i < ARRAY_SIZE(exynos3250_list_feed); i++) {
-               tmp = pmu_raw_readl(exynos3250_list_feed[i]);
-               tmp &= ~(EXYNOS3_OPTION_USE_SC_COUNTER);
-               tmp |= EXYNOS3_OPTION_USE_SC_FEEDBACK;
-               pmu_raw_writel(tmp, exynos3250_list_feed[i]);
-       }
-
-       if (mode != SYS_SLEEP)
-               return;
-
-       pmu_raw_writel(XUSBXTI_DURATION, EXYNOS3_XUSBXTI_DURATION);
-       pmu_raw_writel(XXTI_DURATION, EXYNOS3_XXTI_DURATION);
-       pmu_raw_writel(EXT_REGULATOR_DURATION, EXYNOS3_EXT_REGULATOR_DURATION);
-       pmu_raw_writel(EXT_REGULATOR_COREBLK_DURATION,
-                      EXYNOS3_EXT_REGULATOR_COREBLK_DURATION);
-}
-
-static unsigned int const exynos5_list_both_cnt_feed[] = {
-       EXYNOS5_ARM_CORE0_OPTION,
-       EXYNOS5_ARM_CORE1_OPTION,
-       EXYNOS5_ARM_COMMON_OPTION,
-       EXYNOS5_GSCL_OPTION,
-       EXYNOS5_ISP_OPTION,
-       EXYNOS5_MFC_OPTION,
-       EXYNOS5_G3D_OPTION,
-       EXYNOS5_DISP1_OPTION,
-       EXYNOS5_MAU_OPTION,
-       EXYNOS5_TOP_PWR_OPTION,
-       EXYNOS5_TOP_PWR_SYSMEM_OPTION,
-};
-
-static unsigned int const exynos5_list_disable_wfi_wfe[] = {
-       EXYNOS5_ARM_CORE1_OPTION,
-       EXYNOS5_FSYS_ARM_OPTION,
-       EXYNOS5_ISP_ARM_OPTION,
-};
-
-static unsigned int const exynos5420_list_disable_pmu_reg[] = {
-       EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,
-       EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,
-       EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,
-       EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,
-       EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,
-       EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,
-       EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,
-       EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,
-       EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,
-       EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,
-       EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,
-       EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,
-};
-
-static void exynos5420_powerdown_conf(enum sys_powerdown mode)
-{
-       u32 this_cluster;
-
-       this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
-
-       /*
-        * Write the cluster id to the IROM register to ensure that we
-        * wake up on the current cluster.
-        */
-       pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
-}
-
-
-static void exynos5_powerdown_conf(enum sys_powerdown mode)
-{
-       unsigned int i;
-       unsigned int tmp;
-
-       /*
-        * Enable both SC_FEEDBACK and SC_COUNTER
-        */
-       for (i = 0; i < ARRAY_SIZE(exynos5_list_both_cnt_feed); i++) {
-               tmp = pmu_raw_readl(exynos5_list_both_cnt_feed[i]);
-               tmp |= (EXYNOS5_USE_SC_FEEDBACK |
-                       EXYNOS5_USE_SC_COUNTER);
-               pmu_raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
-       }
-
-       /*
-        * Enable SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD
-        */
-       tmp = pmu_raw_readl(EXYNOS5_ARM_COMMON_OPTION);
-       tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
-       pmu_raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
-
-       /*
-        * Disable WFI/WFE on XXX_OPTION
-        */
-       for (i = 0; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe); i++) {
-               tmp = pmu_raw_readl(exynos5_list_disable_wfi_wfe[i]);
-               tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
-                        EXYNOS5_OPTION_USE_STANDBYWFI);
-               pmu_raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
-       }
-}
-
-void exynos_sys_powerdown_conf(enum sys_powerdown mode)
-{
-       unsigned int i;
-       const struct exynos_pmu_data *pmu_data;
-
-       if (!pmu_context)
-               return;
-
-       pmu_data = pmu_context->pmu_data;
-
-       if (pmu_data->powerdown_conf)
-               pmu_data->powerdown_conf(mode);
-
-       if (pmu_data->pmu_config) {
-               for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
-                       pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
-                                       pmu_data->pmu_config[i].offset);
-       }
-
-       if (pmu_data->powerdown_conf_extra)
-               pmu_data->powerdown_conf_extra(mode);
-
-       if (pmu_data->pmu_config_extra) {
-               for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
-                       pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
-                                       pmu_data->pmu_config_extra[i].offset);
-       }
-}
-
-static void exynos3250_pmu_init(void)
-{
-       unsigned int value;
-
-       /*
-        * To prevent issuing a new bus request from the L2 memory system:
-        * if the core status is power down, L2 power down should be set to '1'.
-        */
-       value = pmu_raw_readl(EXYNOS3_ARM_COMMON_OPTION);
-       value |= EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
-       pmu_raw_writel(value, EXYNOS3_ARM_COMMON_OPTION);
-
-       /* Enable USE_STANDBY_WFI for all CORE */
-       pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
-
-       /*
-        * Set PSHOLD port for output high
-        */
-       value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
-       value |= S5P_PS_HOLD_OUTPUT_HIGH;
-       pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
-
-       /*
-        * Enable signal for PSHOLD port
-        */
-       value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
-       value |= S5P_PS_HOLD_EN;
-       pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
-}
-
-static void exynos5250_pmu_init(void)
-{
-       unsigned int value;
-       /*
-        * When SYS_WDTRESET is set, watchdog timer reset request
-        * is ignored by power management unit.
-        */
-       value = pmu_raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
-       value &= ~EXYNOS5_SYS_WDTRESET;
-       pmu_raw_writel(value, EXYNOS5_AUTO_WDTRESET_DISABLE);
-
-       value = pmu_raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
-       value &= ~EXYNOS5_SYS_WDTRESET;
-       pmu_raw_writel(value, EXYNOS5_MASK_WDTRESET_REQUEST);
-}
-
-static void exynos5420_pmu_init(void)
-{
-       unsigned int value;
-       int i;
-
-       /*
-        * Set the CMU_RESET, CMU_SYSCLK and CMU_CLKSTOP registers
-        * for local power blocks to Low initially as per Table 8-4:
-        * "System-Level Power-Down Configuration Registers".
-        */
-       for (i = 0; i < ARRAY_SIZE(exynos5420_list_disable_pmu_reg); i++)
-               pmu_raw_writel(0, exynos5420_list_disable_pmu_reg[i]);
-
-       /* Enable USE_STANDBY_WFI for all CORE */
-       pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
-
-       value  = pmu_raw_readl(EXYNOS_L2_OPTION(0));
-       value &= ~EXYNOS5_USE_RETENTION;
-       pmu_raw_writel(value, EXYNOS_L2_OPTION(0));
-
-       value = pmu_raw_readl(EXYNOS_L2_OPTION(1));
-       value &= ~EXYNOS5_USE_RETENTION;
-       pmu_raw_writel(value, EXYNOS_L2_OPTION(1));
-
-       /*
-        * If L2_COMMON is turned off, clocks related to ATB async
-        * bridge are gated. Thus, when ISP power is gated, LPI
-        * may get stuck.
-        */
-       value = pmu_raw_readl(EXYNOS5420_LPI_MASK);
-       value |= EXYNOS5420_ATB_ISP_ARM;
-       pmu_raw_writel(value, EXYNOS5420_LPI_MASK);
-
-       value  = pmu_raw_readl(EXYNOS5420_LPI_MASK1);
-       value |= EXYNOS5420_ATB_KFC;
-       pmu_raw_writel(value, EXYNOS5420_LPI_MASK1);
-
-       /* Prevent issue of new bus request from L2 memory */
-       value = pmu_raw_readl(EXYNOS5420_ARM_COMMON_OPTION);
-       value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
-       pmu_raw_writel(value, EXYNOS5420_ARM_COMMON_OPTION);
-
-       value = pmu_raw_readl(EXYNOS5420_KFC_COMMON_OPTION);
-       value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
-       pmu_raw_writel(value, EXYNOS5420_KFC_COMMON_OPTION);
-
-       /* This setting is to reduce suspend/resume time */
-       pmu_raw_writel(DUR_WAIT_RESET, EXYNOS5420_LOGIC_RESET_DURATION3);
-
-       /* Serialized CPU wakeup of Eagle */
-       pmu_raw_writel(SPREAD_ENABLE, EXYNOS5420_ARM_INTR_SPREAD_ENABLE);
-
-       pmu_raw_writel(SPREAD_USE_STANDWFI,
-                       EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI);
-
-       pmu_raw_writel(0x1, EXYNOS5420_UP_SCHEDULER);
-       pr_info("EXYNOS5420 PMU initialized\n");
-}
-
-static const struct exynos_pmu_data exynos3250_pmu_data = {
-       .pmu_config     = exynos3250_pmu_config,
-       .pmu_init       = exynos3250_pmu_init,
-       .powerdown_conf_extra   = exynos3250_powerdown_conf_extra,
-};
-
-static const struct exynos_pmu_data exynos4210_pmu_data = {
-       .pmu_config     = exynos4210_pmu_config,
-};
-
-static const struct exynos_pmu_data exynos4212_pmu_data = {
-       .pmu_config     = exynos4x12_pmu_config,
-};
-
-static const struct exynos_pmu_data exynos4412_pmu_data = {
-       .pmu_config             = exynos4x12_pmu_config,
-       .pmu_config_extra       = exynos4412_pmu_config,
-};
-
-static const struct exynos_pmu_data exynos5250_pmu_data = {
-       .pmu_config     = exynos5250_pmu_config,
-       .pmu_init       = exynos5250_pmu_init,
-       .powerdown_conf = exynos5_powerdown_conf,
-};
-
-static const struct exynos_pmu_data exynos5420_pmu_data = {
-       .pmu_config     = exynos5420_pmu_config,
-       .pmu_init       = exynos5420_pmu_init,
-       .powerdown_conf = exynos5420_powerdown_conf,
-};
-
-/*
- * PMU platform driver and devicetree bindings.
- */
-static const struct of_device_id exynos_pmu_of_device_ids[] = {
-       {
-               .compatible = "samsung,exynos3250-pmu",
-               .data = &exynos3250_pmu_data,
-       }, {
-               .compatible = "samsung,exynos4210-pmu",
-               .data = &exynos4210_pmu_data,
-       }, {
-               .compatible = "samsung,exynos4212-pmu",
-               .data = &exynos4212_pmu_data,
-       }, {
-               .compatible = "samsung,exynos4412-pmu",
-               .data = &exynos4412_pmu_data,
-       }, {
-               .compatible = "samsung,exynos5250-pmu",
-               .data = &exynos5250_pmu_data,
-       }, {
-               .compatible = "samsung,exynos5420-pmu",
-               .data = &exynos5420_pmu_data,
-       },
-       { /*sentinel*/ },
-};
-
-static int exynos_pmu_probe(struct platform_device *pdev)
-{
-       const struct of_device_id *match;
-       struct device *dev = &pdev->dev;
-       struct resource *res;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       pmu_base_addr = devm_ioremap_resource(dev, res);
-       if (IS_ERR(pmu_base_addr))
-               return PTR_ERR(pmu_base_addr);
-
-       pmu_context = devm_kzalloc(&pdev->dev,
-                       sizeof(struct exynos_pmu_context),
-                       GFP_KERNEL);
-       if (!pmu_context) {
-               dev_err(dev, "Cannot allocate memory.\n");
-               return -ENOMEM;
-       }
-       pmu_context->dev = dev;
-
-       match = of_match_node(exynos_pmu_of_device_ids, dev->of_node);
-
-       pmu_context->pmu_data = match->data;
-
-       if (pmu_context->pmu_data->pmu_init)
-               pmu_context->pmu_data->pmu_init();
-
-       platform_set_drvdata(pdev, pmu_context);
-
-       dev_dbg(dev, "Exynos PMU Driver probe done\n");
-       return 0;
-}
-
-static struct platform_driver exynos_pmu_driver = {
-       .driver  = {
-               .name   = "exynos-pmu",
-               .of_match_table = exynos_pmu_of_device_ids,
-       },
-       .probe = exynos_pmu_probe,
-};
-
-static int __init exynos_pmu_init(void)
-{
-       return platform_driver_register(&exynos_pmu_driver);
-
-}
-postcore_initcall(exynos_pmu_init);
diff --git a/arch/arm/mach-exynos/regs-srom.h b/arch/arm/mach-exynos/regs-srom.h
deleted file mode 100644 (file)
index 5c4d442..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * S5P SROMC register definitions
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifndef __PLAT_SAMSUNG_REGS_SROM_H
-#define __PLAT_SAMSUNG_REGS_SROM_H __FILE__
-
-#include <mach/map.h>
-
-#define S5P_SROMREG(x)         (S5P_VA_SROMC + (x))
-
-#define S5P_SROM_BW            S5P_SROMREG(0x0)
-#define S5P_SROM_BC0           S5P_SROMREG(0x4)
-#define S5P_SROM_BC1           S5P_SROMREG(0x8)
-#define S5P_SROM_BC2           S5P_SROMREG(0xc)
-#define S5P_SROM_BC3           S5P_SROMREG(0x10)
-#define S5P_SROM_BC4           S5P_SROMREG(0x14)
-#define S5P_SROM_BC5           S5P_SROMREG(0x18)
-
-/* one register BW holds 4 x 4-bit packed settings for NCS0 - NCS3 */
-
-#define S5P_SROM_BW__DATAWIDTH__SHIFT          0
-#define S5P_SROM_BW__ADDRMODE__SHIFT           1
-#define S5P_SROM_BW__WAITENABLE__SHIFT         2
-#define S5P_SROM_BW__BYTEENABLE__SHIFT         3
-
-#define S5P_SROM_BW__CS_MASK                   0xf
-
-#define S5P_SROM_BW__NCS0__SHIFT               0
-#define S5P_SROM_BW__NCS1__SHIFT               4
-#define S5P_SROM_BW__NCS2__SHIFT               8
-#define S5P_SROM_BW__NCS3__SHIFT               12
-#define S5P_SROM_BW__NCS4__SHIFT               16
-#define S5P_SROM_BW__NCS5__SHIFT               20
-
-/* applies to same to BCS0 - BCS3 */
-
-#define S5P_SROM_BCX__PMC__SHIFT               0
-#define S5P_SROM_BCX__TACP__SHIFT              4
-#define S5P_SROM_BCX__TCAH__SHIFT              8
-#define S5P_SROM_BCX__TCOH__SHIFT              12
-#define S5P_SROM_BCX__TACC__SHIFT              16
-#define S5P_SROM_BCX__TCOS__SHIFT              24
-#define S5P_SROM_BCX__TACS__SHIFT              28
-
-#endif /* __PLAT_SAMSUNG_REGS_SROM_H */
index c169cc3049aa3bbe270905eea1840d1b603b125c..f21690937b7d13e7eeb94dc5b40a5496b1261beb 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/of_address.h>
 #include <linux/err.h>
 #include <linux/regulator/machine.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/smp_scu.h>
 #include <asm/suspend.h>
 
+#include <mach/map.h>
+
 #include <plat/pm-common.h>
 
 #include "common.h"
-#include "exynos-pmu.h"
-#include "regs-pmu.h"
-#include "regs-srom.h"
 
 #define REG_TABLE_END (-1U)
 
@@ -53,15 +54,6 @@ struct exynos_wkup_irq {
        u32 mask;
 };
 
-static struct sleep_save exynos_core_save[] = {
-       /* SROM side */
-       SAVE_ITEM(S5P_SROM_BW),
-       SAVE_ITEM(S5P_SROM_BC0),
-       SAVE_ITEM(S5P_SROM_BC1),
-       SAVE_ITEM(S5P_SROM_BC2),
-       SAVE_ITEM(S5P_SROM_BC3),
-};
-
 struct exynos_pm_data {
        const struct exynos_wkup_irq *wkup_irq;
        unsigned int wake_disable_mask;
@@ -343,8 +335,6 @@ static void exynos_pm_prepare(void)
        /* Set wake-up mask registers */
        exynos_pm_set_wakeup_mask();
 
-       s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
        exynos_pm_enter_sleep_mode();
 
        /* ensure at least INFORM0 has the resume address */
@@ -375,8 +365,6 @@ static void exynos5420_pm_prepare(void)
        /* Set wake-up mask registers */
        exynos_pm_set_wakeup_mask();
 
-       s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
        exynos_pmu_spare3 = pmu_raw_readl(S5P_PMU_SPARE3);
        /*
         * The cpu state needs to be saved and restored so that the
@@ -467,8 +455,6 @@ static void exynos_pm_resume(void)
        /* For release retention */
        exynos_pm_release_retention();
 
-       s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
        if (cpuid == ARM_CPU_PART_CORTEX_A9)
                scu_enable(S5P_VA_SCU);
 
@@ -535,8 +521,6 @@ static void exynos5420_pm_resume(void)
 
        pmu_raw_writel(exynos_pmu_spare3, S5P_PMU_SPARE3);
 
-       s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
-
 early_wakeup:
 
        tmp = pmu_raw_readl(EXYNOS5420_SFR_AXI_CGDIS1);
index 16496a071ecb8c3174639a15fbc3d6fe092d3e3b..cda330c93d610d35d3e82e9d9574d6611dc489d4 100644 (file)
@@ -94,8 +94,8 @@ static void mxc_expio_irq_handler(struct irq_desc *desc)
        /* irq = gpio irq number */
        desc->irq_data.chip->irq_mask(&desc->irq_data);
 
-       imr_val = __raw_readw(brd_io + INTR_MASK_REG);
-       int_valid = __raw_readw(brd_io + INTR_STATUS_REG) & ~imr_val;
+       imr_val = imx_readw(brd_io + INTR_MASK_REG);
+       int_valid = imx_readw(brd_io + INTR_STATUS_REG) & ~imr_val;
 
        expio_irq = 0;
        for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
@@ -117,17 +117,17 @@ static void expio_mask_irq(struct irq_data *d)
        u16 reg;
        u32 expio = d->hwirq;
 
-       reg = __raw_readw(brd_io + INTR_MASK_REG);
+       reg = imx_readw(brd_io + INTR_MASK_REG);
        reg |= (1 << expio);
-       __raw_writew(reg, brd_io + INTR_MASK_REG);
+       imx_writew(reg, brd_io + INTR_MASK_REG);
 }
 
 static void expio_ack_irq(struct irq_data *d)
 {
        u32 expio = d->hwirq;
 
-       __raw_writew(1 << expio, brd_io + INTR_RESET_REG);
-       __raw_writew(0, brd_io + INTR_RESET_REG);
+       imx_writew(1 << expio, brd_io + INTR_RESET_REG);
+       imx_writew(0, brd_io + INTR_RESET_REG);
        expio_mask_irq(d);
 }
 
@@ -136,9 +136,9 @@ static void expio_unmask_irq(struct irq_data *d)
        u16 reg;
        u32 expio = d->hwirq;
 
-       reg = __raw_readw(brd_io + INTR_MASK_REG);
+       reg = imx_readw(brd_io + INTR_MASK_REG);
        reg &= ~(1 << expio);
-       __raw_writew(reg, brd_io + INTR_MASK_REG);
+       imx_writew(reg, brd_io + INTR_MASK_REG);
 }
 
 static struct irq_chip expio_irq_chip = {
@@ -162,9 +162,9 @@ int __init mxc_expio_init(u32 base, u32 intr_gpio)
        if (brd_io == NULL)
                return -ENOMEM;
 
-       if ((__raw_readw(brd_io + MAGIC_NUMBER1_REG) != 0xAAAA) ||
-           (__raw_readw(brd_io + MAGIC_NUMBER2_REG) != 0x5555) ||
-           (__raw_readw(brd_io + MAGIC_NUMBER3_REG) != 0xCAFE)) {
+       if ((imx_readw(brd_io + MAGIC_NUMBER1_REG) != 0xAAAA) ||
+           (imx_readw(brd_io + MAGIC_NUMBER2_REG) != 0x5555) ||
+           (imx_readw(brd_io + MAGIC_NUMBER3_REG) != 0xCAFE)) {
                pr_info("3-Stack Debug board not detected\n");
                iounmap(brd_io);
                brd_io = NULL;
@@ -181,10 +181,10 @@ int __init mxc_expio_init(u32 base, u32 intr_gpio)
        gpio_direction_input(intr_gpio);
 
        /* disable the interrupt and clear the status */
-       __raw_writew(0, brd_io + INTR_MASK_REG);
-       __raw_writew(0xFFFF, brd_io + INTR_RESET_REG);
-       __raw_writew(0, brd_io + INTR_RESET_REG);
-       __raw_writew(0x1F, brd_io + INTR_MASK_REG);
+       imx_writew(0, brd_io + INTR_MASK_REG);
+       imx_writew(0xFFFF, brd_io + INTR_RESET_REG);
+       imx_writew(0, brd_io + INTR_RESET_REG);
+       imx_writew(0x1F, brd_io + INTR_MASK_REG);
 
        irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
        WARN_ON(irq_base < 0);
index 15df34fbdf44c5abd4182f1c5a0514162e7fe635..ecc374060c27c55e2dfe4fa04d170e213ddd31b5 100644 (file)
@@ -2,6 +2,7 @@ menuconfig ARCH_MXC
        bool "Freescale i.MX family"
        depends on ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7 || ARM_SINGLE_ARMV7M
        select ARCH_REQUIRE_GPIOLIB
+       select ARCH_SUPPORTS_BIG_ENDIAN
        select ARM_CPU_SUSPEND if PM
        select CLKSRC_IMX_GPT
        select GENERIC_IRQ_CHIP
@@ -561,6 +562,7 @@ config SOC_IMX7D
        bool "i.MX7 Dual support"
        select PINCTRL_IMX7D
        select ARM_GIC
+       select HAVE_ARM_ARCH_TIMER
        select HAVE_IMX_ANATOP
        select HAVE_IMX_MMDC
        select HAVE_IMX_SRC
index 231bb250c5719d21962404a9301765cdcf499924..bd3555ee88c8bd8ff1352d2623c82abcd2d632b8 100644 (file)
@@ -151,7 +151,14 @@ void __init imx_init_revision_from_anatop(void)
                revision = IMX_CHIP_REVISION_1_5;
                break;
        default:
-               revision = IMX_CHIP_REVISION_UNKNOWN;
+               /*
+                * Fall back to the raw register value instead of 0xff so the
+                * SoC revision is still visible even if it is not in the known
+                * version table. Some chips (e.g. i.MX7D) already encode
+                * digprog in the Linux revision format, so the register value
+                * can be used directly without remapping.
+                */
+               revision = digprog & 0xff;
        }
 
        mxc_set_cpu_type(digprog >> 16 & 0xff);
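A worked example of the new fallback (sample value assumed, not taken from the commit): the IMX_CHIP_REVISION_x_y constants encode revision 1.0 as 0x10, 1.1 as 0x11, and so on, so a digprog low byte that already follows that scheme is usable as-is, while an unrecognized byte is now reported raw rather than as a flat 0xff.

    #include <linux/types.h>

    /* Illustration only; the digprog sample below is a made-up i.MX7D value. */
    static void example_decode_digprog(void)
    {
            u32 digprog = 0x00720010;               /* hypothetical sample */
            u8 cpu_type = (digprog >> 16) & 0xff;   /* 0x72 */
            u8 revision = digprog & 0xff;           /* 0x10, i.e. silicon rev 1.0 */

            (void)cpu_type;
            (void)revision;
    }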
index 1a8932335b2136b9ff941adc3d2bc35abdebb325..7fa176e792bd16fc7b71959ff0f57dcf23ec32d7 100644 (file)
@@ -66,12 +66,12 @@ static int avic_set_irq_fiq(unsigned int irq, unsigned int type)
                return -EINVAL;
 
        if (irq < AVIC_NUM_IRQS / 2) {
-               irqt = __raw_readl(avic_base + AVIC_INTTYPEL) & ~(1 << irq);
-               __raw_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEL);
+               irqt = imx_readl(avic_base + AVIC_INTTYPEL) & ~(1 << irq);
+               imx_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEL);
        } else {
                irq -= AVIC_NUM_IRQS / 2;
-               irqt = __raw_readl(avic_base + AVIC_INTTYPEH) & ~(1 << irq);
-               __raw_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEH);
+               irqt = imx_readl(avic_base + AVIC_INTTYPEH) & ~(1 << irq);
+               imx_writel(irqt | (!!type << irq), avic_base + AVIC_INTTYPEH);
        }
 
        return 0;
@@ -94,8 +94,8 @@ static void avic_irq_suspend(struct irq_data *d)
        struct irq_chip_type *ct = gc->chip_types;
        int idx = d->hwirq >> 5;
 
-       avic_saved_mask_reg[idx] = __raw_readl(avic_base + ct->regs.mask);
-       __raw_writel(gc->wake_active, avic_base + ct->regs.mask);
+       avic_saved_mask_reg[idx] = imx_readl(avic_base + ct->regs.mask);
+       imx_writel(gc->wake_active, avic_base + ct->regs.mask);
 }
 
 static void avic_irq_resume(struct irq_data *d)
@@ -104,7 +104,7 @@ static void avic_irq_resume(struct irq_data *d)
        struct irq_chip_type *ct = gc->chip_types;
        int idx = d->hwirq >> 5;
 
-       __raw_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask);
+       imx_writel(avic_saved_mask_reg[idx], avic_base + ct->regs.mask);
 }
 
 #else
@@ -140,7 +140,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
        u32 nivector;
 
        do {
-               nivector = __raw_readl(avic_base + AVIC_NIVECSR) >> 16;
+               nivector = imx_readl(avic_base + AVIC_NIVECSR) >> 16;
                if (nivector == 0xffff)
                        break;
 
@@ -164,16 +164,16 @@ void __init mxc_init_irq(void __iomem *irqbase)
        /* put the AVIC into the reset value with
         * all interrupts disabled
         */
-       __raw_writel(0, avic_base + AVIC_INTCNTL);
-       __raw_writel(0x1f, avic_base + AVIC_NIMASK);
+       imx_writel(0, avic_base + AVIC_INTCNTL);
+       imx_writel(0x1f, avic_base + AVIC_NIMASK);
 
        /* disable all interrupts */
-       __raw_writel(0, avic_base + AVIC_INTENABLEH);
-       __raw_writel(0, avic_base + AVIC_INTENABLEL);
+       imx_writel(0, avic_base + AVIC_INTENABLEH);
+       imx_writel(0, avic_base + AVIC_INTENABLEL);
 
        /* all IRQ no FIQ */
-       __raw_writel(0, avic_base + AVIC_INTTYPEH);
-       __raw_writel(0, avic_base + AVIC_INTTYPEL);
+       imx_writel(0, avic_base + AVIC_INTTYPEH);
+       imx_writel(0, avic_base + AVIC_INTTYPEL);
 
        irq_base = irq_alloc_descs(-1, 0, AVIC_NUM_IRQS, numa_node_id());
        WARN_ON(irq_base < 0);
@@ -188,7 +188,7 @@ void __init mxc_init_irq(void __iomem *irqbase)
 
        /* Set default priority value (0) for all IRQ's */
        for (i = 0; i < 8; i++)
-               __raw_writel(0, avic_base + AVIC_NIPRIORITY(i));
+               imx_writel(0, avic_base + AVIC_NIPRIORITY(i));
 
        set_handle_irq(avic_handle_irq);
 
index fe8d36f7e30ed95209ae218811c4420af52b5bee..8d2ae4091465b33aabdd06cfabc69954b203644f 100644 (file)
@@ -39,8 +39,7 @@ static int mx27_read_cpu_rev(void)
         * the silicon revision very early we read it here to
         * avoid any further hooks
        */
-       val = __raw_readl(MX27_IO_ADDRESS(MX27_SYSCTRL_BASE_ADDR
-                               + SYS_CHIP_ID));
+       val = imx_readl(MX27_IO_ADDRESS(MX27_SYSCTRL_BASE_ADDR + SYS_CHIP_ID));
 
        mx27_cpu_partnumber = (int)((val >> 12) & 0xFFFF);
 
index fde1860a25216ed9763fb7ba8256ed0b16757e70..3daf1959a2f0ad555e1f5c94d2cb79ca2cd2db39 100644 (file)
@@ -39,7 +39,7 @@ static int mx31_read_cpu_rev(void)
        u32 i, srev;
 
        /* read SREV register from IIM module */
-       srev = __raw_readl(MX31_IO_ADDRESS(MX31_IIM_BASE_ADDR + MXC_IIMSREV));
+       srev = imx_readl(MX31_IO_ADDRESS(MX31_IIM_BASE_ADDR + MXC_IIMSREV));
        srev &= 0xff;
 
        for (i = 0; i < ARRAY_SIZE(mx31_cpu_type); i++)
index ec3aaa098c1706b3d7cf88b9c608302777419aad..8a54234df23b582b5bac78d365f2e35b4fe6cc84 100644 (file)
@@ -20,7 +20,7 @@ static int mx35_read_cpu_rev(void)
 {
        u32 rev;
 
-       rev = __raw_readl(MX35_IO_ADDRESS(MX35_IIM_BASE_ADDR + MXC_IIMSREV));
+       rev = imx_readl(MX35_IO_ADDRESS(MX35_IIM_BASE_ADDR + MXC_IIMSREV));
        switch (rev) {
        case 0x00:
                return IMX_CHIP_REVISION_1_0;
index 5b0f752d5507fe337408cfb9abc3a079beb458dd..6a96b7cf468ff95b7194ce42e9e58c2b1d08261a 100644 (file)
@@ -45,20 +45,20 @@ void __init imx_set_aips(void __iomem *base)
  * Set all MPROTx to be non-bufferable, trusted for R/W,
  * not forced to user-mode.
  */
-       __raw_writel(0x77777777, base + 0x0);
-       __raw_writel(0x77777777, base + 0x4);
+       imx_writel(0x77777777, base + 0x0);
+       imx_writel(0x77777777, base + 0x4);
 
 /*
  * Set all OPACRx to be non-bufferable, to not require
  * supervisor privilege level for access, allow for
  * write access and untrusted master access.
  */
-       __raw_writel(0x0, base + 0x40);
-       __raw_writel(0x0, base + 0x44);
-       __raw_writel(0x0, base + 0x48);
-       __raw_writel(0x0, base + 0x4C);
-       reg = __raw_readl(base + 0x50) & 0x00FFFFFF;
-       __raw_writel(reg, base + 0x50);
+       imx_writel(0x0, base + 0x40);
+       imx_writel(0x0, base + 0x44);
+       imx_writel(0x0, base + 0x48);
+       imx_writel(0x0, base + 0x4C);
+       reg = imx_readl(base + 0x50) & 0x00FFFFFF;
+       imx_writel(reg, base + 0x50);
 }
 
 void __init imx_aips_allow_unprivileged_access(
index 08ce20771bb3f9e49a88294e37216d7c562edb8a..fb9a73a57d00dbf11432f1771180f3391cfdfa55 100644 (file)
@@ -64,23 +64,23 @@ static inline void epit_irq_disable(void)
 {
        u32 val;
 
-       val = __raw_readl(timer_base + EPITCR);
+       val = imx_readl(timer_base + EPITCR);
        val &= ~EPITCR_OCIEN;
-       __raw_writel(val, timer_base + EPITCR);
+       imx_writel(val, timer_base + EPITCR);
 }
 
 static inline void epit_irq_enable(void)
 {
        u32 val;
 
-       val = __raw_readl(timer_base + EPITCR);
+       val = imx_readl(timer_base + EPITCR);
        val |= EPITCR_OCIEN;
-       __raw_writel(val, timer_base + EPITCR);
+       imx_writel(val, timer_base + EPITCR);
 }
 
 static void epit_irq_acknowledge(void)
 {
-       __raw_writel(EPITSR_OCIF, timer_base + EPITSR);
+       imx_writel(EPITSR_OCIF, timer_base + EPITSR);
 }
 
 static int __init epit_clocksource_init(struct clk *timer_clk)
@@ -98,9 +98,9 @@ static int epit_set_next_event(unsigned long evt,
 {
        unsigned long tcmp;
 
-       tcmp = __raw_readl(timer_base + EPITCNR);
+       tcmp = imx_readl(timer_base + EPITCNR);
 
-       __raw_writel(tcmp - evt, timer_base + EPITCMPR);
+       imx_writel(tcmp - evt, timer_base + EPITCMPR);
 
        return 0;
 }
@@ -213,11 +213,11 @@ void __init epit_timer_init(void __iomem *base, int irq)
        /*
         * Initialise to a known state (all timers off, and timing reset)
         */
-       __raw_writel(0x0, timer_base + EPITCR);
+       imx_writel(0x0, timer_base + EPITCR);
 
-       __raw_writel(0xffffffff, timer_base + EPITLR);
-       __raw_writel(EPITCR_EN | EPITCR_CLKSRC_REF_HIGH | EPITCR_WAITEN,
-                       timer_base + EPITCR);
+       imx_writel(0xffffffff, timer_base + EPITLR);
+       imx_writel(EPITCR_EN | EPITCR_CLKSRC_REF_HIGH | EPITCR_WAITEN,
+                  timer_base + EPITCR);
 
        /* init and register the timer to the framework */
        epit_clocksource_init(timer_clk);
index b5e976816b63cf3cd81926dd8378036d21b2888e..6c28d28b3c647982fb25d709f2eaef570e9085e3 100644 (file)
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 diag_reg_offset:
        .word   g_diag_reg - .
@@ -25,6 +26,7 @@ diag_reg_offset:
        .endm
 
 ENTRY(v7_secondary_startup)
+ARM_BE8(setend be)                     @ go BE8 if entered LE
        set_diag_reg
        b       secondary_startup
 ENDPROC(v7_secondary_startup)
index 0b5ba4bf572a252112de2d3c9d13d7cf758e3b00..3982e91b2f3ea49d993bcf2bc32472304c3dadc6 100644 (file)
@@ -57,10 +57,10 @@ void mxc_iomux_mode(unsigned int pin_mode)
 
        spin_lock(&gpio_mux_lock);
 
-       l = __raw_readl(reg);
+       l = imx_readl(reg);
        l &= ~(0xff << (field * 8));
        l |= mode << (field * 8);
-       __raw_writel(l, reg);
+       imx_writel(l, reg);
 
        spin_unlock(&gpio_mux_lock);
 }
@@ -82,10 +82,10 @@ void mxc_iomux_set_pad(enum iomux_pins pin, u32 config)
 
        spin_lock(&gpio_mux_lock);
 
-       l = __raw_readl(reg);
+       l = imx_readl(reg);
        l &= ~(0x1ff << (field * 10));
        l |= config << (field * 10);
-       __raw_writel(l, reg);
+       imx_writel(l, reg);
 
        spin_unlock(&gpio_mux_lock);
 }
@@ -163,12 +163,12 @@ void mxc_iomux_set_gpr(enum iomux_gp_func gp, bool en)
        u32 l;
 
        spin_lock(&gpio_mux_lock);
-       l = __raw_readl(IOMUXGPR);
+       l = imx_readl(IOMUXGPR);
        if (en)
                l |= gp;
        else
                l &= ~gp;
 
-       __raw_writel(l, IOMUXGPR);
+       imx_writel(l, IOMUXGPR);
        spin_unlock(&gpio_mux_lock);
 }
index ecd543664644135084e37d88002ecee88fa2d15a..7aa90c863ad99b21ede09f5438a70de62873c4b8 100644 (file)
@@ -38,12 +38,12 @@ static unsigned imx_iomuxv1_numports;
 
 static inline unsigned long imx_iomuxv1_readl(unsigned offset)
 {
-       return __raw_readl(imx_iomuxv1_baseaddr + offset);
+       return imx_readl(imx_iomuxv1_baseaddr + offset);
 }
 
 static inline void imx_iomuxv1_writel(unsigned long val, unsigned offset)
 {
-       __raw_writel(val, imx_iomuxv1_baseaddr + offset);
+       imx_writel(val, imx_iomuxv1_baseaddr + offset);
 }
 
 static inline void imx_iomuxv1_rmwl(unsigned offset,
index a53b2e64f98d547594243a67bf3581ac63250ea7..ca59d5f2ec92e4337e1f418ac83442ac8844806c 100644 (file)
@@ -45,13 +45,13 @@ int mxc_iomux_v3_setup_pad(iomux_v3_cfg_t pad)
        u32 pad_ctrl = (pad & MUX_PAD_CTRL_MASK) >> MUX_PAD_CTRL_SHIFT;
 
        if (mux_ctrl_ofs)
-               __raw_writel(mux_mode, base + mux_ctrl_ofs);
+               imx_writel(mux_mode, base + mux_ctrl_ofs);
 
        if (sel_input_ofs)
-               __raw_writel(sel_input, base + sel_input_ofs);
+               imx_writel(sel_input, base + sel_input_ofs);
 
        if (!(pad_ctrl & NO_PAD_CTRL) && pad_ctrl_ofs)
-               __raw_writel(pad_ctrl, base + pad_ctrl_ofs);
+               imx_writel(pad_ctrl, base + pad_ctrl_ofs);
 
        return 0;
 }
index f2060523ba489c3feca3c2083fdd28a7594a4412..eaee47a2fcc0f6d2e196af4dea2d7c3dc74edd2b 100644 (file)
@@ -525,8 +525,8 @@ static void __init armadillo5x0_init(void)
        imx31_add_mxc_nand(&armadillo5x0_nand_board_info);
 
        /* set NAND page size to 2k if not configured via boot mode pins */
-       __raw_writel(__raw_readl(mx3_ccm_base + MXC_CCM_RCSR) |
-                                       (1 << 30), mx3_ccm_base + MXC_CCM_RCSR);
+       imx_writel(imx_readl(mx3_ccm_base + MXC_CCM_RCSR) | (1 << 30),
+                  mx3_ccm_base + MXC_CCM_RCSR);
 
        /* RTC */
        /* Get RTC IRQ and register the chip */
index b015129e4045847a3e62b72fb001a71e8b80c55c..6883fbaf9484b2da00324d42ce2ada3e70cefc66 100644 (file)
@@ -40,11 +40,10 @@ static void __init imx51_ipu_mipi_setup(void)
        WARN_ON(!hsc_addr);
 
        /* setup MIPI module to legacy mode */
-       __raw_writel(0xf00, hsc_addr);
+       imx_writel(0xf00, hsc_addr);
 
        /* CSI mode: reserved; DI control mode: legacy (from Freescale BSP) */
-       __raw_writel(__raw_readl(hsc_addr + 0x800) | 0x30ff,
-               hsc_addr + 0x800);
+       imx_writel(imx_readl(hsc_addr + 0x800) | 0x30ff, hsc_addr + 0x800);
 
        iounmap(hsc_addr);
 }
index eb1c3477c48ae1cedbbbc74bf2164110e831beae..267fad23b2257dc80d24cea3eca455f30a60d2ed 100644 (file)
@@ -202,9 +202,9 @@ static struct i2c_board_info mx27ads_i2c_devices[] = {
 static void vgpio_set(struct gpio_chip *chip, unsigned offset, int value)
 {
        if (value)
-               __raw_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_SET_REG);
+               imx_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_SET_REG);
        else
-               __raw_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_CLEAR_REG);
+               imx_writew(PBC_BCTRL1_LCDON, PBC_BCTRL1_CLEAR_REG);
 }
 
 static int vgpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
@@ -364,7 +364,7 @@ static void __init mx27ads_timer_init(void)
 {
        unsigned long fref = 26000000;
 
-       if ((__raw_readw(PBC_VERSION_REG) & CKIH_27MHZ_BIT_SET) == 0)
+       if ((imx_readw(PBC_VERSION_REG) & CKIH_27MHZ_BIT_SET) == 0)
                fref = 27000000;
 
        mx27_clocks_init(fref);
index 2b147e4bf9c91297deb0522bcfb14150b61f5b11..4f2c56d44ba14de19f10ccc2dc16927a4b03a545 100644 (file)
@@ -160,8 +160,8 @@ static void mx31ads_expio_irq_handler(struct irq_desc *desc)
        u32 int_valid;
        u32 expio_irq;
 
-       imr_val = __raw_readw(PBC_INTMASK_SET_REG);
-       int_valid = __raw_readw(PBC_INTSTATUS_REG) & imr_val;
+       imr_val = imx_readw(PBC_INTMASK_SET_REG);
+       int_valid = imx_readw(PBC_INTSTATUS_REG) & imr_val;
 
        expio_irq = 0;
        for (; int_valid != 0; int_valid >>= 1, expio_irq++) {
@@ -180,8 +180,8 @@ static void expio_mask_irq(struct irq_data *d)
 {
        u32 expio = d->hwirq;
        /* mask the interrupt */
-       __raw_writew(1 << expio, PBC_INTMASK_CLEAR_REG);
-       __raw_readw(PBC_INTMASK_CLEAR_REG);
+       imx_writew(1 << expio, PBC_INTMASK_CLEAR_REG);
+       imx_readw(PBC_INTMASK_CLEAR_REG);
 }
 
 /*
@@ -192,7 +192,7 @@ static void expio_ack_irq(struct irq_data *d)
 {
        u32 expio = d->hwirq;
        /* clear the interrupt status */
-       __raw_writew(1 << expio, PBC_INTSTATUS_REG);
+       imx_writew(1 << expio, PBC_INTSTATUS_REG);
 }
 
 /*
@@ -203,7 +203,7 @@ static void expio_unmask_irq(struct irq_data *d)
 {
        u32 expio = d->hwirq;
        /* unmask the interrupt */
-       __raw_writew(1 << expio, PBC_INTMASK_SET_REG);
+       imx_writew(1 << expio, PBC_INTMASK_SET_REG);
 }
 
 static struct irq_chip expio_irq_chip = {
@@ -226,8 +226,8 @@ static void __init mx31ads_init_expio(void)
        mxc_iomux_alloc_pin(IOMUX_MODE(MX31_PIN_GPIO1_4, IOMUX_CONFIG_GPIO), "expio");
 
        /* disable the interrupt and clear the status */
-       __raw_writew(0xFFFF, PBC_INTMASK_CLEAR_REG);
-       __raw_writew(0xFFFF, PBC_INTSTATUS_REG);
+       imx_writew(0xFFFF, PBC_INTMASK_CLEAR_REG);
+       imx_writew(0xFFFF, PBC_INTSTATUS_REG);
 
        irq_base = irq_alloc_descs(-1, 0, MXC_MAX_EXP_IO_LINES, numa_node_id());
        WARN_ON(irq_base < 0);
index bb6f8a52a6b8b2ebaba9b8be1dc6f9478cfc3c2c..4f2d99888afd28833044022396a6ea9582a2c994 100644 (file)
@@ -509,7 +509,7 @@ static void mx31moboard_poweroff(void)
 
        mxc_iomux_mode(MX31_PIN_WATCHDOG_RST__WATCHDOG_RST);
 
-       __raw_writew(1 << 6 | 1 << 2, MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
+       imx_writew(1 << 6 | 1 << 2, MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
 }
 
 static int mx31moboard_baseboard;
index 5c27646047270fc4fbcab94e689f5e533d3d9d09..34df64f133ed2f360ee57354c75a10649815bf90 100644 (file)
@@ -190,9 +190,9 @@ static struct platform_device qong_nand_device = {
 static void __init qong_init_nand_mtd(void)
 {
        /* init CS */
-       __raw_writel(0x00004f00, MX31_IO_ADDRESS(MX31_WEIM_CSCRxU(3)));
-       __raw_writel(0x20013b31, MX31_IO_ADDRESS(MX31_WEIM_CSCRxL(3)));
-       __raw_writel(0x00020800, MX31_IO_ADDRESS(MX31_WEIM_CSCRxA(3)));
+       imx_writel(0x00004f00, MX31_IO_ADDRESS(MX31_WEIM_CSCRxU(3)));
+       imx_writel(0x20013b31, MX31_IO_ADDRESS(MX31_WEIM_CSCRxL(3)));
+       imx_writel(0x00020800, MX31_IO_ADDRESS(MX31_WEIM_CSCRxA(3)));
 
        mxc_iomux_set_gpr(MUX_SDCTL_CSD1_SEL, true);
 
index a5b1af6d7441e262a49bd19d2611adfdc3364b14..d327042567817adf27b95b9eac2a29ac4df23d98 100644 (file)
@@ -193,4 +193,9 @@ extern struct cpu_op *(*get_cpu_op)(int *op);
 #define cpu_is_mx3()   (cpu_is_mx31() || cpu_is_mx35())
 #define cpu_is_mx2()   (cpu_is_mx21() || cpu_is_mx27())
 
+#define imx_readl      readl_relaxed
+#define imx_readw      readw_relaxed
+#define imx_writel     writel_relaxed
+#define imx_writew     writew_relaxed
+
 #endif /*  __ASM_ARCH_MXC_H__ */
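For illustration (not part of this commit): the new accessors alias the relaxed MMIO helpers, which perform the little-endian conversion that the __raw_* variants skip, so converted drivers keep working once big-endian kernels are allowed by the ARCH_SUPPORTS_BIG_ENDIAN selection above. A minimal sketch with an invented register offset:

    #include <linux/bits.h>
    #include <linux/io.h>

    #define imx_readl       readl_relaxed   /* LE register -> CPU order, no barrier */
    #define imx_writel      writel_relaxed  /* CPU order -> LE register, no barrier */

    /* Hypothetical 32-bit control register at offset 0x10 from "base". */
    static void example_set_enable_bit(void __iomem *base)
    {
            u32 reg = imx_readl(base + 0x10);

            imx_writel(reg | BIT(0), base + 0x10);
    }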
index 56d02d064fbf941c3b070cced2bf2f2e88a9056e..43096c8990d4cb913935b62c758107316e46a637 100644 (file)
@@ -19,9 +19,9 @@ static int mx27_suspend_enter(suspend_state_t state)
        switch (state) {
        case PM_SUSPEND_MEM:
                /* Clear MPEN and SPEN to disable MPLL/SPLL */
-               cscr = __raw_readl(MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR));
+               cscr = imx_readl(MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR));
                cscr &= 0xFFFFFFFC;
-               __raw_writel(cscr, MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR));
+               imx_writel(cscr, MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR));
                /* Executes WFI */
                cpu_do_idle();
                break;
index 6a07006ff0f48136591ff0909636cd73b774bf08..94c0898751d8538290a258fa1cb3d4d50ba8b9f0 100644 (file)
  */
 void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode)
 {
-       int reg = __raw_readl(mx3_ccm_base + MXC_CCM_CCMR);
+       int reg = imx_readl(mx3_ccm_base + MXC_CCM_CCMR);
        reg &= ~MXC_CCM_CCMR_LPM_MASK;
 
        switch (mode) {
        case MX3_WAIT:
                if (cpu_is_mx35())
                        reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
-               __raw_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
+               imx_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
                break;
        default:
                pr_err("Unknown cpu power mode: %d\n", mode);
index 532d4b08276dc84c149b525e6fcc0a85d30a543b..868781fd460c788950ac59e6ef197bede4ab6660 100644 (file)
@@ -153,15 +153,15 @@ static void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
        int stop_mode = 0;
 
        /* always allow platform to issue a deep sleep mode request */
-       plat_lpc = __raw_readl(cortex_base + MXC_CORTEXA8_PLAT_LPC) &
+       plat_lpc = imx_readl(cortex_base + MXC_CORTEXA8_PLAT_LPC) &
            ~(MXC_CORTEXA8_PLAT_LPC_DSM);
-       ccm_clpcr = __raw_readl(ccm_base + MXC_CCM_CLPCR) &
+       ccm_clpcr = imx_readl(ccm_base + MXC_CCM_CLPCR) &
                    ~(MXC_CCM_CLPCR_LPM_MASK);
-       arm_srpgcr = __raw_readl(gpc_base + MXC_SRPG_ARM_SRPGCR) &
+       arm_srpgcr = imx_readl(gpc_base + MXC_SRPG_ARM_SRPGCR) &
                     ~(MXC_SRPGCR_PCR);
-       empgc0 = __raw_readl(gpc_base + MXC_SRPG_EMPGC0_SRPGCR) &
+       empgc0 = imx_readl(gpc_base + MXC_SRPG_EMPGC0_SRPGCR) &
                 ~(MXC_SRPGCR_PCR);
-       empgc1 = __raw_readl(gpc_base + MXC_SRPG_EMPGC1_SRPGCR) &
+       empgc1 = imx_readl(gpc_base + MXC_SRPG_EMPGC1_SRPGCR) &
                 ~(MXC_SRPGCR_PCR);
 
        switch (mode) {
@@ -196,17 +196,17 @@ static void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
                return;
        }
 
-       __raw_writel(plat_lpc, cortex_base + MXC_CORTEXA8_PLAT_LPC);
-       __raw_writel(ccm_clpcr, ccm_base + MXC_CCM_CLPCR);
-       __raw_writel(arm_srpgcr, gpc_base + MXC_SRPG_ARM_SRPGCR);
-       __raw_writel(arm_srpgcr, gpc_base + MXC_SRPG_NEON_SRPGCR);
+       imx_writel(plat_lpc, cortex_base + MXC_CORTEXA8_PLAT_LPC);
+       imx_writel(ccm_clpcr, ccm_base + MXC_CCM_CLPCR);
+       imx_writel(arm_srpgcr, gpc_base + MXC_SRPG_ARM_SRPGCR);
+       imx_writel(arm_srpgcr, gpc_base + MXC_SRPG_NEON_SRPGCR);
 
        if (stop_mode) {
                empgc0 |= MXC_SRPGCR_PCR;
                empgc1 |= MXC_SRPGCR_PCR;
 
-               __raw_writel(empgc0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
-               __raw_writel(empgc1, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);
+               imx_writel(empgc0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
+               imx_writel(empgc1, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);
        }
 }
 
@@ -228,8 +228,8 @@ static int mx5_suspend_enter(suspend_state_t state)
                flush_cache_all();
 
                /*clear the EMPGC0/1 bits */
-               __raw_writel(0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
-               __raw_writel(0, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);
+               imx_writel(0, gpc_base + MXC_SRPG_EMPGC0_SRPGCR);
+               imx_writel(0, gpc_base + MXC_SRPG_EMPGC1_SRPGCR);
 
                if (imx5_suspend_in_ocram_fn)
                        imx5_suspend_in_ocram_fn(suspend_ocram_base);
index 4470376af5f815fd5f8f2d12c5e7d18306675bb4..58924b3844df551a270aac552b0803fb5eedf8dc 100644 (file)
@@ -561,13 +561,13 @@ static int __init imx6q_suspend_init(const struct imx6_pm_socdata *socdata)
        goto put_node;
 
 pl310_cache_map_failed:
-       iounmap(&pm_info->gpc_base.vbase);
+       iounmap(pm_info->gpc_base.vbase);
 gpc_map_failed:
-       iounmap(&pm_info->iomuxc_base.vbase);
+       iounmap(pm_info->iomuxc_base.vbase);
 iomuxc_map_failed:
-       iounmap(&pm_info->src_base.vbase);
+       iounmap(pm_info->src_base.vbase);
 src_map_failed:
-       iounmap(&pm_info->mmdc_base.vbase);
+       iounmap(pm_info->mmdc_base.vbase);
 put_node:
        of_node_put(node);
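The error-path change above is a pointer-level bug fix: the old code handed iounmap() the address of the struct member that stores the mapping, not the mapped address itself. A stand-alone illustration (struct and field names invented):

    #include <linux/io.h>

    struct example_info {
            void __iomem *vbase;    /* cookie returned by ioremap() */
    };

    static void example_cleanup(struct example_info *info)
    {
            /* Correct: unmap the address stored in the field. */
            iounmap(info->vbase);

            /*
             * Wrong (the old pattern): &info->vbase is the address of the
             * pointer itself and was never returned by ioremap().
             *
             * iounmap(&info->vbase);
             */
    }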
 
index 51c35013b673b0d356c12ae8d0853557243c1f34..93d4a9a393531677dc0d2e5fbb833b69abe8e068 100644 (file)
@@ -54,7 +54,7 @@ void mxc_restart(enum reboot_mode mode, const char *cmd)
                wcr_enable = (1 << 2);
 
        /* Assert SRS signal */
-       __raw_writew(wcr_enable, wdog_base);
+       imx_writew(wcr_enable, wdog_base);
        /*
         * Due to imx6q errata ERR004346 (WDOG: WDOG SRS bit requires to be
         * written twice), we add another two writes to ensure there must be at
@@ -62,8 +62,8 @@ void mxc_restart(enum reboot_mode mode, const char *cmd)
         * the target check here, since the writes shouldn't be a huge burden
         * for other platforms.
         */
-       __raw_writew(wcr_enable, wdog_base);
-       __raw_writew(wcr_enable, wdog_base);
+       imx_writew(wcr_enable, wdog_base);
+       imx_writew(wcr_enable, wdog_base);
 
        /* wait for reset to assert... */
        mdelay(500);
index 4de65eeda1eb15666e9c82557f50477e37628ec8..ae23d50f7861b4d804e38025927922f0eb5f6e1f 100644 (file)
@@ -65,10 +65,10 @@ static int tzic_set_irq_fiq(unsigned int irq, unsigned int type)
                return -EINVAL;
        mask = 1U << (irq & 0x1F);
 
-       value = __raw_readl(tzic_base + TZIC_INTSEC0(index)) | mask;
+       value = imx_readl(tzic_base + TZIC_INTSEC0(index)) | mask;
        if (type)
                value &= ~mask;
-       __raw_writel(value, tzic_base + TZIC_INTSEC0(index));
+       imx_writel(value, tzic_base + TZIC_INTSEC0(index));
 
        return 0;
 }
@@ -82,15 +82,15 @@ static void tzic_irq_suspend(struct irq_data *d)
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        int idx = d->hwirq >> 5;
 
-       __raw_writel(gc->wake_active, tzic_base + TZIC_WAKEUP0(idx));
+       imx_writel(gc->wake_active, tzic_base + TZIC_WAKEUP0(idx));
 }
 
 static void tzic_irq_resume(struct irq_data *d)
 {
        int idx = d->hwirq >> 5;
 
-       __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(idx)),
-                    tzic_base + TZIC_WAKEUP0(idx));
+       imx_writel(imx_readl(tzic_base + TZIC_ENSET0(idx)),
+                  tzic_base + TZIC_WAKEUP0(idx));
 }
 
 #else
@@ -135,8 +135,8 @@ static void __exception_irq_entry tzic_handle_irq(struct pt_regs *regs)
                handled = 0;
 
                for (i = 0; i < 4; i++) {
-                       stat = __raw_readl(tzic_base + TZIC_HIPND(i)) &
-                               __raw_readl(tzic_base + TZIC_INTSEC0(i));
+                       stat = imx_readl(tzic_base + TZIC_HIPND(i)) &
+                               imx_readl(tzic_base + TZIC_INTSEC0(i));
 
                        while (stat) {
                                handled = 1;
@@ -166,18 +166,18 @@ void __init tzic_init_irq(void)
        /* put the TZIC into the reset value with
         * all interrupts disabled
         */
-       i = __raw_readl(tzic_base + TZIC_INTCNTL);
+       i = imx_readl(tzic_base + TZIC_INTCNTL);
 
-       __raw_writel(0x80010001, tzic_base + TZIC_INTCNTL);
-       __raw_writel(0x1f, tzic_base + TZIC_PRIOMASK);
-       __raw_writel(0x02, tzic_base + TZIC_SYNCCTRL);
+       imx_writel(0x80010001, tzic_base + TZIC_INTCNTL);
+       imx_writel(0x1f, tzic_base + TZIC_PRIOMASK);
+       imx_writel(0x02, tzic_base + TZIC_SYNCCTRL);
 
        for (i = 0; i < 4; i++)
-               __raw_writel(0xFFFFFFFF, tzic_base + TZIC_INTSEC0(i));
+               imx_writel(0xFFFFFFFF, tzic_base + TZIC_INTSEC0(i));
 
        /* disable all interrupts */
        for (i = 0; i < 4; i++)
-               __raw_writel(0xFFFFFFFF, tzic_base + TZIC_ENCLEAR0(i));
+               imx_writel(0xFFFFFFFF, tzic_base + TZIC_ENCLEAR0(i));
 
        /* all IRQ no FIQ Warning :: No selection */
 
@@ -214,13 +214,13 @@ int tzic_enable_wake(void)
 {
        unsigned int i;
 
-       __raw_writel(1, tzic_base + TZIC_DSMINT);
-       if (unlikely(__raw_readl(tzic_base + TZIC_DSMINT) == 0))
+       imx_writel(1, tzic_base + TZIC_DSMINT);
+       if (unlikely(imx_readl(tzic_base + TZIC_DSMINT) == 0))
                return -EAGAIN;
 
        for (i = 0; i < 4; i++)
-               __raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(i)),
-                            tzic_base + TZIC_WAKEUP0(i));
+               imx_writel(imx_readl(tzic_base + TZIC_ENSET0(i)),
+                          tzic_base + TZIC_WAKEUP0(i));
 
        return 0;
 }
index b01bdc9baf89520096ff348ba5d20e14c4aa0b87..b2a85ba13f088fb451377fa09d800df34e926846 100644 (file)
@@ -2,22 +2,16 @@ menuconfig ARCH_INTEGRATOR
        bool "ARM Ltd. Integrator family"
        depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V6
        select ARM_AMBA
-       select ARM_PATCH_PHYS_VIRT if MMU
-       select AUTO_ZRELADDR
-       select COMMON_CLK
        select COMMON_CLK_VERSATILE
-       select GENERIC_CLOCKEVENTS
        select HAVE_TCM
        select ICST
        select MFD_SYSCON
-       select MULTI_IRQ_HANDLER
        select PLAT_VERSATILE
        select POWER_RESET
        select POWER_RESET_VERSATILE
        select POWER_SUPPLY
        select SOC_INTEGRATOR_CM
        select SPARSE_IRQ
-       select USE_OF
        select VERSATILE_FPGA_IRQ
        help
          Support for ARM's Integrator platform.
diff --git a/arch/arm/mach-integrator/Makefile.boot b/arch/arm/mach-integrator/Makefile.boot
deleted file mode 100644 (file)
index ff0a4b5..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
-
diff --git a/arch/arm/mach-keystone/Makefile.boot b/arch/arm/mach-keystone/Makefile.boot
deleted file mode 100644 (file)
index f3835c4..0000000
+++ /dev/null
@@ -1 +0,0 @@
-zreladdr-y     := 0x80008000
index c279293f084cb87c2e0cc4cbf4ada1e2984e83f2..e6b9cb1e6709753b6e25166d8b9cac395d9a2c42 100644 (file)
@@ -63,7 +63,7 @@ static void __init keystone_init(void)
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static phys_addr_t keystone_virt_to_idmap(unsigned long x)
+static unsigned long keystone_virt_to_idmap(unsigned long x)
 {
        return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START;
 }
@@ -100,6 +100,7 @@ static const char *const keystone_match[] __initconst = {
        "ti,k2hk",
        "ti,k2e",
        "ti,k2l",
+       "ti,k2g",
        "ti,keystone",
        NULL,
 };
diff --git a/arch/arm/mach-mmp/Makefile.boot b/arch/arm/mach-mmp/Makefile.boot
deleted file mode 100644 (file)
index 5edf03e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-   zreladdr-y  += 0x00008000
index a32575fa3fba4d727e4584e7c169a5de09f46c48..c32f85559c6509e7f64116c351730141db16938a 100644 (file)
@@ -1,5 +1,6 @@
 menuconfig ARCH_MV78XX0
-       bool "Marvell MV78xx0" if ARCH_MULTI_V5
+       bool "Marvell MV78xx0"
+       depends on ARCH_MULTI_V5
        select ARCH_REQUIRE_GPIOLIB
        select CPU_FEROCEON
        select MVEBU_MBUS
diff --git a/arch/arm/mach-mv78xx0/Makefile.boot b/arch/arm/mach-mv78xx0/Makefile.boot
deleted file mode 100644 (file)
index 760a0ef..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
index f9597b701028a107d6acc11ce481fcaddc5e0411..46c742d3bd41e65c0f0fbaa2fc5cb8d3561b0c0a 100644 (file)
@@ -140,6 +140,7 @@ static void __init armada_xp_smp_prepare_cpus(unsigned int max_cpus)
                panic("Cannot find 'marvell,bootrom' compatible node");
 
        err = of_address_to_resource(node, 0, &res);
+       of_node_put(node);
        if (err < 0)
                panic("Cannot get 'bootrom' node address");
 
index 3d90ef19be2b4cae09a187e5e9541154f714ed03..2da8e5dfcf24df4fca3ba9384b84726654dea4d8 100644 (file)
@@ -3,20 +3,17 @@ menu "NetX Implementations"
 
 config MACH_NXDKN
        bool "Enable Hilscher nxdkn Eval Board support"
-       depends on ARCH_NETX
        help
          Board support for the Hilscher NetX Eval Board
 
 config MACH_NXDB500
        bool "Enable Hilscher nxdb500 Eval Board support"
-       depends on ARCH_NETX
        select ARM_AMBA
        help
          Board support for the Hilscher nxdb500 Eval Board
 
 config MACH_NXEB500HMI
        bool "Enable Hilscher nxeb500hmi Eval Board support"
-       depends on ARCH_NETX
        select ARM_AMBA
        help
          Board support for the Hilscher nxeb500hmi Eval Board
diff --git a/arch/arm/mach-nspire/Makefile.boot b/arch/arm/mach-nspire/Makefile.boot
deleted file mode 100644 (file)
index e69de29..0000000
diff --git a/arch/arm/mach-omap2/Makefile.boot b/arch/arm/mach-omap2/Makefile.boot
deleted file mode 100644 (file)
index b03e562..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-  zreladdr-y           += 0x80008000
-params_phys-y          := 0x80000100
-initrd_phys-y          := 0x80800000
index 9cda974a3009f3295e7bd013709f1bf586752282..d7f1d69daf6d4d083005e15d11d556c412aacfbc 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/pinctrl/machine.h>
-#include <linux/platform_data/mailbox-omap.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -66,32 +65,6 @@ static int __init omap3_l3_init(void)
 }
 omap_postcore_initcall(omap3_l3_init);
 
-#if defined(CONFIG_OMAP2PLUS_MBOX) || defined(CONFIG_OMAP2PLUS_MBOX_MODULE)
-static inline void __init omap_init_mbox(void)
-{
-       struct omap_hwmod *oh;
-       struct platform_device *pdev;
-       struct omap_mbox_pdata *pdata;
-
-       oh = omap_hwmod_lookup("mailbox");
-       if (!oh) {
-               pr_err("%s: unable to find hwmod\n", __func__);
-               return;
-       }
-       if (!oh->dev_attr) {
-               pr_err("%s: hwmod doesn't have valid attrs\n", __func__);
-               return;
-       }
-
-       pdata = (struct omap_mbox_pdata *)oh->dev_attr;
-       pdev = omap_device_build("omap-mailbox", -1, oh, pdata, sizeof(*pdata));
-       WARN(IS_ERR(pdev), "%s: could not build device, err %ld\n",
-                                               __func__, PTR_ERR(pdev));
-}
-#else
-static inline void omap_init_mbox(void) { }
-#endif /* CONFIG_OMAP2PLUS_MBOX */
-
 static inline void omap_init_sti(void) {}
 
 #if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
@@ -229,7 +202,6 @@ static int __init omap2_init_devices(void)
                 * please keep these calls, and their implementations above,
                 * in alphabetical order so they're easier to sort through.
                 */
-               omap_init_mbox();
                omap_init_mcspi();
                omap_init_sham();
                omap_init_aes();
index e781e4fae13a92fd68aa04ac891b9731bc8fa60e..a935d28443dab40e1a8005b7144aeb8de436c73c 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/platform_data/pinctrl-single.h>
 #include <linux/platform_data/iommu-omap.h>
 #include <linux/platform_data/wkup_m3.h>
+#include <linux/platform_data/pwm_omap_dmtimer.h>
+#include <plat/dmtimer.h>
 
 #include "common.h"
 #include "common-board-devices.h"
@@ -449,6 +451,24 @@ void omap_auxdata_legacy_init(struct device *dev)
        dev->platform_data = &twl_gpio_auxdata;
 }
 
+/* Dual mode timer PWM callbacks platdata */
+#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
+struct pwm_omap_dmtimer_pdata pwm_dmtimer_pdata = {
+       .request_by_node = omap_dm_timer_request_by_node,
+       .free = omap_dm_timer_free,
+       .enable = omap_dm_timer_enable,
+       .disable = omap_dm_timer_disable,
+       .get_fclk = omap_dm_timer_get_fclk,
+       .start = omap_dm_timer_start,
+       .stop = omap_dm_timer_stop,
+       .set_load = omap_dm_timer_set_load,
+       .set_match = omap_dm_timer_set_match,
+       .set_pwm = omap_dm_timer_set_pwm,
+       .set_prescaler = omap_dm_timer_set_prescaler,
+       .write_counter = omap_dm_timer_write_counter,
+};
+#endif
+
 /*
  * Few boards still need auxdata populated before we populate
  * the dev entries in of_platform_populate().
@@ -502,6 +522,9 @@ static struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
        OF_DEV_AUXDATA("ti,am4372-wkup-m3", 0x44d00000, "44d00000.wkup_m3",
                       &wkup_m3_data),
 #endif
+#if IS_ENABLED(CONFIG_OMAP_DM_TIMER)
+       OF_DEV_AUXDATA("ti,omap-dmtimer-pwm", 0, NULL, &pwm_dmtimer_pdata),
+#endif
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
        OF_DEV_AUXDATA("ti,omap4-iommu", 0x4a066000, "4a066000.mmu",
                       &omap4_iommu_pdata),
index f164c6b32ce2b467e5caf2acbcdd335f6f5930c8..8e072de89fed4a860d919a02190c8b359b14eaf9 100644 (file)
@@ -252,7 +252,7 @@ void __init omap_serial_init_port(struct omap_board_data *bdata,
                info = omap_serial_default_info;
 
        oh = uart->oh;
-       name = DRIVER_NAME;
+       name = OMAP_SERIAL_DRIVER_NAME;
 
        omap_up.dma_enabled = info->dma_enabled;
        omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
index eafd120b53f1bc15c82f2cc47dc8033e31ca566e..1b9f0520dea9154afa31f9668241e03f211fdc6a 100644 (file)
@@ -86,13 +86,18 @@ ENTRY(enable_omap3630_toggle_l2_on_restore)
        stmfd   sp!, {lr}       @ save registers on stack
        /* Setup so that we will disable and enable l2 */
        mov     r1, #0x1
-       adrl    r2, l2dis_3630  @ may be too distant for plain adr
-       str     r1, [r2]
+       adrl    r3, l2dis_3630_offset   @ may be too distant for plain adr
+       ldr     r2, [r3]                @ value for offset
+       str     r1, [r2, r3]            @ write to l2dis_3630
        ldmfd   sp!, {pc}       @ restore regs and return
 ENDPROC(enable_omap3630_toggle_l2_on_restore)
 
-       .text
-/* Function to call rom code to save secure ram context */
+/*
+ * Function to call ROM code to save the secure RAM context. This gets
+ * relocated to SRAM, so it can live entirely in the .data section;
+ * otherwise api_params would need to be initialized separately.
+ */
+       .data
        .align  3
 ENTRY(save_secure_ram_context)
        stmfd   sp!, {r4 - r11, lr}     @ save registers on stack
@@ -126,6 +131,8 @@ ENDPROC(save_secure_ram_context)
 ENTRY(save_secure_ram_context_sz)
        .word   . - save_secure_ram_context
 
+       .text
+
 /*
  * ======================
  * == Idle entry point ==
@@ -289,12 +296,6 @@ wait_sdrc_ready:
        bic     r5, r5, #0x40
        str     r5, [r4]
 
-/*
- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
- * base instead.
- * Be careful not to clobber r7 when maintaing this code.
- */
-
 is_dll_in_lock_mode:
        /* Is dll in lock mode? */
        ldr     r4, sdrc_dlla_ctrl
@@ -302,11 +303,7 @@ is_dll_in_lock_mode:
        tst     r5, #0x4
        bne     exit_nonoff_modes       @ Return if locked
        /* wait till dll locks */
-       adr     r7, kick_counter
 wait_dll_lock_timed:
-       ldr     r4, wait_dll_lock_counter
-       add     r4, r4, #1
-       str     r4, [r7, #wait_dll_lock_counter - kick_counter]
        ldr     r4, sdrc_dlla_status
        /* Wait 20uS for lock */
        mov     r6, #8
@@ -330,9 +327,6 @@ kick_dll:
        orr     r6, r6, #(1<<3)         @ enable dll
        str     r6, [r4]
        dsb
-       ldr     r4, kick_counter
-       add     r4, r4, #1
-       str     r4, [r7]                @ kick_counter
        b       wait_dll_lock_timed
 
 exit_nonoff_modes:
@@ -360,15 +354,6 @@ sdrc_dlla_status:
        .word   SDRC_DLLA_STATUS_V
 sdrc_dlla_ctrl:
        .word   SDRC_DLLA_CTRL_V
-       /*
-        * When exporting to userspace while the counters are in SRAM,
-        * these 2 words need to be at the end to facilitate retrival!
-        */
-kick_counter:
-       .word   0
-wait_dll_lock_counter:
-       .word   0
-
 ENTRY(omap3_do_wfi_sz)
        .word   . - omap3_do_wfi
 
@@ -437,7 +422,9 @@ ENTRY(omap3_restore)
        cmp     r2, #0x0        @ Check if target power state was OFF or RET
        bne     logic_l1_restore
 
-       ldr     r0, l2dis_3630
+       adr     r1, l2dis_3630_offset   @ address for offset
+       ldr     r0, [r1]                @ value for offset
+       ldr     r0, [r1, r0]            @ value at l2dis_3630
        cmp     r0, #0x1        @ should we disable L2 on 3630?
        bne     skipl2dis
        mrc     p15, 0, r0, c1, c0, 1
@@ -449,12 +436,14 @@ skipl2dis:
        and     r1, #0x700
        cmp     r1, #0x300
        beq     l2_inv_gp
+       adr     r0, l2_inv_api_params_offset
+       ldr     r3, [r0]
+       add     r3, r3, r0              @ r3 points to dummy parameters
        mov     r0, #40                 @ set service ID for PPA
        mov     r12, r0                 @ copy secure Service ID in r12
        mov     r1, #0                  @ set task id for ROM code in r1
        mov     r2, #4                  @ set some flags in r2, r6
        mov     r6, #0xff
-       adr     r3, l2_inv_api_params   @ r3 points to dummy parameters
        dsb                             @ data write barrier
        dmb                             @ data memory barrier
        smc     #1                      @ call SMI monitor (smi #1)
@@ -488,8 +477,8 @@ skipl2dis:
        b       logic_l1_restore
 
        .align
-l2_inv_api_params:
-       .word   0x1, 0x00
+l2_inv_api_params_offset:
+       .long   l2_inv_api_params - .
 l2_inv_gp:
        /* Execute smi to invalidate L2 cache */
        mov r12, #0x1                   @ set up to invalidate L2
@@ -506,7 +495,9 @@ l2_inv_gp:
        mov     r12, #0x2
        smc     #0                      @ Call SMI monitor (smieq)
 logic_l1_restore:
-       ldr     r1, l2dis_3630
+       adr     r0, l2dis_3630_offset   @ address for offset
+       ldr     r1, [r0]                @ value for offset
+       ldr     r1, [r0, r1]            @ value at l2dis_3630
        cmp     r1, #0x1                @ Test if L2 re-enable needed on 3630
        bne     skipl2reen
        mrc     p15, 0, r1, c1, c0, 1
@@ -535,9 +526,17 @@ control_stat:
        .word   CONTROL_STAT
 control_mem_rta:
        .word   CONTROL_MEM_RTA_CTRL
+l2dis_3630_offset:
+       .long   l2dis_3630 - .
+
+       .data
 l2dis_3630:
        .word   0
 
+       .data
+l2_inv_api_params:
+       .word   0x1, 0x00
+
 /*
  * Internal functions
  */
index 9b09d85d811a1c52b0fa735a2e2743e05346843d..c7a3b4aab4b5441249ddd9a7fdc362ef9370d737 100644 (file)
        dsb
 .endm
 
-ppa_zero_params:
-       .word           0x0
-
-ppa_por_params:
-       .word           1, 0
-
 #ifdef CONFIG_ARCH_OMAP4
 
 /*
@@ -266,7 +260,9 @@ ENTRY(omap4_cpu_resume)
        beq     skip_ns_smp_enable
 ppa_actrl_retry:
        mov     r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
-       adr     r3, ppa_zero_params             @ Pointer to parameters
+       adr     r1, ppa_zero_params_offset
+       ldr     r3, [r1]
+       add     r3, r3, r1                      @ Pointer to ppa_zero_params
        mov     r1, #0x0                        @ Process ID
        mov     r2, #0x4                        @ Flag
        mov     r6, #0xff
@@ -303,7 +299,9 @@ skip_ns_smp_enable:
        ldr     r0, =OMAP4_PPA_L2_POR_INDEX
        ldr     r1, =OMAP44XX_SAR_RAM_BASE
        ldr     r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
-       adr     r3, ppa_por_params
+       adr     r1, ppa_por_params_offset
+       ldr     r3, [r1]
+       add     r3, r3, r1                      @ Pointer to ppa_por_params
        str     r4, [r3, #0x04]
        mov     r1, #0x0                        @ Process ID
        mov     r2, #0x4                        @ Flag
@@ -328,6 +326,8 @@ skip_l2en:
 #endif
 
        b       cpu_resume                      @ Jump to generic resume
+ppa_por_params_offset:
+       .long   ppa_por_params - .
 ENDPROC(omap4_cpu_resume)
 #endif /* CONFIG_ARCH_OMAP4 */
 
@@ -380,4 +380,13 @@ ENTRY(omap_do_wfi)
        nop
 
        ldmfd   sp!, {pc}
+ppa_zero_params_offset:
+       .long   ppa_zero_params - .
 ENDPROC(omap_do_wfi)
+
+       .data
+ppa_zero_params:
+       .word           0
+
+ppa_por_params:
+       .word           1, 0
diff --git a/arch/arm/mach-orion5x/Makefile.boot b/arch/arm/mach-orion5x/Makefile.boot
deleted file mode 100644 (file)
index 760a0ef..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
diff --git a/arch/arm/mach-prima2/Makefile.boot b/arch/arm/mach-prima2/Makefile.boot
deleted file mode 100644 (file)
index c77a488..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-zreladdr-y             += 0x00008000
-params_phys-y          := 0x00000100
-initrd_phys-y          := 0x00800000
index 73494500b51cbb79b2a1aa025f5ad54f5cf60276..e165ae4600f5d5ef27bb54dbb522d57ac917584b 100644 (file)
@@ -11,16 +11,17 @@ menuconfig ARCH_QCOM
 
 if ARCH_QCOM
 
-config ARCH_MSM8X60
-       bool "Enable support for MSM8X60"
+config ARCH_QCOM_A_FAMILY
+       bool "Support a-family chipsets (msm8660, msm8960, apq8064)"
+       default y
        select CLKSRC_QCOM
+       help
+         Select this option if you want to support a-family platforms.
 
-config ARCH_MSM8960
-       bool "Enable support for MSM8960"
-       select CLKSRC_QCOM
+         A-family includes all Snapdragon S1/S2/S3/S4 chips before 2013,
+         up to the MSM8x60 and APQ8064 SoCs.
 
-config ARCH_MSM8974
-       bool "Enable support for MSM8974"
-       select HAVE_ARM_ARCH_TIMER
+         B-family includes all Snapdragon 2xx/4xx/6xx/8xx models starting
+         in 2013 with the MSM8x74 SoC.
 
 endif
index def40a0dd60cd290d91233cc47a1a8bad4ea6aa1..70ab4a25a5f853d4d8dade14490618d2523fcd13 100644 (file)
@@ -1,5 +1,6 @@
 menuconfig ARCH_REALVIEW
-       bool "ARM Ltd. RealView family" if ARCH_MULTI_V5 || ARCH_MULTI_V6 || ARCH_MULTI_V7
+       bool "ARM Ltd. RealView family"
+       depends on ARCH_MULTI_V5 || ARCH_MULTI_V6 || ARCH_MULTI_V7
        select ARM_AMBA
        select ARM_TIMER_SP804
        select COMMON_CLK_VERSATILE
diff --git a/arch/arm/mach-realview/Makefile.boot b/arch/arm/mach-realview/Makefile.boot
deleted file mode 100644 (file)
index d2c3d78..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-ifeq ($(CONFIG_REALVIEW_HIGH_PHYS_OFFSET),y)
-   zreladdr-y  += 0x70008000
-params_phys-y  := 0x70000100
-initrd_phys-y  := 0x70800000
-else
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
-endif
index 65585392655b3ac499dff837b2f6adfe02d5a708..6964e88760614ba1f8fa60c7f6fd12f3b07dfe20 100644 (file)
@@ -80,7 +80,7 @@ static void __init realview_smp_prepare_cpus(unsigned int max_cpus)
                     virt_to_phys(versatile_secondary_startup));
 }
 
-struct smp_operations realview_dt_smp_ops __initdata = {
+static const struct smp_operations realview_dt_smp_ops __initconst = {
        .smp_prepare_cpus       = realview_smp_prepare_cpus,
        .smp_secondary_init     = versatile_secondary_init,
        .smp_boot_secondary     = versatile_boot_secondary,
index 7c0c420c3016acfb7dcaa74947da675ba3a1f0a7..e5c1888fc67bbfdc461c7c689f76ccf204043b25 100644 (file)
@@ -3,7 +3,8 @@
 #
 # Licensed under GPLv2
 menuconfig ARCH_S3C64XX
-       bool "Samsung S3C64XX" if ARCH_MULTI_V6
+       bool "Samsung S3C64XX"
+       depends on ARCH_MULTI_V6
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
        select ARM_VIC
diff --git a/arch/arm/mach-s3c64xx/Makefile.boot b/arch/arm/mach-s3c64xx/Makefile.boot
deleted file mode 100644 (file)
index c642333..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-   zreladdr-y  += 0x50008000
-params_phys-y  := 0x50000100
index 9cb11215cebaeb2e4e0a8355589ce6d7e5270b78..225c12bb3de91e7662c913ac1b8748dd56dbab8a 100644 (file)
@@ -12,7 +12,8 @@ extern void shmobile_smp_hook(unsigned int cpu, unsigned long fn,
                              unsigned long arg);
 extern bool shmobile_smp_cpu_can_disable(unsigned int cpu);
 extern void shmobile_boot_scu(void);
-extern void shmobile_smp_scu_prepare_cpus(unsigned int max_cpus);
+extern void shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
+                                         unsigned int max_cpus);
 extern void shmobile_smp_scu_cpu_die(unsigned int cpu);
 extern int shmobile_smp_scu_cpu_kill(unsigned int cpu);
 extern struct platform_suspend_ops shmobile_suspend_ops;
@@ -31,8 +32,6 @@ int shmobile_cpufreq_init(void);
 static inline int shmobile_cpufreq_init(void) { return 0; }
 #endif
 
-extern void __iomem *shmobile_scu_base;
-
 static inline void __init shmobile_init_late(void)
 {
        shmobile_suspend_init();
index 57fbff024dcd5dd6ccf23afb94e09d6b0ae47796..634d701c56a7463d9b93b005498362d29e14b777 100644 (file)
@@ -10,6 +10,8 @@
 
 #include <linux/platform_device.h>
 
+#include "common.h"
+
 int __init shmobile_cpufreq_init(void)
 {
        platform_device_register_simple("cpufreq-dt", -1, NULL, 0);
diff --git a/arch/arm/mach-shmobile/emev2.h b/arch/arm/mach-shmobile/emev2.h
new file mode 100644 (file)
index 0000000..916d25f
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __ASM_EMEV2_H__
+#define __ASM_EMEV2_H__
+
+extern const struct smp_operations emev2_smp_ops;
+
+#endif /* __ASM_EMEV2_H__ */
index fa5248c52399c9b5e78e3c1cd7c167523f306424..936d7011c3141b73228c923129f29f3635b0df13 100644 (file)
@@ -27,7 +27,7 @@
  */
 ENTRY(shmobile_boot_scu)
                                        @ r0 = SCU base address
-       mrc     p15, 0, r1, c0, c0, 5   @ read MIPDR
+       mrc     p15, 0, r1, c0, c0, 5   @ read MPIDR
        and     r1, r1, #3              @ mask out cpu ID
        lsl     r1, r1, #3              @ we will shift by cpu_id * 8 bits
        ldr     r2, [r0, #8]            @ SCU Power Status Register
@@ -38,9 +38,3 @@ ENTRY(shmobile_boot_scu)
 
        b       secondary_startup
 ENDPROC(shmobile_boot_scu)
-
-       .text
-       .align  2
-       .globl  shmobile_scu_base
-shmobile_scu_base:
-       .space  4
index 64663110ab6ca0e1d8f06cc04e69175165e2cace..4c7d2caf3730f644ca9cb39332e9295d5b10c600 100644 (file)
 #include <asm/smp_scu.h>
 #include "common.h"
 
+
+static phys_addr_t shmobile_scu_base_phys;
+static void __iomem *shmobile_scu_base;
+
 static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
                                          unsigned long action, void *hcpu)
 {
@@ -26,7 +30,7 @@ static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
                /* For this particular CPU register SCU SMP boot vector */
                shmobile_smp_hook(cpu, virt_to_phys(shmobile_boot_scu),
-                                 (unsigned long)shmobile_scu_base);
+                                 shmobile_scu_base_phys);
                break;
        };
 
@@ -37,13 +41,16 @@ static struct notifier_block shmobile_smp_scu_notifier = {
        .notifier_call = shmobile_smp_scu_notifier_call,
 };
 
-void __init shmobile_smp_scu_prepare_cpus(unsigned int max_cpus)
+void __init shmobile_smp_scu_prepare_cpus(phys_addr_t scu_base_phys,
+                                         unsigned int max_cpus)
 {
        /* install boot code shared by all CPUs */
        shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
        shmobile_boot_arg = MPIDR_HWID_BITMASK;
 
        /* enable SCU and cache coherency on booting CPU */
+       shmobile_scu_base_phys = scu_base_phys;
+       shmobile_scu_base = ioremap(scu_base_phys, PAGE_SIZE);
        scu_enable(shmobile_scu_base);
        scu_power_mode(shmobile_scu_base, SCU_PM_NORMAL);
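With this change a caller passes the SCU's physical base and the ioremap() happens inside the helper instead of relying on a fixed static mapping. A hypothetical platform call site (the address is made up):

    #include <linux/init.h>
    #include <linux/types.h>

    #include "common.h"

    static void __init example_smp_prepare_cpus(unsigned int max_cpus)
    {
            /* Maps and enables the SCU and installs the shared SMP boot code. */
            shmobile_smp_scu_prepare_cpus(0xf0000000 /* assumed SCU base */,
                                          max_cpus);
    }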
 
index 10b7cb5dcb3af1e3553761eb5d2f0dbbc33d8bf4..3c99aaf65325cd19860968c24e4cf83703e84970 100644 (file)
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
-#include "common.h"
-
-static struct map_desc emev2_io_desc[] __initdata = {
-#ifdef CONFIG_SMP
-       /* 2M mapping for SCU + L2 controller */
-       {
-               .virtual        = 0xf0000000,
-               .pfn            = __phys_to_pfn(0x1e000000),
-               .length         = SZ_2M,
-               .type           = MT_DEVICE
-       },
-#endif
-};
 
-static void __init emev2_map_io(void)
-{
-       iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc));
-}
+#include "common.h"
+#include "emev2.h"
 
 static const char *const emev2_boards_compat_dt[] __initconst = {
        "renesas,emev2",
        NULL,
 };
 
-extern const struct smp_operations emev2_smp_ops;
-
 DT_MACHINE_START(EMEV2_DT, "Generic Emma Mobile EV2 (Flattened Device Tree)")
        .smp            = smp_ops(emev2_smp_ops),
-       .map_io         = emev2_map_io,
        .init_early     = shmobile_init_delay,
        .init_late      = shmobile_init_late,
        .dt_compat      = emev2_boards_compat_dt,
index 0c8f80c5b04df34d61ed6a195769f3075fef3a56..db6dbfbaf9f10e804618556144034fa34083c7be 100644 (file)
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
-#include <asm/hardware/cache-l2x0.h>
 
 #include "common.h"
 
-static struct map_desc r8a7740_io_desc[] __initdata = {
-        /*
-         * for CPGA/INTC/PFC
-         * 0xe6000000-0xefffffff -> 0xe6000000-0xefffffff
-         */
-       {
-               .virtual        = 0xe6000000,
-               .pfn            = __phys_to_pfn(0xe6000000),
-               .length         = 160 << 20,
-               .type           = MT_DEVICE_NONSHARED
-       },
-#ifdef CONFIG_CACHE_L2X0
-       /*
-        * for l2x0_init()
-        * 0xf0100000-0xf0101000 -> 0xf0002000-0xf0003000
-        */
-       {
-               .virtual        = 0xf0002000,
-               .pfn            = __phys_to_pfn(0xf0100000),
-               .length         = PAGE_SIZE,
-               .type           = MT_DEVICE_NONSHARED
-       },
-#endif
-};
-
-static void __init r8a7740_map_io(void)
-{
-       debug_ll_io_init();
-       iotable_init(r8a7740_io_desc, ARRAY_SIZE(r8a7740_io_desc));
-}
-
 /*
  * r8a7740 chip has lasting errata on MERAM buffer.
  * this is work-around for it.
@@ -110,10 +78,6 @@ static void __init r8a7740_generic_init(void)
 {
        r8a7740_meram_workaround();
 
-#ifdef CONFIG_CACHE_L2X0
-       /* Shared attribute override enable, 32K*8way */
-       l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
-#endif
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
@@ -123,7 +87,8 @@ static const char *const r8a7740_boards_compat_dt[] __initconst = {
 };
 
 DT_MACHINE_START(R8A7740_DT, "Generic R8A7740 (Flattened Device Tree)")
-       .map_io         = r8a7740_map_io,
+       .l2c_aux_val    = 0,
+       .l2c_aux_mask   = ~0,
        .init_early     = shmobile_init_delay,
        .init_irq       = r8a7740_init_irq_of,
        .init_machine   = r8a7740_generic_init,
index 9eccde3c7b137151e3fcff5c882cb6bd62870195..6d0ebdfb03a292166dfa3b0979ce810993ecf658 100644 (file)
@@ -182,8 +182,6 @@ static int __init rcar_gen2_scan_mem(unsigned long node, const char *uname,
        return 0;
 }
 
-struct cma *rcar_gen2_dma_contiguous;
-
 void __init rcar_gen2_reserve(void)
 {
        struct memory_reserve_config mrc;
@@ -194,8 +192,11 @@ void __init rcar_gen2_reserve(void)
 
        of_scan_flat_dt(rcar_gen2_scan_mem, &mrc);
 #ifdef CONFIG_DMA_CMA
-       if (mrc.size && memblock_is_region_memory(mrc.base, mrc.size))
+       if (mrc.size && memblock_is_region_memory(mrc.base, mrc.size)) {
+               static struct cma *rcar_gen2_dma_contiguous;
+
                dma_contiguous_reserve_area(mrc.size, mrc.base, 0,
                                            &rcar_gen2_dma_contiguous, true);
+       }
 #endif
 }
index adbac6963f2b962e2350ecff5638271d55cf5046..3a732199cf5e98c0f5cc625c86c75fb182631513 100644 (file)
@@ -21,7 +21,9 @@
 #include <linux/delay.h>
 #include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
+
 #include "common.h"
+#include "emev2.h"
 
 #define EMEV2_SCU_BASE 0x1e000000
 #define EMEV2_SMU_BASE 0xe0110000
@@ -45,8 +47,7 @@ static void __init emev2_smp_prepare_cpus(unsigned int max_cpus)
        }
 
        /* setup EMEV2 specific SCU bits */
-       shmobile_scu_base = ioremap(EMEV2_SCU_BASE, PAGE_SIZE);
-       shmobile_smp_scu_prepare_cpus(max_cpus);
+       shmobile_smp_scu_prepare_cpus(EMEV2_SCU_BASE, max_cpus);
 }
 
 const struct smp_operations emev2_smp_ops __initconst = {
index b854fe2095ad14616b7c4aae209b47f7e4f7ded3..f5c31fbc10b2efbf3c341f9efbaa66f261c0f4d4 100644 (file)
@@ -92,12 +92,9 @@ static void __init r8a7779_smp_prepare_cpus(unsigned int max_cpus)
 {
        /* Map the reset vector (in headsmp-scu.S, headsmp.S) */
        __raw_writel(__pa(shmobile_boot_vector), AVECR);
-       shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
-       shmobile_boot_arg = (unsigned long)shmobile_scu_base;
 
        /* setup r8a7779 specific SCU bits */
-       shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
-       shmobile_smp_scu_prepare_cpus(max_cpus);
+       shmobile_smp_scu_prepare_cpus(R8A7779_SCU_BASE, max_cpus);
 
        r8a7779_pm_init();
 
index ee1a4b70604bd6eaca0245c10bb6f8e6ec6391d5..41137404382e2f94c61d610988c4e442dc094800 100644 (file)
@@ -52,8 +52,7 @@ static void __init sh73a0_smp_prepare_cpus(unsigned int max_cpus)
        __raw_writel(__pa(shmobile_boot_vector), SBAR);
 
        /* setup sh73a0 specific SCU bits */
-       shmobile_scu_base = IOMEM(SH73A0_SCU_BASE);
-       shmobile_smp_scu_prepare_cpus(max_cpus);
+       shmobile_smp_scu_prepare_cpus(SH73A0_SCU_BASE, max_cpus);
 }
 
 const struct smp_operations sh73a0_smp_ops __initconst = {
index 5d92b5dd486b0b9fd8d501aa3b69f56f96ae1ee2..74b30bade2c15c380c0d3fd721d5aa80a957ef79 100644 (file)
@@ -17,6 +17,8 @@
 #include <asm/io.h>
 #include <asm/system_misc.h>
 
+#include "common.h"
+
 static int shmobile_suspend_default_enter(suspend_state_t suspend_state)
 {
        cpu_do_idle();
index c17d4d3881ffc45e5e169af4e91cf46744ac006d..ad008e4b0c49a4aa1be23992be444198d67e41d5 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/delay.h>
 #include <linux/of_address.h>
 
+#include "common.h"
+
 static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz,
                                           unsigned int mult, unsigned int div)
 {
diff --git a/arch/arm/mach-spear/Makefile.boot b/arch/arm/mach-spear/Makefile.boot
deleted file mode 100644 (file)
index 4674a4c..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-zreladdr-y     += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
index c2be98f38e73b2006ea664b7089dc427a2575d8e..3c156190a1d44223027b4c9a37ab67550219854b 100644 (file)
@@ -69,6 +69,7 @@ MACHINE_END
 static const char * const sun8i_board_dt_compat[] = {
        "allwinner,sun8i-a23",
        "allwinner,sun8i-a33",
+       "allwinner,sun8i-a83t",
        "allwinner,sun8i-h3",
        NULL,
 };
index d6a3714b096e69801c381d320a0ccb1be3472585..ebe15b93bbe870804eb17759b05096a8f7298167 100644 (file)
@@ -1,5 +1,6 @@
 config ARCH_TANGO
-       bool "Sigma Designs Tango4 (SMP87xx)" if ARCH_MULTI_V7
+       bool "Sigma Designs Tango4 (SMP87xx)"
+       depends on ARCH_MULTI_V7
        # Cortex-A9 MPCore r3p0, PL310 r3p2
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARM_ERRATA_754322
index a18d5a34e2f5738e4f62bd24bf8a0b9edff7a3e8..a21f55e000d258c734535cbfc9df57744599887a 100644 (file)
@@ -9,7 +9,7 @@ static int tango_boot_secondary(unsigned int cpu, struct task_struct *idle)
        return 0;
 }
 
-static struct smp_operations tango_smp_ops __initdata = {
+static const struct smp_operations tango_smp_ops __initconst = {
        .smp_boot_secondary     = tango_boot_secondary,
 };
 
diff --git a/arch/arm/mach-u300/Makefile.boot b/arch/arm/mach-u300/Makefile.boot
deleted file mode 100644 (file)
index 87811de..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-   zreladdr-y  += 0x48008000
-params_phys-y  := 0x48000100
-# This isn't used.
-#initrd_phys-y := 0x48800000
diff --git a/arch/arm/mach-ux500/Makefile.boot b/arch/arm/mach-ux500/Makefile.boot
deleted file mode 100644 (file)
index 760a0ef..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
diff --git a/arch/arm/mach-zynq/Makefile.boot b/arch/arm/mach-zynq/Makefile.boot
deleted file mode 100644 (file)
index 760a0ef..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-   zreladdr-y  += 0x00008000
-params_phys-y  := 0x00000100
-initrd_phys-y  := 0x00800000
index 549f6d3aec5b66457a948f579f04b8b5dd1021cc..4daeda0a5b7faa837093e829cdd6aa63e29603ec 100644 (file)
@@ -1037,24 +1037,26 @@ config ARCH_SUPPORTS_BIG_ENDIAN
          This option specifies the architecture can support big endian
          operation.
 
-config ARM_KERNMEM_PERMS
-       bool "Restrict kernel memory permissions"
+config DEBUG_RODATA
+       bool "Make kernel text and rodata read-only"
        depends on MMU
+       default y if CPU_V7
        help
-         If this is set, kernel memory other than kernel text (and rodata)
-         will be made non-executable. The tradeoff is that each region is
-         padded to section-size (1MiB) boundaries (because their permissions
-         are different and splitting the 1M pages into 4K ones causes TLB
-         performance problems), wasting memory.
+         If this is set, kernel text and rodata memory will be made
+         read-only, and non-text kernel memory will be made non-executable.
+         The tradeoff is that each region is padded to section-size (1MiB)
+         boundaries (because their permissions are different and splitting
+         the 1M pages into 4K ones causes TLB performance problems), which
+         can waste memory.
 
-config DEBUG_RODATA
-       bool "Make kernel text and rodata read-only"
-       depends on ARM_KERNMEM_PERMS
+config DEBUG_ALIGN_RODATA
+       bool "Make rodata strictly non-executable"
+       depends on DEBUG_RODATA
        default y
        help
-         If this is set, kernel text and rodata will be made read-only. This
-         is to help catch accidental or malicious attempts to change the
-         kernel's executable code. Additionally splits rodata from kernel
-         text so it can be made explicitly non-executable. This creates
-         another section-size padded region, so it can waste more memory
-         space while gaining the read-only protections.
+         If this is set, rodata will be made explicitly non-executable. This
+         provides protection on the rare chance that attackers might find and
+         use ROP gadgets that exist in the rodata section. This adds an
+         additional section-aligned split of rodata from kernel text so it
+         can be made explicitly non-executable. This padding may waste memory
+         space to gain the additional protection.
index 1e373d268c04c3e3697ac40aaca1915f436a147b..88255bea65e41e61bd16ed2b12513ebcde7b04bd 100644 (file)
 #include <asm/cputype.h>
 #include <asm/hardware/cache-tauros2.h>
 
+/* CP15 PJ4 Control configuration register */
+#define CCR_L2C_PREFETCH_DISABLE       BIT(24)
+#define CCR_L2C_ECC_ENABLE             BIT(23)
+#define CCR_L2C_WAY7_4_DISABLE         BIT(21)
+#define CCR_L2C_BURST8_ENABLE          BIT(20)
 
 /*
  * When Tauros2 is used on a CPU that supports the v7 hierarchical
@@ -182,18 +187,18 @@ static void enable_extra_feature(unsigned int features)
        u = read_extra_features();
 
        if (features & CACHE_TAUROS2_PREFETCH_ON)
-               u &= ~0x01000000;
+               u &= ~CCR_L2C_PREFETCH_DISABLE;
        else
-               u |= 0x01000000;
+               u |= CCR_L2C_PREFETCH_DISABLE;
        pr_info("Tauros2: %s L2 prefetch.\n",
                        (features & CACHE_TAUROS2_PREFETCH_ON)
                        ? "Enabling" : "Disabling");
 
        if (features & CACHE_TAUROS2_LINEFILL_BURST8)
-               u |= 0x00100000;
+               u |= CCR_L2C_BURST8_ENABLE;
        else
-               u &= ~0x00100000;
-       pr_info("Tauros2: %s line fill burt8.\n",
+               u &= ~CCR_L2C_BURST8_ENABLE;
+       pr_info("Tauros2: %s burst8 line fill.\n",
                        (features & CACHE_TAUROS2_LINEFILL_BURST8)
                        ? "Enabling" : "Disabling");
 
@@ -287,16 +292,15 @@ void __init tauros2_init(unsigned int features)
        node = of_find_matching_node(NULL, tauros2_ids);
        if (!node) {
                pr_info("Not found marvell,tauros2-cache, disable it\n");
-               return;
+       } else {
+               ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
+               if (ret) {
+                       pr_info("Not found marvell,tauros-cache-features property, "
+                               "disable extra features\n");
+                       features = 0;
+               } else
+                       features = f;
        }
-
-       ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
-       if (ret) {
-               pr_info("Not found marvell,tauros-cache-features property, "
-                       "disable extra features\n");
-               features = 0;
-       } else
-               features = f;
 #endif
        tauros2_internal_init(features);
 }
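
The magic masks in enable_extra_feature() are replaced by named CP15 extra-features bits, which also documents the register layout. A small sketch of how such a named bit is applied, reusing the BIT() definition added above; the helper itself is illustrative:

    #include <linux/bitops.h>

    #define CCR_L2C_PREFETCH_DISABLE        BIT(24)

    /* Enabling prefetch means clearing the "disable" bit, as in the patch. */
    static u32 ccr_with_prefetch_enabled(u32 ccr)
    {
            return ccr & ~CCR_L2C_PREFETCH_DISABLE;
    }
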
index d65909697165b20cc1936f50073bd3eea7521aec..bd274a05b8ffa9446160d1de69f777d9431c629b 100644 (file)
@@ -15,7 +15,7 @@
  * page tables.
  */
 pgd_t *idmap_pgd;
-phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
+unsigned long (*arch_virt_to_idmap)(unsigned long x);
 
 #ifdef CONFIG_ARM_LPAE
 static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
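
The arch_virt_to_idmap hook now returns unsigned long, matching how virt_to_idmap() consumers use the value. A hedged sketch of a platform assigning the hook under the corrected signature; the platform function below is a placeholder, not taken from this patch:

    /* Illustrative only: identity by default, real platforms apply an offset. */
    static unsigned long my_platform_virt_to_idmap(unsigned long x)
    {
            return x;
    }

    static void __init my_platform_init_idmap(void)
    {
            arch_virt_to_idmap = my_platform_virt_to_idmap;
    }
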
index 49bd08178008adc38cca29b5e50bf26aa14a07c9..53f42508025bf63495ed9ded82925f79d8925029 100644 (file)
@@ -572,8 +572,9 @@ void __init mem_init(void)
        }
 }
 
-#ifdef CONFIG_ARM_KERNMEM_PERMS
+#ifdef CONFIG_DEBUG_RODATA
 struct section_perm {
+       const char *name;
        unsigned long start;
        unsigned long end;
        pmdval_t mask;
@@ -584,6 +585,7 @@ struct section_perm {
 static struct section_perm nx_perms[] = {
        /* Make pages tables, etc before _stext RW (set NX). */
        {
+               .name   = "pre-text NX",
                .start  = PAGE_OFFSET,
                .end    = (unsigned long)_stext,
                .mask   = ~PMD_SECT_XN,
@@ -591,14 +593,16 @@ static struct section_perm nx_perms[] = {
        },
        /* Make init RW (set NX). */
        {
+               .name   = "init NX",
                .start  = (unsigned long)__init_begin,
                .end    = (unsigned long)_sdata,
                .mask   = ~PMD_SECT_XN,
                .prot   = PMD_SECT_XN,
        },
-#ifdef CONFIG_DEBUG_RODATA
+#ifdef CONFIG_DEBUG_ALIGN_RODATA
        /* Make rodata NX (set RO in ro_perms below). */
        {
+               .name   = "rodata NX",
                .start  = (unsigned long)__start_rodata,
                .end    = (unsigned long)__init_begin,
                .mask   = ~PMD_SECT_XN,
@@ -607,10 +611,10 @@ static struct section_perm nx_perms[] = {
 #endif
 };
 
-#ifdef CONFIG_DEBUG_RODATA
 static struct section_perm ro_perms[] = {
        /* Make kernel code and rodata RX (set RO). */
        {
+               .name   = "text/rodata RO",
                .start  = (unsigned long)_stext,
                .end    = (unsigned long)__init_begin,
 #ifdef CONFIG_ARM_LPAE
@@ -623,7 +627,6 @@ static struct section_perm ro_perms[] = {
 #endif
        },
 };
-#endif
 
 /*
  * Updates section permissions only for the current mm (sections are
@@ -670,8 +673,8 @@ void set_section_perms(struct section_perm *perms, int n, bool set,
        for (i = 0; i < n; i++) {
                if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
                    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
-                       pr_err("BUG: section %lx-%lx not aligned to %lx\n",
-                               perms[i].start, perms[i].end,
+                       pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
+                               perms[i].name, perms[i].start, perms[i].end,
                                SECTION_SIZE);
                        continue;
                }
@@ -712,7 +715,6 @@ void fix_kernmem_perms(void)
        stop_machine(__fix_kernmem_perms, NULL, NULL);
 }
 
-#ifdef CONFIG_DEBUG_RODATA
 int __mark_rodata_ro(void *unused)
 {
        update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
@@ -735,11 +737,10 @@ void set_kernel_text_ro(void)
        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                                current->active_mm);
 }
-#endif /* CONFIG_DEBUG_RODATA */
 
 #else
 static inline void fix_kernmem_perms(void) { }
-#endif /* CONFIG_ARM_KERNMEM_PERMS */
+#endif /* CONFIG_DEBUG_RODATA */
 
 void free_tcmmem(void)
 {
index 8085a8aac8124839d889849d4d5877be3fa22252..ffb93db68e9c80b3003e6d93c7e93d25eb76182e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/irq.h>
 #include <linux/sched_clock.h>
 #include <plat/time.h>
+#include <asm/delay.h>
 
 /*
  * MBus bridge block registers.
@@ -188,6 +189,15 @@ orion_time_set_base(void __iomem *_timer_base)
        timer_base = _timer_base;
 }
 
+static unsigned long orion_delay_timer_read(void)
+{
+       return ~readl(timer_base + TIMER0_VAL_OFF);
+}
+
+static struct delay_timer orion_delay_timer = {
+       .read_current_timer = orion_delay_timer_read,
+};
+
 void __init
 orion_time_init(void __iomem *_bridge_base, u32 _bridge_timer1_clr_mask,
                unsigned int irq, unsigned int tclk)
@@ -202,6 +212,9 @@ orion_time_init(void __iomem *_bridge_base, u32 _bridge_timer1_clr_mask,
 
        ticks_per_jiffy = (tclk + HZ/2) / HZ;
 
+       orion_delay_timer.freq = tclk;
+       register_current_timer_delay(&orion_delay_timer);
+
        /*
         * Set scale and timer for sched_clock.
         */
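
The new delay_timer lets ARM's udelay() use the free-running Orion timer instead of a calibrated busy loop. The registration pattern, sketched for a hypothetical down-counting timer; the names and register offset are placeholders, not from the patch:

    #include <linux/io.h>
    #include <asm/delay.h>

    static void __iomem *my_timer_base;             /* mapped elsewhere */

    static unsigned long my_timer_read(void)
    {
            return ~readl(my_timer_base + 0x14);    /* invert a down-counter */
    }

    static struct delay_timer my_delay_timer = {
            .read_current_timer = my_timer_read,
    };

    static void __init my_timer_delay_init(unsigned int rate_hz)
    {
            my_delay_timer.freq = rate_hz;
            register_current_timer_delay(&my_delay_timer);
    }
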
index efa6e85619ad824c2d9ea9a2bdb0b2c0eb9a3077..daf3db9f0058f6d1387f9bf5e3522c6bac0c62f0 100644 (file)
@@ -422,8 +422,7 @@ static int s3c_adc_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM
 static int s3c_adc_suspend(struct device *dev)
 {
-       struct platform_device *pdev = container_of(dev,
-                       struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        struct adc_device *adc = platform_get_drvdata(pdev);
        unsigned long flags;
        u32 con;
@@ -444,8 +443,7 @@ static int s3c_adc_suspend(struct device *dev)
 
 static int s3c_adc_resume(struct device *dev)
 {
-       struct platform_device *pdev = container_of(dev,
-                       struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        struct adc_device *adc = platform_get_drvdata(pdev);
        enum s3c_cpu_type cpu = platform_get_device_id(pdev)->driver_data;
        int ret;
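
to_platform_device() is the standard wrapper around the open-coded container_of() removed here. A minimal sketch of the resulting pattern in a dev_pm_ops callback; the driver names are illustrative:

    #include <linux/platform_device.h>

    static int my_driver_suspend(struct device *dev)
    {
            struct platform_device *pdev = to_platform_device(dev);
            void *priv = platform_get_drvdata(pdev);

            /* driver-specific quiesce/save of "priv" would go here */
            (void)priv;
            return 0;
    }
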
index b53d4ff3befb69a5e692640443674bca83129f83..84baa16f4c0b674dfe404b54bb9fe09b4fd682bf 100644 (file)
@@ -727,15 +727,6 @@ static int __init s3c_nand_copy_set(struct s3c2410_nand_set *set)
                        return -ENOMEM;
        }
 
-       if (set->ecc_layout) {
-               ptr = kmemdup(set->ecc_layout,
-                             sizeof(struct nand_ecclayout), GFP_KERNEL);
-               set->ecc_layout = ptr;
-
-               if (!ptr)
-                       return -ENOMEM;
-       }
-
        return 0;
 }
 
index f5cf2bd208e0c1b21179c68d0ff2bab1b39eb531..e5557693fa923705df5cd1017fe1cf2427e6c3a8 100644 (file)
@@ -18,7 +18,6 @@
 
 #define S5P_VA_DMC0            S3C_ADDR(0x02440000)
 #define S5P_VA_DMC1            S3C_ADDR(0x02480000)
-#define S5P_VA_SROMC           S3C_ADDR(0x024C0000)
 
 #define S5P_VA_COREPERI_BASE   S3C_ADDR(0x02800000)
 #define S5P_VA_COREPERI(x)     (S5P_VA_COREPERI_BASE + (x))
index 04aff2c31b4607a99265d038d3ba0fd3b9a908b3..70f2f699bed3efe0802def3dc879cdd8e8857a05 100644 (file)
@@ -53,8 +53,8 @@ static void s3c_pm_run_res(struct resource *ptr, run_fn_t fn, u32 *arg)
                if (ptr->child != NULL)
                        s3c_pm_run_res(ptr->child, fn, arg);
 
-               if ((ptr->flags & IORESOURCE_MEM) &&
-                   strcmp(ptr->name, "System RAM") == 0) {
+               if ((ptr->flags & IORESOURCE_SYSTEM_RAM)
+                               == IORESOURCE_SYSTEM_RAM) {
                        S3C_PMDBG("Found system RAM at %08lx..%08lx\n",
                                  (unsigned long)ptr->start,
                                  (unsigned long)ptr->end);
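
Matching on IORESOURCE_SYSTEM_RAM replaces the fragile comparison against the "System RAM" resource name. A one-liner sketch of the test, assuming the flag is defined as IORESOURCE_MEM plus the system-RAM attribute (as in the resource-flag series this change depends on):

    #include <linux/ioport.h>

    static bool resource_is_system_ram(const struct resource *res)
    {
            return (res->flags & IORESOURCE_SYSTEM_RAM) == IORESOURCE_SYSTEM_RAM;
    }
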
index 8cc62289a63ed9958c9623acaa5462048cc9287b..af7c045e812b7c7112a41ab5cf2325fb8f3c3c2f 100644 (file)
@@ -14,6 +14,7 @@ config ARM64
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select ARCH_WANT_FRAME_POINTERS
+       select ARCH_HAS_UBSAN_SANITIZE_ALL
        select ARM_AMBA
        select ARM_ARCH_TIMER
        select ARM_GIC
@@ -49,6 +50,7 @@ config ARM64
        select HAVE_ALIGNED_STRUCT_PAGE if SLUB
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_BITREVERSE
+       select HAVE_ARCH_HUGE_VMAP
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KASAN if SPARSEMEM_VMEMMAP && !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
        select HAVE_ARCH_KGDB
@@ -537,6 +539,9 @@ config HOTPLUG_CPU
 source kernel/Kconfig.preempt
 source kernel/Kconfig.hz
 
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+       def_bool y
+
 config ARCH_HAS_HOLES_MEMORYMODEL
        def_bool y if SPARSEMEM
 
@@ -756,6 +761,15 @@ endmenu
 
 menu "Boot options"
 
+config ARM64_ACPI_PARKING_PROTOCOL
+       bool "Enable support for the ARM64 ACPI parking protocol"
+       depends on ACPI
+       help
+         Enable support for the ARM64 ACPI parking protocol. If disabled
+         the kernel will not allow booting through the ARM64 ACPI parking
+         protocol even if the corresponding data is present in the ACPI
+         MADT table.
+
 config CMDLINE
        string "Default kernel command string"
        default ""
index 21074f674bdeb707c137a9b87c2a65bde4bbb25a..8a09522752916c5951a41dec856a97b1744069a6 100644 (file)
@@ -60,6 +60,7 @@ config ARCH_ROCKCHIP
        select ARCH_REQUIRE_GPIOLIB
        select PINCTRL
        select PINCTRL_ROCKCHIP
+       select ROCKCHIP_TIMER
        help
          This enables support for the ARMv8 based Rockchip chipsets,
          like the RK3368.
index cd822d8454c0536165810004089c3a7ce5d0068d..307237cfe728b152e350b5fb4f1c1abc18d6fb4c 100644 (file)
@@ -27,6 +27,8 @@ $(warning LSE atomics not supported by binutils)
 endif
 
 KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr)
+KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
+KBUILD_CFLAGS  += $(call cc-option, -mpc-relative-literal-loads)
 KBUILD_AFLAGS  += $(lseinstr)
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
index cfdf701e05dfd6f4baf5395289086f2e486337ce..ba84770f789f5cff754fbdf2ac3b7df54ebbadc7 100644 (file)
@@ -1,4 +1,6 @@
-dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb
+dtb-$(CONFIG_ARCH_SEATTLE) += amd-overdrive.dtb \
+                       amd-overdrive-rev-b0.dtb amd-overdrive-rev-b1.dtb \
+                       husky.dtb
 
 always         := $(dtb-y)
 subdir-y       := $(dts-dirs)
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b0.dts
new file mode 100644 (file)
index 0000000..8e3074a
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * DTS file for AMD Seattle Overdrive Development Board
+ * Note: For Seattle Rev.B0
+ *
+ * Copyright (C) 2015 Advanced Micro Devices, Inc.
+ */
+
+/dts-v1/;
+
+/include/ "amd-seattle-soc.dtsi"
+
+/ {
+       model = "AMD Seattle (Rev.B0) Development Board (Overdrive)";
+       compatible = "amd,seattle-overdrive", "amd,seattle";
+
+       chosen {
+               stdout-path = &serial0;
+       };
+
+       psci {
+               compatible   = "arm,psci-0.2";
+               method       = "smc";
+       };
+};
+
+&ccp0 {
+       status = "ok";
+       amd,zlib-support = <1>;
+};
+
+/**
+ * NOTE: In Rev.B, gpio0 is reserved.
+ */
+&gpio1 {
+       status = "ok";
+};
+
+&gpio2 {
+       status = "ok";
+};
+
+&gpio3 {
+       status = "ok";
+};
+
+&gpio4 {
+       status = "ok";
+};
+
+&i2c0 {
+       status = "ok";
+};
+
+&i2c1 {
+       status = "ok";
+};
+
+&pcie0 {
+       status = "ok";
+};
+
+&spi0 {
+       status = "ok";
+};
+
+&spi1 {
+       status = "ok";
+       sdcard0: sdcard@0 {
+               compatible = "mmc-spi-slot";
+               reg = <0>;
+               spi-max-frequency = <20000000>;
+               voltage-ranges = <3200 3400>;
+               pl022,hierarchy = <0>;
+               pl022,interface = <0>;
+               pl022,com-mode = <0x0>;
+               pl022,rx-level-trig = <0>;
+               pl022,tx-level-trig = <0>;
+       };
+};
+
+&ipmi_kcs {
+       status = "ok";
+};
+
+&smb0 {
+       /include/ "amd-seattle-xgbe-b.dtsi"
+};
diff --git a/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts b/arch/arm64/boot/dts/amd/amd-overdrive-rev-b1.dts
new file mode 100644 (file)
index 0000000..ed5e043
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * DTS file for AMD Seattle Overdrive Development Board
+ * Note: For Seattle Rev.B1
+ *
+ * Copyright (C) 2015 Advanced Micro Devices, Inc.
+ */
+
+/dts-v1/;
+
+/include/ "amd-seattle-soc.dtsi"
+
+/ {
+       model = "AMD Seattle (Rev.B1) Development Board (Overdrive)";
+       compatible = "amd,seattle-overdrive", "amd,seattle";
+
+       chosen {
+               stdout-path = &serial0;
+       };
+
+       psci {
+               compatible   = "arm,psci-0.2";
+               method       = "smc";
+       };
+};
+
+&ccp0 {
+       status = "ok";
+       amd,zlib-support = <1>;
+};
+
+/**
+ * NOTE: In Rev.B, gpio0 is reserved.
+ */
+&gpio1 {
+       status = "ok";
+};
+
+&gpio2 {
+       status = "ok";
+};
+
+&gpio3 {
+       status = "ok";
+};
+
+&gpio4 {
+       status = "ok";
+};
+
+&i2c0 {
+       status = "ok";
+};
+
+&i2c1 {
+       status = "ok";
+};
+
+&pcie0 {
+       status = "ok";
+};
+
+&sata1 {
+       status = "ok";
+};
+
+&spi0 {
+       status = "ok";
+};
+
+&spi1 {
+       status = "ok";
+       sdcard0: sdcard@0 {
+               compatible = "mmc-spi-slot";
+               reg = <0>;
+               spi-max-frequency = <20000000>;
+               voltage-ranges = <3200 3400>;
+               pl022,hierarchy = <0>;
+               pl022,interface = <0>;
+               pl022,com-mode = <0x0>;
+               pl022,rx-level-trig = <0>;
+               pl022,tx-level-trig = <0>;
+       };
+};
+
+&ipmi_kcs {
+       status = "ok";
+};
+
+&smb0 {
+       /include/ "amd-seattle-xgbe-b.dtsi"
+};
index 2874d92881fda36c8a9fdb2a2eb2d6900225134e..a7fc059a7cd91987e84b59f43726a2e332425b7d 100644 (file)
@@ -18,8 +18,8 @@
                #size-cells = <2>;
                reg = <0x0 0xe1110000 0 0x1000>,
                      <0x0 0xe112f000 0 0x2000>,
-                     <0x0 0xe1140000 0 0x10000>,
-                     <0x0 0xe1160000 0 0x10000>;
+                     <0x0 0xe1140000 0 0x2000>,
+                     <0x0 0xe1160000 0 0x2000>;
                interrupts = <1 9 0xf04>;
                ranges = <0 0 0 0xe1100000 0 0x100000>;
                v2m0: v2m@e0080000 {
                #size-cells = <2>;
                ranges;
 
-               /* DDR range is 40-bit addressing */
-               dma-ranges = <0x80 0x0 0x80 0x0 0x7f 0xffffffff>;
+               /*
+                * dma-ranges is 40-bit address space containing:
+                * - GICv2m MSI register is at 0xe0080000
+                * - DRAM range [0x8000000000 to 0xffffffffff]
+                */
+               dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;
 
                /include/ "amd-seattle-clks.dtsi"
 
                sata0: sata@e0300000 {
                        compatible = "snps,dwc-ahci";
-                       reg = <0 0xe0300000 0 0x800>;
+                       reg = <0 0xe0300000 0 0xf0000>;
                        interrupts = <0 355 4>;
                        clocks = <&sataclk_333mhz>;
                        dma-coherent;
                };
 
+               /* This is for Rev B only */
+               sata1: sata@e0d00000 {
+                       status = "disabled";
+                       compatible = "snps,dwc-ahci";
+                       reg = <0 0xe0d00000 0 0xf0000>;
+                       interrupts = <0 354 4>;
+                       clocks = <&sataclk_333mhz>;
+                       dma-coherent;
+               };
+
                i2c0: i2c@e1000000 {
                        status = "disabled";
                        compatible = "snps,designware-i2c";
                        reg = <0 0xe1000000 0 0x1000>;
                        interrupts = <0 357 4>;
-                       clocks = <&uartspiclk_100mhz>;
+                       clocks = <&miscclk_250mhz>;
+               };
+
+               i2c1: i2c@e0050000 {
+                       status = "disabled";
+                       compatible = "snps,designware-i2c";
+                       reg = <0 0xe0050000 0 0x1000>;
+                       interrupts = <0 340 4>;
+                       clocks = <&miscclk_250mhz>;
                };
 
                serial0: serial@e1010000 {
                spi0: ssp@e1020000 {
                        status = "disabled";
                        compatible = "arm,pl022", "arm,primecell";
-                       #gpio-cells = <2>;
                        reg = <0 0xe1020000 0 0x1000>;
                        spi-controller;
                        interrupts = <0 330 4>;
                spi1: ssp@e1030000 {
                        status = "disabled";
                        compatible = "arm,pl022", "arm,primecell";
-                       #gpio-cells = <2>;
                        reg = <0 0xe1030000 0 0x1000>;
                        spi-controller;
                        interrupts = <0 329 4>;
                        #size-cells = <0>;
                };
 
-               gpio0: gpio@e1040000 {
+               gpio0: gpio@e1040000 { /* Not available to OS for B0 */
                        status = "disabled";
                        compatible = "arm,pl061", "arm,primecell";
                        #gpio-cells = <2>;
                        interrupts = <0 359 4>;
                        interrupt-controller;
                        #interrupt-cells = <2>;
-                       clocks = <&uartspiclk_100mhz>;
+                       clocks = <&miscclk_250mhz>;
                        clock-names = "apb_pclk";
                };
 
-               gpio1: gpio@e1050000 {
+               gpio1: gpio@e1050000 { /* [0:7] */
                        status = "disabled";
                        compatible = "arm,pl061", "arm,primecell";
                        #gpio-cells = <2>;
                        reg = <0 0xe1050000 0 0x1000>;
                        gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
                        interrupts = <0 358 4>;
-                       clocks = <&uartspiclk_100mhz>;
+                       clocks = <&miscclk_250mhz>;
+                       clock-names = "apb_pclk";
+               };
+
+               gpio2: gpio@e0020000 { /* [8:15] */
+                       status = "disabled";
+                       compatible = "arm,pl061", "arm,primecell";
+                       #gpio-cells = <2>;
+                       reg = <0 0xe0020000 0 0x1000>;
+                       gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupts = <0 366 4>;
+                       clocks = <&miscclk_250mhz>;
+                       clock-names = "apb_pclk";
+               };
+
+               gpio3: gpio@e0030000 { /* [16:23] */
+                       status = "disabled";
+                       compatible = "arm,pl061", "arm,primecell";
+                       #gpio-cells = <2>;
+                       reg = <0 0xe0030000 0 0x1000>;
+                       gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupts = <0 365 4>;
+                       clocks = <&miscclk_250mhz>;
+                       clock-names = "apb_pclk";
+               };
+
+               gpio4: gpio@e0080000 { /* [24] */
+                       status = "disabled";
+                       compatible = "arm,pl061", "arm,primecell";
+                       #gpio-cells = <2>;
+                       reg = <0 0xe0080000 0 0x1000>;
+                       gpio-controller;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+                       interrupts = <0 361 4>;
+                       clocks = <&miscclk_250mhz>;
                        clock-names = "apb_pclk";
                };
 
                                <0x1000 0x0 0x0 0x4 &gic0 0x0 0x0 0x0 0x123 0x1>;
 
                        dma-coherent;
-                       dma-ranges = <0x43000000 0x80 0x0 0x80 0x0 0x7f 0xffffffff>;
+                       dma-ranges = <0x43000000 0x0 0x0 0x0 0x0 0x100 0x0>;
                        ranges =
                                /* I/O Memory (size=64K) */
                                <0x01000000 0x00 0x00000000 0x00 0xefff0000 0x00 0x00010000>,
                                /* 64-bit MMIO (size= 124G) */
                                <0x03000000 0x01 0x00000000 0x01 0x00000000 0x7f 0x00000000>;
                };
+
+               /* Perf CCN504 PMU */
+               ccn: ccn@0xe8000000 {
+                       compatible = "arm,ccn-504";
+                       reg = <0x0 0xe8000000 0 0x1000000>;
+                       interrupts = <0 380 4>;
+               };
+
+               ipmi_kcs: kcs@e0010000 {
+                       status = "disabled";
+                       compatible = "ipmi-kcs";
+                       device_type = "ipmi";
+                       reg = <0x0 0xe0010000 0 0x8>;
+                       interrupts = <0 389 4>;
+                       interrupt-names = "ipmi_kcs";
+                       reg-size = <1>;
+                       reg-spacing = <4>;
+               };
        };
 };
diff --git a/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi b/arch/arm64/boot/dts/amd/amd-seattle-xgbe-b.dtsi
new file mode 100644 (file)
index 0000000..8e86319
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * DTS file for AMD Seattle XGBE (RevB)
+ *
+ * Copyright (C) 2015 Advanced Micro Devices, Inc.
+ */
+
+       xgmacclk0_dma_250mhz: clk250mhz_0 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <250000000>;
+               clock-output-names = "xgmacclk0_dma_250mhz";
+       };
+
+       xgmacclk0_ptp_250mhz: clk250mhz_1 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <250000000>;
+               clock-output-names = "xgmacclk0_ptp_250mhz";
+       };
+
+       xgmacclk1_dma_250mhz: clk250mhz_2 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <250000000>;
+               clock-output-names = "xgmacclk1_dma_250mhz";
+       };
+
+       xgmacclk1_ptp_250mhz: clk250mhz_3 {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <250000000>;
+               clock-output-names = "xgmacclk1_ptp_250mhz";
+       };
+
+       xgmac0: xgmac@e0700000 {
+               compatible = "amd,xgbe-seattle-v1a";
+               reg = <0 0xe0700000 0 0x80000>,
+                     <0 0xe0780000 0 0x80000>,
+                     <0 0xe1240800 0 0x00400>, /* SERDES RX/TX0 */
+                     <0 0xe1250000 0 0x00060>, /* SERDES IR 1/2 */
+                     <0 0xe12500f8 0 0x00004>; /* SERDES IR 2/2 */
+               interrupts = <0 325 4>,
+                            <0 346 1>, <0 347 1>, <0 348 1>, <0 349 1>,
+                            <0 323 4>;
+               amd,per-channel-interrupt;
+               amd,speed-set = <0>;
+               amd,serdes-blwc = <1>, <1>, <0>;
+               amd,serdes-cdr-rate = <2>, <2>, <7>;
+               amd,serdes-pq-skew = <10>, <10>, <18>;
+               amd,serdes-tx-amp = <0>, <0>, <0>;
+               amd,serdes-dfe-tap-config = <3>, <3>, <3>;
+               amd,serdes-dfe-tap-enable = <0>, <0>, <7>;
+               mac-address = [ 02 A1 A2 A3 A4 A5 ];
+               clocks = <&xgmacclk0_dma_250mhz>, <&xgmacclk0_ptp_250mhz>;
+               clock-names = "dma_clk", "ptp_clk";
+               phy-mode = "xgmii";
+               #stream-id-cells = <16>;
+               dma-coherent;
+       };
+
+       xgmac1: xgmac@e0900000 {
+               compatible = "amd,xgbe-seattle-v1a";
+               reg = <0 0xe0900000 0 0x80000>,
+                     <0 0xe0980000 0 0x80000>,
+                     <0 0xe1240c00 0 0x00400>, /* SERDES RX/TX1 */
+                     <0 0xe1250080 0 0x00060>, /* SERDES IR 1/2 */
+                     <0 0xe12500fc 0 0x00004>; /* SERDES IR 2/2 */
+               interrupts = <0 324 4>,
+                            <0 341 1>, <0 342 1>, <0 343 1>, <0 344 1>,
+                            <0 322 4>;
+               amd,per-channel-interrupt;
+               amd,speed-set = <0>;
+               amd,serdes-blwc = <1>, <1>, <0>;
+               amd,serdes-cdr-rate = <2>, <2>, <7>;
+               amd,serdes-pq-skew = <10>, <10>, <18>;
+               amd,serdes-tx-amp = <0>, <0>, <0>;
+               amd,serdes-dfe-tap-config = <3>, <3>, <3>;
+               amd,serdes-dfe-tap-enable = <0>, <0>, <7>;
+               mac-address = [ 02 B1 B2 B3 B4 B5 ];
+               clocks = <&xgmacclk1_dma_250mhz>, <&xgmacclk1_ptp_250mhz>;
+               clock-names = "dma_clk", "ptp_clk";
+               phy-mode = "xgmii";
+               #stream-id-cells = <16>;
+               dma-coherent;
+       };
+
+       xgmac0_smmu: smmu@e0600000 {
+                compatible = "arm,mmu-401";
+                reg = <0 0xe0600000 0 0x10000>;
+                #global-interrupts = <1>;
+                interrupts = /* Uses combined intr for both
+                              * global and context
+                              */
+                             <0 336 4>,
+                             <0 336 4>;
+
+                mmu-masters = <&xgmac0
+                         0  1  2  3  4  5  6  7
+                        16 17 18 19 20 21 22 23
+                >;
+        };
+
+        xgmac1_smmu: smmu@e0800000 {
+                compatible = "arm,mmu-401";
+                reg = <0 0xe0800000 0 0x10000>;
+                #global-interrupts = <1>;
+                interrupts = /* Uses combined intr for both
+                              * global and context
+                              */
+                             <0 335 4>,
+                             <0 335 4>;
+
+                mmu-masters = <&xgmac1
+                         0  1  2  3  4  5  6  7
+                        16 17 18 19 20 21 22 23
+                >;
+        };
diff --git a/arch/arm64/boot/dts/amd/husky.dts b/arch/arm64/boot/dts/amd/husky.dts
new file mode 100644 (file)
index 0000000..1381d4b
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * DTS file for AMD/Linaro 96Boards Enterprise Edition Server (Husky) Board
+ * Note: Based-on AMD Seattle Rev.B0
+ *
+ * Copyright (C) 2015 Advanced Micro Devices, Inc.
+ */
+
+/dts-v1/;
+
+/include/ "amd-seattle-soc.dtsi"
+
+/ {
+       model = "Linaro 96Boards Enterprise Edition Server (Husky) Board";
+       compatible = "amd,seattle-overdrive", "amd,seattle";
+
+       chosen {
+               stdout-path = &serial0;
+       };
+
+       psci {
+               compatible   = "arm,psci-0.2";
+               method       = "smc";
+       };
+};
+
+&ccp0 {
+       status = "ok";
+       amd,zlib-support = <1>;
+};
+
+/**
+ * NOTE: In Rev.B, gpio0 is reserved.
+ */
+&gpio1 {
+       status = "ok";
+};
+
+&gpio2 {
+       status = "ok";
+};
+
+&gpio3 {
+       status = "ok";
+};
+
+&gpio4 {
+       status = "ok";
+};
+
+&i2c0 {
+       status = "ok";
+};
+
+&i2c1 {
+       status = "ok";
+};
+
+&pcie0 {
+       status = "ok";
+};
+
+&spi0 {
+       status = "ok";
+};
+
+&spi1 {
+       status = "ok";
+       sdcard0: sdcard@0 {
+               compatible = "mmc-spi-slot";
+               reg = <0>;
+               spi-max-frequency = <20000000>;
+               voltage-ranges = <3200 3400>;
+               pl022,hierarchy = <0>;
+               pl022,interface = <0>;
+               pl022,com-mode = <0x0>;
+               pl022,rx-level-trig = <0>;
+               pl022,tx-level-trig = <0>;
+       };
+};
+
+&smb0 {
+       /include/ "amd-seattle-xgbe-b.dtsi"
+};
index fe30f7671ea3bc2707fb70a312c70e18bb1c2550..3e40bd469cea13c010c08c6434ae1d0aa9df81af 100644 (file)
                        reg = <0x0 0x1054a000 0x0 0x20>;
                };
 
+               rb: rb@7e000000 {
+                       compatible = "apm,xgene-rb", "syscon";
+                       reg = <0x0 0x7e000000 0x0 0x10>;
+               };
+
                edac@78800000 {
                        compatible = "apm,xgene-edac";
                        #address-cells = <2>;
                        regmap-mcba = <&mcba>;
                        regmap-mcbb = <&mcbb>;
                        regmap-efuse = <&efuse>;
+                       regmap-rb = <&rb>;
                        reg = <0x0 0x78800000 0x0 0x100>;
                        interrupts = <0x0 0x20 0x4>,
                                     <0x0 0x21 0x4>,
index dd5158eb5872396693bba678cda765a719e25e97..7c83e3ac84c9ce650fb27d4f49d5c225778ab5b5 100644 (file)
@@ -92,8 +92,8 @@
                        scpi_clk: scpi_clocks@3 {
                                compatible = "arm,scpi-variable-clocks";
                                #clock-cells = <1>;
-                               clock-indices = <3>, <4>;
-                               clock-output-names = "pxlclk0", "pxlclk1";
+                               clock-indices = <3>;
+                               clock-output-names = "pxlclk";
                        };
                };
 
                             <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>,
+                            <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
                             <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
                clock-names = "apb_pclk";
        };
 
+       hdlcd@7ff50000 {
+               compatible = "arm,hdlcd";
+               reg = <0 0x7ff50000 0 0x1000>;
+               interrupts = <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&scpi_clk 3>;
+               clock-names = "pxlclk";
+
+               port {
+                       hdlcd1_output: endpoint@0 {
+                               remote-endpoint = <&tda998x_1_input>;
+                       };
+               };
+       };
+
+       hdlcd@7ff60000 {
+               compatible = "arm,hdlcd";
+               reg = <0 0x7ff60000 0 0x1000>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&scpi_clk 3>;
+               clock-names = "pxlclk";
+
+               port {
+                       hdlcd0_output: endpoint@0 {
+                               remote-endpoint = <&tda998x_0_input>;
+                       };
+               };
+       };
+
        soc_uart0: uart@7ff80000 {
                compatible = "arm,pl011", "arm,primecell";
                reg = <0x0 0x7ff80000 0x0 0x1000>;
                i2c-sda-hold-time-ns = <500>;
                clocks = <&soc_smc50mhz>;
 
-               dvi0: dvi-transmitter@70 {
+               hdmi-transmitter@70 {
                        compatible = "nxp,tda998x";
                        reg = <0x70>;
+                       port {
+                               tda998x_0_input: endpoint@0 {
+                                       remote-endpoint = <&hdlcd0_output>;
+                               };
+                       };
                };
 
-               dvi1: dvi-transmitter@71 {
+               hdmi-transmitter@71 {
                        compatible = "nxp,tda998x";
                        reg = <0x71>;
+                       port {
+                               tda998x_1_input: endpoint@0 {
+                                       remote-endpoint = <&hdlcd1_output>;
+                               };
+                       };
                };
        };
 
index da7b6e61325758c547b521c3df675adeb9568d2b..933cba3599189c7f85c6a6adea77eb661be459f5 100644 (file)
@@ -23,9 +23,8 @@ soc0: soc@000000000 {
                };
        };
 
-       dsa: dsa@c7000000 {
+       dsaf0: dsa@c7000000 {
                compatible = "hisilicon,hns-dsaf-v1";
-               dsa_name = "dsaf0";
                mode = "6port-16rss";
                interrupt-parent = <&mbigen_dsa>;
 
@@ -127,7 +126,7 @@ soc0: soc@000000000 {
 
        eth0: ethernet@0{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <0>;
                local-mac-address = [00 00 00 01 00 58];
                status = "disabled";
@@ -135,14 +134,14 @@ soc0: soc@000000000 {
        };
        eth1: ethernet@1{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <1>;
                status = "disabled";
                dma-coherent;
        };
        eth2: ethernet@2{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <2>;
                local-mac-address = [00 00 00 01 00 5a];
                status = "disabled";
@@ -150,7 +149,7 @@ soc0: soc@000000000 {
        };
        eth3: ethernet@3{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <3>;
                local-mac-address = [00 00 00 01 00 5b];
                status = "disabled";
@@ -158,7 +157,7 @@ soc0: soc@000000000 {
        };
        eth4: ethernet@4{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <4>;
                local-mac-address = [00 00 00 01 00 5c];
                status = "disabled";
@@ -166,7 +165,7 @@ soc0: soc@000000000 {
        };
        eth5: ethernet@5{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <5>;
                local-mac-address = [00 00 00 01 00 5d];
                status = "disabled";
@@ -174,7 +173,7 @@ soc0: soc@000000000 {
        };
        eth6: ethernet@6{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <6>;
                local-mac-address = [00 00 00 01 00 5e];
                status = "disabled";
@@ -182,7 +181,7 @@ soc0: soc@000000000 {
        };
        eth7: ethernet@7{
                compatible = "hisilicon,hns-nic-v1";
-               ae-name = "dsaf0";
+               ae-handle = <&dsaf0>;
                port-id = <7>;
                local-mac-address = [00 00 00 01 00 5f];
                status = "disabled";
index 7dfe1c085966de45b27ee1b10f8073af9c5914dc..62f33fc84e3ea7afd11655069a16a45b7e120b90 100644 (file)
@@ -12,6 +12,8 @@
                rtc1 = "/rtc@0,7000e000";
        };
 
+       chosen { };
+
        memory {
                device_type = "memory";
                reg = <0x0 0x80000000 0x0 0x80000000>;
index e8bb46027bed690385f51905a1cab74cea40274d..be0e9d70e6c4a0eb64146e1a2a995a2086da786b 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performace. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra124-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
         */
        uarta: serial@0,70006000 {
index bc23f4dea002b51f089b6c42756af16f9c591cca..cd4f45ccd6a72f6fcb9f6c87dee6160fb3c8ceee 100644 (file)
         * driver and APB DMA based serial driver for higher baudrate
         * and performace. To enable the 8250 based driver, the compatible
         * is "nvidia,tegra124-uart", "nvidia,tegra20-uart" and to enable
-        * the APB DMA based serial driver, the comptible is
+        * the APB DMA based serial driver, the compatible is
         * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
         */
        uarta: serial@0,70006000 {
index 8e94af64ee94516166dad513be94613f32a460aa..fa1f661ccccfb21bb71ab0d5918aee5c0742223e 100644 (file)
@@ -1,4 +1,5 @@
 dtb-$(CONFIG_ARCH_QCOM)        += apq8016-sbc.dtb msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM)        += msm8996-mtp.dtb
 
 always         := $(dtb-y)
 subdir-y       := $(dts-dirs)
index db17c5d5689c65ed83b35bc5d51ba4adc290d828..6b4289dd24951ac7f2d5d34466e8d1912424d413 100644 (file)
@@ -24,6 +24,8 @@
                i2c0    = &blsp_i2c2;
                i2c1    = &blsp_i2c6;
                i2c3    = &blsp_i2c4;
+               spi0    = &blsp_spi5;
+               spi1    = &blsp_spi3;
        };
 
        chosen {
                                default-state = "off";
                        };
                };
+
+               sdhci@07824000 {
+                       vmmc-supply = <&pm8916_l8>;
+                       vqmmc-supply = <&pm8916_l5>;
+
+                       pinctrl-names = "default", "sleep";
+                       pinctrl-0 = <&sdc1_clk_on &sdc1_cmd_on &sdc1_data_on>;
+                       pinctrl-1 = <&sdc1_clk_off &sdc1_cmd_off &sdc1_data_off>;
+                       status = "okay";
+               };
        };
 };
 
-&sdhc_1 {
-       status = "okay";
+&smd_rpm_regulators {
+       vdd_l1_l2_l3-supply = <&pm8916_s3>;
+       vdd_l5-supply = <&pm8916_s3>;
+       vdd_l4_l5_l6-supply = <&pm8916_s4>;
+       vdd_l7-supply = <&pm8916_s4>;
+
+       s1 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1562000>;
+       };
+
+       s3 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1562000>;
+       };
+
+       s4 {
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+
+               regulator-always-on;
+               regulator-boot-on;
+       };
+
+       l1 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1525000>;
+       };
+
+       l2 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1525000>;
+       };
+
+       l3 {
+               regulator-min-microvolt = <375000>;
+               regulator-max-microvolt = <1525000>;
+       };
+
+       l4 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l5 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l6 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l7 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l8 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l9 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l10 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l11 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l12 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l13 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l14 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       /**
+        * 1.8v required on LS expansion
+        * for mezzanine boards
+        */
+       l15 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+               regulator-always-on;
+       };
+
+       l16 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l17 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
+
+       l18 {
+               regulator-min-microvolt = <1750000>;
+               regulator-max-microvolt = <3337000>;
+       };
 };
index 955c6f174d4cb16218625a6e590102a6e4a2bbf0..8d050059e9a8019cd35102399149b5010a194dc9 100644 (file)
@@ -81,8 +81,8 @@
                        bias-disable;
                };
                pinconf_cs {
-                       pins = "gpio2";
-                       drive-strength = <2>;
+                       pins = "gpio16";
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio6";
                };
                pinconf {
-                       pins = "gpio4", "gpio5", "gpio6", "gpio7";
+                       pins = "gpio4", "gpio5", "gpio7";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio6";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio10";
                };
                pinconf {
-                       pins = "gpio8", "gpio9", "gpio10", "gpio11";
+                       pins = "gpio8", "gpio9", "gpio11";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio10";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio14";
                };
                pinconf {
-                       pins = "gpio12", "gpio13", "gpio14", "gpio15";
+                       pins = "gpio12", "gpio13", "gpio15";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio14";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio18";
                };
                pinconf {
-                       pins = "gpio16", "gpio17", "gpio18", "gpio19";
+                       pins = "gpio16", "gpio17", "gpio19";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio18";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
                        pins = "gpio22";
                };
                pinconf {
-                       pins = "gpio20", "gpio21", "gpio22", "gpio23";
+                       pins = "gpio20", "gpio21", "gpio23";
                        drive-strength = <12>;
                        bias-disable;
                };
                pinconf_cs {
                        pins = "gpio22";
-                       drive-strength = <2>;
+                       drive-strength = <16>;
                        bias-disable;
                        output-high;
                };
index 915321479998d63ff70db4c08bc3d8e1cb2cdb50..7705207872a50ed3595e5707083e7a9dc78f2148 100644 (file)
                        device_type = "cpu";
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x0>;
+                       next-level-cache = <&L2_0>;
                };
 
                CPU1: cpu@1 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x1>;
+                       next-level-cache = <&L2_0>;
                };
 
                CPU2: cpu@2 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x2>;
+                       next-level-cache = <&L2_0>;
                };
 
                CPU3: cpu@3 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a53", "arm,armv8";
                        reg = <0x3>;
+                       next-level-cache = <&L2_0>;
+               };
+
+               L2_0: l2-cache {
+                     compatible = "cache";
+                     cache-level = <2>;
                };
        };
 
                        #interrupt-cells = <2>;
                };
 
-               gcc: qcom,gcc@1800000 {
+               gcc: clock-controller@1800000 {
                        compatible = "qcom,gcc-msm8916";
                        #clock-cells = <1>;
                        #reset-cells = <1>;
                                compatible = "qcom,rpm-msm8916";
                                qcom,smd-channels = "rpm_requests";
 
-                               pm8916-regulators {
+                               rpmcc: qcom,rpmcc {
+                                       compatible = "qcom,rpmcc-msm8916", "qcom,rpmcc";
+                                       #clock-cells = <1>;
+                               };
+
+                               smd_rpm_regulators: pm8916-regulators {
                                        compatible = "qcom,rpm-pm8916-regulators";
 
                                        pm8916_s1: s1 {};
-                                       pm8916_s2: s2 {};
                                        pm8916_s3: s3 {};
                                        pm8916_s4: s4 {};
 
diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dts b/arch/arm64/boot/dts/qcom/msm8996-mtp.dts
new file mode 100644 (file)
index 0000000..619af44
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+
+#include "msm8996-mtp.dtsi"
+
+/ {
+       model = "Qualcomm Technologies, Inc. MSM 8996 MTP";
+       compatible = "qcom,msm8996-mtp";
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8996-mtp.dtsi
new file mode 100644 (file)
index 0000000..9bab5c0
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8996.dtsi"
+
+/ {
+       aliases {
+               serial0 = &blsp2_uart1;
+       };
+
+       chosen {
+               stdout-path = "serial0";
+       };
+
+       soc {
+               serial@75b0000 {
+                       status = "okay";
+               };
+       };
+};
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
new file mode 100644 (file)
index 0000000..2c2736d
--- /dev/null
@@ -0,0 +1,267 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/clock/qcom,gcc-msm8996.h>
+#include <dt-bindings/clock/qcom,mmcc-msm8996.h>
+
+/ {
+       model = "Qualcomm Technologies, Inc. MSM8996";
+
+       interrupt-parent = <&intc>;
+
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       chosen { };
+
+       memory {
+               device_type = "memory";
+               /* We expect the bootloader to fill in the reg */
+               reg = <0 0 0 0>;
+       };
+
+       cpus {
+               #address-cells = <2>;
+               #size-cells = <0>;
+
+               CPU0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "qcom,kryo";
+                       reg = <0x0 0x0>;
+                       enable-method = "psci";
+                       next-level-cache = <&L2_0>;
+                       L2_0: l2-cache {
+                             compatible = "cache";
+                             cache-level = <2>;
+                       };
+               };
+
+               CPU1: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "qcom,kryo";
+                       reg = <0x0 0x1>;
+                       enable-method = "psci";
+                       next-level-cache = <&L2_0>;
+               };
+
+               CPU2: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "qcom,kryo";
+                       reg = <0x0 0x100>;
+                       enable-method = "psci";
+                       next-level-cache = <&L2_1>;
+                       L2_1: l2-cache {
+                             compatible = "cache";
+                             cache-level = <2>;
+                       };
+               };
+
+               CPU3: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "qcom,kryo";
+                       reg = <0x0 0x101>;
+                       enable-method = "psci";
+                       next-level-cache = <&L2_1>;
+               };
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                       };
+
+                       cluster1 {
+                               core0 {
+                                       cpu = <&CPU2>;
+                               };
+
+                               core1 {
+                                       cpu = <&CPU3>;
+                               };
+                       };
+               };
+       };
+
+       timer {
+               compatible = "arm,armv8-timer";
+               interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                            <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+       };
+
+       clocks {
+               xo_board {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <19200000>;
+                       clock-output-names = "xo_board";
+               };
+
+               sleep_clk {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32764>;
+                       clock-output-names = "sleep_clk";
+               };
+       };
+
+       psci {
+               compatible = "arm,psci-1.0";
+               method = "smc";
+       };
+
+       soc: soc {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               ranges = <0 0 0 0xffffffff>;
+               compatible = "simple-bus";
+
+               intc: interrupt-controller@9bc0000 {
+                       compatible = "arm,gic-v3";
+                       #interrupt-cells = <3>;
+                       interrupt-controller;
+                       #redistributor-regions = <1>;
+                       redistributor-stride = <0x0 0x40000>;
+                       reg = <0x09bc0000 0x10000>,
+                             <0x09c00000 0x100000>;
+                       interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
+               };
+
+               gcc: clock-controller@300000 {
+                       compatible = "qcom,gcc-msm8996";
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
+                       reg = <0x300000 0x90000>;
+               };
+
+               blsp2_uart1: serial@75b0000 {
+                       compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+                       reg = <0x75b0000 0x1000>;
+                       interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&gcc GCC_BLSP2_UART2_APPS_CLK>,
+                                <&gcc GCC_BLSP2_AHB_CLK>;
+                       clock-names = "core", "iface";
+                       status = "disabled";
+               };
+
+               pinctrl@1010000 {
+                       compatible = "qcom,msm8996-pinctrl";
+                       reg = <0x01010000 0x300000>;
+                       interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupt-controller;
+                       #interrupt-cells = <2>;
+               };
+
+               timer@09840000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       ranges;
+                       compatible = "arm,armv7-timer-mem";
+                       reg = <0x09840000 0x1000>;
+                       clock-frequency = <19200000>;
+
+                       frame@9850000 {
+                               frame-number = <0>;
+                               interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>,
+                                            <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x09850000 0x1000>,
+                                     <0x09860000 0x1000>;
+                       };
+
+                       frame@9870000 {
+                               frame-number = <1>;
+                               interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x09870000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@9880000 {
+                               frame-number = <2>;
+                               interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x09880000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@9890000 {
+                               frame-number = <3>;
+                               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x09890000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@98a0000 {
+                               frame-number = <4>;
+                               interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x098a0000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@98b0000 {
+                               frame-number = <5>;
+                               interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x098b0000 0x1000>;
+                               status = "disabled";
+                       };
+
+                       frame@98c0000 {
+                               frame-number = <6>;
+                               interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+                               reg = <0x098c0000 0x1000>;
+                               status = "disabled";
+                       };
+               };
+
+               spmi_bus: qcom,spmi@400f000 {
+                       compatible = "qcom,spmi-pmic-arb";
+                       reg = <0x400f000 0x1000>,
+                             <0x4400000 0x800000>,
+                             <0x4c00000 0x800000>,
+                             <0x5800000 0x200000>,
+                             <0x400a000 0x002100>;
+                       reg-names = "core", "chnls", "obsrvr", "intr", "cnfg";
+                       interrupt-names = "periph_irq";
+                       interrupts = <GIC_SPI 326 IRQ_TYPE_NONE>;
+                       qcom,ee = <0>;
+                       qcom,channel = <0>;
+                       #address-cells = <2>;
+                       #size-cells = <0>;
+                       interrupt-controller;
+                       #interrupt-cells = <4>;
+               };
+
+               mmcc: clock-controller@8c0000 {
+                       compatible = "qcom,mmcc-msm8996";
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
+                       reg = <0x8c0000 0x40000>;
+                       assigned-clocks = <&mmcc MMPLL9_PLL>,
+                                         <&mmcc MMPLL1_PLL>,
+                                         <&mmcc MMPLL3_PLL>,
+                                         <&mmcc MMPLL4_PLL>,
+                                         <&mmcc MMPLL5_PLL>;
+                       assigned-clock-rates = <624000000>,
+                                              <810000000>,
+                                              <980000000>,
+                                              <960000000>,
+                                              <825000000>;
+               };
+       };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8004.dtsi b/arch/arm64/boot/dts/qcom/pm8004.dtsi
new file mode 100644 (file)
index 0000000..ef2207a
--- /dev/null
@@ -0,0 +1,19 @@
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+
+       pmic@4 {
+               compatible = "qcom,pm8004", "qcom,spmi-pmic";
+               reg = <0x4 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+
+       pmic@5 {
+               compatible = "qcom,pm8004", "qcom,spmi-pmic";
+               reg = <0x5 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8994.dtsi b/arch/arm64/boot/dts/qcom/pm8994.dtsi
new file mode 100644 (file)
index 0000000..1732f1d
--- /dev/null
@@ -0,0 +1,62 @@
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+
+       pmic@0 {
+               compatible = "qcom,pm8994", "qcom,spmi-pmic";
+               reg = <0x0 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               pm8994_gpios: gpios@c000 {
+                       compatible = "qcom,pm8994-gpio";
+                       reg = <0xc000 0x1500>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupts = <0 0xc0 0 IRQ_TYPE_NONE>,
+                                    <0 0xc1 0 IRQ_TYPE_NONE>,
+                                    <0 0xc2 0 IRQ_TYPE_NONE>,
+                                    <0 0xc3 0 IRQ_TYPE_NONE>,
+                                    <0 0xc4 0 IRQ_TYPE_NONE>,
+                                    <0 0xc5 0 IRQ_TYPE_NONE>,
+                                    <0 0xc6 0 IRQ_TYPE_NONE>,
+                                    <0 0xc7 0 IRQ_TYPE_NONE>,
+                                    <0 0xc8 0 IRQ_TYPE_NONE>,
+                                    <0 0xc9 0 IRQ_TYPE_NONE>,
+                                    <0 0xca 0 IRQ_TYPE_NONE>,
+                                    <0 0xcb 0 IRQ_TYPE_NONE>,
+                                    <0 0xcc 0 IRQ_TYPE_NONE>,
+                                    <0 0xcd 0 IRQ_TYPE_NONE>,
+                                    <0 0xce 0 IRQ_TYPE_NONE>,
+                                    <0 0xd0 0 IRQ_TYPE_NONE>,
+                                    <0 0xd1 0 IRQ_TYPE_NONE>,
+                                    <0 0xd2 0 IRQ_TYPE_NONE>,
+                                    <0 0xd3 0 IRQ_TYPE_NONE>,
+                                    <0 0xd4 0 IRQ_TYPE_NONE>,
+                                    <0 0xd5 0 IRQ_TYPE_NONE>;
+               };
+
+               pm8994_mpps: mpps@a000 {
+                       compatible = "qcom,pm8994-mpp";
+                       reg = <0xa000 0x700>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+                       interrupts = <0 0xa0 0 IRQ_TYPE_NONE>,
+                                    <0 0xa1 0 IRQ_TYPE_NONE>,
+                                    <0 0xa2 0 IRQ_TYPE_NONE>,
+                                    <0 0xa3 0 IRQ_TYPE_NONE>,
+                                    <0 0xa4 0 IRQ_TYPE_NONE>,
+                                    <0 0xa5 0 IRQ_TYPE_NONE>,
+                                    <0 0xa6 0 IRQ_TYPE_NONE>,
+                                    <0 0xa7 0 IRQ_TYPE_NONE>;
+               };
+       };
+
+       pmic@1 {
+               compatible = "qcom,pm8994", "qcom,spmi-pmic";
+               reg = <0x1 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+};
diff --git a/arch/arm64/boot/dts/qcom/pmi8994.dtsi b/arch/arm64/boot/dts/qcom/pmi8994.dtsi
new file mode 100644 (file)
index 0000000..d3879a4
--- /dev/null
@@ -0,0 +1,19 @@
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/spmi/spmi.h>
+
+&spmi_bus {
+
+       pmic@2 {
+               compatible = "qcom,pmi8994", "qcom,spmi-pmic";
+               reg = <0x2 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+
+       pmic@3 {
+               compatible = "qcom,pmi8994", "qcom,spmi-pmic";
+               reg = <0x3 SPMI_USID>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+       };
+};
index 265d12ff6022208f94891149c065951668fd2b15..31ace9c1f79dc70ceb8bfda66e1b83ecad399527 100644 (file)
@@ -93,6 +93,9 @@
 };
 
 &pfc {
+       pinctrl-0 = <&scif_clk_pins>;
+       pinctrl-names = "default";
+
        scif1_pins: scif1 {
                renesas,groups = "scif1_data_a", "scif1_ctrl";
                renesas,function = "scif1";
                renesas,groups = "scif2_data_a";
                renesas,function = "scif2";
        };
+       scif_clk_pins: scif_clk {
+               renesas,groups = "scif_clk_a";
+               renesas,function = "scif_clk";
+       };
 
        i2c2_pins: i2c2 {
                renesas,groups = "i2c2_a";
        status = "okay";
 };
 
+&scif_clk {
+       clock-frequency = <14745600>;
+       status = "okay";
+};
+
 &i2c2 {
        pinctrl-0 = <&i2c2_pins>;
        pinctrl-names = "default";
                interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
        };
 };
+
+&xhci0 {
+       status = "okay";
+};
index bb353cde125333b9f0df4dadcd817316d4ee3f08..b5e46e4ff72ad003708c1189f94b1e3f1c1539cd 100644 (file)
@@ -39,6 +39,7 @@
                        compatible = "arm,cortex-a57", "arm,armv8";
                        reg = <0x0>;
                        device_type = "cpu";
+                       next-level-cache = <&L2_CA57>;
                        enable-method = "psci";
                };
 
                        compatible = "arm,cortex-a57","arm,armv8";
                        reg = <0x1>;
                        device_type = "cpu";
+                       next-level-cache = <&L2_CA57>;
                        enable-method = "psci";
                };
                a57_2: cpu@2 {
                        compatible = "arm,cortex-a57","arm,armv8";
                        reg = <0x2>;
                        device_type = "cpu";
+                       next-level-cache = <&L2_CA57>;
                        enable-method = "psci";
                };
                a57_3: cpu@3 {
                        compatible = "arm,cortex-a57","arm,armv8";
                        reg = <0x3>;
                        device_type = "cpu";
+                       next-level-cache = <&L2_CA57>;
                        enable-method = "psci";
                };
        };
 
+       L2_CA57: cache-controller@0 {
+               compatible = "cache";
+       };
+
        extal_clk: extal {
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <0>;
        };
 
+       /* External SCIF clock - to be overridden by boards that provide it */
+       scif_clk: scif {
+               compatible = "fixed-clock";
+               #clock-cells = <0>;
+               clock-frequency = <0>;
+               status = "disabled";
+       };
+
        soc {
                compatible = "simple-bus";
                interrupt-parent = <&gic>;
                        power-domains = <&cpg>;
                };
 
-               pmu {
-                       compatible = "arm,armv8-pmuv3";
+               pmu_a57 {
+                       compatible = "arm,cortex-a57-pmu";
                        interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>,
                                     <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>,
                };
 
                dmac0: dma-controller@e6700000 {
-                       /* Empty node for now */
+                       compatible = "renesas,dmac-r8a7795",
+                                    "renesas,rcar-dmac";
+                       reg = <0 0xe6700000 0 0x10000>;
+                       interrupts = <GIC_SPI 199 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 200 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 201 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 202 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 203 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 204 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 205 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 207 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 209 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 210 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 211 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 213 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 214 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 215 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "error",
+                                       "ch0", "ch1", "ch2", "ch3",
+                                       "ch4", "ch5", "ch6", "ch7",
+                                       "ch8", "ch9", "ch10", "ch11",
+                                       "ch12", "ch13", "ch14", "ch15";
+                       clocks = <&cpg CPG_MOD 219>;
+                       clock-names = "fck";
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <16>;
                };
 
                dmac1: dma-controller@e7300000 {
-                       /* Empty node for now */
+                       compatible = "renesas,dmac-r8a7795",
+                                    "renesas,rcar-dmac";
+                       reg = <0 0xe7300000 0 0x10000>;
+                       interrupts = <GIC_SPI 220 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 216 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 218 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 219 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 308 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 309 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 310 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 311 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 312 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 313 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 314 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "error",
+                                       "ch0", "ch1", "ch2", "ch3",
+                                       "ch4", "ch5", "ch6", "ch7",
+                                       "ch8", "ch9", "ch10", "ch11",
+                                       "ch12", "ch13", "ch14", "ch15";
+                       clocks = <&cpg CPG_MOD 218>;
+                       clock-names = "fck";
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <16>;
                };
 
                dmac2: dma-controller@e7310000 {
-                       /* Empty node for now */
+                       compatible = "renesas,dmac-r8a7795",
+                                    "renesas,rcar-dmac";
+                       reg = <0 0xe7310000 0 0x10000>;
+                       interrupts = <GIC_SPI 416 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 417 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 418 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 419 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 420 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 423 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 397 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "error",
+                                       "ch0", "ch1", "ch2", "ch3",
+                                       "ch4", "ch5", "ch6", "ch7",
+                                       "ch8", "ch9", "ch10", "ch11",
+                                       "ch12", "ch13", "ch14", "ch15";
+                       clocks = <&cpg CPG_MOD 217>;
+                       clock-names = "fck";
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <16>;
                };
 
                avb: ethernet@e6800000 {
                };
 
                hscif0: serial@e6540000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe6540000 0 96>;
                        interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 520>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 520>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x31>, <&dmac1 0x30>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                hscif1: serial@e6550000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe6550000 0 96>;
                        interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 519>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 519>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x33>, <&dmac1 0x32>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                hscif2: serial@e6560000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe6560000 0 96>;
                        interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 518>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 518>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x35>, <&dmac1 0x34>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                hscif3: serial@e66a0000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe66a0000 0 96>;
                        interrupts = <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 517>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 517>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac0 0x37>, <&dmac0 0x36>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                hscif4: serial@e66b0000 {
-                       compatible = "renesas,hscif-r8a7795", "renesas,hscif";
+                       compatible = "renesas,hscif-r8a7795",
+                                    "renesas,rcar-gen3-hscif",
+                                    "renesas,hscif";
                        reg = <0 0xe66b0000 0 96>;
                        interrupts = <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 516>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 516>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac0 0x39>, <&dmac0 0x38>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif0: serial@e6e60000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6e60000 0 64>;
                        interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 207>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 207>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x51>, <&dmac1 0x50>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif1: serial@e6e68000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6e68000 0 64>;
                        interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 206>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 206>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x53>, <&dmac1 0x52>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif2: serial@e6e88000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6e88000 0 64>;
                        interrupts = <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 310>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 310>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x13>, <&dmac1 0x12>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif3: serial@e6c50000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6c50000 0 64>;
                        interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 204>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 204>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac0 0x57>, <&dmac0 0x56>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif4: serial@e6c40000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6c40000 0 64>;
                        interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 203>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 203>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac0 0x59>, <&dmac0 0x58>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                };
 
                scif5: serial@e6f30000 {
-                       compatible = "renesas,scif-r8a7795", "renesas,scif";
+                       compatible = "renesas,scif-r8a7795",
+                                    "renesas,rcar-gen3-scif", "renesas,scif";
                        reg = <0 0xe6f30000 0 64>;
                        interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&cpg CPG_MOD 202>;
-                       clock-names = "sci_ick";
+                       clocks = <&cpg CPG_MOD 202>,
+                                <&cpg CPG_CORE R8A7795_CLK_S3D1>,
+                                <&scif_clk>;
+                       clock-names = "fck", "brg_int", "scif_clk";
                        dmas = <&dmac1 0x5b>, <&dmac1 0x5a>;
                        dma-names = "tx", "rx";
                        power-domains = <&cpg>;
                        clocks = <&cpg CPG_MOD 815>;
                        status = "disabled";
                };
+
+               xhci0: usb@ee000000 {
+                       compatible = "renesas,xhci-r8a7795";
+                       reg = <0 0xee000000 0 0xc00>;
+                       interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&cpg CPG_MOD 328>;
+                       power-domains = <&cpg>;
+                       status = "disabled";
+               };
+
+               xhci1: usb@ee040000 {
+                       compatible = "renesas,xhci-r8a7795";
+                       reg = <0 0xee040000 0 0xc00>;
+                       interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+                       clocks = <&cpg CPG_MOD 327>;
+                       power-domains = <&cpg>;
+                       status = "disabled";
+               };
+
+               usb_dmac0: dma-controller@e65a0000 {
+                       compatible = "renesas,r8a7795-usb-dmac",
+                                    "renesas,usb-dmac";
+                       reg = <0 0xe65a0000 0 0x100>;
+                       interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "ch0", "ch1";
+                       clocks = <&cpg CPG_MOD 330>;
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <2>;
+               };
+
+               usb_dmac1: dma-controller@e65b0000 {
+                       compatible = "renesas,r8a7795-usb-dmac",
+                                    "renesas,usb-dmac";
+                       reg = <0 0xe65b0000 0 0x100>;
+                       interrupts = <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH
+                                     GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>;
+                       interrupt-names = "ch0", "ch1";
+                       clocks = <&cpg CPG_MOD 331>;
+                       power-domains = <&cpg>;
+                       #dma-cells = <1>;
+                       dma-channels = <2>;
+               };
        };
 };
index 8c219ccf67a3b4bccba2281e398d77cd450de375..6e27b22704df5630b8a3315ac14cc7032f660695 100644 (file)
                pinctrl-0 = <&pwr_key>;
 
                button@0 {
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
                        label = "GPIO Power";
                        linux,code = <116>;
index 104cbee762bb116d37d06e39ec2b2e644e3adfeb..1f2b642e794ae98219ceb49a58bc177028982dc7 100644 (file)
@@ -71,7 +71,7 @@
                pinctrl-0 = <&pwr_key>;
 
                button@0 {
-                       gpio-key,wakeup = <1>;
+                       wakeup-source;
                        gpios = <&gpio0 2 GPIO_ACTIVE_LOW>;
                        label = "GPIO Power";
                        linux,code = <116>;
index 122777b1441e8b7f4f3f129659bd42ce1d0f94b9..49d119103e31fff10ce4c94698e4cc35e60d9863 100644 (file)
                compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
                reg = <0x0 0xff0c0000 0x0 0x4000>;
                clock-freq-min-max = <400000 150000000>;
-               clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>;
-               clock-names = "biu", "ciu";
+               clocks = <&cru HCLK_SDMMC>, <&cru SCLK_SDMMC>,
+                        <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
                compatible = "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc";
                reg = <0x0 0xff0f0000 0x0 0x4000>;
                clock-freq-min-max = <400000 150000000>;
-               clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>;
-               clock-names = "biu", "ciu";
+               clocks = <&cru HCLK_EMMC>, <&cru SCLK_EMMC>,
+                        <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
+               clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
                fifo-depth = <0x100>;
                interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
                status = "disabled";
index 18ca9fb9e65f667c0679b3dbdb80166442723bb7..91ae2634cae9b9102567e2dfec92622457e454e4 100644 (file)
@@ -16,7 +16,6 @@ CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
 CONFIG_CGROUP_HUGETLB=y
 # CONFIG_UTS_NS is not set
 # CONFIG_IPC_NS is not set
@@ -37,15 +36,13 @@ CONFIG_ARCH_EXYNOS7=y
 CONFIG_ARCH_LAYERSCAPE=y
 CONFIG_ARCH_HISI=y
 CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_ROCKCHIP=y
 CONFIG_ARCH_SEATTLE=y
 CONFIG_ARCH_RENESAS=y
 CONFIG_ARCH_R8A7795=y
 CONFIG_ARCH_STRATIX10=y
 CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_132_SOC=y
-CONFIG_ARCH_TEGRA_210_SOC=y
-CONFIG_ARCH_QCOM=y
 CONFIG_ARCH_SPRD=y
 CONFIG_ARCH_THUNDER=y
 CONFIG_ARCH_UNIPHIER=y
@@ -54,14 +51,19 @@ CONFIG_ARCH_XGENE=y
 CONFIG_ARCH_ZYNQMP=y
 CONFIG_PCI=y
 CONFIG_PCI_MSI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_RCAR_GEN2_PCIE=y
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
-CONFIG_SMP=y
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
 CONFIG_SCHED_MC=y
 CONFIG_PREEMPT=y
 CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
+CONFIG_XEN=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -100,7 +102,11 @@ CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
 CONFIG_VIRTIO_NET=y
+CONFIG_AMD_XGBE=y
 CONFIG_NET_XGENE=y
+CONFIG_E1000E=y
+CONFIG_IGB=y
+CONFIG_IGBVF=y
 CONFIG_SKY2=y
 CONFIG_RAVB=y
 CONFIG_SMC91X=y
@@ -117,39 +123,41 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_8250_MT6577=y
 CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_SAMSUNG=y
-CONFIG_SERIAL_SAMSUNG_UARTS_4=y
-CONFIG_SERIAL_SAMSUNG_UARTS=4
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_SH_SCI=y
 CONFIG_SERIAL_SH_SCI_NR_UARTS=11
 CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_TEGRA=y
 CONFIG_SERIAL_MSM=y
 CONFIG_SERIAL_MSM_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_SERIAL_XILINX_PS_UART=y
 CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
 CONFIG_I2C_QUP=y
+CONFIG_I2C_UNIPHIER_F=y
 CONFIG_I2C_RCAR=y
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPMI=y
 CONFIG_PINCTRL_MSM8916=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
 CONFIG_GPIO_PL061=y
 CONFIG_GPIO_RCAR=y
 CONFIG_GPIO_XGENE=y
 CONFIG_POWER_RESET_XGENE=y
 CONFIG_POWER_RESET_SYSCON=y
 # CONFIG_HWMON is not set
+CONFIG_MFD_SPMI_PMIC=y
 CONFIG_REGULATOR=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SPMI=y
 CONFIG_FB=y
 CONFIG_FB_ARMCLCD=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
@@ -162,13 +170,21 @@ CONFIG_SND_SOC=y
 CONFIG_SND_SOC_RCAR=y
 CONFIG_SND_SOC_AK4613=y
 CONFIG_USB=y
+CONFIG_USB_OTG=y
 CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_MSM=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
 CONFIG_USB_STORAGE=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_UDC=y
+CONFIG_USB_CHIPIDEA_HOST=y
 CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
+CONFIG_USB_MSM_OTG=y
 CONFIG_USB_ULPI=y
+CONFIG_USB_GADGET=y
 CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
@@ -176,8 +192,6 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_TEGRA=y
 CONFIG_MMC_SPI=y
 CONFIG_MMC_DW=y
-CONFIG_MMC_DW_IDMAC=y
-CONFIG_MMC_DW_PLTFM=y
 CONFIG_MMC_DW_EXYNOS=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
@@ -187,28 +201,34 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=y
 CONFIG_LEDS_TRIGGER_CPU=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_PL031=y
 CONFIG_RTC_DRV_XGENE=y
 CONFIG_DMADEVICES=y
-CONFIG_RCAR_DMAC=y
 CONFIG_QCOM_BAM_DMA=y
 CONFIG_TEGRA20_APB_DMA=y
+CONFIG_RCAR_DMAC=y
+CONFIG_VFIO=y
+CONFIG_VFIO_PCI=y
 CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
+CONFIG_XEN_GNTDEV=y
+CONFIG_XEN_GRANT_DEV_ALLOC=y
 CONFIG_COMMON_CLK_CS2000_CP=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8916=y
 CONFIG_HWSPINLOCK_QCOM=y
-# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ARM_SMMU=y
 CONFIG_QCOM_SMEM=y
 CONFIG_QCOM_SMD=y
 CONFIG_QCOM_SMD_RPM=y
+CONFIG_ARCH_TEGRA_132_SOC=y
+CONFIG_ARCH_TEGRA_210_SOC=y
+CONFIG_HISILICON_IRQ_MBIGEN=y
+CONFIG_EXTCON_USB_GPIO=y
 CONFIG_PHY_XGENE=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
-# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA=y
@@ -239,6 +259,7 @@ CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_FTRACE is not set
 CONFIG_MEMTEST=y
 CONFIG_SECURITY=y
+CONFIG_CRYPTO_ECHAINIV=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
index 70fd9ffb58cfc08e82f3a978e6630c70eaeb1840..cff532a6744e937015371c80e5533a0fe660b611 100644 (file)
@@ -1,5 +1,3 @@
-
-
 generic-y += bug.h
 generic-y += bugs.h
 generic-y += checksum.h
@@ -31,7 +29,6 @@ generic-y += msgbuf.h
 generic-y += msi.h
 generic-y += mutex.h
 generic-y += pci.h
-generic-y += pci-bridge.h
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
index caafd63b8092d8102401112d811b1055f4cc3524..aee323b13802ad143e9774d2076a4b1c63731ece 100644 (file)
@@ -87,9 +87,26 @@ void __init acpi_init_cpus(void);
 static inline void acpi_init_cpus(void) { }
 #endif /* CONFIG_ACPI */
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+bool acpi_parking_protocol_valid(int cpu);
+void __init
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor);
+#else
+static inline bool acpi_parking_protocol_valid(int cpu) { return false; }
+static inline void
+acpi_set_mailbox_entry(int cpu, struct acpi_madt_generic_interrupt *processor)
+{}
+#endif
+
 static inline const char *acpi_get_enable_method(int cpu)
 {
-       return acpi_psci_present() ? "psci" : NULL;
+       if (acpi_psci_present())
+               return "psci";
+
+       if (acpi_parking_protocol_valid(cpu))
+               return "parking-protocol";
+
+       return NULL;
 }
 
 #ifdef CONFIG_ACPI_APEI
index 81151b67b26bf61fd756540f5643d8b5ab6882c7..ebf2481889c34848be0b34158647a4f60b770090 100644 (file)
 #define MIN_FDT_ALIGN          8
 #define MAX_FDT_SIZE           SZ_2M
 
+/*
+ * arm64 requires the kernel image to be placed
+ * TEXT_OFFSET bytes beyond a 2 MB aligned base
+ */
+#define MIN_KIMG_ALIGN         SZ_2M
+
 #endif
index 8f271b83f9106c7c9753ce2601d3b59e1ffbdfc5..8d56bd8550dc8e60c139092d8ef97fe4c4fcfc2e 100644 (file)
@@ -30,8 +30,9 @@
 #define ARM64_HAS_LSE_ATOMICS                  5
 #define ARM64_WORKAROUND_CAVIUM_23154          6
 #define ARM64_WORKAROUND_834220                        7
+#define ARM64_HAS_NO_HW_PREFETCH               8
 
-#define ARM64_NCAPS                            8
+#define ARM64_NCAPS                            9
 
 #ifndef __ASSEMBLY__
 
index 1a5949364ed0f43eee2be4b61c3497fe4fdbbb7b..7540284a17fe7d2569602e883c8f9443340ce378 100644 (file)
 #define MIDR_IMPLEMENTOR(midr) \
        (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
 
-#define MIDR_CPU_PART(imp, partnum) \
+#define MIDR_CPU_MODEL(imp, partnum) \
        (((imp)                 << MIDR_IMPLEMENTOR_SHIFT) | \
        (0xf                    << MIDR_ARCHITECTURE_SHIFT) | \
        ((partnum)              << MIDR_PARTNUM_SHIFT))
 
+#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
+                            MIDR_ARCHITECTURE_MASK)
+
+#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max)           \
+({                                                                     \
+       u32 _model = (midr) & MIDR_CPU_MODEL_MASK;                      \
+       u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);     \
+                                                                       \
+       _model == (model) && rv >= (rv_min) && rv <= (rv_max);          \
+ })
+
 #define ARM_CPU_IMP_ARM                        0x41
 #define ARM_CPU_IMP_APM                        0x50
 #define ARM_CPU_IMP_CAVIUM             0x43
 
 #define CAVIUM_CPU_PART_THUNDERX       0x0A1
 
+#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
+#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
+#define MIDR_THUNDERX  MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
+
 #ifndef __ASSEMBLY__
 
 /*
index 309704544d22763d6348095814fbdce935172c1b..1a617d46fce93247cf42fd0cda36a1355fc89aa9 100644 (file)
@@ -62,6 +62,16 @@ enum fixed_addresses {
 
        FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
        FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+
+       /*
+        * Used for kernel page table creation, so unmapped memory may be used
+        * for tables.
+        */
+       FIX_PTE,
+       FIX_PMD,
+       FIX_PUD,
+       FIX_PGD,
+
        __end_of_fixed_addresses
 };
 
index 007a69fc4f408d5f2f7e58f4070b6cb354a5e022..5f3ab8c1db55cca8dbf4c9e1fc315e90335b6d60 100644 (file)
@@ -121,6 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
        asm volatile("// futex_atomic_cmpxchg_inatomic\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "      prfm    pstl1strm, %2\n"
 "1:    ldxr    %w1, %2\n"
 "      sub     %w3, %w1, %w4\n"
@@ -137,6 +138,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "      .align  3\n"
 "      .quad   1b, 4b, 2b, 4b\n"
 "      .popsection\n"
+ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
        : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
        : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
        : "memory");
index a57601f9d17cdffb1122e2864ae5353273eb59ee..8740297dac775dac5bac2bb9260fca62df7d0fb9 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI 5
+#define NR_IPI 6
 
 typedef struct {
        unsigned int __softirq_pending;
index 2774fa384c47f27b4e936644cef0e980f7fbcfe7..71ad0f93eb7153226a43d78e909d3e7c3690bf92 100644 (file)
@@ -7,13 +7,14 @@
 
 #include <linux/linkage.h>
 #include <asm/memory.h>
+#include <asm/pgtable-types.h>
 
 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
  * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
  */
 #define KASAN_SHADOW_START      (VA_START)
-#define KASAN_SHADOW_END        (KASAN_SHADOW_START + (1UL << (VA_BITS - 3)))
+#define KASAN_SHADOW_END        (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
 
 /*
  * This value is used to map an address to the corresponding shadow
 #define KASAN_SHADOW_OFFSET     (KASAN_SHADOW_END - (1ULL << (64 - 3)))
 
 void kasan_init(void);
+void kasan_copy_shadow(pgd_t *pgdir);
 asmlinkage void kasan_early_init(void);
 
 #else
 static inline void kasan_init(void) { }
+static inline void kasan_copy_shadow(pgd_t *pgdir) { }
 #endif
 
 #endif
index a459714ee29e38fbf81f2061026933736ed1cbfc..5c6375d8528bb8ddd313bfa2911f7a0d77819028 100644 (file)
 #define SWAPPER_MM_MMUFLAGS    (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
 #endif
 
+/*
+ * To make optimal use of block mappings when laying out the linear
+ * mapping, round down the base of physical memory to a size that can
+ * be mapped efficiently, i.e., either PUD_SIZE (4k granule) or PMD_SIZE
+ * (64k granule), or a multiple that can be mapped using contiguous bits
+ * in the page tables: 32 * PMD_SIZE (16k granule)
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define ARM64_MEMSTART_ALIGN   SZ_512M
+#else
+#define ARM64_MEMSTART_ALIGN   SZ_1G
+#endif
 
 #endif /* __ASM_KERNEL_PGTABLE_H */
index 738a95f93e493e3002ac8749857f8599c5ae3404..bef6e9243c636b2aab05688f79de4f37a00154b3 100644 (file)
 #define CPTR_EL2_TCPAC (1 << 31)
 #define CPTR_EL2_TTA   (1 << 20)
 #define CPTR_EL2_TFP   (1 << CPTR_EL2_TFP_SHIFT)
+#define CPTR_EL2_DEFAULT       0x000033ff
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TDRA          (1 << 11)
index 52b777b7d407cfd9c2fe5036c1c7ae96bf025c0b..054ac25e7c2e7c7d9b9f2c3ef32286c35ed9c254 100644 (file)
@@ -26,6 +26,8 @@
 #define KVM_ARM64_DEBUG_DIRTY_SHIFT    0
 #define KVM_ARM64_DEBUG_DIRTY          (1 << KVM_ARM64_DEBUG_DIRTY_SHIFT)
 
+#define kvm_ksym_ref(sym)              phys_to_virt((u64)&sym - kimage_voffset)
+
 #ifndef __ASSEMBLY__
 struct kvm;
 struct kvm_vcpu;
index 3066328cd86b69a91274e0cb841059b428666140..779a5872a2c5fb5f9aa9b49af6f77391aefc2336 100644 (file)
@@ -127,10 +127,14 @@ static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
 
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
-       u32 mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
+       u32 mode;
 
-       if (vcpu_mode_is_32bit(vcpu))
+       if (vcpu_mode_is_32bit(vcpu)) {
+               mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
                return mode > COMPAT_PSR_MODE_USR;
+       }
+
+       mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;
 
        return mode != PSR_MODE_EL0t;
 }
index 689d4c95e12fbd0dd7c1cf7ca9521918f37aaab0..e3d67ff8798bec6f5c773ad34c5f1edc67898d8c 100644 (file)
@@ -307,7 +307,7 @@ static inline void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
 
-u64 kvm_call_hyp(void *hypfn, ...);
+u64 __kvm_call_hyp(void *hypfn, ...);
 void force_vm_exit(const cpumask_t *mask);
 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
 
@@ -328,8 +328,8 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
         * Call initialization code, and switch to the full blown
         * HYP code.
         */
-       kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
-                    hyp_stack_ptr, vector_ptr);
+       __kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
+                      hyp_stack_ptr, vector_ptr);
 }
 
 static inline void kvm_arch_hardware_disable(void) {}
@@ -343,4 +343,6 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
 void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
 
+#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
+
 #endif /* __ARM64_KVM_HOST_H__ */
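For illustration (not part of the patch): call sites keep the old spelling; the new macro simply routes the symbol through its linear-map alias before the HVC. A hedged example, assuming a typical HYP entry point such as __kvm_flush_vm_context:

        /* Sketch only: expands to __kvm_call_hyp(kvm_ksym_ref(__kvm_flush_vm_context)) */
        kvm_call_hyp(__kvm_flush_vm_context);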
index 853953cd1f0813fd562b68b7cdddd95582f1e392..61005e7dd6cb17ea8a950d389015593b1a124f94 100644 (file)
  * VA_START - the first kernel virtual address.
  * TASK_SIZE - the maximum size of a user space task.
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
- * The module space lives between the addresses given by TASK_SIZE
- * and PAGE_OFFSET - it must be within 128MB of the kernel text.
  */
 #define VA_BITS                        (CONFIG_ARM64_VA_BITS)
 #define VA_START               (UL(0xffffffffffffffff) << VA_BITS)
 #define PAGE_OFFSET            (UL(0xffffffffffffffff) << (VA_BITS - 1))
-#define MODULES_END            (PAGE_OFFSET)
-#define MODULES_VADDR          (MODULES_END - SZ_64M)
-#define PCI_IO_END             (MODULES_VADDR - SZ_2M)
+#define KIMAGE_VADDR           (MODULES_END)
+#define MODULES_END            (MODULES_VADDR + MODULES_VSIZE)
+#define MODULES_VADDR          (VA_START + KASAN_SHADOW_SIZE)
+#define MODULES_VSIZE          (SZ_64M)
+#define PCI_IO_END             (PAGE_OFFSET - SZ_2M)
 #define PCI_IO_START           (PCI_IO_END - PCI_IO_SIZE)
 #define FIXADDR_TOP            (PCI_IO_START - SZ_2M)
 #define TASK_SIZE_64           (UL(1) << VA_BITS)
 
 #define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 4))
 
+/*
+ * The size of the KASAN shadow region. This should be 1/8th of the
+ * size of the entire kernel virtual address space.
+ */
+#ifdef CONFIG_KASAN
+#define KASAN_SHADOW_SIZE      (UL(1) << (VA_BITS - 3))
+#else
+#define KASAN_SHADOW_SIZE      (0)
+#endif
+
 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
  */
-#define __virt_to_phys(x)      (((phys_addr_t)(x) - PAGE_OFFSET + PHYS_OFFSET))
+#define __virt_to_phys(x) ({                                           \
+       phys_addr_t __x = (phys_addr_t)(x);                             \
+       __x >= PAGE_OFFSET ? (__x - PAGE_OFFSET + PHYS_OFFSET) :        \
+                            (__x - kimage_voffset); })
+
 #define __phys_to_virt(x)      ((unsigned long)((x) - PHYS_OFFSET + PAGE_OFFSET))
+#define __phys_to_kimg(x)      ((unsigned long)((x) + kimage_voffset))
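For illustration (not part of the patch): __virt_to_phys() now accepts both flavours of kernel virtual address. A minimal sketch, assuming kimage_voffset has already been initialised by the early boot code:

        /* Sketch only: a kernel-image symbol and its linear-map alias should
         * now both convert to the same physical address. */
        static void __init virt_to_phys_demo(void)
        {
                phys_addr_t pa = __pa(&kimage_voffset); /* image address: uses kimage_voffset */
                void *lm       = __va(pa);              /* linear-map alias of the same page  */

                WARN_ON(__pa(lm) != pa);                /* linear address: uses PHYS_OFFSET   */
        }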
 
 /*
  * Convert a page to/from a physical address
 #define MT_S2_NORMAL           0xf
 #define MT_S2_DEVICE_nGnRE     0x1
 
+#ifdef CONFIG_ARM64_4K_PAGES
+#define IOREMAP_MAX_ORDER      (PUD_SHIFT)
+#else
+#define IOREMAP_MAX_ORDER      (PMD_SHIFT)
+#endif
+
 #ifndef __ASSEMBLY__
 
 extern phys_addr_t             memstart_addr;
 /* PHYS_OFFSET - the physical address of the start of memory. */
 #define PHYS_OFFSET            ({ memstart_addr; })
 
+/* the offset between the kernel virtual and physical mappings */
+extern u64                     kimage_voffset;
+
 /*
- * The maximum physical address that the linear direct mapping
- * of system RAM can cover. (PAGE_OFFSET can be interpreted as
- * a 2's complement signed quantity and negated to derive the
- * maximum size of the linear mapping.)
+ * Allow all memory at the discovery stage. We will clip it later.
  */
-#define MAX_MEMBLOCK_ADDR      ({ memstart_addr - PAGE_OFFSET - 1; })
+#define MIN_MEMBLOCK_ADDR      0
+#define MAX_MEMBLOCK_ADDR      U64_MAX
 
 /*
  * PFNs are used to describe any physical page; this means
index 24165784b8038b732ea568d1e74fd8c0a699b914..a00f7cf35bbd4d80ce045bfeb0cbb6bd061aeaaa 100644 (file)
@@ -27,6 +27,7 @@
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
 #include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 static inline void contextidr_thread_switch(struct task_struct *next)
@@ -48,7 +49,7 @@ static inline void contextidr_thread_switch(struct task_struct *next)
  */
 static inline void cpu_set_reserved_ttbr0(void)
 {
-       unsigned long ttbr = page_to_phys(empty_zero_page);
+       unsigned long ttbr = virt_to_phys(empty_zero_page);
 
        asm(
        "       msr     ttbr0_el1, %0                   // set TTBR0\n"
@@ -73,7 +74,7 @@ static inline bool __cpu_uses_extended_idmap(void)
 /*
  * Set TCR.T0SZ to its default value (based on VA_BITS)
  */
-static inline void cpu_set_default_tcr_t0sz(void)
+static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
 {
        unsigned long tcr;
 
@@ -86,7 +87,62 @@ static inline void cpu_set_default_tcr_t0sz(void)
        "       msr     tcr_el1, %0     ;"
        "       isb"
        : "=&r" (tcr)
-       : "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+       : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
+}
+
+#define cpu_set_default_tcr_t0sz()     __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
+#define cpu_set_idmap_tcr_t0sz()       __cpu_set_tcr_t0sz(idmap_t0sz)
+
+/*
+ * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
+ *
+ * The idmap lives in the same VA range as userspace, but uses global entries
+ * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
+ * speculative TLB fetches, we must temporarily install the reserved page
+ * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
+ *
+ * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
+ * If current is not a user task, the mm covers the TTBR1_EL1 page tables,
+ * which should not be installed in TTBR0_EL1. In this case we can leave the
+ * reserved page tables in place.
+ */
+static inline void cpu_uninstall_idmap(void)
+{
+       struct mm_struct *mm = current->active_mm;
+
+       cpu_set_reserved_ttbr0();
+       local_flush_tlb_all();
+       cpu_set_default_tcr_t0sz();
+
+       if (mm != &init_mm)
+               cpu_switch_mm(mm->pgd, mm);
+}
+
+static inline void cpu_install_idmap(void)
+{
+       cpu_set_reserved_ttbr0();
+       local_flush_tlb_all();
+       cpu_set_idmap_tcr_t0sz();
+
+       cpu_switch_mm(idmap_pg_dir, &init_mm);
+}
+
+/*
+ * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
+ * avoiding the possibility of conflicting TLB entries being allocated.
+ */
+static inline void cpu_replace_ttbr1(pgd_t *pgd)
+{
+       typedef void (ttbr_replace_func)(phys_addr_t);
+       extern ttbr_replace_func idmap_cpu_replace_ttbr1;
+       ttbr_replace_func *replace_phys;
+
+       phys_addr_t pgd_phys = virt_to_phys(pgd);
+
+       replace_phys = (void *)virt_to_phys(idmap_cpu_replace_ttbr1);
+
+       cpu_install_idmap();
+       replace_phys(pgd_phys);
+       cpu_uninstall_idmap();
 }
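For illustration (not part of the patch): a hedged sketch of how a caller might use this to rewrite swapper_pg_dir safely, where tmp_pg_dir stands for some temporary page directory prepared by the caller (a hypothetical name here):

        /* Sketch only: hop to a scratch pgd, repopulate the real one, hop back. */
        cpu_replace_ttbr1(tmp_pg_dir);          /* tmp_pg_dir: hypothetical scratch pgd */
        /* ... rebuild swapper_pg_dir entries here ... */
        cpu_replace_ttbr1(swapper_pg_dir);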
 
 /*
index 9b2f5a9d019df493fa6021ee3ca6b4779401d8c4..ae615b9d9a551bab47bb4900e867c8d15db02bb1 100644 (file)
@@ -39,6 +39,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/personality.h> /* for READ_IMPLIES_EXEC */
 #include <asm/pgtable-types.h>
 
 extern void __cpu_clear_user_page(void *p, unsigned long user);
index b008a72f8bc02733347c782f5b9c286cff16f7a2..f75b04e8d732be53e955c342d325481451950b0c 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/dma-mapping.h>
 
 #include <asm/io.h>
-#include <asm-generic/pci-bridge.h>
 #include <asm-generic/pci-dma-compat.h>
 
 #define PCIBIOS_MIN_IO         0x1000
index c15053902942e0a3a34ba4882545c5b6d163c23c..ff98585d085aa5737c9c17478555f22b11261f2f 100644 (file)
@@ -42,11 +42,20 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
        free_page((unsigned long)pmd);
 }
 
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
 {
-       set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
+       set_pud(pud, __pud(pmd | prot));
 }
 
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+       __pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+}
+#else
+static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+{
+       BUILD_BUG();
+}
 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -62,11 +71,20 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
        free_page((unsigned long)pud);
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
 {
-       set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
+       set_pgd(pgdp, __pgd(pud | prot));
 }
 
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+       __pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+}
+#else
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+{
+       BUILD_BUG();
+}
 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
index 2d545d7aa80ba715f65cc82b671213fa5505b495..a440f5a85d08b44894e7e398a2c35ce5eb3f4e03 100644 (file)
  *
  * VMEMMAP_SIZE: allows the whole VA space to be covered by a struct page array
  *     (rounded up to PUD_SIZE).
- * VMALLOC_START: beginning of the kernel VA space
+ * VMALLOC_START: beginning of the kernel vmalloc space
  * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
  *     fixed mappings and modules
  */
 #define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 
-#ifndef CONFIG_KASAN
-#define VMALLOC_START          (VA_START)
-#else
-#include <asm/kasan.h>
-#define VMALLOC_START          (KASAN_SHADOW_END + SZ_64K)
-#endif
-
+#define VMALLOC_START          (MODULES_END)
 #define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define vmemmap                        ((struct page *)(VMALLOC_END + SZ_64K))
@@ -57,6 +51,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/fixmap.h>
 #include <linux/mmdebug.h>
 
 extern void __pte_error(const char *file, int line, unsigned long val);
@@ -67,11 +62,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define PROT_DEFAULT           (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT      (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL       (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -81,7 +76,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 
 #define PAGE_KERNEL            __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO         __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX        __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX                __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT  __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
@@ -121,8 +116,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern struct page *empty_zero_page;
-#define ZERO_PAGE(vaddr)       (empty_zero_page)
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr)       virt_to_page(empty_zero_page)
 
 #define pte_ERROR(pte)         __pte_error(__FILE__, __LINE__, pte_val(pte))
 
@@ -134,16 +129,6 @@ extern struct page *empty_zero_page;
 #define pte_clear(mm,addr,ptep)        set_pte(ptep, __pte(0))
 #define pte_page(pte)          (pfn_to_page(pte_pfn(pte)))
 
-/* Find an entry in the third-level page table. */
-#define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define pte_offset_kernel(dir,addr)    (pmd_page_vaddr(*(dir)) + pte_index(addr))
-
-#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
-#define pte_offset_map_nested(dir,addr)        pte_offset_kernel((dir), (addr))
-#define pte_unmap(pte)                 do { } while (0)
-#define pte_unmap_nested(pte)          do { } while (0)
-
 /*
  * The following only work if pte_present(). Undefined behaviour otherwise.
  */
@@ -153,6 +138,7 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)         (!!(pte_val(pte) & PTE_WRITE))
 #define pte_exec(pte)          (!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)          (!!(pte_val(pte) & PTE_CONT))
+#define pte_user(pte)          (!!(pte_val(pte) & PTE_USER))
 
 #ifdef CONFIG_ARM64_HW_AFDBM
 #define pte_hw_dirty(pte)      (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -163,8 +149,6 @@ extern struct page *empty_zero_page;
 #define pte_dirty(pte)         (pte_sw_dirty(pte) || pte_hw_dirty(pte))
 
 #define pte_valid(pte)         (!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_user(pte) \
-       ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
 #define pte_valid_not_user(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 #define pte_valid_young(pte) \
@@ -278,13 +262,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
 {
-       if (pte_valid_user(pte)) {
-               if (!pte_special(pte) && pte_exec(pte))
-                       __sync_icache_dcache(pte, addr);
+       if (pte_valid(pte)) {
                if (pte_sw_dirty(pte) && pte_write(pte))
                        pte_val(pte) &= ~PTE_RDONLY;
                else
                        pte_val(pte) |= PTE_RDONLY;
+               if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
+                       __sync_icache_dcache(pte, addr);
        }
 
        /*
@@ -433,13 +417,31 @@ static inline void pmd_clear(pmd_t *pmdp)
        set_pmd(pmdp, __pmd(0));
 }
 
-static inline pte_t *pmd_page_vaddr(pmd_t pmd)
+static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 {
-       return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
+       return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
+/* Find an entry in the third-level page table. */
+#define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+#define pte_offset_phys(dir,addr)      (pmd_page_paddr(*(dir)) + pte_index(addr) * sizeof(pte_t))
+#define pte_offset_kernel(dir,addr)    ((pte_t *)__va(pte_offset_phys((dir), (addr))))
+
+#define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
+#define pte_offset_map_nested(dir,addr)        pte_offset_kernel((dir), (addr))
+#define pte_unmap(pte)                 do { } while (0)
+#define pte_unmap_nested(pte)          do { } while (0)
+
+#define pte_set_fixmap(addr)           ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
+#define pte_set_fixmap_offset(pmd, addr)       pte_set_fixmap(pte_offset_phys(pmd, addr))
+#define pte_clear_fixmap()             clear_fixmap(FIX_PTE)
+
 #define pmd_page(pmd)          pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pte_offset_kimg(dir,addr)      ((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))
+
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
@@ -466,21 +468,37 @@ static inline void pud_clear(pud_t *pudp)
        set_pud(pudp, __pud(0));
 }
 
-static inline pmd_t *pud_page_vaddr(pud_t pud)
+static inline phys_addr_t pud_page_paddr(pud_t pud)
 {
-       return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
+       return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
 /* Find an entry in the second-level page table. */
 #define pmd_index(addr)                (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
-       return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
+#define pmd_offset_phys(dir, addr)     (pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset(dir, addr)          ((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
+
+#define pmd_set_fixmap(addr)           ((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
+#define pmd_set_fixmap_offset(pud, addr)       pmd_set_fixmap(pmd_offset_phys(pud, addr))
+#define pmd_clear_fixmap()             clear_fixmap(FIX_PMD)
 
 #define pud_page(pud)          pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pmd_offset_kimg(dir,addr)      ((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))
+
+#else
+
+#define pud_page_paddr(pud)    ({ BUILD_BUG(); 0; })
+
+/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
+#define pmd_set_fixmap(addr)           NULL
+#define pmd_set_fixmap_offset(pudp, addr)      ((pmd_t *)pudp)
+#define pmd_clear_fixmap()
+
+#define pmd_offset_kimg(dir,addr)      ((pmd_t *)dir)
+
 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_PGTABLE_LEVELS > 3
@@ -502,21 +520,37 @@ static inline void pgd_clear(pgd_t *pgdp)
        set_pgd(pgdp, __pgd(0));
 }
 
-static inline pud_t *pgd_page_vaddr(pgd_t pgd)
+static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 {
-       return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+       return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
 }
 
 /* Find an entry in the first-level page table. */
 #define pud_index(addr)                (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 
-static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
-{
-       return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
-}
+#define pud_offset_phys(dir, addr)     (pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset(dir, addr)          ((pud_t *)__va(pud_offset_phys((dir), (addr))))
+
+#define pud_set_fixmap(addr)           ((pud_t *)set_fixmap_offset(FIX_PUD, addr))
+#define pud_set_fixmap_offset(pgd, addr)       pud_set_fixmap(pud_offset_phys(pgd, addr))
+#define pud_clear_fixmap()             clear_fixmap(FIX_PUD)
 
 #define pgd_page(pgd)          pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
 
+/* use ONLY for statically allocated translation tables */
+#define pud_offset_kimg(dir,addr)      ((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))
+
+#else
+
+#define pgd_page_paddr(pgd)    ({ BUILD_BUG(); 0;})
+
+/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
+#define pud_set_fixmap(addr)           NULL
+#define pud_set_fixmap_offset(pgdp, addr)      ((pud_t *)pgdp)
+#define pud_clear_fixmap()
+
+#define pud_offset_kimg(dir,addr)      ((pud_t *)dir)
+
 #endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 
 #define pgd_ERROR(pgd)         __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
@@ -524,11 +558,16 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr)                (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 
-#define pgd_offset(mm, addr)   ((mm)->pgd+pgd_index(addr))
+#define pgd_offset_raw(pgd, addr)      ((pgd) + pgd_index(addr))
+
+#define pgd_offset(mm, addr)   (pgd_offset_raw((mm)->pgd, (addr)))
 
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 
+#define pgd_set_fixmap(addr)   ((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
+#define pgd_clear_fixmap()     clear_fixmap(FIX_PGD)
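For illustration (not part of the patch): a minimal sketch of how an early page-table walker could use the new fixmap accessors instead of relying on the linear map, assuming the FIX_PTE fixmap slot is available:

        /* Sketch only: map the pte table through the fixmap, touch the entry,
         * then drop the temporary mapping again. */
        static void walk_one_pte(pmd_t *pmdp, unsigned long addr)
        {
                pte_t *ptep = pte_set_fixmap_offset(pmdp, addr);

                WARN_ON(!pte_valid(*ptep));     /* inspect or rewrite the entry here */
                pte_clear_fixmap();
        }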
+
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
index 4acb7ca94fcd9c05569f3103ab09097c19ea72d5..5bb1d763d17addb2beb0d0f462869cfc183b0595 100644 (file)
 
 #include <linux/string.h>
 
+#include <asm/alternative.h>
 #include <asm/fpsimd.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/lse.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
@@ -177,9 +179,11 @@ static inline void prefetchw(const void *ptr)
 }
 
 #define ARCH_HAS_SPINLOCK_PREFETCH
-static inline void spin_lock_prefetch(const void *x)
+static inline void spin_lock_prefetch(const void *ptr)
 {
-       prefetchw(x);
+       asm volatile(ARM64_LSE_ATOMIC_INSN(
+                    "prfm pstl1strm, %a0",
+                    "nop") : : "p" (ptr));
 }
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
index d9c3d6a6100ac5d68e9b412113daccd1e43d8371..2013a4dc5124a55c5c304306b41908f16a0e5d64 100644 (file)
@@ -64,6 +64,15 @@ extern void secondary_entry(void);
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
+#else
+static inline void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+       BUILD_BUG();
+}
+#endif
+
 extern int __cpu_disable(void);
 
 extern void __cpu_die(unsigned int cpu);
index 83cd7e68e83b2e59740b8e56268b7f0a2604aa63..8a9c65ccb6369cfcb50c5ee98f8356c8d5c369ba 100644 (file)
@@ -41,6 +41,7 @@ arm64-obj-$(CONFIG_EFI)                       += efi.o efi-entry.stub.o
 arm64-obj-$(CONFIG_PCI)                        += pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)   += armv8_deprecated.o
 arm64-obj-$(CONFIG_ACPI)               += acpi.o
+arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)        += acpi_parking_protocol.o
 arm64-obj-$(CONFIG_PARAVIRT)           += paravirt.o
 
 obj-y                                  += $(arm64-obj-y) vdso/
diff --git a/arch/arm64/kernel/acpi_parking_protocol.c b/arch/arm64/kernel/acpi_parking_protocol.c
new file mode 100644 (file)
index 0000000..4b1e5a7
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * ARM64 ACPI Parking Protocol implementation
+ *
+ * Authors: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *         Mark Salter <msalter@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/acpi.h>
+#include <linux/types.h>
+
+#include <asm/cpu_ops.h>
+
+struct cpu_mailbox_entry {
+       phys_addr_t mailbox_addr;
+       u8 version;
+       u8 gic_cpu_id;
+};
+
+static struct cpu_mailbox_entry cpu_mailbox_entries[NR_CPUS];
+
+void __init acpi_set_mailbox_entry(int cpu,
+                                  struct acpi_madt_generic_interrupt *p)
+{
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+       cpu_entry->mailbox_addr = p->parked_address;
+       cpu_entry->version = p->parking_version;
+       cpu_entry->gic_cpu_id = p->cpu_interface_number;
+}
+
+bool acpi_parking_protocol_valid(int cpu)
+{
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+
+       return cpu_entry->mailbox_addr && cpu_entry->version;
+}
+
+static int acpi_parking_protocol_cpu_init(unsigned int cpu)
+{
+       pr_debug("%s: ACPI parked addr=%llx\n", __func__,
+                 cpu_mailbox_entries[cpu].mailbox_addr);
+
+       return 0;
+}
+
+static int acpi_parking_protocol_cpu_prepare(unsigned int cpu)
+{
+       return 0;
+}
+
+struct parking_protocol_mailbox {
+       __le32 cpu_id;
+       __le32 reserved;
+       __le64 entry_point;
+};
+
+static int acpi_parking_protocol_cpu_boot(unsigned int cpu)
+{
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+       struct parking_protocol_mailbox __iomem *mailbox;
+       __le32 cpu_id;
+
+       /*
+        * Map mailbox memory with attribute device nGnRE (ie ioremap -
+        * this deviates from the parking protocol specifications since
+        * the mailboxes are required to be mapped nGnRnE; the attribute
+        * discrepancy is harmless insofar as the protocol specification
+        * is concerned).
+        * If the mailbox is mistakenly allocated in the linear mapping
+        * by FW ioremap will fail since the mapping will be prevented
+        * by the kernel (it clashes with the linear mapping attributes
+        * specifications).
+        */
+       mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
+       if (!mailbox)
+               return -EIO;
+
+       cpu_id = readl_relaxed(&mailbox->cpu_id);
+       /*
+        * Check if firmware has set-up the mailbox entry properly
+        * before kickstarting the respective cpu.
+        */
+       if (cpu_id != ~0U) {
+               iounmap(mailbox);
+               return -ENXIO;
+       }
+
+       /*
+        * We write the entry point and cpu id as LE regardless of the
+        * native endianness of the kernel. Therefore, any boot-loaders
+        * that read this address need to convert this address to the
+        * Boot-Loader's endianness before jumping.
+        */
+       writeq_relaxed(__pa(secondary_entry), &mailbox->entry_point);
+       writel_relaxed(cpu_entry->gic_cpu_id, &mailbox->cpu_id);
+
+       arch_send_wakeup_ipi_mask(cpumask_of(cpu));
+
+       iounmap(mailbox);
+
+       return 0;
+}
+
+static void acpi_parking_protocol_cpu_postboot(void)
+{
+       int cpu = smp_processor_id();
+       struct cpu_mailbox_entry *cpu_entry = &cpu_mailbox_entries[cpu];
+       struct parking_protocol_mailbox __iomem *mailbox;
+       __le64 entry_point;
+
+       /*
+        * Map mailbox memory with attribute device nGnRE (ie ioremap -
+        * this deviates from the parking protocol specifications since
+        * the mailboxes are required to be mapped nGnRnE; the attribute
+        * discrepancy is harmless insofar as the protocol specification
+        * is concerned).
+        * If the mailbox is mistakenly allocated in the linear mapping
+        * by FW ioremap will fail since the mapping will be prevented
+        * by the kernel (it clashes with the linear mapping attributes
+        * specifications).
+        */
+       mailbox = ioremap(cpu_entry->mailbox_addr, sizeof(*mailbox));
+       if (!mailbox)
+               return;
+
+       entry_point = readl_relaxed(&mailbox->entry_point);
+       /*
+        * Check if firmware has cleared the entry_point as expected
+        * by the protocol specification.
+        */
+       WARN_ON(entry_point);
+
+       iounmap(mailbox);
+}
+
+const struct cpu_operations acpi_parking_protocol_ops = {
+       .name           = "parking-protocol",
+       .cpu_init       = acpi_parking_protocol_cpu_init,
+       .cpu_prepare    = acpi_parking_protocol_cpu_prepare,
+       .cpu_boot       = acpi_parking_protocol_cpu_boot,
+       .cpu_postboot   = acpi_parking_protocol_cpu_postboot
+};
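For context (not part of the patch): a hypothetical sketch of the firmware-side half of the handshake, that is, what a parked CPU is expected to do with the same mailbox before the code above can bring it into the kernel. Names and details here are assumptions, loosely following the Parking Protocol specification:

        /* Hypothetical sketch only: spin on the mailbox until the OS writes our
         * GIC CPU id, then acknowledge and branch to the published entry point. */
        static void parked_cpu_wait(struct parking_protocol_mailbox *mb, u32 my_gic_id)
        {
                void (*entry)(void);

                while (le32_to_cpu(READ_ONCE(mb->cpu_id)) != my_gic_id)
                        wfi();                          /* woken by the wake-up IPI     */

                entry = (void (*)(void))(unsigned long)le64_to_cpu(READ_ONCE(mb->entry_point));
                WRITE_ONCE(mb->entry_point, 0);         /* what cpu_postboot checks for */
                entry();
        }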
index feb6b4efa6414846d5598ccb0913a544ba0cf441..e6bc988e8dbf0f69fc4b1a48f9a7b4a89ee713f3 100644 (file)
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
 
-#define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
-#define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
-#define MIDR_THUNDERX  MIDR_CPU_PART(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
-
-#define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
-                       MIDR_ARCHITECTURE_MASK)
-
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
 {
-       u32 midr = read_cpuid_id();
-
-       if ((midr & CPU_MODEL_MASK) != entry->midr_model)
-               return false;
-
-       midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
-
-       return (midr >= entry->midr_range_min && midr <= entry->midr_range_max);
+       return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
+                                      entry->midr_range_min,
+                                      entry->midr_range_max);
 }
 
 #define MIDR_RANGE(model, min, max) \
index b6bd7d4477683393fb34dc6b055b07382e8ab050..c7cfb8fe06f94c7f5113abf0cb624980e4227127 100644 (file)
 #include <asm/smp_plat.h>
 
 extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations acpi_parking_protocol_ops;
 extern const struct cpu_operations cpu_psci_ops;
 
 const struct cpu_operations *cpu_ops[NR_CPUS];
 
-static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+static const struct cpu_operations *dt_supported_cpu_ops[] __initconst = {
        &smp_spin_table_ops,
        &cpu_psci_ops,
        NULL,
 };
 
+static const struct cpu_operations *acpi_supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+       &acpi_parking_protocol_ops,
+#endif
+       &cpu_psci_ops,
+       NULL,
+};
+
 static const struct cpu_operations * __init cpu_get_ops(const char *name)
 {
-       const struct cpu_operations **ops = supported_cpu_ops;
+       const struct cpu_operations **ops;
+
+       ops = acpi_disabled ? dt_supported_cpu_ops : acpi_supported_cpu_ops;
 
        while (*ops) {
                if (!strcmp(name, (*ops)->name))
@@ -75,8 +86,16 @@ static const char *__init cpu_read_enable_method(int cpu)
                }
        } else {
                enable_method = acpi_get_enable_method(cpu);
-               if (!enable_method)
-                       pr_err("Unsupported ACPI enable-method\n");
+               if (!enable_method) {
+                       /*
+                        * In ACPI systems the boot CPU does not require
+                        * checking the enable method since for some
+                        * boot protocol (ie parking protocol) it need not
+                        * be initialized. Don't warn spuriously.
+                        */
+                       if (cpu != 0)
+                               pr_err("Unsupported ACPI enable-method\n");
+               }
        }
 
        return enable_method;
index 5c90aa490a2bee2368ae45bba2628603afe1c659..3615d7d7c9af65520c5b835d4e58d130ae6c061e 100644 (file)
@@ -621,6 +621,18 @@ static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
        return has_sre;
 }
 
+static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
+{
+       u32 midr = read_cpuid_id();
+       u32 rv_min, rv_max;
+
+       /* Cavium ThunderX pass 1.x and 2.x */
+       rv_min = 0;
+       rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;
+
+       return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -651,6 +663,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .min_field_value = 2,
        },
 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+       {
+               .desc = "Software prefetching using PRFM",
+               .capability = ARM64_HAS_NO_HW_PREFETCH,
+               .matches = has_no_hw_prefetch,
+       },
        {},
 };
 
index 8aee3aeec3e687edde6f5be67233299e7a4f7d4f..c1492ba1f6d14e71c263fa26904fab9980439b4f 100644 (file)
@@ -186,20 +186,21 @@ static void clear_regs_spsr_ss(struct pt_regs *regs)
 
 /* EL1 Single Step Handler hooks */
 static LIST_HEAD(step_hook);
-static DEFINE_RWLOCK(step_hook_lock);
+static DEFINE_SPINLOCK(step_hook_lock);
 
 void register_step_hook(struct step_hook *hook)
 {
-       write_lock(&step_hook_lock);
-       list_add(&hook->node, &step_hook);
-       write_unlock(&step_hook_lock);
+       spin_lock(&step_hook_lock);
+       list_add_rcu(&hook->node, &step_hook);
+       spin_unlock(&step_hook_lock);
 }
 
 void unregister_step_hook(struct step_hook *hook)
 {
-       write_lock(&step_hook_lock);
-       list_del(&hook->node);
-       write_unlock(&step_hook_lock);
+       spin_lock(&step_hook_lock);
+       list_del_rcu(&hook->node);
+       spin_unlock(&step_hook_lock);
+       synchronize_rcu();
 }
 
 /*
@@ -213,15 +214,15 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
        struct step_hook *hook;
        int retval = DBG_HOOK_ERROR;
 
-       read_lock(&step_hook_lock);
+       rcu_read_lock();
 
-       list_for_each_entry(hook, &step_hook, node)     {
+       list_for_each_entry_rcu(hook, &step_hook, node) {
                retval = hook->fn(regs, esr);
                if (retval == DBG_HOOK_HANDLED)
                        break;
        }
 
-       read_unlock(&step_hook_lock);
+       rcu_read_unlock();
 
        return retval;
 }
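For illustration (not part of the patch): a hedged sketch of a hypothetical client of this interface; registration still serialises through the spinlock, while the single-step handler above now walks the list under rcu_read_lock() only:

        /* Sketch only: hook declaration and (un)registration for a made-up user. */
        static int my_step_fn(struct pt_regs *regs, unsigned int esr)
        {
                return DBG_HOOK_ERROR;          /* not ours, let the next hook try */
        }

        static struct step_hook my_step_hook = { .fn = my_step_fn };

        /* at init:     register_step_hook(&my_step_hook);
         * at teardown: unregister_step_hook(&my_step_hook);  waits for readers */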
index ffe9c2b6431bd5c0cb1e9982afe84e13a82e2043..05b98289093e79c2651559f309a56b765290c6c4 100644 (file)
@@ -389,7 +389,7 @@ __create_page_tables:
         * Map the kernel image (starting with PHYS_OFFSET).
         */
        mov     x0, x26                         // swapper_pg_dir
-       mov     x5, #PAGE_OFFSET
+       ldr     x5, =KIMAGE_VADDR
        create_pgd_entry x0, x5, x3, x6
        ldr     x6, =KERNEL_END                 // __va(KERNEL_END)
        mov     x3, x24                         // phys offset
@@ -421,13 +421,18 @@ __mmap_switched:
        adr_l   x2, __bss_stop
        sub     x2, x2, x0
        bl      __pi_memset
+       dsb     ishst                           // Make zero page visible to PTW
 
        adr_l   sp, initial_sp, x4
        mov     x4, sp
        and     x4, x4, #~(THREAD_SIZE - 1)
        msr     sp_el0, x4                      // Save thread_info
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
-       str_l   x24, memstart_addr, x6          // Save PHYS_OFFSET
+
+       ldr     x4, =KIMAGE_VADDR               // Save the offset between
+       sub     x4, x4, x24                     // the kernel virtual and
+       str_l   x4, kimage_voffset, x5          // physical mappings
+
        mov     x29, #0
 #ifdef CONFIG_KASAN
        bl      kasan_early_init
@@ -514,9 +519,14 @@ CPU_LE(    movk    x0, #0x30d0, lsl #16    )       // Clear EE and E0E on LE systems
 #endif
 
        /* EL2 debug */
+       mrs     x0, id_aa64dfr0_el1             // Check ID_AA64DFR0_EL1 PMUVer
+       sbfx    x0, x0, #8, #4
+       cmp     x0, #1
+       b.lt    4f                              // Skip if no PMU present
        mrs     x0, pmcr_el0                    // Disable debug access traps
        ubfx    x0, x0, #11, #5                 // to EL2 and allow access to
        msr     mdcr_el2, x0                    // all PMU counters from EL1
+4:
 
        /* Stage-2 translation */
        msr     vttbr_el2, xzr
index bc2abb8b1599576ae2dec02bce0c46c48fc707dd..c9c62cab25a4a6bd62370149033a0074122d66f8 100644 (file)
 #endif
 
 #ifdef CONFIG_CPU_BIG_ENDIAN
-#define __HEAD_FLAG_BE 1
+#define __HEAD_FLAG_BE         1
 #else
-#define __HEAD_FLAG_BE 0
+#define __HEAD_FLAG_BE         0
 #endif
 
-#define __HEAD_FLAG_PAGE_SIZE ((PAGE_SHIFT - 10) / 2)
+#define __HEAD_FLAG_PAGE_SIZE  ((PAGE_SHIFT - 10) / 2)
 
-#define __HEAD_FLAGS   ((__HEAD_FLAG_BE << 0) |        \
-                        (__HEAD_FLAG_PAGE_SIZE << 1))
+#define __HEAD_FLAG_PHYS_BASE  1
+
+#define __HEAD_FLAGS           ((__HEAD_FLAG_BE << 0) |        \
+                                (__HEAD_FLAG_PAGE_SIZE << 1) | \
+                                (__HEAD_FLAG_PHYS_BASE << 3))
 
 /*
  * These will output as part of the Image header, which should be little-endian
 
 #ifdef CONFIG_EFI
 
+/*
+ * Prevent the symbol aliases below from being emitted into the kallsyms
+ * table, by forcing them to be absolute symbols (which are conveniently
+ * ignored by scripts/kallsyms) rather than section relative symbols.
+ * The distinction is only relevant for partial linking, and only for symbols
+ * that are defined within a section declaration (which is not the case for
+ * the definitions below) so the resulting values will be identical.
+ */
+#define KALLSYMS_HIDE(sym)     ABSOLUTE(sym)
+
 /*
  * The EFI stub has its own symbol namespace prefixed by __efistub_, to
  * isolate it from the kernel proper. The following symbols are legally
  * linked at. The routines below are all implemented in assembler in a
  * position independent manner
  */
-__efistub_memcmp               = __pi_memcmp;
-__efistub_memchr               = __pi_memchr;
-__efistub_memcpy               = __pi_memcpy;
-__efistub_memmove              = __pi_memmove;
-__efistub_memset               = __pi_memset;
-__efistub_strlen               = __pi_strlen;
-__efistub_strcmp               = __pi_strcmp;
-__efistub_strncmp              = __pi_strncmp;
-__efistub___flush_dcache_area  = __pi___flush_dcache_area;
+__efistub_memcmp               = KALLSYMS_HIDE(__pi_memcmp);
+__efistub_memchr               = KALLSYMS_HIDE(__pi_memchr);
+__efistub_memcpy               = KALLSYMS_HIDE(__pi_memcpy);
+__efistub_memmove              = KALLSYMS_HIDE(__pi_memmove);
+__efistub_memset               = KALLSYMS_HIDE(__pi_memset);
+__efistub_strlen               = KALLSYMS_HIDE(__pi_strlen);
+__efistub_strcmp               = KALLSYMS_HIDE(__pi_strcmp);
+__efistub_strncmp              = KALLSYMS_HIDE(__pi_strncmp);
+__efistub___flush_dcache_area  = KALLSYMS_HIDE(__pi___flush_dcache_area);
 
 #ifdef CONFIG_KASAN
-__efistub___memcpy             = __pi_memcpy;
-__efistub___memmove            = __pi_memmove;
-__efistub___memset             = __pi_memset;
+__efistub___memcpy             = KALLSYMS_HIDE(__pi_memcpy);
+__efistub___memmove            = KALLSYMS_HIDE(__pi_memmove);
+__efistub___memset             = KALLSYMS_HIDE(__pi_memset);
 #endif
 
-__efistub__text                        = _text;
-__efistub__end                 = _end;
-__efistub__edata               = _edata;
+__efistub__text                        = KALLSYMS_HIDE(_text);
+__efistub__end                 = KALLSYMS_HIDE(_end);
+__efistub__edata               = KALLSYMS_HIDE(_edata);
 
 #endif
 
index b3d098bd34aa3d2a57c3c9b2b3c7b63a6001be9d..c72de668e1d4d0fa549ef3b9f79d3c696ad40c07 100644 (file)
@@ -19,8 +19,6 @@
 #include <linux/of_platform.h>
 #include <linux/slab.h>
 
-#include <asm/pci-bridge.h>
-
 /*
  * Called after each bus is probed, but before its children are examined
  */
index f67f35b6edb12e4d34e1db17750b07a0bec72e39..42816bebb1e0f732d788780fde431028f202c31a 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/smp.h>
 #include <linux/delay.h>
 #include <linux/psci.h>
-#include <linux/slab.h>
 
 #include <uapi/linux/psci.h>
 
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
 #include <asm/smp_plat.h>
-#include <asm/suspend.h>
-
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
-{
-       int i, ret, count = 0;
-       u32 *psci_states;
-       struct device_node *state_node, *cpu_node;
-
-       cpu_node = of_get_cpu_node(cpu, NULL);
-       if (!cpu_node)
-               return -ENODEV;
-
-       /*
-        * If the PSCI cpu_suspend function hook has not been initialized
-        * idle states must not be enabled, so bail out
-        */
-       if (!psci_ops.cpu_suspend)
-               return -EOPNOTSUPP;
-
-       /* Count idle states */
-       while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-                                             count))) {
-               count++;
-               of_node_put(state_node);
-       }
-
-       if (!count)
-               return -ENODEV;
-
-       psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-       if (!psci_states)
-               return -ENOMEM;
-
-       for (i = 0; i < count; i++) {
-               u32 state;
-
-               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-
-               ret = of_property_read_u32(state_node,
-                                          "arm,psci-suspend-param",
-                                          &state);
-               if (ret) {
-                       pr_warn(" * %s missing arm,psci-suspend-param property\n",
-                               state_node->full_name);
-                       of_node_put(state_node);
-                       goto free_mem;
-               }
-
-               of_node_put(state_node);
-               pr_debug("psci-power-state %#x index %d\n", state, i);
-               if (!psci_power_state_is_valid(state)) {
-                       pr_warn("Invalid PSCI power state %#x\n", state);
-                       ret = -EINVAL;
-                       goto free_mem;
-               }
-               psci_states[i] = state;
-       }
-       /* Idle states parsed correctly, initialize per-cpu pointer */
-       per_cpu(psci_power_state, cpu) = psci_states;
-       return 0;
-
-free_mem:
-       kfree(psci_states);
-       return ret;
-}
 
 static int __init cpu_psci_cpu_init(unsigned int cpu)
 {
@@ -178,38 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 }
 #endif
 
-static int psci_suspend_finisher(unsigned long index)
-{
-       u32 *state = __this_cpu_read(psci_power_state);
-
-       return psci_ops.cpu_suspend(state[index - 1],
-                                   virt_to_phys(cpu_resume));
-}
-
-static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
-{
-       int ret;
-       u32 *state = __this_cpu_read(psci_power_state);
-       /*
-        * idle state index 0 corresponds to wfi, should never be called
-        * from the cpu_suspend operations
-        */
-       if (WARN_ON_ONCE(!index))
-               return -EINVAL;
-
-       if (!psci_power_state_loses_context(state[index - 1]))
-               ret = psci_ops.cpu_suspend(state[index - 1], 0);
-       else
-               ret = cpu_suspend(index, psci_suspend_finisher);
-
-       return ret;
-}
-
 const struct cpu_operations cpu_psci_ops = {
        .name           = "psci",
 #ifdef CONFIG_CPU_IDLE
-       .cpu_init_idle  = cpu_psci_cpu_init_idle,
-       .cpu_suspend    = cpu_psci_cpu_suspend,
+       .cpu_init_idle  = psci_cpu_init_idle,
+       .cpu_suspend    = psci_cpu_suspend_enter,
 #endif
        .cpu_init       = cpu_psci_cpu_init,
        .cpu_prepare    = cpu_psci_cpu_prepare,
index 8119479147db147c33800f76aa0d07c6072e8559..28c0f90c9c10136aa560cde98e873c9ae53a7ca1 100644 (file)
@@ -62,6 +62,7 @@
 #include <asm/memblock.h>
 #include <asm/efi.h>
 #include <asm/xen/hypervisor.h>
+#include <asm/mmu_context.h>
 
 phys_addr_t __fdt_pointer __initdata;
 
@@ -73,13 +74,13 @@ static struct resource mem_res[] = {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
-               .flags = IORESOURCE_MEM
+               .flags = IORESOURCE_SYSTEM_RAM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
-               .flags = IORESOURCE_MEM
+               .flags = IORESOURCE_SYSTEM_RAM
        }
 };
 
@@ -210,7 +211,7 @@ static void __init request_standard_resources(void)
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
-               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
                request_resource(&iomem_resource, res);
 
@@ -313,6 +314,12 @@ void __init setup_arch(char **cmdline_p)
         */
        local_async_enable();
 
+       /*
+        * TTBR0 is only used for the identity mapping at this stage. Make it
+        * point to zero page to avoid speculatively fetching new entries.
+        */
+       cpu_uninstall_idmap();
+
        efi_init();
        arm64_memblock_init();
 
index b1adc51b2c2e7682212554ba8276b5e7c25fbff5..24cb4f800033bc2b9d5ad49144f915ca4506e6dc 100644 (file)
@@ -70,6 +70,7 @@ enum ipi_msg_type {
        IPI_CPU_STOP,
        IPI_TIMER,
        IPI_IRQ_WORK,
+       IPI_WAKEUP
 };
 
 /*
@@ -149,9 +150,7 @@ asmlinkage void secondary_start_kernel(void)
         * TTBR0 is only used for the identity mapping at this stage. Make it
         * point to zero page to avoid speculatively fetching new entries.
         */
-       cpu_set_reserved_ttbr0();
-       local_flush_tlb_all();
-       cpu_set_default_tcr_t0sz();
+       cpu_uninstall_idmap();
 
        preempt_disable();
        trace_hardirqs_off();
@@ -445,6 +444,17 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
        /* map the logical cpu id to cpu MPIDR */
        cpu_logical_map(cpu_count) = hwid;
 
+       /*
+        * Set-up the ACPI parking protocol cpu entries
+        * while initializing the cpu_logical_map to
+        * avoid parsing MADT entries multiple times for
+        * nothing (ie a valid cpu_logical_map entry should
+        * contain a valid parking protocol data set to
+        * initialize the cpu if the parking protocol is
+        * the only available enable method).
+        */
+       acpi_set_mailbox_entry(cpu_count, processor);
+
        cpu_count++;
 }
 
@@ -627,6 +637,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
        S(IPI_CPU_STOP, "CPU stop interrupts"),
        S(IPI_TIMER, "Timer broadcast interrupts"),
        S(IPI_IRQ_WORK, "IRQ work interrupts"),
+       S(IPI_WAKEUP, "CPU wake-up interrupts"),
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
@@ -670,6 +681,13 @@ void arch_send_call_function_single_ipi(int cpu)
        smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+       smp_cross_call(mask, IPI_WAKEUP);
+}
+#endif
+
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -747,6 +765,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                break;
 #endif
 
+#ifdef CONFIG_ARM64_ACPI_PARKING_PROTOCOL
+       case IPI_WAKEUP:
+               WARN_ONCE(!acpi_parking_protocol_valid(cpu),
+                         "CPU%u: Wake-up IPI outside the ACPI parking protocol\n",
+                         cpu);
+               break;
+#endif
+
        default:
                pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
                break;
index 1095aa483a1c28e5387b23895c14d7a1746268a3..66055392f445ef47a7fb3749ca6024df1ea185c9 100644 (file)
@@ -60,7 +60,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
  */
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-       struct mm_struct *mm = current->active_mm;
        int ret;
        unsigned long flags;
 
@@ -87,22 +86,11 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        ret = __cpu_suspend_enter(arg, fn);
        if (ret == 0) {
                /*
-                * We are resuming from reset with TTBR0_EL1 set to the
-                * idmap to enable the MMU; set the TTBR0 to the reserved
-                * page tables to prevent speculative TLB allocations, flush
-                * the local tlb and set the default tcr_el1.t0sz so that
-                * the TTBR0 address space set-up is properly restored.
-                * If the current active_mm != &init_mm we entered cpu_suspend
-                * with mappings in TTBR0 that must be restored, so we switch
-                * them back to complete the address space configuration
-                * restoration before returning.
+                * We are resuming from reset with the idmap active in TTBR0_EL1.
+                * We must uninstall the idmap and restore the expected MMU
+                * state before we can possibly return to userspace.
                 */
-               cpu_set_reserved_ttbr0();
-               local_flush_tlb_all();
-               cpu_set_default_tcr_t0sz();
-
-               if (mm != &init_mm)
-                       cpu_switch_mm(mm->pgd, mm);
+               cpu_uninstall_idmap();
 
                /*
                 * Restore per-cpu offset before any kernel
index e3928f578891fdd0a5d3535975d350b6489f8df1..282e3e64a17e424f1806433d7b2648db34600837 100644 (file)
@@ -89,13 +89,13 @@ SECTIONS
                *(.discard.*)
        }
 
-       . = PAGE_OFFSET + TEXT_OFFSET;
+       . = KIMAGE_VADDR + TEXT_OFFSET;
 
        .head.text : {
                _text = .;
                HEAD_TEXT
        }
-       ALIGN_DEBUG_RO
+       ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
        .text : {                       /* Real text segment            */
                _stext = .;             /* Text and read-only data      */
                        __exception_text_start = .;
@@ -116,10 +116,9 @@ SECTIONS
        RO_DATA(PAGE_SIZE)
        EXCEPTION_TABLE(8)
        NOTES
-       ALIGN_DEBUG_RO
-       _etext = .;                     /* End of text and rodata section */
 
        ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
+       _etext = .;                     /* End of text and rodata section */
        __init_begin = .;
 
        INIT_TEXT_SECTION(8)
@@ -187,4 +186,4 @@ ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
  */
-ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
+ASSERT(_text == (KIMAGE_VADDR + TEXT_OFFSET), "HEAD is misaligned")
index 0ccdcbbef3c20ce0b4afa9874eb128cf55beb683..870578f84b1ca8940d47a9ea2a90b1600a1148e1 100644 (file)
@@ -20,7 +20,7 @@
 #include <asm/assembler.h>
 
 /*
- * u64 kvm_call_hyp(void *hypfn, ...);
+ * u64 __kvm_call_hyp(void *hypfn, ...);
  *
  * This is not really a variadic function in the classic C-way and care must
  * be taken when calling this to ensure parameters are passed in registers
@@ -37,7 +37,7 @@
  * used to implement __hyp_get_vectors in the same way as in
  * arch/arm64/kernel/hyp_stub.S.
  */
-ENTRY(kvm_call_hyp)
+ENTRY(__kvm_call_hyp)
        hvc     #0
        ret
-ENDPROC(kvm_call_hyp)
+ENDPROC(__kvm_call_hyp)
index ca8f5a5e2f965748fca28ced482c9c195270e5ab..f0e7bdfae134a727ec7c0ac76466020fdb65e0fb 100644 (file)
@@ -36,7 +36,11 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
        write_sysreg(val, hcr_el2);
        /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
        write_sysreg(1 << 15, hstr_el2);
-       write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+
+       val = CPTR_EL2_DEFAULT;
+       val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+       write_sysreg(val, cptr_el2);
+
        write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
 }
 
@@ -45,7 +49,7 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
        write_sysreg(HCR_RW, hcr_el2);
        write_sysreg(0, hstr_el2);
        write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
-       write_sysreg(0, cptr_el2);
+       write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
 }
 
 static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
index 648112e90ed546d2d052ccf7d9f66866d2390d06..4d1ac81870d27e6f272abde088e0f5e8290c80d1 100644 (file)
 
 #define PSTATE_FAULT_BITS_64   (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
                                 PSR_I_BIT | PSR_D_BIT)
-#define EL1_EXCEPT_SYNC_OFFSET 0x200
+
+#define CURRENT_EL_SP_EL0_VECTOR       0x0
+#define CURRENT_EL_SP_ELx_VECTOR       0x200
+#define LOWER_EL_AArch64_VECTOR                0x400
+#define LOWER_EL_AArch32_VECTOR                0x600
 
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
@@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
                *fsr = 0x14;
 }
 
+enum exception_type {
+       except_type_sync        = 0,
+       except_type_irq         = 0x80,
+       except_type_fiq         = 0x100,
+       except_type_serror      = 0x180,
+};
+
+static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
+{
+       u64 exc_offset;
+
+       switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
+       case PSR_MODE_EL1t:
+               exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+               break;
+       case PSR_MODE_EL1h:
+               exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+               break;
+       case PSR_MODE_EL0t:
+               exc_offset = LOWER_EL_AArch64_VECTOR;
+               break;
+       default:
+               exc_offset = LOWER_EL_AArch32_VECTOR;
+       }
+
+       return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
        unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
        *vcpu_spsr(vcpu) = cpsr;
        *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+       *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
        *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-       *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
        vcpu_sys_reg(vcpu, FAR_EL1) = addr;
 
@@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
        *vcpu_spsr(vcpu) = cpsr;
        *vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+       *vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
        *vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-       *vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
        /*
         * Build an unknown exception, depending on the instruction
index eec3598b4184077b83b5a1f24321891cb110f5bb..2e90371cfb378b0e064667506a2b74c9275cacfb 100644 (file)
@@ -1007,10 +1007,9 @@ static int emulate_cp(struct kvm_vcpu *vcpu,
                if (likely(r->access(vcpu, params, r))) {
                        /* Skip instruction, since it was emulated */
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+                       /* Handled */
+                       return 0;
                }
-
-               /* Handled */
-               return 0;
        }
 
        /* Not handled */
@@ -1043,7 +1042,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 }
 
 /**
- * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP15 access
+ * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */
@@ -1095,7 +1094,7 @@ out:
 }
 
 /**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
+ * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
  * @vcpu: The VCPU pointer
  * @run:  The kvm_run struct
  */
index 512b9a7b980e98bbed9a699107e936e4b1913dca..4c1e700840b6ced5a0b2f868bfb4f37dddc8abc0 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/const.h>
 #include <asm/assembler.h>
 #include <asm/page.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
 
 /*
  * Copy a page from src to dest (both are page aligned)
  *     x1 - src
  */
 ENTRY(copy_page)
-       /* Assume cache line size is 64 bytes. */
-       prfm    pldl1strm, [x1, #64]
-1:     ldp     x2, x3, [x1]
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+       nop
+       nop
+alternative_else
+       # Prefetch two cache lines ahead.
+       prfm    pldl1strm, [x1, #128]
+       prfm    pldl1strm, [x1, #256]
+alternative_endif
+
+       ldp     x2, x3, [x1]
        ldp     x4, x5, [x1, #16]
        ldp     x6, x7, [x1, #32]
        ldp     x8, x9, [x1, #48]
-       add     x1, x1, #64
-       prfm    pldl1strm, [x1, #64]
+       ldp     x10, x11, [x1, #64]
+       ldp     x12, x13, [x1, #80]
+       ldp     x14, x15, [x1, #96]
+       ldp     x16, x17, [x1, #112]
+
+       mov     x18, #(PAGE_SIZE - 128)
+       add     x1, x1, #128
+1:
+       subs    x18, x18, #128
+
+alternative_if_not ARM64_HAS_NO_HW_PREFETCH
+       nop
+alternative_else
+       prfm    pldl1strm, [x1, #384]
+alternative_endif
+
        stnp    x2, x3, [x0]
+       ldp     x2, x3, [x1]
        stnp    x4, x5, [x0, #16]
+       ldp     x4, x5, [x1, #16]
        stnp    x6, x7, [x0, #32]
+       ldp     x6, x7, [x1, #32]
        stnp    x8, x9, [x0, #48]
-       add     x0, x0, #64
-       tst     x1, #(PAGE_SIZE - 1)
-       b.ne    1b
+       ldp     x8, x9, [x1, #48]
+       stnp    x10, x11, [x0, #64]
+       ldp     x10, x11, [x1, #64]
+       stnp    x12, x13, [x0, #80]
+       ldp     x12, x13, [x1, #80]
+       stnp    x14, x15, [x0, #96]
+       ldp     x14, x15, [x1, #96]
+       stnp    x16, x17, [x0, #112]
+       ldp     x16, x17, [x1, #112]
+
+       add     x0, x0, #128
+       add     x1, x1, #128
+
+       b.gt    1b
+
+       stnp    x2, x3, [x0]
+       stnp    x4, x5, [x0, #16]
+       stnp    x6, x7, [x0, #32]
+       stnp    x8, x9, [x0, #48]
+       stnp    x10, x11, [x0, #64]
+       stnp    x12, x13, [x0, #80]
+       stnp    x14, x15, [x0, #96]
+       stnp    x16, x17, [x0, #112]
+
        ret
 ENDPROC(copy_page)
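
The rewritten copy_page works on 128-byte chunks and software-pipelines the loop: the first chunk is loaded before entering it, each iteration stores the previously loaded chunk while already loading the next one, and the final chunk is drained after the loop, with prfm prefetches that the alternatives framework appears to patch in only on CPUs flagged ARM64_HAS_NO_HW_PREFETCH. A rough standalone C model of that structure (CHUNK and PAGE_SZ are illustrative; this is not the kernel routine):

/*
 * Minimal C model of the software-pipelined copy loop above: prime with one
 * chunk, then in each iteration store the previous chunk while loading the
 * next, and drain the last chunk after the loop.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK   128
#define PAGE_SZ 4096

static void copy_page_pipelined(uint8_t *dst, const uint8_t *src)
{
    uint8_t buf[CHUNK];
    long remaining = PAGE_SZ - CHUNK;

    memcpy(buf, src, CHUNK);          /* prime the pipeline */
    src += CHUNK;

    while (remaining > 0) {
        uint8_t next[CHUNK];

        memcpy(next, src, CHUNK);     /* load chunk N+1 */
        memcpy(dst, buf, CHUNK);      /* store chunk N  */
        memcpy(buf, next, CHUNK);
        src += CHUNK;
        dst += CHUNK;
        remaining -= CHUNK;
    }
    memcpy(dst, buf, CHUNK);          /* drain the last chunk */
}

int main(void)
{
    static uint8_t s[PAGE_SZ], d[PAGE_SZ];

    for (int i = 0; i < PAGE_SZ; i++)
        s[i] = (uint8_t)i;
    copy_page_pipelined(d, s);
    printf("%s\n", memcmp(s, d, PAGE_SZ) ? "mismatch" : "ok");
    return 0;
}
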
index 5a22a119a74c87b4b5b54e114701b3c6eed233e6..6be918478f855021fee88a50fefe1b6642252c81 100644 (file)
@@ -35,7 +35,9 @@ struct addr_marker {
 };
 
 enum address_markers_idx {
-       VMALLOC_START_NR = 0,
+       MODULES_START_NR = 0,
+       MODULES_END_NR,
+       VMALLOC_START_NR,
        VMALLOC_END_NR,
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
        VMEMMAP_START_NR,
@@ -45,12 +47,12 @@ enum address_markers_idx {
        FIXADDR_END_NR,
        PCI_START_NR,
        PCI_END_NR,
-       MODULES_START_NR,
-       MODUELS_END_NR,
        KERNEL_SPACE_NR,
 };
 
 static struct addr_marker address_markers[] = {
+       { MODULES_VADDR,        "Modules start" },
+       { MODULES_END,          "Modules end" },
        { VMALLOC_START,        "vmalloc() Area" },
        { VMALLOC_END,          "vmalloc() End" },
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
@@ -61,9 +63,7 @@ static struct addr_marker address_markers[] = {
        { FIXADDR_TOP,          "Fixmap end" },
        { PCI_IO_START,         "PCI I/O start" },
        { PCI_IO_END,           "PCI I/O end" },
-       { MODULES_VADDR,        "Modules start" },
-       { MODULES_END,          "Modules end" },
-       { PAGE_OFFSET,          "Kernel Mapping" },
+       { PAGE_OFFSET,          "Linear Mapping" },
        { -1,                   NULL },
 };
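
The marker table above now lists the module and vmalloc regions ahead of the renamed linear mapping, and the hunk below adds a PTE_VALID row so the dumper prints "F" for entries whose attribute bits are populated but which are currently faulting, for example pages made invalid elsewhere in this diff by DEBUG_PAGEALLOC. A small standalone sketch of the mask/value decode pattern; the bit positions and labels are made up, not the arm64 PTE layout:

/*
 * Standalone sketch of the prot_bits decode: a field is reported with .set
 * when (entry & mask) == val, and with .clear otherwise.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct toy_prot_bit {
    uint64_t mask;
    uint64_t val;
    const char *set;
    const char *clear;
};

static const struct toy_prot_bit toy_bits[] = {
    { 1ULL << 0, 1ULL << 0, " ",   "F"   },   /* valid vs. faulting */
    { 1ULL << 6, 1ULL << 6, "USR", "   " },   /* user-accessible    */
};

static void toy_dump_prot(uint64_t entry)
{
    for (size_t i = 0; i < sizeof(toy_bits) / sizeof(toy_bits[0]); i++) {
        int hit = (entry & toy_bits[i].mask) == toy_bits[i].val;
        printf("%s ", hit ? toy_bits[i].set : toy_bits[i].clear);
    }
    putchar('\n');
}

int main(void)
{
    toy_dump_prot(0x41);   /* valid, user accessible      */
    toy_dump_prot(0x40);   /* cleared (faulting) page: F  */
    return 0;
}
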
 
@@ -90,6 +90,11 @@ struct prot_bits {
 
 static const struct prot_bits pte_bits[] = {
        {
+               .mask   = PTE_VALID,
+               .val    = PTE_VALID,
+               .set    = " ",
+               .clear  = "F",
+       }, {
                .mask   = PTE_USER,
                .val    = PTE_USER,
                .set    = "USR",
index f3b061e67bfe0f4565d5df29322eeaef38a9bcc3..a1df35c85395e2de76b245abf8de0dac6329a823 100644 (file)
 #include <linux/efi.h>
 #include <linux/swiotlb.h>
 
+#include <asm/boot.h>
 #include <asm/fixmap.h>
+#include <asm/kasan.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
@@ -159,7 +162,33 @@ early_param("mem", early_mem);
 
 void __init arm64_memblock_init(void)
 {
-       memblock_enforce_memory_limit(memory_limit);
+       const s64 linear_region_size = -(s64)PAGE_OFFSET;
+
+       /*
+        * Select a suitable value for the base of physical memory.
+        */
+       memstart_addr = round_down(memblock_start_of_DRAM(),
+                                  ARM64_MEMSTART_ALIGN);
+
+       /*
+        * Remove the memory that we will not be able to cover with the
+        * linear mapping. Take care not to clip the kernel which may be
+        * high in memory.
+        */
+       memblock_remove(max(memstart_addr + linear_region_size, __pa(_end)),
+                       ULLONG_MAX);
+       if (memblock_end_of_DRAM() > linear_region_size)
+               memblock_remove(0, memblock_end_of_DRAM() - linear_region_size);
+
+       /*
+        * Apply the memory limit if it was set. Since the kernel may be loaded
+        * high up in memory, add back the kernel region that must be accessible
+        * via the linear mapping.
+        */
+       if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+               memblock_enforce_memory_limit(memory_limit);
+               memblock_add(__pa(_text), (u64)(_end - _text));
+       }
 
        /*
         * Register the kernel text, kernel data, initrd, and initial
@@ -302,22 +331,26 @@ void __init mem_init(void)
 #ifdef CONFIG_KASAN
                  "    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n"
 #endif
+                 "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
                  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
+                 "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+                 "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+                 "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n"
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
                  "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
 #endif
                  "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
                  "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-                 "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
-                 "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+                 "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
 #ifdef CONFIG_KASAN
                  MLG(KASAN_SHADOW_START, KASAN_SHADOW_END),
 #endif
+                 MLM(MODULES_VADDR, MODULES_END),
                  MLG(VMALLOC_START, VMALLOC_END),
+                 MLK_ROUNDUP(__init_begin, __init_end),
+                 MLK_ROUNDUP(_text, _etext),
+                 MLK_ROUNDUP(_sdata, _edata),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
                  MLG((unsigned long)vmemmap,
                      (unsigned long)vmemmap + VMEMMAP_SIZE),
@@ -326,11 +359,7 @@ void __init mem_init(void)
 #endif
                  MLK(FIXADDR_START, FIXADDR_TOP),
                  MLM(PCI_IO_START, PCI_IO_END),
-                 MLM(MODULES_VADDR, MODULES_END),
-                 MLM(PAGE_OFFSET, (unsigned long)high_memory),
-                 MLK_ROUNDUP(__init_begin, __init_end),
-                 MLK_ROUNDUP(_text, _etext),
-                 MLK_ROUNDUP(_sdata, _edata));
+                 MLM(PAGE_OFFSET, (unsigned long)high_memory));
 
 #undef MLK
 #undef MLM
@@ -358,8 +387,8 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-       fixup_init();
        free_initmem_default(0);
+       fixup_init();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -380,3 +409,28 @@ static int __init keepinitrd_setup(char *__unused)
 
 __setup("keepinitrd", keepinitrd_setup);
 #endif
+
+/*
+ * Dump out memory limit information on panic.
+ */
+static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
+{
+       if (memory_limit != (phys_addr_t)ULLONG_MAX) {
+               pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
+       } else {
+               pr_emerg("Memory Limit: none\n");
+       }
+       return 0;
+}
+
+static struct notifier_block mem_limit_notifier = {
+       .notifier_call = dump_mem_limit,
+};
+
+static int __init register_mem_limit_dumper(void)
+{
+       atomic_notifier_chain_register(&panic_notifier_list,
+                                      &mem_limit_notifier);
+       return 0;
+}
+__initcall(register_mem_limit_dumper);
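
The arm64_memblock_init() hunk earlier in this file bounds what the linear mapping has to cover: RAM above memstart_addr + linear_region_size is removed without ever clipping the kernel image, and when a mem= limit is applied the [_text, _end) range is added back afterwards so an image loaded above the limit stays reachable. A toy sketch of the limit-then-add-back step with made-up addresses (the ranges and sizes below are illustrative only):

/*
 * Toy model of the "mem=" handling: enforce the limit, then add the kernel
 * image range back so it remains accessible even when it was loaded above
 * the limit. Numbers are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define MiB (1ULL << 20)

struct toy_range { uint64_t start, end; };

int main(void)
{
    uint64_t limit          = 512 * MiB;                    /* mem=512M     */
    struct toy_range ram    = { 0, 2048 * MiB };
    struct toy_range kernel = { 1024 * MiB, 1056 * MiB };   /* loaded high  */

    /* like memblock_enforce_memory_limit(): keep the first 'limit' bytes */
    struct toy_range kept = { ram.start, ram.start + limit };

    printf("after limit: [%#llx, %#llx)\n",
           (unsigned long long)kept.start, (unsigned long long)kept.end);

    /* like memblock_add(__pa(_text), _end - _text): re-add the image */
    printf("re-added:    [%#llx, %#llx)  (kernel image)\n",
           (unsigned long long)kernel.start, (unsigned long long)kernel.end);
    return 0;
}

The first-stage clipping works the same way: the removal point is the larger of the coverable end and __pa(_end), so the image itself can never be trimmed away.
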
index cf038c7d9fa994c7d86e05920ffa8961aecc4ad8..66c246871d2e360f36748375d1a0b9e59cd3024e 100644 (file)
 #include <linux/memblock.h>
 #include <linux/start_kernel.h>
 
+#include <asm/mmu_context.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include <asm/sections.h>
 #include <asm/tlbflush.h>
 
 static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
@@ -32,7 +35,7 @@ static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
        if (pmd_none(*pmd))
                pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
 
-       pte = pte_offset_kernel(pmd, addr);
+       pte = pte_offset_kimg(pmd, addr);
        do {
                next = addr + PAGE_SIZE;
                set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
@@ -50,7 +53,7 @@ static void __init kasan_early_pmd_populate(pud_t *pud,
        if (pud_none(*pud))
                pud_populate(&init_mm, pud, kasan_zero_pmd);
 
-       pmd = pmd_offset(pud, addr);
+       pmd = pmd_offset_kimg(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                kasan_early_pte_populate(pmd, addr, next);
@@ -67,7 +70,7 @@ static void __init kasan_early_pud_populate(pgd_t *pgd,
        if (pgd_none(*pgd))
                pgd_populate(&init_mm, pgd, kasan_zero_pud);
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_offset_kimg(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
                kasan_early_pmd_populate(pud, addr, next);
@@ -96,6 +99,21 @@ asmlinkage void __init kasan_early_init(void)
        kasan_map_early_shadow();
 }
 
+/*
+ * Copy the current shadow region into a new pgdir.
+ */
+void __init kasan_copy_shadow(pgd_t *pgdir)
+{
+       pgd_t *pgd, *pgd_new, *pgd_end;
+
+       pgd = pgd_offset_k(KASAN_SHADOW_START);
+       pgd_end = pgd_offset_k(KASAN_SHADOW_END);
+       pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+       do {
+               set_pgd(pgd_new, *pgd);
+       } while (pgd++, pgd_new++, pgd != pgd_end);
+}
+
 static void __init clear_pgds(unsigned long start,
                        unsigned long end)
 {
@@ -108,18 +126,14 @@ static void __init clear_pgds(unsigned long start,
                set_pgd(pgd_offset_k(start), __pgd(0));
 }
 
-static void __init cpu_set_ttbr1(unsigned long ttbr1)
-{
-       asm(
-       "       msr     ttbr1_el1, %0\n"
-       "       isb"
-       :
-       : "r" (ttbr1));
-}
-
 void __init kasan_init(void)
 {
+       u64 kimg_shadow_start, kimg_shadow_end;
        struct memblock_region *reg;
+       int i;
+
+       kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
+       kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);
 
        /*
         * We are going to perform proper setup of shadow memory.
@@ -129,13 +143,30 @@ void __init kasan_init(void)
         * setup will be finished.
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
-       cpu_set_ttbr1(__pa(tmp_pg_dir));
-       flush_tlb_all();
+       dsb(ishst);
+       cpu_replace_ttbr1(tmp_pg_dir);
 
        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
 
+       vmemmap_populate(kimg_shadow_start, kimg_shadow_end, NUMA_NO_NODE);
+
+       /*
+        * vmemmap_populate() has populated the shadow region that covers the
+        * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
+        * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
+        * kasan_populate_zero_shadow() from replacing the PMD block mappings
+        * with PMD table mappings at the edges of the shadow region for the
+        * kernel image.
+        */
+       if (ARM64_SWAPPER_USES_SECTION_MAPS) {
+               kimg_shadow_start = round_down(kimg_shadow_start,
+                                              SWAPPER_BLOCK_SIZE);
+               kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
+       }
        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
-                       kasan_mem_to_shadow((void *)MODULES_VADDR));
+                                  (void *)kimg_shadow_start);
+       kasan_populate_zero_shadow((void *)kimg_shadow_end,
+                                  kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
        for_each_memblock(memory, reg) {
                void *start = (void *)__phys_to_virt(reg->base);
@@ -155,9 +186,16 @@ void __init kasan_init(void)
                                pfn_to_nid(virt_to_pfn(start)));
        }
 
+       /*
+        * KAsan may reuse the contents of kasan_zero_pte directly, so we
+        * should make sure that it maps the zero page read-only.
+        */
+       for (i = 0; i < PTRS_PER_PTE; i++)
+               set_pte(&kasan_zero_pte[i],
+                       pfn_pte(virt_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+
        memset(kasan_zero_page, 0, PAGE_SIZE);
-       cpu_set_ttbr1(__pa(swapper_pg_dir));
-       flush_tlb_all();
+       cpu_replace_ttbr1(swapper_pg_dir);
 
        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
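
kasan_init() now populates real shadow for the kernel image via vmemmap_populate(), and when section maps are in use the image's shadow range is rounded out to SWAPPER_BLOCK_SIZE so the zero-shadow population on either side cannot force those block mappings to be split. A standalone sketch of the shadow arithmetic and rounding; the scale, offset, block size and addresses are stand-in values, not the kernel's configuration:

/*
 * Toy version of the shadow-range rounding: shadow(addr) = (addr >> SCALE)
 * + OFFSET, and the image's shadow range is widened to block-size bounds so
 * neighbouring zero-shadow mappings do not split the blocks at its edges.
 */
#include <stdint.h>
#include <stdio.h>

#define SCALE   3                        /* one shadow byte per 8 bytes */
#define OFFSET  0xdfff200000000000ULL    /* illustrative shadow offset  */
#define BLOCK   (2ULL << 20)             /* 2 MiB block/section size    */

static uint64_t shadow(uint64_t addr)
{
    return (addr >> SCALE) + OFFSET;
}

static uint64_t round_down_u64(uint64_t x, uint64_t a) { return x & ~(a - 1); }
static uint64_t round_up_u64(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }

int main(void)
{
    uint64_t text = 0xffffff8008080000ULL;   /* illustrative image start */
    uint64_t end  = 0xffffff8009000000ULL;   /* illustrative image end   */

    uint64_t s = shadow(text), e = shadow(end);

    printf("shadow:  [%#llx, %#llx)\n",
           (unsigned long long)s, (unsigned long long)e);
    printf("rounded: [%#llx, %#llx)\n",
           (unsigned long long)round_down_u64(s, BLOCK),
           (unsigned long long)round_up_u64(e, BLOCK));
    return 0;
}
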
index 58faeaa7fbdc6c73a73319e15c18bbd2a0948f06..c3e5df62671e6c4eb8c4a4502dd16f0162224256 100644 (file)
 #include <linux/slab.h>
 #include <linux/stop_machine.h>
 
+#include <asm/barrier.h>
 #include <asm/cputype.h>
 #include <asm/fixmap.h>
+#include <asm/kasan.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 
 u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
 
+u64 kimage_voffset __read_mostly;
+EXPORT_SYMBOL(kimage_voffset);
+
 /*
  * Empty_zero_page is a special page that is used for zero-initialized data
  * and COW.
  */
-struct page *empty_zero_page;
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
 
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
+static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
+
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                              unsigned long size, pgprot_t vma_prot)
 {
@@ -62,16 +71,30 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 }
 EXPORT_SYMBOL(phys_mem_access_prot);
 
-static void __init *early_alloc(unsigned long sz)
+static phys_addr_t __init early_pgtable_alloc(void)
 {
        phys_addr_t phys;
        void *ptr;
 
-       phys = memblock_alloc(sz, sz);
+       phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        BUG_ON(!phys);
-       ptr = __va(phys);
-       memset(ptr, 0, sz);
-       return ptr;
+
+       /*
+        * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
+        * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
+        * any level of table.
+        */
+       ptr = pte_set_fixmap(phys);
+
+       memset(ptr, 0, PAGE_SIZE);
+
+       /*
+        * Implicit barriers also ensure the zeroed page is visible to the page
+        * table walker
+        */
+       pte_clear_fixmap();
+
+       return phys;
 }
 
 /*
@@ -95,24 +118,30 @@ static void split_pmd(pmd_t *pmd, pte_t *pte)
 static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  pgprot_t prot,
-                                 void *(*alloc)(unsigned long size))
+                                 phys_addr_t (*pgtable_alloc)(void))
 {
        pte_t *pte;
 
        if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-               pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
+               phys_addr_t pte_phys;
+               BUG_ON(!pgtable_alloc);
+               pte_phys = pgtable_alloc();
+               pte = pte_set_fixmap(pte_phys);
                if (pmd_sect(*pmd))
                        split_pmd(pmd, pte);
-               __pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
+               __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
                flush_tlb_all();
+               pte_clear_fixmap();
        }
        BUG_ON(pmd_bad(*pmd));
 
-       pte = pte_offset_kernel(pmd, addr);
+       pte = pte_set_fixmap_offset(pmd, addr);
        do {
                set_pte(pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       pte_clear_fixmap();
 }
 
 static void split_pud(pud_t *old_pud, pmd_t *pmd)
@@ -127,10 +156,29 @@ static void split_pud(pud_t *old_pud, pmd_t *pmd)
        } while (pmd++, i++, i < PTRS_PER_PMD);
 }
 
-static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
-                                 unsigned long addr, unsigned long end,
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+
+       /*
+        * If debug_page_alloc is enabled we must map the linear map
+        * using pages. However, other mappings created by
+        * create_mapping_noalloc must use sections in some cases. Allow
+        * sections to be used in those cases, where no pgtable_alloc
+        * function is provided.
+        */
+       return !pgtable_alloc || !debug_pagealloc_enabled();
+}
+#else
+static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
+{
+       return true;
+}
+#endif
+
+static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 void *(*alloc)(unsigned long size))
+                                 phys_addr_t (*pgtable_alloc)(void))
 {
        pmd_t *pmd;
        unsigned long next;
@@ -139,7 +187,10 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
         * Check for initial section mappings in the pgd/pud and remove them.
         */
        if (pud_none(*pud) || pud_sect(*pud)) {
-               pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
+               phys_addr_t pmd_phys;
+               BUG_ON(!pgtable_alloc);
+               pmd_phys = pgtable_alloc();
+               pmd = pmd_set_fixmap(pmd_phys);
                if (pud_sect(*pud)) {
                        /*
                         * need to have the 1G of mappings continue to be
@@ -147,16 +198,18 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                         */
                        split_pud(pud, pmd);
                }
-               pud_populate(mm, pud, pmd);
+               __pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
                flush_tlb_all();
+               pmd_clear_fixmap();
        }
        BUG_ON(pud_bad(*pud));
 
-       pmd = pmd_offset(pud, addr);
+       pmd = pmd_set_fixmap_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
                /* try section mapping first */
-               if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+               if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+                     block_mappings_allowed(pgtable_alloc)) {
                        pmd_t old_pmd =*pmd;
                        set_pmd(pmd, __pmd(phys |
                                           pgprot_val(mk_sect_prot(prot))));
@@ -167,17 +220,19 @@ static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
                        if (!pmd_none(old_pmd)) {
                                flush_tlb_all();
                                if (pmd_table(old_pmd)) {
-                                       phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
+                                       phys_addr_t table = pmd_page_paddr(old_pmd);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
                        alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
-                                      prot, alloc);
+                                      prot, pgtable_alloc);
                }
                phys += next - addr;
        } while (pmd++, addr = next, addr != end);
+
+       pmd_clear_fixmap();
 }
 
 static inline bool use_1G_block(unsigned long addr, unsigned long next,
@@ -192,28 +247,30 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
        return true;
 }
 
-static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
-                                 unsigned long addr, unsigned long end,
+static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
                                  phys_addr_t phys, pgprot_t prot,
-                                 void *(*alloc)(unsigned long size))
+                                 phys_addr_t (*pgtable_alloc)(void))
 {
        pud_t *pud;
        unsigned long next;
 
        if (pgd_none(*pgd)) {
-               pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
-               pgd_populate(mm, pgd, pud);
+               phys_addr_t pud_phys;
+               BUG_ON(!pgtable_alloc);
+               pud_phys = pgtable_alloc();
+               __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
        }
        BUG_ON(pgd_bad(*pgd));
 
-       pud = pud_offset(pgd, addr);
+       pud = pud_set_fixmap_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
 
                /*
                 * For 4K granule only, attempt to put down a 1GB block
                 */
-               if (use_1G_block(addr, next, phys)) {
+               if (use_1G_block(addr, next, phys) &&
+                   block_mappings_allowed(pgtable_alloc)) {
                        pud_t old_pud = *pud;
                        set_pud(pud, __pud(phys |
                                           pgprot_val(mk_sect_prot(prot))));
@@ -228,26 +285,28 @@ static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
                        if (!pud_none(old_pud)) {
                                flush_tlb_all();
                                if (pud_table(old_pud)) {
-                                       phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
+                                       phys_addr_t table = pud_page_paddr(old_pud);
                                        if (!WARN_ON_ONCE(slab_is_available()))
                                                memblock_free(table, PAGE_SIZE);
                                }
                        }
                } else {
-                       alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
+                       alloc_init_pmd(pud, addr, next, phys, prot,
+                                      pgtable_alloc);
                }
                phys += next - addr;
        } while (pud++, addr = next, addr != end);
+
+       pud_clear_fixmap();
 }
 
 /*
  * Create the page directory entries and any necessary page tables for the
  * mapping specified by 'md'.
  */
-static void  __create_mapping(struct mm_struct *mm, pgd_t *pgd,
-                                   phys_addr_t phys, unsigned long virt,
+static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
                                    phys_addr_t size, pgprot_t prot,
-                                   void *(*alloc)(unsigned long size))
+                                   phys_addr_t (*pgtable_alloc)(void))
 {
        unsigned long addr, length, end, next;
 
@@ -265,22 +324,35 @@ static void  __create_mapping(struct mm_struct *mm, pgd_t *pgd,
        end = addr + length;
        do {
                next = pgd_addr_end(addr, end);
-               alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
+               alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
                phys += next - addr;
        } while (pgd++, addr = next, addr != end);
 }
 
-static void *late_alloc(unsigned long size)
+static phys_addr_t late_pgtable_alloc(void)
 {
-       void *ptr;
-
-       BUG_ON(size > PAGE_SIZE);
-       ptr = (void *)__get_free_page(PGALLOC_GFP);
+       void *ptr = (void *)__get_free_page(PGALLOC_GFP);
        BUG_ON(!ptr);
-       return ptr;
+
+       /* Ensure the zeroed page is visible to the page table walker */
+       dsb(ishst);
+       return __pa(ptr);
+}
+
+static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
+                                unsigned long virt, phys_addr_t size,
+                                pgprot_t prot,
+                                phys_addr_t (*alloc)(void))
+{
+       init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
 }
 
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+/*
+ * This function can only be used to modify existing table entries,
+ * without allocating new levels of table. Note that this permits the
+ * creation of new section or page entries.
+ */
+static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
                                  phys_addr_t size, pgprot_t prot)
 {
        if (virt < VMALLOC_START) {
@@ -288,16 +360,16 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
                        &phys, virt);
                return;
        }
-       __create_mapping(&init_mm, pgd_offset_k(virt), phys, virt,
-                        size, prot, early_alloc);
+       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+                            NULL);
 }
 
 void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
                               unsigned long virt, phys_addr_t size,
                               pgprot_t prot)
 {
-       __create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
-                               late_alloc);
+       __create_pgd_mapping(mm->pgd, phys, virt, size, prot,
+                            late_pgtable_alloc);
 }
 
 static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -309,69 +381,48 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
                return;
        }
 
-       return __create_mapping(&init_mm, pgd_offset_k(virt),
-                               phys, virt, size, prot, late_alloc);
+       __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
+                            late_pgtable_alloc);
 }
 
-#ifdef CONFIG_DEBUG_RODATA
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
+static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
 {
+
+       unsigned long kernel_start = __pa(_stext);
+       unsigned long kernel_end = __pa(_etext);
+
        /*
-        * Set up the executable regions using the existing section mappings
-        * for now. This will get more fine grained later once all memory
-        * is mapped
+        * Take care not to create a writable alias for the
+        * read-only text and rodata sections of the kernel image.
         */
-       unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
-       unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
-
-       if (end < kernel_x_start) {
-               create_mapping(start, __phys_to_virt(start),
-                       end - start, PAGE_KERNEL);
-       } else if (start >= kernel_x_end) {
-               create_mapping(start, __phys_to_virt(start),
-                       end - start, PAGE_KERNEL);
-       } else {
-               if (start < kernel_x_start)
-                       create_mapping(start, __phys_to_virt(start),
-                               kernel_x_start - start,
-                               PAGE_KERNEL);
-               create_mapping(kernel_x_start,
-                               __phys_to_virt(kernel_x_start),
-                               kernel_x_end - kernel_x_start,
-                               PAGE_KERNEL_EXEC);
-               if (kernel_x_end < end)
-                       create_mapping(kernel_x_end,
-                               __phys_to_virt(kernel_x_end),
-                               end - kernel_x_end,
-                               PAGE_KERNEL);
+
+       /* No overlap with the kernel text */
+       if (end < kernel_start || start >= kernel_end) {
+               __create_pgd_mapping(pgd, start, __phys_to_virt(start),
+                                    end - start, PAGE_KERNEL,
+                                    early_pgtable_alloc);
+               return;
        }
 
+       /*
+        * This block overlaps the kernel text mapping. Map the portion(s) which
+        * don't overlap.
+        */
+       if (start < kernel_start)
+               __create_pgd_mapping(pgd, start,
+                                    __phys_to_virt(start),
+                                    kernel_start - start, PAGE_KERNEL,
+                                    early_pgtable_alloc);
+       if (kernel_end < end)
+               __create_pgd_mapping(pgd, kernel_end,
+                                    __phys_to_virt(kernel_end),
+                                    end - kernel_end, PAGE_KERNEL,
+                                    early_pgtable_alloc);
 }
-#else
-static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
-{
-       create_mapping(start, __phys_to_virt(start), end - start,
-                       PAGE_KERNEL_EXEC);
-}
-#endif
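
__map_memblock() now carves each memory bank around [__pa(_stext), __pa(_etext)) so the linear map never gains a writable alias of the kernel text; only the portions of the bank below and above that window are mapped here, while the text itself gets its own mapping via map_kernel() further down. A standalone model of the interval splitting with made-up addresses:

/*
 * Standalone model of the split: map the parts of [start, end) that do not
 * overlap the kernel text window [ktext, ketext). Addresses are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define MiB (1ULL << 20)

static void toy_map(uint64_t start, uint64_t end, const char *what)
{
    printf("map [%#llx, %#llx)  %s\n",
           (unsigned long long)start, (unsigned long long)end, what);
}

static void toy_map_memblock(uint64_t start, uint64_t end,
                             uint64_t ktext, uint64_t ketext)
{
    if (end < ktext || start >= ketext) {   /* no overlap with the text */
        toy_map(start, end, "whole bank, PAGE_KERNEL");
        return;
    }
    if (start < ktext)                      /* portion below the text   */
        toy_map(start, ktext, "below kernel text");
    if (ketext < end)                       /* portion above the text   */
        toy_map(ketext, end, "above kernel text");
    /* [ktext, ketext) itself is mapped separately, with stricter perms. */
}

int main(void)
{
    toy_map_memblock(64 * MiB, 512 * MiB, 128 * MiB, 160 * MiB);
    toy_map_memblock(512 * MiB, 1024 * MiB, 128 * MiB, 160 * MiB);
    return 0;
}
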
 
-static void __init map_mem(void)
+static void __init map_mem(pgd_t *pgd)
 {
        struct memblock_region *reg;
-       phys_addr_t limit;
-
-       /*
-        * Temporarily limit the memblock range. We need to do this as
-        * create_mapping requires puds, pmds and ptes to be allocated from
-        * memory addressable from the initial direct kernel mapping.
-        *
-        * The initial direct kernel mapping, located at swapper_pg_dir, gives
-        * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps,
-        * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
-        * per Documentation/arm64/booting.txt).
-        */
-       limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
-       memblock_set_current_limit(limit);
 
        /* map all the memory banks */
        for_each_memblock(memory, reg) {
@@ -383,69 +434,87 @@ static void __init map_mem(void)
                if (memblock_is_nomap(reg))
                        continue;
 
-               if (ARM64_SWAPPER_USES_SECTION_MAPS) {
-                       /*
-                        * For the first memory bank align the start address and
-                        * current memblock limit to prevent create_mapping() from
-                        * allocating pte page tables from unmapped memory. With
-                        * the section maps, if the first block doesn't end on section
-                        * size boundary, create_mapping() will try to allocate a pte
-                        * page, which may be returned from an unmapped area.
-                        * When section maps are not used, the pte page table for the
-                        * current limit is already present in swapper_pg_dir.
-                        */
-                       if (start < limit)
-                               start = ALIGN(start, SECTION_SIZE);
-                       if (end < limit) {
-                               limit = end & SECTION_MASK;
-                               memblock_set_current_limit(limit);
-                       }
-               }
-               __map_memblock(start, end);
+               __map_memblock(pgd, start, end);
        }
-
-       /* Limit no longer required. */
-       memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }
 
-static void __init fixup_executable(void)
+void mark_rodata_ro(void)
 {
-#ifdef CONFIG_DEBUG_RODATA
-       /* now that we are actually fully mapped, make the start/end more fine grained */
-       if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
-               unsigned long aligned_start = round_down(__pa(_stext),
-                                                        SWAPPER_BLOCK_SIZE);
+       if (!IS_ENABLED(CONFIG_DEBUG_RODATA))
+               return;
 
-               create_mapping(aligned_start, __phys_to_virt(aligned_start),
-                               __pa(_stext) - aligned_start,
-                               PAGE_KERNEL);
-       }
+       create_mapping_late(__pa(_stext), (unsigned long)_stext,
+                               (unsigned long)_etext - (unsigned long)_stext,
+                               PAGE_KERNEL_ROX);
+}
 
-       if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
-               unsigned long aligned_end = round_up(__pa(__init_end),
-                                                         SWAPPER_BLOCK_SIZE);
-               create_mapping(__pa(__init_end), (unsigned long)__init_end,
-                               aligned_end - __pa(__init_end),
-                               PAGE_KERNEL);
-       }
-#endif
+void fixup_init(void)
+{
+       /*
+        * Unmap the __init region but leave the VM area in place. This
+        * prevents the region from being reused for kernel modules, which
+        * is not supported by kallsyms.
+        */
+       unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
 }
 
-#ifdef CONFIG_DEBUG_RODATA
-void mark_rodata_ro(void)
+static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
+                                   pgprot_t prot, struct vm_struct *vma)
 {
-       create_mapping_late(__pa(_stext), (unsigned long)_stext,
-                               (unsigned long)_etext - (unsigned long)_stext,
-                               PAGE_KERNEL_ROX);
+       phys_addr_t pa_start = __pa(va_start);
+       unsigned long size = va_end - va_start;
+
+       BUG_ON(!PAGE_ALIGNED(pa_start));
+       BUG_ON(!PAGE_ALIGNED(size));
 
+       __create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
+                            early_pgtable_alloc);
+
+       vma->addr       = va_start;
+       vma->phys_addr  = pa_start;
+       vma->size       = size;
+       vma->flags      = VM_MAP;
+       vma->caller     = map_kernel_chunk;
+
+       vm_area_add_early(vma);
 }
-#endif
 
-void fixup_init(void)
+/*
+ * Create fine-grained mappings for the kernel.
+ */
+static void __init map_kernel(pgd_t *pgd)
 {
-       create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
-                       (unsigned long)__init_end - (unsigned long)__init_begin,
-                       PAGE_KERNEL);
+       static struct vm_struct vmlinux_text, vmlinux_init, vmlinux_data;
+
+       map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
+       map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
+                        &vmlinux_init);
+       map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);
+
+       if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
+               /*
+                * The fixmap falls in a separate pgd to the kernel, and doesn't
+                * live in the carveout for the swapper_pg_dir. We can simply
+                * re-use the existing dir for the fixmap.
+                */
+               set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
+                       *pgd_offset_k(FIXADDR_START));
+       } else if (CONFIG_PGTABLE_LEVELS > 3) {
+               /*
+                * The fixmap shares its top level pgd entry with the kernel
+                * mapping. This can really only occur when we are running
+                * with 16k/4 levels, so we can simply reuse the pud level
+                * entry instead.
+                */
+               BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+               set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
+                       __pud(__pa(bm_pmd) | PUD_TYPE_TABLE));
+               pud_clear_fixmap();
+       } else {
+               BUG();
+       }
+
+       kasan_copy_shadow(pgd);
 }
 
 /*
@@ -454,28 +523,35 @@ void fixup_init(void)
  */
 void __init paging_init(void)
 {
-       void *zero_page;
-
-       map_mem();
-       fixup_executable();
+       phys_addr_t pgd_phys = early_pgtable_alloc();
+       pgd_t *pgd = pgd_set_fixmap(pgd_phys);
 
-       /* allocate the zero page. */
-       zero_page = early_alloc(PAGE_SIZE);
+       map_kernel(pgd);
+       map_mem(pgd);
 
-       bootmem_init();
-
-       empty_zero_page = virt_to_page(zero_page);
+       /*
+        * We want to reuse the original swapper_pg_dir so we don't have to
+        * communicate the new address to non-coherent secondaries in
+        * secondary_entry, and so cpu_switch_mm can generate the address with
+        * adrp+add rather than a load from some global variable.
+        *
+        * To do this we need to go via a temporary pgd.
+        */
+       cpu_replace_ttbr1(__va(pgd_phys));
+       memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
+       cpu_replace_ttbr1(swapper_pg_dir);
 
-       /* Ensure the zero page is visible to the page table walker */
-       dsb(ishst);
+       pgd_clear_fixmap();
+       memblock_free(pgd_phys, PAGE_SIZE);
 
        /*
-        * TTBR0 is only used for the identity mapping at this stage. Make it
-        * point to zero page to avoid speculatively fetching new entries.
+        * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
+        * allocated with it.
         */
-       cpu_set_reserved_ttbr0();
-       local_flush_tlb_all();
-       cpu_set_default_tcr_t0sz();
+       memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
+                     SWAPPER_DIR_SIZE - PAGE_SIZE);
+
+       bootmem_init();
 }
 
 /*
@@ -562,21 +638,13 @@ void vmemmap_free(unsigned long start, unsigned long end)
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
-#if CONFIG_PGTABLE_LEVELS > 2
-static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
-#endif
-#if CONFIG_PGTABLE_LEVELS > 3
-static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
-#endif
-
 static inline pud_t * fixmap_pud(unsigned long addr)
 {
        pgd_t *pgd = pgd_offset_k(addr);
 
        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
 
-       return pud_offset(pgd, addr);
+       return pud_offset_kimg(pgd, addr);
 }
 
 static inline pmd_t * fixmap_pmd(unsigned long addr)
@@ -585,16 +653,12 @@ static inline pmd_t * fixmap_pmd(unsigned long addr)
 
        BUG_ON(pud_none(*pud) || pud_bad(*pud));
 
-       return pmd_offset(pud, addr);
+       return pmd_offset_kimg(pud, addr);
 }
 
 static inline pte_t * fixmap_pte(unsigned long addr)
 {
-       pmd_t *pmd = fixmap_pmd(addr);
-
-       BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
-
-       return pte_offset_kernel(pmd, addr);
+       return &bm_pte[pte_index(addr)];
 }
 
 void __init early_fixmap_init(void)
@@ -605,15 +669,25 @@ void __init early_fixmap_init(void)
        unsigned long addr = FIXADDR_START;
 
        pgd = pgd_offset_k(addr);
-       pgd_populate(&init_mm, pgd, bm_pud);
-       pud = pud_offset(pgd, addr);
+       if (CONFIG_PGTABLE_LEVELS > 3 && !pgd_none(*pgd)) {
+               /*
+                * We only end up here if the kernel mapping and the fixmap
+                * share the top level pgd entry, which should only happen on
+                * 16k/4 levels configurations.
+                */
+               BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
+               pud = pud_offset_kimg(pgd, addr);
+       } else {
+               pgd_populate(&init_mm, pgd, bm_pud);
+               pud = fixmap_pud(addr);
+       }
        pud_populate(&init_mm, pud, bm_pmd);
-       pmd = pmd_offset(pud, addr);
+       pmd = fixmap_pmd(addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);
 
        /*
         * The boot-ioremap range spans multiple pmds, for which
-        * we are not preparted:
+        * we are not prepared:
         */
        BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
                     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
@@ -673,7 +747,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
        /*
         * Make sure that the FDT region can be mapped without the need to
         * allocate additional translation table pages, so that it is safe
-        * to call create_mapping() this early.
+        * to call create_mapping_noalloc() this early.
         *
         * On 64k pages, the FDT will be mapped using PTEs, so we need to
         * be in the same PMD as the rest of the fixmap.
@@ -689,8 +763,8 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
        dt_virt = (void *)dt_virt_base + offset;
 
        /* map the first chunk so we can read the size from the header */
-       create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
-                      SWAPPER_BLOCK_SIZE, prot);
+       create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+                       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);
 
        if (fdt_check_header(dt_virt) != 0)
                return NULL;
@@ -700,10 +774,51 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
                return NULL;
 
        if (offset + size > SWAPPER_BLOCK_SIZE)
-               create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+               create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
                               round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
 
        memblock_reserve(dt_phys, size);
 
        return dt_virt;
 }
+
+int __init arch_ioremap_pud_supported(void)
+{
+       /* only 4k granule supports level 1 block mappings */
+       return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+       return 1;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+       BUG_ON(phys & ~PUD_MASK);
+       set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+       return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+       BUG_ON(phys & ~PMD_MASK);
+       set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+       return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+       if (!pud_sect(*pud))
+               return 0;
+       pud_clear(pud);
+       return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+       if (!pmd_sect(*pmd))
+               return 0;
+       pmd_clear(pmd);
+       return 1;
+}
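
fixmap_remap_fdt(), earlier in this file, maps the device tree in two steps because the blob's size is unknown until its header is readable: the first SWAPPER_BLOCK_SIZE chunk is mapped, the size is read from the header, and the mapping is then extended to the rounded-up total. A toy model of that peek-then-extend pattern over an in-memory buffer; the block size and header layout are assumptions made for the sketch:

/*
 * Toy model of the two-step FDT mapping: "map" one block, read the size
 * field from the header, then extend the mapping to the rounded-up size.
 * Here "mapping" is just bounds bookkeeping over an in-memory blob.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOCK (2UL << 20)   /* stand-in for SWAPPER_BLOCK_SIZE */

struct toy_header {
    uint32_t magic;
    uint32_t totalsize;
};

static unsigned long round_up_ul(unsigned long x, unsigned long a)
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    static unsigned char blob[4 * (2UL << 20)];
    struct toy_header hdr = { 0xd00dfeed, 3 * (2UL << 20) + 123 };

    memcpy(blob, &hdr, sizeof(hdr));

    unsigned long mapped = BLOCK;          /* step 1: first block only */
    struct toy_header peek;
    memcpy(&peek, blob, sizeof(peek));     /* step 2: read the header  */

    if (peek.totalsize > mapped)           /* step 3: extend if needed */
        mapped = round_up_ul(peek.totalsize, BLOCK);

    printf("blob is %u bytes, mapped %lu bytes\n",
           (unsigned)peek.totalsize, mapped);
    return 0;
}
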
index 3571c7309c5e79f0d2d3e20986a581ffb6454dae..ca6d268e3313229b0941ce7d33439c7a4c861120 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/vmalloc.h>
 
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -36,14 +37,32 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
        return 0;
 }
 
+/*
+ * This function assumes that the range is mapped with PAGE_SIZE pages.
+ */
+static int __change_memory_common(unsigned long start, unsigned long size,
+                               pgprot_t set_mask, pgprot_t clear_mask)
+{
+       struct page_change_data data;
+       int ret;
+
+       data.set_mask = set_mask;
+       data.clear_mask = clear_mask;
+
+       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+                                       &data);
+
+       flush_tlb_kernel_range(start, start + size);
+       return ret;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
 {
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE*numpages;
        unsigned long end = start + size;
-       int ret;
-       struct page_change_data data;
+       struct vm_struct *area;
 
        if (!PAGE_ALIGNED(addr)) {
                start &= PAGE_MASK;
@@ -51,20 +70,29 @@ static int change_memory_common(unsigned long addr, int numpages,
                WARN_ON_ONCE(1);
        }
 
-       if (start < MODULES_VADDR || start >= MODULES_END)
-               return -EINVAL;
-
-       if (end < MODULES_VADDR || end >= MODULES_END)
+       /*
+        * Kernel VA mappings are always live, and splitting live section
+        * mappings into page mappings may cause TLB conflicts. This means
+        * we have to ensure that changing the permission bits of the range
+        * we are operating on does not result in such splitting.
+        *
+        * Let's restrict ourselves to mappings created by vmalloc (or vmap).
+        * Those are guaranteed to consist entirely of page mappings, and
+        * splitting is never needed.
+        *
+        * So check whether the [addr, addr + size) interval is entirely
+        * covered by precisely one VM area that has the VM_ALLOC flag set.
+        */
+       area = find_vm_area((void *)addr);
+       if (!area ||
+           end > (unsigned long)area->addr + area->size ||
+           !(area->flags & VM_ALLOC))
                return -EINVAL;
 
-       data.set_mask = set_mask;
-       data.clear_mask = clear_mask;
-
-       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
-                                       &data);
+       if (!numpages)
+               return 0;
 
-       flush_tlb_kernel_range(start, end);
-       return ret;
+       return __change_memory_common(start, size, set_mask, clear_mask);
 }
 
 int set_memory_ro(unsigned long addr, int numpages)
@@ -96,3 +124,19 @@ int set_memory_x(unsigned long addr, int numpages)
                                        __pgprot(PTE_PXN));
 }
 EXPORT_SYMBOL_GPL(set_memory_x);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       unsigned long addr = (unsigned long) page_address(page);
+
+       if (enable)
+               __change_memory_common(addr, PAGE_SIZE * numpages,
+                                       __pgprot(PTE_VALID),
+                                       __pgprot(0));
+       else
+               __change_memory_common(addr, PAGE_SIZE * numpages,
+                                       __pgprot(0),
+                                       __pgprot(PTE_VALID));
+}
+#endif
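
change_memory_common() now refuses requests that are not wholly contained in a single vmalloc'ed (VM_ALLOC) area, since those are guaranteed to be page-mapped and never need a live block mapping split, and the new __change_memory_common() helper is also what __kernel_map_pages() uses to set or clear PTE_VALID for DEBUG_PAGEALLOC. A toy sketch of the containment check plus the set/clear-mask update; the area and pte types and the bit values are made up:

/*
 * Toy model of the two pieces above: (1) the request must be contained in
 * exactly one page-mapped area, (2) the update clears clear_mask and sets
 * set_mask on each page's entry.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE        4096u
#define TOY_PTE_VALID   (1u << 0)
#define TOY_PTE_RDONLY  (1u << 7)

struct toy_area { unsigned long addr; unsigned long size; };

static int contained(const struct toy_area *a, unsigned long addr,
                     unsigned long size)
{
    return a && addr >= a->addr && addr + size <= a->addr + a->size;
}

static void change_range(uint32_t *ptes, unsigned long npages,
                         uint32_t set_mask, uint32_t clear_mask)
{
    for (unsigned long i = 0; i < npages; i++)
        ptes[i] = (ptes[i] & ~clear_mask) | set_mask;
}

int main(void)
{
    struct toy_area area = { 0x100000, 8 * TOY_PAGE };
    uint32_t ptes[8];

    for (int i = 0; i < 8; i++)
        ptes[i] = TOY_PTE_VALID;

    if (!contained(&area, 0x100000, 2 * TOY_PAGE))
        return 1;

    /* e.g. a read-only transition: set RDONLY, clear nothing */
    change_range(ptes, 2, TOY_PTE_RDONLY, 0);
    /* e.g. a DEBUG_PAGEALLOC-style unmap: clear VALID */
    change_range(ptes, 2, 0, TOY_PTE_VALID);

    printf("pte[0]=%#x pte[2]=%#x\n", ptes[0], ptes[2]);
    return 0;
}
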
index 146bd99a7532bcc6f53ad509a839561640eeaf3e..e6a30e1268a8c7570c8298797d7cf27dcb0d649f 100644 (file)
        b.lo    9998b
        dsb     \domain
        .endm
+
+/*
+ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
+ */
+       .macro  reset_pmuserenr_el0, tmpreg
+       mrs     \tmpreg, id_aa64dfr0_el1        // Check ID_AA64DFR0_EL1 PMUVer
+       sbfx    \tmpreg, \tmpreg, #8, #4
+       cmp     \tmpreg, #1                     // Skip if no PMU present
+       b.lt    9000f
+       msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
+9000:
+       .endm
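
The reset_pmuserenr_el0 macro reads ID_AA64DFR0_EL1 and only clears PMUSERENR_EL0 when the PMUVer field (bits [11:8], extracted as a signed value by sbfx) reports an architected PMU: 0 means none and 0xf sign-extends to -1, so both skip the write, presumably because the register may not be usable on such cores. A standalone sketch of the signed field extraction and the decision; the register values below are made up:

/*
 * Standalone model of the PMUVer check: extract bits [11:8] as a signed
 * 4-bit field (what sbfx does) and only "write" PMUSERENR_EL0 when the
 * result is >= 1.
 */
#include <stdint.h>
#include <stdio.h>

static int sbfx4(uint64_t reg, unsigned lsb)
{
    int v = (reg >> lsb) & 0xf;

    return (v & 0x8) ? v - 16 : v;   /* sign-extend the 4-bit field */
}

static void maybe_reset_pmuserenr(uint64_t id_aa64dfr0)
{
    int pmuver = sbfx4(id_aa64dfr0, 8);

    if (pmuver < 1) {
        printf("PMUVer=%d: no architected PMU, skip the write\n", pmuver);
        return;
    }
    printf("PMUVer=%d: msr pmuserenr_el0, xzr\n", pmuver);
}

int main(void)
{
    maybe_reset_pmuserenr(0x0000000000000100ULL);   /* PMUVer = 1         */
    maybe_reset_pmuserenr(0x0000000000000000ULL);   /* PMUVer = 0         */
    maybe_reset_pmuserenr(0x0000000000000f00ULL);   /* PMUVer = 0xf -> -1 */
    return 0;
}
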
index a3d867e723b4f0c5c23283c89264d4a2f7f51561..0c19534a901e616ecc5fe508ce205dc0de8fe0f4 100644 (file)
@@ -117,7 +117,7 @@ ENTRY(cpu_do_resume)
         */
        ubfx    x11, x11, #1, #1
        msr     oslar_el1, x11
-       msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
+       reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
        mov     x0, x12
        dsb     nsh             // Make sure local tlb invalidation completed
        isb
@@ -140,6 +140,34 @@ ENTRY(cpu_do_switch_mm)
        ret
 ENDPROC(cpu_do_switch_mm)
 
+       .pushsection ".idmap.text", "ax"
+/*
+ * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+ *
+ * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
+ * called by anything else. It can only be executed from a TTBR0 mapping.
+ */
+ENTRY(idmap_cpu_replace_ttbr1)
+       mrs     x2, daif
+       msr     daifset, #0xf
+
+       adrp    x1, empty_zero_page
+       msr     ttbr1_el1, x1
+       isb
+
+       tlbi    vmalle1
+       dsb     nsh
+       isb
+
+       msr     ttbr1_el1, x0
+       isb
+
+       msr     daif, x2
+
+       ret
+ENDPROC(idmap_cpu_replace_ttbr1)
+       .popsection
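
idmap_cpu_replace_ttbr1 does a break-before-make style switch with exceptions masked: TTBR1 is first pointed at a page of zeroes so no kernel mappings are live, the TLB is invalidated, and only then is the new table installed, keeping stale entries from the old tables from coexisting with the new ones. A toy user-space model of why the invalidate-while-broken ordering matters; the "MMU" and "TLB" here are plain data structures, nothing architectural:

/*
 * Toy break-before-make demo: a lookup cache ("TLB") must be emptied while
 * no table is live, otherwise a stale cached translation can survive the
 * table switch.
 */
#include <stdio.h>

#define ENTRIES 4

struct toy_mmu {
    const int *table;      /* current translation table       */
    int tlb[ENTRIES];      /* cached translations, -1 = empty */
};

static int lookup(struct toy_mmu *m, int va)
{
    if (m->tlb[va] != -1)
        return m->tlb[va];               /* hit: could be stale      */
    return m->tlb[va] = m->table[va];    /* miss: walk and cache     */
}

static void flush_tlb(struct toy_mmu *m)
{
    for (int i = 0; i < ENTRIES; i++)
        m->tlb[i] = -1;
}

static const int zero_table[ENTRIES] = { 0, 0, 0, 0 };

static void replace_table(struct toy_mmu *m, const int *new_table)
{
    m->table = zero_table;   /* 1. break: nothing is mapped     */
    flush_tlb(m);            /* 2. invalidate while broken      */
    m->table = new_table;    /* 3. make: install the new table  */
}

int main(void)
{
    static const int old_table[ENTRIES] = { 10, 11, 12, 13 };
    static const int new_table[ENTRIES] = { 20, 21, 22, 23 };
    struct toy_mmu m = { old_table, { -1, -1, -1, -1 } };

    lookup(&m, 2);                       /* warm the cache from old_table */
    replace_table(&m, new_table);
    printf("va 2 -> %d (expect 22)\n", lookup(&m, 2));
    return 0;
}

In the toy the break step is only ordering; on real hardware it is what stops the walker from refilling the TLB out of the old tables while the invalidation is in flight.
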
+
 /*
  *     __cpu_setup
  *
@@ -154,7 +182,7 @@ ENTRY(__cpu_setup)
        msr     cpacr_el1, x0                   // Enable FP/ASIMD
        mov     x0, #1 << 12                    // Reset mdscr_el1 and disable
        msr     mdscr_el1, x0                   // access to the DCC from EL0
-       msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
+       reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
        /*
         * Memory region attributes for LPAE:
         *
index 209ae5ad34958044ae3d6addcafa090f890a8be7..e6928896da2a4a2879828df92d940a2ed321a4ea 100644 (file)
@@ -49,13 +49,13 @@ static struct resource __initdata kernel_data = {
        .name   = "Kernel data",
        .start  = 0,
        .end    = 0,
-       .flags  = IORESOURCE_MEM,
+       .flags  = IORESOURCE_SYSTEM_RAM,
 };
 static struct resource __initdata kernel_code = {
        .name   = "Kernel code",
        .start  = 0,
        .end    = 0,
-       .flags  = IORESOURCE_MEM,
+       .flags  = IORESOURCE_SYSTEM_RAM,
        .sibling = &kernel_data,
 };
 
@@ -134,7 +134,7 @@ add_physical_memory(resource_size_t start, resource_size_t end)
        new->start = start;
        new->end = end;
        new->name = "System RAM";
-       new->flags = IORESOURCE_MEM;
+       new->flags = IORESOURCE_SYSTEM_RAM;
 
        *pprev = new;
 }
index 72e17f7ebd6ff0fba193ac07190d42b41e106e76..786e36e2f61de91c8f446cc941cff895cdb91578 100644 (file)
@@ -281,8 +281,6 @@ notrace void __init machine_init(unsigned long dt_ptr)
         */
        set_ist(_vectors_start);
 
-       lockdep_init();
-
        /*
         * dtb is passed in from bootloader.
         * fdt is linked in blob.
index dbb8259986895143196cd7edf504dba1f0587076..bce0d0d07e606e67dbce64bd4a6b133e9ed50905 100644 (file)
@@ -13,6 +13,6 @@
  */
 #define BASE_BAUD 0
 
-#define STD_COM_FLAGS          ASYNC_BOOT_AUTOCONF
+#define STD_COM_FLAGS          UPF_BOOT_AUTOCONF
 
 #define SERIAL_PORT_DFNS
index 4ce9fa874a577e559d5e916a24ddfb71eef6fcdd..6ae884bf66a5268c9dba08db6111df7dc9f40d06 100644 (file)
                reg = <0xffff78 8>;
                interrupts = <88 0>, <89 0>, <90 0>, <91 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
        sci1: serial@ffff80 {
                compatible = "renesas,sci";
                reg = <0xffff80 8>;
                interrupts = <92 0>, <93 0>, <94 0>, <95 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
        sci2: serial@ffff88 {
                compatible = "renesas,sci";
                reg = <0xffff88 8>;
                interrupts = <96 0>, <97 0>, <98 0>, <99 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
 };
index 545bfb57af9a9f2e8a81ff38aa46dcedf75c3757..9c733d920f1f0dc3eab70dd53e76d22a4860c3f3 100644 (file)
@@ -83,7 +83,7 @@
                reg = <0xffffb0 8>;
                interrupts = <52 0>, <53 0>, <54 0>, <55 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
 
        sci1: serial@ffffb8 {
@@ -91,6 +91,6 @@
                reg = <0xffffb8 8>;
                interrupts = <56 0>, <57 0>, <58 0>, <59 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
 };
index bcedba5a3ce7e8ae8fefaa195d86261b8ed8ba28..97e1f4b17ef067d5f10738e8aed5983359ce61c4 100644 (file)
                reg = <0xffff78 8>;
                interrupts = <88 0>, <89 0>, <90 0>, <91 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
        sci1: serial@ffff80 {
                compatible = "renesas,sci";
                reg = <0xffff80 8>;
                interrupts = <92 0>, <93 0>, <94 0>, <95 0>;
                clocks = <&fclk>;
-               clock-names = "sci_ick";
+               clock-names = "fck";
        };
 };
index caae3f4e4341a40d86f4014871c1ebe929a937ae..300dac3702f11a13460d1954843bfcbd1ddc2034 100644 (file)
@@ -1178,7 +1178,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
        efi_memory_desc_t *md;
        u64 efi_desc_size;
        char *name;
-       unsigned long flags;
+       unsigned long flags, desc;
 
        efi_map_start = __va(ia64_boot_param->efi_memmap);
        efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
@@ -1193,6 +1193,8 @@ efi_initialize_iomem_resources(struct resource *code_resource,
                        continue;
 
                flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               desc = IORES_DESC_NONE;
+
                switch (md->type) {
 
                        case EFI_MEMORY_MAPPED_IO:
@@ -1207,14 +1209,17 @@ efi_initialize_iomem_resources(struct resource *code_resource,
                                if (md->attribute & EFI_MEMORY_WP) {
                                        name = "System ROM";
                                        flags |= IORESOURCE_READONLY;
-                               } else if (md->attribute == EFI_MEMORY_UC)
+                               } else if (md->attribute == EFI_MEMORY_UC) {
                                        name = "Uncached RAM";
-                               else
+                               } else {
                                        name = "System RAM";
+                                       flags |= IORESOURCE_SYSRAM;
+                               }
                                break;
 
                        case EFI_ACPI_MEMORY_NVS:
                                name = "ACPI Non-volatile Storage";
+                               desc = IORES_DESC_ACPI_NV_STORAGE;
                                break;
 
                        case EFI_UNUSABLE_MEMORY:
@@ -1224,6 +1229,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
 
                        case EFI_PERSISTENT_MEMORY:
                                name = "Persistent Memory";
+                               desc = IORES_DESC_PERSISTENT_MEMORY;
                                break;
 
                        case EFI_RESERVED_TYPE:
@@ -1246,6 +1252,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
                res->start = md->phys_addr;
                res->end = md->phys_addr + efi_md_size(md) - 1;
                res->flags = flags;
+               res->desc = desc;
 
                if (insert_resource(&iomem_resource, res) < 0)
                        kfree(res);
index 4f118b0d30915f4f5927719522414a66efe17f08..2029a38a72aeee89793d53ee60418567586d67ff 100644 (file)
@@ -80,17 +80,17 @@ unsigned long vga_console_membase;
 
 static struct resource data_resource = {
        .name   = "Kernel data",
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 static struct resource code_resource = {
        .name   = "Kernel code",
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 static struct resource bss_resource = {
        .name   = "Kernel bss",
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 unsigned long ia64_max_cacheline_size;
index 622772b7fb6c48633624803223f0e601085670b2..e7ae6088350acddbb36d8e2fd04f2bc177668c09 100644 (file)
@@ -1336,8 +1336,11 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
                         * Don't call tty_write_message() if we're in the kernel; we might
                         * be holding locks...
                         */
-                       if (user_mode(regs))
-                               tty_write_message(current->signal->tty, buf);
+                       if (user_mode(regs)) {
+                               struct tty_struct *tty = get_current_tty();
+                               tty_write_message(tty, buf);
+                               tty_kref_put(tty);
+                       }
                        buf[len-1] = '\0';      /* drop '\r' */
                        /* watch for command names containing %s */
                        printk(KERN_WARNING "%s", buf);
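
The ia64 hunk above switches from dereferencing current->signal->tty to get_current_tty(), which hands back a counted reference, so the tty cannot be released between the lookup and tty_write_message(), and tty_kref_put() drops the reference afterwards. A toy sketch of that get/use/put pattern; the types are stand-ins, not the kernel tty API:

/*
 * Toy get/use/put pattern: take a counted reference before using an object
 * that another context might release, and drop it when done.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_tty {
    int refcount;
    const char *name;
};

static struct toy_tty *toy_get(struct toy_tty *t)
{
    if (t)
        t->refcount++;
    return t;
}

static void toy_put(struct toy_tty *t)
{
    if (t && --t->refcount == 0) {
        printf("freeing %s\n", t->name);
        free(t);
    }
}

static void write_message(struct toy_tty *t, const char *msg)
{
    if (t)
        printf("[%s] %s\n", t->name, msg);
}

int main(void)
{
    struct toy_tty *session_tty = malloc(sizeof(*session_tty));

    if (!session_tty)
        return 1;
    session_tty->refcount = 1;
    session_tty->name = "tty1";

    struct toy_tty *t = toy_get(session_tty);   /* like get_current_tty() */
    toy_put(session_tty);                       /* session drops its ref  */
    write_message(t, "unaligned access");       /* still safe to use      */
    toy_put(t);                                 /* like tty_kref_put()    */
    return 0;
}
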
index 836ac5a963c83140d18dcd762459d19e1fd0c101..2841c0a3fd3bb201a4bed53d823858b32a0a009b 100644 (file)
@@ -276,6 +276,7 @@ source "kernel/Kconfig.preempt"
 
 config SMP
        bool "Symmetric multi-processing support"
+       depends on MMU
        ---help---
          This enables support for systems with more than one CPU. If you have
          a system with only one CPU, say N. If you have a system with more
index a5ecef7188baa39e6f21d7a5a7c5ed6cca8f040f..136c69f1fb8ab8b73c23e2a752ea371c6e484512 100644 (file)
@@ -70,14 +70,14 @@ static struct resource data_resource = {
        .name   = "Kernel data",
        .start  = 0,
        .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 static struct resource code_resource = {
        .name   = "Kernel code",
        .start  = 0,
        .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 unsigned long memory_start;
index 71ea4c02795d45438727059b708731b0babbf220..a0fc0c192427ef995f3ae7029ade5d592e6d6b1f 100644 (file)
@@ -89,7 +89,7 @@ static struct platform_device mcf_uart = {
        .dev.platform_data      = mcf_uart_platform_data,
 };
 
-#ifdef CONFIG_FEC
+#if IS_ENABLED(CONFIG_FEC)
 
 #ifdef CONFIG_M5441x
 #define FEC_NAME       "enet-fec"
@@ -329,7 +329,7 @@ static struct platform_device mcf_qspi = {
 
 static struct platform_device *mcf_devices[] __initdata = {
        &mcf_uart,
-#ifdef CONFIG_FEC
+#if IS_ENABLED(CONFIG_FEC)
        &mcf_fec0,
 #ifdef MCFFEC_BASE1
        &mcf_fec1,
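
The two hunks in this file swap #ifdef CONFIG_FEC for IS_ENABLED(CONFIG_FEC). The difference matters when the FEC driver is modular: #ifdef only triggers for a built-in (=y) driver, while IS_ENABLED() also catches CONFIG_FEC=m, so the platform devices are still registered for the modular build. A rough model of the test follows; the real macro in include/linux/kconfig.h is implemented differently but checks the same two symbols.

/* Rough model of IS_ENABLED(): true for =y or =m. */
#include <stdio.h>

#define CONFIG_FEC_MODULE 1          /* pretend CONFIG_FEC=m */

#if defined(CONFIG_FEC) || defined(CONFIG_FEC_MODULE)
#define FEC_ENABLED 1
#else
#define FEC_ENABLED 0
#endif

int main(void)
{
#ifdef CONFIG_FEC                    /* old test: misses the =m case */
        puts("ifdef CONFIG_FEC: built-in only");
#else
        puts("ifdef CONFIG_FEC: not set (even though the module is)");
#endif

        if (FEC_ENABLED)             /* new test: =y or =m both count */
                puts("IS_ENABLED-style test: register the FEC platform device");
        return 0;
}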
index fc96e814188e57aa9dee8ed516c62200c543ad56..d1fc4796025edb8769ee01195e64b91821f3625a 100644 (file)
@@ -108,6 +108,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -366,6 +374,7 @@ CONFIG_ARIADNE=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_HYDRA=y
 CONFIG_APNE=y
 CONFIG_ZORRO8390=y
index 05c904f08d9d496fb7455df0d1506fb8d8c9bc23..9bfe8be3658c18231ca4473d8c075a1fb2ac4d5e 100644 (file)
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -344,6 +352,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index d572b731c510fdb2dc60616ee1ff0ce7e10516ce..ebdcfae555801cd1c7367d4ca40974b3d39099cb 100644 (file)
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -353,6 +361,7 @@ CONFIG_ATARILANCE=y
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
index 11a30c65ad44cb52347929d51f80301c8aca648b..8acc65e54995388614666716febdab1dda706376 100644 (file)
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_BVME6000_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index 6630a5154b9d797ebea93cdbe70886bc673f60cd..0c6a3d52b26e2b2559f24963040d3ba65a91ed47 100644 (file)
@@ -106,6 +106,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -345,6 +353,7 @@ CONFIG_HPLANCE=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index 1d90b71d09038da90cfad0cab267d60dae0fd752..12a8a6cb32f4914f06c1f5d4c8e5dd6ba31381ef 100644 (file)
@@ -105,6 +105,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@ CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -362,6 +370,7 @@ CONFIG_MAC89x0=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_MACSONIC=y
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_MAC8390=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
index 1fd21c1ca87fd8da85ace8c64930cfeed4383370..64ff2dcb34c89a3e60e26f9c468d654ed943e97f 100644 (file)
@@ -115,6 +115,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -276,6 +278,12 @@ CONFIG_DEV_APPLETALK=m
 CONFIG_IPDDP=m
 CONFIG_IPDDP_ENCAP=y
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -404,6 +412,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 CONFIG_MACSONIC=y
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_HYDRA=y
 CONFIG_MAC8390=y
 CONFIG_NE2000=y
index 74e10f79d7b1f9475574d422451314b7bcc1af64..07fc6abcfe0c50e4b656a63a9da36c728b13cec4 100644 (file)
@@ -103,6 +103,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -261,6 +263,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME147_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index 7034e716f166be8f869f872b12f9cbf960054029..69903ded88f71d1d51ad4b430acbd2f745fdf867 100644 (file)
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@ CONFIG_MVME16x_NET=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index f7deb5f702a6484dda646577f48ade90b902bf18..bd8401686ddef143bf036159cb3f4ea650772f32 100644 (file)
@@ -104,6 +104,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -352,6 +360,7 @@ CONFIG_VETH=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 CONFIG_NE2000=y
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
index 0ce79eb0d80503140d928c3c6c77061a2ead34d9..5f9fb3ab9636808d46b75f3696f477ab91e6dd79 100644 (file)
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -340,6 +348,7 @@ CONFIG_SUN3_82586=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index 4cb787e4991fcfd02646b1144999419b40454f83..5d1c674530e2ba73ca43ffc4940f139772962152 100644 (file)
@@ -101,6 +101,8 @@ CONFIG_NFT_NAT=m
 CONFIG_NFT_QUEUE=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
 CONFIG_NETFILTER_XT_SET=m
 CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@ CONFIG_L2TP=m
 CONFIG_BRIDGE=m
 CONFIG_ATALK=m
 CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
 CONFIG_DNS_RESOLVER=y
 CONFIG_BATMAN_ADV=m
 CONFIG_BATMAN_ADV_DAT=y
@@ -341,6 +349,7 @@ CONFIG_SUN3LANCE=y
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
 # CONFIG_NET_VENDOR_ROCKER is not set
index 06d0cb19b4e19fbe6d1bfcb01c9e568e56abb2ff..6d4497049b4b0e5e10d4a3954f0e79738804611e 100644 (file)
 
 /* Standard COM flags (except for COM4, because of the 8514 problem) */
 #ifdef CONFIG_SERIAL_8250_DETECT_IRQ
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
+#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
+#define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ)
 #else
-#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
+#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
+#define STD_COM4_FLAGS UPF_BOOT_AUTOCONF
 #endif
 
 #ifdef CONFIG_ISA
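
STD_COM_FLAGS here is just a bitwise OR of per-port capabilities; the hunk renames the legacy ASYNC_* spellings to the serial core's UPF_* port flags without changing which capabilities are selected. A small compile-time sketch of how such a flag word is assembled and tested; the UPF_* bit values below are made up for illustration, the real ones live in the serial core headers.

/* Conditional flag word assembled the same way as STD_COM_FLAGS. */
#include <stdio.h>

#define UPF_BOOT_AUTOCONF (1u << 0)   /* illustrative bit positions */
#define UPF_SKIP_TEST     (1u << 1)
#define UPF_AUTO_IRQ      (1u << 2)

#define CONFIG_SERIAL_8250_DETECT_IRQ 1   /* toggle to see both variants */

#ifdef CONFIG_SERIAL_8250_DETECT_IRQ
#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
#else
#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
#endif

int main(void)
{
        printf("STD_COM_FLAGS = %#x\n", STD_COM_FLAGS);
        printf("auto-irq %s\n",
               (STD_COM_FLAGS & UPF_AUTO_IRQ) ? "enabled" : "disabled");
        return 0;
}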
index f9d96bf869109c028e5a9f1f12ad3e9fe8b933ba..bafaff6dcd7bda8a28101f140159f9a7a76638db 100644 (file)
@@ -4,7 +4,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            376
+#define NR_syscalls            377
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_OLD_STAT
index 36cf129de663a7ca22f1bf1bba5a6245b7b04c03..0ca729665f29e9d67851aed2c1c52be75bbd078b 100644 (file)
 #define __NR_userfaultfd       373
 #define __NR_membarrier                374
 #define __NR_mlock2            375
+#define __NR_copy_file_range   376
 
 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */
index 282cd903f4c469197738eb9e840eaa75c77ec11a..8bb94261ff97d953fcfbe7c305e6a7ecfce27a97 100644 (file)
@@ -396,3 +396,4 @@ ENTRY(sys_call_table)
        .long sys_userfaultfd
        .long sys_membarrier
        .long sys_mlock2                /* 375 */
+       .long sys_copy_file_range
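
Adding copy_file_range on m68k touches three places that must stay in sync: NR_syscalls is bumped to 377, __NR_copy_file_range is assigned 376, and a new entry is appended to sys_call_table so that slot 376 dispatches to sys_copy_file_range. A toy dispatcher showing why those three values are coupled; the function bodies are obviously placeholders.

/* Toy syscall dispatch table; numbers mirror the hunks above. */
#include <stdio.h>

#define __NR_mlock2          375
#define __NR_copy_file_range 376   /* new entry */
#define NR_syscalls          377   /* bumped from 376 */

static long sys_mlock2(void)          { return 0; }
static long sys_copy_file_range(void) { return 42; }
static long sys_ni_syscall(void)      { return -38; /* -ENOSYS */ }

typedef long (*syscall_fn)(void);

static syscall_fn table[NR_syscalls] = {
        [__NR_mlock2]          = sys_mlock2,
        [__NR_copy_file_range] = sys_copy_file_range,
};

static long dispatch(unsigned int nr)
{
        if (nr >= NR_syscalls || !table[nr])
                return sys_ni_syscall();
        return table[nr]();
}

int main(void)
{
        printf("copy_file_range -> %ld\n", dispatch(__NR_copy_file_range));
        printf("unknown         -> %ld\n", dispatch(999));
        return 0;
}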
index ac8c039b0318fe48929bff8f60ec4f9d58740120..f7b23d300881b9bf1409c0afd80c4bfd93e27c79 100644 (file)
@@ -115,7 +115,6 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        return ftrace_modify_code(ip, old, new);
 }
 
-/* run from kstop_machine */
 int __init ftrace_dyn_arch_init(void)
 {
        return 0;
index 76ed17b56fead0092462c7f5498767b281c0339a..805ae5d712e8baa63095199f1601ce548c26d606 100644 (file)
@@ -38,6 +38,6 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define __NR_syscalls         389
+#define __NR_syscalls         392
 
 #endif /* _ASM_MICROBLAZE_UNISTD_H */
index 32850c73be09b915309fe892486c0d5187eed0a5..a8bd3fa28bc7f4e97158fff49d25443524647b0d 100644 (file)
 #define __NR_memfd_create      386
 #define __NR_bpf               387
 #define __NR_execveat          388
+#define __NR_userfaultfd       389
+#define __NR_membarrier                390
+#define __NR_mlock2            391
 
 #endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
index 89a2a93949274b16b3508b314d0cbdf6db5946e6..f31ebb5dc26c21922f240545d038f0d6b5699300 100644 (file)
@@ -130,8 +130,6 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
        memset(__bss_start, 0, __bss_stop-__bss_start);
        memset(_ssbss, 0, _esbss-_ssbss);
 
-       lockdep_init();
-
 /* initialize device tree for usage in early_printk */
        early_init_devtree(_fdt_start);
 
index 29c8568ec55c32776139cd79e1e441d23638078d..6b3dd99126d753a22a9ed270ec92761c2f936e27 100644 (file)
@@ -389,3 +389,6 @@ ENTRY(sys_call_table)
        .long sys_memfd_create
        .long sys_bpf
        .long sys_execveat
+       .long sys_userfaultfd
+       .long sys_membarrier            /* 390 */
+       .long sys_mlock2
index a96c81d1d22e6afce0973161639024e93346fd0b..c5cd63a4b6d53b399e91e8ebad7358a3f45fce8c 100644 (file)
@@ -21,6 +21,7 @@ platforms += mti-malta
 platforms += mti-sead3
 platforms += netlogic
 platforms += paravirt
+platforms += pic32
 platforms += pistachio
 platforms += pmcs-msp71xx
 platforms += pnx833x
index fbf3f6670b69a58f23bf3878eb0d5d2df3edeaec..cc172671797d15fb7a335d6d0bda44889cd839fd 100644 (file)
@@ -137,7 +137,7 @@ config ATH79
        select SYS_SUPPORTS_32BIT_KERNEL
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_MIPS16
-       select SYS_SUPPORTS_ZBOOT
+       select SYS_SUPPORTS_ZBOOT_UART_PROM
        select USE_OF
        help
          Support for the Atheros AR71XX/AR724X/AR913X SoCs.
@@ -151,6 +151,7 @@ config BMIPS_GENERIC
        select CSRC_R4K
        select SYNC_R4K
        select COMMON_CLK
+       select BCM6345_L1_IRQ
        select BCM7038_L1_IRQ
        select BCM7120_L2_IRQ
        select BRCMSTB_L2_IRQ
@@ -169,6 +170,7 @@ config BMIPS_GENERIC
        select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
        select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
        select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
+       select ARCH_WANT_OPTIONAL_GPIOLIB
        help
          Build a generic DT-based kernel image that boots on select
          BCM33xx cable modem chips, BCM63xx DSL chips, and BCM7xxx set-top
@@ -480,6 +482,14 @@ config MIPS_MALTA
          This enables support for the MIPS Technologies Malta evaluation
          board.
 
+config MACH_PIC32
+       bool "Microchip PIC32 Family"
+       help
+         This enables support for the Microchip PIC32 family of platforms.
+
+         Microchip PIC32 is a family of general-purpose 32-bit MIPS core
+         microcontrollers.
+
 config MIPS_SEAD3
        bool "MIPS SEAD3 board"
        select BOOT_ELF32
@@ -979,6 +989,7 @@ source "arch/mips/jazz/Kconfig"
 source "arch/mips/jz4740/Kconfig"
 source "arch/mips/lantiq/Kconfig"
 source "arch/mips/lasat/Kconfig"
+source "arch/mips/pic32/Kconfig"
 source "arch/mips/pistachio/Kconfig"
 source "arch/mips/pmcs-msp71xx/Kconfig"
 source "arch/mips/ralink/Kconfig"
@@ -1755,6 +1766,10 @@ config SYS_SUPPORTS_ZBOOT_UART16550
        bool
        select SYS_SUPPORTS_ZBOOT
 
+config SYS_SUPPORTS_ZBOOT_UART_PROM
+       bool
+       select SYS_SUPPORTS_ZBOOT
+
 config CPU_LOONGSON2
        bool
        select CPU_SUPPORTS_32BIT_KERNEL
@@ -2017,7 +2032,8 @@ config KVM_GUEST
        bool "KVM Guest Kernel"
        depends on BROKEN_ON_SMP
        help
-         Select this option if building a guest kernel for KVM (Trap & Emulate) mode
+         Select this option if building a guest kernel for KVM (Trap & Emulate)
+         mode.
 
 config KVM_GUEST_TIMER_FREQ
        int "Count/Compare Timer Frequency (MHz)"
index 3f70ba54ae21e909973ad9913eb71151fcc50492..e78d60dbdffdad93994421e684a87e2f34f8b802 100644 (file)
@@ -166,16 +166,6 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
 endif
 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
 cflags-$(CONFIG_CPU_BMIPS)     += -march=mips32 -Wa,-mips32 -Wa,--trap
-#
-# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
-# as MIPS64 R1; older versions as just R1.  This leaves the possibility open
-# that GCC might generate R2 code for -march=loongson3a which then is rejected
-# by GAS.  The cc-option can't probe for this behaviour so -march=loongson3a
-# can't easily be used safely within the kbuild framework.
-#
-cflags-$(CONFIG_CPU_LOONGSON3)  +=                                     \
-       $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-       -Wa,-mips64r2 -Wa,--trap
 
 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
index f9bc4f520440142b2be4b033c8166f684bd1fc75..84548f704035391d9715d2aeee0a8845735f8dd1 100644 (file)
@@ -40,7 +40,7 @@
 
 static int gpio2_get(struct gpio_chip *chip, unsigned offset)
 {
-       return alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE);
+       return !!alchemy_gpio2_get_value(offset + ALCHEMY_GPIO2_BASE);
 }
 
 static void gpio2_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -68,7 +68,7 @@ static int gpio2_to_irq(struct gpio_chip *chip, unsigned offset)
 
 static int gpio1_get(struct gpio_chip *chip, unsigned offset)
 {
-       return alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE);
+       return !!alchemy_gpio1_get_value(offset + ALCHEMY_GPIO1_BASE);
 }
 
 static void gpio1_set(struct gpio_chip *chip,
@@ -119,7 +119,7 @@ struct gpio_chip alchemy_gpio_chip[] = {
 
 static int alchemy_gpic_get(struct gpio_chip *chip, unsigned int off)
 {
-       return au1300_gpio_get_value(off + AU1300_GPIO_BASE);
+       return !!au1300_gpio_get_value(off + AU1300_GPIO_BASE);
 }
 
 static void alchemy_gpic_set(struct gpio_chip *chip, unsigned int off, int v)
index f4930456eb8e8181098c6035dccdd1203d72ea18..f969f583c68c66c0512d7c648dc16d22c18158f6 100644 (file)
@@ -37,7 +37,7 @@ static int ar7_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
                                container_of(chip, struct ar7_gpio_chip, chip);
        void __iomem *gpio_in = gpch->regs + AR7_GPIO_INPUT;
 
-       return readl(gpio_in) & (1 << gpio);
+       return !!(readl(gpio_in) & (1 << gpio));
 }
 
 static int titan_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
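
The .get() callbacks in these two files now normalise their result with '!!'. Returning the raw masked register word is risky: for bit 31 the value no longer fits in a positive int, and callers that treat a negative return as an error code would then misread a pin that is actually set. A minimal demonstration; the register read is mocked.

/* Why the gpio .get() callbacks normalise with '!!'. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_readl(void) { return 0x80000000u; } /* bit 31 set */

static int get_raw(unsigned gpio)
{
        return fake_readl() & (1u << gpio);      /* may not fit in int */
}

static int get_normalised(unsigned gpio)
{
        return !!(fake_readl() & (1u << gpio));  /* always 0 or 1 */
}

int main(void)
{
        printf("raw        gpio31 -> %d\n", get_raw(31));        /* negative */
        printf("normalised gpio31 -> %d\n", get_normalised(31)); /* 1 */
        return 0;
}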
index 13c04cf54afac93bd2c0d8fdaa852ecd9911055e..dfc60209dc63ca231a687175a9e9545c1cd9833a 100644 (file)
@@ -71,18 +71,6 @@ config ATH79_MACH_UBNT_XM
          Say 'Y' here if you want your kernel to support the
          Ubiquiti Networks XM (rev 1.0) board.
 
-choice
-       prompt "Build a DTB in the kernel"
-       optional
-       help
-         Select a devicetree that should be built into the kernel.
-
-       config DTB_TL_WR1043ND_V1
-               bool "TL-WR1043ND Version 1"
-               select BUILTIN_DTB
-               select SOC_AR913X
-endchoice
-
 endmenu
 
 config SOC_AR71XX
index ca7cc19adfea7f01154c55f44e47b5f863499843..870c6b2e97e835127dc88c6ca4bc4219faa44d92 100644 (file)
@@ -23,7 +23,6 @@ void ath79_clocks_init(void);
 unsigned long ath79_get_sys_clk_rate(const char *id);
 
 void ath79_ddr_ctrl_init(void);
-void ath79_ddr_wb_flush(unsigned int reg);
 
 void ath79_gpio_init(void);
 
index eeb3953ed8ac8d8051ccbe3df9af8270915329b3..511c06560dc1f5a95866f832ec9ae2180048b9d5 100644 (file)
 #include "common.h"
 #include "machtypes.h"
 
+static void __init ath79_misc_intc_domain_init(
+       struct device_node *node, int irq);
+
 static void ath79_misc_irq_handler(struct irq_desc *desc)
 {
-       void __iomem *base = ath79_reset_base;
+       struct irq_domain *domain = irq_desc_get_handler_data(desc);
+       void __iomem *base = domain->host_data;
        u32 pending;
 
        pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
@@ -42,15 +46,15 @@ static void ath79_misc_irq_handler(struct irq_desc *desc)
        while (pending) {
                int bit = __ffs(pending);
 
-               generic_handle_irq(ATH79_MISC_IRQ(bit));
+               generic_handle_irq(irq_linear_revmap(domain, bit));
                pending &= ~BIT(bit);
        }
 }
 
 static void ar71xx_misc_irq_unmask(struct irq_data *d)
 {
-       unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
-       void __iomem *base = ath79_reset_base;
+       void __iomem *base = irq_data_get_irq_chip_data(d);
+       unsigned int irq = d->hwirq;
        u32 t;
 
        t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
@@ -62,8 +66,8 @@ static void ar71xx_misc_irq_unmask(struct irq_data *d)
 
 static void ar71xx_misc_irq_mask(struct irq_data *d)
 {
-       unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
-       void __iomem *base = ath79_reset_base;
+       void __iomem *base = irq_data_get_irq_chip_data(d);
+       unsigned int irq = d->hwirq;
        u32 t;
 
        t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
@@ -75,8 +79,8 @@ static void ar71xx_misc_irq_mask(struct irq_data *d)
 
 static void ar724x_misc_irq_ack(struct irq_data *d)
 {
-       unsigned int irq = d->irq - ATH79_MISC_IRQ_BASE;
-       void __iomem *base = ath79_reset_base;
+       void __iomem *base = irq_data_get_irq_chip_data(d);
+       unsigned int irq = d->hwirq;
        u32 t;
 
        t = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS);
@@ -94,12 +98,6 @@ static struct irq_chip ath79_misc_irq_chip = {
 
 static void __init ath79_misc_irq_init(void)
 {
-       void __iomem *base = ath79_reset_base;
-       int i;
-
-       __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
-       __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
-
        if (soc_is_ar71xx() || soc_is_ar913x())
                ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
        else if (soc_is_ar724x() ||
@@ -110,13 +108,7 @@ static void __init ath79_misc_irq_init(void)
        else
                BUG();
 
-       for (i = ATH79_MISC_IRQ_BASE;
-            i < ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT; i++) {
-               irq_set_chip_and_handler(i, &ath79_misc_irq_chip,
-                                        handle_level_irq);
-       }
-
-       irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
+       ath79_misc_intc_domain_init(NULL, ATH79_CPU_IRQ(6));
 }
 
 static void ar934x_ip2_irq_dispatch(struct irq_desc *desc)
@@ -256,10 +248,10 @@ asmlinkage void plat_irq_dispatch(void)
        }
 }
 
-#ifdef CONFIG_IRQCHIP
 static int misc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
 {
        irq_set_chip_and_handler(irq, &ath79_misc_irq_chip, handle_level_irq);
+       irq_set_chip_data(irq, d->host_data);
        return 0;
 }
 
@@ -268,19 +260,14 @@ static const struct irq_domain_ops misc_irq_domain_ops = {
        .map = misc_map,
 };
 
-static int __init ath79_misc_intc_of_init(
-       struct device_node *node, struct device_node *parent)
+static void __init ath79_misc_intc_domain_init(
+       struct device_node *node, int irq)
 {
        void __iomem *base = ath79_reset_base;
        struct irq_domain *domain;
-       int irq;
-
-       irq = irq_of_parse_and_map(node, 0);
-       if (!irq)
-               panic("Failed to get MISC IRQ");
 
        domain = irq_domain_add_legacy(node, ATH79_MISC_IRQ_COUNT,
-                       ATH79_MISC_IRQ_BASE, 0, &misc_irq_domain_ops, NULL);
+                       ATH79_MISC_IRQ_BASE, 0, &misc_irq_domain_ops, base);
        if (!domain)
                panic("Failed to add MISC irqdomain");
 
@@ -288,9 +275,19 @@ static int __init ath79_misc_intc_of_init(
        __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_ENABLE);
        __raw_writel(0, base + AR71XX_RESET_REG_MISC_INT_STATUS);
 
+       irq_set_chained_handler_and_data(irq, ath79_misc_irq_handler, domain);
+}
 
-       irq_set_chained_handler(irq, ath79_misc_irq_handler);
+static int __init ath79_misc_intc_of_init(
+       struct device_node *node, struct device_node *parent)
+{
+       int irq;
 
+       irq = irq_of_parse_and_map(node, 0);
+       if (!irq)
+               panic("Failed to get MISC IRQ");
+
+       ath79_misc_intc_domain_init(node, irq);
        return 0;
 }
 
@@ -349,8 +346,6 @@ static int __init ar79_cpu_intc_of_init(
 IRQCHIP_DECLARE(ar79_cpu_intc, "qca,ar7100-cpu-intc",
                ar79_cpu_intc_of_init);
 
-#endif
-
 void __init arch_init_irq(void)
 {
        if (mips_machtype == ATH79_MACH_GENERIC_OF) {
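
The irq.c rework above moves the MISC interrupt controller onto a proper irq_domain: the MMIO base travels in the domain's host_data (and in each descriptor's chip_data), and pending bits are translated through irq_linear_revmap() instead of being added to a fixed ATH79_MISC_IRQ_BASE. A much-simplified model of that flow follows, with the kernel's irqdomain structures replaced by a toy equivalent.

/* Toy model: per-domain host_data plus a hwirq -> linux irq revmap. */
#include <stdio.h>

#define NCHIP_IRQS 32

struct irq_domain {
        void *host_data;             /* here: the MMIO base "pointer" */
        int   revmap[NCHIP_IRQS];    /* hwirq -> linux irq */
};

static void handle_one(int linux_irq)
{
        printf("dispatching linux irq %d\n", linux_irq);
}

/* Chained handler: maps each pending hardware bit through the domain
 * instead of adding a global IRQ base. */
static void misc_irq_handler(struct irq_domain *d, unsigned int pending)
{
        while (pending) {
                int bit = __builtin_ctz(pending);
                handle_one(d->revmap[bit]);
                pending &= ~(1u << bit);
        }
}

int main(void)
{
        static int fake_mmio_base;
        struct irq_domain d = { .host_data = &fake_mmio_base };

        for (int i = 0; i < NCHIP_IRQS; i++)
                d.revmap[i] = 8 + i;          /* arbitrary linear mapping */

        misc_irq_handler(&d, 0x00000005);     /* bits 0 and 2 pending */
        return 0;
}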
index 8755d618e116ec32fd6fff90d0081b0bb2e4bb9c..01808e85e263d5be0f87d07bbaa4ddd9b670c1c6 100644 (file)
 
 #define ATH79_SYS_TYPE_LEN     64
 
-#define AR71XX_BASE_FREQ       40000000
-#define AR724X_BASE_FREQ       5000000
-#define AR913X_BASE_FREQ       5000000
-
 static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
 
 static void ath79_restart(char *command)
@@ -207,10 +203,8 @@ void __init plat_mem_setup(void)
        fdt_start = fw_getenvl("fdt_start");
        if (fdt_start)
                __dt_setup_arch((void *)KSEG0ADDR(fdt_start));
-#ifdef CONFIG_BUILTIN_DTB
-       else
-               __dt_setup_arch(__dtb_start);
-#endif
+       else if (fw_arg0 == -2)
+               __dt_setup_arch((void *)KSEG0ADDR(fw_arg1));
 
        ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
                                           AR71XX_RESET_SIZE);
@@ -219,10 +213,11 @@ void __init plat_mem_setup(void)
        ath79_detect_sys_type();
        ath79_ddr_ctrl_init();
 
-       if (mips_machtype != ATH79_MACH_GENERIC_OF)
+       if (mips_machtype != ATH79_MACH_GENERIC_OF) {
                detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
-
-       _machine_restart = ath79_restart;
+               /* OF machines should use the reset driver */
+               _machine_restart = ath79_restart;
+       }
        _machine_halt = ath79_halt;
        pm_power_off = ath79_halt;
 }
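
The plat_mem_setup() hunk drops the BUILTIN_DTB fallback: if the bootloader did not pass fdt_start in the environment, the code now checks the firmware hand-off visible above, where fw_arg0 == -2 indicates that fw_arg1 carries the physical address of a device tree blob (this reads like the UHI-style DT hand-off, though the patch itself does not name it). A small sketch of that dispatch with mocked addresses and a simplified KSEG0 translation.

/* Sketch of the boot-argument dispatch; addresses are invented. */
#include <stdio.h>

#define KSEG0ADDR(pa) ((unsigned long)(pa) | 0x80000000ul)  /* simplified */

static void dt_setup_arch(unsigned long dtb_va)
{
        printf("using DTB at %#lx\n", dtb_va);
}

int main(void)
{
        long          fw_arg0 = -2;          /* firmware says: DTB follows */
        unsigned long fw_arg1 = 0x01000000;  /* physical address of the blob */
        unsigned long fdt_start = 0;         /* no env-supplied DTB */

        if (fdt_start)
                dt_setup_arch(KSEG0ADDR(fdt_start));
        else if (fw_arg0 == -2)
                dt_setup_arch(KSEG0ADDR(fw_arg1));
        else
                printf("no DTB handed over; fall back to board detection\n");
        return 0;
}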
@@ -272,15 +267,10 @@ void __init device_tree_init(void)
        unflatten_and_copy_device_tree();
 }
 
-static void __init ath79_generic_init(void)
-{
-       /* Nothing to do */
-}
-
 MIPS_MACHINE(ATH79_MACH_GENERIC,
             "Generic",
             "Generic AR71XX/AR724X/AR913X based board",
-            ath79_generic_init);
+            NULL);
 
 MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
             "DTB",
index a7e569c7968ea7e8bc1a1ce3e629fc54cc2710ee..959c145a0a2c1654d1ac3b58a98db92ef435a713 100644 (file)
@@ -666,9 +666,15 @@ static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
        switch (bus->hosttype) {
        case BCMA_HOSTTYPE_PCI:
                memset(out, 0, sizeof(struct ssb_sprom));
-               snprintf(buf, sizeof(buf), "pci/%u/%u/",
-                        bus->host_pci->bus->number + 1,
-                        PCI_SLOT(bus->host_pci->devfn));
+               /* On BCM47XX all PCI buses share the same domain */
+               if (config_enabled(CONFIG_BCM47XX))
+                       snprintf(buf, sizeof(buf), "pci/%u/%u/",
+                                bus->host_pci->bus->number + 1,
+                                PCI_SLOT(bus->host_pci->devfn));
+               else
+                       snprintf(buf, sizeof(buf), "pci/%u/%u/",
+                                pci_domain_nr(bus->host_pci->bus) + 1,
+                                bus->host_pci->bus->number);
                bcm47xx_sprom_apply_prefix_alias(buf, sizeof(buf));
                prefix = buf;
                break;
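
The bcm47xx SPROM hunk makes the NVRAM prefix depend on whether all PCI buses share one domain: on BCM47XX it keeps the historical "pci/<bus+1>/<slot>/" form, otherwise it switches to "pci/<domain+1>/<bus>/". The formatting itself is just snprintf into a small buffer, as in this sketch with invented bus numbers.

/* The two sprom prefix formats chosen above, side by side. */
#include <stdio.h>

int main(void)
{
        char buf[32];
        unsigned int domain = 0, bus = 1, slot = 4;
        int on_bcm47xx = 1;                 /* pretend CONFIG_BCM47XX=y */

        if (on_bcm47xx)                     /* single domain: bus+1/slot */
                snprintf(buf, sizeof(buf), "pci/%u/%u/", bus + 1, slot);
        else                                /* multi domain: domain+1/bus */
                snprintf(buf, sizeof(buf), "pci/%u/%u/", domain + 1, bus);

        printf("sprom prefix: %s\n", buf);
        return 0;
}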
index 4b50d40f7451ad86eba859b8a02ea792ff060b35..05757aed016cd6f72bedbd0ac31f379e8f0cd884 100644 (file)
@@ -10,6 +10,7 @@
 
 #define pr_fmt(fmt) "bcm63xx_nvram: " fmt
 
+#include <linux/bcm963xx_nvram.h>
 #include <linux/init.h>
 #include <linux/crc32.h>
 #include <linux/export.h>
 
 #include <bcm63xx_nvram.h>
 
-/*
- * nvram structure
- */
-struct bcm963xx_nvram {
-       u32     version;
-       u8      reserved1[256];
-       u8      name[16];
-       u32     main_tp_number;
-       u32     psi_size;
-       u32     mac_addr_count;
-       u8      mac_addr_base[ETH_ALEN];
-       u8      reserved2[2];
-       u32     checksum_old;
-       u8      reserved3[720];
-       u32     checksum_high;
-};
-
 #define BCM63XX_DEFAULT_PSI_SIZE       64
 
 static struct bcm963xx_nvram nvram;
@@ -42,27 +26,14 @@ static int mac_addr_used;
 
 void __init bcm63xx_nvram_init(void *addr)
 {
-       unsigned int check_len;
        u32 crc, expected_crc;
        u8 hcs_mac_addr[ETH_ALEN] = { 0x00, 0x10, 0x18, 0xff, 0xff, 0xff };
 
        /* extract nvram data */
-       memcpy(&nvram, addr, sizeof(nvram));
+       memcpy(&nvram, addr, BCM963XX_NVRAM_V5_SIZE);
 
        /* check checksum before using data */
-       if (nvram.version <= 4) {
-               check_len = offsetof(struct bcm963xx_nvram, reserved3);
-               expected_crc = nvram.checksum_old;
-               nvram.checksum_old = 0;
-       } else {
-               check_len = sizeof(nvram);
-               expected_crc = nvram.checksum_high;
-               nvram.checksum_high = 0;
-       }
-
-       crc = crc32_le(~0, (u8 *)&nvram, check_len);
-
-       if (crc != expected_crc)
+       if (bcm963xx_nvram_checksum(&nvram, &expected_crc, &crc))
                pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
                        expected_crc, crc);
 
index e7fc6f9348baa2e6bc06f881ced25aec7fb40b46..7efefcf440338c0e657733cdc2ae26e38aba9e35 100644 (file)
 #include <asm/irq_cpu.h>
 #include <asm/time.h>
 
+static const struct of_device_id smp_intc_dt_match[] = {
+       { .compatible = "brcm,bcm7038-l1-intc" },
+       { .compatible = "brcm,bcm6345-l1-intc" },
+       {}
+};
+
 unsigned int get_c0_compare_int(void)
 {
        return CP0_LEGACY_COMPARE_IRQ;
@@ -24,8 +30,8 @@ void __init arch_init_irq(void)
 {
        struct device_node *dn;
 
-       /* Only the STB (bcm7038) controller supports SMP IRQ affinity */
-       dn = of_find_compatible_node(NULL, NULL, "brcm,bcm7038-l1-intc");
+       /* Only these controllers support SMP IRQ affinity */
+       dn = of_find_matching_node(NULL, smp_intc_dt_match);
        if (dn)
                of_node_put(dn);
        else
index 5b16d2955fbb864afae0103753bd8d0d115eba0e..35535284b39efb323cb63b4f8ec37dcdfbdb7f60 100644 (file)
@@ -105,6 +105,7 @@ static const struct bmips_quirk bmips_quirk_list[] = {
        { "brcm,bcm33843-viper",        &bcm3384_viper_quirks           },
        { "brcm,bcm6328",               &bcm6328_quirks                 },
        { "brcm,bcm6368",               &bcm6368_quirks                 },
+       { "brcm,bcm63168",              &bcm6368_quirks                 },
        { },
 };
 
index d5bdee115f22f73058d464e5cc88aee8abc7cf46..90aca95fe3149696d772e3ae41cdcfa388d79b80 100644 (file)
@@ -29,20 +29,29 @@ KBUILD_AFLAGS := $(LINUXINCLUDE) $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
        -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
        -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
 
-targets := head.o decompress.o string.o dbg.o uart-16550.o uart-alchemy.o
-
 # decompressor objects (linked with vmlinuz)
 vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
 
 ifdef CONFIG_DEBUG_ZBOOT
 vmlinuzobjs-$(CONFIG_DEBUG_ZBOOT)                 += $(obj)/dbg.o
 vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
+vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
 vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY)                += $(obj)/uart-alchemy.o
+vmlinuzobjs-$(CONFIG_ATH79)                       += $(obj)/uart-ath79.o
 endif
 
-ifdef CONFIG_KERNEL_XZ
-vmlinuzobjs-y += $(obj)/../../lib/ashldi3.o
-endif
+extra-y += uart-ath79.c
+$(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c
+       $(call cmd,shipped)
+
+vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
+
+extra-y += ashldi3.c bswapsi.c
+$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
+$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c
+       $(call cmd,shipped)
+
+targets := $(notdir $(vmlinuzobjs-y))
 
 targets += vmlinux.bin
 OBJCOPYFLAGS_vmlinux.bin := $(OBJCOPYFLAGS) -O binary -R .comment -S
@@ -60,7 +69,7 @@ targets += vmlinux.bin.z
 $(obj)/vmlinux.bin.z: $(obj)/vmlinux.bin FORCE
        $(call if_changed,$(tool_y))
 
-targets += piggy.o
+targets += piggy.o dummy.o
 OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
                        --set-section-flags=.image=contents,alloc,load,readonly,data
 $(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
diff --git a/arch/mips/boot/compressed/uart-prom.c b/arch/mips/boot/compressed/uart-prom.c
new file mode 100644 (file)
index 0000000..1c3d51b
--- /dev/null
@@ -0,0 +1,7 @@
+
+extern void prom_putchar(unsigned char ch);
+
+void putc(char c)
+{
+       prom_putchar(c);
+}
index a0bf516ec3948827acbc526c6ebac5a7827e1a22..fc7a0a98e9bfc159ef3ba75ba2c791aa76e5c9a2 100644 (file)
@@ -4,6 +4,7 @@ dts-dirs        += ingenic
 dts-dirs       += lantiq
 dts-dirs       += mti
 dts-dirs       += netlogic
+dts-dirs       += pic32
 dts-dirs       += qca
 dts-dirs       += ralink
 dts-dirs       += xilfpga
index d52ce3d07f16dee81c5982212fe69f8cddc015d0..9d19236f53e7b9a33c2a2cc2ccfd593322b04074 100644 (file)
@@ -31,6 +31,7 @@
        };
 
        aliases {
+               leds0 = &leds0;
                uart0 = &uart0;
        };
 
@@ -73,6 +74,7 @@
                timer: timer@10000040 {
                        compatible = "syscon";
                        reg = <0x10000040 0x2c>;
+                       native-endian;
                };
 
                reboot {
                        offset = <0x28>;
                        mask = <0x1>;
                };
+
+               leds0: led-controller@10000800 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "brcm,bcm6328-leds";
+                       reg = <0x10000800 0x24>;
+                       status = "disabled";
+               };
        };
 };
index 45152bc22117f081d15a8b6dd2740305b35efbf6..1f6b9b5cddb4c0890b02cb37edb5c0c3331ba8dd 100644 (file)
@@ -32,6 +32,7 @@
        };
 
        aliases {
+               leds0 = &leds0;
                uart0 = &uart0;
        };
 
                compatible = "simple-bus";
                ranges;
 
+               periph_cntl: syscon@10000000 {
+                       compatible = "syscon";
+                       reg = <0x10000000 0x14>;
+                       native-endian;
+               };
+
+               reboot: syscon-reboot@10000008 {
+                       compatible = "syscon-reboot";
+                       regmap = <&periph_cntl>;
+                       offset = <0x8>;
+                       mask = <0x1>;
+               };
+
                periph_intc: periph_intc@10000020 {
                        compatible = "brcm,bcm3380-l2-intc";
                        reg = <0x10000024 0x4 0x1000002c 0x4>,
                        interrupts = <2>;
                };
 
+               leds0: led-controller@100000d0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "brcm,bcm6358-leds";
+                       reg = <0x100000d0 0x8>;
+                       status = "disabled";
+               };
+
                uart0: serial@10000100 {
                        compatible = "brcm,bcm6345-uart";
                        reg = <0x10000100 0x18>;
index 4fc7ecee273c105027ff20cea02f2bd5cf86c9fe..3ae16053a0c98763d8b8239eff18630105c2239a 100644 (file)
@@ -98,6 +98,7 @@
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7125-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x60c>;
+                       native-endian;
                };
 
                reboot {
index a3039bb53477d40a426fec1d1318f3156ede8be3..be7991917d2950f0d48d0c04dfd9b478856fcc35 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7346-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       native-endian;
                };
 
                reboot {
index 4274ff41ec2122ac0bfd52d814aee8432c26553e..060805be619a23ac8812576a59a47fead67c98aa 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7358-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       native-endian;
                };
 
                reboot {
index 0dcc9163c27bdd0022e5b66f3ffd65da7fd6ce31..bcdb09bfe07ba3ed86369f9938ac50db678d250f 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7360-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       native-endian;
                };
 
                reboot {
index 2f3f9fc2c478df36ef57c5990dd81f6757240e3a..d3b1b762e6c3e21c3e129aea6127f071d4417845 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7362-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       native-endian;
                };
 
                reboot {
index bee221b3b56857c8d84dac3e2fa9bfe8b3c54a85..3302a1b8a5c9b841b46b931ab501327aa2099b30 100644 (file)
@@ -99,6 +99,7 @@
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7420-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x60c>;
+                       native-endian;
                };
 
                reboot {
index 571f30f52e3ff5780ec4fd72e18e72737e51537d..15b27aae15a9620e439de3e13162e16b2e8306b1 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       native-endian;
                };
 
                reboot {
index 614ee211f71a89356dd1eb814a38ec3071885747..adb33e3550430de99adb3b2e2c211310fabc65a9 100644 (file)
                sun_top_ctrl: syscon@404000 {
                        compatible = "brcm,bcm7425-sun-top-ctrl", "syscon";
                        reg = <0x404000 0x51c>;
+                       native-endian;
                };
 
                reboot {
index 9fcb9e7d1f5752902d08ccdb174b9deeca5afad0..1652d8d60b1e4b8673acaa7d2ec182ce7eb7ff98 100644 (file)
 &uart4 {
        status = "okay";
 };
+
+&nemc {
+       status = "okay";
+
+       nandc: nand-controller@1 {
+               compatible = "ingenic,jz4780-nand";
+               reg = <1 0 0x1000000>;
+
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               ingenic,bch-controller = <&bch>;
+
+               ingenic,nemc-tAS = <10>;
+               ingenic,nemc-tAH = <5>;
+               ingenic,nemc-tBP = <10>;
+               ingenic,nemc-tAW = <15>;
+               ingenic,nemc-tSTRV = <100>;
+
+               nand@1 {
+                       reg = <1>;
+
+                       nand-ecc-step-size = <1024>;
+                       nand-ecc-strength = <24>;
+                       nand-ecc-mode = "hw";
+                       nand-on-flash-bbt;
+
+                       partitions {
+                               compatible = "fixed-partitions";
+                               #address-cells = <2>;
+                               #size-cells = <2>;
+
+                               partition@0 {
+                                       label = "u-boot-spl";
+                                       reg = <0x0 0x0 0x0 0x800000>;
+                               };
+
+                               partition@0x800000 {
+                                       label = "u-boot";
+                                       reg = <0x0 0x800000 0x0 0x200000>;
+                               };
+
+                               partition@0xa00000 {
+                                       label = "u-boot-env";
+                                       reg = <0x0 0xa00000 0x0 0x200000>;
+                               };
+
+                               partition@0xc00000 {
+                                       label = "boot";
+                                       reg = <0x0 0xc00000 0x0 0x4000000>;
+                               };
+
+                               partition@0x8c00000 {
+                                       label = "system";
+                                       reg = <0x0 0x4c00000 0x1 0xfb400000>;
+                               };
+                       };
+               };
+       };
+};
+
+&bch {
+       status = "okay";
+};
index 65389f6027333c8c4bed688fb38ed11a31d4bd2e..b868b429add23af4c14f42a366b96760e49eee66 100644 (file)
 
                status = "disabled";
        };
+
+       nemc: nemc@13410000 {
+               compatible = "ingenic,jz4780-nemc";
+               reg = <0x13410000 0x10000>;
+               #address-cells = <2>;
+               #size-cells = <1>;
+               ranges = <1 0 0x1b000000 0x1000000
+                         2 0 0x1a000000 0x1000000
+                         3 0 0x19000000 0x1000000
+                         4 0 0x18000000 0x1000000
+                         5 0 0x17000000 0x1000000
+                         6 0 0x16000000 0x1000000>;
+
+               clocks = <&cgu JZ4780_CLK_NEMC>;
+
+               status = "disabled";
+       };
+
+       bch: bch@134d0000 {
+               compatible = "ingenic,jz4780-bch";
+               reg = <0x134d0000 0x10000>;
+
+               clocks = <&cgu JZ4780_CLK_BCH>;
+
+               status = "disabled";
+       };
 };
diff --git a/arch/mips/boot/dts/pic32/Makefile b/arch/mips/boot/dts/pic32/Makefile
new file mode 100644 (file)
index 0000000..7ac7905
--- /dev/null
@@ -0,0 +1,12 @@
+dtb-$(CONFIG_DTB_PIC32_MZDA_SK)                += pic32mzda_sk.dtb
+
+dtb-$(CONFIG_DTB_PIC32_NONE)           += \
+                                       pic32mzda_sk.dtb
+
+obj-y                          += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
+
+# Force kbuild to make empty built-in.o if necessary
+obj-                           += dummy.o
+
+always                         := $(dtb-y)
+clean-files                    := *.dtb *.dtb.S
diff --git a/arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi b/arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi
new file mode 100644 (file)
index 0000000..ef13350
--- /dev/null
@@ -0,0 +1,236 @@
+/*
+ * Device Tree Source for PIC32MZDA clock data
+ *
+ * Purna Chandra Mandal <purna.mandal@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+/* all fixed rate clocks */
+
+/ {
+       POSC:posc_clk { /* On-chip primary oscillator */
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <24000000>;
+       };
+
+       FRC:frc_clk { /* internal FRC oscillator */
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <8000000>;
+       };
+
+       BFRC:bfrc_clk { /* internal backup FRC oscillator */
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <8000000>;
+       };
+
+       LPRC:lprc_clk { /* internal low-power FRC oscillator */
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <32000>;
+       };
+
+       /* UPLL provides clock to USBCORE */
+       UPLL:usb_phy_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <24000000>;
+               clock-output-names = "usbphy_clk";
+       };
+
+       TxCKI:txcki_clk { /* external clock input on TxCLKI pin */
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <4000000>;
+               status = "disabled";
+       };
+
+       /* external clock input on REFCLKIx pin */
+       REFIx:refix_clk {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <24000000>;
+               status = "disabled";
+       };
+
+       /* PIC32 specific clks */
+       pic32_clktree {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               reg = <0x1f801200 0x200>;
+               compatible = "microchip,pic32mzda-clk";
+               ranges = <0 0x1f801200 0x200>;
+
+               /* secondary oscillator; external input on SOSCI pin */
+               SOSC:sosc_clk@0 {
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-sosc";
+                       clock-frequency = <32768>;
+                       reg = <0x000 0x10>,   /* enable reg */
+                             <0x1d0 0x10>; /* status reg */
+                       microchip,bit-mask = <0x02>; /* enable mask */
+                       microchip,status-bit-mask = <0x10>; /* status-mask*/
+               };
+
+               FRCDIV:frcdiv_clk {
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-frcdivclk";
+                       clocks = <&FRC>;
+                       clock-output-names = "frcdiv_clk";
+               };
+
+               /* System PLL clock */
+               SYSPLL:spll_clk@020 {
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-syspll";
+                       reg = <0x020 0x10>, /* SPLL register */
+                             <0x1d0 0x10>; /* CLKSTAT register */
+                       clocks = <&POSC>, <&FRC>;
+                       clock-output-names = "sys_pll";
+                       microchip,status-bit-mask = <0x80>; /* SPLLRDY */
+               };
+
+               /* system clock; mux with postdiv & slew */
+               SYSCLK:sys_clk@1c0 {
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-sysclk-v2";
+                       reg = <0x1c0 0x04>; /* SLEWCON */
+                       clocks = <&FRCDIV>, <&SYSPLL>, <&POSC>, <&SOSC>,
+                                <&LPRC>, <&FRCDIV>;
+                       microchip,clock-indices = <0>, <1>, <2>, <4>,
+                                                 <5>, <7>;
+                       clock-output-names = "sys_clk";
+               };
+
+               /* Peripheral bus1 clock */
+               PBCLK1:pb1_clk@140 {
+                       reg = <0x140 0x10>;
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-pbclk";
+                       clocks = <&SYSCLK>;
+                       clock-output-names = "pb1_clk";
+                       /* used by system modules, not gateable */
+                       microchip,ignore-unused;
+               };
+
+               /* Peripheral bus2 clock */
+               PBCLK2:pb2_clk@150 {
+                       reg = <0x150 0x10>;
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-pbclk";
+                       clocks = <&SYSCLK>;
+                       clock-output-names = "pb2_clk";
+                       /* avoid gating even if unused */
+                       microchip,ignore-unused;
+               };
+
+               /* Peripheral bus3 clock */
+               PBCLK3:pb3_clk@160 {
+                       reg = <0x160 0x10>;
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-pbclk";
+                       clocks = <&SYSCLK>;
+                       clock-output-names = "pb3_clk";
+               };
+
+               /* Peripheral bus4 clock(I/O ports, GPIO) */
+               PBCLK4:pb4_clk@170 {
+                       reg = <0x170 0x10>;
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-pbclk";
+                       clocks = <&SYSCLK>;
+                       clock-output-names = "pb4_clk";
+               };
+
+               /* Peripheral bus clock */
+               PBCLK5:pb5_clk@180 {
+                       reg = <0x180 0x10>;
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-pbclk";
+                       clocks = <&SYSCLK>;
+                       clock-output-names = "pb5_clk";
+               };
+
+               /* Peripheral Bus6 clock; */
+               PBCLK6:pb6_clk@190 {
+                       reg = <0x190 0x10>;
+                       compatible = "microchip,pic32mzda-pbclk";
+                       clocks = <&SYSCLK>;
+                       #clock-cells = <0>;
+               };
+
+               /* Peripheral bus7 clock */
+               PBCLK7:pb7_clk@1a0 {
+                       reg = <0x1a0 0x10>;
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-pbclk";
+                       /* CPU is driven by this clock; so named */
+                       clock-output-names = "cpu_clk";
+                       clocks = <&SYSCLK>;
+               };
+
+               /* Reference Oscillator clock for SPI/I2S */
+               REFCLKO1:refo1_clk@80 {
+                       reg = <0x080 0x20>;
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-refoclk";
+                       clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
+                                <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
+                       microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
+                                                 <5>, <7>, <8>, <9>;
+                       clock-output-names = "refo1_clk";
+               };
+
+               /* Reference Oscillator clock for SQI */
+               REFCLKO2:refo2_clk@a0 {
+                       reg = <0x0a0 0x20>;
+                       #clock-cells = <0>;
+                       compatible = "microchip,pic32mzda-refoclk";
+                       clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
+                                <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
+                       microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
+                                                 <5>, <7>, <8>, <9>;
+                       clock-output-names = "refo2_clk";
+               };
+
+               /* Reference Oscillator clock, ADC */
+               REFCLKO3:refo3_clk@c0 {
+                       reg = <0x0c0 0x20>;
+                       compatible = "microchip,pic32mzda-refoclk";
+                       clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
+                                <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
+                       microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
+                                                 <5>, <7>, <8>, <9>;
+                       #clock-cells = <0>;
+                       clock-output-names = "refo3_clk";
+               };
+
+               /* Reference Oscillator clock */
+               REFCLKO4:refo4_clk@e0 {
+                       reg = <0x0e0 0x20>;
+                       compatible = "microchip,pic32mzda-refoclk";
+                       clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
+                                <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
+                       microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
+                                                 <5>, <7>, <8>, <9>;
+                       #clock-cells = <0>;
+                       clock-output-names = "refo4_clk";
+               };
+
+               /* Reference Oscillator clock, LCD */
+               REFCLKO5:refo5_clk@100 {
+                       reg = <0x100 0x20>;
+                       compatible = "microchip,pic32mzda-refoclk";
+                       clocks = <&SYSCLK>,<&PBCLK1>,<&POSC>,<&FRC>,<&LPRC>,
+                                <&SOSC>,<&SYSPLL>,<&REFIx>,<&BFRC>;
+                       microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
+                                                 <5>, <7>, <8>, <9>;
+                       #clock-cells = <0>;
+                       clock-output-names = "refo5_clk";
+               };
+       };
+};
diff --git a/arch/mips/boot/dts/pic32/pic32mzda.dtsi b/arch/mips/boot/dts/pic32/pic32mzda.dtsi
new file mode 100644 (file)
index 0000000..ad9e331
--- /dev/null
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "pic32mzda-clk.dtsi"
+
+/ {
+       #address-cells = <1>;
+       #size-cells = <1>;
+       interrupt-parent = <&evic>;
+
+       aliases {
+               gpio0 = &gpio0;
+               gpio1 = &gpio1;
+               gpio2 = &gpio2;
+               gpio3 = &gpio3;
+               gpio4 = &gpio4;
+               gpio5 = &gpio5;
+               gpio6 = &gpio6;
+               gpio7 = &gpio7;
+               gpio8 = &gpio8;
+               gpio9 = &gpio9;
+               serial0 = &uart1;
+               serial1 = &uart2;
+               serial2 = &uart3;
+               serial3 = &uart4;
+               serial4 = &uart5;
+               serial5 = &uart6;
+       };
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu@0 {
+                       compatible = "mti,mips14KEc";
+                       device_type = "cpu";
+               };
+       };
+
+       soc {
+               compatible = "microchip,pic32mzda-infra";
+               interrupts = <0 IRQ_TYPE_EDGE_RISING>;
+       };
+
+       evic: interrupt-controller@1f810000 {
+               compatible = "microchip,pic32mzda-evic";
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               reg = <0x1f810000 0x1000>;
+               microchip,external-irqs = <3 8 13 18 23>;
+       };
+
+       pic32_pinctrl: pinctrl@1f801400 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "microchip,pic32mzda-pinctrl";
+               reg = <0x1f801400 0x400>;
+               clocks = <&PBCLK1>;
+       };
+
+       /* PORTA */
+       gpio0: gpio0@1f860000 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860000 0x100>;
+               interrupts = <118 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <0>;
+               gpio-ranges = <&pic32_pinctrl 0 0 16>;
+       };
+
+       /* PORTB */
+       gpio1: gpio1@1f860100 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860100 0x100>;
+               interrupts = <119 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <1>;
+               gpio-ranges = <&pic32_pinctrl 0 16 16>;
+       };
+
+       /* PORTC */
+       gpio2: gpio2@1f860200 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860200 0x100>;
+               interrupts = <120 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <2>;
+               gpio-ranges = <&pic32_pinctrl 0 32 16>;
+       };
+
+       /* PORTD */
+       gpio3: gpio3@1f860300 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860300 0x100>;
+               interrupts = <121 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <3>;
+               gpio-ranges = <&pic32_pinctrl 0 48 16>;
+       };
+
+       /* PORTE */
+       gpio4: gpio4@1f860400 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860400 0x100>;
+               interrupts = <122 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <4>;
+               gpio-ranges = <&pic32_pinctrl 0 64 16>;
+       };
+
+       /* PORTF */
+       gpio5: gpio5@1f860500 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860500 0x100>;
+               interrupts = <123 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <5>;
+               gpio-ranges = <&pic32_pinctrl 0 80 16>;
+       };
+
+       /* PORTG */
+       gpio6: gpio6@1f860600 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860600 0x100>;
+               interrupts = <124 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <6>;
+               gpio-ranges = <&pic32_pinctrl 0 96 16>;
+       };
+
+       /* PORTH */
+       gpio7: gpio7@1f860700 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860700 0x100>;
+               interrupts = <125 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <7>;
+               gpio-ranges = <&pic32_pinctrl 0 112 16>;
+       };
+
+       /* PORTI does not exist */
+
+       /* PORTJ */
+       gpio8: gpio8@1f860800 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860800 0x100>;
+               interrupts = <126 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <8>;
+               gpio-ranges = <&pic32_pinctrl 0 128 16>;
+       };
+
+       /* PORTK */
+       gpio9: gpio9@1f860900 {
+               compatible = "microchip,pic32mzda-gpio";
+               reg = <0x1f860900 0x100>;
+               interrupts = <127 IRQ_TYPE_LEVEL_HIGH>;
+               #gpio-cells = <2>;
+               gpio-controller;
+               interrupt-controller;
+               #interrupt-cells = <2>;
+               clocks = <&PBCLK4>;
+               microchip,gpio-bank = <9>;
+               gpio-ranges = <&pic32_pinctrl 0 144 16>;
+       };
+
+       sdhci: sdhci@1f8ec000 {
+               compatible = "microchip,pic32mzda-sdhci";
+               reg = <0x1f8ec000 0x100>;
+               interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&REFCLKO4>, <&PBCLK5>;
+               clock-names = "base_clk", "sys_clk";
+               bus-width = <4>;
+               cap-sd-highspeed;
+               status = "disabled";
+       };
+
+       uart1: serial@1f822000 {
+               compatible = "microchip,pic32mzda-uart";
+               reg = <0x1f822000 0x50>;
+               interrupts = <112 IRQ_TYPE_LEVEL_HIGH>,
+                       <113 IRQ_TYPE_LEVEL_HIGH>,
+                       <114 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&PBCLK2>;
+               status = "disabled";
+       };
+
+       uart2: serial@1f822200 {
+               compatible = "microchip,pic32mzda-uart";
+               reg = <0x1f822200 0x50>;
+               interrupts = <145 IRQ_TYPE_LEVEL_HIGH>,
+                       <146 IRQ_TYPE_LEVEL_HIGH>,
+                       <147 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&PBCLK2>;
+               status = "disabled";
+       };
+
+       uart3: serial@1f822400 {
+               compatible = "microchip,pic32mzda-uart";
+               reg = <0x1f822400 0x50>;
+               interrupts = <157 IRQ_TYPE_LEVEL_HIGH>,
+                       <158 IRQ_TYPE_LEVEL_HIGH>,
+                       <159 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&PBCLK2>;
+               status = "disabled";
+       };
+
+       uart4: serial@1f822600 {
+               compatible = "microchip,pic32mzda-uart";
+               reg = <0x1f822600 0x50>;
+               interrupts = <170 IRQ_TYPE_LEVEL_HIGH>,
+                       <171 IRQ_TYPE_LEVEL_HIGH>,
+                       <172 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&PBCLK2>;
+               status = "disabled";
+       };
+
+       uart5: serial@1f822800 {
+               compatible = "microchip,pic32mzda-uart";
+               reg = <0x1f822800 0x50>;
+               interrupts = <179 IRQ_TYPE_LEVEL_HIGH>,
+                       <180 IRQ_TYPE_LEVEL_HIGH>,
+                       <181 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&PBCLK2>;
+               status = "disabled";
+       };
+
+       uart6: serial@1f822a00 {
+               compatible = "microchip,pic32mzda-uart";
+               reg = <0x1f822a00 0x50>;
+               interrupts = <188 IRQ_TYPE_LEVEL_HIGH>,
+                       <189 IRQ_TYPE_LEVEL_HIGH>,
+                       <190 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&PBCLK2>;
+               status = "disabled";
+       };
+};
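
Each GPIO bank above exposes 16 pins and maps them into the pinctrl pin space with gpio-ranges = <&pic32_pinctrl 0 bank*16 16>; the banks correspond to ports A-H, J and K (there is no PORTI). A small stand-alone sketch of that numbering, illustrative only:

    #include <stdio.h>

    /* Map a pinctrl pin number to its PIC32 port letter and bit,
     * mirroring the gpio-ranges properties in the dtsi above. */
    int main(void)
    {
            static const char ports[] = "ABCDEFGHJK";
            unsigned int pin = 112;         /* first pin of gpio7 (PORTH) */

            printf("pin %u -> gpio%u (PORT%c), bit %u\n",
                   pin, pin / 16, ports[pin / 16], pin % 16);
            return 0;
    }
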
diff --git a/arch/mips/boot/dts/pic32/pic32mzda_sk.dts b/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
new file mode 100644 (file)
index 0000000..5d434a5
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+
+#include "pic32mzda.dtsi"
+
+/ {
+       compatible = "microchip,pic32mzda-sk", "microchip,pic32mzda";
+       model = "Microchip PIC32MZDA Starter Kit";
+
+       memory {
+               device_type = "memory";
+               reg = <0x08000000 0x08000000>;
+       };
+
+       chosen {
+               bootargs = "earlyprintk=ttyPIC1,115200n8r console=ttyPIC1,115200n8";
+       };
+
+       leds0 {
+               compatible = "gpio-leds";
+               pinctrl-names = "default";
+               pinctrl-0 = <&user_leds_s0>;
+
+               led@1 {
+                       label = "pic32mzda_sk:red:led1";
+                       gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+                       linux,default-trigger = "heartbeat";
+               };
+
+               led@2 {
+                       label = "pic32mzda_sk:yellow:led2";
+                       gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
+                       linux,default-trigger = "mmc0";
+               };
+
+               led@3 {
+                       label = "pic32mzda_sk:green:led3";
+                       gpios = <&gpio7 2 GPIO_ACTIVE_HIGH>;
+                       default-state = "on";
+               };
+       };
+
+       keys0 {
+               compatible = "gpio-keys";
+               pinctrl-0 = <&user_buttons_s0>;
+               pinctrl-names = "default";
+
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               button@sw1 {
+                       label = "ESC";
+                       linux,code = <1>;
+                       gpios = <&gpio1 12 0>;
+               };
+
+               button@sw2 {
+                       label = "Home";
+                       linux,code = <102>;
+                       gpios = <&gpio1 13 0>;
+               };
+
+               button@sw3 {
+                       label = "Menu";
+                       linux,code = <139>;
+                       gpios = <&gpio1 14 0>;
+               };
+       };
+};
+
+&uart2 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart2>;
+       status = "okay";
+};
+
+&uart4 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_uart4>;
+       status = "okay";
+};
+
+&sdhci {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_sdhc1>;
+       status = "okay";
+       assigned-clocks = <&REFCLKO2>, <&REFCLKO4>, <&REFCLKO5>;
+       assigned-clock-rates = <50000000>, <25000000>, <40000000>;
+};
+
+&pic32_pinctrl {
+
+       pinctrl_sdhc1: sdhc1_pins0 {
+               pins = "A6", "D4", "G13", "G12", "G14", "A7", "A0";
+               microchip,digital;
+       };
+
+       user_leds_s0: user_leds_s0 {
+               pins = "H0", "H1", "H2";
+               output-low;
+               microchip,digital;
+       };
+
+       user_buttons_s0: user_buttons_s0 {
+               pins = "B12", "B13", "B14";
+               microchip,digital;
+               input-enable;
+               bias-pull-up;
+       };
+
+       pinctrl_uart2: pinctrl_uart2 {
+               uart2-tx {
+                       pins = "G9";
+                       function = "U2TX";
+                       microchip,digital;
+                       output-high;
+               };
+               uart2-rx {
+                       pins = "B0";
+                       function = "U2RX";
+                       microchip,digital;
+                       input-enable;
+               };
+       };
+
+       pinctrl_uart4: uart4-0 {
+               uart4-tx {
+                       pins = "C3";
+                       function = "U4TX";
+                       microchip,digital;
+                       output-high;
+               };
+               uart4-rx {
+                       pins = "E8";
+                       function = "U4RX";
+                       microchip,digital;
+                       input-enable;
+               };
+       };
+};
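
The gpio-keys entries above use raw linux,code numbers; they match the standard input key codes KEY_ESC (1), KEY_HOME (102) and KEY_MENU (139). A quick check against the UAPI header, illustrative only (on older toolchains the constants live in <linux/input.h> instead):

    #include <stdio.h>
    #include <linux/input-event-codes.h>

    int main(void)
    {
            /* Expected output: ESC=1 HOME=102 MENU=139 */
            printf("ESC=%d HOME=%d MENU=%d\n", KEY_ESC, KEY_HOME, KEY_MENU);
            return 0;
    }
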
index 2d61455d585d2d59f87616d872db5297b98b2ce5..14bd225142832f15822fd44513380eaa4b72f2e6 100644 (file)
@@ -1,9 +1,6 @@
 # All DTBs
 dtb-$(CONFIG_ATH79)                    += ar9132_tl_wr1043nd_v1.dtb
 
-# Select a DTB to build in the kernel
-obj-$(CONFIG_DTB_TL_WR1043ND_V1)       += ar9132_tl_wr1043nd_v1.dtb.o
-
 # Force kbuild to make empty built-in.o if necessary
 obj-                           += dummy.o
 
index 13d0439496a9d28d500b2d3e37b585e3f4541335..3ad4ba9b12fd6a649eb07b201414f18ba60b83a7 100644 (file)
                        };
                };
 
+               usb@1b000100 {
+                       compatible = "qca,ar7100-ehci", "generic-ehci";
+                       reg = <0x1b000100 0x100>;
+
+                       interrupts = <3>;
+                       resets = <&rst 5>;
+
+                       has-transaction-translator;
+
+                       phy-names = "usb";
+                       phys = <&usb_phy>;
+
+                       status = "disabled";
+               };
+
                spi@1f000000 {
                        compatible = "qca,ar9132-spi", "qca,ar7100-spi";
                        reg = <0x1f000000 0x10>;
                        #size-cells = <0>;
                };
        };
+
+       usb_phy: usb-phy {
+               compatible = "qca,ar7100-usb-phy";
+
+               reset-names = "usb-phy", "usb-suspend-override";
+               resets = <&rst 4>, <&rst 3>;
+
+               #phy-cells = <0>;
+
+               status = "disabled";
+       };
 };
index 003015ab34e7cffcde16231b6c9b871862a697a7..e535ee3c26a4e8402a5da1dcc8c0f748205dacca 100644 (file)
                        };
                };
 
+               usb@1b000100 {
+                       status = "okay";
+               };
+
                spi@1f000000 {
                        status = "okay";
                        num-cs = <1>;
                };
        };
 
+       usb-phy {
+               status = "okay";
+       };
+
        gpio-keys {
                compatible = "gpio-keys-polled";
                #address-cells = <1>;
index b7fa9ae28c3659dbf457aecd7cd17255cd34f5da..0449d935db0acde8bf34254c1f27627b7ab1e8b1 100644 (file)
@@ -242,7 +242,7 @@ static int octeon_cpu_disable(void)
        cpumask_clear_cpu(cpu, &cpu_callin_map);
        octeon_fixup_irqs();
 
-       flush_cache_all();
+       __flush_cache_all();
        local_flush_tlb_all();
 
        return 0;
diff --git a/arch/mips/configs/pic32mzda_defconfig b/arch/mips/configs/pic32mzda_defconfig
new file mode 100644 (file)
index 0000000..52192c6
--- /dev/null
@@ -0,0 +1,89 @@
+CONFIG_MACH_PIC32=y
+CONFIG_DTB_PIC32_MZDA_SK=y
+CONFIG_HZ_100=y
+CONFIG_PREEMPT_VOLUNTARY=y
+# CONFIG_SECCOMP is not set
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RELAY=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_SGI_PARTITION=y
+CONFIG_BINFMT_MISC=m
+# CONFIG_SUSPEND is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_INPUT_LEDS=m
+CONFIG_INPUT_POLLDEV=y
+CONFIG_INPUT_MOUSEDEV=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_EVBUG=m
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+CONFIG_KEYBOARD_GPIO_POLLED=m
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_SERIO is not set
+CONFIG_SERIAL_PIC32=y
+CONFIG_SERIAL_PIC32_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_GPIO_SYSFS=y
+# CONFIG_HWMON is not set
+CONFIG_HIDRAW=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_MMC=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MICROCHIP_PIC32=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_GPIO=m
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+# CONFIG_MIPS_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_FSCACHE=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_LZ4=y
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
index 06b9bc7ea14b1da802dc29090174230cc2e1d613..c3212ff2672343fcb52315403c73d16759be77b1 100644 (file)
 #ifndef __ASM_CACHEOPS_H
 #define __ASM_CACHEOPS_H
 
+/*
+ * Most cache ops are split into a 2-bit field identifying the cache, and a
+ * 3-bit field identifying the cache operation.
+ */
+#define CacheOp_Cache                  0x03
+#define CacheOp_Op                     0x1c
+
+#define Cache_I                                0x00
+#define Cache_D                                0x01
+#define Cache_T                                0x02
+#define Cache_S                                0x03
+
+#define Index_Writeback_Inv            0x00
+#define Index_Load_Tag                 0x04
+#define Index_Store_Tag                        0x08
+#define Hit_Invalidate                 0x10
+#define Hit_Writeback_Inv              0x14    /* not with Cache_I though */
+#define Hit_Writeback                  0x18
+
 /*
  * Cache Operations available on all MIPS processors with R4000-style caches
  */
-#define Index_Invalidate_I             0x00
-#define Index_Writeback_Inv_D          0x01
-#define Index_Load_Tag_I               0x04
-#define Index_Load_Tag_D               0x05
-#define Index_Store_Tag_I              0x08
-#define Index_Store_Tag_D              0x09
-#define Hit_Invalidate_I               0x10
-#define Hit_Invalidate_D               0x11
-#define Hit_Writeback_Inv_D            0x15
+#define Index_Invalidate_I             (Cache_I | Index_Writeback_Inv)
+#define Index_Writeback_Inv_D          (Cache_D | Index_Writeback_Inv)
+#define Index_Load_Tag_I               (Cache_I | Index_Load_Tag)
+#define Index_Load_Tag_D               (Cache_D | Index_Load_Tag)
+#define Index_Store_Tag_I              (Cache_I | Index_Store_Tag)
+#define Index_Store_Tag_D              (Cache_D | Index_Store_Tag)
+#define Hit_Invalidate_I               (Cache_I | Hit_Invalidate)
+#define Hit_Invalidate_D               (Cache_D | Hit_Invalidate)
+#define Hit_Writeback_Inv_D            (Cache_D | Hit_Writeback_Inv)
 
 /*
  * R4000-specific cacheops
  */
-#define Create_Dirty_Excl_D            0x0d
-#define Fill                           0x14
-#define Hit_Writeback_I                        0x18
-#define Hit_Writeback_D                        0x19
+#define Create_Dirty_Excl_D            (Cache_D | 0x0c)
+#define Fill                           (Cache_I | 0x14)
+#define Hit_Writeback_I                        (Cache_I | Hit_Writeback)
+#define Hit_Writeback_D                        (Cache_D | Hit_Writeback)
 
 /*
  * R4000SC and R4400SC-specific cacheops
  */
-#define Index_Invalidate_SI            0x02
-#define Index_Writeback_Inv_SD         0x03
-#define Index_Load_Tag_SI              0x06
-#define Index_Load_Tag_SD              0x07
-#define Index_Store_Tag_SI             0x0A
-#define Index_Store_Tag_SD             0x0B
-#define Create_Dirty_Excl_SD           0x0f
-#define Hit_Invalidate_SI              0x12
-#define Hit_Invalidate_SD              0x13
-#define Hit_Writeback_Inv_SD           0x17
-#define Hit_Writeback_SD               0x1b
-#define Hit_Set_Virtual_SI             0x1e
-#define Hit_Set_Virtual_SD             0x1f
+#define Cache_SI                       0x02
+#define Cache_SD                       0x03
+
+#define Index_Invalidate_SI            (Cache_SI | Index_Writeback_Inv)
+#define Index_Writeback_Inv_SD         (Cache_SD | Index_Writeback_Inv)
+#define Index_Load_Tag_SI              (Cache_SI | Index_Load_Tag)
+#define Index_Load_Tag_SD              (Cache_SD | Index_Load_Tag)
+#define Index_Store_Tag_SI             (Cache_SI | Index_Store_Tag)
+#define Index_Store_Tag_SD             (Cache_SD | Index_Store_Tag)
+#define Create_Dirty_Excl_SD           (Cache_SD | 0x0c)
+#define Hit_Invalidate_SI              (Cache_SI | Hit_Invalidate)
+#define Hit_Invalidate_SD              (Cache_SD | Hit_Invalidate)
+#define Hit_Writeback_Inv_SD           (Cache_SD | Hit_Writeback_Inv)
+#define Hit_Writeback_SD               (Cache_SD | Hit_Writeback)
+#define Hit_Set_Virtual_SI             (Cache_SI | 0x1c)
+#define Hit_Set_Virtual_SD             (Cache_SD | 0x1c)
 
 /*
  * R5000-specific cacheops
  */
-#define R5K_Page_Invalidate_S          0x17
+#define R5K_Page_Invalidate_S          (Cache_S | 0x14)
 
 /*
  * RM7000-specific cacheops
  */
-#define Page_Invalidate_T              0x16
-#define Index_Store_Tag_T              0x0a
-#define Index_Load_Tag_T               0x06
+#define Page_Invalidate_T              (Cache_T | 0x14)
+#define Index_Store_Tag_T              (Cache_T | Index_Store_Tag)
+#define Index_Load_Tag_T               (Cache_T | Index_Load_Tag)
 
 /*
  * R10000-specific cacheops
  * Cacheops 0x02, 0x06, 0x0a, 0x0c-0x0e, 0x16, 0x1a and 0x1e are unused.
  * Most of the _S cacheops are identical to the R4000SC _SD cacheops.
  */
-#define Index_Writeback_Inv_S          0x03
-#define Index_Load_Tag_S               0x07
-#define Index_Store_Tag_S              0x0B
-#define Hit_Invalidate_S               0x13
+#define Index_Writeback_Inv_S          (Cache_S | Index_Writeback_Inv)
+#define Index_Load_Tag_S               (Cache_S | Index_Load_Tag)
+#define Index_Store_Tag_S              (Cache_S | Index_Store_Tag)
+#define Hit_Invalidate_S               (Cache_S | Hit_Invalidate)
 #define Cache_Barrier                  0x14
-#define Hit_Writeback_Inv_S            0x17
-#define Index_Load_Data_I              0x18
-#define Index_Load_Data_D              0x19
-#define Index_Load_Data_S              0x1b
-#define Index_Store_Data_I             0x1c
-#define Index_Store_Data_D             0x1d
-#define Index_Store_Data_S             0x1f
+#define Hit_Writeback_Inv_S            (Cache_S | Hit_Writeback_Inv)
+#define Index_Load_Data_I              (Cache_I | 0x18)
+#define Index_Load_Data_D              (Cache_D | 0x18)
+#define Index_Load_Data_S              (Cache_S | 0x18)
+#define Index_Store_Data_I             (Cache_I | 0x1c)
+#define Index_Store_Data_D             (Cache_D | 0x1c)
+#define Index_Store_Data_S             (Cache_S | 0x1c)
 
 /*
  * Loongson2-specific cacheops
  */
-#define Hit_Invalidate_I_Loongson2     0x00
+#define Hit_Invalidate_I_Loongson2     (Cache_I | 0x00)
 
 #endif /* __ASM_CACHEOPS_H */
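
The rework above splits every cache op into a cache-select field (CacheOp_Cache) and an operation field (CacheOp_Op), so the old literals become compositions such as Cache_D | Hit_Writeback_Inv. A stand-alone check of the encoding, with the values copied from the hunk:

    #include <stdio.h>

    #define CacheOp_Cache           0x03
    #define CacheOp_Op              0x1c

    #define Cache_D                 0x01
    #define Index_Store_Tag         0x08
    #define Hit_Writeback_Inv       0x14

    #define Index_Store_Tag_D       (Cache_D | Index_Store_Tag)
    #define Hit_Writeback_Inv_D     (Cache_D | Hit_Writeback_Inv)

    int main(void)
    {
            /* The composed values match the old literals 0x09 and 0x15. */
            printf("Index_Store_Tag_D   = 0x%02x\n", Index_Store_Tag_D);
            printf("Hit_Writeback_Inv_D = 0x%02x\n", Hit_Writeback_Inv_D);
            printf("cache field = %d, op field = 0x%02x\n",
                   Hit_Writeback_Inv_D & CacheOp_Cache,
                   Hit_Writeback_Inv_D & CacheOp_Op);
            return 0;
    }

Nothing changes at the binary level; the composition only makes the cache/op split explicit.
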
index d1e04c943f5f7c7d9ec232851e14d9b5c4215496..eeec8c8e2da2ed59e3d89d099721209203e9c243 100644 (file)
 # define cpu_has_small_pages   (cpu_data[0].options & MIPS_CPU_SP)
 #endif
 
+#ifndef cpu_has_nan_legacy
+#define cpu_has_nan_legacy     (cpu_data[0].options & MIPS_CPU_NAN_LEGACY)
+#endif
+#ifndef cpu_has_nan_2008
+#define cpu_has_nan_2008       (cpu_data[0].options & MIPS_CPU_NAN_2008)
+#endif
+
 #endif /* __ASM_CPU_FEATURES_H */
index 82ad15f11049284c2f347fc66b68ea5091fb810d..a97ca97285ecf1dd897c9c7a9610209e15ad70c7 100644 (file)
@@ -386,6 +386,8 @@ enum cpu_type_enum {
 #define MIPS_CPU_BP_GHIST      0x8000000000ull /* R12K+ Branch Prediction Global History */
 #define MIPS_CPU_SP            0x10000000000ull /* Small (1KB) page support */
 #define MIPS_CPU_FTLB          0x20000000000ull /* CPU has Fixed-page-size TLB */
+#define MIPS_CPU_NAN_LEGACY    0x40000000000ull /* Legacy NaN implemented */
+#define MIPS_CPU_NAN_2008      0x80000000000ull /* 2008 NaN implemented */
 
 /*
  * CPU ASE encodings
index b01a6ff468e00aab5d185a9dd53023101b34f2b7..cefb7a5968783094befa3ee5925b02ab1f972fac 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/fs.h>
 #include <uapi/linux/elf.h>
 
-#include <asm/cpu-info.h>
 #include <asm/current.h>
 
 /* ELF header e_flags defines. */
@@ -44,6 +43,7 @@
 #define EF_MIPS_OPTIONS_FIRST  0x00000080
 #define EF_MIPS_32BITMODE      0x00000100
 #define EF_MIPS_FP64           0x00000200
+#define EF_MIPS_NAN2008                0x00000400
 #define EF_MIPS_ABI            0x0000f000
 #define EF_MIPS_ARCH           0xf0000000
 
@@ -305,7 +305,7 @@ do {                                                                        \
                                                                        \
        current->thread.abi = &mips_abi;                                \
                                                                        \
-       current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31;            \
+       mips_set_personality_nan(state);                                \
 } while (0)
 
 #endif /* CONFIG_32BIT */
@@ -367,7 +367,7 @@ do {                                                                        \
        else                                                            \
                current->thread.abi = &mips_abi;                        \
                                                                        \
-       current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31;            \
+       mips_set_personality_nan(state);                                \
                                                                        \
        p = personality(current->personality);                          \
        if (p != PER_LINUX32 && p != PER_LINUX)                         \
@@ -432,6 +432,7 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp);
 
 struct arch_elf_state {
+       int nan_2008;
        int fp_abi;
        int interp_fp_abi;
        int overall_fp_mode;
@@ -440,17 +441,23 @@ struct arch_elf_state {
 #define MIPS_ABI_FP_UNKNOWN    (-1)    /* Unknown FP ABI (kernel internal) */
 
 #define INIT_ARCH_ELF_STATE {                  \
+       .nan_2008 = -1,                         \
        .fp_abi = MIPS_ABI_FP_UNKNOWN,          \
        .interp_fp_abi = MIPS_ABI_FP_UNKNOWN,   \
        .overall_fp_mode = -1,                  \
 }
 
+/* Whether to accept legacy-NaN and 2008-NaN user binaries.  */
+extern bool mips_use_nan_legacy;
+extern bool mips_use_nan_2008;
+
 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
                            bool is_interp, struct arch_elf_state *state);
 
-extern int arch_check_elf(void *ehdr, bool has_interpreter,
+extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
                          struct arch_elf_state *state);
 
+extern void mips_set_personality_nan(struct arch_elf_state *state);
 extern void mips_set_personality_fp(struct arch_elf_state *state);
 
 #endif /* _ASM_ELF_H */
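
The new EF_MIPS_NAN2008 e_flags bit (0x00000400) is how the loader distinguishes 2008-NaN binaries from legacy ones before calling mips_set_personality_nan(). A userspace sketch that inspects the flag in a 32-bit MIPS ELF header, illustrative only (the constant is defined locally in case the system <elf.h> predates it):

    #include <elf.h>
    #include <stdio.h>

    #ifndef EF_MIPS_NAN2008
    #define EF_MIPS_NAN2008 0x00000400      /* value from the hunk above */
    #endif

    /* Report which NaN encoding a 32-bit MIPS ELF header requests. */
    static void report_nan_mode(const Elf32_Ehdr *ehdr)
    {
            puts(ehdr->e_flags & EF_MIPS_NAN2008 ?
                 "2008-NaN binary" : "legacy-NaN binary");
    }

    int main(void)
    {
            Elf32_Ehdr ehdr = { .e_flags = EF_MIPS_NAN2008 };   /* fake header */

            report_nan_mode(&ehdr);
            return 0;
    }
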
index 2f021cdfba4f8fc60a5348f7822f0014d02a3eff..3225c3c0724b3642392bad7e9c395c9451afe555 100644 (file)
@@ -79,7 +79,7 @@ int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
 /*
  * Break instruction with special math emu break code set
  */
-#define BREAK_MATH (0x0000000d | (BRK_MEMU << 16))
+#define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
 
 #define SIGNALLING_NAN 0x7ff800007ff80000LL
 
index 01880b34a209b37dd5b02d759afda543f7989e9a..64f2500d891b279493f422129fcb2093180a2527 100644 (file)
 
 #ifdef __KERNEL__
 
+#include <linux/bug.h>
 #include <linux/interrupt.h>
 #include <linux/uaccess.h>
+#include <asm/cpu-features.h>
 #include <asm/kmap_types.h>
 
 /* undef for production */
@@ -50,7 +52,7 @@ extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 
-#define flush_cache_kmaps()    flush_cache_all()
+#define flush_cache_kmaps()    BUG_ON(cpu_has_dc_aliases)
 
 extern void kmap_init(void);
 
index d10fd80dbb7e96b898d2230c2d0f5112d02c3bc1..2b4dc7ad53b8a32bc139dd33d15cb5bb533f8d85 100644 (file)
@@ -275,6 +275,7 @@ static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long si
  */
 #define ioremap_cachable(offset, size)                                 \
        __ioremap_mode((offset), (size), _page_cachable_default)
+#define ioremap_cache ioremap_cachable
 
 /*
  * These two are MIPS specific ioremap variant.         ioremap_cacheable_cow
index e7b138b4b3d37f9f038aaec933b5040f0e7c3b2a..65c351e328cc932af4aedca946ee19f9bd43f8c3 100644 (file)
@@ -84,41 +84,11 @@ static inline void arch_local_irq_restore(unsigned long flags)
        : "memory");
 }
 
-static inline void __arch_local_irq_restore(unsigned long flags)
-{
-       __asm__ __volatile__(
-       "       .set    push                                            \n"
-       "       .set    noreorder                                       \n"
-       "       .set    noat                                            \n"
-#if defined(CONFIG_IRQ_MIPS_CPU)
-       /*
-        * Slow, but doesn't suffer from a relatively unlikely race
-        * condition we're having since days 1.
-        */
-       "       beqz    %[flags], 1f                                    \n"
-       "       di                                                      \n"
-       "       ei                                                      \n"
-       "1:                                                             \n"
-#else
-       /*
-        * Fast, dangerous.  Life is fun, life is good.
-        */
-       "       mfc0    $1, $12                                         \n"
-       "       ins     $1, %[flags], 0, 1                              \n"
-       "       mtc0    $1, $12                                         \n"
-#endif
-       "       " __stringify(__irq_disable_hazard) "                   \n"
-       "       .set    pop                                             \n"
-       : [flags] "=r" (flags)
-       : "0" (flags)
-       : "memory");
-}
 #else
 /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
 void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
-void __arch_local_irq_restore(unsigned long flags);
 #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 static inline void arch_local_irq_enable(void)
index 7c191443c7ea199f876a2c495b2b6d3f41bfc1d5..f6b12790716c071db8a3ce17971dd146c30025f5 100644 (file)
@@ -58,7 +58,7 @@
 #define KVM_MAX_VCPUS          1
 #define KVM_USER_MEM_SLOTS     8
 /* memory slots that are not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS  0
+#define KVM_PRIVATE_MEM_SLOTS  0
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 #define KVM_HALT_POLL_NS_DEFAULT 500000
 #define KVM_INVALID_INST               0xdeadbeef
 #define KVM_INVALID_ADDR               0xdeadbeef
 
-#define KVM_MALTA_GUEST_RTC_ADDR       0xb8000070UL
-
-#define GUEST_TICKS_PER_JIFFY          (40000000/HZ)
-#define MS_TO_NS(x)                    (x * 1E6L)
-
-#define CAUSEB_DC                      27
-#define CAUSEF_DC                      (_ULCAST_(1) << 27)
-
 extern atomic_t kvm_mips_instance;
 extern kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
 extern void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
@@ -289,34 +281,6 @@ enum mips_mmu_types {
        MMU_TYPE_R8000
 };
 
-/*
- * Trap codes
- */
-#define T_INT                  0       /* Interrupt pending */
-#define T_TLB_MOD              1       /* TLB modified fault */
-#define T_TLB_LD_MISS          2       /* TLB miss on load or ifetch */
-#define T_TLB_ST_MISS          3       /* TLB miss on a store */
-#define T_ADDR_ERR_LD          4       /* Address error on a load or ifetch */
-#define T_ADDR_ERR_ST          5       /* Address error on a store */
-#define T_BUS_ERR_IFETCH       6       /* Bus error on an ifetch */
-#define T_BUS_ERR_LD_ST                7       /* Bus error on a load or store */
-#define T_SYSCALL              8       /* System call */
-#define T_BREAK                        9       /* Breakpoint */
-#define T_RES_INST             10      /* Reserved instruction exception */
-#define T_COP_UNUSABLE         11      /* Coprocessor unusable */
-#define T_OVFLOW               12      /* Arithmetic overflow */
-
-/*
- * Trap definitions added for r4000 port.
- */
-#define T_TRAP                 13      /* Trap instruction */
-#define T_VCEI                 14      /* Virtual coherency exception */
-#define T_MSAFPE               14      /* MSA floating point exception */
-#define T_FPE                  15      /* Floating point exception */
-#define T_MSADIS               21      /* MSA disabled exception */
-#define T_WATCH                        23      /* Watch address reference */
-#define T_VCED                 31      /* Virtual coherency data */
-
 /* Resume Flags */
 #define RESUME_FLAG_DR         (1<<0)  /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST       (1<<1)  /* Resume host? */
@@ -686,7 +650,6 @@ extern void kvm_mips_dump_host_tlbs(void);
 extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
 extern void kvm_mips_flush_host_tlb(int skip_kseg0);
 extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
-extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index);
 
 extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
                                     unsigned long entryhi);
index 4eee221b0cf0bce93fa7adb7b634c9feebcdf21d..2b3487213d1e932247077d8b3f67ba28612f2683 100644 (file)
@@ -115,6 +115,7 @@ static inline int soc_is_qca955x(void)
        return soc_is_qca9556() || soc_is_qca9558();
 }
 
+void ath79_ddr_wb_flush(unsigned int reg);
 void ath79_ddr_set_pci_windows(void);
 
 extern void __iomem *ath79_pll_base;
index 79cff26d8b36f16cb333d9af2e56383db72b4222..398733e3e2cf65006541cf5c3eff6c68b06b0eb0 100644 (file)
@@ -25,8 +25,6 @@ struct jz_nand_platform_data {
        int                     num_partitions;
        struct mtd_partition    *partitions;
 
-       struct nand_ecclayout   *ecc_layout;
-
        unsigned char banks[JZ_NAND_NUM_BANKS];
 
        void (*ident_callback)(struct platform_device *, struct nand_chip *,
diff --git a/arch/mips/include/asm/mach-pic32/cpu-feature-overrides.h b/arch/mips/include/asm/mach-pic32/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..4682308
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_MACH_PIC32_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_PIC32_CPU_FEATURE_OVERRIDES_H
+
+/*
+ * CPU feature overrides for PIC32 boards
+ */
+#ifdef CONFIG_CPU_MIPS32
+#define cpu_has_vint           1
+#define cpu_has_veic           0
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_4k_cache       1
+#define cpu_has_fpu            0
+#define cpu_has_counter                1
+#define cpu_has_llsc           1
+#define cpu_has_nofpuex                0
+#define cpu_icache_snoops_remote_store 1
+#endif
+
+#ifdef CONFIG_CPU_MIPS64
+#error This platform does not support 64bit.
+#endif
+
+#endif /* __ASM_MACH_PIC32_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-pic32/irq.h b/arch/mips/include/asm/mach-pic32/irq.h
new file mode 100644 (file)
index 0000000..864330c
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#ifndef __ASM_MACH_PIC32_IRQ_H
+#define __ASM_MACH_PIC32_IRQ_H
+
+#define NR_IRQS        256
+#define MIPS_CPU_IRQ_BASE 0
+
+#include_next <irq.h>
+
+#endif /* __ASM_MACH_PIC32_IRQ_H */
diff --git a/arch/mips/include/asm/mach-pic32/pic32.h b/arch/mips/include/asm/mach-pic32/pic32.h
new file mode 100644 (file)
index 0000000..ce52e91
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#ifndef _ASM_MACH_PIC32_H
+#define _ASM_MACH_PIC32_H
+
+#include <linux/io.h>
+
+/*
+ * PIC32 register offsets for SET/CLR/INV where supported.
+ */
+#define PIC32_CLR(_reg)                ((_reg) + 0x04)
+#define PIC32_SET(_reg)                ((_reg) + 0x08)
+#define PIC32_INV(_reg)                ((_reg) + 0x0C)
+
+/*
+ * PIC32 Base Register Offsets
+ */
+#define PIC32_BASE_CONFIG      0x1f800000
+#define PIC32_BASE_OSC         0x1f801200
+#define PIC32_BASE_RESET       0x1f801240
+#define PIC32_BASE_PPS         0x1f801400
+#define PIC32_BASE_UART                0x1f822000
+#define PIC32_BASE_PORT                0x1f860000
+#define PIC32_BASE_DEVCFG2     0x1fc4ff44
+
+/*
+ * Register unlock sequence required for some register access.
+ */
+void pic32_syskey_unlock_debug(const char *fn, const ulong ln);
+#define pic32_syskey_unlock()  \
+       pic32_syskey_unlock_debug(__func__, __LINE__)
+
+#endif /* _ASM_MACH_PIC32_H */
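
The SET/CLR/INV offsets expose the PIC32 write-1-to-set, write-1-to-clear and write-1-to-invert mirrors that sit next to each register, letting a driver flip individual bits without a read-modify-write sequence. A hedged sketch of the usage pattern (the register offset, bit and function name are made up for illustration):

    #include <linux/io.h>
    #include <asm/mach-pic32/pic32.h>

    /* Set, then clear, bit 3 of a hypothetical register at offset 0x40
     * from an already-ioremap()ed peripheral base. */
    static void example_toggle_bit(void __iomem *base)
    {
            writel(1u << 3, base + PIC32_SET(0x40));
            writel(1u << 3, base + PIC32_CLR(0x40));
    }
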
diff --git a/arch/mips/include/asm/mach-pic32/spaces.h b/arch/mips/include/asm/mach-pic32/spaces.h
new file mode 100644 (file)
index 0000000..046a0a9
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ */
+#ifndef _ASM_MACH_PIC32_SPACES_H
+#define _ASM_MACH_PIC32_SPACES_H
+
+#ifdef CONFIG_PIC32MZDA
+#define PHYS_OFFSET    _AC(0x08000000, UL)
+#define UNCAC_BASE     _AC(0xa8000000, UL)
+#endif
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_MACH_PIC32_SPACES_H */
diff --git a/arch/mips/include/asm/mach-ralink/irq.h b/arch/mips/include/asm/mach-ralink/irq.h
new file mode 100644 (file)
index 0000000..4321865
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef __ASM_MACH_RALINK_IRQ_H
+#define __ASM_MACH_RALINK_IRQ_H
+
+#define GIC_NUM_INTRS  64
+#define NR_IRQS 256
+
+#include_next <irq.h>
+
+#endif
diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h
new file mode 100644 (file)
index 0000000..610b61e
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _MT7621_REGS_H_
+#define _MT7621_REGS_H_
+
+#define MT7621_PALMBUS_BASE            0x1C000000
+#define MT7621_PALMBUS_SIZE            0x03FFFFFF
+
+#define MT7621_SYSC_BASE               0x1E000000
+
+#define SYSC_REG_CHIP_NAME0            0x00
+#define SYSC_REG_CHIP_NAME1            0x04
+#define SYSC_REG_CHIP_REV              0x0c
+#define SYSC_REG_SYSTEM_CONFIG0                0x10
+#define SYSC_REG_SYSTEM_CONFIG1                0x14
+
+#define CHIP_REV_PKG_MASK              0x1
+#define CHIP_REV_PKG_SHIFT             16
+#define CHIP_REV_VER_MASK              0xf
+#define CHIP_REV_VER_SHIFT             8
+#define CHIP_REV_ECO_MASK              0xf
+
+#define MT7621_DRAM_BASE                0x0
+#define MT7621_DDR2_SIZE_MIN           32
+#define MT7621_DDR2_SIZE_MAX           256
+
+#define MT7621_CHIP_NAME0              0x3637544D
+#define MT7621_CHIP_NAME1              0x20203132
+
+#define MIPS_GIC_IRQ_BASE           (MIPS_CPU_IRQ_BASE + 8)
+
+#endif
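
MT7621_CHIP_NAME0/1 are the ASCII identification string the SoC reports in SYSC_REG_CHIP_NAME0/1, packed little-endian. A stand-alone decode of the two constants, illustrative only:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define MT7621_CHIP_NAME0       0x3637544D
    #define MT7621_CHIP_NAME1       0x20203132

    int main(void)
    {
            uint32_t name[2] = { MT7621_CHIP_NAME0, MT7621_CHIP_NAME1 };
            char str[9];

            /* On a little-endian host this reproduces the byte order the
             * probe code would read back from the two registers. */
            memcpy(str, name, sizeof(name));
            str[8] = '\0';
            printf("chip name: \"%s\"\n", str);     /* prints "MT7621  " */
            return 0;
    }
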
diff --git a/arch/mips/include/asm/mach-ralink/mt7621/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ralink/mt7621/cpu-feature-overrides.h
new file mode 100644 (file)
index 0000000..15db1b3
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * Ralink MT7621 specific CPU feature overrides
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2015 Felix Fietkau <nbd@openwrt.org>
+ *
+ * This file was derived from: include/asm-mips/cpu-features.h
+ *     Copyright (C) 2003, 2004 Ralf Baechle
+ *     Copyright (C) 2004 Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ */
+#ifndef _MT7621_CPU_FEATURE_OVERRIDES_H
+#define _MT7621_CPU_FEATURE_OVERRIDES_H
+
+#define cpu_has_tlb            1
+#define cpu_has_4kex           1
+#define cpu_has_3k_cache       0
+#define cpu_has_4k_cache       1
+#define cpu_has_tx39_cache     0
+#define cpu_has_sb1_cache      0
+#define cpu_has_fpu            0
+#define cpu_has_32fpr          0
+#define cpu_has_counter                1
+#define cpu_has_watch          1
+#define cpu_has_divec          1
+
+#define cpu_has_prefetch       1
+#define cpu_has_ejtag          1
+#define cpu_has_llsc           1
+
+#define cpu_has_mips16         1
+#define cpu_has_mdmx           0
+#define cpu_has_mips3d         0
+#define cpu_has_smartmips      0
+
+#define cpu_has_mips32r1       1
+#define cpu_has_mips32r2       1
+#define cpu_has_mips64r1       0
+#define cpu_has_mips64r2       0
+
+#define cpu_has_dsp            1
+#define cpu_has_dsp2           0
+#define cpu_has_mipsmt         1
+
+#define cpu_has_64bits         0
+#define cpu_has_64bit_zero_reg 0
+#define cpu_has_64bit_gp_regs  0
+#define cpu_has_64bit_addresses        0
+
+#define cpu_dcache_line_size() 32
+#define cpu_icache_line_size() 32
+
+#define cpu_has_dc_aliases     0
+#define cpu_has_vtag_icache    0
+
+#define cpu_has_rixi           0
+#define cpu_has_tlbinv         0
+#define cpu_has_userlocal      1
+
+#endif /* _MT7621_CPU_FEATURE_OVERRIDES_H */
index 6516e9da51334916b04b6f8689a0857faad5f0ca..b196825a1de9ca1f3b60df1059978569c93ad8c2 100644 (file)
@@ -243,6 +243,10 @@ BUILD_CM_Cx_R_(tcid_8_priority,    0x80)
 #define  CM_GCR_BASE_CMDEFTGT_IOCU0            2
 #define  CM_GCR_BASE_CMDEFTGT_IOCU1            3
 
+/* GCR_RESET_EXT_BASE register fields */
+#define CM_GCR_RESET_EXT_BASE_EVARESET         BIT(31)
+#define CM_GCR_RESET_EXT_BASE_UEB              BIT(30)
+
 /* GCR_ACCESS register fields */
 #define CM_GCR_ACCESS_ACCESSEN_SHF             0
 #define CM_GCR_ACCESS_ACCESSEN_MSK             (_ULCAST_(0xff) << 0)
index 4b89f28047f7118588acf2152ba43e44a5d353e2..1f6ea8352ca90408772f0a7f72f7e213bece28ba 100644 (file)
@@ -52,7 +52,7 @@ do {                                                                  \
        __this_cpu_inc(mipsr2emustats.M);                               \
        err = __get_user(nir, (u32 __user *)regs->cp0_epc);             \
        if (!err) {                                                     \
-               if (nir == BREAK_MATH)                                  \
+               if (nir == BREAK_MATH(0))                               \
                        __this_cpu_inc(mipsr2bdemustats.M);             \
        }                                                               \
        preempt_enable();                                               \
index e43aca183c9918d4138f000c4a385659f8523eae..3ad19ad04d8a282179423e9fcc5ecbb5557d26c7 100644 (file)
 #define CAUSEF_IV              (_ULCAST_(1)   << 23)
 #define CAUSEB_PCI             26
 #define CAUSEF_PCI             (_ULCAST_(1)   << 26)
+#define CAUSEB_DC              27
+#define CAUSEF_DC              (_ULCAST_(1)   << 27)
 #define CAUSEB_CE              28
 #define CAUSEF_CE              (_ULCAST_(3)   << 28)
 #define CAUSEB_TI              30
 #define CAUSEB_BD              31
 #define CAUSEF_BD              (_ULCAST_(1)   << 31)
 
+/*
+ * Cause.ExcCode trap codes.
+ */
+#define EXCCODE_INT            0       /* Interrupt pending */
+#define EXCCODE_MOD            1       /* TLB modified fault */
+#define EXCCODE_TLBL           2       /* TLB miss on load or ifetch */
+#define EXCCODE_TLBS           3       /* TLB miss on a store */
+#define EXCCODE_ADEL           4       /* Address error on a load or ifetch */
+#define EXCCODE_ADES           5       /* Address error on a store */
+#define EXCCODE_IBE            6       /* Bus error on an ifetch */
+#define EXCCODE_DBE            7       /* Bus error on a load or store */
+#define EXCCODE_SYS            8       /* System call */
+#define EXCCODE_BP             9       /* Breakpoint */
+#define EXCCODE_RI             10      /* Reserved instruction exception */
+#define EXCCODE_CPU            11      /* Coprocessor unusable */
+#define EXCCODE_OV             12      /* Arithmetic overflow */
+#define EXCCODE_TR             13      /* Trap instruction */
+#define EXCCODE_MSAFPE         14      /* MSA floating point exception */
+#define EXCCODE_FPE            15      /* Floating point exception */
+#define EXCCODE_TLBRI          19      /* TLB Read-Inhibit exception */
+#define EXCCODE_TLBXI          20      /* TLB Execution-Inhibit exception */
+#define EXCCODE_MSADIS         21      /* MSA disabled exception */
+#define EXCCODE_MDMX           22      /* MDMX unusable exception */
+#define EXCCODE_WATCH          23      /* Watch address reference */
+#define EXCCODE_MCHECK         24      /* Machine check */
+#define EXCCODE_THREAD         25      /* Thread exceptions (MT) */
+#define EXCCODE_DSPDIS         26      /* DSP disabled exception */
+#define EXCCODE_GE             27      /* Virtualized guest exception (VZ) */
+
+/* Implementation specific trap codes used by MIPS cores */
+#define MIPS_EXCCODE_TLBPAR    16      /* TLB parity error exception */
+
 /*
  * Bits in the coprocessor 0 config register.
  */
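
The EXCCODE_* constants replace magic Cause.ExcCode numbers scattered through the tree (the check_daddi() hunk further down switches 12 to EXCCODE_OV, for instance). Assuming the usual MIPS placement of ExcCode in Cause bits 6:2 (that field position is not part of this hunk), a decoder sketch looks like:

    #include <stdio.h>
    #include <stdint.h>

    #define EXCCODE_INT     0       /* Interrupt pending */
    #define EXCCODE_SYS     8       /* System call */
    #define EXCCODE_OV      12      /* Arithmetic overflow */

    /* Assumption: ExcCode occupies bits 6:2 of the Cause register. */
    static unsigned int cause_exccode(uint32_t cause)
    {
            return (cause >> 2) & 0x1f;
    }

    int main(void)
    {
            uint32_t cause = EXCCODE_OV << 2;       /* fabricated value for the demo */

            if (cause_exccode(cause) == EXCCODE_OV)
                    puts("arithmetic overflow exception");
            return 0;
    }
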
index 2046c023022406d8a336197db13df1a75b1e8021..21ed7150fec3f4847aef98ac45cd56241b4f50b3 100644 (file)
@@ -33,7 +33,7 @@
 #define PAGE_SHIFT     16
 #endif
 #define PAGE_SIZE      (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK      (~(PAGE_SIZE - 1))
+#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
 
 /*
  * This is used for calculating the real page sizes
index 98c31e5d95793c68b50d594a5152bd0c2a730c82..108d19376bb126161eea30284b30776cd7b4d58b 100644 (file)
@@ -102,7 +102,6 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
 #include <linux/scatterlist.h>
 #include <linux/string.h>
 #include <asm/io.h>
-#include <asm-generic/pci-bridge.h>
 
 struct pci_dev;
 
index 6995b4a02e2359bf6e2e1b8493bcae55443753f1..9a4fe0133ff1c7d82685d426dc1a91b371a972a6 100644 (file)
@@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
 static inline pte_t pte_mkyoung(pte_t pte)
 {
        pte_val(pte) |= _PAGE_ACCESSED;
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        if (!(pte_val(pte) & _PAGE_NO_READ))
                pte_val(pte) |= _PAGE_SILENT_READ;
        else
@@ -542,7 +542,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
 {
        pmd_val(pmd) |= _PAGE_ACCESSED;
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        if (!(pmd_val(pmd) & _PAGE_NO_READ))
                pmd_val(pmd) |= _PAGE_SILENT_READ;
        else
index 9b44d5a816fa3ee64faf18a80c7f415dfb681a4d..ddea53e3a9bb7bbd56463a0727ff2b3bc378f614 100644 (file)
@@ -116,7 +116,8 @@ enum cop_op {
        dmtc_op       = 0x05, ctc_op        = 0x06,
        mthc0_op      = 0x06, mthc_op       = 0x07,
        bc_op         = 0x08, bc1eqz_op     = 0x09,
-       bc1nez_op     = 0x0d, cop_op        = 0x10,
+       mfmc0_op      = 0x0b, bc1nez_op     = 0x0d,
+       wrpgpr_op     = 0x0e, cop_op        = 0x10,
        copm_op       = 0x18
 };
 
@@ -529,7 +530,7 @@ enum MIPS6e_i8_func {
 };
 
 /*
- * (microMIPS & MIPS16e) NOP instruction.
+ * (microMIPS) NOP instruction.
  */
 #define MM_NOP16       0x0c00
 
@@ -679,7 +680,7 @@ struct fp0_format {         /* FPU multiply and add format (MIPS32) */
        ;))))))
 };
 
-struct mm_fp0_format {         /* FPU multipy and add format (microMIPS) */
+struct mm_fp0_format {         /* FPU multiply and add format (microMIPS) */
        __BITFIELD_FIELD(unsigned int opcode : 6,
        __BITFIELD_FIELD(unsigned int ft : 5,
        __BITFIELD_FIELD(unsigned int fs : 5,
@@ -799,6 +800,13 @@ struct mm_x_format {               /* Scaled indexed load format (microMIPS) */
        ;)))))
 };
 
+struct mm_a_format {           /* ADDIUPC format (microMIPS) */
+       __BITFIELD_FIELD(unsigned int opcode : 6,
+       __BITFIELD_FIELD(unsigned int rs : 3,
+       __BITFIELD_FIELD(signed int simmediate : 23,
+       ;)))
+};
+
 /*
  * microMIPS instruction formats (16-bit length)
  */
@@ -940,6 +948,7 @@ union mips_instruction {
        struct mm_i_format mm_i_format;
        struct mm_m_format mm_m_format;
        struct mm_x_format mm_x_format;
+       struct mm_a_format mm_a_format;
        struct mm_b0_format mm_b0_format;
        struct mm_b1_format mm_b1_format;
        struct mm16_m_format mm16_m_format ;
index 09f4034f239f511867a4d56792de909d0ad0c773..6392dbe504fb3b4050210f7a5ca993362efa352b 100644 (file)
@@ -190,7 +190,7 @@ static inline void check_daddi(void)
        printk("Checking for the daddi bug... ");
 
        local_irq_save(flags);
-       handler = set_except_vector(12, handle_daddi_ov);
+       handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
        /*
         * The following code fails to trigger an overflow exception
         * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
@@ -214,7 +214,7 @@ static inline void check_daddi(void)
                ".set   pop"
                : "=r" (v), "=&r" (tmp)
                : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
-       set_except_vector(12, handler);
+       set_except_vector(EXCCODE_OV, handler);
        local_irq_restore(flags);
 
        if (daddi_ov) {
@@ -225,14 +225,14 @@ static inline void check_daddi(void)
        printk("yes, workaround... ");
 
        local_irq_save(flags);
-       handler = set_except_vector(12, handle_daddi_ov);
+       handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
        asm volatile(
                "addiu  %1, $0, %2\n\t"
                "dsrl   %1, %1, 1\n\t"
                "daddi  %0, %1, %3"
                : "=r" (v), "=&r" (tmp)
                : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
-       set_except_vector(12, handler);
+       set_except_vector(EXCCODE_OV, handler);
        local_irq_restore(flags);
 
        if (daddi_ov) {
index 6b9064499bd3dfb49ef0c09ad3da48034dea0ff2..b725b713b9f8b56e3763784c18b0550dffd77b6d 100644 (file)
@@ -98,6 +98,161 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
        c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask;
 }
 
+/*
+ * Determine the IEEE 754 NaN encodings and ABS.fmt/NEG.fmt execution modes
+ * supported by FPU hardware.
+ */
+static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
+{
+       if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+                           MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+                           MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+               unsigned long sr, fir, fcsr, fcsr0, fcsr1;
+
+               sr = read_c0_status();
+               __enable_fpu(FPU_AS_IS);
+
+               fir = read_32bit_cp1_register(CP1_REVISION);
+               if (fir & MIPS_FPIR_HAS2008) {
+                       fcsr = read_32bit_cp1_register(CP1_STATUS);
+
+                       fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+                       write_32bit_cp1_register(CP1_STATUS, fcsr0);
+                       fcsr0 = read_32bit_cp1_register(CP1_STATUS);
+
+                       fcsr1 = fcsr | FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+                       write_32bit_cp1_register(CP1_STATUS, fcsr1);
+                       fcsr1 = read_32bit_cp1_register(CP1_STATUS);
+
+                       write_32bit_cp1_register(CP1_STATUS, fcsr);
+
+                       if (!(fcsr0 & FPU_CSR_NAN2008))
+                               c->options |= MIPS_CPU_NAN_LEGACY;
+                       if (fcsr1 & FPU_CSR_NAN2008)
+                               c->options |= MIPS_CPU_NAN_2008;
+
+                       if ((fcsr0 ^ fcsr1) & FPU_CSR_ABS2008)
+                               c->fpu_msk31 &= ~FPU_CSR_ABS2008;
+                       else
+                               c->fpu_csr31 |= fcsr & FPU_CSR_ABS2008;
+
+                       if ((fcsr0 ^ fcsr1) & FPU_CSR_NAN2008)
+                               c->fpu_msk31 &= ~FPU_CSR_NAN2008;
+                       else
+                               c->fpu_csr31 |= fcsr & FPU_CSR_NAN2008;
+               } else {
+                       c->options |= MIPS_CPU_NAN_LEGACY;
+               }
+
+               write_c0_status(sr);
+       } else {
+               c->options |= MIPS_CPU_NAN_LEGACY;
+       }
+}
+
+/*
+ * IEEE 754 conformance mode to use.  Affects the NaN encoding and the
+ * ABS.fmt/NEG.fmt execution mode.
+ */
+static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT;
+
+/*
+ * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes
+ * to be supported by the FPU emulator according to the IEEE 754 conformance
+ * mode selected.  Note that "relaxed" configures the emulator so that it
+ * accepts 2008-NaN binaries even for legacy processors.
+ */
+static void cpu_set_nofpu_2008(struct cpuinfo_mips *c)
+{
+       c->options &= ~(MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY);
+       c->fpu_csr31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+       c->fpu_msk31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+
+       switch (ieee754) {
+       case STRICT:
+               if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+                                   MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+                                   MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+                       c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
+               } else {
+                       c->options |= MIPS_CPU_NAN_LEGACY;
+                       c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+               }
+               break;
+       case LEGACY:
+               c->options |= MIPS_CPU_NAN_LEGACY;
+               c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+               break;
+       case STD2008:
+               c->options |= MIPS_CPU_NAN_2008;
+               c->fpu_csr31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+               c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+               break;
+       case RELAXED:
+               c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
+               break;
+       }
+}
+
+/*
+ * Override the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
+ * according to the "ieee754=" parameter.
+ */
+static void cpu_set_nan_2008(struct cpuinfo_mips *c)
+{
+       switch (ieee754) {
+       case STRICT:
+               mips_use_nan_legacy = !!cpu_has_nan_legacy;
+               mips_use_nan_2008 = !!cpu_has_nan_2008;
+               break;
+       case LEGACY:
+               mips_use_nan_legacy = !!cpu_has_nan_legacy;
+               mips_use_nan_2008 = !cpu_has_nan_legacy;
+               break;
+       case STD2008:
+               mips_use_nan_legacy = !cpu_has_nan_2008;
+               mips_use_nan_2008 = !!cpu_has_nan_2008;
+               break;
+       case RELAXED:
+               mips_use_nan_legacy = true;
+               mips_use_nan_2008 = true;
+               break;
+       }
+}
+
+/*
+ * IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode override
+ * settings:
+ *
+ * strict:  accept binaries that request a NaN encoding supported by the FPU
+ * legacy:  only accept legacy-NaN binaries
+ * 2008:    only accept 2008-NaN binaries
+ * relaxed: accept any binaries regardless of whether supported by the FPU
+ */
+static int __init ieee754_setup(char *s)
+{
+       if (!s)
+               return -1;
+       else if (!strcmp(s, "strict"))
+               ieee754 = STRICT;
+       else if (!strcmp(s, "legacy"))
+               ieee754 = LEGACY;
+       else if (!strcmp(s, "2008"))
+               ieee754 = STD2008;
+       else if (!strcmp(s, "relaxed"))
+               ieee754 = RELAXED;
+       else
+               return -1;
+
+       if (!(boot_cpu_data.options & MIPS_CPU_FPU))
+               cpu_set_nofpu_2008(&boot_cpu_data);
+       cpu_set_nan_2008(&boot_cpu_data);
+
+       return 0;
+}
+
+early_param("ieee754", ieee754_setup);
+
 /*
  * Set the FIR feature flags for the FPU emulator.
  */
@@ -113,6 +268,8 @@ static void cpu_set_nofpu_id(struct cpuinfo_mips *c)
        if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
                            MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
                value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W;
+       if (c->options & MIPS_CPU_NAN_2008)
+               value |= MIPS_FPIR_HAS2008;
        c->fpu_id = value;
 }
 
@@ -137,6 +294,8 @@ static void cpu_set_fpu_opts(struct cpuinfo_mips *c)
        }
 
        cpu_set_fpu_fcsr_mask(c);
+       cpu_set_fpu_2008(c);
+       cpu_set_nan_2008(c);
 }
 
 /*
@@ -147,6 +306,8 @@ static void cpu_set_nofpu_opts(struct cpuinfo_mips *c)
        c->options &= ~MIPS_CPU_FPU;
        c->fpu_msk31 = mips_nofpu_msk31;
 
+       cpu_set_nofpu_2008(c);
+       cpu_set_nan_2008(c);
        cpu_set_nofpu_id(c);
 }
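
The probe in cpu_set_fpu_2008() above classifies each of the FCSR ABS2008/NAN2008 bits by writing both polarities and comparing what reads back: a bit that differs between the two reads is writable (both encodings available), otherwise it is hard-wired to whichever value sticks. A minimal userspace sketch of that pattern follows, with fcsr_read()/fcsr_write() as stand-ins for the CP1 accessors and an assumed hardware mask; it is an illustration, not the kernel code.

/*
 * Sketch of the write/read-back probe: clear both mode bits, set both mode
 * bits, and compare what sticks.  fcsr_hw/fcsr_wmask are assumptions that
 * model a CPU where only NAN2008 is writable.
 */
#include <stdio.h>

#define FCSR_ABS2008 (1u << 19)
#define FCSR_NAN2008 (1u << 18)

static unsigned int fcsr_hw;                    /* pretend hardware FCSR */
static unsigned int fcsr_wmask = FCSR_NAN2008;  /* pretend only NAN2008 writable */

static unsigned int fcsr_read(void) { return fcsr_hw; }
static void fcsr_write(unsigned int v)
{
        fcsr_hw = (fcsr_hw & ~fcsr_wmask) | (v & fcsr_wmask);
}

int main(void)
{
        unsigned int saved = fcsr_read(), fcsr0, fcsr1;

        fcsr_write(saved & ~(FCSR_ABS2008 | FCSR_NAN2008));
        fcsr0 = fcsr_read();            /* what remains with the bits cleared */
        fcsr_write(saved | FCSR_ABS2008 | FCSR_NAN2008);
        fcsr1 = fcsr_read();            /* what remains with the bits set */
        fcsr_write(saved);              /* restore the original value */

        /* A bit that changed between the two reads is writable; otherwise it
         * is fixed at whatever value both reads agree on. */
        printf("NAN2008: %s\n", ((fcsr0 ^ fcsr1) & FCSR_NAN2008) ? "writable" :
               (fcsr0 & FCSR_NAN2008) ? "fixed 2008" : "fixed legacy");
        printf("ABS2008: %s\n", ((fcsr0 ^ fcsr1) & FCSR_ABS2008) ? "writable" :
               (fcsr0 & FCSR_ABS2008) ? "fixed 2008" : "fixed legacy");
        return 0;
}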
 
index 4a4d9e067c89427fc34e990586f0e8237a9418ba..c3c234dc0c07748ad7696f7b4d3c395df3954293 100644 (file)
 #include <linux/elf.h>
 #include <linux/sched.h>
 
+#include <asm/cpu-info.h>
+
+/* Whether to accept legacy-NaN and 2008-NaN user binaries.  */
+bool mips_use_nan_legacy;
+bool mips_use_nan_2008;
+
 /* FPU modes */
 enum {
        FP_FRE,
@@ -68,15 +74,23 @@ static struct mode_req none_req = { true, true, false, true, true };
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
                     bool is_interp, struct arch_elf_state *state)
 {
-       struct elf32_hdr *ehdr32 = _ehdr;
+       union {
+               struct elf32_hdr e32;
+               struct elf64_hdr e64;
+       } *ehdr = _ehdr;
        struct elf32_phdr *phdr32 = _phdr;
        struct elf64_phdr *phdr64 = _phdr;
        struct mips_elf_abiflags_v0 abiflags;
+       bool elf32;
+       u32 flags;
        int ret;
 
+       elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+       flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
+
        /* Let's see if this is an O32 ELF */
-       if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
-               if (ehdr32->e_flags & EF_MIPS_FP64) {
+       if (elf32) {
+               if (flags & EF_MIPS_FP64) {
                        /*
                         * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
                         * later if needed
@@ -120,13 +134,50 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
        return 0;
 }
 
-int arch_check_elf(void *_ehdr, bool has_interpreter,
+int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
                   struct arch_elf_state *state)
 {
-       struct elf32_hdr *ehdr = _ehdr;
+       union {
+               struct elf32_hdr e32;
+               struct elf64_hdr e64;
+       } *ehdr = _ehdr;
+       union {
+               struct elf32_hdr e32;
+               struct elf64_hdr e64;
+       } *iehdr = _interp_ehdr;
        struct mode_req prog_req, interp_req;
        int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
-       bool is_mips64;
+       bool elf32;
+       u32 flags;
+
+       elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+       flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
+
+       /*
+        * Determine the NaN personality and reject the binary if not allowed.
+        * Also ensure that any interpreter matches the executable.
+        */
+       if (flags & EF_MIPS_NAN2008) {
+               if (mips_use_nan_2008)
+                       state->nan_2008 = 1;
+               else
+                       return -ENOEXEC;
+       } else {
+               if (mips_use_nan_legacy)
+                       state->nan_2008 = 0;
+               else
+                       return -ENOEXEC;
+       }
+       if (has_interpreter) {
+               bool ielf32;
+               u32 iflags;
+
+               ielf32 = iehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+               iflags = ielf32 ? iehdr->e32.e_flags : iehdr->e64.e_flags;
+
+               if ((flags ^ iflags) & EF_MIPS_NAN2008)
+                       return -ELIBBAD;
+       }
 
        if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
                return 0;
@@ -142,21 +193,18 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
                abi0 = abi1 = fp_abi;
        }
 
-       is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
-                   (ehdr->e_flags & EF_MIPS_ABI2);
+       if (elf32 && !(flags & EF_MIPS_ABI2)) {
+               /* Default to a mode capable of running code expecting FR=0 */
+               state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
 
-       if (is_mips64) {
+               /* Allow all ABIs we know about */
+               max_abi = MIPS_ABI_FP_64A;
+       } else {
                /* MIPS64 code always uses FR=1, thus the default is easy */
                state->overall_fp_mode = FP_FR1;
 
                /* Disallow access to the various FPXX & FP64 ABIs */
                max_abi = MIPS_ABI_FP_SOFT;
-       } else {
-               /* Default to a mode capable of running code expecting FR=0 */
-               state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
-
-               /* Allow all ABIs we know about */
-               max_abi = MIPS_ABI_FP_64A;
        }
 
        if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
@@ -254,3 +302,27 @@ void mips_set_personality_fp(struct arch_elf_state *state)
                BUG();
        }
 }
+
+/*
+ * Select the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
+ * in FCSR according to the ELF NaN personality.
+ */
+void mips_set_personality_nan(struct arch_elf_state *state)
+{
+       struct cpuinfo_mips *c = &boot_cpu_data;
+       struct task_struct *t = current;
+
+       t->thread.fpu.fcr31 = c->fpu_csr31;
+       switch (state->nan_2008) {
+       case 0:
+               break;
+       case 1:
+               if (!(c->fpu_msk31 & FPU_CSR_NAN2008))
+                       t->thread.fpu.fcr31 |= FPU_CSR_NAN2008;
+               if (!(c->fpu_msk31 & FPU_CSR_ABS2008))
+                       t->thread.fpu.fcr31 |= FPU_CSR_ABS2008;
+               break;
+       default:
+               BUG();
+       }
+}
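
A minimal standalone sketch of the accept/reject decision made in arch_check_elf() above: use_nan_legacy/use_nan_2008 stand in for the mips_use_nan_* policy flags, EF_MIPS_NAN2008 is the e_flags bit used for 2008-NaN objects, and a Linux <errno.h> is assumed for ELIBBAD. Nothing here is the kernel API itself.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define EF_MIPS_NAN2008 0x00000400

static bool use_nan_legacy = true;      /* assumed policy for this sketch */
static bool use_nan_2008   = true;

static int check_nan(unsigned int e_flags, bool has_interp, unsigned int i_flags)
{
        /* Reject the executable outright if its NaN encoding is not allowed. */
        if ((e_flags & EF_MIPS_NAN2008) ? !use_nan_2008 : !use_nan_legacy)
                return -ENOEXEC;
        /* The dynamic linker must use the same NaN encoding as the program. */
        if (has_interp && ((e_flags ^ i_flags) & EF_MIPS_NAN2008))
                return -ELIBBAD;
        return 0;
}

int main(void)
{
        printf("%d\n", check_nan(EF_MIPS_NAN2008, true, 0)); /* mismatch: -ELIBBAD */
        printf("%d\n", check_nan(0, false, 0));              /* accepted: 0 */
        return 0;
}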
index c6854d9df926d5c44e3654bcc7fd1d57136741b8..705be43c35333f648f04a96f26a4104ba172c46c 100644 (file)
@@ -21,7 +21,7 @@ static struct txx9_pio_reg __iomem *txx9_pioptr;
 
 static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
 {
-       return __raw_readl(&txx9_pioptr->din) & (1 << offset);
+       return !!(__raw_readl(&txx9_pioptr->din) & (1 << offset));
 }
 
 static void txx9_gpio_set_raw(unsigned int offset, int value)
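
The txx9_gpio_get() fix above matters because gpio_chip ->get callers expect a plain 0 or 1: the raw masked value is some power of two, and for bit 31 it even turns negative once truncated to int on the usual two's-complement ABI, which then looks like an error code. A tiny standalone illustration (not the driver itself):

#include <stdio.h>

static int get_raw(unsigned int reg, unsigned int offset)
{
        return reg & (1u << offset);      /* old style: 0 or some large value */
}

static int get_bool(unsigned int reg, unsigned int offset)
{
        return !!(reg & (1u << offset));  /* new style: strictly 0 or 1 */
}

int main(void)
{
        /* On common ABIs the raw form prints a negative number for bit 31. */
        printf("%d vs %d\n", get_raw(0x80000000u, 31), get_bool(0x80000000u, 31));
        return 0;
}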
index 4f0ac78d17f196e7c34de33af8bdcd4f708392e3..a5279b2f31989f8c8dcd915fccb233a4ada33332 100644 (file)
@@ -548,9 +548,6 @@ static const struct pt_regs_offset regoffset_table[] = {
        REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
        REG_OFFSET_NAME(c0_cause, cp0_cause),
        REG_OFFSET_NAME(c0_epc, cp0_epc),
-#ifdef CONFIG_MIPS_MT_SMTC
-       REG_OFFSET_NAME(c0_tcstatus, cp0_tcstatus),
-#endif
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
        REG_OFFSET_NAME(mpl0, mpl[0]),
        REG_OFFSET_NAME(mpl1, mpl[1]),
index 66aac55df3497f79d39f1b203601eccf3a25a48f..c745f0ea257734a1d0cb908b0fe69930bb370501 100644 (file)
@@ -623,7 +623,7 @@ static void __init request_crashkernel(struct resource *res)
 
 #define USE_PROM_CMDLINE       IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
 #define USE_DTB_CMDLINE                IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
-#define EXTEND_WITH_PROM       IS_ENABLED(CONFIG_MIPS_CMDLINE_EXTEND)
+#define EXTEND_WITH_PROM       IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
 
 static void __init arch_mem_init(char **cmdline_p)
 {
@@ -732,21 +732,23 @@ static void __init resource_init(void)
                        end = HIGHMEM_START - 1;
 
                res = alloc_bootmem(sizeof(struct resource));
+
+               res->start = start;
+               res->end = end;
+               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
                switch (boot_mem_map.map[i].type) {
                case BOOT_MEM_RAM:
                case BOOT_MEM_INIT_RAM:
                case BOOT_MEM_ROM_DATA:
                        res->name = "System RAM";
+                       res->flags |= IORESOURCE_SYSRAM;
                        break;
                case BOOT_MEM_RESERVED:
                default:
                        res->name = "reserved";
                }
 
-               res->start = start;
-               res->end = end;
-
-               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);
 
                /*
index e04c8057b88238956efe64c519dfa3aac41e1ba9..2ad4e4c96d61cb02f8703d46efde1c596649a5df 100644 (file)
@@ -202,6 +202,9 @@ static void boot_core(unsigned core)
        /* Ensure its coherency is disabled */
        write_gcr_co_coherence(0);
 
+       /* Start it with the legacy memory map and exception base */
+       write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);
+
        /* Ensure the core can access the GCRs */
        access = read_gcr_access();
        access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
index 2242bdd4370eb19851ff5a31540a02c65fa26498..4472a7f985776ad78a6e032a333cacd5bb2c7415 100644 (file)
 #include <asm/barrier.h>
 #include <asm/mipsregs.h>
 
-static atomic_t count_start_flag = ATOMIC_INIT(0);
+static unsigned int initcount = 0;
 static atomic_t count_count_start = ATOMIC_INIT(0);
 static atomic_t count_count_stop = ATOMIC_INIT(0);
-static atomic_t count_reference = ATOMIC_INIT(0);
 
 #define COUNTON 100
-#define NR_LOOPS 5
+#define NR_LOOPS 3
 
 void synchronise_count_master(int cpu)
 {
        int i;
        unsigned long flags;
-       unsigned int initcount;
 
        printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
 
        local_irq_save(flags);
 
-       /*
-        * Notify the slaves that it's time to start
-        */
-       atomic_set(&count_reference, read_c0_count());
-       atomic_set(&count_start_flag, cpu);
-       smp_wmb();
-
-       /* Count will be initialised to current timer for all CPU's */
-       initcount = read_c0_count();
-
        /*
         * We loop a few times to get a primed instruction cache,
         * then the last pass is more or less synchronised and
@@ -63,9 +51,13 @@ void synchronise_count_master(int cpu)
                atomic_set(&count_count_stop, 0);
                smp_wmb();
 
-               /* this lets the slaves write their count register */
+               /* Let the slave write its count register */
                atomic_inc(&count_count_start);
 
+               /* Count will be initialised to current timer */
+               if (i == 1)
+                       initcount = read_c0_count();
+
                /*
                 * Everyone initialises count in the last loop:
                 */
@@ -73,7 +65,7 @@ void synchronise_count_master(int cpu)
                        write_c0_count(initcount);
 
                /*
-                * Wait for all slaves to leave the synchronization point:
+                * Wait for the slave to leave the synchronization point:
                 */
                while (atomic_read(&count_count_stop) != 1)
                        mb();
@@ -83,7 +75,6 @@ void synchronise_count_master(int cpu)
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);
-       atomic_set(&count_start_flag, 0);
 
        local_irq_restore(flags);
 
@@ -98,19 +89,12 @@ void synchronise_count_master(int cpu)
 void synchronise_count_slave(int cpu)
 {
        int i;
-       unsigned int initcount;
 
        /*
         * Not every cpu is online at the time this gets called,
         * so we first wait for the master to say everyone is ready
         */
 
-       while (atomic_read(&count_start_flag) != cpu)
-               mb();
-
-       /* Count will be initialised to next expire for all CPU's */
-       initcount = atomic_read(&count_reference);
-
        for (i = 0; i < NR_LOOPS; i++) {
                atomic_inc(&count_count_start);
                while (atomic_read(&count_count_start) != 2)
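
The rewritten synchronisation above drops the count_start_flag/count_reference globals: the master now samples its own Count register once the instruction caches are primed (the i == 1 pass) and both sides write that value in the final pass, paced by a two-counter ping-pong. A hedged sketch of that rendezvous with one master and one slave thread, C11 atomics in place of the kernel's atomic_t/mb(), and a plain variable standing in for the CP0 Count register:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_LOOPS 3

static atomic_int count_start, count_stop;
static unsigned int fake_count = 1000;          /* stand-in for read_c0_count() */
static unsigned int initcount, master_val, slave_val;

static void *slave(void *arg)
{
        (void)arg;
        for (int i = 0; i < NR_LOOPS; i++) {
                atomic_fetch_add(&count_start, 1);      /* "I have arrived" */
                while (atomic_load(&count_start) != 2)
                        ;                               /* wait for the release */
                if (i == NR_LOOPS - 1)
                        slave_val = initcount;          /* "write_c0_count()" */
                atomic_fetch_add(&count_stop, 1);
                while (atomic_load(&count_stop) != 2)
                        ;
        }
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, slave, NULL);
        for (int i = 0; i < NR_LOOPS; i++) {
                while (atomic_load(&count_start) != 1)
                        ;                               /* slave at 1st barrier */
                atomic_store(&count_stop, 0);
                atomic_fetch_add(&count_start, 1);      /* release the slave */
                if (i == 1)
                        initcount = fake_count;         /* sample once primed */
                if (i == NR_LOOPS - 1)
                        master_val = initcount;         /* "write_c0_count()" */
                while (atomic_load(&count_stop) != 1)
                        ;                               /* slave at 2nd barrier */
                atomic_store(&count_start, 0);
                atomic_fetch_add(&count_stop, 1);       /* release for next loop */
        }
        pthread_join(t, NULL);
        printf("master=%u slave=%u\n", master_val, slave_val);
        return 0;
}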
index 886cb1976e90f682dafac7a958043d68bc4b6473..bafcb7ad5c854d5109010485dd190f12486c363e 100644 (file)
@@ -2250,7 +2250,7 @@ void __init trap_init(void)
         * Only some CPUs have the watch exceptions.
         */
        if (cpu_has_watch)
-               set_except_vector(23, handle_watch);
+               set_except_vector(EXCCODE_WATCH, handle_watch);
 
        /*
         * Initialise interrupt handlers
@@ -2277,27 +2277,27 @@ void __init trap_init(void)
        if (board_be_init)
                board_be_init();
 
-       set_except_vector(0, using_rollback_handler() ? rollback_handle_int
-                                                     : handle_int);
-       set_except_vector(1, handle_tlbm);
-       set_except_vector(2, handle_tlbl);
-       set_except_vector(3, handle_tlbs);
+       set_except_vector(EXCCODE_INT, using_rollback_handler() ?
+                                       rollback_handle_int : handle_int);
+       set_except_vector(EXCCODE_MOD, handle_tlbm);
+       set_except_vector(EXCCODE_TLBL, handle_tlbl);
+       set_except_vector(EXCCODE_TLBS, handle_tlbs);
 
-       set_except_vector(4, handle_adel);
-       set_except_vector(5, handle_ades);
+       set_except_vector(EXCCODE_ADEL, handle_adel);
+       set_except_vector(EXCCODE_ADES, handle_ades);
 
-       set_except_vector(6, handle_ibe);
-       set_except_vector(7, handle_dbe);
+       set_except_vector(EXCCODE_IBE, handle_ibe);
+       set_except_vector(EXCCODE_DBE, handle_dbe);
 
-       set_except_vector(8, handle_sys);
-       set_except_vector(9, handle_bp);
-       set_except_vector(10, rdhwr_noopt ? handle_ri :
+       set_except_vector(EXCCODE_SYS, handle_sys);
+       set_except_vector(EXCCODE_BP, handle_bp);
+       set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
                          (cpu_has_vtag_icache ?
                           handle_ri_rdhwr_vivt : handle_ri_rdhwr));
-       set_except_vector(11, handle_cpu);
-       set_except_vector(12, handle_ov);
-       set_except_vector(13, handle_tr);
-       set_except_vector(14, handle_msa_fpe);
+       set_except_vector(EXCCODE_CPU, handle_cpu);
+       set_except_vector(EXCCODE_OV, handle_ov);
+       set_except_vector(EXCCODE_TR, handle_tr);
+       set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
 
        if (current_cpu_type() == CPU_R6000 ||
            current_cpu_type() == CPU_R6000A) {
@@ -2318,25 +2318,25 @@ void __init trap_init(void)
                board_nmi_handler_setup();
 
        if (cpu_has_fpu && !cpu_has_nofpuex)
-               set_except_vector(15, handle_fpe);
+               set_except_vector(EXCCODE_FPE, handle_fpe);
 
-       set_except_vector(16, handle_ftlb);
+       set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
 
        if (cpu_has_rixiex) {
-               set_except_vector(19, tlb_do_page_fault_0);
-               set_except_vector(20, tlb_do_page_fault_0);
+               set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
+               set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
        }
 
-       set_except_vector(21, handle_msa);
-       set_except_vector(22, handle_mdmx);
+       set_except_vector(EXCCODE_MSADIS, handle_msa);
+       set_except_vector(EXCCODE_MDMX, handle_mdmx);
 
        if (cpu_has_mcheck)
-               set_except_vector(24, handle_mcheck);
+               set_except_vector(EXCCODE_MCHECK, handle_mcheck);
 
        if (cpu_has_mipsmt)
-               set_except_vector(25, handle_mt);
+               set_except_vector(EXCCODE_THREAD, handle_mt);
 
-       set_except_vector(26, handle_dsp);
+       set_except_vector(EXCCODE_DSPDIS, handle_dsp);
 
        if (board_cache_error_setup)
                board_cache_error_setup();
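
The hunk above only swaps bare vector numbers for the EXCCODE_* names already implied by them (0 for interrupts, 2 for TLB load misses, and so on), keeping the Cause.ExcCode value and its handler visibly paired. A hedged sketch of that idea with placeholder handlers; the numeric values simply restate what the removed lines used:

#include <stdio.h>

enum exccode { EXCCODE_INT = 0, EXCCODE_MOD = 1, EXCCODE_TLBL = 2,
               EXCCODE_TLBS = 3, EXCCODE_ADEL = 4, EXCCODE_ADES = 5 };

static void handle_int(void)  { puts("interrupt"); }
static void handle_tlbl(void) { puts("TLB load miss"); }

static void (*vectors[32])(void);

static void set_except_vector(enum exccode code, void (*handler)(void))
{
        vectors[code] = handler;        /* name and slot stay together */
}

int main(void)
{
        set_except_vector(EXCCODE_INT, handle_int);
        set_except_vector(EXCCODE_TLBL, handle_tlbl);
        vectors[EXCCODE_TLBL]();        /* prints "TLB load miss" */
        return 0;
}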
index 313c2e37b978ba730372baafdc51dd707ed8e86b..d88aa2173fb0b16337e3c0ae06d86c87fb959ad4 100644 (file)
@@ -11,4 +11,4 @@
 #include <linux/kvm_host.h>
 
 struct kvm_mips_callbacks *kvm_mips_callbacks;
-EXPORT_SYMBOL(kvm_mips_callbacks);
+EXPORT_SYMBOL_GPL(kvm_mips_callbacks);
index 521121bdebff94a622b280544fcacddab1434b5d..f1527a465c1b1b071356b18ea066a468b30edabb 100644 (file)
@@ -86,10 +86,8 @@ int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
        } else {
                mfc0_inst = LW_TEMPLATE;
                mfc0_inst |= ((rt & 0x1f) << 16);
-               mfc0_inst |=
-                   offsetof(struct mips_coproc,
-                            reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
-                                                     cop0);
+               mfc0_inst |= offsetof(struct kvm_mips_commpage,
+                                     cop0.reg[rd][sel]);
        }
 
        if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
@@ -123,9 +121,7 @@ int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
        sel = inst & 0x7;
 
        mtc0_inst |= ((rt & 0x1f) << 16);
-       mtc0_inst |=
-           offsetof(struct mips_coproc,
-                    reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+       mtc0_inst |= offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
 
        if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
                kseg0_opc =
index 1b675c7ce89f89d25f3457659405419505a8158d..b37954cc880d6ce06d9ca408962a4fabf7e7de06 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/random.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
+#include <asm/cacheops.h>
 #include <asm/cpu-info.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -29,7 +30,6 @@
 #include <asm/r4kcache.h>
 #define CONFIG_MIPS_MT
 
-#include "opcode.h"
 #include "interrupt.h"
 #include "commpage.h"
 
@@ -1239,21 +1239,20 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
                        er = EMULATE_FAIL;
                        break;
 
-               case mfmcz_op:
+               case mfmc0_op:
 #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[MIPS_CP0_STATUS][0]++;
 #endif
-                       if (rt != 0) {
+                       if (rt != 0)
                                vcpu->arch.gprs[rt] =
                                    kvm_read_c0_guest_status(cop0);
-                       }
                        /* EI */
                        if (inst & 0x20) {
-                               kvm_debug("[%#lx] mfmcz_op: EI\n",
+                               kvm_debug("[%#lx] mfmc0_op: EI\n",
                                          vcpu->arch.pc);
                                kvm_set_c0_guest_status(cop0, ST0_IE);
                        } else {
-                               kvm_debug("[%#lx] mfmcz_op: DI\n",
+                               kvm_debug("[%#lx] mfmc0_op: DI\n",
                                          vcpu->arch.pc);
                                kvm_clear_c0_guest_status(cop0, ST0_IE);
                        }
@@ -1545,19 +1544,6 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
        return 0;
 }
 
-#define MIPS_CACHE_OP_INDEX_INV         0x0
-#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
-#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
-#define MIPS_CACHE_OP_IMP               0x3
-#define MIPS_CACHE_OP_HIT_INV           0x4
-#define MIPS_CACHE_OP_FILL_WB_INV       0x5
-#define MIPS_CACHE_OP_HIT_HB            0x6
-#define MIPS_CACHE_OP_FETCH_LOCK        0x7
-
-#define MIPS_CACHE_ICACHE               0x0
-#define MIPS_CACHE_DCACHE               0x1
-#define MIPS_CACHE_SEC                  0x3
-
 enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                                             uint32_t cause,
                                             struct kvm_run *run,
@@ -1582,8 +1568,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
        base = (inst >> 21) & 0x1f;
        op_inst = (inst >> 16) & 0x1f;
        offset = (int16_t)inst;
-       cache = (inst >> 16) & 0x3;
-       op = (inst >> 18) & 0x7;
+       cache = op_inst & CacheOp_Cache;
+       op = op_inst & CacheOp_Op;
 
        va = arch->gprs[base] + offset;
 
@@ -1595,14 +1581,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
         * invalidate the caches entirely by stepping through all the
         * ways/indexes
         */
-       if (op == MIPS_CACHE_OP_INDEX_INV) {
+       if (op == Index_Writeback_Inv) {
                kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                          vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
                          arch->gprs[base], offset);
 
-               if (cache == MIPS_CACHE_DCACHE)
+               if (cache == Cache_D)
                        r4k_blast_dcache();
-               else if (cache == MIPS_CACHE_ICACHE)
+               else if (cache == Cache_I)
                        r4k_blast_icache();
                else {
                        kvm_err("%s: unsupported CACHE INDEX operation\n",
@@ -1675,9 +1661,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
 
 skip_fault:
        /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
-       if (cache == MIPS_CACHE_DCACHE
-           && (op == MIPS_CACHE_OP_FILL_WB_INV
-               || op == MIPS_CACHE_OP_HIT_INV)) {
+       if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) {
                flush_dcache_line(va);
 
 #ifdef CONFIG_KVM_MIPS_DYN_TRANS
@@ -1687,7 +1671,7 @@ skip_fault:
                 */
                kvm_mips_trans_cache_va(inst, opc, vcpu);
 #endif
-       } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+       } else if (op_inst == Hit_Invalidate_I) {
                flush_dcache_line(va);
                flush_icache_line(va);
 
@@ -1781,7 +1765,7 @@ enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
                kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
 
                kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_SYSCALL << CAUSEB_EXCCODE));
+                                         (EXCCODE_SYS << CAUSEB_EXCCODE));
 
                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -1828,7 +1812,7 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
        }
 
        kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+                                 (EXCCODE_TLBL << CAUSEB_EXCCODE));
 
        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -1874,7 +1858,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
        }
 
        kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+                                 (EXCCODE_TLBL << CAUSEB_EXCCODE));
 
        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -1918,7 +1902,7 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
        }
 
        kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+                                 (EXCCODE_TLBS << CAUSEB_EXCCODE));
 
        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -1962,7 +1946,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
        }
 
        kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+                                 (EXCCODE_TLBS << CAUSEB_EXCCODE));
 
        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -2033,7 +2017,8 @@ enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }
 
-       kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (EXCCODE_MOD << CAUSEB_EXCCODE));
 
        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
@@ -2068,7 +2053,7 @@ enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
        arch->pc = KVM_GUEST_KSEG0 + 0x180;
 
        kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+                                 (EXCCODE_CPU << CAUSEB_EXCCODE));
        kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
 
        return EMULATE_DONE;
@@ -2096,7 +2081,7 @@ enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
                kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
 
                kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_RES_INST << CAUSEB_EXCCODE));
+                                         (EXCCODE_RI << CAUSEB_EXCCODE));
 
                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2131,7 +2116,7 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
                kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
 
                kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_BREAK << CAUSEB_EXCCODE));
+                                         (EXCCODE_BP << CAUSEB_EXCCODE));
 
                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2166,7 +2151,7 @@ enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
                kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
 
                kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_TRAP << CAUSEB_EXCCODE));
+                                         (EXCCODE_TR << CAUSEB_EXCCODE));
 
                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2201,7 +2186,7 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
                kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc);
 
                kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_MSAFPE << CAUSEB_EXCCODE));
+                                         (EXCCODE_MSAFPE << CAUSEB_EXCCODE));
 
                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2236,7 +2221,7 @@ enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
                kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc);
 
                kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_FPE << CAUSEB_EXCCODE));
+                                         (EXCCODE_FPE << CAUSEB_EXCCODE));
 
                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2271,7 +2256,7 @@ enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
                kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc);
 
                kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_MSADIS << CAUSEB_EXCCODE));
+                                         (EXCCODE_MSADIS << CAUSEB_EXCCODE));
 
                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;
@@ -2480,25 +2465,25 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
 
        if (usermode) {
                switch (exccode) {
-               case T_INT:
-               case T_SYSCALL:
-               case T_BREAK:
-               case T_RES_INST:
-               case T_TRAP:
-               case T_MSAFPE:
-               case T_FPE:
-               case T_MSADIS:
+               case EXCCODE_INT:
+               case EXCCODE_SYS:
+               case EXCCODE_BP:
+               case EXCCODE_RI:
+               case EXCCODE_TR:
+               case EXCCODE_MSAFPE:
+               case EXCCODE_FPE:
+               case EXCCODE_MSADIS:
                        break;
 
-               case T_COP_UNUSABLE:
+               case EXCCODE_CPU:
                        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
                                er = EMULATE_PRIV_FAIL;
                        break;
 
-               case T_TLB_MOD:
+               case EXCCODE_MOD:
                        break;
 
-               case T_TLB_LD_MISS:
+               case EXCCODE_TLBL:
                        /*
                         * If we are accessing Guest kernel space, then send an
                         * address error exception to the guest
@@ -2507,12 +2492,12 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
                                kvm_debug("%s: LD MISS @ %#lx\n", __func__,
                                          badvaddr);
                                cause &= ~0xff;
-                               cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+                               cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE);
                                er = EMULATE_PRIV_FAIL;
                        }
                        break;
 
-               case T_TLB_ST_MISS:
+               case EXCCODE_TLBS:
                        /*
                         * If we are accessing Guest kernel space, then send an
                         * address error exception to the guest
@@ -2521,26 +2506,26 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
                                kvm_debug("%s: ST MISS @ %#lx\n", __func__,
                                          badvaddr);
                                cause &= ~0xff;
-                               cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+                               cause |= (EXCCODE_ADES << CAUSEB_EXCCODE);
                                er = EMULATE_PRIV_FAIL;
                        }
                        break;
 
-               case T_ADDR_ERR_ST:
+               case EXCCODE_ADES:
                        kvm_debug("%s: address error ST @ %#lx\n", __func__,
                                  badvaddr);
                        if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
                                cause &= ~0xff;
-                               cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+                               cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE);
                        }
                        er = EMULATE_PRIV_FAIL;
                        break;
-               case T_ADDR_ERR_LD:
+               case EXCCODE_ADEL:
                        kvm_debug("%s: address error LD @ %#lx\n", __func__,
                                  badvaddr);
                        if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
                                cause &= ~0xff;
-                               cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+                               cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE);
                        }
                        er = EMULATE_PRIV_FAIL;
                        break;
@@ -2583,13 +2568,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
         * an entry into the guest TLB.
         */
        index = kvm_mips_guest_tlb_lookup(vcpu,
-                                         (va & VPN2_MASK) |
-                                         (kvm_read_c0_guest_entryhi
-                                          (vcpu->arch.cop0) & ASID_MASK));
+                     (va & VPN2_MASK) |
+                     (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
        if (index < 0) {
-               if (exccode == T_TLB_LD_MISS) {
+               if (exccode == EXCCODE_TLBL) {
                        er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
-               } else if (exccode == T_TLB_ST_MISS) {
+               } else if (exccode == EXCCODE_TLBS) {
                        er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
                } else {
                        kvm_err("%s: invalid exc code: %d\n", __func__,
@@ -2604,10 +2588,10 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
                 * exception to the guest
                 */
                if (!TLB_IS_VALID(*tlb, va)) {
-                       if (exccode == T_TLB_LD_MISS) {
+                       if (exccode == EXCCODE_TLBL) {
                                er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
                                                                vcpu);
-                       } else if (exccode == T_TLB_ST_MISS) {
+                       } else if (exccode == EXCCODE_TLBS) {
                                er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
                                                                vcpu);
                        } else {
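
The CACHE decode above keeps the whole 5-bit op field from bits 16..20 and splits it with CacheOp_Cache/CacheOp_Op, so combined names such as Hit_Writeback_Inv_D can be compared in one go; the split matches the old open-coded (inst >> 16) & 0x3 and (inst >> 18) & 0x7. A hedged sketch with locally defined constants that mirror that assumed layout (illustrative, not the asm/cacheops.h values verbatim):

#include <stdint.h>
#include <stdio.h>

#define CacheOp_Cache        0x03      /* low 2 bits: which cache */
#define CacheOp_Op           0x1c      /* upper 3 bits: which operation */
#define Cache_I              0x00
#define Cache_D              0x01
#define Index_Writeback_Inv  0x00
#define Hit_Invalidate       0x10
#define Hit_Writeback_Inv    0x14
#define Hit_Invalidate_D     (Cache_D | Hit_Invalidate)
#define Hit_Writeback_Inv_D  (Cache_D | Hit_Writeback_Inv)

static void decode_cache(uint32_t inst)
{
        unsigned int op_inst = (inst >> 16) & 0x1f;     /* whole 5-bit field */
        unsigned int cache   = op_inst & CacheOp_Cache;
        unsigned int op      = op_inst & CacheOp_Op;

        if (op == Index_Writeback_Inv)
                printf("index writeback/invalidate, cache %u\n", cache);
        else if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D)
                printf("hit op on the D-cache\n");
        else
                printf("other: cache %u op %#x\n", cache, op);
}

int main(void)
{
        decode_cache(Hit_Writeback_Inv_D << 16);
        decode_cache((Cache_D | Index_Writeback_Inv) << 16);
        return 0;
}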
index 9b4445940c2bcc941b217aa3b983a4369f62f382..95f790663b0c2af09a3e0dbf2fe836b58ecfd078 100644 (file)
@@ -128,7 +128,7 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
                    && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
                        allowed = 1;
-                       exccode = T_INT;
+                       exccode = EXCCODE_INT;
                }
                break;
 
@@ -137,7 +137,7 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
                    && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
                        allowed = 1;
-                       exccode = T_INT;
+                       exccode = EXCCODE_INT;
                }
                break;
 
@@ -146,7 +146,7 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
                    && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
                        allowed = 1;
-                       exccode = T_INT;
+                       exccode = EXCCODE_INT;
                }
                break;
 
@@ -155,7 +155,7 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
                    && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
                    && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
                        allowed = 1;
-                       exccode = T_INT;
+                       exccode = EXCCODE_INT;
                }
                break;
 
index 7e2210846b8b9d1519f679e6a0950fcaf1e6dfd5..81687ab1b523eaebce4f2edbaeffc3b515a1b9e9 100644 (file)
@@ -335,7 +335,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
 
        /* Now restore the host state just enough to run the handlers */
 
-       /* Swtich EBASE to the one used by Linux */
+       /* Switch EBASE to the one used by Linux */
        /* load up the host EBASE */
        mfc0    v0, CP0_STATUS
 
@@ -490,11 +490,11 @@ __kvm_mips_return_to_guest:
        REG_ADDU t3, t1, t2
        LONG_L  k0, (t3)
        andi    k0, k0, 0xff
-       mtc0    k0,CP0_ENTRYHI
+       mtc0    k0, CP0_ENTRYHI
        ehb
 
        /* Disable RDHWR access */
-       mtc0    zero,  CP0_HWRENA
+       mtc0    zero, CP0_HWRENA
 
        /* load the guest context from VCPU and return */
        LONG_L  $0, VCPU_R0(k1)
@@ -606,11 +606,11 @@ __kvm_mips_return_to_host:
 
        /* Restore RDHWR access */
        PTR_LI  k0, 0x2000000F
-       mtc0    k0,  CP0_HWRENA
+       mtc0    k0, CP0_HWRENA
 
        /* Restore RA, which is the address we will return to */
-       LONG_L  ra, PT_R31(k1)
-       j       ra
+       LONG_L  ra, PT_R31(k1)
+       j       ra
         nop
 
 VECTOR_END(MIPSX(GuestExceptionEnd))
index b9b803facdbf7594dc700ca828a2ced604e617aa..8bc3977576e6af5f7b5c1a17f306da0f21b26937 100644 (file)
@@ -229,7 +229,7 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
                            kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
 
                        if (!kvm->arch.guest_pmap) {
-                               kvm_err("Failed to allocate guest PMAP");
+                               kvm_err("Failed to allocate guest PMAP\n");
                                return;
                        }
 
@@ -1264,8 +1264,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
        }
 
        switch (exccode) {
-       case T_INT:
-               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+       case EXCCODE_INT:
+               kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
 
                ++vcpu->stat.int_exits;
                trace_kvm_exit(vcpu, INT_EXITS);
@@ -1276,8 +1276,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
                ret = RESUME_GUEST;
                break;
 
-       case T_COP_UNUSABLE:
-               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+       case EXCCODE_CPU:
+               kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
 
                ++vcpu->stat.cop_unusable_exits;
                trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
@@ -1287,13 +1287,13 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
                        ret = RESUME_HOST;
                break;
 
-       case T_TLB_MOD:
+       case EXCCODE_MOD:
                ++vcpu->stat.tlbmod_exits;
                trace_kvm_exit(vcpu, TLBMOD_EXITS);
                ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
                break;
 
-       case T_TLB_ST_MISS:
+       case EXCCODE_TLBS:
                kvm_debug("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
                          cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
                          badvaddr);
@@ -1303,7 +1303,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
                ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
                break;
 
-       case T_TLB_LD_MISS:
+       case EXCCODE_TLBL:
                kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          cause, opc, badvaddr);
 
@@ -1312,55 +1312,55 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
                ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
                break;
 
-       case T_ADDR_ERR_ST:
+       case EXCCODE_ADES:
                ++vcpu->stat.addrerr_st_exits;
                trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
                ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
                break;
 
-       case T_ADDR_ERR_LD:
+       case EXCCODE_ADEL:
                ++vcpu->stat.addrerr_ld_exits;
                trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
                ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
                break;
 
-       case T_SYSCALL:
+       case EXCCODE_SYS:
                ++vcpu->stat.syscall_exits;
                trace_kvm_exit(vcpu, SYSCALL_EXITS);
                ret = kvm_mips_callbacks->handle_syscall(vcpu);
                break;
 
-       case T_RES_INST:
+       case EXCCODE_RI:
                ++vcpu->stat.resvd_inst_exits;
                trace_kvm_exit(vcpu, RESVD_INST_EXITS);
                ret = kvm_mips_callbacks->handle_res_inst(vcpu);
                break;
 
-       case T_BREAK:
+       case EXCCODE_BP:
                ++vcpu->stat.break_inst_exits;
                trace_kvm_exit(vcpu, BREAK_INST_EXITS);
                ret = kvm_mips_callbacks->handle_break(vcpu);
                break;
 
-       case T_TRAP:
+       case EXCCODE_TR:
                ++vcpu->stat.trap_inst_exits;
                trace_kvm_exit(vcpu, TRAP_INST_EXITS);
                ret = kvm_mips_callbacks->handle_trap(vcpu);
                break;
 
-       case T_MSAFPE:
+       case EXCCODE_MSAFPE:
                ++vcpu->stat.msa_fpe_exits;
                trace_kvm_exit(vcpu, MSA_FPE_EXITS);
                ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
                break;
 
-       case T_FPE:
+       case EXCCODE_FPE:
                ++vcpu->stat.fpe_exits;
                trace_kvm_exit(vcpu, FPE_EXITS);
                ret = kvm_mips_callbacks->handle_fpe(vcpu);
                break;
 
-       case T_MSADIS:
+       case EXCCODE_MSADIS:
                ++vcpu->stat.msa_disabled_exits;
                trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
                ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
@@ -1620,7 +1620,7 @@ static struct notifier_block kvm_mips_csr_die_notifier = {
        .notifier_call = kvm_mips_csr_die_notify,
 };
 
-int __init kvm_mips_init(void)
+static int __init kvm_mips_init(void)
 {
        int ret;
 
@@ -1646,7 +1646,7 @@ int __init kvm_mips_init(void)
        return 0;
 }
 
-void __exit kvm_mips_exit(void)
+static void __exit kvm_mips_exit(void)
 {
        kvm_exit();
 
diff --git a/arch/mips/kvm/opcode.h b/arch/mips/kvm/opcode.h
deleted file mode 100644 (file)
index 03a6ae8..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-/* Define opcode values not defined in <asm/isnt.h> */
-
-#ifndef __KVM_MIPS_OPCODE_H__
-#define __KVM_MIPS_OPCODE_H__
-
-/* COP0 Ops */
-#define mfmcz_op       0x0b    /* 01011 */
-#define wrpgpr_op      0x0e    /* 01110 */
-
-/* COP0 opcodes (only if COP0 and CO=1): */
-#define wait_op                0x20    /* 100000 */
-
-#endif /* __KVM_MIPS_OPCODE_H__ */
index 570479c03bdc35009f48985e70d17f00686b5012..a08c439462472e3a2e441c1238d62cdee2988ed7 100644 (file)
 #define PRIx64 "llx"
 
 atomic_t kvm_mips_instance;
-EXPORT_SYMBOL(kvm_mips_instance);
+EXPORT_SYMBOL_GPL(kvm_mips_instance);
 
 /* These function pointers are initialized once the KVM module is loaded */
 kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);
 
 void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);
 
 bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
-EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
 
 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
@@ -111,7 +111,7 @@ void kvm_mips_dump_host_tlbs(void)
        mtc0_tlbw_hazard();
        local_irq_restore(flags);
 }
-EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
 
 void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
 {
@@ -139,7 +139,7 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
                         (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
        }
 }
-EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
 
 static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
 {
@@ -191,7 +191,7 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
 
        return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
 }
-EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
 
 /* XXXKYMA: Must be called with interrupts disabled */
 /* set flush_dcache_mask == 0 if no dcache flush required */
@@ -308,7 +308,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       flush_dcache_mask);
 }
-EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
 
 int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
        struct kvm_vcpu *vcpu)
@@ -351,7 +351,7 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 
        return 0;
 }
-EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
 
 int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb,
@@ -401,7 +401,7 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
        return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
                                       tlb->tlb_mask);
 }
-EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
 
 int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 {
@@ -422,7 +422,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
 
        return index;
 }
-EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup);
 
 int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 {
@@ -458,7 +458,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
 
        return idx;
 }
-EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_lookup);
 
 int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
 {
@@ -505,44 +505,7 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
 
        return 0;
 }
-EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
-
-/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */
-int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
-{
-       unsigned long flags, old_entryhi;
-
-       if (index >= current_cpu_data.tlbsize)
-               BUG();
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-
-       write_c0_entryhi(UNIQUE_ENTRYHI(index));
-       mtc0_tlbw_hazard();
-
-       write_c0_index(index);
-       mtc0_tlbw_hazard();
-
-       write_c0_entrylo0(0);
-       mtc0_tlbw_hazard();
-
-       write_c0_entrylo1(0);
-       mtc0_tlbw_hazard();
-
-       tlb_write_indexed();
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       local_irq_restore(flags);
-
-       return 0;
-}
+EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv);
 
 void kvm_mips_flush_host_tlb(int skip_kseg0)
 {
@@ -594,7 +557,7 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
 
        local_irq_restore(flags);
 }
-EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
 
 void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
                             struct kvm_vcpu *vcpu)
@@ -642,7 +605,7 @@ void kvm_local_flush_tlb_all(void)
 
        local_irq_restore(flags);
 }
-EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
 
 /**
  * kvm_mips_migrate_count() - Migrate timer.
@@ -673,8 +636,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
        local_irq_save(flags);
 
-       if (((vcpu->arch.
-             guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
+       if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+                                                       ASID_VERSION_MASK) {
                kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
                vcpu->arch.guest_kernel_asid[cpu] =
                    vcpu->arch.guest_kernel_mm.context.asid[cpu];
@@ -739,7 +702,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        local_irq_restore(flags);
 
 }
-EXPORT_SYMBOL(kvm_arch_vcpu_load);
+EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
 
 /* ASID can change if another task is scheduled during preemption */
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -768,7 +731,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
        local_irq_restore(flags);
 }
-EXPORT_SYMBOL(kvm_arch_vcpu_put);
+EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);
 
 uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 {
@@ -813,4 +776,4 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
 
        return inst;
 }
-EXPORT_SYMBOL(kvm_get_inst);
+EXPORT_SYMBOL_GPL(kvm_get_inst);
index d836ed5b0bc7ea38e36350304a6238a520e0d74d..ad988000563f264d443b92bd0785e40ba78fe95d 100644 (file)
@@ -16,7 +16,6 @@
 
 #include <linux/kvm_host.h>
 
-#include "opcode.h"
 #include "interrupt.h"
 
 static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
index 272af8ac2425290c892849186b170f4b95c259b6..5530070e0d05dd51248afb9a8780eb3a86b173a7 100644 (file)
@@ -57,7 +57,6 @@ notrace void arch_local_irq_disable(void)
 }
 EXPORT_SYMBOL(arch_local_irq_disable);
 
-
 notrace unsigned long arch_local_irq_save(void)
 {
        unsigned long flags;
@@ -111,31 +110,4 @@ notrace void arch_local_irq_restore(unsigned long flags)
 }
 EXPORT_SYMBOL(arch_local_irq_restore);
 
-
-notrace void __arch_local_irq_restore(unsigned long flags)
-{
-       unsigned long __tmp1;
-
-       preempt_disable();
-
-       __asm__ __volatile__(
-       "       .set    push                                            \n"
-       "       .set    noreorder                                       \n"
-       "       .set    noat                                            \n"
-       "       mfc0    $1, $12                                         \n"
-       "       andi    %[flags], 1                                     \n"
-       "       ori     $1, 0x1f                                        \n"
-       "       xori    $1, 0x1f                                        \n"
-       "       or      %[flags], $1                                    \n"
-       "       mtc0    %[flags], $12                                   \n"
-       "       " __stringify(__irq_disable_hazard) "                   \n"
-       "       .set    pop                                             \n"
-       : [flags] "=r" (__tmp1)
-       : "0" (flags)
-       : "memory");
-
-       preempt_enable();
-}
-EXPORT_SYMBOL(__arch_local_irq_restore);
-
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
index 2e48e83d5524ac13a25a98b0c1d9bd60ae2c7ffa..85d808924c94b52b25f24654def7529b76039266 100644 (file)
@@ -22,6 +22,27 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
   endif
 endif
 
+cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap
+#
+# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+# as MIPS64 R2; older versions as just R1.  This leaves the possibility open
+# that GCC might generate R2 code for -march=loongson3a which then is rejected
+# by GAS.  The cc-option can't probe for this behaviour so -march=loongson3a
+# can't easily be used safely within the kbuild framework.
+#
+ifeq ($(call cc-ifversion, -ge, 0409, y), y)
+  ifeq ($(call ld-ifversion, -ge, 22500000, y), y)
+    cflags-$(CONFIG_CPU_LOONGSON3)  += \
+      $(call cc-option,-march=loongson3a -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
+  else
+    cflags-$(CONFIG_CPU_LOONGSON3)  += \
+      $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
+  endif
+else
+    cflags-$(CONFIG_CPU_LOONGSON3)  += \
+      $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
+endif
+
 #
 # Loongson Machines' Support
 #
index bf9f1a77f0e59825fcdb6ced3ce73ca78d8c7c19..a2631a52ca998c0029b27cec6fa85bac9d0782d0 100644 (file)
@@ -13,6 +13,9 @@
 #define SMBUS_PCI_REG64                0x64
 #define SMBUS_PCI_REGB4                0xb4
 
+#define HPET_MIN_CYCLES                64
+#define HPET_MIN_PROG_DELTA    (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+
 static DEFINE_SPINLOCK(hpet_lock);
 DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
 
@@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta,
        cnt += delta;
        hpet_write(HPET_T0_CMP, cnt);
 
-       res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
-       return res;
+       res = (int)(cnt - hpet_read(HPET_COUNTER));
+
+       return res < HPET_MIN_CYCLES ? -ETIME : 0;
 }
 
 static irqreturn_t hpet_irq_handler(int irq, void *data)
@@ -237,7 +241,7 @@ void __init setup_hpet_timer(void)
        cd->cpumask = cpumask_of(cpu);
        clockevent_set_clock(cd, HPET_FREQ);
        cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
-       cd->min_delta_ns = 5000;
+       cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
 
        clockevents_register_device(cd);
        setup_irq(HPET_T0_IRQ, &hpet_irq);
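
The change above stops comparing the comparator against the counter after the fact and instead checks that the programmed match point is still at least HPET_MIN_CYCLES ahead, with min_delta_ns derived from HPET_MIN_PROG_DELTA (64 + 64/2 = 96 cycles) so the clockevents core never asks for less. A small worked example with made-up counter values:

#include <stdio.h>

#define HPET_MIN_CYCLES     64
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))

static int next_event(unsigned int now_when_read, unsigned int delta,
                      unsigned int now_after_write)
{
        unsigned int cnt = now_when_read + delta;       /* programmed comparator */
        int res = (int)(cnt - now_after_write);         /* signed distance ahead */

        return res < HPET_MIN_CYCLES ? -1 /* -ETIME */ : 0;
}

int main(void)
{
        /* Counter advanced 10 ticks while programming a 100-tick delta: fine. */
        printf("%d\n", next_event(1000, 100, 1010));
        /* Match point already too close (5 ticks ahead): report -ETIME so the
         * core retries with a delta of at least HPET_MIN_PROG_DELTA cycles. */
        printf("%d\n", next_event(1000, 20, 1015));
        return 0;
}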
index 1a4738a8f2d3906ccffb58bdf8d9b35ee4b04ef3..b913cd240d77f2c9d6d1ca41838c0a61829064a6 100644 (file)
 #include "smp.h"
 
 DEFINE_PER_CPU(int, cpu_state);
-DEFINE_PER_CPU(uint32_t, core0_c0count);
 
 static void *ipi_set0_regs[16];
 static void *ipi_clear0_regs[16];
 static void *ipi_status0_regs[16];
 static void *ipi_en0_regs[16];
 static void *ipi_mailbox_buf[16];
+static uint32_t core0_c0count[NR_CPUS];
 
 /* read a 32bit value from ipi register */
 #define loongson3_ipi_read32(addr) readl(addr)
@@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
        if (action & SMP_ASK_C0COUNT) {
                BUG_ON(cpu != 0);
                c0count = read_c0_count();
-               for (i = 1; i < num_possible_cpus(); i++)
-                       per_cpu(core0_c0count, i) = c0count;
+               c0count = c0count ? c0count : 1;
+               for (i = 1; i < nr_cpu_ids; i++)
+                       core0_c0count[i] = c0count;
+               __wbflush(); /* Let others see the result ASAP */
        }
 }
 
-#define MAX_LOOPS 1111
+#define MAX_LOOPS 800
 /*
  * SMP init and finish on secondary CPUs
  */
@@ -305,16 +307,20 @@ static void loongson3_init_secondary(void)
                cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
 
        i = 0;
-       __this_cpu_write(core0_c0count, 0);
+       core0_c0count[cpu] = 0;
        loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
-       while (!__this_cpu_read(core0_c0count)) {
+       while (!core0_c0count[cpu]) {
                i++;
                cpu_relax();
        }
 
        if (i > MAX_LOOPS)
                i = MAX_LOOPS;
-       initcount = __this_cpu_read(core0_c0count) + i;
+       if (cpu_data[cpu].package)
+               initcount = core0_c0count[cpu] + i;
+       else /* Local access is faster for loops */
+               initcount = core0_c0count[cpu] + i/2;
+
        write_c0_count(initcount);
 }
 
@@ -415,7 +421,6 @@ static int loongson3_cpu_disable(void)
        local_irq_save(flags);
        fixup_irqs();
        local_irq_restore(flags);
-       flush_cache_all();
        local_flush_tlb_all();
 
        return 0;
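
For reference, the publish/spin handshake implemented by the loongson3 IPI code above can be sketched with C11 atomics: the boot CPU writes a non-zero counter snapshot that each secondary polls for. The names below (c0count_snapshot, boot_cpu_publish, secondary_wait, NR_CPUS_SKETCH) are made up, and the release/acquire pair stands in for the IPI plus __wbflush() ordering in the real code.

        #include <stdatomic.h>
        #include <stdint.h>

        #define NR_CPUS_SKETCH 64

        static _Atomic uint32_t c0count_snapshot[NR_CPUS_SKETCH];

        /* boot CPU side: publish a non-zero counter snapshot for every waiter */
        static void boot_cpu_publish(unsigned int cpu, uint32_t c0count)
        {
                if (!c0count)
                        c0count = 1;    /* zero is reserved for "not written yet" */
                atomic_store_explicit(&c0count_snapshot[cpu], c0count,
                                      memory_order_release);
        }

        /* secondary side: clear the slot, ask CPU0 (IPI in the real code), then spin */
        static uint32_t secondary_wait(unsigned int cpu)
        {
                uint32_t v;

                atomic_store_explicit(&c0count_snapshot[cpu], 0, memory_order_relaxed);
                while (!(v = atomic_load_explicit(&c0count_snapshot[cpu],
                                                  memory_order_acquire)))
                        ;
                return v;
        }
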

index 32f0e19a0d7f71fef2bb693e0da7b7bbc6bc9af9..cdfd44ffa51c88133b7b59417adb3fc070dfeee7 100644 (file)
@@ -1266,6 +1266,8 @@ branch_common:
                                                 */
                                                sig = mips_dsemul(xcp, ir,
                                                                  contpc);
+                                               if (sig < 0)
+                                                       break;
                                                if (sig)
                                                        xcp->cp0_epc = bcpc;
                                                /*
@@ -1319,6 +1321,8 @@ branch_common:
                                 * instruction in the dslot
                                 */
                                sig = mips_dsemul(xcp, ir, contpc);
+                               if (sig < 0)
+                                       break;
                                if (sig)
                                        xcp->cp0_epc = bcpc;
                                /* SIGILL forces out of the emulation loop.  */
index 926d56bf37f24ca1dbfcfcdbf71cc8f3165dfa08..eb96485ed939ccc09c378e184c3a2a83fec80717 100644 (file)
 
 union ieee754dp ieee754dp_neg(union ieee754dp x)
 {
-       unsigned int oldrm;
        union ieee754dp y;
 
-       oldrm = ieee754_csr.rm;
-       ieee754_csr.rm = FPU_CSR_RD;
-       y = ieee754dp_sub(ieee754dp_zero(0), x);
-       ieee754_csr.rm = oldrm;
+       if (ieee754_csr.abs2008) {
+               y = x;
+               DPSIGN(y) = !DPSIGN(x);
+       } else {
+               unsigned int oldrm;
+
+               oldrm = ieee754_csr.rm;
+               ieee754_csr.rm = FPU_CSR_RD;
+               y = ieee754dp_sub(ieee754dp_zero(0), x);
+               ieee754_csr.rm = oldrm;
+       }
        return y;
 }
 
 union ieee754dp ieee754dp_abs(union ieee754dp x)
 {
-       unsigned int oldrm;
        union ieee754dp y;
 
-       oldrm = ieee754_csr.rm;
-       ieee754_csr.rm = FPU_CSR_RD;
-       if (DPSIGN(x))
-               y = ieee754dp_sub(ieee754dp_zero(0), x);
-       else
-               y = ieee754dp_add(ieee754dp_zero(0), x);
-       ieee754_csr.rm = oldrm;
+       if (ieee754_csr.abs2008) {
+               y = x;
+               DPSIGN(y) = 0;
+       } else {
+               unsigned int oldrm;
+
+               oldrm = ieee754_csr.rm;
+               ieee754_csr.rm = FPU_CSR_RD;
+               if (DPSIGN(x))
+                       y = ieee754dp_sub(ieee754dp_zero(0), x);
+               else
+                       y = ieee754dp_add(ieee754dp_zero(0), x);
+               ieee754_csr.rm = oldrm;
+       }
        return y;
 }
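
Under the 2008 semantics selected by ieee754_csr.abs2008 above, negation and absolute value become pure sign-bit operations, so NaN operands pass through untouched and no rounding mode is involved. A self-contained sketch of that idea on a host double follows; the union layout is illustrative and is not the kernel's ieee754dp type.

        #include <stdint.h>

        union dp { double d; uint64_t bits; };

        /* 2008-style negate: flip only the sign bit, NaNs pass through unchanged */
        static double neg2008(double x)
        {
                union dp u = { .d = x };

                u.bits ^= 1ULL << 63;
                return u.d;
        }

        /* 2008-style absolute value: clear only the sign bit */
        static double abs2008(double x)
        {
                union dp u = { .d = x };

                u.bits &= ~(1ULL << 63);
                return u.d;
        }
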
index 6ffc336c530e3e11c48c1dcb8ab9eb50224b7ff4..f3985617ce31526c7de1d1fb19a0af3a207bd925 100644 (file)
@@ -38,10 +38,13 @@ int ieee754dp_tint(union ieee754dp x)
        switch (xc) {
        case IEEE754_CLASS_SNAN:
        case IEEE754_CLASS_QNAN:
-       case IEEE754_CLASS_INF:
                ieee754_setcx(IEEE754_INVALID_OPERATION);
                return ieee754si_indef();
 
+       case IEEE754_CLASS_INF:
+               ieee754_setcx(IEEE754_INVALID_OPERATION);
+               return ieee754si_overflow(xs);
+
        case IEEE754_CLASS_ZERO:
                return 0;
 
@@ -53,7 +56,7 @@ int ieee754dp_tint(union ieee754dp x)
                /* Set invalid. We will only use overflow for floating
                   point overflow */
                ieee754_setcx(IEEE754_INVALID_OPERATION);
-               return ieee754si_indef();
+               return ieee754si_overflow(xs);
        }
        /* oh gawd */
        if (xe > DP_FBITS) {
@@ -93,7 +96,7 @@ int ieee754dp_tint(union ieee754dp x)
                if ((xm >> 31) != 0 && (xs == 0 || xm != 0x80000000)) {
                        /* This can happen after rounding */
                        ieee754_setcx(IEEE754_INVALID_OPERATION);
-                       return ieee754si_indef();
+                       return ieee754si_overflow(xs);
                }
                if (round || sticky)
                        ieee754_setcx(IEEE754_INEXACT);
index 9cdc145b75e012d75a22f146c1e3c3b5c65a4e7f..748fa10ed4cf63d3e0995a3d6ae6860b6c432d7b 100644 (file)
@@ -38,10 +38,13 @@ s64 ieee754dp_tlong(union ieee754dp x)
        switch (xc) {
        case IEEE754_CLASS_SNAN:
        case IEEE754_CLASS_QNAN:
-       case IEEE754_CLASS_INF:
                ieee754_setcx(IEEE754_INVALID_OPERATION);
                return ieee754di_indef();
 
+       case IEEE754_CLASS_INF:
+               ieee754_setcx(IEEE754_INVALID_OPERATION);
+               return ieee754di_overflow(xs);
+
        case IEEE754_CLASS_ZERO:
                return 0;
 
@@ -56,7 +59,7 @@ s64 ieee754dp_tlong(union ieee754dp x)
                /* Set invalid. We will only use overflow for floating
                   point overflow */
                ieee754_setcx(IEEE754_INVALID_OPERATION);
-               return ieee754di_indef();
+               return ieee754di_overflow(xs);
        }
        /* oh gawd */
        if (xe > DP_FBITS) {
@@ -97,7 +100,7 @@ s64 ieee754dp_tlong(union ieee754dp x)
                if ((xm >> 63) != 0) {
                        /* This can happen after rounding */
                        ieee754_setcx(IEEE754_INVALID_OPERATION);
-                       return ieee754di_indef();
+                       return ieee754di_overflow(xs);
                }
                if (round || sticky)
                        ieee754_setcx(IEEE754_INEXACT);
index cbb36c14b155ad07dae2250550bc4090622180cd..46b964d2b79c0a0f24aa7cf6e77e1846a88e842f 100644 (file)
@@ -31,17 +31,41 @@ struct emuframe {
        unsigned long           epc;
 };
 
+/*
+ * Set up an emulation frame for instruction IR, from a delay slot of
+ * a branch jumping to CPC.  Return 0 if successful, -1 if no emulation
+ * required, otherwise a signal number causing a frame setup failure.
+ */
 int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
 {
+       int isa16 = get_isa16_mode(regs->cp0_epc);
+       mips_instruction break_math;
        struct emuframe __user *fr;
        int err;
 
-       if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) ||
-               (ir == 0)) {
-               /* NOP is easy */
-               regs->cp0_epc = cpc;
-               clear_delay_slot(regs);
-               return 0;
+       /* NOP is easy */
+       if (ir == 0)
+               return -1;
+
+       /* microMIPS instructions */
+       if (isa16) {
+               union mips_instruction insn = { .word = ir };
+
+               /* NOP16 aka MOVE16 $0, $0 */
+               if ((ir >> 16) == MM_NOP16)
+                       return -1;
+
+               /* ADDIUPC */
+               if (insn.mm_a_format.opcode == mm_addiupc_op) {
+                       unsigned int rs;
+                       s32 v;
+
+                       rs = (((insn.mm_a_format.rs + 0x1e) & 0xf) + 2);
+                       v = regs->cp0_epc & ~3;
+                       v += insn.mm_a_format.simmediate << 2;
+                       regs->regs[rs] = (long)v;
+                       return -1;
+               }
        }
 
        pr_debug("dsemul %lx %lx\n", regs->cp0_epc, cpc);
@@ -55,14 +79,10 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
         * Algorithmics used a system call instruction, and
         * borrowed that vector.  MIPS/Linux version is a bit
         * more heavyweight in the interests of portability and
-        * multiprocessor support.  For Linux we generate a
-        * an unaligned access and force an address error exception.
-        *
-        * For embedded systems (stand-alone) we prefer to use a
-        * non-existing CP1 instruction. This prevents us from emulating
-        * branches, but gives us a cleaner interface to the exception
-        * handler (single entry point).
+        * multiprocessor support.  For Linux we use a BREAK 514
+        * instruction causing a breakpoint exception.
         */
+       break_math = BREAK_MATH(isa16);
 
        /* Ensure that the two instructions are in the same cache line */
        fr = (struct emuframe __user *)
@@ -72,14 +92,18 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
        if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
                return SIGBUS;
 
-       if (get_isa16_mode(regs->cp0_epc)) {
-               err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
-               err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
-               err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
-               err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
+       if (isa16) {
+               err = __put_user(ir >> 16,
+                                (u16 __user *)(&fr->emul));
+               err |= __put_user(ir & 0xffff,
+                                 (u16 __user *)((long)(&fr->emul) + 2));
+               err |= __put_user(break_math >> 16,
+                                 (u16 __user *)(&fr->badinst));
+               err |= __put_user(break_math & 0xffff,
+                                 (u16 __user *)((long)(&fr->badinst) + 2));
        } else {
                err = __put_user(ir, &fr->emul);
-               err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+               err |= __put_user(break_math, &fr->badinst);
        }
 
        err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
@@ -90,8 +114,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
                return SIGBUS;
        }
 
-       regs->cp0_epc = ((unsigned long) &fr->emul) |
-               get_isa16_mode(regs->cp0_epc);
+       regs->cp0_epc = (unsigned long)&fr->emul | isa16;
 
        flush_cache_sigtramp((unsigned long)&fr->emul);
 
@@ -100,6 +123,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
 
 int do_dsemulret(struct pt_regs *xcp)
 {
+       int isa16 = get_isa16_mode(xcp->cp0_epc);
        struct emuframe __user *fr;
        unsigned long epc;
        u32 insn, cookie;
@@ -122,16 +146,19 @@ int do_dsemulret(struct pt_regs *xcp)
         *  - Is the instruction pointed to by the EPC an BREAK_MATH?
         *  - Is the following memory word the BD_COOKIE?
         */
-       if (get_isa16_mode(xcp->cp0_epc)) {
-               err = __get_user(instr[0], (u16 __user *)(&fr->badinst));
-               err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2));
+       if (isa16) {
+               err = __get_user(instr[0],
+                                (u16 __user *)(&fr->badinst));
+               err |= __get_user(instr[1],
+                                 (u16 __user *)((long)(&fr->badinst) + 2));
                insn = (instr[0] << 16) | instr[1];
        } else {
                err = __get_user(insn, &fr->badinst);
        }
        err |= __get_user(cookie, &fr->cookie);
 
-       if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
+       if (unlikely(err ||
+                    insn != BREAK_MATH(isa16) || cookie != BD_COOKIE)) {
                MIPS_FPU_EMU_INC_STATS(errors);
                return 0;
        }
index 8e97acbbe22ceb96789d36c44b98500f9885154a..e16ae7b75dbb775c9af078994e785c257a454f39 100644 (file)
@@ -59,7 +59,8 @@ const union ieee754dp __ieee754dp_spcvals[] = {
        DPCNST(1, 3,           0x4000000000000ULL),     /* - 10.0   */
        DPCNST(0, DP_EMAX + 1, 0x0000000000000ULL),     /* + infinity */
        DPCNST(1, DP_EMAX + 1, 0x0000000000000ULL),     /* - infinity */
-       DPCNST(0, DP_EMAX + 1, 0x7FFFFFFFFFFFFULL),     /* + indef quiet Nan */
+       DPCNST(0, DP_EMAX + 1, 0x7FFFFFFFFFFFFULL),     /* + indef legacy qNaN */
+       DPCNST(0, DP_EMAX + 1, 0x8000000000000ULL),     /* + indef 2008 qNaN */
        DPCNST(0, DP_EMAX,     0xFFFFFFFFFFFFFULL),     /* + max */
        DPCNST(1, DP_EMAX,     0xFFFFFFFFFFFFFULL),     /* - max */
        DPCNST(0, DP_EMIN,     0x0000000000000ULL),     /* + min normal */
@@ -82,7 +83,8 @@ const union ieee754sp __ieee754sp_spcvals[] = {
        SPCNST(1, 3,           0x200000),       /* - 10.0   */
        SPCNST(0, SP_EMAX + 1, 0x000000),       /* + infinity */
        SPCNST(1, SP_EMAX + 1, 0x000000),       /* - infinity */
-       SPCNST(0, SP_EMAX + 1, 0x3FFFFF),       /* + indef quiet Nan  */
+       SPCNST(0, SP_EMAX + 1, 0x3FFFFF),       /* + indef legacy quiet NaN */
+       SPCNST(0, SP_EMAX + 1, 0x400000),       /* + indef 2008 quiet NaN */
        SPCNST(0, SP_EMAX,     0x7FFFFF),       /* + max normal */
        SPCNST(1, SP_EMAX,     0x7FFFFF),       /* - max normal */
        SPCNST(0, SP_EMIN,     0x000000),       /* + min normal */
index df94720714c73cc95736544f7b05804d607d27ca..d3be351aed151305396723ddfff661837a114cde 100644 (file)
@@ -221,15 +221,16 @@ union ieee754dp ieee754dp_dump(char *s, union ieee754dp x);
 #define IEEE754_SPCVAL_NTEN            5       /* -10.0 */
 #define IEEE754_SPCVAL_PINFINITY       6       /* +inf */
 #define IEEE754_SPCVAL_NINFINITY       7       /* -inf */
-#define IEEE754_SPCVAL_INDEF           8       /* quiet NaN */
-#define IEEE754_SPCVAL_PMAX            9       /* +max norm */
-#define IEEE754_SPCVAL_NMAX            10      /* -max norm */
-#define IEEE754_SPCVAL_PMIN            11      /* +min norm */
-#define IEEE754_SPCVAL_NMIN            12      /* -min norm */
-#define IEEE754_SPCVAL_PMIND           13      /* +min denorm */
-#define IEEE754_SPCVAL_NMIND           14      /* -min denorm */
-#define IEEE754_SPCVAL_P1E31           15      /* + 1.0e31 */
-#define IEEE754_SPCVAL_P1E63           16      /* + 1.0e63 */
+#define IEEE754_SPCVAL_INDEF_LEG       8       /* legacy quiet NaN */
+#define IEEE754_SPCVAL_INDEF_2008      9       /* IEEE 754-2008 quiet NaN */
+#define IEEE754_SPCVAL_PMAX            10      /* +max norm */
+#define IEEE754_SPCVAL_NMAX            11      /* -max norm */
+#define IEEE754_SPCVAL_PMIN            12      /* +min norm */
+#define IEEE754_SPCVAL_NMIN            13      /* -min norm */
+#define IEEE754_SPCVAL_PMIND           14      /* +min denorm */
+#define IEEE754_SPCVAL_NMIND           15      /* -min denorm */
+#define IEEE754_SPCVAL_P1E31           16      /* + 1.0e31 */
+#define IEEE754_SPCVAL_P1E63           17      /* + 1.0e63 */
 
 extern const union ieee754dp __ieee754dp_spcvals[];
 extern const union ieee754sp __ieee754sp_spcvals[];
@@ -243,7 +244,8 @@ extern const union ieee754sp __ieee754sp_spcvals[];
 #define ieee754dp_zero(sn)     (ieee754dp_spcvals[IEEE754_SPCVAL_PZERO+(sn)])
 #define ieee754dp_one(sn)      (ieee754dp_spcvals[IEEE754_SPCVAL_PONE+(sn)])
 #define ieee754dp_ten(sn)      (ieee754dp_spcvals[IEEE754_SPCVAL_PTEN+(sn)])
-#define ieee754dp_indef()      (ieee754dp_spcvals[IEEE754_SPCVAL_INDEF])
+#define ieee754dp_indef()      (ieee754dp_spcvals[IEEE754_SPCVAL_INDEF_LEG + \
+                                                  ieee754_csr.nan2008])
 #define ieee754dp_max(sn)      (ieee754dp_spcvals[IEEE754_SPCVAL_PMAX+(sn)])
 #define ieee754dp_min(sn)      (ieee754dp_spcvals[IEEE754_SPCVAL_PMIN+(sn)])
 #define ieee754dp_mind(sn)     (ieee754dp_spcvals[IEEE754_SPCVAL_PMIND+(sn)])
@@ -254,7 +256,8 @@ extern const union ieee754sp __ieee754sp_spcvals[];
 #define ieee754sp_zero(sn)     (ieee754sp_spcvals[IEEE754_SPCVAL_PZERO+(sn)])
 #define ieee754sp_one(sn)      (ieee754sp_spcvals[IEEE754_SPCVAL_PONE+(sn)])
 #define ieee754sp_ten(sn)      (ieee754sp_spcvals[IEEE754_SPCVAL_PTEN+(sn)])
-#define ieee754sp_indef()      (ieee754sp_spcvals[IEEE754_SPCVAL_INDEF])
+#define ieee754sp_indef()      (ieee754sp_spcvals[IEEE754_SPCVAL_INDEF_LEG + \
+                                                  ieee754_csr.nan2008])
 #define ieee754sp_max(sn)      (ieee754sp_spcvals[IEEE754_SPCVAL_PMAX+(sn)])
 #define ieee754sp_min(sn)      (ieee754sp_spcvals[IEEE754_SPCVAL_PMIN+(sn)])
 #define ieee754sp_mind(sn)     (ieee754sp_spcvals[IEEE754_SPCVAL_PMIND+(sn)])
@@ -266,12 +269,25 @@ extern const union ieee754sp __ieee754sp_spcvals[];
  */
 static inline int ieee754si_indef(void)
 {
-       return INT_MAX;
+       return ieee754_csr.nan2008 ? 0 : INT_MAX;
 }
 
 static inline s64 ieee754di_indef(void)
 {
-       return S64_MAX;
+       return ieee754_csr.nan2008 ? 0 : S64_MAX;
+}
+
+/*
+ * Overflow integer value
+ */
+static inline int ieee754si_overflow(int xs)
+{
+       return ieee754_csr.nan2008 && xs ? INT_MIN : INT_MAX;
+}
+
+static inline s64 ieee754di_overflow(int xs)
+{
+       return ieee754_csr.nan2008 && xs ? S64_MIN : S64_MAX;
 }
 
 /* result types for xctx.rt */
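
The new helpers above separate the result for a NaN operand from the result for an out-of-range or infinite operand, and the two only differ in IEEE 754-2008 mode (0 for NaN, INT_MIN or INT_MAX by sign for overflow). A compact host-side sketch, with a plain bool standing in for ieee754_csr.nan2008 and si_indef()/si_overflow() as invented names:

        #include <limits.h>
        #include <stdbool.h>

        /* NaN operand: 2008 mode returns 0, legacy mode returns INT_MAX */
        static int si_indef(bool nan2008)
        {
                return nan2008 ? 0 : INT_MAX;
        }

        /* overflow or infinity: 2008 mode picks INT_MIN/INT_MAX by sign */
        static int si_overflow(bool nan2008, int sign)
        {
                return (nan2008 && sign) ? INT_MIN : INT_MAX;
        }
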
index 522d843f2ffd04a47873328724a9d45965dce0ce..ad3c73436777f0c3103c887827c4547d888f5941 100644 (file)
@@ -37,8 +37,11 @@ static inline int ieee754dp_isnan(union ieee754dp x)
 
 static inline int ieee754dp_issnan(union ieee754dp x)
 {
+       int qbit;
+
        assert(ieee754dp_isnan(x));
-       return (DPMANT(x) & DP_MBIT(DP_FBITS - 1)) == DP_MBIT(DP_FBITS - 1);
+       qbit = (DPMANT(x) & DP_MBIT(DP_FBITS - 1)) == DP_MBIT(DP_FBITS - 1);
+       return ieee754_csr.nan2008 ^ qbit;
 }
 
 
@@ -51,7 +54,12 @@ union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp r)
        assert(ieee754dp_issnan(r));
 
        ieee754_setcx(IEEE754_INVALID_OPERATION);
-       return ieee754dp_indef();
+       if (ieee754_csr.nan2008)
+               DPMANT(r) |= DP_MBIT(DP_FBITS - 1);
+       else
+               r = ieee754dp_indef();
+
+       return r;
 }
 
 static u64 ieee754dp_get_rounding(int sn, u64 xm)
index 6383e2c5c1adb5d527fe1023eb173f06e3776dc7..ed7bb277b3e05e224315508f32396fd37c239641 100644 (file)
@@ -63,10 +63,10 @@ static inline int ieee754_class_nan(int xc)
        if (ve == SP_EMAX+1+SP_EBIAS) {                                 \
                if (vm == 0)                                            \
                        vc = IEEE754_CLASS_INF;                         \
-               else if (vm & SP_MBIT(SP_FBITS-1))                      \
-                       vc = IEEE754_CLASS_SNAN;                        \
-               else                                                    \
+               else if (ieee754_csr.nan2008 ^ !(vm & SP_MBIT(SP_FBITS - 1))) \
                        vc = IEEE754_CLASS_QNAN;                        \
+               else                                                    \
+                       vc = IEEE754_CLASS_SNAN;                        \
        } else if (ve == SP_EMIN-1+SP_EBIAS) {                          \
                if (vm) {                                               \
                        ve = SP_EMIN;                                   \
@@ -97,10 +97,10 @@ static inline int ieee754_class_nan(int xc)
        if (ve == DP_EMAX+1+DP_EBIAS) {                                 \
                if (vm == 0)                                            \
                        vc = IEEE754_CLASS_INF;                         \
-               else if (vm & DP_MBIT(DP_FBITS-1))                      \
-                       vc = IEEE754_CLASS_SNAN;                        \
-               else                                                    \
+               else if (ieee754_csr.nan2008 ^ !(vm & DP_MBIT(DP_FBITS - 1))) \
                        vc = IEEE754_CLASS_QNAN;                        \
+               else                                                    \
+                       vc = IEEE754_CLASS_SNAN;                        \
        } else if (ve == DP_EMIN-1+DP_EBIAS) {                          \
                if (vm) {                                               \
                        ve = DP_EMIN;                                   \
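
The classification macros above encode the quiet-bit convention: legacy MIPS treats a set most-significant mantissa bit as signalling, while IEEE 754-2008 treats it as quiet, hence the XOR with ieee754_csr.nan2008. A standalone classifier over the raw double encoding is sketched below; is_signalling_nan() is a made-up helper, not a kernel API.

        #include <stdbool.h>
        #include <stdint.h>

        /* classify the raw encoding of a double as a signalling NaN or not */
        static bool is_signalling_nan(uint64_t bits, bool nan2008)
        {
                uint64_t exp  = (bits >> 52) & 0x7ff;
                uint64_t frac = bits & ((1ULL << 52) - 1);
                bool quiet_bit = frac & (1ULL << 51);

                if (exp != 0x7ff || frac == 0)
                        return false;           /* infinity or an ordinary number */
                /* 2008: set quiet bit means quiet; legacy MIPS: the inverse */
                return nan2008 ? !quiet_bit : quiet_bit;
        }
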
index ca8e35e33bf790f594156268e320ce866b2ed5c2..def00ffc50fcc7bc4740933215cdf5872940a450 100644 (file)
@@ -37,8 +37,11 @@ static inline int ieee754sp_isnan(union ieee754sp x)
 
 static inline int ieee754sp_issnan(union ieee754sp x)
 {
+       int qbit;
+
        assert(ieee754sp_isnan(x));
-       return SPMANT(x) & SP_MBIT(SP_FBITS - 1);
+       qbit = (SPMANT(x) & SP_MBIT(SP_FBITS - 1)) == SP_MBIT(SP_FBITS - 1);
+       return ieee754_csr.nan2008 ^ qbit;
 }
 
 
@@ -51,7 +54,12 @@ union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp r)
        assert(ieee754sp_issnan(r));
 
        ieee754_setcx(IEEE754_INVALID_OPERATION);
-       return ieee754sp_indef();
+       if (ieee754_csr.nan2008)
+               SPMANT(r) |= SP_MBIT(SP_FBITS - 1);
+       else
+               r = ieee754sp_indef();
+
+       return r;
 }
 
 static unsigned ieee754sp_get_rounding(int sn, unsigned xm)
index 3797148893adb62c10a989dc78e5e9dce56e1e9e..5060e8fdcb0b8817c5f6836e0a20273f08a79b52 100644 (file)
@@ -44,13 +44,16 @@ union ieee754sp ieee754sp_fdp(union ieee754dp x)
 
        switch (xc) {
        case IEEE754_CLASS_SNAN:
-               return ieee754sp_nanxcpt(ieee754sp_nan_fdp(xs, xm));
-
+               x = ieee754dp_nanxcpt(x);
+               EXPLODEXDP;
+               /* Fall through.  */
        case IEEE754_CLASS_QNAN:
                y = ieee754sp_nan_fdp(xs, xm);
-               EXPLODEYSP;
-               if (!ieee754_class_nan(yc))
-                       y = ieee754sp_indef();
+               if (!ieee754_csr.nan2008) {
+                       EXPLODEYSP;
+                       if (!ieee754_class_nan(yc))
+                               y = ieee754sp_indef();
+               }
                return y;
 
        case IEEE754_CLASS_INF:
index c50e9451f2d21da196d0afbe00979548ca8b217e..756c9cf2dfd2514f7461190a9a03fc065c697195 100644 (file)
 
 union ieee754sp ieee754sp_neg(union ieee754sp x)
 {
-       unsigned int oldrm;
        union ieee754sp y;
 
-       oldrm = ieee754_csr.rm;
-       ieee754_csr.rm = FPU_CSR_RD;
-       y = ieee754sp_sub(ieee754sp_zero(0), x);
-       ieee754_csr.rm = oldrm;
+       if (ieee754_csr.abs2008) {
+               y = x;
+               SPSIGN(y) = !SPSIGN(x);
+       } else {
+               unsigned int oldrm;
+
+               oldrm = ieee754_csr.rm;
+               ieee754_csr.rm = FPU_CSR_RD;
+               y = ieee754sp_sub(ieee754sp_zero(0), x);
+               ieee754_csr.rm = oldrm;
+       }
        return y;
 }
 
 union ieee754sp ieee754sp_abs(union ieee754sp x)
 {
-       unsigned int oldrm;
        union ieee754sp y;
 
-       oldrm = ieee754_csr.rm;
-       ieee754_csr.rm = FPU_CSR_RD;
-       if (SPSIGN(x))
-               y = ieee754sp_sub(ieee754sp_zero(0), x);
-       else
-               y = ieee754sp_add(ieee754sp_zero(0), x);
-       ieee754_csr.rm = oldrm;
+       if (ieee754_csr.abs2008) {
+               y = x;
+               SPSIGN(y) = 0;
+       } else {
+               unsigned int oldrm;
+
+               oldrm = ieee754_csr.rm;
+               ieee754_csr.rm = FPU_CSR_RD;
+               if (SPSIGN(x))
+                       y = ieee754sp_sub(ieee754sp_zero(0), x);
+               else
+                       y = ieee754sp_add(ieee754sp_zero(0), x);
+               ieee754_csr.rm = oldrm;
+       }
        return y;
 }
index 091299a317980b6bf0cfd84f0958b1f271d2bf9d..f4b4cabfe2e11740e17b4c77c913e6762ac26b71 100644 (file)
@@ -38,10 +38,13 @@ int ieee754sp_tint(union ieee754sp x)
        switch (xc) {
        case IEEE754_CLASS_SNAN:
        case IEEE754_CLASS_QNAN:
-       case IEEE754_CLASS_INF:
                ieee754_setcx(IEEE754_INVALID_OPERATION);
                return ieee754si_indef();
 
+       case IEEE754_CLASS_INF:
+               ieee754_setcx(IEEE754_INVALID_OPERATION);
+               return ieee754si_overflow(xs);
+
        case IEEE754_CLASS_ZERO:
                return 0;
 
@@ -56,7 +59,7 @@ int ieee754sp_tint(union ieee754sp x)
                /* Set invalid. We will only use overflow for floating
                   point overflow */
                ieee754_setcx(IEEE754_INVALID_OPERATION);
-               return ieee754si_indef();
+               return ieee754si_overflow(xs);
        }
        /* oh gawd */
        if (xe > SP_FBITS) {
@@ -97,7 +100,7 @@ int ieee754sp_tint(union ieee754sp x)
                if ((xm >> 31) != 0) {
                        /* This can happen after rounding */
                        ieee754_setcx(IEEE754_INVALID_OPERATION);
-                       return ieee754si_indef();
+                       return ieee754si_overflow(xs);
                }
                if (round || sticky)
                        ieee754_setcx(IEEE754_INEXACT);
index 9f3c742c1cea6ac264d1d4d66ef56b6916f675cb..a2450c7e452a672fdeba96bba5a7c262c1b6e876 100644 (file)
@@ -39,10 +39,13 @@ s64 ieee754sp_tlong(union ieee754sp x)
        switch (xc) {
        case IEEE754_CLASS_SNAN:
        case IEEE754_CLASS_QNAN:
-       case IEEE754_CLASS_INF:
                ieee754_setcx(IEEE754_INVALID_OPERATION);
                return ieee754di_indef();
 
+       case IEEE754_CLASS_INF:
+               ieee754_setcx(IEEE754_INVALID_OPERATION);
+               return ieee754di_overflow(xs);
+
        case IEEE754_CLASS_ZERO:
                return 0;
 
@@ -57,7 +60,7 @@ s64 ieee754sp_tlong(union ieee754sp x)
                /* Set invalid. We will only use overflow for floating
                   point overflow */
                ieee754_setcx(IEEE754_INVALID_OPERATION);
-               return ieee754di_indef();
+               return ieee754di_overflow(xs);
        }
        /* oh gawd */
        if (xe > SP_FBITS) {
@@ -94,7 +97,7 @@ s64 ieee754sp_tlong(union ieee754sp x)
                if ((xm >> 63) != 0) {
                        /* This can happen after rounding */
                        ieee754_setcx(IEEE754_INVALID_OPERATION);
-                       return ieee754di_indef();
+                       return ieee754di_overflow(xs);
                }
                if (round || sticky)
                        ieee754_setcx(IEEE754_INEXACT);
index 482192cc8f2b88ae89f4cf1495c0dd6055a5bda6..5a04b6f5c6fb8a2cc52d986f59042ad8cd585850 100644 (file)
@@ -241,7 +241,7 @@ static void output_pgtable_bits_defines(void)
 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
 #endif
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        if (cpu_has_rixi) {
 #ifdef _PAGE_NO_EXEC_SHIFT
                pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
index 2eda01e6e08fd38d5e572f5e66283536d9e9cd34..139ad1d7ab5e3ce9dfa8aba6b507fe04e8b8d8b9 100644 (file)
@@ -43,6 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80)  += pci-bcm1480.o pci-bcm1480ht.o
 obj-$(CONFIG_SNI_RM)           += fixup-sni.o ops-sni.o
 obj-$(CONFIG_LANTIQ)           += fixup-lantiq.o
 obj-$(CONFIG_PCI_LANTIQ)       += pci-lantiq.o ops-lantiq.o
+obj-$(CONFIG_SOC_MT7620)       += pci-mt7620.o
 obj-$(CONFIG_SOC_RT288X)       += pci-rt2880.o
 obj-$(CONFIG_SOC_RT3883)       += pci-rt3883.o
 obj-$(CONFIG_TANBAC_TB0219)    += fixup-tb0219.o
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
new file mode 100644 (file)
index 0000000..a009ee4
--- /dev/null
@@ -0,0 +1,426 @@
+/*
+ *  Ralink MT7620A SoC PCI support
+ *
+ *  Copyright (C) 2007-2013 Bruce Chang (Mediatek)
+ *  Copyright (C) 2013-2016 John Crispin <blogic@openwrt.org>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_pci.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7620.h>
+
+#define RALINK_PCI_IO_MAP_BASE         0x10160000
+#define RALINK_PCI_MEMORY_BASE         0x0
+
+#define RALINK_INT_PCIE0               4
+
+#define RALINK_CLKCFG1                 0x30
+#define RALINK_GPIOMODE                        0x60
+
+#define PPLL_CFG1                      0x9c
+#define PDRV_SW_SET                    BIT(23)
+
+#define PPLL_DRV                       0xa0
+#define PDRV_SW_SET                    (1<<31)
+#define LC_CKDRVPD                     (1<<19)
+#define LC_CKDRVOHZ                    (1<<18)
+#define LC_CKDRVHZ                     (1<<17)
+#define LC_CKTEST                      (1<<16)
+
+/* PCI Bridge registers */
+#define RALINK_PCI_PCICFG_ADDR         0x00
+#define PCIRST                         BIT(1)
+
+#define RALINK_PCI_PCIENA              0x0C
+#define PCIINT2                                BIT(20)
+
+#define RALINK_PCI_CONFIG_ADDR         0x20
+#define RALINK_PCI_CONFIG_DATA_VIRT_REG        0x24
+#define RALINK_PCI_MEMBASE             0x28
+#define RALINK_PCI_IOBASE              0x2C
+
+/* PCI RC registers */
+#define RALINK_PCI0_BAR0SETUP_ADDR     0x10
+#define RALINK_PCI0_IMBASEBAR0_ADDR    0x18
+#define RALINK_PCI0_ID                 0x30
+#define RALINK_PCI0_CLASS              0x34
+#define RALINK_PCI0_SUBID              0x38
+#define RALINK_PCI0_STATUS             0x50
+#define PCIE_LINK_UP_ST                        BIT(0)
+
+#define PCIEPHY0_CFG                   0x90
+
+#define RALINK_PCIEPHY_P0_CTL_OFFSET   0x7498
+#define RALINK_PCIE0_CLK_EN            (1 << 26)
+
+#define BUSY                           0x80000000
+#define WAITRETRY_MAX                  10
+#define WRITE_MODE                     (1UL << 23)
+#define DATA_SHIFT                     0
+#define ADDR_SHIFT                     8
+
+
+static void __iomem *bridge_base;
+static void __iomem *pcie_base;
+
+static struct reset_control *rstpcie0;
+
+static inline void bridge_w32(u32 val, unsigned reg)
+{
+       iowrite32(val, bridge_base + reg);
+}
+
+static inline u32 bridge_r32(unsigned reg)
+{
+       return ioread32(bridge_base + reg);
+}
+
+static inline void pcie_w32(u32 val, unsigned reg)
+{
+       iowrite32(val, pcie_base + reg);
+}
+
+static inline u32 pcie_r32(unsigned reg)
+{
+       return ioread32(pcie_base + reg);
+}
+
+static inline void pcie_m32(u32 clr, u32 set, unsigned reg)
+{
+       u32 val = pcie_r32(reg);
+
+       val &= ~clr;
+       val |= set;
+       pcie_w32(val, reg);
+}
+
+static int wait_pciephy_busy(void)
+{
+       unsigned long reg_value = 0x0, retry = 0;
+
+       while (1) {
+               reg_value = pcie_r32(PCIEPHY0_CFG);
+
+               if (reg_value & BUSY)
+                       mdelay(100);
+               else
+                       break;
+               if (retry++ > WAITRETRY_MAX) {
+                       printk(KERN_WARNING "PCIE-PHY retry failed.\n");
+                       return -1;
+               }
+       }
+       return 0;
+}
+
+static void pcie_phy(unsigned long addr, unsigned long val)
+{
+       wait_pciephy_busy();
+       pcie_w32(WRITE_MODE | (val << DATA_SHIFT) | (addr << ADDR_SHIFT),
+                PCIEPHY0_CFG);
+       mdelay(1);
+       wait_pciephy_busy();
+}
+
+static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
+                          int size, u32 *val)
+{
+       unsigned int slot = PCI_SLOT(devfn);
+       u8 func = PCI_FUNC(devfn);
+       u32 address;
+       u32 data;
+       u32 num = 0;
+
+       if (bus)
+               num = bus->number;
+
+       address = (((where & 0xF00) >> 8) << 24) | (num << 16) | (slot << 11) |
+                 (func << 8) | (where & 0xfc) | 0x80000000;
+       bridge_w32(address, RALINK_PCI_CONFIG_ADDR);
+       data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRT_REG);
+
+       switch (size) {
+       case 1:
+               *val = (data >> ((where & 3) << 3)) & 0xff;
+               break;
+       case 2:
+               *val = (data >> ((where & 3) << 3)) & 0xffff;
+               break;
+       case 4:
+               *val = data;
+               break;
+       }
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where,
+                           int size, u32 val)
+{
+       unsigned int slot = PCI_SLOT(devfn);
+       u8 func = PCI_FUNC(devfn);
+       u32 address;
+       u32 data;
+       u32 num = 0;
+
+       if (bus)
+               num = bus->number;
+
+       address = (((where & 0xF00) >> 8) << 24) | (num << 16) | (slot << 11) |
+                 (func << 8) | (where & 0xfc) | 0x80000000;
+       bridge_w32(address, RALINK_PCI_CONFIG_ADDR);
+       data = bridge_r32(RALINK_PCI_CONFIG_DATA_VIRT_REG);
+
+       switch (size) {
+       case 1:
+               data = (data & ~(0xff << ((where & 3) << 3))) |
+                       (val << ((where & 3) << 3));
+               break;
+       case 2:
+               data = (data & ~(0xffff << ((where & 3) << 3))) |
+                       (val << ((where & 3) << 3));
+               break;
+       case 4:
+               data = val;
+               break;
+       }
+
+       bridge_w32(data, RALINK_PCI_CONFIG_DATA_VIRT_REG);
+
+       return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops mt7620_pci_ops = {
+       .read   = pci_config_read,
+       .write  = pci_config_write,
+};
+
+static struct resource mt7620_res_pci_mem1;
+static struct resource mt7620_res_pci_io1;
+struct pci_controller mt7620_controller = {
+       .pci_ops        = &mt7620_pci_ops,
+       .mem_resource   = &mt7620_res_pci_mem1,
+       .mem_offset     = 0x00000000UL,
+       .io_resource    = &mt7620_res_pci_io1,
+       .io_offset      = 0x00000000UL,
+       .io_map_base    = 0xa0000000,
+};
+
+static int mt7620_pci_hw_init(struct platform_device *pdev)
+{
+       /* bypass PCIe DLL */
+       pcie_phy(0x0, 0x80);
+       pcie_phy(0x1, 0x04);
+
+       /* Elastic buffer control */
+       pcie_phy(0x68, 0xB4);
+
+       /* put core into reset */
+       pcie_m32(0, PCIRST, RALINK_PCI_PCICFG_ADDR);
+       reset_control_assert(rstpcie0);
+
+       /* disable power and all clocks */
+       rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
+       rt_sysc_m32(LC_CKDRVPD, PDRV_SW_SET, PPLL_DRV);
+
+       /* bring core out of reset */
+       reset_control_deassert(rstpcie0);
+       rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
+       mdelay(100);
+
+       if (!(rt_sysc_r32(PPLL_CFG1) & PDRV_SW_SET)) {
+               dev_err(&pdev->dev, "MT7620 PPLL unlock\n");
+               reset_control_assert(rstpcie0);
+               rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
+               return -1;
+       }
+
+       /* power up the bus */
+       rt_sysc_m32(LC_CKDRVHZ | LC_CKDRVOHZ, LC_CKDRVPD | PDRV_SW_SET,
+                   PPLL_DRV);
+
+       return 0;
+}
+
+static int mt7628_pci_hw_init(struct platform_device *pdev)
+{
+       u32 val = 0;
+
+       /* bring the core out of reset */
+       rt_sysc_m32(BIT(16), 0, RALINK_GPIOMODE);
+       reset_control_deassert(rstpcie0);
+
+       /* enable the pci clk */
+       rt_sysc_m32(0, RALINK_PCIE0_CLK_EN, RALINK_CLKCFG1);
+       mdelay(100);
+
+       /* voodoo from the SDK driver */
+       pcie_m32(~0xff, 0x5, RALINK_PCIEPHY_P0_CTL_OFFSET);
+
+       pci_config_read(NULL, 0, 0x70c, 4, &val);
+       val &= ~(0xff << 8);
+       val |= 0x50 << 8;
+       pci_config_write(NULL, 0, 0x70c, 4, val);
+
+       pci_config_read(NULL, 0, 0x70c, 4, &val);
+       dev_err(&pdev->dev, "Port 0 N_FTS = %x\n", (unsigned int) val);
+
+       return 0;
+}
+
+static int mt7620_pci_probe(struct platform_device *pdev)
+{
+       struct resource *bridge_res = platform_get_resource(pdev,
+                                                           IORESOURCE_MEM, 0);
+       struct resource *pcie_res = platform_get_resource(pdev,
+                                                         IORESOURCE_MEM, 1);
+       u32 val = 0;
+
+       rstpcie0 = devm_reset_control_get(&pdev->dev, "pcie0");
+       if (IS_ERR(rstpcie0))
+               return PTR_ERR(rstpcie0);
+
+       bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res);
+       if (IS_ERR(bridge_base))
+               return PTR_ERR(bridge_base);
+
+       pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res);
+       if (IS_ERR(pcie_base))
+               return PTR_ERR(pcie_base);
+
+       iomem_resource.start = 0;
+       iomem_resource.end = ~0;
+       ioport_resource.start = 0;
+       ioport_resource.end = ~0;
+
+       /* bring up the pci core */
+       switch (ralink_soc) {
+       case MT762X_SOC_MT7620A:
+               if (mt7620_pci_hw_init(pdev))
+                       return -1;
+               break;
+
+       case MT762X_SOC_MT7628AN:
+               if (mt7628_pci_hw_init(pdev))
+                       return -1;
+               break;
+
+       default:
+               dev_err(&pdev->dev, "pcie is not supported on this hardware\n");
+               return -1;
+       }
+       mdelay(50);
+
+       /* enable write access */
+       pcie_m32(PCIRST, 0, RALINK_PCI_PCICFG_ADDR);
+       mdelay(100);
+
+       /* check if there is a card present */
+       if ((pcie_r32(RALINK_PCI0_STATUS) & PCIE_LINK_UP_ST) == 0) {
+               reset_control_assert(rstpcie0);
+               rt_sysc_m32(RALINK_PCIE0_CLK_EN, 0, RALINK_CLKCFG1);
+               if (ralink_soc == MT762X_SOC_MT7620A)
+                       rt_sysc_m32(LC_CKDRVPD, PDRV_SW_SET, PPLL_DRV);
+               dev_err(&pdev->dev, "PCIE0 no card, disable it(RST&CLK)\n");
+               return -1;
+       }
+
+       /* setup ranges */
+       bridge_w32(0xffffffff, RALINK_PCI_MEMBASE);
+       bridge_w32(RALINK_PCI_IO_MAP_BASE, RALINK_PCI_IOBASE);
+
+       pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR);
+       pcie_w32(RALINK_PCI_MEMORY_BASE, RALINK_PCI0_IMBASEBAR0_ADDR);
+       pcie_w32(0x06040001, RALINK_PCI0_CLASS);
+
+       /* enable interrupts */
+       pcie_m32(0, PCIINT2, RALINK_PCI_PCIENA);
+
+       /* voodoo from the SDK driver */
+       pci_config_read(NULL, 0, 4, 4, &val);
+       pci_config_write(NULL, 0, 4, 4, val | 0x7);
+
+       pci_load_of_ranges(&mt7620_controller, pdev->dev.of_node);
+       register_pci_controller(&mt7620_controller);
+
+       return 0;
+}
+
+int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+       u16 cmd;
+       u32 val;
+       int irq = 0;
+
+       if ((dev->bus->number == 0) && (slot == 0)) {
+               pcie_w32(0x7FFF0001, RALINK_PCI0_BAR0SETUP_ADDR);
+               pci_config_write(dev->bus, 0, PCI_BASE_ADDRESS_0, 4,
+                                RALINK_PCI_MEMORY_BASE);
+               pci_config_read(dev->bus, 0, PCI_BASE_ADDRESS_0, 4, &val);
+       } else if ((dev->bus->number == 1) && (slot == 0x0)) {
+               irq = RALINK_INT_PCIE0;
+       } else {
+               dev_err(&dev->dev, "no irq found - bus=0x%x, slot = 0x%x\n",
+                       dev->bus->number, slot);
+               return 0;
+       }
+       dev_err(&dev->dev, "card - bus=0x%x, slot = 0x%x irq=%d\n",
+               dev->bus->number, slot, irq);
+
+       /* configure the cache line size to 0x14 */
+       pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 0x14);
+
+       /* configure latency timer to 0xff */
+       pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xff);
+       pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+       /* setup the slot */
+       cmd = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+       pci_write_config_word(dev, PCI_COMMAND, cmd);
+       pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+
+       return irq;
+}
+
+int pcibios_plat_dev_init(struct pci_dev *dev)
+{
+       return 0;
+}
+
+static const struct of_device_id mt7620_pci_ids[] = {
+       { .compatible = "mediatek,mt7620-pci" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, mt7620_pci_ids);
+
+static struct platform_driver mt7620_pci_driver = {
+       .probe = mt7620_pci_probe,
+       .driver = {
+               .name = "mt7620-pci",
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(mt7620_pci_ids),
+       },
+};
+
+static int __init mt7620_pci_init(void)
+{
+       return platform_driver_register(&mt7620_pci_driver);
+}
+
+arch_initcall(mt7620_pci_init);
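
The pci_config_read()/pci_config_write() helpers in the driver above compose a type-1 style configuration address: extended register bits [11:8] land in bits [27:24], followed by bus, slot, function and the dword-aligned register offset, with bit 31 as the enable bit. A host-side sketch of just that encoding (mt7620_cfg_addr() is an invented name, no hardware access involved):

        #include <stdint.h>

        /* configuration address word as composed by the driver above */
        static uint32_t mt7620_cfg_addr(uint32_t bus, uint32_t slot,
                                        uint32_t func, uint32_t where)
        {
                return (((where & 0xf00) >> 8) << 24) | /* extended register bits */
                       (bus << 16) | (slot << 11) |
                       (func << 8) | (where & 0xfc) |
                       0x80000000u;                     /* enable bit */
        }
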
diff --git a/arch/mips/pic32/Kconfig b/arch/mips/pic32/Kconfig
new file mode 100644 (file)
index 0000000..fde56a8
--- /dev/null
@@ -0,0 +1,51 @@
+if MACH_PIC32
+
+choice
+       prompt "Machine Type"
+
+config PIC32MZDA
+       bool "Microchip PIC32MZDA Platform"
+       select BOOT_ELF32
+       select BOOT_RAW
+       select CEVT_R4K
+       select CSRC_R4K
+       select DMA_NONCOHERENT
+       select SYS_HAS_CPU_MIPS32_R2
+       select SYS_HAS_EARLY_PRINTK
+       select SYS_SUPPORTS_32BIT_KERNEL
+       select SYS_SUPPORTS_LITTLE_ENDIAN
+       select ARCH_REQUIRE_GPIOLIB
+       select HAVE_MACH_CLKDEV
+       select COMMON_CLK
+       select CLKDEV_LOOKUP
+       select LIBFDT
+       select USE_OF
+       select PINCTRL
+       select PIC32_EVIC
+       help
+         Support for the Microchip PIC32MZDA microcontroller.
+
+         This is a 32-bit microcontroller with support for external or
+         internally packaged DDR2 memory up to 128MB.
+
+         For more information, see <http://www.microchip.com/>.
+
+endchoice
+
+choice
+       prompt "Devicetree selection"
+       default DTB_PIC32_NONE
+       help
+         Select the devicetree.
+
+config DTB_PIC32_NONE
+       bool "None"
+
+config DTB_PIC32_MZDA_SK
+       bool "PIC32MZDA Starter Kit"
+       depends on PIC32MZDA
+       select BUILTIN_DTB
+
+endchoice
+
+endif # MACH_PIC32
diff --git a/arch/mips/pic32/Makefile b/arch/mips/pic32/Makefile
new file mode 100644 (file)
index 0000000..fd357f4
--- /dev/null
@@ -0,0 +1,6 @@
+#
+# Joshua Henderson, <joshua.henderson@microchip.com>
+# Copyright (C) 2015 Microchip Technology, Inc.  All rights reserved.
+#
+obj-$(CONFIG_MACH_PIC32) += common/
+obj-$(CONFIG_PIC32MZDA) += pic32mzda/
diff --git a/arch/mips/pic32/Platform b/arch/mips/pic32/Platform
new file mode 100644 (file)
index 0000000..cd2084f
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# PIC32MZDA
+#
+platform-$(CONFIG_PIC32MZDA)   += pic32/
+cflags-$(CONFIG_PIC32MZDA)     += -I$(srctree)/arch/mips/include/asm/mach-pic32
+load-$(CONFIG_PIC32MZDA)       += 0xffffffff88000000
+all-$(CONFIG_PIC32MZDA)                := $(COMPRESSION_FNAME).bin
diff --git a/arch/mips/pic32/common/Makefile b/arch/mips/pic32/common/Makefile
new file mode 100644 (file)
index 0000000..be1909c
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Joshua Henderson, <joshua.henderson@microchip.com>
+# Copyright (C) 2015 Microchip Technology, Inc.  All rights reserved.
+#
+obj-y = reset.o irq.o
diff --git a/arch/mips/pic32/common/irq.c b/arch/mips/pic32/common/irq.c
new file mode 100644 (file)
index 0000000..6df347e
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#include <linux/init.h>
+#include <linux/irqchip.h>
+#include <asm/irq.h>
+
+void __init arch_init_irq(void)
+{
+       irqchip_init();
+}
diff --git a/arch/mips/pic32/common/reset.c b/arch/mips/pic32/common/reset.c
new file mode 100644 (file)
index 0000000..8334575
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+#include <asm/mach-pic32/pic32.h>
+
+#define PIC32_RSWRST           0x10
+
+static void pic32_halt(void)
+{
+       while (1) {
+               __asm__(".set push;\n"
+                       ".set arch=r4000;\n"
+                       "wait;\n"
+                       ".set pop;\n"
+               );
+       }
+}
+
+static void pic32_machine_restart(char *command)
+{
+       void __iomem *reg =
+               ioremap(PIC32_BASE_RESET + PIC32_RSWRST, sizeof(u32));
+
+       pic32_syskey_unlock();
+
+       /* magic write/read */
+       __raw_writel(1, reg);
+       (void)__raw_readl(reg);
+
+       pic32_halt();
+}
+
+static void pic32_machine_halt(void)
+{
+       local_irq_disable();
+
+       pic32_halt();
+}
+
+static int __init mips_reboot_setup(void)
+{
+       _machine_restart = pic32_machine_restart;
+       _machine_halt = pic32_machine_halt;
+       pm_power_off = pic32_machine_halt;
+
+       return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/pic32/pic32mzda/Makefile b/arch/mips/pic32/pic32mzda/Makefile
new file mode 100644 (file)
index 0000000..4a4c272
--- /dev/null
@@ -0,0 +1,9 @@
+#
+# Joshua Henderson, <joshua.henderson@microchip.com>
+# Copyright (C) 2015 Microchip Technology, Inc.  All rights reserved.
+#
+obj-y                  := init.o time.o config.o
+
+obj-$(CONFIG_EARLY_PRINTK)     += early_console.o      \
+                                  early_pin.o          \
+                                  early_clk.o
diff --git a/arch/mips/pic32/pic32mzda/config.c b/arch/mips/pic32/pic32mzda/config.c
new file mode 100644 (file)
index 0000000..fe293a0
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Purna Chandra Mandal, purna.mandal@microchip.com
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+
+#include <asm/mach-pic32/pic32.h>
+
+#include "pic32mzda.h"
+
+#define PIC32_CFGCON   0x0000
+#define PIC32_DEVID    0x0020
+#define PIC32_SYSKEY   0x0030
+#define PIC32_CFGEBIA  0x00c0
+#define PIC32_CFGEBIC  0x00d0
+#define PIC32_CFGCON2  0x00f0
+#define PIC32_RCON     0x1240
+
+static void __iomem *pic32_conf_base;
+static DEFINE_SPINLOCK(config_lock);
+static u32 pic32_reset_status;
+
+static u32 pic32_conf_get_reg_field(u32 offset, u32 rshift, u32 mask)
+{
+       u32 v;
+
+       v = readl(pic32_conf_base + offset);
+       v >>= rshift;
+       v &= mask;
+
+       return v;
+}
+
+static u32 pic32_conf_modify_atomic(u32 offset, u32 mask, u32 set)
+{
+       u32 v;
+       unsigned long flags;
+
+       spin_lock_irqsave(&config_lock, flags);
+       v = readl(pic32_conf_base + offset);
+       v &= ~mask;
+       v |= (set & mask);
+       writel(v, pic32_conf_base + offset);
+       spin_unlock_irqrestore(&config_lock, flags);
+
+       return 0;
+}
+
+int pic32_enable_lcd(void)
+{
+       return pic32_conf_modify_atomic(PIC32_CFGCON2, BIT(31), BIT(31));
+}
+
+int pic32_disable_lcd(void)
+{
+       return pic32_conf_modify_atomic(PIC32_CFGCON2, BIT(31), 0);
+}
+
+int pic32_set_lcd_mode(int mode)
+{
+       u32 mask = mode ? BIT(30) : 0;
+
+       return pic32_conf_modify_atomic(PIC32_CFGCON2, BIT(30), mask);
+}
+
+int pic32_set_sdhci_adma_fifo_threshold(u32 rthrsh, u32 wthrsh)
+{
+       u32 clr, set;
+
+       clr = (0x3ff << 4) | (0x3ff << 16);
+       set = (rthrsh << 4) | (wthrsh << 16);
+       return pic32_conf_modify_atomic(PIC32_CFGCON2, clr, set);
+}
+
+void pic32_syskey_unlock_debug(const char *func, const ulong line)
+{
+       void __iomem *syskey = pic32_conf_base + PIC32_SYSKEY;
+
+       pr_debug("%s: called from %s:%lu\n", __func__, func, line);
+       writel(0x00000000, syskey);
+       writel(0xAA996655, syskey);
+       writel(0x556699AA, syskey);
+}
+
+static u32 pic32_get_device_id(void)
+{
+       return pic32_conf_get_reg_field(PIC32_DEVID, 0, 0x0fffffff);
+}
+
+static u32 pic32_get_device_version(void)
+{
+       return pic32_conf_get_reg_field(PIC32_DEVID, 28, 0xf);
+}
+
+u32 pic32_get_boot_status(void)
+{
+       return pic32_reset_status;
+}
+EXPORT_SYMBOL(pic32_get_boot_status);
+
+void __init pic32_config_init(void)
+{
+       pic32_conf_base = ioremap(PIC32_BASE_CONFIG, 0x110);
+       if (!pic32_conf_base)
+               panic("pic32: config base not mapped");
+
+       /* Boot Status */
+       pic32_reset_status = readl(pic32_conf_base + PIC32_RCON);
+       writel(-1, PIC32_CLR(pic32_conf_base + PIC32_RCON));
+
+       /* Device Information */
+       pr_info("Device Id: 0x%08x, Device Ver: 0x%04x\n",
+               pic32_get_device_id(),
+               pic32_get_device_version());
+}
diff --git a/arch/mips/pic32/pic32mzda/early_clk.c b/arch/mips/pic32/pic32mzda/early_clk.c
new file mode 100644 (file)
index 0000000..96c090e
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#include <asm/mach-pic32/pic32.h>
+
+#include "pic32mzda.h"
+
+/* Oscillators, PLL & clocks */
+#define ICLK_MASK      0x00000080
+#define PLLDIV_MASK    0x00000007
+#define CUROSC_MASK    0x00000007
+#define PLLMUL_MASK    0x0000007F
+#define PB_MASK                0x00000007
+#define FRC1           0
+#define FRC2           7
+#define SPLL           1
+#define POSC           2
+#define FRC_CLK                8000000
+
+#define PIC32_POSC_FREQ        24000000
+
+#define OSCCON         0x0000
+#define SPLLCON                0x0020
+#define PB1DIV         0x0140
+
+u32 pic32_get_sysclk(void)
+{
+       u32 osc_freq = 0;
+       u32 pllclk;
+       u32 frcdivn;
+       u32 osccon;
+       u32 spllcon;
+       int curr_osc;
+
+       u32 plliclk;
+       u32 pllidiv;
+       u32 pllodiv;
+       u32 pllmult;
+       u32 frcdiv;
+
+       void __iomem *osc_base = ioremap(PIC32_BASE_OSC, 0x200);
+
+       osccon = __raw_readl(osc_base + OSCCON);
+       spllcon = __raw_readl(osc_base + SPLLCON);
+
+       plliclk = (spllcon & ICLK_MASK);
+       pllidiv = ((spllcon >> 8) & PLLDIV_MASK) + 1;
+       pllodiv = ((spllcon >> 24) & PLLDIV_MASK);
+       pllmult = ((spllcon >> 16) & PLLMUL_MASK) + 1;
+       frcdiv = ((osccon >> 24) & PLLDIV_MASK);
+
+       pllclk = plliclk ? FRC_CLK : PIC32_POSC_FREQ;
+       frcdivn = ((1 << frcdiv) + 1) + (128 * (frcdiv == 7));
+
+       if (pllodiv < 2)
+               pllodiv = 2;
+       else if (pllodiv < 5)
+               pllodiv = (1 << pllodiv);
+       else
+               pllodiv = 32;
+
+       curr_osc = (int)((osccon >> 12) & CUROSC_MASK);
+
+       switch (curr_osc) {
+       case FRC1:
+       case FRC2:
+               osc_freq = FRC_CLK / frcdivn;
+               break;
+       case SPLL:
+               osc_freq = ((pllclk / pllidiv) * pllmult) / pllodiv;
+               break;
+       case POSC:
+               osc_freq = PIC32_POSC_FREQ;
+               break;
+       default:
+               break;
+       }
+
+       iounmap(osc_base);
+
+       return osc_freq;
+}
+
+u32 pic32_get_pbclk(int bus)
+{
+       u32 clk_freq;
+       void __iomem *osc_base = ioremap(PIC32_BASE_OSC, 0x200);
+       u32 pbxdiv = PB1DIV + ((bus - 1) * 0x10);
+       u32 pbdiv = (__raw_readl(osc_base + pbxdiv) & PB_MASK) + 1;
+
+       iounmap(osc_base);
+
+       clk_freq = pic32_get_sysclk();
+
+       return clk_freq / pbdiv;
+}
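
pic32_get_sysclk() above selects among the FRC, the system PLL and the primary oscillator; the PLL branch computes (input / PLLIDIV) * PLLMULT / PLLODIV, with the output-divider field decoded as shown (0..1 means divide by 2, 2..4 is a power of two, 5..7 saturates at 32). A host-side sketch of only that arithmetic; spll_rate() and its parameters are illustrative stand-ins for the register fields.

        #include <stdint.h>

        /* SPLL output = (refclk / idiv) * mult / odiv, with the odiv field decoded */
        static uint32_t spll_rate(uint32_t refclk, uint32_t idiv,
                                  uint32_t mult, uint32_t odiv_field)
        {
                uint32_t odiv;

                if (odiv_field < 2)
                        odiv = 2;
                else if (odiv_field < 5)
                        odiv = 1u << odiv_field;
                else
                        odiv = 32;

                return ((refclk / idiv) * mult) / odiv;
        }
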
diff --git a/arch/mips/pic32/pic32mzda/early_console.c b/arch/mips/pic32/pic32mzda/early_console.c
new file mode 100644 (file)
index 0000000..d7b7834
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#include <asm/mach-pic32/pic32.h>
+#include <asm/fw/fw.h>
+
+#include "pic32mzda.h"
+#include "early_pin.h"
+
+/* Default early console parameters */
+#define EARLY_CONSOLE_PORT     1
+#define EARLY_CONSOLE_BAUDRATE 115200
+
+#define UART_ENABLE            BIT(15)
+#define UART_ENABLE_RX         BIT(12)
+#define UART_ENABLE_TX         BIT(10)
+#define UART_TX_FULL           BIT(9)
+
+/* UART1(x == 0) - UART6(x == 5) */
+#define UART_BASE(x)   ((x) * 0x0200)
+#define U_MODE(x)      UART_BASE(x)
+#define U_STA(x)       (UART_BASE(x) + 0x10)
+#define U_TXR(x)       (UART_BASE(x) + 0x20)
+#define U_BRG(x)       (UART_BASE(x) + 0x40)
+
+static void __iomem *uart_base;
+static char console_port = -1;
+
+static int __init configure_uart_pins(int port)
+{
+       switch (port) {
+       case 1:
+               pic32_pps_input(IN_FUNC_U2RX, IN_RPB0);
+               pic32_pps_output(OUT_FUNC_U2TX, OUT_RPG9);
+               break;
+       case 5:
+               pic32_pps_input(IN_FUNC_U6RX, IN_RPD0);
+               pic32_pps_output(OUT_FUNC_U6TX, OUT_RPB8);
+               break;
+       default:
+               return -1;
+       }
+
+       return 0;
+}
+
+static void __init configure_uart(char port, int baud)
+{
+       u32 pbclk;
+
+       pbclk = pic32_get_pbclk(2);
+
+       __raw_writel(0, uart_base + U_MODE(port));
+       __raw_writel(((pbclk / baud) / 16) - 1, uart_base + U_BRG(port));
+       __raw_writel(UART_ENABLE, uart_base + U_MODE(port));
+       __raw_writel(UART_ENABLE_TX | UART_ENABLE_RX,
+                    uart_base + PIC32_SET(U_STA(port)));
+}
+
+static void __init setup_early_console(char port, int baud)
+{
+       if (configure_uart_pins(port))
+               return;
+
+       console_port = port;
+       configure_uart(console_port, baud);
+}
+
+static char * __init pic32_getcmdline(void)
+{
+       /*
+        * arch_mem_init() has not been called yet, so we don't have a real
+        * command line setup if using CONFIG_CMDLINE_BOOL.
+        */
+#ifdef CONFIG_CMDLINE_OVERRIDE
+       return CONFIG_CMDLINE;
+#else
+       return fw_getcmdline();
+#endif
+}
+
+static int __init get_port_from_cmdline(char *arch_cmdline)
+{
+       char *s;
+       int port = -1;
+
+       if (!arch_cmdline || *arch_cmdline == '\0')
+               goto _out;
+
+       s = strstr(arch_cmdline, "earlyprintk=");
+       if (s) {
+               s = strstr(s, "ttyS");
+               if (s)
+                       s += 4;
+               else
+                       goto _out;
+
+               port = (*s) - '0';
+       }
+
+_out:
+       return port;
+}
+
+static int __init get_baud_from_cmdline(char *arch_cmdline)
+{
+       char *s;
+       int baud = -1;
+
+       if (!arch_cmdline || *arch_cmdline == '\0')
+               goto _out;
+
+       s = strstr(arch_cmdline, "earlyprintk=");
+       if (s) {
+               s = strstr(s, "ttyS");
+               if (s)
+                       s += 6;
+               else
+                       goto _out;
+
+               baud = 0;
+               while (*s >= '0' && *s <= '9')
+                       baud = baud * 10 + *s++ - '0';
+       }
+
+_out:
+       return baud;
+}
+
+void __init fw_init_early_console(char port)
+{
+       char *arch_cmdline = pic32_getcmdline();
+       int baud = -1;
+
+       uart_base = ioremap_nocache(PIC32_BASE_UART, 0xc00);
+
+       baud = get_baud_from_cmdline(arch_cmdline);
+       if (port == -1)
+               port = get_port_from_cmdline(arch_cmdline);
+
+       if (port == -1)
+               port = EARLY_CONSOLE_PORT;
+
+       if (baud == -1)
+               baud = EARLY_CONSOLE_BAUDRATE;
+
+       setup_early_console(port, baud);
+}
+
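+/* Early console output: busy-wait while the TX FIFO is full, then write. */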
+int prom_putchar(char c)
+{
+       if (console_port >= 0) {
+               while (__raw_readl(
+                               uart_base + U_STA(console_port)) & UART_TX_FULL)
+                       ;
+
+               __raw_writel(c, uart_base + U_TXR(console_port));
+       }
+
+       return 1;
+}
diff --git a/arch/mips/pic32/pic32mzda/early_pin.c b/arch/mips/pic32/pic32mzda/early_pin.c
new file mode 100644 (file)
index 0000000..aa673f8
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#include <asm/io.h>
+
+#include "early_pin.h"
+
+#define PPS_BASE 0x1f800000
+
+/* Input PPS Registers */
+#define INT1R 0x1404
+#define INT2R 0x1408
+#define INT3R 0x140C
+#define INT4R 0x1410
+#define T2CKR 0x1418
+#define T3CKR 0x141C
+#define T4CKR 0x1420
+#define T5CKR 0x1424
+#define T6CKR 0x1428
+#define T7CKR 0x142C
+#define T8CKR 0x1430
+#define T9CKR 0x1434
+#define IC1R 0x1438
+#define IC2R 0x143C
+#define IC3R 0x1440
+#define IC4R 0x1444
+#define IC5R 0x1448
+#define IC6R 0x144C
+#define IC7R 0x1450
+#define IC8R 0x1454
+#define IC9R 0x1458
+#define OCFAR 0x1460
+#define U1RXR 0x1468
+#define U1CTSR 0x146C
+#define U2RXR 0x1470
+#define U2CTSR 0x1474
+#define U3RXR 0x1478
+#define U3CTSR 0x147C
+#define U4RXR 0x1480
+#define U4CTSR 0x1484
+#define U5RXR 0x1488
+#define U5CTSR 0x148C
+#define U6RXR 0x1490
+#define U6CTSR 0x1494
+#define SDI1R 0x149C
+#define SS1R 0x14A0
+#define SDI2R 0x14A8
+#define SS2R 0x14AC
+#define SDI3R 0x14B4
+#define SS3R 0x14B8
+#define SDI4R 0x14C0
+#define SS4R 0x14C4
+#define SDI5R 0x14CC
+#define SS5R 0x14D0
+#define SDI6R 0x14D8
+#define SS6R 0x14DC
+#define C1RXR 0x14E0
+#define C2RXR 0x14E4
+#define REFCLKI1R 0x14E8
+#define REFCLKI3R 0x14F0
+#define REFCLKI4R 0x14F4
+
+static const struct
+{
+       int function;
+       int reg;
+} input_pin_reg[] = {
+       { IN_FUNC_INT3, INT3R },
+       { IN_FUNC_T2CK, T2CKR },
+       { IN_FUNC_T6CK, T6CKR },
+       { IN_FUNC_IC3, IC3R  },
+       { IN_FUNC_IC7, IC7R },
+       { IN_FUNC_U1RX, U1RXR },
+       { IN_FUNC_U2CTS, U2CTSR },
+       { IN_FUNC_U5RX, U5RXR },
+       { IN_FUNC_U6CTS, U6CTSR },
+       { IN_FUNC_SDI1, SDI1R },
+       { IN_FUNC_SDI3, SDI3R },
+       { IN_FUNC_SDI5, SDI5R },
+       { IN_FUNC_SS6, SS6R },
+       { IN_FUNC_REFCLKI1, REFCLKI1R },
+       { IN_FUNC_INT4, INT4R },
+       { IN_FUNC_T5CK, T5CKR },
+       { IN_FUNC_T7CK, T7CKR },
+       { IN_FUNC_IC4, IC4R },
+       { IN_FUNC_IC8, IC8R },
+       { IN_FUNC_U3RX, U3RXR },
+       { IN_FUNC_U4CTS, U4CTSR },
+       { IN_FUNC_SDI2, SDI2R },
+       { IN_FUNC_SDI4, SDI4R },
+       { IN_FUNC_C1RX, C1RXR },
+       { IN_FUNC_REFCLKI4, REFCLKI4R },
+       { IN_FUNC_INT2, INT2R },
+       { IN_FUNC_T3CK, T3CKR },
+       { IN_FUNC_T8CK, T8CKR },
+       { IN_FUNC_IC2, IC2R },
+       { IN_FUNC_IC5, IC5R },
+       { IN_FUNC_IC9, IC9R },
+       { IN_FUNC_U1CTS, U1CTSR },
+       { IN_FUNC_U2RX, U2RXR },
+       { IN_FUNC_U5CTS, U5CTSR },
+       { IN_FUNC_SS1, SS1R },
+       { IN_FUNC_SS3, SS3R },
+       { IN_FUNC_SS4, SS4R },
+       { IN_FUNC_SS5, SS5R },
+       { IN_FUNC_C2RX, C2RXR },
+       { IN_FUNC_INT1, INT1R },
+       { IN_FUNC_T4CK, T4CKR },
+       { IN_FUNC_T9CK, T9CKR },
+       { IN_FUNC_IC1, IC1R },
+       { IN_FUNC_IC6, IC6R },
+       { IN_FUNC_U3CTS, U3CTSR },
+       { IN_FUNC_U4RX, U4RXR },
+       { IN_FUNC_U6RX, U6RXR },
+       { IN_FUNC_SS2, SS2R },
+       { IN_FUNC_SDI6, SDI6R },
+       { IN_FUNC_OCFA, OCFAR },
+       { IN_FUNC_REFCLKI3, REFCLKI3R },
+};
+
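+/* Select @pin as the source of input PPS function @function. */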
+void pic32_pps_input(int function, int pin)
+{
+       void __iomem *pps_base = ioremap_nocache(PPS_BASE, 0xF4);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(input_pin_reg); i++) {
+               if (input_pin_reg[i].function == function) {
+                       __raw_writel(pin, pps_base + input_pin_reg[i].reg);
+                       break;
+               }
+       }
+
+       iounmap(pps_base);
+}
+
+/* Output PPS Registers */
+#define RPA14R 0x1538
+#define RPA15R 0x153C
+#define RPB0R 0x1540
+#define RPB1R 0x1544
+#define RPB2R 0x1548
+#define RPB3R 0x154C
+#define RPB5R 0x1554
+#define RPB6R 0x1558
+#define RPB7R 0x155C
+#define RPB8R 0x1560
+#define RPB9R 0x1564
+#define RPB10R 0x1568
+#define RPB14R 0x1578
+#define RPB15R 0x157C
+#define RPC1R 0x1584
+#define RPC2R 0x1588
+#define RPC3R 0x158C
+#define RPC4R 0x1590
+#define RPC13R 0x15B4
+#define RPC14R 0x15B8
+#define RPD0R 0x15C0
+#define RPD1R 0x15C4
+#define RPD2R 0x15C8
+#define RPD3R 0x15CC
+#define RPD4R 0x15D0
+#define RPD5R 0x15D4
+#define RPD6R 0x15D8
+#define RPD7R 0x15DC
+#define RPD9R 0x15E4
+#define RPD10R 0x15E8
+#define RPD11R 0x15EC
+#define RPD12R 0x15F0
+#define RPD14R 0x15F8
+#define RPD15R 0x15FC
+#define RPE3R 0x160C
+#define RPE5R 0x1614
+#define RPE8R 0x1620
+#define RPE9R 0x1624
+#define RPF0R 0x1640
+#define RPF1R 0x1644
+#define RPF2R 0x1648
+#define RPF3R 0x164C
+#define RPF4R 0x1650
+#define RPF5R 0x1654
+#define RPF8R 0x1660
+#define RPF12R 0x1670
+#define RPF13R 0x1674
+#define RPG0R 0x1680
+#define RPG1R 0x1684
+#define RPG6R 0x1698
+#define RPG7R 0x169C
+#define RPG8R 0x16A0
+#define RPG9R 0x16A4
+
+static const struct
+{
+       int pin;
+       int reg;
+} output_pin_reg[] = {
+       { OUT_RPD2, RPD2R },
+       { OUT_RPG8, RPG8R },
+       { OUT_RPF4, RPF4R },
+       { OUT_RPD10, RPD10R },
+       { OUT_RPF1, RPF1R },
+       { OUT_RPB9, RPB9R },
+       { OUT_RPB10, RPB10R },
+       { OUT_RPC14, RPC14R },
+       { OUT_RPB5, RPB5R },
+       { OUT_RPC1, RPC1R },
+       { OUT_RPD14, RPD14R },
+       { OUT_RPG1, RPG1R },
+       { OUT_RPA14, RPA14R },
+       { OUT_RPD6, RPD6R },
+       { OUT_RPD3, RPD3R },
+       { OUT_RPG7, RPG7R },
+       { OUT_RPF5, RPF5R },
+       { OUT_RPD11, RPD11R },
+       { OUT_RPF0, RPF0R },
+       { OUT_RPB1, RPB1R },
+       { OUT_RPE5, RPE5R },
+       { OUT_RPC13, RPC13R },
+       { OUT_RPB3, RPB3R },
+       { OUT_RPC4, RPC4R },
+       { OUT_RPD15, RPD15R },
+       { OUT_RPG0, RPG0R },
+       { OUT_RPA15, RPA15R },
+       { OUT_RPD7, RPD7R },
+       { OUT_RPD9, RPD9R },
+       { OUT_RPG6, RPG6R },
+       { OUT_RPB8, RPB8R },
+       { OUT_RPB15, RPB15R },
+       { OUT_RPD4, RPD4R },
+       { OUT_RPB0, RPB0R },
+       { OUT_RPE3, RPE3R },
+       { OUT_RPB7, RPB7R },
+       { OUT_RPF12, RPF12R },
+       { OUT_RPD12, RPD12R },
+       { OUT_RPF8, RPF8R },
+       { OUT_RPC3, RPC3R },
+       { OUT_RPE9, RPE9R },
+       { OUT_RPD1, RPD1R },
+       { OUT_RPG9, RPG9R },
+       { OUT_RPB14, RPB14R },
+       { OUT_RPD0, RPD0R },
+       { OUT_RPB6, RPB6R },
+       { OUT_RPD5, RPD5R },
+       { OUT_RPB2, RPB2R },
+       { OUT_RPF3, RPF3R },
+       { OUT_RPF13, RPF13R },
+       { OUT_RPC2, RPC2R },
+       { OUT_RPE8, RPE8R },
+       { OUT_RPF2, RPF2R },
+};
+
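+/* Route output PPS function @function onto @pin. */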
+void pic32_pps_output(int function, int pin)
+{
+       void __iomem *pps_base = ioremap_nocache(PPS_BASE, 0x170);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(output_pin_reg); i++) {
+               if (output_pin_reg[i].pin == pin) {
+                       __raw_writel(function,
+                               pps_base + output_pin_reg[i].reg);
+                       break;
+               }
+       }
+
+       iounmap(pps_base);
+}
diff --git a/arch/mips/pic32/pic32mzda/early_pin.h b/arch/mips/pic32/pic32mzda/early_pin.h
new file mode 100644 (file)
index 0000000..417fae9
--- /dev/null
@@ -0,0 +1,241 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#ifndef _PIC32MZDA_EARLY_PIN_H
+#define _PIC32MZDA_EARLY_PIN_H
+
+/*
+ * This is a complete, yet overly simplistic and unoptimized, PIC32MZDA PPS
+ * configuration, useful only before the full pinctrl driver is initialized.
+ */
+
+/* Input PPS Functions */
+enum {
+       IN_FUNC_INT3,
+       IN_FUNC_T2CK,
+       IN_FUNC_T6CK,
+       IN_FUNC_IC3,
+       IN_FUNC_IC7,
+       IN_FUNC_U1RX,
+       IN_FUNC_U2CTS,
+       IN_FUNC_U5RX,
+       IN_FUNC_U6CTS,
+       IN_FUNC_SDI1,
+       IN_FUNC_SDI3,
+       IN_FUNC_SDI5,
+       IN_FUNC_SS6,
+       IN_FUNC_REFCLKI1,
+       IN_FUNC_INT4,
+       IN_FUNC_T5CK,
+       IN_FUNC_T7CK,
+       IN_FUNC_IC4,
+       IN_FUNC_IC8,
+       IN_FUNC_U3RX,
+       IN_FUNC_U4CTS,
+       IN_FUNC_SDI2,
+       IN_FUNC_SDI4,
+       IN_FUNC_C1RX,
+       IN_FUNC_REFCLKI4,
+       IN_FUNC_INT2,
+       IN_FUNC_T3CK,
+       IN_FUNC_T8CK,
+       IN_FUNC_IC2,
+       IN_FUNC_IC5,
+       IN_FUNC_IC9,
+       IN_FUNC_U1CTS,
+       IN_FUNC_U2RX,
+       IN_FUNC_U5CTS,
+       IN_FUNC_SS1,
+       IN_FUNC_SS3,
+       IN_FUNC_SS4,
+       IN_FUNC_SS5,
+       IN_FUNC_C2RX,
+       IN_FUNC_INT1,
+       IN_FUNC_T4CK,
+       IN_FUNC_T9CK,
+       IN_FUNC_IC1,
+       IN_FUNC_IC6,
+       IN_FUNC_U3CTS,
+       IN_FUNC_U4RX,
+       IN_FUNC_U6RX,
+       IN_FUNC_SS2,
+       IN_FUNC_SDI6,
+       IN_FUNC_OCFA,
+       IN_FUNC_REFCLKI3,
+};
+
+/* Input PPS Pins */
+#define IN_RPD2 0x00
+#define IN_RPG8 0x01
+#define IN_RPF4 0x02
+#define IN_RPD10 0x03
+#define IN_RPF1 0x04
+#define IN_RPB9 0x05
+#define IN_RPB10 0x06
+#define IN_RPC14 0x07
+#define IN_RPB5 0x08
+#define IN_RPC1 0x0A
+#define IN_RPD14 0x0B
+#define IN_RPG1 0x0C
+#define IN_RPA14 0x0D
+#define IN_RPD6 0x0E
+#define IN_RPD3 0x00
+#define IN_RPG7 0x01
+#define IN_RPF5 0x02
+#define IN_RPD11 0x03
+#define IN_RPF0 0x04
+#define IN_RPB1 0x05
+#define IN_RPE5 0x06
+#define IN_RPC13 0x07
+#define IN_RPB3 0x08
+#define IN_RPC4 0x0A
+#define IN_RPD15 0x0B
+#define IN_RPG0 0x0C
+#define IN_RPA15 0x0D
+#define IN_RPD7 0x0E
+#define IN_RPD9 0x00
+#define IN_RPG6 0x01
+#define IN_RPB8 0x02
+#define IN_RPB15 0x03
+#define IN_RPD4 0x04
+#define IN_RPB0 0x05
+#define IN_RPE3 0x06
+#define IN_RPB7 0x07
+#define IN_RPF12 0x09
+#define IN_RPD12 0x0A
+#define IN_RPF8 0x0B
+#define IN_RPC3 0x0C
+#define IN_RPE9 0x0D
+#define IN_RPD1 0x00
+#define IN_RPG9 0x01
+#define IN_RPB14 0x02
+#define IN_RPD0 0x03
+#define IN_RPB6 0x05
+#define IN_RPD5 0x06
+#define IN_RPB2 0x07
+#define IN_RPF3 0x08
+#define IN_RPF13 0x09
+#define IN_RPF2 0x0B
+#define IN_RPC2 0x0C
+#define IN_RPE8 0x0D
+
+/* Output PPS Pins */
+enum {
+       OUT_RPD2,
+       OUT_RPG8,
+       OUT_RPF4,
+       OUT_RPD10,
+       OUT_RPF1,
+       OUT_RPB9,
+       OUT_RPB10,
+       OUT_RPC14,
+       OUT_RPB5,
+       OUT_RPC1,
+       OUT_RPD14,
+       OUT_RPG1,
+       OUT_RPA14,
+       OUT_RPD6,
+       OUT_RPD3,
+       OUT_RPG7,
+       OUT_RPF5,
+       OUT_RPD11,
+       OUT_RPF0,
+       OUT_RPB1,
+       OUT_RPE5,
+       OUT_RPC13,
+       OUT_RPB3,
+       OUT_RPC4,
+       OUT_RPD15,
+       OUT_RPG0,
+       OUT_RPA15,
+       OUT_RPD7,
+       OUT_RPD9,
+       OUT_RPG6,
+       OUT_RPB8,
+       OUT_RPB15,
+       OUT_RPD4,
+       OUT_RPB0,
+       OUT_RPE3,
+       OUT_RPB7,
+       OUT_RPF12,
+       OUT_RPD12,
+       OUT_RPF8,
+       OUT_RPC3,
+       OUT_RPE9,
+       OUT_RPD1,
+       OUT_RPG9,
+       OUT_RPB14,
+       OUT_RPD0,
+       OUT_RPB6,
+       OUT_RPD5,
+       OUT_RPB2,
+       OUT_RPF3,
+       OUT_RPF13,
+       OUT_RPC2,
+       OUT_RPE8,
+       OUT_RPF2,
+};
+
+/* Output PPS Functions */
+#define OUT_FUNC_U3TX 0x01
+#define OUT_FUNC_U4RTS 0x02
+#define OUT_FUNC_SDO1 0x05
+#define OUT_FUNC_SDO2 0x06
+#define OUT_FUNC_SDO3 0x07
+#define OUT_FUNC_SDO5 0x09
+#define OUT_FUNC_SS6 0x0A
+#define OUT_FUNC_OC3 0x0B
+#define OUT_FUNC_OC6 0x0C
+#define OUT_FUNC_REFCLKO4 0x0D
+#define OUT_FUNC_C2OUT 0x0E
+#define OUT_FUNC_C1TX 0x0F
+#define OUT_FUNC_U1TX 0x01
+#define OUT_FUNC_U2RTS 0x02
+#define OUT_FUNC_U5TX 0x03
+#define OUT_FUNC_U6RTS 0x04
+#define OUT_FUNC_SDO1 0x05
+#define OUT_FUNC_SDO2 0x06
+#define OUT_FUNC_SDO3 0x07
+#define OUT_FUNC_SDO4 0x08
+#define OUT_FUNC_SDO5 0x09
+#define OUT_FUNC_OC4 0x0B
+#define OUT_FUNC_OC7 0x0C
+#define OUT_FUNC_REFCLKO1 0x0F
+#define OUT_FUNC_U3RTS 0x01
+#define OUT_FUNC_U4TX 0x02
+#define OUT_FUNC_U6TX 0x04
+#define OUT_FUNC_SS1 0x05
+#define OUT_FUNC_SS3 0x07
+#define OUT_FUNC_SS4 0x08
+#define OUT_FUNC_SS5 0x09
+#define OUT_FUNC_SDO6 0x0A
+#define OUT_FUNC_OC5 0x0B
+#define OUT_FUNC_OC8 0x0C
+#define OUT_FUNC_C1OUT 0x0E
+#define OUT_FUNC_REFCLKO3 0x0F
+#define OUT_FUNC_U1RTS 0x01
+#define OUT_FUNC_U2TX 0x02
+#define OUT_FUNC_U5RTS 0x03
+#define OUT_FUNC_U6TX 0x04
+#define OUT_FUNC_SS2 0x06
+#define OUT_FUNC_SDO4 0x08
+#define OUT_FUNC_SDO6 0x0A
+#define OUT_FUNC_OC2 0x0B
+#define OUT_FUNC_OC1 0x0C
+#define OUT_FUNC_OC9 0x0D
+#define OUT_FUNC_C2TX 0x0F
+
+void pic32_pps_input(int function, int pin);
+void pic32_pps_output(int function, int pin);
+
+#endif
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c
new file mode 100644 (file)
index 0000000..775ff90
--- /dev/null
@@ -0,0 +1,162 @@
+/*
+ * Joshua Henderson, joshua.henderson@microchip.com
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/platform_data/sdhci-pic32.h>
+
+#include <asm/fw/fw.h>
+#include <asm/mips-boards/generic.h>
+#include <asm/prom.h>
+
+#include "pic32mzda.h"
+
+const char *get_system_type(void)
+{
+       return "PIC32MZDA";
+}
+
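+/*
+ * Use a DTB passed in by the bootloader (fw_arg0 == -2, pointer in
+ * fw_arg1) when available, otherwise fall back to the builtin DTB.
+ */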
+static ulong get_fdtaddr(void)
+{
+       ulong ftaddr = 0;
+
+       if ((fw_arg0 == -2) && fw_arg1 && !fw_arg2 && !fw_arg3)
+               return (ulong)fw_arg1;
+
+       if (__dtb_start < __dtb_end)
+               ftaddr = (ulong)__dtb_start;
+
+       return ftaddr;
+}
+
+void __init plat_mem_setup(void)
+{
+       void *dtb;
+
+       dtb = (void *)get_fdtaddr();
+       if (!dtb) {
+               pr_err("pic32: no DTB found.\n");
+               return;
+       }
+
+       /*
+        * Load the device tree. This causes the chosen node to be parsed
+        * and our memory ranges to be registered.
+        */
+       __dt_setup_arch(dtb);
+
+       pr_info("Found following command lines\n");
+       pr_info(" boot_command_line: %s\n", boot_command_line);
+       pr_info(" arcs_cmdline     : %s\n", arcs_cmdline);
+#ifdef CONFIG_CMDLINE_BOOL
+       pr_info(" builtin_cmdline  : %s\n", CONFIG_CMDLINE);
+#endif
+       if (dtb != __dtb_start)
+               strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+
+#ifdef CONFIG_EARLY_PRINTK
+       fw_init_early_console(-1);
+#endif
+       pic32_config_init();
+}
+
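+/* Join the bootloader-provided argv[] into arcs_cmdline, space separated. */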
+static __init void pic32_init_cmdline(int argc, char *argv[])
+{
+       unsigned int count = COMMAND_LINE_SIZE - 1;
+       int i;
+       char *dst = &(arcs_cmdline[0]);
+       char *src;
+
+       for (i = 1; i < argc && count; ++i) {
+               src = argv[i];
+               while (*src && count) {
+                       *dst++ = *src++;
+                       --count;
+               }
+               *dst++ = ' ';
+       }
+       if (i > 1)
+               --dst;
+
+       *dst = 0;
+}
+
+void __init prom_init(void)
+{
+       pic32_init_cmdline((int)fw_arg0, (char **)fw_arg1);
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
+
+void __init device_tree_init(void)
+{
+       if (!initial_boot_params)
+               return;
+
+       unflatten_and_copy_device_tree();
+}
+
+static struct pic32_sdhci_platform_data sdhci_data = {
+       .setup_dma = pic32_set_sdhci_adma_fifo_threshold,
+};
+
+static struct of_dev_auxdata pic32_auxdata_lookup[] __initdata = {
+       OF_DEV_AUXDATA("microchip,pic32mzda-sdhci", 0, "sdhci", &sdhci_data),
+       { /* sentinel */}
+};
+
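+/* Fill in auxdata names and base addresses from matching DT nodes. */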
+static int __init pic32_of_prepare_platform_data(struct of_dev_auxdata *lookup)
+{
+       struct device_node *root, *np;
+       struct resource res;
+
+       root = of_find_node_by_path("/");
+
+       for (; lookup->compatible; lookup++) {
+               np = of_find_compatible_node(NULL, NULL, lookup->compatible);
+               if (np) {
+                       lookup->name = (char *)np->name;
+                       if (lookup->phys_addr)
+                               continue;
+                       if (!of_address_to_resource(np, 0, &res))
+                               lookup->phys_addr = res.start;
+               }
+       }
+
+       return 0;
+}
+
+static int __init plat_of_setup(void)
+{
+       if (!of_have_populated_dt())
+               panic("Device tree not present");
+
+       pic32_of_prepare_platform_data(pic32_auxdata_lookup);
+       if (of_platform_populate(NULL, of_default_bus_match_table,
+                                pic32_auxdata_lookup, NULL))
+               panic("Failed to populate DT");
+
+       return 0;
+}
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/pic32/pic32mzda/pic32mzda.h b/arch/mips/pic32/pic32mzda/pic32mzda.h
new file mode 100644 (file)
index 0000000..96d10e2
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#ifndef PIC32MZDA_COMMON_H
+#define PIC32MZDA_COMMON_H
+
+/* early clock */
+u32 pic32_get_pbclk(int bus);
+u32 pic32_get_sysclk(void);
+
+/* Device configuration */
+void __init pic32_config_init(void);
+int pic32_set_lcd_mode(int mode);
+int pic32_set_sdhci_adma_fifo_threshold(u32 rthrs, u32 wthrs);
+u32 pic32_get_boot_status(void);
+int pic32_disable_lcd(void);
+int pic32_enable_lcd(void);
+
+#endif
diff --git a/arch/mips/pic32/pic32mzda/time.c b/arch/mips/pic32/pic32mzda/time.c
new file mode 100644 (file)
index 0000000..ca6a62b
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+
+#include <asm/time.h>
+
+#include "pic32mzda.h"
+
+static const struct of_device_id pic32_infra_match[] = {
+       { .compatible = "microchip,pic32mzda-infra", },
+       { },
+};
+
+#define DEFAULT_CORE_TIMER_INTERRUPT 0
+
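+/* Get the core timer IRQ from the infra node, or fall back to hwirq 0. */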
+static unsigned int pic32_xlate_core_timer_irq(void)
+{
+       static struct device_node *node;
+       unsigned int irq;
+
+       node = of_find_matching_node(NULL, pic32_infra_match);
+
+       if (WARN_ON(!node))
+               goto default_map;
+
+       irq = irq_of_parse_and_map(node, 0);
+       if (!irq)
+               goto default_map;
+
+       return irq;
+
+default_map:
+
+       return irq_create_mapping(NULL, DEFAULT_CORE_TIMER_INTERRUPT);
+}
+
+unsigned int get_c0_compare_int(void)
+{
+       return pic32_xlate_core_timer_irq();
+}
+
+void __init plat_time_init(void)
+{
+       struct clk *clk;
+
+       of_clk_init(NULL);
+       clk = clk_get_sys("cpu_clk", NULL);
+       if (IS_ERR(clk))
+               panic("unable to get CPU clock, err=%ld", PTR_ERR(clk));
+
+       clk_prepare_enable(clk);
+       pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
+       mips_hpt_frequency = clk_get_rate(clk) / 2;
+
+       clocksource_probe();
+}
index d304be22b963fb3a3aa40fab95129123b323eb73..8e6e8db8dd5fa46e2749172c357d328968ddff09 100644 (file)
@@ -110,7 +110,7 @@ void __init msp_serial_setup(void)
        up.uartclk      = uartclk;
        up.regshift     = 2;
        up.iotype       = UPIO_MEM;
-       up.flags        = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
+       up.flags        = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
        up.type         = PORT_16550A;
        up.line         = 0;
        up.serial_out   = msp_serial_out;
index 9d293b3e9130152af49f964e530ece27e482fd7b..a63b73610fd490d1c58293885b474e929f5bd1be 100644 (file)
@@ -118,7 +118,7 @@ void msp_restart(char *command)
        /* No chip-specific reset code, just jump to the ROM reset vector */
        set_c0_status(ST0_BEV | ST0_ERL);
        change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
-       flush_cache_all();
+       __flush_cache_all();
        write_c0_wired(0);
 
        __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
index e9bc8c96174e227a8d9cf730bcc4bfb0ff0fb1cf..813826a456ca33d30ff84dfb9724a2796871fb4a 100644 (file)
@@ -12,6 +12,11 @@ config RALINK_ILL_ACC
        depends on SOC_RT305X
        default y
 
+config IRQ_INTC
+       bool
+       default y
+       depends on !SOC_MT7621
+
 choice
        prompt "Ralink SoC selection"
        default SOC_RT305X
@@ -33,7 +38,18 @@ choice
 
        config SOC_MT7620
                bool "MT7620/8"
+               select HW_HAS_PCI
 
+       config SOC_MT7621
+               bool "MT7621"
+               select MIPS_CPU_SCACHE
+               select SYS_SUPPORTS_MULTITHREADING
+               select SYS_SUPPORTS_SMP
+               select SYS_SUPPORTS_MIPS_CPS
+               select MIPS_GIC
+               select COMMON_CLK
+               select CLKSRC_MIPS_GIC
+               select HW_HAS_PCI
 endchoice
 
 choice
index a6c9d00613269f6a5cbb1d416eb8b8450b9f3a65..0d1795a0321e373d93adbfc073be4ac67423d97b 100644 (file)
@@ -6,16 +6,24 @@
 # Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
 # Copyright (C) 2013 John Crispin <blogic@openwrt.org>
 
-obj-y := prom.o of.o reset.o clk.o irq.o timer.o
+obj-y := prom.o of.o reset.o
+
+ifndef CONFIG_MIPS_GIC
+       obj-y += clk.o timer.o
+endif
 
 obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o
 
 obj-$(CONFIG_RALINK_ILL_ACC) += ill_acc.o
 
+obj-$(CONFIG_IRQ_INTC) += irq.o
+obj-$(CONFIG_MIPS_GIC) += irq-gic.o timer-gic.o
+
 obj-$(CONFIG_SOC_RT288X) += rt288x.o
 obj-$(CONFIG_SOC_RT305X) += rt305x.o
 obj-$(CONFIG_SOC_RT3883) += rt3883.o
 obj-$(CONFIG_SOC_MT7620) += mt7620.o
+obj-$(CONFIG_SOC_MT7621) += mt7621.o
 
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 
index 6d9c8c499f988d4949f72c6484f8e948dff6fec2..6095fcc334f42377b2bc3b44631eb429154e85a0 100644 (file)
@@ -27,3 +27,8 @@ cflags-$(CONFIG_SOC_RT3883)   += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt
 #
 load-$(CONFIG_SOC_MT7620)      += 0xffffffff80000000
 cflags-$(CONFIG_SOC_MT7620)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/mt7620
+
+# Ralink MT7621
+#
+load-$(CONFIG_SOC_MT7621)      += 0xffffffff80001000
+cflags-$(CONFIG_SOC_MT7621)    += -I$(srctree)/arch/mips/include/asm/mach-ralink/mt7621
diff --git a/arch/mips/ralink/irq-gic.c b/arch/mips/ralink/irq-gic.c
new file mode 100644 (file)
index 0000000..50d6c55
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
+ * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+
+#include <linux/of.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/mips-gic.h>
+
+int get_c0_perfcount_int(void)
+{
+       return gic_get_c0_perfcount_int();
+}
+EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
+
+void __init arch_init_irq(void)
+{
+       irqchip_init();
+}
index dfb04fcedb042d9b80621ac3197886a1ea3bccda..0d3d1a97895f167d613cd469422dc4e2bfe6087c 100644 (file)
@@ -107,31 +107,31 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
 };
 
 static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
-       FUNC("sdcx", 3, 19, 1),
+       FUNC("sdxc d6", 3, 19, 1),
        FUNC("utif", 2, 19, 1),
        FUNC("gpio", 1, 19, 1),
-       FUNC("pwm", 0, 19, 1),
+       FUNC("pwm1", 0, 19, 1),
 };
 
 static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
-       FUNC("sdcx", 3, 18, 1),
+       FUNC("sdxc d7", 3, 18, 1),
        FUNC("utif", 2, 18, 1),
        FUNC("gpio", 1, 18, 1),
-       FUNC("pwm", 0, 18, 1),
+       FUNC("pwm0", 0, 18, 1),
 };
 
 static struct rt2880_pmx_func uart2_grp_mt7628[] = {
-       FUNC("sdcx", 3, 20, 2),
+       FUNC("sdxc d5 d4", 3, 20, 2),
        FUNC("pwm", 2, 20, 2),
        FUNC("gpio", 1, 20, 2),
-       FUNC("uart", 0, 20, 2),
+       FUNC("uart2", 0, 20, 2),
 };
 
 static struct rt2880_pmx_func uart1_grp_mt7628[] = {
-       FUNC("sdcx", 3, 45, 2),
+       FUNC("sw_r", 3, 45, 2),
        FUNC("pwm", 2, 45, 2),
        FUNC("gpio", 1, 45, 2),
-       FUNC("uart", 0, 45, 2),
+       FUNC("uart1", 0, 45, 2),
 };
 
 static struct rt2880_pmx_func i2c_grp_mt7628[] = {
@@ -143,21 +143,21 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
 
 static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
 static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) };
+static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
 static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
 
 static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
        FUNC("jtag", 3, 22, 8),
        FUNC("utif", 2, 22, 8),
        FUNC("gpio", 1, 22, 8),
-       FUNC("sdcx", 0, 22, 8),
+       FUNC("sdxc", 0, 22, 8),
 };
 
 static struct rt2880_pmx_func uart0_grp_mt7628[] = {
        FUNC("-", 3, 12, 2),
        FUNC("-", 2, 12, 2),
        FUNC("gpio", 1, 12, 2),
-       FUNC("uart", 0, 12, 2),
+       FUNC("uart0", 0, 12, 2),
 };
 
 static struct rt2880_pmx_func i2s_grp_mt7628[] = {
@@ -171,7 +171,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
        FUNC("-", 3, 6, 1),
        FUNC("refclk", 2, 6, 1),
        FUNC("gpio", 1, 6, 1),
-       FUNC("spi", 0, 6, 1),
+       FUNC("spi cs1", 0, 6, 1),
 };
 
 static struct rt2880_pmx_func spis_grp_mt7628[] = {
@@ -188,28 +188,44 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
        FUNC("gpio", 0, 11, 1),
 };
 
-#define MT7628_GPIO_MODE_MASK  0x3
-
-#define MT7628_GPIO_MODE_PWM1  30
-#define MT7628_GPIO_MODE_PWM0  28
-#define MT7628_GPIO_MODE_UART2 26
-#define MT7628_GPIO_MODE_UART1 24
-#define MT7628_GPIO_MODE_I2C   20
-#define MT7628_GPIO_MODE_REFCLK        18
-#define MT7628_GPIO_MODE_PERST 16
-#define MT7628_GPIO_MODE_WDT   14
-#define MT7628_GPIO_MODE_SPI   12
-#define MT7628_GPIO_MODE_SDMODE        10
-#define MT7628_GPIO_MODE_UART0 8
-#define MT7628_GPIO_MODE_I2S   6
-#define MT7628_GPIO_MODE_CS1   4
-#define MT7628_GPIO_MODE_SPIS  2
-#define MT7628_GPIO_MODE_GPIO  0
+static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
+       FUNC("rsvd", 3, 35, 1),
+       FUNC("rsvd", 2, 35, 1),
+       FUNC("gpio", 1, 35, 1),
+       FUNC("wled_kn", 0, 35, 1),
+};
+
+static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
+       FUNC("rsvd", 3, 35, 1),
+       FUNC("rsvd", 2, 35, 1),
+       FUNC("gpio", 1, 35, 1),
+       FUNC("wled_an", 0, 35, 1),
+};
+
+#define MT7628_GPIO_MODE_MASK          0x3
+
+#define MT7628_GPIO_MODE_WLED_KN       48
+#define MT7628_GPIO_MODE_WLED_AN       32
+#define MT7628_GPIO_MODE_PWM1          30
+#define MT7628_GPIO_MODE_PWM0          28
+#define MT7628_GPIO_MODE_UART2         26
+#define MT7628_GPIO_MODE_UART1         24
+#define MT7628_GPIO_MODE_I2C           20
+#define MT7628_GPIO_MODE_REFCLK                18
+#define MT7628_GPIO_MODE_PERST         16
+#define MT7628_GPIO_MODE_WDT           14
+#define MT7628_GPIO_MODE_SPI           12
+#define MT7628_GPIO_MODE_SDMODE                10
+#define MT7628_GPIO_MODE_UART0         8
+#define MT7628_GPIO_MODE_I2S           6
+#define MT7628_GPIO_MODE_CS1           4
+#define MT7628_GPIO_MODE_SPIS          2
+#define MT7628_GPIO_MODE_GPIO          0
 
 static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
        GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
                                1, MT7628_GPIO_MODE_PWM1),
-       GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+       GRP_G("pmw0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
                                1, MT7628_GPIO_MODE_PWM0),
        GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
                                1, MT7628_GPIO_MODE_UART2),
@@ -233,6 +249,10 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
                                1, MT7628_GPIO_MODE_SPIS),
        GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
                                1, MT7628_GPIO_MODE_GPIO),
+       GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+                               1, MT7628_GPIO_MODE_WLED_AN),
+       GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+                               1, MT7628_GPIO_MODE_WLED_KN),
        { 0 }
 };
 
@@ -436,10 +456,13 @@ void __init ralink_clk_init(void)
        ralink_clk_add("10000100.timer", periph_rate);
        ralink_clk_add("10000120.watchdog", periph_rate);
        ralink_clk_add("10000b00.spi", sys_rate);
+       ralink_clk_add("10000b40.spi", sys_rate);
        ralink_clk_add("10000c00.uartlite", periph_rate);
+       ralink_clk_add("10000d00.uart1", periph_rate);
+       ralink_clk_add("10000e00.uart2", periph_rate);
        ralink_clk_add("10180000.wmac", xtal_rate);
 
-       if (IS_ENABLED(CONFIG_USB) && is_mt76x8()) {
+       if (IS_ENABLED(CONFIG_USB) && !is_mt76x8()) {
                /*
                 * When the CPU goes into sleep mode, the BUS clock will be
                 * too low for USB to function properly. Adjust the busses
@@ -552,7 +575,7 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
        }
 
        snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
-               "Ralink %s ver:%u eco:%u",
+               "MediaTek %s ver:%u eco:%u",
                name,
                (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
                (rev & CHIP_REV_ECO_MASK));
diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
new file mode 100644 (file)
index 0000000..e9b9fa3
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
+ * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/smp-ops.h>
+#include <asm/mips-cm.h>
+#include <asm/mips-cpc.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/mt7621.h>
+
+#include <pinmux.h>
+
+#include "common.h"
+
+#define SYSC_REG_SYSCFG                0x10
+#define SYSC_REG_CPLL_CLKCFG0  0x2c
+#define SYSC_REG_CUR_CLK_STS   0x44
+#define CPU_CLK_SEL            (BIT(30) | BIT(31))
+
+#define MT7621_GPIO_MODE_UART1         1
+#define MT7621_GPIO_MODE_I2C           2
+#define MT7621_GPIO_MODE_UART3_MASK    0x3
+#define MT7621_GPIO_MODE_UART3_SHIFT   3
+#define MT7621_GPIO_MODE_UART3_GPIO    1
+#define MT7621_GPIO_MODE_UART2_MASK    0x3
+#define MT7621_GPIO_MODE_UART2_SHIFT   5
+#define MT7621_GPIO_MODE_UART2_GPIO    1
+#define MT7621_GPIO_MODE_JTAG          7
+#define MT7621_GPIO_MODE_WDT_MASK      0x3
+#define MT7621_GPIO_MODE_WDT_SHIFT     8
+#define MT7621_GPIO_MODE_WDT_GPIO      1
+#define MT7621_GPIO_MODE_PCIE_RST      0
+#define MT7621_GPIO_MODE_PCIE_REF      2
+#define MT7621_GPIO_MODE_PCIE_MASK     0x3
+#define MT7621_GPIO_MODE_PCIE_SHIFT    10
+#define MT7621_GPIO_MODE_PCIE_GPIO     1
+#define MT7621_GPIO_MODE_MDIO_MASK     0x3
+#define MT7621_GPIO_MODE_MDIO_SHIFT    12
+#define MT7621_GPIO_MODE_MDIO_GPIO     1
+#define MT7621_GPIO_MODE_RGMII1                14
+#define MT7621_GPIO_MODE_RGMII2                15
+#define MT7621_GPIO_MODE_SPI_MASK      0x3
+#define MT7621_GPIO_MODE_SPI_SHIFT     16
+#define MT7621_GPIO_MODE_SPI_GPIO      1
+#define MT7621_GPIO_MODE_SDHCI_MASK    0x3
+#define MT7621_GPIO_MODE_SDHCI_SHIFT   18
+#define MT7621_GPIO_MODE_SDHCI_GPIO    1
+
+static struct rt2880_pmx_func uart1_grp[] =  { FUNC("uart1", 0, 1, 2) };
+static struct rt2880_pmx_func i2c_grp[] =  { FUNC("i2c", 0, 3, 2) };
+static struct rt2880_pmx_func uart3_grp[] = {
+       FUNC("uart3", 0, 5, 4),
+       FUNC("i2s", 2, 5, 4),
+       FUNC("spdif3", 3, 5, 4),
+};
+static struct rt2880_pmx_func uart2_grp[] = {
+       FUNC("uart2", 0, 9, 4),
+       FUNC("pcm", 2, 9, 4),
+       FUNC("spdif2", 3, 9, 4),
+};
+static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) };
+static struct rt2880_pmx_func wdt_grp[] = {
+       FUNC("wdt rst", 0, 18, 1),
+       FUNC("wdt refclk", 2, 18, 1),
+};
+static struct rt2880_pmx_func pcie_rst_grp[] = {
+       FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1),
+       FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1)
+};
+static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) };
+static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) };
+static struct rt2880_pmx_func spi_grp[] = {
+       FUNC("spi", 0, 34, 7),
+       FUNC("nand1", 2, 34, 7),
+};
+static struct rt2880_pmx_func sdhci_grp[] = {
+       FUNC("sdhci", 0, 41, 8),
+       FUNC("nand2", 2, 41, 8),
+};
+static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) };
+
+static struct rt2880_pmx_group mt7621_pinmux_data[] = {
+       GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1),
+       GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C),
+       GRP_G("uart3", uart3_grp, MT7621_GPIO_MODE_UART3_MASK,
+               MT7621_GPIO_MODE_UART3_GPIO, MT7621_GPIO_MODE_UART3_SHIFT),
+       GRP_G("uart2", uart2_grp, MT7621_GPIO_MODE_UART2_MASK,
+               MT7621_GPIO_MODE_UART2_GPIO, MT7621_GPIO_MODE_UART2_SHIFT),
+       GRP("jtag", jtag_grp, 1, MT7621_GPIO_MODE_JTAG),
+       GRP_G("wdt", wdt_grp, MT7621_GPIO_MODE_WDT_MASK,
+               MT7621_GPIO_MODE_WDT_GPIO, MT7621_GPIO_MODE_WDT_SHIFT),
+       GRP_G("pcie", pcie_rst_grp, MT7621_GPIO_MODE_PCIE_MASK,
+               MT7621_GPIO_MODE_PCIE_GPIO, MT7621_GPIO_MODE_PCIE_SHIFT),
+       GRP_G("mdio", mdio_grp, MT7621_GPIO_MODE_MDIO_MASK,
+               MT7621_GPIO_MODE_MDIO_GPIO, MT7621_GPIO_MODE_MDIO_SHIFT),
+       GRP("rgmii2", rgmii2_grp, 1, MT7621_GPIO_MODE_RGMII2),
+       GRP_G("spi", spi_grp, MT7621_GPIO_MODE_SPI_MASK,
+               MT7621_GPIO_MODE_SPI_GPIO, MT7621_GPIO_MODE_SPI_SHIFT),
+       GRP_G("sdhci", sdhci_grp, MT7621_GPIO_MODE_SDHCI_MASK,
+               MT7621_GPIO_MODE_SDHCI_GPIO, MT7621_GPIO_MODE_SDHCI_SHIFT),
+       GRP("rgmii1", rgmii1_grp, 1, MT7621_GPIO_MODE_RGMII1),
+       { 0 }
+};
+
+phys_addr_t mips_cpc_default_phys_base(void)
+{
+       panic("Cannot detect cpc address");
+}
+
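+/*
+ * Derive the CPU clock from the current clock status register (clk_sel 0)
+ * or from the PLL feedback divider and the xtal mode (clk_sel 1).
+ */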
+void __init ralink_clk_init(void)
+{
+       int cpu_fdiv = 0;
+       int cpu_ffrac = 0;
+       int fbdiv = 0;
+       u32 clk_sts, syscfg;
+       u8 clk_sel = 0, xtal_mode;
+       u32 cpu_clk;
+
+       if ((rt_sysc_r32(SYSC_REG_CPLL_CLKCFG0) & CPU_CLK_SEL) != 0)
+               clk_sel = 1;
+
+       switch (clk_sel) {
+       case 0:
+               clk_sts = rt_sysc_r32(SYSC_REG_CUR_CLK_STS);
+               cpu_fdiv = ((clk_sts >> 8) & 0x1F);
+               cpu_ffrac = (clk_sts & 0x1F);
+               cpu_clk = (500 * cpu_ffrac / cpu_fdiv) * 1000 * 1000;
+               break;
+
+       case 1:
+               fbdiv = ((rt_sysc_r32(0x648) >> 4) & 0x7F) + 1;
+               syscfg = rt_sysc_r32(SYSC_REG_SYSCFG);
+               xtal_mode = (syscfg >> 6) & 0x7;
+               if (xtal_mode >= 6) {
+                       /* 25MHz Xtal */
+                       cpu_clk = 25 * fbdiv * 1000 * 1000;
+               } else if (xtal_mode >= 3) {
+                       /* 40MHz Xtal */
+                       cpu_clk = 40 * fbdiv * 1000 * 1000;
+               } else {
+                       /* 20MHz Xtal */
+                       cpu_clk = 20 * fbdiv * 1000 * 1000;
+               }
+               break;
+       }
+}
+
+void __init ralink_of_remap(void)
+{
+       rt_sysc_membase = plat_of_remap_node("mtk,mt7621-sysc");
+       rt_memc_membase = plat_of_remap_node("mtk,mt7621-memc");
+
+       if (!rt_sysc_membase || !rt_memc_membase)
+               panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+       void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7621_SYSC_BASE);
+       unsigned char *name = NULL;
+       u32 n0;
+       u32 n1;
+       u32 rev;
+
+       n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+       n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+       if (n0 == MT7621_CHIP_NAME0 && n1 == MT7621_CHIP_NAME1) {
+               name = "MT7621";
+               soc_info->compatible = "mtk,mt7621-soc";
+       } else {
+               panic("mt7621: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+       }
+
+       rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
+
+       snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+               "MediaTek %s ver:%u eco:%u",
+               name,
+               (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK,
+               (rev & CHIP_REV_ECO_MASK));
+
+       soc_info->mem_size_min = MT7621_DDR2_SIZE_MIN;
+       soc_info->mem_size_max = MT7621_DDR2_SIZE_MAX;
+       soc_info->mem_base = MT7621_DRAM_BASE;
+
+       rt2880_pinmux_data = mt7621_pinmux_data;
+
+       /* Early detection of CMP support */
+       mips_cm_probe();
+       mips_cpc_probe();
+
+       if (mips_cm_numiocu()) {
+               /*
+                * mips_cm_probe() wipes out bootloader
+                * config for CM regions and we have to configure them
+                * again. This SoC cannot talk to palmbus devices
+                * without a proper IOCU region set up.
+                *
+                * FIXME: it would be better to do this with values
+                * from DT, but we need this very early because
+                * without this we cannot talk to pretty much anything
+                * including serial.
+                */
+               write_gcr_reg0_base(MT7621_PALMBUS_BASE);
+               write_gcr_reg0_mask(~MT7621_PALMBUS_SIZE |
+                                   CM_GCR_REGn_MASK_CMTGT_IOCU0);
+       }
+
+       if (!register_cps_smp_ops())
+               return;
+       if (!register_cmp_smp_ops())
+               return;
+       if (!register_vsmp_smp_ops())
+               return;
+}
index 844f5cd55c8f1c1e96e20db2a888c22e1fb12327..3c84166ebcb779b1f50f9711c820977295873377 100644 (file)
@@ -119,5 +119,5 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
        soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
 
        rt2880_pinmux_data = rt2880_pinmux_data_act;
-       ralink_soc == RT2880_SOC;
+       ralink_soc = RT2880_SOC;
 }
index 9e45725920650bd55350549097f9330403f3ef6a..d7c4ba43a4284525b6de3b3f95daa42d8a846ad2 100644 (file)
@@ -201,6 +201,7 @@ void __init ralink_clk_init(void)
        ralink_clk_add("cpu", cpu_rate);
        ralink_clk_add("sys", sys_rate);
        ralink_clk_add("10000b00.spi", sys_rate);
+       ralink_clk_add("10000b40.spi", sys_rate);
        ralink_clk_add("10000100.timer", wdt_rate);
        ralink_clk_add("10000120.watchdog", wdt_rate);
        ralink_clk_add("10000500.uart", uart_rate);
index 582995aaaf4e7e8a9848c512d150b515f3628344..fafec947b27df7dc436d4701f698c734904e7421 100644 (file)
@@ -109,6 +109,7 @@ void __init ralink_clk_init(void)
        ralink_clk_add("10000120.watchdog", sys_rate);
        ralink_clk_add("10000500.uart", 40000000);
        ralink_clk_add("10000b00.spi", sys_rate);
+       ralink_clk_add("10000b40.spi", sys_rate);
        ralink_clk_add("10000c00.uartlite", 40000000);
        ralink_clk_add("10100000.ethernet", sys_rate);
        ralink_clk_add("10180000.wmac", 40000000);
diff --git a/arch/mips/ralink/timer-gic.c b/arch/mips/ralink/timer-gic.c
new file mode 100644 (file)
index 0000000..5b4f186
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
+ * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/init.h>
+
+#include <linux/of.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+
+#include "common.h"
+
+void __init plat_time_init(void)
+{
+       ralink_of_remap();
+
+       of_clk_init(NULL);
+       clocksource_probe();
+}
index 650d5d39f34d902a60324cd9def8a48af228b994..fd1108543a71d0398097741a4afdf038c574545e 100644 (file)
@@ -89,7 +89,7 @@ static int rb532_gpio_get(struct gpio_chip *chip, unsigned offset)
        struct rb532_gpio_chip  *gpch;
 
        gpch = container_of(chip, struct rb532_gpio_chip, chip);
-       return rb532_get_bit(offset, gpch->regbase + GPIOD);
+       return !!rb532_get_bit(offset, gpch->regbase + GPIOD);
 }
 
 /*
index 9d9962ab7d25ccfea97238f402e5dc2f65e20d98..2fd350f31f4b54c4c505700c966cb941b62e4980 100644 (file)
@@ -689,7 +689,7 @@ static int txx9_iocled_get(struct gpio_chip *chip, unsigned int offset)
 {
        struct txx9_iocled_data *data =
                container_of(chip, struct txx9_iocled_data, chip);
-       return data->cur_val & (1 << offset);
+       return !!(data->cur_val & (1 << offset));
 }
 
 static void txx9_iocled_set(struct gpio_chip *chip, unsigned int offset,
index d7f755833c3f43431b48f38b296da4df34d993e6..39a0db3e2b346f4d9164eca8935a2e4d39663026 100644 (file)
@@ -73,7 +73,7 @@ static inline void software_reset(void)
        default:
                set_c0_status(ST0_BEV | ST0_ERL);
                change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
-               flush_cache_all();
+               __flush_cache_all();
                write_c0_wired(0);
                __asm__("jr     %0"::"r"(0xbfc00000));
                break;
index c1990218f18c5b6e1cc3fe11172db501857668fd..594ebff15d3f0a2dccb1f2ccefe6f48262598208 100644 (file)
 
 /* Standard COM flags (except for COM4, because of the 8514 problem) */
 #ifdef CONFIG_SERIAL_8250_DETECT_IRQ
-#define STD_COM_FLAGS  (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST | ASYNC_AUTO_IRQ)
-#define STD_COM4_FLAGS (ASYNC_BOOT_AUTOCONF | ASYNC_AUTO_IRQ)
+#define STD_COM_FLAGS  (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_AUTO_IRQ)
+#define STD_COM4_FLAGS (UPF_BOOT_AUTOCONF | UPF_AUTO_IRQ)
 #else
-#define STD_COM_FLAGS  (ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST)
-#define STD_COM4_FLAGS ASYNC_BOOT_AUTOCONF
+#define STD_COM_FLAGS  (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
+#define STD_COM4_FLAGS UPF_BOOT_AUTOCONF
 #endif
 
 #ifdef CONFIG_SERIAL_8250_MANY_PORTS
-#define FOURPORT_FLAGS ASYNC_FOURPORT
+#define FOURPORT_FLAGS UPF_FOURPORT
 #define ACCENT_FLAGS   0
 #define BOCA_FLAGS     0
 #define HUB6_FLAGS     0
index 1b366c47768722081efe57a9db39eaba2a7dcde9..3c07d6b968772bc6de99bada8d83578ffe172c3d 100644 (file)
@@ -55,12 +55,12 @@ signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
 
 static struct resource data_resource = {
        .name   = "Kernel data",
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 };
 
 static struct resource code_resource = {
        .name   = "Kernel code",
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 };
 
 static struct resource pdcdata_resource = {
@@ -201,7 +201,7 @@ static void __init setup_bootmem(void)
                res->name = "System RAM";
                res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT;
                res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1;
-               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                request_resource(&iomem_resource, res);
        }
 
index e4824fd04bb7449d262c1a7697b5f539b03f6bab..9faa18c4f3f702adceb4f555b05b72bc8437cf6c 100644 (file)
@@ -557,7 +557,7 @@ choice
 
 config PPC_4K_PAGES
        bool "4k page size"
-       select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
+       select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
 
 config PPC_16K_PAGES
        bool "16k page size"
@@ -566,7 +566,7 @@ config PPC_16K_PAGES
 config PPC_64K_PAGES
        bool "64k page size"
        depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
-       select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
+       select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
 
 config PPC_256K_PAGES
        bool "256k page size"
index 06f17e778c2750edd0b2380288da820f0c40f130..8d1c8162f0c1078d1842d754b64a078be8e1ea0e 100644 (file)
@@ -50,7 +50,9 @@
  * set of bits not changed in pmd_modify.
  */
 #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-                        _PAGE_ACCESSED | _PAGE_THP_HUGE)
+                        _PAGE_ACCESSED | _PAGE_THP_HUGE | _PAGE_PTE | \
+                        _PAGE_SOFT_DIRTY)
+
 
 #ifdef CONFIG_PPC_64K_PAGES
 #include <asm/book3s/64/hash-64k.h>
index 8204b0c393aac69285a666d1b70fae0961053614..ac07a30a7934265ed98706efb9f2c82ac8db85ce 100644 (file)
@@ -223,7 +223,6 @@ static inline pte_t *pmdp_ptep(pmd_t *pmd)
 #define pmd_pfn(pmd)           pte_pfn(pmd_pte(pmd))
 #define pmd_dirty(pmd)         pte_dirty(pmd_pte(pmd))
 #define pmd_young(pmd)         pte_young(pmd_pte(pmd))
-#define pmd_dirty(pmd)         pte_dirty(pmd_pte(pmd))
 #define pmd_mkold(pmd)         pte_pmd(pte_mkold(pmd_pte(pmd)))
 #define pmd_wrprotect(pmd)     pte_pmd(pte_wrprotect(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
@@ -282,6 +281,10 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp);
 
+#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
+extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                                   unsigned long address, pmd_t *pmdp);
+
 #define pmd_move_must_withdraw pmd_move_must_withdraw
 struct spinlock;
 static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
index c5eb86f3d452fbe66d44ae1cce9bbfff91a8b14d..867c39b45df6ce4c1bd5a342ca314a888bb185bf 100644 (file)
@@ -81,6 +81,7 @@ struct pci_dn;
 #define EEH_PE_KEEP            (1 << 8)        /* Keep PE on hotplug   */
 #define EEH_PE_CFG_RESTRICTED  (1 << 9)        /* Block config on error */
 #define EEH_PE_REMOVED         (1 << 10)       /* Removed permanently  */
+#define EEH_PE_PRI_BUS         (1 << 11)       /* Cached primary bus   */
 
 struct eeh_pe {
        int type;                       /* PE type: PHB/Bus/Device      */
index 271fefbbe521badbc1a232b8f3583fb10833388b..9d08d8cbed1a1ec0e5893679c5e1fd9cb69dce09 100644 (file)
@@ -38,8 +38,7 @@
 
 #define KVM_MAX_VCPUS          NR_CPUS
 #define KVM_MAX_VCORES         NR_CPUS
-#define KVM_USER_MEM_SLOTS 32
-#define KVM_MEM_SLOTS_NUM KVM_USER_MEM_SLOTS
+#define KVM_USER_MEM_SLOTS     512
 
 #ifdef CONFIG_KVM_MMIO
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
index 54843ca5fa2bfa03871f28a3854b0d87f5685a76..78968c1ff931941d8244364cedd7f664f352e763 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/pci.h>
 #include <linux/list.h>
 #include <linux/ioport.h>
-#include <asm-generic/pci-bridge.h>
 
 struct device_node;
 
index 5654ece02c0db5ee41da441c30d8a2c5f1cdcbc4..3fa9df70aa20dfa01e90eb1a6af06171a1d8fbde 100644 (file)
@@ -383,3 +383,4 @@ SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(ni_syscall)
 SYSCALL(mlock2)
+SYSCALL(copy_file_range)
index 8e86b48d03699047dda0f493a3955c8c05e34909..32e36b16773fd876a7b246ddb9e23c28193c3570 100644 (file)
@@ -57,12 +57,14 @@ DEFINE_EVENT(ppc64_interrupt_class, timer_interrupt_exit,
 extern void hcall_tracepoint_regfunc(void);
 extern void hcall_tracepoint_unregfunc(void);
 
-TRACE_EVENT_FN(hcall_entry,
+TRACE_EVENT_FN_COND(hcall_entry,
 
        TP_PROTO(unsigned long opcode, unsigned long *args),
 
        TP_ARGS(opcode, args),
 
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
        TP_STRUCT__entry(
                __field(unsigned long, opcode)
        ),
@@ -76,13 +78,15 @@ TRACE_EVENT_FN(hcall_entry,
        hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
 );
 
-TRACE_EVENT_FN(hcall_exit,
+TRACE_EVENT_FN_COND(hcall_exit,
 
        TP_PROTO(unsigned long opcode, unsigned long retval,
                unsigned long *retbuf),
 
        TP_ARGS(opcode, retval, retbuf),
 
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
        TP_STRUCT__entry(
                __field(unsigned long, opcode)
                __field(unsigned long, retval)
index 6a5ace5fa0c8a8bbf83a73a5fca8fb0150d5d3e8..1f2594d456054262bd2bc4a9eef265c983971afa 100644 (file)
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define NR_syscalls            379
+#define NR_syscalls            380
 
 #define __NR__exit __NR_exit
 
index 12a05652377a28799a89ae59e3d38a7ddeaade92..940290d45b087220711cd0a508ae5c4223b21951 100644 (file)
 #define __NR_userfaultfd       364
 #define __NR_membarrier                365
 #define __NR_mlock2            378
+#define __NR_copy_file_range   379
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
index 938742135ee08fc8dd058df690cfba7eacdabc0b..301be3126ae3e1bcbe6b3f8aabce07d2416c89e9 100644 (file)
@@ -564,6 +564,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
         */
        eeh_pe_state_mark(pe, EEH_PE_KEEP);
        if (bus) {
+               eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                pci_lock_rescan_remove();
                pcibios_remove_pci_devices(bus);
                pci_unlock_rescan_remove();
@@ -803,6 +804,7 @@ perm_error:
         * the their PCI config any more.
         */
        if (frozen_bus) {
+               eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
 
                pci_lock_rescan_remove();
@@ -886,6 +888,7 @@ static void eeh_handle_special_event(void)
                                        continue;
 
                                /* Notify all devices to be down */
+                               eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                                bus = eeh_pe_bus_get(phb_pe);
                                eeh_pe_dev_traverse(pe,
                                        eeh_report_failure, NULL);
index 8654cb166c19b04fea827756513a1d23e4d92272..98f81800e00c1030b69c80d79eb8d43edbda9728 100644 (file)
@@ -883,32 +883,29 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
 const char *eeh_pe_loc_get(struct eeh_pe *pe)
 {
        struct pci_bus *bus = eeh_pe_bus_get(pe);
-       struct device_node *dn = pci_bus_to_OF_node(bus);
+       struct device_node *dn;
        const char *loc = NULL;
 
-       if (!dn)
-               goto out;
+       while (bus) {
+               dn = pci_bus_to_OF_node(bus);
+               if (!dn) {
+                       bus = bus->parent;
+                       continue;
+               }
 
-       /* PHB PE or root PE ? */
-       if (pci_is_root_bus(bus)) {
-               loc = of_get_property(dn, "ibm,loc-code", NULL);
-               if (!loc)
+               if (pci_is_root_bus(bus))
                        loc = of_get_property(dn, "ibm,io-base-loc-code", NULL);
+               else
+                       loc = of_get_property(dn, "ibm,slot-location-code",
+                                             NULL);
+
                if (loc)
-                       goto out;
+                       return loc;
 
-               /* Check the root port */
-               dn = dn->child;
-               if (!dn)
-                       goto out;
+               bus = bus->parent;
        }
 
-       loc = of_get_property(dn, "ibm,loc-code", NULL);
-       if (!loc)
-               loc = of_get_property(dn, "ibm,slot-location-code", NULL);
-
-out:
-       return loc ? loc : "N/A";
+       return "N/A";
 }
 
 /**
@@ -931,7 +928,7 @@ struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe)
                bus = pe->phb->bus;
        } else if (pe->type & EEH_PE_BUS ||
                   pe->type & EEH_PE_DEVICE) {
-               if (pe->bus) {
+               if (pe->state & EEH_PE_PRI_BUS) {
                        bus = pe->bus;
                        goto out;
                }
index db475d41b57a5d4b5313d11f4494792c6070dc67..f28754c497e506cb3291462f94d422822be4e29d 100644 (file)
@@ -701,31 +701,3 @@ _GLOBAL(kexec_sequence)
        li      r5,0
        blr     /* image->start(physid, image->start, 0); */
 #endif /* CONFIG_KEXEC */
-
-#ifdef CONFIG_MODULES
-#if defined(_CALL_ELF) && _CALL_ELF == 2
-
-#ifdef CONFIG_MODVERSIONS
-.weak __crc_TOC.
-.section "___kcrctab+TOC.","a"
-.globl __kcrctab_TOC.
-__kcrctab_TOC.:
-       .llong  __crc_TOC.
-#endif
-
-/*
- * Export a fake .TOC. since both modpost and depmod will complain otherwise.
- * Both modpost and depmod strip the leading . so we do the same here.
- */
-.section "__ksymtab_strings","a"
-__kstrtab_TOC.:
-       .asciz "TOC."
-
-.section "___ksymtab+TOC.","a"
-/* This symbol name is important: it's used by modpost to find exported syms */
-.globl __ksymtab_TOC.
-__ksymtab_TOC.:
-       .llong 0 /* .value */
-       .llong __kstrtab_TOC.
-#endif /* ELFv2 */
-#endif /* MODULES */
index 59663af9315fc16123cd4aacb090a6efd8cbc33d..08b7a40de5f85ab1c6f7e5ed205ca52edd5ab12c 100644 (file)
@@ -326,7 +326,10 @@ static void dedotify_versions(struct modversion_info *vers,
                }
 }
 
-/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
+/*
+ * Undefined symbols which refer to .funcname, hack to funcname. Make .TOC.
+ * seem to be defined (value set later).
+ */
 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 {
        unsigned int i;
@@ -334,8 +337,11 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
        for (i = 1; i < numsyms; i++) {
                if (syms[i].st_shndx == SHN_UNDEF) {
                        char *name = strtab + syms[i].st_name;
-                       if (name[0] == '.')
-                               memmove(name, name+1, strlen(name));
+                       if (name[0] == '.') {
+                               if (strcmp(name+1, "TOC.") == 0)
+                                       syms[i].st_shndx = SHN_ABS;
+                               syms[i].st_name++;
+                       }
                }
        }
 }
@@ -351,7 +357,7 @@ static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
        numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
 
        for (i = 1; i < numsyms; i++) {
-               if (syms[i].st_shndx == SHN_UNDEF
+               if (syms[i].st_shndx == SHN_ABS
                    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
                        return &syms[i];
        }
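
Taken together, the misc_64.S and module_64.c hunks replace the hand-rolled assembler export of a fake .TOC. with a symbol-table fixup: dedotify() now strips the leading dot and marks the undefined ".TOC." entry SHN_ABS so find_dot_toc() can resolve it later. A minimal, hypothetical userspace sketch of that renaming rule (toy symbol names and string table invented for illustration; not kernel code):

#include <elf.h>
#include <stdio.h>
#include <string.h>

/* Toy string table: index 0 is the empty string, as in a real ELF strtab. */
static char strtab[] = "\0.TOC.\0.my_func";

int main(void)
{
	Elf64_Sym syms[3] = {
		{ 0 },                                   /* symbol 0 is unused */
		{ .st_name = 1, .st_shndx = SHN_UNDEF }, /* ".TOC."    */
		{ .st_name = 7, .st_shndx = SHN_UNDEF }, /* ".my_func" */
	};
	unsigned int i;

	/* Same transformation as dedotify(): drop the leading dot and
	 * turn ".TOC." into an absolute symbol so a later pass can find it. */
	for (i = 1; i < 3; i++) {
		char *name = strtab + syms[i].st_name;

		if (syms[i].st_shndx == SHN_UNDEF && name[0] == '.') {
			if (strcmp(name + 1, "TOC.") == 0)
				syms[i].st_shndx = SHN_ABS;
			syms[i].st_name++;
		}
	}
	for (i = 1; i < 3; i++)
		printf("%-8s shndx=0x%x\n", strtab + syms[i].st_name,
		       syms[i].st_shndx);
	return 0;
}
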
index ad8c9db61237223c3b807e3a9afed1487c44af1e..d544fa31175766bf48790f75bcd5458d6a505080 100644 (file)
@@ -114,8 +114,6 @@ extern unsigned int memset_nocache_branch; /* Insn to be replaced by NOP */
 
 notrace void __init machine_init(u64 dt_ptr)
 {
-       lockdep_init();
-
        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();
 
index 5c03a6a9b0542fac3d042f2481f367aae9178d38..f98be8383a39994868ba41d0f8492e99f48eb1fe 100644 (file)
@@ -255,9 +255,6 @@ void __init early_setup(unsigned long dt_ptr)
        setup_paca(&boot_paca);
        fixup_boot_paca();
 
-       /* Initialize lockdep early or else spinlocks will blow */
-       lockdep_init();
-
        /* -------- printk is now safe to use ------- */
 
        /* Enable early debugging if any specified (see udbg.h) */
index 774a253ca4e1eaa2ab9c8277d22cbcae0d85f676..9bf7031a67ffc0aedf742e7a9da6afebeb8fe64b 100644 (file)
@@ -377,15 +377,12 @@ no_seg_found:
 
 static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 {
-       struct kvmppc_vcpu_book3s *vcpu_book3s;
        u64 esid, esid_1t;
        int slb_nr;
        struct kvmppc_slb *slbe;
 
        dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb);
 
-       vcpu_book3s = to_book3s(vcpu);
-
        esid = GET_ESID(rb);
        esid_1t = GET_ESID_1T(rb);
        slb_nr = rb & 0xfff;
index cff207b72c46ae0b54cf48db5e4a3aa828f2fc29..baeddb06811d738a6ea8be4ce920e5ea39a9645a 100644 (file)
@@ -833,6 +833,24 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
        vcpu->stat.sum_exits++;
 
+       /*
+        * This can happen if an interrupt occurs in the last stages
+        * of guest entry or the first stages of guest exit (i.e. after
+        * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
+        * and before setting it to KVM_GUEST_MODE_HOST_HV).
+        * That can happen due to a bug, or due to a machine check
+        * occurring at just the wrong time.
+        */
+       if (vcpu->arch.shregs.msr & MSR_HV) {
+               printk(KERN_EMERG "KVM trap in HV mode!\n");
+               printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
+                       vcpu->arch.trap, kvmppc_get_pc(vcpu),
+                       vcpu->arch.shregs.msr);
+               kvmppc_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               run->hw.hardware_exit_reason = vcpu->arch.trap;
+               return RESUME_HOST;
+       }
        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;
        switch (vcpu->arch.trap) {
index 3c6badcd53efced649af69ff9a4adf823b28d9a0..6ee26de9a1ded02e0e08f376fd612049eedee0b4 100644 (file)
@@ -2153,7 +2153,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 
        /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
 2:     rlwimi  r5, r4, 5, DAWRX_DR | DAWRX_DW
-       rlwimi  r5, r4, 1, DAWRX_WT
+       rlwimi  r5, r4, 2, DAWRX_WT
        clrrdi  r4, r4, 3
        std     r4, VCPU_DAWR(r3)
        std     r5, VCPU_DAWRX(r3)
@@ -2404,6 +2404,8 @@ machine_check_realmode:
         * guest as machine check causing guest to crash.
         */
        ld      r11, VCPU_MSR(r9)
+       rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
+       bne     mc_cont                 /* if so, exit to host */
        andi.   r10, r11, MSR_RI        /* check for unrecoverable exception */
        beq     1f                      /* Deliver a machine check to guest */
        ld      r10, VCPU_PC(r9)
index 6fd2405c7f4aa2e5bdc5ae417a409b33b6c5f7bd..a3b182dcb823640540fcaeac38bc7d89550fddd6 100644 (file)
@@ -919,21 +919,17 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                                r = -ENXIO;
                                break;
                        }
-                       vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+                       val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
-                       vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+                       val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
                        break;
                case KVM_REG_PPC_VRSAVE:
-                       if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
-                               r = -ENXIO;
-                               break;
-                       }
-                       vcpu->arch.vrsave = set_reg_val(reg->id, val);
+                       val = get_reg_val(reg->id, vcpu->arch.vrsave);
                        break;
 #endif /* CONFIG_ALTIVEC */
                default:
@@ -974,17 +970,21 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
                                r = -ENXIO;
                                break;
                        }
-                       val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+                       vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
                        break;
                case KVM_REG_PPC_VSCR:
                        if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
                                r = -ENXIO;
                                break;
                        }
-                       val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+                       vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
                        break;
                case KVM_REG_PPC_VRSAVE:
-                       val = get_reg_val(reg->id, vcpu->arch.vrsave);
+                       if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+                               r = -ENXIO;
+                               break;
+                       }
+                       vcpu->arch.vrsave = set_reg_val(reg->id, val);
                        break;
 #endif /* CONFIG_ALTIVEC */
                default:
index 22d94c3e6fc43bf7afbd63bfd483074a9e18589a..f078a1f94fc267de0e4de530cd413608c2d93e5f 100644 (file)
@@ -541,7 +541,7 @@ static int __init add_system_ram_resources(void)
                        res->name = "System RAM";
                        res->start = base;
                        res->end = base + size - 1;
-                       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+                       res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
                        WARN_ON(request_resource(&iomem_resource, res) < 0);
                }
        }
@@ -560,12 +560,12 @@ subsys_initcall(add_system_ram_resources);
  */
 int devmem_is_allowed(unsigned long pfn)
 {
+       if (page_is_rtas_user_buf(pfn))
+               return 1;
        if (iomem_is_exclusive(PFN_PHYS(pfn)))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
-       if (page_is_rtas_user_buf(pfn))
-               return 1;
        return 0;
 }
 #endif /* CONFIG_STRICT_DEVMEM */
index 3124a20d0fab7a66b3a170356037da0d18c9da85..593341f2cf11ef8e07a0c07c561e9be762dbdbf5 100644 (file)
@@ -646,6 +646,28 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
        return pgtable;
 }
 
+void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                            unsigned long address, pmd_t *pmdp)
+{
+       VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+       VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
+
+       /*
+        * We can't mark the pmd none here, because that will cause a race
+        * against exit_mmap. We need to keep the pmd marked TRANS HUGE while
+        * we split, but at the same time we want the rest of the ppc64 code
+        * not to insert a hash pte for it, because we will be modifying
+        * the deposited pgtable in the caller of this function. Hence
+        * clear _PAGE_USER so that fault handling moves to a higher-level
+        * function, which will serialize against the ptl.
+        * We need to flush existing hash pte entries here even though
+        * the translation is still valid, because we will withdraw the
+        * pgtable_t after this.
+        */
+       pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
+}
+
+
 /*
  * set a new huge pmd. We should not be called for updating
  * an existing pmd entry. That should go via pmd_hugepage_update.
@@ -663,10 +685,19 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
        return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
 }
 
+/*
+ * We use this to invalidate a pmdp entry before switching from a
+ * hugepte to a regular pmd entry.
+ */
 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
 {
-       pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+       pmd_hugepage_update(vma->vm_mm, address, pmdp, ~0UL, 0);
+       /*
+        * This ensures that generic code that relies on IRQ disabling
+        * to prevent a parallel THP split works as expected.
+        */
+       kick_all_cpus_sync();
 }
 
 /*
index 7d5e295255b7b25934d51792fb6876c16462d420..9958ba8bf0d27fca74a182546170a487edb88346 100644 (file)
@@ -816,7 +816,7 @@ static struct power_pmu power8_pmu = {
        .get_constraint         = power8_get_constraint,
        .get_alternatives       = power8_get_alternatives,
        .disable_pmc            = power8_disable_pmc,
-       .flags                  = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
+       .flags                  = PPMU_HAS_SIER | PPMU_ARCH_207S,
        .n_generic              = ARRAY_SIZE(power8_generic_events),
        .generic_events         = power8_generic_events,
        .cache_events           = &power8_cache_events,
index 5f152b95ca0c8493536d787a51d741ae628adb8a..87f47e55aab65ac234df1d67c926ca17b1517f06 100644 (file)
@@ -444,9 +444,12 @@ static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
         * PCI devices of the PE are expected to be removed prior
         * to PE reset.
         */
-       if (!edev->pe->bus)
+       if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
                edev->pe->bus = pci_find_bus(hose->global_number,
                                             pdn->busno);
+               if (edev->pe->bus)
+                       edev->pe->state |= EEH_PE_PRI_BUS;
+       }
 
        /*
         * Enable EEH explicitly so that we will do EEH check
index 573ae1994097fb91e15e3f7f6351fe1e73b35c59..f90dc04395bf47bcc0e662a7a1c17526220ccda2 100644 (file)
@@ -3180,6 +3180,7 @@ static void pnv_pci_ioda_shutdown(struct pci_controller *hose)
 
 static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
        .dma_dev_setup = pnv_pci_dma_dev_setup,
+       .dma_bus_setup = pnv_pci_dma_bus_setup,
 #ifdef CONFIG_PCI_MSI
        .setup_msi_irqs = pnv_setup_msi_irqs,
        .teardown_msi_irqs = pnv_teardown_msi_irqs,
index 2f55c86df703554bfd9541a44f8ba26bec072729..cf8417dd4dfc40f0484438e377aca98d5305928d 100644 (file)
@@ -760,6 +760,26 @@ void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
                phb->dma_dev_setup(phb, pdev);
 }
 
+void pnv_pci_dma_bus_setup(struct pci_bus *bus)
+{
+       struct pci_controller *hose = bus->sysdata;
+       struct pnv_phb *phb = hose->private_data;
+       struct pnv_ioda_pe *pe;
+
+       list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+               if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
+                       continue;
+
+               if (!pe->pbus)
+                       continue;
+
+               if (bus->number == ((pe->rid >> 8) & 0xFF)) {
+                       pe->pbus = bus;
+                       break;
+               }
+       }
+}
+
 void pnv_pci_shutdown(void)
 {
        struct pci_controller *hose;
index 7f56313e8d7223dfd9f22b927c807b20516cf941..00691a9b99af67b09967c73b39b652b973392f06 100644 (file)
@@ -242,6 +242,7 @@ extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
 extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
 
 extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
+extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
 extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
 extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
 
index 16aa0c779e0762e210fe6652a0a5efda24771381..595a275c36f87d5ea4fa58d0a580eb36c601ef34 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/types.h>
 
+#define ARCH_IRQ_ENABLED       (3UL << (BITS_PER_LONG - 8))
+
 /* store then OR system mask. */
 #define __arch_local_irq_stosm(__or)                                   \
 ({                                                                     \
@@ -54,14 +56,17 @@ static inline notrace void arch_local_irq_enable(void)
        __arch_local_irq_stosm(0x03);
 }
 
+/* This only restores external and I/O interrupt state */
 static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
-       __arch_local_irq_ssm(flags);
+       /* only disabled->disabled and disabled->enabled are valid */
+       if (flags & ARCH_IRQ_ENABLED)
+               arch_local_irq_enable();
 }
 
 static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
-       return !(flags & (3UL << (BITS_PER_LONG - 8)));
+       return !(flags & ARCH_IRQ_ENABLED);
 }
 
 static inline notrace bool arch_irqs_disabled(void)
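
With this change the flags word saved by arch_local_irq_save() only matters for the two bits covered by ARCH_IRQ_ENABLED (the PSW I/O and external interrupt masks), and restore re-enables interrupts only when those bits were set. A small host-side sketch of the bit arithmetic, assuming a 64-bit long:

#include <stdio.h>

#define BITS_PER_LONG		64
#define ARCH_IRQ_ENABLED	(3UL << (BITS_PER_LONG - 8))

int main(void)
{
	unsigned long disabled = 0, enabled = ARCH_IRQ_ENABLED;

	/* The I/O and external interrupt mask bits land in the top byte of
	 * the saved flags word, so the mask is 0x0300000000000000. */
	printf("ARCH_IRQ_ENABLED = %#018lx\n", ARCH_IRQ_ENABLED);
	printf("irqs_disabled(disabled flags) = %d\n",
	       !(disabled & ARCH_IRQ_ENABLED));
	printf("irqs_disabled(enabled flags)  = %d\n",
	       !(enabled & ARCH_IRQ_ENABLED));
	return 0;
}
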
index 6742414dbd6f30b66451f04602cb55de0d20f41c..727e7f7b33fddbb977c91b72e7d922399e7cb1b0 100644 (file)
@@ -229,17 +229,11 @@ struct kvm_s390_itdb {
        __u8    data[256];
 } __packed;
 
-struct kvm_s390_vregs {
-       __vector128 vrs[32];
-       __u8    reserved200[512];       /* for future vector expansion */
-} __packed;
-
 struct sie_page {
        struct kvm_s390_sie_block sie_block;
        __u8 reserved200[1024];         /* 0x0200 */
        struct kvm_s390_itdb itdb;      /* 0x0600 */
-       __u8 reserved700[1280];         /* 0x0700 */
-       struct kvm_s390_vregs vregs;    /* 0x0c00 */
+       __u8 reserved700[2304];         /* 0x0700 */
 } __packed;
 
 struct kvm_vcpu_stat {
@@ -546,7 +540,6 @@ struct kvm_vcpu_arch {
        struct kvm_s390_sie_block *sie_block;
        unsigned int      host_acrs[NUM_ACRS];
        struct fpu        host_fpregs;
-       struct fpu        guest_fpregs;
        struct kvm_s390_local_interrupt local_int;
        struct hrtimer    ckc_timer;
        struct kvm_s390_pgm_info pgm;
index 1a9a98de5bdebc346c469f55818f08ed33b576e7..69aa18be61afcfc1e764c5c51ec5cbe5d0fe2043 100644 (file)
@@ -8,10 +8,13 @@
 #include <asm/pci_insn.h>
 
 /* I/O Map */
-#define ZPCI_IOMAP_MAX_ENTRIES         0x7fff
-#define ZPCI_IOMAP_ADDR_BASE           0x8000000000000000ULL
-#define ZPCI_IOMAP_ADDR_IDX_MASK       0x7fff000000000000ULL
-#define ZPCI_IOMAP_ADDR_OFF_MASK       0x0000ffffffffffffULL
+#define ZPCI_IOMAP_SHIFT               48
+#define ZPCI_IOMAP_ADDR_BASE           0x8000000000000000UL
+#define ZPCI_IOMAP_ADDR_OFF_MASK       ((1UL << ZPCI_IOMAP_SHIFT) - 1)
+#define ZPCI_IOMAP_MAX_ENTRIES                                                 \
+       ((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
+#define ZPCI_IOMAP_ADDR_IDX_MASK                                               \
+       (~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
 
 struct zpci_iomap_entry {
        u32 fh;
@@ -21,8 +24,9 @@ struct zpci_iomap_entry {
 
 extern struct zpci_iomap_entry *zpci_iomap_start;
 
+#define ZPCI_ADDR(idx) (ZPCI_IOMAP_ADDR_BASE | ((u64) idx << ZPCI_IOMAP_SHIFT))
 #define ZPCI_IDX(addr)                                                         \
-       (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> 48)
+       (((__force u64) addr & ZPCI_IOMAP_ADDR_IDX_MASK) >> ZPCI_IOMAP_SHIFT)
 #define ZPCI_OFFSET(addr)                                                      \
        ((__force u64) addr & ZPCI_IOMAP_ADDR_OFF_MASK)
 
index f16debf6a612932a7312428465d9ceb1bee41915..1c4fe129486d2e0a9282dc72a3735c4727277b52 100644 (file)
@@ -166,14 +166,14 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
  */
 #define start_thread(regs, new_psw, new_stackp) do {                   \
        regs->psw.mask  = PSW_USER_BITS | PSW_MASK_EA | PSW_MASK_BA;    \
-       regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
+       regs->psw.addr  = new_psw;                                      \
        regs->gprs[15]  = new_stackp;                                   \
        execve_tail();                                                  \
 } while (0)
 
 #define start_thread31(regs, new_psw, new_stackp) do {                 \
        regs->psw.mask  = PSW_USER_BITS | PSW_MASK_BA;                  \
-       regs->psw.addr  = new_psw | PSW_ADDR_AMODE;                     \
+       regs->psw.addr  = new_psw;                                      \
        regs->gprs[15]  = new_stackp;                                   \
        crst_table_downgrade(current->mm, 1UL << 31);                   \
        execve_tail();                                                  \
index f00cd35c8ac4634e9270ff81e73e603ccc5573f0..99bc456cc26a3087cbec7f35ca82959a94cf2ab3 100644 (file)
@@ -149,7 +149,7 @@ static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
 #define arch_has_block_step()  (1)
 
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
-#define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
+#define instruction_pointer(regs) ((regs)->psw.addr)
 #define user_stack_pointer(regs)((regs)->gprs[15])
 #define profile_pc(regs) instruction_pointer(regs)
 
@@ -161,7 +161,7 @@ static inline long regs_return_value(struct pt_regs *regs)
 static inline void instruction_pointer_set(struct pt_regs *regs,
                                           unsigned long val)
 {
-       regs->psw.addr = val | PSW_ADDR_AMODE;
+       regs->psw.addr = val;
 }
 
 int regs_query_register_offset(const char *name);
@@ -171,7 +171,7 @@ unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n);
 
 static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
 {
-       return regs->gprs[15] & PSW_ADDR_INSN;
+       return regs->gprs[15];
 }
 
 #endif /* __ASSEMBLY__ */
index fe84bd5fe7ce05a44c4b75630a7f35405d075ec7..347fe5afa419c1cfb0051586009050f12b452c84 100644 (file)
@@ -154,6 +154,7 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_PFAULT (1UL << 5)
 #define KVM_SYNC_VRS    (1UL << 6)
 #define KVM_SYNC_RICCB  (1UL << 7)
+#define KVM_SYNC_FPRS   (1UL << 8)
 /* definition of registers in kvm_run */
 struct kvm_sync_regs {
        __u64 prefix;   /* prefix register */
@@ -168,9 +169,12 @@ struct kvm_sync_regs {
        __u64 pft;      /* pfault token [PFAULT] */
        __u64 pfs;      /* pfault select [PFAULT] */
        __u64 pfc;      /* pfault compare [PFAULT] */
-       __u64 vrs[32][2];       /* vector registers */
+       union {
+               __u64 vrs[32][2];       /* vector registers (KVM_SYNC_VRS) */
+               __u64 fprs[16];         /* fp registers (KVM_SYNC_FPRS) */
+       };
        __u8  reserved[512];    /* for future vector expansion */
-       __u32 fpc;      /* only valid with vector registers */
+       __u32 fpc;              /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
        __u8 padding[52];       /* riccb needs to be 64byte aligned */
        __u8 riccb[64];         /* runtime instrumentation controls block */
 };
index 34ec202472c6cadb5b973b54c6c995bb6d2e897c..ab3aa6875a59ca4dc3760663d79189090286d7ab 100644 (file)
 #define __NR_recvmsg           372
 #define __NR_shutdown          373
 #define __NR_mlock2            374
-#define NR_syscalls 375
+#define __NR_copy_file_range   375
+#define NR_syscalls 376
 
 /* 
  * There are some system calls that are not present on 64 bit, some
index fac4eeddef91fe3acb21e0108069f3f3a70ef72a..ae2cda5eee5a99b35b73e5b7868edd44cba1c6d2 100644 (file)
@@ -177,3 +177,4 @@ COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
 COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
 COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
 COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
+COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags);
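
This wires copy_file_range (syscall 375 here) into the s390 compat layer; together with the syscall-table entry further down it becomes callable from userspace. A minimal usage sketch, assuming a kernel and libc that expose the wrapper (glibc 2.27+); the file names are placeholders:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Copy the whole of "src" into "dst" using copy_file_range(2). */
int main(void)
{
	int in = open("src", O_RDONLY);
	int out = open("dst", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	ssize_t n;

	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	/* NULL offsets let the kernel advance both file positions. */
	do {
		n = copy_file_range(in, NULL, out, NULL, 1 << 20, 0);
	} while (n > 0);
	if (n < 0)
		perror("copy_file_range");
	close(in);
	close(out);
	return n < 0;
}
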
index a92b39fd0e63d953db2d94bbfb090d9f257e867f..3986c9f6219174bf7462b88579b5b030d830185e 100644 (file)
@@ -59,8 +59,6 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu)
        struct save_area *sa;
 
        sa = (void *) memblock_alloc(sizeof(*sa), 8);
-       if (!sa)
-               return NULL;
        if (is_boot_cpu)
                list_add(&sa->list, &dump_save_areas);
        else
index 6fca0e46464e01842f613584e4c82e5ca1fe4270..c890a5589e59583bb06b1d7dffe7226069e522a0 100644 (file)
@@ -1470,7 +1470,7 @@ debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
                except_str = "*";
        else
                except_str = "-";
-       caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
+       caller = (unsigned long) entry->caller;
        rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p  ",
                      area, (long long)time_spec.tv_sec,
                      time_spec.tv_nsec / 1000, level, except_str,
index dc8e20473484ce32f037f1e9115779a8a9d7d0cd..02bd02ff648b5ab570c5ffe920b27bbfbc3d5fd3 100644 (file)
@@ -34,22 +34,21 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
        unsigned long addr;
 
        while (1) {
-               sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
-               addr = sf->gprs[8] & PSW_ADDR_INSN;
+               addr = sf->gprs[8];
                printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
-                       sp = sf->back_chain & PSW_ADDR_INSN;
+                       sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
-                       addr = sf->gprs[8] & PSW_ADDR_INSN;
+                       addr = sf->gprs[8];
                        printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
                }
                /* Zero backchain detected, check for interrupt frame. */
@@ -57,7 +56,7 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
-               addr = regs->psw.addr & PSW_ADDR_INSN;
+               addr = regs->psw.addr;
                printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
                low = sp;
                sp = regs->gprs[15];
index 20a5caf6d981d9f81852b015765662c745ed6b3b..a0684de5a93b99ae199f9bf6906327f4513f662e 100644 (file)
@@ -252,14 +252,14 @@ static void early_pgm_check_handler(void)
        unsigned long addr;
 
        addr = S390_lowcore.program_old_psw.addr;
-       fixup = search_exception_tables(addr & PSW_ADDR_INSN);
+       fixup = search_exception_tables(addr);
        if (!fixup)
                disabled_wait(0);
        /* Disable low address protection before storing into lowcore. */
        __ctl_store(cr0, 0, 0);
        cr0_new = cr0 & ~(1UL << 28);
        __ctl_load(cr0_new, 0, 0);
-       S390_lowcore.program_old_psw.addr = extable_fixup(fixup)|PSW_ADDR_AMODE;
+       S390_lowcore.program_old_psw.addr = extable_fixup(fixup);
        __ctl_load(cr0, 0, 0);
 }
 
@@ -268,9 +268,9 @@ static noinline __init void setup_lowcore_early(void)
        psw_t psw;
 
        psw.mask = PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA;
-       psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
+       psw.addr = (unsigned long) s390_base_ext_handler;
        S390_lowcore.external_new_psw = psw;
-       psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
+       psw.addr = (unsigned long) s390_base_pgm_handler;
        S390_lowcore.program_new_psw = psw;
        s390_base_pgm_handler_fn = early_pgm_check_handler;
 }
@@ -448,7 +448,6 @@ void __init startup_init(void)
        rescue_initrd();
        clear_bss_section();
        init_kernel_storage_key();
-       lockdep_init();
        lockdep_off();
        setup_lowcore_early();
        setup_facility_list();
index e0eaf11134b44706e1223daca7bd66e938bf37f0..0f7bfeba6da6a3632f2705ebbaccc065ffd356a2 100644 (file)
@@ -203,7 +203,7 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
                goto out;
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;
-       ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
+       ip -= MCOUNT_INSN_SIZE;
        trace.func = ip;
        trace.depth = current->curr_ret_stack + 1;
        /* Only trace if the calling function expects to. */
index 0a5a6b661b938743684a6aec8d1d5ebcca537be1..f20abdb5630adceaa75690907ddf7cdedc098146 100644 (file)
@@ -2057,12 +2057,12 @@ void s390_reset_system(void)
        /* Set new machine check handler */
        S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
        S390_lowcore.mcck_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
+               (unsigned long) s390_base_mcck_handler;
 
        /* Set new program check handler */
        S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT;
        S390_lowcore.program_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
+               (unsigned long) s390_base_pgm_handler;
 
        /*
         * Clear subchannel ID and number to signal new kernel that no CCW or
index 389db56a2208bfa118d6f31aee9b52d090031960..250f5972536a8948a916e56a4514e942e03bde4a 100644 (file)
@@ -226,7 +226,7 @@ static void enable_singlestep(struct kprobe_ctlblk *kcb,
        __ctl_load(per_kprobe, 9, 11);
        regs->psw.mask |= PSW_MASK_PER;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
-       regs->psw.addr = ip | PSW_ADDR_AMODE;
+       regs->psw.addr = ip;
 }
 NOKPROBE_SYMBOL(enable_singlestep);
 
@@ -238,7 +238,7 @@ static void disable_singlestep(struct kprobe_ctlblk *kcb,
        __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
        regs->psw.mask &= ~PSW_MASK_PER;
        regs->psw.mask |= kcb->kprobe_saved_imask;
-       regs->psw.addr = ip | PSW_ADDR_AMODE;
+       regs->psw.addr = ip;
 }
 NOKPROBE_SYMBOL(disable_singlestep);
 
@@ -310,7 +310,7 @@ static int kprobe_handler(struct pt_regs *regs)
         */
        preempt_disable();
        kcb = get_kprobe_ctlblk();
-       p = get_kprobe((void *)((regs->psw.addr & PSW_ADDR_INSN) - 2));
+       p = get_kprobe((void *)(regs->psw.addr - 2));
 
        if (p) {
                if (kprobe_running()) {
@@ -460,7 +460,7 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
                        break;
        }
 
-       regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
+       regs->psw.addr = orig_ret_address;
 
        pop_kprobe(get_kprobe_ctlblk());
        kretprobe_hash_unlock(current, &flags);
@@ -490,7 +490,7 @@ NOKPROBE_SYMBOL(trampoline_probe_handler);
 static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
+       unsigned long ip = regs->psw.addr;
        int fixup = probe_get_fixup_type(p->ainsn.insn);
 
        /* Check if the kprobes location is an enabled ftrace caller */
@@ -605,9 +605,9 @@ static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
                 * In case the user-specified fault handler returned
                 * zero, try to fix up.
                 */
-               entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+               entry = search_exception_tables(regs->psw.addr);
                if (entry) {
-                       regs->psw.addr = extable_fixup(entry) | PSW_ADDR_AMODE;
+                       regs->psw.addr = extable_fixup(entry);
                        return 1;
                }
 
@@ -683,7 +683,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
 
        /* setup return addr to the jprobe handler routine */
-       regs->psw.addr = (unsigned long) jp->entry | PSW_ADDR_AMODE;
+       regs->psw.addr = (unsigned long) jp->entry;
        regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
 
        /* r15 is the stack pointer */
index 61595c1f0a0fe7d502bb6c213519929effe48b02..0943b11a2f6e22c088dc13d3b8a0de9b4e6ef92c 100644 (file)
@@ -74,7 +74,7 @@ static unsigned long guest_is_user_mode(struct pt_regs *regs)
 
 static unsigned long instruction_pointer_guest(struct pt_regs *regs)
 {
-       return sie_block(regs)->gpsw.addr & PSW_ADDR_INSN;
+       return sie_block(regs)->gpsw.addr;
 }
 
 unsigned long perf_instruction_pointer(struct pt_regs *regs)
@@ -231,29 +231,27 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry,
        struct pt_regs *regs;
 
        while (1) {
-               sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
-               perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
+               perf_callchain_store(entry, sf->gprs[8]);
                /* Follow the backchain. */
                while (1) {
                        low = sp;
-                       sp = sf->back_chain & PSW_ADDR_INSN;
+                       sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
-                       perf_callchain_store(entry,
-                                            sf->gprs[8] & PSW_ADDR_INSN);
+                       perf_callchain_store(entry, sf->gprs[8]);
                }
                /* Zero backchain detected, check for interrupt frame. */
                sp = (unsigned long) (sf + 1);
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *) sp;
-               perf_callchain_store(entry, sf->gprs[8] & PSW_ADDR_INSN);
+               perf_callchain_store(entry, sf->gprs[8]);
                low = sp;
                sp = regs->gprs[15];
        }
@@ -262,12 +260,13 @@ static unsigned long __store_trace(struct perf_callchain_entry *entry,
 void perf_callchain_kernel(struct perf_callchain_entry *entry,
                           struct pt_regs *regs)
 {
-       unsigned long head;
+       unsigned long head, frame_size;
        struct stack_frame *head_sf;
 
        if (user_mode(regs))
                return;
 
+       frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
        head = regs->gprs[15];
        head_sf = (struct stack_frame *) head;
 
@@ -275,8 +274,9 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
                return;
 
        head = head_sf->back_chain;
-       head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
-                            S390_lowcore.async_stack);
+       head = __store_trace(entry, head,
+                            S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+                            S390_lowcore.async_stack + frame_size);
 
        __store_trace(entry, head, S390_lowcore.thread_info,
                      S390_lowcore.thread_info + THREAD_SIZE);
index 114ee8b96f17d31d5dba657c704bc34a2e1b9a9e..2bba7df4ac519fdc773551600b8bab2b9fef4789 100644 (file)
@@ -56,10 +56,10 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
                return 0;
        low = task_stack_page(tsk);
        high = (struct stack_frame *) task_pt_regs(tsk);
-       sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN);
+       sf = (struct stack_frame *) tsk->thread.ksp;
        if (sf <= low || sf > high)
                return 0;
-       sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+       sf = (struct stack_frame *) sf->back_chain;
        if (sf <= low || sf > high)
                return 0;
        return sf->gprs[8];
@@ -154,7 +154,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
                memset(&frame->childregs, 0, sizeof(struct pt_regs));
                frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
                                PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-               frame->childregs.psw.addr = PSW_ADDR_AMODE |
+               frame->childregs.psw.addr =
                                (unsigned long) kernel_thread_starter;
                frame->childregs.gprs[9] = new_stackp; /* function */
                frame->childregs.gprs[10] = arg;
@@ -220,14 +220,14 @@ unsigned long get_wchan(struct task_struct *p)
                return 0;
        low = task_stack_page(p);
        high = (struct stack_frame *) task_pt_regs(p);
-       sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
+       sf = (struct stack_frame *) p->thread.ksp;
        if (sf <= low || sf > high)
                return 0;
        for (count = 0; count < 16; count++) {
-               sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
+               sf = (struct stack_frame *) sf->back_chain;
                if (sf <= low || sf > high)
                        return 0;
-               return_address = sf->gprs[8] & PSW_ADDR_INSN;
+               return_address = sf->gprs[8];
                if (!in_sched_functions(return_address))
                        return return_address;
        }
index 01c37b36caf964ec4616048c00d6602607a9abca..49b1c13bf6c96372d60815e8549c10b15b40914f 100644 (file)
@@ -84,7 +84,7 @@ void update_cr_regs(struct task_struct *task)
                if (test_tsk_thread_flag(task, TIF_UPROBE_SINGLESTEP))
                        new.control |= PER_EVENT_IFETCH;
                new.start = 0;
-               new.end = PSW_ADDR_INSN;
+               new.end = -1UL;
        }
 
        /* Take care of the PER enablement bit in the PSW. */
@@ -148,7 +148,7 @@ static inline unsigned long __peek_user_per(struct task_struct *child,
        else if (addr == (addr_t) &dummy->cr11)
                /* End address of the active per set. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
-                       PSW_ADDR_INSN : child->thread.per_user.end;
+                       -1UL : child->thread.per_user.end;
        else if (addr == (addr_t) &dummy->bits)
                /* Single-step bit. */
                return test_thread_flag(TIF_SINGLE_STEP) ?
@@ -495,8 +495,6 @@ long arch_ptrace(struct task_struct *child, long request,
                }
                return 0;
        default:
-               /* Removing high order bit from addr (only for 31 bit). */
-               addr &= PSW_ADDR_INSN;
                return ptrace_request(child, request, addr, data);
        }
 }
index c6878fbbcf1324661ac52f4f55ba1e40840fbd89..cedb0198675f7577c7237bb61c006acaf0e984d4 100644 (file)
@@ -301,25 +301,21 @@ static void __init setup_lowcore(void)
        BUILD_BUG_ON(sizeof(struct lowcore) != LC_PAGES * 4096);
        lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
        lc->restart_psw.mask = PSW_KERNEL_BITS;
-       lc->restart_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
+       lc->restart_psw.addr = (unsigned long) restart_int_handler;
        lc->external_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
-       lc->external_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
+       lc->external_new_psw.addr = (unsigned long) ext_int_handler;
        lc->svc_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
-       lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
+       lc->svc_new_psw.addr = (unsigned long) system_call;
        lc->program_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
-       lc->program_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
+       lc->program_new_psw.addr = (unsigned long) pgm_check_handler;
        lc->mcck_new_psw.mask = PSW_KERNEL_BITS;
-       lc->mcck_new_psw.addr =
-               PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
+       lc->mcck_new_psw.addr = (unsigned long) mcck_int_handler;
        lc->io_new_psw.mask = PSW_KERNEL_BITS |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
-       lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
+       lc->io_new_psw.addr = (unsigned long) io_int_handler;
        lc->clock_comparator = -1ULL;
        lc->kernel_stack = ((unsigned long) &init_thread_union)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
@@ -378,17 +374,17 @@ static void __init setup_lowcore(void)
 
 static struct resource code_resource = {
        .name  = "Kernel code",
-       .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+       .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 };
 
 static struct resource data_resource = {
        .name = "Kernel data",
-       .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+       .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 };
 
 static struct resource bss_resource = {
        .name = "Kernel bss",
-       .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+       .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 };
 
 static struct resource __initdata *standard_resources[] = {
@@ -412,7 +408,7 @@ static void __init setup_resources(void)
 
        for_each_memblock(memory, reg) {
                res = alloc_bootmem_low(sizeof(*res));
-               res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+               res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
 
                res->name = "System RAM";
                res->start = reg->base;
index 028cc46cb82ad77ac21a7c9b9ea897e2312480d4..d82562cf0a0e1f5057987f59f6d541ea374ea683 100644 (file)
@@ -331,13 +331,13 @@ static int setup_frame(int sig, struct k_sigaction *ka,
        /* Set up to return from userspace.  If provided, use a stub
           already in userspace.  */
        if (ka->sa.sa_flags & SA_RESTORER) {
-               restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
+               restorer = (unsigned long) ka->sa.sa_restorer;
        } else {
                /* Signal frame without vector registers are short ! */
                __u16 __user *svc = (void __user *) frame + frame_size - 2;
                if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
                        return -EFAULT;
-               restorer = (unsigned long) svc | PSW_ADDR_AMODE;
+               restorer = (unsigned long) svc;
        }
 
        /* Set up registers for signal handler */
@@ -347,7 +347,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
        regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
                (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
-       regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;
+       regs->psw.addr = (unsigned long) ka->sa.sa_handler;
 
        regs->gprs[2] = sig;
        regs->gprs[3] = (unsigned long) &frame->sc;
@@ -394,13 +394,12 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        /* Set up to return from userspace.  If provided, use a stub
           already in userspace.  */
        if (ksig->ka.sa.sa_flags & SA_RESTORER) {
-               restorer = (unsigned long)
-                       ksig->ka.sa.sa_restorer | PSW_ADDR_AMODE;
+               restorer = (unsigned long) ksig->ka.sa.sa_restorer;
        } else {
                __u16 __user *svc = &frame->svc_insn;
                if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn, svc))
                        return -EFAULT;
-               restorer = (unsigned long) svc | PSW_ADDR_AMODE;
+               restorer = (unsigned long) svc;
        }
 
        /* Create siginfo on the signal stack */
@@ -426,7 +425,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
        regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA |
                (PSW_USER_BITS & PSW_MASK_ASC) |
                (regs->psw.mask & ~PSW_MASK_ASC);
-       regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler | PSW_ADDR_AMODE;
+       regs->psw.addr = (unsigned long) ksig->ka.sa.sa_handler;
 
        regs->gprs[2] = ksig->sig;
        regs->gprs[3] = (unsigned long) &frame->info;
index a13468b9a9133b9f46d8c90575ea0b5ffcd41e04..3c65a8eae34d35b6a670066db2bc80c8fa3ccf67 100644 (file)
@@ -623,8 +623,6 @@ void __init smp_save_dump_cpus(void)
                return;
        /* Allocate a page as dumping area for the store status sigps */
        page = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, 1UL << 31);
-       if (!page)
-               panic("could not allocate memory for save area\n");
        /* Set multi-threading state to the previous system. */
        pcpu_set_smt(sclp.mtid_prev);
        boot_cpu_addr = stap();
index 1785cd82253caec869abbb7f5d7804b42effad04..8f64ebd63767c7a1a3e434b994b65110e890f741 100644 (file)
@@ -21,12 +21,11 @@ static unsigned long save_context_stack(struct stack_trace *trace,
        unsigned long addr;
 
        while(1) {
-               sp &= PSW_ADDR_INSN;
                if (sp < low || sp > high)
                        return sp;
                sf = (struct stack_frame *)sp;
                while(1) {
-                       addr = sf->gprs[8] & PSW_ADDR_INSN;
+                       addr = sf->gprs[8];
                        if (!trace->skip)
                                trace->entries[trace->nr_entries++] = addr;
                        else
@@ -34,7 +33,7 @@ static unsigned long save_context_stack(struct stack_trace *trace,
                        if (trace->nr_entries >= trace->max_entries)
                                return sp;
                        low = sp;
-                       sp = sf->back_chain & PSW_ADDR_INSN;
+                       sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
@@ -46,7 +45,7 @@ static unsigned long save_context_stack(struct stack_trace *trace,
                if (sp <= low || sp > high - sizeof(*regs))
                        return sp;
                regs = (struct pt_regs *)sp;
-               addr = regs->psw.addr & PSW_ADDR_INSN;
+               addr = regs->psw.addr;
                if (savesched || !in_sched_functions(addr)) {
                        if (!trace->skip)
                                trace->entries[trace->nr_entries++] = addr;
@@ -60,33 +59,43 @@ static unsigned long save_context_stack(struct stack_trace *trace,
        }
 }
 
-void save_stack_trace(struct stack_trace *trace)
+static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
 {
-       register unsigned long sp asm ("15");
-       unsigned long orig_sp, new_sp;
+       unsigned long new_sp, frame_size;
 
-       orig_sp = sp & PSW_ADDR_INSN;
-       new_sp = save_context_stack(trace, orig_sp,
-                                   S390_lowcore.panic_stack - PAGE_SIZE,
-                                   S390_lowcore.panic_stack, 1);
-       if (new_sp != orig_sp)
-               return;
+       frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+       new_sp = save_context_stack(trace, sp,
+                       S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
+                       S390_lowcore.panic_stack + frame_size, 1);
        new_sp = save_context_stack(trace, new_sp,
-                                   S390_lowcore.async_stack - ASYNC_SIZE,
-                                   S390_lowcore.async_stack, 1);
-       if (new_sp != orig_sp)
-               return;
+                       S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+                       S390_lowcore.async_stack + frame_size, 1);
        save_context_stack(trace, new_sp,
                           S390_lowcore.thread_info,
                           S390_lowcore.thread_info + THREAD_SIZE, 1);
 }
+
+void save_stack_trace(struct stack_trace *trace)
+{
+       register unsigned long r15 asm ("15");
+       unsigned long sp;
+
+       sp = r15;
+       __save_stack_trace(trace, sp);
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
        unsigned long sp, low, high;
 
-       sp = tsk->thread.ksp & PSW_ADDR_INSN;
+       sp = tsk->thread.ksp;
+       if (tsk == current) {
+               /* Get current stack pointer. */
+               asm volatile("la %0,0(15)" : "=a" (sp));
+       }
        low = (unsigned long) task_stack_page(tsk);
        high = (unsigned long) task_pt_regs(tsk);
        save_context_stack(trace, sp, low, high, 0);
@@ -94,3 +103,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+       unsigned long sp;
+
+       sp = kernel_stack_pointer(regs);
+       __save_stack_trace(trace, sp);
+       if (trace->nr_entries < trace->max_entries)
+               trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
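
The new save_stack_trace_regs() export lets callers capture a backtrace for an arbitrary pt_regs snapshot instead of the current stack pointer. A hypothetical in-kernel caller using the classic stack_trace API (buffer size and printing are illustrative only):

#include <linux/stacktrace.h>
#include <linux/kernel.h>

static void dump_regs_trace(struct pt_regs *regs)
{
	unsigned long entries[16];
	struct stack_trace trace = {
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
	};
	unsigned int i;

	save_stack_trace_regs(regs, &trace);
	for (i = 0; i < trace.nr_entries; i++) {
		if (entries[i] == ULONG_MAX)	/* terminator added above */
			break;
		pr_info(" [<%016lx>] %pS\n", entries[i], (void *)entries[i]);
	}
}
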
index 5378c3ea1b984918ed3719c8a9cf8d1c8c86574a..293d8b98fd525c992146fa4f5c85f1287d90bc62 100644 (file)
@@ -383,3 +383,4 @@ SYSCALL(sys_recvfrom,compat_sys_recvfrom)
 SYSCALL(sys_recvmsg,compat_sys_recvmsg)
 SYSCALL(sys_shutdown,sys_shutdown)
 SYSCALL(sys_mlock2,compat_sys_mlock2)
+SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */
index d69d648759c9f926ae91771cef6a8cc291ee403e..017eb03daee2e724a603420f65b6e93dde98ac6a 100644 (file)
@@ -32,8 +32,7 @@ static inline void __user *get_trap_ip(struct pt_regs *regs)
                address = *(unsigned long *)(current->thread.trap_tdb + 24);
        else
                address = regs->psw.addr;
-       return (void __user *)
-               ((address - (regs->int_code >> 16)) & PSW_ADDR_INSN);
+       return (void __user *) (address - (regs->int_code >> 16));
 }
 
 static inline void report_user_fault(struct pt_regs *regs, int signr)
@@ -46,7 +45,7 @@ static inline void report_user_fault(struct pt_regs *regs, int signr)
                return;
        printk("User process fault: interruption code %04x ilc:%d ",
               regs->int_code & 0xffff, regs->int_code >> 17);
-       print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
+       print_vma_addr("in ", regs->psw.addr);
        printk("\n");
        show_regs(regs);
 }
@@ -69,13 +68,13 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
                report_user_fault(regs, si_signo);
         } else {
                 const struct exception_table_entry *fixup;
-                fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+               fixup = search_exception_tables(regs->psw.addr);
                 if (fixup)
-                       regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
+                       regs->psw.addr = extable_fixup(fixup);
                else {
                        enum bug_trap_type btt;
 
-                       btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs);
+                       btt = report_bug(regs->psw.addr, regs);
                        if (btt == BUG_TRAP_TYPE_WARN)
                                return;
                        die(regs, str);
index 5fce52cf0e57dc4831d0737ddd3c3a2cfbc386d7..5ea5af3c7db72479a69710a6606d8dddd6143b7a 100644 (file)
@@ -29,6 +29,7 @@ config KVM
        select HAVE_KVM_IRQFD
        select HAVE_KVM_IRQ_ROUTING
        select SRCU
+       select KVM_VFIO
        ---help---
          Support hosting paravirtualized guest machines using the SIE
          virtualization capability on the mainframe. This should work
index b3b553469650888fd31df5d975fe4b93b2399f70..d42fa38c24292157c0da0f015d2063bc1c5e5723 100644 (file)
@@ -7,7 +7,7 @@
 # as published by the Free Software Foundation.
 
 KVM := ../../../virt/kvm
-common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o  $(KVM)/async_pf.o $(KVM)/irqchip.o
+common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o  $(KVM)/async_pf.o $(KVM)/irqchip.o $(KVM)/vfio.o
 
 ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
 
index d30db40437dc0d03943a1c98f79dbb7dfce8e863..66938d283b77094f694bccad132c749bb9c745af 100644 (file)
@@ -373,7 +373,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu)
 }
 
 static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
-                         int write)
+                         enum gacc_mode mode)
 {
        union alet alet;
        struct ale ale;
@@ -454,7 +454,7 @@ static int ar_translation(struct kvm_vcpu *vcpu, union asce *asce, ar_t ar,
                }
        }
 
-       if (ale.fo == 1 && write)
+       if (ale.fo == 1 && mode == GACC_STORE)
                return PGM_PROTECTION;
 
        asce->val = aste.asce;
@@ -477,25 +477,28 @@ enum {
 };
 
 static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
-                        ar_t ar, int write)
+                        ar_t ar, enum gacc_mode mode)
 {
        int rc;
-       psw_t *psw = &vcpu->arch.sie_block->gpsw;
+       struct psw_bits psw = psw_bits(vcpu->arch.sie_block->gpsw);
        struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
        struct trans_exc_code_bits *tec_bits;
 
        memset(pgm, 0, sizeof(*pgm));
        tec_bits = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-       tec_bits->fsi = write ? FSI_STORE : FSI_FETCH;
-       tec_bits->as = psw_bits(*psw).as;
+       tec_bits->fsi = mode == GACC_STORE ? FSI_STORE : FSI_FETCH;
+       tec_bits->as = psw.as;
 
-       if (!psw_bits(*psw).t) {
+       if (!psw.t) {
                asce->val = 0;
                asce->r = 1;
                return 0;
        }
 
-       switch (psw_bits(vcpu->arch.sie_block->gpsw).as) {
+       if (mode == GACC_IFETCH)
+               psw.as = psw.as == PSW_AS_HOME ? PSW_AS_HOME : PSW_AS_PRIMARY;
+
+       switch (psw.as) {
        case PSW_AS_PRIMARY:
                asce->val = vcpu->arch.sie_block->gcr[1];
                return 0;
@@ -506,7 +509,7 @@ static int get_vcpu_asce(struct kvm_vcpu *vcpu, union asce *asce,
                asce->val = vcpu->arch.sie_block->gcr[13];
                return 0;
        case PSW_AS_ACCREG:
-               rc = ar_translation(vcpu, asce, ar, write);
+               rc = ar_translation(vcpu, asce, ar, mode);
                switch (rc) {
                case PGM_ALEN_TRANSLATION:
                case PGM_ALE_SEQUENCE:
@@ -538,7 +541,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  * @gva: guest virtual address
  * @gpa: points to where guest physical (absolute) address should be stored
  * @asce: effective asce
- * @write: indicates if access is a write access
+ * @mode: indicates the access mode to be used
  *
  * Translate a guest virtual address into a guest absolute address by means
  * of dynamic address translation as specified by the architecture.
@@ -554,7 +557,7 @@ static int deref_table(struct kvm *kvm, unsigned long gpa, unsigned long *val)
  */
 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
                                     unsigned long *gpa, const union asce asce,
-                                    int write)
+                                    enum gacc_mode mode)
 {
        union vaddress vaddr = {.addr = gva};
        union raddress raddr = {.addr = gva};
@@ -699,7 +702,7 @@ static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,
 real_address:
        raddr.addr = kvm_s390_real_to_abs(vcpu, raddr.addr);
 absolute_address:
-       if (write && dat_protection)
+       if (mode == GACC_STORE && dat_protection)
                return PGM_PROTECTION;
        if (kvm_is_error_gpa(vcpu->kvm, raddr.addr))
                return PGM_ADDRESSING;
@@ -728,7 +731,7 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
 
 static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
                            unsigned long *pages, unsigned long nr_pages,
-                           const union asce asce, int write)
+                           const union asce asce, enum gacc_mode mode)
 {
        struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -740,13 +743,13 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
        while (nr_pages) {
                ga = kvm_s390_logical_to_effective(vcpu, ga);
                tec_bits->addr = ga >> PAGE_SHIFT;
-               if (write && lap_enabled && is_low_address(ga)) {
+               if (mode == GACC_STORE && lap_enabled && is_low_address(ga)) {
                        pgm->code = PGM_PROTECTION;
                        return pgm->code;
                }
                ga &= PAGE_MASK;
                if (psw_bits(*psw).t) {
-                       rc = guest_translate(vcpu, ga, pages, asce, write);
+                       rc = guest_translate(vcpu, ga, pages, asce, mode);
                        if (rc < 0)
                                return rc;
                        if (rc == PGM_PROTECTION)
@@ -768,7 +771,7 @@ static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga,
 }
 
 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
-                unsigned long len, int write)
+                unsigned long len, enum gacc_mode mode)
 {
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
        unsigned long _len, nr_pages, gpa, idx;
@@ -780,7 +783,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 
        if (!len)
                return 0;
-       rc = get_vcpu_asce(vcpu, &asce, ar, write);
+       rc = get_vcpu_asce(vcpu, &asce, ar, mode);
        if (rc)
                return rc;
        nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
@@ -792,11 +795,11 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
        need_ipte_lock = psw_bits(*psw).t && !asce.r;
        if (need_ipte_lock)
                ipte_lock(vcpu);
-       rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, write);
+       rc = guest_page_range(vcpu, ga, pages, nr_pages, asce, mode);
        for (idx = 0; idx < nr_pages && !rc; idx++) {
                gpa = *(pages + idx) + (ga & ~PAGE_MASK);
                _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
-               if (write)
+               if (mode == GACC_STORE)
                        rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
                else
                        rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
@@ -812,7 +815,7 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
 }
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
-                     void *data, unsigned long len, int write)
+                     void *data, unsigned long len, enum gacc_mode mode)
 {
        unsigned long _len, gpa;
        int rc = 0;
@@ -820,7 +823,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
        while (len && !rc) {
                gpa = kvm_s390_real_to_abs(vcpu, gra);
                _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
-               if (write)
+               if (mode)
                        rc = write_guest_abs(vcpu, gpa, data, _len);
                else
                        rc = read_guest_abs(vcpu, gpa, data, _len);
@@ -841,7 +844,7 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
  * has to take care of this.
  */
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
-                           unsigned long *gpa, int write)
+                           unsigned long *gpa, enum gacc_mode mode)
 {
        struct kvm_s390_pgm_info *pgm = &vcpu->arch.pgm;
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
@@ -851,19 +854,19 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
 
        gva = kvm_s390_logical_to_effective(vcpu, gva);
        tec = (struct trans_exc_code_bits *)&pgm->trans_exc_code;
-       rc = get_vcpu_asce(vcpu, &asce, ar, write);
+       rc = get_vcpu_asce(vcpu, &asce, ar, mode);
        tec->addr = gva >> PAGE_SHIFT;
        if (rc)
                return rc;
        if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
-               if (write) {
+               if (mode == GACC_STORE) {
                        rc = pgm->code = PGM_PROTECTION;
                        return rc;
                }
        }
 
        if (psw_bits(*psw).t && !asce.r) {      /* Use DAT? */
-               rc = guest_translate(vcpu, gva, gpa, asce, write);
+               rc = guest_translate(vcpu, gva, gpa, asce, mode);
                if (rc > 0) {
                        if (rc == PGM_PROTECTION)
                                tec->b61 = 1;
@@ -883,7 +886,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
  * check_gva_range - test a range of guest virtual addresses for accessibility
  */
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
-                   unsigned long length, int is_write)
+                   unsigned long length, enum gacc_mode mode)
 {
        unsigned long gpa;
        unsigned long currlen;
@@ -892,7 +895,7 @@ int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
        ipte_lock(vcpu);
        while (length > 0 && !rc) {
                currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
-               rc = guest_translate_address(vcpu, gva, ar, &gpa, is_write);
+               rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
                gva += currlen;
                length -= currlen;
        }
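
Illustration (not part of this commit): with the int "write" flag replaced by enum gacc_mode, callers now name the access mode explicitly. A minimal sketch of such a caller; the helper name probe_guest_range and the use of access register 0 are assumptions.

static int probe_guest_range(struct kvm_vcpu *vcpu, unsigned long gva,
                             unsigned long len, bool for_write)
{
        enum gacc_mode mode = for_write ? GACC_STORE : GACC_FETCH;

        /* check_gva_range() walks the range page by page, as above */
        return check_gva_range(vcpu, gva, 0, len, mode);
}
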
index ef03726cc6611acd1e52fb6970e2e802a1730cbd..df0a79dd81595f43f815a8b671cb8dc6502e81e2 100644 (file)
@@ -155,16 +155,22 @@ int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
        return kvm_read_guest(vcpu->kvm, gpa, data, len);
 }
 
+enum gacc_mode {
+       GACC_FETCH,
+       GACC_STORE,
+       GACC_IFETCH,
+};
+
 int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
-                           ar_t ar, unsigned long *gpa, int write);
+                           ar_t ar, unsigned long *gpa, enum gacc_mode mode);
 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, ar_t ar,
-                   unsigned long length, int is_write);
+                   unsigned long length, enum gacc_mode mode);
 
 int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
-                unsigned long len, int write);
+                unsigned long len, enum gacc_mode mode);
 
 int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
-                     void *data, unsigned long len, int write);
+                     void *data, unsigned long len, enum gacc_mode mode);
 
 /**
  * write_guest - copy data from kernel space to guest space
@@ -215,7 +221,7 @@ static inline __must_check
 int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
                unsigned long len)
 {
-       return access_guest(vcpu, ga, ar, data, len, 1);
+       return access_guest(vcpu, ga, ar, data, len, GACC_STORE);
 }
 
 /**
@@ -235,7 +241,27 @@ static inline __must_check
 int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, ar_t ar, void *data,
               unsigned long len)
 {
-       return access_guest(vcpu, ga, ar, data, len, 0);
+       return access_guest(vcpu, ga, ar, data, len, GACC_FETCH);
+}
+
+/**
+ * read_guest_instr - copy instruction data from guest space to kernel space
+ * @vcpu: virtual cpu
+ * @data: destination address in kernel space
+ * @len: number of bytes to copy
+ *
+ * Copy @len bytes from the current psw address (guest space) to @data (kernel
+ * space).
+ *
+ * The behaviour of read_guest_instr is identical to read_guest, except that
+ * instruction data will be read from primary space when in home-space or
+ * address-space mode.
+ */
+static inline __must_check
+int read_guest_instr(struct kvm_vcpu *vcpu, void *data, unsigned long len)
+{
+       return access_guest(vcpu, vcpu->arch.sie_block->gpsw.addr, 0, data, len,
+                           GACC_IFETCH);
 }
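
Illustration (not part of this commit): a minimal sketch of how an intercept handler could use the new wrapper to peek at the opcode the guest PSW currently points to; the helper name peek_current_opcode is an assumption.

static int peek_current_opcode(struct kvm_vcpu *vcpu, u8 *opcode)
{
        /* rc > 0 is a program interruption code, rc < 0 a -errno */
        int rc = read_guest_instr(vcpu, opcode, 1);

        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        return 0;
}
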
 
 /**
index 47518a324d752286d4b0cb80784c43c316f31c45..d697312ce9ee325571f25c74fb0d963f50943537 100644 (file)
@@ -116,7 +116,7 @@ static void enable_all_hw_wp(struct kvm_vcpu *vcpu)
        if (*cr9 & PER_EVENT_STORE && *cr9 & PER_CONTROL_ALTERATION) {
                *cr9 &= ~PER_CONTROL_ALTERATION;
                *cr10 = 0;
-               *cr11 = PSW_ADDR_INSN;
+               *cr11 = -1UL;
        } else {
                *cr9 &= ~PER_CONTROL_ALTERATION;
                *cr9 |= PER_EVENT_STORE;
@@ -159,7 +159,7 @@ void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu)
                vcpu->arch.sie_block->gcr[0] &= ~0x800ul;
                vcpu->arch.sie_block->gcr[9] |= PER_EVENT_IFETCH;
                vcpu->arch.sie_block->gcr[10] = 0;
-               vcpu->arch.sie_block->gcr[11] = PSW_ADDR_INSN;
+               vcpu->arch.sie_block->gcr[11] = -1UL;
        }
 
        if (guestdbg_hw_bp_enabled(vcpu)) {
index d53c10753c466b47b6fcb4813ec9e3445ca47a04..2e6b54e4d3f955d1c7971fce4ad02551eeac3846 100644 (file)
@@ -38,17 +38,32 @@ static const intercept_handler_t instruction_handlers[256] = {
        [0xeb] = kvm_s390_handle_eb,
 };
 
-void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc)
+u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu)
 {
        struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+       u8 ilen = 0;
 
-       /* Use the length of the EXECUTE instruction if necessary */
-       if (sie_block->icptstatus & 1) {
-               ilc = (sie_block->icptstatus >> 4) & 0x6;
-               if (!ilc)
-                       ilc = 4;
+       switch (vcpu->arch.sie_block->icptcode) {
+       case ICPT_INST:
+       case ICPT_INSTPROGI:
+       case ICPT_OPEREXC:
+       case ICPT_PARTEXEC:
+       case ICPT_IOINST:
+               /* instruction only stored for these icptcodes */
+               ilen = insn_length(vcpu->arch.sie_block->ipa >> 8);
+               /* Use the length of the EXECUTE instruction if necessary */
+               if (sie_block->icptstatus & 1) {
+                       ilen = (sie_block->icptstatus >> 4) & 0x6;
+                       if (!ilen)
+                               ilen = 4;
+               }
+               break;
+       case ICPT_PROGI:
+               /* bits 1+2 of pgmilc are the ilc, so we directly get ilen */
+               ilen = vcpu->arch.sie_block->pgmilc & 0x6;
+               break;
        }
-       sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilc);
+       return ilen;
 }
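
Illustration (not part of this commit), based on the z/Architecture instruction formats: the length of an instruction is encoded in the two most significant bits of its first opcode byte, which is the mapping insn_length() relies on above. Conceptual sketch, helper name assumed:

static inline u8 opcode_to_ilen(u8 opcode)
{
        switch (opcode >> 6) {
        case 0:
                return 2;       /* bits 00: 2-byte instruction */
        case 1:
        case 2:
                return 4;       /* bits 01 or 10: 4-byte instruction */
        default:
                return 6;       /* bits 11: 6-byte instruction */
        }
}
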
 
 static int handle_noop(struct kvm_vcpu *vcpu)
@@ -121,11 +136,13 @@ static int handle_instruction(struct kvm_vcpu *vcpu)
        return -EOPNOTSUPP;
 }
 
-static void __extract_prog_irq(struct kvm_vcpu *vcpu,
-                              struct kvm_s390_pgm_info *pgm_info)
+static int inject_prog_on_prog_intercept(struct kvm_vcpu *vcpu)
 {
-       memset(pgm_info, 0, sizeof(struct kvm_s390_pgm_info));
-       pgm_info->code = vcpu->arch.sie_block->iprcc;
+       struct kvm_s390_pgm_info pgm_info = {
+               .code = vcpu->arch.sie_block->iprcc,
+               /* the PSW has already been rewound */
+               .flags = KVM_S390_PGM_FLAGS_NO_REWIND,
+       };
 
        switch (vcpu->arch.sie_block->iprcc & ~PGM_PER) {
        case PGM_AFX_TRANSLATION:
@@ -138,7 +155,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
        case PGM_PRIMARY_AUTHORITY:
        case PGM_SECONDARY_AUTHORITY:
        case PGM_SPACE_SWITCH:
-               pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
+               pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
                break;
        case PGM_ALEN_TRANSLATION:
        case PGM_ALE_SEQUENCE:
@@ -146,7 +163,7 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
        case PGM_ASTE_SEQUENCE:
        case PGM_ASTE_VALIDITY:
        case PGM_EXTENDED_AUTHORITY:
-               pgm_info->exc_access_id = vcpu->arch.sie_block->eai;
+               pgm_info.exc_access_id = vcpu->arch.sie_block->eai;
                break;
        case PGM_ASCE_TYPE:
        case PGM_PAGE_TRANSLATION:
@@ -154,32 +171,33 @@ static void __extract_prog_irq(struct kvm_vcpu *vcpu,
        case PGM_REGION_SECOND_TRANS:
        case PGM_REGION_THIRD_TRANS:
        case PGM_SEGMENT_TRANSLATION:
-               pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
-               pgm_info->exc_access_id  = vcpu->arch.sie_block->eai;
-               pgm_info->op_access_id  = vcpu->arch.sie_block->oai;
+               pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
+               pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
+               pgm_info.op_access_id  = vcpu->arch.sie_block->oai;
                break;
        case PGM_MONITOR:
-               pgm_info->mon_class_nr = vcpu->arch.sie_block->mcn;
-               pgm_info->mon_code = vcpu->arch.sie_block->tecmc;
+               pgm_info.mon_class_nr = vcpu->arch.sie_block->mcn;
+               pgm_info.mon_code = vcpu->arch.sie_block->tecmc;
                break;
        case PGM_VECTOR_PROCESSING:
        case PGM_DATA:
-               pgm_info->data_exc_code = vcpu->arch.sie_block->dxc;
+               pgm_info.data_exc_code = vcpu->arch.sie_block->dxc;
                break;
        case PGM_PROTECTION:
-               pgm_info->trans_exc_code = vcpu->arch.sie_block->tecmc;
-               pgm_info->exc_access_id  = vcpu->arch.sie_block->eai;
+               pgm_info.trans_exc_code = vcpu->arch.sie_block->tecmc;
+               pgm_info.exc_access_id  = vcpu->arch.sie_block->eai;
                break;
        default:
                break;
        }
 
        if (vcpu->arch.sie_block->iprcc & PGM_PER) {
-               pgm_info->per_code = vcpu->arch.sie_block->perc;
-               pgm_info->per_atmid = vcpu->arch.sie_block->peratmid;
-               pgm_info->per_address = vcpu->arch.sie_block->peraddr;
-               pgm_info->per_access_id = vcpu->arch.sie_block->peraid;
+               pgm_info.per_code = vcpu->arch.sie_block->perc;
+               pgm_info.per_atmid = vcpu->arch.sie_block->peratmid;
+               pgm_info.per_address = vcpu->arch.sie_block->peraddr;
+               pgm_info.per_access_id = vcpu->arch.sie_block->peraid;
        }
+       return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
 
 /*
@@ -208,7 +226,6 @@ static int handle_itdb(struct kvm_vcpu *vcpu)
 
 static int handle_prog(struct kvm_vcpu *vcpu)
 {
-       struct kvm_s390_pgm_info pgm_info;
        psw_t psw;
        int rc;
 
@@ -234,8 +251,7 @@ static int handle_prog(struct kvm_vcpu *vcpu)
        if (rc)
                return rc;
 
-       __extract_prog_irq(vcpu, &pgm_info);
-       return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+       return inject_prog_on_prog_intercept(vcpu);
 }
 
 /**
@@ -302,7 +318,7 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 
        /* Make sure that the source is paged-in */
        rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg2],
-                                    reg2, &srcaddr, 0);
+                                    reg2, &srcaddr, GACC_FETCH);
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        rc = kvm_arch_fault_in_page(vcpu, srcaddr, 0);
@@ -311,14 +327,14 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 
        /* Make sure that the destination is paged-in */
        rc = guest_translate_address(vcpu, vcpu->run->s.regs.gprs[reg1],
-                                    reg1, &dstaddr, 1);
+                                    reg1, &dstaddr, GACC_STORE);
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        rc = kvm_arch_fault_in_page(vcpu, dstaddr, 1);
        if (rc != 0)
                return rc;
 
-       kvm_s390_rewind_psw(vcpu, 4);
+       kvm_s390_retry_instr(vcpu);
 
        return 0;
 }
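
Illustration (not part of this commit): kvm_s390_retry_instr() replaces the hard-coded rewind by 4 bytes. Conceptually it moves the PSW back by the length of the intercepted instruction so the guest re-executes it once the condition has been resolved, roughly:

static void retry_current_instr(struct kvm_vcpu *vcpu)  /* name assumed */
{
        u8 ilen = kvm_s390_get_ilen(vcpu);      /* 2, 4 or 6 bytes */

        /* the PSW points past the instruction; back it up so SIE retries it */
        kvm_s390_rewind_psw(vcpu, ilen);
}
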
index f88ca72c3a77a52e05e65cfa79206cd8e41aa786..87e2d1a89d74eaba5e398392ee2bcd86cdbb0acb 100644 (file)
@@ -335,23 +335,6 @@ static void set_intercept_indicators(struct kvm_vcpu *vcpu)
        set_intercept_indicators_stop(vcpu);
 }
 
-static u16 get_ilc(struct kvm_vcpu *vcpu)
-{
-       switch (vcpu->arch.sie_block->icptcode) {
-       case ICPT_INST:
-       case ICPT_INSTPROGI:
-       case ICPT_OPEREXC:
-       case ICPT_PARTEXEC:
-       case ICPT_IOINST:
-               /* last instruction only stored for these icptcodes */
-               return insn_length(vcpu->arch.sie_block->ipa >> 8);
-       case ICPT_PROGI:
-               return vcpu->arch.sie_block->pgmilc;
-       default:
-               return 0;
-       }
-}
-
 static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
 {
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -588,7 +571,7 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_pgm_info pgm_info;
        int rc = 0, nullifying = false;
-       u16 ilc = get_ilc(vcpu);
+       u16 ilen;
 
        spin_lock(&li->lock);
        pgm_info = li->irq.pgm;
@@ -596,8 +579,9 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
        memset(&li->irq.pgm, 0, sizeof(pgm_info));
        spin_unlock(&li->lock);
 
-       VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilc:%d",
-                  pgm_info.code, ilc);
+       ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
+       VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
+                  pgm_info.code, ilen);
        vcpu->stat.deliver_program_int++;
        trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                         pgm_info.code, 0);
@@ -681,10 +665,11 @@ static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
                                   (u8 *) __LC_PER_ACCESS_ID);
        }
 
-       if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
-               kvm_s390_rewind_psw(vcpu, ilc);
+       if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
+               kvm_s390_rewind_psw(vcpu, ilen);
 
-       rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
+       /* bits 1+2 of the target are the ilc, so we can directly use ilen */
+       rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
        rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
                                 (u64 *) __LC_LAST_BREAK);
        rc |= put_guest_lc(vcpu, pgm_info.code,
@@ -1059,8 +1044,16 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
                                   irq->u.pgm.code, 0);
 
+       if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
+               /* auto detection if no valid ILC was given */
+               irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
+               irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
+               irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
+       }
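        /*
         * Illustration, not part of this commit: an injector that already
         * knows the instruction length can supply it directly, e.g.
         *      irq->u.pgm.flags = 4 | KVM_S390_PGM_FLAGS_ILC_VALID;
         * and skip the auto-detection above. The ILC value lives in the low
         * bits of the flags and is read back with KVM_S390_PGM_FLAGS_ILC_MASK.
         */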
+
        if (irq->u.pgm.code == PGM_PER) {
                li->irq.pgm.code |= PGM_PER;
+               li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify PER related information */
                li->irq.pgm.per_address = irq->u.pgm.per_address;
                li->irq.pgm.per_code = irq->u.pgm.per_code;
@@ -1069,6 +1062,7 @@ static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
        } else if (!(irq->u.pgm.code & PGM_PER)) {
                li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
                                   irq->u.pgm.code;
+               li->irq.pgm.flags = irq->u.pgm.flags;
                /* only modify non-PER information */
                li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
                li->irq.pgm.mon_code = irq->u.pgm.mon_code;
index 835d60bedb545fa1873b6b37e57dffbec3bb9600..28bd5ea1b08f2082a77e78047cd5eb6f650fff29 100644 (file)
@@ -274,7 +274,6 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm,
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;
 
-       down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
@@ -282,8 +281,10 @@ static void kvm_s390_sync_dirty_log(struct kvm *kvm,
 
                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
+               if (fatal_signal_pending(current))
+                       return;
+               cond_resched();
        }
-       up_read(&gmap->mm->mmap_sem);
 }
 
 /* Section: vm related */
@@ -1414,8 +1415,13 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                                    KVM_SYNC_PFAULT;
        if (test_kvm_facility(vcpu->kvm, 64))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
-       if (test_kvm_facility(vcpu->kvm, 129))
+       /* fprs can be synchronized via vrs, even if the guest has no vx. With
+        * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
+        */
+       if (MACHINE_HAS_VX)
                vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
+       else
+               vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
 
        if (kvm_is_ucontrol(vcpu->kvm))
                return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1423,44 +1429,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        return 0;
 }
 
-/*
- * Backs up the current FP/VX register save area on a particular
- * destination.  Used to switch between different register save
- * areas.
- */
-static inline void save_fpu_to(struct fpu *dst)
-{
-       dst->fpc = current->thread.fpu.fpc;
-       dst->regs = current->thread.fpu.regs;
-}
-
-/*
- * Switches the FP/VX register save area from which to lazy
- * restore register contents.
- */
-static inline void load_fpu_from(struct fpu *from)
-{
-       current->thread.fpu.fpc = from->fpc;
-       current->thread.fpu.regs = from->regs;
-}
-
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        /* Save host register state */
        save_fpu_regs();
-       save_fpu_to(&vcpu->arch.host_fpregs);
-
-       if (test_kvm_facility(vcpu->kvm, 129)) {
-               current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
-               /*
-                * Use the register save area in the SIE-control block
-                * for register restore and save in kvm_arch_vcpu_put()
-                */
-               current->thread.fpu.vxrs =
-                       (__vector128 *)&vcpu->run->s.regs.vrs;
-       } else
-               load_fpu_from(&vcpu->arch.guest_fpregs);
+       vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
+       vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
 
+       if (MACHINE_HAS_VX)
+               current->thread.fpu.regs = vcpu->run->s.regs.vrs;
+       else
+               current->thread.fpu.regs = vcpu->run->s.regs.fprs;
+       current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
        if (test_fp_ctl(current->thread.fpu.fpc))
                /* User space provided an invalid FPC, let's clear it */
                current->thread.fpu.fpc = 0;
@@ -1476,19 +1456,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
 
+       /* Save guest register state */
        save_fpu_regs();
+       vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
 
-       if (test_kvm_facility(vcpu->kvm, 129))
-               /*
-                * kvm_arch_vcpu_load() set up the register save area to
-                * the &vcpu->run->s.regs.vrs and, thus, the vector registers
-                * are already saved.  Only the floating-point control must be
-                * copied.
-                */
-               vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
-       else
-               save_fpu_to(&vcpu->arch.guest_fpregs);
-       load_fpu_from(&vcpu->arch.host_fpregs);
+       /* Restore host register state */
+       current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
+       current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
 
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
@@ -1506,8 +1480,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
-       vcpu->arch.guest_fpregs.fpc = 0;
-       asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+       /* make sure the new fpc will be lazily loaded */
+       save_fpu_regs();
+       current->thread.fpu.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
@@ -1648,17 +1623,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
 
-       /*
-        * Allocate a save area for floating-point registers.  If the vector
-        * extension is available, register contents are saved in the SIE
-        * control block.  The allocated save area is still required in
-        * particular places, for example, in kvm_s390_vcpu_store_status().
-        */
-       vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
-                                              GFP_KERNEL);
-       if (!vcpu->arch.guest_fpregs.fprs)
-               goto out_free_sie_block;
-
        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
@@ -1879,19 +1843,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
+       /* make sure the new values will be lazily loaded */
+       save_fpu_regs();
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
-       memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
-       vcpu->arch.guest_fpregs.fpc = fpu->fpc;
-       save_fpu_regs();
-       load_fpu_from(&vcpu->arch.guest_fpregs);
+       current->thread.fpu.fpc = fpu->fpc;
+       if (MACHINE_HAS_VX)
+               convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
+       else
+               memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
-       fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+       /* make sure we have the latest values */
+       save_fpu_regs();
+       if (MACHINE_HAS_VX)
+               convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
+       else
+               memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
+       fpu->fpc = current->thread.fpu.fpc;
        return 0;
 }
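
Illustration (not part of this commit): floating-point register i overlays the leftmost 64 bits of vector register i for i = 0..15, which is the overlap that convert_fp_to_vx()/convert_vx_to_fp() above rely on. A conceptual sketch using generic types; the helper name and the vrs[32][2] layout are assumptions:

static void fprs_from_vrs(u64 *fprs, const u64 (*vrs)[2])
{
        int i;

        for (i = 0; i < 16; i++)
                fprs[i] = vrs[i][0];    /* leftmost doubleword (big-endian) */
}
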
 
@@ -2192,8 +2164,10 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
 
 static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
 {
-       psw_t *psw = &vcpu->arch.sie_block->gpsw;
-       u8 opcode;
+       struct kvm_s390_pgm_info pgm_info = {
+               .code = PGM_ADDRESSING,
+       };
+       u8 opcode, ilen;
        int rc;
 
        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
@@ -2207,12 +2181,21 @@ static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
         * to look up the current opcode to get the length of the instruction
         * to be able to forward the PSW.
         */
-       rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
-       if (rc)
-               return kvm_s390_inject_prog_cond(vcpu, rc);
-       psw->addr = __rewind_psw(*psw, -insn_length(opcode));
-
-       return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       rc = read_guest_instr(vcpu, &opcode, 1);
+       ilen = insn_length(opcode);
+       if (rc < 0) {
+               return rc;
+       } else if (rc) {
+               /* Instruction-Fetching Exceptions - we can't detect the ilen.
+                * Forward by arbitrary ilc, injection will take care of
+                * nullification if necessary.
+                */
+               pgm_info = vcpu->arch.pgm;
+               ilen = 4;
+       }
+       pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
+       kvm_s390_forward_psw(vcpu, ilen);
+       return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
 }
 
 static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
@@ -2396,6 +2379,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
 {
        unsigned char archmode = 1;
+       freg_t fprs[NUM_FPRS];
        unsigned int px;
        u64 clkcomp;
        int rc;
@@ -2411,8 +2395,16 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
                gpa = px;
        } else
                gpa -= __LC_FPREGS_SAVE_AREA;
-       rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
-                            vcpu->arch.guest_fpregs.fprs, 128);
+
+       /* manually convert vector registers if necessary */
+       if (MACHINE_HAS_VX) {
+               convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
+               rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+                                    fprs, 128);
+       } else {
+               rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
+                                    vcpu->run->s.regs.fprs, 128);
+       }
        rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
                              vcpu->run->s.regs.gprs, 128);
        rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
@@ -2420,7 +2412,7 @@ int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
        rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
                              &px, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
-                             &vcpu->arch.guest_fpregs.fpc, 4);
+                             &vcpu->run->s.regs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
                              &vcpu->arch.sie_block->todpr, 4);
        rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
@@ -2443,19 +2435,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
         * it into the save area
         */
        save_fpu_regs();
-       if (test_kvm_facility(vcpu->kvm, 129)) {
-               /*
-                * If the vector extension is available, the vector registers
-                * which overlaps with floating-point registers are saved in
-                * the SIE-control block.  Hence, extract the floating-point
-                * registers and the FPC value and store them in the
-                * guest_fpregs structure.
-                */
-               vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
-               convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
-                                current->thread.fpu.vxrs);
-       } else
-               save_fpu_to(&vcpu->arch.guest_fpregs);
+       vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
        save_access_regs(vcpu->run->s.regs.acrs);
 
        return kvm_s390_store_status_unloaded(vcpu, addr);
@@ -2642,7 +2622,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
        switch (mop->op) {
        case KVM_S390_MEMOP_LOGICAL_READ:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
-                       r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
+                       r = check_gva_range(vcpu, mop->gaddr, mop->ar,
+                                           mop->size, GACC_FETCH);
                        break;
                }
                r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
@@ -2653,7 +2634,8 @@ static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
                break;
        case KVM_S390_MEMOP_LOGICAL_WRITE:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
-                       r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
+                       r = check_gva_range(vcpu, mop->gaddr, mop->ar,
+                                           mop->size, GACC_STORE);
                        break;
                }
                if (copy_from_user(tmpbuf, uaddr, mop->size)) {
index df1abada1f36dfc3579ab6ff2ef45c7db828490b..1c756c7dd0c2cdda4409ca496a09d5ece1beae94 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <asm/facility.h>
+#include <asm/processor.h>
 
 typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
 
@@ -212,8 +213,22 @@ int kvm_s390_reinject_io_int(struct kvm *kvm,
 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
 
 /* implemented in intercept.c */
-void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilc);
+u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
 int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
+static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
+{
+       struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;
+
+       sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
+}
+static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
+{
+       kvm_s390_rewind_psw(vcpu, -ilen);
+}
+static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
+{
+       kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
+}
 
 /* implemented in priv.c */
 int is_valid_psw(psw_t *psw);
index ed74e86d9b9eb60c88b2f91e671c9d9b9c56f316..add99094598645cf9a187c6cd29dd6f92764fe1f 100644 (file)
@@ -173,7 +173,7 @@ static int handle_skey(struct kvm_vcpu *vcpu)
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 
-       kvm_s390_rewind_psw(vcpu, 4);
+       kvm_s390_retry_instr(vcpu);
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
 }
@@ -184,7 +184,7 @@ static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
        if (psw_bits(vcpu->arch.sie_block->gpsw).p)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
-       kvm_s390_rewind_psw(vcpu, 4);
+       kvm_s390_retry_instr(vcpu);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
 }
@@ -759,8 +759,8 @@ static int handle_essa(struct kvm_vcpu *vcpu)
        if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
 
-       /* Rewind PSW to repeat the ESSA instruction */
-       kvm_s390_rewind_psw(vcpu, 4);
+       /* Retry the ESSA instruction */
+       kvm_s390_retry_instr(vcpu);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        down_read(&gmap->mm->mmap_sem);
@@ -981,11 +981,12 @@ static int handle_tprot(struct kvm_vcpu *vcpu)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_lock(vcpu);
-       ret = guest_translate_address(vcpu, address1, ar, &gpa, 1);
+       ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
        if (ret == PGM_PROTECTION) {
                /* Write protected? Try again with read-only... */
                cc = 1;
-               ret = guest_translate_address(vcpu, address1, ar, &gpa, 0);
+               ret = guest_translate_address(vcpu, address1, ar, &gpa,
+                                             GACC_FETCH);
        }
        if (ret) {
                if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
index 1b903f6ad54a327ca73ee88bdecee0a73e1eaf42..791a4146052c7a48fdb72a6ef3b9e0d1abc06eeb 100644 (file)
@@ -228,7 +228,7 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
                return;
        printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
               regs->int_code & 0xffff, regs->int_code >> 17);
-       print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
+       print_vma_addr(KERN_CONT "in ", regs->psw.addr);
        printk(KERN_CONT "\n");
        printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
               regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long);
@@ -256,9 +256,9 @@ static noinline void do_no_context(struct pt_regs *regs)
        const struct exception_table_entry *fixup;
 
        /* Are we prepared to handle this kernel fault?  */
-       fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+       fixup = search_exception_tables(regs->psw.addr);
        if (fixup) {
-               regs->psw.addr = extable_fixup(fixup) | PSW_ADDR_AMODE;
+               regs->psw.addr = extable_fixup(fixup);
                return;
        }
 
index c722400c769784d8df6f13507693e117ad942497..73e29033709285b44ceb5609919c07aff86e9de9 100644 (file)
@@ -98,7 +98,7 @@ void __init paging_init(void)
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
-       arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
+       __arch_local_irq_stosm(0x04);
 
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
index ea01477b4aa67155f87b88e8c6a87f5374eda247..45c4daa49930f9a2bdfaf1d07a6ab5e002aa99c2 100644 (file)
@@ -169,12 +169,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 
 int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
 {
-       if (is_compat_task() || (TASK_SIZE >= (1UL << 53)))
+       if (is_compat_task() || TASK_SIZE >= TASK_MAX_SIZE)
                return 0;
        if (!(flags & MAP_FIXED))
                addr = 0;
        if ((addr + len) >= TASK_SIZE)
-               return crst_table_upgrade(current->mm, 1UL << 53);
+               return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
        return 0;
 }
 
@@ -189,9 +189,9 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
        area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
        if (!(area & ~PAGE_MASK))
                return area;
-       if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+       if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
                /* Upgrade the page table to 4 levels and retry. */
-               rc = crst_table_upgrade(mm, 1UL << 53);
+               rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
                if (rc)
                        return (unsigned long) rc;
                area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -211,9 +211,9 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
        area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
        if (!(area & ~PAGE_MASK))
                return area;
-       if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
+       if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
                /* Upgrade the page table to 4 levels and retry. */
-               rc = crst_table_upgrade(mm, 1UL << 53);
+               rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
                if (rc)
                        return (unsigned long) rc;
                area = arch_get_unmapped_area_topdown(filp, addr, len,
index a809fa8e6f8bd01d2c0ad90891060d6aabd36a80..5109827883acaefc6afc0ac913956fcb79f8f8a0 100644 (file)
@@ -55,7 +55,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
        unsigned long entry;
        int flush;
 
-       BUG_ON(limit > (1UL << 53));
+       BUG_ON(limit > TASK_MAX_SIZE);
        flush = 0;
 repeat:
        table = crst_table_alloc(mm);
index 43f32ce60aa3d98af0b7665090fa3eb080d12fa7..2794845061c66fe3605c0b2e97b591b6cf176d32 100644 (file)
@@ -57,9 +57,7 @@ static __init pg_data_t *alloc_node_data(void)
 {
        pg_data_t *res;
 
-       res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 1);
-       if (!res)
-               panic("Could not allocate memory for node data!\n");
+       res = (pg_data_t *) memblock_alloc(sizeof(pg_data_t), 8);
        memset(res, 0, sizeof(pg_data_t));
        return res;
 }
@@ -162,7 +160,7 @@ static int __init numa_init_late(void)
                register_one_node(nid);
        return 0;
 }
-device_initcall(numa_init_late);
+arch_initcall(numa_init_late);
 
 static int __init parse_debug(char *parm)
 {
index 8a6811b2cdb9523a24cd32341dee0fcef317f661..1884e17595294bbfafdcaf8e183fd80e40d8ea48 100644 (file)
@@ -16,24 +16,23 @@ __show_trace(unsigned int *depth, unsigned long sp,
        struct pt_regs *regs;
 
        while (*depth) {
-               sp = sp & PSW_ADDR_INSN;
                if (sp < low || sp > high - sizeof(*sf))
                        return sp;
                sf = (struct stack_frame *) sp;
                (*depth)--;
-               oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+               oprofile_add_trace(sf->gprs[8]);
 
                /* Follow the backchain.  */
                while (*depth) {
                        low = sp;
-                       sp = sf->back_chain & PSW_ADDR_INSN;
+                       sp = sf->back_chain;
                        if (!sp)
                                break;
                        if (sp <= low || sp > high - sizeof(*sf))
                                return sp;
                        sf = (struct stack_frame *) sp;
                        (*depth)--;
-                       oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+                       oprofile_add_trace(sf->gprs[8]);
 
                }
 
@@ -46,7 +45,7 @@ __show_trace(unsigned int *depth, unsigned long sp,
                        return sp;
                regs = (struct pt_regs *) sp;
                (*depth)--;
-               oprofile_add_trace(sf->gprs[8] & PSW_ADDR_INSN);
+               oprofile_add_trace(sf->gprs[8]);
                low = sp;
                sp = regs->gprs[15];
        }
@@ -55,12 +54,13 @@ __show_trace(unsigned int *depth, unsigned long sp,
 
 void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
-       unsigned long head;
+       unsigned long head, frame_size;
        struct stack_frame* head_sf;
 
        if (user_mode(regs))
                return;
 
+       frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
        head = regs->gprs[15];
        head_sf = (struct stack_frame*)head;
 
@@ -69,8 +69,9 @@ void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
 
        head = head_sf->back_chain;
 
-       head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE,
-                           S390_lowcore.async_stack);
+       head = __show_trace(&depth, head,
+                           S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+                           S390_lowcore.async_stack + frame_size);
 
        __show_trace(&depth, head, S390_lowcore.thread_info,
                     S390_lowcore.thread_info + THREAD_SIZE);
index 11d4f277e9f69ac3d266e14ca9804e4b6c844fac..8f19c8f9d660570839a69f60477f855181ba4e7c 100644 (file)
@@ -68,9 +68,12 @@ static struct airq_struct zpci_airq = {
        .isc = PCI_ISC,
 };
 
-/* I/O Map */
+#define ZPCI_IOMAP_ENTRIES                                             \
+       min(((unsigned long) CONFIG_PCI_NR_FUNCTIONS * PCI_BAR_COUNT),  \
+           ZPCI_IOMAP_MAX_ENTRIES)
+
 static DEFINE_SPINLOCK(zpci_iomap_lock);
-static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
+static unsigned long *zpci_iomap_bitmap;
 struct zpci_iomap_entry *zpci_iomap_start;
 EXPORT_SYMBOL_GPL(zpci_iomap_start);
 
@@ -265,27 +268,20 @@ void __iomem *pci_iomap_range(struct pci_dev *pdev,
                              unsigned long max)
 {
        struct zpci_dev *zdev = to_zpci(pdev);
-       u64 addr;
        int idx;
 
-       if ((bar & 7) != bar)
+       if (!pci_resource_len(pdev, bar))
                return NULL;
 
        idx = zdev->bars[bar].map_idx;
        spin_lock(&zpci_iomap_lock);
-       if (zpci_iomap_start[idx].count++) {
-               BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
-                      zpci_iomap_start[idx].bar != bar);
-       } else {
-               zpci_iomap_start[idx].fh = zdev->fh;
-               zpci_iomap_start[idx].bar = bar;
-       }
        /* Detect overrun */
-       BUG_ON(!zpci_iomap_start[idx].count);
+       WARN_ON(!++zpci_iomap_start[idx].count);
+       zpci_iomap_start[idx].fh = zdev->fh;
+       zpci_iomap_start[idx].bar = bar;
        spin_unlock(&zpci_iomap_lock);
 
-       addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
-       return (void __iomem *) addr + offset;
+       return (void __iomem *) ZPCI_ADDR(idx) + offset;
 }
 EXPORT_SYMBOL(pci_iomap_range);
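
Illustration (not part of this commit), inferred from the open-coded arithmetic removed above: the new ZPCI_ADDR()/ZPCI_IDX() helpers presumably place the iomap slot index in the address bits above the 48-bit offset, roughly equivalent to:

#define EXAMPLE_ZPCI_ADDR(idx)  (ZPCI_IOMAP_ADDR_BASE | ((u64)(idx) << 48))
#define EXAMPLE_ZPCI_IDX(addr)  \
        ((((__force u64)(addr)) & ~ZPCI_IOMAP_ADDR_BASE) >> 48)
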
 
@@ -297,12 +293,11 @@ EXPORT_SYMBOL(pci_iomap);
 
 void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 {
-       unsigned int idx;
+       unsigned int idx = ZPCI_IDX(addr);
 
-       idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
        spin_lock(&zpci_iomap_lock);
        /* Detect underrun */
-       BUG_ON(!zpci_iomap_start[idx].count);
+       WARN_ON(!zpci_iomap_start[idx].count);
        if (!--zpci_iomap_start[idx].count) {
                zpci_iomap_start[idx].fh = 0;
                zpci_iomap_start[idx].bar = 0;
@@ -544,15 +539,15 @@ static void zpci_irq_exit(void)
 
 static int zpci_alloc_iomap(struct zpci_dev *zdev)
 {
-       int entry;
+       unsigned long entry;
 
        spin_lock(&zpci_iomap_lock);
-       entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
-       if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
+       entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
+       if (entry == ZPCI_IOMAP_ENTRIES) {
                spin_unlock(&zpci_iomap_lock);
                return -ENOSPC;
        }
-       set_bit(entry, zpci_iomap);
+       set_bit(entry, zpci_iomap_bitmap);
        spin_unlock(&zpci_iomap_lock);
        return entry;
 }
@@ -561,7 +556,7 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
 {
        spin_lock(&zpci_iomap_lock);
        memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
-       clear_bit(entry, zpci_iomap);
+       clear_bit(entry, zpci_iomap_bitmap);
        spin_unlock(&zpci_iomap_lock);
 }
 
@@ -611,8 +606,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
                if (zdev->bars[i].val & 4)
                        flags |= IORESOURCE_MEM_64;
 
-               addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
-
+               addr = ZPCI_ADDR(entry);
                size = 1UL << zdev->bars[i].size;
 
                res = __alloc_res(zdev, addr, size, flags);
@@ -873,23 +867,30 @@ static int zpci_mem_init(void)
        zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
                                16, 0, NULL);
        if (!zdev_fmb_cache)
-               goto error_zdev;
+               goto error_fmb;
 
-       /* TODO: use realloc */
-       zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
-                                  GFP_KERNEL);
+       zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
+                                  sizeof(*zpci_iomap_start), GFP_KERNEL);
        if (!zpci_iomap_start)
                goto error_iomap;
-       return 0;
 
+       zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
+                                   sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
+       if (!zpci_iomap_bitmap)
+               goto error_iomap_bitmap;
+
+       return 0;
+error_iomap_bitmap:
+       kfree(zpci_iomap_start);
 error_iomap:
        kmem_cache_destroy(zdev_fmb_cache);
-error_zdev:
+error_fmb:
        return -ENOMEM;
 }
 
 static void zpci_mem_exit(void)
 {
+       kfree(zpci_iomap_bitmap);
        kfree(zpci_iomap_start);
        kmem_cache_destroy(zdev_fmb_cache);
 }
index 369a3e05d468dbf1973df24c77e4ae07e580c3b6..b0e04751c5d599fbdbb4bdb68fdbed5dcb4039d5 100644 (file)
@@ -53,6 +53,11 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
 
        pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
               pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+
+       if (!pdev)
+               return;
+
+       pdev->error_state = pci_channel_io_perm_failure;
 }
 
 void zpci_event_error(void *data)
index b48459afefddb835582e548f5b8d7b4ffd026e0d..f3a0649ab5217ac5f87b83580723dfda40fff3de 100644 (file)
@@ -101,7 +101,7 @@ static void __init resource_init(void)
        res->name = "System RAM";
        res->start = MEMORY_START;
        res->end = MEMORY_START + MEMORY_SIZE - 1;
-       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        request_resource(&iomem_resource, res);
 
        request_resource(res, &code_resource);
index f887c6465a821b7b96a2a300268f077afd70ae5a..8a84e05adb2e122fa0d39a1c2acdfadf63202266 100644 (file)
@@ -33,7 +33,6 @@
 #endif
 
 #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#define smp_store_mb(var, value) __smp_store_mb(var, value)
 
 #include <asm-generic/barrier.h>
 
index de19cfa768f208708406e3350b5e9aecc92a6266..3f1c18b28e8af5fc414c13ebeb29ccbf04cd63a5 100644 (file)
@@ -78,17 +78,17 @@ static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };
 
 static struct resource code_resource = {
        .name = "Kernel code",
-       .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+       .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 };
 
 static struct resource data_resource = {
        .name = "Kernel data",
-       .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
+       .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 };
 
 static struct resource bss_resource = {
        .name   = "Kernel bss",
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM,
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
 };
 
 unsigned long memory_start;
@@ -202,7 +202,7 @@ void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
        res->name = "System RAM";
        res->start = start;
        res->end = end - 1;
-       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
        if (request_resource(&iomem_resource, res)) {
                pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
index 1c26d440d288dfb8d28579fda55e2a76ccf4c139..b6de8b10a55b8b8f09eedb2c90d86906d5445686 100644 (file)
 #define __NR_listen            354
 #define __NR_setsockopt                355
 #define __NR_mlock2            356
+#define __NR_copy_file_range   357
 
-#define NR_syscalls            357
+#define NR_syscalls            358
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 33c02b15f47859a8262677d08635fcfdbb8872cb..a83707c83be803b78b3019cac6dde9dba2cfabd6 100644 (file)
@@ -948,7 +948,24 @@ linux_syscall_trace:
        cmp     %o0, 0
        bne     3f
         mov    -ENOSYS, %o0
+
+       /* Syscall tracing can modify the registers.  */
+       ld      [%sp + STACKFRAME_SZ + PT_G1], %g1
+       sethi   %hi(sys_call_table), %l7
+       ld      [%sp + STACKFRAME_SZ + PT_I0], %i0
+       or      %l7, %lo(sys_call_table), %l7
+       ld      [%sp + STACKFRAME_SZ + PT_I1], %i1
+       ld      [%sp + STACKFRAME_SZ + PT_I2], %i2
+       ld      [%sp + STACKFRAME_SZ + PT_I3], %i3
+       ld      [%sp + STACKFRAME_SZ + PT_I4], %i4
+       ld      [%sp + STACKFRAME_SZ + PT_I5], %i5
+       cmp     %g1, NR_syscalls
+       bgeu    3f
+        mov    -ENOSYS, %o0
+
+       sll     %g1, 2, %l4
        mov     %i0, %o0
+       ld      [%l7 + %l4], %l7
        mov     %i1, %o1
        mov     %i2, %o2
        mov     %i3, %o3
index f2d30cab5b3f388fa9b446cf5b12afda92fa0a9d..cd1f592cd3479f8c94c599bfc589087184d4d180 100644 (file)
@@ -696,14 +696,6 @@ tlb_fixup_done:
        call    __bzero
         sub    %o1, %o0, %o1
 
-#ifdef CONFIG_LOCKDEP
-       /* We have this call this super early, as even prom_init can grab
-        * spinlocks and thus call into the lockdep code.
-        */
-       call    lockdep_init
-        nop
-#endif
-
        call    prom_init
         mov    %l7, %o0                        ! OpenPROM cif handler
 
index afbaba52d2f16cb30daf092f5cd3986e7ca9c299..d127130bf4246032d39cf923248fce7eebf92867 100644 (file)
@@ -338,8 +338,9 @@ ENTRY(sun4v_mach_set_watchdog)
        mov     %o1, %o4
        mov     HV_FAST_MACH_SET_WATCHDOG, %o5
        ta      HV_FAST_TRAP
+       brnz,a,pn %o4, 0f
        stx     %o1, [%o4]
-       retl
+0:     retl
         nop
 ENDPROC(sun4v_mach_set_watchdog)
 
index a92d5d2c46a3a6553bf13a57f95f65c6d61c334c..9e034f29dcc52208ca98a648e028f955f7c8f882 100644 (file)
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(sun4v_niagara_getperf);
 EXPORT_SYMBOL(sun4v_niagara_setperf);
 EXPORT_SYMBOL(sun4v_niagara2_getperf);
 EXPORT_SYMBOL(sun4v_niagara2_setperf);
+EXPORT_SYMBOL(sun4v_mach_set_watchdog);
 
 /* from hweight.S */
 EXPORT_SYMBOL(__arch_hweight8);
index bb0008927598b1f7bbeeccdf30c6fa154219a4ce..c4a1b5c40e4efc7ed17bcc0d67d1f058ea4f09f8 100644 (file)
@@ -158,7 +158,25 @@ linux_syscall_trace32:
         add    %sp, PTREGS_OFF, %o0
        brnz,pn %o0, 3f
         mov    -ENOSYS, %o0
+
+       /* Syscall tracing can modify the registers.  */
+       ldx     [%sp + PTREGS_OFF + PT_V9_G1], %g1
+       sethi   %hi(sys_call_table32), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I0], %i0
+       or      %l7, %lo(sys_call_table32), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I1], %i1
+       ldx     [%sp + PTREGS_OFF + PT_V9_I2], %i2
+       ldx     [%sp + PTREGS_OFF + PT_V9_I3], %i3
+       ldx     [%sp + PTREGS_OFF + PT_V9_I4], %i4
+       ldx     [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+       cmp     %g1, NR_syscalls
+       bgeu,pn %xcc, 3f
+        mov    -ENOSYS, %o0
+
+       sll     %g1, 2, %l4
        srl     %i0, 0, %o0
+       lduw    [%l7 + %l4], %l7
        srl     %i4, 0, %o4
        srl     %i1, 0, %o1
        srl     %i2, 0, %o2
@@ -170,7 +188,25 @@ linux_syscall_trace:
         add    %sp, PTREGS_OFF, %o0
        brnz,pn %o0, 3f
         mov    -ENOSYS, %o0
+
+       /* Syscall tracing can modify the registers.  */
+       ldx     [%sp + PTREGS_OFF + PT_V9_G1], %g1
+       sethi   %hi(sys_call_table64), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I0], %i0
+       or      %l7, %lo(sys_call_table64), %l7
+       ldx     [%sp + PTREGS_OFF + PT_V9_I1], %i1
+       ldx     [%sp + PTREGS_OFF + PT_V9_I2], %i2
+       ldx     [%sp + PTREGS_OFF + PT_V9_I3], %i3
+       ldx     [%sp + PTREGS_OFF + PT_V9_I4], %i4
+       ldx     [%sp + PTREGS_OFF + PT_V9_I5], %i5
+
+       cmp     %g1, NR_syscalls
+       bgeu,pn %xcc, 3f
+        mov    -ENOSYS, %o0
+
+       sll     %g1, 2, %l4
        mov     %i0, %o0
+       lduw    [%l7 + %l4], %l7
        mov     %i1, %o1
        mov     %i2, %o2
        mov     %i3, %o3
index e663b6c78de2e6498a5a458f1129504cae3ab2a8..6c3dd6c52f8bd09135e81f1d56704602d10c4f4e 100644 (file)
@@ -88,4 +88,4 @@ sys_call_table:
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 /*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/        .long sys_setsockopt, sys_mlock2
+/*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range
index 1557121f4cdce8a6ae21c31fb33e9aa2d7cbc443..12b524cfcfa0120caabdf7aa5b8606ecc3978c96 100644 (file)
@@ -89,7 +89,7 @@ sys_call_table32:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word compat_sys_setsockopt, sys_mlock2
+       .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
 
 #endif /* CONFIG_COMPAT */
 
@@ -170,4 +170,4 @@ sys_call_table:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word sys_setsockopt, sys_mlock2
+       .word sys_setsockopt, sys_mlock2, sys_copy_file_range
index 6f216853f2724f8395bdd36ab18c4eadbeae0366..1cfe6aab7a11572d54f6848fe4656b7b40af8cf2 100644 (file)
@@ -2863,17 +2863,17 @@ void hugetlb_setup(struct pt_regs *regs)
 
 static struct resource code_resource = {
        .name   = "Kernel code",
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 static struct resource data_resource = {
        .name   = "Kernel data",
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 static struct resource bss_resource = {
        .name   = "Kernel bss",
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 static inline resource_size_t compute_kern_paddr(void *addr)
@@ -2909,7 +2909,7 @@ static int __init report_memory(void)
                res->name = "System RAM";
                res->start = pavail[i].phys_addr;
                res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
-               res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+               res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
 
                if (insert_resource(&iomem_resource, res) < 0) {
                        pr_warn("Resource insertion failed.\n");
index a506c2c28943715770ab43fa451797a8cb1566e4..6ad99925900e0b15f68323592a2da385a10753e7 100644 (file)
@@ -126,15 +126,15 @@ void
 sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
 {
        struct pt_regs *thread_regs;
+       const int NGPRS = TREG_LAST_GPR + 1;
 
        if (task == NULL)
                return;
 
-       /* Initialize to zero. */
-       memset(gdb_regs, 0, NUMREGBYTES);
-
        thread_regs = task_pt_regs(task);
-       memcpy(gdb_regs, thread_regs, TREG_LAST_GPR * sizeof(unsigned long));
+       memcpy(gdb_regs, thread_regs, NGPRS * sizeof(unsigned long));
+       memset(&gdb_regs[NGPRS], 0,
+              (TILEGX_PC_REGNUM - NGPRS) * sizeof(unsigned long));
        gdb_regs[TILEGX_PC_REGNUM] = thread_regs->pc;
        gdb_regs[TILEGX_FAULTNUM_REGNUM] = thread_regs->faultnum;
 }
index bbb855de6569841200c991bf610b090c4e94c47b..a992238e9b58260fdfa067c4cc2c0d348ceda41f 100644 (file)
@@ -1632,14 +1632,14 @@ static struct resource data_resource = {
        .name   = "Kernel data",
        .start  = 0,
        .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 static struct resource code_resource = {
        .name   = "Kernel code",
        .start  = 0,
        .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 /*
@@ -1673,10 +1673,15 @@ insert_ram_resource(u64 start_pfn, u64 end_pfn, bool reserved)
                kzalloc(sizeof(struct resource), GFP_ATOMIC);
        if (!res)
                return NULL;
-       res->name = reserved ? "Reserved" : "System RAM";
        res->start = start_pfn << PAGE_SHIFT;
        res->end = (end_pfn << PAGE_SHIFT) - 1;
        res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+       if (reserved) {
+               res->name = "Reserved";
+       } else {
+               res->name = "System RAM";
+               res->flags |= IORESOURCE_SYSRAM;
+       }
        if (insert_resource(&iomem_resource, res)) {
                kfree(res);
                return NULL;
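The resource hunks in this series (sparc above, tile here, unicore32 further down) stop tagging kernel RAM as bare IORESOURCE_MEM and use IORESOURCE_SYSTEM_RAM instead; the tile branch spells the same thing out by OR-ing IORESOURCE_SYSRAM into the flags only for real RAM. A minimal sketch of a resource declared the new way, assuming IORESOURCE_SYSTEM_RAM is IORESOURCE_MEM plus the IORESOURCE_SYSRAM modifier bit as that branch suggests (addresses are made up):

    #include <linux/ioport.h>

    /* Illustrative only: a System RAM region flagged the new way.  Code that
     * tests for IORESOURCE_MEM still matches it, while the extra SYSRAM bit
     * lets callers tell real RAM apart from other busy memory resources. */
    static struct resource example_ram_res = {
        .name  = "System RAM",
        .start = 0x00100000,
        .end   = 0x001fffff,
        .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
    };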
index e13d41c392ae4f4940a41e6c8049099c71d28a04..f878bec23576c54c619b633ce0c81508519ddb56 100644 (file)
@@ -34,21 +34,18 @@ struct page;
 
 #if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
 
-typedef struct { unsigned long pte_low, pte_high; } pte_t;
+typedef struct { unsigned long pte; } pte_t;
 typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
-
-#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
-#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
-#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
-#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
-                             smp_wmb(); \
-                             (to).pte_low = (from).pte_low; })
-#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
-#define pte_set_val(pte, phys, prot) \
-       ({ (pte).pte_high = (phys) >> 32; \
-          (pte).pte_low = (phys) | pgprot_val(prot); })
+#define pte_val(p) ((p).pte)
+
+#define pte_get_bits(p, bits) ((p).pte & (bits))
+#define pte_set_bits(p, bits) ((p).pte |= (bits))
+#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
+#define pte_copy(to, from) ({ (to).pte = (from).pte; })
+#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
+#define pte_set_val(p, phys, prot) \
+       ({ (p).pte = (phys) | pgprot_val(prot); })
 
 #define pmd_val(x)     ((x).pmd)
 #define __pmd(x) ((pmd_t) { (x) } )
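With the CONFIG_3_LEVEL_PGTABLES/32-bit special case removed, a UML pte is always a single word. A short, illustrative use of the simplified accessors above (the physical address and the protection bit are made up for the example):

    static void pte_example(void)
    {
        pte_t pte;

        /* phys | prot bits packed into the single word */
        pte_set_val(pte, 0x1000UL, __pgprot(_PAGE_PRESENT));

        if (pte_get_bits(pte, _PAGE_PRESENT))
            pte_set_bits(pte, _PAGE_NEWPAGE);
    }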
index 38b3f3785c3c82c6c55a98192d67bc60bd1f45a9..eb9dccecb33884cff6941706d7254abf5d983d67 100644 (file)
@@ -14,7 +14,6 @@
 
 #ifdef __KERNEL__
 #include <asm-generic/pci-dma-compat.h>
-#include <asm-generic/pci-bridge.h>
 #include <asm-generic/pci.h>
 #include <mach/hardware.h> /* for PCIBIOS_MIN_* */
 
@@ -23,5 +22,4 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
        enum pci_mmap_state mmap_state, int write_combine);
 
 #endif /* __KERNEL__ */
-
 #endif
index 3fa317f96122130d30cfc7be41d4e1450fc56a02..c2bffa5614a48102a1d66a60865a2e3474a49597 100644 (file)
@@ -72,13 +72,13 @@ static struct resource mem_res[] = {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
-               .flags = IORESOURCE_MEM
+               .flags = IORESOURCE_SYSTEM_RAM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
-               .flags = IORESOURCE_MEM
+               .flags = IORESOURCE_SYSTEM_RAM
        }
 };
 
@@ -211,7 +211,7 @@ request_standard_resources(struct meminfo *mi)
                res->name  = "System RAM";
                res->start = mi->bank[i].start;
                res->end   = mi->bank[i].start + mi->bank[i].size - 1;
-               res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+               res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
                request_resource(&iomem_resource, res);
 
index 1538562cc720e78132d0eb31c38116b029d56ce0..eb3abf8ac44eb33f333ed29727dfd49ba6bfbf38 100644 (file)
@@ -1,6 +1,7 @@
-
 obj-y += entry/
 
+obj-$(CONFIG_PERF_EVENTS) += events/
+
 obj-$(CONFIG_KVM) += kvm/
 
 # Xen paravirtualization support
index 330e738ccfc13633954a02677e46bb1b12605fa9..405b1858134b005af48526947fa24621764ce573 100644 (file)
@@ -509,11 +509,10 @@ config X86_INTEL_CE
 
 config X86_INTEL_MID
        bool "Intel MID platform support"
-       depends on X86_32
        depends on X86_EXTENDED_PLATFORM
        depends on X86_PLATFORM_DEVICES
        depends on PCI
-       depends on PCI_GOANY
+       depends on X86_64 || (PCI_GOANY && X86_32)
        depends on X86_IO_APIC
        select SFI
        select I2C
@@ -1160,22 +1159,23 @@ config MICROCODE
        bool "CPU microcode loading support"
        default y
        depends on CPU_SUP_AMD || CPU_SUP_INTEL
-       depends on BLK_DEV_INITRD
        select FW_LOADER
        ---help---
-
          If you say Y here, you will be able to update the microcode on
-         certain Intel and AMD processors. The Intel support is for the
-         IA32 family, e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4,
-         Xeon etc. The AMD support is for families 0x10 and later. You will
-         obviously need the actual microcode binary data itself which is not
-         shipped with the Linux kernel.
-
-         This option selects the general module only, you need to select
-         at least one vendor specific module as well.
-
-         To compile this driver as a module, choose M here: the module
-         will be called microcode.
+         Intel and AMD processors. The Intel support is for the IA32 family,
+         e.g. Pentium Pro, Pentium II, Pentium III, Pentium 4, Xeon etc. The
+         AMD support is for families 0x10 and later. You will obviously need
+         the actual microcode binary data itself which is not shipped with
+         the Linux kernel.
+
+         The preferred method to load microcode from a detached initrd is described
+         in Documentation/x86/early-microcode.txt. For that you need to enable
+         CONFIG_BLK_DEV_INITRD in order for the loader to be able to scan the
+         initrd for microcode blobs.
+
+         In addition, you can build-in the microcode into the kernel. For that you
+         need to enable FIRMWARE_IN_KERNEL and add the vendor-supplied microcode
+         to the CONFIG_EXTRA_FIRMWARE config option.
 
 config MICROCODE_INTEL
        bool "Intel microcode loading support"
index 9b18ed97a8a2968b7b293bc83484d8311d830e83..68a2d1f0a6832bdbfafd9ed4122f370252f5e4d1 100644 (file)
@@ -350,16 +350,6 @@ config DEBUG_IMR_SELFTEST
 
          If unsure say N here.
 
-config X86_DEBUG_STATIC_CPU_HAS
-       bool "Debug alternatives"
-       depends on DEBUG_KERNEL
-       ---help---
-         This option causes additional code to be generated which
-         fails if static_cpu_has() is used before alternatives have
-         run.
-
-         If unsure, say N.
-
 config X86_DEBUG_FPU
        bool "Debug the x86 FPU code"
        depends on DEBUG_KERNEL
index ea97697e51e40fd2ff232caa5e3147ff0f6e725c..4cb404fd45ceaa0e89f9e669d6a59de4ce378449 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef BOOT_CPUFLAGS_H
 #define BOOT_CPUFLAGS_H
 
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/processor-flags.h>
 
 struct cpu_features {
index 637097e66a62a675de99040bdcf4f8fc011e2af0..f72498dc90d2376f5f0ffafbc4305be959344343 100644 (file)
@@ -17,7 +17,7 @@
 
 #include "../include/asm/required-features.h"
 #include "../include/asm/disabled-features.h"
-#include "../include/asm/cpufeature.h"
+#include "../include/asm/cpufeatures.h"
 #include "../kernel/cpu/capflags.c"
 
 int main(void)
index 712b13047b41e9cdf7116048d0ffce510d3aa283..3a33124e91129e85c3f34fc6dc85ca43c8e35cff 100644 (file)
@@ -157,7 +157,9 @@ ENTRY(chacha20_4block_xor_ssse3)
        # done with the slightly better performing SSSE3 byte shuffling,
        # 7/12-bit word rotation uses traditional shift+OR.
 
-       sub             $0x40,%rsp
+       mov             %rsp,%r11
+       sub             $0x80,%rsp
+       and             $~63,%rsp
 
        # x0..15[0-3] = s0..3[0..3]
        movq            0x00(%rdi),%xmm1
@@ -620,6 +622,6 @@ ENTRY(chacha20_4block_xor_ssse3)
        pxor            %xmm1,%xmm15
        movdqu          %xmm15,0xf0(%rsi)
 
-       add             $0x40,%rsp
+       mov             %r11,%rsp
        ret
 ENDPROC(chacha20_4block_xor_ssse3)
index 07d2c6c86a5483216684970489fcba3b4478007d..27226df3f7d8aa16a647a051d8ac19ef02ef5aa2 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/crc32.h>
 #include <crypto/internal/hash.h>
 
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
 #include <asm/fpu/api.h>
 
index 0e9871693f2469d3106f57ab6dc94d85a3f16146..0857b1a1de3bdc411b3fea94517a9db6c9b9952c 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/kernel.h>
 #include <crypto/internal/hash.h>
 
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
 #include <asm/fpu/internal.h>
 
index a3fcfc97a311d5b660e0dcda3be489038f231dce..cd4df93225014b6fc64e3020bf07c0600647fd7c 100644 (file)
@@ -30,7 +30,7 @@
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <asm/fpu/api.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/cpu_device_id.h>
 
 asmlinkage __u16 crc_t10dif_pcl(__u16 crc, const unsigned char *buf,
index a841e9765bd614b17befbcf647319e2020412d88..a8a0224fa0f8a4682f76281034a3172001f50200 100644 (file)
@@ -762,6 +762,38 @@ static int sha1_mb_async_digest(struct ahash_request *req)
        return crypto_ahash_digest(mcryptd_req);
 }
 
+static int sha1_mb_async_export(struct ahash_request *req, void *out)
+{
+       struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+
+       memcpy(mcryptd_req, req, sizeof(*req));
+       ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+       return crypto_ahash_export(mcryptd_req, out);
+}
+
+static int sha1_mb_async_import(struct ahash_request *req, const void *in)
+{
+       struct ahash_request *mcryptd_req = ahash_request_ctx(req);
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
+       struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
+       struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm);
+       struct mcryptd_hash_request_ctx *rctx;
+       struct shash_desc *desc;
+
+       memcpy(mcryptd_req, req, sizeof(*req));
+       ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
+       rctx = ahash_request_ctx(mcryptd_req);
+       desc = &rctx->desc;
+       desc->tfm = child;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       return crypto_ahash_import(mcryptd_req, in);
+}
+
 static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
 {
        struct mcryptd_ahash *mcryptd_tfm;
@@ -796,8 +828,11 @@ static struct ahash_alg sha1_mb_async_alg = {
        .final          = sha1_mb_async_final,
        .finup          = sha1_mb_async_finup,
        .digest         = sha1_mb_async_digest,
+       .export         = sha1_mb_async_export,
+       .import         = sha1_mb_async_import,
        .halg = {
                .digestsize     = SHA1_DIGEST_SIZE,
+               .statesize      = sizeof(struct sha1_hash_ctx),
                .base = {
                        .cra_name               = "sha1",
                        .cra_driver_name        = "sha1_mb",
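The new export/import hooks and the statesize field let users of the "sha1_mb" ahash checkpoint a partial digest and resume it later through the generic API. A rough sketch of that calling pattern is below; it assumes req has already been set up with ahash_request_set_crypt() and a result buffer, and it glosses over async completion (sha1_mb may return -EINPROGRESS/-EBUSY in practice).

    #include <crypto/hash.h>
    #include <linux/slab.h>

    /* Sketch: hash one chunk, save the partial state, then restore it and
     * finish the digest on the same transform. */
    static int sha1_mb_checkpoint(struct crypto_ahash *tfm,
                                  struct ahash_request *req)
    {
        void *state = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
        int err;

        if (!state)
            return -ENOMEM;

        err = crypto_ahash_init(req);
        if (!err)
            err = crypto_ahash_update(req);        /* first chunk */
        if (!err)
            err = crypto_ahash_export(req, state); /* checkpoint */
        if (!err)
            err = crypto_ahash_import(req, state); /* resume */
        if (!err)
            err = crypto_ahash_final(req);         /* finish digest */

        kfree(state);
        return err;
    }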
index 2ab9560b53c84db34f32334a036111ab3e601ef7..c420d89b175f046491bf0e44d1cf2b22c1541fe2 100644 (file)
@@ -197,7 +197,7 @@ len_is_0:
        vpinsrd  $1, _args_digest+1*32(state , idx, 4), %xmm0, %xmm0
        vpinsrd  $2, _args_digest+2*32(state , idx, 4), %xmm0, %xmm0
        vpinsrd  $3, _args_digest+3*32(state , idx, 4), %xmm0, %xmm0
-       movl    4*32(state, idx, 4), DWORD_tmp
+       movl     _args_digest+4*32(state, idx, 4), DWORD_tmp
 
        vmovdqu  %xmm0, _result_digest(job_rax)
        movl    DWORD_tmp, _result_digest+1*16(job_rax)
index e32206e0986828390e769604ec6cd510b1f2296e..9a9e5884066c6581878b56cde27e75f542b21249 100644 (file)
@@ -201,37 +201,6 @@ For 32-bit we have the following conventions - kernel is built with
        .byte 0xf1
        .endm
 
-#else /* CONFIG_X86_64 */
-
-/*
- * For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
- * are different from the entry_32.S versions in not changing the segment
- * registers. So only suitable for in kernel use, not when transitioning
- * from or to user space. The resulting stack frame is not a standard
- * pt_regs frame. The main use case is calling C code from assembler
- * when all the registers need to be preserved.
- */
-
-       .macro SAVE_ALL
-       pushl %eax
-       pushl %ebp
-       pushl %edi
-       pushl %esi
-       pushl %edx
-       pushl %ecx
-       pushl %ebx
-       .endm
-
-       .macro RESTORE_ALL
-       popl %ebx
-       popl %ecx
-       popl %edx
-       popl %esi
-       popl %edi
-       popl %ebp
-       popl %eax
-       .endm
-
 #endif /* CONFIG_X86_64 */
 
 /*
index 03663740c86655cabf21504578e97d73d98595be..c6ab2ebb5f4f7df9840e77fb1f85775e349f2d9b 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/traps.h>
 #include <asm/vdso.h>
 #include <asm/uaccess.h>
+#include <asm/cpufeature.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/syscalls.h>
@@ -344,6 +345,32 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
        prepare_exit_to_usermode(regs);
 }
 
+#ifdef CONFIG_X86_64
+__visible void do_syscall_64(struct pt_regs *regs)
+{
+       struct thread_info *ti = pt_regs_to_thread_info(regs);
+       unsigned long nr = regs->orig_ax;
+
+       local_irq_enable();
+
+       if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
+               nr = syscall_trace_enter(regs);
+
+       /*
+        * NB: Native and x32 syscalls are dispatched from the same
+        * table.  The only functional difference is the x32 bit in
+        * regs->orig_ax, which changes the behavior of some syscalls.
+        */
+       if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
+               regs->ax = sys_call_table[nr & __SYSCALL_MASK](
+                       regs->di, regs->si, regs->dx,
+                       regs->r10, regs->r8, regs->r9);
+       }
+
+       syscall_return_slowpath(regs);
+}
+#endif
+
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 /*
  * Does a 32-bit syscall.  Called with IRQs on and does all entry and
index 77d8c5112900e0edb73741af9c7cc4a60b0bb8f5..4c522835274441628c0021042b0aa11c9f10ca43 100644 (file)
@@ -40,7 +40,7 @@
 #include <asm/processor-flags.h>
 #include <asm/ftrace.h>
 #include <asm/irq_vectors.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
index 9d34d3cfceb61c1073c507f5a0f90e939eb146a3..70eadb0ea5fa46407ba16e7bafd6f30386ec0a46 100644 (file)
@@ -145,17 +145,11 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
        movq    %rsp, PER_CPU_VAR(rsp_scratch)
        movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
+       TRACE_IRQS_OFF
+
        /* Construct struct pt_regs on stack */
        pushq   $__USER_DS                      /* pt_regs->ss */
        pushq   PER_CPU_VAR(rsp_scratch)        /* pt_regs->sp */
-       /*
-        * Re-enable interrupts.
-        * We use 'rsp_scratch' as a scratch space, hence irq-off block above
-        * must execute atomically in the face of possible interrupt-driven
-        * task preemption. We must enable interrupts only after we're done
-        * with using rsp_scratch:
-        */
-       ENABLE_INTERRUPTS(CLBR_NONE)
        pushq   %r11                            /* pt_regs->flags */
        pushq   $__USER_CS                      /* pt_regs->cs */
        pushq   %rcx                            /* pt_regs->ip */
@@ -171,9 +165,21 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
        pushq   %r11                            /* pt_regs->r11 */
        sub     $(6*8), %rsp                    /* pt_regs->bp, bx, r12-15 not saved */
 
-       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz     tracesys
+       /*
+        * If we need to do entry work or if we guess we'll need to do
+        * exit work, go straight to the slow path.
+        */
+       testl   $_TIF_WORK_SYSCALL_ENTRY|_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jnz     entry_SYSCALL64_slow_path
+
 entry_SYSCALL_64_fastpath:
+       /*
+        * Easy case: enable interrupts and issue the syscall.  If the syscall
+        * needs pt_regs, we'll call a stub that disables interrupts again
+        * and jumps to the slow path.
+        */
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_NONE)
 #if __SYSCALL_MASK == ~0
        cmpq    $__NR_syscall_max, %rax
 #else
@@ -182,103 +188,56 @@ entry_SYSCALL_64_fastpath:
 #endif
        ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
        movq    %r10, %rcx
+
+       /*
+        * This call instruction is handled specially in stub_ptregs_64.
+        * It might end up jumping to the slow path.  If it jumps, RAX
+        * and all argument registers are clobbered.
+        */
        call    *sys_call_table(, %rax, 8)
+.Lentry_SYSCALL_64_after_fastpath_call:
+
        movq    %rax, RAX(%rsp)
 1:
-/*
- * Syscall return path ending with SYSRET (fast path).
- * Has incompletely filled pt_regs.
- */
-       LOCKDEP_SYS_EXIT
-       /*
-        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-        * it is too small to ever cause noticeable irq latency.
-        */
-       DISABLE_INTERRUPTS(CLBR_NONE)
 
        /*
-        * We must check ti flags with interrupts (or at least preemption)
-        * off because we must *never* return to userspace without
-        * processing exit work that is enqueued if we're preempted here.
-        * In particular, returning to userspace with any of the one-shot
-        * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
-        * very bad.
+        * If we get here, then we know that pt_regs is clean for SYSRET64.
+        * If we see that no exit work is required (which we are required
+        * to check with IRQs off), then we can go straight to SYSRET64.
         */
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
        testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz     int_ret_from_sys_call_irqs_off  /* Go to the slow path */
+       jnz     1f
 
-       RESTORE_C_REGS_EXCEPT_RCX_R11
+       LOCKDEP_SYS_EXIT
+       TRACE_IRQS_ON           /* user mode is traced as IRQs on */
        movq    RIP(%rsp), %rcx
        movq    EFLAGS(%rsp), %r11
+       RESTORE_C_REGS_EXCEPT_RCX_R11
        movq    RSP(%rsp), %rsp
-       /*
-        * 64-bit SYSRET restores rip from rcx,
-        * rflags from r11 (but RF and VM bits are forced to 0),
-        * cs and ss are loaded from MSRs.
-        * Restoration of rflags re-enables interrupts.
-        *
-        * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
-        * descriptor is not reinitialized.  This means that we should
-        * avoid SYSRET with SS == NULL, which could happen if we schedule,
-        * exit the kernel, and re-enter using an interrupt vector.  (All
-        * interrupt entries on x86_64 set SS to NULL.)  We prevent that
-        * from happening by reloading SS in __switch_to.  (Actually
-        * detecting the failure in 64-bit userspace is tricky but can be
-        * done.)
-        */
        USERGS_SYSRET64
 
-GLOBAL(int_ret_from_sys_call_irqs_off)
+1:
+       /*
+        * The fast path looked good when we started, but something changed
+        * along the way and we need to switch to the slow path.  Calling
+        * raise(3) will trigger this, for example.  IRQs are off.
+        */
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_NONE)
-       jmp int_ret_from_sys_call
-
-       /* Do syscall entry tracing */
-tracesys:
-       movq    %rsp, %rdi
-       movl    $AUDIT_ARCH_X86_64, %esi
-       call    syscall_trace_enter_phase1
-       test    %rax, %rax
-       jnz     tracesys_phase2                 /* if needed, run the slow path */
-       RESTORE_C_REGS_EXCEPT_RAX               /* else restore clobbered regs */
-       movq    ORIG_RAX(%rsp), %rax
-       jmp     entry_SYSCALL_64_fastpath       /* and return to the fast path */
-
-tracesys_phase2:
        SAVE_EXTRA_REGS
        movq    %rsp, %rdi
-       movl    $AUDIT_ARCH_X86_64, %esi
-       movq    %rax, %rdx
-       call    syscall_trace_enter_phase2
-
-       /*
-        * Reload registers from stack in case ptrace changed them.
-        * We don't reload %rax because syscall_trace_entry_phase2() returned
-        * the value it wants us to use in the table lookup.
-        */
-       RESTORE_C_REGS_EXCEPT_RAX
-       RESTORE_EXTRA_REGS
-#if __SYSCALL_MASK == ~0
-       cmpq    $__NR_syscall_max, %rax
-#else
-       andl    $__SYSCALL_MASK, %eax
-       cmpl    $__NR_syscall_max, %eax
-#endif
-       ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
-       movq    %r10, %rcx                      /* fixup for C */
-       call    *sys_call_table(, %rax, 8)
-       movq    %rax, RAX(%rsp)
-1:
-       /* Use IRET because user could have changed pt_regs->foo */
+       call    syscall_return_slowpath /* returns with IRQs disabled */
+       jmp     return_from_SYSCALL_64
 
-/*
- * Syscall return path ending with IRET.
- * Has correct iret frame.
- */
-GLOBAL(int_ret_from_sys_call)
+entry_SYSCALL64_slow_path:
+       /* IRQs are off. */
        SAVE_EXTRA_REGS
        movq    %rsp, %rdi
-       call    syscall_return_slowpath /* returns with IRQs disabled */
+       call    do_syscall_64           /* returns with IRQs disabled */
+
+return_from_SYSCALL_64:
        RESTORE_EXTRA_REGS
        TRACE_IRQS_IRETQ                /* we're about to change IF */
 
@@ -355,83 +314,45 @@ opportunistic_sysret_failed:
        jmp     restore_c_regs_and_iret
 END(entry_SYSCALL_64)
 
+ENTRY(stub_ptregs_64)
+       /*
+        * Syscalls marked as needing ptregs land here.
+        * If we are on the fast path, we need to save the extra regs,
+        * which we achieve by trying again on the slow path.  If we are on
+        * the slow path, the extra regs are already saved.
+        *
+        * RAX stores a pointer to the C function implementing the syscall.
+        * IRQs are on.
+        */
+       cmpq    $.Lentry_SYSCALL_64_after_fastpath_call, (%rsp)
+       jne     1f
 
-       .macro FORK_LIKE func
-ENTRY(stub_\func)
-       SAVE_EXTRA_REGS 8
-       jmp     sys_\func
-END(stub_\func)
-       .endm
-
-       FORK_LIKE  clone
-       FORK_LIKE  fork
-       FORK_LIKE  vfork
-
-ENTRY(stub_execve)
-       call    sys_execve
-return_from_execve:
-       testl   %eax, %eax
-       jz      1f
-       /* exec failed, can use fast SYSRET code path in this case */
-       ret
-1:
-       /* must use IRET code path (pt_regs->cs may have changed) */
-       addq    $8, %rsp
-       ZERO_EXTRA_REGS
-       movq    %rax, RAX(%rsp)
-       jmp     int_ret_from_sys_call
-END(stub_execve)
-/*
- * Remaining execve stubs are only 7 bytes long.
- * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
- */
-       .align  8
-GLOBAL(stub_execveat)
-       call    sys_execveat
-       jmp     return_from_execve
-END(stub_execveat)
-
-#if defined(CONFIG_X86_X32_ABI)
-       .align  8
-GLOBAL(stub_x32_execve)
-       call    compat_sys_execve
-       jmp     return_from_execve
-END(stub_x32_execve)
-       .align  8
-GLOBAL(stub_x32_execveat)
-       call    compat_sys_execveat
-       jmp     return_from_execve
-END(stub_x32_execveat)
-#endif
-
-/*
- * sigreturn is special because it needs to restore all registers on return.
- * This cannot be done with SYSRET, so use the IRET return path instead.
- */
-ENTRY(stub_rt_sigreturn)
        /*
-        * SAVE_EXTRA_REGS result is not normally needed:
-        * sigreturn overwrites all pt_regs->GPREGS.
-        * But sigreturn can fail (!), and there is no easy way to detect that.
-        * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
-        * we SAVE_EXTRA_REGS here.
+        * Called from fast path -- disable IRQs again, pop return address
+        * and jump to slow path
         */
-       SAVE_EXTRA_REGS 8
-       call    sys_rt_sigreturn
-return_from_stub:
-       addq    $8, %rsp
-       RESTORE_EXTRA_REGS
-       movq    %rax, RAX(%rsp)
-       jmp     int_ret_from_sys_call
-END(stub_rt_sigreturn)
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       popq    %rax
+       jmp     entry_SYSCALL64_slow_path
 
-#ifdef CONFIG_X86_X32_ABI
-ENTRY(stub_x32_rt_sigreturn)
-       SAVE_EXTRA_REGS 8
-       call    sys32_x32_rt_sigreturn
-       jmp     return_from_stub
-END(stub_x32_rt_sigreturn)
-#endif
+1:
+       /* Called from C */
+       jmp     *%rax                           /* called from C */
+END(stub_ptregs_64)
+
+.macro ptregs_stub func
+ENTRY(ptregs_\func)
+       leaq    \func(%rip), %rax
+       jmp     stub_ptregs_64
+END(ptregs_\func)
+.endm
+
+/* Instantiate ptregs_stub for each ptregs-using syscall */
+#define __SYSCALL_64_QUAL_(sym)
+#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_stub sym
+#define __SYSCALL_64(nr, sym, qual) __SYSCALL_64_QUAL_##qual(sym)
+#include <asm/syscalls_64.h>
 
 /*
  * A newly forked process directly context switches into this address.
@@ -439,7 +360,6 @@ END(stub_x32_rt_sigreturn)
  * rdi: prev task we switched from
  */
 ENTRY(ret_from_fork)
-
        LOCK ; btr $TIF_FORK, TI_flags(%r8)
 
        pushq   $0x0002
@@ -447,28 +367,32 @@ ENTRY(ret_from_fork)
 
        call    schedule_tail                   /* rdi: 'prev' task parameter */
 
-       RESTORE_EXTRA_REGS
-
        testb   $3, CS(%rsp)                    /* from kernel_thread? */
+       jnz     1f
 
        /*
-        * By the time we get here, we have no idea whether our pt_regs,
-        * ti flags, and ti status came from the 64-bit SYSCALL fast path,
-        * the slow path, or one of the 32-bit compat paths.
-        * Use IRET code path to return, since it can safely handle
-        * all of the above.
+        * We came from kernel_thread.  This code path is quite twisted, and
+        * someone should clean it up.
+        *
+        * copy_thread_tls stashes the function pointer in RBX and the
+        * parameter to be passed in RBP.  The called function is permitted
+        * to call do_execve and thereby jump to user mode.
         */
-       jnz     int_ret_from_sys_call
+       movq    RBP(%rsp), %rdi
+       call    *RBX(%rsp)
+       movl    $0, RAX(%rsp)
 
        /*
-        * We came from kernel_thread
-        * nb: we depend on RESTORE_EXTRA_REGS above
+        * Fall through as though we're exiting a syscall.  This makes a
+        * twisted sort of sense if we just called do_execve.
         */
-       movq    %rbp, %rdi
-       call    *%rbx
-       movl    $0, RAX(%rsp)
-       RESTORE_EXTRA_REGS
-       jmp     int_ret_from_sys_call
+
+1:
+       movq    %rsp, %rdi
+       call    syscall_return_slowpath /* returns with IRQs disabled */
+       TRACE_IRQS_ON                   /* user mode is traced as IRQS on */
+       SWAPGS
+       jmp     restore_regs_and_iret
 END(ret_from_fork)
 
 /*
index 9a6649857106a0e0bc6bfe8ad0074562e394a5d1..8f895ee13a1cbfa1f1666a251471d29d2db01ea2 100644 (file)
@@ -6,17 +6,11 @@
 #include <asm/asm-offsets.h>
 #include <asm/syscall.h>
 
-#ifdef CONFIG_IA32_EMULATION
-#define SYM(sym, compat) compat
-#else
-#define SYM(sym, compat) sym
-#endif
-
-#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long SYM(sym, compat)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_32.h>
 #undef __SYSCALL_I386
 
-#define __SYSCALL_I386(nr, sym, compat) [nr] = SYM(sym, compat),
+#define __SYSCALL_I386(nr, sym, qual) [nr] = sym,
 
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
index 41283d22be7a92db23e46fb427593a2e878ee37d..9dbc5abb6162fa20581069499667a8c49b254868 100644 (file)
@@ -6,19 +6,14 @@
 #include <asm/asm-offsets.h>
 #include <asm/syscall.h>
 
-#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+#define __SYSCALL_64_QUAL_(sym) sym
+#define __SYSCALL_64_QUAL_ptregs(sym) ptregs_##sym
 
-#ifdef CONFIG_X86_X32_ABI
-# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
-#else
-# define __SYSCALL_X32(nr, sym, compat) /* nothing */
-#endif
-
-#define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long __SYSCALL_64_QUAL_##qual(sym)(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 #include <asm/syscalls_64.h>
 #undef __SYSCALL_64
 
-#define __SYSCALL_64(nr, sym, compat) [nr] = sym,
+#define __SYSCALL_64(nr, sym, qual) [nr] = __SYSCALL_64_QUAL_##qual(sym),
 
 extern long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
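Taken together with the table and script changes further down, a syscall tagged with the ptregs qualifier now resolves to the ptregs_* stub instantiated in entry_64.S, while unqualified entries keep pointing at the plain handler. Illustrative expansion of the macros above:

    /* syscalltbl.sh emits, for "59  64  execve  sys_execve/ptregs":   */
    __SYSCALL_64(59, sys_execve, ptregs)
    /* ...which the qualifier macros above turn into the table entry:  */
    [59] = ptregs_sys_execve,

    /* and for an unqualified row such as "0  common  read  sys_read": */
    __SYSCALL_64(0, sys_read, )
    /* ...which stays a direct reference:                              */
    [0] = sys_read,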
 
index dc1040a50bdc21594317f9f9dc4c59196a25ae63..2e5b565adacc542415854612ad752e04cfc5df95 100644 (file)
@@ -21,7 +21,7 @@
 12     common  brk                     sys_brk
 13     64      rt_sigaction            sys_rt_sigaction
 14     common  rt_sigprocmask          sys_rt_sigprocmask
-15     64      rt_sigreturn            stub_rt_sigreturn
+15     64      rt_sigreturn            sys_rt_sigreturn/ptregs
 16     64      ioctl                   sys_ioctl
 17     common  pread64                 sys_pread64
 18     common  pwrite64                sys_pwrite64
 53     common  socketpair              sys_socketpair
 54     64      setsockopt              sys_setsockopt
 55     64      getsockopt              sys_getsockopt
-56     common  clone                   stub_clone
-57     common  fork                    stub_fork
-58     common  vfork                   stub_vfork
-59     64      execve                  stub_execve
+56     common  clone                   sys_clone/ptregs
+57     common  fork                    sys_fork/ptregs
+58     common  vfork                   sys_vfork/ptregs
+59     64      execve                  sys_execve/ptregs
 60     common  exit                    sys_exit
 61     common  wait4                   sys_wait4
 62     common  kill                    sys_kill
 169    common  reboot                  sys_reboot
 170    common  sethostname             sys_sethostname
 171    common  setdomainname           sys_setdomainname
-172    common  iopl                    sys_iopl
+172    common  iopl                    sys_iopl/ptregs
 173    common  ioperm                  sys_ioperm
 174    64      create_module
 175    common  init_module             sys_init_module
 319    common  memfd_create            sys_memfd_create
 320    common  kexec_file_load         sys_kexec_file_load
 321    common  bpf                     sys_bpf
-322    64      execveat                stub_execveat
+322    64      execveat                sys_execveat/ptregs
 323    common  userfaultfd             sys_userfaultfd
 324    common  membarrier              sys_membarrier
 325    common  mlock2                  sys_mlock2
 # for native 64-bit operation.
 #
 512    x32     rt_sigaction            compat_sys_rt_sigaction
-513    x32     rt_sigreturn            stub_x32_rt_sigreturn
+513    x32     rt_sigreturn            sys32_x32_rt_sigreturn
 514    x32     ioctl                   compat_sys_ioctl
 515    x32     readv                   compat_sys_readv
 516    x32     writev                  compat_sys_writev
 517    x32     recvfrom                compat_sys_recvfrom
 518    x32     sendmsg                 compat_sys_sendmsg
 519    x32     recvmsg                 compat_sys_recvmsg
-520    x32     execve                  stub_x32_execve
+520    x32     execve                  compat_sys_execve/ptregs
 521    x32     ptrace                  compat_sys_ptrace
 522    x32     rt_sigpending           compat_sys_rt_sigpending
 523    x32     rt_sigtimedwait         compat_sys_rt_sigtimedwait
 542    x32     getsockopt              compat_sys_getsockopt
 543    x32     io_setup                compat_sys_io_setup
 544    x32     io_submit               compat_sys_io_submit
-545    x32     execveat                stub_x32_execveat
+545    x32     execveat                compat_sys_execveat/ptregs
index 0e7f8ec071e76217a6cee34bc90d0ca7b997443e..cd3d3015d7df87ed79f469c7be0ce6549c62433f 100644 (file)
@@ -3,13 +3,63 @@
 in="$1"
 out="$2"
 
+syscall_macro() {
+    abi="$1"
+    nr="$2"
+    entry="$3"
+
+    # Entry can be either just a function name or "function/qualifier"
+    real_entry="${entry%%/*}"
+    qualifier="${entry:${#real_entry}}"                # Strip the function name
+    qualifier="${qualifier:1}"                 # Strip the slash, if any
+
+    echo "__SYSCALL_${abi}($nr, $real_entry, $qualifier)"
+}
+
+emit() {
+    abi="$1"
+    nr="$2"
+    entry="$3"
+    compat="$4"
+
+    if [ "$abi" == "64" -a -n "$compat" ]; then
+       echo "a compat entry for a 64-bit syscall makes no sense" >&2
+       exit 1
+    fi
+
+    if [ -z "$compat" ]; then
+       if [ -n "$entry" ]; then
+           syscall_macro "$abi" "$nr" "$entry"
+       fi
+    else
+       echo "#ifdef CONFIG_X86_32"
+       if [ -n "$entry" ]; then
+           syscall_macro "$abi" "$nr" "$entry"
+       fi
+       echo "#else"
+       syscall_macro "$abi" "$nr" "$compat"
+       echo "#endif"
+    fi
+}
+
 grep '^[0-9]' "$in" | sort -n | (
     while read nr abi name entry compat; do
        abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
-       if [ -n "$compat" ]; then
-           echo "__SYSCALL_${abi}($nr, $entry, $compat)"
-       elif [ -n "$entry" ]; then
-           echo "__SYSCALL_${abi}($nr, $entry, $entry)"
+       if [ "$abi" == "COMMON" -o "$abi" == "64" ]; then
+           # COMMON is the same as 64, except that we don't expect X32
+           # programs to use it.  Our expectation has nothing to do with
+           # any generated code, so treat them the same.
+           emit 64 "$nr" "$entry" "$compat"
+       elif [ "$abi" == "X32" ]; then
+           # X32 is equivalent to 64 on an X32-compatible kernel.
+           echo "#ifdef CONFIG_X86_X32_ABI"
+           emit 64 "$nr" "$entry" "$compat"
+           echo "#endif"
+       elif [ "$abi" == "I386" ]; then
+           emit "$abi" "$nr" "$entry" "$compat"
+       else
+           echo "Unknown abi $abi" >&2
+           exit 1
        fi
     done
 ) > "$out"
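For the x32 rows shown earlier in syscall_64.tbl, the script wraps the emitted macro in the X32 config guard and otherwise treats the ABI like 64-bit. A sketch of the asm/syscalls_64.h fragment this generates for entry 513 above (whitespace condensed):

    /* Generated from "513  x32  rt_sigreturn  sys32_x32_rt_sigreturn": */
    #ifdef CONFIG_X86_X32_ABI
    __SYSCALL_64(513, sys32_x32_rt_sigreturn, )
    #endif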
index 0224987556ce80bd606063b56ef124b0857a3f44..abe961c7c71c7464e9707afa01bfe14efc89ba5f 100644 (file)
@@ -150,16 +150,9 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
        }
        fprintf(outfile, "\n};\n\n");
 
-       fprintf(outfile, "static struct page *pages[%lu];\n\n",
-               mapping_size / 4096);
-
        fprintf(outfile, "const struct vdso_image %s = {\n", name);
        fprintf(outfile, "\t.data = raw_data,\n");
        fprintf(outfile, "\t.size = %lu,\n", mapping_size);
-       fprintf(outfile, "\t.text_mapping = {\n");
-       fprintf(outfile, "\t\t.name = \"[vdso]\",\n");
-       fprintf(outfile, "\t\t.pages = pages,\n");
-       fprintf(outfile, "\t},\n");
        if (alt_sec) {
                fprintf(outfile, "\t.alt = %lu,\n",
                        (unsigned long)GET_LE(&alt_sec->sh_offset));
index 08a317a9ae4b582974ec4af4842461bddb762121..7853b53959cd35a8d555a5ddebfbb4af1f40562a 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/kernel.h>
 #include <linux/mm_types.h>
 
-#include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/vdso.h>
 
index 3a1d9297074bc5e1d2e559735bb5247acffa6164..0109ac6cb79cc73a5d74c99bc2c97705a9ca5c7d 100644 (file)
@@ -3,7 +3,7 @@
 */
 
 #include <asm/dwarf2.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 /*
index b8f69e264ac4148afbdaeedc69e24132980117a9..10f704584922653fd208646cac11c4f8a9cd776b 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/page.h>
 #include <asm/hpet.h>
 #include <asm/desc.h>
+#include <asm/cpufeature.h>
 
 #if defined(CONFIG_X86_64)
 unsigned int __read_mostly vdso64_enabled = 1;
@@ -27,13 +28,7 @@ unsigned int __read_mostly vdso64_enabled = 1;
 
 void __init init_vdso_image(const struct vdso_image *image)
 {
-       int i;
-       int npages = (image->size) / PAGE_SIZE;
-
        BUG_ON(image->size % PAGE_SIZE != 0);
-       for (i = 0; i < npages; i++)
-               image->text_mapping.pages[i] =
-                       virt_to_page(image->data + i*PAGE_SIZE);
 
        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
@@ -90,18 +85,87 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #endif
 }
 
+static int vdso_fault(const struct vm_special_mapping *sm,
+                     struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       const struct vdso_image *image = vma->vm_mm->context.vdso_image;
+
+       if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
+               return VM_FAULT_SIGBUS;
+
+       vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
+       get_page(vmf->page);
+       return 0;
+}
+
+static const struct vm_special_mapping text_mapping = {
+       .name = "[vdso]",
+       .fault = vdso_fault,
+};
+
+static int vvar_fault(const struct vm_special_mapping *sm,
+                     struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       const struct vdso_image *image = vma->vm_mm->context.vdso_image;
+       long sym_offset;
+       int ret = -EFAULT;
+
+       if (!image)
+               return VM_FAULT_SIGBUS;
+
+       sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
+               image->sym_vvar_start;
+
+       /*
+        * Sanity check: a symbol offset of zero means that the page
+        * does not exist for this vdso image, not that the page is at
+        * offset zero relative to the text mapping.  This should be
+        * impossible here, because sym_offset should only be zero for
+        * the page past the end of the vvar mapping.
+        */
+       if (sym_offset == 0)
+               return VM_FAULT_SIGBUS;
+
+       if (sym_offset == image->sym_vvar_page) {
+               ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
+                                   __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
+       } else if (sym_offset == image->sym_hpet_page) {
+#ifdef CONFIG_HPET_TIMER
+               if (hpet_address && vclock_was_used(VCLOCK_HPET)) {
+                       ret = vm_insert_pfn_prot(
+                               vma,
+                               (unsigned long)vmf->virtual_address,
+                               hpet_address >> PAGE_SHIFT,
+                               pgprot_noncached(PAGE_READONLY));
+               }
+#endif
+       } else if (sym_offset == image->sym_pvclock_page) {
+               struct pvclock_vsyscall_time_info *pvti =
+                       pvclock_pvti_cpu0_va();
+               if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
+                       ret = vm_insert_pfn(
+                               vma,
+                               (unsigned long)vmf->virtual_address,
+                               __pa(pvti) >> PAGE_SHIFT);
+               }
+       }
+
+       if (ret == 0 || ret == -EBUSY)
+               return VM_FAULT_NOPAGE;
+
+       return VM_FAULT_SIGBUS;
+}
+
 static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr, text_start;
        int ret = 0;
-       static struct page *no_pages[] = {NULL};
-       static struct vm_special_mapping vvar_mapping = {
+       static const struct vm_special_mapping vvar_mapping = {
                .name = "[vvar]",
-               .pages = no_pages,
+               .fault = vvar_fault,
        };
-       struct pvclock_vsyscall_time_info *pvti;
 
        if (calculate_addr) {
                addr = vdso_addr(current->mm->start_stack,
@@ -121,6 +185,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
 
        text_start = addr - image->sym_vvar_start;
        current->mm->context.vdso = (void __user *)text_start;
+       current->mm->context.vdso_image = image;
 
        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
@@ -130,7 +195,7 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                      &image->text_mapping);
+                                      &text_mapping);
 
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
@@ -140,7 +205,8 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
        vma = _install_special_mapping(mm,
                                       addr,
                                       -image->sym_vvar_start,
-                                      VM_READ|VM_MAYREAD,
+                                      VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
+                                      VM_PFNMAP,
                                       &vvar_mapping);
 
        if (IS_ERR(vma)) {
@@ -148,41 +214,6 @@ static int map_vdso(const struct vdso_image *image, bool calculate_addr)
                goto up_fail;
        }
 
-       if (image->sym_vvar_page)
-               ret = remap_pfn_range(vma,
-                                     text_start + image->sym_vvar_page,
-                                     __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
-                                     PAGE_SIZE,
-                                     PAGE_READONLY);
-
-       if (ret)
-               goto up_fail;
-
-#ifdef CONFIG_HPET_TIMER
-       if (hpet_address && image->sym_hpet_page) {
-               ret = io_remap_pfn_range(vma,
-                       text_start + image->sym_hpet_page,
-                       hpet_address >> PAGE_SHIFT,
-                       PAGE_SIZE,
-                       pgprot_noncached(PAGE_READONLY));
-
-               if (ret)
-                       goto up_fail;
-       }
-#endif
-
-       pvti = pvclock_pvti_cpu0_va();
-       if (pvti && image->sym_pvclock_page) {
-               ret = remap_pfn_range(vma,
-                                     text_start + image->sym_pvclock_page,
-                                     __pa(pvti) >> PAGE_SHIFT,
-                                     PAGE_SIZE,
-                                     PAGE_READONLY);
-
-               if (ret)
-                       goto up_fail;
-       }
-
 up_fail:
        if (ret)
                current->mm->context.vdso = NULL;
@@ -254,7 +285,7 @@ static void vgetcpu_cpu_init(void *arg)
 #ifdef CONFIG_NUMA
        node = cpu_to_node(cpu);
 #endif
-       if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
+       if (static_cpu_has(X86_FEATURE_RDTSCP))
                write_rdtscp_aux((node << 12) | cpu);
 
        /*
index 51e3304169951619362ea4a1494716e4f20696bf..0fb3a104ac626b07e0a4e604a8ebf36764bea2cf 100644 (file)
@@ -16,6 +16,8 @@
 #include <asm/vgtod.h>
 #include <asm/vvar.h>
 
+int vclocks_used __read_mostly;
+
 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
 
 void update_vsyscall_tz(void)
@@ -26,12 +28,17 @@ void update_vsyscall_tz(void)
 
 void update_vsyscall(struct timekeeper *tk)
 {
+       int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
        struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;
 
+       /* Mark the new vclock used. */
+       BUILD_BUG_ON(VCLOCK_MAX >= 32);
+       WRITE_ONCE(vclocks_used, READ_ONCE(vclocks_used) | (1 << vclock_mode));
+
        gtod_write_begin(vdata);
 
        /* copy vsyscall data */
-       vdata->vclock_mode      = tk->tkr_mono.clock->archdata.vclock_mode;
+       vdata->vclock_mode      = vclock_mode;
        vdata->cycle_last       = tk->tkr_mono.cycle_last;
        vdata->mask             = tk->tkr_mono.mask;
        vdata->mult             = tk->tkr_mono.mult;
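The vclocks_used bitmap written above is what the new vdso fault handlers consult (via vclock_was_used()) before inserting the HPET or pvclock PFNs. The helper itself is not part of this hunk; a minimal sketch of what it presumably looks like in vgtod.h is below (an assumption, not the committed code):

    extern int vclocks_used;

    /* Sketch: true once update_vsyscall() has seen this vclock mode. */
    static inline bool vclock_was_used(int vclock)
    {
        return READ_ONCE(vclocks_used) & (1 << vclock);
    }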
diff --git a/arch/x86/events/Makefile b/arch/x86/events/Makefile
new file mode 100644 (file)
index 0000000..7d1ecff
--- /dev/null
@@ -0,0 +1,7 @@
+obj-y                  += core.o
+
+obj-$(CONFIG_CPU_SUP_AMD)               += amd/core.o amd/uncore.o
+obj-$(CONFIG_X86_LOCAL_APIC)            += amd/ibs.o
+ifdef CONFIG_AMD_IOMMU
+obj-$(CONFIG_CPU_SUP_AMD)               += amd/iommu.o
+endif
similarity index 99%
rename from arch/x86/kernel/cpu/perf_event_amd.c
rename to arch/x86/events/amd/core.c
index 58610539b0486ec30a357467e2425a4cdc5c5fd0..51b16583679c7b1f5214c1aa63f066050c590864 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/slab.h>
 #include <asm/apicdef.h>
 
-#include "perf_event.h"
+#include "../../kernel/cpu/perf_event.h"
 
 static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
similarity index 98%
rename from arch/x86/kernel/cpu/perf_event_amd_ibs.c
rename to arch/x86/events/amd/ibs.c
index 989d3c215d2bcea3b1a55bc2e8a42d9e20b31a98..a8abd082d932356cbb7128b6d5abcef1c67cac37 100644 (file)
@@ -14,7 +14,7 @@
 
 #include <asm/apic.h>
 
-#include "perf_event.h"
+#include "../../kernel/cpu/perf_event.h"
 
 static u32 ibs_caps;
 
@@ -670,7 +670,7 @@ static __init int perf_event_ibs_init(void)
        perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");
 
        register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
-       printk(KERN_INFO "perf: AMD IBS detected (0x%08x)\n", ibs_caps);
+       pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
 
        return 0;
 }
@@ -774,14 +774,14 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
                pci_read_config_dword(cpu_cfg, IBSCTL, &value);
                if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
                        pci_dev_put(cpu_cfg);
-                       printk(KERN_DEBUG "Failed to setup IBS LVT offset, "
-                              "IBSCTL = 0x%08x\n", value);
+                       pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
+                                value);
                        return -EINVAL;
                }
        } while (1);
 
        if (!nodes) {
-               printk(KERN_DEBUG "No CPU node configured for IBS\n");
+               pr_debug("No CPU node configured for IBS\n");
                return -ENODEV;
        }
 
@@ -810,7 +810,7 @@ static void force_ibs_eilvt_setup(void)
        preempt_enable();
 
        if (offset == APIC_EILVT_NR_MAX) {
-               printk(KERN_DEBUG "No EILVT entry available\n");
+               pr_debug("No EILVT entry available\n");
                return;
        }
 
similarity index 99%
rename from arch/x86/kernel/cpu/perf_event_amd_iommu.c
rename to arch/x86/events/amd/iommu.c
index 97242a9242bdfde7dabba99e9a0d245ffe770c01..629bc700eb08d0c429845df6a1b28a6feaa6e9a9 100644 (file)
@@ -16,8 +16,8 @@
 #include <linux/cpumask.h>
 #include <linux/slab.h>
 
-#include "perf_event.h"
-#include "perf_event_amd_iommu.h"
+#include "../../kernel/cpu/perf_event.h"
+#include "iommu.h"
 
 #define COUNTER_SHIFT          16
 
similarity index 99%
rename from arch/x86/kernel/cpu/perf_event_amd_uncore.c
rename to arch/x86/events/amd/uncore.c
index 49742746a6c963c4a86c08c773430087225fec53..19a17363a21d073ee19c23f21810018192d613ac 100644 (file)
@@ -536,7 +536,7 @@ static int __init amd_uncore_init(void)
                if (ret)
                        goto fail_nb;
 
-               printk(KERN_INFO "perf: AMD NB counters detected\n");
+               pr_info("perf: AMD NB counters detected\n");
                ret = 0;
        }
 
@@ -550,7 +550,7 @@ static int __init amd_uncore_init(void)
                if (ret)
                        goto fail_l2;
 
-               printk(KERN_INFO "perf: AMD L2I counters detected\n");
+               pr_info("perf: AMD L2I counters detected\n");
                ret = 0;
        }
 
similarity index 99%
rename from arch/x86/kernel/cpu/perf_event.c
rename to arch/x86/events/core.c
index 1b443db2db5005d62ed4e08f918fdfcaa071ac37..90ca601b5d5c51373432abd1bb996b28d82e6628 100644 (file)
@@ -37,7 +37,7 @@
 #include <asm/desc.h>
 #include <asm/ldt.h>
 
-#include "perf_event.h"
+#include "../kernel/cpu/perf_event.h"
 
 struct x86_pmu x86_pmu __read_mostly;
 
@@ -254,15 +254,16 @@ static bool check_hw_exists(void)
         * We still allow the PMU driver to operate:
         */
        if (bios_fail) {
-               printk(KERN_CONT "Broken BIOS detected, complain to your hardware vendor.\n");
-               printk(KERN_ERR FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n", reg_fail, val_fail);
+               pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
+               pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
+                             reg_fail, val_fail);
        }
 
        return true;
 
 msr_fail:
-       printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
-       printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
+       pr_cont("Broken PMU hardware detected, using software events only.\n");
+       pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
                boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
                reg, val_new);
 
index 7bfc85bbb8ffc0578011ceac2c08548bd140ade3..99afb665a004cb8ea82c1751f6ef29b12d116cc6 100644 (file)
@@ -151,12 +151,6 @@ static inline int alternatives_text_reserved(void *start, void *end)
        ALTINSTR_REPLACEMENT(newinstr2, feature2, 2)                    \
        ".popsection"
 
-/*
- * This must be included *after* the definition of ALTERNATIVE due to
- * <asm/arch_hweight.h>
- */
-#include <asm/cpufeature.h>
-
 /*
  * Alternative instructions for different CPU types or capabilities.
  *
index c80f6b6f3da222dc86dc00747429b618589502eb..0899cfc8dfe8f84f98edc6a0c19e7ff40225ae74 100644 (file)
@@ -6,7 +6,6 @@
 
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
-#include <asm/processor.h>
 #include <asm/apicdef.h>
 #include <linux/atomic.h>
 #include <asm/fixmap.h>
index 259a7c1ef7099bc89b30cdb0ebbd3a311fa88e18..02e799fa43d1b19c878290f6424e1b2f7293074d 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_HWEIGHT_H
 #define _ASM_X86_HWEIGHT_H
 
+#include <asm/cpufeatures.h>
+
 #ifdef CONFIG_64BIT
 /* popcnt %edi, %eax -- redundant REX prefix for alignment */
 #define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
index a584e1c50918406a0398cf4a013432e47abf143e..bfb28caf97b1be1f2d6aa8893bd905a39030e310 100644 (file)
@@ -6,18 +6,17 @@
 
 /*
  * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
+ * And yes, this might be required on UP too when we're talking
  * to devices.
  */
 
 #ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+#define mb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "mfence", \
+                                     X86_FEATURE_XMM2) ::: "memory", "cc")
+#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "lfence", \
+                                      X86_FEATURE_XMM2) ::: "memory", "cc")
+#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,0(%%esp)", "sfence", \
+                                      X86_FEATURE_XMM2) ::: "memory", "cc")
 #else
 #define mb()   asm volatile("mfence":::"memory")
 #define rmb()  asm volatile("lfence":::"memory")
index cfe3b954d5e41cbd96be1fdb147c164b63581814..7766d1cf096e80d56562d63876f8ca65df869199 100644 (file)
@@ -91,7 +91,7 @@ set_bit(long nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __set_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __set_bit(long nr, volatile unsigned long *addr)
 {
        asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
@@ -128,13 +128,13 @@ clear_bit(long nr, volatile unsigned long *addr)
  * clear_bit() is atomic and implies release semantics before the memory
  * operation. It can be used for an unlock.
  */
-static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
        barrier();
        clear_bit(nr, addr);
 }
 
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
 {
        asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
@@ -151,7 +151,7 @@ static inline void __clear_bit(long nr, volatile unsigned long *addr)
  * No memory barrier is required here, because x86 cannot reorder stores past
  * older loads. Same principle as spin_unlock.
  */
-static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
 {
        barrier();
        __clear_bit(nr, addr);
@@ -166,7 +166,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
  * If it's called on the same region of memory simultaneously, the effect
  * may be that only one operation succeeds.
  */
-static inline void __change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void __change_bit(long nr, volatile unsigned long *addr)
 {
        asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
 }
@@ -180,7 +180,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void change_bit(long nr, volatile unsigned long *addr)
+static __always_inline void change_bit(long nr, volatile unsigned long *addr)
 {
        if (IS_IMMEDIATE(nr)) {
                asm volatile(LOCK_PREFIX "xorb %1,%0"
@@ -201,7 +201,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline int test_and_set_bit(long nr, volatile unsigned long *addr)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", "c");
 }
@@ -228,7 +228,7 @@ test_and_set_bit_lock(long nr, volatile unsigned long *addr)
  * If two examples of this operation race, one can appear to succeed
  * but actually fail.  You must protect multiple accesses with a lock.
  */
-static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
+static __always_inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
        int oldbit;
 
@@ -247,7 +247,7 @@ static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", "c");
 }
@@ -268,7 +268,7 @@ static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
  * accessed from a hypervisor on the same CPU if running in a VM: don't change
  * this without also updating arch/x86/kernel/kvm.c
  */
-static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
+static __always_inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
        int oldbit;
 
@@ -280,7 +280,7 @@ static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
 }
 
 /* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
        int oldbit;
 
@@ -300,7 +300,7 @@ static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
+static __always_inline int test_and_change_bit(long nr, volatile unsigned long *addr)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", "c");
 }
@@ -311,7 +311,7 @@ static __always_inline int constant_test_bit(long nr, const volatile unsigned lo
                (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
 }
 
-static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
+static __always_inline int variable_test_bit(long nr, volatile const unsigned long *addr)
 {
        int oldbit;
 
@@ -343,7 +343,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
 {
        asm("rep; bsf %1,%0"
                : "=r" (word)
@@ -357,7 +357,7 @@ static inline unsigned long __ffs(unsigned long word)
  *
  * Undefined if no zero exists, so code should check against ~0UL first.
  */
-static inline unsigned long ffz(unsigned long word)
+static __always_inline unsigned long ffz(unsigned long word)
 {
        asm("rep; bsf %1,%0"
                : "=r" (word)
@@ -371,7 +371,7 @@ static inline unsigned long ffz(unsigned long word)
  *
  * Undefined if no set bit exists, so code should check against 0 first.
  */
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
 {
        asm("bsr %1,%0"
            : "=r" (word)
@@ -393,7 +393,7 @@ static inline unsigned long __fls(unsigned long word)
  * set bit if value is nonzero. The first (least significant) bit
  * is at position 1.
  */
-static inline int ffs(int x)
+static __always_inline int ffs(int x)
 {
        int r;
 
@@ -434,7 +434,7 @@ static inline int ffs(int x)
  * set bit if value is nonzero. The last (most significant) bit is
  * at position 32.
  */
-static inline int fls(int x)
+static __always_inline int fls(int x)
 {
        int r;
 
index eda81dc0f4ae091c5ff085450ff277f68aa933a9..d194266acb28e52d237c19c21291633c15d99c9e 100644
@@ -3,10 +3,11 @@
 #ifndef _ASM_X86_CLOCKSOURCE_H
 #define _ASM_X86_CLOCKSOURCE_H
 
-#define VCLOCK_NONE 0  /* No vDSO clock available.     */
-#define VCLOCK_TSC  1  /* vDSO should use vread_tsc.   */
-#define VCLOCK_HPET 2  /* vDSO should use vread_hpet.  */
-#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
+#define VCLOCK_NONE    0  /* No vDSO clock available.  */
+#define VCLOCK_TSC     1  /* vDSO should use vread_tsc.        */
+#define VCLOCK_HPET    2  /* vDSO should use vread_hpet.       */
+#define VCLOCK_PVCLOCK 3 /* vDSO should use vread_pvclock. */
+#define VCLOCK_MAX     3
 
 struct arch_clocksource_data {
        int vclock_mode;
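
VCLOCK_MAX is new in this hunk. One plausible use (an assumption, not shown by the hunk itself) is range-checking a vclock_mode value, sketched standalone below with the constants copied from above.

#include <assert.h>

#define VCLOCK_NONE    0
#define VCLOCK_TSC     1
#define VCLOCK_HPET    2
#define VCLOCK_PVCLOCK 3
#define VCLOCK_MAX     3

/* Hypothetical helper: valid modes are VCLOCK_NONE..VCLOCK_MAX. */
static int vclock_mode_is_valid(int mode)
{
        return mode >= VCLOCK_NONE && mode <= VCLOCK_MAX;
}

int main(void)
{
        assert(vclock_mode_is_valid(VCLOCK_PVCLOCK));
        assert(!vclock_mode_is_valid(VCLOCK_MAX + 1));
        return 0;
}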
index ad19841eddfe142fec6b2b27dc329059d8fb7296..9733361fed6f4f7368d92f98d21b0d0edfbf6261 100644
@@ -2,6 +2,7 @@
 #define ASM_X86_CMPXCHG_H
 
 #include <linux/compiler.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
 /*
index 7ad8c946429776b6dad30238e9d5e2d65a1a3c87..9fba7a5dd24afd39cf6e60a61ba06b73d92e470a 100644
@@ -1,288 +1,7 @@
-/*
- * Defines x86 CPU feature bits
- */
 #ifndef _ASM_X86_CPUFEATURE_H
 #define _ASM_X86_CPUFEATURE_H
 
-#ifndef _ASM_X86_REQUIRED_FEATURES_H
-#include <asm/required-features.h>
-#endif
-
-#ifndef _ASM_X86_DISABLED_FEATURES_H
-#include <asm/disabled-features.h>
-#endif
-
-#define NCAPINTS       16      /* N 32-bit words worth of info */
-#define NBUGINTS       1       /* N 32-bit bug flags */
-
-/*
- * Note: If the comment begins with a quoted string, that string is used
- * in /proc/cpuinfo instead of the macro name.  If the string is "",
- * this feature bit is not displayed in /proc/cpuinfo at all.
- */
-
-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
-#define X86_FEATURE_FPU                ( 0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME                ( 0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE         ( 0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE                ( 0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC                ( 0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR                ( 0*32+ 5) /* Model-Specific Registers */
-#define X86_FEATURE_PAE                ( 0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE                ( 0*32+ 7) /* Machine Check Exception */
-#define X86_FEATURE_CX8                ( 0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC       ( 0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP                ( 0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR       ( 0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE                ( 0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA                ( 0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV       ( 0*32+15) /* CMOV instructions */
-                                         /* (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT                ( 0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36      ( 0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN         ( 0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLUSH    ( 0*32+19) /* CLFLUSH instruction */
-#define X86_FEATURE_DS         ( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI       ( 0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX                ( 0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR       ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
-#define X86_FEATURE_XMM                ( 0*32+25) /* "sse" */
-#define X86_FEATURE_XMM2       ( 0*32+26) /* "sse2" */
-#define X86_FEATURE_SELFSNOOP  ( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT         ( 0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC                ( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64       ( 0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE                ( 0*32+31) /* Pending Break Enable */
-
-/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
-/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL    ( 1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP         ( 1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX         ( 1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT     ( 1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT   ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
-#define X86_FEATURE_GBPAGES    ( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP     ( 1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM         ( 1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT   ( 1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW      ( 1*32+31) /* 3DNow! */
-
-/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY   ( 2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN    ( 2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI       ( 2*32+ 3) /* LongRun table interface */
-
-/* Other features, Linux-defined mapping, word 3 */
-/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX      ( 3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR    ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR  ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR        ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-/* cpu types for specific tunings: */
-#define X86_FEATURE_K8         ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_K7         ( 3*32+ 5) /* "" Athlon */
-#define X86_FEATURE_P3         ( 3*32+ 6) /* "" P3 */
-#define X86_FEATURE_P4         ( 3*32+ 7) /* "" P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP         ( 3*32+ 9) /* smp kernel running on up */
-/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS       ( 3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS                ( 3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32  ( 3*32+14) /* "" syscall in ia32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
-#define X86_FEATURE_REP_GOOD   ( 3*32+16) /* rep microcode works well */
-#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
-#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
-/* free, was #define X86_FEATURE_11AP  ( 3*32+19) * "" Bad local APIC aka 11AP */
-#define X86_FEATURE_NOPL       ( 3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS     ( 3*32+21) /* "" Always-present feature */
-#define X86_FEATURE_XTOPOLOGY  ( 3*32+22) /* cpu topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC        ( 3*32+24) /* TSC does not stop in C states */
-/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
-#define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-#define X86_FEATURE_EAGER_FPU  ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-
-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3       ( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ  ( 4*32+ 1) /* PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64     ( 4*32+ 2) /* 64-bit Debug Store */
-#define X86_FEATURE_MWAIT      ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
-#define X86_FEATURE_DSCPL      ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
-#define X86_FEATURE_VMX                ( 4*32+ 5) /* Hardware virtualization */
-#define X86_FEATURE_SMX                ( 4*32+ 6) /* Safer mode */
-#define X86_FEATURE_EST                ( 4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2                ( 4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3      ( 4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_CID                ( 4*32+10) /* Context ID */
-#define X86_FEATURE_SDBG       ( 4*32+11) /* Silicon Debug */
-#define X86_FEATURE_FMA                ( 4*32+12) /* Fused multiply-add */
-#define X86_FEATURE_CX16       ( 4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR       ( 4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM       ( 4*32+15) /* Performance Capabilities */
-#define X86_FEATURE_PCID       ( 4*32+17) /* Process Context Identifiers */
-#define X86_FEATURE_DCA                ( 4*32+18) /* Direct Cache Access */
-#define X86_FEATURE_XMM4_1     ( 4*32+19) /* "sse4_1" SSE-4.1 */
-#define X86_FEATURE_XMM4_2     ( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC     ( 4*32+21) /* x2APIC */
-#define X86_FEATURE_MOVBE      ( 4*32+22) /* MOVBE instruction */
-#define X86_FEATURE_POPCNT      ( 4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
-#define X86_FEATURE_AES                ( 4*32+25) /* AES instructions */
-#define X86_FEATURE_XSAVE      ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
-#define X86_FEATURE_OSXSAVE    ( 4*32+27) /* "" XSAVE enabled in the OS */
-#define X86_FEATURE_AVX                ( 4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C       ( 4*32+29) /* 16-bit fp conversions */
-#define X86_FEATURE_RDRAND     ( 4*32+30) /* The RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
-
-/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE     ( 5*32+ 2) /* "rng" RNG present (xstore) */
-#define X86_FEATURE_XSTORE_EN  ( 5*32+ 3) /* "rng_en" RNG enabled */
-#define X86_FEATURE_XCRYPT     ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
-#define X86_FEATURE_XCRYPT_EN  ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2       ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN    ( 5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE                ( 5*32+10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN     ( 5*32+11) /* PHE enabled */
-#define X86_FEATURE_PMM                ( 5*32+12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN     ( 5*32+13) /* PMM enabled */
-
-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM    ( 6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_SVM                ( 6*32+ 2) /* Secure virtual machine */
-#define X86_FEATURE_EXTAPIC    ( 6*32+ 3) /* Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
-#define X86_FEATURE_ABM                ( 6*32+ 5) /* Advanced bit manipulation */
-#define X86_FEATURE_SSE4A      ( 6*32+ 6) /* SSE-4A */
-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW       ( 6*32+ 9) /* OS Visible Workaround */
-#define X86_FEATURE_IBS                ( 6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP                ( 6*32+11) /* extended AVX instructions */
-#define X86_FEATURE_SKINIT     ( 6*32+12) /* SKINIT/STGI instructions */
-#define X86_FEATURE_WDT                ( 6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP                ( 6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4       ( 6*32+16) /* 4 operands MAC instructions */
-#define X86_FEATURE_TCE                ( 6*32+17) /* translation cache extension */
-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM                ( 6*32+21) /* trailing bit manipulations */
-#define X86_FEATURE_TOPOEXT    ( 6*32+22) /* topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
-#define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
-#define X86_FEATURE_BPEXT      (6*32+26) /* data breakpoint extension */
-#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
-#define X86_FEATURE_MWAITX     ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
-
-/*
- * Auxiliary flags: Linux defined - For features scattered in various
- * CPUID levels like 0x6, 0xA etc, word 7.
- *
- * Reuse free bits when adding new feature flags!
- */
-
-#define X86_FEATURE_CPB                ( 7*32+ 2) /* AMD Core Performance Boost */
-#define X86_FEATURE_EPB                ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
-
-#define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
-
-#define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
-
-/* Virtualization flags: Linux defined, word 8 */
-#define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
-#define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
-#define X86_FEATURE_EPT         ( 8*32+ 3) /* Intel Extended Page Table */
-#define X86_FEATURE_VPID        ( 8*32+ 4) /* Intel Virtual Processor ID */
-
-#define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */
-#define X86_FEATURE_XENPV       ( 8*32+16) /* "" Xen paravirtual guest */
-
-
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
-#define X86_FEATURE_FSGSBASE   ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
-#define X86_FEATURE_BMI1       ( 9*32+ 3) /* 1st group bit manipulation extensions */
-#define X86_FEATURE_HLE                ( 9*32+ 4) /* Hardware Lock Elision */
-#define X86_FEATURE_AVX2       ( 9*32+ 5) /* AVX2 instructions */
-#define X86_FEATURE_SMEP       ( 9*32+ 7) /* Supervisor Mode Execution Protection */
-#define X86_FEATURE_BMI2       ( 9*32+ 8) /* 2nd group bit manipulation extensions */
-#define X86_FEATURE_ERMS       ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
-#define X86_FEATURE_INVPCID    ( 9*32+10) /* Invalidate Processor Context ID */
-#define X86_FEATURE_RTM                ( 9*32+11) /* Restricted Transactional Memory */
-#define X86_FEATURE_CQM                ( 9*32+12) /* Cache QoS Monitoring */
-#define X86_FEATURE_MPX                ( 9*32+14) /* Memory Protection Extension */
-#define X86_FEATURE_AVX512F    ( 9*32+16) /* AVX-512 Foundation */
-#define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
-#define X86_FEATURE_ADX                ( 9*32+19) /* The ADCX and ADOX instructions */
-#define X86_FEATURE_SMAP       ( 9*32+20) /* Supervisor Mode Access Prevention */
-#define X86_FEATURE_PCOMMIT    ( 9*32+22) /* PCOMMIT instruction */
-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
-#define X86_FEATURE_CLWB       ( 9*32+24) /* CLWB instruction */
-#define X86_FEATURE_AVX512PF   ( 9*32+26) /* AVX-512 Prefetch */
-#define X86_FEATURE_AVX512ER   ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
-#define X86_FEATURE_AVX512CD   ( 9*32+28) /* AVX-512 Conflict Detection */
-#define X86_FEATURE_SHA_NI     ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
-
-/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
-#define X86_FEATURE_XSAVEOPT   (10*32+ 0) /* XSAVEOPT */
-#define X86_FEATURE_XSAVEC     (10*32+ 1) /* XSAVEC */
-#define X86_FEATURE_XGETBV1    (10*32+ 2) /* XGETBV with ECX = 1 */
-#define X86_FEATURE_XSAVES     (10*32+ 3) /* XSAVES/XRSTORS */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
-#define X86_FEATURE_CQM_LLC    (11*32+ 1) /* LLC QoS if 1 */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
-
-/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
-#define X86_FEATURE_CLZERO     (13*32+0) /* CLZERO instruction */
-
-/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
-#define X86_FEATURE_DTHERM     (14*32+ 0) /* Digital Thermal Sensor */
-#define X86_FEATURE_IDA                (14*32+ 1) /* Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT       (14*32+ 2) /* Always Running APIC Timer */
-#define X86_FEATURE_PLN                (14*32+ 4) /* Intel Power Limit Notification */
-#define X86_FEATURE_PTS                (14*32+ 6) /* Intel Package Thermal Status */
-#define X86_FEATURE_HWP                (14*32+ 7) /* Intel Hardware P-states */
-#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
-#define X86_FEATURE_HWP_EPP    (14*32+10) /* HWP Energy Perf. Preference */
-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
-
-/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
-#define X86_FEATURE_NPT                (15*32+ 0) /* Nested Page Table support */
-#define X86_FEATURE_LBRV       (15*32+ 1) /* LBR Virtualization support */
-#define X86_FEATURE_SVML       (15*32+ 2) /* "svm_lock" SVM locking MSR */
-#define X86_FEATURE_NRIPS      (15*32+ 3) /* "nrip_save" SVM next_rip save */
-#define X86_FEATURE_TSCRATEMSR  (15*32+ 4) /* "tsc_scale" TSC scaling support */
-#define X86_FEATURE_VMCBCLEAN   (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
-
-/*
- * BUG word(s)
- */
-#define X86_BUG(x)             (NCAPINTS*32 + (x))
-
-#define X86_BUG_F00F           X86_BUG(0) /* Intel F00F */
-#define X86_BUG_FDIV           X86_BUG(1) /* FPU FDIV */
-#define X86_BUG_COMA           X86_BUG(2) /* Cyrix 6x86 coma */
-#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
-#define X86_BUG_AMD_APIC_C1E   X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
-#define X86_BUG_11AP           X86_BUG(5) /* Bad local APIC aka 11AP */
-#define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
-#define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
-#define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#include <asm/processor.h>
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 
@@ -406,106 +125,19 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define cpu_has_osxsave                boot_cpu_has(X86_FEATURE_OSXSAVE)
 #define cpu_has_hypervisor     boot_cpu_has(X86_FEATURE_HYPERVISOR)
 /*
- * Do not add any more of those clumsy macros - use static_cpu_has_safe() for
+ * Do not add any more of those clumsy macros - use static_cpu_has() for
  * fast paths and boot_cpu_has() otherwise!
  */
 
-#if __GNUC__ >= 4 && defined(CONFIG_X86_FAST_FEATURE_TESTS)
-extern void warn_pre_alternatives(void);
-extern bool __static_cpu_has_safe(u16 bit);
-
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS)
 /*
  * Static testing of CPU features.  Used the same as boot_cpu_has().
- * These are only valid after alternatives have run, but will statically
- * patch the target code for additional performance.
+ * These will statically patch the target code for additional
+ * performance.
  */
-static __always_inline __pure bool __static_cpu_has(u16 bit)
-{
-#ifdef CC_HAVE_ASM_GOTO
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-
-               /*
-                * Catch too early usage of this before alternatives
-                * have run.
-                */
-               asm_volatile_goto("1: jmp %l[t_warn]\n"
-                        "2:\n"
-                        ".section .altinstructions,\"a\"\n"
-                        " .long 1b - .\n"
-                        " .long 0\n"           /* no replacement */
-                        " .word %P0\n"         /* 1: do replace */
-                        " .byte 2b - 1b\n"     /* source len */
-                        " .byte 0\n"           /* replacement len */
-                        " .byte 0\n"           /* pad len */
-                        ".previous\n"
-                        /* skipping size check since replacement size = 0 */
-                        : : "i" (X86_FEATURE_ALWAYS) : : t_warn);
-
-#endif
-
-               asm_volatile_goto("1: jmp %l[t_no]\n"
-                        "2:\n"
-                        ".section .altinstructions,\"a\"\n"
-                        " .long 1b - .\n"
-                        " .long 0\n"           /* no replacement */
-                        " .word %P0\n"         /* feature bit */
-                        " .byte 2b - 1b\n"     /* source len */
-                        " .byte 0\n"           /* replacement len */
-                        " .byte 0\n"           /* pad len */
-                        ".previous\n"
-                        /* skipping size check since replacement size = 0 */
-                        : : "i" (bit) : : t_no);
-               return true;
-       t_no:
-               return false;
-
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-       t_warn:
-               warn_pre_alternatives();
-               return false;
-#endif
-
-#else /* CC_HAVE_ASM_GOTO */
-
-               u8 flag;
-               /* Open-coded due to __stringify() in ALTERNATIVE() */
-               asm volatile("1: movb $0,%0\n"
-                            "2:\n"
-                            ".section .altinstructions,\"a\"\n"
-                            " .long 1b - .\n"
-                            " .long 3f - .\n"
-                            " .word %P1\n"             /* feature bit */
-                            " .byte 2b - 1b\n"         /* source len */
-                            " .byte 4f - 3f\n"         /* replacement len */
-                            " .byte 0\n"               /* pad len */
-                            ".previous\n"
-                            ".section .discard,\"aw\",@progbits\n"
-                            " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
-                            ".previous\n"
-                            ".section .altinstr_replacement,\"ax\"\n"
-                            "3: movb $1,%0\n"
-                            "4:\n"
-                            ".previous\n"
-                            : "=qm" (flag) : "i" (bit));
-               return flag;
-
-#endif /* CC_HAVE_ASM_GOTO */
-}
-
-#define static_cpu_has(bit)                                    \
-(                                                              \
-       __builtin_constant_p(boot_cpu_has(bit)) ?               \
-               boot_cpu_has(bit) :                             \
-       __builtin_constant_p(bit) ?                             \
-               __static_cpu_has(bit) :                         \
-               boot_cpu_has(bit)                               \
-)
-
-static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
+static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
-#ifdef CC_HAVE_ASM_GOTO
-               asm_volatile_goto("1: jmp %l[t_dynamic]\n"
+               asm_volatile_goto("1: jmp 6f\n"
                         "2:\n"
                         ".skip -(((5f-4f) - (2b-1b)) > 0) * "
                                 "((5f-4f) - (2b-1b)),0x90\n"
@@ -530,66 +162,34 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
                         " .byte 0\n"                   /* repl len */
                         " .byte 0\n"                   /* pad len */
                         ".previous\n"
-                        : : "i" (bit), "i" (X86_FEATURE_ALWAYS)
-                        : : t_dynamic, t_no);
+                        ".section .altinstr_aux,\"ax\"\n"
+                        "6:\n"
+                        " testb %[bitnum],%[cap_byte]\n"
+                        " jnz %l[t_yes]\n"
+                        " jmp %l[t_no]\n"
+                        ".previous\n"
+                        : : "i" (bit), "i" (X86_FEATURE_ALWAYS),
+                            [bitnum] "i" (1 << (bit & 7)),
+                            [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3])
+                        : : t_yes, t_no);
+       t_yes:
                return true;
        t_no:
                return false;
-       t_dynamic:
-               return __static_cpu_has_safe(bit);
-#else
-               u8 flag;
-               /* Open-coded due to __stringify() in ALTERNATIVE() */
-               asm volatile("1: movb $2,%0\n"
-                            "2:\n"
-                            ".section .altinstructions,\"a\"\n"
-                            " .long 1b - .\n"          /* src offset */
-                            " .long 3f - .\n"          /* repl offset */
-                            " .word %P2\n"             /* always replace */
-                            " .byte 2b - 1b\n"         /* source len */
-                            " .byte 4f - 3f\n"         /* replacement len */
-                            " .byte 0\n"               /* pad len */
-                            ".previous\n"
-                            ".section .discard,\"aw\",@progbits\n"
-                            " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
-                            ".previous\n"
-                            ".section .altinstr_replacement,\"ax\"\n"
-                            "3: movb $0,%0\n"
-                            "4:\n"
-                            ".previous\n"
-                            ".section .altinstructions,\"a\"\n"
-                            " .long 1b - .\n"          /* src offset */
-                            " .long 5f - .\n"          /* repl offset */
-                            " .word %P1\n"             /* feature bit */
-                            " .byte 4b - 3b\n"         /* src len */
-                            " .byte 6f - 5f\n"         /* repl len */
-                            " .byte 0\n"               /* pad len */
-                            ".previous\n"
-                            ".section .discard,\"aw\",@progbits\n"
-                            " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
-                            ".previous\n"
-                            ".section .altinstr_replacement,\"ax\"\n"
-                            "5: movb $1,%0\n"
-                            "6:\n"
-                            ".previous\n"
-                            : "=qm" (flag)
-                            : "i" (bit), "i" (X86_FEATURE_ALWAYS));
-               return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
-#endif /* CC_HAVE_ASM_GOTO */
 }
 
-#define static_cpu_has_safe(bit)                               \
+#define static_cpu_has(bit)                                    \
 (                                                              \
        __builtin_constant_p(boot_cpu_has(bit)) ?               \
                boot_cpu_has(bit) :                             \
-               _static_cpu_has_safe(bit)                       \
+               _static_cpu_has(bit)                            \
 )
 #else
 /*
- * gcc 3.x is too stupid to do the static test; fall back to dynamic.
+ * Fall back to dynamic for gcc versions which don't support asm goto. Should be
+ * a minority now anyway.
  */
 #define static_cpu_has(bit)            boot_cpu_has(bit)
-#define static_cpu_has_safe(bit)       boot_cpu_has(bit)
 #endif
 
 #define cpu_has_bug(c, bit)            cpu_has(c, (bit))
@@ -597,7 +197,6 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
 #define clear_cpu_bug(c, bit)          clear_cpu_cap(c, (bit))
 
 #define static_cpu_has_bug(bit)                static_cpu_has((bit))
-#define static_cpu_has_bug_safe(bit)   static_cpu_has_safe((bit))
 #define boot_cpu_has_bug(bit)          cpu_has_bug(&boot_cpu_data, (bit))
 
 #define MAX_CPU_FEATURES               (NCAPINTS * 32)
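
The rework above folds static_cpu_has_safe() into static_cpu_has() and keeps only the asm-goto implementation, adding an .altinstr_aux stub that tests the capability byte directly until alternatives have patched the jump. The userspace sketch below only illustrates the asm-goto control-flow idiom (jumping straight to a C label instead of materialising a boolean); it is not the kernel's alternatives machinery.

/* Requires GCC or clang with asm goto support; x86 only (uses "jmp"). */
#include <stdio.h>

static int has_feature(void)
{
        /* Stand-in for the patched jump: unconditionally take the
         * "feature absent" path by jumping to a C label. */
        asm goto("jmp %l[t_no]" : : : : t_no);
        return 1;
t_no:
        return 0;
}

int main(void)
{
        printf("feature: %s\n", has_feature() ? "present" : "absent");
        return 0;
}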
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
new file mode 100644
index 0000000..0ceb6ad
--- /dev/null
@@ -0,0 +1,288 @@
+#ifndef _ASM_X86_CPUFEATURES_H
+#define _ASM_X86_CPUFEATURES_H
+
+#ifndef _ASM_X86_REQUIRED_FEATURES_H
+#include <asm/required-features.h>
+#endif
+
+#ifndef _ASM_X86_DISABLED_FEATURES_H
+#include <asm/disabled-features.h>
+#endif
+
+/*
+ * Defines x86 CPU feature bits
+ */
+#define NCAPINTS       16      /* N 32-bit words worth of info */
+#define NBUGINTS       1       /* N 32-bit bug flags */
+
+/*
+ * Note: If the comment begins with a quoted string, that string is used
+ * in /proc/cpuinfo instead of the macro name.  If the string is "",
+ * this feature bit is not displayed in /proc/cpuinfo at all.
+ */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
+#define X86_FEATURE_FPU                ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_VME                ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_DE         ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_PSE                ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_TSC                ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_MSR                ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_PAE                ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_MCE                ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_CX8                ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC       ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_SEP                ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR       ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_PGE                ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_MCA                ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_CMOV       ( 0*32+15) /* CMOV instructions */
+                                         /* (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT                ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PSE36      ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PN         ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_CLFLUSH    ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_DS         ( 0*32+21) /* "dts" Debug Store */
+#define X86_FEATURE_ACPI       ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_MMX                ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_FXSR       ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_XMM                ( 0*32+25) /* "sse" */
+#define X86_FEATURE_XMM2       ( 0*32+26) /* "sse2" */
+#define X86_FEATURE_SELFSNOOP  ( 0*32+27) /* "ss" CPU self snoop */
+#define X86_FEATURE_HT         ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_ACC                ( 0*32+29) /* "tm" Automatic clock control */
+#define X86_FEATURE_IA64       ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_PBE                ( 0*32+31) /* Pending Break Enable */
+
+/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* Don't duplicate feature flags which are redundant with Intel! */
+#define X86_FEATURE_SYSCALL    ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP         ( 1*32+19) /* MP Capable. */
+#define X86_FEATURE_NX         ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_MMXEXT     ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT   ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_GBPAGES    ( 1*32+26) /* "pdpe1gb" GB pages */
+#define X86_FEATURE_RDTSCP     ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_LM         ( 1*32+29) /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT   ( 1*32+30) /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW      ( 1*32+31) /* 3DNow! */
+
+/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
+#define X86_FEATURE_RECOVERY   ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_LONGRUN    ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LRTI       ( 2*32+ 3) /* LongRun table interface */
+
+/* Other features, Linux-defined mapping, word 3 */
+/* This range is used for feature bits which conflict or are synthesized */
+#define X86_FEATURE_CXMMX      ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR    ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR  ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR        ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+/* cpu types for specific tunings: */
+#define X86_FEATURE_K8         ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K7         ( 3*32+ 5) /* "" Athlon */
+#define X86_FEATURE_P3         ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P4         ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_UP         ( 3*32+ 9) /* smp kernel running on up */
+/* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS       ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_BTS                ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_SYSCALL32  ( 3*32+14) /* "" syscall in ia32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */
+#define X86_FEATURE_REP_GOOD   ( 3*32+16) /* rep microcode works well */
+#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
+/* free, was #define X86_FEATURE_11AP  ( 3*32+19) * "" Bad local APIC aka 11AP */
+#define X86_FEATURE_NOPL       ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS     ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_XTOPOLOGY  ( 3*32+22) /* cpu topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC        ( 3*32+24) /* TSC does not stop in C states */
+/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
+#define X86_FEATURE_EXTD_APICID        ( 3*32+26) /* has extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM     ( 3*32+27) /* multi-node processor */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
+#define X86_FEATURE_EAGER_FPU  ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
+#define X86_FEATURE_XMM3       ( 4*32+ 0) /* "pni" SSE-3 */
+#define X86_FEATURE_PCLMULQDQ  ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64     ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT      ( 4*32+ 3) /* "monitor" Monitor/Mwait support */
+#define X86_FEATURE_DSCPL      ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
+#define X86_FEATURE_VMX                ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_SMX                ( 4*32+ 6) /* Safer mode */
+#define X86_FEATURE_EST                ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2                ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3      ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_CID                ( 4*32+10) /* Context ID */
+#define X86_FEATURE_SDBG       ( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_FMA                ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_CX16       ( 4*32+13) /* CMPXCHG16B */
+#define X86_FEATURE_XTPR       ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM       ( 4*32+15) /* Performance Capabilities */
+#define X86_FEATURE_PCID       ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_DCA                ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_XMM4_1     ( 4*32+19) /* "sse4_1" SSE-4.1 */
+#define X86_FEATURE_XMM4_2     ( 4*32+20) /* "sse4_2" SSE-4.2 */
+#define X86_FEATURE_X2APIC     ( 4*32+21) /* x2APIC */
+#define X86_FEATURE_MOVBE      ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_POPCNT      ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
+#define X86_FEATURE_AES                ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_XSAVE      ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_OSXSAVE    ( 4*32+27) /* "" XSAVE enabled in the OS */
+#define X86_FEATURE_AVX                ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C       ( 4*32+29) /* 16-bit fp conversions */
+#define X86_FEATURE_RDRAND     ( 4*32+30) /* The RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
+#define X86_FEATURE_XSTORE     ( 5*32+ 2) /* "rng" RNG present (xstore) */
+#define X86_FEATURE_XSTORE_EN  ( 5*32+ 3) /* "rng_en" RNG enabled */
+#define X86_FEATURE_XCRYPT     ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
+#define X86_FEATURE_XCRYPT_EN  ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
+#define X86_FEATURE_ACE2       ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN    ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_PHE                ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN     ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PMM                ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN     ( 5*32+13) /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
+#define X86_FEATURE_LAHF_LM    ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM                ( 6*32+ 2) /* Secure virtual machine */
+#define X86_FEATURE_EXTAPIC    ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM                ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A      ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW       ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_IBS                ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_XOP                ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_SKINIT     ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT                ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_LWP                ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_FMA4       ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE                ( 6*32+17) /* translation cache extension */
+#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_TBM                ( 6*32+21) /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT    ( 6*32+22) /* topology extensions CPUID leafs */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB  ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_BPEXT      (6*32+26) /* data breakpoint extension */
+#define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */
+#define X86_FEATURE_MWAITX     ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */
+
+/*
+ * Auxiliary flags: Linux defined - For features scattered in various
+ * CPUID levels like 0x6, 0xA etc, word 7.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+
+#define X86_FEATURE_CPB                ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_EPB                ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+
+#define X86_FEATURE_HW_PSTATE  ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+
+#define X86_FEATURE_INTEL_PT   ( 7*32+15) /* Intel Processor Trace */
+
+/* Virtualization flags: Linux defined, word 8 */
+#define X86_FEATURE_TPR_SHADOW  ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_VNMI        ( 8*32+ 1) /* Intel Virtual NMI */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
+#define X86_FEATURE_EPT         ( 8*32+ 3) /* Intel Extended Page Table */
+#define X86_FEATURE_VPID        ( 8*32+ 4) /* Intel Virtual Processor ID */
+
+#define X86_FEATURE_VMMCALL     ( 8*32+15) /* Prefer vmmcall to vmcall */
+#define X86_FEATURE_XENPV       ( 8*32+16) /* "" Xen paravirtual guest */
+
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
+#define X86_FEATURE_FSGSBASE   ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */
+#define X86_FEATURE_BMI1       ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_HLE                ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_AVX2       ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_SMEP       ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_BMI2       ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_ERMS       ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID    ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_RTM                ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_CQM                ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_MPX                ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_AVX512F    ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_RDSEED     ( 9*32+18) /* The RDSEED instruction */
+#define X86_FEATURE_ADX                ( 9*32+19) /* The ADCX and ADOX instructions */
+#define X86_FEATURE_SMAP       ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_PCOMMIT    ( 9*32+22) /* PCOMMIT instruction */
+#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLWB       ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_AVX512PF   ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512ER   ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512CD   ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_SHA_NI     ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+
+/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
+#define X86_FEATURE_XSAVEOPT   (10*32+ 0) /* XSAVEOPT */
+#define X86_FEATURE_XSAVEC     (10*32+ 1) /* XSAVEC */
+#define X86_FEATURE_XGETBV1    (10*32+ 2) /* XGETBV with ECX = 1 */
+#define X86_FEATURE_XSAVES     (10*32+ 3) /* XSAVES/XRSTORS */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
+#define X86_FEATURE_CQM_LLC    (11*32+ 1) /* LLC QoS if 1 */
+
+/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
+#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
+
+/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
+#define X86_FEATURE_CLZERO     (13*32+0) /* CLZERO instruction */
+
+/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
+#define X86_FEATURE_DTHERM     (14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_IDA                (14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_ARAT       (14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_PLN                (14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PTS                (14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_HWP                (14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_EPP    (14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+
+/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
+#define X86_FEATURE_NPT                (15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_LBRV       (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_SVML       (15*32+ 2) /* "svm_lock" SVM locking MSR */
+#define X86_FEATURE_NRIPS      (15*32+ 3) /* "nrip_save" SVM next_rip save */
+#define X86_FEATURE_TSCRATEMSR  (15*32+ 4) /* "tsc_scale" TSC scaling support */
+#define X86_FEATURE_VMCBCLEAN   (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
+#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
+#define X86_FEATURE_AVIC       (15*32+13) /* Virtual Interrupt Controller */
+
+/*
+ * BUG word(s)
+ */
+#define X86_BUG(x)             (NCAPINTS*32 + (x))
+
+#define X86_BUG_F00F           X86_BUG(0) /* Intel F00F */
+#define X86_BUG_FDIV           X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_COMA           X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
+#define X86_BUG_AMD_APIC_C1E   X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
+#define X86_BUG_11AP           X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_FXSAVE_LEAK    X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_CLFLUSH_MONITOR        X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_SYSRET_SS_ATTRS        X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+
+#endif /* _ASM_X86_CPUFEATURES_H */
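
Each X86_FEATURE_* value in this new header encodes (word * 32 + bit) as a single integer. A small standalone illustration of how such a value splits back into a capability-word index and bit number, with one constant copied from above:

#include <stdio.h>

#define X86_FEATURE_AVX2 (9*32 + 5)    /* value as defined in the header above */

int main(void)
{
        unsigned int feature = X86_FEATURE_AVX2;

        /* word indexes x86_capability[]; bit selects the flag within it */
        printf("word %u, bit %u\n", feature / 32, feature % 32);  /* word 9, bit 5 */
        return 0;
}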
index 535192f6bfad8d67c27235377e1e549b8ba89276..3c69fed215c56c3203e59d97a6c7e11381c97cf8 100644
@@ -15,7 +15,7 @@ static __always_inline __init void *dmi_alloc(unsigned len)
 /* Use early IO mappings for DMI because it's initialized early */
 #define dmi_early_remap                early_ioremap
 #define dmi_early_unmap                early_iounmap
-#define dmi_remap              ioremap
+#define dmi_remap              ioremap_cache
 #define dmi_unmap              iounmap
 
 #endif /* _ASM_X86_DMI_H */
index 0010c78c4998cf0702299ea2f8a9229e09bb6438..8fd9e637629a0ab22d49eb0ef51ef4feb9aa3987 100644
@@ -3,6 +3,7 @@
 
 #include <asm/fpu/api.h>
 #include <asm/pgtable.h>
+#include <asm/tlb.h>
 
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
@@ -64,6 +65,17 @@ extern u64 asmlinkage efi_call(void *fp, ...);
 
 #define efi_call_phys(f, args...)              efi_call((f), args)
 
+/*
+ * Scratch space used for switching the pagetable in the EFI stub
+ */
+struct efi_scratch {
+       u64     r15;
+       u64     prev_cr3;
+       pgd_t   *efi_pgt;
+       bool    use_pgd;
+       u64     phys_stack;
+} __packed;
+
 #define efi_call_virt(f, ...)                                          \
 ({                                                                     \
        efi_status_t __s;                                               \
@@ -71,7 +83,20 @@ extern u64 asmlinkage efi_call(void *fp, ...);
        efi_sync_low_kernel_mappings();                                 \
        preempt_disable();                                              \
        __kernel_fpu_begin();                                           \
+                                                                       \
+       if (efi_scratch.use_pgd) {                                      \
+               efi_scratch.prev_cr3 = read_cr3();                      \
+               write_cr3((unsigned long)efi_scratch.efi_pgt);          \
+               __flush_tlb_all();                                      \
+       }                                                               \
+                                                                       \
        __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);    \
+                                                                       \
+       if (efi_scratch.use_pgd) {                                      \
+               write_cr3(efi_scratch.prev_cr3);                        \
+               __flush_tlb_all();                                      \
+       }                                                               \
+                                                                       \
        __kernel_fpu_end();                                             \
        preempt_enable();                                               \
        __s;                                                            \
@@ -111,6 +136,7 @@ extern void __init efi_memory_uc(u64 addr, unsigned long size);
 extern void __init efi_map_region(efi_memory_desc_t *md);
 extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
 extern void efi_sync_low_kernel_mappings(void);
+extern int __init efi_alloc_page_tables(void);
 extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
 extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
 extern void __init old_map_region(efi_memory_desc_t *md);
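
The efi_call_virt() hunk above brackets each runtime-service call with a CR3 switch to a dedicated EFI page table when efi_scratch.use_pgd is set. The userspace analog below mirrors that save/switch/restore shape with a plain variable standing in for CR3; all names here are illustrative, not kernel APIs.

#include <stdio.h>

static unsigned long fake_cr3 = 0x1000;           /* pretend kernel page table */
static const unsigned long fake_efi_pgt = 0x2000; /* pretend EFI page table */

static void efi_service(void)
{
        printf("EFI service runs with page table %#lx\n", fake_cr3);
}

int main(void)
{
        unsigned long prev = fake_cr3;  /* prev_cr3 = read_cr3()  */
        fake_cr3 = fake_efi_pgt;        /* write_cr3(efi_pgt)     */
        efi_service();
        fake_cr3 = prev;                /* write_cr3(prev_cr3)    */
        printf("back on page table %#lx\n", fake_cr3);
        return 0;
}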
index 1514753fd43553e079696712b48a8d08b6966e98..15340e36ddcb3364e16eb63cd61c61a42676d756 100644
@@ -256,7 +256,7 @@ extern int force_personality32;
    instruction set this CPU supports.  This could be done in user space,
    but it's not easy, and we've already done it here.  */
 
-#define ELF_HWCAP              (boot_cpu_data.x86_capability[0])
+#define ELF_HWCAP              (boot_cpu_data.x86_capability[CPUID_1_EDX])
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
index 6d7d0e52ed5a5388b87e834083d90307a6a250ba..8554f960e21b7ce0d05dfb67fa36f6cff974650e 100644
@@ -138,7 +138,7 @@ extern void reserve_top_address(unsigned long reserve);
 extern int fixmaps_set;
 
 extern pte_t *kmap_pte;
-extern pgprot_t kmap_prot;
+#define kmap_prot PAGE_KERNEL
 extern pte_t *pkmap_page_table;
 
 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
index 0fd440df63f18d77c040f8683f34018ed426b4b2..a2124343edf5448fa06eece0015729505cf93412 100644
@@ -17,6 +17,7 @@
 #include <asm/user.h>
 #include <asm/fpu/api.h>
 #include <asm/fpu/xstate.h>
+#include <asm/cpufeature.h>
 
 /*
  * High level FPU state handling functions:
@@ -58,22 +59,22 @@ extern u64 fpu__get_supported_xfeatures_mask(void);
  */
 static __always_inline __pure bool use_eager_fpu(void)
 {
-       return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+       return static_cpu_has(X86_FEATURE_EAGER_FPU);
 }
 
 static __always_inline __pure bool use_xsaveopt(void)
 {
-       return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+       return static_cpu_has(X86_FEATURE_XSAVEOPT);
 }
 
 static __always_inline __pure bool use_xsave(void)
 {
-       return static_cpu_has_safe(X86_FEATURE_XSAVE);
+       return static_cpu_has(X86_FEATURE_XSAVE);
 }
 
 static __always_inline __pure bool use_fxsr(void)
 {
-       return static_cpu_has_safe(X86_FEATURE_FXSR);
+       return static_cpu_has(X86_FEATURE_FXSR);
 }
 
 /*
@@ -300,7 +301,7 @@ static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
 
        WARN_ON(system_state != SYSTEM_BOOTING);
 
-       if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+       if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
@@ -322,7 +323,7 @@ static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
 
        WARN_ON(system_state != SYSTEM_BOOTING);
 
-       if (static_cpu_has_safe(X86_FEATURE_XSAVES))
+       if (static_cpu_has(X86_FEATURE_XSAVES))
                XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
        else
                XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
@@ -460,7 +461,7 @@ static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
         * pending. Clear the x87 state here by setting it to fixed values.
         * "m" is a random variable that should be in L1.
         */
-       if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
+       if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
                asm volatile(
                        "fnclex\n\t"
                        "emms\n\t"
@@ -589,7 +590,8 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
         * If the task has used the math, pre-load the FPU on xsave processors
         * or if the past 5 consecutive context-switches used math.
         */
-       fpu.preload = new_fpu->fpstate_active &&
+       fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
+                     new_fpu->fpstate_active &&
                      (use_eager_fpu() || new_fpu->counter > 5);
 
        if (old_fpu->fpregs_active) {
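
/*
 * Editor's sketch, not part of the patch: with the *_safe() variants gone,
 * every feature-gated fast path in this header funnels through
 * static_cpu_has().  A hypothetical save helper built on the wrappers
 * defined above would look roughly like this (helper names follow the
 * ones used elsewhere in this header):
 */
static inline void copy_fpregs_to_fpstate_sketch(struct fpu *fpu)
{
	if (use_xsave())
		copy_xregs_to_kernel(&fpu->state.xsave);	/* XSAVE* family */
	else
		copy_fxregs_to_kernel(fpu);			/* legacy FXSAVE */
}
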
index 793179cf8e21aa89636f869fc3a9e2fe0b4a29e0..6e4d170726b758a75a46777f416d07bc7ada01e7 100644 (file)
@@ -1,23 +1,44 @@
-#ifdef __ASSEMBLY__
+#ifndef _ASM_X86_FRAME_H
+#define _ASM_X86_FRAME_H
 
 #include <asm/asm.h>
 
-/* The annotation hides the frame from the unwinder and makes it look
-   like a ordinary ebp save/restore. This avoids some special cases for
-   frame pointer later */
+/*
+ * These are stack frame creation macros.  They should be used by every
+ * callable non-leaf asm function to make kernel stack traces more reliable.
+ */
+
 #ifdef CONFIG_FRAME_POINTER
-       .macro FRAME
-       __ASM_SIZE(push,)       %__ASM_REG(bp)
-       __ASM_SIZE(mov)         %__ASM_REG(sp), %__ASM_REG(bp)
-       .endm
-       .macro ENDFRAME
-       __ASM_SIZE(pop,)        %__ASM_REG(bp)
-       .endm
-#else
-       .macro FRAME
-       .endm
-       .macro ENDFRAME
-       .endm
-#endif
-
-#endif  /*  __ASSEMBLY__  */
+
+#ifdef __ASSEMBLY__
+
+.macro FRAME_BEGIN
+       push %_ASM_BP
+       _ASM_MOV %_ASM_SP, %_ASM_BP
+.endm
+
+.macro FRAME_END
+       pop %_ASM_BP
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#define FRAME_BEGIN                            \
+       "push %" _ASM_BP "\n"                   \
+       _ASM_MOV "%" _ASM_SP ", %" _ASM_BP "\n"
+
+#define FRAME_END "pop %" _ASM_BP "\n"
+
+#endif /* __ASSEMBLY__ */
+
+#define FRAME_OFFSET __ASM_SEL(4, 8)
+
+#else /* !CONFIG_FRAME_POINTER */
+
+#define FRAME_BEGIN
+#define FRAME_END
+#define FRAME_OFFSET 0
+
+#endif /* CONFIG_FRAME_POINTER */
+
+#endif /* _ASM_X86_FRAME_H */
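
/*
 * Editor's usage sketch, not part of the patch ("example_target" and
 * "other_func" are made-up symbols): a callable non-leaf asm function
 * brackets its body with the new macros,
 *
 *	ENTRY(helper)
 *		FRAME_BEGIN
 *		call	other_func
 *		FRAME_END
 *		ret
 *	ENDPROC(helper)
 *
 * while C code can paste the string forms around an inline-asm CALL so
 * the unwinder still sees a frame:
 */
static inline long call_example_target(long arg)
{
	long ret;

	asm volatile(FRAME_BEGIN
		     "call example_target\n\t"
		     FRAME_END
		     : "=a" (ret) : "D" (arg) : "memory");
	return ret;
}
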
index 881b4768644aad3537364286e468146aba73ad56..e7de5c9a4fbdd2c5d99b78d82d2c58019d20dc04 100644 (file)
@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
+struct irq_desc;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 
-struct irq_desc;
 extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
index 78162f8e248bdc77c2625238f821791d837d24d4..d0afb05c84fc1ffd434e85f6267326b9091d2595 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _ASM_IRQ_WORK_H
 #define _ASM_IRQ_WORK_H
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 
 static inline bool arch_irq_work_has_interrupt(void)
 {
index 44adbb81904184da0cbbab08807075a24918ba28..7b545998243393e4ee4507c97e2c088ba7572660 100644 (file)
@@ -754,6 +754,8 @@ struct kvm_arch {
 
        bool irqchip_split;
        u8 nr_reserved_ioapic_pins;
+
+       bool disabled_lapic_found;
 };
 
 struct kvm_vm_stat {
index 1e1b07a5a7388d45eaaf46e032efd8ee14569662..9d3a96c4da789230f9aeb523e6af35e4fe4a8db3 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <asm/cpu.h>
 #include <linux/earlycpio.h>
+#include <linux/initrd.h>
 
 #define native_rdmsr(msr, val1, val2)                  \
 do {                                                   \
@@ -143,4 +144,29 @@ static inline void reload_early_microcode(void)                    { }
 static inline bool
 get_builtin_firmware(struct cpio_data *cd, const char *name)   { return false; }
 #endif
+
+static inline unsigned long get_initrd_start(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+       return initrd_start;
+#else
+       return 0;
+#endif
+}
+
+static inline unsigned long get_initrd_start_addr(void)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+#ifdef CONFIG_X86_32
+       unsigned long *initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
+
+       return (unsigned long)__pa_nodebug(*initrd_start_p);
+#else
+       return get_initrd_start();
+#endif
+#else /* CONFIG_BLK_DEV_INITRD */
+       return 0;
+#endif
+}
+
 #endif /* _ASM_X86_MICROCODE_H */
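
/*
 * Editor's sketch, not part of the patch: the early microcode loader runs
 * before paging is fully set up, which is why the 32-bit case above
 * resolves both the address of initrd_start and its contents through
 * __pa_nodebug().  A caller would use the helpers roughly like this
 * (the cpio path is illustrative; find_cpio_data() is the existing
 * <linux/earlycpio.h> helper):
 */
static struct cpio_data find_ucode_in_initrd_sketch(size_t size)
{
	void *start = (void *)get_initrd_start_addr();	/* physical on 32-bit */
	long offset = 0;

	if (!start)
		return (struct cpio_data){ .data = NULL };

	return find_cpio_data("kernel/x86/microcode/", start, size, &offset);
}
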
index 55234d5e7160db83bd9854a40c87b8e70593b3c7..1ea0baef1175c407e9097e125a84b226be28266c 100644 (file)
@@ -19,7 +19,8 @@ typedef struct {
 #endif
 
        struct mutex lock;
-       void __user *vdso;
+       void __user *vdso;                      /* vdso base address */
+       const struct vdso_image *vdso_image;    /* vdso image in use */
 
        atomic_t perf_rdpmc_allowed;    /* nonzero if rdpmc is allowed */
 } mm_context_t;
index b05402ef3b842203f97def484806bec3c2320b9c..552346598dab02e5176981aff2cfe117b9480308 100644 (file)
 #define MSR_IA32_MC0_CTL2              0x00000280
 #define MSR_IA32_MCx_CTL2(x)           (MSR_IA32_MC0_CTL2 + (x))
 
+/* 'SMCA': AMD64 Scalable MCA */
+#define MSR_AMD64_SMCA_MC0_CONFIG      0xc0002004
+#define MSR_AMD64_SMCA_MCx_CONFIG(x)   (MSR_AMD64_SMCA_MC0_CONFIG + 0x10*(x))
+
 #define MSR_P6_PERFCTR0                        0x000000c1
 #define MSR_P6_PERFCTR1                        0x000000c2
 #define MSR_P6_EVNTSEL0                        0x00000186
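
/*
 * Editor's note, not part of the patch: the SMCA per-bank register blocks
 * are spaced 16 MSRs apart, so the macro above is plain indexing, e.g.
 *
 *	MSR_AMD64_SMCA_MCx_CONFIG(0) == 0xc0002004
 *	MSR_AMD64_SMCA_MCx_CONFIG(1) == 0xc0002014
 *	MSR_AMD64_SMCA_MCx_CONFIG(5) == 0xc0002054
 */
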
index c70689b5e5aa4c07f06aabd4edd3d344df207c3e..0deeb2d26df7cd7510e831d04d4f80ed297acaf0 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <linux/sched.h>
 
+#include <asm/cpufeature.h>
+
 #define MWAIT_SUBSTATE_MASK            0xf
 #define MWAIT_CSTATE_MASK              0xf
 #define MWAIT_SUBSTATE_SIZE            4
index 04c27a0131656db29e97805432e56d3a1aae5992..4432ab7f407ccc5f47ea9c19a850c29af821d8c9 100644 (file)
@@ -366,20 +366,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
 }
 static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
 {
+       pgprotval_t val = pgprot_val(pgprot);
        pgprot_t new;
-       unsigned long val;
 
-       val = pgprot_val(pgprot);
        pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                ((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
        return new;
 }
 static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
 {
+       pgprotval_t val = pgprot_val(pgprot);
        pgprot_t new;
-       unsigned long val;
 
-       val = pgprot_val(pgprot);
        pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
                          ((val & _PAGE_PAT_LARGE) >>
                           (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
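
/*
 * Editor's worked example, not part of the patch: 4K PTEs keep the PAT
 * bit at _PAGE_BIT_PAT (bit 7), large pages at _PAGE_BIT_PAT_LARGE
 * (bit 12).  For a 4K protection value with PAT set:
 *
 *	val has bit 7 set
 *	pgprot_4k_2_large(): clear bits 7 and 12, then
 *	(val & _PAGE_PAT) << (12 - 7) puts the bit back at position 12
 *
 * pgprot_large_2_4k() applies the inverse shift, so the two helpers are
 * exact inverses of each other.
 */
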
index 2d5a50cb61a2d6ad5c68d5563636edcc112ff4f9..ecb410310e700513338e08010a6decf20d84de61 100644 (file)
@@ -13,7 +13,7 @@ struct vm86;
 #include <asm/types.h>
 #include <uapi/asm/sigcontext.h>
 #include <asm/current.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/page.h>
 #include <asm/pgtable_types.h>
 #include <asm/percpu.h>
@@ -24,7 +24,6 @@ struct vm86;
 #include <asm/fpu/types.h>
 
 #include <linux/personality.h>
-#include <linux/cpumask.h>
 #include <linux/cache.h>
 #include <linux/threads.h>
 #include <linux/math64.h>
@@ -766,7 +765,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
  * Return saved PC of a blocked thread.
  * What is this good for? it will be always the scheduler or ret_from_fork.
  */
-#define thread_saved_pc(t)     (*(unsigned long *)((t)->thread.sp - 8))
+#define thread_saved_pc(t)     READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
 
 #define task_pt_regs(tsk)      ((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);
index ba665ebd17bb8f22a39712dbf2a8c398b9e9207f..db333300bd4be17205daf3b2e820c0af101ccc2d 100644 (file)
@@ -15,7 +15,7 @@
 
 #include <linux/stringify.h>
 #include <asm/nops.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 
 /* "Raw" instruction opcodes */
 #define __ASM_CLAC     .byte 0x0f,0x01,0xca
index dfcf0727623b3f89d53374ea504f243e802be149..20a3de5cb3b0dd5e3362833baebd752c1142ae4e 100644 (file)
@@ -16,7 +16,6 @@
 #endif
 #include <asm/thread_info.h>
 #include <asm/cpumask.h>
-#include <asm/cpufeature.h>
 
 extern int smp_num_siblings;
 extern unsigned int num_processors;
index c7b551028740f18a5360070a6ccb5dc9ad714c5e..c0778fcab06d52a8e36a299c6dd730401da408c6 100644 (file)
@@ -49,7 +49,7 @@
  */
 #ifndef __ASSEMBLY__
 struct task_struct;
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <linux/atomic.h>
 
 struct thread_info {
index 6df2029405a3ae55df8b9718dd320b55dde5c1ad..6f9e27aa2aafc269d27acb4822537dacee80f937 100644 (file)
@@ -5,8 +5,57 @@
 #include <linux/sched.h>
 
 #include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/special_insns.h>
 
+static inline void __invpcid(unsigned long pcid, unsigned long addr,
+                            unsigned long type)
+{
+       u64 desc[2] = { pcid, addr };
+
+       /*
+        * The memory clobber is because the whole point is to invalidate
+        * stale TLB entries and, especially if we're flushing global
+        * mappings, we don't want the compiler to reorder any subsequent
+        * memory accesses before the TLB flush.
+        *
+        * The hex opcode is invpcid (%ecx), %eax in 32-bit mode and
+        * invpcid (%rcx), %rax in long mode.
+        */
+       asm volatile (".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
+                     : : "m" (desc), "a" (type), "c" (desc) : "memory");
+}
+
+#define INVPCID_TYPE_INDIV_ADDR                0
+#define INVPCID_TYPE_SINGLE_CTXT       1
+#define INVPCID_TYPE_ALL_INCL_GLOBAL   2
+#define INVPCID_TYPE_ALL_NON_GLOBAL    3
+
+/* Flush all mappings for a given pcid and addr, not including globals. */
+static inline void invpcid_flush_one(unsigned long pcid,
+                                    unsigned long addr)
+{
+       __invpcid(pcid, addr, INVPCID_TYPE_INDIV_ADDR);
+}
+
+/* Flush all mappings for a given PCID, not including globals. */
+static inline void invpcid_flush_single_context(unsigned long pcid)
+{
+       __invpcid(pcid, 0, INVPCID_TYPE_SINGLE_CTXT);
+}
+
+/* Flush all mappings, including globals, for all PCIDs. */
+static inline void invpcid_flush_all(void)
+{
+       __invpcid(0, 0, INVPCID_TYPE_ALL_INCL_GLOBAL);
+}
+
+/* Flush all mappings for all PCIDs except globals. */
+static inline void invpcid_flush_all_nonglobals(void)
+{
+       __invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -104,6 +153,15 @@ static inline void __native_flush_tlb_global(void)
 {
        unsigned long flags;
 
+       if (static_cpu_has(X86_FEATURE_INVPCID)) {
+               /*
+                * Using INVPCID is considerably faster than a pair of writes
+                * to CR4 sandwiched inside an IRQ flag save/restore.
+                */
+               invpcid_flush_all();
+               return;
+       }
+
        /*
         * Read-modify-write to CR4 - protect it from preemption and
         * from interrupts. (Use the raw variant because this code can
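
/*
 * Editor's usage sketch, not part of the patch: callers gate the new
 * helpers on the CPUID bit, just as __native_flush_tlb_global() above
 * does, e.g.
 *
 *	if (static_cpu_has(X86_FEATURE_INVPCID))
 *		invpcid_flush_single_context(pcid);	// one PCID, no globals
 *	else
 *		__flush_tlb();				// CR3 reload fallback
 *
 * The 16-byte descriptor handed to INVPCID is { pcid, addr }, exactly the
 * u64 desc[2] that __invpcid() builds above.
 */
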
index b89c34c4019b5818da47434dcd711235a65baa40..307698688fa1cfde037e9d9f96426269b7cd61e9 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/errno.h>
 #include <linux/lockdep.h>
 #include <asm/alternative.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/page.h>
 
 /*
index deabaf9759b640d5cd93f50f9db67ef2dc60a807..43dc55be524e7fdc8e5150f87b02a8639b20b3f3 100644 (file)
@@ -13,9 +13,6 @@ struct vdso_image {
        void *data;
        unsigned long size;   /* Always a multiple of PAGE_SIZE */
 
-       /* text_mapping.pages is big enough for data/size page pointers */
-       struct vm_special_mapping text_mapping;
-
        unsigned long alt, alt_len;
 
        long sym_vvar_start;  /* Negative offset to the vvar area */
index f556c4843aa18af74359dfeb2a41d39d9a2c3bb9..e728699db7741f0282441a79635a88afd23259c8 100644 (file)
@@ -37,6 +37,12 @@ struct vsyscall_gtod_data {
 };
 extern struct vsyscall_gtod_data vsyscall_gtod_data;
 
+extern int vclocks_used;
+static inline bool vclock_was_used(int vclock)
+{
+       return READ_ONCE(vclocks_used) & (1 << vclock);
+}
+
 static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
 {
        unsigned ret;
index c80c02c6ec4944a85715f4438443e5987df4f612..ab5c2c685a3ce908c3677262a35fd402ba77a445 100644 (file)
@@ -30,7 +30,7 @@ static unsigned int numachip1_get_apic_id(unsigned long x)
        unsigned long value;
        unsigned int id = (x >> 24) & 0xff;
 
-       if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+       if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
                rdmsrl(MSR_FAM10H_NODE_ID, value);
                id |= (value << 2) & 0xff00;
        }
@@ -178,7 +178,7 @@ static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
        this_cpu_write(cpu_llc_id, node);
 
        /* Account for nodes per socket in multi-core-module processors */
-       if (static_cpu_has_safe(X86_FEATURE_NODEID_MSR)) {
+       if (static_cpu_has(X86_FEATURE_NODEID_MSR)) {
                rdmsrl(MSR_FAM10H_NODE_ID, val);
                nodes = ((val >> 3) & 7) + 1;
        }
index f25321894ad294278c20d5ba7b4076ce2353a8fb..fdb0fbfb1197a4cf4e485c4f330b3a399b3a6d5f 100644 (file)
@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
 {
        int pin, ioapic, irq, irq_entry;
        const struct cpumask *mask;
+       struct irq_desc *desc;
        struct irq_data *idata;
        struct irq_chip *chip;
 
@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
                if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
                        continue;
 
-               idata = irq_get_irq_data(irq);
+               desc = irq_to_desc(irq);
+               raw_spin_lock_irq(&desc->lock);
+               idata = irq_desc_get_irq_data(desc);
 
                /*
                 * Honour affinities which have been set in early boot
@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
                /* Might be lapic_chip for irq 0 */
                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(idata, mask, false);
+               raw_spin_unlock_irq(&desc->lock);
        }
 }
 #endif
index 908cb37da171ebed1a361203dc520c15e4e42736..3b670df4ba7b429f48102b1401c9133957ecb6a9 100644 (file)
@@ -31,7 +31,7 @@ struct apic_chip_data {
 struct irq_domain *x86_vector_domain;
 EXPORT_SYMBOL_GPL(x86_vector_domain);
 static DEFINE_RAW_SPINLOCK(vector_lock);
-static cpumask_var_t vector_cpumask;
+static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
 static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@@ -118,35 +118,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
         */
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 16;
-       int cpu, err;
+       int cpu, vector;
 
-       if (d->move_in_progress)
+       /*
+        * If there is still a move in progress or the previous move has not
+        * been cleaned up completely, tell the caller to come back later.
+        */
+       if (d->move_in_progress ||
+           cpumask_intersects(d->old_domain, cpu_online_mask))
                return -EBUSY;
 
        /* Only try and allocate irqs on cpus that are present */
-       err = -ENOSPC;
        cpumask_clear(d->old_domain);
+       cpumask_clear(searched_cpumask);
        cpu = cpumask_first_and(mask, cpu_online_mask);
        while (cpu < nr_cpu_ids) {
-               int new_cpu, vector, offset;
+               int new_cpu, offset;
 
+               /* Get the possible target cpus for @mask/@cpu from the apic */
                apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
+               /*
+                * Clear the offline cpus from @vector_cpumask for searching
+                * and verify whether the result overlaps with @mask. If true,
+                * then the call to apic->cpu_mask_to_apicid_and() will
+                * succeed as well. If not, no point in trying to find a
+                * vector in this mask.
+                */
+               cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
+               if (!cpumask_intersects(vector_searchmask, mask))
+                       goto next_cpu;
+
                if (cpumask_subset(vector_cpumask, d->domain)) {
-                       err = 0;
                        if (cpumask_equal(vector_cpumask, d->domain))
-                               break;
+                               goto success;
                        /*
-                        * New cpumask using the vector is a proper subset of
-                        * the current in use mask. So cleanup the vector
-                        * allocation for the members that are not used anymore.
+                        * Mark the cpus which are no longer in the mask for
+                        * cleanup.
                         */
-                       cpumask_andnot(d->old_domain, d->domain,
-                                      vector_cpumask);
-                       d->move_in_progress =
-                          cpumask_intersects(d->old_domain, cpu_online_mask);
-                       cpumask_and(d->domain, d->domain, vector_cpumask);
-                       break;
+                       cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
+                       vector = d->cfg.vector;
+                       goto update;
                }
 
                vector = current_vector;
@@ -158,45 +170,60 @@ next:
                        vector = FIRST_EXTERNAL_VECTOR + offset;
                }
 
-               if (unlikely(current_vector == vector)) {
-                       cpumask_or(d->old_domain, d->old_domain,
-                                  vector_cpumask);
-                       cpumask_andnot(vector_cpumask, mask, d->old_domain);
-                       cpu = cpumask_first_and(vector_cpumask,
-                                               cpu_online_mask);
-                       continue;
-               }
+               /* If the search wrapped around, try the next cpu */
+               if (unlikely(current_vector == vector))
+                       goto next_cpu;
 
                if (test_bit(vector, used_vectors))
                        goto next;
 
-               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
+               for_each_cpu(new_cpu, vector_searchmask) {
                        if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
                                goto next;
                }
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
-               if (d->cfg.vector) {
+               /* Schedule the old vector for cleanup on all cpus */
+               if (d->cfg.vector)
                        cpumask_copy(d->old_domain, d->domain);
-                       d->move_in_progress =
-                          cpumask_intersects(d->old_domain, cpu_online_mask);
-               }
-               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
+               for_each_cpu(new_cpu, vector_searchmask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
-               d->cfg.vector = vector;
-               cpumask_copy(d->domain, vector_cpumask);
-               err = 0;
-               break;
-       }
+               goto update;
 
-       if (!err) {
-               /* cache destination APIC IDs into cfg->dest_apicid */
-               err = apic->cpu_mask_to_apicid_and(mask, d->domain,
-                                                  &d->cfg.dest_apicid);
+next_cpu:
+               /*
+                * We exclude the current @vector_cpumask from the requested
+                * @mask and try again with the next online cpu in the
+                * result. We cannot modify @mask, so we use @vector_cpumask
+                * as a temporary buffer here as it will be reassigned when
+                * calling apic->vector_allocation_domain() above.
+                */
+               cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
+               cpumask_andnot(vector_cpumask, mask, searched_cpumask);
+               cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
+               continue;
        }
+       return -ENOSPC;
 
-       return err;
+update:
+       /*
+        * Exclude offline cpus from the cleanup mask and set the
+        * move_in_progress flag when the result is not empty.
+        */
+       cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
+       d->move_in_progress = !cpumask_empty(d->old_domain);
+       d->cfg.vector = vector;
+       cpumask_copy(d->domain, vector_cpumask);
+success:
+       /*
+        * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
+        * as we already established that mask & d->domain & cpu_online_mask
+        * is not empty.
+        */
+       BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+                                           &d->cfg.dest_apicid));
+       return 0;
 }
 
 static int assign_irq_vector(int irq, struct apic_chip_data *data,
@@ -226,10 +253,8 @@ static int assign_irq_vector_policy(int irq, int node,
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
        struct irq_desc *desc;
-       unsigned long flags;
        int cpu, vector;
 
-       raw_spin_lock_irqsave(&vector_lock, flags);
        BUG_ON(!data->cfg.vector);
 
        vector = data->cfg.vector;
@@ -239,10 +264,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
        data->cfg.vector = 0;
        cpumask_clear(data->domain);
 
-       if (likely(!data->move_in_progress)) {
-               raw_spin_unlock_irqrestore(&vector_lock, flags);
+       /*
+        * If move is in progress or the old_domain mask is not empty,
+        * i.e. the cleanup IPI has not been processed yet, we need to remove
+        * the old references to desc from all cpus' vector tables.
+        */
+       if (!data->move_in_progress && cpumask_empty(data->old_domain))
                return;
-       }
 
        desc = irq_to_desc(irq);
        for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
@@ -255,7 +283,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
                }
        }
        data->move_in_progress = 0;
-       raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void init_irq_alloc_info(struct irq_alloc_info *info,
@@ -276,19 +303,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
 static void x86_vector_free_irqs(struct irq_domain *domain,
                                 unsigned int virq, unsigned int nr_irqs)
 {
+       struct apic_chip_data *apic_data;
        struct irq_data *irq_data;
+       unsigned long flags;
        int i;
 
        for (i = 0; i < nr_irqs; i++) {
                irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
                if (irq_data && irq_data->chip_data) {
+                       raw_spin_lock_irqsave(&vector_lock, flags);
                        clear_irq_vector(virq + i, irq_data->chip_data);
-                       free_apic_chip_data(irq_data->chip_data);
+                       apic_data = irq_data->chip_data;
+                       irq_domain_reset_irq_data(irq_data);
+                       raw_spin_unlock_irqrestore(&vector_lock, flags);
+                       free_apic_chip_data(apic_data);
 #ifdef CONFIG_X86_IO_APIC
                        if (virq + i < nr_legacy_irqs())
                                legacy_irq_data[virq + i] = NULL;
 #endif
-                       irq_domain_reset_irq_data(irq_data);
                }
        }
 }
@@ -406,6 +438,8 @@ int __init arch_early_irq_init(void)
        arch_init_htirq_domain(x86_vector_domain);
 
        BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+       BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
+       BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
 
        return arch_early_ioapic_init();
 }
@@ -494,14 +528,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
                return -EINVAL;
 
        err = assign_irq_vector(irq, data, dest);
-       if (err) {
-               if (assign_irq_vector(irq, data,
-                                     irq_data_get_affinity_mask(irq_data)))
-                       pr_err("Failed to recover vector for irq %d\n", irq);
-               return err;
-       }
-
-       return IRQ_SET_MASK_OK;
+       return err ? err : IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip lapic_controller = {
@@ -513,20 +540,12 @@ static struct irq_chip lapic_controller = {
 #ifdef CONFIG_SMP
 static void __send_cleanup_vector(struct apic_chip_data *data)
 {
-       cpumask_var_t cleanup_mask;
-
-       if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-               unsigned int i;
-
-               for_each_cpu_and(i, data->old_domain, cpu_online_mask)
-                       apic->send_IPI_mask(cpumask_of(i),
-                                           IRQ_MOVE_CLEANUP_VECTOR);
-       } else {
-               cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
-               apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-               free_cpumask_var(cleanup_mask);
-       }
+       raw_spin_lock(&vector_lock);
+       cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
        data->move_in_progress = 0;
+       if (!cpumask_empty(data->old_domain))
+               apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
+       raw_spin_unlock(&vector_lock);
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)
@@ -570,12 +589,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                        goto unlock;
 
                /*
-                * Check if the irq migration is in progress. If so, we
-                * haven't received the cleanup request yet for this irq.
+                * Nothing to clean up if irq migration is in progress
+                * or this cpu is not set in the cleanup mask.
                 */
-               if (data->move_in_progress)
+               if (data->move_in_progress ||
+                   !cpumask_test_cpu(me, data->old_domain))
                        goto unlock;
 
+               /*
+                * We have two cases to handle here:
+                * 1) vector is unchanged but the target mask got reduced
+                * 2) vector and the target mask have changed
+                *
+                * #1 is obvious, but in #2 we have two vectors with the same
+                * irq descriptor: the old and the new vector. So we need to
+                * make sure that we only cleanup the old vector. The new
+                * vector has the current @vector number in the config and
+                * this cpu is part of the target mask. We'd better leave that
+                * one alone.
+                */
                if (vector == data->cfg.vector &&
                    cpumask_test_cpu(me, data->domain))
                        goto unlock;
@@ -593,6 +625,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                        goto unlock;
                }
                __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+               cpumask_clear_cpu(me, data->old_domain);
 unlock:
                raw_spin_unlock(&desc->lock);
        }
@@ -621,12 +654,48 @@ void irq_complete_move(struct irq_cfg *cfg)
        __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
 }
 
-void irq_force_complete_move(int irq)
+/*
+ * Called with @desc->lock held and interrupts disabled.
+ */
+void irq_force_complete_move(struct irq_desc *desc)
 {
-       struct irq_cfg *cfg = irq_cfg(irq);
+       struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+       struct apic_chip_data *data = apic_chip_data(irqdata);
+       struct irq_cfg *cfg = data ? &data->cfg : NULL;
 
-       if (cfg)
-               __irq_complete_move(cfg, cfg->vector);
+       if (!cfg)
+               return;
+
+       __irq_complete_move(cfg, cfg->vector);
+
+       /*
+        * This is tricky. If the cleanup of @data->old_domain has not been
+        * done yet, then the following setaffinity call will fail with
+        * -EBUSY. This can leave the interrupt in a stale state.
+        *
+        * The cleanup cannot make progress because we hold @desc->lock. So in
+        * case @data->old_domain is not yet cleaned up, we need to drop the
+        * lock and acquire it again. @desc cannot go away, because the
+        * hotplug code holds the sparse irq lock.
+        */
+       raw_spin_lock(&vector_lock);
+       /* Clean out all offline cpus (including ourself) first. */
+       cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+       while (!cpumask_empty(data->old_domain)) {
+               raw_spin_unlock(&vector_lock);
+               raw_spin_unlock(&desc->lock);
+               cpu_relax();
+               raw_spin_lock(&desc->lock);
+               /*
+                * Reevaluate apic_chip_data. It might have been cleared after
+                * we dropped @desc->lock.
+                */
+               data = apic_chip_data(irqdata);
+               if (!data)
+                       return;
+               raw_spin_lock(&vector_lock);
+       }
+       raw_spin_unlock(&vector_lock);
 }
 #endif
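
/*
 * Editor's summary of the reworked allocator above (illustrative only):
 *
 *	__assign_irq_vector():
 *	  pending move or uncleaned old_domain        -> -EBUSY
 *	  for each not-yet-searched online cpu in mask:
 *	    domain = apic->vector_allocation_domain(cpu)
 *	    domain subset of d->domain and equal      -> success
 *	    proper subset                             -> keep d->cfg.vector,
 *	                                                 mark shrunk cpus, update
 *	    else scan vectors in steps of 16 for one free on every online
 *	         cpu of domain; found                 -> install it, update
 *	    nothing found: add domain to searched_cpumask, try the next cpu
 *	  all cpus exhausted                          -> -ENOSPC
 *
 *	update:  old_domain &= cpu_online_mask; move_in_progress = !empty
 *	success: cache dest_apicid (cannot fail, the overlap was verified)
 */
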
 
index d760c6bb37b53c25931bef3bc47c903def86c8ee..624db00583f44a76283c4d8d5e5377a8342f6517 100644 (file)
@@ -889,7 +889,10 @@ void __init uv_system_init(void)
                return;
        }
        pr_info("UV: Found %s hub\n", hub);
-       map_low_mmrs();
+
+       /* We now only need to map the MMRs on UV1 */
+       if (is_uv1_hub())
+               map_low_mmrs();
 
        m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
        m_val = m_n_config.s.m_skt;
index 6ce39025f467fb060ce68c8b8ebfaf8f87258d09..fdeb0ce07c166d38c7cdb5639a6e173932450c13 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
 
-#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
 static char syscalls[] = {
 #include <asm/syscalls_32.h>
 };
index f2edafb5f24eb2034ee54751292af3fa9bd70fb9..d875f97d4e0ba0477c648e1244ff238bfd48be03 100644 (file)
@@ -4,17 +4,11 @@
 
 #include <asm/ia32.h>
 
-#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
-#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
-#ifdef CONFIG_X86_X32_ABI
-# define __SYSCALL_X32(nr, sym, compat) [nr] = 1,
-#else
-# define __SYSCALL_X32(nr, sym, compat) /* nothing */
-#endif
+#define __SYSCALL_64(nr, sym, qual) [nr] = 1,
 static char syscalls_64[] = {
 #include <asm/syscalls_64.h>
 };
-#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
 static char syscalls_ia32[] = {
 #include <asm/syscalls_32.h>
 };
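
/*
 * Editor's note on the counting trick above (illustrative): expanding
 * each __SYSCALL_*() entry to "[nr] = 1," turns the generated header into
 * a designated initializer, so sizeof() the char array equals the highest
 * syscall number + 1.  asm-offsets then typically exports it as a build
 * constant along the lines of:
 *
 *	DEFINE(NR_syscalls, sizeof(syscalls_64));
 *	DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));
 *
 * The third macro argument is only renamed here (compat -> qual); the
 * dummy expansion never uses it.
 */
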
index 58031303e30488c540d609f95d0693918c09fa64..d8cf3338a0353e8a232dcfba34a7deb57a01ac15 100644 (file)
@@ -30,13 +30,7 @@ obj-$(CONFIG_CPU_SUP_CENTAUR)                += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)     += transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)           += umc.o
 
-obj-$(CONFIG_PERF_EVENTS)              += perf_event.o
-
 ifdef CONFIG_PERF_EVENTS
-obj-$(CONFIG_CPU_SUP_AMD)              += perf_event_amd.o perf_event_amd_uncore.o
-ifdef CONFIG_AMD_IOMMU
-obj-$(CONFIG_CPU_SUP_AMD)              += perf_event_amd_iommu.o
-endif
 obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_p6.o perf_event_knc.o perf_event_p4.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
 obj-$(CONFIG_CPU_SUP_INTEL)            += perf_event_intel_rapl.o perf_event_intel_cqm.o
@@ -56,7 +50,7 @@ obj-$(CONFIG_X86_MCE)                 += mcheck/
 obj-$(CONFIG_MTRR)                     += mtrr/
 obj-$(CONFIG_MICROCODE)                        += microcode/
 
-obj-$(CONFIG_X86_LOCAL_APIC)           += perfctr-watchdog.o perf_event_amd_ibs.o
+obj-$(CONFIG_X86_LOCAL_APIC)           += perfctr-watchdog.o
 
 obj-$(CONFIG_HYPERVISOR_GUEST)         += vmware.o hypervisor.o mshyperv.o
 
@@ -64,7 +58,7 @@ ifdef CONFIG_X86_FEATURE_NAMES
 quiet_cmd_mkcapflags = MKCAP   $@
       cmd_mkcapflags = $(CONFIG_SHELL) $(srctree)/$(src)/mkcapflags.sh $< $@
 
-cpufeature = $(src)/../../include/asm/cpufeature.h
+cpufeature = $(src)/../../include/asm/cpufeatures.h
 
 targets += capflags.c
 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
index a07956a08936e8ea56c1a73075d700d281895577..97c59fd60702f4ceacad7c4d46a3c8d5de7dacbf 100644 (file)
@@ -117,7 +117,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                void (*f_vide)(void);
                u64 d, d2;
 
-               printk(KERN_INFO "AMD K6 stepping B detected - ");
+               pr_info("AMD K6 stepping B detected - ");
 
                /*
                 * It looks like AMD fixed the 2.6.2 bug and improved indirect
@@ -133,10 +133,9 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                d = d2-d;
 
                if (d > 20*K6_BUG_LOOP)
-                       printk(KERN_CONT
-                               "system stability may be impaired when more than 32 MB are used.\n");
+                       pr_cont("system stability may be impaired when more than 32 MB are used.\n");
                else
-                       printk(KERN_CONT "probably OK (after B9730xxxx).\n");
+                       pr_cont("probably OK (after B9730xxxx).\n");
        }
 
        /* K6 with old style WHCR */
@@ -154,7 +153,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
-                       printk(KERN_INFO "Enabling old style K6 write allocation for %d Mb\n",
+                       pr_info("Enabling old style K6 write allocation for %d Mb\n",
                                mbytes);
                }
                return;
@@ -175,7 +174,7 @@ static void init_amd_k6(struct cpuinfo_x86 *c)
                        wbinvd();
                        wrmsr(MSR_K6_WHCR, l, h);
                        local_irq_restore(flags);
-                       printk(KERN_INFO "Enabling new style K6 write allocation for %d Mb\n",
+                       pr_info("Enabling new style K6 write allocation for %d Mb\n",
                                mbytes);
                }
 
@@ -202,7 +201,7 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
         */
        if (c->x86_model >= 6 && c->x86_model <= 10) {
                if (!cpu_has(c, X86_FEATURE_XMM)) {
-                       printk(KERN_INFO "Enabling disabled K7/SSE Support.\n");
+                       pr_info("Enabling disabled K7/SSE Support.\n");
                        msr_clear_bit(MSR_K7_HWCR, 15);
                        set_cpu_cap(c, X86_FEATURE_XMM);
                }
@@ -216,9 +215,8 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
        if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) {
                rdmsr(MSR_K7_CLK_CTL, l, h);
                if ((l & 0xfff00000) != 0x20000000) {
-                       printk(KERN_INFO
-                           "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
-                                       l, ((l & 0x000fffff)|0x20000000));
+                       pr_info("CPU: CLK_CTL MSR was %x. Reprogramming to %x\n",
+                               l, ((l & 0x000fffff)|0x20000000));
                        wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h);
                }
        }
@@ -485,7 +483,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
                        unsigned long pfn = tseg >> PAGE_SHIFT;
 
-                       printk(KERN_DEBUG "tseg: %010llx\n", tseg);
+                       pr_debug("tseg: %010llx\n", tseg);
                        if (pfn_range_is_mapped(pfn, pfn + 1))
                                set_memory_4k((unsigned long)__va(tseg), 1);
                }
@@ -500,8 +498,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
                        rdmsrl(MSR_K7_HWCR, val);
                        if (!(val & BIT(24)))
-                               printk(KERN_WARNING FW_BUG "TSC doesn't count "
-                                       "with P0 frequency!\n");
+                               pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
                }
        }
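
/*
 * Editor's note on the conversion pattern used in this file and the cpu
 * files that follow: printk(KERN_INFO ...) becomes pr_info(), KERN_CONT
 * becomes pr_cont(), KERN_WARNING becomes pr_warn(), and so on.  One
 * behavioural difference worth keeping in mind: unlike
 * printk(KERN_DEBUG ...), pr_debug() compiles away unless DEBUG or
 * CONFIG_DYNAMIC_DEBUG is enabled, and all pr_*() calls pick up a
 * file-local pr_fmt() prefix if one is defined.
 */
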
 
index 04f0fe5af83ec34bb4fd6ec09fd3cc8db1c7ee07..a972ac4c7e7df05238e78b08e093cded47640fcd 100644 (file)
@@ -15,7 +15,7 @@ void __init check_bugs(void)
 {
        identify_boot_cpu();
 #if !defined(CONFIG_SMP)
-       printk(KERN_INFO "CPU: ");
+       pr_info("CPU: ");
        print_cpu_info(&boot_cpu_data);
 #endif
        alternative_instructions();
index ae20be6e483c77703413cf36e9db065134da01f3..1661d8ec92805191cd4581c365db68ea5acfdf02 100644 (file)
@@ -1,7 +1,7 @@
 #include <linux/bitops.h>
 #include <linux/kernel.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
@@ -29,7 +29,7 @@ static void init_c3(struct cpuinfo_x86 *c)
                        rdmsr(MSR_VIA_FCR, lo, hi);
                        lo |= ACE_FCR;          /* enable ACE unit */
                        wrmsr(MSR_VIA_FCR, lo, hi);
-                       printk(KERN_INFO "CPU: Enabled ACE h/w crypto\n");
+                       pr_info("CPU: Enabled ACE h/w crypto\n");
                }
 
                /* enable RNG unit, if present and disabled */
@@ -37,7 +37,7 @@ static void init_c3(struct cpuinfo_x86 *c)
                        rdmsr(MSR_VIA_RNG, lo, hi);
                        lo |= RNG_ENABLE;       /* enable RNG unit */
                        wrmsr(MSR_VIA_RNG, lo, hi);
-                       printk(KERN_INFO "CPU: Enabled h/w RNG\n");
+                       pr_info("CPU: Enabled h/w RNG\n");
                }
 
                /* store Centaur Extended Feature Flags as
@@ -130,7 +130,7 @@ static void init_centaur(struct cpuinfo_x86 *c)
                        name = "C6";
                        fcr_set = ECX8|DSMC|EDCTLB|EMMX|ERETSTK;
                        fcr_clr = DPDC;
-                       printk(KERN_NOTICE "Disabling bugged TSC.\n");
+                       pr_notice("Disabling bugged TSC.\n");
                        clear_cpu_cap(c, X86_FEATURE_TSC);
                        break;
                case 8:
@@ -163,11 +163,11 @@ static void init_centaur(struct cpuinfo_x86 *c)
                newlo = (lo|fcr_set) & (~fcr_clr);
 
                if (newlo != lo) {
-                       printk(KERN_INFO "Centaur FCR was 0x%X now 0x%X\n",
+                       pr_info("Centaur FCR was 0x%X now 0x%X\n",
                                lo, newlo);
                        wrmsr(MSR_IDT_FCR1, newlo, hi);
                } else {
-                       printk(KERN_INFO "Centaur FCR is 0x%X\n", lo);
+                       pr_info("Centaur FCR is 0x%X\n", lo);
                }
                /* Emulate MTRRs using Centaur's MCR. */
                set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
index 37830de8f60a8f0d8f8da27d409da72ce1f13551..b25c33dd10df841060a75be679b5e77d6b58c453 100644 (file)
@@ -162,6 +162,22 @@ static int __init x86_mpx_setup(char *s)
 }
 __setup("nompx", x86_mpx_setup);
 
+static int __init x86_noinvpcid_setup(char *s)
+{
+       /* noinvpcid doesn't accept parameters */
+       if (s)
+               return -EINVAL;
+
+       /* do not emit a message if the feature is not present */
+       if (!boot_cpu_has(X86_FEATURE_INVPCID))
+               return 0;
+
+       setup_clear_cpu_cap(X86_FEATURE_INVPCID);
+       pr_info("noinvpcid: INVPCID feature disabled\n");
+       return 0;
+}
+early_param("noinvpcid", x86_noinvpcid_setup);
+
 #ifdef CONFIG_X86_32
 static int cachesize_override = -1;
 static int disable_x86_serial_nr = 1;
@@ -228,7 +244,7 @@ static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
        lo |= 0x200000;
        wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
 
-       printk(KERN_NOTICE "CPU serial number disabled.\n");
+       pr_notice("CPU serial number disabled.\n");
        clear_cpu_cap(c, X86_FEATURE_PN);
 
        /* Disabling the serial number may affect the cpuid level */
@@ -329,9 +345,8 @@ static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
                if (!warn)
                        continue;
 
-               printk(KERN_WARNING
-                      "CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
-                               x86_cap_flag(df->feature), df->level);
+               pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
+                       x86_cap_flag(df->feature), df->level);
        }
 }
 
@@ -510,7 +525,7 @@ void detect_ht(struct cpuinfo_x86 *c)
        smp_num_siblings = (ebx & 0xff0000) >> 16;
 
        if (smp_num_siblings == 1) {
-               printk_once(KERN_INFO "CPU0: Hyper-Threading is disabled\n");
+               pr_info_once("CPU0: Hyper-Threading is disabled\n");
                goto out;
        }
 
@@ -531,10 +546,10 @@ void detect_ht(struct cpuinfo_x86 *c)
 
 out:
        if (!printed && (c->x86_max_cores * smp_num_siblings) > 1) {
-               printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
-                      c->phys_proc_id);
-               printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
-                      c->cpu_core_id);
+               pr_info("CPU: Physical Processor ID: %d\n",
+                       c->phys_proc_id);
+               pr_info("CPU: Processor Core ID: %d\n",
+                       c->cpu_core_id);
                printed = 1;
        }
 #endif
@@ -559,9 +574,8 @@ static void get_cpu_vendor(struct cpuinfo_x86 *c)
                }
        }
 
-       printk_once(KERN_ERR
-                       "CPU: vendor_id '%s' unknown, using generic init.\n" \
-                       "CPU: Your system may be unstable.\n", v);
+       pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
+                   "CPU: Your system may be unstable.\n", v);
 
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        this_cpu = &default_cpu;
@@ -760,7 +774,7 @@ void __init early_cpu_init(void)
        int count = 0;
 
 #ifdef CONFIG_PROCESSOR_SELECT
-       printk(KERN_INFO "KERNEL supported cpus:\n");
+       pr_info("KERNEL supported cpus:\n");
 #endif
 
        for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
@@ -778,7 +792,7 @@ void __init early_cpu_init(void)
                        for (j = 0; j < 2; j++) {
                                if (!cpudev->c_ident[j])
                                        continue;
-                               printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
+                               pr_info("  %s %s\n", cpudev->c_vendor,
                                        cpudev->c_ident[j]);
                        }
                }
@@ -1061,7 +1075,7 @@ static void __print_cpu_msr(void)
                for (index = index_min; index < index_max; index++) {
                        if (rdmsrl_safe(index, &val))
                                continue;
-                       printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+                       pr_info(" MSR%08x: %016llx\n", index, val);
                }
        }
 }
@@ -1100,19 +1114,19 @@ void print_cpu_info(struct cpuinfo_x86 *c)
        }
 
        if (vendor && !strstr(c->x86_model_id, vendor))
-               printk(KERN_CONT "%s ", vendor);
+               pr_cont("%s ", vendor);
 
        if (c->x86_model_id[0])
-               printk(KERN_CONT "%s", c->x86_model_id);
+               pr_cont("%s", c->x86_model_id);
        else
-               printk(KERN_CONT "%d86", c->x86);
+               pr_cont("%d86", c->x86);
 
-       printk(KERN_CONT " (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
+       pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
 
        if (c->x86_mask || c->cpuid_level >= 0)
-               printk(KERN_CONT ", stepping: 0x%x)\n", c->x86_mask);
+               pr_cont(", stepping: 0x%x)\n", c->x86_mask);
        else
-               printk(KERN_CONT ")\n");
+               pr_cont(")\n");
 
        print_cpu_msr(c);
 }
@@ -1438,7 +1452,7 @@ void cpu_init(void)
 
        show_ucode_info_early();
 
-       printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+       pr_info("Initializing CPU#%d\n", cpu);
 
        if (cpu_feature_enabled(X86_FEATURE_VME) ||
            cpu_has_tsc ||
@@ -1475,20 +1489,6 @@ void cpu_init(void)
 }
 #endif
 
-#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
-void warn_pre_alternatives(void)
-{
-       WARN(1, "You're using static_cpu_has before alternatives have run!\n");
-}
-EXPORT_SYMBOL_GPL(warn_pre_alternatives);
-#endif
-
-inline bool __static_cpu_has_safe(u16 bit)
-{
-       return boot_cpu_has(bit);
-}
-EXPORT_SYMBOL_GPL(__static_cpu_has_safe);
-
 static void bsp_resume(void)
 {
        if (this_cpu->c_bsp_resume)
index aaf152e79637384781d264afbff76a58da9489cc..6adef9cac23ee99c96924e2789abeb2d1ad123f3 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/timer.h>
 #include <asm/pci-direct.h>
 #include <asm/tsc.h>
+#include <asm/cpufeature.h>
 
 #include "cpu.h"
 
@@ -103,7 +104,7 @@ static void check_cx686_slop(struct cpuinfo_x86 *c)
                local_irq_restore(flags);
 
                if (ccr5 & 2) { /* possible wrong calibration done */
-                       printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
+                       pr_info("Recalibrating delay loop with SLOP bit reset\n");
                        calibrate_delay();
                        c->loops_per_jiffy = loops_per_jiffy;
                }
@@ -115,7 +116,7 @@ static void set_cx86_reorder(void)
 {
        u8 ccr3;
 
-       printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
+       pr_info("Enable Memory access reorder on Cyrix/NSC processor.\n");
        ccr3 = getCx86(CX86_CCR3);
        setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */
 
@@ -128,7 +129,7 @@ static void set_cx86_reorder(void)
 
 static void set_cx86_memwb(void)
 {
-       printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
+       pr_info("Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
        /* CCR2 bit 2: unlock NW bit */
        setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
@@ -268,7 +269,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
                 *  VSA1 we work around however.
                 */
 
-               printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
+               pr_info("Working around Cyrix MediaGX virtual DMA bugs.\n");
                isa_dma_bridge_buggy = 2;
 
                /* We do this before the PCI layer is running. However we
@@ -426,7 +427,7 @@ static void cyrix_identify(struct cpuinfo_x86 *c)
                if (dir0 == 5 || dir0 == 3) {
                        unsigned char ccr3;
                        unsigned long flags;
-                       printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
+                       pr_info("Enabling CPUID on Cyrix processor.\n");
                        local_irq_save(flags);
                        ccr3 = getCx86(CX86_CCR3);
                        /* enable MAPEN  */
index d820d8eae96be0b3daa0ec01d9f4a22bf1ad930e..73d391ae452f82a7bfeca4be2276eed628dcd452 100644 (file)
@@ -56,7 +56,7 @@ detect_hypervisor_vendor(void)
        }
 
        if (max_pri)
-               printk(KERN_INFO "Hypervisor detected: %s\n", x86_hyper->name);
+               pr_info("Hypervisor detected: %s\n", x86_hyper->name);
 }
 
 void init_hypervisor(struct cpuinfo_x86 *c)
index 565648bc1a0aef6c3cf60da92ec9fb60a2408c90..6040bd71679f5eab7f96334f818891abfc5b877f 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/module.h>
 #include <linux/uaccess.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/pgtable.h>
 #include <asm/msr.h>
 #include <asm/bugs.h>
@@ -61,7 +61,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
         */
        if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
            c->microcode < 0x20e) {
-               printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
+               pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
        }
 
@@ -140,7 +140,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
-                       printk(KERN_INFO "Disabled fast string operations\n");
+                       pr_info("Disabled fast string operations\n");
                        setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
                        setup_clear_cpu_cap(X86_FEATURE_ERMS);
                }
@@ -176,7 +176,7 @@ int ppro_with_ram_bug(void)
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask < 8) {
-               printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
+               pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
@@ -225,7 +225,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
 
                set_cpu_bug(c, X86_BUG_F00F);
                if (!f00f_workaround_enabled) {
-                       printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
+                       pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
@@ -244,7 +244,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
         * Forcefully enable PAE if kernel parameter "forcepae" is present.
         */
        if (forcepae) {
-               printk(KERN_WARNING "PAE forced!\n");
+               pr_warn("PAE forced!\n");
                set_cpu_cap(c, X86_FEATURE_PAE);
                add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
        }
index 0b6c52388cf484f809a7f328f475424dd262417c..de6626c18e427b771ea902451ae77ab52e233915 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/sysfs.h>
 #include <linux/pci.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/amd_nb.h>
 #include <asm/smp.h>
 
@@ -444,7 +444,7 @@ static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
        err = amd_set_l3_disable_slot(nb, cpu, slot, val);
        if (err) {
                if (err == -EEXIST)
-                       pr_warning("L3 slot %d in use/index already disabled!\n",
+                       pr_warn("L3 slot %d in use/index already disabled!\n",
                                   slot);
                return err;
        }
index afa9f0d487ea07b79936fcd5b62040540102fad3..fbb5e90557a5257bbde11179cdeb415e60485019 100644 (file)
@@ -1,5 +1,5 @@
 #include <asm/cpu_device_id.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/slab.h>
index 4cfba4371a71f28c4615610475e3eec9e8d9790c..517619ea6498b41839f87f28bce4a76b1e43c7ab 100644 (file)
@@ -115,7 +115,7 @@ static int raise_local(void)
        int cpu = m->extcpu;
 
        if (m->inject_flags & MCJ_EXCEPTION) {
-               printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu);
+               pr_info("Triggering MCE exception on CPU %d\n", cpu);
                switch (context) {
                case MCJ_CTX_IRQ:
                        /*
@@ -128,15 +128,15 @@ static int raise_local(void)
                        raise_exception(m, NULL);
                        break;
                default:
-                       printk(KERN_INFO "Invalid MCE context\n");
+                       pr_info("Invalid MCE context\n");
                        ret = -EINVAL;
                }
-               printk(KERN_INFO "MCE exception done on CPU %d\n", cpu);
+               pr_info("MCE exception done on CPU %d\n", cpu);
        } else if (m->status) {
-               printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
+               pr_info("Starting machine check poll CPU %d\n", cpu);
                raise_poll(m);
                mce_notify_irq();
-               printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu);
+               pr_info("Machine check poll done on CPU %d\n", cpu);
        } else
                m->finished = 0;
 
@@ -183,8 +183,7 @@ static void raise_mce(struct mce *m)
                start = jiffies;
                while (!cpumask_empty(mce_inject_cpumask)) {
                        if (!time_before(jiffies, start + 2*HZ)) {
-                               printk(KERN_ERR
-                               "Timeout waiting for mce inject %lx\n",
+                               pr_err("Timeout waiting for mce inject %lx\n",
                                        *cpumask_bits(mce_inject_cpumask));
                                break;
                        }
@@ -241,7 +240,7 @@ static int inject_init(void)
 {
        if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
                return -ENOMEM;
-       printk(KERN_INFO "Machine check injector initialized\n");
+       pr_info("Machine check injector initialized\n");
        register_mce_write_callback(mce_write);
        register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
                                "mce_notify");
index a006f4cd792b10d54a92eff4a3b9c6860aa5382e..b7180801ea33785153ee10b8050754230700c487 100644 (file)
@@ -1617,10 +1617,10 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
        case X86_VENDOR_AMD: {
                u32 ebx = cpuid_ebx(0x80000007);
 
-               mce_amd_feature_init(c);
                mce_flags.overflow_recov = !!(ebx & BIT(0));
                mce_flags.succor         = !!(ebx & BIT(1));
                mce_flags.smca           = !!(ebx & BIT(3));
+               mce_amd_feature_init(c);
 
                break;
                }
index e99b15077e9464b9c9f337873ae58101285e3215..88de27bd5797c3f911435a9d0eb35f7258c4d0d1 100644 (file)
@@ -28,7 +28,7 @@
 #include <asm/msr.h>
 #include <asm/trace/irq_vectors.h>
 
-#define NR_BLOCKS         9
+#define NR_BLOCKS         5
 #define THRESHOLD_MAX     0xFFF
 #define INT_TYPE_APIC     0x00020000
 #define MASK_VALID_HI     0x80000000
 #define DEF_LVT_OFF            0x2
 #define DEF_INT_TYPE_APIC      0x2
 
+/* Scalable MCA: */
+
+/* Threshold LVT offset is at MSR 0xC0000410[15:12] */
+#define SMCA_THR_LVT_OFF       0xF000
+
+/*
+ * OS is required to set the MCAX bit to acknowledge that it is now using the
+ * new MSR ranges and new registers under each bank. It also means that the OS
+ * will configure deferred errors in the new MCx_CONFIG register. If the bit is
+ * not set, uncorrectable errors will cause a system panic.
+ */
+#define SMCA_MCAX_EN_OFF       0x1
+
 static const char * const th_names[] = {
        "load_store",
        "insn_fetch",
@@ -84,6 +97,13 @@ struct thresh_restart {
 
 static inline bool is_shared_bank(int bank)
 {
+       /*
+        * Scalable MCA provides for only one core to have access to the MSRs of
+        * a shared bank.
+        */
+       if (mce_flags.smca)
+               return false;
+
        /* Bank 4 is for northbridge reporting and is thus shared */
        return (bank == 4);
 }
@@ -135,6 +155,14 @@ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
        }
 
        if (apic != msr) {
+               /*
+                * On SMCA CPUs, LVT offset is programmed at a different MSR, and
+                * the BIOS provides the value. The original field where LVT offset
+                * was set is reserved. Return early here:
+                */
+               if (mce_flags.smca)
+                       return 0;
+
                pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
                       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
                       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
@@ -247,14 +275,65 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
        wrmsr(MSR_CU_DEF_ERR, low, high);
 }
 
+static int
+prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
+                       int offset, u32 misc_high)
+{
+       unsigned int cpu = smp_processor_id();
+       struct threshold_block b;
+       int new;
+
+       if (!block)
+               per_cpu(bank_map, cpu) |= (1 << bank);
+
+       memset(&b, 0, sizeof(b));
+       b.cpu                   = cpu;
+       b.bank                  = bank;
+       b.block                 = block;
+       b.address               = addr;
+       b.interrupt_capable     = lvt_interrupt_supported(bank, misc_high);
+
+       if (!b.interrupt_capable)
+               goto done;
+
+       b.interrupt_enable = 1;
+
+       if (mce_flags.smca) {
+               u32 smca_low, smca_high;
+               u32 smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);
+
+               if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
+                       smca_high |= SMCA_MCAX_EN_OFF;
+                       wrmsr(smca_addr, smca_low, smca_high);
+               }
+
+               /* Gather LVT offset for thresholding: */
+               if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
+                       goto out;
+
+               new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
+       } else {
+               new = (misc_high & MASK_LVTOFF_HI) >> 20;
+       }
+
+       offset = setup_APIC_mce_threshold(offset, new);
+
+       if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
+               mce_threshold_vector = amd_threshold_interrupt;
+
+done:
+       mce_threshold_block_init(&b, offset);
+
+out:
+       return offset;
+}
+
 /* cpu init entry point, called from mce.c with preempt off */
 void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
-       struct threshold_block b;
-       unsigned int cpu = smp_processor_id();
        u32 low = 0, high = 0, address = 0;
        unsigned int bank, block;
-       int offset = -1, new;
+       int offset = -1;
 
        for (bank = 0; bank < mca_cfg.banks; ++bank) {
                for (block = 0; block < NR_BLOCKS; ++block) {
@@ -279,29 +358,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
                             (high & MASK_LOCKED_HI))
                                continue;
 
-                       if (!block)
-                               per_cpu(bank_map, cpu) |= (1 << bank);
-
-                       memset(&b, 0, sizeof(b));
-                       b.cpu                   = cpu;
-                       b.bank                  = bank;
-                       b.block                 = block;
-                       b.address               = address;
-                       b.interrupt_capable     = lvt_interrupt_supported(bank, high);
-
-                       if (!b.interrupt_capable)
-                               goto init;
-
-                       b.interrupt_enable = 1;
-                       new     = (high & MASK_LVTOFF_HI) >> 20;
-                       offset  = setup_APIC_mce_threshold(offset, new);
-
-                       if ((offset == new) &&
-                           (mce_threshold_vector != amd_threshold_interrupt))
-                               mce_threshold_vector = amd_threshold_interrupt;
-
-init:
-                       mce_threshold_block_init(&b, offset);
+                       offset = prepare_threshold_block(bank, block, address, offset, high);
                }
        }
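
A quick illustration of the SMCA bit arithmetic used in prepare_threshold_block() above: the MCAX acknowledge bit is OR-ed into the high half of MCx_CONFIG, and the thresholding LVT offset comes from bits [15:12] of the low half of MSR_CU_DEF_ERR. What follows is a minimal userspace sketch of just that bit math, with made-up register values standing in for the rdmsr_safe() results; it is not the kernel MSR API.

#include <stdio.h>
#include <stdint.h>

#define SMCA_THR_LVT_OFF  0xF000   /* LVT offset lives in bits [15:12] */
#define SMCA_MCAX_EN_OFF  0x1      /* MCAX acknowledge bit, high half of MCx_CONFIG */

int main(void)
{
	/* Sample values; in the kernel these come from rdmsr_safe(). */
	uint32_t mcx_cfg_lo = 0, mcx_cfg_hi = 0;          /* MCx_CONFIG */
	uint32_t def_err_lo = 0x0000a000, def_err_hi = 0; /* MSR_CU_DEF_ERR */

	/* Acknowledge the extended MSR space, as prepare_threshold_block() does. */
	mcx_cfg_hi |= SMCA_MCAX_EN_OFF;

	/* Extract the thresholding LVT offset from bits [15:12]. */
	int new_off = (def_err_lo & SMCA_THR_LVT_OFF) >> 12;

	printf("MCx_CONFIG hi = 0x%x, threshold LVT offset = %d\n",
	       mcx_cfg_hi, new_off);
	(void)mcx_cfg_lo; (void)def_err_hi;
	return 0;
}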
 
index 12402e10aeffda428821dd6dcb88597203e48054..2a0717bf803372d3968dca9d2b2e6c81740d946d 100644 (file)
@@ -26,14 +26,12 @@ static void pentium_machine_check(struct pt_regs *regs, long error_code)
        rdmsr(MSR_IA32_P5_MC_ADDR, loaddr, hi);
        rdmsr(MSR_IA32_P5_MC_TYPE, lotype, hi);
 
-       printk(KERN_EMERG
-               "CPU#%d: Machine Check Exception:  0x%8X (type 0x%8X).\n",
-               smp_processor_id(), loaddr, lotype);
+       pr_emerg("CPU#%d: Machine Check Exception:  0x%8X (type 0x%8X).\n",
+                smp_processor_id(), loaddr, lotype);
 
        if (lotype & (1<<5)) {
-               printk(KERN_EMERG
-                       "CPU#%d: Possible thermal failure (CPU on fire ?).\n",
-                       smp_processor_id());
+               pr_emerg("CPU#%d: Possible thermal failure (CPU on fire ?).\n",
+                        smp_processor_id());
        }
 
        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
@@ -61,12 +59,10 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
        /* Read registers before enabling: */
        rdmsr(MSR_IA32_P5_MC_ADDR, l, h);
        rdmsr(MSR_IA32_P5_MC_TYPE, l, h);
-       printk(KERN_INFO
-              "Intel old style machine check architecture supported.\n");
+       pr_info("Intel old style machine check architecture supported.\n");
 
        /* Enable MCE: */
        cr4_set_bits(X86_CR4_MCE);
-       printk(KERN_INFO
-              "Intel old style machine check reporting enabled on CPU#%d.\n",
-              smp_processor_id());
+       pr_info("Intel old style machine check reporting enabled on CPU#%d.\n",
+               smp_processor_id());
 }
index 2c5aaf8c2e2f3dcc94d348dfe91da6d7d5000ae9..0b445c2ff735d44fedc469fd8ce0c1b8b1f1633b 100644 (file)
@@ -190,7 +190,7 @@ static int therm_throt_process(bool new_event, int event, int level)
        /* if we just entered the thermal event */
        if (new_event) {
                if (event == THERMAL_THROTTLING_EVENT)
-                       printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
+                       pr_crit("CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package",
                                state->count);
@@ -198,8 +198,7 @@ static int therm_throt_process(bool new_event, int event, int level)
        }
        if (old_event) {
                if (event == THERMAL_THROTTLING_EVENT)
-                       printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
-                               this_cpu,
+                       pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package");
                return 1;
        }
@@ -417,8 +416,8 @@ static void intel_thermal_interrupt(void)
 
 static void unexpected_thermal_interrupt(void)
 {
-       printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
-                       smp_processor_id());
+       pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
+               smp_processor_id());
 }
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
@@ -499,7 +498,7 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
 
        if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                if (system_state == SYSTEM_BOOTING)
-                       printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n", cpu);
+                       pr_debug("CPU%d: Thermal monitoring handled by SMI\n", cpu);
                return;
        }
 
@@ -557,8 +556,8 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
        l = apic_read(APIC_LVTTHMR);
        apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
 
-       printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
-                      tm2 ? "TM2" : "TM1");
+       pr_info_once("CPU0: Thermal monitoring enabled (%s)\n",
+                     tm2 ? "TM2" : "TM1");
 
        /* enable thermal throttle processing */
        atomic_set(&therm_throt_en, 1);
index 7245980186eea047e643af5010e1509bfc991632..fcf9ae9384f4cb4693d67cc8ee6433562dfc9f34 100644 (file)
@@ -12,8 +12,8 @@
 
 static void default_threshold_interrupt(void)
 {
-       printk(KERN_ERR "Unexpected threshold interrupt at vector %x\n",
-                        THRESHOLD_APIC_VECTOR);
+       pr_err("Unexpected threshold interrupt at vector %x\n",
+               THRESHOLD_APIC_VECTOR);
 }
 
 void (*mce_threshold_vector)(void) = default_threshold_interrupt;
index 01dd8702880b7f2db9fcdc3874ba8809c2ce9b80..c6a722e1d011458fa30ec5ad3ad4e78c58d2fa82 100644 (file)
@@ -17,7 +17,7 @@ static void winchip_machine_check(struct pt_regs *regs, long error_code)
 {
        ist_enter(regs);
 
-       printk(KERN_EMERG "CPU0: Machine Check Exception.\n");
+       pr_emerg("CPU0: Machine Check Exception.\n");
        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
 
        ist_exit(regs);
@@ -39,6 +39,5 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
 
        cr4_set_bits(X86_CR4_MCE);
 
-       printk(KERN_INFO
-              "Winchip machine check reporting enabled on CPU#0.\n");
+       pr_info("Winchip machine check reporting enabled on CPU#0.\n");
 }
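
For reference, the pr_err()/pr_info()/pr_emerg() conversions in the hunks above work because those helpers are thin macros that prepend the matching KERN_* level (and an optional pr_fmt() prefix) to printk(). Below is a rough userspace imitation of that pattern, with fprintf standing in for printk and simplified level strings; the real kernel macros differ in detail.

#include <stdio.h>

/* Userspace stand-ins for the kernel's level-prefix strings. */
#define KERN_ERR   "<3>"
#define KERN_INFO  "<6>"

/* A file may define pr_fmt() to tag every message; default is no prefix. */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
#endif

#define pr_err(fmt, ...)  fprintf(stderr, KERN_ERR  pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) fprintf(stdout, KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("Machine check injector initialized\n");
	pr_err("CPU%d: Unexpected LVT thermal interrupt!\n", 0);
	return 0;
}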
index 2233f8a766156891a52b9a7658f04efeaf4f86c8..5374d5d762e251e9660064a625d47fd61640753c 100644 (file)
@@ -431,10 +431,6 @@ int __init save_microcode_in_initrd_amd(void)
        else
                container = cont_va;
 
-       if (ucode_new_rev)
-               pr_info("microcode: updated early to new patch_level=0x%08x\n",
-                       ucode_new_rev);
-
        eax   = cpuid_eax(0x00000001);
        eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
 
@@ -469,8 +465,7 @@ void reload_ucode_amd(void)
        if (mc && rev < mc->hdr.patch_id) {
                if (!__apply_microcode_amd(mc)) {
                        ucode_new_rev = mc->hdr.patch_id;
-                       pr_info("microcode: reload patch_level=0x%08x\n",
-                               ucode_new_rev);
+                       pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
                }
        }
 }
@@ -953,10 +948,14 @@ struct microcode_ops * __init init_amd_microcode(void)
        struct cpuinfo_x86 *c = &boot_cpu_data;
 
        if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-               pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
+               pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
                return NULL;
        }
 
+       if (ucode_new_rev)
+               pr_info_once("microcode updated early to new patch_level=0x%08x\n",
+                            ucode_new_rev);
+
        return &microcode_amd_ops;
 }
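
The early-update message now uses pr_info_once(), so only the first pass through init_amd_microcode() reports the patch level. A simplified userspace sketch of the print-once guard idiom; the patch level below is a sample value and this is not the kernel implementation.

#include <stdio.h>
#include <stdbool.h>

/* Userspace approximation of pr_info_once(): print only on the first call. */
#define pr_info_once(fmt, ...)                          \
do {                                                    \
	static bool __done;                             \
	if (!__done) {                                  \
		__done = true;                          \
		printf(fmt, ##__VA_ARGS__);             \
	}                                               \
} while (0)

int main(void)
{
	/* Simulate several CPUs reporting the same early-update revision. */
	for (int cpu = 0; cpu < 4; cpu++)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     0x0600611au /* sample value */);
	return 0;
}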
 
index faec7120c508584c9f038445c0dae593cfb5faae..cea8552e2b3abf2ec2040379886c5e33ec19b32f 100644 (file)
 #define MICROCODE_VERSION      "2.01"
 
 static struct microcode_ops    *microcode_ops;
-
 static bool dis_ucode_ldr;
 
-static int __init disable_loader(char *str)
-{
-       dis_ucode_ldr = true;
-       return 1;
-}
-__setup("dis_ucode_ldr", disable_loader);
-
 /*
  * Synchronization.
  *
@@ -81,15 +73,16 @@ struct cpu_info_ctx {
 
 static bool __init check_loader_disabled_bsp(void)
 {
+       static const char *__dis_opt_str = "dis_ucode_ldr";
+
 #ifdef CONFIG_X86_32
        const char *cmdline = (const char *)__pa_nodebug(boot_command_line);
-       const char *opt     = "dis_ucode_ldr";
-       const char *option  = (const char *)__pa_nodebug(opt);
+       const char *option  = (const char *)__pa_nodebug(__dis_opt_str);
        bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr);
 
 #else /* CONFIG_X86_64 */
        const char *cmdline = boot_command_line;
-       const char *option  = "dis_ucode_ldr";
+       const char *option  = __dis_opt_str;
        bool *res = &dis_ucode_ldr;
 #endif
 
@@ -479,7 +472,7 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
        enum ucode_state ustate;
        struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
 
-       if (uci && uci->valid)
+       if (uci->valid)
                return UCODE_OK;
 
        if (collect_cpu_info(cpu))
index ee81c544ee0daa8f6d8f35569463ba3b9f444f06..cb397947f688692ac0e1cbaf589cd757230bc5e8 100644 (file)
 #include <asm/setup.h>
 #include <asm/msr.h>
 
-static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];
+/*
+ * Temporary storage for microcode blob pointers. We note here the pointers to
+ * microcode blobs we've got from whatever storage (detached initrd, builtin).
+ * Later on, we put those into the final storage, mc_saved_data.mc_saved.
+ */
+static unsigned long mc_tmp_ptrs[MAX_UCODE_COUNT];
+
 static struct mc_saved_data {
-       unsigned int mc_saved_count;
+       unsigned int num_saved;
        struct microcode_intel **mc_saved;
 } mc_saved_data;
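
mc_tmp_ptrs records blob locations before the initrd has a stable virtual address; copy_ptrs() later rebuilds usable pointers by adding the final base passed in as 'off'. A small userspace sketch of that offset-plus-base scheme, using an ordinary buffer in place of the initrd; all names here are illustrative, not the kernel's.

#include <stdio.h>

#define MAX_BLOBS 4

int main(void)
{
	char initrd[64] = "AAAABBBBBBBBCCCC";   /* stand-in for the initrd image */
	unsigned long tmp_ptrs[MAX_BLOBS];      /* offsets noted early, like mc_tmp_ptrs */
	const char *blobs[MAX_BLOBS];           /* final pointers, like mc_saved */
	int num_saved = 3, i;

	/* Early on, only relative positions are recorded. */
	tmp_ptrs[0] = 0;
	tmp_ptrs[1] = 4;
	tmp_ptrs[2] = 12;

	/* Later, once the base address is known, turn offsets into pointers. */
	unsigned long off = (unsigned long)initrd;
	for (i = 0; i < num_saved; i++)
		blobs[i] = (const char *)(tmp_ptrs[i] + off);

	for (i = 0; i < num_saved; i++)
		printf("blob %d starts with '%c'\n", i, blobs[i][0]);
	return 0;
}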
 
@@ -78,53 +84,50 @@ load_microcode_early(struct microcode_intel **saved,
 }
 
 static inline void
-copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
-                 unsigned long off, int num_saved)
+copy_ptrs(struct microcode_intel **mc_saved, unsigned long *mc_ptrs,
+         unsigned long off, int num_saved)
 {
        int i;
 
        for (i = 0; i < num_saved; i++)
-               mc_saved[i] = (struct microcode_intel *)(initrd[i] + off);
+               mc_saved[i] = (struct microcode_intel *)(mc_ptrs[i] + off);
 }
 
 #ifdef CONFIG_X86_32
 static void
-microcode_phys(struct microcode_intel **mc_saved_tmp,
-              struct mc_saved_data *mc_saved_data)
+microcode_phys(struct microcode_intel **mc_saved_tmp, struct mc_saved_data *mcs)
 {
        int i;
        struct microcode_intel ***mc_saved;
 
-       mc_saved = (struct microcode_intel ***)
-                  __pa_nodebug(&mc_saved_data->mc_saved);
-       for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
+       mc_saved = (struct microcode_intel ***)__pa_nodebug(&mcs->mc_saved);
+
+       for (i = 0; i < mcs->num_saved; i++) {
                struct microcode_intel *p;
 
-               p = *(struct microcode_intel **)
-                       __pa_nodebug(mc_saved_data->mc_saved + i);
+               p = *(struct microcode_intel **)__pa_nodebug(mcs->mc_saved + i);
                mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
        }
 }
 #endif
 
 static enum ucode_state
-load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
-              unsigned long initrd_start, struct ucode_cpu_info *uci)
+load_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
+              unsigned long offset, struct ucode_cpu_info *uci)
 {
        struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
-       unsigned int count = mc_saved_data->mc_saved_count;
+       unsigned int count = mcs->num_saved;
 
-       if (!mc_saved_data->mc_saved) {
-               copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);
+       if (!mcs->mc_saved) {
+               copy_ptrs(mc_saved_tmp, mc_ptrs, offset, count);
 
                return load_microcode_early(mc_saved_tmp, count, uci);
        } else {
 #ifdef CONFIG_X86_32
-               microcode_phys(mc_saved_tmp, mc_saved_data);
+               microcode_phys(mc_saved_tmp, mcs);
                return load_microcode_early(mc_saved_tmp, count, uci);
 #else
-               return load_microcode_early(mc_saved_data->mc_saved,
-                                                   count, uci);
+               return load_microcode_early(mcs->mc_saved, count, uci);
 #endif
        }
 }
@@ -175,25 +178,25 @@ matching_model_microcode(struct microcode_header_intel *mc_header,
 }
 
 static int
-save_microcode(struct mc_saved_data *mc_saved_data,
+save_microcode(struct mc_saved_data *mcs,
               struct microcode_intel **mc_saved_src,
-              unsigned int mc_saved_count)
+              unsigned int num_saved)
 {
        int i, j;
        struct microcode_intel **saved_ptr;
        int ret;
 
-       if (!mc_saved_count)
+       if (!num_saved)
                return -EINVAL;
 
        /*
         * Copy new microcode data.
         */
-       saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
+       saved_ptr = kcalloc(num_saved, sizeof(struct microcode_intel *), GFP_KERNEL);
        if (!saved_ptr)
                return -ENOMEM;
 
-       for (i = 0; i < mc_saved_count; i++) {
+       for (i = 0; i < num_saved; i++) {
                struct microcode_header_intel *mc_hdr;
                struct microcode_intel *mc;
                unsigned long size;
@@ -219,8 +222,8 @@ save_microcode(struct mc_saved_data *mc_saved_data,
        /*
         * Point to newly saved microcode.
         */
-       mc_saved_data->mc_saved = saved_ptr;
-       mc_saved_data->mc_saved_count = mc_saved_count;
+       mcs->mc_saved  = saved_ptr;
+       mcs->num_saved = num_saved;
 
        return 0;
 
@@ -284,22 +287,20 @@ static unsigned int _save_mc(struct microcode_intel **mc_saved,
  * BSP can stay in the platform.
  */
 static enum ucode_state __init
-get_matching_model_microcode(int cpu, unsigned long start,
-                            void *data, size_t size,
-                            struct mc_saved_data *mc_saved_data,
-                            unsigned long *mc_saved_in_initrd,
+get_matching_model_microcode(unsigned long start, void *data, size_t size,
+                            struct mc_saved_data *mcs, unsigned long *mc_ptrs,
                             struct ucode_cpu_info *uci)
 {
-       u8 *ucode_ptr = data;
-       unsigned int leftover = size;
+       struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
+       struct microcode_header_intel *mc_header;
+       unsigned int num_saved = mcs->num_saved;
        enum ucode_state state = UCODE_OK;
+       unsigned int leftover = size;
+       u8 *ucode_ptr = data;
        unsigned int mc_size;
-       struct microcode_header_intel *mc_header;
-       struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
-       unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
        int i;
 
-       while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
+       while (leftover && num_saved < ARRAY_SIZE(mc_saved_tmp)) {
 
                if (leftover < sizeof(mc_header))
                        break;
@@ -318,32 +319,31 @@ get_matching_model_microcode(int cpu, unsigned long start,
                 * the platform, we need to find and save microcode patches
                 * with the same family and model as the BSP.
                 */
-               if (matching_model_microcode(mc_header, uci->cpu_sig.sig) !=
-                        UCODE_OK) {
+               if (matching_model_microcode(mc_header, uci->cpu_sig.sig) != UCODE_OK) {
                        ucode_ptr += mc_size;
                        continue;
                }
 
-               mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);
+               num_saved = _save_mc(mc_saved_tmp, ucode_ptr, num_saved);
 
                ucode_ptr += mc_size;
        }
 
        if (leftover) {
                state = UCODE_ERROR;
-               goto out;
+               return state;
        }
 
-       if (mc_saved_count == 0) {
+       if (!num_saved) {
                state = UCODE_NFOUND;
-               goto out;
+               return state;
        }
 
-       for (i = 0; i < mc_saved_count; i++)
-               mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;
+       for (i = 0; i < num_saved; i++)
+               mc_ptrs[i] = (unsigned long)mc_saved_tmp[i] - start;
+
+       mcs->num_saved = num_saved;
 
-       mc_saved_data->mc_saved_count = mc_saved_count;
-out:
        return state;
 }
 
@@ -373,7 +373,7 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
                native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
                csig.pf = 1 << ((val[1] >> 18) & 7);
        }
-       native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+       native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
        /* As documented in the SDM: Do a CPUID 1 here */
        sync_core();
@@ -396,11 +396,11 @@ static void show_saved_mc(void)
        unsigned int sig, pf, rev, total_size, data_size, date;
        struct ucode_cpu_info uci;
 
-       if (mc_saved_data.mc_saved_count == 0) {
+       if (!mc_saved_data.num_saved) {
                pr_debug("no microcode data saved.\n");
                return;
        }
-       pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);
+       pr_debug("Total microcode saved: %d\n", mc_saved_data.num_saved);
 
        collect_cpu_info_early(&uci);
 
@@ -409,7 +409,7 @@ static void show_saved_mc(void)
        rev = uci.cpu_sig.rev;
        pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);
 
-       for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
+       for (i = 0; i < mc_saved_data.num_saved; i++) {
                struct microcode_header_intel *mc_saved_header;
                struct extended_sigtable *ext_header;
                int ext_sigcount;
@@ -465,7 +465,7 @@ int save_mc_for_early(u8 *mc)
 {
        struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
        unsigned int mc_saved_count_init;
-       unsigned int mc_saved_count;
+       unsigned int num_saved;
        struct microcode_intel **mc_saved;
        int ret = 0;
        int i;
@@ -476,23 +476,23 @@ int save_mc_for_early(u8 *mc)
         */
        mutex_lock(&x86_cpu_microcode_mutex);
 
-       mc_saved_count_init = mc_saved_data.mc_saved_count;
-       mc_saved_count = mc_saved_data.mc_saved_count;
+       mc_saved_count_init = mc_saved_data.num_saved;
+       num_saved = mc_saved_data.num_saved;
        mc_saved = mc_saved_data.mc_saved;
 
-       if (mc_saved && mc_saved_count)
+       if (mc_saved && num_saved)
                memcpy(mc_saved_tmp, mc_saved,
-                      mc_saved_count * sizeof(struct microcode_intel *));
+                      num_saved * sizeof(struct microcode_intel *));
        /*
         * Save the microcode patch mc in mc_saved_tmp structure if it's a newer
         * version.
         */
-       mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);
+       num_saved = _save_mc(mc_saved_tmp, mc, num_saved);
 
        /*
         * Save the mc_saved_tmp in global mc_saved_data.
         */
-       ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
+       ret = save_microcode(&mc_saved_data, mc_saved_tmp, num_saved);
        if (ret) {
                pr_err("Cannot save microcode patch.\n");
                goto out;
@@ -536,7 +536,7 @@ static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
 
 static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
 static __init enum ucode_state
-scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
+scan_microcode(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
               unsigned long start, unsigned long size,
               struct ucode_cpu_info *uci)
 {
@@ -551,14 +551,18 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
        cd.data = NULL;
        cd.size = 0;
 
-       cd = find_cpio_data(p, (void *)start, size, &offset);
-       if (!cd.data) {
+       /* try built-in microcode if no initrd */
+       if (!size) {
                if (!load_builtin_intel_microcode(&cd))
                        return UCODE_ERROR;
+       } else {
+               cd = find_cpio_data(p, (void *)start, size, &offset);
+               if (!cd.data)
+                       return UCODE_ERROR;
        }
 
-       return get_matching_model_microcode(0, start, cd.data, cd.size,
-                                           mc_saved_data, initrd, uci);
+       return get_matching_model_microcode(start, cd.data, cd.size,
+                                           mcs, mc_ptrs, uci);
 }
 
 /*
@@ -567,14 +571,11 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
 static void
 print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
 {
-       int cpu = smp_processor_id();
-
-       pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
-               cpu,
-               uci->cpu_sig.rev,
-               date & 0xffff,
-               date >> 24,
-               (date >> 16) & 0xff);
+       pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
+                    uci->cpu_sig.rev,
+                    date & 0xffff,
+                    date >> 24,
+                    (date >> 16) & 0xff);
 }
 
 #ifdef CONFIG_X86_32
@@ -603,19 +604,19 @@ void show_ucode_info_early(void)
  */
 static void print_ucode(struct ucode_cpu_info *uci)
 {
-       struct microcode_intel *mc_intel;
+       struct microcode_intel *mc;
        int *delay_ucode_info_p;
        int *current_mc_date_p;
 
-       mc_intel = uci->mc;
-       if (mc_intel == NULL)
+       mc = uci->mc;
+       if (!mc)
                return;
 
        delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
        current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);
 
        *delay_ucode_info_p = 1;
-       *current_mc_date_p = mc_intel->hdr.date;
+       *current_mc_date_p = mc->hdr.date;
 }
 #else
 
@@ -630,37 +631,35 @@ static inline void flush_tlb_early(void)
 
 static inline void print_ucode(struct ucode_cpu_info *uci)
 {
-       struct microcode_intel *mc_intel;
+       struct microcode_intel *mc;
 
-       mc_intel = uci->mc;
-       if (mc_intel == NULL)
+       mc = uci->mc;
+       if (!mc)
                return;
 
-       print_ucode_info(uci, mc_intel->hdr.date);
+       print_ucode_info(uci, mc->hdr.date);
 }
 #endif
 
 static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
 {
-       struct microcode_intel *mc_intel;
+       struct microcode_intel *mc;
        unsigned int val[2];
 
-       mc_intel = uci->mc;
-       if (mc_intel == NULL)
+       mc = uci->mc;
+       if (!mc)
                return 0;
 
        /* write microcode via MSR 0x79 */
-       native_wrmsr(MSR_IA32_UCODE_WRITE,
-             (unsigned long) mc_intel->bits,
-             (unsigned long) mc_intel->bits >> 16 >> 16);
-       native_wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+       native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+       native_wrmsrl(MSR_IA32_UCODE_REV, 0);
 
        /* As documented in the SDM: Do a CPUID 1 here */
        sync_core();
 
        /* get the current revision from MSR 0x8B */
        native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-       if (val[1] != mc_intel->hdr.rev)
+       if (val[1] != mc->hdr.rev)
                return -1;
 
 #ifdef CONFIG_X86_64
@@ -672,25 +671,26 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
        if (early)
                print_ucode(uci);
        else
-               print_ucode_info(uci, mc_intel->hdr.date);
+               print_ucode_info(uci, mc->hdr.date);
 
        return 0;
 }
 
 /*
  * This function converts microcode patch offsets previously stored in
- * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
+ * mc_tmp_ptrs to pointers and stores the pointers in mc_saved_data.
  */
 int __init save_microcode_in_initrd_intel(void)
 {
-       unsigned int count = mc_saved_data.mc_saved_count;
+       unsigned int count = mc_saved_data.num_saved;
        struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
        int ret = 0;
 
-       if (count == 0)
+       if (!count)
                return ret;
 
-       copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, initrd_start, count);
+       copy_ptrs(mc_saved, mc_tmp_ptrs, get_initrd_start(), count);
+
        ret = save_microcode(&mc_saved_data, mc_saved, count);
        if (ret)
                pr_err("Cannot save microcode patches from initrd.\n");
@@ -701,8 +701,7 @@ int __init save_microcode_in_initrd_intel(void)
 }
 
 static void __init
-_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
-                     unsigned long *initrd,
+_load_ucode_intel_bsp(struct mc_saved_data *mcs, unsigned long *mc_ptrs,
                      unsigned long start, unsigned long size)
 {
        struct ucode_cpu_info uci;
@@ -710,11 +709,11 @@ _load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
 
        collect_cpu_info_early(&uci);
 
-       ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
+       ret = scan_microcode(mcs, mc_ptrs, start, size, &uci);
        if (ret != UCODE_OK)
                return;
 
-       ret = load_microcode(mc_saved_data, initrd, start, &uci);
+       ret = load_microcode(mcs, mc_ptrs, start, &uci);
        if (ret != UCODE_OK)
                return;
 
@@ -728,53 +727,49 @@ void __init load_ucode_intel_bsp(void)
        struct boot_params *p;
 
        p       = (struct boot_params *)__pa_nodebug(&boot_params);
-       start   = p->hdr.ramdisk_image;
        size    = p->hdr.ramdisk_size;
 
-       _load_ucode_intel_bsp(
-                       (struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
-                       (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
-                       start, size);
+       /*
+        * Set start only if we have an initrd image. We cannot use initrd_start
+        * because it is not set that early yet.
+        */
+       start   = (size ? p->hdr.ramdisk_image : 0);
+
+       _load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
+                             (unsigned long *)__pa_nodebug(&mc_tmp_ptrs),
+                             start, size);
 #else
-       start   = boot_params.hdr.ramdisk_image + PAGE_OFFSET;
        size    = boot_params.hdr.ramdisk_size;
+       start   = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);
 
-       _load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
+       _load_ucode_intel_bsp(&mc_saved_data, mc_tmp_ptrs, start, size);
 #endif
 }
 
 void load_ucode_intel_ap(void)
 {
-       struct mc_saved_data *mc_saved_data_p;
+       unsigned long *mcs_tmp_p;
+       struct mc_saved_data *mcs_p;
        struct ucode_cpu_info uci;
-       unsigned long *mc_saved_in_initrd_p;
-       unsigned long initrd_start_addr;
        enum ucode_state ret;
 #ifdef CONFIG_X86_32
-       unsigned long *initrd_start_p;
 
-       mc_saved_in_initrd_p =
-               (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
-       mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
-       initrd_start_p = (unsigned long *)__pa_nodebug(&initrd_start);
-       initrd_start_addr = (unsigned long)__pa_nodebug(*initrd_start_p);
+       mcs_tmp_p = (unsigned long *)__pa_nodebug(mc_tmp_ptrs);
+       mcs_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
 #else
-       mc_saved_data_p = &mc_saved_data;
-       mc_saved_in_initrd_p = mc_saved_in_initrd;
-       initrd_start_addr = initrd_start;
+       mcs_tmp_p = mc_tmp_ptrs;
+       mcs_p = &mc_saved_data;
 #endif
 
        /*
         * If there is no valid ucode previously saved in memory, no need to
         * update ucode on this AP.
         */
-       if (mc_saved_data_p->mc_saved_count == 0)
+       if (!mcs_p->num_saved)
                return;
 
        collect_cpu_info_early(&uci);
-       ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
-                            initrd_start_addr, &uci);
-
+       ret = load_microcode(mcs_p, mcs_tmp_p, get_initrd_start_addr(), &uci);
        if (ret != UCODE_OK)
                return;
 
@@ -786,13 +781,13 @@ void reload_ucode_intel(void)
        struct ucode_cpu_info uci;
        enum ucode_state ret;
 
-       if (!mc_saved_data.mc_saved_count)
+       if (!mc_saved_data.num_saved)
                return;
 
        collect_cpu_info_early(&uci);
 
        ret = load_microcode_early(mc_saved_data.mc_saved,
-                                  mc_saved_data.mc_saved_count, &uci);
+                                  mc_saved_data.num_saved, &uci);
        if (ret != UCODE_OK)
                return;
 
@@ -825,7 +820,7 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
  * return 0 - no update found
  * return 1 - found update
  */
-static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
+static int get_matching_mc(struct microcode_intel *mc, int cpu)
 {
        struct cpu_signature cpu_sig;
        unsigned int csig, cpf, crev;
@@ -836,39 +831,36 @@ static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
        cpf = cpu_sig.pf;
        crev = cpu_sig.rev;
 
-       return has_newer_microcode(mc_intel, csig, cpf, crev);
+       return has_newer_microcode(mc, csig, cpf, crev);
 }
 
 static int apply_microcode_intel(int cpu)
 {
-       struct microcode_intel *mc_intel;
+       struct microcode_intel *mc;
        struct ucode_cpu_info *uci;
+       struct cpuinfo_x86 *c;
        unsigned int val[2];
-       int cpu_num = raw_smp_processor_id();
-       struct cpuinfo_x86 *c = &cpu_data(cpu_num);
-
-       uci = ucode_cpu_info + cpu;
-       mc_intel = uci->mc;
 
        /* We should bind the task to the CPU */
-       BUG_ON(cpu_num != cpu);
+       if (WARN_ON(raw_smp_processor_id() != cpu))
+               return -1;
 
-       if (mc_intel == NULL)
+       uci = ucode_cpu_info + cpu;
+       mc = uci->mc;
+       if (!mc)
                return 0;
 
        /*
         * Microcode on this CPU could be updated earlier. Only apply the
-        * microcode patch in mc_intel when it is newer than the one on this
+        * microcode patch in mc when it is newer than the one on this
         * CPU.
         */
-       if (get_matching_mc(mc_intel, cpu) == 0)
+       if (!get_matching_mc(mc, cpu))
                return 0;
 
        /* write microcode via MSR 0x79 */
-       wrmsr(MSR_IA32_UCODE_WRITE,
-             (unsigned long) mc_intel->bits,
-             (unsigned long) mc_intel->bits >> 16 >> 16);
-       wrmsr(MSR_IA32_UCODE_REV, 0, 0);
+       wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
+       wrmsrl(MSR_IA32_UCODE_REV, 0);
 
        /* As documented in the SDM: Do a CPUID 1 here */
        sync_core();
@@ -876,16 +868,19 @@ static int apply_microcode_intel(int cpu)
        /* get the current revision from MSR 0x8B */
        rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
 
-       if (val[1] != mc_intel->hdr.rev) {
+       if (val[1] != mc->hdr.rev) {
                pr_err("CPU%d update to revision 0x%x failed\n",
-                      cpu_num, mc_intel->hdr.rev);
+                      cpu, mc->hdr.rev);
                return -1;
        }
+
        pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
-               cpu_num, val[1],
-               mc_intel->hdr.date & 0xffff,
-               mc_intel->hdr.date >> 24,
-               (mc_intel->hdr.date >> 16) & 0xff);
+               cpu, val[1],
+               mc->hdr.date & 0xffff,
+               mc->hdr.date >> 24,
+               (mc->hdr.date >> 16) & 0xff);
+
+       c = &cpu_data(cpu);
 
        uci->cpu_sig.rev = val[1];
        c->microcode = val[1];
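
Several hunks above replace wrmsr()/native_wrmsr() pairs of 32-bit halves with the 64-bit wrmsrl() form; the old "(unsigned long) mc_intel->bits >> 16 >> 16" expression was a portable way to obtain the high half without shifting a 32-bit quantity by 32. Here is a userspace sketch of the equivalent split, using a sample value and no real MSR access.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Sample 64-bit payload, e.g. the address written to MSR_IA32_UCODE_WRITE. */
	uint64_t val = 0x00000001234567f0ull;

	/* wrmsrl(msr, val) is equivalent to wrmsr(msr, lo, hi) with this split: */
	uint32_t lo = (uint32_t)val;
	uint32_t hi = (uint32_t)(val >> 32);

	/*
	 * The old code used ">> 16 >> 16" because shifting an unsigned long by
	 * 32 is undefined when unsigned long is 32 bits wide (32-bit kernels).
	 */
	unsigned long v32 = (unsigned long)val;
	unsigned long hi_old_style = v32 >> 16 >> 16;   /* 0 on 32-bit, high half on 64-bit */

	printf("lo=0x%08x hi=0x%08x (old-style hi=0x%lx)\n", lo, hi, hi_old_style);
	return 0;
}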
index 3f20710a5b23b7f7456e99bb7f654ce703bb6abf..6988c74409a825313a2711be6914a20369d223e0 100644 (file)
@@ -1,6 +1,6 @@
 #!/bin/sh
 #
-# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h
+# Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeatures.h
 #
 
 IN=$1
@@ -49,8 +49,8 @@ dump_array()
 trap 'rm "$OUT"' EXIT
 
 (
-       echo "#ifndef _ASM_X86_CPUFEATURE_H"
-       echo "#include <asm/cpufeature.h>"
+       echo "#ifndef _ASM_X86_CPUFEATURES_H"
+       echo "#include <asm/cpufeatures.h>"
        echo "#endif"
        echo ""
 
index 20e242ea1bc46b5f5828c7b95071d920853b7609..4e7c6933691cc8de42aa4a82473482127bd0faab 100644 (file)
@@ -161,8 +161,8 @@ static void __init ms_hyperv_init_platform(void)
        ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
        ms_hyperv.hints    = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
 
-       printk(KERN_INFO "HyperV: features 0x%x, hints 0x%x\n",
-              ms_hyperv.features, ms_hyperv.hints);
+       pr_info("HyperV: features 0x%x, hints 0x%x\n",
+               ms_hyperv.features, ms_hyperv.hints);
 
 #ifdef CONFIG_X86_LOCAL_APIC
        if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
@@ -174,8 +174,8 @@ static void __init ms_hyperv_init_platform(void)
                rdmsrl(HV_X64_MSR_APIC_FREQUENCY, hv_lapic_frequency);
                hv_lapic_frequency = div_u64(hv_lapic_frequency, HZ);
                lapic_timer_frequency = hv_lapic_frequency;
-               printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n",
-                               lapic_timer_frequency);
+               pr_info("HyperV: LAPIC Timer Frequency: %#x\n",
+                       lapic_timer_frequency);
        }
 #endif
 
index 316fe3e60a9764e479d4e49d01346c7921763ee8..3d689937fc1b13974a0a8a5edd31ddda0f6401b2 100644 (file)
@@ -103,7 +103,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
         */
        if (type != MTRR_TYPE_WRCOMB &&
            (centaur_mcr_type == 0 || type != MTRR_TYPE_UNCACHABLE)) {
-               pr_warning("mtrr: only write-combining%s supported\n",
+               pr_warn("mtrr: only write-combining%s supported\n",
                           centaur_mcr_type ? " and uncacheable are" : " is");
                return -EINVAL;
        }
index 0d98503c2245aab81283526c062f746650b99c32..31e951ce6dff33782c9610c7b43898181d59d637 100644 (file)
@@ -57,9 +57,9 @@ static int __initdata                         nr_range;
 static struct var_mtrr_range_state __initdata  range_state[RANGE_NUM];
 
 static int __initdata debug_print;
-#define Dprintk(x...) do { if (debug_print) printk(KERN_DEBUG x); } while (0)
+#define Dprintk(x...) do { if (debug_print) pr_debug(x); } while (0)
 
-#define BIOS_BUG_MSG KERN_WARNING \
+#define BIOS_BUG_MSG \
        "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
 
 static int __init
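
Dprintk() keeps its do { ... } while (0) wrapper when switching to pr_debug(): without it, a macro whose body is a bare "if" would mis-pair with an "else" at the call site. A standalone illustration of the idiom; debug_print and the messages are placeholders.

#include <stdio.h>

static int debug_print = 1;

/* Safe form: behaves like a single statement wherever it is used. */
#define Dprintk(x...) do { if (debug_print) printf(x); } while (0)

/*
 * Unsafe form for comparison: an "else" following a call to this macro would
 * bind to the macro's hidden "if" instead of the caller's outer "if".
 */
#define Dprintk_bad(x...) if (debug_print) printf(x)

int main(void)
{
	int have_range = 0;

	if (have_range)
		Dprintk("After WB checking\n");
	else
		printf("no ranges to dump\n");   /* pairs correctly thanks to do/while */

	return 0;
}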
@@ -81,9 +81,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
                                                base, base + size);
        }
        if (debug_print) {
-               printk(KERN_DEBUG "After WB checking\n");
+               pr_debug("After WB checking\n");
                for (i = 0; i < nr_range; i++)
-                       printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+                       pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
                                 range[i].start, range[i].end);
        }
 
@@ -101,7 +101,7 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
                    (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
                    (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
                        /* Var MTRR contains UC entry below 1M? Skip it: */
-                       printk(BIOS_BUG_MSG, i);
+                       pr_warn(BIOS_BUG_MSG, i);
                        if (base + size <= (1<<(20-PAGE_SHIFT)))
                                continue;
                        size -= (1<<(20-PAGE_SHIFT)) - base;
@@ -114,11 +114,11 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
                                 extra_remove_base + extra_remove_size);
 
        if  (debug_print) {
-               printk(KERN_DEBUG "After UC checking\n");
+               pr_debug("After UC checking\n");
                for (i = 0; i < RANGE_NUM; i++) {
                        if (!range[i].end)
                                continue;
-                       printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+                       pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
                                 range[i].start, range[i].end);
                }
        }
@@ -126,9 +126,9 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
        /* sort the ranges */
        nr_range = clean_sort_range(range, RANGE_NUM);
        if  (debug_print) {
-               printk(KERN_DEBUG "After sorting\n");
+               pr_debug("After sorting\n");
                for (i = 0; i < nr_range; i++)
-                       printk(KERN_DEBUG "MTRR MAP PFN: %016llx - %016llx\n",
+                       pr_debug("MTRR MAP PFN: %016llx - %016llx\n",
                                 range[i].start, range[i].end);
        }
 
@@ -544,7 +544,7 @@ static void __init print_out_mtrr_range_state(void)
                start_base = to_size_factor(start_base, &start_factor),
                type = range_state[i].type;
 
-               printk(KERN_DEBUG "reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
+               pr_debug("reg %d, base: %ld%cB, range: %ld%cB, type %s\n",
                        i, start_base, start_factor,
                        size_base, size_factor,
                        (type == MTRR_TYPE_UNCACHABLE) ? "UC" :
@@ -713,7 +713,7 @@ int __init mtrr_cleanup(unsigned address_bits)
                return 0;
 
        /* Print original var MTRRs at first, for debugging: */
-       printk(KERN_DEBUG "original variable MTRRs\n");
+       pr_debug("original variable MTRRs\n");
        print_out_mtrr_range_state();
 
        memset(range, 0, sizeof(range));
@@ -733,7 +733,7 @@ int __init mtrr_cleanup(unsigned address_bits)
                                          x_remove_base, x_remove_size);
 
        range_sums = sum_ranges(range, nr_range);
-       printk(KERN_INFO "total RAM covered: %ldM\n",
+       pr_info("total RAM covered: %ldM\n",
               range_sums >> (20 - PAGE_SHIFT));
 
        if (mtrr_chunk_size && mtrr_gran_size) {
@@ -745,12 +745,11 @@ int __init mtrr_cleanup(unsigned address_bits)
 
                if (!result[i].bad) {
                        set_var_mtrr_all(address_bits);
-                       printk(KERN_DEBUG "New variable MTRRs\n");
+                       pr_debug("New variable MTRRs\n");
                        print_out_mtrr_range_state();
                        return 1;
                }
-               printk(KERN_INFO "invalid mtrr_gran_size or mtrr_chunk_size, "
-                      "will find optimal one\n");
+               pr_info("invalid mtrr_gran_size or mtrr_chunk_size, will find optimal one\n");
        }
 
        i = 0;
@@ -768,7 +767,7 @@ int __init mtrr_cleanup(unsigned address_bits)
                                      x_remove_base, x_remove_size, i);
                        if (debug_print) {
                                mtrr_print_out_one_result(i);
-                               printk(KERN_INFO "\n");
+                               pr_info("\n");
                        }
 
                        i++;
@@ -779,7 +778,7 @@ int __init mtrr_cleanup(unsigned address_bits)
        index_good = mtrr_search_optimal_index();
 
        if (index_good != -1) {
-               printk(KERN_INFO "Found optimal setting for mtrr clean up\n");
+               pr_info("Found optimal setting for mtrr clean up\n");
                i = index_good;
                mtrr_print_out_one_result(i);
 
@@ -790,7 +789,7 @@ int __init mtrr_cleanup(unsigned address_bits)
                gran_size <<= 10;
                x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
                set_var_mtrr_all(address_bits);
-               printk(KERN_DEBUG "New variable MTRRs\n");
+               pr_debug("New variable MTRRs\n");
                print_out_mtrr_range_state();
                return 1;
        } else {
@@ -799,8 +798,8 @@ int __init mtrr_cleanup(unsigned address_bits)
                        mtrr_print_out_one_result(i);
        }
 
-       printk(KERN_INFO "mtrr_cleanup: can not find optimal value\n");
-       printk(KERN_INFO "please specify mtrr_gran_size/mtrr_chunk_size\n");
+       pr_info("mtrr_cleanup: can not find optimal value\n");
+       pr_info("please specify mtrr_gran_size/mtrr_chunk_size\n");
 
        return 0;
 }
@@ -918,7 +917,7 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
 
        /* kvm/qemu doesn't have mtrr set right, don't trim them all: */
        if (!highest_pfn) {
-               printk(KERN_INFO "CPU MTRRs all blank - virtualized system.\n");
+               pr_info("CPU MTRRs all blank - virtualized system.\n");
                return 0;
        }
 
@@ -973,7 +972,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn)
                                                         end_pfn);
 
        if (total_trim_size) {
-               pr_warning("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n", total_trim_size >> 20);
+               pr_warn("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n",
+                       total_trim_size >> 20);
 
                if (!changed_by_mtrr_cleanup)
                        WARN_ON(1);
index c870af1610083ec3dda7cb61b966860c9a224374..fcbcb2f678ca47360d23623887d216165969d6e8 100644 (file)
@@ -55,7 +55,7 @@ static inline void k8_check_syscfg_dram_mod_en(void)
 
        rdmsr(MSR_K8_SYSCFG, lo, hi);
        if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
-               printk(KERN_ERR FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
+               pr_err(FW_WARN "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
                       " not cleared by BIOS, clearing this bit\n",
                       smp_processor_id());
                lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
@@ -501,14 +501,14 @@ void __init mtrr_state_warn(void)
        if (!mask)
                return;
        if (mask & MTRR_CHANGE_MASK_FIXED)
-               pr_warning("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
+               pr_warn("mtrr: your CPUs had inconsistent fixed MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_VARIABLE)
-               pr_warning("mtrr: your CPUs had inconsistent variable MTRR settings\n");
+               pr_warn("mtrr: your CPUs had inconsistent variable MTRR settings\n");
        if (mask & MTRR_CHANGE_MASK_DEFTYPE)
-               pr_warning("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
+               pr_warn("mtrr: your CPUs had inconsistent MTRRdefType settings\n");
 
-       printk(KERN_INFO "mtrr: probably your BIOS does not setup all CPUs.\n");
-       printk(KERN_INFO "mtrr: corrected configuration.\n");
+       pr_info("mtrr: probably your BIOS does not setup all CPUs.\n");
+       pr_info("mtrr: corrected configuration.\n");
 }
 
 /*
@@ -519,8 +519,7 @@ void __init mtrr_state_warn(void)
 void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
 {
        if (wrmsr_safe(msr, a, b) < 0) {
-               printk(KERN_ERR
-                       "MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
+               pr_err("MTRR: CPU %u: Writing MSR %x to %x:%x failed\n",
                        smp_processor_id(), msr, a, b);
        }
 }
@@ -607,7 +606,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
                tmp |= ~((1ULL<<(hi - 1)) - 1);
 
                if (tmp != mask) {
-                       printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+                       pr_warn("mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
                        add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                        mask = tmp;
                }
@@ -858,13 +857,13 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask <= 7) {
                if (base & ((1 << (22 - PAGE_SHIFT)) - 1)) {
-                       pr_warning("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
+                       pr_warn("mtrr: base(0x%lx000) is not 4 MiB aligned\n", base);
                        return -EINVAL;
                }
                if (!(base + size < 0x70000 || base > 0x7003F) &&
                    (type == MTRR_TYPE_WRCOMB
                     || type == MTRR_TYPE_WRBACK)) {
-                       pr_warning("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
+                       pr_warn("mtrr: writable mtrr between 0x70000000 and 0x7003FFFF may hang the CPU.\n");
                        return -EINVAL;
                }
        }
@@ -878,7 +877,7 @@ int generic_validate_add_page(unsigned long base, unsigned long size,
             lbase = lbase >> 1, last = last >> 1)
                ;
        if (lbase != last) {
-               pr_warning("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
+               pr_warn("mtrr: base(0x%lx000) is not aligned on a size(0x%lx000) boundary\n", base, size);
                return -EINVAL;
        }
        return 0;
index 5c3d149ee91cb1f6c87ff6ad1853a38adfce82da..10f8d4796240709cea61c0e56522d75eeff8ffdd 100644 (file)
@@ -47,7 +47,7 @@
 #include <linux/smp.h>
 #include <linux/syscore_ops.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/e820.h>
 #include <asm/mtrr.h>
 #include <asm/msr.h>
@@ -300,24 +300,24 @@ int mtrr_add_page(unsigned long base, unsigned long size,
                return error;
 
        if (type >= MTRR_NUM_TYPES) {
-               pr_warning("mtrr: type: %u invalid\n", type);
+               pr_warn("mtrr: type: %u invalid\n", type);
                return -EINVAL;
        }
 
        /* If the type is WC, check that this processor supports it */
        if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
-               pr_warning("mtrr: your processor doesn't support write-combining\n");
+               pr_warn("mtrr: your processor doesn't support write-combining\n");
                return -ENOSYS;
        }
 
        if (!size) {
-               pr_warning("mtrr: zero sized request\n");
+               pr_warn("mtrr: zero sized request\n");
                return -EINVAL;
        }
 
        if ((base | (base + size - 1)) >>
            (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
-               pr_warning("mtrr: base or size exceeds the MTRR width\n");
+               pr_warn("mtrr: base or size exceeds the MTRR width\n");
                return -EINVAL;
        }
 
@@ -348,7 +348,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
                                } else if (types_compatible(type, ltype))
                                        continue;
                        }
-                       pr_warning("mtrr: 0x%lx000,0x%lx000 overlaps existing"
+                       pr_warn("mtrr: 0x%lx000,0x%lx000 overlaps existing"
                                " 0x%lx000,0x%lx000\n", base, size, lbase,
                                lsize);
                        goto out;
@@ -357,7 +357,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
                if (ltype != type) {
                        if (types_compatible(type, ltype))
                                continue;
-                       pr_warning("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
+                       pr_warn("mtrr: type mismatch for %lx000,%lx000 old: %s new: %s\n",
                                base, size, mtrr_attrib_to_str(ltype),
                                mtrr_attrib_to_str(type));
                        goto out;
@@ -395,7 +395,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 static int mtrr_check(unsigned long base, unsigned long size)
 {
        if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
-               pr_warning("mtrr: size and base must be multiples of 4 kiB\n");
+               pr_warn("mtrr: size and base must be multiples of 4 kiB\n");
                pr_debug("mtrr: size: 0x%lx  base: 0x%lx\n", size, base);
                dump_stack();
                return -1;
@@ -493,16 +493,16 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
                }
        }
        if (reg >= max) {
-               pr_warning("mtrr: register: %d too big\n", reg);
+               pr_warn("mtrr: register: %d too big\n", reg);
                goto out;
        }
        mtrr_if->get(reg, &lbase, &lsize, &ltype);
        if (lsize < 1) {
-               pr_warning("mtrr: MTRR %d not used\n", reg);
+               pr_warn("mtrr: MTRR %d not used\n", reg);
                goto out;
        }
        if (mtrr_usage_table[reg] < 1) {
-               pr_warning("mtrr: reg: %d has count=0\n", reg);
+               pr_warn("mtrr: reg: %d has count=0\n", reg);
                goto out;
        }
        if (--mtrr_usage_table[reg] < 1)
index a667078a51807bb5bd1ca2e5d5c4ff205e9dd42b..fed2ab1f1065dfdd5a286822102857a013298dfc 100644 (file)
@@ -1960,7 +1960,8 @@ intel_bts_constraints(struct perf_event *event)
 
 static int intel_alt_er(int idx, u64 config)
 {
-       int alt_idx;
+       int alt_idx = idx;
+
        if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
                return idx;
 
@@ -2897,14 +2898,12 @@ static void intel_pmu_cpu_starting(int cpu)
                return;
 
        if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
-               void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
-
                for_each_cpu(i, topology_sibling_cpumask(cpu)) {
                        struct intel_shared_regs *pc;
 
                        pc = per_cpu(cpu_hw_events, i).shared_regs;
                        if (pc && pc->core_id == core_id) {
-                               *onln = cpuc->shared_regs;
+                               cpuc->kfree_on_online[0] = cpuc->shared_regs;
                                cpuc->shared_regs = pc;
                                break;
                        }
index 10602f0a438fdab19311ff63bdcf014d27d3596a..7c79261ed939da2a219c45342c00491b5c6151dc 100644 (file)
@@ -1325,13 +1325,13 @@ void __init intel_ds_init(void)
 
                switch (format) {
                case 0:
-                       printk(KERN_CONT "PEBS fmt0%c, ", pebs_type);
+                       pr_cont("PEBS fmt0%c, ", pebs_type);
                        x86_pmu.pebs_record_size = sizeof(struct pebs_record_core);
                        x86_pmu.drain_pebs = intel_pmu_drain_pebs_core;
                        break;
 
                case 1:
-                       printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
+                       pr_cont("PEBS fmt1%c, ", pebs_type);
                        x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
                        x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
                        break;
@@ -1351,7 +1351,7 @@ void __init intel_ds_init(void)
                        break;
 
                default:
-                       printk(KERN_CONT "no PEBS fmt%d%c, ", format, pebs_type);
+                       pr_cont("no PEBS fmt%d%c, ", format, pebs_type);
                        x86_pmu.pebs = 0;
                }
        }
index f97f8075bf04c9d6efed980370feca46227afc5a..3bf41d41377505ef28d90ed3363f08e83df70db3 100644 (file)
@@ -995,6 +995,9 @@ static int __init uncore_pci_init(void)
        case 87: /* Knights Landing */
                ret = knl_uncore_pci_init();
                break;
+       case 94: /* SkyLake */
+               ret = skl_uncore_pci_init();
+               break;
        default:
                return 0;
        }
index 07aa2d6bd71094e411a9f98535fc4c65208a66d4..a7086b862156627ebe7ca22996aed6126e61e770 100644 (file)
@@ -336,6 +336,7 @@ int snb_uncore_pci_init(void);
 int ivb_uncore_pci_init(void);
 int hsw_uncore_pci_init(void);
 int bdw_uncore_pci_init(void);
+int skl_uncore_pci_init(void);
 void snb_uncore_cpu_init(void);
 void nhm_uncore_cpu_init(void);
 int snb_pci2phy_map_init(int devid);
index 0b934820fafd1f64747640e09f1544b862eb8c01..2bd030ddd0db3bf6f5ada42830bd9be9ccc4c1b4 100644 (file)
@@ -8,6 +8,7 @@
 #define PCI_DEVICE_ID_INTEL_HSW_IMC    0x0c00
 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC  0x0a04
 #define PCI_DEVICE_ID_INTEL_BDW_IMC    0x1604
+#define PCI_DEVICE_ID_INTEL_SKL_IMC    0x191f
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
@@ -524,6 +525,14 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
        { /* end: all zeroes */ },
 };
 
+static const struct pci_device_id skl_uncore_pci_ids[] = {
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* end: all zeroes */ },
+};
+
 static struct pci_driver snb_uncore_pci_driver = {
        .name           = "snb_uncore",
        .id_table       = snb_uncore_pci_ids,
@@ -544,6 +553,11 @@ static struct pci_driver bdw_uncore_pci_driver = {
        .id_table       = bdw_uncore_pci_ids,
 };
 
+static struct pci_driver skl_uncore_pci_driver = {
+       .name           = "skl_uncore",
+       .id_table       = skl_uncore_pci_ids,
+};
+
 struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
@@ -558,6 +572,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
        IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
+       IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
        {  /* end marker */ }
 };
 
@@ -610,6 +625,11 @@ int bdw_uncore_pci_init(void)
        return imc_uncore_pci_init();
 }
 
+int skl_uncore_pci_init(void)
+{
+       return imc_uncore_pci_init();
+}
+
 /* end of Sandy Bridge uncore support */
 
 /* Nehalem uncore support */
index 819d94982e078b8597f084f8409fcd3106361293..f6f50c4ceaeceef16170d14d6d55376823bbd63f 100644 (file)
@@ -51,7 +51,7 @@ void x86_init_rdrand(struct cpuinfo_x86 *c)
        for (i = 0; i < SANITY_CHECK_LOOPS; i++) {
                if (!rdrand_long(&tmp)) {
                        clear_cpu_cap(c, X86_FEATURE_RDRAND);
-                       printk_once(KERN_WARNING "rdrand: disabled\n");
+                       pr_warn_once("rdrand: disabled\n");
                        return;
                }
        }
index 4c60eaf0571c2fb1a6d73258861398fc6bcc963e..cd531355e8386b4177d4dcecaf1031ba8c2086c8 100644 (file)
@@ -87,10 +87,10 @@ void detect_extended_topology(struct cpuinfo_x86 *c)
        c->x86_max_cores = (core_level_siblings / smp_num_siblings);
 
        if (!printed) {
-               printk(KERN_INFO  "CPU: Physical Processor ID: %d\n",
+               pr_info("CPU: Physical Processor ID: %d\n",
                       c->phys_proc_id);
                if (c->x86_max_cores > 1)
-                       printk(KERN_INFO  "CPU: Processor Core ID: %d\n",
+                       pr_info("CPU: Processor Core ID: %d\n",
                               c->cpu_core_id);
                printed = 1;
        }
index 252da7aceca67ff580e8189ed267b2ce44d83373..34178564be2a70adbeaf3fd890cac32d97a51635 100644 (file)
@@ -1,6 +1,6 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/msr.h>
 #include "cpu.h"
 
@@ -33,7 +33,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
        if (max >= 0x80860001) {
                cpuid(0x80860001, &dummy, &cpu_rev, &cpu_freq, &cpu_flags);
                if (cpu_rev != 0x02000000) {
-                       printk(KERN_INFO "CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
+                       pr_info("CPU: Processor revision %u.%u.%u.%u, %u MHz\n",
                                (cpu_rev >> 24) & 0xff,
                                (cpu_rev >> 16) & 0xff,
                                (cpu_rev >> 8) & 0xff,
@@ -44,10 +44,10 @@ static void init_transmeta(struct cpuinfo_x86 *c)
        if (max >= 0x80860002) {
                cpuid(0x80860002, &new_cpu_rev, &cms_rev1, &cms_rev2, &dummy);
                if (cpu_rev == 0x02000000) {
-                       printk(KERN_INFO "CPU: Processor revision %08X, %u MHz\n",
+                       pr_info("CPU: Processor revision %08X, %u MHz\n",
                                new_cpu_rev, cpu_freq);
                }
-               printk(KERN_INFO "CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
+               pr_info("CPU: Code Morphing Software revision %u.%u.%u-%u-%u\n",
                       (cms_rev1 >> 24) & 0xff,
                       (cms_rev1 >> 16) & 0xff,
                       (cms_rev1 >> 8) & 0xff,
@@ -76,7 +76,7 @@ static void init_transmeta(struct cpuinfo_x86 *c)
                      (void *)&cpu_info[56],
                      (void *)&cpu_info[60]);
                cpu_info[64] = '\0';
-               printk(KERN_INFO "CPU: %s\n", cpu_info);
+               pr_info("CPU: %s\n", cpu_info);
        }
 
        /* Unhide possibly hidden capability flags */
index 628a059a9a0663ef7cbdf07e4750c9e1e4f53006..364e5834689753fc7da34c6dc8a34cca22ab92db 100644 (file)
@@ -62,7 +62,7 @@ static unsigned long vmware_get_tsc_khz(void)
        tsc_hz = eax | (((uint64_t)ebx) << 32);
        do_div(tsc_hz, 1000);
        BUG_ON(tsc_hz >> 32);
-       printk(KERN_INFO "TSC freq read from hypervisor : %lu.%03lu MHz\n",
+       pr_info("TSC freq read from hypervisor : %lu.%03lu MHz\n",
                         (unsigned long) tsc_hz / 1000,
                         (unsigned long) tsc_hz % 1000);
 
@@ -84,8 +84,7 @@ static void __init vmware_platform_setup(void)
        if (ebx != UINT_MAX)
                x86_platform.calibrate_tsc = vmware_get_tsc_khz;
        else
-               printk(KERN_WARNING
-                      "Failed to get TSC freq from the hypervisor\n");
+               pr_warn("Failed to get TSC freq from the hypervisor\n");
 }
 
 /*
index 58f34319b29ab64aee4bd21e6ff30367d478c262..9ef978d69c22e00cffdce3592cbe5a495027a058 100644 (file)
@@ -57,10 +57,9 @@ struct crash_elf_data {
        struct kimage *image;
        /*
         * Total number of ram ranges we have after various adjustments for
-        * GART, crash reserved region etc.
+        * crash reserved region, etc.
         */
        unsigned int max_nr_ranges;
-       unsigned long gart_start, gart_end;
 
        /* Pointer to elf header */
        void *ehdr;
@@ -201,17 +200,6 @@ static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
        return 0;
 }
 
-static int get_gart_ranges_callback(u64 start, u64 end, void *arg)
-{
-       struct crash_elf_data *ced = arg;
-
-       ced->gart_start = start;
-       ced->gart_end = end;
-
-       /* Not expecting more than 1 gart aperture */
-       return 1;
-}
-
 
 /* Gather all the required information to prepare elf headers for ram regions */
 static void fill_up_crash_elf_data(struct crash_elf_data *ced,
@@ -226,22 +214,6 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
 
        ced->max_nr_ranges = nr_ranges;
 
-       /*
-        * We don't create ELF headers for GART aperture as an attempt
-        * to dump this memory in second kernel leads to hang/crash.
-        * If gart aperture is present, one needs to exclude that region
-        * and that could lead to need of extra phdr.
-        */
-       walk_iomem_res("GART", IORESOURCE_MEM, 0, -1,
-                               ced, get_gart_ranges_callback);
-
-       /*
-        * If we have gart region, excluding that could potentially split
-        * a memory range, resulting in extra header. Account for  that.
-        */
-       if (ced->gart_end)
-               ced->max_nr_ranges++;
-
        /* Exclusion of crash region could split memory ranges */
        ced->max_nr_ranges++;
 
@@ -350,13 +322,6 @@ static int elf_header_exclude_ranges(struct crash_elf_data *ced,
                        return ret;
        }
 
-       /* Exclude GART region */
-       if (ced->gart_end) {
-               ret = exclude_mem_range(cmem, ced->gart_start, ced->gart_end);
-               if (ret)
-                       return ret;
-       }
-
        return ret;
 }
 
@@ -599,12 +564,12 @@ int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
        /* Add ACPI tables */
        cmd.type = E820_ACPI;
        flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-       walk_iomem_res("ACPI Tables", flags, 0, -1, &cmd,
+       walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
                       memmap_entry_callback);
 
        /* Add ACPI Non-volatile Storage */
        cmd.type = E820_NVS;
-       walk_iomem_res("ACPI Non-volatile Storage", flags, 0, -1, &cmd,
+       walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
                        memmap_entry_callback);
 
        /* Add crashk_low_res region */
index 569c1e4f96feb897956a64e35bd8fccb07dca6c6..621b501f89351146b84b090b7160166bb9d5907e 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/e820.h>
 #include <asm/proto.h>
 #include <asm/setup.h>
+#include <asm/cpufeature.h>
 
 /*
  * The e820 map is the map that gets modified e.g. with command line parameters
@@ -925,6 +926,41 @@ static const char *e820_type_to_string(int e820_type)
        }
 }
 
+static unsigned long e820_type_to_iomem_type(int e820_type)
+{
+       switch (e820_type) {
+       case E820_RESERVED_KERN:
+       case E820_RAM:
+               return IORESOURCE_SYSTEM_RAM;
+       case E820_ACPI:
+       case E820_NVS:
+       case E820_UNUSABLE:
+       case E820_PRAM:
+       case E820_PMEM:
+       default:
+               return IORESOURCE_MEM;
+       }
+}
+
+static unsigned long e820_type_to_iores_desc(int e820_type)
+{
+       switch (e820_type) {
+       case E820_ACPI:
+               return IORES_DESC_ACPI_TABLES;
+       case E820_NVS:
+               return IORES_DESC_ACPI_NV_STORAGE;
+       case E820_PMEM:
+               return IORES_DESC_PERSISTENT_MEMORY;
+       case E820_PRAM:
+               return IORES_DESC_PERSISTENT_MEMORY_LEGACY;
+       case E820_RESERVED_KERN:
+       case E820_RAM:
+       case E820_UNUSABLE:
+       default:
+               return IORES_DESC_NONE;
+       }
+}
+
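/*
 * Illustrative sketch (annotation, not part of this patch): with e820 entries
 * now carrying an IORES_DESC_* descriptor in addition to the resource type,
 * callers can walk regions by descriptor instead of matching the resource
 * name string, which is what the crash.c and pmem.c hunks elsewhere in this
 * diff switch to.  A minimal caller, assuming only the walk_iomem_res_desc()
 * callback signature already used in this diff, might look like:
 *
 *	static int count_nvs_cb(u64 start, u64 end, void *arg)
 *	{
 *		unsigned int *nr = arg;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr = 0;
 *
 *	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE,
 *			    IORESOURCE_MEM | IORESOURCE_BUSY, 0, -1,
 *			    &nr, count_nvs_cb);
 *
 * which counts the ACPI NVS regions without knowing their resource names.
 */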
 static bool do_mark_busy(u32 type, struct resource *res)
 {
        /* this is the legacy bios/dos rom-shadow + mmio region */
@@ -967,7 +1003,8 @@ void __init e820_reserve_resources(void)
                res->start = e820.map[i].addr;
                res->end = end;
 
-               res->flags = IORESOURCE_MEM;
+               res->flags = e820_type_to_iomem_type(e820.map[i].type);
+               res->desc = e820_type_to_iores_desc(e820.map[i].type);
 
                /*
                 * don't register the region that could be conflicted with
index d25097c3fc1d1af8af35c156f05121f9f4d46a94..299b58bb975b5e3fa35e9ed66e90d0bd52540ada 100644 (file)
@@ -114,6 +114,10 @@ void __kernel_fpu_begin(void)
        kernel_fpu_disable();
 
        if (fpu->fpregs_active) {
+               /*
+                * Ignore return value -- we don't care if reg state
+                * is clobbered.
+                */
                copy_fpregs_to_fpstate(fpu);
        } else {
                this_cpu_write(fpu_fpregs_owner_ctx, NULL);
@@ -189,8 +193,12 @@ void fpu__save(struct fpu *fpu)
 
        preempt_disable();
        if (fpu->fpregs_active) {
-               if (!copy_fpregs_to_fpstate(fpu))
-                       fpregs_deactivate(fpu);
+               if (!copy_fpregs_to_fpstate(fpu)) {
+                       if (use_eager_fpu())
+                               copy_kernel_to_fpregs(&fpu->state);
+                       else
+                               fpregs_deactivate(fpu);
+               }
        }
        preempt_enable();
 }
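/*
 * Annotation (not part of this patch): after this change a register-clobbering
 * save (FNSAVE on old CPUs) is handled differently depending on the switching
 * mode: in eager mode the just-saved state is immediately reloaded with
 * copy_kernel_to_fpregs(), so the registers stay live, while only lazy mode
 * still deactivates the fpregs and relies on the CR0.TS fault path to restore
 * them later.  fpu__copy() below follows the same pattern for the parent's
 * state.
 */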
@@ -223,14 +231,15 @@ void fpstate_init(union fpregs_state *state)
 }
 EXPORT_SYMBOL_GPL(fpstate_init);
 
-/*
- * Copy the current task's FPU state to a new task's FPU context.
- *
- * In both the 'eager' and the 'lazy' case we save hardware registers
- * directly to the destination buffer.
- */
-static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 {
+       dst_fpu->counter = 0;
+       dst_fpu->fpregs_active = 0;
+       dst_fpu->last_cpu = -1;
+
+       if (!src_fpu->fpstate_active || !cpu_has_fpu)
+               return 0;
+
        WARN_ON_FPU(src_fpu != &current->thread.fpu);
 
        /*
@@ -243,10 +252,9 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
        /*
         * Save current FPU registers directly into the child
         * FPU context, without any memory-to-memory copying.
-        *
-        * If the FPU context got destroyed in the process (FNSAVE
-        * done on old CPUs) then copy it back into the source
-        * context and mark the current task for lazy restore.
+        * In lazy mode, if the FPU context isn't loaded into
+        * fpregs, CR0.TS will be set and do_device_not_available
+        * will load the FPU context.
         *
         * We have to do all this with preemption disabled,
         * mostly because of the FNSAVE case, because in that
@@ -259,19 +267,13 @@ static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
        preempt_disable();
        if (!copy_fpregs_to_fpstate(dst_fpu)) {
                memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
-               fpregs_deactivate(src_fpu);
+
+               if (use_eager_fpu())
+                       copy_kernel_to_fpregs(&src_fpu->state);
+               else
+                       fpregs_deactivate(src_fpu);
        }
        preempt_enable();
-}
-
-int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
-{
-       dst_fpu->counter = 0;
-       dst_fpu->fpregs_active = 0;
-       dst_fpu->last_cpu = -1;
-
-       if (src_fpu->fpstate_active && cpu_has_fpu)
-               fpu_copy(dst_fpu, src_fpu);
 
        return 0;
 }
@@ -423,7 +425,7 @@ void fpu__clear(struct fpu *fpu)
 {
        WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
 
-       if (!use_eager_fpu()) {
+       if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
                /* FPU state will be reallocated lazily at the first use. */
                fpu__drop(fpu);
        } else {
index 6d9f0a7ef4c8e5da3d596444242de7478d7b9356..471fe277ff40ca6fb8c1da27f2d5c4af0434132a 100644 (file)
@@ -260,7 +260,10 @@ static void __init fpu__init_system_xstate_size_legacy(void)
  * not only saved the restores along the way, but we also have the
  * FPU ready to be used for the original task.
  *
- * 'eager' switching is used on modern CPUs, there we switch the FPU
+ * 'lazy' is deprecated because it's almost never a performance win
+ * and it's much more complicated than 'eager'.
+ *
+ * 'eager' switching is now the default on all CPUs; there we switch the FPU
  * state during every context switch, regardless of whether the task
  * has used FPU instructions in that time slice or not. This is done
  * because modern FPU context saving instructions are able to optimize
@@ -271,7 +274,7 @@ static void __init fpu__init_system_xstate_size_legacy(void)
  *   to use 'eager' restores, if we detect that a task is using the FPU
  *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
  */
-static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
+static enum { ENABLE, DISABLE } eagerfpu = ENABLE;
 
 /*
  * Find supported xfeatures based on cpu features and command-line input.
@@ -348,15 +351,9 @@ static void __init fpu__init_system_ctx_switch(void)
  */
 static void __init fpu__init_parse_early_param(void)
 {
-       /*
-        * No need to check "eagerfpu=auto" again, since it is the
-        * initial default.
-        */
        if (cmdline_find_option_bool(boot_command_line, "eagerfpu=off")) {
                eagerfpu = DISABLE;
                fpu__clear_eager_fpu_features();
-       } else if (cmdline_find_option_bool(boot_command_line, "eagerfpu=on")) {
-               eagerfpu = ENABLE;
        }
 
        if (cmdline_find_option_bool(boot_command_line, "no387"))
index f129a9af635747ce616e354a43f30297f3fecabf..1f4422d5c8d013992d6b97a6a08734bde5bdb2b1 100644 (file)
@@ -40,13 +40,8 @@ pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
 {
-       unsigned long i;
-
-       for (i = 0; i < PTRS_PER_PGD-1; i++)
-               early_level4_pgt[i].pgd = 0;
-
+       memset(early_level4_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
        next_early_pgt = 0;
-
        write_cr3(__pa_nodebug(early_level4_pgt));
 }
 
@@ -54,7 +49,6 @@ static void __init reset_early_page_tables(void)
 int __init early_make_pgtable(unsigned long address)
 {
        unsigned long physaddr = address - __PAGE_OFFSET;
-       unsigned long i;
        pgdval_t pgd, *pgd_p;
        pudval_t pud, *pud_p;
        pmdval_t pmd, *pmd_p;
@@ -81,8 +75,7 @@ again:
                }
 
                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
-               for (i = 0; i < PTRS_PER_PUD; i++)
-                       pud_p[i] = 0;
+               memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
@@ -97,8 +90,7 @@ again:
                }
 
                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
-               for (i = 0; i < PTRS_PER_PMD; i++)
-                       pmd_p[i] = 0;
+               memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
                *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pmd = (physaddr & PMD_MASK) + early_pmd_flags;
@@ -192,5 +184,13 @@ void __init x86_64_start_reservations(char *real_mode_data)
 
        reserve_ebda_region();
 
+       switch (boot_params.hdr.hardware_subarch) {
+       case X86_SUBARCH_INTEL_MID:
+               x86_intel_mid_early_setup();
+               break;
+       default:
+               break;
+       }
+
        start_kernel();
 }
index 6bc9ae24b6d2a74930701c0c6ea60fe090a3ebbc..54cdbd2003fe0930ff0a91158f55d00723e2400e 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/setup.h>
 #include <asm/processor-flags.h>
 #include <asm/msr-index.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/percpu.h>
 #include <asm/nops.h>
 #include <asm/bootparam.h>
@@ -389,6 +389,12 @@ default_entry:
        /* Make changes effective */
        wrmsr
 
+       /*
+        * And make sure that all the mappings we set up have NX set from
+        * the beginning.
+        */
+       orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
+
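	/*
	 * Annotation (not part of this patch): __supported_pte_mask is a
	 * 64-bit mask and _PAGE_BIT_NX is bit 63, so the NX flag sits at bit
	 * (63 - 32) = 31 of the mask's upper half, which is why the orl above
	 * targets byte offset +4 and shifts by (_PAGE_BIT_NX - 32).
	 */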
 enable_paging:
 
 /*
index ffdc0e8603902b12b0cd44fd175d806b046ccb4b..22fbf9df61bb4eecbb5ffe530562b56c1def90b8 100644 (file)
@@ -38,7 +38,6 @@
 #define pud_index(x)   (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
 
 L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
-L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
 L3_START_KERNEL = pud_index(__START_KERNEL_map)
 
@@ -76,9 +75,7 @@ startup_64:
        subq    $_text - __START_KERNEL_map, %rbp
 
        /* Is the address not 2M aligned? */
-       movq    %rbp, %rax
-       andl    $~PMD_PAGE_MASK, %eax
-       testl   %eax, %eax
+       testl   $~PMD_PAGE_MASK, %ebp
        jnz     bad_address
 
        /*
index b8e6ff5cd5d055892715af36f1e9b72340f7b18c..be0ebbb6d1d144fc86bcab6716871ea8cdaa4744 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/pm.h>
 #include <linux/io.h>
 
+#include <asm/cpufeature.h>
 #include <asm/irqdomain.h>
 #include <asm/fixmap.h>
 #include <asm/hpet.h>
index f8062aaf5df9c830ec287f6774ad270e13d34b32..61521dc19c102114e177cbc21a4f5da9d94c20cd 100644 (file)
@@ -462,7 +462,7 @@ void fixup_irqs(void)
                 * non intr-remapping case, we can't wait till this interrupt
                 * arrives at this cpu before completing the irq move.
                 */
-               irq_force_complete_move(irq);
+               irq_force_complete_move(desc);
 
                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
@@ -470,6 +470,15 @@ void fixup_irqs(void)
                }
 
                chip = irq_data_get_irq_chip(data);
+               /*
+                * The interrupt descriptor might have been cleaned up
+                * already, but it is not yet removed from the radix tree
+                */
+               if (!chip) {
+                       raw_spin_unlock(&desc->lock);
+                       continue;
+               }
+
                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);
 
index 30ca7607cbbbbcae4793aa5c14d8f73bbd784d71..97340f2c437c64def7a83f75a8f9b0bc6a968451 100644 (file)
@@ -408,7 +408,7 @@ static inline void __init construct_default_ISA_mptable(int mpc_default_type)
        processor.cpuflag = CPU_ENABLED;
        processor.cpufeature = (boot_cpu_data.x86 << 8) |
            (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
-       processor.featureflag = boot_cpu_data.x86_capability[0];
+       processor.featureflag = boot_cpu_data.x86_capability[CPUID_1_EDX];
        processor.reserved[0] = 0;
        processor.reserved[1] = 0;
        for (i = 0; i < 2; i++) {
index 64f9616f93f1ec39a85494857d2d07728a7ec5d2..7f3550acde1b8ce604f4b5cc8b53c232450c524f 100644 (file)
@@ -40,7 +40,7 @@
 #include <linux/uaccess.h>
 #include <linux/gfp.h>
 
-#include <asm/processor.h>
+#include <asm/cpufeature.h>
 #include <asm/msr.h>
 
 static struct class *msr_class;
index 14415aff18136524cfa3d4c9b07bdde6ee0d6f54..92f70147a9a673e74b3753637adbb08e2a4869fa 100644 (file)
@@ -13,11 +13,11 @@ static int found(u64 start, u64 end, void *data)
 
 static __init int register_e820_pmem(void)
 {
-       char *pmem = "Persistent Memory (legacy)";
        struct platform_device *pdev;
        int rc;
 
-       rc = walk_iomem_res(pmem, IORESOURCE_MEM, 0, -1, NULL, found);
+       rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
+                                IORESOURCE_MEM, 0, -1, NULL, found);
        if (rc <= 0)
                return 0;
 
index 9f7c21c22477e59462d72e930d79a4c2a238a051..9decee2bfdbeedb795d3e1f423e606cdec5e49e0 100644 (file)
@@ -418,9 +418,9 @@ static void mwait_idle(void)
        if (!current_set_polling_and_test()) {
                trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
-                       smp_mb(); /* quirk */
+                       mb(); /* quirk */
                        clflush((void *)&current_thread_info()->flags);
-                       smp_mb(); /* quirk */
+                       mb(); /* quirk */
                }
 
                __monitor((void *)&current_thread_info()->flags, 0, 0);
index d3d80e6d42a20ea665325b4cf2a5e7be3c43289a..aa52c10094755e2fa9fdd5ac7a39b0c36effac8a 100644 (file)
@@ -152,21 +152,21 @@ static struct resource data_resource = {
        .name   = "Kernel data",
        .start  = 0,
        .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 static struct resource code_resource = {
        .name   = "Kernel code",
        .start  = 0,
        .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 static struct resource bss_resource = {
        .name   = "Kernel bss",
        .start  = 0,
        .end    = 0,
-       .flags  = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags  = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
 };
 
 
index cb6282c3638ffbd32bcb33663d8cefc17eac8a8e..c07ff5ddbd477eb44edb9ea960623d17b6d43924 100644 (file)
@@ -692,12 +692,15 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 
 static inline unsigned long get_nr_restart_syscall(const struct pt_regs *regs)
 {
-#if defined(CONFIG_X86_32) || !defined(CONFIG_X86_64)
+#ifdef CONFIG_X86_64
+       if (is_ia32_task())
+               return __NR_ia32_restart_syscall;
+#endif
+#ifdef CONFIG_X86_X32_ABI
+       return __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
+#else
        return __NR_restart_syscall;
-#else /* !CONFIG_X86_32 && CONFIG_X86_64 */
-       return test_thread_flag(TIF_IA32) ? __NR_ia32_restart_syscall :
-               __NR_restart_syscall | (regs->orig_ax & __X32_SYSCALL_BIT);
-#endif /* CONFIG_X86_32 || !CONFIG_X86_64 */
+#endif
 }
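/*
 * Summary annotation (not part of this patch): the rewritten helper above
 * picks the restart number as follows: an ia32 task on a 64-bit kernel gets
 * __NR_ia32_restart_syscall; otherwise, with CONFIG_X86_X32_ABI, the native
 * number is returned with the __X32_SYSCALL_BIT taken from orig_ax, so an
 * x32 syscall restarts as x32; in all remaining configurations the plain
 * __NR_restart_syscall is used.
 */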
 
 /*
index ade185a46b1da63cea0be1ce47731e301f9d1d5a..1e630d1b7ad9c1ce08be101aaa3e14d68951906a 100644 (file)
@@ -83,30 +83,16 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
 DECLARE_BITMAP(used_vectors, NR_VECTORS);
 EXPORT_SYMBOL_GPL(used_vectors);
 
-static inline void conditional_sti(struct pt_regs *regs)
+static inline void cond_local_irq_enable(struct pt_regs *regs)
 {
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_enable();
 }
 
-static inline void preempt_conditional_sti(struct pt_regs *regs)
-{
-       preempt_count_inc();
-       if (regs->flags & X86_EFLAGS_IF)
-               local_irq_enable();
-}
-
-static inline void conditional_cli(struct pt_regs *regs)
-{
-       if (regs->flags & X86_EFLAGS_IF)
-               local_irq_disable();
-}
-
-static inline void preempt_conditional_cli(struct pt_regs *regs)
+static inline void cond_local_irq_disable(struct pt_regs *regs)
 {
        if (regs->flags & X86_EFLAGS_IF)
                local_irq_disable();
-       preempt_count_dec();
 }
 
 void ist_enter(struct pt_regs *regs)
@@ -286,7 +272,7 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
 
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
                        NOTIFY_STOP) {
-               conditional_sti(regs);
+               cond_local_irq_enable(regs);
                do_trap(trapnr, signr, str, regs, error_code,
                        fill_trap_info(regs, signr, trapnr, &info));
        }
@@ -368,7 +354,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
        if (notify_die(DIE_TRAP, "bounds", regs, error_code,
                        X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
                return;
-       conditional_sti(regs);
+       cond_local_irq_enable(regs);
 
        if (!user_mode(regs))
                die("bounds", regs, error_code);
@@ -443,7 +429,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
        struct task_struct *tsk;
 
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
-       conditional_sti(regs);
+       cond_local_irq_enable(regs);
 
        if (v8086_mode(regs)) {
                local_irq_enable();
@@ -517,9 +503,11 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
         * as we may switch to the interrupt stack.
         */
        debug_stack_usage_inc();
-       preempt_conditional_sti(regs);
+       preempt_disable();
+       cond_local_irq_enable(regs);
        do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
-       preempt_conditional_cli(regs);
+       cond_local_irq_disable(regs);
+       preempt_enable_no_resched();
        debug_stack_usage_dec();
 exit:
        ist_exit(regs);
@@ -648,12 +636,14 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        debug_stack_usage_inc();
 
        /* It's safe to allow irq's after DR6 has been saved */
-       preempt_conditional_sti(regs);
+       preempt_disable();
+       cond_local_irq_enable(regs);
 
        if (v8086_mode(regs)) {
                handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
                                        X86_TRAP_DB);
-               preempt_conditional_cli(regs);
+               cond_local_irq_disable(regs);
+               preempt_enable_no_resched();
                debug_stack_usage_dec();
                goto exit;
        }
@@ -673,7 +663,8 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        si_code = get_si_code(tsk->thread.debugreg6);
        if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
                send_sigtrap(tsk, regs, error_code, si_code);
-       preempt_conditional_cli(regs);
+       cond_local_irq_disable(regs);
+       preempt_enable_no_resched();
        debug_stack_usage_dec();
 
 exit:
@@ -696,7 +687,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
 
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                return;
-       conditional_sti(regs);
+       cond_local_irq_enable(regs);
 
        if (!user_mode(regs)) {
                if (!fixup_exception(regs)) {
@@ -743,20 +734,19 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
 dotraplinkage void
 do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 {
-       conditional_sti(regs);
+       cond_local_irq_enable(regs);
 }
 
 dotraplinkage void
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
-       BUG_ON(use_eager_fpu());
 
 #ifdef CONFIG_MATH_EMULATION
-       if (read_cr0() & X86_CR0_EM) {
+       if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
                struct math_emu_info info = { };
 
-               conditional_sti(regs);
+               cond_local_irq_enable(regs);
 
                info.regs = regs;
                math_emulate(&info);
@@ -765,7 +755,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
 #endif
        fpu__restore(&current->thread.fpu); /* interrupts still off */
 #ifdef CONFIG_X86_32
-       conditional_sti(regs);
+       cond_local_irq_enable(regs);
 #endif
 }
 NOKPROBE_SYMBOL(do_device_not_available);
index 07efb35ee4bc3fd5f4452af68214de79dc936716..014ea59aa153e4f37725dd8771cdb37d88371e34 100644 (file)
@@ -30,7 +30,7 @@
  *     appropriately. Either display a message or halt.
  */
 
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/msr-index.h>
 
 verify_cpu:
index e574b85465185fe273a839093452c26521ac3d1e..3dce1ca0a653091967f7089ce9e89c9d54399408 100644 (file)
@@ -362,7 +362,7 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)
        /* make room for real-mode segments */
        tsk->thread.sp0 += 16;
 
-       if (static_cpu_has_safe(X86_FEATURE_SEP))
+       if (static_cpu_has(X86_FEATURE_SEP))
                tsk->thread.sysenter_cs = 0;
 
        load_sp0(tss, &tsk->thread);
index 74e4bf11f562e0354c227518421e2375ec16fafa..04afe9b7e52a75e111c0d5ee206578ad64675807 100644 (file)
@@ -195,6 +195,17 @@ SECTIONS
        :init
 #endif
 
+       /*
+        * Section for code used exclusively before alternatives are run. All
+        * references to such code must be patched out by alternatives, normally
+        * by using X86_FEATURE_ALWAYS CPU feature bit.
+        *
+        * See static_cpu_has() for an example.
+        */
+       .altinstr_aux : AT(ADDR(.altinstr_aux) - LOAD_OFFSET) {
+               *(.altinstr_aux)
+       }
+
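	/*
	 * Annotation (not part of this patch): the expected user of this
	 * section is the static_cpu_has() fast path mentioned above; callers
	 * simply test a feature bit, as the vm86_32.c hunk earlier in this
	 * diff does:
	 *
	 *	if (static_cpu_has(X86_FEATURE_SEP))
	 *		tsk->thread.sysenter_cs = 0;
	 *
	 * while the out-of-line probe code that runs before alternatives are
	 * applied is what is collected into .altinstr_aux.
	 */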
        INIT_DATA_SECTION(16)
 
        .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
@@ -325,6 +336,7 @@ SECTIONS
                __brk_limit = .;
        }
 
+       . = ALIGN(PAGE_SIZE);
        _end = .;
 
         STABS_DEBUG
index 3982b479bb5fe46ae147b01d0cf91933ea8be455..95fcc7b13866c8af1535edd2b8d66728448dc506 100644 (file)
  */
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-       return apic_has_pending_timer(vcpu);
+       if (lapic_in_kernel(vcpu))
+               return apic_has_pending_timer(vcpu);
+
+       return 0;
 }
 EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
 
@@ -137,8 +140,8 @@ EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 {
-       kvm_inject_apic_timer_irqs(vcpu);
-       /* TODO: PIT, RTC etc. */
+       if (lapic_in_kernel(vcpu))
+               kvm_inject_apic_timer_irqs(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
 
index ae5c78f2337d3f1359c99116cf5811ffe190b17a..61ebdc13a29af228a1ff2912c9025a5bbb5694cc 100644 (file)
@@ -109,14 +109,6 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
        return ret;
 }
 
-static inline int lapic_in_kernel(struct kvm_vcpu *vcpu)
-{
-       /* Same as irqchip_in_kernel(vcpu->kvm), but with less
-        * pointer chasing and no unnecessary memory barriers.
-        */
-       return vcpu->arch.apic != NULL;
-}
-
 void kvm_pic_reset(struct kvm_kpic_state *s);
 
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
index 8fc89efb5250fda6d8b9baaa6f4d35af1e8844ad..37217363887d762d9897d4b98ade177096e1b95a 100644 (file)
@@ -34,6 +34,7 @@
 #include "lapic.h"
 
 #include "hyperv.h"
+#include "x86.h"
 
 static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
                           struct kvm *kvm, int irq_source_id, int level,
@@ -57,6 +58,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 {
        int i, r = -1;
        struct kvm_vcpu *vcpu, *lowest = NULL;
+       unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+       unsigned int dest_vcpus = 0;
 
        if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
                        kvm_lowest_prio_delivery(irq)) {
@@ -67,6 +70,8 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
        if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
                return r;
 
+       memset(dest_vcpu_bitmap, 0, sizeof(dest_vcpu_bitmap));
+
        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_apic_present(vcpu))
                        continue;
@@ -80,13 +85,25 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
                                r = 0;
                        r += kvm_apic_set_irq(vcpu, irq, dest_map);
                } else if (kvm_lapic_enabled(vcpu)) {
-                       if (!lowest)
-                               lowest = vcpu;
-                       else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
-                               lowest = vcpu;
+                       if (!kvm_vector_hashing_enabled()) {
+                               if (!lowest)
+                                       lowest = vcpu;
+                               else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
+                                       lowest = vcpu;
+                       } else {
+                               __set_bit(i, dest_vcpu_bitmap);
+                               dest_vcpus++;
+                       }
                }
        }
 
+       if (dest_vcpus != 0) {
+               int idx = kvm_vector_to_index(irq->vector, dest_vcpus,
+                                       dest_vcpu_bitmap, KVM_MAX_VCPUS);
+
+               lowest = kvm_get_vcpu(kvm, idx);
+       }
+
        if (lowest)
                r = kvm_apic_set_irq(lowest, irq, dest_map);
 
index 36591faed13be04d12c13fa520d46ca9df0dfcf8..1482a581a83c659a3a40dd9ca2b224ea98cd3d04 100644 (file)
@@ -281,7 +281,7 @@ void kvm_apic_set_version(struct kvm_vcpu *vcpu)
        struct kvm_cpuid_entry2 *feat;
        u32 v = APIC_VERSION;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
+       if (!lapic_in_kernel(vcpu))
                return;
 
        feat = kvm_find_cpuid_entry(apic->vcpu, 0x1, 0);
@@ -475,18 +475,12 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 {
-       int highest_irr;
-
        /* This may race with setting of irr in __apic_accept_irq() and
         * value returned may be wrong, but kvm_vcpu_kick() in __apic_accept_irq
         * will cause vmexit immediately and the value will be recalculated
         * on the next vmentry.
         */
-       if (!kvm_vcpu_has_lapic(vcpu))
-               return 0;
-       highest_irr = apic_find_highest_irr(vcpu->arch.apic);
-
-       return highest_irr;
+       return apic_find_highest_irr(vcpu->arch.apic);
 }
 
 static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
@@ -675,6 +669,22 @@ bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
        }
 }
 
+int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
+                      const unsigned long *bitmap, u32 bitmap_size)
+{
+       u32 mod;
+       int i, idx = -1;
+
+       mod = vector % dest_vcpus;
+
+       for (i = 0; i <= mod; i++) {
+               idx = find_next_bit(bitmap, bitmap_size, idx + 1);
+               BUG_ON(idx == bitmap_size);
+       }
+
+       return idx;
+}
+
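/*
 * Worked example (annotation, not part of this patch): kvm_vector_to_index()
 * returns the (vector % dest_vcpus + 1)-th set bit of the destination bitmap.
 * For instance, with vector = 39, dest_vcpus = 4 and bits {0, 2, 5, 7} set,
 * mod = 39 % 4 = 3, so the loop advances past three set bits and returns
 * idx = 7: the interrupt is hashed onto the vCPU at bit position 7.
 */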
 bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
 {
@@ -727,21 +737,49 @@ bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
 
                dst = map->logical_map[cid];
 
-               if (kvm_lowest_prio_delivery(irq)) {
+               if (!kvm_lowest_prio_delivery(irq))
+                       goto set_irq;
+
+               if (!kvm_vector_hashing_enabled()) {
                        int l = -1;
                        for_each_set_bit(i, &bitmap, 16) {
                                if (!dst[i])
                                        continue;
                                if (l < 0)
                                        l = i;
-                               else if (kvm_apic_compare_prio(dst[i]->vcpu, dst[l]->vcpu) < 0)
+                               else if (kvm_apic_compare_prio(dst[i]->vcpu,
+                                                       dst[l]->vcpu) < 0)
                                        l = i;
                        }
-
                        bitmap = (l >= 0) ? 1 << l : 0;
+               } else {
+                       int idx;
+                       unsigned int dest_vcpus;
+
+                       dest_vcpus = hweight16(bitmap);
+                       if (dest_vcpus == 0)
+                               goto out;
+
+                       idx = kvm_vector_to_index(irq->vector,
+                               dest_vcpus, &bitmap, 16);
+
+                       /*
+                        * We may find a hardware-disabled LAPIC here; if that
+                        * is the case, print an error message once for each
+                        * guest and return.
+                        */
+                       if (!dst[idx] && !kvm->arch.disabled_lapic_found) {
+                               kvm->arch.disabled_lapic_found = true;
+                               printk(KERN_INFO
+                                       "Disabled LAPIC found during irq injection\n");
+                               goto out;
+                       }
+
+                       bitmap = (idx >= 0) ? 1 << idx : 0;
                }
        }
 
+set_irq:
        for_each_set_bit(i, &bitmap, 16) {
                if (!dst[i])
                        continue;
@@ -754,6 +792,20 @@ out:
        return ret;
 }
 
+/*
+ * This routine tries to handle interrupts in posted mode; here is how
+ * it deals with the different cases:
+ * - For a single-destination interrupt, handle it in posted mode.
+ * - Else if vector hashing is enabled and it is a lowest-priority
+ *   interrupt, handle it in posted mode and use the following mechanism
+ *   to find the destination vCPU:
+ *     1. For lowest-priority interrupts, store all the possible
+ *        destination vCPUs in an array.
+ *     2. Use "guest vector % max number of destination vCPUs" to find
+ *        the right destination vCPU in the array for the lowest-priority
+ *        interrupt.
+ * - Otherwise, use remapped mode to inject the interrupt.
+ */
 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                        struct kvm_vcpu **dest_vcpu)
 {
@@ -795,16 +847,44 @@ bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                if (cid >= ARRAY_SIZE(map->logical_map))
                        goto out;
 
-               for_each_set_bit(i, &bitmap, 16) {
-                       dst = map->logical_map[cid][i];
-                       if (++r == 2)
+               if (kvm_vector_hashing_enabled() &&
+                               kvm_lowest_prio_delivery(irq)) {
+                       int idx;
+                       unsigned int dest_vcpus;
+
+                       dest_vcpus = hweight16(bitmap);
+                       if (dest_vcpus == 0)
                                goto out;
-               }
 
-               if (dst && kvm_apic_present(dst->vcpu))
+                       idx = kvm_vector_to_index(irq->vector, dest_vcpus,
+                                                 &bitmap, 16);
+
+                        * We may find a hardware-disabled LAPIC here; if that
+                        * is the case, print an error message once for each
+                        * guest and return.
+                        * guest and return
+                        */
+                       dst = map->logical_map[cid][idx];
+                       if (!dst && !kvm->arch.disabled_lapic_found) {
+                               kvm->arch.disabled_lapic_found = true;
+                               printk(KERN_INFO
+                                       "Disabled LAPIC found during irq injection\n");
+                               goto out;
+                       }
+
                        *dest_vcpu = dst->vcpu;
-               else
-                       goto out;
+               } else {
+                       for_each_set_bit(i, &bitmap, 16) {
+                               dst = map->logical_map[cid][i];
+                               if (++r == 2)
+                                       goto out;
+                       }
+
+                       if (dst && kvm_apic_present(dst->vcpu))
+                               *dest_vcpu = dst->vcpu;
+                       else
+                               goto out;
+               }
        }
 
        ret = true;
@@ -1239,7 +1319,7 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu)
        struct kvm_lapic *apic = vcpu->arch.apic;
        u64 guest_tsc, tsc_deadline;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
+       if (!lapic_in_kernel(vcpu))
                return;
 
        if (apic->lapic_timer.expired_tscdeadline == 0)
@@ -1515,8 +1595,7 @@ static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
 
 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
 {
-       if (kvm_vcpu_has_lapic(vcpu))
-               apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
+       apic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
 
@@ -1566,7 +1645,7 @@ u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
+       if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
                        apic_lvtt_period(apic))
                return 0;
 
@@ -1577,7 +1656,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (!kvm_vcpu_has_lapic(vcpu) || apic_lvtt_oneshot(apic) ||
+       if (!lapic_in_kernel(vcpu) || apic_lvtt_oneshot(apic) ||
                        apic_lvtt_period(apic))
                return;
 
@@ -1590,9 +1669,6 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
-               return;
-
        apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
                     | (kvm_apic_get_reg(apic, APIC_TASKPRI) & 4));
 }
@@ -1601,9 +1677,6 @@ u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
 {
        u64 tpr;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
-               return 0;
-
        tpr = (u64) kvm_apic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
 
        return (tpr & 0xf0) >> 4;
@@ -1728,8 +1801,7 @@ int apic_has_pending_timer(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (kvm_vcpu_has_lapic(vcpu) && apic_enabled(apic) &&
-                       apic_lvt_enabled(apic, APIC_LVTT))
+       if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
                return atomic_read(&apic->lapic_timer.pending);
 
        return 0;
@@ -1826,7 +1898,7 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
        struct kvm_lapic *apic = vcpu->arch.apic;
        int highest_irr;
 
-       if (!kvm_vcpu_has_lapic(vcpu) || !apic_enabled(apic))
+       if (!apic_enabled(apic))
                return -1;
 
        apic_update_ppr(apic);
@@ -1854,9 +1926,6 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
-               return;
-
        if (atomic_read(&apic->lapic_timer.pending) > 0) {
                kvm_apic_local_deliver(apic, APIC_LVTT);
                if (apic_lvtt_tscdeadline(apic))
@@ -1932,7 +2001,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 {
        struct hrtimer *timer;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
+       if (!lapic_in_kernel(vcpu))
                return;
 
        timer = &vcpu->arch.apic->lapic_timer.timer;
@@ -2105,7 +2174,7 @@ int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
+       if (!lapic_in_kernel(vcpu))
                return 1;
 
        /* if this is ICR write vector before command */
@@ -2119,7 +2188,7 @@ int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 low, high = 0;
 
-       if (!kvm_vcpu_has_lapic(vcpu))
+       if (!lapic_in_kernel(vcpu))
                return 1;
 
        if (apic_reg_read(apic, reg, 4, &low))
@@ -2151,7 +2220,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
        u8 sipi_vector;
        unsigned long pe;
 
-       if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events)
+       if (!lapic_in_kernel(vcpu) || !apic->pending_events)
                return;
 
        /*
index 41bdb35b4b67ab10a2ebcd943d2d75d48431ff46..59610099af04bd46d3a8a3faa085cf8882bd23dd 100644 (file)
@@ -103,7 +103,7 @@ static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off)
 
 extern struct static_key kvm_no_apic_vcpu;
 
-static inline bool kvm_vcpu_has_lapic(struct kvm_vcpu *vcpu)
+static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
 {
        if (static_key_false(&kvm_no_apic_vcpu))
                return vcpu->arch.apic;
@@ -130,7 +130,7 @@ static inline bool kvm_apic_sw_enabled(struct kvm_lapic *apic)
 
 static inline bool kvm_apic_present(struct kvm_vcpu *vcpu)
 {
-       return kvm_vcpu_has_lapic(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
+       return lapic_in_kernel(vcpu) && kvm_apic_hw_enabled(vcpu->arch.apic);
 }
 
 static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
@@ -150,7 +150,7 @@ static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
 {
-       return kvm_vcpu_has_lapic(vcpu) && vcpu->arch.apic->pending_events;
+       return lapic_in_kernel(vcpu) && vcpu->arch.apic->pending_events;
 }
 
 static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
@@ -161,7 +161,7 @@ static inline bool kvm_lowest_prio_delivery(struct kvm_lapic_irq *irq)
 
 static inline int kvm_lapic_latched_init(struct kvm_vcpu *vcpu)
 {
-       return kvm_vcpu_has_lapic(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
+       return lapic_in_kernel(vcpu) && test_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
 }
 
 static inline int kvm_apic_id(struct kvm_lapic *apic)
@@ -175,4 +175,6 @@ void wait_lapic_expire(struct kvm_vcpu *vcpu);
 
 bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                        struct kvm_vcpu **dest_vcpu);
+int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
+                       const unsigned long *bitmap, u32 bitmap_size);
 #endif
index 31aa2c85dc9761ec104a9b8f36015a120eb41250..06ce377dcbc9ffb40a655a89c9f0a43855f2a732 100644 (file)
@@ -257,7 +257,7 @@ int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
 
 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.apic)
+       if (lapic_in_kernel(vcpu))
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
 }
 
index ad9f6a23f13961feca86340c9cda300608da8b8e..2f1ea2f61e1fceef4a77b3955c4d3a9c4a936a72 100644 (file)
@@ -996,11 +996,13 @@ TRACE_EVENT(kvm_enter_smm,
  * Tracepoint for VT-d posted-interrupts.
  */
 TRACE_EVENT(kvm_pi_irte_update,
-       TP_PROTO(unsigned int vcpu_id, unsigned int gsi,
-                unsigned int gvec, u64 pi_desc_addr, bool set),
-       TP_ARGS(vcpu_id, gsi, gvec, pi_desc_addr, set),
+       TP_PROTO(unsigned int host_irq, unsigned int vcpu_id,
+                unsigned int gsi, unsigned int gvec,
+                u64 pi_desc_addr, bool set),
+       TP_ARGS(host_irq, vcpu_id, gsi, gvec, pi_desc_addr, set),
 
        TP_STRUCT__entry(
+               __field(        unsigned int,   host_irq        )
                __field(        unsigned int,   vcpu_id         )
                __field(        unsigned int,   gsi             )
                __field(        unsigned int,   gvec            )
@@ -1009,6 +1011,7 @@ TRACE_EVENT(kvm_pi_irte_update,
        ),
 
        TP_fast_assign(
+               __entry->host_irq       = host_irq;
                __entry->vcpu_id        = vcpu_id;
                __entry->gsi            = gsi;
                __entry->gvec           = gvec;
@@ -1016,9 +1019,10 @@ TRACE_EVENT(kvm_pi_irte_update,
                __entry->set            = set;
        ),
 
-       TP_printk("VT-d PI is %s for this irq, vcpu %u, gsi: 0x%x, "
+       TP_printk("VT-d PI is %s for irq %u, vcpu %u, gsi: 0x%x, "
                  "gvec: 0x%x, pi_desc_addr: 0x%llx",
                  __entry->set ? "enabled and being updated" : "disabled",
+                 __entry->host_irq,
                  __entry->vcpu_id,
                  __entry->gsi,
                  __entry->gvec,
index e2951b6edbbce4deb5bad4fb38b8858c8093f8d5..164eb9e1678b0580d6142f9e60dc78e2afd6a34a 100644 (file)
@@ -10764,13 +10764,26 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                 */
 
                kvm_set_msi_irq(e, &irq);
-               if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu))
+               if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
+                       /*
+                        * Make sure the IRTE is in remapped mode if
+                        * we don't handle it in posted mode.
+                        */
+                       ret = irq_set_vcpu_affinity(host_irq, NULL);
+                       if (ret < 0) {
+                               printk(KERN_INFO
+                                  "failed to fall back to remapped mode, irq: %u\n",
+                                  host_irq);
+                               goto out;
+                       }
+
                        continue;
+               }
 
                vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
                vcpu_info.vector = irq.vector;
 
-               trace_kvm_pi_irte_update(vcpu->vcpu_id, e->gsi,
+               trace_kvm_pi_irte_update(host_irq, vcpu->vcpu_id, e->gsi,
                                vcpu_info.vector, vcpu_info.pi_desc_addr, set);
 
                if (set)
index 4244c2baf57da55aa5dd266b63781642dcc8af12..ee3e990d519a65e3a263cdec159a84968b58f1d6 100644 (file)
@@ -123,6 +123,9 @@ module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
 unsigned int __read_mostly lapic_timer_advance_ns = 0;
 module_param(lapic_timer_advance_ns, uint, S_IRUGO | S_IWUSR);
 
+static bool __read_mostly vector_hashing = true;
+module_param(vector_hashing, bool, S_IRUGO);
+
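/*
 * Annotation (not part of this patch): as a standard module_param this can be
 * disabled at load time, e.g. with "kvm.vector_hashing=0" on the kernel
 * command line when KVM is built in; S_IRUGO alone makes it read-only through
 * sysfs at runtime.
 */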
 static bool __read_mostly backwards_tsc_observed = false;
 
 #define KVM_NR_SHARED_MSRS 16
@@ -1196,14 +1199,8 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
 
 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
 {
-       uint32_t quotient, remainder;
-
-       /* Don't try to replace with do_div(), this one calculates
-        * "(dividend << 32) / divisor" */
-       __asm__ ( "divl %4"
-                 : "=a" (quotient), "=d" (remainder)
-                 : "0" (0), "1" (dividend), "r" (divisor) );
-       return quotient;
+       do_shl32_div32(dividend, divisor);
+       return dividend;
 }
 
 static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
@@ -2987,7 +2984,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
        if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
-           kvm_vcpu_has_lapic(vcpu))
+           lapic_in_kernel(vcpu))
                vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
@@ -3000,7 +2997,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                        vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
                else
                        vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK;
-               if (kvm_vcpu_has_lapic(vcpu)) {
+               if (lapic_in_kernel(vcpu)) {
                        if (events->smi.latched_init)
                                set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events);
                        else
@@ -3240,7 +3237,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        switch (ioctl) {
        case KVM_GET_LAPIC: {
                r = -EINVAL;
-               if (!vcpu->arch.apic)
+               if (!lapic_in_kernel(vcpu))
                        goto out;
                u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
 
@@ -3258,7 +3255,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        }
        case KVM_SET_LAPIC: {
                r = -EINVAL;
-               if (!vcpu->arch.apic)
+               if (!lapic_in_kernel(vcpu))
                        goto out;
                u.lapic = memdup_user(argp, sizeof(*u.lapic));
                if (IS_ERR(u.lapic))
@@ -4093,7 +4090,7 @@ static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
 
        do {
                n = min(len, 8);
-               if (!(vcpu->arch.apic &&
+               if (!(lapic_in_kernel(vcpu) &&
                      !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v))
                    && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v))
                        break;
@@ -4113,7 +4110,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
 
        do {
                n = min(len, 8);
-               if (!(vcpu->arch.apic &&
+               if (!(lapic_in_kernel(vcpu) &&
                      !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev,
                                         addr, n, v))
                    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
@@ -6010,7 +6007,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
        if (!kvm_x86_ops->update_cr8_intercept)
                return;
 
-       if (!vcpu->arch.apic)
+       if (!lapic_in_kernel(vcpu))
                return;
 
        if (vcpu->arch.apicv_active)
@@ -7038,7 +7035,7 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
-       if (!kvm_vcpu_has_lapic(vcpu) &&
+       if (!lapic_in_kernel(vcpu) &&
            mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
                return -EINVAL;
 
@@ -7593,6 +7590,7 @@ bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
 }
 
 struct static_key kvm_no_apic_vcpu __read_mostly;
+EXPORT_SYMBOL_GPL(kvm_no_apic_vcpu);
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
@@ -8370,6 +8368,12 @@ int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
        return kvm_x86_ops->update_pi_irte(kvm, host_irq, guest_irq, set);
 }
 
+bool kvm_vector_hashing_enabled(void)
+{
+       return vector_hashing;
+}
+EXPORT_SYMBOL_GPL(kvm_vector_hashing_enabled);
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
index f2afa5fe48a6dcbd44f5c573cfe9d16f41b8ad16..007940faa5c6357d1c5c1c4bd200b3b8ae195cb0 100644 (file)
@@ -179,6 +179,7 @@ int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
 int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
                                          int page_num);
+bool kvm_vector_hashing_enabled(void);
 
 #define KVM_SUPPORTED_XCR0     (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
                                | XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
@@ -192,4 +193,19 @@ extern unsigned int min_timer_period_us;
 extern unsigned int lapic_timer_advance_ns;
 
 extern struct static_key kvm_no_apic_vcpu;
+
+/* Same "calling convention" as do_div:
+ * - divide (n << 32) by base
+ * - put result in n
+ * - return remainder
+ */
+#define do_shl32_div32(n, base)                                        \
+       ({                                                      \
+           u32 __quot, __rem;                                  \
+           asm("divl %2" : "=a" (__quot), "=d" (__rem)         \
+                       : "rm" (base), "0" (0), "1" ((u32) n)); \
+           n = __quot;                                         \
+           __rem;                                              \
+        })
+
 #endif
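/*
 * Worked example (annotation, not part of this patch): do_shl32_div32()
 * follows the do_div() convention, computing (n << 32) / base, storing the
 * 32-bit quotient back into n and evaluating to the remainder.  With n = 3
 * and base = 4, (3 << 32) / 4 = 0xc0000000 with remainder 0, so afterwards
 * n == 0xc0000000 and the macro's value is 0.  This is the behaviour the
 * simplified div_frac() in the x86.c hunk above relies on; the quotient must
 * fit in 32 bits or the divl instruction faults.
 */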
index 4ba229ac3f4ff127dddf0c33fb91cc42f2e60b22..fd57d3ae7e16daf24f8cfdb4e708c5e4e9a9f3f9 100644 (file)
@@ -1520,12 +1520,6 @@ __init void lguest_init(void)
         */
        reserve_top_address(lguest_data.reserve_mem);
 
-       /*
-        * If we don't initialize the lock dependency checker now, it crashes
-        * atomic_notifier_chain_register, then paravirt_disable_iospace.
-        */
-       lockdep_init();
-
        /* Hook in our special panic hypercall code. */
        atomic_notifier_chain_register(&panic_notifier_list, &paniced);
 
@@ -1535,7 +1529,7 @@ __init void lguest_init(void)
         */
        cpu_detect(&new_cpu_data);
        /* head.S usually sets up the first capability word, so do it here. */
-       new_cpu_data.x86_capability[0] = cpuid_edx(1);
+       new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
 
        /* Math is always hard! */
        set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
index a2fe51b00ccefc660850e1a6810dd4adee611d2e..65be7cfaf947228d454f2324541e5f5227d85183 100644 (file)
@@ -1,5 +1,5 @@
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 /*
index 422db000d72767adf718dc75db682e5df576e877..5cc78bf572325fb1c5b5d6f854bfe878c55dfeb3 100644 (file)
@@ -21,12 +21,16 @@ static inline int myisspace(u8 c)
  * @option: option string to look for
  *
  * Returns the position of that @option (starts counting with 1)
- * or 0 on not found.
+ * or 0 on not found.  @option will only be found if it is found
+ * as an entire word in @cmdline.  For instance, if @option="car"
+ * then a cmdline which contains "cart" will not match.
  */
-int cmdline_find_option_bool(const char *cmdline, const char *option)
+static int
+__cmdline_find_option_bool(const char *cmdline, int max_cmdline_size,
+                          const char *option)
 {
        char c;
-       int len, pos = 0, wstart = 0;
+       int pos = 0, wstart = 0;
        const char *opptr = NULL;
        enum {
                st_wordstart = 0,       /* Start of word/after whitespace */
@@ -37,11 +41,11 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
        if (!cmdline)
                return -1;      /* No command line */
 
-       len = min_t(int, strlen(cmdline), COMMAND_LINE_SIZE);
-       if (!len)
-               return 0;
-
-       while (len--) {
+       /*
+        * This 'pos' check ensures we do not overrun
+        * a non-NULL-terminated 'cmdline'
+        */
+       while (pos < max_cmdline_size) {
                c = *(char *)cmdline++;
                pos++;
 
@@ -58,18 +62,35 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
                        /* fall through */
 
                case st_wordcmp:
-                       if (!*opptr)
+                       if (!*opptr) {
+                               /*
+                                * We matched all the way to the end of the
+                                * option we were looking for.  If the
+                                * command-line has a space _or_ ends, then
+                                * we matched!
+                                */
                                if (!c || myisspace(c))
                                        return wstart;
-                               else
-                                       state = st_wordskip;
-                       else if (!c)
+                               /*
+                                * We hit the end of the option, but _not_
+                                * the end of a word on the cmdline.  Not
+                                * a match.
+                                */
+                       } else if (!c) {
+                               /*
+                                * Hit the NULL terminator on the end of
+                                * cmdline.
+                                */
                                return 0;
-                       else if (c != *opptr++)
-                               state = st_wordskip;
-                       else if (!len)          /* last word and is matching */
-                               return wstart;
-                       break;
+                       } else if (c == *opptr++) {
+                               /*
+                                * We are currently matching, so continue
+                                * to the next character on the cmdline.
+                                */
+                               break;
+                       }
+                       state = st_wordskip;
+                       /* fall through */
 
                case st_wordskip:
                        if (!c)
@@ -82,3 +103,8 @@ int cmdline_find_option_bool(const char *cmdline, const char *option)
 
        return 0;       /* Buffer overrun */
 }
+
+int cmdline_find_option_bool(const char *cmdline, const char *option)
+{
+       return __cmdline_find_option_bool(cmdline, COMMAND_LINE_SIZE, option);
+}
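
The rewritten state machine above only reports a match when @option forms an entire word on the command line, and it never reads past max_cmdline_size. A standalone C sketch of the same matching rule; it returns the 1-based offset of the matching word's first character (mirroring the wstart return) but is a model, not the kernel's state-machine implementation:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Whole-word matching as described in the comment above: "car" must not
 * match inside "cart".  Returns the 1-based offset of the matching
 * word's first character, or 0 if the option is absent. */
static int find_option_bool(const char *cmdline, const char *option)
{
        size_t optlen = strlen(option);
        int pos = 0;

        while (cmdline[pos]) {
                while (cmdline[pos] && isspace((unsigned char)cmdline[pos]))
                        pos++;                          /* skip whitespace */
                if (!cmdline[pos])
                        break;
                if (!strncmp(cmdline + pos, option, optlen) &&
                    (cmdline[pos + optlen] == '\0' ||
                     isspace((unsigned char)cmdline[pos + optlen])))
                        return pos + 1;                 /* found as a whole word */
                while (cmdline[pos] && !isspace((unsigned char)cmdline[pos]))
                        pos++;                          /* skip rest of this word */
        }
        return 0;
}

int main(void)
{
        printf("%d\n", find_option_bool("root=/dev/sda cart quiet", "car")); /* 0 */
        printf("%d\n", find_option_bool("root=/dev/sda car quiet", "car"));  /* 15 */
        return 0;
}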
index 009f98216b7eb316c12847e42f50ac77d4f4b8a3..24ef1c2104d422c35a9ce783934306a0dd9215f1 100644 (file)
@@ -1,7 +1,7 @@
 /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
 
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 /*
index 982ce34f4a9bf66011fc2652b45466d9c2b276f9..fba34306205583329162a876b9de0526fb938cfa 100644 (file)
@@ -10,7 +10,7 @@
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
index 16698bba87deb9ff593d934e8a8fb1447be9b214..a0de849435ad69dda8b50e79c79a4bc9fc8d3d49 100644 (file)
@@ -1,7 +1,7 @@
 /* Copyright 2002 Andi Kleen */
 
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 /*
index ca2afdd6d98ed2be90da6f9ea1624beb102f0fc3..90ce01bee00c17f173719652659edb5972ecef07 100644 (file)
@@ -6,7 +6,7 @@
  *     - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
  */
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 #undef memmove
index 2661fad0582716f780af9904dc5b7c62199a5c58..c9c81227ea37d14968b3acb3ccfa346e73cda5dc 100644 (file)
@@ -1,7 +1,7 @@
 /* Copyright 2002 Andi Kleen, SuSE Labs */
 
 #include <linux/linkage.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/alternative-asm.h>
 
 .weak memset
index 42982b26e32be693713d90e8fa4e18b8f771eb4a..740d7ac03a552bc4937edfc8ff7e8b9d044a61b6 100644 (file)
@@ -173,10 +173,10 @@ static __init int setup_hugepagesz(char *opt)
 }
 __setup("hugepagesz=", setup_hugepagesz);
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 static __init int gigantic_pages_init(void)
 {
-       /* With CMA we can allocate gigantic pages at runtime */
+       /* With compaction or CMA we can allocate gigantic pages at runtime */
        if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
                hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
        return 0;
index cb4ef3de61f9ae9c95249876965a71a5d7b58cf8..a4bb1c7ab65eab79807ca8a9bc2dfc63ecc3ebde 100644 (file)
@@ -388,7 +388,6 @@ repeat:
 }
 
 pte_t *kmap_pte;
-pgprot_t kmap_prot;
 
 static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
 {
@@ -405,8 +404,6 @@ static void __init kmap_init(void)
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
-       kmap_prot = PAGE_KERNEL;
 }
 
 #ifdef CONFIG_HIGHMEM
index 5488d21123bd2edb3f71ded119495474f3e8b728..9686535edfb58024a25f22975957cda8e5834dd4 100644 (file)
@@ -53,6 +53,7 @@
 #include <asm/numa.h>
 #include <asm/cacheflush.h>
 #include <asm/init.h>
+#include <asm/uv/uv.h>
 #include <asm/setup.h>
 
 #include "mm_internal.h"
@@ -1206,26 +1207,13 @@ int kern_addr_valid(unsigned long addr)
 
 static unsigned long probe_memory_block_size(void)
 {
-       /* start from 2g */
-       unsigned long bz = 1UL<<31;
+       unsigned long bz = MIN_MEMORY_BLOCK_SIZE;
 
-       if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) {
-               pr_info("Using 2GB memory block size for large-memory system\n");
-               return 2UL * 1024 * 1024 * 1024;
-       }
-
-       /* less than 64g installed */
-       if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
-               return MIN_MEMORY_BLOCK_SIZE;
-
-       /* get the tail size */
-       while (bz > MIN_MEMORY_BLOCK_SIZE) {
-               if (!((max_pfn << PAGE_SHIFT) & (bz - 1)))
-                       break;
-               bz >>= 1;
-       }
+       /* if system is UV or has 64GB of RAM or more, use large blocks */
+       if (is_uv_system() || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
+               bz = 2UL << 30; /* 2GB */
 
-       printk(KERN_DEBUG "memory block size : %ldMB\n", bz >> 20);
+       pr_info("x86/mm: Memory block size: %ldMB\n", bz >> 20);
 
        return bz;
 }
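
The simplified policy above picks a 2GB memory block size on UV systems or machines with 64GB of RAM or more, and falls back to the minimum block size otherwise. A small standalone model of that decision; MIN_MEMORY_BLOCK_SIZE is assumed to be 128MB here and a 64-bit build is assumed:

#include <stdio.h>

#define PAGE_SHIFT 12
#define MIN_BLOCK_SIZE (128UL << 20)    /* assumed value of MIN_MEMORY_BLOCK_SIZE */

/* Mirror of the simplified probe_memory_block_size() policy above. */
static unsigned long memory_block_size(unsigned long max_pfn, int is_uv)
{
        unsigned long bz = MIN_BLOCK_SIZE;

        if (is_uv || ((max_pfn << PAGE_SHIFT) >= (64UL << 30)))
                bz = 2UL << 30;         /* 2GB blocks for UV or large-memory systems */
        return bz;
}

int main(void)
{
        unsigned long max_pfn = (96UL << 30) >> PAGE_SHIFT;   /* 96GB of RAM */

        printf("block size: %luMB\n", memory_block_size(max_pfn, 0) >> 20);  /* 2048MB */
        return 0;
}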
index d470cf219a2d8f4608445b47ee8c3307d9929bd8..1b1110fa00570e0d242926ca8f06adc37db518a5 100644 (file)
@@ -120,11 +120,22 @@ void __init kasan_init(void)
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);
 
-       memset(kasan_zero_page, 0, PAGE_SIZE);
-
        load_cr3(init_level4_pgt);
        __flush_tlb_all();
-       init_task.kasan_depth = 0;
 
+       /*
+        * kasan_zero_page has been used as early shadow memory, thus it may
+        * contain some garbage. Now we can clear and write protect it, since
+        * after the TLB flush no one should write to it.
+        */
+       memset(kasan_zero_page, 0, PAGE_SIZE);
+       for (i = 0; i < PTRS_PER_PTE; i++) {
+               pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
+               set_pte(&kasan_zero_pte[i], pte);
+       }
+       /* Flush TLBs again to be sure that write protection applied. */
+       __flush_tlb_all();
+
+       init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
 }
index c3b3f653ed0c6c9112297164c5cf535494f19461..f70c1ff4612515b310ee59fdb843ae15eb2b2a33 100644 (file)
@@ -465,46 +465,67 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
        return true;
 }
 
+/*
+ * Mark all currently memblock-reserved physical memory (which covers the
+ * kernel's own memory ranges) as hot-unswappable.
+ */
 static void __init numa_clear_kernel_node_hotplug(void)
 {
-       int i, nid;
-       nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
-       unsigned long start, end;
-       struct memblock_region *r;
+       nodemask_t reserved_nodemask = NODE_MASK_NONE;
+       struct memblock_region *mb_region;
+       int i;
 
        /*
+        * We have to do some preprocessing of memblock regions, to
+        * make them suitable for reservation.
+        *
         * At this time, all memory regions reserved by memblock are
-        * used by the kernel. Set the nid in memblock.reserved will
-        * mark out all the nodes the kernel resides in.
+        * used by the kernel, but those regions are not split up
+        * along node boundaries yet, and don't necessarily have their
+        * node ID set yet either.
+        *
+        * So iterate over all memory known to the x86 architecture,
+        * and use those ranges to set the nid in memblock.reserved.
+        * This will split up the memblock regions along node
+        * boundaries and will set the node IDs as well.
         */
        for (i = 0; i < numa_meminfo.nr_blks; i++) {
-               struct numa_memblk *mb = &numa_meminfo.blk[i];
+               struct numa_memblk *mb = numa_meminfo.blk + i;
+               int ret;
 
-               memblock_set_node(mb->start, mb->end - mb->start,
-                                 &memblock.reserved, mb->nid);
+               ret = memblock_set_node(mb->start, mb->end - mb->start, &memblock.reserved, mb->nid);
+               WARN_ON_ONCE(ret);
        }
 
        /*
-        * Mark all kernel nodes.
+        * Now go over all reserved memblock regions, to construct a
+        * node mask of all kernel reserved memory areas.
         *
-        * When booting with mem=nn[kMG] or in a kdump kernel, numa_meminfo
-        * may not include all the memblock.reserved memory ranges because
-        * trim_snb_memory() reserves specific pages for Sandy Bridge graphics.
+        * [ Note, when booting with mem=nn[kMG] or in a kdump kernel,
+        *   numa_meminfo might not include all memblock.reserved
+        *   memory ranges, because quirks such as trim_snb_memory()
+        *   reserve specific pages for Sandy Bridge graphics. ]
         */
-       for_each_memblock(reserved, r)
-               if (r->nid != MAX_NUMNODES)
-                       node_set(r->nid, numa_kernel_nodes);
+       for_each_memblock(reserved, mb_region) {
+               if (mb_region->nid != MAX_NUMNODES)
+                       node_set(mb_region->nid, reserved_nodemask);
+       }
 
-       /* Clear MEMBLOCK_HOTPLUG flag for memory in kernel nodes. */
+       /*
+        * Finally, clear the MEMBLOCK_HOTPLUG flag for all memory
+        * belonging to the reserved node mask.
+        *
+        * Note that this will include memory regions that reside
+        * on nodes that contain kernel memory - entire nodes
+        * become hot-unpluggable:
+        */
        for (i = 0; i < numa_meminfo.nr_blks; i++) {
-               nid = numa_meminfo.blk[i].nid;
-               if (!node_isset(nid, numa_kernel_nodes))
-                       continue;
+               struct numa_memblk *mb = numa_meminfo.blk + i;
 
-               start = numa_meminfo.blk[i].start;
-               end = numa_meminfo.blk[i].end;
+               if (!node_isset(mb->nid, reserved_nodemask))
+                       continue;
 
-               memblock_clear_hotplug(start, end - start);
+               memblock_clear_hotplug(mb->start, mb->end - mb->start);
        }
 }
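
The reworked numa_clear_kernel_node_hotplug() above builds a node mask of every node that holds memblock-reserved (kernel) memory, then clears the hotplug flag for all blocks on those nodes. A toy userspace model of that bookkeeping, with invented node IDs and ranges:

#include <stdio.h>

struct memblk {
        int nid;
        unsigned long start, end;
        int hotplug;
};

int main(void)
{
        struct memblk blks[] = {
                { 0, 0x00000000UL, 0x80000000UL, 1 },   /* node 0 holds kernel memory */
                { 1, 0x80000000UL, 0xc0000000UL, 1 },   /* node 1 has nothing reserved */
        };
        unsigned long reserved_nodemask = 1UL << 0;     /* reserved regions sit on node 0 */
        unsigned int i;

        for (i = 0; i < sizeof(blks) / sizeof(blks[0]); i++) {
                if (reserved_nodemask & (1UL << blks[i].nid))
                        blks[i].hotplug = 0;    /* whole node becomes non-hot-pluggable */
                printf("node %d: hotplug=%d\n", blks[i].nid, blks[i].hotplug);
        }
        return 0;
}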
 
index fc6a4c8f6e2abc79c29803264ad16a48067904e8..632d34d2023779ecb8ed2224ac83cce566175492 100644 (file)
@@ -33,7 +33,7 @@ struct cpa_data {
        pgd_t           *pgd;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
-       int             numpages;
+       unsigned long   numpages;
        int             flags;
        unsigned long   pfn;
        unsigned        force_split : 1;
@@ -910,15 +910,10 @@ static void populate_pte(struct cpa_data *cpa,
        pte = pte_offset_kernel(pmd, start);
 
        while (num_pages-- && start < end) {
-
-               /* deal with the NX bit */
-               if (!(pgprot_val(pgprot) & _PAGE_NX))
-                       cpa->pfn &= ~_PAGE_NX;
-
-               set_pte(pte, pfn_pte(cpa->pfn >> PAGE_SHIFT, pgprot));
+               set_pte(pte, pfn_pte(cpa->pfn, pgprot));
 
                start    += PAGE_SIZE;
-               cpa->pfn += PAGE_SIZE;
+               cpa->pfn++;
                pte++;
        }
 }
@@ -974,11 +969,11 @@ static int populate_pmd(struct cpa_data *cpa,
 
                pmd = pmd_offset(pud, start);
 
-               set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+               set_pmd(pmd, __pmd(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
                                   massage_pgprot(pmd_pgprot)));
 
                start     += PMD_SIZE;
-               cpa->pfn  += PMD_SIZE;
+               cpa->pfn  += PMD_SIZE >> PAGE_SHIFT;
                cur_pages += PMD_SIZE >> PAGE_SHIFT;
        }
 
@@ -1047,11 +1042,11 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
         * Map everything starting from the Gb boundary, possibly with 1G pages
         */
        while (end - start >= PUD_SIZE) {
-               set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+               set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
                                   massage_pgprot(pud_pgprot)));
 
                start     += PUD_SIZE;
-               cpa->pfn  += PUD_SIZE;
+               cpa->pfn  += PUD_SIZE >> PAGE_SHIFT;
                cur_pages += PUD_SIZE >> PAGE_SHIFT;
                pud++;
        }
@@ -1350,7 +1345,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
-               BUG_ON(cpa->numpages > numpages);
+               BUG_ON(cpa->numpages > numpages || !cpa->numpages);
                numpages -= cpa->numpages;
                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                        cpa->curpage++;
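
Several hunks above fix cpa->pfn to be advanced in pages rather than bytes (cpa->pfn++, += PMD_SIZE >> PAGE_SHIFT, and shifting by PAGE_SHIFT when building PMD/PUD entries). A small standalone illustration of why the units matter, using the usual x86-64 constants:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SIZE   (1UL << 21)          /* 2MB large-page size on x86-64 */

int main(void)
{
        unsigned long pfn = 0x100;      /* page frame number, i.e. phys 0x100000 */

        unsigned long wrong = pfn + PMD_SIZE;                   /* mixes bytes into a pfn count */
        unsigned long right = pfn + (PMD_SIZE >> PAGE_SHIFT);   /* 512 pages per 2MB mapping */

        printf("wrong next pfn: %#lx\n", wrong);   /* 0x200100 */
        printf("right next pfn: %#lx\n", right);   /* 0x300 */
        return 0;
}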
index f4ae536b0914db1db521feabe09ae0ab681a4b87..04e2e7144beee51205df17a4e3df024833eac7db 100644 (file)
@@ -943,7 +943,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                        return -EINVAL;
        }
 
-       *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+       *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
                         cachemode2protval(pcm));
 
        return 0;
@@ -959,7 +959,7 @@ int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 
        /* Set prot based on lookup */
        pcm = lookup_memtype(pfn_t_to_phys(pfn));
-       *prot = __pgprot((pgprot_val(vma->vm_page_prot) & (~_PAGE_CACHE_MASK)) |
+       *prot = __pgprot((pgprot_val(*prot) & (~_PAGE_CACHE_MASK)) |
                         cachemode2protval(pcm));
 
        return 0;
index 92e2eacb33216821e8b18821834c7ce972571f29..8bea84724a7da4bd1c6d3a0509166618c39e5349 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/proto.h>
+#include <asm/cpufeature.h>
 
 static int disable_nx;
 
@@ -31,9 +32,8 @@ early_param("noexec", noexec_setup);
 
 void x86_configure_nx(void)
 {
-       if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
-               __supported_pte_mask |= _PAGE_NX;
-       else
+       /* If disable_nx is set, clear NX on all new mappings going forward. */
+       if (disable_nx)
                __supported_pte_mask &= ~_PAGE_NX;
 }
 
index 50d86c0e9ba4973b707de5917cfae78692fd6f68..660a83c8287b61dc52c069832128a83f256258af 100644 (file)
@@ -24,7 +24,6 @@
 #include <asm/nmi.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
-#include <asm/cpufeature.h>
 
 #include "op_x86_model.h"
 #include "op_counter.h"
index 2879efc73a967bca70606547014decd3703c84bf..b4a9f23d77d9090eb715df2ee76c247bc5af6683 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/dmi.h>
 #include <linux/slab.h>
 
-#include <asm-generic/pci-bridge.h>
 #include <asm/acpi.h>
 #include <asm/segment.h>
 #include <asm/io.h>
index ea48449b2e63d1428de814d654ba58087dee436a..a2433817c987833f525380d8425bb6e902873bbd 100644 (file)
@@ -10,6 +10,9 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
@@ -28,8 +31,7 @@ struct bmp_header {
 void __init efi_bgrt_init(void)
 {
        acpi_status status;
-       void __iomem *image;
-       bool ioremapped = false;
+       void *image;
        struct bmp_header bmp_header;
 
        if (acpi_disabled)
@@ -55,11 +57,6 @@ void __init efi_bgrt_init(void)
                       bgrt_tab->status);
                return;
        }
-       if (bgrt_tab->status != 1) {
-               pr_debug("Ignoring BGRT: invalid status %u (expected 1)\n",
-                        bgrt_tab->status);
-               return;
-       }
        if (bgrt_tab->image_type != 0) {
                pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
                       bgrt_tab->image_type);
@@ -70,20 +67,19 @@ void __init efi_bgrt_init(void)
                return;
        }
 
-       image = efi_lookup_mapped_addr(bgrt_tab->image_address);
+       image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
        if (!image) {
-               image = early_ioremap(bgrt_tab->image_address,
-                                      sizeof(bmp_header));
-               ioremapped = true;
-               if (!image) {
-                       pr_err("Ignoring BGRT: failed to map image header memory\n");
-                       return;
-               }
+               pr_err("Ignoring BGRT: failed to map image header memory\n");
+               return;
        }
 
-       memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
-       if (ioremapped)
-               early_iounmap(image, sizeof(bmp_header));
+       memcpy(&bmp_header, image, sizeof(bmp_header));
+       memunmap(image);
+       if (bmp_header.id != 0x4d42) {
+               pr_err("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
+                       bmp_header.id);
+               return;
+       }
        bgrt_image_size = bmp_header.size;
 
        bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
@@ -93,18 +89,14 @@ void __init efi_bgrt_init(void)
                return;
        }
 
-       if (ioremapped) {
-               image = early_ioremap(bgrt_tab->image_address,
-                                      bmp_header.size);
-               if (!image) {
-                       pr_err("Ignoring BGRT: failed to map image memory\n");
-                       kfree(bgrt_image);
-                       bgrt_image = NULL;
-                       return;
-               }
+       image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
+       if (!image) {
+               pr_err("Ignoring BGRT: failed to map image memory\n");
+               kfree(bgrt_image);
+               bgrt_image = NULL;
+               return;
        }
 
-       memcpy_fromio(bgrt_image, image, bgrt_image_size);
-       if (ioremapped)
-               early_iounmap(image, bmp_header.size);
+       memcpy(bgrt_image, image, bgrt_image_size);
+       memunmap(image);
 }
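
The BGRT rework above switches to memremap()/memunmap() and adds a sanity check that the image really starts with the BMP magic 0x4d42 ("BM"). A userspace sketch of that header check, assuming a little-endian host and using an illustrative struct name:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A BMP file begins with the little-endian magic 0x4d42 ("BM") followed
 * by the total file size; this mirrors the check added above. */
struct bmp_file_header {
        uint16_t id;            /* must be 0x4d42 */
        uint32_t size;          /* whole file size in bytes */
} __attribute__((packed));

static int bmp_header_valid(const void *buf, size_t len)
{
        struct bmp_file_header hdr;

        if (len < sizeof(hdr))
                return 0;
        memcpy(&hdr, buf, sizeof(hdr));
        return hdr.id == 0x4d42;
}

int main(void)
{
        static const unsigned char img[] = { 'B', 'M', 0x36, 0x00, 0x00, 0x00 };

        printf("valid=%d\n", bmp_header_valid(img, sizeof(img)));   /* 1 */
        return 0;
}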
index ad285404ea7f58ac74e998d5658a8c72a9be019d..e80826e6f3a97caeb059cead5067aa8b3e6148d2 100644 (file)
@@ -235,10 +235,10 @@ void __init efi_print_memmap(void)
                char buf[64];
 
                md = p;
-               pr_info("mem%02u: %s range=[0x%016llx-0x%016llx) (%lluMB)\n",
+               pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
                        i, efi_md_typeattr_format(buf, sizeof(buf), md),
                        md->phys_addr,
-                       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
+                       md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
                        (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
        }
 #endif  /*  EFI_DEBUG  */
@@ -815,6 +815,7 @@ static void __init kexec_enter_virtual_mode(void)
 {
 #ifdef CONFIG_KEXEC_CORE
        efi_memory_desc_t *md;
+       unsigned int num_pages;
        void *p;
 
        efi.systab = NULL;
@@ -829,6 +830,12 @@ static void __init kexec_enter_virtual_mode(void)
                return;
        }
 
+       if (efi_alloc_page_tables()) {
+               pr_err("Failed to allocate EFI page tables\n");
+               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+               return;
+       }
+
        /*
        * Map efi regions which were passed via setup_data. The virt_addr is a
        * fixed addr which was used in first kernel of a kexec boot.
@@ -843,6 +850,14 @@ static void __init kexec_enter_virtual_mode(void)
 
        BUG_ON(!efi.systab);
 
+       num_pages = ALIGN(memmap.nr_map * memmap.desc_size, PAGE_SIZE);
+       num_pages >>= PAGE_SHIFT;
+
+       if (efi_setup_page_tables(memmap.phys_map, num_pages)) {
+               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+               return;
+       }
+
        efi_sync_low_kernel_mappings();
 
        /*
@@ -869,7 +884,7 @@ static void __init kexec_enter_virtual_mode(void)
  * This function will switch the EFI runtime services to virtual mode.
  * Essentially, we look through the EFI memmap and map every region that
  * has the runtime attribute bit set in its memory descriptor into the
- * ->trampoline_pgd page table using a top-down VA allocation scheme.
+ * efi_pgd page table.
  *
  * The old method which used to update that memory descriptor with the
  * virtual address obtained from ioremap() is still supported when the
@@ -879,8 +894,8 @@ static void __init kexec_enter_virtual_mode(void)
  *
  * The new method does a pagetable switch in a preemption-safe manner
  * so that we're in a different address space when calling a runtime
- * function. For function arguments passing we do copy the PGDs of the
- * kernel page table into ->trampoline_pgd prior to each call.
+ * function. For function arguments passing we do copy the PUDs of the
+ * kernel page table into efi_pgd prior to each call.
  *
  * Specially for kexec boot, efi runtime maps in previous kernel should
  * be passed in via setup_data. In that case runtime ranges will be mapped
@@ -895,6 +910,12 @@ static void __init __efi_enter_virtual_mode(void)
 
        efi.systab = NULL;
 
+       if (efi_alloc_page_tables()) {
+               pr_err("Failed to allocate EFI page tables\n");
+               clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+               return;
+       }
+
        efi_merge_regions();
        new_memmap = efi_map_regions(&count, &pg_shift);
        if (!new_memmap) {
@@ -954,28 +975,11 @@ static void __init __efi_enter_virtual_mode(void)
        efi_runtime_mkexec();
 
        /*
-        * We mapped the descriptor array into the EFI pagetable above but we're
-        * not unmapping it here. Here's why:
-        *
-        * We're copying select PGDs from the kernel page table to the EFI page
-        * table and when we do so and make changes to those PGDs like unmapping
-        * stuff from them, those changes appear in the kernel page table and we
-        * go boom.
-        *
-        * From setup_real_mode():
-        *
-        * ...
-        * trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
-        *
-        * In this particular case, our allocation is in PGD 0 of the EFI page
-        * table but we've copied that PGD from PGD[272] of the EFI page table:
-        *
-        *      pgd_index(__PAGE_OFFSET = 0xffff880000000000) = 272
-        *
-        * where the direct memory mapping in kernel space is.
-        *
-        * new_memmap's VA comes from that direct mapping and thus clearing it,
-        * it would get cleared in the kernel page table too.
+        * We mapped the descriptor array into the EFI pagetable above
+        * but we're not unmapping it here because if we're running in
+        * EFI mixed mode we need all of memory to be accessible when
+        * we pass parameters to the EFI runtime services in the
+        * thunking code.
         *
         * efi_cleanup_page_tables(__pa(new_memmap), 1 << pg_shift);
         */
index ed5b67338294f1325fffe5f7d9fce637731d5917..58d669bc8250bb98caee8b37714a123050456efb 100644 (file)
  * say 0 - 3G.
  */
 
+int __init efi_alloc_page_tables(void)
+{
+       return 0;
+}
+
 void efi_sync_low_kernel_mappings(void) {}
 void __init efi_dump_pagetable(void) {}
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
index a0ac0f9c307f661c8b3ed08c4ca6d23507772e36..b492521503fe3db968bdfc0f7ced68039dea98e5 100644 (file)
@@ -15,6 +15,8 @@
  *
  */
 
+#define pr_fmt(fmt) "efi: " fmt
+
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/mm.h>
@@ -40,6 +42,7 @@
 #include <asm/fixmap.h>
 #include <asm/realmode.h>
 #include <asm/time.h>
+#include <asm/pgalloc.h>
 
 /*
  * We allocate runtime services regions bottom-up, starting from -4G, i.e.
  */
 static u64 efi_va = EFI_VA_START;
 
-/*
- * Scratch space used for switching the pagetable in the EFI stub
- */
-struct efi_scratch {
-       u64 r15;
-       u64 prev_cr3;
-       pgd_t *efi_pgt;
-       bool use_pgd;
-       u64 phys_stack;
-} __packed;
+struct efi_scratch efi_scratch;
 
 static void __init early_code_mapping_set_exec(int executable)
 {
@@ -83,8 +77,11 @@ pgd_t * __init efi_call_phys_prolog(void)
        int pgd;
        int n_pgds;
 
-       if (!efi_enabled(EFI_OLD_MEMMAP))
-               return NULL;
+       if (!efi_enabled(EFI_OLD_MEMMAP)) {
+               save_pgd = (pgd_t *)read_cr3();
+               write_cr3((unsigned long)efi_scratch.efi_pgt);
+               goto out;
+       }
 
        early_code_mapping_set_exec(1);
 
@@ -96,6 +93,7 @@ pgd_t * __init efi_call_phys_prolog(void)
                vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
                set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
        }
+out:
        __flush_tlb_all();
 
        return save_pgd;
@@ -109,8 +107,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
        int pgd_idx;
        int nr_pgds;
 
-       if (!save_pgd)
+       if (!efi_enabled(EFI_OLD_MEMMAP)) {
+               write_cr3((unsigned long)save_pgd);
+               __flush_tlb_all();
                return;
+       }
 
        nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
 
@@ -123,27 +124,98 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
        early_code_mapping_set_exec(0);
 }
 
+static pgd_t *efi_pgd;
+
+/*
+ * We need our own copy of the higher levels of the page tables
+ * because we want to avoid inserting EFI region mappings (EFI_VA_END
+ * to EFI_VA_START) into the standard kernel page tables. Everything
+ * else can be shared, see efi_sync_low_kernel_mappings().
+ */
+int __init efi_alloc_page_tables(void)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       gfp_t gfp_mask;
+
+       if (efi_enabled(EFI_OLD_MEMMAP))
+               return 0;
+
+       gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+       efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
+       if (!efi_pgd)
+               return -ENOMEM;
+
+       pgd = efi_pgd + pgd_index(EFI_VA_END);
+
+       pud = pud_alloc_one(NULL, 0);
+       if (!pud) {
+               free_page((unsigned long)efi_pgd);
+               return -ENOMEM;
+       }
+
+       pgd_populate(NULL, pgd, pud);
+
+       return 0;
+}
+
 /*
  * Add low kernel mappings for passing arguments to EFI functions.
  */
 void efi_sync_low_kernel_mappings(void)
 {
-       unsigned num_pgds;
-       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+       unsigned num_entries;
+       pgd_t *pgd_k, *pgd_efi;
+       pud_t *pud_k, *pud_efi;
 
        if (efi_enabled(EFI_OLD_MEMMAP))
                return;
 
-       num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
+       /*
+        * We can share all PGD entries apart from the one entry that
+        * covers the EFI runtime mapping space.
+        *
+        * Make sure the EFI runtime region mappings are guaranteed to
+        * only span a single PGD entry and that the entry also maps
+        * other important kernel regions.
+        */
+       BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
+       BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
+                       (EFI_VA_END & PGDIR_MASK));
+
+       pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
+       pgd_k = pgd_offset_k(PAGE_OFFSET);
+
+       num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
+       memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
+
+       /*
+        * We share all the PUD entries apart from those that map the
+        * EFI regions. Copy around them.
+        */
+       BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
+       BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
+
+       pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
+       pud_efi = pud_offset(pgd_efi, 0);
 
-       memcpy(pgd + pgd_index(PAGE_OFFSET),
-               init_mm.pgd + pgd_index(PAGE_OFFSET),
-               sizeof(pgd_t) * num_pgds);
+       pgd_k = pgd_offset_k(EFI_VA_END);
+       pud_k = pud_offset(pgd_k, 0);
+
+       num_entries = pud_index(EFI_VA_END);
+       memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
+
+       pud_efi = pud_offset(pgd_efi, EFI_VA_START);
+       pud_k = pud_offset(pgd_k, EFI_VA_START);
+
+       num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
+       memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
 }
 
 int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-       unsigned long text;
+       unsigned long pfn, text;
+       efi_memory_desc_t *md;
        struct page *page;
        unsigned npages;
        pgd_t *pgd;
@@ -151,8 +223,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        if (efi_enabled(EFI_OLD_MEMMAP))
                return 0;
 
-       efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
-       pgd = __va(efi_scratch.efi_pgt);
+       efi_scratch.efi_pgt = (pgd_t *)__pa(efi_pgd);
+       pgd = efi_pgd;
 
        /*
         * It can happen that the physical address of new_memmap lands in memory
@@ -160,7 +232,8 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         * and ident-map those pages containing the map before calling
         * phys_efi_set_virtual_address_map().
         */
-       if (kernel_map_pages_in_pgd(pgd, pa_memmap, pa_memmap, num_pages, _PAGE_NX)) {
+       pfn = pa_memmap >> PAGE_SHIFT;
+       if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, _PAGE_NX)) {
                pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
                return 1;
        }
@@ -176,6 +249,25 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
        if (!IS_ENABLED(CONFIG_EFI_MIXED))
                return 0;
 
+       /*
+        * Map all of RAM so that we can access arguments in the 1:1
+        * mapping when making EFI runtime calls.
+        */
+       for_each_efi_memory_desc(&memmap, md) {
+               if (md->type != EFI_CONVENTIONAL_MEMORY &&
+                   md->type != EFI_LOADER_DATA &&
+                   md->type != EFI_LOADER_CODE)
+                       continue;
+
+               pfn = md->phys_addr >> PAGE_SHIFT;
+               npages = md->num_pages;
+
+               if (kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, npages, 0)) {
+                       pr_err("Failed to map 1:1 memory\n");
+                       return 1;
+               }
+       }
+
        page = alloc_page(GFP_KERNEL|__GFP_DMA32);
        if (!page)
                panic("Unable to allocate EFI runtime stack < 4GB\n");
@@ -185,8 +277,9 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
        npages = (_end - _text) >> PAGE_SHIFT;
        text = __pa(_text);
+       pfn = text >> PAGE_SHIFT;
 
-       if (kernel_map_pages_in_pgd(pgd, text >> PAGE_SHIFT, text, npages, 0)) {
+       if (kernel_map_pages_in_pgd(pgd, pfn, text, npages, 0)) {
                pr_err("Failed to map kernel text 1:1\n");
                return 1;
        }
@@ -196,20 +289,20 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 
 void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
 {
-       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-
-       kernel_unmap_pages_in_pgd(pgd, pa_memmap, num_pages);
+       kernel_unmap_pages_in_pgd(efi_pgd, pa_memmap, num_pages);
 }
 
 static void __init __map_region(efi_memory_desc_t *md, u64 va)
 {
-       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-       unsigned long pf = 0;
+       unsigned long flags = 0;
+       unsigned long pfn;
+       pgd_t *pgd = efi_pgd;
 
        if (!(md->attribute & EFI_MEMORY_WB))
-               pf |= _PAGE_PCD;
+               flags |= _PAGE_PCD;
 
-       if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+       pfn = md->phys_addr >> PAGE_SHIFT;
+       if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
                pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
                           md->phys_addr, va);
 }
@@ -312,9 +405,7 @@ void __init efi_runtime_mkexec(void)
 void __init efi_dump_pagetable(void)
 {
 #ifdef CONFIG_EFI_PGT_DUMP
-       pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
-
-       ptdump_walk_pgd_level(NULL, pgd);
+       ptdump_walk_pgd_level(NULL, efi_pgd);
 #endif
 }
 
index 86d0f9e08dd95eb1023d5ac7ec4fb006aafb72c9..32020cb8bb08ce0e12ca0fbf6428cf81344e9054 100644 (file)
        mov %rsi, %cr0;                 \
        mov (%rsp), %rsp
 
-       /* stolen from gcc */
-       .macro FLUSH_TLB_ALL
-       movq %r15, efi_scratch(%rip)
-       movq %r14, efi_scratch+8(%rip)
-       movq %cr4, %r15
-       movq %r15, %r14
-       andb $0x7f, %r14b
-       movq %r14, %cr4
-       movq %r15, %cr4
-       movq efi_scratch+8(%rip), %r14
-       movq efi_scratch(%rip), %r15
-       .endm
-
-       .macro SWITCH_PGT
-       cmpb $0, efi_scratch+24(%rip)
-       je 1f
-       movq %r15, efi_scratch(%rip)            # r15
-       # save previous CR3
-       movq %cr3, %r15
-       movq %r15, efi_scratch+8(%rip)          # prev_cr3
-       movq efi_scratch+16(%rip), %r15         # EFI pgt
-       movq %r15, %cr3
-       1:
-       .endm
-
-       .macro RESTORE_PGT
-       cmpb $0, efi_scratch+24(%rip)
-       je 2f
-       movq efi_scratch+8(%rip), %r15
-       movq %r15, %cr3
-       movq efi_scratch(%rip), %r15
-       FLUSH_TLB_ALL
-       2:
-       .endm
-
 ENTRY(efi_call)
        SAVE_XMM
        mov (%rsp), %rax
@@ -83,16 +48,8 @@ ENTRY(efi_call)
        mov %r8, %r9
        mov %rcx, %r8
        mov %rsi, %rcx
-       SWITCH_PGT
        call *%rdi
-       RESTORE_PGT
        addq $48, %rsp
        RESTORE_XMM
        ret
 ENDPROC(efi_call)
-
-       .data
-ENTRY(efi_scratch)
-       .fill 3,8,0
-       .byte 0
-       .quad 0
index 1c7380da65ffab641f0cb0aedc88cb7ef7e8f6cc..2326bf51978f04e0bc3da8a30375a57536ea5bca 100644 (file)
@@ -1,3 +1,5 @@
+#define pr_fmt(fmt) "efi: " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
@@ -8,6 +10,7 @@
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <asm/efi.h>
 #include <asm/uv/uv.h>
 
@@ -53,6 +56,33 @@ void efi_delete_dummy_variable(void)
                         0, NULL);
 }
 
+/*
+ * In the nonblocking case we do not attempt to perform garbage
+ * collection if we do not have enough free space. Rather, we do the
+ * bare minimum check and give up immediately if the available space
+ * is below EFI_MIN_RESERVE.
+ *
+ * This function is intended to be small and simple because it is
+ * invoked from crash handler paths.
+ */
+static efi_status_t
+query_variable_store_nonblocking(u32 attributes, unsigned long size)
+{
+       efi_status_t status;
+       u64 storage_size, remaining_size, max_size;
+
+       status = efi.query_variable_info_nonblocking(attributes, &storage_size,
+                                                    &remaining_size,
+                                                    &max_size);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       if (remaining_size - size < EFI_MIN_RESERVE)
+               return EFI_OUT_OF_RESOURCES;
+
+       return EFI_SUCCESS;
+}
+
 /*
  * Some firmware implementations refuse to boot if there's insufficient space
  * in the variable store. Ensure that we never use more than a safe limit.
@@ -60,7 +90,8 @@ void efi_delete_dummy_variable(void)
  * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
  * store.
  */
-efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+efi_status_t efi_query_variable_store(u32 attributes, unsigned long size,
+                                     bool nonblocking)
 {
        efi_status_t status;
        u64 storage_size, remaining_size, max_size;
@@ -68,6 +99,9 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
        if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
                return 0;
 
+       if (nonblocking)
+               return query_variable_store_nonblocking(attributes, size);
+
        status = efi.query_variable_info(attributes, &storage_size,
                                         &remaining_size, &max_size);
        if (status != EFI_SUCCESS)
@@ -248,6 +282,16 @@ out:
        return ret;
 }
 
+static const struct dmi_system_id sgi_uv1_dmi[] = {
+       { NULL, "SGI UV1",
+               {       DMI_MATCH(DMI_PRODUCT_NAME,     "Stoutland Platform"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION,  "1.0"),
+                       DMI_MATCH(DMI_BIOS_VENDOR,      "SGI.COM"),
+               }
+       },
+       { } /* NULL entry stops DMI scanning */
+};
+
 void __init efi_apply_memmap_quirks(void)
 {
        /*
@@ -256,14 +300,12 @@ void __init efi_apply_memmap_quirks(void)
         * services.
         */
        if (!efi_runtime_supported()) {
-               pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+               pr_info("Setup done, disabling due to 32/64-bit mismatch\n");
                efi_unmap_memmap();
        }
 
-       /*
-        * UV doesn't support the new EFI pagetable mapping yet.
-        */
-       if (is_uv_system())
+       /* UV2+ BIOS has a fix for this issue.  UV1 still needs the quirk. */
+       if (dmi_check_system(sgi_uv1_dmi))
                set_bit(EFI_OLD_MEMMAP, &efi.flags);
 }
 
index 1bbc21e2e4aeab9fadaaa1f21856ab0ab9a4cea1..90bb997ed0a21889fd95482bfe298862b46eaacf 100644 (file)
@@ -138,7 +138,7 @@ static void intel_mid_arch_setup(void)
                intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
        else {
                intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-               pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
+               pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
        }
 
 out:
@@ -214,12 +214,10 @@ static inline int __init setup_x86_intel_mid_timer(char *arg)
        else if (strcmp("lapic_and_apbt", arg) == 0)
                intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
        else {
-               pr_warn("X86 INTEL_MID timer option %s not recognised"
-                          " use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
-                          arg);
+               pr_warn("X86 INTEL_MID timer option %s not recognised, use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+                       arg);
                return -EINVAL;
        }
        return 0;
 }
 __setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
-
index c1bdafaac3ca828cec43af9e43d12933e4de7ea9..c61b6c332e971929fd4d639b9754e8707f513930 100644 (file)
@@ -220,11 +220,12 @@ static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
                if (imr_is_enabled(&imr)) {
                        base = imr_to_phys(imr.addr_lo);
                        end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+                       size = end - base + 1;
                } else {
                        base = 0;
                        end = 0;
+                       size = 0;
                }
-               size = end - base;
                seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
                           "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
                           &base, &end, size, imr.rmask, imr.wmask,
@@ -579,6 +580,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 {
        phys_addr_t base = virt_to_phys(&_text);
        size_t size = virt_to_phys(&__end_rodata) - base;
+       unsigned long start, end;
        int i;
        int ret;
 
@@ -586,18 +588,24 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
        for (i = 0; i < idev->max_imr; i++)
                imr_clear(i);
 
+       start = (unsigned long)_text;
+       end = (unsigned long)__end_rodata - 1;
+
        /*
         * Setup a locked IMR around the physical extent of the kernel
         * from the beginning of the .text section to the end of the
         * .rodata section as one physically contiguous block.
+        *
+        * We don't round up @size since it is already PAGE_SIZE aligned.
+        * See vmlinux.lds.S for details.
         */
        ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
        if (ret < 0) {
-               pr_err("unable to setup IMR for kernel: (%p - %p)\n",
-                       &_text, &__end_rodata);
+               pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
+                       size / 1024, start, end);
        } else {
-               pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
-                       size / 1024, &_text, &__end_rodata);
+               pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
+                       size / 1024, start, end);
        }
 
 }
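
The debugfs fix above treats IMR extents as inclusive [base, end] ranges, so the size becomes end - base + 1 (and 0 for a disabled IMR). A short illustration of the off-by-one:

#include <stdio.h>

int main(void)
{
        unsigned long base = 0x000ff000UL;
        unsigned long end  = 0x000fffffUL;      /* last byte covered by the IMR */

        printf("exclusive math: %#lx\n", end - base);       /* 0xfff  */
        printf("inclusive fix:  %#lx\n", end - base + 1);   /* 0x1000 */
        return 0;
}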
index 174781a404ff3cfbeb1f9dccaef820f7aa2e9a51..00c319048d52d78e58755f7da2639aa044e0b885 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <asm/asm.h>
 #include <asm/segment.h>
-#include <asm/cpufeature.h>
+#include <asm/cpufeatures.h>
 #include <asm/cmpxchg.h>
 #include <asm/nops.h>
 
index 8502ad30e61bcfc49a9e28c975d356b4b628a62c..5adb6a2fd117df01c232156e2091bfc3d190eac7 100644 (file)
@@ -109,7 +109,7 @@ unsigned long os_get_top_address(void)
                exit(1);
        }
 
-       printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT);
+       printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT);
        printf("Locating the top of the address space ... ");
        fflush(stdout);
 
@@ -134,7 +134,7 @@ out:
                exit(1);
        }
        top <<= UM_KERN_PAGE_SHIFT;
-       printf("0x%x\n", top);
+       printf("0x%lx\n", top);
 
        return top;
 }
index 439c0994b69689ade4fa49c6a9ead46e25597880..bfce503dffae23bb0b39c621f5d8c2e0d936fec5 100644 (file)
 
 #define old_mmap sys_old_mmap
 
-#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_32.h>
 
 #undef __SYSCALL_I386
-#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
+#define __SYSCALL_I386(nr, sym, qual) [ nr ] = sym,
 
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
index b74ea6c2c0e7b2fdb43d2f60231edc8c994333c6..f306413d3eb6e332d5ad10db63700101cafd40f0 100644 (file)
 #define stub_execveat sys_execveat
 #define stub_rt_sigreturn sys_rt_sigreturn
 
-#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
-#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
-
-#define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
+#define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long) ;
 #include <asm/syscalls_64.h>
 
 #undef __SYSCALL_64
-#define __SYSCALL_64(nr, sym, compat) [ nr ] = sym,
+#define __SYSCALL_64(nr, sym, qual) [ nr ] = sym,
 
 extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
 
index ce7e3607a870feac770b277970b74099c3049208..470564bbd08ea16359e32ab8a275a80f40bcfbda 100644 (file)
@@ -9,14 +9,12 @@
 #include <asm/types.h>
 
 #ifdef __i386__
-#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+#define __SYSCALL_I386(nr, sym, qual) [nr] = 1,
 static char syscalls[] = {
 #include <asm/syscalls_32.h>
 };
 #else
-#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
-#define __SYSCALL_COMMON(nr, sym, compat) [nr] = 1,
-#define __SYSCALL_X32(nr, sym, compat) /* Not supported */
+#define __SYSCALL_64(nr, sym, qual) [nr] = 1,
 static char syscalls[] = {
 #include <asm/syscalls_64.h>
 };
index d09e4c9d7cc5b4044c4421bde8692aaa6f8b21be..2c261082eadf82f693d062e7f600660fcd56817f 100644 (file)
@@ -1654,7 +1654,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
        cpu_detect(&new_cpu_data);
        set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
        new_cpu_data.wp_works_ok = 1;
-       new_cpu_data.x86_capability[0] = cpuid_edx(1);
+       new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
 #endif
 
        if (xen_start_info->mod_start) {
index e9df1567d778e2f87ecdb1a64df1c2dfeb874071..a3b4e907c5bf28dcba1803c309cbbcceb6c1b2ca 100644 (file)
@@ -138,6 +138,22 @@ config XTENSA_VARIANT_HAVE_PERF_EVENTS
 
          If unsure, say N.
 
+config XTENSA_FAKE_NMI
+       bool "Treat PMM IRQ as NMI"
+       depends on XTENSA_VARIANT_HAVE_PERF_EVENTS
+       default n
+       help
+         If PMM IRQ is the only IRQ at EXCM level it is safe to
+         treat it as NMI, which improves accuracy of profiling.
+
+         If there are other interrupts at or above PMM IRQ priority level
+         but not above the EXCM level, PMM IRQ still may be treated as NMI,
+         but only if these IRQs are not used. There will be a build warning
+         saying that this is not safe, and a bugcheck if one of these IRQs
+         actually fires.
+
+         If unsure, say N.
+
 config XTENSA_UNALIGNED_USER
        bool "Unaligned memory access in use space"
        help
index 74fed0b4e2c2b58a84bd0bbc699d7bca74f422de..c38e5a732d86f69b44826349314a6fb286305985 100644 (file)
 
 #ifdef CONFIG_MMU
 
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size);
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size);
+void xtensa_iounmap(volatile void __iomem *addr);
+
 /*
  * Return the virtual address for the specified bus memory.
- * Note that we currently don't support any address outside the KIO segment.
  */
 static inline void __iomem *ioremap_nocache(unsigned long offset,
                unsigned long size)
@@ -36,7 +39,7 @@ static inline void __iomem *ioremap_nocache(unsigned long offset,
            && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
                return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_BYPASS_VADDR);
        else
-               BUG();
+               return xtensa_ioremap_nocache(offset, size);
 }
 
 static inline void __iomem *ioremap_cache(unsigned long offset,
@@ -46,7 +49,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
            && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
                return (void*)(offset-XCHAL_KIO_PADDR+XCHAL_KIO_CACHED_VADDR);
        else
-               BUG();
+               return xtensa_ioremap_cache(offset, size);
 }
 #define ioremap_cache ioremap_cache
 
@@ -60,6 +63,13 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 
 static inline void iounmap(volatile void __iomem *addr)
 {
+       unsigned long va = (unsigned long) addr;
+
+       if (!(va >= XCHAL_KIO_CACHED_VADDR &&
+             va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&
+           !(va >= XCHAL_KIO_BYPASS_VADDR &&
+             va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))
+               xtensa_iounmap(addr);
 }
 
 #define virt_to_bus     virt_to_phys
index 83e2e4bc01ba24f54965df8b4ee3de7855e028e0..744ecf0dc3a4f51c441732493f29ab977ae572ea 100644 (file)
 #define XTENSA_INTLEVEL_MASK(level) _XTENSA_INTLEVEL_MASK(level)
 #define _XTENSA_INTLEVEL_MASK(level) (XCHAL_INTLEVEL##level##_MASK)
 
-#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+#define XTENSA_INTLEVEL_ANDBELOW_MASK(l) _XTENSA_INTLEVEL_ANDBELOW_MASK(l)
+#define _XTENSA_INTLEVEL_ANDBELOW_MASK(l) (XCHAL_INTLEVEL##l##_ANDBELOW_MASK)
 
 #define PROFILING_INTLEVEL XTENSA_INT_LEVEL(XCHAL_PROFILING_INTERRUPT)
 
 /* LOCKLEVEL defines the interrupt level that masks all
  * general-purpose interrupts.
  */
-#if defined(CONFIG_XTENSA_VARIANT_HAVE_PERF_EVENTS) && \
-       defined(XCHAL_PROFILING_INTERRUPT) && \
-       PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
-       XCHAL_EXCM_LEVEL > 1 && \
-       IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL))
-#define LOCKLEVEL (XCHAL_EXCM_LEVEL - 1)
+#if defined(CONFIG_XTENSA_FAKE_NMI) && defined(XCHAL_PROFILING_INTERRUPT)
+#define LOCKLEVEL (PROFILING_INTLEVEL - 1)
 #else
 #define LOCKLEVEL XCHAL_EXCM_LEVEL
 #endif
+
 #define TOPLEVEL XCHAL_EXCM_LEVEL
 #define XTENSA_FAKE_NMI (LOCKLEVEL < TOPLEVEL)
 
index ca929e6a38b5f70a5e1e2d1492b1cdde5e5f0e2a..f9b389d4e97393b1c93be083b8db149b632602fa 100644 (file)
 #include <asm/processor.h>
 #include <linux/stringify.h>
 
-#define _INTLEVEL(x)   XCHAL_INT ## x ## _LEVEL
-#define INTLEVEL(x)    _INTLEVEL(x)
-
 #if XCHAL_NUM_TIMERS > 0 && \
-       INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
+       XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     0
 # define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT
 #elif XCHAL_NUM_TIMERS > 1 && \
-       INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
+       XTENSA_INT_LEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     1
 # define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT
 #elif XCHAL_NUM_TIMERS > 2 && \
-       INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
+       XTENSA_INT_LEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL
 # define LINUX_TIMER     2
 # define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT
 #else
index 42d441f7898b94db35e3a2cd55af500f23f6435a..be0cae8082c71be5d42c6b5ecf02991cd67c7ec6 100644 (file)
@@ -205,6 +205,32 @@ extern void do_IRQ(int, struct pt_regs *);
 
 #if XTENSA_FAKE_NMI
 
+#define IS_POW2(v) (((v) & ((v) - 1)) == 0)
+
+#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
+      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
+#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
+#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fire."
+
+static inline void check_valid_nmi(void)
+{
+       unsigned intread = get_sr(interrupt);
+       unsigned intenable = get_sr(intenable);
+
+       BUG_ON(intread & intenable &
+              ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
+                XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
+                BIT(XCHAL_PROFILING_INTERRUPT)));
+}
+
+#else
+
+static inline void check_valid_nmi(void)
+{
+}
+
+#endif
+
 irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);
 
 DEFINE_PER_CPU(unsigned long, nmi_count);
@@ -219,6 +245,7 @@ void do_nmi(struct pt_regs *regs)
        old_regs = set_irq_regs(regs);
        nmi_enter();
        ++*this_cpu_ptr(&nmi_count);
+       check_valid_nmi();
        xtensa_pmu_irq_handler(0, NULL);
        nmi_exit();
        set_irq_regs(old_regs);
index e601e2fbe8e6ebadc109a1c2754f6a47ce64f1c2..0b3d296a016a38cd1373588e52a1623b14833403 100644 (file)
@@ -3,5 +3,5 @@
 #
 
 obj-y                  := init.o misc.o
-obj-$(CONFIG_MMU)      += cache.o fault.o mmu.o tlb.o
+obj-$(CONFIG_MMU)      += cache.o fault.o ioremap.o mmu.o tlb.o
 obj-$(CONFIG_HIGHMEM)  += highmem.o
diff --git a/arch/xtensa/mm/ioremap.c b/arch/xtensa/mm/ioremap.c
new file mode 100644 (file)
index 0000000..d89c3c5
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * ioremap implementation.
+ *
+ * Copyright (C) 2015 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+
+static void __iomem *xtensa_ioremap(unsigned long paddr, unsigned long size,
+                                   pgprot_t prot)
+{
+       unsigned long offset = paddr & ~PAGE_MASK;
+       unsigned long pfn = __phys_to_pfn(paddr);
+       struct vm_struct *area;
+       unsigned long vaddr;
+       int err;
+
+       paddr &= PAGE_MASK;
+
+       WARN_ON(pfn_valid(pfn));
+
+       size = PAGE_ALIGN(offset + size);
+
+       area = get_vm_area(size, VM_IOREMAP);
+       if (!area)
+               return NULL;
+
+       vaddr = (unsigned long)area->addr;
+       area->phys_addr = paddr;
+
+       err = ioremap_page_range(vaddr, vaddr + size, paddr, prot);
+
+       if (err) {
+               vunmap((void *)vaddr);
+               return NULL;
+       }
+
+       flush_cache_vmap(vaddr, vaddr + size);
+       return (void __iomem *)(offset + vaddr);
+}
+
+void __iomem *xtensa_ioremap_nocache(unsigned long addr, unsigned long size)
+{
+       return xtensa_ioremap(addr, size, pgprot_noncached(PAGE_KERNEL));
+}
+EXPORT_SYMBOL(xtensa_ioremap_nocache);
+
+void __iomem *xtensa_ioremap_cache(unsigned long addr, unsigned long size)
+{
+       return xtensa_ioremap(addr, size, PAGE_KERNEL);
+}
+EXPORT_SYMBOL(xtensa_ioremap_cache);
+
+void xtensa_iounmap(volatile void __iomem *io_addr)
+{
+       void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
+
+       vunmap(addr);
+}
+EXPORT_SYMBOL(xtensa_iounmap);
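
A consumer of the new helpers would normally go through the generic ioremap()/iounmap() wrappers in asm/io.h; the sketch below calls the exported xtensa functions directly only to show the contract. It assumes the matching declarations from the same series, and DEMO_REG_BASE, DEMO_REG_SIZE and the demo module itself are hypothetical:

#include <linux/io.h>
#include <linux/module.h>

/* Hypothetical MMIO window; address and size are made up for the example. */
#define DEMO_REG_BASE	0xfd000000UL
#define DEMO_REG_SIZE	0x100UL

static void __iomem *demo_regs;

static int __init demo_init(void)
{
	/* Non-cached mapping, as device registers normally require. */
	demo_regs = xtensa_ioremap_nocache(DEMO_REG_BASE, DEMO_REG_SIZE);
	if (!demo_regs)
		return -ENOMEM;

	/* Read a (hypothetical) ID register at offset 0. */
	pr_info("demo: id=%08x\n", readl(demo_regs));
	return 0;
}

static void __exit demo_exit(void)
{
	xtensa_iounmap(demo_regs);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
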
index 70cb408bc20dc8fc593fbbcebf68d3a06cb967b0..c54505dcf4db9b5e973fb9bbcd5e48b26c40c04d 100644 (file)
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 
-#ifdef SERIAL_INLINE
-#define _INLINE_ inline
-#endif
-
 #define SERIAL_MAX_NUM_LINES 1
 #define SERIAL_TIMER_VALUE (HZ / 10)
 
index 87678961a8c87348e56556dd9b04ab3fa623de36..5f4bd71971d6a317562289f2e9156f60fd07ed02 100644 (file)
@@ -113,7 +113,7 @@ void platform_heartbeat(void)
 }
 
 //#define RS_TABLE_SIZE 2
-//#define STD_COM_FLAGS (ASYNC_BOOT_AUTOCONF|ASYNC_SKIP_TEST)
+//#define STD_COM_FLAGS (UPF_BOOT_AUTOCONF|UPF_SKIP_TEST)
 
 #define _SERIAL_PORT(_base,_irq)                                       \
 {                                                                      \
index ab51685988c253616f751615ba06206732149de4..c60e233eb08bdc110eb37018f3593c8ddb4001fc 100644 (file)
@@ -2198,7 +2198,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
        if (q->mq_ops) {
                if (blk_queue_io_stat(q))
                        blk_account_io_start(rq, true);
-               blk_mq_insert_request(rq, false, true, true);
+               blk_mq_insert_request(rq, false, true, false);
                return 0;
        }
 
index 1699df5b0493a721a5ee1945a07c8ec516856164..888a7fec81f715199c3050576516e55b692f2b9b 100644 (file)
@@ -70,6 +70,18 @@ static struct bio *blk_bio_write_same_split(struct request_queue *q,
        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
 }
 
+static inline unsigned get_max_io_size(struct request_queue *q,
+                                      struct bio *bio)
+{
+       unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
+       unsigned mask = queue_logical_block_size(q) - 1;
+
+       /* aligned to logical block size */
+       sectors &= ~(mask >> 9);
+
+       return sectors;
+}
+
 static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
@@ -81,6 +93,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
        unsigned front_seg_size = bio->bi_seg_front_size;
        bool do_split = true;
        struct bio *new = NULL;
+       const unsigned max_sectors = get_max_io_size(q, bio);
 
        bio_for_each_segment(bv, bio, iter) {
                /*
@@ -90,20 +103,19 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;
 
-               if (sectors + (bv.bv_len >> 9) >
-                               blk_max_size_offset(q, bio->bi_iter.bi_sector)) {
+               if (sectors + (bv.bv_len >> 9) > max_sectors) {
                        /*
                         * Consider this a new segment if we're splitting in
                         * the middle of this vector.
                         */
                        if (nsegs < queue_max_segments(q) &&
-                           sectors < blk_max_size_offset(q,
-                                               bio->bi_iter.bi_sector)) {
+                           sectors < max_sectors) {
                                nsegs++;
-                               sectors = blk_max_size_offset(q,
-                                               bio->bi_iter.bi_sector);
+                               sectors = max_sectors;
                        }
-                       goto split;
+                       if (sectors)
+                               goto split;
+                       /* Treat this single bvec as the 1st segment */
                }
 
                if (bvprvp && blk_queue_cluster(q)) {
index 1cf18784c5cf3c44be94dbd003ca9d7088f883e0..431fdda21737cb91b9a5a0f49fd1c2ccd2cfae7c 100644 (file)
@@ -408,17 +408,18 @@ void blk_mq_unregister_disk(struct gendisk *disk)
        blk_mq_enable_hotplug();
 }
 
+void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
+{
+       kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
+}
+
 static void blk_mq_sysfs_init(struct request_queue *q)
 {
-       struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
        int i;
 
        kobject_init(&q->mq_kobj, &blk_mq_ktype);
 
-       queue_for_each_hw_ctx(q, hctx, i)
-               kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
-
        queue_for_each_ctx(q, ctx, i)
                kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
 }
index 4c0622fae41383d0f5577ea9b8b127d93df33bb6..645eb9e716d0106a5a3582fb1e4516e1d63169ce 100644 (file)
@@ -1742,31 +1742,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
        return -1;
 }
 
-static int blk_mq_init_hw_queues(struct request_queue *q,
-               struct blk_mq_tag_set *set)
-{
-       struct blk_mq_hw_ctx *hctx;
-       unsigned int i;
-
-       /*
-        * Initialize hardware queues
-        */
-       queue_for_each_hw_ctx(q, hctx, i) {
-               if (blk_mq_init_hctx(q, set, hctx, i))
-                       break;
-       }
-
-       if (i == q->nr_hw_queues)
-               return 0;
-
-       /*
-        * Init failed
-        */
-       blk_mq_exit_hw_queues(q, set, i);
-
-       return 1;
-}
-
 static void blk_mq_init_cpu_queues(struct request_queue *q,
                                   unsigned int nr_hw_queues)
 {
@@ -1824,6 +1799,7 @@ static void blk_mq_map_swqueue(struct request_queue *q,
                        continue;
 
                hctx = q->mq_ops->map_queue(q, i);
+
                cpumask_set_cpu(i, hctx->cpumask);
                ctx->index_hw = hctx->nr_ctx;
                hctx->ctxs[hctx->nr_ctx++] = ctx;
@@ -1972,54 +1948,89 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 }
 EXPORT_SYMBOL(blk_mq_init_queue);
 
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-                                                 struct request_queue *q)
+static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
+                                               struct request_queue *q)
 {
-       struct blk_mq_hw_ctx **hctxs;
-       struct blk_mq_ctx __percpu *ctx;
-       unsigned int *map;
-       int i;
-
-       ctx = alloc_percpu(struct blk_mq_ctx);
-       if (!ctx)
-               return ERR_PTR(-ENOMEM);
-
-       hctxs = kmalloc_node(set->nr_hw_queues * sizeof(*hctxs), GFP_KERNEL,
-                       set->numa_node);
-
-       if (!hctxs)
-               goto err_percpu;
-
-       map = blk_mq_make_queue_map(set);
-       if (!map)
-               goto err_map;
+       int i, j;
+       struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
+       blk_mq_sysfs_unregister(q);
        for (i = 0; i < set->nr_hw_queues; i++) {
-               int node = blk_mq_hw_queue_to_node(map, i);
+               int node;
 
+               if (hctxs[i])
+                       continue;
+
+               node = blk_mq_hw_queue_to_node(q->mq_map, i);
                hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
                                        GFP_KERNEL, node);
                if (!hctxs[i])
-                       goto err_hctxs;
+                       break;
 
                if (!zalloc_cpumask_var_node(&hctxs[i]->cpumask, GFP_KERNEL,
-                                               node))
-                       goto err_hctxs;
+                                               node)) {
+                       kfree(hctxs[i]);
+                       hctxs[i] = NULL;
+                       break;
+               }
 
                atomic_set(&hctxs[i]->nr_active, 0);
                hctxs[i]->numa_node = node;
                hctxs[i]->queue_num = i;
+
+               if (blk_mq_init_hctx(q, set, hctxs[i], i)) {
+                       free_cpumask_var(hctxs[i]->cpumask);
+                       kfree(hctxs[i]);
+                       hctxs[i] = NULL;
+                       break;
+               }
+               blk_mq_hctx_kobj_init(hctxs[i]);
        }
+       for (j = i; j < q->nr_hw_queues; j++) {
+               struct blk_mq_hw_ctx *hctx = hctxs[j];
+
+               if (hctx) {
+                       if (hctx->tags) {
+                               blk_mq_free_rq_map(set, hctx->tags, j);
+                               set->tags[j] = NULL;
+                       }
+                       blk_mq_exit_hctx(q, set, hctx, j);
+                       free_cpumask_var(hctx->cpumask);
+                       kobject_put(&hctx->kobj);
+                       kfree(hctx->ctxs);
+                       kfree(hctx);
+                       hctxs[j] = NULL;
+
+               }
+       }
+       q->nr_hw_queues = i;
+       blk_mq_sysfs_register(q);
+}
+
+struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+                                                 struct request_queue *q)
+{
+       q->queue_ctx = alloc_percpu(struct blk_mq_ctx);
+       if (!q->queue_ctx)
+               return ERR_PTR(-ENOMEM);
+
+       q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
+                                               GFP_KERNEL, set->numa_node);
+       if (!q->queue_hw_ctx)
+               goto err_percpu;
+
+       q->mq_map = blk_mq_make_queue_map(set);
+       if (!q->mq_map)
+               goto err_map;
+
+       blk_mq_realloc_hw_ctxs(set, q);
+       if (!q->nr_hw_queues)
+               goto err_hctxs;
 
        INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
        blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
        q->nr_queues = nr_cpu_ids;
-       q->nr_hw_queues = set->nr_hw_queues;
-       q->mq_map = map;
-
-       q->queue_ctx = ctx;
-       q->queue_hw_ctx = hctxs;
 
        q->mq_ops = set->ops;
        q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
@@ -2048,9 +2059,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 
-       if (blk_mq_init_hw_queues(q, set))
-               goto err_hctxs;
-
        get_online_cpus();
        mutex_lock(&all_q_mutex);
 
@@ -2064,17 +2072,11 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        return q;
 
 err_hctxs:
-       kfree(map);
-       for (i = 0; i < set->nr_hw_queues; i++) {
-               if (!hctxs[i])
-                       break;
-               free_cpumask_var(hctxs[i]->cpumask);
-               kfree(hctxs[i]);
-       }
+       kfree(q->mq_map);
 err_map:
-       kfree(hctxs);
+       kfree(q->queue_hw_ctx);
 err_percpu:
-       free_percpu(ctx);
+       free_percpu(q->queue_ctx);
        return ERR_PTR(-ENOMEM);
 }
 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
@@ -2282,9 +2284,13 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
                set->nr_hw_queues = 1;
                set->queue_depth = min(64U, set->queue_depth);
        }
+       /*
+        * There is no use for more h/w queues than cpus.
+        */
+       if (set->nr_hw_queues > nr_cpu_ids)
+               set->nr_hw_queues = nr_cpu_ids;
 
-       set->tags = kmalloc_node(set->nr_hw_queues *
-                                sizeof(struct blk_mq_tags *),
+       set->tags = kzalloc_node(nr_cpu_ids * sizeof(struct blk_mq_tags *),
                                 GFP_KERNEL, set->numa_node);
        if (!set->tags)
                return -ENOMEM;
@@ -2307,7 +2313,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
 {
        int i;
 
-       for (i = 0; i < set->nr_hw_queues; i++) {
+       for (i = 0; i < nr_cpu_ids; i++) {
                if (set->tags[i])
                        blk_mq_free_rq_map(set, set->tags[i], i);
        }
@@ -2339,6 +2345,35 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
        return ret;
 }
 
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
+{
+       struct request_queue *q;
+
+       if (nr_hw_queues > nr_cpu_ids)
+               nr_hw_queues = nr_cpu_ids;
+       if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
+               return;
+
+       list_for_each_entry(q, &set->tag_list, tag_set_list)
+               blk_mq_freeze_queue(q);
+
+       set->nr_hw_queues = nr_hw_queues;
+       list_for_each_entry(q, &set->tag_list, tag_set_list) {
+               blk_mq_realloc_hw_ctxs(set, q);
+
+               if (q->nr_hw_queues > 1)
+                       blk_queue_make_request(q, blk_mq_make_request);
+               else
+                       blk_queue_make_request(q, blk_sq_make_request);
+
+               blk_mq_queue_reinit(q, cpu_online_mask);
+       }
+
+       list_for_each_entry(q, &set->tag_list, tag_set_list)
+               blk_mq_unfreeze_queue(q);
+}
+EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
+
 void blk_mq_disable_hotplug(void)
 {
        mutex_lock(&all_q_mutex);
index eaede8e45c9c3e5f2555ea86ec39492e1c042e59..9087b11037b70ae514fd46ea10f659152f291aa2 100644 (file)
@@ -57,6 +57,7 @@ extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
  */
 extern int blk_mq_sysfs_register(struct request_queue *q);
 extern void blk_mq_sysfs_unregister(struct request_queue *q);
+extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
 
 extern void blk_mq_rq_timed_out(struct request *req, bool reserved);
 
index 1f9093e901daed7849a54633be5d80ce96b010f4..e3c591dd8f19d0b46fe42dc842510f9d220af143 100644 (file)
@@ -632,6 +632,13 @@ static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
        return pblkg ? blkg_to_cfqg(pblkg) : NULL;
 }
 
+static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
+                                     struct cfq_group *ancestor)
+{
+       return cgroup_is_descendant(cfqg_to_blkg(cfqg)->blkcg->css.cgroup,
+                                   cfqg_to_blkg(ancestor)->blkcg->css.cgroup);
+}
+
 static inline void cfqg_get(struct cfq_group *cfqg)
 {
        return blkg_get(cfqg_to_blkg(cfqg));
@@ -758,6 +765,11 @@ static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
 #else  /* CONFIG_CFQ_GROUP_IOSCHED */
 
 static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
+static inline bool cfqg_is_descendant(struct cfq_group *cfqg,
+                                     struct cfq_group *ancestor)
+{
+       return true;
+}
 static inline void cfqg_get(struct cfq_group *cfqg) { }
 static inline void cfqg_put(struct cfq_group *cfqg) { }
 
@@ -2897,6 +2909,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 {
        struct cfq_queue *cfqq = cfqd->active_queue;
+       struct cfq_rb_root *st = cfqq->service_tree;
        struct cfq_io_cq *cic;
        unsigned long sl, group_idle = 0;
 
@@ -2947,8 +2960,13 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
                return;
        }
 
-       /* There are other queues in the group, don't do group idle */
-       if (group_idle && cfqq->cfqg->nr_cfqq > 1)
+       /*
+        * If there are other queues in the group, or this is the only group
+        * and its think time is too big, don't do group idle.
+        */
+       if (group_idle &&
+           (cfqq->cfqg->nr_cfqq > 1 ||
+            cfq_io_thinktime_big(cfqd, &st->ttime, true)))
                return;
 
        cfq_mark_cfqq_wait_request(cfqq);
@@ -3947,16 +3965,27 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
        if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
                return true;
 
-       if (new_cfqq->cfqg != cfqq->cfqg)
+       /*
+        * Treat ancestors of current cgroup the same way as current cgroup.
+        * For anybody else we disallow preemption to guarantee service
+        * fairness among cgroups.
+        */
+       if (!cfqg_is_descendant(cfqq->cfqg, new_cfqq->cfqg))
                return false;
 
        if (cfq_slice_used(cfqq))
                return true;
 
+       /*
+        * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
+        */
+       if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
+               return true;
+
+       WARN_ON_ONCE(cfqq->ioprio_class != new_cfqq->ioprio_class);
        /* Allow preemption only if we are idling on sync-noidle tree */
        if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
            cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
-           new_cfqq->service_tree->count == 2 &&
            RB_EMPTY_ROOT(&cfqq->sort_list))
                return true;
 
@@ -3967,12 +3996,6 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
        if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
                return true;
 
-       /*
-        * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
-        */
-       if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
-               return true;
-
        /* An idle queue should not be idle now for some reason */
        if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
                return true;
index 77f5d17779d6875d0595f23410cbd61116491450..d8996bbd7f12805b9c74429813f5b3ee3d744bdc 100644 (file)
@@ -434,42 +434,6 @@ bool blkdev_dax_capable(struct block_device *bdev)
 
        return true;
 }
-
-static int blkdev_daxset(struct block_device *bdev, unsigned long argp)
-{
-       unsigned long arg;
-       int rc = 0;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EACCES;
-
-       if (get_user(arg, (int __user *)(argp)))
-               return -EFAULT;
-       arg = !!arg;
-       if (arg == !!(bdev->bd_inode->i_flags & S_DAX))
-               return 0;
-
-       if (arg)
-               arg = S_DAX;
-
-       if (arg && !blkdev_dax_capable(bdev))
-               return -ENOTTY;
-
-       inode_lock(bdev->bd_inode);
-       if (bdev->bd_map_count == 0)
-               inode_set_flags(bdev->bd_inode, arg, S_DAX);
-       else
-               rc = -EBUSY;
-       inode_unlock(bdev->bd_inode);
-       return rc;
-}
-#else
-static int blkdev_daxset(struct block_device *bdev, int arg)
-{
-       if (arg)
-               return -ENOTTY;
-       return 0;
-}
 #endif
 
 static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
@@ -634,8 +598,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
        case BLKTRACESETUP:
        case BLKTRACETEARDOWN:
                return blk_trace_ioctl(bdev, cmd, argp);
-       case BLKDAXSET:
-               return blkdev_daxset(bdev, arg);
        case BLKDAXGET:
                return put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX));
                break;
index 746935a5973ca6c76b8f66cfff8d58ce50566994..fefd01b496a0852754d9a586c9b0dd6d940ff746 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/kmod.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
+#include <linux/dax.h>
 #include <linux/blktrace_api.h>
 
 #include "partitions/check.h"
@@ -550,13 +551,24 @@ int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
        return 0;
 }
 
-unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
 {
        struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+       return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+                       NULL);
+}
+
+unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+{
        struct page *page;
 
-       page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
-                                NULL);
+       /* don't populate page cache for dax capable devices */
+       if (IS_DAX(bdev->bd_inode))
+               page = read_dax_sector(bdev, n);
+       else
+               page = read_pagecache_sector(bdev, n);
+
        if (!IS_ERR(page)) {
                if (PageError(page))
                        goto fail;
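
For reference, a minimal sketch of the calling convention that read_dev_sector() keeps across this change: the caller gets a pointer into a page (page cache or, for DAX-capable devices, a directly mapped page) plus a Sector cookie to release with put_dev_sector(). demo_read_first_sector() is a hypothetical caller:

#include <linux/genhd.h>
#include <linux/printk.h>

static int demo_read_first_sector(struct block_device *bdev)
{
	Sector sect;
	unsigned char *data;

	data = read_dev_sector(bdev, 0, &sect);
	if (!data)
		return -EIO;

	pr_info("first byte of sector 0: %02x\n", data[0]);
	put_dev_sector(sect);	/* drops the page reference */
	return 0;
}
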
index 7240821137fde371f0e32d9ae8ac25b64ab58862..93a1fdc1feee68c9a8b15bef682886015884ce98 100644 (file)
@@ -84,15 +84,6 @@ config CRYPTO_RNG_DEFAULT
        tristate
        select CRYPTO_DRBG_MENU
 
-config CRYPTO_PCOMP
-       tristate
-       select CRYPTO_PCOMP2
-       select CRYPTO_ALGAPI
-
-config CRYPTO_PCOMP2
-       tristate
-       select CRYPTO_ALGAPI2
-
 config CRYPTO_AKCIPHER2
        tristate
        select CRYPTO_ALGAPI2
@@ -122,7 +113,6 @@ config CRYPTO_MANAGER2
        select CRYPTO_AEAD2
        select CRYPTO_HASH2
        select CRYPTO_BLKCIPHER2
-       select CRYPTO_PCOMP2
        select CRYPTO_AKCIPHER2
 
 config CRYPTO_USER
@@ -227,6 +217,9 @@ config CRYPTO_GLUE_HELPER_X86
        depends on X86
        select CRYPTO_ALGAPI
 
+config CRYPTO_ENGINE
+       tristate
+
 comment "Authenticated Encryption with Associated Data"
 
 config CRYPTO_CCM
@@ -472,11 +465,13 @@ config CRYPTO_CRCT10DIF_PCLMUL
 config CRYPTO_GHASH
        tristate "GHASH digest algorithm"
        select CRYPTO_GF128MUL
+       select CRYPTO_HASH
        help
          GHASH is message digest algorithm for GCM (Galois/Counter Mode).
 
 config CRYPTO_POLY1305
        tristate "Poly1305 authenticator algorithm"
+       select CRYPTO_HASH
        help
          Poly1305 authenticator algorithm, RFC7539.
 
@@ -1504,15 +1499,6 @@ config CRYPTO_DEFLATE
 
          You will most probably want this if using IPSec.
 
-config CRYPTO_ZLIB
-       tristate "Zlib compression algorithm"
-       select CRYPTO_PCOMP
-       select ZLIB_INFLATE
-       select ZLIB_DEFLATE
-       select NLATTR
-       help
-         This is the zlib algorithm.
-
 config CRYPTO_LZO
        tristate "LZO compression algorithm"
        select CRYPTO_ALGAPI
@@ -1593,6 +1579,7 @@ endif     # if CRYPTO_DRBG_MENU
 
 config CRYPTO_JITTERENTROPY
        tristate "Jitterentropy Non-Deterministic Random Number Generator"
+       select CRYPTO_RNG
        help
          The Jitterentropy RNG is a noise that is intended
          to provide seed to another RNG. The RNG does not
index 2acdbbd304758986a0b95adbbb628e609c2a88d0..4f4ef7eaae3f27df7768ea8fbe11ad02596ed702 100644 (file)
@@ -7,6 +7,7 @@ crypto-y := api.o cipher.o compress.o memneq.o
 
 obj-$(CONFIG_CRYPTO_WORKQUEUE) += crypto_wq.o
 
+obj-$(CONFIG_CRYPTO_ENGINE) += crypto_engine.o
 obj-$(CONFIG_CRYPTO_FIPS) += fips.o
 
 crypto_algapi-$(CONFIG_PROC_FS) += proc.o
@@ -28,7 +29,6 @@ crypto_hash-y += ahash.o
 crypto_hash-y += shash.o
 obj-$(CONFIG_CRYPTO_HASH2) += crypto_hash.o
 
-obj-$(CONFIG_CRYPTO_PCOMP2) += pcompress.o
 obj-$(CONFIG_CRYPTO_AKCIPHER2) += akcipher.o
 
 $(obj)/rsapubkey-asn1.o: $(obj)/rsapubkey-asn1.c $(obj)/rsapubkey-asn1.h
@@ -99,10 +99,9 @@ obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
 obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
 obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
 obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o
-obj-$(CONFIG_CRYPTO_ZLIB) += zlib.o
 obj-$(CONFIG_CRYPTO_MICHAEL_MIC) += michael_mic.o
 obj-$(CONFIG_CRYPTO_CRC32C) += crc32c_generic.o
-obj-$(CONFIG_CRYPTO_CRC32) += crc32.o
+obj-$(CONFIG_CRYPTO_CRC32) += crc32_generic.o
 obj-$(CONFIG_CRYPTO_CRCT10DIF) += crct10dif_common.o crct10dif_generic.o
 obj-$(CONFIG_CRYPTO_AUTHENC) += authenc.o authencesn.o
 obj-$(CONFIG_CRYPTO_LZO) += lzo.o
index d19b52324cf520ee777743ee895efb2f537f5e62..5fc1f172963dc6914f0f6def8435943acd67dfe7 100644 (file)
@@ -166,24 +166,6 @@ int crypto_ahash_walk_first(struct ahash_request *req,
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
 
-int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
-                                 struct crypto_hash_walk *walk,
-                                 struct scatterlist *sg, unsigned int len)
-{
-       walk->total = len;
-
-       if (!walk->total) {
-               walk->entrylen = 0;
-               return 0;
-       }
-
-       walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
-       walk->sg = sg;
-       walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;
-
-       return hash_walk_new_entry(walk);
-}
-
 static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
                                unsigned int keylen)
 {
@@ -542,6 +524,12 @@ struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
 
+int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
+{
+       return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
+}
+EXPORT_SYMBOL_GPL(crypto_has_ahash);
+
 static int ahash_prepare_alg(struct ahash_alg *alg)
 {
        struct crypto_alg *base = &alg->halg.base;
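
The new crypto_has_ahash() mirrors the existing crypto_has_alg() probe for the ahash type. A small sketch of a caller checking for an algorithm before allocating it, assuming the matching declaration in crypto/hash.h from this series; the algorithm name and helper are just examples:

#include <crypto/hash.h>
#include <linux/printk.h>
#include <linux/types.h>

static bool demo_have_sha256_ahash(void)
{
	/* Nonzero when an ahash provider for "sha256" is registered. */
	if (crypto_has_ahash("sha256", 0, 0)) {
		pr_info("sha256 ahash is available\n");
		return true;
	}
	return false;
}
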
index 7be76aa315796dfad085ad74be5da35204bda9e6..731255a6104f7e3482c584e4f7b737b4d0b94eef 100644 (file)
@@ -987,6 +987,21 @@ unsigned int crypto_alg_extsize(struct crypto_alg *alg)
 }
 EXPORT_SYMBOL_GPL(crypto_alg_extsize);
 
+int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
+                       u32 type, u32 mask)
+{
+       int ret = 0;
+       struct crypto_alg *alg = crypto_find_alg(name, frontend, type, mask);
+
+       if (!IS_ERR(alg)) {
+               crypto_mod_put(alg);
+               ret = 1;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_type_has_alg);
+
 static int __init crypto_algapi_init(void)
 {
        crypto_init_proc();
index 608a7562839d2fe549fbebd8714733436ed2c481..68a5ceaa04c81072f453a7b2ca2605bed39bd5a1 100644 (file)
@@ -54,7 +54,8 @@ static int hash_sendmsg(struct socket *sock, struct msghdr *msg,
 
        lock_sock(sk);
        if (!ctx->more) {
-               err = crypto_ahash_init(&ctx->req);
+               err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req),
+                                               &ctx->completion);
                if (err)
                        goto unlock;
        }
@@ -125,6 +126,7 @@ static ssize_t hash_sendpage(struct socket *sock, struct page *page,
        } else {
                if (!ctx->more) {
                        err = crypto_ahash_init(&ctx->req);
+                       err = af_alg_wait_for_completion(err, &ctx->completion);
                        if (err)
                                goto unlock;
                }
index 38c1aa89d3a0a7c4448bcd094ca1f268b9156a2d..28556fce42671e2f182d5239d3dc6468e5b1d970 100644 (file)
@@ -65,18 +65,10 @@ struct skcipher_async_req {
        struct skcipher_async_rsgl first_sgl;
        struct list_head list;
        struct scatterlist *tsg;
-       char iv[];
+       atomic_t *inflight;
+       struct skcipher_request req;
 };
 
-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
-       crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
-       crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
-       crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
 #define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
                      sizeof(struct scatterlist) - 1)
 
@@ -102,15 +94,12 @@ static void skcipher_free_async_sgls(struct skcipher_async_req *sreq)
 
 static void skcipher_async_cb(struct crypto_async_request *req, int err)
 {
-       struct sock *sk = req->data;
-       struct alg_sock *ask = alg_sk(sk);
-       struct skcipher_ctx *ctx = ask->private;
-       struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+       struct skcipher_async_req *sreq = req->data;
        struct kiocb *iocb = sreq->iocb;
 
-       atomic_dec(&ctx->inflight);
+       atomic_dec(sreq->inflight);
        skcipher_free_async_sgls(sreq);
-       kfree(req);
+       kzfree(sreq);
        iocb->ki_complete(iocb, err, err);
 }
 
@@ -306,8 +295,11 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
-       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+       struct skcipher_tfm *skc = pask->private;
+       struct crypto_skcipher *tfm = skc->skcipher;
        unsigned ivsize = crypto_skcipher_ivsize(tfm);
        struct skcipher_sg_list *sgl;
        struct af_alg_control con = {};
@@ -509,37 +501,43 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
+       struct skcipher_tfm *skc = pask->private;
+       struct crypto_skcipher *tfm = skc->skcipher;
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        struct skcipher_async_req *sreq;
        struct skcipher_request *req;
        struct skcipher_async_rsgl *last_rsgl = NULL;
-       unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
-       unsigned int reqlen = sizeof(struct skcipher_async_req) +
-                               GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+       unsigned int txbufs = 0, len = 0, tx_nents;
+       unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+       unsigned int ivsize = crypto_skcipher_ivsize(tfm);
        int err = -ENOMEM;
        bool mark = false;
+       char *iv;
 
-       lock_sock(sk);
-       req = kmalloc(reqlen, GFP_KERNEL);
-       if (unlikely(!req))
-               goto unlock;
+       sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+       if (unlikely(!sreq))
+               goto out;
 
-       sreq = GET_SREQ(req, ctx);
+       req = &sreq->req;
+       iv = (char *)(req + 1) + reqsize;
        sreq->iocb = msg->msg_iocb;
-       memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
        INIT_LIST_HEAD(&sreq->list);
+       sreq->inflight = &ctx->inflight;
+
+       lock_sock(sk);
+       tx_nents = skcipher_all_sg_nents(ctx);
        sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
-       if (unlikely(!sreq->tsg)) {
-               kfree(req);
+       if (unlikely(!sreq->tsg))
                goto unlock;
-       }
        sg_init_table(sreq->tsg, tx_nents);
-       memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
-       skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
-       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                     skcipher_async_cb, sk);
+       memcpy(iv, ctx->iv, ivsize);
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     skcipher_async_cb, sreq);
 
        while (iov_iter_count(&msg->msg_iter)) {
                struct skcipher_async_rsgl *rsgl;
@@ -615,20 +613,22 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
                sg_mark_end(sreq->tsg + txbufs - 1);
 
        skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
-                                  len, sreq->iv);
+                                  len, iv);
        err = ctx->enc ? crypto_skcipher_encrypt(req) :
                         crypto_skcipher_decrypt(req);
        if (err == -EINPROGRESS) {
                atomic_inc(&ctx->inflight);
                err = -EIOCBQUEUED;
+               sreq = NULL;
                goto unlock;
        }
 free:
        skcipher_free_async_sgls(sreq);
-       kfree(req);
 unlock:
        skcipher_wmem_wakeup(sk);
        release_sock(sk);
+       kzfree(sreq);
+out:
        return err;
 }
 
@@ -637,9 +637,12 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
 {
        struct sock *sk = sock->sk;
        struct alg_sock *ask = alg_sk(sk);
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
        struct skcipher_ctx *ctx = ask->private;
-       unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
-               &ctx->req));
+       struct skcipher_tfm *skc = pask->private;
+       struct crypto_skcipher *tfm = skc->skcipher;
+       unsigned bs = crypto_skcipher_blocksize(tfm);
        struct skcipher_sg_list *sgl;
        struct scatterlist *sg;
        int err = -EAGAIN;
@@ -947,7 +950,8 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
        ask->private = ctx;
 
        skcipher_request_set_tfm(&ctx->req, skcipher);
-       skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+       skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+                                                CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      af_alg_complete, &ctx->completion);
 
        sk->sk_destruct = skcipher_sock_destruct;
index 758acabf2d819727ecd7f4cf0c6aa21c1c40649f..8f3056cd03991ddf820c3d8442d35daf3db7ee84 100644 (file)
@@ -547,9 +547,7 @@ int pkcs7_sig_note_set_of_authattrs(void *context, size_t hdrlen,
        struct pkcs7_signed_info *sinfo = ctx->sinfo;
 
        if (!test_bit(sinfo_has_content_type, &sinfo->aa_set) ||
-           !test_bit(sinfo_has_message_digest, &sinfo->aa_set) ||
-           (ctx->msg->data_type == OID_msIndirectData &&
-            !test_bit(sinfo_has_ms_opus_info, &sinfo->aa_set))) {
+           !test_bit(sinfo_has_message_digest, &sinfo->aa_set)) {
                pr_warn("Missing required AuthAttr\n");
                return -EBADMSG;
        }
similarity index 98%
rename from crypto/crc32.c
rename to crypto/crc32_generic.c
index 187ded28cb0bd76825475dfd3b4684d8043de752..aa2a25fc7482a28d0ef3fcb22cdd4edabeed8d98 100644 (file)
@@ -131,7 +131,7 @@ static struct shash_alg alg = {
        .digestsize     = CHKSUM_DIGEST_SIZE,
        .base           = {
                .cra_name               = "crc32",
-               .cra_driver_name        = "crc32-table",
+               .cra_driver_name        = "crc32-generic",
                .cra_priority           = 100,
                .cra_blocksize          = CHKSUM_BLOCK_SIZE,
                .cra_ctxsize            = sizeof(u32),
@@ -157,3 +157,4 @@ MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
 MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32-generic");
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
new file mode 100644 (file)
index 0000000..a55c82d
--- /dev/null
@@ -0,0 +1,355 @@
+/*
+ * Handle async block request by crypto hardware engine.
+ *
+ * Copyright (C) 2016 Linaro, Inc.
+ *
+ * Author: Baolin Wang <baolin.wang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/delay.h>
+#include "internal.h"
+
+#define CRYPTO_ENGINE_MAX_QLEN 10
+
+void crypto_finalize_request(struct crypto_engine *engine,
+                            struct ablkcipher_request *req, int err);
+
+/**
+ * crypto_pump_requests - dequeue one request from engine queue to process
+ * @engine: the hardware engine
+ * @in_kthread: true if we are in the context of the request pump thread
+ *
+ * This function checks whether there is any request in the engine queue that
+ * needs processing and, if so, calls out to the driver to initialize the
+ * hardware and handle the request.
+ */
+static void crypto_pump_requests(struct crypto_engine *engine,
+                                bool in_kthread)
+{
+       struct crypto_async_request *async_req, *backlog;
+       struct ablkcipher_request *req;
+       unsigned long flags;
+       bool was_busy = false;
+       int ret;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+
+       /* Make sure we are not already running a request */
+       if (engine->cur_req)
+               goto out;
+
+       /* If another context is idling then defer */
+       if (engine->idling) {
+               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+               goto out;
+       }
+
+       /* Check if the engine queue is idle */
+       if (!crypto_queue_len(&engine->queue) || !engine->running) {
+               if (!engine->busy)
+                       goto out;
+
+               /* Only do teardown in the thread */
+               if (!in_kthread) {
+                       queue_kthread_work(&engine->kworker,
+                                          &engine->pump_requests);
+                       goto out;
+               }
+
+               engine->busy = false;
+               engine->idling = true;
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+               if (engine->unprepare_crypt_hardware &&
+                   engine->unprepare_crypt_hardware(engine))
+                       pr_err("failed to unprepare crypt hardware\n");
+
+               spin_lock_irqsave(&engine->queue_lock, flags);
+               engine->idling = false;
+               goto out;
+       }
+
+       /* Get the first request from the engine queue to handle */
+       backlog = crypto_get_backlog(&engine->queue);
+       async_req = crypto_dequeue_request(&engine->queue);
+       if (!async_req)
+               goto out;
+
+       req = ablkcipher_request_cast(async_req);
+
+       engine->cur_req = req;
+       if (backlog)
+               backlog->complete(backlog, -EINPROGRESS);
+
+       if (engine->busy)
+               was_busy = true;
+       else
+               engine->busy = true;
+
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+       /* At this point we have successfully dequeued a request to process */
+       if (!was_busy && engine->prepare_crypt_hardware) {
+               ret = engine->prepare_crypt_hardware(engine);
+               if (ret) {
+                       pr_err("failed to prepare crypt hardware\n");
+                       goto req_err;
+               }
+       }
+
+       if (engine->prepare_request) {
+               ret = engine->prepare_request(engine, engine->cur_req);
+               if (ret) {
+                       pr_err("failed to prepare request: %d\n", ret);
+                       goto req_err;
+               }
+               engine->cur_req_prepared = true;
+       }
+
+       ret = engine->crypt_one_request(engine, engine->cur_req);
+       if (ret) {
+               pr_err("failed to crypt one request from queue\n");
+               goto req_err;
+       }
+       return;
+
+req_err:
+       crypto_finalize_request(engine, engine->cur_req, ret);
+       return;
+
+out:
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+}
+
+static void crypto_pump_work(struct kthread_work *work)
+{
+       struct crypto_engine *engine =
+               container_of(work, struct crypto_engine, pump_requests);
+
+       crypto_pump_requests(engine, true);
+}
+
+/**
+ * crypto_transfer_request - transfer the new request into the engine queue
+ * @engine: the hardware engine
+ * @req: the request to be queued on the engine
+ */
+int crypto_transfer_request(struct crypto_engine *engine,
+                           struct ablkcipher_request *req, bool need_pump)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+
+       if (!engine->running) {
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+               return -ESHUTDOWN;
+       }
+
+       ret = ablkcipher_enqueue_request(&engine->queue, req);
+
+       if (!engine->busy && need_pump)
+               queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_request);
+
+/**
+ * crypto_transfer_request_to_engine - transfer one request into the engine
+ * queue and pump it if needed
+ * @engine: the hardware engine
+ * @req: the request to be queued on the engine
+ */
+int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+                                     struct ablkcipher_request *req)
+{
+       return crypto_transfer_request(engine, req, true);
+}
+EXPORT_SYMBOL_GPL(crypto_transfer_request_to_engine);
+
+/**
+ * crypto_finalize_request - finalize one request once it has been processed
+ * @engine: the hardware engine
+ * @req: the request that needs to be finalized
+ * @err: error number
+ */
+void crypto_finalize_request(struct crypto_engine *engine,
+                            struct ablkcipher_request *req, int err)
+{
+       unsigned long flags;
+       bool finalize_cur_req = false;
+       int ret;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+       if (engine->cur_req == req)
+               finalize_cur_req = true;
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+       if (finalize_cur_req) {
+               if (engine->cur_req_prepared && engine->unprepare_request) {
+                       ret = engine->unprepare_request(engine, req);
+                       if (ret)
+                               pr_err("failed to unprepare request\n");
+               }
+
+               spin_lock_irqsave(&engine->queue_lock, flags);
+               engine->cur_req = NULL;
+               engine->cur_req_prepared = false;
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+       }
+
+       req->base.complete(&req->base, err);
+
+       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+}
+EXPORT_SYMBOL_GPL(crypto_finalize_request);
+
+/**
+ * crypto_engine_start - start the hardware engine
+ * @engine: the hardware engine that needs to be started
+ *
+ * Return 0 on success, or a negative error code on failure.
+ */
+int crypto_engine_start(struct crypto_engine *engine)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+
+       if (engine->running || engine->busy) {
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+               return -EBUSY;
+       }
+
+       engine->running = true;
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+       queue_kthread_work(&engine->kworker, &engine->pump_requests);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_start);
+
+/**
+ * crypto_engine_stop - stop the hardware engine
+ * @engine: the hardware engine that needs to be stopped
+ *
+ * Return 0 on success, or a negative error code on failure.
+ */
+int crypto_engine_stop(struct crypto_engine *engine)
+{
+       unsigned long flags;
+       unsigned limit = 500;
+       int ret = 0;
+
+       spin_lock_irqsave(&engine->queue_lock, flags);
+
+       /*
+        * If the engine queue is not empty or the engine is busy, wait a
+        * while for the queued requests to be pumped and completed.
+        */
+       while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
+               spin_unlock_irqrestore(&engine->queue_lock, flags);
+               msleep(20);
+               spin_lock_irqsave(&engine->queue_lock, flags);
+       }
+
+       if (crypto_queue_len(&engine->queue) || engine->busy)
+               ret = -EBUSY;
+       else
+               engine->running = false;
+
+       spin_unlock_irqrestore(&engine->queue_lock, flags);
+
+       if (ret)
+               pr_warn("could not stop engine\n");
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_stop);
+
+/**
+ * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * initialize it.
+ * @dev: the device to which the hardware engine is attached
+ * @rt: whether this queue is set to run as a realtime task
+ *
+ * This must be called from context that can sleep.
+ * Return: the crypto engine structure on success, else NULL.
+ */
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+{
+       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+       struct crypto_engine *engine;
+
+       if (!dev)
+               return NULL;
+
+       engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
+       if (!engine)
+               return NULL;
+
+       engine->rt = rt;
+       engine->running = false;
+       engine->busy = false;
+       engine->idling = false;
+       engine->cur_req_prepared = false;
+       engine->priv_data = dev;
+       snprintf(engine->name, sizeof(engine->name),
+                "%s-engine", dev_name(dev));
+
+       crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
+       spin_lock_init(&engine->queue_lock);
+
+       init_kthread_worker(&engine->kworker);
+       engine->kworker_task = kthread_run(kthread_worker_fn,
+                                          &engine->kworker, "%s",
+                                          engine->name);
+       if (IS_ERR(engine->kworker_task)) {
+               dev_err(dev, "failed to create crypto request pump task\n");
+               return NULL;
+       }
+       init_kthread_work(&engine->pump_requests, crypto_pump_work);
+
+       if (engine->rt) {
+               dev_info(dev, "will run requests pump with realtime priority\n");
+               sched_setscheduler(engine->kworker_task, SCHED_FIFO, &param);
+       }
+
+       return engine;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
+
+/**
+ * crypto_engine_exit - free the hardware engine resources on exit
+ * @engine: the hardware engine that needs to be freed
+ *
+ * Return 0 for success.
+ */
+int crypto_engine_exit(struct crypto_engine *engine)
+{
+       int ret;
+
+       ret = crypto_engine_stop(engine);
+       if (ret)
+               return ret;
+
+       flush_kthread_worker(&engine->kworker);
+       kthread_stop(engine->kworker_task);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(crypto_engine_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Crypto hardware engine framework");
index 237f3795cfaaa1f988fadf5b07eefe3c44609091..43fe85f20d577b4f3d1bbd6576b6d752bc578531 100644 (file)
@@ -499,6 +499,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (link->dump == NULL)
                        return -EINVAL;
 
+               down_read(&crypto_alg_sem);
                list_for_each_entry(alg, &crypto_alg_list, cra_list)
                        dump_alloc += CRYPTO_REPORT_MAXSIZE;
 
@@ -508,8 +509,11 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                                .done = link->done,
                                .min_dump_alloc = dump_alloc,
                        };
-                       return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+                       err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
                }
+               up_read(&crypto_alg_sem);
+
+               return err;
        }
 
        err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
index ab6ef1d0856896e140580b93eb1ee5051b4e76ef..1b86310db7b1e9aca14e5c4151a34ccdf6c375cd 100644 (file)
@@ -219,48 +219,6 @@ static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
        }
 }
 
-/*
- * FIPS 140-2 continuous self test
- * The test is performed on the result of one round of the output
- * function. Thus, the function implicitly knows the size of the
- * buffer.
- *
- * @drbg DRBG handle
- * @buf output buffer of random data to be checked
- *
- * return:
- *     true on success
- *     false on error
- */
-static bool drbg_fips_continuous_test(struct drbg_state *drbg,
-                                     const unsigned char *buf)
-{
-#ifdef CONFIG_CRYPTO_FIPS
-       int ret = 0;
-       /* skip test if we test the overall system */
-       if (list_empty(&drbg->test_data.list))
-               return true;
-       /* only perform test in FIPS mode */
-       if (0 == fips_enabled)
-               return true;
-       if (!drbg->fips_primed) {
-               /* Priming of FIPS test */
-               memcpy(drbg->prev, buf, drbg_blocklen(drbg));
-               drbg->fips_primed = true;
-               /* return false due to priming, i.e. another round is needed */
-               return false;
-       }
-       ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
-       if (!ret)
-               panic("DRBG continuous self test failed\n");
-       memcpy(drbg->prev, buf, drbg_blocklen(drbg));
-       /* the test shall pass when the two compared values are not equal */
-       return ret != 0;
-#else
-       return true;
-#endif /* CONFIG_CRYPTO_FIPS */
-}
-
 /*
  * Convert an integer into a byte representation of this integer.
  * The byte representation is big-endian
@@ -603,11 +561,6 @@ static int drbg_ctr_generate(struct drbg_state *drbg,
                }
                outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
                          drbg_blocklen(drbg) : (buflen - len);
-               if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) {
-                       /* 10.2.1.5.2 step 6 */
-                       crypto_inc(drbg->V, drbg_blocklen(drbg));
-                       continue;
-               }
                /* 10.2.1.5.2 step 4.3 */
                memcpy(buf + len, drbg->scratchpad, outlen);
                len += outlen;
@@ -733,8 +686,6 @@ static int drbg_hmac_generate(struct drbg_state *drbg,
                        return ret;
                outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
                          drbg_blocklen(drbg) : (buflen - len);
-               if (!drbg_fips_continuous_test(drbg, drbg->V))
-                       continue;
 
                /* 10.1.2.5 step 4.2 */
                memcpy(buf + len, drbg->V, outlen);
@@ -963,10 +914,6 @@ static int drbg_hash_hashgen(struct drbg_state *drbg,
                }
                outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
                          drbg_blocklen(drbg) : (buflen - len);
-               if (!drbg_fips_continuous_test(drbg, dst)) {
-                       crypto_inc(src, drbg_statelen(drbg));
-                       continue;
-               }
                /* 10.1.1.4 step hashgen 4.2 */
                memcpy(buf + len, dst, outlen);
                len += outlen;
@@ -1201,11 +1148,6 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
        drbg->reseed_ctr = 0;
        drbg->d_ops = NULL;
        drbg->core = NULL;
-#ifdef CONFIG_CRYPTO_FIPS
-       kzfree(drbg->prev);
-       drbg->prev = NULL;
-       drbg->fips_primed = false;
-#endif
 }
 
 /*
@@ -1244,12 +1186,6 @@ static inline int drbg_alloc_state(struct drbg_state *drbg)
        drbg->C = kmalloc(drbg_statelen(drbg), GFP_KERNEL);
        if (!drbg->C)
                goto err;
-#ifdef CONFIG_CRYPTO_FIPS
-       drbg->prev = kmalloc(drbg_blocklen(drbg), GFP_KERNEL);
-       if (!drbg->prev)
-               goto err;
-       drbg->fips_primed = false;
-#endif
        /* scratchpad is only generated for CTR and Hash */
        if (drbg->core->flags & DRBG_HMAC)
                sb_size = 0;
index 00e42a3ed81431638b78a8df1616de978da1eaa6..7eefcdb00227740e39a22d61515d016b4f36caa0 100644 (file)
@@ -104,6 +104,9 @@ int crypto_probing_notify(unsigned long val, void *v);
 
 unsigned int crypto_alg_extsize(struct crypto_alg *alg);
 
+int crypto_type_has_alg(const char *name, const struct crypto_type *frontend,
+                       u32 type, u32 mask);
+
 static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
 {
        atomic_inc(&alg->cra_refcnt);
index b1d106ce55f3d9c98bba9a54f301d4d94f12d982..72014f963ba7a65cbb3dd856a63f25ab647c3209 100644 (file)
@@ -212,7 +212,7 @@ static int crypto_kw_decrypt(struct blkcipher_desc *desc,
                          SEMIBSIZE))
                ret = -EBADMSG;
 
-       memzero_explicit(&block, sizeof(struct crypto_kw_block));
+       memzero_explicit(block, sizeof(struct crypto_kw_block));
 
        return ret;
 }
@@ -297,7 +297,7 @@ static int crypto_kw_encrypt(struct blkcipher_desc *desc,
        /* establish the IV for the caller to pick up */
        memcpy(desc->info, block->A, SEMIBSIZE);
 
-       memzero_explicit(&block, sizeof(struct crypto_kw_block));
+       memzero_explicit(block, sizeof(struct crypto_kw_block));
 
        return 0;
 }
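
The two hunks above fix a classic pointer mix-up: the change from &block to block indicates that block is a pointer, so the old code wiped sizeof(struct crypto_kw_block) bytes starting at the stack slot holding the pointer rather than the block it points to. A stand-alone user-space sketch of the same bug class (struct block and clear_block() are illustrative):

#include <stdio.h>
#include <string.h>

struct block { unsigned char a[8], r[8]; };

/* "buf" is a pointer, so memset(&buf, ...) would clobber the pointer
 * variable (and whatever follows it on the stack) instead of clearing
 * the buffer it points to.
 */
static void clear_block(struct block *buf)
{
	memset(buf, 0, sizeof(*buf));	/* correct: clears the block        */
	/* memset(&buf, 0, sizeof(struct block));   wrong: clears &buf area */
}

int main(void)
{
	struct block b = { { 1 }, { 2 } };

	clear_block(&b);
	printf("%d %d\n", b.a[0], b.r[0]);	/* prints: 0 0 */
	return 0;
}
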
index f78d4fc4e38a3fb842463229b3fa013d7c0a903c..c4eb9da49d4f55e856ea4199fd0442af77d9fd92 100644 (file)
@@ -522,6 +522,7 @@ static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
        inst->alg.halg.base.cra_flags = type;
 
        inst->alg.halg.digestsize = salg->digestsize;
+       inst->alg.halg.statesize = salg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
 
        inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
deleted file mode 100644 (file)
index 7a13b40..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Partial (de)compression operations.
- *
- * Copyright 2008 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- * If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/crypto.h>
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/cryptouser.h>
-#include <net/netlink.h>
-
-#include <crypto/compress.h>
-#include <crypto/internal/compress.h>
-
-#include "internal.h"
-
-
-static int crypto_pcomp_init(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
-       return 0;
-}
-
-static int crypto_pcomp_init_tfm(struct crypto_tfm *tfm)
-{
-       return 0;
-}
-
-#ifdef CONFIG_NET
-static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       struct crypto_report_comp rpcomp;
-
-       strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
-       if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
-                   sizeof(struct crypto_report_comp), &rpcomp))
-               goto nla_put_failure;
-       return 0;
-
-nla_put_failure:
-       return -EMSGSIZE;
-}
-#else
-static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
-{
-       return -ENOSYS;
-}
-#endif
-
-static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
-       __attribute__ ((unused));
-static void crypto_pcomp_show(struct seq_file *m, struct crypto_alg *alg)
-{
-       seq_printf(m, "type         : pcomp\n");
-}
-
-static const struct crypto_type crypto_pcomp_type = {
-       .extsize        = crypto_alg_extsize,
-       .init           = crypto_pcomp_init,
-       .init_tfm       = crypto_pcomp_init_tfm,
-#ifdef CONFIG_PROC_FS
-       .show           = crypto_pcomp_show,
-#endif
-       .report         = crypto_pcomp_report,
-       .maskclear      = ~CRYPTO_ALG_TYPE_MASK,
-       .maskset        = CRYPTO_ALG_TYPE_MASK,
-       .type           = CRYPTO_ALG_TYPE_PCOMPRESS,
-       .tfmsize        = offsetof(struct crypto_pcomp, base),
-};
-
-struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
-                                       u32 mask)
-{
-       return crypto_alloc_tfm(alg_name, &crypto_pcomp_type, type, mask);
-}
-EXPORT_SYMBOL_GPL(crypto_alloc_pcomp);
-
-int crypto_register_pcomp(struct pcomp_alg *alg)
-{
-       struct crypto_alg *base = &alg->base;
-
-       base->cra_type = &crypto_pcomp_type;
-       base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
-       base->cra_flags |= CRYPTO_ALG_TYPE_PCOMPRESS;
-
-       return crypto_register_alg(base);
-}
-EXPORT_SYMBOL_GPL(crypto_register_pcomp);
-
-int crypto_unregister_pcomp(struct pcomp_alg *alg)
-{
-       return crypto_unregister_alg(&alg->base);
-}
-EXPORT_SYMBOL_GPL(crypto_unregister_pcomp);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Partial (de)compression type");
-MODULE_AUTHOR("Sony Corporation");
index 88a27de798480cc01eb0220cdf1f0002ddd897e4..a051541a4a1718c996ba7a7b678b5b9e5e857488 100644 (file)
@@ -354,11 +354,10 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
        crt->final = shash_async_final;
        crt->finup = shash_async_finup;
        crt->digest = shash_async_digest;
+       crt->setkey = shash_async_setkey;
+
+       crt->has_setkey = alg->setkey != shash_no_setkey;
 
-       if (alg->setkey) {
-               crt->setkey = shash_async_setkey;
-               crt->has_setkey = true;
-       }
        if (alg->export)
                crt->export = shash_async_export;
        if (alg->import)
@@ -369,151 +368,6 @@ int crypto_init_shash_ops_async(struct crypto_tfm *tfm)
        return 0;
 }
 
-static int shash_compat_setkey(struct crypto_hash *tfm, const u8 *key,
-                              unsigned int keylen)
-{
-       struct shash_desc **descp = crypto_hash_ctx(tfm);
-       struct shash_desc *desc = *descp;
-
-       return crypto_shash_setkey(desc->tfm, key, keylen);
-}
-
-static int shash_compat_init(struct hash_desc *hdesc)
-{
-       struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
-       struct shash_desc *desc = *descp;
-
-       desc->flags = hdesc->flags;
-
-       return crypto_shash_init(desc);
-}
-
-static int shash_compat_update(struct hash_desc *hdesc, struct scatterlist *sg,
-                              unsigned int len)
-{
-       struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
-       struct shash_desc *desc = *descp;
-       struct crypto_hash_walk walk;
-       int nbytes;
-
-       for (nbytes = crypto_hash_walk_first_compat(hdesc, &walk, sg, len);
-            nbytes > 0; nbytes = crypto_hash_walk_done(&walk, nbytes))
-               nbytes = crypto_shash_update(desc, walk.data, nbytes);
-
-       return nbytes;
-}
-
-static int shash_compat_final(struct hash_desc *hdesc, u8 *out)
-{
-       struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
-
-       return crypto_shash_final(*descp, out);
-}
-
-static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
-                              unsigned int nbytes, u8 *out)
-{
-       unsigned int offset = sg->offset;
-       int err;
-
-       if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
-               struct shash_desc **descp = crypto_hash_ctx(hdesc->tfm);
-               struct shash_desc *desc = *descp;
-               void *data;
-
-               desc->flags = hdesc->flags;
-
-               data = kmap_atomic(sg_page(sg));
-               err = crypto_shash_digest(desc, data + offset, nbytes, out);
-               kunmap_atomic(data);
-               crypto_yield(desc->flags);
-               goto out;
-       }
-
-       err = shash_compat_init(hdesc);
-       if (err)
-               goto out;
-
-       err = shash_compat_update(hdesc, sg, nbytes);
-       if (err)
-               goto out;
-
-       err = shash_compat_final(hdesc, out);
-
-out:
-       return err;
-}
-
-static void crypto_exit_shash_ops_compat(struct crypto_tfm *tfm)
-{
-       struct shash_desc **descp = crypto_tfm_ctx(tfm);
-       struct shash_desc *desc = *descp;
-
-       crypto_free_shash(desc->tfm);
-       kzfree(desc);
-}
-
-static int crypto_init_shash_ops_compat(struct crypto_tfm *tfm)
-{
-       struct hash_tfm *crt = &tfm->crt_hash;
-       struct crypto_alg *calg = tfm->__crt_alg;
-       struct shash_alg *alg = __crypto_shash_alg(calg);
-       struct shash_desc **descp = crypto_tfm_ctx(tfm);
-       struct crypto_shash *shash;
-       struct shash_desc *desc;
-
-       if (!crypto_mod_get(calg))
-               return -EAGAIN;
-
-       shash = crypto_create_tfm(calg, &crypto_shash_type);
-       if (IS_ERR(shash)) {
-               crypto_mod_put(calg);
-               return PTR_ERR(shash);
-       }
-
-       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash),
-                      GFP_KERNEL);
-       if (!desc) {
-               crypto_free_shash(shash);
-               return -ENOMEM;
-       }
-
-       *descp = desc;
-       desc->tfm = shash;
-       tfm->exit = crypto_exit_shash_ops_compat;
-
-       crt->init = shash_compat_init;
-       crt->update = shash_compat_update;
-       crt->final  = shash_compat_final;
-       crt->digest = shash_compat_digest;
-       crt->setkey = shash_compat_setkey;
-
-       crt->digestsize = alg->digestsize;
-
-       return 0;
-}
-
-static int crypto_init_shash_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
-{
-       switch (mask & CRYPTO_ALG_TYPE_MASK) {
-       case CRYPTO_ALG_TYPE_HASH_MASK:
-               return crypto_init_shash_ops_compat(tfm);
-       }
-
-       return -EINVAL;
-}
-
-static unsigned int crypto_shash_ctxsize(struct crypto_alg *alg, u32 type,
-                                        u32 mask)
-{
-       switch (mask & CRYPTO_ALG_TYPE_MASK) {
-       case CRYPTO_ALG_TYPE_HASH_MASK:
-               return sizeof(struct shash_desc *);
-       }
-
-       return 0;
-}
-
 static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
 {
        struct crypto_shash *hash = __crypto_shash_cast(tfm);
@@ -560,9 +414,7 @@ static void crypto_shash_show(struct seq_file *m, struct crypto_alg *alg)
 }
 
 static const struct crypto_type crypto_shash_type = {
-       .ctxsize = crypto_shash_ctxsize,
        .extsize = crypto_alg_extsize,
-       .init = crypto_init_shash_ops,
        .init_tfm = crypto_shash_init_tfm,
 #ifdef CONFIG_PROC_FS
        .show = crypto_shash_show,
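
The hunks above make the shash-to-ahash wrapper install shash_async_setkey unconditionally and derive has_setkey by comparing the algorithm's setkey pointer against the shash_no_setkey default, while the old crypto_hash compat layer is dropped. A minimal caller-side sketch of how such a flag is typically consumed, assuming the crypto_ahash_has_setkey() accessor from the same series is available; the algorithm name and the trimmed error handling are illustrative only:

#include <crypto/hash.h>
#include <linux/err.h>

/* Sketch: only feed a key to transforms that actually implement setkey. */
static int example_setup_keyed_hash(const u8 *key, unsigned int keylen)
{
        struct crypto_ahash *tfm;
        int err = 0;

        tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        if (crypto_ahash_has_setkey(tfm))
                err = crypto_ahash_setkey(tfm, key, keylen);

        crypto_free_ahash(tfm);
        return err;
}
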
index d199c0b1751c91cbcc5a978aadb97cdf609e33ab..69230e9d4ac99499f621ff925f3f4265fa539f41 100644 (file)
@@ -118,7 +118,7 @@ static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
        skcipher->decrypt = skcipher_decrypt_blkcipher;
 
        skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
-       skcipher->has_setkey = calg->cra_blkcipher.max_keysize;
+       skcipher->keysize = calg->cra_blkcipher.max_keysize;
 
        return 0;
 }
@@ -211,7 +211,7 @@ static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
        skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
        skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
                            sizeof(struct ablkcipher_request);
-       skcipher->has_setkey = calg->cra_ablkcipher.max_keysize;
+       skcipher->keysize = calg->cra_ablkcipher.max_keysize;
 
        return 0;
 }
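
Here the blkcipher and ablkcipher init paths stop treating max_keysize as a has_setkey boolean and record it in a keysize field instead, so both the key length bound and the "needs a key" property can be recovered from one value. A rough sketch of such a predicate, assuming struct crypto_skcipher carries the keysize member being set above; the helper name is hypothetical, not part of this diff:

#include <crypto/skcipher.h>

/* Sketch: keyless ciphers (e.g. ecb(cipher_null)) report max_keysize == 0. */
static inline bool example_skcipher_needs_key(struct crypto_skcipher *tfm)
{
        return tfm->keysize != 0;
}
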
index 270bc4b82bd9aaf2625bbeddf426f0cfbfea0f7e..579dce07146389f9a38d036fe48b00f347d7f070 100644 (file)
@@ -554,164 +554,6 @@ out:
        crypto_free_blkcipher(tfm);
 }
 
-static int test_hash_jiffies_digest(struct hash_desc *desc,
-                                   struct scatterlist *sg, int blen,
-                                   char *out, int secs)
-{
-       unsigned long start, end;
-       int bcount;
-       int ret;
-
-       for (start = jiffies, end = start + secs * HZ, bcount = 0;
-            time_before(jiffies, end); bcount++) {
-               ret = crypto_hash_digest(desc, sg, blen, out);
-               if (ret)
-                       return ret;
-       }
-
-       printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / secs, ((long)bcount * blen) / secs);
-
-       return 0;
-}
-
-static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
-                            int blen, int plen, char *out, int secs)
-{
-       unsigned long start, end;
-       int bcount, pcount;
-       int ret;
-
-       if (plen == blen)
-               return test_hash_jiffies_digest(desc, sg, blen, out, secs);
-
-       for (start = jiffies, end = start + secs * HZ, bcount = 0;
-            time_before(jiffies, end); bcount++) {
-               ret = crypto_hash_init(desc);
-               if (ret)
-                       return ret;
-               for (pcount = 0; pcount < blen; pcount += plen) {
-                       ret = crypto_hash_update(desc, sg, plen);
-                       if (ret)
-                               return ret;
-               }
-               /* we assume there is enough space in 'out' for the result */
-               ret = crypto_hash_final(desc, out);
-               if (ret)
-                       return ret;
-       }
-
-       printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / secs, ((long)bcount * blen) / secs);
-
-       return 0;
-}
-
-static int test_hash_cycles_digest(struct hash_desc *desc,
-                                  struct scatterlist *sg, int blen, char *out)
-{
-       unsigned long cycles = 0;
-       int i;
-       int ret;
-
-       local_irq_disable();
-
-       /* Warm-up run. */
-       for (i = 0; i < 4; i++) {
-               ret = crypto_hash_digest(desc, sg, blen, out);
-               if (ret)
-                       goto out;
-       }
-
-       /* The real thing. */
-       for (i = 0; i < 8; i++) {
-               cycles_t start, end;
-
-               start = get_cycles();
-
-               ret = crypto_hash_digest(desc, sg, blen, out);
-               if (ret)
-                       goto out;
-
-               end = get_cycles();
-
-               cycles += end - start;
-       }
-
-out:
-       local_irq_enable();
-
-       if (ret)
-               return ret;
-
-       printk("%6lu cycles/operation, %4lu cycles/byte\n",
-              cycles / 8, cycles / (8 * blen));
-
-       return 0;
-}
-
-static int test_hash_cycles(struct hash_desc *desc, struct scatterlist *sg,
-                           int blen, int plen, char *out)
-{
-       unsigned long cycles = 0;
-       int i, pcount;
-       int ret;
-
-       if (plen == blen)
-               return test_hash_cycles_digest(desc, sg, blen, out);
-
-       local_irq_disable();
-
-       /* Warm-up run. */
-       for (i = 0; i < 4; i++) {
-               ret = crypto_hash_init(desc);
-               if (ret)
-                       goto out;
-               for (pcount = 0; pcount < blen; pcount += plen) {
-                       ret = crypto_hash_update(desc, sg, plen);
-                       if (ret)
-                               goto out;
-               }
-               ret = crypto_hash_final(desc, out);
-               if (ret)
-                       goto out;
-       }
-
-       /* The real thing. */
-       for (i = 0; i < 8; i++) {
-               cycles_t start, end;
-
-               start = get_cycles();
-
-               ret = crypto_hash_init(desc);
-               if (ret)
-                       goto out;
-               for (pcount = 0; pcount < blen; pcount += plen) {
-                       ret = crypto_hash_update(desc, sg, plen);
-                       if (ret)
-                               goto out;
-               }
-               ret = crypto_hash_final(desc, out);
-               if (ret)
-                       goto out;
-
-               end = get_cycles();
-
-               cycles += end - start;
-       }
-
-out:
-       local_irq_enable();
-
-       if (ret)
-               return ret;
-
-       printk("%6lu cycles/operation, %4lu cycles/byte\n",
-              cycles / 8, cycles / (8 * blen));
-
-       return 0;
-}
-
 static void test_hash_sg_init(struct scatterlist *sg)
 {
        int i;
@@ -723,69 +565,6 @@ static void test_hash_sg_init(struct scatterlist *sg)
        }
 }
 
-static void test_hash_speed(const char *algo, unsigned int secs,
-                           struct hash_speed *speed)
-{
-       struct scatterlist sg[TVMEMSIZE];
-       struct crypto_hash *tfm;
-       struct hash_desc desc;
-       static char output[1024];
-       int i;
-       int ret;
-
-       tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
-
-       if (IS_ERR(tfm)) {
-               printk(KERN_ERR "failed to load transform for %s: %ld\n", algo,
-                      PTR_ERR(tfm));
-               return;
-       }
-
-       printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo,
-                       get_driver_name(crypto_hash, tfm));
-
-       desc.tfm = tfm;
-       desc.flags = 0;
-
-       if (crypto_hash_digestsize(tfm) > sizeof(output)) {
-               printk(KERN_ERR "digestsize(%u) > outputbuffer(%zu)\n",
-                      crypto_hash_digestsize(tfm), sizeof(output));
-               goto out;
-       }
-
-       test_hash_sg_init(sg);
-       for (i = 0; speed[i].blen != 0; i++) {
-               if (speed[i].blen > TVMEMSIZE * PAGE_SIZE) {
-                       printk(KERN_ERR
-                              "template (%u) too big for tvmem (%lu)\n",
-                              speed[i].blen, TVMEMSIZE * PAGE_SIZE);
-                       goto out;
-               }
-
-               if (speed[i].klen)
-                       crypto_hash_setkey(tfm, tvmem[0], speed[i].klen);
-
-               printk(KERN_INFO "test%3u "
-                      "(%5u byte blocks,%5u bytes per update,%4u updates): ",
-                      i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
-
-               if (secs)
-                       ret = test_hash_jiffies(&desc, sg, speed[i].blen,
-                                               speed[i].plen, output, secs);
-               else
-                       ret = test_hash_cycles(&desc, sg, speed[i].blen,
-                                              speed[i].plen, output);
-
-               if (ret) {
-                       printk(KERN_ERR "hashing failed ret=%d\n", ret);
-                       break;
-               }
-       }
-
-out:
-       crypto_free_hash(tfm);
-}
-
 static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 {
        if (ret == -EINPROGRESS || ret == -EBUSY) {
@@ -945,8 +724,8 @@ out:
        return 0;
 }
 
-static void test_ahash_speed(const char *algo, unsigned int secs,
-                            struct hash_speed *speed)
+static void test_ahash_speed_common(const char *algo, unsigned int secs,
+                                   struct hash_speed *speed, unsigned mask)
 {
        struct scatterlist sg[TVMEMSIZE];
        struct tcrypt_result tresult;
@@ -955,7 +734,7 @@ static void test_ahash_speed(const char *algo, unsigned int secs,
        char *output;
        int i, ret;
 
-       tfm = crypto_alloc_ahash(algo, 0, 0);
+       tfm = crypto_alloc_ahash(algo, 0, mask);
        if (IS_ERR(tfm)) {
                pr_err("failed to load transform for %s: %ld\n",
                       algo, PTR_ERR(tfm));
@@ -1021,6 +800,18 @@ out:
        crypto_free_ahash(tfm);
 }
 
+static void test_ahash_speed(const char *algo, unsigned int secs,
+                            struct hash_speed *speed)
+{
+       return test_ahash_speed_common(algo, secs, speed, 0);
+}
+
+static void test_hash_speed(const char *algo, unsigned int secs,
+                           struct hash_speed *speed)
+{
+       return test_ahash_speed_common(algo, secs, speed, CRYPTO_ALG_ASYNC);
+}
+
 static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
 {
        if (ret == -EINPROGRESS || ret == -EBUSY) {
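
After this change the crypto_hash based speed path is gone and both entry points share test_ahash_speed_common(); the only difference is the allocation mask, since passing CRYPTO_ALG_ASYNC as the mask with a zero type makes crypto_alloc_ahash() reject asynchronous providers, preserving the old synchronous-only behaviour of test_hash_speed(). A condensed sketch of that allocation choice (the helper is illustrative, not part of the patch):

#include <crypto/hash.h>

/* Sketch: the mask argument is what distinguishes the two wrappers above. */
static struct crypto_ahash *example_alloc_hash(const char *algo, bool sync_only)
{
        /*
         * With type == 0, putting CRYPTO_ALG_ASYNC in the mask rejects
         * asynchronous implementations, so only synchronous providers
         * can be returned.
         */
        return crypto_alloc_ahash(algo, 0, sync_only ? CRYPTO_ALG_ASYNC : 0);
}
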
index ae8c57fd8bc7f855e4fb73145a1ec72a34540add..93f3527962eca23e1418c2c8f7691187228813f6 100644 (file)
@@ -96,13 +96,6 @@ struct comp_test_suite {
        } comp, decomp;
 };
 
-struct pcomp_test_suite {
-       struct {
-               struct pcomp_testvec *vecs;
-               unsigned int count;
-       } comp, decomp;
-};
-
 struct hash_test_suite {
        struct hash_testvec *vecs;
        unsigned int count;
@@ -133,7 +126,6 @@ struct alg_test_desc {
                struct aead_test_suite aead;
                struct cipher_test_suite cipher;
                struct comp_test_suite comp;
-               struct pcomp_test_suite pcomp;
                struct hash_test_suite hash;
                struct cprng_test_suite cprng;
                struct drbg_test_suite drbg;
@@ -198,6 +190,61 @@ static int wait_async_op(struct tcrypt_result *tr, int ret)
        return ret;
 }
 
+static int ahash_partial_update(struct ahash_request **preq,
+       struct crypto_ahash *tfm, struct hash_testvec *template,
+       void *hash_buff, int k, int temp, struct scatterlist *sg,
+       const char *algo, char *result, struct tcrypt_result *tresult)
+{
+       char *state;
+       struct ahash_request *req;
+       int statesize, ret = -EINVAL;
+
+       req = *preq;
+       statesize = crypto_ahash_statesize(
+                       crypto_ahash_reqtfm(req));
+       state = kmalloc(statesize, GFP_KERNEL);
+       if (!state) {
+               pr_err("alg: hash: Failed to alloc state for %s\n", algo);
+               goto out_nostate;
+       }
+       ret = crypto_ahash_export(req, state);
+       if (ret) {
+               pr_err("alg: hash: Failed to export() for %s\n", algo);
+               goto out;
+       }
+       ahash_request_free(req);
+       req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               pr_err("alg: hash: Failed to alloc request for %s\n", algo);
+               goto out_noreq;
+       }
+       ahash_request_set_callback(req,
+               CRYPTO_TFM_REQ_MAY_BACKLOG,
+               tcrypt_complete, tresult);
+
+       memcpy(hash_buff, template->plaintext + temp,
+               template->tap[k]);
+       sg_init_one(&sg[0], hash_buff, template->tap[k]);
+       ahash_request_set_crypt(req, sg, result, template->tap[k]);
+       ret = crypto_ahash_import(req, state);
+       if (ret) {
+               pr_err("alg: hash: Failed to import() for %s\n", algo);
+               goto out;
+       }
+       ret = wait_async_op(tresult, crypto_ahash_update(req));
+       if (ret)
+               goto out;
+       *preq = req;
+       ret = 0;
+       goto out_noreq;
+out:
+       ahash_request_free(req);
+out_noreq:
+       kfree(state);
+out_nostate:
+       return ret;
+}
+
 static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
                       unsigned int tcount, bool use_digest,
                       const int align_offset)
@@ -385,6 +432,84 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
                }
        }
 
+       /* partial update exercise */
+       j = 0;
+       for (i = 0; i < tcount; i++) {
+               /* alignment tests are only done with continuous buffers */
+               if (align_offset != 0)
+                       break;
+
+               if (template[i].np < 2)
+                       continue;
+
+               j++;
+               memset(result, 0, MAX_DIGEST_SIZE);
+
+               ret = -EINVAL;
+               hash_buff = xbuf[0];
+               memcpy(hash_buff, template[i].plaintext,
+                       template[i].tap[0]);
+               sg_init_one(&sg[0], hash_buff, template[i].tap[0]);
+
+               if (template[i].ksize) {
+                       crypto_ahash_clear_flags(tfm, ~0);
+                       if (template[i].ksize > MAX_KEYLEN) {
+                               pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+                                       j, algo, template[i].ksize, MAX_KEYLEN);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(key, template[i].key, template[i].ksize);
+                       ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
+                       if (ret) {
+                               pr_err("alg: hash: setkey failed on test %d for %s: ret=%d\n",
+                                       j, algo, -ret);
+                               goto out;
+                       }
+               }
+
+               ahash_request_set_crypt(req, sg, result, template[i].tap[0]);
+               ret = wait_async_op(&tresult, crypto_ahash_init(req));
+               if (ret) {
+                       pr_err("alg: hash: init failed on test %d for %s: ret=%d\n",
+                               j, algo, -ret);
+                       goto out;
+               }
+               ret = wait_async_op(&tresult, crypto_ahash_update(req));
+               if (ret) {
+                       pr_err("alg: hash: update failed on test %d for %s: ret=%d\n",
+                               j, algo, -ret);
+                       goto out;
+               }
+
+               temp = template[i].tap[0];
+               for (k = 1; k < template[i].np; k++) {
+                       ret = ahash_partial_update(&req, tfm, &template[i],
+                               hash_buff, k, temp, &sg[0], algo, result,
+                               &tresult);
+                       if (ret) {
+                               pr_err("alg: hash: partial update failed on test %d for %s: ret=%d\n",
+                                       j, algo, -ret);
+                               goto out_noreq;
+                       }
+                       temp += template[i].tap[k];
+               }
+               ret = wait_async_op(&tresult, crypto_ahash_final(req));
+               if (ret) {
+                       pr_err("alg: hash: final failed on test %d for %s: ret=%d\n",
+                               j, algo, -ret);
+                       goto out;
+               }
+               if (memcmp(result, template[i].digest,
+                          crypto_ahash_digestsize(tfm))) {
+                       pr_err("alg: hash: Partial Test %d failed for %s\n",
+                              j, algo);
+                       hexdump(result, crypto_ahash_digestsize(tfm));
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
        ret = 0;
 
 out:
@@ -488,6 +613,8 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  tcrypt_complete, &result);
 
+       iv_len = crypto_aead_ivsize(tfm);
+
        for (i = 0, j = 0; i < tcount; i++) {
                if (template[i].np)
                        continue;
@@ -508,7 +635,6 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
 
                memcpy(input, template[i].input, template[i].ilen);
                memcpy(assoc, template[i].assoc, template[i].alen);
-               iv_len = crypto_aead_ivsize(tfm);
                if (template[i].iv)
                        memcpy(iv, template[i].iv, iv_len);
                else
@@ -617,7 +743,7 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                j++;
 
                if (template[i].iv)
-                       memcpy(iv, template[i].iv, MAX_IVLEN);
+                       memcpy(iv, template[i].iv, iv_len);
                else
                        memset(iv, 0, MAX_IVLEN);
 
@@ -1293,183 +1419,6 @@ out:
        return ret;
 }
 
-static int test_pcomp(struct crypto_pcomp *tfm,
-                     struct pcomp_testvec *ctemplate,
-                     struct pcomp_testvec *dtemplate, int ctcount,
-                     int dtcount)
-{
-       const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
-       unsigned int i;
-       char result[COMP_BUF_SIZE];
-       int res;
-
-       for (i = 0; i < ctcount; i++) {
-               struct comp_request req;
-               unsigned int produced = 0;
-
-               res = crypto_compress_setup(tfm, ctemplate[i].params,
-                                           ctemplate[i].paramsize);
-               if (res) {
-                       pr_err("alg: pcomp: compression setup failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-
-               res = crypto_compress_init(tfm);
-               if (res) {
-                       pr_err("alg: pcomp: compression init failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-
-               memset(result, 0, sizeof(result));
-
-               req.next_in = ctemplate[i].input;
-               req.avail_in = ctemplate[i].inlen / 2;
-               req.next_out = result;
-               req.avail_out = ctemplate[i].outlen / 2;
-
-               res = crypto_compress_update(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: compression update failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               /* Add remaining input data */
-               req.avail_in += (ctemplate[i].inlen + 1) / 2;
-
-               res = crypto_compress_update(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: compression update failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               /* Provide remaining output space */
-               req.avail_out += COMP_BUF_SIZE - ctemplate[i].outlen / 2;
-
-               res = crypto_compress_final(tfm, &req);
-               if (res < 0) {
-                       pr_err("alg: pcomp: compression final failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               produced += res;
-
-               if (COMP_BUF_SIZE - req.avail_out != ctemplate[i].outlen) {
-                       pr_err("alg: comp: Compression test %d failed for %s: "
-                              "output len = %d (expected %d)\n", i + 1, algo,
-                              COMP_BUF_SIZE - req.avail_out,
-                              ctemplate[i].outlen);
-                       return -EINVAL;
-               }
-
-               if (produced != ctemplate[i].outlen) {
-                       pr_err("alg: comp: Compression test %d failed for %s: "
-                              "returned len = %u (expected %d)\n", i + 1,
-                              algo, produced, ctemplate[i].outlen);
-                       return -EINVAL;
-               }
-
-               if (memcmp(result, ctemplate[i].output, ctemplate[i].outlen)) {
-                       pr_err("alg: pcomp: Compression test %d failed for "
-                              "%s\n", i + 1, algo);
-                       hexdump(result, ctemplate[i].outlen);
-                       return -EINVAL;
-               }
-       }
-
-       for (i = 0; i < dtcount; i++) {
-               struct comp_request req;
-               unsigned int produced = 0;
-
-               res = crypto_decompress_setup(tfm, dtemplate[i].params,
-                                             dtemplate[i].paramsize);
-               if (res) {
-                       pr_err("alg: pcomp: decompression setup failed on "
-                              "test %d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-
-               res = crypto_decompress_init(tfm);
-               if (res) {
-                       pr_err("alg: pcomp: decompression init failed on test "
-                              "%d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-
-               memset(result, 0, sizeof(result));
-
-               req.next_in = dtemplate[i].input;
-               req.avail_in = dtemplate[i].inlen / 2;
-               req.next_out = result;
-               req.avail_out = dtemplate[i].outlen / 2;
-
-               res = crypto_decompress_update(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: decompression update failed on "
-                              "test %d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               /* Add remaining input data */
-               req.avail_in += (dtemplate[i].inlen + 1) / 2;
-
-               res = crypto_decompress_update(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: decompression update failed on "
-                              "test %d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               /* Provide remaining output space */
-               req.avail_out += COMP_BUF_SIZE - dtemplate[i].outlen / 2;
-
-               res = crypto_decompress_final(tfm, &req);
-               if (res < 0 && (res != -EAGAIN || req.avail_in)) {
-                       pr_err("alg: pcomp: decompression final failed on "
-                              "test %d for %s: error=%d\n", i + 1, algo, res);
-                       return res;
-               }
-               if (res > 0)
-                       produced += res;
-
-               if (COMP_BUF_SIZE - req.avail_out != dtemplate[i].outlen) {
-                       pr_err("alg: comp: Decompression test %d failed for "
-                              "%s: output len = %d (expected %d)\n", i + 1,
-                              algo, COMP_BUF_SIZE - req.avail_out,
-                              dtemplate[i].outlen);
-                       return -EINVAL;
-               }
-
-               if (produced != dtemplate[i].outlen) {
-                       pr_err("alg: comp: Decompression test %d failed for "
-                              "%s: returned len = %u (expected %d)\n", i + 1,
-                              algo, produced, dtemplate[i].outlen);
-                       return -EINVAL;
-               }
-
-               if (memcmp(result, dtemplate[i].output, dtemplate[i].outlen)) {
-                       pr_err("alg: pcomp: Decompression test %d failed for "
-                              "%s\n", i + 1, algo);
-                       hexdump(result, dtemplate[i].outlen);
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
-
-
 static int test_cprng(struct crypto_rng *tfm, struct cprng_testvec *template,
                      unsigned int tcount)
 {
@@ -1640,28 +1589,6 @@ static int alg_test_comp(const struct alg_test_desc *desc, const char *driver,
        return err;
 }
 
-static int alg_test_pcomp(const struct alg_test_desc *desc, const char *driver,
-                         u32 type, u32 mask)
-{
-       struct crypto_pcomp *tfm;
-       int err;
-
-       tfm = crypto_alloc_pcomp(driver, type, mask);
-       if (IS_ERR(tfm)) {
-               pr_err("alg: pcomp: Failed to load transform for %s: %ld\n",
-                      driver, PTR_ERR(tfm));
-               return PTR_ERR(tfm);
-       }
-
-       err = test_pcomp(tfm, desc->suite.pcomp.comp.vecs,
-                        desc->suite.pcomp.decomp.vecs,
-                        desc->suite.pcomp.comp.count,
-                        desc->suite.pcomp.decomp.count);
-
-       crypto_free_pcomp(tfm);
-       return err;
-}
-
 static int alg_test_hash(const struct alg_test_desc *desc, const char *driver,
                         u32 type, u32 mask)
 {
@@ -2081,7 +2008,6 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "ansi_cprng",
                .test = alg_test_cprng,
-               .fips_allowed = 1,
                .suite = {
                        .cprng = {
                                .vecs = ansi_cprng_aes_tv_template,
@@ -2132,6 +2058,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha1),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2177,6 +2104,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha224),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2190,6 +2118,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha256),cbc(aes))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2216,6 +2145,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha256),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2242,6 +2172,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha384),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -2254,6 +2185,7 @@ static const struct alg_test_desc alg_test_descs[] = {
                }
        }, {
                .alg = "authenc(hmac(sha512),cbc(aes))",
+               .fips_allowed = 1,
                .test = alg_test_aead,
                .suite = {
                        .aead = {
@@ -2281,6 +2213,7 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "authenc(hmac(sha512),cbc(des3_ede))",
                .test = alg_test_aead,
+               .fips_allowed = 1,
                .suite = {
                        .aead = {
                                .enc = {
@@ -3840,22 +3773,6 @@ static const struct alg_test_desc alg_test_descs[] = {
                                }
                        }
                }
-       }, {
-               .alg = "zlib",
-               .test = alg_test_pcomp,
-               .fips_allowed = 1,
-               .suite = {
-                       .pcomp = {
-                               .comp = {
-                                       .vecs = zlib_comp_tv_template,
-                                       .count = ZLIB_COMP_TEST_VECTORS
-                               },
-                               .decomp = {
-                                       .vecs = zlib_decomp_tv_template,
-                                       .count = ZLIB_DECOMP_TEST_VECTORS
-                               }
-                       }
-               }
        }
 };
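
The new partial-update pass added to __test_hash() drives ahash_partial_update(), which checks that a running hash can be exported, moved to a freshly allocated request and resumed via import. A reduced sketch of that round trip with simplified error handling; the names are placeholders, and the callback and crypt parameters still have to be re-set on the new request, as the test code above does:

#include <crypto/hash.h>
#include <linux/slab.h>

/*
 * Sketch of the export/import round trip: snapshot the running state,
 * rebuild the request, then resume. On failure the caller must stop
 * using *reqp.
 */
static int example_export_import(struct crypto_ahash *tfm,
                                 struct ahash_request **reqp)
{
        unsigned int statesize = crypto_ahash_statesize(tfm);
        struct ahash_request *req = *reqp;
        void *state;
        int err;

        state = kmalloc(statesize, GFP_KERNEL);
        if (!state)
                return -ENOMEM;

        err = crypto_ahash_export(req, state);          /* snapshot */
        if (err)
                goto out;

        ahash_request_free(req);
        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out;
        }

        /* callback and crypt parameters must be set again on the new request */
        err = crypto_ahash_import(req, state);          /* resume */
        if (err) {
                ahash_request_free(req);
                goto out;
        }
        *reqp = req;
out:
        kfree(state);
        return err;
}
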
 
index da0a8fd765f4ee2574db8ebcc671119e723b2a7a..487ec880e889c50659d9e033fb4b2261d593349d 100644 (file)
@@ -25,9 +25,6 @@
 #define _CRYPTO_TESTMGR_H
 
 #include <linux/netlink.h>
-#include <linux/zlib.h>
-
-#include <crypto/compress.h>
 
 #define MAX_DIGEST_SIZE                64
 #define MAX_TAP                        8
@@ -32268,14 +32265,6 @@ struct comp_testvec {
        char output[COMP_BUF_SIZE];
 };
 
-struct pcomp_testvec {
-       const void *params;
-       unsigned int paramsize;
-       int inlen, outlen;
-       char input[COMP_BUF_SIZE];
-       char output[COMP_BUF_SIZE];
-};
-
 /*
  * Deflate test vectors (null-terminated strings).
  * Params: winbits=-11, Z_DEFAULT_COMPRESSION, MAX_MEM_LEVEL.
@@ -32356,139 +32345,6 @@ static struct comp_testvec deflate_decomp_tv_template[] = {
        },
 };
 
-#define ZLIB_COMP_TEST_VECTORS 2
-#define ZLIB_DECOMP_TEST_VECTORS 2
-
-static const struct {
-       struct nlattr nla;
-       int val;
-} deflate_comp_params[] = {
-       {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_LEVEL,
-               },
-               .val                    = Z_DEFAULT_COMPRESSION,
-       }, {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_METHOD,
-               },
-               .val                    = Z_DEFLATED,
-       }, {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_WINDOWBITS,
-               },
-               .val                    = -11,
-       }, {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_MEMLEVEL,
-               },
-               .val                    = MAX_MEM_LEVEL,
-       }, {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_COMP_STRATEGY,
-               },
-               .val                    = Z_DEFAULT_STRATEGY,
-       }
-};
-
-static const struct {
-       struct nlattr nla;
-       int val;
-} deflate_decomp_params[] = {
-       {
-               .nla = {
-                       .nla_len        = NLA_HDRLEN + sizeof(int),
-                       .nla_type       = ZLIB_DECOMP_WINDOWBITS,
-               },
-               .val                    = -11,
-       }
-};
-
-static struct pcomp_testvec zlib_comp_tv_template[] = {
-       {
-               .params = &deflate_comp_params,
-               .paramsize = sizeof(deflate_comp_params),
-               .inlen  = 70,
-               .outlen = 38,
-               .input  = "Join us now and share the software "
-                       "Join us now and share the software ",
-               .output = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
-                         "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
-                         "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
-                         "\x48\x55\x28\xce\x4f\x2b\x29\x07"
-                         "\x71\xbc\x08\x2b\x01\x00",
-       }, {
-               .params = &deflate_comp_params,
-               .paramsize = sizeof(deflate_comp_params),
-               .inlen  = 191,
-               .outlen = 122,
-               .input  = "This document describes a compression method based on the DEFLATE"
-                       "compression algorithm.  This document defines the application of "
-                       "the DEFLATE algorithm to the IP Payload Compression Protocol.",
-               .output = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
-                         "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
-                         "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
-                         "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
-                         "\x68\x12\x51\xae\x76\x67\xd6\x27"
-                         "\x19\x88\x1a\xde\x85\xab\x21\xf2"
-                         "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
-                         "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
-                         "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
-                         "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
-                         "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
-                         "\x52\x37\xed\x0e\x52\x6b\x59\x02"
-                         "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
-                         "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
-                         "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
-                         "\xfa\x02",
-       },
-};
-
-static struct pcomp_testvec zlib_decomp_tv_template[] = {
-       {
-               .params = &deflate_decomp_params,
-               .paramsize = sizeof(deflate_decomp_params),
-               .inlen  = 122,
-               .outlen = 191,
-               .input  = "\x5d\x8d\x31\x0e\xc2\x30\x10\x04"
-                         "\xbf\xb2\x2f\xc8\x1f\x10\x04\x09"
-                         "\x89\xc2\x85\x3f\x70\xb1\x2f\xf8"
-                         "\x24\xdb\x67\xd9\x47\xc1\xef\x49"
-                         "\x68\x12\x51\xae\x76\x67\xd6\x27"
-                         "\x19\x88\x1a\xde\x85\xab\x21\xf2"
-                         "\x08\x5d\x16\x1e\x20\x04\x2d\xad"
-                         "\xf3\x18\xa2\x15\x85\x2d\x69\xc4"
-                         "\x42\x83\x23\xb6\x6c\x89\x71\x9b"
-                         "\xef\xcf\x8b\x9f\xcf\x33\xca\x2f"
-                         "\xed\x62\xa9\x4c\x80\xff\x13\xaf"
-                         "\x52\x37\xed\x0e\x52\x6b\x59\x02"
-                         "\xd9\x4e\xe8\x7a\x76\x1d\x02\x98"
-                         "\xfe\x8a\x87\x83\xa3\x4f\x56\x8a"
-                         "\xb8\x9e\x8e\x5c\x57\xd3\xa0\x79"
-                         "\xfa\x02",
-               .output = "This document describes a compression method based on the DEFLATE"
-                       "compression algorithm.  This document defines the application of "
-                       "the DEFLATE algorithm to the IP Payload Compression Protocol.",
-       }, {
-               .params = &deflate_decomp_params,
-               .paramsize = sizeof(deflate_decomp_params),
-               .inlen  = 38,
-               .outlen = 70,
-               .input  = "\xf3\xca\xcf\xcc\x53\x28\x2d\x56"
-                         "\xc8\xcb\x2f\x57\x48\xcc\x4b\x51"
-                         "\x28\xce\x48\x2c\x4a\x55\x28\xc9"
-                         "\x48\x55\x28\xce\x4f\x2b\x29\x07"
-                         "\x71\xbc\x08\x2b\x01\x00",
-               .output = "Join us now and share the software "
-                       "Join us now and share the software ",
-       },
-};
-
 /*
  * LZO test vectors (null-terminated strings).
  */
diff --git a/crypto/zlib.c b/crypto/zlib.c
deleted file mode 100644 (file)
index d51a30a..0000000
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
- * Cryptographic API.
- *
- * Zlib algorithm
- *
- * Copyright 2008 Sony Corporation
- *
- * Based on deflate.c, which is
- * Copyright (c) 2003 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- * FIXME: deflate transforms will require up to a total of about 436k of kernel
- * memory on i386 (390k for compression, the rest for decompression), as the
- * current zlib kernel code uses a worst case pre-allocation system by default.
- * This needs to be fixed so that the amount of memory required is properly
- * related to the winbits and memlevel parameters.
- */
-
-#define pr_fmt(fmt)    "%s: " fmt, __func__
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/zlib.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/mm.h>
-#include <linux/net.h>
-
-#include <crypto/internal/compress.h>
-
-#include <net/netlink.h>
-
-
-struct zlib_ctx {
-       struct z_stream_s comp_stream;
-       struct z_stream_s decomp_stream;
-       int decomp_windowBits;
-};
-
-
-static void zlib_comp_exit(struct zlib_ctx *ctx)
-{
-       struct z_stream_s *stream = &ctx->comp_stream;
-
-       if (stream->workspace) {
-               zlib_deflateEnd(stream);
-               vfree(stream->workspace);
-               stream->workspace = NULL;
-       }
-}
-
-static void zlib_decomp_exit(struct zlib_ctx *ctx)
-{
-       struct z_stream_s *stream = &ctx->decomp_stream;
-
-       if (stream->workspace) {
-               zlib_inflateEnd(stream);
-               vfree(stream->workspace);
-               stream->workspace = NULL;
-       }
-}
-
-static int zlib_init(struct crypto_tfm *tfm)
-{
-       return 0;
-}
-
-static void zlib_exit(struct crypto_tfm *tfm)
-{
-       struct zlib_ctx *ctx = crypto_tfm_ctx(tfm);
-
-       zlib_comp_exit(ctx);
-       zlib_decomp_exit(ctx);
-}
-
-
-static int zlib_compress_setup(struct crypto_pcomp *tfm, const void *params,
-                              unsigned int len)
-{
-       struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &ctx->comp_stream;
-       struct nlattr *tb[ZLIB_COMP_MAX + 1];
-       int window_bits, mem_level;
-       size_t workspacesize;
-       int ret;
-
-       ret = nla_parse(tb, ZLIB_COMP_MAX, params, len, NULL);
-       if (ret)
-               return ret;
-
-       zlib_comp_exit(ctx);
-
-       window_bits = tb[ZLIB_COMP_WINDOWBITS]
-                                       ? nla_get_u32(tb[ZLIB_COMP_WINDOWBITS])
-                                       : MAX_WBITS;
-       mem_level = tb[ZLIB_COMP_MEMLEVEL]
-                                       ? nla_get_u32(tb[ZLIB_COMP_MEMLEVEL])
-                                       : DEF_MEM_LEVEL;
-
-       workspacesize = zlib_deflate_workspacesize(window_bits, mem_level);
-       stream->workspace = vzalloc(workspacesize);
-       if (!stream->workspace)
-               return -ENOMEM;
-
-       ret = zlib_deflateInit2(stream,
-                               tb[ZLIB_COMP_LEVEL]
-                                       ? nla_get_u32(tb[ZLIB_COMP_LEVEL])
-                                       : Z_DEFAULT_COMPRESSION,
-                               tb[ZLIB_COMP_METHOD]
-                                       ? nla_get_u32(tb[ZLIB_COMP_METHOD])
-                                       : Z_DEFLATED,
-                               window_bits,
-                               mem_level,
-                               tb[ZLIB_COMP_STRATEGY]
-                                       ? nla_get_u32(tb[ZLIB_COMP_STRATEGY])
-                                       : Z_DEFAULT_STRATEGY);
-       if (ret != Z_OK) {
-               vfree(stream->workspace);
-               stream->workspace = NULL;
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int zlib_compress_init(struct crypto_pcomp *tfm)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->comp_stream;
-
-       ret = zlib_deflateReset(stream);
-       if (ret != Z_OK)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int zlib_compress_update(struct crypto_pcomp *tfm,
-                               struct comp_request *req)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->comp_stream;
-
-       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
-       stream->next_in = req->next_in;
-       stream->avail_in = req->avail_in;
-       stream->next_out = req->next_out;
-       stream->avail_out = req->avail_out;
-
-       ret = zlib_deflate(stream, Z_NO_FLUSH);
-       switch (ret) {
-       case Z_OK:
-               break;
-
-       case Z_BUF_ERROR:
-               pr_debug("zlib_deflate could not make progress\n");
-               return -EAGAIN;
-
-       default:
-               pr_debug("zlib_deflate failed %d\n", ret);
-               return -EINVAL;
-       }
-
-       ret = req->avail_out - stream->avail_out;
-       pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
-                stream->avail_in, stream->avail_out,
-                req->avail_in - stream->avail_in, ret);
-       req->next_in = stream->next_in;
-       req->avail_in = stream->avail_in;
-       req->next_out = stream->next_out;
-       req->avail_out = stream->avail_out;
-       return ret;
-}
-
-static int zlib_compress_final(struct crypto_pcomp *tfm,
-                              struct comp_request *req)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->comp_stream;
-
-       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
-       stream->next_in = req->next_in;
-       stream->avail_in = req->avail_in;
-       stream->next_out = req->next_out;
-       stream->avail_out = req->avail_out;
-
-       ret = zlib_deflate(stream, Z_FINISH);
-       if (ret != Z_STREAM_END) {
-               pr_debug("zlib_deflate failed %d\n", ret);
-               return -EINVAL;
-       }
-
-       ret = req->avail_out - stream->avail_out;
-       pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
-                stream->avail_in, stream->avail_out,
-                req->avail_in - stream->avail_in, ret);
-       req->next_in = stream->next_in;
-       req->avail_in = stream->avail_in;
-       req->next_out = stream->next_out;
-       req->avail_out = stream->avail_out;
-       return ret;
-}
-
-
-static int zlib_decompress_setup(struct crypto_pcomp *tfm, const void *params,
-                                unsigned int len)
-{
-       struct zlib_ctx *ctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &ctx->decomp_stream;
-       struct nlattr *tb[ZLIB_DECOMP_MAX + 1];
-       int ret = 0;
-
-       ret = nla_parse(tb, ZLIB_DECOMP_MAX, params, len, NULL);
-       if (ret)
-               return ret;
-
-       zlib_decomp_exit(ctx);
-
-       ctx->decomp_windowBits = tb[ZLIB_DECOMP_WINDOWBITS]
-                                ? nla_get_u32(tb[ZLIB_DECOMP_WINDOWBITS])
-                                : DEF_WBITS;
-
-       stream->workspace = vzalloc(zlib_inflate_workspacesize());
-       if (!stream->workspace)
-               return -ENOMEM;
-
-       ret = zlib_inflateInit2(stream, ctx->decomp_windowBits);
-       if (ret != Z_OK) {
-               vfree(stream->workspace);
-               stream->workspace = NULL;
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int zlib_decompress_init(struct crypto_pcomp *tfm)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->decomp_stream;
-
-       ret = zlib_inflateReset(stream);
-       if (ret != Z_OK)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int zlib_decompress_update(struct crypto_pcomp *tfm,
-                                 struct comp_request *req)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->decomp_stream;
-
-       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
-       stream->next_in = req->next_in;
-       stream->avail_in = req->avail_in;
-       stream->next_out = req->next_out;
-       stream->avail_out = req->avail_out;
-
-       ret = zlib_inflate(stream, Z_SYNC_FLUSH);
-       switch (ret) {
-       case Z_OK:
-       case Z_STREAM_END:
-               break;
-
-       case Z_BUF_ERROR:
-               pr_debug("zlib_inflate could not make progress\n");
-               return -EAGAIN;
-
-       default:
-               pr_debug("zlib_inflate failed %d\n", ret);
-               return -EINVAL;
-       }
-
-       ret = req->avail_out - stream->avail_out;
-       pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
-                stream->avail_in, stream->avail_out,
-                req->avail_in - stream->avail_in, ret);
-       req->next_in = stream->next_in;
-       req->avail_in = stream->avail_in;
-       req->next_out = stream->next_out;
-       req->avail_out = stream->avail_out;
-       return ret;
-}
-
-static int zlib_decompress_final(struct crypto_pcomp *tfm,
-                                struct comp_request *req)
-{
-       int ret;
-       struct zlib_ctx *dctx = crypto_tfm_ctx(crypto_pcomp_tfm(tfm));
-       struct z_stream_s *stream = &dctx->decomp_stream;
-
-       pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out);
-       stream->next_in = req->next_in;
-       stream->avail_in = req->avail_in;
-       stream->next_out = req->next_out;
-       stream->avail_out = req->avail_out;
-
-       if (dctx->decomp_windowBits < 0) {
-               ret = zlib_inflate(stream, Z_SYNC_FLUSH);
-               /*
-                * Work around a bug in zlib, which sometimes wants to taste an
-                * extra byte when being used in the (undocumented) raw deflate
-                * mode. (From USAGI).
-                */
-               if (ret == Z_OK && !stream->avail_in && stream->avail_out) {
-                       const void *saved_next_in = stream->next_in;
-                       u8 zerostuff = 0;
-
-                       stream->next_in = &zerostuff;
-                       stream->avail_in = 1;
-                       ret = zlib_inflate(stream, Z_FINISH);
-                       stream->next_in = saved_next_in;
-                       stream->avail_in = 0;
-               }
-       } else
-               ret = zlib_inflate(stream, Z_FINISH);
-       if (ret != Z_STREAM_END) {
-               pr_debug("zlib_inflate failed %d\n", ret);
-               return -EINVAL;
-       }
-
-       ret = req->avail_out - stream->avail_out;
-       pr_debug("avail_in %lu, avail_out %lu (consumed %lu, produced %u)\n",
-                stream->avail_in, stream->avail_out,
-                req->avail_in - stream->avail_in, ret);
-       req->next_in = stream->next_in;
-       req->avail_in = stream->avail_in;
-       req->next_out = stream->next_out;
-       req->avail_out = stream->avail_out;
-       return ret;
-}
-
-
-static struct pcomp_alg zlib_alg = {
-       .compress_setup         = zlib_compress_setup,
-       .compress_init          = zlib_compress_init,
-       .compress_update        = zlib_compress_update,
-       .compress_final         = zlib_compress_final,
-       .decompress_setup       = zlib_decompress_setup,
-       .decompress_init        = zlib_decompress_init,
-       .decompress_update      = zlib_decompress_update,
-       .decompress_final       = zlib_decompress_final,
-
-       .base                   = {
-               .cra_name       = "zlib",
-               .cra_flags      = CRYPTO_ALG_TYPE_PCOMPRESS,
-               .cra_ctxsize    = sizeof(struct zlib_ctx),
-               .cra_module     = THIS_MODULE,
-               .cra_init       = zlib_init,
-               .cra_exit       = zlib_exit,
-       }
-};
-
-static int __init zlib_mod_init(void)
-{
-       return crypto_register_pcomp(&zlib_alg);
-}
-
-static void __exit zlib_mod_fini(void)
-{
-       crypto_unregister_pcomp(&zlib_alg);
-}
-
-module_init(zlib_mod_init);
-module_exit(zlib_mod_fini);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Zlib Compression Algorithm");
-MODULE_AUTHOR("Sony Corporation");
-MODULE_ALIAS_CRYPTO("zlib");
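
With the pcompress type removed, the "zlib" pcomp algorithm above goes away too; in-kernel users that only need whole-buffer (de)compression can use the plain "deflate" crypto_comp algorithm, which produces the same raw deflate stream. A minimal sketch of that path; buffer sizing and error reporting are simplified:

#include <linux/crypto.h>
#include <linux/err.h>

/* Sketch: one-shot compression through the synchronous crypto_comp API. */
static int example_deflate(const u8 *src, unsigned int slen,
                           u8 *dst, unsigned int *dlen)
{
        struct crypto_comp *comp;
        int err;

        comp = crypto_alloc_comp("deflate", 0, 0);
        if (IS_ERR(comp))
                return PTR_ERR(comp);

        /* *dlen is in/out: capacity on entry, produced length on success */
        err = crypto_comp_compress(comp, src, slen, dst, dlen);

        crypto_free_comp(comp);
        return err;
}
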
index c570b1d9f09480ecec5513d3cfe61d33c26c81b5..0872d5fecb82f2dee893c7feb0ac6a408f903384 100644 (file)
@@ -880,7 +880,7 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
                break;
        case BUS_NOTIFY_DRIVER_NOT_BOUND:
        case BUS_NOTIFY_UNBOUND_DRIVER:
-               pdev->dev.pm_domain = NULL;
+               dev_pm_domain_set(&pdev->dev, NULL);
                break;
        case BUS_NOTIFY_ADD_DEVICE:
                dev_pm_domain_set(&pdev->dev, &acpi_lpss_pm_domain);
index 296b7a14893aabba895c578e8671e36b34875a37..b6f7fa3a1d403ea95428808f49edab475422e80a 100644 (file)
@@ -62,7 +62,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
        if (count < 0) {
                return NULL;
        } else if (count > 0) {
-               resources = kmalloc(count * sizeof(struct resource),
+               resources = kzalloc(count * sizeof(struct resource),
                                    GFP_KERNEL);
                if (!resources) {
                        dev_err(&adev->dev, "No memory for resources\n");
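
The kmalloc() to kzalloc() switch above guarantees that any struct resource fields the subsequent copy loop does not fill in read as zero rather than as stale heap contents. For comparison only (not part of this diff), the same zeroed array allocation could also be spelled with kcalloc(), which additionally checks the count * size multiplication for overflow:

#include <linux/ioport.h>
#include <linux/slab.h>

/* Sketch: zeroed, overflow-checked resource array allocation. */
static struct resource *example_alloc_resources(int count)
{
        return kcalloc(count, sizeof(struct resource), GFP_KERNEL);
}
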
index 0431883653bed9d3c0394368fe87ced99043f185..559c1173de1c99177ba702c60d27993826510e9c 100644 (file)
@@ -519,7 +519,7 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
                             u64 param3, u64 param4)
 {
        int rc;
-       unsigned long pfn;
+       u64 base_addr, size;
 
        /* If user manually set "flags", make sure it is legal */
        if (flags && (flags &
@@ -545,10 +545,17 @@ static int einj_error_inject(u32 type, u32 flags, u64 param1, u64 param2,
        /*
         * Disallow crazy address masks that give BIOS leeway to pick
         * injection address almost anywhere. Insist on page or
-        * better granularity and that target address is normal RAM.
+        * better granularity and that target address is normal RAM or
+        * NVDIMM.
         */
-       pfn = PFN_DOWN(param1 & param2);
-       if (!page_is_ram(pfn) || ((param2 & PAGE_MASK) != PAGE_MASK))
+       base_addr = param1 & param2;
+       size = ~param2 + 1;
+
+       if (((param2 & PAGE_MASK) != PAGE_MASK) ||
+           ((region_intersects(base_addr, size, IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
+                               != REGION_INTERSECTS) &&
+            (region_intersects(base_addr, size, IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY)
+                               != REGION_INTERSECTS)))
                return -EINVAL;
 
 inject:
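
The reworked check above derives the injection window from the address/mask pair and accepts it only if it lies entirely in System RAM or entirely in persistent memory, replacing the old single-pfn page_is_ram() test. The same predicate factored into a standalone helper, as a sketch; it assumes the region_intersects(start, size, flags, desc) signature and the IORES_DESC_* constants used in the hunk, and the helper name is illustrative:

#include <linux/ioport.h>
#include <linux/mm.h>

/* Sketch: param1 is the injection address, param2 the BIOS address mask. */
static bool example_einj_range_ok(u64 param1, u64 param2)
{
        u64 base = param1 & param2;
        u64 size = ~param2 + 1;

        /* insist on page granularity or better */
        if ((param2 & PAGE_MASK) != PAGE_MASK)
                return false;

        if (region_intersects(base, size, IORESOURCE_SYSTEM_RAM,
                              IORES_DESC_NONE) == REGION_INTERSECTS)
                return true;

        return region_intersects(base, size, IORESOURCE_MEM,
                                 IORES_DESC_PERSISTENT_MEMORY) == REGION_INTERSECTS;
}
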
index 90e2d54be526bcc0df8ec41bf292a8016d99ba3d..1316ddd92fac0f6c59f4be85759947a2d1643334 100644 (file)
@@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
                },
        },
-       {
-       .callback = video_detect_force_vendor,
-       .ident = "Dell Inspiron 5737",
-       .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-               DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
-               },
-       },
 
        /*
         * These models have a working acpi_video backlight control, and using
index 594fcabd22cd16bfcc09626338a3da33481497cc..546a3692774f8d3819ef76d9bcc656ef1af1c99d 100644 (file)
@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
        { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+       { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
        { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
        { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
        { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
index a4faa438889c075070084fb1f1cd943d61082a88..167ba7e3b92e8fcec115ddcbd1988326e7c67ba5 100644 (file)
@@ -240,8 +240,7 @@ enum {
                                                        error-handling stage) */
        AHCI_HFLAG_NO_DEVSLP            = (1 << 17), /* no device sleep */
        AHCI_HFLAG_NO_FBS               = (1 << 18), /* no FBS */
-       AHCI_HFLAG_EDGE_IRQ             = (1 << 19), /* HOST_IRQ_STAT behaves as
-                                                       Edge Triggered */
+
 #ifdef CONFIG_PCI_MSI
        AHCI_HFLAG_MULTI_MSI            = (1 << 20), /* multiple PCI MSIs */
        AHCI_HFLAG_MULTI_MSIX           = (1 << 21), /* per-port MSI-X */
@@ -250,6 +249,7 @@ enum {
        AHCI_HFLAG_MULTI_MSI            = 0,
        AHCI_HFLAG_MULTI_MSIX           = 0,
 #endif
+       AHCI_HFLAG_WAKE_BEFORE_STOP     = (1 << 22), /* wake before DMA stop */
 
        /* ap->flags bits */
 
@@ -360,6 +360,7 @@ struct ahci_host_priv {
         * be overridden anytime before the host is activated.
         */
        void                    (*start_engine)(struct ata_port *ap);
+       irqreturn_t             (*irq_handler)(int irq, void *dev_instance);
 };
 
 #ifdef CONFIG_PCI_MSI
@@ -423,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
 void ahci_print_info(struct ata_host *host, const char *scc_s);
 int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
 void ahci_error_handler(struct ata_port *ap);
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
 
 static inline void __iomem *__ahci_port_base(struct ata_host *host,
                                             unsigned int port_no)
index b36cae2fd04b2b2969cfefcfe54f63beadef48e2..e87bcec0fd7c31d9623e54cf84fa6c62b41f2f39 100644 (file)
@@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
        if (IS_ERR(hpriv))
                return PTR_ERR(hpriv);
        hpriv->plat_data = priv;
+       hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
 
        brcm_sata_alpm_init(hpriv);
 
index e2c6d9e0c5ac5ce5ce389b0ae1b0597ae979ee8b..8e3f7faf00d383f8eea284fe122e90a44ee7ce40 100644 (file)
@@ -548,6 +548,88 @@ softreset_retry:
        return rc;
 }
 
+/**
+ * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
+ * @host: Host that received the irq
+ * @irq_masked: HOST_IRQ_STAT value
+ *
+ * On hardware with a broken edge-trigger latch, the HOST_IRQ_STAT
+ * register misses an edge interrupt when HOST_IRQ_STAT is cleared in
+ * the same clock cycle in which the hardware reports a new event in a
+ * port's PORT_IRQ_STAT register.
+ * The algorithm below outlines the workaround.
+ *
+ * 1. Read HOST_IRQ_STAT register and save the state.
+ * 2. Clear the HOST_IRQ_STAT register.
+ * 3. Read back the HOST_IRQ_STAT register.
+ * 4. If the HOST_IRQ_STAT register now reads zero, traverse the
+ *    PORT_IRQ_STAT register of each port not already flagged in the
+ *    saved state to check whether an interrupt fired meanwhile;
+ *    otherwise go to step 6.
+ * 5. For each such port whose PORT_IRQ_STAT register is non-zero,
+ *    set the corresponding bit in the HOST_IRQ_STAT state saved in
+ *    step 1.
+ * 6. Handle port interrupts.
+ * 7. Exit
+ */
+static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
+                                            u32 irq_masked)
+{
+       struct ahci_host_priv *hpriv = host->private_data;
+       void __iomem *port_mmio;
+       int i;
+
+       if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
+               for (i = 0; i < host->n_ports; i++) {
+                       if (irq_masked & (1 << i))
+                               continue;
+
+                       port_mmio = ahci_port_base(host->ports[i]);
+                       if (readl(port_mmio + PORT_IRQ_STAT))
+                               irq_masked |= (1 << i);
+               }
+       }
+
+       return ahci_handle_port_intr(host, irq_masked);
+}
+
+static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
+{
+       struct ata_host *host = dev_instance;
+       struct ahci_host_priv *hpriv;
+       unsigned int rc = 0;
+       void __iomem *mmio;
+       u32 irq_stat, irq_masked;
+
+       VPRINTK("ENTER\n");
+
+       hpriv = host->private_data;
+       mmio = hpriv->mmio;
+
+       /* sigh.  0xffffffff is a valid return from h/w */
+       irq_stat = readl(mmio + HOST_IRQ_STAT);
+       if (!irq_stat)
+               return IRQ_NONE;
+
+       irq_masked = irq_stat & hpriv->port_map;
+
+       spin_lock(&host->lock);
+
+       /*
+        * HOST_IRQ_STAT behaves as edge triggered latch meaning that
+        * it should be cleared before all the port events are cleared.
+        */
+       writel(irq_stat, mmio + HOST_IRQ_STAT);
+
+       rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
+
+       spin_unlock(&host->lock);
+
+       VPRINTK("EXIT\n");
+
+       return IRQ_RETVAL(rc);
+}
+
 static struct ata_port_operations xgene_ahci_v1_ops = {
        .inherits = &ahci_ops,
        .host_stop = xgene_ahci_host_stop,
@@ -779,7 +861,8 @@ skip_clk_phy:
                hpriv->flags = AHCI_HFLAG_NO_NCQ;
                break;
        case XGENE_AHCI_V2:
-               hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ;
+               hpriv->flags |= AHCI_HFLAG_YES_FBS;
+               hpriv->irq_handler = xgene_ahci_irq_intr;
                break;
        default:
                break;
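A hypothetical sketch (not part of the patch) of how a platform AHCI driver can use the new override point: implement its own top-level handler, reuse the now-exported ahci_handle_port_intr() for the per-port work, and assign it to hpriv->irq_handler in probe before ahci_host_activate(), just as the X-Gene code above does. The quirk comment marks where controller-specific handling would go.

        static irqreturn_t my_ahci_irq_intr(int irq, void *dev_instance)
        {
                struct ata_host *host = dev_instance;
                struct ahci_host_priv *hpriv = host->private_data;
                u32 irq_stat = readl(hpriv->mmio + HOST_IRQ_STAT);
                u32 handled;

                if (!irq_stat)
                        return IRQ_NONE;

                spin_lock(&host->lock);
                /* controller-specific quirk handling would go here */
                writel(irq_stat, hpriv->mmio + HOST_IRQ_STAT);
                handled = ahci_handle_port_intr(host, irq_stat & hpriv->port_map);
                spin_unlock(&host->lock);

                return IRQ_RETVAL(handled);
        }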
index d61740e78d6dc93a1b17d9e3c9d9425067190a21..513b3fa74d7886949419448cc18a591cf4bcd4cd 100644 (file)
@@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
                                    const char *buf, size_t size);
 static ssize_t ahci_show_em_supported(struct device *dev,
                                      struct device_attribute *attr, char *buf);
+static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
 
 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -496,8 +497,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
                }
        }
 
-       /* fabricate port_map from cap.nr_ports */
-       if (!port_map) {
+       /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+       if (!port_map && vers < 0x10300) {
                port_map = (1 << ahci_nr_ports(cap)) - 1;
                dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
 
@@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 
        if (!hpriv->start_engine)
                hpriv->start_engine = ahci_start_engine;
+
+       if (!hpriv->irq_handler)
+               hpriv->irq_handler = ahci_single_level_irq_intr;
 }
 EXPORT_SYMBOL_GPL(ahci_save_initial_config);
 
@@ -593,8 +597,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine);
 int ahci_stop_engine(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
+       struct ahci_host_priv *hpriv = ap->host->private_data;
        u32 tmp;
 
+       /*
+        * On some controllers, stopping a port's DMA engine while the port
+        * is in ALPM state (partial or slumber) results in failures on
+        * subsequent DMA engine starts.  For those controllers, put the
+        * port back in active state before stopping its DMA engine.
+        */
+       if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
+           (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
+           ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
+               dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
+               return -EIO;
+       }
+
        tmp = readl(port_mmio + PORT_CMD);
 
        /* check if the HBA is idle */
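The ATA_LPM_WAKE_ONLY hint used here lets ahci_stop_engine() kick a link out of partial/slumber without disturbing its ALPE/ASP configuration. A condensed sketch of the resulting sequence inside ahci_stop_engine(), tying this hunk to the ahci_set_lpm() change that follows (error handling trimmed):

        if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
            ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
                /* sets PORT_CMD_ICC_ACTIVE, waits ~10 ms, leaves ALPE/ASP untouched */
                if (ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY))
                        return -EIO;    /* refuse to stop a sleeping engine */
        }

        /* ... then clear PORT_CMD_START and wait for PORT_CMD_LIST_ON as before */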
@@ -689,6 +707,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
        void __iomem *port_mmio = ahci_port_base(ap);
 
        if (policy != ATA_LPM_MAX_POWER) {
+               /* wakeup flag only applies to the max power policy */
+               hints &= ~ATA_LPM_WAKE_ONLY;
+
                /*
                 * Disable interrupts on Phy Ready. This keeps us from
                 * getting woken up due to spurious phy ready
@@ -704,7 +725,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
                u32 cmd = readl(port_mmio + PORT_CMD);
 
                if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
-                       cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
+                       if (!(hints & ATA_LPM_WAKE_ONLY))
+                               cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
                        cmd |= PORT_CMD_ICC_ACTIVE;
 
                        writel(cmd, port_mmio + PORT_CMD);
@@ -712,6 +734,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 
                        /* wait 10ms to be sure we've come out of LPM state */
                        ata_msleep(ap, 10);
+
+                       if (hints & ATA_LPM_WAKE_ONLY)
+                               return 0;
                } else {
                        cmd |= PORT_CMD_ALPE;
                        if (policy == ATA_LPM_MIN_POWER)
@@ -1825,7 +1850,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
-static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
+u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
 {
        unsigned int i, handled = 0;
 
@@ -1851,43 +1876,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
 
        return handled;
 }
-
-static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
-{
-       struct ata_host *host = dev_instance;
-       struct ahci_host_priv *hpriv;
-       unsigned int rc = 0;
-       void __iomem *mmio;
-       u32 irq_stat, irq_masked;
-
-       VPRINTK("ENTER\n");
-
-       hpriv = host->private_data;
-       mmio = hpriv->mmio;
-
-       /* sigh.  0xffffffff is a valid return from h/w */
-       irq_stat = readl(mmio + HOST_IRQ_STAT);
-       if (!irq_stat)
-               return IRQ_NONE;
-
-       irq_masked = irq_stat & hpriv->port_map;
-
-       spin_lock(&host->lock);
-
-       /*
-        * HOST_IRQ_STAT behaves as edge triggered latch meaning that
-        * it should be cleared before all the port events are cleared.
-        */
-       writel(irq_stat, mmio + HOST_IRQ_STAT);
-
-       rc = ahci_handle_port_intr(host, irq_masked);
-
-       spin_unlock(&host->lock);
-
-       VPRINTK("EXIT\n");
-
-       return IRQ_RETVAL(rc);
-}
+EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
 
 static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
 {
@@ -2514,14 +2503,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
        int irq = hpriv->irq;
        int rc;
 
-       if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX))
+       if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
+               if (hpriv->irq_handler)
+                       dev_warn(host->dev,
+                                "both AHCI_HFLAG_MULTI_MSI flag set and custom irq handler implemented\n");
+
                rc = ahci_host_activate_multi_irqs(host, sht);
-       else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ)
-               rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr,
-                                      IRQF_SHARED, sht);
-       else
-               rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
+       } else {
+               rc = ata_host_activate(host, irq, hpriv->irq_handler,
                                       IRQF_SHARED, sht);
+       }
+
+
        return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_host_activate);
index cbb74719d2c1b80d61590ddf3fb2f9c71a31ce06..55e257c268ddde37677cf4ff85472f65e9a63c97 100644 (file)
@@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
        { "SAMSUNG CD-ROM SN-124", "N001",      ATA_HORKAGE_NODMA },
        { "Seagate STT20000A", NULL,            ATA_HORKAGE_NODMA },
        { " 2GB ATA Flash Disk", "ADMA428M",    ATA_HORKAGE_NODMA },
+       { "VRFDFC22048UCHC-TE*", NULL,          ATA_HORKAGE_NODMA },
        /* Odd clown on sil3726/4726 PMPs */
        { "Config  Disk",       NULL,           ATA_HORKAGE_DISABLE },
 
index cdf6215a9a22beb93ede75a702797e12c5ad4870..051b6158d1b7f45b7116b9debc98376a9d4d240f 100644 (file)
@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 {
        struct ata_port *ap = qc->ap;
-       unsigned long flags;
 
        if (ap->ops->error_handler) {
                if (in_wq) {
-                       spin_lock_irqsave(ap->lock, flags);
-
                        /* EH might have kicked in while host lock is
                         * released.
                         */
@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
                                } else
                                        ata_port_freeze(ap);
                        }
-
-                       spin_unlock_irqrestore(ap->lock, flags);
                } else {
                        if (likely(!(qc->err_mask & AC_ERR_HSM)))
                                ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
                }
        } else {
                if (in_wq) {
-                       spin_lock_irqsave(ap->lock, flags);
                        ata_sff_irq_on(ap);
                        ata_qc_complete(qc);
-                       spin_unlock_irqrestore(ap->lock, flags);
                } else
                        ata_qc_complete(qc);
        }
@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 {
        struct ata_link *link = qc->dev->link;
        struct ata_eh_info *ehi = &link->eh_info;
-       unsigned long flags = 0;
        int poll_next;
 
+       lockdep_assert_held(ap->lock);
+
        WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
 
        /* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@ fsm_start:
                        }
                }
 
-               /* Send the CDB (atapi) or the first data block (ata pio out).
-                * During the state transition, interrupt handler shouldn't
-                * be invoked before the data transfer is complete and
-                * hsm_task_state is changed. Hence, the following locking.
-                */
-               if (in_wq)
-                       spin_lock_irqsave(ap->lock, flags);
-
                if (qc->tf.protocol == ATA_PROT_PIO) {
                        /* PIO data out protocol.
                         * send first data block.
@@ -1135,9 +1121,6 @@ fsm_start:
                        /* send CDB */
                        atapi_send_cdb(ap, qc);
 
-               if (in_wq)
-                       spin_unlock_irqrestore(ap->lock, flags);
-
                /* if polling, ata_sff_pio_task() handles the rest.
                 * otherwise, interrupt handler takes over from here.
                 */
@@ -1296,7 +1279,8 @@ fsm_start:
                break;
        default:
                poll_next = 0;
-               BUG();
+               WARN(true, "ata%d: SFF host state machine in invalid state %d",
+                    ap->print_id, ap->hsm_task_state);
        }
 
        return poll_next;
@@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work)
        u8 status;
        int poll_next;
 
+       spin_lock_irq(ap->lock);
+
        BUG_ON(ap->sff_pio_task_link == NULL);
        /* qc can be NULL if timeout occurred */
        qc = ata_qc_from_tag(ap, link->active_tag);
        if (!qc) {
                ap->sff_pio_task_link = NULL;
-               return;
+               goto out_unlock;
        }
 
 fsm_start:
@@ -1381,11 +1367,14 @@ fsm_start:
         */
        status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
        if (status & ATA_BUSY) {
+               spin_unlock_irq(ap->lock);
                ata_msleep(ap, 2);
+               spin_lock_irq(ap->lock);
+
                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
                if (status & ATA_BUSY) {
                        ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
-                       return;
+                       goto out_unlock;
                }
        }
 
@@ -1402,6 +1391,8 @@ fsm_start:
         */
        if (poll_next)
                goto fsm_start;
+out_unlock:
+       spin_unlock_irq(ap->lock);
 }
 
 /**
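Taken together, these libata-sff hunks invert the locking scheme: ata_sff_hsm_move() no longer takes ap->lock itself but asserts that the caller holds it, and ata_sff_pio_task() now owns the lock for the whole polling step, dropping it only around the sleeping wait. A condensed sketch of the resulting pattern (simplified from the hunks above):

        spin_lock_irq(ap->lock);

        status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
        if (status & ATA_BUSY) {
                spin_unlock_irq(ap->lock);      /* never sleep under a spinlock */
                ata_msleep(ap, 2);
                spin_lock_irq(ap->lock);
                status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
        }

        /* lockdep_assert_held(ap->lock) inside catches any caller that forgets */
        poll_next = ata_sff_hsm_move(ap, qc, status, 1);

        spin_unlock_irq(ap->lock);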
index e3d4b059fcd14bcb8f4a9e21809c5cb7d30ededa..e347e7acd8edb440d284d6309187bbb80cd90cd2 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/scatterlist.h>
 #include <linux/of.h>
 #include <linux/gfp.h>
+#include <linux/pci.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -30,7 +31,6 @@
 #include <asm/macio.h>
 #include <asm/io.h>
 #include <asm/dbdma.h>
-#include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/mediabay.h>
index 82f2ae0d7cc488a337772aaf8d7a19b3373bcd94..a969a7e443be26edded6d21245c0b29f24450a72 100644 (file)
@@ -168,7 +168,7 @@ static char *res_strings[] = {
        "reserved 14", 
        "Unrecognized cell", 
        "reserved 16", 
-       "reassemby abort: AAL5 abort", 
+       "reassembly abort: AAL5 abort", 
        "packet purged", 
        "packet ageing timeout", 
        "channel ageing timeout", 
index 500592486e8832cfd61673302359cc352b51efa0..6470eb8088f4f4da92215f5009c3b63a2a492876 100644 (file)
@@ -149,8 +149,7 @@ EXPORT_SYMBOL_GPL(bus_remove_file);
 
 static void bus_release(struct kobject *kobj)
 {
-       struct subsys_private *priv =
-               container_of(kobj, typeof(*priv), subsys.kobj);
+       struct subsys_private *priv = to_subsys_private(kobj);
        struct bus_type *bus = priv->bus;
 
        kfree(priv);
@@ -1019,13 +1018,11 @@ static void device_insertion_sort_klist(struct device *a, struct list_head *list
                                        int (*compare)(const struct device *a,
                                                        const struct device *b))
 {
-       struct list_head *pos;
        struct klist_node *n;
        struct device_private *dev_prv;
        struct device *b;
 
-       list_for_each(pos, list) {
-               n = container_of(pos, struct klist_node, n_node);
+       list_for_each_entry(n, list, n_node) {
                dev_prv = to_device_private_bus(n);
                b = dev_prv->device;
                if (compare(a, b) <= 0) {
@@ -1042,8 +1039,7 @@ void bus_sort_breadthfirst(struct bus_type *bus,
                                          const struct device *b))
 {
        LIST_HEAD(sorted_devices);
-       struct list_head *pos, *tmp;
-       struct klist_node *n;
+       struct klist_node *n, *tmp;
        struct device_private *dev_prv;
        struct device *dev;
        struct klist *device_klist;
@@ -1051,8 +1047,7 @@ void bus_sort_breadthfirst(struct bus_type *bus,
        device_klist = bus_get_device_klist(bus);
 
        spin_lock(&device_klist->k_lock);
-       list_for_each_safe(pos, tmp, &device_klist->k_list) {
-               n = container_of(pos, struct klist_node, n_node);
+       list_for_each_entry_safe(n, tmp, &device_klist->k_list, n_node) {
                dev_prv = to_device_private_bus(n);
                dev = dev_prv->device;
                device_insertion_sort_klist(dev, &sorted_devices, compare);
@@ -1107,7 +1102,7 @@ struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
                knode = klist_next(&iter->ki);
                if (!knode)
                        return NULL;
-               dev = container_of(knode, struct device_private, knode_bus)->device;
+               dev = to_device_private_bus(knode)->device;
                if (!iter->type || iter->type == dev->type)
                        return dev;
        }
index 89f5cf68d80a143198c2c253d6d21c0d9fb38388..0ed3b72b814b37ec16944ac5a88937390f01d80c 100644 (file)
@@ -206,6 +206,8 @@ static void component_match_release(struct device *master,
                if (mc->release)
                        mc->release(master, mc->data);
        }
+
+       kfree(match->compare);
 }
 
 static void devm_component_match_release(struct device *dev, void *res)
@@ -221,14 +223,14 @@ static int component_match_realloc(struct device *dev,
        if (match->alloc == num)
                return 0;
 
-       new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL);
+       new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
        if (match->compare) {
                memcpy(new, match->compare, sizeof(*new) *
                                            min(match->num, num));
-               devm_kfree(dev, match->compare);
+               kfree(match->compare);
        }
        match->compare = new;
        match->alloc = num;
@@ -265,7 +267,7 @@ void component_match_add_release(struct device *master,
        }
 
        if (match->num == match->alloc) {
-               size_t new_size = match ? match->alloc + 16 : 15;
+               size_t new_size = match->alloc + 16;
                int ret;
 
                ret = component_match_realloc(master, match, new_size);
@@ -283,6 +285,24 @@ void component_match_add_release(struct device *master,
 }
 EXPORT_SYMBOL(component_match_add_release);
 
+static void free_master(struct master *master)
+{
+       struct component_match *match = master->match;
+       int i;
+
+       list_del(&master->node);
+
+       if (match) {
+               for (i = 0; i < match->num; i++) {
+                       struct component *c = match->compare[i].component;
+                       if (c)
+                               c->master = NULL;
+               }
+       }
+
+       kfree(master);
+}
+
 int component_master_add_with_match(struct device *dev,
        const struct component_master_ops *ops,
        struct component_match *match)
@@ -309,11 +329,9 @@ int component_master_add_with_match(struct device *dev,
 
        ret = try_to_bring_up_master(master, NULL);
 
-       if (ret < 0) {
-               /* Delete off the list if we weren't successful */
-               list_del(&master->node);
-               kfree(master);
-       }
+       if (ret < 0)
+               free_master(master);
+
        mutex_unlock(&component_mutex);
 
        return ret < 0 ? ret : 0;
@@ -324,25 +342,12 @@ void component_master_del(struct device *dev,
        const struct component_master_ops *ops)
 {
        struct master *master;
-       int i;
 
        mutex_lock(&component_mutex);
        master = __master_find(dev, ops);
        if (master) {
-               struct component_match *match = master->match;
-
                take_down_master(master);
-
-               list_del(&master->node);
-
-               if (match) {
-                       for (i = 0; i < match->num; i++) {
-                               struct component *c = match->compare[i].component;
-                               if (c)
-                                       c->master = NULL;
-                       }
-               }
-               kfree(master);
+               free_master(master);
        }
        mutex_unlock(&component_mutex);
 }
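With the match array now allocated with plain kmalloc_array() and released in component_match_release()/free_master(), its lifetime is tied to the master rather than to devres. A hypothetical usage sketch, assuming the usual component API; compare_of, my_master_ops and child are placeholder names:

        struct component_match *match = NULL;

        /* each call may grow the match array in chunks of 16 entries */
        component_match_add(dev, &match, compare_of, child);
        if (IS_ERR(match))
                return PTR_ERR(match);

        /* on failure the core now tears the match down via free_master() */
        return component_master_add_with_match(dev, &my_master_ops, match);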
index 0a8bdade53f2f67943dca83bc53cfa74f0b8da7d..b405b773adec1b4128ed0b3fdb38ebf2272290e9 100644 (file)
@@ -2155,7 +2155,7 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
        return pos;
 
 overflow:
-       dev_WARN(dev, "device/subsystem name too long");
+       dev_WARN(dev, true, "device/subsystem name too long");
        return 0;
 }
 
index 55b83983a9c00c8b5984c887a0435ffb6e03fe1d..87b8083748885592c8da34f2defa4a1b46be0be5 100644 (file)
@@ -17,9 +17,9 @@ struct dma_coherent_mem {
        spinlock_t      spinlock;
 };
 
-static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_addr,
-                            size_t size, int flags,
-                            struct dma_coherent_mem **mem)
+static bool dma_init_coherent_memory(
+       phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
+       struct dma_coherent_mem **mem)
 {
        struct dma_coherent_mem *dma_mem = NULL;
        void __iomem *mem_base = NULL;
@@ -50,17 +50,13 @@ static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_add
        spin_lock_init(&dma_mem->spinlock);
 
        *mem = dma_mem;
-
-       if (flags & DMA_MEMORY_MAP)
-               return DMA_MEMORY_MAP;
-
-       return DMA_MEMORY_IO;
+       return true;
 
 out:
        kfree(dma_mem);
        if (mem_base)
                iounmap(mem_base);
-       return 0;
+       return false;
 }
 
 static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
@@ -88,15 +84,13 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags)
 {
        struct dma_coherent_mem *mem;
-       int ret;
 
-       ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
-                                      &mem);
-       if (ret == 0)
+       if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
+                                     &mem))
                return 0;
 
        if (dma_assign_coherent_memory(dev, mem) == 0)
-               return ret;
+               return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;
 
        dma_release_coherent_memory(mem);
        return 0;
@@ -281,9 +275,9 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
        struct dma_coherent_mem *mem = rmem->priv;
 
        if (!mem &&
-           dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
-                                    DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
-                                    &mem) != DMA_MEMORY_MAP) {
+           !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
+                                     DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
+                                     &mem)) {
                pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
                        &rmem->base, (unsigned long)rmem->size / SZ_1M);
                return -ENODEV;
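The internal helper now reports plain success/failure and only the public entry point translates that back into the historical DMA_MEMORY_MAP/DMA_MEMORY_IO return values, so callers keep the old contract. A hypothetical caller sketch (res is a placeholder for the device's reserved region):

        /* returns DMA_MEMORY_MAP or DMA_MEMORY_IO on success, 0 on failure */
        if (!dma_declare_coherent_memory(dev, res->start, res->start,
                                         resource_size(res),
                                         DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
                return -ENOMEM;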
index 47c43386786b13229f4fbeb5cbdbfe03a2009804..279e53989374f065d01039caf110a14b95f887e0 100644 (file)
@@ -284,6 +284,7 @@ out_free_priv_data:
 
        return err;
 }
+EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
 
 /**
  * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
@@ -301,6 +302,7 @@ void platform_msi_domain_free_irqs(struct device *dev)
        msi_domain_free_irqs(dev->msi_domain, dev);
        platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
 }
+EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
 
 /**
  * platform_msi_get_host_data - Query the private data associated with
index 73d6e5d39e33e1edef9033d5e97b99ecc50a80cf..1b5b7b5b50c0a96a8035dc8076b86790e1084914 100644 (file)
@@ -558,10 +558,15 @@ static int platform_drv_probe(struct device *_dev)
                return ret;
 
        ret = dev_pm_domain_attach(_dev, true);
-       if (ret != -EPROBE_DEFER && drv->probe) {
-               ret = drv->probe(dev);
-               if (ret)
-                       dev_pm_domain_detach(_dev, true);
+       if (ret != -EPROBE_DEFER) {
+               if (drv->probe) {
+                       ret = drv->probe(dev);
+                       if (ret)
+                               dev_pm_domain_detach(_dev, true);
+               } else {
+                       /* don't fail if just dev_pm_domain_attach failed */
+                       ret = 0;
+               }
        }
 
        if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
@@ -944,8 +949,8 @@ static int platform_match(struct device *dev, struct device_driver *drv)
                return !strcmp(pdev->driver_override, drv->name);
 
        /* Attempt an OF style match first */
-       if (of_driver_match_device(dev, drv))
-               return 1;
+       if (pdev->dev.of_node)
+               return of_driver_match_device(dev, drv);
 
        /* Then try ACPI style match */
        if (acpi_driver_match_device(dev, drv))
index 93ed14cc22524ccdd04301dbe460e0958bb69acf..f6a9ad52cbbff0663e58540f33bb29e08abf0e96 100644 (file)
@@ -146,7 +146,7 @@ void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd)
        if (dev->pm_domain == pd)
                return;
 
-       WARN(device_is_bound(dev),
+       WARN(pd && device_is_bound(dev),
             "PM domains can only be changed for unbound devices\n");
        dev->pm_domain = pd;
        device_pm_check_callbacks(dev);
index 6ac9a7f33b640ae1105a72f76974e8939a30ae45..301b785f9f56f5d00b7f83798b6f7cce53bf0e03 100644 (file)
@@ -162,7 +162,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
 
 /**
  * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
- * @genpd: PM domait to power off.
+ * @genpd: PM domain to power off.
  *
  * Queue up the execution of genpd_poweroff() unless it's already been done
  * before.
@@ -172,16 +172,15 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
        queue_work(pm_wq, &genpd->power_off_work);
 }
 
-static int genpd_poweron(struct generic_pm_domain *genpd);
-
 /**
- * __genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
+ * @depth: nesting count for lockdep.
  *
  * Restore power to @genpd and all of its masters so that it is possible to
  * resume a device belonging to it.
  */
-static int __genpd_poweron(struct generic_pm_domain *genpd)
+static int genpd_poweron(struct generic_pm_domain *genpd, unsigned int depth)
 {
        struct gpd_link *link;
        int ret = 0;
@@ -196,11 +195,16 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
         * with it.
         */
        list_for_each_entry(link, &genpd->slave_links, slave_node) {
-               genpd_sd_counter_inc(link->master);
+               struct generic_pm_domain *master = link->master;
+
+               genpd_sd_counter_inc(master);
+
+               mutex_lock_nested(&master->lock, depth + 1);
+               ret = genpd_poweron(master, depth + 1);
+               mutex_unlock(&master->lock);
 
-               ret = genpd_poweron(link->master);
                if (ret) {
-                       genpd_sd_counter_dec(link->master);
+                       genpd_sd_counter_dec(master);
                        goto err;
                }
        }
@@ -223,20 +227,6 @@ static int __genpd_poweron(struct generic_pm_domain *genpd)
        return ret;
 }
 
-/**
- * genpd_poweron - Restore power to a given PM domain and its masters.
- * @genpd: PM domain to power up.
- */
-static int genpd_poweron(struct generic_pm_domain *genpd)
-{
-       int ret;
-
-       mutex_lock(&genpd->lock);
-       ret = __genpd_poweron(genpd);
-       mutex_unlock(&genpd->lock);
-       return ret;
-}
-
 static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
 {
        return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
@@ -484,7 +474,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
        }
 
        mutex_lock(&genpd->lock);
-       ret = __genpd_poweron(genpd);
+       ret = genpd_poweron(genpd, 0);
        mutex_unlock(&genpd->lock);
 
        if (ret)
@@ -1339,8 +1329,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
        if (!link)
                return -ENOMEM;
 
-       mutex_lock(&genpd->lock);
-       mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
+       mutex_lock(&subdomain->lock);
+       mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
        if (genpd->status == GPD_STATE_POWER_OFF
            &&  subdomain->status != GPD_STATE_POWER_OFF) {
@@ -1363,8 +1353,8 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
                genpd_sd_counter_inc(genpd);
 
  out:
-       mutex_unlock(&subdomain->lock);
        mutex_unlock(&genpd->lock);
+       mutex_unlock(&subdomain->lock);
        if (ret)
                kfree(link);
        return ret;
@@ -1385,7 +1375,8 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
        if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
                return -EINVAL;
 
-       mutex_lock(&genpd->lock);
+       mutex_lock(&subdomain->lock);
+       mutex_lock_nested(&genpd->lock, SINGLE_DEPTH_NESTING);
 
        if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
                pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
@@ -1398,22 +1389,19 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
                if (link->slave != subdomain)
                        continue;
 
-               mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING);
-
                list_del(&link->master_node);
                list_del(&link->slave_node);
                kfree(link);
                if (subdomain->status != GPD_STATE_POWER_OFF)
                        genpd_sd_counter_dec(genpd);
 
-               mutex_unlock(&subdomain->lock);
-
                ret = 0;
                break;
        }
 
 out:
        mutex_unlock(&genpd->lock);
+       mutex_unlock(&subdomain->lock);
 
        return ret;
 }
@@ -1818,8 +1806,10 @@ int genpd_dev_pm_attach(struct device *dev)
 
        dev->pm_domain->detach = genpd_dev_pm_detach;
        dev->pm_domain->sync = genpd_dev_pm_sync;
-       ret = genpd_poweron(pd);
 
+       mutex_lock(&pd->lock);
+       ret = genpd_poweron(pd, 0);
+       mutex_unlock(&pd->lock);
 out:
        return ret ? -EPROBE_DEFER : 0;
 }
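The new @depth argument exists so that each level of the master hierarchy is locked with its own lockdep subclass while genpd_poweron() recurses upward; plain nested mutex_lock() calls on locks of the same class would otherwise be reported as a deadlock. A stripped-down sketch of the pattern (counters and error handling omitted):

        static int poweron_with_masters(struct generic_pm_domain *genpd,
                                        unsigned int depth)
        {
                struct gpd_link *link;

                /* genpd->lock is already held by the caller at subclass 'depth' */
                list_for_each_entry(link, &genpd->slave_links, slave_node) {
                        struct generic_pm_domain *master = link->master;

                        mutex_lock_nested(&master->lock, depth + 1);
                        poweron_with_masters(master, depth + 1);
                        mutex_unlock(&master->lock);
                }

                return genpd->power_on ? genpd->power_on(genpd) : 0;
        }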
index cf351d3dab1c3b7a9badf6b468092ca216c47012..ab711c2c3e0031ec3324e56942b32ed0129dd337 100644 (file)
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/clk.h>
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/of.h>
 #include <linux/export.h>
+#include <linux/regulator/consumer.h>
 
 #include "opp.h"
 
@@ -229,6 +231,82 @@ unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 
+/**
+ * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max voltage latency in nanoseconds.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+       struct device_opp *dev_opp;
+       struct dev_pm_opp *opp;
+       struct regulator *reg;
+       unsigned long latency_ns = 0;
+       unsigned long min_uV = ~0, max_uV = 0;
+       int ret;
+
+       rcu_read_lock();
+
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               rcu_read_unlock();
+               return 0;
+       }
+
+       reg = dev_opp->regulator;
+       if (IS_ERR_OR_NULL(reg)) {
+               /* Regulator may not be required for device */
+               if (reg)
+                       dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
+                               PTR_ERR(reg));
+               rcu_read_unlock();
+               return 0;
+       }
+
+       list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
+               if (!opp->available)
+                       continue;
+
+               if (opp->u_volt_min < min_uV)
+                       min_uV = opp->u_volt_min;
+               if (opp->u_volt_max > max_uV)
+                       max_uV = opp->u_volt_max;
+       }
+
+       rcu_read_unlock();
+
+       /*
+        * The caller needs to ensure that dev_opp (and hence the regulator)
+        * isn't freed, while we are executing this routine.
+        */
+       ret = regulator_set_voltage_time(reg, min_uV, max_uV);
+       if (ret > 0)
+               latency_ns = ret * 1000;
+
+       return latency_ns;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
+
+/**
+ * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
+ *                                          nanoseconds
+ * @dev: device for which we do this operation
+ *
+ * Return: This function returns the max transition latency, in nanoseconds, to
+ * switch from one OPP to other.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+       return dev_pm_opp_get_max_volt_latency(dev) +
+               dev_pm_opp_get_max_clock_latency(dev);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
+
 /**
  * dev_pm_opp_get_suspend_opp() - Get suspend opp
  * @dev:       device for which we do this operation
@@ -451,6 +529,182 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
+/*
+ * The caller needs to ensure that device_opp (and hence the clk) isn't freed,
+ * while clk returned here is used.
+ */
+static struct clk *_get_opp_clk(struct device *dev)
+{
+       struct device_opp *dev_opp;
+       struct clk *clk;
+
+       rcu_read_lock();
+
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+               clk = ERR_CAST(dev_opp);
+               goto unlock;
+       }
+
+       clk = dev_opp->clk;
+       if (IS_ERR(clk))
+               dev_err(dev, "%s: No clock available for the device\n",
+                       __func__);
+
+unlock:
+       rcu_read_unlock();
+       return clk;
+}
+
+static int _set_opp_voltage(struct device *dev, struct regulator *reg,
+                           unsigned long u_volt, unsigned long u_volt_min,
+                           unsigned long u_volt_max)
+{
+       int ret;
+
+       /* Regulator not available for device */
+       if (IS_ERR(reg)) {
+               dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
+                       PTR_ERR(reg));
+               return 0;
+       }
+
+       dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
+               u_volt, u_volt_max);
+
+       ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
+                                           u_volt_max);
+       if (ret)
+               dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
+                       __func__, u_volt_min, u_volt, u_volt_max, ret);
+
+       return ret;
+}
+
+/**
+ * dev_pm_opp_set_rate() - Configure new OPP based on frequency
+ * @dev:        device for which we do this operation
+ * @target_freq: frequency to achieve
+ *
+ * This configures the power-supplies and clock source to the levels specified
+ * by the OPP corresponding to the target_freq.
+ *
+ * Locking: This function takes rcu_read_lock().
+ */
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+       struct device_opp *dev_opp;
+       struct dev_pm_opp *old_opp, *opp;
+       struct regulator *reg;
+       struct clk *clk;
+       unsigned long freq, old_freq;
+       unsigned long u_volt, u_volt_min, u_volt_max;
+       unsigned long ou_volt, ou_volt_min, ou_volt_max;
+       int ret;
+
+       if (unlikely(!target_freq)) {
+               dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
+                       target_freq);
+               return -EINVAL;
+       }
+
+       clk = _get_opp_clk(dev);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       freq = clk_round_rate(clk, target_freq);
+       if ((long)freq <= 0)
+               freq = target_freq;
+
+       old_freq = clk_get_rate(clk);
+
+       /* Return early if nothing to do */
+       if (old_freq == freq) {
+               dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+                       __func__, freq);
+               return 0;
+       }
+
+       rcu_read_lock();
+
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               dev_err(dev, "%s: device opp doesn't exist\n", __func__);
+               rcu_read_unlock();
+               return PTR_ERR(dev_opp);
+       }
+
+       old_opp = dev_pm_opp_find_freq_ceil(dev, &old_freq);
+       if (!IS_ERR(old_opp)) {
+               ou_volt = old_opp->u_volt;
+               ou_volt_min = old_opp->u_volt_min;
+               ou_volt_max = old_opp->u_volt_max;
+       } else {
+               dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
+                       __func__, old_freq, PTR_ERR(old_opp));
+       }
+
+       opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+       if (IS_ERR(opp)) {
+               ret = PTR_ERR(opp);
+               dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
+                       __func__, freq, ret);
+               rcu_read_unlock();
+               return ret;
+       }
+
+       u_volt = opp->u_volt;
+       u_volt_min = opp->u_volt_min;
+       u_volt_max = opp->u_volt_max;
+
+       reg = dev_opp->regulator;
+
+       rcu_read_unlock();
+
+       /* Scaling up? Scale voltage before frequency */
+       if (freq > old_freq) {
+               ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+                                      u_volt_max);
+               if (ret)
+                       goto restore_voltage;
+       }
+
+       /* Change frequency */
+
+       dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
+               __func__, old_freq, freq);
+
+       ret = clk_set_rate(clk, freq);
+       if (ret) {
+               dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
+                       ret);
+               goto restore_voltage;
+       }
+
+       /* Scaling down? Scale voltage after frequency */
+       if (freq < old_freq) {
+               ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
+                                      u_volt_max);
+               if (ret)
+                       goto restore_freq;
+       }
+
+       return 0;
+
+restore_freq:
+       if (clk_set_rate(clk, old_freq))
+               dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
+                       __func__, old_freq);
+restore_voltage:
+       /* This shouldn't harm even if the voltages weren't updated earlier */
+       if (!IS_ERR(old_opp))
+               _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
+
 /* List-dev Helpers */
 static void _kfree_list_dev_rcu(struct rcu_head *head)
 {
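A hypothetical consumer sketch for the helpers added in this hunk; the device pointer and target rate are placeholders. dev_pm_opp_set_rate() picks the ceil OPP for the requested rate, raises the supply before the clock when scaling up, lowers it afterwards when scaling down, and rolls back on failure as shown above.

        static int my_scale_to(struct device *dev, unsigned long target_hz)
        {
                unsigned long worst_ns = dev_pm_opp_get_max_transition_latency(dev);

                dev_dbg(dev, "worst-case OPP switch latency: %lu ns\n", worst_ns);

                return dev_pm_opp_set_rate(dev, target_hz);
        }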
@@ -505,6 +759,8 @@ static struct device_opp *_add_device_opp(struct device *dev)
 {
        struct device_opp *dev_opp;
        struct device_list_opp *list_dev;
+       struct device_node *np;
+       int ret;
 
        /* Check for existing list for 'dev' first */
        dev_opp = _find_device_opp(dev);
@@ -527,6 +783,30 @@ static struct device_opp *_add_device_opp(struct device *dev)
                return NULL;
        }
 
+       /*
+        * Only required for backward compatibility with v1 bindings, but isn't
+        * harmful for other cases. And so we do it unconditionally.
+        */
+       np = of_node_get(dev->of_node);
+       if (np) {
+               u32 val;
+
+               if (!of_property_read_u32(np, "clock-latency", &val))
+                       dev_opp->clock_latency_ns_max = val;
+               of_property_read_u32(np, "voltage-tolerance",
+                                    &dev_opp->voltage_tolerance_v1);
+               of_node_put(np);
+       }
+
+       /* Find clk for the device */
+       dev_opp->clk = clk_get(dev, NULL);
+       if (IS_ERR(dev_opp->clk)) {
+               ret = PTR_ERR(dev_opp->clk);
+               if (ret != -EPROBE_DEFER)
+                       dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
+                               ret);
+       }
+
        srcu_init_notifier_head(&dev_opp->srcu_head);
        INIT_LIST_HEAD(&dev_opp->opp_list);
 
@@ -565,6 +845,13 @@ static void _remove_device_opp(struct device_opp *dev_opp)
        if (dev_opp->prop_name)
                return;
 
+       if (!IS_ERR_OR_NULL(dev_opp->regulator))
+               return;
+
+       /* Release clk */
+       if (!IS_ERR(dev_opp->clk))
+               clk_put(dev_opp->clk);
+
        list_dev = list_first_entry(&dev_opp->dev_list, struct device_list_opp,
                                    node);
 
@@ -683,6 +970,22 @@ static struct dev_pm_opp *_allocate_opp(struct device *dev,
        return opp;
 }
 
+static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
+                                        struct device_opp *dev_opp)
+{
+       struct regulator *reg = dev_opp->regulator;
+
+       if (!IS_ERR(reg) &&
+           !regulator_is_supported_voltage(reg, opp->u_volt_min,
+                                           opp->u_volt_max)) {
+               pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
+                       __func__, opp->u_volt_min, opp->u_volt_max);
+               return false;
+       }
+
+       return true;
+}
+
 static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
                    struct device_opp *dev_opp)
 {
@@ -724,6 +1027,12 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
                dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
                        __func__, ret);
 
+       if (!_opp_supported_by_regulators(new_opp, dev_opp)) {
+               new_opp->available = false;
+               dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
+                        __func__, new_opp->rate);
+       }
+
        return 0;
 }
 
@@ -759,6 +1068,7 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
 {
        struct device_opp *dev_opp;
        struct dev_pm_opp *new_opp;
+       unsigned long tol;
        int ret;
 
        /* Hold our list modification lock here */
@@ -772,7 +1082,10 @@ static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
 
        /* populate the opp table */
        new_opp->rate = freq;
+       tol = u_volt * dev_opp->voltage_tolerance_v1 / 100;
        new_opp->u_volt = u_volt;
+       new_opp->u_volt_min = u_volt - tol;
+       new_opp->u_volt_max = u_volt + tol;
        new_opp->available = true;
        new_opp->dynamic = dynamic;
 
@@ -1085,6 +1398,113 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
 
+/**
+ * dev_pm_opp_set_regulator() - Set regulator name for the device
+ * @dev: Device for which regulator name is being set.
+ * @name: Name of the regulator.
+ *
+ * In order to support OPP switching, the OPP layer needs to know the name of
+ * the device's regulator, since the core must switch the supply voltage along
+ * with the frequency.
+ *
+ * This must be called before any OPPs are initialized for the device.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+       struct device_opp *dev_opp;
+       struct regulator *reg;
+       int ret;
+
+       mutex_lock(&dev_opp_list_lock);
+
+       dev_opp = _add_device_opp(dev);
+       if (!dev_opp) {
+               ret = -ENOMEM;
+               goto unlock;
+       }
+
+       /* This should be called before OPPs are initialized */
+       if (WARN_ON(!list_empty(&dev_opp->opp_list))) {
+               ret = -EBUSY;
+               goto err;
+       }
+
+       /* Already have a regulator set */
+       if (WARN_ON(!IS_ERR_OR_NULL(dev_opp->regulator))) {
+               ret = -EBUSY;
+               goto err;
+       }
+       /* Allocate the regulator */
+       reg = regulator_get_optional(dev, name);
+       if (IS_ERR(reg)) {
+               ret = PTR_ERR(reg);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(dev, "%s: no regulator (%s) found: %d\n",
+                               __func__, name, ret);
+               goto err;
+       }
+
+       dev_opp->regulator = reg;
+
+       mutex_unlock(&dev_opp_list_lock);
+       return 0;
+
+err:
+       _remove_device_opp(dev_opp);
+unlock:
+       mutex_unlock(&dev_opp_list_lock);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
+
+/**
+ * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
+ * @dev: Device for which regulator was set.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Hence this function internally uses RCU updater strategy with mutex locks
+ * to keep the integrity of the internal data structures. Callers should ensure
+ * that this function is *NOT* called under RCU protection or in contexts where
+ * mutex cannot be locked.
+ */
+void dev_pm_opp_put_regulator(struct device *dev)
+{
+       struct device_opp *dev_opp;
+
+       mutex_lock(&dev_opp_list_lock);
+
+       /* Check for existing list for 'dev' first */
+       dev_opp = _find_device_opp(dev);
+       if (IS_ERR(dev_opp)) {
+               dev_err(dev, "Failed to find dev_opp: %ld\n", PTR_ERR(dev_opp));
+               goto unlock;
+       }
+
+       if (IS_ERR_OR_NULL(dev_opp->regulator)) {
+               dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
+               goto unlock;
+       }
+
+       /* Make sure there are no concurrent readers while updating dev_opp */
+       WARN_ON(!list_empty(&dev_opp->opp_list));
+
+       regulator_put(dev_opp->regulator);
+       dev_opp->regulator = ERR_PTR(-EINVAL);
+
+       /* Try freeing device_opp if this was the last blocking resource */
+       _remove_device_opp(dev_opp);
+
+unlock:
+       mutex_unlock(&dev_opp_list_lock);
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
+
 static bool _opp_is_supported(struct device *dev, struct device_opp *dev_opp,
                              struct device_node *np)
 {
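A hypothetical probe-time sketch of the pairing the kernel-doc above describes: the supply name must be registered before any OPPs are added, and dev_pm_opp_put_regulator() undoes it on the error path. The cpu_dev pointer, the "cpu" supply name and the use of dev_pm_opp_of_add_table() to populate the table are assumptions for the example.

        ret = dev_pm_opp_set_regulator(cpu_dev, "cpu");
        if (ret)
                return ret;

        ret = dev_pm_opp_of_add_table(cpu_dev);   /* populates the OPP list */
        if (ret) {
                dev_pm_opp_put_regulator(cpu_dev);
                return ret;
        }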
index 690638ef36ee534c1ab9a57d8c4c7ee840f78a51..4f1bdfc7da03be2be60da43e62e21be3a6a55ea5 100644 (file)
@@ -22,6 +22,9 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 
+struct clk;
+struct regulator;
+
 /* Lock to allow exclusive modification to the device and opp lists */
 extern struct mutex dev_opp_list_lock;
 
@@ -132,9 +135,13 @@ struct device_list_opp {
  * @supported_hw: Array of version number to support.
  * @supported_hw_count: Number of elements in supported_hw array.
  * @prop_name: A name to postfix to many DT properties, while parsing them.
+ * @clk: Device's clock handle
+ * @regulator: Supply regulator
  * @dentry:    debugfs dentry pointer of the real device directory (not links).
  * @dentry_name: Name of the real dentry.
  *
+ * @voltage_tolerance_v1: In percentage, for v1 bindings only.
+ *
  * This is an internal data structure maintaining the link to opps attached to
  * a device. This structure is not meant to be shared to users as it is
  * meant for book keeping and private to OPP library.
@@ -153,12 +160,18 @@ struct device_opp {
 
        struct device_node *np;
        unsigned long clock_latency_ns_max;
+
+       /* For backward compatibility with v1 bindings */
+       unsigned int voltage_tolerance_v1;
+
        bool shared_opp;
        struct dev_pm_opp *suspend_opp;
 
        unsigned int *supported_hw;
        unsigned int supported_hw_count;
        const char *prop_name;
+       struct clk *clk;
+       struct regulator *regulator;
 
 #ifdef CONFIG_DEBUG_FS
        struct dentry *dentry;
index 348be3a354108eb2533587dabed9495e9c324b5a..4170b7d952767074140ce26c4c1ac621823607ad 100644 (file)
@@ -30,7 +30,7 @@ static int regcache_hw_init(struct regmap *map)
        int i, j;
        int ret;
        int count;
-       unsigned int val;
+       unsigned int reg, val;
        void *tmp_buf;
 
        if (!map->num_reg_defaults_raw)
@@ -57,7 +57,7 @@ static int regcache_hw_init(struct regmap *map)
                bool cache_bypass = map->cache_bypass;
                dev_warn(map->dev, "No cache defaults, reading back from HW\n");
 
-               /* Bypass the cache access till data read from HW*/
+               /* Bypass the cache access till data read from HW */
                map->cache_bypass = true;
                tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
                if (!tmp_buf) {
@@ -65,29 +65,48 @@ static int regcache_hw_init(struct regmap *map)
                        goto err_free;
                }
                ret = regmap_raw_read(map, 0, tmp_buf,
-                                     map->num_reg_defaults_raw);
+                                     map->cache_size_raw);
                map->cache_bypass = cache_bypass;
-               if (ret < 0)
-                       goto err_cache_free;
-
-               map->reg_defaults_raw = tmp_buf;
-               map->cache_free = 1;
+               if (ret == 0) {
+                       map->reg_defaults_raw = tmp_buf;
+                       map->cache_free = 1;
+               } else {
+                       kfree(tmp_buf);
+               }
        }
 
        /* fill the reg_defaults */
        for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
-               if (regmap_volatile(map, i * map->reg_stride))
+               reg = i * map->reg_stride;
+
+               if (!regmap_readable(map, reg))
                        continue;
-               val = regcache_get_val(map, map->reg_defaults_raw, i);
-               map->reg_defaults[j].reg = i * map->reg_stride;
+
+               if (regmap_volatile(map, reg))
+                       continue;
+
+               if (map->reg_defaults_raw) {
+                       val = regcache_get_val(map, map->reg_defaults_raw, i);
+               } else {
+                       bool cache_bypass = map->cache_bypass;
+
+                       map->cache_bypass = true;
+                       ret = regmap_read(map, reg, &val);
+                       map->cache_bypass = cache_bypass;
+                       if (ret != 0) {
+                               dev_err(map->dev, "Failed to read %d: %d\n",
+                                       reg, ret);
+                               goto err_free;
+                       }
+               }
+
+               map->reg_defaults[j].reg = reg;
                map->reg_defaults[j].def = val;
                j++;
        }
 
        return 0;
 
-err_cache_free:
-       kfree(tmp_buf);
 err_free:
        kfree(map->reg_defaults);
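
With the hunk above, regcache_hw_init() falls back to reading each readable, non-volatile register individually (cache bypassed) when the raw default table cannot be read in one go. A minimal sketch of how a driver typically opts into such a cache follows; the register layout, callbacks and cache type are illustrative assumptions, not taken from this merge.

#include <linux/regmap.h>

/* Hypothetical device: registers 0x00-0x20, 0x00 volatile (e.g. a status register). */
static bool foo_readable_reg(struct device *dev, unsigned int reg)
{
	return reg <= 0x20;
}

static bool foo_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == 0x00;
}

static const struct regmap_config foo_regmap_config = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= 0x20,
	.readable_reg	= foo_readable_reg,
	.volatile_reg	= foo_volatile_reg,
	.cache_type	= REGCACHE_RBTREE,
};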
 
index 9b0d202414d065bf6e03a6f39bb33c9925dee4db..7e1e9e86c70b9833ead6d6aea499ee8b0282bc40 100644 (file)
@@ -655,13 +655,34 @@ EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
  *
  * @irq: Primary IRQ for the device
  * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ *
+ * This function also disposes of all mapped IRQs on the chip.
  */
 void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
 {
+       unsigned int virq;
+       int hwirq;
+
        if (!d)
                return;
 
        free_irq(irq, d);
+
+       /* Dispose of all virtual IRQs in the IRQ domain before removing it */
+       for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
+               /* Ignore hwirqs that are holes in the IRQ list */
+               if (!d->chip->irqs[hwirq].mask)
+                       continue;
+
+               /*
+                * Find the virtual IRQ mapped to this hwirq on the chip
+                * and, if one exists, dispose of it.
+                */
+               virq = irq_find_mapping(d->domain, hwirq);
+               if (virq)
+                       irq_dispose_mapping(virq);
+       }
+
        irq_domain_remove(d->domain);
        kfree(d->type_buf);
        kfree(d->type_buf_def);
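
The loop added above guarantees that every virq created through the IRQ domain is disposed of before the domain itself is removed. A hedged sketch of the usual add/remove pairing this cleans up after; the chip name, register offsets and masks are assumptions.

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/regmap.h>

/* Hypothetical single-status-register interrupt chip. */
static const struct regmap_irq foo_irqs[] = {
	{ .reg_offset = 0, .mask = BIT(0) },
	{ .reg_offset = 0, .mask = BIT(1) },
};

static const struct regmap_irq_chip foo_irq_chip = {
	.name		= "foo",
	.status_base	= 0x10,
	.mask_base	= 0x11,
	.ack_base	= 0x12,
	.num_regs	= 1,
	.irqs		= foo_irqs,
	.num_irqs	= ARRAY_SIZE(foo_irqs),
};

static int foo_setup_irq(struct regmap *map, int irq,
			 struct regmap_irq_chip_data **data)
{
	return regmap_add_irq_chip(map, irq, IRQF_ONESHOT, 0,
				   &foo_irq_chip, data);
}

static void foo_teardown_irq(int irq, struct regmap_irq_chip_data *data)
{
	/* Now also disposes every mapped virq before removing the domain. */
	regmap_del_irq_chip(irq, data);
}
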
index 8812bfb9e3b89256f4a5d1468cf45484b18e4cd1..7526906ca080f81dcff1499b7e57c2ff0b79569e 100644 (file)
 
 struct regmap_mmio_context {
        void __iomem *regs;
-       unsigned reg_bytes;
        unsigned val_bytes;
-       unsigned pad_bytes;
        struct clk *clk;
-};
 
-static inline void regmap_mmio_regsize_check(size_t reg_size)
-{
-       switch (reg_size) {
-       case 1:
-       case 2:
-       case 4:
-#ifdef CONFIG_64BIT
-       case 8:
-#endif
-               break;
-       default:
-               BUG();
-       }
-}
+       void (*reg_write)(struct regmap_mmio_context *ctx,
+                         unsigned int reg, unsigned int val);
+       unsigned int (*reg_read)(struct regmap_mmio_context *ctx,
+                                unsigned int reg);
+};
 
 static int regmap_mmio_regbits_check(size_t reg_bits)
 {
@@ -88,72 +76,62 @@ static int regmap_mmio_get_min_stride(size_t val_bits)
        return min_stride;
 }
 
-static inline void regmap_mmio_count_check(size_t count, u32 offset)
+static void regmap_mmio_write8(struct regmap_mmio_context *ctx,
+                               unsigned int reg,
+                               unsigned int val)
+{
+       writeb(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write16le(struct regmap_mmio_context *ctx,
+                                 unsigned int reg,
+                                 unsigned int val)
 {
-       BUG_ON(count <= offset);
+       writew(val, ctx->regs + reg);
 }
 
-static inline unsigned int
-regmap_mmio_get_offset(const void *reg, size_t reg_size)
+static void regmap_mmio_write16be(struct regmap_mmio_context *ctx,
+                                 unsigned int reg,
+                                 unsigned int val)
 {
-       switch (reg_size) {
-       case 1:
-               return *(u8 *)reg;
-       case 2:
-               return *(u16 *)reg;
-       case 4:
-               return *(u32 *)reg;
+       iowrite16be(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32le(struct regmap_mmio_context *ctx,
+                                 unsigned int reg,
+                                 unsigned int val)
+{
+       writel(val, ctx->regs + reg);
+}
+
+static void regmap_mmio_write32be(struct regmap_mmio_context *ctx,
+                                 unsigned int reg,
+                                 unsigned int val)
+{
+       iowrite32be(val, ctx->regs + reg);
+}
+
 #ifdef CONFIG_64BIT
-       case 8:
-               return *(u64 *)reg;
-#endif
-       default:
-               BUG();
-       }
+static void regmap_mmio_write64le(struct regmap_mmio_context *ctx,
+                                 unsigned int reg,
+                                 unsigned int val)
+{
+       writeq(val, ctx->regs + reg);
 }
+#endif
 
-static int regmap_mmio_gather_write(void *context,
-                                   const void *reg, size_t reg_size,
-                                   const void *val, size_t val_size)
+static int regmap_mmio_write(void *context, unsigned int reg, unsigned int val)
 {
        struct regmap_mmio_context *ctx = context;
-       unsigned int offset;
        int ret;
 
-       regmap_mmio_regsize_check(reg_size);
-
        if (!IS_ERR(ctx->clk)) {
                ret = clk_enable(ctx->clk);
                if (ret < 0)
                        return ret;
        }
 
-       offset = regmap_mmio_get_offset(reg, reg_size);
-
-       while (val_size) {
-               switch (ctx->val_bytes) {
-               case 1:
-                       __raw_writeb(*(u8 *)val, ctx->regs + offset);
-                       break;
-               case 2:
-                       __raw_writew(*(u16 *)val, ctx->regs + offset);
-                       break;
-               case 4:
-                       __raw_writel(*(u32 *)val, ctx->regs + offset);
-                       break;
-#ifdef CONFIG_64BIT
-               case 8:
-                       __raw_writeq(*(u64 *)val, ctx->regs + offset);
-                       break;
-#endif
-               default:
-                       /* Should be caught by regmap_mmio_check_config */
-                       BUG();
-               }
-               val_size -= ctx->val_bytes;
-               val += ctx->val_bytes;
-               offset += ctx->val_bytes;
-       }
+       ctx->reg_write(ctx, reg, val);
 
        if (!IS_ERR(ctx->clk))
                clk_disable(ctx->clk);
@@ -161,59 +139,56 @@ static int regmap_mmio_gather_write(void *context,
        return 0;
 }
 
-static int regmap_mmio_write(void *context, const void *data, size_t count)
+static unsigned int regmap_mmio_read8(struct regmap_mmio_context *ctx,
+                                     unsigned int reg)
 {
-       struct regmap_mmio_context *ctx = context;
-       unsigned int offset = ctx->reg_bytes + ctx->pad_bytes;
+       return readb(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16le(struct regmap_mmio_context *ctx,
+                                        unsigned int reg)
+{
+       return readw(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read16be(struct regmap_mmio_context *ctx,
+                                        unsigned int reg)
+{
+       return ioread16be(ctx->regs + reg);
+}
+
+static unsigned int regmap_mmio_read32le(struct regmap_mmio_context *ctx,
+                                        unsigned int reg)
+{
+       return readl(ctx->regs + reg);
+}
 
-       regmap_mmio_count_check(count, offset);
+static unsigned int regmap_mmio_read32be(struct regmap_mmio_context *ctx,
+                                        unsigned int reg)
+{
+       return ioread32be(ctx->regs + reg);
+}
 
-       return regmap_mmio_gather_write(context, data, ctx->reg_bytes,
-                                       data + offset, count - offset);
+#ifdef CONFIG_64BIT
+static unsigned int regmap_mmio_read64le(struct regmap_mmio_context *ctx,
+                                        unsigned int reg)
+{
+       return readq(ctx->regs + reg);
 }
+#endif
 
-static int regmap_mmio_read(void *context,
-                           const void *reg, size_t reg_size,
-                           void *val, size_t val_size)
+static int regmap_mmio_read(void *context, unsigned int reg, unsigned int *val)
 {
        struct regmap_mmio_context *ctx = context;
-       unsigned int offset;
        int ret;
 
-       regmap_mmio_regsize_check(reg_size);
-
        if (!IS_ERR(ctx->clk)) {
                ret = clk_enable(ctx->clk);
                if (ret < 0)
                        return ret;
        }
 
-       offset = regmap_mmio_get_offset(reg, reg_size);
-
-       while (val_size) {
-               switch (ctx->val_bytes) {
-               case 1:
-                       *(u8 *)val = __raw_readb(ctx->regs + offset);
-                       break;
-               case 2:
-                       *(u16 *)val = __raw_readw(ctx->regs + offset);
-                       break;
-               case 4:
-                       *(u32 *)val = __raw_readl(ctx->regs + offset);
-                       break;
-#ifdef CONFIG_64BIT
-               case 8:
-                       *(u64 *)val = __raw_readq(ctx->regs + offset);
-                       break;
-#endif
-               default:
-                       /* Should be caught by regmap_mmio_check_config */
-                       BUG();
-               }
-               val_size -= ctx->val_bytes;
-               val += ctx->val_bytes;
-               offset += ctx->val_bytes;
-       }
+       *val = ctx->reg_read(ctx, reg);
 
        if (!IS_ERR(ctx->clk))
                clk_disable(ctx->clk);
@@ -232,14 +207,11 @@ static void regmap_mmio_free_context(void *context)
        kfree(context);
 }
 
-static struct regmap_bus regmap_mmio = {
+static const struct regmap_bus regmap_mmio = {
        .fast_io = true,
-       .write = regmap_mmio_write,
-       .gather_write = regmap_mmio_gather_write,
-       .read = regmap_mmio_read,
+       .reg_write = regmap_mmio_write,
+       .reg_read = regmap_mmio_read,
        .free_context = regmap_mmio_free_context,
-       .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
-       .val_format_endian_default = REGMAP_ENDIAN_NATIVE,
 };
 
 static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
@@ -265,24 +237,71 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
        if (config->reg_stride < min_stride)
                return ERR_PTR(-EINVAL);
 
-       switch (config->reg_format_endian) {
-       case REGMAP_ENDIAN_DEFAULT:
-       case REGMAP_ENDIAN_NATIVE:
-               break;
-       default:
-               return ERR_PTR(-EINVAL);
-       }
-
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return ERR_PTR(-ENOMEM);
 
        ctx->regs = regs;
        ctx->val_bytes = config->val_bits / 8;
-       ctx->reg_bytes = config->reg_bits / 8;
-       ctx->pad_bytes = config->pad_bits / 8;
        ctx->clk = ERR_PTR(-ENODEV);
 
+       switch (config->reg_format_endian) {
+       case REGMAP_ENDIAN_DEFAULT:
+       case REGMAP_ENDIAN_LITTLE:
+#ifdef __LITTLE_ENDIAN
+       case REGMAP_ENDIAN_NATIVE:
+#endif
+               switch (config->val_bits) {
+               case 8:
+                       ctx->reg_read = regmap_mmio_read8;
+                       ctx->reg_write = regmap_mmio_write8;
+                       break;
+               case 16:
+                       ctx->reg_read = regmap_mmio_read16le;
+                       ctx->reg_write = regmap_mmio_write16le;
+                       break;
+               case 32:
+                       ctx->reg_read = regmap_mmio_read32le;
+                       ctx->reg_write = regmap_mmio_write32le;
+                       break;
+#ifdef CONFIG_64BIT
+               case 64:
+                       ctx->reg_read = regmap_mmio_read64le;
+                       ctx->reg_write = regmap_mmio_write64le;
+                       break;
+#endif
+               default:
+                       ret = -EINVAL;
+                       goto err_free;
+               }
+               break;
+       case REGMAP_ENDIAN_BIG:
+#ifdef __BIG_ENDIAN
+       case REGMAP_ENDIAN_NATIVE:
+#endif
+               switch (config->val_bits) {
+               case 8:
+                       ctx->reg_read = regmap_mmio_read8;
+                       ctx->reg_write = regmap_mmio_write8;
+                       break;
+               case 16:
+                       ctx->reg_read = regmap_mmio_read16be;
+                       ctx->reg_write = regmap_mmio_write16be;
+                       break;
+               case 32:
+                       ctx->reg_read = regmap_mmio_read32be;
+                       ctx->reg_write = regmap_mmio_write32be;
+                       break;
+               default:
+                       ret = -EINVAL;
+                       goto err_free;
+               }
+               break;
+       default:
+               ret = -EINVAL;
+               goto err_free;
+       }
+
        if (clk_id == NULL)
                return ctx;
 
index ee54e841de4ad69c96ecb7897ddc04e889e8c702..e2f68807d970f6bce49124d4eaf39ec3c0e57ec7 100644 (file)
@@ -557,6 +557,8 @@ enum regmap_endian regmap_get_val_endian(struct device *dev,
                        endian = REGMAP_ENDIAN_BIG;
                else if (of_property_read_bool(np, "little-endian"))
                        endian = REGMAP_ENDIAN_LITTLE;
+               else if (of_property_read_bool(np, "native-endian"))
+                       endian = REGMAP_ENDIAN_NATIVE;
 
                /* If the endianness was specified in DT, use that */
                if (endian != REGMAP_ENDIAN_DEFAULT)
@@ -2253,6 +2255,9 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
 
        WARN_ON(!map->bus);
 
+       if (!map->bus || !map->bus->read)
+               return -EINVAL;
+
        range = _regmap_range_lookup(map, reg);
        if (range) {
                ret = _regmap_select_page(map, &reg, range,
index 38f156745d533a666deae90f167dd6d51d718d54..7e4ddfb076d3824ef1024b0d5381e312610835b1 100644 (file)
@@ -48,7 +48,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
 void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
 void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
 #ifdef CONFIG_BCMA_DRIVER_MIPS
-void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
 extern struct platform_device bcma_pflash_dev;
 #endif /* CONFIG_BCMA_DRIVER_MIPS */
 
index b7c8a8d4e6d1a232d44a28412a8b9ceba94e1ce9..b0f44a2937b9cb03d3ae8bbbebfc7035fe134ab2 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/platform_device.h>
 #include <linux/bcma/bcma.h>
 
+static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+
 static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
                                         u32 mask, u32 value)
 {
@@ -115,6 +117,8 @@ int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
 
 void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
 {
+       struct bcma_bus *bus = cc->core->bus;
+
        if (cc->early_setup_done)
                return;
 
@@ -129,6 +133,9 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
        if (cc->capabilities & BCMA_CC_CAP_PMU)
                bcma_pmu_early_init(cc);
 
+       if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
+               bcma_chipco_serial_init(cc);
+
        cc->early_setup_done = true;
 }
 
@@ -185,11 +192,12 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
                        ticks = 2;
                else if (ticks > maxt)
                        ticks = maxt;
-               bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
        } else {
                struct bcma_bus *bus = cc->core->bus;
 
                if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
+                   bus->chipinfo.id != BCMA_CHIP_ID_BCM47094 &&
                    bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
                        bcma_core_set_clockmode(cc->core,
                                                ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);
@@ -314,9 +322,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
        return res;
 }
 
-#ifdef CONFIG_BCMA_DRIVER_MIPS
-void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
 {
+#if IS_BUILTIN(CONFIG_BCM47XX)
        unsigned int irq;
        u32 baud_base;
        u32 i;
@@ -358,5 +366,5 @@ void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
                ports[i].baud_base = baud_base;
                ports[i].reg_shift = 0;
        }
+#endif /* CONFIG_BCM47XX */
 }
-#endif /* CONFIG_BCMA_DRIVER_MIPS */
index fe0d48cb17788a9f3614fd8a36add7bac5f58848..f1eb4d3e1d575b2806c3d43155efb85f7640228d 100644 (file)
 
 u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
 {
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
-       return bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+       return bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
 
 void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
 {
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
 
 void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
                             u32 set)
 {
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_PLLCTL_ADDR);
-       bcma_cc_maskset32(cc, BCMA_CC_PLLCTL_DATA, mask, set);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+       bcma_pmu_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
 
 void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
                                 u32 offset, u32 mask, u32 set)
 {
-       bcma_cc_write32(cc, BCMA_CC_CHIPCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_CHIPCTL_ADDR);
-       bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL_DATA, mask, set);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR);
+       bcma_pmu_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
 
 void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
                                u32 set)
 {
-       bcma_cc_write32(cc, BCMA_CC_REGCTL_ADDR, offset);
-       bcma_cc_read32(cc, BCMA_CC_REGCTL_ADDR);
-       bcma_cc_maskset32(cc, BCMA_CC_REGCTL_DATA, mask, set);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset);
+       bcma_pmu_read32(cc, BCMA_CC_PMU_REGCTL_ADDR);
+       bcma_pmu_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set);
 }
 EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
 
@@ -60,18 +60,18 @@ static u32 bcma_pmu_xtalfreq(struct bcma_drv_cc *cc)
 {
        u32 ilp_ctl, alp_hz;
 
-       if (!(bcma_cc_read32(cc, BCMA_CC_PMU_STAT) &
+       if (!(bcma_pmu_read32(cc, BCMA_CC_PMU_STAT) &
              BCMA_CC_PMU_STAT_EXT_LPO_AVAIL))
                return 0;
 
-       bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
-                       BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
+       bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
+                        BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
        usleep_range(1000, 2000);
 
-       ilp_ctl = bcma_cc_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
+       ilp_ctl = bcma_pmu_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
        ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK;
 
-       bcma_cc_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
 
        alp_hz = ilp_ctl * 32768 / 4;
        return (alp_hz + 50000) / 100000 * 100;
@@ -127,8 +127,8 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
                mask = (u32)~(BCMA_RES_4314_HT_AVAIL |
                              BCMA_RES_4314_MACPHY_CLK_AVAIL);
 
-               bcma_cc_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
-               bcma_cc_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
+               bcma_pmu_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
+               bcma_pmu_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
                bcma_wait_value(cc->core, BCMA_CLKCTLST,
                                BCMA_CLKCTLST_HAVEHT, 0, 20000);
                break;
@@ -140,7 +140,7 @@ static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
 
        /* Flush */
        if (cc->pmu.rev >= 2)
-               bcma_cc_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
+               bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
 
        /* TODO: Do we need to update OTP? */
 }
@@ -195,9 +195,9 @@ static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
 
        /* Set the resource masks. */
        if (min_msk)
-               bcma_cc_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
        if (max_msk)
-               bcma_cc_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
 
        /*
         * Add some delay; allow resources to come up and settle.
@@ -269,23 +269,33 @@ static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
 
 void bcma_pmu_early_init(struct bcma_drv_cc *cc)
 {
+       struct bcma_bus *bus = cc->core->bus;
        u32 pmucap;
 
-       pmucap = bcma_cc_read32(cc, BCMA_CC_PMU_CAP);
+       if (cc->core->id.rev >= 35 &&
+           cc->capabilities_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
+               cc->pmu.core = bcma_find_core(bus, BCMA_CORE_PMU);
+               if (!cc->pmu.core)
+                       bcma_warn(bus, "Couldn't find expected PMU core");
+       }
+       if (!cc->pmu.core)
+               cc->pmu.core = cc->core;
+
+       pmucap = bcma_pmu_read32(cc, BCMA_CC_PMU_CAP);
        cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
 
-       bcma_debug(cc->core->bus, "Found rev %u PMU (capabilities 0x%08X)\n",
-                  cc->pmu.rev, pmucap);
+       bcma_debug(bus, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
+                  pmucap);
 }
 
 void bcma_pmu_init(struct bcma_drv_cc *cc)
 {
        if (cc->pmu.rev == 1)
-               bcma_cc_mask32(cc, BCMA_CC_PMU_CTL,
-                             ~BCMA_CC_PMU_CTL_NOILPONW);
+               bcma_pmu_mask32(cc, BCMA_CC_PMU_CTL,
+                               ~BCMA_CC_PMU_CTL_NOILPONW);
        else
-               bcma_cc_set32(cc, BCMA_CC_PMU_CTL,
-                            BCMA_CC_PMU_CTL_NOILPONW);
+               bcma_pmu_set32(cc, BCMA_CC_PMU_CTL,
+                              BCMA_CC_PMU_CTL_NOILPONW);
 
        bcma_pmu_pll_init(cc);
        bcma_pmu_resources_init(cc);
@@ -472,8 +482,8 @@ u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
 static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
                                         u32 value)
 {
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR, offset);
-       bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, value);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
 }
 
 void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
@@ -497,20 +507,20 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
                       bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
 
                /* RMW only the P1 divider */
-               bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+               bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
                                BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
-               tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+               tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
                tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
                tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
-               bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
 
                /* RMW only the int feedback divider */
-               bcma_cc_write32(cc, BCMA_CC_PLLCTL_ADDR,
+               bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
                                BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
-               tmp = bcma_cc_read32(cc, BCMA_CC_PLLCTL_DATA);
+               tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
                tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
                tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
-               bcma_cc_write32(cc, BCMA_CC_PLLCTL_DATA, tmp);
+               bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
 
                tmp = BCMA_CC_PMU_CTL_PLL_UPD;
                break;
@@ -646,7 +656,7 @@ void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
                break;
        }
 
-       tmp |= bcma_cc_read32(cc, BCMA_CC_PMU_CTL);
-       bcma_cc_write32(cc, BCMA_CC_PMU_CTL, tmp);
+       tmp |= bcma_pmu_read32(cc, BCMA_CC_PMU_CTL);
+       bcma_pmu_write32(cc, BCMA_CC_PMU_CTL, tmp);
 }
 EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
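
Every PMU register access above moves from the bcma_cc_* helpers to bcma_pmu_* helpers so that, on chips whose PMU lives in a separate always-on core (cc->pmu.core, as set up in bcma_pmu_early_init() above), the access targets that core rather than the ChipCommon core. The helpers are presumably thin wrappers along these lines (a sketch, not code from this merge):

#include <linux/bcma/bcma.h>

static inline u32 bcma_pmu_read32(struct bcma_drv_cc *cc, u16 offset)
{
	return bcma_read32(cc->pmu.core, offset);
}

static inline void bcma_pmu_write32(struct bcma_drv_cc *cc, u16 offset,
				    u32 value)
{
	bcma_write32(cc->pmu.core, offset, value);
}
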
index 7e11ef4cb7dbed69bd52685cf03a177fe6a8a01a..04d706ca5f439be4576c8f78afb4b10a66b5dd3e 100644 (file)
@@ -38,6 +38,7 @@ static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
        { "M25P32", 0x15, 0x10000, 64, },
        { "M25P64", 0x16, 0x10000, 128, },
        { "M25FL128", 0x17, 0x10000, 256, },
+       { "MX25L25635F", 0x18, 0x10000, 512, },
        { NULL },
 };
 
index 98067f757fb0d9348fc8fb8f6fd25eb81e32bbf2..771a2a253440f736711bd996044d78b6ce3b723a 100644 (file)
@@ -192,6 +192,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
        case BCMA_CHIP_ID_BCM4707:
        case BCMA_CHIP_ID_BCM5357:
        case BCMA_CHIP_ID_BCM53572:
+       case BCMA_CHIP_ID_BCM47094:
                chip->ngpio     = 32;
                break;
        default:
index 24424f3fef96d5784be6d5af59dbf40b3abc94e2..a40a203314db7278a1aaf81149d4463ad6381cc3 100644 (file)
@@ -337,12 +337,9 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
 
 void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
 {
-       struct bcma_bus *bus = mcore->core->bus;
-
        if (mcore->early_setup_done)
                return;
 
-       bcma_chipco_serial_init(&bus->drv_cc);
        bcma_core_mips_flash_detect(mcore);
 
        mcore->early_setup_done = true;
index 0856189c065fd57826df7edab2dba3217d5e283c..cae5385cf4996c28353e55f0676f8aa9ba8ec1ac 100644 (file)
@@ -294,7 +294,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
-       { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4365) },
+       { PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
        { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
index df806b9c549084fab8e61569718b2441485f17e4..4a2d1b235fb5af3e51ac8b044e6a18aed5753cec 100644 (file)
@@ -98,6 +98,9 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
        { BCMA_CORE_SHIM, "SHIM" },
        { BCMA_CORE_PCIE2, "PCIe Gen2" },
        { BCMA_CORE_ARM_CR4, "ARM CR4" },
+       { BCMA_CORE_GCI, "GCI" },
+       { BCMA_CORE_CMEM, "CNDS DDR2/3 memory controller" },
+       { BCMA_CORE_ARM_CA7, "ARM CA7" },
        { BCMA_CORE_DEFAULT, "Default" },
 };
 
@@ -315,6 +318,8 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
                switch (core->id.id) {
                case BCMA_CORE_4706_MAC_GBIT_COMMON:
                case BCMA_CORE_NS_CHIPCOMMON_B:
+               case BCMA_CORE_PMU:
+               case BCMA_CORE_GCI:
                /* Not used yet: case BCMA_CORE_OOB_ROUTER: */
                        break;
                default:
index 99e773cb70d0b58d4a54115be3593986966def0f..3d31761c0ed05c6054f6a5de7251fb6d811b6caa 100644 (file)
@@ -21,9 +21,9 @@
 
 #include <linux/module.h>
 
+#include <crypto/skcipher.h>
 #include <linux/init.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/blkdev.h>
 #include <linux/scatterlist.h>
 #include <asm/uaccess.h>
@@ -46,7 +46,7 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
        char *cipher;
        char *mode;
        char *cmsp = cms;                       /* c-m string pointer */
-       struct crypto_blkcipher *tfm;
+       struct crypto_skcipher *tfm;
 
        /* encryption breaks for non sector aligned offsets */
 
@@ -82,12 +82,12 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
        *cmsp++ = ')';
        *cmsp = 0;
 
-       tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_skcipher(cms, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
-                                     info->lo_encrypt_key_size);
+       err = crypto_skcipher_setkey(tfm, info->lo_encrypt_key,
+                                    info->lo_encrypt_key_size);
        
        if (err != 0)
                goto out_free_tfm;
@@ -96,17 +96,14 @@ cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
        return 0;
 
  out_free_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
 
  out:
        return err;
 }
 
 
-typedef int (*encdec_cbc_t)(struct blkcipher_desc *desc,
-                       struct scatterlist *sg_out,
-                       struct scatterlist *sg_in,
-                       unsigned int nsg);
+typedef int (*encdec_cbc_t)(struct skcipher_request *req);
 
 static int
 cryptoloop_transfer(struct loop_device *lo, int cmd,
@@ -114,11 +111,8 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                    struct page *loop_page, unsigned loop_off,
                    int size, sector_t IV)
 {
-       struct crypto_blkcipher *tfm = lo->key_data;
-       struct blkcipher_desc desc = {
-               .tfm = tfm,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
-       };
+       struct crypto_skcipher *tfm = lo->key_data;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        struct scatterlist sg_out;
        struct scatterlist sg_in;
 
@@ -127,6 +121,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
        unsigned in_offs, out_offs;
        int err;
 
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     NULL, NULL);
+
        sg_init_table(&sg_out, 1);
        sg_init_table(&sg_in, 1);
 
@@ -135,13 +133,13 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                in_offs = raw_off;
                out_page = loop_page;
                out_offs = loop_off;
-               encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
+               encdecfunc = crypto_skcipher_decrypt;
        } else {
                in_page = loop_page;
                in_offs = loop_off;
                out_page = raw_page;
                out_offs = raw_off;
-               encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
+               encdecfunc = crypto_skcipher_encrypt;
        }
 
        while (size > 0) {
@@ -152,10 +150,10 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                sg_set_page(&sg_in, in_page, sz, in_offs);
                sg_set_page(&sg_out, out_page, sz, out_offs);
 
-               desc.info = iv;
-               err = encdecfunc(&desc, &sg_out, &sg_in, sz);
+               skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
+               err = encdecfunc(req);
                if (err)
-                       return err;
+                       goto out;
 
                IV++;
                size -= sz;
@@ -163,7 +161,11 @@ cryptoloop_transfer(struct loop_device *lo, int cmd,
                out_offs += sz;
        }
 
-       return 0;
+       err = 0;
+
+out:
+       skcipher_request_zero(req);
+       return err;
 }
 
 static int
@@ -175,9 +177,9 @@ cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
 static int
 cryptoloop_release(struct loop_device *lo)
 {
-       struct crypto_blkcipher *tfm = lo->key_data;
+       struct crypto_skcipher *tfm = lo->key_data;
        if (tfm != NULL) {
-               crypto_free_blkcipher(tfm);
+               crypto_free_skcipher(tfm);
                lo->key_data = NULL;
                return 0;
        }
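
The conversion above moves cryptoloop from the legacy blkcipher interface to the synchronous skcipher API with an on-stack request. A self-contained sketch of that request pattern; the helper name and parameters are illustrative only.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

static int foo_encrypt_sector(struct crypto_skcipher *tfm,
			      struct scatterlist *src,
			      struct scatterlist *dst,
			      unsigned int len, u8 *iv)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	int err;

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	err = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);

	return err;
}
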
index 34bc84efc29e99085d9ccdeb6a4fa49f5b079446..c227fd4cad75fe62eba5bcfd8b0ef22742a080e3 100644 (file)
 #ifndef _DRBD_INT_H
 #define _DRBD_INT_H
 
+#include <crypto/hash.h>
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/sched.h>
 #include <linux/bitops.h>
 #include <linux/slab.h>
-#include <linux/crypto.h>
 #include <linux/ratelimit.h>
 #include <linux/tcp.h>
 #include <linux/mutex.h>
@@ -724,11 +724,11 @@ struct drbd_connection {
 
        struct list_head transfer_log;  /* all requests not yet fully processed */
 
-       struct crypto_hash *cram_hmac_tfm;
-       struct crypto_hash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
-       struct crypto_hash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
-       struct crypto_hash *csums_tfm;
-       struct crypto_hash *verify_tfm;
+       struct crypto_shash *cram_hmac_tfm;
+       struct crypto_ahash *integrity_tfm;  /* checksums we compute, updates protected by connection->data->mutex */
+       struct crypto_ahash *peer_integrity_tfm;  /* checksums we verify, only accessed from receiver thread  */
+       struct crypto_ahash *csums_tfm;
+       struct crypto_ahash *verify_tfm;
        void *int_dig_in;
        void *int_dig_vv;
 
@@ -1524,8 +1524,8 @@ static inline void ov_out_of_sync_print(struct drbd_device *device)
 }
 
 
-extern void drbd_csum_bio(struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct crypto_hash *, struct drbd_peer_request *, void *);
+extern void drbd_csum_bio(struct crypto_ahash *, struct bio *, void *);
+extern void drbd_csum_ee(struct crypto_ahash *, struct drbd_peer_request *, void *);
 /* worker callbacks */
 extern int w_e_end_data_req(struct drbd_work *, int);
 extern int w_e_end_rsdata_req(struct drbd_work *, int);
index 5b43dfb798191d0c90cc3b5115956575e56410f9..fa209773d494a4efd6857aa4ef4437ee11636250 100644 (file)
@@ -1340,7 +1340,7 @@ void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd
                      struct p_data *dp, int data_size)
 {
        if (peer_device->connection->peer_integrity_tfm)
-               data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+               data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
        _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
                       dp->block_id);
 }
@@ -1629,7 +1629,7 @@ int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *
        sock = &peer_device->connection->data;
        p = drbd_prepare_command(peer_device, sock);
        digest_size = peer_device->connection->integrity_tfm ?
-                     crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+                     crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
 
        if (!p)
                return -EIO;
@@ -1718,7 +1718,7 @@ int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
        p = drbd_prepare_command(peer_device, sock);
 
        digest_size = peer_device->connection->integrity_tfm ?
-                     crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
+                     crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;
 
        if (!p)
                return -EIO;
@@ -2498,11 +2498,11 @@ void conn_free_crypto(struct drbd_connection *connection)
 {
        drbd_free_sock(connection);
 
-       crypto_free_hash(connection->csums_tfm);
-       crypto_free_hash(connection->verify_tfm);
-       crypto_free_hash(connection->cram_hmac_tfm);
-       crypto_free_hash(connection->integrity_tfm);
-       crypto_free_hash(connection->peer_integrity_tfm);
+       crypto_free_ahash(connection->csums_tfm);
+       crypto_free_ahash(connection->verify_tfm);
+       crypto_free_shash(connection->cram_hmac_tfm);
+       crypto_free_ahash(connection->integrity_tfm);
+       crypto_free_ahash(connection->peer_integrity_tfm);
        kfree(connection->int_dig_in);
        kfree(connection->int_dig_vv);
 
index c055c5e12f248dc77b60c89d4b723f0de3f934b3..226eb0c9f0fb33a7ee795dbf3661ec5bac58da77 100644 (file)
@@ -2160,19 +2160,34 @@ check_net_options(struct drbd_connection *connection, struct net_conf *new_net_c
 }
 
 struct crypto {
-       struct crypto_hash *verify_tfm;
-       struct crypto_hash *csums_tfm;
-       struct crypto_hash *cram_hmac_tfm;
-       struct crypto_hash *integrity_tfm;
+       struct crypto_ahash *verify_tfm;
+       struct crypto_ahash *csums_tfm;
+       struct crypto_shash *cram_hmac_tfm;
+       struct crypto_ahash *integrity_tfm;
 };
 
 static int
-alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
+alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
 {
        if (!tfm_name[0])
                return NO_ERROR;
 
-       *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
+       *tfm = crypto_alloc_shash(tfm_name, 0, 0);
+       if (IS_ERR(*tfm)) {
+               *tfm = NULL;
+               return err_alg;
+       }
+
+       return NO_ERROR;
+}
+
+static int
+alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
+{
+       if (!tfm_name[0])
+               return NO_ERROR;
+
+       *tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(*tfm)) {
                *tfm = NULL;
                return err_alg;
@@ -2187,24 +2202,24 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
        char hmac_name[CRYPTO_MAX_ALG_NAME];
        enum drbd_ret_code rv;
 
-       rv = alloc_hash(&crypto->csums_tfm, new_net_conf->csums_alg,
-                      ERR_CSUMS_ALG);
+       rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
+                        ERR_CSUMS_ALG);
        if (rv != NO_ERROR)
                return rv;
-       rv = alloc_hash(&crypto->verify_tfm, new_net_conf->verify_alg,
-                      ERR_VERIFY_ALG);
+       rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
+                        ERR_VERIFY_ALG);
        if (rv != NO_ERROR)
                return rv;
-       rv = alloc_hash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
-                      ERR_INTEGRITY_ALG);
+       rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
+                        ERR_INTEGRITY_ALG);
        if (rv != NO_ERROR)
                return rv;
        if (new_net_conf->cram_hmac_alg[0] != 0) {
                snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
                         new_net_conf->cram_hmac_alg);
 
-               rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
-                              ERR_AUTH_ALG);
+               rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
+                                ERR_AUTH_ALG);
        }
 
        return rv;
@@ -2212,10 +2227,10 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
 
 static void free_crypto(struct crypto *crypto)
 {
-       crypto_free_hash(crypto->cram_hmac_tfm);
-       crypto_free_hash(crypto->integrity_tfm);
-       crypto_free_hash(crypto->csums_tfm);
-       crypto_free_hash(crypto->verify_tfm);
+       crypto_free_shash(crypto->cram_hmac_tfm);
+       crypto_free_ahash(crypto->integrity_tfm);
+       crypto_free_ahash(crypto->csums_tfm);
+       crypto_free_ahash(crypto->verify_tfm);
 }
 
 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
@@ -2292,23 +2307,23 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
        rcu_assign_pointer(connection->net_conf, new_net_conf);
 
        if (!rsr) {
-               crypto_free_hash(connection->csums_tfm);
+               crypto_free_ahash(connection->csums_tfm);
                connection->csums_tfm = crypto.csums_tfm;
                crypto.csums_tfm = NULL;
        }
        if (!ovr) {
-               crypto_free_hash(connection->verify_tfm);
+               crypto_free_ahash(connection->verify_tfm);
                connection->verify_tfm = crypto.verify_tfm;
                crypto.verify_tfm = NULL;
        }
 
-       crypto_free_hash(connection->integrity_tfm);
+       crypto_free_ahash(connection->integrity_tfm);
        connection->integrity_tfm = crypto.integrity_tfm;
        if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
                /* Do this without trying to take connection->data.mutex again.  */
                __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
 
-       crypto_free_hash(connection->cram_hmac_tfm);
+       crypto_free_shash(connection->cram_hmac_tfm);
        connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
 
        mutex_unlock(&connection->resource->conf_update);
index 1957fe8601dcbb60ee2b8d3aa9367104fd503daf..050aaa1c03504e7bb1f90be3628997385b8fe4cc 100644 (file)
@@ -1627,7 +1627,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
 
        digest_size = 0;
        if (!trim && peer_device->connection->peer_integrity_tfm) {
-               digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+               digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
                /*
                 * FIXME: Receive the incoming digest into the receive buffer
                 *        here, together with its struct p_data?
@@ -1741,7 +1741,7 @@ static int recv_dless_read(struct drbd_peer_device *peer_device, struct drbd_req
 
        digest_size = 0;
        if (peer_device->connection->peer_integrity_tfm) {
-               digest_size = crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
+               digest_size = crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
                err = drbd_recv_all_warn(peer_device->connection, dig_in, digest_size);
                if (err)
                        return err;
@@ -3321,7 +3321,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
        int p_proto, p_discard_my_data, p_two_primaries, cf;
        struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
        char integrity_alg[SHARED_SECRET_MAX] = "";
-       struct crypto_hash *peer_integrity_tfm = NULL;
+       struct crypto_ahash *peer_integrity_tfm = NULL;
        void *int_dig_in = NULL, *int_dig_vv = NULL;
 
        p_proto         = be32_to_cpu(p->protocol);
@@ -3402,14 +3402,14 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
                 * change.
                 */
 
-               peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
+               peer_integrity_tfm = crypto_alloc_ahash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
                if (!peer_integrity_tfm) {
                        drbd_err(connection, "peer data-integrity-alg %s not supported\n",
                                 integrity_alg);
                        goto disconnect;
                }
 
-               hash_size = crypto_hash_digestsize(peer_integrity_tfm);
+               hash_size = crypto_ahash_digestsize(peer_integrity_tfm);
                int_dig_in = kmalloc(hash_size, GFP_KERNEL);
                int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
                if (!(int_dig_in && int_dig_vv)) {
@@ -3439,7 +3439,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
        mutex_unlock(&connection->resource->conf_update);
        mutex_unlock(&connection->data.mutex);
 
-       crypto_free_hash(connection->peer_integrity_tfm);
+       crypto_free_ahash(connection->peer_integrity_tfm);
        kfree(connection->int_dig_in);
        kfree(connection->int_dig_vv);
        connection->peer_integrity_tfm = peer_integrity_tfm;
@@ -3457,7 +3457,7 @@ static int receive_protocol(struct drbd_connection *connection, struct packet_in
 disconnect_rcu_unlock:
        rcu_read_unlock();
 disconnect:
-       crypto_free_hash(peer_integrity_tfm);
+       crypto_free_ahash(peer_integrity_tfm);
        kfree(int_dig_in);
        kfree(int_dig_vv);
        conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD);
@@ -3469,15 +3469,15 @@ disconnect:
  * return: NULL (alg name was "")
  *         ERR_PTR(error) if something goes wrong
  *         or the crypto hash ptr, if it worked out ok. */
-static struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
+static struct crypto_ahash *drbd_crypto_alloc_digest_safe(const struct drbd_device *device,
                const char *alg, const char *name)
 {
-       struct crypto_hash *tfm;
+       struct crypto_ahash *tfm;
 
        if (!alg[0])
                return NULL;
 
-       tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_ahash(alg, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                drbd_err(device, "Can not allocate \"%s\" as %s (reason: %ld)\n",
                        alg, name, PTR_ERR(tfm));
@@ -3530,8 +3530,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
        struct drbd_device *device;
        struct p_rs_param_95 *p;
        unsigned int header_size, data_size, exp_max_sz;
-       struct crypto_hash *verify_tfm = NULL;
-       struct crypto_hash *csums_tfm = NULL;
+       struct crypto_ahash *verify_tfm = NULL;
+       struct crypto_ahash *csums_tfm = NULL;
        struct net_conf *old_net_conf, *new_net_conf = NULL;
        struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
        const int apv = connection->agreed_pro_version;
@@ -3678,14 +3678,14 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
                        if (verify_tfm) {
                                strcpy(new_net_conf->verify_alg, p->verify_alg);
                                new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
-                               crypto_free_hash(peer_device->connection->verify_tfm);
+                               crypto_free_ahash(peer_device->connection->verify_tfm);
                                peer_device->connection->verify_tfm = verify_tfm;
                                drbd_info(device, "using verify-alg: \"%s\"\n", p->verify_alg);
                        }
                        if (csums_tfm) {
                                strcpy(new_net_conf->csums_alg, p->csums_alg);
                                new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
-                               crypto_free_hash(peer_device->connection->csums_tfm);
+                               crypto_free_ahash(peer_device->connection->csums_tfm);
                                peer_device->connection->csums_tfm = csums_tfm;
                                drbd_info(device, "using csums-alg: \"%s\"\n", p->csums_alg);
                        }
@@ -3729,9 +3729,9 @@ disconnect:
        mutex_unlock(&connection->resource->conf_update);
        /* just for completeness: actually not needed,
         * as this is not reached if csums_tfm was ok. */
-       crypto_free_hash(csums_tfm);
+       crypto_free_ahash(csums_tfm);
        /* but free the verify_tfm again, if csums_tfm did not work out */
-       crypto_free_hash(verify_tfm);
+       crypto_free_ahash(verify_tfm);
        conn_request_state(peer_device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
        return -EIO;
 }
@@ -4925,14 +4925,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
 {
        struct drbd_socket *sock;
        char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
-       struct scatterlist sg;
        char *response = NULL;
        char *right_response = NULL;
        char *peers_ch = NULL;
        unsigned int key_len;
        char secret[SHARED_SECRET_MAX]; /* 64 byte */
        unsigned int resp_size;
-       struct hash_desc desc;
+       SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
        struct packet_info pi;
        struct net_conf *nc;
        int err, rv;
@@ -4945,12 +4944,12 @@ static int drbd_do_auth(struct drbd_connection *connection)
        memcpy(secret, nc->shared_secret, key_len);
        rcu_read_unlock();
 
-       desc.tfm = connection->cram_hmac_tfm;
-       desc.flags = 0;
+       desc->tfm = connection->cram_hmac_tfm;
+       desc->flags = 0;
 
-       rv = crypto_hash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
+       rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
        if (rv) {
-               drbd_err(connection, "crypto_hash_setkey() failed with %d\n", rv);
+               drbd_err(connection, "crypto_shash_setkey() failed with %d\n", rv);
                rv = -1;
                goto fail;
        }
@@ -5011,7 +5010,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
                goto fail;
        }
 
-       resp_size = crypto_hash_digestsize(connection->cram_hmac_tfm);
+       resp_size = crypto_shash_digestsize(connection->cram_hmac_tfm);
        response = kmalloc(resp_size, GFP_NOIO);
        if (response == NULL) {
                drbd_err(connection, "kmalloc of response failed\n");
@@ -5019,10 +5018,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
                goto fail;
        }
 
-       sg_init_table(&sg, 1);
-       sg_set_buf(&sg, peers_ch, pi.size);
-
-       rv = crypto_hash_digest(&desc, &sg, sg.length, response);
+       rv = crypto_shash_digest(desc, peers_ch, pi.size, response);
        if (rv) {
                drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
                rv = -1;
@@ -5070,9 +5066,8 @@ static int drbd_do_auth(struct drbd_connection *connection)
                goto fail;
        }
 
-       sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
-
-       rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
+       rv = crypto_shash_digest(desc, my_challenge, CHALLENGE_LEN,
+                                right_response);
        if (rv) {
                drbd_err(connection, "crypto_hash_digest() failed with %d\n", rv);
                rv = -1;
@@ -5091,6 +5086,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
        kfree(peers_ch);
        kfree(response);
        kfree(right_response);
+       shash_desc_zero(desc);
 
        return rv;
 }
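
drbd_do_auth() now keeps the CRAM-HMAC transform as a crypto_shash and hashes flat buffers through an on-stack descriptor instead of building scatterlists. A hedged sketch of that shash pattern; the helper name and arguments are assumptions.

#include <crypto/hash.h>

static int foo_hmac_digest(struct crypto_shash *tfm,
			   const u8 *key, unsigned int keylen,
			   const void *data, unsigned int len, u8 *out)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	int err;

	desc->tfm = tfm;
	desc->flags = 0;

	err = crypto_shash_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_shash_digest(desc, data, len, out);
	shash_desc_zero(desc);

	return err;
}
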
index eff716c27b1fdf53caa7440ac5bad755c271ad43..4d87499f0d54829bd22594f97202f77fdd533f2e 100644 (file)
@@ -274,51 +274,56 @@ void drbd_request_endio(struct bio *bio)
                complete_master_bio(device, &m);
 }
 
-void drbd_csum_ee(struct crypto_hash *tfm, struct drbd_peer_request *peer_req, void *digest)
+void drbd_csum_ee(struct crypto_ahash *tfm, struct drbd_peer_request *peer_req, void *digest)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm);
        struct scatterlist sg;
        struct page *page = peer_req->pages;
        struct page *tmp;
        unsigned len;
 
-       desc.tfm = tfm;
-       desc.flags = 0;
+       ahash_request_set_tfm(req, tfm);
+       ahash_request_set_callback(req, 0, NULL, NULL);
 
        sg_init_table(&sg, 1);
-       crypto_hash_init(&desc);
+       crypto_ahash_init(req);
 
        while ((tmp = page_chain_next(page))) {
                /* all but the last page will be fully used */
                sg_set_page(&sg, page, PAGE_SIZE, 0);
-               crypto_hash_update(&desc, &sg, sg.length);
+               ahash_request_set_crypt(req, &sg, NULL, sg.length);
+               crypto_ahash_update(req);
                page = tmp;
        }
        /* and now the last, possibly only partially used page */
        len = peer_req->i.size & (PAGE_SIZE - 1);
        sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
-       crypto_hash_update(&desc, &sg, sg.length);
-       crypto_hash_final(&desc, digest);
+       ahash_request_set_crypt(req, &sg, digest, sg.length);
+       crypto_ahash_finup(req);
+       ahash_request_zero(req);
 }
 
-void drbd_csum_bio(struct crypto_hash *tfm, struct bio *bio, void *digest)
+void drbd_csum_bio(struct crypto_ahash *tfm, struct bio *bio, void *digest)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm);
        struct scatterlist sg;
        struct bio_vec bvec;
        struct bvec_iter iter;
 
-       desc.tfm = tfm;
-       desc.flags = 0;
+       ahash_request_set_tfm(req, tfm);
+       ahash_request_set_callback(req, 0, NULL, NULL);
 
        sg_init_table(&sg, 1);
-       crypto_hash_init(&desc);
+       crypto_ahash_init(req);
 
        bio_for_each_segment(bvec, bio, iter) {
                sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
-               crypto_hash_update(&desc, &sg, sg.length);
+               ahash_request_set_crypt(req, &sg, NULL, sg.length);
+               crypto_ahash_update(req);
        }
-       crypto_hash_final(&desc, digest);
+       ahash_request_set_crypt(req, NULL, digest, 0);
+       crypto_ahash_final(req);
+       ahash_request_zero(req);
 }
 
 /* MAYBE merge common code with w_e_end_ov_req */
@@ -337,7 +342,7 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
        if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
                goto out;
 
-       digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+       digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (digest) {
                sector_t sector = peer_req->i.sector;
@@ -1113,7 +1118,7 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
                 * a real fix would be much more involved,
                 * introducing more locking mechanisms */
                if (peer_device->connection->csums_tfm) {
-                       digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
+                       digest_size = crypto_ahash_digestsize(peer_device->connection->csums_tfm);
                        D_ASSERT(device, digest_size == di->digest_size);
                        digest = kmalloc(digest_size, GFP_NOIO);
                }
@@ -1163,7 +1168,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
        if (unlikely(cancel))
                goto out;
 
-       digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
+       digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
        digest = kmalloc(digest_size, GFP_NOIO);
        if (!digest) {
                err = 1;        /* terminate the connection in case the allocation failed */
@@ -1235,7 +1240,7 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
        di = peer_req->digest;
 
        if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
-               digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm);
+               digest_size = crypto_ahash_digestsize(peer_device->connection->verify_tfm);
                digest = kmalloc(digest_size, GFP_NOIO);
                if (digest) {
                        drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest);
index 81ea69fee7ca183313b8e8322833062262279187..4a876785b68cd5c550dbbb1d2b63071446454081 100644 (file)
@@ -5185,8 +5185,7 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
 
 out_err:
        rbd_dev_unparent(rbd_dev);
-       if (parent)
-               rbd_dev_destroy(parent);
+       rbd_dev_destroy(parent);
        return ret;
 }
 
index fa893c3ec4087f39382f3dc83b968c9859f41cca..ebd641b85396d63a7a07a2039b008008d7f0d459 100644 (file)
@@ -497,6 +497,7 @@ static int ath3k_probe(struct usb_interface *intf,
        /* match device ID in ath3k blacklist table */
        if (!id->driver_info) {
                const struct usb_device_id *match;
+
                match = usb_match_id(intf, ath3k_blist_tbl);
                if (match)
                        id = match;
index 129d47bcc5fc8d1b9d0ef0e087bbf36cee1336be..9a92c072a485def0b2730340503d1984d5a6dc1d 100644 (file)
@@ -132,7 +132,7 @@ config SUNXI_RSB
          and AC100/AC200 ICs.
 
 config UNIPHIER_SYSTEM_BUS
-       bool "UniPhier System Bus driver"
+       tristate "UniPhier System Bus driver"
        depends on ARCH_UNIPHIER && OF
        default y
        help
index 25996e2561105ac615cf4cc99c468f7a8db0f3df..795c9d9c96a6d5ae08c036a221ae1f9a4b1f9c1b 100644 (file)
@@ -330,7 +330,7 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
                cmd = RSB_CMD_RD32;
                break;
        default:
-               dev_err(rsb->dev, "Invalid access width: %d\n", len);
+               dev_err(rsb->dev, "Invalid access width: %zd\n", len);
                return -EINVAL;
        }
 
@@ -372,7 +372,7 @@ static int sunxi_rsb_write(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
                cmd = RSB_CMD_WR32;
                break;
        default:
-               dev_err(rsb->dev, "Invalid access width: %d\n", len);
+               dev_err(rsb->dev, "Invalid access width: %zd\n", len);
                return -EINVAL;
        }
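Both format-string fixes above matter because the length being printed has a size_t-sized type; %d expects an int, which format checkers flag and which can misprint on 64-bit targets. A generic, standalone illustration of the 'z' length modifier (the value is arbitrary):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t len = 3;

	/* %zu pairs with size_t and %zd with ssize_t; plain %d is a type
	 * mismatch wherever sizeof(size_t) != sizeof(int). */
	printf("Invalid access width: %zu\n", len);
	return 0;
}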
 
index 6575c0fe6a4ea3a1e3bc1bae99010eefd9d7ff94..c3cb76b363c63c54d67343dacf4c9ec20032a95f 100644 (file)
@@ -192,8 +192,10 @@ static int __init vexpress_config_init(void)
        /* Need the config devices early, before the "normal" devices... */
        for_each_compatible_node(node, NULL, "arm,vexpress,config-bus") {
                err = vexpress_config_populate(node);
-               if (err)
+               if (err) {
+                       of_node_put(node);
                        break;
+               }
        }
 
        return err;
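The hunk above fixes a device-node refcount leak: for_each_compatible_node() takes a reference on the node it hands to each iteration and drops it when advancing, so breaking out early leaves the current node's reference held unless the loop drops it itself. A sketch of the pattern with a hypothetical compatible string and helper:

#include <linux/of.h>

static int visit(struct device_node *node);	/* hypothetical per-node work */

static int populate_all(void)
{
	struct device_node *node;
	int err = 0;

	for_each_compatible_node(node, NULL, "vendor,example-bus") {
		err = visit(node);
		if (err) {
			of_node_put(node);	/* balance the iterator's get */
			break;
		}
	}

	return err;
}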
index 1341a94cc7793aa0425eb83c4e446393be7f60c6..e657f989745e13bfb703989ef06a73fa767acfe2 100644 (file)
@@ -555,8 +555,10 @@ static unsigned int intel_gtt_mappable_entries(void)
 static void intel_gtt_teardown_scratch_page(void)
 {
        set_pages_wb(intel_private.scratch_page, 1);
-       pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
-                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       if (intel_private.needs_dmar)
+               pci_unmap_page(intel_private.pcidev,
+                              intel_private.scratch_page_dma,
+                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
        __free_page(intel_private.scratch_page);
 }
 
@@ -1430,6 +1432,8 @@ void intel_gmch_remove(void)
        if (--intel_private.refcount)
                return;
 
+       if (intel_private.scratch_page)
+               intel_gtt_teardown_scratch_page();
        if (intel_private.pcidev)
                pci_dev_put(intel_private.pcidev);
        if (intel_private.bridge_dev)
index 05755441250c1de8495725ea80253957b71abd07..fdced547ad5961ff722cfb438c0e33de9f5fabb2 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/delay.h>
 #include <linux/vmalloc.h>
 #include <asm/uninorth.h>
-#include <asm/pci-bridge.h>
 #include <asm/prom.h>
 #include <asm/pmac_feature.h>
 #include "agp.h"
index dbf22719462f9481006b5c874ca99200342b4bee..f7d89387bd6260ae994fadf3923e5209f185da37 100644 (file)
@@ -77,7 +77,7 @@ config HW_RANDOM_ATMEL
 
 config HW_RANDOM_BCM63XX
        tristate "Broadcom BCM63xx Random Number Generator support"
-       depends on BCM63XX
+       depends on BCM63XX || BMIPS_GENERIC
        default HW_RANDOM
        ---help---
          This driver provides kernel-side support for the Random Number
@@ -372,6 +372,7 @@ config HW_RANDOM_XGENE
 config HW_RANDOM_STM32
        tristate "STMicroelectronics STM32 random number generator"
        depends on HW_RANDOM && (ARCH_STM32 || COMPILE_TEST)
+       depends on HAS_IOMEM
        help
          This driver provides kernel-side support for the Random Number
          Generator hardware found on STM32 microcontrollers.
index 4b31f1387f37fa9cbe8f09afc682df62aedab384..38553f0500c90955b2f188fd54ff5548eb16786b 100644 (file)
@@ -79,10 +79,8 @@ static int bcm63xx_rng_data_read(struct hwrng *rng, u32 *data)
 static int bcm63xx_rng_probe(struct platform_device *pdev)
 {
        struct resource *r;
-       struct clk *clk;
        int ret;
        struct bcm63xx_rng_priv *priv;
-       struct hwrng *rng;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
@@ -132,10 +130,17 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
        return 0;
 }
 
+static const struct of_device_id bcm63xx_rng_of_match[] = {
+       { .compatible = "brcm,bcm6368-rng", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, bcm63xx_rng_of_match);
+
 static struct platform_driver bcm63xx_rng_driver = {
        .probe          = bcm63xx_rng_probe,
        .driver         = {
                .name   = "bcm63xx-rng",
+               .of_match_table = bcm63xx_rng_of_match,
        },
 };
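The new table and .of_match_table hookup above are what let the driver bind to a devicetree node and autoload as a module: the platform core matches against the table, and MODULE_DEVICE_TABLE(of, ...) exports it for module aliases. A sketch of how a probe routine could consult the matched entry; the probe function name is hypothetical, the table is the one added above:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static int example_rng_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_device(bcm63xx_rng_of_match, &pdev->dev);
	if (match)
		dev_info(&pdev->dev, "bound via %s\n", match->compatible);

	return 0;
}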
 
index 843d6f6aee7a30ec60f7f1f93de42b0d3dc6dc8a..3b06c1d6cfb280a18bd13716bfdbf2c38fa085ed 100644 (file)
@@ -743,6 +743,16 @@ static const struct of_device_id n2rng_match[] = {
                .compatible     = "SUNW,kt-rng",
                .data           = (void *) 1,
        },
+       {
+               .name           = "random-number-generator",
+               .compatible     = "ORCL,m4-rng",
+               .data           = (void *) 1,
+       },
+       {
+               .name           = "random-number-generator",
+               .compatible     = "ORCL,m7-rng",
+               .data           = (void *) 1,
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, n2rng_match);
index 9fda22e3387e59030b4a16f4ab1712bb1555b0ce..8671236013f800d67e2d0375940b4f26818ba82c 100644 (file)
@@ -68,6 +68,7 @@
 #include <linux/of_platform.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/acpi.h>
 
 #ifdef CONFIG_PARISC
 #include <asm/hardware.h>      /* for register_parisc_driver() stuff */
@@ -848,7 +849,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                smi_inc_stat(smi_info, complete_transactions);
 
                handle_transaction_done(smi_info);
-               si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+               goto restart;
        } else if (si_sm_result == SI_SM_HOSED) {
                smi_inc_stat(smi_info, hosed_count);
 
@@ -865,7 +866,7 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                         */
                        return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
                }
-               si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
+               goto restart;
        }
 
        /*
@@ -1362,12 +1363,12 @@ MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the"
                 " default scan of the interfaces identified via DMI");
 #endif
 module_param_named(tryplatform, si_tryplatform, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(tryplatform, "Setting this to zero will disable the"
                 " default scan of the interfaces identified via platform"
                 " interfaces like openfirmware");
 #ifdef CONFIG_PCI
 module_param_named(trypci, si_trypci, bool, 0);
-MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the"
+MODULE_PARM_DESC(trypci, "Setting this to zero will disable the"
                 " default scan of the interfaces identified via pci");
 #endif
 module_param_named(trydefaults, si_trydefaults, bool, 0);
@@ -2054,8 +2055,6 @@ static int hardcode_find_bmc(void)
 
 #ifdef CONFIG_ACPI
 
-#include <linux/acpi.h>
-
 /*
  * Once we get an ACPI failure, we don't try any more, because we go
  * through the tables sequentially.  Once we don't find a table, there
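The two MODULE_PARM_DESC() changes above repair a copy-and-paste error: the description must use the same user-visible name as module_param_named(), otherwise modinfo attaches the text to the wrong parameter. In sketch form, mirroring the tryplatform case:

#include <linux/module.h>

static bool si_tryplatform = true;

/* the first argument of both macros is the externally visible name */
module_param_named(tryplatform, si_tryplatform, bool, 0);
MODULE_PARM_DESC(tryplatform,
		 "Setting this to zero will disable the default scan of the interfaces identified via platform interfaces like openfirmware");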
index 5f1c3d08ba6540e4afc9469f888e442862d09fa3..8b3be8b9257398b7f0dd306158a6428ea0949ad9 100644 (file)
@@ -920,23 +920,18 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
                        msg_done_handler(ssif_info, -EIO, NULL, 0);
                }
        } else {
+               /* Ready to request the result. */
                unsigned long oflags, *flags;
-               bool got_alert;
 
                ssif_inc_stat(ssif_info, sent_messages);
                ssif_inc_stat(ssif_info, sent_messages_parts);
 
                flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
-               got_alert = ssif_info->got_alert;
-               if (got_alert) {
+               if (ssif_info->got_alert) {
+                       /* The response is already available; start fetching it now. */

                        ssif_info->got_alert = false;
-                       ssif_info->waiting_alert = false;
-               }
-
-               if (got_alert) {
                        ipmi_ssif_unlock_cond(ssif_info, flags);
-                       /* The alert already happened, try now. */
-                       retry_timeout((unsigned long) ssif_info);
+                       start_get(ssif_info);
                } else {
                        /* Wait a jiffie then request the next message */
                        ssif_info->waiting_alert = true;
index 45df4bf914f89fc6ec1fb2398c272f1b6f0444b9..22c27652e46a8dd6851a56edd408e39095eaa4ad 100644 (file)
@@ -1349,7 +1349,7 @@ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty)
        /* TODO:disable interrupts instead of reset to preserve signal states */
        reset_device(info);
 
-       if (!tty || tty->termios.c_cflag & HUPCL) {
+       if (!tty || C_HUPCL(tty)) {
                info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
                set_signals(info);
        }
@@ -1390,7 +1390,7 @@ static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
        port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
        get_signals(info);
 
-       if (info->netcount || (tty && (tty->termios.c_cflag & CREAD)))
+       if (info->netcount || (tty && C_CREAD(tty)))
                rx_start(info);
 
        spin_unlock_irqrestore(&info->lock, flags);
@@ -1733,7 +1733,7 @@ static void mgslpc_throttle(struct tty_struct * tty)
        if (I_IXOFF(tty))
                mgslpc_send_xchar(tty, STOP_CHAR(tty));
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock, flags);
                info->serial_signals &= ~SerialSignal_RTS;
                set_signals(info);
@@ -1762,7 +1762,7 @@ static void mgslpc_unthrottle(struct tty_struct * tty)
                        mgslpc_send_xchar(tty, START_CHAR(tty));
        }
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock, flags);
                info->serial_signals |= SerialSignal_RTS;
                set_signals(info);
@@ -2306,8 +2306,7 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
        mgslpc_change_params(info, tty);
 
        /* Handle transition to B0 status */
-       if (old_termios->c_cflag & CBAUD &&
-           !(tty->termios.c_cflag & CBAUD)) {
+       if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
                info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
                spin_lock_irqsave(&info->lock, flags);
                set_signals(info);
@@ -2315,21 +2314,17 @@ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_term
        }
 
        /* Handle transition away from B0 status */
-       if (!(old_termios->c_cflag & CBAUD) &&
-           tty->termios.c_cflag & CBAUD) {
+       if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
                info->serial_signals |= SerialSignal_DTR;
-               if (!(tty->termios.c_cflag & CRTSCTS) ||
-                   !test_bit(TTY_THROTTLED, &tty->flags)) {
+               if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
                        info->serial_signals |= SerialSignal_RTS;
-               }
                spin_lock_irqsave(&info->lock, flags);
                set_signals(info);
                spin_unlock_irqrestore(&info->lock, flags);
        }
 
        /* Handle turning off CRTSCTS */
-       if (old_termios->c_cflag & CRTSCTS &&
-           !(tty->termios.c_cflag & CRTSCTS)) {
+       if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
                tty->hw_stopped = 0;
                tx_release(tty);
        }
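The conversions above replace open-coded tests of tty->termios.c_cflag with the C_*() helpers from <linux/tty.h> (C_BAUD, C_CREAD, C_HUPCL, C_CRTSCTS, ...), which expand to the same flag tests. A small sketch of the equivalence, with a hypothetical helper name:

#include <linux/tty.h>

static bool wants_hw_flow(struct tty_struct *tty)
{
	/* C_CRTSCTS(tty) is shorthand for (tty->termios.c_cflag & CRTSCTS) */
	return C_CRTSCTS(tty);
}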
index a15ce4ef39cd97cbef47f5ca31579700ffb0e572..b098d2d0b7c4a7cb8470f4f3fe5d4c2c461a3f04 100644 (file)
@@ -171,7 +171,7 @@ static const struct tty_operations ttyprintk_ops = {
        .ioctl = tpk_ioctl,
 };
 
-static struct tty_port_operations null_ops = { };
+static const struct tty_port_operations null_ops = { };
 
 static struct tty_driver *ttyprintk_driver;
 
index ebce98033fbb76ea687d439b5842e707f62eeb80..5759d75780cf70b4d30becc7fc442dd8cad1e4d1 100644 (file)
@@ -177,6 +177,8 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
        GATE(0, "gpll_armclk", "gpll", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(0), 6, GFLAGS),
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        /*
         * Clock-Architecture Diagram 2
         */
@@ -187,6 +189,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(0), 8, GFLAGS),
        COMPOSITE_NOGATE(0, "ddrphy2x", mux_ddrphy_p, CLK_IGNORE_UNUSED,
                        RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+       FACTOR(0, "ddrphy", "ddrphy2x", 0, 1, 2),
 
        COMPOSITE_NOMUX(0, "pclk_dbg", "armclk", CLK_IGNORE_UNUSED,
                        RK2928_CLKSEL_CON(1), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
@@ -263,6 +266,8 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
        COMPOSITE(0, "aclk_vcodec", mux_pll_src_3plls_p, 0,
                        RK2928_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 11, GFLAGS),
+       FACTOR_GATE(HCLK_VCODEC, "hclk_vcodec", "aclk_vcodec", 0, 1, 4,
+                       RK2928_CLKGATE_CON(3), 12, GFLAGS),
 
        COMPOSITE(0, "aclk_hvec", mux_pll_src_3plls_p, 0,
                        RK2928_CLKSEL_CON(20), 0, 2, MFLAGS, 2, 5, DFLAGS,
@@ -351,6 +356,7 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
        COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
                        RK2928_CLKSEL_CON(21), 9, 5, DFLAGS,
                        RK2928_CLKGATE_CON(2), 6, GFLAGS),
+       FACTOR(0, "sclk_macref_out", "hclk_peri_src", 0, 1, 2),
 
        MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
                        RK2928_CLKSEL_CON(31), 0, 1, MFLAGS),
@@ -376,11 +382,9 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
        GATE(ACLK_VIO, "aclk_vio", "aclk_disp1_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 13, GFLAGS),
        GATE(ACLK_LCDC, "aclk_lcdc", "aclk_disp1_pre", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
 
-       GATE(HCLK_VIO_BUS, "hclk_vio_bus", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
+       GATE(HCLK_VIO_BUS, "hclk_vio_bus", "hclk_disp_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(6), 12, GFLAGS),
        GATE(HCLK_LCDC, "hclk_lcdc", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(9), 5, GFLAGS),
 
-       /* hclk_video gates */
-       GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_disp_pre", 0, RK2928_CLKGATE_CON(3), 12, GFLAGS),
 
        /* xin24m gates */
        GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK2928_CLKGATE_CON(10), 0, GFLAGS),
@@ -444,34 +448,11 @@ static void __init rk3036_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 
-       /* xin12m is created by an cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
        if (IS_ERR(clk))
                pr_warn("%s: could not register clock usb480m: %ld\n",
                        __func__, PTR_ERR(clk));
 
-       clk = clk_register_fixed_factor(NULL, "ddrphy", "ddrphy2x", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock ddrphy: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_vcodec_pre",
-                                       "aclk_vcodec", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "sclk_macref_out",
-                                       "hclk_peri_src", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock sclk_macref_out: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        rockchip_clk_register_plls(rk3036_pll_clks,
                                   ARRAY_SIZE(rk3036_pll_clks),
                                   RK3036_GRF_SOC_STATUS0);
index 7f7444cbf6fcc0e62ae85c195792edc10daca2f3..40bab39014915087e691257caf0b063b72fd04bf 100644 (file)
@@ -339,13 +339,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        INVERTER(0, "pclk_cif0", "pclkin_cif0",
                        RK2928_CLKSEL_CON(30), 8, IFLAGS),
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        /*
         * the 480m are generated inside the usb block from these clocks,
         * but they are also a source for the hsicphy clock.
         */
-       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(1), 5, GFLAGS),
-       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(1), 6, GFLAGS),
 
        COMPOSITE(0, "mac_src", mux_mac_p, 0,
@@ -605,7 +607,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
        GATE(SCLK_TIMER2, "timer2", "xin24m", 0,
                        RK2928_CLKGATE_CON(3), 2, GFLAGS),
 
-       COMPOSITE_NOMUX(0, "sclk_tsadc", "xin24m", 0,
+       COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin24m", 0,
                        RK2928_CLKSEL_CON(34), 0, 16, DFLAGS,
                        RK2928_CLKGATE_CON(2), 15, GFLAGS),
 
@@ -662,11 +664,11 @@ static struct clk_div_table div_rk3188_aclk_core_t[] = {
        { /* sentinel */ },
 };
 
-PNAME(mux_hsicphy_p)           = { "sclk_otgphy0", "sclk_otgphy1",
+PNAME(mux_hsicphy_p)           = { "sclk_otgphy0_480m", "sclk_otgphy1_480m",
                                    "gpll", "cpll" };
 
 static struct rockchip_clk_branch rk3188_i2s0_fracmux __initdata =
-       MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+       MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, CLK_SET_RATE_PARENT,
                        RK2928_CLKSEL_CON(3), 8, 2, MFLAGS);
 
 static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
@@ -722,7 +724,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
        COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
                        RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
                        RK2928_CLKGATE_CON(0), 9, GFLAGS),
-       COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", 0,
+       COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", CLK_SET_RATE_PARENT,
                        RK2928_CLKSEL_CON(7), 0,
                        RK2928_CLKGATE_CON(0), 10, GFLAGS,
                        &rk3188_i2s0_fracmux),
@@ -748,12 +750,12 @@ static const char *const rk3188_critical_clocks[] __initconst = {
        "hclk_peri",
        "pclk_cpu",
        "pclk_peri",
+       "hclk_cpubus"
 };
 
 static void __init rk3188_common_clk_init(struct device_node *np)
 {
        void __iomem *reg_base;
-       struct clk *clk;
 
        reg_base = of_iomap(np, 0);
        if (!reg_base) {
@@ -763,17 +765,6 @@ static void __init rk3188_common_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 
-       /* xin12m is created by an cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock usb480m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        rockchip_clk_register_branches(common_clk_branches,
                                  ARRAY_SIZE(common_clk_branches));
 
index 981a50205339609617c271556c9d3227c0b3bd2c..c515915850a1dc6a3bf8f0186fb9d13c2484ccaa 100644 (file)
@@ -187,7 +187,7 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(7), 1, GFLAGS),
        GATE(0, "ddrc", "ddrphy_pre", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(8), 5, GFLAGS),
-       GATE(0, "ddrphy", "ddrphy_pre", CLK_IGNORE_UNUSED,
+       FACTOR_GATE(0, "ddrphy", "ddrphy4x", CLK_IGNORE_UNUSED, 1, 4,
                        RK2928_CLKGATE_CON(7), 0, GFLAGS),
 
        /* PD_CORE */
@@ -240,13 +240,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
        COMPOSITE(0, "aclk_vpu_pre", mux_pll_src_4plls_p, 0,
                        RK2928_CLKSEL_CON(32), 5, 2, MFLAGS, 0, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 11, GFLAGS),
-       GATE(0, "hclk_vpu_src", "aclk_vpu_pre", 0,
+       FACTOR_GATE(0, "hclk_vpu_pre", "aclk_vpu_pre", 0, 1, 4,
                        RK2928_CLKGATE_CON(4), 4, GFLAGS),
 
        COMPOSITE(0, "aclk_rkvdec_pre", mux_pll_src_4plls_p, 0,
                        RK2928_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 2, GFLAGS),
-       GATE(0, "hclk_rkvdec_src", "aclk_rkvdec_pre", 0,
+       FACTOR_GATE(0, "hclk_rkvdec_pre", "aclk_rkvdec_pre", 0, 1, 4,
                        RK2928_CLKGATE_CON(4), 5, GFLAGS),
 
        COMPOSITE(0, "sclk_vdec_cabac", mux_pll_src_4plls_p, 0,
@@ -371,6 +371,8 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
        MUX(0, "dclk_vop", mux_dclk_vop_p, 0,
                        RK2928_CLKSEL_CON(27), 1, 1, MFLAGS),
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        COMPOSITE(0, "i2s0_src", mux_pll_src_2plls_p, 0,
                        RK2928_CLKSEL_CON(9), 15, 1, MFLAGS, 0, 7, DFLAGS,
                        RK2928_CLKGATE_CON(0), 3, GFLAGS),
@@ -605,13 +607,13 @@ static struct rockchip_clk_branch rk3228_clk_branches[] __initdata = {
 
        /* PD_MMC */
        MMC(SCLK_SDMMC_DRV,    "sdmmc_drv",    "sclk_sdmmc", RK3228_SDMMC_CON0, 1),
-       MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 1),
+       MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3228_SDMMC_CON1, 0),
 
        MMC(SCLK_SDIO_DRV,     "sdio_drv",     "sclk_sdio",  RK3228_SDIO_CON0,  1),
-       MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  1),
+       MMC(SCLK_SDIO_SAMPLE,  "sdio_sample",  "sclk_sdio",  RK3228_SDIO_CON1,  0),
 
        MMC(SCLK_EMMC_DRV,     "emmc_drv",     "sclk_emmc",  RK3228_EMMC_CON0,  1),
-       MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  1),
+       MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3228_EMMC_CON1,  0),
 };
 
 static const char *const rk3228_critical_clocks[] __initconst = {
@@ -624,7 +626,6 @@ static const char *const rk3228_critical_clocks[] __initconst = {
 static void __init rk3228_clk_init(struct device_node *np)
 {
        void __iomem *reg_base;
-       struct clk *clk;
 
        reg_base = of_iomap(np, 0);
        if (!reg_base) {
@@ -634,29 +635,6 @@ static void __init rk3228_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 
-       /* xin12m is created by an cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                               __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "ddrphy_pre", "ddrphy4x", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock ddrphy_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_vpu_pre",
-                                       "hclk_vpu_src", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_vpu_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_rkvdec_pre",
-                                       "hclk_rkvdec_src", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_rkvdec_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        rockchip_clk_register_plls(rk3228_pll_clks,
                                   ARRAY_SIZE(rk3228_pll_clks),
                                   RK3228_GRF_SOC_STATUS0);
index 984fc187d12ecf6e80d23bb52d2868c75c0daeca..3cb72163a5122ba9ef7695f0c5ba5d679afa9bd7 100644 (file)
@@ -195,8 +195,8 @@ PNAME(mux_hsadcout_p)       = { "hsadc_src", "ext_hsadc" };
 PNAME(mux_edp_24m_p)   = { "ext_edp_24m", "xin24m" };
 PNAME(mux_tspout_p)    = { "cpll", "gpll", "npll", "xin27m" };
 
-PNAME(mux_usbphy480m_p)                = { "sclk_otgphy1", "sclk_otgphy2",
-                                   "sclk_otgphy0" };
+PNAME(mux_usbphy480m_p)                = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
+                                   "sclk_otgphy0_480m" };
 PNAME(mux_hsicphy480m_p)       = { "cpll", "gpll", "usbphy480m_src" };
 PNAME(mux_hsicphy12m_p)                = { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
 
@@ -333,6 +333,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(0), 7, GFLAGS),
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
                        RK3288_CLKSEL_CON(4), 15, 1, MFLAGS, 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(4), 1, GFLAGS),
@@ -399,12 +401,10 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
         */
        GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vdpu", 0,
                RK3288_CLKGATE_CON(9), 0, GFLAGS),
-       /*
-        * We introduce a virtul node of hclk_vodec_pre_v to split one clock
-        * struct with a gate and a fix divider into two node in software.
-        */
-       GATE(0, "hclk_vcodec_pre_v", "aclk_vdpu", 0,
+
+       FACTOR_GATE(0, "hclk_vcodec_pre", "aclk_vdpu", 0, 1, 4,
                RK3288_CLKGATE_CON(3), 10, GFLAGS),
+
        GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0,
                RK3288_CLKGATE_CON(9), 1, GFLAGS),
 
@@ -537,11 +537,11 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
                        RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK3288_CLKGATE_CON(4), 10, GFLAGS),
 
-       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "xin24m", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 4, GFLAGS),
-       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "xin24m", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 5, GFLAGS),
-       GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", CLK_IGNORE_UNUSED,
+       GATE(SCLK_OTGPHY2, "sclk_otgphy2", "xin24m", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 6, GFLAGS),
        GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 7, GFLAGS),
@@ -888,24 +888,6 @@ static void __init rk3288_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, rk3288_cru_base, CLK_NR_CLKS);
 
-       /* xin12m is created by an cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-
-       clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock usb480m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_vcodec_pre",
-                                       "hclk_vcodec_pre_v", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        /* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
        clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
        if (IS_ERR(clk))
index be0ede52226994a0ae7248c8098285e0ebfd4a86..31facd8426f754640a63b2984cf27aa7cb4d74d6 100644 (file)
@@ -121,7 +121,7 @@ PNAME(mux_i2s_2ch_p)                = { "i2s_2ch_src", "i2s_2ch_frac",
                                    "dummy", "xin12m" };
 PNAME(mux_spdif_8ch_p)         = { "spdif_8ch_pre", "spdif_8ch_frac",
                                    "ext_i2s", "xin12m" };
-PNAME(mux_edp_24m_p)           = { "dummy", "xin24m" };
+PNAME(mux_edp_24m_p)           = { "xin24m", "dummy" };
 PNAME(mux_vip_out_p)           = { "vip_src", "xin24m" };
 PNAME(mux_usbphy480m_p)                = { "usbotg_out", "xin24m" };
 PNAME(mux_hsic_usbphy480m_p)   = { "usbotg_out", "dummy" };
@@ -165,7 +165,7 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkb_data = {
        .core_reg = RK3368_CLKSEL_CON(0),
        .div_core_shift = 0,
        .div_core_mask = 0x1f,
-       .mux_core_shift = 15,
+       .mux_core_shift = 7,
 };
 
 static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
@@ -218,29 +218,29 @@ static const struct rockchip_cpuclk_reg_data rk3368_cpuclkl_data = {
        }
 
 static struct rockchip_cpuclk_rate_table rk3368_cpuclkb_rates[] __initdata = {
-       RK3368_CPUCLKB_RATE(1512000000, 2, 6, 6),
-       RK3368_CPUCLKB_RATE(1488000000, 2, 5, 5),
-       RK3368_CPUCLKB_RATE(1416000000, 2, 5, 5),
-       RK3368_CPUCLKB_RATE(1200000000, 2, 4, 4),
-       RK3368_CPUCLKB_RATE(1008000000, 2, 4, 4),
-       RK3368_CPUCLKB_RATE( 816000000, 2, 3, 3),
-       RK3368_CPUCLKB_RATE( 696000000, 2, 3, 3),
-       RK3368_CPUCLKB_RATE( 600000000, 2, 2, 2),
-       RK3368_CPUCLKB_RATE( 408000000, 2, 2, 2),
-       RK3368_CPUCLKB_RATE( 312000000, 2, 2, 2),
+       RK3368_CPUCLKB_RATE(1512000000, 1, 5, 5),
+       RK3368_CPUCLKB_RATE(1488000000, 1, 4, 4),
+       RK3368_CPUCLKB_RATE(1416000000, 1, 4, 4),
+       RK3368_CPUCLKB_RATE(1200000000, 1, 3, 3),
+       RK3368_CPUCLKB_RATE(1008000000, 1, 3, 3),
+       RK3368_CPUCLKB_RATE( 816000000, 1, 2, 2),
+       RK3368_CPUCLKB_RATE( 696000000, 1, 2, 2),
+       RK3368_CPUCLKB_RATE( 600000000, 1, 1, 1),
+       RK3368_CPUCLKB_RATE( 408000000, 1, 1, 1),
+       RK3368_CPUCLKB_RATE( 312000000, 1, 1, 1),
 };
 
 static struct rockchip_cpuclk_rate_table rk3368_cpuclkl_rates[] __initdata = {
-       RK3368_CPUCLKL_RATE(1512000000, 2, 7, 7),
-       RK3368_CPUCLKL_RATE(1488000000, 2, 6, 6),
-       RK3368_CPUCLKL_RATE(1416000000, 2, 6, 6),
-       RK3368_CPUCLKL_RATE(1200000000, 2, 5, 5),
-       RK3368_CPUCLKL_RATE(1008000000, 2, 5, 5),
-       RK3368_CPUCLKL_RATE( 816000000, 2, 4, 4),
-       RK3368_CPUCLKL_RATE( 696000000, 2, 3, 3),
-       RK3368_CPUCLKL_RATE( 600000000, 2, 3, 3),
-       RK3368_CPUCLKL_RATE( 408000000, 2, 2, 2),
-       RK3368_CPUCLKL_RATE( 312000000, 2, 2, 2),
+       RK3368_CPUCLKL_RATE(1512000000, 1, 6, 6),
+       RK3368_CPUCLKL_RATE(1488000000, 1, 5, 5),
+       RK3368_CPUCLKL_RATE(1416000000, 1, 5, 5),
+       RK3368_CPUCLKL_RATE(1200000000, 1, 4, 4),
+       RK3368_CPUCLKL_RATE(1008000000, 1, 4, 4),
+       RK3368_CPUCLKL_RATE( 816000000, 1, 3, 3),
+       RK3368_CPUCLKL_RATE( 696000000, 1, 2, 2),
+       RK3368_CPUCLKL_RATE( 600000000, 1, 2, 2),
+       RK3368_CPUCLKL_RATE( 408000000, 1, 1, 1),
+       RK3368_CPUCLKL_RATE( 312000000, 1, 1, 1),
 };
 
 static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
@@ -248,6 +248,8 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
         * Clock-Architecture Diagram 2
         */
 
+       FACTOR(0, "xin12m", "xin24m", 0, 1, 2),
+
        MUX(SCLK_USBPHY480M, "usbphy_480m", mux_usbphy480m_p, CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(13), 8, 1, MFLAGS),
 
@@ -299,7 +301,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
        COMPOSITE_NOGATE_DIVTBL(0, "ddrphy_src", mux_ddrphy_p, CLK_IGNORE_UNUSED,
                        RK3368_CLKSEL_CON(13), 4, 1, MFLAGS, 0, 2, DFLAGS, div_ddrphy_t),
 
-       GATE(0, "sclk_ddr", "ddrphy_div4", CLK_IGNORE_UNUSED,
+       FACTOR_GATE(0, "sclk_ddr", "ddrphy_src", CLK_IGNORE_UNUSED, 1, 4,
                        RK3368_CLKGATE_CON(6), 14, GFLAGS),
        GATE(0, "sclk_ddr4x", "ddrphy_src", CLK_IGNORE_UNUSED,
                        RK3368_CLKGATE_CON(6), 15, GFLAGS),
@@ -353,7 +355,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
        COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(32), 0,
                        RK3368_CLKGATE_CON(6), 5, GFLAGS),
-       COMPOSITE_NODIV(SCLK_SPDIF_8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
+       COMPOSITE_NODIV(SCLK_SPDIF_8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(31), 8, 2, MFLAGS,
                        RK3368_CLKGATE_CON(6), 6, GFLAGS),
        COMPOSITE(0, "i2s_2ch_src", mux_pll_src_cpll_gpll_p, 0,
@@ -362,7 +364,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
        COMPOSITE_FRAC(0, "i2s_2ch_frac", "i2s_2ch_src", CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(54), 0,
                        RK3368_CLKGATE_CON(5), 14, GFLAGS),
-       COMPOSITE_NODIV(SCLK_I2S_2CH, "sclk_i2s_2ch", mux_i2s_2ch_p, 0,
+       COMPOSITE_NODIV(SCLK_I2S_2CH, "sclk_i2s_2ch", mux_i2s_2ch_p, CLK_SET_RATE_PARENT,
                        RK3368_CLKSEL_CON(53), 8, 2, MFLAGS,
                        RK3368_CLKGATE_CON(5), 15, GFLAGS),
 
@@ -384,18 +386,18 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
         * Clock-Architecture Diagram 3
         */
 
-       COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb_p, 0,
+       COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
                        RK3368_CLKSEL_CON(15), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK3368_CLKGATE_CON(4), 6, GFLAGS),
-       COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb_p, 0,
+       COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_npll_usb_p, 0,
                        RK3368_CLKSEL_CON(15), 14, 2, MFLAGS, 8, 5, DFLAGS,
                        RK3368_CLKGATE_CON(4), 7, GFLAGS),
 
        /*
-        * We introduce a virtual node of hclk_vodec_pre_v to split one clock
-        * struct with a gate and a fix divider into two node in software.
+        * We use aclk_vdpu by default (GRF_SOC_CON0[7] setting in the system),
+        * so we ignore the mux and model the clock nodes as follows:
         */
-       GATE(0, "hclk_video_pre_v", "aclk_vdpu", 0,
+       FACTOR_GATE(0, "hclk_video_pre", "aclk_vdpu", 0, 1, 4,
                RK3368_CLKGATE_CON(4), 8, GFLAGS),
 
        COMPOSITE(0, "sclk_hevc_cabac_src", mux_pll_src_cpll_gpll_npll_usb_p, 0,
@@ -442,7 +444,7 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
        GATE(SCLK_HDMI_HDCP, "sclk_hdmi_hdcp", "xin24m", 0,
                        RK3368_CLKGATE_CON(4), 13, GFLAGS),
        GATE(SCLK_HDMI_CEC, "sclk_hdmi_cec", "xin32k", 0,
-                       RK3368_CLKGATE_CON(5), 12, GFLAGS),
+                       RK3368_CLKGATE_CON(4), 12, GFLAGS),
 
        COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
                        RK3368_CLKSEL_CON(21), 15, 1, MFLAGS,
@@ -842,24 +844,6 @@ static void __init rk3368_clk_init(struct device_node *np)
 
        rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
 
-       /* xin12m is created by a cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       /* ddrphy_div4 is created by a cru-internal divider */
-       clk = clk_register_fixed_factor(NULL, "ddrphy_div4", "ddrphy_src", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock xin12m: %ld\n",
-                       __func__, PTR_ERR(clk));
-
-       clk = clk_register_fixed_factor(NULL, "hclk_video_pre",
-                                       "hclk_video_pre_v", 0, 1, 4);
-       if (IS_ERR(clk))
-               pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
-                       __func__, PTR_ERR(clk));
-
        /* Watchdog pclk is controlled by sgrf_soc_con3[7]. */
        clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
        if (IS_ERR(clk))
index d9a0b5d4d47f91137e78944de023b910972c4387..ab505247887013d8b5eac570c996e96627165f26 100644 (file)
@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
        if (gate_offset >= 0) {
                gate = kzalloc(sizeof(*gate), GFP_KERNEL);
                if (!gate)
-                       return ERR_PTR(-ENOMEM);
+                       goto err_gate;
 
                gate->flags = gate_flags;
                gate->reg = base + gate_offset;
@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
        if (div_width > 0) {
                div = kzalloc(sizeof(*div), GFP_KERNEL);
                if (!div)
-                       return ERR_PTR(-ENOMEM);
+                       goto err_div;
 
                div->flags = div_flags;
                div->reg = base + muxdiv_offset;
@@ -100,6 +100,11 @@ static struct clk *rockchip_clk_register_branch(const char *name,
                                     flags);
 
        return clk;
+err_div:
+       kfree(gate);
+err_gate:
+       kfree(mux);
+       return ERR_PTR(-ENOMEM);
 }
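The err_div/err_gate labels above close a leak in the error path: once a later allocation fails, everything allocated earlier has to be freed before returning. The same goto-unwind idiom in a standalone sketch (struct and function names hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

struct pair { void *a; void *b; };

static struct pair *pair_create(void)
{
	struct pair *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		goto err_pair;

	p->a = kzalloc(16, GFP_KERNEL);
	if (!p->a)
		goto err_a;

	p->b = kzalloc(16, GFP_KERNEL);
	if (!p->b)
		goto err_b;

	return p;

err_b:
	kfree(p->a);
err_a:
	kfree(p);
err_pair:
	return ERR_PTR(-ENOMEM);
}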
 
 struct rockchip_clk_frac {
@@ -260,6 +265,53 @@ static struct clk *rockchip_clk_register_frac_branch(const char *name,
        return clk;
 }
 
+static struct clk *rockchip_clk_register_factor_branch(const char *name,
+               const char *const *parent_names, u8 num_parents,
+               void __iomem *base, unsigned int mult, unsigned int div,
+               int gate_offset, u8 gate_shift, u8 gate_flags,
+               unsigned long flags, spinlock_t *lock)
+{
+       struct clk *clk;
+       struct clk_gate *gate = NULL;
+       struct clk_fixed_factor *fix = NULL;
+
+       /* without a gate, register a plain fixed-factor clock */
+       if (gate_offset == 0) {
+               return clk_register_fixed_factor(NULL, name,
+                               parent_names[0], flags, mult,
+                               div);
+       }
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate)
+               return ERR_PTR(-ENOMEM);
+
+       gate->flags = gate_flags;
+       gate->reg = base + gate_offset;
+       gate->bit_idx = gate_shift;
+       gate->lock = lock;
+
+       fix = kzalloc(sizeof(*fix), GFP_KERNEL);
+       if (!fix) {
+               kfree(gate);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       fix->mult = mult;
+       fix->div = div;
+
+       clk = clk_register_composite(NULL, name, parent_names, num_parents,
+                                    NULL, NULL,
+                                    &fix->hw, &clk_fixed_factor_ops,
+                                    &gate->hw, &clk_gate_ops, flags);
+       if (IS_ERR(clk)) {
+               kfree(fix);
+               kfree(gate);
+       }
+
+       return clk;
+}
+
 static DEFINE_SPINLOCK(clk_lock);
 static struct clk **clk_table;
 static void __iomem *reg_base;
@@ -395,6 +447,14 @@ void __init rockchip_clk_register_branches(
                                reg_base + list->muxdiv_offset,
                                list->div_shift, list->div_flags, &clk_lock);
                        break;
+               case branch_factor:
+                       clk = rockchip_clk_register_factor_branch(
+                               list->name, list->parent_names,
+                               list->num_parents, reg_base,
+                               list->div_shift, list->div_width,
+                               list->gate_offset, list->gate_shift,
+                               list->gate_flags, flags, &clk_lock);
+                       break;
                }
 
                /* none of the cases above matched */
index ff8bd23a93ec2741ad00a40adbf0a462d836b82a..39c198bbcbee7be22f47b0c3f4f30bb297eea772 100644 (file)
@@ -254,6 +254,7 @@ enum rockchip_clk_branch_type {
        branch_gate,
        branch_mmc,
        branch_inverter,
+       branch_factor,
 };
 
 struct rockchip_clk_branch {
@@ -508,6 +509,33 @@ struct rockchip_clk_branch {
                .div_flags      = if,                           \
        }
 
+#define FACTOR(_id, cname, pname,  f, fm, fd)                  \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_factor,                \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .div_shift      = fm,                           \
+               .div_width      = fd,                           \
+       }
+
+#define FACTOR_GATE(_id, cname, pname,  f, fm, fd, go, gb, gf) \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_factor,                \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .div_shift      = fm,                           \
+               .div_width      = fd,                           \
+               .gate_offset    = go,                           \
+               .gate_shift     = gb,                           \
+               .gate_flags     = gf,                           \
+       }
+
 void rockchip_clk_init(struct device_node *np, void __iomem *base,
                       unsigned long nr_clks);
 struct regmap *rockchip_clk_get_grf(void);
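These two macros are what let the per-SoC files above drop their open-coded clk_register_fixed_factor() calls: a fixed mult/div pair (carried in the branch's div_shift/div_width fields) plus an optional gate is now described in the branch table and registered via branch_factor. A usage sketch in the style of the entries added to the rk3036/rk3228/rk3288/rk3368 tables, assuming the local rockchip "clk.h" that provides these macros, RK2928_CLKGATE_CON() and GFLAGS; the hclk_example/aclk_example names are illustrative:

static struct rockchip_clk_branch example_branches[] __initdata = {
	/* "xin12m" = "xin24m" * 1 / 2, no gate */
	FACTOR(0, "xin12m", "xin24m", 0, 1, 2),

	/* "hclk_example" = "aclk_example" * 1 / 4, gated by CLKGATE_CON(3) bit 12 */
	FACTOR_GATE(0, "hclk_example", "aclk_example", 0, 1, 4,
			RK2928_CLKGATE_CON(3), 12, GFLAGS),
};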
index 0481d5d673d68a718a8052762c66b78d482543eb..6b598c6a02136d64bee24e23bc0d61ec8d8733c6 100644 (file)
@@ -15,9 +15,9 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/slab.h>
 
 #define SUNXI_OSC24M_GATE      0
 
@@ -61,7 +61,6 @@ static void __init sun4i_osc_clk_setup(struct device_node *node)
                goto err_free_gate;
 
        of_clk_add_provider(node, of_clk_src_simple_get, clk);
-       clk_register_clkdev(clk, clk_name, NULL);
 
        return;
 
index 1611b036421c20e76f438e13fb326111b24e5cda..3437f734c9bf1f7e2ba95051f2debbf71b3c3444 100644 (file)
@@ -17,7 +17,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/slab.h>
@@ -107,7 +106,6 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
                goto iounmap_reg;
 
        of_clk_add_provider(node, of_clk_src_simple_get, clk);
-       clk_register_clkdev(clk, clk_name, NULL);
 
        return;
 
index 59428dbd607a85d3ff15e3b648f529e1a575abf9..ddefe9668863b0ffbaebfc5e33c709c49c2486e2 100644 (file)
@@ -48,7 +48,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
        u32 reg;
        unsigned long rate;
        struct clk_factors *factors = to_clk_factors(hw);
-       struct clk_factors_config *config = factors->config;
+       const struct clk_factors_config *config = factors->config;
 
        /* Fetch the register value */
        reg = readl(factors->reg);
@@ -63,18 +63,28 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
        if (config->pwidth != SUNXI_FACTORS_NOT_APPLICABLE)
                p = FACTOR_GET(config->pshift, config->pwidth, reg);
 
-       /* Calculate the rate */
-       rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
+       if (factors->recalc) {
+               struct factors_request factors_req = {
+                       .parent_rate = parent_rate,
+                       .n = n,
+                       .k = k,
+                       .m = m,
+                       .p = p,
+               };
 
-       return rate;
-}
+               /* get mux details from mux clk structure */
+               if (factors->mux)
+                       factors_req.parent_index =
+                               (reg >> factors->mux->shift) &
+                               factors->mux->mask;
 
-static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
-                                  unsigned long *parent_rate)
-{
-       struct clk_factors *factors = to_clk_factors(hw);
-       factors->get_factors((u32 *)&rate, (u32)*parent_rate,
-                            NULL, NULL, NULL, NULL);
+               factors->recalc(&factors_req);
+
+               return factors_req.rate;
+       }
+
+       /* Calculate the rate */
+       rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
 
        return rate;
 }
@@ -82,6 +92,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
 static int clk_factors_determine_rate(struct clk_hw *hw,
                                      struct clk_rate_request *req)
 {
+       struct clk_factors *factors = to_clk_factors(hw);
        struct clk_hw *parent, *best_parent = NULL;
        int i, num_parents;
        unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
@@ -89,6 +100,10 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
        /* find the parent that can help provide the fastest rate <= rate */
        num_parents = clk_hw_get_num_parents(hw);
        for (i = 0; i < num_parents; i++) {
+               struct factors_request factors_req = {
+                       .rate = req->rate,
+                       .parent_index = i,
+               };
                parent = clk_hw_get_parent_by_index(hw, i);
                if (!parent)
                        continue;
@@ -97,8 +112,9 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
                else
                        parent_rate = clk_hw_get_rate(parent);
 
-               child_rate = clk_factors_round_rate(hw, req->rate,
-                                                   &parent_rate);
+               factors_req.parent_rate = parent_rate;
+               factors->get_factors(&factors_req);
+               child_rate = factors_req.rate;
 
                if (child_rate <= req->rate && child_rate > best_child_rate) {
                        best_parent = parent;
@@ -120,13 +136,16 @@ static int clk_factors_determine_rate(struct clk_hw *hw,
 static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
 {
-       u8 n = 0, k = 0, m = 0, p = 0;
+       struct factors_request req = {
+               .rate = rate,
+               .parent_rate = parent_rate,
+       };
        u32 reg;
        struct clk_factors *factors = to_clk_factors(hw);
-       struct clk_factors_config *config = factors->config;
+       const struct clk_factors_config *config = factors->config;
        unsigned long flags = 0;
 
-       factors->get_factors((u32 *)&rate, (u32)parent_rate, &n, &k, &m, &p);
+       factors->get_factors(&req);
 
        if (factors->lock)
                spin_lock_irqsave(factors->lock, flags);
@@ -135,10 +154,10 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
        reg = readl(factors->reg);
 
        /* Set up the new factors - macros do not do anything if width is 0 */
-       reg = FACTOR_SET(config->nshift, config->nwidth, reg, n);
-       reg = FACTOR_SET(config->kshift, config->kwidth, reg, k);
-       reg = FACTOR_SET(config->mshift, config->mwidth, reg, m);
-       reg = FACTOR_SET(config->pshift, config->pwidth, reg, p);
+       reg = FACTOR_SET(config->nshift, config->nwidth, reg, req.n);
+       reg = FACTOR_SET(config->kshift, config->kwidth, reg, req.k);
+       reg = FACTOR_SET(config->mshift, config->mwidth, reg, req.m);
+       reg = FACTOR_SET(config->pshift, config->pwidth, reg, req.p);
 
        /* Apply them now */
        writel(reg, factors->reg);
@@ -155,7 +174,6 @@ static int clk_factors_set_rate(struct clk_hw *hw, unsigned long rate,
 static const struct clk_ops clk_factors_ops = {
        .determine_rate = clk_factors_determine_rate,
        .recalc_rate = clk_factors_recalc_rate,
-       .round_rate = clk_factors_round_rate,
        .set_rate = clk_factors_set_rate,
 };
 
@@ -172,7 +190,7 @@ struct clk *sunxi_factors_register(struct device_node *node,
        struct clk_hw *mux_hw = NULL;
        const char *clk_name = node->name;
        const char *parents[FACTORS_MAX_PARENTS];
-       int i = 0;
+       int ret, i = 0;
 
        /* if we have a mux, we will have >1 parents */
        i = of_clk_parent_fill(node, parents, FACTORS_MAX_PARENTS);
@@ -188,21 +206,22 @@ struct clk *sunxi_factors_register(struct device_node *node,
 
        factors = kzalloc(sizeof(struct clk_factors), GFP_KERNEL);
        if (!factors)
-               return NULL;
+               goto err_factors;
 
        /* set up factors properties */
        factors->reg = reg;
        factors->config = data->table;
        factors->get_factors = data->getter;
+       factors->recalc = data->recalc;
        factors->lock = lock;
 
        /* Add a gate if this factor clock can be gated */
        if (data->enable) {
                gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
-               if (!gate) {
-                       kfree(factors);
-                       return NULL;
-               }
+               if (!gate)
+                       goto err_gate;
+
+               factors->gate = gate;
 
                /* set up gate properties */
                gate->reg = reg;
@@ -214,11 +233,10 @@ struct clk *sunxi_factors_register(struct device_node *node,
        /* Add a mux if this factor clock can be muxed */
        if (data->mux) {
                mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
-               if (!mux) {
-                       kfree(factors);
-                       kfree(gate);
-                       return NULL;
-               }
+               if (!mux)
+                       goto err_mux;
+
+               factors->mux = mux;
 
                /* set up gate properties */
                mux->reg = reg;
@@ -233,11 +251,44 @@ struct clk *sunxi_factors_register(struct device_node *node,
                        mux_hw, &clk_mux_ops,
                        &factors->hw, &clk_factors_ops,
                        gate_hw, &clk_gate_ops, 0);
+       if (IS_ERR(clk))
+               goto err_register;
 
-       if (!IS_ERR(clk)) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               clk_register_clkdev(clk, clk_name, NULL);
-       }
+       ret = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (ret)
+               goto err_provider;
 
        return clk;
+
+err_provider:
+       /* TODO: The composite clock stuff will leak a bit here. */
+       clk_unregister(clk);
+err_register:
+       kfree(mux);
+err_mux:
+       kfree(gate);
+err_gate:
+       kfree(factors);
+err_factors:
+       return NULL;
+}
+
+void sunxi_factors_unregister(struct device_node *node, struct clk *clk)
+{
+       struct clk_hw *hw = __clk_get_hw(clk);
+       struct clk_factors *factors;
+       const char *name;
+
+       if (!hw)
+               return;
+
+       factors = to_clk_factors(hw);
+       name = clk_hw_get_name(hw);
+
+       of_clk_del_provider(node);
+       /* TODO: The composite clock stuff will leak a bit here. */
+       clk_unregister(clk);
+       kfree(factors->mux);
+       kfree(factors->gate);
+       kfree(factors);
 }
index 171085ab5513e4f724017d05d6f91929262a4f78..1e63c5b2d5f4cb6866127100be2274a7095641cd 100644 (file)
@@ -2,7 +2,6 @@
 #define __MACH_SUNXI_CLK_FACTORS_H
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/spinlock.h>
 
 #define SUNXI_FACTORS_NOT_APPLICABLE   (0)
@@ -19,21 +18,36 @@ struct clk_factors_config {
        u8 n_start;
 };
 
+struct factors_request {
+       unsigned long rate;
+       unsigned long parent_rate;
+       u8 parent_index;
+       u8 n;
+       u8 k;
+       u8 m;
+       u8 p;
+};
+
 struct factors_data {
        int enable;
        int mux;
        int muxmask;
-       struct clk_factors_config *table;
-       void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
+       const struct clk_factors_config *table;
+       void (*getter)(struct factors_request *req);
+       void (*recalc)(struct factors_request *req);
        const char *name;
 };
 
 struct clk_factors {
        struct clk_hw hw;
        void __iomem *reg;
-       struct clk_factors_config *config;
-       void (*get_factors) (u32 *rate, u32 parent, u8 *n, u8 *k, u8 *m, u8 *p);
+       const struct clk_factors_config *config;
+       void (*get_factors)(struct factors_request *req);
+       void (*recalc)(struct factors_request *req);
        spinlock_t *lock;
+       /* for cleanup */
+       struct clk_mux *mux;
+       struct clk_gate *gate;
 };
 
 struct clk *sunxi_factors_register(struct device_node *node,
@@ -41,4 +55,6 @@ struct clk *sunxi_factors_register(struct device_node *node,
                                   spinlock_t *lock,
                                   void __iomem *reg);
 
+void sunxi_factors_unregister(struct device_node *node, struct clk *clk);
+
 #endif
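The header changes above move the sunxi factor-clock callbacks from a six-argument getter to one operating on a struct factors_request: the requested rate, parent rate and parent index go in, the chosen n/k/m/p factors and the achievable rate come out, and the optional recalc hook goes the other way, from raw register factors to a rate. A sketch of a getter in the new style for a plain divider clock; the function is hypothetical, assumes the declarations in this header, and fills only the fields such a clock would use:

#include <linux/kernel.h>

static void example_get_factors(struct factors_request *req)
{
	unsigned long div;

	/* a pure divider can never run faster than its parent */
	if (req->rate > req->parent_rate)
		req->rate = req->parent_rate;

	div = DIV_ROUND_UP(req->parent_rate, req->rate);

	req->m = div - 1;			/* register field stores div - 1 */
	req->rate = req->parent_rate / div;	/* rate actually achieved */
}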
index d167e1efb92761a0f223a6054787587d58b11152..b38d71cec74c78300fce175de198cbd3fcb12623 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of_address.h>
 #include <linux/platform_device.h>
  * rate = (parent_rate >> p) / (m + 1);
  */
 
-static void sun4i_a10_get_mod0_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_a10_get_mod0_factors(struct factors_request *req)
 {
        u8 div, calcm, calcp;
 
        /* These clocks can only divide, so we will never be able to achieve
         * frequencies higher than the parent frequency */
-       if (*freq > parent_rate)
-               *freq = parent_rate;
+       if (req->rate > req->parent_rate)
+               req->rate = req->parent_rate;
 
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        if (div < 16)
                calcp = 0;
@@ -51,18 +51,13 @@ static void sun4i_a10_get_mod0_factors(u32 *freq, u32 parent_rate,
 
        calcm = DIV_ROUND_UP(div, 1 << calcp);
 
-       *freq = (parent_rate >> calcp) / calcm;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
-
-       *m = calcm - 1;
-       *p = calcp;
+       req->rate = (req->parent_rate >> calcp) / calcm;
+       req->m = calcm - 1;
+       req->p = calcp;
 }
 
 /* user manual says "n" but it's really "p" */
-static struct clk_factors_config sun4i_a10_mod0_config = {
+static const struct clk_factors_config sun4i_a10_mod0_config = {
        .mshift = 0,
        .mwidth = 4,
        .pshift = 16,
index f4da52b5ca0e838d37531a4cccf1e2d1941180f5..2cfc5a8a55340c4131b77443f4ac0f142f543be6 100644 (file)
@@ -130,6 +130,8 @@ CLK_OF_DECLARE(sun8i_a23_apb2, "allwinner,sun8i-a23-apb2-gates-clk",
               sunxi_simple_gates_init);
 CLK_OF_DECLARE(sun8i_a33_ahb1, "allwinner,sun8i-a33-ahb1-gates-clk",
               sunxi_simple_gates_init);
+CLK_OF_DECLARE(sun8i_a83t_apb0, "allwinner,sun8i-a83t-apb0-gates-clk",
+              sunxi_simple_gates_init);
 CLK_OF_DECLARE(sun9i_a80_ahb0, "allwinner,sun9i-a80-ahb0-gates-clk",
               sunxi_simple_gates_init);
 CLK_OF_DECLARE(sun9i_a80_ahb1, "allwinner,sun9i-a80-ahb1-gates-clk",
index 23d042aabb4f7724d2cf1602b8a2b1f4028a8c41..68021fa5ecd9af5026a941940e0496329b567d5a 100644 (file)
@@ -9,7 +9,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clkdev.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -87,7 +86,6 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
                                                      clk_parent, 0, reg, i,
                                                      0, NULL);
                WARN_ON(IS_ERR(clk_data->clks[i]));
-               clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
 
                j++;
        }
index 20887686bdbee09fe7fbc90963f179ed45c651ab..84a187e55360fce909cdb5564aeffa423193d84c 100644 (file)
  *
  */
 
+#include <linux/bitops.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
+#include <linux/spinlock.h>
 
-#define SUN6I_AR100_MAX_PARENTS                4
-#define SUN6I_AR100_SHIFT_MASK         0x3
-#define SUN6I_AR100_SHIFT_MAX          SUN6I_AR100_SHIFT_MASK
-#define SUN6I_AR100_SHIFT_SHIFT                4
-#define SUN6I_AR100_DIV_MASK           0x1f
-#define SUN6I_AR100_DIV_MAX            (SUN6I_AR100_DIV_MASK + 1)
-#define SUN6I_AR100_DIV_SHIFT          8
-#define SUN6I_AR100_MUX_MASK           0x3
-#define SUN6I_AR100_MUX_SHIFT          16
-
-struct ar100_clk {
-       struct clk_hw hw;
-       void __iomem *reg;
-};
-
-static inline struct ar100_clk *to_ar100_clk(struct clk_hw *hw)
-{
-       return container_of(hw, struct ar100_clk, hw);
-}
-
-static unsigned long ar100_recalc_rate(struct clk_hw *hw,
-                                      unsigned long parent_rate)
-{
-       struct ar100_clk *clk = to_ar100_clk(hw);
-       u32 val = readl(clk->reg);
-       int shift = (val >> SUN6I_AR100_SHIFT_SHIFT) & SUN6I_AR100_SHIFT_MASK;
-       int div = (val >> SUN6I_AR100_DIV_SHIFT) & SUN6I_AR100_DIV_MASK;
-
-       return (parent_rate >> shift) / (div + 1);
-}
-
-static int ar100_determine_rate(struct clk_hw *hw,
-                               struct clk_rate_request *req)
-{
-       int nparents = clk_hw_get_num_parents(hw);
-       long best_rate = -EINVAL;
-       int i;
-
-       req->best_parent_hw = NULL;
-
-       for (i = 0; i < nparents; i++) {
-               unsigned long parent_rate;
-               unsigned long tmp_rate;
-               struct clk_hw *parent;
-               unsigned long div;
-               int shift;
-
-               parent = clk_hw_get_parent_by_index(hw, i);
-               parent_rate = clk_hw_get_rate(parent);
-               div = DIV_ROUND_UP(parent_rate, req->rate);
-
-               /*
-                * The AR100 clk contains 2 divisors:
-                * - one power of 2 divisor
-                * - one regular divisor
-                *
-                * First check if we can safely shift (or divide by a power
-                * of 2) without losing precision on the requested rate.
-                */
-               shift = ffs(div) - 1;
-               if (shift > SUN6I_AR100_SHIFT_MAX)
-                       shift = SUN6I_AR100_SHIFT_MAX;
-
-               div >>= shift;
-
-               /*
-                * Then if the divisor is still bigger than what the HW
-                * actually supports, use a bigger shift (or power of 2
-                * divider) value and accept to lose some precision.
-                */
-               while (div > SUN6I_AR100_DIV_MAX) {
-                       shift++;
-                       div >>= 1;
-                       if (shift > SUN6I_AR100_SHIFT_MAX)
-                               break;
-               }
-
-               /*
-                * If the shift value (or power of 2 divider) is bigger
-                * than what the HW actually support, skip this parent.
-                */
-               if (shift > SUN6I_AR100_SHIFT_MAX)
-                       continue;
-
-               tmp_rate = (parent_rate >> shift) / div;
-               if (!req->best_parent_hw || tmp_rate > best_rate) {
-                       req->best_parent_hw = parent;
-                       req->best_parent_rate = parent_rate;
-                       best_rate = tmp_rate;
-               }
-       }
-
-       if (best_rate < 0)
-               return best_rate;
-
-       req->rate = best_rate;
-
-       return 0;
-}
-
-static int ar100_set_parent(struct clk_hw *hw, u8 index)
-{
-       struct ar100_clk *clk = to_ar100_clk(hw);
-       u32 val = readl(clk->reg);
-
-       if (index >= SUN6I_AR100_MAX_PARENTS)
-               return -EINVAL;
-
-       val &= ~(SUN6I_AR100_MUX_MASK << SUN6I_AR100_MUX_SHIFT);
-       val |= (index << SUN6I_AR100_MUX_SHIFT);
-       writel(val, clk->reg);
-
-       return 0;
-}
+#include "clk-factors.h"
 
-static u8 ar100_get_parent(struct clk_hw *hw)
-{
-       struct ar100_clk *clk = to_ar100_clk(hw);
-       return (readl(clk->reg) >> SUN6I_AR100_MUX_SHIFT) &
-              SUN6I_AR100_MUX_MASK;
-}
-
-static int ar100_set_rate(struct clk_hw *hw, unsigned long rate,
-                         unsigned long parent_rate)
+/**
+ * sun6i_get_ar100_factors - Calculates factors p, m for AR100
+ *
+ * AR100 rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+static void sun6i_get_ar100_factors(struct factors_request *req)
 {
-       unsigned long div = parent_rate / rate;
-       struct ar100_clk *clk = to_ar100_clk(hw);
-       u32 val = readl(clk->reg);
+       unsigned long div;
        int shift;
 
-       if (parent_rate % rate)
-               return -EINVAL;
+       /* clock only divides */
+       if (req->rate > req->parent_rate)
+               req->rate = req->parent_rate;
 
-       shift = ffs(div) - 1;
-       if (shift > SUN6I_AR100_SHIFT_MAX)
-               shift = SUN6I_AR100_SHIFT_MAX;
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
-       div >>= shift;
+       if (div < 32)
+               shift = 0;
+       else if (div >> 1 < 32)
+               shift = 1;
+       else if (div >> 2 < 32)
+               shift = 2;
+       else
+               shift = 3;
 
-       if (div > SUN6I_AR100_DIV_MAX)
-               return -EINVAL;
+       div >>= shift;
 
-       val &= ~((SUN6I_AR100_SHIFT_MASK << SUN6I_AR100_SHIFT_SHIFT) |
-                (SUN6I_AR100_DIV_MASK << SUN6I_AR100_DIV_SHIFT));
-       val |= (shift << SUN6I_AR100_SHIFT_SHIFT) |
-              (div << SUN6I_AR100_DIV_SHIFT);
-       writel(val, clk->reg);
+       if (div > 32)
+               div = 32;
 
-       return 0;
+       req->rate = (req->parent_rate >> shift) / div;
+       req->m = div - 1;
+       req->p = shift;
 }
 
-static struct clk_ops ar100_ops = {
-       .recalc_rate = ar100_recalc_rate,
-       .determine_rate = ar100_determine_rate,
-       .set_parent = ar100_set_parent,
-       .get_parent = ar100_get_parent,
-       .set_rate = ar100_set_rate,
+static const struct clk_factors_config sun6i_ar100_config = {
+       .mwidth = 5,
+       .mshift = 8,
+       .pwidth = 2,
+       .pshift = 4,
 };
 
+static const struct factors_data sun6i_ar100_data = {
+       .mux = 16,
+       .muxmask = GENMASK(1, 0),
+       .table = &sun6i_ar100_config,
+       .getter = sun6i_get_ar100_factors,
+};
+
+static DEFINE_SPINLOCK(sun6i_ar100_lock);
+
 static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
 {
-       const char *parents[SUN6I_AR100_MAX_PARENTS];
        struct device_node *np = pdev->dev.of_node;
-       const char *clk_name = np->name;
-       struct clk_init_data init;
-       struct ar100_clk *ar100;
        struct resource *r;
+       void __iomem *reg;
        struct clk *clk;
-       int nparents;
-
-       ar100 = devm_kzalloc(&pdev->dev, sizeof(*ar100), GFP_KERNEL);
-       if (!ar100)
-               return -ENOMEM;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       ar100->reg = devm_ioremap_resource(&pdev->dev, r);
-       if (IS_ERR(ar100->reg))
-               return PTR_ERR(ar100->reg);
+       reg = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(reg))
+               return PTR_ERR(reg);
 
-       nparents = of_clk_get_parent_count(np);
-       if (nparents > SUN6I_AR100_MAX_PARENTS)
-               nparents = SUN6I_AR100_MAX_PARENTS;
-
-       of_clk_parent_fill(np, parents, nparents);
+       clk = sunxi_factors_register(np, &sun6i_ar100_data, &sun6i_ar100_lock,
+                                    reg);
+       if (!clk)
+               return -ENOMEM;
 
-       of_property_read_string(np, "clock-output-names", &clk_name);
+       platform_set_drvdata(pdev, clk);
 
-       init.name = clk_name;
-       init.ops = &ar100_ops;
-       init.parent_names = parents;
-       init.num_parents = nparents;
-       init.flags = 0;
+       return 0;
+}
 
-       ar100->hw.init = &init;
+static int sun6i_a31_ar100_clk_remove(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct clk *clk = platform_get_drvdata(pdev);
 
-       clk = clk_register(&pdev->dev, &ar100->hw);
-       if (IS_ERR(clk))
-               return PTR_ERR(clk);
+       sunxi_factors_unregister(np, clk);
 
-       return of_clk_add_provider(np, of_clk_src_simple_get, clk);
+       return 0;
 }
 
 static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
@@ -227,6 +113,7 @@ static struct platform_driver sun6i_a31_ar100_clk_driver = {
                .of_match_table = sun6i_a31_ar100_clk_dt_ids,
        },
        .probe = sun6i_a31_ar100_clk_probe,
+       .remove = sun6i_a31_ar100_clk_remove,
 };
 module_platform_driver(sun6i_a31_ar100_clk_driver);
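
With the AR100 clock now expressed as a factors clock, the shift/divider split in sun6i_get_ar100_factors() is easy to check by hand; a standalone rerun of the same arithmetic for a hypothetical 24 MHz parent and a 300 kHz request:

#include <stdio.h>

int main(void)
{
        unsigned long parent_rate = 24000000, rate = 300000;
        unsigned long div;
        int shift;

        div = (parent_rate + rate - 1) / rate;  /* DIV_ROUND_UP -> 80 */

        if (div < 32)
                shift = 0;
        else if (div >> 1 < 32)
                shift = 1;
        else if (div >> 2 < 32)
                shift = 2;                      /* 80 >> 2 = 20 fits the 5-bit divider */
        else
                shift = 3;

        div >>= shift;
        if (div > 32)
                div = 32;

        rate = (parent_rate >> shift) / div;    /* (24 MHz >> 2) / 20 = 300 kHz */
        printf("rate=%lu p=%d m=%lu\n", rate, shift, div - 1);
        return 0;
}

The result (p = 2, m = 19) hits the requested 300 kHz exactly.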
 
index e32d18ba252be91ad308c66f10828a5ff7adcf36..63fdb790df2938ba1f1c35df96020926820ad065 100644 (file)
@@ -17,7 +17,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -110,3 +109,5 @@ err_unmap:
 
 CLK_OF_DECLARE(sun8i_h3_bus_gates, "allwinner,sun8i-h3-bus-gates-clk",
               sun8i_h3_bus_gates_init);
+CLK_OF_DECLARE(sun8i_a83t_bus_gates, "allwinner,sun8i-a83t-bus-gates-clk",
+              sun8i_h3_bus_gates_init);
index bf117a636d2397f6a7059f55962134cade4b3cc0..3aaa9cbef791a910d14965b6858d91067da94091 100644 (file)
  */
 
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/of_address.h>
 
-#include "clk-factors.h"
+#define SUN8I_MBUS_ENABLE      31
+#define SUN8I_MBUS_MUX_SHIFT   24
+#define SUN8I_MBUS_MUX_MASK    0x3
+#define SUN8I_MBUS_DIV_SHIFT   0
+#define SUN8I_MBUS_DIV_WIDTH   3
+#define SUN8I_MBUS_MAX_PARENTS 4
 
-/**
- * sun8i_a23_get_mbus_factors() - calculates m factor for MBUS clocks
- * MBUS rate is calculated as follows
- * rate = parent_rate / (m + 1);
- */
+static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
 
-static void sun8i_a23_get_mbus_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void __init sun8i_a23_mbus_setup(struct device_node *node)
 {
-       u8 div;
-
-       /*
-        * These clocks can only divide, so we will never be able to
-        * achieve frequencies higher than the parent frequency
-        */
-       if (*freq > parent_rate)
-               *freq = parent_rate;
-
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       int num_parents = of_clk_get_parent_count(node);
+       const char *parents[num_parents];
+       const char *clk_name = node->name;
+       struct resource res;
+       struct clk_divider *div;
+       struct clk_gate *gate;
+       struct clk_mux *mux;
+       struct clk *clk;
+       void __iomem *reg;
+       int err;
 
-       if (div > 8)
-               div = 8;
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Could not get registers for sun8i-mbus-clk\n");
+               return;
+       }
 
-       *freq = parent_rate / div;
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               goto err_unmap;
 
-       /* we were called to round the frequency, we can now return */
-       if (m == NULL)
-               return;
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               goto err_free_div;
 
-       *m = div - 1;
-}
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate)
+               goto err_free_mux;
 
-static struct clk_factors_config sun8i_a23_mbus_config = {
-       .mshift = 0,
-       .mwidth = 3,
-};
+       of_property_read_string(node, "clock-output-names", &clk_name);
+       of_clk_parent_fill(node, parents, num_parents);
 
-static const struct factors_data sun8i_a23_mbus_data __initconst = {
-       .enable = 31,
-       .mux = 24,
-       .muxmask = BIT(1) | BIT(0),
-       .table = &sun8i_a23_mbus_config,
-       .getter = sun8i_a23_get_mbus_factors,
-};
+       gate->reg = reg;
+       gate->bit_idx = SUN8I_MBUS_ENABLE;
+       gate->lock = &sun8i_a23_mbus_lock;
 
-static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
+       div->reg = reg;
+       div->shift = SUN8I_MBUS_DIV_SHIFT;
+       div->width = SUN8I_MBUS_DIV_WIDTH;
+       div->lock = &sun8i_a23_mbus_lock;
 
-static void __init sun8i_a23_mbus_setup(struct device_node *node)
-{
-       struct clk *mbus;
-       void __iomem *reg;
+       mux->reg = reg;
+       mux->shift = SUN8I_MBUS_MUX_SHIFT;
+       mux->mask = SUN8I_MBUS_MUX_MASK;
+       mux->lock = &sun8i_a23_mbus_lock;
 
-       reg = of_iomap(node, 0);
-       if (!reg) {
-               pr_err("Could not get registers for a23-mbus-clk\n");
-               return;
-       }
+       clk = clk_register_composite(NULL, clk_name, parents, num_parents,
+                                    &mux->hw, &clk_mux_ops,
+                                    &div->hw, &clk_divider_ops,
+                                    &gate->hw, &clk_gate_ops,
+                                    0);
+       if (IS_ERR(clk))
+               goto err_free_gate;
 
-       mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
-                                     &sun8i_a23_mbus_lock, reg);
+       err = of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (err)
+               goto err_unregister_clk;
 
        /* The MBUS clock needs to always be enabled */
-       __clk_get(mbus);
-       clk_prepare_enable(mbus);
+       __clk_get(clk);
+       clk_prepare_enable(clk);
+
+       return;
+
+err_unregister_clk:
+       /* TODO: the clk_composite from clk_register_composite() leaks here. */
+       clk_unregister(clk);
+err_free_gate:
+       kfree(gate);
+err_free_mux:
+       kfree(mux);
+err_free_div:
+       kfree(div);
+err_unmap:
+       iounmap(reg);
+       of_address_to_resource(node, 0, &res);
+       release_mem_region(res.start, resource_size(&res));
 }
 CLK_OF_DECLARE(sun8i_a23_mbus, "allwinner,sun8i-a23-mbus-clk", sun8i_a23_mbus_setup);
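
The rewritten MBUS setup above leans on the usual goto-unwind idiom: each error label releases only what was already acquired, in reverse order, and on success the allocations stay alive because the registered composite clock owns them. A stripped-down userspace sketch of the same shape (malloc/free stand in for the clk structures; none of this is kernel API):

#include <stdlib.h>

static int setup(void)
{
        void *div, *mux, *gate;

        div = malloc(16);
        if (!div)
                goto err;
        mux = malloc(16);
        if (!mux)
                goto err_free_div;
        gate = malloc(16);
        if (!gate)
                goto err_free_mux;

        /* ...register the composite here; on success the three
         * allocations are intentionally kept... */
        return 0;

err_free_mux:
        free(mux);
err_free_div:
        free(div);
err:
        return -1;
}

int main(void)
{
        return setup() ? 1 : 0;
}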
index 6c4c98324d3cc6800bace1b4a918707e2b4cc53f..43f014f85803109aa5cca03dbe045d05a7a4b120 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include <linux/clk.h>
+#include <linux/clkdev.h>
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
  * p and m are named div1 and div2 in Allwinner's SDK
  */
 
-static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
+static void sun9i_a80_get_pll4_factors(struct factors_request *req)
 {
        int n;
        int m = 1;
        int p = 1;
 
        /* Normalize value to a 6 MHz multiple (24 MHz / 4) */
-       n = DIV_ROUND_UP(*freq, 6000000);
+       n = DIV_ROUND_UP(req->rate, 6000000);
 
        /* If n is too large switch to steps of 12 MHz */
        if (n > 255) {
@@ -60,18 +60,13 @@ static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
        else if (n < 12)
                n = 12;
 
-       *freq = ((24000000 * n) >> p) / (m + 1);
-
-       /* we were called to round the frequency, we can now return */
-       if (n_ret == NULL)
-               return;
-
-       *n_ret = n;
-       *m_ret = m;
-       *p_ret = p;
+       req->rate = ((24000000 * n) >> p) / (m + 1);
+       req->n = n;
+       req->m = m;
+       req->p = p;
 }
 
-static struct clk_factors_config sun9i_a80_pll4_config = {
+static const struct clk_factors_config sun9i_a80_pll4_config = {
        .mshift = 18,
        .mwidth = 1,
        .nshift = 8,
@@ -111,30 +106,24 @@ CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_se
  * rate = parent_rate / (m + 1);
  */
 
-static void sun9i_a80_get_gt_factors(u32 *freq, u32 parent_rate,
-                                    u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_gt_factors(struct factors_request *req)
 {
        u32 div;
 
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        /* maximum divider is 4 */
        if (div > 4)
                div = 4;
 
-       *freq = parent_rate / div;
-
-       /* we were called to round the frequency, we can now return */
-       if (!m)
-               return;
-
-       *m = div;
+       req->rate = req->parent_rate / div;
+       req->m = div;
 }
 
-static struct clk_factors_config sun9i_a80_gt_config = {
+static const struct clk_factors_config sun9i_a80_gt_config = {
        .mshift = 0,
        .mwidth = 2,
 };
@@ -176,30 +165,24 @@ CLK_OF_DECLARE(sun9i_a80_gt, "allwinner,sun9i-a80-gt-clk", sun9i_a80_gt_setup);
  * rate = parent_rate >> p;
  */
 
-static void sun9i_a80_get_ahb_factors(u32 *freq, u32 parent_rate,
-                                     u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_ahb_factors(struct factors_request *req)
 {
        u32 _p;
 
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
-       _p = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+       _p = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
 
        /* maximum p is 3 */
        if (_p > 3)
                _p = 3;
 
-       *freq = parent_rate >> _p;
-
-       /* we were called to round the frequency, we can now return */
-       if (!p)
-               return;
-
-       *p = _p;
+       req->rate = req->parent_rate >> _p;
+       req->p = _p;
 }
 
-static struct clk_factors_config sun9i_a80_ahb_config = {
+static const struct clk_factors_config sun9i_a80_ahb_config = {
        .pshift = 0,
        .pwidth = 2,
 };
@@ -262,34 +245,25 @@ CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_se
  * rate = (parent_rate >> p) / (m + 1);
  */
 
-static void sun9i_a80_get_apb1_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun9i_a80_get_apb1_factors(struct factors_request *req)
 {
        u32 div;
-       u8 calcm, calcp;
 
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        /* Highest possible divider is 256 (p = 3, m = 31) */
        if (div > 256)
                div = 256;
 
-       calcp = order_base_2(div);
-       calcm = (parent_rate >> calcp) - 1;
-       *freq = (parent_rate >> calcp) / (calcm + 1);
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
-
-       *m = calcm;
-       *p = calcp;
+       req->p = order_base_2(div);
+       req->m = (req->parent_rate >> req->p) - 1;
+       req->rate = (req->parent_rate >> req->p) / (req->m + 1);
 }
 
-static struct clk_factors_config sun9i_a80_apb1_config = {
+static const struct clk_factors_config sun9i_a80_apb1_config = {
        .mshift = 0,
        .mwidth = 5,
        .pshift = 16,
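
The sun9i_a80_get_pll4_factors() conversion above keeps the 6 MHz normalization and the m = 1, p = 1 defaults; replaying it standalone for a hypothetical 297 MHz request shows how a request gets rounded up to the next reachable step:

#include <stdio.h>

int main(void)
{
        unsigned long rate = 297000000;
        int n, m = 1, p = 1;

        n = (rate + 6000000 - 1) / 6000000;     /* DIV_ROUND_UP -> 50 */
        /* 12 <= n <= 255, so neither clamp in the hunk above applies */

        rate = ((24000000UL * n) >> p) / (m + 1);
        printf("n=%d m=%d p=%d rate=%lu\n", n, m, p, rate);     /* 300 MHz */
        return 0;
}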
index 5ba2188ee99ccaa1f806113073c2e21b17813e5f..49ce2830489b6a75af2ba1281e8ed47469d5f8b4 100644 (file)
 
 static DEFINE_SPINLOCK(clk_lock);
 
-/**
- * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
- */
-
-#define SUN6I_AHB1_MAX_PARENTS         4
-#define SUN6I_AHB1_MUX_PARENT_PLL6     3
-#define SUN6I_AHB1_MUX_SHIFT           12
-/* un-shifted mask is what mux_clk expects */
-#define SUN6I_AHB1_MUX_MASK            0x3
-#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \
-                                        SUN6I_AHB1_MUX_MASK)
-
-#define SUN6I_AHB1_DIV_SHIFT           4
-#define SUN6I_AHB1_DIV_MASK            (0x3 << SUN6I_AHB1_DIV_SHIFT)
-#define SUN6I_AHB1_DIV_GET(reg)                ((reg & SUN6I_AHB1_DIV_MASK) >> \
-                                               SUN6I_AHB1_DIV_SHIFT)
-#define SUN6I_AHB1_DIV_SET(reg, div)   ((reg & ~SUN6I_AHB1_DIV_MASK) | \
-                                               (div << SUN6I_AHB1_DIV_SHIFT))
-#define SUN6I_AHB1_PLL6_DIV_SHIFT      6
-#define SUN6I_AHB1_PLL6_DIV_MASK       (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
-#define SUN6I_AHB1_PLL6_DIV_GET(reg)   ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
-                                               SUN6I_AHB1_PLL6_DIV_SHIFT)
-#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
-                                               (div << SUN6I_AHB1_PLL6_DIV_SHIFT))
-
-struct sun6i_ahb1_clk {
-       struct clk_hw hw;
-       void __iomem *reg;
-};
-
-#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
-
-static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
-                                               unsigned long parent_rate)
-{
-       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
-       unsigned long rate;
-       u32 reg;
-
-       /* Fetch the register value */
-       reg = readl(ahb1->reg);
-
-       /* apply pre-divider first if parent is pll6 */
-       if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
-               parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
-
-       /* clk divider */
-       rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
-
-       return rate;
-}
-
-static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
-                                u8 parent, unsigned long parent_rate)
-{
-       u8 div, calcp, calcm = 1;
-
-       /*
-        * clock can only divide, so we will never be able to achieve
-        * frequencies higher than the parent frequency
-        */
-       if (parent_rate && rate > parent_rate)
-               rate = parent_rate;
-
-       div = DIV_ROUND_UP(parent_rate, rate);
-
-       /* calculate pre-divider if parent is pll6 */
-       if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
-               if (div < 4)
-                       calcp = 0;
-               else if (div / 2 < 4)
-                       calcp = 1;
-               else if (div / 4 < 4)
-                       calcp = 2;
-               else
-                       calcp = 3;
-
-               calcm = DIV_ROUND_UP(div, 1 << calcp);
-       } else {
-               calcp = __roundup_pow_of_two(div);
-               calcp = calcp > 3 ? 3 : calcp;
-       }
-
-       /* we were asked to pass back divider values */
-       if (divp) {
-               *divp = calcp;
-               *pre_divp = calcm - 1;
-       }
-
-       return (parent_rate / calcm) >> calcp;
-}
-
-static int sun6i_ahb1_clk_determine_rate(struct clk_hw *hw,
-                                        struct clk_rate_request *req)
-{
-       struct clk_hw *parent, *best_parent = NULL;
-       int i, num_parents;
-       unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
-
-       /* find the parent that can help provide the fastest rate <= rate */
-       num_parents = clk_hw_get_num_parents(hw);
-       for (i = 0; i < num_parents; i++) {
-               parent = clk_hw_get_parent_by_index(hw, i);
-               if (!parent)
-                       continue;
-               if (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)
-                       parent_rate = clk_hw_round_rate(parent, req->rate);
-               else
-                       parent_rate = clk_hw_get_rate(parent);
-
-               child_rate = sun6i_ahb1_clk_round(req->rate, NULL, NULL, i,
-                                                 parent_rate);
-
-               if (child_rate <= req->rate && child_rate > best_child_rate) {
-                       best_parent = parent;
-                       best = parent_rate;
-                       best_child_rate = child_rate;
-               }
-       }
-
-       if (!best_parent)
-               return -EINVAL;
-
-       req->best_parent_hw = best_parent;
-       req->best_parent_rate = best;
-       req->rate = best_child_rate;
-
-       return 0;
-}
-
-static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
-                                  unsigned long parent_rate)
-{
-       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
-       unsigned long flags;
-       u8 div, pre_div, parent;
-       u32 reg;
-
-       spin_lock_irqsave(&clk_lock, flags);
-
-       reg = readl(ahb1->reg);
-
-       /* need to know which parent is used to apply pre-divider */
-       parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
-       sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
-
-       reg = SUN6I_AHB1_DIV_SET(reg, div);
-       reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
-       writel(reg, ahb1->reg);
-
-       spin_unlock_irqrestore(&clk_lock, flags);
-
-       return 0;
-}
-
-static const struct clk_ops sun6i_ahb1_clk_ops = {
-       .determine_rate = sun6i_ahb1_clk_determine_rate,
-       .recalc_rate    = sun6i_ahb1_clk_recalc_rate,
-       .set_rate       = sun6i_ahb1_clk_set_rate,
-};
-
-static void __init sun6i_ahb1_clk_setup(struct device_node *node)
-{
-       struct clk *clk;
-       struct sun6i_ahb1_clk *ahb1;
-       struct clk_mux *mux;
-       const char *clk_name = node->name;
-       const char *parents[SUN6I_AHB1_MAX_PARENTS];
-       void __iomem *reg;
-       int i;
-
-       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (IS_ERR(reg))
-               return;
-
-       /* we have a mux, we will have >1 parents */
-       i = of_clk_parent_fill(node, parents, SUN6I_AHB1_MAX_PARENTS);
-       of_property_read_string(node, "clock-output-names", &clk_name);
-
-       ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
-       if (!ahb1)
-               return;
-
-       mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
-       if (!mux) {
-               kfree(ahb1);
-               return;
-       }
-
-       /* set up clock properties */
-       mux->reg = reg;
-       mux->shift = SUN6I_AHB1_MUX_SHIFT;
-       mux->mask = SUN6I_AHB1_MUX_MASK;
-       mux->lock = &clk_lock;
-       ahb1->reg = reg;
-
-       clk = clk_register_composite(NULL, clk_name, parents, i,
-                                    &mux->hw, &clk_mux_ops,
-                                    &ahb1->hw, &sun6i_ahb1_clk_ops,
-                                    NULL, NULL, 0);
-
-       if (!IS_ERR(clk)) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               clk_register_clkdev(clk, clk_name, NULL);
-       }
-}
-CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
-
 /* Maximum number of parents our clocks have */
 #define SUNXI_MAX_PARENTS      5
 
@@ -246,49 +38,45 @@ CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_se
  * parent_rate is always 24Mhz
  */
 
-static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
-                                  u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_pll1_factors(struct factors_request *req)
 {
        u8 div;
 
        /* Normalize value to a 6M multiple */
-       div = *freq / 6000000;
-       *freq = 6000000 * div;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
+       div = req->rate / 6000000;
+       req->rate = 6000000 * div;
 
        /* m is always zero for pll1 */
-       *m = 0;
+       req->m = 0;
 
        /* k is 1 only on these cases */
-       if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
-               *k = 1;
+       if (req->rate >= 768000000 || req->rate == 42000000 ||
+                       req->rate == 54000000)
+               req->k = 1;
        else
-               *k = 0;
+               req->k = 0;
 
        /* p will be 3 for divs under 10 */
        if (div < 10)
-               *p = 3;
+               req->p = 3;
 
        /* p will be 2 for divs between 10 - 20 and odd divs under 32 */
        else if (div < 20 || (div < 32 && (div & 1)))
-               *p = 2;
+               req->p = 2;
 
        /* p will be 1 for even divs under 32, divs under 40 and odd pairs
         * of divs between 40-62 */
        else if (div < 40 || (div < 64 && (div & 2)))
-               *p = 1;
+               req->p = 1;
 
        /* any other entries have p = 0 */
        else
-               *p = 0;
+               req->p = 0;
 
        /* calculate a suitable n based on k and p */
-       div <<= *p;
-       div /= (*k + 1);
-       *n = div / 4;
+       div <<= req->p;
+       div /= (req->k + 1);
+       req->n = div / 4;
 }
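
The A10 PLL1 getter keeps its factor-selection heuristics, only rewritten onto the request structure. As a worked example (assuming the usual A10 PLL1 relation rate = (parent_rate * n * (k + 1) >> p) / (m + 1) with a 24 MHz parent; the formula comment itself sits just above the visible hunk), a 1008 MHz request resolves like this:

#include <stdio.h>

int main(void)
{
        unsigned long rate = 1008000000;
        unsigned int div, n, k, m = 0, p;       /* m is always zero for pll1 */

        div = rate / 6000000;                   /* normalize to 6 MHz steps -> 168 */
        rate = 6000000UL * div;

        k = (rate >= 768000000) ? 1 : 0;        /* the 42/54 MHz special cases don't apply */

        if (div < 10)
                p = 3;
        else if (div < 20 || (div < 32 && (div & 1)))
                p = 2;
        else if (div < 40 || (div < 64 && (div & 2)))
                p = 1;
        else
                p = 0;                          /* 168 falls through to here */

        div <<= p;
        div /= (k + 1);
        n = div / 4;                            /* -> 21 */

        printf("n=%u k=%u m=%u p=%u -> %lu Hz\n", n, k, m, p,
               ((24000000UL * n * (k + 1)) >> p) / (m + 1));
        return 0;
}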
 
 /**
@@ -297,15 +85,14 @@ static void sun4i_get_pll1_factors(u32 *freq, u32 parent_rate,
  * rate = parent_rate * (n + 1) * (k + 1) / (m + 1);
  * parent_rate should always be 24MHz
  */
-static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun6i_a31_get_pll1_factors(struct factors_request *req)
 {
        /*
         * We can operate only on MHz, this will make our life easier
         * later.
         */
-       u32 freq_mhz = *freq / 1000000;
-       u32 parent_freq_mhz = parent_rate / 1000000;
+       u32 freq_mhz = req->rate / 1000000;
+       u32 parent_freq_mhz = req->parent_rate / 1000000;
 
        /*
         * Round down the frequency to the closest multiple of either
@@ -319,28 +106,20 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
        else
                freq_mhz = round_freq_16;
 
-       *freq = freq_mhz * 1000000;
-
-       /*
-        * If the factors pointer are null, we were just called to
-        * round down the frequency.
-        * Exit.
-        */
-       if (n == NULL)
-               return;
+       req->rate = freq_mhz * 1000000;
 
        /* If the frequency is a multiple of 32 MHz, k is always 3 */
        if (!(freq_mhz % 32))
-               *k = 3;
+               req->k = 3;
        /* If the frequency is a multiple of 9 MHz, k is always 2 */
        else if (!(freq_mhz % 9))
-               *k = 2;
+               req->k = 2;
        /* If the frequency is a multiple of 8 MHz, k is always 1 */
        else if (!(freq_mhz % 8))
-               *k = 1;
+               req->k = 1;
        /* Otherwise, we don't use the k factor */
        else
-               *k = 0;
+               req->k = 0;
 
        /*
         * If the frequency is a multiple of 2 but not a multiple of
@@ -351,27 +130,28 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
         * somehow relates to this frequency.
         */
        if ((freq_mhz % 6) == 2 || (freq_mhz % 6) == 4)
-               *m = 2;
+               req->m = 2;
        /*
         * If the frequency is a multiple of 6MHz, but the factor is
         * odd, m will be 3
         */
        else if ((freq_mhz / 6) & 1)
-               *m = 3;
+               req->m = 3;
        /* Otherwise, we end up with m = 1 */
        else
-               *m = 1;
+               req->m = 1;
 
        /* Calculate n thanks to the above factors we already got */
-       *n = freq_mhz * (*m + 1) / ((*k + 1) * parent_freq_mhz) - 1;
+       req->n = freq_mhz * (req->m + 1) / ((req->k + 1) * parent_freq_mhz)
+                - 1;
 
        /*
         * If n ends up being out of bounds and we can still decrease
         * m, do it.
         */
-       if ((*n + 1) > 31 && (*m + 1) > 1) {
-               *n = (*n + 1) / 2 - 1;
-               *m = (*m + 1) / 2 - 1;
+       if ((req->n + 1) > 31 && (req->m + 1) > 1) {
+               req->n = (req->n + 1) / 2 - 1;
+               req->m = (req->m + 1) / 2 - 1;
        }
 }
 
@@ -382,45 +162,41 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
  * parent_rate is always 24Mhz
  */
 
-static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
-                                  u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun8i_a23_get_pll1_factors(struct factors_request *req)
 {
        u8 div;
 
        /* Normalize value to a 6M multiple */
-       div = *freq / 6000000;
-       *freq = 6000000 * div;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
+       div = req->rate / 6000000;
+       req->rate = 6000000 * div;
 
        /* m is always zero for pll1 */
-       *m = 0;
+       req->m = 0;
 
        /* k is 1 only on these cases */
-       if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
-               *k = 1;
+       if (req->rate >= 768000000 || req->rate == 42000000 ||
+                       req->rate == 54000000)
+               req->k = 1;
        else
-               *k = 0;
+               req->k = 0;
 
        /* p will be 2 for divs under 20 and odd divs under 32 */
        if (div < 20 || (div < 32 && (div & 1)))
-               *p = 2;
+               req->p = 2;
 
        /* p will be 1 for even divs under 32, divs under 40 and odd pairs
         * of divs between 40-62 */
        else if (div < 40 || (div < 64 && (div & 2)))
-               *p = 1;
+               req->p = 1;
 
        /* any other entries have p = 0 */
        else
-               *p = 0;
+               req->p = 0;
 
        /* calculate a suitable n based on k and p */
-       div <<= *p;
-       div /= (*k + 1);
-       *n = div / 4 - 1;
+       div <<= req->p;
+       div /= (req->k + 1);
+       req->n = div / 4 - 1;
 }
 
 /**
@@ -430,56 +206,55 @@ static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
  * parent_rate is always 24Mhz
  */
 
-static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
-                                  u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_pll5_factors(struct factors_request *req)
 {
        u8 div;
 
        /* Normalize value to a parent_rate multiple (24M) */
-       div = *freq / parent_rate;
-       *freq = parent_rate * div;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
+       div = req->rate / req->parent_rate;
+       req->rate = req->parent_rate * div;
 
        if (div < 31)
-               *k = 0;
+               req->k = 0;
        else if (div / 2 < 31)
-               *k = 1;
+               req->k = 1;
        else if (div / 3 < 31)
-               *k = 2;
+               req->k = 2;
        else
-               *k = 3;
+               req->k = 3;
 
-       *n = DIV_ROUND_UP(div, (*k+1));
+       req->n = DIV_ROUND_UP(div, (req->k + 1));
 }
 
 /**
- * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6x2
- * PLL6x2 rate is calculated as follows
- * rate = parent_rate * (n + 1) * (k + 1)
+ * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6
+ * PLL6 rate is calculated as follows
+ * rate = parent_rate * (n + 1) * (k + 1) / 2
  * parent_rate is always 24Mhz
  */
 
-static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun6i_a31_get_pll6_factors(struct factors_request *req)
 {
        u8 div;
 
        /* Normalize value to a parent_rate multiple (24M) */
-       div = *freq / parent_rate;
-       *freq = parent_rate * div;
+       div = req->rate / (req->parent_rate / 2);
+       req->rate = (req->parent_rate / 2) * div;
 
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
+       req->k = div / 32;
+       if (req->k > 3)
+               req->k = 3;
 
-       *k = div / 32;
-       if (*k > 3)
-               *k = 3;
+       req->n = DIV_ROUND_UP(div, (req->k + 1)) - 1;
+}
+
+static void sun6i_a31_pll6_recalc(struct factors_request *req)
+{
+       req->rate = req->parent_rate;
 
-       *n = DIV_ROUND_UP(div, (*k+1)) - 1;
+       req->rate *= req->n + 1;
+       req->rate *= req->k + 1;
+       req->rate /= 2;
 }
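
The A31 PLL6 getter now works on the /2 output directly and gains a recalc callback, so the two must agree; a standalone check for a 600 MHz request from a 24 MHz parent:

#include <stdio.h>

int main(void)
{
        unsigned long parent_rate = 24000000, rate = 600000000;
        unsigned int div, n, k;

        /* getter: normalize to parent/2 steps, then derive k and n */
        div = rate / (parent_rate / 2);         /* -> 50 */
        rate = (parent_rate / 2) * div;

        k = div / 32;                           /* -> 1 */
        if (k > 3)
                k = 3;
        n = (div + k) / (k + 1) - 1;            /* DIV_ROUND_UP(div, k + 1) - 1 -> 24 */

        /* recalc: rebuild the rate from the stored factors */
        printf("getter %lu Hz, recalc %lu Hz (n=%u k=%u)\n",
               rate, parent_rate * (n + 1) * (k + 1) / 2, n, k);
        return 0;
}

Dropping the old pll6x2 base clock plus fixed-divider arrangement (see the removed sun6i_a31_pll6_divs_data further down) is what makes this direct /2 representation possible.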
 
 /**
@@ -488,37 +263,94 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
  * rate = parent_rate >> p
  */
 
-static void sun5i_a13_get_ahb_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun5i_a13_get_ahb_factors(struct factors_request *req)
 {
        u32 div;
 
        /* divide only */
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
        /*
         * user manual says valid speed is 8k ~ 276M, but tests show it
         * can work at speeds up to 300M, just after reparenting to pll6
         */
-       if (*freq < 8000)
-               *freq = 8000;
-       if (*freq > 300000000)
-               *freq = 300000000;
+       if (req->rate < 8000)
+               req->rate = 8000;
+       if (req->rate > 300000000)
+               req->rate = 300000000;
 
-       div = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+       div = order_base_2(DIV_ROUND_UP(req->parent_rate, req->rate));
 
        /* p = 0 ~ 3 */
        if (div > 3)
                div = 3;
 
-       *freq = parent_rate >> div;
+       req->rate = req->parent_rate >> div;
 
-       /* we were called to round the frequency, we can now return */
-       if (p == NULL)
-               return;
+       req->p = div;
+}
+
+#define SUN6I_AHB1_PARENT_PLL6 3
+
+/**
+ * sun6i_get_ahb1_factors() - calculates m, p factors for AHB1
+ * AHB rate is calculated as follows
+ * rate = parent_rate >> p
+ *
+ * if parent is pll6, then
+ * parent_rate = pll6 rate / (m + 1)
+ */
+
+static void sun6i_get_ahb1_factors(struct factors_request *req)
+{
+       u8 div, calcp, calcm = 1;
+
+       /*
+        * clock can only divide, so we will never be able to achieve
+        * frequencies higher than the parent frequency
+        */
+       if (req->parent_rate && req->rate > req->parent_rate)
+               req->rate = req->parent_rate;
+
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
+
+       /* calculate pre-divider if parent is pll6 */
+       if (req->parent_index == SUN6I_AHB1_PARENT_PLL6) {
+               if (div < 4)
+                       calcp = 0;
+               else if (div / 2 < 4)
+                       calcp = 1;
+               else if (div / 4 < 4)
+                       calcp = 2;
+               else
+                       calcp = 3;
 
-       *p = div;
+               calcm = DIV_ROUND_UP(div, 1 << calcp);
+       } else {
+               calcp = __roundup_pow_of_two(div);
+               calcp = calcp > 3 ? 3 : calcp;
+       }
+
+       req->rate = (req->parent_rate / calcm) >> calcp;
+       req->p = calcp;
+       req->m = calcm - 1;
+}
+
+/**
+ * sun6i_ahb1_recalc() - calculates AHB clock rate from m, p factors and
+ *                      parent index
+ */
+static void sun6i_ahb1_recalc(struct factors_request *req)
+{
+       req->rate = req->parent_rate;
+
+       /* apply pre-divider first if parent is pll6 */
+       if (req->parent_index == SUN6I_AHB1_PARENT_PLL6)
+               req->rate /= req->m + 1;
+
+       /* clk divider */
+       req->rate >>= req->p;
 }
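
AHB1 is the other user of the new recalc hook, because its effective parent rate depends on the mux setting; a standalone run confirms getter and recalc agree when the PLL6 pre-divider is involved (hypothetical 600 MHz PLL6 parent, 200 MHz request):

#include <stdio.h>

#define SUN6I_AHB1_PARENT_PLL6  3

int main(void)
{
        unsigned long parent_rate = 600000000, rate = 200000000, recalc;
        unsigned int parent_index = SUN6I_AHB1_PARENT_PLL6;
        unsigned int div, calcp, calcm;

        /* getter */
        div = (parent_rate + rate - 1) / rate;          /* DIV_ROUND_UP -> 3 */
        calcp = 0;                                      /* div < 4, first pll6 branch */
        calcm = (div + (1U << calcp) - 1) >> calcp;     /* -> 3, i.e. m = 2 */
        rate = (parent_rate / calcm) >> calcp;          /* -> 200 MHz */

        /* recalc: the m + 1 pre-divider applies because the parent is pll6 */
        recalc = parent_rate;
        if (parent_index == SUN6I_AHB1_PARENT_PLL6)
                recalc /= calcm;                        /* m + 1 */
        recalc >>= calcp;

        printf("getter %lu Hz, recalc %lu Hz (m=%u p=%u)\n",
               rate, recalc, calcm - 1, calcp);
        return 0;
}

This is the parent_index field earning its keep: the removed sun6i_ahb1_clk_round() had to be told the parent explicitly, while the factors core now passes it along in the request.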
 
 /**
@@ -527,39 +359,34 @@ static void sun5i_a13_get_ahb_factors(u32 *freq, u32 parent_rate,
  * rate = (parent_rate >> p) / (m + 1);
  */
 
-static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
-                                  u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun4i_get_apb1_factors(struct factors_request *req)
 {
        u8 calcm, calcp;
+       int div;
 
-       if (parent_rate < *freq)
-               *freq = parent_rate;
+       if (req->parent_rate < req->rate)
+               req->rate = req->parent_rate;
 
-       parent_rate = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        /* Invalid rate! */
-       if (parent_rate > 32)
+       if (div > 32)
                return;
 
-       if (parent_rate <= 4)
+       if (div <= 4)
                calcp = 0;
-       else if (parent_rate <= 8)
+       else if (div <= 8)
                calcp = 1;
-       else if (parent_rate <= 16)
+       else if (div <= 16)
                calcp = 2;
        else
                calcp = 3;
 
-       calcm = (parent_rate >> calcp) - 1;
+       calcm = (req->parent_rate >> calcp) - 1;
 
-       *freq = (parent_rate >> calcp) / (calcm + 1);
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
-
-       *m = calcm;
-       *p = calcp;
+       req->rate = (req->parent_rate >> calcp) / (calcm + 1);
+       req->m = calcm;
+       req->p = calcp;
 }
 
 
@@ -571,17 +398,16 @@ static void sun4i_get_apb1_factors(u32 *freq, u32 parent_rate,
  * rate = (parent_rate >> p) / (m + 1);
  */
 
-static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
-                                     u8 *n, u8 *k, u8 *m, u8 *p)
+static void sun7i_a20_get_out_factors(struct factors_request *req)
 {
        u8 div, calcm, calcp;
 
        /* These clocks can only divide, so we will never be able to achieve
         * frequencies higher than the parent frequency */
-       if (*freq > parent_rate)
-               *freq = parent_rate;
+       if (req->rate > req->parent_rate)
+               req->rate = req->parent_rate;
 
-       div = DIV_ROUND_UP(parent_rate, *freq);
+       div = DIV_ROUND_UP(req->parent_rate, req->rate);
 
        if (div < 32)
                calcp = 0;
@@ -594,21 +420,16 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
 
        calcm = DIV_ROUND_UP(div, 1 << calcp);
 
-       *freq = (parent_rate >> calcp) / calcm;
-
-       /* we were called to round the frequency, we can now return */
-       if (n == NULL)
-               return;
-
-       *m = calcm - 1;
-       *p = calcp;
+       req->rate = (req->parent_rate >> calcp) / calcm;
+       req->m = calcm - 1;
+       req->p = calcp;
 }
 
 /**
  * sunxi_factors_clk_setup() - Setup function for factor clocks
  */
 
-static struct clk_factors_config sun4i_pll1_config = {
+static const struct clk_factors_config sun4i_pll1_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
@@ -619,7 +440,7 @@ static struct clk_factors_config sun4i_pll1_config = {
        .pwidth = 2,
 };
 
-static struct clk_factors_config sun6i_a31_pll1_config = {
+static const struct clk_factors_config sun6i_a31_pll1_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
@@ -629,7 +450,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
        .n_start = 1,
 };
 
-static struct clk_factors_config sun8i_a23_pll1_config = {
+static const struct clk_factors_config sun8i_a23_pll1_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
@@ -641,14 +462,14 @@ static struct clk_factors_config sun8i_a23_pll1_config = {
        .n_start = 1,
 };
 
-static struct clk_factors_config sun4i_pll5_config = {
+static const struct clk_factors_config sun4i_pll5_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
        .kwidth = 2,
 };
 
-static struct clk_factors_config sun6i_a31_pll6_config = {
+static const struct clk_factors_config sun6i_a31_pll6_config = {
        .nshift = 8,
        .nwidth = 5,
        .kshift = 4,
@@ -656,12 +477,19 @@ static struct clk_factors_config sun6i_a31_pll6_config = {
        .n_start = 1,
 };
 
-static struct clk_factors_config sun5i_a13_ahb_config = {
+static const struct clk_factors_config sun5i_a13_ahb_config = {
+       .pshift = 4,
+       .pwidth = 2,
+};
+
+static const struct clk_factors_config sun6i_ahb1_config = {
+       .mshift = 6,
+       .mwidth = 2,
        .pshift = 4,
        .pwidth = 2,
 };
 
-static struct clk_factors_config sun4i_apb1_config = {
+static const struct clk_factors_config sun4i_apb1_config = {
        .mshift = 0,
        .mwidth = 5,
        .pshift = 16,
@@ -669,7 +497,7 @@ static struct clk_factors_config sun4i_apb1_config = {
 };
 
 /* user manual says "n" but it's really "p" */
-static struct clk_factors_config sun7i_a20_out_config = {
+static const struct clk_factors_config sun7i_a20_out_config = {
        .mshift = 8,
        .mwidth = 5,
        .pshift = 20,
@@ -718,7 +546,7 @@ static const struct factors_data sun6i_a31_pll6_data __initconst = {
        .enable = 31,
        .table = &sun6i_a31_pll6_config,
        .getter = sun6i_a31_get_pll6_factors,
-       .name = "pll6x2",
+       .recalc = sun6i_a31_pll6_recalc,
 };
 
 static const struct factors_data sun5i_a13_ahb_data __initconst = {
@@ -728,6 +556,14 @@ static const struct factors_data sun5i_a13_ahb_data __initconst = {
        .getter = sun5i_a13_get_ahb_factors,
 };
 
+static const struct factors_data sun6i_ahb1_data __initconst = {
+       .mux = 12,
+       .muxmask = BIT(1) | BIT(0),
+       .table = &sun6i_ahb1_config,
+       .getter = sun6i_get_ahb1_factors,
+       .recalc = sun6i_ahb1_recalc,
+};
+
 static const struct factors_data sun4i_apb1_data __initconst = {
        .mux = 24,
        .muxmask = BIT(1) | BIT(0),
@@ -758,6 +594,68 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
        return sunxi_factors_register(node, data, &clk_lock, reg);
 }
 
+static void __init sun4i_pll1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun4i_pll1_data);
+}
+CLK_OF_DECLARE(sun4i_pll1, "allwinner,sun4i-a10-pll1-clk",
+              sun4i_pll1_clk_setup);
+
+static void __init sun6i_pll1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun6i_a31_pll1_data);
+}
+CLK_OF_DECLARE(sun6i_pll1, "allwinner,sun6i-a31-pll1-clk",
+              sun6i_pll1_clk_setup);
+
+static void __init sun8i_pll1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun8i_a23_pll1_data);
+}
+CLK_OF_DECLARE(sun8i_pll1, "allwinner,sun8i-a23-pll1-clk",
+              sun8i_pll1_clk_setup);
+
+static void __init sun7i_pll4_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun7i_a20_pll4_data);
+}
+CLK_OF_DECLARE(sun7i_pll4, "allwinner,sun7i-a20-pll4-clk",
+              sun7i_pll4_clk_setup);
+
+static void __init sun6i_pll6_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun6i_a31_pll6_data);
+}
+CLK_OF_DECLARE(sun6i_pll6, "allwinner,sun6i-a31-pll6-clk",
+              sun6i_pll6_clk_setup);
+
+static void __init sun5i_ahb_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun5i_a13_ahb_data);
+}
+CLK_OF_DECLARE(sun5i_ahb, "allwinner,sun5i-a13-ahb-clk",
+              sun5i_ahb_clk_setup);
+
+static void __init sun6i_ahb1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun6i_ahb1_data);
+}
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk",
+              sun6i_ahb1_clk_setup);
+
+static void __init sun4i_apb1_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun4i_apb1_data);
+}
+CLK_OF_DECLARE(sun4i_apb1, "allwinner,sun4i-a10-apb1-clk",
+              sun4i_apb1_clk_setup);
+
+static void __init sun7i_out_clk_setup(struct device_node *node)
+{
+       sunxi_factors_clk_setup(node, &sun7i_a20_out_data);
+}
+CLK_OF_DECLARE(sun7i_out, "allwinner,sun7i-a20-out-clk",
+              sun7i_out_clk_setup);
 
 
 /**
@@ -782,8 +680,8 @@ static const struct mux_data sun8i_h3_ahb2_mux_data __initconst = {
        .shift = 0,
 };
 
-static void __init sunxi_mux_clk_setup(struct device_node *node,
-                                      struct mux_data *data)
+static struct clk * __init sunxi_mux_clk_setup(struct device_node *node,
+                                              const struct mux_data *data)
 {
        struct clk *clk;
        const char *clk_name = node->name;
@@ -794,19 +692,60 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
        reg = of_iomap(node, 0);
 
        i = of_clk_parent_fill(node, parents, SUNXI_MAX_PARENTS);
-       of_property_read_string(node, "clock-output-names", &clk_name);
+       if (of_property_read_string(node, "clock-output-names", &clk_name)) {
+               pr_warn("%s: could not read clock-output-names for \"%s\"\n",
+                       __func__, clk_name);
+               goto out_unmap;
+       }
 
        clk = clk_register_mux(NULL, clk_name, parents, i,
                               CLK_SET_RATE_PARENT, reg,
                               data->shift, SUNXI_MUX_GATE_WIDTH,
                               0, &clk_lock);
 
-       if (clk) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               clk_register_clkdev(clk, clk_name, NULL);
+       if (IS_ERR(clk)) {
+               pr_warn("%s: failed to register mux clock %s: %ld\n", __func__,
+                       clk_name, PTR_ERR(clk));
+               goto out_unmap;
        }
+
+       of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+       return clk;
+
+out_unmap:
+       iounmap(reg);
+       return NULL;
 }
 
+static void __init sun4i_cpu_clk_setup(struct device_node *node)
+{
+       struct clk *clk;
+
+       clk = sunxi_mux_clk_setup(node, &sun4i_cpu_mux_data);
+       if (!clk)
+               return;
+
+       /* Protect CPU clock */
+       __clk_get(clk);
+       clk_prepare_enable(clk);
+}
+CLK_OF_DECLARE(sun4i_cpu, "allwinner,sun4i-a10-cpu-clk",
+              sun4i_cpu_clk_setup);
+
+static void __init sun6i_ahb1_mux_clk_setup(struct device_node *node)
+{
+       sunxi_mux_clk_setup(node, &sun6i_a31_ahb1_mux_data);
+}
+CLK_OF_DECLARE(sun6i_ahb1_mux, "allwinner,sun6i-a31-ahb1-mux-clk",
+              sun6i_ahb1_mux_clk_setup);
+
+static void __init sun8i_ahb2_clk_setup(struct device_node *node)
+{
+       sunxi_mux_clk_setup(node, &sun8i_h3_ahb2_mux_data);
+}
+CLK_OF_DECLARE(sun8i_ahb2, "allwinner,sun8i-h3-ahb2-clk",
+              sun8i_ahb2_clk_setup);
 
 
 /**
@@ -865,7 +804,7 @@ static const struct div_data sun4i_apb0_data __initconst = {
 };
 
 static void __init sunxi_divider_clk_setup(struct device_node *node,
-                                          struct div_data *data)
+                                          const struct div_data *data)
 {
        struct clk *clk;
        const char *clk_name = node->name;
@@ -882,12 +821,38 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
                                         reg, data->shift, data->width,
                                         data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
                                         data->table, &clk_lock);
-       if (clk) {
+       if (clk)
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               clk_register_clkdev(clk, clk_name, NULL);
-       }
 }
 
+static void __init sun4i_ahb_clk_setup(struct device_node *node)
+{
+       sunxi_divider_clk_setup(node, &sun4i_ahb_data);
+}
+CLK_OF_DECLARE(sun4i_ahb, "allwinner,sun4i-a10-ahb-clk",
+              sun4i_ahb_clk_setup);
+
+static void __init sun4i_apb0_clk_setup(struct device_node *node)
+{
+       sunxi_divider_clk_setup(node, &sun4i_apb0_data);
+}
+CLK_OF_DECLARE(sun4i_apb0, "allwinner,sun4i-a10-apb0-clk",
+              sun4i_apb0_clk_setup);
+
+static void __init sun4i_axi_clk_setup(struct device_node *node)
+{
+       sunxi_divider_clk_setup(node, &sun4i_axi_data);
+}
+CLK_OF_DECLARE(sun4i_axi, "allwinner,sun4i-a10-axi-clk",
+              sun4i_axi_clk_setup);
+
+static void __init sun8i_axi_clk_setup(struct device_node *node)
+{
+       sunxi_divider_clk_setup(node, &sun8i_a23_axi_data);
+}
+CLK_OF_DECLARE(sun8i_axi, "allwinner,sun8i-a23-axi-clk",
+              sun8i_axi_clk_setup);
+
 
 
 /**
@@ -955,15 +920,6 @@ static const struct divs_data pll6_divs_data __initconst = {
        }
 };
 
-static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
-       .factors = &sun6i_a31_pll6_data,
-       .ndivs = 2,
-       .div = {
-               { .fixed = 2 }, /* normal output */
-               { .self = 1 }, /* base factor clock, 2x */
-       }
-};
-
 /**
  * sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
  *
@@ -975,8 +931,8 @@ static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
  *           |________________________|
  */
 
-static void __init sunxi_divs_clk_setup(struct device_node *node,
-                                       struct divs_data *data)
+static struct clk ** __init sunxi_divs_clk_setup(struct device_node *node,
+                                                const struct divs_data *data)
 {
        struct clk_onecell_data *clk_data;
        const char *parent;
@@ -1003,7 +959,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
 
        clk_data = kmalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
        if (!clk_data)
-               return;
+               return NULL;
 
        clks = kcalloc(ndivs, sizeof(*clks), GFP_KERNEL);
        if (!clks)
@@ -1081,7 +1037,6 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
                                                 clkflags);
 
                WARN_ON(IS_ERR(clk_data->clks[i]));
-               clk_register_clkdev(clks[i], clk_name, NULL);
        }
 
        /* Adjust to the real max */
@@ -1089,7 +1044,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
 
        of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 
-       return;
+       return clks;
 
 free_gate:
        kfree(gate);
@@ -1097,130 +1052,28 @@ free_clks:
        kfree(clks);
 free_clkdata:
        kfree(clk_data);
+       return NULL;
 }
 
-
-
-/* Matches for factors clocks */
-static const struct of_device_id clk_factors_match[] __initconst = {
-       {.compatible = "allwinner,sun4i-a10-pll1-clk", .data = &sun4i_pll1_data,},
-       {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
-       {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
-       {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
-       {.compatible = "allwinner,sun5i-a13-ahb-clk", .data = &sun5i_a13_ahb_data,},
-       {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
-       {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
-       {}
-};
-
-/* Matches for divider clocks */
-static const struct of_device_id clk_div_match[] __initconst = {
-       {.compatible = "allwinner,sun4i-a10-axi-clk", .data = &sun4i_axi_data,},
-       {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
-       {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
-       {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
-       {}
-};
-
-/* Matches for divided outputs */
-static const struct of_device_id clk_divs_match[] __initconst = {
-       {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
-       {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
-       {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_divs_data,},
-       {}
-};
-
-/* Matches for mux clocks */
-static const struct of_device_id clk_mux_match[] __initconst = {
-       {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
-       {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
-       {.compatible = "allwinner,sun8i-h3-ahb2-clk", .data = &sun8i_h3_ahb2_mux_data,},
-       {}
-};
-
-
-static void __init of_sunxi_table_clock_setup(const struct of_device_id *clk_match,
-                                             void *function)
-{
-       struct device_node *np;
-       const struct div_data *data;
-       const struct of_device_id *match;
-       void (*setup_function)(struct device_node *, const void *) = function;
-
-       for_each_matching_node_and_match(np, clk_match, &match) {
-               data = match->data;
-               setup_function(np, data);
-       }
-}
-
-static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
-{
-       unsigned int i;
-
-       /* Register divided output clocks */
-       of_sunxi_table_clock_setup(clk_divs_match, sunxi_divs_clk_setup);
-
-       /* Register factor clocks */
-       of_sunxi_table_clock_setup(clk_factors_match, sunxi_factors_clk_setup);
-
-       /* Register divider clocks */
-       of_sunxi_table_clock_setup(clk_div_match, sunxi_divider_clk_setup);
-
-       /* Register mux clocks */
-       of_sunxi_table_clock_setup(clk_mux_match, sunxi_mux_clk_setup);
-
-       /* Protect the clocks that needs to stay on */
-       for (i = 0; i < nclocks; i++) {
-               struct clk *clk = clk_get(NULL, clocks[i]);
-
-               if (!IS_ERR(clk))
-                       clk_prepare_enable(clk);
-       }
-}
-
-static const char *sun4i_a10_critical_clocks[] __initdata = {
-       "pll5_ddr",
-};
-
-static void __init sun4i_a10_init_clocks(struct device_node *node)
+static void __init sun4i_pll5_clk_setup(struct device_node *node)
 {
-       sunxi_init_clocks(sun4i_a10_critical_clocks,
-                         ARRAY_SIZE(sun4i_a10_critical_clocks));
-}
-CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks);
+       struct clk **clks;
 
-static const char *sun5i_critical_clocks[] __initdata = {
-       "cpu",
-       "pll5_ddr",
-};
+       clks = sunxi_divs_clk_setup(node, &pll5_divs_data);
+       if (!clks)
+               return;
 
-static void __init sun5i_init_clocks(struct device_node *node)
-{
-       sunxi_init_clocks(sun5i_critical_clocks,
-                         ARRAY_SIZE(sun5i_critical_clocks));
+       /* Protect PLL5_DDR */
+       __clk_get(clks[0]);
+       clk_prepare_enable(clks[0]);
 }
-CLK_OF_DECLARE(sun5i_a10s_clk_init, "allwinner,sun5i-a10s", sun5i_init_clocks);
-CLK_OF_DECLARE(sun5i_a13_clk_init, "allwinner,sun5i-a13", sun5i_init_clocks);
-CLK_OF_DECLARE(sun5i_r8_clk_init, "allwinner,sun5i-r8", sun5i_init_clocks);
-CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
+CLK_OF_DECLARE(sun4i_pll5, "allwinner,sun4i-a10-pll5-clk",
+              sun4i_pll5_clk_setup);
 
-static const char *sun6i_critical_clocks[] __initdata = {
-       "cpu",
-};
-
-static void __init sun6i_init_clocks(struct device_node *node)
+static void __init sun4i_pll6_clk_setup(struct device_node *node)
 {
-       sunxi_init_clocks(sun6i_critical_clocks,
-                         ARRAY_SIZE(sun6i_critical_clocks));
+       sunxi_divs_clk_setup(node, &pll6_divs_data);
 }
-CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
-CLK_OF_DECLARE(sun6i_a31s_clk_init, "allwinner,sun6i-a31s", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_a33_clk_init, "allwinner,sun8i-a33", sun6i_init_clocks);
-CLK_OF_DECLARE(sun8i_h3_clk_init, "allwinner,sun8i-h3", sun6i_init_clocks);
+CLK_OF_DECLARE(sun4i_pll6, "allwinner,sun4i-a10-pll6-clk",
+              sun4i_pll6_clk_setup);
 
-static void __init sun9i_init_clocks(struct device_node *node)
-{
-       sunxi_init_clocks(NULL, 0);
-}
-CLK_OF_DECLARE(sun9i_a80_clk_init, "allwinner,sun9i-a80", sun9i_init_clocks);
index 67b8e38f4ee96d1dfec6a1230f0258ab65285980..5432b1c198a4e2fc77825d21f9382eee30b2249c 100644 (file)
@@ -216,6 +216,18 @@ static void __init sun8i_a23_usb_setup(struct device_node *node)
 }
 CLK_OF_DECLARE(sun8i_a23_usb, "allwinner,sun8i-a23-usb-clk", sun8i_a23_usb_setup);
 
+static const struct usb_clk_data sun8i_h3_usb_clk_data __initconst = {
+       .clk_mask =  BIT(19) | BIT(18) | BIT(17) | BIT(16) |
+                    BIT(11) | BIT(10) | BIT(9) | BIT(8),
+       .reset_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+};
+
+static void __init sun8i_h3_usb_setup(struct device_node *node)
+{
+       sunxi_usb_clk_setup(node, &sun8i_h3_usb_clk_data, &sun4i_a10_usb_lock);
+}
+CLK_OF_DECLARE(sun8i_h3_usb, "allwinner,sun8i-h3-usb-clk", sun8i_h3_usb_setup);
+
 static const struct usb_clk_data sun9i_a80_usb_mod_data __initconst = {
        .clk_mask = BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2) | BIT(1),
        .reset_mask = BIT(19) | BIT(18) | BIT(17),
@@ -243,15 +255,3 @@ static void __init sun9i_a80_usb_phy_setup(struct device_node *node)
        sunxi_usb_clk_setup(node, &sun9i_a80_usb_phy_data, &a80_usb_phy_lock);
 }
 CLK_OF_DECLARE(sun9i_a80_usb_phy, "allwinner,sun9i-a80-usb-phy-clk", sun9i_a80_usb_phy_setup);
-
-static const struct usb_clk_data sun8i_h3_usb_clk_data __initconst = {
-       .clk_mask =  BIT(19) | BIT(18) | BIT(17) | BIT(16) |
-                    BIT(11) | BIT(10) | BIT(9) | BIT(8),
-       .reset_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
-};
-
-static void __init sun8i_h3_usb_setup(struct device_node *node)
-{
-       sunxi_usb_clk_setup(node, &sun8i_h3_usb_clk_data, &sun4i_a10_usb_lock);
-}
-CLK_OF_DECLARE(sun8i_h3_usb, "allwinner,sun8i-h3-usb-clk", sun8i_h3_usb_setup);
index e1fe8f35d45c47c553997845657e09d3b71ca9a1..74e7544f861ba083f63f1d5aa886a7a90b0f295f 100644 (file)
@@ -450,8 +450,10 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
                struct emc_timing *timing = tegra->timings + (i++);
 
                err = load_one_timing_from_dt(tegra, timing, child);
-               if (err)
+               if (err) {
+                       of_node_put(child);
                        return err;
+               }
 
                timing->ram_code = ram_code;
        }
@@ -499,9 +501,9 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
                 * fuses until the apbmisc driver is loaded.
                 */
                err = load_timings_from_dt(tegra, node, node_ram_code);
+               of_node_put(node);
                if (err)
                        return ERR_PTR(err);
-               of_node_put(node);
                break;
        }
 
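Both hunks in this file fix the same class of bug: a struct device_node obtained from the OF iteration and lookup helpers is reference-counted and must be released with of_node_put() on every exit path, including early error returns. A rough sketch of that rule, with a hypothetical helper that is not part of this commit:

#include <linux/of.h>

/* Illustration only: walk the children of "parent" and bail out on the
 * first error without leaking the reference the iterator holds. */
static int parse_children(struct device_node *parent)
{
	struct device_node *child;
	int err;

	for_each_child_of_node(parent, child) {
		err = 0;	/* ... parse "child" here ... */
		if (err) {
			/* the iterator took a reference on "child";
			 * drop it before the early return */
			of_node_put(child);
			return err;
		}
	}

	return 0;
}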
index 19ce0738ee764bb13e9dfde5ca542fd2cf472e9a..62ea38187b715814ac5d7b5265cbd4cd934a22c3 100644 (file)
@@ -11,6 +11,7 @@ enum clk_id {
        tegra_clk_afi,
        tegra_clk_amx,
        tegra_clk_amx1,
+       tegra_clk_apb2ape,
        tegra_clk_apbdma,
        tegra_clk_apbif,
        tegra_clk_ape,
index a534bfab30b39ee53c81a89c3a5c5b1d2979144e..6ac3f843e7caa3a8abb6513a0eb9fc2c829d3a39 100644 (file)
 #define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
                                PLLE_SS_CNTL_SSC_BYP)
 #define PLLE_SS_MAX_MASK 0x1ff
-#define PLLE_SS_MAX_VAL 0x25
+#define PLLE_SS_MAX_VAL_TEGRA114 0x25
+#define PLLE_SS_MAX_VAL_TEGRA210 0x21
 #define PLLE_SS_INC_MASK (0xff << 16)
 #define PLLE_SS_INC_VAL (0x1 << 16)
 #define PLLE_SS_INCINTRV_MASK (0x3f << 24)
-#define PLLE_SS_INCINTRV_VAL (0x20 << 24)
+#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24)
+#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24)
 #define PLLE_SS_COEFFICIENTS_MASK \
        (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
-#define PLLE_SS_COEFFICIENTS_VAL \
-       (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL)
+#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \
+       (PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\
+        PLLE_SS_INCINTRV_VAL_TEGRA114)
+#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \
+       (PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\
+        PLLE_SS_INCINTRV_VAL_TEGRA210)
 
 #define PLLE_AUX_PLLP_SEL      BIT(2)
 #define PLLE_AUX_USE_LOCKDET   BIT(3)
@@ -880,7 +886,7 @@ static int clk_plle_training(struct tegra_clk_pll *pll)
 static int clk_plle_enable(struct clk_hw *hw)
 {
        struct tegra_clk_pll *pll = to_clk_pll(hw);
-       unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+       unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
        struct tegra_clk_pll_freq_table sel;
        u32 val;
        int err;
@@ -1378,7 +1384,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        u32 val;
        int ret;
        unsigned long flags = 0;
-       unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+       unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
 
        if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
                return -EINVAL;
@@ -1401,7 +1407,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        val |= PLLE_MISC_IDDQ_SW_CTRL;
        val &= ~PLLE_MISC_IDDQ_SW_VALUE;
        val |= PLLE_MISC_PLLE_PTS;
-       val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
+       val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
        pll_writel_misc(val, pll);
        udelay(5);
 
@@ -1428,7 +1434,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        val = pll_readl(PLLE_SS_CTRL, pll);
        val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
        val &= ~PLLE_SS_COEFFICIENTS_MASK;
-       val |= PLLE_SS_COEFFICIENTS_VAL;
+       val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114;
        pll_writel(val, PLLE_SS_CTRL, pll);
        val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
        pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2012,9 +2018,9 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
        struct tegra_clk_pll *pll = to_clk_pll(hw);
        struct tegra_clk_pll_freq_table sel;
        u32 val;
-       int ret;
+       int ret = 0;
        unsigned long flags = 0;
-       unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+       unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
 
        if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
                return -EINVAL;
@@ -2022,22 +2028,20 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
        if (pll->lock)
                spin_lock_irqsave(pll->lock, flags);
 
+       val = pll_readl(pll->params->aux_reg, pll);
+       if (val & PLLE_AUX_SEQ_ENABLE)
+               goto out;
+
        val = pll_readl_base(pll);
        val &= ~BIT(30); /* Disable lock override */
        pll_writel_base(val, pll);
 
-       val = pll_readl(pll->params->aux_reg, pll);
-       val |= PLLE_AUX_ENABLE_SWCTL;
-       val &= ~PLLE_AUX_SEQ_ENABLE;
-       pll_writel(val, pll->params->aux_reg, pll);
-       udelay(1);
-
        val = pll_readl_misc(pll);
        val |= PLLE_MISC_LOCK_ENABLE;
        val |= PLLE_MISC_IDDQ_SW_CTRL;
        val &= ~PLLE_MISC_IDDQ_SW_VALUE;
        val |= PLLE_MISC_PLLE_PTS;
-       val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
+       val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
        pll_writel_misc(val, pll);
        udelay(5);
 
@@ -2067,7 +2071,7 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
        val = pll_readl(PLLE_SS_CTRL, pll);
        val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
        val &= ~PLLE_SS_COEFFICIENTS_MASK;
-       val |= PLLE_SS_COEFFICIENTS_VAL;
+       val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210;
        pll_writel(val, PLLE_SS_CTRL, pll);
        val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
        pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2104,15 +2108,25 @@ static void clk_plle_tegra210_disable(struct clk_hw *hw)
        if (pll->lock)
                spin_lock_irqsave(pll->lock, flags);
 
+       /* If PLLE HW sequencer is enabled, SW should not disable PLLE */
+       val = pll_readl(pll->params->aux_reg, pll);
+       if (val & PLLE_AUX_SEQ_ENABLE)
+               goto out;
+
        val = pll_readl_base(pll);
        val &= ~PLLE_BASE_ENABLE;
        pll_writel_base(val, pll);
 
+       val = pll_readl(pll->params->aux_reg, pll);
+       val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL;
+       pll_writel(val, pll->params->aux_reg, pll);
+
        val = pll_readl_misc(pll);
        val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
        pll_writel_misc(val, pll);
        udelay(1);
 
+out:
        if (pll->lock)
                spin_unlock_irqrestore(pll->lock, flags);
 }
index 6ad381a888a6176801494c60bcc4f057f23ebe82..ea2b9cbf9e70b0c10204d5d34dc96ab929d36427 100644 (file)
@@ -773,7 +773,7 @@ static struct tegra_periph_init_data periph_clks[] = {
        XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
        XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8),
        MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb),
-       MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
+       MUX8("nvenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
        MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
        MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
        MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
@@ -782,7 +782,7 @@ static struct tegra_periph_init_data periph_clks[] = {
        NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
        MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
        MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
-       MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c),
+       I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
        MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif),
        MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape),
        MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb),
@@ -829,6 +829,7 @@ static struct tegra_periph_init_data gate_clks[] = {
        GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0),
        GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0),
        GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0),
+       GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0),
 };
 
 static struct tegra_periph_init_data div_clks[] = {
index 4559a20e3af6e424e52c7b08930cd7664965fea2..474de0f0c26d80b0f9b20ff845575ce049165038 100644 (file)
@@ -67,7 +67,7 @@ static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
                                         "pll_p", "pll_p_out4", "unused",
                                         "unused", "pll_x", "pll_x_out0" };
 
-const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
+static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
        .gen = gen4,
        .sclk_parents = sclk_parents,
        .cclk_g_parents = cclk_g_parents,
@@ -93,7 +93,7 @@ static const char *cclk_lp_parents_gen5[] = { "clk_m", "unused", "clk_32k", "unu
                                        "unused", "unused", "unused", "unused",
                                        "dfllCPU_out" };
 
-const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
+static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
        .gen = gen5,
        .sclk_parents = sclk_parents_gen5,
        .cclk_g_parents = cclk_g_parents_gen5,
@@ -171,7 +171,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
        *dt_clk = clk;
 }
 
-void __init tegra_super_clk_init(void __iomem *clk_base,
+static void __init tegra_super_clk_init(void __iomem *clk_base,
                                void __iomem *pmc_base,
                                struct tegra_clk *tegra_clks,
                                struct tegra_clk_pll_params *params,
index 58514c44ea830c476b8b23606e962ecf8ef8ef7a..637041fd53ad11b95cd305952cb0a80eaf0d3b61 100644 (file)
@@ -59,8 +59,8 @@
 #define PLLC3_MISC3 0x50c
 
 #define PLLM_BASE 0x90
-#define PLLM_MISC0 0x9c
 #define PLLM_MISC1 0x98
+#define PLLM_MISC2 0x9c
 #define PLLP_BASE 0xa0
 #define PLLP_MISC0 0xac
 #define PLLP_MISC1 0x680
@@ -99,7 +99,7 @@
 #define PLLC4_MISC0 0x5a8
 #define PLLC4_OUT 0x5e4
 #define PLLMB_BASE 0x5e8
-#define PLLMB_MISC0 0x5ec
+#define PLLMB_MISC1 0x5ec
 #define PLLA1_BASE 0x6a4
 #define PLLA1_MISC0 0x6a8
 #define PLLA1_MISC1 0x6ac
@@ -243,7 +243,8 @@ static unsigned long tegra210_input_freq[] = {
 };
 
 static const char *mux_pllmcp_clkm[] = {
-       "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
+       "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
+       "pll_p",
 };
 #define mux_pllmcp_clkm_idx NULL
 
@@ -367,12 +368,12 @@ static const char *mux_pllmcp_clkm[] = {
 /* PLLMB */
 #define PLLMB_BASE_LOCK                        (1 << 27)
 
-#define PLLMB_MISC0_LOCK_OVERRIDE      (1 << 18)
-#define PLLMB_MISC0_IDDQ               (1 << 17)
-#define PLLMB_MISC0_LOCK_ENABLE                (1 << 16)
+#define PLLMB_MISC1_LOCK_OVERRIDE      (1 << 18)
+#define PLLMB_MISC1_IDDQ               (1 << 17)
+#define PLLMB_MISC1_LOCK_ENABLE                (1 << 16)
 
-#define PLLMB_MISC0_DEFAULT_VALUE      0x00030000
-#define PLLMB_MISC0_WRITE_MASK         0x0007ffff
+#define PLLMB_MISC1_DEFAULT_VALUE      0x00030000
+#define PLLMB_MISC1_WRITE_MASK         0x0007ffff
 
 /* PLLP */
 #define PLLP_BASE_OVERRIDE             (1 << 28)
@@ -457,7 +458,8 @@ static void pllcx_check_defaults(struct tegra_clk_pll_params *params)
                        PLLCX_MISC3_WRITE_MASK);
 }
 
-void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
+static void tegra210_pllcx_set_defaults(const char *name,
+                                       struct tegra_clk_pll *pllcx)
 {
        pllcx->params->defaults_set = true;
 
@@ -482,22 +484,22 @@ void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
        udelay(1);
 }
 
-void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
 {
        tegra210_pllcx_set_defaults("PLL_C", pllcx);
 }
 
-void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
 {
        tegra210_pllcx_set_defaults("PLL_C2", pllcx);
 }
 
-void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
 {
        tegra210_pllcx_set_defaults("PLL_C3", pllcx);
 }
 
-void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
+static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
 {
        tegra210_pllcx_set_defaults("PLL_A1", pllcx);
 }
@@ -507,7 +509,7 @@ void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
  * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
  * Fractional SDM is allowed to provide exact audio rates.
  */
-void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
+static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
 {
        u32 mask;
        u32 val = readl_relaxed(clk_base + plla->params->base_reg);
@@ -559,7 +561,7 @@ void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
  * PLLD
  * PLL with fractional SDM.
  */
-void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
+static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
 {
        u32 val;
        u32 mask = 0xffff;
@@ -698,7 +700,7 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
        udelay(1);
 }
 
-void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
+static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
 {
        plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
                        PLLD2_MISC1_CFG_DEFAULT_VALUE,
@@ -706,7 +708,7 @@ void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
                        PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
 }
 
-void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
+static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
 {
        plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
                        PLLDP_MISC1_CFG_DEFAULT_VALUE,
@@ -719,7 +721,7 @@ void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
  * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
  * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
  */
-void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
+static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
 {
        plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
 }
@@ -728,7 +730,7 @@ void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
  * PLLRE
  * VCO is exposed to the clock tree directly along with post-divider output
  */
-void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
+static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
 {
        u32 mask;
        u32 val = readl_relaxed(clk_base + pllre->params->base_reg);
@@ -780,13 +782,13 @@ static void pllx_get_dyn_steps(struct clk_hw *hw, u32 *step_a, u32 *step_b)
 {
        unsigned long input_rate;
 
-       if (!IS_ERR_OR_NULL(hw->clk)) {
+       /* cf rate */
+       if (!IS_ERR_OR_NULL(hw->clk))
                input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
-               /* cf rate */
-               input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
-       } else {
+       else
                input_rate = 38400000;
-       }
+
+       input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
 
        switch (input_rate) {
        case 12000000:
@@ -841,7 +843,7 @@ static void pllx_check_defaults(struct tegra_clk_pll *pll)
                        PLLX_MISC5_WRITE_MASK);
 }
 
-void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
+static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
 {
        u32 val;
        u32 step_a, step_b;
@@ -901,7 +903,7 @@ void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
 }
 
 /* PLLMB */
-void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
+static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
 {
        u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);
 
@@ -914,15 +916,15 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
                 * PLL is ON: check if defaults already set, then set those
                 * that can be updated in flight.
                 */
-               val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ);
-               mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE;
+               val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
+               mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
                _pll_misc_chk_default(clk_base, pllmb->params, 0, val,
-                               ~mask & PLLMB_MISC0_WRITE_MASK);
+                               ~mask & PLLMB_MISC1_WRITE_MASK);
 
                /* Enable lock detect */
                val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
                val &= ~mask;
-               val |= PLLMB_MISC0_DEFAULT_VALUE & mask;
+               val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
                writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
                udelay(1);
 
@@ -930,7 +932,7 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
        }
 
        /* set IDDQ, enable lock detect */
-       writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE,
+       writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
                        clk_base + pllmb->params->ext_misc_reg[0]);
        udelay(1);
 }
@@ -960,7 +962,7 @@ static void pllp_check_defaults(struct tegra_clk_pll *pll, bool enabled)
                        ~mask & PLLP_MISC1_WRITE_MASK);
 }
 
-void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
+static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
 {
        u32 mask;
        u32 val = readl_relaxed(clk_base + pllp->params->base_reg);
@@ -1022,7 +1024,7 @@ static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control)
                        ~mask & PLLU_MISC1_WRITE_MASK);
 }
 
-void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
+static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
 {
        u32 val = readl_relaxed(clk_base + pllu->params->base_reg);
 
@@ -1212,8 +1214,9 @@ static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg)
        cfg->m *= PLL_SDM_COEFF;
 }
 
-unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
-                                         unsigned long parent_rate)
+static unsigned long
+tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
+                           unsigned long parent_rate)
 {
        unsigned long vco_min = params->vco_min;
 
@@ -1386,7 +1389,7 @@ static struct tegra_clk_pll_params pll_c_params = {
        .mdiv_default = 3,
        .div_nmp = &pllc_nmp,
        .freq_table = pll_cx_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = _pllc_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1425,7 +1428,7 @@ static struct tegra_clk_pll_params pll_c2_params = {
        .ext_misc_reg[2] = PLLC2_MISC2,
        .ext_misc_reg[3] = PLLC2_MISC3,
        .freq_table = pll_cx_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = _pllc2_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1455,7 +1458,7 @@ static struct tegra_clk_pll_params pll_c3_params = {
        .ext_misc_reg[2] = PLLC3_MISC2,
        .ext_misc_reg[3] = PLLC3_MISC3,
        .freq_table = pll_cx_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = _pllc3_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1505,7 +1508,6 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
        .base_reg = PLLC4_BASE,
        .misc_reg = PLLC4_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .max_p = PLL_QLIN_PDIV_MAX,
        .ext_misc_reg[0] = PLLC4_MISC0,
@@ -1517,8 +1519,7 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
        .div_nmp = &pllss_nmp,
        .freq_table = pll_c4_vco_freq_table,
        .set_defaults = tegra210_pllc4_set_defaults,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE |
-                TEGRA_PLL_VCO_OUT,
+       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
 
@@ -1559,15 +1560,15 @@ static struct tegra_clk_pll_params pll_m_params = {
        .vco_min = 800000000,
        .vco_max = 1866000000,
        .base_reg = PLLM_BASE,
-       .misc_reg = PLLM_MISC1,
+       .misc_reg = PLLM_MISC2,
        .lock_mask = PLL_BASE_LOCK,
        .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE,
        .lock_delay = 300,
-       .iddq_reg = PLLM_MISC0,
+       .iddq_reg = PLLM_MISC2,
        .iddq_bit_idx = PLLM_IDDQ_BIT,
        .max_p = PLL_QLIN_PDIV_MAX,
-       .ext_misc_reg[0] = PLLM_MISC0,
-       .ext_misc_reg[0] = PLLM_MISC1,
+       .ext_misc_reg[0] = PLLM_MISC2,
+       .ext_misc_reg[1] = PLLM_MISC1,
        .round_p_to_pdiv = pll_qlin_p_to_pdiv,
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
        .div_nmp = &pllm_nmp,
@@ -1586,19 +1587,18 @@ static struct tegra_clk_pll_params pll_mb_params = {
        .vco_min = 800000000,
        .vco_max = 1866000000,
        .base_reg = PLLMB_BASE,
-       .misc_reg = PLLMB_MISC0,
+       .misc_reg = PLLMB_MISC1,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE,
        .lock_delay = 300,
-       .iddq_reg = PLLMB_MISC0,
+       .iddq_reg = PLLMB_MISC1,
        .iddq_bit_idx = PLLMB_IDDQ_BIT,
        .max_p = PLL_QLIN_PDIV_MAX,
-       .ext_misc_reg[0] = PLLMB_MISC0,
+       .ext_misc_reg[0] = PLLMB_MISC1,
        .round_p_to_pdiv = pll_qlin_p_to_pdiv,
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
        .div_nmp = &pllm_nmp,
        .freq_table = pll_m_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = tegra210_pllmb_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1671,7 +1671,6 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
        .base_reg = PLLRE_BASE,
        .misc_reg = PLLRE_MISC0,
        .lock_mask = PLLRE_MISC_LOCK,
-       .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .max_p = PLL_QLIN_PDIV_MAX,
        .ext_misc_reg[0] = PLLRE_MISC0,
@@ -1681,8 +1680,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
        .div_nmp = &pllre_nmp,
        .freq_table = pll_re_vco_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC |
-                TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
+       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT,
        .set_defaults = tegra210_pllre_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1712,7 +1710,6 @@ static struct tegra_clk_pll_params pll_p_params = {
        .base_reg = PLLP_BASE,
        .misc_reg = PLLP_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .iddq_reg = PLLP_MISC0,
        .iddq_bit_idx = PLLXP_IDDQ_BIT,
@@ -1721,8 +1718,7 @@ static struct tegra_clk_pll_params pll_p_params = {
        .div_nmp = &pllp_nmp,
        .freq_table = pll_p_freq_table,
        .fixed_rate = 408000000,
-       .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK |
-                TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
+       .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
        .set_defaults = tegra210_pllp_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1750,7 +1746,7 @@ static struct tegra_clk_pll_params pll_a1_params = {
        .ext_misc_reg[2] = PLLA1_MISC2,
        .ext_misc_reg[3] = PLLA1_MISC3,
        .freq_table = pll_cx_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .set_defaults = _plla1_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -1787,7 +1783,6 @@ static struct tegra_clk_pll_params pll_a_params = {
        .base_reg = PLLA_BASE,
        .misc_reg = PLLA_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .round_p_to_pdiv = pll_qlin_p_to_pdiv,
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
@@ -1802,8 +1797,7 @@ static struct tegra_clk_pll_params pll_a_params = {
        .ext_misc_reg[1] = PLLA_MISC1,
        .ext_misc_reg[2] = PLLA_MISC2,
        .freq_table = pll_a_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW |
-                TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW,
        .set_defaults = tegra210_plla_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
        .set_gain = tegra210_clk_pll_set_gain,
@@ -1836,7 +1830,6 @@ static struct tegra_clk_pll_params pll_d_params = {
        .base_reg = PLLD_BASE,
        .misc_reg = PLLD_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE,
        .lock_delay = 1000,
        .iddq_reg = PLLD_MISC0,
        .iddq_bit_idx = PLLD_IDDQ_BIT,
@@ -1850,7 +1843,7 @@ static struct tegra_clk_pll_params pll_d_params = {
        .ext_misc_reg[0] = PLLD_MISC0,
        .ext_misc_reg[1] = PLLD_MISC1,
        .freq_table = pll_d_freq_table,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .mdiv_default = 1,
        .set_defaults = tegra210_plld_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
@@ -1876,7 +1869,6 @@ static struct tegra_clk_pll_params pll_d2_params = {
        .base_reg = PLLD2_BASE,
        .misc_reg = PLLD2_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .iddq_reg = PLLD2_BASE,
        .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1897,7 +1889,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
        .mdiv_default = 1,
        .freq_table = tegra210_pll_d2_freq_table,
        .set_defaults = tegra210_plld2_set_defaults,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
        .set_gain = tegra210_clk_pll_set_gain,
        .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1920,7 +1912,6 @@ static struct tegra_clk_pll_params pll_dp_params = {
        .base_reg = PLLDP_BASE,
        .misc_reg = PLLDP_MISC,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
        .lock_delay = 300,
        .iddq_reg = PLLDP_BASE,
        .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1941,7 +1932,7 @@ static struct tegra_clk_pll_params pll_dp_params = {
        .mdiv_default = 1,
        .freq_table = pll_dp_freq_table,
        .set_defaults = tegra210_plldp_set_defaults,
-       .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+       .flags = TEGRA_PLL_USE_LOCK,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
        .set_gain = tegra210_clk_pll_set_gain,
        .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1973,7 +1964,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
        .base_reg = PLLU_BASE,
        .misc_reg = PLLU_MISC0,
        .lock_mask = PLL_BASE_LOCK,
-       .lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE,
        .lock_delay = 1000,
        .iddq_reg = PLLU_MISC0,
        .iddq_bit_idx = PLLU_IDDQ_BIT,
@@ -1983,8 +1973,7 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
        .pdiv_tohw = pll_qlin_pdiv_to_hw,
        .div_nmp = &pllu_nmp,
        .freq_table = pll_u_freq_table,
-       .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE |
-                TEGRA_PLL_VCO_OUT,
+       .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
        .set_defaults = tegra210_pllu_set_defaults,
        .calc_rate = tegra210_pll_fixed_mdiv_cfg,
 };
@@ -2218,6 +2207,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
        [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true },
        [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true },
        [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true },
+       [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true },
 };
 
 static struct tegra_devclk devclks[] __initdata = {
@@ -2519,7 +2509,7 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
 
        /* PLLU_VCO */
        val = readl(clk_base + pll_u_vco_params.base_reg);
-       val &= ~BIT(24); /* disable PLLU_OVERRIDE */
+       val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
        writel(val, clk_base + pll_u_vco_params.base_reg);
 
        clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
@@ -2738,8 +2728,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
        { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
        { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
-       { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
-       { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
        { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
        { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
        { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
index 56777f04d2d96381593b4344ac3306bb5dcafbb4..33db7406c0e274e8c39c7b0d71509a117c324022 100644 (file)
@@ -30,6 +30,8 @@ config CLKSRC_MMIO
 config DIGICOLOR_TIMER
        bool "Digicolor timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       select CLKSRC_MMIO
+       depends on HAS_IOMEM
        help
          Enables the support for the digicolor timer driver.
 
@@ -55,6 +57,7 @@ config ARMADA_370_XP_TIMER
        bool "Armada 370 and XP timer driver" if COMPILE_TEST
        depends on ARM
        select CLKSRC_OF
+       select CLKSRC_MMIO
        help
          Enables the support for the Armada 370 and XP timer driver.
 
@@ -76,6 +79,7 @@ config ORION_TIMER
 config SUN4I_TIMER
        bool "Sun4i timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        select CLKSRC_MMIO
        help
          Enables support for the Sun4i timer.
@@ -89,6 +93,7 @@ config SUN5I_HSTIMER
 
 config TEGRA_TIMER
        bool "Tegra timer driver" if COMPILE_TEST
+       select CLKSRC_MMIO
        depends on ARM
        help
          Enables support for the Tegra driver.
@@ -96,6 +101,7 @@ config TEGRA_TIMER
 config VT8500_TIMER
        bool "VT8500 timer driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        help
          Enables support for the VT8500 driver.
 
@@ -131,6 +137,7 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK
 config CLKSRC_DBX500_PRCMU
        bool "Clocksource PRCMU Timer" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        help
          Use the always on PRCMU Timer as clocksource
 
@@ -248,6 +255,7 @@ config CLKSRC_EXYNOS_MCT
 config CLKSRC_SAMSUNG_PWM
        bool "PWM timer drvier for Samsung S3C, S5P" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        help
          This is a new clocksource driver for the PWM timer found in
          Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
@@ -257,12 +265,14 @@ config CLKSRC_SAMSUNG_PWM
 config FSL_FTM_TIMER
        bool "Freescale FlexTimer Module driver" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        select CLKSRC_MMIO
        help
          Support for Freescale FlexTimer Module (FTM) timer.
 
 config VF_PIT_TIMER
        bool
+       select CLKSRC_MMIO
        help
          Support for Period Interrupt Timer on Freescale Vybrid Family SoCs.
 
@@ -360,6 +370,7 @@ config CLKSRC_TANGO_XTAL
 config CLKSRC_PXA
        bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST
        depends on GENERIC_CLOCKEVENTS
+       depends on HAS_IOMEM
        select CLKSRC_MMIO
        help
          This enables OST0 support available on PXA and SA-11x0
@@ -394,6 +405,7 @@ config CLKSRC_ST_LPC
        bool "Low power clocksource found in the LPC" if COMPILE_TEST
        select CLKSRC_OF if OF
        depends on HAS_IOMEM
+       select CLKSRC_MMIO
        help
          Enable this option to use the Low Power controller timer
          as clocksource.
index 6ee91401918eba99a33c67b554da853747a0c5fa..4da2af9694a23b267cebdfa52da624e9f43b61fe 100644 (file)
@@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
 
        __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
        __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
-       clk_disable(tcd->clk);
+       if (!clockevent_state_detached(d))
+               clk_disable(tcd->clk);
 
        return 0;
 }
index 9bc37c437874a6ba185799680a093598cb213948..0ca74d07005830cec5abaf92a84c59af17325616 100644 (file)
@@ -142,15 +142,16 @@ static int allocate_resources(int cpu, struct device **cdev,
 
 try_again:
        cpu_reg = regulator_get_optional(cpu_dev, reg);
-       if (IS_ERR(cpu_reg)) {
+       ret = PTR_ERR_OR_ZERO(cpu_reg);
+       if (ret) {
                /*
                 * If cpu's regulator supply node is present, but regulator is
                 * not yet registered, we should try defering probe.
                 */
-               if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+               if (ret == -EPROBE_DEFER) {
                        dev_dbg(cpu_dev, "cpu%d regulator not ready, retry\n",
                                cpu);
-                       return -EPROBE_DEFER;
+                       return ret;
                }
 
                /* Try with "cpu-supply" */
@@ -159,18 +160,16 @@ try_again:
                        goto try_again;
                }
 
-               dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
-                       cpu, PTR_ERR(cpu_reg));
+               dev_dbg(cpu_dev, "no regulator for cpu%d: %d\n", cpu, ret);
        }
 
        cpu_clk = clk_get(cpu_dev, NULL);
-       if (IS_ERR(cpu_clk)) {
+       ret = PTR_ERR_OR_ZERO(cpu_clk);
+       if (ret) {
                /* put regulator */
                if (!IS_ERR(cpu_reg))
                        regulator_put(cpu_reg);
 
-               ret = PTR_ERR(cpu_clk);
-
                /*
                 * If cpu's clk node is present, but clock is not yet
                 * registered, we should try defering probe.
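The reworked error handling above leans on PTR_ERR_OR_ZERO(), which folds the usual IS_ERR()/PTR_ERR() pair into one expression: it yields the encoded negative errno for an error pointer and 0 otherwise. A small, hypothetical example of the same shape, not taken from this driver:

#include <linux/clk.h>
#include <linux/err.h>

/* Illustration only: fetch a clock and report success or failure as a
 * plain int, so callers can test "ret" directly. */
static int grab_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = clk_get(dev, NULL);
	int ret = PTR_ERR_OR_ZERO(clk);

	if (ret)
		return ret;	/* e.g. -EPROBE_DEFER while the clock registers */

	*out = clk;
	return 0;
}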
index c35e7da1ed7a185fd95d0f0ae7f7b2961d235a0f..34b17447e0d19ee6fbeec91135f697a3beed653f 100644 (file)
@@ -48,11 +48,11 @@ static struct cpufreq_policy *next_policy(struct cpufreq_policy *policy,
                                          bool active)
 {
        do {
-               policy = list_next_entry(policy, policy_list);
-
                /* No more policies in the list */
-               if (&policy->policy_list == &cpufreq_policy_list)
+               if (list_is_last(&policy->policy_list, &cpufreq_policy_list))
                        return NULL;
+
+               policy = list_next_entry(policy, policy_list);
        } while (!suitable_policy(policy, active));
 
        return policy;
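The rewritten loop above illustrates a small linked-list idiom: test list_is_last() before calling list_next_entry(), so the walk stops at the list head instead of advancing onto the sentinel and treating it as a real entry. A minimal, hypothetical sketch of the same pattern, not cpufreq code:

#include <linux/list.h>

struct item {
	int val;
	struct list_head node;
};

/* Return the entry following "cur", or NULL when "cur" is the last real
 * entry; checking list_is_last() first avoids doing container_of() on
 * the list head sentinel. */
static struct item *next_item(struct item *cur, struct list_head *head)
{
	if (list_is_last(&cur->node, head))
		return NULL;

	return list_next_entry(cur, node);
}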
@@ -959,6 +959,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
        return cpufreq_add_dev_symlink(policy);
 }
 
+__weak struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return NULL;
+}
+
 static int cpufreq_init_policy(struct cpufreq_policy *policy)
 {
        struct cpufreq_governor *gov = NULL;
@@ -968,11 +973,14 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
 
        /* Update governor of new_policy to the governor used before hotplug */
        gov = find_governor(policy->last_governor);
-       if (gov)
+       if (gov) {
                pr_debug("Restoring governor %s for cpu %d\n",
                                policy->governor->name, policy->cpu);
-       else
-               gov = CPUFREQ_DEFAULT_GOVERNOR;
+       } else {
+               gov = cpufreq_default_governor();
+               if (!gov)
+                       return -ENODATA;
+       }
 
        new_policy.governor = gov;
 
@@ -1920,21 +1928,16 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
+__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
+{
+       return NULL;
+}
+
 static int __cpufreq_governor(struct cpufreq_policy *policy,
                                        unsigned int event)
 {
        int ret;
 
-       /* Only must be defined when default governor is known to have latency
-          restrictions, like e.g. conservative or ondemand.
-          That this is the case is already ensured in Kconfig
-       */
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
-       struct cpufreq_governor *gov = &cpufreq_gov_performance;
-#else
-       struct cpufreq_governor *gov = NULL;
-#endif
-
        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;
@@ -1948,12 +1951,14 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
            policy->governor->max_transition_latency) {
-               if (!gov)
-                       return -EINVAL;
-               else {
+               struct cpufreq_governor *gov = cpufreq_fallback_governor();
+
+               if (gov) {
                        pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
                                policy->governor->name, gov->name);
                        policy->governor = gov;
+               } else {
+                       return -EINVAL;
                }
        }
 
index 606ad74abe6e8b248b15a142c4f3fca7241c64be..8504a70a47857ff21a39a401d29c86cde40a50a4 100644 (file)
@@ -26,10 +26,7 @@ static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
 static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                   unsigned int event);
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_conservative = {
+static struct cpufreq_governor cpufreq_gov_conservative = {
        .name                   = "conservative",
        .governor               = cs_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
@@ -399,6 +396,11 @@ MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_conservative;
+}
+
 fs_initcall(cpufreq_gov_dbs_init);
 #else
 module_init(cpufreq_gov_dbs_init);
index bab3a514ec128254d8cff0ccdeaf02f427625d06..e0d111024d4840e8078fc0553c9ef7dabed71447 100644 (file)
@@ -387,16 +387,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
        if (!have_governor_per_policy())
                cdata->gdbs_data = dbs_data;
 
+       policy->governor_data = dbs_data;
+
        ret = sysfs_create_group(get_governor_parent_kobj(policy),
                                 get_sysfs_attr(dbs_data));
        if (ret)
                goto reset_gdbs_data;
 
-       policy->governor_data = dbs_data;
-
        return 0;
 
 reset_gdbs_data:
+       policy->governor_data = NULL;
+
        if (!have_governor_per_policy())
                cdata->gdbs_data = NULL;
        cdata->exit(dbs_data, !policy->governor->initialized);
@@ -417,16 +419,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
        if (!cdbs->shared || cdbs->shared->policy)
                return -EBUSY;
 
-       policy->governor_data = NULL;
        if (!--dbs_data->usage_count) {
                sysfs_remove_group(get_governor_parent_kobj(policy),
                                   get_sysfs_attr(dbs_data));
 
+               policy->governor_data = NULL;
+
                if (!have_governor_per_policy())
                        cdata->gdbs_data = NULL;
 
                cdata->exit(dbs_data, policy->governor->initialized == 1);
                kfree(dbs_data);
+       } else {
+               policy->governor_data = NULL;
        }
 
        free_common_dbs_info(policy, cdata);
index eae51070c03427573708fe2bd67a081ccc2ffe82..929e193ac1c19d7601a207659ed788a72df0a3d4 100644 (file)
@@ -31,9 +31,7 @@ static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
 
 static struct od_ops od_ops;
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
 static struct cpufreq_governor cpufreq_gov_ondemand;
-#endif
 
 static unsigned int default_powersave_bias;
 
@@ -554,6 +552,19 @@ static struct common_dbs_data od_dbs_cdata = {
        .mutex = __MUTEX_INITIALIZER(od_dbs_cdata.mutex),
 };
 
+static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
+               unsigned int event)
+{
+       return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
+}
+
+static struct cpufreq_governor cpufreq_gov_ondemand = {
+       .name                   = "ondemand",
+       .governor               = od_cpufreq_governor_dbs,
+       .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+       .owner                  = THIS_MODULE,
+};
+
 static void od_set_powersave_bias(unsigned int powersave_bias)
 {
        struct cpufreq_policy *policy;
@@ -605,22 +616,6 @@ void od_unregister_powersave_bias_handler(void)
 }
 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
 
-static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
-               unsigned int event)
-{
-       return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
-}
-
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
-#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
-       .name                   = "ondemand",
-       .governor               = od_cpufreq_governor_dbs,
-       .max_transition_latency = TRANSITION_LATENCY_LIMIT,
-       .owner                  = THIS_MODULE,
-};
-
 static int __init cpufreq_gov_dbs_init(void)
 {
        return cpufreq_register_governor(&cpufreq_gov_ondemand);
@@ -638,6 +633,11 @@ MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_ondemand;
+}
+
 fs_initcall(cpufreq_gov_dbs_init);
 #else
 module_init(cpufreq_gov_dbs_init);
index cf117deb39b1f45c53ade61086236eb888d24a71..af9f4b96f5a8e1ae1fd622ce05f9b198b86ed599 100644 (file)
@@ -33,10 +33,7 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
        return 0;
 }
 
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_performance = {
+static struct cpufreq_governor cpufreq_gov_performance = {
        .name           = "performance",
        .governor       = cpufreq_governor_performance,
        .owner          = THIS_MODULE,
@@ -52,6 +49,19 @@ static void __exit cpufreq_gov_performance_exit(void)
        cpufreq_unregister_governor(&cpufreq_gov_performance);
 }
 
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_performance;
+}
+#endif
+#ifndef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+struct cpufreq_governor *cpufreq_fallback_governor(void)
+{
+       return &cpufreq_gov_performance;
+}
+#endif
+
 MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
 MODULE_LICENSE("GPL");
index e3b874c235eada5d353f4b02831140c6a244af68..b8b400232a7451fe392f767ed7aaecf6600c680d 100644 (file)
@@ -33,10 +33,7 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
        return 0;
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_powersave = {
+static struct cpufreq_governor cpufreq_gov_powersave = {
        .name           = "powersave",
        .governor       = cpufreq_governor_powersave,
        .owner          = THIS_MODULE,
@@ -57,6 +54,11 @@ MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_powersave;
+}
+
 fs_initcall(cpufreq_gov_powersave_init);
 #else
 module_init(cpufreq_gov_powersave_init);
index 4dbf1db16aca0e5d29d44b002a6d4ad0cae6d139..4d16f45ee1daf3e64e23c97a9cec8b7a4ea0fc0f 100644 (file)
@@ -89,10 +89,7 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
        return rc;
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
-static
-#endif
-struct cpufreq_governor cpufreq_gov_userspace = {
+static struct cpufreq_governor cpufreq_gov_userspace = {
        .name           = "userspace",
        .governor       = cpufreq_governor_userspace,
        .store_setspeed = cpufreq_set,
@@ -116,6 +113,11 @@ MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
 MODULE_LICENSE("GPL");
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+       return &cpufreq_gov_userspace;
+}
+
 fs_initcall(cpufreq_gov_userspace_init);
 #else
 module_init(cpufreq_gov_userspace_init);
index cd83d477e32d412394da574e8e02adb6dd7be832..3a4b39afc0abb0cb9a7fc34845266176c95487a3 100644 (file)
@@ -1431,7 +1431,7 @@ static int __init intel_pstate_init(void)
        if (!all_cpu_data)
                return -ENOMEM;
 
-       if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
+       if (static_cpu_has(X86_FEATURE_HWP) && !no_hwp) {
                pr_info("intel_pstate: HWP enabled\n");
                hwp_active++;
        }
index 547890fd9572179eba4c6ce81caf5bbc8b06b947..1bbc10a54c59c8a7cf6e82b06607f44c7988f3fb 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/of.h>
 #include <linux/reboot.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
+#include <trace/events/power.h>
 
 #include <asm/cputhreads.h>
 #include <asm/firmware.h>
 
 static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1];
 static bool rebooting, throttled, occ_reset;
+static unsigned int *core_to_chip_map;
+
+static const char * const throttle_reason[] = {
+       "No throttling",
+       "Power Cap",
+       "Processor Over Temperature",
+       "Power Supply Failure",
+       "Over Current",
+       "OCC Reset"
+};
 
 static struct chip {
        unsigned int id;
        bool throttled;
+       bool restore;
+       u8 throttle_reason;
        cpumask_t mask;
        struct work_struct throttle;
-       bool restore;
 } *chips;
 
 static int nr_chips;
@@ -312,13 +325,14 @@ static inline unsigned int get_nominal_index(void)
 static void powernv_cpufreq_throttle_check(void *data)
 {
        unsigned int cpu = smp_processor_id();
+       unsigned int chip_id = core_to_chip_map[cpu_core_index_of_thread(cpu)];
        unsigned long pmsr;
        int pmsr_pmax, i;
 
        pmsr = get_pmspr(SPRN_PMSR);
 
        for (i = 0; i < nr_chips; i++)
-               if (chips[i].id == cpu_to_chip_id(cpu))
+               if (chips[i].id == chip_id)
                        break;
 
        /* Check for Pmax Capping */
@@ -328,17 +342,17 @@ static void powernv_cpufreq_throttle_check(void *data)
                        goto next;
                chips[i].throttled = true;
                if (pmsr_pmax < powernv_pstate_info.nominal)
-                       pr_crit("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
-                               cpu, chips[i].id, pmsr_pmax,
-                               powernv_pstate_info.nominal);
-               else
-                       pr_info("CPU %d on Chip %u has Pmax reduced below turbo frequency (%d < %d)\n",
-                               cpu, chips[i].id, pmsr_pmax,
-                               powernv_pstate_info.max);
+                       pr_warn_once("CPU %d on Chip %u has Pmax reduced below nominal frequency (%d < %d)\n",
+                                    cpu, chips[i].id, pmsr_pmax,
+                                    powernv_pstate_info.nominal);
+               trace_powernv_throttle(chips[i].id,
+                                     throttle_reason[chips[i].throttle_reason],
+                                     pmsr_pmax);
        } else if (chips[i].throttled) {
                chips[i].throttled = false;
-               pr_info("CPU %d on Chip %u has Pmax restored to %d\n", cpu,
-                       chips[i].id, pmsr_pmax);
+               trace_powernv_throttle(chips[i].id,
+                                     throttle_reason[chips[i].throttle_reason],
+                                     pmsr_pmax);
        }
 
        /* Check if Psafe_mode_active is set in PMSR. */
@@ -356,7 +370,7 @@ next:
 
        if (throttled) {
                pr_info("PMSR = %16lx\n", pmsr);
-               pr_crit("CPU Frequency could be throttled\n");
+               pr_warn("CPU Frequency could be throttled\n");
        }
 }
 
@@ -423,18 +437,19 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
 {
        struct chip *chip = container_of(work, struct chip, throttle);
        unsigned int cpu;
-       cpumask_var_t mask;
+       cpumask_t mask;
 
-       smp_call_function_any(&chip->mask,
+       get_online_cpus();
+       cpumask_and(&mask, &chip->mask, cpu_online_mask);
+       smp_call_function_any(&mask,
                              powernv_cpufreq_throttle_check, NULL, 0);
 
        if (!chip->restore)
-               return;
+               goto out;
 
        chip->restore = false;
-       cpumask_copy(mask, &chip->mask);
-       for_each_cpu_and(cpu, mask, cpu_online_mask) {
-               int index, tcpu;
+       for_each_cpu(cpu, &mask) {
+               int index;
                struct cpufreq_policy policy;
 
                cpufreq_get_policy(&policy, cpu);
@@ -442,20 +457,12 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
                                               policy.cur,
                                               CPUFREQ_RELATION_C, &index);
                powernv_cpufreq_target_index(&policy, index);
-               for_each_cpu(tcpu, policy.cpus)
-                       cpumask_clear_cpu(tcpu, mask);
+               cpumask_andnot(&mask, &mask, policy.cpus);
        }
+out:
+       put_online_cpus();
 }
 
-static char throttle_reason[][30] = {
-                                       "No throttling",
-                                       "Power Cap",
-                                       "Processor Over Temperature",
-                                       "Power Supply Failure",
-                                       "Over Current",
-                                       "OCC Reset"
-                                    };
-
 static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
                                   unsigned long msg_type, void *_msg)
 {
@@ -481,7 +488,7 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
                 */
                if (!throttled) {
                        throttled = true;
-                       pr_crit("CPU frequency is throttled for duration\n");
+                       pr_warn("CPU frequency is throttled for duration\n");
                }
 
                break;
@@ -505,23 +512,18 @@ static int powernv_cpufreq_occ_msg(struct notifier_block *nb,
                        return 0;
                }
 
-               if (omsg.throttle_status &&
+               for (i = 0; i < nr_chips; i++)
+                       if (chips[i].id == omsg.chip)
+                               break;
+
+               if (omsg.throttle_status >= 0 &&
                    omsg.throttle_status <= OCC_MAX_THROTTLE_STATUS)
-                       pr_info("OCC: Chip %u Pmax reduced due to %s\n",
-                               (unsigned int)omsg.chip,
-                               throttle_reason[omsg.throttle_status]);
-               else if (!omsg.throttle_status)
-                       pr_info("OCC: Chip %u %s\n", (unsigned int)omsg.chip,
-                               throttle_reason[omsg.throttle_status]);
-               else
-                       return 0;
+                       chips[i].throttle_reason = omsg.throttle_status;
 
-               for (i = 0; i < nr_chips; i++)
-                       if (chips[i].id == omsg.chip) {
-                               if (!omsg.throttle_status)
-                                       chips[i].restore = true;
-                               schedule_work(&chips[i].throttle);
-                       }
+               if (!omsg.throttle_status)
+                       chips[i].restore = true;
+
+               schedule_work(&chips[i].throttle);
        }
        return 0;
 }
@@ -556,29 +558,41 @@ static int init_chip_info(void)
        unsigned int chip[256];
        unsigned int cpu, i;
        unsigned int prev_chip_id = UINT_MAX;
+       cpumask_t cpu_mask;
+       int ret = -ENOMEM;
+
+       core_to_chip_map = kcalloc(cpu_nr_cores(), sizeof(unsigned int),
+                                  GFP_KERNEL);
+       if (!core_to_chip_map)
+               goto out;
 
-       for_each_possible_cpu(cpu) {
+       cpumask_copy(&cpu_mask, cpu_possible_mask);
+       for_each_cpu(cpu, &cpu_mask) {
                unsigned int id = cpu_to_chip_id(cpu);
 
                if (prev_chip_id != id) {
                        prev_chip_id = id;
                        chip[nr_chips++] = id;
                }
+               core_to_chip_map[cpu_core_index_of_thread(cpu)] = id;
+               cpumask_andnot(&cpu_mask, &cpu_mask, cpu_sibling_mask(cpu));
        }
 
-       chips = kmalloc_array(nr_chips, sizeof(struct chip), GFP_KERNEL);
+       chips = kcalloc(nr_chips, sizeof(struct chip), GFP_KERNEL);
        if (!chips)
-               return -ENOMEM;
+               goto free_chip_map;
 
        for (i = 0; i < nr_chips; i++) {
                chips[i].id = chip[i];
-               chips[i].throttled = false;
                cpumask_copy(&chips[i].mask, cpumask_of_node(chip[i]));
                INIT_WORK(&chips[i].throttle, powernv_cpufreq_work_fn);
-               chips[i].restore = false;
        }
 
        return 0;
+free_chip_map:
+       kfree(core_to_chip_map);
+out:
+       return ret;
 }
 
 static int __init powernv_cpufreq_init(void)
@@ -612,6 +626,8 @@ static void __exit powernv_cpufreq_exit(void)
        unregister_reboot_notifier(&powernv_cpufreq_reboot_nb);
        opal_message_notifier_unregister(OPAL_MSG_OCC,
                                         &powernv_cpufreq_opal_nb);
+       kfree(chips);
+       kfree(core_to_chip_map);
        cpufreq_unregister_driver(&powernv_cpufreq_driver);
 }
 module_exit(powernv_cpufreq_exit);
index 1d99c97defa9204f52ad8605fd5f25c1d2ad0540..096377232747652f99d8a89e0681da7134b716fe 100644 (file)
@@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
        }
 }
 #else
-static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
+static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
 {
        return 0;
 }
index 051a8a8224cd7ade8b846e62fc27d0733ba8f73b..a145b319d1717a996fb193dc3f382a22030d5327 100644 (file)
@@ -576,10 +576,8 @@ static struct cpufreq_driver s5pv210_driver = {
        .get            = cpufreq_generic_get,
        .init           = s5pv210_cpu_init,
        .name           = "s5pv210",
-#ifdef CONFIG_PM
        .suspend        = cpufreq_generic_suspend,
        .resume         = cpufreq_generic_suspend, /* We need to set SLEEP FREQ again */
-#endif
 };
 
 static struct notifier_block s5pv210_cpufreq_reboot_notifier = {
index 344058f8501a2c2ee888189950b79f615e815a02..d5657d50ac4044d5da886050e822517fe1134b6f 100644 (file)
@@ -119,7 +119,6 @@ struct cpuidle_coupled {
 
 #define CPUIDLE_COUPLED_NOT_IDLE       (-1)
 
-static DEFINE_MUTEX(cpuidle_coupled_lock);
 static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
 
 /*
index 046423b0c5ca22aad3455101d3cafae7fc4dda87..f996efc56605a3d4739ae0bcac83099fd65b0751 100644 (file)
@@ -153,7 +153,7 @@ int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * be frozen safely.
         */
        index = find_deepest_state(drv, dev, UINT_MAX, 0, true);
-       if (index >= 0)
+       if (index > 0)
                enter_freeze_proper(drv, dev, index);
 
        return index;
index 07d494276aad04195c9e7247089649b302d3082c..fed3ffbec4c16e49cf9df2b56616aca5ef58cefc 100644 (file)
@@ -296,6 +296,7 @@ config CRYPTO_DEV_OMAP_AES
        depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP2PLUS
        select CRYPTO_AES
        select CRYPTO_BLKCIPHER
+       select CRYPTO_ENGINE
        help
          OMAP processors have an AES module accelerator. Select this if you
          want to use the OMAP module for AES algorithms.
@@ -487,7 +488,7 @@ config CRYPTO_DEV_IMGTEC_HASH
 
 config CRYPTO_DEV_SUN4I_SS
        tristate "Support for Allwinner Security System cryptographic accelerator"
-       depends on ARCH_SUNXI
+       depends on ARCH_SUNXI && !64BIT
        select CRYPTO_MD5
        select CRYPTO_SHA1
        select CRYPTO_AES
index 6dd3317ca3653947c00ee4790ca18417f3431d79..0751035b2cb047e3f26bea709bf1d1a9d910a3a6 100644 (file)
@@ -369,12 +369,6 @@ static inline size_t atmel_aes_padlen(size_t len, size_t block_size)
        return len ? block_size - len : 0;
 }
 
-static inline struct aead_request *
-aead_request_cast(struct crypto_async_request *req)
-{
-       return container_of(req, struct aead_request, base);
-}
-
 static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx)
 {
        struct atmel_aes_dev *aes_dd = NULL;
@@ -400,7 +394,7 @@ static int atmel_aes_hw_init(struct atmel_aes_dev *dd)
 {
        int err;
 
-       err = clk_prepare_enable(dd->iclk);
+       err = clk_enable(dd->iclk);
        if (err)
                return err;
 
@@ -430,7 +424,7 @@ static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd)
 
        dev_info(dd->dev, "version: 0x%x\n", dd->hw_version);
 
-       clk_disable_unprepare(dd->iclk);
+       clk_disable(dd->iclk);
        return 0;
 }
 
@@ -448,7 +442,7 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
 
 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
 {
-       clk_disable_unprepare(dd->iclk);
+       clk_disable(dd->iclk);
        dd->flags &= ~AES_FLAGS_BUSY;
 
        if (dd->is_async)
@@ -2091,10 +2085,14 @@ static int atmel_aes_probe(struct platform_device *pdev)
                goto res_err;
        }
 
-       err = atmel_aes_hw_version_init(aes_dd);
+       err = clk_prepare(aes_dd->iclk);
        if (err)
                goto res_err;
 
+       err = atmel_aes_hw_version_init(aes_dd);
+       if (err)
+               goto iclk_unprepare;
+
        atmel_aes_get_cap(aes_dd);
 
        err = atmel_aes_buff_init(aes_dd);
@@ -2127,6 +2125,8 @@ err_algs:
 err_aes_dma:
        atmel_aes_buff_cleanup(aes_dd);
 err_aes_buff:
+iclk_unprepare:
+       clk_unprepare(aes_dd->iclk);
 res_err:
        tasklet_kill(&aes_dd->done_task);
        tasklet_kill(&aes_dd->queue_task);
@@ -2155,6 +2155,8 @@ static int atmel_aes_remove(struct platform_device *pdev)
        atmel_aes_dma_cleanup(aes_dd);
        atmel_aes_buff_cleanup(aes_dd);
 
+       clk_unprepare(aes_dd->iclk);
+
        return 0;
 }
 
index 83b2d74256666ecff11ead015c5f07fbb8dc74f5..e08897109cabe1ce2a44c68a7c3046026cc015c7 100644 (file)
@@ -8,6 +8,8 @@
 #define SHA_CR_START                   (1 << 0)
 #define SHA_CR_FIRST                   (1 << 4)
 #define SHA_CR_SWRST                   (1 << 8)
+#define SHA_CR_WUIHV                   (1 << 12)
+#define SHA_CR_WUIEHV                  (1 << 13)
 
 #define SHA_MR                         0x04
 #define SHA_MR_MODE_MASK               (0x3 << 0)
@@ -15,6 +17,8 @@
 #define SHA_MR_MODE_AUTO               0x1
 #define SHA_MR_MODE_PDC                        0x2
 #define SHA_MR_PROCDLY                 (1 << 4)
+#define SHA_MR_UIHV                    (1 << 5)
+#define SHA_MR_UIEHV                   (1 << 6)
 #define SHA_MR_ALGO_SHA1               (0 << 8)
 #define SHA_MR_ALGO_SHA256             (1 << 8)
 #define SHA_MR_ALGO_SHA384             (2 << 8)
index 20de861aa0ea6c275edb746e14fcb372344fcd48..f8407dc7dd38a306991851152c932fe06ac6ddc9 100644 (file)
@@ -53,6 +53,7 @@
 
 #define SHA_FLAGS_FINUP                BIT(16)
 #define SHA_FLAGS_SG           BIT(17)
+#define SHA_FLAGS_ALGO_MASK    GENMASK(22, 18)
 #define SHA_FLAGS_SHA1         BIT(18)
 #define SHA_FLAGS_SHA224       BIT(19)
 #define SHA_FLAGS_SHA256       BIT(20)
 #define SHA_FLAGS_SHA512       BIT(22)
 #define SHA_FLAGS_ERROR                BIT(23)
 #define SHA_FLAGS_PAD          BIT(24)
+#define SHA_FLAGS_RESTORE      BIT(25)
 
 #define SHA_OP_UPDATE  1
 #define SHA_OP_FINAL   2
 
-#define SHA_BUFFER_LEN         PAGE_SIZE
+#define SHA_BUFFER_LEN         (PAGE_SIZE / 16)
 
 #define ATMEL_SHA_DMA_THRESHOLD                56
 
@@ -73,10 +75,22 @@ struct atmel_sha_caps {
        bool    has_dualbuff;
        bool    has_sha224;
        bool    has_sha_384_512;
+       bool    has_uihv;
 };
 
 struct atmel_sha_dev;
 
+/*
+ * .statesize = sizeof(struct atmel_sha_state) must be <= PAGE_SIZE / 8 as
+ * tested by the ahash_prepare_alg() function.
+ */
+struct atmel_sha_state {
+       u8      digest[SHA512_DIGEST_SIZE];
+       u8      buffer[SHA_BUFFER_LEN];
+       u64     digcnt[2];
+       size_t  bufcnt;
+};
+
 struct atmel_sha_reqctx {
        struct atmel_sha_dev    *dd;
        unsigned long   flags;
@@ -122,6 +136,7 @@ struct atmel_sha_dev {
        spinlock_t              lock;
        int                     err;
        struct tasklet_struct   done_task;
+       struct tasklet_struct   queue_task;
 
        unsigned long           flags;
        struct crypto_queue     queue;
@@ -317,7 +332,8 @@ static int atmel_sha_init(struct ahash_request *req)
 static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
 {
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
-       u32 valcr = 0, valmr = SHA_MR_MODE_AUTO;
+       u32 valmr = SHA_MR_MODE_AUTO;
+       unsigned int i, hashsize = 0;
 
        if (likely(dma)) {
                if (!dd->caps.has_dma)
@@ -329,22 +345,62 @@ static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
                atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
        }
 
-       if (ctx->flags & SHA_FLAGS_SHA1)
+       switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+       case SHA_FLAGS_SHA1:
                valmr |= SHA_MR_ALGO_SHA1;
-       else if (ctx->flags & SHA_FLAGS_SHA224)
+               hashsize = SHA1_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA224:
                valmr |= SHA_MR_ALGO_SHA224;
-       else if (ctx->flags & SHA_FLAGS_SHA256)
+               hashsize = SHA256_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA256:
                valmr |= SHA_MR_ALGO_SHA256;
-       else if (ctx->flags & SHA_FLAGS_SHA384)
+               hashsize = SHA256_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA384:
                valmr |= SHA_MR_ALGO_SHA384;
-       else if (ctx->flags & SHA_FLAGS_SHA512)
+               hashsize = SHA512_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA512:
                valmr |= SHA_MR_ALGO_SHA512;
+               hashsize = SHA512_DIGEST_SIZE;
+               break;
+
+       default:
+               break;
+       }
 
        /* Setting CR_FIRST only for the first iteration */
-       if (!(ctx->digcnt[0] || ctx->digcnt[1]))
-               valcr = SHA_CR_FIRST;
+       if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
+               atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+       } else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
+               const u32 *hash = (const u32 *)ctx->digest;
+
+               /*
+                * Restore the hardware context: update the User Initialize
+                * Hash Value (UIHV) with the value saved when the latest
+                * 'update' operation completed on this very same crypto
+                * request.
+                */
+               ctx->flags &= ~SHA_FLAGS_RESTORE;
+               atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
+               for (i = 0; i < hashsize / sizeof(u32); ++i)
+                       atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
+               atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
+               valmr |= SHA_MR_UIHV;
+       }
+       /*
+        * WARNING: If the UIHV feature is not available, the hardware CANNOT
+        * process concurrent requests: the internal registers used to store
+        * the hash/digest are still set to the partial digest output values
+        * computed during the latest round.
+        */
 
-       atmel_sha_write(dd, SHA_CR, valcr);
        atmel_sha_write(dd, SHA_MR, valmr);
 }
 
@@ -713,23 +769,31 @@ static void atmel_sha_copy_hash(struct ahash_request *req)
 {
        struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
        u32 *hash = (u32 *)ctx->digest;
-       int i;
+       unsigned int i, hashsize;
 
-       if (ctx->flags & SHA_FLAGS_SHA1)
-               for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
-       else if (ctx->flags & SHA_FLAGS_SHA224)
-               for (i = 0; i < SHA224_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
-       else if (ctx->flags & SHA_FLAGS_SHA256)
-               for (i = 0; i < SHA256_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
-       else if (ctx->flags & SHA_FLAGS_SHA384)
-               for (i = 0; i < SHA384_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
-       else
-               for (i = 0; i < SHA512_DIGEST_SIZE / sizeof(u32); i++)
-                       hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+       switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
+       case SHA_FLAGS_SHA1:
+               hashsize = SHA1_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA224:
+       case SHA_FLAGS_SHA256:
+               hashsize = SHA256_DIGEST_SIZE;
+               break;
+
+       case SHA_FLAGS_SHA384:
+       case SHA_FLAGS_SHA512:
+               hashsize = SHA512_DIGEST_SIZE;
+               break;
+
+       default:
+               /* Should not happen... */
+               return;
+       }
+
+       for (i = 0; i < hashsize / sizeof(u32); ++i)
+               hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
+       ctx->flags |= SHA_FLAGS_RESTORE;
 }
 
 static void atmel_sha_copy_ready_hash(struct ahash_request *req)
@@ -782,20 +846,20 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
        dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
                        SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
 
-       clk_disable_unprepare(dd->iclk);
+       clk_disable(dd->iclk);
 
        if (req->base.complete)
                req->base.complete(&req->base, err);
 
        /* handle new request */
-       tasklet_schedule(&dd->done_task);
+       tasklet_schedule(&dd->queue_task);
 }
 
 static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
 {
        int err;
 
-       err = clk_prepare_enable(dd->iclk);
+       err = clk_enable(dd->iclk);
        if (err)
                return err;
 
@@ -822,7 +886,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
        dev_info(dd->dev,
                        "version: 0x%x\n", dd->hw_version);
 
-       clk_disable_unprepare(dd->iclk);
+       clk_disable(dd->iclk);
 }
 
 static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -939,6 +1003,7 @@ static int atmel_sha_final(struct ahash_request *req)
                if (err)
                        goto err1;
 
+               dd->req = req;
                dd->flags |= SHA_FLAGS_BUSY;
                err = atmel_sha_final_req(dd);
        } else {
@@ -979,6 +1044,39 @@ static int atmel_sha_digest(struct ahash_request *req)
        return atmel_sha_init(req) ?: atmel_sha_finup(req);
 }
 
+
+static int atmel_sha_export(struct ahash_request *req, void *out)
+{
+       const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+       struct atmel_sha_state state;
+
+       memcpy(state.digest, ctx->digest, SHA512_DIGEST_SIZE);
+       memcpy(state.buffer, ctx->buffer, ctx->bufcnt);
+       state.bufcnt = ctx->bufcnt;
+       state.digcnt[0] = ctx->digcnt[0];
+       state.digcnt[1] = ctx->digcnt[1];
+
+       /* out might be unaligned. */
+       memcpy(out, &state, sizeof(state));
+       return 0;
+}
+
+static int atmel_sha_import(struct ahash_request *req, const void *in)
+{
+       struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
+       struct atmel_sha_state state;
+
+       /* in might be unaligned. */
+       memcpy(&state, in, sizeof(state));
+
+       memcpy(ctx->digest, state.digest, SHA512_DIGEST_SIZE);
+       memcpy(ctx->buffer, state.buffer, state.bufcnt);
+       ctx->bufcnt = state.bufcnt;
+       ctx->digcnt[0] = state.digcnt[0];
+       ctx->digcnt[1] = state.digcnt[1];
+       return 0;
+}
+
 static int atmel_sha_cra_init(struct crypto_tfm *tfm)
 {
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
@@ -995,8 +1093,11 @@ static struct ahash_alg sha_1_256_algs[] = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA1_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha1",
                        .cra_driver_name        = "atmel-sha1",
@@ -1016,8 +1117,11 @@ static struct ahash_alg sha_1_256_algs[] = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA256_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha256",
                        .cra_driver_name        = "atmel-sha256",
@@ -1039,8 +1143,11 @@ static struct ahash_alg sha_224_alg = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA224_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha224",
                        .cra_driver_name        = "atmel-sha224",
@@ -1062,8 +1169,11 @@ static struct ahash_alg sha_384_512_algs[] = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA384_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha384",
                        .cra_driver_name        = "atmel-sha384",
@@ -1083,8 +1193,11 @@ static struct ahash_alg sha_384_512_algs[] = {
        .final          = atmel_sha_final,
        .finup          = atmel_sha_finup,
        .digest         = atmel_sha_digest,
+       .export         = atmel_sha_export,
+       .import         = atmel_sha_import,
        .halg = {
                .digestsize     = SHA512_DIGEST_SIZE,
+               .statesize      = sizeof(struct atmel_sha_state),
                .base   = {
                        .cra_name               = "sha512",
                        .cra_driver_name        = "atmel-sha512",
@@ -1100,16 +1213,18 @@ static struct ahash_alg sha_384_512_algs[] = {
 },
 };
 
+static void atmel_sha_queue_task(unsigned long data)
+{
+       struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
+
+       atmel_sha_handle_queue(dd, NULL);
+}
+
 static void atmel_sha_done_task(unsigned long data)
 {
        struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;
        int err = 0;
 
-       if (!(SHA_FLAGS_BUSY & dd->flags)) {
-               atmel_sha_handle_queue(dd, NULL);
-               return;
-       }
-
        if (SHA_FLAGS_CPU & dd->flags) {
                if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
                        dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
@@ -1272,14 +1387,23 @@ static void atmel_sha_get_cap(struct atmel_sha_dev *dd)
        dd->caps.has_dualbuff = 0;
        dd->caps.has_sha224 = 0;
        dd->caps.has_sha_384_512 = 0;
+       dd->caps.has_uihv = 0;
 
        /* keep only major version number */
        switch (dd->hw_version & 0xff0) {
+       case 0x510:
+               dd->caps.has_dma = 1;
+               dd->caps.has_dualbuff = 1;
+               dd->caps.has_sha224 = 1;
+               dd->caps.has_sha_384_512 = 1;
+               dd->caps.has_uihv = 1;
+               break;
        case 0x420:
                dd->caps.has_dma = 1;
                dd->caps.has_dualbuff = 1;
                dd->caps.has_sha224 = 1;
                dd->caps.has_sha_384_512 = 1;
+               dd->caps.has_uihv = 1;
                break;
        case 0x410:
                dd->caps.has_dma = 1;
@@ -1366,6 +1490,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
 
        tasklet_init(&sha_dd->done_task, atmel_sha_done_task,
                                        (unsigned long)sha_dd);
+       tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task,
+                                       (unsigned long)sha_dd);
 
        crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH);
 
@@ -1410,6 +1536,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
                goto res_err;
        }
 
+       err = clk_prepare(sha_dd->iclk);
+       if (err)
+               goto res_err;
+
        atmel_sha_hw_version_init(sha_dd);
 
        atmel_sha_get_cap(sha_dd);
@@ -1421,12 +1551,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
                        if (IS_ERR(pdata)) {
                                dev_err(&pdev->dev, "platform data not available\n");
                                err = PTR_ERR(pdata);
-                               goto res_err;
+                               goto iclk_unprepare;
                        }
                }
                if (!pdata->dma_slave) {
                        err = -ENXIO;
-                       goto res_err;
+                       goto iclk_unprepare;
                }
                err = atmel_sha_dma_init(sha_dd, pdata);
                if (err)
@@ -1457,7 +1587,10 @@ err_algs:
        if (sha_dd->caps.has_dma)
                atmel_sha_dma_cleanup(sha_dd);
 err_sha_dma:
+iclk_unprepare:
+       clk_unprepare(sha_dd->iclk);
 res_err:
+       tasklet_kill(&sha_dd->queue_task);
        tasklet_kill(&sha_dd->done_task);
 sha_dd_err:
        dev_err(dev, "initialization failed.\n");
@@ -1478,17 +1611,13 @@ static int atmel_sha_remove(struct platform_device *pdev)
 
        atmel_sha_unregister_algs(sha_dd);
 
+       tasklet_kill(&sha_dd->queue_task);
        tasklet_kill(&sha_dd->done_task);
 
        if (sha_dd->caps.has_dma)
                atmel_sha_dma_cleanup(sha_dd);
 
-       iounmap(sha_dd->io_base);
-
-       clk_put(sha_dd->iclk);
-
-       if (sha_dd->irq >= 0)
-               free_irq(sha_dd->irq, sha_dd);
+       clk_unprepare(sha_dd->iclk);
 
        return 0;
 }
index 8abb4bc548cc06a7a8ae97779701d308255ff050..44d30b45f3cc35a44ee692bc6b48706e819b760c 100644 (file)
@@ -534,8 +534,8 @@ static int caam_probe(struct platform_device *pdev)
         * long pointers in master configuration register
         */
        clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH |
-                     MCFGR_WDENABLE | (sizeof(dma_addr_t) == sizeof(u64) ?
-                                       MCFGR_LONG_PTR : 0));
+                     MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST |
+                     (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
        /*
         *  Read the Compile Time parameters and SCFGR to determine
index a8a79975682f2c5096e1beccd1c71714530a7176..0ba9c40597dcbbf75163bedb5f4e7ef0f1b46972 100644 (file)
@@ -455,7 +455,8 @@ struct caam_ctrl {
 #define MCFGR_AXIPIPE_MASK     (0xf << MCFGR_AXIPIPE_SHIFT)
 
 #define MCFGR_AXIPRI           0x00000008 /* Assert AXI priority sideband */
-#define MCFGR_BURST_64         0x00000001 /* Max burst size */
+#define MCFGR_LARGE_BURST      0x00000004 /* 128/256-byte burst size */
+#define MCFGR_BURST_64         0x00000001 /* 64-byte burst size */
 
 /* JRSTART register offsets */
 #define JRSTART_JR0_START       0x00000001 /* Start Job ring 0 */
index d89f20c04266b31ad85bf82ccf5a2169c52ba727..d095452b8828e6c50b30c8ac3254975f9f8cef6c 100644 (file)
@@ -220,6 +220,38 @@ static int ccp_aes_cmac_digest(struct ahash_request *req)
        return ccp_aes_cmac_finup(req);
 }
 
+static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
+{
+       struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+       struct ccp_aes_cmac_exp_ctx state;
+
+       state.null_msg = rctx->null_msg;
+       memcpy(state.iv, rctx->iv, sizeof(state.iv));
+       state.buf_count = rctx->buf_count;
+       memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+       /* 'out' may not be aligned so memcpy from local variable */
+       memcpy(out, &state, sizeof(state));
+
+       return 0;
+}
+
+static int ccp_aes_cmac_import(struct ahash_request *req, const void *in)
+{
+       struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
+       struct ccp_aes_cmac_exp_ctx state;
+
+       /* 'in' may not be aligned so memcpy to local variable */
+       memcpy(&state, in, sizeof(state));
+
+       rctx->null_msg = state.null_msg;
+       memcpy(rctx->iv, state.iv, sizeof(rctx->iv));
+       rctx->buf_count = state.buf_count;
+       memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+       return 0;
+}
+
 static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
                               unsigned int key_len)
 {
@@ -352,10 +384,13 @@ int ccp_register_aes_cmac_algs(struct list_head *head)
        alg->final = ccp_aes_cmac_final;
        alg->finup = ccp_aes_cmac_finup;
        alg->digest = ccp_aes_cmac_digest;
+       alg->export = ccp_aes_cmac_export;
+       alg->import = ccp_aes_cmac_import;
        alg->setkey = ccp_aes_cmac_setkey;
 
        halg = &alg->halg;
        halg->digestsize = AES_BLOCK_SIZE;
+       halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx);
 
        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)");
index d14b3f28e010897990223fffc8f932fe368a033d..7002c6b283e5763b48ce71bcb84d56963875ae40 100644 (file)
@@ -207,6 +207,42 @@ static int ccp_sha_digest(struct ahash_request *req)
        return ccp_sha_finup(req);
 }
 
+static int ccp_sha_export(struct ahash_request *req, void *out)
+{
+       struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+       struct ccp_sha_exp_ctx state;
+
+       state.type = rctx->type;
+       state.msg_bits = rctx->msg_bits;
+       state.first = rctx->first;
+       memcpy(state.ctx, rctx->ctx, sizeof(state.ctx));
+       state.buf_count = rctx->buf_count;
+       memcpy(state.buf, rctx->buf, sizeof(state.buf));
+
+       /* 'out' may not be aligned so memcpy from local variable */
+       memcpy(out, &state, sizeof(state));
+
+       return 0;
+}
+
+static int ccp_sha_import(struct ahash_request *req, const void *in)
+{
+       struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
+       struct ccp_sha_exp_ctx state;
+
+       /* 'in' may not be aligned so memcpy to local variable */
+       memcpy(&state, in, sizeof(state));
+
+       rctx->type = state.type;
+       rctx->msg_bits = state.msg_bits;
+       rctx->first = state.first;
+       memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx));
+       rctx->buf_count = state.buf_count;
+       memcpy(rctx->buf, state.buf, sizeof(rctx->buf));
+
+       return 0;
+}
+
 static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key,
                          unsigned int key_len)
 {
@@ -403,9 +439,12 @@ static int ccp_register_sha_alg(struct list_head *head,
        alg->final = ccp_sha_final;
        alg->finup = ccp_sha_finup;
        alg->digest = ccp_sha_digest;
+       alg->export = ccp_sha_export;
+       alg->import = ccp_sha_import;
 
        halg = &alg->halg;
        halg->digestsize = def->digest_size;
+       halg->statesize = sizeof(struct ccp_sha_exp_ctx);
 
        base = &halg->base;
        snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
index 76a96f0f44c6d7c659c57cfed3e93928683d9687..a326ec20bfa877c1a2dce78c5f15f3a5ec0731f4 100644 (file)
@@ -129,6 +129,15 @@ struct ccp_aes_cmac_req_ctx {
        struct ccp_cmd cmd;
 };
 
+struct ccp_aes_cmac_exp_ctx {
+       unsigned int null_msg;
+
+       u8 iv[AES_BLOCK_SIZE];
+
+       unsigned int buf_count;
+       u8 buf[AES_BLOCK_SIZE];
+};
+
 /***** SHA related defines *****/
 #define MAX_SHA_CONTEXT_SIZE   SHA256_DIGEST_SIZE
 #define MAX_SHA_BLOCK_SIZE     SHA256_BLOCK_SIZE
@@ -171,6 +180,19 @@ struct ccp_sha_req_ctx {
        struct ccp_cmd cmd;
 };
 
+struct ccp_sha_exp_ctx {
+       enum ccp_sha_type type;
+
+       u64 msg_bits;
+
+       unsigned int first;
+
+       u8 ctx[MAX_SHA_CONTEXT_SIZE];
+
+       unsigned int buf_count;
+       u8 buf[MAX_SHA_BLOCK_SIZE];
+};
+
 /***** Common Context Structure *****/
 struct ccp_ctx {
        int (*complete)(struct crypto_async_request *req, int ret);
index e52496a172d05e70893f48ff3cfca309e93db343..2296934455fcd8d5ce75356365f4bc60cac320b1 100644 (file)
@@ -1031,6 +1031,18 @@ static int aead_perform(struct aead_request *req, int encrypt,
        BUG_ON(ivsize && !req->iv);
        memcpy(crypt->iv, req->iv, ivsize);
 
+       buf = chainup_buffers(dev, req->src, crypt->auth_len,
+                             &src_hook, flags, src_direction);
+       req_ctx->src = src_hook.next;
+       crypt->src_buf = src_hook.phys_next;
+       if (!buf)
+               goto free_buf_src;
+
+       lastlen = buf->buf_len;
+       if (lastlen >= authsize)
+               crypt->icv_rev_aes = buf->phys_addr +
+                                    buf->buf_len - authsize;
+
        req_ctx->dst = NULL;
 
        if (req->src != req->dst) {
@@ -1055,20 +1067,6 @@ static int aead_perform(struct aead_request *req, int encrypt,
                }
        }
 
-       buf = chainup_buffers(dev, req->src, crypt->auth_len,
-                             &src_hook, flags, src_direction);
-       req_ctx->src = src_hook.next;
-       crypt->src_buf = src_hook.phys_next;
-       if (!buf)
-               goto free_buf_src;
-
-       if (!encrypt || !req_ctx->dst) {
-               lastlen = buf->buf_len;
-               if (lastlen >= authsize)
-                       crypt->icv_rev_aes = buf->phys_addr +
-                                            buf->buf_len - authsize;
-       }
-
        if (unlikely(lastlen < authsize)) {
                /* The 12 hmac bytes are scattered,
                 * we need to copy them into a safe buffer */
index 0643e3366e3309de88a03e687a2d5353f5715a22..c0656e7f37b5993672002a8a192dc1c9dcf0c4bd 100644 (file)
@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
                return -ENOMEM;
 
        dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
-       if (!dma->cache_pool)
+       if (!dma->padding_pool)
                return -ENOMEM;
 
        cesa->dma = dma;
index 046c1c45411bbc7fe21b5207479644130183ac68..d94e25df503b91fc873c6b12f74d5d83192b0c02 100644 (file)
@@ -308,7 +308,7 @@ int nx842_crypto_compress(struct crypto_tfm *tfm,
                h = !n && add_header ? hdrsize : 0;
 
                if (ignore)
-                       pr_warn("interal error, ignore is set %x\n", ignore);
+                       pr_warn("internal error, ignore is set %x\n", ignore);
 
                ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h);
                if (ret)
index dd355bd19474fa990d8a2cb0856d4d56ab076345..d420ec751c7c9e090710f568f9b4370591c09110 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/interrupt.h>
 #include <crypto/scatterwalk.h>
 #include <crypto/aes.h>
+#include <crypto/algapi.h>
 
 #define DST_MAXBURST                   4
 #define DMA_MIN                                (DST_MAXBURST * sizeof(u32))
@@ -152,13 +153,10 @@ struct omap_aes_dev {
        unsigned long           flags;
        int                     err;
 
-       spinlock_t              lock;
-       struct crypto_queue     queue;
-
        struct tasklet_struct   done_task;
-       struct tasklet_struct   queue_task;
 
        struct ablkcipher_request       *req;
+       struct crypto_engine            *engine;
 
        /*
         * total is used by PIO mode for book keeping so introduce
@@ -532,9 +530,7 @@ static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
 
        pr_debug("err: %d\n", err);
 
-       dd->flags &= ~FLAGS_BUSY;
-
-       req->base.complete(&req->base, err);
+       crypto_finalize_request(dd->engine, req, err);
 }
 
 static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
@@ -604,34 +600,25 @@ static int omap_aes_copy_sgs(struct omap_aes_dev *dd)
 }
 
 static int omap_aes_handle_queue(struct omap_aes_dev *dd,
-                              struct ablkcipher_request *req)
+                                struct ablkcipher_request *req)
 {
-       struct crypto_async_request *async_req, *backlog;
-       struct omap_aes_ctx *ctx;
-       struct omap_aes_reqctx *rctx;
-       unsigned long flags;
-       int err, ret = 0, len;
-
-       spin_lock_irqsave(&dd->lock, flags);
        if (req)
-               ret = ablkcipher_enqueue_request(&dd->queue, req);
-       if (dd->flags & FLAGS_BUSY) {
-               spin_unlock_irqrestore(&dd->lock, flags);
-               return ret;
-       }
-       backlog = crypto_get_backlog(&dd->queue);
-       async_req = crypto_dequeue_request(&dd->queue);
-       if (async_req)
-               dd->flags |= FLAGS_BUSY;
-       spin_unlock_irqrestore(&dd->lock, flags);
+               return crypto_transfer_request_to_engine(dd->engine, req);
 
-       if (!async_req)
-               return ret;
+       return 0;
+}
 
-       if (backlog)
-               backlog->complete(backlog, -EINPROGRESS);
+static int omap_aes_prepare_req(struct crypto_engine *engine,
+                               struct ablkcipher_request *req)
+{
+       struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+                       crypto_ablkcipher_reqtfm(req));
+       struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+       struct omap_aes_reqctx *rctx;
+       int len;
 
-       req = ablkcipher_request_cast(async_req);
+       if (!dd)
+               return -ENODEV;
 
        /* assign new request to device */
        dd->req = req;
@@ -662,16 +649,20 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
        dd->ctx = ctx;
        ctx->dd = dd;
 
-       err = omap_aes_write_ctrl(dd);
-       if (!err)
-               err = omap_aes_crypt_dma_start(dd);
-       if (err) {
-               /* aes_task will not finish it, so do it here */
-               omap_aes_finish_req(dd, err);
-               tasklet_schedule(&dd->queue_task);
-       }
+       return omap_aes_write_ctrl(dd);
+}
 
-       return ret; /* return ret, which is enqueue return value */
+static int omap_aes_crypt_req(struct crypto_engine *engine,
+                             struct ablkcipher_request *req)
+{
+       struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
+                       crypto_ablkcipher_reqtfm(req));
+       struct omap_aes_dev *dd = omap_aes_find_dev(ctx);
+
+       if (!dd)
+               return -ENODEV;
+
+       return omap_aes_crypt_dma_start(dd);
 }
 
 static void omap_aes_done_task(unsigned long data)
@@ -704,18 +695,10 @@ static void omap_aes_done_task(unsigned long data)
        }
 
        omap_aes_finish_req(dd, 0);
-       omap_aes_handle_queue(dd, NULL);
 
        pr_debug("exit\n");
 }
 
-static void omap_aes_queue_task(unsigned long data)
-{
-       struct omap_aes_dev *dd = (struct omap_aes_dev *)data;
-
-       omap_aes_handle_queue(dd, NULL);
-}
-
 static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
 {
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
@@ -1175,9 +1158,6 @@ static int omap_aes_probe(struct platform_device *pdev)
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);
 
-       spin_lock_init(&dd->lock);
-       crypto_init_queue(&dd->queue, OMAP_AES_QUEUE_LENGTH);
-
        err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
                               omap_aes_get_res_pdev(dd, pdev, &res);
        if (err)
@@ -1209,7 +1189,6 @@ static int omap_aes_probe(struct platform_device *pdev)
                 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
 
        tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);
-       tasklet_init(&dd->queue_task, omap_aes_queue_task, (unsigned long)dd);
 
        err = omap_aes_dma_init(dd);
        if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
@@ -1250,7 +1229,20 @@ static int omap_aes_probe(struct platform_device *pdev)
                }
        }
 
+       /* Initialize crypto engine */
+       dd->engine = crypto_engine_alloc_init(dev, 1);
+       if (!dd->engine)
+               goto err_algs;
+
+       dd->engine->prepare_request = omap_aes_prepare_req;
+       dd->engine->crypt_one_request = omap_aes_crypt_req;
+       err = crypto_engine_start(dd->engine);
+       if (err)
+               goto err_engine;
+
        return 0;
+err_engine:
+       crypto_engine_exit(dd->engine);
 err_algs:
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
@@ -1260,7 +1252,6 @@ err_algs:
                omap_aes_dma_cleanup(dd);
 err_irq:
        tasklet_kill(&dd->done_task);
-       tasklet_kill(&dd->queue_task);
        pm_runtime_disable(dev);
 err_res:
        dd = NULL;
@@ -1286,8 +1277,8 @@ static int omap_aes_remove(struct platform_device *pdev)
                        crypto_unregister_alg(
                                        &dd->pdata->algs_info[i].algs_list[j]);
 
+       crypto_engine_exit(dd->engine);
        tasklet_kill(&dd->done_task);
-       tasklet_kill(&dd->queue_task);
        omap_aes_dma_cleanup(dd);
        pm_runtime_disable(dd->dev);
        dd = NULL;
index f96d427e502c05ea7316f66830adfeebb49fb17d..5a07208ce778209ef7397c7f628cc7fbbede4791 100644 (file)
@@ -55,8 +55,8 @@
 
 #define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
 #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
-#define ADF_C62X_DEVICE_NAME "c62x"
-#define ADF_C62XVF_DEVICE_NAME "c62xvf"
+#define ADF_C62X_DEVICE_NAME "c6xx"
+#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
 #define ADF_C3XXX_DEVICE_NAME "c3xxx"
 #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
 #define ADF_DH895XCC_PCI_DEVICE_ID 0x435
index e78a1d7d88fc76ffd77d074dc9e904e523503f3d..b40d9c8dad964a122dba95371466d736a3f6a9f1 100644 (file)
@@ -121,7 +121,6 @@ static void adf_device_reset_worker(struct work_struct *work)
        adf_dev_restarting_notify(accel_dev);
        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);
-       adf_dev_restore(accel_dev);
        if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
                /* The device hung and we can't restart it so stop here */
                dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
index ef5988afd4c60f59287e8a446af62e8ac6b783b9..b5484bfa699609d6b572a4e820c854a52802dbc9 100644 (file)
@@ -58,7 +58,7 @@ struct adf_user_cfg_key_val {
                uint64_t padding3;
        };
        enum adf_cfg_val_type type;
-};
+} __packed;
 
 struct adf_user_cfg_section {
        char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
@@ -70,7 +70,7 @@ struct adf_user_cfg_section {
                struct adf_user_cfg_section *next;
                uint64_t padding3;
        };
-};
+} __packed;
 
 struct adf_user_cfg_ctl_data {
        union {
@@ -78,5 +78,5 @@ struct adf_user_cfg_ctl_data {
                uint64_t padding;
        };
        uint8_t device_id;
-};
+} __packed;
 #endif
index 59e4c3af15edb10fe46f4e6afb0ad2d18bc92760..1e8852a8a0574593b3bb498db335e144bbeb29d7 100644 (file)
@@ -1064,8 +1064,7 @@ static int qat_alg_aead_init(struct crypto_aead *tfm,
        if (IS_ERR(ctx->hash_tfm))
                return PTR_ERR(ctx->hash_tfm);
        ctx->qat_hash_alg = hash;
-       crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
-                                    sizeof(struct qat_crypto_request));
+       crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
        return 0;
 }
 
@@ -1114,8 +1113,7 @@ static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
        struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
 
        spin_lock_init(&ctx->lock);
-       tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
-                                       sizeof(struct qat_crypto_request);
+       tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
        ctx->tfm = tfm;
        return 0;
 }
index f214a875582731cd2850b162b5ba96e161c6b4b6..5f161a9777e3f32f6e37aa67f607efc9d234b445 100644 (file)
@@ -224,6 +224,7 @@ static inline struct samsung_aes_variant *find_s5p_sss_version
 {
        if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
                const struct of_device_id *match;
+
                match = of_match_node(s5p_sss_dt_match,
                                        pdev->dev.of_node);
                return (struct samsung_aes_variant *)match->data;
@@ -382,7 +383,7 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
        void __iomem *keystart;
 
        if (iv)
-               memcpy(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
+               memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv, 0x10);
 
        if (keylen == AES_KEYSIZE_256)
                keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
@@ -391,13 +392,12 @@ static void s5p_set_aes(struct s5p_aes_dev *dev,
        else
                keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);
 
-       memcpy(keystart, key, keylen);
+       memcpy_toio(keystart, key, keylen);
 }
 
 static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
 {
        struct ablkcipher_request  *req = dev->req;
-
        uint32_t                    aes_control;
        int                         err;
        unsigned long               flags;
@@ -518,7 +518,7 @@ static int s5p_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
        struct s5p_aes_dev         *dev    = ctx->dev;
 
        if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
-               pr_err("request size is not exact amount of AES blocks\n");
+               dev_err(dev->dev, "request size is not exact amount of AES blocks\n");
                return -EINVAL;
        }
 
@@ -566,7 +566,7 @@ static int s5p_aes_cbc_decrypt(struct ablkcipher_request *req)
 
 static int s5p_aes_cra_init(struct crypto_tfm *tfm)
 {
-       struct s5p_aes_ctx  *ctx = crypto_tfm_ctx(tfm);
+       struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
        ctx->dev = s5p_dev;
        tfm->crt_ablkcipher.reqsize = sizeof(struct s5p_aes_reqctx);
@@ -701,7 +701,7 @@ static int s5p_aes_probe(struct platform_device *pdev)
                        goto err_algs;
        }
 
-       pr_info("s5p-sss driver registered\n");
+       dev_info(dev, "s5p-sss driver registered\n");
 
        return 0;
 
index 6c4f91c5e6b352e13f630cb71f4f236cf5e75263..c3f3d89e4831cdce56d8c8fccd96f4315ae00db9 100644 (file)
@@ -182,7 +182,6 @@ struct sahara_sha_reqctx {
        u8                      buf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
        u8                      context[SHA256_DIGEST_SIZE + 4];
-       struct mutex            mutex;
        unsigned int            mode;
        unsigned int            digest_size;
        unsigned int            context_size;
@@ -1096,7 +1095,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
        if (!req->nbytes && !last)
                return 0;
 
-       mutex_lock(&rctx->mutex);
        rctx->last = last;
 
        if (!rctx->active) {
@@ -1109,7 +1107,6 @@ static int sahara_sha_enqueue(struct ahash_request *req, int last)
        mutex_unlock(&dev->queue_mutex);
 
        wake_up_process(dev->kthread);
-       mutex_unlock(&rctx->mutex);
 
        return ret;
 }
@@ -1137,8 +1134,6 @@ static int sahara_sha_init(struct ahash_request *req)
        rctx->context_size = rctx->digest_size + 4;
        rctx->active = 0;
 
-       mutex_init(&rctx->mutex);
-
        return 0;
 }
 
@@ -1167,26 +1162,18 @@ static int sahara_sha_digest(struct ahash_request *req)
 
 static int sahara_sha_export(struct ahash_request *req, void *out)
 {
-       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-       struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 
-       memcpy(out, ctx, sizeof(struct sahara_ctx));
-       memcpy(out + sizeof(struct sahara_sha_reqctx), rctx,
-              sizeof(struct sahara_sha_reqctx));
+       memcpy(out, rctx, sizeof(struct sahara_sha_reqctx));
 
        return 0;
 }
 
 static int sahara_sha_import(struct ahash_request *req, const void *in)
 {
-       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
-       struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
        struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
 
-       memcpy(ctx, in, sizeof(struct sahara_ctx));
-       memcpy(rctx, in + sizeof(struct sahara_sha_reqctx),
-              sizeof(struct sahara_sha_reqctx));
+       memcpy(rctx, in, sizeof(struct sahara_sha_reqctx));
 
        return 0;
 }
@@ -1272,6 +1259,7 @@ static struct ahash_alg sha_v3_algs[] = {
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA1_DIGEST_SIZE,
+       .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha1",
                .cra_driver_name        = "sahara-sha1",
@@ -1299,6 +1287,7 @@ static struct ahash_alg sha_v4_algs[] = {
        .export         = sahara_sha_export,
        .import         = sahara_sha_import,
        .halg.digestsize        = SHA256_DIGEST_SIZE,
+       .halg.statesize         = sizeof(struct sahara_sha_reqctx),
        .halg.base      = {
                .cra_name               = "sha256",
                .cra_driver_name        = "sahara-sha256",
index a19ee127edcafd3c70ad9e6ee8a86b1823f020f7..7be3fbcd8d78a6b5c0968a4b0846694313989d6b 100644 (file)
@@ -251,11 +251,10 @@ static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
                spaces = readl(ss->base + SS_FCSR);
                rx_cnt = SS_RXFIFO_SPACES(spaces);
                tx_cnt = SS_TXFIFO_SPACES(spaces);
-               dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u %u\n",
+               dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
                        mode,
                        oi, mi.length, ileft, areq->nbytes, rx_cnt,
-                       oo, mo.length, oleft, areq->nbytes, tx_cnt,
-                       todo, ob);
+                       oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);
 
                if (tx_cnt == 0)
                        continue;
index 155c1464948e7ad02f885302754353725775b3be..b2ac13b4ddaa89d0c98fcecc31b4c3c4d1d4d264 100644 (file)
@@ -539,13 +539,11 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  * preparations. Coherency is only guaranteed in the specified range for the
  * specified access direction.
  * @dmabuf:    [in]    buffer to prepare cpu access for.
- * @start:     [in]    start of range for cpu access.
- * @len:       [in]    length of range for cpu access.
  * @direction: [in]    length of range for cpu access.
  *
  * Can return negative error values, returns 0 on success.
  */
-int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                             enum dma_data_direction direction)
 {
        int ret = 0;
@@ -554,8 +552,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
                return -EINVAL;
 
        if (dmabuf->ops->begin_cpu_access)
-               ret = dmabuf->ops->begin_cpu_access(dmabuf, start,
-                                                       len, direction);
+               ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
        return ret;
 }
@@ -567,19 +564,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
  * actions. Coherency is only guaranteed in the specified range for the
  * specified access direction.
  * @dmabuf:    [in]    buffer to complete cpu access for.
- * @start:     [in]    start of range for cpu access.
- * @len:       [in]    length of range for cpu access.
  * @direction: [in]    length of range for cpu access.
  *
  * This call must always succeed.
  */
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                            enum dma_data_direction direction)
 {
        WARN_ON(!dmabuf);
 
        if (dmabuf->ops->end_cpu_access)
-               dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+               dmabuf->ops->end_cpu_access(dmabuf, direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
index c50a247be2e0365d4cac7ccc8ce6bbb33f671354..0cb259c59916ac0cadd4a3c3f7e5d4ae7fa2a1d8 100644 (file)
@@ -496,6 +496,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
        caps->src_addr_widths = device->src_addr_widths;
        caps->dst_addr_widths = device->dst_addr_widths;
        caps->directions = device->directions;
+       caps->max_burst = device->max_burst;
        caps->residue_granularity = device->residue_granularity;
        caps->descriptor_reuse = device->descriptor_reuse;
 
index e893318560db9f79300579ff01e73b07b5baae38..74417e7e3ec8186f3d3e72c0567f53a44638bb27 100644 (file)
@@ -507,8 +507,10 @@ static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
         * controller flagged an error instead of scribbling over
         * random memory locations.
         */
-       dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
-                                      "  cookie: %d\n", bad_desc->txd.cookie);
+       dev_WARN(chan2dev(&dwc->chan), true,
+                       "Bad descriptor submitted for DMA!\n cookie: %d\n",
+                       bad_desc->txd.cookie);
+
        dwc_dump_lli(dwc, &bad_desc->lli);
        list_for_each_entry(child, &bad_desc->tx_list, desc_node)
                dwc_dump_lli(dwc, &child->lli);
index 4c30fdd092b3b1e5b7e6050b7e7d1afee6c11e57..358f9689a3f5ace77d3dfd601f1873b880704060 100644 (file)
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
 
        /* Haswell */
        { PCI_VDEVICE(INTEL, 0x9c60) },
+
+       /* Broadwell */
+       { PCI_VDEVICE(INTEL, 0x9ce0) },
+
        { }
 };
 MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
index 241ff2b1402bf95572d053c09616414ba5eb2044..0a50c18d85b8bbfb9c5574f61ebba50d57f2d617 100644 (file)
@@ -150,7 +150,7 @@ enum dw_dma_msize {
 #define DWC_CTLL_DST_INC       (0<<7)          /* DAR update/not */
 #define DWC_CTLL_DST_DEC       (1<<7)
 #define DWC_CTLL_DST_FIX       (2<<7)
-#define DWC_CTLL_SRC_INC       (0<<7)          /* SAR update/not */
+#define DWC_CTLL_SRC_INC       (0<<9)          /* SAR update/not */
 #define DWC_CTLL_SRC_DEC       (1<<9)
 #define DWC_CTLL_SRC_FIX       (2<<9)
 #define DWC_CTLL_DST_MSIZE(n)  ((n)<<11)       /* burst, #elements */
index d92d655494068992f5a689ff83604158115bee4d..e3d7fcb69b4c2e4ffc4221c8ea8ec4e360648fd7 100644 (file)
 #define GET_NUM_REGN(x)                ((x & 0x300000) >> 20) /* bits 20-21 */
 #define CHMAP_EXIST            BIT(24)
 
+/* CCSTAT register */
+#define EDMA_CCSTAT_ACTV       BIT(4)
+
 /*
  * Max of 20 segments per channel to conserve PaRAM slots
  * Also note that MAX_NR_SG should be at least the no. of periods
@@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan)
        spin_unlock_irqrestore(&echan->vchan.lock, flags);
 }
 
+/*
+ * This limit exists to avoid a possible infinite loop when waiting for proof
+ * that a particular transfer is completed. This limit can be hit if there
+ * are large bursts to/from slow devices or the CPU is never able to catch
+ * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
+ * RX-FIFO, as many as 55 loops have been seen.
+ */
+#define EDMA_MAX_TR_WAIT_LOOPS 1000
+
 static u32 edma_residue(struct edma_desc *edesc)
 {
        bool dst = edesc->direction == DMA_DEV_TO_MEM;
+       int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
+       struct edma_chan *echan = edesc->echan;
        struct edma_pset *pset = edesc->pset;
        dma_addr_t done, pos;
        int i;
@@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc)
         * We always read the dst/src position from the first RamPar
         * pset. That's the one which is active now.
         */
-       pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
+       pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+
+       /*
+        * "pos" may represent a transfer request that is still being
+        * processed by the EDMACC or EDMATC. We will busy-wait until
+        * any one of the following situations occurs:
+        *   1. the DMA hardware is idle
+        *   2. a new transfer request is setup
+        *   3. we hit the loop limit
+        */
+       while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
+               /* check if a new transfer request is setup */
+               if (edma_get_position(echan->ecc,
+                                     echan->slot[0], dst) != pos) {
+                       break;
+               }
+
+               if (!--loop_count) {
+                       dev_dbg_ratelimited(echan->vchan.chan.device->dev,
+                               "%s: timeout waiting for PaRAM update\n",
+                               __func__);
+                       break;
+               }
+
+               cpu_relax();
+       }
 
        /*
         * Cyclic is simple. Just subtract pset[0].addr from pos.
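
As an illustration, not code from the patch: the edma_residue() change above caps its busy-wait at EDMA_MAX_TR_WAIT_LOOPS so a transfer that never looks idle cannot hang the caller. Below is a minimal, self-contained sketch of that bounded-poll pattern in plain C; poll_until_idle() and hw_busy() are invented names and the hardware register read is simulated.

/* Bounded busy-wait sketch: give up after a fixed number of polls. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_WAIT_LOOPS	1000

static int fake_reads;

static bool hw_busy(void)		/* stands in for edma_read(CCSTAT) */
{
	return ++fake_reads < 40;	/* pretend the engine idles after 40 polls */
}

static int poll_until_idle(void)
{
	int loops = MAX_WAIT_LOOPS;

	while (hw_busy()) {
		if (!--loops) {
			fprintf(stderr, "timeout waiting for idle\n");
			return -1;	/* bail out instead of spinning forever */
		}
		/* cpu_relax() would sit here in kernel code */
	}
	return 0;
}

int main(void)
{
	int ret = poll_until_idle();

	printf("poll_until_idle() = %d after %d register reads\n", ret, fake_reads);
	return 0;
}
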
index 57ff46284f159eabca5c9b6b28320414605d150f..21f08cc3352b97fbf047cd8661a2191e1988e0f6 100644 (file)
@@ -421,23 +421,25 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
                        desc->size);
        }
 
-       switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
-       case M2P_INTERRUPT_STALL:
-               /* Disable interrupts */
-               control = readl(edmac->regs + M2P_CONTROL);
-               control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
-               m2p_set_control(edmac, control);
-
-               return INTERRUPT_DONE;
-
-       case M2P_INTERRUPT_NFB:
-               if (ep93xx_dma_advance_active(edmac))
-                       m2p_fill_desc(edmac);
+       /*
+        * Even the latest E2 silicon revision sometimes asserts the STALL
+        * interrupt instead of NFB. Therefore we treat them equally, basing
+        * the decision on the amount of data we still have to transfer.
+        */
+       if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
+               return INTERRUPT_UNKNOWN;
 
+       if (ep93xx_dma_advance_active(edmac)) {
+               m2p_fill_desc(edmac);
                return INTERRUPT_NEXT_BUFFER;
        }
 
-       return INTERRUPT_UNKNOWN;
+       /* Disable interrupts */
+       control = readl(edmac->regs + M2P_CONTROL);
+       control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
+       m2p_set_control(edmac, control);
+
+       return INTERRUPT_DONE;
 }
 
 /*
index 1d5df2ef148b16d3c379a11e14a7da5283f9d5b8..21539d5c54c3d5c2d2dc3244650691bf414cf879 100644 (file)
@@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data)
                        return;
        }
 
+       spin_lock_bh(&ioat_chan->cleanup_lock);
+
+       /* handle the no-actives case */
+       if (!ioat_ring_active(ioat_chan)) {
+               spin_lock_bh(&ioat_chan->prep_lock);
+               check_active(ioat_chan);
+               spin_unlock_bh(&ioat_chan->prep_lock);
+               spin_unlock_bh(&ioat_chan->cleanup_lock);
+               return;
+       }
+
        /* if we haven't made progress and we have already
         * acknowledged a pending completion once, then be more
         * forceful with a restart
         */
-       spin_lock_bh(&ioat_chan->cleanup_lock);
        if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
                __cleanup(ioat_chan, phys_complete);
        else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+               u32 chanerr;
+
+               chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+               dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
+               dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+                        status, chanerr);
+               dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
+                        ioat_ring_active(ioat_chan));
+
                spin_lock_bh(&ioat_chan->prep_lock);
                ioat_restart_channel(ioat_chan);
                spin_unlock_bh(&ioat_chan->prep_lock);
                spin_unlock_bh(&ioat_chan->cleanup_lock);
                return;
-       } else {
+       } else
                set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
-               mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-       }
-
 
-       if (ioat_ring_active(ioat_chan))
-               mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
-       else {
-               spin_lock_bh(&ioat_chan->prep_lock);
-               check_active(ioat_chan);
-               spin_unlock_bh(&ioat_chan->prep_lock);
-       }
+       mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
        spin_unlock_bh(&ioat_chan->cleanup_lock);
 }
 
index 4ef0c5e07912e5ba7fd1a9e02141c3704434cc7d..b49b2944e38d8b4284f5e9df0949d49a944b86cd 100644 (file)
@@ -741,7 +741,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
 
        chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
 
-       dev_WARN(to_dev(ioat_chan),
+       dev_WARN(to_dev(ioat_chan),
                 "failed to start channel chanerr: %#x\n", chanerr);
        ioat_free_chan_resources(c);
        return -EFAULT;
index 6bb4a13a8fbd2f4306179384fc0afb23bf4d2d31..243421af888f09fedb65682eaeb51aea8efc85cb 100644 (file)
@@ -26,7 +26,7 @@
 #include "hw.h"
 #include "dma.h"
 
-#define MAX_SCF        1024
+#define MAX_SCF        256
 
 /* provide a lookup table for setting the source address in the base or
  * extended descriptor of an xor or pq descriptor
index 17ee758b419ffbf6e10722fbcac95a08a96e4892..1b0453b9e32df946eab574746055fc5a80389a9e 100644 (file)
@@ -33,6 +33,9 @@
 #define PL330_MAX_CHAN         8
 #define PL330_MAX_IRQS         32
 #define PL330_MAX_PERI         32
+#define PL330_MAX_BURST         16
+
+#define PL330_QUIRK_BROKEN_NO_FLUSHP BIT(0)
 
 enum pl330_cachectrl {
        CCTRL0,         /* Noncacheable and nonbufferable */
@@ -488,6 +491,17 @@ struct pl330_dmac {
        /* Peripheral channels connected to this DMAC */
        unsigned int num_peripherals;
        struct dma_pl330_chan *peripherals; /* keep at end */
+       int quirks;
+};
+
+static struct pl330_of_quirks {
+       char *quirk;
+       int id;
+} of_quirks[] = {
+       {
+               .quirk = "arm,pl330-broken-no-flushp",
+               .id = PL330_QUIRK_BROKEN_NO_FLUSHP,
+       }
 };
 
 struct dma_pl330_desc {
@@ -1137,47 +1151,68 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
        return off;
 }
 
-static inline int _ldst_devtomem(unsigned dry_run, u8 buf[],
-               const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_devtomem(struct pl330_dmac *pl330, unsigned dry_run,
+                                u8 buf[], const struct _xfer_spec *pxs,
+                                int cyc)
 {
        int off = 0;
+       enum pl330_cond cond;
+
+       if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+               cond = BURST;
+       else
+               cond = (pxs->desc->rqcfg.brst_len == 1) ? SINGLE : BURST;
 
        while (cyc--) {
-               off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
-               off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+               off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
+               off += _emit_LDP(dry_run, &buf[off], cond, pxs->desc->peri);
                off += _emit_ST(dry_run, &buf[off], ALWAYS);
-               off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+
+               if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+                       off += _emit_FLUSHP(dry_run, &buf[off],
+                                           pxs->desc->peri);
        }
 
        return off;
 }
 
-static inline int _ldst_memtodev(unsigned dry_run, u8 buf[],
-               const struct _xfer_spec *pxs, int cyc)
+static inline int _ldst_memtodev(struct pl330_dmac *pl330,
+                                unsigned dry_run, u8 buf[],
+                                const struct _xfer_spec *pxs, int cyc)
 {
        int off = 0;
+       enum pl330_cond cond;
+
+       if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
+               cond = BURST;
+       else
+               cond = (pxs->desc->rqcfg.brst_len == 1) ? SINGLE : BURST;
+
 
        while (cyc--) {
-               off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
+               off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
                off += _emit_LD(dry_run, &buf[off], ALWAYS);
-               off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri);
-               off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
+               off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri);
+
+               if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
+                       off += _emit_FLUSHP(dry_run, &buf[off],
+                                           pxs->desc->peri);
        }
 
        return off;
 }
 
-static int _bursts(unsigned dry_run, u8 buf[],
+static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
                const struct _xfer_spec *pxs, int cyc)
 {
        int off = 0;
 
        switch (pxs->desc->rqtype) {
        case DMA_MEM_TO_DEV:
-               off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc);
+               off += _ldst_memtodev(pl330, dry_run, &buf[off], pxs, cyc);
                break;
        case DMA_DEV_TO_MEM:
-               off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc);
+               off += _ldst_devtomem(pl330, dry_run, &buf[off], pxs, cyc);
                break;
        case DMA_MEM_TO_MEM:
                off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
@@ -1191,7 +1226,7 @@ static int _bursts(unsigned dry_run, u8 buf[],
 }
 
 /* Returns bytes consumed and updates bursts */
-static inline int _loop(unsigned dry_run, u8 buf[],
+static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
                unsigned long *bursts, const struct _xfer_spec *pxs)
 {
        int cyc, cycmax, szlp, szlpend, szbrst, off;
@@ -1199,7 +1234,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
        struct _arg_LPEND lpend;
 
        if (*bursts == 1)
-               return _bursts(dry_run, buf, pxs, 1);
+               return _bursts(pl330, dry_run, buf, pxs, 1);
 
        /* Max iterations possible in DMALP is 256 */
        if (*bursts >= 256*256) {
@@ -1217,7 +1252,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
        }
 
        szlp = _emit_LP(1, buf, 0, 0);
-       szbrst = _bursts(1, buf, pxs, 1);
+       szbrst = _bursts(pl330, 1, buf, pxs, 1);
 
        lpend.cond = ALWAYS;
        lpend.forever = false;
@@ -1249,7 +1284,7 @@ static inline int _loop(unsigned dry_run, u8 buf[],
        off += _emit_LP(dry_run, &buf[off], 1, lcnt1);
        ljmp1 = off;
 
-       off += _bursts(dry_run, &buf[off], pxs, cyc);
+       off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);
 
        lpend.cond = ALWAYS;
        lpend.forever = false;
@@ -1272,8 +1307,9 @@ static inline int _loop(unsigned dry_run, u8 buf[],
        return off;
 }
 
-static inline int _setup_loops(unsigned dry_run, u8 buf[],
-               const struct _xfer_spec *pxs)
+static inline int _setup_loops(struct pl330_dmac *pl330,
+                              unsigned dry_run, u8 buf[],
+                              const struct _xfer_spec *pxs)
 {
        struct pl330_xfer *x = &pxs->desc->px;
        u32 ccr = pxs->ccr;
@@ -1282,15 +1318,16 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[],
 
        while (bursts) {
                c = bursts;
-               off += _loop(dry_run, &buf[off], &c, pxs);
+               off += _loop(pl330, dry_run, &buf[off], &c, pxs);
                bursts -= c;
        }
 
        return off;
 }
 
-static inline int _setup_xfer(unsigned dry_run, u8 buf[],
-               const struct _xfer_spec *pxs)
+static inline int _setup_xfer(struct pl330_dmac *pl330,
+                             unsigned dry_run, u8 buf[],
+                             const struct _xfer_spec *pxs)
 {
        struct pl330_xfer *x = &pxs->desc->px;
        int off = 0;
@@ -1301,7 +1338,7 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
        off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);
 
        /* Setup Loop(s) */
-       off += _setup_loops(dry_run, &buf[off], pxs);
+       off += _setup_loops(pl330, dry_run, &buf[off], pxs);
 
        return off;
 }
@@ -1310,8 +1347,9 @@ static inline int _setup_xfer(unsigned dry_run, u8 buf[],
  * A req is a sequence of one or more xfer units.
  * Returns the number of bytes taken to setup the MC for the req.
  */
-static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
-               unsigned index, struct _xfer_spec *pxs)
+static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
+                     struct pl330_thread *thrd, unsigned index,
+                     struct _xfer_spec *pxs)
 {
        struct _pl330_req *req = &thrd->req[index];
        struct pl330_xfer *x;
@@ -1328,7 +1366,7 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd,
        if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr)))
                return -EINVAL;
 
-       off += _setup_xfer(dry_run, &buf[off], pxs);
+       off += _setup_xfer(pl330, dry_run, &buf[off], pxs);
 
        /* DMASEV peripheral/event */
        off += _emit_SEV(dry_run, &buf[off], thrd->ev);
@@ -1422,7 +1460,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
        xs.desc = desc;
 
        /* First dry run to check if req is acceptable */
-       ret = _setup_req(1, thrd, idx, &xs);
+       ret = _setup_req(pl330, 1, thrd, idx, &xs);
        if (ret < 0)
                goto xfer_exit;
 
@@ -1436,7 +1474,7 @@ static int pl330_submit_req(struct pl330_thread *thrd,
        /* Hook the request */
        thrd->lstenq = idx;
        thrd->req[idx].desc = desc;
-       _setup_req(0, thrd, idx, &xs);
+       _setup_req(pl330, 0, thrd, idx, &xs);
 
        ret = 0;
 
@@ -2560,7 +2598,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 
                desc->rqtype = direction;
                desc->rqcfg.brst_size = pch->burst_sz;
-               desc->rqcfg.brst_len = 1;
+               desc->rqcfg.brst_len = pch->burst_len;
                desc->bytes_requested = period_len;
                fill_px(&desc->px, dst, src, period_len);
 
@@ -2705,7 +2743,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                }
 
                desc->rqcfg.brst_size = pch->burst_sz;
-               desc->rqcfg.brst_len = 1;
+               desc->rqcfg.brst_len = pch->burst_len;
                desc->rqtype = direction;
                desc->bytes_requested = sg_dma_len(sg);
        }
@@ -2781,6 +2819,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        struct resource *res;
        int i, ret, irq;
        int num_chan;
+       struct device_node *np = adev->dev.of_node;
 
        pdat = dev_get_platdata(&adev->dev);
 
@@ -2800,6 +2839,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
        pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
 
+       /* get quirk */
+       for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
+               if (of_property_read_bool(np, of_quirks[i].quirk))
+                       pl330->quirks |= of_quirks[i].id;
+
        res = &adev->res;
        pl330->base = devm_ioremap_resource(&adev->dev, res);
        if (IS_ERR(pl330->base))
@@ -2895,6 +2939,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
        pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
        pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
        pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+       pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
+                        1 : PL330_MAX_BURST);
 
        ret = dma_async_device_register(pd);
        if (ret) {
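
As an illustration, not code from the patch: the pl330 changes above fold optional device-tree properties into a quirks bitmask by walking the of_quirks[] table. A rough standalone sketch of the same table-driven flag lookup; the names are invented and plain strings stand in for of_property_read_bool().

/* Table-driven quirk lookup sketch. */
#include <stdio.h>
#include <string.h>

#define QUIRK_BROKEN_NO_FLUSHP	(1 << 0)

static const struct {
	const char *name;
	int id;
} quirk_table[] = {
	{ "arm,pl330-broken-no-flushp", QUIRK_BROKEN_NO_FLUSHP },
};

static int parse_quirks(const char **props, int nprops)
{
	int quirks = 0;
	int i, j;

	for (i = 0; i < (int)(sizeof(quirk_table) / sizeof(quirk_table[0])); i++)
		for (j = 0; j < nprops; j++)
			if (!strcmp(props[j], quirk_table[i].name))
				quirks |= quirk_table[i].id;	/* OR in the matching flag */
	return quirks;
}

int main(void)
{
	const char *props[] = { "arm,pl330-broken-no-flushp" };

	printf("quirks = %#x\n", parse_quirks(props, 1));
	return 0;
}
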
index f32c430eb16cfae8145edcba85ee0a9d8007882c..6e0685f1a83814f4484e65d1ae0467b0ffdbed14 100644 (file)
@@ -12,7 +12,7 @@ config RENESAS_DMA
 
 config SH_DMAE_BASE
        bool "Renesas SuperH DMA Engine support"
-       depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+       depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
        depends on !SUPERH || SH_DMA
        depends on !SH_DMA_API
        default y
@@ -41,7 +41,7 @@ endif
 
 config RCAR_DMAC
        tristate "Renesas R-Car Gen2 DMA Controller"
-       depends on ARCH_SHMOBILE || COMPILE_TEST
+       depends on ARCH_RENESAS || COMPILE_TEST
        select RENESAS_DMA
        help
          This driver supports the general purpose DMA controller found in the
@@ -49,7 +49,7 @@ config RCAR_DMAC
 
 config RENESAS_USB_DMAC
        tristate "Renesas USB-DMA Controller"
-       depends on ARCH_SHMOBILE || COMPILE_TEST
+       depends on ARCH_RENESAS || COMPILE_TEST
        select RENESAS_DMA
        select DMA_VIRTUAL_CHANNELS
        help
index 9eee13ef83a560fd0c54153c61bc01a77cd3eb7f..d87a47547ba59933d719d39cf1fe1460df2eeb55 100644 (file)
@@ -1452,7 +1452,7 @@ static u64 f1x_get_norm_dct_addr(struct amd64_pvt *pvt, u8 range,
        u64 chan_off;
        u64 dram_base           = get_dram_base(pvt, range);
        u64 hole_off            = f10_dhar_offset(pvt);
-       u64 dct_sel_base_off    = (pvt->dct_sel_hi & 0xFFFFFC00) << 16;
+       u64 dct_sel_base_off    = (u64)(pvt->dct_sel_hi & 0xFFFFFC00) << 16;
 
        if (hi_rng) {
                /*
index 54d2f668cb0ae09cccaf559fb199e18dda9b95bc..92dbb7e2320c78fe8cfc69fa80078de5b7b7f682 100644 (file)
@@ -53,7 +53,7 @@ int __init edac_debugfs_init(void)
 
 void edac_debugfs_exit(void)
 {
-       debugfs_remove(edac_debugfs);
+       debugfs_remove_recursive(edac_debugfs);
 }
 
 int edac_create_debugfs_nodes(struct mem_ctl_info *mci)
index 8adfc167c2e38e7de64c0a08224a803ad737ae7a..1472f48c8ac6109677f28da6d1067c11e3e16fa4 100644 (file)
@@ -535,59 +535,20 @@ static void edac_mc_workq_function(struct work_struct *work_req)
 
        mutex_lock(&mem_ctls_mutex);
 
-       /* if this control struct has movd to offline state, we are done */
-       if (mci->op_state == OP_OFFLINE) {
+       if (mci->op_state != OP_RUNNING_POLL) {
                mutex_unlock(&mem_ctls_mutex);
                return;
        }
 
-       /* Only poll controllers that are running polled and have a check */
-       if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL))
+       if (edac_mc_assert_error_check_and_clear())
                mci->edac_check(mci);
 
        mutex_unlock(&mem_ctls_mutex);
 
-       /* Reschedule */
+       /* Queue ourselves again. */
        edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
 }
 
-/*
- * edac_mc_workq_setup
- *     initialize a workq item for this mci
- *     passing in the new delay period in msec
- *
- *     locking model:
- *
- *             called with the mem_ctls_mutex held
- */
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
-{
-       edac_dbg(0, "\n");
-
-       /* if this instance is not in the POLL state, then simply return */
-       if (mci->op_state != OP_RUNNING_POLL)
-               return;
-
-       INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
-
-       edac_queue_work(&mci->work, msecs_to_jiffies(msec));
-}
-
-/*
- * edac_mc_workq_teardown
- *     stop the workq processing on this mci
- *
- *     locking model:
- *
- *             called WITHOUT lock held
- */
-static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
-{
-       mci->op_state = OP_OFFLINE;
-
-       edac_stop_work(&mci->work);
-}
-
 /*
  * edac_mc_reset_delay_period(unsigned long value)
  *
@@ -771,12 +732,12 @@ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
                goto fail1;
        }
 
-       /* If there IS a check routine, then we are running POLLED */
-       if (mci->edac_check != NULL) {
-               /* This instance is NOW RUNNING */
+       if (mci->edac_check) {
                mci->op_state = OP_RUNNING_POLL;
 
-               edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+               INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+               edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
+
        } else {
                mci->op_state = OP_RUNNING_INTERRUPT;
        }
@@ -823,15 +784,16 @@ struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
                return NULL;
        }
 
+       /* mark MCI offline: */
+       mci->op_state = OP_OFFLINE;
+
        if (!del_mc_from_global_list(mci))
                edac_mc_owner = NULL;
-       mutex_unlock(&mem_ctls_mutex);
 
-       /* flush workq processes */
-       edac_mc_workq_teardown(mci);
+       mutex_unlock(&mem_ctls_mutex);
 
-       /* marking MCI offline */
-       mci->op_state = OP_OFFLINE;
+       if (mci->edac_check)
+               edac_stop_work(&mci->work);
 
        /* remove from sysfs */
        edac_remove_sysfs_mci_device(mci);
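
As an illustration, not code from the patch: the edac_mc rework above moves the old setup/teardown helpers into the work function itself, which now checks op_state under the mutex, bails out if the controller went offline, and otherwise runs the check and requeues itself. A small model of that shape; run_poller() and mci_check() are invented and the delayed-work machinery is reduced to a loop.

/* Poll-check-requeue sketch: stop as soon as the state leaves RUNNING_POLL. */
#include <stdio.h>

enum op_state { OP_OFFLINE, OP_RUNNING_POLL };

struct mci {
	enum op_state op_state;
	int checks_done;
};

static void mci_check(struct mci *mci)
{
	mci->checks_done++;
	if (mci->checks_done == 3)	/* simulate teardown after a few polls */
		mci->op_state = OP_OFFLINE;
}

static void run_poller(struct mci *mci)
{
	for (;;) {
		/* in the kernel this test runs under mem_ctls_mutex */
		if (mci->op_state != OP_RUNNING_POLL)
			return;		/* torn down: do not requeue */
		mci_check(mci);
		/* edac_queue_work() would reschedule the delayed work here */
	}
}

int main(void)
{
	struct mci mci = { .op_state = OP_RUNNING_POLL };

	run_poller(&mci);
	printf("polled %d times before going offline\n", mci.checks_done);
	return 0;
}
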
index 99685388d3fb5a1a8c6f52f5fbb675a61fa4a065..8f2f2899a7a2e06070689a535a1eb54171fd0ef7 100644 (file)
@@ -195,55 +195,24 @@ static void edac_pci_workq_function(struct work_struct *work_req)
 
        mutex_lock(&edac_pci_ctls_mutex);
 
-       if (pci->op_state == OP_RUNNING_POLL) {
-               /* we might be in POLL mode, but there may NOT be a poll func
-                */
-               if ((pci->edac_check != NULL) && edac_pci_get_check_errors())
-                       pci->edac_check(pci);
-
-               /* if we are on a one second period, then use round */
-               msec = edac_pci_get_poll_msec();
-               if (msec == 1000)
-                       delay = round_jiffies_relative(msecs_to_jiffies(msec));
-               else
-                       delay = msecs_to_jiffies(msec);
-
-               /* Reschedule only if we are in POLL mode */
-               edac_queue_work(&pci->work, delay);
+       if (pci->op_state != OP_RUNNING_POLL) {
+               mutex_unlock(&edac_pci_ctls_mutex);
+               return;
        }
 
-       mutex_unlock(&edac_pci_ctls_mutex);
-}
-
-/*
- * edac_pci_workq_setup()
- *     initialize a workq item for this edac_pci instance
- *     passing in the new delay period in msec
- *
- *     locking model:
- *             called when 'edac_pci_ctls_mutex' is locked
- */
-static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
-                                unsigned int msec)
-{
-       edac_dbg(0, "\n");
+       if (edac_pci_get_check_errors())
+               pci->edac_check(pci);
 
-       INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+       /* if we are on a one second period, then use round */
+       msec = edac_pci_get_poll_msec();
+       if (msec == 1000)
+               delay = round_jiffies_relative(msecs_to_jiffies(msec));
+       else
+               delay = msecs_to_jiffies(msec);
 
-       edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
-}
+       edac_queue_work(&pci->work, delay);
 
-/*
- * edac_pci_workq_teardown()
- *     stop the workq processing on this edac_pci instance
- */
-static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
-{
-       edac_dbg(0, "\n");
-
-       pci->op_state = OP_OFFLINE;
-
-       edac_stop_work(&pci->work);
+       mutex_unlock(&edac_pci_ctls_mutex);
 }
 
 /*
@@ -289,10 +258,12 @@ int edac_pci_add_device(struct edac_pci_ctl_info *pci, int edac_idx)
                goto fail1;
        }
 
-       if (pci->edac_check != NULL) {
+       if (pci->edac_check) {
                pci->op_state = OP_RUNNING_POLL;
 
-               edac_pci_workq_setup(pci, 1000);
+               INIT_DELAYED_WORK(&pci->work, edac_pci_workq_function);
+               edac_queue_work(&pci->work, msecs_to_jiffies(edac_pci_get_poll_msec()));
+
        } else {
                pci->op_state = OP_RUNNING_INTERRUPT;
        }
@@ -350,8 +321,8 @@ struct edac_pci_ctl_info *edac_pci_del_device(struct device *dev)
 
        mutex_unlock(&edac_pci_ctls_mutex);
 
-       /* stop the workq timer */
-       edac_pci_workq_teardown(pci);
+       if (pci->edac_check)
+               edac_stop_work(&pci->work);
 
        edac_printk(KERN_INFO, EDAC_PCI,
                "Removed device %d for %s %s: DEV %s\n",
index b7139c160bafdaf71a4e3a2d45a00b5e67e6cc7f..ca63d0da8889d71db2499b46323783846cf66009 100644 (file)
@@ -1244,7 +1244,7 @@ static struct platform_driver * const drivers[] = {
 static int __init mpc85xx_mc_init(void)
 {
        int res = 0;
-       u32 pvr = 0;
+       u32 __maybe_unused pvr = 0;
 
        printk(KERN_INFO "Freescale(R) MPC85xx EDAC driver, "
               "(C) 2006 Montavista Software\n");
index 41f876414a18d759ee2d89c33b5a4791ddcd6f43..bf19b6e3bd129929372ba486450b822c098169b4 100644 (file)
@@ -61,6 +61,7 @@ struct xgene_edac {
        struct regmap           *mcba_map;
        struct regmap           *mcbb_map;
        struct regmap           *efuse_map;
+       struct regmap           *rb_map;
        void __iomem            *pcp_csr;
        spinlock_t              lock;
        struct dentry           *dfs;
@@ -1057,7 +1058,7 @@ static bool xgene_edac_l3_promote_to_uc_err(u32 l3cesr, u32 l3celr)
                case 0x041:
                        return true;
                }
-       } else if (L3C_ELR_ERRSYN(l3celr) == 9)
+       } else if (L3C_ELR_ERRWAY(l3celr) == 9)
                return true;
 
        return false;
@@ -1353,6 +1354,17 @@ static int xgene_edac_l3_remove(struct xgene_edac_dev_ctx *l3)
 #define GLBL_MDED_ERRH                 0x0848
 #define GLBL_MDED_ERRHMASK             0x084c
 
+/* IO Bus Registers */
+#define RBCSR                          0x0000
+#define STICKYERR_MASK                 BIT(0)
+#define RBEIR                          0x0008
+#define AGENT_OFFLINE_ERR_MASK         BIT(30)
+#define UNIMPL_RBPAGE_ERR_MASK         BIT(29)
+#define WORD_ALIGNED_ERR_MASK          BIT(28)
+#define PAGE_ACCESS_ERR_MASK           BIT(27)
+#define WRITE_ACCESS_MASK              BIT(26)
+#define RBERRADDR_RD(src)              ((src) & 0x03FFFFFF)
+
 static const char * const soc_mem_err_v1[] = {
        "10GbE0",
        "10GbE1",
@@ -1470,6 +1482,51 @@ static void xgene_edac_rb_report(struct edac_device_ctl_info *edac_dev)
        u32 err_addr_hi;
        u32 reg;
 
+       /* If the register bus resource isn't available, just skip it */
+       if (!ctx->edac->rb_map)
+               goto rb_skip;
+
+       /*
+        * Check RB access errors
+        * 1. Out of range
+        * 2. Un-implemented page
+        * 3. Un-aligned access
+        * 4. Offline slave IP
+        */
+       if (regmap_read(ctx->edac->rb_map, RBCSR, &reg))
+               return;
+       if (reg & STICKYERR_MASK) {
+               bool write;
+               u32 address;
+
+               dev_err(edac_dev->dev, "IOB bus access error(s)\n");
+               if (regmap_read(ctx->edac->rb_map, RBEIR, &reg))
+                       return;
+               write = reg & WRITE_ACCESS_MASK ? 1 : 0;
+               address = RBERRADDR_RD(reg);
+               if (reg & AGENT_OFFLINE_ERR_MASK)
+                       dev_err(edac_dev->dev,
+                               "IOB bus %s access to offline agent error\n",
+                               write ? "write" : "read");
+               if (reg & UNIMPL_RBPAGE_ERR_MASK)
+                       dev_err(edac_dev->dev,
+                               "IOB bus %s access to unimplemented page error\n",
+                               write ? "write" : "read");
+               if (reg & WORD_ALIGNED_ERR_MASK)
+                       dev_err(edac_dev->dev,
+                               "IOB bus %s word aligned access error\n",
+                               write ? "write" : "read");
+               if (reg & PAGE_ACCESS_ERR_MASK)
+                       dev_err(edac_dev->dev,
+                               "IOB bus %s to page out of range access error\n",
+                               write ? "write" : "read");
+               if (regmap_write(ctx->edac->rb_map, RBEIR, 0))
+                       return;
+               if (regmap_write(ctx->edac->rb_map, RBCSR, 0))
+                       return;
+       }
+rb_skip:
+
        /* IOB Bridge agent transaction error interrupt */
        reg = readl(ctx->dev_csr + IOBBATRANSERRINTSTS);
        if (!reg)
@@ -1852,6 +1909,17 @@ static int xgene_edac_probe(struct platform_device *pdev)
                goto out_err;
        }
 
+       /*
+        * NOTE: The register bus resource is optional for compatibility
+        * reasons.
+        */
+       edac->rb_map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                      "regmap-rb");
+       if (IS_ERR(edac->rb_map)) {
+               dev_warn(edac->dev, "missing syscon regmap rb\n");
+               edac->rb_map = NULL;
+       }
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        edac->pcp_csr = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(edac->pcp_csr)) {
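
As an illustration, not code from the patch: the xgene_edac addition above reads a sticky status word and reports every error cause that is flagged, one bit mask per condition. A compact standalone sketch of that decode style, with invented mask names and printf() standing in for dev_err().

/* Bitwise error-decode sketch. */
#include <stdint.h>
#include <stdio.h>

#define ERR_AGENT_OFFLINE	(1u << 30)
#define ERR_UNIMPL_PAGE		(1u << 29)
#define ERR_WORD_ALIGNED	(1u << 28)
#define ERR_WRITE_ACCESS	(1u << 26)
#define ERR_ADDR(reg)		((reg) & 0x03FFFFFFu)

static void decode_rb_error(uint32_t reg)
{
	const char *dir = (reg & ERR_WRITE_ACCESS) ? "write" : "read";

	if (reg & ERR_AGENT_OFFLINE)
		printf("%s access to offline agent at %#x\n", dir, (unsigned)ERR_ADDR(reg));
	if (reg & ERR_UNIMPL_PAGE)
		printf("%s access to unimplemented page at %#x\n", dir, (unsigned)ERR_ADDR(reg));
	if (reg & ERR_WORD_ALIGNED)
		printf("unaligned %s access at %#x\n", dir, (unsigned)ERR_ADDR(reg));
}

int main(void)
{
	decode_rb_error(ERR_AGENT_OFFLINE | ERR_WRITE_ACCESS | 0x1234);
	return 0;
}
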
index 49a3a1185bb607ed45297fe4dea5ff2c6344f37f..5130f74ae3bd87d7c32b1f13af5bc78ddfdfa6be 100644 (file)
@@ -161,6 +161,25 @@ config RASPBERRYPI_FIRMWARE
          This option enables support for communicating with the firmware on the
          Raspberry Pi.
 
+config FW_CFG_SYSFS
+       tristate "QEMU fw_cfg device support in sysfs"
+       depends on SYSFS && (ARM || ARM64 || PPC_PMAC || SPARC || X86)
+       default n
+       help
+         Say Y or M here to enable the exporting of the QEMU firmware
+         configuration (fw_cfg) file entries via sysfs. Entries are
+         found under /sys/firmware/fw_cfg when this option is enabled
+         and loaded.
+
+config FW_CFG_SYSFS_CMDLINE
+       bool "QEMU fw_cfg device parameter parsing"
+       depends on FW_CFG_SYSFS
+       help
+         Allow the qemu_fw_cfg device to be initialized via the kernel
+         command line or using a module parameter.
+         WARNING: Using incorrect parameters (base address in particular)
+         may crash your system.
+
 config QCOM_SCM
        bool
        depends on ARM || ARM64
index 48dd4175297e6cb24151fab67e4c822c940ddf5c..474bada56fcdcd2819afefe5e94e3cb0e191f10c 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
 obj-$(CONFIG_ISCSI_IBFT)       += iscsi_ibft.o
 obj-$(CONFIG_FIRMWARE_MEMMAP)  += memmap.o
 obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
+obj-$(CONFIG_FW_CFG_SYSFS)     += qemu_fw_cfg.o
 obj-$(CONFIG_QCOM_SCM)         += qcom_scm.o
 obj-$(CONFIG_QCOM_SCM_64)      += qcom_scm-64.o
 obj-$(CONFIG_QCOM_SCM_32)      += qcom_scm-32.o
index 2cd37dad67a63645b15d9f2f496630a1e820ce19..3a69ed5ecfcb5f1254eaf1b0c3fb53cbdbcc61fb 100644 (file)
@@ -182,6 +182,7 @@ static int generic_ops_register(void)
 {
        generic_ops.get_variable = efi.get_variable;
        generic_ops.set_variable = efi.set_variable;
+       generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
        generic_ops.get_next_variable = efi.get_next_variable;
        generic_ops.query_variable_store = efi_query_variable_store;
 
@@ -326,38 +327,6 @@ u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
        return end;
 }
 
-/*
- * We can't ioremap data in EFI boot services RAM, because we've already mapped
- * it as RAM.  So, look it up in the existing EFI memory map instead.  Only
- * callable after efi_enter_virtual_mode and before efi_free_boot_services.
- */
-void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
-{
-       struct efi_memory_map *map;
-       void *p;
-       map = efi.memmap;
-       if (!map)
-               return NULL;
-       if (WARN_ON(!map->map))
-               return NULL;
-       for (p = map->map; p < map->map_end; p += map->desc_size) {
-               efi_memory_desc_t *md = p;
-               u64 size = md->num_pages << EFI_PAGE_SHIFT;
-               u64 end = md->phys_addr + size;
-               if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
-                   md->type != EFI_BOOT_SERVICES_CODE &&
-                   md->type != EFI_BOOT_SERVICES_DATA)
-                       continue;
-               if (!md->virt_addr)
-                       continue;
-               if (phys_addr >= md->phys_addr && phys_addr < end) {
-                       phys_addr += md->virt_addr - md->phys_addr;
-                       return (__force void __iomem *)(unsigned long)phys_addr;
-               }
-       }
-       return NULL;
-}
-
 static __initdata efi_config_table_type_t common_tables[] = {
        {ACPI_20_TABLE_GUID, "ACPI 2.0", &efi.acpi20},
        {ACPI_TABLE_GUID, "ACPI", &efi.acpi},
@@ -586,7 +555,8 @@ static __initdata char memory_type_name[][20] = {
        "ACPI Memory NVS",
        "Memory Mapped I/O",
        "MMIO Port Space",
-       "PAL Code"
+       "PAL Code",
+       "Persistent Memory",
 };
 
 char * __init efi_md_typeattr_format(char *buf, size_t size,
@@ -613,13 +583,16 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
        if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
                     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
                     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
+                    EFI_MEMORY_NV |
                     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
                snprintf(pos, size, "|attr=0x%016llx]",
                         (unsigned long long)attr);
        else
-               snprintf(pos, size, "|%3s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+               snprintf(pos, size,
+                        "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
                         attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
                         attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
+                        attr & EFI_MEMORY_NV      ? "NV"  : "",
                         attr & EFI_MEMORY_XP      ? "XP"  : "",
                         attr & EFI_MEMORY_RP      ? "RP"  : "",
                         attr & EFI_MEMORY_WP      ? "WP"  : "",
index 756eca8c4cf8f291025a3ad44f7cbb9981aeb5fd..c5b0d2bc1111e4fd62f9164b6f02170ef1ea56c9 100644 (file)
@@ -386,7 +386,7 @@ static const struct sysfs_ops efivar_attr_ops = {
 
 static void efivar_release(struct kobject *kobj)
 {
-       struct efivar_entry *var = container_of(kobj, struct efivar_entry, kobj);
+       struct efivar_entry *var = to_efivar_entry(kobj);
        kfree(var);
 }
 
index 22c5285f77050f27d2d96a22b5ade945bae54316..75feb3f5829bab96067054dc7063400a723569f6 100644 (file)
@@ -167,14 +167,11 @@ static struct kset *esrt_kset;
 static int esre_create_sysfs_entry(void *esre, int entry_num)
 {
        struct esre_entry *entry;
-       char name[20];
 
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;
 
-       sprintf(name, "entry%d", entry_num);
-
        entry->kobj.kset = esrt_kset;
 
        if (esrt->fw_resource_version == 1) {
@@ -182,7 +179,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
 
                entry->esre.esre1 = esre;
                rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
-                                         "%s", name);
+                                         "entry%d", entry_num);
                if (rc) {
                        kfree(entry);
                        return rc;
index 228bbf91046137de619a4f8f89825e8ccc34b5ab..7b8b2f2702cade6056420cabe32530c8ae0849fd 100644 (file)
  */
 static DEFINE_SPINLOCK(efi_runtime_lock);
 
-/*
- * Some runtime services calls can be reentrant under NMI, even if the table
- * above says they are not. (source: UEFI Specification v2.4A)
- *
- * Table 32. Functions that may be called after Machine Check, INIT and NMI
- * +----------------------------+------------------------------------------+
- * | Function                  | Called after Machine Check, INIT and NMI |
- * +----------------------------+------------------------------------------+
- * | GetTime()                 | Yes, even if previously busy.            |
- * | GetVariable()             | Yes, even if previously busy             |
- * | GetNextVariableName()     | Yes, even if previously busy             |
- * | QueryVariableInfo()       | Yes, even if previously busy             |
- * | SetVariable()             | Yes, even if previously busy             |
- * | UpdateCapsule()           | Yes, even if previously busy             |
- * | QueryCapsuleCapabilities()        | Yes, even if previously busy             |
- * | ResetSystem()             | Yes, even if previously busy             |
- * +----------------------------+------------------------------------------+
- *
- * In order to prevent deadlocks under NMI, the wrappers for these functions
- * may only grab the efi_runtime_lock or rtc_lock spinlocks if !efi_in_nmi().
- * However, not all of the services listed are reachable through NMI code paths,
- * so the the special handling as suggested by the UEFI spec is only implemented
- * for QueryVariableInfo() and SetVariable(), as these can be reached in NMI
- * context through efi_pstore_write().
- */
-
-/*
- * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"),
- * the EFI specification requires that callers of the time related runtime
- * functions serialize with other CMOS accesses in the kernel, as the EFI time
- * functions may choose to also use the legacy CMOS RTC.
- */
-__weak DEFINE_SPINLOCK(rtc_lock);
-
 static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
        unsigned long flags;
        efi_status_t status;
 
-       spin_lock_irqsave(&rtc_lock, flags);
-       spin_lock(&efi_runtime_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
        status = efi_call_virt(get_time, tm, tc);
-       spin_unlock(&efi_runtime_lock);
-       spin_unlock_irqrestore(&rtc_lock, flags);
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        return status;
 }
 
@@ -113,11 +77,9 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
        unsigned long flags;
        efi_status_t status;
 
-       spin_lock_irqsave(&rtc_lock, flags);
-       spin_lock(&efi_runtime_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
        status = efi_call_virt(set_time, tm);
-       spin_unlock(&efi_runtime_lock);
-       spin_unlock_irqrestore(&rtc_lock, flags);
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        return status;
 }
 
@@ -128,11 +90,9 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
        unsigned long flags;
        efi_status_t status;
 
-       spin_lock_irqsave(&rtc_lock, flags);
-       spin_lock(&efi_runtime_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
        status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
-       spin_unlock(&efi_runtime_lock);
-       spin_unlock_irqrestore(&rtc_lock, flags);
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        return status;
 }
 
@@ -141,11 +101,9 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
        unsigned long flags;
        efi_status_t status;
 
-       spin_lock_irqsave(&rtc_lock, flags);
-       spin_lock(&efi_runtime_lock);
+       spin_lock_irqsave(&efi_runtime_lock, flags);
        status = efi_call_virt(set_wakeup_time, enabled, tm);
-       spin_unlock(&efi_runtime_lock);
-       spin_unlock_irqrestore(&rtc_lock, flags);
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
        return status;
 }
 
@@ -230,6 +188,27 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
        return status;
 }
 
+static efi_status_t
+virt_efi_query_variable_info_nonblocking(u32 attr,
+                                        u64 *storage_space,
+                                        u64 *remaining_space,
+                                        u64 *max_variable_size)
+{
+       unsigned long flags;
+       efi_status_t status;
+
+       if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+               return EFI_UNSUPPORTED;
+
+       if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+               return EFI_NOT_READY;
+
+       status = efi_call_virt(query_variable_info, attr, storage_space,
+                              remaining_space, max_variable_size);
+       spin_unlock_irqrestore(&efi_runtime_lock, flags);
+       return status;
+}
+
 static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
 {
        unsigned long flags;
@@ -300,6 +279,7 @@ void efi_native_runtime_setup(void)
        efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
        efi.reset_system = virt_efi_reset_system;
        efi.query_variable_info = virt_efi_query_variable_info;
+       efi.query_variable_info_nonblocking = virt_efi_query_variable_info_nonblocking;
        efi.update_capsule = virt_efi_update_capsule;
        efi.query_capsule_caps = virt_efi_query_capsule_caps;
 }
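
As an illustration, not code from the patch: virt_efi_query_variable_info_nonblocking() above refuses to sleep on efi_runtime_lock; it uses spin_trylock_irqsave() and reports EFI_NOT_READY when the lock is contended. A tiny userspace analogue of that trylock-or-bail pattern, using a pthread mutex and an invented do_query() stand-in for the firmware call.

/* Nonblocking query sketch: never wait for the lock. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t runtime_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_query(void)
{
	return 0;			/* pretend the service succeeded */
}

static int query_nonblocking(void)
{
	int ret;

	if (pthread_mutex_trylock(&runtime_lock))
		return -EAGAIN;		/* caller may retry from a safer context */
	ret = do_query();
	pthread_mutex_unlock(&runtime_lock);
	return ret;
}

int main(void)
{
	printf("query_nonblocking() = %d\n", query_nonblocking());
	return 0;
}
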
index 70a0fb10517f94ea5b28bada280d9935f0693cc7..d2a49626a3355942f0da4c91047f16f0fbb49939 100644 (file)
@@ -234,7 +234,18 @@ check_var_size(u32 attributes, unsigned long size)
        if (!fops->query_variable_store)
                return EFI_UNSUPPORTED;
 
-       return fops->query_variable_store(attributes, size);
+       return fops->query_variable_store(attributes, size, false);
+}
+
+static efi_status_t
+check_var_size_nonblocking(u32 attributes, unsigned long size)
+{
+       const struct efivar_operations *fops = __efivars->ops;
+
+       if (!fops->query_variable_store)
+               return EFI_UNSUPPORTED;
+
+       return fops->query_variable_store(attributes, size, true);
 }
 
 static int efi_status_to_err(efi_status_t status)
@@ -615,7 +626,8 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
        if (!spin_trylock_irqsave(&__efivars->lock, flags))
                return -EBUSY;
 
-       status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
+       status = check_var_size_nonblocking(attributes,
+                                           size + ucs2_strsize(name, 1024));
        if (status != EFI_SUCCESS) {
                spin_unlock_irqrestore(&__efivars->lock, flags);
                return -ENOSPC;
index f25cd79c8a79f834f8d3e169684d20ee4ced08e6..11bfee8b79a9f65418bcbe53dfc708edb220e69d 100644 (file)
@@ -14,6 +14,7 @@
 #define pr_fmt(fmt) "psci: " fmt
 
 #include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
 #include <linux/of.h>
 #include <linux/printk.h>
 #include <linux/psci.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>
 #include <linux/suspend.h>
 
 #include <uapi/linux/psci.h>
 
+#include <asm/cpuidle.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
 #include <asm/smp_plat.h>
@@ -244,6 +247,123 @@ static int __init psci_features(u32 psci_func_id)
                              psci_func_id, 0, 0);
 }
 
+#ifdef CONFIG_CPU_IDLE
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+       int i, ret, count = 0;
+       u32 *psci_states;
+       struct device_node *state_node;
+
+       /*
+        * If the PSCI cpu_suspend function hook has not been initialized,
+        * idle states must not be enabled, so bail out.
+        */
+       if (!psci_ops.cpu_suspend)
+               return -EOPNOTSUPP;
+
+       /* Count idle states */
+       while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+                                             count))) {
+               count++;
+               of_node_put(state_node);
+       }
+
+       if (!count)
+               return -ENODEV;
+
+       psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+       if (!psci_states)
+               return -ENOMEM;
+
+       for (i = 0; i < count; i++) {
+               u32 state;
+
+               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+               ret = of_property_read_u32(state_node,
+                                          "arm,psci-suspend-param",
+                                          &state);
+               if (ret) {
+                       pr_warn(" * %s missing arm,psci-suspend-param property\n",
+                               state_node->full_name);
+                       of_node_put(state_node);
+                       goto free_mem;
+               }
+
+               of_node_put(state_node);
+               pr_debug("psci-power-state %#x index %d\n", state, i);
+               if (!psci_power_state_is_valid(state)) {
+                       pr_warn("Invalid PSCI power state %#x\n", state);
+                       ret = -EINVAL;
+                       goto free_mem;
+               }
+               psci_states[i] = state;
+       }
+       /* Idle states parsed correctly, initialize per-cpu pointer */
+       per_cpu(psci_power_state, cpu) = psci_states;
+       return 0;
+
+free_mem:
+       kfree(psci_states);
+       return ret;
+}
+
+int psci_cpu_init_idle(unsigned int cpu)
+{
+       struct device_node *cpu_node;
+       int ret;
+
+       cpu_node = of_get_cpu_node(cpu, NULL);
+       if (!cpu_node)
+               return -ENODEV;
+
+       ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+       of_node_put(cpu_node);
+
+       return ret;
+}
+
+static int psci_suspend_finisher(unsigned long index)
+{
+       u32 *state = __this_cpu_read(psci_power_state);
+
+       return psci_ops.cpu_suspend(state[index - 1],
+                                   virt_to_phys(cpu_resume));
+}
+
+int psci_cpu_suspend_enter(unsigned long index)
+{
+       int ret;
+       u32 *state = __this_cpu_read(psci_power_state);
+       /*
+        * idle state index 0 corresponds to wfi, should never be called
+        * from the cpu_suspend operations
+        */
+       if (WARN_ON_ONCE(!index))
+               return -EINVAL;
+
+       if (!psci_power_state_loses_context(state[index - 1]))
+               ret = psci_ops.cpu_suspend(state[index - 1], 0);
+       else
+               ret = cpu_suspend(index, psci_suspend_finisher);
+
+       return ret;
+}
+
+/* ARM specific CPU idle operations */
+#ifdef CONFIG_ARM
+static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+       .suspend = psci_cpu_suspend_enter,
+       .init = psci_dt_cpu_init_idle,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+#endif
+#endif
+
 static int psci_system_suspend(unsigned long unused)
 {
        return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
index 0883292f640f4d512c8b198d90a65ed945c02f87..c1e43259c044abee406e77e5f15a33670534857f 100644 (file)
@@ -499,3 +499,85 @@ int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
        return qcom_scm_call(QCOM_SCM_SVC_HDCP, QCOM_SCM_CMD_HDCP,
                req, req_cnt * sizeof(*req), resp, sizeof(*resp));
 }
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+       __le32 out;
+       __le32 in;
+       int ret;
+
+       in = cpu_to_le32(peripheral);
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_IS_SUPPORTED_CMD,
+                           &in, sizeof(in),
+                           &out, sizeof(out));
+
+       return ret ? false : !!out;
+}
+
+int __qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata_phys)
+{
+       __le32 scm_ret;
+       int ret;
+       struct {
+               __le32 proc;
+               __le32 image_addr;
+       } request;
+
+       request.proc = cpu_to_le32(peripheral);
+       request.image_addr = cpu_to_le32(metadata_phys);
+
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_INIT_IMAGE_CMD,
+                           &request, sizeof(request),
+                           &scm_ret, sizeof(scm_ret));
+
+       return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+       __le32 scm_ret;
+       int ret;
+       struct {
+               __le32 proc;
+               __le32 addr;
+               __le32 len;
+       } request;
+
+       request.proc = cpu_to_le32(peripheral);
+       request.addr = cpu_to_le32(addr);
+       request.len = cpu_to_le32(size);
+
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_MEM_SETUP_CMD,
+                           &request, sizeof(request),
+                           &scm_ret, sizeof(scm_ret));
+
+       return ret ? : le32_to_cpu(scm_ret);
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+       __le32 out;
+       __le32 in;
+       int ret;
+
+       in = cpu_to_le32(peripheral);
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_AUTH_AND_RESET_CMD,
+                           &in, sizeof(in),
+                           &out, sizeof(out));
+
+       return ret ? : le32_to_cpu(out);
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+       __le32 out;
+       __le32 in;
+       int ret;
+
+       in = cpu_to_le32(peripheral);
+       ret = qcom_scm_call(QCOM_SCM_SVC_PIL, QCOM_SCM_PAS_SHUTDOWN_CMD,
+                           &in, sizeof(in),
+                           &out, sizeof(out));
+
+       return ret ? : le32_to_cpu(out);
+}
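
As an illustration, not code from the patch: the 32-bit PAS helpers above marshal their arguments as explicit little-endian fields before handing them to firmware. A hypothetical userspace sketch of the same endian-safe packing; cpu_to_le32() is reimplemented by hand here and firmware_call() merely stands in for qcom_scm_call().

/* Endian-safe request packing sketch. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t cpu_to_le32(uint32_t v)
{
	uint8_t b[4] = { (uint8_t)v, (uint8_t)(v >> 8),
			 (uint8_t)(v >> 16), (uint8_t)(v >> 24) };
	uint32_t out;

	memcpy(&out, b, sizeof(out));	/* byte order is fixed regardless of host */
	return out;
}

static int firmware_call(const void *req, size_t len)
{
	printf("sending %zu-byte request\n", len);
	return 0;
}

int main(void)
{
	struct {
		uint32_t proc;
		uint32_t image_addr;
	} request;

	request.proc = cpu_to_le32(1);
	request.image_addr = cpu_to_le32(0x80000000u);
	return firmware_call(&request, sizeof(request));
}
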
index bb6555f6d63b849d4bb3330fed5de6d01e31edd2..e64fd927e5ae5b2396a827f5581ae23af239dbad 100644 (file)
@@ -61,3 +61,28 @@ int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
 {
        return -ENOTSUPP;
 }
+
+bool __qcom_scm_pas_supported(u32 peripheral)
+{
+       return false;
+}
+
+int __qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata_phys)
+{
+       return -ENOTSUPP;
+}
+
+int __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+       return -ENOTSUPP;
+}
+
+int __qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+       return -ENOTSUPP;
+}
+
+int __qcom_scm_pas_shutdown(u32 peripheral)
+{
+       return -ENOTSUPP;
+}
index 45c008d688914fcbd63eb47f059bf0ac679761dd..6fc9580a26bd96159b1560a043ba218c420d8a32 100644 (file)
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
  */
-
+#include <linux/platform_device.h>
+#include <linux/module.h>
 #include <linux/cpumask.h>
 #include <linux/export.h>
+#include <linux/dma-mapping.h>
 #include <linux/types.h>
 #include <linux/qcom_scm.h>
+#include <linux/of.h>
+#include <linux/clk.h>
 
 #include "qcom_scm.h"
 
+struct qcom_scm {
+       struct device *dev;
+       struct clk *core_clk;
+       struct clk *iface_clk;
+       struct clk *bus_clk;
+};
+
+static struct qcom_scm *__scm;
+
+static int qcom_scm_clk_enable(void)
+{
+       int ret;
+
+       ret = clk_prepare_enable(__scm->core_clk);
+       if (ret)
+               goto bail;
+       ret = clk_prepare_enable(__scm->iface_clk);
+       if (ret)
+               goto disable_core;
+       ret = clk_prepare_enable(__scm->bus_clk);
+       if (ret)
+               goto disable_iface;
+
+       return 0;
+
+disable_iface:
+       clk_disable_unprepare(__scm->iface_clk);
+disable_core:
+       clk_disable_unprepare(__scm->core_clk);
+bail:
+       return ret;
+}
+
+static void qcom_scm_clk_disable(void)
+{
+       clk_disable_unprepare(__scm->core_clk);
+       clk_disable_unprepare(__scm->iface_clk);
+       clk_disable_unprepare(__scm->bus_clk);
+}
+
 /**
  * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
  * @entry: Entry point function for the cpus
@@ -72,11 +112,17 @@ EXPORT_SYMBOL(qcom_scm_cpu_power_down);
  */
 bool qcom_scm_hdcp_available(void)
 {
-       int ret;
+       int ret = qcom_scm_clk_enable();
+
+       if (ret)
+               goto clk_err;
 
        ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_HDCP,
-               QCOM_SCM_CMD_HDCP);
+                                               QCOM_SCM_CMD_HDCP);
 
+       qcom_scm_clk_disable();
+
+clk_err:
        return (ret > 0) ? true : false;
 }
 EXPORT_SYMBOL(qcom_scm_hdcp_available);
@@ -91,6 +137,215 @@ EXPORT_SYMBOL(qcom_scm_hdcp_available);
  */
 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
 {
-       return __qcom_scm_hdcp_req(req, req_cnt, resp);
+       int ret = qcom_scm_clk_enable();
+
+       if (ret)
+               return ret;
+
+       ret = __qcom_scm_hdcp_req(req, req_cnt, resp);
+       qcom_scm_clk_disable();
+       return ret;
 }
 EXPORT_SYMBOL(qcom_scm_hdcp_req);
+
+/**
+ * qcom_scm_pas_supported() - Check if the peripheral authentication service is
+ *                           available for the given peripheral
+ * @peripheral:        peripheral id
+ *
+ * Returns true if PAS is supported for this peripheral, otherwise false.
+ */
+bool qcom_scm_pas_supported(u32 peripheral)
+{
+       int ret;
+
+       ret = __qcom_scm_is_call_available(QCOM_SCM_SVC_PIL,
+                                          QCOM_SCM_PAS_IS_SUPPORTED_CMD);
+       if (ret <= 0)
+               return false;
+
+       return __qcom_scm_pas_supported(peripheral);
+}
+EXPORT_SYMBOL(qcom_scm_pas_supported);
+
+/**
+ * qcom_scm_pas_init_image() - Initialize peripheral authentication service
+ *                            state machine for a given peripheral, using the
+ *                            metadata
+ * @peripheral: peripheral id
+ * @metadata:  pointer to memory containing ELF header, program header table
+ *             and optional blob of data used for authenticating the metadata
+ *             and the rest of the firmware
+ * @size:      size of the metadata
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
+{
+       dma_addr_t mdata_phys;
+       void *mdata_buf;
+       int ret;
+
+       /*
+        * During the scm call memory protection will be enabled for the meta
+        * data blob, so make sure it's physically contiguous, 4K aligned and
+        * non-cacheable to avoid XPU violations.
+        */
+       mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys, GFP_KERNEL);
+       if (!mdata_buf) {
+               dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
+               return -ENOMEM;
+       }
+       memcpy(mdata_buf, metadata, size);
+
+       ret = qcom_scm_clk_enable();
+       if (ret)
+               goto free_metadata;
+
+       ret = __qcom_scm_pas_init_image(peripheral, mdata_phys);
+
+       qcom_scm_clk_disable();
+
+free_metadata:
+       dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
+
+       return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_init_image);
+
+/**
+ * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
+ *                           for firmware loading
+ * @peripheral:        peripheral id
+ * @addr:      start address of memory area to prepare
+ * @size:      size of the memory area to prepare
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
+{
+       int ret;
+
+       ret = qcom_scm_clk_enable();
+       if (ret)
+               return ret;
+
+       ret = __qcom_scm_pas_mem_setup(peripheral, addr, size);
+       qcom_scm_clk_disable();
+
+       return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
+
+/**
+ * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
+ *                                and reset the remote processor
+ * @peripheral:        peripheral id
+ *
+ * Return 0 on success.
+ */
+int qcom_scm_pas_auth_and_reset(u32 peripheral)
+{
+       int ret;
+
+       ret = qcom_scm_clk_enable();
+       if (ret)
+               return ret;
+
+       ret = __qcom_scm_pas_auth_and_reset(peripheral);
+       qcom_scm_clk_disable();
+
+       return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
+
+/**
+ * qcom_scm_pas_shutdown() - Shut down the remote processor
+ * @peripheral: peripheral id
+ *
+ * Returns 0 on success.
+ */
+int qcom_scm_pas_shutdown(u32 peripheral)
+{
+       int ret;
+
+       ret = qcom_scm_clk_enable();
+       if (ret)
+               return ret;
+
+       ret = __qcom_scm_pas_shutdown(peripheral);
+       qcom_scm_clk_disable();
+
+       return ret;
+}
+EXPORT_SYMBOL(qcom_scm_pas_shutdown);
+
+/**
+ * qcom_scm_is_available() - Checks if SCM is available
+ */
+bool qcom_scm_is_available(void)
+{
+       return !!__scm;
+}
+EXPORT_SYMBOL(qcom_scm_is_available);
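
Taken together, the exported PAS helpers form a load sequence: check availability and support, stage the metadata, set up the firmware memory range, then authenticate and release the remote core (with qcom_scm_pas_shutdown() undoing it later). A minimal sketch of a hypothetical caller, with error handling abbreviated and every name other than the qcom_scm_* helpers invented for illustration:

	static int example_pas_boot(u32 peripheral, const void *mdt, size_t mdt_size,
				    phys_addr_t mem, phys_addr_t mem_size)
	{
		int ret;

		if (!qcom_scm_is_available() || !qcom_scm_pas_supported(peripheral))
			return -ENODEV;

		/* stage the ELF/metadata blob for authentication */
		ret = qcom_scm_pas_init_image(peripheral, mdt, mdt_size);
		if (ret)
			return ret;

		/* describe the memory region the firmware will occupy */
		ret = qcom_scm_pas_mem_setup(peripheral, mem, mem_size);
		if (ret)
			return ret;

		/* authenticate the staged firmware and release the remote core */
		return qcom_scm_pas_auth_and_reset(peripheral);
	}
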
+
+static int qcom_scm_probe(struct platform_device *pdev)
+{
+       struct qcom_scm *scm;
+       long rate;
+       int ret;
+
+       scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
+       if (!scm)
+               return -ENOMEM;
+
+       scm->dev = &pdev->dev;
+
+       scm->core_clk = devm_clk_get(&pdev->dev, "core");
+       if (IS_ERR(scm->core_clk)) {
+               if (PTR_ERR(scm->core_clk) != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to acquire core clk\n");
+               return PTR_ERR(scm->core_clk);
+       }
+
+       scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
+       if (IS_ERR(scm->iface_clk)) {
+               if (PTR_ERR(scm->iface_clk) != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to acquire iface clk\n");
+               return PTR_ERR(scm->iface_clk);
+       }
+
+       scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
+       if (IS_ERR(scm->bus_clk)) {
+               if (PTR_ERR(scm->bus_clk) != -EPROBE_DEFER)
+                       dev_err(&pdev->dev, "failed to acquire bus clk\n");
+               return PTR_ERR(scm->bus_clk);
+       }
+
+       /* vote for max clk rate for highest performance */
+       rate = clk_round_rate(scm->core_clk, INT_MAX);
+       ret = clk_set_rate(scm->core_clk, rate);
+       if (ret)
+               return ret;
+
+       __scm = scm;
+
+       return 0;
+}
+
+static const struct of_device_id qcom_scm_dt_match[] = {
+       { .compatible = "qcom,scm",},
+       {},
+};
+
+MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
+
+static struct platform_driver qcom_scm_driver = {
+       .driver = {
+               .name   = "qcom_scm",
+               .of_match_table = qcom_scm_dt_match,
+       },
+       .probe = qcom_scm_probe,
+};
+
+builtin_platform_driver(qcom_scm_driver);
index 2cce75c08b9989329f8e72a58b41675e5f0a575e..220d19c93cfc8328fb386fa954a283e312a3f499 100644 (file)
@@ -36,6 +36,18 @@ extern int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id);
 extern int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
                u32 *resp);
 
+#define QCOM_SCM_SVC_PIL               0x2
+#define QCOM_SCM_PAS_INIT_IMAGE_CMD    0x1
+#define QCOM_SCM_PAS_MEM_SETUP_CMD     0x2
+#define QCOM_SCM_PAS_AUTH_AND_RESET_CMD        0x5
+#define QCOM_SCM_PAS_SHUTDOWN_CMD      0x6
+#define QCOM_SCM_PAS_IS_SUPPORTED_CMD  0x7
+extern bool __qcom_scm_pas_supported(u32 peripheral);
+extern int  __qcom_scm_pas_init_image(u32 peripheral, dma_addr_t metadata_phys);
+extern int  __qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+extern int  __qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int  __qcom_scm_pas_shutdown(u32 peripheral);
+
 /* common error codes */
 #define QCOM_SCM_ENOMEM                -5
 #define QCOM_SCM_EOPNOTSUPP    -4
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
new file mode 100644 (file)
index 0000000..19f6851
--- /dev/null
@@ -0,0 +1,751 @@
+/*
+ * drivers/firmware/qemu_fw_cfg.c
+ *
+ * Copyright 2015 Carnegie Mellon University
+ *
+ * Expose entries from QEMU's firmware configuration (fw_cfg) device in
+ * sysfs (read-only, under "/sys/firmware/qemu_fw_cfg/...").
+ *
+ * The fw_cfg device may be instantiated via either an ACPI node (on x86
+ * and select subsets of aarch64), a Device Tree node (on arm), or using
+ * a kernel module (or command line) parameter with the following syntax:
+ *
+ *      [fw_cfg.]ioport=<size>@<base>[:<ctrl_off>:<data_off>]
+ * or
+ *      [fw_cfg.]mmio=<size>@<base>[:<ctrl_off>:<data_off>]
+ *
+ * where:
+ *      <size>     := size of ioport or mmio range
+ *      <base>     := physical base address of ioport or mmio range
+ *      <ctrl_off> := (optional) offset of control register
+ *      <data_off> := (optional) offset of data register
+ *
+ * e.g.:
+ *      fw_cfg.ioport=2@0x510:0:1              (the default on x86)
+ * or
+ *      fw_cfg.mmio=0xA@0x9020000:8:0          (the default on arm)
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+
+MODULE_AUTHOR("Gabriel L. Somlo <somlo@cmu.edu>");
+MODULE_DESCRIPTION("QEMU fw_cfg sysfs support");
+MODULE_LICENSE("GPL");
+
+/* selector key values for "well-known" fw_cfg entries */
+#define FW_CFG_SIGNATURE  0x00
+#define FW_CFG_ID         0x01
+#define FW_CFG_FILE_DIR   0x19
+
+/* size in bytes of fw_cfg signature */
+#define FW_CFG_SIG_SIZE 4
+
+/* fw_cfg "file name" is up to 56 characters (including terminating nul) */
+#define FW_CFG_MAX_FILE_PATH 56
+
+/* fw_cfg file directory entry type */
+struct fw_cfg_file {
+       u32 size;
+       u16 select;
+       u16 reserved;
+       char name[FW_CFG_MAX_FILE_PATH];
+};
+
+/* fw_cfg device i/o register addresses */
+static bool fw_cfg_is_mmio;
+static phys_addr_t fw_cfg_p_base;
+static resource_size_t fw_cfg_p_size;
+static void __iomem *fw_cfg_dev_base;
+static void __iomem *fw_cfg_reg_ctrl;
+static void __iomem *fw_cfg_reg_data;
+
+/* atomic access to fw_cfg device (potentially slow i/o, so using mutex) */
+static DEFINE_MUTEX(fw_cfg_dev_lock);
+
+/* pick appropriate endianness for selector key */
+static inline u16 fw_cfg_sel_endianness(u16 key)
+{
+       return fw_cfg_is_mmio ? cpu_to_be16(key) : cpu_to_le16(key);
+}
+
+/* read chunk of given fw_cfg blob (caller responsible for sanity-check) */
+static inline void fw_cfg_read_blob(u16 key,
+                                   void *buf, loff_t pos, size_t count)
+{
+       mutex_lock(&fw_cfg_dev_lock);
+       iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl);
+       while (pos-- > 0)
+               ioread8(fw_cfg_reg_data);
+       ioread8_rep(fw_cfg_reg_data, buf, count);
+       mutex_unlock(&fw_cfg_dev_lock);
+}
+
+/* clean up fw_cfg device i/o */
+static void fw_cfg_io_cleanup(void)
+{
+       if (fw_cfg_is_mmio) {
+               iounmap(fw_cfg_dev_base);
+               release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+       } else {
+               ioport_unmap(fw_cfg_dev_base);
+               release_region(fw_cfg_p_base, fw_cfg_p_size);
+       }
+}
+
+/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
+#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
+# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+#  define FW_CFG_CTRL_OFF 0x08
+#  define FW_CFG_DATA_OFF 0x00
+# elif (defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC32)) /* ppc/mac,sun4m */
+#  define FW_CFG_CTRL_OFF 0x00
+#  define FW_CFG_DATA_OFF 0x02
+# elif (defined(CONFIG_X86) || defined(CONFIG_SPARC64)) /* x86, sun4u */
+#  define FW_CFG_CTRL_OFF 0x00
+#  define FW_CFG_DATA_OFF 0x01
+# else
+#  warning "QEMU FW_CFG may not be available on this architecture!"
+#  define FW_CFG_CTRL_OFF 0x00
+#  define FW_CFG_DATA_OFF 0x01
+# endif
+#endif
+
+/* initialize fw_cfg device i/o from platform data */
+static int fw_cfg_do_platform_probe(struct platform_device *pdev)
+{
+       char sig[FW_CFG_SIG_SIZE];
+       struct resource *range, *ctrl, *data;
+
+       /* acquire i/o range details */
+       fw_cfg_is_mmio = false;
+       range = platform_get_resource(pdev, IORESOURCE_IO, 0);
+       if (!range) {
+               fw_cfg_is_mmio = true;
+               range = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+               if (!range)
+                       return -EINVAL;
+       }
+       fw_cfg_p_base = range->start;
+       fw_cfg_p_size = resource_size(range);
+
+       if (fw_cfg_is_mmio) {
+               if (!request_mem_region(fw_cfg_p_base,
+                                       fw_cfg_p_size, "fw_cfg_mem"))
+                       return -EBUSY;
+               fw_cfg_dev_base = ioremap(fw_cfg_p_base, fw_cfg_p_size);
+               if (!fw_cfg_dev_base) {
+                       release_mem_region(fw_cfg_p_base, fw_cfg_p_size);
+                       return -EFAULT;
+               }
+       } else {
+               if (!request_region(fw_cfg_p_base,
+                                   fw_cfg_p_size, "fw_cfg_io"))
+                       return -EBUSY;
+               fw_cfg_dev_base = ioport_map(fw_cfg_p_base, fw_cfg_p_size);
+               if (!fw_cfg_dev_base) {
+                       release_region(fw_cfg_p_base, fw_cfg_p_size);
+                       return -EFAULT;
+               }
+       }
+
+       /* were custom register offsets provided (e.g. on the command line)? */
+       ctrl = platform_get_resource_byname(pdev, IORESOURCE_REG, "ctrl");
+       data = platform_get_resource_byname(pdev, IORESOURCE_REG, "data");
+       if (ctrl && data) {
+               fw_cfg_reg_ctrl = fw_cfg_dev_base + ctrl->start;
+               fw_cfg_reg_data = fw_cfg_dev_base + data->start;
+       } else {
+               /* use architecture-specific offsets */
+               fw_cfg_reg_ctrl = fw_cfg_dev_base + FW_CFG_CTRL_OFF;
+               fw_cfg_reg_data = fw_cfg_dev_base + FW_CFG_DATA_OFF;
+       }
+
+       /* verify fw_cfg device signature */
+       fw_cfg_read_blob(FW_CFG_SIGNATURE, sig, 0, FW_CFG_SIG_SIZE);
+       if (memcmp(sig, "QEMU", FW_CFG_SIG_SIZE) != 0) {
+               fw_cfg_io_cleanup();
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* fw_cfg revision attribute, in /sys/firmware/qemu_fw_cfg top-level dir. */
+static u32 fw_cfg_rev;
+
+static ssize_t fw_cfg_showrev(struct kobject *k, struct attribute *a, char *buf)
+{
+       return sprintf(buf, "%u\n", fw_cfg_rev);
+}
+
+static const struct {
+       struct attribute attr;
+       ssize_t (*show)(struct kobject *k, struct attribute *a, char *buf);
+} fw_cfg_rev_attr = {
+       .attr = { .name = "rev", .mode = S_IRUSR },
+       .show = fw_cfg_showrev,
+};
+
+/* fw_cfg_sysfs_entry type */
+struct fw_cfg_sysfs_entry {
+       struct kobject kobj;
+       struct fw_cfg_file f;
+       struct list_head list;
+};
+
+/* get fw_cfg_sysfs_entry from kobject member */
+static inline struct fw_cfg_sysfs_entry *to_entry(struct kobject *kobj)
+{
+       return container_of(kobj, struct fw_cfg_sysfs_entry, kobj);
+}
+
+/* fw_cfg_sysfs_attribute type */
+struct fw_cfg_sysfs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct fw_cfg_sysfs_entry *entry, char *buf);
+};
+
+/* get fw_cfg_sysfs_attribute from attribute member */
+static inline struct fw_cfg_sysfs_attribute *to_attr(struct attribute *attr)
+{
+       return container_of(attr, struct fw_cfg_sysfs_attribute, attr);
+}
+
+/* global cache of fw_cfg_sysfs_entry objects */
+static LIST_HEAD(fw_cfg_entry_cache);
+
+/* kobjects removed lazily by kernel, mutual exclusion needed */
+static DEFINE_SPINLOCK(fw_cfg_cache_lock);
+
+static inline void fw_cfg_sysfs_cache_enlist(struct fw_cfg_sysfs_entry *entry)
+{
+       spin_lock(&fw_cfg_cache_lock);
+       list_add_tail(&entry->list, &fw_cfg_entry_cache);
+       spin_unlock(&fw_cfg_cache_lock);
+}
+
+static inline void fw_cfg_sysfs_cache_delist(struct fw_cfg_sysfs_entry *entry)
+{
+       spin_lock(&fw_cfg_cache_lock);
+       list_del(&entry->list);
+       spin_unlock(&fw_cfg_cache_lock);
+}
+
+static void fw_cfg_sysfs_cache_cleanup(void)
+{
+       struct fw_cfg_sysfs_entry *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
+               /* will end up invoking fw_cfg_sysfs_cache_delist()
+                * via each object's release() method (i.e. destructor)
+                */
+               kobject_put(&entry->kobj);
+       }
+}
+
+/* default_attrs: per-entry attributes and show methods */
+
+#define FW_CFG_SYSFS_ATTR(_attr) \
+struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
+       .attr = { .name = __stringify(_attr), .mode = S_IRUSR }, \
+       .show = fw_cfg_sysfs_show_##_attr, \
+}
+
+static ssize_t fw_cfg_sysfs_show_size(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+       return sprintf(buf, "%u\n", e->f.size);
+}
+
+static ssize_t fw_cfg_sysfs_show_key(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+       return sprintf(buf, "%u\n", e->f.select);
+}
+
+static ssize_t fw_cfg_sysfs_show_name(struct fw_cfg_sysfs_entry *e, char *buf)
+{
+       return sprintf(buf, "%s\n", e->f.name);
+}
+
+static FW_CFG_SYSFS_ATTR(size);
+static FW_CFG_SYSFS_ATTR(key);
+static FW_CFG_SYSFS_ATTR(name);
+
+static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
+       &fw_cfg_sysfs_attr_size.attr,
+       &fw_cfg_sysfs_attr_key.attr,
+       &fw_cfg_sysfs_attr_name.attr,
+       NULL,
+};
+
+/* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */
+static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
+                                     char *buf)
+{
+       struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+       struct fw_cfg_sysfs_attribute *attr = to_attr(a);
+
+       return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops fw_cfg_sysfs_attr_ops = {
+       .show = fw_cfg_sysfs_attr_show,
+};
+
+/* release: destructor, to be called via kobject_put() */
+static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
+{
+       struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+       fw_cfg_sysfs_cache_delist(entry);
+       kfree(entry);
+}
+
+/* kobj_type: ties together all properties required to register an entry */
+static struct kobj_type fw_cfg_sysfs_entry_ktype = {
+       .default_attrs = fw_cfg_sysfs_entry_attrs,
+       .sysfs_ops = &fw_cfg_sysfs_attr_ops,
+       .release = fw_cfg_sysfs_release_entry,
+};
+
+/* raw-read method and attribute */
+static ssize_t fw_cfg_sysfs_read_raw(struct file *filp, struct kobject *kobj,
+                                    struct bin_attribute *bin_attr,
+                                    char *buf, loff_t pos, size_t count)
+{
+       struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
+
+       if (pos > entry->f.size)
+               return -EINVAL;
+
+       if (count > entry->f.size - pos)
+               count = entry->f.size - pos;
+
+       fw_cfg_read_blob(entry->f.select, buf, pos, count);
+       return count;
+}
+
+static struct bin_attribute fw_cfg_sysfs_attr_raw = {
+       .attr = { .name = "raw", .mode = S_IRUSR },
+       .read = fw_cfg_sysfs_read_raw,
+};
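
Once an entry is registered, its contents are exposed through this "raw" attribute under by_key/<selector>/raw. As an illustration only (the attribute is mode 0400, so reading typically requires root, and the selector value 32 below is an invented example), a small user-space reader might look like:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		/* example path; substitute the selector of the entry of interest */
		int fd = open("/sys/firmware/qemu_fw_cfg/by_key/32/raw", O_RDONLY);
		char buf[4096];
		ssize_t n;

		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);	/* dump the blob to stdout */
		close(fd);
		return 0;
	}
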
+
+/*
+ * Create a kset subdirectory matching each '/' delimited dirname token
+ * in 'name', starting with sysfs kset/folder 'dir'; at the end, create
+ * a symlink directed at the given 'target'.
+ * NOTE: We do this on a best-effort basis, since 'name' is not guaranteed
+ * to be a well-behaved path name. Whenever a symlink vs. kset directory
+ * name collision occurs, the kernel will issue big scary warnings while
+ * refusing to add the offending link or directory. We follow up with our
+ * own, slightly less scary error messages explaining the situation :)
+ */
+static int fw_cfg_build_symlink(struct kset *dir,
+                               struct kobject *target, const char *name)
+{
+       int ret;
+       struct kset *subdir;
+       struct kobject *ko;
+       char *name_copy, *p, *tok;
+
+       if (!dir || !target || !name || !*name)
+               return -EINVAL;
+
+       /* clone a copy of name for parsing */
+       name_copy = p = kstrdup(name, GFP_KERNEL);
+       if (!name_copy)
+               return -ENOMEM;
+
+       /* create folders for each dirname token, then symlink for basename */
+       while ((tok = strsep(&p, "/")) && *tok) {
+
+               /* last (basename) token? If so, add symlink here */
+               if (!p || !*p) {
+                       ret = sysfs_create_link(&dir->kobj, target, tok);
+                       break;
+               }
+
+               /* does the current dir contain an item named after tok? */
+               ko = kset_find_obj(dir, tok);
+               if (ko) {
+                       /* drop reference added by kset_find_obj */
+                       kobject_put(ko);
+
+                       /* ko MUST be a kset - we're about to use it as one! */
+                       if (ko->ktype != dir->kobj.ktype) {
+                               ret = -EINVAL;
+                               break;
+                       }
+
+                       /* descend into already existing subdirectory */
+                       dir = to_kset(ko);
+               } else {
+                       /* create new subdirectory kset */
+                       subdir = kzalloc(sizeof(struct kset), GFP_KERNEL);
+                       if (!subdir) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       subdir->kobj.kset = dir;
+                       subdir->kobj.ktype = dir->kobj.ktype;
+                       ret = kobject_set_name(&subdir->kobj, "%s", tok);
+                       if (ret) {
+                               kfree(subdir);
+                               break;
+                       }
+                       ret = kset_register(subdir);
+                       if (ret) {
+                               kfree(subdir);
+                               break;
+                       }
+
+                       /* descend into newly created subdirectory */
+                       dir = subdir;
+               }
+       }
+
+       /* we're done with cloned copy of name */
+       kfree(name_copy);
+       return ret;
+}
+
+/* recursively unregister fw_cfg/by_name/ kset directory tree */
+static void fw_cfg_kset_unregister_recursive(struct kset *kset)
+{
+       struct kobject *k, *next;
+
+       list_for_each_entry_safe(k, next, &kset->list, entry)
+               /* all set members are ksets too, but check just in case... */
+               if (k->ktype == kset->kobj.ktype)
+                       fw_cfg_kset_unregister_recursive(to_kset(k));
+
+       /* symlinks are cleanly and automatically removed with the directory */
+       kset_unregister(kset);
+}
+
+/* kobjects & kset representing top-level, by_key, and by_name folders */
+static struct kobject *fw_cfg_top_ko;
+static struct kobject *fw_cfg_sel_ko;
+static struct kset *fw_cfg_fname_kset;
+
+/* register an individual fw_cfg file */
+static int fw_cfg_register_file(const struct fw_cfg_file *f)
+{
+       int err;
+       struct fw_cfg_sysfs_entry *entry;
+
+       /* allocate new entry */
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       /* set file entry information */
+       memcpy(&entry->f, f, sizeof(struct fw_cfg_file));
+
+       /* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
+       err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
+                                  fw_cfg_sel_ko, "%d", entry->f.select);
+       if (err)
+               goto err_register;
+
+       /* add raw binary content access */
+       err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
+       if (err)
+               goto err_add_raw;
+
+       /* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
+       fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->f.name);
+
+       /* success, add entry to global cache */
+       fw_cfg_sysfs_cache_enlist(entry);
+       return 0;
+
+err_add_raw:
+       kobject_del(&entry->kobj);
+err_register:
+       kfree(entry);
+       return err;
+}
+
+/* iterate over all fw_cfg directory entries, registering each one */
+static int fw_cfg_register_dir_entries(void)
+{
+       int ret = 0;
+       u32 count, i;
+       struct fw_cfg_file *dir;
+       size_t dir_size;
+
+       fw_cfg_read_blob(FW_CFG_FILE_DIR, &count, 0, sizeof(count));
+       count = be32_to_cpu(count);
+       dir_size = count * sizeof(struct fw_cfg_file);
+
+       dir = kmalloc(dir_size, GFP_KERNEL);
+       if (!dir)
+               return -ENOMEM;
+
+       fw_cfg_read_blob(FW_CFG_FILE_DIR, dir, sizeof(count), dir_size);
+
+       for (i = 0; i < count; i++) {
+               dir[i].size = be32_to_cpu(dir[i].size);
+               dir[i].select = be16_to_cpu(dir[i].select);
+               ret = fw_cfg_register_file(&dir[i]);
+               if (ret)
+                       break;
+       }
+
+       kfree(dir);
+       return ret;
+}
+
+/* unregister top-level or by_key folder */
+static inline void fw_cfg_kobj_cleanup(struct kobject *kobj)
+{
+       kobject_del(kobj);
+       kobject_put(kobj);
+}
+
+static int fw_cfg_sysfs_probe(struct platform_device *pdev)
+{
+       int err;
+
+       /* NOTE: If we supported multiple fw_cfg devices, we'd first create
+        * a subdirectory named after e.g. pdev->id, then hang per-device
+        * by_key (and by_name) subdirectories underneath it. However, only
+        * one fw_cfg device exists system-wide, so if one was already found
+        * earlier, we might as well stop here.
+        */
+       if (fw_cfg_sel_ko)
+               return -EBUSY;
+
+       /* create by_key and by_name subdirs of /sys/firmware/qemu_fw_cfg/ */
+       err = -ENOMEM;
+       fw_cfg_sel_ko = kobject_create_and_add("by_key", fw_cfg_top_ko);
+       if (!fw_cfg_sel_ko)
+               goto err_sel;
+       fw_cfg_fname_kset = kset_create_and_add("by_name", NULL, fw_cfg_top_ko);
+       if (!fw_cfg_fname_kset)
+               goto err_name;
+
+       /* initialize fw_cfg device i/o from platform data */
+       err = fw_cfg_do_platform_probe(pdev);
+       if (err)
+               goto err_probe;
+
+       /* get revision number, add matching top-level attribute */
+       fw_cfg_read_blob(FW_CFG_ID, &fw_cfg_rev, 0, sizeof(fw_cfg_rev));
+       fw_cfg_rev = le32_to_cpu(fw_cfg_rev);
+       err = sysfs_create_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+       if (err)
+               goto err_rev;
+
+       /* process fw_cfg file directory entry, registering each file */
+       err = fw_cfg_register_dir_entries();
+       if (err)
+               goto err_dir;
+
+       /* success */
+       pr_debug("fw_cfg: loaded.\n");
+       return 0;
+
+err_dir:
+       fw_cfg_sysfs_cache_cleanup();
+       sysfs_remove_file(fw_cfg_top_ko, &fw_cfg_rev_attr.attr);
+err_rev:
+       fw_cfg_io_cleanup();
+err_probe:
+       fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+err_name:
+       fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+err_sel:
+       return err;
+}
+
+static int fw_cfg_sysfs_remove(struct platform_device *pdev)
+{
+       pr_debug("fw_cfg: unloading.\n");
+       fw_cfg_sysfs_cache_cleanup();
+       fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset);
+       fw_cfg_kobj_cleanup(fw_cfg_sel_ko);
+       fw_cfg_io_cleanup();
+       return 0;
+}
+
+static const struct of_device_id fw_cfg_sysfs_mmio_match[] = {
+       { .compatible = "qemu,fw-cfg-mmio", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, fw_cfg_sysfs_mmio_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id fw_cfg_sysfs_acpi_match[] = {
+       { "QEMU0002", },
+       {},
+};
+MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match);
+#endif
+
+static struct platform_driver fw_cfg_sysfs_driver = {
+       .probe = fw_cfg_sysfs_probe,
+       .remove = fw_cfg_sysfs_remove,
+       .driver = {
+               .name = "fw_cfg",
+               .of_match_table = fw_cfg_sysfs_mmio_match,
+               .acpi_match_table = ACPI_PTR(fw_cfg_sysfs_acpi_match),
+       },
+};
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+
+static struct platform_device *fw_cfg_cmdline_dev;
+
+/* this probably belongs in e.g. include/linux/types.h,
+ * but right now we are the only ones doing it...
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define __PHYS_ADDR_PREFIX "ll"
+#else
+#define __PHYS_ADDR_PREFIX ""
+#endif
+
+/* use special scanf/printf modifier for phys_addr_t, resource_size_t */
+#define PH_ADDR_SCAN_FMT "@%" __PHYS_ADDR_PREFIX "i%n" \
+                        ":%" __PHYS_ADDR_PREFIX "i" \
+                        ":%" __PHYS_ADDR_PREFIX "i%n"
+
+#define PH_ADDR_PR_1_FMT "0x%" __PHYS_ADDR_PREFIX "x@" \
+                        "0x%" __PHYS_ADDR_PREFIX "x"
+
+#define PH_ADDR_PR_3_FMT PH_ADDR_PR_1_FMT \
+                        ":%" __PHYS_ADDR_PREFIX "u" \
+                        ":%" __PHYS_ADDR_PREFIX "u"
+
+static int fw_cfg_cmdline_set(const char *arg, const struct kernel_param *kp)
+{
+       struct resource res[3] = {};
+       char *str;
+       phys_addr_t base;
+       resource_size_t size, ctrl_off, data_off;
+       int processed, consumed = 0;
+
+       /* only one fw_cfg device can exist system-wide, so if one
+        * was processed on the command line already, we might as
+        * well stop here.
+        */
+       if (fw_cfg_cmdline_dev) {
+               /* avoid leaking previously registered device */
+               platform_device_unregister(fw_cfg_cmdline_dev);
+               return -EINVAL;
+       }
+
+       /* consume "<size>" portion of command line argument */
+       size = memparse(arg, &str);
+
+       /* get "@<base>[:<ctrl_off>:<data_off>]" chunks */
+       processed = sscanf(str, PH_ADDR_SCAN_FMT,
+                          &base, &consumed,
+                          &ctrl_off, &data_off, &consumed);
+
+       /* sscanf() must process precisely 1 or 3 chunks:
+        * <base> is mandatory, optionally followed by <ctrl_off>
+        * and <data_off>;
+        * there must be no extra characters after the last chunk,
+        * so str[consumed] must be '\0'.
+        */
+       if (str[consumed] ||
+           (processed != 1 && processed != 3))
+               return -EINVAL;
+
+       res[0].start = base;
+       res[0].end = base + size - 1;
+       res[0].flags = !strcmp(kp->name, "mmio") ? IORESOURCE_MEM :
+                                                  IORESOURCE_IO;
+
+       /* insert register offsets, if provided */
+       if (processed > 1) {
+               res[1].name = "ctrl";
+               res[1].start = ctrl_off;
+               res[1].flags = IORESOURCE_REG;
+               res[2].name = "data";
+               res[2].start = data_off;
+               res[2].flags = IORESOURCE_REG;
+       }
+
+       /* "processed" happens to nicely match the number of resources
+        * we need to pass in to this platform device.
+        */
+       fw_cfg_cmdline_dev = platform_device_register_simple("fw_cfg",
+                                       PLATFORM_DEVID_NONE, res, processed);
+       if (IS_ERR(fw_cfg_cmdline_dev))
+               return PTR_ERR(fw_cfg_cmdline_dev);
+
+       return 0;
+}
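
The format macros above encode the "<size>@<base>[:<ctrl_off>:<data_off>]" grammar: after memparse() has consumed "<size>", exactly one chunk (just <base>) or three chunks must match, with nothing left over. A rough user-space approximation, assuming a 64-bit phys_addr_t so the "ll" length modifier applies, purely to illustrate the 1-vs-3 chunk rule:

	#include <stdio.h>

	int main(void)
	{
		const char *str = "@0x9020000:8:0";	/* what remains after "<size>" is consumed */
		long long base, ctrl = 0, data = 0;
		int consumed = 0, processed;

		processed = sscanf(str, "@%lli%n:%lli:%lli%n",
				   &base, &consumed, &ctrl, &data, &consumed);
		/* reject anything but <base> or <base>:<ctrl>:<data> with no trailing junk */
		if (str[consumed] != '\0' || (processed != 1 && processed != 3))
			return 1;
		printf("base=%#llx ctrl=%lld data=%lld (%d chunks)\n",
		       (unsigned long long)base, ctrl, data, processed);
		return 0;
	}
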
+
+static int fw_cfg_cmdline_get(char *buf, const struct kernel_param *kp)
+{
+       /* stay silent if device was not configured via the command
+        * line, or if the parameter name (ioport/mmio) doesn't match
+        * the device setting
+        */
+       if (!fw_cfg_cmdline_dev ||
+           (!strcmp(kp->name, "mmio") ^
+            (fw_cfg_cmdline_dev->resource[0].flags == IORESOURCE_MEM)))
+               return 0;
+
+       switch (fw_cfg_cmdline_dev->num_resources) {
+       case 1:
+               return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_1_FMT,
+                               resource_size(&fw_cfg_cmdline_dev->resource[0]),
+                               fw_cfg_cmdline_dev->resource[0].start);
+       case 3:
+               return snprintf(buf, PAGE_SIZE, PH_ADDR_PR_3_FMT,
+                               resource_size(&fw_cfg_cmdline_dev->resource[0]),
+                               fw_cfg_cmdline_dev->resource[0].start,
+                               fw_cfg_cmdline_dev->resource[1].start,
+                               fw_cfg_cmdline_dev->resource[2].start);
+       }
+
+       /* Should never get here */
+       WARN(1, "Unexpected number of resources: %d\n",
+               fw_cfg_cmdline_dev->num_resources);
+       return 0;
+}
+
+static const struct kernel_param_ops fw_cfg_cmdline_param_ops = {
+       .set = fw_cfg_cmdline_set,
+       .get = fw_cfg_cmdline_get,
+};
+
+device_param_cb(ioport, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR);
+
+#endif /* CONFIG_FW_CFG_SYSFS_CMDLINE */
+
+static int __init fw_cfg_sysfs_init(void)
+{
+       /* create /sys/firmware/qemu_fw_cfg/ top level directory */
+       fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj);
+       if (!fw_cfg_top_ko)
+               return -ENOMEM;
+
+       return platform_driver_register(&fw_cfg_sysfs_driver);
+}
+
+static void __exit fw_cfg_sysfs_exit(void)
+{
+       platform_driver_unregister(&fw_cfg_sysfs_driver);
+
+#ifdef CONFIG_FW_CFG_SYSFS_CMDLINE
+       platform_device_unregister(fw_cfg_cmdline_dev);
+#endif
+
+       /* clean up /sys/firmware/qemu_fw_cfg/ */
+       fw_cfg_kobj_cleanup(fw_cfg_top_ko);
+}
+
+module_init(fw_cfg_sysfs_init);
+module_exit(fw_cfg_sysfs_exit);
index 8ae7ab68cb9781fd04f176136564aa841e16fd20..438e92d4c389cb762fefad5c400acde0a4f32c93 100644 (file)
@@ -106,6 +106,8 @@ config DRM_TDFX
          Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
          graphics card.  If M is selected, the module will be called tdfx.
 
+source "drivers/gpu/drm/arm/Kconfig"
+
 config DRM_R128
        tristate "ATI Rage 128"
        depends on DRM && PCI
index 61766dec6a8d3d7a4e9282b740161145818a51bc..f80fdbaeb64124482cb11a5fba18c81d570b1ae7 100644 (file)
@@ -33,6 +33,7 @@ CFLAGS_drm_trace_points.o := -I$(src)
 
 obj-$(CONFIG_DRM)      += drm.o
 obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
+obj-$(CONFIG_DRM_ARM)  += arm/
 obj-$(CONFIG_DRM_TTM)  += ttm/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
index 66f729eaf00bb99f09ef5960daf8c7630ceccb96..20c9539abc36e887dea47d00982e308be412d93e 100644 (file)
@@ -25,7 +25,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
 
 # add asic specific block
-amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
+amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
        ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
        amdgpu_amdkfd_gfx_v7.o
 
@@ -34,6 +34,7 @@ amdgpu-y += \
 
 # add GMC block
 amdgpu-y += \
+       gmc_v7_0.o \
        gmc_v8_0.o
 
 # add IH block
index 313b0cc8d676d90d1f4073c28579f096afea1a74..82edf95b7740d7fe9070cba62009374c08c34562 100644 (file)
@@ -2278,60 +2278,60 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
 
 #define amdgpu_dpm_get_temperature(adev) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
-             (adev)->pm.funcs->get_temperature((adev))
+             (adev)->pm.funcs->get_temperature((adev)))
 
 #define amdgpu_dpm_set_fan_control_mode(adev, m) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
-             (adev)->pm.funcs->set_fan_control_mode((adev), (m))
+             (adev)->pm.funcs->set_fan_control_mode((adev), (m)))
 
 #define amdgpu_dpm_get_fan_control_mode(adev) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
-             (adev)->pm.funcs->get_fan_control_mode((adev))
+             (adev)->pm.funcs->get_fan_control_mode((adev)))
 
 #define amdgpu_dpm_set_fan_speed_percent(adev, s) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-             (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
+             (adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
 
 #define amdgpu_dpm_get_fan_speed_percent(adev, s) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-             (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
+             (adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
 
 #define amdgpu_dpm_get_sclk(adev, l) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
-               (adev)->pm.funcs->get_sclk((adev), (l))
+               (adev)->pm.funcs->get_sclk((adev), (l)))
 
 #define amdgpu_dpm_get_mclk(adev, l)  \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
-             (adev)->pm.funcs->get_mclk((adev), (l))
+             (adev)->pm.funcs->get_mclk((adev), (l)))
 
 
 #define amdgpu_dpm_force_performance_level(adev, l) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
-             (adev)->pm.funcs->force_performance_level((adev), (l))
+             (adev)->pm.funcs->force_performance_level((adev), (l)))
 
 #define amdgpu_dpm_powergate_uvd(adev, g) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
-             (adev)->pm.funcs->powergate_uvd((adev), (g))
+             (adev)->pm.funcs->powergate_uvd((adev), (g)))
 
 #define amdgpu_dpm_powergate_vce(adev, g) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
-             (adev)->pm.funcs->powergate_vce((adev), (g))
+             (adev)->pm.funcs->powergate_vce((adev), (g)))
 
 #define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
-       (adev)->pp_enabled ?                                            \
+       ((adev)->pp_enabled ?                                           \
              (adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
-             (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
+             (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m)))
 
 #define amdgpu_dpm_get_current_power_state(adev) \
        (adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
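
The outer parentheses added throughout this hunk matter because each macro expands to a bare conditional expression; without them, an operator applied to the macro's result binds tighter than ?: and silently changes the meaning. A contrived stand-in (names invented, not from this file) shows the effect:

	/* a simplified stand-in for the dpm macros above */
	#define GET_TEMP_BAD(on)  (on) ? 50 : 80	/* result not parenthesized */
	#define GET_TEMP_GOOD(on) ((on) ? 50 : 80)	/* result parenthesized, as in the hunk */

	/* GET_TEMP_BAD(1)  > 90  expands to  (1) ? 50 : (80 > 90)   and evaluates to 50 */
	/* GET_TEMP_GOOD(1) > 90  expands to  ((1) ? 50 : 80) > 90   and evaluates to 0  */
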
index 0e1376317683e4de30803a987450dba3e01b9621..362bedc9e50791ee6a9b1e60f4885a87ba158677 100644 (file)
@@ -154,7 +154,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .get_fw_version = get_fw_version
 };
 
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
 {
        return (struct kfd2kgd_calls *)&kfd2kgd;
 }
index 79fa5c7de856eab635ea9010ab0b8bfc446c37c2..04b744d64b57a6355f38ff071f48836148880257 100644 (file)
@@ -115,7 +115,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .get_fw_version = get_fw_version
 };
 
-struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions()
+struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
 {
        return (struct kfd2kgd_calls *)&kfd2kgd;
 }
index 3c895863fcf50e9d4cbe15cd23d51b10b8da6f3c..fa948dcbdd5df368789d4a67621c95dc109deee7 100644 (file)
@@ -552,13 +552,14 @@ static bool amdgpu_atpx_detect(void)
 void amdgpu_register_atpx_handler(void)
 {
        bool r;
+       enum vga_switcheroo_handler_flags_t handler_flags = 0;
 
        /* detect if we have any ATPX + 2 VGA in the system */
        r = amdgpu_atpx_detect();
        if (!r)
                return;
 
-       vga_switcheroo_register_handler(&amdgpu_atpx_handler);
+       vga_switcheroo_register_handler(&amdgpu_atpx_handler, handler_flags);
 }
 
 /**
index 6f89f8e034d0eb05b96378581f8dfa7dee0fb319..b882e8175615fba482ac9c8b8f29561e7549dc18 100644 (file)
@@ -478,9 +478,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
        struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
        unsigned i;
 
-       amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
-
        if (!error) {
+               amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);
+
                /* Sort the buffer list from the smallest to largest buffer,
                 * which affects the order of buffers in the LRU list.
                 * This assures that the smallest buffers are added first
index b5dbbb57349190ce4b28409547781c515283994f..9c1af8976befea20cf371cc4a15f226dbe434d75 100644 (file)
@@ -256,11 +256,11 @@ static struct pci_device_id pciidlist[] = {
        {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
 #endif
        /* topaz */
-       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
-       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
+       {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
+       {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
        /* tonga */
        {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
        {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
index cfb6caad2a73a4264ab9386f1c6616cf75ef0c77..919146780a15a91434f7383e666eedabdf402ca0 100644 (file)
@@ -333,6 +333,10 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
        if (!adev->mode_info.mode_config_initialized)
                return 0;
 
+       /* don't init fbdev if there are no connectors */
+       if (list_empty(&adev->ddev->mode_config.connector_list))
+               return 0;
+
        /* select 8 bpp console on low vram cards */
        if (adev->mc.real_vram_size <= (32*1024*1024))
                bpp_sel = 8;
index c3ce103b6a33b2ef3a0a6f7f14877e18150ca405..b8fbbd7699e4586e13e57905b0cef818f6e43e6b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/amdgpu_drm.h>
+#include <drm/drm_cache.h>
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
@@ -261,6 +262,13 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
                                       AMDGPU_GEM_DOMAIN_OA);
 
        bo->flags = flags;
+
+       /* For architectures that don't support WC memory,
+        * mask out the WC flag from the BO
+        */
+       if (!drm_arch_can_wc_memory())
+               bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+
        amdgpu_fill_placement_to_bo(bo, placement);
        /* Kernel allocation are uninterruptible */
        r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
@@ -399,7 +407,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                }
                if (fpfn > bo->placements[i].fpfn)
                        bo->placements[i].fpfn = fpfn;
-               if (lpfn && lpfn < bo->placements[i].lpfn)
+               if (!bo->placements[i].lpfn ||
+                   (lpfn && lpfn < bo->placements[i].lpfn))
                        bo->placements[i].lpfn = lpfn;
                bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
        }
index 5ee9a069027885989f8aad55e9340e1a9d3424ea..b9d0d55f6b47df33ab7ebf7024fe7ca6e821892a 100644 (file)
@@ -99,13 +99,24 @@ static int amdgpu_pp_early_init(void *handle)
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
        switch (adev->asic_type) {
-               case CHIP_TONGA:
-               case CHIP_FIJI:
-                       adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
-                       break;
-               default:
-                       adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
-                       break;
+       case CHIP_TONGA:
+       case CHIP_FIJI:
+               adev->pp_enabled = (amdgpu_powerplay == 0) ? false : true;
+               break;
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
+               adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
+               break;
+       /* These chips don't have powerplay implementations */
+       case CHIP_BONAIRE:
+       case CHIP_HAWAII:
+       case CHIP_KABINI:
+       case CHIP_MULLINS:
+       case CHIP_KAVERI:
+       case CHIP_TOPAZ:
+       default:
+               adev->pp_enabled = false;
+               break;
        }
 #else
        adev->pp_enabled = false;
index 78e9b0f14661a0d0f404f67df3789b052dcd9133..d1f234dd21261e06764b9e5b623d11ff08f20fe4 100644 (file)
@@ -487,7 +487,7 @@ static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data)
        seq_printf(m, "rptr: 0x%08x [%5d]\n",
                   rptr, rptr);
 
-       rptr_next = ~0;
+       rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr);
 
        seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n",
                   ring->wptr, ring->wptr);
index 8a1752ff3d8e55a1d8b8ab3061f5c9c425058d0f..55cf05e1c81c398fc747f598d1ce0bc78a38a435 100644 (file)
@@ -808,7 +808,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
                        flags |= AMDGPU_PTE_SNOOPED;
        }
 
-       if (adev->asic_type >= CHIP_TOPAZ)
+       if (adev->asic_type >= CHIP_TONGA)
                flags |= AMDGPU_PTE_EXECUTABLE;
 
        flags |= AMDGPU_PTE_READABLE;
index aefc668e6b5d79e65873e973275d5f0beb7b10d3..9599f7559b3dc86308d2c375739d0cd20ee60941 100644 (file)
@@ -1282,7 +1282,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
        const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
                AMDGPU_VM_PTE_COUNT * 8);
-       unsigned pd_size, pd_entries, pts_size;
+       unsigned pd_size, pd_entries;
        int i, r;
 
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1300,8 +1300,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
        pd_entries = amdgpu_vm_num_pdes(adev);
 
        /* allocate page table array */
-       pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
-       vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
+       vm->page_tables = drm_calloc_large(pd_entries, sizeof(struct amdgpu_vm_pt));
        if (vm->page_tables == NULL) {
                DRM_ERROR("Cannot allocate memory for page table array\n");
                return -ENOMEM;
@@ -1361,7 +1360,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 
        for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
                amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
-       kfree(vm->page_tables);
+       drm_free_large(vm->page_tables);
 
        amdgpu_bo_unref(&vm->page_directory);
        fence_put(vm->page_directory_fence);
index 72793f93e2fcc9989e5f936ec1d43ba0593ac69e..6c76139de1c9c5992579a7eb44d1f6f4eff9da9a 100644 (file)
@@ -4738,6 +4738,22 @@ static int gfx_v7_0_early_init(void *handle)
        return 0;
 }
 
+static int gfx_v7_0_late_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+       if (r)
+               return r;
+
+       return 0;
+}
+
 static int gfx_v7_0_sw_init(void *handle)
 {
        struct amdgpu_ring *ring;
@@ -4890,6 +4906,8 @@ static int gfx_v7_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+       amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
        gfx_v7_0_cp_enable(adev, false);
        gfx_v7_0_rlc_stop(adev);
        gfx_v7_0_fini_pg(adev);
@@ -5527,7 +5545,7 @@ static int gfx_v7_0_set_powergating_state(void *handle,
 
 const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
        .early_init = gfx_v7_0_early_init,
-       .late_init = NULL,
+       .late_init = gfx_v7_0_late_init,
        .sw_init = gfx_v7_0_sw_init,
        .sw_fini = gfx_v7_0_sw_fini,
        .hw_init = gfx_v7_0_hw_init,
index 13235d84e5a67d45ca52bf4de9536d1c22a26dc6..8f8ec37ecd883599b416773a0a6a579da6131515 100644 (file)
@@ -111,7 +111,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
 MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
 MODULE_FIRMWARE("amdgpu/topaz_me.bin");
 MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
-MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
 MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
 
 MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
@@ -828,7 +827,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
        adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
        adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-       if (adev->asic_type != CHIP_STONEY) {
+       if ((adev->asic_type != CHIP_STONEY) &&
+           (adev->asic_type != CHIP_TOPAZ)) {
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
                err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
                if (!err) {
@@ -3851,10 +3851,16 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
                        if (r)
                                return -EINVAL;
 
-                       r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-                                                       AMDGPU_UCODE_ID_CP_MEC1);
-                       if (r)
-                               return -EINVAL;
+                       if (adev->asic_type == CHIP_TOPAZ) {
+                               r = gfx_v8_0_cp_compute_load_microcode(adev);
+                               if (r)
+                                       return r;
+                       } else {
+                               r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+                                                                                AMDGPU_UCODE_ID_CP_MEC1);
+                               if (r)
+                                       return -EINVAL;
+                       }
                }
        }
 
@@ -3901,6 +3907,8 @@ static int gfx_v8_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
+       amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
        gfx_v8_0_cp_enable(adev, false);
        gfx_v8_0_rlc_stop(adev);
        gfx_v8_0_cp_compute_fini(adev);
@@ -4186,7 +4194,18 @@ static int gfx_v8_0_soft_reset(void *handle)
                gfx_v8_0_cp_gfx_enable(adev, false);
 
                /* Disable MEC parsing/prefetching */
-               /* XXX todo */
+               gfx_v8_0_cp_compute_enable(adev, false);
+
+               if (grbm_soft_reset || srbm_soft_reset) {
+                       tmp = RREG32(mmGMCON_DEBUG);
+                       tmp = REG_SET_FIELD(tmp,
+                                           GMCON_DEBUG, GFX_STALL, 1);
+                       tmp = REG_SET_FIELD(tmp,
+                                           GMCON_DEBUG, GFX_CLEAR, 1);
+                       WREG32(mmGMCON_DEBUG, tmp);
+
+                       udelay(50);
+               }
 
                if (grbm_soft_reset) {
                        tmp = RREG32(mmGRBM_SOFT_RESET);
@@ -4215,6 +4234,16 @@ static int gfx_v8_0_soft_reset(void *handle)
                        WREG32(mmSRBM_SOFT_RESET, tmp);
                        tmp = RREG32(mmSRBM_SOFT_RESET);
                }
+
+               if (grbm_soft_reset || srbm_soft_reset) {
+                       tmp = RREG32(mmGMCON_DEBUG);
+                       tmp = REG_SET_FIELD(tmp,
+                                           GMCON_DEBUG, GFX_STALL, 0);
+                       tmp = REG_SET_FIELD(tmp,
+                                           GMCON_DEBUG, GFX_CLEAR, 0);
+                       WREG32(mmGMCON_DEBUG, tmp);
+               }
+
                /* Wait a little for things to settle down */
                udelay(50);
                gfx_v8_0_print_status((void *)adev);
@@ -4308,6 +4337,14 @@ static int gfx_v8_0_late_init(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;
 
+       r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
+       if (r)
+               return r;
+
+       r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
+       if (r)
+               return r;
+
        /* requires IBs so do in late init after IB pool is initialized */
        r = gfx_v8_0_do_edc_gpr_workarounds(adev);
        if (r)
index 3f956065d069125df0200f78527437134bf7f6d1..8aa2991ab379af7c6d981c1e01734f6b83da3692 100644 (file)
@@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 
 MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
+
+static const u32 golden_settings_iceland_a11[] =
+{
+       mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
+       mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
+};
+
+static const u32 iceland_mgcg_cgcg_init[] =
+{
+       mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
+};
+
+static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_TOPAZ:
+               amdgpu_program_register_sequence(adev,
+                                                iceland_mgcg_cgcg_init,
+                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
+               amdgpu_program_register_sequence(adev,
+                                                golden_settings_iceland_a11,
+                                                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
+               break;
+       default:
+               break;
+       }
+}
 
 /**
- * gmc8_mc_wait_for_idle - wait for MC idle callback.
+ * gmc7_mc_wait_for_idle - wait for MC idle callback.
  *
  * @adev: amdgpu_device pointer
  *
@@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
        case CHIP_HAWAII:
                chip_name = "hawaii";
                break;
+       case CHIP_TOPAZ:
+               chip_name = "topaz";
+               break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
                return 0;
        default: BUG();
        }
 
-       snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+       if (adev->asic_type == CHIP_TOPAZ)
+               snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
+       else
+               snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+
        err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
        if (err)
                goto out;
@@ -984,6 +1021,8 @@ static int gmc_v7_0_hw_init(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       gmc_v7_0_init_golden_registers(adev);
+
        gmc_v7_0_mc_program(adev);
 
        if (!(adev->flags & AMD_IS_APU)) {
index c0c9a0101eb453c3139b9796ac8449350f0fb4bd..3efd45546241afc65e5d7db1fbc541fda946857b 100644 (file)
@@ -42,9 +42,7 @@
 static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
-MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
 
 static const u32 golden_settings_tonga_a11[] =
 {
@@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
 };
 
-static const u32 golden_settings_iceland_a11[] =
-{
-       mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-       mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-       mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
-       mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
-};
-
-static const u32 iceland_mgcg_cgcg_init[] =
-{
-       mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
-};
-
 static const u32 cz_mgcg_cgcg_init[] =
 {
        mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
@@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
 static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
 {
        switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-               amdgpu_program_register_sequence(adev,
-                                                iceland_mgcg_cgcg_init,
-                                                (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
-               amdgpu_program_register_sequence(adev,
-                                                golden_settings_iceland_a11,
-                                                (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
-               break;
        case CHIP_FIJI:
                amdgpu_program_register_sequence(adev,
                                                 fiji_mgcg_cgcg_init,
@@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
-       case CHIP_TOPAZ:
-               chip_name = "topaz";
-               break;
        case CHIP_TONGA:
                chip_name = "tonga";
                break;
        case CHIP_FIJI:
-               chip_name = "fiji";
-               break;
        case CHIP_CARRIZO:
        case CHIP_STONEY:
                return 0;
@@ -1007,7 +979,7 @@ static int gmc_v8_0_hw_init(void *handle)
 
        gmc_v8_0_mc_program(adev);
 
-       if (!(adev->flags & AMD_IS_APU)) {
+       if (adev->asic_type == CHIP_TONGA) {
                r = gmc_v8_0_mc_load_microcode(adev);
                if (r) {
                        DRM_ERROR("Failed to load MC firmware!\n");
index 966d4b2ed9dad0527a2d0246b23731f1d802d9e2..090486c182497ba8de2aaa7840242be9cf4750aa 100644 (file)
@@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
                case AMDGPU_UCODE_ID_CP_ME:
                        return UCODE_ID_CP_ME_MASK;
                case AMDGPU_UCODE_ID_CP_MEC1:
-                       return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
+                       return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
                case AMDGPU_UCODE_ID_CP_MEC2:
                        return UCODE_ID_CP_MEC_MASK;
                case AMDGPU_UCODE_ID_RLC_G:
@@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
-       if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
-                       &toc->entry[toc->num_entries++])) {
-               DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
-               return -EINVAL;
-       }
-
        if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
                        &toc->entry[toc->num_entries++])) {
                DRM_ERROR("Failed to get firmware entry for SDMA0\n");
@@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
                        UCODE_ID_CP_ME_MASK |
                        UCODE_ID_CP_PFP_MASK |
                        UCODE_ID_CP_MEC_MASK |
-                       UCODE_ID_CP_MEC_JT1_MASK |
-                       UCODE_ID_CP_MEC_JT2_MASK;
+                       UCODE_ID_CP_MEC_JT1_MASK;
+
 
        if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
                DRM_ERROR("Fail to request SMU load ucode\n");
index f4a1346525febf9c9a463e32525c635f70873a93..0497784b36523086a029bb5a08cd977c9a9f6372 100644 (file)
@@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
 
 static int tonga_dpm_suspend(void *handle)
 {
-       return 0;
+       return tonga_dpm_hw_fini(handle);
 }
 
 static int tonga_dpm_resume(void *handle)
 {
-       int ret;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       mutex_lock(&adev->pm.mutex);
-
-       ret = tonga_smu_start(adev);
-       if (ret) {
-               DRM_ERROR("SMU start failed\n");
-               goto fail;
-       }
-
-fail:
-       mutex_unlock(&adev->pm.mutex);
-       return ret;
+       return tonga_dpm_hw_init(handle);
 }
 
 static int tonga_dpm_set_clockgating_state(void *handle,
index 652e76644c31cab37fee1964797d890749fbf0a7..89f5a1ff6f43aba335945d4f7a2643c0310623f8 100644 (file)
@@ -61,6 +61,7 @@
 #include "vi.h"
 #include "vi_dpm.h"
 #include "gmc_v8_0.h"
+#include "gmc_v7_0.h"
 #include "gfx_v8_0.h"
 #include "sdma_v2_4.h"
 #include "sdma_v3_0.h"
@@ -1109,10 +1110,10 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
        },
        {
                .type = AMD_IP_BLOCK_TYPE_GMC,
-               .major = 8,
-               .minor = 0,
+               .major = 7,
+               .minor = 4,
                .rev = 0,
-               .funcs = &gmc_v8_0_ip_funcs,
+               .funcs = &gmc_v7_0_ip_funcs,
        },
        {
                .type = AMD_IP_BLOCK_TYPE_IH,
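Note: Topaz now instantiates the GMC 7.4 IP block (gmc_v7_0_ip_funcs) instead of GMC 8.0, which is what the earlier gmc_v8_0 hunks pair with — the Iceland/Topaz golden registers and "topaz" microcode handling are dropped from gmc_v8_0, and gmc_v8_0_hw_init() now only attempts the MC microcode load on Tonga.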
@@ -1442,8 +1443,7 @@ static int vi_common_early_init(void *handle)
                break;
        case CHIP_FIJI:
                adev->has_uvd = true;
-               adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG |
-                               AMDGPU_CG_SUPPORT_VCE_MGCG;
+               adev->cg_flags = 0;
                adev->pg_flags = 0;
                adev->external_rev_id = adev->rev_id + 0x3c;
                break;
index 9be007081b72a8b5c75b4baf1fe01e92bfe792bc..a902ae037398389cf6e19c1ab7379ca64e6a46ed 100644 (file)
@@ -194,7 +194,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 
        kfree(p);
 
-       kfree((void *)work);
+       kfree(work);
 }
 
 static void kfd_process_destroy_delayed(struct rcu_head *rcu)
index 8f5d5edcf193b67a67f6990824f8ef8260e1afa5..aa67244a77ae38cf69761bc964c5f1d4400874c5 100644 (file)
@@ -64,6 +64,11 @@ static int pp_sw_init(void *handle)
        if (ret == 0)
                ret = hwmgr->hwmgr_func->backend_init(hwmgr);
 
+       if (ret)
+               printk("amdgpu: powerplay initialization failed\n");
+       else
+               printk("amdgpu: powerplay initialized\n");
+
        return ret;
 }
 
index 873a8d264d5c6aa7af31322fd5818dc1f46c7317..ec222c665602df7113cb4ec75b3fc2633d86a8a4 100644 (file)
@@ -272,6 +272,9 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
                                UCODE_ID_CP_MEC_JT1_MASK |
                                UCODE_ID_CP_MEC_JT2_MASK;
 
+       if (smumgr->chip_id == CHIP_STONEY)
+               fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
        cz_request_smu_load_fw(smumgr);
        cz_check_fw_load_finish(smumgr, fw_to_check);
 
@@ -282,7 +285,7 @@ static int cz_start_smu(struct pp_smumgr *smumgr)
        return ret;
 }
 
-static uint8_t cz_translate_firmware_enum_to_arg(
+static uint8_t cz_translate_firmware_enum_to_arg(struct pp_smumgr *smumgr,
                        enum cz_scratch_entry firmware_enum)
 {
        uint8_t ret = 0;
@@ -292,7 +295,10 @@ static uint8_t cz_translate_firmware_enum_to_arg(
                ret = UCODE_ID_SDMA0;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
-               ret = UCODE_ID_SDMA1;
+               if (smumgr->chip_id == CHIP_STONEY)
+                       ret = UCODE_ID_SDMA0;
+               else
+                       ret = UCODE_ID_SDMA1;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
                ret = UCODE_ID_CP_CE;
@@ -307,7 +313,10 @@ static uint8_t cz_translate_firmware_enum_to_arg(
                ret = UCODE_ID_CP_MEC_JT1;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
-               ret = UCODE_ID_CP_MEC_JT2;
+               if (smumgr->chip_id == CHIP_STONEY)
+                       ret = UCODE_ID_CP_MEC_JT1;
+               else
+                       ret = UCODE_ID_CP_MEC_JT2;
                break;
        case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
                ret = UCODE_ID_GMCON_RENG;
@@ -396,7 +405,7 @@ static int cz_smu_populate_single_scratch_task(
        struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
 
        task->type = type;
-       task->arg = cz_translate_firmware_enum_to_arg(fw_enum);
+       task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
        task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
 
        for (i = 0; i < cz_smu->scratch_buffer_length; i++)
@@ -433,7 +442,7 @@ static int cz_smu_populate_single_ucode_load_task(
        struct SMU_Task *task = &toc->tasks[cz_smu->toc_entry_used_count++];
 
        task->type = TASK_TYPE_UCODE_LOAD;
-       task->arg = cz_translate_firmware_enum_to_arg(fw_enum);
+       task->arg = cz_translate_firmware_enum_to_arg(smumgr, fw_enum);
        task->next = is_last ? END_OF_TASK_LIST : cz_smu->toc_entry_used_count;
 
        for (i = 0; i < cz_smu->driver_buffer_length; i++)
@@ -509,8 +518,14 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct pp_smumgr *smumgr)
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-       cz_smu_populate_single_ucode_load_task(smumgr,
+
+       if (smumgr->chip_id == CHIP_STONEY)
+               cz_smu_populate_single_ucode_load_task(smumgr,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+       else
+               cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
 
@@ -551,7 +566,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
 
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-       cz_smu_populate_single_ucode_load_task(smumgr,
+       if (smumgr->chip_id == CHIP_STONEY)
+               cz_smu_populate_single_ucode_load_task(smumgr,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
+       else
+               cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
@@ -561,7 +580,11 @@ static int cz_smu_construct_toc_for_bootup(struct pp_smumgr *smumgr)
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-       cz_smu_populate_single_ucode_load_task(smumgr,
+       if (smumgr->chip_id == CHIP_STONEY)
+               cz_smu_populate_single_ucode_load_task(smumgr,
+                               CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+       else
+               cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
        cz_smu_populate_single_ucode_load_task(smumgr,
                                CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
@@ -618,7 +641,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
 
        for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) {
 
-               firmware_type = cz_translate_firmware_enum_to_arg(
+               firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
                                        firmware_list[i]);
 
                ucode_id = cz_convert_fw_type_to_cgs(firmware_type);
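The cz_smumgr changes make the Stoney case reuse the SDMA0 and CP MEC JT1 images wherever SDMA1 or MEC JT2 would otherwise be requested, and drop those two masks from the firmware-load check — presumably because Stoney has only a single SDMA engine and a single MEC jump table, so the second set of firmware entries does not exist for it.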
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
new file mode 100644 (file)
index 0000000..eaed454
--- /dev/null
@@ -0,0 +1,27 @@
+config DRM_ARM
+       bool
+       help
+         Choose this option to select drivers for ARM's devices
+
+config DRM_HDLCD
+       tristate "ARM HDLCD"
+       depends on DRM && OF && (ARM || ARM64)
+       depends on COMMON_CLK
+       select DRM_ARM
+       select DRM_KMS_HELPER
+       select DRM_KMS_FB_HELPER
+       select DRM_KMS_CMA_HELPER
+       help
+         Choose this option if you have an ARM High Definition Colour LCD
+         controller.
+
+         If M is selected the module will be called hdlcd.
+
+config DRM_HDLCD_SHOW_UNDERRUN
+       bool "Show underrun conditions"
+       depends on DRM_HDLCD
+       default n
+       help
+         Enable this option to show in red colour the pixels that the
+         HDLCD device did not fetch from framebuffer due to underrun
+         conditions.
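DRM_HDLCD_SHOW_UNDERRUN is purely a debug aid: as hdlcd_set_pxl_fmt() later in this diff shows, it only changes the default value programmed into HDLCD_REG_RED_SELECT (or-ing in 0x00ff0000), so any pixel the controller fails to fetch is shown as solid red instead of the normal default colour.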
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
new file mode 100644 (file)
index 0000000..89dcb7b
--- /dev/null
@@ -0,0 +1,2 @@
+hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
+obj-$(CONFIG_DRM_HDLCD)        += hdlcd.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
new file mode 100644 (file)
index 0000000..fef1b04
--- /dev/null
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ *  Implementation of a CRTC class for the HDLCD driver.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_plane_helper.h>
+#include <linux/clk.h>
+#include <linux/of_graph.h>
+#include <linux/platform_data/simplefb.h>
+#include <video/videomode.h>
+
+#include "hdlcd_drv.h"
+#include "hdlcd_regs.h"
+
+/*
+ * The HDLCD controller is a dumb RGB streamer that gets connected to
+ * a single HDMI transmitter or in the case of the ARM Models it gets
+ * emulated by the software that does the actual rendering.
+ *
+ */
+
+static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
+       .destroy = drm_crtc_cleanup,
+       .set_config = drm_atomic_helper_set_config,
+       .page_flip = drm_atomic_helper_page_flip,
+       .reset = drm_atomic_helper_crtc_reset,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS;
+
+/*
+ * Setup the HDLCD registers for decoding the pixels out of the framebuffer
+ */
+static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
+{
+       unsigned int btpp;
+       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+       uint32_t pixel_format;
+       struct simplefb_format *format = NULL;
+       int i;
+
+       pixel_format = crtc->primary->state->fb->pixel_format;
+
+       for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
+               if (supported_formats[i].fourcc == pixel_format)
+                       format = &supported_formats[i];
+       }
+
+       if (WARN_ON(!format))
+               return 0;
+
+       /* HDLCD uses 'bytes per pixel', zero means 1 byte */
+       btpp = (format->bits_per_pixel + 7) / 8;
+       hdlcd_write(hdlcd, HDLCD_REG_PIXEL_FORMAT, (btpp - 1) << 3);
+
+       /*
+        * The format of the HDLCD_REG_<color>_SELECT register is:
+        *   - bits[23:16] - default value for that color component
+        *   - bits[11:8]  - number of bits to extract for each color component
+        *   - bits[4:0]   - index of the lowest bit to extract
+        *
+        * The default color value is used when bits[11:8] are zero, when the
+        * pixel is outside the visible frame area or when there is a
+        * buffer underrun.
+        */
+       hdlcd_write(hdlcd, HDLCD_REG_RED_SELECT, format->red.offset |
+#ifdef CONFIG_DRM_HDLCD_SHOW_UNDERRUN
+                   0x00ff0000 |        /* show underruns in red */
+#endif
+                   ((format->red.length & 0xf) << 8));
+       hdlcd_write(hdlcd, HDLCD_REG_GREEN_SELECT, format->green.offset |
+                   ((format->green.length & 0xf) << 8));
+       hdlcd_write(hdlcd, HDLCD_REG_BLUE_SELECT, format->blue.offset |
+                   ((format->blue.length & 0xf) << 8));
+
+       return 0;
+}
+
+static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
+{
+       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+       struct drm_display_mode *m = &crtc->state->adjusted_mode;
+       struct videomode vm;
+       unsigned int polarities, line_length, err;
+
+       vm.vfront_porch = m->crtc_vsync_start - m->crtc_vdisplay;
+       vm.vback_porch = m->crtc_vtotal - m->crtc_vsync_end;
+       vm.vsync_len = m->crtc_vsync_end - m->crtc_vsync_start;
+       vm.hfront_porch = m->crtc_hsync_start - m->crtc_hdisplay;
+       vm.hback_porch = m->crtc_htotal - m->crtc_hsync_end;
+       vm.hsync_len = m->crtc_hsync_end - m->crtc_hsync_start;
+
+       polarities = HDLCD_POLARITY_DATAEN | HDLCD_POLARITY_DATA;
+
+       if (m->flags & DRM_MODE_FLAG_PHSYNC)
+               polarities |= HDLCD_POLARITY_HSYNC;
+       if (m->flags & DRM_MODE_FLAG_PVSYNC)
+               polarities |= HDLCD_POLARITY_VSYNC;
+
+       line_length = crtc->primary->state->fb->pitches[0];
+
+       /* Allow max number of outstanding requests and largest burst size */
+       hdlcd_write(hdlcd, HDLCD_REG_BUS_OPTIONS,
+                   HDLCD_BUS_MAX_OUTSTAND | HDLCD_BUS_BURST_16);
+
+       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, line_length);
+       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, line_length);
+       hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, m->crtc_vdisplay - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_V_DATA, m->crtc_vdisplay - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_V_BACK_PORCH, vm.vback_porch - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_V_FRONT_PORCH, vm.vfront_porch - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_V_SYNC, vm.vsync_len - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_H_BACK_PORCH, vm.hback_porch - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_H_FRONT_PORCH, vm.hfront_porch - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_H_SYNC, vm.hsync_len - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_H_DATA, m->crtc_hdisplay - 1);
+       hdlcd_write(hdlcd, HDLCD_REG_POLARITIES, polarities);
+
+       err = hdlcd_set_pxl_fmt(crtc);
+       if (err)
+               return;
+
+       clk_set_rate(hdlcd->clk, m->crtc_clock * 1000);
+}
+
+static void hdlcd_crtc_enable(struct drm_crtc *crtc)
+{
+       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+       clk_prepare_enable(hdlcd->clk);
+       hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
+       drm_crtc_vblank_on(crtc);
+}
+
+static void hdlcd_crtc_disable(struct drm_crtc *crtc)
+{
+       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+
+       if (!crtc->primary->fb)
+               return;
+
+       clk_disable_unprepare(hdlcd->clk);
+       hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
+       drm_crtc_vblank_off(crtc);
+}
+
+static int hdlcd_crtc_atomic_check(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *state)
+{
+       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+       struct drm_display_mode *mode = &state->adjusted_mode;
+       long rate, clk_rate = mode->clock * 1000;
+
+       rate = clk_round_rate(hdlcd->clk, clk_rate);
+       if (rate != clk_rate) {
+               /* clock required by mode not supported by hardware */
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *state)
+{
+       struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
+       unsigned long flags;
+
+       if (crtc->state->event) {
+               struct drm_pending_vblank_event *event = crtc->state->event;
+
+               crtc->state->event = NULL;
+               event->pipe = drm_crtc_index(crtc);
+
+               WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+               spin_lock_irqsave(&crtc->dev->event_lock, flags);
+               list_add_tail(&event->base.link, &hdlcd->event_list);
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+       }
+}
+
+static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *state)
+{
+}
+
+static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
+                       const struct drm_display_mode *mode,
+                       struct drm_display_mode *adjusted_mode)
+{
+       return true;
+}
+
+static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
+       .mode_fixup     = hdlcd_crtc_mode_fixup,
+       .mode_set       = drm_helper_crtc_mode_set,
+       .mode_set_base  = drm_helper_crtc_mode_set_base,
+       .mode_set_nofb  = hdlcd_crtc_mode_set_nofb,
+       .enable         = hdlcd_crtc_enable,
+       .disable        = hdlcd_crtc_disable,
+       .prepare        = hdlcd_crtc_disable,
+       .commit         = hdlcd_crtc_enable,
+       .atomic_check   = hdlcd_crtc_atomic_check,
+       .atomic_begin   = hdlcd_crtc_atomic_begin,
+       .atomic_flush   = hdlcd_crtc_atomic_flush,
+};
+
+static int hdlcd_plane_atomic_check(struct drm_plane *plane,
+                                   struct drm_plane_state *state)
+{
+       return 0;
+}
+
+static void hdlcd_plane_atomic_update(struct drm_plane *plane,
+                                     struct drm_plane_state *state)
+{
+       struct hdlcd_drm_private *hdlcd;
+       struct drm_gem_cma_object *gem;
+       dma_addr_t scanout_start;
+
+       if (!plane->state->crtc || !plane->state->fb)
+               return;
+
+       hdlcd = crtc_to_hdlcd_priv(plane->state->crtc);
+       gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
+       scanout_start = gem->paddr;
+       hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start);
+}
+
+static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
+       .prepare_fb = NULL,
+       .cleanup_fb = NULL,
+       .atomic_check = hdlcd_plane_atomic_check,
+       .atomic_update = hdlcd_plane_atomic_update,
+};
+
+static void hdlcd_plane_destroy(struct drm_plane *plane)
+{
+       drm_plane_helper_disable(plane);
+       drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs hdlcd_plane_funcs = {
+       .update_plane           = drm_atomic_helper_update_plane,
+       .disable_plane          = drm_atomic_helper_disable_plane,
+       .destroy                = hdlcd_plane_destroy,
+       .reset                  = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
+};
+
+static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
+{
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       struct drm_plane *plane = NULL;
+       u32 formats[ARRAY_SIZE(supported_formats)], i;
+       int ret;
+
+       plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
+       if (!plane)
+               return ERR_PTR(-ENOMEM);
+
+       for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
+               formats[i] = supported_formats[i].fourcc;
+
+       ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs,
+                                      formats, ARRAY_SIZE(formats),
+                                      DRM_PLANE_TYPE_PRIMARY, NULL);
+       if (ret) {
+               devm_kfree(drm->dev, plane);
+               return ERR_PTR(ret);
+       }
+
+       drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
+       hdlcd->plane = plane;
+
+       return plane;
+}
+
+void hdlcd_crtc_suspend(struct drm_crtc *crtc)
+{
+       hdlcd_crtc_disable(crtc);
+}
+
+void hdlcd_crtc_resume(struct drm_crtc *crtc)
+{
+       hdlcd_crtc_enable(crtc);
+}
+
+int hdlcd_setup_crtc(struct drm_device *drm)
+{
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       struct drm_plane *primary;
+       int ret;
+
+       primary = hdlcd_plane_init(drm);
+       if (IS_ERR(primary))
+               return PTR_ERR(primary);
+
+       ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
+                                       &hdlcd_crtc_funcs, NULL);
+       if (ret) {
+               hdlcd_plane_destroy(primary);
+               devm_kfree(drm->dev, primary);
+               return ret;
+       }
+
+       drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs);
+       return 0;
+}
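As a quick cross-check of hdlcd_set_pxl_fmt() above, the following standalone sketch reproduces its register arithmetic for an assumed 32bpp x8r8g8b8 (XRGB8888) layout — red at bit 16, green at bit 8, blue at bit 0, 8 bits each. The offsets and lengths are illustrative assumptions rather than values read from the kernel's SIMPLEFB_FORMATS table:

    #include <stdio.h>

    /* Mirrors the arithmetic in hdlcd_set_pxl_fmt() for an assumed
     * x8r8g8b8 layout; values are illustrative, not driver output. */
    int main(void)
    {
        unsigned int bits_per_pixel = 32;
        unsigned int red_off = 16, green_off = 8, blue_off = 0, len = 8;
        unsigned int btpp = (bits_per_pixel + 7) / 8;   /* 4 bytes per pixel */

        printf("HDLCD_REG_PIXEL_FORMAT = 0x%02x\n", (btpp - 1) << 3);              /* 0x18 */
        printf("HDLCD_REG_RED_SELECT   = 0x%08x\n", red_off | ((len & 0xf) << 8)); /* 0x00000810 */
        printf("HDLCD_REG_GREEN_SELECT = 0x%08x\n", green_off | ((len & 0xf) << 8)); /* 0x00000808 */
        printf("HDLCD_REG_BLUE_SELECT  = 0x%08x\n", blue_off | ((len & 0xf) << 8)); /* 0x00000800 */

        /* With CONFIG_DRM_HDLCD_SHOW_UNDERRUN, RED_SELECT additionally gets
         * 0x00ff0000 or-ed in, so underrun pixels default to solid red. */
        return 0;
    }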
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
new file mode 100644 (file)
index 0000000..56b829f
--- /dev/null
@@ -0,0 +1,550 @@
+/*
+ * Copyright (C) 2013-2015 ARM Limited
+ * Author: Liviu Dudau <Liviu.Dudau@arm.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ *  ARM HDLCD Driver
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/list.h>
+#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_of.h>
+
+#include "hdlcd_drv.h"
+#include "hdlcd_regs.h"
+
+static int hdlcd_load(struct drm_device *drm, unsigned long flags)
+{
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       struct platform_device *pdev = to_platform_device(drm->dev);
+       struct resource *res;
+       u32 version;
+       int ret;
+
+       hdlcd->clk = devm_clk_get(drm->dev, "pxlclk");
+       if (IS_ERR(hdlcd->clk))
+               return PTR_ERR(hdlcd->clk);
+
+#ifdef CONFIG_DEBUG_FS
+       atomic_set(&hdlcd->buffer_underrun_count, 0);
+       atomic_set(&hdlcd->bus_error_count, 0);
+       atomic_set(&hdlcd->vsync_count, 0);
+       atomic_set(&hdlcd->dma_end_count, 0);
+#endif
+
+       INIT_LIST_HEAD(&hdlcd->event_list);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       hdlcd->mmio = devm_ioremap_resource(drm->dev, res);
+       if (IS_ERR(hdlcd->mmio)) {
+               DRM_ERROR("failed to map control registers area\n");
+               ret = PTR_ERR(hdlcd->mmio);
+               hdlcd->mmio = NULL;
+               goto fail;
+       }
+
+       version = hdlcd_read(hdlcd, HDLCD_REG_VERSION);
+       if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) {
+               DRM_ERROR("unknown product id: 0x%x\n", version);
+               ret = -EINVAL;
+               goto fail;
+       }
+       DRM_INFO("found ARM HDLCD version r%dp%d\n",
+               (version & HDLCD_VERSION_MAJOR_MASK) >> 8,
+               version & HDLCD_VERSION_MINOR_MASK);
+
+       /* Get the optional framebuffer memory resource */
+       ret = of_reserved_mem_device_init(drm->dev);
+       if (ret && ret != -ENODEV)
+               goto fail;
+
+       ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+       if (ret)
+               goto setup_fail;
+
+       ret = hdlcd_setup_crtc(drm);
+       if (ret < 0) {
+               DRM_ERROR("failed to create crtc\n");
+               goto setup_fail;
+       }
+
+       pm_runtime_enable(drm->dev);
+
+       pm_runtime_get_sync(drm->dev);
+       ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
+       pm_runtime_put_sync(drm->dev);
+       if (ret < 0) {
+               DRM_ERROR("failed to install IRQ handler\n");
+               goto irq_fail;
+       }
+
+       return 0;
+
+irq_fail:
+       drm_crtc_cleanup(&hdlcd->crtc);
+setup_fail:
+       of_reserved_mem_device_release(drm->dev);
+fail:
+       devm_clk_put(drm->dev, hdlcd->clk);
+
+       return ret;
+}
+
+static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
+{
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+       if (hdlcd->fbdev)
+               drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
+}
+
+static int hdlcd_atomic_commit(struct drm_device *dev,
+                              struct drm_atomic_state *state, bool async)
+{
+       return drm_atomic_helper_commit(dev, state, false);
+}
+
+static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
+       .fb_create = drm_fb_cma_create,
+       .output_poll_changed = hdlcd_fb_output_poll_changed,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = hdlcd_atomic_commit,
+};
+
+static void hdlcd_setup_mode_config(struct drm_device *drm)
+{
+       drm_mode_config_init(drm);
+       drm->mode_config.min_width = 0;
+       drm->mode_config.min_height = 0;
+       drm->mode_config.max_width = HDLCD_MAX_XRES;
+       drm->mode_config.max_height = HDLCD_MAX_YRES;
+       drm->mode_config.funcs = &hdlcd_mode_config_funcs;
+}
+
+static void hdlcd_lastclose(struct drm_device *drm)
+{
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+       drm_fbdev_cma_restore_mode(hdlcd->fbdev);
+}
+
+static irqreturn_t hdlcd_irq(int irq, void *arg)
+{
+       struct drm_device *drm = arg;
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       unsigned long irq_status;
+
+       irq_status = hdlcd_read(hdlcd, HDLCD_REG_INT_STATUS);
+
+#ifdef CONFIG_DEBUG_FS
+       if (irq_status & HDLCD_INTERRUPT_UNDERRUN)
+               atomic_inc(&hdlcd->buffer_underrun_count);
+
+       if (irq_status & HDLCD_INTERRUPT_DMA_END)
+               atomic_inc(&hdlcd->dma_end_count);
+
+       if (irq_status & HDLCD_INTERRUPT_BUS_ERROR)
+               atomic_inc(&hdlcd->bus_error_count);
+
+       if (irq_status & HDLCD_INTERRUPT_VSYNC)
+               atomic_inc(&hdlcd->vsync_count);
+
+#endif
+       if (irq_status & HDLCD_INTERRUPT_VSYNC) {
+               bool events_sent = false;
+               unsigned long flags;
+               struct drm_pending_vblank_event *e, *t;
+
+               drm_crtc_handle_vblank(&hdlcd->crtc);
+
+               spin_lock_irqsave(&drm->event_lock, flags);
+               list_for_each_entry_safe(e, t, &hdlcd->event_list, base.link) {
+                       list_del(&e->base.link);
+                       drm_crtc_send_vblank_event(&hdlcd->crtc, e);
+                       events_sent = true;
+               }
+               if (events_sent)
+                       drm_crtc_vblank_put(&hdlcd->crtc);
+               spin_unlock_irqrestore(&drm->event_lock, flags);
+       }
+
+       /* acknowledge interrupt(s) */
+       hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, irq_status);
+
+       return IRQ_HANDLED;
+}
+
+static void hdlcd_irq_preinstall(struct drm_device *drm)
+{
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       /* Ensure interrupts are disabled */
+       hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, 0);
+       hdlcd_write(hdlcd, HDLCD_REG_INT_CLEAR, ~0);
+}
+
+static int hdlcd_irq_postinstall(struct drm_device *drm)
+{
+#ifdef CONFIG_DEBUG_FS
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+       /* enable debug interrupts */
+       irq_mask |= HDLCD_DEBUG_INT_MASK;
+
+       hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
+#endif
+       return 0;
+}
+
+static void hdlcd_irq_uninstall(struct drm_device *drm)
+{
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       /* disable all the interrupts that we might have enabled */
+       unsigned long irq_mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+#ifdef CONFIG_DEBUG_FS
+       /* disable debug interrupts */
+       irq_mask &= ~HDLCD_DEBUG_INT_MASK;
+#endif
+
+       /* disable vsync interrupts */
+       irq_mask &= ~HDLCD_INTERRUPT_VSYNC;
+
+       hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, irq_mask);
+}
+
+static int hdlcd_enable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+       hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask | HDLCD_INTERRUPT_VSYNC);
+
+       return 0;
+}
+
+static void hdlcd_disable_vblank(struct drm_device *drm, unsigned int crtc)
+{
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       unsigned int mask = hdlcd_read(hdlcd, HDLCD_REG_INT_MASK);
+
+       hdlcd_write(hdlcd, HDLCD_REG_INT_MASK, mask & ~HDLCD_INTERRUPT_VSYNC);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *drm = node->minor->dev;
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+       seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
+       seq_printf(m, "dma_end  : %d\n", atomic_read(&hdlcd->dma_end_count));
+       seq_printf(m, "bus_error: %d\n", atomic_read(&hdlcd->bus_error_count));
+       seq_printf(m, "vsync    : %d\n", atomic_read(&hdlcd->vsync_count));
+       return 0;
+}
+
+static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
+{
+       struct drm_info_node *node = (struct drm_info_node *)m->private;
+       struct drm_device *drm = node->minor->dev;
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+       unsigned long clkrate = clk_get_rate(hdlcd->clk);
+       unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
+
+       seq_printf(m, "hw  : %lu\n", clkrate);
+       seq_printf(m, "mode: %lu\n", mode_clock);
+       return 0;
+}
+
+static struct drm_info_list hdlcd_debugfs_list[] = {
+       { "interrupt_count", hdlcd_show_underrun_count, 0 },
+       { "clocks", hdlcd_show_pxlclock, 0 },
+};
+
+static int hdlcd_debugfs_init(struct drm_minor *minor)
+{
+       return drm_debugfs_create_files(hdlcd_debugfs_list,
+               ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor);
+}
+
+static void hdlcd_debugfs_cleanup(struct drm_minor *minor)
+{
+       drm_debugfs_remove_files(hdlcd_debugfs_list,
+               ARRAY_SIZE(hdlcd_debugfs_list), minor);
+}
+#endif
+
+static const struct file_operations fops = {
+       .owner          = THIS_MODULE,
+       .open           = drm_open,
+       .release        = drm_release,
+       .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = drm_compat_ioctl,
+#endif
+       .poll           = drm_poll,
+       .read           = drm_read,
+       .llseek         = noop_llseek,
+       .mmap           = drm_gem_cma_mmap,
+};
+
+static struct drm_driver hdlcd_driver = {
+       .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
+                          DRIVER_MODESET | DRIVER_PRIME |
+                          DRIVER_ATOMIC,
+       .lastclose = hdlcd_lastclose,
+       .irq_handler = hdlcd_irq,
+       .irq_preinstall = hdlcd_irq_preinstall,
+       .irq_postinstall = hdlcd_irq_postinstall,
+       .irq_uninstall = hdlcd_irq_uninstall,
+       .get_vblank_counter = drm_vblank_no_hw_counter,
+       .enable_vblank = hdlcd_enable_vblank,
+       .disable_vblank = hdlcd_disable_vblank,
+       .gem_free_object = drm_gem_cma_free_object,
+       .gem_vm_ops = &drm_gem_cma_vm_ops,
+       .dumb_create = drm_gem_cma_dumb_create,
+       .dumb_map_offset = drm_gem_cma_dumb_map_offset,
+       .dumb_destroy = drm_gem_dumb_destroy,
+       .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+       .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+       .gem_prime_export = drm_gem_prime_export,
+       .gem_prime_import = drm_gem_prime_import,
+       .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
+       .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+       .gem_prime_vmap = drm_gem_cma_prime_vmap,
+       .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
+       .gem_prime_mmap = drm_gem_cma_prime_mmap,
+#ifdef CONFIG_DEBUG_FS
+       .debugfs_init = hdlcd_debugfs_init,
+       .debugfs_cleanup = hdlcd_debugfs_cleanup,
+#endif
+       .fops = &fops,
+       .name = "hdlcd",
+       .desc = "ARM HDLCD Controller DRM",
+       .date = "20151021",
+       .major = 1,
+       .minor = 0,
+};
+
+static int hdlcd_drm_bind(struct device *dev)
+{
+       struct drm_device *drm;
+       struct hdlcd_drm_private *hdlcd;
+       int ret;
+
+       hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL);
+       if (!hdlcd)
+               return -ENOMEM;
+
+       drm = drm_dev_alloc(&hdlcd_driver, dev);
+       if (!drm)
+               return -ENOMEM;
+
+       drm->dev_private = hdlcd;
+       hdlcd_setup_mode_config(drm);
+       ret = hdlcd_load(drm, 0);
+       if (ret)
+               goto err_free;
+
+       ret = drm_dev_register(drm, 0);
+       if (ret)
+               goto err_unload;
+
+       dev_set_drvdata(dev, drm);
+
+       ret = component_bind_all(dev, drm);
+       if (ret) {
+               DRM_ERROR("Failed to bind all components\n");
+               goto err_unregister;
+       }
+
+       ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+       if (ret < 0) {
+               DRM_ERROR("failed to initialise vblank\n");
+               goto err_vblank;
+       }
+       drm->vblank_disable_allowed = true;
+
+       drm_mode_config_reset(drm);
+       drm_kms_helper_poll_init(drm);
+
+       hdlcd->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
+                                         drm->mode_config.num_connector);
+
+       if (IS_ERR(hdlcd->fbdev)) {
+               ret = PTR_ERR(hdlcd->fbdev);
+               hdlcd->fbdev = NULL;
+               goto err_fbdev;
+       }
+
+       return 0;
+
+err_fbdev:
+       drm_kms_helper_poll_fini(drm);
+       drm_mode_config_cleanup(drm);
+       drm_vblank_cleanup(drm);
+err_vblank:
+       component_unbind_all(dev, drm);
+err_unregister:
+       drm_dev_unregister(drm);
+err_unload:
+       pm_runtime_get_sync(drm->dev);
+       drm_irq_uninstall(drm);
+       pm_runtime_put_sync(drm->dev);
+       pm_runtime_disable(drm->dev);
+       of_reserved_mem_device_release(drm->dev);
+       devm_clk_put(dev, hdlcd->clk);
+err_free:
+       drm_dev_unref(drm);
+
+       return ret;
+}
+
+static void hdlcd_drm_unbind(struct device *dev)
+{
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct hdlcd_drm_private *hdlcd = drm->dev_private;
+
+       if (hdlcd->fbdev) {
+               drm_fbdev_cma_fini(hdlcd->fbdev);
+               hdlcd->fbdev = NULL;
+       }
+       drm_kms_helper_poll_fini(drm);
+       component_unbind_all(dev, drm);
+       drm_vblank_cleanup(drm);
+       pm_runtime_get_sync(drm->dev);
+       drm_irq_uninstall(drm);
+       pm_runtime_put_sync(drm->dev);
+       pm_runtime_disable(drm->dev);
+       of_reserved_mem_device_release(drm->dev);
+       if (!IS_ERR(hdlcd->clk)) {
+               devm_clk_put(drm->dev, hdlcd->clk);
+               hdlcd->clk = NULL;
+       }
+       drm_mode_config_cleanup(drm);
+       drm_dev_unregister(drm);
+       drm_dev_unref(drm);
+       drm->dev_private = NULL;
+       dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops hdlcd_master_ops = {
+       .bind           = hdlcd_drm_bind,
+       .unbind         = hdlcd_drm_unbind,
+};
+
+static int compare_dev(struct device *dev, void *data)
+{
+       return dev->of_node == data;
+}
+
+static int hdlcd_probe(struct platform_device *pdev)
+{
+       struct device_node *port, *ep;
+       struct component_match *match = NULL;
+
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
+       /* there is only one output port inside each device, find it */
+       ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+       if (!ep)
+               return -ENODEV;
+
+       if (!of_device_is_available(ep)) {
+               of_node_put(ep);
+               return -ENODEV;
+       }
+
+       /* add the remote encoder port as component */
+       port = of_graph_get_remote_port_parent(ep);
+       of_node_put(ep);
+       if (!port || !of_device_is_available(port)) {
+               of_node_put(port);
+               return -EAGAIN;
+       }
+
+       component_match_add(&pdev->dev, &match, compare_dev, port);
+
+       return component_master_add_with_match(&pdev->dev, &hdlcd_master_ops,
+                                              match);
+}
+
+static int hdlcd_remove(struct platform_device *pdev)
+{
+       component_master_del(&pdev->dev, &hdlcd_master_ops);
+       return 0;
+}
+
+static const struct of_device_id  hdlcd_of_match[] = {
+       { .compatible   = "arm,hdlcd" },
+       {},
+};
+MODULE_DEVICE_TABLE(of, hdlcd_of_match);
+
+static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
+{
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct drm_crtc *crtc;
+
+       if (pm_runtime_suspended(dev))
+               return 0;
+
+       drm_modeset_lock_all(drm);
+       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+               hdlcd_crtc_suspend(crtc);
+       drm_modeset_unlock_all(drm);
+       return 0;
+}
+
+static int __maybe_unused hdlcd_pm_resume(struct device *dev)
+{
+       struct drm_device *drm = dev_get_drvdata(dev);
+       struct drm_crtc *crtc;
+
+       if (!pm_runtime_suspended(dev))
+               return 0;
+
+       drm_modeset_lock_all(drm);
+       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+               hdlcd_crtc_resume(crtc);
+       drm_modeset_unlock_all(drm);
+       return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
+
+static struct platform_driver hdlcd_platform_driver = {
+       .probe          = hdlcd_probe,
+       .remove         = hdlcd_remove,
+       .driver = {
+               .name = "hdlcd",
+               .pm = &hdlcd_pm_ops,
+               .of_match_table = hdlcd_of_match,
+       },
+};
+
+module_platform_driver(hdlcd_platform_driver);
+
+MODULE_AUTHOR("Liviu Dudau");
+MODULE_DESCRIPTION("ARM HDLCD DRM driver");
+MODULE_LICENSE("GPL v2");
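hdlcd_probe() only walks the OF graph to find the remote encoder port and registers a component master; the "pxlclk" clock, register space, IRQ and the DRM device itself are brought up later in hdlcd_drm_bind()/hdlcd_load(), once the encoder component (for example an external HDMI transmitter, per the comment in hdlcd_crtc.c) has been matched.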
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
new file mode 100644 (file)
index 0000000..aa23478
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ *  ARM HDLCD Controller register definition
+ */
+
+#ifndef __HDLCD_DRV_H__
+#define __HDLCD_DRV_H__
+
+struct hdlcd_drm_private {
+       void __iomem                    *mmio;
+       struct clk                      *clk;
+       struct drm_fbdev_cma            *fbdev;
+       struct drm_framebuffer          *fb;
+       struct list_head                event_list;
+       struct drm_crtc                 crtc;
+       struct drm_plane                *plane;
+#ifdef CONFIG_DEBUG_FS
+       atomic_t buffer_underrun_count;
+       atomic_t bus_error_count;
+       atomic_t vsync_count;
+       atomic_t dma_end_count;
+#endif
+};
+
+#define crtc_to_hdlcd_priv(x)  container_of(x, struct hdlcd_drm_private, crtc)
+
+static inline void hdlcd_write(struct hdlcd_drm_private *hdlcd,
+                              unsigned int reg, u32 value)
+{
+       writel(value, hdlcd->mmio + reg);
+}
+
+static inline u32 hdlcd_read(struct hdlcd_drm_private *hdlcd, unsigned int reg)
+{
+       return readl(hdlcd->mmio + reg);
+}
+
+int hdlcd_setup_crtc(struct drm_device *dev);
+void hdlcd_set_scanout(struct hdlcd_drm_private *hdlcd);
+void hdlcd_crtc_suspend(struct drm_crtc *crtc);
+void hdlcd_crtc_resume(struct drm_crtc *crtc);
+
+#endif /* __HDLCD_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/hdlcd_regs.h b/drivers/gpu/drm/arm/hdlcd_regs.h
new file mode 100644 (file)
index 0000000..66799eb
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2013,2014 ARM Limited
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ *  ARM HDLCD Controller register definition
+ */
+
+#ifndef __HDLCD_REGS_H__
+#define __HDLCD_REGS_H__
+
+/* register offsets */
+#define HDLCD_REG_VERSION              0x0000  /* ro */
+#define HDLCD_REG_INT_RAWSTAT          0x0010  /* rw */
+#define HDLCD_REG_INT_CLEAR            0x0014  /* wo */
+#define HDLCD_REG_INT_MASK             0x0018  /* rw */
+#define HDLCD_REG_INT_STATUS           0x001c  /* ro */
+#define HDLCD_REG_FB_BASE              0x0100  /* rw */
+#define HDLCD_REG_FB_LINE_LENGTH       0x0104  /* rw */
+#define HDLCD_REG_FB_LINE_COUNT                0x0108  /* rw */
+#define HDLCD_REG_FB_LINE_PITCH                0x010c  /* rw */
+#define HDLCD_REG_BUS_OPTIONS          0x0110  /* rw */
+#define HDLCD_REG_V_SYNC               0x0200  /* rw */
+#define HDLCD_REG_V_BACK_PORCH         0x0204  /* rw */
+#define HDLCD_REG_V_DATA               0x0208  /* rw */
+#define HDLCD_REG_V_FRONT_PORCH                0x020c  /* rw */
+#define HDLCD_REG_H_SYNC               0x0210  /* rw */
+#define HDLCD_REG_H_BACK_PORCH         0x0214  /* rw */
+#define HDLCD_REG_H_DATA               0x0218  /* rw */
+#define HDLCD_REG_H_FRONT_PORCH                0x021c  /* rw */
+#define HDLCD_REG_POLARITIES           0x0220  /* rw */
+#define HDLCD_REG_COMMAND              0x0230  /* rw */
+#define HDLCD_REG_PIXEL_FORMAT         0x0240  /* rw */
+#define HDLCD_REG_RED_SELECT           0x0244  /* rw */
+#define HDLCD_REG_GREEN_SELECT         0x0248  /* rw */
+#define HDLCD_REG_BLUE_SELECT          0x024c  /* rw */
+
+/* version */
+#define HDLCD_PRODUCT_ID               0x1CDC0000
+#define HDLCD_PRODUCT_MASK             0xFFFF0000
+#define HDLCD_VERSION_MAJOR_MASK       0x0000FF00
+#define HDLCD_VERSION_MINOR_MASK       0x000000FF
+
+/* interrupts */
+#define HDLCD_INTERRUPT_DMA_END                (1 << 0)
+#define HDLCD_INTERRUPT_BUS_ERROR      (1 << 1)
+#define HDLCD_INTERRUPT_VSYNC          (1 << 2)
+#define HDLCD_INTERRUPT_UNDERRUN       (1 << 3)
+#define HDLCD_DEBUG_INT_MASK           (HDLCD_INTERRUPT_DMA_END |  \
+                                       HDLCD_INTERRUPT_BUS_ERROR | \
+                                       HDLCD_INTERRUPT_UNDERRUN)
+
+/* polarities */
+#define HDLCD_POLARITY_VSYNC           (1 << 0)
+#define HDLCD_POLARITY_HSYNC           (1 << 1)
+#define HDLCD_POLARITY_DATAEN          (1 << 2)
+#define HDLCD_POLARITY_DATA            (1 << 3)
+#define HDLCD_POLARITY_PIXELCLK                (1 << 4)
+
+/* commands */
+#define HDLCD_COMMAND_DISABLE          (0 << 0)
+#define HDLCD_COMMAND_ENABLE           (1 << 0)
+
+/* pixel format */
+#define HDLCD_PIXEL_FMT_LITTLE_ENDIAN  (0 << 31)
+#define HDLCD_PIXEL_FMT_BIG_ENDIAN     (1 << 31)
+#define HDLCD_BYTES_PER_PIXEL_MASK     (3 << 3)
+
+/* bus options */
+#define HDLCD_BUS_BURST_MASK           0x01f
+#define HDLCD_BUS_MAX_OUTSTAND         0xf00
+#define HDLCD_BUS_BURST_NONE           (0 << 0)
+#define HDLCD_BUS_BURST_1              (1 << 0)
+#define HDLCD_BUS_BURST_2              (1 << 1)
+#define HDLCD_BUS_BURST_4              (1 << 2)
+#define HDLCD_BUS_BURST_8              (1 << 3)
+#define HDLCD_BUS_BURST_16             (1 << 4)
+
+/* Max resolution supported is 4096x4096, 32bpp */
+#define HDLCD_MAX_XRES                 4096
+#define HDLCD_MAX_YRES                 4096
+
+#define NR_PALETTE                     256
+
+#endif /* __HDLCD_REGS_H__ */
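As a quick sanity check of the version masks: a HDLCD_REG_VERSION reading of 0x1cdc0105 matches HDLCD_PRODUCT_ID under HDLCD_PRODUCT_MASK and would be reported by hdlcd_load() as "found ARM HDLCD version r1p5" ((0x0105 & 0xff00) >> 8 = 1 and 0x0105 & 0xff = 5).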
index 3bd7e1cde99ece810a80f74ca3248ac6041dfba6..82043c204b76be3f105d6c57eac7ec2979d16ef3 100644 (file)
@@ -188,9 +188,6 @@ static const struct file_operations armada_drm_fops = {
 
 static struct drm_driver armada_drm_driver = {
        .load                   = armada_drm_load,
-       .open                   = NULL,
-       .preclose               = NULL,
-       .postclose              = NULL,
        .lastclose              = armada_drm_lastclose,
        .unload                 = armada_drm_unload,
        .set_busid              = drm_platform_set_busid,
index 468a14f266a7bb122dfea97fec20c19a7245317c..9863291a9a5413d1b0969a9dab4a4c532d3e0f7c 100644 (file)
@@ -280,24 +280,6 @@ static void atmel_hlcdc_crtc_destroy(struct drm_crtc *c)
        kfree(crtc);
 }
 
-void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *c,
-                                      struct drm_file *file)
-{
-       struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
-       struct drm_pending_vblank_event *event;
-       struct drm_device *dev = c->dev;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       event = crtc->event;
-       if (event && event->base.file_priv == file) {
-               event->base.destroy(&event->base);
-               drm_vblank_put(dev, crtc->id);
-               crtc->event = NULL;
-       }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
 {
        struct drm_device *dev = crtc->base.dev;
index a45b32ba029ed412a1451c1536b57306beb7f59b..3d8d16402d075ebba5086debbc7812167ba0cd73 100644 (file)
@@ -619,15 +619,6 @@ static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
        mutex_unlock(&dev->mode_config.mutex);
 }
 
-static void atmel_hlcdc_dc_preclose(struct drm_device *dev,
-                                   struct drm_file *file)
-{
-       struct drm_crtc *crtc;
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-               atmel_hlcdc_crtc_cancel_page_flip(crtc, file);
-}
-
 static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
 {
        struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -698,7 +689,6 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
                           DRIVER_MODESET | DRIVER_PRIME |
                           DRIVER_ATOMIC,
-       .preclose = atmel_hlcdc_dc_preclose,
        .lastclose = atmel_hlcdc_dc_lastclose,
        .irq_handler = atmel_hlcdc_dc_irq_handler,
        .irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
index cf6b375bc38d1430c892ea2bf69cdf144047efd8..fed517f297da1ead290a60c1ab0140467cefed01 100644 (file)
@@ -152,9 +152,6 @@ int atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state);
 
 void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
 
-void atmel_hlcdc_crtc_cancel_page_flip(struct drm_crtc *crtc,
-                                      struct drm_file *file);
-
 void atmel_hlcdc_crtc_suspend(struct drm_crtc *crtc);
 void atmel_hlcdc_crtc_resume(struct drm_crtc *crtc);
 
index 3f74193885f1ff1df95db09fa76e01174f6353b4..8fb469c4e4b88485102cd864b49153aaf7f8b8a8 100644 (file)
@@ -1347,44 +1347,23 @@ static struct drm_pending_vblank_event *create_vblank_event(
                struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data)
 {
        struct drm_pending_vblank_event *e = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       if (file_priv->event_space < sizeof e->event) {
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-               goto out;
-       }
-       file_priv->event_space -= sizeof e->event;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
+       int ret;
 
        e = kzalloc(sizeof *e, GFP_KERNEL);
-       if (e == NULL) {
-               spin_lock_irqsave(&dev->event_lock, flags);
-               file_priv->event_space += sizeof e->event;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-               goto out;
-       }
+       if (!e)
+               return NULL;
 
        e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
-       e->event.base.length = sizeof e->event;
+       e->event.base.length = sizeof(e->event);
        e->event.user_data = user_data;
-       e->base.event = &e->event.base;
-       e->base.file_priv = file_priv;
-       e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
-
-out:
-       return e;
-}
 
-static void destroy_vblank_event(struct drm_device *dev,
-               struct drm_file *file_priv, struct drm_pending_vblank_event *e)
-{
-       unsigned long flags;
+       ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+       if (ret) {
+               kfree(e);
+               return NULL;
+       }
 
-       spin_lock_irqsave(&dev->event_lock, flags);
-       file_priv->event_space += sizeof e->event;
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-       kfree(e);
+       return e;
 }
 
 static int atomic_set_prop(struct drm_atomic_state *state,
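Here and in the hunk below (and again in drm_mode_page_flip_ioctl() further down), the open-coded file_priv->event_space accounting is replaced by the drm_event_reserve_init()/drm_event_cancel_free() helpers, which centralise the reserve/undo bookkeeping that used to be duplicated at every event allocation site.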
@@ -1646,8 +1625,7 @@ out:
                        if (!crtc_state->event)
                                continue;
 
-                       destroy_vblank_event(dev, file_priv,
-                                            crtc_state->event);
+                       drm_event_cancel_free(dev, &crtc_state->event->base);
                }
        }
 
index 57cccd68ca522548c6c098ee72798a5b3369def0..2b430b05f35d9268b41240f3c78cf36c9c8d54ea 100644 (file)
@@ -125,6 +125,47 @@ get_current_crtc_for_encoder(struct drm_device *dev,
        return NULL;
 }
 
+static void
+set_best_encoder(struct drm_atomic_state *state,
+                struct drm_connector_state *conn_state,
+                struct drm_encoder *encoder)
+{
+       struct drm_crtc_state *crtc_state;
+       struct drm_crtc *crtc;
+
+       if (conn_state->best_encoder) {
+               /* Unset the encoder_mask in the old crtc state. */
+               crtc = conn_state->connector->state->crtc;
+
+               /* A NULL crtc is an error here because we should have
+                *  duplicated a NULL best_encoder when crtc was NULL.
+                * As an exception restoring duplicated atomic state
+                * during resume is allowed, so don't warn when
+                * best_encoder is equal to encoder we intend to set.
+                */
+               WARN_ON(!crtc && encoder != conn_state->best_encoder);
+               if (crtc) {
+                       crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+
+                       crtc_state->encoder_mask &=
+                               ~(1 << drm_encoder_index(conn_state->best_encoder));
+               }
+       }
+
+       if (encoder) {
+               crtc = conn_state->crtc;
+               WARN_ON(!crtc);
+               if (crtc) {
+                       crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
+
+                       crtc_state->encoder_mask |=
+                               1 << drm_encoder_index(encoder);
+               }
+       }
+
+       conn_state->best_encoder = encoder;
+}
+
 static int
 steal_encoder(struct drm_atomic_state *state,
              struct drm_encoder *encoder,
@@ -134,7 +175,6 @@ steal_encoder(struct drm_atomic_state *state,
        struct drm_crtc_state *crtc_state;
        struct drm_connector *connector;
        struct drm_connector_state *connector_state;
-       int ret;
 
        /*
         * We can only steal an encoder coming from a connector, which means we
@@ -165,10 +205,10 @@ steal_encoder(struct drm_atomic_state *state,
                if (IS_ERR(connector_state))
                        return PTR_ERR(connector_state);
 
-               ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
-               if (ret)
-                       return ret;
-               connector_state->best_encoder = NULL;
+               if (connector_state->best_encoder != encoder)
+                       continue;
+
+               set_best_encoder(state, connector_state, NULL);
        }
 
        return 0;
@@ -216,7 +256,7 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
                                connector->base.id,
                                connector->name);
 
-               connector_state->best_encoder = NULL;
+               set_best_encoder(state, connector_state, NULL);
 
                return 0;
        }
@@ -245,6 +285,8 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        }
 
        if (new_encoder == connector_state->best_encoder) {
+               set_best_encoder(state, connector_state, new_encoder);
+
                DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d:%s]\n",
                                 connector->base.id,
                                 connector->name,
@@ -279,7 +321,8 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
        if (WARN_ON(!connector_state->crtc))
                return -EINVAL;
 
-       connector_state->best_encoder = new_encoder;
+       set_best_encoder(state, connector_state, new_encoder);
+
        idx = drm_crtc_index(connector_state->crtc);
 
        crtc_state = state->crtc_states[idx];
@@ -617,7 +660,6 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
        for_each_connector_in_state(old_state, connector, old_conn_state, i) {
                const struct drm_encoder_helper_funcs *funcs;
                struct drm_encoder *encoder;
-               struct drm_crtc_state *old_crtc_state;
 
                /* Shut down everything that's in the changeset and currently
                 * still on. So need to check the old, saved state. */
@@ -946,9 +988,23 @@ static void wait_for_fences(struct drm_device *dev,
        }
 }
 
-static bool framebuffer_changed(struct drm_device *dev,
-                               struct drm_atomic_state *old_state,
-                               struct drm_crtc *crtc)
+/**
+ * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ * @crtc: DRM crtc
+ *
+ * Checks whether the framebuffer used for this CRTC changes as a result of
+ * the atomic update.  This is useful for drivers which cannot use
+ * drm_atomic_helper_wait_for_vblanks() and need to reimplement its
+ * functionality.
+ *
+ * Returns:
+ * true if the framebuffer changed.
+ */
+bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
+                                          struct drm_atomic_state *old_state,
+                                          struct drm_crtc *crtc)
 {
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state;
@@ -965,6 +1021,7 @@ static bool framebuffer_changed(struct drm_device *dev,
 
        return false;
 }
+EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed);
 
 /**
  * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
@@ -999,7 +1056,8 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                if (old_state->legacy_cursor_update)
                        continue;
 
-               if (!framebuffer_changed(dev, old_state, crtc))
+               if (!drm_atomic_helper_framebuffer_changed(dev,
+                               old_state, crtc))
                        continue;
 
                ret = drm_crtc_vblank_get(crtc);
@@ -2533,8 +2591,10 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
        kfree(plane->state);
        plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
 
-       if (plane->state)
+       if (plane->state) {
                plane->state->plane = plane;
+               plane->state->rotation = BIT(DRM_ROTATE_0);
+       }
 }
 EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
 
index d40bab29747edb0ea989afd81b0e5b5102dfb555..65258acddb902000bbb9c458f7c5a1f1a8b62c49 100644 (file)
@@ -1160,6 +1160,29 @@ out_unlock:
 }
 EXPORT_SYMBOL(drm_encoder_init);
 
+/**
+ * drm_encoder_index - find the index of a registered encoder
+ * @encoder: encoder to find index for
+ *
+ * Given a registered encoder, return the index of that encoder within a DRM
+ * device's list of encoders.
+ */
+unsigned int drm_encoder_index(struct drm_encoder *encoder)
+{
+       unsigned int index = 0;
+       struct drm_encoder *tmp;
+
+       drm_for_each_encoder(tmp, encoder->dev) {
+               if (tmp == encoder)
+                       return index;
+
+               index++;
+       }
+
+       BUG();
+}
+EXPORT_SYMBOL(drm_encoder_index);
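
The new drm_encoder_index() makes it straightforward to track encoders as a bitmask, which is how the atomic helpers use it via set_best_encoder(). A hedged sketch of a possible driver-side use follows; the foo_* name is invented and it assumes fewer than 32 encoders:

        static u32 foo_state_encoder_mask(struct drm_atomic_state *state)
        {
                struct drm_connector *connector;
                struct drm_connector_state *conn_state;
                u32 mask = 0;
                int i;

                /* Collect every encoder picked as best_encoder in this state. */
                for_each_connector_in_state(state, connector, conn_state, i)
                        if (conn_state->best_encoder)
                                mask |= 1 << drm_encoder_index(conn_state->best_encoder);

                return mask;
        }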
+
 /**
  * drm_encoder_cleanup - cleans up an initialised encoder
  * @encoder: encoder to cleanup
@@ -5265,7 +5288,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
        struct drm_crtc *crtc;
        struct drm_framebuffer *fb = NULL;
        struct drm_pending_vblank_event *e = NULL;
-       unsigned long flags;
        int ret = -EINVAL;
 
        if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
@@ -5316,41 +5338,26 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
        }
 
        if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-               ret = -ENOMEM;
-               spin_lock_irqsave(&dev->event_lock, flags);
-               if (file_priv->event_space < sizeof(e->event)) {
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
-                       goto out;
-               }
-               file_priv->event_space -= sizeof(e->event);
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-
-               e = kzalloc(sizeof(*e), GFP_KERNEL);
-               if (e == NULL) {
-                       spin_lock_irqsave(&dev->event_lock, flags);
-                       file_priv->event_space += sizeof(e->event);
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
+               e = kzalloc(sizeof *e, GFP_KERNEL);
+               if (!e) {
+                       ret = -ENOMEM;
                        goto out;
                }
-
                e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
                e->event.base.length = sizeof(e->event);
                e->event.user_data = page_flip->user_data;
-               e->base.event = &e->event.base;
-               e->base.file_priv = file_priv;
-               e->base.destroy =
-                       (void (*) (struct drm_pending_event *)) kfree;
+               ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
+               if (ret) {
+                       kfree(e);
+                       goto out;
+               }
        }
 
        crtc->primary->old_fb = crtc->primary->fb;
        ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
        if (ret) {
-               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
-                       spin_lock_irqsave(&dev->event_lock, flags);
-                       file_priv->event_space += sizeof(e->event);
-                       spin_unlock_irqrestore(&dev->event_lock, flags);
-                       kfree(e);
-               }
+               if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT)
+                       drm_event_cancel_free(dev, &e->base);
                /* Keep the old fb, don't unref it. */
                crtc->primary->old_fb = NULL;
        } else {
@@ -5730,6 +5737,48 @@ int drm_format_vert_chroma_subsampling(uint32_t format)
 }
 EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
 
+/**
+ * drm_format_plane_width - width of the plane given the first plane
+ * @width: width of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The width of @plane, given that the width of the first plane is @width.
+ */
+int drm_format_plane_width(int width, uint32_t format, int plane)
+{
+       if (plane >= drm_format_num_planes(format))
+               return 0;
+
+       if (plane == 0)
+               return width;
+
+       return width / drm_format_horz_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_width);
+
+/**
+ * drm_format_plane_height - height of the plane given the first plane
+ * @height: height of the first plane
+ * @format: pixel format
+ * @plane: plane index
+ *
+ * Returns:
+ * The height of @plane, given that the height of the first plane is @height.
+ */
+int drm_format_plane_height(int height, uint32_t format, int plane)
+{
+       if (plane >= drm_format_num_planes(format))
+               return 0;
+
+       if (plane == 0)
+               return height;
+
+       return height / drm_format_vert_chroma_subsampling(format);
+}
+EXPORT_SYMBOL(drm_format_plane_height);
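
As an illustration of the two new helpers, the per-plane scanout sizes of a 1920x1080 NV12 framebuffer work out to 1920x1080 for the luma plane and 960x540 for the interleaved CbCr plane (2x2 chroma subsampling). A small illustrative sketch, with the foo_* name made up:

        #include <drm/drmP.h>
        #include <drm/drm_crtc.h>
        #include <drm/drm_fourcc.h>

        static void foo_print_nv12_plane_sizes(void)
        {
                int i;

                for (i = 0; i < drm_format_num_planes(DRM_FORMAT_NV12); i++) {
                        int w = drm_format_plane_width(1920, DRM_FORMAT_NV12, i);
                        int h = drm_format_plane_height(1080, DRM_FORMAT_NV12, i);

                        /* Expected: plane 0 is 1920x1080, plane 1 is 960x540. */
                        DRM_DEBUG_KMS("plane %d: %dx%d\n", i, w, h);
                }
        }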
+
 /**
  * drm_rotation_simplify() - Try to simplify the rotation
  * @rotation: Rotation to be simplified
index a02a7f9a6a9d872cbf5be0fdf521f8e9512ebe0b..df6a12de208bc5f997f4a31eabd7224a5591098e 100644 (file)
@@ -220,6 +220,15 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
  * disconnected connectors. Then it will disable all unused encoders and CRTCs
  * either by calling their disable callback if available or by calling their
  * dpms callback with DRM_MODE_DPMS_OFF.
+ *
+ * NOTE:
+ *
+ * This function is part of the legacy modeset helper library and will cause
+ * major confusion with atomic drivers. This is because atomic helpers guarantee
+ * to never call ->disable() hooks on a disabled function, or ->enable() hooks
+ * on an enabled function. drm_helper_disable_unused_functions() on the other
+ * hand throws such guarantees into the wind and calls disable hooks
+ * unconditionally on unused functions.
  */
 void drm_helper_disable_unused_functions(struct drm_device *dev)
 {
@@ -328,10 +337,12 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
                }
 
                encoder_funcs = encoder->helper_private;
-               if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
-                                                     adjusted_mode))) {
-                       DRM_DEBUG_KMS("Encoder fixup failed\n");
-                       goto done;
+               if (encoder_funcs->mode_fixup) {
+                       if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
+                                                             adjusted_mode))) {
+                               DRM_DEBUG_KMS("Encoder fixup failed\n");
+                               goto done;
+                       }
                }
        }
 
@@ -578,8 +589,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                if (set->crtc->primary->fb == NULL) {
                        DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
                        mode_changed = true;
-               } else if (set->fb == NULL) {
-                       mode_changed = true;
                } else if (set->fb->pixel_format !=
                           set->crtc->primary->fb->pixel_format) {
                        mode_changed = true;
@@ -590,7 +599,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
        if (set->x != set->crtc->x || set->y != set->crtc->y)
                fb_changed = true;
 
-       if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
+       if (!drm_mode_equal(set->mode, &set->crtc->mode)) {
                DRM_DEBUG_KMS("modes are different, full mode set\n");
                drm_mode_debug_printmodeline(&set->crtc->mode);
                drm_mode_debug_printmodeline(set->mode);
index 6ed90a2437e50438c713fc028f28705e7954b806..8ae13de272c43a7f79822098318a5c5317b54483 100644 (file)
@@ -803,12 +803,33 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
        return mstb;
 }
 
+static void drm_dp_free_mst_port(struct kref *kref);
+
+static void drm_dp_free_mst_branch_device(struct kref *kref)
+{
+       struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+       if (mstb->port_parent) {
+               if (list_empty(&mstb->port_parent->next))
+                       kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
+       }
+       kfree(mstb);
+}
+
 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 {
        struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
        struct drm_dp_mst_port *port, *tmp;
        bool wake_tx = false;
 
+       /*
+        * init kref again to be used by ports to remove mst branch when it is
+        * not needed anymore
+        */
+       kref_init(kref);
+
+       if (mstb->port_parent && list_empty(&mstb->port_parent->next))
+               kref_get(&mstb->port_parent->kref);
+
        /*
         * destroy all ports - don't need lock
         * as there are no more references to the mst branch
@@ -835,7 +856,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
 
        if (wake_tx)
                wake_up(&mstb->mgr->tx_waitq);
-       kfree(mstb);
+
+       kref_put(kref, drm_dp_free_mst_branch_device);
 }
 
 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
@@ -883,6 +905,7 @@ static void drm_dp_destroy_port(struct kref *kref)
                         * from an EDID retrieval */
 
                        mutex_lock(&mgr->destroy_connector_lock);
+                       kref_get(&port->parent->kref);
                        list_add(&port->next, &mgr->destroy_connector_list);
                        mutex_unlock(&mgr->destroy_connector_lock);
                        schedule_work(&mgr->destroy_connector_work);
@@ -1018,18 +1041,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
        return send_link;
 }
 
-static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
-                                  struct drm_dp_mst_port *port)
+static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
 {
        int ret;
-       if (port->dpcd_rev >= 0x12) {
-               port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
-               if (!port->guid_valid) {
-                       ret = drm_dp_send_dpcd_write(mstb->mgr,
-                                                    port,
-                                                    DP_GUID,
-                                                    16, port->guid);
-                       port->guid_valid = true;
+
+       memcpy(mstb->guid, guid, 16);
+
+       if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
+               if (mstb->port_parent) {
+                       ret = drm_dp_send_dpcd_write(
+                                       mstb->mgr,
+                                       mstb->port_parent,
+                                       DP_GUID,
+                                       16,
+                                       mstb->guid);
+               } else {
+
+                       ret = drm_dp_dpcd_write(
+                                       mstb->mgr->aux,
+                                       DP_GUID,
+                                       mstb->guid,
+                                       16);
                }
        }
 }
@@ -1086,7 +1118,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
        port->dpcd_rev = port_msg->dpcd_revision;
        port->num_sdp_streams = port_msg->num_sdp_streams;
        port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
-       memcpy(port->guid, port_msg->peer_guid, 16);
 
        /* manage mstb port lists with mgr lock - take a reference
           for this list */
@@ -1099,11 +1130,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
 
        if (old_ddps != port->ddps) {
                if (port->ddps) {
-                       drm_dp_check_port_guid(mstb, port);
                        if (!port->input)
                                drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
                } else {
-                       port->guid_valid = false;
                        port->available_pbn = 0;
                        }
        }
@@ -1130,13 +1159,11 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
                        drm_dp_put_port(port);
                        goto out;
                }
-               if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
-                       port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
-                       drm_mode_connector_set_tile_property(port->connector);
-               }
+
+               drm_mode_connector_set_tile_property(port->connector);
+
                (*mstb->mgr->cbs->register_connector)(port->connector);
        }
-
 out:
        /* put reference to this port */
        drm_dp_put_port(port);
@@ -1161,11 +1188,9 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
        port->ddps = conn_stat->displayport_device_plug_status;
 
        if (old_ddps != port->ddps) {
+               dowork = true;
                if (port->ddps) {
-                       drm_dp_check_port_guid(mstb, port);
-                       dowork = true;
                } else {
-                       port->guid_valid = false;
                        port->available_pbn = 0;
                }
        }
@@ -1222,13 +1247,14 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
        struct drm_dp_mst_branch *found_mstb;
        struct drm_dp_mst_port *port;
 
+       if (memcmp(mstb->guid, guid, 16) == 0)
+               return mstb;
+
+
        list_for_each_entry(port, &mstb->ports, next) {
                if (!port->mstb)
                        continue;
 
-               if (port->guid_valid && memcmp(port->guid, guid, 16) == 0)
-                       return port->mstb;
-
                found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
 
                if (found_mstb)
@@ -1247,10 +1273,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
        /* find the port by iterating down */
        mutex_lock(&mgr->lock);
 
-       if (mgr->guid_valid && memcmp(mgr->guid, guid, 16) == 0)
-               mstb = mgr->mst_primary;
-       else
-               mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
+       mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
 
        if (mstb)
                kref_get(&mstb->kref);
@@ -1271,8 +1294,13 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
                if (port->input)
                        continue;
 
-               if (!port->ddps)
+               if (!port->ddps) {
+                       if (port->cached_edid) {
+                               kfree(port->cached_edid);
+                               port->cached_edid = NULL;
+                       }
                        continue;
+               }
 
                if (!port->available_pbn)
                        drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1283,6 +1311,12 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
                                drm_dp_check_and_send_link_address(mgr, mstb_child);
                                drm_dp_put_mst_branch_device(mstb_child);
                        }
+               } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
+                       port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
+                       if (!port->cached_edid) {
+                               port->cached_edid =
+                                       drm_get_edid(port->connector, &port->aux.ddc);
+                       }
                }
        }
 }
@@ -1302,6 +1336,8 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
                drm_dp_check_and_send_link_address(mgr, mstb);
                drm_dp_put_mst_branch_device(mstb);
        }
+
+       (*mgr->cbs->hotplug)(mgr);
 }
 
 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1555,10 +1591,12 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
                                       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
                                       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
                        }
+
+                       drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
+
                        for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
                                drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
                        }
-                       (*mgr->cbs->hotplug)(mgr);
                }
        } else {
                mstb->link_address_sent = false;
@@ -1602,6 +1640,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
        return 0;
 }
 
+static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
+{
+       if (!mstb->port_parent)
+               return NULL;
+
+       if (mstb->port_parent->mstb != mstb)
+               return mstb->port_parent;
+
+       return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
+}
+
+static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+                                                                        struct drm_dp_mst_branch *mstb,
+                                                                        int *port_num)
+{
+       struct drm_dp_mst_branch *rmstb = NULL;
+       struct drm_dp_mst_port *found_port;
+       mutex_lock(&mgr->lock);
+       if (mgr->mst_primary) {
+               found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+
+               if (found_port) {
+                       rmstb = found_port->parent;
+                       kref_get(&rmstb->kref);
+                       *port_num = found_port->port_num;
+               }
+       }
+       mutex_unlock(&mgr->lock);
+       return rmstb;
+}
+
 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
                                   struct drm_dp_mst_port *port,
                                   int id,
@@ -1609,13 +1678,18 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 {
        struct drm_dp_sideband_msg_tx *txmsg;
        struct drm_dp_mst_branch *mstb;
-       int len, ret;
+       int len, ret, port_num;
        u8 sinks[DRM_DP_MAX_SDP_STREAMS];
        int i;
 
+       port_num = port->port_num;
        mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
-       if (!mstb)
-               return -EINVAL;
+       if (!mstb) {
+               mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+
+               if (!mstb)
+                       return -EINVAL;
+       }
 
        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg) {
@@ -1627,7 +1701,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
                sinks[i] = i;
 
        txmsg->dst = mstb;
-       len = build_allocate_payload(txmsg, port->port_num,
+       len = build_allocate_payload(txmsg, port_num,
                                     id,
                                     pbn, port->num_sdp_streams, sinks);
 
@@ -1983,31 +2057,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
                mgr->mst_primary = mstb;
                kref_get(&mgr->mst_primary->kref);
 
-               {
-                       struct drm_dp_payload reset_pay;
-                       reset_pay.start_slot = 0;
-                       reset_pay.num_slots = 0x3f;
-                       drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
-               }
-
                ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
-                                        DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+                                                        DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
                if (ret < 0) {
                        goto out_unlock;
                }
 
-
-               /* sort out guid */
-               ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
-               if (ret != 16) {
-                       DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
-                       goto out_unlock;
-               }
-
-               mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
-               if (!mgr->guid_valid) {
-                       ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
-                       mgr->guid_valid = true;
+               {
+                       struct drm_dp_payload reset_pay;
+                       reset_pay.start_slot = 0;
+                       reset_pay.num_slots = 0x3f;
+                       drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
                }
 
                queue_work(system_long_wq, &mgr->work);
@@ -2231,9 +2291,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
                        }
 
                        drm_dp_update_port(mstb, &msg.u.conn_stat);
-                       DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
-                       (*mgr->cbs->hotplug)(mgr);
 
+                       DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
                } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
                        drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
                        if (!mstb)
@@ -2320,10 +2379,6 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
 
        case DP_PEER_DEVICE_SST_SINK:
                status = connector_status_connected;
-               /* for logical ports - cache the EDID */
-               if (port->port_num >= 8 && !port->cached_edid) {
-                       port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
-               }
                break;
        case DP_PEER_DEVICE_DP_LEGACY_CONV:
                if (port->ldps)
@@ -2378,10 +2433,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
 
        if (port->cached_edid)
                edid = drm_edid_duplicate(port->cached_edid);
-       else {
-               edid = drm_get_edid(connector, &port->aux.ddc);
-               drm_mode_connector_set_tile_property(connector);
-       }
+
        port->has_audio = drm_detect_monitor_audio(edid);
        drm_dp_put_port(port);
        return edid;
@@ -2446,6 +2498,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
                DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
                if (pbn == port->vcpi.pbn) {
                        *slots = port->vcpi.num_slots;
+                       drm_dp_put_port(port);
                        return true;
                }
        }
@@ -2605,32 +2658,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
  */
 int drm_dp_calc_pbn_mode(int clock, int bpp)
 {
-       fixed20_12 pix_bw;
-       fixed20_12 fbpp;
-       fixed20_12 result;
-       fixed20_12 margin, tmp;
-       u32 res;
-
-       pix_bw.full = dfixed_const(clock);
-       fbpp.full = dfixed_const(bpp);
-       tmp.full = dfixed_const(8);
-       fbpp.full = dfixed_div(fbpp, tmp);
-
-       result.full = dfixed_mul(pix_bw, fbpp);
-       margin.full = dfixed_const(54);
-       tmp.full = dfixed_const(64);
-       margin.full = dfixed_div(margin, tmp);
-       result.full = dfixed_div(result, margin);
-
-       margin.full = dfixed_const(1006);
-       tmp.full = dfixed_const(1000);
-       margin.full = dfixed_div(margin, tmp);
-       result.full = dfixed_mul(result, margin);
-
-       result.full = dfixed_div(result, tmp);
-       result.full = dfixed_ceil(result);
-       res = dfixed_trunc(result);
-       return res;
+       u64 kbps;
+       s64 peak_kbps;
+       u32 numerator;
+       u32 denominator;
+
+       kbps = clock * bpp;
+
+       /*
+        * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
+        * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
+        * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen based on a
+        * common multiplier to render an integer PBN for all link rate/lane
+        * count combinations.
+        * Calculate:
+        *   peak_kbps *= (1006/1000)
+        *   peak_kbps *= (64/54)
+        *   peak_kbps *= 8    (convert to bytes)
+
+       numerator = 64 * 1006;
+       denominator = 54 * 8 * 1000 * 1000;
+
+       kbps *= numerator;
+       peak_kbps = drm_fixp_from_fraction(kbps, denominator);
+
+       return drm_fixp2int_ceil(peak_kbps);
 }
 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
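
As a sanity check on the rewritten fixed-point math, plugging the first self-test case below (clock 154000 kHz, 30 bpp) into the formula gives:

        kbps      = 154000 * 30            = 4,620,000
        numerator = 64 * 1006              = 64,384
        denom     = 54 * 8 * 1000 * 1000   = 432,000,000
        PBN       = ceil(4,620,000 * 64,384 / 432,000,000)
                  = ceil(688.55...)        = 689

which matches the expected value of 689 used by test_calc_pbn_mode().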
 
@@ -2638,11 +2690,23 @@ static int test_calc_pbn_mode(void)
 {
        int ret;
        ret = drm_dp_calc_pbn_mode(154000, 30);
-       if (ret != 689)
+       if (ret != 689) {
+               DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                               154000, 30, 689, ret);
                return -EINVAL;
+       }
        ret = drm_dp_calc_pbn_mode(234000, 30);
-       if (ret != 1047)
+       if (ret != 1047) {
+               DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                               234000, 30, 1047, ret);
                return -EINVAL;
+       }
+       ret = drm_dp_calc_pbn_mode(297000, 24);
+       if (ret != 1063) {
+               DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
+                               297000, 24, 1063, ret);
+               return -EINVAL;
+       }
        return 0;
 }
 
@@ -2783,6 +2847,13 @@ static void drm_dp_tx_work(struct work_struct *work)
        mutex_unlock(&mgr->qlock);
 }
 
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+       struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+       kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
+       kfree(port);
+}
+
 static void drm_dp_destroy_connector_work(struct work_struct *work)
 {
        struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -2803,13 +2874,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                list_del(&port->next);
                mutex_unlock(&mgr->destroy_connector_lock);
 
+               kref_init(&port->kref);
+               INIT_LIST_HEAD(&port->next);
+
                mgr->cbs->destroy_connector(mgr, port->connector);
 
                drm_dp_port_teardown_pdt(port, port->pdt);
 
-               if (!port->input && port->vcpi.vcpi > 0)
-                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-               kfree(port);
+               if (!port->input && port->vcpi.vcpi > 0) {
+                       if (mgr->mst_state) {
+                               drm_dp_mst_reset_vcpi_slots(mgr, port);
+                               drm_dp_update_payload_part1(mgr);
+                               drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
+                       }
+               }
+
+               kref_put(&port->kref, drm_dp_free_mst_port);
                send_hotplug = true;
        }
        if (send_hotplug)
@@ -2847,6 +2927,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
        mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
        mgr->max_payloads = max_payloads;
        mgr->conn_base_id = conn_base_id;
+       if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
+           max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
+               return -EINVAL;
        mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
        if (!mgr->payloads)
                return -ENOMEM;
@@ -2854,7 +2937,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
        if (!mgr->proposed_vcpis)
                return -ENOMEM;
        set_bit(0, &mgr->payload_mask);
-       test_calc_pbn_mode();
+       if (test_calc_pbn_mode() < 0)
+               DRM_ERROR("MST PBN self-test failed\n");
+
        return 0;
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
index 04cb4877fabd89d8e27f16c073912a97e199718e..fdb1eb01458690fc10ef6cdcb53b381c76d36a42 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/hdmi.h>
 #include <linux/i2c.h>
 #include <linux/module.h>
+#include <linux/vga_switcheroo.h>
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_displayid.h>
@@ -1394,6 +1395,31 @@ struct edid *drm_get_edid(struct drm_connector *connector,
 }
 EXPORT_SYMBOL(drm_get_edid);
 
+/**
+ * drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
+ * @connector: connector we're probing
+ * @adapter: I2C adapter to use for DDC
+ *
+ * Wrapper around drm_get_edid() for laptops with dual GPUs using one set of
+ * outputs. The wrapper adds the requisite vga_switcheroo calls to temporarily
+ * switch DDC to the GPU which is retrieving EDID.
+ *
+ * Return: Pointer to valid EDID or %NULL if we couldn't find any.
+ */
+struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+                                    struct i2c_adapter *adapter)
+{
+       struct pci_dev *pdev = connector->dev->pdev;
+       struct edid *edid;
+
+       vga_switcheroo_lock_ddc(pdev);
+       edid = drm_get_edid(connector, adapter);
+       vga_switcheroo_unlock_ddc(pdev);
+
+       return edid;
+}
+EXPORT_SYMBOL(drm_get_edid_switcheroo);
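
A plausible caller is a connector ->get_modes() hook in a driver for a dual-GPU laptop. The sketch below is illustrative only; foo_connector_ddc() stands in for however the driver locates its DDC adapter:

        #include <drm/drmP.h>
        #include <drm/drm_crtc.h>
        #include <drm/drm_edid.h>

        static int foo_connector_get_modes(struct drm_connector *connector)
        {
                struct i2c_adapter *ddc = foo_connector_ddc(connector);
                struct edid *edid;
                int count;

                /* Switch DDC to this GPU, fetch the EDID, then switch back. */
                edid = drm_get_edid_switcheroo(connector, ddc);
                drm_mode_connector_update_edid_property(connector, edid);
                count = drm_add_edid_modes(connector, edid);
                kfree(edid);

                return count;
        }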
+
 /**
  * drm_edid_duplicate - duplicate an EDID and the extensions
  * @edid: EDID to duplicate
index e8629076de32161a82d3103b772ce198f5c4e59e..4484785cd9ac22a45fd8a2dcf95d54d2e22fde99 100644 (file)
@@ -140,6 +140,9 @@ bool drm_i2c_encoder_mode_fixup(struct drm_encoder *encoder,
                const struct drm_display_mode *mode,
                struct drm_display_mode *adjusted_mode)
 {
+       if (!get_slave_funcs(encoder)->mode_fixup)
+               return true;
+
        return get_slave_funcs(encoder)->mode_fixup(encoder, mode, adjusted_mode);
 }
 EXPORT_SYMBOL(drm_i2c_encoder_mode_fixup);
index c895b6fddbd818052335dff264cfe9685fc3af67..bb88e3df925712e52af66869e4a23563221dc36c 100644 (file)
@@ -74,7 +74,8 @@ static struct drm_framebuffer_funcs drm_fb_cma_funcs = {
 };
 
 static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
-       const const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_cma_object **obj,
+       const struct drm_mode_fb_cmd2 *mode_cmd,
+       struct drm_gem_cma_object **obj,
        unsigned int num_planes)
 {
        struct drm_fb_cma *fb_cma;
index 1e103c4c6ee056d5011178bbbe8675d490f94a8f..76a364e620811d0f828eab3a1b1c939f95b6c9ba 100644 (file)
@@ -2091,6 +2091,27 @@ out:
  * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
  * values for the fbdev info structure.
  *
+ * HANG DEBUGGING:
+ *
+ * When you have fbcon support built-in or already loaded, this function will do
+ * a full modeset to setup the fbdev console. Due to locking misdesign in the
+ * VT/fbdev subsystem that entire modeset sequence has to be done while holding
+ * console_lock. Until console_unlock is called no dmesg lines will be sent out
+ * to consoles, not even the serial console. This means when your driver crashes,
+ * you will see absolutely nothing else but a system stuck in this function,
+ * with no further output. Any kind of printk() you place within your own driver
+ * or in the drm core modeset code will also never show up.
+ *
+ * Standard debug practice is to run the fbcon setup without taking the
+ * console_lock as a hack, to be able to see backtraces and crashes on the
+ * serial line. This can be done by setting the fb.lockless_register_fb=1 kernel
+ * cmdline option.
+ *
+ * The other option is to just disable fbdev emulation since very likely the
+ * first modeset from userspace will crash in the same way, and is even easier to
+ * debug. This can be done by setting the drm_kms_helper.fbdev_emulation=0
+ * kernel cmdline option.
+ *
  * RETURNS:
  * Zero if everything went ok, nonzero otherwise.
  */
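
For example, either of the options named above can simply be appended to the kernel command line (illustrative; pick one depending on whether fbdev emulation is still wanted; the parenthesised notes are not part of the command line):

        fb.lockless_register_fb=1          (register fbdev without holding console_lock)
        drm_kms_helper.fbdev_emulation=0   (disable fbdev emulation entirely)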
index 1ea8790e50909cc45acfa4a10a85485129d5c9bf..aeef58ed359b7ab2fc8ac146bcc22dea271f6b93 100644 (file)
@@ -1,4 +1,4 @@
-/**
+/*
  * \file drm_fops.c
  * File operations for DRM
  *
 /* from BKL pushdown */
 DEFINE_MUTEX(drm_global_mutex);
 
+/**
+ * DOC: file operations
+ *
+ * Drivers must define the file operations structure that forms the DRM
+ * userspace API entry point, even though most of those operations are
+ * implemented in the DRM core. The mandatory functions are drm_open(),
+ * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
+ * Drivers which implement private ioctls that require 32/64 bit compatibility
+ * support must provide their own .compat_ioctl() handler that processes
+ * private ioctls and calls drm_compat_ioctl() for core ioctls.
+ *
+ * In addition drm_read() and drm_poll() provide support for DRM events. DRM
+ * events are a generic and extensible means to send asynchronous events to
+ * userspace through the file descriptor. They are used by the KMS API to send
+ * vblank and page flip completion events, but drivers can also use them for
+ * their own needs, e.g. to signal completion of rendering.
+ *
+ * The memory mapping implementation will vary depending on how the driver
+ * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
+ * function, modern drivers should use one of the provided memory-manager
+ * specific implementations. For GEM-based drivers this is drm_gem_mmap().
+ *
+ * No other file operations are supported by the DRM userspace API. Overall the
+ * following is an example #file_operations structure:
+ *
+ *     static const struct file_operations example_drm_fops = {
+ *             .owner = THIS_MODULE,
+ *             .open = drm_open,
+ *             .release = drm_release,
+ *             .unlocked_ioctl = drm_ioctl,
+ *     #ifdef CONFIG_COMPAT
+ *             .compat_ioctl = drm_compat_ioctl,
+ *     #endif
+ *             .poll = drm_poll,
+ *             .read = drm_read,
+ *             .llseek = no_llseek,
+ *             .mmap = drm_gem_mmap,
+ *     };
+ */
+
 static int drm_open_helper(struct file *filp, struct drm_minor *minor);
 
 static int drm_setup(struct drm_device * dev)
@@ -67,15 +107,17 @@ static int drm_setup(struct drm_device * dev)
 }
 
 /**
- * Open file.
+ * drm_open - open method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
  *
- * \param inode device inode
- * \param filp file pointer.
- * \return zero on success or a negative number on failure.
+ * This function must be used by drivers as their .open() #file_operations
+ * method. It looks up the correct DRM device and instantiates all the per-file
+ * resources for it.
+ *
+ * RETURNS:
  *
- * Searches the DRM device with the same minor number, calls open_helper(), and
- * increments the device open count. If the open count was previous at zero,
- * i.e., it's the first that the device is open, then calls setup().
+ * 0 on success or a negative errno value on failure.
  */
 int drm_open(struct inode *inode, struct file *filp)
 {
@@ -112,7 +154,7 @@ err_undo:
 }
 EXPORT_SYMBOL(drm_open);
 
-/**
+/*
  * Check whether DRI will run on this CPU.
  *
  * \return non-zero if the DRI will run on this CPU, or zero otherwise.
@@ -125,7 +167,7 @@ static int drm_cpu_valid(void)
        return 1;
 }
 
-/**
+/*
  * drm_new_set_master - Allocate a new master object and become master for the
  * associated master realm.
  *
@@ -179,7 +221,7 @@ out_err:
        return ret;
 }
 
-/**
+/*
  * Called whenever a process opens /dev/drm.
  *
  * \param filp file pointer.
@@ -222,6 +264,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
        INIT_LIST_HEAD(&priv->fbs);
        mutex_init(&priv->fbs_lock);
        INIT_LIST_HEAD(&priv->blobs);
+       INIT_LIST_HEAD(&priv->pending_event_list);
        INIT_LIST_HEAD(&priv->event_list);
        init_waitqueue_head(&priv->event_wait);
        priv->event_space = 4096; /* set aside 4k for event buffer */
@@ -311,18 +354,16 @@ static void drm_events_release(struct drm_file *file_priv)
 {
        struct drm_device *dev = file_priv->minor->dev;
        struct drm_pending_event *e, *et;
-       struct drm_pending_vblank_event *v, *vt;
        unsigned long flags;
 
        spin_lock_irqsave(&dev->event_lock, flags);
 
-       /* Remove pending flips */
-       list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
-               if (v->base.file_priv == file_priv) {
-                       list_del(&v->base.link);
-                       drm_vblank_put(dev, v->pipe);
-                       v->base.destroy(&v->base);
-               }
+       /* Unlink pending events */
+       list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
+                                pending_link) {
+               list_del(&e->pending_link);
+               e->file_priv = NULL;
+       }
 
        /* Remove unconsumed events */
        list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
@@ -333,7 +374,7 @@ static void drm_events_release(struct drm_file *file_priv)
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
-/**
+/*
  * drm_legacy_dev_reinit
  *
  * Reinitializes a legacy/ums drm device in its lastclose function.
@@ -350,7 +391,7 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
        dev->if_version = 0;
 }
 
-/**
+/*
  * Take down the DRM device.
  *
  * \param dev DRM device structure.
@@ -387,16 +428,17 @@ int drm_lastclose(struct drm_device * dev)
 }
 
 /**
- * Release file.
+ * drm_release - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
  *
- * \param inode device inode
- * \param file_priv DRM file private.
- * \return zero on success or a negative number on failure.
+ * This function must be used by drivers as their .release() #file_operations
+ * method. It frees any resources associated with the open file, and if this is
+ * the last open file for the DRM device also proceeds to call drm_lastclose().
  *
- * If the hardware lock is held then free it, and take it again for the kernel
- * context since it's necessary to reclaim buffers. Unlink the file private
- * data from its list and free it. Decreases the open count and if it reaches
- * zero calls drm_lastclose().
+ * RETURNS:
+ *
+ * Always succeeds and returns 0.
  */
 int drm_release(struct inode *inode, struct file *filp)
 {
@@ -451,7 +493,7 @@ int drm_release(struct inode *inode, struct file *filp)
        if (file_priv->is_master) {
                struct drm_master *master = file_priv->master;
 
-               /**
+               /*
                 * Since the master is disappearing, so is the
                 * possibility to lock.
                 */
@@ -508,6 +550,32 @@ int drm_release(struct inode *inode, struct file *filp)
 }
 EXPORT_SYMBOL(drm_release);
 
+/**
+ * drm_read - read method for DRM file
+ * @filp: file pointer
+ * @buffer: userspace destination pointer for the read
+ * @count: count in bytes to read
+ * @offset: offset to read
+ *
+ * This function must be used by drivers as their .read() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion,
+ * all modern display drivers must use it.
+ *
+ * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
+ * must set the .llseek() #file_operation to no_llseek(). Polling support is
+ * provided by drm_poll().
+ *
+ * This function will only ever read a full event. Therefore userspace must
+ * supply a big enough buffer to fit any event to ensure forward progress. Since
+ * the maximum event space is currently 4K it's recommended to just use that for
+ * safety.
+ *
+ * RETURNS:
+ *
+ * Number of bytes read (always aligned to full events, and can be 0) or a
+ * negative error code on failure.
+ */
 ssize_t drm_read(struct file *filp, char __user *buffer,
                 size_t count, loff_t *offset)
 {
@@ -578,6 +646,22 @@ put_back_event:
 }
 EXPORT_SYMBOL(drm_read);
 
+/**
+ * drm_poll - poll method for DRM file
+ * @filp: file pointer
+ * @wait: poll waiter table
+ *
+ * This function must be used by drivers as their .poll() #file_operations
+ * method iff they use DRM events for asynchronous signalling to userspace.
+ * Since events are used by the KMS API for vblank and page flip completion,
+ * all modern display drivers must use it.
+ *
+ * See also drm_read().
+ *
+ * RETURNS:
+ *
+ * Mask of POLL flags indicating the current status of the file.
+ */
 unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
 {
        struct drm_file *file_priv = filp->private_data;
@@ -591,3 +675,164 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
        return mask;
 }
 EXPORT_SYMBOL(drm_poll);
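
To show how the read/poll contract looks from the other side, here is a hedged userspace sketch that drains events from an already-open DRM file descriptor; the header path and the drain_drm_events() name are assumptions, not part of this patch:

        #include <poll.h>
        #include <unistd.h>
        #include <drm/drm.h>

        static void drain_drm_events(int drm_fd)
        {
                char buf[4096];         /* matches the 4k per-file event space */
                struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };
                ssize_t len, off;

                if (poll(&pfd, 1, -1) <= 0)
                        return;

                len = read(drm_fd, buf, sizeof(buf));
                for (off = 0; off + (ssize_t)sizeof(struct drm_event) <= len; ) {
                        struct drm_event *e = (struct drm_event *)(buf + off);

                        if (e->type == DRM_EVENT_FLIP_COMPLETE) {
                                /* a page flip finished; schedule the next frame here */
                        }

                        off += e->length;
                }
        }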
+
+/**
+ * drm_event_reserve_init_locked - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed in event for eventual delivery. If the event
+ * doesn't get delivered (because the IOCTL fails later on, before queuing up
+ * anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embed @p into a larger structure, it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * This is the locked version of drm_event_reserve_init() for callers which
+ * already hold dev->event_lock.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init_locked(struct drm_device *dev,
+                                 struct drm_file *file_priv,
+                                 struct drm_pending_event *p,
+                                 struct drm_event *e)
+{
+       if (file_priv->event_space < e->length)
+               return -ENOMEM;
+
+       file_priv->event_space -= e->length;
+
+       p->event = e;
+       list_add(&p->pending_link, &file_priv->pending_event_list);
+       p->file_priv = file_priv;
+
+       /* we *could* pass this in as arg, but everyone uses kfree: */
+       p->destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_event_reserve_init_locked);
+
+/**
+ * drm_event_reserve_init - init a DRM event and reserve space for it
+ * @dev: DRM device
+ * @file_priv: DRM file private data
+ * @p: tracking structure for the pending event
+ * @e: actual event data to deliver to userspace
+ *
+ * This function prepares the passed in event for eventual delivery. If the event
+ * doesn't get delivered (because the IOCTL fails later on, before queuing up
+ * anything) then the event must be cancelled and freed using
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
+ *
+ * If callers embed @p into a larger structure, it must be allocated with
+ * kmalloc and @p must be the first member element.
+ *
+ * Callers which already hold dev->event_lock should use
+ * drm_event_reserve_init_locked() instead.
+ *
+ * RETURNS:
+ *
+ * 0 on success or a negative error code on failure.
+ */
+int drm_event_reserve_init(struct drm_device *dev,
+                          struct drm_file *file_priv,
+                          struct drm_pending_event *p,
+                          struct drm_event *e)
+{
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&dev->event_lock, flags);
+       ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(drm_event_reserve_init);
+
+/**
+ * drm_event_cancel_free - free a DRM event and release its space
+ * @dev: DRM device
+ * @p: tracking structure for the pending event
+ *
+ * This function frees the event @p initialized with drm_event_reserve_init()
+ * and releases any allocated space.
+ */
+void drm_event_cancel_free(struct drm_device *dev,
+                          struct drm_pending_event *p)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&dev->event_lock, flags);
+       if (p->file_priv) {
+               p->file_priv->event_space += p->event->length;
+               list_del(&p->pending_link);
+       }
+       spin_unlock_irqrestore(&dev->event_lock, flags);
+       p->destroy(p);
+}
+EXPORT_SYMBOL(drm_event_cancel_free);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * dev->event_lock, see drm_send_event() for the unlocked version.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+{
+       assert_spin_locked(&dev->event_lock);
+
+       if (!e->file_priv) {
+               e->destroy(e);
+               return;
+       }
+
+       list_del(&e->pending_link);
+       list_add_tail(&e->link,
+                     &e->file_priv->event_list);
+       wake_up_interruptible(&e->file_priv->event_wait);
+}
+EXPORT_SYMBOL(drm_send_event_locked);
+
+/**
+ * drm_send_event - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. This function acquires dev->event_lock,
+ * see drm_send_event_locked() for callers which already hold this lock.
+ *
+ * Note that the core will take care of unlinking and disarming events when the
+ * corresponding DRM file is closed. Drivers need not worry about whether the
+ * DRM file for this event still exists and can call this function upon
+ * completion of the asynchronous work unconditionally.
+ */
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
+{
+       unsigned long irqflags;
+
+       spin_lock_irqsave(&dev->event_lock, irqflags);
+       drm_send_event_locked(dev, e);
+       spin_unlock_irqrestore(&dev->event_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_send_event);
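
Putting the new helpers together, a driver-private completion event would roughly follow the reserve / send / cancel lifecycle sketched below. This is only an illustration under stated assumptions: struct foo_pending_event, FOO_EVENT_RENDER_DONE and foo_submit_work() are invented driver-side names, not part of the DRM core:

        #include <linux/slab.h>
        #include <drm/drmP.h>

        struct foo_pending_event {
                struct drm_pending_event base;  /* must be the first member */
                struct drm_event event;
        };

        static int foo_queue_render(struct drm_device *dev, struct drm_file *file_priv)
        {
                struct foo_pending_event *e;
                int ret;

                e = kzalloc(sizeof(*e), GFP_KERNEL);
                if (!e)
                        return -ENOMEM;

                e->event.type = FOO_EVENT_RENDER_DONE;  /* driver-defined event type */
                e->event.length = sizeof(e->event);

                ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event);
                if (ret) {
                        kfree(e);
                        return ret;
                }

                if (foo_submit_work(e)) {
                        /* Never queued: give the event space back and free it. */
                        drm_event_cancel_free(dev, &e->base);
                        return -EIO;
                }
                return 0;
        }

        /* Later, from the completion interrupt or work item: */
        static void foo_render_done(struct drm_device *dev, struct foo_pending_event *e)
        {
                drm_send_event(dev, &e->base);
        }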
index d12a4efa651b015179f776e1fc056da96dc1faf0..96d03ac38ef76c5fef60f87a3e916d6930ea5bee 100644 (file)
@@ -983,15 +983,12 @@ static void send_vblank_event(struct drm_device *dev,
                struct drm_pending_vblank_event *e,
                unsigned long seq, struct timeval *now)
 {
-       assert_spin_locked(&dev->event_lock);
-
        e->event.sequence = seq;
        e->event.tv_sec = now->tv_sec;
        e->event.tv_usec = now->tv_usec;
 
-       list_add_tail(&e->base.link,
-                     &e->base.file_priv->event_list);
-       wake_up_interruptible(&e->base.file_priv->event_wait);
+       drm_send_event_locked(dev, &e->base);
+
        trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
                                         e->event.sequence);
 }
@@ -1601,9 +1598,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
        e->event.base.type = DRM_EVENT_VBLANK;
        e->event.base.length = sizeof(e->event);
        e->event.user_data = vblwait->request.signal;
-       e->base.event = &e->event.base;
-       e->base.file_priv = file_priv;
-       e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
 
        spin_lock_irqsave(&dev->event_lock, flags);
 
@@ -1619,12 +1613,12 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
                goto err_unlock;
        }
 
-       if (file_priv->event_space < sizeof(e->event)) {
-               ret = -EBUSY;
+       ret = drm_event_reserve_init_locked(dev, file_priv, &e->base,
+                                           &e->event.base);
+
+       if (ret)
                goto err_unlock;
-       }
 
-       file_priv->event_space -= sizeof(e->event);
        seq = drm_vblank_count_and_time(dev, pipe, &now);
 
        if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
index 20775c05235a4d81ef8569011d573db303ab0a6d..f7448a5e95a9b3bf6fcdf491a079581ea5d8e7c1 100644 (file)
@@ -1371,8 +1371,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
        }
 done:
        if (i >= 0) {
-               printk(KERN_WARNING
-                       "parse error at position %i in video mode '%s'\n",
+               pr_warn("[drm] parse error at position %i in video mode '%s'\n",
                        i, name);
                mode->specified = false;
                return false;
index 27aa7183b20bc83577a87bd4fc30149b35048305..df6cdc76a16e2ed3586dd61edab3bcc553da77dc 100644 (file)
@@ -329,7 +329,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
  * drm_gem_prime_export - helper library implementation of the export callback
  * @dev: drm_device to export from
  * @obj: GEM object to export
- * @flags: flags like DRM_CLOEXEC
+ * @flags: flags like DRM_CLOEXEC and DRM_RDWR
  *
  * This is the implementation of the gem_prime_export functions for GEM drivers
  * using the PRIME helpers.
@@ -628,7 +628,6 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
 {
        struct drm_prime_handle *args = data;
-       uint32_t flags;
 
        if (!drm_core_check_feature(dev, DRIVER_PRIME))
                return -EINVAL;
@@ -637,14 +636,11 @@ int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
                return -ENOSYS;
 
        /* check flags are valid */
-       if (args->flags & ~DRM_CLOEXEC)
+       if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
                return -EINVAL;
 
-       /* we only want to pass DRM_CLOEXEC which is == O_CLOEXEC */
-       flags = args->flags & DRM_CLOEXEC;
-
        return dev->driver->prime_handle_to_fd(dev, file_priv,
-                       args->handle, flags, &args->fd);
+                       args->handle, args->flags, &args->fd);
 }
 
 int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
index 9e585d51fb7807278aeef68221f186f8f36c4a3c..e881482b5971d16ee2539307f6bf41746fc4dbef 100644 (file)
@@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
 git clone git://0x04.net/rules-ng-ng
 
 The rules-ng-ng source files this header was generated from are:
-- state_vg.xml (   5973 bytes, from 2015-03-25 11:26:01)
-- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
+- state_hi.xml (  24309 bytes, from 2015-12-12 09:02:53)
+- common.xml   (  18379 bytes, from 2015-12-12 09:02:53)
 
 Copyright (C) 2015
 */
@@ -30,15 +30,19 @@ Copyright (C) 2015
 #define ENDIAN_MODE_NO_SWAP                                    0x00000000
 #define ENDIAN_MODE_SWAP_16                                    0x00000001
 #define ENDIAN_MODE_SWAP_32                                    0x00000002
+#define chipModel_GC200                                                0x00000200
 #define chipModel_GC300                                                0x00000300
 #define chipModel_GC320                                                0x00000320
+#define chipModel_GC328                                                0x00000328
 #define chipModel_GC350                                                0x00000350
 #define chipModel_GC355                                                0x00000355
 #define chipModel_GC400                                                0x00000400
 #define chipModel_GC410                                                0x00000410
 #define chipModel_GC420                                                0x00000420
+#define chipModel_GC428                                                0x00000428
 #define chipModel_GC450                                                0x00000450
 #define chipModel_GC500                                                0x00000500
+#define chipModel_GC520                                                0x00000520
 #define chipModel_GC530                                                0x00000530
 #define chipModel_GC600                                                0x00000600
 #define chipModel_GC700                                                0x00000700
@@ -46,9 +50,16 @@ Copyright (C) 2015
 #define chipModel_GC860                                                0x00000860
 #define chipModel_GC880                                                0x00000880
 #define chipModel_GC1000                                       0x00001000
+#define chipModel_GC1500                                       0x00001500
 #define chipModel_GC2000                                       0x00002000
 #define chipModel_GC2100                                       0x00002100
+#define chipModel_GC2200                                       0x00002200
+#define chipModel_GC2500                                       0x00002500
+#define chipModel_GC3000                                       0x00003000
 #define chipModel_GC4000                                       0x00004000
+#define chipModel_GC5000                                       0x00005000
+#define chipModel_GC5200                                       0x00005200
+#define chipModel_GC6400                                       0x00006400
 #define RGBA_BITS_R                                            0x00000001
 #define RGBA_BITS_G                                            0x00000002
 #define RGBA_BITS_B                                            0x00000004
@@ -160,7 +171,7 @@ Copyright (C) 2015
 #define chipMinorFeatures2_UNK8                                        0x00000100
 #define chipMinorFeatures2_UNK9                                        0x00000200
 #define chipMinorFeatures2_UNK10                               0x00000400
-#define chipMinorFeatures2_SAMPLERBASE_16                      0x00000800
+#define chipMinorFeatures2_HALTI1                              0x00000800
 #define chipMinorFeatures2_UNK12                               0x00001000
 #define chipMinorFeatures2_UNK13                               0x00002000
 #define chipMinorFeatures2_UNK14                               0x00004000
@@ -189,7 +200,7 @@ Copyright (C) 2015
 #define chipMinorFeatures3_UNK5                                        0x00000020
 #define chipMinorFeatures3_UNK6                                        0x00000040
 #define chipMinorFeatures3_UNK7                                        0x00000080
-#define chipMinorFeatures3_UNK8                                        0x00000100
+#define chipMinorFeatures3_FAST_MSAA                           0x00000100
 #define chipMinorFeatures3_UNK9                                        0x00000200
 #define chipMinorFeatures3_BUG_FIXES10                         0x00000400
 #define chipMinorFeatures3_UNK11                               0x00000800
@@ -199,7 +210,7 @@ Copyright (C) 2015
 #define chipMinorFeatures3_UNK15                               0x00008000
 #define chipMinorFeatures3_UNK16                               0x00010000
 #define chipMinorFeatures3_UNK17                               0x00020000
-#define chipMinorFeatures3_UNK18                               0x00040000
+#define chipMinorFeatures3_ACE                                 0x00040000
 #define chipMinorFeatures3_UNK19                               0x00080000
 #define chipMinorFeatures3_UNK20                               0x00100000
 #define chipMinorFeatures3_UNK21                               0x00200000
@@ -207,7 +218,7 @@ Copyright (C) 2015
 #define chipMinorFeatures3_UNK23                               0x00800000
 #define chipMinorFeatures3_UNK24                               0x01000000
 #define chipMinorFeatures3_UNK25                               0x02000000
-#define chipMinorFeatures3_UNK26                               0x04000000
+#define chipMinorFeatures3_NEW_HZ                              0x04000000
 #define chipMinorFeatures3_UNK27                               0x08000000
 #define chipMinorFeatures3_UNK28                               0x10000000
 #define chipMinorFeatures3_UNK29                               0x20000000
@@ -229,9 +240,9 @@ Copyright (C) 2015
 #define chipMinorFeatures4_UNK13                               0x00002000
 #define chipMinorFeatures4_UNK14                               0x00004000
 #define chipMinorFeatures4_UNK15                               0x00008000
-#define chipMinorFeatures4_UNK16                               0x00010000
+#define chipMinorFeatures4_HALTI2                              0x00010000
 #define chipMinorFeatures4_UNK17                               0x00020000
-#define chipMinorFeatures4_UNK18                               0x00040000
+#define chipMinorFeatures4_SMALL_MSAA                          0x00040000
 #define chipMinorFeatures4_UNK19                               0x00080000
 #define chipMinorFeatures4_UNK20                               0x00100000
 #define chipMinorFeatures4_UNK21                               0x00200000
@@ -245,5 +256,37 @@ Copyright (C) 2015
 #define chipMinorFeatures4_UNK29                               0x20000000
 #define chipMinorFeatures4_UNK30                               0x40000000
 #define chipMinorFeatures4_UNK31                               0x80000000
+#define chipMinorFeatures5_UNK0                                        0x00000001
+#define chipMinorFeatures5_UNK1                                        0x00000002
+#define chipMinorFeatures5_UNK2                                        0x00000004
+#define chipMinorFeatures5_UNK3                                        0x00000008
+#define chipMinorFeatures5_UNK4                                        0x00000010
+#define chipMinorFeatures5_UNK5                                        0x00000020
+#define chipMinorFeatures5_UNK6                                        0x00000040
+#define chipMinorFeatures5_UNK7                                        0x00000080
+#define chipMinorFeatures5_UNK8                                        0x00000100
+#define chipMinorFeatures5_HALTI3                              0x00000200
+#define chipMinorFeatures5_UNK10                               0x00000400
+#define chipMinorFeatures5_UNK11                               0x00000800
+#define chipMinorFeatures5_UNK12                               0x00001000
+#define chipMinorFeatures5_UNK13                               0x00002000
+#define chipMinorFeatures5_UNK14                               0x00004000
+#define chipMinorFeatures5_UNK15                               0x00008000
+#define chipMinorFeatures5_UNK16                               0x00010000
+#define chipMinorFeatures5_UNK17                               0x00020000
+#define chipMinorFeatures5_UNK18                               0x00040000
+#define chipMinorFeatures5_UNK19                               0x00080000
+#define chipMinorFeatures5_UNK20                               0x00100000
+#define chipMinorFeatures5_UNK21                               0x00200000
+#define chipMinorFeatures5_UNK22                               0x00400000
+#define chipMinorFeatures5_UNK23                               0x00800000
+#define chipMinorFeatures5_UNK24                               0x01000000
+#define chipMinorFeatures5_UNK25                               0x02000000
+#define chipMinorFeatures5_UNK26                               0x04000000
+#define chipMinorFeatures5_UNK27                               0x08000000
+#define chipMinorFeatures5_UNK28                               0x10000000
+#define chipMinorFeatures5_UNK29                               0x20000000
+#define chipMinorFeatures5_UNK30                               0x40000000
+#define chipMinorFeatures5_UNK31                               0x80000000
 
 #endif /* COMMON_XML */
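
The feature words above are consumed as plain bit masks against the minor-feature registers the GPU reports. A minimal sketch of how one of the newly named bits would typically be tested (the helper name is hypothetical, not part of this patch):

/* Hypothetical helper: true if the core advertises the HALTI1 feature
 * level in the minor_features2 word read from the hardware. */
static inline bool etnaviv_has_halti1(const struct etnaviv_chip_identity *id)
{
	return !!(id->minor_features2 & chipMinorFeatures2_HALTI1);
}
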
index 5c89ebb52fd29b1357316fa000d4a61188f4d320..e8858985f01ea0fb5a17e39a3c7b246d6ae562d9 100644 (file)
@@ -668,7 +668,6 @@ static struct platform_driver etnaviv_platform_driver = {
        .probe      = etnaviv_pdev_probe,
        .remove     = etnaviv_pdev_remove,
        .driver     = {
-               .owner  = THIS_MODULE,
                .name   = "etnaviv",
                .of_match_table = dt_match,
        },
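
The removed .owner assignment is redundant: platform_driver_register() is a macro that already passes THIS_MODULE on behalf of the caller. A minimal sketch of the resulting convention (all names here are placeholders, not the etnaviv code):

#include <linux/module.h>
#include <linux/platform_device.h>

/* Stub callbacks, only to keep the sketch complete. */
static int example_probe(struct platform_device *pdev) { return 0; }
static int example_remove(struct platform_device *pdev) { return 0; }

static struct platform_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,
	.driver = {
		.name = "example",
		/* no .owner: the register macro fills it in */
	},
};
module_platform_driver(example_driver);
MODULE_LICENSE("GPL");
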
index d6bd438bd5bec73eb074aa15626fc8eb3d92d70b..1cd6046e76b11d824a06b24bd77a4cea3d1a1a93 100644 (file)
@@ -85,7 +85,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
        struct dma_buf_attachment *attach, struct sg_table *sg);
 int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
 void etnaviv_gem_prime_unpin(struct drm_gem_object *obj);
-void *etnaviv_gem_vaddr(struct drm_gem_object *obj);
+void *etnaviv_gem_vmap(struct drm_gem_object *obj);
 int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
                struct timespec *timeout);
 int etnaviv_gem_cpu_fini(struct drm_gem_object *obj);
index bf8fa859e8bece4f037e1ef7eae1c6caf0a0c85d..4a29eeadbf1e738da23a6c29a2e00e8f68d81576 100644 (file)
@@ -201,7 +201,9 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 
                obj = vram->object;
 
+               mutex_lock(&obj->lock);
                pages = etnaviv_gem_get_pages(obj);
+               mutex_unlock(&obj->lock);
                if (pages) {
                        int j;
 
@@ -213,8 +215,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 
                iter.hdr->iova = cpu_to_le64(vram->iova);
 
-               vaddr = etnaviv_gem_vaddr(&obj->base);
-               if (vaddr && !IS_ERR(vaddr))
+               vaddr = etnaviv_gem_vmap(&obj->base);
+               if (vaddr)
                        memcpy(iter.data, vaddr, obj->base.size);
 
                etnaviv_core_dump_header(&iter, ETDUMP_BUF_BO, iter.data +
index 9f77c3b94cc696c4683d18da3e7848fb040874c6..4b519e4309b28edb1a1a9f56c40fb00153c87036 100644 (file)
@@ -353,25 +353,39 @@ void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
        drm_gem_object_unreference_unlocked(obj);
 }
 
-void *etnaviv_gem_vaddr(struct drm_gem_object *obj)
+void *etnaviv_gem_vmap(struct drm_gem_object *obj)
 {
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
 
-       mutex_lock(&etnaviv_obj->lock);
-       if (!etnaviv_obj->vaddr) {
-               struct page **pages = etnaviv_gem_get_pages(etnaviv_obj);
-
-               if (IS_ERR(pages))
-                       return ERR_CAST(pages);
+       if (etnaviv_obj->vaddr)
+               return etnaviv_obj->vaddr;
 
-               etnaviv_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
-                               VM_MAP, pgprot_writecombine(PAGE_KERNEL));
-       }
+       mutex_lock(&etnaviv_obj->lock);
+       /*
+        * Need to check again, as we might have raced with another thread
+        * while waiting for the mutex.
+        */
+       if (!etnaviv_obj->vaddr)
+               etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);
        mutex_unlock(&etnaviv_obj->lock);
 
        return etnaviv_obj->vaddr;
 }
 
+static void *etnaviv_gem_vmap_impl(struct etnaviv_gem_object *obj)
+{
+       struct page **pages;
+
+       lockdep_assert_held(&obj->lock);
+
+       pages = etnaviv_gem_get_pages(obj);
+       if (IS_ERR(pages))
+               return NULL;
+
+       return vmap(pages, obj->base.size >> PAGE_SHIFT,
+                       VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+}
+
 static inline enum dma_data_direction etnaviv_op_to_dma_dir(u32 op)
 {
        if (op & ETNA_PREP_READ)
@@ -522,6 +536,7 @@ static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
 static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
        .get_pages = etnaviv_gem_shmem_get_pages,
        .release = etnaviv_gem_shmem_release,
+       .vmap = etnaviv_gem_vmap_impl,
 };
 
 void etnaviv_gem_free_object(struct drm_gem_object *obj)
@@ -866,6 +881,7 @@ static void etnaviv_gem_userptr_release(struct etnaviv_gem_object *etnaviv_obj)
 static const struct etnaviv_gem_ops etnaviv_gem_userptr_ops = {
        .get_pages = etnaviv_gem_userptr_get_pages,
        .release = etnaviv_gem_userptr_release,
+       .vmap = etnaviv_gem_vmap_impl,
 };
 
 int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
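
With the rework above, etnaviv_gem_vmap() takes the object lock itself, re-checks the cached pointer to close the race between the unlocked fast path and the mutex, and returns either a valid kernel mapping or NULL; the ERR_PTR convention of the old etnaviv_gem_vaddr() is gone. A hedged caller-side sketch (the function and buffer names are hypothetical):

/* Hypothetical caller: after the rework a NULL check is sufficient,
 * IS_ERR() is no longer needed on the returned mapping. */
static int example_copy_bo(struct etnaviv_gem_object *bo, void *dst)
{
	void *vaddr = etnaviv_gem_vmap(&bo->base);

	if (!vaddr)
		return -ENOMEM;

	memcpy(dst, vaddr, bo->base.size);
	return 0;
}
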
index a300b4b3d5458654fd1c0f200227976f64529e97..ab5df8147a5f6f1aaccad5c6ce14d99612e0ea27 100644 (file)
@@ -78,6 +78,7 @@ struct etnaviv_gem_object *to_etnaviv_bo(struct drm_gem_object *obj)
 struct etnaviv_gem_ops {
        int (*get_pages)(struct etnaviv_gem_object *);
        void (*release)(struct etnaviv_gem_object *);
+       void *(*vmap)(struct etnaviv_gem_object *);
 };
 
 static inline bool is_active(struct etnaviv_gem_object *etnaviv_obj)
index e94db4f95770e586837653b0a1b73d251bbe75d8..4e67395f5fa1c1572aa7051a86538aa16389f807 100644 (file)
@@ -31,7 +31,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
 
 void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj)
 {
-       return etnaviv_gem_vaddr(obj);
+       return etnaviv_gem_vmap(obj);
 }
 
 void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
@@ -77,9 +77,17 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
        drm_prime_gem_destroy(&etnaviv_obj->base, etnaviv_obj->sgt);
 }
 
+static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
+{
+       lockdep_assert_held(&etnaviv_obj->lock);
+
+       return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
+}
+
 static const struct etnaviv_gem_ops etnaviv_gem_prime_ops = {
        /* .get_pages should never be called */
        .release = etnaviv_gem_prime_release,
+       .vmap = etnaviv_gem_prime_vmap_impl,
 };
 
 struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
index 056a72e6ed26273146269ee0258784e8a714d36a..a33162cf4f4cc6c4f0aee66433680a15f64cfffc 100644 (file)
@@ -72,6 +72,14 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
                *value = gpu->identity.minor_features3;
                break;
 
+       case ETNAVIV_PARAM_GPU_FEATURES_5:
+               *value = gpu->identity.minor_features4;
+               break;
+
+       case ETNAVIV_PARAM_GPU_FEATURES_6:
+               *value = gpu->identity.minor_features5;
+               break;
+
        case ETNAVIV_PARAM_GPU_STREAM_COUNT:
                *value = gpu->identity.stream_count;
                break;
@@ -112,6 +120,10 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
                *value = gpu->identity.num_constants;
                break;
 
+       case ETNAVIV_PARAM_GPU_NUM_VARYINGS:
+               *value = gpu->identity.varyings_count;
+               break;
+
        default:
                DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
                return -EINVAL;
@@ -120,46 +132,56 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
        return 0;
 }
 
+
+#define etnaviv_is_model_rev(gpu, mod, rev) \
+       ((gpu)->identity.model == chipModel_##mod && \
+        (gpu)->identity.revision == rev)
+#define etnaviv_field(val, field) \
+       (((val) & field##__MASK) >> field##__SHIFT)
+
 static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 {
        if (gpu->identity.minor_features0 &
            chipMinorFeatures0_MORE_MINOR_FEATURES) {
-               u32 specs[2];
+               u32 specs[4];
+               unsigned int streams;
 
                specs[0] = gpu_read(gpu, VIVS_HI_CHIP_SPECS);
                specs[1] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_2);
-
-               gpu->identity.stream_count =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_STREAM_COUNT__MASK)
-                               >> VIVS_HI_CHIP_SPECS_STREAM_COUNT__SHIFT;
-               gpu->identity.register_max =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_REGISTER_MAX__MASK)
-                               >> VIVS_HI_CHIP_SPECS_REGISTER_MAX__SHIFT;
-               gpu->identity.thread_count =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_THREAD_COUNT__MASK)
-                               >> VIVS_HI_CHIP_SPECS_THREAD_COUNT__SHIFT;
-               gpu->identity.vertex_cache_size =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__MASK)
-                               >> VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE__SHIFT;
-               gpu->identity.shader_core_count =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__MASK)
-                               >> VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT__SHIFT;
-               gpu->identity.pixel_pipes =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_PIXEL_PIPES__MASK)
-                               >> VIVS_HI_CHIP_SPECS_PIXEL_PIPES__SHIFT;
+               specs[2] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_3);
+               specs[3] = gpu_read(gpu, VIVS_HI_CHIP_SPECS_4);
+
+               gpu->identity.stream_count = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_STREAM_COUNT);
+               gpu->identity.register_max = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_REGISTER_MAX);
+               gpu->identity.thread_count = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_THREAD_COUNT);
+               gpu->identity.vertex_cache_size = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_VERTEX_CACHE_SIZE);
+               gpu->identity.shader_core_count = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_SHADER_CORE_COUNT);
+               gpu->identity.pixel_pipes = etnaviv_field(specs[0],
+                                       VIVS_HI_CHIP_SPECS_PIXEL_PIPES);
                gpu->identity.vertex_output_buffer_size =
-                       (specs[0] & VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__MASK)
-                               >> VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE__SHIFT;
-
-               gpu->identity.buffer_size =
-                       (specs[1] & VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__MASK)
-                               >> VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE__SHIFT;
-               gpu->identity.instruction_count =
-                       (specs[1] & VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__MASK)
-                               >> VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT__SHIFT;
-               gpu->identity.num_constants =
-                       (specs[1] & VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__MASK)
-                               >> VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS__SHIFT;
+                       etnaviv_field(specs[0],
+                               VIVS_HI_CHIP_SPECS_VERTEX_OUTPUT_BUFFER_SIZE);
+
+               gpu->identity.buffer_size = etnaviv_field(specs[1],
+                                       VIVS_HI_CHIP_SPECS_2_BUFFER_SIZE);
+               gpu->identity.instruction_count = etnaviv_field(specs[1],
+                                       VIVS_HI_CHIP_SPECS_2_INSTRUCTION_COUNT);
+               gpu->identity.num_constants = etnaviv_field(specs[1],
+                                       VIVS_HI_CHIP_SPECS_2_NUM_CONSTANTS);
+
+               gpu->identity.varyings_count = etnaviv_field(specs[2],
+                                       VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT);
+
+               /* This overrides the value from the older register if non-zero */
+               streams = etnaviv_field(specs[3],
+                                       VIVS_HI_CHIP_SPECS_4_STREAM_COUNT);
+               if (streams)
+                       gpu->identity.stream_count = streams;
        }
 
        /* Fill in the stream count if not specified */
@@ -173,7 +195,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
        /* Convert the register max value */
        if (gpu->identity.register_max)
                gpu->identity.register_max = 1 << gpu->identity.register_max;
-       else if (gpu->identity.model == 0x0400)
+       else if (gpu->identity.model == chipModel_GC400)
                gpu->identity.register_max = 32;
        else
                gpu->identity.register_max = 64;
@@ -181,10 +203,10 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
        /* Convert thread count */
        if (gpu->identity.thread_count)
                gpu->identity.thread_count = 1 << gpu->identity.thread_count;
-       else if (gpu->identity.model == 0x0400)
+       else if (gpu->identity.model == chipModel_GC400)
                gpu->identity.thread_count = 64;
-       else if (gpu->identity.model == 0x0500 ||
-                gpu->identity.model == 0x0530)
+       else if (gpu->identity.model == chipModel_GC500 ||
+                gpu->identity.model == chipModel_GC530)
                gpu->identity.thread_count = 128;
        else
                gpu->identity.thread_count = 256;
@@ -206,7 +228,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
        if (gpu->identity.vertex_output_buffer_size) {
                gpu->identity.vertex_output_buffer_size =
                        1 << gpu->identity.vertex_output_buffer_size;
-       } else if (gpu->identity.model == 0x0400) {
+       } else if (gpu->identity.model == chipModel_GC400) {
                if (gpu->identity.revision < 0x4000)
                        gpu->identity.vertex_output_buffer_size = 512;
                else if (gpu->identity.revision < 0x4200)
@@ -219,9 +241,8 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 
        switch (gpu->identity.instruction_count) {
        case 0:
-               if ((gpu->identity.model == 0x2000 &&
-                    gpu->identity.revision == 0x5108) ||
-                   gpu->identity.model == 0x880)
+               if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+                   gpu->identity.model == chipModel_GC880)
                        gpu->identity.instruction_count = 512;
                else
                        gpu->identity.instruction_count = 256;
@@ -242,6 +263,30 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
 
        if (gpu->identity.num_constants == 0)
                gpu->identity.num_constants = 168;
+
+       if (gpu->identity.varyings_count == 0) {
+               if (gpu->identity.minor_features1 & chipMinorFeatures1_HALTI0)
+                       gpu->identity.varyings_count = 12;
+               else
+                       gpu->identity.varyings_count = 8;
+       }
+
+       /*
+        * For some cores, two varyings are consumed for position, so the
+        * maximum varying count needs to be reduced by one.
+        */
+       if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
+           etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
+           etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
+           etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
+           etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
+           etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
+           etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
+           etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+           etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
+           etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
+           etnaviv_is_model_rev(gpu, GC880, 0x5106))
+               gpu->identity.varyings_count -= 1;
 }
 
 static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
@@ -251,12 +296,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
        chipIdentity = gpu_read(gpu, VIVS_HI_CHIP_IDENTITY);
 
        /* Special case for older graphic cores. */
-       if (((chipIdentity & VIVS_HI_CHIP_IDENTITY_FAMILY__MASK)
-            >> VIVS_HI_CHIP_IDENTITY_FAMILY__SHIFT) ==  0x01) {
-               gpu->identity.model    = 0x500; /* gc500 */
-               gpu->identity.revision =
-                       (chipIdentity & VIVS_HI_CHIP_IDENTITY_REVISION__MASK)
-                       >> VIVS_HI_CHIP_IDENTITY_REVISION__SHIFT;
+       if (etnaviv_field(chipIdentity, VIVS_HI_CHIP_IDENTITY_FAMILY) == 0x01) {
+               gpu->identity.model    = chipModel_GC500;
+               gpu->identity.revision = etnaviv_field(chipIdentity,
+                                        VIVS_HI_CHIP_IDENTITY_REVISION);
        } else {
 
                gpu->identity.model = gpu_read(gpu, VIVS_HI_CHIP_MODEL);
@@ -269,13 +312,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
                 * same.  Only for GC400 family.
                 */
                if ((gpu->identity.model & 0xff00) == 0x0400 &&
-                   gpu->identity.model != 0x0420) {
+                   gpu->identity.model != chipModel_GC420) {
                        gpu->identity.model = gpu->identity.model & 0x0400;
                }
 
                /* Another special case */
-               if (gpu->identity.model == 0x300 &&
-                   gpu->identity.revision == 0x2201) {
+               if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
                        u32 chipDate = gpu_read(gpu, VIVS_HI_CHIP_DATE);
                        u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);
 
@@ -295,11 +337,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
        gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
 
        /* Disable fast clear on GC700. */
-       if (gpu->identity.model == 0x700)
+       if (gpu->identity.model == chipModel_GC700)
                gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
 
-       if ((gpu->identity.model == 0x500 && gpu->identity.revision < 2) ||
-           (gpu->identity.model == 0x300 && gpu->identity.revision < 0x2000)) {
+       if ((gpu->identity.model == chipModel_GC500 &&
+            gpu->identity.revision < 2) ||
+           (gpu->identity.model == chipModel_GC300 &&
+            gpu->identity.revision < 0x2000)) {
 
                /*
                 * GC500 rev 1.x and GC300 rev < 2.0 doesn't have these
@@ -309,6 +353,8 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
                gpu->identity.minor_features1 = 0;
                gpu->identity.minor_features2 = 0;
                gpu->identity.minor_features3 = 0;
+               gpu->identity.minor_features4 = 0;
+               gpu->identity.minor_features5 = 0;
        } else
                gpu->identity.minor_features0 =
                                gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_0);
@@ -321,6 +367,10 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
                                gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_2);
                gpu->identity.minor_features3 =
                                gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_3);
+               gpu->identity.minor_features4 =
+                               gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_4);
+               gpu->identity.minor_features5 =
+                               gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
        }
 
        /* GC600 idle register reports zero bits where modules aren't present */
@@ -441,10 +491,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
        u16 prefetch;
 
-       if (gpu->identity.model == chipModel_GC320 &&
-           gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400 &&
-           (gpu->identity.revision == 0x5007 ||
-            gpu->identity.revision == 0x5220)) {
+       if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
+            etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
+           gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
                u32 mc_memory_debug;
 
                mc_memory_debug = gpu_read(gpu, VIVS_MC_DEBUG_MEMORY) & ~0xff;
@@ -466,7 +515,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
                  VIVS_HI_AXI_CONFIG_ARCACHE(2));
 
        /* GC2000 rev 5108 needs a special bus config */
-       if (gpu->identity.model == 0x2000 && gpu->identity.revision == 0x5108) {
+       if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
                u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
                bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
                                VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
@@ -511,8 +560,16 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 
        if (gpu->identity.model == 0) {
                dev_err(gpu->dev, "Unknown GPU model\n");
-               pm_runtime_put_autosuspend(gpu->dev);
-               return -ENXIO;
+               ret = -ENXIO;
+               goto fail;
+       }
+
+       /* Exclude VG cores with FE2.0 */
+       if (gpu->identity.features & chipFeatures_PIPE_VG &&
+           gpu->identity.features & chipFeatures_FE20) {
+               dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
+               ret = -ENXIO;
+               goto fail;
        }
 
        ret = etnaviv_hw_reset(gpu);
@@ -539,10 +596,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
                goto fail;
        }
 
-       /* TODO: we will leak here memory - fix it! */
-
        gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
        if (!gpu->mmu) {
+               iommu_domain_free(iommu);
                ret = -ENOMEM;
                goto fail;
        }
@@ -552,7 +608,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
        if (!gpu->buffer) {
                ret = -ENOMEM;
                dev_err(gpu->dev, "could not create command buffer\n");
-               goto fail;
+               goto destroy_iommu;
        }
        if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
                ret = -EINVAL;
@@ -582,6 +638,9 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 free_buffer:
        etnaviv_gpu_cmdbuf_free(gpu->buffer);
        gpu->buffer = NULL;
+destroy_iommu:
+       etnaviv_iommu_destroy(gpu->mmu);
+       gpu->mmu = NULL;
 fail:
        pm_runtime_mark_last_busy(gpu->dev);
        pm_runtime_put_autosuspend(gpu->dev);
@@ -642,6 +701,10 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
                   gpu->identity.minor_features2);
        seq_printf(m, "\t minor_features3: 0x%08x\n",
                   gpu->identity.minor_features3);
+       seq_printf(m, "\t minor_features4: 0x%08x\n",
+                  gpu->identity.minor_features4);
+       seq_printf(m, "\t minor_features5: 0x%08x\n",
+                  gpu->identity.minor_features5);
 
        seq_puts(m, "\tspecs\n");
        seq_printf(m, "\t stream_count:  %d\n",
@@ -664,6 +727,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
                        gpu->identity.instruction_count);
        seq_printf(m, "\t num_constants: %d\n",
                        gpu->identity.num_constants);
+       seq_printf(m, "\t varyings_count: %d\n",
+                       gpu->identity.varyings_count);
 
        seq_printf(m, "\taxi: 0x%08x\n", axi);
        seq_printf(m, "\tidle: 0x%08x\n", idle);
index c75d50359ab07baa40d7813fe07cd9928009df91..f233ac4c7c1cea456ee4c4c4b7e0d0878aefc093 100644 (file)
@@ -46,6 +46,12 @@ struct etnaviv_chip_identity {
        /* Supported minor feature 3 fields. */
        u32 minor_features3;
 
+       /* Supported minor feature 4 fields. */
+       u32 minor_features4;
+
+       /* Supported minor feature 5 fields. */
+       u32 minor_features5;
+
        /* Number of streams supported. */
        u32 stream_count;
 
@@ -75,6 +81,9 @@ struct etnaviv_chip_identity {
 
        /* Buffer size */
        u32 buffer_size;
+
+       /* Number of varyings */
+       u8 varyings_count;
 };
 
 struct etnaviv_event {
index 0064f2640396111bce54f3ae0caa9f45e94717d0..6a7de5f1454a51532c0809e2e741c53db4fba4e9 100644 (file)
@@ -8,8 +8,8 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
 git clone git://0x04.net/rules-ng-ng
 
 The rules-ng-ng source files this header was generated from are:
-- state_hi.xml (  23420 bytes, from 2015-03-25 11:47:21)
-- common.xml   (  18437 bytes, from 2015-03-25 11:27:41)
+- state_hi.xml (  24309 bytes, from 2015-12-12 09:02:53)
+- common.xml   (  18437 bytes, from 2015-12-12 09:02:53)
 
 Copyright (C) 2015
 */
@@ -182,8 +182,25 @@ Copyright (C) 2015
 
 #define VIVS_HI_CHIP_MINOR_FEATURE_3                           0x00000088
 
+#define VIVS_HI_CHIP_SPECS_3                                   0x0000008c
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK              0x000001f0
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT             4
+#define VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT(x)                 (((x) << VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_VARYINGS_COUNT__MASK)
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK              0x00000007
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT             0
+#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT(x)                 (((x) << VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK)
+
 #define VIVS_HI_CHIP_MINOR_FEATURE_4                           0x00000094
 
+#define VIVS_HI_CHIP_SPECS_4                                   0x0000009c
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK                        0x0001f000
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT               12
+#define VIVS_HI_CHIP_SPECS_4_STREAM_COUNT(x)                   (((x) << VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_4_STREAM_COUNT__MASK)
+
+#define VIVS_HI_CHIP_MINOR_FEATURE_5                           0x000000a0
+
+#define VIVS_HI_CHIP_PRODUCT_ID                                        0x000000a8
+
 #define VIVS_PM                                                        0x00000000
 
 #define VIVS_PM_POWER_CONTROLS                                 0x00000100
@@ -206,6 +223,11 @@ Copyright (C) 2015
 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE            0x00000001
 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_DE            0x00000002
 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PE            0x00000004
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SH            0x00000008
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_PA            0x00000010
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_SE            0x00000020
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_RA            0x00000040
+#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_TX            0x00000080
 
 #define VIVS_PM_PULSE_EATER                                    0x0000010c
 
index b79c316c2ad2ce7b7eb48e152a49c41ab6e93431..673164b331c8806e14aaa10d82e2f787c10bfb09 100644 (file)
@@ -1392,7 +1392,7 @@ static const struct component_ops exynos_dp_ops = {
 static int exynos_dp_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
-       struct device_node *panel_node = NULL, *bridge_node, *endpoint = NULL;
+       struct device_node *np = NULL, *endpoint = NULL;
        struct exynos_dp_device *dp;
        int ret;
 
@@ -1404,41 +1404,36 @@ static int exynos_dp_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, dp);
 
        /* This is for the backward compatibility. */
-       panel_node = of_parse_phandle(dev->of_node, "panel", 0);
-       if (panel_node) {
-               dp->panel = of_drm_find_panel(panel_node);
-               of_node_put(panel_node);
+       np = of_parse_phandle(dev->of_node, "panel", 0);
+       if (np) {
+               dp->panel = of_drm_find_panel(np);
+               of_node_put(np);
                if (!dp->panel)
                        return -EPROBE_DEFER;
-       } else {
-               endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
-               if (endpoint) {
-                       panel_node = of_graph_get_remote_port_parent(endpoint);
-                       if (panel_node) {
-                               dp->panel = of_drm_find_panel(panel_node);
-                               of_node_put(panel_node);
-                               if (!dp->panel)
-                                       return -EPROBE_DEFER;
-                       } else {
-                               DRM_ERROR("no port node for panel device.\n");
-                               return -EINVAL;
-                       }
-               }
-       }
-
-       if (endpoint)
                goto out;
+       }
 
        endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
        if (endpoint) {
-               bridge_node = of_graph_get_remote_port_parent(endpoint);
-               if (bridge_node) {
-                       dp->ptn_bridge = of_drm_find_bridge(bridge_node);
-                       of_node_put(bridge_node);
-                       if (!dp->ptn_bridge)
-                               return -EPROBE_DEFER;
-               } else
-                       return -EPROBE_DEFER;
+               np = of_graph_get_remote_port_parent(endpoint);
+               if (np) {
+                       /* The remote port can be either a panel or a bridge */
+                       dp->panel = of_drm_find_panel(np);
+                       if (!dp->panel) {
+                               dp->ptn_bridge = of_drm_find_bridge(np);
+                               if (!dp->ptn_bridge) {
+                                       of_node_put(np);
+                                       return -EPROBE_DEFER;
+                               }
+                       }
+                       of_node_put(np);
+               } else {
+                       DRM_ERROR("no remote endpoint device node found.\n");
+                       return -EINVAL;
+               }
+       } else {
+               DRM_ERROR("no port endpoint subnode found.\n");
+               return -EINVAL;
        }
 
 out:
index 68f0f36f6e7e073af4d88da75a7a84f0734576f2..1e535f981240d3bb81ac002c71b26f22f0085114 100644 (file)
@@ -340,20 +340,6 @@ static void exynos_drm_preclose(struct drm_device *dev,
 
 static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_pending_event *e, *et;
-       unsigned long flags;
-
-       if (!file->driver_priv)
-               return;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-       /* Release all events handled by page flip handler but not freed. */
-       list_for_each_entry_safe(e, et, &file->event_list, link) {
-               list_del(&e->link);
-               e->destroy(e);
-       }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-
        kfree(file->driver_priv);
        file->driver_priv = NULL;
 }
index d84a498ef099712c56f319c8333c8c8564ef457a..e977a81af2e67d91101e644b393d8ce1393b3fc8 100644 (file)
@@ -1906,8 +1906,7 @@ static int exynos_dsi_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
-static int exynos_dsi_suspend(struct device *dev)
+static int __maybe_unused exynos_dsi_suspend(struct device *dev)
 {
        struct drm_encoder *encoder = dev_get_drvdata(dev);
        struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1938,7 +1937,7 @@ static int exynos_dsi_suspend(struct device *dev)
        return 0;
 }
 
-static int exynos_dsi_resume(struct device *dev)
+static int __maybe_unused exynos_dsi_resume(struct device *dev)
 {
        struct drm_encoder *encoder = dev_get_drvdata(dev);
        struct exynos_dsi *dsi = encoder_to_dsi(encoder);
@@ -1972,7 +1971,6 @@ err_clk:
 
        return ret;
 }
-#endif
 
 static const struct dev_pm_ops exynos_dsi_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
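
Dropping the #ifdef CONFIG_PM guard in favour of __maybe_unused works because SET_RUNTIME_PM_OPS() expands to nothing when runtime PM is disabled, so the callbacks would otherwise trigger unused-function warnings. A generic sketch of the pattern (names are placeholders):

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int __maybe_unused example_runtime_suspend(struct device *dev)
{
	return 0;
}

static int __maybe_unused example_runtime_resume(struct device *dev)
{
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
};
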
index c17efdb238a6e24f6fcecac58c395b8964b12703..99369816ff9760e9ca9d18b7a8b576f007e93432 100644 (file)
@@ -880,7 +880,6 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
        struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
        struct drm_exynos_pending_g2d_event *e;
        struct timeval now;
-       unsigned long flags;
 
        if (list_empty(&runqueue_node->event_list))
                return;
@@ -893,10 +892,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
        e->event.tv_usec = now.tv_usec;
        e->event.cmdlist_no = cmdlist_no;
 
-       spin_lock_irqsave(&drm_dev->event_lock, flags);
-       list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-       wake_up_interruptible(&e->base.file_priv->event_wait);
-       spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+       drm_send_event(drm_dev, &e->base);
 }
 
 static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
@@ -1072,7 +1068,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        struct drm_exynos_pending_g2d_event *e;
        struct g2d_cmdlist_node *node;
        struct g2d_cmdlist *cmdlist;
-       unsigned long flags;
        int size;
        int ret;
 
@@ -1094,21 +1089,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
        node->event = NULL;
 
        if (req->event_type != G2D_EVENT_NOT) {
-               spin_lock_irqsave(&drm_dev->event_lock, flags);
-               if (file->event_space < sizeof(e->event)) {
-                       spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-                       ret = -ENOMEM;
-                       goto err;
-               }
-               file->event_space -= sizeof(e->event);
-               spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-
                e = kzalloc(sizeof(*node->event), GFP_KERNEL);
                if (!e) {
-                       spin_lock_irqsave(&drm_dev->event_lock, flags);
-                       file->event_space += sizeof(e->event);
-                       spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-
                        ret = -ENOMEM;
                        goto err;
                }
@@ -1116,9 +1098,12 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
                e->event.base.type = DRM_EXYNOS_G2D_EVENT;
                e->event.base.length = sizeof(e->event);
                e->event.user_data = req->user_data;
-               e->base.event = &e->event.base;
-               e->base.file_priv = file;
-               e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+               ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
+               if (ret) {
+                       kfree(e);
+                       goto err;
+               }
 
                node->event = e;
        }
@@ -1219,12 +1204,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
 err_unmap:
        g2d_unmap_cmdlist_gem(g2d, node, file);
 err_free_event:
-       if (node->event) {
-               spin_lock_irqsave(&drm_dev->event_lock, flags);
-               file->event_space += sizeof(e->event);
-               spin_unlock_irqrestore(&drm_dev->event_lock, flags);
-               kfree(node->event);
-       }
+       if (node->event)
+               drm_event_cancel_free(drm_dev, &node->event->base);
 err:
        g2d_put_cmdlist(g2d, node);
        return ret;
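
The conversion above replaces open-coded event_space accounting with the DRM core helpers: drm_event_reserve_init() charges the file's event space and binds the pending event to its payload, drm_send_event() queues and wakes the reader, and drm_event_cancel_free() undoes the reservation on an error path. A condensed, hedged sketch of that lifecycle (struct example_event and the type value are stand-ins, not the exynos definitions):

struct example_event {
	struct drm_pending_event base;
	struct drm_event event;	/* payload delivered to userspace */
};

static int example_queue_event(struct drm_device *dev, struct drm_file *file)
{
	struct example_event *e = kzalloc(sizeof(*e), GFP_KERNEL);
	int ret;

	if (!e)
		return -ENOMEM;

	e->event.type = 0x80000000;	/* placeholder driver event type */
	e->event.length = sizeof(e->event);

	/* Reserves file->event_space and links base to the payload. */
	ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
	if (ret) {
		kfree(e);
		return ret;
	}

	/* On completion; drm_event_cancel_free(dev, &e->base) would be
	 * the error-path counterpart. */
	drm_send_event(dev, &e->base);
	return 0;
}
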
index 67d24236e745c4dd1cc014ba0c5386784d3441f8..3eab0d15f0b4c3ed0640635a44c0e3d1dd4f49fd 100644 (file)
@@ -618,27 +618,18 @@ static void ipp_clean_mem_nodes(struct drm_device *drm_dev,
        mutex_unlock(&c_node->mem_lock);
 }
 
-static void ipp_free_event(struct drm_pending_event *event)
-{
-       kfree(event);
-}
-
 static int ipp_get_event(struct drm_device *drm_dev,
                struct drm_exynos_ipp_cmd_node *c_node,
                struct drm_exynos_ipp_queue_buf *qbuf)
 {
        struct drm_exynos_ipp_send_event *e;
-       unsigned long flags;
+       int ret;
 
        DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
 
        e = kzalloc(sizeof(*e), GFP_KERNEL);
-       if (!e) {
-               spin_lock_irqsave(&drm_dev->event_lock, flags);
-               c_node->filp->event_space += sizeof(e->event);
-               spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+       if (!e)
                return -ENOMEM;
-       }
 
        /* make event */
        e->event.base.type = DRM_EXYNOS_IPP_EVENT;
@@ -646,9 +637,13 @@ static int ipp_get_event(struct drm_device *drm_dev,
        e->event.user_data = qbuf->user_data;
        e->event.prop_id = qbuf->prop_id;
        e->event.buf_id[EXYNOS_DRM_OPS_DST] = qbuf->buf_id;
-       e->base.event = &e->event.base;
-       e->base.file_priv = c_node->filp;
-       e->base.destroy = ipp_free_event;
+
+       ret = drm_event_reserve_init(drm_dev, c_node->filp, &e->base, &e->event.base);
+       if (ret) {
+               kfree(e);
+               return ret;
+       }
+
        mutex_lock(&c_node->event_lock);
        list_add_tail(&e->base.link, &c_node->event_list);
        mutex_unlock(&c_node->event_lock);
@@ -1412,7 +1407,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
        struct drm_exynos_ipp_send_event *e;
        struct list_head *head;
        struct timeval now;
-       unsigned long flags;
        u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
        int ret, i;
 
@@ -1525,10 +1519,7 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
        for_each_ipp_ops(i)
                e->event.buf_id[i] = tbuf_id[i];
 
-       spin_lock_irqsave(&drm_dev->event_lock, flags);
-       list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-       wake_up_interruptible(&e->base.file_priv->event_wait);
-       spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+       drm_send_event(drm_dev, &e->base);
        mutex_unlock(&c_node->event_lock);
 
        DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",
index b5fbc1cbf02454d0e8c76de88f91f2c63e343e2c..0a5a60005f7e5711e14147dfa21990be1b88a2b8 100644 (file)
@@ -1289,8 +1289,7 @@ static int mixer_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int exynos_mixer_suspend(struct device *dev)
+static int __maybe_unused exynos_mixer_suspend(struct device *dev)
 {
        struct mixer_context *ctx = dev_get_drvdata(dev);
        struct mixer_resources *res = &ctx->mixer_res;
@@ -1306,7 +1305,7 @@ static int exynos_mixer_suspend(struct device *dev)
        return 0;
 }
 
-static int exynos_mixer_resume(struct device *dev)
+static int __maybe_unused exynos_mixer_resume(struct device *dev)
 {
        struct mixer_context *ctx = dev_get_drvdata(dev);
        struct mixer_resources *res = &ctx->mixer_res;
@@ -1342,7 +1341,6 @@ static int exynos_mixer_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static const struct dev_pm_ops exynos_mixer_pm_ops = {
        SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
index fca97d3fc84666b5d96b7f34194dd46b35946d37..9648b7f9a31cd759f7da4bb4e9ac26f990488bf4 100644 (file)
@@ -112,10 +112,6 @@ static int fsl_dcu_unload(struct drm_device *dev)
        return 0;
 }
 
-static void fsl_dcu_drm_preclose(struct drm_device *dev, struct drm_file *file)
-{
-}
-
 static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg)
 {
        struct drm_device *dev = arg;
@@ -191,7 +187,6 @@ static struct drm_driver fsl_dcu_drm_driver = {
                                | DRIVER_PRIME | DRIVER_ATOMIC,
        .load                   = fsl_dcu_load,
        .unload                 = fsl_dcu_unload,
-       .preclose               = fsl_dcu_drm_preclose,
        .irq_handler            = fsl_dcu_drm_irq,
        .get_vblank_counter     = drm_vblank_no_hw_counter,
        .enable_vblank          = fsl_dcu_drm_enable_vblank,
index cb95765050cc0aade141941dfde9e77993373179..033d894d030e476fed14e393482dfa0d2d10dbb6 100644 (file)
@@ -674,29 +674,17 @@ static const struct drm_mode_config_funcs psb_mode_funcs = {
        .output_poll_changed = psbfb_output_poll_changed,
 };
 
-static int psb_create_backlight_property(struct drm_device *dev)
-{
-       struct drm_psb_private *dev_priv = dev->dev_private;
-       struct drm_property *backlight;
-
-       if (dev_priv->backlight_property)
-               return 0;
-
-       backlight = drm_property_create_range(dev, 0, "backlight", 0, 100);
-
-       dev_priv->backlight_property = backlight;
-
-       return 0;
-}
-
 static void psb_setup_outputs(struct drm_device *dev)
 {
        struct drm_psb_private *dev_priv = dev->dev_private;
        struct drm_connector *connector;
 
        drm_mode_create_scaling_mode_property(dev);
-       psb_create_backlight_property(dev);
 
+       /* It is ok for this to fail - we just don't get backlight control */
+       if (!dev_priv->backlight_property)
+               dev_priv->backlight_property = drm_property_create_range(dev, 0,
+                                                       "backlight", 0, 100);
        dev_priv->ops->output_init(dev);
 
        list_for_each_entry(connector, &dev->mode_config.connector_list,
index 566d330aaeea272c94f168784b2e07bea6aa9554..e7e22187c5390b91b793aae948a0634fb2019f1d 100644 (file)
@@ -436,7 +436,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
        return 0;
 
 err:
-       while (--i) {
+       while (i--) {
                struct intel_gmbus *bus = &dev_priv->gmbus[i];
                i2c_del_adapter(&bus->adapter);
        }
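
The one-character change above fixes the error unwind: "while (--i)" skipped adapter 0 and would have misbehaved had the very first registration failed, while "while (i--)" tears down exactly the adapters [0, i) that were registered. The general idiom, sketched with hypothetical helpers:

/* register_item()/unregister_item() and struct item are hypothetical. */
static int example_register_all(struct item *items, unsigned int n)
{
	unsigned int i;
	int ret;

	for (i = 0; i < n; i++) {
		ret = register_item(&items[i]);
		if (ret)
			goto err;
	}
	return 0;

err:
	while (i--)	/* not --i: entry 0 must be undone too */
		unregister_item(&items[i]);
	return ret;
}
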
index d758f4cc68055b39b8b02b5202f35f936b653a95..907cb51795c36fd4535444cbbd401d49a82ff0cb 100644 (file)
@@ -382,16 +382,6 @@ static int mdfld_dsi_connector_mode_valid(struct drm_connector *connector,
        return MODE_OK;
 }
 
-static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
-       if (mode == connector->dpms)
-               return;
-
-       /*first, execute dpms*/
-
-       drm_helper_connector_dpms(connector, mode);
-}
-
 static struct drm_encoder *mdfld_dsi_connector_best_encoder(
                                struct drm_connector *connector)
 {
@@ -404,7 +394,7 @@ static struct drm_encoder *mdfld_dsi_connector_best_encoder(
 
 /*DSI connector funcs*/
 static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
-       .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
+       .dpms = drm_helper_connector_dpms,
        .detect = mdfld_dsi_connector_detect,
        .fill_modes = drm_helper_probe_single_connector_modes,
        .set_property = mdfld_dsi_connector_set_property,
index 92e7e5795398e80a6ab46f51d3aad4d22a4e8346..4e1c6850520ee50bd024fb3da610a0f5ab8c754d 100644 (file)
@@ -442,14 +442,6 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
        /* FIXME: do we need to wrap the other side of this */
 }
 
-/*
- * When a client dies:
- *    - Check for and clean up flipped page state
- */
-static void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
-{
-}
-
 static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        return drm_get_pci_dev(pdev, ent, &driver);
@@ -495,7 +487,6 @@ static struct drm_driver driver = {
        .load = psb_driver_load,
        .unload = psb_driver_unload,
        .lastclose = psb_driver_lastclose,
-       .preclose = psb_driver_preclose,
        .set_busid = drm_pci_set_busid,
 
        .num_ioctls = ARRAY_SIZE(psb_ioctls),
index 533d1e3d4a999f971b2479f471c965e3708a73f2..a02112ba1c3df692b3a089a3ab24a8f30d35b7ec 100644 (file)
@@ -136,6 +136,7 @@ static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
        case ADV7511_REG_BKSV(3):
        case ADV7511_REG_BKSV(4):
        case ADV7511_REG_DDC_STATUS:
+       case ADV7511_REG_EDID_READ_CTRL:
        case ADV7511_REG_BSTATUS(0):
        case ADV7511_REG_BSTATUS(1):
        case ADV7511_REG_CHIP_ID_HIGH:
@@ -362,24 +363,31 @@ static void adv7511_power_on(struct adv7511 *adv7511)
 {
        adv7511->current_edid_segment = -1;
 
-       regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                    ADV7511_INT0_EDID_READY);
-       regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-                    ADV7511_INT1_DDC_ERROR);
        regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
                           ADV7511_POWER_POWER_DOWN, 0);
+       if (adv7511->i2c_main->irq) {
+               /*
+                * Documentation says the INT_ENABLE registers are reset in
+                * POWER_DOWN mode. My 7511w preserved the bits, however.
+                * Still, let's be safe and stick to the documentation.
+                */
+               regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+                            ADV7511_INT0_EDID_READY);
+               regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+                            ADV7511_INT1_DDC_ERROR);
+       }
 
        /*
-        * Per spec it is allowed to pulse the HDP signal to indicate that the
+        * Per spec it is allowed to pulse the HPD signal to indicate that the
         * EDID information has changed. Some monitors do this when they wakeup
-        * from standby or are enabled. When the HDP goes low the adv7511 is
+        * from standby or are enabled. When the HPD goes low the adv7511 is
         * reset and the outputs are disabled which might cause the monitor to
-        * go to standby again. To avoid this we ignore the HDP pin for the
+        * go to standby again. To avoid this we ignore the HPD pin for the
         * first few seconds after enabling the output.
         */
        regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-                          ADV7511_REG_POWER2_HDP_SRC_MASK,
-                          ADV7511_REG_POWER2_HDP_SRC_NONE);
+                          ADV7511_REG_POWER2_HPD_SRC_MASK,
+                          ADV7511_REG_POWER2_HPD_SRC_NONE);
 
        /*
         * Most of the registers are reset during power down or when HPD is low.
@@ -413,9 +421,9 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
        if (ret < 0)
                return false;
 
-       if (irq0 & ADV7511_INT0_HDP) {
+       if (irq0 & ADV7511_INT0_HPD) {
                regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                            ADV7511_INT0_HDP);
+                            ADV7511_INT0_HPD);
                return true;
        }
 
@@ -438,7 +446,7 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
        regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
        regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
 
-       if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+       if (irq0 & ADV7511_INT0_HPD && adv7511->encoder)
                drm_helper_hpd_irq_event(adv7511->encoder->dev);
 
        if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
@@ -567,12 +575,14 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
 
        /* Reading the EDID only works if the device is powered */
        if (!adv7511->powered) {
-               regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
-                            ADV7511_INT0_EDID_READY);
-               regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
-                            ADV7511_INT1_DDC_ERROR);
                regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
                                   ADV7511_POWER_POWER_DOWN, 0);
+               if (adv7511->i2c_main->irq) {
+                       regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+                                    ADV7511_INT0_EDID_READY);
+                       regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+                                    ADV7511_INT1_DDC_ERROR);
+               }
                adv7511->current_edid_segment = -1;
        }
 
@@ -638,10 +648,10 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
                if (adv7511->status == connector_status_connected)
                        status = connector_status_disconnected;
        } else {
-               /* Renable HDP sensing */
+               /* Re-enable HPD sensing */
                regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
-                                  ADV7511_REG_POWER2_HDP_SRC_MASK,
-                                  ADV7511_REG_POWER2_HDP_SRC_BOTH);
+                                  ADV7511_REG_POWER2_HPD_SRC_MASK,
+                                  ADV7511_REG_POWER2_HPD_SRC_BOTH);
        }
 
        adv7511->status = status;
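
Adding ADV7511_REG_EDID_READ_CTRL to the volatile list keeps regmap from serving that register out of its cache, so reads and cache syncs never use a stale value for it. A hedged sketch of how a register is marked volatile via regmap (the config shown is illustrative, not the driver's actual table):

/* Illustrative only: registers returned as true here bypass the cache. */
static bool example_volatile_reg(struct device *dev, unsigned int reg)
{
	switch (reg) {
	case ADV7511_REG_EDID_READ_CTRL:
	case ADV7511_REG_DDC_STATUS:
		return true;
	default:
		return false;
	}
}

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.cache_type = REGCACHE_RBTREE,
	.volatile_reg = example_volatile_reg,
};
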
index 6599ed538426d60f41a9dbebe3e9bcd8e8b11acc..38515b30cedfc84812a556638fe7333ff6ebeb8b 100644 (file)
@@ -90,7 +90,7 @@
 #define ADV7511_CSC_ENABLE                     BIT(7)
 #define ADV7511_CSC_UPDATE_MODE                        BIT(5)
 
-#define ADV7511_INT0_HDP                       BIT(7)
+#define ADV7511_INT0_HPD                       BIT(7)
 #define ADV7511_INT0_VSYNC                     BIT(5)
 #define ADV7511_INT0_AUDIO_FIFO_FULL           BIT(4)
 #define ADV7511_INT0_EDID_READY                        BIT(2)
 #define ADV7511_PACKET_ENABLE_SPARE2           BIT(1)
 #define ADV7511_PACKET_ENABLE_SPARE1           BIT(0)
 
-#define ADV7511_REG_POWER2_HDP_SRC_MASK                0xc0
-#define ADV7511_REG_POWER2_HDP_SRC_BOTH                0x00
-#define ADV7511_REG_POWER2_HDP_SRC_HDP         0x40
-#define ADV7511_REG_POWER2_HDP_SRC_CEC         0x80
-#define ADV7511_REG_POWER2_HDP_SRC_NONE                0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_MASK                0xc0
+#define ADV7511_REG_POWER2_HPD_SRC_BOTH                0x00
+#define ADV7511_REG_POWER2_HPD_SRC_HPD         0x40
+#define ADV7511_REG_POWER2_HPD_SRC_CEC         0x80
+#define ADV7511_REG_POWER2_HPD_SRC_NONE                0xc0
 #define ADV7511_REG_POWER2_TDMS_ENABLE         BIT(4)
 #define ADV7511_REG_POWER2_GATE_INPUT_CLK      BIT(0)
 
index 90db5f4dcce552622104c0d74d2cbc199b3e8400..0594c45f71646d7a512707d7f0b4f89a0875b30b 100644 (file)
@@ -253,6 +253,8 @@ static int ch7006_encoder_create_resources(struct drm_encoder *encoder,
        drm_mode_create_tv_properties(dev, NUM_TV_NORMS, ch7006_tv_norm_names);
 
        priv->scale_property = drm_property_create_range(dev, 0, "scale", 0, 2);
+       if (!priv->scale_property)
+               return -ENOMEM;
 
        drm_object_attach_property(&connector->base, conf->tv_select_subconnector_property,
                                      priv->select_subconnector);
index fcd77b27514dfdb738334024c2b1201732af6dc3..4c59793c4ccb6e01865dbfe97efe385b80a8fae1 100644 (file)
@@ -2,15 +2,12 @@ config DRM_I915
        tristate "Intel 8xx/9xx/G3x/G4x/HD Graphics"
        depends on DRM
        depends on X86 && PCI
-       depends on (AGP || AGP=n)
        select INTEL_GTT
-       select AGP_INTEL if AGP
        select INTERVAL_TREE
        # we need shmfs for the swappable backing store, and in particular
        # the shmem_readpage() which depends upon tmpfs
        select SHMEM
        select TMPFS
-       select STOP_MACHINE
        select DRM_KMS_HELPER
        select DRM_PANEL
        select DRM_MIPI_DSI
index 0fc38bb7276c26920b372ca52c053eba504aa722..ec0c2a05eed67983e8a9076fad057faddadb8f38 100644 (file)
@@ -1331,7 +1331,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
        struct intel_engine_cs *ring;
        u64 acthd[I915_NUM_RINGS];
        u32 seqno[I915_NUM_RINGS];
-       int i;
+       u32 instdone[I915_NUM_INSTDONE_REG];
+       int i, j;
 
        if (!i915.enable_hangcheck) {
                seq_printf(m, "Hangcheck disabled\n");
@@ -1345,6 +1346,8 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                acthd[i] = intel_ring_get_active_head(ring);
        }
 
+       i915_get_extra_instdone(dev, instdone);
+
        intel_runtime_pm_put(dev_priv);
 
        if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
@@ -1365,6 +1368,21 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
                           (long long)ring->hangcheck.max_acthd);
                seq_printf(m, "\tscore = %d\n", ring->hangcheck.score);
                seq_printf(m, "\taction = %d\n", ring->hangcheck.action);
+
+               if (ring->id == RCS) {
+                       seq_puts(m, "\tinstdone read =");
+
+                       for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+                               seq_printf(m, " 0x%08x", instdone[j]);
+
+                       seq_puts(m, "\n\tinstdone accu =");
+
+                       for (j = 0; j < I915_NUM_INSTDONE_REG; j++)
+                               seq_printf(m, " 0x%08x",
+                                          ring->hangcheck.instdone[j]);
+
+                       seq_puts(m, "\n");
+               }
        }
 
        return 0;
@@ -1942,11 +1960,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
                seq_puts(m, "HW context ");
                describe_ctx(m, ctx);
-               for_each_ring(ring, dev_priv, i) {
-                       if (ring->default_context == ctx)
-                               seq_printf(m, "(default context %s) ",
-                                          ring->name);
-               }
+               if (ctx == dev_priv->kernel_context)
+                       seq_printf(m, "(kernel context) ");
 
                if (i915.enable_execlists) {
                        seq_putc(m, '\n');
@@ -1976,12 +1991,13 @@ static int i915_context_status(struct seq_file *m, void *unused)
 }
 
 static void i915_dump_lrc_obj(struct seq_file *m,
-                             struct intel_engine_cs *ring,
-                             struct drm_i915_gem_object *ctx_obj)
+                             struct intel_context *ctx,
+                             struct intel_engine_cs *ring)
 {
        struct page *page;
        uint32_t *reg_state;
        int j;
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
        unsigned long ggtt_offset = 0;
 
        if (ctx_obj == NULL) {
@@ -1991,7 +2007,7 @@ static void i915_dump_lrc_obj(struct seq_file *m,
        }
 
        seq_printf(m, "CONTEXT: %s %u\n", ring->name,
-                  intel_execlists_ctx_id(ctx_obj));
+                  intel_execlists_ctx_id(ctx, ring));
 
        if (!i915_gem_obj_ggtt_bound(ctx_obj))
                seq_puts(m, "\tNot bound in GGTT\n");
@@ -2037,13 +2053,10 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
        if (ret)
                return ret;
 
-       list_for_each_entry(ctx, &dev_priv->context_list, link) {
-               for_each_ring(ring, dev_priv, i) {
-                       if (ring->default_context != ctx)
-                               i915_dump_lrc_obj(m, ring,
-                                                 ctx->engine[i].state);
-               }
-       }
+       list_for_each_entry(ctx, &dev_priv->context_list, link)
+               if (ctx != dev_priv->kernel_context)
+                       for_each_ring(ring, dev_priv, i)
+                               i915_dump_lrc_obj(m, ctx, ring);
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -2092,13 +2105,13 @@ static int i915_execlists(struct seq_file *m, void *data)
                seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer);
 
                read_pointer = ring->next_context_status_buffer;
-               write_pointer = status_pointer & 0x07;
+               write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
                if (read_pointer > write_pointer)
-                       write_pointer += 6;
+                       write_pointer += GEN8_CSB_ENTRIES;
                seq_printf(m, "\tRead pointer: 0x%08X, write pointer 0x%08X\n",
                           read_pointer, write_pointer);
 
-               for (i = 0; i < 6; i++) {
+               for (i = 0; i < GEN8_CSB_ENTRIES; i++) {
                        status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
                        ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
 
@@ -2115,11 +2128,8 @@ static int i915_execlists(struct seq_file *m, void *data)
 
                seq_printf(m, "\t%d requests in queue\n", count);
                if (head_req) {
-                       struct drm_i915_gem_object *ctx_obj;
-
-                       ctx_obj = head_req->ctx->engine[ring_id].state;
                        seq_printf(m, "\tHead request id: %u\n",
-                                  intel_execlists_ctx_id(ctx_obj));
+                                  intel_execlists_ctx_id(head_req->ctx, ring));
                        seq_printf(m, "\tHead request tail: %u\n",
                                   head_req->tail);
                }
@@ -2453,9 +2463,9 @@ static void i915_guc_client_info(struct seq_file *m,
 
        for_each_ring(ring, dev_priv, i) {
                seq_printf(m, "\tSubmissions: %llu %s\n",
-                               client->submissions[i],
+                               client->submissions[ring->guc_id],
                                ring->name);
-               tot += client->submissions[i];
+               tot += client->submissions[ring->guc_id];
        }
        seq_printf(m, "\tTotal: %llu\n", tot);
 }
@@ -2492,10 +2502,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
 
        seq_printf(m, "\nGuC submissions:\n");
        for_each_ring(ring, dev_priv, i) {
-               seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x %9d\n",
-                       ring->name, guc.submissions[i],
-                       guc.last_seqno[i], guc.last_seqno[i]);
-               total += guc.submissions[i];
+               seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
+                       ring->name, guc.submissions[ring->guc_id],
+                       guc.last_seqno[ring->guc_id]);
+               total += guc.submissions[ring->guc_id];
        }
        seq_printf(m, "\t%s: %llu\n", "Total", total);
 
@@ -2573,6 +2583,10 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
                                enabled = true;
                }
        }
+
+       seq_printf(m, "Main link in standby mode: %s\n",
+                  yesno(dev_priv->psr.link_standby));
+
        seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled));
 
        if (!HAS_DDI(dev))
@@ -3211,9 +3225,11 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 {
        int i;
        int ret;
+       struct intel_engine_cs *ring;
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct i915_workarounds *workarounds = &dev_priv->workarounds;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
@@ -3221,15 +3237,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
 
        intel_runtime_pm_get(dev_priv);
 
-       seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
-       for (i = 0; i < dev_priv->workarounds.count; ++i) {
+       seq_printf(m, "Workarounds applied: %d\n", workarounds->count);
+       for_each_ring(ring, dev_priv, i)
+               seq_printf(m, "HW whitelist count for %s: %d\n",
+                          ring->name, workarounds->hw_whitelist_count[i]);
+       for (i = 0; i < workarounds->count; ++i) {
                i915_reg_t addr;
                u32 mask, value, read;
                bool ok;
 
-               addr = dev_priv->workarounds.reg[i].addr;
-               mask = dev_priv->workarounds.reg[i].mask;
-               value = dev_priv->workarounds.reg[i].value;
+               addr = workarounds->reg[i].addr;
+               mask = workarounds->reg[i].mask;
+               value = workarounds->reg[i].value;
                read = I915_READ(addr);
                ok = (value & mask) == (read & mask);
                seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
index d70d96fe553bb65515540f048bdfe224f15a48e6..2df2fac04708be85f0b596ee07df82e894434b9c 100644 (file)
@@ -391,20 +391,13 @@ static int i915_load_modeset_init(struct drm_device *dev)
        if (ret)
                goto cleanup_vga_client;
 
-       /* Initialise stolen first so that we may reserve preallocated
-        * objects for the BIOS to KMS transition.
-        */
-       ret = i915_gem_init_stolen(dev);
-       if (ret)
-               goto cleanup_vga_switcheroo;
-
        intel_power_domains_init_hw(dev_priv, false);
 
        intel_csr_ucode_init(dev_priv);
 
        ret = intel_irq_install(dev_priv);
        if (ret)
-               goto cleanup_gem_stolen;
+               goto cleanup_csr;
 
        intel_setup_gmbus(dev);
 
@@ -451,16 +444,15 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 cleanup_gem:
        mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
+       i915_gem_cleanup_engines(dev);
        mutex_unlock(&dev->struct_mutex);
 cleanup_irq:
        intel_guc_ucode_fini(dev);
        drm_irq_uninstall(dev);
        intel_teardown_gmbus(dev);
-cleanup_gem_stolen:
-       i915_gem_cleanup_stolen(dev);
-cleanup_vga_switcheroo:
+cleanup_csr:
+       intel_csr_ucode_fini(dev_priv);
        vga_switcheroo_unregister_client(dev->pdev);
 cleanup_vga_client:
        vga_client_register(dev->pdev, NULL, NULL, NULL);
@@ -816,7 +808,41 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
                     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
                        DRM_INFO("Display fused off, disabling\n");
                        info->num_pipes = 0;
+               } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
+                       DRM_INFO("PipeC fused off\n");
+                       info->num_pipes -= 1;
                }
+       } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
+               u32 dfsm = I915_READ(SKL_DFSM);
+               u8 disabled_mask = 0;
+               bool invalid;
+               int num_bits;
+
+               if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
+                       disabled_mask |= BIT(PIPE_A);
+               if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
+                       disabled_mask |= BIT(PIPE_B);
+               if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
+                       disabled_mask |= BIT(PIPE_C);
+
+               num_bits = hweight8(disabled_mask);
+
+               switch (disabled_mask) {
+               case BIT(PIPE_A):
+               case BIT(PIPE_B):
+               case BIT(PIPE_A) | BIT(PIPE_B):
+               case BIT(PIPE_A) | BIT(PIPE_C):
+                       invalid = true;
+                       break;
+               default:
+                       invalid = false;
+               }
+
+               if (num_bits > info->num_pipes || invalid)
+                       DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
+                                 disabled_mask);
+               else
+                       info->num_pipes -= num_bits;
        }
 
        /* Initialize slice/subslice/EU info */
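
The SKL_DFSM decoding added above collects the fused-off pipes into a bit mask, counts them with hweight8(), and rejects combinations the code does not expect: the only masks treated as valid are the empty mask, pipe C alone, pipes B+C, and all three pipes. A self-contained sketch of the same mask-and-validate logic, with BIT() and the pipe numbering re-declared locally for illustration:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1u << (n))
enum { PIPE_A, PIPE_B, PIPE_C };        /* local stand-ins for illustration */

/* Mirrors the validity switch above: pipes may only be fused off
 * "from pipe C downwards", so e.g. pipe A alone is rejected. */
static bool fuse_mask_invalid(unsigned int mask)
{
        switch (mask) {
        case BIT(PIPE_A):
        case BIT(PIPE_B):
        case BIT(PIPE_A) | BIT(PIPE_B):
        case BIT(PIPE_A) | BIT(PIPE_C):
                return true;
        default:
                return false;
        }
}

int main(void)
{
        for (unsigned int mask = 0; mask < 8; mask++)
                printf("mask 0x%x: %s, %d pipe(s) fused off\n", mask,
                       fuse_mask_invalid(mask) ? "invalid" : "valid",
                       __builtin_popcount(mask));  /* hweight8() equivalent */
        return 0;
}
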
@@ -855,6 +881,94 @@ static void intel_init_dpio(struct drm_i915_private *dev_priv)
        }
 }
 
+static int i915_workqueues_init(struct drm_i915_private *dev_priv)
+{
+       /*
+        * The i915 workqueue is primarily used for batched retirement of
+        * requests (and thus managing bo) once the task has been completed
+        * by the GPU. i915_gem_retire_requests() is called directly when we
+        * need high-priority retirement, such as waiting for an explicit
+        * bo.
+        *
+        * It is also used for periodic low-priority events, such as
+        * idle-timers and recording error state.
+        *
+        * All tasks on the workqueue are expected to acquire the dev mutex
+        * so there is no point in running more than one instance of the
+        * workqueue at any time.  Use an ordered one.
+        */
+       dev_priv->wq = alloc_ordered_workqueue("i915", 0);
+       if (dev_priv->wq == NULL)
+               goto out_err;
+
+       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
+       if (dev_priv->hotplug.dp_wq == NULL)
+               goto out_free_wq;
+
+       dev_priv->gpu_error.hangcheck_wq =
+               alloc_ordered_workqueue("i915-hangcheck", 0);
+       if (dev_priv->gpu_error.hangcheck_wq == NULL)
+               goto out_free_dp_wq;
+
+       return 0;
+
+out_free_dp_wq:
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
+out_free_wq:
+       destroy_workqueue(dev_priv->wq);
+out_err:
+       DRM_ERROR("Failed to allocate workqueues.\n");
+
+       return -ENOMEM;
+}
+
+static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
+{
+       destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
+       destroy_workqueue(dev_priv->hotplug.dp_wq);
+       destroy_workqueue(dev_priv->wq);
+}
+
+static int i915_mmio_setup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       int mmio_bar;
+       int mmio_size;
+
+       mmio_bar = IS_GEN2(dev) ? 1 : 0;
+       /*
+        * Before gen4, the registers and the GTT are behind different BARs.
+        * However, from gen4 onwards, the registers and the GTT are shared
+        * in the same BAR, so we want to restrict this ioremap from
+        * clobbering the GTT which we want ioremap_wc instead. Fortunately,
+        * the register BAR remains the same size for all the earlier
+        * generations up to Ironlake.
+        */
+       if (INTEL_INFO(dev)->gen < 5)
+               mmio_size = 512 * 1024;
+       else
+               mmio_size = 2 * 1024 * 1024;
+       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
+       if (dev_priv->regs == NULL) {
+               DRM_ERROR("failed to map registers\n");
+
+               return -EIO;
+       }
+
+       /* Try to make sure MCHBAR is enabled before poking at it */
+       intel_setup_mchbar(dev);
+
+       return 0;
+}
+
+static void i915_mmio_cleanup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       intel_teardown_mchbar(dev);
+       pci_iounmap(dev->pdev, dev_priv->regs);
+}
+
 /**
  * i915_driver_load - setup chip and create an initial config
  * @dev: DRM device
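
i915_workqueues_init(), factored out above, uses the usual goto-unwind error handling: each allocation gets its own failure label, and the labels release the earlier resources in reverse order before returning -ENOMEM. A minimal, self-contained sketch of the same pattern, with plain malloc()/free() standing in for alloc_ordered_workqueue()/destroy_workqueue():

#include <stdio.h>
#include <stdlib.h>

struct ctx { void *wq, *dp_wq, *hangcheck_wq; };

/* Acquire three resources; on failure, release the ones already
 * acquired in reverse order, as i915_workqueues_init() does. */
static int ctx_init(struct ctx *c)
{
        c->wq = malloc(64);
        if (!c->wq)
                goto out_err;

        c->dp_wq = malloc(64);
        if (!c->dp_wq)
                goto out_free_wq;

        c->hangcheck_wq = malloc(64);
        if (!c->hangcheck_wq)
                goto out_free_dp_wq;

        return 0;

out_free_dp_wq:
        free(c->dp_wq);
out_free_wq:
        free(c->wq);
out_err:
        fprintf(stderr, "allocation failed\n");
        return -1;
}

/* Teardown mirrors the error path, newest resource first. */
static void ctx_cleanup(struct ctx *c)
{
        free(c->hangcheck_wq);
        free(c->dp_wq);
        free(c->wq);
}

int main(void)
{
        struct ctx c;

        if (ctx_init(&c) == 0)
                ctx_cleanup(&c);
        return 0;
}
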
@@ -870,7 +984,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 {
        struct drm_i915_private *dev_priv;
        struct intel_device_info *info, *device_info;
-       int ret = 0, mmio_bar, mmio_size;
+       int ret = 0;
        uint32_t aperture_size;
 
        info = (struct intel_device_info *) flags;
@@ -897,6 +1011,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        mutex_init(&dev_priv->modeset_restore_lock);
        mutex_init(&dev_priv->av_mutex);
 
+       ret = i915_workqueues_init(dev_priv);
+       if (ret < 0)
+               goto out_free_priv;
+
        intel_pm_setup(dev);
 
        intel_runtime_pm_get(dev_priv);
@@ -915,28 +1033,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
-               goto free_priv;
+               goto out_runtime_pm_put;
        }
 
-       mmio_bar = IS_GEN2(dev) ? 1 : 0;
-       /* Before gen4, the registers and the GTT are behind different BARs.
-        * However, from gen4 onwards, the registers and the GTT are shared
-        * in the same BAR, so we want to restrict this ioremap from
-        * clobbering the GTT which we want ioremap_wc instead. Fortunately,
-        * the register BAR remains the same size for all the earlier
-        * generations up to Ironlake.
-        */
-       if (info->gen < 5)
-               mmio_size = 512*1024;
-       else
-               mmio_size = 2*1024*1024;
-
-       dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
-       if (!dev_priv->regs) {
-               DRM_ERROR("failed to map registers\n");
-               ret = -EIO;
+       ret = i915_mmio_setup(dev);
+       if (ret < 0)
                goto put_bridge;
-       }
 
        /* This must be called before any calls to HAS_PCH_* */
        intel_detect_pch(dev);
@@ -945,7 +1047,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
        ret = i915_gem_gtt_init(dev);
        if (ret)
-               goto out_freecsr;
+               goto out_uncore_fini;
 
        /* WARNING: Apparently we must kick fbdev drivers before vgacon,
         * otherwise the vga fbdev driver falls over. */
@@ -991,49 +1093,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
                                              aperture_size);
 
-       /* The i915 workqueue is primarily used for batched retirement of
-        * requests (and thus managing bo) once the task has been completed
-        * by the GPU. i915_gem_retire_requests() is called directly when we
-        * need high-priority retirement, such as waiting for an explicit
-        * bo.
-        *
-        * It is also used for periodic low-priority events, such as
-        * idle-timers and recording error state.
-        *
-        * All tasks on the workqueue are expected to acquire the dev mutex
-        * so there is no point in running more than one instance of the
-        * workqueue at any time.  Use an ordered one.
-        */
-       dev_priv->wq = alloc_ordered_workqueue("i915", 0);
-       if (dev_priv->wq == NULL) {
-               DRM_ERROR("Failed to create our workqueue.\n");
-               ret = -ENOMEM;
-               goto out_mtrrfree;
-       }
-
-       dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
-       if (dev_priv->hotplug.dp_wq == NULL) {
-               DRM_ERROR("Failed to create our dp workqueue.\n");
-               ret = -ENOMEM;
-               goto out_freewq;
-       }
-
-       dev_priv->gpu_error.hangcheck_wq =
-               alloc_ordered_workqueue("i915-hangcheck", 0);
-       if (dev_priv->gpu_error.hangcheck_wq == NULL) {
-               DRM_ERROR("Failed to create our hangcheck workqueue.\n");
-               ret = -ENOMEM;
-               goto out_freedpwq;
-       }
-
        intel_irq_init(dev_priv);
        intel_uncore_sanitize(dev);
 
-       /* Try to make sure MCHBAR is enabled before poking at it */
-       intel_setup_mchbar(dev);
        intel_opregion_setup(dev);
 
-       i915_gem_load(dev);
+       i915_gem_load_init(dev);
+       i915_gem_shrinker_init(dev_priv);
 
        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
@@ -1046,8 +1112,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
         * be lost or delayed, but we use them anyways to avoid
         * stuck interrupts on some machines.
         */
-       if (!IS_I945G(dev) && !IS_I945GM(dev))
-               pci_enable_msi(dev->pdev);
+       if (!IS_I945G(dev) && !IS_I945GM(dev)) {
+               if (pci_enable_msi(dev->pdev) < 0)
+                       DRM_DEBUG_DRIVER("can't enable MSI");
+       }
 
        intel_device_info_runtime_init(dev);
 
@@ -1097,38 +1165,29 @@ out_power_well:
        intel_power_domains_fini(dev_priv);
        drm_vblank_cleanup(dev);
 out_gem_unload:
-       WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-       unregister_shrinker(&dev_priv->mm.shrinker);
+       i915_gem_shrinker_cleanup(dev_priv);
 
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);
 
        intel_teardown_mchbar(dev);
        pm_qos_remove_request(&dev_priv->pm_qos);
-       destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
-out_freedpwq:
-       destroy_workqueue(dev_priv->hotplug.dp_wq);
-out_freewq:
-       destroy_workqueue(dev_priv->wq);
-out_mtrrfree:
        arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
 out_gtt:
        i915_global_gtt_cleanup(dev);
-out_freecsr:
-       intel_csr_ucode_fini(dev_priv);
+out_uncore_fini:
        intel_uncore_fini(dev);
-       pci_iounmap(dev->pdev, dev_priv->regs);
+       i915_mmio_cleanup(dev);
 put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
-free_priv:
-       kmem_cache_destroy(dev_priv->requests);
-       kmem_cache_destroy(dev_priv->vmas);
-       kmem_cache_destroy(dev_priv->objects);
-
+       i915_gem_load_cleanup(dev);
+out_runtime_pm_put:
        intel_runtime_pm_put(dev_priv);
-
+       i915_workqueues_cleanup(dev_priv);
+out_free_priv:
        kfree(dev_priv);
+
        return ret;
 }
 
@@ -1153,8 +1212,7 @@ int i915_driver_unload(struct drm_device *dev)
 
        i915_teardown_sysfs(dev);
 
-       WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
-       unregister_shrinker(&dev_priv->mm.shrinker);
+       i915_gem_shrinker_cleanup(dev_priv);
 
        io_mapping_free(dev_priv->gtt.mappable);
        arch_phys_wc_del(dev_priv->gtt.mtrr);
@@ -1182,6 +1240,8 @@ int i915_driver_unload(struct drm_device *dev)
        vga_switcheroo_unregister_client(dev->pdev);
        vga_client_register(dev->pdev, NULL, NULL, NULL);
 
+       intel_csr_ucode_fini(dev_priv);
+
        /* Free error state after interrupts are fully disabled. */
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
        i915_destroy_error_state(dev);
@@ -1196,31 +1256,21 @@ int i915_driver_unload(struct drm_device *dev)
 
        intel_guc_ucode_fini(dev);
        mutex_lock(&dev->struct_mutex);
-       i915_gem_cleanup_ringbuffer(dev);
        i915_gem_context_fini(dev);
+       i915_gem_cleanup_engines(dev);
        mutex_unlock(&dev->struct_mutex);
        intel_fbc_cleanup_cfb(dev_priv);
-       i915_gem_cleanup_stolen(dev);
 
-       intel_csr_ucode_fini(dev_priv);
-
-       intel_teardown_mchbar(dev);
-
-       destroy_workqueue(dev_priv->hotplug.dp_wq);
-       destroy_workqueue(dev_priv->wq);
-       destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
        pm_qos_remove_request(&dev_priv->pm_qos);
 
        i915_global_gtt_cleanup(dev);
 
        intel_uncore_fini(dev);
-       if (dev_priv->regs != NULL)
-               pci_iounmap(dev->pdev, dev_priv->regs);
+       i915_mmio_cleanup(dev);
 
-       kmem_cache_destroy(dev_priv->requests);
-       kmem_cache_destroy(dev_priv->vmas);
-       kmem_cache_destroy(dev_priv->objects);
+       i915_gem_load_cleanup(dev);
        pci_dev_put(dev_priv->bridge_dev);
+       i915_workqueues_cleanup(dev_priv);
        kfree(dev_priv);
 
        return 0;
@@ -1261,8 +1311,6 @@ void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
        i915_gem_context_close(dev, file);
        i915_gem_release(dev, file);
        mutex_unlock(&dev->struct_mutex);
-
-       intel_modeset_preclose(dev, file);
 }
 
 void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
index 3ac616d7363bafcc4197aa126c509932d15fb299..44912ecebc1a287b93bfb2ac6294304fc1c81b5a 100644 (file)
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
 #include <drm/drm_crtc_helper.h>
 
 static struct drm_driver driver;
@@ -501,7 +504,9 @@ void intel_detect_pch(struct drm_device *dev)
                                WARN_ON(!IS_SKYLAKE(dev) &&
                                        !IS_KABYLAKE(dev));
                        } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
-                                  (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE)) {
+                                  ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
+                                   pch->subsystem_vendor == 0x1af4 &&
+                                   pch->subsystem_device == 0x1100)) {
                                dev_priv->pch_type = intel_virt_detect_pch(dev);
                        } else
                                continue;
@@ -967,6 +972,15 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (PCI_FUNC(pdev->devfn))
                return -ENODEV;
 
+       /*
+        * apple-gmux is needed on dual GPU MacBook Pro
+        * to probe the panel if we're the inactive GPU.
+        */
+       if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
+           apple_gmux_present() && pdev != vga_default_device() &&
+           !vga_switcheroo_handler_flags())
+               return -EPROBE_DEFER;
+
        return drm_get_pci_dev(pdev, ent, &driver);
 }
 
@@ -1077,7 +1091,6 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
         */
        broxton_init_cdclk(dev);
        broxton_ddi_phy_init(dev);
-       intel_prepare_ddi(dev);
 
        return 0;
 }
@@ -1336,8 +1349,8 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
                return 0;
 
        DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
-                       wait_for_on ? "on" : "off",
-                       I915_READ(VLV_GTLC_PW_STATUS));
+                     onoff(wait_for_on),
+                     I915_READ(VLV_GTLC_PW_STATUS));
 
        /*
         * RC6 transitioning can be delayed up to 2 msec (see
@@ -1346,7 +1359,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
        err = wait_for(COND, 3);
        if (err)
                DRM_ERROR("timeout waiting for GT wells to go %s\n",
-                         wait_for_on ? "on" : "off");
+                         onoff(wait_for_on));
 
        return err;
 #undef COND
@@ -1357,7 +1370,7 @@ static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
        if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
                return;
 
-       DRM_ERROR("GT register access while GT waking disabled\n");
+       DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
        I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
 }
 
@@ -1501,6 +1514,10 @@ static int intel_runtime_suspend(struct device *device)
 
        enable_rpm_wakeref_asserts(dev_priv);
        WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
+
+       if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+               DRM_ERROR("Unclaimed access detected prior to suspending\n");
+
        dev_priv->pm.suspended = true;
 
        /*
@@ -1549,6 +1566,8 @@ static int intel_runtime_resume(struct device *device)
 
        intel_opregion_notify_adapter(dev, PCI_D0);
        dev_priv->pm.suspended = false;
+       if (intel_uncore_unclaimed_mmio(dev_priv))
+               DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
 
        intel_guc_resume(dev);
 
index f0f75d7c0d94263f86ccfdd7d8b84af831462be0..e11eef1e513453745ea51ed8775579854ea7a477 100644 (file)
@@ -34,6 +34,7 @@
 #include <uapi/drm/drm_fourcc.h>
 
 #include <drm/drmP.h>
+#include "i915_params.h"
 #include "i915_reg.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
@@ -58,7 +59,7 @@
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20151218"
+#define DRIVER_DATE            "20160124"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
                BUILD_BUG_ON(__i915_warn_cond); \
        WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
 #else
-#define WARN_ON(x) WARN((x), "WARN_ON(%s)", #x )
+#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 #endif
 
 #undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(%s)", #x )
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
 
 #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
                             (long) (x), __func__);
  */
 #define I915_STATE_WARN(condition, format...) ({                       \
        int __ret_warn_on = !!(condition);                              \
-       if (unlikely(__ret_warn_on)) {                                  \
-               if (i915.verbose_state_checks)                          \
-                       WARN(1, format);                                \
-               else                                                    \
+       if (unlikely(__ret_warn_on))                                    \
+               if (!WARN(i915.verbose_state_checks, format))           \
                        DRM_ERROR(format);                              \
-       }                                                               \
        unlikely(__ret_warn_on);                                        \
 })
 
-#define I915_STATE_WARN_ON(condition) ({                               \
-       int __ret_warn_on = !!(condition);                              \
-       if (unlikely(__ret_warn_on)) {                                  \
-               if (i915.verbose_state_checks)                          \
-                       WARN(1, "WARN_ON(" #condition ")\n");           \
-               else                                                    \
-                       DRM_ERROR("WARN_ON(" #condition ")\n");         \
-       }                                                               \
-       unlikely(__ret_warn_on);                                        \
-})
+#define I915_STATE_WARN_ON(x)                                          \
+       I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
 
 static inline const char *yesno(bool v)
 {
        return v ? "yes" : "no";
 }
 
+static inline const char *onoff(bool v)
+{
+       return v ? "on" : "off";
+}
+
 enum pipe {
        INVALID_PIPE = -1,
        PIPE_A = 0,
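
The WARN_ON/WARN_ON_ONCE rework above routes the stringified condition through a literal "%s" format, so a '%' inside the condition text can no longer be misread as a conversion specifier, and I915_STATE_WARN_ON becomes a thin wrapper around I915_STATE_WARN. A tiny, self-contained sketch of the stringify-plus-"%s" idiom; MY_WARN_ON is a made-up macro and the stringify helpers are re-declared locally, none of this is the driver's code:

#include <stdio.h>

/* Local re-declarations of the stringify helpers, for illustration. */
#define __stringify_1(x)        #x
#define __stringify(x)          __stringify_1(x)

/*
 * Passing the stringified condition through a "%s" format (as the new
 * WARN_ON does) keeps any '%' inside the expression from being parsed
 * as a conversion specifier.
 */
#define MY_WARN_ON(x) \
        do { if (x) printf("%s\n", "WARN_ON(" __stringify(x) ")"); } while (0)

int main(void)
{
        int len = 10;

        MY_WARN_ON(len % 3 == 1);       /* prints: WARN_ON(len % 3 == 1) */
        return 0;
}
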
@@ -339,7 +334,7 @@ struct drm_i915_file_private {
                unsigned boosts;
        } rps;
 
-       struct intel_engine_cs *bsd_ring;
+       unsigned int bsd_ring;
 };
 
 enum intel_dpll_id {
@@ -633,6 +628,7 @@ struct drm_i915_display_funcs {
                          struct dpll *best_clock);
        int (*compute_pipe_wm)(struct intel_crtc *crtc,
                               struct drm_atomic_state *state);
+       void (*program_watermarks)(struct intel_crtc_state *cstate);
        void (*update_wm)(struct drm_crtc *crtc);
        int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
        void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -657,9 +653,6 @@ struct drm_i915_display_funcs {
                          struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_request *req,
                          uint32_t flags);
-       void (*update_primary_plane)(struct drm_crtc *crtc,
-                                    struct drm_framebuffer *fb,
-                                    int x, int y);
        void (*hpd_irq_setup)(struct drm_device *dev);
        /* clock updates for mode set */
        /* cursor updates */
@@ -726,6 +719,8 @@ struct intel_uncore {
                i915_reg_t reg_post;
                u32 val_reset;
        } fw_domain[FW_DOMAIN_ID_COUNT];
+
+       int unclaimed_mmio_check;
 };
 
 /* Iterate over initialised fw domains */
@@ -889,6 +884,9 @@ struct intel_context {
                struct drm_i915_gem_object *state;
                struct intel_ringbuffer *ringbuf;
                int pin_count;
+               struct i915_vma *lrc_vma;
+               u64 lrc_desc;
+               uint32_t *lrc_reg_state;
        } engine[I915_NUM_RINGS];
 
        struct list_head link;
@@ -902,16 +900,15 @@ enum fb_op_origin {
        ORIGIN_DIRTYFB,
 };
 
-struct i915_fbc {
+struct intel_fbc {
        /* This is always the inner lock when overlapping with struct_mutex and
         * it's the outer lock when overlapping with stolen_lock. */
        struct mutex lock;
        unsigned threshold;
-       unsigned int fb_id;
        unsigned int possible_framebuffer_bits;
        unsigned int busy_bits;
+       unsigned int visible_pipes_mask;
        struct intel_crtc *crtc;
-       int y;
 
        struct drm_mm_node compressed_fb;
        struct drm_mm_node *compressed_llb;
@@ -921,18 +918,52 @@ struct i915_fbc {
        bool enabled;
        bool active;
 
+       struct intel_fbc_state_cache {
+               struct {
+                       unsigned int mode_flags;
+                       uint32_t hsw_bdw_pixel_rate;
+               } crtc;
+
+               struct {
+                       unsigned int rotation;
+                       int src_w;
+                       int src_h;
+                       bool visible;
+               } plane;
+
+               struct {
+                       u64 ilk_ggtt_offset;
+                       uint32_t pixel_format;
+                       unsigned int stride;
+                       int fence_reg;
+                       unsigned int tiling_mode;
+               } fb;
+       } state_cache;
+
+       struct intel_fbc_reg_params {
+               struct {
+                       enum pipe pipe;
+                       enum plane plane;
+                       unsigned int fence_y_offset;
+               } crtc;
+
+               struct {
+                       u64 ggtt_offset;
+                       uint32_t pixel_format;
+                       unsigned int stride;
+                       int fence_reg;
+               } fb;
+
+               int cfb_size;
+       } params;
+
        struct intel_fbc_work {
                bool scheduled;
+               u32 scheduled_vblank;
                struct work_struct work;
-               struct drm_framebuffer *fb;
-               unsigned long enable_jiffies;
        } work;
 
        const char *no_fbc_reason;
-
-       bool (*is_active)(struct drm_i915_private *dev_priv);
-       void (*activate)(struct intel_crtc *crtc);
-       void (*deactivate)(struct drm_i915_private *dev_priv);
 };
 
 /**
@@ -972,6 +1003,7 @@ struct i915_psr {
        unsigned busy_frontbuffer_bits;
        bool psr2_support;
        bool aux_frame_sync;
+       bool link_standby;
 };
 
 enum intel_pch {
@@ -1301,7 +1333,7 @@ struct i915_gem_mm {
        bool busy;
 
        /* the indicator for dispatch video commands on two BSD rings */
-       int bsd_ring_dispatch_index;
+       unsigned int bsd_ring_dispatch_index;
 
        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
@@ -1487,7 +1519,7 @@ struct intel_vbt_data {
                u8 seq_version;
                u32 size;
                u8 *data;
-               u8 *sequence[MIPI_SEQ_MAX];
+               const u8 *sequence[MIPI_SEQ_MAX];
        } dsi;
 
        int crt_ddc_pin;
@@ -1659,11 +1691,18 @@ struct i915_wa_reg {
        u32 mask;
 };
 
-#define I915_MAX_WA_REGS 16
+/*
+ * RING_MAX_NONPRIV_SLOTS is per-engine but at this point we are only
+ * allowing it for RCS as we don't foresee any requirement of having
+ * a whitelist for other engines. When it is really required for
+ * other engines then the limit need to be increased.
+ */
+#define I915_MAX_WA_REGS (16 + RING_MAX_NONPRIV_SLOTS)
 
 struct i915_workarounds {
        struct i915_wa_reg reg[I915_MAX_WA_REGS];
        u32 count;
+       u32 hw_whitelist_count[I915_NUM_RINGS];
 };
 
 struct i915_virtual_gpu {
@@ -1760,7 +1799,7 @@ struct drm_i915_private {
        u32 pipestat_irq_mask[I915_MAX_PIPES];
 
        struct i915_hotplug hotplug;
-       struct i915_fbc fbc;
+       struct intel_fbc fbc;
        struct i915_drrs drrs;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;
@@ -1784,7 +1823,7 @@ struct drm_i915_private {
 
        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_boot_cdclk;
-       unsigned int cdclk_freq, max_cdclk_freq;
+       unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
        unsigned int max_dotclk_freq;
        unsigned int hpll_freq;
        unsigned int czclk_freq;
@@ -1829,8 +1868,13 @@ struct drm_i915_private {
        struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
 #endif
 
+       /* dpll and cdclk state is protected by connection_mutex */
        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+
+       unsigned int active_crtcs;
+       unsigned int min_pixclk[I915_MAX_PIPES];
+
        int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
 
        struct i915_workarounds workarounds;
@@ -1945,6 +1989,8 @@ struct drm_i915_private {
                void (*stop_ring)(struct intel_engine_cs *ring);
        } gt;
 
+       struct intel_context *kernel_context;
+
        bool edp_low_vswing;
 
        /* perform PHY state sanity checks? */
@@ -1988,6 +2034,9 @@ enum hdmi_force_audio {
 #define I915_GTT_OFFSET_NONE ((u32)-1)
 
 struct drm_i915_gem_object_ops {
+       unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages before to binding them into the GTT, and put_pages() is
@@ -2003,6 +2052,7 @@ struct drm_i915_gem_object_ops {
         */
        int (*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *);
+
        int (*dmabuf_export)(struct drm_i915_gem_object *);
        void (*release)(struct drm_i915_gem_object *);
 };
@@ -2265,9 +2315,9 @@ struct drm_i915_gem_request {
 
 };
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx,
-                          struct drm_i915_gem_request **req_out);
+struct drm_i915_gem_request * __must_check
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+                      struct intel_context *ctx);
 void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
@@ -2576,6 +2626,11 @@ struct drm_i915_cmd_table {
 
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev)         (IS_I830(dev) || IS_845G(dev))
+
+/* WaRsDisableCoarsePowerGating:skl,bxt */
+#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
+                                                ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
+                                                 IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
  * even when in MSI mode. This results in spurious interrupt warnings if the
@@ -2665,44 +2720,7 @@ extern int i915_max_ioctl;
 extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
 extern int i915_resume_switcheroo(struct drm_device *dev);
 
-/* i915_params.c */
-struct i915_params {
-       int modeset;
-       int panel_ignore_lid;
-       int semaphores;
-       int lvds_channel_mode;
-       int panel_use_ssc;
-       int vbt_sdvo_panel_type;
-       int enable_rc6;
-       int enable_dc;
-       int enable_fbc;
-       int enable_ppgtt;
-       int enable_execlists;
-       int enable_psr;
-       unsigned int preliminary_hw_support;
-       int disable_power_well;
-       int enable_ips;
-       int invert_brightness;
-       int enable_cmd_parser;
-       /* leave bools at the end to not create holes */
-       bool enable_hangcheck;
-       bool fastboot;
-       bool prefault_disable;
-       bool load_detect_test;
-       bool reset;
-       bool disable_display;
-       bool disable_vtd_wa;
-       bool enable_guc_submission;
-       int guc_log_level;
-       int use_mmio_flip;
-       int mmio_debug;
-       bool verbose_state_checks;
-       bool nuclear_pageflip;
-       int edp_vswing;
-};
-extern struct i915_params i915 __read_mostly;
-
-                               /* i915_dma.c */
+/* i915_dma.c */
 extern int i915_driver_load(struct drm_device *, unsigned long flags);
 extern int i915_driver_unload(struct drm_device *);
 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2745,7 +2763,8 @@ extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev,
                                        bool restore_forcewake);
 extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_check_errors(struct drm_device *dev);
+extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
+extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
 extern void intel_uncore_fini(struct drm_device *dev);
 extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
 const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
@@ -2867,7 +2886,8 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
-void i915_gem_load(struct drm_device *dev);
+void i915_gem_load_init(struct drm_device *dev);
+void i915_gem_load_cleanup(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -3038,7 +3058,7 @@ int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
 int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
-void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+void i915_gem_cleanup_engines(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
 void __i915_add_request(struct drm_i915_gem_request *req,
@@ -3280,6 +3300,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
 #define I915_SHRINK_ACTIVE 0x8
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
 
 
 /* i915_gem_tiling.c */
@@ -3450,16 +3471,14 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
+void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
 u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
 void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
-u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
-void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
 void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
index ddc21d4b388d2419a63fbd4f4a5745deea80957b..de57e7f0be0f6ad9922846f95650b5890882cb5c 100644 (file)
@@ -1251,7 +1251,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(wait);
        unsigned long timeout_expire;
-       s64 before, now;
+       s64 before = 0; /* Only to silence a compiler warning. */
        int ret;
 
        WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
@@ -1271,14 +1271,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                        return -ETIME;
 
                timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
+
+               /*
+                * Record current time in case interrupted by signal, or wedged.
+                */
+               before = ktime_get_raw_ns();
        }
 
        if (INTEL_INFO(dev_priv)->gen >= 6)
                gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
 
-       /* Record current time in case interrupted by signal, or wedged */
        trace_i915_gem_request_wait_begin(req);
-       before = ktime_get_raw_ns();
 
        /* Optimistic spin for the next jiffie before touching IRQs */
        ret = __i915_spin_request(req, state);
@@ -1343,11 +1346,10 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
        finish_wait(&ring->irq_queue, &wait);
 
 out:
-       now = ktime_get_raw_ns();
        trace_i915_gem_request_wait_end(req);
 
        if (timeout) {
-               s64 tres = *timeout - (now - before);
+               s64 tres = *timeout - (ktime_get_raw_ns() - before);
 
                *timeout = tres < 0 ? 0 : tres;
 
@@ -2677,10 +2679,8 @@ void i915_gem_request_free(struct kref *req_ref)
                i915_gem_request_remove_from_client(req);
 
        if (ctx) {
-               if (i915.enable_execlists) {
-                       if (ctx != req->ring->default_context)
-                               intel_lr_context_unpin(req);
-               }
+               if (i915.enable_execlists && ctx != req->i915->kernel_context)
+                       intel_lr_context_unpin(ctx, req->ring);
 
                i915_gem_context_unreference(ctx);
        }
@@ -2688,9 +2688,10 @@ void i915_gem_request_free(struct kref *req_ref)
        kmem_cache_free(req->i915->requests, req);
 }
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-                          struct intel_context *ctx,
-                          struct drm_i915_gem_request **req_out)
+static inline int
+__i915_gem_request_alloc(struct intel_engine_cs *ring,
+                        struct intel_context *ctx,
+                        struct drm_i915_gem_request **req_out)
 {
        struct drm_i915_private *dev_priv = to_i915(ring->dev);
        struct drm_i915_gem_request *req;
@@ -2753,6 +2754,31 @@ err:
        return ret;
 }
 
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+                      struct intel_context *ctx)
+{
+       struct drm_i915_gem_request *req;
+       int err;
+
+       if (ctx == NULL)
+               ctx = to_i915(engine->dev)->kernel_context;
+       err = __i915_gem_request_alloc(engine, ctx, &req);
+       return err ? ERR_PTR(err) : req;
+}
+
 void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 {
        intel_ring_reserved_space_cancel(req->ringbuf);
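
With this change i915_gem_request_alloc() returns either a valid request pointer or an errno encoded with ERR_PTR(), and callers test the result with IS_ERR()/PTR_ERR() (see the i915_gpu_idle() and __i915_gem_object_sync() hunks below). A self-contained sketch of that calling convention; the ERR_PTR helpers here are simplified userspace stand-ins for the kernel macros, and request_alloc() is a made-up example function:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified userspace stand-ins for the kernel's ERR_PTR helpers. */
#define MAX_ERRNO 4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct request { int seqno; };

/* Made-up allocator following the same convention as the new
 * i915_gem_request_alloc(): valid pointer on success,
 * ERR_PTR(-errno) on failure. */
static struct request *request_alloc(int simulate_failure)
{
        struct request *req;

        if (simulate_failure)
                return ERR_PTR(-ENOMEM);

        req = malloc(sizeof(*req));
        if (!req)
                return ERR_PTR(-ENOMEM);
        req->seqno = 1;
        return req;
}

int main(void)
{
        struct request *req = request_alloc(1);

        if (IS_ERR(req)) {
                printf("allocation failed: %ld\n", PTR_ERR(req));
                return 1;
        }
        free(req);
        return 0;
}
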
@@ -3170,9 +3196,13 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
                        return 0;
 
                if (*to_req == NULL) {
-                       ret = i915_gem_request_alloc(to, to->default_context, to_req);
-                       if (ret)
-                               return ret;
+                       struct drm_i915_gem_request *req;
+
+                       req = i915_gem_request_alloc(to, NULL);
+                       if (IS_ERR(req))
+                               return PTR_ERR(req);
+
+                       *to_req = req;
                }
 
                trace_i915_gem_ring_sync_to(*to_req, from, from_req);
@@ -3372,9 +3402,9 @@ int i915_gpu_idle(struct drm_device *dev)
                if (!i915.enable_execlists) {
                        struct drm_i915_gem_request *req;
 
-                       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-                       if (ret)
-                               return ret;
+                       req = i915_gem_request_alloc(ring, NULL);
+                       if (IS_ERR(req))
+                               return PTR_ERR(req);
 
                        ret = i915_switch_context(req);
                        if (ret) {
@@ -4328,10 +4358,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto unref;
 
-       BUILD_BUG_ON(I915_NUM_RINGS > 16);
-       args->busy = obj->active << 16;
-       if (obj->last_write_req)
-               args->busy |= obj->last_write_req->ring->id;
+       args->busy = 0;
+       if (obj->active) {
+               int i;
+
+               for (i = 0; i < I915_NUM_RINGS; i++) {
+                       struct drm_i915_gem_request *req;
+
+                       req = obj->last_read_req[i];
+                       if (req)
+                               args->busy |= 1 << (16 + req->ring->exec_id);
+               }
+               if (obj->last_write_req)
+                       args->busy |= obj->last_write_req->ring->exec_id;
+       }
 
 unref:
        drm_gem_object_unreference(&obj->base);
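
The reworked busy ioctl above reports readers and the writer separately: one bit per engine exec_id in the upper 16 bits for outstanding reads, and the exec_id of the last writer in the low bits. A small illustrative sketch of that packing; pack_busy() and the sample engine ids are invented for the example:

#include <stdio.h>

/*
 * Pack "busy" information the way the ioctl above now does: one bit
 * per reading engine in bits 16 and up, and the exec_id of the last
 * writer in the low bits.
 */
static unsigned int pack_busy(unsigned int reader_mask, int writer_id)
{
        unsigned int busy = 0;
        int id;

        for (id = 0; id < 16; id++)
                if (reader_mask & (1u << id))
                        busy |= 1u << (16 + id);
        if (writer_id >= 0)
                busy |= (unsigned int)writer_id;
        return busy;
}

int main(void)
{
        /* read by engines 0 and 2, last written by engine 2 */
        unsigned int busy = pack_busy(0x5, 2);

        printf("busy = 0x%08x (readers 0x%04x, writer %u)\n",
               busy, busy >> 16, busy & 0xffff);
        return 0;
}
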
@@ -4425,6 +4465,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+       .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
        .get_pages = i915_gem_object_get_pages_gtt,
        .put_pages = i915_gem_object_put_pages_gtt,
 };
@@ -4832,7 +4873,7 @@ i915_gem_init_hw(struct drm_device *dev)
         */
        init_unused_rings(dev);
 
-       BUG_ON(!dev_priv->ring[RCS].default_context);
+       BUG_ON(!dev_priv->kernel_context);
 
        ret = i915_ppgtt_init_hw(dev);
        if (ret) {
@@ -4869,11 +4910,10 @@ i915_gem_init_hw(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i) {
                struct drm_i915_gem_request *req;
 
-               WARN_ON(!ring->default_context);
-
-               ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-               if (ret) {
-                       i915_gem_cleanup_ringbuffer(dev);
+               req = i915_gem_request_alloc(ring, NULL);
+               if (IS_ERR(req)) {
+                       ret = PTR_ERR(req);
+                       i915_gem_cleanup_engines(dev);
                        goto out;
                }
 
@@ -4886,7 +4926,7 @@ i915_gem_init_hw(struct drm_device *dev)
                if (ret && ret != -EIO) {
                        DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
                        i915_gem_request_cancel(req);
-                       i915_gem_cleanup_ringbuffer(dev);
+                       i915_gem_cleanup_engines(dev);
                        goto out;
                }
 
@@ -4894,7 +4934,7 @@ i915_gem_init_hw(struct drm_device *dev)
                if (ret && ret != -EIO) {
                        DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
                        i915_gem_request_cancel(req);
-                       i915_gem_cleanup_ringbuffer(dev);
+                       i915_gem_cleanup_engines(dev);
                        goto out;
                }
 
@@ -4969,7 +5009,7 @@ out_unlock:
 }
 
 void
-i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+i915_gem_cleanup_engines(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring;
@@ -4978,13 +5018,14 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
        for_each_ring(ring, dev_priv, i)
                dev_priv->gt.cleanup_ring(ring);
 
-    if (i915.enable_execlists)
-            /*
-             * Neither the BIOS, ourselves or any other kernel
-             * expects the system to be in execlists mode on startup,
-             * so we need to reset the GPU back to legacy mode.
-             */
-            intel_gpu_reset(dev);
+       if (i915.enable_execlists) {
+               /*
+                * Neither the BIOS, ourselves or any other kernel
+                * expects the system to be in execlists mode on startup,
+                * so we need to reset the GPU back to legacy mode.
+                */
+               intel_gpu_reset(dev);
+       }
 }
 
 static void
@@ -4995,7 +5036,7 @@ init_ring_lists(struct intel_engine_cs *ring)
 }
 
 void
-i915_gem_load(struct drm_device *dev)
+i915_gem_load_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int i;
@@ -5061,11 +5102,18 @@ i915_gem_load(struct drm_device *dev)
 
        dev_priv->mm.interruptible = true;
 
-       i915_gem_shrinker_init(dev_priv);
-
        mutex_init(&dev_priv->fb_tracking.lock);
 }
 
+void i915_gem_load_cleanup(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+
+       kmem_cache_destroy(dev_priv->requests);
+       kmem_cache_destroy(dev_priv->vmas);
+       kmem_cache_destroy(dev_priv->objects);
+}
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5112,6 +5160,8 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
 
+       file_priv->bsd_ring = -1;
+
        ret = i915_gem_context_open(dev, file);
        if (ret)
                kfree(file_priv);
@@ -5261,7 +5311,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
        struct page *page;
 
        /* Only default objects have per-page dirty tracking */
-       if (WARN_ON(obj->ops != &i915_gem_object_ops))
+       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
                return NULL;
 
        page = i915_gem_object_get_page(obj, n);
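
Rather than testing for the default backing store by comparing ops pointers, the hunks above add a flags word to drm_i915_gem_object_ops and check I915_GEM_OBJECT_HAS_STRUCT_PAGE directly, so any backend that provides struct pages passes the check. A minimal sketch of a flag-based capability check on an ops table; the table names and flag value below are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

#define HAS_STRUCT_PAGE 0x1

struct obj_ops {
        unsigned int flags;
        const char *name;
};

/* Capability is tested via the flag, not by comparing ops pointers,
 * so every backend that sets the flag is accepted. */
static bool has_struct_page(const struct obj_ops *ops)
{
        return ops->flags & HAS_STRUCT_PAGE;
}

static const struct obj_ops shmem_ops  = { .flags = HAS_STRUCT_PAGE, .name = "shmem" };
static const struct obj_ops dmabuf_ops = { .flags = 0,               .name = "dmabuf" };

int main(void)
{
        printf("%s: %d\n", shmem_ops.name, has_struct_page(&shmem_ops));
        printf("%s: %d\n", dmabuf_ops.name, has_struct_page(&dmabuf_ops));
        return 0;
}
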
index c25083c78ba7c21c7cdc92ed43f62c7e499851bb..83a097c94911935f85129a15789ca03d5912d61e 100644 (file)
@@ -321,6 +321,18 @@ err_destroy:
        return ERR_PTR(ret);
 }
 
+static void i915_gem_context_unpin(struct intel_context *ctx,
+                                  struct intel_engine_cs *engine)
+{
+       if (i915.enable_execlists) {
+               intel_lr_context_unpin(ctx, engine);
+       } else {
+               if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state)
+                       i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state);
+               i915_gem_context_unreference(ctx);
+       }
+}
+
 void i915_gem_context_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -329,40 +341,31 @@ void i915_gem_context_reset(struct drm_device *dev)
        if (i915.enable_execlists) {
                struct intel_context *ctx;
 
-               list_for_each_entry(ctx, &dev_priv->context_list, link) {
+               list_for_each_entry(ctx, &dev_priv->context_list, link)
                        intel_lr_context_reset(dev, ctx);
-               }
-
-               return;
        }
 
        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];
-               struct intel_context *lctx = ring->last_context;
-
-               if (lctx) {
-                       if (lctx->legacy_hw_ctx.rcs_state && i == RCS)
-                               i915_gem_object_ggtt_unpin(lctx->legacy_hw_ctx.rcs_state);
 
-                       i915_gem_context_unreference(lctx);
+               if (ring->last_context) {
+                       i915_gem_context_unpin(ring->last_context, ring);
                        ring->last_context = NULL;
                }
-
-               /* Force the GPU state to be reinitialised on enabling */
-               if (ring->default_context)
-                       ring->default_context->legacy_hw_ctx.initialized = false;
        }
+
+       /* Force the GPU state to be reinitialised on enabling */
+       dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_context *ctx;
-       int i;
 
        /* Init should only be called once per module load. Eventually the
         * restriction on the context_disabled check can be loosened. */
-       if (WARN_ON(dev_priv->ring[RCS].default_context))
+       if (WARN_ON(dev_priv->kernel_context))
                return 0;
 
        if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@@ -392,12 +395,7 @@ int i915_gem_context_init(struct drm_device *dev)
                return PTR_ERR(ctx);
        }
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct intel_engine_cs *ring = &dev_priv->ring[i];
-
-               /* NB: RCS will hold a ref for all rings */
-               ring->default_context = ctx;
-       }
+       dev_priv->kernel_context = ctx;
 
        DRM_DEBUG_DRIVER("%s context support initialized\n",
                        i915.enable_execlists ? "LR" :
@@ -408,7 +406,7 @@ int i915_gem_context_init(struct drm_device *dev)
 void i915_gem_context_fini(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+       struct intel_context *dctx = dev_priv->kernel_context;
        int i;
 
        if (dctx->legacy_hw_ctx.rcs_state) {
@@ -424,28 +422,21 @@ void i915_gem_context_fini(struct drm_device *dev)
                 * to offset the do_switch part, so that i915_gem_context_unreference()
                 * can then free the base object correctly. */
                WARN_ON(!dev_priv->ring[RCS].last_context);
-               if (dev_priv->ring[RCS].last_context == dctx) {
-                       /* Fake switch to NULL context */
-                       WARN_ON(dctx->legacy_hw_ctx.rcs_state->active);
-                       i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
-                       i915_gem_context_unreference(dctx);
-                       dev_priv->ring[RCS].last_context = NULL;
-               }
 
                i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
        }
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
+       for (i = I915_NUM_RINGS; --i >= 0;) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];
 
-               if (ring->last_context)
-                       i915_gem_context_unreference(ring->last_context);
-
-               ring->default_context = NULL;
-               ring->last_context = NULL;
+               if (ring->last_context) {
+                       i915_gem_context_unpin(ring->last_context, ring);
+                       ring->last_context = NULL;
+               }
        }
 
        i915_gem_context_unreference(dctx);
+       dev_priv->kernel_context = NULL;
 }
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
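
The context hunks above collapse the per-ring default_context pointers into a single dev_priv->kernel_context and route all engine teardown through one unpin helper. As a rough illustration only, here is a minimal standalone C sketch of that ownership pattern, with hypothetical ctx/engine types and a plain counter standing in for the driver's kref and GGTT pin:

    #include <assert.h>
    #include <stddef.h>

    struct ctx { int refcount, pin_count; };
    struct engine { struct ctx *last_context; };

    static void ctx_put(struct ctx *c)
    {
            c->refcount--;
            assert(c->refcount >= 0);
    }

    static void ctx_unpin(struct ctx *c)
    {
            c->pin_count--;
            assert(c->pin_count >= 0);
            ctx_put(c);                     /* unpin drops the reference taken at pin time */
    }

    static void engines_fini(struct engine *engines, int count, struct ctx *kernel_ctx)
    {
            /* Walk the engines in reverse, dropping each pinned last_context
             * through the one helper, then release the creation reference on
             * the single shared kernel context. */
            for (int i = count; --i >= 0; ) {
                    if (engines[i].last_context) {
                            ctx_unpin(engines[i].last_context);
                            engines[i].last_context = NULL;
                    }
            }
            ctx_put(kernel_ctx);
    }

    int main(void)
    {
            struct ctx kctx = { .refcount = 3, .pin_count = 2 };  /* 1 creation ref + 2 engines */
            struct engine engines[2] = { { &kctx }, { &kctx } };

            engines_fini(engines, 2, &kctx);
            assert(kctx.refcount == 0 && kctx.pin_count == 0);
            return 0;
    }
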
index e9c2bfd85b5268425ce9465ee2d23d99ee92c534..1f3eef6fb345cdb97ccd31b8cefe750ab65b9c2b 100644 (file)
@@ -193,10 +193,26 @@ static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_n
 
 static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
 {
-       return -EINVAL;
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+       int ret;
+
+       if (obj->base.size < vma->vm_end - vma->vm_start)
+               return -EINVAL;
+
+       if (!obj->base.filp)
+               return -ENODEV;
+
+       ret = obj->base.filp->f_op->mmap(obj->base.filp, vma);
+       if (ret)
+               return ret;
+
+       fput(vma->vm_file);
+       vma->vm_file = get_file(obj->base.filp);
+
+       return 0;
 }
 
-static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
@@ -212,6 +228,27 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
        return ret;
 }
 
+static void i915_gem_end_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
+{
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       bool was_interruptible;
+       int ret;
+
+       mutex_lock(&dev->struct_mutex);
+       was_interruptible = dev_priv->mm.interruptible;
+       dev_priv->mm.interruptible = false;
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, false);
+
+       dev_priv->mm.interruptible = was_interruptible;
+       mutex_unlock(&dev->struct_mutex);
+
+       if (unlikely(ret))
+               DRM_ERROR("unable to flush buffer following CPU access; rendering may be corrupt\n");
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
@@ -224,6 +261,7 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
        .vmap = i915_gem_dmabuf_vmap,
        .vunmap = i915_gem_dmabuf_vunmap,
        .begin_cpu_access = i915_gem_begin_cpu_access,
+       .end_cpu_access = i915_gem_end_cpu_access,
 };
 
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
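
The dma-buf hunks give the exporter a working mmap hook (forwarding to the backing shmem file) and add an end_cpu_access hook to pair with begin_cpu_access. A hedged userspace sketch of how that path is exercised is below; it assumes a dma-buf fd already obtained elsewhere (e.g. a PRIME export) and that <linux/dma-buf.h> provides DMA_BUF_IOCTL_SYNC, which reached mainline around the same time as these hooks:

    #include <linux/dma-buf.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    /* Map a dma-buf fd and bracket CPU writes with the sync ioctl, which
     * reaches the exporter's begin_cpu_access/end_cpu_access hooks. */
    static int cpu_fill(int dmabuf_fd, size_t size, uint8_t value)
    {
            struct dma_buf_sync sync = { 0 };
            int ret = -1;
            void *ptr;

            ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, dmabuf_fd, 0);
            if (ptr == MAP_FAILED)
                    return -1;

            sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
            if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) == 0) {  /* begin_cpu_access */
                    memset(ptr, value, size);

                    sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
                    ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);      /* end_cpu_access */
                    ret = 0;
            }

            munmap(ptr, size);
            return ret;
    }
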
index dccb517361b3f1763e3241f9c87f04c58ec4d7fc..8fd00d2794478cd08e46986ffa03ae88a4e351e5 100644 (file)
@@ -193,13 +193,10 @@ static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
                return eb->lut[handle];
        } else {
                struct hlist_head *head;
-               struct hlist_node *node;
+               struct i915_vma *vma;
 
                head = &eb->buckets[handle & eb->and];
-               hlist_for_each(node, head) {
-                       struct i915_vma *vma;
-
-                       vma = hlist_entry(node, struct i915_vma, exec_node);
+               hlist_for_each_entry(vma, head, exec_node) {
                        if (vma->exec_handle == handle)
                                return vma;
                }
@@ -1309,6 +1306,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
        exec_start = params->batch_obj_vm_offset +
                     params->args_batch_start_offset;
 
+       if (exec_len == 0)
+               exec_len = params->batch_obj->base.size;
+
        ret = ring->dispatch_execbuffer(params->request,
                                        exec_start, exec_len,
                                        params->dispatch_flags);
@@ -1325,33 +1325,23 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
- * The Ring ID is returned.
+ * The ring index is returned.
  */
-static int gen8_dispatch_bsd_ring(struct drm_device *dev,
-                                 struct drm_file *file)
+static unsigned int
+gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_file_private *file_priv = file->driver_priv;
 
-       /* Check whether the file_priv is using one ring */
-       if (file_priv->bsd_ring)
-               return file_priv->bsd_ring->id;
-       else {
-               /* If no, use the ping-pong mechanism to select one ring */
-               int ring_id;
-
-               mutex_lock(&dev->struct_mutex);
-               if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
-                       ring_id = VCS;
-                       dev_priv->mm.bsd_ring_dispatch_index = 1;
-               } else {
-                       ring_id = VCS2;
-                       dev_priv->mm.bsd_ring_dispatch_index = 0;
-               }
-               file_priv->bsd_ring = &dev_priv->ring[ring_id];
-               mutex_unlock(&dev->struct_mutex);
-               return ring_id;
+       /* Check whether the file_priv has already selected one ring. */
+       if ((int)file_priv->bsd_ring < 0) {
+               /* If not, use the ping-pong mechanism to select one. */
+               mutex_lock(&dev_priv->dev->struct_mutex);
+               file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
+               dev_priv->mm.bsd_ring_dispatch_index ^= 1;
+               mutex_unlock(&dev_priv->dev->struct_mutex);
        }
+
+       return file_priv->bsd_ring;
 }
 
 static struct drm_i915_gem_object *
@@ -1374,6 +1364,64 @@ eb_get_batch(struct eb_vmas *eb)
        return vma->obj;
 }
 
+#define I915_USER_RINGS (4)
+
+static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = {
+       [I915_EXEC_DEFAULT]     = RCS,
+       [I915_EXEC_RENDER]      = RCS,
+       [I915_EXEC_BLT]         = BCS,
+       [I915_EXEC_BSD]         = VCS,
+       [I915_EXEC_VEBOX]       = VECS
+};
+
+static int
+eb_select_ring(struct drm_i915_private *dev_priv,
+              struct drm_file *file,
+              struct drm_i915_gem_execbuffer2 *args,
+              struct intel_engine_cs **ring)
+{
+       unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
+
+       if (user_ring_id > I915_USER_RINGS) {
+               DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+               return -EINVAL;
+       }
+
+       if ((user_ring_id != I915_EXEC_BSD) &&
+           ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
+               DRM_DEBUG("execbuf with non bsd ring but with invalid "
+                         "bsd dispatch flags: %d\n", (int)(args->flags));
+               return -EINVAL;
+       }
+
+       if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+               unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
+
+               if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
+                       bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+               } else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
+                          bsd_idx <= I915_EXEC_BSD_RING2) {
+                       bsd_idx >>= I915_EXEC_BSD_SHIFT;
+                       bsd_idx--;
+               } else {
+                       DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
+                                 bsd_idx);
+                       return -EINVAL;
+               }
+
+               *ring = &dev_priv->ring[_VCS(bsd_idx)];
+       } else {
+               *ring = &dev_priv->ring[user_ring_map[user_ring_id]];
+       }
+
+       if (!intel_ring_initialized(*ring)) {
+               DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
@@ -1381,6 +1429,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_i915_gem_exec_object2 *exec)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_request *req = NULL;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1411,51 +1460,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        if (args->flags & I915_EXEC_IS_PINNED)
                dispatch_flags |= I915_DISPATCH_PINNED;
 
-       if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
-               DRM_DEBUG("execbuf with unknown ring: %d\n",
-                         (int)(args->flags & I915_EXEC_RING_MASK));
-               return -EINVAL;
-       }
-
-       if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
-           ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
-               DRM_DEBUG("execbuf with non bsd ring but with invalid "
-                       "bsd dispatch flags: %d\n", (int)(args->flags));
-               return -EINVAL;
-       } 
-
-       if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
-               ring = &dev_priv->ring[RCS];
-       else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
-               if (HAS_BSD2(dev)) {
-                       int ring_id;
-
-                       switch (args->flags & I915_EXEC_BSD_MASK) {
-                       case I915_EXEC_BSD_DEFAULT:
-                               ring_id = gen8_dispatch_bsd_ring(dev, file);
-                               ring = &dev_priv->ring[ring_id];
-                               break;
-                       case I915_EXEC_BSD_RING1:
-                               ring = &dev_priv->ring[VCS];
-                               break;
-                       case I915_EXEC_BSD_RING2:
-                               ring = &dev_priv->ring[VCS2];
-                               break;
-                       default:
-                               DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
-                                         (int)(args->flags & I915_EXEC_BSD_MASK));
-                               return -EINVAL;
-                       }
-               } else
-                       ring = &dev_priv->ring[VCS];
-       } else
-               ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
-
-       if (!intel_ring_initialized(ring)) {
-               DRM_DEBUG("execbuf with invalid ring: %d\n",
-                         (int)(args->flags & I915_EXEC_RING_MASK));
-               return -EINVAL;
-       }
+       ret = eb_select_ring(dev_priv, file, args, &ring);
+       if (ret)
+               return ret;
 
        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
@@ -1602,11 +1609,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
        /* Allocate a request for this batch buffer nice and early. */
-       ret = i915_gem_request_alloc(ring, ctx, &params->request);
-       if (ret)
+       req = i915_gem_request_alloc(ring, ctx);
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
                goto err_batch_unpin;
+       }
 
-       ret = i915_gem_request_add_to_client(params->request, file);
+       ret = i915_gem_request_add_to_client(req, file);
        if (ret)
                goto err_batch_unpin;
 
@@ -1622,6 +1631,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        params->dispatch_flags          = dispatch_flags;
        params->batch_obj               = batch_obj;
        params->ctx                     = ctx;
+       params->request                 = req;
 
        ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 
@@ -1645,8 +1655,8 @@ err:
         * must be freed again. If it was submitted then it is being tracked
         * on the active request list and no clean up is required here.
         */
-       if (ret && params->request)
-               i915_gem_request_cancel(params->request);
+       if (ret && !IS_ERR_OR_NULL(req))
+               i915_gem_request_cancel(req);
 
        mutex_unlock(&dev->struct_mutex);
 
index 598198543dcdbd303d9e77749009a5e922fea3f1..a2b938ec01a7ca6d4854a83213c56aa3f02f35ef 100644 (file)
@@ -34,8 +34,8 @@
  * set of these objects.
  *
  * Fences are used to detile GTT memory mappings. They're also connected to the
- * hardware frontbuffer render tracking and hence interract with frontbuffer
- * conmpression. Furthermore on older platforms fences are required for tiled
+ * hardware frontbuffer render tracking and hence interact with frontbuffer
+ * compression. Furthermore on older platforms fences are required for tiled
  * objects used by the display engine. They can also be used by the render
  * engine - they're required for blitter commands and are optional for render
  * commands. But on gen4+ both display (with the exception of fbc) and rendering
@@ -46,8 +46,8 @@
  *
  * Finally note that because fences are such a restricted resource they're
  * dynamically associated with objects. Furthermore fence state is committed to
- * the hardware lazily to avoid unecessary stalls on gen2/3. Therefore code must
- * explictly call i915_gem_object_get_fence() to synchronize fencing status
+ * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
+ * explicitly call i915_gem_object_get_fence() to synchronize fencing status
  * for cpu access. Also note that some code wants an unfenced view, for those
  * cases the fence can be removed forcefully with i915_gem_object_put_fence().
  *
@@ -527,7 +527,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
  * required.
  *
  * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
- * 17 is not just a page offset, so as we page an objet out and back in,
+ * 17 is not just a page offset, so as we page an object out and back in,
  * individual pages in it will have different bit 17 addresses, resulting in
  * each 64 bytes being swapped with its neighbor!
  *
index 56f4f2e58d53be25f3024ad8e6c259a1e6a44bfb..9127f8f3561c040744cab46a276753b9b4d95e49 100644 (file)
 static int
 i915_get_ggtt_vma_pages(struct i915_vma *vma);
 
-const struct i915_ggtt_view i915_ggtt_view_normal;
+const struct i915_ggtt_view i915_ggtt_view_normal = {
+       .type = I915_GGTT_VIEW_NORMAL,
+};
 const struct i915_ggtt_view i915_ggtt_view_rotated = {
-        .type = I915_GGTT_VIEW_ROTATED
+       .type = I915_GGTT_VIEW_ROTATED,
 };
 
 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
@@ -2130,6 +2132,25 @@ static void i915_address_space_init(struct i915_address_space *vm,
        list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
 
+static void gtt_write_workarounds(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       /* This function is for gtt related workarounds. This function is
+        * called on driver load and after a GPU reset, so you can place
+        * workarounds here even if they get overwritten by GPU reset.
+        */
+       /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
+       if (IS_BROADWELL(dev))
+               I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
+       else if (IS_CHERRYVIEW(dev))
+               I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
+       else if (IS_SKYLAKE(dev))
+               I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
+       else if (IS_BROXTON(dev))
+               I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
+}
+
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2146,6 +2167,8 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
 
 int i915_ppgtt_init_hw(struct drm_device *dev)
 {
+       gtt_write_workarounds(dev);
+
        /* In the case of execlists, PPGTT is enabled by the context descriptor
         * and the PDPs are contained within the context itself.  We don't
         * need to do anything here. */
@@ -2807,6 +2830,8 @@ void i915_global_gtt_cleanup(struct drm_device *dev)
                ppgtt->base.cleanup(&ppgtt->base);
        }
 
+       i915_gem_cleanup_stolen(dev);
+
        if (drm_mm_initialized(&vm->mm)) {
                if (intel_vgpu_active(dev))
                        intel_vgt_deballoon();
@@ -3179,6 +3204,14 @@ int i915_gem_gtt_init(struct drm_device *dev)
        if (ret)
                return ret;
 
+       /*
+        * Initialise stolen early so that we may reserve preallocated
+        * objects for the BIOS to KMS transition.
+        */
+       ret = i915_gem_init_stolen(dev);
+       if (ret)
+               goto out_gtt_cleanup;
+
        /* GMADR is the PCI mmio aperture into the global GTT. */
        DRM_INFO("Memory usable by graphics device = %lluM\n",
                 gtt->base.total >> 20);
@@ -3198,6 +3231,11 @@ int i915_gem_gtt_init(struct drm_device *dev)
        DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
 
        return 0;
+
+out_gtt_cleanup:
+       gtt->base.cleanup(&dev_priv->gtt.base);
+
+       return ret;
 }
 
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
@@ -3329,8 +3367,9 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 }
 
 static struct scatterlist *
-rotate_pages(dma_addr_t *in, unsigned int offset,
+rotate_pages(const dma_addr_t *in, unsigned int offset,
             unsigned int width, unsigned int height,
+            unsigned int stride,
             struct sg_table *st, struct scatterlist *sg)
 {
        unsigned int column, row;
@@ -3342,7 +3381,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
        }
 
        for (column = 0; column < width; column++) {
-               src_idx = width * (height - 1) + column;
+               src_idx = stride * (height - 1) + column;
                for (row = 0; row < height; row++) {
                        st->nents++;
                        /* We don't need the pages, but need to initialize
@@ -3353,7 +3392,7 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
                        sg_dma_address(sg) = in[offset + src_idx];
                        sg_dma_len(sg) = PAGE_SIZE;
                        sg = sg_next(sg);
-                       src_idx -= width;
+                       src_idx -= stride;
                }
        }
 
@@ -3361,10 +3400,9 @@ rotate_pages(dma_addr_t *in, unsigned int offset,
 }
 
 static struct sg_table *
-intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
+intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
                          struct drm_i915_gem_object *obj)
 {
-       struct intel_rotation_info *rot_info = &ggtt_view->params.rotation_info;
        unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
        unsigned int size_pages_uv;
        struct sg_page_iter sg_iter;
@@ -3406,6 +3444,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
        /* Rotate the pages. */
        sg = rotate_pages(page_addr_list, 0,
                     rot_info->width_pages, rot_info->height_pages,
+                    rot_info->width_pages,
                     st, NULL);
 
        /* Append the UV plane if NV12. */
@@ -3421,6 +3460,7 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
                rotate_pages(page_addr_list, uv_start_page,
                             rot_info->width_pages_uv,
                             rot_info->height_pages_uv,
+                            rot_info->width_pages_uv,
                             st, sg);
        }
 
@@ -3502,7 +3542,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
                vma->ggtt_view.pages = vma->obj->pages;
        else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
                vma->ggtt_view.pages =
-                       intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
+                       intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
        else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
                vma->ggtt_view.pages =
                        intel_partial_pages(&vma->ggtt_view, vma->obj);
@@ -3596,7 +3636,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
        if (view->type == I915_GGTT_VIEW_NORMAL) {
                return obj->base.size;
        } else if (view->type == I915_GGTT_VIEW_ROTATED) {
-               return view->params.rotation_info.size;
+               return view->params.rotated.size;
        } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
                return view->params.partial.size << PAGE_SHIFT;
        } else {
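
rotate_pages now takes an explicit stride so that the walk width and the row pitch can differ. The short program below, using made-up dimensions, prints the source page indices in the order the rotation visits them (column-major, bottom row first, stepping back one stride per row):

    #include <stdio.h>

    /* Print the source page indices in the order the rotation above visits them. */
    static void rotate_order(unsigned int offset, unsigned int width,
                             unsigned int height, unsigned int stride)
    {
            for (unsigned int column = 0; column < width; column++) {
                    unsigned int src_idx = stride * (height - 1) + column;

                    for (unsigned int row = 0; row < height; row++) {
                            printf("%u ", offset + src_idx);
                            src_idx -= stride;
                    }
            }
            printf("\n");
    }

    int main(void)
    {
            /* A 3x2-page view inside a buffer whose rows are 4 pages wide
             * (illustrative numbers, not taken from any real framebuffer). */
            rotate_order(0, 3, 2, 4);        /* prints: 4 0 5 1 6 2 */
            return 0;
    }
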
index b448ad832dcf2e9e22054d443c88790c6ad59085..66a6da2396a2c1a13f193a8d21038a1a93f454a0 100644 (file)
@@ -44,7 +44,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
 
 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
 
-
 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)     ((addr) | (((addr) >> 28) & 0xff0))
 #define GEN6_PTE_ADDR_ENCODE(addr)     GEN6_GTT_ADDR_ENCODE(addr)
@@ -156,7 +155,7 @@ struct i915_ggtt_view {
                        u64 offset;
                        unsigned int size;
                } partial;
-               struct intel_rotation_info rotation_info;
+               struct intel_rotation_info rotated;
        } params;
 
        struct sg_table *pages;
@@ -343,6 +342,8 @@ struct i915_gtt {
 
        size_t stolen_size;             /* Total size of stolen memory */
        size_t stolen_usable_size;      /* Total size minus BIOS reserved */
+       size_t stolen_reserved_base;
+       size_t stolen_reserved_size;
        u64 mappable_end;               /* End offset that we can CPU map */
        struct io_mapping *mappable;    /* Mapping to our CPU mappable region */
        phys_addr_t mappable_base;      /* PA of our GMADR */
index f7df54a8ee2bb231d4b9baf53519aac580eb158e..58c1e592bbdb6718a3abfadc99d785be4b28e5ef 100644 (file)
@@ -47,6 +47,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma;
+       int count = 0;
+
+       list_for_each_entry(vma, &obj->vma_list, vma_link) {
+               if (drm_mm_node_allocated(&vma->node))
+                       count++;
+               if (vma->pin_count)
+                       count++;
+       }
+
+       return count;
+}
+
+static bool swap_available(void)
+{
+       return get_nr_swap_pages() > 0;
+}
+
+static bool can_release_pages(struct drm_i915_gem_object *obj)
+{
+       /* Only report true if by unbinding the object and putting its pages
+        * we can actually make forward progress towards freeing physical
+        * pages.
+        *
+        * If the pages are pinned for any other reason than being bound
+        * to the GPU, simply unbinding from the GPU is not going to succeed
+        * in releasing our pin count on the pages themselves.
+        */
+       if (obj->pages_pin_count != num_vma_bound(obj))
+               return false;
+
+       /* We can only return physical pages to the system if we can either
+        * discard the contents (because the user has marked them as being
+        * purgeable) or if we can move their contents out to swap.
+        */
+       return swap_available() || obj->madv == I915_MADV_DONTNEED;
+}
+
 /**
  * i915_gem_shrink - Shrink buffer object caches
  * @dev_priv: i915 device
@@ -129,6 +169,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
                        if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
                                continue;
 
+                       if (!can_release_pages(obj))
+                               continue;
+
                        drm_gem_object_reference(&obj->base);
 
                        /* For the unbound phase, this should be a no-op! */
@@ -188,21 +231,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
        return true;
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
-{
-       struct i915_vma *vma;
-       int count = 0;
-
-       list_for_each_entry(vma, &obj->vma_list, vma_link) {
-               if (drm_mm_node_allocated(&vma->node))
-                       count++;
-               if (vma->pin_count)
-                       count++;
-       }
-
-       return count;
-}
-
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -222,7 +250,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
                        count += obj->base.size >> PAGE_SHIFT;
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-               if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
+               if (!obj->active && can_release_pages(obj))
                        count += obj->base.size >> PAGE_SHIFT;
        }
 
@@ -339,8 +367,20 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv)
        dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
        dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
-       register_shrinker(&dev_priv->mm.shrinker);
+       WARN_ON(register_shrinker(&dev_priv->mm.shrinker));
 
        dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
-       register_oom_notifier(&dev_priv->mm.oom_notifier);
+       WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier));
+}
+
+/**
+ * i915_gem_shrinker_cleanup - Clean up i915 shrinker
+ * @dev_priv: i915 device
+ *
+ * This function unregisters the i915 shrinker and OOM handler.
+ */
+void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv)
+{
+       WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
+       unregister_shrinker(&dev_priv->mm.shrinker);
 }
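
The shrinker changes hoist num_vma_bound and gate both the count and scan paths on can_release_pages. A condensed sketch of that eligibility test, using mock types rather than the driver's, is:

    #include <stdbool.h>

    struct mock_obj {
            int pages_pin_count;    /* pins on the backing pages          */
            int vma_bound;          /* pins attributable to GPU bindings  */
            bool dontneed;          /* user marked the contents purgeable */
    };

    static bool mock_swap_available(void)
    {
            return true;            /* stands in for get_nr_swap_pages() > 0 */
    }

    /* Pages can only be released if every pin comes from a GPU binding we
     * are allowed to undo, and the contents can either be discarded or
     * written out to swap. */
    static bool mock_can_release_pages(const struct mock_obj *obj)
    {
            if (obj->pages_pin_count != obj->vma_bound)
                    return false;   /* pinned for some other reason, e.g. a CPU mapping */

            return mock_swap_available() || obj->dontneed;
    }
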
index 3476877fc0d6459df7d1d2ee15673c42afb946cf..ba1a00d815d30587570587df8f4e0da630da9417 100644 (file)
@@ -458,6 +458,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
                return 0;
        }
 
+       dev_priv->gtt.stolen_reserved_base = reserved_base;
+       dev_priv->gtt.stolen_reserved_size = reserved_size;
+
        /* It is possible for the reserved area to end before the end of stolen
         * memory, so just consider the start. */
        reserved_total = stolen_top - reserved_base;
@@ -569,6 +572,9 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
        if (obj->pages == NULL)
                goto cleanup;
 
+       obj->get_page.sg = obj->pages->sgl;
+       obj->get_page.last = 0;
+
        i915_gem_object_pin_pages(obj);
        obj->stolen = stolen;
 
index 19fb0bddc1cddfce0804e459319dc11ba96c5ab7..7107f2fd38f563552af6e090653c10818b013e19 100644 (file)
@@ -49,21 +49,18 @@ struct i915_mmu_notifier {
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
-       struct list_head linear;
-       bool has_linear;
 };
 
 struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
+       struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
-       struct drm_i915_gem_object *obj;
        struct work_struct work;
-       bool active;
-       bool is_linear;
+       bool attached;
 };
 
-static void __cancel_userptr__worker(struct work_struct *work)
+static void cancel_userptr(struct work_struct *work)
 {
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
@@ -94,24 +91,22 @@ static void __cancel_userptr__worker(struct work_struct *work)
        mutex_unlock(&dev->struct_mutex);
 }
 
-static unsigned long cancel_userptr(struct i915_mmu_object *mo)
+static void add_object(struct i915_mmu_object *mo)
 {
-       unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
-
-       /* The mmu_object is released late when destroying the
-        * GEM object so it is entirely possible to gain a
-        * reference on an object in the process of being freed
-        * since our serialisation is via the spinlock and not
-        * the struct_mutex - and consequently use it after it
-        * is freed and then double free it.
-        */
-       if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
-               schedule_work(&mo->work);
-               /* only schedule one work packet to avoid the refleak */
-               mo->active = false;
-       }
+       if (mo->attached)
+               return;
+
+       interval_tree_insert(&mo->it, &mo->mn->objects);
+       mo->attached = true;
+}
 
-       return end;
+static void del_object(struct i915_mmu_object *mo)
+{
+       if (!mo->attached)
+               return;
+
+       interval_tree_remove(&mo->it, &mo->mn->objects);
+       mo->attached = false;
 }
 
 static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
@@ -122,28 +117,36 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
+       struct interval_tree_node *it;
+       LIST_HEAD(cancelled);
+
+       if (RB_EMPTY_ROOT(&mn->objects))
+               return;
 
        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;
 
        spin_lock(&mn->lock);
-       if (mn->has_linear) {
-               list_for_each_entry(mo, &mn->linear, link) {
-                       if (mo->it.last < start || mo->it.start > end)
-                               continue;
-
-                       cancel_userptr(mo);
-               }
-       } else {
-               struct interval_tree_node *it;
+       it = interval_tree_iter_first(&mn->objects, start, end);
+       while (it) {
+               /* The mmu_object is released late when destroying the
+                * GEM object so it is entirely possible to gain a
+                * reference on an object in the process of being freed
+                * since our serialisation is via the spinlock and not
+                * the struct_mutex - and consequently use it after it
+                * is freed and then double free it. To prevent that
+                * use-after-free we only acquire a reference on the
+                * object if it is not in the process of being destroyed.
+                */
+               mo = container_of(it, struct i915_mmu_object, it);
+               if (kref_get_unless_zero(&mo->obj->base.refcount))
+                       schedule_work(&mo->work);
 
-               it = interval_tree_iter_first(&mn->objects, start, end);
-               while (it) {
-                       mo = container_of(it, struct i915_mmu_object, it);
-                       start = cancel_userptr(mo);
-                       it = interval_tree_iter_next(it, start, end);
-               }
+               list_add(&mo->link, &cancelled);
+               it = interval_tree_iter_next(it, start, end);
        }
+       list_for_each_entry(mo, &cancelled, link)
+               del_object(mo);
        spin_unlock(&mn->lock);
 }
 
@@ -164,8 +167,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
-       INIT_LIST_HEAD(&mn->linear);
-       mn->has_linear = false;
 
         /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
@@ -177,85 +178,6 @@ i915_mmu_notifier_create(struct mm_struct *mm)
        return mn;
 }
 
-static int
-i915_mmu_notifier_add(struct drm_device *dev,
-                     struct i915_mmu_notifier *mn,
-                     struct i915_mmu_object *mo)
-{
-       struct interval_tree_node *it;
-       int ret = 0;
-
-       /* By this point we have already done a lot of expensive setup that
-        * we do not want to repeat just because the caller (e.g. X) has a
-        * signal pending (and partly because of that expensive setup, X
-        * using an interrupt timer is likely to get stuck in an EINTR loop).
-        */
-       mutex_lock(&dev->struct_mutex);
-
-       /* Make sure we drop the final active reference (and thereby
-        * remove the objects from the interval tree) before we do
-        * the check for overlapping objects.
-        */
-       i915_gem_retire_requests(dev);
-
-       spin_lock(&mn->lock);
-       it = interval_tree_iter_first(&mn->objects,
-                                     mo->it.start, mo->it.last);
-       if (it) {
-               struct drm_i915_gem_object *obj;
-
-               /* We only need to check the first object in the range as it
-                * either has cancelled gup work queued and we need to
-                * return back to the user to give time for the gup-workers
-                * to flush their object references upon which the object will
-                * be removed from the interval-tree, or the the range is
-                * still in use by another client and the overlap is invalid.
-                *
-                * If we do have an overlap, we cannot use the interval tree
-                * for fast range invalidation.
-                */
-
-               obj = container_of(it, struct i915_mmu_object, it)->obj;
-               if (!obj->userptr.workers)
-                       mn->has_linear = mo->is_linear = true;
-               else
-                       ret = -EAGAIN;
-       } else
-               interval_tree_insert(&mo->it, &mn->objects);
-
-       if (ret == 0)
-               list_add(&mo->link, &mn->linear);
-
-       spin_unlock(&mn->lock);
-       mutex_unlock(&dev->struct_mutex);
-
-       return ret;
-}
-
-static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
-{
-       struct i915_mmu_object *mo;
-
-       list_for_each_entry(mo, &mn->linear, link)
-               if (mo->is_linear)
-                       return true;
-
-       return false;
-}
-
-static void
-i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
-                     struct i915_mmu_object *mo)
-{
-       spin_lock(&mn->lock);
-       list_del(&mo->link);
-       if (mo->is_linear)
-               mn->has_linear = i915_mmu_notifier_has_linear(mn);
-       else
-               interval_tree_remove(&mo->it, &mn->objects);
-       spin_unlock(&mn->lock);
-}
-
 static void
 i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
 {
@@ -265,7 +187,9 @@ i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
        if (mo == NULL)
                return;
 
-       i915_mmu_notifier_del(mo->mn, mo);
+       spin_lock(&mo->mn->lock);
+       del_object(mo);
+       spin_unlock(&mo->mn->lock);
        kfree(mo);
 
        obj->userptr.mmu_object = NULL;
@@ -299,7 +223,6 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
 {
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;
-       int ret;
 
        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
@@ -316,16 +239,10 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                return -ENOMEM;
 
        mo->mn = mn;
-       mo->it.start = obj->userptr.ptr;
-       mo->it.last = mo->it.start + obj->base.size - 1;
        mo->obj = obj;
-       INIT_WORK(&mo->work, __cancel_userptr__worker);
-
-       ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
-       if (ret) {
-               kfree(mo);
-               return ret;
-       }
+       mo->it.start = obj->userptr.ptr;
+       mo->it.last = obj->userptr.ptr + obj->base.size - 1;
+       INIT_WORK(&mo->work, cancel_userptr);
 
        obj->userptr.mmu_object = mo;
        return 0;
@@ -552,8 +469,10 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
-       if (!value || !work_pending(&obj->userptr.mmu_object->work))
-               obj->userptr.mmu_object->active = value;
+       if (!value)
+               del_object(obj->userptr.mmu_object);
+       else if (!work_pending(&obj->userptr.mmu_object->work))
+               add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
@@ -789,9 +708,10 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
-       .dmabuf_export = i915_gem_userptr_dmabuf_export,
+       .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
+       .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .release = i915_gem_userptr_release,
 };
 
index 06ca4082735bc84c4ff5073bf60fb303096fce08..978c026963b82d78a596b69b5c3f9a33e6bd7f10 100644 (file)
@@ -365,6 +365,10 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
        err_printf(m, "Reset count: %u\n", error->reset_count);
        err_printf(m, "Suspend count: %u\n", error->suspend_count);
        err_printf(m, "PCI ID: 0x%04x\n", dev->pdev->device);
+       err_printf(m, "PCI Revision: 0x%02x\n", dev->pdev->revision);
+       err_printf(m, "PCI Subsystem: %04x:%04x\n",
+                  dev->pdev->subsystem_vendor,
+                  dev->pdev->subsystem_device);
        err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
 
        if (HAS_CSR(dev)) {
@@ -1050,7 +1054,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
                        if (request)
                                rbuf = request->ctx->engine[ring->id].ringbuf;
                        else
-                               rbuf = ring->default_context->engine[ring->id].ringbuf;
+                               rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
                } else
                        rbuf = ring->buffer;
 
index 685c7991e24f33bc549baf1e50f8c9416c177cf6..e4ba5822289bb6dcbc0a225c02e87d62fe48175a 100644 (file)
@@ -40,6 +40,7 @@
 #define   GS_MIA_CORE_STATE              (1 << GS_MIA_SHIFT)
 
 #define SOFT_SCRATCH(n)                        _MMIO(0xc180 + (n) * 4)
+#define SOFT_SCRATCH_COUNT             16
 
 #define UOS_RSA_SCRATCH(i)             _MMIO(0xc200 + (i) * 4)
 #define   UOS_RSA_SCRATCH_MAX_COUNT      64
index 05aa7e61cbe05952d32281f49511ced642d07ca1..d7543efc8a5e7a4a6f065f38504366826853d724 100644 (file)
@@ -158,10 +158,8 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
 
        data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
        /* WaRsDisableCoarsePowerGating:skl,bxt */
-       if (!intel_enable_rc6(dev_priv->dev) ||
-           IS_BXT_REVID(dev, 0, BXT_REVID_A1) ||
-           (IS_SKL_GT3(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)) ||
-           (IS_SKL_GT4(dev) && IS_SKL_REVID(dev, 0, SKL_REVID_E0)))
+       if (!intel_enable_rc6(dev) ||
+           NEEDS_WaRsDisableCoarsePowerGating(dev))
                data[1] = 0;
        else
                /* bit 0 and 1 are for Render and Media domain separately */
@@ -246,6 +244,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
                        db_exc.cookie = 1;
        }
 
+       /* Finally, update the cached copy of the GuC's WQ head */
+       gc->wq_head = desc->head;
+
        kunmap_atomic(base);
        return ret;
 }
@@ -375,6 +376,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 static void guc_init_ctx_desc(struct intel_guc *guc,
                              struct i915_guc_client *client)
 {
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct intel_engine_cs *ring;
        struct intel_context *ctx = client->owner;
        struct guc_context_desc desc;
        struct sg_table *sg;
@@ -387,10 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
        desc.priority = client->priority;
        desc.db_id = client->doorbell_id;
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
-               struct guc_execlist_context *lrc = &desc.lrc[i];
-               struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
-               struct intel_engine_cs *ring;
+       for_each_ring(ring, dev_priv, i) {
+               struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id];
                struct drm_i915_gem_object *obj;
                uint64_t ctx_desc;
 
@@ -405,7 +406,6 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                if (!obj)
                        break;  /* XXX: continue? */
 
-               ring = ringbuf->ring;
                ctx_desc = intel_lr_context_descriptor(ctx, ring);
                lrc->context_desc = (u32)ctx_desc;
 
@@ -413,16 +413,16 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
                lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
                                LRC_STATE_PN * PAGE_SIZE;
                lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
-                               (ring->id << GUC_ELC_ENGINE_OFFSET);
+                               (ring->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-               obj = ringbuf->obj;
+               obj = ctx->engine[i].ringbuf->obj;
 
                lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
                lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
                lrc->ring_next_free_location = lrc->ring_begin;
                lrc->ring_current_tail_pointer_value = 0;
 
-               desc.engines_used |= (1 << ring->id);
+               desc.engines_used |= (1 << ring->guc_id);
        }
 
        WARN_ON(desc.engines_used == 0);
@@ -471,28 +471,30 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
                             sizeof(desc) * client->ctx_index);
 }
 
-/* Get valid workqueue item and return it back to offset */
-static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
+int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
        struct guc_process_desc *desc;
        void *base;
        u32 size = sizeof(struct guc_wq_item);
        int ret = -ETIMEDOUT, timeout_counter = 200;
 
+       if (!gc)
+               return 0;
+
+       /* Quickly return if wq space is available since last time we cache the
+        * head position. */
+       if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
+               return 0;
+
        base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
        desc = base + gc->proc_desc_offset;
 
        while (timeout_counter-- > 0) {
-               if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
-                       *offset = gc->wq_tail;
+               gc->wq_head = desc->head;
 
-                       /* advance the tail for next workqueue item */
-                       gc->wq_tail += size;
-                       gc->wq_tail &= gc->wq_size - 1;
-
-                       /* this will break the loop */
-                       timeout_counter = 0;
+               if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
                        ret = 0;
+                       break;
                }
 
                if (timeout_counter)
@@ -507,15 +509,18 @@ static int guc_get_workqueue_space(struct i915_guc_client *gc, u32 *offset)
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
                                  struct drm_i915_gem_request *rq)
 {
-       enum intel_ring_id ring_id = rq->ring->id;
        struct guc_wq_item *wqi;
        void *base;
-       u32 tail, wq_len, wq_off = 0;
-       int ret;
+       u32 tail, wq_len, wq_off, space;
+
+       space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+       if (WARN_ON(space < sizeof(struct guc_wq_item)))
+               return -ENOSPC; /* shouldn't happen */
 
-       ret = guc_get_workqueue_space(gc, &wq_off);
-       if (ret)
-               return ret;
+       /* postincrement WQ tail for next time */
+       wq_off = gc->wq_tail;
+       gc->wq_tail += sizeof(struct guc_wq_item);
+       gc->wq_tail &= gc->wq_size - 1;
 
        /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
         * should not have the case where structure wqi is across page, neither
@@ -537,7 +542,7 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
        wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
        wqi->header = WQ_TYPE_INORDER |
                        (wq_len << WQ_LEN_SHIFT) |
-                       (ring_id << WQ_TARGET_SHIFT) |
+                       (rq->ring->guc_id << WQ_TARGET_SHIFT) |
                        WQ_NO_WCFLUSH_WAIT;
 
        /* The GuC wants only the low-order word of the context descriptor */
@@ -553,29 +558,6 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
        return 0;
 }
 
-#define CTX_RING_BUFFER_START          0x08
-
-/* Update the ringbuffer pointer in a saved context image */
-static void lr_context_update(struct drm_i915_gem_request *rq)
-{
-       enum intel_ring_id ring_id = rq->ring->id;
-       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state;
-       struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
-       struct page *page;
-       uint32_t *reg_state;
-
-       BUG_ON(!ctx_obj);
-       WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
-       WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
-       page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-       reg_state = kmap_atomic(page);
-
-       reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
-
-       kunmap_atomic(reg_state);
-}
-
 /**
  * i915_guc_submit() - Submit commands through GuC
  * @client:    the guc client where commands will go through
@@ -587,18 +569,14 @@ int i915_guc_submit(struct i915_guc_client *client,
                    struct drm_i915_gem_request *rq)
 {
        struct intel_guc *guc = client->guc;
-       enum intel_ring_id ring_id = rq->ring->id;
+       unsigned int engine_id = rq->ring->guc_id;
        int q_ret, b_ret;
 
-       /* Need this because of the deferred pin ctx and ring */
-       /* Shall we move this right after ring is pinned? */
-       lr_context_update(rq);
-
        q_ret = guc_add_workqueue_item(client, rq);
        if (q_ret == 0)
                b_ret = guc_ring_doorbell(client);
 
-       client->submissions[ring_id] += 1;
+       client->submissions[engine_id] += 1;
        if (q_ret) {
                client->q_fail += 1;
                client->retcode = q_ret;
@@ -608,8 +586,8 @@ int i915_guc_submit(struct i915_guc_client *client,
        } else {
                client->retcode = 0;
        }
-       guc->submissions[ring_id] += 1;
-       guc->last_seqno[ring_id] = rq->seqno;
+       guc->submissions[engine_id] += 1;
+       guc->last_seqno[engine_id] = rq->seqno;
 
        return q_ret;
 }
@@ -832,6 +810,96 @@ static void guc_create_log(struct intel_guc *guc)
        guc->log_flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
 }
 
+static void init_guc_policies(struct guc_policies *policies)
+{
+       struct guc_policy *policy;
+       u32 p, i;
+
+       policies->dpc_promote_time = 500000;
+       policies->max_num_work_items = POLICY_MAX_NUM_WI;
+
+       for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+               for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
+                       policy = &policies->policy[p][i];
+
+                       policy->execution_quantum = 1000000;
+                       policy->preemption_time = 500000;
+                       policy->fault_time = 250000;
+                       policy->policy_flags = 0;
+               }
+       }
+
+       policies->is_valid = 1;
+}
+
+static void guc_create_ads(struct intel_guc *guc)
+{
+       struct drm_i915_private *dev_priv = guc_to_i915(guc);
+       struct drm_i915_gem_object *obj;
+       struct guc_ads *ads;
+       struct guc_policies *policies;
+       struct guc_mmio_reg_state *reg_state;
+       struct intel_engine_cs *ring;
+       struct page *page;
+       u32 size, i;
+
+       /* The ads obj includes the struct itself and buffers passed to GuC */
+       size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
+                       sizeof(struct guc_mmio_reg_state) +
+                       GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+
+       obj = guc->ads_obj;
+       if (!obj) {
+               obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size));
+               if (!obj)
+                       return;
+
+               guc->ads_obj = obj;
+       }
+
+       page = i915_gem_object_get_page(obj, 0);
+       ads = kmap(page);
+
+       /*
+        * The GuC requires a "Golden Context" when it reinitialises
+        * engines after a reset. Here we use the Render ring default
+        * context, which must already exist and be pinned in the GGTT,
+        * so its address won't change after we've told the GuC where
+        * to find it.
+        */
+       ring = &dev_priv->ring[RCS];
+       ads->golden_context_lrca = ring->status_page.gfx_addr;
+
+       for_each_ring(ring, dev_priv, i)
+               ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring);
+
+       /* GuC scheduling policies */
+       policies = (void *)ads + sizeof(struct guc_ads);
+       init_guc_policies(policies);
+
+       ads->scheduler_policies = i915_gem_obj_ggtt_offset(obj) +
+                       sizeof(struct guc_ads);
+
+       /* MMIO reg state */
+       reg_state = (void *)policies + sizeof(struct guc_policies);
+
+       for_each_ring(ring, dev_priv, i) {
+               reg_state->mmio_white_list[ring->guc_id].mmio_start =
+                       ring->mmio_base + GUC_MMIO_WHITE_LIST_START;
+
+               /* Nothing to be saved or restored for now. */
+               reg_state->mmio_white_list[ring->guc_id].count = 0;
+       }
+
+       ads->reg_state_addr = ads->scheduler_policies +
+                       sizeof(struct guc_policies);
+
+       ads->reg_state_buffer = ads->reg_state_addr +
+                       sizeof(struct guc_mmio_reg_state);
+
+       kunmap(page);
+}
+
 /*
  * Set up the memory resources to be shared with the GuC.  At this point,
  * we require just one object that can be mapped through the GGTT.
@@ -858,6 +926,8 @@ int i915_guc_submission_init(struct drm_device *dev)
 
        guc_create_log(guc);
 
+       guc_create_ads(guc);
+
        return 0;
 }
 
@@ -865,7 +935,7 @@ int i915_guc_submission_enable(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
-       struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+       struct intel_context *ctx = dev_priv->kernel_context;
        struct i915_guc_client *client;
 
        /* client for execbuf submission */
@@ -896,6 +966,9 @@ void i915_guc_submission_fini(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc *guc = &dev_priv->guc;
 
+       gem_release_guc_obj(dev_priv->guc.ads_obj);
+       guc->ads_obj = NULL;
+
        gem_release_guc_obj(dev_priv->guc.log_obj);
        guc->log_obj = NULL;
 
@@ -919,7 +992,7 @@ int intel_guc_suspend(struct drm_device *dev)
        if (!i915.enable_guc_submission)
                return 0;
 
-       ctx = dev_priv->ring[RCS].default_context;
+       ctx = dev_priv->kernel_context;
 
        data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
        /* any value greater than GUC_POWER_D0 */
@@ -945,7 +1018,7 @@ int intel_guc_resume(struct drm_device *dev)
        if (!i915.enable_guc_submission)
                return 0;
 
-       ctx = dev_priv->ring[RCS].default_context;
+       ctx = dev_priv->kernel_context;
 
        data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
        data[1] = GUC_POWER_D0;
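
The GuC work-queue changes cache the firmware-owned head (gc->wq_head) and only re-read it when the cached value no longer shows room, with guc_add_workqueue_item merely post-incrementing the tail. The standalone sketch below shows that pattern with the CIRC_CNT/CIRC_SPACE macros reproduced from include/linux/circ_buf.h; note that, like the driver, it calls the producer index "tail" and the consumer index "head", the opposite of the macro parameter names. Sizes here are illustrative:

    #include <assert.h>
    #include <stdio.h>

    /* As defined in include/linux/circ_buf.h (size must be a power of two;
     * the first macro argument is the producer index, the second the consumer). */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    #define WQ_SIZE  4096u               /* illustrative ring size */
    #define WQ_ITEM  16u                 /* illustrative item size */

    struct wq {
            unsigned int tail;                  /* our producer index               */
            unsigned int cached_head;           /* last consumer index we read      */
            volatile unsigned int shared_head;  /* consumer-owned, costly to re-read */
    };

    /* Reserve space for one fixed-size item, refreshing the cached consumer
     * head only when the cached value no longer shows enough room. */
    static int wq_reserve(struct wq *wq, unsigned int *offset)
    {
            if (CIRC_SPACE(wq->tail, wq->cached_head, WQ_SIZE) < WQ_ITEM) {
                    wq->cached_head = wq->shared_head;        /* slow path: re-read */
                    if (CIRC_SPACE(wq->tail, wq->cached_head, WQ_SIZE) < WQ_ITEM)
                            return -1;                        /* genuinely full */
            }

            *offset = wq->tail;                               /* post-increment the tail */
            wq->tail = (wq->tail + WQ_ITEM) & (WQ_SIZE - 1);
            return 0;
    }

    int main(void)
    {
            struct wq wq = { 0, 0, 0 };
            unsigned int off;
            int n = 0;

            /* With the consumer stuck at 0, CIRC_SPACE leaves one item's worth
             * unused, so 255 of the 256 slots can be reserved. */
            while (wq_reserve(&wq, &off) == 0)
                    n++;
            printf("reserved %d items\n", n);                 /* 255 */
            assert(n == (int)(WQ_SIZE / WQ_ITEM) - 1);
            return 0;
    }

The point of the cached copy is that the fast path never has to map and read the shared descriptor page; as the hunks above show, it is refreshed opportunistically when the doorbell is rung or when the cached value runs out of room.
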
index fa8afa7860ae175753b501622d9295f1475a6c96..25a89373df6340db8bafc7dab346b687ef4c48ff 100644 (file)
@@ -2188,10 +2188,6 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        disable_rpm_wakeref_asserts(dev_priv);
 
-       /* We get interrupts on unclaimed registers, so check for this before we
-        * do any I915_{READ,WRITE}. */
-       intel_uncore_check_errors(dev);
-
        /* disable master interrupt before clearing iir  */
        de_ier = I915_READ(DEIER);
        I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -2268,43 +2264,20 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
        intel_hpd_irq_handler(dev, pin_mask, long_mask);
 }
 
-static irqreturn_t gen8_irq_handler(int irq, void *arg)
+static irqreturn_t
+gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 {
-       struct drm_device *dev = arg;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 master_ctl;
+       struct drm_device *dev = dev_priv->dev;
        irqreturn_t ret = IRQ_NONE;
-       uint32_t tmp = 0;
+       u32 iir;
        enum pipe pipe;
-       u32 aux_mask = GEN8_AUX_CHANNEL_A;
-
-       if (!intel_irqs_enabled(dev_priv))
-               return IRQ_NONE;
-
-       /* IRQs are synced during runtime_suspend, we don't require a wakeref */
-       disable_rpm_wakeref_asserts(dev_priv);
-
-       if (INTEL_INFO(dev_priv)->gen >= 9)
-               aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
-                       GEN9_AUX_CHANNEL_D;
-
-       master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
-       master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
-       if (!master_ctl)
-               goto out;
-
-       I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
-
-       /* Find, clear, then process each source of interrupt */
-
-       ret = gen8_gt_irq_handler(dev_priv, master_ctl);
 
        if (master_ctl & GEN8_DE_MISC_IRQ) {
-               tmp = I915_READ(GEN8_DE_MISC_IIR);
-               if (tmp) {
-                       I915_WRITE(GEN8_DE_MISC_IIR, tmp);
+               iir = I915_READ(GEN8_DE_MISC_IIR);
+               if (iir) {
+                       I915_WRITE(GEN8_DE_MISC_IIR, iir);
                        ret = IRQ_HANDLED;
-                       if (tmp & GEN8_DE_MISC_GSE)
+                       if (iir & GEN8_DE_MISC_GSE)
                                intel_opregion_asle_intr(dev);
                        else
                                DRM_ERROR("Unexpected DE Misc interrupt\n");
@@ -2314,33 +2287,40 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        }
 
        if (master_ctl & GEN8_DE_PORT_IRQ) {
-               tmp = I915_READ(GEN8_DE_PORT_IIR);
-               if (tmp) {
+               iir = I915_READ(GEN8_DE_PORT_IIR);
+               if (iir) {
+                       u32 tmp_mask;
                        bool found = false;
-                       u32 hotplug_trigger = 0;
-
-                       if (IS_BROXTON(dev_priv))
-                               hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
-                       else if (IS_BROADWELL(dev_priv))
-                               hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
 
-                       I915_WRITE(GEN8_DE_PORT_IIR, tmp);
+                       I915_WRITE(GEN8_DE_PORT_IIR, iir);
                        ret = IRQ_HANDLED;
 
-                       if (tmp & aux_mask) {
+                       tmp_mask = GEN8_AUX_CHANNEL_A;
+                       if (INTEL_INFO(dev_priv)->gen >= 9)
+                               tmp_mask |= GEN9_AUX_CHANNEL_B |
+                                           GEN9_AUX_CHANNEL_C |
+                                           GEN9_AUX_CHANNEL_D;
+
+                       if (iir & tmp_mask) {
                                dp_aux_irq_handler(dev);
                                found = true;
                        }
 
-                       if (hotplug_trigger) {
-                               if (IS_BROXTON(dev))
-                                       bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
-                               else
-                                       ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
-                               found = true;
+                       if (IS_BROXTON(dev_priv)) {
+                               tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
+                               if (tmp_mask) {
+                                       bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
+                                       found = true;
+                               }
+                       } else if (IS_BROADWELL(dev_priv)) {
+                               tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
+                               if (tmp_mask) {
+                                       ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
+                                       found = true;
+                               }
                        }
 
-                       if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
+                       if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
                                gmbus_irq_handler(dev);
                                found = true;
                        }
@@ -2353,49 +2333,51 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        }
 
        for_each_pipe(dev_priv, pipe) {
-               uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
+               u32 flip_done, fault_errors;
 
                if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
                        continue;
 
-               pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
-               if (pipe_iir) {
-                       ret = IRQ_HANDLED;
-                       I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
+               iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
+               if (!iir) {
+                       DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+                       continue;
+               }
 
-                       if (pipe_iir & GEN8_PIPE_VBLANK &&
-                           intel_pipe_handle_vblank(dev, pipe))
-                               intel_check_page_flip(dev, pipe);
+               ret = IRQ_HANDLED;
+               I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
 
-                       if (INTEL_INFO(dev_priv)->gen >= 9)
-                               flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
-                       else
-                               flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
+               if (iir & GEN8_PIPE_VBLANK &&
+                   intel_pipe_handle_vblank(dev, pipe))
+                       intel_check_page_flip(dev, pipe);
 
-                       if (flip_done) {
-                               intel_prepare_page_flip(dev, pipe);
-                               intel_finish_page_flip_plane(dev, pipe);
-                       }
+               flip_done = iir;
+               if (INTEL_INFO(dev_priv)->gen >= 9)
+                       flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
+               else
+                       flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
 
-                       if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
-                               hsw_pipe_crc_irq_handler(dev, pipe);
+               if (flip_done) {
+                       intel_prepare_page_flip(dev, pipe);
+                       intel_finish_page_flip_plane(dev, pipe);
+               }
 
-                       if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
-                               intel_cpu_fifo_underrun_irq_handler(dev_priv,
-                                                                   pipe);
+               if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
+                       hsw_pipe_crc_irq_handler(dev, pipe);
 
+               if (iir & GEN8_PIPE_FIFO_UNDERRUN)
+                       intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
 
-                       if (INTEL_INFO(dev_priv)->gen >= 9)
-                               fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
-                       else
-                               fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
+               fault_errors = iir;
+               if (INTEL_INFO(dev_priv)->gen >= 9)
+                       fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
+               else
+                       fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
 
-                       if (fault_errors)
-                               DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
-                                         pipe_name(pipe),
-                                         pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
-               } else
-                       DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+               if (fault_errors)
+                       DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
+                                 pipe_name(pipe),
+                                 fault_errors);
        }
 
        if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
@@ -2405,15 +2387,15 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                 * scheme also closed the SDE interrupt handling race we've seen
                 * on older pch-split platforms. But this needs testing.
                 */
-               u32 pch_iir = I915_READ(SDEIIR);
-               if (pch_iir) {
-                       I915_WRITE(SDEIIR, pch_iir);
+               iir = I915_READ(SDEIIR);
+               if (iir) {
+                       I915_WRITE(SDEIIR, iir);
                        ret = IRQ_HANDLED;
 
                        if (HAS_PCH_SPT(dev_priv))
-                               spt_irq_handler(dev, pch_iir);
+                               spt_irq_handler(dev, iir);
                        else
-                               cpt_irq_handler(dev, pch_iir);
+                               cpt_irq_handler(dev, iir);
                } else {
                        /*
                         * Like on previous PCH there seems to be something
@@ -2423,10 +2405,36 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
                }
        }
 
+       return ret;
+}
+
+static irqreturn_t gen8_irq_handler(int irq, void *arg)
+{
+       struct drm_device *dev = arg;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 master_ctl;
+       irqreturn_t ret;
+
+       if (!intel_irqs_enabled(dev_priv))
+               return IRQ_NONE;
+
+       master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
+       master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
+       if (!master_ctl)
+               return IRQ_NONE;
+
+       I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
+
+       /* IRQs are synced during runtime_suspend, we don't require a wakeref */
+       disable_rpm_wakeref_asserts(dev_priv);
+
+       /* Find, clear, then process each source of interrupt */
+       ret = gen8_gt_irq_handler(dev_priv, master_ctl);
+       ret |= gen8_de_irq_handler(dev_priv, master_ctl);
+
        I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
        POSTING_READ_FW(GEN8_MASTER_IRQ);
 
-out:
        enable_rpm_wakeref_asserts(dev_priv);
 
        return ret;
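
The hunks above split the old monolithic gen8_irq_handler() into a thin top-level handler plus gen8_gt_irq_handler() and the new gen8_de_irq_handler(), OR'ing their irqreturn_t results. The following is a minimal userspace sketch of that pattern, not the driver code itself: the register access is mocked and all names here (top_level_handler, handle_gt, handle_de, mock_master_reg) are purely illustrative; the only assumption carried over is that IRQ_NONE/IRQ_HANDLED behave as OR-able flags, as they do in the kernel.

#include <stdint.h>
#include <stdio.h>

typedef unsigned int irqreturn_t;
#define IRQ_NONE	0u
#define IRQ_HANDLED	1u

#define MASTER_IRQ_CONTROL	(1u << 31)
#define GT_IRQ			(1u << 0)
#define DE_IRQ			(1u << 1)

/* Stand-in for the hardware master interrupt register. */
static uint32_t mock_master_reg = MASTER_IRQ_CONTROL | GT_IRQ | DE_IRQ;

static irqreturn_t handle_gt(uint32_t master_ctl)
{
	return (master_ctl & GT_IRQ) ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t handle_de(uint32_t master_ctl)
{
	return (master_ctl & DE_IRQ) ? IRQ_HANDLED : IRQ_NONE;
}

static irqreturn_t top_level_handler(void)
{
	uint32_t master_ctl = mock_master_reg & ~MASTER_IRQ_CONTROL;
	irqreturn_t ret;

	if (!master_ctl)
		return IRQ_NONE;

	/* In the driver this is a register write disabling the master interrupt. */
	mock_master_reg &= ~MASTER_IRQ_CONTROL;

	/* Find, clear, then process each source; the results are OR-able flags. */
	ret = handle_gt(master_ctl);
	ret |= handle_de(master_ctl);

	/* Re-enable the master interrupt once everything has been processed. */
	mock_master_reg |= MASTER_IRQ_CONTROL;

	return ret;
}

int main(void)
{
	printf("handler returned %u\n", top_level_handler());
	return 0;
}

Disabling the master control first and re-enabling it only after both sub-handlers have run mirrors the ordering used in the hunk above.
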
@@ -2949,14 +2957,44 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
                ring->hangcheck.deadlock = 0;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+static bool subunits_stuck(struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 tmp;
+       u32 instdone[I915_NUM_INSTDONE_REG];
+       bool stuck;
+       int i;
+
+       if (ring->id != RCS)
+               return true;
+
+       i915_get_extra_instdone(ring->dev, instdone);
+
+       /* There might be unstable subunit states even when
+        * actual head is not moving. Filter out the unstable ones by
+        * accumulating the undone -> done transitions and only
+        * consider those as progress.
+        */
+       stuck = true;
+       for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
+               const u32 tmp = instdone[i] | ring->hangcheck.instdone[i];
+
+               if (tmp != ring->hangcheck.instdone[i])
+                       stuck = false;
+
+               ring->hangcheck.instdone[i] |= tmp;
+       }
 
+       return stuck;
+}
+
+static enum intel_ring_hangcheck_action
+head_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
        if (acthd != ring->hangcheck.acthd) {
+
+               /* Clear subunit states on head movement */
+               memset(ring->hangcheck.instdone, 0,
+                      sizeof(ring->hangcheck.instdone));
+
                if (acthd > ring->hangcheck.max_acthd) {
                        ring->hangcheck.max_acthd = acthd;
                        return HANGCHECK_ACTIVE;
@@ -2965,6 +3003,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd)
                return HANGCHECK_ACTIVE_LOOP;
        }
 
+       if (!subunits_stuck(ring))
+               return HANGCHECK_ACTIVE;
+
+       return HANGCHECK_HUNG;
+}
+
+static enum intel_ring_hangcheck_action
+ring_stuck(struct intel_engine_cs *ring, u64 acthd)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       enum intel_ring_hangcheck_action ha;
+       u32 tmp;
+
+       ha = head_stuck(ring, acthd);
+       if (ha != HANGCHECK_HUNG)
+               return ha;
+
        if (IS_GEN2(dev))
                return HANGCHECK_HUNG;
 
@@ -3032,6 +3088,12 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
         */
        DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
 
+       /* As enabling the GPU requires fairly extensive mmio access,
+        * periodically arm the mmio checker to see if we are triggering
+        * any invalid access.
+        */
+       intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
        for_each_ring(ring, dev_priv, i) {
                u64 acthd;
                u32 seqno;
@@ -3106,7 +3168,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
                        if (ring->hangcheck.score > 0)
                                ring->hangcheck.score--;
 
+                       /* Clear head and subunit states on seqno movement */
                        ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
+
+                       memset(ring->hangcheck.instdone, 0,
+                              sizeof(ring->hangcheck.instdone));
                }
 
                ring->hangcheck.seqno = seqno;
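
The new subunits_stuck()/head_stuck() helpers above only report a hang once the accumulated INSTDONE bits stop changing, filtering out subunit states that flicker while the head is otherwise idle. The sketch below shows just that accumulation rule in isolation; the struct, register count and sample values are made up for illustration and are not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_INSTDONE_REG 4

struct hangcheck {
	uint32_t instdone[NUM_INSTDONE_REG];
};

/*
 * Accumulate "done" bits across samples; the unit is considered stuck only
 * if no new bit has become set since the previous sample.
 */
static bool subunits_stuck(struct hangcheck *hc,
			   const uint32_t sample[NUM_INSTDONE_REG])
{
	bool stuck = true;
	int i;

	for (i = 0; i < NUM_INSTDONE_REG; i++) {
		const uint32_t tmp = sample[i] | hc->instdone[i];

		if (tmp != hc->instdone[i])
			stuck = false;	/* progress: a new bit appeared */

		hc->instdone[i] |= tmp;
	}

	return stuck;
}

int main(void)
{
	struct hangcheck hc;
	uint32_t s1[NUM_INSTDONE_REG] = { 0x1, 0, 0, 0 };
	uint32_t s2[NUM_INSTDONE_REG] = { 0x3, 0, 0, 0 };

	memset(&hc, 0, sizeof(hc));	/* cleared on head/seqno movement */
	printf("%d\n", subunits_stuck(&hc, s1));	/* 0: first bit is progress */
	printf("%d\n", subunits_stuck(&hc, s2));	/* 0: another new bit appeared */
	printf("%d\n", subunits_stuck(&hc, s2));	/* 1: nothing new, stuck */
	return 0;
}

The accumulator is reset whenever the head or seqno moves, which is exactly what the memset() calls added in the hunks above do.
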
index 835d6099c7699889cbbe099dbdc768a0cbe028d9..8b9f36814165469a0496e611591e705a20f04e04 100644 (file)
@@ -22,6 +22,7 @@
  * IN THE SOFTWARE.
  */
 
+#include "i915_params.h"
 #include "i915_drv.h"
 
 struct i915_params i915 __read_mostly = {
@@ -126,7 +127,8 @@ MODULE_PARM_DESC(enable_execlists,
        "(-1=auto [default], 0=disabled, 1=enabled)");
 
 module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
-MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+MODULE_PARM_DESC(enable_psr, "Enable PSR "
+                "(0=disabled [default], 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode)");
 
 module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
 MODULE_PARM_DESC(preliminary_hw_support,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
new file mode 100644 (file)
index 0000000..5299290
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright © 2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _I915_PARAMS_H_
+#define _I915_PARAMS_H_
+
+#include <linux/cache.h> /* for __read_mostly */
+
+struct i915_params {
+       int modeset;
+       int panel_ignore_lid;
+       int semaphores;
+       int lvds_channel_mode;
+       int panel_use_ssc;
+       int vbt_sdvo_panel_type;
+       int enable_rc6;
+       int enable_dc;
+       int enable_fbc;
+       int enable_ppgtt;
+       int enable_execlists;
+       int enable_psr;
+       unsigned int preliminary_hw_support;
+       int disable_power_well;
+       int enable_ips;
+       int invert_brightness;
+       int enable_cmd_parser;
+       int guc_log_level;
+       int use_mmio_flip;
+       int mmio_debug;
+       int edp_vswing;
+       /* leave bools at the end to not create holes */
+       bool enable_hangcheck;
+       bool fastboot;
+       bool prefault_disable;
+       bool load_detect_test;
+       bool reset;
+       bool disable_display;
+       bool disable_vtd_wa;
+       bool enable_guc_submission;
+       bool verbose_state_checks;
+       bool nuclear_pageflip;
+};
+
+extern struct i915_params i915 __read_mostly;
+
+#endif
+
index 007ae83a4086d65ff7e4d3862f2ad2a035c74540..144586ee74d5a510e55dd87c01c6d2fb201159a7 100644 (file)
@@ -610,16 +610,17 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   IOSF_BYTE_ENABLES_SHIFT              4
 #define   IOSF_BAR_SHIFT                       1
 #define   IOSF_SB_BUSY                         (1<<0)
-#define   IOSF_PORT_BUNIT                      0x3
-#define   IOSF_PORT_PUNIT                      0x4
+#define   IOSF_PORT_BUNIT                      0x03
+#define   IOSF_PORT_PUNIT                      0x04
 #define   IOSF_PORT_NC                         0x11
 #define   IOSF_PORT_DPIO                       0x12
-#define   IOSF_PORT_DPIO_2                     0x1a
 #define   IOSF_PORT_GPIO_NC                    0x13
 #define   IOSF_PORT_CCK                                0x14
-#define   IOSF_PORT_CCU                                0xA9
-#define   IOSF_PORT_GPS_CORE                   0x48
-#define   IOSF_PORT_FLISDSI                    0x1B
+#define   IOSF_PORT_DPIO_2                     0x1a
+#define   IOSF_PORT_FLISDSI                    0x1b
+#define   IOSF_PORT_GPIO_SC                    0x48
+#define   IOSF_PORT_GPIO_SUS                   0xa8
+#define   IOSF_PORT_CCU                                0xa9
 #define VLV_IOSF_DATA                          _MMIO(VLV_DISPLAY_BASE + 0x2104)
 #define VLV_IOSF_ADDR                          _MMIO(VLV_DISPLAY_BASE + 0x2108)
 
@@ -1635,6 +1636,9 @@ enum skl_disp_power_wells {
 #define   RING_WAIT            (1<<11) /* gen3+, PRBx_CTL */
 #define   RING_WAIT_SEMAPHORE  (1<<10) /* gen6+ */
 
+#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base)+0x4D0) + (i)*4)
+#define   RING_MAX_NONPRIV_SLOTS  12
+
 #define GEN7_TLB_RD_ADDR       _MMIO(0x4700)
 
 #if 0
@@ -1711,6 +1715,11 @@ enum skl_disp_power_wells {
 #define FPGA_DBG               _MMIO(0x42300)
 #define   FPGA_DBG_RM_NOCLAIM  (1<<31)
 
+#define CLAIM_ER               _MMIO(VLV_DISPLAY_BASE + 0x2028)
+#define   CLAIM_ER_CLR         (1 << 31)
+#define   CLAIM_ER_OVERFLOW    (1 << 16)
+#define   CLAIM_ER_CTR_MASK    0xffff
+
 #define DERRMR         _MMIO(0x44050)
 /* Note that HBLANK events are reserved on bdw+ */
 #define   DERRMR_PIPEA_SCANLINE                (1<<0)
@@ -5940,6 +5949,7 @@ enum skl_disp_power_wells {
 #define  ILK_INTERNAL_GRAPHICS_DISABLE (1 << 31)
 #define  ILK_INTERNAL_DISPLAY_DISABLE  (1 << 30)
 #define  ILK_DISPLAY_DEBUG_DISABLE     (1 << 29)
+#define  IVB_PIPE_C_DISABLE            (1 << 28)
 #define  ILK_HDCP_DISABLE              (1 << 25)
 #define  ILK_eDP_A_DISABLE             (1 << 24)
 #define  HSW_CDCLK_LIMIT               (1 << 24)
@@ -5986,10 +5996,19 @@ enum skl_disp_power_wells {
 #define SKL_DFSM_CDCLK_LIMIT_540       (1 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_450       (2 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_337_5     (3 << 23)
+#define SKL_DFSM_PIPE_A_DISABLE                (1 << 30)
+#define SKL_DFSM_PIPE_B_DISABLE                (1 << 21)
+#define SKL_DFSM_PIPE_C_DISABLE                (1 << 28)
+
+#define GEN7_FF_SLICE_CS_CHICKEN1      _MMIO(0x20e0)
+#define   GEN9_FFSC_PERCTX_PREEMPT_CTRL        (1<<14)
 
 #define FF_SLICE_CS_CHICKEN2                   _MMIO(0x20e4)
 #define  GEN9_TSG_BARRIER_ACK_DISABLE          (1<<8)
 
+#define GEN9_CS_DEBUG_MODE1            _MMIO(0x20ec)
+#define GEN8_CS_CHICKEN1               _MMIO(0x2580)
+
 /* GEN7 chicken */
 #define GEN7_COMMON_SLICE_CHICKEN1             _MMIO(0x7010)
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC     ((1<<10) | (1<<26))
@@ -6035,6 +6054,8 @@ enum skl_disp_power_wells {
 #define  HDC_FORCE_NON_COHERENT                        (1<<4)
 #define  HDC_BARRIER_PERFORMANCE_DISABLE       (1<<10)
 
+#define GEN8_HDC_CHICKEN1                      _MMIO(0x7304)
+
 /* GEN9 chicken */
 #define SLICE_ECO_CHICKEN0                     _MMIO(0x7308)
 #define   PIXEL_MASK_CAMMING_DISABLE           (1 << 14)
@@ -6765,6 +6786,16 @@ enum skl_disp_power_wells {
 
 #define  VLV_PMWGICZ                           _MMIO(0x1300a4)
 
+#define  RC6_LOCATION                          _MMIO(0xD40)
+#define           RC6_CTX_IN_DRAM                      (1 << 0)
+#define  RC6_CTX_BASE                          _MMIO(0xD48)
+#define    RC6_CTX_BASE_MASK                   0xFFFFFFF0
+#define  PWRCTX_MAXCNT_RCSUNIT                 _MMIO(0x2054)
+#define  PWRCTX_MAXCNT_VCSUNIT0                        _MMIO(0x12054)
+#define  PWRCTX_MAXCNT_BCSUNIT                 _MMIO(0x22054)
+#define  PWRCTX_MAXCNT_VECSUNIT                        _MMIO(0x1A054)
+#define  PWRCTX_MAXCNT_VCSUNIT1                        _MMIO(0x1C054)
+#define    IDLE_TIME_MASK                      0xFFFFF
 #define  FORCEWAKE                             _MMIO(0xA18C)
 #define  FORCEWAKE_VLV                         _MMIO(0x1300b0)
 #define  FORCEWAKE_ACK_VLV                     _MMIO(0x1300b4)
@@ -6903,6 +6934,7 @@ enum skl_disp_power_wells {
 #define GEN6_RPDEUC                            _MMIO(0xA084)
 #define GEN6_RPDEUCSW                          _MMIO(0xA088)
 #define GEN6_RC_STATE                          _MMIO(0xA094)
+#define   RC6_STATE                            (1 << 18)
 #define GEN6_RC1_WAKE_RATE_LIMIT               _MMIO(0xA098)
 #define GEN6_RC6_WAKE_RATE_LIMIT               _MMIO(0xA09C)
 #define GEN6_RC6pp_WAKE_RATE_LIMIT             _MMIO(0xA0A0)
@@ -7514,7 +7546,7 @@ enum skl_disp_power_wells {
 #define  DPLL_CFGCR2_PDIV_7 (4<<2)
 #define  DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
 
-#define DPLL_CFGCR1(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2)
+#define DPLL_CFGCR1(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
 #define DPLL_CFGCR2(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
 
 /* BXT display engine PLL */
@@ -8154,4 +8186,11 @@ enum skl_disp_power_wells {
 #define GEN9_VEBOX_MOCS(i)     _MMIO(0xcb00 + (i) * 4) /* Video MOCS registers */
 #define GEN9_BLT_MOCS(i)       _MMIO(0xcc00 + (i) * 4) /* Blitter MOCS registers */
 
+/* gamt regs */
+#define GEN8_L3_LRA_1_GPGPU _MMIO(0x4dd4)
+#define   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW  0x67F1427F /* max/min for LRA1/2 */
+#define   GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV  0x5FF101FF /* max/min for LRA1/2 */
+#define   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL  0x67F1427F /*    "        " */
+#define   GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT  0x5FF101FF /*    "        " */
+
 #endif /* _I915_REG_H_ */
index a2aa09ce3202f3ca72e64f550244e2a2c8e8a29f..34e061a9ef0673fde527c9ecbd14aabf532bb926 100644 (file)
@@ -49,7 +49,7 @@ static void i915_save_display(struct drm_device *dev)
                dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
                dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
                dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
-       } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+       } else if (INTEL_INFO(dev)->gen <= 4) {
                dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
                dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
                dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
@@ -84,7 +84,7 @@ static void i915_restore_display(struct drm_device *dev)
                I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
                I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
                I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-       } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+       } else if (INTEL_INFO(dev)->gen <= 4) {
                I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
                I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
                I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -92,7 +92,7 @@ static void i915_restore_display(struct drm_device *dev)
        }
 
        /* only restore FBC info on the platform that supports FBC*/
-       intel_fbc_disable(dev_priv);
+       intel_fbc_global_disable(dev_priv);
 
        /* restore FBC interval */
        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev))
index 37e3f0ddf8e04ac8a79e1eec2cf6ae171812c0b8..c6188dddb3414dd20188776ad63a70505ab293cb 100644 (file)
@@ -164,7 +164,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
             struct bin_attribute *attr, char *buf,
             loff_t offset, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -200,7 +200,7 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
              struct bin_attribute *attr, char *buf,
              loff_t offset, size_t count)
 {
-       struct device *dev = container_of(kobj, struct device, kobj);
+       struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
@@ -521,7 +521,7 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
                                loff_t off, size_t count)
 {
 
-       struct device *kdev = container_of(kobj, struct device, kobj);
+       struct device *kdev = kobj_to_dev(kobj);
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct i915_error_state_file_priv error_priv;
@@ -556,7 +556,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
                                 struct bin_attribute *attr, char *buf,
                                 loff_t off, size_t count)
 {
-       struct device *kdev = container_of(kobj, struct device, kobj);
+       struct device *kdev = kobj_to_dev(kobj);
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        int ret;
index d0b1c9afa35ef6a909e9146eeb61dd343f87e9e4..4625f8a9ba1214e7d28c2ad72138fe9c08575d01 100644 (file)
@@ -308,5 +308,5 @@ void intel_atomic_state_clear(struct drm_atomic_state *s)
 {
        struct intel_atomic_state *state = to_intel_atomic_state(s);
        drm_atomic_state_default_clear(&state->base);
-       state->dpll_set = false;
+       state->dpll_set = state->modeset = false;
 }
index c6bb0fc1edfb57280c37972e656c1eb6f04c0990..e0b851a0004abd3dc2dde47e9b7af90e5a177510 100644 (file)
@@ -152,9 +152,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
        intel_state->clip.x1 = 0;
        intel_state->clip.y1 = 0;
        intel_state->clip.x2 =
-               crtc_state->base.active ? crtc_state->pipe_src_w : 0;
+               crtc_state->base.enable ? crtc_state->pipe_src_w : 0;
        intel_state->clip.y2 =
-               crtc_state->base.active ? crtc_state->pipe_src_h : 0;
+               crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
 
        if (state->fb && intel_rotation_90_or_270(state->rotation)) {
                if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -194,8 +194,16 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
        struct intel_plane *intel_plane = to_intel_plane(plane);
        struct intel_plane_state *intel_state =
                to_intel_plane_state(plane->state);
+       struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+       struct drm_crtc_state *crtc_state =
+               drm_atomic_get_existing_crtc_state(old_state->state, crtc);
 
-       intel_plane->commit_plane(plane, intel_state);
+       if (intel_state->visible)
+               intel_plane->update_plane(plane,
+                                         to_intel_crtc_state(crtc_state),
+                                         intel_state);
+       else
+               intel_plane->disable_plane(plane, crtc);
 }
 
 const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
index eba3e0f87181edbf3b4749f24709a35f2c83a8a9..bf62a19c8f690cd6682234765475436db961ffc4 100644 (file)
 #include "i915_drv.h"
 #include "intel_bios.h"
 
+/**
+ * DOC: Video BIOS Table (VBT)
+ *
+ * The Video BIOS Table, or VBT, provides platform and board specific
+ * configuration information to the driver that is not discoverable or available
+ * through other means. The configuration is mostly related to display
+ * hardware. The VBT is available via the ACPI OpRegion or, on older systems, in
+ * the PCI ROM.
+ *
+ * The VBT consists of a VBT Header (defined as &struct vbt_header), a BDB
+ * Header (&struct bdb_header), and a number of BIOS Data Blocks (BDB) that
+ * contain the actual configuration information. The VBT Header, and thus the
+ * VBT, begins with "$VBT" signature. The VBT Header contains the offset of the
+ * BDB Header. The data blocks are concatenated after the BDB Header. The data
+ * blocks have a 1-byte Block ID, 2-byte Block Size, and Block Size bytes of
+ * data. (Block 53, the MIPI Sequence Block is an exception.)
+ *
+ * The driver parses the VBT during load. The relevant information is stored in
+ * driver private data for ease of use, and the actual VBT is not read after
+ * that.
+ */
+
 #define        SLAVE_ADDR1     0x70
 #define        SLAVE_ADDR2     0x72
 
 static int panel_type;
 
+/* Get BDB block size given a pointer to Block ID. */
+static u32 _get_blocksize(const u8 *block_base)
+{
+       /* The MIPI Sequence Block v3+ has a separate size field. */
+       if (*block_base == BDB_MIPI_SEQUENCE && *(block_base + 3) >= 3)
+               return *((const u32 *)(block_base + 4));
+       else
+               return *((const u16 *)(block_base + 1));
+}
+
+/* Get BDB block size give a pointer to data after Block ID and Block Size. */
+static u32 get_blocksize(const void *block_data)
+{
+       return _get_blocksize(block_data - 3);
+}
+
 static const void *
 find_section(const void *_bdb, int section_id)
 {
@@ -52,14 +90,8 @@ find_section(const void *_bdb, int section_id)
        /* walk the sections looking for section_id */
        while (index + 3 < total) {
                current_id = *(base + index);
-               index++;
-
-               current_size = *((const u16 *)(base + index));
-               index += 2;
-
-               /* The MIPI Sequence Block v3+ has a separate size field. */
-               if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
-                       current_size = *((const u32 *)(base + index + 1));
+               current_size = _get_blocksize(base + index);
+               index += 3;
 
                if (index + current_size > total)
                        return NULL;
@@ -73,16 +105,6 @@ find_section(const void *_bdb, int section_id)
        return NULL;
 }
 
-static u16
-get_blocksize(const void *p)
-{
-       u16 *block_ptr, block_size;
-
-       block_ptr = (u16 *)((char *)p - 2);
-       block_size = *block_ptr;
-       return block_size;
-}
-
 static void
 fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
                        const struct lvds_dvo_timing *dvo_timing)
@@ -675,84 +697,13 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
        dev_priv->vbt.psr.tp2_tp3_wakeup_time = psr_table->tp2_tp3_wakeup_time;
 }
 
-static u8 *goto_next_sequence(u8 *data, int *size)
-{
-       u16 len;
-       int tmp = *size;
-
-       if (--tmp < 0)
-               return NULL;
-
-       /* goto first element */
-       data++;
-       while (1) {
-               switch (*data) {
-               case MIPI_SEQ_ELEM_SEND_PKT:
-                       /*
-                        * skip by this element payload size
-                        * skip elem id, command flag and data type
-                        */
-                       tmp -= 5;
-                       if (tmp < 0)
-                               return NULL;
-
-                       data += 3;
-                       len = *((u16 *)data);
-
-                       tmp -= len;
-                       if (tmp < 0)
-                               return NULL;
-
-                       /* skip by len */
-                       data = data + 2 + len;
-                       break;
-               case MIPI_SEQ_ELEM_DELAY:
-                       /* skip by elem id, and delay is 4 bytes */
-                       tmp -= 5;
-                       if (tmp < 0)
-                               return NULL;
-
-                       data += 5;
-                       break;
-               case MIPI_SEQ_ELEM_GPIO:
-                       tmp -= 3;
-                       if (tmp < 0)
-                               return NULL;
-
-                       data += 3;
-                       break;
-               default:
-                       DRM_ERROR("Unknown element\n");
-                       return NULL;
-               }
-
-               /* end of sequence ? */
-               if (*data == 0)
-                       break;
-       }
-
-       /* goto next sequence or end of block byte */
-       if (--tmp < 0)
-               return NULL;
-
-       data++;
-
-       /* update amount of data left for the sequence block to be parsed */
-       *size = tmp;
-       return data;
-}
-
 static void
-parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
+parse_mipi_config(struct drm_i915_private *dev_priv,
+                 const struct bdb_header *bdb)
 {
        const struct bdb_mipi_config *start;
-       const struct bdb_mipi_sequence *sequence;
        const struct mipi_config *config;
        const struct mipi_pps_data *pps;
-       u8 *data;
-       const u8 *seq_data;
-       int i, panel_id, seq_size;
-       u16 block_size;
 
        /* parse MIPI blocks only if LFP type is MIPI */
        if (!dev_priv->vbt.has_mipi)
@@ -798,104 +749,233 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 
        /* We have mandatory mipi config blocks. Initialize as generic panel */
        dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
+}
 
-       /* Check if we have sequence block as well */
-       sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
-       if (!sequence) {
-               DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
-               return;
+/* Find the sequence block and size for the given panel. */
+static const u8 *
+find_panel_sequence_block(const struct bdb_mipi_sequence *sequence,
+                         u16 panel_id, u32 *seq_size)
+{
+       u32 total = get_blocksize(sequence);
+       const u8 *data = &sequence->data[0];
+       u8 current_id;
+       u32 current_size;
+       int header_size = sequence->version >= 3 ? 5 : 3;
+       int index = 0;
+       int i;
+
+       /* skip new block size */
+       if (sequence->version >= 3)
+               data += 4;
+
+       for (i = 0; i < MAX_MIPI_CONFIGURATIONS && index < total; i++) {
+               if (index + header_size > total) {
+                       DRM_ERROR("Invalid sequence block (header)\n");
+                       return NULL;
+               }
+
+               current_id = *(data + index);
+               if (sequence->version >= 3)
+                       current_size = *((const u32 *)(data + index + 1));
+               else
+                       current_size = *((const u16 *)(data + index + 1));
+
+               index += header_size;
+
+               if (index + current_size > total) {
+                       DRM_ERROR("Invalid sequence block\n");
+                       return NULL;
+               }
+
+               if (current_id == panel_id) {
+                       *seq_size = current_size;
+                       return data + index;
+               }
+
+               index += current_size;
        }
 
-       /* Fail gracefully for forward incompatible sequence block. */
-       if (sequence->version >= 3) {
-               DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
-               return;
+       DRM_ERROR("Sequence block detected but no valid configuration\n");
+
+       return NULL;
+}
+
+static int goto_next_sequence(const u8 *data, int index, int total)
+{
+       u16 len;
+
+       /* Skip Sequence Byte. */
+       for (index = index + 1; index < total; index += len) {
+               u8 operation_byte = *(data + index);
+               index++;
+
+               switch (operation_byte) {
+               case MIPI_SEQ_ELEM_END:
+                       return index;
+               case MIPI_SEQ_ELEM_SEND_PKT:
+                       if (index + 4 > total)
+                               return 0;
+
+                       len = *((const u16 *)(data + index + 2)) + 4;
+                       break;
+               case MIPI_SEQ_ELEM_DELAY:
+                       len = 4;
+                       break;
+               case MIPI_SEQ_ELEM_GPIO:
+                       len = 2;
+                       break;
+               case MIPI_SEQ_ELEM_I2C:
+                       if (index + 7 > total)
+                               return 0;
+                       len = *(data + index + 6) + 7;
+                       break;
+               default:
+                       DRM_ERROR("Unknown operation byte\n");
+                       return 0;
+               }
        }
 
-       DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
+       return 0;
+}
 
-       block_size = get_blocksize(sequence);
+static int goto_next_sequence_v3(const u8 *data, int index, int total)
+{
+       int seq_end;
+       u16 len;
+       u32 size_of_sequence;
 
        /*
-        * parse the sequence block for individual sequences
+        * Could skip sequence based on Size of Sequence alone, but also do some
+        * checking on the structure.
         */
-       dev_priv->vbt.dsi.seq_version = sequence->version;
+       if (total < 5) {
+               DRM_ERROR("Too small sequence size\n");
+               return 0;
+       }
 
-       seq_data = &sequence->data[0];
+       /* Skip Sequence Byte. */
+       index++;
 
        /*
-        * sequence block is variable length and hence we need to parse and
-        * get the sequence data for specific panel id
+        * Size of Sequence. Excludes the Sequence Byte and the size itself,
+        * includes MIPI_SEQ_ELEM_END byte, excludes the final MIPI_SEQ_END
+        * byte.
         */
-       for (i = 0; i < MAX_MIPI_CONFIGURATIONS; i++) {
-               panel_id = *seq_data;
-               seq_size = *((u16 *) (seq_data + 1));
-               if (panel_id == panel_type)
-                       break;
+       size_of_sequence = *((const uint32_t *)(data + index));
+       index += 4;
 
-               /* skip the sequence including seq header of 3 bytes */
-               seq_data = seq_data + 3 + seq_size;
-               if ((seq_data - &sequence->data[0]) > block_size) {
-                       DRM_ERROR("Sequence start is beyond sequence block size, corrupted sequence block\n");
-                       return;
+       seq_end = index + size_of_sequence;
+       if (seq_end > total) {
+               DRM_ERROR("Invalid sequence size\n");
+               return 0;
+       }
+
+       for (; index < total; index += len) {
+               u8 operation_byte = *(data + index);
+               index++;
+
+               if (operation_byte == MIPI_SEQ_ELEM_END) {
+                       if (index != seq_end) {
+                               DRM_ERROR("Invalid element structure\n");
+                               return 0;
+                       }
+                       return index;
+               }
+
+               len = *(data + index);
+               index++;
+
+               /*
+                * FIXME: Would be nice to check elements like for v1/v2 in
+                * goto_next_sequence() above.
+                */
+               switch (operation_byte) {
+               case MIPI_SEQ_ELEM_SEND_PKT:
+               case MIPI_SEQ_ELEM_DELAY:
+               case MIPI_SEQ_ELEM_GPIO:
+               case MIPI_SEQ_ELEM_I2C:
+               case MIPI_SEQ_ELEM_SPI:
+               case MIPI_SEQ_ELEM_PMIC:
+                       break;
+               default:
+                       DRM_ERROR("Unknown operation byte %u\n",
+                                 operation_byte);
+                       break;
                }
        }
 
-       if (i == MAX_MIPI_CONFIGURATIONS) {
-               DRM_ERROR("Sequence block detected but no valid configuration\n");
+       return 0;
+}
+
+static void
+parse_mipi_sequence(struct drm_i915_private *dev_priv,
+                   const struct bdb_header *bdb)
+{
+       const struct bdb_mipi_sequence *sequence;
+       const u8 *seq_data;
+       u32 seq_size;
+       u8 *data;
+       int index = 0;
+
+       /* Only our generic panel driver uses the sequence block. */
+       if (dev_priv->vbt.dsi.panel_id != MIPI_DSI_GENERIC_PANEL_ID)
+               return;
+
+       sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
+       if (!sequence) {
+               DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
                return;
        }
 
-       /* check if found sequence is completely within the sequence block
-        * just being paranoid */
-       if (seq_size > block_size) {
-               DRM_ERROR("Corrupted sequence/size, bailing out\n");
+       /* Fail gracefully for forward incompatible sequence block. */
+       if (sequence->version >= 4) {
+               DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
+                         sequence->version);
                return;
        }
 
-       /* skip the panel id(1 byte) and seq size(2 bytes) */
-       dev_priv->vbt.dsi.data = kmemdup(seq_data + 3, seq_size, GFP_KERNEL);
-       if (!dev_priv->vbt.dsi.data)
+       DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
+
+       seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
+       if (!seq_data)
                return;
 
-       /*
-        * loop into the sequence data and split into multiple sequneces
-        * There are only 5 types of sequences as of now
-        */
-       data = dev_priv->vbt.dsi.data;
-       dev_priv->vbt.dsi.size = seq_size;
+       data = kmemdup(seq_data, seq_size, GFP_KERNEL);
+       if (!data)
+               return;
 
-       /* two consecutive 0x00 indicate end of all sequences */
-       while (1) {
-               int seq_id = *data;
-               if (MIPI_SEQ_MAX > seq_id && seq_id > MIPI_SEQ_UNDEFINED) {
-                       dev_priv->vbt.dsi.sequence[seq_id] = data;
-                       DRM_DEBUG_DRIVER("Found mipi sequence - %d\n", seq_id);
-               } else {
-                       DRM_ERROR("undefined sequence\n");
+       /* Parse the sequences, store pointers to each sequence. */
+       for (;;) {
+               u8 seq_id = *(data + index);
+               if (seq_id == MIPI_SEQ_END)
+                       break;
+
+               if (seq_id >= MIPI_SEQ_MAX) {
+                       DRM_ERROR("Unknown sequence %u\n", seq_id);
                        goto err;
                }
 
-               /* partial parsing to skip elements */
-               data = goto_next_sequence(data, &seq_size);
+               dev_priv->vbt.dsi.sequence[seq_id] = data + index;
 
-               if (data == NULL) {
-                       DRM_ERROR("Sequence elements going beyond block itself. Sequence block parsing failed\n");
+               if (sequence->version >= 3)
+                       index = goto_next_sequence_v3(data, index, seq_size);
+               else
+                       index = goto_next_sequence(data, index, seq_size);
+               if (!index) {
+                       DRM_ERROR("Invalid sequence %u\n", seq_id);
                        goto err;
                }
-
-               if (*data == 0)
-                       break; /* end of sequence reached */
        }
 
-       DRM_DEBUG_DRIVER("MIPI related vbt parsing complete\n");
+       dev_priv->vbt.dsi.data = data;
+       dev_priv->vbt.dsi.size = seq_size;
+       dev_priv->vbt.dsi.seq_version = sequence->version;
+
+       DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
        return;
-err:
-       kfree(dev_priv->vbt.dsi.data);
-       dev_priv->vbt.dsi.data = NULL;
 
-       /* error during parsing so set all pointers to null
-        * because of partial parsing */
+err:
+       kfree(data);
        memset(dev_priv->vbt.dsi.sequence, 0, sizeof(dev_priv->vbt.dsi.sequence));
 }
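
The goto_next_sequence() loop added above skips one sequence element at a time, deriving each element's length from its operation byte and bailing out on anything malformed. Below is a condensed userspace sketch of the same walk for the v1/v2 element set; the buffer contents and the field names in the comments are illustrative rather than taken from the VBT specification.

#include <stdint.h>
#include <stdio.h>

enum { ELEM_END = 0, ELEM_SEND_PKT, ELEM_DELAY, ELEM_GPIO };

/*
 * Return the index just past the sequence starting at 'index', or 0 on a
 * malformed sequence, mirroring the bounds checks in the parser above.
 */
static int skip_sequence(const uint8_t *data, int index, int total)
{
	uint16_t len;

	/* Skip the sequence id byte, then walk the elements. */
	for (index = index + 1; index < total; index += len) {
		uint8_t op = data[index++];

		switch (op) {
		case ELEM_END:
			return index;
		case ELEM_SEND_PKT:
			if (index + 4 > total)
				return 0;
			/* roughly: flags + type + 16-bit payload length + payload */
			len = (uint16_t)(data[index + 2] | (data[index + 3] << 8)) + 4;
			break;
		case ELEM_DELAY:
			len = 4;	/* 32-bit delay value */
			break;
		case ELEM_GPIO:
			len = 2;	/* GPIO index + value */
			break;
		default:
			return 0;	/* unknown operation byte */
		}
	}

	return 0;
}

int main(void)
{
	/* sequence id, DELAY (4 bytes), GPIO (2 bytes), END */
	const uint8_t seq[] = { 3, ELEM_DELAY, 1, 0, 0, 0, ELEM_GPIO, 5, 1, ELEM_END };

	printf("next sequence at index %d\n",
	       skip_sequence(seq, 0, (int)sizeof(seq)));
	return 0;
}

The v3+ variant in goto_next_sequence_v3() above can rely on the explicit Size of Sequence field instead, which is why it only sanity-checks the element structure rather than deriving every length.
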
 
@@ -1088,7 +1168,12 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
                DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
                return;
        }
-       if (bdb->version < 195) {
+       if (bdb->version < 106) {
+               expected_size = 22;
+       } else if (bdb->version < 109) {
+               expected_size = 27;
+       } else if (bdb->version < 195) {
+               BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
                expected_size = sizeof(struct old_child_dev_config);
        } else if (bdb->version == 195) {
                expected_size = 37;
@@ -1101,18 +1186,18 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
                                 bdb->version, expected_size);
        }
 
-       /* The legacy sized child device config is the minimum we need. */
-       if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
-               DRM_ERROR("Child device config size %u is too small.\n",
-                         p_defs->child_dev_size);
-               return;
-       }
-
        /* Flag an error for unexpected size, but continue anyway. */
        if (p_defs->child_dev_size != expected_size)
                DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
                          p_defs->child_dev_size, expected_size, bdb->version);
 
+       /* The legacy sized child device config is the minimum we need. */
+       if (p_defs->child_dev_size < sizeof(struct old_child_dev_config)) {
+               DRM_DEBUG_KMS("Child device config size %u is too small.\n",
+                             p_defs->child_dev_size);
+               return;
+       }
+
        /* get the block size of general definitions */
        block_size = get_blocksize(p_defs);
        /* get the number of child device */
@@ -1285,7 +1370,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
 
 /**
  * intel_bios_init - find VBT and initialize settings from the BIOS
- * @dev: DRM device
+ * @dev_priv: i915 device instance
  *
  * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
  * to appropriate values.
@@ -1337,7 +1422,8 @@ intel_bios_init(struct drm_i915_private *dev_priv)
        parse_driver_features(dev_priv, bdb);
        parse_edp(dev_priv, bdb);
        parse_psr(dev_priv, bdb);
-       parse_mipi(dev_priv, bdb);
+       parse_mipi_config(dev_priv, bdb);
+       parse_mipi_sequence(dev_priv, bdb);
        parse_ddi_ports(dev_priv, bdb);
 
        if (bios)
index 54eac1003a1e1669a5eebfeba7e070047aca0ba8..350d4e0f75a4ea639ab2720734f0b03296188779 100644 (file)
  *
  */
 
-#ifndef _I830_BIOS_H_
-#define _I830_BIOS_H_
-
+#ifndef _INTEL_BIOS_H_
+#define _INTEL_BIOS_H_
+
+/**
+ * struct vbt_header - VBT Header structure
+ * @signature:         VBT signature, always starts with "$VBT"
+ * @version:           Version of this structure
+ * @header_size:       Size of this structure
+ * @vbt_size:          Size of VBT (VBT Header, BDB Header and data blocks)
+ * @vbt_checksum:      Checksum
+ * @reserved0:         Reserved
+ * @bdb_offset:                Offset of &struct bdb_header from beginning of VBT
+ * @aim_offset:                Offsets of add-in data blocks from beginning of VBT
+ */
 struct vbt_header {
-       u8 signature[20];               /**< Always starts with 'VBT$' */
-       u16 version;                    /**< decimal */
-       u16 header_size;                /**< in bytes */
-       u16 vbt_size;                   /**< in bytes */
+       u8 signature[20];
+       u16 version;
+       u16 header_size;
+       u16 vbt_size;
        u8 vbt_checksum;
        u8 reserved0;
-       u32 bdb_offset;                 /**< from beginning of VBT */
-       u32 aim_offset[4];              /**< from beginning of VBT */
+       u32 bdb_offset;
+       u32 aim_offset[4];
 } __packed;
 
+/**
+ * struct bdb_header - BDB Header structure
+ * @signature:         BDB signature "BIOS_DATA_BLOCK"
+ * @version:           Version of the data block definitions
+ * @header_size:       Size of this structure
+ * @bdb_size:          Size of BDB (BDB Header and data blocks)
+ */
 struct bdb_header {
-       u8 signature[16];               /**< Always 'BIOS_DATA_BLOCK' */
-       u16 version;                    /**< decimal */
-       u16 header_size;                /**< in bytes */
-       u16 bdb_size;                   /**< in bytes */
+       u8 signature[16];
+       u16 version;
+       u16 header_size;
+       u16 bdb_size;
 } __packed;
 
 /* strictly speaking, this is a "skip" block, but it has interesting info */
@@ -936,21 +954,29 @@ struct bdb_mipi_sequence {
 
 /* MIPI Sequence Block definitions */
 enum mipi_seq {
-       MIPI_SEQ_UNDEFINED = 0,
+       MIPI_SEQ_END = 0,
        MIPI_SEQ_ASSERT_RESET,
        MIPI_SEQ_INIT_OTP,
        MIPI_SEQ_DISPLAY_ON,
        MIPI_SEQ_DISPLAY_OFF,
        MIPI_SEQ_DEASSERT_RESET,
+       MIPI_SEQ_BACKLIGHT_ON,          /* sequence block v2+ */
+       MIPI_SEQ_BACKLIGHT_OFF,         /* sequence block v2+ */
+       MIPI_SEQ_TEAR_ON,               /* sequence block v2+ */
+       MIPI_SEQ_TEAR_OFF,              /* sequence block v3+ */
+       MIPI_SEQ_POWER_ON,              /* sequence block v3+ */
+       MIPI_SEQ_POWER_OFF,             /* sequence block v3+ */
        MIPI_SEQ_MAX
 };
 
 enum mipi_seq_element {
-       MIPI_SEQ_ELEM_UNDEFINED = 0,
+       MIPI_SEQ_ELEM_END = 0,
        MIPI_SEQ_ELEM_SEND_PKT,
        MIPI_SEQ_ELEM_DELAY,
        MIPI_SEQ_ELEM_GPIO,
-       MIPI_SEQ_ELEM_STATUS,
+       MIPI_SEQ_ELEM_I2C,              /* sequence block v2+ */
+       MIPI_SEQ_ELEM_SPI,              /* sequence block v3+ */
+       MIPI_SEQ_ELEM_PMIC,             /* sequence block v3+ */
        MIPI_SEQ_ELEM_MAX
 };
 
@@ -965,4 +991,4 @@ enum mipi_gpio_pin_index {
        MIPI_GPIO_MAX
 };
 
-#endif /* _I830_BIOS_H_ */
+#endif /* _INTEL_BIOS_H_ */
index 9bb63a85997a4ffbf31da0e8dbd211786f2e626c..2a7ec3141c8dc0d4f277d7c3f8441488b2776a44 100644 (file)
@@ -44,6 +44,8 @@
 #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
 #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
 
+#define FIRMWARE_URL  "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
+
 MODULE_FIRMWARE(I915_CSR_SKL);
 MODULE_FIRMWARE(I915_CSR_BXT);
 
@@ -177,7 +179,8 @@ static const struct stepping_info kbl_stepping_info[] = {
 static const struct stepping_info skl_stepping_info[] = {
        {'A', '0'}, {'B', '0'}, {'C', '0'},
        {'D', '0'}, {'E', '0'}, {'F', '0'},
-       {'G', '0'}, {'H', '0'}, {'I', '0'}
+       {'G', '0'}, {'H', '0'}, {'I', '0'},
+       {'J', '0'}, {'K', '0'}
 };
 
 static const struct stepping_info bxt_stepping_info[] = {
@@ -278,10 +281,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
        csr->version = css_header->version;
 
-       if (IS_SKYLAKE(dev) && csr->version < SKL_CSR_VERSION_REQUIRED) {
+       if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
+           csr->version < SKL_CSR_VERSION_REQUIRED) {
                DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
                         " please upgrade to v%u.%u or later"
-                        " [https://01.org/linuxgraphics/intel-linux-graphics-firmwares].\n",
+                          " [" FIRMWARE_URL "].\n",
                         CSR_VERSION_MAJOR(csr->version),
                         CSR_VERSION_MINOR(csr->version),
                         CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
@@ -399,7 +403,10 @@ out:
                         CSR_VERSION_MAJOR(csr->version),
                         CSR_VERSION_MINOR(csr->version));
        } else {
-               DRM_ERROR("Failed to load DMC firmware, disabling rpm\n");
+               dev_notice(dev_priv->dev->dev,
+                          "Failed to load DMC firmware"
+                          " [" FIRMWARE_URL "],"
+                          " disabling runtime power management.\n");
        }
 
        release_firmware(fw);
@@ -421,7 +428,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
        if (!HAS_CSR(dev_priv))
                return;
 
-       if (IS_SKYLAKE(dev_priv))
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
                csr->fw_path = I915_CSR_SKL;
        else if (IS_BROXTON(dev_priv))
                csr->fw_path = I915_CSR_BXT;
index e6408e5583d7a88af4511c6ec2334db2b7922555..cdf2e14aa45daf46f5215732e41ec5c205c4ab3a 100644 (file)
@@ -133,38 +133,38 @@ static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
        { 0x00002016, 0x000000A0, 0x0 },
        { 0x00005012, 0x0000009B, 0x0 },
        { 0x00007011, 0x00000088, 0x0 },
-       { 0x80009010, 0x000000C0, 0x1 },        /* Uses I_boost level 0x1 */
+       { 0x80009010, 0x000000C0, 0x1 },
        { 0x00002016, 0x0000009B, 0x0 },
        { 0x00005012, 0x00000088, 0x0 },
-       { 0x80007011, 0x000000C0, 0x1 },        /* Uses I_boost level 0x1 */
+       { 0x80007011, 0x000000C0, 0x1 },
        { 0x00002016, 0x000000DF, 0x0 },
-       { 0x80005012, 0x000000C0, 0x1 },        /* Uses I_boost level 0x1 */
+       { 0x80005012, 0x000000C0, 0x1 },
 };
 
 /* Skylake U */
 static const struct ddi_buf_trans skl_u_ddi_translations_dp[] = {
        { 0x0000201B, 0x000000A2, 0x0 },
        { 0x00005012, 0x00000088, 0x0 },
-       { 0x00007011, 0x00000087, 0x0 },
-       { 0x80009010, 0x000000C0, 0x1 },        /* Uses I_boost level 0x1 */
+       { 0x80007011, 0x000000CD, 0x0 },
+       { 0x80009010, 0x000000C0, 0x1 },
        { 0x0000201B, 0x0000009D, 0x0 },
-       { 0x80005012, 0x000000C0, 0x1 },        /* Uses I_boost level 0x1 */
-       { 0x80007011, 0x000000C0, 0x1 },        /* Uses I_boost level 0x1 */
+       { 0x80005012, 0x000000C0, 0x1 },
+       { 0x80007011, 0x000000C0, 0x1 },
        { 0x00002016, 0x00000088, 0x0 },
-       { 0x80005012, 0x000000C0, 0x1 },        /* Uses I_boost level 0x1 */
+       { 0x80005012, 0x000000C0, 0x1 },
 };
 
 /* Skylake Y */
 static const struct ddi_buf_trans skl_y_ddi_translations_dp[] = {
        { 0x00000018, 0x000000A2, 0x0 },
        { 0x00005012, 0x00000088, 0x0 },
-       { 0x00007011, 0x00000087, 0x0 },
-       { 0x80009010, 0x000000C0, 0x3 },        /* Uses I_boost level 0x3 */
+       { 0x80007011, 0x000000CD, 0x0 },
+       { 0x80009010, 0x000000C0, 0x3 },
        { 0x00000018, 0x0000009D, 0x0 },
-       { 0x80005012, 0x000000C0, 0x3 },        /* Uses I_boost level 0x3 */
-       { 0x80007011, 0x000000C0, 0x3 },        /* Uses I_boost level 0x3 */
+       { 0x80005012, 0x000000C0, 0x3 },
+       { 0x80007011, 0x000000C0, 0x3 },
        { 0x00000018, 0x00000088, 0x0 },
-       { 0x80005012, 0x000000C0, 0x3 },        /* Uses I_boost level 0x3 */
+       { 0x80005012, 0x000000C0, 0x3 },
 };
 
 /*
@@ -226,26 +226,26 @@ static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
        { 0x00000018, 0x000000A1, 0x0 },
        { 0x00000018, 0x00000098, 0x0 },
        { 0x00004013, 0x00000088, 0x0 },
-       { 0x00006012, 0x00000087, 0x0 },
+       { 0x80006012, 0x000000CD, 0x1 },
        { 0x00000018, 0x000000DF, 0x0 },
-       { 0x00003015, 0x00000087, 0x0 },        /* Default */
-       { 0x00003015, 0x000000C7, 0x0 },
-       { 0x00000018, 0x000000C7, 0x0 },
+       { 0x80003015, 0x000000CD, 0x1 },        /* Default */
+       { 0x80003015, 0x000000C0, 0x1 },
+       { 0x80000018, 0x000000C0, 0x1 },
 };
 
 /* Skylake Y */
 static const struct ddi_buf_trans skl_y_ddi_translations_hdmi[] = {
        { 0x00000018, 0x000000A1, 0x0 },
        { 0x00005012, 0x000000DF, 0x0 },
-       { 0x00007011, 0x00000084, 0x0 },
+       { 0x80007011, 0x000000CB, 0x3 },
        { 0x00000018, 0x000000A4, 0x0 },
        { 0x00000018, 0x0000009D, 0x0 },
        { 0x00004013, 0x00000080, 0x0 },
-       { 0x00006013, 0x000000C7, 0x0 },
+       { 0x80006013, 0x000000C0, 0x3 },
        { 0x00000018, 0x0000008A, 0x0 },
-       { 0x00003015, 0x000000C7, 0x0 },        /* Default */
-       { 0x80003015, 0x000000C7, 0x7 },        /* Uses I_boost level 0x7 */
-       { 0x00000018, 0x000000C7, 0x0 },
+       { 0x80003015, 0x000000C0, 0x3 },        /* Default */
+       { 0x80003015, 0x000000C0, 0x3 },
+       { 0x80000018, 0x000000C0, 0x3 },
 };
 
 struct bxt_ddi_buf_trans {
@@ -301,8 +301,8 @@ static const struct bxt_ddi_buf_trans bxt_ddi_translations_hdmi[] = {
        { 154, 0x9A, 1, 128, true },    /* 9:   1200            0   */
 };
 
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-                                   enum port port, int type);
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
+                                   u32 level, enum port port, int type);
 
 static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
                                 struct intel_digital_port **dig_port,
@@ -342,81 +342,50 @@ enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
        return port;
 }
 
-static bool
-intel_dig_port_supports_hdmi(const struct intel_digital_port *intel_dig_port)
-{
-       return i915_mmio_reg_valid(intel_dig_port->hdmi.hdmi_reg);
-}
-
-static const struct ddi_buf_trans *skl_get_buf_trans_dp(struct drm_device *dev,
-                                                       int *n_entries)
+static const struct ddi_buf_trans *
+skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-       const struct ddi_buf_trans *ddi_translations;
-
-       if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
-               ddi_translations = skl_y_ddi_translations_dp;
+       if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
                *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
-       } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
-               ddi_translations = skl_u_ddi_translations_dp;
+               return skl_y_ddi_translations_dp;
+       } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
                *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
+               return skl_u_ddi_translations_dp;
        } else {
-               ddi_translations = skl_ddi_translations_dp;
                *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+               return skl_ddi_translations_dp;
        }
-
-       return ddi_translations;
 }
 
-static const struct ddi_buf_trans *skl_get_buf_trans_edp(struct drm_device *dev,
-                                                        int *n_entries)
+static const struct ddi_buf_trans *
+skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const struct ddi_buf_trans *ddi_translations;
-
-       if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
-               if (dev_priv->edp_low_vswing) {
-                       ddi_translations = skl_y_ddi_translations_edp;
+       if (dev_priv->edp_low_vswing) {
+               if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
                        *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp);
-               } else {
-                       ddi_translations = skl_y_ddi_translations_dp;
-                       *n_entries = ARRAY_SIZE(skl_y_ddi_translations_dp);
-               }
-       } else if (IS_SKL_ULT(dev) || IS_KBL_ULT(dev)) {
-               if (dev_priv->edp_low_vswing) {
-                       ddi_translations = skl_u_ddi_translations_edp;
+                       return skl_y_ddi_translations_edp;
+               } else if (IS_SKL_ULT(dev_priv) || IS_KBL_ULT(dev_priv)) {
                        *n_entries = ARRAY_SIZE(skl_u_ddi_translations_edp);
+                       return skl_u_ddi_translations_edp;
                } else {
-                       ddi_translations = skl_u_ddi_translations_dp;
-                       *n_entries = ARRAY_SIZE(skl_u_ddi_translations_dp);
-               }
-       } else {
-               if (dev_priv->edp_low_vswing) {
-                       ddi_translations = skl_ddi_translations_edp;
                        *n_entries = ARRAY_SIZE(skl_ddi_translations_edp);
-               } else {
-                       ddi_translations = skl_ddi_translations_dp;
-                       *n_entries = ARRAY_SIZE(skl_ddi_translations_dp);
+                       return skl_ddi_translations_edp;
                }
        }
 
-       return ddi_translations;
+       return skl_get_buf_trans_dp(dev_priv, n_entries);
 }
 
 static const struct ddi_buf_trans *
-skl_get_buf_trans_hdmi(struct drm_device *dev,
-                      int *n_entries)
+skl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
 {
-       const struct ddi_buf_trans *ddi_translations;
-
-       if (IS_SKL_ULX(dev) || IS_KBL_ULX(dev)) {
-               ddi_translations = skl_y_ddi_translations_hdmi;
+       if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) {
                *n_entries = ARRAY_SIZE(skl_y_ddi_translations_hdmi);
+               return skl_y_ddi_translations_hdmi;
        } else {
-               ddi_translations = skl_ddi_translations_hdmi;
                *n_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+               return skl_ddi_translations_hdmi;
        }
-
-       return ddi_translations;
 }
 
 /*
@@ -426,42 +395,52 @@ skl_get_buf_trans_hdmi(struct drm_device *dev,
  * in either FDI or DP modes only, as HDMI connections will work with both
  * of those
  */
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
-                                     bool supports_hdmi)
+void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 iboost_bit = 0;
        int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
            size;
-       int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+       int hdmi_level;
+       enum port port;
        const struct ddi_buf_trans *ddi_translations_fdi;
        const struct ddi_buf_trans *ddi_translations_dp;
        const struct ddi_buf_trans *ddi_translations_edp;
        const struct ddi_buf_trans *ddi_translations_hdmi;
        const struct ddi_buf_trans *ddi_translations;
 
-       if (IS_BROXTON(dev)) {
-               if (!supports_hdmi)
+       port = intel_ddi_get_encoder_port(encoder);
+       hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
+
+       if (IS_BROXTON(dev_priv)) {
+               if (encoder->type != INTEL_OUTPUT_HDMI)
                        return;
 
                /* Vswing programming for HDMI */
-               bxt_ddi_vswing_sequence(dev, hdmi_level, port,
+               bxt_ddi_vswing_sequence(dev_priv, hdmi_level, port,
                                        INTEL_OUTPUT_HDMI);
                return;
-       } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
+       }
+
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
                ddi_translations_fdi = NULL;
                ddi_translations_dp =
-                               skl_get_buf_trans_dp(dev, &n_dp_entries);
+                               skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
                ddi_translations_edp =
-                               skl_get_buf_trans_edp(dev, &n_edp_entries);
+                               skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
                ddi_translations_hdmi =
-                               skl_get_buf_trans_hdmi(dev, &n_hdmi_entries);
+                               skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
                hdmi_default_entry = 8;
                /* If we're boosting the current, set bit 31 of trans1 */
                if (dev_priv->vbt.ddi_port_info[port].hdmi_boost_level ||
                    dev_priv->vbt.ddi_port_info[port].dp_boost_level)
                        iboost_bit = 1<<31;
-       } else if (IS_BROADWELL(dev)) {
+
+               if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+                           port != PORT_A && port != PORT_E &&
+                           n_edp_entries > 9))
+                       n_edp_entries = 9;
+       } else if (IS_BROADWELL(dev_priv)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
                ddi_translations_edp = bdw_ddi_translations_edp;
@@ -470,7 +449,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
                n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
                n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
                hdmi_default_entry = 7;
-       } else if (IS_HASWELL(dev)) {
+       } else if (IS_HASWELL(dev_priv)) {
                ddi_translations_fdi = hsw_ddi_translations_fdi;
                ddi_translations_dp = hsw_ddi_translations_dp;
                ddi_translations_edp = hsw_ddi_translations_dp;
@@ -490,30 +469,18 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
                hdmi_default_entry = 7;
        }
 
-       switch (port) {
-       case PORT_A:
+       switch (encoder->type) {
+       case INTEL_OUTPUT_EDP:
                ddi_translations = ddi_translations_edp;
                size = n_edp_entries;
                break;
-       case PORT_B:
-       case PORT_C:
+       case INTEL_OUTPUT_DISPLAYPORT:
+       case INTEL_OUTPUT_HDMI:
                ddi_translations = ddi_translations_dp;
                size = n_dp_entries;
                break;
-       case PORT_D:
-               if (intel_dp_is_edp(dev, PORT_D)) {
-                       ddi_translations = ddi_translations_edp;
-                       size = n_edp_entries;
-               } else {
-                       ddi_translations = ddi_translations_dp;
-                       size = n_dp_entries;
-               }
-               break;
-       case PORT_E:
-               if (ddi_translations_fdi)
-                       ddi_translations = ddi_translations_fdi;
-               else
-                       ddi_translations = ddi_translations_dp;
+       case INTEL_OUTPUT_ANALOG:
+               ddi_translations = ddi_translations_fdi;
                size = n_dp_entries;
                break;
        default:
@@ -527,7 +494,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
                           ddi_translations[i].trans2);
        }
 
-       if (!supports_hdmi)
+       if (encoder->type != INTEL_OUTPUT_HDMI)
                return;
 
        /* Choose a good default if VBT is badly populated */
@@ -542,37 +509,6 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
                   ddi_translations_hdmi[hdmi_level].trans2);
 }
 
-/* Program DDI buffers translations for DP. By default, program ports A-D in DP
- * mode and port E for FDI.
- */
-void intel_prepare_ddi(struct drm_device *dev)
-{
-       struct intel_encoder *intel_encoder;
-       bool visited[I915_MAX_PORTS] = { 0, };
-
-       if (!HAS_DDI(dev))
-               return;
-
-       for_each_intel_encoder(dev, intel_encoder) {
-               struct intel_digital_port *intel_dig_port;
-               enum port port;
-               bool supports_hdmi;
-
-               if (intel_encoder->type == INTEL_OUTPUT_DSI)
-                       continue;
-
-               ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port);
-               if (visited[port])
-                       continue;
-
-               supports_hdmi = intel_dig_port &&
-                               intel_dig_port_supports_hdmi(intel_dig_port);
-
-               intel_prepare_ddi_buffers(dev, port, supports_hdmi);
-               visited[port] = true;
-       }
-}
-
 static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
                                    enum port port)
 {
@@ -601,8 +537,14 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+       struct intel_encoder *encoder;
        u32 temp, i, rx_ctl_val;
 
+       for_each_encoder_on_crtc(dev, crtc, encoder) {
+               WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
+               intel_prepare_ddi_buffer(encoder);
+       }
+
        /* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
         * mode set "sequence for CRT port" document:
         * - TP1 to TP2 time with the default value
@@ -1589,7 +1531,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
                         DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
                         DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
                         wrpll_params.central_freq;
-       } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+       } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+                  intel_encoder->type == INTEL_OUTPUT_DP_MST) {
                switch (crtc_state->port_clock / 2) {
                case 81000:
                        ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
@@ -1603,8 +1546,10 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
                }
 
                cfgcr1 = cfgcr2 = 0;
-       } else /* eDP */
+       } else if (intel_encoder->type == INTEL_OUTPUT_EDP) {
                return true;
+       } else
+               return false;
 
        memset(&crtc_state->dpll_hw_state, 0,
               sizeof(crtc_state->dpll_hw_state));
@@ -2085,10 +2030,9 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
                           TRANS_CLK_SEL_DISABLED);
 }
 
-static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
-                              enum port port, int type)
+static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
+                              u32 level, enum port port, int type)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        const struct ddi_buf_trans *ddi_translations;
        uint8_t iboost;
        uint8_t dp_iboost, hdmi_iboost;
@@ -2103,21 +2047,26 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
                if (dp_iboost) {
                        iboost = dp_iboost;
                } else {
-                       ddi_translations = skl_get_buf_trans_dp(dev, &n_entries);
+                       ddi_translations = skl_get_buf_trans_dp(dev_priv, &n_entries);
                        iboost = ddi_translations[level].i_boost;
                }
        } else if (type == INTEL_OUTPUT_EDP) {
                if (dp_iboost) {
                        iboost = dp_iboost;
                } else {
-                       ddi_translations = skl_get_buf_trans_edp(dev, &n_entries);
+                       ddi_translations = skl_get_buf_trans_edp(dev_priv, &n_entries);
+
+                       if (WARN_ON(port != PORT_A &&
+                                   port != PORT_E && n_entries > 9))
+                               n_entries = 9;
+
                        iboost = ddi_translations[level].i_boost;
                }
        } else if (type == INTEL_OUTPUT_HDMI) {
                if (hdmi_iboost) {
                        iboost = hdmi_iboost;
                } else {
-                       ddi_translations = skl_get_buf_trans_hdmi(dev, &n_entries);
+                       ddi_translations = skl_get_buf_trans_hdmi(dev_priv, &n_entries);
                        iboost = ddi_translations[level].i_boost;
                }
        } else {
@@ -2142,10 +2091,9 @@ static void skl_ddi_set_iboost(struct drm_device *dev, u32 level,
        I915_WRITE(DISPIO_CR_TX_BMU_CR0, reg);
 }
 
-static void bxt_ddi_vswing_sequence(struct drm_device *dev, u32 level,
-                                   enum port port, int type)
+static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
+                                   u32 level, enum port port, int type)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        const struct bxt_ddi_buf_trans *ddi_translations;
        u32 n_entries, i;
        uint32_t val;
@@ -2260,7 +2208,7 @@ static uint32_t translate_signal_level(int signal_levels)
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = dport->base.base.dev;
+       struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
        struct intel_encoder *encoder = &dport->base;
        uint8_t train_set = intel_dp->train_set[0];
        int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
@@ -2270,10 +2218,10 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
 
        level = translate_signal_level(signal_levels);
 
-       if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
-               skl_ddi_set_iboost(dev, level, port, encoder->type);
-       else if (IS_BROXTON(dev))
-               bxt_ddi_vswing_sequence(dev, level, port, encoder->type);
+       if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+               skl_ddi_set_iboost(dev_priv, level, port, encoder->type);
+       else if (IS_BROXTON(dev_priv))
+               bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
 
        return DDI_BUF_TRANS_SELECT(level);
 }
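
skl_ddi_set_iboost() above resolves the boost value with a simple precedence: a non-zero VBT override wins, otherwise the i_boost field of the translation entry for the selected level is used. A hedged, standalone sketch of that precedence with illustrative table values (the entries below are not the real Skylake numbers):

#include <stdio.h>

struct buf_trans { unsigned int trans1, trans2, i_boost; };

/* Illustrative entries only; "level" indexes into this table. */
static const struct buf_trans trans[] = {
        { 0x00000018, 0x000000A1, 0x0 },
        { 0x80005012, 0x000000C0, 0x3 },
};

/* A VBT-provided boost wins; otherwise fall back to the table's
 * per-level boost, mirroring the dp_iboost/hdmi_iboost checks in
 * skl_ddi_set_iboost(). */
static unsigned int pick_iboost(unsigned int vbt_boost, unsigned int level)
{
        if (vbt_boost)
                return vbt_boost;
        return trans[level].i_boost;
}

int main(void)
{
        printf("no VBT override, level 1 -> 0x%x\n", pick_iboost(0, 1));
        printf("VBT override 0x7        -> 0x%x\n", pick_iboost(0x7, 1));
        return 0;
}
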
@@ -2325,12 +2273,12 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
        struct drm_encoder *encoder = &intel_encoder->base;
-       struct drm_device *dev = encoder->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(encoder->dev);
        struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
        enum port port = intel_ddi_get_encoder_port(intel_encoder);
        int type = intel_encoder->type;
-       int hdmi_level;
+
+       intel_prepare_ddi_buffer(intel_encoder);
 
        if (type == INTEL_OUTPUT_EDP) {
                struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2348,17 +2296,11 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 
                intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
                intel_dp_start_link_train(intel_dp);
-               if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
+               if (port != PORT_A || INTEL_INFO(dev_priv)->gen >= 9)
                        intel_dp_stop_link_train(intel_dp);
        } else if (type == INTEL_OUTPUT_HDMI) {
                struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
-               if (IS_BROXTON(dev)) {
-                       hdmi_level = dev_priv->vbt.
-                               ddi_port_info[port].hdmi_level_shift;
-                       bxt_ddi_vswing_sequence(dev, hdmi_level, port,
-                                       INTEL_OUTPUT_HDMI);
-               }
                intel_hdmi->set_infoframes(encoder,
                                           crtc->config->has_hdmi_sink,
                                           &crtc->config->base.adjusted_mode);
@@ -3282,6 +3224,33 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        struct intel_encoder *intel_encoder;
        struct drm_encoder *encoder;
        bool init_hdmi, init_dp;
+       int max_lanes;
+
+       if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) {
+               switch (port) {
+               case PORT_A:
+                       max_lanes = 4;
+                       break;
+               case PORT_E:
+                       max_lanes = 0;
+                       break;
+               default:
+                       max_lanes = 4;
+                       break;
+               }
+       } else {
+               switch (port) {
+               case PORT_A:
+                       max_lanes = 2;
+                       break;
+               case PORT_E:
+                       max_lanes = 2;
+                       break;
+               default:
+                       max_lanes = 4;
+                       break;
+               }
+       }
 
        init_hdmi = (dev_priv->vbt.ddi_port_info[port].supports_dvi ||
                     dev_priv->vbt.ddi_port_info[port].supports_hdmi);
@@ -3327,9 +3296,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
                if (!(intel_dig_port->saved_port_bits & DDI_A_4_LANES)) {
                        DRM_DEBUG_KMS("BXT BIOS forgot to set DDI_A_4_LANES for port A; fixing\n");
                        intel_dig_port->saved_port_bits |= DDI_A_4_LANES;
+                       max_lanes = 4;
                }
        }
 
+       intel_dig_port->max_lanes = max_lanes;
+
        intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
        intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
        intel_encoder->cloneable = 0;
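
The skl_get_buf_trans_dp/edp/hdmi() rework above keeps one shape throughout: pick the per-SKU table, report its length through an out-parameter, and return the table pointer directly instead of staging it in a local. A minimal standalone sketch of that lookup pattern, using placeholder table contents and a made-up is_ulx flag rather than the real Skylake values and platform checks:

#include <stdio.h>

struct buf_trans { unsigned int trans1, trans2, i_boost; };

/* Placeholder tables; the real values live in intel_ddi.c. */
static const struct buf_trans table_y[] = {
        { 0x00000018, 0x000000A1, 0x0 },
        { 0x00005012, 0x000000DF, 0x0 },
};
static const struct buf_trans table_std[] = {
        { 0x00000018, 0x000000A1, 0x0 },
        { 0x00004013, 0x00000088, 0x0 },
        { 0x80006012, 0x000000CD, 0x1 },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Pick a table for the SKU and hand back its length, mirroring the
 * post-refactor skl_get_buf_trans_hdmi(). */
static const struct buf_trans *get_buf_trans(int is_ulx, int *n_entries)
{
        if (is_ulx) {
                *n_entries = ARRAY_SIZE(table_y);
                return table_y;
        }
        *n_entries = ARRAY_SIZE(table_std);
        return table_std;
}

int main(void)
{
        int n, i;
        const struct buf_trans *t = get_buf_trans(0, &n);

        for (i = 0; i < n; i++)
                printf("level %d: trans1=0x%08x trans2=0x%08x i_boost=0x%x\n",
                       i, t[i].trans1, t[i].trans2, t[i].i_boost);
        return 0;
}
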
index 2f00828ccc6e923a65447424c716d9cc5ee0e99b..836bbdc239b67c9caf53f11094e10883159a461b 100644 (file)
@@ -85,8 +85,6 @@ static const uint32_t intel_cursor_formats[] = {
        DRM_FORMAT_ARGB8888,
 };
 
-static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
-
 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
                                struct intel_crtc_state *pipe_config);
 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
@@ -1152,11 +1150,6 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
        }
 }
 
-static const char *state_string(bool enabled)
-{
-       return enabled ? "on" : "off";
-}
-
 /* Only for pre-ILK configs */
 void assert_pll(struct drm_i915_private *dev_priv,
                enum pipe pipe, bool state)
@@ -1168,7 +1161,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
        cur_state = !!(val & DPLL_VCO_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "PLL state assertion failure (expected %s, current %s)\n",
-            state_string(state), state_string(cur_state));
+                       onoff(state), onoff(cur_state));
 }
 
 /* XXX: the dsi pll is shared between MIPI DSI ports */
@@ -1184,7 +1177,7 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
        cur_state = val & DSI_PLL_VCO_EN;
        I915_STATE_WARN(cur_state != state,
             "DSI PLL state assertion failure (expected %s, current %s)\n",
-            state_string(state), state_string(cur_state));
+                       onoff(state), onoff(cur_state));
 }
 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
@@ -1208,14 +1201,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
        bool cur_state;
        struct intel_dpll_hw_state hw_state;
 
-       if (WARN (!pll,
-                 "asserting DPLL %s with no DPLL\n", state_string(state)))
+       if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
                return;
 
        cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
        I915_STATE_WARN(cur_state != state,
             "%s assertion failure (expected %s, current %s)\n",
-            pll->name, state_string(state), state_string(cur_state));
+                       pll->name, onoff(state), onoff(cur_state));
 }
 
 static void assert_fdi_tx(struct drm_i915_private *dev_priv,
@@ -1235,7 +1227,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
        }
        I915_STATE_WARN(cur_state != state,
             "FDI TX state assertion failure (expected %s, current %s)\n",
-            state_string(state), state_string(cur_state));
+                       onoff(state), onoff(cur_state));
 }
 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
@@ -1250,7 +1242,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
        cur_state = !!(val & FDI_RX_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "FDI RX state assertion failure (expected %s, current %s)\n",
-            state_string(state), state_string(cur_state));
+                       onoff(state), onoff(cur_state));
 }
 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
@@ -1282,7 +1274,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
        cur_state = !!(val & FDI_RX_PLL_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "FDI RX PLL assertion failure (expected %s, current %s)\n",
-            state_string(state), state_string(cur_state));
+                       onoff(state), onoff(cur_state));
 }
 
 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1340,7 +1332,7 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
 
        I915_STATE_WARN(cur_state != state,
             "cursor on pipe %c assertion failure (expected %s, current %s)\n",
-            pipe_name(pipe), state_string(state), state_string(cur_state));
+                       pipe_name(pipe), onoff(state), onoff(cur_state));
 }
 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
@@ -1367,7 +1359,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
 
        I915_STATE_WARN(cur_state != state,
             "pipe %c assertion failure (expected %s, current %s)\n",
-            pipe_name(pipe), state_string(state), state_string(cur_state));
+                       pipe_name(pipe), onoff(state), onoff(cur_state));
 }
 
 static void assert_plane(struct drm_i915_private *dev_priv,
@@ -1380,7 +1372,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
        cur_state = !!(val & DISPLAY_PLANE_ENABLE);
        I915_STATE_WARN(cur_state != state,
             "plane %c assertion failure (expected %s, current %s)\n",
-            plane_name(plane), state_string(state), state_string(cur_state));
+                       plane_name(plane), onoff(state), onoff(cur_state));
 }
 
 #define assert_plane_enabled(d, p) assert_plane(d, p, true)
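
The assert_* hunks above drop the local state_string() helper in favour of a shared onoff(); judging from the removed helper, it presumably maps a bool straight to "on"/"off". A one-liner sketch under that assumption:

#include <stdio.h>
#include <stdbool.h>

/* Assumed semantics of the shared helper replacing state_string(). */
static inline const char *onoff(bool v)
{
        return v ? "on" : "off";
}

int main(void)
{
        printf("expected %s, current %s\n", onoff(true), onoff(false));
        return 0;
}
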
@@ -2153,6 +2145,17 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
 
        I915_WRITE(reg, val | PIPECONF_ENABLE);
        POSTING_READ(reg);
+
+       /*
+        * an apparent vblank timestamp jump, which also messes up the
+        * frame count when it's derived from the timestamps. So let's
+        * frame count when it's derived from the timestamps. So let's
+        * wait for the pipe to start properly before we call
+        * drm_crtc_vblank_on()
+        */
+       if (dev->max_vblank_count == 0 &&
+           wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
+               DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
 }
 
 /**
@@ -2214,67 +2217,75 @@ static bool need_vtd_wa(struct drm_device *dev)
        return false;
 }
 
-unsigned int
-intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
-                 uint64_t fb_format_modifier, unsigned int plane)
+static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
 {
-       unsigned int tile_height;
-       uint32_t pixel_bytes;
+       return IS_GEN2(dev_priv) ? 2048 : 4096;
+}
 
-       switch (fb_format_modifier) {
+static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
+                                    uint64_t fb_modifier, unsigned int cpp)
+{
+       switch (fb_modifier) {
        case DRM_FORMAT_MOD_NONE:
-               tile_height = 1;
-               break;
+               return cpp;
        case I915_FORMAT_MOD_X_TILED:
-               tile_height = IS_GEN2(dev) ? 16 : 8;
-               break;
+               if (IS_GEN2(dev_priv))
+                       return 128;
+               else
+                       return 512;
        case I915_FORMAT_MOD_Y_TILED:
-               tile_height = 32;
-               break;
+               if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
+                       return 128;
+               else
+                       return 512;
        case I915_FORMAT_MOD_Yf_TILED:
-               pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
-               switch (pixel_bytes) {
-               default:
+               switch (cpp) {
                case 1:
-                       tile_height = 64;
-                       break;
+                       return 64;
                case 2:
                case 4:
-                       tile_height = 32;
-                       break;
+                       return 128;
                case 8:
-                       tile_height = 16;
-                       break;
                case 16:
-                       WARN_ONCE(1,
-                                 "128-bit pixels are not supported for display!");
-                       tile_height = 16;
-                       break;
+                       return 256;
+               default:
+                       MISSING_CASE(cpp);
+                       return cpp;
                }
                break;
        default:
-               MISSING_CASE(fb_format_modifier);
-               tile_height = 1;
-               break;
+               MISSING_CASE(fb_modifier);
+               return cpp;
        }
+}
 
-       return tile_height;
+unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
+                              uint64_t fb_modifier, unsigned int cpp)
+{
+       if (fb_modifier == DRM_FORMAT_MOD_NONE)
+               return 1;
+       else
+               return intel_tile_size(dev_priv) /
+                       intel_tile_width(dev_priv, fb_modifier, cpp);
 }
 
 unsigned int
 intel_fb_align_height(struct drm_device *dev, unsigned int height,
-                     uint32_t pixel_format, uint64_t fb_format_modifier)
+                     uint32_t pixel_format, uint64_t fb_modifier)
 {
-       return ALIGN(height, intel_tile_height(dev, pixel_format,
-                                              fb_format_modifier, 0));
+       unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
+       unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
+
+       return ALIGN(height, tile_height);
 }
 
 static void
 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
                        const struct drm_plane_state *plane_state)
 {
-       struct intel_rotation_info *info = &view->params.rotation_info;
-       unsigned int tile_height, tile_pitch;
+       struct drm_i915_private *dev_priv = to_i915(fb->dev);
+       struct intel_rotation_info *info = &view->params.rotated;
+       unsigned int tile_size, tile_width, tile_height, cpp;
 
        *view = i915_ggtt_view_normal;
 
@@ -2292,26 +2303,28 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
        info->uv_offset = fb->offsets[1];
        info->fb_modifier = fb->modifier[0];
 
-       tile_height = intel_tile_height(fb->dev, fb->pixel_format,
-                                       fb->modifier[0], 0);
-       tile_pitch = PAGE_SIZE / tile_height;
-       info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
+       tile_size = intel_tile_size(dev_priv);
+
+       cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+       tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
+       tile_height = tile_size / tile_width;
+
+       info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
        info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
-       info->size = info->width_pages * info->height_pages * PAGE_SIZE;
+       info->size = info->width_pages * info->height_pages * tile_size;
 
        if (info->pixel_format == DRM_FORMAT_NV12) {
-               tile_height = intel_tile_height(fb->dev, fb->pixel_format,
-                                               fb->modifier[0], 1);
-               tile_pitch = PAGE_SIZE / tile_height;
-               info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
-               info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
-                                                    tile_height);
-               info->size_uv = info->width_pages_uv * info->height_pages_uv *
-                               PAGE_SIZE;
+               cpp = drm_format_plane_cpp(fb->pixel_format, 1);
+               tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
+               tile_height = tile_size / tile_width;
+
+               info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
+               info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
+               info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
        }
 }
 
-static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
+static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
 {
        if (INTEL_INFO(dev_priv)->gen >= 9)
                return 256 * 1024;
@@ -2324,6 +2337,25 @@ static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
                return 0;
 }
 
+static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
+                                        uint64_t fb_modifier)
+{
+       switch (fb_modifier) {
+       case DRM_FORMAT_MOD_NONE:
+               return intel_linear_alignment(dev_priv);
+       case I915_FORMAT_MOD_X_TILED:
+               if (INTEL_INFO(dev_priv)->gen >= 9)
+                       return 256 * 1024;
+               return 0;
+       case I915_FORMAT_MOD_Y_TILED:
+       case I915_FORMAT_MOD_Yf_TILED:
+               return 1 * 1024 * 1024;
+       default:
+               MISSING_CASE(fb_modifier);
+               return 0;
+       }
+}
+
 int
 intel_pin_and_fence_fb_obj(struct drm_plane *plane,
                           struct drm_framebuffer *fb,
@@ -2338,29 +2370,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane,
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-       switch (fb->modifier[0]) {
-       case DRM_FORMAT_MOD_NONE:
-               alignment = intel_linear_alignment(dev_priv);
-               break;
-       case I915_FORMAT_MOD_X_TILED:
-               if (INTEL_INFO(dev)->gen >= 9)
-                       alignment = 256 * 1024;
-               else {
-                       /* pin() will align the object as required by fence */
-                       alignment = 0;
-               }
-               break;
-       case I915_FORMAT_MOD_Y_TILED:
-       case I915_FORMAT_MOD_Yf_TILED:
-               if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
-                         "Y tiling bo slipped through, driver bug!\n"))
-                       return -EINVAL;
-               alignment = 1 * 1024 * 1024;
-               break;
-       default:
-               MISSING_CASE(fb->modifier[0]);
-               return -EINVAL;
-       }
+       alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
 
        intel_fill_fb_ggtt_view(&view, fb, plane_state);
 
@@ -2438,22 +2448,27 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
  * is assumed to be a power-of-two. */
-unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
-                                            int *x, int *y,
-                                            unsigned int tiling_mode,
-                                            unsigned int cpp,
-                                            unsigned int pitch)
-{
-       if (tiling_mode != I915_TILING_NONE) {
+u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
+                             int *x, int *y,
+                             uint64_t fb_modifier,
+                             unsigned int cpp,
+                             unsigned int pitch)
+{
+       if (fb_modifier != DRM_FORMAT_MOD_NONE) {
+               unsigned int tile_size, tile_width, tile_height;
                unsigned int tile_rows, tiles;
 
-               tile_rows = *y / 8;
-               *y %= 8;
+               tile_size = intel_tile_size(dev_priv);
+               tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
+               tile_height = tile_size / tile_width;
 
-               tiles = *x / (512/cpp);
-               *x %= 512/cpp;
+               tile_rows = *y / tile_height;
+               *y %= tile_height;
 
-               return tile_rows * pitch * 8 + tiles * 4096;
+               tiles = *x / (tile_width/cpp);
+               *x %= tile_width/cpp;
+
+               return tile_rows * pitch * tile_height + tiles * tile_size;
        } else {
                unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
                unsigned int offset;
@@ -2598,6 +2613,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
        struct drm_plane_state *plane_state = primary->state;
        struct drm_crtc_state *crtc_state = intel_crtc->base.state;
        struct intel_plane *intel_plane = to_intel_plane(primary);
+       struct intel_plane_state *intel_state =
+               to_intel_plane_state(plane_state);
        struct drm_framebuffer *fb;
 
        if (!plane_config->fb)
@@ -2659,6 +2676,15 @@ valid_fb:
        plane_state->crtc_w = fb->width;
        plane_state->crtc_h = fb->height;
 
+       intel_state->src.x1 = plane_state->src_x;
+       intel_state->src.y1 = plane_state->src_y;
+       intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
+       intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
+       intel_state->dst.x1 = plane_state->crtc_x;
+       intel_state->dst.y1 = plane_state->crtc_y;
+       intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
+       intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
+
        obj = intel_fb_obj(fb);
        if (obj->tiling_mode != I915_TILING_NONE)
                dev_priv->preserve_bios_swizzle = true;
@@ -2670,37 +2696,22 @@ valid_fb:
        obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
 }
 
-static void i9xx_update_primary_plane(struct drm_crtc *crtc,
-                                     struct drm_framebuffer *fb,
-                                     int x, int y)
+static void i9xx_update_primary_plane(struct drm_plane *primary,
+                                     const struct intel_crtc_state *crtc_state,
+                                     const struct intel_plane_state *plane_state)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = primary->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_plane *primary = crtc->primary;
-       bool visible = to_intel_plane_state(primary->state)->visible;
-       struct drm_i915_gem_object *obj;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_framebuffer *fb = plane_state->base.fb;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        int plane = intel_crtc->plane;
-       unsigned long linear_offset;
+       u32 linear_offset;
        u32 dspcntr;
        i915_reg_t reg = DSPCNTR(plane);
-       int pixel_size;
-
-       if (!visible || !fb) {
-               I915_WRITE(reg, 0);
-               if (INTEL_INFO(dev)->gen >= 4)
-                       I915_WRITE(DSPSURF(plane), 0);
-               else
-                       I915_WRITE(DSPADDR(plane), 0);
-               POSTING_READ(reg);
-               return;
-       }
-
-       obj = intel_fb_obj(fb);
-       if (WARN_ON(obj == NULL))
-               return;
-
-       pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+       int x = plane_state->src.x1 >> 16;
+       int y = plane_state->src.y1 >> 16;
 
        dspcntr = DISPPLANE_GAMMA_ENABLE;
 
@@ -2714,13 +2725,13 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
                 * which should always be the user's requested size.
                 */
                I915_WRITE(DSPSIZE(plane),
-                          ((intel_crtc->config->pipe_src_h - 1) << 16) |
-                          (intel_crtc->config->pipe_src_w - 1));
+                          ((crtc_state->pipe_src_h - 1) << 16) |
+                          (crtc_state->pipe_src_w - 1));
                I915_WRITE(DSPPOS(plane), 0);
        } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
                I915_WRITE(PRIMSIZE(plane),
-                          ((intel_crtc->config->pipe_src_h - 1) << 16) |
-                          (intel_crtc->config->pipe_src_w - 1));
+                          ((crtc_state->pipe_src_h - 1) << 16) |
+                          (crtc_state->pipe_src_w - 1));
                I915_WRITE(PRIMPOS(plane), 0);
                I915_WRITE(PRIMCNSTALPHA(plane), 0);
        }
@@ -2758,30 +2769,29 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
        if (IS_G4X(dev))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-       linear_offset = y * fb->pitches[0] + x * pixel_size;
+       linear_offset = y * fb->pitches[0] + x * cpp;
 
        if (INTEL_INFO(dev)->gen >= 4) {
                intel_crtc->dspaddr_offset =
-                       intel_gen4_compute_page_offset(dev_priv,
-                                                      &x, &y, obj->tiling_mode,
-                                                      pixel_size,
-                                                      fb->pitches[0]);
+                       intel_compute_tile_offset(dev_priv, &x, &y,
+                                                 fb->modifier[0], cpp,
+                                                 fb->pitches[0]);
                linear_offset -= intel_crtc->dspaddr_offset;
        } else {
                intel_crtc->dspaddr_offset = linear_offset;
        }
 
-       if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+       if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
                dspcntr |= DISPPLANE_ROTATE_180;
 
-               x += (intel_crtc->config->pipe_src_w - 1);
-               y += (intel_crtc->config->pipe_src_h - 1);
+               x += (crtc_state->pipe_src_w - 1);
+               y += (crtc_state->pipe_src_h - 1);
 
                /* Finding the last pixel of the last line of the display
                data and adding to linear_offset*/
                linear_offset +=
-                       (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
-                       (intel_crtc->config->pipe_src_w - 1) * pixel_size;
+                       (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
+                       (crtc_state->pipe_src_w - 1) * cpp;
        }
 
        intel_crtc->adjusted_x = x;
@@ -2800,37 +2810,40 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
        POSTING_READ(reg);
 }
 
-static void ironlake_update_primary_plane(struct drm_crtc *crtc,
-                                         struct drm_framebuffer *fb,
-                                         int x, int y)
+static void i9xx_disable_primary_plane(struct drm_plane *primary,
+                                      struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_plane *primary = crtc->primary;
-       bool visible = to_intel_plane_state(primary->state)->visible;
-       struct drm_i915_gem_object *obj;
        int plane = intel_crtc->plane;
-       unsigned long linear_offset;
-       u32 dspcntr;
-       i915_reg_t reg = DSPCNTR(plane);
-       int pixel_size;
 
-       if (!visible || !fb) {
-               I915_WRITE(reg, 0);
+       I915_WRITE(DSPCNTR(plane), 0);
+       if (INTEL_INFO(dev_priv)->gen >= 4)
                I915_WRITE(DSPSURF(plane), 0);
-               POSTING_READ(reg);
-               return;
-       }
-
-       obj = intel_fb_obj(fb);
-       if (WARN_ON(obj == NULL))
-               return;
+       else
+               I915_WRITE(DSPADDR(plane), 0);
+       POSTING_READ(DSPCNTR(plane));
+}
 
-       pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
+static void ironlake_update_primary_plane(struct drm_plane *primary,
+                                         const struct intel_crtc_state *crtc_state,
+                                         const struct intel_plane_state *plane_state)
+{
+       struct drm_device *dev = primary->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_framebuffer *fb = plane_state->base.fb;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       int plane = intel_crtc->plane;
+       u32 linear_offset;
+       u32 dspcntr;
+       i915_reg_t reg = DSPCNTR(plane);
+       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+       int x = plane_state->src.x1 >> 16;
+       int y = plane_state->src.y1 >> 16;
 
        dspcntr = DISPPLANE_GAMMA_ENABLE;
-
        dspcntr |= DISPLAY_PLANE_ENABLE;
 
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -2865,25 +2878,24 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
        if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
                dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
 
-       linear_offset = y * fb->pitches[0] + x * pixel_size;
+       linear_offset = y * fb->pitches[0] + x * cpp;
        intel_crtc->dspaddr_offset =
-               intel_gen4_compute_page_offset(dev_priv,
-                                              &x, &y, obj->tiling_mode,
-                                              pixel_size,
-                                              fb->pitches[0]);
+               intel_compute_tile_offset(dev_priv, &x, &y,
+                                         fb->modifier[0], cpp,
+                                         fb->pitches[0]);
        linear_offset -= intel_crtc->dspaddr_offset;
-       if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
+       if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
                dspcntr |= DISPPLANE_ROTATE_180;
 
                if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
-                       x += (intel_crtc->config->pipe_src_w - 1);
-                       y += (intel_crtc->config->pipe_src_h - 1);
+                       x += (crtc_state->pipe_src_w - 1);
+                       y += (crtc_state->pipe_src_h - 1);
 
                        /* Finding the last pixel of the last line of the display
                        data and adding to linear_offset*/
                        linear_offset +=
-                               (intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
-                               (intel_crtc->config->pipe_src_w - 1) * pixel_size;
+                               (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
+                               (crtc_state->pipe_src_w - 1) * cpp;
                }
        }
 
@@ -2904,37 +2916,15 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
        POSTING_READ(reg);
 }
 
-u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
-                             uint32_t pixel_format)
+u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
+                             uint64_t fb_modifier, uint32_t pixel_format)
 {
-       u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;
-
-       /*
-        * The stride is either expressed as a multiple of 64 bytes
-        * chunks for linear buffers or in number of tiles for tiled
-        * buffers.
-        */
-       switch (fb_modifier) {
-       case DRM_FORMAT_MOD_NONE:
-               return 64;
-       case I915_FORMAT_MOD_X_TILED:
-               if (INTEL_INFO(dev)->gen == 2)
-                       return 128;
-               return 512;
-       case I915_FORMAT_MOD_Y_TILED:
-               /* No need to check for old gens and Y tiling since this is
-                * about the display engine and those will be blocked before
-                * we get here.
-                */
-               return 128;
-       case I915_FORMAT_MOD_Yf_TILED:
-               if (bits_per_pixel == 8)
-                       return 64;
-               else
-                       return 128;
-       default:
-               MISSING_CASE(fb_modifier);
+       if (fb_modifier == DRM_FORMAT_MOD_NONE) {
                return 64;
+       } else {
+               int cpp = drm_format_plane_cpp(pixel_format, 0);
+
+               return intel_tile_width(dev_priv, fb_modifier, cpp);
        }
 }
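
intel_tile_size(), intel_tile_width(), intel_tile_height(), the stride alignment just above and intel_compute_tile_offset() all hang off one relationship: tile_height = tile_size / tile_width, and the byte offset of the tile containing a pixel is tile_rows * pitch * tile_height + tiles * tile_size. A standalone arithmetic sketch for a gen9-style X-tiled, 32bpp framebuffer (4096-byte tiles, 512-byte tile rows; the coordinates and pitch below are made up):

#include <stdio.h>

/* Gen9-style X-tiling parameters (see intel_tile_size/intel_tile_width). */
#define TILE_SIZE  4096u        /* bytes per tile     */
#define TILE_WIDTH  512u        /* bytes per tile row */

/* Mirror of the intel_compute_tile_offset() arithmetic: returns the byte
 * offset of the tile containing (x, y) and reduces x/y to coordinates
 * inside that tile. */
static unsigned int tile_offset(unsigned int *x, unsigned int *y,
                                unsigned int cpp, unsigned int pitch)
{
        unsigned int tile_height = TILE_SIZE / TILE_WIDTH;      /* 8 rows  */
        unsigned int tile_width_px = TILE_WIDTH / cpp;          /* 128 px  */
        unsigned int tile_rows = *y / tile_height;
        unsigned int tiles = *x / tile_width_px;

        *y %= tile_height;
        *x %= tile_width_px;

        return tile_rows * pitch * tile_height + tiles * TILE_SIZE;
}

int main(void)
{
        unsigned int x = 300, y = 21, cpp = 4, pitch = 4096 * cpp;
        unsigned int off = tile_offset(&x, &y, cpp, pitch);

        printf("tile base offset %u bytes, residual x=%u y=%u\n", off, x, y);
        return 0;
}
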
 
@@ -2946,7 +2936,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
        struct i915_vma *vma;
        u64 offset;
 
-       intel_fill_fb_ggtt_view(&view, intel_plane->base.fb,
+       intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
                                intel_plane->base.state);
 
        vma = i915_gem_obj_to_ggtt_view(obj, &view);
@@ -2957,7 +2947,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
        offset = vma->node.start;
 
        if (plane == 1) {
-               offset += vma->ggtt_view.params.rotation_info.uv_start_page *
+               offset += vma->ggtt_view.params.rotated.uv_start_page *
                          PAGE_SIZE;
        }
 
@@ -3074,36 +3064,30 @@ u32 skl_plane_ctl_rotation(unsigned int rotation)
        return 0;
 }
 
-static void skylake_update_primary_plane(struct drm_crtc *crtc,
-                                        struct drm_framebuffer *fb,
-                                        int x, int y)
+static void skylake_update_primary_plane(struct drm_plane *plane,
+                                        const struct intel_crtc_state *crtc_state,
+                                        const struct intel_plane_state *plane_state)
 {
-       struct drm_device *dev = crtc->dev;
+       struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_plane *plane = crtc->primary;
-       bool visible = to_intel_plane_state(plane->state)->visible;
-       struct drm_i915_gem_object *obj;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+       struct drm_framebuffer *fb = plane_state->base.fb;
+       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        int pipe = intel_crtc->pipe;
        u32 plane_ctl, stride_div, stride;
        u32 tile_height, plane_offset, plane_size;
-       unsigned int rotation;
+       unsigned int rotation = plane_state->base.rotation;
        int x_offset, y_offset;
        u32 surf_addr;
-       struct intel_crtc_state *crtc_state = intel_crtc->config;
-       struct intel_plane_state *plane_state;
-       int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
-       int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
-       int scaler_id = -1;
-
-       plane_state = to_intel_plane_state(plane->state);
-
-       if (!visible || !fb) {
-               I915_WRITE(PLANE_CTL(pipe, 0), 0);
-               I915_WRITE(PLANE_SURF(pipe, 0), 0);
-               POSTING_READ(PLANE_CTL(pipe, 0));
-               return;
-       }
+       int scaler_id = plane_state->scaler_id;
+       int src_x = plane_state->src.x1 >> 16;
+       int src_y = plane_state->src.y1 >> 16;
+       int src_w = drm_rect_width(&plane_state->src) >> 16;
+       int src_h = drm_rect_height(&plane_state->src) >> 16;
+       int dst_x = plane_state->dst.x1;
+       int dst_y = plane_state->dst.y1;
+       int dst_w = drm_rect_width(&plane_state->dst);
+       int dst_h = drm_rect_height(&plane_state->dst);
 
        plane_ctl = PLANE_CTL_ENABLE |
                    PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -3112,41 +3096,27 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
        plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
        plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
        plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-
-       rotation = plane->state->rotation;
        plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-       obj = intel_fb_obj(fb);
-       stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+       stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
                                               fb->pixel_format);
        surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
 
        WARN_ON(drm_rect_width(&plane_state->src) == 0);
 
-       scaler_id = plane_state->scaler_id;
-       src_x = plane_state->src.x1 >> 16;
-       src_y = plane_state->src.y1 >> 16;
-       src_w = drm_rect_width(&plane_state->src) >> 16;
-       src_h = drm_rect_height(&plane_state->src) >> 16;
-       dst_x = plane_state->dst.x1;
-       dst_y = plane_state->dst.y1;
-       dst_w = drm_rect_width(&plane_state->dst);
-       dst_h = drm_rect_height(&plane_state->dst);
-
-       WARN_ON(x != src_x || y != src_y);
-
        if (intel_rotation_90_or_270(rotation)) {
+               int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
                /* stride = Surface height in tiles */
-               tile_height = intel_tile_height(dev, fb->pixel_format,
-                                               fb->modifier[0], 0);
+               tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
                stride = DIV_ROUND_UP(fb->height, tile_height);
-               x_offset = stride * tile_height - y - src_h;
-               y_offset = x;
+               x_offset = stride * tile_height - src_y - src_h;
+               y_offset = src_x;
                plane_size = (src_w - 1) << 16 | (src_h - 1);
        } else {
                stride = fb->pitches[0] / stride_div;
-               x_offset = x;
-               y_offset = y;
+               x_offset = src_x;
+               y_offset = src_y;
                plane_size = (src_h - 1) << 16 | (src_w - 1);
        }
        plane_offset = y_offset << 16 | x_offset;
@@ -3179,20 +3149,27 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
        POSTING_READ(PLANE_SURF(pipe, 0));
 }
 
-/* Assume fb object is pinned & idle & fenced and just update base pointers */
-static int
-intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
-                          int x, int y, enum mode_set_atomic state)
+static void skylake_disable_primary_plane(struct drm_plane *primary,
+                                         struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int pipe = to_intel_crtc(crtc)->pipe;
 
-       if (dev_priv->fbc.deactivate)
-               dev_priv->fbc.deactivate(dev_priv);
+       I915_WRITE(PLANE_CTL(pipe, 0), 0);
+       I915_WRITE(PLANE_SURF(pipe, 0), 0);
+       POSTING_READ(PLANE_SURF(pipe, 0));
+}
 
-       dev_priv->display.update_primary_plane(crtc, fb, x, y);
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+                          int x, int y, enum mode_set_atomic state)
+{
+       /* Support for kgdboc is disabled; this needs a major rework. */
+       DRM_ERROR("legacy panic handler not supported any more.\n");
 
-       return 0;
+       return -ENODEV;
 }
 
 static void intel_complete_page_flips(struct drm_device *dev)
@@ -3219,8 +3196,10 @@ static void intel_update_primary_planes(struct drm_device *dev)
                drm_modeset_lock_crtc(crtc, &plane->base);
                plane_state = to_intel_plane_state(plane->base.state);
 
-               if (crtc->state->active && plane_state->base.fb)
-                       plane->commit_plane(&plane->base, plane_state);
+               if (plane_state->visible)
+                       plane->update_plane(&plane->base,
+                                           to_intel_crtc_state(crtc->state),
+                                           plane_state);
 
                drm_modeset_unlock_crtc(crtc);
        }
@@ -4452,7 +4431,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
                      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
 
        return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
-               &state->scaler_state.scaler_id, DRM_ROTATE_0,
+               &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
                state->pipe_src_w, state->pipe_src_h,
                adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
 }
@@ -4817,7 +4796,7 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
                intel_update_watermarks(&crtc->base);
 
        if (atomic->update_fbc)
-               intel_fbc_update(crtc);
+               intel_fbc_post_update(crtc);
 
        if (atomic->post_enable_primary)
                intel_post_enable_primary(&crtc->base);
@@ -4825,26 +4804,39 @@ static void intel_post_plane_update(struct intel_crtc *crtc)
        memset(atomic, 0, sizeof(*atomic));
 }
 
-static void intel_pre_plane_update(struct intel_crtc *crtc)
+static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
        struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
        struct intel_crtc_state *pipe_config =
                to_intel_crtc_state(crtc->base.state);
+       struct drm_atomic_state *old_state = old_crtc_state->base.state;
+       struct drm_plane *primary = crtc->base.primary;
+       struct drm_plane_state *old_pri_state =
+               drm_atomic_get_existing_plane_state(old_state, primary);
+       bool modeset = needs_modeset(&pipe_config->base);
 
-       if (atomic->disable_fbc)
-               intel_fbc_deactivate(crtc);
+       if (atomic->update_fbc)
+               intel_fbc_pre_update(crtc);
 
-       if (crtc->atomic.disable_ips)
-               hsw_disable_ips(crtc);
+       if (old_pri_state) {
+               struct intel_plane_state *primary_state =
+                       to_intel_plane_state(primary->state);
+               struct intel_plane_state *old_primary_state =
+                       to_intel_plane_state(old_pri_state);
 
-       if (atomic->pre_disable_primary)
-               intel_pre_disable_primary(&crtc->base);
+               if (old_primary_state->visible &&
+                   (modeset || !primary_state->visible))
+                       intel_pre_disable_primary(&crtc->base);
+       }
 
        if (pipe_config->disable_cxsr) {
                crtc->wm.cxsr_allowed = false;
-               intel_set_memory_cxsr(dev_priv, false);
+
+               if (old_crtc_state->base.active)
+                       intel_set_memory_cxsr(dev_priv, false);
        }
 
        if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
@@ -4945,8 +4937,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->config->has_pch_encoder)
                intel_wait_for_vblank(dev, pipe);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
-
-       intel_fbc_enable(intel_crtc);
 }
 
 /* IPS only exists on ULT machines and is tied to pipe A. */
@@ -5059,8 +5049,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
                intel_wait_for_vblank(dev, hsw_workaround_pipe);
                intel_wait_for_vblank(dev, hsw_workaround_pipe);
        }
-
-       intel_fbc_enable(intel_crtc);
 }
 
 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
@@ -5141,8 +5129,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
        }
 
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
-
-       intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void haswell_crtc_disable(struct drm_crtc *crtc)
@@ -5193,8 +5179,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
                intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                                                      true);
        }
-
-       intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void i9xx_pfit_enable(struct intel_crtc *crtc)
@@ -5370,6 +5354,7 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
 
 static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
 {
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_device *dev = state->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long put_domains[I915_MAX_PIPES] = {};
@@ -5383,13 +5368,9 @@ static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
                                modeset_get_crtc_power_domains(crtc);
        }
 
-       if (dev_priv->display.modeset_commit_cdclk) {
-               unsigned int cdclk = to_intel_atomic_state(state)->cdclk;
-
-               if (cdclk != dev_priv->cdclk_freq &&
-                   !WARN_ON(!state->allow_modeset))
-                       dev_priv->display.modeset_commit_cdclk(state);
-       }
+       if (dev_priv->display.modeset_commit_cdclk &&
+           intel_state->dev_cdclk != dev_priv->cdclk_freq)
+               dev_priv->display.modeset_commit_cdclk(state);
 
        for (i = 0; i < I915_MAX_PIPES; i++)
                if (put_domains[i])
@@ -6063,22 +6044,31 @@ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
 static int intel_mode_max_pixclk(struct drm_device *dev,
                                 struct drm_atomic_state *state)
 {
-       struct intel_crtc *intel_crtc;
-       struct intel_crtc_state *crtc_state;
-       int max_pixclk = 0;
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       unsigned max_pixclk = 0, i;
+       enum pipe pipe;
 
-       for_each_intel_crtc(dev, intel_crtc) {
-               crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-               if (IS_ERR(crtc_state))
-                       return PTR_ERR(crtc_state);
+       memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
+              sizeof(intel_state->min_pixclk));
 
-               if (!crtc_state->base.enable)
-                       continue;
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               int pixclk = 0;
+
+               if (crtc_state->enable)
+                       pixclk = crtc_state->adjusted_mode.crtc_clock;
 
-               max_pixclk = max(max_pixclk,
-                                crtc_state->base.adjusted_mode.crtc_clock);
+               intel_state->min_pixclk[i] = pixclk;
        }
 
+       if (!intel_state->active_crtcs)
+               return 0;
+
+       for_each_pipe(dev_priv, pipe)
+               max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
+
        return max_pixclk;
 }
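
The rewritten intel_mode_max_pixclk() above records a per-pipe minimum pixel clock in the atomic state and takes the device-wide maximum over that array, returning 0 when no CRTC is active. A rough standalone illustration of that bookkeeping; the struct, field names and MAX_PIPES value below are made up for the sketch rather than taken from the driver:

#include <stdio.h>

#define MAX_PIPES 3

/* Illustrative stand-in for the per-device pixel clock bookkeeping. */
struct pixclk_state {
	unsigned int min_pixclk[MAX_PIPES]; /* kHz, 0 while the pipe is off */
	unsigned int active_pipes;          /* bitmask of enabled pipes */
};

/* Record one pipe's requirement, then return the maximum across all pipes
 * (0 when nothing is active), mirroring the patched flow. */
static unsigned int update_and_get_max_pixclk(struct pixclk_state *s,
					      int pipe, unsigned int pixclk)
{
	unsigned int max = 0;
	int i;

	s->min_pixclk[pipe] = pixclk;
	if (pixclk)
		s->active_pipes |= 1u << pipe;
	else
		s->active_pipes &= ~(1u << pipe);

	if (!s->active_pipes)
		return 0;

	for (i = 0; i < MAX_PIPES; i++)
		if (s->min_pixclk[i] > max)
			max = s->min_pixclk[i];

	return max;
}

int main(void)
{
	struct pixclk_state s = { { 0 }, 0 };

	printf("%u\n", update_and_get_max_pixclk(&s, 0, 148500)); /* 148500 */
	printf("%u\n", update_and_get_max_pixclk(&s, 1, 594000)); /* 594000 */
	printf("%u\n", update_and_get_max_pixclk(&s, 1, 0));      /* 148500 */
	return 0;
}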
 
@@ -6087,13 +6077,18 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
        struct drm_device *dev = state->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int max_pixclk = intel_mode_max_pixclk(dev, state);
+       struct intel_atomic_state *intel_state =
+               to_intel_atomic_state(state);
 
        if (max_pixclk < 0)
                return max_pixclk;
 
-       to_intel_atomic_state(state)->cdclk =
+       intel_state->cdclk = intel_state->dev_cdclk =
                valleyview_calc_cdclk(dev_priv, max_pixclk);
 
+       if (!intel_state->active_crtcs)
+               intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
+
        return 0;
 }
 
@@ -6102,13 +6097,18 @@ static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
        struct drm_device *dev = state->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int max_pixclk = intel_mode_max_pixclk(dev, state);
+       struct intel_atomic_state *intel_state =
+               to_intel_atomic_state(state);
 
        if (max_pixclk < 0)
                return max_pixclk;
 
-       to_intel_atomic_state(state)->cdclk =
+       intel_state->cdclk = intel_state->dev_cdclk =
                broxton_calc_cdclk(dev_priv, max_pixclk);
 
+       if (!intel_state->active_crtcs)
+               intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0);
+
        return 0;
 }
 
@@ -6151,8 +6151,10 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
-       unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
+       unsigned req_cdclk = old_intel_state->dev_cdclk;
 
        /*
         * FIXME: We can end up here with all power domains off, yet
@@ -6287,8 +6289,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
 
        for_each_encoder_on_crtc(dev, crtc, encoder)
                encoder->enable(encoder);
-
-       intel_fbc_enable(intel_crtc);
 }
 
 static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -6351,8 +6351,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
 
        if (!IS_GEN2(dev))
                intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
-       intel_fbc_disable_crtc(intel_crtc);
 }
 
 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@@ -6376,6 +6374,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 
        dev_priv->display.crtc_disable(crtc);
        intel_crtc->active = false;
+       intel_fbc_disable(intel_crtc);
        intel_update_watermarks(crtc);
        intel_disable_shared_dpll(intel_crtc);
 
@@ -6383,6 +6382,9 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
        for_each_power_domain(domain, domains)
                intel_display_power_put(dev_priv, domain);
        intel_crtc->enabled_power_domains = 0;
+
+       dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
+       dev_priv->min_pixclk[intel_crtc->pipe] = 0;
 }
 
 /*
@@ -7593,26 +7595,34 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
  * in cases where we need the PLL enabled even when @pipe is not going to
  * be enabled.
  */
-void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
-                     const struct dpll *dpll)
+int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+                    const struct dpll *dpll)
 {
        struct intel_crtc *crtc =
                to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
-       struct intel_crtc_state pipe_config = {
-               .base.crtc = &crtc->base,
-               .pixel_multiplier = 1,
-               .dpll = *dpll,
-       };
+       struct intel_crtc_state *pipe_config;
+
+       pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+       if (!pipe_config)
+               return -ENOMEM;
+
+       pipe_config->base.crtc = &crtc->base;
+       pipe_config->pixel_multiplier = 1;
+       pipe_config->dpll = *dpll;
 
        if (IS_CHERRYVIEW(dev)) {
-               chv_compute_dpll(crtc, &pipe_config);
-               chv_prepare_pll(crtc, &pipe_config);
-               chv_enable_pll(crtc, &pipe_config);
+               chv_compute_dpll(crtc, pipe_config);
+               chv_prepare_pll(crtc, pipe_config);
+               chv_enable_pll(crtc, pipe_config);
        } else {
-               vlv_compute_dpll(crtc, &pipe_config);
-               vlv_prepare_pll(crtc, &pipe_config);
-               vlv_enable_pll(crtc, &pipe_config);
+               vlv_compute_dpll(crtc, pipe_config);
+               vlv_prepare_pll(crtc, pipe_config);
+               vlv_enable_pll(crtc, pipe_config);
        }
+
+       kfree(pipe_config);
+
+       return 0;
 }
 
 /**
@@ -9246,7 +9256,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
        fb->width = ((val >> 0) & 0x1fff) + 1;
 
        val = I915_READ(PLANE_STRIDE(pipe, 0));
-       stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
+       stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
                                                fb->pixel_format);
        fb->pitches[0] = (val & 0x3ff) * stride_mult;
 
@@ -9662,14 +9672,14 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
                val |= PCH_LP_PARTITION_LEVEL_DISABLE;
                I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
        }
-
-       intel_prepare_ddi(dev);
 }
 
 static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
-       unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
+       unsigned int req_cdclk = old_intel_state->dev_cdclk;
 
        broxton_set_cdclk(dev, req_cdclk);
 }
@@ -9677,29 +9687,41 @@ static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 /* compute the max rate for new configuration */
 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 {
-       struct intel_crtc *intel_crtc;
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_i915_private *dev_priv = state->dev->dev_private;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *cstate;
        struct intel_crtc_state *crtc_state;
-       int max_pixel_rate = 0;
+       unsigned max_pixel_rate = 0, i;
+       enum pipe pipe;
 
-       for_each_intel_crtc(state->dev, intel_crtc) {
-               int pixel_rate;
+       memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
+              sizeof(intel_state->min_pixclk));
 
-               crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
-               if (IS_ERR(crtc_state))
-                       return PTR_ERR(crtc_state);
+       for_each_crtc_in_state(state, crtc, cstate, i) {
+               int pixel_rate;
 
-               if (!crtc_state->base.enable)
+               crtc_state = to_intel_crtc_state(cstate);
+               if (!crtc_state->base.enable) {
+                       intel_state->min_pixclk[i] = 0;
                        continue;
+               }
 
                pixel_rate = ilk_pipe_pixel_rate(crtc_state);
 
                /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-               if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
+               if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
                        pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
 
-               max_pixel_rate = max(max_pixel_rate, pixel_rate);
+               intel_state->min_pixclk[i] = pixel_rate;
        }
 
+       if (!intel_state->active_crtcs)
+               return 0;
+
+       for_each_pipe(dev_priv, pipe)
+               max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
+
        return max_pixel_rate;
 }
 
@@ -9783,6 +9805,7 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
 {
        struct drm_i915_private *dev_priv = to_i915(state->dev);
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        int max_pixclk = ilk_max_pixel_rate(state);
        int cdclk;
 
@@ -9805,7 +9828,9 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
                return -EINVAL;
        }
 
-       to_intel_atomic_state(state)->cdclk = cdclk;
+       intel_state->cdclk = intel_state->dev_cdclk = cdclk;
+       if (!intel_state->active_crtcs)
+               intel_state->dev_cdclk = 337500;
 
        return 0;
 }
@@ -9813,7 +9838,9 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 {
        struct drm_device *dev = old_state->dev;
-       unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
+       struct intel_atomic_state *old_intel_state =
+               to_intel_atomic_state(old_state);
+       unsigned req_cdclk = old_intel_state->dev_cdclk;
 
        broadwell_set_cdclk(dev, req_cdclk);
 }
@@ -9821,8 +9848,13 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
                                      struct intel_crtc_state *crtc_state)
 {
-       if (!intel_ddi_pll_select(crtc, crtc_state))
-               return -EINVAL;
+       struct intel_encoder *intel_encoder =
+               intel_ddi_get_crtc_new_encoder(crtc_state);
+
+       if (intel_encoder->type != INTEL_OUTPUT_DSI) {
+               if (!intel_ddi_pll_select(crtc, crtc_state))
+                       return -EINVAL;
+       }
 
        crtc->lowfreq_avail = false;
 
@@ -10026,16 +10058,17 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
        return true;
 }
 
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
+                              const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        uint32_t cntl = 0, size = 0;
 
-       if (on) {
-               unsigned int width = intel_crtc->base.cursor->state->crtc_w;
-               unsigned int height = intel_crtc->base.cursor->state->crtc_h;
+       if (plane_state && plane_state->visible) {
+               unsigned int width = plane_state->base.crtc_w;
+               unsigned int height = plane_state->base.crtc_h;
                unsigned int stride = roundup_pow_of_two(width) * 4;
 
                switch (stride) {
@@ -10088,7 +10121,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
        }
 }
 
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
+                              const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -10096,9 +10130,9 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
        int pipe = intel_crtc->pipe;
        uint32_t cntl = 0;
 
-       if (on) {
+       if (plane_state && plane_state->visible) {
                cntl = MCURSOR_GAMMA_ENABLE;
-               switch (intel_crtc->base.cursor->state->crtc_w) {
+               switch (plane_state->base.crtc_w) {
                        case 64:
                                cntl |= CURSOR_MODE_64_ARGB_AX;
                                break;
@@ -10109,17 +10143,17 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
                                cntl |= CURSOR_MODE_256_ARGB_AX;
                                break;
                        default:
-                               MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
+                               MISSING_CASE(plane_state->base.crtc_w);
                                return;
                }
                cntl |= pipe << 28; /* Connect to correct pipe */
 
                if (HAS_DDI(dev))
                        cntl |= CURSOR_PIPE_CSC_ENABLE;
-       }
 
-       if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
-               cntl |= CURSOR_ROTATE_180;
+               if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
+                       cntl |= CURSOR_ROTATE_180;
+       }
 
        if (intel_crtc->cursor_cntl != cntl) {
                I915_WRITE(CURCNTR(pipe), cntl);
@@ -10136,56 +10170,45 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
 
 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
-                                    bool on)
+                                    const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int pipe = intel_crtc->pipe;
-       struct drm_plane_state *cursor_state = crtc->cursor->state;
-       int x = cursor_state->crtc_x;
-       int y = cursor_state->crtc_y;
-       u32 base = 0, pos = 0;
-
-       base = intel_crtc->cursor_addr;
+       u32 base = intel_crtc->cursor_addr;
+       u32 pos = 0;
 
-       if (x >= intel_crtc->config->pipe_src_w)
-               on = false;
+       if (plane_state) {
+               int x = plane_state->base.crtc_x;
+               int y = plane_state->base.crtc_y;
 
-       if (y >= intel_crtc->config->pipe_src_h)
-               on = false;
-
-       if (x < 0) {
-               if (x + cursor_state->crtc_w <= 0)
-                       on = false;
-
-               pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
-               x = -x;
-       }
-       pos |= x << CURSOR_X_SHIFT;
+               if (x < 0) {
+                       pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+                       x = -x;
+               }
+               pos |= x << CURSOR_X_SHIFT;
 
-       if (y < 0) {
-               if (y + cursor_state->crtc_h <= 0)
-                       on = false;
+               if (y < 0) {
+                       pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+                       y = -y;
+               }
+               pos |= y << CURSOR_Y_SHIFT;
 
-               pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
-               y = -y;
+               /* ILK+ do this automagically */
+               if (HAS_GMCH_DISPLAY(dev) &&
+                   plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
+                       base += (plane_state->base.crtc_h *
+                                plane_state->base.crtc_w - 1) * 4;
+               }
        }
-       pos |= y << CURSOR_Y_SHIFT;
 
        I915_WRITE(CURPOS(pipe), pos);
 
-       /* ILK+ do this automagically */
-       if (HAS_GMCH_DISPLAY(dev) &&
-           crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
-               base += (cursor_state->crtc_h *
-                        cursor_state->crtc_w - 1) * 4;
-       }
-
        if (IS_845G(dev) || IS_I865G(dev))
-               i845_update_cursor(crtc, base, on);
+               i845_update_cursor(crtc, base, plane_state);
        else
-               i9xx_update_cursor(crtc, base, on);
+               i9xx_update_cursor(crtc, base, plane_state);
 }
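
intel_crtc_update_cursor() now builds the CURPOS value (and the 180° rotation base offset) from the passed-in plane state rather than from an on/off flag plus the legacy cursor state. The sign-magnitude position encoding it writes can be shown in isolation; the shift and sign-bit constants below are illustrative, not the real register definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: low halfword = X, high halfword = Y, each
 * sign-magnitude with the sign carried in bit 15 of its field. */
#define CUR_X_SHIFT  0
#define CUR_Y_SHIFT  16
#define CUR_SIGN_BIT (1u << 15)

static uint32_t encode_cursor_pos(int x, int y)
{
	uint32_t pos = 0;

	if (x < 0) {
		pos |= CUR_SIGN_BIT << CUR_X_SHIFT;
		x = -x;
	}
	pos |= (uint32_t)x << CUR_X_SHIFT;

	if (y < 0) {
		pos |= CUR_SIGN_BIT << CUR_Y_SHIFT;
		y = -y;
	}
	pos |= (uint32_t)y << CUR_Y_SHIFT;

	return pos;
}

int main(void)
{
	printf("0x%08x\n", encode_cursor_pos(100, 50)); /* 0x00320064 */
	printf("0x%08x\n", encode_cursor_pos(-16, 8));  /* 0x00088010 */
	return 0;
}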
 
 static bool cursor_size_ok(struct drm_device *dev,
@@ -10498,7 +10521,6 @@ retry:
        }
 
        connector_state->crtc = crtc;
-       connector_state->best_encoder = &intel_encoder->base;
 
        crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
        if (IS_ERR(crtc_state)) {
@@ -10594,7 +10616,6 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
                if (IS_ERR(crtc_state))
                        goto fail;
 
-               connector_state->best_encoder = NULL;
                connector_state->crtc = NULL;
 
                crtc_state->base.enable = crtc_state->base.active = false;
@@ -10778,7 +10799,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
        struct drm_display_mode *mode;
-       struct intel_crtc_state pipe_config;
+       struct intel_crtc_state *pipe_config;
        int htot = I915_READ(HTOTAL(cpu_transcoder));
        int hsync = I915_READ(HSYNC(cpu_transcoder));
        int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -10789,6 +10810,12 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
        if (!mode)
                return NULL;
 
+       pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
+       if (!pipe_config) {
+               kfree(mode);
+               return NULL;
+       }
+
        /*
         * Construct a pipe_config sufficient for getting the clock info
         * back out of crtc_clock_get.
@@ -10796,14 +10823,14 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
         * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
         * to use a real value here instead.
         */
-       pipe_config.cpu_transcoder = (enum transcoder) pipe;
-       pipe_config.pixel_multiplier = 1;
-       pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
-       pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
-       pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
-       i9xx_crtc_clock_get(intel_crtc, &pipe_config);
-
-       mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
+       pipe_config->cpu_transcoder = (enum transcoder) pipe;
+       pipe_config->pixel_multiplier = 1;
+       pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
+       pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
+       pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
+       i9xx_crtc_clock_get(intel_crtc, pipe_config);
+
+       mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
        mode->hdisplay = (htot & 0xffff) + 1;
        mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
        mode->hsync_start = (hsync & 0xffff) + 1;
@@ -10815,6 +10842,8 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 
        drm_mode_set_name(mode);
 
+       kfree(pipe_config);
+
        return mode;
 }
 
@@ -10885,6 +10914,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
        mutex_unlock(&dev->struct_mutex);
 
        intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+       intel_fbc_post_update(crtc);
        drm_framebuffer_unreference(work->old_fb);
 
        BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
@@ -11319,13 +11349,12 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
         */
        if (intel_rotation_90_or_270(rotation)) {
                /* stride = Surface height in tiles */
-               tile_height = intel_tile_height(dev, fb->pixel_format,
-                                               fb->modifier[0], 0);
+               tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
                stride = DIV_ROUND_UP(fb->height, tile_height);
        } else {
                stride = fb->pitches[0] /
-                               intel_fb_stride_alignment(dev, fb->modifier[0],
-                                                         fb->pixel_format);
+                       intel_fb_stride_alignment(dev_priv, fb->modifier[0],
+                                                 fb->pixel_format);
        }
 
        /*
@@ -11601,6 +11630,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
        crtc->primary->fb = fb;
        update_state_fb(crtc->primary);
+       intel_fbc_pre_update(intel_crtc);
 
        work->pending_flip_obj = obj;
 
@@ -11660,9 +11690,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                                        obj->last_write_req);
        } else {
                if (!request) {
-                       ret = i915_gem_request_alloc(ring, ring->default_context, &request);
-                       if (ret)
+                       request = i915_gem_request_alloc(ring, NULL);
+                       if (IS_ERR(request)) {
+                               ret = PTR_ERR(request);
                                goto cleanup_unpin;
+                       }
                }
 
                ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
@@ -11683,7 +11715,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                          to_intel_plane(primary)->frontbuffer_bit);
        mutex_unlock(&dev->struct_mutex);
 
-       intel_fbc_deactivate(intel_crtc);
        intel_frontbuffer_flip_prepare(dev,
                                       to_intel_plane(primary)->frontbuffer_bit);
 
@@ -11694,7 +11725,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 cleanup_unpin:
        intel_unpin_fb_obj(fb, crtc->primary->state);
 cleanup_pending:
-       if (request)
+       if (!IS_ERR_OR_NULL(request))
                i915_gem_request_cancel(request);
        atomic_dec(&intel_crtc->unpin_work_count);
        mutex_unlock(&dev->struct_mutex);
@@ -11805,7 +11836,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_plane *plane = plane_state->plane;
        struct drm_device *dev = crtc->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_plane_state *old_plane_state =
                to_intel_plane_state(plane->state);
        int idx = intel_crtc->base.base.id, ret;
@@ -11831,8 +11861,13 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
        if (!was_crtc_enabled && WARN_ON(was_visible))
                was_visible = false;
 
-       if (!is_crtc_enabled && WARN_ON(visible))
-               visible = false;
+       /*
+        * Visibility is calculated as if the crtc was on, but
+        * after scaler setup everything depends on it being off
+        * when the crtc isn't active.
+        */
+       if (!is_crtc_enabled)
+               to_intel_plane_state(plane_state)->visible = visible = false;
 
        if (!was_visible && !visible)
                return 0;
@@ -11866,39 +11901,8 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
 
        switch (plane->type) {
        case DRM_PLANE_TYPE_PRIMARY:
-               intel_crtc->atomic.pre_disable_primary = turn_off;
                intel_crtc->atomic.post_enable_primary = turn_on;
-
-               if (turn_off) {
-                       /*
-                        * FIXME: Actually if we will still have any other
-                        * plane enabled on the pipe we could let IPS enabled
-                        * still, but for now lets consider that when we make
-                        * primary invisible by setting DSPCNTR to 0 on
-                        * update_primary_plane function IPS needs to be
-                        * disable.
-                        */
-                       intel_crtc->atomic.disable_ips = true;
-
-                       intel_crtc->atomic.disable_fbc = true;
-               }
-
-               /*
-                * FBC does not work on some platforms for rotated
-                * planes, so disable it when rotation is not 0 and
-                * update it when rotation is set back to 0.
-                *
-                * FIXME: This is redundant with the fbc update done in
-                * the primary plane enable function except that that
-                * one is done too late. We eventually need to unify
-                * this.
-                */
-
-               if (visible &&
-                   INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
-                   dev_priv->fbc.crtc == intel_crtc &&
-                   plane_state->rotation != BIT(DRM_ROTATE_0))
-                       intel_crtc->atomic.disable_fbc = true;
+               intel_crtc->atomic.update_fbc = true;
 
                /*
                 * BDW signals flip done immediately if the plane
@@ -11908,7 +11912,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
                if (turn_on && IS_BROADWELL(dev))
                        intel_crtc->atomic.wait_vblank = true;
 
-               intel_crtc->atomic.update_fbc |= visible || mode_changed;
                break;
        case DRM_PLANE_TYPE_CURSOR:
                break;
@@ -12075,11 +12078,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
                pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
        }
 
-       /* Clamp bpp to 8 on screens without EDID 1.4 */
-       if (connector->base.display_info.bpc == 0 && bpp > 24) {
-               DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
-                             bpp);
-               pipe_config->pipe_bpp = 24;
+       /* Clamp bpp to default limit on screens without EDID 1.4 */
+       if (connector->base.display_info.bpc == 0) {
+               int type = connector->base.connector_type;
+               int clamp_bpp = 24;
+
+               /* Fall back to 18 bpp when DP sink capability is unknown. */
+               if (type == DRM_MODE_CONNECTOR_DisplayPort ||
+                   type == DRM_MODE_CONNECTOR_eDP)
+                       clamp_bpp = 18;
+
+               if (bpp > clamp_bpp) {
+                       DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
+                                     bpp, clamp_bpp);
+                       pipe_config->pipe_bpp = clamp_bpp;
+               }
        }
 }
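
The hunk above relaxes the no-EDID clamp: sinks reporting bpc == 0 are clamped to 18 bpp when the connector is DP or eDP and to 24 bpp otherwise. A self-contained sketch of just that decision, using made-up connector-type constants rather than the DRM ones:

#include <stdio.h>

enum connector_type { CONN_HDMI, CONN_DP, CONN_EDP };

/* Returns the bpp to program when the sink's EDID does not report bpc. */
static int clamp_unknown_sink_bpp(enum connector_type type, int requested_bpp)
{
	/* Fall back to 18 bpp when DP sink capability is unknown. */
	int clamp_bpp = (type == CONN_DP || type == CONN_EDP) ? 18 : 24;

	return requested_bpp > clamp_bpp ? clamp_bpp : requested_bpp;
}

int main(void)
{
	printf("%d\n", clamp_unknown_sink_bpp(CONN_HDMI, 30)); /* 24 */
	printf("%d\n", clamp_unknown_sink_bpp(CONN_DP, 30));   /* 18 */
	printf("%d\n", clamp_unknown_sink_bpp(CONN_DP, 16));   /* 16 */
	return 0;
}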
 
@@ -12519,19 +12532,22 @@ intel_compare_m_n(unsigned int m, unsigned int n,
 
        BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
 
-       if (m > m2) {
-               while (m > m2) {
+       if (n > n2) {
+               while (n > n2) {
                        m2 <<= 1;
                        n2 <<= 1;
                }
-       } else if (m < m2) {
-               while (m < m2) {
+       } else if (n < n2) {
+               while (n < n2) {
                        m <<= 1;
                        n <<= 1;
                }
        }
 
-       return m == m2 && n == n2;
+       if (n != n2)
+               return false;
+
+       return intel_fuzzy_clock_check(m, m2);
 }
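
The fixed intel_compare_m_n() scales whichever m/n pair has the smaller denominator until the denominators match, then compares the numerators fuzzily via intel_fuzzy_clock_check(). A standalone sketch of that flow; the ±5% tolerance used for the fuzzy step here is an assumption for illustration, not the driver's exact rule:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the driver's fuzzy comparison: equal within roughly 5%. */
static bool fuzzy_equal(unsigned int a, unsigned int b)
{
	unsigned int diff = a > b ? a - b : b - a;

	return diff * 20 <= (a > b ? a : b);
}

/* Compare m1/n1 against m2/n2 by doubling the smaller-denominator pair
 * until the denominators line up, as the patched helper does. */
static bool compare_m_n(unsigned int m, unsigned int n,
			unsigned int m2, unsigned int n2)
{
	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	if (n != n2)
		return false;

	return fuzzy_equal(m, m2);
}

int main(void)
{
	printf("%d\n", compare_m_n(100, 200, 200, 400)); /* 1: same ratio */
	printf("%d\n", compare_m_n(101, 200, 200, 400)); /* 1: within tolerance */
	printf("%d\n", compare_m_n(150, 200, 200, 400)); /* 0: clearly different */
	return 0;
}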
 
 static bool
@@ -13206,15 +13222,27 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
 
 static int intel_modeset_checks(struct drm_atomic_state *state)
 {
-       struct drm_device *dev = state->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+       struct drm_i915_private *dev_priv = state->dev->dev_private;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       int ret = 0, i;
 
        if (!check_digital_port_conflicts(state)) {
                DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
                return -EINVAL;
        }
 
+       intel_state->modeset = true;
+       intel_state->active_crtcs = dev_priv->active_crtcs;
+
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (crtc_state->active)
+                       intel_state->active_crtcs |= 1 << i;
+               else
+                       intel_state->active_crtcs &= ~(1 << i);
+       }
+
        /*
         * See if the config requires any additional preparation, e.g.
         * to adjust global state with pipes off.  We need to do this
@@ -13223,22 +13251,19 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
         * adjusted_mode bits in the crtc directly.
         */
        if (dev_priv->display.modeset_calc_cdclk) {
-               unsigned int cdclk;
-
                ret = dev_priv->display.modeset_calc_cdclk(state);
 
-               cdclk = to_intel_atomic_state(state)->cdclk;
-               if (!ret && cdclk != dev_priv->cdclk_freq)
+               if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
                        ret = intel_modeset_all_pipes(state);
 
                if (ret < 0)
                        return ret;
        } else
-               to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq;
+               to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
 
        intel_modeset_clear_plls(state);
 
-       if (IS_HASWELL(dev))
+       if (IS_HASWELL(dev_priv))
                return haswell_mode_set_planes_workaround(state);
 
        return 0;
@@ -13291,6 +13316,7 @@ static void calc_watermark_data(struct drm_atomic_state *state)
 static int intel_atomic_check(struct drm_device *dev,
                              struct drm_atomic_state *state)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_crtc *crtc;
        struct drm_crtc_state *crtc_state;
@@ -13333,7 +13359,7 @@ static int intel_atomic_check(struct drm_device *dev,
                        return ret;
 
                if (i915.fastboot &&
-                   intel_pipe_config_compare(state->dev,
+                   intel_pipe_config_compare(dev,
                                        to_intel_crtc_state(crtc->state),
                                        pipe_config, true)) {
                        crtc_state->mode_changed = false;
@@ -13359,12 +13385,13 @@ static int intel_atomic_check(struct drm_device *dev,
                if (ret)
                        return ret;
        } else
-               intel_state->cdclk = to_i915(state->dev)->cdclk_freq;
+               intel_state->cdclk = dev_priv->cdclk_freq;
 
-       ret = drm_atomic_helper_check_planes(state->dev, state);
+       ret = drm_atomic_helper_check_planes(dev, state);
        if (ret)
                return ret;
 
+       intel_fbc_choose_crtc(dev_priv, state);
        calc_watermark_data(state);
 
        return 0;
@@ -13456,12 +13483,12 @@ static int intel_atomic_commit(struct drm_device *dev,
                               struct drm_atomic_state *state,
                               bool async)
 {
+       struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc_state *crtc_state;
        struct drm_crtc *crtc;
-       int ret = 0;
-       int i;
-       bool any_ms = false;
+       int ret = 0, i;
+       bool hw_check = intel_state->modeset;
 
        ret = intel_atomic_prepare_commit(dev, state, async);
        if (ret) {
@@ -13472,19 +13499,26 @@ static int intel_atomic_commit(struct drm_device *dev,
        drm_atomic_helper_swap_state(dev, state);
        dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;
 
+       if (intel_state->modeset) {
+               memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
+                      sizeof(intel_state->min_pixclk));
+               dev_priv->active_crtcs = intel_state->active_crtcs;
+               dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+       }
+
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
                struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
                if (!needs_modeset(crtc->state))
                        continue;
 
-               any_ms = true;
-               intel_pre_plane_update(intel_crtc);
+               intel_pre_plane_update(to_intel_crtc_state(crtc_state));
 
                if (crtc_state->active) {
                        intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
                        dev_priv->display.crtc_disable(crtc);
                        intel_crtc->active = false;
+                       intel_fbc_disable(intel_crtc);
                        intel_disable_shared_dpll(intel_crtc);
 
                        /*
@@ -13503,7 +13537,7 @@ static int intel_atomic_commit(struct drm_device *dev,
          * update the output configuration. */
        intel_modeset_update_crtc_state(state);
 
-       if (any_ms) {
+       if (intel_state->modeset) {
                intel_shared_dpll_commit(state);
 
                drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
@@ -13530,11 +13564,14 @@ static int intel_atomic_commit(struct drm_device *dev,
                        put_domains = modeset_get_crtc_power_domains(crtc);
 
                        /* make sure intel_modeset_check_state runs */
-                       any_ms = true;
+                       hw_check = true;
                }
 
                if (!modeset)
-                       intel_pre_plane_update(intel_crtc);
+                       intel_pre_plane_update(to_intel_crtc_state(crtc_state));
+
+               if (crtc->state->active && intel_crtc->atomic.update_fbc)
+                       intel_fbc_enable(intel_crtc);
 
                if (crtc->state->active &&
                    (crtc->state->planes_changed || update_pipe))
@@ -13557,11 +13594,24 @@ static int intel_atomic_commit(struct drm_device *dev,
        drm_atomic_helper_cleanup_planes(dev, state);
        mutex_unlock(&dev->struct_mutex);
 
-       if (any_ms)
+       if (hw_check)
                intel_modeset_check_state(dev, state);
 
        drm_atomic_state_free(state);
 
+       /* As one of the primary mmio accessors, KMS has a high likelihood
+        * of triggering bugs in unclaimed access. After we finish
+        * modesetting, see if an error has been flagged, and if so
+        * enable debugging for the next modeset - and hope we catch
+        * the culprit.
+        *
+        * XXX note that we assume display power is on at this point.
+        * This might hold true now but we need to add pm helper to check
+        * unclaimed only when the hardware is on, as atomic commits
+        * can happen also when the device is completely off.
+        */
+       intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
        return 0;
 }
 
@@ -13850,7 +13900,7 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
        struct drm_i915_private *dev_priv;
        int crtc_clock, cdclk;
 
-       if (!intel_crtc || !crtc_state)
+       if (!intel_crtc || !crtc_state->base.enable)
                return DRM_PLANE_HELPER_NO_SCALING;
 
        dev = intel_crtc->base.dev;
@@ -13883,11 +13933,12 @@ intel_check_primary_plane(struct drm_plane *plane,
        int max_scale = DRM_PLANE_HELPER_NO_SCALING;
        bool can_position = false;
 
-       /* use scaler when colorkey is not required */
-       if (INTEL_INFO(plane->dev)->gen >= 9 &&
-           state->ckey.flags == I915_SET_COLORKEY_NONE) {
-               min_scale = 1;
-               max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+       if (INTEL_INFO(plane->dev)->gen >= 9) {
+               /* use scaler when colorkey is not required */
+               if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
+                       min_scale = 1;
+                       max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
+               }
                can_position = true;
        }
 
@@ -13898,32 +13949,6 @@ intel_check_primary_plane(struct drm_plane *plane,
                                             &state->visible);
 }
 
-static void
-intel_commit_primary_plane(struct drm_plane *plane,
-                          struct intel_plane_state *state)
-{
-       struct drm_crtc *crtc = state->base.crtc;
-       struct drm_framebuffer *fb = state->base.fb;
-       struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       crtc = crtc ? crtc : plane->crtc;
-
-       dev_priv->display.update_primary_plane(crtc, fb,
-                                              state->src.x1 >> 16,
-                                              state->src.y1 >> 16);
-}
-
-static void
-intel_disable_primary_plane(struct drm_plane *plane,
-                           struct drm_crtc *crtc)
-{
-       struct drm_device *dev = plane->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
-}
-
 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
                                    struct drm_crtc_state *old_crtc_state)
 {
@@ -14008,20 +14033,33 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
        primary->plane = pipe;
        primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
        primary->check_plane = intel_check_primary_plane;
-       primary->commit_plane = intel_commit_primary_plane;
-       primary->disable_plane = intel_disable_primary_plane;
        if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
                primary->plane = !pipe;
 
        if (INTEL_INFO(dev)->gen >= 9) {
                intel_primary_formats = skl_primary_formats;
                num_formats = ARRAY_SIZE(skl_primary_formats);
+
+               primary->update_plane = skylake_update_primary_plane;
+               primary->disable_plane = skylake_disable_primary_plane;
+       } else if (HAS_PCH_SPLIT(dev)) {
+               intel_primary_formats = i965_primary_formats;
+               num_formats = ARRAY_SIZE(i965_primary_formats);
+
+               primary->update_plane = ironlake_update_primary_plane;
+               primary->disable_plane = i9xx_disable_primary_plane;
        } else if (INTEL_INFO(dev)->gen >= 4) {
                intel_primary_formats = i965_primary_formats;
                num_formats = ARRAY_SIZE(i965_primary_formats);
+
+               primary->update_plane = i9xx_update_primary_plane;
+               primary->disable_plane = i9xx_disable_primary_plane;
        } else {
                intel_primary_formats = i8xx_primary_formats;
                num_formats = ARRAY_SIZE(i8xx_primary_formats);
+
+               primary->update_plane = i9xx_update_primary_plane;
+               primary->disable_plane = i9xx_disable_primary_plane;
        }
 
        drm_universal_plane_init(dev, &primary->base, 0,
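
With update_primary_plane gone from dev_priv->display, the hunk above selects per-plane update/disable callbacks once at plane-creation time based on the platform. The dispatch pattern in isolation, with hypothetical generation checks and callback names:

#include <stdio.h>

struct plane_funcs {
	void (*update)(const char *who);
	void (*disable)(const char *who);
};

static void gen9_update(const char *who)  { printf("%s: gen9 update\n", who); }
static void gen9_disable(const char *who) { printf("%s: gen9 disable\n", who); }
static void i9xx_update(const char *who)  { printf("%s: i9xx update\n", who); }
static void i9xx_disable(const char *who) { printf("%s: i9xx disable\n", who); }

/* Pick the callbacks once when the plane is created, as the patch does,
 * so later commits simply call through the stored pointers. */
static void init_plane_funcs(struct plane_funcs *f, int gen)
{
	if (gen >= 9) {
		f->update = gen9_update;
		f->disable = gen9_disable;
	} else {
		f->update = i9xx_update;
		f->disable = i9xx_disable;
	}
}

int main(void)
{
	struct plane_funcs f;

	init_plane_funcs(&f, 9);
	f.update("primary");
	f.disable("primary");
	return 0;
}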
@@ -14120,22 +14158,23 @@ static void
 intel_disable_cursor_plane(struct drm_plane *plane,
                           struct drm_crtc *crtc)
 {
-       intel_crtc_update_cursor(crtc, false);
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+       intel_crtc->cursor_addr = 0;
+       intel_crtc_update_cursor(crtc, NULL);
 }
 
 static void
-intel_commit_cursor_plane(struct drm_plane *plane,
-                         struct intel_plane_state *state)
+intel_update_cursor_plane(struct drm_plane *plane,
+                         const struct intel_crtc_state *crtc_state,
+                         const struct intel_plane_state *state)
 {
-       struct drm_crtc *crtc = state->base.crtc;
+       struct drm_crtc *crtc = crtc_state->base.crtc;
+       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct drm_device *dev = plane->dev;
-       struct intel_crtc *intel_crtc;
        struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
        uint32_t addr;
 
-       crtc = crtc ? crtc : plane->crtc;
-       intel_crtc = to_intel_crtc(crtc);
-
        if (!obj)
                addr = 0;
        else if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -14144,9 +14183,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
                addr = obj->phys_handle->busaddr;
 
        intel_crtc->cursor_addr = addr;
-
-       if (crtc->state->active)
-               intel_crtc_update_cursor(crtc, state->visible);
+       intel_crtc_update_cursor(crtc, state);
 }
 
 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -14172,7 +14209,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
        cursor->plane = pipe;
        cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
        cursor->check_plane = intel_check_cursor_plane;
-       cursor->commit_plane = intel_commit_cursor_plane;
+       cursor->update_plane = intel_update_cursor_plane;
        cursor->disable_plane = intel_disable_cursor_plane;
 
        drm_universal_plane_init(dev, &cursor->base, 0,
@@ -14619,10 +14656,12 @@ u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
        u32 gen = INTEL_INFO(dev)->gen;
 
        if (gen >= 9) {
+               int cpp = drm_format_plane_cpp(pixel_format, 0);
+
                /* "The stride in bytes must not exceed the of the size of 8K
                 *  pixels and 32K bytes."
                 */
-                return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
+               return min(8192 * cpp, 32768);
        } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
                return 32*1024;
        } else if (gen >= 4) {
@@ -14646,6 +14685,7 @@ static int intel_framebuffer_init(struct drm_device *dev,
                                  struct drm_mode_fb_cmd2 *mode_cmd,
                                  struct drm_i915_gem_object *obj)
 {
+       struct drm_i915_private *dev_priv = to_i915(dev);
        unsigned int aligned_height;
        int ret;
        u32 pitch_limit, stride_alignment;
@@ -14687,7 +14727,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
                return -EINVAL;
        }
 
-       stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
+       stride_alignment = intel_fb_stride_alignment(dev_priv,
+                                                    mode_cmd->modifier[0],
                                                     mode_cmd->pixel_format);
        if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
                DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
@@ -14779,7 +14820,6 @@ static int intel_framebuffer_init(struct drm_device *dev,
 
        drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
        intel_fb->obj = obj;
-       intel_fb->obj->framebuffer_references++;
 
        ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
        if (ret) {
@@ -14787,6 +14827,8 @@ static int intel_framebuffer_init(struct drm_device *dev,
                return ret;
        }
 
+       intel_fb->obj->framebuffer_references++;
+
        return 0;
 }
 
@@ -14850,8 +14892,6 @@ static void intel_init_display(struct drm_device *dev)
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
-               dev_priv->display.update_primary_plane =
-                       skylake_update_primary_plane;
        } else if (HAS_DDI(dev)) {
                dev_priv->display.get_pipe_config = haswell_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
@@ -14860,8 +14900,6 @@ static void intel_init_display(struct drm_device *dev)
                        haswell_crtc_compute_clock;
                dev_priv->display.crtc_enable = haswell_crtc_enable;
                dev_priv->display.crtc_disable = haswell_crtc_disable;
-               dev_priv->display.update_primary_plane =
-                       ironlake_update_primary_plane;
        } else if (HAS_PCH_SPLIT(dev)) {
                dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
@@ -14870,8 +14908,6 @@ static void intel_init_display(struct drm_device *dev)
                        ironlake_crtc_compute_clock;
                dev_priv->display.crtc_enable = ironlake_crtc_enable;
                dev_priv->display.crtc_disable = ironlake_crtc_disable;
-               dev_priv->display.update_primary_plane =
-                       ironlake_update_primary_plane;
        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
@@ -14879,8 +14915,6 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = valleyview_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
-               dev_priv->display.update_primary_plane =
-                       i9xx_update_primary_plane;
        } else {
                dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
                dev_priv->display.get_initial_plane_config =
@@ -14888,8 +14922,6 @@ static void intel_init_display(struct drm_device *dev)
                dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
                dev_priv->display.crtc_enable = i9xx_crtc_enable;
                dev_priv->display.crtc_disable = i9xx_crtc_disable;
-               dev_priv->display.update_primary_plane =
-                       i9xx_update_primary_plane;
        }
 
        /* Returns the core display clock speed */
@@ -15195,12 +15227,89 @@ static void i915_disable_vga(struct drm_device *dev)
 
 void intel_modeset_init_hw(struct drm_device *dev)
 {
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
        intel_update_cdclk(dev);
-       intel_prepare_ddi(dev);
+
+       dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
+
        intel_init_clock_gating(dev);
        intel_enable_gt_powersave(dev);
 }
 
+/*
+ * Calculate what we think the watermarks should be for the state we've read
+ * out of the hardware and then immediately program those watermarks so that
+ * we ensure the hardware settings match our internal state.
+ *
+ * We can calculate what we think WM's should be by creating a duplicate of the
+ * current state (which was constructed during hardware readout) and running it
+ * through the atomic check code to calculate new watermark values in the
+ * state object.
+ */
+static void sanitize_watermarks(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct drm_atomic_state *state;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *cstate;
+       struct drm_modeset_acquire_ctx ctx;
+       int ret;
+       int i;
+
+       /* Only supported on platforms that use atomic watermark design */
+       if (!dev_priv->display.program_watermarks)
+               return;
+
+       /*
+        * We need to hold connection_mutex before calling duplicate_state so
+        * that the connector loop is protected.
+        */
+       drm_modeset_acquire_init(&ctx, 0);
+retry:
+       ret = drm_modeset_lock_all_ctx(dev, &ctx);
+       if (ret == -EDEADLK) {
+               drm_modeset_backoff(&ctx);
+               goto retry;
+       } else if (WARN_ON(ret)) {
+               goto fail;
+       }
+
+       state = drm_atomic_helper_duplicate_state(dev, &ctx);
+       if (WARN_ON(IS_ERR(state)))
+               goto fail;
+
+       ret = intel_atomic_check(dev, state);
+       if (ret) {
+               /*
+                * If we fail here, it means that the hardware appears to be
+                * programmed in a way that shouldn't be possible, given our
+                * understanding of watermark requirements.  This might mean a
+                * mistake in the hardware readout code or a mistake in the
+                * watermark calculations for a given platform.  Raise a WARN
+                * so that this is noticeable.
+                *
+                * If this actually happens, we'll have to just leave the
+                * BIOS-programmed watermarks untouched and hope for the best.
+                */
+               WARN(true, "Could not determine valid watermarks for inherited state\n");
+               goto fail;
+       }
+
+       /* Write calculated watermark values back */
+       to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
+       for_each_crtc_in_state(state, crtc, cstate, i) {
+               struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
+
+               dev_priv->display.program_watermarks(cs);
+       }
+
+       drm_atomic_state_free(state);
+fail:
+       drm_modeset_drop_locks(&ctx);
+       drm_modeset_acquire_fini(&ctx);
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -15321,6 +15430,13 @@ void intel_modeset_init(struct drm_device *dev)
                 */
                intel_find_initial_plane_obj(crtc, &plane_config);
        }
+
+       /*
+        * Make sure hardware watermarks really match the state we read out.
+        * Note that we need to do this after reconstructing the BIOS fb's
+        * since the watermark calculation done here will use pstate->fb.
+        */
+       sanitize_watermarks(dev);
 }
 
 static void intel_enable_pipe_a(struct drm_device *dev)
@@ -15451,6 +15567,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                crtc->base.state->active = crtc->active;
                crtc->base.enabled = crtc->active;
                crtc->base.state->connector_mask = 0;
+               crtc->base.state->encoder_mask = 0;
 
                /* Because we only establish the connector -> encoder ->
                 * crtc links if something is active, this means the
@@ -15593,16 +15710,40 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
        struct intel_connector *connector;
        int i;
 
+       dev_priv->active_crtcs = 0;
+
        for_each_intel_crtc(dev, crtc) {
-               __drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
-               memset(crtc->config, 0, sizeof(*crtc->config));
-               crtc->config->base.crtc = &crtc->base;
+               struct intel_crtc_state *crtc_state = crtc->config;
+               int pixclk = 0;
 
-               crtc->active = dev_priv->display.get_pipe_config(crtc,
-                                                                crtc->config);
+               __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
+               memset(crtc_state, 0, sizeof(*crtc_state));
+               crtc_state->base.crtc = &crtc->base;
 
-               crtc->base.state->active = crtc->active;
-               crtc->base.enabled = crtc->active;
+               crtc_state->base.active = crtc_state->base.enable =
+                       dev_priv->display.get_pipe_config(crtc, crtc_state);
+
+               crtc->base.enabled = crtc_state->base.enable;
+               crtc->active = crtc_state->base.active;
+
+               if (crtc_state->base.active) {
+                       dev_priv->active_crtcs |= 1 << crtc->pipe;
+
+                       if (IS_BROADWELL(dev_priv)) {
+                               pixclk = ilk_pipe_pixel_rate(crtc_state);
+
+                               /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+                               if (crtc_state->ips_enabled)
+                                       pixclk = DIV_ROUND_UP(pixclk * 100, 95);
+                       } else if (IS_VALLEYVIEW(dev_priv) ||
+                                  IS_CHERRYVIEW(dev_priv) ||
+                                  IS_BROXTON(dev_priv))
+                               pixclk = crtc_state->base.adjusted_mode.crtc_clock;
+                       else
+                               WARN_ON(dev_priv->display.modeset_calc_cdclk);
+               }
+
+               dev_priv->min_pixclk[crtc->pipe] = pixclk;
 
                readout_plane_state(crtc);
 
@@ -15666,6 +15807,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                                 */
                                encoder->base.crtc->state->connector_mask |=
                                        1 << drm_connector_index(&connector->base);
+                               encoder->base.crtc->state->encoder_mask |=
+                                       1 << drm_encoder_index(&encoder->base);
                        }
 
                } else {
@@ -15767,6 +15910,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
                        modeset_put_power_domains(dev_priv, put_domains);
        }
        intel_display_set_init_power(dev_priv, false);
+
+       intel_fbc_init_pipe_state(dev_priv);
 }
 
 void intel_display_resume(struct drm_device *dev)
@@ -15896,7 +16041,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
        intel_unregister_dsm_handler();
 
-       intel_fbc_disable(dev_priv);
+       intel_fbc_global_disable(dev_priv);
 
        /* flush any delayed tasks or pending work */
        flush_scheduled_work();
@@ -16106,7 +16251,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
        for_each_pipe(dev_priv, i) {
                err_printf(m, "Pipe [%d]:\n", i);
                err_printf(m, "  Power: %s\n",
-                          error->pipe[i].power_domain_on ? "on" : "off");
+                          onoff(error->pipe[i].power_domain_on));
                err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
                err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);
 
@@ -16134,7 +16279,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                err_printf(m, "CPU transcoder: %c\n",
                           transcoder_name(error->transcoder[i].cpu_transcoder));
                err_printf(m, "  Power: %s\n",
-                          error->transcoder[i].power_domain_on ? "on" : "off");
+                          onoff(error->transcoder[i].power_domain_on));
                err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
                err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
                err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
@@ -16144,24 +16289,3 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
                err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
        }
 }
-
-void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
-{
-       struct intel_crtc *crtc;
-
-       for_each_intel_crtc(dev, crtc) {
-               struct intel_unpin_work *work;
-
-               spin_lock_irq(&dev->event_lock);
-
-               work = crtc->unpin_work;
-
-               if (work && work->event &&
-                   work->event->base.file_priv == file) {
-                       kfree(work->event);
-                       work->event = NULL;
-               }
-
-               spin_unlock_irq(&dev->event_lock);
-       }
-}
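The readout hunk above stores a per-pipe minimum pixel clock (dev_priv->min_pixclk) and, on Broadwell with IPS enabled, inflates it by 100/95 because the pixel rate may not exceed 95% of cdclk. A minimal user-space sketch of that adjustment; the helper name and the standalone main() are illustrative, not driver code.

#include <stdio.h>

/* Round-up integer division, as the kernel's DIV_ROUND_UP() does. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * Minimum cdclk a pipe needs: with IPS on (the Broadwell case in the hunk
 * above), the pixel rate has to stay below 95% of cdclk, so the required
 * value is pixclk * 100 / 95, rounded up.
 */
static unsigned int min_cdclk_for_pipe(unsigned int pixclk_khz, int ips_enabled)
{
        if (ips_enabled)
                return DIV_ROUND_UP(pixclk_khz * 100, 95);
        return pixclk_khz;
}

int main(void)
{
        /* A 533 MHz pixel clock with IPS needs about 561 MHz of cdclk headroom. */
        printf("%u kHz\n", min_cdclk_for_pipe(533000, 1));
        return 0;
}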
index 796e3d313cb975efc3797a39b15beb8e55ab7f92..a073f04a5330794a3056cfa2fc8152170f1e4500 100644 (file)
@@ -157,14 +157,9 @@ intel_dp_max_link_bw(struct intel_dp  *intel_dp)
 static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
        u8 source_max, sink_max;
 
-       source_max = 4;
-       if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
-           (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
-               source_max = 2;
-
+       source_max = intel_dig_port->max_lanes;
        sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
 
        return min(source_max, sink_max);
@@ -340,8 +335,12 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
                release_cl_override = IS_CHERRYVIEW(dev) &&
                        !chv_phy_powergate_ch(dev_priv, phy, ch, true);
 
-               vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
-                                &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
+               if (vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
+                                    &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
+                       DRM_ERROR("Failed to force on pll for pipe %c!\n",
+                                 pipe_name(pipe));
+                       return;
+               }
        }
 
        /*
@@ -980,7 +979,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
                if (WARN_ON(txsize > 20))
                        return -E2BIG;
 
-               memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+               if (msg->buffer)
+                       memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
+               else
+                       WARN_ON(msg->size);
 
                ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
                if (ret > 0) {
@@ -2243,11 +2245,6 @@ static void intel_edp_backlight_power(struct intel_connector *connector,
                _intel_edp_backlight_off(intel_dp);
 }
 
-static const char *state_string(bool enabled)
-{
-       return enabled ? "on" : "off";
-}
-
 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -2257,7 +2254,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
        I915_STATE_WARN(cur_state != state,
                        "DP port %c state assertion failure (expected %s, current %s)\n",
                        port_name(dig_port->port),
-                       state_string(state), state_string(cur_state));
+                       onoff(state), onoff(cur_state));
 }
 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
 
@@ -2267,7 +2264,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
 
        I915_STATE_WARN(cur_state != state,
                        "eDP PLL state assertion failure (expected %s, current %s)\n",
-                       state_string(state), state_string(cur_state));
+                       onoff(state), onoff(cur_state));
 }
 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
@@ -4014,7 +4011,7 @@ static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
        } while (--attempts && count);
 
        if (attempts == 0) {
-               DRM_ERROR("TIMEOUT: Sink CRC counter is not zeroed\n");
+               DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
                ret = -ETIMEDOUT;
        }
 
@@ -5839,6 +5836,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
        enum port port = intel_dig_port->port;
        int type, ret;
 
+       if (WARN(intel_dig_port->max_lanes < 1,
+                "Not enough lanes (%d) for DP on port %c\n",
+                intel_dig_port->max_lanes, port_name(port)))
+               return false;
+
        intel_dp->pps_pipe = INVALID_PIPE;
 
        /* intel_dp vfuncs */
@@ -6037,6 +6039,7 @@ intel_dp_init(struct drm_device *dev,
        intel_dig_port->port = port;
        dev_priv->dig_port_map[port] = intel_encoder;
        intel_dig_port->dp.output_reg = output_reg;
+       intel_dig_port->max_lanes = 4;
 
        intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
        if (IS_CHERRYVIEW(dev)) {
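The intel_dp_max_lane_count() hunk above replaces the open-coded DDI port A special case with a per-port max_lanes field (defaulted to 4 in intel_dp_init()); the usable lane count remains the smaller of what the source port and the sink's DPCD report. A trivial standalone sketch of that clamp, with illustrative names:

#include <stdio.h>

/* Usable DP lane count: both the source port wiring and the sink cap it. */
static unsigned char dp_max_lane_count(unsigned char source_max,
                                       unsigned char sink_max)
{
        return source_max < sink_max ? source_max : sink_max;
}

int main(void)
{
        /* A 2-lane-wired eDP port talking to a 4-lane-capable panel. */
        printf("%u lanes\n", dp_max_lane_count(2, 4));
        return 0;
}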
index 88887938e0bfd3e0cf3e7bdebd66a145f7e036a7..0b8eefc2acc5d93088b960e4714bce55944df82e 100644 (file)
@@ -215,27 +215,46 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
        }
 }
 
-static void
-intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
+/*
+ * Pick training pattern for channel equalization. Training Pattern 3 for HBR2
+ * or 1.2 devices that support it, Training Pattern 2 otherwise.
+ */
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
 {
-       bool channel_eq = false;
-       int tries, cr_tries;
-       uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+       u32 training_pattern = DP_TRAINING_PATTERN_2;
+       bool source_tps3, sink_tps3;
 
        /*
-        * Training Pattern 3 for HBR2 or 1.2 devices that support it.
-        *
         * Intel platforms that support HBR2 also support TPS3. TPS3 support is
-        * also mandatory for downstream devices that support HBR2.
+        * also mandatory for downstream devices that support HBR2. However, not
+        * all sinks follow the spec.
         *
         * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
-        * supported but still not enabled.
+        * supported in source but still not enabled.
         */
-       if (intel_dp_source_supports_hbr2(intel_dp) &&
-           drm_dp_tps3_supported(intel_dp->dpcd))
+       source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
+       sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
+
+       if (source_tps3 && sink_tps3) {
                training_pattern = DP_TRAINING_PATTERN_3;
-       else if (intel_dp->link_rate == 540000)
-               DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
+       } else if (intel_dp->link_rate == 540000) {
+               if (!source_tps3)
+                       DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+               if (!sink_tps3)
+                       DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+       }
+
+       return training_pattern;
+}
+
+static void
+intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
+{
+       bool channel_eq = false;
+       int tries, cr_tries;
+       u32 training_pattern;
+
+       training_pattern = intel_dp_training_pattern(intel_dp);
 
        /* channel equalization */
        if (!intel_dp_set_link_train(intel_dp,
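The refactor above moves the training-pattern choice into its own helper: TPS3 is used only when both source and sink support it, TPS2 otherwise, and a 5.4 Gbps link that has to fall back is only logged at debug level. A minimal standalone sketch of the same decision; the enum and function names are illustrative:

#include <stdio.h>
#include <stdbool.h>

enum training_pattern { TPS2 = 2, TPS3 = 3 };

/*
 * Channel-equalization pattern selection as in the hunk above: TPS3 only
 * when both ends support it, TPS2 otherwise.  HBR2 (5.4 Gbps) without TPS3
 * is allowed but worth flagging.
 */
static enum training_pattern pick_training_pattern(bool source_tps3,
                                                   bool sink_tps3,
                                                   int link_rate_khz)
{
        if (source_tps3 && sink_tps3)
                return TPS3;

        if (link_rate_khz == 540000)
                fprintf(stderr, "5.4 Gbps link rate without TPS3 support\n");

        return TPS2;
}

int main(void)
{
        printf("pattern %d\n", pick_training_pattern(true, false, 540000));
        return 0;
}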
index fa0dabf578dc51c7b2371ecc9a1f1342b345892f..2a2ab306ad8457f523c06ffdfc543e847d2ebe4a 100644 (file)
@@ -184,7 +184,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
        intel_mst->port = found->port;
 
        if (intel_dp->active_mst_links == 0) {
-               intel_ddi_clk_select(encoder, intel_crtc->config);
+               intel_prepare_ddi_buffer(&intel_dig_port->base);
+
+               intel_ddi_clk_select(&intel_dig_port->base, intel_crtc->config);
 
                intel_dp_set_link_params(intel_dp, intel_crtc->config);
 
index ea5415851c6e2cc36d4c6790db473fab637fd46d..878172a45f397f2a2577b9c273a683b6ab137185 100644 (file)
@@ -246,7 +246,18 @@ struct intel_atomic_state {
        struct drm_atomic_state base;
 
        unsigned int cdclk;
-       bool dpll_set;
+
+       /*
+        * Calculated device cdclk, can be different from cdclk
+        * only when all crtc's are DPMS off.
+        */
+       unsigned int dev_cdclk;
+
+       bool dpll_set, modeset;
+
+       unsigned int active_crtcs;
+       unsigned int min_pixclk[I915_MAX_PIPES];
+
        struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
        struct intel_wm_config wm_config;
 };
@@ -481,6 +492,8 @@ struct intel_crtc_state {
 
        bool ips_enabled;
 
+       bool enable_fbc;
+
        bool double_wide;
 
        bool dp_encoder_is_mst;
@@ -531,16 +544,15 @@ struct intel_mmio_flip {
  */
 struct intel_crtc_atomic_commit {
        /* Sleepable operations to perform before commit */
-       bool disable_fbc;
-       bool disable_ips;
-       bool pre_disable_primary;
 
        /* Sleepable operations to perform after commit */
        unsigned fb_bits;
        bool wait_vblank;
-       bool update_fbc;
        bool post_enable_primary;
        unsigned update_sprite_watermarks;
+
+       /* Sleepable operations to perform before and after commit */
+       bool update_fbc;
 };
 
 struct intel_crtc {
@@ -564,7 +576,7 @@ struct intel_crtc {
        /* Display surface base address adjustment for pageflips. Note that on
         * gen4+ this only adjusts up to a tile, offsets within a tile are
         * handled in the hw itself (with the TILEOFF register). */
-       unsigned long dspaddr_offset;
+       u32 dspaddr_offset;
        int adjusted_x;
        int adjusted_y;
 
@@ -647,23 +659,17 @@ struct intel_plane {
        /*
         * NOTE: Do not place new plane state fields here (e.g., when adding
         * new plane properties).  New runtime state should now be placed in
-        * the intel_plane_state structure and accessed via drm_plane->state.
+        * the intel_plane_state structure and accessed via plane_state.
         */
 
        void (*update_plane)(struct drm_plane *plane,
-                            struct drm_crtc *crtc,
-                            struct drm_framebuffer *fb,
-                            int crtc_x, int crtc_y,
-                            unsigned int crtc_w, unsigned int crtc_h,
-                            uint32_t x, uint32_t y,
-                            uint32_t src_w, uint32_t src_h);
+                            const struct intel_crtc_state *crtc_state,
+                            const struct intel_plane_state *plane_state);
        void (*disable_plane)(struct drm_plane *plane,
                              struct drm_crtc *crtc);
        int (*check_plane)(struct drm_plane *plane,
                           struct intel_crtc_state *crtc_state,
                           struct intel_plane_state *state);
-       void (*commit_plane)(struct drm_plane *plane,
-                            struct intel_plane_state *state);
 };
 
 struct intel_watermark_params {
@@ -817,6 +823,7 @@ struct intel_digital_port {
        struct intel_hdmi hdmi;
        enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
        bool release_cl2_override;
+       uint8_t max_lanes;
        /* for communication with audio component; protected by av_mutex */
        const struct drm_connector *audio_connector;
 };
@@ -996,7 +1003,7 @@ void intel_crt_init(struct drm_device *dev);
 /* intel_ddi.c */
 void intel_ddi_clk_select(struct intel_encoder *encoder,
                          const struct intel_crtc_state *pipe_config);
-void intel_prepare_ddi(struct drm_device *dev);
+void intel_prepare_ddi_buffer(struct intel_encoder *encoder);
 void hsw_fdi_link_train(struct drm_crtc *crtc);
 void intel_ddi_init(struct drm_device *dev, enum port port);
 enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
@@ -1041,8 +1048,8 @@ unsigned int intel_fb_align_height(struct drm_device *dev,
                                   uint64_t fb_format_modifier);
 void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
                        enum fb_op_origin origin);
-u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
-                             uint32_t pixel_format);
+u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
+                             uint64_t fb_modifier, uint32_t pixel_format);
 
 /* intel_audio.c */
 void intel_init_audio(struct drm_device *dev);
@@ -1126,9 +1133,8 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
                                    struct drm_plane_state *plane_state);
 
-unsigned int
-intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
-                 uint64_t fb_format_modifier, unsigned int plane);
+unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
+                              uint64_t fb_modifier, unsigned int cpp);
 
 static inline bool
 intel_rotation_90_or_270(unsigned int rotation)
@@ -1149,8 +1155,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
                                                struct intel_crtc_state *state);
 
-void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
-                     const struct dpll *dpll);
+int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
+                    const struct dpll *dpll);
 void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
 
 /* modesetting asserts */
@@ -1167,11 +1173,11 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
 void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
 #define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
 #define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
-unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
-                                            int *x, int *y,
-                                            unsigned int tiling_mode,
-                                            unsigned int bpp,
-                                            unsigned int pitch);
+u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
+                             int *x, int *y,
+                             uint64_t fb_modifier,
+                             unsigned int cpp,
+                             unsigned int pitch);
 void intel_prepare_reset(struct drm_device *dev);
 void intel_finish_reset(struct drm_device *dev);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
@@ -1207,7 +1213,6 @@ enum intel_display_power_domain
 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
                                 struct intel_crtc_state *pipe_config);
-void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file);
 
 int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
 int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
@@ -1323,13 +1328,16 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
 #endif
 
 /* intel_fbc.c */
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+                          struct drm_atomic_state *state);
 bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_deactivate(struct intel_crtc *crtc);
-void intel_fbc_update(struct intel_crtc *crtc);
+void intel_fbc_pre_update(struct intel_crtc *crtc);
+void intel_fbc_post_update(struct intel_crtc *crtc);
 void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
 void intel_fbc_enable(struct intel_crtc *crtc);
-void intel_fbc_disable(struct drm_i915_private *dev_priv);
-void intel_fbc_disable_crtc(struct intel_crtc *crtc);
+void intel_fbc_disable(struct intel_crtc *crtc);
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits,
                          enum fb_op_origin origin);
@@ -1555,6 +1563,7 @@ void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
                          struct skl_ddb_allocation *ddb /* out */);
 uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
+int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6);
 
 /* intel_sdvo.c */
 bool intel_sdvo_init(struct drm_device *dev,
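With the vfunc change above, a plane's update_plane hook no longer takes a long list of coordinates; it reads the committed CRTC and plane state instead (and the separate commit_plane hook goes away). A hypothetical sketch of what such a hook pulls out of the state, using simplified stand-in structs rather than the driver's types:

#include <stdio.h>

/* Simplified stand-ins for the state structures named in the header hunk. */
struct plane_state {
        int crtc_x, crtc_y;            /* destination on the CRTC */
        unsigned int crtc_w, crtc_h;
        unsigned int src_x, src_y;     /* 16.16 fixed point in the real driver */
        unsigned int src_w, src_h;
};

/*
 * New-style update hook: everything the old argument list carried is now
 * read from the already-checked plane state.
 */
static void update_plane(const struct plane_state *state)
{
        printf("dst %ux%u+%d+%d src %ux%u\n",
               state->crtc_w, state->crtc_h, state->crtc_x, state->crtc_y,
               state->src_w >> 16, state->src_h >> 16);
}

int main(void)
{
        struct plane_state s = {
                .crtc_w = 1920, .crtc_h = 1080,
                .src_w = 1920 << 16, .src_h = 1080 << 16,
        };
        update_plane(&s);
        return 0;
}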
index 44742fa2f616dd22fc25847b0f4ddb5730e550de..378f879f40153439eba54569f7c66866240fd421 100644 (file)
@@ -478,8 +478,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
 
        DRM_DEBUG_KMS("\n");
 
-       intel_dsi_prepare(encoder);
        intel_enable_dsi_pll(encoder);
+       intel_dsi_prepare(encoder);
 
        /* Panel Enable over CRC PMIC */
        if (intel_dsi->gpio_panel)
@@ -702,7 +702,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
 static void intel_dsi_get_config(struct intel_encoder *encoder,
                                 struct intel_crtc_state *pipe_config)
 {
-       u32 pclk = 0;
+       u32 pclk;
        DRM_DEBUG_KMS("\n");
 
        pipe_config->has_dsi_encoder = true;
@@ -713,12 +713,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
         */
        pipe_config->dpll_hw_state.dpll_md = 0;
 
-       if (IS_BROXTON(encoder->base.dev))
-               pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
-       else if (IS_VALLEYVIEW(encoder->base.dev) ||
-                IS_CHERRYVIEW(encoder->base.dev))
-               pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp);
-
+       pclk = intel_dsi_get_pclk(encoder, pipe_config->pipe_bpp);
        if (!pclk)
                return;
 
index 02551ff228c22e32751b2e34803668432e7c6d64..de7be7f3fb42c8167fa4c9b677de4c163750c64e 100644 (file)
@@ -126,8 +126,7 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
 
 extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
 extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
-extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
-extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
+extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp);
 extern void intel_dsi_reset_clocks(struct intel_encoder *encoder,
                                                        enum port port);
 
index a5e99ac305daab3ef69471d37b0ec9a97a27425c..787f01c63984e574fa038d3cbed09481053e3e40 100644 (file)
@@ -204,10 +204,28 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
        struct drm_device *dev = intel_dsi->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (dev_priv->vbt.dsi.seq_version >= 3)
+               data++;
+
        gpio = *data++;
 
        /* pull up/down */
-       action = *data++;
+       action = *data++ & 1;
+
+       if (gpio >= ARRAY_SIZE(gtable)) {
+               DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+               goto out;
+       }
+
+       if (!IS_VALLEYVIEW(dev_priv)) {
+               DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
+               goto out;
+       }
+
+       if (dev_priv->vbt.dsi.seq_version >= 3) {
+               DRM_DEBUG_KMS("GPIO element v3 not supported\n");
+               goto out;
+       }
 
        function = gtable[gpio].function_reg;
        pad = gtable[gpio].pad_reg;
@@ -216,27 +234,33 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
        if (!gtable[gpio].init) {
                /* program the function */
                /* FIXME: remove constant below */
-               vlv_gpio_nc_write(dev_priv, function, 0x2000CC00);
+               vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function,
+                                 0x2000CC00);
                gtable[gpio].init = 1;
        }
 
        val = 0x4 | action;
 
        /* pull up/down */
-       vlv_gpio_nc_write(dev_priv, pad, val);
+       vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val);
        mutex_unlock(&dev_priv->sb_lock);
 
+out:
        return data;
 }
 
+static const u8 *mipi_exec_i2c_skip(struct intel_dsi *intel_dsi, const u8 *data)
+{
+       return data + *(data + 6) + 7;
+}
+
 typedef const u8 * (*fn_mipi_elem_exec)(struct intel_dsi *intel_dsi,
                                        const u8 *data);
 static const fn_mipi_elem_exec exec_elem[] = {
-       NULL, /* reserved */
-       mipi_exec_send_packet,
-       mipi_exec_delay,
-       mipi_exec_gpio,
-       NULL, /* status read; later */
+       [MIPI_SEQ_ELEM_SEND_PKT] = mipi_exec_send_packet,
+       [MIPI_SEQ_ELEM_DELAY] = mipi_exec_delay,
+       [MIPI_SEQ_ELEM_GPIO] = mipi_exec_gpio,
+       [MIPI_SEQ_ELEM_I2C] = mipi_exec_i2c_skip,
 };
 
 /*
@@ -246,107 +270,114 @@ static const fn_mipi_elem_exec exec_elem[] = {
  */
 
 static const char * const seq_name[] = {
-       "UNDEFINED",
-       "MIPI_SEQ_ASSERT_RESET",
-       "MIPI_SEQ_INIT_OTP",
-       "MIPI_SEQ_DISPLAY_ON",
-       "MIPI_SEQ_DISPLAY_OFF",
-       "MIPI_SEQ_DEASSERT_RESET"
+       [MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+       [MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
+       [MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
+       [MIPI_SEQ_DISPLAY_OFF]  = "MIPI_SEQ_DISPLAY_OFF",
+       [MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+       [MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
+       [MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
+       [MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
+       [MIPI_SEQ_TEAR_OFF] = "MIPI_SEQ_TEAR_OFF",
+       [MIPI_SEQ_POWER_ON] = "MIPI_SEQ_POWER_ON",
+       [MIPI_SEQ_POWER_OFF] = "MIPI_SEQ_POWER_OFF",
 };
 
-static void generic_exec_sequence(struct intel_dsi *intel_dsi, const u8 *data)
+static const char *sequence_name(enum mipi_seq seq_id)
 {
+       if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
+               return seq_name[seq_id];
+       else
+               return "(unknown)";
+}
+
+static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
+{
+       struct vbt_panel *vbt_panel = to_vbt_panel(panel);
+       struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+       struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+       const u8 *data;
        fn_mipi_elem_exec mipi_elem_exec;
-       int index;
 
-       if (!data)
+       if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
                return;
 
-       DRM_DEBUG_DRIVER("Starting MIPI sequence - %s\n", seq_name[*data]);
+       data = dev_priv->vbt.dsi.sequence[seq_id];
+       if (!data) {
+               DRM_DEBUG_KMS("MIPI sequence %d - %s not available\n",
+                             seq_id, sequence_name(seq_id));
+               return;
+       }
 
-       /* go to the first element of the sequence */
-       data++;
+       WARN_ON(*data != seq_id);
 
-       /* parse each byte till we reach end of sequence byte - 0x00 */
-       while (1) {
-               index = *data;
-               mipi_elem_exec = exec_elem[index];
-               if (!mipi_elem_exec) {
-                       DRM_ERROR("Unsupported MIPI element, skipping sequence execution\n");
-                       return;
-               }
+       DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
+                     seq_id, sequence_name(seq_id));
 
-               /* goto element payload */
-               data++;
+       /* Skip Sequence Byte. */
+       data++;
 
-               /* execute the element specific routines */
-               data = mipi_elem_exec(intel_dsi, data);
+       /* Skip Size of Sequence. */
+       if (dev_priv->vbt.dsi.seq_version >= 3)
+               data += 4;
 
-               /*
-                * After processing the element, data should point to
-                * next element or end of sequence
-                * check if have we reached end of sequence
-                */
-               if (*data == 0x00)
+       while (1) {
+               u8 operation_byte = *data++;
+               u8 operation_size = 0;
+
+               if (operation_byte == MIPI_SEQ_ELEM_END)
                        break;
+
+               if (operation_byte < ARRAY_SIZE(exec_elem))
+                       mipi_elem_exec = exec_elem[operation_byte];
+               else
+                       mipi_elem_exec = NULL;
+
+               /* Size of Operation. */
+               if (dev_priv->vbt.dsi.seq_version >= 3)
+                       operation_size = *data++;
+
+               if (mipi_elem_exec) {
+                       data = mipi_elem_exec(intel_dsi, data);
+               } else if (operation_size) {
+                       /* We have size, skip. */
+                       DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
+                                     operation_byte);
+                       data += operation_size;
+               } else {
+                       /* No size, can't skip without parsing. */
+                       DRM_ERROR("Unsupported MIPI operation byte %u\n",
+                                 operation_byte);
+                       return;
+               }
        }
 }
 
 static int vbt_panel_prepare(struct drm_panel *panel)
 {
-       struct vbt_panel *vbt_panel = to_vbt_panel(panel);
-       struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
-       struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const u8 *sequence;
-
-       sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET];
-       generic_exec_sequence(intel_dsi, sequence);
-
-       sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
-       generic_exec_sequence(intel_dsi, sequence);
+       generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
+       generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
 
        return 0;
 }
 
 static int vbt_panel_unprepare(struct drm_panel *panel)
 {
-       struct vbt_panel *vbt_panel = to_vbt_panel(panel);
-       struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
-       struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const u8 *sequence;
-
-       sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET];
-       generic_exec_sequence(intel_dsi, sequence);
+       generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
 
        return 0;
 }
 
 static int vbt_panel_enable(struct drm_panel *panel)
 {
-       struct vbt_panel *vbt_panel = to_vbt_panel(panel);
-       struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
-       struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const u8 *sequence;
-
-       sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_ON];
-       generic_exec_sequence(intel_dsi, sequence);
+       generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
 
        return 0;
 }
 
 static int vbt_panel_disable(struct drm_panel *panel)
 {
-       struct vbt_panel *vbt_panel = to_vbt_panel(panel);
-       struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
-       struct drm_device *dev = intel_dsi->base.base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const u8 *sequence;
-
-       sequence = dev_priv->vbt.dsi.sequence[MIPI_SEQ_DISPLAY_OFF];
-       generic_exec_sequence(intel_dsi, sequence);
+       generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
 
        return 0;
 }
@@ -666,6 +697,8 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
 
        /* This is cheating a bit with the cleanup. */
        vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
+       if (!vbt_panel)
+               return NULL;
 
        vbt_panel->intel_dsi = intel_dsi;
        drm_panel_init(&vbt_panel->panel);
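The rewritten generic_exec_sequence() above walks a VBT MIPI sequence element by element; sequence block v3 carries explicit sizes, so unknown elements can be skipped instead of aborting the whole sequence. A standalone sketch of such a v3-style walk over a made-up byte blob; the element IDs and layout here are simplified assumptions, not the VBT format:

#include <stdio.h>
#include <stdint.h>

#define SEQ_ELEM_END   0
#define SEQ_ELEM_DELAY 2   /* the only element this sketch "understands" */

/*
 * v3-style walk, as in the hunk above: [sequence id][4-byte size] followed
 * by a list of [operation byte][operation size][payload...] ending with
 * SEQ_ELEM_END.  Unknown operations are skipped by size instead of bailing.
 */
static void exec_sequence_v3(const uint8_t *data)
{
        data++;          /* sequence id byte */
        data += 4;       /* size of sequence */

        for (;;) {
                uint8_t op = *data++;
                uint8_t size;

                if (op == SEQ_ELEM_END)
                        break;

                size = *data++;
                if (op == SEQ_ELEM_DELAY)
                        printf("delay element, %d payload bytes\n", size);
                else
                        printf("skipping unknown element %d (%d bytes)\n", op, size);
                data += size;
        }
}

int main(void)
{
        const uint8_t blob[] = {
                1, 0, 0, 0, 0,                 /* sequence id 1, size (ignored here) */
                SEQ_ELEM_DELAY, 4, 0, 0, 0, 0,
                9, 2, 0xaa, 0xbb,              /* unknown element, skipped by size */
                SEQ_ELEM_END,
        };
        exec_sequence_v3(blob);
        return 0;
}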
index fbd2b51810ca0f2223a0129a3beac9bbb1433c61..bb5e95a1a4530812809c06bded3492e7547c73a8 100644 (file)
 #include "i915_drv.h"
 #include "intel_dsi.h"
 
-#define DSI_HSS_PACKET_SIZE            4
-#define DSI_HSE_PACKET_SIZE            4
-#define DSI_HSA_PACKET_EXTRA_SIZE      6
-#define DSI_HBP_PACKET_EXTRA_SIZE      6
-#define DSI_HACTIVE_PACKET_EXTRA_SIZE  6
-#define DSI_HFP_PACKET_EXTRA_SIZE      6
-#define DSI_EOTP_PACKET_SIZE           4
-
 static int dsi_pixel_format_bpp(int pixel_format)
 {
        int bpp;
@@ -71,77 +63,6 @@ static const u32 lfsr_converts[] = {
        71, 35, 273, 136, 324, 418, 465, 488, 500, 506          /* 91 - 100 */
 };
 
-#ifdef DSI_CLK_FROM_RR
-
-static u32 dsi_rr_formula(const struct drm_display_mode *mode,
-                         int pixel_format, int video_mode_format,
-                         int lane_count, bool eotp)
-{
-       u32 bpp;
-       u32 hactive, vactive, hfp, hsync, hbp, vfp, vsync, vbp;
-       u32 hsync_bytes, hbp_bytes, hactive_bytes, hfp_bytes;
-       u32 bytes_per_line, bytes_per_frame;
-       u32 num_frames;
-       u32 bytes_per_x_frames, bytes_per_x_frames_x_lanes;
-       u32 dsi_bit_clock_hz;
-       u32 dsi_clk;
-
-       bpp = dsi_pixel_format_bpp(pixel_format);
-
-       hactive = mode->hdisplay;
-       vactive = mode->vdisplay;
-       hfp = mode->hsync_start - mode->hdisplay;
-       hsync = mode->hsync_end - mode->hsync_start;
-       hbp = mode->htotal - mode->hsync_end;
-
-       vfp = mode->vsync_start - mode->vdisplay;
-       vsync = mode->vsync_end - mode->vsync_start;
-       vbp = mode->vtotal - mode->vsync_end;
-
-       hsync_bytes = DIV_ROUND_UP(hsync * bpp, 8);
-       hbp_bytes = DIV_ROUND_UP(hbp * bpp, 8);
-       hactive_bytes = DIV_ROUND_UP(hactive * bpp, 8);
-       hfp_bytes = DIV_ROUND_UP(hfp * bpp, 8);
-
-       bytes_per_line = DSI_HSS_PACKET_SIZE + hsync_bytes +
-               DSI_HSA_PACKET_EXTRA_SIZE + DSI_HSE_PACKET_SIZE +
-               hbp_bytes + DSI_HBP_PACKET_EXTRA_SIZE +
-               hactive_bytes + DSI_HACTIVE_PACKET_EXTRA_SIZE +
-               hfp_bytes + DSI_HFP_PACKET_EXTRA_SIZE;
-
-       /*
-        * XXX: Need to accurately calculate LP to HS transition timeout and add
-        * it to bytes_per_line/bytes_per_frame.
-        */
-
-       if (eotp && video_mode_format == VIDEO_MODE_BURST)
-               bytes_per_line += DSI_EOTP_PACKET_SIZE;
-
-       bytes_per_frame = vsync * bytes_per_line + vbp * bytes_per_line +
-               vactive * bytes_per_line + vfp * bytes_per_line;
-
-       if (eotp &&
-           (video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ||
-            video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS))
-               bytes_per_frame += DSI_EOTP_PACKET_SIZE;
-
-       num_frames = drm_mode_vrefresh(mode);
-       bytes_per_x_frames = num_frames * bytes_per_frame;
-
-       bytes_per_x_frames_x_lanes = bytes_per_x_frames / lane_count;
-
-       /* the dsi clock is divided by 2 in the hardware to get dsi ddr clock */
-       dsi_bit_clock_hz = bytes_per_x_frames_x_lanes * 8;
-       dsi_clk = dsi_bit_clock_hz / 1000;
-
-       if (eotp && video_mode_format == VIDEO_MODE_BURST)
-               dsi_clk *= 2;
-
-       return dsi_clk;
-}
-
-#else
-
 /* Get DSI clock from pixel clock */
 static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
 {
@@ -155,8 +76,6 @@ static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count)
        return dsi_clk_khz;
 }
 
-#endif
-
 static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
                        struct dsi_mnp *dsi_mnp, int target_dsi_clk)
 {
@@ -322,7 +241,7 @@ static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
             bpp, pipe_bpp);
 }
 
-u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
 {
        struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
        struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
@@ -384,7 +303,7 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
        return pclk;
 }
 
-u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
+static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
 {
        u32 pclk;
        u32 dsi_clk;
@@ -419,6 +338,14 @@ u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
        return pclk;
 }
 
+u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
+{
+       if (IS_BROXTON(encoder->base.dev))
+               return bxt_dsi_get_pclk(encoder, pipe_bpp);
+       else
+               return vlv_dsi_get_pclk(encoder, pipe_bpp);
+}
+
 static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 {
        u32 temp;
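The #ifdef DSI_CLK_FROM_RR path deleted above derived the DSI clock from the refresh rate and blanking; what remains derives it from the pixel clock. The body of dsi_clk_from_pclk() is not shown in this hunk, so the sketch below only illustrates the usual relation (bit rate per lane = pixel clock * bits per pixel / lane count) as an assumption, with illustrative names:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

/* DSI clock (kHz) from pixel clock: the bit rate spread across the lanes. */
static unsigned int dsi_clk_from_pclk(unsigned int pclk_khz, int bpp,
                                      int lane_count)
{
        return DIV_ROUND_CLOSEST(pclk_khz * bpp, lane_count);
}

int main(void)
{
        /* Roughly 1080p: 148.5 MHz pclk, RGB888, 4 lanes -> about 891 MHz. */
        printf("%u kHz\n", dsi_clk_from_pclk(148500, 24, 4));
        return 0;
}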
index a1988a486b9276954db36e8cb482db283a051887..3614a951736b395ed5e615beb881a0ec7ee51631 100644 (file)
@@ -43,7 +43,7 @@
 
 static inline bool fbc_supported(struct drm_i915_private *dev_priv)
 {
-       return dev_priv->fbc.activate != NULL;
+       return HAS_FBC(dev_priv);
 }
 
 static inline bool fbc_on_pipe_a_only(struct drm_i915_private *dev_priv)
@@ -56,6 +56,11 @@ static inline bool fbc_on_plane_a_only(struct drm_i915_private *dev_priv)
        return INTEL_INFO(dev_priv)->gen < 4;
 }
 
+static inline bool no_fbc_on_multiple_pipes(struct drm_i915_private *dev_priv)
+{
+       return INTEL_INFO(dev_priv)->gen <= 3;
+}
+
 /*
  * On some platforms, where the CRTC's x:0/y:0 coordinates don't match the
  * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -74,19 +79,17 @@ static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
  * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
  * we wrote to PIPESRC.
  */
-static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
+static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
                                            int *width, int *height)
 {
-       struct intel_plane_state *plane_state =
-                       to_intel_plane_state(crtc->base.primary->state);
        int w, h;
 
-       if (intel_rotation_90_or_270(plane_state->base.rotation)) {
-               w = drm_rect_height(&plane_state->src) >> 16;
-               h = drm_rect_width(&plane_state->src) >> 16;
+       if (intel_rotation_90_or_270(cache->plane.rotation)) {
+               w = cache->plane.src_h;
+               h = cache->plane.src_w;
        } else {
-               w = drm_rect_width(&plane_state->src) >> 16;
-               h = drm_rect_height(&plane_state->src) >> 16;
+               w = cache->plane.src_w;
+               h = cache->plane.src_h;
        }
 
        if (width)
@@ -95,26 +98,23 @@ static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc,
                *height = h;
 }
 
-static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc,
-                                       struct drm_framebuffer *fb)
+static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
+                                       struct intel_fbc_state_cache *cache)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        int lines;
 
-       intel_fbc_get_plane_source_size(crtc, NULL, &lines);
+       intel_fbc_get_plane_source_size(cache, NULL, &lines);
        if (INTEL_INFO(dev_priv)->gen >= 7)
                lines = min(lines, 2048);
 
        /* Hardware needs the full buffer stride, not just the active area. */
-       return lines * fb->pitches[0];
+       return lines * cache->fb.stride;
 }
 
 static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
        u32 fbc_ctl;
 
-       dev_priv->fbc.active = false;
-
        /* Disable compression */
        fbc_ctl = I915_READ(FBC_CONTROL);
        if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -130,21 +130,17 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
        }
 }
 
-static void i8xx_fbc_activate(struct intel_crtc *crtc)
+static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-       struct drm_framebuffer *fb = crtc->base.primary->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        int cfb_pitch;
        int i;
        u32 fbc_ctl;
 
-       dev_priv->fbc.active = true;
-
        /* Note: fbc.threshold == 1 for i8xx */
-       cfb_pitch = intel_fbc_calculate_cfb_size(crtc, fb) / FBC_LL_SIZE;
-       if (fb->pitches[0] < cfb_pitch)
-               cfb_pitch = fb->pitches[0];
+       cfb_pitch = params->cfb_size / FBC_LL_SIZE;
+       if (params->fb.stride < cfb_pitch)
+               cfb_pitch = params->fb.stride;
 
        /* FBC_CTL wants 32B or 64B units */
        if (IS_GEN2(dev_priv))
@@ -161,9 +157,9 @@ static void i8xx_fbc_activate(struct intel_crtc *crtc)
 
                /* Set it up... */
                fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
-               fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
+               fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.plane);
                I915_WRITE(FBC_CONTROL2, fbc_ctl2);
-               I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
+               I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
        }
 
        /* enable it... */
@@ -173,7 +169,7 @@ static void i8xx_fbc_activate(struct intel_crtc *crtc)
        if (IS_I945GM(dev_priv))
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
-       fbc_ctl |= obj->fence_reg;
+       fbc_ctl |= params->fb.fence_reg;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
 }
 
@@ -182,23 +178,19 @@ static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
        return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
 }
 
-static void g4x_fbc_activate(struct intel_crtc *crtc)
+static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-       struct drm_framebuffer *fb = crtc->base.primary->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
 
-       dev_priv->fbc.active = true;
-
-       dpfc_ctl = DPFC_CTL_PLANE(crtc->plane) | DPFC_SR_EN;
-       if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+       dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN;
+       if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
                dpfc_ctl |= DPFC_CTL_LIMIT_2X;
        else
                dpfc_ctl |= DPFC_CTL_LIMIT_1X;
-       dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
+       dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
 
-       I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
+       I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
 
        /* enable it... */
        I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -208,8 +200,6 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
        u32 dpfc_ctl;
 
-       dev_priv->fbc.active = false;
-
        /* Disable compression */
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
@@ -230,19 +220,14 @@ static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
        POSTING_READ(MSG_FBC_REND_STATE);
 }
 
-static void ilk_fbc_activate(struct intel_crtc *crtc)
+static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-       struct drm_framebuffer *fb = crtc->base.primary->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;
-       unsigned int y_offset;
 
-       dev_priv->fbc.active = true;
-
-       dpfc_ctl = DPFC_CTL_PLANE(crtc->plane);
-       if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+       dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane);
+       if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
                threshold++;
 
        switch (threshold) {
@@ -259,18 +244,17 @@ static void ilk_fbc_activate(struct intel_crtc *crtc)
        }
        dpfc_ctl |= DPFC_CTL_FENCE_EN;
        if (IS_GEN5(dev_priv))
-               dpfc_ctl |= obj->fence_reg;
+               dpfc_ctl |= params->fb.fence_reg;
 
-       y_offset = get_crtc_fence_y_offset(crtc);
-       I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
-       I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+       I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
        if (IS_GEN6(dev_priv)) {
                I915_WRITE(SNB_DPFC_CTL_SA,
-                          SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-               I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
+                          SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+               I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
        }
 
        intel_fbc_recompress(dev_priv);
@@ -280,8 +264,6 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
        u32 dpfc_ctl;
 
-       dev_priv->fbc.active = false;
-
        /* Disable compression */
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
@@ -295,21 +277,17 @@ static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
        return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
 }
 
-static void gen7_fbc_activate(struct intel_crtc *crtc)
+static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-       struct drm_framebuffer *fb = crtc->base.primary->fb;
-       struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+       struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
        u32 dpfc_ctl;
        int threshold = dev_priv->fbc.threshold;
 
-       dev_priv->fbc.active = true;
-
        dpfc_ctl = 0;
        if (IS_IVYBRIDGE(dev_priv))
-               dpfc_ctl |= IVB_DPFC_CTL_PLANE(crtc->plane);
+               dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane);
 
-       if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
+       if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2)
                threshold++;
 
        switch (threshold) {
@@ -337,20 +315,60 @@ static void gen7_fbc_activate(struct intel_crtc *crtc)
                           ILK_FBCQ_DIS);
        } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
                /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
-               I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
-                          I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
+               I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
+                          I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
                           HSW_FBCQ_DIS);
        }
 
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
        I915_WRITE(SNB_DPFC_CTL_SA,
-                  SNB_CPU_FENCE_ENABLE | obj->fence_reg);
-       I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
+                  SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+       I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
 
        intel_fbc_recompress(dev_priv);
 }
 
+static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
+{
+       if (INTEL_INFO(dev_priv)->gen >= 5)
+               return ilk_fbc_is_active(dev_priv);
+       else if (IS_GM45(dev_priv))
+               return g4x_fbc_is_active(dev_priv);
+       else
+               return i8xx_fbc_is_active(dev_priv);
+}
+
+static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       fbc->active = true;
+
+       if (INTEL_INFO(dev_priv)->gen >= 7)
+               gen7_fbc_activate(dev_priv);
+       else if (INTEL_INFO(dev_priv)->gen >= 5)
+               ilk_fbc_activate(dev_priv);
+       else if (IS_GM45(dev_priv))
+               g4x_fbc_activate(dev_priv);
+       else
+               i8xx_fbc_activate(dev_priv);
+}
+
+static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       fbc->active = false;
+
+       if (INTEL_INFO(dev_priv)->gen >= 5)
+               ilk_fbc_deactivate(dev_priv);
+       else if (IS_GM45(dev_priv))
+               g4x_fbc_deactivate(dev_priv);
+       else
+               i8xx_fbc_deactivate(dev_priv);
+}
+
 /**
  * intel_fbc_is_active - Is FBC active?
  * @dev_priv: i915 device instance
@@ -364,24 +382,24 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
        return dev_priv->fbc.active;
 }
 
-static void intel_fbc_activate(const struct drm_framebuffer *fb)
-{
-       struct drm_i915_private *dev_priv = fb->dev->dev_private;
-       struct intel_crtc *crtc = dev_priv->fbc.crtc;
-
-       dev_priv->fbc.activate(crtc);
-
-       dev_priv->fbc.fb_id = fb->base.id;
-       dev_priv->fbc.y = crtc->base.y;
-}
-
 static void intel_fbc_work_fn(struct work_struct *__work)
 {
        struct drm_i915_private *dev_priv =
                container_of(__work, struct drm_i915_private, fbc.work.work);
-       struct intel_fbc_work *work = &dev_priv->fbc.work;
-       struct intel_crtc *crtc = dev_priv->fbc.crtc;
-       int delay_ms = 50;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_work *work = &fbc->work;
+       struct intel_crtc *crtc = fbc->crtc;
+       struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe];
+
+       if (drm_crtc_vblank_get(&crtc->base)) {
+               DRM_ERROR("vblank not available for FBC on pipe %c\n",
+                         pipe_name(crtc->pipe));
+
+               mutex_lock(&fbc->lock);
+               work->scheduled = false;
+               mutex_unlock(&fbc->lock);
+               return;
+       }
 
 retry:
        /* Delay the actual enabling to let pageflipping cease and the
@@ -390,142 +408,97 @@ retry:
         * vblank to pass after disabling the FBC before we attempt
         * to modify the control registers.
         *
-        * A more complicated solution would involve tracking vblanks
-        * following the termination of the page-flipping sequence
-        * and indeed performing the enable as a co-routine and not
-        * waiting synchronously upon the vblank.
-        *
         * WaFbcWaitForVBlankBeforeEnable:ilk,snb
+        *
+        * It is also worth mentioning that since work->scheduled_vblank can be
+        * updated multiple times by the other threads, hitting the timeout is
+        * not an error condition. We'll just end up hitting the "goto retry"
+        * case below.
         */
-       wait_remaining_ms_from_jiffies(work->enable_jiffies, delay_ms);
+       wait_event_timeout(vblank->queue,
+               drm_crtc_vblank_count(&crtc->base) != work->scheduled_vblank,
+               msecs_to_jiffies(50));
 
-       mutex_lock(&dev_priv->fbc.lock);
+       mutex_lock(&fbc->lock);
 
        /* Were we cancelled? */
        if (!work->scheduled)
                goto out;
 
        /* Were we delayed again while this function was sleeping? */
-       if (time_after(work->enable_jiffies + msecs_to_jiffies(delay_ms),
-                      jiffies)) {
-               mutex_unlock(&dev_priv->fbc.lock);
+       if (drm_crtc_vblank_count(&crtc->base) == work->scheduled_vblank) {
+               mutex_unlock(&fbc->lock);
                goto retry;
        }
 
-       if (crtc->base.primary->fb == work->fb)
-               intel_fbc_activate(work->fb);
+       intel_fbc_hw_activate(dev_priv);
 
        work->scheduled = false;
 
 out:
-       mutex_unlock(&dev_priv->fbc.lock);
-}
-
-static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
-{
-       WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-       dev_priv->fbc.work.scheduled = false;
+       mutex_unlock(&fbc->lock);
+       drm_crtc_vblank_put(&crtc->base);
 }
 
 static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-       struct intel_fbc_work *work = &dev_priv->fbc.work;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_work *work = &fbc->work;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+       WARN_ON(!mutex_is_locked(&fbc->lock));
 
-       /* It is useless to call intel_fbc_cancel_work() in this function since
-        * we're not releasing fbc.lock, so it won't have an opportunity to grab
-        * it to discover that it was cancelled. So we just update the expected
-        * jiffy count. */
-       work->fb = crtc->base.primary->fb;
+       if (drm_crtc_vblank_get(&crtc->base)) {
+               DRM_ERROR("vblank not available for FBC on pipe %c\n",
+                         pipe_name(crtc->pipe));
+               return;
+       }
+
+       /* It is useless to call intel_fbc_cancel_work() or cancel_work() in
+        * this function since we're not releasing fbc.lock, so it won't have an
+        * opportunity to grab it to discover that it was cancelled. So we just
+        * update the expected vblank count. */
        work->scheduled = true;
-       work->enable_jiffies = jiffies;
+       work->scheduled_vblank = drm_crtc_vblank_count(&crtc->base);
+       drm_crtc_vblank_put(&crtc->base);
 
        schedule_work(&work->work);
 }
 
-static void __intel_fbc_deactivate(struct drm_i915_private *dev_priv)
+static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
 {
-       WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+       struct intel_fbc *fbc = &dev_priv->fbc;
 
-       intel_fbc_cancel_work(dev_priv);
+       WARN_ON(!mutex_is_locked(&fbc->lock));
 
-       if (dev_priv->fbc.active)
-               dev_priv->fbc.deactivate(dev_priv);
-}
+       /* Calling cancel_work() here won't help due to the fact that the work
+        * function grabs fbc->lock. Just set scheduled to false so the work
+        * function can know it was cancelled. */
+       fbc->work.scheduled = false;
 
-/*
- * intel_fbc_deactivate - deactivate FBC if it's associated with crtc
- * @crtc: the CRTC
- *
- * This function deactivates FBC if it's associated with the provided CRTC.
- */
-void intel_fbc_deactivate(struct intel_crtc *crtc)
-{
-       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-
-       if (!fbc_supported(dev_priv))
-               return;
-
-       mutex_lock(&dev_priv->fbc.lock);
-       if (dev_priv->fbc.crtc == crtc)
-               __intel_fbc_deactivate(dev_priv);
-       mutex_unlock(&dev_priv->fbc.lock);
+       if (fbc->active)
+               intel_fbc_hw_deactivate(dev_priv);
 }
 
-static void set_no_fbc_reason(struct drm_i915_private *dev_priv,
-                             const char *reason)
-{
-       if (dev_priv->fbc.no_fbc_reason == reason)
-               return;
-
-       dev_priv->fbc.no_fbc_reason = reason;
-       DRM_DEBUG_KMS("Disabling FBC: %s\n", reason);
-}
-
-static bool crtc_can_fbc(struct intel_crtc *crtc)
+static bool multiple_pipes_ok(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct drm_plane *primary = crtc->base.primary;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       enum pipe pipe = crtc->pipe;
 
-       if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A)
-               return false;
-
-       if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A)
-               return false;
-
-       return true;
-}
-
-static bool crtc_is_valid(struct intel_crtc *crtc)
-{
-       if (!intel_crtc_active(&crtc->base))
-               return false;
-
-       if (!to_intel_plane_state(crtc->base.primary->state)->visible)
-               return false;
-
-       return true;
-}
-
-static bool multiple_pipes_ok(struct drm_i915_private *dev_priv)
-{
-       enum pipe pipe;
-       int n_pipes = 0;
-       struct drm_crtc *crtc;
-
-       if (INTEL_INFO(dev_priv)->gen > 4)
+       /* Don't even bother tracking anything we don't need. */
+       if (!no_fbc_on_multiple_pipes(dev_priv))
                return true;
 
-       for_each_pipe(dev_priv, pipe) {
-               crtc = dev_priv->pipe_to_crtc_mapping[pipe];
+       WARN_ON(!drm_modeset_is_locked(&primary->mutex));
 
-               if (intel_crtc_active(crtc) &&
-                   to_intel_plane_state(crtc->primary->state)->visible)
-                       n_pipes++;
-       }
+       if (to_intel_plane_state(primary->state)->visible)
+               fbc->visible_pipes_mask |= (1 << pipe);
+       else
+               fbc->visible_pipes_mask &= ~(1 << pipe);
 
-       return (n_pipes < 2);
+       return (fbc->visible_pipes_mask & ~(1 << pipe)) != 0;
 }
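For the gen2/gen3 case gated by no_fbc_on_multiple_pipes(), the new multiple_pipes_ok() above keeps a per-pipe visibility bitmask current from the primary plane state and then tests whether any other pipe's bit is set. A standalone sketch of that bitmask bookkeeping; the types and the way the result is consumed are simplified assumptions:

#include <stdio.h>
#include <stdbool.h>

static unsigned int visible_pipes_mask;

/*
 * Update this pipe's bit from the primary plane's visibility, then report
 * whether any other pipe currently has a visible primary plane.
 */
static bool other_pipe_visible(int pipe, bool plane_visible)
{
        if (plane_visible)
                visible_pipes_mask |= 1u << pipe;
        else
                visible_pipes_mask &= ~(1u << pipe);

        return (visible_pipes_mask & ~(1u << pipe)) != 0;
}

int main(void)
{
        other_pipe_visible(0, true);                  /* pipe A becomes visible */
        printf("%d\n", other_pipe_visible(1, true));  /* pipe B: A is also on -> 1 */
        return 0;
}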
 
 static int find_compression_threshold(struct drm_i915_private *dev_priv,
@@ -581,16 +554,16 @@ again:
 static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-       struct drm_framebuffer *fb = crtc->base.primary->state->fb;
+       struct intel_fbc *fbc = &dev_priv->fbc;
        struct drm_mm_node *uninitialized_var(compressed_llb);
        int size, fb_cpp, ret;
 
-       WARN_ON(drm_mm_node_allocated(&dev_priv->fbc.compressed_fb));
+       WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
 
-       size = intel_fbc_calculate_cfb_size(crtc, fb);
-       fb_cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+       size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache);
+       fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0);
 
-       ret = find_compression_threshold(dev_priv, &dev_priv->fbc.compressed_fb,
+       ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
                                         size, fb_cpp);
        if (!ret)
                goto err_llb;
@@ -599,12 +572,12 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
 
        }
 
-       dev_priv->fbc.threshold = ret;
+       fbc->threshold = ret;
 
        if (INTEL_INFO(dev_priv)->gen >= 5)
-               I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+               I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
        else if (IS_GM45(dev_priv)) {
-               I915_WRITE(DPFC_CB_BASE, dev_priv->fbc.compressed_fb.start);
+               I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
        } else {
                compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
                if (!compressed_llb)
@@ -615,23 +588,22 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
                if (ret)
                        goto err_fb;
 
-               dev_priv->fbc.compressed_llb = compressed_llb;
+               fbc->compressed_llb = compressed_llb;
 
                I915_WRITE(FBC_CFB_BASE,
-                          dev_priv->mm.stolen_base + dev_priv->fbc.compressed_fb.start);
+                          dev_priv->mm.stolen_base + fbc->compressed_fb.start);
                I915_WRITE(FBC_LL_BASE,
                           dev_priv->mm.stolen_base + compressed_llb->start);
        }
 
        DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
-                     dev_priv->fbc.compressed_fb.size,
-                     dev_priv->fbc.threshold);
+                     fbc->compressed_fb.size, fbc->threshold);
 
        return 0;
 
 err_fb:
        kfree(compressed_llb);
-       i915_gem_stolen_remove_node(dev_priv, &dev_priv->fbc.compressed_fb);
+       i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
 err_llb:
        pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
        return -ENOSPC;
@@ -639,25 +611,27 @@ err_llb:
 
 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 {
-       if (drm_mm_node_allocated(&dev_priv->fbc.compressed_fb))
-               i915_gem_stolen_remove_node(dev_priv,
-                                           &dev_priv->fbc.compressed_fb);
-
-       if (dev_priv->fbc.compressed_llb) {
-               i915_gem_stolen_remove_node(dev_priv,
-                                           dev_priv->fbc.compressed_llb);
-               kfree(dev_priv->fbc.compressed_llb);
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (drm_mm_node_allocated(&fbc->compressed_fb))
+               i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+
+       if (fbc->compressed_llb) {
+               i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
+               kfree(fbc->compressed_llb);
        }
 }
 
 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
 {
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
        if (!fbc_supported(dev_priv))
                return;
 
-       mutex_lock(&dev_priv->fbc.lock);
+       mutex_lock(&fbc->lock);
        __intel_fbc_cleanup_cfb(dev_priv);
-       mutex_unlock(&dev_priv->fbc.lock);
+       mutex_unlock(&fbc->lock);
 }
 
 static bool stride_is_valid(struct drm_i915_private *dev_priv,
@@ -681,19 +655,17 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
        return true;
 }
 
-static bool pixel_format_is_valid(struct drm_framebuffer *fb)
+static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
+                                 uint32_t pixel_format)
 {
-       struct drm_device *dev = fb->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       switch (fb->pixel_format) {
+       switch (pixel_format) {
        case DRM_FORMAT_XRGB8888:
        case DRM_FORMAT_XBGR8888:
                return true;
        case DRM_FORMAT_XRGB1555:
        case DRM_FORMAT_RGB565:
                /* 16bpp not supported on gen2 */
-               if (IS_GEN2(dev))
+               if (IS_GEN2(dev_priv))
                        return false;
                /* WaFbcOnly1to1Ratio:ctg */
                if (IS_G4X(dev_priv))
@@ -713,6 +685,7 @@ static bool pixel_format_is_valid(struct drm_framebuffer *fb)
 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct intel_fbc *fbc = &dev_priv->fbc;
        unsigned int effective_w, effective_h, max_w, max_h;
 
        if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) {
@@ -726,87 +699,105 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
                max_h = 1536;
        }
 
-       intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h);
+       intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
+                                       &effective_h);
        effective_w += crtc->adjusted_x;
        effective_h += crtc->adjusted_y;
 
        return effective_w <= max_w && effective_h <= max_h;
 }
 
-/**
- * __intel_fbc_update - activate/deactivate FBC as needed, unlocked
- * @crtc: the CRTC that triggered the update
- *
- * This function completely reevaluates the status of FBC, then activates,
- * deactivates or maintains it on the same state.
- */
-static void __intel_fbc_update(struct intel_crtc *crtc)
+static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
-       struct drm_framebuffer *fb;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_state_cache *cache = &fbc->state_cache;
+       struct intel_crtc_state *crtc_state =
+               to_intel_crtc_state(crtc->base.state);
+       struct intel_plane_state *plane_state =
+               to_intel_plane_state(crtc->base.primary->state);
+       struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj;
-       const struct drm_display_mode *adjusted_mode;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
+       WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
+       WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
 
-       if (!multiple_pipes_ok(dev_priv)) {
-               set_no_fbc_reason(dev_priv, "more than one pipe active");
-               goto out_disable;
-       }
+       cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
+       if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+               cache->crtc.hsw_bdw_pixel_rate =
+                       ilk_pipe_pixel_rate(crtc_state);
 
-       if (!dev_priv->fbc.enabled || dev_priv->fbc.crtc != crtc)
-               return;
+       cache->plane.rotation = plane_state->base.rotation;
+       cache->plane.src_w = drm_rect_width(&plane_state->src) >> 16;
+       cache->plane.src_h = drm_rect_height(&plane_state->src) >> 16;
+       cache->plane.visible = plane_state->visible;
 
-       if (!crtc_is_valid(crtc)) {
-               set_no_fbc_reason(dev_priv, "no output");
-               goto out_disable;
-       }
+       if (!cache->plane.visible)
+               return;
 
-       fb = crtc->base.primary->fb;
        obj = intel_fb_obj(fb);
-       adjusted_mode = &crtc->config->base.adjusted_mode;
 
-       if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
-           (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
-               set_no_fbc_reason(dev_priv, "incompatible mode");
-               goto out_disable;
+       /* FIXME: We lack the proper locking here, so only run this on the
+        * platforms that need it. */
+       if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7)
+               cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
+       cache->fb.pixel_format = fb->pixel_format;
+       cache->fb.stride = fb->pitches[0];
+       cache->fb.fence_reg = obj->fence_reg;
+       cache->fb.tiling_mode = obj->tiling_mode;
+}
+
+static bool intel_fbc_can_activate(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+       if (!cache->plane.visible) {
+               fbc->no_fbc_reason = "primary plane not visible";
+               return false;
+       }
+
+       if ((cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) ||
+           (cache->crtc.mode_flags & DRM_MODE_FLAG_DBLSCAN)) {
+               fbc->no_fbc_reason = "incompatible mode";
+               return false;
        }
 
        if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
-               set_no_fbc_reason(dev_priv, "mode too large for compression");
-               goto out_disable;
+               fbc->no_fbc_reason = "mode too large for compression";
+               return false;
        }
 
        /* The use of a CPU fence is mandatory in order to detect writes
         * by the CPU to the scanout and trigger updates to the FBC.
         */
-       if (obj->tiling_mode != I915_TILING_X ||
-           obj->fence_reg == I915_FENCE_REG_NONE) {
-               set_no_fbc_reason(dev_priv, "framebuffer not tiled or fenced");
-               goto out_disable;
+       if (cache->fb.tiling_mode != I915_TILING_X ||
+           cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+               fbc->no_fbc_reason = "framebuffer not tiled or fenced";
+               return false;
        }
        if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_G4X(dev_priv) &&
-           crtc->base.primary->state->rotation != BIT(DRM_ROTATE_0)) {
-               set_no_fbc_reason(dev_priv, "rotation unsupported");
-               goto out_disable;
+           cache->plane.rotation != BIT(DRM_ROTATE_0)) {
+               fbc->no_fbc_reason = "rotation unsupported";
+               return false;
        }
 
-       if (!stride_is_valid(dev_priv, fb->pitches[0])) {
-               set_no_fbc_reason(dev_priv, "framebuffer stride not supported");
-               goto out_disable;
+       if (!stride_is_valid(dev_priv, cache->fb.stride)) {
+               fbc->no_fbc_reason = "framebuffer stride not supported";
+               return false;
        }
 
-       if (!pixel_format_is_valid(fb)) {
-               set_no_fbc_reason(dev_priv, "pixel format is invalid");
-               goto out_disable;
+       if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) {
+               fbc->no_fbc_reason = "pixel format is invalid";
+               return false;
        }
 
        /* WaFbcExceedCdClockThreshold:hsw,bdw */
        if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
-           ilk_pipe_pixel_rate(crtc->config) >=
-           dev_priv->cdclk_freq * 95 / 100) {
-               set_no_fbc_reason(dev_priv, "pixel rate is too big");
-               goto out_disable;
+           cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
+               fbc->no_fbc_reason = "pixel rate is too big";
+               return false;
        }
 
        /* It is possible for the required CFB size change without a
@@ -819,189 +810,320 @@ static void __intel_fbc_update(struct intel_crtc *crtc)
         * we didn't get any invalidate/deactivate calls, but this would require
         * a lot of tracking just for a specific case. If we conclude it's an
         * important case, we can implement it later. */
-       if (intel_fbc_calculate_cfb_size(crtc, fb) >
-           dev_priv->fbc.compressed_fb.size * dev_priv->fbc.threshold) {
-               set_no_fbc_reason(dev_priv, "CFB requirements changed");
-               goto out_disable;
+       if (intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+           fbc->compressed_fb.size * fbc->threshold) {
+               fbc->no_fbc_reason = "CFB requirements changed";
+               return false;
        }
 
+       return true;
+}
+
+static bool intel_fbc_can_choose(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (intel_vgpu_active(dev_priv->dev)) {
+               fbc->no_fbc_reason = "VGPU is active";
+               return false;
+       }
+
+       if (i915.enable_fbc < 0) {
+               fbc->no_fbc_reason = "disabled per chip default";
+               return false;
+       }
+
+       if (!i915.enable_fbc) {
+               fbc->no_fbc_reason = "disabled per module param";
+               return false;
+       }
+
+       if (fbc_on_pipe_a_only(dev_priv) && crtc->pipe != PIPE_A) {
+               fbc->no_fbc_reason = "no enabled pipes can have FBC";
+               return false;
+       }
+
+       if (fbc_on_plane_a_only(dev_priv) && crtc->plane != PLANE_A) {
+               fbc->no_fbc_reason = "no enabled planes can have FBC";
+               return false;
+       }
+
+       return true;
+}
+
+static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
+                                    struct intel_fbc_reg_params *params)
+{
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+       /* Since all our fields are integer types, use memset to zero the
+        * struct so the comparison function can rely on memcmp: the padding
+        * bytes are guaranteed to be zero as well. */
+       memset(params, 0, sizeof(*params));
+
+       params->crtc.pipe = crtc->pipe;
+       params->crtc.plane = crtc->plane;
+       params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
+
+       params->fb.pixel_format = cache->fb.pixel_format;
+       params->fb.stride = cache->fb.stride;
+       params->fb.fence_reg = cache->fb.fence_reg;
+
+       params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
+
+       params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
+}
+
+static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
+                                      struct intel_fbc_reg_params *params2)
+{
+       /* We can use this since intel_fbc_get_reg_params() does a memset. */
+       return memcmp(params1, params2, sizeof(*params1)) == 0;
+}
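
The memset in intel_fbc_get_reg_params() plus the memcmp above is a common C idiom: comparing whole structs with memcmp() is only safe when every byte, including padding, is deterministic. A small self-contained illustration (not driver code; the struct and values are made up):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct reg_params {
	char plane;              /* typically followed by padding bytes */
	int stride;
	unsigned int cfb_size;
};

static void params_fill(struct reg_params *p, char plane, int stride,
			unsigned int cfb_size)
{
	memset(p, 0, sizeof(*p));  /* zero padding so memcmp() is reliable */
	p->plane = plane;
	p->stride = stride;
	p->cfb_size = cfb_size;
}

static bool params_equal(const struct reg_params *a, const struct reg_params *b)
{
	return memcmp(a, b, sizeof(*a)) == 0;
}

int main(void)
{
	struct reg_params a, b;

	params_fill(&a, 'A', 8192, 1 << 20);
	params_fill(&b, 'A', 8192, 1 << 20);
	printf("equal: %d\n", params_equal(&a, &b)); /* prints 1 */
	return 0;
}

Without the memset, two logically identical structs could still differ in their padding bytes and compare unequal.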
+
+void intel_fbc_pre_update(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
+       if (!fbc_supported(dev_priv))
+               return;
+
+       mutex_lock(&fbc->lock);
+
+       if (!multiple_pipes_ok(crtc)) {
+               fbc->no_fbc_reason = "more than one pipe active";
+               goto deactivate;
+       }
+
+       if (!fbc->enabled || fbc->crtc != crtc)
+               goto unlock;
+
+       intel_fbc_update_state_cache(crtc);
+
+deactivate:
+       intel_fbc_deactivate(dev_priv);
+unlock:
+       mutex_unlock(&fbc->lock);
+}
+
+static void __intel_fbc_post_update(struct intel_crtc *crtc)
+{
+       struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_fbc_reg_params old_params;
+
+       WARN_ON(!mutex_is_locked(&fbc->lock));
+
+       if (!fbc->enabled || fbc->crtc != crtc)
+               return;
+
+       if (!intel_fbc_can_activate(crtc)) {
+               WARN_ON(fbc->active);
+               return;
+       }
+
+       old_params = fbc->params;
+       intel_fbc_get_reg_params(crtc, &fbc->params);
+
        /* If the scanout has not changed, don't modify the FBC settings.
         * Note that we make the fundamental assumption that the fb->obj
         * cannot be unpinned (and have its GTT offset and fence revoked)
         * without first being decoupled from the scanout and FBC disabled.
         */
-       if (dev_priv->fbc.crtc == crtc &&
-           dev_priv->fbc.fb_id == fb->base.id &&
-           dev_priv->fbc.y == crtc->base.y &&
-           dev_priv->fbc.active)
+       if (fbc->active &&
+           intel_fbc_reg_params_equal(&old_params, &fbc->params))
                return;
 
-       if (intel_fbc_is_active(dev_priv)) {
-               /* We update FBC along two paths, after changing fb/crtc
-                * configuration (modeswitching) and after page-flipping
-                * finishes. For the latter, we know that not only did
-                * we disable the FBC at the start of the page-flip
-                * sequence, but also more than one vblank has passed.
-                *
-                * For the former case of modeswitching, it is possible
-                * to switch between two FBC valid configurations
-                * instantaneously so we do need to disable the FBC
-                * before we can modify its control registers. We also
-                * have to wait for the next vblank for that to take
-                * effect. However, since we delay enabling FBC we can
-                * assume that a vblank has passed since disabling and
-                * that we can safely alter the registers in the deferred
-                * callback.
-                *
-                * In the scenario that we go from a valid to invalid
-                * and then back to valid FBC configuration we have
-                * no strict enforcement that a vblank occurred since
-                * disabling the FBC. However, along all current pipe
-                * disabling paths we do need to wait for a vblank at
-                * some point. And we wait before enabling FBC anyway.
-                */
-               DRM_DEBUG_KMS("deactivating FBC for update\n");
-               __intel_fbc_deactivate(dev_priv);
-       }
-
+       intel_fbc_deactivate(dev_priv);
        intel_fbc_schedule_activation(crtc);
-       dev_priv->fbc.no_fbc_reason = "FBC enabled (not necessarily active)";
-       return;
-
-out_disable:
-       /* Multiple disables should be harmless */
-       if (intel_fbc_is_active(dev_priv)) {
-               DRM_DEBUG_KMS("unsupported config, deactivating FBC\n");
-               __intel_fbc_deactivate(dev_priv);
-       }
+       fbc->no_fbc_reason = "FBC enabled (active or scheduled)";
 }
 
-/*
- * intel_fbc_update - activate/deactivate FBC as needed
- * @crtc: the CRTC that triggered the update
- *
- * This function reevaluates the overall state and activates or deactivates FBC.
- */
-void intel_fbc_update(struct intel_crtc *crtc)
+void intel_fbc_post_update(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
                return;
 
-       mutex_lock(&dev_priv->fbc.lock);
-       __intel_fbc_update(crtc);
-       mutex_unlock(&dev_priv->fbc.lock);
+       mutex_lock(&fbc->lock);
+       __intel_fbc_post_update(crtc);
+       mutex_unlock(&fbc->lock);
+}
+
+static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
+{
+       if (fbc->enabled)
+               return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
+       else
+               return fbc->possible_framebuffer_bits;
 }
 
 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
                          unsigned int frontbuffer_bits,
                          enum fb_op_origin origin)
 {
-       unsigned int fbc_bits;
+       struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
                return;
 
-       if (origin == ORIGIN_GTT)
+       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
                return;
 
-       mutex_lock(&dev_priv->fbc.lock);
+       mutex_lock(&fbc->lock);
 
-       if (dev_priv->fbc.enabled)
-               fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe);
-       else
-               fbc_bits = dev_priv->fbc.possible_framebuffer_bits;
-
-       dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
+       fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
 
-       if (dev_priv->fbc.busy_bits)
-               __intel_fbc_deactivate(dev_priv);
+       if (fbc->enabled && fbc->busy_bits)
+               intel_fbc_deactivate(dev_priv);
 
-       mutex_unlock(&dev_priv->fbc.lock);
+       mutex_unlock(&fbc->lock);
 }
 
 void intel_fbc_flush(struct drm_i915_private *dev_priv,
                     unsigned int frontbuffer_bits, enum fb_op_origin origin)
 {
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
        if (!fbc_supported(dev_priv))
                return;
 
-       if (origin == ORIGIN_GTT)
+       if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
                return;
 
-       mutex_lock(&dev_priv->fbc.lock);
+       mutex_lock(&fbc->lock);
 
-       dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
+       fbc->busy_bits &= ~frontbuffer_bits;
 
-       if (!dev_priv->fbc.busy_bits && dev_priv->fbc.enabled) {
-               if (origin != ORIGIN_FLIP && dev_priv->fbc.active) {
+       if (!fbc->busy_bits && fbc->enabled &&
+           (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
+               if (fbc->active)
                        intel_fbc_recompress(dev_priv);
-               } else {
-                       __intel_fbc_deactivate(dev_priv);
-                       __intel_fbc_update(dev_priv->fbc.crtc);
+               else
+                       __intel_fbc_post_update(fbc->crtc);
+       }
+
+       mutex_unlock(&fbc->lock);
+}
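
The invalidate/flush pair above implements frontbuffer write tracking with a busy-bits mask: invalidate marks bits busy and deactivates compression, flush clears them and recompresses once nothing relevant is still busy. A rough standalone model of that flow (illustrative only; the bit value and the printf calls stand in for the real deactivate/recompress paths):

#include <stdio.h>

static unsigned int busy_bits;
static const unsigned int fbc_bit = 1u << 0; /* primary plane of the FBC pipe */

static void fbc_invalidate(unsigned int frontbuffer_bits)
{
	busy_bits |= fbc_bit & frontbuffer_bits;
	if (busy_bits)
		printf("deactivate FBC (CPU is writing the scanout)\n");
}

static void fbc_flush(unsigned int frontbuffer_bits)
{
	busy_bits &= ~frontbuffer_bits;
	if (!busy_bits && (frontbuffer_bits & fbc_bit))
		printf("recompress / re-activate FBC\n");
}

int main(void)
{
	fbc_invalidate(fbc_bit); /* deactivates */
	fbc_flush(fbc_bit);      /* recompresses */
	fbc_flush(1u << 4);      /* unrelated plane: no action */
	return 0;
}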
+
+/**
+ * intel_fbc_choose_crtc - select a CRTC to enable FBC on
+ * @dev_priv: i915 device instance
+ * @state: the atomic state structure
+ *
+ * This function looks at the proposed state for CRTCs and planes, then chooses
+ * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
+ * true.
+ *
+ * Later, intel_fbc_enable checks crtc_state->enable_fbc and then, if possible,
+ * enables FBC for the chosen CRTC. If it does, it sets dev_priv->fbc.crtc.
+ */
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+                          struct drm_atomic_state *state)
+{
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct drm_crtc *crtc;
+       struct drm_crtc_state *crtc_state;
+       struct drm_plane *plane;
+       struct drm_plane_state *plane_state;
+       bool fbc_crtc_present = false;
+       int i, j;
+
+       mutex_lock(&fbc->lock);
+
+       for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (fbc->crtc == to_intel_crtc(crtc)) {
+                       fbc_crtc_present = true;
+                       break;
                }
        }
+       /* This atomic commit doesn't involve the CRTC currently tied to FBC. */
+       if (!fbc_crtc_present && fbc->crtc != NULL)
+               goto out;
+
+       /* Simply choose the first CRTC that is compatible and has a visible
+        * plane. We could go for fancier schemes such as checking the plane
+        * size, but this would just affect the few platforms that don't tie FBC
+        * to pipe or plane A. */
+       for_each_plane_in_state(state, plane, plane_state, i) {
+               struct intel_plane_state *intel_plane_state =
+                       to_intel_plane_state(plane_state);
+
+               if (!intel_plane_state->visible)
+                       continue;
 
-       mutex_unlock(&dev_priv->fbc.lock);
+               for_each_crtc_in_state(state, crtc, crtc_state, j) {
+                       struct intel_crtc_state *intel_crtc_state =
+                               to_intel_crtc_state(crtc_state);
+
+                       if (plane_state->crtc != crtc)
+                               continue;
+
+                       if (!intel_fbc_can_choose(to_intel_crtc(crtc)))
+                               break;
+
+                       intel_crtc_state->enable_fbc = true;
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&fbc->lock);
 }
 
 /**
  * intel_fbc_enable: tries to enable FBC on the CRTC
  * @crtc: the CRTC
  *
- * This function checks if it's possible to enable FBC on the following CRTC,
- * then enables it. Notice that it doesn't activate FBC.
+ * This function checks if the given CRTC was chosen for FBC, then enables it if
+ * possible. Notice that it doesn't activate FBC. It is valid to call
+ * intel_fbc_enable multiple times for the same pipe without an
+ * intel_fbc_disable in the middle, as long as it is deactivated.
  */
 void intel_fbc_enable(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
                return;
 
-       mutex_lock(&dev_priv->fbc.lock);
+       mutex_lock(&fbc->lock);
 
-       if (dev_priv->fbc.enabled) {
-               WARN_ON(dev_priv->fbc.crtc == crtc);
-               goto out;
-       }
-
-       WARN_ON(dev_priv->fbc.active);
-       WARN_ON(dev_priv->fbc.crtc != NULL);
-
-       if (intel_vgpu_active(dev_priv->dev)) {
-               set_no_fbc_reason(dev_priv, "VGPU is active");
-               goto out;
-       }
-
-       if (i915.enable_fbc < 0) {
-               set_no_fbc_reason(dev_priv, "disabled per chip default");
+       if (fbc->enabled) {
+               WARN_ON(fbc->crtc == NULL);
+               if (fbc->crtc == crtc) {
+                       WARN_ON(!crtc->config->enable_fbc);
+                       WARN_ON(fbc->active);
+               }
                goto out;
        }
 
-       if (!i915.enable_fbc) {
-               set_no_fbc_reason(dev_priv, "disabled per module param");
+       if (!crtc->config->enable_fbc)
                goto out;
-       }
 
-       if (!crtc_can_fbc(crtc)) {
-               set_no_fbc_reason(dev_priv, "no enabled pipes can have FBC");
-               goto out;
-       }
+       WARN_ON(fbc->active);
+       WARN_ON(fbc->crtc != NULL);
 
+       intel_fbc_update_state_cache(crtc);
        if (intel_fbc_alloc_cfb(crtc)) {
-               set_no_fbc_reason(dev_priv, "not enough stolen memory");
+               fbc->no_fbc_reason = "not enough stolen memory";
                goto out;
        }
 
        DRM_DEBUG_KMS("Enabling FBC on pipe %c\n", pipe_name(crtc->pipe));
-       dev_priv->fbc.no_fbc_reason = "FBC enabled but not active yet\n";
+       fbc->no_fbc_reason = "FBC enabled but not active yet";
 
-       dev_priv->fbc.enabled = true;
-       dev_priv->fbc.crtc = crtc;
+       fbc->enabled = true;
+       fbc->crtc = crtc;
 out:
-       mutex_unlock(&dev_priv->fbc.lock);
+       mutex_unlock(&fbc->lock);
 }
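
As the kernel-doc above stresses, "enabled" and "active" are two different levels of FBC state. A minimal sketch of that split, with hypothetical helper names used purely to illustrate the lifecycle described in this file:

#include <stdbool.h>
#include <stdio.h>

struct fbc_state {
	bool enabled; /* software: a CRTC owns FBC, CFB allocated */
	bool active;  /* hardware: compression currently running */
};

static void fbc_enable(struct fbc_state *fbc)     { fbc->enabled = true; }
static void fbc_activate(struct fbc_state *fbc)   { if (fbc->enabled) fbc->active = true; }
static void fbc_deactivate(struct fbc_state *fbc) { fbc->active = false; }
static void fbc_disable(struct fbc_state *fbc)    { fbc->active = false; fbc->enabled = false; }

int main(void)
{
	struct fbc_state fbc = { false, false };

	fbc_enable(&fbc);     /* chosen for FBC, not yet compressing */
	fbc_activate(&fbc);   /* deferred work activates the hardware later */
	fbc_deactivate(&fbc); /* e.g. frontbuffer invalidate or pre_update */
	fbc_disable(&fbc);    /* CRTC torn down */
	printf("enabled=%d active=%d\n", fbc.enabled, fbc.active);
	return 0;
}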
 
 /**
@@ -1013,58 +1135,88 @@ out:
  */
 static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
 {
-       struct intel_crtc *crtc = dev_priv->fbc.crtc;
+       struct intel_fbc *fbc = &dev_priv->fbc;
+       struct intel_crtc *crtc = fbc->crtc;
 
-       WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock));
-       WARN_ON(!dev_priv->fbc.enabled);
-       WARN_ON(dev_priv->fbc.active);
-       assert_pipe_disabled(dev_priv, crtc->pipe);
+       WARN_ON(!mutex_is_locked(&fbc->lock));
+       WARN_ON(!fbc->enabled);
+       WARN_ON(fbc->active);
+       WARN_ON(crtc->active);
 
        DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
 
        __intel_fbc_cleanup_cfb(dev_priv);
 
-       dev_priv->fbc.enabled = false;
-       dev_priv->fbc.crtc = NULL;
+       fbc->enabled = false;
+       fbc->crtc = NULL;
 }
 
 /**
- * intel_fbc_disable_crtc - disable FBC if it's associated with crtc
+ * intel_fbc_disable - disable FBC if it's associated with crtc
  * @crtc: the CRTC
  *
  * This function disables FBC if it's associated with the provided CRTC.
  */
-void intel_fbc_disable_crtc(struct intel_crtc *crtc)
+void intel_fbc_disable(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
+       struct intel_fbc *fbc = &dev_priv->fbc;
 
        if (!fbc_supported(dev_priv))
                return;
 
-       mutex_lock(&dev_priv->fbc.lock);
-       if (dev_priv->fbc.crtc == crtc) {
-               WARN_ON(!dev_priv->fbc.enabled);
-               WARN_ON(dev_priv->fbc.active);
+       mutex_lock(&fbc->lock);
+       if (fbc->crtc == crtc) {
+               WARN_ON(!fbc->enabled);
+               WARN_ON(fbc->active);
                __intel_fbc_disable(dev_priv);
        }
-       mutex_unlock(&dev_priv->fbc.lock);
+       mutex_unlock(&fbc->lock);
+
+       cancel_work_sync(&fbc->work.work);
 }
 
 /**
- * intel_fbc_disable - globally disable FBC
+ * intel_fbc_global_disable - globally disable FBC
  * @dev_priv: i915 device instance
  *
  * This function disables FBC regardless of which CRTC is associated with it.
  */
-void intel_fbc_disable(struct drm_i915_private *dev_priv)
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
 {
+       struct intel_fbc *fbc = &dev_priv->fbc;
+
        if (!fbc_supported(dev_priv))
                return;
 
-       mutex_lock(&dev_priv->fbc.lock);
-       if (dev_priv->fbc.enabled)
+       mutex_lock(&fbc->lock);
+       if (fbc->enabled)
                __intel_fbc_disable(dev_priv);
-       mutex_unlock(&dev_priv->fbc.lock);
+       mutex_unlock(&fbc->lock);
+
+       cancel_work_sync(&fbc->work.work);
+}
+
+/**
+ * intel_fbc_init_pipe_state - initialize FBC's CRTC visibility tracking
+ * @dev_priv: i915 device instance
+ *
+ * The FBC code needs to track CRTC visibility since the older platforms can't
+ * have FBC enabled while multiple pipes are used. This function does the
+ * initial setup at driver load so that the FBC state matches the real hardware.
+ */
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
+{
+       struct intel_crtc *crtc;
+
+       /* Don't even bother tracking anything if we don't need to. */
+       if (!no_fbc_on_multiple_pipes(dev_priv))
+               return;
+
+       for_each_intel_crtc(dev_priv->dev, crtc)
+               if (intel_crtc_active(&crtc->base) &&
+                   to_intel_plane_state(crtc->base.primary->state)->visible)
+                       dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
 }
 
 /**
@@ -1075,51 +1227,35 @@ void intel_fbc_disable(struct drm_i915_private *dev_priv)
  */
 void intel_fbc_init(struct drm_i915_private *dev_priv)
 {
+       struct intel_fbc *fbc = &dev_priv->fbc;
        enum pipe pipe;
 
-       INIT_WORK(&dev_priv->fbc.work.work, intel_fbc_work_fn);
-       mutex_init(&dev_priv->fbc.lock);
-       dev_priv->fbc.enabled = false;
-       dev_priv->fbc.active = false;
-       dev_priv->fbc.work.scheduled = false;
+       INIT_WORK(&fbc->work.work, intel_fbc_work_fn);
+       mutex_init(&fbc->lock);
+       fbc->enabled = false;
+       fbc->active = false;
+       fbc->work.scheduled = false;
 
        if (!HAS_FBC(dev_priv)) {
-               dev_priv->fbc.no_fbc_reason = "unsupported by this chipset";
+               fbc->no_fbc_reason = "unsupported by this chipset";
                return;
        }
 
        for_each_pipe(dev_priv, pipe) {
-               dev_priv->fbc.possible_framebuffer_bits |=
+               fbc->possible_framebuffer_bits |=
                                INTEL_FRONTBUFFER_PRIMARY(pipe);
 
                if (fbc_on_pipe_a_only(dev_priv))
                        break;
        }
 
-       if (INTEL_INFO(dev_priv)->gen >= 7) {
-               dev_priv->fbc.is_active = ilk_fbc_is_active;
-               dev_priv->fbc.activate = gen7_fbc_activate;
-               dev_priv->fbc.deactivate = ilk_fbc_deactivate;
-       } else if (INTEL_INFO(dev_priv)->gen >= 5) {
-               dev_priv->fbc.is_active = ilk_fbc_is_active;
-               dev_priv->fbc.activate = ilk_fbc_activate;
-               dev_priv->fbc.deactivate = ilk_fbc_deactivate;
-       } else if (IS_GM45(dev_priv)) {
-               dev_priv->fbc.is_active = g4x_fbc_is_active;
-               dev_priv->fbc.activate = g4x_fbc_activate;
-               dev_priv->fbc.deactivate = g4x_fbc_deactivate;
-       } else {
-               dev_priv->fbc.is_active = i8xx_fbc_is_active;
-               dev_priv->fbc.activate = i8xx_fbc_activate;
-               dev_priv->fbc.deactivate = i8xx_fbc_deactivate;
-
-               /* This value was pulled out of someone's hat */
+       /* This value was pulled out of someone's hat */
+       if (INTEL_INFO(dev_priv)->gen <= 4 && !IS_GM45(dev_priv))
                I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
-       }
 
        /* We still don't have any sort of hardware state readout for FBC, so
         * deactivate it in case the BIOS activated it to make sure software
         * matches the hardware state. */
-       if (dev_priv->fbc.is_active(dev_priv))
-               dev_priv->fbc.deactivate(dev_priv);
+       if (intel_fbc_hw_is_active(dev_priv))
+               intel_fbc_hw_deactivate(dev_priv);
 }
index bea75cafc623f158325cde3011d05a61f3fbce1e..09840f4380f98a79691d50d4c946964f2f31f3bc 100644 (file)
@@ -119,7 +119,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 {
        struct intel_fbdev *ifbdev =
                container_of(helper, struct intel_fbdev, helper);
-       struct drm_framebuffer *fb = NULL;
+       struct drm_framebuffer *fb;
        struct drm_device *dev = helper->dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_fb_cmd2 mode_cmd = {};
@@ -171,8 +171,6 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 
 out:
        mutex_unlock(&dev->struct_mutex);
-       if (!IS_ERR_OR_NULL(fb))
-               drm_framebuffer_unreference(fb);
        return ret;
 }
 
index 822952235dcfa50630feefccade5668d36611afe..73002e901ff219217bd5f17335583aa8504b2e1d 100644 (file)
@@ -43,9 +43,10 @@ struct i915_guc_client {
        uint32_t wq_offset;
        uint32_t wq_size;
        uint32_t wq_tail;
+       uint32_t wq_head;
 
        /* GuC submission statistics & status */
-       uint64_t submissions[I915_NUM_RINGS];
+       uint64_t submissions[GUC_MAX_ENGINES_NUM];
        uint32_t q_fail;
        uint32_t b_fail;
        int retcode;
@@ -88,6 +89,8 @@ struct intel_guc {
        uint32_t log_flags;
        struct drm_i915_gem_object *log_obj;
 
+       struct drm_i915_gem_object *ads_obj;
+
        struct drm_i915_gem_object *ctx_pool_obj;
        struct ida ctx_ids;
 
@@ -103,8 +106,8 @@ struct intel_guc {
        uint32_t action_fail;           /* Total number of failures     */
        int32_t action_err;             /* Last error code              */
 
-       uint64_t submissions[I915_NUM_RINGS];
-       uint32_t last_seqno[I915_NUM_RINGS];
+       uint64_t submissions[GUC_MAX_ENGINES_NUM];
+       uint32_t last_seqno[GUC_MAX_ENGINES_NUM];
 };
 
 /* intel_guc_loader.c */
@@ -122,5 +125,6 @@ int i915_guc_submit(struct i915_guc_client *client,
                    struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_device *dev);
 void i915_guc_submission_fini(struct drm_device *dev);
+int i915_guc_wq_check_space(struct i915_guc_client *client);
 
 #endif
index 40b2ea572e16e959a321cb2599f9e1158d282992..2de57ffe5e18b8a20538ff33e70b8cc679c46ca0 100644 (file)
 #define GUC_CTX_PRIORITY_HIGH          1
 #define GUC_CTX_PRIORITY_KMD_NORMAL    2
 #define GUC_CTX_PRIORITY_NORMAL                3
+#define GUC_CTX_PRIORITY_NUM           4
 
 #define GUC_MAX_GPU_CONTEXTS           1024
 #define        GUC_INVALID_CTX_ID              GUC_MAX_GPU_CONTEXTS
 
+#define GUC_RENDER_ENGINE              0
+#define GUC_VIDEO_ENGINE               1
+#define GUC_BLITTER_ENGINE             2
+#define GUC_VIDEOENHANCE_ENGINE                3
+#define GUC_VIDEO_ENGINE2              4
+#define GUC_MAX_ENGINES_NUM            (GUC_VIDEO_ENGINE2 + 1)
+
 /* Work queue item header definitions */
 #define WQ_STATUS_ACTIVE               1
 #define WQ_STATUS_SUSPENDED            2
 #define GUC_CTL_CTXINFO                        0
 #define   GUC_CTL_CTXNUM_IN16_SHIFT    0
 #define   GUC_CTL_BASE_ADDR_SHIFT      12
+
 #define GUC_CTL_ARAT_HIGH              1
 #define GUC_CTL_ARAT_LOW               2
+
 #define GUC_CTL_DEVICE_INFO            3
 #define   GUC_CTL_GTTYPE_SHIFT         0
 #define   GUC_CTL_COREFAMILY_SHIFT     7
+
 #define GUC_CTL_LOG_PARAMS             4
 #define   GUC_LOG_VALID                        (1 << 0)
 #define   GUC_LOG_NOTIFY_ON_HALF_FULL  (1 << 1)
 #define   GUC_LOG_ISR_PAGES            3
 #define   GUC_LOG_ISR_SHIFT            9
 #define   GUC_LOG_BUF_ADDR_SHIFT       12
+
 #define GUC_CTL_PAGE_FAULT_CONTROL     5
+
 #define GUC_CTL_WA                     6
 #define   GUC_CTL_WA_UK_BY_DRIVER      (1 << 3)
+
 #define GUC_CTL_FEATURE                        7
 #define   GUC_CTL_VCS2_ENABLED         (1 << 0)
 #define   GUC_CTL_KERNEL_SUBMISSIONS   (1 << 1)
 #define   GUC_CTL_PREEMPTION_LOG       (1 << 5)
 #define   GUC_CTL_ENABLE_SLPC          (1 << 7)
 #define   GUC_CTL_RESET_ON_PREMPT_FAILURE      (1 << 8)
+
 #define GUC_CTL_DEBUG                  8
 #define   GUC_LOG_VERBOSITY_SHIFT      0
 #define   GUC_LOG_VERBOSITY_LOW                (0 << GUC_LOG_VERBOSITY_SHIFT)
 /* Verbosity range-check limits, without the shift */
 #define          GUC_LOG_VERBOSITY_MIN         0
 #define          GUC_LOG_VERBOSITY_MAX         3
+#define          GUC_LOG_VERBOSITY_MASK        0x0000000f
+#define          GUC_LOG_DESTINATION_MASK      (3 << 4)
+#define   GUC_LOG_DISABLED             (1 << 6)
+#define   GUC_PROFILE_ENABLED          (1 << 7)
+#define   GUC_WQ_TRACK_ENABLED         (1 << 8)
+#define   GUC_ADS_ENABLED              (1 << 9)
+#define   GUC_DEBUG_RESERVED           (1 << 10)
+#define   GUC_ADS_ADDR_SHIFT           11
+#define   GUC_ADS_ADDR_MASK            0xfffff800
+
 #define GUC_CTL_RSRVD                  9
 
-#define GUC_CTL_MAX_DWORDS             (GUC_CTL_RSRVD + 1)
+#define GUC_CTL_MAX_DWORDS             (SOFT_SCRATCH_COUNT - 2) /* [1..14] */
 
 /**
  * DOC: GuC Firmware Layout
@@ -267,7 +292,7 @@ struct guc_context_desc {
        u64 db_trigger_phy;
        u16 db_id;
 
-       struct guc_execlist_context lrc[I915_NUM_RINGS];
+       struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
 
        u8 attribute;
 
@@ -299,6 +324,99 @@ struct guc_context_desc {
 #define GUC_POWER_D2           3
 #define GUC_POWER_D3           4
 
+/* Scheduling policy settings */
+
+/* Reset engine upon preempt failure */
+#define POLICY_RESET_ENGINE            (1<<0)
+/* Preempt to idle on quantum expiry */
+#define POLICY_PREEMPT_TO_IDLE         (1<<1)
+
+#define POLICY_MAX_NUM_WI              15
+
+struct guc_policy {
+       /* Time for one workload to execute. (in micro seconds) */
+       u32 execution_quantum;
+       u32 reserved1;
+
+       /* Time to wait for a preemption request to complete before issuing a
+        * reset. (in micro seconds). */
+       u32 preemption_time;
+
+       /* How much time to allow the workload to run after the first fault is
+        * observed, before preempting it. (in micro seconds) */
+       u32 fault_time;
+
+       u32 policy_flags;
+       u32 reserved[2];
+} __packed;
+
+struct guc_policies {
+       struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
+
+       /* In micro seconds. How much time to allow before DPC processing is
+        * called back via interrupt (to prevent DPC queue drain starving).
+        * Typically 1000s of micro seconds (example only, not granularity). */
+       u32 dpc_promote_time;
+
+       /* Must be set to take these new values. */
+       u32 is_valid;
+
+       /* Max number of WIs to process per call. A large value may keep CS
+        * idle. */
+       u32 max_num_work_items;
+
+       u32 reserved[19];
+} __packed;
+
+/* GuC MMIO reg state struct */
+
+#define GUC_REGSET_FLAGS_NONE          0x0
+#define GUC_REGSET_POWERCYCLE          0x1
+#define GUC_REGSET_MASKED              0x2
+#define GUC_REGSET_ENGINERESET         0x4
+#define GUC_REGSET_SAVE_DEFAULT_VALUE  0x8
+#define GUC_REGSET_SAVE_CURRENT_VALUE  0x10
+
+#define GUC_REGSET_MAX_REGISTERS       25
+#define GUC_MMIO_WHITE_LIST_START      0x24d0
+#define GUC_MMIO_WHITE_LIST_MAX                12
+#define GUC_S3_SAVE_SPACE_PAGES                10
+
+struct guc_mmio_regset {
+       struct __packed {
+               u32 offset;
+               u32 value;
+               u32 flags;
+       } registers[GUC_REGSET_MAX_REGISTERS];
+
+       u32 values_valid;
+       u32 number_of_registers;
+} __packed;
+
+struct guc_mmio_reg_state {
+       struct guc_mmio_regset global_reg;
+       struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
+
+       /* MMIO registers that are set as non privileged */
+       struct __packed {
+               u32 mmio_start;
+               u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
+               u32 count;
+       } mmio_white_list[GUC_MAX_ENGINES_NUM];
+} __packed;
+
+/* GuC Additional Data Struct */
+
+struct guc_ads {
+       u32 reg_state_addr;
+       u32 reg_state_buffer;
+       u32 golden_context_lrca;
+       u32 scheduler_policies;
+       u32 reserved0[3];
+       u32 eng_state_size[GUC_MAX_ENGINES_NUM];
+       u32 reserved2[4];
+} __packed;
+
 /* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */
 enum host2guc_action {
        HOST2GUC_ACTION_DEFAULT = 0x0,
index 550921f2ef7d5073f73d26481a44ebb1b4bbfb4d..3accd914490f34798d5b9eaa9e0849fb1008b509 100644 (file)
@@ -165,6 +165,13 @@ static void set_guc_init_params(struct drm_i915_private *dev_priv)
                        i915.guc_log_level << GUC_LOG_VERBOSITY_SHIFT;
        }
 
+       if (guc->ads_obj) {
+               u32 ads = (u32)i915_gem_obj_ggtt_offset(guc->ads_obj)
+                               >> PAGE_SHIFT;
+               params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
+               params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
+       }
+
        /* If GuC submission is enabled, set up additional parameters here */
        if (i915.enable_guc_submission) {
                u32 pgs = i915_gem_obj_ggtt_offset(dev_priv->guc.ctx_pool_obj);
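
The ads_obj hunk above packs the ADS object's GGTT page number into the upper bits of the GUC_CTL_DEBUG parameter using the GUC_ADS_ADDR_SHIFT and GUC_ADS_ENABLED definitions added earlier in this diff. A standalone sketch of that packing (constants copied for illustration; this is not the driver code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_4K  12
#define ADS_ADDR_SHIFT 11
#define ADS_ADDR_MASK  0xfffff800u
#define ADS_ENABLED    (1u << 9)

static uint32_t pack_ads_param(uint32_t ggtt_offset)
{
	uint32_t ads = ggtt_offset >> PAGE_SHIFT_4K;            /* page number */
	uint32_t param = (ads << ADS_ADDR_SHIFT) & ADS_ADDR_MASK;

	return param | ADS_ENABLED;
}

int main(void)
{
	/* 0x00123000 is page-aligned, so nothing is lost by the shift. */
	printf("param = 0x%08" PRIx32 "\n", pack_ads_param(0x00123000));
	return 0;
}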
@@ -438,6 +445,7 @@ fail:
 
        direct_interrupts_to_host(dev_priv);
        i915_guc_submission_disable(dev);
+       i915_guc_submission_fini(dev);
 
        return err;
 }
@@ -554,10 +562,12 @@ fail:
        DRM_ERROR("Failed to fetch GuC firmware from %s (error %d)\n",
                  guc_fw->guc_fw_path, err);
 
+       mutex_lock(&dev->struct_mutex);
        obj = guc_fw->guc_fw_obj;
        if (obj)
                drm_gem_object_unreference(&obj->base);
        guc_fw->guc_fw_obj = NULL;
+       mutex_unlock(&dev->struct_mutex);
 
        release_firmware(fw);           /* OK even if fw is NULL */
        guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
@@ -624,10 +634,11 @@ void intel_guc_ucode_fini(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
 
+       mutex_lock(&dev->struct_mutex);
        direct_interrupts_to_host(dev_priv);
+       i915_guc_submission_disable(dev);
        i915_guc_submission_fini(dev);
 
-       mutex_lock(&dev->struct_mutex);
        if (guc_fw->guc_fw_obj)
                drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
        guc_fw->guc_fw_obj = NULL;
index 4a77639a489dfcd67ff0937e6e3fa4da61fed461..8698a643d027f70f5dd9a212c8b13eaa8f220e0e 100644 (file)
@@ -2033,6 +2033,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        enum port port = intel_dig_port->port;
        uint8_t alternate_ddc_pin;
 
+       if (WARN(intel_dig_port->max_lanes < 4,
+                "Not enough lanes (%d) for HDMI on port %c\n",
+                intel_dig_port->max_lanes, port_name(port)))
+               return;
+
        drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
                           DRM_MODE_CONNECTOR_HDMIA);
        drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
@@ -2218,6 +2223,7 @@ void intel_hdmi_init(struct drm_device *dev,
        dev_priv->dig_port_map[port] = intel_encoder;
        intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
        intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
+       intel_dig_port->max_lanes = 4;
 
        intel_hdmi_init_connector(intel_dig_port, intel_connector);
 }
index 25254b5c1ac5c95173d0d5b4fa101f35a1131cb7..deb8282c26d83f952473ae145c4fef0b3112b9f1 100644 (file)
@@ -683,7 +683,7 @@ int intel_setup_gmbus(struct drm_device *dev)
        return 0;
 
 err:
-       while (--pin) {
+       while (pin--) {
                if (!intel_gmbus_is_valid_pin(dev_priv, pin))
                        continue;
 
index 3aa614731d7e4b7160a85336d96df41469587991..3a03646e343da320bb434361847e8534b674c4f5 100644 (file)
@@ -225,7 +225,8 @@ enum {
 #define GEN8_CTX_ID_SHIFT 32
 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT  0x17
 
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
+static int intel_lr_context_pin(struct intel_context *ctx,
+                               struct intel_engine_cs *engine);
 static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
                struct drm_i915_gem_object *default_ctx_obj);
 
@@ -263,65 +264,92 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
        return 0;
 }
 
+static void
+logical_ring_init_platform_invariants(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+
+       ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
+                                       IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
+                                       (ring->id == VCS || ring->id == VCS2);
+
+       ring->ctx_desc_template = GEN8_CTX_VALID;
+       ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) <<
+                                  GEN8_CTX_ADDRESSING_MODE_SHIFT;
+       if (IS_GEN8(dev))
+               ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
+       ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
+
+       /* TODO: WaDisableLiteRestore when we start using semaphore
+        * signalling between Command Streamers */
+       /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
+
+       /* WaEnableForceRestoreInCtxtDescForVCS:skl */
+       /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
+       if (ring->disable_lite_restore_wa)
+               ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+}
+
 /**
- * intel_execlists_ctx_id() - get the Execlists Context ID
- * @ctx_obj: Logical Ring Context backing object.
+ * intel_lr_context_descriptor_update() - calculate & cache the descriptor
+ *                                        for a pinned context
  *
- * Do not confuse with ctx->id! Unfortunately we have a name overload
- * here: the old context ID we pass to userspace as a handler so that
- * they can refer to a context, and the new context ID we pass to the
- * ELSP so that the GPU can inform us of the context status via
- * interrupts.
+ * @ctx: Context to work on
+ * @ring: Engine the descriptor will be used with
  *
- * Return: 20-bits globally unique context ID.
+ * The context descriptor encodes various attributes of a context,
+ * including its GTT address and some flags. Because it's fairly
+ * expensive to calculate, we'll just do it once and cache the result,
+ * which remains valid until the context is unpinned.
+ *
+ * This is what a descriptor looks like, from LSB to MSB:
+ *    bits 0-11:    flags, GEN8_CTX_* (cached in ctx_desc_template)
+ *    bits 12-31:    LRCA, GTT address of (the HWSP of) this context
+ *    bits 32-51:    ctx ID, a globally unique tag (the LRCA again!)
+ *    bits 52-63:    reserved, may encode the engine ID (for GuC)
  */
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
+static void
+intel_lr_context_descriptor_update(struct intel_context *ctx,
+                                  struct intel_engine_cs *ring)
 {
-       u32 lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
-                       LRC_PPHWSP_PN * PAGE_SIZE;
+       uint64_t lrca, desc;
 
-       /* LRCA is required to be 4K aligned so the more significant 20 bits
-        * are globally unique */
-       return lrca >> 12;
-}
+       lrca = ctx->engine[ring->id].lrc_vma->node.start +
+              LRC_PPHWSP_PN * PAGE_SIZE;
 
-static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
-{
-       struct drm_device *dev = ring->dev;
+       desc = ring->ctx_desc_template;                    /* bits  0-11 */
+       desc |= lrca;                                      /* bits 12-31 */
+       desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */
 
-       return (IS_SKL_REVID(dev, 0, SKL_REVID_B0) ||
-               IS_BXT_REVID(dev, 0, BXT_REVID_A1)) &&
-              (ring->id == VCS || ring->id == VCS2);
+       ctx->engine[ring->id].lrc_desc = desc;
 }
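
Given the bit layout documented above, the descriptor is assembled with plain shifts and ORs; the following self-contained sketch (illustrative constants and values, not the i915 implementation) also shows how the 20-bit context ID falls out of bits 32-51 again. It relies on the LRCA being 4KiB aligned, so OR-ing it in does not disturb the flag bits:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CTX_ID_SHIFT  32
#define PAGE_SHIFT_4K 12

static uint64_t make_context_descriptor(uint64_t flags, uint64_t lrca)
{
	uint64_t desc = flags & 0xfffULL;                 /* bits  0-11: flags        */

	desc |= lrca;                                     /* bits 12-31: aligned LRCA */
	desc |= (lrca >> PAGE_SHIFT_4K) << CTX_ID_SHIFT;  /* bits 32-51: context ID   */
	return desc;                                      /* bits 52-63 left zero     */
}

static uint32_t context_id(uint64_t desc)
{
	return (uint32_t)(desc >> CTX_ID_SHIFT);          /* what the ID helper extracts */
}

int main(void)
{
	uint64_t desc = make_context_descriptor(0x021, 0x00345000);

	printf("desc = 0x%016" PRIx64 ", ctx id = 0x%05" PRIx32 "\n",
	       desc, context_id(desc));
	return 0;
}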
 
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
                                     struct intel_engine_cs *ring)
 {
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-       uint64_t desc;
-       uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
-                       LRC_PPHWSP_PN * PAGE_SIZE;
-
-       WARN_ON(lrca & 0xFFFFFFFF00000FFFULL);
-
-       desc = GEN8_CTX_VALID;
-       desc |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT;
-       if (IS_GEN8(ctx_obj->base.dev))
-               desc |= GEN8_CTX_L3LLC_COHERENT;
-       desc |= GEN8_CTX_PRIVILEGE;
-       desc |= lrca;
-       desc |= (u64)intel_execlists_ctx_id(ctx_obj) << GEN8_CTX_ID_SHIFT;
-
-       /* TODO: WaDisableLiteRestore when we start using semaphore
-        * signalling between Command Streamers */
-       /* desc |= GEN8_CTX_FORCE_RESTORE; */
-
-       /* WaEnableForceRestoreInCtxtDescForVCS:skl */
-       /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
-       if (disable_lite_restore_wa(ring))
-               desc |= GEN8_CTX_FORCE_RESTORE;
+       return ctx->engine[ring->id].lrc_desc;
+}
 
-       return desc;
+/**
+ * intel_execlists_ctx_id() - get the Execlists Context ID
+ * @ctx: Context to get the ID for
+ * @ring: Engine to get the ID for
+ *
+ * Do not confuse with ctx->id! Unfortunately we have a name overload
+ * here: the old context ID we pass to userspace as a handle so that
+ * they can refer to a context, and the new context ID we pass to the
+ * ELSP so that the GPU can inform us of the context status via
+ * interrupts.
+ *
+ * The context ID is a portion of the context descriptor, so we can
+ * just extract the required part from the cached descriptor.
+ *
+ * Return: 20-bits globally unique context ID.
+ */
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+                          struct intel_engine_cs *ring)
+{
+       return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT;
 }
 
 static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
@@ -363,20 +391,9 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
 {
        struct intel_engine_cs *ring = rq->ring;
        struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
-       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-       struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj;
-       struct page *page;
-       uint32_t *reg_state;
-
-       BUG_ON(!ctx_obj);
-       WARN_ON(!i915_gem_obj_is_pinned(ctx_obj));
-       WARN_ON(!i915_gem_obj_is_pinned(rb_obj));
-
-       page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
-       reg_state = kmap_atomic(page);
+       uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state;
 
        reg_state[CTX_RING_TAIL+1] = rq->tail;
-       reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(rb_obj);
 
        if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
                /* True 32b PPGTT with dynamic page allocation: update PDP
@@ -390,8 +407,6 @@ static int execlists_update_context(struct drm_i915_gem_request *rq)
                ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
        }
 
-       kunmap_atomic(reg_state);
-
        return 0;
 }
 
@@ -431,9 +446,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
                        /* Same ctx: ignore first request, as second request
                         * will update tail past first request's workload */
                        cursor->elsp_submitted = req0->elsp_submitted;
-                       list_del(&req0->execlist_link);
-                       list_add_tail(&req0->execlist_link,
-                               &ring->execlist_retired_req_list);
+                       list_move_tail(&req0->execlist_link,
+                                      &ring->execlist_retired_req_list);
                        req0 = cursor;
                } else {
                        req1 = cursor;
@@ -478,16 +492,13 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
                                            execlist_link);
 
        if (head_req != NULL) {
-               struct drm_i915_gem_object *ctx_obj =
-                               head_req->ctx->engine[ring->id].state;
-               if (intel_execlists_ctx_id(ctx_obj) == request_id) {
+               if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) {
                        WARN(head_req->elsp_submitted == 0,
                             "Never submitted head request\n");
 
                        if (--head_req->elsp_submitted <= 0) {
-                               list_del(&head_req->execlist_link);
-                               list_add_tail(&head_req->execlist_link,
-                                       &ring->execlist_retired_req_list);
+                               list_move_tail(&head_req->execlist_link,
+                                              &ring->execlist_retired_req_list);
                                return true;
                        }
                }
@@ -496,6 +507,19 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
        return false;
 }
 
+static void get_context_status(struct intel_engine_cs *ring,
+                              u8 read_pointer,
+                              u32 *status, u32 *context_id)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+
+       if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES))
+               return;
+
+       *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer));
+       *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer));
+}
+
 /**
  * intel_lrc_irq_handler() - handle Context Switch interrupts
  * @ring: Engine Command Streamer to handle.
@@ -516,16 +540,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
 
        read_pointer = ring->next_context_status_buffer;
-       write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
+       write_pointer = GEN8_CSB_WRITE_PTR(status_pointer);
        if (read_pointer > write_pointer)
                write_pointer += GEN8_CSB_ENTRIES;
 
        spin_lock(&ring->execlist_lock);
 
        while (read_pointer < write_pointer) {
-               read_pointer++;
-               status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % GEN8_CSB_ENTRIES));
-               status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % GEN8_CSB_ENTRIES));
+
+               get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES,
+                                  &status, &status_id);
 
                if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
                        continue;
@@ -538,14 +562,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
                                WARN(1, "Preemption without Lite Restore\n");
                }
 
-                if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
-                    (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
+               if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) ||
+                   (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) {
                        if (execlists_check_remove_request(ring, status_id))
                                submit_contexts++;
                }
        }
 
-       if (disable_lite_restore_wa(ring)) {
+       if (ring->disable_lite_restore_wa) {
                /* Prevent a ctx to preempt itself */
                if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
                    (submit_contexts != 0))
@@ -556,13 +580,16 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
 
        spin_unlock(&ring->execlist_lock);
 
-       WARN(submit_contexts > 2, "More than two context complete events?\n");
+       if (unlikely(submit_contexts > 2))
+               DRM_ERROR("More than two context complete events?\n");
+
        ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
 
+       /* Update the read pointer to the old write pointer. Manual ringbuffer
+        * management ftw </sarcasm> */
        I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
-                  _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
-                                ((u32)ring->next_context_status_buffer &
-                                 GEN8_CSB_PTR_MASK) << 8));
+                  _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
+                                ring->next_context_status_buffer << 8));
 }
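
The loop above walks the status buffer as a small ring: both pointers live in 0..5, the write pointer is bumped by GEN8_CSB_ENTRIES when it has already wrapped past the saved read pointer, and every slot access is taken modulo GEN8_CSB_ENTRIES. A stand-alone sketch of the same arithmetic, with made-up pointer values:

#include <stdio.h>

#define GEN8_CSB_ENTRIES 6

int main(void)
{
        unsigned read_pointer = 4;      /* ring->next_context_status_buffer */
        unsigned write_pointer = 1;     /* GEN8_CSB_WRITE_PTR(status_pointer) */

        if (read_pointer > write_pointer)
                write_pointer += GEN8_CSB_ENTRIES;

        /* Visits slots 5, 0, 1 for the sample values above. */
        while (read_pointer < write_pointer)
                printf("process CSB slot %u\n", ++read_pointer % GEN8_CSB_ENTRIES);

        printf("next read pointer: %u\n", write_pointer % GEN8_CSB_ENTRIES);
        return 0;
}
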
 
 static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -571,8 +598,8 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
        struct drm_i915_gem_request *cursor;
        int num_elements = 0;
 
-       if (request->ctx != ring->default_context)
-               intel_lr_context_pin(request);
+       if (request->ctx != request->i915->kernel_context)
+               intel_lr_context_pin(request->ctx, ring);
 
        i915_gem_request_reference(request);
 
@@ -592,9 +619,8 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
                if (request->ctx == tail_req->ctx) {
                        WARN(tail_req->elsp_submitted != 0,
                                "More than 2 already-submitted reqs queued\n");
-                       list_del(&tail_req->execlist_link);
-                       list_add_tail(&tail_req->execlist_link,
-                               &ring->execlist_retired_req_list);
+                       list_move_tail(&tail_req->execlist_link,
+                                      &ring->execlist_retired_req_list);
                }
        }
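
list_move_tail() used above is shorthand for the removed list_del() + list_add_tail() pair: it unlinks the entry from whatever list it is on and appends it to the given head. A self-contained sketch with a simplified doubly linked list (the helpers below are stand-ins for illustration, not <linux/list.h>):

#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del_entry(struct list_head *e)
{
        e->prev->next = e->next;
        e->next->prev = e->prev;
}

static void list_add_tail_entry(struct list_head *e, struct list_head *h)
{
        e->prev = h->prev;
        e->next = h;
        h->prev->next = e;
        h->prev = e;
}

/* Equivalent of list_move_tail(): delete, then re-add at the tail. */
static void list_move_tail_entry(struct list_head *e, struct list_head *h)
{
        list_del_entry(e);
        list_add_tail_entry(e, h);
}

int main(void)
{
        struct list_head queue, retired, req;

        list_init(&queue);
        list_init(&retired);
        list_add_tail_entry(&req, &queue);

        list_move_tail_entry(&req, &retired);   /* req now sits on 'retired' */
        printf("queue empty: %d, retired holds req: %d\n",
               queue.next == &queue, retired.next == &req);
        return 0;
}
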
 
@@ -660,17 +686,27 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
-       int ret;
+       int ret = 0;
 
        request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
 
-       if (request->ctx != request->ring->default_context) {
-               ret = intel_lr_context_pin(request);
+       if (i915.enable_guc_submission) {
+               /*
+                * Check that the GuC has space for the request before
+                * going any further, as the i915_add_request() call
+                * later on mustn't fail ...
+                */
+               struct intel_guc *guc = &request->i915->guc;
+
+               ret = i915_guc_wq_check_space(guc->execbuf_client);
                if (ret)
                        return ret;
        }
 
-       return 0;
+       if (request->ctx != request->i915->kernel_context)
+               ret = intel_lr_context_pin(request->ctx, request->ring);
+
+       return ret;
 }
 
 static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
@@ -724,23 +760,46 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
  * on a queue waiting for the ELSP to be ready to accept a new context submission. At that
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
-static void
+static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-       struct intel_engine_cs *ring = request->ring;
+       struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct drm_i915_private *dev_priv = request->i915;
+       struct intel_engine_cs *engine = request->ring;
 
-       intel_logical_ring_advance(request->ringbuf);
+       intel_logical_ring_advance(ringbuf);
+       request->tail = ringbuf->tail;
 
-       request->tail = request->ringbuf->tail;
+       /*
+        * Here we add two extra NOOPs as padding to avoid
+        * lite restore of a context with HEAD==TAIL.
+        *
+        * Caller must reserve WA_TAIL_DWORDS for us!
+        */
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_logical_ring_emit(ringbuf, MI_NOOP);
+       intel_logical_ring_advance(ringbuf);
 
-       if (intel_ring_stopped(ring))
-               return;
+       if (intel_ring_stopped(engine))
+               return 0;
+
+       if (engine->last_context != request->ctx) {
+               if (engine->last_context)
+                       intel_lr_context_unpin(engine->last_context, engine);
+               if (request->ctx != request->i915->kernel_context) {
+                       intel_lr_context_pin(request->ctx, engine);
+                       engine->last_context = request->ctx;
+               } else {
+                       engine->last_context = NULL;
+               }
+       }
 
        if (dev_priv->guc.execbuf_client)
                i915_guc_submit(dev_priv->guc.execbuf_client, request);
        else
                execlists_context_queue(request);
+
+       return 0;
 }
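
The tail recorded in the request is taken before the two MI_NOOPs are emitted, so the padding sits beyond the point the hardware is asked to execute up to and a lite restore of the same context never sees HEAD == TAIL. A toy illustration of that ordering (ring/emit are invented names for this sketch; MI_NOOP encodes as 0 on these command streamers):

#include <stdio.h>
#include <stdint.h>

#define MI_NOOP         0
#define WA_TAIL_DWORDS  2

struct ring { uint32_t buf[16]; unsigned tail; };

static void emit(struct ring *r, uint32_t dw) { r->buf[r->tail++] = dw; }

int main(void)
{
        struct ring r = { .tail = 0 };
        unsigned request_tail;

        emit(&r, 0x11111111);           /* ...last real dword of the request */
        request_tail = r.tail;          /* tail the context/ELSP will use    */

        emit(&r, MI_NOOP);              /* WA_TAIL_DWORDS of padding so a     */
        emit(&r, MI_NOOP);              /* lite restore never sees HEAD==TAIL */

        printf("request tail %u, ring tail %u\n", request_tail, r.tail);
        return 0;
}
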
 
 static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
@@ -967,8 +1026,9 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
                struct drm_i915_gem_object *ctx_obj =
                                ctx->engine[ring->id].state;
 
-               if (ctx_obj && (ctx != ring->default_context))
-                       intel_lr_context_unpin(req);
+               if (ctx_obj && (ctx != req->i915->kernel_context))
+                       intel_lr_context_unpin(ctx, ring);
+
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
@@ -1012,24 +1072,39 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
        return 0;
 }
 
-static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
-               struct drm_i915_gem_object *ctx_obj,
-               struct intel_ringbuffer *ringbuf)
+static int intel_lr_context_do_pin(struct intel_context *ctx,
+                                  struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+       struct page *lrc_state_page;
+       uint32_t *lrc_reg_state;
+       int ret;
 
        WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
        ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
                        PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
        if (ret)
                return ret;
 
+       lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN);
+       if (WARN_ON(!lrc_state_page)) {
+               ret = -ENODEV;
+               goto unpin_ctx_obj;
+       }
+
        ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
        if (ret)
                goto unpin_ctx_obj;
 
+       ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj);
+       intel_lr_context_descriptor_update(ctx, ring);
+       lrc_reg_state = kmap(lrc_state_page);
+       lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start;
+       ctx->engine[ring->id].lrc_reg_state = lrc_reg_state;
        ctx_obj->dirty = true;
 
        /* Invalidate GuC TLB. */
@@ -1044,37 +1119,44 @@ unpin_ctx_obj:
        return ret;
 }
 
-static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
+static int intel_lr_context_pin(struct intel_context *ctx,
+                               struct intel_engine_cs *engine)
 {
        int ret = 0;
-       struct intel_engine_cs *ring = rq->ring;
-       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-       struct intel_ringbuffer *ringbuf = rq->ringbuf;
 
-       if (rq->ctx->engine[ring->id].pin_count++ == 0) {
-               ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
+       if (ctx->engine[engine->id].pin_count++ == 0) {
+               ret = intel_lr_context_do_pin(ctx, engine);
                if (ret)
                        goto reset_pin_count;
+
+               i915_gem_context_reference(ctx);
        }
        return ret;
 
 reset_pin_count:
-       rq->ctx->engine[ring->id].pin_count = 0;
+       ctx->engine[engine->id].pin_count = 0;
        return ret;
 }
 
-void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
+void intel_lr_context_unpin(struct intel_context *ctx,
+                           struct intel_engine_cs *engine)
 {
-       struct intel_engine_cs *ring = rq->ring;
-       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
-       struct intel_ringbuffer *ringbuf = rq->ringbuf;
+       struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
 
-       if (ctx_obj) {
-               WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-               if (--rq->ctx->engine[ring->id].pin_count == 0) {
-                       intel_unpin_ringbuffer_obj(ringbuf);
-                       i915_gem_object_ggtt_unpin(ctx_obj);
-               }
+       WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex));
+
+       if (WARN_ON_ONCE(!ctx_obj))
+               return;
+
+       if (--ctx->engine[engine->id].pin_count == 0) {
+               kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state));
+               intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
+               i915_gem_object_ggtt_unpin(ctx_obj);
+               ctx->engine[engine->id].lrc_vma = NULL;
+               ctx->engine[engine->id].lrc_desc = 0;
+               ctx->engine[engine->id].lrc_reg_state = NULL;
+
+               i915_gem_context_unreference(ctx);
        }
 }
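
Pinning is now per (context, engine) and reference counted: the first pin does the GGTT pin, kmaps the state page and maps the ringbuffer, and takes a context reference; the last unpin reverses all of it. A minimal sketch of that refcount pattern, with do_pin()/do_unpin() standing in for intel_lr_context_do_pin() and its teardown:

#include <stdio.h>

struct engine_state { int pin_count; int mapped; };

static int do_pin(struct engine_state *s)    { s->mapped = 1; return 0; }
static void do_unpin(struct engine_state *s) { s->mapped = 0; }

static int ctx_pin(struct engine_state *s)
{
        if (s->pin_count++ == 0) {
                int ret = do_pin(s);
                if (ret) {
                        s->pin_count = 0;       /* reset on failure */
                        return ret;
                }
        }
        return 0;
}

static void ctx_unpin(struct engine_state *s)
{
        if (--s->pin_count == 0)
                do_unpin(s);
}

int main(void)
{
        struct engine_state s = { 0, 0 };

        ctx_pin(&s); ctx_pin(&s);       /* nested pins */
        ctx_unpin(&s);                  /* still mapped */
        printf("after first unpin: pin_count=%d mapped=%d\n", s.pin_count, s.mapped);
        ctx_unpin(&s);                  /* last unpin tears down */
        printf("after last unpin:  pin_count=%d mapped=%d\n", s.pin_count, s.mapped);
        return 0;
}
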
 
@@ -1087,7 +1169,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_workarounds *w = &dev_priv->workarounds;
 
-       if (WARN_ON_ONCE(w->count == 0))
+       if (w->count == 0)
                return 0;
 
        ring->gpu_caches_dirty = true;
@@ -1474,7 +1556,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
        u8 next_context_status_buffer_hw;
 
        lrc_setup_hardware_status_page(ring,
-                               ring->default_context->engine[ring->id].state);
+                               dev_priv->kernel_context->engine[ring->id].state);
 
        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
        I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1493,9 +1575,11 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
         *      | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
         * BDW  | CSB regs not reset       | CSB regs reset       |
         * CHT  | CSB regs not reset       | CSB regs not reset   |
+        * SKL  |         ?                |         ?            |
+        * BXT  |         ?                |         ?            |
         */
-       next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
-                                                  & GEN8_CSB_PTR_MASK);
+       next_context_status_buffer_hw =
+               GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring)));
 
        /*
         * When the CSB registers are reset (also after power-up / gpu reset),
@@ -1698,7 +1782,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        struct intel_ringbuffer *ringbuf = request->ringbuf;
        struct intel_engine_cs *ring = ringbuf->ring;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-       bool vf_flush_wa;
+       bool vf_flush_wa = false;
        u32 flags = 0;
        int ret;
 
@@ -1707,6 +1791,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
 
@@ -1719,14 +1804,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
-       }
 
-       /*
-        * On GEN9+ Before VF_CACHE_INVALIDATE we need to emit a NULL pipe
-        * control.
-        */
-       vf_flush_wa = INTEL_INFO(ring->dev)->gen >= 9 &&
-                     flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
+               /*
+                * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
+                * pipe control.
+                */
+               if (IS_GEN9(ring->dev))
+                       vf_flush_wa = true;
+       }
 
        ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
        if (ret)
@@ -1790,44 +1875,65 @@ static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno)
        intel_flush_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
+/*
+ * Reserve space for 2 NOOPs at the end of each request to be
+ * used as a workaround for not being allowed to do lite
+ * restore with HEAD==TAIL (WaIdleLiteRestore).
+ */
+#define WA_TAIL_DWORDS 2
+
+static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
+{
+       return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
+}
+
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
        struct intel_ringbuffer *ringbuf = request->ringbuf;
-       struct intel_engine_cs *ring = ringbuf->ring;
-       u32 cmd;
        int ret;
 
-       /*
-        * Reserve space for 2 NOOPs at the end of each request to be
-        * used as a workaround for not being allowed to do lite
-        * restore with HEAD==TAIL (WaIdleLiteRestore).
-        */
-       ret = intel_logical_ring_begin(request, 8);
+       ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
        if (ret)
                return ret;
 
-       cmd = MI_STORE_DWORD_IMM_GEN4;
-       cmd |= MI_GLOBAL_GTT;
+       /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
+       BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-       intel_logical_ring_emit(ringbuf, cmd);
        intel_logical_ring_emit(ringbuf,
-                               (ring->status_page.gfx_addr +
-                               (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)));
+                               (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+       intel_logical_ring_emit(ringbuf,
+                               hws_seqno_address(request->ring) |
+                               MI_FLUSH_DW_USE_GTT);
        intel_logical_ring_emit(ringbuf, 0);
        intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
        intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
        intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance_and_submit(request);
+       return intel_logical_ring_advance_and_submit(request);
+}
 
-       /*
-        * Here we add two extra NOOPs as padding to avoid
-        * lite restore of a context with HEAD==TAIL.
-        */
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_emit(ringbuf, MI_NOOP);
-       intel_logical_ring_advance(ringbuf);
+static int gen8_emit_request_render(struct drm_i915_gem_request *request)
+{
+       struct intel_ringbuffer *ringbuf = request->ringbuf;
+       int ret;
 
-       return 0;
+       ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+       if (ret)
+               return ret;
+
+       /* w/a: for post-sync ops following a GPGPU operation we
+        * need a prior CS_STALL, which is emitted by the flush
+        * following the batch.
+        */
+       intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5));
+       intel_logical_ring_emit(ringbuf,
+                               (PIPE_CONTROL_GLOBAL_GTT_IVB |
+                                PIPE_CONTROL_CS_STALL |
+                                PIPE_CONTROL_QW_WRITE));
+       intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring));
+       intel_logical_ring_emit(ringbuf, 0);
+       intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+       intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
+       return intel_logical_ring_advance_and_submit(request);
 }
 
 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
@@ -1910,12 +2016,44 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
                ring->status_page.obj = NULL;
        }
 
+       ring->disable_lite_restore_wa = false;
+       ring->ctx_desc_template = 0;
+
        lrc_destroy_wa_ctx_obj(ring);
        ring->dev = NULL;
 }
 
-static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+static void
+logical_ring_default_vfuncs(struct drm_device *dev,
+                           struct intel_engine_cs *ring)
+{
+       /* Default vfuncs which can be overriden by each engine. */
+       ring->init_hw = gen8_init_common_ring;
+       ring->emit_request = gen8_emit_request;
+       ring->emit_flush = gen8_emit_flush;
+       ring->irq_get = gen8_logical_ring_get_irq;
+       ring->irq_put = gen8_logical_ring_put_irq;
+       ring->emit_bb_start = gen8_emit_bb_start;
+       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+               ring->get_seqno = bxt_a_get_seqno;
+               ring->set_seqno = bxt_a_set_seqno;
+       } else {
+               ring->get_seqno = gen8_get_seqno;
+               ring->set_seqno = gen8_set_seqno;
+       }
+}
+
+static inline void
+logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
 {
+       ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
+       ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+}
+
+static int
+logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+{
+       struct intel_context *dctx = to_i915(dev)->kernel_context;
        int ret;
 
        /* Intentionally left blank. */
@@ -1932,19 +2070,18 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
        INIT_LIST_HEAD(&ring->execlist_retired_req_list);
        spin_lock_init(&ring->execlist_lock);
 
+       logical_ring_init_platform_invariants(ring);
+
        ret = i915_cmd_parser_init_ring(ring);
        if (ret)
                goto error;
 
-       ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+       ret = intel_lr_context_deferred_alloc(dctx, ring);
        if (ret)
                goto error;
 
        /* As this is the default context, always pin it */
-       ret = intel_lr_context_do_pin(
-                       ring,
-                       ring->default_context->engine[ring->id].state,
-                       ring->default_context->engine[ring->id].ringbuf);
+       ret = intel_lr_context_do_pin(dctx, ring);
        if (ret) {
                DRM_ERROR(
                        "Failed to pin and map ringbuffer %s: %d\n",
@@ -1967,32 +2104,25 @@ static int logical_render_ring_init(struct drm_device *dev)
 
        ring->name = "render ring";
        ring->id = RCS;
+       ring->exec_id = I915_EXEC_RENDER;
+       ring->guc_id = GUC_RENDER_ENGINE;
        ring->mmio_base = RENDER_RING_BASE;
-       ring->irq_enable_mask =
-               GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
-       ring->irq_keep_mask =
-               GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+
+       logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT);
        if (HAS_L3_DPF(dev))
                ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
 
+       logical_ring_default_vfuncs(dev, ring);
+
+       /* Override some for render ring. */
        if (INTEL_INFO(dev)->gen >= 9)
                ring->init_hw = gen9_init_render_ring;
        else
                ring->init_hw = gen8_init_render_ring;
        ring->init_context = gen8_init_rcs_context;
        ring->cleanup = intel_fini_pipe_control;
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-               ring->get_seqno = bxt_a_get_seqno;
-               ring->set_seqno = bxt_a_set_seqno;
-       } else {
-               ring->get_seqno = gen8_get_seqno;
-               ring->set_seqno = gen8_set_seqno;
-       }
-       ring->emit_request = gen8_emit_request;
        ring->emit_flush = gen8_emit_flush_render;
-       ring->irq_get = gen8_logical_ring_get_irq;
-       ring->irq_put = gen8_logical_ring_put_irq;
-       ring->emit_bb_start = gen8_emit_bb_start;
+       ring->emit_request = gen8_emit_request_render;
 
        ring->dev = dev;
 
@@ -2026,25 +2156,12 @@ static int logical_bsd_ring_init(struct drm_device *dev)
 
        ring->name = "bsd ring";
        ring->id = VCS;
+       ring->exec_id = I915_EXEC_BSD;
+       ring->guc_id = GUC_VIDEO_ENGINE;
        ring->mmio_base = GEN6_BSD_RING_BASE;
-       ring->irq_enable_mask =
-               GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
-       ring->irq_keep_mask =
-               GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
 
-       ring->init_hw = gen8_init_common_ring;
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-               ring->get_seqno = bxt_a_get_seqno;
-               ring->set_seqno = bxt_a_set_seqno;
-       } else {
-               ring->get_seqno = gen8_get_seqno;
-               ring->set_seqno = gen8_set_seqno;
-       }
-       ring->emit_request = gen8_emit_request;
-       ring->emit_flush = gen8_emit_flush;
-       ring->irq_get = gen8_logical_ring_get_irq;
-       ring->irq_put = gen8_logical_ring_put_irq;
-       ring->emit_bb_start = gen8_emit_bb_start;
+       logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT);
+       logical_ring_default_vfuncs(dev, ring);
 
        return logical_ring_init(dev, ring);
 }
@@ -2054,22 +2171,14 @@ static int logical_bsd2_ring_init(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
 
-       ring->name = "bds2 ring";
+       ring->name = "bsd2 ring";
        ring->id = VCS2;
+       ring->exec_id = I915_EXEC_BSD;
+       ring->guc_id = GUC_VIDEO_ENGINE2;
        ring->mmio_base = GEN8_BSD2_RING_BASE;
-       ring->irq_enable_mask =
-               GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
-       ring->irq_keep_mask =
-               GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
 
-       ring->init_hw = gen8_init_common_ring;
-       ring->get_seqno = gen8_get_seqno;
-       ring->set_seqno = gen8_set_seqno;
-       ring->emit_request = gen8_emit_request;
-       ring->emit_flush = gen8_emit_flush;
-       ring->irq_get = gen8_logical_ring_get_irq;
-       ring->irq_put = gen8_logical_ring_put_irq;
-       ring->emit_bb_start = gen8_emit_bb_start;
+       logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT);
+       logical_ring_default_vfuncs(dev, ring);
 
        return logical_ring_init(dev, ring);
 }
@@ -2081,25 +2190,12 @@ static int logical_blt_ring_init(struct drm_device *dev)
 
        ring->name = "blitter ring";
        ring->id = BCS;
+       ring->exec_id = I915_EXEC_BLT;
+       ring->guc_id = GUC_BLITTER_ENGINE;
        ring->mmio_base = BLT_RING_BASE;
-       ring->irq_enable_mask =
-               GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
-       ring->irq_keep_mask =
-               GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
 
-       ring->init_hw = gen8_init_common_ring;
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-               ring->get_seqno = bxt_a_get_seqno;
-               ring->set_seqno = bxt_a_set_seqno;
-       } else {
-               ring->get_seqno = gen8_get_seqno;
-               ring->set_seqno = gen8_set_seqno;
-       }
-       ring->emit_request = gen8_emit_request;
-       ring->emit_flush = gen8_emit_flush;
-       ring->irq_get = gen8_logical_ring_get_irq;
-       ring->irq_put = gen8_logical_ring_put_irq;
-       ring->emit_bb_start = gen8_emit_bb_start;
+       logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT);
+       logical_ring_default_vfuncs(dev, ring);
 
        return logical_ring_init(dev, ring);
 }
@@ -2111,25 +2207,12 @@ static int logical_vebox_ring_init(struct drm_device *dev)
 
        ring->name = "video enhancement ring";
        ring->id = VECS;
+       ring->exec_id = I915_EXEC_VEBOX;
+       ring->guc_id = GUC_VIDEOENHANCE_ENGINE;
        ring->mmio_base = VEBOX_RING_BASE;
-       ring->irq_enable_mask =
-               GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
-       ring->irq_keep_mask =
-               GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
 
-       ring->init_hw = gen8_init_common_ring;
-       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
-               ring->get_seqno = bxt_a_get_seqno;
-               ring->set_seqno = bxt_a_set_seqno;
-       } else {
-               ring->get_seqno = gen8_get_seqno;
-               ring->set_seqno = gen8_set_seqno;
-       }
-       ring->emit_request = gen8_emit_request;
-       ring->emit_flush = gen8_emit_flush;
-       ring->irq_get = gen8_logical_ring_get_irq;
-       ring->irq_put = gen8_logical_ring_put_irq;
-       ring->emit_bb_start = gen8_emit_bb_start;
+       logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT);
+       logical_ring_default_vfuncs(dev, ring);
 
        return logical_ring_init(dev, ring);
 }
@@ -2367,26 +2450,39 @@ void intel_lr_context_free(struct intel_context *ctx)
 {
        int i;
 
-       for (i = 0; i < I915_NUM_RINGS; i++) {
+       for (i = I915_NUM_RINGS; --i >= 0; ) {
+               struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
                struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
-               if (ctx_obj) {
-                       struct intel_ringbuffer *ringbuf =
-                                       ctx->engine[i].ringbuf;
-                       struct intel_engine_cs *ring = ringbuf->ring;
+               if (!ctx_obj)
+                       continue;
 
-                       if (ctx == ring->default_context) {
-                               intel_unpin_ringbuffer_obj(ringbuf);
-                               i915_gem_object_ggtt_unpin(ctx_obj);
-                       }
-                       WARN_ON(ctx->engine[ring->id].pin_count);
-                       intel_ringbuffer_free(ringbuf);
-                       drm_gem_object_unreference(&ctx_obj->base);
+               if (ctx == ctx->i915->kernel_context) {
+                       intel_unpin_ringbuffer_obj(ringbuf);
+                       i915_gem_object_ggtt_unpin(ctx_obj);
                }
+
+               WARN_ON(ctx->engine[i].pin_count);
+               intel_ringbuffer_free(ringbuf);
+               drm_gem_object_unreference(&ctx_obj->base);
        }
 }
 
-static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
+/**
+ * intel_lr_context_size() - return the size of the context for an engine
+ * @ring: which engine to find the context size for
+ *
+ * Each engine may require a different amount of space for a context image,
+ * so when allocating (or copying) an image, this function can be used to
+ * find the right size for the specific engine.
+ *
+ * Return: size (in bytes) of an engine-specific context image
+ *
+ * Note: this size includes the HWSP, which is part of the context image
+ * in LRC mode, but does not include the "shared data page" used with
+ * GuC submission. The caller should account for this if using the GuC.
+ */
+uint32_t intel_lr_context_size(struct intel_engine_cs *ring)
 {
        int ret = 0;
 
@@ -2443,7 +2539,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
  */
 
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
-                                    struct intel_engine_cs *ring)
+                                   struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *ctx_obj;
@@ -2454,7 +2550,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
        WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL);
        WARN_ON(ctx->engine[ring->id].state);
 
-       context_size = round_up(get_lr_context_size(ring), 4096);
+       context_size = round_up(intel_lr_context_size(ring), 4096);
 
        /* One extra page as the sharing data between driver and GuC */
        context_size += PAGE_SIZE * LRC_PPHWSP_PN;
@@ -2480,14 +2576,13 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
        ctx->engine[ring->id].ringbuf = ringbuf;
        ctx->engine[ring->id].state = ctx_obj;
 
-       if (ctx != ring->default_context && ring->init_context) {
+       if (ctx != ctx->i915->kernel_context && ring->init_context) {
                struct drm_i915_gem_request *req;
 
-               ret = i915_gem_request_alloc(ring,
-                       ctx, &req);
-               if (ret) {
-                       DRM_ERROR("ring create req: %d\n",
-                               ret);
+               req = i915_gem_request_alloc(ring, ctx);
+               if (IS_ERR(req)) {
+                       ret = PTR_ERR(req);
+                       DRM_ERROR("ring create req: %d\n", ret);
                        goto error_ringbuf;
                }
 
index 0b821b91723a61a4766e761ef4975989e9eea61c..e6cda3e225d02b67d2df629b0f06b2462093353f 100644 (file)
@@ -25,8 +25,6 @@
 #define _INTEL_LRC_H_
 
 #define GEN8_LR_CONTEXT_ALIGN 4096
-#define GEN8_CSB_ENTRIES 6
-#define GEN8_CSB_PTR_MASK 0x07
 
 /* Execlists regs */
 #define RING_ELSP(ring)                                _MMIO((ring)->mmio_base + 0x230)
 #define RING_CONTEXT_STATUS_BUF_HI(ring, i)    _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
 #define RING_CONTEXT_STATUS_PTR(ring)          _MMIO((ring)->mmio_base + 0x3a0)
 
+/* The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
+ */
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x7
+#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
+#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
+#define GEN8_CSB_WRITE_PTR(csb_status) \
+       (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
+#define GEN8_CSB_READ_PTR(csb_status) \
+       (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+
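
With these masks the status pointer register decodes as: write pointer in bits 2:0 and read pointer in bits 10:8, each limited to 0..5 because only six entries exist. A small sketch exercising the same macros on a made-up register snapshot:

#include <stdio.h>
#include <stdint.h>

#define GEN8_CSB_PTR_MASK               0x7
#define GEN8_CSB_READ_PTR_MASK          (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK         (GEN8_CSB_PTR_MASK << 0)
#define GEN8_CSB_WRITE_PTR(csb_status)  (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
#define GEN8_CSB_READ_PTR(csb_status)   (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)

int main(void)
{
        uint32_t csb_status = 0x00000304;       /* made-up register snapshot */

        printf("read=%u write=%u\n",
               (unsigned)GEN8_CSB_READ_PTR(csb_status),
               (unsigned)GEN8_CSB_WRITE_PTR(csb_status));
        return 0;
}
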
 /* Logical Rings */
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
@@ -84,21 +98,25 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
 #define LRC_STATE_PN   (LRC_PPHWSP_PN + 1)
 
 void intel_lr_context_free(struct intel_context *ctx);
+uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
 int intel_lr_context_deferred_alloc(struct intel_context *ctx,
                                    struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct drm_i915_gem_request *req);
+void intel_lr_context_unpin(struct intel_context *ctx,
+                           struct intel_engine_cs *engine);
 void intel_lr_context_reset(struct drm_device *dev,
                        struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
                                     struct intel_engine_cs *ring);
 
+u32 intel_execlists_ctx_id(struct intel_context *ctx,
+                          struct intel_engine_cs *ring);
+
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
                               struct drm_i915_gem_execbuffer2 *args,
                               struct list_head *vmas);
-u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
 
 void intel_lrc_irq_handler(struct intel_engine_cs *ring);
 void intel_execlists_retire_requests(struct intel_engine_cs *ring);
index 0da0240caf815089447d4614b200ac818fd85250..811ddf7799f0c0f381ac650f64b166c2a17c812a 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
+#include <linux/vga_switcheroo.h>
 #include <drm/drmP.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
@@ -1080,7 +1081,12 @@ void intel_lvds_init(struct drm_device *dev)
         * preferred mode is the right one.
         */
        mutex_lock(&dev->mode_config.mutex);
-       edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
+       if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC)
+               edid = drm_get_edid_switcheroo(connector,
+                                   intel_gmbus_get_adapter(dev_priv, pin));
+       else
+               edid = drm_get_edid(connector,
+                                   intel_gmbus_get_adapter(dev_priv, pin));
        if (edid) {
                if (drm_add_edid_modes(connector, edid)) {
                        drm_mode_connector_update_edid_property(connector,
index 76f1980a7541c39b6abdfef011eaac719274337d..9168413fe204402d4476d2b873be39368b099e4d 100644 (file)
@@ -240,9 +240,9 @@ static int intel_overlay_on(struct intel_overlay *overlay)
        WARN_ON(overlay->active);
        WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-       if (ret)
-               return ret;
+       req = i915_gem_request_alloc(ring, NULL);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
 
        ret = intel_ring_begin(req, 4);
        if (ret) {
@@ -283,9 +283,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
        if (tmp & (1 << 17))
                DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-       if (ret)
-               return ret;
+       req = i915_gem_request_alloc(ring, NULL);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
 
        ret = intel_ring_begin(req, 2);
        if (ret) {
@@ -349,9 +349,9 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         * of the hw. Do it in both cases */
        flip_addr |= OFC_UPDATE;
 
-       ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-       if (ret)
-               return ret;
+       req = i915_gem_request_alloc(ring, NULL);
+       if (IS_ERR(req))
+               return PTR_ERR(req);
 
        ret = intel_ring_begin(req, 6);
        if (ret) {
@@ -423,9 +423,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
                /* synchronous slowpath */
                struct drm_i915_gem_request *req;
 
-               ret = i915_gem_request_alloc(ring, ring->default_context, &req);
-               if (ret)
-                       return ret;
+               req = i915_gem_request_alloc(ring, NULL);
+               if (IS_ERR(req))
+                       return PTR_ERR(req);
 
                ret = intel_ring_begin(req, 2);
                if (ret) {
index eb5fa05cf476e465ab2c0df6ee52f6870ec568f8..379eabe093cb4195d79412080078ae423c372848 100644 (file)
@@ -32,6 +32,8 @@
 #include <linux/module.h>
 
 /**
+ * DOC: RC6
+ *
 * RC6 is a special power stage which allows the GPU to enter a very
  * low-voltage mode when idle, using down to 0V while at this stage.  This
  * stage is entered automatically when the GPU is idle when RC6 support is
@@ -546,7 +548,7 @@ static const struct intel_watermark_params i845_wm_info = {
  * intel_calculate_wm - calculate watermark level
  * @clock_in_khz: pixel clock
  * @wm: chip FIFO params
- * @pixel_size: display pixel size
+ * @cpp: bytes per pixel
  * @latency_ns: memory latency for the platform
  *
  * Calculate the watermark level (the level at which the display plane will
@@ -562,8 +564,7 @@ static const struct intel_watermark_params i845_wm_info = {
  */
 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
                                        const struct intel_watermark_params *wm,
-                                       int fifo_size,
-                                       int pixel_size,
+                                       int fifo_size, int cpp,
                                        unsigned long latency_ns)
 {
        long entries_required, wm_size;
@@ -574,7 +575,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
         * clocks go from a few thousand to several hundred thousand.
         * latency is usually a few thousand
         */
-       entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
+       entries_required = ((clock_in_khz / 1000) * cpp * latency_ns) /
                1000;
        entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
 
@@ -638,13 +639,13 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
        crtc = single_enabled_crtc(dev);
        if (crtc) {
                const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
-               int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+               int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
                int clock = adjusted_mode->crtc_clock;
 
                /* Display SR */
                wm = intel_calculate_wm(clock, &pineview_display_wm,
                                        pineview_display_wm.fifo_size,
-                                       pixel_size, latency->display_sr);
+                                       cpp, latency->display_sr);
                reg = I915_READ(DSPFW1);
                reg &= ~DSPFW_SR_MASK;
                reg |= FW_WM(wm, SR);
@@ -654,7 +655,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
                /* cursor SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_wm,
                                        pineview_display_wm.fifo_size,
-                                       pixel_size, latency->cursor_sr);
+                                       cpp, latency->cursor_sr);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_CURSOR_SR_MASK;
                reg |= FW_WM(wm, CURSOR_SR);
@@ -663,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
                /* Display HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
-                                       pixel_size, latency->display_hpll_disable);
+                                       cpp, latency->display_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_SR_MASK;
                reg |= FW_WM(wm, HPLL_SR);
@@ -672,7 +673,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc)
                /* cursor HPLL off SR */
                wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
                                        pineview_display_hplloff_wm.fifo_size,
-                                       pixel_size, latency->cursor_hpll_disable);
+                                       cpp, latency->cursor_hpll_disable);
                reg = I915_READ(DSPFW3);
                reg &= ~DSPFW_HPLL_CURSOR_MASK;
                reg |= FW_WM(wm, HPLL_CURSOR);
@@ -696,7 +697,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
 {
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
-       int htotal, hdisplay, clock, pixel_size;
+       int htotal, hdisplay, clock, cpp;
        int line_time_us, line_count;
        int entries, tlb_miss;
 
@@ -711,10 +712,10 @@ static bool g4x_compute_wm0(struct drm_device *dev,
        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-       pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+       cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 
        /* Use the small buffer method to calculate plane watermark */
-       entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+       entries = ((clock * cpp / 1000) * display_latency_ns) / 1000;
        tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
@@ -726,7 +727,7 @@ static bool g4x_compute_wm0(struct drm_device *dev,
        /* Use the large buffer method to calculate cursor watermark */
        line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
-       entries = line_count * crtc->cursor->state->crtc_w * pixel_size;
+       entries = line_count * crtc->cursor->state->crtc_w * cpp;
        tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
        if (tlb_miss > 0)
                entries += tlb_miss;
@@ -782,7 +783,7 @@ static bool g4x_compute_srwm(struct drm_device *dev,
 {
        struct drm_crtc *crtc;
        const struct drm_display_mode *adjusted_mode;
-       int hdisplay, htotal, pixel_size, clock;
+       int hdisplay, htotal, cpp, clock;
        unsigned long line_time_us;
        int line_count, line_size;
        int small, large;
@@ -798,21 +799,21 @@ static bool g4x_compute_srwm(struct drm_device *dev,
        clock = adjusted_mode->crtc_clock;
        htotal = adjusted_mode->crtc_htotal;
        hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-       pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+       cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
 
        line_time_us = max(htotal * 1000 / clock, 1);
        line_count = (latency_ns / line_time_us + 1000) / 1000;
-       line_size = hdisplay * pixel_size;
+       line_size = hdisplay * cpp;
 
        /* Use the minimum of the small and large buffer method for primary */
-       small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+       small = ((clock * cpp / 1000) * latency_ns) / 1000;
        large = line_count * line_size;
 
        entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
        *display_wm = entries + display->guard_size;
 
        /* calculate the self-refresh watermark for display cursor */
-       entries = line_count * pixel_size * crtc->cursor->state->crtc_w;
+       entries = line_count * cpp * crtc->cursor->state->crtc_w;
        entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
        *cursor_wm = entries + cursor->guard_size;
 
@@ -904,13 +905,13 @@ enum vlv_wm_level {
 static unsigned int vlv_wm_method2(unsigned int pixel_rate,
                                   unsigned int pipe_htotal,
                                   unsigned int horiz_pixels,
-                                  unsigned int bytes_per_pixel,
+                                  unsigned int cpp,
                                   unsigned int latency)
 {
        unsigned int ret;
 
        ret = (latency * pixel_rate) / (pipe_htotal * 10000);
-       ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+       ret = (ret + 1) * horiz_pixels * cpp;
        ret = DIV_ROUND_UP(ret, 64);
 
        return ret;
@@ -939,7 +940,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
                                     int level)
 {
        struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
-       int clock, htotal, pixel_size, width, wm;
+       int clock, htotal, cpp, width, wm;
 
        if (dev_priv->wm.pri_latency[level] == 0)
                return USHRT_MAX;
@@ -947,7 +948,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
        if (!state->visible)
                return 0;
 
-       pixel_size = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
+       cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0);
        clock = crtc->config->base.adjusted_mode.crtc_clock;
        htotal = crtc->config->base.adjusted_mode.crtc_htotal;
        width = crtc->config->pipe_src_w;
@@ -963,7 +964,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane,
                 */
                wm = 63;
        } else {
-               wm = vlv_wm_method2(clock, htotal, width, pixel_size,
+               wm = vlv_wm_method2(clock, htotal, width, cpp,
                                    dev_priv->wm.pri_latency[level] * 10);
        }
 
@@ -1437,7 +1438,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
                int clock = adjusted_mode->crtc_clock;
                int htotal = adjusted_mode->crtc_htotal;
                int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
-               int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8;
+               int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
                unsigned long line_time_us;
                int entries;
 
@@ -1445,7 +1446,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
 
                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-                       pixel_size * hdisplay;
+                       cpp * hdisplay;
                entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
                srwm = I965_FIFO_SIZE - entries;
                if (srwm < 0)
@@ -1455,7 +1456,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
                              entries, srwm);
 
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-                       pixel_size * crtc->cursor->state->crtc_w;
+                       cpp * crtc->cursor->state->crtc_w;
                entries = DIV_ROUND_UP(entries,
                                          i965_cursor_wm_info.cacheline_size);
                cursor_sr = i965_cursor_wm_info.fifo_size -
@@ -1516,7 +1517,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
        crtc = intel_get_crtc_for_plane(dev, 0);
        if (intel_crtc_active(crtc)) {
                const struct drm_display_mode *adjusted_mode;
-               int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+               int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
                if (IS_GEN2(dev))
                        cpp = 4;
 
@@ -1538,7 +1539,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
        crtc = intel_get_crtc_for_plane(dev, 1);
        if (intel_crtc_active(crtc)) {
                const struct drm_display_mode *adjusted_mode;
-               int cpp = crtc->primary->state->fb->bits_per_pixel / 8;
+               int cpp = drm_format_plane_cpp(crtc->primary->state->fb->pixel_format, 0);
                if (IS_GEN2(dev))
                        cpp = 4;
 
@@ -1584,7 +1585,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
                int clock = adjusted_mode->crtc_clock;
                int htotal = adjusted_mode->crtc_htotal;
                int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
-               int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8;
+               int cpp = drm_format_plane_cpp(enabled->primary->state->fb->pixel_format, 0);
                unsigned long line_time_us;
                int entries;
 
@@ -1592,7 +1593,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
 
                /* Use ns/us then divide to preserve precision */
                entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
-                       pixel_size * hdisplay;
+                       cpp * hdisplay;
                entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
                DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
                srwm = wm_info->fifo_size - entries;
@@ -1672,6 +1673,9 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
                if (pipe_h < pfit_h)
                        pipe_h = pfit_h;
 
+               if (WARN_ON(!pfit_w || !pfit_h))
+                       return pixel_rate;
+
                pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
                                     pfit_w * pfit_h);
        }
@@ -1680,15 +1684,14 @@ uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
 }
 
 /* latency must be in 0.1us units. */
-static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
-                              uint32_t latency)
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
 {
        uint64_t ret;
 
        if (WARN(latency == 0, "Latency value missing\n"))
                return UINT_MAX;
 
-       ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
+       ret = (uint64_t) pixel_rate * cpp * latency;
        ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
 
        return ret;
@@ -1696,24 +1699,37 @@ static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
 
 /* latency must be in 0.1us units. */
 static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-                              uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+                              uint32_t horiz_pixels, uint8_t cpp,
                               uint32_t latency)
 {
        uint32_t ret;
 
        if (WARN(latency == 0, "Latency value missing\n"))
                return UINT_MAX;
+       if (WARN_ON(!pipe_htotal))
+               return UINT_MAX;
 
        ret = (latency * pixel_rate) / (pipe_htotal * 10000);
-       ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
+       ret = (ret + 1) * horiz_pixels * cpp;
        ret = DIV_ROUND_UP(ret, 64) + 2;
        return ret;
 }
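
A worked instance of the method-2 formula above, with invented numbers chosen only to make the arithmetic visible (latency is in 0.1 us units, pixel rate in kHz):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* Same arithmetic as ilk_wm_method2(), with sample inputs: 150 MHz pixel
 * rate, htotal 2200, 1920 visible pixels, 4 bytes per pixel, 37.0 us
 * latency (370 in 0.1 us units).  Prints 362. */
int main(void)
{
        uint32_t pixel_rate = 150000;   /* kHz */
        uint32_t pipe_htotal = 2200;
        uint32_t horiz_pixels = 1920;
        uint8_t cpp = 4;
        uint32_t latency = 370;         /* 0.1 us units */
        uint32_t ret;

        ret = (latency * pixel_rate) / (pipe_htotal * 10000);
        ret = (ret + 1) * horiz_pixels * cpp;
        ret = DIV_ROUND_UP(ret, 64) + 2;

        printf("method2 watermark: %u\n", ret);
        return 0;
}
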
 
 static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
-                          uint8_t bytes_per_pixel)
+                          uint8_t cpp)
 {
-       return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
+       /*
+        * Neither of these should be possible since this function shouldn't be
+        * called if the CRTC is off or the plane is invisible.  But let's be
+        * extra paranoid to avoid a potential divide-by-zero if we screw up
+        * elsewhere in the driver.
+        */
+       if (WARN_ON(!cpp))
+               return 0;
+       if (WARN_ON(!horiz_pixels))
+               return 0;
+
+       return DIV_ROUND_UP(pri_val * 64, horiz_pixels * cpp) + 2;
 }
 
 struct ilk_wm_maximums {
@@ -1732,13 +1748,14 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
                                   uint32_t mem_value,
                                   bool is_lp)
 {
-       int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+       int cpp = pstate->base.fb ?
+               drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
        uint32_t method1, method2;
 
        if (!cstate->base.active || !pstate->visible)
                return 0;
 
-       method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+       method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
 
        if (!is_lp)
                return method1;
@@ -1746,8 +1763,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
        method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
                                 drm_rect_width(&pstate->dst),
-                                bpp,
-                                mem_value);
+                                cpp, mem_value);
 
        return min(method1, method2);
 }
@@ -1760,18 +1776,18 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
                                   const struct intel_plane_state *pstate,
                                   uint32_t mem_value)
 {
-       int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+       int cpp = pstate->base.fb ?
+               drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
        uint32_t method1, method2;
 
        if (!cstate->base.active || !pstate->visible)
                return 0;
 
-       method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value);
+       method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
        method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
                                 drm_rect_width(&pstate->dst),
-                                bpp,
-                                mem_value);
+                                cpp, mem_value);
        return min(method1, method2);
 }
 
@@ -1783,16 +1799,20 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
                                   const struct intel_plane_state *pstate,
                                   uint32_t mem_value)
 {
-       int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+       /*
+        * We treat the cursor plane as always-on for the purposes of watermark
+        * calculation.  Until we have two-stage watermark programming merged,
+        * this is necessary to avoid flickering.
+        */
+       int cpp = 4;
+       int width = pstate->visible ? pstate->base.crtc_w : 64;
 
-       if (!cstate->base.active || !pstate->visible)
+       if (!cstate->base.active)
                return 0;
 
        return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
                              cstate->base.adjusted_mode.crtc_htotal,
-                             drm_rect_width(&pstate->dst),
-                             bpp,
-                             mem_value);
+                             width, cpp, mem_value);
 }
 
 /* Only for WM_LP. */
@@ -1800,12 +1820,13 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
                                   const struct intel_plane_state *pstate,
                                   uint32_t pri_val)
 {
-       int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+       int cpp = pstate->base.fb ?
+               drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0;
 
        if (!cstate->base.active || !pstate->visible)
                return 0;
 
-       return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp);
+       return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), cpp);
 }
 
 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
@@ -1998,14 +2019,19 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
 }
 
 static uint32_t
-hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
+hsw_compute_linetime_wm(struct drm_device *dev,
+                       struct intel_crtc_state *cstate)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
+       const struct drm_display_mode *adjusted_mode =
+               &cstate->base.adjusted_mode;
        u32 linetime, ips_linetime;
 
-       if (!intel_crtc->active)
+       if (!cstate->base.active)
+               return 0;
+       if (WARN_ON(adjusted_mode->crtc_clock == 0))
+               return 0;
+       if (WARN_ON(dev_priv->cdclk_freq == 0))
                return 0;
 
        /* The WM are computed with base on how long it takes to fill a single
@@ -2277,6 +2303,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
                return PTR_ERR(cstate);
 
        pipe_wm = &cstate->wm.optimal.ilk;
+       memset(pipe_wm, 0, sizeof(*pipe_wm));
 
        for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
                ps = drm_atomic_get_plane_state(state,
@@ -2313,8 +2340,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc,
                             pristate, sprstate, curstate, &pipe_wm->wm[0]);
 
        if (IS_HASWELL(dev) || IS_BROADWELL(dev))
-               pipe_wm->linetime = hsw_compute_linetime_wm(dev,
-                                                           &intel_crtc->base);
+               pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate);
 
        /* LP0 watermarks always use 1/2 DDB partitioning */
        ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
@@ -3019,26 +3045,25 @@ static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
 
 /*
  * The max latency should be 257 (max the punit can code is 255 and we add 2us
- * for the read latency) and bytes_per_pixel should always be <= 8, so that
+ * for the read latency) and cpp should always be <= 8, so that
  * should allow pixel_rate up to ~2 GHz which seems sufficient since max
  * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
 */
-static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
-                              uint32_t latency)
+static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
 {
        uint32_t wm_intermediate_val, ret;
 
        if (latency == 0)
                return UINT_MAX;
 
-       wm_intermediate_val = latency * pixel_rate * bytes_per_pixel / 512;
+       wm_intermediate_val = latency * pixel_rate * cpp / 512;
        ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
 
        return ret;
 }
 
 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
-                              uint32_t horiz_pixels, uint8_t bytes_per_pixel,
+                              uint32_t horiz_pixels, uint8_t cpp,
                               uint64_t tiling, uint32_t latency)
 {
        uint32_t ret;
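
A quick worked example of the method-1 arithmetic shown above, pulled out as a standalone program; the input numbers are invented and the units (kHz pixel rate, microsecond latency) are assumptions, not taken from the hunk:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static uint32_t wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
        uint32_t wm_intermediate_val;

        if (latency == 0)
                return UINT32_MAX;

        wm_intermediate_val = latency * pixel_rate * cpp / 512;
        return DIV_ROUND_UP(wm_intermediate_val, 1000);
}

int main(void)
{
        /* e.g. 400000 kHz pixel rate, 4 bytes per pixel, 5 us latency -> 16 */
        printf("%u\n", (unsigned)wm_method1(400000, 4, 5));
        return 0;
}
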
@@ -3048,7 +3073,7 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
        if (latency == 0)
                return UINT_MAX;
 
-       plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
+       plane_bytes_per_line = horiz_pixels * cpp;
 
        if (tiling == I915_FORMAT_MOD_Y_TILED ||
            tiling == I915_FORMAT_MOD_Yf_TILED) {
@@ -3098,23 +3123,21 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
        uint32_t plane_bytes_per_line, plane_blocks_per_line;
        uint32_t res_blocks, res_lines;
        uint32_t selected_result;
-       uint8_t bytes_per_pixel;
+       uint8_t cpp;
 
        if (latency == 0 || !cstate->base.active || !fb)
                return false;
 
-       bytes_per_pixel = drm_format_plane_cpp(fb->pixel_format, 0);
+       cpp = drm_format_plane_cpp(fb->pixel_format, 0);
        method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate),
-                                bytes_per_pixel,
-                                latency);
+                                cpp, latency);
        method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate),
                                 cstate->base.adjusted_mode.crtc_htotal,
                                 cstate->pipe_src_w,
-                                bytes_per_pixel,
-                                fb->modifier[0],
+                                cpp, fb->modifier[0],
                                 latency);
 
-       plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel;
+       plane_bytes_per_line = cstate->pipe_src_w * cpp;
        plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512);
 
        if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
@@ -3122,11 +3145,11 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
                uint32_t min_scanlines = 4;
                uint32_t y_tile_minimum;
                if (intel_rotation_90_or_270(plane->state->rotation)) {
-                       int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
+                       int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
                                drm_format_plane_cpp(fb->pixel_format, 1) :
                                drm_format_plane_cpp(fb->pixel_format, 0);
 
-                       switch (bpp) {
+                       switch (cpp) {
                        case 1:
                                min_scanlines = 16;
                                break;
@@ -3597,23 +3620,45 @@ static void skl_update_wm(struct drm_crtc *crtc)
        dev_priv->wm.skl_hw = *results;
 }
 
-static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
+static void ilk_compute_wm_config(struct drm_device *dev,
+                                 struct intel_wm_config *config)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct intel_crtc *crtc;
+
+       /* Compute the currently _active_ config */
+       for_each_intel_crtc(dev, crtc) {
+               const struct intel_pipe_wm *wm = &crtc->wm.active.ilk;
+
+               if (!wm->pipe_enabled)
+                       continue;
+
+               config->sprites_enabled |= wm->sprites_enabled;
+               config->sprites_scaled |= wm->sprites_scaled;
+               config->num_pipes_active++;
+       }
+}
+
+static void ilk_program_watermarks(struct intel_crtc_state *cstate)
+{
+       struct drm_crtc *crtc = cstate->base.crtc;
+       struct drm_device *dev = crtc->dev;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
        struct ilk_wm_maximums max;
-       struct intel_wm_config *config = &dev_priv->wm.config;
+       struct intel_wm_config config = {};
        struct ilk_wm_values results = {};
        enum intel_ddb_partitioning partitioning;
 
-       ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_1_2, &max);
-       ilk_wm_merge(dev, config, &max, &lp_wm_1_2);
+       ilk_compute_wm_config(dev, &config);
+
+       ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
+       ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
 
        /* 5/6 split only in single pipe config on IVB+ */
        if (INTEL_INFO(dev)->gen >= 7 &&
-           config->num_pipes_active == 1 && config->sprites_enabled) {
-               ilk_compute_wm_maximums(dev, 1, config, INTEL_DDB_PART_5_6, &max);
-               ilk_wm_merge(dev, config, &max, &lp_wm_5_6);
+           config.num_pipes_active == 1 && config.sprites_enabled) {
+               ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
+               ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
 
                best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
        } else {
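
The new ilk_compute_wm_config() above rebuilds the watermark config from the pipes that are actually enabled instead of trusting a cached dev_priv->wm.config. A pared-down sketch of that aggregation, with stand-in structs in place of the driver's:

#include <stdbool.h>
#include <stdio.h>

struct pipe_wm {
        bool pipe_enabled, sprites_enabled, sprites_scaled;
};

struct wm_config {
        unsigned int num_pipes_active;
        bool sprites_enabled, sprites_scaled;
};

static void compute_wm_config(const struct pipe_wm *pipes, int n,
                              struct wm_config *config)
{
        for (int i = 0; i < n; i++) {
                if (!pipes[i].pipe_enabled)
                        continue;               /* only _active_ pipes count */
                config->sprites_enabled |= pipes[i].sprites_enabled;
                config->sprites_scaled |= pipes[i].sprites_scaled;
                config->num_pipes_active++;
        }
}

int main(void)
{
        struct pipe_wm pipes[] = {
                { true, true, false }, { false, false, false }, { true, false, false },
        };
        struct wm_config config = { 0 };

        compute_wm_config(pipes, 3, &config);
        printf("active=%u sprites=%d scaled=%d\n", config.num_pipes_active,
               config.sprites_enabled, config.sprites_scaled);
        return 0;
}
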
@@ -3630,7 +3675,6 @@ static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
 
 static void ilk_update_wm(struct drm_crtc *crtc)
 {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
 
@@ -3650,7 +3694,7 @@ static void ilk_update_wm(struct drm_crtc *crtc)
 
        intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk;
 
-       ilk_program_watermarks(dev_priv);
+       ilk_program_watermarks(cstate);
 }
 
 static void skl_pipe_wm_active_state(uint32_t val,
@@ -4036,7 +4080,7 @@ void intel_update_watermarks(struct drm_crtc *crtc)
                dev_priv->display.update_wm(crtc);
 }
 
-/**
+/*
  * Lock protecting IPS related data structures
  */
 DEFINE_SPINLOCK(mchdev_lock);
@@ -4509,21 +4553,71 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
        }
        if (HAS_RC6p(dev))
                DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
-                             (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-                             (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-                             (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+                             onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
+                             onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
+                             onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
 
        else
                DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
-                             (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
+                             onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
+}
+
+static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       bool enable_rc6 = true;
+       unsigned long rc6_ctx_base;
+
+       if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
+               DRM_DEBUG_KMS("RC6 Base location not set properly.\n");
+               enable_rc6 = false;
+       }
+
+       /*
+        * The exact context size is not known for BXT, so assume a page size
+        * for this check.
+        */
+       rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
+       if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
+             (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
+                                       dev_priv->gtt.stolen_reserved_size))) {
+               DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
+               enable_rc6 = false;
+       }
+
+       if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) &&
+             ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
+             ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
+             ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
+               DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n");
+               enable_rc6 = false;
+       }
+
+       if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE |
+                                           GEN6_RC_CTL_HW_ENABLE)) &&
+           ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
+            !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
+               DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
+               enable_rc6 = false;
+       }
+
+       return enable_rc6;
 }
 
-static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
+int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 {
        /* No RC6 before Ironlake and code is gone for ilk. */
        if (INTEL_INFO(dev)->gen < 6)
                return 0;
 
+       if (!enable_rc6)
+               return 0;
+
+       if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) {
+               DRM_INFO("RC6 disabled by BIOS\n");
+               return 0;
+       }
+
        /* Respect the kernel parameter if it is set */
        if (enable_rc6 >= 0) {
                int mask;
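
bxt_check_bios_rc6_setup() above refuses RC6 unless the context base programmed by the BIOS lies inside the reserved part of stolen memory; since the exact context size is unknown, one page is checked. The containment test in isolation, with invented addresses:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static bool ctx_in_reserved(unsigned long ctx_base,
                            unsigned long resv_base, unsigned long resv_size)
{
        /* the whole [ctx_base, ctx_base + PAGE_SIZE) range must be inside */
        return ctx_base >= resv_base &&
               ctx_base + PAGE_SIZE <= resv_base + resv_size;
}

int main(void)
{
        unsigned long base = 0x7f800000UL, size = 8UL << 20;    /* invented */

        printf("%d\n", ctx_in_reserved(0x7f900000UL, base, size));     /* 1 */
        printf("%d\n", ctx_in_reserved(0x7f7ff000UL, base, size));     /* 0 */
        return 0;
}
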
@@ -4693,8 +4787,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
        /* 3a: Enable RC6 */
        if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
                rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
-       DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
-                       "on" : "off");
+       DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
        /* WaRsUseTimeoutMode */
        if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) ||
            IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
@@ -4713,8 +4806,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
         * 3b: Enable Coarse Power Gating only when RC6 is enabled.
         * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
         */
-       if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
-           ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
+       if (NEEDS_WaRsDisableCoarsePowerGating(dev))
                I915_WRITE(GEN9_PG_ENABLE, 0);
        else
                I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
@@ -6015,7 +6107,6 @@ void intel_init_gt_powersave(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
        /*
         * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
         * requirement.
@@ -6981,6 +7072,7 @@ void intel_init_pm(struct drm_device *dev)
                     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
                        dev_priv->display.update_wm = ilk_update_wm;
                        dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
+                       dev_priv->display.program_watermarks = ilk_program_watermarks;
                } else {
                        DRM_DEBUG_KMS("Failed to read display plane latency. "
                                      "Disable CxSR\n");
@@ -7146,9 +7238,10 @@ static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
        int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
-       div = vlv_gpu_freq_div(czclk_freq) / 2;
+       div = vlv_gpu_freq_div(czclk_freq);
        if (div < 0)
                return div;
+       div /= 2;
 
        return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
 }
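
The two-line change above fixes an ordering problem: previously the divide-by-two happened on the same line as the vlv_gpu_freq_div() call, so a negative error return was halved before being handed back to the caller. A small sketch of the corrected pattern, with vlv_gpu_freq_div() stubbed out:

#include <errno.h>
#include <stdio.h>

static int freq_div(int czclk_khz)
{
        return czclk_khz > 0 ? czclk_khz / 100 : -EINVAL;       /* stand-in */
}

static int gpu_freq(int czclk_khz, int val)
{
        int div = freq_div(czclk_khz);

        if (div < 0)
                return div;     /* propagate the error code untouched */
        div /= 2;

        return czclk_khz * val / (2 * div) / 2;
}

int main(void)
{
        printf("%d\n", gpu_freq(320000, 10));   /* some frequency */
        printf("%d\n", gpu_freq(0, 10));        /* -EINVAL, not -EINVAL / 2 */
        return 0;
}
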
@@ -7157,9 +7250,10 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
        int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000);
 
-       mul = vlv_gpu_freq_div(czclk_freq) / 2;
+       mul = vlv_gpu_freq_div(czclk_freq);
        if (mul < 0)
                return mul;
+       mul /= 2;
 
        /* CHV needs even values */
        return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
index 9ccff3011523378d59338253f61c96bc7b944648..4ab757947f1581a6563f7144486001ac58fb8402 100644 (file)
@@ -225,7 +225,12 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
                   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
        }
 
-       drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, DP_PSR_ENABLE);
+       if (dev_priv->psr.link_standby)
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+                                  DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
+       else
+               drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
+                                  DP_PSR_ENABLE);
 }
 
 static void vlv_psr_enable_source(struct intel_dp *intel_dp)
@@ -280,6 +285,9 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
        if (IS_HASWELL(dev))
                val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
 
+       if (dev_priv->psr.link_standby)
+               val |= EDP_PSR_LINK_STANDBY;
+
        I915_WRITE(EDP_PSR_CTL, val |
                   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
                   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
@@ -304,8 +312,15 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
 
        dev_priv->psr.source_ok = false;
 
-       if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
-               DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+       /*
+        * HSW spec explicitly says PSR is tied to port A.
+        * BDW+ platforms with a DDI implementation of PSR have separate
+        * PSR registers per transcoder, and we only implement the transcoder
+        * EDP ones. Since by display design transcoder EDP is tied to port A,
+        * we can safely bail out based on port A alone.
+        */
+       if (HAS_DDI(dev) && dig_port->port != PORT_A) {
+               DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
                return false;
        }
 
@@ -314,6 +329,12 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
                return false;
        }
 
+       if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
+           !dev_priv->psr.link_standby) {
+               DRM_ERROR("PSR condition failed: Link off requested but not supported on this platform\n");
+               return false;
+       }
+
        if (IS_HASWELL(dev) &&
            I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config->cpu_transcoder)) &
                      S3D_ENABLE) {
@@ -327,12 +348,6 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
                return false;
        }
 
-       if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
-           ((dev_priv->vbt.psr.full_link) || (dig_port->port != PORT_A))) {
-               DRM_DEBUG_KMS("PSR condition failed: Link Standby requested/needed but not supported on this platform\n");
-               return false;
-       }
-
        dev_priv->psr.source_ok = true;
        return true;
 }
@@ -763,6 +778,27 @@ void intel_psr_init(struct drm_device *dev)
        dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
                HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
 
+       /* Set link_standby vs. link_off defaults */
+       if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+               /* HSW and BDW require workarounds that we don't implement. */
+               dev_priv->psr.link_standby = false;
+       else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
+               /* On VLV and CHV only standby mode is supported. */
+               dev_priv->psr.link_standby = true;
+       else
+               /* For new platforms let's respect VBT back again */
+               dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
+
+       /* Override link_standby vs. link_off defaults */
+       if (i915.enable_psr == 2 && !dev_priv->psr.link_standby) {
+               DRM_DEBUG_KMS("PSR: Forcing link standby\n");
+               dev_priv->psr.link_standby = true;
+       }
+       if (i915.enable_psr == 3 && dev_priv->psr.link_standby) {
+               DRM_DEBUG_KMS("PSR: Forcing main link off\n");
+               dev_priv->psr.link_standby = false;
+       }
+
        INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
        mutex_init(&dev_priv->psr.lock);
 }
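
intel_psr_init() above now picks a link_standby default per platform and lets i915.enable_psr values 2 and 3 force standby or main-link-off. The decision tree, lifted out into a standalone function and simplified (debug messages dropped, booleans standing in for the platform checks):

#include <stdbool.h>
#include <stdio.h>

static bool psr_link_standby(bool hsw_or_bdw, bool vlv_or_chv,
                             bool vbt_full_link, int enable_psr)
{
        bool link_standby;

        if (hsw_or_bdw)
                link_standby = false;           /* workarounds not implemented */
        else if (vlv_or_chv)
                link_standby = true;            /* only standby mode supported */
        else
                link_standby = vbt_full_link;   /* respect the VBT */

        if (enable_psr == 2)
                link_standby = true;            /* force link standby */
        else if (enable_psr == 3)
                link_standby = false;           /* force main link off */

        return link_standby;
}

int main(void)
{
        printf("%d\n", psr_link_standby(false, true, false, 0));        /* 1 */
        printf("%d\n", psr_link_standby(false, false, true, 3));        /* 0 */
        return 0;
}

Note that on VLV/CHV the match-conditions hunk earlier rejects PSR outright if link_standby ends up false, since link-off is not supported there.
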
index 339701d7a9a5069ccce00f2f171f923073d2c3b5..6f5b511bdb5d9b41eea7011f477e9bbcf663030b 100644 (file)
@@ -331,6 +331,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (invalidate_domains) {
@@ -403,6 +404,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+               flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
        }
        if (invalidate_domains) {
@@ -787,6 +789,22 @@ static int wa_add(struct drm_i915_private *dev_priv,
 
 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
 
+static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg)
+{
+       struct drm_i915_private *dev_priv = ring->dev->dev_private;
+       struct i915_workarounds *wa = &dev_priv->workarounds;
+       const uint32_t index = wa->hw_whitelist_count[ring->id];
+
+       if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
+               return -EINVAL;
+
+       WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index),
+                i915_mmio_reg_offset(reg));
+       wa->hw_whitelist_count[ring->id]++;
+
+       return 0;
+}
+
 static int gen8_init_workarounds(struct intel_engine_cs *ring)
 {
        struct drm_device *dev = ring->dev;
@@ -892,6 +910,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t tmp;
+       int ret;
 
        /* WaEnableLbsSlaRetryTimerDecrement:skl */
        I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
@@ -962,6 +981,20 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
        /* WaDisableSTUnitPowerOptimization:skl,bxt */
        WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
+       /* WaOCLCoherentLineFlush:skl,bxt */
+       I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
+                                   GEN8_LQSC_FLUSH_COHERENT_LINES));
+
+       /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+       ret = wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1);
+       if (ret)
+               return ret;
+
+       /* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+       ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1);
+       if (ret)
+               return ret;
+
        return 0;
 }
 
@@ -1017,6 +1050,16 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
        if (ret)
                return ret;
 
+       /*
+        * The actual WA is to disable per-context preemption granularity
+        * control until D0, which is the default case, so this is equivalent to
+        * !WaDisablePerCtxtPreemptionGranularityControl:skl
+        */
+       if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) {
+               I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+                          _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+       }
+
        if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) {
                /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
                I915_WRITE(FF_SLICE_CS_CHICKEN2,
@@ -1069,6 +1112,11 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
                        GEN7_HALF_SLICE_CHICKEN1,
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
+       /* WaDisableLSQCROPERFforOCL:skl */
+       ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+       if (ret)
+               return ret;
+
        return skl_tune_iz_hashing(ring);
 }
 
@@ -1104,6 +1152,20 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring)
                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
        }
 
+       /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
+       /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
+       /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
+       /* WaDisableLSQCROPERFforOCL:bxt */
+       if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
+               ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1);
+               if (ret)
+                       return ret;
+
+               ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
@@ -1115,6 +1177,7 @@ int init_workarounds_ring(struct intel_engine_cs *ring)
        WARN_ON(ring->id != RCS);
 
        dev_priv->workarounds.count = 0;
+       dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
 
        if (IS_BROADWELL(dev))
                return bdw_init_workarounds(ring);
@@ -1865,15 +1928,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
                offset = cs_offset;
        }
 
-       ret = intel_ring_begin(req, 4);
+       ret = intel_ring_begin(req, 2);
        if (ret)
                return ret;
 
-       intel_ring_emit(ring, MI_BATCH_BUFFER);
+       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
        intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
                                        0 : MI_BATCH_NON_SECURE));
-       intel_ring_emit(ring, offset + len - 8);
-       intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
 
        return 0;
@@ -1899,6 +1960,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
        return 0;
 }
 
+static void cleanup_phys_status_page(struct intel_engine_cs *ring)
+{
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+       if (!dev_priv->status_page_dmah)
+               return;
+
+       drm_pci_free(ring->dev, dev_priv->status_page_dmah);
+       ring->status_page.page_addr = NULL;
+}
+
 static void cleanup_status_page(struct intel_engine_cs *ring)
 {
        struct drm_i915_gem_object *obj;
@@ -1915,9 +1987,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
 
 static int init_status_page(struct intel_engine_cs *ring)
 {
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_gem_object *obj = ring->status_page.obj;
 
-       if ((obj = ring->status_page.obj) == NULL) {
+       if (obj == NULL) {
                unsigned flags;
                int ret;
 
@@ -1988,6 +2060,7 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
        else
                iounmap(ringbuf->virtual_start);
        ringbuf->virtual_start = NULL;
+       ringbuf->vma = NULL;
        i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
@@ -2054,6 +2127,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                }
        }
 
+       ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+
        return 0;
 }
 
@@ -2162,7 +2237,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
                if (ret)
                        goto error;
        } else {
-               BUG_ON(ring->id != RCS);
+               WARN_ON(ring->id != RCS);
                ret = init_phys_status_page(ring);
                if (ret)
                        goto error;
@@ -2208,7 +2283,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
        if (ring->cleanup)
                ring->cleanup(ring);
 
-       cleanup_status_page(ring);
+       if (I915_NEED_GFX_HWS(ring->dev)) {
+               cleanup_status_page(ring);
+       } else {
+               WARN_ON(ring->id != RCS);
+               cleanup_phys_status_page(ring);
+       }
 
        i915_cmd_parser_fini_ring(ring);
        i915_gem_batch_pool_fini(&ring->batch_pool);
@@ -2664,6 +2744,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 
        ring->name = "render ring";
        ring->id = RCS;
+       ring->exec_id = I915_EXEC_RENDER;
        ring->mmio_base = RENDER_RING_BASE;
 
        if (INTEL_INFO(dev)->gen >= 8) {
@@ -2812,6 +2893,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 
        ring->name = "bsd ring";
        ring->id = VCS;
+       ring->exec_id = I915_EXEC_BSD;
 
        ring->write_tail = ring_write_tail;
        if (INTEL_INFO(dev)->gen >= 6) {
@@ -2888,6 +2970,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
 
        ring->name = "bsd2 ring";
        ring->id = VCS2;
+       ring->exec_id = I915_EXEC_BSD;
 
        ring->write_tail = ring_write_tail;
        ring->mmio_base = GEN8_BSD2_RING_BASE;
@@ -2918,6 +3001,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
        ring->name = "blitter ring";
        ring->id = BCS;
+       ring->exec_id = I915_EXEC_BLT;
 
        ring->mmio_base = BLT_RING_BASE;
        ring->write_tail = ring_write_tail;
@@ -2975,6 +3059,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 
        ring->name = "video enhancement ring";
        ring->id = VECS;
+       ring->exec_id = I915_EXEC_VEBOX;
 
        ring->mmio_base = VEBOX_RING_BASE;
        ring->write_tail = ring_write_tail;
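
wa_ring_whitelist_reg() above grows a per-engine table of non-privileged registers one slot at a time and bails out with -EINVAL once RING_MAX_NONPRIV_SLOTS is exhausted. The bounded-table pattern on its own; the slot count and register offset below are placeholders, not the driver's values:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NONPRIV_SLOTS 12    /* placeholder limit */

struct whitelist {
        uint32_t regs[MAX_NONPRIV_SLOTS];
        unsigned int count;
};

static int whitelist_add(struct whitelist *wl, uint32_t reg_offset)
{
        if (wl->count >= MAX_NONPRIV_SLOTS)
                return -EINVAL;         /* full: refuse rather than overwrite */

        wl->regs[wl->count++] = reg_offset;
        return 0;
}

int main(void)
{
        struct whitelist wl = { .count = 0 };

        if (whitelist_add(&wl, 0x2580))         /* placeholder offset */
                fprintf(stderr, "whitelist full\n");
        printf("entries: %u\n", wl.count);
        return 0;
}
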
index 49574ffe54bcf8420d33290a89e67f70d8d7b945..566b0ae10ce00f6a3eec840734c0c26c30aef27e 100644 (file)
@@ -93,11 +93,13 @@ struct intel_ring_hangcheck {
        int score;
        enum intel_ring_hangcheck_action action;
        int deadlock;
+       u32 instdone[I915_NUM_INSTDONE_REG];
 };
 
 struct intel_ringbuffer {
        struct drm_i915_gem_object *obj;
        void __iomem *virtual_start;
+       struct i915_vma *vma;
 
        struct intel_engine_cs *ring;
        struct list_head link;
@@ -147,14 +149,16 @@ struct  i915_ctx_workarounds {
 struct  intel_engine_cs {
        const char      *name;
        enum intel_ring_id {
-               RCS = 0x0,
-               VCS,
+               RCS = 0,
                BCS,
-               VECS,
-               VCS2
+               VCS,
+               VCS2,   /* Keep instances of the same type engine together. */
+               VECS
        } id;
 #define I915_NUM_RINGS 5
-#define LAST_USER_RING (VECS + 1)
+#define _VCS(n) (VCS + (n))
+       unsigned int exec_id;
+       unsigned int guc_id;
        u32             mmio_base;
        struct          drm_device *dev;
        struct intel_ringbuffer *buffer;
@@ -268,6 +272,8 @@ struct  intel_engine_cs {
        struct list_head execlist_queue;
        struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
+       bool disable_lite_restore_wa;
+       u32 ctx_desc_template;
        u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
        int             (*emit_request)(struct drm_i915_gem_request *request);
        int             (*emit_flush)(struct drm_i915_gem_request *request,
@@ -305,7 +311,6 @@ struct  intel_engine_cs {
 
        wait_queue_head_t irq_queue;
 
-       struct intel_context *default_context;
        struct intel_context *last_context;
 
        struct intel_ring_hangcheck hangcheck;
@@ -406,7 +411,7 @@ intel_write_status_page(struct intel_engine_cs *ring,
        ring->status_page.page_addr[reg] = value;
 }
 
-/**
+/*
  * Reads a dword out of the status page, which is written to from the command
  * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
  * MI_STORE_DATA_IMM.
@@ -423,6 +428,7 @@ intel_write_status_page(struct intel_engine_cs *ring,
  * The area from dword 0x30 to 0x3ff is available for driver usage.
  */
 #define I915_GEM_HWS_INDEX             0x30
+#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 #define I915_GEM_HWS_SCRATCH_INDEX     0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
index ddbdbffe829a684eff8c2ef42a3cf29ccbf112d8..bbca527184d0dbb79f5b37864482ff29c09ce517 100644 (file)
@@ -532,7 +532,8 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
        bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
                                        SKL_DISP_PW_2);
 
-       WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+       WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+                 "Platform doesn't support DC5.\n");
        WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
        WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
 
@@ -568,7 +569,8 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
 {
        struct drm_device *dev = dev_priv->dev;
 
-       WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+       WARN_ONCE(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev),
+                 "Platform doesn't support DC6.\n");
        WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
        WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
                  "Backlight is not disabled.\n");
@@ -595,7 +597,8 @@ static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv)
 {
        assert_can_disable_dc5(dev_priv);
 
-       if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+       if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+           i915.enable_dc != 0 && i915.enable_dc != 1)
                assert_can_disable_dc6(dev_priv);
 
        gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -623,7 +626,6 @@ void skl_disable_dc6(struct drm_i915_private *dev_priv)
 static void skl_set_power_well(struct drm_i915_private *dev_priv,
                        struct i915_power_well *power_well, bool enable)
 {
-       struct drm_device *dev = dev_priv->dev;
        uint32_t tmp, fuse_status;
        uint32_t req_mask, state_mask;
        bool is_enabled, enable_requested, check_fuse_status = false;
@@ -667,17 +669,6 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
                                !I915_READ(HSW_PWR_WELL_BIOS),
                                "Invalid for power well status to be enabled, unless done by the BIOS, \
                                when request is to disable!\n");
-                       if (power_well->data == SKL_DISP_PW_2) {
-                               /*
-                                * DDI buffer programming unnecessary during
-                                * driver-load/resume as it's already done
-                                * during modeset initialization then. It's
-                                * also invalid here as encoder list is still
-                                * uninitialized.
-                                */
-                               if (!dev_priv->power_domains.initializing)
-                                       intel_prepare_ddi(dev);
-                       }
                        I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
                }
 
@@ -783,7 +774,8 @@ static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
 static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
                                           struct i915_power_well *power_well)
 {
-       if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 && i915.enable_dc != 1)
+       if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+           i915.enable_dc != 0 && i915.enable_dc != 1)
                skl_enable_dc6(dev_priv);
        else
                gen9_enable_dc5(dev_priv);
@@ -795,7 +787,8 @@ static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
        if (power_well->count > 0) {
                gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
        } else {
-               if (IS_SKYLAKE(dev_priv) && i915.enable_dc != 0 &&
+               if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+                   i915.enable_dc != 0 &&
                    i915.enable_dc != 1)
                        gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
                else
@@ -1851,7 +1844,7 @@ void skl_pw1_misc_io_init(struct drm_i915_private *dev_priv)
 {
        struct i915_power_well *well;
 
-       if (!IS_SKYLAKE(dev_priv))
+       if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
                return;
 
        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
@@ -1865,7 +1858,7 @@ void skl_pw1_misc_io_fini(struct drm_i915_private *dev_priv)
 {
        struct i915_power_well *well;
 
-       if (!IS_SKYLAKE(dev_priv))
+       if (!(IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
                return;
 
        well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
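
The runtime-PM hunks above extend every SKL-only check to Kabylake and keep the rule that DC6 is only used when the enable_dc parameter does not pin the driver to DC5 (values 0 or 1). That selection condensed into one helper, with a boolean standing in for the platform checks:

#include <stdbool.h>
#include <stdio.h>

static const char *pick_dc_state(bool skl_or_kbl, int enable_dc)
{
        if (skl_or_kbl && enable_dc != 0 && enable_dc != 1)
                return "DC6";
        return "DC5";
}

int main(void)
{
        printf("%s\n", pick_dc_state(true, -1));        /* not pinned -> DC6 */
        printf("%s\n", pick_dc_state(true, 1));         /* pinned     -> DC5 */
        return 0;
}
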
index 2e2d4eb4a00d190b24c03c1998b4e9f0cd9f06c5..db0ed499268ae216cb0ee8185467078907d954a6 100644 (file)
@@ -24,8 +24,8 @@
  *     Eric Anholt <eric@anholt.net>
  */
 
-/**
- * @file SDVO command definitions and structures.
+/*
+ * SDVO command definitions and structures.
  */
 
 #define SDVO_OUTPUT_FIRST   (0)
@@ -66,39 +66,39 @@ struct intel_sdvo_caps {
 #define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
 #define DTD_FLAG_INTERLACE     (1 << 7)
 
-/** This matches the EDID DTD structure, more or less */
+/* This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
        struct {
-               u16 clock;      /**< pixel clock, in 10kHz units */
-               u8 h_active;    /**< lower 8 bits (pixels) */
-               u8 h_blank;     /**< lower 8 bits (pixels) */
-               u8 h_high;      /**< upper 4 bits each h_active, h_blank */
-               u8 v_active;    /**< lower 8 bits (lines) */
-               u8 v_blank;     /**< lower 8 bits (lines) */
-               u8 v_high;      /**< upper 4 bits each v_active, v_blank */
+               u16 clock;      /* pixel clock, in 10kHz units */
+               u8 h_active;    /* lower 8 bits (pixels) */
+               u8 h_blank;     /* lower 8 bits (pixels) */
+               u8 h_high;      /* upper 4 bits each h_active, h_blank */
+               u8 v_active;    /* lower 8 bits (lines) */
+               u8 v_blank;     /* lower 8 bits (lines) */
+               u8 v_high;      /* upper 4 bits each v_active, v_blank */
        } part1;
 
        struct {
-               u8 h_sync_off;  /**< lower 8 bits, from hblank start */
-               u8 h_sync_width;        /**< lower 8 bits (pixels) */
-               /** lower 4 bits each vsync offset, vsync width */
+               u8 h_sync_off;  /* lower 8 bits, from hblank start */
+               u8 h_sync_width;        /* lower 8 bits (pixels) */
+               /* lower 4 bits each vsync offset, vsync width */
                u8 v_sync_off_width;
-               /**
+               /*
                * 2 high bits of hsync offset, 2 high bits of hsync width,
                * bits 4-5 of vsync offset, and 2 high bits of vsync width.
                */
                u8 sync_off_width_high;
                u8 dtd_flags;
                u8 sdvo_flags;
-               /** bits 6-7 of vsync offset at bits 6-7 */
+               /* bits 6-7 of vsync offset at bits 6-7 */
                u8 v_sync_off_high;
                u8 reserved;
        } part2;
 } __packed;
 
 struct intel_sdvo_pixel_clock_range {
-       u16 min;        /**< pixel clock, in 10kHz units */
-       u16 max;        /**< pixel clock, in 10kHz units */
+       u16 min;        /* pixel clock, in 10kHz units */
+       u16 max;        /* pixel clock, in 10kHz units */
 } __packed;
 
 struct intel_sdvo_preferred_input_timing_args {
@@ -144,7 +144,7 @@ struct intel_sdvo_preferred_input_timing_args {
 
 #define SDVO_CMD_RESET                                 0x01
 
-/** Returns a struct intel_sdvo_caps */
+/* Returns a struct intel_sdvo_caps */
 #define SDVO_CMD_GET_DEVICE_CAPS                       0x02
 
 #define SDVO_CMD_GET_FIRMWARE_REV                      0x86
@@ -152,7 +152,7 @@ struct intel_sdvo_preferred_input_timing_args {
 # define SDVO_DEVICE_FIRMWARE_MAJOR                    SDVO_I2C_RETURN_1
 # define SDVO_DEVICE_FIRMWARE_PATCH                    SDVO_I2C_RETURN_2
 
-/**
+/*
  * Reports which inputs are trained (managed to sync).
  *
  * Devices must have trained within 2 vsyncs of a mode change.
@@ -164,10 +164,10 @@ struct intel_sdvo_get_trained_inputs_response {
        unsigned int pad:6;
 } __packed;
 
-/** Returns a struct intel_sdvo_output_flags of active outputs. */
+/* Returns a struct intel_sdvo_output_flags of active outputs. */
 #define SDVO_CMD_GET_ACTIVE_OUTPUTS                    0x04
 
-/**
+/*
  * Sets the current set of active outputs.
  *
  * Takes a struct intel_sdvo_output_flags.  Must be preceded by a SET_IN_OUT_MAP
@@ -175,7 +175,7 @@ struct intel_sdvo_get_trained_inputs_response {
  */
 #define SDVO_CMD_SET_ACTIVE_OUTPUTS                    0x05
 
-/**
+/*
  * Returns the current mapping of SDVO inputs to outputs on the device.
  *
  * Returns two struct intel_sdvo_output_flags structures.
@@ -185,29 +185,29 @@ struct intel_sdvo_in_out_map {
        u16 in0, in1;
 };
 
-/**
+/*
  * Sets the current mapping of SDVO inputs to outputs on the device.
  *
  * Takes two struct i380_sdvo_output_flags structures.
  */
 #define SDVO_CMD_SET_IN_OUT_MAP                                0x07
 
-/**
+/*
  * Returns a struct intel_sdvo_output_flags of attached displays.
  */
 #define SDVO_CMD_GET_ATTACHED_DISPLAYS                 0x0b
 
-/**
+/*
  * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
  */
 #define SDVO_CMD_GET_HOT_PLUG_SUPPORT                  0x0c
 
-/**
+/*
  * Takes a struct intel_sdvo_output_flags.
  */
 #define SDVO_CMD_SET_ACTIVE_HOT_PLUG                   0x0d
 
-/**
+/*
  * Returns a struct intel_sdvo_output_flags of displays with hot plug
  * interrupts enabled.
  */
@@ -221,7 +221,7 @@ struct intel_sdvo_get_interrupt_event_source_response {
        unsigned int pad:6;
 } __packed;
 
-/**
+/*
  * Selects which input is affected by future input commands.
  *
  * Commands affected include SET_INPUT_TIMINGS_PART[12],
@@ -234,7 +234,7 @@ struct intel_sdvo_set_target_input_args {
        unsigned int pad:7;
 } __packed;
 
-/**
+/*
  * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
  * future output commands.
  *
@@ -280,7 +280,7 @@ struct intel_sdvo_set_target_input_args {
 # define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH                     (2 << 4)
 # define SDVO_DTD_VSYNC_OFF_HIGH                       SDVO_I2C_ARG_6
 
-/**
+/*
  * Generates a DTD based on the given width, height, and flags.
  *
  * This will be supported by any device supporting scaling or interlaced
@@ -300,24 +300,24 @@ struct intel_sdvo_set_target_input_args {
 #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1      0x1b
 #define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2      0x1c
 
-/** Returns a struct intel_sdvo_pixel_clock_range */
+/* Returns a struct intel_sdvo_pixel_clock_range */
 #define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE           0x1d
-/** Returns a struct intel_sdvo_pixel_clock_range */
+/* Returns a struct intel_sdvo_pixel_clock_range */
 #define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE          0x1e
 
-/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+/* Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
 #define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS                0x1f
 
-/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+/* Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
 #define SDVO_CMD_GET_CLOCK_RATE_MULT                   0x20
-/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+/* Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
 #define SDVO_CMD_SET_CLOCK_RATE_MULT                   0x21
 # define SDVO_CLOCK_RATE_MULT_1X                               (1 << 0)
 # define SDVO_CLOCK_RATE_MULT_2X                               (1 << 1)
 # define SDVO_CLOCK_RATE_MULT_4X                               (1 << 3)
 
 #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS              0x27
-/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+/* 6 bytes of bit flags for TV formats shared by all TV format functions */
 struct intel_sdvo_tv_format {
        unsigned int ntsc_m:1;
        unsigned int ntsc_j:1;
@@ -376,7 +376,7 @@ struct intel_sdvo_tv_format {
 
 #define SDVO_CMD_SET_TV_FORMAT                         0x29
 
-/** Returns the resolutiosn that can be used with the given TV format */
+/* Returns the resolutions that can be used with the given TV format */
 #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT           0x83
 struct intel_sdvo_sdtv_resolution_request {
        unsigned int ntsc_m:1;
@@ -539,7 +539,7 @@ struct intel_sdvo_hdtv_resolution_reply {
 #define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING                0x2d
 #define SDVO_CMD_GET_PANEL_POWER_SEQUENCING            0x2e
 #define SDVO_CMD_SET_PANEL_POWER_SEQUENCING            0x2f
-/**
+/*
  * The panel power sequencing parameters are in units of milliseconds.
  * The high fields are bits 8:9 of the 10-bit values.
  */
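
The DTD comments above say the structure "matches the EDID DTD structure, more or less": the clock is stored in 10 kHz units and the high bits of the active/blanking fields are packed into shared nibble bytes. A decoding sketch under the assumption that, as in EDID, h_active's upper bits live in the high nibble of h_high; that bit placement is an assumption, the hunk's comment does not pin it down:

#include <stdint.h>
#include <stdio.h>

static unsigned int dtd_h_active(uint8_t h_active, uint8_t h_high)
{
        return ((unsigned int)(h_high >> 4) << 8) | h_active;   /* assumed layout */
}

static unsigned int dtd_clock_khz(uint16_t clock)
{
        return clock * 10u;                     /* stored in 10 kHz units */
}

int main(void)
{
        printf("h_active = %u\n", dtd_h_active(0x80, 0x70));    /* 1920 */
        printf("clock    = %u kHz\n", dtd_clock_khz(14850));    /* 148500 */
        return 0;
}
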
index 8831fc579adeb5c3ddc3df548bd78cb7df026d5f..c3998188cf35de5111c735282106da8f59c9c5c9 100644 (file)
@@ -129,17 +129,18 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
        return val;
 }
 
-u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
+u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg)
 {
        u32 val = 0;
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
                        SB_CRRDDA_NP, reg, &val);
        return val;
 }
 
-void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
+void vlv_iosf_sb_write(struct drm_i915_private *dev_priv,
+                      u8 port, u32 reg, u32 val)
 {
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
+       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), port,
                        SB_CRWRDA_NP, reg, &val);
 }
 
@@ -171,20 +172,6 @@ void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
                        SB_CRWRDA_NP, reg, &val);
 }
 
-u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
-{
-       u32 val = 0;
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
-                       SB_CRRDDA_NP, reg, &val);
-       return val;
-}
-
-void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
-{
-       vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
-                       SB_CRWRDA_NP, reg, &val);
-}
-
 u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg)
 {
        u32 val = 0;
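
The sideband hunk above folds the per-unit helpers (gpio_nc, gps_core, ...) into one read/write pair keyed by an IOSF port id. The shape of that refactor, with the transport and the port numbers faked:

#include <stdint.h>
#include <stdio.h>

enum { PORT_GPIO_NC = 1, PORT_GPS_CORE = 2 };   /* illustrative ids only */

/* Stand-in for vlv_sideband_rw(): just echoes something deterministic. */
static uint32_t fake_sideband_rw(uint8_t port, uint32_t reg, int write, uint32_t val)
{
        return write ? val : ((uint32_t)port << 16) | (reg & 0xffff);
}

static uint32_t iosf_sb_read(uint8_t port, uint32_t reg)
{
        return fake_sideband_rw(port, reg, 0, 0);
}

static void iosf_sb_write(uint8_t port, uint32_t reg, uint32_t val)
{
        fake_sideband_rw(port, reg, 1, val);
}

int main(void)
{
        printf("0x%08x\n", (unsigned)iosf_sb_read(PORT_GPIO_NC, 0x4068));
        iosf_sb_write(PORT_GPS_CORE, 0x4068, 0x1);
        return 0;
}
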
index 4ff7a1f4183eeccf1fbcdea632ca54f3c0ab6d38..a2582c455b3623a13e32bc32ac49b5b5c0bf6149 100644 (file)
@@ -178,28 +178,33 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
 }
 
 static void
-skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
-                struct drm_framebuffer *fb,
-                int crtc_x, int crtc_y,
-                unsigned int crtc_w, unsigned int crtc_h,
-                uint32_t x, uint32_t y,
-                uint32_t src_w, uint32_t src_h)
+skl_update_plane(struct drm_plane *drm_plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = drm_plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+       struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        const int pipe = intel_plane->pipe;
        const int plane = intel_plane->plane + 1;
        u32 plane_ctl, stride_div, stride;
-       const struct drm_intel_sprite_colorkey *key =
-               &to_intel_plane_state(drm_plane->state)->ckey;
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
        u32 surf_addr;
        u32 tile_height, plane_offset, plane_size;
        unsigned int rotation;
        int x_offset, y_offset;
-       struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
-       int scaler_id;
+       int crtc_x = plane_state->dst.x1;
+       int crtc_y = plane_state->dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+       uint32_t x = plane_state->src.x1 >> 16;
+       uint32_t y = plane_state->src.y1 >> 16;
+       uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+       uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
+       const struct intel_scaler *scaler =
+               &crtc_state->scaler_state.scalers[plane_state->scaler_id];
 
        plane_ctl = PLANE_CTL_ENABLE |
                PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -208,14 +213,12 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
        plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
        plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
 
-       rotation = drm_plane->state->rotation;
+       rotation = plane_state->base.rotation;
        plane_ctl |= skl_plane_ctl_rotation(rotation);
 
-       stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
+       stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
                                               fb->pixel_format);
 
-       scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
-
        /* Sizes are 0 based */
        src_w--;
        src_h--;
@@ -236,9 +239,10 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
        surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
 
        if (intel_rotation_90_or_270(rotation)) {
+               int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+
                /* stride: Surface height in tiles */
-               tile_height = intel_tile_height(dev, fb->pixel_format,
-                                               fb->modifier[0], 0);
+               tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
                stride = DIV_ROUND_UP(fb->height, tile_height);
                plane_size = (src_w << 16) | src_h;
                x_offset = stride * tile_height - y - (src_h + 1);
@@ -256,13 +260,13 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
        I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
 
        /* program plane scaler */
-       if (scaler_id >= 0) {
+       if (plane_state->scaler_id >= 0) {
                uint32_t ps_ctrl = 0;
+               int scaler_id = plane_state->scaler_id;
 
                DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
                        PS_PLANE_SEL(plane));
-               ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
-                       crtc_state->scaler_state.scalers[scaler_id].mode;
+               ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode;
                I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
                I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
                I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
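
With the new signature the sprite update hooks derive their coordinates from plane_state directly: ->dst is in whole pixels while ->src is in 16.16 fixed point, hence the >> 16 when recovering x/y and the source size. A standalone illustration with made-up rectangles and a stand-in for drm_rect:

#include <stdint.h>
#include <stdio.h>

struct rect { int32_t x1, y1, x2, y2; };        /* stand-in for drm_rect */

int main(void)
{
        struct rect src = { 64 << 16, 0, (64 + 1280) << 16, 720 << 16 };

        uint32_t x     = src.x1 >> 16;
        uint32_t src_w = (uint32_t)(src.x2 - src.x1) >> 16;
        uint32_t src_h = (uint32_t)(src.y2 - src.y1) >> 16;

        printf("x=%u src_w=%u src_h=%u\n", (unsigned)x, (unsigned)src_w,
               (unsigned)src_h);                /* 64 1280 720 */
        return 0;
}
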
@@ -334,24 +338,29 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
 }
 
 static void
-vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
-                struct drm_framebuffer *fb,
-                int crtc_x, int crtc_y,
-                unsigned int crtc_w, unsigned int crtc_h,
-                uint32_t x, uint32_t y,
-                uint32_t src_w, uint32_t src_h)
+vlv_update_plane(struct drm_plane *dplane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = dplane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_plane *intel_plane = to_intel_plane(dplane);
+       struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        int pipe = intel_plane->pipe;
        int plane = intel_plane->plane;
        u32 sprctl;
-       unsigned long sprsurf_offset, linear_offset;
-       int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       const struct drm_intel_sprite_colorkey *key =
-               &to_intel_plane_state(dplane->state)->ckey;
+       u32 sprsurf_offset, linear_offset;
+       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       int crtc_x = plane_state->dst.x1;
+       int crtc_y = plane_state->dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+       uint32_t x = plane_state->src.x1 >> 16;
+       uint32_t y = plane_state->src.y1 >> 16;
+       uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+       uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
 
        sprctl = SP_ENABLE;
 
@@ -413,20 +422,18 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
        crtc_w--;
        crtc_h--;
 
-       linear_offset = y * fb->pitches[0] + x * pixel_size;
-       sprsurf_offset = intel_gen4_compute_page_offset(dev_priv,
-                                                       &x, &y,
-                                                       obj->tiling_mode,
-                                                       pixel_size,
-                                                       fb->pitches[0]);
+       linear_offset = y * fb->pitches[0] + x * cpp;
+       sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+                                                  fb->modifier[0], cpp,
+                                                  fb->pitches[0]);
        linear_offset -= sprsurf_offset;
 
-       if (dplane->state->rotation == BIT(DRM_ROTATE_180)) {
+       if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
                sprctl |= SP_ROTATE_180;
 
                x += src_w;
                y += src_h;
-               linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+               linear_offset += src_h * fb->pitches[0] + src_w * cpp;
        }
 
        if (key->flags) {
@@ -474,23 +481,28 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
 }
 
 static void
-ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-                struct drm_framebuffer *fb,
-                int crtc_x, int crtc_y,
-                unsigned int crtc_w, unsigned int crtc_h,
-                uint32_t x, uint32_t y,
-                uint32_t src_w, uint32_t src_h)
+ivb_update_plane(struct drm_plane *plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        enum pipe pipe = intel_plane->pipe;
        u32 sprctl, sprscale = 0;
-       unsigned long sprsurf_offset, linear_offset;
-       int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       const struct drm_intel_sprite_colorkey *key =
-               &to_intel_plane_state(plane->state)->ckey;
+       u32 sprsurf_offset, linear_offset;
+       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       int crtc_x = plane_state->dst.x1;
+       int crtc_y = plane_state->dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+       uint32_t x = plane_state->src.x1 >> 16;
+       uint32_t y = plane_state->src.y1 >> 16;
+       uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+       uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
 
        sprctl = SPRITE_ENABLE;
 
@@ -543,22 +555,20 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_w != src_w || crtc_h != src_h)
                sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
 
-       linear_offset = y * fb->pitches[0] + x * pixel_size;
-       sprsurf_offset =
-               intel_gen4_compute_page_offset(dev_priv,
-                                              &x, &y, obj->tiling_mode,
-                                              pixel_size, fb->pitches[0]);
+       linear_offset = y * fb->pitches[0] + x * cpp;
+       sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+                                                  fb->modifier[0], cpp,
+                                                  fb->pitches[0]);
        linear_offset -= sprsurf_offset;
 
-       if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+       if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
                sprctl |= SPRITE_ROTATE_180;
 
                /* HSW and BDW does this automagically in hardware */
                if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
                        x += src_w;
                        y += src_h;
-                       linear_offset += src_h * fb->pitches[0] +
-                               src_w * pixel_size;
+                       linear_offset += src_h * fb->pitches[0] + src_w * cpp;
                }
        }
 
@@ -612,23 +622,28 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
 }
 
 static void
-ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
-                struct drm_framebuffer *fb,
-                int crtc_x, int crtc_y,
-                unsigned int crtc_w, unsigned int crtc_h,
-                uint32_t x, uint32_t y,
-                uint32_t src_w, uint32_t src_h)
+ilk_update_plane(struct drm_plane *plane,
+                const struct intel_crtc_state *crtc_state,
+                const struct intel_plane_state *plane_state)
 {
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_plane *intel_plane = to_intel_plane(plane);
+       struct drm_framebuffer *fb = plane_state->base.fb;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        int pipe = intel_plane->pipe;
-       unsigned long dvssurf_offset, linear_offset;
        u32 dvscntr, dvsscale;
-       int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-       const struct drm_intel_sprite_colorkey *key =
-               &to_intel_plane_state(plane->state)->ckey;
+       u32 dvssurf_offset, linear_offset;
+       int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
+       const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+       int crtc_x = plane_state->dst.x1;
+       int crtc_y = plane_state->dst.y1;
+       uint32_t crtc_w = drm_rect_width(&plane_state->dst);
+       uint32_t crtc_h = drm_rect_height(&plane_state->dst);
+       uint32_t x = plane_state->src.x1 >> 16;
+       uint32_t y = plane_state->src.y1 >> 16;
+       uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
+       uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
 
        dvscntr = DVS_ENABLE;
 
@@ -677,19 +692,18 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        if (crtc_w != src_w || crtc_h != src_h)
                dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
 
-       linear_offset = y * fb->pitches[0] + x * pixel_size;
-       dvssurf_offset =
-               intel_gen4_compute_page_offset(dev_priv,
-                                              &x, &y, obj->tiling_mode,
-                                              pixel_size, fb->pitches[0]);
+       linear_offset = y * fb->pitches[0] + x * cpp;
+       dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y,
+                                                  fb->modifier[0], cpp,
+                                                  fb->pitches[0]);
        linear_offset -= dvssurf_offset;
 
-       if (plane->state->rotation == BIT(DRM_ROTATE_180)) {
+       if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
                dvscntr |= DVS_ROTATE_180;
 
                x += src_w;
                y += src_h;
-               linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
+               linear_offset += src_h * fb->pitches[0] + src_w * cpp;
        }
 
        if (key->flags) {
@@ -754,7 +768,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
        int hscale, vscale;
        int max_scale, min_scale;
        bool can_scale;
-       int pixel_size;
 
        if (!fb) {
                state->visible = false;
@@ -876,6 +889,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
        /* Check size restrictions when scaling */
        if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
                unsigned int width_bytes;
+               int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
 
                WARN_ON(!can_scale);
 
@@ -887,9 +901,7 @@ intel_check_sprite_plane(struct drm_plane *plane,
                if (src_w < 3 || src_h < 3)
                        state->visible = false;
 
-               pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
-               width_bytes = ((src_x * pixel_size) & 63) +
-                                       src_w * pixel_size;
+               width_bytes = ((src_x * cpp) & 63) + src_w * cpp;
 
                if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
                    width_bytes > 4096 || fb->pitches[0] > 4096)) {
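A quick stand-alone check of the width_bytes term used above; the full gen < 9 test also bounds src_w/src_h at 2048 and the pitch at 4096, the sample values here are made up.

#include <stdio.h>

/* model of the width_bytes part of the scaled-sprite check shown above */
static unsigned int width_bytes(unsigned int src_x, unsigned int src_w,
				unsigned int cpp)
{
	return ((src_x * cpp) & 63) + src_w * cpp;
}

int main(void)
{
	/* 1024 px at 4 bytes/px from a 64-byte-aligned x: exactly 4096 */
	printf("%u\n", width_bytes(0, 1024, 4));
	/* same width from x = 8: the misaligned start adds 32 bytes, 4128 > 4096 */
	printf("%u\n", width_bytes(8, 1024, 4));
	return 0;
}
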
@@ -913,30 +925,6 @@ intel_check_sprite_plane(struct drm_plane *plane,
        return 0;
 }
 
-static void
-intel_commit_sprite_plane(struct drm_plane *plane,
-                         struct intel_plane_state *state)
-{
-       struct drm_crtc *crtc = state->base.crtc;
-       struct intel_plane *intel_plane = to_intel_plane(plane);
-       struct drm_framebuffer *fb = state->base.fb;
-
-       crtc = crtc ? crtc : plane->crtc;
-
-       if (state->visible) {
-               intel_plane->update_plane(plane, crtc, fb,
-                                         state->dst.x1, state->dst.y1,
-                                         drm_rect_width(&state->dst),
-                                         drm_rect_height(&state->dst),
-                                         state->src.x1 >> 16,
-                                         state->src.y1 >> 16,
-                                         drm_rect_width(&state->src) >> 16,
-                                         drm_rect_height(&state->src) >> 16);
-       } else {
-               intel_plane->disable_plane(plane, crtc);
-       }
-}
-
 int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
 {
@@ -1118,7 +1106,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
        intel_plane->plane = plane;
        intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
        intel_plane->check_plane = intel_check_sprite_plane;
-       intel_plane->commit_plane = intel_commit_sprite_plane;
        possible_crtcs = (1 << pipe);
        ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
                                       &intel_plane_funcs,
index 948cbff6c62e164eddc1b965219f4da921705145..5034b0055169512ca816c507e69fa1ae7dd74a79 100644 (file)
@@ -1420,6 +1420,7 @@ intel_tv_get_modes(struct drm_connector *connector)
                if (!mode_ptr)
                        continue;
                strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
+               mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
 
                mode_ptr->hdisplay = hactive_s;
                mode_ptr->hsync_start = hactive_s + 1;
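The added line above guards against strncpy() leaving mode_ptr->name unterminated when input->name fills the whole buffer. A minimal user-space illustration of the same pattern (buffer size and name are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8];
	const char *input = "NTSC-443-long";	/* longer than the buffer */

	strncpy(name, input, sizeof(name));
	/* strncpy() copied 8 bytes and no '\0'; terminate explicitly,
	 * mirroring the fix above. */
	name[sizeof(name) - 1] = '\0';

	printf("%s\n", name);	/* prints "NTSC-44" */
	return 0;
}
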
index 277e60ae0e474a2f665f59b9c4aabb35a74b1d96..436d8f2b86823d30bd7f33bc2247d7d905a787ab 100644 (file)
@@ -327,13 +327,54 @@ static void intel_uncore_ellc_detect(struct drm_device *dev)
        }
 }
 
+static bool
+fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+       u32 dbg;
+
+       dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
+       if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
+               return false;
+
+       __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+
+       return true;
+}
+
+static bool
+vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+       u32 cer;
+
+       cer = __raw_i915_read32(dev_priv, CLAIM_ER);
+       if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
+               return false;
+
+       __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
+
+       return true;
+}
+
+static bool
+check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+{
+       if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
+               return fpga_check_for_unclaimed_mmio(dev_priv);
+
+       if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               return vlv_check_for_unclaimed_mmio(dev_priv);
+
+       return false;
+}
+
 static void __intel_uncore_early_sanitize(struct drm_device *dev,
                                          bool restore_forcewake)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (HAS_FPGA_DBG_UNCLAIMED(dev))
-               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       /* clear out unclaimed reg detection bit */
+       if (check_for_unclaimed_mmio(dev_priv))
+               DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
 
        /* clear out old GT FIFO errors */
        if (IS_GEN6(dev) || IS_GEN7(dev))
@@ -359,6 +400,8 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
 
 void intel_uncore_sanitize(struct drm_device *dev)
 {
+       i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
+
        /* BIOS often leaves RC6 enabled, but disable it for hw init */
        intel_disable_gt_powersave(dev);
 }
@@ -585,38 +628,38 @@ ilk_dummy_write(struct drm_i915_private *dev_priv)
 }
 
 static void
-hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv,
-                       i915_reg_t reg, bool read, bool before)
+__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+                     const i915_reg_t reg,
+                     const bool read,
+                     const bool before)
 {
-       const char *op = read ? "reading" : "writing to";
-       const char *when = before ? "before" : "after";
-
-       if (!i915.mmio_debug)
+       /* XXX. We limit the auto arming traces for mmio
+        * debugs on these platforms. There are just too many
+        * revealed by these and CI/Bat suffers from the noise.
+        * Please fix and then re-enable the automatic traces.
+        */
+       if (i915.mmio_debug < 2 &&
+           (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
                return;
 
-       if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
-               WARN(1, "Unclaimed register detected %s %s register 0x%x\n",
-                    when, op, i915_mmio_reg_offset(reg));
-               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       if (WARN(check_for_unclaimed_mmio(dev_priv),
+                "Unclaimed register detected %s %s register 0x%x\n",
+                before ? "before" : "after",
+                read ? "reading" : "writing to",
+                i915_mmio_reg_offset(reg)))
                i915.mmio_debug--; /* Only report the first N failures */
-       }
 }
 
-static void
-hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv)
+static inline void
+unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+                   const i915_reg_t reg,
+                   const bool read,
+                   const bool before)
 {
-       static bool mmio_debug_once = true;
-
-       if (i915.mmio_debug || !mmio_debug_once)
+       if (likely(!i915.mmio_debug))
                return;
 
-       if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
-               DRM_DEBUG("Unclaimed register detected, "
-                         "enabling oneshot unclaimed register reporting. "
-                         "Please use i915.mmio_debug=N for more information.\n");
-               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-               i915.mmio_debug = mmio_debug_once--;
-       }
+       __unclaimed_reg_debug(dev_priv, reg, read, before);
 }
 
 #define GEN2_READ_HEADER(x) \
@@ -664,9 +707,11 @@ __gen2_read(64)
        unsigned long irqflags; \
        u##x val = 0; \
        assert_rpm_wakelock_held(dev_priv); \
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+       unclaimed_reg_debug(dev_priv, reg, true, true)
 
 #define GEN6_READ_FOOTER \
+       unclaimed_reg_debug(dev_priv, reg, true, false); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
        trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
        return val
@@ -699,11 +744,9 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv,
 static u##x \
 gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        GEN6_READ_HEADER(x); \
-       hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
        if (NEEDS_FORCE_WAKE(offset)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        val = __raw_i915_read##x(dev_priv, reg); \
-       hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
        GEN6_READ_FOOTER; \
 }
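With this change the unclaimed-register checks live in GEN6_READ_HEADER/FOOTER instead of each per-gen body. A rough expansion of gen6_read32() for illustration only; the parts of GEN6_READ_HEADER not visible in the hunks above (such as the offset declaration) are assumed, not quoted.

static u32 gen6_read32_expanded(struct drm_i915_private *dev_priv,
				i915_reg_t reg, bool trace)
{
	/* from GEN6_READ_HEADER(32), as shown above */
	unsigned long irqflags;
	u32 val = 0;
	u32 offset = i915_mmio_reg_offset(reg);	/* assumed, not in the hunk */

	assert_rpm_wakelock_held(dev_priv);
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	unclaimed_reg_debug(dev_priv, reg, true, true);		/* "before" check */

	/* body of __gen6_read(32) */
	if (NEEDS_FORCE_WAKE(offset))
		__force_wake_get(dev_priv, FORCEWAKE_RENDER);
	val = __raw_i915_read32(dev_priv, reg);

	/* from GEN6_READ_FOOTER */
	unclaimed_reg_debug(dev_priv, reg, true, false);	/* "after" check */
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace);
	return val;
}
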
 
@@ -751,7 +794,6 @@ static u##x \
 gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_READ_HEADER(x); \
-       hsw_unclaimed_reg_debug(dev_priv, reg, true, true); \
        if (!SKL_NEEDS_FORCE_WAKE(offset)) \
                fw_engine = 0; \
        else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \
@@ -765,7 +807,6 @@ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        val = __raw_i915_read##x(dev_priv, reg); \
-       hsw_unclaimed_reg_debug(dev_priv, reg, true, false); \
        GEN6_READ_FOOTER; \
 }
 
@@ -864,9 +905,11 @@ __gen2_write(64)
        unsigned long irqflags; \
        trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
        assert_rpm_wakelock_held(dev_priv); \
-       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
+       spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+       unclaimed_reg_debug(dev_priv, reg, false, true)
 
 #define GEN6_WRITE_FOOTER \
+       unclaimed_reg_debug(dev_priv, reg, false, false); \
        spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
 
 #define __gen6_write(x) \
@@ -892,13 +935,10 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t
        if (NEEDS_FORCE_WAKE(offset)) { \
                __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
        } \
-       hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        __raw_i915_write##x(dev_priv, reg, val); \
        if (unlikely(__fifo_ret)) { \
                gen6_gt_check_fifodbg(dev_priv); \
        } \
-       hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
-       hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
 }
 
@@ -928,12 +968,9 @@ static bool is_gen8_shadowed(struct drm_i915_private *dev_priv,
 static void \
 gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
        GEN6_WRITE_HEADER; \
-       hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \
                __force_wake_get(dev_priv, FORCEWAKE_RENDER); \
        __raw_i915_write##x(dev_priv, reg, val); \
-       hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
-       hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
 }
 
@@ -987,7 +1024,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
                bool trace) { \
        enum forcewake_domains fw_engine; \
        GEN6_WRITE_HEADER; \
-       hsw_unclaimed_reg_debug(dev_priv, reg, false, true); \
        if (!SKL_NEEDS_FORCE_WAKE(offset) || \
            is_gen9_shadowed(dev_priv, reg)) \
                fw_engine = 0; \
@@ -1002,8 +1038,6 @@ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \
        if (fw_engine) \
                __force_wake_get(dev_priv, fw_engine); \
        __raw_i915_write##x(dev_priv, reg, val); \
-       hsw_unclaimed_reg_debug(dev_priv, reg, false, false); \
-       hsw_unclaimed_reg_detect(dev_priv); \
        GEN6_WRITE_FOOTER; \
 }
 
@@ -1223,6 +1257,8 @@ void intel_uncore_init(struct drm_device *dev)
        intel_uncore_fw_domains_init(dev);
        __intel_uncore_early_sanitize(dev, false);
 
+       dev_priv->uncore.unclaimed_mmio_check = 1;
+
        switch (INTEL_INFO(dev)->gen) {
        default:
        case 9:
@@ -1580,13 +1616,26 @@ bool intel_has_gpu_reset(struct drm_device *dev)
        return intel_get_gpu_reset(dev) != NULL;
 }
 
-void intel_uncore_check_errors(struct drm_device *dev)
+bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       return check_for_unclaimed_mmio(dev_priv);
+}
+
+bool
+intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
+{
+       if (unlikely(i915.mmio_debug ||
+                    dev_priv->uncore.unclaimed_mmio_check <= 0))
+               return false;
 
-       if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
-           (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
-               DRM_ERROR("Unclaimed register before interrupt\n");
-               __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+       if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
+               DRM_DEBUG("Unclaimed register detected, "
+                         "enabling oneshot unclaimed register reporting. "
+                         "Please use i915.mmio_debug=N for more information.\n");
+               i915.mmio_debug++;
+               dev_priv->uncore.unclaimed_mmio_check--;
+               return true;
        }
+
+       return false;
 }
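A small stand-alone model of the one-shot arming logic added above: mmio_debug is bumped once so the header/footer checks start reporting, and unclaimed_mmio_check keeps it from re-arming. The variable names mirror the diff; the harness itself is made up.

#include <stdbool.h>
#include <stdio.h>

/* toy state mirroring i915.mmio_debug and uncore.unclaimed_mmio_check */
static int mmio_debug;
static int unclaimed_mmio_check = 1;

static bool unclaimed_mmio_seen(void)
{
	return true;	/* pretend the debug register flagged an access */
}

static bool arm_unclaimed_mmio_detection(void)
{
	if (mmio_debug || unclaimed_mmio_check <= 0)
		return false;

	if (unclaimed_mmio_seen()) {
		printf("unclaimed register detected, arming oneshot reporting\n");
		mmio_debug++;
		unclaimed_mmio_check--;
		return true;
	}

	return false;
}

int main(void)
{
	printf("%d\n", arm_unclaimed_mmio_detection());	/* 1: arms once */
	printf("%d\n", arm_unclaimed_mmio_detection());	/* 0: already armed */
	return 0;
}
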
index 2f57d79674175c85c8a5ac1870ac57de3a1b0763..7c4d1250e0710cfd67a6ea65f1ce56662bb30987 100644 (file)
@@ -171,18 +171,6 @@ static void imx_drm_disable_vblank(struct drm_device *drm, unsigned int pipe)
        imx_drm_crtc->imx_drm_helper_funcs.disable_vblank(imx_drm_crtc->crtc);
 }
 
-static void imx_drm_driver_preclose(struct drm_device *drm,
-               struct drm_file *file)
-{
-       int i;
-
-       if (!file->is_master)
-               return;
-
-       for (i = 0; i < MAX_CRTC; i++)
-               imx_drm_disable_vblank(drm, i);
-}
-
 static const struct file_operations imx_drm_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
@@ -463,7 +451,6 @@ static struct drm_driver imx_drm_driver = {
        .load                   = imx_drm_driver_load,
        .unload                 = imx_drm_driver_unload,
        .lastclose              = imx_drm_driver_lastclose,
-       .preclose               = imx_drm_driver_preclose,
        .set_busid              = drm_platform_set_busid,
        .gem_free_object        = drm_gem_cma_free_object,
        .gem_vm_ops             = &drm_gem_cma_vm_ops,
index 30a57185bdb4e41e7ff26a0d4af36577a218b0ea..846b5f558897fb2927731b437e9dabf4e49e6970 100644 (file)
@@ -285,10 +285,6 @@ static int ipu_enable_vblank(struct drm_crtc *crtc)
 
 static void ipu_disable_vblank(struct drm_crtc *crtc)
 {
-       struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
-
-       ipu_crtc->page_flip_event = NULL;
-       ipu_crtc->newfb = NULL;
 }
 
 static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
index a3b54cc7649531c987ed9bd18ba6d2b7087bbe6f..0e1d0c57cd3d10e7a745e67df24adb0a09a3f2d5 100644 (file)
@@ -41,6 +41,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
                                (adreno_gpu->rev.major << 16) |
                                (adreno_gpu->rev.core << 24);
                return 0;
+       case MSM_PARAM_MAX_FREQ:
+               *value = adreno_gpu->base.fast_rate;
+               return 0;
        default:
                DBG("%s: invalid param: %u", gpu->name, param);
                return -EINVAL;
index 28df397c3b040a9d6575ad541ef36adb1703468c..909d74250de7c574a19457df0c98389713f31246 100644 (file)
@@ -575,13 +575,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
        return mdp4_crtc->vblank.irqmask;
 }
 
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-       struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       DBG("%s: cancel: %p", mdp4_crtc->name, file);
-       complete_flip(crtc, file);
-}
-
 /* set dma config, ie. the format the encoder wants. */
 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
 {
index 5a8e3d6bcbffb319dcca5cbf8dd7e15fb326718e..1c8e330f8d989ec5918b13da6508cb188c43429e 100644 (file)
@@ -179,16 +179,6 @@ static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
        }
 }
 
-static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
-{
-       struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
-       struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
-       unsigned i;
-
-       for (i = 0; i < priv->num_crtcs; i++)
-               mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
-}
-
 static void mdp4_destroy(struct msm_kms *kms)
 {
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
@@ -213,7 +203,6 @@ static const struct mdp_kms_funcs kms_funcs = {
                .wait_for_crtc_commit_done = mdp4_wait_for_crtc_commit_done,
                .get_format      = mdp_get_format,
                .round_pixclk    = mdp4_round_pixclk,
-               .preclose        = mdp4_preclose,
                .destroy         = mdp4_destroy,
        },
        .set_irqmask         = mdp4_set_irqmask,
index d2c96ef431f4019a5135fd2966587fe51f5baf5e..9ec53b464662d969d869b86878c5bf12850c3f7e 100644 (file)
@@ -199,7 +199,6 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
                enum mdp4_pipe pipe_id, bool private_plane);
 
 uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
-void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
 void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
 void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc);
index 20cee5ce4071e4a0a4d2d101b3d70965f8cd6b13..46682aa8870c1368986c959cda280362be4bfd8a 100644 (file)
@@ -721,12 +721,6 @@ uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
        return mdp5_crtc->vblank.irqmask;
 }
 
-void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-       DBG("cancel: %p", file);
-       complete_flip(crtc, file);
-}
-
 void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
                struct mdp5_interface *intf, struct mdp5_ctl *ctl)
 {
index e115318402bd877a3f84c0cba4f77ba82657b7a2..5e4d16b399c7b1cafd99bcc26ee531953d9d8456 100644 (file)
@@ -117,16 +117,6 @@ static int mdp5_set_split_display(struct msm_kms *kms,
                return mdp5_encoder_set_split_display(encoder, slave_encoder);
 }
 
-static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
-{
-       struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
-       struct msm_drm_private *priv = mdp5_kms->dev->dev_private;
-       unsigned i;
-
-       for (i = 0; i < priv->num_crtcs; i++)
-               mdp5_crtc_cancel_pending_flip(priv->crtcs[i], file);
-}
-
 static void mdp5_destroy(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -164,7 +154,6 @@ static const struct mdp_kms_funcs kms_funcs = {
                .get_format      = mdp_get_format,
                .round_pixclk    = mdp5_round_pixclk,
                .set_split_display = mdp5_set_split_display,
-               .preclose        = mdp5_preclose,
                .destroy         = mdp5_destroy,
        },
        .set_irqmask         = mdp5_set_irqmask,
index 00730ba08a60ac5754176357880ca4f3c0be0c54..9a25898239d3d06d62519f1d990f10be081da559 100644 (file)
@@ -211,7 +211,6 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
 
 int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
 void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
                struct mdp5_interface *intf, struct mdp5_ctl *ctl);
 void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
index d95af6eba6020ff0f7368c4a211cc21dcdd21eff..e119c2979509744adc61998cf4ec4ccbbe60c14b 100644 (file)
@@ -65,9 +65,6 @@ static int msm_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
        struct drm_device *dev = helper->dev;
        int ret = 0;
 
-       if (drm_device_is_unplugged(dev))
-               return -ENODEV;
-
        ret = drm_gem_mmap_obj(drm_obj, drm_obj->size, vma);
        if (ret) {
                pr_err("%s:drm_gem_mmap_obj fail\n", __func__);
index d5e6938cc6bc06e9e2903a81da002289ae9e5688..cdf522770cfaf508486d928c62ad0bea6cd4453c 100644 (file)
@@ -314,7 +314,7 @@ void nouveau_register_dsm_handler(void)
        if (!r)
                return;
 
-       vga_switcheroo_register_handler(&nouveau_dsm_handler);
+       vga_switcheroo_register_handler(&nouveau_dsm_handler, 0);
 }
 
 /* Must be called for Optimus models before the card can be turned off */
index fcebfae5d42680802b3dea19d63915cb92a157be..ae96ebc490fb2b80cbed26bacca113325c52fc82 100644 (file)
@@ -27,6 +27,7 @@
 #include <acpi/button.h>
 
 #include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
 
 #include <drm/drmP.h>
 #include <drm/drm_edid.h>
@@ -153,6 +154,17 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
                        if (ret == 0)
                                break;
                } else
+               if ((vga_switcheroo_handler_flags() &
+                    VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
+                   nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
+                   nv_encoder->i2c) {
+                       int ret;
+                       vga_switcheroo_lock_ddc(dev->pdev);
+                       ret = nvkm_probe_i2c(nv_encoder->i2c, 0x50);
+                       vga_switcheroo_unlock_ddc(dev->pdev);
+                       if (ret)
+                               break;
+               } else
                if (nv_encoder->i2c) {
                        if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
                                break;
@@ -265,7 +277,14 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 
        nv_encoder = nouveau_connector_ddc_detect(connector);
        if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
-               nv_connector->edid = drm_get_edid(connector, i2c);
+               if ((vga_switcheroo_handler_flags() &
+                    VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
+                   nv_connector->type == DCB_CONNECTOR_LVDS)
+                       nv_connector->edid = drm_get_edid_switcheroo(connector,
+                                                                    i2c);
+               else
+                       nv_connector->edid = drm_get_edid(connector, i2c);
+
                drm_mode_connector_update_edid_property(connector,
                                                        nv_connector->edid);
                if (!nv_connector->edid) {
index 2f2f252e3fb68e3528467049dc32df51aa45b07b..bb8498c9b13ed2873b757c7bb913975ac39a7dfe 100644 (file)
  * Authors: Ben Skeggs
  */
 
+#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 
 #include "drmP.h"
@@ -312,6 +314,15 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
        bool boot = false;
        int ret;
 
+       /*
+        * apple-gmux is needed on dual GPU MacBook Pro
+        * to probe the panel if we're the inactive GPU.
+        */
+       if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
+           apple_gmux_present() && pdev != vga_default_device() &&
+           !vga_switcheroo_handler_flags())
+               return -EPROBE_DEFER;
+
        /* remove conflicting drivers (vesafb, efifb etc) */
        aper = alloc_apertures(3);
        if (!aper)
index 2ed0754ed19edc586c2a70c1bdc01aecb8593aa1..d38fcbcc43a82383a4b4e5fdd4405384f140f0e6 100644 (file)
@@ -269,18 +269,7 @@ static void omap_crtc_complete_page_flip(struct drm_crtc *crtc)
                return;
 
        spin_lock_irqsave(&dev->event_lock, flags);
-
-       list_del(&event->base.link);
-
-       /*
-        * Queue the event for delivery if it's still linked to a file
-        * handle, otherwise just destroy it.
-        */
-       if (event->base.file_priv)
-               drm_crtc_send_vblank_event(crtc, event);
-       else
-               event->base.destroy(&event->base);
-
+       drm_crtc_send_vblank_event(crtc, event);
        spin_unlock_irqrestore(&dev->event_lock, flags);
 }
 
index dfafdb602ad2a058cde0cf416bf32f4b53ccd7bf..33370f42e4d7a0c85cd03684e93c49fd5ad5b6b0 100644 (file)
@@ -142,7 +142,6 @@ static int omap_atomic_commit(struct drm_device *dev,
 {
        struct omap_drm_private *priv = dev->dev_private;
        struct omap_atomic_state_commit *commit;
-       unsigned long flags;
        unsigned int i;
        int ret;
 
@@ -175,17 +174,6 @@ static int omap_atomic_commit(struct drm_device *dev,
        priv->commit.pending |= commit->crtcs;
        spin_unlock(&priv->commit.lock);
 
-       /* Keep track of all CRTC events to unlink them in preclose(). */
-       spin_lock_irqsave(&dev->event_lock, flags);
-       for (i = 0; i < dev->mode_config.num_crtc; ++i) {
-               struct drm_crtc_state *cstate = state->crtc_states[i];
-
-               if (cstate && cstate->event)
-                       list_add_tail(&cstate->event->base.link,
-                                     &priv->commit.events);
-       }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-
        /* Swap the state, this is the point of no return. */
        drm_atomic_helper_swap_state(dev, state);
 
@@ -673,7 +661,6 @@ static int dev_load(struct drm_device *dev, unsigned long flags)
        priv->wq = alloc_ordered_workqueue("omapdrm", 0);
        init_waitqueue_head(&priv->commit.wait);
        spin_lock_init(&priv->commit.lock);
-       INIT_LIST_HEAD(&priv->commit.events);
 
        spin_lock_init(&priv->list_lock);
        INIT_LIST_HEAD(&priv->obj_list);
@@ -787,33 +774,6 @@ static void dev_lastclose(struct drm_device *dev)
        }
 }
 
-static void dev_preclose(struct drm_device *dev, struct drm_file *file)
-{
-       struct omap_drm_private *priv = dev->dev_private;
-       struct drm_pending_event *event;
-       unsigned long flags;
-
-       DBG("preclose: dev=%p", dev);
-
-       /*
-        * Unlink all pending CRTC events to make sure they won't be queued up
-        * by a pending asynchronous commit.
-        */
-       spin_lock_irqsave(&dev->event_lock, flags);
-       list_for_each_entry(event, &priv->commit.events, link) {
-               if (event->file_priv == file) {
-                       file->event_space += event->event->length;
-                       event->file_priv = NULL;
-               }
-       }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
-static void dev_postclose(struct drm_device *dev, struct drm_file *file)
-{
-       DBG("postclose: dev=%p, file=%p", dev, file);
-}
-
 static const struct vm_operations_struct omap_gem_vm_ops = {
        .fault = omap_gem_fault,
        .open = drm_gem_vm_open,
@@ -838,8 +798,6 @@ static struct drm_driver omap_drm_driver = {
        .unload = dev_unload,
        .open = dev_open,
        .lastclose = dev_lastclose,
-       .preclose = dev_preclose,
-       .postclose = dev_postclose,
        .set_busid = drm_platform_set_busid,
        .get_vblank_counter = drm_vblank_no_hw_counter,
        .enable_vblank = omap_irq_enable_vblank,
index 9e0030731c37c3a8e8f36b3ba2c79caf56e1d973..c23cbe6fe9e4478dd7704d9c1a711be5a49f17d0 100644 (file)
@@ -106,7 +106,6 @@ struct omap_drm_private {
 
        /* atomic commit */
        struct {
-               struct list_head events;
                wait_queue_head_t wait;
                u32 pending;
                spinlock_t lock;        /* Protects commit.pending */
index 27c297672076b058128e67e452b7ee1a47e7eb59..aebae1c2dab2ac8dac11222f31e12847b1273e20 100644 (file)
@@ -79,7 +79,7 @@ static void omap_gem_dmabuf_release(struct dma_buf *buffer)
 
 
 static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
-               size_t start, size_t len, enum dma_data_direction dir)
+               enum dma_data_direction dir)
 {
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;
@@ -94,7 +94,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
 }
 
 static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
-               size_t start, size_t len, enum dma_data_direction dir)
+               enum dma_data_direction dir)
 {
        struct drm_gem_object *obj = buffer->priv;
        omap_gem_put_pages(obj);
index 6bfc46369db1b4f8daa02fba453a382e863a3585..367a916f364e94617a1b15fcd7d78cd3f90c8dc6 100644 (file)
@@ -304,18 +304,10 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
                unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
                        DENTIST_DPREFCLK_WDIVIDER_MASK) >>
                        DENTIST_DPREFCLK_WDIVIDER_SHIFT;
-
-               if (div < 128 && div >= 96)
-                       div -= 64;
-               else if (div >= 64)
-                       div = div / 2 - 16;
-               else if (div >= 8)
-                       div /= 4;
-               else
-                       div = 0;
+               div = radeon_audio_decode_dfs_div(div);
 
                if (div)
-                       clock = rdev->clock.gpupll_outputfreq * 10 / div;
+                       clock = clock * 100 / div;
 
                WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
                WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
index 9953356fe2637cfacdc2ba41e8ecd082d65213ff..3cf04a2f44bbb05169264a504349dec3d949e5bb 100644 (file)
@@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
         * number (coefficient of two integer numbers.  DCCG_AUDIO_DTOx_PHASE
         * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
         */
+       if (ASIC_IS_DCE41(rdev)) {
+               unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
+                       DENTIST_DPREFCLK_WDIVIDER_MASK) >>
+                       DENTIST_DPREFCLK_WDIVIDER_SHIFT;
+               div = radeon_audio_decode_dfs_div(div);
+
+               if (div)
+                       clock = 100 * clock / div;
+       }
+
        WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
        WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
 }
index 4aa5f755572b1593a8b6f7876cf5f7aed183715d..13b6029d65cc524528aec6a1f1727b9d695aa4af 100644 (file)
 #define DCCG_AUDIO_DTO1_CNTL              0x05cc
 #       define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
 
+#define DCE41_DENTIST_DISPCLK_CNTL                     0x049c
+#       define DENTIST_DPREFCLK_WDIVIDER(x)            (((x) & 0x7f) << 24)
+#       define DENTIST_DPREFCLK_WDIVIDER_MASK          (0x7f << 24)
+#       define DENTIST_DPREFCLK_WDIVIDER_SHIFT         24
+
 /* DCE 4.0 AFMT */
 #define HDMI_CONTROL                         0x7030
 #       define HDMI_KEEPOUT_MODE             (1 << 0)
index 5ae6db98aa4d1eae63990bbd5d8cff1b82dfc6e3..78a51b3eda10c4d2931c3c783221ad0625d9a9c9 100644 (file)
@@ -268,7 +268,7 @@ struct radeon_clock {
        uint32_t current_dispclk;
        uint32_t dp_extclk;
        uint32_t max_pixel_clock;
-       uint32_t gpupll_outputfreq;
+       uint32_t vco_freq;
 };
 
 /*
index 08fc1b5effa8a5ec21d4f6234bc168c5b2508b36..de9a2ffcf5f762539d6d4c92464ed5eff19e1c42 100644 (file)
@@ -1106,6 +1106,31 @@ union firmware_info {
        ATOM_FIRMWARE_INFO_V2_2 info_22;
 };
 
+union igp_info {
+       struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
+{
+       struct radeon_mode_info *mode_info = &rdev->mode_info;
+       int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+       union igp_info *igp_info;
+       u8 frev, crev;
+       u16 data_offset;
+
+       if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+                       &frev, &crev, &data_offset)) {
+               igp_info = (union igp_info *)(mode_info->atom_context->bios +
+                       data_offset);
+               rdev->clock.vco_freq =
+                       le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
+       }
+}
+
 bool radeon_atom_get_clock_info(struct drm_device *dev)
 {
        struct radeon_device *rdev = dev->dev_private;
@@ -1257,12 +1282,18 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                rdev->mode_info.firmware_flags =
                        le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
 
-               if (ASIC_IS_DCE8(rdev)) {
-                       rdev->clock.gpupll_outputfreq =
+               if (ASIC_IS_DCE8(rdev))
+                       rdev->clock.vco_freq =
                                le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
-                       if (rdev->clock.gpupll_outputfreq == 0)
-                               rdev->clock.gpupll_outputfreq = 360000; /* 3.6 GHz */
-               }
+               else if (ASIC_IS_DCE5(rdev))
+                       rdev->clock.vco_freq = rdev->clock.current_dispclk;
+               else if (ASIC_IS_DCE41(rdev))
+                       radeon_atombios_get_dentist_vco_freq(rdev);
+               else
+                       rdev->clock.vco_freq = rdev->clock.current_dispclk;
+
+               if (rdev->clock.vco_freq == 0)
+                       rdev->clock.vco_freq = 360000;  /* 3.6 GHz */
 
                return true;
        }
@@ -1270,14 +1301,6 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
        return false;
 }
 
-union igp_info {
-       struct _ATOM_INTEGRATED_SYSTEM_INFO info;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
-       struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
-};
-
 bool radeon_atombios_sideport_present(struct radeon_device *rdev)
 {
        struct radeon_mode_info *mode_info = &rdev->mode_info;
index c4b4f298a2831a2ca3723bebe3ef8834bc6a6f3c..56482e35d43e334d7b2a8bc170a241f4a5643d96 100644 (file)
@@ -551,13 +551,14 @@ static bool radeon_atpx_detect(void)
 void radeon_register_atpx_handler(void)
 {
        bool r;
+       enum vga_switcheroo_handler_flags_t handler_flags = 0;
 
        /* detect if we have any ATPX + 2 VGA in the system */
        r = radeon_atpx_detect();
        if (!r)
                return;
 
-       vga_switcheroo_register_handler(&radeon_atpx_handler);
+       vga_switcheroo_register_handler(&radeon_atpx_handler, handler_flags);
 }
 
 /**
index 2c02e99b5f95a65a26357f26d67e91a45f8224a4..b214663b370da00905faa882ea27513153aa91bd 100644 (file)
@@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
        struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
        struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
-       struct radeon_connector *radeon_connector = to_radeon_connector(connector);
-       struct radeon_connector_atom_dig *dig_connector =
-               radeon_connector->con_priv;
 
        if (!dig || !dig->afmt)
                return;
@@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
                radeon_audio_write_speaker_allocation(encoder);
                radeon_audio_write_sad_regs(encoder);
                radeon_audio_write_latency_fields(encoder, mode);
-               if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
-                       radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
-               else
-                       radeon_audio_set_dto(encoder, dig_connector->dp_clock);
+               radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
                radeon_audio_set_audio_packet(encoder);
                radeon_audio_select_pin(encoder);
 
@@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
        if (radeon_encoder->audio && radeon_encoder->audio->dpms)
                radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
 }
+
+unsigned int radeon_audio_decode_dfs_div(unsigned int div)
+{
+       if (div >= 8 && div < 64)
+               return (div - 8) * 25 + 200;
+       else if (div >= 64 && div < 96)
+               return (div - 64) * 50 + 1600;
+       else if (div >= 96 && div < 128)
+               return (div - 96) * 100 + 3200;
+       else
+               return 0;
+}
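The new helper decodes the DENTIST DFS divider field into the divider scaled by 100, so the DTO callers above can compute clock * 100 / div for fractional dividers. A stand-alone check of the mapping (sample values only):

#include <stdio.h>

/* same piecewise mapping as radeon_audio_decode_dfs_div() above */
static unsigned int decode_dfs_div(unsigned int div)
{
	if (div >= 8 && div < 64)
		return (div - 8) * 25 + 200;
	else if (div >= 64 && div < 96)
		return (div - 64) * 50 + 1600;
	else if (div >= 96 && div < 128)
		return (div - 96) * 100 + 3200;
	else
		return 0;
}

int main(void)
{
	unsigned int clock = 360000;	/* illustrative source clock value */
	unsigned int raw[] = { 8, 72, 100 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		unsigned int div = decode_dfs_div(raw[i]);

		/* the DTO callers above then compute clock * 100 / div */
		printf("raw=%u decoded=%u dto=%u\n",
		       raw[i], div, div ? clock * 100 / div : 0);
	}
	return 0;
}
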
index 059cc3012062a7b50708620c557771dcfea821c0..5c70cceaa4a6c35e673ff9c50306e36e7f93ff3d 100644 (file)
@@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
 void radeon_audio_mode_set(struct drm_encoder *encoder,
        struct drm_display_mode *mode);
 void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
+unsigned int radeon_audio_decode_dfs_div(unsigned int div);
 
 #endif
index a9b01bcf7d0a2242cf181ef31a77cf9150e6e8f8..432480ff9d228857d57170b3353c143bf0501c3f 100644 (file)
@@ -34,7 +34,6 @@
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #endif /* CONFIG_PPC_PMAC */
 
 /* from radeon_legacy_encoder.c */
index 340f3f549f295314788fbf9b3052880b8095a7f1..cfcc099c537d0475da7eb6d6ce4576cc7a116b98 100644 (file)
@@ -34,6 +34,7 @@
 #include "atom.h"
 
 #include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
 
 static int radeon_dp_handle_hpd(struct drm_connector *connector)
 {
@@ -344,6 +345,11 @@ static void radeon_connector_get_edid(struct drm_connector *connector)
                else if (radeon_connector->ddc_bus)
                        radeon_connector->edid = drm_get_edid(&radeon_connector->base,
                                                              &radeon_connector->ddc_bus->adapter);
+       } else if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC &&
+                  connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+                  radeon_connector->ddc_bus) {
+               radeon_connector->edid = drm_get_edid_switcheroo(&radeon_connector->base,
+                                                                &radeon_connector->ddc_bus->adapter);
        } else if (radeon_connector->ddc_bus) {
                radeon_connector->edid = drm_get_edid(&radeon_connector->base,
                                                      &radeon_connector->ddc_bus->adapter);
index b3bb92368ae0e98372e55caab123fb720ecdc48d..298ea1c453c3638fecdf6cef5afce1a48fdefa82 100644 (file)
@@ -1670,8 +1670,10 @@ int radeon_modeset_init(struct radeon_device *rdev)
        /* setup afmt */
        radeon_afmt_init(rdev);
 
-       radeon_fbdev_init(rdev);
-       drm_kms_helper_poll_init(rdev->ddev);
+       if (!list_empty(&rdev->ddev->mode_config.connector_list)) {
+               radeon_fbdev_init(rdev);
+               drm_kms_helper_poll_init(rdev->ddev);
+       }
 
        /* do pm late init */
        ret = radeon_pm_late_init(rdev);
index e266ffc520d264178cd9bb7569c35b3626235396..cad25557650f054746b957bc04ea8a69f3a83e01 100644 (file)
 #include "radeon_drv.h"
 
 #include <drm/drm_pciids.h>
+#include <linux/apple-gmux.h>
 #include <linux/console.h>
 #include <linux/module.h>
 #include <linux/pm_runtime.h>
+#include <linux/vgaarb.h>
 #include <linux/vga_switcheroo.h>
 #include <drm/drm_gem.h>
 
@@ -319,6 +321,15 @@ static int radeon_pci_probe(struct pci_dev *pdev,
 {
        int ret;
 
+       /*
+        * apple-gmux is needed on dual GPU MacBook Pro
+        * to probe the panel if we're the inactive GPU.
+        */
+       if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
+           apple_gmux_present() && pdev != vga_default_device() &&
+           !vga_switcheroo_handler_flags())
+               return -EPROBE_DEFER;
+
        /* Get rid of things like offb */
        ret = radeon_kick_out_firmware_fb(pdev);
        if (ret)
index 3dcc5733ff6915b2e2497ca3d4ff800455f49c20..e26c963f2e9304e58f25517114e773b3c85242bd 100644 (file)
@@ -663,6 +663,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
        bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
        if (!bo_va) {
                args->operation = RADEON_VA_RESULT_ERROR;
+               radeon_bo_unreserve(rbo);
                drm_gem_object_unreference_unlocked(gobj);
                return -ENOENT;
        }
index 84d45633d28c9b1c58f58dc6ec6383eba2207230..fb6ad143873f5b40d2c027a20dd25914dbd5e29e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/slab.h>
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
+#include <drm/drm_cache.h>
 #include "radeon.h"
 #include "radeon_trace.h"
 
@@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
                DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
                              "better performance thanks to write-combining\n");
        bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#else
+       /* For architectures that don't support WC memory,
+        * mask out the WC flag from the BO
+        */
+       if (!drm_arch_can_wc_memory())
+               bo->flags &= ~RADEON_GEM_GTT_WC;
 #endif
 
        radeon_ttm_placement_from_domain(bo, domain);
index 07a0d378e122b1b206b8aceb1a97234254d00e74..a01efe39a8206ec1b1d1fd26a3192c5d72abb885 100644 (file)
@@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
                return -EINVAL;
        }
 
-       for (i = 0; i < sign->num; ++i) {
-               if (sign->val[i].chip_id == chip_id)
+       for (i = 0; i < le32_to_cpu(sign->num); ++i) {
+               if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
                        break;
        }
 
-       if (i == sign->num)
+       if (i == le32_to_cpu(sign->num))
                return -EINVAL;
 
        data += (256 - 64) / 4;
@@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
        data[1] = sign->val[i].nonce[1];
        data[2] = sign->val[i].nonce[2];
        data[3] = sign->val[i].nonce[3];
-       data[4] = sign->len + 64;
+       data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
 
        memset(&data[5], 0, 44);
        memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
 
-       data += data[4] / 4;
+       data += le32_to_cpu(data[4]) / 4;
        data[0] = sign->val[i].sigval[0];
        data[1] = sign->val[i].sigval[1];
        data[2] = sign->val[i].sigval[2];
        data[3] = sign->val[i].sigval[3];
 
-       rdev->vce.keyselect = sign->val[i].keyselect;
+       rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
 
        return 0;
 }
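The hunk above treats the VCE signature fields as little-endian so the loader also behaves on big-endian hosts. A minimal user-space illustration of that conversion, using a hand-rolled helper rather than the kernel's le32_to_cpu():

#include <stdint.h>
#include <stdio.h>

/* decode a little-endian 32-bit value regardless of host endianness */
static uint32_t le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* 0x00000003 stored little-endian, e.g. a sign->num style field */
	const uint8_t raw[4] = { 0x03, 0x00, 0x00, 0x00 };

	printf("num = %u\n", le32(raw));	/* 3 on both LE and BE hosts */
	return 0;
}
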
index 88a4b706be169ff81c279137ac3d2df62df8fb97..4ec80ae1fa99cfd002ecc39c5ca527f691a23413 100644 (file)
@@ -282,26 +282,6 @@ static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
  * Page Flip
  */
 
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
-                                  struct drm_file *file)
-{
-       struct drm_pending_vblank_event *event;
-       struct drm_device *dev = rcrtc->crtc.dev;
-       unsigned long flags;
-
-       /* Destroy the pending vertical blanking event associated with the
-        * pending page flip, if any, and disable vertical blanking interrupts.
-        */
-       spin_lock_irqsave(&dev->event_lock, flags);
-       event = rcrtc->event;
-       if (event && event->base.file_priv == file) {
-               rcrtc->event = NULL;
-               event->base.destroy(&event->base);
-               drm_crtc_vblank_put(&rcrtc->crtc);
-       }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
 {
        struct drm_pending_vblank_event *event;
index 4b95d9d08c4991ad03f159a2c6c54086c781592f..2bbe3f5aab65e0cb86d2df0e4333e569a97996b5 100644 (file)
@@ -67,8 +67,6 @@ enum rcar_du_output {
 
 int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
 void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
-                                  struct drm_file *file);
 void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
 void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
 
index 40422f6b645e0c9a7c1589d027cca373688a3feb..0bb2b31555bf16a3718fec1299e36418aafe5e24 100644 (file)
@@ -220,15 +220,6 @@ done:
        return ret;
 }
 
-static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
-{
-       struct rcar_du_device *rcdu = dev->dev_private;
-       unsigned int i;
-
-       for (i = 0; i < rcdu->num_crtcs; ++i)
-               rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
-}
-
 static void rcar_du_lastclose(struct drm_device *dev)
 {
        struct rcar_du_device *rcdu = dev->dev_private;
@@ -271,7 +262,6 @@ static struct drm_driver rcar_du_driver = {
                                | DRIVER_ATOMIC,
        .load                   = rcar_du_load,
        .unload                 = rcar_du_unload,
-       .preclose               = rcar_du_preclose,
        .lastclose              = rcar_du_lastclose,
        .set_busid              = drm_platform_set_busid,
        .get_vblank_counter     = drm_vblank_no_hw_counter,
index d1dc0f7b01dbdf48ad97e2829278b7f61bb6efea..f6a809afceec2e35237c855df8d2803a09674147 100644 (file)
@@ -2,11 +2,11 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
-rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
-               rockchip_drm_gem.o
+rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
+               rockchip_drm_gem.o rockchip_drm_vop.o
+rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
 obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
 obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
 
-obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o \
-                               rockchip_vop_reg.o
+obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_vop_reg.o
index 7bfe243c61737bd112cf591366611ce545e86598..f8f8f29fb7c336d8fffe1a74bddbad66e01854f6 100644 (file)
@@ -461,10 +461,11 @@ static int dw_mipi_dsi_phy_init(struct dw_mipi_dsi *dsi)
 
 static int dw_mipi_dsi_get_lane_bps(struct dw_mipi_dsi *dsi)
 {
-       unsigned int bpp, i, pre;
+       unsigned int i, pre;
        unsigned long mpclk, pllref, tmp;
        unsigned int m = 1, n = 1, target_mbps = 1000;
        unsigned int max_mbps = dptdin_map[ARRAY_SIZE(dptdin_map) - 1].max_mbps;
+       int bpp;
 
        bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
        if (bpp < 0) {
index 8397d1b62ef9460910bb28c96e0675a53f5d1b50..a0d51ccb6ea449802a0ff4fac1bcd8b4cf510d3a 100644 (file)
@@ -55,14 +55,12 @@ int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
 
        return arm_iommu_attach_device(dev, mapping);
 }
-EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
 
 void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
                                    struct device *dev)
 {
        arm_iommu_detach_device(dev);
 }
-EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
 
 int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
                                 const struct rockchip_crtc_funcs *crtc_funcs)
@@ -77,7 +75,6 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
 
 void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
 {
@@ -89,7 +86,6 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
 
        priv->crtc_funcs[pipe] = NULL;
 }
-EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
 
 static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
                                                int pipe)
index f7844883cb76cb8cb00c434aaeb9d85d63b0160a..3b8f652698f828574c19f624264e816ab1299ee9 100644 (file)
@@ -39,7 +39,6 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
 
        return rk_fb->obj[plane];
 }
-EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
 
 static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
 {
@@ -177,8 +176,23 @@ static void rockchip_crtc_wait_for_update(struct drm_crtc *crtc)
                crtc_funcs->wait_for_update(crtc);
 }
 
+/*
+ * We can't use drm_atomic_helper_wait_for_vblanks() because rk3288 and rk3066
+ * have hardware counters for neither vblanks nor scanlines, which results in
+ * a race where:
+ *                             | <-- HW vsync irq and reg take effect
+ *            plane_commit --> |
+ *     get_vblank and wait --> |
+ *                             | <-- handle_vblank, vblank->count + 1
+ *              cleanup_fb --> |
+ *             iommu crash --> |
+ *                             | <-- HW vsync irq and reg take effect
+ *
+ * This function is equivalent but uses rockchip_crtc_wait_for_update() instead
+ * of waiting for vblank_count to change.
+ */
 static void
-rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
+rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_state *old_state)
 {
        struct drm_crtc_state *old_crtc_state;
        struct drm_crtc *crtc;
@@ -194,6 +208,10 @@ rockchip_atomic_wait_for_complete(struct drm_atomic_state *old_state)
                if (!crtc->state->active)
                        continue;
 
+               if (!drm_atomic_helper_framebuffer_changed(dev,
+                               old_state, crtc))
+                       continue;
+
                ret = drm_crtc_vblank_get(crtc);
                if (ret != 0)
                        continue;
@@ -241,7 +259,7 @@ rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit)
 
        drm_atomic_helper_commit_planes(dev, state, true);
 
-       rockchip_atomic_wait_for_complete(state);
+       rockchip_atomic_wait_for_complete(dev, state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
 
index 50432e9b5b37f38fcb3c3a10cf28573bf2d59163..73718c5f5bbf720ca6151fc44ca670fc8646c6a9 100644 (file)
 #ifndef _ROCKCHIP_DRM_FBDEV_H
 #define _ROCKCHIP_DRM_FBDEV_H
 
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 int rockchip_drm_fbdev_init(struct drm_device *dev);
 void rockchip_drm_fbdev_fini(struct drm_device *dev);
+#else
+static inline int rockchip_drm_fbdev_init(struct drm_device *dev)
+{
+       return 0;
+}
+
+static inline void rockchip_drm_fbdev_fini(struct drm_device *dev)
+{
+}
+#endif
 
 #endif /* _ROCKCHIP_DRM_FBDEV_H */
index d908321b94ced4ffe10936f4c7ef37fc518d127a..18e07338c6e5f31f45b9a38c17884164fc476cff 100644 (file)
@@ -234,13 +234,8 @@ int rockchip_gem_dumb_create(struct drm_file *file_priv,
        /*
         * align to 64 bytes since Mali requires it.
         */
-       min_pitch = ALIGN(min_pitch, 64);
-
-       if (args->pitch < min_pitch)
-               args->pitch = min_pitch;
-
-       if (args->size < args->pitch * args->height)
-               args->size = args->pitch * args->height;
+       args->pitch = ALIGN(min_pitch, 64);
+       args->size = args->pitch * args->height;
 
        rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
                                                 &args->handle);
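With the change above the reported pitch and size are always the 64-byte-aligned values rather than caller-supplied minima. A quick stand-alone calculation; min_pitch is derived here simply as width times bytes per pixel, and the numbers are made up.

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int width = 1366, height = 768, bpp = 32;
	unsigned int min_pitch = width * ((bpp + 7) / 8);	/* 5464 bytes */
	unsigned int pitch = ALIGN(min_pitch, 64);		/* 5504 bytes */
	unsigned long size = (unsigned long)pitch * height;	/* 4227072 bytes */

	printf("pitch=%u size=%lu\n", pitch, size);
	return 0;
}
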
index 46c2a8dfd8aa78b22caf02d9b1e217b8d011dd25..fd370548d7d75dc1990226110cf24ea0353a002c 100644 (file)
@@ -43,8 +43,8 @@
 
 #define REG_SET(x, base, reg, v, mode) \
                __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
-#define REG_SET_MASK(x, base, reg, v, mode) \
-               __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+#define REG_SET_MASK(x, base, reg, mask, v, mode) \
+               __REG_SET_##mode(x, base + reg.offset, mask, reg.shift, v)
 
 #define VOP_WIN_SET(x, win, name, v) \
                REG_SET(x, win->base, win->phy->name, v, RELAXED)
 #define VOP_INTR_GET(vop, name) \
                vop_read_reg(vop, 0, &vop->data->ctrl->name)
 
-#define VOP_INTR_SET(vop, name, v) \
-               REG_SET(vop, 0, vop->data->intr->name, v, NORMAL)
+#define VOP_INTR_SET(vop, name, mask, v) \
+               REG_SET_MASK(vop, 0, vop->data->intr->name, mask, v, NORMAL)
 #define VOP_INTR_SET_TYPE(vop, name, type, v) \
        do { \
-               int i, reg = 0; \
+               int i, reg = 0, mask = 0; \
                for (i = 0; i < vop->data->intr->nintrs; i++) { \
-                       if (vop->data->intr->intrs[i] & type) \
+                       if (vop->data->intr->intrs[i] & type) { \
                                reg |= (v) << i; \
+                               mask |= 1 << i; \
+                       } \
                } \
-               VOP_INTR_SET(vop, name, reg); \
+               VOP_INTR_SET(vop, name, mask, reg); \
        } while (0)
 #define VOP_INTR_GET_TYPE(vop, name, type) \
                vop_get_intr_type(vop, &vop->data->intr->name, type)
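With the added mask, VOP_INTR_SET_TYPE only writes the interrupt bits matching the requested type. A stand-alone model of the loop with generic bit values; the real intrs[] table is hardware-specific.

#include <stdio.h>

int main(void)
{
	/* one bit per interrupt source, as in the vop intr->intrs[] table */
	unsigned int intrs[] = { 1 << 0, 1 << 1, 1 << 2, 1 << 3 };
	unsigned int type = (1 << 1) | (1 << 3);	/* enable these two */
	unsigned int v = 1, reg = 0, mask = 0;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		if (intrs[i] & type) {
			reg |= v << i;
			mask |= 1 << i;
		}
	}

	/* only bits 1 and 3 are written; the other bits keep their value */
	printf("reg=0x%x mask=0x%x\n", reg, mask);	/* reg=0xa mask=0xa */
	return 0;
}
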
index db0763794edcb62de69a9f45c825a0b79b631820..27342fd76e9052fcfe9f82c495a72e63d48d0fc1 100644 (file)
@@ -438,26 +438,6 @@ static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
        .mode_set_base = shmob_drm_crtc_mode_set_base,
 };
 
-void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
-                                    struct drm_file *file)
-{
-       struct drm_pending_vblank_event *event;
-       struct drm_device *dev = scrtc->crtc.dev;
-       unsigned long flags;
-
-       /* Destroy the pending vertical blanking event associated with the
-        * pending page flip, if any, and disable vertical blanking interrupts.
-        */
-       spin_lock_irqsave(&dev->event_lock, flags);
-       event = scrtc->event;
-       if (event && event->base.file_priv == file) {
-               scrtc->event = NULL;
-               event->base.destroy(&event->base);
-               drm_vblank_put(dev, 0);
-       }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
 {
        struct drm_pending_vblank_event *event;
index eddad6dcc88ab00110057fc27771912f1f14566f..38ed4ff8aaf228118f5e75d03e5f0d894101744c 100644 (file)
@@ -47,8 +47,6 @@ struct shmob_drm_connector {
 
 int shmob_drm_crtc_create(struct shmob_drm_device *sdev);
 void shmob_drm_crtc_enable_vblank(struct shmob_drm_device *sdev, bool enable);
-void shmob_drm_crtc_cancel_page_flip(struct shmob_drm_crtc *scrtc,
-                                    struct drm_file *file);
 void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc);
 void shmob_drm_crtc_suspend(struct shmob_drm_crtc *scrtc);
 void shmob_drm_crtc_resume(struct shmob_drm_crtc *scrtc);
index 04e66e3751b49859c93bc66d1c85f518f564038e..7700ff1720792108e9847bebb4dca522ac45ff45 100644 (file)
@@ -200,13 +200,6 @@ done:
        return ret;
 }
 
-static void shmob_drm_preclose(struct drm_device *dev, struct drm_file *file)
-{
-       struct shmob_drm_device *sdev = dev->dev_private;
-
-       shmob_drm_crtc_cancel_page_flip(&sdev->crtc, file);
-}
-
 static irqreturn_t shmob_drm_irq(int irq, void *arg)
 {
        struct drm_device *dev = arg;
@@ -266,7 +259,6 @@ static struct drm_driver shmob_drm_driver = {
                                | DRIVER_PRIME,
        .load                   = shmob_drm_load,
        .unload                 = shmob_drm_unload,
-       .preclose               = shmob_drm_preclose,
        .set_busid              = drm_platform_set_busid,
        .irq_handler            = shmob_drm_irq,
        .get_vblank_counter     = drm_vblank_no_hw_counter,
index dde6f208c3477211d5a33be0da8efc932da3dea7..fb2b4b0271a2a7a5f14587b9cd8a9a1d9b35ecb4 100644 (file)
@@ -988,23 +988,6 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
        spin_unlock_irqrestore(&drm->event_lock, flags);
 }
 
-void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-       struct tegra_dc *dc = to_tegra_dc(crtc);
-       struct drm_device *drm = crtc->dev;
-       unsigned long flags;
-
-       spin_lock_irqsave(&drm->event_lock, flags);
-
-       if (dc->event && dc->event->base.file_priv == file) {
-               dc->event->base.destroy(&dc->event->base);
-               drm_crtc_vblank_put(crtc);
-               dc->event = NULL;
-       }
-
-       spin_unlock_irqrestore(&drm->event_lock, flags);
-}
-
 static void tegra_dc_destroy(struct drm_crtc *crtc)
 {
        drm_crtc_cleanup(crtc);
index c5c856a0879d49d8f11d1221e53f7fb848cda027..8e6b18caa706d207b0a1ef66e3a5299e54bc3e54 100644 (file)
@@ -858,10 +858,6 @@ static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
 {
        struct tegra_drm_file *fpriv = file->driver_priv;
        struct tegra_drm_context *context, *tmp;
-       struct drm_crtc *crtc;
-
-       list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
-               tegra_dc_cancel_page_flip(crtc, file);
 
        list_for_each_entry_safe(context, tmp, &fpriv->contexts, list)
                tegra_drm_context_free(context);
index c088f2f67eda7cebb50c3c54ac7849fced67d5cb..8a10f5b7d9dce926f41dc402a7feaa4826425d23 100644 (file)
@@ -195,7 +195,6 @@ struct tegra_dc_window {
 u32 tegra_dc_get_vblank_counter(struct tegra_dc *dc);
 void tegra_dc_enable_vblank(struct tegra_dc *dc);
 void tegra_dc_disable_vblank(struct tegra_dc *dc);
-void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
 void tegra_dc_commit(struct tegra_dc *dc);
 int tegra_dc_state_setup_clock(struct tegra_dc *dc,
                               struct drm_crtc_state *crtc_state,
index 7d07733bdc861536bd704b36d8619fb5c0c99880..4802da8e6d6f989cd796b98eb4e32f2012f29bcc 100644 (file)
@@ -662,26 +662,6 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
        return IRQ_HANDLED;
 }
 
-void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-       struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
-       struct drm_pending_vblank_event *event;
-       struct drm_device *dev = crtc->dev;
-       unsigned long flags;
-
-       /* Destroy the pending vertical blanking event associated with the
-        * pending page flip, if any, and disable vertical blanking interrupts.
-        */
-       spin_lock_irqsave(&dev->event_lock, flags);
-       event = tilcdc_crtc->event;
-       if (event && event->base.file_priv == file) {
-               tilcdc_crtc->event = NULL;
-               event->base.destroy(&event->base);
-               drm_vblank_put(dev, 0);
-       }
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
 {
        struct tilcdc_crtc *tilcdc_crtc;
index d7f5b897c6c57a18c70536b39a2265b1af6268c2..8190ac3b1b3225dcffb1fd2944cec9dd5104f36a 100644 (file)
@@ -350,13 +350,6 @@ fail_free_priv:
        return ret;
 }
 
-static void tilcdc_preclose(struct drm_device *dev, struct drm_file *file)
-{
-       struct tilcdc_drm_private *priv = dev->dev_private;
-
-       tilcdc_crtc_cancel_page_flip(priv->crtc, file);
-}
-
 static void tilcdc_lastclose(struct drm_device *dev)
 {
        struct tilcdc_drm_private *priv = dev->dev_private;
@@ -557,7 +550,6 @@ static struct drm_driver tilcdc_driver = {
        .driver_features    = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
        .load               = tilcdc_load,
        .unload             = tilcdc_unload,
-       .preclose           = tilcdc_preclose,
        .lastclose          = tilcdc_lastclose,
        .set_busid          = drm_platform_set_busid,
        .irq_handler        = tilcdc_irq,
index e863ad0d26fe0497d2507dca5734e1b1453a9481..66105d8dc62062a426b7d44849b2a781c31d81be 100644 (file)
@@ -163,7 +163,6 @@ struct tilcdc_panel_info {
 #define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
 
 struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev);
-void tilcdc_crtc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
 irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc);
 void tilcdc_crtc_update_clk(struct drm_crtc *crtc);
 void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
index 200419d4d43cf7fd475729b0fc10e97478359a76..c427499133d6e3ffbc7f777ada41ee8ec1c42139 100644 (file)
@@ -409,7 +409,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 
        if (ufb->obj->base.import_attach) {
                ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
-                                              0, ufb->obj->base.size,
                                               DMA_FROM_DEVICE);
                if (ret)
                        goto unlock;
@@ -425,7 +424,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 
        if (ufb->obj->base.import_attach) {
                dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
-                                      0, ufb->obj->base.size,
                                       DMA_FROM_DEVICE);
        }
 
index 018145e0b87d35ee49377b23805da2435367c682..937409792b97549f882de8e9c043d9dc77084420 100644 (file)
@@ -593,26 +593,6 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
        .atomic_flush = vc4_crtc_atomic_flush,
 };
 
-/* Frees the page flip event when the DRM device is closed with the
- * event still outstanding.
- */
-void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
-{
-       struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->event_lock, flags);
-
-       if (vc4_crtc->event && vc4_crtc->event->base.file_priv == file) {
-               vc4_crtc->event->base.destroy(&vc4_crtc->event->base);
-               drm_crtc_vblank_put(crtc);
-               vc4_crtc->event = NULL;
-       }
-
-       spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
 static const struct vc4_crtc_data pv0_data = {
        .hvs_channel = 0,
        .encoder0_type = VC4_ENCODER_TYPE_DSI0,
index f1655fff8425166e88537c2b700c61de9200509d..b7d2ff0e6e1fb6d9611b7a98b8b1af0303b2ac9d 100644 (file)
@@ -43,14 +43,6 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
        return map;
 }
 
-static void vc4_drm_preclose(struct drm_device *dev, struct drm_file *file)
-{
-       struct drm_crtc *crtc;
-
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
-               vc4_cancel_page_flip(crtc, file);
-}
-
 static void vc4_lastclose(struct drm_device *dev)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -91,8 +83,6 @@ static struct drm_driver vc4_drm_driver = {
                            DRIVER_HAVE_IRQ |
                            DRIVER_PRIME),
        .lastclose = vc4_lastclose,
-       .preclose = vc4_drm_preclose,
-
        .irq_handler = vc4_irq,
        .irq_preinstall = vc4_irq_preinstall,
        .irq_postinstall = vc4_irq_postinstall,
index 080865ec2bae67c7ff7b04a97fdb5956096cc156..4c734d087d7f1bab972b30031f3c31212651c29c 100644 (file)
@@ -376,7 +376,6 @@ int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
 extern struct platform_driver vc4_crtc_driver;
 int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
 void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
-void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
 int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
 
 /* vc4_debugfs.c */
index 424d515ffcdafae650c9d472a02600db7c0a8485..314ff71db978dedfd8272ea0afe0087f1903ca4a 100644 (file)
@@ -144,19 +144,16 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
 }
 #endif /* CONFIG_DEBUG_FS */
 
-/*
- * Asks the firmware to turn on power to the V3D engine.
- *
- * This may be doable with just the clocks interface, though this
- * packet does some other register setup from the firmware, too.
- */
 int
 vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
 {
-       if (on)
-               return pm_generic_poweroff(&vc4->v3d->pdev->dev);
-       else
-               return pm_generic_resume(&vc4->v3d->pdev->dev);
+       /* XXX: This interface is needed for GPU reset, and the way to
+        * do it is to turn our power domain off and back on.  We
+        * can't just reset from within the driver, because the reset
+        * bits are in the power domain's register area, and get set
+        * during the poweron process.
+        */
+       return 0;
 }
 
 static void vc4_v3d_init_hw(struct drm_device *dev)
index b40ed6061f050b1ff817e2f164e784275df84b8f..7f898cfdc7468c6697a50ede409749f3e89fa08b 100644 (file)
@@ -118,7 +118,7 @@ static const struct file_operations virtio_gpu_driver_fops = {
 
 
 static struct drm_driver driver = {
-       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER,
+       .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
        .set_busid = drm_virtio_set_busid,
        .load = virtio_gpu_driver_load,
        .unload = virtio_gpu_driver_unload,
index 572fb351feabe727aea0c1fa6117f0b029068ba8..70b44a2345ab43e2ac1bef8ded76c292ab752884 100644 (file)
@@ -68,10 +68,17 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
        struct virtio_gpu_object *bo;
        uint32_t handle;
 
-       if (plane->fb) {
-               vgfb = to_virtio_gpu_framebuffer(plane->fb);
+       if (plane->state->fb) {
+               vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
                bo = gem_to_virtio_gpu_obj(vgfb->obj);
                handle = bo->hw_res_handle;
+               if (bo->dumb) {
+                       virtio_gpu_cmd_transfer_to_host_2d
+                               (vgdev, handle, 0,
+                                cpu_to_le32(plane->state->crtc_w),
+                                cpu_to_le32(plane->state->crtc_h),
+                                plane->state->crtc_x, plane->state->crtc_y, NULL);
+               }
        } else {
                handle = 0;
        }
@@ -84,6 +91,11 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
                                   plane->state->crtc_h,
                                   plane->state->crtc_x,
                                   plane->state->crtc_y);
+       virtio_gpu_cmd_resource_flush(vgdev, handle,
+                                     plane->state->crtc_x,
+                                     plane->state->crtc_y,
+                                     plane->state->crtc_w,
+                                     plane->state->crtc_h);
 }
 
 
index c49812b80dd0dae82c77096f75fbd4cced19ffb1..0ee76e523a902ac0905502fc838216903d8af83b 100644 (file)
@@ -25,6 +25,7 @@
  *
  **************************************************************************/
 #include <linux/module.h>
+#include <linux/console.h>
 
 #include <drm/drmP.h>
 #include "vmwgfx_drv.h"
@@ -971,15 +972,6 @@ static int vmw_driver_unload(struct drm_device *dev)
        return 0;
 }
 
-static void vmw_preclose(struct drm_device *dev,
-                        struct drm_file *file_priv)
-{
-       struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-       struct vmw_private *dev_priv = vmw_priv(dev);
-
-       vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
-}
-
 static void vmw_postclose(struct drm_device *dev,
                         struct drm_file *file_priv)
 {
@@ -1010,7 +1002,6 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
        if (unlikely(vmw_fp == NULL))
                return ret;
 
-       INIT_LIST_HEAD(&vmw_fp->fence_events);
        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;
@@ -1500,7 +1491,6 @@ static struct drm_driver driver = {
        .master_set = vmw_master_set,
        .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
-       .preclose = vmw_preclose,
        .postclose = vmw_postclose,
        .set_busid = drm_pci_set_busid,
 
@@ -1538,6 +1528,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 static int __init vmwgfx_init(void)
 {
        int ret;
+
+#ifdef CONFIG_VGA_CONSOLE
+       if (vgacon_text_force())
+               return -EINVAL;
+#endif
+
        ret = drm_pci_init(&driver, &vmw_pci_driver);
        if (ret)
                DRM_ERROR("Failed initializing DRM.\n");
index 469cdd520615d036fdb99b0f7e27ed67ce3abd67..5cb1b1687cd4c6f9be863ca3c757eabd74d91775 100644 (file)
@@ -80,7 +80,6 @@
 struct vmw_fpriv {
        struct drm_master *locked_master;
        struct ttm_object_file *tfile;
-       struct list_head fence_events;
        bool gb_aware;
 };
 
index 8e689b439890061ffc066b4c265590566c69e2d2..e959df6ede83e0a7b3b72e3c16df313f4df98940 100644 (file)
@@ -71,7 +71,6 @@ struct vmw_user_fence {
  */
 struct vmw_event_fence_action {
        struct vmw_fence_action action;
-       struct list_head fpriv_head;
 
        struct drm_pending_event *event;
        struct vmw_fence_obj *fence;
@@ -807,44 +806,6 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                                         TTM_REF_USAGE);
 }
 
-/**
- * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
- *
- * @fman: Pointer to a struct vmw_fence_manager
- * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
- * with pointers to a struct drm_file object about to be closed.
- *
- * This function removes all pending fence events with references to a
- * specific struct drm_file object about to be closed. The caller is required
- * to pass a list of all struct vmw_event_fence_action objects with such
- * events attached. This function is typically called before the
- * struct drm_file object's event management is taken down.
- */
-void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
-                               struct list_head *event_list)
-{
-       struct vmw_event_fence_action *eaction;
-       struct drm_pending_event *event;
-       unsigned long irq_flags;
-
-       while (1) {
-               spin_lock_irqsave(&fman->lock, irq_flags);
-               if (list_empty(event_list))
-                       goto out_unlock;
-               eaction = list_first_entry(event_list,
-                                          struct vmw_event_fence_action,
-                                          fpriv_head);
-               list_del_init(&eaction->fpriv_head);
-               event = eaction->event;
-               eaction->event = NULL;
-               spin_unlock_irqrestore(&fman->lock, irq_flags);
-               event->destroy(event);
-       }
-out_unlock:
-       spin_unlock_irqrestore(&fman->lock, irq_flags);
-}
-
-
 /**
  * vmw_event_fence_action_seq_passed
  *
@@ -879,10 +840,8 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
                *eaction->tv_usec = tv.tv_usec;
        }
 
-       list_del_init(&eaction->fpriv_head);
-       list_add_tail(&eaction->event->link, &file_priv->event_list);
+       drm_send_event_locked(dev, eaction->event);
        eaction->event = NULL;
-       wake_up_all(&file_priv->event_wait);
        spin_unlock_irqrestore(&dev->event_lock, irq_flags);
 }
 
@@ -899,12 +858,6 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
 {
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);
-       struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
-       unsigned long irq_flags;
-
-       spin_lock_irqsave(&fman->lock, irq_flags);
-       list_del(&eaction->fpriv_head);
-       spin_unlock_irqrestore(&fman->lock, irq_flags);
 
        vmw_fence_obj_unreference(&eaction->fence);
        kfree(eaction);
@@ -984,8 +937,6 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
 {
        struct vmw_event_fence_action *eaction;
        struct vmw_fence_manager *fman = fman_from_fence(fence);
-       struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-       unsigned long irq_flags;
 
        eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
        if (unlikely(eaction == NULL))
@@ -1002,10 +953,6 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
        eaction->tv_sec = tv_sec;
        eaction->tv_usec = tv_usec;
 
-       spin_lock_irqsave(&fman->lock, irq_flags);
-       list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
-       spin_unlock_irqrestore(&fman->lock, irq_flags);
-
        vmw_fence_obj_add_action(fence, &eaction->action);
 
        return 0;
@@ -1025,38 +972,26 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
        struct vmw_event_fence_pending *event;
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct drm_device *dev = fman->dev_priv->dev;
-       unsigned long irq_flags;
        int ret;
 
-       spin_lock_irqsave(&dev->event_lock, irq_flags);
-
-       ret = (file_priv->event_space < sizeof(event->event)) ? -EBUSY : 0;
-       if (likely(ret == 0))
-               file_priv->event_space -= sizeof(event->event);
-
-       spin_unlock_irqrestore(&dev->event_lock, irq_flags);
-
-       if (unlikely(ret != 0)) {
-               DRM_ERROR("Failed to allocate event space for this file.\n");
-               goto out_no_space;
-       }
-
-
        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (unlikely(event == NULL)) {
                DRM_ERROR("Failed to allocate an event.\n");
                ret = -ENOMEM;
-               goto out_no_event;
+               goto out_no_space;
        }
 
        event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
        event->event.base.length = sizeof(*event);
        event->event.user_data = user_data;
 
-       event->base.event = &event->event.base;
-       event->base.file_priv = file_priv;
-       event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+       ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
 
+       if (unlikely(ret != 0)) {
+               DRM_ERROR("Failed to allocate event space for this file.\n");
+               kfree(event);
+               goto out_no_space;
+       }
 
        if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
                ret = vmw_event_fence_action_queue(file_priv, fence,
@@ -1076,11 +1011,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
        return 0;
 
 out_no_queue:
-       event->base.destroy(&event->base);
-out_no_event:
-       spin_lock_irqsave(&dev->event_lock, irq_flags);
-       file_priv->event_space += sizeof(*event);
-       spin_unlock_irqrestore(&dev->event_lock, irq_flags);
+       drm_event_cancel_free(dev, &event->base);
 out_no_space:
        return ret;
 }
index 8be6c29f5eb55fd8ca96bc8bd2edc68927386362..83ae301ee14181630f36faf6e711d1dffac240c0 100644 (file)
@@ -116,8 +116,6 @@ extern int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
 extern int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);
-extern void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
-                                      struct list_head *event_list);
 extern int vmw_event_fence_action_queue(struct drm_file *filee_priv,
                                        struct vmw_fence_obj *fence,
                                        struct drm_pending_event *event,
index da462afcb225ea6958918b3d8ff52a0ee928cca3..c2e7fba370bb155bfc2c4a550450be9e370b2bce 100644 (file)
@@ -82,8 +82,10 @@ static int host1x_device_parse_dt(struct host1x_device *device,
                if (of_match_node(driver->subdevs, np) &&
                    of_device_is_available(np)) {
                        err = host1x_subdev_add(device, np);
-                       if (err < 0)
+                       if (err < 0) {
+                               of_node_put(np);
                                return err;
+                       }
                }
        }
 
index 63bd63f3c7dfd2da2fd3d9ae58bba1473bceb70b..1919aab88c3f412a10fa22fd5d85557f0e38a9a0 100644 (file)
@@ -225,7 +225,7 @@ unpin:
        return 0;
 }
 
-static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
+static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
 {
        int i = 0;
        u32 last_page = ~0;
index 665ab9fd0e011233b2105035ce20cdb9e4ee7c60..cbd7c986d926e072a234b248762e185fda1eebe4 100644 (file)
  * there can thus be up to three clients: Two vga clients (GPUs) and one audio
  * client (on the discrete GPU). The code is mostly prepared to support
  * machines with more than two GPUs should they become available.
+ *
  * The GPU to which the outputs are currently switched is called the
  * active client in vga_switcheroo parlance. The GPU not in use is the
- * inactive client.
+ * inactive client. When the inactive client's DRM driver is loaded,
+ * it will be unable to probe the panel's EDID and hence depends on
+ * VBIOS to provide its display modes. If the VBIOS modes are bogus or
+ * if there is no VBIOS at all (which is common on the MacBook Pro),
+ * a client may alternatively request that the DDC lines are temporarily
+ * switched to it, provided that the handler supports this. Switching
+ * only the DDC lines and not the entire output avoids unnecessary
+ * flickering.
  */
 
 /**
@@ -126,6 +134,10 @@ static DEFINE_MUTEX(vgasr_mutex);
  *     (counting only vga clients, not audio clients)
  * @clients: list of registered clients
  * @handler: registered handler
+ * @handler_flags: flags of registered handler
+ * @mux_hw_lock: protects mux state
+ *     (in particular while DDC lines are temporarily switched)
+ * @old_ddc_owner: client to which DDC lines will be switched back on unlock
  *
  * vga_switcheroo private data. Currently only one vga_switcheroo instance
  * per system is supported.
@@ -142,6 +154,9 @@ struct vgasr_priv {
        struct list_head clients;
 
        const struct vga_switcheroo_handler *handler;
+       enum vga_switcheroo_handler_flags_t handler_flags;
+       struct mutex mux_hw_lock;
+       int old_ddc_owner;
 };
 
 #define ID_BIT_AUDIO           0x100
@@ -156,6 +171,7 @@ static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
 /* only one switcheroo per system */
 static struct vgasr_priv vgasr_priv = {
        .clients = LIST_HEAD_INIT(vgasr_priv.clients),
+       .mux_hw_lock = __MUTEX_INITIALIZER(vgasr_priv.mux_hw_lock),
 };
 
 static bool vga_switcheroo_ready(void)
@@ -190,13 +206,15 @@ static void vga_switcheroo_enable(void)
 /**
  * vga_switcheroo_register_handler() - register handler
  * @handler: handler callbacks
+ * @handler_flags: handler flags
  *
  * Register handler. Enable vga_switcheroo if two vga clients have already
  * registered.
  *
  * Return: 0 on success, -EINVAL if a handler was already registered.
  */
-int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler)
+int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+                                   enum vga_switcheroo_handler_flags_t handler_flags)
 {
        mutex_lock(&vgasr_mutex);
        if (vgasr_priv.handler) {
@@ -205,6 +223,7 @@ int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler
        }
 
        vgasr_priv.handler = handler;
+       vgasr_priv.handler_flags = handler_flags;
        if (vga_switcheroo_ready()) {
                pr_info("enabled\n");
                vga_switcheroo_enable();
@@ -222,16 +241,33 @@ EXPORT_SYMBOL(vga_switcheroo_register_handler);
 void vga_switcheroo_unregister_handler(void)
 {
        mutex_lock(&vgasr_mutex);
+       mutex_lock(&vgasr_priv.mux_hw_lock);
+       vgasr_priv.handler_flags = 0;
        vgasr_priv.handler = NULL;
        if (vgasr_priv.active) {
                pr_info("disabled\n");
                vga_switcheroo_debugfs_fini(&vgasr_priv);
                vgasr_priv.active = false;
        }
+       mutex_unlock(&vgasr_priv.mux_hw_lock);
        mutex_unlock(&vgasr_mutex);
 }
 EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
 
+/**
+ * vga_switcheroo_handler_flags() - obtain handler flags
+ *
+ * Helper for clients to obtain the handler flags bitmask.
+ *
+ * Return: Handler flags. A value of 0 means that no handler is registered
+ * or that the handler has no special capabilities.
+ */
+enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void)
+{
+       return vgasr_priv.handler_flags;
+}
+EXPORT_SYMBOL(vga_switcheroo_handler_flags);
+
 static int register_client(struct pci_dev *pdev,
                           const struct vga_switcheroo_client_ops *ops,
                           enum vga_switcheroo_client_id id, bool active,
@@ -412,6 +448,76 @@ void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
 }
 EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
 
+/**
+ * vga_switcheroo_lock_ddc() - temporarily switch DDC lines to a given client
+ * @pdev: client pci device
+ *
+ * Temporarily switch DDC lines to the client identified by @pdev
+ * (but leave the outputs otherwise switched to where they are).
+ * This allows the inactive client to probe EDID. The DDC lines must
+ * afterwards be switched back by calling vga_switcheroo_unlock_ddc(),
+ * even if this function returns an error.
+ *
+ * Return: Previous DDC owner on success or a negative int on error.
+ * Specifically, %-ENODEV if no handler has registered or if the handler
+ * does not support switching the DDC lines. Also, a negative value
+ * returned by the handler is propagated back to the caller.
+ * The return value has merely an informational purpose for any caller
+ * which might be interested in it. It is acceptable to ignore the return
+ * value and simply rely on the result of the subsequent EDID probe,
+ * which will be %NULL if DDC switching failed.
+ */
+int vga_switcheroo_lock_ddc(struct pci_dev *pdev)
+{
+       enum vga_switcheroo_client_id id;
+
+       mutex_lock(&vgasr_priv.mux_hw_lock);
+       if (!vgasr_priv.handler || !vgasr_priv.handler->switch_ddc) {
+               vgasr_priv.old_ddc_owner = -ENODEV;
+               return -ENODEV;
+       }
+
+       id = vgasr_priv.handler->get_client_id(pdev);
+       vgasr_priv.old_ddc_owner = vgasr_priv.handler->switch_ddc(id);
+       return vgasr_priv.old_ddc_owner;
+}
+EXPORT_SYMBOL(vga_switcheroo_lock_ddc);
+
+/**
+ * vga_switcheroo_unlock_ddc() - switch DDC lines back to previous owner
+ * @pdev: client pci device
+ *
+ * Switch DDC lines back to the previous owner after calling
+ * vga_switcheroo_lock_ddc(). This must be called even if
+ * vga_switcheroo_lock_ddc() returned an error.
+ *
+ * Return: Previous DDC owner on success (i.e. the client identifier of @pdev)
+ * or a negative int on error.
+ * Specifically, %-ENODEV if no handler has registered or if the handler
+ * does not support switching the DDC lines. Also, a negative value
+ * returned by the handler is propagated back to the caller.
+ * Finally, invoking this function without calling vga_switcheroo_lock_ddc()
+ * first is not allowed and will result in %-EINVAL.
+ */
+int vga_switcheroo_unlock_ddc(struct pci_dev *pdev)
+{
+       enum vga_switcheroo_client_id id;
+       int ret = vgasr_priv.old_ddc_owner;
+
+       if (WARN_ON_ONCE(!mutex_is_locked(&vgasr_priv.mux_hw_lock)))
+               return -EINVAL;
+
+       if (vgasr_priv.old_ddc_owner >= 0) {
+               id = vgasr_priv.handler->get_client_id(pdev);
+               if (vgasr_priv.old_ddc_owner != id)
+                       ret = vgasr_priv.handler->switch_ddc(
+                                                    vgasr_priv.old_ddc_owner);
+       }
+       mutex_unlock(&vgasr_priv.mux_hw_lock);
+       return ret;
+}
+EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
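
The kerneldoc above prescribes a strict lock/probe/unlock pattern: vga_switcheroo_unlock_ddc() must be called even when vga_switcheroo_lock_ddc() returned an error. A minimal client-side sketch of that pattern, assuming a hypothetical helper name and that the caller already has its drm_connector and DDC i2c_adapter at hand:

#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <linux/vga_switcheroo.h>

/* Hypothetical client helper: temporarily claim the DDC lines for this
 * GPU, probe the EDID, then always hand the lines back, even if the
 * lock call returned an error. */
static struct edid *example_get_edid(struct drm_connector *connector,
				     struct i2c_adapter *ddc,
				     struct pci_dev *pdev)
{
	struct edid *edid;

	vga_switcheroo_lock_ddc(pdev);
	edid = drm_get_edid(connector, ddc);	/* NULL if the DDC switch failed */
	vga_switcheroo_unlock_ddc(pdev);	/* mandatory, even on error */

	return edid;
}
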
+
 /**
  * DOC: Manual switching and manual power control
  *
@@ -549,7 +655,9 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
                console_unlock();
        }
 
+       mutex_lock(&vgasr_priv.mux_hw_lock);
        ret = vgasr_priv.handler->switchto(new_client->id);
+       mutex_unlock(&vgasr_priv.mux_hw_lock);
        if (ret)
                return ret;
 
@@ -664,7 +772,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
        vgasr_priv.delayed_switch_active = false;
 
        if (just_mux) {
+               mutex_lock(&vgasr_priv.mux_hw_lock);
                ret = vgasr_priv.handler->switchto(client_id);
+               mutex_unlock(&vgasr_priv.mux_hw_lock);
                goto out;
        }
 
@@ -876,8 +986,11 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
        if (ret)
                return ret;
        mutex_lock(&vgasr_mutex);
-       if (vgasr_priv.handler->switchto)
+       if (vgasr_priv.handler->switchto) {
+               mutex_lock(&vgasr_priv.mux_hw_lock);
                vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
+               mutex_unlock(&vgasr_priv.mux_hw_lock);
+       }
        vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
        mutex_unlock(&vgasr_mutex);
        return 0;
index 7e89288b15375a2e8016ea6b31f5904e95c32719..e637e4ff1c88aed8e4d9cd36d70c0143805675df 100644 (file)
@@ -1075,7 +1075,7 @@ static u32 s32ton(__s32 value, unsigned n)
  * Extract/implement a data field from/to a little endian report (bit array).
  *
  * Code sort-of follows HID spec:
- *     http://www.usb.org/developers/devclass_docs/HID1_11.pdf
+ *     http://www.usb.org/developers/hidpage/HID1_11.pdf
  *
  * While the USB HID spec allows unlimited length bit fields in "report
  * descriptors", most devices never use more than 16 bits.
@@ -1083,20 +1083,37 @@ static u32 s32ton(__s32 value, unsigned n)
  * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
  */
 
-__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
-                    unsigned offset, unsigned n)
-{
-       u64 x;
+static u32 __extract(u8 *report, unsigned offset, int n)
+{
+       unsigned int idx = offset / 8;
+       unsigned int bit_nr = 0;
+       unsigned int bit_shift = offset % 8;
+       int bits_to_copy = 8 - bit_shift;
+       u32 value = 0;
+       u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
+
+       while (n > 0) {
+               value |= ((u32)report[idx] >> bit_shift) << bit_nr;
+               n -= bits_to_copy;
+               bit_nr += bits_to_copy;
+               bits_to_copy = 8;
+               bit_shift = 0;
+               idx++;
+       }
+
+       return value & mask;
+}
 
-       if (n > 32)
+u32 hid_field_extract(const struct hid_device *hid, u8 *report,
+                       unsigned offset, unsigned n)
+{
+       if (n > 32) {
                hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
                         n, current->comm);
+               n = 32;
+       }
 
-       report += offset >> 3;  /* adjust byte index */
-       offset &= 7;            /* now only need bit offset into one byte */
-       x = get_unaligned_le64(report);
-       x = (x >> offset) & ((1ULL << n) - 1);  /* extract bit field */
-       return (u32) x;
+       return __extract(report, offset, n);
 }
 EXPORT_SYMBOL_GPL(hid_field_extract);
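
As a sanity check on the new byte-wise extraction, here is a small self-contained userspace sketch that mirrors the loop in __extract() and asserts one worked example; the report bytes and field position are arbitrary illustration values, not taken from the patch:

#include <assert.h>
#include <stdint.h>

/* Mirrors the byte-wise logic of the new __extract(): read bits
 * [offset, offset + n) of a little endian bit stream, n <= 32. */
static uint32_t extract_bits(const uint8_t *report, unsigned offset, int n)
{
	unsigned idx = offset / 8;
	unsigned bit_nr = 0;
	unsigned bit_shift = offset % 8;
	int bits_to_copy = 8 - bit_shift;
	uint32_t value = 0;
	uint32_t mask = n < 32 ? (1U << n) - 1 : ~0U;

	while (n > 0) {
		value |= ((uint32_t)report[idx] >> bit_shift) << bit_nr;
		n -= bits_to_copy;
		bit_nr += bits_to_copy;
		bits_to_copy = 8;
		bit_shift = 0;
		idx++;
	}

	return value & mask;
}

int main(void)
{
	const uint8_t report[] = { 0xAB, 0xCD, 0xEF };

	/* bytes 0xAB 0xCD 0xEF are the little endian value 0xEFCDAB;
	 * the 12-bit field at bit offset 4 is (0xEFCDAB >> 4) & 0xFFF */
	assert(extract_bits(report, 4, 12) == 0xCDA);
	return 0;
}
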
 
@@ -1106,31 +1123,56 @@ EXPORT_SYMBOL_GPL(hid_field_extract);
  * The data mangled in the bit stream remains in little endian
  * order the whole time. It makes more sense to talk about
  * endianness of register values by considering a register
- * a "cached" copy of the little endiad bit stream.
+ * a "cached" copy of the little endian bit stream.
  */
-static void implement(const struct hid_device *hid, __u8 *report,
-                     unsigned offset, unsigned n, __u32 value)
+
+static void __implement(u8 *report, unsigned offset, int n, u32 value)
+{
+       unsigned int idx = offset / 8;
+       unsigned int size = offset + n;
+       unsigned int bit_shift = offset % 8;
+       int bits_to_set = 8 - bit_shift;
+       u8 bit_mask = 0xff << bit_shift;
+
+       while (n - bits_to_set >= 0) {
+               report[idx] &= ~bit_mask;
+               report[idx] |= value << bit_shift;
+               value >>= bits_to_set;
+               n -= bits_to_set;
+               bits_to_set = 8;
+               bit_mask = 0xff;
+               bit_shift = 0;
+               idx++;
+       }
+
+       /* last nibble */
+       if (n) {
+               if (size % 8)
+                       bit_mask &= (1U << (size % 8)) - 1;
+               report[idx] &= ~bit_mask;
+               report[idx] |= (value << bit_shift) & bit_mask;
+       }
+}
+
+static void implement(const struct hid_device *hid, u8 *report,
+                     unsigned offset, unsigned n, u32 value)
 {
-       u64 x;
-       u64 m = (1ULL << n) - 1;
+       u64 m;
 
-       if (n > 32)
+       if (n > 32) {
                hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
                         __func__, n, current->comm);
+               n = 32;
+       }
 
+       m = (1ULL << n) - 1;
        if (value > m)
                hid_warn(hid, "%s() called with too large value %d! (%s)\n",
                         __func__, value, current->comm);
        WARN_ON(value > m);
        value &= m;
 
-       report += offset >> 3;
-       offset &= 7;
-
-       x = get_unaligned_le64(report);
-       x &= ~(m << offset);
-       x |= ((u64)value) << offset;
-       put_unaligned_le64(x, report);
+       __implement(report, offset, n, value);
 }
 
 /*
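
The same illustration values round-trip through the new __implement() logic. A userspace mirror of its loop (again only a sketch, not the kernel code itself) writes 0xCDA back at bit offset 4 and reproduces the bytes used in the extraction example above:

#include <assert.h>
#include <stdint.h>

/* Mirrors the byte-wise logic of the new __implement(): write the low
 * n bits of value into bits [offset, offset + n) of a little endian
 * report, leaving all other bits untouched. */
static void implement_bits(uint8_t *report, unsigned offset, int n, uint32_t value)
{
	unsigned idx = offset / 8;
	unsigned size = offset + n;
	unsigned bit_shift = offset % 8;
	int bits_to_set = 8 - bit_shift;
	uint8_t bit_mask = 0xff << bit_shift;

	while (n - bits_to_set >= 0) {
		report[idx] &= ~bit_mask;
		report[idx] |= value << bit_shift;
		value >>= bits_to_set;
		n -= bits_to_set;
		bits_to_set = 8;
		bit_mask = 0xff;
		bit_shift = 0;
		idx++;
	}

	/* trailing partial byte, if any */
	if (n) {
		if (size % 8)
			bit_mask &= (1U << (size % 8)) - 1;
		report[idx] &= ~bit_mask;
		report[idx] |= (value << bit_shift) & bit_mask;
	}
}

int main(void)
{
	uint8_t report[] = { 0x0B, 0x00, 0xEF };

	/* writing the 12-bit value 0xCDA at bit offset 4 restores 0xAB 0xCD */
	implement_bits(report, 4, 12, 0xCDA);
	assert(report[0] == 0xAB && report[1] == 0xCD && report[2] == 0xEF);
	return 0;
}
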
@@ -1251,6 +1293,7 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
                /* Ignore report if ErrorRollOver */
                if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
                    value[n] >= min && value[n] <= max &&
+                   value[n] - min < field->maxusage &&
                    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1)
                        goto exit;
        }
@@ -1263,11 +1306,13 @@ static void hid_input_field(struct hid_device *hid, struct hid_field *field,
                }
 
                if (field->value[n] >= min && field->value[n] <= max
+                       && field->value[n] - min < field->maxusage
                        && field->usage[field->value[n] - min].hid
                        && search(value, field->value[n], count))
                                hid_process_event(hid, field, &field->usage[field->value[n] - min], 0, interrupt);
 
                if (value[n] >= min && value[n] <= max
+                       && value[n] - min < field->maxusage
                        && field->usage[value[n] - min].hid
                        && search(field->value, value[n], count))
                                hid_process_event(hid, field, &field->usage[value[n] - min], 1, interrupt);
@@ -2003,6 +2048,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
        { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
        { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
index 1d78ba3b799e6f020eee8f9e16c9cab5b8f61627..8fd4bf77f264940ec04631252062e06aafe148c8 100644 (file)
@@ -151,7 +151,7 @@ static inline int drff_init(struct hid_device *hid)
  * descriptor. In any case, it's a wonder it works on Windows.
  *
  *  Usage Page (Desktop),             ; Generic desktop controls (01h)
- *  Usage (Joystik),                  ; Joystik (04h, application collection)
+ *  Usage (Joystick),                 ; Joystick (04h, application collection)
  *  Collection (Application),
  *    Collection (Logical),
  *      Report Size (8),
@@ -207,7 +207,7 @@ static inline int drff_init(struct hid_device *hid)
 /* Fixed report descriptor for PID 0x011 joystick */
 static __u8 pid0011_rdesc_fixed[] = {
        0x05, 0x01,         /*  Usage Page (Desktop),           */
-       0x09, 0x04,         /*  Usage (Joystik),                */
+       0x09, 0x04,         /*  Usage (Joystick),               */
        0xA1, 0x01,         /*  Collection (Application),       */
        0xA1, 0x02,         /*      Collection (Logical),       */
        0x14,               /*          Logical Minimum (0),    */
index b6ff6e78ac54e281a8617212024147d63c891346..96fefdbbc75b28ae07a80d7c36665fd9033e84b0 100644 (file)
@@ -61,6 +61,9 @@
 #define USB_VENDOR_ID_AIREN            0x1a2c
 #define USB_DEVICE_ID_AIREN_SLIMPLUS   0x0002
 
+#define USB_VENDOR_ID_AKAI             0x2011
+#define USB_DEVICE_ID_AKAI_MPKMINI2    0x0715
+
 #define USB_VENDOR_ID_ALCOR            0x058f
 #define USB_DEVICE_ID_ALCOR_USBRS232   0x9720
 
 #define USB_VENDOR_ID_QUANTA           0x0408
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH             0x3000
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001                0x3001
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003                0x3003
 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008                0x3008
 
 #define USB_VENDOR_ID_RAZER            0x1532
 #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER             0x0002
 #define USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER    0x1000
 
+#define USB_VENDOR_ID_SINO_LITE                        0x1345
+#define USB_DEVICE_ID_SINO_LITE_CONTROLLER     0x3008
+
 #define USB_VENDOR_ID_SOUNDGRAPH       0x15c2
 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_FIRST    0x0034
 #define USB_DEVICE_ID_SOUNDGRAPH_IMON_LAST     0x0046
 #define USB_DEVICE_ID_RI_KA_WEBMAIL    0x1320  /* Webmail Notifier */
 
 #define USB_VENDOR_ID_MULTIPLE_1781    0x1781
-#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD    0x0a8d
+#define USB_DEVICE_ID_RAPHNET_4NES4SNES_OLD    0x0a9d
 
 #define USB_VENDOR_ID_DRACAL_RAPHNET   0x289b
 #define USB_DEVICE_ID_RAPHNET_2NES2SNES        0x0002
index c690fae02cf823d16b6d985d944b6bda143b6282..feb2be71f77c199e9ab21fabd0e6ffcd267d2d9e 100644 (file)
@@ -61,7 +61,7 @@
  */
 static __u8 df_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),                   */
-0x09, 0x04,         /*  Usage (Joystik),                        */
+0x09, 0x04,         /*  Usage (Joystick),                       */
 0xA1, 0x01,         /*  Collection (Application),               */
 0xA1, 0x02,         /*      Collection (Logical),               */
 0x95, 0x01,         /*          Report Count (1),               */
@@ -127,7 +127,7 @@ static __u8 df_rdesc_fixed[] = {
 
 static __u8 dfp_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),                   */
-0x09, 0x04,         /*  Usage (Joystik),                        */
+0x09, 0x04,         /*  Usage (Joystick),                       */
 0xA1, 0x01,         /*  Collection (Application),               */
 0xA1, 0x02,         /*      Collection (Logical),               */
 0x95, 0x01,         /*          Report Count (1),               */
@@ -175,7 +175,7 @@ static __u8 dfp_rdesc_fixed[] = {
 
 static __u8 fv_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),                   */
-0x09, 0x04,         /*  Usage (Joystik),                        */
+0x09, 0x04,         /*  Usage (Joystick),                       */
 0xA1, 0x01,         /*  Collection (Application),               */
 0xA1, 0x02,         /*      Collection (Logical),               */
 0x95, 0x01,         /*          Report Count (1),               */
@@ -242,7 +242,7 @@ static __u8 fv_rdesc_fixed[] = {
 
 static __u8 momo_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),               */
-0x09, 0x04,         /*  Usage (Joystik),                    */
+0x09, 0x04,         /*  Usage (Joystick),                   */
 0xA1, 0x01,         /*  Collection (Application),           */
 0xA1, 0x02,         /*      Collection (Logical),           */
 0x95, 0x01,         /*          Report Count (1),           */
@@ -288,7 +288,7 @@ static __u8 momo_rdesc_fixed[] = {
 
 static __u8 momo2_rdesc_fixed[] = {
 0x05, 0x01,         /*  Usage Page (Desktop),               */
-0x09, 0x04,         /*  Usage (Joystik),                    */
+0x09, 0x04,         /*  Usage (Joystick),                   */
 0xA1, 0x01,         /*  Collection (Application),           */
 0xA1, 0x02,         /*      Collection (Logical),           */
 0x95, 0x01,         /*          Report Count (1),           */
index bd2ab476c65ef9f73a04a1f732dc7f75a3b49835..2e2515a4c070eac305a834c9229d0e1fa27f722f 100644 (file)
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/device.h>
+#include <linux/input.h>
+#include <linux/usb.h>
 #include <linux/hid.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/kfifo.h>
 #include <linux/input/mt.h>
+#include <linux/workqueue.h>
+#include <linux/atomic.h>
+#include <linux/fixp-arith.h>
 #include <asm/unaligned.h>
+#include "usbhid/usbhid.h"
 #include "hid-ids.h"
 
 MODULE_LICENSE("GPL");
@@ -773,6 +779,589 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
        }
 }
 
+/* -------------------------------------------------------------------------- */
+/* 0x8123: Force feedback support                                             */
+/* -------------------------------------------------------------------------- */
+
+#define HIDPP_FF_GET_INFO              0x01
+#define HIDPP_FF_RESET_ALL             0x11
+#define HIDPP_FF_DOWNLOAD_EFFECT       0x21
+#define HIDPP_FF_SET_EFFECT_STATE      0x31
+#define HIDPP_FF_DESTROY_EFFECT                0x41
+#define HIDPP_FF_GET_APERTURE          0x51
+#define HIDPP_FF_SET_APERTURE          0x61
+#define HIDPP_FF_GET_GLOBAL_GAINS      0x71
+#define HIDPP_FF_SET_GLOBAL_GAINS      0x81
+
+#define HIDPP_FF_EFFECT_STATE_GET      0x00
+#define HIDPP_FF_EFFECT_STATE_STOP     0x01
+#define HIDPP_FF_EFFECT_STATE_PLAY     0x02
+#define HIDPP_FF_EFFECT_STATE_PAUSE    0x03
+
+#define HIDPP_FF_EFFECT_CONSTANT       0x00
+#define HIDPP_FF_EFFECT_PERIODIC_SINE          0x01
+#define HIDPP_FF_EFFECT_PERIODIC_SQUARE                0x02
+#define HIDPP_FF_EFFECT_PERIODIC_TRIANGLE      0x03
+#define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP    0x04
+#define HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN  0x05
+#define HIDPP_FF_EFFECT_SPRING         0x06
+#define HIDPP_FF_EFFECT_DAMPER         0x07
+#define HIDPP_FF_EFFECT_FRICTION       0x08
+#define HIDPP_FF_EFFECT_INERTIA                0x09
+#define HIDPP_FF_EFFECT_RAMP           0x0A
+
+#define HIDPP_FF_EFFECT_AUTOSTART      0x80
+
+#define HIDPP_FF_EFFECTID_NONE         -1
+#define HIDPP_FF_EFFECTID_AUTOCENTER   -2
+
+#define HIDPP_FF_MAX_PARAMS    20
+#define HIDPP_FF_RESERVED_SLOTS        1
+
+struct hidpp_ff_private_data {
+       struct hidpp_device *hidpp;
+       u8 feature_index;
+       u8 version;
+       u16 gain;
+       s16 range;
+       u8 slot_autocenter;
+       u8 num_effects;
+       int *effect_ids;
+       struct workqueue_struct *wq;
+       atomic_t workqueue_size;
+};
+
+struct hidpp_ff_work_data {
+       struct work_struct work;
+       struct hidpp_ff_private_data *data;
+       int effect_id;
+       u8 command;
+       u8 params[HIDPP_FF_MAX_PARAMS];
+       u8 size;
+};
+
+static const signed short hiddpp_ff_effects[] = {
+       FF_CONSTANT,
+       FF_PERIODIC,
+       FF_SINE,
+       FF_SQUARE,
+       FF_SAW_UP,
+       FF_SAW_DOWN,
+       FF_TRIANGLE,
+       FF_SPRING,
+       FF_DAMPER,
+       FF_AUTOCENTER,
+       FF_GAIN,
+       -1
+};
+
+static const signed short hiddpp_ff_effects_v2[] = {
+       FF_RAMP,
+       FF_FRICTION,
+       FF_INERTIA,
+       -1
+};
+
+static const u8 HIDPP_FF_CONDITION_CMDS[] = {
+       HIDPP_FF_EFFECT_SPRING,
+       HIDPP_FF_EFFECT_FRICTION,
+       HIDPP_FF_EFFECT_DAMPER,
+       HIDPP_FF_EFFECT_INERTIA
+};
+
+static const char *HIDPP_FF_CONDITION_NAMES[] = {
+       "spring",
+       "friction",
+       "damper",
+       "inertia"
+};
+
+
+static u8 hidpp_ff_find_effect(struct hidpp_ff_private_data *data, int effect_id)
+{
+       int i;
+
+       for (i = 0; i < data->num_effects; i++)
+               if (data->effect_ids[i] == effect_id)
+                       return i+1;
+
+       return 0;
+}
+
+static void hidpp_ff_work_handler(struct work_struct *w)
+{
+       struct hidpp_ff_work_data *wd = container_of(w, struct hidpp_ff_work_data, work);
+       struct hidpp_ff_private_data *data = wd->data;
+       struct hidpp_report response;
+       u8 slot;
+       int ret;
+
+       /* add slot number if needed */
+       switch (wd->effect_id) {
+       case HIDPP_FF_EFFECTID_AUTOCENTER:
+               wd->params[0] = data->slot_autocenter;
+               break;
+       case HIDPP_FF_EFFECTID_NONE:
+               /* leave slot as zero */
+               break;
+       default:
+               /* find current slot for effect */
+               wd->params[0] = hidpp_ff_find_effect(data, wd->effect_id);
+               break;
+       }
+
+       /* send command and wait for reply */
+       ret = hidpp_send_fap_command_sync(data->hidpp, data->feature_index,
+               wd->command, wd->params, wd->size, &response);
+
+       if (ret) {
+               hid_err(data->hidpp->hid_dev, "Failed to send command to device!\n");
+               goto out;
+       }
+
+       /* parse return data */
+       switch (wd->command) {
+       case HIDPP_FF_DOWNLOAD_EFFECT:
+               slot = response.fap.params[0];
+               if (slot > 0 && slot <= data->num_effects) {
+                       if (wd->effect_id >= 0)
+                               /* regular effect uploaded */
+                               data->effect_ids[slot-1] = wd->effect_id;
+                       else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
+                               /* autocenter spring uploaded */
+                               data->slot_autocenter = slot;
+               }
+               break;
+       case HIDPP_FF_DESTROY_EFFECT:
+               if (wd->effect_id >= 0)
+                       /* regular effect destroyed */
+                       data->effect_ids[wd->params[0]-1] = -1;
+               else if (wd->effect_id >= HIDPP_FF_EFFECTID_AUTOCENTER)
+                       /* autocenter spring destroyed */
+                       data->slot_autocenter = 0;
+               break;
+       case HIDPP_FF_SET_GLOBAL_GAINS:
+               data->gain = (wd->params[0] << 8) + wd->params[1];
+               break;
+       case HIDPP_FF_SET_APERTURE:
+               data->range = (wd->params[0] << 8) + wd->params[1];
+               break;
+       default:
+               /* no action needed */
+               break;
+       }
+
+out:
+       atomic_dec(&data->workqueue_size);
+       kfree(wd);
+}
+
+static int hidpp_ff_queue_work(struct hidpp_ff_private_data *data, int effect_id, u8 command, u8 *params, u8 size)
+{
+       struct hidpp_ff_work_data *wd = kzalloc(sizeof(*wd), GFP_KERNEL);
+       int s;
+
+       if (!wd)
+               return -ENOMEM;
+
+       INIT_WORK(&wd->work, hidpp_ff_work_handler);
+
+       wd->data = data;
+       wd->effect_id = effect_id;
+       wd->command = command;
+       wd->size = size;
+       memcpy(wd->params, params, size);
+
+       atomic_inc(&data->workqueue_size);
+       queue_work(data->wq, &wd->work);
+
+       /* warn about excessive queue size */
+       s = atomic_read(&data->workqueue_size);
+       if (s >= 20 && s % 20 == 0)
+               hid_warn(data->hidpp->hid_dev, "Force feedback command queue contains %d commands, causing substantial delays!", s);
+
+       return 0;
+}
+
+static int hidpp_ff_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 params[20];
+       u8 size;
+       int force;
+
+       /* set common parameters */
+       params[2] = effect->replay.length >> 8;
+       params[3] = effect->replay.length & 255;
+       params[4] = effect->replay.delay >> 8;
+       params[5] = effect->replay.delay & 255;
+
+       switch (effect->type) {
+       case FF_CONSTANT:
+               force = (effect->u.constant.level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+               params[1] = HIDPP_FF_EFFECT_CONSTANT;
+               params[6] = force >> 8;
+               params[7] = force & 255;
+               params[8] = effect->u.constant.envelope.attack_level >> 7;
+               params[9] = effect->u.constant.envelope.attack_length >> 8;
+               params[10] = effect->u.constant.envelope.attack_length & 255;
+               params[11] = effect->u.constant.envelope.fade_level >> 7;
+               params[12] = effect->u.constant.envelope.fade_length >> 8;
+               params[13] = effect->u.constant.envelope.fade_length & 255;
+               size = 14;
+               dbg_hid("Uploading constant force level=%d in dir %d = %d\n",
+                               effect->u.constant.level,
+                               effect->direction, force);
+               dbg_hid("          envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+                               effect->u.constant.envelope.attack_level,
+                               effect->u.constant.envelope.attack_length,
+                               effect->u.constant.envelope.fade_level,
+                               effect->u.constant.envelope.fade_length);
+               break;
+       case FF_PERIODIC:
+       {
+               switch (effect->u.periodic.waveform) {
+               case FF_SINE:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_SINE;
+                       break;
+               case FF_SQUARE:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_SQUARE;
+                       break;
+               case FF_SAW_UP:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHUP;
+                       break;
+               case FF_SAW_DOWN:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_SAWTOOTHDOWN;
+                       break;
+               case FF_TRIANGLE:
+                       params[1] = HIDPP_FF_EFFECT_PERIODIC_TRIANGLE;
+                       break;
+               default:
+                       hid_err(data->hidpp->hid_dev, "Unexpected periodic waveform type %i!\n", effect->u.periodic.waveform);
+                       return -EINVAL;
+               }
+               force = (effect->u.periodic.magnitude * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+               params[6] = effect->u.periodic.magnitude >> 8;
+               params[7] = effect->u.periodic.magnitude & 255;
+               params[8] = effect->u.periodic.offset >> 8;
+               params[9] = effect->u.periodic.offset & 255;
+               params[10] = effect->u.periodic.period >> 8;
+               params[11] = effect->u.periodic.period & 255;
+               params[12] = effect->u.periodic.phase >> 8;
+               params[13] = effect->u.periodic.phase & 255;
+               params[14] = effect->u.periodic.envelope.attack_level >> 7;
+               params[15] = effect->u.periodic.envelope.attack_length >> 8;
+               params[16] = effect->u.periodic.envelope.attack_length & 255;
+               params[17] = effect->u.periodic.envelope.fade_level >> 7;
+               params[18] = effect->u.periodic.envelope.fade_length >> 8;
+               params[19] = effect->u.periodic.envelope.fade_length & 255;
+               size = 20;
+               dbg_hid("Uploading periodic force mag=%d/dir=%d, offset=%d, period=%d ms, phase=%d\n",
+                               effect->u.periodic.magnitude, effect->direction,
+                               effect->u.periodic.offset,
+                               effect->u.periodic.period,
+                               effect->u.periodic.phase);
+               dbg_hid("          envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+                               effect->u.periodic.envelope.attack_level,
+                               effect->u.periodic.envelope.attack_length,
+                               effect->u.periodic.envelope.fade_level,
+                               effect->u.periodic.envelope.fade_length);
+               break;
+       }
+       case FF_RAMP:
+               params[1] = HIDPP_FF_EFFECT_RAMP;
+               force = (effect->u.ramp.start_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+               params[6] = force >> 8;
+               params[7] = force & 255;
+               force = (effect->u.ramp.end_level * fixp_sin16((effect->direction * 360) >> 16)) >> 15;
+               params[8] = force >> 8;
+               params[9] = force & 255;
+               params[10] = effect->u.ramp.envelope.attack_level >> 7;
+               params[11] = effect->u.ramp.envelope.attack_length >> 8;
+               params[12] = effect->u.ramp.envelope.attack_length & 255;
+               params[13] = effect->u.ramp.envelope.fade_level >> 7;
+               params[14] = effect->u.ramp.envelope.fade_length >> 8;
+               params[15] = effect->u.ramp.envelope.fade_length & 255;
+               size = 16;
+               dbg_hid("Uploading ramp force level=%d -> %d in dir %d = %d\n",
+                               effect->u.ramp.start_level,
+                               effect->u.ramp.end_level,
+                               effect->direction, force);
+               dbg_hid("          envelope attack=(%d, %d ms) fade=(%d, %d ms)\n",
+                               effect->u.ramp.envelope.attack_level,
+                               effect->u.ramp.envelope.attack_length,
+                               effect->u.ramp.envelope.fade_level,
+                               effect->u.ramp.envelope.fade_length);
+               break;
+       case FF_FRICTION:
+       case FF_INERTIA:
+       case FF_SPRING:
+       case FF_DAMPER:
+               params[1] = HIDPP_FF_CONDITION_CMDS[effect->type - FF_SPRING];
+               params[6] = effect->u.condition[0].left_saturation >> 9;
+               params[7] = (effect->u.condition[0].left_saturation >> 1) & 255;
+               params[8] = effect->u.condition[0].left_coeff >> 8;
+               params[9] = effect->u.condition[0].left_coeff & 255;
+               params[10] = effect->u.condition[0].deadband >> 9;
+               params[11] = (effect->u.condition[0].deadband >> 1) & 255;
+               params[12] = effect->u.condition[0].center >> 8;
+               params[13] = effect->u.condition[0].center & 255;
+               params[14] = effect->u.condition[0].right_coeff >> 8;
+               params[15] = effect->u.condition[0].right_coeff & 255;
+               params[16] = effect->u.condition[0].right_saturation >> 9;
+               params[17] = (effect->u.condition[0].right_saturation >> 1) & 255;
+               size = 18;
+               dbg_hid("Uploading %s force left coeff=%d, left sat=%d, right coeff=%d, right sat=%d\n",
+                               HIDPP_FF_CONDITION_NAMES[effect->type - FF_SPRING],
+                               effect->u.condition[0].left_coeff,
+                               effect->u.condition[0].left_saturation,
+                               effect->u.condition[0].right_coeff,
+                               effect->u.condition[0].right_saturation);
+               dbg_hid("          deadband=%d, center=%d\n",
+                               effect->u.condition[0].deadband,
+                               effect->u.condition[0].center);
+               break;
+       default:
+               hid_err(data->hidpp->hid_dev, "Unexpected force type %i!\n", effect->type);
+               return -EINVAL;
+       }
+
+       return hidpp_ff_queue_work(data, effect->id, HIDPP_FF_DOWNLOAD_EFFECT, params, size);
+}
+
+static int hidpp_ff_playback(struct input_dev *dev, int effect_id, int value)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 params[2];
+
+       params[1] = value ? HIDPP_FF_EFFECT_STATE_PLAY : HIDPP_FF_EFFECT_STATE_STOP;
+
+       dbg_hid("%s playback of effect %d.\n", value ? "Starting" : "Stopping", effect_id);
+
+       return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_SET_EFFECT_STATE, params, ARRAY_SIZE(params));
+}
+
+static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 slot = 0;
+
+       dbg_hid("Erasing effect %d.\n", effect_id);
+
+       return hidpp_ff_queue_work(data, effect_id, HIDPP_FF_DESTROY_EFFECT, &slot, 1);
+}
+
+static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 params[18];
+
+       dbg_hid("Setting autocenter to %d.\n", magnitude);
+
+       /* start a standard spring effect */
+       params[1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART;
+       /* zero delay and duration */
+       params[2] = params[3] = params[4] = params[5] = 0;
+       /* set coeff to 25% of saturation */
+       params[8] = params[14] = magnitude >> 11;
+       params[9] = params[15] = (magnitude >> 3) & 255;
+       params[6] = params[16] = magnitude >> 9;
+       params[7] = params[17] = (magnitude >> 1) & 255;
+       /* zero deadband and center */
+       params[10] = params[11] = params[12] = params[13] = 0;
+
+       hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_AUTOCENTER, HIDPP_FF_DOWNLOAD_EFFECT, params, ARRAY_SIZE(params));
+}
+
+static void hidpp_ff_set_gain(struct input_dev *dev, u16 gain)
+{
+       struct hidpp_ff_private_data *data = dev->ff->private;
+       u8 params[4];
+
+       dbg_hid("Setting gain to %d.\n", gain);
+
+       params[0] = gain >> 8;
+       params[1] = gain & 255;
+       params[2] = 0; /* no boost */
+       params[3] = 0;
+
+       hidpp_ff_queue_work(data, HIDPP_FF_EFFECTID_NONE, HIDPP_FF_SET_GLOBAL_GAINS, params, ARRAY_SIZE(params));
+}
+
+static ssize_t hidpp_ff_range_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct hid_device *hid = to_hid_device(dev);
+       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       struct input_dev *idev = hidinput->input;
+       struct hidpp_ff_private_data *data = idev->ff->private;
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", data->range);
+}
+
+static ssize_t hidpp_ff_range_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct hid_device *hid = to_hid_device(dev);
+       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       struct input_dev *idev = hidinput->input;
+       struct hidpp_ff_private_data *data = idev->ff->private;
+       u8 params[2];
+       int range = simple_strtoul(buf, NULL, 10);
+
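+       /* wheel rotation range in degrees, limited to the 180..900 span */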
+       range = clamp(range, 180, 900);
+
+       params[0] = range >> 8;
+       params[1] = range & 0x00FF;
+
+       hidpp_ff_queue_work(data, -1, HIDPP_FF_SET_APERTURE, params, ARRAY_SIZE(params));
+
+       return count;
+}
+
+static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp_ff_range_show, hidpp_ff_range_store);
+
+static void hidpp_ff_destroy(struct ff_device *ff)
+{
+       struct hidpp_ff_private_data *data = ff->private;
+
+       kfree(data->effect_ids);
+}
+
+static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+{
+       struct hid_device *hid = hidpp->hid_dev;
+       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       struct input_dev *dev = hidinput->input;
+       const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
+       const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
+       struct ff_device *ff;
+       struct hidpp_report response;
+       struct hidpp_ff_private_data *data;
+       int error, j, num_slots;
+       u8 version;
+
+       if (!dev) {
+               hid_err(hid, "Struct input_dev not set!\n");
+               return -EINVAL;
+       }
+
+       /* Get firmware release */
+       version = bcdDevice & 255;
+
+       /* Set supported force feedback capabilities */
+       for (j = 0; hiddpp_ff_effects[j] >= 0; j++)
+               set_bit(hiddpp_ff_effects[j], dev->ffbit);
+       if (version > 1)
+               for (j = 0; hiddpp_ff_effects_v2[j] >= 0; j++)
+                       set_bit(hiddpp_ff_effects_v2[j], dev->ffbit);
+
+       /* Read number of slots available in device */
+       error = hidpp_send_fap_command_sync(hidpp, feature_index,
+               HIDPP_FF_GET_INFO, NULL, 0, &response);
+       if (error) {
+               if (error < 0)
+                       return error;
+               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
+                       __func__, error);
+               return -EPROTO;
+       }
+
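+       /* the device reports its total slot count; keep the reserved ones back */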
+       num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
+
+       error = input_ff_create(dev, num_slots);
+
+       if (error) {
+               hid_err(dev, "Failed to create FF device!\n");
+               return error;
+       }
+
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+       data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL);
+       if (!data->effect_ids) {
+               kfree(data);
+               return -ENOMEM;
+       }
+       data->hidpp = hidpp;
+       data->feature_index = feature_index;
+       data->version = version;
+       data->slot_autocenter = 0;
+       data->num_effects = num_slots;
+       for (j = 0; j < num_slots; j++)
+               data->effect_ids[j] = -1;
+
+       ff = dev->ff;
+       ff->private = data;
+
+       ff->upload = hidpp_ff_upload_effect;
+       ff->erase = hidpp_ff_erase_effect;
+       ff->playback = hidpp_ff_playback;
+       ff->set_gain = hidpp_ff_set_gain;
+       ff->set_autocenter = hidpp_ff_set_autocenter;
+       ff->destroy = hidpp_ff_destroy;
+
+       /* reset all forces */
+       error = hidpp_send_fap_command_sync(hidpp, feature_index,
+               HIDPP_FF_RESET_ALL, NULL, 0, &response);
+
+       /* Read current Range */
+       error = hidpp_send_fap_command_sync(hidpp, feature_index,
+               HIDPP_FF_GET_APERTURE, NULL, 0, &response);
+       if (error)
+               hid_warn(hidpp->hid_dev, "Failed to read range from device!\n");
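+       /* fall back to the full 900 degree range if the query failed */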
+       data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]);
+
+       /* Create sysfs interface */
+       error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
+       if (error)
+               hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error);
+
+       /* Read the current gain values */
+       error = hidpp_send_fap_command_sync(hidpp, feature_index,
+               HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response);
+       if (error)
+               hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n");
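+       /* assume full gain (0xffff) if the query failed */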
+       data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]);
+       /* ignore boost value at response.fap.params[2] */
+
+       /* init the hardware command queue */
+       data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+       atomic_set(&data->workqueue_size, 0);
+
+       /* initialize with zero autocenter to get wheel in usable state */
+       hidpp_ff_set_autocenter(dev, 0);
+
+       hid_info(hid, "Force feedback support loaded (firmware release %d).\n", version);
+
+       return 0;
+}
+
+static int hidpp_ff_deinit(struct hid_device *hid)
+{
+       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       struct input_dev *dev = hidinput->input;
+       struct hidpp_ff_private_data *data;
+
+       if (!dev) {
+               hid_err(hid, "Struct input_dev not found!\n");
+               return -EINVAL;
+       }
+
+       hid_info(hid, "Unloading HID++ force feedback.\n");
+       data = dev->ff->private;
+       if (!data) {
+               hid_err(hid, "Private data not found!\n");
+               return -EINVAL;
+       }
+
+       destroy_workqueue(data->wq);
+       device_remove_file(&hid->dev, &dev_attr_range);
+
+       return 0;
+}
+
+
 /* ************************************************************************** */
 /*                                                                            */
 /* Device Support                                                             */
@@ -1301,121 +1890,22 @@ static int k400_connect(struct hid_device *hdev, bool connected)
 
 #define HIDPP_PAGE_G920_FORCE_FEEDBACK                 0x8123
 
-/* Using session ID = 1 */
-#define CMD_G920_FORCE_GET_APERTURE                    0x51
-#define CMD_G920_FORCE_SET_APERTURE                    0x61
-
-struct g920_private_data {
-       u8 force_feature;
-       u16 range;
-};
-
-static ssize_t g920_range_show(struct device *dev, struct device_attribute *attr,
-                               char *buf)
-{
-       struct hid_device *hid = to_hid_device(dev);
-       struct hidpp_device *hidpp = hid_get_drvdata(hid);
-       struct g920_private_data *pdata;
-
-       pdata = hidpp->private_data;
-       if (!pdata) {
-               hid_err(hid, "Private driver data not found!\n");
-               return -EINVAL;
-       }
-
-       return scnprintf(buf, PAGE_SIZE, "%u\n", pdata->range);
-}
-
-static ssize_t g920_range_store(struct device *dev, struct device_attribute *attr,
-                                const char *buf, size_t count)
-{
-       struct hid_device *hid = to_hid_device(dev);
-       struct hidpp_device *hidpp = hid_get_drvdata(hid);
-       struct g920_private_data *pdata;
-       struct hidpp_report response;
-       u8 params[2];
-       int ret;
-       u16 range = simple_strtoul(buf, NULL, 10);
-
-       pdata = hidpp->private_data;
-       if (!pdata) {
-               hid_err(hid, "Private driver data not found!\n");
-               return -EINVAL;
-       }
-
-       if (range < 180)
-               range = 180;
-       else if (range > 900)
-               range = 900;
-
-       params[0] = range >> 8;
-       params[1] = range & 0x00FF;
-
-       ret = hidpp_send_fap_command_sync(hidpp, pdata->force_feature,
-               CMD_G920_FORCE_SET_APERTURE, params, 2, &response);
-       if (ret)
-               return ret;
-
-       pdata->range = range;
-       return count;
-}
-
-static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, g920_range_show, g920_range_store);
-
-static int g920_allocate(struct hid_device *hdev)
-{
-       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
-       struct g920_private_data *pdata;
-
-       pdata = devm_kzalloc(&hdev->dev, sizeof(struct g920_private_data),
-                       GFP_KERNEL);
-       if (!pdata)
-               return -ENOMEM;
-
-       hidpp->private_data = pdata;
-
-       return 0;
-}
-
 static int g920_get_config(struct hidpp_device *hidpp)
 {
-       struct g920_private_data *pdata = hidpp->private_data;
-       struct hidpp_report response;
        u8 feature_type;
        u8 feature_index;
        int ret;
 
-       pdata = hidpp->private_data;
-       if (!pdata) {
-               hid_err(hidpp->hid_dev, "Private driver data not found!\n");
-               return -EINVAL;
-       }
-
        /* Find feature and store for later use */
        ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
                &feature_index, &feature_type);
        if (ret)
                return ret;
 
-       pdata->force_feature = feature_index;
-
-       /* Read current Range */
-       ret = hidpp_send_fap_command_sync(hidpp, feature_index,
-               CMD_G920_FORCE_GET_APERTURE, NULL, 0, &response);
-       if (ret > 0) {
-               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
-                       __func__, ret);
-               return -EPROTO;
-       }
-       if (ret)
-               return ret;
-
-       pdata->range = get_unaligned_be16(&response.fap.params[0]);
-
-       /* Create sysfs interface */
-       ret = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
+       ret = hidpp_ff_init(hidpp, feature_index);
        if (ret)
-               hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d\n", ret);
+               hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n",
+                               ret);
 
        return 0;
 }
@@ -1739,10 +2229,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                ret = k400_allocate(hdev);
                if (ret)
                        goto allocate_fail;
-       } else if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
-               ret = g920_allocate(hdev);
-               if (ret)
-                       goto allocate_fail;
        }
 
        INIT_WORK(&hidpp->work, delayed_work_cb);
@@ -1825,7 +2311,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
 hid_hw_open_failed:
        hid_device_io_stop(hdev);
        if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
-               device_remove_file(&hdev->dev, &dev_attr_range);
                hid_hw_close(hdev);
                hid_hw_stop(hdev);
        }
@@ -1843,7 +2328,7 @@ static void hidpp_remove(struct hid_device *hdev)
        struct hidpp_device *hidpp = hid_get_drvdata(hdev);
 
        if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
-               device_remove_file(&hdev->dev, &dev_attr_range);
+               hidpp_ff_deinit(hdev);
                hid_hw_close(hdev);
        }
        hid_hw_stop(hdev);
index 296d4991560e45a9fed565c8a2d3cd463efb9ff1..6adb788bbfcaa13984a6a24f8ea99eea4b91bf6d 100644 (file)
@@ -1133,6 +1133,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return ret;
 
        ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
+       if (ret)
+               dev_warn(&hdev->dev, "Cannot allocate sysfs group for %s\n",
+                               hdev->name);
 
        mt_set_maxcontacts(hdev);
        mt_set_input_mode(hdev);
index 67cd059a8f46cdf7dab8d73cb0edb7bd5aff1333..9cd2ca34a6be5583dbcd82a64feb018f66b20bf8 100644 (file)
@@ -594,6 +594,9 @@ static int rmi_suspend(struct hid_device *hdev, pm_message_t message)
        int ret;
        u8 buf[RMI_F11_CTRL_REG_COUNT];
 
+       if (!(data->device_flags & RMI_DEVICE))
+               return 0;
+
        ret = rmi_read_block(hdev, data->f11.control_base_addr, buf,
                                RMI_F11_CTRL_REG_COUNT);
        if (ret)
@@ -613,6 +616,9 @@ static int rmi_post_reset(struct hid_device *hdev)
        struct rmi_data *data = hid_get_drvdata(hdev);
        int ret;
 
+       if (!(data->device_flags & RMI_DEVICE))
+               return 0;
+
        ret = rmi_reset_attn_mode(hdev);
        if (ret) {
                hid_err(hdev, "can not set rmi mode\n");
@@ -640,6 +646,11 @@ static int rmi_post_reset(struct hid_device *hdev)
 
 static int rmi_post_resume(struct hid_device *hdev)
 {
+       struct rmi_data *data = hid_get_drvdata(hdev);
+
+       if (!(data->device_flags & RMI_DEVICE))
+               return 0;
+
        return rmi_reset_attn_mode(hdev);
 }
 #endif /* CONFIG_PM */
index 9b8db0e0ef1cdd6c3b2b050d87a50cb32e18ce1d..310436a54a3f308157a272d56d6624c3dd3c98c1 100644 (file)
@@ -50,6 +50,7 @@
 #define MOTION_CONTROLLER_BT      BIT(8)
 #define NAVIGATION_CONTROLLER_USB BIT(9)
 #define NAVIGATION_CONTROLLER_BT  BIT(10)
+#define SINO_LITE_CONTROLLER      BIT(11)
 
 #define SIXAXIS_CONTROLLER (SIXAXIS_CONTROLLER_USB | SIXAXIS_CONTROLLER_BT)
 #define MOTION_CONTROLLER (MOTION_CONTROLLER_USB | MOTION_CONTROLLER_BT)
@@ -74,7 +75,7 @@
  * axis values.  Additionally, the controller only has 20 actual, physical axes
  * so there are several unused axes in between the used ones.
  */
-static __u8 sixaxis_rdesc[] = {
+static u8 sixaxis_rdesc[] = {
        0x05, 0x01,         /*  Usage Page (Desktop),               */
        0x09, 0x04,         /*  Usage (Joystick),                   */
        0xA1, 0x01,         /*  Collection (Application),           */
@@ -152,7 +153,7 @@ static __u8 sixaxis_rdesc[] = {
 };
 
 /* PS/3 Motion controller */
-static __u8 motion_rdesc[] = {
+static u8 motion_rdesc[] = {
        0x05, 0x01,         /*  Usage Page (Desktop),               */
        0x09, 0x04,         /*  Usage (Joystick),                   */
        0xA1, 0x01,         /*  Collection (Application),           */
@@ -249,9 +250,9 @@ static __u8 motion_rdesc[] = {
 };
 
 /* PS/3 Navigation controller */
-static __u8 navigation_rdesc[] = {
+static u8 navigation_rdesc[] = {
        0x05, 0x01,         /*  Usage Page (Desktop),               */
-       0x09, 0x04,         /*  Usage (Joystik),                    */
+       0x09, 0x04,         /*  Usage (Joystick),                   */
        0xA1, 0x01,         /*  Collection (Application),           */
        0xA1, 0x02,         /*      Collection (Logical),           */
        0x85, 0x01,         /*          Report ID (1),              */
@@ -809,7 +810,7 @@ static u8 dualshock4_bt_rdesc[] = {
        0xC0                /*  End Collection                      */
 };
 
-static __u8 ps3remote_rdesc[] = {
+static u8 ps3remote_rdesc[] = {
        0x05, 0x01,          /* GUsagePage Generic Desktop */
        0x09, 0x05,          /* LUsage 0x05 [Game Pad] */
        0xA1, 0x01,          /* MCollection Application (mouse, keyboard) */
@@ -817,14 +818,18 @@ static __u8 ps3remote_rdesc[] = {
         /* Use collection 1 for joypad buttons */
         0xA1, 0x02,         /* MCollection Logical (interrelated data) */
 
-         /* Ignore the 1st byte, maybe it is used for a controller
-          * number but it's not needed for correct operation */
+         /*
+          * Ignore the 1st byte, maybe it is used for a controller
+          * number but it's not needed for correct operation
+          */
          0x75, 0x08,        /* GReportSize 0x08 [8] */
          0x95, 0x01,        /* GReportCount 0x01 [1] */
          0x81, 0x01,        /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
 
-         /* Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
-          * buttons multiple keypresses are allowed */
+         /*
+          * Bytes from 2nd to 4th are a bitmap for joypad buttons, for these
+          * buttons multiple keypresses are allowed
+          */
          0x05, 0x09,        /* GUsagePage Button */
          0x19, 0x01,        /* LUsageMinimum 0x01 [Button 1 (primary/trigger)] */
          0x29, 0x18,        /* LUsageMaximum 0x18 [Button 24] */
@@ -849,8 +854,10 @@ static __u8 ps3remote_rdesc[] = {
          0x95, 0x01,        /* GReportCount 0x01 [1] */
          0x80,              /* MInput  */
 
-         /* Ignore bytes from 6th to 11th, 6th to 10th are always constant at
-          * 0xff and 11th is for press indication */
+         /*
+          * Ignore bytes from 6th to 11th, 6th to 10th are always constant at
+          * 0xff and 11th is for press indication
+          */
          0x75, 0x08,        /* GReportSize 0x08 [8] */
          0x95, 0x06,        /* GReportCount 0x06 [6] */
          0x81, 0x01,        /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */
@@ -929,7 +936,7 @@ static const unsigned int buzz_keymap[] = {
        /*
         * The controller has 4 remote buzzers, each with one LED and 5
         * buttons.
-        * 
+        *
         * We use the mapping chosen by the controller, which is:
         *
         * Key          Offset
@@ -943,15 +950,15 @@ static const unsigned int buzz_keymap[] = {
         * So, for example, the orange button on the third buzzer is mapped to
         * BTN_TRIGGER_HAPPY14
         */
-       [ 1] = BTN_TRIGGER_HAPPY1,
-       [ 2] = BTN_TRIGGER_HAPPY2,
-       [ 3] = BTN_TRIGGER_HAPPY3,
-       [ 4] = BTN_TRIGGER_HAPPY4,
-       [ 5] = BTN_TRIGGER_HAPPY5,
-       [ 6] = BTN_TRIGGER_HAPPY6,
-       [ 7] = BTN_TRIGGER_HAPPY7,
-       [ 8] = BTN_TRIGGER_HAPPY8,
-       [ 9] = BTN_TRIGGER_HAPPY9,
+        [1] = BTN_TRIGGER_HAPPY1,
+        [2] = BTN_TRIGGER_HAPPY2,
+        [3] = BTN_TRIGGER_HAPPY3,
+        [4] = BTN_TRIGGER_HAPPY4,
+        [5] = BTN_TRIGGER_HAPPY5,
+        [6] = BTN_TRIGGER_HAPPY6,
+        [7] = BTN_TRIGGER_HAPPY7,
+        [8] = BTN_TRIGGER_HAPPY8,
+        [9] = BTN_TRIGGER_HAPPY9,
        [10] = BTN_TRIGGER_HAPPY10,
        [11] = BTN_TRIGGER_HAPPY11,
        [12] = BTN_TRIGGER_HAPPY12,
@@ -973,33 +980,33 @@ static enum power_supply_property sony_battery_props[] = {
 };
 
 struct sixaxis_led {
-       __u8 time_enabled; /* the total time the led is active (0xff means forever) */
-       __u8 duty_length;  /* how long a cycle is in deciseconds (0 means "really fast") */
-       __u8 enabled;
-       __u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */
-       __u8 duty_on;  /* % of duty_length the led is on (0xff mean 100%) */
+       u8 time_enabled; /* the total time the led is active (0xff means forever) */
+       u8 duty_length;  /* how long a cycle is in deciseconds (0 means "really fast") */
+       u8 enabled;
+       u8 duty_off; /* % of duty_length the led is off (0xff means 100%) */
+       u8 duty_on;  /* % of duty_length the led is on (0xff means 100%) */
 } __packed;
 
 struct sixaxis_rumble {
-       __u8 padding;
-       __u8 right_duration; /* Right motor duration (0xff means forever) */
-       __u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */
-       __u8 left_duration;    /* Left motor duration (0xff means forever) */
-       __u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */
+       u8 padding;
+       u8 right_duration; /* Right motor duration (0xff means forever) */
+       u8 right_motor_on; /* Right (small) motor on/off, only supports values of 0 or 1 (off/on) */
+       u8 left_duration;    /* Left motor duration (0xff means forever) */
+       u8 left_motor_force; /* left (large) motor, supports force values from 0 to 255 */
 } __packed;
 
 struct sixaxis_output_report {
-       __u8 report_id;
+       u8 report_id;
        struct sixaxis_rumble rumble;
-       __u8 padding[4];
-       __u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */
+       u8 padding[4];
+       u8 leds_bitmap; /* bitmap of enabled LEDs: LED_1 = 0x02, LED_2 = 0x04, ... */
        struct sixaxis_led led[4];    /* LEDx at (4 - x) */
        struct sixaxis_led _reserved; /* LED5, not actually soldered */
 } __packed;
 
 union sixaxis_output_report_01 {
        struct sixaxis_output_report data;
-       __u8 buf[36];
+       u8 buf[36];
 };
 
 struct motion_output_report_02 {
@@ -1028,30 +1035,30 @@ struct sony_sc {
        struct led_classdev *leds[MAX_LEDS];
        unsigned long quirks;
        struct work_struct state_worker;
-       void(*send_output_report)(struct sony_sc*);
+       void (*send_output_report)(struct sony_sc *);
        struct power_supply *battery;
        struct power_supply_desc battery_desc;
        int device_id;
-       __u8 *output_report_dmabuf;
+       u8 *output_report_dmabuf;
 
 #ifdef CONFIG_SONY_FF
-       __u8 left;
-       __u8 right;
+       u8 left;
+       u8 right;
 #endif
 
-       __u8 mac_address[6];
-       __u8 worker_initialized;
-       __u8 cable_state;
-       __u8 battery_charging;
-       __u8 battery_capacity;
-       __u8 led_state[MAX_LEDS];
-       __u8 resume_led_state[MAX_LEDS];
-       __u8 led_delay_on[MAX_LEDS];
-       __u8 led_delay_off[MAX_LEDS];
-       __u8 led_count;
+       u8 mac_address[6];
+       u8 worker_initialized;
+       u8 cable_state;
+       u8 battery_charging;
+       u8 battery_capacity;
+       u8 led_state[MAX_LEDS];
+       u8 resume_led_state[MAX_LEDS];
+       u8 led_delay_on[MAX_LEDS];
+       u8 led_delay_off[MAX_LEDS];
+       u8 led_count;
 };
 
-static __u8 *sixaxis_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *sixaxis_fixup(struct hid_device *hdev, u8 *rdesc,
                             unsigned int *rsize)
 {
        *rsize = sizeof(sixaxis_rdesc);
@@ -1072,7 +1079,7 @@ static u8 *navigation_fixup(struct hid_device *hdev, u8 *rdesc,
        return navigation_rdesc;
 }
 
-static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *ps3remote_fixup(struct hid_device *hdev, u8 *rdesc,
                             unsigned int *rsize)
 {
        *rsize = sizeof(ps3remote_rdesc);
@@ -1113,11 +1120,14 @@ static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi,
        return 1;
 }
 
-static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
+static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
                unsigned int *rsize)
 {
        struct sony_sc *sc = hid_get_drvdata(hdev);
 
+       if (sc->quirks & SINO_LITE_CONTROLLER)
+               return rdesc;
+
        /*
         * Some Sony RF receivers wrongly declare the mouse pointer as a
         * constant non-data variable.
@@ -1164,12 +1174,12 @@ static __u8 *sony_report_fixup(struct hid_device *hdev, __u8 *rdesc,
        return rdesc;
 }
 
-static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+static void sixaxis_parse_report(struct sony_sc *sc, u8 *rd, int size)
 {
-       static const __u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
+       static const u8 sixaxis_battery_capacity[] = { 0, 1, 25, 50, 75, 100 };
        unsigned long flags;
        int offset;
-       __u8 cable_state, battery_capacity, battery_charging;
+       u8 cable_state, battery_capacity, battery_charging;
 
        /*
         * The sixaxis is charging if the battery value is 0xee
@@ -1184,7 +1194,7 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
                battery_charging = !(rd[offset] & 0x01);
                cable_state = 1;
        } else {
-               __u8 index = rd[offset] <= 5 ? rd[offset] : 5;
+               u8 index = rd[offset] <= 5 ? rd[offset] : 5;
                battery_capacity = sixaxis_battery_capacity[index];
                battery_charging = 0;
                cable_state = 0;
@@ -1197,14 +1207,14 @@ static void sixaxis_parse_report(struct sony_sc *sc, __u8 *rd, int size)
        spin_unlock_irqrestore(&sc->lock, flags);
 }
 
-static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
+static void dualshock4_parse_report(struct sony_sc *sc, u8 *rd, int size)
 {
        struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
                                                struct hid_input, list);
        struct input_dev *input_dev = hidinput->input;
        unsigned long flags;
        int n, offset;
-       __u8 cable_state, battery_capacity, battery_charging;
+       u8 cable_state, battery_capacity, battery_charging;
 
        /*
         * Battery and touchpad data starts at byte 30 in the USB report and
@@ -1254,7 +1264,7 @@ static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
         * follows the data for the first.
         */
        for (n = 0; n < 2; n++) {
-               __u16 x, y;
+               u16 x, y;
 
                x = rd[offset+1] | ((rd[offset+2] & 0xF) << 8);
                y = ((rd[offset+2] & 0xF0) >> 4) | (rd[offset+3] << 4);
@@ -1270,7 +1280,7 @@ static void dualshock4_parse_report(struct sony_sc *sc, __u8 *rd, int size)
 }
 
 static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
-               __u8 *rd, int size)
+               u8 *rd, int size)
 {
        struct sony_sc *sc = hid_get_drvdata(hdev);
 
@@ -1394,7 +1404,7 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
 {
        const int buf_size =
                max(SIXAXIS_REPORT_0xF2_SIZE, SIXAXIS_REPORT_0xF5_SIZE);
-       __u8 *buf;
+       u8 *buf;
        int ret;
 
        buf = kmalloc(buf_size, GFP_KERNEL);
@@ -1420,8 +1430,10 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
        }
 
        ret = hid_hw_output_report(hdev, buf, 1);
-       if (ret < 0)
-               hid_err(hdev, "can't set operational mode: step 3\n");
+       if (ret < 0) {
+               hid_info(hdev, "can't set operational mode: step 3, ignoring\n");
+               ret = 0;
+       }
 
 out:
        kfree(buf);
@@ -1431,8 +1443,8 @@ out:
 
 static int sixaxis_set_operational_bt(struct hid_device *hdev)
 {
-       static const __u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
-       __u8 *buf;
+       static const u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
+       u8 *buf;
        int ret;
 
        buf = kmemdup(report, sizeof(report), GFP_KERNEL);
@@ -1453,7 +1465,7 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev)
  */
 static int dualshock4_set_operational_bt(struct hid_device *hdev)
 {
-       __u8 *buf;
+       u8 *buf;
        int ret;
 
        buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
@@ -1470,7 +1482,7 @@ static int dualshock4_set_operational_bt(struct hid_device *hdev)
 
 static void sixaxis_set_leds_from_id(struct sony_sc *sc)
 {
-       static const __u8 sixaxis_leds[10][4] = {
+       static const u8 sixaxis_leds[10][4] = {
                                { 0x01, 0x00, 0x00, 0x00 },
                                { 0x00, 0x01, 0x00, 0x00 },
                                { 0x00, 0x00, 0x01, 0x00 },
@@ -1497,7 +1509,7 @@ static void sixaxis_set_leds_from_id(struct sony_sc *sc)
 static void dualshock4_set_leds_from_id(struct sony_sc *sc)
 {
        /* The first 4 color/index entries match what the PS4 assigns */
-       static const __u8 color_code[7][3] = {
+       static const u8 color_code[7][3] = {
                        /* Blue   */    { 0x00, 0x00, 0x01 },
                        /* Red    */    { 0x01, 0x00, 0x00 },
                        /* Green  */    { 0x00, 0x01, 0x00 },
@@ -1525,7 +1537,7 @@ static void buzz_set_leds(struct sony_sc *sc)
                &hdev->report_enum[HID_OUTPUT_REPORT].report_list;
        struct hid_report *report = list_entry(report_list->next,
                struct hid_report, list);
-       __s32 *value = report->field[0]->value;
+       s32 *value = report->field[0]->value;
 
        BUILD_BUG_ON(MAX_LEDS < 4);
 
@@ -1619,7 +1631,7 @@ static int sony_led_blink_set(struct led_classdev *led, unsigned long *delay_on,
        struct hid_device *hdev = to_hid_device(dev);
        struct sony_sc *drv_data = hid_get_drvdata(hdev);
        int n;
-       __u8 new_on, new_off;
+       u8 new_on, new_off;
 
        if (!drv_data) {
                hid_err(hdev, "No device data\n");
@@ -1690,8 +1702,8 @@ static int sony_leds_init(struct sony_sc *sc)
        const char *name_fmt;
        static const char * const ds4_name_str[] = { "red", "green", "blue",
                                                  "global" };
-       __u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
-       __u8 use_hw_blink[MAX_LEDS] = { 0 };
+       u8 max_brightness[MAX_LEDS] = { [0 ... (MAX_LEDS - 1)] = 1 };
+       u8 use_hw_blink[MAX_LEDS] = { 0 };
 
        BUG_ON(!(sc->quirks & SONY_LED_SUPPORT));
 
@@ -1719,7 +1731,7 @@ static int sony_leds_init(struct sony_sc *sc)
                name_len = 0;
                name_fmt = "%s:%s";
        } else if (sc->quirks & NAVIGATION_CONTROLLER) {
-               static const __u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00};
+               static const u8 navigation_leds[4] = {0x01, 0x00, 0x00, 0x00};
 
                memcpy(sc->led_state, navigation_leds, sizeof(navigation_leds));
                sc->led_count = 1;
@@ -1796,7 +1808,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
        static const union sixaxis_output_report_01 default_report = {
                .buf = {
                        0x01,
-                       0x00, 0xff, 0x00, 0xff, 0x00,
+                       0x01, 0xff, 0x00, 0xff, 0x00,
                        0x00, 0x00, 0x00, 0x00, 0x00,
                        0xff, 0x27, 0x10, 0x00, 0x32,
                        0xff, 0x27, 0x10, 0x00, 0x32,
@@ -1842,7 +1854,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
                }
        }
 
-       hid_hw_raw_request(sc->hdev, report->report_id, (__u8 *)report,
+       hid_hw_raw_request(sc->hdev, report->report_id, (u8 *)report,
                        sizeof(struct sixaxis_output_report),
                        HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
 }
@@ -1850,7 +1862,7 @@ static void sixaxis_send_output_report(struct sony_sc *sc)
 static void dualshock4_send_output_report(struct sony_sc *sc)
 {
        struct hid_device *hdev = sc->hdev;
-       __u8 *buf = sc->output_report_dmabuf;
+       u8 *buf = sc->output_report_dmabuf;
        int offset;
 
        if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
@@ -1910,7 +1922,7 @@ static void motion_send_output_report(struct sony_sc *sc)
        report->rumble = max(sc->right, sc->left);
 #endif
 
-       hid_hw_output_report(hdev, (__u8 *)report, MOTION_REPORT_0x02_SIZE);
+       hid_hw_output_report(hdev, (u8 *)report, MOTION_REPORT_0x02_SIZE);
 }
 
 static inline void sony_send_output_report(struct sony_sc *sc)
@@ -1922,6 +1934,7 @@ static inline void sony_send_output_report(struct sony_sc *sc)
 static void sony_state_worker(struct work_struct *work)
 {
        struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
+
        sc->send_output_report(sc);
 }
 
@@ -2142,7 +2155,7 @@ static int sony_get_bt_devaddr(struct sony_sc *sc)
 
 static int sony_check_add(struct sony_sc *sc)
 {
-       __u8 *buf = NULL;
+       u8 *buf = NULL;
        int n, ret;
 
        if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) ||
@@ -2253,7 +2266,7 @@ static void sony_release_device_id(struct sony_sc *sc)
 }
 
 static inline void sony_init_output_report(struct sony_sc *sc,
-                               void(*send_output_report)(struct sony_sc*))
+                               void (*send_output_report)(struct sony_sc *))
 {
        sc->send_output_report = send_output_report;
 
@@ -2441,7 +2454,7 @@ static int sony_suspend(struct hid_device *hdev, pm_message_t message)
        /*
         * On suspend save the current LED state,
         * stop running force-feedback and blank the LEDS.
-         */
+        */
        if (SONY_LED_SUPPORT || SONY_FF_SUPPORT) {
                struct sony_sc *sc = hid_get_drvdata(hdev);
 
@@ -2501,8 +2514,10 @@ static const struct hid_device_id sony_devices[] = {
                .driver_data = VAIO_RDESC_CONSTANT },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE),
                .driver_data = VAIO_RDESC_CONSTANT },
-       /* Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
-        * Logitech joystick from the device descriptor. */
+       /*
+        * Wired Buzz Controller. Reported as Sony Hub from its USB ID and as
+        * Logitech joystick from the device descriptor.
+        */
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER),
                .driver_data = BUZZ_CONTROLLER },
        { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER),
@@ -2521,6 +2536,9 @@ static const struct hid_device_id sony_devices[] = {
                .driver_data = DUALSHOCK4_CONTROLLER_USB },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
                .driver_data = DUALSHOCK4_CONTROLLER_BT },
+       /* Nyko Core Controller for PS3 */
+       { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
+               .driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
        { }
 };
 MODULE_DEVICE_TABLE(hid, sony_devices);
index 7dd0953cd70f222f037c72b7c217b76eff8d2942..cb8cbdd07c4e00571562c4294a40348419263ead 100644 (file)
@@ -55,6 +55,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
 
        { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
@@ -106,6 +107,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_MOUSE_4D22, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
index 99ef77fcfb8040d9bcb0021ebb13a901a21c741e..e92e1e855a728725c66dbba1c06edc60cb2339f5 100644 (file)
@@ -580,11 +580,12 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
        struct wacom_features *features = &wacom->features;
        unsigned char *data = wacom->data;
        struct input_dev *input = wacom->pen_input;
-       int idx = 0;
+       int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
 
-       /* tool number */
-       if (features->type == INTUOS)
-               idx = data[1] & 0x01;
+       if (!(((data[1] & 0xfc) == 0xc0) ||  /* in prox */
+           ((data[1] & 0xfe) == 0x20) ||    /* in range */
+           ((data[1] & 0xfe) == 0x80)))     /* out prox */
+               return 0;
 
        /* Enter report */
        if ((data[1] & 0xfc) == 0xc0) {
@@ -612,6 +613,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                case 0x885: /* Intuos3 Marker Pen */
                case 0x802: /* Intuos4/5 13HD/24HD General Pen */
                case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
+               case 0x8e2: /* IntuosHT2 pen */
                case 0x022:
                case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
                case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
@@ -673,39 +675,23 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                        wacom->tool[idx] = BTN_TOOL_PEN;
                        break;
                }
+               wacom->shared->stylus_in_proximity = true;
                return 1;
        }
 
-       /*
-        * don't report events for invalid data
-        */
-       /* older I4 styli don't work with new Cintiqs */
-       if ((!((wacom->id[idx] >> 20) & 0x01) &&
-                       (features->type == WACOM_21UX2)) ||
-           /* Only large Intuos support Lense Cursor */
-           (wacom->tool[idx] == BTN_TOOL_LENS &&
-               (features->type == INTUOS3 ||
-                features->type == INTUOS3S ||
-                features->type == INTUOS4 ||
-                features->type == INTUOS4S ||
-                features->type == INTUOS5 ||
-                features->type == INTUOS5S ||
-                features->type == INTUOSPM ||
-                features->type == INTUOSPS)) ||
-          /* Cintiq doesn't send data when RDY bit isn't set */
-          (features->type == CINTIQ && !(data[1] & 0x40)))
-               return 1;
+       /* in Range */
+       if ((data[1] & 0xfe) == 0x20) {
+               if (features->type != INTUOSHT2)
+                       wacom->shared->stylus_in_proximity = true;
 
-       wacom->shared->stylus_in_proximity = true;
-       if (wacom->shared->touch_down)
+               /* in Range while exiting */
+               if (wacom->reporting_data) {
+                       input_report_key(input, BTN_TOUCH, 0);
+                       input_report_abs(input, ABS_PRESSURE, 0);
+                       input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
+                       return 2;
+               }
                return 1;
-
-       /* in Range while exiting */
-       if (((data[1] & 0xfe) == 0x20) && wacom->reporting_data) {
-               input_report_key(input, BTN_TOUCH, 0);
-               input_report_abs(input, ABS_PRESSURE, 0);
-               input_report_abs(input, ABS_DISTANCE, wacom->features.distance_max);
-               return 2;
        }
 
        /* Exit report */
@@ -750,13 +736,6 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
                return 2;
        }
 
-       /* don't report other events if we don't know the ID */
-       if (!wacom->id[idx]) {
-               /* but reschedule a read of the current tool */
-               wacom_intuos_schedule_prox_event(wacom);
-               return 1;
-       }
-
        return 0;
 }
 
@@ -897,6 +876,36 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
                data[0] != WACOM_REPORT_INTUOS_PEN)
                return 0;
 
+       if (wacom->shared->touch_down)
+               return 1;
+
+       /* don't report events if we don't know the tool ID */
+       if (!wacom->id[idx]) {
+               /* but reschedule a read of the current tool */
+               wacom_intuos_schedule_prox_event(wacom);
+               return 1;
+       }
+
+       /*
+        * don't report events for invalid data
+        */
+       /* older I4 styli don't work with new Cintiqs */
+       if ((!((wacom->id[idx] >> 20) & 0x01) &&
+                       (features->type == WACOM_21UX2)) ||
+           /* Only large Intuos support Lense Cursor */
+           (wacom->tool[idx] == BTN_TOOL_LENS &&
+               (features->type == INTUOS3 ||
+                features->type == INTUOS3S ||
+                features->type == INTUOS4 ||
+                features->type == INTUOS4S ||
+                features->type == INTUOS5 ||
+                features->type == INTUOS5S ||
+                features->type == INTUOSPM ||
+                features->type == INTUOSPS)) ||
+          /* Cintiq doesn't send data when RDY bit isn't set */
+          (features->type == CINTIQ && !(data[1] & 0x40)))
+               return 1;
+
        x = (be16_to_cpup((__be16 *)&data[2]) << 1) | ((data[9] >> 1) & 1);
        y = (be16_to_cpup((__be16 *)&data[4]) << 1) | (data[9] & 1);
        distance = data[9] >> 2;
index 7f82c911ad74c4893c71eefcbe4f47bdd52652a3..c000780d931f80f0c767f59c6fe93d34b94a8632 100644 (file)
@@ -281,6 +281,8 @@ static int nokia_modem_remove(struct device *dev)
 #ifdef CONFIG_OF
 static const struct of_device_id nokia_modem_of_match[] = {
        { .compatible = "nokia,n900-modem", },
+       { .compatible = "nokia,n950-modem", },
+       { .compatible = "nokia,n9-modem", },
        {},
 };
 MODULE_DEVICE_TABLE(of, nokia_modem_of_match);
index a38af68cf32683ca28f4c1200f06639b22aa9e81..6595d20912683ab9d4e41c95e5dca9fe9097a0dd 100644 (file)
@@ -521,13 +521,7 @@ static void ssip_start_rx(struct hsi_client *cl)
         * high transition. Therefore we need to ignore the second UP event.
         */
        if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) {
-               if (ssi->main_state == INIT) {
-                       ssi->main_state = HANDSHAKE;
-                       spin_unlock(&ssi->lock);
-                       ssip_send_bootinfo_req_cmd(cl);
-               } else {
-                       spin_unlock(&ssi->lock);
-               }
+               spin_unlock(&ssi->lock);
                return;
        }
        ssip_set_rxstate(ssi, RECV_READY);
@@ -671,6 +665,7 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
                ssip_error(cl);
                /* Fall through */
        case INIT:
+       case HANDSHAKE:
                spin_lock(&ssi->lock);
                ssi->main_state = HANDSHAKE;
                if (!ssi->waketest) {
@@ -688,9 +683,6 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd)
                msg->complete = ssip_release_cmd;
                hsi_async_write(cl, msg);
                break;
-       case HANDSHAKE:
-               /* Ignore */
-               break;
        default:
                dev_dbg(&cl->device, "Wrong state M(%d)\n", ssi->main_state);
                break;
@@ -939,9 +931,11 @@ static int ssip_pn_open(struct net_device *dev)
                ssi->waketest = 1;
                ssi_waketest(cl, 1); /* FIXME: To be removed */
        }
-       ssi->main_state = INIT;
+       ssi->main_state = HANDSHAKE;
        spin_unlock_bh(&ssi->lock);
 
+       ssip_send_bootinfo_req_cmd(cl);
+
        return 0;
 }
 
index 60fb80bd353d601563c57710d8e897d0af50bd30..852c8a85e1e88a3b7d8d5db2f076fd3284bbf9ef 100644 (file)
@@ -685,6 +685,20 @@ config SENSORS_LTC2945
          This driver can also be built as a module. If so, the module will
          be called ltc2945.
 
+config SENSORS_LTC2990
+       tristate "Linear Technology LTC2990 (current monitoring mode only)"
+       depends on I2C
+       help
+         If you say yes here you get support for Linear Technology LTC2990
+         I2C System Monitor. The LTC2990 supports a combination of voltage,
+         current and temperature monitoring, but in addition to the Vcc supply
+         voltage and chip temperature, this driver currently only supports
+         reading two currents by measuring two differential voltages across
+         series resistors.
+
+         This driver can also be built as a module. If so, the module will
+         be called ltc2990.
+
 config SENSORS_LTC4151
        tristate "Linear Technology LTC4151"
        depends on I2C
index 30c94df314658fc8f9dcb04c7ae05eb141c979e1..a7ecaf2f29aabeaef33b40d69555622d255520e7 100644 (file)
@@ -100,6 +100,7 @@ obj-$(CONFIG_SENSORS_LM95234)       += lm95234.o
 obj-$(CONFIG_SENSORS_LM95241)  += lm95241.o
 obj-$(CONFIG_SENSORS_LM95245)  += lm95245.o
 obj-$(CONFIG_SENSORS_LTC2945)  += ltc2945.o
+obj-$(CONFIG_SENSORS_LTC2990)  += ltc2990.o
 obj-$(CONFIG_SENSORS_LTC4151)  += ltc4151.o
 obj-$(CONFIG_SENSORS_LTC4215)  += ltc4215.o
 obj-$(CONFIG_SENSORS_LTC4222)  += ltc4222.o
@@ -149,7 +150,7 @@ obj-$(CONFIG_SENSORS_TMP103)        += tmp103.o
 obj-$(CONFIG_SENSORS_TMP401)   += tmp401.o
 obj-$(CONFIG_SENSORS_TMP421)   += tmp421.o
 obj-$(CONFIG_SENSORS_TWL4030_MADC)+= twl4030-madc-hwmon.o
-obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress.o
+obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o
 obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o
 obj-$(CONFIG_SENSORS_VIA686A)  += via686a.o
 obj-$(CONFIG_SENSORS_VT1211)   += vt1211.o
index c8487894b31236cefd761b24cac48fb4e17e6d52..c43318d3416e20a4d027a32402b1cf751718b8c6 100644 (file)
@@ -930,6 +930,17 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
 static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+       {
+               /*
+                * CPU fan speed going up and down on Dell Studio XPS 8000
+                * for unknown reasons.
+                */
+               .ident = "Dell Studio XPS 8000",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
+               },
+       },
        {
                /*
                 * CPU fan speed going up and down on Dell Studio XPS 8100
index 952fe692d7649a67d1dfe1d8d8c2c2ed8272a719..24e395c5907d4c549912072c3219bd24c389488e 100644 (file)
@@ -58,7 +58,7 @@ static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
  */
 static int apd = -1;
 module_param(apd, bint, 0);
-MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode");
+MODULE_PARM_DESC(apd, "Set to zero to disable anti-parallel diode mode");
 
 struct temperature {
        s8      degrees;
index f77eb971ce959a6d3501dfdf740e8f8f566383e5..4f695d8fcafaceab3c5d8caaa677b58e94ae9c2c 100644 (file)
@@ -90,7 +90,15 @@ static ssize_t show_power(struct device *dev,
        pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5),
                                  REG_TDP_LIMIT3, &val);
 
-       tdp_limit = val >> 16;
+       /*
+        * On Carrizo and later platforms, ApmTdpLimit bit field
+        * is extended to 16:31 from 16:28.
+        */
+       if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model >= 0x60)
+               tdp_limit = val >> 16;
+       else
+               tdp_limit = (val >> 16) & 0x1fff;
+
        curr_pwr_watts = ((u64)(tdp_limit +
                                data->base_tdp)) << running_avg_range;
        curr_pwr_watts -= running_avg_capture;
diff --git a/drivers/hwmon/ltc2990.c b/drivers/hwmon/ltc2990.c
new file mode 100644 (file)
index 0000000..8f8fe05
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Driver for Linear Technology LTC2990 power monitor
+ *
+ * Copyright (C) 2014 Topic Embedded Products
+ * Author: Mike Looijmans <mike.looijmans@topic.nl>
+ *
+ * License: GPLv2
+ *
+ * This driver assumes the chip is wired as a dual current monitor, and
+ * reports the voltage drop across two series resistors. It also reports
+ * the chip's internal temperature and Vcc power supply voltage.
+ */
+
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#define LTC2990_STATUS 0x00
+#define LTC2990_CONTROL        0x01
+#define LTC2990_TRIGGER        0x02
+#define LTC2990_TINT_MSB       0x04
+#define LTC2990_V1_MSB 0x06
+#define LTC2990_V2_MSB 0x08
+#define LTC2990_V3_MSB 0x0A
+#define LTC2990_V4_MSB 0x0C
+#define LTC2990_VCC_MSB        0x0E
+
+#define LTC2990_CONTROL_KELVIN         BIT(7)
+#define LTC2990_CONTROL_SINGLE         BIT(6)
+#define LTC2990_CONTROL_MEASURE_ALL    (0x3 << 3)
+#define LTC2990_CONTROL_MODE_CURRENT   0x06
+#define LTC2990_CONTROL_MODE_VOLTAGE   0x07
+
+/* convert raw register value to sign-extended integer in 16-bit range */
+static int ltc2990_voltage_to_int(int raw)
+{
+       if (raw & BIT(14))
+               return -(0x4000 - (raw & 0x3FFF)) << 2;
+       else
+               return (raw & 0x3FFF) << 2;
+}
+
+/* Return the converted value from the given register in uV or mC */
+static int ltc2990_get_value(struct i2c_client *i2c, u8 reg, int *result)
+{
+       int val;
+
+       val = i2c_smbus_read_word_swapped(i2c, reg);
+       if (unlikely(val < 0))
+               return val;
+
+       switch (reg) {
+       case LTC2990_TINT_MSB:
+               /* internal temp, 0.0625 degrees/LSB, 13-bit  */
+               val = (val & 0x1FFF) << 3;
+               *result = (val * 1000) >> 7;
+               break;
+       case LTC2990_V1_MSB:
+       case LTC2990_V3_MSB:
+                /* Vx-Vy, 19.42uV/LSB. Depends on mode. */
+               *result = ltc2990_voltage_to_int(val) * 1942 / (4 * 100);
+               break;
+       case LTC2990_VCC_MSB:
+               /* Vcc, 305.18μV/LSB, 2.5V offset */
+               *result = (ltc2990_voltage_to_int(val) * 30518 /
+                          (4 * 100 * 1000)) + 2500;
+               break;
+       default:
+               return -EINVAL; /* won't happen, keep compiler happy */
+       }
+
+       return 0;
+}
+
+static ssize_t ltc2990_show_value(struct device *dev,
+                                 struct device_attribute *da, char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
+       int value;
+       int ret;
+
+       ret = ltc2990_get_value(dev_get_drvdata(dev), attr->index, &value);
+       if (unlikely(ret < 0))
+               return ret;
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ltc2990_show_value, NULL,
+                         LTC2990_TINT_MSB);
+static SENSOR_DEVICE_ATTR(curr1_input, S_IRUGO, ltc2990_show_value, NULL,
+                         LTC2990_V1_MSB);
+static SENSOR_DEVICE_ATTR(curr2_input, S_IRUGO, ltc2990_show_value, NULL,
+                         LTC2990_V3_MSB);
+static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, ltc2990_show_value, NULL,
+                         LTC2990_VCC_MSB);
+
+static struct attribute *ltc2990_attrs[] = {
+       &sensor_dev_attr_temp1_input.dev_attr.attr,
+       &sensor_dev_attr_curr1_input.dev_attr.attr,
+       &sensor_dev_attr_curr2_input.dev_attr.attr,
+       &sensor_dev_attr_in0_input.dev_attr.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(ltc2990);
+
+static int ltc2990_i2c_probe(struct i2c_client *i2c,
+                            const struct i2c_device_id *id)
+{
+       int ret;
+       struct device *hwmon_dev;
+
+       if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
+                                    I2C_FUNC_SMBUS_WORD_DATA))
+               return -ENODEV;
+
+       /* Setup continuous mode, current monitor */
+       ret = i2c_smbus_write_byte_data(i2c, LTC2990_CONTROL,
+                                       LTC2990_CONTROL_MEASURE_ALL |
+                                       LTC2990_CONTROL_MODE_CURRENT);
+       if (ret < 0) {
+               dev_err(&i2c->dev, "Error: Failed to set control mode.\n");
+               return ret;
+       }
+       /* Trigger once to start continuous conversion */
+       ret = i2c_smbus_write_byte_data(i2c, LTC2990_TRIGGER, 1);
+       if (ret < 0) {
+               dev_err(&i2c->dev, "Error: Failed to start acquisition.\n");
+               return ret;
+       }
+
+       hwmon_dev = devm_hwmon_device_register_with_groups(&i2c->dev,
+                                                          i2c->name,
+                                                          i2c,
+                                                          ltc2990_groups);
+
+       return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id ltc2990_i2c_id[] = {
+       { "ltc2990", 0 },
+       {}
+};
+MODULE_DEVICE_TABLE(i2c, ltc2990_i2c_id);
+
+static struct i2c_driver ltc2990_i2c_driver = {
+       .driver = {
+               .name = "ltc2990",
+       },
+       .probe    = ltc2990_i2c_probe,
+       .id_table = ltc2990_i2c_id,
+};
+
+module_i2c_driver(ltc2990_i2c_driver);
+
+MODULE_DESCRIPTION("LTC2990 Sensor Driver");
+MODULE_AUTHOR("Topic Embedded Products");
+MODULE_LICENSE("GPL v2");
index 52f708bcf77f397952ea0f278ce5b161780e076a..d50c701b19d678e9998319be36a492bb3a8eba6d 100644 (file)
@@ -313,6 +313,10 @@ int of_hwspin_lock_get_id(struct device_node *np, int index)
                hwlock = radix_tree_deref_slot(slot);
                if (unlikely(!hwlock))
                        continue;
+               if (radix_tree_is_indirect_ptr(hwlock)) {
+                       slot = radix_tree_iter_retry(&iter);
+                       continue;
+               }
 
                if (hwlock->bank->dev->of_node == args.np) {
                        ret = 0;
index ba9732c236c52985c922a63d50bec477585af610..10fbd6d841e06aef8ae0a627f9fcf36106f5119b 100644 (file)
@@ -874,7 +874,8 @@ int i2c_dw_probe(struct dw_i2c_dev *dev)
        i2c_set_adapdata(adap, dev);
 
        i2c_dw_disable_int(dev);
-       r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, IRQF_SHARED,
+       r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr,
+                            IRQF_SHARED | IRQF_COND_SUSPEND,
                             dev_name(dev->dev), dev);
        if (r) {
                dev_err(dev->dev, "failure requesting irq %i: %d\n",
index e045985950737c429c3c3253620559f55be1244e..93f2895383ee55bb9f060cf145af92adbbeb512e 100644 (file)
@@ -137,10 +137,11 @@ static const struct dmi_system_id piix4_dmi_ibm[] = {
 };
 
 /* SB800 globals */
+static DEFINE_MUTEX(piix4_mutex_sb800);
 static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = {
-       "SDA0", "SDA2", "SDA3", "SDA4"
+       " port 0", " port 2", " port 3", " port 4"
 };
-static const char *piix4_aux_port_name_sb800 = "SDA1";
+static const char *piix4_aux_port_name_sb800 = " port 1";
 
 struct i2c_piix4_adapdata {
        unsigned short smba;
@@ -148,7 +149,6 @@ struct i2c_piix4_adapdata {
        /* SB800 */
        bool sb800_main;
        unsigned short port;
-       struct mutex *mutex;
 };
 
 static int piix4_setup(struct pci_dev *PIIX4_dev,
@@ -275,10 +275,12 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
        else
                smb_en = (aux) ? 0x28 : 0x2c;
 
+       mutex_lock(&piix4_mutex_sb800);
        outb_p(smb_en, SB800_PIIX4_SMB_IDX);
        smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
        outb_p(smb_en + 1, SB800_PIIX4_SMB_IDX);
        smba_en_hi = inb_p(SB800_PIIX4_SMB_IDX + 1);
+       mutex_unlock(&piix4_mutex_sb800);
 
        if (!smb_en) {
                smb_en_status = smba_en_lo & 0x10;
@@ -559,7 +561,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
        u8 port;
        int retval;
 
-       mutex_lock(adapdata->mutex);
+       mutex_lock(&piix4_mutex_sb800);
 
        outb_p(SB800_PIIX4_PORT_IDX, SB800_PIIX4_SMB_IDX);
        smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1);
@@ -574,7 +576,7 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
 
        outb_p(smba_en_lo, SB800_PIIX4_SMB_IDX + 1);
 
-       mutex_unlock(adapdata->mutex);
+       mutex_unlock(&piix4_mutex_sb800);
 
        return retval;
 }
@@ -625,6 +627,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS];
 static struct i2c_adapter *piix4_aux_adapter;
 
 static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
+                            bool sb800_main, unsigned short port,
                             const char *name, struct i2c_adapter **padap)
 {
        struct i2c_adapter *adap;
@@ -639,7 +642,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
 
        adap->owner = THIS_MODULE;
        adap->class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
-       adap->algo = &smbus_algorithm;
+       adap->algo = sb800_main ? &piix4_smbus_algorithm_sb800
+                               : &smbus_algorithm;
 
        adapdata = kzalloc(sizeof(*adapdata), GFP_KERNEL);
        if (adapdata == NULL) {
@@ -649,12 +653,14 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
        }
 
        adapdata->smba = smba;
+       adapdata->sb800_main = sb800_main;
+       adapdata->port = port;
 
        /* set up the sysfs linkage to our parent device */
        adap->dev.parent = &dev->dev;
 
        snprintf(adap->name, sizeof(adap->name),
-               "SMBus PIIX4 adapter %s at %04x", name, smba);
+               "SMBus PIIX4 adapter%s at %04x", name, smba);
 
        i2c_set_adapdata(adap, adapdata);
 
@@ -673,30 +679,16 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba,
 
 static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba)
 {
-       struct mutex *mutex;
        struct i2c_piix4_adapdata *adapdata;
        int port;
        int retval;
 
-       mutex = kzalloc(sizeof(*mutex), GFP_KERNEL);
-       if (mutex == NULL)
-               return -ENOMEM;
-
-       mutex_init(mutex);
-
        for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) {
-               retval = piix4_add_adapter(dev, smba,
+               retval = piix4_add_adapter(dev, smba, true, port,
                                           piix4_main_port_names_sb800[port],
                                           &piix4_main_adapters[port]);
                if (retval < 0)
                        goto error;
-
-               piix4_main_adapters[port]->algo = &piix4_smbus_algorithm_sb800;
-
-               adapdata = i2c_get_adapdata(piix4_main_adapters[port]);
-               adapdata->sb800_main = true;
-               adapdata->port = port;
-               adapdata->mutex = mutex;
        }
 
        return retval;
@@ -714,19 +706,20 @@ error:
                }
        }
 
-       kfree(mutex);
-
        return retval;
 }
 
 static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        int retval;
+       bool is_sb800 = false;
 
        if ((dev->vendor == PCI_VENDOR_ID_ATI &&
             dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
             dev->revision >= 0x40) ||
            dev->vendor == PCI_VENDOR_ID_AMD) {
+               is_sb800 = true;
+
                if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) {
                        dev_err(&dev->dev,
                        "SMBus base address index region 0x%x already in use!\n",
@@ -756,7 +749,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
                        return retval;
 
                /* Try to register main SMBus adapter, give up if we can't */
-               retval = piix4_add_adapter(dev, retval, "main",
+               retval = piix4_add_adapter(dev, retval, false, 0, "",
                                           &piix4_main_adapters[0]);
                if (retval < 0)
                        return retval;
@@ -783,7 +776,8 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (retval > 0) {
                /* Try to add the aux adapter if it exists,
                 * piix4_add_adapter will clean up if this fails */
-               piix4_add_adapter(dev, retval, piix4_aux_port_name_sb800,
+               piix4_add_adapter(dev, retval, false, 0,
+                                 is_sb800 ? piix4_aux_port_name_sb800 : "",
                                  &piix4_aux_adapter);
        }
 
@@ -798,10 +792,8 @@ static void piix4_adap_remove(struct i2c_adapter *adap)
                i2c_del_adapter(adap);
                if (adapdata->port == 0) {
                        release_region(adapdata->smba, SMBIOSIZE);
-                       if (adapdata->sb800_main) {
-                               kfree(adapdata->mutex);
+                       if (adapdata->sb800_main)
                                release_region(SB800_PIIX4_SMB_IDX, 2);
-                       }
                }
                kfree(adapdata);
                kfree(adap);
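
[Editor's note] The i2c-piix4 hunks above replace a per-adapter, heap-allocated mutex with a single driver-wide one, because every SB800 port is reached through the same index/data register pair and the index write plus data read must never interleave across ports. A minimal sketch of that access pattern, not a definitive implementation: the lock name and the SB800_PIIX4_SMB_IDX constant are taken from the hunks above, the helper name is illustrative only.

    #include <linux/types.h>
    #include <linux/mutex.h>
    #include <linux/io.h>

    static DEFINE_MUTEX(piix4_mutex_sb800);     /* one lock for the shared index/data pair */

    static u8 sb800_indexed_read(u8 index)
    {
            u8 val;

            mutex_lock(&piix4_mutex_sb800);     /* keep index write + data read atomic */
            outb_p(index, SB800_PIIX4_SMB_IDX);
            val = inb_p(SB800_PIIX4_SMB_IDX + 1);
            mutex_unlock(&piix4_mutex_sb800);

            return val;
    }

Because the lock is now static, the kzalloc/kfree bookkeeping for it in piix4_add_adapters_sb800() and piix4_adap_remove() can be dropped, which is exactly what the remaining hunks do.
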
index 9ad014a7afc797b921d7b687b474cd4faaddfeba..b33646be699cbc090c028730e03ac8ac4b72c4e9 100644 (file)
@@ -28,7 +28,6 @@
 
 #ifdef CONFIG_PPC_PMAC
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #endif
 
 #define DRV_NAME "pdc202xx_new"
index 96a345248224b16fe78a2e71fed1920c8088edaa..7f0434f7e486666be6052e1c11c98c16f8f25174 100644 (file)
@@ -40,7 +40,6 @@
 #include <asm/io.h>
 #include <asm/dbdma.h>
 #include <asm/ide.h>
-#include <asm/pci-bridge.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/sections.h>
index edc29b173f6c9012635771116a6cca23193a096e..833ea9dd4464b664e9d11103c8543961a65f5f01 100644 (file)
@@ -213,6 +213,7 @@ config STK8312
 config STK8BA50
        tristate "Sensortek STK8BA50 3-Axis Accelerometer Driver"
        depends on I2C
+       depends on IIO_TRIGGER
        help
          Say yes here to get support for the Sensortek STK8BA50 3-axis
          accelerometer.
index 605ff42c46310201e7aba3fd2f7ab42a0a873e04..283ded7747a9379be4c105ef49964d5c5d674b0f 100644 (file)
@@ -175,6 +175,7 @@ config DA9150_GPADC
 config EXYNOS_ADC
        tristate "Exynos ADC driver support"
        depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || (OF && COMPILE_TEST)
+       depends on HAS_IOMEM
        help
          Core support for the ADC block found in the Samsung EXYNOS series
          of SoCs for drivers such as the touchscreen and hwmon to use to share
@@ -207,6 +208,7 @@ config INA2XX_ADC
 config IMX7D_ADC
        tristate "IMX7D ADC driver"
        depends on ARCH_MXC || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          Say yes here to build support for IMX7D ADC.
 
@@ -409,6 +411,7 @@ config TWL6030_GPADC
 config VF610_ADC
        tristate "Freescale vf610 ADC driver"
        depends on OF
+       depends on HAS_IOMEM
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index 942320e32753c3838e967e9e0a96d4db2d691fce..c1e05532d437f263a9aa3d7f7c96147b13bfe682 100644 (file)
@@ -289,7 +289,7 @@ static int tiadc_iio_buffered_hardware_setup(struct iio_dev *indio_dev,
                goto error_kfifo_free;
 
        indio_dev->setup_ops = setup_ops;
-       indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+       indio_dev->modes |= INDIO_BUFFER_SOFTWARE;
 
        return 0;
 
index 43d14588448d65f507ab02ce22c0e01d7a9008ad..b4dde8315210c6519c7e2a7db4bd8bf3b6607988 100644 (file)
@@ -300,6 +300,7 @@ static int mcp4725_probe(struct i2c_client *client,
        data->client = client;
 
        indio_dev->dev.parent = &client->dev;
+       indio_dev->name = id->name;
        indio_dev->info = &mcp4725_info;
        indio_dev->channels = &mcp4725_channel;
        indio_dev->num_channels = 1;
index 1165b1c4f9d67bd93db24f784da5b13010d9fd99..cfc5a051ab9f3946bfdfd4aaf5aaac01f7245f36 100644 (file)
@@ -117,7 +117,7 @@ static int dht11_decode(struct dht11 *dht11, int offset, int timeres)
        if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
                return -EIO;
 
-       dht11->timestamp = ktime_get_real_ns();
+       dht11->timestamp = ktime_get_boot_ns();
        if (hum_int < 20) {  /* DHT22 */
                dht11->temperature = (((temp_int & 0x7f) << 8) + temp_dec) *
                                        ((temp_int & 0x80) ? -100 : 100);
@@ -145,7 +145,7 @@ static irqreturn_t dht11_handle_irq(int irq, void *data)
 
        /* TODO: Consider making the handler safe for IRQ sharing */
        if (dht11->num_edges < DHT11_EDGES_PER_READ && dht11->num_edges >= 0) {
-               dht11->edges[dht11->num_edges].ts = ktime_get_real_ns();
+               dht11->edges[dht11->num_edges].ts = ktime_get_boot_ns();
                dht11->edges[dht11->num_edges++].value =
                                                gpio_get_value(dht11->gpio);
 
@@ -164,7 +164,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
        int ret, timeres;
 
        mutex_lock(&dht11->lock);
-       if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_real_ns()) {
+       if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
                timeres = ktime_get_resolution_ns();
                if (DHT11_DATA_BIT_HIGH < 2 * timeres) {
                        dev_err(dht11->dev, "timeresolution %dns too low\n",
@@ -279,7 +279,7 @@ static int dht11_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       dht11->timestamp = ktime_get_real_ns() - DHT11_DATA_VALID_TIME - 1;
+       dht11->timestamp = ktime_get_boot_ns() - DHT11_DATA_VALID_TIME - 1;
        dht11->num_edges = -1;
 
        platform_set_drvdata(pdev, iio);
index cb32b593f1c55c023046d97cfc41263ea3818d89..36607d52fee065f9e2aa82f14215897e2c5b44b1 100644 (file)
@@ -43,7 +43,7 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
                return -ENOMEM;
 
        rx = adis->buffer;
-       tx = rx + indio_dev->scan_bytes;
+       tx = rx + scan_count;
 
        spi_message_init(&adis->msg);
 
index 48fbc0bc7e2a1d499015001bace2c46db2c8b440..8f8d1370ed8b9b3359d3a4f1135767a622df2c21 100644 (file)
@@ -5,9 +5,9 @@
 config INV_MPU6050_IIO
        tristate "Invensense MPU6050 devices"
        depends on I2C && SYSFS
+       depends on I2C_MUX
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
-       select I2C_MUX
        help
          This driver supports the Invensense MPU6050 devices.
          This driver can also support MPU6500 in MPU6050 compatibility mode
index 80fbbfd76faf9e1859eefb1b5dc55b2129f9858b..734a0042de0cb4265ee3145ee48f22a19b8b50af 100644 (file)
@@ -349,6 +349,8 @@ EXPORT_SYMBOL_GPL(iio_channel_get);
 
 void iio_channel_release(struct iio_channel *channel)
 {
+       if (!channel)
+               return;
        iio_device_put(channel->indio_dev);
        kfree(channel);
 }
index 60537ec0c923b98c45160d011f4a17ac932e4609..53201d99a16c8d760f4ec909c8c5fb1cba6bc8be 100644 (file)
@@ -54,7 +54,9 @@ static const struct iio_chan_spec acpi_als_channels[] = {
                        .realbits       = 32,
                        .storagebits    = 32,
                },
-               .info_mask_separate     = BIT(IIO_CHAN_INFO_RAW),
+               /* _RAW is here for backward ABI compatibility */
+               .info_mask_separate     = BIT(IIO_CHAN_INFO_RAW) |
+                                         BIT(IIO_CHAN_INFO_PROCESSED),
        },
 };
 
@@ -152,7 +154,7 @@ static int acpi_als_read_raw(struct iio_dev *indio_dev,
        s32 temp_val;
        int ret;
 
-       if (mask != IIO_CHAN_INFO_RAW)
+       if ((mask != IIO_CHAN_INFO_PROCESSED) && (mask != IIO_CHAN_INFO_RAW))
                return -EINVAL;
 
        /* we support only illumination (_ALI) so far. */
index 809a961b9a7f6d0d0077114e12767e4e267c7606..6bf89d8f374191cee48f258d1c25810b0e5dc410 100644 (file)
@@ -180,7 +180,7 @@ static const struct ltr501_samp_table ltr501_ps_samp_table[] = {
                        {500000, 2000000}
 };
 
-static unsigned int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
+static int ltr501_match_samp_freq(const struct ltr501_samp_table *tab,
                                           int len, int val, int val2)
 {
        int i, freq;
index f5ecd6e19f5de725e715e547a5527f5875050561..a0d7deeac62f78dfc416bf597e0b894436c75a8c 100644 (file)
@@ -117,7 +117,7 @@ static int mpl115_read_raw(struct iio_dev *indio_dev,
                *val = ret >> 6;
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_OFFSET:
-               *val = 605;
+               *val = -605;
                *val2 = 750000;
                return IIO_VAL_INT_PLUS_MICRO;
        case IIO_CHAN_INFO_SCALE:
index 93e29fb67fa00f94e9ca0a614addfc5cde7ebb76..db35e04a063767a9b2b6011cf3d0345c9d0929bd 100644 (file)
@@ -87,7 +87,7 @@ static int lidar_i2c_xfer(struct lidar_data *data, u8 reg, u8 *val, int len)
 
        ret = i2c_transfer(client->adapter, msg, 2);
 
-       return (ret == 2) ? 0 : ret;
+       return (ret == 2) ? 0 : -EIO;
 }
 
 static int lidar_smbus_xfer(struct lidar_data *data, u8 reg, u8 *val, int len)
index 19837d2702789e63f1d948c1abaf4bae9a28d7f6..2116132568e70e61c8465ffc9d922d732aca28ed 100644 (file)
@@ -322,6 +322,8 @@ int ib_ud_header_init(int     payload_bytes,
                      int    immediate_present,
                      struct ib_ud_header *header)
 {
+       size_t udp_bytes = udp_present ? IB_UDP_BYTES : 0;
+
        grh_present = grh_present && !ip_version;
        memset(header, 0, sizeof *header);
 
@@ -353,7 +355,8 @@ int ib_ud_header_init(int     payload_bytes,
        if (ip_version == 6 || grh_present) {
                header->grh.ip_version      = 6;
                header->grh.payload_length  =
-                       cpu_to_be16((IB_BTH_BYTES     +
+                       cpu_to_be16((udp_bytes        +
+                                    IB_BTH_BYTES     +
                                     IB_DETH_BYTES    +
                                     payload_bytes    +
                                     4                + /* ICRC     */
@@ -362,8 +365,6 @@ int ib_ud_header_init(int     payload_bytes,
        }
 
        if (ip_version == 4) {
-               int udp_bytes = udp_present ? IB_UDP_BYTES : 0;
-
                header->ip4.ver = 4; /* version 4 */
                header->ip4.hdr_len = 5; /* 5 words */
                header->ip4.tot_len =
index ec737e2287fe150ff8d5d83e6ca7042a2cca8b62..03c418ccbc982e4114681044798c075a65357b28 100644 (file)
@@ -844,6 +844,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        int err;
        int i;
        size_t reqlen;
+       size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2,
+                                    max_cqe_version);
 
        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);
@@ -854,7 +856,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
        if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
                ver = 0;
-       else if (reqlen >= sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+       else if (reqlen >= min_req_v2)
                ver = 2;
        else
                return ERR_PTR(-EINVAL);
@@ -2214,7 +2216,9 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
        dev->ib_dev.uverbs_ex_cmd_mask =
-               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
+               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE)     |
+               (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)        |
+               (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP);
 
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
index 8fb9c27485e19959a09edf3b3bdf3f7c56557732..9116bc3988a6f9338709f6120a4597794d2cf36d 100644 (file)
@@ -1036,7 +1036,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
        wq = MLX5_ADDR_OF(rqc, rqc, wq);
        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, end_padding_mode,
-                MLX5_GET64(qpc, qpc, end_padding_mode));
+                MLX5_GET(qpc, qpc, end_padding_mode));
        MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
        MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
        MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
@@ -1615,15 +1615,6 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 
        if (pd) {
                dev = to_mdev(pd->device);
-       } else {
-               /* being cautious here */
-               if (init_attr->qp_type != IB_QPT_XRC_TGT &&
-                   init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
-                       pr_warn("%s: no PD for transport %s\n", __func__,
-                               ib_qp_type_str(init_attr->qp_type));
-                       return ERR_PTR(-EINVAL);
-               }
-               dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
 
                if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
                        if (!pd->uobject) {
@@ -1634,6 +1625,15 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
                                return ERR_PTR(-EINVAL);
                        }
                }
+       } else {
+               /* being cautious here */
+               if (init_attr->qp_type != IB_QPT_XRC_TGT &&
+                   init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
+                       pr_warn("%s: no PD for transport %s\n", __func__,
+                               ib_qp_type_str(init_attr->qp_type));
+                       return ERR_PTR(-EINVAL);
+               }
+               dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
        }
 
        switch (init_attr->qp_type) {
index 5f44b66ccb86481774733960b3577e83fcdfabc0..5b0248adf4ce661a3088673afa3683cf486130e4 100644 (file)
@@ -64,7 +64,7 @@ const char *usnic_ib_qp_grp_state_to_string(enum ib_qp_state state)
        case IB_QPS_ERR:
                return "ERR";
        default:
-               return "UNKOWN STATE";
+               return "UNKNOWN STATE";
 
        }
 }
index 6727954ab74be9338e9c3d46e12a55fa3db7a6e7..e8a84d12b7fffe812cd329a88da26f6922c219af 100644 (file)
@@ -1207,7 +1207,6 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
 #else
 static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
 static void xpad_led_disconnect(struct usb_xpad *xpad) { }
-static void xpad_identify_controller(struct usb_xpad *xpad) { }
 #endif
 
 static int xpad_start_input(struct usb_xpad *xpad)
index 4d446d5085aad9b110ca7ecb05a3167f859ef12c..c01a1d648f9f087df57aafedf7eb3e56b4df2b8e 100644 (file)
@@ -235,7 +235,7 @@ struct adp5589_kpad {
        unsigned short gpimapsize;
        unsigned extend_cfg;
        bool is_adp5585;
-       bool adp5585_support_row5;
+       bool support_row5;
 #ifdef CONFIG_GPIOLIB
        unsigned char gpiomap[ADP5589_MAXGPIO];
        bool export_gpio;
@@ -485,7 +485,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
        if (kpad->extend_cfg & C4_EXTEND_CFG)
                pin_used[kpad->var->c4_extend_cfg] = true;
 
-       if (!kpad->adp5585_support_row5)
+       if (!kpad->support_row5)
                pin_used[5] = true;
 
        for (i = 0; i < kpad->var->maxgpio; i++)
@@ -884,12 +884,13 @@ static int adp5589_probe(struct i2c_client *client,
 
        switch (id->driver_data) {
        case ADP5585_02:
-               kpad->adp5585_support_row5 = true;
+               kpad->support_row5 = true;
        case ADP5585_01:
                kpad->is_adp5585 = true;
                kpad->var = &const_adp5585;
                break;
        case ADP5589:
+               kpad->support_row5 = true;
                kpad->var = &const_adp5589;
                break;
        }
index 378db10001df5067adcf4fe5bfc2b38e42098574..4401be225d64b28b8ffa798ba71cce07c8a81282 100644 (file)
@@ -304,8 +304,10 @@ static int cap11xx_init_leds(struct device *dev,
                led->cdev.brightness = LED_OFF;
 
                error = of_property_read_u32(child, "reg", &reg);
-               if (error != 0 || reg >= num_leds)
+               if (error != 0 || reg >= num_leds) {
+                       of_node_put(child);
                        return -EINVAL;
+               }
 
                led->reg = reg;
                led->priv = priv;
@@ -313,8 +315,10 @@ static int cap11xx_init_leds(struct device *dev,
                INIT_WORK(&led->work, cap11xx_led_work);
 
                error = devm_led_classdev_register(dev, &led->cdev);
-               if (error)
+               if (error) {
+                       of_node_put(child);
                        return error;
+               }
 
                priv->num_leds++;
                led++;
index d6d16fa782815481e04609771b09b32e862ce679..1f2337abcf2f333de7b10cc449aaad56aadd5369 100644 (file)
@@ -733,7 +733,7 @@ config INPUT_XEN_KBDDEV_FRONTEND
          module will be called xen-kbdfront.
 
 config INPUT_SIRFSOC_ONKEY
-       bool "CSR SiRFSoC power on/off/suspend key support"
+       tristate "CSR SiRFSoC power on/off/suspend key support"
        depends on ARCH_SIRF && OF
        default y
        help
index 9d5b89befe6fb059e593c6fe1e6265f0b3819199..ed7237f1953966c378c3822faa9a40f7c6a044dd 100644 (file)
@@ -101,7 +101,7 @@ static void sirfsoc_pwrc_close(struct input_dev *input)
 static const struct of_device_id sirfsoc_pwrc_of_match[] = {
        { .compatible = "sirf,prima2-pwrc" },
        {},
-}
+};
 MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
 
 static int sirfsoc_pwrc_probe(struct platform_device *pdev)
index 17f97e5e11e7cd2ecf52844af144b546f955c1f1..096abb4ad5cdfac410021f390de68afc212b10b9 100644 (file)
@@ -48,6 +48,16 @@ config MOUSE_PS2_ALPS
 
          If unsure, say Y.
 
+config MOUSE_PS2_BYD
+       bool "BYD PS/2 mouse protocol extension" if EXPERT
+       default y
+       depends on MOUSE_PS2
+       help
+         Say Y here if you have a BYD PS/2 touchpad connected to
+         your system.
+
+         If unsure, say Y.
+
 config MOUSE_PS2_LOGIPS2PP
        bool "Logitech PS/2++ mouse protocol extension" if EXPERT
        default y
index ee6a6e9563d486e015c91ceb259549c246368a46..6168b134937b13c60ba7b0f06be6d67162f83ec4 100644 (file)
@@ -28,6 +28,7 @@ cyapatp-objs := cyapa.o cyapa_gen3.o cyapa_gen5.o cyapa_gen6.o
 psmouse-objs := psmouse-base.o synaptics.o focaltech.o
 
 psmouse-$(CONFIG_MOUSE_PS2_ALPS)       += alps.o
+psmouse-$(CONFIG_MOUSE_PS2_BYD)                += byd.o
 psmouse-$(CONFIG_MOUSE_PS2_ELANTECH)   += elantech.o
 psmouse-$(CONFIG_MOUSE_PS2_OLPC)       += hgpk.o
 psmouse-$(CONFIG_MOUSE_PS2_LOGIPS2PP)  += logips2pp.o
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
new file mode 100644 (file)
index 0000000..9425e0f
--- /dev/null
@@ -0,0 +1,337 @@
+/*
+ * BYD TouchPad PS/2 mouse driver
+ *
+ * Copyright (C) 2015 Chris Diamand <chris@diamand.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/libps2.h>
+#include <linux/serio.h>
+
+#include "psmouse.h"
+#include "byd.h"
+
+#define PS2_Y_OVERFLOW BIT_MASK(7)
+#define PS2_X_OVERFLOW BIT_MASK(6)
+#define PS2_Y_SIGN     BIT_MASK(5)
+#define PS2_X_SIGN     BIT_MASK(4)
+#define PS2_ALWAYS_1   BIT_MASK(3)
+#define PS2_MIDDLE     BIT_MASK(2)
+#define PS2_RIGHT      BIT_MASK(1)
+#define PS2_LEFT       BIT_MASK(0)
+
+/*
+ * The touchpad reports gestures in the last byte of each packet. It can take
+ * any of the following values:
+ */
+
+/* One-finger scrolling in one of the edge scroll zones. */
+#define BYD_SCROLLUP           0xCA
+#define BYD_SCROLLDOWN         0x36
+#define BYD_SCROLLLEFT         0xCB
+#define BYD_SCROLLRIGHT                0x35
+/* Two-finger scrolling. */
+#define BYD_2DOWN              0x2B
+#define BYD_2UP                        0xD5
+#define BYD_2LEFT              0xD6
+#define BYD_2RIGHT             0x2A
+/* Pinching in or out. */
+#define BYD_ZOOMOUT            0xD8
+#define BYD_ZOOMIN             0x28
+/* Three-finger swipe. */
+#define BYD_3UP                        0xD3
+#define BYD_3DOWN              0x2D
+#define BYD_3LEFT              0xD4
+#define BYD_3RIGHT             0x2C
+/* Four-finger swipe. */
+#define BYD_4UP                        0xCD
+#define BYD_4DOWN              0x33
+
+int byd_detect(struct psmouse *psmouse, bool set_properties)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       unsigned char param[4];
+
+       param[0] = 0x03;
+       param[1] = 0x00;
+       param[2] = 0x00;
+       param[3] = 0x00;
+
+       if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+               return -1;
+       if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+               return -1;
+       if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+               return -1;
+       if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES))
+               return -1;
+       if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+               return -1;
+
+       if (param[1] != 0x03 || param[2] != 0x64)
+               return -ENODEV;
+
+       psmouse_dbg(psmouse, "BYD touchpad detected\n");
+
+       if (set_properties) {
+               psmouse->vendor = "BYD";
+               psmouse->name = "TouchPad";
+       }
+
+       return 0;
+}
+
+static psmouse_ret_t byd_process_byte(struct psmouse *psmouse)
+{
+       struct input_dev *dev = psmouse->dev;
+       u8 *pkt = psmouse->packet;
+
+       if (psmouse->pktcnt > 0 && !(pkt[0] & PS2_ALWAYS_1)) {
+               psmouse_warn(psmouse, "Always_1 bit not 1. pkt[0] = %02x\n",
+                            pkt[0]);
+               return PSMOUSE_BAD_DATA;
+       }
+
+       if (psmouse->pktcnt < psmouse->pktsize)
+               return PSMOUSE_GOOD_DATA;
+
+       /* Otherwise, a full packet has been received */
+       switch (pkt[3]) {
+       case 0: {
+               /* Standard packet */
+               /* Sign-extend if a sign bit is set. */
+               unsigned int signx = pkt[0] & PS2_X_SIGN ? ~0xFF : 0;
+               unsigned int signy = pkt[0] & PS2_Y_SIGN ? ~0xFF : 0;
+               int dx = signx | (int) pkt[1];
+               int dy = signy | (int) pkt[2];
+
+               input_report_rel(psmouse->dev, REL_X, dx);
+               input_report_rel(psmouse->dev, REL_Y, -dy);
+
+               input_report_key(psmouse->dev, BTN_LEFT, pkt[0] & PS2_LEFT);
+               input_report_key(psmouse->dev, BTN_RIGHT, pkt[0] & PS2_RIGHT);
+               input_report_key(psmouse->dev, BTN_MIDDLE, pkt[0] & PS2_MIDDLE);
+               break;
+       }
+
+       case BYD_SCROLLDOWN:
+       case BYD_2DOWN:
+               input_report_rel(dev, REL_WHEEL, -1);
+               break;
+
+       case BYD_SCROLLUP:
+       case BYD_2UP:
+               input_report_rel(dev, REL_WHEEL, 1);
+               break;
+
+       case BYD_SCROLLLEFT:
+       case BYD_2LEFT:
+               input_report_rel(dev, REL_HWHEEL, -1);
+               break;
+
+       case BYD_SCROLLRIGHT:
+       case BYD_2RIGHT:
+               input_report_rel(dev, REL_HWHEEL, 1);
+               break;
+
+       case BYD_ZOOMOUT:
+       case BYD_ZOOMIN:
+       case BYD_3UP:
+       case BYD_3DOWN:
+       case BYD_3LEFT:
+       case BYD_3RIGHT:
+       case BYD_4UP:
+       case BYD_4DOWN:
+               break;
+
+       default:
+               psmouse_warn(psmouse,
+                            "Unrecognized Z: pkt = %02x %02x %02x %02x\n",
+                            psmouse->packet[0], psmouse->packet[1],
+                            psmouse->packet[2], psmouse->packet[3]);
+               return PSMOUSE_BAD_DATA;
+       }
+
+       input_sync(dev);
+
+       return PSMOUSE_FULL_PACKET;
+}
+
+/* Send a sequence of bytes, where each is ACKed before the next is sent. */
+static int byd_send_sequence(struct psmouse *psmouse, const u8 *seq, size_t len)
+{
+       unsigned int i;
+
+       for (i = 0; i < len; ++i) {
+               if (ps2_command(&psmouse->ps2dev, NULL, seq[i]))
+                       return -1;
+       }
+       return 0;
+}
+
+/* Keep scrolling after fingers are removed. */
+#define SCROLL_INERTIAL                0x01
+#define SCROLL_NO_INERTIAL     0x02
+
+/* Clicking can be done by tapping or pressing. */
+#define CLICK_BOTH             0x01
+/* Clicking can only be done by pressing. */
+#define CLICK_PRESS_ONLY       0x02
+
+static int byd_enable(struct psmouse *psmouse)
+{
+       const u8 seq1[] = { 0xE2, 0x00, 0xE0, 0x02, 0xE0 };
+       const u8 seq2[] = {
+               0xD3, 0x01,
+               0xD0, 0x00,
+               0xD0, 0x04,
+               /* Whether clicking is done by tapping or pressing. */
+               0xD4, CLICK_PRESS_ONLY,
+               0xD5, 0x01,
+               0xD7, 0x03,
+               /* Vertical and horizontal one-finger scroll zone inertia. */
+               0xD8, SCROLL_INERTIAL,
+               0xDA, 0x05,
+               0xDB, 0x02,
+               0xE4, 0x05,
+               0xD6, 0x01,
+               0xDE, 0x04,
+               0xE3, 0x01,
+               0xCF, 0x00,
+               0xD2, 0x03,
+               /* Vertical and horizontal two-finger scrolling inertia. */
+               0xE5, SCROLL_INERTIAL,
+               0xD9, 0x02,
+               0xD9, 0x07,
+               0xDC, 0x03,
+               0xDD, 0x03,
+               0xDF, 0x03,
+               0xE1, 0x03,
+               0xD1, 0x00,
+               0xCE, 0x00,
+               0xCC, 0x00,
+               0xE0, 0x00,
+               0xE2, 0x01
+       };
+       u8 param[4];
+
+       if (byd_send_sequence(psmouse, seq1, ARRAY_SIZE(seq1)))
+               return -1;
+
+       /* Send a 0x01 command, which should return 4 bytes. */
+       if (ps2_command(&psmouse->ps2dev, param, 0x0401))
+               return -1;
+
+       if (byd_send_sequence(psmouse, seq2, ARRAY_SIZE(seq2)))
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Send the set of PS/2 commands required to make it identify as an
+ * intellimouse with 4-byte instead of 3-byte packets.
+ */
+static int byd_send_intellimouse_sequence(struct psmouse *psmouse)
+{
+       struct ps2dev *ps2dev = &psmouse->ps2dev;
+       u8 param[4];
+       int i;
+       const struct {
+               u16 command;
+               u8 arg;
+       } seq[] = {
+               { PSMOUSE_CMD_RESET_BAT, 0 },
+               { PSMOUSE_CMD_RESET_BAT, 0 },
+               { PSMOUSE_CMD_GETID, 0 },
+               { PSMOUSE_CMD_SETSCALE11, 0 },
+               { PSMOUSE_CMD_SETSCALE11, 0 },
+               { PSMOUSE_CMD_SETSCALE11, 0 },
+               { PSMOUSE_CMD_GETINFO, 0 },
+               { PSMOUSE_CMD_SETRES, 0x03 },
+               { PSMOUSE_CMD_SETRATE, 0xC8 },
+               { PSMOUSE_CMD_SETRATE, 0x64 },
+               { PSMOUSE_CMD_SETRATE, 0x50 },
+               { PSMOUSE_CMD_GETID, 0 },
+               { PSMOUSE_CMD_SETRATE, 0xC8 },
+               { PSMOUSE_CMD_SETRATE, 0xC8 },
+               { PSMOUSE_CMD_SETRATE, 0x50 },
+               { PSMOUSE_CMD_GETID, 0 },
+               { PSMOUSE_CMD_SETRATE, 0x64 },
+               { PSMOUSE_CMD_SETRES, 0x03 },
+               { PSMOUSE_CMD_ENABLE, 0 }
+       };
+
+       memset(param, 0, sizeof(param));
+       for (i = 0; i < ARRAY_SIZE(seq); ++i) {
+               param[0] = seq[i].arg;
+               if (ps2_command(ps2dev, param, seq[i].command))
+                       return -1;
+       }
+
+       return 0;
+}
+
+static int byd_reset_touchpad(struct psmouse *psmouse)
+{
+       if (byd_send_intellimouse_sequence(psmouse))
+               return -EIO;
+
+       if (byd_enable(psmouse))
+               return -EIO;
+
+       return 0;
+}
+
+static int byd_reconnect(struct psmouse *psmouse)
+{
+       int retry = 0, error = 0;
+
+       psmouse_dbg(psmouse, "Reconnect\n");
+       do {
+               psmouse_reset(psmouse);
+               if (retry)
+                       ssleep(1);
+               error = byd_detect(psmouse, 0);
+       } while (error && ++retry < 3);
+
+       if (error)
+               return error;
+
+       psmouse_dbg(psmouse, "Reconnected after %d attempts\n", retry);
+
+       error = byd_reset_touchpad(psmouse);
+       if (error) {
+               psmouse_err(psmouse, "Unable to initialize device\n");
+               return error;
+       }
+
+       return 0;
+}
+
+int byd_init(struct psmouse *psmouse)
+{
+       struct input_dev *dev = psmouse->dev;
+
+       if (psmouse_reset(psmouse))
+               return -EIO;
+
+       if (byd_reset_touchpad(psmouse))
+               return -EIO;
+
+       psmouse->reconnect = byd_reconnect;
+       psmouse->protocol_handler = byd_process_byte;
+       psmouse->pktsize = 4;
+       psmouse->resync_time = 0;
+
+       __set_bit(BTN_MIDDLE, dev->keybit);
+       __set_bit(REL_WHEEL, dev->relbit);
+       __set_bit(REL_HWHEEL, dev->relbit);
+
+       return 0;
+}
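
[Editor's note] byd_process_byte() above sign-extends the X/Y deltas by OR-ing ~0xFF into the 8-bit payload whenever the corresponding sign bit in byte 0 is set, which is the usual way to recover the 9-bit two's-complement movement values a PS/2 mouse reports. A self-contained illustration of just that step, assuming nothing beyond standard C; ps2_delta is a hypothetical helper, not part of the driver.

    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild a signed PS/2 movement delta from its low 8 bits and the sign bit. */
    static int ps2_delta(uint8_t low, int sign_bit_set)
    {
            /* When the sign bit is set, fill the upper bits so the value goes negative. */
            return (sign_bit_set ? ~0xFF : 0) | (int)low;
    }

    int main(void)
    {
            printf("%d\n", ps2_delta(0xFE, 1));  /* prints -2 */
            printf("%d\n", ps2_delta(0x05, 0));  /* prints 5  */
            return 0;
    }
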
diff --git a/drivers/input/mouse/byd.h b/drivers/input/mouse/byd.h
new file mode 100644 (file)
index 0000000..d6c120c
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _BYD_H
+#define _BYD_H
+
+#ifdef CONFIG_MOUSE_PS2_BYD
+int byd_detect(struct psmouse *psmouse, bool set_properties);
+int byd_init(struct psmouse *psmouse);
+#else
+static inline int byd_detect(struct psmouse *psmouse, bool set_properties)
+{
+       return -ENOSYS;
+}
+static inline int byd_init(struct psmouse *psmouse)
+{
+       return -ENOSYS;
+}
+#endif /* CONFIG_MOUSE_PS2_BYD */
+
+#endif /* _BYD_H */
index b9e4ee34c132df58525a1969346520325c6a2490..39d1becd35c909d1c173a080b2d88ffb1b4be152 100644 (file)
@@ -37,6 +37,7 @@
 #include "cypress_ps2.h"
 #include "focaltech.h"
 #include "vmmouse.h"
+#include "byd.h"
 
 #define DRIVER_DESC    "PS/2 mouse driver"
 
@@ -841,6 +842,15 @@ static const struct psmouse_protocol psmouse_protocols[] = {
                .detect         = vmmouse_detect,
                .init           = vmmouse_init,
        },
+#endif
+#ifdef CONFIG_MOUSE_PS2_BYD
+       {
+               .type           = PSMOUSE_BYD,
+               .name           = "BydPS/2",
+               .alias          = "byd",
+               .detect         = byd_detect,
+               .init           = byd_init,
+       },
 #endif
        {
                .type           = PSMOUSE_AUTO,
@@ -1105,6 +1115,10 @@ static int psmouse_extensions(struct psmouse *psmouse,
                if (psmouse_try_protocol(psmouse, PSMOUSE_TOUCHKIT_PS2,
                                         &max_proto, set_properties, true))
                        return PSMOUSE_TOUCHKIT_PS2;
+
+               if (psmouse_try_protocol(psmouse, PSMOUSE_BYD,
+                                        &max_proto, set_properties, true))
+                       return PSMOUSE_BYD;
        }
 
        /*
index ad5a5a1ea872cd17389d623e109e0299b90f1429..e0ca6cda3d166ad2d5b0e14d63ccb5fb257727ce 100644 (file)
@@ -104,6 +104,7 @@ enum psmouse_type {
        PSMOUSE_CYPRESS,
        PSMOUSE_FOCALTECH,
        PSMOUSE_VMMOUSE,
+       PSMOUSE_BYD,
        PSMOUSE_AUTO            /* This one should always be last */
 };
 
index e272f06258cefb3c2119058298ad10f2cb77b7ce..a3f0f5a47490e936e31b45594861d692503429b1 100644 (file)
@@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
        priv->abs_dev = abs_dev;
        psmouse->private = priv;
 
-       input_set_capability(rel_dev, EV_REL, REL_WHEEL);
-
        /* Set up and register absolute device */
        snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
                 psmouse->ps2dev.serio->phys);
@@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
        abs_dev->id.version = psmouse->model;
        abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
 
-       error = input_register_device(priv->abs_dev);
-       if (error)
-               goto init_fail;
-
        /* Set absolute device capabilities */
        input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
        input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
@@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
        input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
        input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
 
+       error = input_register_device(priv->abs_dev);
+       if (error)
+               goto init_fail;
+
+       /* Add wheel capability to the relative device */
+       input_set_capability(rel_dev, EV_REL, REL_WHEEL);
+
        psmouse->protocol_handler = vmmouse_process_byte;
        psmouse->disconnect = vmmouse_disconnect;
        psmouse->reconnect = vmmouse_reconnect;
index 8f828975ab10b03746e700dd26dce1cb03d2c17e..1ca7f551e2dabe73896f780ec53e4c6735d13019 100644 (file)
@@ -134,7 +134,7 @@ static void serio_find_driver(struct serio *serio)
        int error;
 
        error = device_attach(&serio->dev);
-       if (error < 0)
+       if (error < 0 && error != -EPROBE_DEFER)
                dev_warn(&serio->dev,
                         "device_attach() failed for %s (%s), error: %d\n",
                         serio->phys, serio->name, error);
index 66c62641b59a31cfb7622dd5be14729b62176dc9..fba4d5665857221bf657db7bff1c5578fc48e4be 100644 (file)
@@ -334,7 +334,7 @@ config TOUCHSCREEN_FUJITSU
 config TOUCHSCREEN_GOODIX
        tristate "Goodix I2C touchscreen"
        depends on I2C
-       depends on GPIOLIB
+       depends on GPIOLIB || COMPILE_TEST
        help
          Say Y here if you have the Goodix touchscreen (such as one
          installed in Onda v975w tablets) connected to your
@@ -1112,7 +1112,8 @@ config TOUCHSCREEN_ZFORCE
 
 config TOUCHSCREEN_COLIBRI_VF50
        tristate "Toradex Colibri on board touchscreen driver"
-       depends on GPIOLIB && IIO && VF610_ADC
+       depends on IIO && VF610_ADC
+       depends on GPIOLIB || COMPILE_TEST
        help
          Say Y here if you have a Colibri VF50 and plan to use
          the on-board provided 4-wire touchscreen driver.
index 5d4903a402cc6a5f183010ded6e7af17c1136a49..69828d015d45ffa4747368e79f6fa5f47108c0c9 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
index 5b74e8b84e79369ce12dda3c0392ac3d61a5a581..91cda8f8119d455388b15a34ed962906bd28ba3c 100644 (file)
 #include <linux/delay.h>
 #include <linux/input.h>
 #include <linux/input/mt.h>
+#include <linux/input/touchscreen.h>
 #include <linux/gpio.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/property.h>
+#include <linux/gpio/consumer.h>
 
 #include "cyttsp_core.h"
 
@@ -57,6 +60,7 @@
 #define CY_DELAY_DFLT                  20 /* ms */
 #define CY_DELAY_MAX                   500
 #define CY_ACT_DIST_DFLT               0xF8
+#define CY_ACT_DIST_MASK               0x0F
 #define CY_HNDSHK_BIT                  0x80
 /* device mode bits */
 #define CY_OPERATE_MODE                        0x00
@@ -120,7 +124,7 @@ static int ttsp_send_command(struct cyttsp *ts, u8 cmd)
 
 static int cyttsp_handshake(struct cyttsp *ts)
 {
-       if (ts->pdata->use_hndshk)
+       if (ts->use_hndshk)
                return ttsp_send_command(ts,
                                ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
 
@@ -142,9 +146,9 @@ static int cyttsp_exit_bl_mode(struct cyttsp *ts)
        u8 bl_cmd[sizeof(bl_command)];
 
        memcpy(bl_cmd, bl_command, sizeof(bl_command));
-       if (ts->pdata->bl_keys)
+       if (ts->bl_keys)
                memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
-                       ts->pdata->bl_keys, CY_NUM_BL_KEYS);
+                       ts->bl_keys, CY_NUM_BL_KEYS);
 
        error = ttsp_write_block_data(ts, CY_REG_BASE,
                                      sizeof(bl_cmd), bl_cmd);
@@ -217,14 +221,14 @@ static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
 {
        int retval = 0;
 
-       if (ts->pdata->act_intrvl != CY_ACT_INTRVL_DFLT ||
-           ts->pdata->tch_tmout != CY_TCH_TMOUT_DFLT ||
-           ts->pdata->lp_intrvl != CY_LP_INTRVL_DFLT) {
+       if (ts->act_intrvl != CY_ACT_INTRVL_DFLT ||
+           ts->tch_tmout != CY_TCH_TMOUT_DFLT ||
+           ts->lp_intrvl != CY_LP_INTRVL_DFLT) {
 
                u8 intrvl_ray[] = {
-                       ts->pdata->act_intrvl,
-                       ts->pdata->tch_tmout,
-                       ts->pdata->lp_intrvl
+                       ts->act_intrvl,
+                       ts->tch_tmout,
+                       ts->lp_intrvl
                };
 
                /* set intrvl registers */
@@ -236,6 +240,16 @@ static int cyttsp_set_sysinfo_regs(struct cyttsp *ts)
        return retval;
 }
 
+static void cyttsp_hard_reset(struct cyttsp *ts)
+{
+       if (ts->reset_gpio) {
+               gpiod_set_value_cansleep(ts->reset_gpio, 1);
+               msleep(CY_DELAY_DFLT);
+               gpiod_set_value_cansleep(ts->reset_gpio, 0);
+               msleep(CY_DELAY_DFLT);
+       }
+}
+
 static int cyttsp_soft_reset(struct cyttsp *ts)
 {
        unsigned long timeout;
@@ -263,7 +277,7 @@ out:
 
 static int cyttsp_act_dist_setup(struct cyttsp *ts)
 {
-       u8 act_dist_setup = ts->pdata->act_dist;
+       u8 act_dist_setup = ts->act_dist;
 
        /* Init gesture; active distance setup */
        return ttsp_write_block_data(ts, CY_REG_ACT_DIST,
@@ -528,45 +542,110 @@ static void cyttsp_close(struct input_dev *dev)
                cyttsp_disable(ts);
 }
 
+static int cyttsp_parse_properties(struct cyttsp *ts)
+{
+       struct device *dev = ts->dev;
+       u32 dt_value;
+       int ret;
+
+       ts->bl_keys = devm_kzalloc(dev, CY_NUM_BL_KEYS, GFP_KERNEL);
+       if (!ts->bl_keys)
+               return -ENOMEM;
+
+       /* Set some default values */
+       ts->use_hndshk = false;
+       ts->act_dist = CY_ACT_DIST_DFLT;
+       ts->act_intrvl = CY_ACT_INTRVL_DFLT;
+       ts->tch_tmout = CY_TCH_TMOUT_DFLT;
+       ts->lp_intrvl = CY_LP_INTRVL_DFLT;
+
+       ret = device_property_read_u8_array(dev, "bootloader-key",
+                                           ts->bl_keys, CY_NUM_BL_KEYS);
+       if (ret) {
+               dev_err(dev,
+                       "bootloader-key property could not be retrieved\n");
+               return ret;
+       }
+
+       ts->use_hndshk = device_property_present(dev, "use-handshake");
+
+       if (!device_property_read_u32(dev, "active-distance", &dt_value)) {
+               if (dt_value > 15) {
+                       dev_err(dev, "active-distance (%u) must be [0-15]\n",
+                               dt_value);
+                       return -EINVAL;
+               }
+               ts->act_dist &= ~CY_ACT_DIST_MASK;
+               ts->act_dist |= dt_value;
+       }
+
+       if (!device_property_read_u32(dev, "active-interval-ms", &dt_value)) {
+               if (dt_value > 255) {
+                       dev_err(dev, "active-interval-ms (%u) must be [0-255]\n",
+                               dt_value);
+                       return -EINVAL;
+               }
+               ts->act_intrvl = dt_value;
+       }
+
+       if (!device_property_read_u32(dev, "lowpower-interval-ms", &dt_value)) {
+               if (dt_value > 2550) {
+                       dev_err(dev, "lowpower-interval-ms (%u) must be [0-2550]\n",
+                               dt_value);
+                       return -EINVAL;
+               }
+               /* Register value is expressed in 0.01s / bit */
+               ts->lp_intrvl = dt_value / 10;
+       }
+
+       if (!device_property_read_u32(dev, "touch-timeout-ms", &dt_value)) {
+               if (dt_value > 2550) {
+                       dev_err(dev, "touch-timeout-ms (%u) must be [0-2550]\n",
+                               dt_value);
+                       return -EINVAL;
+               }
+               /* Register value is expressed in 0.01s / bit */
+               ts->tch_tmout = dt_value / 10;
+       }
+
+       return 0;
+}
+
 struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
                            struct device *dev, int irq, size_t xfer_buf_size)
 {
-       const struct cyttsp_platform_data *pdata = dev_get_platdata(dev);
        struct cyttsp *ts;
        struct input_dev *input_dev;
        int error;
 
-       if (!pdata || !pdata->name || irq <= 0) {
-               error = -EINVAL;
-               goto err_out;
-       }
+       ts = devm_kzalloc(dev, sizeof(*ts) + xfer_buf_size, GFP_KERNEL);
+       if (!ts)
+               return ERR_PTR(-ENOMEM);
 
-       ts = kzalloc(sizeof(*ts) + xfer_buf_size, GFP_KERNEL);
-       input_dev = input_allocate_device();
-       if (!ts || !input_dev) {
-               error = -ENOMEM;
-               goto err_free_mem;
-       }
+       input_dev = devm_input_allocate_device(dev);
+       if (!input_dev)
+               return ERR_PTR(-ENOMEM);
 
        ts->dev = dev;
        ts->input = input_dev;
-       ts->pdata = dev_get_platdata(dev);
        ts->bus_ops = bus_ops;
        ts->irq = irq;
 
+       ts->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+       if (IS_ERR(ts->reset_gpio)) {
+               error = PTR_ERR(ts->reset_gpio);
+               dev_err(dev, "Failed to request reset gpio, error %d\n", error);
+               return ERR_PTR(error);
+       }
+
+       error = cyttsp_parse_properties(ts);
+       if (error)
+               return ERR_PTR(error);
+
        init_completion(&ts->bl_ready);
        snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(dev));
 
-       if (pdata->init) {
-               error = pdata->init();
-               if (error) {
-                       dev_err(ts->dev, "platform init failed, err: %d\n",
-                               error);
-                       goto err_free_mem;
-               }
-       }
-
-       input_dev->name = pdata->name;
+       input_dev->name = "Cypress TTSP TouchScreen";
        input_dev->phys = ts->phys;
        input_dev->id.bustype = bus_ops->bustype;
        input_dev->dev.parent = ts->dev;
@@ -576,63 +655,44 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
 
        input_set_drvdata(input_dev, ts);
 
-       __set_bit(EV_ABS, input_dev->evbit);
-       input_set_abs_params(input_dev, ABS_MT_POSITION_X,
-                            0, pdata->maxx, 0, 0);
-       input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
-                            0, pdata->maxy, 0, 0);
-       input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
-                            0, CY_MAXZ, 0, 0);
+       input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_X);
+       input_set_capability(input_dev, EV_ABS, ABS_MT_POSITION_Y);
+       touchscreen_parse_properties(input_dev, true);
 
-       input_mt_init_slots(input_dev, CY_MAX_ID, 0);
+       error = input_mt_init_slots(input_dev, CY_MAX_ID, 0);
+       if (error) {
+               dev_err(dev, "Unable to init MT slots.\n");
+               return ERR_PTR(error);
+       }
 
-       error = request_threaded_irq(ts->irq, NULL, cyttsp_irq,
-                                    IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-                                    pdata->name, ts);
+       error = devm_request_threaded_irq(dev, ts->irq, NULL, cyttsp_irq,
+                                         IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                                         "cyttsp", ts);
        if (error) {
                dev_err(ts->dev, "failed to request IRQ %d, err: %d\n",
                        ts->irq, error);
-               goto err_platform_exit;
+               return ERR_PTR(error);
        }
 
        disable_irq(ts->irq);
 
+       cyttsp_hard_reset(ts);
+
        error = cyttsp_power_on(ts);
        if (error)
-               goto err_free_irq;
+               return ERR_PTR(error);
 
        error = input_register_device(input_dev);
        if (error) {
                dev_err(ts->dev, "failed to register input device: %d\n",
                        error);
-               goto err_free_irq;
+               return ERR_PTR(error);
        }
 
        return ts;
-
-err_free_irq:
-       free_irq(ts->irq, ts);
-err_platform_exit:
-       if (pdata->exit)
-               pdata->exit();
-err_free_mem:
-       input_free_device(input_dev);
-       kfree(ts);
-err_out:
-       return ERR_PTR(error);
 }
 EXPORT_SYMBOL_GPL(cyttsp_probe);
 
-void cyttsp_remove(struct cyttsp *ts)
-{
-       free_irq(ts->irq, ts);
-       input_unregister_device(ts->input);
-       if (ts->pdata->exit)
-               ts->pdata->exit();
-       kfree(ts);
-}
-EXPORT_SYMBOL_GPL(cyttsp_remove);
-
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cypress TrueTouch(R) Standard touchscreen driver core");
 MODULE_AUTHOR("Cypress");
index 07074110a902136eeb53dad02260302d7f5637c9..7835e2bacf5a241a9e1cffce4bd72dcdf8a69c85 100644 (file)
@@ -129,7 +129,6 @@ struct cyttsp {
        int irq;
        struct input_dev *input;
        char phys[32];
-       const struct cyttsp_platform_data *pdata;
        const struct cyttsp_bus_ops *bus_ops;
        struct cyttsp_bootloader_data bl_data;
        struct cyttsp_sysinfo_data sysinfo_data;
@@ -138,12 +137,19 @@ struct cyttsp {
        enum cyttsp_state state;
        bool suspended;
 
+       struct gpio_desc *reset_gpio;
+       bool use_hndshk;
+       u8 act_dist;
+       u8 act_intrvl;
+       u8 tch_tmout;
+       u8 lp_intrvl;
+       u8 *bl_keys;
+
        u8 xfer_buf[] ____cacheline_aligned;
 };
 
 struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops,
                            struct device *dev, int irq, size_t xfer_buf_size);
-void cyttsp_remove(struct cyttsp *ts);
 
 int cyttsp_i2c_write_block_data(struct device *dev, u8 *xfer_buf, u16 addr,
                u8 length, const void *values);
index eee51b3f2e3f39382e3c421226b78de5e0c76e38..1edfdba96ede4184e737c1b93d92c228941784e5 100644 (file)
@@ -56,15 +56,6 @@ static int cyttsp_i2c_probe(struct i2c_client *client,
        return 0;
 }
 
-static int cyttsp_i2c_remove(struct i2c_client *client)
-{
-       struct cyttsp *ts = i2c_get_clientdata(client);
-
-       cyttsp_remove(ts);
-
-       return 0;
-}
-
 static const struct i2c_device_id cyttsp_i2c_id[] = {
        { CY_I2C_NAME, 0 },
        { }
@@ -77,7 +68,6 @@ static struct i2c_driver cyttsp_i2c_driver = {
                .pm     = &cyttsp_pm_ops,
        },
        .probe          = cyttsp_i2c_probe,
-       .remove         = cyttsp_i2c_remove,
        .id_table       = cyttsp_i2c_id,
 };
 
index bbeeb2488b579f5ca380b54f411f21f0cdb65281..3c9d18b1b6efde5ee807ee8933ddbeda43bc747f 100644 (file)
@@ -170,22 +170,12 @@ static int cyttsp_spi_probe(struct spi_device *spi)
        return 0;
 }
 
-static int cyttsp_spi_remove(struct spi_device *spi)
-{
-       struct cyttsp *ts = spi_get_drvdata(spi);
-
-       cyttsp_remove(ts);
-
-       return 0;
-}
-
 static struct spi_driver cyttsp_spi_driver = {
        .driver = {
                .name   = CY_SPI_NAME,
                .pm     = &cyttsp_pm_ops,
        },
        .probe  = cyttsp_spi_probe,
-       .remove = cyttsp_spi_remove,
 };
 
 module_spi_driver(cyttsp_spi_driver);
index 0b0f8c17f3f7e0f4df1e69144d693b46741f4025..23fbe382da8b420791a1fec28962454132d4e7ef 100644 (file)
@@ -822,16 +822,22 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
        int error;
 
        error = device_property_read_u32(dev, "threshold", &val);
-       if (!error)
-               reg_addr->reg_threshold = val;
+       if (!error) {
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val);
+               tsdata->threshold = val;
+       }
 
        error = device_property_read_u32(dev, "gain", &val);
-       if (!error)
-               reg_addr->reg_gain = val;
+       if (!error) {
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val);
+               tsdata->gain = val;
+       }
 
        error = device_property_read_u32(dev, "offset", &val);
-       if (!error)
-               reg_addr->reg_offset = val;
+       if (!error) {
+               edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
+               tsdata->offset = val;
+       }
 }
 
 static void
index 515c20a6e10fa084e3ce39be145d25e421a86017..73861ad22df43b83b75f28c3bf64cb67160ffd41 100644 (file)
@@ -848,7 +848,7 @@ static int wdt87xx_do_update_firmware(struct i2c_client *client,
        error = wdt87xx_get_sysparam(client, &wdt->param);
        if (error)
                dev_err(&client->dev,
-                       "failed to refresh system paramaters: %d\n", error);
+                       "failed to refresh system parameters: %d\n", error);
 out:
        enable_irq(client->irq);
        mutex_unlock(&wdt->fw_mutex);
index 539b0dea8034b5d5b61e93f5927b353448197c67..e5e223938eecc9f013e33977b1de50bbb3cfae3d 100644 (file)
@@ -2049,7 +2049,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
        /* Update device table */
        set_dte_entry(dev_data->devid, domain, ats);
        if (alias != dev_data->devid)
-               set_dte_entry(dev_data->devid, domain, ats);
+               set_dte_entry(alias, domain, ats);
 
        device_flush_dte(dev_data);
 }
index 62a400c5ba0614fde00d18ad2207b430d99433ee..fb092f3f11cb3e32ec8462925587e9778f862a25 100644 (file)
@@ -1353,7 +1353,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
 
        raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-       sts =  dmar_readq(iommu->reg + DMAR_GSTS_REG);
+       sts =  readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_QIES))
                goto end;
 
index ac7387686ddc7b2a7c7757f2cb3fbd003c8a23af..986a53e3eb96b4bb56faedfdb36b4bd658aacb99 100644 (file)
@@ -1489,7 +1489,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
        struct pci_dev *pdev;
 
-       if (dev_is_pci(info->dev))
+       if (!dev_is_pci(info->dev))
                return;
 
        pdev = to_pci_dev(info->dev);
index 50464833d0b84732a4d397cee8b73ed43a721712..97a818992d6ddcd16d62d29db693b131445cd0b7 100644 (file)
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
        struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+       struct intel_svm_dev *sdev;
 
+       /* This might end up being called from exit_mmap(), *before* the page
+        * tables are cleared. And __mmu_notifier_release() will delete us from
+        * the list of notifiers so that our invalidate_range() callback doesn't
+        * get called when the page tables are cleared. So we need to protect
+        * against hardware accessing those page tables.
+        *
+        * We do it by clearing the entry in the PASID table and then flushing
+        * the IOTLB and the PASID table caches. This might upset hardware;
+        * perhaps we'll want to point the PASID to a dummy PGD (like the zero
+        * page) so that we end up taking a fault that the hardware really
+        * *has* to handle gracefully without affecting other processes.
+        */
        svm->iommu->pasid_table[svm->pasid].val = 0;
+       wmb();
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sdev, &svm->devs, list) {
+               intel_flush_pasid_dev(svm, sdev, svm->pasid);
+               intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+       }
+       rcu_read_unlock();
 
-       /* There's no need to do any flush because we can't get here if there
-        * are any devices left anyway. */
-       WARN_ON(!list_empty(&svm->devs));
 }
 
 static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
                                goto out;
                        }
                        iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
-                       mm = NULL;
                } else
                        iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
                wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
                                kfree_rcu(sdev, rcu);
 
                                if (list_empty(&svm->devs)) {
-                                       mmu_notifier_unregister(&svm->notifier, svm->mm);
 
                                        idr_remove(&svm->iommu->pasid_idr, svm->pasid);
                                        if (svm->mm)
-                                               mmput(svm->mm);
+                                               mmu_notifier_unregister(&svm->notifier, svm->mm);
+
                                        /* We mandate that no page faults may be outstanding
                                         * for the PASID when intel_svm_unbind_mm() is called.
                                         * If that is not obeyed, subtle errors will happen.
@@ -551,6 +568,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                 * any faults on kernel addresses. */
                if (!svm->mm)
                        goto bad_req;
+               /* If the mm is already defunct, don't handle faults. */
+               if (!atomic_inc_not_zero(&svm->mm->mm_users))
+                       goto bad_req;
                down_read(&svm->mm->mmap_sem);
                vma = find_extend_vma(svm->mm, address);
                if (!vma || address < vma->vm_start)
@@ -567,6 +587,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
                result = QI_RESP_SUCCESS;
        invalid:
                up_read(&svm->mm->mmap_sem);
+               mmput(svm->mm);
        bad_req:
                /* Accounting for major/minor faults? */
                rcu_read_lock();
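
The two additions above pin the address space only while it is still live: atomic_inc_not_zero() on mm_users fails once the mm is already being torn down, and the mmput() added after the mmap_sem section drops the reference again. A minimal sketch of the pattern for some struct mm_struct *mm (later kernels wrap the same test as mmget_not_zero()):

        if (!atomic_inc_not_zero(&mm->mm_users))
                return;                         /* mm already defunct, do not touch it */
        down_read(&mm->mmap_sem);
        /* ... look up the VMA and resolve the fault ... */
        up_read(&mm->mmap_sem);
        mmput(mm);                              /* drop the reference taken above */
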
index c12ba4516df25b7201731b44ef4a175c78f2d0e0..ac596928f6b40af32e9c44ecf388ebf7ed4b10ed 100644 (file)
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
 
        raw_spin_lock_irqsave(&iommu->register_lock, flags);
 
-       sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+       sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (!(sts & DMA_GSTS_IRES))
                goto end;
 
index 8bbcbfe7695cf82aabda359b7a9036527e4951b3..381ca5a37a7b7de9b338236b28202c6c46f306df 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/types.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/barrier.h>
 
index ebf0adb8e7ea729f5cab436de29a8e18afd8d78c..a6f593a0a29eda65c1f383dbe1700c13a8e9be27 100644 (file)
@@ -86,7 +86,8 @@ struct rk_iommu_domain {
 
 struct rk_iommu {
        struct device *dev;
-       void __iomem *base;
+       void __iomem **bases;
+       int num_mmu;
        int irq;
        struct list_head node; /* entry in rk_iommu_domain.iommus */
        struct iommu_domain *domain; /* domain to which iommu is attached */
@@ -271,47 +272,70 @@ static u32 rk_iova_page_offset(dma_addr_t iova)
        return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
 }
 
-static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+static u32 rk_iommu_read(void __iomem *base, u32 offset)
 {
-       return readl(iommu->base + offset);
+       return readl(base + offset);
 }
 
-static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
 {
-       writel(value, iommu->base + offset);
+       writel(value, base + offset);
 }
 
 static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
 {
-       writel(command, iommu->base + RK_MMU_COMMAND);
+       int i;
+
+       for (i = 0; i < iommu->num_mmu; i++)
+               writel(command, iommu->bases[i] + RK_MMU_COMMAND);
 }
 
+static void rk_iommu_base_command(void __iomem *base, u32 command)
+{
+       writel(command, base + RK_MMU_COMMAND);
+}
 static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
                               size_t size)
 {
+       int i;
+
        dma_addr_t iova_end = iova + size;
        /*
         * TODO(djkurtz): Figure out when it is more efficient to shootdown the
         * entire iotlb rather than iterate over individual iovas.
         */
-       for (; iova < iova_end; iova += SPAGE_SIZE)
-               rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+       for (i = 0; i < iommu->num_mmu; i++)
+               for (; iova < iova_end; iova += SPAGE_SIZE)
+                       rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
 }
 
 static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
 {
-       return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+       bool active = true;
+       int i;
+
+       for (i = 0; i < iommu->num_mmu; i++)
+               active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+                                       RK_MMU_STATUS_STALL_ACTIVE;
+
+       return active;
 }
 
 static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
 {
-       return rk_iommu_read(iommu, RK_MMU_STATUS) &
-                            RK_MMU_STATUS_PAGING_ENABLED;
+       bool enable = true;
+       int i;
+
+       for (i = 0; i < iommu->num_mmu; i++)
+               enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
+                                       RK_MMU_STATUS_PAGING_ENABLED;
+
+       return enable;
 }
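
One subtlety in the accumulations above: active and enable are bool, so the masked status word is ANDed into a 0/1 value and the result only comes out as intended if the status flag happens to occupy bit 0. The usual kernel idiom normalizes the test with !! first; an illustrative variant of the stall check, not part of this patch:

        for (i = 0; i < iommu->num_mmu; i++)
                active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
                             RK_MMU_STATUS_STALL_ACTIVE);
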
 
 static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 {
-       int ret;
+       int ret, i;
 
        if (rk_iommu_is_stall_active(iommu))
                return 0;
@@ -324,15 +348,16 @@ static int rk_iommu_enable_stall(struct rk_iommu *iommu)
 
        ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
        if (ret)
-               dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
-                       rk_iommu_read(iommu, RK_MMU_STATUS));
+               for (i = 0; i < iommu->num_mmu; i++)
+                       dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+                               rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
        return ret;
 }
 
 static int rk_iommu_disable_stall(struct rk_iommu *iommu)
 {
-       int ret;
+       int ret, i;
 
        if (!rk_iommu_is_stall_active(iommu))
                return 0;
@@ -341,15 +366,16 @@ static int rk_iommu_disable_stall(struct rk_iommu *iommu)
 
        ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
        if (ret)
-               dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
-                       rk_iommu_read(iommu, RK_MMU_STATUS));
+               for (i = 0; i < iommu->num_mmu; i++)
+                       dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+                               rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
        return ret;
 }
 
 static int rk_iommu_enable_paging(struct rk_iommu *iommu)
 {
-       int ret;
+       int ret, i;
 
        if (rk_iommu_is_paging_enabled(iommu))
                return 0;
@@ -358,15 +384,16 @@ static int rk_iommu_enable_paging(struct rk_iommu *iommu)
 
        ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
        if (ret)
-               dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
-                       rk_iommu_read(iommu, RK_MMU_STATUS));
+               for (i = 0; i < iommu->num_mmu; i++)
+                       dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+                               rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
        return ret;
 }
 
 static int rk_iommu_disable_paging(struct rk_iommu *iommu)
 {
-       int ret;
+       int ret, i;
 
        if (!rk_iommu_is_paging_enabled(iommu))
                return 0;
@@ -375,41 +402,49 @@ static int rk_iommu_disable_paging(struct rk_iommu *iommu)
 
        ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
        if (ret)
-               dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
-                       rk_iommu_read(iommu, RK_MMU_STATUS));
+               for (i = 0; i < iommu->num_mmu; i++)
+                       dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+                               rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
 
        return ret;
 }
 
 static int rk_iommu_force_reset(struct rk_iommu *iommu)
 {
-       int ret;
+       int ret, i;
        u32 dte_addr;
 
        /*
         * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
         * and verifying that upper 5 nybbles are read back.
         */
-       rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+       for (i = 0; i < iommu->num_mmu; i++) {
+               rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
 
-       dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
-       if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
-               dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
-               return -EFAULT;
+               dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
+               if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+                       dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+                       return -EFAULT;
+               }
        }
 
        rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
 
-       ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
-                         FORCE_RESET_TIMEOUT);
-       if (ret)
-               dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+       for (i = 0; i < iommu->num_mmu; i++) {
+               ret = rk_wait_for(rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0x00000000,
+                                 FORCE_RESET_TIMEOUT);
+               if (ret) {
+                       dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+                       return ret;
+               }
+       }
 
-       return ret;
+       return 0;
 }
 
-static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
 {
+       void __iomem *base = iommu->bases[index];
        u32 dte_index, pte_index, page_offset;
        u32 mmu_dte_addr;
        phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
@@ -425,7 +460,7 @@ static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
        pte_index = rk_iova_pte_index(iova);
        page_offset = rk_iova_page_offset(iova);
 
-       mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+       mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
        mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
 
        dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
@@ -460,51 +495,56 @@ static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
        u32 status;
        u32 int_status;
        dma_addr_t iova;
+       irqreturn_t ret = IRQ_NONE;
+       int i;
 
-       int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
-       if (int_status == 0)
-               return IRQ_NONE;
+       for (i = 0; i < iommu->num_mmu; i++) {
+               int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
+               if (int_status == 0)
+                       continue;
 
-       iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+               ret = IRQ_HANDLED;
+               iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
 
-       if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
-               int flags;
+               if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
+                       int flags;
 
-               status = rk_iommu_read(iommu, RK_MMU_STATUS);
-               flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
-                               IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+                       status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
+                       flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+                                       IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
 
-               dev_err(iommu->dev, "Page fault at %pad of type %s\n",
-                       &iova,
-                       (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+                       dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+                               &iova,
+                               (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
 
-               log_iova(iommu, iova);
+                       log_iova(iommu, i, iova);
 
-               /*
-                * Report page fault to any installed handlers.
-                * Ignore the return code, though, since we always zap cache
-                * and clear the page fault anyway.
-                */
-               if (iommu->domain)
-                       report_iommu_fault(iommu->domain, iommu->dev, iova,
-                                          flags);
-               else
-                       dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+                       /*
+                        * Report page fault to any installed handlers.
+                        * Ignore the return code, though, since we always zap cache
+                        * and clear the page fault anyway.
+                        */
+                       if (iommu->domain)
+                               report_iommu_fault(iommu->domain, iommu->dev, iova,
+                                                  flags);
+                       else
+                               dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
 
-               rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
-               rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
-       }
+                       rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+                       rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
+               }
 
-       if (int_status & RK_MMU_IRQ_BUS_ERROR)
-               dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
+               if (int_status & RK_MMU_IRQ_BUS_ERROR)
+                       dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
 
-       if (int_status & ~RK_MMU_IRQ_MASK)
-               dev_err(iommu->dev, "unexpected int_status: %#08x\n",
-                       int_status);
+               if (int_status & ~RK_MMU_IRQ_MASK)
+                       dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+                               int_status);
 
-       rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+               rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
+       }
 
-       return IRQ_HANDLED;
+       return ret;
 }
 
 static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -746,7 +786,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
-       int ret;
+       int ret, i;
        phys_addr_t dte_addr;
 
        /*
@@ -773,9 +813,11 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
                return ret;
 
        dte_addr = virt_to_phys(rk_domain->dt);
-       rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
-       rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
-       rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+       for (i = 0; i < iommu->num_mmu; i++) {
+               rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
+               rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+               rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+       }
 
        ret = rk_iommu_enable_paging(iommu);
        if (ret)
@@ -798,6 +840,7 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
        struct rk_iommu *iommu;
        struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
        unsigned long flags;
+       int i;
 
        /* Allow 'virtual devices' (eg drm) to detach from domain */
        iommu = rk_iommu_from_dev(dev);
@@ -811,8 +854,10 @@ static void rk_iommu_detach_device(struct iommu_domain *domain,
        /* Ignore error while disabling, just keep going */
        rk_iommu_enable_stall(iommu);
        rk_iommu_disable_paging(iommu);
-       rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
-       rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+       for (i = 0; i < iommu->num_mmu; i++) {
+               rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
+               rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
+       }
        rk_iommu_disable_stall(iommu);
 
        devm_free_irq(dev, iommu->irq, iommu);
@@ -988,6 +1033,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct rk_iommu *iommu;
        struct resource *res;
+       int i;
 
        iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
@@ -995,11 +1041,21 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, iommu);
        iommu->dev = dev;
+       iommu->num_mmu = 0;
+       iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * iommu->num_mmu,
+                                   GFP_KERNEL);
+       if (!iommu->bases)
+               return -ENOMEM;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       iommu->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(iommu->base))
-               return PTR_ERR(iommu->base);
+       for (i = 0; i < pdev->num_resources; i++) {
+               res = platform_get_resource(pdev, IORESOURCE_MEM, i);
+               iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
+               if (IS_ERR(iommu->bases[i]))
+                       continue;
+               iommu->num_mmu++;
+       }
+       if (iommu->num_mmu == 0)
+               return PTR_ERR(iommu->bases[0]);
 
        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
index 11fc2a27fa2ea90da2b65ef282a5db139c594c71..abe7a571836e6b923cad4587625c63acc0ad4071 100644 (file)
@@ -78,6 +78,11 @@ config I8259
        bool
        select IRQ_DOMAIN
 
+config BCM6345_L1_IRQ
+       bool
+       select GENERIC_IRQ_CHIP
+       select IRQ_DOMAIN
+
 config BCM7038_L1_IRQ
        bool
        select GENERIC_IRQ_CHIP
@@ -130,6 +135,11 @@ config ORION_IRQCHIP
        select IRQ_DOMAIN
        select MULTI_IRQ_HANDLER
 
+config PIC32_EVIC
+       bool
+       select GENERIC_IRQ_CHIP
+       select IRQ_DOMAIN
+
 config RENESAS_INTC_IRQPIN
        bool
        select IRQ_DOMAIN
@@ -154,6 +164,7 @@ config TB10X_IRQC
 config TS4800_IRQ
        tristate "TS-4800 IRQ controller"
        select IRQ_DOMAIN
+       depends on HAS_IOMEM
        help
          Support for the TS-4800 FPGA IRQ controller
 
index d4c2e4ebc30809fcb21b9f705f416e9c8dbb7ee3..d91d99ded30f20f2a6292b7e153a913c7ce7707a 100644 (file)
@@ -46,6 +46,7 @@ obj-$(CONFIG_XTENSA)                  += irq-xtensa-pic.o
 obj-$(CONFIG_XTENSA_MX)                        += irq-xtensa-mx.o
 obj-$(CONFIG_IRQ_CROSSBAR)             += irq-crossbar.o
 obj-$(CONFIG_SOC_VF610)                        += irq-vf610-mscm-ir.o
+obj-$(CONFIG_BCM6345_L1_IRQ)           += irq-bcm6345-l1.o
 obj-$(CONFIG_BCM7038_L1_IRQ)           += irq-bcm7038-l1.o
 obj-$(CONFIG_BCM7120_L2_IRQ)           += irq-bcm7120-l2.o
 obj-$(CONFIG_BRCMSTB_L2_IRQ)           += irq-brcmstb-l2.o
@@ -58,3 +59,4 @@ obj-$(CONFIG_RENESAS_H8S_INTC)                += irq-renesas-h8s.o
 obj-$(CONFIG_ARCH_SA1100)              += irq-sa11x0.o
 obj-$(CONFIG_INGENIC_IRQ)              += irq-ingenic.o
 obj-$(CONFIG_IMX_GPCV2)                        += irq-imx-gpcv2.o
+obj-$(CONFIG_PIC32_EVIC)               += irq-pic32-evic.o
index b12a5d58546f922b460d6aac34c073ff363ef1b8..28b26c80f4cf937b8547328b5d724a69e51b7d05 100644 (file)
@@ -80,16 +80,10 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
        return 0;
 }
 
-int aic_common_set_priority(int priority, unsigned *val)
+void aic_common_set_priority(int priority, unsigned *val)
 {
-       if (priority < AT91_AIC_IRQ_MIN_PRIORITY ||
-           priority > AT91_AIC_IRQ_MAX_PRIORITY)
-               return -EINVAL;
-
-       *val &= AT91_AIC_PRIOR;
+       *val &= ~AT91_AIC_PRIOR;
        *val |= priority;
-
-       return 0;
 }
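
The key change above is the mask polarity: the old code ANDed the register value with AT91_AIC_PRIOR, keeping only the priority bits and wiping every other field, while the new code clears just the priority field before ORing in the new value. A self-contained illustration, assuming AT91_AIC_PRIOR is the low three-bit priority mask (0x7):

        unsigned int smr     = 0x65;                /* priority 5 plus other SMR bits  */
        unsigned int old_val = (smr &  0x7) | 3;    /* 0x07: non-priority bits lost    */
        unsigned int new_val = (smr & ~0x7) | 3;    /* 0x63: only the priority changes */
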
 
 int aic_common_irq_domain_xlate(struct irq_domain *d,
@@ -193,7 +187,7 @@ void __init aic_common_rtt_irq_fixup(struct device_node *root)
        }
 }
 
-void __init aic_common_irq_fixup(const struct of_device_id *matches)
+static void __init aic_common_irq_fixup(const struct of_device_id *matches)
 {
        struct device_node *root = of_find_node_by_path("/");
        const struct of_device_id *match;
@@ -214,7 +208,8 @@ void __init aic_common_irq_fixup(const struct of_device_id *matches)
 
 struct irq_domain *__init aic_common_of_init(struct device_node *node,
                                             const struct irq_domain_ops *ops,
-                                            const char *name, int nirqs)
+                                            const char *name, int nirqs,
+                                            const struct of_device_id *matches)
 {
        struct irq_chip_generic *gc;
        struct irq_domain *domain;
@@ -264,6 +259,7 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
        }
 
        aic_common_ext_irq_of_init(domain);
+       aic_common_irq_fixup(matches);
 
        return domain;
 
index 603f0a9d5411afe0f5f50880cea9048f087cc408..af60376d50debe30132acd00c58254c4d1a7ab9a 100644 (file)
@@ -19,7 +19,7 @@
 
 int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val);
 
-int aic_common_set_priority(int priority, unsigned *val);
+void aic_common_set_priority(int priority, unsigned *val);
 
 int aic_common_irq_domain_xlate(struct irq_domain *d,
                                struct device_node *ctrlr,
@@ -30,12 +30,11 @@ int aic_common_irq_domain_xlate(struct irq_domain *d,
 
 struct irq_domain *__init aic_common_of_init(struct device_node *node,
                                             const struct irq_domain_ops *ops,
-                                            const char *name, int nirqs);
+                                            const char *name, int nirqs,
+                                            const struct of_device_id *matches);
 
 void __init aic_common_rtc_irq_fixup(struct device_node *root);
 
 void __init aic_common_rtt_irq_fixup(struct device_node *root);
 
-void __init aic_common_irq_fixup(const struct of_device_id *matches);
-
 #endif /* __IRQ_ATMEL_AIC_COMMON_H */
index 8a0c7f28819841a83e1afb0e3eccf71d4aa287a7..112e17c2768be06587d340865ade9fe202adc7da 100644 (file)
@@ -196,9 +196,8 @@ static int aic_irq_domain_xlate(struct irq_domain *d,
 
        irq_gc_lock(gc);
        smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq));
-       ret = aic_common_set_priority(intspec[2], &smr);
-       if (!ret)
-               irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
+       aic_common_set_priority(intspec[2], &smr);
+       irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq));
        irq_gc_unlock(gc);
 
        return ret;
@@ -248,12 +247,10 @@ static int __init aic_of_init(struct device_node *node,
                return -EEXIST;
 
        domain = aic_common_of_init(node, &aic_irq_ops, "atmel-aic",
-                                   NR_AIC_IRQS);
+                                   NR_AIC_IRQS, aic_irq_fixups);
        if (IS_ERR(domain))
                return PTR_ERR(domain);
 
-       aic_common_irq_fixup(aic_irq_fixups);
-
        aic_domain = domain;
        gc = irq_get_domain_generic_chip(domain, 0);
 
index 62bb840c613f2a660966333f858426af5ab3d93e..4f0d068e1abec2bc068b5a2185bef0fd6df729ff 100644 (file)
@@ -272,9 +272,8 @@ static int aic5_irq_domain_xlate(struct irq_domain *d,
        irq_gc_lock(bgc);
        irq_reg_writel(bgc, *out_hwirq, AT91_AIC5_SSR);
        smr = irq_reg_readl(bgc, AT91_AIC5_SMR);
-       ret = aic_common_set_priority(intspec[2], &smr);
-       if (!ret)
-               irq_reg_writel(bgc, intspec[2] | smr, AT91_AIC5_SMR);
+       aic_common_set_priority(intspec[2], &smr);
+       irq_reg_writel(bgc, smr, AT91_AIC5_SMR);
        irq_gc_unlock(bgc);
 
        return ret;
@@ -312,12 +311,10 @@ static int __init aic5_of_init(struct device_node *node,
                return -EEXIST;
 
        domain = aic_common_of_init(node, &aic5_irq_ops, "atmel-aic5",
-                                   nirqs);
+                                   nirqs, aic5_irq_fixups);
        if (IS_ERR(domain))
                return PTR_ERR(domain);
 
-       aic_common_irq_fixup(aic5_irq_fixups);
-
        aic5_domain = domain;
        nchips = aic5_domain->revmap_size / 32;
        for (i = 0; i < nchips; i++) {
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
new file mode 100644 (file)
index 0000000..b844c89
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * Broadcom BCM6345 style Level 1 interrupt controller driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ * Copyright 2015 Simon Arlott
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is based on the BCM7038 (which supports SMP) but with a single
+ * enable register instead of separate mask/set/clear registers.
+ *
+ * The BCM3380 has a similar mask/status register layout, but each pair
+ * of words is at separate locations (and SMP is not supported).
+ *
+ * ENABLE/STATUS words are packed next to each other for each CPU:
+ *
+ * BCM6368:
+ *   0x1000_0020: CPU0_W0_ENABLE
+ *   0x1000_0024: CPU0_W1_ENABLE
+ *   0x1000_0028: CPU0_W0_STATUS               IRQs 32-63
+ *   0x1000_002c: CPU0_W1_STATUS               IRQs 0-31
+ *   0x1000_0030: CPU1_W0_ENABLE
+ *   0x1000_0034: CPU1_W1_ENABLE
+ *   0x1000_0038: CPU1_W0_STATUS               IRQs 32-63
+ *   0x1000_003c: CPU1_W1_STATUS               IRQs 0-31
+ *
+ * BCM63168:
+ *   0x1000_0020: CPU0_W0_ENABLE
+ *   0x1000_0024: CPU0_W1_ENABLE
+ *   0x1000_0028: CPU0_W2_ENABLE
+ *   0x1000_002c: CPU0_W3_ENABLE
+ *   0x1000_0030: CPU0_W0_STATUS       IRQs 96-127
+ *   0x1000_0034: CPU0_W1_STATUS       IRQs 64-95
+ *   0x1000_0038: CPU0_W2_STATUS       IRQs 32-63
+ *   0x1000_003c: CPU0_W3_STATUS       IRQs 0-31
+ *   0x1000_0040: CPU1_W0_ENABLE
+ *   0x1000_0044: CPU1_W1_ENABLE
+ *   0x1000_0048: CPU1_W2_ENABLE
+ *   0x1000_004c: CPU1_W3_ENABLE
+ *   0x1000_0050: CPU1_W0_STATUS       IRQs 96-127
+ *   0x1000_0054: CPU1_W1_STATUS       IRQs 64-95
+ *   0x1000_0058: CPU1_W2_STATUS       IRQs 32-63
+ *   0x1000_005c: CPU1_W3_STATUS       IRQs 0-31
+ *
+ * IRQs are numbered in CPU native endian order
+ * (which is big-endian in these examples)
+ */
+
+#define pr_fmt(fmt)    KBUILD_MODNAME  ": " fmt
+
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define IRQS_PER_WORD          32
+#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 2)
+
+struct bcm6345_l1_cpu;
+
+struct bcm6345_l1_chip {
+       raw_spinlock_t          lock;
+       unsigned int            n_words;
+       struct irq_domain       *domain;
+       struct cpumask          cpumask;
+       struct bcm6345_l1_cpu   *cpus[NR_CPUS];
+};
+
+struct bcm6345_l1_cpu {
+       void __iomem            *map_base;
+       unsigned int            parent_irq;
+       u32                     enable_cache[];
+};
+
+static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
+                                          unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+       return (1 * intc->n_words - word - 1) * sizeof(u32);
+#else
+       return (0 * intc->n_words + word) * sizeof(u32);
+#endif
+}
+
+static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
+                                     unsigned int word)
+{
+#ifdef __BIG_ENDIAN
+       return (2 * intc->n_words - word - 1) * sizeof(u32);
+#else
+       return (1 * intc->n_words + word) * sizeof(u32);
+#endif
+}
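
A worked reading of the two helpers above against the BCM6368 layout in the file header (two words per CPU, big-endian), added for illustration only:

        /*
         * n_words == 2, __BIG_ENDIAN, hwirq 5 -> word 0 (IRQs 0-31):
         *   reg_enable(intc, 0) == (1 * 2 - 0 - 1) * 4 == 0x4 -> CPUx_W1_ENABLE
         *   reg_status(intc, 0) == (2 * 2 - 0 - 1) * 4 == 0xc -> CPUx_W1_STATUS
         * which matches the header: the W1 pair carries IRQs 0-31.  Each CPU's
         * register block is 16 bytes, so bcm6345_l1_init_one() below computes
         * n_words = 16 / REG_BYTES_PER_IRQ_WORD = 2.
         */
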
+
+static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
+                                       struct irq_data *d)
+{
+       return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
+}
+
+static void bcm6345_l1_irq_handle(struct irq_desc *desc)
+{
+       struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
+       struct bcm6345_l1_cpu *cpu;
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+       unsigned int idx;
+
+#ifdef CONFIG_SMP
+       cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
+#else
+       cpu = intc->cpus[0];
+#endif
+
+       chained_irq_enter(chip, desc);
+
+       for (idx = 0; idx < intc->n_words; idx++) {
+               int base = idx * IRQS_PER_WORD;
+               unsigned long pending;
+               irq_hw_number_t hwirq;
+               unsigned int irq;
+
+               pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
+               pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
+
+               for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
+                       irq = irq_linear_revmap(intc->domain, base + hwirq);
+                       if (irq)
+                               do_IRQ(irq);
+                       else
+                               spurious_interrupt();
+               }
+       }
+
+       chained_irq_exit(chip, desc);
+}
+
+static inline void __bcm6345_l1_unmask(struct irq_data *d)
+{
+       struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+       u32 word = d->hwirq / IRQS_PER_WORD;
+       u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+       unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+       intc->cpus[cpu_idx]->enable_cache[word] |= mask;
+       __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+               intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static inline void __bcm6345_l1_mask(struct irq_data *d)
+{
+       struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+       u32 word = d->hwirq / IRQS_PER_WORD;
+       u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+       unsigned int cpu_idx = cpu_for_irq(intc, d);
+
+       intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
+       __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
+               intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
+}
+
+static void bcm6345_l1_unmask(struct irq_data *d)
+{
+       struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&intc->lock, flags);
+       __bcm6345_l1_unmask(d);
+       raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static void bcm6345_l1_mask(struct irq_data *d)
+{
+       struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&intc->lock, flags);
+       __bcm6345_l1_mask(d);
+       raw_spin_unlock_irqrestore(&intc->lock, flags);
+}
+
+static int bcm6345_l1_set_affinity(struct irq_data *d,
+                                  const struct cpumask *dest,
+                                  bool force)
+{
+       struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
+       u32 word = d->hwirq / IRQS_PER_WORD;
+       u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
+       unsigned int old_cpu = cpu_for_irq(intc, d);
+       unsigned int new_cpu;
+       struct cpumask valid;
+       unsigned long flags;
+       bool enabled;
+
+       if (!cpumask_and(&valid, &intc->cpumask, dest))
+               return -EINVAL;
+
+       new_cpu = cpumask_any_and(&valid, cpu_online_mask);
+       if (new_cpu >= nr_cpu_ids)
+               return -EINVAL;
+
+       dest = cpumask_of(new_cpu);
+
+       raw_spin_lock_irqsave(&intc->lock, flags);
+       if (old_cpu != new_cpu) {
+               enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
+               if (enabled)
+                       __bcm6345_l1_mask(d);
+               cpumask_copy(irq_data_get_affinity_mask(d), dest);
+               if (enabled)
+                       __bcm6345_l1_unmask(d);
+       } else {
+               cpumask_copy(irq_data_get_affinity_mask(d), dest);
+       }
+       raw_spin_unlock_irqrestore(&intc->lock, flags);
+
+       return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static int __init bcm6345_l1_init_one(struct device_node *dn,
+                                     unsigned int idx,
+                                     struct bcm6345_l1_chip *intc)
+{
+       struct resource res;
+       resource_size_t sz;
+       struct bcm6345_l1_cpu *cpu;
+       unsigned int i, n_words;
+
+       if (of_address_to_resource(dn, idx, &res))
+               return -EINVAL;
+       sz = resource_size(&res);
+       n_words = sz / REG_BYTES_PER_IRQ_WORD;
+
+       if (!intc->n_words)
+               intc->n_words = n_words;
+       else if (intc->n_words != n_words)
+               return -EINVAL;
+
+       cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
+                                       GFP_KERNEL);
+       if (!cpu)
+               return -ENOMEM;
+
+       cpu->map_base = ioremap(res.start, sz);
+       if (!cpu->map_base)
+               return -ENOMEM;
+
+       for (i = 0; i < n_words; i++) {
+               cpu->enable_cache[i] = 0;
+               __raw_writel(0, cpu->map_base + reg_enable(intc, i));
+       }
+
+       cpu->parent_irq = irq_of_parse_and_map(dn, idx);
+       if (!cpu->parent_irq) {
+               pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
+               return -EINVAL;
+       }
+       irq_set_chained_handler_and_data(cpu->parent_irq,
+                                               bcm6345_l1_irq_handle, intc);
+
+       return 0;
+}
+
+static struct irq_chip bcm6345_l1_irq_chip = {
+       .name                   = "bcm6345-l1",
+       .irq_mask               = bcm6345_l1_mask,
+       .irq_unmask             = bcm6345_l1_unmask,
+       .irq_set_affinity       = bcm6345_l1_set_affinity,
+};
+
+static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
+                         irq_hw_number_t hw_irq)
+{
+       irq_set_chip_and_handler(virq,
+               &bcm6345_l1_irq_chip, handle_percpu_irq);
+       irq_set_chip_data(virq, d->host_data);
+       return 0;
+}
+
+static const struct irq_domain_ops bcm6345_l1_domain_ops = {
+       .xlate                  = irq_domain_xlate_onecell,
+       .map                    = bcm6345_l1_map,
+};
+
+static int __init bcm6345_l1_of_init(struct device_node *dn,
+                             struct device_node *parent)
+{
+       struct bcm6345_l1_chip *intc;
+       unsigned int idx;
+       int ret;
+
+       intc = kzalloc(sizeof(*intc), GFP_KERNEL);
+       if (!intc)
+               return -ENOMEM;
+
+       for_each_possible_cpu(idx) {
+               ret = bcm6345_l1_init_one(dn, idx, intc);
+               if (ret)
+                       pr_err("failed to init intc L1 for cpu %d: %d\n",
+                               idx, ret);
+               else
+                       cpumask_set_cpu(idx, &intc->cpumask);
+       }
+
+       if (!cpumask_weight(&intc->cpumask)) {
+               ret = -ENODEV;
+               goto out_free;
+       }
+
+       raw_spin_lock_init(&intc->lock);
+
+       intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
+                                            &bcm6345_l1_domain_ops,
+                                            intc);
+       if (!intc->domain) {
+               ret = -ENOMEM;
+               goto out_unmap;
+       }
+
+       pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
+                       IRQS_PER_WORD * intc->n_words);
+       for_each_cpu(idx, &intc->cpumask) {
+               struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+               pr_info("  CPU%u at MMIO 0x%p (irq = %d)\n", idx,
+                               cpu->map_base, cpu->parent_irq);
+       }
+
+       return 0;
+
+out_unmap:
+       for_each_possible_cpu(idx) {
+               struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
+
+               if (cpu) {
+                       if (cpu->map_base)
+                               iounmap(cpu->map_base);
+                       kfree(cpu);
+               }
+       }
+out_free:
+       kfree(intc);
+       return ret;
+}
+
+IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);
index e23d1d18f9d6a39731bdd34633e2bbc9bc473525..0a73632b28d573cfa450e4edf75dacfc8683ad86 100644 (file)
@@ -66,7 +66,10 @@ struct its_node {
        unsigned long           phys_base;
        struct its_cmd_block    *cmd_base;
        struct its_cmd_block    *cmd_write;
-       void                    *tables[GITS_BASER_NR_REGS];
+       struct {
+               void            *base;
+               u32             order;
+       } tables[GITS_BASER_NR_REGS];
        struct its_collection   *collections;
        struct list_head        its_device_list;
        u64                     flags;
@@ -807,9 +810,10 @@ static void its_free_tables(struct its_node *its)
        int i;
 
        for (i = 0; i < GITS_BASER_NR_REGS; i++) {
-               if (its->tables[i]) {
-                       free_page((unsigned long)its->tables[i]);
-                       its->tables[i] = NULL;
+               if (its->tables[i].base) {
+                       free_pages((unsigned long)its->tables[i].base,
+                                  its->tables[i].order);
+                       its->tables[i].base = NULL;
                }
        }
 }
@@ -875,6 +879,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
                }
 
                alloc_size = (1 << order) * PAGE_SIZE;
+retry_alloc_baser:
                alloc_pages = (alloc_size / psz);
                if (alloc_pages > GITS_BASER_PAGES_MAX) {
                        alloc_pages = GITS_BASER_PAGES_MAX;
@@ -889,7 +894,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
                        goto out_free;
                }
 
-               its->tables[i] = base;
+               its->tables[i].base = base;
+               its->tables[i].order = order;
 
 retry_baser:
                val = (virt_to_phys(base)                                |
@@ -938,13 +944,16 @@ retry_baser:
                         * size and retry. If we reach 4K, then
                         * something is horribly wrong...
                         */
+                       free_pages((unsigned long)base, order);
+                       its->tables[i].base = NULL;
+
                        switch (psz) {
                        case SZ_16K:
                                psz = SZ_4K;
-                               goto retry_baser;
+                               goto retry_alloc_baser;
                        case SZ_64K:
                                psz = SZ_16K;
-                               goto retry_baser;
+                               goto retry_alloc_baser;
                        }
                }
 
index c22e2d40cb302452624c29efd44c483e577f5333..efe50845939d91fee149acb085912697031dd9a0 100644 (file)
@@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
                writel(0, icoll_priv.intr + i);
 
        icoll_add_domain(np, ASM9260_NUM_IRQS);
+       set_handle_irq(icoll_handle_irq);
 
        return 0;
 }
diff --git a/drivers/irqchip/irq-pic32-evic.c b/drivers/irqchip/irq-pic32-evic.c
new file mode 100644 (file)
index 0000000..e7155db
--- /dev/null
@@ -0,0 +1,324 @@
+/*
+ * Cristian Birsan <cristian.birsan@microchip.com>
+ * Joshua Henderson <joshua.henderson@microchip.com>
+ * Copyright (C) 2016 Microchip Technology Inc.  All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irq.h>
+
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/mach-pic32/pic32.h>
+
+#define REG_INTCON     0x0000
+#define REG_INTSTAT    0x0020
+#define REG_IFS_OFFSET 0x0040
+#define REG_IEC_OFFSET 0x00C0
+#define REG_IPC_OFFSET 0x0140
+#define REG_OFF_OFFSET 0x0540
+
+#define MAJPRI_MASK    0x07
+#define SUBPRI_MASK    0x03
+#define PRIORITY_MASK  0x1F
+
+#define PIC32_INT_PRI(pri, subpri)                             \
+       ((((pri) & MAJPRI_MASK) << 2) | ((subpri) & SUBPRI_MASK))
+
+struct evic_chip_data {
+       u32 irq_types[NR_IRQS];
+       u32 ext_irqs[8];
+};
+
+static struct irq_domain *evic_irq_domain;
+static void __iomem *evic_base;
+
+asmlinkage void __weak plat_irq_dispatch(void)
+{
+       unsigned int irq, hwirq;
+
+       hwirq = readl(evic_base + REG_INTSTAT) & 0xFF;
+       irq = irq_linear_revmap(evic_irq_domain, hwirq);
+       do_IRQ(irq);
+}
+
+static struct evic_chip_data *irqd_to_priv(struct irq_data *data)
+{
+       return (struct evic_chip_data *)data->domain->host_data;
+}
+
+static int pic32_set_ext_polarity(int bit, u32 type)
+{
+       /*
+        * External interrupts can be either edge rising or edge falling,
+        * but not both.
+        */
+       switch (type) {
+       case IRQ_TYPE_EDGE_RISING:
+               writel(BIT(bit), evic_base + PIC32_SET(REG_INTCON));
+               break;
+       case IRQ_TYPE_EDGE_FALLING:
+               writel(BIT(bit), evic_base + PIC32_CLR(REG_INTCON));
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int pic32_set_type_edge(struct irq_data *data,
+                              unsigned int flow_type)
+{
+       struct evic_chip_data *priv = irqd_to_priv(data);
+       int ret;
+       int i;
+
+       if (!(flow_type & IRQ_TYPE_EDGE_BOTH))
+               return -EBADR;
+
+       /* set polarity for external interrupts only */
+       for (i = 0; i < ARRAY_SIZE(priv->ext_irqs); i++) {
+               if (priv->ext_irqs[i] == data->hwirq) {
+                       ret = pic32_set_ext_polarity(i + 1, flow_type);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       irqd_set_trigger_type(data, flow_type);
+
+       return IRQ_SET_MASK_OK;
+}
+
+static void pic32_bind_evic_interrupt(int irq, int set)
+{
+       writel(set, evic_base + REG_OFF_OFFSET + irq * 4);
+}
+
+static void pic32_set_irq_priority(int irq, int priority)
+{
+       u32 reg, shift;
+
+       reg = irq / 4;
+       shift = (irq % 4) * 8;
+
+       writel(PRIORITY_MASK << shift,
+               evic_base + PIC32_CLR(REG_IPC_OFFSET + reg * 0x10));
+       writel(priority << shift,
+               evic_base + PIC32_SET(REG_IPC_OFFSET + reg * 0x10));
+}
+
+#define IRQ_REG_MASK(_hwirq, _reg, _mask)                     \
+       do {                                                   \
+               _reg = _hwirq / 32;                            \
+               _mask = 1 << (_hwirq % 32);                    \
+       } while (0)
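
A worked example of the two helpers above, for illustration: with hwirq 7 and the default priority PIC32_INT_PRI(2, 0) == ((2 & 0x7) << 2) | (0 & 0x3) == 0x08,

        /*
         * pic32_set_irq_priority(7, 0x08):
         *     reg   = 7 / 4       = 1             -> IPC1 (0x10 stride per IPC)
         *     shift = (7 % 4) * 8 = 24
         *     CLR register: PRIORITY_MASK << 24,  SET register: 0x08 << 24
         *
         * IRQ_REG_MASK(7, reg, mask):
         *     reg = 0, mask = 1 << 7              -> bit 7 of IEC0 / IFS0
         */
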
+
+static int pic32_irq_domain_map(struct irq_domain *d, unsigned int virq,
+                               irq_hw_number_t hw)
+{
+       struct evic_chip_data *priv = d->host_data;
+       struct irq_data *data;
+       int ret;
+       u32 iecclr, ifsclr;
+       u32 reg, mask;
+
+       ret = irq_map_generic_chip(d, virq, hw);
+       if (ret)
+               return ret;
+
+       /*
+        * Piggyback on xlate function to move to an alternate chip as necessary
+        * at time of mapping instead of allowing the flow handler/chip to be
+        * changed later. This requires all interrupts to be configured through
+        * DT.
+        */
+       if (priv->irq_types[hw] & IRQ_TYPE_SENSE_MASK) {
+               data = irq_domain_get_irq_data(d, virq);
+               irqd_set_trigger_type(data, priv->irq_types[hw]);
+               irq_setup_alt_chip(data, priv->irq_types[hw]);
+       }
+
+       IRQ_REG_MASK(hw, reg, mask);
+
+       iecclr = PIC32_CLR(REG_IEC_OFFSET + reg * 0x10);
+       ifsclr = PIC32_CLR(REG_IFS_OFFSET + reg * 0x10);
+
+       /* mask and clear flag */
+       writel(mask, evic_base + iecclr);
+       writel(mask, evic_base + ifsclr);
+
+       /* default priority is required */
+       pic32_set_irq_priority(hw, PIC32_INT_PRI(2, 0));
+
+       return ret;
+}
+
+int pic32_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
+                          const u32 *intspec, unsigned int intsize,
+                          irq_hw_number_t *out_hwirq, unsigned int *out_type)
+{
+       struct evic_chip_data *priv = d->host_data;
+
+       if (WARN_ON(intsize < 2))
+               return -EINVAL;
+
+       if (WARN_ON(intspec[0] >= NR_IRQS))
+               return -EINVAL;
+
+       *out_hwirq = intspec[0];
+       *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
+
+       priv->irq_types[intspec[0]] = intspec[1] & IRQ_TYPE_SENSE_MASK;
+
+       return 0;
+}
+
+static const struct irq_domain_ops pic32_irq_domain_ops = {
+       .map    = pic32_irq_domain_map,
+       .xlate  = pic32_irq_domain_xlate,
+};
+
+static void __init pic32_ext_irq_of_init(struct irq_domain *domain)
+{
+       struct device_node *node = irq_domain_get_of_node(domain);
+       struct evic_chip_data *priv = domain->host_data;
+       struct property *prop;
+       const __le32 *p;
+       u32 hwirq;
+       int i = 0;
+       const char *pname = "microchip,external-irqs";
+
+       of_property_for_each_u32(node, pname, prop, p, hwirq) {
+               if (i >= ARRAY_SIZE(priv->ext_irqs)) {
+                       pr_warn("More than %d external irq, skip rest\n",
+                               ARRAY_SIZE(priv->ext_irqs));
+                       break;
+               }
+
+               priv->ext_irqs[i] = hwirq;
+               i++;
+       }
+}
+
+static int __init pic32_of_init(struct device_node *node,
+                               struct device_node *parent)
+{
+       struct irq_chip_generic *gc;
+       struct evic_chip_data *priv;
+       unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+       int nchips, ret;
+       int i;
+
+       nchips = DIV_ROUND_UP(NR_IRQS, 32);
+
+       evic_base = of_iomap(node, 0);
+       if (!evic_base)
+               return -ENOMEM;
+
+       priv = kcalloc(nchips, sizeof(*priv), GFP_KERNEL);
+       if (!priv) {
+               ret = -ENOMEM;
+               goto err_iounmap;
+       }
+
+       evic_irq_domain = irq_domain_add_linear(node, nchips * 32,
+                                               &pic32_irq_domain_ops,
+                                               priv);
+       if (!evic_irq_domain) {
+               ret = -ENOMEM;
+               goto err_free_priv;
+       }
+
+       /*
+        * The PIC32 EVIC has a linear list of irqs and the type of each
+        * irq is determined by the hardware peripheral the EVIC is arbitrating.
+        * These irq types are defined in the datasheet as "persistent" and
+        * "non-persistent" which are mapped here to level and edge
+        * respectively. To manage the different flow handler requirements of
+        * each irq type, different chip_types are used.
+        */
+       ret = irq_alloc_domain_generic_chips(evic_irq_domain, 32, 2,
+                                            "evic-level", handle_level_irq,
+                                            clr, 0, 0);
+       if (ret)
+               goto err_domain_remove;
+
+       board_bind_eic_interrupt = &pic32_bind_evic_interrupt;
+
+       for (i = 0; i < nchips; i++) {
+               u32 ifsclr = PIC32_CLR(REG_IFS_OFFSET + (i * 0x10));
+               u32 iec = REG_IEC_OFFSET + (i * 0x10);
+
+               gc = irq_get_domain_generic_chip(evic_irq_domain, i * 32);
+
+               gc->reg_base = evic_base;
+               gc->unused = 0;
+
+               /*
+                * Level/persistent interrupts have a special requirement that
+                * the condition generating the interrupt be cleared before the
+                * interrupt flag (ifs) can be cleared. chip.irq_eoi is used to
+                * complete the interrupt with an ack.
+                */
+               gc->chip_types[0].type                  = IRQ_TYPE_LEVEL_MASK;
+               gc->chip_types[0].handler               = handle_fasteoi_irq;
+               gc->chip_types[0].regs.ack              = ifsclr;
+               gc->chip_types[0].regs.mask             = iec;
+               gc->chip_types[0].chip.name             = "evic-level";
+               gc->chip_types[0].chip.irq_eoi          = irq_gc_ack_set_bit;
+               gc->chip_types[0].chip.irq_mask         = irq_gc_mask_clr_bit;
+               gc->chip_types[0].chip.irq_unmask       = irq_gc_mask_set_bit;
+               gc->chip_types[0].chip.flags            = IRQCHIP_SKIP_SET_WAKE;
+
+               /* Edge interrupts */
+               gc->chip_types[1].type                  = IRQ_TYPE_EDGE_BOTH;
+               gc->chip_types[1].handler               = handle_edge_irq;
+               gc->chip_types[1].regs.ack              = ifsclr;
+               gc->chip_types[1].regs.mask             = iec;
+               gc->chip_types[1].chip.name             = "evic-edge";
+               gc->chip_types[1].chip.irq_ack          = irq_gc_ack_set_bit;
+               gc->chip_types[1].chip.irq_mask         = irq_gc_mask_clr_bit;
+               gc->chip_types[1].chip.irq_unmask       = irq_gc_mask_set_bit;
+               gc->chip_types[1].chip.irq_set_type     = pic32_set_type_edge;
+               gc->chip_types[1].chip.flags            = IRQCHIP_SKIP_SET_WAKE;
+
+               gc->private = &priv[i];
+       }
+
+       irq_set_default_host(evic_irq_domain);
+
+       /*
+        * External interrupts have software configurable edge polarity. These
+        * interrupts are defined in DT allowing polarity to be configured only
+        * for these interrupts when requested.
+        */
+       pic32_ext_irq_of_init(evic_irq_domain);
+
+       return 0;
+
+err_domain_remove:
+       irq_domain_remove(evic_irq_domain);
+
+err_free_priv:
+       kfree(priv);
+
+err_iounmap:
+       iounmap(evic_base);
+
+       return ret;
+}
+
+IRQCHIP_DECLARE(pic32_evic, "microchip,pic32mzda-evic", pic32_of_init);
index c71914e8f596c3700d8c5a4da2448b32acf8c41f..5dc5a760c7236971a81dfcf70cd78542abadf8f4 100644 (file)
@@ -605,7 +605,7 @@ err:
        return ERR_PTR(ret);
 }
 
-static struct s3c_irq_data init_eint[32] = {
+static struct s3c_irq_data __maybe_unused init_eint[32] = {
        { .type = S3C_IRQTYPE_NONE, }, /* reserved */
        { .type = S3C_IRQTYPE_NONE, }, /* reserved */
        { .type = S3C_IRQTYPE_NONE, }, /* reserved */
index 0704362f4c824c6ba2b87e42353f0481bcff36e5..376b28074e0d8937c43036b647de218ef45c14b7 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/of_irq.h>
 
 #include <asm/exception.h>
-#include <asm/mach/irq.h>
 
 #define SUN4I_IRQ_VECTOR_REG           0x00
 #define SUN4I_IRQ_PROTECTION_REG       0x08
index 2175225af74214c925ae1202a5c3771bf0608b7c..947d5c978b8f7559708d2b0337e84eec52f8231a 100644 (file)
@@ -1572,7 +1572,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
 #endif
                return;
        }
-       port->flags |= ASYNC_CLOSING;
+       info->closing = 1;
 
        tty->closing = 1;
        /*
@@ -1603,6 +1603,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
        info->ncarrier = 0;
 
        tty_port_close_end(port, tty);
+       info->closing = 0;
 #ifdef ISDN_DEBUG_MODEM_OPEN
        printk(KERN_DEBUG "isdn_tty_close normal exit\n");
 #endif
@@ -2236,7 +2237,7 @@ isdn_tty_at_cout(char *msg, modem_info *info)
        l = strlen(msg);
 
        spin_lock_irqsave(&info->readlock, flags);
-       if (port->flags & ASYNC_CLOSING) {
+       if (info->closing) {
                spin_unlock_irqrestore(&info->readlock, flags);
                return;
        }
@@ -2386,13 +2387,12 @@ isdn_tty_modem_result(int code, modem_info *info)
        case RESULT_NO_CARRIER:
 #ifdef ISDN_DEBUG_MODEM_HUP
                printk(KERN_DEBUG "modem_result: NO CARRIER %d %d\n",
-                      (info->port.flags & ASYNC_CLOSING),
-                      (!info->port.tty));
+                      info->closing, !info->port.tty);
 #endif
                m->mdmreg[REG_RINGCNT] = 0;
                del_timer(&info->nc_timer);
                info->ncarrier = 0;
-               if ((info->port.flags & ASYNC_CLOSING) || (!info->port.tty))
+               if (info->closing || !info->port.tty)
                        return;
 
 #ifdef CONFIG_ISDN_AUDIO
@@ -2525,7 +2525,7 @@ isdn_tty_modem_result(int code, modem_info *info)
                }
        }
        if (code == RESULT_NO_CARRIER) {
-               if ((info->port.flags & ASYNC_CLOSING) || (!info->port.tty))
+               if (info->closing || (!info->port.tty))
                        return;
 
                if (info->port.flags & ASYNC_CHECK_CD)
index 7f940c24a16b0d00163051acfda665b692733b5d..1034696b77cfba2abc4fa0417c966cd1b865b78d 100644 (file)
@@ -568,6 +568,18 @@ config LEDS_SEAD3
          This driver can also be built as a module. If so the module
          will be called leds-sead3.
 
+config LEDS_SN3218
+       tristate "LED support for Si-En SN3218 I2C chip"
+       depends on LEDS_CLASS && I2C
+       depends on OF
+       select REGMAP_I2C
+       help
+         This option enables support for the Si-EN SN3218 LED driver
+         connected through I2C. Say Y to enable support for the SN3218 LED.
+
+         This driver can also be built as a module. If so the module
+         will be called leds-sn3218.
+
 comment "LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)"
 
 config LEDS_BLINKM
index e9d53092765d8ca8ae43e970dc63ec1c23e96b78..89c9b6ff65c3632d99db298ac30fb2927e3145c7 100644 (file)
@@ -66,6 +66,7 @@ obj-$(CONFIG_LEDS_MENF21BMC)          += leds-menf21bmc.o
 obj-$(CONFIG_LEDS_KTD2692)             += leds-ktd2692.o
 obj-$(CONFIG_LEDS_POWERNV)             += leds-powernv.o
 obj-$(CONFIG_LEDS_SEAD3)               += leds-sead3.o
+obj-$(CONFIG_LEDS_SN3218)              += leds-sn3218.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)          += leds-dac124s085.o
index 14139c337312c574c82a98bee4c163834cad6bc5..aa84e5b375931825e71b23f6e2d1eefc7c3bea17 100644 (file)
@@ -245,6 +245,8 @@ void led_classdev_unregister(struct led_classdev *led_cdev)
        up_write(&led_cdev->trigger_lock);
 #endif
 
+       led_cdev->flags |= LED_UNREGISTERING;
+
        /* Stop blinking */
        led_stop_software_blink(led_cdev);
 
index 19e1e60dfaa354585ca01e2ffdc2a47f29c49c3b..ad684b6d0b724bc4b249102879c12202f6e6be50 100644 (file)
@@ -98,7 +98,10 @@ static void set_brightness_delayed(struct work_struct *ws)
                                                led_cdev->delayed_set_value);
        else
                ret = -ENOTSUPP;
-       if (ret < 0)
+       if (ret < 0 &&
+           /* LED HW might have been unplugged, therefore don't warn */
+           !(ret == -ENODEV && (led_cdev->flags & LED_UNREGISTERING) &&
+           (led_cdev->flags & LED_HW_PLUGGABLE)))
                dev_err(led_cdev->dev,
                        "Setting an LED's brightness failed (%d)\n", ret);
 }
index 7bc53280dbfdd91d0d2efa51bb63986a6bbf336b..5883dede34238862c8cbc8871240aa93f93ab780 100644 (file)
@@ -104,8 +104,8 @@ static int create_gpio_led(const struct gpio_led *template,
                        return ret;
 
                led_dat->gpiod = gpio_to_desc(template->gpio);
-               if (IS_ERR(led_dat->gpiod))
-                       return PTR_ERR(led_dat->gpiod);
+               if (!led_dat->gpiod)
+                       return -EINVAL;
        }
 
        led_dat->cdev.name = template->name;
index 6c758aea1bbdaa1fb77edf71e9c611e4e131ee0e..be60c181222a562f564eb839d470e85ae932f9d9 100644 (file)
@@ -199,8 +199,11 @@ static int lp3944_led_set(struct lp3944_led_data *led, u8 status)
        if (status > LP3944_LED_STATUS_DIM1)
                return -EINVAL;
 
-       /* invert only 0 and 1, leave unchanged the other values,
-        * remember we are abusing status to set blink patterns
+       /*
+        * Invert status only when it is < 2 (i.e. 0 or 1), which means it
+        * directly controls the on/off state.
+        * When status is >= 2, leave it unchanged: inverting it would
+        * interfere with the hardware blinking mode.
         */
        if (led->type == LP3944_LED_TYPE_LED_INVERTED && status < 2)
                status = 1 - status;
diff --git a/drivers/leds/leds-sn3218.c b/drivers/leds/leds-sn3218.c
new file mode 100644 (file)
index 0000000..dcc2581
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * Si-En SN3218 18 Channel LED Driver
+ *
+ * Copyright (C) 2016 Stefan Wahren <stefan.wahren@i2se.com>
+ *
+ * Based on leds-pca963x.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * Datasheet: http://www.si-en.com/uploadpdf/s2011517171720.pdf
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define SN3218_MODE            0x00
+#define SN3218_PWM_1           0x01
+#define SN3218_PWM_2           0x02
+#define SN3218_PWM_3           0x03
+#define SN3218_PWM_4           0x04
+#define SN3218_PWM_5           0x05
+#define SN3218_PWM_6           0x06
+#define SN3218_PWM_7           0x07
+#define SN3218_PWM_8           0x08
+#define SN3218_PWM_9           0x09
+#define SN3218_PWM_10          0x0a
+#define SN3218_PWM_11          0x0b
+#define SN3218_PWM_12          0x0c
+#define SN3218_PWM_13          0x0d
+#define SN3218_PWM_14          0x0e
+#define SN3218_PWM_15          0x0f
+#define SN3218_PWM_16          0x10
+#define SN3218_PWM_17          0x11
+#define SN3218_PWM_18          0x12
+#define SN3218_LED_1_6         0x13
+#define SN3218_LED_7_12                0x14
+#define SN3218_LED_13_18       0x15
+#define SN3218_UPDATE          0x16    /* applies to reg 0x01 .. 0x15 */
+#define SN3218_RESET           0x17
+
+#define SN3218_LED_MASK                0x3f
+#define SN3218_LED_ON          0x01
+#define SN3218_LED_OFF         0x00
+
+#define SN3218_MODE_SHUTDOWN   0x00
+#define SN3218_MODE_NORMAL     0x01
+
+#define NUM_LEDS               18
+
+struct sn3218_led;
+
+/**
+ * struct sn3218 - SN3218 device data
+ * @client: Pointer to the I2C client
+ * @regmap: Register map used to access the chip
+ * @leds: Pointer to the individual LEDs
+ * @num_leds: Actual number of LEDs
+ */
+struct sn3218 {
+       struct i2c_client *client;
+       struct regmap *regmap;
+       struct sn3218_led *leds;
+       int num_leds;
+};
+
+/**
+ * struct sn3218_led - state of a single LED channel
+ * @chip: Pointer to the parent sn3218 device
+ * @led_cdev: LED class device
+ * @led_num: LED index (0 .. 17)
+ */
+struct sn3218_led {
+       struct sn3218 *chip;
+       struct led_classdev led_cdev;
+       int led_num;
+};
+
+static int sn3218_led_set(struct led_classdev *led_cdev,
+                         enum led_brightness brightness)
+{
+       struct sn3218_led *led =
+                       container_of(led_cdev, struct sn3218_led, led_cdev);
+       struct regmap *regmap = led->chip->regmap;
+       u8 bank = led->led_num / 6;
+       u8 mask = 0x1 << (led->led_num % 6);
+       u8 val;
+       int ret;
+
+       if (brightness == LED_OFF)
+               val = 0;
+       else
+               val = mask;
+
+       ret = regmap_update_bits(regmap, SN3218_LED_1_6 + bank, mask, val);
+       if (ret < 0)
+               return ret;
+
+       if (brightness > LED_OFF) {
+               ret = regmap_write(regmap, SN3218_PWM_1 + led->led_num,
+                                  brightness);
+               if (ret < 0)
+                       return ret;
+       }
+
+       ret = regmap_write(regmap, SN3218_UPDATE, 0xff);
+
+       return ret;
+}
+
+static void sn3218_led_init(struct sn3218 *sn3218, struct device_node *node,
+                           u32 reg)
+{
+       struct sn3218_led *leds = sn3218->leds;
+       struct led_classdev *cdev = &leds[reg].led_cdev;
+
+       leds[reg].led_num = reg;
+       leds[reg].chip = sn3218;
+
+       if (of_property_read_string(node, "label", &cdev->name))
+               cdev->name = node->name;
+
+       of_property_read_string(node, "linux,default-trigger",
+                               &cdev->default_trigger);
+
+       cdev->brightness_set_blocking = sn3218_led_set;
+}
+
+static const struct reg_default sn3218_reg_defs[] = {
+       { SN3218_MODE, 0x00},
+       { SN3218_PWM_1, 0x00},
+       { SN3218_PWM_2, 0x00},
+       { SN3218_PWM_3, 0x00},
+       { SN3218_PWM_4, 0x00},
+       { SN3218_PWM_5, 0x00},
+       { SN3218_PWM_6, 0x00},
+       { SN3218_PWM_7, 0x00},
+       { SN3218_PWM_8, 0x00},
+       { SN3218_PWM_9, 0x00},
+       { SN3218_PWM_10, 0x00},
+       { SN3218_PWM_11, 0x00},
+       { SN3218_PWM_12, 0x00},
+       { SN3218_PWM_13, 0x00},
+       { SN3218_PWM_14, 0x00},
+       { SN3218_PWM_15, 0x00},
+       { SN3218_PWM_16, 0x00},
+       { SN3218_PWM_17, 0x00},
+       { SN3218_PWM_18, 0x00},
+       { SN3218_LED_1_6, 0x00},
+       { SN3218_LED_7_12, 0x00},
+       { SN3218_LED_13_18, 0x00},
+       { SN3218_UPDATE, 0x00},
+       { SN3218_RESET, 0x00},
+};
+
+static const struct regmap_config sn3218_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+
+       .max_register = SN3218_RESET,
+       .reg_defaults = sn3218_reg_defs,
+       .num_reg_defaults = ARRAY_SIZE(sn3218_reg_defs),
+       .cache_type = REGCACHE_RBTREE,
+};
+
+static int sn3218_init(struct i2c_client *client, struct sn3218 *sn3218)
+{
+       struct device_node *np = client->dev.of_node, *child;
+       struct sn3218_led *leds;
+       int ret, count;
+
+       count = of_get_child_count(np);
+       if (!count)
+               return -ENODEV;
+
+       if (count > NUM_LEDS) {
+               dev_err(&client->dev, "Invalid LED count %d\n", count);
+               return -EINVAL;
+       }
+
+       leds = devm_kzalloc(&client->dev, count * sizeof(*leds), GFP_KERNEL);
+       if (!leds)
+               return -ENOMEM;
+
+       sn3218->leds = leds;
+       sn3218->num_leds = count;
+       sn3218->client = client;
+
+       sn3218->regmap = devm_regmap_init_i2c(client, &sn3218_regmap_config);
+       if (IS_ERR(sn3218->regmap)) {
+               ret = PTR_ERR(sn3218->regmap);
+               dev_err(&client->dev, "Failed to allocate register map: %d\n",
+                       ret);
+               return ret;
+       }
+
+       for_each_child_of_node(np, child) {
+               u32 reg;
+
+               ret = of_property_read_u32(child, "reg", &reg);
+               if (ret)
+                       goto fail;
+
+               if (reg >= count) {
+                       dev_err(&client->dev, "Invalid LED (%u >= %d)\n", reg,
+                               count);
+                       ret = -EINVAL;
+                       goto fail;
+               }
+
+               sn3218_led_init(sn3218, child, reg);
+       }
+
+       return 0;
+
+fail:
+       of_node_put(child);
+       return ret;
+}
+
+static int sn3218_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
+{
+       struct sn3218 *sn3218;
+       struct sn3218_led *leds;
+       struct device *dev = &client->dev;
+       int i, ret;
+
+       sn3218 = devm_kzalloc(dev, sizeof(*sn3218), GFP_KERNEL);
+       if (!sn3218)
+               return -ENOMEM;
+
+       ret = sn3218_init(client, sn3218);
+       if (ret)
+               return ret;
+
+       i2c_set_clientdata(client, sn3218);
+       leds = sn3218->leds;
+
+       /*
+        * Since the chip is write-only, reset it to a defined
+        * state (all LEDs off).
+        */
+       ret = regmap_write(sn3218->regmap, SN3218_RESET, 0xff);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < sn3218->num_leds; i++) {
+               ret = devm_led_classdev_register(dev, &leds[i].led_cdev);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return regmap_write(sn3218->regmap, SN3218_MODE, SN3218_MODE_NORMAL);
+}
+
+static int sn3218_remove(struct i2c_client *client)
+{
+       struct sn3218 *sn3218 = i2c_get_clientdata(client);
+
+       regmap_write(sn3218->regmap, SN3218_MODE, SN3218_MODE_SHUTDOWN);
+
+       return 0;
+}
+
+static void sn3218_shutdown(struct i2c_client *client)
+{
+       struct sn3218 *sn3218 = i2c_get_clientdata(client);
+
+       regmap_write(sn3218->regmap, SN3218_MODE, SN3218_MODE_SHUTDOWN);
+}
+
+static const struct i2c_device_id sn3218_id[] = {
+       { "sn3218", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, sn3218_id);
+
+static const struct of_device_id of_sn3218_match[] = {
+       { .compatible = "si-en,sn3218", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_sn3218_match);
+
+static struct i2c_driver sn3218_driver = {
+       .driver = {
+               .name   = "leds-sn3218",
+               .of_match_table = of_match_ptr(of_sn3218_match),
+       },
+       .probe  = sn3218_probe,
+       .remove = sn3218_remove,
+       .shutdown = sn3218_shutdown,
+       .id_table = sn3218_id,
+};
+
+module_i2c_driver(sn3218_driver);
+
+MODULE_DESCRIPTION("Si-En SN3218 LED Driver");
+MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
+MODULE_LICENSE("GPL v2");
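
For orientation, the register map above gives each of the 18 channels one PWM register plus one bit in one of three control banks, and a write to SN3218_UPDATE latches the shadowed values. An illustrative helper (not part of the driver) that mirrors the arithmetic in sn3218_led_set():

#include <linux/types.h>

/* Registers touched for LED index n (0 .. 17); values match the #defines above. */
static void sn3218_regs_for(unsigned int n, u8 *pwm_reg, u8 *ctrl_reg, u8 *ctrl_mask)
{
        *pwm_reg   = 0x01 + n;          /* SN3218_PWM_1 + n       */
        *ctrl_reg  = 0x13 + n / 6;      /* SN3218_LED_1_6 + bank  */
        *ctrl_mask = 0x1 << (n % 6);    /* bit within the bank    */
}
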
index 4f12c6f01fe7d384d086321a09d73c1fb8fdd4a3..b6819f0fc6083266afd1d9fbea5c7e42c41b13fc 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/macio.h>
 #include <asm/pmac_feature.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 
 #undef DEBUG
 
index 546d05f4358a7c452ee80366b3ae782e685fd7ef..b2bbe8659beda5ad8182b5f4ff7bbefeda970459 100644 (file)
@@ -81,6 +81,7 @@ config STI_MBOX
 config MAILBOX_TEST
        tristate "Mailbox Test Client"
        depends on OF
+       depends on HAS_IOMEM
        help
          Test client to help with testing new Controller driver
          implementations.
index 45d85aea9955404d0c4567f33863e0e3f500674f..8f779a1ec99c4b248c9721fceef19b579432539b 100644 (file)
@@ -81,16 +81,10 @@ static struct mbox_controller pcc_mbox_ctrl = {};
  */
 static struct mbox_chan *get_pcc_channel(int id)
 {
-       struct mbox_chan *pcc_chan;
-
        if (id < 0 || id > pcc_mbox_ctrl.num_chans)
                return ERR_PTR(-ENOENT);
 
-       pcc_chan = (struct mbox_chan *)
-               (unsigned long) pcc_mbox_channels +
-               (id * sizeof(*pcc_chan));
-
-       return pcc_chan;
+       return &pcc_mbox_channels[id];
 }
 
 /**
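
The rewrite of get_pcc_channel() above replaces the cast-through-unsigned-long byte arithmetic with plain array indexing; C pointer arithmetic already scales by the element size, so indexing the contiguous channel array is sufficient. A minimal sketch, where 'chans' and 'id' stand in for pcc_mbox_channels and the requested channel:

#include <linux/mailbox_controller.h>   /* struct mbox_chan */

static struct mbox_chan *nth_chan(struct mbox_chan *chans, int id)
{
        return &chans[id];              /* same address as chans + id */
}
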
index 4f22e919787aba1bcab7b8f6799a3c22a6b7ce28..d80cce499a56e595d1f2679170124ca5fc620315 100644 (file)
@@ -210,10 +210,6 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
        struct block_device *bdev;
        struct mddev *mddev = bitmap->mddev;
        struct bitmap_storage *store = &bitmap->storage;
-       int node_offset = 0;
-
-       if (mddev_is_clustered(bitmap->mddev))
-               node_offset = bitmap->cluster_slot * store->file_pages;
 
        while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
                int size = PAGE_SIZE;
index 5780accffa3059dfa979ef8ff81752bfabd46452..2238d6f484fe97ef03577c1fcae0da06d0db472e 100644 (file)
@@ -2771,7 +2771,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        ti->split_discard_bios = false;
 
        cache->features = ca->features;
-       ti->per_bio_data_size = get_per_bio_data_size(cache);
+       ti->per_io_data_size = get_per_bio_data_size(cache);
 
        cache->callbacks.congested_fn = cache_is_congested;
        dm_table_add_target_callbacks(ti->table, &cache->callbacks);
index 3147c8d09ea84a0a76d0fd7ead35931a89e29aed..4f3cb355494469e4abfc259a0042cafd187e36cc 100644 (file)
@@ -28,6 +28,7 @@
 #include <crypto/hash.h>
 #include <crypto/md5.h>
 #include <crypto/algapi.h>
+#include <crypto/skcipher.h>
 
 #include <linux/device-mapper.h>
 
@@ -44,7 +45,7 @@ struct convert_context {
        struct bvec_iter iter_out;
        sector_t cc_sector;
        atomic_t cc_pending;
-       struct ablkcipher_request *req;
+       struct skcipher_request *req;
 };
 
 /*
@@ -86,7 +87,7 @@ struct crypt_iv_operations {
 };
 
 struct iv_essiv_private {
-       struct crypto_hash *hash_tfm;
+       struct crypto_ahash *hash_tfm;
        u8 *salt;
 };
 
@@ -153,13 +154,13 @@ struct crypt_config {
 
        /* ESSIV: struct crypto_cipher *essiv_tfm */
        void *iv_private;
-       struct crypto_ablkcipher **tfms;
+       struct crypto_skcipher **tfms;
        unsigned tfms_count;
 
        /*
         * Layout of each crypto request:
         *
-        *   struct ablkcipher_request
+        *   struct skcipher_request
         *      context
         *      padding
         *   struct dm_crypt_request
@@ -189,7 +190,7 @@ static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq);
 /*
  * Use this to access cipher attributes that are the same for each CPU.
  */
-static struct crypto_ablkcipher *any_tfm(struct crypt_config *cc)
+static struct crypto_skcipher *any_tfm(struct crypt_config *cc)
 {
        return cc->tfms[0];
 }
@@ -263,23 +264,25 @@ static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
 static int crypt_iv_essiv_init(struct crypt_config *cc)
 {
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, essiv->hash_tfm);
        struct scatterlist sg;
        struct crypto_cipher *essiv_tfm;
        int err;
 
        sg_init_one(&sg, cc->key, cc->key_size);
-       desc.tfm = essiv->hash_tfm;
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       ahash_request_set_tfm(req, essiv->hash_tfm);
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+       ahash_request_set_crypt(req, &sg, essiv->salt, cc->key_size);
 
-       err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
        if (err)
                return err;
 
        essiv_tfm = cc->iv_private;
 
        err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
-                           crypto_hash_digestsize(essiv->hash_tfm));
+                           crypto_ahash_digestsize(essiv->hash_tfm));
        if (err)
                return err;
 
@@ -290,7 +293,7 @@ static int crypt_iv_essiv_init(struct crypt_config *cc)
 static int crypt_iv_essiv_wipe(struct crypt_config *cc)
 {
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
-       unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
+       unsigned salt_size = crypto_ahash_digestsize(essiv->hash_tfm);
        struct crypto_cipher *essiv_tfm;
        int r, err = 0;
 
@@ -320,7 +323,7 @@ static struct crypto_cipher *setup_essiv_cpu(struct crypt_config *cc,
        }
 
        if (crypto_cipher_blocksize(essiv_tfm) !=
-           crypto_ablkcipher_ivsize(any_tfm(cc))) {
+           crypto_skcipher_ivsize(any_tfm(cc))) {
                ti->error = "Block size of ESSIV cipher does "
                            "not match IV size of block cipher";
                crypto_free_cipher(essiv_tfm);
@@ -342,7 +345,7 @@ static void crypt_iv_essiv_dtr(struct crypt_config *cc)
        struct crypto_cipher *essiv_tfm;
        struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
 
-       crypto_free_hash(essiv->hash_tfm);
+       crypto_free_ahash(essiv->hash_tfm);
        essiv->hash_tfm = NULL;
 
        kzfree(essiv->salt);
@@ -360,7 +363,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
 {
        struct crypto_cipher *essiv_tfm = NULL;
-       struct crypto_hash *hash_tfm = NULL;
+       struct crypto_ahash *hash_tfm = NULL;
        u8 *salt = NULL;
        int err;
 
@@ -370,14 +373,14 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
        }
 
        /* Allocate hash algorithm */
-       hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
+       hash_tfm = crypto_alloc_ahash(opts, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash_tfm)) {
                ti->error = "Error initializing ESSIV hash";
                err = PTR_ERR(hash_tfm);
                goto bad;
        }
 
-       salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
+       salt = kzalloc(crypto_ahash_digestsize(hash_tfm), GFP_KERNEL);
        if (!salt) {
                ti->error = "Error kmallocing salt storage in ESSIV";
                err = -ENOMEM;
@@ -388,7 +391,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
        cc->iv_gen_private.essiv.hash_tfm = hash_tfm;
 
        essiv_tfm = setup_essiv_cpu(cc, ti, salt,
-                               crypto_hash_digestsize(hash_tfm));
+                               crypto_ahash_digestsize(hash_tfm));
        if (IS_ERR(essiv_tfm)) {
                crypt_iv_essiv_dtr(cc);
                return PTR_ERR(essiv_tfm);
@@ -399,7 +402,7 @@ static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
 
 bad:
        if (hash_tfm && !IS_ERR(hash_tfm))
-               crypto_free_hash(hash_tfm);
+               crypto_free_ahash(hash_tfm);
        kfree(salt);
        return err;
 }
@@ -419,7 +422,7 @@ static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv,
 static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
 {
-       unsigned bs = crypto_ablkcipher_blocksize(any_tfm(cc));
+       unsigned bs = crypto_skcipher_blocksize(any_tfm(cc));
        int log = ilog2(bs);
 
        /* we need to calculate how far we must shift the sector count
@@ -816,27 +819,27 @@ static void crypt_convert_init(struct crypt_config *cc,
 }
 
 static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
-                                            struct ablkcipher_request *req)
+                                            struct skcipher_request *req)
 {
        return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
 }
 
-static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
+static struct skcipher_request *req_of_dmreq(struct crypt_config *cc,
                                               struct dm_crypt_request *dmreq)
 {
-       return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
+       return (struct skcipher_request *)((char *)dmreq - cc->dmreq_start);
 }
 
 static u8 *iv_of_dmreq(struct crypt_config *cc,
                       struct dm_crypt_request *dmreq)
 {
        return (u8 *)ALIGN((unsigned long)(dmreq + 1),
-               crypto_ablkcipher_alignmask(any_tfm(cc)) + 1);
+               crypto_skcipher_alignmask(any_tfm(cc)) + 1);
 }
 
 static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx,
-                              struct ablkcipher_request *req)
+                              struct skcipher_request *req)
 {
        struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
        struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
@@ -866,13 +869,13 @@ static int crypt_convert_block(struct crypt_config *cc,
                        return r;
        }
 
-       ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
-                                    1 << SECTOR_SHIFT, iv);
+       skcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
+                                  1 << SECTOR_SHIFT, iv);
 
        if (bio_data_dir(ctx->bio_in) == WRITE)
-               r = crypto_ablkcipher_encrypt(req);
+               r = crypto_skcipher_encrypt(req);
        else
-               r = crypto_ablkcipher_decrypt(req);
+               r = crypto_skcipher_decrypt(req);
 
        if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
                r = cc->iv_gen_ops->post(cc, iv, dmreq);
@@ -891,23 +894,23 @@ static void crypt_alloc_req(struct crypt_config *cc,
        if (!ctx->req)
                ctx->req = mempool_alloc(cc->req_pool, GFP_NOIO);
 
-       ablkcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
+       skcipher_request_set_tfm(ctx->req, cc->tfms[key_index]);
 
        /*
         * Use REQ_MAY_BACKLOG so a cipher driver internally backlogs
         * requests if driver request queue is full.
         */
-       ablkcipher_request_set_callback(ctx->req,
+       skcipher_request_set_callback(ctx->req,
            CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
            kcryptd_async_done, dmreq_of_req(cc, ctx->req));
 }
 
 static void crypt_free_req(struct crypt_config *cc,
-                          struct ablkcipher_request *req, struct bio *base_bio)
+                          struct skcipher_request *req, struct bio *base_bio)
 {
        struct dm_crypt_io *io = dm_per_bio_data(base_bio, cc->per_bio_data_size);
 
-       if ((struct ablkcipher_request *)(io + 1) != req)
+       if ((struct skcipher_request *)(io + 1) != req)
                mempool_free(req, cc->req_pool);
 }
 
@@ -1437,7 +1440,7 @@ static void crypt_free_tfms(struct crypt_config *cc)
 
        for (i = 0; i < cc->tfms_count; i++)
                if (cc->tfms[i] && !IS_ERR(cc->tfms[i])) {
-                       crypto_free_ablkcipher(cc->tfms[i]);
+                       crypto_free_skcipher(cc->tfms[i]);
                        cc->tfms[i] = NULL;
                }
 
@@ -1450,13 +1453,13 @@ static int crypt_alloc_tfms(struct crypt_config *cc, char *ciphermode)
        unsigned i;
        int err;
 
-       cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_ablkcipher *),
+       cc->tfms = kmalloc(cc->tfms_count * sizeof(struct crypto_skcipher *),
                           GFP_KERNEL);
        if (!cc->tfms)
                return -ENOMEM;
 
        for (i = 0; i < cc->tfms_count; i++) {
-               cc->tfms[i] = crypto_alloc_ablkcipher(ciphermode, 0, 0);
+               cc->tfms[i] = crypto_alloc_skcipher(ciphermode, 0, 0);
                if (IS_ERR(cc->tfms[i])) {
                        err = PTR_ERR(cc->tfms[i]);
                        crypt_free_tfms(cc);
@@ -1476,9 +1479,9 @@ static int crypt_setkey_allcpus(struct crypt_config *cc)
        subkey_size = (cc->key_size - cc->key_extra_size) >> ilog2(cc->tfms_count);
 
        for (i = 0; i < cc->tfms_count; i++) {
-               r = crypto_ablkcipher_setkey(cc->tfms[i],
-                                            cc->key + (i * subkey_size),
-                                            subkey_size);
+               r = crypto_skcipher_setkey(cc->tfms[i],
+                                          cc->key + (i * subkey_size),
+                                          subkey_size);
                if (r)
                        err = r;
        }
@@ -1645,7 +1648,7 @@ static int crypt_ctr_cipher(struct dm_target *ti,
        }
 
        /* Initialize IV */
-       cc->iv_size = crypto_ablkcipher_ivsize(any_tfm(cc));
+       cc->iv_size = crypto_skcipher_ivsize(any_tfm(cc));
        if (cc->iv_size)
                /* at least a 64 bit sector number should fit in our buffer */
                cc->iv_size = max(cc->iv_size,
@@ -1763,21 +1766,21 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        if (ret < 0)
                goto bad;
 
-       cc->dmreq_start = sizeof(struct ablkcipher_request);
-       cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
+       cc->dmreq_start = sizeof(struct skcipher_request);
+       cc->dmreq_start += crypto_skcipher_reqsize(any_tfm(cc));
        cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
 
-       if (crypto_ablkcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
+       if (crypto_skcipher_alignmask(any_tfm(cc)) < CRYPTO_MINALIGN) {
                /* Allocate the padding exactly */
                iv_size_padding = -(cc->dmreq_start + sizeof(struct dm_crypt_request))
-                               & crypto_ablkcipher_alignmask(any_tfm(cc));
+                               & crypto_skcipher_alignmask(any_tfm(cc));
        } else {
                /*
                 * If the cipher requires greater alignment than kmalloc
                 * alignment, we don't know the exact position of the
                 * initialization vector. We must assume worst case.
                 */
-               iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
+               iv_size_padding = crypto_skcipher_alignmask(any_tfm(cc));
        }
 
        ret = -ENOMEM;
@@ -1788,7 +1791,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       cc->per_bio_data_size = ti->per_bio_data_size =
+       cc->per_bio_data_size = ti->per_io_data_size =
                ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start +
                      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
                      ARCH_KMALLOC_MINALIGN);
@@ -1922,7 +1925,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 
        io = dm_per_bio_data(bio, cc->per_bio_data_size);
        crypt_io_init(io, cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
-       io->ctx.req = (struct ablkcipher_request *)(io + 1);
+       io->ctx.req = (struct skcipher_request *)(io + 1);
 
        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
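
The dm-crypt hunks above are a mechanical conversion from the legacy ablkcipher interface to the skcipher API. A minimal sketch of the new call sequence for a synchronous in-place encryption; the cipher name, key and buffer are placeholders, len is assumed to be a multiple of the cipher block size, and a synchronous implementation is requested via the CRYPTO_ALG_ASYNC mask:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int demo_encrypt(u8 *buf, unsigned int len,
                        const u8 *key, unsigned int keylen, u8 *iv)
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        int err;

        tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, buf, len);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);    /* in-place */
        err = crypto_skcipher_encrypt(req);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}
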
index b4c356a21123051d1716ca25badb13cfe78b4e26..cc70871a6d298a92f451d341f1ee6a7c5d84533b 100644 (file)
@@ -204,7 +204,7 @@ out:
 
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
-       ti->per_bio_data_size = sizeof(struct dm_delay_info);
+       ti->per_io_data_size = sizeof(struct dm_delay_info);
        ti->private = dc;
        return 0;
 
index 09e2afcafd2ddc2575d552e14eb4f469505300f2..b7341de87015e774c30e93324c5e05560b7f6c3f 100644 (file)
@@ -220,7 +220,7 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
-       ti->per_bio_data_size = sizeof(struct per_bio_data);
+       ti->per_io_data_size = sizeof(struct per_bio_data);
        ti->private = fc;
        return 0;
 
index 80a43954325966a872cf8ebc95fb23c92c0b8819..2adf81d81fca4237f6c911211a0d5b0dc21546d1 100644 (file)
@@ -1291,7 +1291,8 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
 
        immutable_target_type = dm_get_immutable_target_type(md);
        if (immutable_target_type &&
-           (immutable_target_type != dm_table_get_immutable_target_type(t))) {
+           (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
+           !dm_table_get_wildcard_target(t)) {
                DMWARN("can't replace immutable target type %s",
                       immutable_target_type->name);
                r = -EINVAL;
@@ -1303,7 +1304,7 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
                dm_set_md_type(md, dm_table_get_type(t));
 
                /* setup md->queue to reflect md's type (may block) */
-               r = dm_setup_md_queue(md);
+               r = dm_setup_md_queue(md, t);
                if (r) {
                        DMWARN("unable to set up device queue for new table.");
                        goto err_unlock_md_type;
index 624589d51c2cb67828567ad34b02d283e5965956..608302e222af0c3016c5d6d59ceb330294d1644b 100644 (file)
@@ -475,7 +475,7 @@ static int log_writes_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        ti->flush_supported = true;
        ti->num_discard_bios = 1;
        ti->discards_supported = true;
-       ti->per_bio_data_size = sizeof(struct per_bio_data);
+       ti->per_io_data_size = sizeof(struct per_bio_data);
        ti->private = lc;
        return 0;
 
index cfa29f574c2a9e1454788a5757471835b254d857..177a016fe980e9b36ffd6a5125a83658f8fa9b94 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/delay.h>
 #include <scsi/scsi_dh.h>
 #include <linux/atomic.h>
+#include <linux/blk-mq.h>
 
 #define DM_MSG_PREFIX "multipath"
 #define DM_PG_INIT_DELAY_MSECS 2000
@@ -181,10 +182,9 @@ static void free_priority_group(struct priority_group *pg,
        kfree(pg);
 }
 
-static struct multipath *alloc_multipath(struct dm_target *ti)
+static struct multipath *alloc_multipath(struct dm_target *ti, bool use_blk_mq)
 {
        struct multipath *m;
-       unsigned min_ios = dm_get_reserved_rq_based_ios();
 
        m = kzalloc(sizeof(*m), GFP_KERNEL);
        if (m) {
@@ -195,11 +195,18 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
                INIT_WORK(&m->trigger_event, trigger_event);
                init_waitqueue_head(&m->pg_init_wait);
                mutex_init(&m->work_mutex);
-               m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
-               if (!m->mpio_pool) {
-                       kfree(m);
-                       return NULL;
+
+               m->mpio_pool = NULL;
+               if (!use_blk_mq) {
+                       unsigned min_ios = dm_get_reserved_rq_based_ios();
+
+                       m->mpio_pool = mempool_create_slab_pool(min_ios, _mpio_cache);
+                       if (!m->mpio_pool) {
+                               kfree(m);
+                               return NULL;
+                       }
                }
+
                m->ti = ti;
                ti->private = m;
        }
@@ -222,26 +229,41 @@ static void free_multipath(struct multipath *m)
        kfree(m);
 }
 
-static int set_mapinfo(struct multipath *m, union map_info *info)
+static struct dm_mpath_io *get_mpio(union map_info *info)
+{
+       return info->ptr;
+}
+
+static struct dm_mpath_io *set_mpio(struct multipath *m, union map_info *info)
 {
        struct dm_mpath_io *mpio;
 
+       if (!m->mpio_pool) {
+               /* Use blk-mq pdu memory requested via per_io_data_size */
+               mpio = get_mpio(info);
+               memset(mpio, 0, sizeof(*mpio));
+               return mpio;
+       }
+
        mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
        if (!mpio)
-               return -ENOMEM;
+               return NULL;
 
        memset(mpio, 0, sizeof(*mpio));
        info->ptr = mpio;
 
-       return 0;
+       return mpio;
 }
 
-static void clear_mapinfo(struct multipath *m, union map_info *info)
+static void clear_request_fn_mpio(struct multipath *m, union map_info *info)
 {
-       struct dm_mpath_io *mpio = info->ptr;
+       /* Only needed for non blk-mq (.request_fn) multipath */
+       if (m->mpio_pool) {
+               struct dm_mpath_io *mpio = info->ptr;
 
-       info->ptr = NULL;
-       mempool_free(mpio, m->mpio_pool);
+               info->ptr = NULL;
+               mempool_free(mpio, m->mpio_pool);
+       }
 }
 
 /*-----------------------------------------------
@@ -380,7 +402,7 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
                           union map_info *map_context,
                           struct request *rq, struct request **__clone)
 {
-       struct multipath *m = (struct multipath *) ti->private;
+       struct multipath *m = ti->private;
        int r = DM_MAPIO_REQUEUE;
        size_t nr_bytes = clone ? blk_rq_bytes(clone) : blk_rq_bytes(rq);
        struct pgpath *pgpath;
@@ -405,30 +427,37 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
                goto out_unlock;
        }
 
-       if (set_mapinfo(m, map_context) < 0)
+       spin_unlock_irq(&m->lock);
+
+       mpio = set_mpio(m, map_context);
+       if (!mpio)
                /* ENOMEM, requeue */
-               goto out_unlock;
+               return r;
 
-       mpio = map_context->ptr;
        mpio->pgpath = pgpath;
        mpio->nr_bytes = nr_bytes;
 
        bdev = pgpath->path.dev->bdev;
 
-       spin_unlock_irq(&m->lock);
-
        if (clone) {
-               /* Old request-based interface: allocated clone is passed in */
+               /*
+                * Old request-based interface: allocated clone is passed in.
+                * Used by both: .request_fn stacked on .request_fn path(s) and
+                * blk-mq stacked on .request_fn path(s).
+                */
                clone->q = bdev_get_queue(bdev);
                clone->rq_disk = bdev->bd_disk;
                clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
        } else {
-               /* blk-mq request-based interface */
-               *__clone = blk_get_request(bdev_get_queue(bdev),
-                                          rq_data_dir(rq), GFP_ATOMIC);
+               /*
+                * blk-mq request-based interface; used by both:
+                * .request_fn stacked on blk-mq path(s) and
+                * blk-mq stacked on blk-mq path(s).
+                */
+               *__clone = blk_mq_alloc_request(bdev_get_queue(bdev),
+                                               rq_data_dir(rq), BLK_MQ_REQ_NOWAIT);
                if (IS_ERR(*__clone)) {
                        /* ENOMEM, requeue */
-                       clear_mapinfo(m, map_context);
                        return r;
                }
                (*__clone)->bio = (*__clone)->biotail = NULL;
@@ -463,7 +492,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
 
 static void multipath_release_clone(struct request *clone)
 {
-       blk_put_request(clone);
+       blk_mq_free_request(clone);
 }
 
 /*
@@ -820,11 +849,12 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
        struct dm_arg_set as;
        unsigned pg_count = 0;
        unsigned next_pg_num;
+       bool use_blk_mq = dm_use_blk_mq(dm_table_get_md(ti->table));
 
        as.argc = argc;
        as.argv = argv;
 
-       m = alloc_multipath(ti);
+       m = alloc_multipath(ti, use_blk_mq);
        if (!m) {
                ti->error = "can't allocate multipath";
                return -EINVAL;
@@ -880,6 +910,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_same_bios = 1;
+       if (use_blk_mq)
+               ti->per_io_data_size = sizeof(struct dm_mpath_io);
 
        return 0;
 
@@ -1291,21 +1323,21 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
                            int error, union map_info *map_context)
 {
        struct multipath *m = ti->private;
-       struct dm_mpath_io *mpio = map_context->ptr;
+       struct dm_mpath_io *mpio = get_mpio(map_context);
        struct pgpath *pgpath;
        struct path_selector *ps;
        int r;
 
        BUG_ON(!mpio);
 
-       r  = do_end_io(m, clone, error, mpio);
+       r = do_end_io(m, clone, error, mpio);
        pgpath = mpio->pgpath;
        if (pgpath) {
                ps = &pgpath->pg->ps;
                if (ps->type->end_io)
                        ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
        }
-       clear_mapinfo(m, map_context);
+       clear_request_fn_mpio(m, map_context);
 
        return r;
 }
@@ -1318,7 +1350,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone,
  */
 static void multipath_presuspend(struct dm_target *ti)
 {
-       struct multipath *m = (struct multipath *) ti->private;
+       struct multipath *m = ti->private;
 
        queue_if_no_path(m, 0, 1);
 }
@@ -1337,7 +1369,7 @@ static void multipath_postsuspend(struct dm_target *ti)
  */
 static void multipath_resume(struct dm_target *ti)
 {
-       struct multipath *m = (struct multipath *) ti->private;
+       struct multipath *m = ti->private;
        unsigned long flags;
 
        spin_lock_irqsave(&m->lock, flags);
@@ -1366,7 +1398,7 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
 {
        int sz = 0;
        unsigned long flags;
-       struct multipath *m = (struct multipath *) ti->private;
+       struct multipath *m = ti->private;
        struct priority_group *pg;
        struct pgpath *p;
        unsigned pg_num;
@@ -1474,7 +1506,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
 {
        int r = -EINVAL;
        struct dm_dev *dev;
-       struct multipath *m = (struct multipath *) ti->private;
+       struct multipath *m = ti->private;
        action_fn action;
 
        mutex_lock(&m->work_mutex);
@@ -1684,7 +1716,8 @@ out:
  *---------------------------------------------------------------*/
 static struct target_type multipath_target = {
        .name = "multipath",
-       .version = {1, 10, 0},
+       .version = {1, 11, 0},
+       .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
        .module = THIS_MODULE,
        .ctr = multipath_ctr,
        .dtr = multipath_dtr,
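
The multipath changes above move per-request state out of a private mempool and into the per-request data area that DM core now reserves according to ti->per_io_data_size. A sketch of that pattern from a hypothetical blk-mq request-based target; the demo_* names are illustrative and the real path selection and clone setup are omitted:

#include <linux/device-mapper.h>
#include <linux/blkdev.h>
#include <linux/jiffies.h>

struct demo_io {                        /* per-request bookkeeping */
        unsigned long start_jiffies;
};

static int demo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        /* Ask DM core to reserve one struct demo_io per request. */
        ti->per_io_data_size = sizeof(struct demo_io);
        return 0;
}

static int demo_clone_and_map(struct dm_target *ti, struct request *rq,
                              union map_info *map_context,
                              struct request **clone)
{
        /* With blk-mq, map_context->ptr already points at the reserved area
         * (see dm_mq_init_request() later in this diff); no mempool needed. */
        struct demo_io *io = map_context->ptr;

        io->start_jiffies = jiffies;

        /* ... choose a path, allocate *clone, etc. ... */
        return DM_MAPIO_REMAPPED;
}
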
index f2a363a89629902350649cb818f6159ecda1afd9..b3ccf1e0d4f218d8cb55f5d895e1bf46ac26ac8b 100644 (file)
@@ -1121,7 +1121,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
-       ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
+       ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
        ti->discard_zeroes_data_unsupported = true;
 
        ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
index 3766386080a48fbfb06226ae80646b5e2f0e653a..62479ac4baf7e3a7bc9a6b00fcdce7f7dbd5d49c 100644 (file)
@@ -1201,7 +1201,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 
        ti->private = s;
        ti->num_flush_bios = num_flush_bios;
-       ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
+       ti->per_io_data_size = sizeof(struct dm_snap_tracked_chunk);
 
        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
index 061152a437300bbfacfbbc6e475c3935df7cecb6..7210e5392cc434121b3c0849f18fd7bfde3a8f59 100644 (file)
@@ -920,6 +920,30 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
        return t->immutable_target_type;
 }
 
+struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
+{
+       /* Immutable target is implicitly a singleton */
+       if (t->num_targets > 1 ||
+           !dm_target_is_immutable(t->targets[0].type))
+               return NULL;
+
+       return t->targets;
+}
+
+struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
+{
+       struct dm_target *uninitialized_var(ti);
+       unsigned i = 0;
+
+       while (i < dm_table_get_num_targets(t)) {
+               ti = dm_table_get_target(t, i++);
+               if (dm_target_is_wildcard(ti->type))
+                       return ti;
+       }
+
+       return NULL;
+}
+
 bool dm_table_request_based(struct dm_table *t)
 {
        return __table_type_request_based(dm_table_get_type(t));
@@ -933,7 +957,7 @@ bool dm_table_mq_request_based(struct dm_table *t)
 static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
 {
        unsigned type = dm_table_get_type(t);
-       unsigned per_bio_data_size = 0;
+       unsigned per_io_data_size = 0;
        struct dm_target *tgt;
        unsigned i;
 
@@ -945,10 +969,10 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *
        if (type == DM_TYPE_BIO_BASED)
                for (i = 0; i < t->num_targets; i++) {
                        tgt = t->targets + i;
-                       per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size);
+                       per_io_data_size = max(per_io_data_size, tgt->per_io_data_size);
                }
 
-       t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size);
+       t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_io_data_size);
        if (!t->mempools)
                return -ENOMEM;
 
index 925ec1b15e75ede24ea91c74667d426950af251e..a317dd884ba6b5aca1b1355e7537b28bfa5c1bd4 100644 (file)
@@ -150,7 +150,8 @@ static void io_err_release_clone_rq(struct request *clone)
 
 static struct target_type error_target = {
        .name = "error",
-       .version = {1, 3, 0},
+       .version = {1, 4, 0},
+       .features = DM_TARGET_WILDCARD,
        .ctr  = io_err_ctr,
        .dtr  = io_err_dtr,
        .map  = io_err_map,
index f962d6453afd64e33b739253ca4166a276307dab..dc947f54824ebd3604a3053efa614b439f416c34 100644 (file)
@@ -344,7 +344,7 @@ static void subtree_dec(void *context, const void *value)
        memcpy(&root_le, value, sizeof(root_le));
        root = le64_to_cpu(root_le);
        if (dm_btree_del(info, root))
-               DMERR("btree delete failed\n");
+               DMERR("btree delete failed");
 }
 
 static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
index 72d91f477683f2c1dfad5a3a9614aaf0b26992e1..4fbbe1fb9f082def040e9ecf9f41321b8ab651e1 100644 (file)
@@ -4037,7 +4037,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        ti->num_flush_bios = 1;
        ti->flush_supported = true;
-       ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
+       ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);
 
        /* In case the pool supports discards, pass them on. */
        ti->discard_zeroes_data_unsupported = true;
index 1cc10c4de70101ace370a7716c2a0d5f42887ce2..459a9f8905edb22b81205e5196910b66956eaa1f 100644 (file)
@@ -812,7 +812,7 @@ int verity_fec_ctr(struct dm_verity *v)
        }
 
        /* Reserve space for our per-bio data */
-       ti->per_bio_data_size += sizeof(struct dm_verity_fec_io);
+       ti->per_io_data_size += sizeof(struct dm_verity_fec_io);
 
        return 0;
 }
index 5c5d30cb6ec59bba067fa3f97808353004ea8aec..0aba34a7b3b3feefaadc992299e457a8eb7a644d 100644 (file)
@@ -354,7 +354,7 @@ int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
                                       size_t len))
 {
        unsigned todo = 1 << v->data_dev_block_bits;
-       struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+       struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
        do {
                int r;
@@ -460,7 +460,7 @@ static int verity_verify_io(struct dm_verity_io *io)
 static void verity_finish_io(struct dm_verity_io *io, int error)
 {
        struct dm_verity *v = io->v;
-       struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+       struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
 
        bio->bi_end_io = io->orig_bi_end_io;
        bio->bi_error = error;
@@ -574,7 +574,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
        if (bio_data_dir(bio) == WRITE)
                return -EIO;
 
-       io = dm_per_bio_data(bio, ti->per_bio_data_size);
+       io = dm_per_bio_data(bio, ti->per_io_data_size);
        io->v = v;
        io->orig_bi_end_io = bio->bi_end_io;
        io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
@@ -1036,15 +1036,15 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                goto bad;
        }
 
-       ti->per_bio_data_size = sizeof(struct dm_verity_io) +
+       ti->per_io_data_size = sizeof(struct dm_verity_io) +
                                v->shash_descsize + v->digest_size * 2;
 
        r = verity_fec_ctr(v);
        if (r)
                goto bad;
 
-       ti->per_bio_data_size = roundup(ti->per_bio_data_size,
-                                       __alignof__(struct dm_verity_io));
+       ti->per_io_data_size = roundup(ti->per_io_data_size,
+                                      __alignof__(struct dm_verity_io));
 
        return 0;
 
index 5df40480228b7a26e3c73ac78e963ce47ed25448..11b5ad8c0c51b2580ed2484f9b9f87d7fba4bdf5 100644 (file)
@@ -106,14 +106,6 @@ struct dm_rq_clone_bio_info {
        struct bio clone;
 };
 
-union map_info *dm_get_rq_mapinfo(struct request *rq)
-{
-       if (rq && rq->end_io_data)
-               return &((struct dm_rq_target_io *)rq->end_io_data)->info;
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
-
 #define MINOR_ALLOCED ((void *)-1)
 
 /*
@@ -162,6 +154,7 @@ struct mapped_device {
        /* Protect queue and type against concurrent access. */
        struct mutex type_lock;
 
+       struct dm_target *immutable_target;
        struct target_type *immutable_target_type;
 
        struct gendisk *disk;
@@ -230,8 +223,9 @@ struct mapped_device {
        ktime_t last_rq_start_time;
 
        /* for blk-mq request-based DM support */
-       struct blk_mq_tag_set tag_set;
-       bool use_blk_mq;
+       struct blk_mq_tag_set *tag_set;
+       bool use_blk_mq:1;
+       bool init_tio_pdu:1;
 };
 
 #ifdef CONFIG_DM_MQ_DEFAULT
@@ -240,10 +234,17 @@ static bool use_blk_mq = true;
 static bool use_blk_mq = false;
 #endif
 
+#define DM_MQ_NR_HW_QUEUES 1
+#define DM_MQ_QUEUE_DEPTH 2048
+
+static unsigned blk_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
+static unsigned blk_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
+
 bool dm_use_blk_mq(struct mapped_device *md)
 {
        return md->use_blk_mq;
 }
+EXPORT_SYMBOL_GPL(dm_use_blk_mq);
 
 /*
  * For mempools pre-allocation at the table loading time.
@@ -310,6 +311,17 @@ unsigned dm_get_reserved_rq_based_ios(void)
 }
 EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
 
+unsigned dm_get_blk_mq_nr_hw_queues(void)
+{
+       return __dm_get_module_param(&blk_mq_nr_hw_queues, 1, 32);
+}
+
+unsigned dm_get_blk_mq_queue_depth(void)
+{
+       return __dm_get_module_param(&blk_mq_queue_depth,
+                                    DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
+}
+
 static int __init local_init(void)
 {
        int r = -ENOMEM;
@@ -1109,12 +1121,8 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
         * back into ->request_fn() could deadlock attempting to grab the
         * queue lock again.
         */
-       if (run_queue) {
-               if (md->queue->mq_ops)
-                       blk_mq_run_hw_queues(md->queue, true);
-               else
-                       blk_run_queue_async(md->queue);
-       }
+       if (!md->queue->mq_ops && run_queue)
+               blk_run_queue_async(md->queue);
 
        /*
         * dm_put() must be at the end of this function. See the comment above
@@ -1334,7 +1342,10 @@ static void dm_complete_request(struct request *rq, int error)
        struct dm_rq_target_io *tio = tio_from_request(rq);
 
        tio->error = error;
-       blk_complete_request(rq);
+       if (!rq->q->mq_ops)
+               blk_complete_request(rq);
+       else
+               blk_mq_complete_request(rq, rq->errors);
 }
 
 /*
@@ -1841,6 +1852,8 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md,
                                struct dm_rq_target_io *tio, gfp_t gfp_mask)
 {
        /*
+        * Create clone for use with .request_fn request_queue
+        *
         * Do not allocate a clone if tio->clone was already set
         * (see: dm_mq_queue_rq).
         */
@@ -1875,7 +1888,13 @@ static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
        tio->clone = NULL;
        tio->orig = rq;
        tio->error = 0;
-       memset(&tio->info, 0, sizeof(tio->info));
+       /*
+        * Avoid initializing info for blk-mq; it passes
+        * target-specific data through info.ptr
+        * (see: dm_mq_init_request)
+        */
+       if (!md->init_tio_pdu)
+               memset(&tio->info, 0, sizeof(tio->info));
        if (md->kworker_task)
                init_kthread_work(&tio->work, map_tio_request);
 }
@@ -2077,12 +2096,18 @@ static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
 static void dm_request_fn(struct request_queue *q)
 {
        struct mapped_device *md = q->queuedata;
-       int srcu_idx;
-       struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-       struct dm_target *ti;
+       struct dm_target *ti = md->immutable_target;
        struct request *rq;
        struct dm_rq_target_io *tio;
-       sector_t pos;
+       sector_t pos = 0;
+
+       if (unlikely(!ti)) {
+               int srcu_idx;
+               struct dm_table *map = dm_get_live_table(md, &srcu_idx);
+
+               ti = dm_table_find_target(map, pos);
+               dm_put_live_table(md, srcu_idx);
+       }
 
        /*
         * For suspend, check blk_queue_stopped() and increment
@@ -2093,33 +2118,21 @@ static void dm_request_fn(struct request_queue *q)
        while (!blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
-                       goto out;
+                       return;
 
                /* always use block 0 to find the target for flushes for now */
                pos = 0;
                if (!(rq->cmd_flags & REQ_FLUSH))
                        pos = blk_rq_pos(rq);
 
-               ti = dm_table_find_target(map, pos);
-               if (!dm_target_is_valid(ti)) {
-                       /*
-                        * Must perform setup, that rq_completed() requires,
-                        * before calling dm_kill_unmapped_request
-                        */
-                       DMERR_LIMIT("request attempted access beyond the end of device");
-                       dm_start_request(md, rq);
-                       dm_kill_unmapped_request(rq, -EIO);
-                       continue;
+               if ((dm_request_peeked_before_merge_deadline(md) &&
+                    md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
+                    md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
+                   (ti->type->busy && ti->type->busy(ti))) {
+                       blk_delay_queue(q, HZ / 100);
+                       return;
                }
 
-               if (dm_request_peeked_before_merge_deadline(md) &&
-                   md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
-                   md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq))
-                       goto delay_and_out;
-
-               if (ti->type->busy && ti->type->busy(ti))
-                       goto delay_and_out;
-
                dm_start_request(md, rq);
 
                tio = tio_from_request(rq);
@@ -2128,13 +2141,6 @@ static void dm_request_fn(struct request_queue *q)
                queue_kthread_work(&md->kworker, &tio->work);
                BUG_ON(!irqs_disabled());
        }
-
-       goto out;
-
-delay_and_out:
-       blk_delay_queue(q, HZ / 100);
-out:
-       dm_put_live_table(md, srcu_idx);
 }
 
 static int dm_any_congested(void *congested_data, int bdi_bits)
@@ -2144,19 +2150,18 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
        struct dm_table *map;
 
        if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
-               map = dm_get_live_table_fast(md);
-               if (map) {
+               if (dm_request_based(md)) {
                        /*
-                        * Request-based dm cares about only own queue for
-                        * the query about congestion status of request_queue
+                        * With request-based DM we only need to check the
+                        * top-level queue for congestion.
                         */
-                       if (dm_request_based(md))
-                               r = md->queue->backing_dev_info.wb.state &
-                                   bdi_bits;
-                       else
+                       r = md->queue->backing_dev_info.wb.state & bdi_bits;
+               } else {
+                       map = dm_get_live_table_fast(md);
+                       if (map)
                                r = dm_table_any_congested(map, bdi_bits);
+                       dm_put_live_table_fast(md);
                }
-               dm_put_live_table_fast(md);
        }
 
        return r;
@@ -2308,6 +2313,7 @@ static struct mapped_device *alloc_dev(int minor)
                goto bad_io_barrier;
 
        md->use_blk_mq = use_blk_mq;
+       md->init_tio_pdu = false;
        md->type = DM_TYPE_NONE;
        mutex_init(&md->suspend_lock);
        mutex_init(&md->type_lock);
@@ -2391,8 +2397,10 @@ static void free_dev(struct mapped_device *md)
        unlock_fs(md);
 
        cleanup_mapped_device(md);
-       if (md->use_blk_mq)
-               blk_mq_free_tag_set(&md->tag_set);
+       if (md->tag_set) {
+               blk_mq_free_tag_set(md->tag_set);
+               kfree(md->tag_set);
+       }
 
        free_table_devices(&md->table_devices);
        dm_stats_cleanup(&md->stats);
@@ -2500,8 +2508,15 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
         * This must be done before setting the queue restrictions,
         * because request-based dm may be run just after the setting.
         */
-       if (dm_table_request_based(t))
+       if (dm_table_request_based(t)) {
                stop_queue(q);
+               /*
+                * Leverage the fact that request-based DM targets are
+                * immutable singletons and establish md->immutable_target
+                * - used to optimize both dm_request_fn and dm_mq_queue_rq
+                */
+               md->immutable_target = dm_table_get_immutable_target(t);
+       }
 
        __bind_mempools(md, t);
 
@@ -2572,7 +2587,6 @@ void dm_set_md_type(struct mapped_device *md, unsigned type)
 
 unsigned dm_get_md_type(struct mapped_device *md)
 {
-       BUG_ON(!mutex_is_locked(&md->type_lock));
        return md->type;
 }
 
@@ -2640,6 +2654,11 @@ static int dm_mq_init_request(void *data, struct request *rq,
         */
        tio->md = md;
 
+       if (md->init_tio_pdu) {
+               /* target-specific per-io data is immediately after the tio */
+               tio->info.ptr = tio + 1;
+       }
+
        return 0;
 }
 
@@ -2649,28 +2668,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct request *rq = bd->rq;
        struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
        struct mapped_device *md = tio->md;
-       int srcu_idx;
-       struct dm_table *map = dm_get_live_table(md, &srcu_idx);
-       struct dm_target *ti;
-       sector_t pos;
+       struct dm_target *ti = md->immutable_target;
 
-       /* always use block 0 to find the target for flushes for now */
-       pos = 0;
-       if (!(rq->cmd_flags & REQ_FLUSH))
-               pos = blk_rq_pos(rq);
+       if (unlikely(!ti)) {
+               int srcu_idx;
+               struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 
-       ti = dm_table_find_target(map, pos);
-       if (!dm_target_is_valid(ti)) {
+               ti = dm_table_find_target(map, 0);
                dm_put_live_table(md, srcu_idx);
-               DMERR_LIMIT("request attempted access beyond the end of device");
-               /*
-                * Must perform setup, that rq_completed() requires,
-                * before returning BLK_MQ_RQ_QUEUE_ERROR
-                */
-               dm_start_request(md, rq);
-               return BLK_MQ_RQ_QUEUE_ERROR;
        }
-       dm_put_live_table(md, srcu_idx);
 
        if (ti->type->busy && ti->type->busy(ti))
                return BLK_MQ_RQ_QUEUE_BUSY;
@@ -2686,10 +2692,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
         */
        tio->ti = ti;
 
-       /* Clone the request if underlying devices aren't blk-mq */
-       if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
-               /* clone request is allocated at the end of the pdu */
-               tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
+       /*
+        * Both the table and md type cannot change after initial table load
+        */
+       if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
+               /*
+                * Clone the request if underlying devices aren't blk-mq
+                * - clone request is allocated at the end of the pdu
+                */
+               tio->clone = blk_mq_rq_to_pdu(rq) + sizeof(*tio) + ti->per_io_data_size;
                (void) clone_rq(rq, md, tio, GFP_ATOMIC);
                queue_kthread_work(&md->kworker, &tio->work);
        } else {
@@ -2712,30 +2723,40 @@ static struct blk_mq_ops dm_mq_ops = {
        .init_request = dm_mq_init_request,
 };
 
-static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
+static int dm_init_request_based_blk_mq_queue(struct mapped_device *md,
+                                             struct dm_target *immutable_tgt)
 {
        unsigned md_type = dm_get_md_type(md);
        struct request_queue *q;
        int err;
 
-       memset(&md->tag_set, 0, sizeof(md->tag_set));
-       md->tag_set.ops = &dm_mq_ops;
-       md->tag_set.queue_depth = BLKDEV_MAX_RQ;
-       md->tag_set.numa_node = NUMA_NO_NODE;
-       md->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
-       md->tag_set.nr_hw_queues = 1;
+       md->tag_set = kzalloc(sizeof(struct blk_mq_tag_set), GFP_KERNEL);
+       if (!md->tag_set)
+               return -ENOMEM;
+
+       md->tag_set->ops = &dm_mq_ops;
+       md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
+       md->tag_set->numa_node = NUMA_NO_NODE;
+       md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+       md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
+       md->tag_set->driver_data = md;
+
+       md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
+       if (immutable_tgt && immutable_tgt->per_io_data_size) {
+               /* any target-specific per-io data is immediately after the tio */
+               md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
+               md->init_tio_pdu = true;
+       }
        if (md_type == DM_TYPE_REQUEST_BASED) {
-               /* make the memory for non-blk-mq clone part of the pdu */
-               md->tag_set.cmd_size = sizeof(struct dm_rq_target_io) + sizeof(struct request);
-       } else
-               md->tag_set.cmd_size = sizeof(struct dm_rq_target_io);
-       md->tag_set.driver_data = md;
+               /* put the memory for non-blk-mq clone at the end of the pdu */
+               md->tag_set->cmd_size += sizeof(struct request);
+       }
 
-       err = blk_mq_alloc_tag_set(&md->tag_set);
+       err = blk_mq_alloc_tag_set(md->tag_set);
        if (err)
-               return err;
+               goto out_kfree_tag_set;
 
-       q = blk_mq_init_allocated_queue(&md->tag_set, md->queue);
+       q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
        if (IS_ERR(q)) {
                err = PTR_ERR(q);
                goto out_tag_set;
@@ -2752,7 +2773,10 @@ static int dm_init_request_based_blk_mq_queue(struct mapped_device *md)
        return 0;
 
 out_tag_set:
-       blk_mq_free_tag_set(&md->tag_set);
+       blk_mq_free_tag_set(md->tag_set);
+out_kfree_tag_set:
+       kfree(md->tag_set);
+
        return err;
 }
 
@@ -2767,7 +2791,7 @@ static unsigned filter_md_type(unsigned type, struct mapped_device *md)
 /*
  * Setup the DM device's queue based on md's type
  */
-int dm_setup_md_queue(struct mapped_device *md)
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
 {
        int r;
        unsigned md_type = filter_md_type(dm_get_md_type(md), md);
@@ -2781,7 +2805,7 @@ int dm_setup_md_queue(struct mapped_device *md)
                }
                break;
        case DM_TYPE_MQ_REQUEST_BASED:
-               r = dm_init_request_based_blk_mq_queue(md);
+               r = dm_init_request_based_blk_mq_queue(md, dm_table_get_immutable_target(t));
                if (r) {
                        DMWARN("Cannot initialize queue for request-based blk-mq mapped device");
                        return r;
@@ -3480,7 +3504,7 @@ int dm_noflush_suspending(struct dm_target *ti)
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
 
 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
-                                           unsigned integrity, unsigned per_bio_data_size)
+                                           unsigned integrity, unsigned per_io_data_size)
 {
        struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
        struct kmem_cache *cachep = NULL;
@@ -3496,7 +3520,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
        case DM_TYPE_BIO_BASED:
                cachep = _io_cache;
                pool_size = dm_get_reserved_bio_based_ios();
-               front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+               front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
                break;
        case DM_TYPE_REQUEST_BASED:
                cachep = _rq_tio_cache;
@@ -3509,8 +3533,7 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t
                if (!pool_size)
                        pool_size = dm_get_reserved_rq_based_ios();
                front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
-               /* per_bio_data_size is not used. See __bind_mempools(). */
-               WARN_ON(per_bio_data_size != 0);
+               /* per_io_data_size is used for blk-mq pdu at queue allocation */
                break;
        default:
                BUG();
@@ -3699,6 +3722,12 @@ MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools"
 module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");
 
+module_param(blk_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(blk_mq_nr_hw_queues, "Number of hardware queues for blk-mq request-based DM devices");
+
+module_param(blk_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(blk_mq_queue_depth, "Queue depth for blk-mq request-based DM devices");
+
 MODULE_DESCRIPTION(DM_NAME " driver");
 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
 MODULE_LICENSE("GPL");
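For reference, the tag-set hunks above size the blk-mq per-command area (pdu) as the dm_rq_target_io followed by any target per-io data and, when the underlying devices are not blk-mq, a clone request. A rough sketch of that sizing, illustration only; the literal queue depth and hw-queue count stand in for the blk_mq_queue_depth / blk_mq_nr_hw_queues module parameters added at the end of this file:

/* Illustrative pdu sizing, mirroring (not reproducing) the hunks above. */
static int example_init_tag_set(struct blk_mq_tag_set *set, void *driver_data,
                                unsigned int per_io_data_size, bool needs_clone)
{
        memset(set, 0, sizeof(*set));
        set->ops = &dm_mq_ops;                  /* queue_rq / init_request ops */
        set->queue_depth = BLKDEV_MAX_RQ;       /* or the blk_mq_queue_depth param */
        set->numa_node = NUMA_NO_NODE;
        set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
        set->nr_hw_queues = 1;                  /* or the blk_mq_nr_hw_queues param */
        set->driver_data = driver_data;

        /* pdu layout: [dm_rq_target_io][per-io data][clone request, optional] */
        set->cmd_size = sizeof(struct dm_rq_target_io) + per_io_data_size;
        if (needs_clone)
                set->cmd_size += sizeof(struct request);

        return blk_mq_alloc_tag_set(set);
}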
index 7edcf97dfa5a66fc93b964c4f16671d72a17e5a8..13a758ec0f88327ee2a047fea74108cd7b40eb59 100644 (file)
@@ -73,6 +73,8 @@ int dm_table_resume_targets(struct dm_table *t);
 int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 unsigned dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
+struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
+struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
 bool dm_table_request_based(struct dm_table *t);
 bool dm_table_mq_request_based(struct dm_table *t);
 void dm_table_free_md_mempools(struct dm_table *t);
@@ -84,7 +86,7 @@ void dm_set_md_type(struct mapped_device *md, unsigned type);
 unsigned dm_get_md_type(struct mapped_device *md);
 struct target_type *dm_get_immutable_target_type(struct mapped_device *md);
 
-int dm_setup_md_queue(struct mapped_device *md);
+int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);
 
 /*
  * To check the return value from dm_table_find_target().
index 4a8e15058e8b56a8a9d89d9f0db50135df44c6d3..685aa2d77e2526935f8f2f416ad8d8681b6c7b14 100644 (file)
@@ -170,7 +170,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode)
                conf->nfaults = n+1;
 }
 
-static void make_request(struct mddev *mddev, struct bio *bio)
+static void faulty_make_request(struct mddev *mddev, struct bio *bio)
 {
        struct faulty_conf *conf = mddev->private;
        int failit = 0;
@@ -226,7 +226,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
        generic_make_request(bio);
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void faulty_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct faulty_conf *conf = mddev->private;
        int n;
@@ -259,7 +259,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
 }
 
 
-static int reshape(struct mddev *mddev)
+static int faulty_reshape(struct mddev *mddev)
 {
        int mode = mddev->new_layout & ModeMask;
        int count = mddev->new_layout >> ModeShift;
@@ -299,7 +299,7 @@ static sector_t faulty_size(struct mddev *mddev, sector_t sectors, int raid_disk
        return sectors;
 }
 
-static int run(struct mddev *mddev)
+static int faulty_run(struct mddev *mddev)
 {
        struct md_rdev *rdev;
        int i;
@@ -327,7 +327,7 @@ static int run(struct mddev *mddev)
        md_set_array_sectors(mddev, faulty_size(mddev, 0, 0));
        mddev->private = conf;
 
-       reshape(mddev);
+       faulty_reshape(mddev);
 
        return 0;
 }
@@ -344,11 +344,11 @@ static struct md_personality faulty_personality =
        .name           = "faulty",
        .level          = LEVEL_FAULTY,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = faulty_make_request,
+       .run            = faulty_run,
        .free           = faulty_free,
-       .status         = status,
-       .check_reshape  = reshape,
+       .status         = faulty_status,
+       .check_reshape  = faulty_reshape,
        .size           = faulty_size,
 };
 
index 0ded8e97751d270dbfdae0aac73a792043f10621..dd97d42458226b4b284b488983e218e01c814889 100644 (file)
@@ -293,6 +293,7 @@ static void recover_bitmaps(struct md_thread *thread)
 dlm_unlock:
                dlm_unlock_sync(bm_lockres);
 clear_bit:
+               lockres_free(bm_lockres);
                clear_bit(slot, &cinfo->recovery_map);
        }
 }
@@ -682,8 +683,10 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
                bm_lockres = lockres_init(mddev, str, NULL, 1);
                if (!bm_lockres)
                        return -ENOMEM;
-               if (i == (cinfo->slot_number - 1))
+               if (i == (cinfo->slot_number - 1)) {
+                       lockres_free(bm_lockres);
                        continue;
+               }
 
                bm_lockres->flags |= DLM_LKF_NOQUEUE;
                ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
@@ -858,6 +861,7 @@ static int leave(struct mddev *mddev)
        lockres_free(cinfo->token_lockres);
        lockres_free(cinfo->ack_lockres);
        lockres_free(cinfo->no_new_dev_lockres);
+       lockres_free(cinfo->resync_lockres);
        lockres_free(cinfo->bitmap_lockres);
        unlock_all_bitmaps(mddev);
        dlm_release_lockspace(cinfo->lockspace, 2);
index c4b9134092260f22939027a0a82945b146a4ebf9..4e3843f7d24592cc596df1fc6d6c4577a9a6f57f 100644 (file)
@@ -1044,7 +1044,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
        kfree(plug);
 }
 
-static void make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_make_request(struct mddev *mddev, struct bio * bio)
 {
        struct r1conf *conf = mddev->private;
        struct raid1_info *mirror;
@@ -1422,7 +1422,7 @@ read_again:
        wake_up(&conf->wait_barrier);
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid1_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct r1conf *conf = mddev->private;
        int i;
@@ -1439,7 +1439,7 @@ static void status(struct seq_file *seq, struct mddev *mddev)
        seq_printf(seq, "]");
 }
 
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r1conf *conf = mddev->private;
@@ -2472,7 +2472,8 @@ static int init_resync(struct r1conf *conf)
  * that can be installed to exclude normal IO requests.
  */
 
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
+static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+                                  int *skipped)
 {
        struct r1conf *conf = mddev->private;
        struct r1bio *r1_bio;
@@ -2890,7 +2891,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 }
 
 static void raid1_free(struct mddev *mddev, void *priv);
-static int run(struct mddev *mddev)
+static int raid1_run(struct mddev *mddev)
 {
        struct r1conf *conf;
        int i;
@@ -3170,15 +3171,15 @@ static struct md_personality raid1_personality =
        .name           = "raid1",
        .level          = 1,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid1_make_request,
+       .run            = raid1_run,
        .free           = raid1_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid1_status,
+       .error_handler  = raid1_error,
        .hot_add_disk   = raid1_add_disk,
        .hot_remove_disk= raid1_remove_disk,
        .spare_active   = raid1_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid1_sync_request,
        .resize         = raid1_resize,
        .size           = raid1_size,
        .check_reshape  = raid1_reshape,
index ce959b4ae4dfd9a09d33828f5706d3a6aadd7f71..1c1447dd3417cd3e27f7c097a004b5e9b17a10d5 100644 (file)
@@ -1442,7 +1442,7 @@ retry_write:
        one_write_done(r10_bio);
 }
 
-static void make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 {
        struct r10conf *conf = mddev->private;
        sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
@@ -1484,7 +1484,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
        wake_up(&conf->wait_barrier);
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid10_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct r10conf *conf = mddev->private;
        int i;
@@ -1562,7 +1562,7 @@ static int enough(struct r10conf *conf, int ignore)
                _enough(conf, 1, ignore);
 }
 
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid10_error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r10conf *conf = mddev->private;
@@ -2802,7 +2802,7 @@ static int init_resync(struct r10conf *conf)
  *
  */
 
-static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
+static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
                             int *skipped)
 {
        struct r10conf *conf = mddev->private;
@@ -3523,7 +3523,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
        return ERR_PTR(err);
 }
 
-static int run(struct mddev *mddev)
+static int raid10_run(struct mddev *mddev)
 {
        struct r10conf *conf;
        int i, disk_idx, chunk_size;
@@ -4617,15 +4617,15 @@ static struct md_personality raid10_personality =
        .name           = "raid10",
        .level          = 10,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid10_make_request,
+       .run            = raid10_run,
        .free           = raid10_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid10_status,
+       .error_handler  = raid10_error,
        .hot_add_disk   = raid10_add_disk,
        .hot_remove_disk= raid10_remove_disk,
        .spare_active   = raid10_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid10_sync_request,
        .quiesce        = raid10_quiesce,
        .size           = raid10_size,
        .resize         = raid10_resize,
index a086014dcd49915d5dad8b2c57b9bbfa30b39e5c..b4f02c9959f23c1bb5e8feccb2b9bf6e1c59e862 100644 (file)
@@ -2496,7 +2496,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
        dev->sector = raid5_compute_blocknr(sh, i, previous);
 }
 
-static void error(struct mddev *mddev, struct md_rdev *rdev)
+static void raid5_error(struct mddev *mddev, struct md_rdev *rdev)
 {
        char b[BDEVNAME_SIZE];
        struct r5conf *conf = mddev->private;
@@ -2958,7 +2958,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
         * If several bio share a stripe. The bio bi_phys_segments acts as a
         * reference count to avoid race. The reference count should already be
         * increased before this function is called (for example, in
-        * make_request()), so other bio sharing this stripe will not free the
+        * raid5_make_request()), so other bio sharing this stripe will not free the
         * stripe. If a stripe is owned by one stripe, the stripe lock will
         * protect it.
         */
@@ -5135,7 +5135,7 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
        }
 }
 
-static void make_request(struct mddev *mddev, struct bio * bi)
+static void raid5_make_request(struct mddev *mddev, struct bio * bi)
 {
        struct r5conf *conf = mddev->private;
        int dd_idx;
@@ -5225,7 +5225,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
                new_sector = raid5_compute_sector(conf, logical_sector,
                                                  previous,
                                                  &dd_idx, NULL);
-               pr_debug("raid456: make_request, sector %llu logical %llu\n",
+               pr_debug("raid456: raid5_make_request, sector %llu logical %llu\n",
                        (unsigned long long)new_sector,
                        (unsigned long long)logical_sector);
 
@@ -5575,7 +5575,8 @@ ret:
        return retn;
 }
 
-static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped)
+static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_nr,
+                                         int *skipped)
 {
        struct r5conf *conf = mddev->private;
        struct stripe_head *sh;
@@ -6674,7 +6675,7 @@ static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded
        return 0;
 }
 
-static int run(struct mddev *mddev)
+static int raid5_run(struct mddev *mddev)
 {
        struct r5conf *conf;
        int working_disks = 0;
@@ -7048,7 +7049,7 @@ static void raid5_free(struct mddev *mddev, void *priv)
        mddev->to_remove = &raid5_attrs_group;
 }
 
-static void status(struct seq_file *seq, struct mddev *mddev)
+static void raid5_status(struct seq_file *seq, struct mddev *mddev)
 {
        struct r5conf *conf = mddev->private;
        int i;
@@ -7864,15 +7865,15 @@ static struct md_personality raid6_personality =
        .name           = "raid6",
        .level          = 6,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid5_make_request,
+       .run            = raid5_run,
        .free           = raid5_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid5_status,
+       .error_handler  = raid5_error,
        .hot_add_disk   = raid5_add_disk,
        .hot_remove_disk= raid5_remove_disk,
        .spare_active   = raid5_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid5_sync_request,
        .resize         = raid5_resize,
        .size           = raid5_size,
        .check_reshape  = raid6_check_reshape,
@@ -7887,15 +7888,15 @@ static struct md_personality raid5_personality =
        .name           = "raid5",
        .level          = 5,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid5_make_request,
+       .run            = raid5_run,
        .free           = raid5_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid5_status,
+       .error_handler  = raid5_error,
        .hot_add_disk   = raid5_add_disk,
        .hot_remove_disk= raid5_remove_disk,
        .spare_active   = raid5_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid5_sync_request,
        .resize         = raid5_resize,
        .size           = raid5_size,
        .check_reshape  = raid5_check_reshape,
@@ -7911,15 +7912,15 @@ static struct md_personality raid4_personality =
        .name           = "raid4",
        .level          = 4,
        .owner          = THIS_MODULE,
-       .make_request   = make_request,
-       .run            = run,
+       .make_request   = raid5_make_request,
+       .run            = raid5_run,
        .free           = raid5_free,
-       .status         = status,
-       .error_handler  = error,
+       .status         = raid5_status,
+       .error_handler  = raid5_error,
        .hot_add_disk   = raid5_add_disk,
        .hot_remove_disk= raid5_remove_disk,
        .spare_active   = raid5_spare_active,
-       .sync_request   = sync_request,
+       .sync_request   = raid5_sync_request,
        .resize         = raid5_resize,
        .size           = raid5_size,
        .check_reshape  = raid5_check_reshape,
index 9c59f4306883a6de666052c613896b850313b8d0..f5956402fc69dfcd88c9870abe5ea76e3d6e6d65 100644 (file)
@@ -38,7 +38,7 @@ static int flexcop_fe_request_firmware(struct dvb_frontend *fe,
 #endif
 
 /* lnb control */
-#if FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)
+#if (FE_SUPPORTED(MT312) || FE_SUPPORTED(STV0299)) && FE_SUPPORTED(PLL)
 static int flexcop_set_voltage(struct dvb_frontend *fe,
                               enum fe_sec_voltage voltage)
 {
@@ -68,7 +68,7 @@ static int flexcop_set_voltage(struct dvb_frontend *fe,
 #endif
 
 #if FE_SUPPORTED(S5H1420) || FE_SUPPORTED(STV0299) || FE_SUPPORTED(MT312)
-static int flexcop_sleep(struct dvb_frontend* fe)
+static int __maybe_unused flexcop_sleep(struct dvb_frontend* fe)
 {
        struct flexcop_device *fc = fe->dvb->priv;
        if (fc->fe_sleep)
index 412c5daf2b48c4208f2fb0bd60ed50fb2f4acbef..0f5114d406f822004dca7f2b6e148ca49110d710 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Linux driver for digital TV devices equipped with B2C2 FlexcopII(b)/III
  * flexcop.c - main module part
- * Copyright (C) 2004-9 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-9 Patrick Boettcher <patrick.boettcher@posteo.de>
  * based on skystar2-driver Copyright (C) 2003 Vadim Catana, skystar@moldova.cc
  *
  * Acknowledgements:
@@ -34,7 +34,7 @@
 #include "flexcop.h"
 
 #define DRIVER_NAME "B2C2 FlexcopII/II(b)/III digital TV receiver chip"
-#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de"
+#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de"
 
 #ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
 #define DEBSTATUS ""
index 577e82058fdc9e268bf1268651a6402f7f7de9ca..50e3f76d4847412e1280e14c1fe0b44847c8fd57 100644 (file)
@@ -1,6 +1,6 @@
 /*  cypress_firmware.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for downloading the firmware to Cypress FX 1
index e493cbc7a5289ecae3c8c80fedd52b358e1ffd90..1e4f2735620529a5bc3531947c15e003679b95df 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for downloading the firmware to Cypress FX 1
index 1c1c298d2289b9bba1ab8fdd1629c660dedfba92..dbdbb84294c5f1b5a225fe493a603d18bdbcab13 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-ids.h is part of the DVB USB library.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de) see
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de) see
  * dvb-usb-init.c for copyright information.
  *
  * a header file containing define's for the USB device supported by the
 #define USB_PID_DIBCOM_STK807XP                                0x1f90
 #define USB_PID_DIBCOM_STK807XPVR                      0x1f98
 #define USB_PID_DIBCOM_STK8096GP                        0x1fa0
+#define USB_PID_DIBCOM_STK8096PVR                       0x1faa
 #define USB_PID_DIBCOM_NIM8096MD                        0x1fa8
 #define USB_PID_DIBCOM_TFE8096P                                0x1f9C
 #define USB_PID_DIBCOM_ANCHOR_2135_COLD                        0x2131
 #define USB_PID_TECHNOTREND_CONNECT_CT3650             0x300d
 #define USB_PID_TECHNOTREND_CONNECT_S2_4600             0x3011
 #define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI                0x3012
+#define USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2      0x3015
 #define USB_PID_TECHNOTREND_TVSTICK_CT2_4400           0x3014
 #define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY       0x005a
 #define USB_PID_TERRATEC_CINERGY_DT_XS_DIVERSITY_2     0x0081
 #define USB_PID_TERRATEC_CINERGY_T_EXPRESS             0x0062
 #define USB_PID_TERRATEC_CINERGY_T_XXS                 0x0078
 #define USB_PID_TERRATEC_CINERGY_T_XXS_2               0x00ab
+#define USB_PID_TERRATEC_CINERGY_S2_R1                 0x00a8
+#define USB_PID_TERRATEC_CINERGY_S2_R2                 0x00b0
+#define USB_PID_TERRATEC_CINERGY_S2_R3                 0x0102
+#define USB_PID_TERRATEC_CINERGY_S2_R4                 0x0105
 #define USB_PID_TERRATEC_H7                            0x10b4
 #define USB_PID_TERRATEC_H7_2                          0x10a3
 #define USB_PID_TERRATEC_H7_3                          0x10a5
index 40080645341e727a6380feac3f991fb022b86bf4..b1255b7c0b0edb7c34dd65c3983a1f02ededb949 100644 (file)
@@ -899,10 +899,10 @@ void dvb_frontend_sleep_until(ktime_t *waketime, u32 add_usec)
        s32 delta;
 
        *waketime = ktime_add_us(*waketime, add_usec);
-       delta = ktime_us_delta(ktime_get_real(), *waketime);
+       delta = ktime_us_delta(ktime_get_boottime(), *waketime);
        if (delta > 2500) {
                msleep((delta - 1500) / 1000);
-               delta = ktime_us_delta(ktime_get_real(), *waketime);
+               delta = ktime_us_delta(ktime_get_boottime(), *waketime);
        }
        if (delta > 0)
                udelay(delta);
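This hunk (and the matching ones later in dvb_frontend.c and stv0299.c) moves the DiSEqC timing code from ktime_get_real() to ktime_get_boottime(), so the bit timing no longer shifts when settimeofday() or NTP adjusts the wall clock. A standalone sketch of the same wait-until-deadline idiom on the boot-time clock, illustrative rather than the driver's exact arithmetic:

#include <linux/ktime.h>
#include <linux/delay.h>

/* Wait until a ktime deadline measured on the boot-time clock. */
static void example_wait_until_boottime(ktime_t deadline)
{
        s64 remaining_us = ktime_us_delta(deadline, ktime_get_boottime());

        if (remaining_us > 2500) {
                msleep((remaining_us - 1500) / 1000);   /* coarse sleep first */
                remaining_us = ktime_us_delta(deadline, ktime_get_boottime());
        }
        if (remaining_us > 0)
                udelay(remaining_us);                   /* fine-grained tail */
}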
@@ -1162,18 +1162,24 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
        _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0),
 };
 
-static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp)
+static void dtv_property_dump(struct dvb_frontend *fe,
+                             bool is_set,
+                             struct dtv_property *tvp)
 {
        int i;
 
        if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) {
-               dev_warn(fe->dvb->device, "%s: tvp.cmd = 0x%08x undefined\n",
-                               __func__, tvp->cmd);
+               dev_warn(fe->dvb->device, "%s: %s tvp.cmd = 0x%08x undefined\n",
+                               __func__,
+                               is_set ? "SET" : "GET",
+                               tvp->cmd);
                return;
        }
 
-       dev_dbg(fe->dvb->device, "%s: tvp.cmd    = 0x%08x (%s)\n", __func__,
-                       tvp->cmd, dtv_cmds[tvp->cmd].name);
+       dev_dbg(fe->dvb->device, "%s: %s tvp.cmd    = 0x%08x (%s)\n", __func__,
+               is_set ? "SET" : "GET",
+               tvp->cmd,
+               dtv_cmds[tvp->cmd].name);
 
        if (dtv_cmds[tvp->cmd].buffer) {
                dev_dbg(fe->dvb->device, "%s: tvp.u.buffer.len = 0x%02x\n",
@@ -1589,7 +1595,7 @@ static int dtv_property_process_get(struct dvb_frontend *fe,
                        return r;
        }
 
-       dtv_property_dump(fe, tvp);
+       dtv_property_dump(fe, false, tvp);
 
        return 0;
 }
@@ -1830,6 +1836,8 @@ static int dtv_property_process_set(struct dvb_frontend *fe,
                        return r;
        }
 
+       dtv_property_dump(fe, true, tvp);
+
        switch(tvp->cmd) {
        case DTV_CLEAR:
                /*
@@ -2451,7 +2459,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
                        u8 last = 1;
                        if (dvb_frontend_debug)
                                printk("%s switch command: 0x%04lx\n", __func__, swcmd);
-                       nexttime = ktime_get_real();
+                       nexttime = ktime_get_boottime();
                        if (dvb_frontend_debug)
                                tv[0] = nexttime;
                        /* before sending a command, initialize by sending
@@ -2462,7 +2470,7 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
 
                        for (i = 0; i < 9; i++) {
                                if (dvb_frontend_debug)
-                                       tv[i+1] = ktime_get_real();
+                                       tv[i+1] = ktime_get_boottime();
                                if ((swcmd & 0x01) != last) {
                                        /* set voltage to (last ? 13V : 18V) */
                                        fe->ops.set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18);
index 560450a0b32a5b2399196aa2a9a8d0ff161534e9..1b9732ee0a4fbd4720289d336148941e4e0de550 100644 (file)
@@ -58,7 +58,7 @@ static const char * const dnames[] = {
 #define DVB_MAX_IDS            MAX_DVB_MINORS
 #else
 #define DVB_MAX_IDS            4
-#define nums2minor(num,type,id)        ((num << 6) | (id << 4) | type)
+#define nums2minor(num, type, id)      ((num << 6) | (id << 4) | type)
 #define MAX_DVB_MINORS         (DVB_MAX_ADAPTERS*64)
 #endif
 
@@ -85,7 +85,7 @@ static int dvb_device_open(struct inode *inode, struct file *file)
                file->private_data = dvbdev;
                replace_fops(file, new_fops);
                if (file->f_op->open)
-                       err = file->f_op->open(inode,file);
+                       err = file->f_op->open(inode, file);
                up_read(&minor_rwsem);
                mutex_unlock(&dvbdev_mutex);
                return err;
@@ -352,7 +352,7 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
        ret = media_device_register_entity(dvbdev->adapter->mdev,
                                           dvbdev->entity);
        if (ret)
-               return (ret);
+               return ret;
 
        printk(KERN_DEBUG "%s: media entity '%s' registered.\n",
                __func__, dvbdev->entity->name);
@@ -620,8 +620,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
                        return -ENOMEM;
                adap->conn = conn;
 
-               adap->conn_pads = kcalloc(1, sizeof(*adap->conn_pads),
-                                           GFP_KERNEL);
+               adap->conn_pads = kzalloc(sizeof(*adap->conn_pads), GFP_KERNEL);
                if (!adap->conn_pads)
                        return -ENOMEM;
 
@@ -661,7 +660,7 @@ int dvb_create_media_graph(struct dvb_adapter *adap,
        if (ntuner && ndemod) {
                ret = media_create_pad_links(mdev,
                                             MEDIA_ENT_F_TUNER,
-                                            tuner, TUNER_PAD_IF_OUTPUT,
+                                            tuner, TUNER_PAD_OUTPUT,
                                             MEDIA_ENT_F_DTV_DEMOD,
                                             demod, 0, MEDIA_LNK_FL_ENABLED,
                                             false);
@@ -868,7 +867,7 @@ int dvb_usercopy(struct file *file,
                        parg = sbuf;
                } else {
                        /* too big to allocate from stack */
-                       mbuf = kmalloc(_IOC_SIZE(cmd),GFP_KERNEL);
+                       mbuf = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
                        if (NULL == mbuf)
                                return -ENOMEM;
                        parg = mbuf;
index e23197da84af51bc244772d2dc2e7b01caf8eae9..41ab5defb7989b058bb42dfa90ecd972ded4381d 100644 (file)
@@ -1344,6 +1344,10 @@ err:
 static void af9013_release(struct dvb_frontend *fe)
 {
        struct af9013_state *state = fe->demodulator_priv;
+
+       /* stop statistics polling */
+       cancel_delayed_work_sync(&state->statistics_work);
+
        kfree(state);
 }
 
index bc35206a08215b83a6cf49719cfd1db9363af98c..8b328d1ca8d365f6cac693a443a80adee4e9041e 100644 (file)
@@ -1372,6 +1372,9 @@ static int af9033_remove(struct i2c_client *client)
 
        dev_dbg(&dev->client->dev, "\n");
 
+       /* stop statistics polling */
+       cancel_delayed_work_sync(&dev->stat_work);
+
        dev->fe.ops.release = NULL;
        dev->fe.demodulator_priv = NULL;
        kfree(dev);
index d30275f27644bd963d16329f117d73fc3364c15d..bb698839e477e23a07a04dbc15082cdd341d04f0 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2001-5, B2C2 inc.
  *
- *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de>
+ *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  *  This driver is "hard-coded" to be used with the 1st generation of
  *  Technisat/B2C2's Air2PC ATSC PCI/USB cards/boxes. The pll-programming
@@ -865,5 +865,5 @@ static struct dvb_frontend_ops bcm3510_ops = {
 };
 
 MODULE_DESCRIPTION("Broadcom BCM3510 ATSC (8VSB/16VSB & ITU J83 AnnexB FEC QAM64/256) demodulator driver");
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_LICENSE("GPL");
index ff66492fb9408f5324c749d64638df67929d1077..961c2eb87c684a595ede26ac332a6889c8323309 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2001-5, B2C2 inc.
  *
- *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de>
+ *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 3bb1bc2a04f006a43d849ad2dad914ffde425852..67f24686c31b9dd3306bc51c5271d9af25a47e11 100644 (file)
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 2001-5, B2C2 inc.
  *
- *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@desy.de>
+ *  GPL/Linux driver written by Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 24a457d9d803372dd25ced70284bdab381c0d2f5..ba4cb7557aa577ce762ef455e26d30176ad004b8 100644 (file)
@@ -606,8 +606,7 @@ static int cxd2820r_i2c_gate_ctrl(struct dvb_frontend *fe, int enable)
 static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr,
                int val)
 {
-       struct cxd2820r_priv *priv =
-                       container_of(chip, struct cxd2820r_priv, gpio_chip);
+       struct cxd2820r_priv *priv = gpiochip_get_data(chip);
        u8 gpio[GPIO_COUNT];
 
        dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
@@ -620,8 +619,7 @@ static int cxd2820r_gpio_direction_output(struct gpio_chip *chip, unsigned nr,
 
 static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
 {
-       struct cxd2820r_priv *priv =
-                       container_of(chip, struct cxd2820r_priv, gpio_chip);
+       struct cxd2820r_priv *priv = gpiochip_get_data(chip);
        u8 gpio[GPIO_COUNT];
 
        dev_dbg(&priv->i2c->dev, "%s: nr=%d val=%d\n", __func__, nr, val);
@@ -636,8 +634,7 @@ static void cxd2820r_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
 
 static int cxd2820r_gpio_get(struct gpio_chip *chip, unsigned nr)
 {
-       struct cxd2820r_priv *priv =
-                       container_of(chip, struct cxd2820r_priv, gpio_chip);
+       struct cxd2820r_priv *priv = gpiochip_get_data(chip);
 
        dev_dbg(&priv->i2c->dev, "%s: nr=%d\n", __func__, nr);
 
@@ -731,7 +728,7 @@ struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
                priv->gpio_chip.base = -1; /* dynamic allocation */
                priv->gpio_chip.ngpio = GPIO_COUNT;
                priv->gpio_chip.can_sleep = 1;
-               ret = gpiochip_add(&priv->gpio_chip);
+               ret = gpiochip_add_data(&priv->gpio_chip, priv);
                if (ret)
                        goto error;
 
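The cxd2820r hunks above replace container_of() on the embedded gpio_chip with gpiochip_get_data(), which only works because registration now goes through gpiochip_add_data() with the private pointer as the second argument. A minimal illustration of that pairing, with invented names:

#include <linux/gpio/driver.h>

struct example_priv {
        struct gpio_chip gpio_chip;
        /* ... device state ... */
};

static int example_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
        /* Returns the pointer passed to gpiochip_add_data() below. */
        struct example_priv *priv = gpiochip_get_data(chip);

        (void)priv;
        return 0;
}

static int example_register(struct example_priv *priv)
{
        priv->gpio_chip.label = "example";
        priv->gpio_chip.get = example_gpio_get;
        priv->gpio_chip.base = -1;              /* dynamic GPIO numbering */
        priv->gpio_chip.ngpio = 1;
        priv->gpio_chip.can_sleep = true;

        /* The data argument is what gpiochip_get_data() hands back later. */
        return gpiochip_add_data(&priv->gpio_chip, priv);
}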
index 0b8fb5dd18898a46c93348caabd4818ec4dbe37b..ee7d66997ccde63508b9f4ac1a626cd9c1f0f286 100644 (file)
@@ -774,6 +774,6 @@ free_mem:
 }
 EXPORT_SYMBOL(dib0070_attach);
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 0070 base-band RF Tuner");
 MODULE_LICENSE("GPL");
index 47cb72243b9da6bfc9c62271592633f2ecadc3db..976ee034a4307052a344c6c70592826a8b535641 100644 (file)
@@ -2669,7 +2669,7 @@ free_mem:
 }
 EXPORT_SYMBOL(dib0090_fw_register);
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
-MODULE_AUTHOR("Olivier Grenie <olivier.grenie@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
 MODULE_DESCRIPTION("Driver for the DiBcom 0090 base-band RF Tuner");
 MODULE_LICENSE("GPL");
index 6ae9899b5b450bec39a56601134accd686dbd990..d5dfafb4ef13b665a5439c9a68ce72eb69452486 100644 (file)
@@ -2,11 +2,11 @@
  * public header file of the frontend drivers for mobile DVB-T demodulators
  * DiBcom 3000M-B and DiBcom 3000P/M-C (http://www.dibcom.fr/)
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * based on GPL code from DibCom, which has
  *
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
@@ -14,7 +14,7 @@
  *
  * Acknowledgements
  *
- *  Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver
+ *  Amaury Demol from DiBcom for providing specs and driver
  *  sources, on which this driver (and the dvb-dibusb) are based.
  *
  * see Documentation/dvb/README.dvb-usb for more information
index 7a61172d0d453b9aa38282dbb6cd97d239387080..3ca300939f790bb1c3ebaceca298932a19f1ac3c 100644 (file)
@@ -2,11 +2,11 @@
  * Frontend driver for mobile DVB-T demodulator DiBcom 3000M-B
  * DiBcom (http://www.dibcom.fr/)
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * based on GPL code from DibCom, which has
  *
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
@@ -14,7 +14,7 @@
  *
  * Acknowledgements
  *
- *  Amaury Demol (ademol@dibcom.fr) from DiBcom for providing specs and driver
+ *  Amaury Demol from DiBcom for providing specs and driver
  *  sources, on which this driver (and the dvb-dibusb) are based.
  *
  * see Documentation/dvb/README.dvb-usb for more information
@@ -36,7 +36,7 @@
 /* Version information */
 #define DRIVER_VERSION "0.1"
 #define DRIVER_DESC "DiBcom 3000M-B DVB-T demodulator"
-#define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@desy.de"
+#define DRIVER_AUTHOR "Patrick Boettcher, patrick.boettcher@posteo.de"
 
 static int debug;
 module_param(debug, int, 0644);
index 9dc235aa44b71a712ba7c57918f9c786dd237e15..0459d5c84314e7cca51532d2e088ddd28b6cc130 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * dib3000mb_priv.h
  *
- * Copyright (C) 2004 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or
  *     modify it under the terms of the GNU General Public License as
index 583d6b7fabedad77116565a8f948c0e10ec28318..ac90ed3af37e40a6a184dd7698a6f756140adddb 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for DiBcom DiB3000MC/P-demodulator.
  *
  * Copyright (C) 2004-7 DiBcom (http://www.dibcom.fr/)
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * This code is partially based on the previous dib3000mc.c .
  *
@@ -939,6 +939,6 @@ static struct dvb_frontend_ops dib3000mc_ops = {
        .read_ucblocks        = dib3000mc_read_unc_blocks,
 };
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 3000MC/P COFDM demodulator");
 MODULE_LICENSE("GPL");
index 74816f79361180c0191c4cdd69239121371543f5..b37e69e6a58c7ca63e05167780de8e9cadd86738 100644 (file)
@@ -2,7 +2,7 @@
  * Driver for DiBcom DiB3000MC/P-demodulator.
  *
  * Copyright (C) 2004-6 DiBcom (http://www.dibcom.fr/)
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher\@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * This code is partially based on the previous dib3000mc.c .
  *
index 35eb71fe3c2b4977d5db8bf68e5f7b8039cb60da..8b21cccf3c3a04184629af11a30cb0d9c3d91d9b 100644 (file)
@@ -1465,6 +1465,6 @@ static struct dvb_frontend_ops dib7000m_ops = {
        .read_ucblocks        = dib7000m_read_unc_blocks,
 };
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 7000MA/MB/PA/PB/MC COFDM demodulator");
 MODULE_LICENSE("GPL");
index 33be5d6b9e10589275529a83525914ad6a33a470..65ab79ed5e3ecb3e3a00c31adbb6b7ca5e8e03b6 100644 (file)
@@ -2834,7 +2834,7 @@ static struct dvb_frontend_ops dib7000p_ops = {
        .read_ucblocks = dib7000p_read_unc_blocks,
 };
 
-MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Olivier Grenie <olivie.grenie@parrot.com>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 7000PC COFDM demodulator");
 MODULE_LICENSE("GPL");
index 94c26270fff0e98375161ddad5e9c0ae5fc8d2bd..349d2f1f62ce0341e2612d519b7d7ee3f043ed21 100644 (file)
@@ -4516,6 +4516,6 @@ void *dib8000_attach(struct dib8000_ops *ops)
 }
 EXPORT_SYMBOL(dib8000_attach);
 
-MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@dibcom.fr, " "Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@parrot.com, Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator");
 MODULE_LICENSE("GPL");
index 8f92aca0b073555a40fa422b864ddb89646e0232..91888a2f53018bc2755a31d2a128560486226ce7 100644 (file)
@@ -2589,7 +2589,7 @@ static struct dvb_frontend_ops dib9000_ops = {
        .read_ucblocks = dib9000_read_unc_blocks,
 };
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
-MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
+MODULE_AUTHOR("Olivier Grenie <olivier.grenie@parrot.com>");
 MODULE_DESCRIPTION("Driver for the DiBcom 9000 COFDM demodulator");
 MODULE_LICENSE("GPL");
index 43be7238311ec513726e8ecaa13d0864c2b622dd..723358d7ca84debd209750f58a1423b1dd3861f9 100644 (file)
@@ -510,6 +510,6 @@ u32 systime(void)
 }
 EXPORT_SYMBOL(systime);
 
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Common function the DiBcom demodulator family");
 MODULE_LICENSE("GPL");
index b792f305cf15f05fc1289a4bdd0566269bfd15e0..74b771218033d76dee58d151dca1addd54404b13 100644 (file)
@@ -900,6 +900,9 @@ static int rtl2830_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "\n");
 
+       /* stop statistics polling */
+       cancel_delayed_work_sync(&dev->stat_work);
+
        i2c_del_mux_adapter(dev->adapter);
        regmap_exit(dev->regmap);
        kfree(dev);
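Several demod/tuner hunks in this series (af9013, af9033, rtl2830, and ts2020 below) add the same teardown rule: stop the delayed statistics work before freeing the state it reads. An abstract sketch of the pattern, with invented names:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_dev {
        struct delayed_work stat_work;
        /* ... statistics state touched by the work function ... */
};

static void example_remove(struct example_dev *dev)
{
        /*
         * Wait for any queued or running statistics work to finish before
         * the memory it operates on is released; otherwise the work could
         * run against freed state (use-after-free).
         */
        cancel_delayed_work_sync(&dev->stat_work);

        kfree(dev);
}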
index 2b93241d4bc102618c722107477b08482c7d86bb..8bf716a8ea58c2d5161ab66e08cae9ec1f1f3545 100644 (file)
@@ -225,22 +225,18 @@ static int si2165_writereg32(struct si2165_state *state, const u16 reg, u32 val)
 static int si2165_writereg_mask8(struct si2165_state *state, const u16 reg,
                                 u8 val, u8 mask)
 {
-       int ret;
-       u8 tmp;
-
        if (mask != 0xff) {
-               ret = si2165_readreg8(state, reg, &tmp);
+               u8 tmp;
+               int ret = si2165_readreg8(state, reg, &tmp);
+
                if (ret < 0)
-                       goto err;
+                       return ret;
 
                val &= mask;
                tmp &= ~mask;
                val |= tmp;
        }
-
-       ret = si2165_writereg8(state, reg, val);
-err:
-       return ret;
+       return si2165_writereg8(state, reg, val);
 }
 
 #define REG16(reg, val) { (reg), (val) & 0xff }, { (reg)+1, (val)>>8 & 0xff }
@@ -825,19 +821,19 @@ static int si2165_set_frontend_dvbt(struct dvb_frontend *fe)
        struct si2165_state *state = fe->demodulator_priv;
        u32 dvb_rate = 0;
        u16 bw10k;
+       u32 bw_hz = p->bandwidth_hz;
 
        dprintk("%s: called\n", __func__);
 
        if (!state->has_dvbt)
                return -EINVAL;
 
-       if (p->bandwidth_hz > 0) {
-               dvb_rate = p->bandwidth_hz * 8 / 7;
-               bw10k = p->bandwidth_hz / 10000;
-       } else {
-               dvb_rate = 8 * 8 / 7;
-               bw10k = 800;
-       }
+       /* no bandwidth auto-detection */
+       if (bw_hz == 0)
+               return -EINVAL;
+
+       dvb_rate = bw_hz * 8 / 7;
+       bw10k = bw_hz / 10000;
 
        ret = si2165_adjust_pll_divl(state, 12);
        if (ret < 0)
index a8177807fb6568f6d4a261c3509242ac8152b7b3..c43f36d323409b768c5dfbe2ff0fab22e03995af 100644 (file)
@@ -422,7 +422,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long
        if (debug_legacy_dish_switch)
                printk ("%s switch command: 0x%04lx\n",__func__, cmd);
 
-       nexttime = ktime_get_real();
+       nexttime = ktime_get_boottime();
        if (debug_legacy_dish_switch)
                tv[0] = nexttime;
        stv0299_writeregI (state, 0x0c, reg0x0c | 0x50); /* set LNB to 18V */
@@ -431,7 +431,7 @@ static int stv0299_send_legacy_dish_cmd (struct dvb_frontend* fe, unsigned long
 
        for (i=0; i<9; i++) {
                if (debug_legacy_dish_switch)
-                       tv[i+1] = ktime_get_real();
+                       tv[i+1] = ktime_get_boottime();
                if((cmd & 0x01) != last) {
                        /* set voltage to (last ? 13V : 18V) */
                        stv0299_writeregI (state, 0x0c, reg0x0c | (last ? lv_mask : 0x50));
index e66154e5c1d7710e0ddcc8c6183f707be0e388ea..a62c01e454f518399270443f0311106a39ef426b 100644 (file)
@@ -355,7 +355,7 @@ static struct dvb_tuner_ops stv6110x_ops = {
        .release                = stv6110x_release
 };
 
-static struct stv6110x_devctl stv6110x_ctl = {
+static const struct stv6110x_devctl stv6110x_ctl = {
        .tuner_init             = stv6110x_init,
        .tuner_sleep            = stv6110x_sleep,
        .tuner_set_mode         = stv6110x_set_mode,
@@ -369,7 +369,7 @@ static struct stv6110x_devctl stv6110x_ctl = {
        .tuner_get_status       = stv6110x_get_status,
 };
 
-struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
                                        const struct stv6110x_config *config,
                                        struct i2c_adapter *i2c)
 {
index 9f7eb251aec32cb0a6d7bf821000a95690674332..696b6e5b9e7be1969fc51d1fb482ad52f52b02e7 100644 (file)
@@ -55,12 +55,12 @@ struct stv6110x_devctl {
 
 #if IS_REACHABLE(CONFIG_DVB_STV6110x)
 
-extern struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+extern const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
                                               const struct stv6110x_config *config,
                                               struct i2c_adapter *i2c);
 
 #else
-static inline struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
+static inline const struct stv6110x_devctl *stv6110x_attach(struct dvb_frontend *fe,
                                                      const struct stv6110x_config *config,
                                                      struct i2c_adapter *i2c)
 {
index 0ec936a660a765801a60791f095981e27943946d..a993aba27b7ef33e8c7a48dc849950f737504b28 100644 (file)
@@ -70,7 +70,7 @@ struct stv6110x_state {
        const struct stv6110x_config    *config;
        u8                              regs[8];
 
-       struct stv6110x_devctl          *devctl;
+       const struct stv6110x_devctl    *devctl;
 };
 
 #endif /* __STV6110x_PRIV_H */
index 0e209b56c76c1c487007121e336e0438944acb93..c6abeb4fba9db51827fc815fde920b1a30faa1fa 100644 (file)
@@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
 {
        struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
        struct tda1004x_state* state = fe->demodulator_priv;
+       int status;
 
        dprintk("%s\n", __func__);
 
+       status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
+       if (status == -1)
+               return -EIO;
+
+       /* Only update the properties cache if device is locked */
+       if (!(status & 8))
+               return 0;
+
        // inversion status
        fe_params->inversion = INVERSION_OFF;
        if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
index 7979e5d6498b52a2c01f4db05a44079eeb5c3bb5..14b410ffe612c2d76381ab0177d82970384913e8 100644 (file)
@@ -712,6 +712,10 @@ static int ts2020_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "\n");
 
+       /* stop statistics polling */
+       if (!dev->dont_poll)
+               cancel_delayed_work_sync(&dev->stat_work);
+
        regmap_exit(dev->regmap);
        kfree(dev);
        return 0;
index f8dd7505b5294ff167352a1d3ce8823605916db7..801a9b09d5e51e913089d4d29b8f19345730440e 100644 (file)
@@ -1884,6 +1884,26 @@ static int adv76xx_get_format(struct v4l2_subdev *sd,
        return 0;
 }
 
+static int adv76xx_get_selection(struct v4l2_subdev *sd,
+                                struct v4l2_subdev_pad_config *cfg,
+                                struct v4l2_subdev_selection *sel)
+{
+       struct adv76xx_state *state = to_state(sd);
+
+       if (sel->which != V4L2_SUBDEV_FORMAT_ACTIVE)
+               return -EINVAL;
+       /* Only CROP, CROP_DEFAULT and CROP_BOUNDS are supported */
+       if (sel->target > V4L2_SEL_TGT_CROP_BOUNDS)
+               return -EINVAL;
+
+       sel->r.left     = 0;
+       sel->r.top      = 0;
+       sel->r.width    = state->timings.bt.width;
+       sel->r.height   = state->timings.bt.height;
+
+       return 0;
+}
+
 static int adv76xx_set_format(struct v4l2_subdev *sd,
                              struct v4l2_subdev_pad_config *cfg,
                              struct v4l2_subdev_format *format)
@@ -2404,6 +2424,7 @@ static const struct v4l2_subdev_video_ops adv76xx_video_ops = {
 
 static const struct v4l2_subdev_pad_ops adv76xx_pad_ops = {
        .enum_mbus_code = adv76xx_enum_mbus_code,
+       .get_selection = adv76xx_get_selection,
        .get_fmt = adv76xx_get_format,
        .set_fmt = adv76xx_set_format,
        .get_edid = adv76xx_get_edid,
@@ -2799,6 +2820,7 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
        struct device_node *endpoint;
        struct device_node *np;
        unsigned int flags;
+       int ret;
        u32 v;
 
        np = state->i2c_clients[ADV76XX_PAGE_IO]->dev.of_node;
@@ -2808,7 +2830,11 @@ static int adv76xx_parse_dt(struct adv76xx_state *state)
        if (!endpoint)
                return -EINVAL;
 
-       v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+       ret = v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+       if (ret) {
+               of_node_put(endpoint);
+               return ret;
+       }
 
        if (!of_property_read_u32(endpoint, "default-input", &v))
                state->pdata.default_input = v;
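Like the s5c73m3 and s5k5baf hunks later in this series, the change above stops ignoring the return value of v4l2_of_parse_endpoint() and drops the endpoint node reference on failure. A sketch of that checked pattern, illustrative only:

#include <linux/of.h>
#include <linux/of_graph.h>
#include <media/v4l2-of.h>

/* Parse the first endpoint of 'node'; error handling mirrors the hunks above. */
static int example_parse_endpoint(struct device_node *node,
                                  struct v4l2_of_endpoint *ep)
{
        struct device_node *endpoint;
        int ret;

        endpoint = of_graph_get_next_endpoint(node, NULL);
        if (!endpoint)
                return -EINVAL;

        ret = v4l2_of_parse_endpoint(endpoint, ep);
        of_node_put(endpoint);          /* always drop the node reference */
        if (ret)
                return ret;             /* malformed endpoint properties */

        return 0;
}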
index 830491960add2d239d817e46db7c4f0d2dc8d00a..bf82726fd3f44f684ac5677286764255055cc9a7 100644 (file)
@@ -478,7 +478,6 @@ static const struct i2c_device_id ir_kbd_id[] = {
        { "ir_rx_z8f0811_hdpvr", 0 },
        { }
 };
-MODULE_DEVICE_TABLE(i2c, ir_kbd_id);
 
 static struct i2c_driver ir_kbd_driver = {
        .driver = {
index a84561d0d4a809c761c5ed51d58af93aa7737dd7..e016626ebf89b25de49a282582676bcd710b10c2 100644 (file)
@@ -688,6 +688,9 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
        int msp_revision;
        int msp_product, msp_prod_hi, msp_prod_lo;
        int msp_rom;
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       int ret;
+#endif
 
        if (!id)
                strlcpy(client->name, "msp3400", sizeof(client->name));
@@ -704,6 +707,17 @@ static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
        sd = &state->sd;
        v4l2_i2c_subdev_init(sd, client, &msp_ops);
 
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       state->pads[IF_AUD_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+       state->pads[IF_AUD_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+
+       sd->entity.function = MEDIA_ENT_F_IF_AUD_DECODER;
+
+       ret = media_entity_pads_init(&sd->entity, 2, state->pads);
+       if (ret < 0)
+               return ret;
+#endif
+
        state->v4l2_std = V4L2_STD_NTSC;
        state->detected_std = V4L2_STD_ALL;
        state->audmode = V4L2_TUNER_MODE_STEREO;
index 6cae21366ed5e6b4ab7003dee04ba6504be05652..a8702aca187a97176eac9c4b2795e1b9c2969872 100644 (file)
@@ -7,6 +7,7 @@
 #include <media/drv-intf/msp3400.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
+#include <media/v4l2-mc.h>
 
 /* ---------------------------------------------------------------------- */
 
@@ -102,6 +103,10 @@ struct msp_state {
        wait_queue_head_t    wq;
        unsigned int         restart:1;
        unsigned int         watch_stereo:1;
+
+#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
+       struct media_pad pads[IF_AUD_DEC_PAD_NUM_PADS];
+#endif
 };
 
 static inline struct msp_state *to_state(struct v4l2_subdev *sd)
index b9fea11d6b0b0e08ff2c4440f976a97a077aaa99..9ed1b26b6549822a46338f6ce2b826147f3903c4 100644 (file)
@@ -50,6 +50,9 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
 
 struct mt9v011 {
        struct v4l2_subdev sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_pad pad;
+#endif
        struct v4l2_ctrl_handler ctrls;
        unsigned width, height;
        unsigned xtal;
@@ -493,6 +496,9 @@ static int mt9v011_probe(struct i2c_client *c,
        u16 version;
        struct mt9v011 *core;
        struct v4l2_subdev *sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+       int ret;
+#endif
 
        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(c->adapter,
@@ -506,6 +512,15 @@ static int mt9v011_probe(struct i2c_client *c,
        sd = &core->sd;
        v4l2_i2c_subdev_init(sd, c, &mt9v011_ops);
 
+#ifdef CONFIG_MEDIA_CONTROLLER
+       core->pad.flags = MEDIA_PAD_FL_SOURCE;
+       sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+       ret = media_entity_pads_init(&sd->entity, 1, &core->pad);
+       if (ret < 0)
+               return ret;
+#endif
+
        /* Check if the sensor is really a MT9V011 */
        version = mt9v011_read(sd, R00_MT9V011_CHIP_VERSION);
        if ((version != MT9V011_VERSION) &&
index 2e1d116a64e7b43eb1f6e886f842d9ef296664c9..501b37039449730411ad1673da7f4458d15ce373 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/i2c.h>
 #include <linux/log2.h>
 #include <linux/mutex.h>
@@ -251,6 +252,8 @@ struct mt9v032 {
 
        struct regmap *regmap;
        struct clk *clk;
+       struct gpio_desc *reset_gpio;
+       struct gpio_desc *standby_gpio;
 
        struct mt9v032_platform_data *pdata;
        const struct mt9v032_model_info *model;
@@ -312,16 +315,31 @@ static int mt9v032_power_on(struct mt9v032 *mt9v032)
        struct regmap *map = mt9v032->regmap;
        int ret;
 
+       if (mt9v032->reset_gpio)
+               gpiod_set_value_cansleep(mt9v032->reset_gpio, 1);
+
        ret = clk_set_rate(mt9v032->clk, mt9v032->sysclk);
        if (ret < 0)
                return ret;
 
+       /* System clock has to be enabled before releasing the reset */
        ret = clk_prepare_enable(mt9v032->clk);
        if (ret)
                return ret;
 
        udelay(1);
 
+       if (mt9v032->reset_gpio) {
+               gpiod_set_value_cansleep(mt9v032->reset_gpio, 0);
+
+               /* After releasing reset we need to wait 10 clock cycles
+                * before accessing the sensor over I2C. As the minimum SYSCLK
+                * frequency is 13MHz, waiting 1µs will be enough in the worst
+                * case.
+                */
+               udelay(1);
+       }
+
        /* Reset the chip and stop data read out */
        ret = regmap_write(map, MT9V032_RESET, 1);
        if (ret < 0)
@@ -954,6 +972,16 @@ static int mt9v032_probe(struct i2c_client *client,
        if (IS_ERR(mt9v032->clk))
                return PTR_ERR(mt9v032->clk);
 
+       mt9v032->reset_gpio = devm_gpiod_get_optional(&client->dev, "reset",
+                                                     GPIOD_OUT_HIGH);
+       if (IS_ERR(mt9v032->reset_gpio))
+               return PTR_ERR(mt9v032->reset_gpio);
+
+       mt9v032->standby_gpio = devm_gpiod_get_optional(&client->dev, "standby",
+                                                       GPIOD_OUT_LOW);
+       if (IS_ERR(mt9v032->standby_gpio))
+               return PTR_ERR(mt9v032->standby_gpio);
+
        mutex_init(&mt9v032->power_lock);
        mt9v032->pdata = pdata;
        mt9v032->model = (const void *)did->driver_data;
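The reset/standby handling added above relies on the usual gpiod semantics: devm_gpiod_get_optional() returns NULL when the DT property is absent, an ERR_PTR() on real errors (including -EPROBE_DEFER), and a valid descriptor otherwise, so only IS_ERR() needs to be propagated. A minimal sketch of that probe-time sequence; the foo_* helper and the pulse width are assumptions, not part of the patch:

    #include <linux/delay.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int foo_get_reset_gpio(struct device *dev, struct gpio_desc **reset)
    {
            /* Request the line already asserted, matching GPIOD_OUT_HIGH above. */
            *reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
            if (IS_ERR(*reset))
                    return PTR_ERR(*reset);         /* real error, e.g. -EPROBE_DEFER */

            if (*reset) {
                    usleep_range(1000, 2000);       /* assumed reset pulse width */
                    gpiod_set_value_cansleep(*reset, 0);    /* release reset */
            }

            return 0;       /* NULL simply means "no reset GPIO wired up" */
    }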
index 02b9a3440557b49e9ad7dd366d201ebcd6e962ed..1f999e9c0118e358ac111c17084c21799a677db0 100644 (file)
@@ -1321,10 +1321,6 @@ static int ov2659_detect(struct v4l2_subdev *sd)
        }
        usleep_range(1000, 2000);
 
-       ret = ov2659_init(sd, 0);
-       if (ret < 0)
-               return ret;
-
        /* Check sensor revision */
        ret = ov2659_read(client, REG_SC_CHIP_ID_H, &pid);
        if (!ret)
@@ -1338,8 +1334,10 @@ static int ov2659_detect(struct v4l2_subdev *sd)
                        dev_err(&client->dev,
                                "Sensor detection failed (%04X, %d)\n",
                                id, ret);
-               else
+               else {
                        dev_info(&client->dev, "Found OV%04X sensor\n", id);
+                       ret = ov2659_init(sd, 0);
+               }
        }
 
        return ret;
index 57b3d27993a40ffadf7d42a5b791e71f3f74eb76..08af58fb8e7d0089a410b8ceb1e4c2466ba9f9da 100644 (file)
@@ -1639,8 +1639,10 @@ static int s5c73m3_get_platform_data(struct s5c73m3 *state)
                return 0;
        }
 
-       v4l2_of_parse_endpoint(node_ep, &ep);
+       ret = v4l2_of_parse_endpoint(node_ep, &ep);
        of_node_put(node_ep);
+       if (ret)
+               return ret;
 
        if (ep.bus_type != V4L2_MBUS_CSI2) {
                dev_err(dev, "unsupported bus type\n");
index 7d65b36434b1f3ccfccf25a7ff2071821080ff4b..72ef9f936e6ceb13ad7b94e5c998ffa9543700e8 100644 (file)
@@ -37,7 +37,6 @@ enum spi_direction {
        SPI_DIR_RX,
        SPI_DIR_TX
 };
-MODULE_DEVICE_TABLE(of, s5c73m3_spi_ids);
 
 static int spi_xmit(struct spi_device *spi_dev, void *addr, const int len,
                                                        enum spi_direction dir)
index fc3a5a8e6c9c7ebafb5b8a3ba97e1fa7cbe564fd..db82ed05792ef7d21c99879c7cd38e9a754af0fe 100644 (file)
@@ -1868,8 +1868,11 @@ static int s5k5baf_parse_device_node(struct s5k5baf *state, struct device *dev)
                return -EINVAL;
        }
 
-       v4l2_of_parse_endpoint(node_ep, &ep);
+       ret = v4l2_of_parse_endpoint(node_ep, &ep);
        of_node_put(node_ep);
+       if (ret)
+               return ret;
+
        state->bus_type = ep.bus_type;
 
        switch (state->bus_type) {
index b9e43ffa50859caeb4ce2b8343f660a598d249ff..cbe4711e9b31acfca212a8ef0a96867c03bb45fb 100644 (file)
@@ -144,8 +144,7 @@ static int s5k6a3_set_fmt(struct v4l2_subdev *sd,
        mf = __s5k6a3_get_format(sensor, cfg, fmt->pad, fmt->which);
        if (mf) {
                mutex_lock(&sensor->lock);
-               if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
-                       *mf = fmt->format;
+               *mf = fmt->format;
                mutex_unlock(&sensor->lock);
        }
        return 0;
index 24d2b76dbe97e7a24d762b2bc58a6ba2bc2c6051..d2a1ce2bc7f5ce41eec4c1c6d5069280c358df5d 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/videodev2.h>
 #include <media/v4l2-device.h>
 #include <media/v4l2-ctrls.h>
+#include <media/v4l2-mc.h>
 #include <media/i2c/saa7115.h>
 #include <asm/div64.h>
 
@@ -74,6 +75,9 @@ enum saa711x_model {
 
 struct saa711x_state {
        struct v4l2_subdev sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_pad pads[DEMOD_NUM_PADS];
+#endif
        struct v4l2_ctrl_handler hdl;
 
        struct {
@@ -1809,6 +1813,9 @@ static int saa711x_probe(struct i2c_client *client,
        struct saa7115_platform_data *pdata;
        int ident;
        char name[CHIP_VER_SIZE + 1];
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       int ret;
+#endif
 
        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -1832,6 +1839,18 @@ static int saa711x_probe(struct i2c_client *client,
        sd = &state->sd;
        v4l2_i2c_subdev_init(sd, client, &saa711x_ops);
 
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       state->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+       state->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+       state->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
+
+       sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
+
+       ret = media_entity_pads_init(&sd->entity, DEMOD_NUM_PADS, state->pads);
+       if (ret < 0)
+               return ret;
+#endif
+
        v4l_info(client, "%s found @ 0x%x (%s)\n", name,
                 client->addr << 1, client->adapter->name);
        hdl = &state->hdl;
index 2e14e52ba2e056a071d0f1e4e02538569e446af5..69becc358659cf25dc6a4ccf7ddf0d2dbbe9573d 100644 (file)
@@ -632,7 +632,7 @@ static struct v4l2_subdev_video_ops mt9m001_subdev_video_ops = {
        .s_mbus_config  = mt9m001_s_mbus_config,
 };
 
-static struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
+static const struct v4l2_subdev_sensor_ops mt9m001_subdev_sensor_ops = {
        .g_skip_top_lines       = mt9m001_g_skip_top_lines,
 };
 
index 3b6eeed2e2b96d9457b0c0a09eeef88d1697c8ef..5c8e3ffe3b27bff0b69c51d218cd68e7f3001d0e 100644 (file)
@@ -728,7 +728,7 @@ static struct v4l2_subdev_video_ops mt9t031_subdev_video_ops = {
        .s_mbus_config  = mt9t031_s_mbus_config,
 };
 
-static struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
+static const struct v4l2_subdev_sensor_ops mt9t031_subdev_sensor_ops = {
        .g_skip_top_lines       = mt9t031_g_skip_top_lines,
 };
 
index c2ba1fb3694dfe78a874186d50b042c60bdde7dd..2721e583bfa0b239c3f36b7cb8d3efa6affc53a4 100644 (file)
@@ -860,7 +860,7 @@ static struct v4l2_subdev_video_ops mt9v022_subdev_video_ops = {
        .s_mbus_config  = mt9v022_s_mbus_config,
 };
 
-static struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
+static const struct v4l2_subdev_sensor_ops mt9v022_subdev_sensor_ops = {
        .g_skip_top_lines       = mt9v022_g_skip_top_lines,
 };
 
index 3397eb99c67b0269beecf3cc47c0076758d53885..da7469bc6e56fa8ede646e5aa7846f5a931e044f 100644 (file)
@@ -59,8 +59,7 @@ MODULE_LICENSE("GPL");
 #define EDID_NUM_BLOCKS_MAX 8
 #define EDID_BLOCK_SIZE 128
 
-/* Max transfer size done by I2C transfer functions */
-#define MAX_XFER_SIZE  (EDID_NUM_BLOCKS_MAX * EDID_BLOCK_SIZE + 2)
+#define I2C_MAX_XFER_SIZE  (EDID_BLOCK_SIZE + 2)
 
 static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
@@ -97,9 +96,6 @@ struct tc358743_state {
        /* edid  */
        u8 edid_blocks_written;
 
-       /* used by i2c_wr() */
-       u8 wr_data[MAX_XFER_SIZE];
-
        struct v4l2_dv_timings timings;
        u32 mbus_fmt_code;
 
@@ -149,13 +145,15 @@ static void i2c_wr(struct v4l2_subdev *sd, u16 reg, u8 *values, u32 n)
 {
        struct tc358743_state *state = to_state(sd);
        struct i2c_client *client = state->i2c_client;
-       u8 *data = state->wr_data;
        int err, i;
        struct i2c_msg msg;
+       u8 data[I2C_MAX_XFER_SIZE];
 
-       if ((2 + n) > sizeof(state->wr_data))
+       if ((2 + n) > I2C_MAX_XFER_SIZE) {
+               n = I2C_MAX_XFER_SIZE - 2;
                v4l2_warn(sd, "i2c wr reg=%04x: len=%d is too big!\n",
                          reg, 2 + n);
+       }
 
        msg.addr = client->addr;
        msg.buf = data;
@@ -859,15 +857,16 @@ static void tc358743_format_change(struct v4l2_subdev *sd)
        if (tc358743_get_detected_timings(sd, &timings)) {
                enable_stream(sd, false);
 
-               v4l2_dbg(1, debug, sd, "%s: Format changed. No signal\n",
+               v4l2_dbg(1, debug, sd, "%s: No signal\n",
                                __func__);
        } else {
                if (!v4l2_match_dv_timings(&state->timings, &timings, 0, false))
                        enable_stream(sd, false);
 
-               v4l2_print_dv_timings(sd->name,
-                               "tc358743_format_change: Format changed. New format: ",
-                               &timings, false);
+               if (debug)
+                       v4l2_print_dv_timings(sd->name,
+                                       "tc358743_format_change: New format: ",
+                                       &timings, false);
        }
 
        if (sd->devnode)
@@ -1581,6 +1580,7 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
 {
        struct tc358743_state *state = to_state(sd);
        u16 edid_len = edid->blocks * EDID_BLOCK_SIZE;
+       int i;
 
        v4l2_dbg(2, debug, sd, "%s, pad %d, start block %d, blocks %d\n",
                 __func__, edid->pad, edid->start_block, edid->blocks);
@@ -1606,7 +1606,8 @@ static int tc358743_s_edid(struct v4l2_subdev *sd,
                return 0;
        }
 
-       i2c_wr(sd, EDID_RAM, edid->edid, edid_len);
+       for (i = 0; i < edid_len; i += EDID_BLOCK_SIZE)
+               i2c_wr(sd, EDID_RAM + i, edid->edid + i, EDID_BLOCK_SIZE);
 
        state->edid_blocks_written = edid->blocks;
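With the transfer buffer moved onto the stack and capped at one EDID block plus the two address bytes, the EDID can no longer be written in a single transfer; the hunk above splits it into EDID_BLOCK_SIZE chunks. A simplified sketch of that chunked write, assuming an i2c_wr()-style register helper as in the driver (foo_* names are placeholders):

    #include <linux/types.h>

    #define EDID_BLOCK_SIZE         128
    #define I2C_MAX_XFER_SIZE       (EDID_BLOCK_SIZE + 2)   /* block + 2 address bytes */

    /* Stands in for the driver's i2c_wr() register-write helper. */
    static void foo_i2c_wr(u16 reg, const u8 *values, u32 n) { /* ... */ }

    static void foo_write_edid(u16 edid_ram_reg, const u8 *edid, unsigned int blocks)
    {
            unsigned int i;

            /* One transfer per 128-byte block keeps every message within
             * I2C_MAX_XFER_SIZE, so the on-stack buffer in i2c_wr() is enough. */
            for (i = 0; i < blocks * EDID_BLOCK_SIZE; i += EDID_BLOCK_SIZE)
                    foo_i2c_wr(edid_ram_reg + i, edid + i, EDID_BLOCK_SIZE);
    }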
 
index 7fa5f1e4fe3798155011263f9e4bdd0ad6b2955c..7cdd94842938e6d923fc9ed209f98556e242a9bc 100644 (file)
@@ -1001,7 +1001,7 @@ static struct tvp514x_decoder tvp514x_dev = {
 static struct tvp514x_platform_data *
 tvp514x_get_pdata(struct i2c_client *client)
 {
-       struct tvp514x_platform_data *pdata;
+       struct tvp514x_platform_data *pdata = NULL;
        struct v4l2_of_endpoint bus_cfg;
        struct device_node *endpoint;
        unsigned int flags;
@@ -1013,11 +1013,13 @@ tvp514x_get_pdata(struct i2c_client *client)
        if (!endpoint)
                return NULL;
 
+       if (v4l2_of_parse_endpoint(endpoint, &bus_cfg))
+               goto done;
+
        pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                goto done;
 
-       v4l2_of_parse_endpoint(endpoint, &bus_cfg);
        flags = bus_cfg.bus.parallel.flags;
 
        if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
index 6c3769d44b75ccf6cc113dece96d2ab2e6244b2b..19b52736b24e97236abf849bc1d1650410594d90 100644 (file)
@@ -9,11 +9,14 @@
 #include <linux/slab.h>
 #include <linux/videodev2.h>
 #include <linux/delay.h>
+#include <linux/gpio/consumer.h>
 #include <linux/module.h>
 #include <media/v4l2-async.h>
 #include <media/v4l2-device.h>
 #include <media/i2c/tvp5150.h>
 #include <media/v4l2-ctrls.h>
+#include <media/v4l2-of.h>
+#include <media/v4l2-mc.h>
 
 #include "tvp5150_reg.h"
 
@@ -35,6 +38,9 @@ MODULE_PARM_DESC(debug, "Debug level (0-2)");
 
 struct tvp5150 {
        struct v4l2_subdev sd;
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_pad pads[DEMOD_NUM_PADS];
+#endif
        struct v4l2_ctrl_handler hdl;
        struct v4l2_rect rect;
 
@@ -42,6 +48,8 @@ struct tvp5150 {
        u32 input;
        u32 output;
        int enable;
+
+       enum v4l2_mbus_type mbus_type;
 };
 
 static inline struct tvp5150 *to_tvp5150(struct v4l2_subdev *sd)
@@ -772,6 +780,10 @@ static int tvp5150_reset(struct v4l2_subdev *sd, u32 val)
        v4l2_ctrl_handler_setup(&decoder->hdl);
 
        tvp5150_set_std(sd, decoder->norm);
+
+       if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+               tvp5150_write(sd, TVP5150_DATA_RATE_SEL, 0x40);
+
        return 0;
 };
 
@@ -818,17 +830,6 @@ static v4l2_std_id tvp5150_read_std(struct v4l2_subdev *sd)
        }
 }
 
-static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
-               struct v4l2_subdev_pad_config *cfg,
-               struct v4l2_subdev_mbus_code_enum *code)
-{
-       if (code->pad || code->index)
-               return -EINVAL;
-
-       code->code = MEDIA_BUS_FMT_UYVY8_2X8;
-       return 0;
-}
-
 static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
                struct v4l2_subdev_pad_config *cfg,
                struct v4l2_subdev_format *format)
@@ -844,10 +845,10 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
        tvp5150_reset(sd, 0);
 
        f->width = decoder->rect.width;
-       f->height = decoder->rect.height;
+       f->height = decoder->rect.height / 2;
 
        f->code = MEDIA_BUS_FMT_UYVY8_2X8;
-       f->field = V4L2_FIELD_SEQ_TB;
+       f->field = V4L2_FIELD_ALTERNATE;
        f->colorspace = V4L2_COLORSPACE_SMPTE170M;
 
        v4l2_dbg(1, debug, sd, "width = %d, height = %d\n", f->width,
@@ -948,10 +949,76 @@ static int tvp5150_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
        return 0;
 }
 
+static int tvp5150_g_mbus_config(struct v4l2_subdev *sd,
+                                struct v4l2_mbus_config *cfg)
+{
+       struct tvp5150 *decoder = to_tvp5150(sd);
+
+       cfg->type = decoder->mbus_type;
+       cfg->flags = V4L2_MBUS_MASTER | V4L2_MBUS_PCLK_SAMPLE_RISING
+                  | V4L2_MBUS_FIELD_EVEN_LOW | V4L2_MBUS_DATA_ACTIVE_HIGH;
+
+       return 0;
+}
+
+/****************************************************************************
+                       V4L2 subdev pad ops
+ ****************************************************************************/
+static int tvp5150_enum_mbus_code(struct v4l2_subdev *sd,
+               struct v4l2_subdev_pad_config *cfg,
+               struct v4l2_subdev_mbus_code_enum *code)
+{
+       if (code->pad || code->index)
+               return -EINVAL;
+
+       code->code = MEDIA_BUS_FMT_UYVY8_2X8;
+       return 0;
+}
+
+static int tvp5150_enum_frame_size(struct v4l2_subdev *sd,
+                                  struct v4l2_subdev_pad_config *cfg,
+                                  struct v4l2_subdev_frame_size_enum *fse)
+{
+       struct tvp5150 *decoder = to_tvp5150(sd);
+
+       if (fse->index >= 8 || fse->code != MEDIA_BUS_FMT_UYVY8_2X8)
+               return -EINVAL;
+
+       fse->code = MEDIA_BUS_FMT_UYVY8_2X8;
+       fse->min_width = decoder->rect.width;
+       fse->max_width = decoder->rect.width;
+       fse->min_height = decoder->rect.height / 2;
+       fse->max_height = decoder->rect.height / 2;
+
+       return 0;
+}
+
 /****************************************************************************
                        I2C Command
  ****************************************************************************/
 
+static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
+{
+       struct tvp5150 *decoder = to_tvp5150(sd);
+       /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
+       int val = 0x09;
+
+       /* Output format: 8-bit 4:2:2 YUV with discrete sync */
+       if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+               val = 0x0d;
+
+       /* Initializes TVP5150 to its default values */
+       /* Set PCLK (27MHz) */
+       tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+
+       if (enable)
+               tvp5150_write(sd, TVP5150_MISC_CTL, val);
+       else
+               tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+
+       return 0;
+}
+
 static int tvp5150_s_routing(struct v4l2_subdev *sd,
                             u32 input, u32 output, u32 config)
 {
@@ -1073,10 +1140,12 @@ static const struct v4l2_subdev_tuner_ops tvp5150_tuner_ops = {
 
 static const struct v4l2_subdev_video_ops tvp5150_video_ops = {
        .s_std = tvp5150_s_std,
+       .s_stream = tvp5150_s_stream,
        .s_routing = tvp5150_s_routing,
        .s_crop = tvp5150_s_crop,
        .g_crop = tvp5150_g_crop,
        .cropcap = tvp5150_cropcap,
+       .g_mbus_config = tvp5150_g_mbus_config,
 };
 
 static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
@@ -1088,6 +1157,7 @@ static const struct v4l2_subdev_vbi_ops tvp5150_vbi_ops = {
 
 static const struct v4l2_subdev_pad_ops tvp5150_pad_ops = {
        .enum_mbus_code = tvp5150_enum_mbus_code,
+       .enum_frame_size = tvp5150_enum_frame_size,
        .set_fmt = tvp5150_fill_fmt,
        .get_fmt = tvp5150_fill_fmt,
 };
@@ -1105,63 +1175,167 @@ static const struct v4l2_subdev_ops tvp5150_ops = {
                        I2C Client & Driver
  ****************************************************************************/
 
+static int tvp5150_detect_version(struct tvp5150 *core)
+{
+       struct v4l2_subdev *sd = &core->sd;
+       struct i2c_client *c = v4l2_get_subdevdata(sd);
+       unsigned int i;
+       u16 dev_id;
+       u16 rom_ver;
+       u8 regs[4];
+       int res;
+
+       /*
+        * Read consecutive registers - TVP5150_MSB_DEV_ID, TVP5150_LSB_DEV_ID,
+        * TVP5150_ROM_MAJOR_VER, TVP5150_ROM_MINOR_VER
+        */
+       for (i = 0; i < 4; i++) {
+               res = tvp5150_read(sd, TVP5150_MSB_DEV_ID + i);
+               if (res < 0)
+                       return res;
+               regs[i] = res;
+       }
+
+       dev_id = (regs[0] << 8) | regs[1];
+       rom_ver = (regs[2] << 8) | regs[3];
+
+       v4l2_info(sd, "tvp%04x (%u.%u) chip found @ 0x%02x (%s)\n",
+                 dev_id, regs[2], regs[3], c->addr << 1, c->adapter->name);
+
+       if (dev_id == 0x5150 && rom_ver == 0x0321) { /* TVP5150A */
+               v4l2_info(sd, "tvp5150a detected.\n");
+       } else if (dev_id == 0x5150 && rom_ver == 0x0400) { /* TVP5150AM1 */
+               v4l2_info(sd, "tvp5150am1 detected.\n");
+
+               /* ITU-T BT.656.4 timing */
+               tvp5150_write(sd, TVP5150_REV_SELECT, 0);
+       } else if (dev_id == 0x5151 && rom_ver == 0x0100) { /* TVP5151 */
+               v4l2_info(sd, "tvp5151 detected.\n");
+       } else {
+               v4l2_info(sd, "*** unknown tvp%04x chip detected.\n", dev_id);
+       }
+
+       return 0;
+}
+
+static int tvp5150_init(struct i2c_client *c)
+{
+       struct gpio_desc *pdn_gpio;
+       struct gpio_desc *reset_gpio;
+
+       pdn_gpio = devm_gpiod_get_optional(&c->dev, "pdn", GPIOD_OUT_HIGH);
+       if (IS_ERR(pdn_gpio))
+               return PTR_ERR(pdn_gpio);
+
+       if (pdn_gpio) {
+               gpiod_set_value_cansleep(pdn_gpio, 0);
+               /* Delay time between power supplies active and reset */
+               msleep(20);
+       }
+
+       reset_gpio = devm_gpiod_get_optional(&c->dev, "reset", GPIOD_OUT_HIGH);
+       if (IS_ERR(reset_gpio))
+               return PTR_ERR(reset_gpio);
+
+       if (reset_gpio) {
+               /* RESETB pulse duration */
+               ndelay(500);
+               gpiod_set_value_cansleep(reset_gpio, 0);
+               /* Delay time between end of reset to I2C active */
+               usleep_range(200, 250);
+       }
+
+       return 0;
+}
+
+static int tvp5150_parse_dt(struct tvp5150 *decoder, struct device_node *np)
+{
+       struct v4l2_of_endpoint bus_cfg;
+       struct device_node *ep;
+       unsigned int flags;
+       int ret = 0;
+
+       ep = of_graph_get_next_endpoint(np, NULL);
+       if (!ep)
+               return -EINVAL;
+
+       ret = v4l2_of_parse_endpoint(ep, &bus_cfg);
+       if (ret)
+               goto err;
+
+       flags = bus_cfg.bus.parallel.flags;
+
+       if (bus_cfg.bus_type == V4L2_MBUS_PARALLEL &&
+           !(flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH &&
+             flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH &&
+             flags & V4L2_MBUS_FIELD_EVEN_LOW))
+               return -EINVAL;
+
+       decoder->mbus_type = bus_cfg.bus_type;
+
+err:
+       of_node_put(ep);
+       return ret;
+}
+
 static int tvp5150_probe(struct i2c_client *c,
                         const struct i2c_device_id *id)
 {
        struct tvp5150 *core;
        struct v4l2_subdev *sd;
-       int tvp5150_id[4];
-       int i, res;
+       struct device_node *np = c->dev.of_node;
+       int res;
 
        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(c->adapter,
             I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
                return -EIO;
 
+       res = tvp5150_init(c);
+       if (res)
+               return res;
+
        core = devm_kzalloc(&c->dev, sizeof(*core), GFP_KERNEL);
        if (!core)
                return -ENOMEM;
+
        sd = &core->sd;
-       v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
 
-       /* 
-        * Read consequent registers - TVP5150_MSB_DEV_ID, TVP5150_LSB_DEV_ID,
-        * TVP5150_ROM_MAJOR_VER, TVP5150_ROM_MINOR_VER 
-        */
-       for (i = 0; i < 4; i++) {
-               res = tvp5150_read(sd, TVP5150_MSB_DEV_ID + i);
-               if (res < 0)
+       if (IS_ENABLED(CONFIG_OF) && np) {
+               res = tvp5150_parse_dt(core, np);
+               if (res) {
+                       v4l2_err(sd, "DT parsing error: %d\n", res);
                        return res;
-               tvp5150_id[i] = res;
+               }
+       } else {
+               /* Default to BT.656 embedded sync */
+               core->mbus_type = V4L2_MBUS_BT656;
        }
 
-       v4l_info(c, "chip found @ 0x%02x (%s)\n",
-                c->addr << 1, c->adapter->name);
+       v4l2_i2c_subdev_init(sd, c, &tvp5150_ops);
+       sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
 
-       if (tvp5150_id[2] == 4 && tvp5150_id[3] == 0) { /* Is TVP5150AM1 */
-               v4l2_info(sd, "tvp%02x%02xam1 detected.\n",
-                         tvp5150_id[0], tvp5150_id[1]);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       core->pads[DEMOD_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+       core->pads[DEMOD_PAD_VID_OUT].flags = MEDIA_PAD_FL_SOURCE;
+       core->pads[DEMOD_PAD_VBI_OUT].flags = MEDIA_PAD_FL_SOURCE;
 
-               /* ITU-T BT.656.4 timing */
-               tvp5150_write(sd, TVP5150_REV_SELECT, 0);
-       } else {
-               /* Is TVP5150A */
-               if (tvp5150_id[2] == 3 || tvp5150_id[3] == 0x21) {
-                       v4l2_info(sd, "tvp%02x%02xa detected.\n",
-                                 tvp5150_id[0], tvp5150_id[1]);
-               } else {
-                       v4l2_info(sd, "*** unknown tvp%02x%02x chip detected.\n",
-                                 tvp5150_id[0], tvp5150_id[1]);
-                       v4l2_info(sd, "*** Rom ver is %d.%d\n",
-                                 tvp5150_id[2], tvp5150_id[3]);
-               }
-       }
+       sd->entity.function = MEDIA_ENT_F_ATV_DECODER;
+
+       res = media_entity_pads_init(&sd->entity, DEMOD_NUM_PADS, core->pads);
+       if (res < 0)
+               return res;
+#endif
+
+       res = tvp5150_detect_version(core);
+       if (res < 0)
+               return res;
 
        core->norm = V4L2_STD_ALL;      /* Default is autodetect */
        core->input = TVP5150_COMPOSITE1;
        core->enable = 1;
 
-       v4l2_ctrl_handler_init(&core->hdl, 4);
+       v4l2_ctrl_handler_init(&core->hdl, 5);
        v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
                        V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
        v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
@@ -1170,6 +1344,9 @@ static int tvp5150_probe(struct i2c_client *c,
                        V4L2_CID_SATURATION, 0, 255, 1, 128);
        v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
                        V4L2_CID_HUE, -128, 127, 1, 0);
+       v4l2_ctrl_new_std(&core->hdl, &tvp5150_ctrl_ops,
+                       V4L2_CID_PIXEL_RATE, 27000000,
+                       27000000, 1, 27000000);
        sd->ctrl_handler = &core->hdl;
        if (core->hdl.error) {
                res = core->hdl.error;
@@ -1221,8 +1398,17 @@ static const struct i2c_device_id tvp5150_id[] = {
 };
 MODULE_DEVICE_TABLE(i2c, tvp5150_id);
 
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tvp5150_of_match[] = {
+       { .compatible = "ti,tvp5150", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tvp5150_of_match);
+#endif
+
 static struct i2c_driver tvp5150_driver = {
        .driver = {
+               .of_match_table = of_match_ptr(tvp5150_of_match),
                .name   = "tvp5150",
        },
        .probe          = tvp5150_probe,
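The new OF support above is wired up with the standard pattern: an of_device_id table plus MODULE_DEVICE_TABLE(of, ...) guarded by CONFIG_OF, and of_match_ptr() in the driver structure so non-DT builds still link. A generic sketch of that registration shape; the compatible string, driver name, and omitted .probe/.id_table fields are only examples:

    #include <linux/i2c.h>
    #include <linux/module.h>
    #include <linux/of.h>

    #if IS_ENABLED(CONFIG_OF)
    static const struct of_device_id foo_of_match[] = {
            { .compatible = "vendor,foo-decoder" },     /* example compatible */
            { /* sentinel */ },
    };
    MODULE_DEVICE_TABLE(of, foo_of_match);
    #endif

    static struct i2c_driver foo_driver = {
            .driver = {
                    .name           = "foo",
                    /* Evaluates to NULL when CONFIG_OF is disabled. */
                    .of_match_table = of_match_ptr(foo_of_match),
            },
            /* .probe, .remove and .id_table as in the real driver */
    };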
index 83c79fa5f61de1b7155cb42ff69672f87036b5ba..4df640c3aa40fde3eecf4206233e1aa04745f12d 100644 (file)
@@ -894,7 +894,7 @@ static struct tvp7002_config *
 tvp7002_get_pdata(struct i2c_client *client)
 {
        struct v4l2_of_endpoint bus_cfg;
-       struct tvp7002_config *pdata;
+       struct tvp7002_config *pdata = NULL;
        struct device_node *endpoint;
        unsigned int flags;
 
@@ -905,11 +905,13 @@ tvp7002_get_pdata(struct i2c_client *client)
        if (!endpoint)
                return NULL;
 
+       if (v4l2_of_parse_endpoint(endpoint, &bus_cfg))
+               goto done;
+
        pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                goto done;
 
-       v4l2_of_parse_endpoint(endpoint, &bus_cfg);
        flags = bus_cfg.bus.parallel.flags;
 
        if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
index 4b564f17f618e38664d9cdffc8992fe6cf61a737..90b693f4e2ab84b4da6b68bf79f5b24fab65149d 100644 (file)
@@ -124,7 +124,7 @@ static int vpx3220_fp_write(struct v4l2_subdev *sd, u8 fpaddr, u16 data)
        return 0;
 }
 
-static u16 vpx3220_fp_read(struct v4l2_subdev *sd, u16 fpaddr)
+static int vpx3220_fp_read(struct v4l2_subdev *sd, u16 fpaddr)
 {
        struct i2c_client *client = v4l2_get_subdevdata(sd);
        s16 data;
index 7dae0ac0f3aec12c5d44875111d3a69818e43c92..4d1c13de494bfb9f7a4957311225211250480b5d 100644 (file)
@@ -234,7 +234,6 @@ static long media_device_setup_link(struct media_device *mdev,
        return ret;
 }
 
-#if 0 /* Let's postpone it to Kernel 4.6 */
 static long __media_device_get_topology(struct media_device *mdev,
                                      struct media_v2_topology *topo)
 {
@@ -390,7 +389,6 @@ static long media_device_get_topology(struct media_device *mdev,
 
        return 0;
 }
-#endif
 
 static long media_device_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
@@ -424,14 +422,13 @@ static long media_device_ioctl(struct file *filp, unsigned int cmd,
                mutex_unlock(&dev->graph_mutex);
                break;
 
-#if 0 /* Let's postpone it to Kernel 4.6 */
        case MEDIA_IOC_G_TOPOLOGY:
                mutex_lock(&dev->graph_mutex);
                ret = media_device_get_topology(dev,
                                (struct media_v2_topology __user *)arg);
                mutex_unlock(&dev->graph_mutex);
                break;
-#endif
+
        default:
                ret = -ENOIOCTLCMD;
        }
@@ -480,9 +477,7 @@ static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
        case MEDIA_IOC_DEVICE_INFO:
        case MEDIA_IOC_ENUM_ENTITIES:
        case MEDIA_IOC_SETUP_LINK:
-#if 0 /* Let's postpone it to Kernel 4.6 */
        case MEDIA_IOC_G_TOPOLOGY:
-#endif
                return media_device_ioctl(filp, cmd, arg);
 
        case MEDIA_IOC_ENUM_LINKS32:
index cea35bf2001170d7ca0471e1d51e5fe9eae8211e..29409f440f1caef38f82694b382d3b66a3895ffb 100644 (file)
@@ -181,6 +181,7 @@ static int media_open(struct inode *inode, struct file *filp)
                ret = mdev->fops->open(filp);
                if (ret) {
                        put_device(&mdev->dev);
+                       filp->private_data = NULL;
                        return ret;
                }
        }
index e89d85a7d31b56286a6d8c9d492b5565dc6a4d43..f2e43603d6d24f6a3c27105369187a73d61d8175 100644 (file)
@@ -452,9 +452,12 @@ error:
        media_entity_graph_walk_start(graph, entity_err);
 
        while ((entity_err = media_entity_graph_walk_next(graph))) {
-               entity_err->stream_count--;
-               if (entity_err->stream_count == 0)
-                       entity_err->pipe = NULL;
+               /* don't let the stream_count go negative */
+               if (entity_err->stream_count > 0) {
+                       entity_err->stream_count--;
+                       if (entity_err->stream_count == 0)
+                               entity_err->pipe = NULL;
+               }
 
                /*
                 * We haven't increased stream_count further than this
@@ -486,9 +489,12 @@ void media_entity_pipeline_stop(struct media_entity *entity)
        media_entity_graph_walk_start(graph, entity);
 
        while ((entity = media_entity_graph_walk_next(graph))) {
-               entity->stream_count--;
-               if (entity->stream_count == 0)
-                       entity->pipe = NULL;
+               /* don't let the stream_count go negative */
+               if (entity->stream_count > 0) {
+                       entity->stream_count--;
+                       if (entity->stream_count == 0)
+                               entity->pipe = NULL;
+               }
        }
 
        if (!--pipe->streaming_count)
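Both hunks guard the same invariant: a graph walk during error unwind or pipeline stop may visit entities whose stream_count was never raised, so the decrement must be skipped for them rather than driving the counter negative. The guard, factored out as a sketch (the helper name is illustrative; the fields match the media_entity layout used here):

    #include <media/media-entity.h>

    /* Only entities that were actually started get their stream_count dropped. */
    static void foo_entity_unmark_streaming(struct media_entity *entity)
    {
            if (entity->stream_count > 0) {
                    entity->stream_count--;
                    if (entity->stream_count == 0)
                            entity->pipe = NULL;
            }
    }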
index 8b5e0b3a92a0c4a408af8bc50217279065b78803..4cac1fc233f28fd0a99e58f923a216482b63d90a 100644 (file)
@@ -39,7 +39,7 @@ MODULE_PARM_DESC(debug,
 
 #define DRIVER_VERSION "0.1"
 #define DRIVER_NAME "flexcop-pci"
-#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
+#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
 
 struct flexcop_pci {
        struct pci_dev *pdev;
index 9400e996087b6a5eb5076cd7e0ab399f2bed0e9e..2c412377507bc16f13468f524b566618f5d7f76d 100644 (file)
@@ -186,7 +186,7 @@ MODULE_VERSION(BTTV_VERSION);
 static ssize_t show_card(struct device *cd,
                         struct device_attribute *attr, char *buf)
 {
-       struct video_device *vfd = container_of(cd, struct video_device, dev);
+       struct video_device *vfd = to_video_device(cd);
        struct bttv *btv = video_get_drvdata(vfd);
        return sprintf(buf, "%d\n", btv ? btv->c.type : UNSET);
 }
@@ -1726,22 +1726,15 @@ static int bttv_s_std(struct file *file, void *priv, v4l2_std_id id)
        struct bttv_fh *fh  = priv;
        struct bttv *btv = fh->btv;
        unsigned int i;
-       int err = 0;
 
        for (i = 0; i < BTTV_TVNORMS; i++)
                if (id & bttv_tvnorms[i].v4l2_id)
                        break;
-       if (i == BTTV_TVNORMS) {
-               err = -EINVAL;
-               goto err;
-       }
-
+       if (i == BTTV_TVNORMS)
+               return -EINVAL;
        btv->std = id;
        set_tvnorm(btv, i);
-
-err:
-
-       return err;
+       return 0;
 }
 
 static int bttv_g_std(struct file *file, void *priv, v4l2_std_id *id)
@@ -1770,12 +1763,9 @@ static int bttv_enum_input(struct file *file, void *priv,
 {
        struct bttv_fh *fh = priv;
        struct bttv *btv = fh->btv;
-       int rc = 0;
 
-       if (i->index >= bttv_tvcards[btv->c.type].video_inputs) {
-               rc = -EINVAL;
-               goto err;
-       }
+       if (i->index >= bttv_tvcards[btv->c.type].video_inputs)
+               return -EINVAL;
 
        i->type     = V4L2_INPUT_TYPE_CAMERA;
        i->audioset = 0;
@@ -1799,10 +1789,7 @@ static int bttv_enum_input(struct file *file, void *priv,
        }
 
        i->std = BTTV_NORMS;
-
-err:
-
-       return rc;
+       return 0;
 }
 
 static int bttv_g_input(struct file *file, void *priv, unsigned int *i)
index d407244fd1bce3339b3fdbc13ea3e4801114f19a..e69d338ab9be0e0f51b85b05e2e4071c5982c802 100644 (file)
@@ -318,7 +318,7 @@ static int microtune_mt7202dtf_request_firmware(struct dvb_frontend* fe, const s
        return request_firmware(fw, name, &bt->bt->dev->dev);
 }
 
-static struct sp887x_config microtune_mt7202dtf_config = {
+static const struct sp887x_config microtune_mt7202dtf_config = {
        .demod_address = 0x70,
        .request_firmware = microtune_mt7202dtf_request_firmware,
 };
@@ -458,7 +458,7 @@ static void or51211_sleep(struct dvb_frontend * fe)
        bttv_write_gpio(bt->bttv_nr, 0x0001, 0x0000);
 }
 
-static struct or51211_config or51211_config = {
+static const struct or51211_config or51211_config = {
        .demod_address = 0x15,
        .request_firmware = or51211_request_firmware,
        .setmode = or51211_setmode,
index 9d5b314142f1b49daabb78985f6d13a2f982405a..6e995ef8c37e80872be886d5aa8a7027915ad992 100644 (file)
@@ -690,7 +690,7 @@ static int tuner_attach_stv6110(struct ddb_input *input, int type)
        struct stv090x_config *feconf = type ? &stv0900_aa : &stv0900;
        struct stv6110x_config *tunerconf = (input->nr & 1) ?
                &stv6110b : &stv6110a;
-       struct stv6110x_devctl *ctl;
+       const struct stv6110x_devctl *ctl;
 
        ctl = dvb_attach(stv6110x_attach, input->fe, tunerconf, i2c);
        if (!ctl) {
index 525ebfefeee829c644a74d279d888a7fc60e9ca3..c94cecd2aa4068c5d7a4922800745e52b5a8dd98 100644 (file)
@@ -771,10 +771,9 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
 
        /* allocate device context */
        ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
-
        if (!ndev)
                goto dev_alloc_err;
-       memset(ndev, 0, sizeof(*ndev));
+
        ndev->old_fw = old_firmware;
        ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
        if (!ndev->wq) {
index 039bed3cc919d2f5624e1635327e126abdfca064..4e783a68bf4aef998cf2c26353510a367dba338d 100644 (file)
@@ -57,7 +57,7 @@ static int tuner_attach_stv6110(struct ngene_channel *chan)
                chan->dev->card_info->fe_config[chan->number];
        struct stv6110x_config *tunerconf = (struct stv6110x_config *)
                chan->dev->card_info->tuner_config[chan->number];
-       struct stv6110x_devctl *ctl;
+       const struct stv6110x_devctl *ctl;
 
        /* tuner 1+2: i2c adapter #0, tuner 3+4: i2c adapter #1 */
        if (chan->number < 2)
index 1d2c310ce838e5fb415ebd4d3b533ac11b8c4b9b..94f8162444071ca3168af652b0c260acaeeab021 100644 (file)
@@ -1211,6 +1211,8 @@ static int alsa_device_init(struct saa7134_dev *dev)
 
 static int alsa_device_exit(struct saa7134_dev *dev)
 {
+       if (!snd_saa7134_cards[dev->nr])
+               return 1;
 
        snd_card_free(snd_saa7134_cards[dev->nr]);
        snd_saa7134_cards[dev->nr] = NULL;
@@ -1260,7 +1262,8 @@ static void saa7134_alsa_exit(void)
        int idx;
 
        for (idx = 0; idx < SNDRV_CARDS; idx++) {
-               snd_card_free(snd_saa7134_cards[idx]);
+               if (snd_saa7134_cards[idx])
+                       snd_card_free(snd_saa7134_cards[idx]);
        }
 
        saa7134_dmasound_init = NULL;
index 56b932c97196d68cc76dcdfd007dcf7b937bd40f..ca417a454d6787aecc77a0a338137dce78d7acb0 100644 (file)
@@ -189,6 +189,7 @@ static const struct v4l2_ioctl_ops ts_ioctl_ops = {
        .vidioc_querybuf                = vb2_ioctl_querybuf,
        .vidioc_qbuf                    = vb2_ioctl_qbuf,
        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
+       .vidioc_expbuf                  = vb2_ioctl_expbuf,
        .vidioc_streamon                = vb2_ioctl_streamon,
        .vidioc_streamoff               = vb2_ioctl_streamoff,
        .vidioc_g_frequency             = saa7134_g_frequency,
@@ -286,7 +287,7 @@ static int empress_init(struct saa7134_dev *dev)
         * transfers that do not start at the beginning of a page. A USERPTR
         * can start anywhere in a page, so USERPTR support is a no-go.
         */
-       q->io_modes = VB2_MMAP | VB2_READ;
+       q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
        q->drv_priv = &dev->ts_q;
        q->ops = &saa7134_empress_qops;
        q->gfp_flags = GFP_DMA32;
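Exporting buffers through dma-buf needs both halves shown in these saa7134 hunks: VB2_DMABUF added to the queue's io_modes and vb2_ioctl_expbuf() wired into the ioctl ops so VIDIOC_EXPBUF reaches videobuf2. A reduced sketch of the two touch points; the foo_* names are illustrative:

    #include <media/v4l2-ioctl.h>
    #include <media/videobuf2-v4l2.h>

    static const struct v4l2_ioctl_ops foo_ioctl_ops = {
            /* ... existing reqbufs/querybuf/qbuf/dqbuf handlers ... */
            .vidioc_expbuf  = vb2_ioctl_expbuf,     /* VIDIOC_EXPBUF -> dma-buf fd */
    };

    static void foo_init_queue(struct vb2_queue *q)
    {
            /* MMAP buffers can now also be handed out as dma-buf descriptors. */
            q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
    }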
index 8a2abb34186b3dfa08b3ac6a22af298b757bc132..2799538e2d7e043ac70a4985daee314a9392f18b 100644 (file)
@@ -378,7 +378,7 @@ static int saa7134_go7007_send_firmware(struct go7007 *go, u8 *data, int len)
        return 0;
 }
 
-static struct go7007_hpi_ops saa7134_go7007_hpi_ops = {
+static const struct go7007_hpi_ops saa7134_go7007_hpi_ops = {
        .interface_reset        = saa7134_go7007_interface_reset,
        .write_interrupt        = saa7134_go7007_write_interrupt,
        .read_interrupt         = saa7134_go7007_read_interrupt,
index a63c1366a64efad4ad837a5ae927a3417b39d580..7a42d3ae3ac933287884480beeb9d7623ce1abbc 100644 (file)
@@ -1906,6 +1906,7 @@ static const struct v4l2_ioctl_ops video_ioctl_ops = {
        .vidioc_querybuf                = vb2_ioctl_querybuf,
        .vidioc_qbuf                    = vb2_ioctl_qbuf,
        .vidioc_dqbuf                   = vb2_ioctl_dqbuf,
+       .vidioc_expbuf                  = vb2_ioctl_expbuf,
        .vidioc_s_std                   = saa7134_s_std,
        .vidioc_g_std                   = saa7134_g_std,
        .vidioc_querystd                = saa7134_querystd,
@@ -2089,7 +2090,7 @@ int saa7134_video_init1(struct saa7134_dev *dev)
         * USERPTR support is a no-go unless the application knows about these
         * limitations and has special support for this.
         */
-       q->io_modes = VB2_MMAP | VB2_READ;
+       q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
        if (saa7134_userptr)
                q->io_modes |= VB2_USERPTR;
        q->drv_priv = &dev->video_q;
index a69dc6a0752bb94fc8352c133571b31f37599f99..18d229fa65cfb3839e61a60cd8f4dfebd8fc55eb 100644 (file)
@@ -1739,7 +1739,7 @@ static int alps_tdlb7_request_firmware(struct dvb_frontend* fe, const struct fir
 #endif
 }
 
-static struct sp8870_config alps_tdlb7_config = {
+static const struct sp8870_config alps_tdlb7_config = {
 
        .demod_address = 0x71,
        .request_firmware = alps_tdlb7_request_firmware,
index de54310a26603b23a152b174b3d04c2ce7cd58df..9f48100227f150f216c9ecfaf8c704d73ae6f3b8 100644 (file)
@@ -644,7 +644,7 @@ static void frontend_init(struct budget *budget)
                }
 
        case 0x101c: { /* TT S2-1600 */
-                       struct stv6110x_devctl *ctl;
+                       const struct stv6110x_devctl *ctl;
                        saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
                        msleep(50);
                        saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
@@ -697,7 +697,7 @@ static void frontend_init(struct budget *budget)
                break;
 
        case 0x1020: { /* Omicom S2 */
-                       struct stv6110x_devctl *ctl;
+                       const struct stv6110x_devctl *ctl;
                        saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTLO);
                        msleep(50);
                        saa7146_setgpio(budget->dev, 2, SAA7146_GPIO_OUTHI);
index 526359447ff90fda699b032e0dbed4d6a06e577e..fd4fcd5a7184ce08f31d1ea11b8ea730496121ae 100644 (file)
@@ -120,6 +120,18 @@ source "drivers/media/platform/s5p-tv/Kconfig"
 source "drivers/media/platform/am437x/Kconfig"
 source "drivers/media/platform/xilinx/Kconfig"
 
+config VIDEO_TI_CAL
+       tristate "TI CAL (Camera Adaptation Layer) driver"
+       depends on VIDEO_DEV && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+       depends on SOC_DRA7XX || COMPILE_TEST
+       select VIDEOBUF2_DMA_CONTIG
+       default n
+       ---help---
+         Support for the TI CAL (Camera Adaptation Layer) block
+         found on the DRA72X SoC.
+         In the TI Technical Reference Manual this module is referred
+         to as the Camera Interface Subsystem (CAMSS).
+
 endif # V4L_PLATFORM_DRIVERS
 
 menuconfig V4L_MEM2MEM_DRIVERS
@@ -215,6 +227,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
 config VIDEO_STI_BDISP
        tristate "STMicroelectronics BDISP 2D blitter driver"
        depends on VIDEO_DEV && VIDEO_V4L2
+       depends on HAS_DMA
        depends on ARCH_STI || COMPILE_TEST
        select VIDEOBUF2_DMA_CONTIG
        select V4L2_MEM2MEM_DEV
index efa0295af87bfe089acb526329b1601e95756c89..028a7233096baff93845935b4be126a65dadd32b 100644 (file)
@@ -18,6 +18,8 @@ obj-$(CONFIG_VIDEO_VIM2M)             += vim2m.o
 
 obj-$(CONFIG_VIDEO_TI_VPE)             += ti-vpe/
 
+obj-$(CONFIG_VIDEO_TI_CAL)             += ti-vpe/
+
 obj-$(CONFIG_VIDEO_MX2_EMMAPRP)                += mx2_emmaprp.o
 obj-$(CONFIG_VIDEO_CODA)               += coda/
 
index 7d28899f89ce16c00cd9f00f424a948fa57e2ae8..6efe9d0029611251ba0621210dbeee41606abb08 100644 (file)
@@ -1342,7 +1342,7 @@ static void coda_finish_encode(struct coda_ctx *ctx)
 
        /* Calculate bytesused field */
        if (dst_buf->sequence == 0) {
-               vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+               vb2_set_plane_payload(&dst_buf->vb2_buf, 0, wr_ptr - start_ptr +
                                        ctx->vpu_header_size[0] +
                                        ctx->vpu_header_size[1] +
                                        ctx->vpu_header_size[2]);
index ffbefdff6b5ef460d4a7c1abce024ed9841d8b4c..6fba32bec97455b8a98ddd255252b88e178ed0ea 100644 (file)
@@ -261,7 +261,7 @@ static int ccdc_update_raw_params(struct ccdc_config_params_raw *raw_params)
         */
        if (raw_params->fault_pxl.fp_num != config_params->fault_pxl.fp_num) {
                if (fpc_physaddr != NULL) {
-                       free_pages((unsigned long)fpc_physaddr,
+                       free_pages((unsigned long)fpc_virtaddr,
                                   get_order
                                   (config_params->fault_pxl.fp_num *
                                   FP_NUM_BYTES));
index 93782f15b8252092ba31a81b8c135de581f8dc4a..a600e32e25430a8ddc82e090f373a87d4e335b57 100644 (file)
@@ -700,7 +700,7 @@ static unsigned int gsc_m2m_poll(struct file *file,
 {
        struct gsc_ctx *ctx = fh_to_ctx(file->private_data);
        struct gsc_dev *gsc = ctx->gsc_dev;
-       int ret;
+       unsigned int ret;
 
        if (mutex_lock_interruptible(&gsc->lock))
                return -ERESTARTSYS;
index 40423c6c5324a8e1cf2573b2acffeb566ad74655..57d42c6172c576757b7724906852c7ec9ac34911 100644 (file)
@@ -1,6 +1,6 @@
 
 config VIDEO_SAMSUNG_EXYNOS4_IS
-       bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
+       tristate "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
        depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
        depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
        depends on OF && COMMON_CLK
index 49658ca39e5100d70ed9011eea6adb4cd02c3c55..bf2a54e7e47022325f799fc5378fefff04cec126 100644 (file)
@@ -297,10 +297,10 @@ int fimc_is_wait_event(struct fimc_is *is, unsigned long bit,
        int ret = wait_event_timeout(is->irq_queue,
                                     !state ^ test_bit(bit, &is->state),
                                     timeout);
-       if (ret == 0) {
-               dev_WARN(&is->pdev->dev, "%s() timed out\n", __func__);
+
+       if (dev_WARN(&is->pdev->dev, ret == 0, "%s() timed out\n", __func__))
                return -ETIME;
-       }
+
        return 0;
 }
 
@@ -631,6 +631,12 @@ static int fimc_is_hw_open_sensor(struct fimc_is *is,
 
        fimc_is_mem_barrier();
 
+       /*
+        * Some user space use cases hang up here without this
+        * empirically chosen delay.
+        */
+       udelay(100);
+
        mcuctl_write(HIC_OPEN_SENSOR, is, MCUCTL_REG_ISSR(0));
        mcuctl_write(is->sensor_index, is, MCUCTL_REG_ISSR(1));
        mcuctl_write(sensor->drvdata->id, is, MCUCTL_REG_ISSR(2));
index bf9261eb57a15c37a8b82fb3197a94649e4cc9c6..c0816728cbfe1d2be7c6eb5c987a4ac3b8213a74 100644 (file)
@@ -218,8 +218,8 @@ static void isp_video_capture_buffer_queue(struct vb2_buffer *vb)
                                                        ivb->dma_addr[i];
 
                        isp_dbg(2, &video->ve.vdev,
-                               "dma_buf %pad (%d/%d/%d) addr: %pad\n",
-                               &buf_index, ivb->index, i, vb->index,
+                               "dma_buf %d (%d/%d/%d) addr: %pad\n",
+                               buf_index, ivb->index, i, vb->index,
                                &ivb->dma_addr[i]);
                }
 
index f3b2dd30ec7769b3199da9ed2694df5886004616..feb521f28e14857b8344d7320769c87183cd17ac 100644 (file)
@@ -185,6 +185,37 @@ error:
        return ret;
 }
 
+/**
+ * __fimc_pipeline_enable - enable power of all pipeline subdevs
+ *                         and the sensor clock
+ * @ep: video pipeline structure
+ * @fmd: fimc media device
+ *
+ * Called with the graph mutex held.
+ */
+static int __fimc_pipeline_enable(struct exynos_media_pipeline *ep,
+                                 struct fimc_md *fmd)
+{
+       struct fimc_pipeline *p = to_fimc_pipeline(ep);
+       int ret;
+
+       /* Enable PXLASYNC clock if this pipeline includes FIMC-IS */
+       if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
+               ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
+               if (ret < 0)
+                       return ret;
+       }
+
+       ret = fimc_pipeline_s_power(p, 1);
+       if (!ret)
+               return 0;
+
+       if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
+               clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
+
+       return ret;
+}
+
 /**
  * __fimc_pipeline_open - update the pipeline information, enable power
  *                        of all pipeline subdevs and the sensor clock
@@ -199,7 +230,6 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
        struct fimc_md *fmd = entity_to_fimc_mdev(me);
        struct fimc_pipeline *p = to_fimc_pipeline(ep);
        struct v4l2_subdev *sd;
-       int ret;
 
        if (WARN_ON(p == NULL || me == NULL))
                return -EINVAL;
@@ -208,24 +238,16 @@ static int __fimc_pipeline_open(struct exynos_media_pipeline *ep,
                fimc_pipeline_prepare(p, me);
 
        sd = p->subdevs[IDX_SENSOR];
-       if (sd == NULL)
-               return -EINVAL;
-
-       /* Disable PXLASYNC clock if this pipeline includes FIMC-IS */
-       if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP]) {
-               ret = clk_prepare_enable(fmd->wbclk[CLK_IDX_WB_B]);
-               if (ret < 0)
-                       return ret;
-       }
-
-       ret = fimc_pipeline_s_power(p, 1);
-       if (!ret)
+       if (sd == NULL) {
+               pr_warn("%s(): No sensor subdev\n", __func__);
+               /*
+                * Do not fail the pipeline open here, so that user space
+                * can still configure the pipeline (e.g. link a sensor)
+                * before starting to stream.
+                */
                return 0;
+       }
 
-       if (!IS_ERR(fmd->wbclk[CLK_IDX_WB_B]) && p->subdevs[IDX_IS_ISP])
-               clk_disable_unprepare(fmd->wbclk[CLK_IDX_WB_B]);
-
-       return ret;
+       return __fimc_pipeline_enable(ep, fmd);
 }
 
 /**
@@ -269,10 +291,43 @@ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
                { IDX_CSIS, IDX_FLITE, IDX_FIMC, IDX_SENSOR, IDX_IS_ISP },
        };
        struct fimc_pipeline *p = to_fimc_pipeline(ep);
+       struct fimc_md *fmd = entity_to_fimc_mdev(&p->subdevs[IDX_CSIS]->entity);
+       enum fimc_subdev_index sd_id;
        int i, ret = 0;
 
-       if (p->subdevs[IDX_SENSOR] == NULL)
-               return -ENODEV;
+       if (p->subdevs[IDX_SENSOR] == NULL) {
+               if (!fmd->user_subdev_api) {
+                       /*
+                        * Sensor must be already discovered if we
+                        * aren't in the user_subdev_api mode
+                        */
+                       return -ENODEV;
+               }
+
+               /* Get pipeline sink entity */
+               if (p->subdevs[IDX_FIMC])
+                       sd_id = IDX_FIMC;
+               else if (p->subdevs[IDX_IS_ISP])
+                       sd_id = IDX_IS_ISP;
+               else if (p->subdevs[IDX_FLITE])
+                       sd_id = IDX_FLITE;
+               else
+                       return -ENODEV;
+
+               /*
+                * Sensor could have been linked between open and STREAMON -
+                * check if this is the case.
+                */
+               fimc_pipeline_prepare(p, &p->subdevs[sd_id]->entity);
+
+               if (p->subdevs[IDX_SENSOR] == NULL)
+                       return -ENODEV;
+
+               ret = __fimc_pipeline_enable(ep, fmd);
+               if (ret < 0)
+                       return ret;
+
+       }
 
        for (i = 0; i < IDX_MAX; i++) {
                unsigned int idx = seq[on][i];
@@ -282,8 +337,10 @@ static int __fimc_pipeline_s_stream(struct exynos_media_pipeline *ep, bool on)
                if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
                        goto error;
        }
+
        return 0;
 error:
+       fimc_pipeline_s_power(p, !on);
        for (; i >= 0; i--) {
                unsigned int idx = seq[on][i];
                v4l2_subdev_call(p->subdevs[idx], video, s_stream, !on);
@@ -332,13 +389,19 @@ static int fimc_md_parse_port_node(struct fimc_md *fmd,
        struct fimc_source_info *pd = &fmd->sensor[index].pdata;
        struct device_node *rem, *ep, *np;
        struct v4l2_of_endpoint endpoint;
+       int ret;
 
        /* Assume here a port node can have only one endpoint node. */
        ep = of_get_next_child(port, NULL);
        if (!ep)
                return 0;
 
-       v4l2_of_parse_endpoint(ep, &endpoint);
+       ret = v4l2_of_parse_endpoint(ep, &endpoint);
+       if (ret) {
+               of_node_put(ep);
+               return ret;
+       }
+
        if (WARN_ON(endpoint.base.port == 0) || index >= FIMC_MAX_SENSORS)
                return -EINVAL;
 
@@ -429,8 +492,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
                        continue;
 
                ret = fimc_md_parse_port_node(fmd, port, index);
-               if (ret < 0)
+               if (ret < 0) {
+                       of_node_put(node);
                        goto rpm_put;
+               }
                index++;
        }
 
@@ -441,8 +506,10 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
 
        for_each_child_of_node(ports, node) {
                ret = fimc_md_parse_port_node(fmd, node, index);
-               if (ret < 0)
+               if (ret < 0) {
+                       of_node_put(node);
                        break;
+               }
                index++;
        }
 rpm_put:
@@ -650,8 +717,10 @@ static int fimc_md_register_platform_entities(struct fimc_md *fmd,
                        ret = fimc_md_register_platform_entity(fmd, pdev,
                                                        plat_entity);
                put_device(&pdev->dev);
-               if (ret < 0)
+               if (ret < 0) {
+                       of_node_put(node);
                        break;
+               }
        }
 
        return ret;
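The of_node_put() calls added on the early-exit paths above close a reference leak: for_each_child_of_node() takes a reference on each child and only drops it when the loop advances, so breaking out or returning leaves the current node pinned. A minimal sketch of the corrected loop shape; foo_parse_one_port() is a hypothetical per-port helper:

    #include <linux/of.h>

    static int foo_parse_one_port(struct device_node *node) { return 0; }

    static int foo_parse_ports(struct device_node *ports)
    {
            struct device_node *node;
            int ret;

            for_each_child_of_node(ports, node) {
                    ret = foo_parse_one_port(node);
                    if (ret < 0) {
                            /* the iterator holds a reference on 'node';
                             * drop it before leaving the loop early */
                            of_node_put(node);
                            return ret;
                    }
            }

            return 0;
    }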
index ac5e50e595be83cd7ac973a6eab1d5866fca1d1b..7f067c3c06df2a172dfb8ee4a75fc0ac65ebf448 100644 (file)
@@ -736,6 +736,7 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
 {
        struct device_node *node = pdev->dev.of_node;
        struct v4l2_of_endpoint endpoint;
+       int ret;
 
        if (of_property_read_u32(node, "clock-frequency",
                                 &state->clk_frequency))
@@ -751,7 +752,9 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
                return -EINVAL;
        }
        /* Get port node and validate MIPI-CSI channel id. */
-       v4l2_of_parse_endpoint(node, &endpoint);
+       ret = v4l2_of_parse_endpoint(node, &endpoint);
+       if (ret)
+               goto err;
 
        state->index = endpoint.base.port - FIMC_INPUT_MIPI_CSI2_0;
        if (state->index >= CSIS_MAX_ENTITIES)
@@ -764,9 +767,10 @@ static int s5pcsis_parse_dt(struct platform_device *pdev,
                                        "samsung,csis-wclk");
 
        state->num_lanes = endpoint.bus.mipi_csi2.num_data_lanes;
-       of_node_put(node);
 
-       return 0;
+err:
+       of_node_put(node);
+       return ret;
 }
 
 static int s5pcsis_pm_resume(struct device *dev, bool runtime);
@@ -838,7 +842,7 @@ static int s5pcsis_probe(struct platform_device *pdev)
                ret = clk_set_rate(state->clock[CSIS_CLK_MUX],
                                   state->clk_frequency);
        else
-               dev_WARN(dev, "No clock frequency specified!\n");
+               dev_WARN(dev, true, "No clock frequency specified!\n");
        if (ret < 0)
                goto e_clkput;
 
index 0bcfa553c1aa254b63789980d50a1dc5a0149d5f..f9e5245f26acca69a0113b05d950d2611bb6a3d0 100644 (file)
@@ -449,7 +449,7 @@ void omap3isp_configure_bridge(struct isp_device *isp,
        case CCDC_INPUT_PARALLEL:
                ispctrl_val |= ISPCTRL_PAR_SER_CLK_SEL_PARALLEL;
                ispctrl_val |= parcfg->clk_pol << ISPCTRL_PAR_CLK_POL_SHIFT;
-               shift += parcfg->data_lane_shift * 2;
+               shift += parcfg->data_lane_shift;
                break;
 
        case CCDC_INPUT_CSI2A:
@@ -2235,8 +2235,11 @@ static int isp_of_parse_node(struct device *dev, struct device_node *node,
        struct isp_bus_cfg *buscfg = &isd->bus;
        struct v4l2_of_endpoint vep;
        unsigned int i;
+       int ret;
 
-       v4l2_of_parse_endpoint(node, &vep);
+       ret = v4l2_of_parse_endpoint(node, &vep);
+       if (ret)
+               return ret;
 
        dev_dbg(dev, "parsing endpoint %s, interface %u\n", node->full_name,
                vep.base.port);
@@ -2528,12 +2531,13 @@ static int isp_probe(struct platform_device *pdev)
        }
 
        /* Interrupt */
-       isp->irq_num = platform_get_irq(pdev, 0);
-       if (isp->irq_num <= 0) {
+       ret = platform_get_irq(pdev, 0);
+       if (ret <= 0) {
                dev_err(isp->dev, "No IRQ resource\n");
                ret = -ENODEV;
                goto error_iommu;
        }
+       isp->irq_num = ret;
 
        if (devm_request_irq(isp->dev, isp->irq_num, isp_isr, IRQF_SHARED,
                             "OMAP3 ISP", isp)) {
@@ -2599,6 +2603,7 @@ static const struct of_device_id omap3isp_of_table[] = {
        { .compatible = "ti,omap3-isp" },
        { },
 };
+MODULE_DEVICE_TABLE(of, omap3isp_of_table);
 
 static struct platform_driver omap3isp_driver = {
        .probe = isp_probe,
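The IRQ hunk above works around a sign problem: platform_get_irq() reports failure through a negative errno (and 0 is rejected here as well), so the result must be checked in a signed local before it is stored in the driver's IRQ field. A sketch of that lookup pattern, assuming an unsigned irq field as in this driver; foo_get_irq() is illustrative:

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    static int foo_get_irq(struct platform_device *pdev, unsigned int *irq_out)
    {
            int ret;

            ret = platform_get_irq(pdev, 0);
            if (ret <= 0)                   /* negative errno, or 0 == no IRQ */
                    return ret ? ret : -ENODEV;

            *irq_out = ret;                 /* store only once known positive */
            return 0;
    }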
index bb3974c98e37ebdfc1306534fd9d0555b67273de..882310eb45ccfa51e309769058a21433aa485655 100644 (file)
@@ -2421,7 +2421,7 @@ static int ccdc_link_validate(struct v4l2_subdev *sd,
                        &((struct isp_bus_cfg *)
                          media_entity_to_v4l2_subdev(link->source->entity)
                          ->host_priv)->bus.parallel;
-               parallel_shift = parcfg->data_lane_shift * 2;
+               parallel_shift = parcfg->data_lane_shift;
        } else {
                parallel_shift = 0;
        }
index 84a96670e2e71483442a584d2bcd124f4f0c6da4..ac30a0f837801ae57d3227429e4492a23306a0a9 100644 (file)
@@ -1480,13 +1480,6 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
        struct isp_buffer *buffer;
        int restart = 0;
 
-       if (prev->input == PREVIEW_INPUT_MEMORY) {
-               buffer = omap3isp_video_buffer_next(&prev->video_in);
-               if (buffer != NULL)
-                       preview_set_inaddr(prev, buffer->dma);
-               pipe->state |= ISP_PIPELINE_IDLE_INPUT;
-       }
-
        if (prev->output & PREVIEW_OUTPUT_MEMORY) {
                buffer = omap3isp_video_buffer_next(&prev->video_out);
                if (buffer != NULL) {
@@ -1496,6 +1489,13 @@ static void preview_isr_buffer(struct isp_prev_device *prev)
                pipe->state |= ISP_PIPELINE_IDLE_OUTPUT;
        }
 
+       if (prev->input == PREVIEW_INPUT_MEMORY) {
+               buffer = omap3isp_video_buffer_next(&prev->video_in);
+               if (buffer != NULL)
+                       preview_set_inaddr(prev, buffer->dma);
+               pipe->state |= ISP_PIPELINE_IDLE_INPUT;
+       }
+
        switch (prev->state) {
        case ISP_PIPELINE_STREAM_SINGLESHOT:
                if (isp_pipeline_ready(pipe))
index 994dfc0813f6b7556fb43b05b8536b29f4c6f8b1..2aff755ff77c37741d540181ce25e32dc8bf0ba1 100644 (file)
@@ -434,10 +434,68 @@ static void isp_video_buffer_queue(struct vb2_buffer *buf)
        }
 }
 
+/*
+ * omap3isp_video_return_buffers - Return all queued buffers to videobuf2
+ * @video: ISP video object
+ * @state: new state for the returned buffers
+ *
+ * Return all buffers queued on the video node to videobuf2 in the given state.
+ * The buffer state should be VB2_BUF_STATE_QUEUED if called due to an error
+ * when starting the stream, or VB2_BUF_STATE_ERROR otherwise.
+ *
+ * The function must be called with the video irqlock held.
+ */
+static void omap3isp_video_return_buffers(struct isp_video *video,
+                                         enum vb2_buffer_state state)
+{
+       while (!list_empty(&video->dmaqueue)) {
+               struct isp_buffer *buf;
+
+               buf = list_first_entry(&video->dmaqueue,
+                                      struct isp_buffer, irqlist);
+               list_del(&buf->irqlist);
+               vb2_buffer_done(&buf->vb.vb2_buf, state);
+       }
+}
+
+static int isp_video_start_streaming(struct vb2_queue *queue,
+                                    unsigned int count)
+{
+       struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
+       struct isp_video *video = vfh->video;
+       struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
+       unsigned long flags;
+       int ret;
+
+       /* In sensor-to-memory mode, the stream can be started synchronously
+        * with the streamon command. In memory-to-memory mode, it will be
+        * started when buffers are queued on both the input and output.
+        */
+       if (pipe->input)
+               return 0;
+
+       ret = omap3isp_pipeline_set_stream(pipe,
+                                          ISP_PIPELINE_STREAM_CONTINUOUS);
+       if (ret < 0) {
+               spin_lock_irqsave(&video->irqlock, flags);
+               omap3isp_video_return_buffers(video, VB2_BUF_STATE_QUEUED);
+               spin_unlock_irqrestore(&video->irqlock, flags);
+               return ret;
+       }
+
+       spin_lock_irqsave(&video->irqlock, flags);
+       if (list_empty(&video->dmaqueue))
+               video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
+       spin_unlock_irqrestore(&video->irqlock, flags);
+
+       return 0;
+}
+
 static const struct vb2_ops isp_video_queue_ops = {
        .queue_setup = isp_video_queue_setup,
        .buf_prepare = isp_video_buffer_prepare,
        .buf_queue = isp_video_buffer_queue,
+       .start_streaming = isp_video_start_streaming,
 };
 
 /*
@@ -459,7 +517,7 @@ static const struct vb2_ops isp_video_queue_ops = {
 struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
 {
        struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
-       enum isp_pipeline_state state;
+       enum vb2_buffer_state vb_state;
        struct isp_buffer *buf;
        unsigned long flags;
 
@@ -495,17 +553,19 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
 
        /* Report pipeline errors to userspace on the capture device side. */
        if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
-               state = VB2_BUF_STATE_ERROR;
+               vb_state = VB2_BUF_STATE_ERROR;
                pipe->error = false;
        } else {
-               state = VB2_BUF_STATE_DONE;
+               vb_state = VB2_BUF_STATE_DONE;
        }
 
-       vb2_buffer_done(&buf->vb.vb2_buf, state);
+       vb2_buffer_done(&buf->vb.vb2_buf, vb_state);
 
        spin_lock_irqsave(&video->irqlock, flags);
 
        if (list_empty(&video->dmaqueue)) {
+               enum isp_pipeline_state state;
+
                spin_unlock_irqrestore(&video->irqlock, flags);
 
                if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -541,26 +601,16 @@ struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
  * omap3isp_video_cancel_stream - Cancel stream on a video node
  * @video: ISP video object
  *
- * Cancelling a stream mark all buffers on the video node as erroneous and makes
- * sure no new buffer can be queued.
+ * Cancelling a stream returns all buffers queued on the video node to videobuf2
+ * in the erroneous state and makes sure no new buffer can be queued.
  */
 void omap3isp_video_cancel_stream(struct isp_video *video)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&video->irqlock, flags);
-
-       while (!list_empty(&video->dmaqueue)) {
-               struct isp_buffer *buf;
-
-               buf = list_first_entry(&video->dmaqueue,
-                                      struct isp_buffer, irqlist);
-               list_del(&buf->irqlist);
-               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
-       }
-
+       omap3isp_video_return_buffers(video, VB2_BUF_STATE_ERROR);
        video->error = true;
-
        spin_unlock_irqrestore(&video->irqlock, flags);
 }
 
@@ -1087,29 +1137,10 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
        if (ret < 0)
                goto err_check_format;
 
-       /* In sensor-to-memory mode, the stream can be started synchronously
-        * to the stream on command. In memory-to-memory mode, it will be
-        * started when buffers are queued on both the input and output.
-        */
-       if (pipe->input == NULL) {
-               ret = omap3isp_pipeline_set_stream(pipe,
-                                             ISP_PIPELINE_STREAM_CONTINUOUS);
-               if (ret < 0)
-                       goto err_set_stream;
-               spin_lock_irqsave(&video->irqlock, flags);
-               if (list_empty(&video->dmaqueue))
-                       video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
-               spin_unlock_irqrestore(&video->irqlock, flags);
-       }
-
        mutex_unlock(&video->stream_lock);
 
        return 0;
 
-err_set_stream:
-       mutex_lock(&video->queue_lock);
-       vb2_streamoff(&vfh->queue, type);
-       mutex_unlock(&video->queue_lock);
 err_check_format:
        media_entity_pipeline_stop(&video->video.entity);
 err_pipeline_start:
index 190e259a6a2df87553dd2e7c3b655b0f7ebca08e..443e8f7673e2fe02164ba907ca5744a12bf44785 100644 (file)
@@ -33,9 +33,9 @@ enum isp_interface_type {
  * struct isp_parallel_cfg - Parallel interface configuration
  * @data_lane_shift: Data lane shifter
  *             0 - CAMEXT[13:0] -> CAM[13:0]
- *             1 - CAMEXT[13:2] -> CAM[11:0]
- *             2 - CAMEXT[13:4] -> CAM[9:0]
- *             3 - CAMEXT[13:6] -> CAM[7:0]
+ *             2 - CAMEXT[13:2] -> CAM[11:0]
+ *             4 - CAMEXT[13:4] -> CAM[9:0]
+ *             6 - CAMEXT[13:6] -> CAM[7:0]
  * @clk_pol: Pixel clock polarity
  *             0 - Sample on rising edge, 1 - Sample on falling edge
  * @hs_pol: Horizontal synchronization polarity
@@ -48,7 +48,7 @@ enum isp_interface_type {
  *             0 - Normal, 1 - One's complement
  */
 struct isp_parallel_cfg {
-       unsigned int data_lane_shift:2;
+       unsigned int data_lane_shift:3;
        unsigned int clk_pol:1;
        unsigned int hs_pol:1;
        unsigned int vs_pol:1;
index 485f5259acb082d4e71484c12ad730c0142dc77c..552789a69c8645bd94af18f71cee62192fa3d80c 100644 (file)
@@ -1613,6 +1613,7 @@ static const struct of_device_id jpu_dt_ids[] = {
        { .compatible = "renesas,jpu-r8a7791" }, /* M2-W */
        { .compatible = "renesas,jpu-r8a7792" }, /* V2H */
        { .compatible = "renesas,jpu-r8a7793" }, /* M2-N */
+       { .compatible = "renesas,rcar-gen2-jpu" },
        { },
 };
 MODULE_DEVICE_TABLE(of, jpu_dt_ids);
index c398b285180cda757ec291bb6cc0c004c3c27de4..1af779ee3c747f31aff5d79d7d3db66d65a41b03 100644 (file)
@@ -795,7 +795,7 @@ static int isi_camera_get_formats(struct soc_camera_device *icd,
                        xlate->host_fmt = &isi_camera_formats[i];
                        xlate->code     = code.code;
                        dev_dbg(icd->parent, "Providing format %s using code %d\n",
-                               isi_camera_formats[0].name, code.code);
+                               xlate->host_fmt->name, xlate->code);
                }
                break;
        default:
index b7fd695b9ed5187d2f8d2c356269ebe6fc1d26d4..dc75a80794fb92658f0ab375d924eeeed74b3467 100644 (file)
 #define RCAR_VIN_BT656                 (1 << 3)
 
 enum chip_id {
+       RCAR_GEN3,
        RCAR_GEN2,
        RCAR_H1,
        RCAR_M1,
@@ -1818,6 +1819,7 @@ static struct soc_camera_host_ops rcar_vin_host_ops = {
 
 #ifdef CONFIG_OF
 static const struct of_device_id rcar_vin_of_table[] = {
+       { .compatible = "renesas,vin-r8a7795", .data = (void *)RCAR_GEN3 },
        { .compatible = "renesas,vin-r8a7794", .data = (void *)RCAR_GEN2 },
        { .compatible = "renesas,vin-r8a7793", .data = (void *)RCAR_GEN2 },
        { .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 },
index 90c87f2b4ec075bbe96ee15201494f3dcc757753..b9f369c0fb9473190bfc7d4f41c63b503a1b1785 100644 (file)
@@ -213,8 +213,7 @@ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
                        unsigned int *count, unsigned int *num_planes,
                        unsigned int sizes[], void *alloc_ctxs[])
 {
-       struct soc_camera_device *icd = container_of(vq,
-                       struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
 
@@ -361,8 +360,7 @@ static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
 static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
 {
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-       struct soc_camera_device *icd = container_of(vb->vb2_queue,
-                       struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
        struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
@@ -413,8 +411,7 @@ error:
 static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
 {
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-       struct soc_camera_device *icd = container_of(vb->vb2_queue,
-                       struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
@@ -444,8 +441,7 @@ static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
 static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
 {
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
-       struct soc_camera_device *icd = container_of(vb->vb2_queue,
-                       struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
 
@@ -460,7 +456,7 @@ static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
 
 static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
 {
-       struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
+       struct soc_camera_device *icd = soc_camera_from_vb2q(q);
        struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
        struct sh_mobile_ceu_dev *pcdev = ici->priv;
        struct list_head *buf_head, *tmp;
index cc84c6d6a701ce249722010ed514c1e4a4630def..46c7186f78679a5ca09a01d8709cbb77cf1332bc 100644 (file)
@@ -1493,6 +1493,8 @@ static void soc_camera_async_unbind(struct v4l2_async_notifier *notifier,
                                        struct soc_camera_async_client, notifier);
        struct soc_camera_device *icd = platform_get_drvdata(sasc->pdev);
 
+       icd->control = NULL;
+
        if (icd->clk) {
                v4l2_clk_unregister(icd->clk);
                icd->clk = NULL;
index 69d7fe4471c2ce887c57f15551df6fbd1f385b63..2c0015b1264d3254451a7c56f9c9b1a69b760ad9 100644 (file)
@@ -118,7 +118,7 @@ int c8sectpfe_frontend_attach(struct dvb_frontend **fe,
                struct channel_info *tsin, int chan_num)
 {
        struct tda18212_config *tda18212;
-       struct stv6110x_devctl *fe2;
+       const struct stv6110x_devctl *fe2;
        struct i2c_client *client;
        struct i2c_board_info tda18212_info = {
                .type = "tda18212",
index be680f839e77c9f3d7ab0e7366762093a35e88fe..e236059a60ad3ccf9b16c1861b5297abd549c266 100644 (file)
@@ -3,3 +3,7 @@ obj-$(CONFIG_VIDEO_TI_VPE) += ti-vpe.o
 ti-vpe-y := vpe.o sc.o csc.o vpdma.o
 
 ccflags-$(CONFIG_VIDEO_TI_VPE_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VIDEO_TI_CAL) += ti-cal.o
+
+ti-cal-y := cal.o
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
new file mode 100644 (file)
index 0000000..69ec79b
--- /dev/null
@@ -0,0 +1,1970 @@
+/*
+ * TI CAL camera interface driver
+ *
+ * Copyright (c) 2015 Texas Instruments Inc.
+ * Benoit Parrot, <bparrot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+
+#include <media/v4l2-of.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-fh.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+#include "cal_regs.h"
+
+#define CAL_MODULE_NAME "cal"
+
+#define MAX_WIDTH 1920
+#define MAX_HEIGHT 1200
+
+#define CAL_VERSION "0.1.0"
+
+MODULE_DESCRIPTION("TI CAL driver");
+MODULE_AUTHOR("Benoit Parrot, <bparrot@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(CAL_VERSION);
+
+static unsigned video_nr = -1;
+module_param(video_nr, uint, 0644);
+MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect");
+
+static unsigned debug;
+module_param(debug, uint, 0644);
+MODULE_PARM_DESC(debug, "activates debug info");
+
+/* timeperframe: default value */
+static const struct v4l2_fract
+       tpf_default = {.numerator = 1001,       .denominator = 30000};
+
+#define cal_dbg(level, caldev, fmt, arg...)    \
+               v4l2_dbg(level, debug, &caldev->v4l2_dev, fmt, ##arg)
+#define cal_info(caldev, fmt, arg...)  \
+               v4l2_info(&caldev->v4l2_dev, fmt, ##arg)
+#define cal_err(caldev, fmt, arg...)   \
+               v4l2_err(&caldev->v4l2_dev, fmt, ##arg)
+
+#define ctx_dbg(level, ctx, fmt, arg...)       \
+               v4l2_dbg(level, debug, &ctx->v4l2_dev, fmt, ##arg)
+#define ctx_info(ctx, fmt, arg...)     \
+               v4l2_info(&ctx->v4l2_dev, fmt, ##arg)
+#define ctx_err(ctx, fmt, arg...)      \
+               v4l2_err(&ctx->v4l2_dev, fmt, ##arg)
+
+#define CAL_NUM_INPUT 1
+#define CAL_NUM_CONTEXT 2
+
+#define bytes_per_line(pixel, bpp) (ALIGN((pixel) * (bpp), 16))
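A quick worked example of the line-stride macro above; the width and depth values are illustrative only, not taken from the patch:

/*
 * bytes_per_line(333, 2) = ALIGN(666, 16) = 672,
 * i.e. a 333-pixel-wide, 16-bit-per-pixel line is padded up to the
 * next 16-byte multiple.
 */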
+
+#define reg_read(dev, offset) ioread32(dev->base + offset)
+#define reg_write(dev, offset, val) iowrite32(val, dev->base + offset)
+
+#define reg_read_field(dev, offset, mask) get_field(reg_read(dev, offset), \
+                                                   mask)
+#define reg_write_field(dev, offset, field, mask) { \
+       u32 val = reg_read(dev, offset); \
+       set_field(&val, field, mask); \
+       reg_write(dev, offset, val); }
+
+/* ------------------------------------------------------------------
+ *     Basic structures
+ * ------------------------------------------------------------------
+ */
+
+struct cal_fmt {
+       u32     fourcc;
+       u32     code;
+       u8      depth;
+};
+
+static struct cal_fmt cal_formats[] = {
+       {
+               .fourcc         = V4L2_PIX_FMT_YUYV,
+               .code           = MEDIA_BUS_FMT_YUYV8_2X8,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_UYVY,
+               .code           = MEDIA_BUS_FMT_UYVY8_2X8,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_YVYU,
+               .code           = MEDIA_BUS_FMT_YVYU8_2X8,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_VYUY,
+               .code           = MEDIA_BUS_FMT_VYUY8_2X8,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
+               .code           = MEDIA_BUS_FMT_RGB565_2X8_LE,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
+               .code           = MEDIA_BUS_FMT_RGB565_2X8_BE,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB555, /* gggbbbbb arrrrrgg */
+               .code           = MEDIA_BUS_FMT_RGB555_2X8_PADHI_LE,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB555X, /* arrrrrgg gggbbbbb */
+               .code           = MEDIA_BUS_FMT_RGB555_2X8_PADHI_BE,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB24, /* rgb */
+               .code           = MEDIA_BUS_FMT_RGB888_2X12_LE,
+               .depth          = 24,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_BGR24, /* bgr */
+               .code           = MEDIA_BUS_FMT_RGB888_2X12_BE,
+               .depth          = 24,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_RGB32, /* argb */
+               .code           = MEDIA_BUS_FMT_ARGB8888_1X32,
+               .depth          = 32,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SBGGR8,
+               .code           = MEDIA_BUS_FMT_SBGGR8_1X8,
+               .depth          = 8,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGBRG8,
+               .code           = MEDIA_BUS_FMT_SGBRG8_1X8,
+               .depth          = 8,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGRBG8,
+               .code           = MEDIA_BUS_FMT_SGRBG8_1X8,
+               .depth          = 8,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SRGGB8,
+               .code           = MEDIA_BUS_FMT_SRGGB8_1X8,
+               .depth          = 8,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SBGGR10,
+               .code           = MEDIA_BUS_FMT_SBGGR10_1X10,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGBRG10,
+               .code           = MEDIA_BUS_FMT_SGBRG10_1X10,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGRBG10,
+               .code           = MEDIA_BUS_FMT_SGRBG10_1X10,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SRGGB10,
+               .code           = MEDIA_BUS_FMT_SRGGB10_1X10,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SBGGR12,
+               .code           = MEDIA_BUS_FMT_SBGGR12_1X12,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGBRG12,
+               .code           = MEDIA_BUS_FMT_SGBRG12_1X12,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SGRBG12,
+               .code           = MEDIA_BUS_FMT_SGRBG12_1X12,
+               .depth          = 16,
+       }, {
+               .fourcc         = V4L2_PIX_FMT_SRGGB12,
+               .code           = MEDIA_BUS_FMT_SRGGB12_1X12,
+               .depth          = 16,
+       },
+};
+
+/* Convert a four-character code (FOURCC) to a string; returns a static buffer, not re-entrant */
+static char *fourcc_to_str(u32 fmt)
+{
+       static char code[5];
+
+       code[0] = (unsigned char)(fmt & 0xff);
+       code[1] = (unsigned char)((fmt >> 8) & 0xff);
+       code[2] = (unsigned char)((fmt >> 16) & 0xff);
+       code[3] = (unsigned char)((fmt >> 24) & 0xff);
+       code[4] = '\0';
+
+       return code;
+}
+
+/* buffer for one video frame */
+struct cal_buffer {
+       /* common v4l buffer stuff -- must be first */
+       struct vb2_v4l2_buffer  vb;
+       struct list_head        list;
+       const struct cal_fmt    *fmt;
+};
+
+struct cal_dmaqueue {
+       struct list_head        active;
+
+       /* Counters to control fps rate */
+       int                     frame;
+       int                     ini_jiffies;
+};
+
+struct cm_data {
+       void __iomem            *base;
+       struct resource         *res;
+
+       unsigned int            camerrx_control;
+
+       struct platform_device *pdev;
+};
+
+struct cc_data {
+       void __iomem            *base;
+       struct resource         *res;
+
+       struct platform_device *pdev;
+};
+
+/*
+ * There is one cal_dev structure in the driver; it is shared by
+ * all instances.
+ */
+struct cal_dev {
+       int                     irq;
+       void __iomem            *base;
+       struct resource         *res;
+       struct platform_device  *pdev;
+       struct v4l2_device      v4l2_dev;
+
+       /* Control Module handle */
+       struct cm_data          *cm;
+       /* Camera Core Module handle */
+       struct cc_data          *cc[CAL_NUM_CSI2_PORTS];
+
+       struct cal_ctx          *ctx[CAL_NUM_CONTEXT];
+};
+
+/*
+ * There is one cal_ctx structure for each camera core context.
+ */
+struct cal_ctx {
+       struct v4l2_device      v4l2_dev;
+       struct v4l2_ctrl_handler ctrl_handler;
+       struct video_device     vdev;
+       struct v4l2_async_notifier notifier;
+       struct v4l2_subdev      *sensor;
+       struct v4l2_of_endpoint endpoint;
+
+       struct v4l2_async_subdev asd;
+       struct v4l2_async_subdev *asd_list[1];
+
+       struct v4l2_fh          fh;
+       struct cal_dev          *dev;
+       struct cc_data          *cc;
+
+       /* v4l2_ioctl mutex */
+       struct mutex            mutex;
+       /* v4l2 buffers lock */
+       spinlock_t              slock;
+
+       /* Several counters */
+       unsigned long           jiffies;
+
+       struct vb2_alloc_ctx    *alloc_ctx;
+       struct cal_dmaqueue     vidq;
+
+       /* Input Number */
+       int                     input;
+
+       /* video capture */
+       const struct cal_fmt    *fmt;
+       /* Used to store current pixel format */
+       struct v4l2_format              v_fmt;
+       /* Used to store current mbus frame format */
+       struct v4l2_mbus_framefmt       m_fmt;
+
+       /* Current subdev enumerated format */
+       struct cal_fmt          *active_fmt[ARRAY_SIZE(cal_formats)];
+       int                     num_active_fmt;
+
+       struct v4l2_fract       timeperframe;
+       unsigned int            sequence;
+       unsigned int            external_rate;
+       struct vb2_queue        vb_vidq;
+       unsigned int            seq_count;
+       unsigned int            csi2_port;
+       unsigned int            virtual_channel;
+
+       /* Pointer to the current v4l2_buffer */
+       struct cal_buffer       *cur_frm;
+       /* Pointer to the next v4l2_buffer */
+       struct cal_buffer       *next_frm;
+};
+
+static const struct cal_fmt *find_format_by_pix(struct cal_ctx *ctx,
+                                               u32 pixelformat)
+{
+       const struct cal_fmt *fmt;
+       unsigned int k;
+
+       for (k = 0; k < ctx->num_active_fmt; k++) {
+               fmt = ctx->active_fmt[k];
+               if (fmt->fourcc == pixelformat)
+                       return fmt;
+       }
+
+       return NULL;
+}
+
+static const struct cal_fmt *find_format_by_code(struct cal_ctx *ctx,
+                                                u32 code)
+{
+       const struct cal_fmt *fmt;
+       unsigned int k;
+
+       for (k = 0; k < ctx->num_active_fmt; k++) {
+               fmt = ctx->active_fmt[k];
+               if (fmt->code == code)
+                       return fmt;
+       }
+
+       return NULL;
+}
+
+static inline struct cal_ctx *notifier_to_ctx(struct v4l2_async_notifier *n)
+{
+       return container_of(n, struct cal_ctx, notifier);
+}
+
+static inline int get_field(u32 value, u32 mask)
+{
+       return (value & mask) >> __ffs(mask);
+}
+
+static inline void set_field(u32 *valp, u32 field, u32 mask)
+{
+       u32 val = *valp;
+
+       val &= ~mask;
+       val |= (field << __ffs(mask)) & mask;
+       *valp = val;
+}
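A minimal sketch of how the two field helpers above compose, assuming a hypothetical 4-bit field whose mask is 0x00F0; example_field_usage() is not part of the driver:

static inline u32 example_field_usage(void)
{
	u32 reg = 0x1230;
	int field = get_field(reg, 0x00F0);	/* (0x1230 & 0x00F0) >> 4 == 0x3 */

	set_field(&reg, field + 1, 0x00F0);	/* reg becomes 0x1240 */
	return reg;
}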
+
+/*
+ * Control Module block access
+ */
+static struct cm_data *cm_create(struct cal_dev *dev)
+{
+       struct platform_device *pdev = dev->pdev;
+       struct cm_data *cm;
+
+       cm = devm_kzalloc(&pdev->dev, sizeof(*cm), GFP_KERNEL);
+       if (!cm)
+               return ERR_PTR(-ENOMEM);
+
+       cm->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                               "camerrx_control");
+       cm->base = devm_ioremap_resource(&pdev->dev, cm->res);
+       if (IS_ERR(cm->base)) {
+               cal_err(dev, "failed to ioremap\n");
+               return cm->base;
+       }
+
+       cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+               cm->res->name, &cm->res->start, &cm->res->end);
+
+       return cm;
+}
+
+static void camerarx_phy_enable(struct cal_ctx *ctx)
+{
+       u32 val;
+
+       if (!ctx->dev->cm->base) {
+               ctx_err(ctx, "cm not mapped\n");
+               return;
+       }
+
+       val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
+       if (ctx->csi2_port == 1) {
+               set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
+               set_field(&val, 0, CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK);
+               /* enable all lanes by default */
+               set_field(&val, 0xf, CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK);
+               set_field(&val, 1, CM_CAMERRX_CTRL_CSI0_MODE_MASK);
+       } else if (ctx->csi2_port == 2) {
+               set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
+               set_field(&val, 0, CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK);
+               /* enable all lanes by default */
+               set_field(&val, 0x3, CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK);
+               set_field(&val, 1, CM_CAMERRX_CTRL_CSI1_MODE_MASK);
+       }
+       reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+}
+
+static void camerarx_phy_disable(struct cal_ctx *ctx)
+{
+       u32 val;
+
+       if (!ctx->dev->cm->base) {
+               ctx_err(ctx, "cm not mapped\n");
+               return;
+       }
+
+       val = reg_read(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL);
+       if (ctx->csi2_port == 1)
+               set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK);
+       else if (ctx->csi2_port == 2)
+               set_field(&val, 0x0, CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK);
+       reg_write(ctx->dev->cm, CM_CTRL_CORE_CAMERRX_CONTROL, val);
+}
+
+/*
+ * Camera Instance access block
+ */
+static struct cc_data *cc_create(struct cal_dev *dev, unsigned int core)
+{
+       struct platform_device *pdev = dev->pdev;
+       struct cc_data *cc;
+
+       cc = devm_kzalloc(&pdev->dev, sizeof(*cc), GFP_KERNEL);
+       if (!cc)
+               return ERR_PTR(-ENOMEM);
+
+       cc->res = platform_get_resource_byname(pdev,
+                                              IORESOURCE_MEM,
+                                              (core == 0) ?
+                                               "cal_rx_core0" :
+                                               "cal_rx_core1");
+       cc->base = devm_ioremap_resource(&pdev->dev, cc->res);
+       if (IS_ERR(cc->base)) {
+               cal_err(dev, "failed to ioremap\n");
+               return cc->base;
+       }
+
+       cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+               cc->res->name, &cc->res->start, &cc->res->end);
+
+       return cc;
+}
+
+/*
+ * Get Revision and HW info
+ */
+static void cal_get_hwinfo(struct cal_dev *dev)
+{
+       u32 revision = 0;
+       u32 hwinfo = 0;
+
+       revision = reg_read(dev, CAL_HL_REVISION);
+       cal_dbg(3, dev, "CAL_HL_REVISION = 0x%08x (expecting 0x40000200)\n",
+               revision);
+
+       hwinfo = reg_read(dev, CAL_HL_HWINFO);
+       cal_dbg(3, dev, "CAL_HL_HWINFO = 0x%08x (expecting 0xA3C90469)\n",
+               hwinfo);
+}
+
+static inline int cal_runtime_get(struct cal_dev *dev)
+{
+       int r;
+
+       r = pm_runtime_get_sync(&dev->pdev->dev);
+
+       return r;
+}
+
+static inline void cal_runtime_put(struct cal_dev *dev)
+{
+       pm_runtime_put_sync(&dev->pdev->dev);
+}
+
+static void cal_quickdump_regs(struct cal_dev *dev)
+{
+       cal_info(dev, "CAL Registers @ 0x%pa:\n", &dev->res->start);
+       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+                      dev->base, resource_size(dev->res), false);
+
+       if (dev->ctx[0]) {
+               cal_info(dev, "CSI2 Core 0 Registers @ %pa:\n",
+                        &dev->ctx[0]->cc->res->start);
+               print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+                              dev->ctx[0]->cc->base,
+                              resource_size(dev->ctx[0]->cc->res),
+                              false);
+       }
+
+       if (dev->ctx[1]) {
+               cal_info(dev, "CSI2 Core 1 Registers @ %pa:\n",
+                        &dev->ctx[1]->cc->res->start);
+               print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+                              dev->ctx[1]->cc->base,
+                              resource_size(dev->ctx[1]->cc->res),
+                              false);
+       }
+
+       cal_info(dev, "CAMERRX_Control Registers @ %pa:\n",
+                &dev->cm->res->start);
+       print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 4,
+                      dev->cm->base,
+                      resource_size(dev->cm->res), false);
+}
+
+/*
+ * Enable the expected IRQ sources
+ */
+static void enable_irqs(struct cal_ctx *ctx)
+{
+       /* Enable IRQ_WDMA_END 0/1 */
+       reg_write_field(ctx->dev,
+                       CAL_HL_IRQENABLE_SET(2),
+                       CAL_HL_IRQ_ENABLE,
+                       CAL_HL_IRQ_MASK(ctx->csi2_port));
+       /* Enable IRQ_WDMA_START 0/1 */
+       reg_write_field(ctx->dev,
+                       CAL_HL_IRQENABLE_SET(3),
+                       CAL_HL_IRQ_ENABLE,
+                       CAL_HL_IRQ_MASK(ctx->csi2_port));
+       /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
+       reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0xFF000000);
+}
+
+static void disable_irqs(struct cal_ctx *ctx)
+{
+       /* Disable IRQ_WDMA_END 0/1 */
+       reg_write_field(ctx->dev,
+                       CAL_HL_IRQENABLE_CLR(2),
+                       CAL_HL_IRQ_CLEAR,
+                       CAL_HL_IRQ_MASK(ctx->csi2_port));
+       /* Disable IRQ_WDMA_START 0/1 */
+       reg_write_field(ctx->dev,
+                       CAL_HL_IRQENABLE_CLR(3),
+                       CAL_HL_IRQ_CLEAR,
+                       CAL_HL_IRQ_MASK(ctx->csi2_port));
+       /* Todo: Add VC_IRQ and CSI2_COMPLEXIO_IRQ handling */
+       reg_write(ctx->dev, CAL_CSI2_VC_IRQENABLE(1), 0);
+}
+
+static void csi2_init(struct cal_ctx *ctx)
+{
+       int i;
+       u32 val;
+
+       val = reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port));
+       set_field(&val, CAL_GEN_ENABLE,
+                 CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK);
+       set_field(&val, CAL_GEN_ENABLE,
+                 CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK);
+       set_field(&val, CAL_GEN_DISABLE,
+                 CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK);
+       set_field(&val, 407, CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK);
+       reg_write(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_CSI2_TIMING(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_CSI2_TIMING(ctx->csi2_port)));
+
+       val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+       set_field(&val, CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL,
+                 CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK);
+       set_field(&val, CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON,
+                 CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK);
+       reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+       for (i = 0; i < 10; i++) {
+               if (reg_read_field(ctx->dev,
+                                  CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port),
+                                  CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK) ==
+                   CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON)
+                       break;
+               usleep_range(1000, 1100);
+       }
+       ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port)));
+
+       val = reg_read(ctx->dev, CAL_CTRL);
+       set_field(&val, CAL_CTRL_BURSTSIZE_BURST128, CAL_CTRL_BURSTSIZE_MASK);
+       set_field(&val, 0xF, CAL_CTRL_TAGCNT_MASK);
+       set_field(&val, CAL_CTRL_POSTED_WRITES_NONPOSTED,
+                 CAL_CTRL_POSTED_WRITES_MASK);
+       set_field(&val, 0xFF, CAL_CTRL_MFLAGL_MASK);
+       set_field(&val, 0xFF, CAL_CTRL_MFLAGH_MASK);
+       reg_write(ctx->dev, CAL_CTRL, val);
+       ctx_dbg(3, ctx, "CAL_CTRL = 0x%08x\n", reg_read(ctx->dev, CAL_CTRL));
+}
+
+static void csi2_lane_config(struct cal_ctx *ctx)
+{
+       u32 val = reg_read(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port));
+       u32 lane_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK;
+       u32 polarity_mask = CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK;
+       struct v4l2_of_bus_mipi_csi2 *mipi_csi2 = &ctx->endpoint.bus.mipi_csi2;
+       int lane;
+
+       set_field(&val, mipi_csi2->clock_lane + 1, lane_mask);
+       set_field(&val, mipi_csi2->lane_polarities[0], polarity_mask);
+       for (lane = 0; lane < mipi_csi2->num_data_lanes; lane++) {
+               /*
+                * The lanes are one nibble apart, starting with the clock
+                * and followed by the data lanes, so shift the masks by 4.
+                */
+               lane_mask <<= 4;
+               polarity_mask <<= 4;
+               set_field(&val, mipi_csi2->data_lanes[lane] + 1, lane_mask);
+               set_field(&val, mipi_csi2->lane_polarities[lane + 1],
+                         polarity_mask);
+       }
+
+       reg_write(ctx->dev, CAL_CSI2_COMPLEXIO_CFG(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_CSI2_COMPLEXIO_CFG(%d) = 0x%08x\n",
+               ctx->csi2_port, val);
+}
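To make the nibble walk above concrete, a hedged example assuming a hypothetical endpoint with clock-lanes = <0> and data-lanes = <1 2>:

/*
 * Illustrative only: the clock lane is written as position 1 into the
 * first nibble of COMPLEXIO_CFG, then the loop shifts both masks by 4
 * and writes the data lanes as positions 2 and 3 into the next two
 * nibbles, each with its polarity bit from lane_polarities[].
 */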
+
+static void csi2_ppi_enable(struct cal_ctx *ctx)
+{
+       reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
+                       CAL_GEN_ENABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void csi2_ppi_disable(struct cal_ctx *ctx)
+{
+       reg_write_field(ctx->dev, CAL_CSI2_PPI_CTRL(ctx->csi2_port),
+                       CAL_GEN_DISABLE, CAL_CSI2_PPI_CTRL_IF_EN_MASK);
+}
+
+static void csi2_ctx_config(struct cal_ctx *ctx)
+{
+       u32 val;
+
+       val = reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port));
+       set_field(&val, ctx->csi2_port, CAL_CSI2_CTX_CPORT_MASK);
+       /*
+        * DT type: MIPI CSI-2 Specs
+        *   0x1: All - DT filter is disabled
+        *  0x24: RGB888 1 pixel  = 3 bytes
+        *  0x2B: RAW10  4 pixels = 5 bytes
+        *  0x2A: RAW8   1 pixel  = 1 byte
+        *  0x1E: YUV422 2 pixels = 4 bytes
+        */
+       set_field(&val, 0x1, CAL_CSI2_CTX_DT_MASK);
+       /* Virtual channel from the CSI2 sensor, usually 0 */
+       set_field(&val, ctx->virtual_channel, CAL_CSI2_CTX_VC_MASK);
+       /* NUM_LINES_PER_FRAME => 0 means auto detect */
+       set_field(&val, 0, CAL_CSI2_CTX_LINES_MASK);
+       set_field(&val, CAL_CSI2_CTX_ATT_PIX, CAL_CSI2_CTX_ATT_MASK);
+       set_field(&val, CAL_CSI2_CTX_PACK_MODE_LINE,
+                 CAL_CSI2_CTX_PACK_MODE_MASK);
+       reg_write(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_CSI2_CTX0(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_CSI2_CTX0(ctx->csi2_port)));
+}
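As a hedged illustration of the DT table in the comment above: the driver leaves the filter disabled (0x1); restricting the context to a single data type would mean programming that type's code instead, for example:

/* Illustrative variant, not part of the patch: accept only RAW10 */
/* set_field(&val, 0x2B, CAL_CSI2_CTX_DT_MASK); */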
+
+static void pix_proc_config(struct cal_ctx *ctx)
+{
+       u32 val;
+
+       val = reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port));
+       set_field(&val, CAL_PIX_PROC_EXTRACT_B8, CAL_PIX_PROC_EXTRACT_MASK);
+       set_field(&val, CAL_PIX_PROC_DPCMD_BYPASS, CAL_PIX_PROC_DPCMD_MASK);
+       set_field(&val, CAL_PIX_PROC_DPCME_BYPASS, CAL_PIX_PROC_DPCME_MASK);
+       set_field(&val, CAL_PIX_PROC_PACK_B8, CAL_PIX_PROC_PACK_MASK);
+       set_field(&val, ctx->csi2_port, CAL_PIX_PROC_CPORT_MASK);
+       set_field(&val, CAL_GEN_ENABLE, CAL_PIX_PROC_EN_MASK);
+       reg_write(ctx->dev, CAL_PIX_PROC(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_PIX_PROC(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_PIX_PROC(ctx->csi2_port)));
+}
+
+static void cal_wr_dma_config(struct cal_ctx *ctx,
+                             unsigned int width)
+{
+       u32 val;
+
+       val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
+       set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
+       set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
+                 CAL_WR_DMA_CTRL_DTAG_MASK);
+       set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
+                 CAL_WR_DMA_CTRL_MODE_MASK);
+       set_field(&val, CAL_WR_DMA_CTRL_PATTERN_LINEAR,
+                 CAL_WR_DMA_CTRL_PATTERN_MASK);
+       set_field(&val, CAL_GEN_ENABLE, CAL_WR_DMA_CTRL_STALL_RD_MASK);
+       reg_write(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_WR_DMA_CTRL(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port)));
+
+       /*
+        * width/16 is not clearly documented but works in practice;
+        * zero does not work correctly.
+        */
+       reg_write_field(ctx->dev,
+                       CAL_WR_DMA_OFST(ctx->csi2_port),
+                       (width / 16),
+                       CAL_WR_DMA_OFST_MASK);
+       ctx_dbg(3, ctx, "CAL_WR_DMA_OFST(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_WR_DMA_OFST(ctx->csi2_port)));
+
+       val = reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port));
+       /* 64 bit word means no skipping */
+       set_field(&val, 0, CAL_WR_DMA_XSIZE_XSKIP_MASK);
+       /*
+        * (width * 8) / 64 should be the size of an entire line in
+        * 64-bit words, but 0 means all data until the end of line
+        * is detected automatically.
+        */
+       set_field(&val, (width / 8), CAL_WR_DMA_XSIZE_MASK);
+       reg_write(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port), val);
+       ctx_dbg(3, ctx, "CAL_WR_DMA_XSIZE(%d) = 0x%08x\n", ctx->csi2_port,
+               reg_read(ctx->dev, CAL_WR_DMA_XSIZE(ctx->csi2_port)));
+}
+
+static void cal_wr_dma_addr(struct cal_ctx *ctx, unsigned int dmaaddr)
+{
+       reg_write(ctx->dev, CAL_WR_DMA_ADDR(ctx->csi2_port), dmaaddr);
+}
+
+/*
+ * TCLK values are OK at their reset values
+ */
+#define TCLK_TERM      0
+#define TCLK_MISS      1
+#define TCLK_SETTLE    14
+#define THS_SETTLE     15
+
+static void csi2_phy_config(struct cal_ctx *ctx)
+{
+       unsigned int reg0, reg1;
+       unsigned int ths_term, ths_settle;
+       unsigned int ddrclkperiod_us;
+
+       /*
+        * THS_TERM: Programmed value = floor(20 ns/DDRClk period) - 2.
+        */
+       ddrclkperiod_us = ctx->external_rate / 2000000;
+       ddrclkperiod_us = 1000000 / ddrclkperiod_us;
+       ctx_dbg(1, ctx, "ddrclkperiod_us: %d\n", ddrclkperiod_us);
+
+       ths_term = 20000 / ddrclkperiod_us;
+       ths_term = (ths_term >= 2) ? ths_term - 2 : ths_term;
+       ctx_dbg(1, ctx, "ths_term: %d (0x%02x)\n", ths_term, ths_term);
+
+       /*
+        * THS_SETTLE: Programmed value = floor(176.3 ns/CtrlClk period) - 1.
+        *      Since CtrlClk is fixed at 96 MHz, we get
+        *      ths_settle = floor(176.3 / 10.416) - 1 = 15
+        * If we ever switch to a dynamic clock then this code might be useful
+        *
+        * unsigned int ctrlclkperiod_us;
+        * ctrlclkperiod_us = 96000000 / 1000000;
+        * ctrlclkperiod_us = 1000000 / ctrlclkperiod_us;
+        * ctx_dbg(1, ctx, "ctrlclkperiod_us: %d\n", ctrlclkperiod_us);
+
+        * ths_settle = 176300  / ctrlclkperiod_us;
+        * ths_settle = (ths_settle > 1) ? ths_settle - 1 : ths_settle;
+        */
+
+       ths_settle = THS_SETTLE;
+       ctx_dbg(1, ctx, "ths_settle: %d (0x%02x)\n", ths_settle, ths_settle);
+
+       reg0 = reg_read(ctx->cc, CAL_CSI2_PHY_REG0);
+       set_field(&reg0, CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE,
+                 CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK);
+       set_field(&reg0, ths_term, CAL_CSI2_PHY_REG0_THS_TERM_MASK);
+       set_field(&reg0, ths_settle, CAL_CSI2_PHY_REG0_THS_SETTLE_MASK);
+
+       ctx_dbg(1, ctx, "CSI2_%d_REG0 = 0x%08x\n", (ctx->csi2_port - 1), reg0);
+       reg_write(ctx->cc, CAL_CSI2_PHY_REG0, reg0);
+
+       reg1 = reg_read(ctx->cc, CAL_CSI2_PHY_REG1);
+       set_field(&reg1, TCLK_TERM, CAL_CSI2_PHY_REG1_TCLK_TERM_MASK);
+       set_field(&reg1, 0xb8, CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK);
+       set_field(&reg1, TCLK_MISS, CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK);
+       set_field(&reg1, TCLK_SETTLE, CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK);
+
+       ctx_dbg(1, ctx, "CSI2_%d_REG1 = 0x%08x\n", (ctx->csi2_port - 1), reg1);
+       reg_write(ctx->cc, CAL_CSI2_PHY_REG1, reg1);
+}
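A worked pass through the THS_TERM arithmetic above, assuming a hypothetical 800 MHz pixel rate (400 MHz DDR clock); despite the _us suffix, the intermediate period effectively ends up in picoseconds:

/*
 * ddrclkperiod_us = 800000000 / 2000000 = 400   (DDR clock in MHz)
 * ddrclkperiod_us = 1000000 / 400       = 2500  (period, ~2.5 ns)
 * ths_term        = 20000 / 2500        = 8     (floor(20 ns / period))
 * ths_term        = 8 - 2               = 6     (programmed value)
 */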
+
+static int cal_get_external_info(struct cal_ctx *ctx)
+{
+       struct v4l2_ctrl *ctrl;
+
+       ctrl = v4l2_ctrl_find(ctx->sensor->ctrl_handler, V4L2_CID_PIXEL_RATE);
+       if (!ctrl) {
+               ctx_err(ctx, "no pixel rate control in subdev: %s\n",
+                       ctx->sensor->name);
+               return -EPIPE;
+       }
+
+       ctx->external_rate = v4l2_ctrl_g_ctrl_int64(ctrl);
+       ctx_dbg(3, ctx, "sensor Pixel Rate: %d\n", ctx->external_rate);
+
+       return 0;
+}
+
+static inline void cal_schedule_next_buffer(struct cal_ctx *ctx)
+{
+       struct cal_dmaqueue *dma_q = &ctx->vidq;
+       struct cal_buffer *buf;
+       unsigned long addr;
+
+       buf = list_entry(dma_q->active.next, struct cal_buffer, list);
+       ctx->next_frm = buf;
+       list_del(&buf->list);
+
+       addr = vb2_dma_contig_plane_dma_addr(&buf->vb.vb2_buf, 0);
+       cal_wr_dma_addr(ctx, addr);
+}
+
+static inline void cal_process_buffer_complete(struct cal_ctx *ctx)
+{
+       ctx->cur_frm->vb.vb2_buf.timestamp = ktime_get_ns();
+       ctx->cur_frm->vb.field = ctx->m_fmt.field;
+       ctx->cur_frm->vb.sequence = ctx->sequence++;
+
+       vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_DONE);
+       ctx->cur_frm = ctx->next_frm;
+}
+
+#define isvcirqset(irq, vc, ff) (irq & \
+       (CAL_CSI2_VC_IRQENABLE_ ##ff ##_IRQ_##vc ##_MASK))
+
+#define isportirqset(irq, port) (irq & CAL_HL_IRQ_MASK(port))
+
+static irqreturn_t cal_irq(int irq_cal, void *data)
+{
+       struct cal_dev *dev = (struct cal_dev *)data;
+       struct cal_ctx *ctx;
+       struct cal_dmaqueue *dma_q;
+       u32 irqst2, irqst3;
+
+       /* Check which DMA just finished */
+       irqst2 = reg_read(dev, CAL_HL_IRQSTATUS(2));
+       if (irqst2) {
+               /* Clear Interrupt status */
+               reg_write(dev, CAL_HL_IRQSTATUS(2), irqst2);
+
+               /* Need to check both ports */
+               if (isportirqset(irqst2, 1)) {
+                       ctx = dev->ctx[0];
+
+                       if (ctx->cur_frm != ctx->next_frm)
+                               cal_process_buffer_complete(ctx);
+               }
+
+               if (isportirqset(irqst2, 2)) {
+                       ctx = dev->ctx[1];
+
+                       if (ctx->cur_frm != ctx->next_frm)
+                               cal_process_buffer_complete(ctx);
+               }
+       }
+
+       /* Check which DMA just started */
+       irqst3 = reg_read(dev, CAL_HL_IRQSTATUS(3));
+       if (irqst3) {
+               /* Clear Interrupt status */
+               reg_write(dev, CAL_HL_IRQSTATUS(3), irqst3);
+
+               /* Need to check both ports */
+               if (isportirqset(irqst3, 1)) {
+                       ctx = dev->ctx[0];
+                       dma_q = &ctx->vidq;
+
+                       spin_lock(&ctx->slock);
+                       if (!list_empty(&dma_q->active) &&
+                           ctx->cur_frm == ctx->next_frm)
+                               cal_schedule_next_buffer(ctx);
+                       spin_unlock(&ctx->slock);
+               }
+
+               if (isportirqset(irqst3, 2)) {
+                       ctx = dev->ctx[1];
+                       dma_q = &ctx->vidq;
+
+                       spin_lock(&ctx->slock);
+                       if (!list_empty(&dma_q->active) &&
+                           ctx->cur_frm == ctx->next_frm)
+                               cal_schedule_next_buffer(ctx);
+                       spin_unlock(&ctx->slock);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
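A short summary of the buffer ping-pong implemented by the handler above, offered as a hedged reading of the code rather than authoritative documentation:

/*
 * WDMA_END (IRQSTATUS(2)): the frame written into cur_frm is complete;
 * it is returned to videobuf2 and next_frm becomes the new cur_frm.
 * WDMA_START (IRQSTATUS(3)): a new frame has started; if another buffer
 * is queued and nothing is scheduled yet, its DMA address is programmed
 * so that it becomes next_frm while the current frame is being written.
 */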
+
+/*
+ * video ioctls
+ */
+static int cal_querycap(struct file *file, void *priv,
+                       struct v4l2_capability *cap)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+
+       strlcpy(cap->driver, CAL_MODULE_NAME, sizeof(cap->driver));
+       strlcpy(cap->card, CAL_MODULE_NAME, sizeof(cap->card));
+
+       snprintf(cap->bus_info, sizeof(cap->bus_info),
+                "platform:%s", ctx->v4l2_dev.name);
+       cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+                           V4L2_CAP_READWRITE;
+       cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+       return 0;
+}
+
+static int cal_enum_fmt_vid_cap(struct file *file, void  *priv,
+                               struct v4l2_fmtdesc *f)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       const struct cal_fmt *fmt = NULL;
+
+       if (f->index >= ctx->num_active_fmt)
+               return -EINVAL;
+
+       fmt = ctx->active_fmt[f->index];
+
+       f->pixelformat = fmt->fourcc;
+       f->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       return 0;
+}
+
+static int __subdev_get_format(struct cal_ctx *ctx,
+                              struct v4l2_mbus_framefmt *fmt)
+{
+       struct v4l2_subdev_format sd_fmt;
+       struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+       int ret;
+
+       if (!ctx->sensor)
+               return -EINVAL;
+
+       sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+       sd_fmt.pad = 0;
+
+       ret = v4l2_subdev_call(ctx->sensor, pad, get_fmt, NULL, &sd_fmt);
+       if (ret)
+               return ret;
+
+       *fmt = *mbus_fmt;
+
+       ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+               fmt->width, fmt->height, fmt->code);
+
+       return 0;
+}
+
+static int __subdev_set_format(struct cal_ctx *ctx,
+                              struct v4l2_mbus_framefmt *fmt)
+{
+       struct v4l2_subdev_format sd_fmt;
+       struct v4l2_mbus_framefmt *mbus_fmt = &sd_fmt.format;
+       int ret;
+
+       if (!ctx->sensor)
+               return -EINVAL;
+
+       sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+       sd_fmt.pad = 0;
+       *mbus_fmt = *fmt;
+
+       ret = v4l2_subdev_call(ctx->sensor, pad, set_fmt, NULL, &sd_fmt);
+       if (ret)
+               return ret;
+
+       ctx_dbg(1, ctx, "%s %dx%d code:%04X\n", __func__,
+               fmt->width, fmt->height, fmt->code);
+
+       return 0;
+}
+
+static int cal_calc_format_size(struct cal_ctx *ctx,
+                               const struct cal_fmt *fmt,
+                               struct v4l2_format *f)
+{
+       if (!fmt) {
+               ctx_dbg(3, ctx, "No cal_fmt provided!\n");
+               return -EINVAL;
+       }
+
+       v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2,
+                             &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0);
+       f->fmt.pix.bytesperline = bytes_per_line(f->fmt.pix.width,
+                                                fmt->depth >> 3);
+       f->fmt.pix.sizeimage = f->fmt.pix.height *
+                              f->fmt.pix.bytesperline;
+
+       ctx_dbg(3, ctx, "%s: fourcc: %s size: %dx%d bpl:%d img_size:%d\n",
+               __func__, fourcc_to_str(f->fmt.pix.pixelformat),
+               f->fmt.pix.width, f->fmt.pix.height,
+               f->fmt.pix.bytesperline, f->fmt.pix.sizeimage);
+
+       return 0;
+}
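A worked example of the size calculation above for a hypothetical 1280x720 YUYV capture (16 bits per pixel); the figures are illustrative:

/*
 * bytesperline = bytes_per_line(1280, 2) = ALIGN(2560, 16) = 2560
 * sizeimage    = 720 * 2560              = 1843200 bytes
 */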
+
+static int cal_g_fmt_vid_cap(struct file *file, void *priv,
+                            struct v4l2_format *f)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+
+       *f = ctx->v_fmt;
+
+       return 0;
+}
+
+static int cal_try_fmt_vid_cap(struct file *file, void *priv,
+                              struct v4l2_format *f)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       const struct cal_fmt *fmt;
+       struct v4l2_subdev_frame_size_enum fse;
+       int ret, found;
+
+       fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+       if (!fmt) {
+               ctx_dbg(3, ctx, "Fourcc format (0x%08x) not found.\n",
+                       f->fmt.pix.pixelformat);
+
+               /* Just get the first one enumerated */
+               fmt = ctx->active_fmt[0];
+               f->fmt.pix.pixelformat = fmt->fourcc;
+       }
+
+       f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
+
+       /* check for/find a valid width/height */
+       ret = 0;
+       found = false;
+       fse.pad = 0;
+       fse.code = fmt->code;
+       fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+       for (fse.index = 0; ; fse.index++) {
+               ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
+                                      NULL, &fse);
+               if (ret)
+                       break;
+
+               if ((f->fmt.pix.width == fse.max_width) &&
+                   (f->fmt.pix.height == fse.max_height)) {
+                       found = true;
+                       break;
+               } else if ((f->fmt.pix.width >= fse.min_width) &&
+                        (f->fmt.pix.width <= fse.max_width) &&
+                        (f->fmt.pix.height >= fse.min_height) &&
+                        (f->fmt.pix.height <= fse.max_height)) {
+                       found = true;
+                       break;
+               }
+       }
+
+       if (!found) {
+               /* use existing values as default */
+               f->fmt.pix.width = ctx->v_fmt.fmt.pix.width;
+               f->fmt.pix.height =  ctx->v_fmt.fmt.pix.height;
+       }
+
+       /*
+        * Use current colorspace for now, it will get
+        * updated properly during s_fmt
+        */
+       f->fmt.pix.colorspace = ctx->v_fmt.fmt.pix.colorspace;
+       return cal_calc_format_size(ctx, fmt, f);
+}
+
+static int cal_s_fmt_vid_cap(struct file *file, void *priv,
+                            struct v4l2_format *f)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       struct vb2_queue *q = &ctx->vb_vidq;
+       const struct cal_fmt *fmt;
+       struct v4l2_mbus_framefmt mbus_fmt;
+       int ret;
+
+       if (vb2_is_busy(q)) {
+               ctx_dbg(3, ctx, "%s device busy\n", __func__);
+               return -EBUSY;
+       }
+
+       ret = cal_try_fmt_vid_cap(file, priv, f);
+       if (ret < 0)
+               return ret;
+
+       fmt = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
+
+       v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, fmt->code);
+
+       ret = __subdev_set_format(ctx, &mbus_fmt);
+       if (ret)
+               return ret;
+
+       /* Just double check nothing has gone wrong */
+       if (mbus_fmt.code != fmt->code) {
+               ctx_dbg(3, ctx,
+                       "%s subdev changed format on us, this should not happen\n",
+                       __func__);
+               return -EINVAL;
+       }
+
+       v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+       ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       ctx->v_fmt.fmt.pix.pixelformat  = fmt->fourcc;
+       cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
+       ctx->fmt = fmt;
+       ctx->m_fmt = mbus_fmt;
+       *f = ctx->v_fmt;
+
+       return 0;
+}
+
+static int cal_enum_framesizes(struct file *file, void *fh,
+                              struct v4l2_frmsizeenum *fsize)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       const struct cal_fmt *fmt;
+       struct v4l2_subdev_frame_size_enum fse;
+       int ret;
+
+       /* check for valid format */
+       fmt = find_format_by_pix(ctx, fsize->pixel_format);
+       if (!fmt) {
+               ctx_dbg(3, ctx, "Invalid pixel code: %x\n",
+                       fsize->pixel_format);
+               return -EINVAL;
+       }
+
+       fse.index = fsize->index;
+       fse.pad = 0;
+       fse.code = fmt->code;
+
+       ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size, NULL, &fse);
+       if (ret)
+               return -EINVAL;
+
+       ctx_dbg(1, ctx, "%s: index: %d code: %x W:[%d,%d] H:[%d,%d]\n",
+               __func__, fse.index, fse.code, fse.min_width, fse.max_width,
+               fse.min_height, fse.max_height);
+
+       fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+       fsize->discrete.width = fse.max_width;
+       fsize->discrete.height = fse.max_height;
+
+       return 0;
+}
+
+static int cal_enum_input(struct file *file, void *priv,
+                         struct v4l2_input *inp)
+{
+       if (inp->index >= CAL_NUM_INPUT)
+               return -EINVAL;
+
+       inp->type = V4L2_INPUT_TYPE_CAMERA;
+       sprintf(inp->name, "Camera %u", inp->index);
+       return 0;
+}
+
+static int cal_g_input(struct file *file, void *priv, unsigned int *i)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+
+       *i = ctx->input;
+       return 0;
+}
+
+static int cal_s_input(struct file *file, void *priv, unsigned int i)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+
+       if (i >= CAL_NUM_INPUT)
+               return -EINVAL;
+
+       ctx->input = i;
+       return 0;
+}
+
+/* timeperframe is arbitrary and continuous */
+static int cal_enum_frameintervals(struct file *file, void *priv,
+                                  struct v4l2_frmivalenum *fival)
+{
+       struct cal_ctx *ctx = video_drvdata(file);
+       const struct cal_fmt *fmt;
+       struct v4l2_subdev_frame_size_enum fse;
+       int ret;
+
+       if (fival->index)
+               return -EINVAL;
+
+       fmt = find_format_by_pix(ctx, fival->pixel_format);
+       if (!fmt)
+               return -EINVAL;
+
+       /* check for valid width/height */
+       ret = 0;
+       fse.pad = 0;
+       fse.code = fmt->code;
+       fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+       for (fse.index = 0; ; fse.index++) {
+               ret = v4l2_subdev_call(ctx->sensor, pad, enum_frame_size,
+                                      NULL, &fse);
+               if (ret)
+                       return -EINVAL;
+
+               if ((fival->width == fse.max_width) &&
+                   (fival->height == fse.max_height))
+                       break;
+               else if ((fival->width >= fse.min_width) &&
+                        (fival->width <= fse.max_width) &&
+                        (fival->height >= fse.min_height) &&
+                        (fival->height <= fse.max_height))
+                       break;
+
+               return -EINVAL;
+       }
+
+       fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+       fival->discrete.numerator = 1;
+       fival->discrete.denominator = 30;
+
+       return 0;
+}
+
+/*
+ * Videobuf operations
+ */
+static int cal_queue_setup(struct vb2_queue *vq,
+                          unsigned int *nbuffers, unsigned int *nplanes,
+                          unsigned int sizes[], void *alloc_ctxs[])
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+       unsigned size = ctx->v_fmt.fmt.pix.sizeimage;
+
+       if (vq->num_buffers + *nbuffers < 3)
+               *nbuffers = 3 - vq->num_buffers;
+       alloc_ctxs[0] = ctx->alloc_ctx;
+
+       if (*nplanes) {
+               if (sizes[0] < size)
+                       return -EINVAL;
+               size = sizes[0];
+       }
+
+       *nplanes = 1;
+       sizes[0] = size;
+
+       ctx_dbg(3, ctx, "nbuffers=%d, size=%d\n", *nbuffers, sizes[0]);
+
+       return 0;
+}
+
+static int cal_buffer_prepare(struct vb2_buffer *vb)
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+       struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+                                             vb.vb2_buf);
+       unsigned long size;
+
+       if (WARN_ON(!ctx->fmt))
+               return -EINVAL;
+
+       size = ctx->v_fmt.fmt.pix.sizeimage;
+       if (vb2_plane_size(vb, 0) < size) {
+               ctx_err(ctx,
+                       "data will not fit into plane (%lu < %lu)\n",
+                       vb2_plane_size(vb, 0), size);
+               return -EINVAL;
+       }
+
+       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+       return 0;
+}
+
+static void cal_buffer_queue(struct vb2_buffer *vb)
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+       struct cal_buffer *buf = container_of(vb, struct cal_buffer,
+                                             vb.vb2_buf);
+       struct cal_dmaqueue *vidq = &ctx->vidq;
+       unsigned long flags = 0;
+
+       /* recheck locking */
+       spin_lock_irqsave(&ctx->slock, flags);
+       list_add_tail(&buf->list, &vidq->active);
+       spin_unlock_irqrestore(&ctx->slock, flags);
+}
+
+static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+       struct cal_dmaqueue *dma_q = &ctx->vidq;
+       struct cal_buffer *buf, *tmp;
+       unsigned long addr = 0;
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&ctx->slock, flags);
+       if (list_empty(&dma_q->active)) {
+               spin_unlock_irqrestore(&ctx->slock, flags);
+               ctx_dbg(3, ctx, "buffer queue is empty\n");
+               return -EIO;
+       }
+
+       buf = list_entry(dma_q->active.next, struct cal_buffer, list);
+       ctx->cur_frm = buf;
+       ctx->next_frm = buf;
+       list_del(&buf->list);
+       spin_unlock_irqrestore(&ctx->slock, flags);
+
+       addr = vb2_dma_contig_plane_dma_addr(&ctx->cur_frm->vb.vb2_buf, 0);
+       ctx->sequence = 0;
+
+       ret = cal_get_external_info(ctx);
+       if (ret < 0)
+               goto err;
+
+       cal_runtime_get(ctx->dev);
+
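+       /*
+        * Program the capture path: IRQs, CSI-2 PHY, lane and context
+        * setup, pixel processing, then the write DMA for the first buffer.
+        */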
+       enable_irqs(ctx);
+       camerarx_phy_enable(ctx);
+       csi2_init(ctx);
+       csi2_phy_config(ctx);
+       csi2_lane_config(ctx);
+       csi2_ctx_config(ctx);
+       pix_proc_config(ctx);
+       cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
+       cal_wr_dma_addr(ctx, addr);
+       csi2_ppi_enable(ctx);
+
+       if (ctx->sensor) {
+               if (v4l2_subdev_call(ctx->sensor, video, s_stream, 1)) {
+                       ctx_err(ctx, "stream on failed in subdev\n");
+                       cal_runtime_put(ctx->dev);
+                       ret = -EINVAL;
+                       goto err;
+               }
+       }
+
+       if (debug >= 4)
+               cal_quickdump_regs(ctx->dev);
+
+       return 0;
+
+err:
+       list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
+               list_del(&buf->list);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED);
+       }
+       return ret;
+}
+
+static void cal_stop_streaming(struct vb2_queue *vq)
+{
+       struct cal_ctx *ctx = vb2_get_drv_priv(vq);
+       struct cal_dmaqueue *dma_q = &ctx->vidq;
+       struct cal_buffer *buf, *tmp;
+       unsigned long flags;
+
+       if (ctx->sensor) {
+               if (v4l2_subdev_call(ctx->sensor, video, s_stream, 0))
+                       ctx_err(ctx, "stream off failed in subdev\n");
+       }
+
+       csi2_ppi_disable(ctx);
+       disable_irqs(ctx);
+
+       /* Release all active buffers */
+       spin_lock_irqsave(&ctx->slock, flags);
+       list_for_each_entry_safe(buf, tmp, &dma_q->active, list) {
+               list_del(&buf->list);
+               vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+       }
+
+       if (ctx->cur_frm == ctx->next_frm) {
+               vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+       } else {
+               vb2_buffer_done(&ctx->cur_frm->vb.vb2_buf, VB2_BUF_STATE_ERROR);
+               vb2_buffer_done(&ctx->next_frm->vb.vb2_buf,
+                               VB2_BUF_STATE_ERROR);
+       }
+       ctx->cur_frm = NULL;
+       ctx->next_frm = NULL;
+       spin_unlock_irqrestore(&ctx->slock, flags);
+
+       cal_runtime_put(ctx->dev);
+}
+
+static const struct vb2_ops cal_video_qops = {
+       .queue_setup            = cal_queue_setup,
+       .buf_prepare            = cal_buffer_prepare,
+       .buf_queue              = cal_buffer_queue,
+       .start_streaming        = cal_start_streaming,
+       .stop_streaming         = cal_stop_streaming,
+       .wait_prepare           = vb2_ops_wait_prepare,
+       .wait_finish            = vb2_ops_wait_finish,
+};
+
+static const struct v4l2_file_operations cal_fops = {
+       .owner          = THIS_MODULE,
+       .open           = v4l2_fh_open,
+       .release        = vb2_fop_release,
+       .read           = vb2_fop_read,
+       .poll           = vb2_fop_poll,
+       .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
+       .mmap           = vb2_fop_mmap,
+};
+
+static const struct v4l2_ioctl_ops cal_ioctl_ops = {
+       .vidioc_querycap      = cal_querycap,
+       .vidioc_enum_fmt_vid_cap  = cal_enum_fmt_vid_cap,
+       .vidioc_g_fmt_vid_cap     = cal_g_fmt_vid_cap,
+       .vidioc_try_fmt_vid_cap   = cal_try_fmt_vid_cap,
+       .vidioc_s_fmt_vid_cap     = cal_s_fmt_vid_cap,
+       .vidioc_enum_framesizes   = cal_enum_framesizes,
+       .vidioc_reqbufs       = vb2_ioctl_reqbufs,
+       .vidioc_create_bufs   = vb2_ioctl_create_bufs,
+       .vidioc_prepare_buf   = vb2_ioctl_prepare_buf,
+       .vidioc_querybuf      = vb2_ioctl_querybuf,
+       .vidioc_qbuf          = vb2_ioctl_qbuf,
+       .vidioc_dqbuf         = vb2_ioctl_dqbuf,
+       .vidioc_enum_input    = cal_enum_input,
+       .vidioc_g_input       = cal_g_input,
+       .vidioc_s_input       = cal_s_input,
+       .vidioc_enum_frameintervals = cal_enum_frameintervals,
+       .vidioc_streamon      = vb2_ioctl_streamon,
+       .vidioc_streamoff     = vb2_ioctl_streamoff,
+       .vidioc_log_status    = v4l2_ctrl_log_status,
+       .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+       .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static struct video_device cal_videodev = {
+       .name           = CAL_MODULE_NAME,
+       .fops           = &cal_fops,
+       .ioctl_ops      = &cal_ioctl_ops,
+       .minor          = -1,
+       .release        = video_device_release_empty,
+};
+
+/* -----------------------------------------------------------------
+ *     Initialization and module stuff
+ * ------------------------------------------------------------------
+ */
+static int cal_complete_ctx(struct cal_ctx *ctx);
+
+static int cal_async_bound(struct v4l2_async_notifier *notifier,
+                          struct v4l2_subdev *subdev,
+                          struct v4l2_async_subdev *asd)
+{
+       struct cal_ctx *ctx = notifier_to_ctx(notifier);
+       struct v4l2_subdev_mbus_code_enum mbus_code;
+       int ret = 0;
+       int i, j, k;
+
+       if (ctx->sensor) {
+               ctx_info(ctx, "Rejecting subdev %s (Already set!!)",
+                        subdev->name);
+               return 0;
+       }
+
+       ctx->sensor = subdev;
+       ctx_dbg(1, ctx, "Using sensor %s for capture\n", subdev->name);
+
+       /* Enumerate sub device formats and enable all matching local formats */
+       ctx->num_active_fmt = 0;
+       for (j = 0, i = 0; ret != -EINVAL; ++j) {
+               struct cal_fmt *fmt;
+
+               memset(&mbus_code, 0, sizeof(mbus_code));
+               mbus_code.index = j;
+               ret = v4l2_subdev_call(subdev, pad, enum_mbus_code,
+                                      NULL, &mbus_code);
+               if (ret)
+                       continue;
+
+               ctx_dbg(2, ctx,
+                       "subdev %s: code: %04x idx: %d\n",
+                       subdev->name, mbus_code.code, j);
+
+               for (k = 0; k < ARRAY_SIZE(cal_formats); k++) {
+                       fmt = &cal_formats[k];
+
+                       if (mbus_code.code == fmt->code) {
+                               ctx->active_fmt[i] = fmt;
+                               ctx_dbg(2, ctx,
+                                       "matched fourcc: %s: code: %04x idx: %d\n",
+                                       fourcc_to_str(fmt->fourcc),
+                                       fmt->code, i);
+                               ctx->num_active_fmt = ++i;
+                       }
+               }
+       }
+
+       if (i == 0) {
+               ctx_err(ctx, "No suitable format reported by subdev %s\n",
+                       subdev->name);
+               return -EINVAL;
+       }
+
+       cal_complete_ctx(ctx);
+
+       return 0;
+}
+
+static int cal_async_complete(struct v4l2_async_notifier *notifier)
+{
+       struct cal_ctx *ctx = notifier_to_ctx(notifier);
+       const struct cal_fmt *fmt;
+       struct v4l2_mbus_framefmt mbus_fmt;
+       int ret;
+
+       ret = __subdev_get_format(ctx, &mbus_fmt);
+       if (ret)
+               return ret;
+
+       fmt = find_format_by_code(ctx, mbus_fmt.code);
+       if (!fmt) {
+               ctx_dbg(3, ctx, "mbus code format (0x%08x) not found.\n",
+                       mbus_fmt.code);
+               return -EINVAL;
+       }
+
+       /* Save current subdev format */
+       v4l2_fill_pix_format(&ctx->v_fmt.fmt.pix, &mbus_fmt);
+       ctx->v_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       ctx->v_fmt.fmt.pix.pixelformat  = fmt->fourcc;
+       cal_calc_format_size(ctx, fmt, &ctx->v_fmt);
+       ctx->fmt = fmt;
+       ctx->m_fmt = mbus_fmt;
+
+       return 0;
+}
+
+static int cal_complete_ctx(struct cal_ctx *ctx)
+{
+       struct video_device *vfd;
+       struct vb2_queue *q;
+       int ret;
+
+       ctx->timeperframe = tpf_default;
+       ctx->external_rate = 192000000;
+
+       /* initialize locks */
+       spin_lock_init(&ctx->slock);
+       mutex_init(&ctx->mutex);
+
+       /* initialize queue */
+       q = &ctx->vb_vidq;
+       q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
+       q->drv_priv = ctx;
+       q->buf_struct_size = sizeof(struct cal_buffer);
+       q->ops = &cal_video_qops;
+       q->mem_ops = &vb2_dma_contig_memops;
+       q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+       q->lock = &ctx->mutex;
+       q->min_buffers_needed = 3;
+
+       ret = vb2_queue_init(q);
+       if (ret)
+               return ret;
+
+       /* init video dma queues */
+       INIT_LIST_HEAD(&ctx->vidq.active);
+
+       vfd = &ctx->vdev;
+       *vfd = cal_videodev;
+       vfd->v4l2_dev = &ctx->v4l2_dev;
+       vfd->queue = q;
+
+       /*
+        * Provide a mutex to v4l2 core. It will be used to protect
+        * all fops and v4l2 ioctls.
+        */
+       vfd->lock = &ctx->mutex;
+       video_set_drvdata(vfd, ctx);
+
+       ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr);
+       if (ret < 0)
+               return ret;
+
+       v4l2_info(&ctx->v4l2_dev, "V4L2 device registered as %s\n",
+                 video_device_node_name(vfd));
+
+       ctx->alloc_ctx = vb2_dma_contig_init_ctx(vfd->v4l2_dev->dev);
+       if (IS_ERR(ctx->alloc_ctx)) {
+               ctx_err(ctx, "Failed to alloc vb2 context\n");
+               ret = PTR_ERR(ctx->alloc_ctx);
+               goto vdev_unreg;
+       }
+
+       return 0;
+
+vdev_unreg:
+       video_unregister_device(vfd);
+       return ret;
+}
+
+static struct device_node *
+of_get_next_port(const struct device_node *parent,
+                struct device_node *prev)
+{
+       struct device_node *port = NULL;
+
+       if (!parent)
+               return NULL;
+
+       if (!prev) {
+               struct device_node *ports;
+               /*
+                * It's the first call, we have to find a port subnode
+                * within this node or within an optional 'ports' node.
+                */
+               ports = of_get_child_by_name(parent, "ports");
+               if (ports)
+                       parent = ports;
+
+               port = of_get_child_by_name(parent, "port");
+
+               /* release the 'ports' node */
+               of_node_put(ports);
+       } else {
+               struct device_node *ports;
+
+               ports = of_get_parent(prev);
+               if (!ports)
+                       return NULL;
+
+               do {
+                       port = of_get_next_child(ports, prev);
+                       if (!port) {
+                               of_node_put(ports);
+                               return NULL;
+                       }
+                       prev = port;
+               } while (of_node_cmp(port->name, "port") != 0);
+       }
+
+       return port;
+}
+
+static struct device_node *
+of_get_next_endpoint(const struct device_node *parent,
+                    struct device_node *prev)
+{
+       struct device_node *ep = NULL;
+
+       if (!parent)
+               return NULL;
+
+       do {
+               ep = of_get_next_child(parent, prev);
+               if (!ep)
+                       return NULL;
+               prev = ep;
+       } while (of_node_cmp(ep->name, "endpoint") != 0);
+
+       return ep;
+}
+
+static int of_cal_create_instance(struct cal_ctx *ctx, int inst)
+{
+       struct platform_device *pdev = ctx->dev->pdev;
+       struct device_node *ep_node, *port, *remote_ep,
+                       *sensor_node, *parent;
+       struct v4l2_of_endpoint *endpoint;
+       struct v4l2_async_subdev *asd;
+       u32 regval = 0;
+       int ret, index, found_port = 0, lane;
+
+       parent = pdev->dev.of_node;
+
+       asd = &ctx->asd;
+       endpoint = &ctx->endpoint;
+
+       ep_node = NULL;
+       port = NULL;
+       remote_ep = NULL;
+       sensor_node = NULL;
+       ret = -EINVAL;
+
+       ctx_dbg(3, ctx, "Scanning Port node for csi2 port: %d\n", inst);
+       for (index = 0; index < CAL_NUM_CSI2_PORTS; index++) {
+               port = of_get_next_port(parent, port);
+               if (!port) {
+                       ctx_dbg(1, ctx, "No port node found for csi2 port:%d\n",
+                               index);
+                       goto cleanup_exit;
+               }
+
+               /* Match the slice number with <REG> */
+               of_property_read_u32(port, "reg", &regval);
+               ctx_dbg(3, ctx, "port:%d inst:%d <reg>:%d\n",
+                       index, inst, regval);
+               if ((regval == inst) && (index == inst)) {
+                       found_port = 1;
+                       break;
+               }
+       }
+
+       if (!found_port) {
+               ctx_dbg(1, ctx, "No port node matches csi2 port:%d\n",
+                       inst);
+               goto cleanup_exit;
+       }
+
+       ctx_dbg(3, ctx, "Scanning sub-device for csi2 port: %d\n",
+               inst);
+
+       ep_node = of_get_next_endpoint(port, ep_node);
+       if (!ep_node) {
+               ctx_dbg(3, ctx, "can't get next endpoint\n");
+               goto cleanup_exit;
+       }
+
+       sensor_node = of_graph_get_remote_port_parent(ep_node);
+       if (!sensor_node) {
+               ctx_dbg(3, ctx, "can't get remote parent\n");
+               goto cleanup_exit;
+       }
+       asd->match_type = V4L2_ASYNC_MATCH_OF;
+       asd->match.of.node = sensor_node;
+
+       remote_ep = of_parse_phandle(ep_node, "remote-endpoint", 0);
+       if (!remote_ep) {
+               ctx_dbg(3, ctx, "can't get remote-endpoint\n");
+               goto cleanup_exit;
+       }
+       v4l2_of_parse_endpoint(remote_ep, endpoint);
+
+       if (endpoint->bus_type != V4L2_MBUS_CSI2) {
+               ctx_err(ctx, "Port:%d sub-device %s is not a CSI2 device\n",
+                       inst, sensor_node->name);
+               goto cleanup_exit;
+       }
+
+       /* Store Virtual Channel number */
+       ctx->virtual_channel = endpoint->base.id;
+
+       ctx_dbg(3, ctx, "Port:%d v4l2-endpoint: CSI2\n", inst);
+       ctx_dbg(3, ctx, "Virtual Channel=%d\n", ctx->virtual_channel);
+       ctx_dbg(3, ctx, "flags=0x%08x\n", endpoint->bus.mipi_csi2.flags);
+       ctx_dbg(3, ctx, "clock_lane=%d\n", endpoint->bus.mipi_csi2.clock_lane);
+       ctx_dbg(3, ctx, "num_data_lanes=%d\n",
+               endpoint->bus.mipi_csi2.num_data_lanes);
+       ctx_dbg(3, ctx, "data_lanes= <\n");
+       for (lane = 0; lane < endpoint->bus.mipi_csi2.num_data_lanes; lane++)
+               ctx_dbg(3, ctx, "\t%d\n",
+                       endpoint->bus.mipi_csi2.data_lanes[lane]);
+       ctx_dbg(3, ctx, "\t>\n");
+
+       ctx_dbg(1, ctx, "Port: %d found sub-device %s\n",
+               inst, sensor_node->name);
+
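+       /* register async notifier; the sensor binds via cal_async_bound() */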
+       ctx->asd_list[0] = asd;
+       ctx->notifier.subdevs = ctx->asd_list;
+       ctx->notifier.num_subdevs = 1;
+       ctx->notifier.bound = cal_async_bound;
+       ctx->notifier.complete = cal_async_complete;
+       ret = v4l2_async_notifier_register(&ctx->v4l2_dev,
+                                          &ctx->notifier);
+       if (ret) {
+               ctx_err(ctx, "Error registering async notifier\n");
+               ret = -EINVAL;
+       }
+
+cleanup_exit:
+       if (remote_ep)
+               of_node_put(remote_ep);
+       if (sensor_node)
+               of_node_put(sensor_node);
+       if (ep_node)
+               of_node_put(ep_node);
+       if (port)
+               of_node_put(port);
+
+       return ret;
+}
+
+static struct cal_ctx *cal_create_instance(struct cal_dev *dev, int inst)
+{
+       struct cal_ctx *ctx;
+       struct v4l2_ctrl_handler *hdl;
+       int ret;
+
+       ctx = devm_kzalloc(&dev->pdev->dev, sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return NULL;
+
+       /* save the cal_dev * for future ref */
+       ctx->dev = dev;
+
+       snprintf(ctx->v4l2_dev.name, sizeof(ctx->v4l2_dev.name),
+                "%s-%03d", CAL_MODULE_NAME, inst);
+       ret = v4l2_device_register(&dev->pdev->dev, &ctx->v4l2_dev);
+       if (ret)
+               goto err_exit;
+
+       hdl = &ctx->ctrl_handler;
+       ret = v4l2_ctrl_handler_init(hdl, 11);
+       if (ret) {
+               ctx_err(ctx, "Failed to init ctrl handler\n");
+               goto unreg_dev;
+       }
+       ctx->v4l2_dev.ctrl_handler = hdl;
+
+       /* Make sure Camera Core H/W register area is available */
+       ctx->cc = dev->cc[inst];
+
+       /* Store the instance id */
+       ctx->csi2_port = inst + 1;
+
+       ret = of_cal_create_instance(ctx, inst);
+       if (ret) {
+               ret = -EINVAL;
+               goto free_hdl;
+       }
+       return ctx;
+
+free_hdl:
+       v4l2_ctrl_handler_free(hdl);
+unreg_dev:
+       v4l2_device_unregister(&ctx->v4l2_dev);
+err_exit:
+       return NULL;
+}
+
+static int cal_probe(struct platform_device *pdev)
+{
+       struct cal_dev *dev;
+       int ret;
+       int irq;
+
+       dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+
+       /* set pseudo v4l2 device name so we can use v4l2_printk */
+       strlcpy(dev->v4l2_dev.name, CAL_MODULE_NAME,
+               sizeof(dev->v4l2_dev.name));
+
+       /* save pdev pointer */
+       dev->pdev = pdev;
+
+       dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                               "cal_top");
+       dev->base = devm_ioremap_resource(&pdev->dev, dev->res);
+       if (IS_ERR(dev->base))
+               return PTR_ERR(dev->base);
+
+       cal_dbg(1, dev, "ioresource %s at %pa - %pa\n",
+               dev->res->name, &dev->res->start, &dev->res->end);
+
+       irq = platform_get_irq(pdev, 0);
+       cal_dbg(1, dev, "got irq# %d\n", irq);
+       ret = devm_request_irq(&pdev->dev, irq, cal_irq, 0, CAL_MODULE_NAME,
+                              dev);
+       if (ret)
+               return ret;
+
+       platform_set_drvdata(pdev, dev);
+
+       dev->cm = cm_create(dev);
+       if (IS_ERR(dev->cm))
+               return PTR_ERR(dev->cm);
+
+       dev->cc[0] = cc_create(dev, 0);
+       if (IS_ERR(dev->cc[0]))
+               return PTR_ERR(dev->cc[0]);
+
+       dev->cc[1] = cc_create(dev, 1);
+       if (IS_ERR(dev->cc[1]))
+               return PTR_ERR(dev->cc[1]);
+
+       dev->ctx[0] = NULL;
+       dev->ctx[1] = NULL;
+
+       dev->ctx[0] = cal_create_instance(dev, 0);
+       dev->ctx[1] = cal_create_instance(dev, 1);
+       if (!dev->ctx[0] && !dev->ctx[1]) {
+               cal_err(dev, "Neither port is configured, no point in staying up\n");
+               return -ENODEV;
+       }
+
+       pm_runtime_enable(&pdev->dev);
+
+       ret = cal_runtime_get(dev);
+       if (ret)
+               goto runtime_disable;
+
+       /* Just check we can actually access the module */
+       cal_get_hwinfo(dev);
+
+       cal_runtime_put(dev);
+
+       return 0;
+
+runtime_disable:
+       pm_runtime_disable(&pdev->dev);
+       return ret;
+}
+
+static int cal_remove(struct platform_device *pdev)
+{
+       struct cal_dev *dev = platform_get_drvdata(pdev);
+       struct cal_ctx *ctx;
+       int i;
+
+       cal_dbg(1, dev, "Removing %s\n", CAL_MODULE_NAME);
+
+       cal_runtime_get(dev);
+
+       for (i = 0; i < CAL_NUM_CONTEXT; i++) {
+               ctx = dev->ctx[i];
+               if (ctx) {
+                       ctx_dbg(1, ctx, "unregistering %s\n",
+                               video_device_node_name(&ctx->vdev));
+                       camerarx_phy_disable(ctx);
+                       v4l2_async_notifier_unregister(&ctx->notifier);
+                       vb2_dma_contig_cleanup_ctx(ctx->alloc_ctx);
+                       v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+                       v4l2_device_unregister(&ctx->v4l2_dev);
+                       video_unregister_device(&ctx->vdev);
+               }
+       }
+
+       cal_runtime_put(dev);
+       pm_runtime_disable(&pdev->dev);
+
+       return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id cal_of_match[] = {
+       { .compatible = "ti,dra72-cal", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, cal_of_match);
+#endif
+
+static struct platform_driver cal_pdrv = {
+       .probe          = cal_probe,
+       .remove         = cal_remove,
+       .driver         = {
+               .name   = CAL_MODULE_NAME,
+               .of_match_table = of_match_ptr(cal_of_match),
+       },
+};
+
+module_platform_driver(cal_pdrv);
diff --git a/drivers/media/platform/ti-vpe/cal_regs.h b/drivers/media/platform/ti-vpe/cal_regs.h
new file mode 100644 (file)
index 0000000..82b3dcf
--- /dev/null
@@ -0,0 +1,479 @@
+/*
+ * TI CAL camera interface driver
+ *
+ * Copyright (c) 2015 Texas Instruments Inc.
+ *
+ * Benoit Parrot, <bparrot@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef __TI_CAL_REGS_H
+#define __TI_CAL_REGS_H
+
+#define CAL_NUM_CSI2_PORTS             2
+
+/* CAL register offsets */
+
+#define CAL_HL_REVISION                        0x0000
+#define CAL_HL_HWINFO                  0x0004
+#define CAL_HL_SYSCONFIG               0x0010
+#define CAL_HL_IRQ_EOI                 0x001c
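+/* per-instance registers take a 1-based index m, hence the (m-1) scaling */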
+#define CAL_HL_IRQSTATUS_RAW(m)                (0x20U + ((m-1) * 0x10U))
+#define CAL_HL_IRQSTATUS(m)            (0x24U + ((m-1) * 0x10U))
+#define CAL_HL_IRQENABLE_SET(m)                (0x28U + ((m-1) * 0x10U))
+#define CAL_HL_IRQENABLE_CLR(m)                (0x2cU + ((m-1) * 0x10U))
+#define CAL_PIX_PROC(m)                        (0xc0U + ((m-1) * 0x4U))
+#define CAL_CTRL                       0x100
+#define CAL_CTRL1                      0x104
+#define CAL_LINE_NUMBER_EVT            0x108
+#define CAL_VPORT_CTRL1                        0x120
+#define CAL_VPORT_CTRL2                        0x124
+#define CAL_BYS_CTRL1                  0x130
+#define CAL_BYS_CTRL2                  0x134
+#define CAL_RD_DMA_CTRL                        0x140
+#define CAL_RD_DMA_PIX_ADDR            0x144
+#define CAL_RD_DMA_PIX_OFST            0x148
+#define CAL_RD_DMA_XSIZE               0x14c
+#define CAL_RD_DMA_YSIZE               0x150
+#define CAL_RD_DMA_INIT_ADDR           0x154
+#define CAL_RD_DMA_INIT_OFST           0x168
+#define CAL_RD_DMA_CTRL2               0x16c
+#define CAL_WR_DMA_CTRL(m)             (0x200U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_ADDR(m)             (0x204U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_OFST(m)             (0x208U + ((m-1) * 0x10U))
+#define CAL_WR_DMA_XSIZE(m)            (0x20cU + ((m-1) * 0x10U))
+#define CAL_CSI2_PPI_CTRL(m)           (0x300U + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_CFG(m)      (0x304U + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_IRQSTATUS(m)        (0x308U + ((m-1) * 0x80U))
+#define CAL_CSI2_SHORT_PACKET(m)       (0x30cU + ((m-1) * 0x80U))
+#define CAL_CSI2_COMPLEXIO_IRQENABLE(m)        (0x310U + ((m-1) * 0x80U))
+#define CAL_CSI2_TIMING(m)             (0x314U + ((m-1) * 0x80U))
+#define CAL_CSI2_VC_IRQENABLE(m)       (0x318U + ((m-1) * 0x80U))
+#define CAL_CSI2_VC_IRQSTATUS(m)       (0x328U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX0(m)               (0x330U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX1(m)               (0x334U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX2(m)               (0x338U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX3(m)               (0x33cU + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX4(m)               (0x340U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX5(m)               (0x344U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX6(m)               (0x348U + ((m-1) * 0x80U))
+#define CAL_CSI2_CTX7(m)               (0x34cU + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS0(m)            (0x350U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS1(m)            (0x354U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS2(m)            (0x358U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS3(m)            (0x35cU + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS4(m)            (0x360U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS5(m)            (0x364U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS6(m)            (0x368U + ((m-1) * 0x80U))
+#define CAL_CSI2_STATUS7(m)            (0x36cU + ((m-1) * 0x80U))
+
+/* CAL CSI2 PHY register offsets */
+#define CAL_CSI2_PHY_REG0              0x000
+#define CAL_CSI2_PHY_REG1              0x004
+#define CAL_CSI2_PHY_REG2              0x008
+
+/* CAL Control Module Core Camerrx Control register offsets */
+#define CM_CTRL_CORE_CAMERRX_CONTROL   0x000
+
+/*********************************************************************
+* Generic values used in various fields below
+*********************************************************************/
+
+#define CAL_GEN_DISABLE                        0
+#define CAL_GEN_ENABLE                 1
+#define CAL_GEN_FALSE                  0
+#define CAL_GEN_TRUE                   1
+
+/*********************************************************************
+* Field Definition Macros
+*********************************************************************/
+
+#define CAL_HL_REVISION_MINOR_MASK             GENMASK(5, 0)
+#define CAL_HL_REVISION_CUSTOM_MASK            GENMASK(7, 6)
+#define CAL_HL_REVISION_MAJOR_MASK             GENMASK(10, 8)
+#define CAL_HL_REVISION_RTL_MASK               GENMASK(15, 11)
+#define CAL_HL_REVISION_FUNC_MASK              GENMASK(27, 16)
+#define CAL_HL_REVISION_SCHEME_MASK            GENMASK(31, 30)
+#define CAL_HL_REVISION_SCHEME_H08                     1
+#define CAL_HL_REVISION_SCHEME_LEGACY                  0
+
+#define CAL_HL_HWINFO_WFIFO_MASK               GENMASK(3, 0)
+#define CAL_HL_HWINFO_RFIFO_MASK               GENMASK(7, 4)
+#define CAL_HL_HWINFO_PCTX_MASK                        GENMASK(12, 8)
+#define CAL_HL_HWINFO_WCTX_MASK                        GENMASK(18, 13)
+#define CAL_HL_HWINFO_VFIFO_MASK               GENMASK(22, 19)
+#define CAL_HL_HWINFO_NCPORT_MASK              GENMASK(27, 23)
+#define CAL_HL_HWINFO_NPPI_CTXS0_MASK          GENMASK(29, 28)
+#define CAL_HL_HWINFO_NPPI_CTXS1_MASK          GENMASK(31, 30)
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_ZERO               0
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_FOUR               1
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_EIGHT              2
+#define CAL_HL_HWINFO_NPPI_CONTEXTS_RESERVED           3
+
+#define CAL_HL_SYSCONFIG_SOFTRESET_MASK                BIT_MASK(0)
+#define CAL_HL_SYSCONFIG_SOFTRESET_DONE                        0x0
+#define CAL_HL_SYSCONFIG_SOFTRESET_PENDING             0x1
+#define CAL_HL_SYSCONFIG_SOFTRESET_NOACTION            0x0
+#define CAL_HL_SYSCONFIG_SOFTRESET_RESET               0x1
+#define CAL_HL_SYSCONFIG_IDLE_MASK             GENMASK(3, 2)
+#define CAL_HL_SYSCONFIG_IDLEMODE_FORCE                        0
+#define CAL_HL_SYSCONFIG_IDLEMODE_NO                   1
+#define CAL_HL_SYSCONFIG_IDLEMODE_SMART1               2
+#define CAL_HL_SYSCONFIG_IDLEMODE_SMART2               3
+
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_MASK                BIT_MASK(0)
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_READ0               0
+#define CAL_HL_IRQ_EOI_LINE_NUMBER_EOI0                        0
+
+#define CAL_HL_IRQ_MASK(m)                     BIT_MASK(m-1)
+#define CAL_HL_IRQ_NOACTION                            0x0
+#define CAL_HL_IRQ_ENABLE                              0x1
+#define CAL_HL_IRQ_CLEAR                               0x1
+#define CAL_HL_IRQ_DISABLED                            0x0
+#define CAL_HL_IRQ_ENABLED                             0x1
+#define CAL_HL_IRQ_PENDING                             0x1
+
+#define CAL_PIX_PROC_EN_MASK                   BIT_MASK(0)
+#define CAL_PIX_PROC_EXTRACT_MASK              GENMASK(4, 1)
+#define CAL_PIX_PROC_EXTRACT_B6                                0x0
+#define CAL_PIX_PROC_EXTRACT_B7                                0x1
+#define CAL_PIX_PROC_EXTRACT_B8                                0x2
+#define CAL_PIX_PROC_EXTRACT_B10                       0x3
+#define CAL_PIX_PROC_EXTRACT_B10_MIPI                  0x4
+#define CAL_PIX_PROC_EXTRACT_B12                       0x5
+#define CAL_PIX_PROC_EXTRACT_B12_MIPI                  0x6
+#define CAL_PIX_PROC_EXTRACT_B14                       0x7
+#define CAL_PIX_PROC_EXTRACT_B14_MIPI                  0x8
+#define CAL_PIX_PROC_EXTRACT_B16_BE                    0x9
+#define CAL_PIX_PROC_EXTRACT_B16_LE                    0xa
+#define CAL_PIX_PROC_DPCMD_MASK                        GENMASK(9, 5)
+#define CAL_PIX_PROC_DPCMD_BYPASS                      0x0
+#define CAL_PIX_PROC_DPCMD_DPCM_10_8_1                 0x2
+#define CAL_PIX_PROC_DPCMD_DPCM_12_8_1                 0x8
+#define CAL_PIX_PROC_DPCMD_DPCM_10_7_1                 0x4
+#define CAL_PIX_PROC_DPCMD_DPCM_10_7_2                 0x5
+#define CAL_PIX_PROC_DPCMD_DPCM_10_6_1                 0x6
+#define CAL_PIX_PROC_DPCMD_DPCM_10_6_2                 0x7
+#define CAL_PIX_PROC_DPCMD_DPCM_12_7_1                 0xa
+#define CAL_PIX_PROC_DPCMD_DPCM_12_6_1                 0xc
+#define CAL_PIX_PROC_DPCMD_DPCM_14_10                  0xe
+#define CAL_PIX_PROC_DPCMD_DPCM_14_8_1                 0x10
+#define CAL_PIX_PROC_DPCMD_DPCM_16_12_1                        0x12
+#define CAL_PIX_PROC_DPCMD_DPCM_16_10_1                        0x14
+#define CAL_PIX_PROC_DPCMD_DPCM_16_8_1                 0x16
+#define CAL_PIX_PROC_DPCME_MASK                        GENMASK(15, 11)
+#define CAL_PIX_PROC_DPCME_BYPASS                      0x0
+#define CAL_PIX_PROC_DPCME_DPCM_10_8_1                 0x2
+#define CAL_PIX_PROC_DPCME_DPCM_12_8_1                 0x8
+#define CAL_PIX_PROC_DPCME_DPCM_14_10                  0xe
+#define CAL_PIX_PROC_DPCME_DPCM_14_8_1                 0x10
+#define CAL_PIX_PROC_DPCME_DPCM_16_12_1                        0x12
+#define CAL_PIX_PROC_DPCME_DPCM_16_10_1                        0x14
+#define CAL_PIX_PROC_DPCME_DPCM_16_8_1                 0x16
+#define CAL_PIX_PROC_PACK_MASK                 GENMASK(18, 16)
+#define CAL_PIX_PROC_PACK_B8                           0x0
+#define CAL_PIX_PROC_PACK_B10_MIPI                     0x2
+#define CAL_PIX_PROC_PACK_B12                          0x3
+#define CAL_PIX_PROC_PACK_B12_MIPI                     0x4
+#define CAL_PIX_PROC_PACK_B16                          0x5
+#define CAL_PIX_PROC_PACK_ARGB                         0x6
+#define CAL_PIX_PROC_CPORT_MASK                        GENMASK(23, 19)
+
+#define CAL_CTRL_POSTED_WRITES_MASK            BIT_MASK(0)
+#define CAL_CTRL_POSTED_WRITES_NONPOSTED               0
+#define CAL_CTRL_POSTED_WRITES                         1
+#define CAL_CTRL_TAGCNT_MASK                   GENMASK(4, 1)
+#define CAL_CTRL_BURSTSIZE_MASK                        GENMASK(6, 5)
+#define CAL_CTRL_BURSTSIZE_BURST16                     0x0
+#define CAL_CTRL_BURSTSIZE_BURST32                     0x1
+#define CAL_CTRL_BURSTSIZE_BURST64                     0x2
+#define CAL_CTRL_BURSTSIZE_BURST128                    0x3
+#define CAL_CTRL_LL_FORCE_STATE_MASK           GENMASK(12, 7)
+#define CAL_CTRL_MFLAGL_MASK                   GENMASK(20, 13)
+#define CAL_CTRL_PWRSCPCLK_MASK                        BIT_MASK(21)
+#define CAL_CTRL_PWRSCPCLK_AUTO                                0
+#define CAL_CTRL_PWRSCPCLK_FORCE                       1
+#define CAL_CTRL_RD_DMA_STALL_MASK             BIT_MASK(22)
+#define CAL_CTRL_MFLAGH_MASK                   GENMASK(31, 24)
+
+#define CAL_CTRL1_PPI_GROUPING_MASK            GENMASK(1, 0)
+#define CAL_CTRL1_PPI_GROUPING_DISABLED                        0
+#define CAL_CTRL1_PPI_GROUPING_RESERVED                        1
+#define CAL_CTRL1_PPI_GROUPING_0                       2
+#define CAL_CTRL1_PPI_GROUPING_1                       3
+#define CAL_CTRL1_INTERLEAVE01_MASK            GENMASK(3, 2)
+#define CAL_CTRL1_INTERLEAVE01_DISABLED                        0
+#define CAL_CTRL1_INTERLEAVE01_PIX1                    1
+#define CAL_CTRL1_INTERLEAVE01_PIX4                    2
+#define CAL_CTRL1_INTERLEAVE01_RESERVED                        3
+#define CAL_CTRL1_INTERLEAVE23_MASK            GENMASK(5, 4)
+#define CAL_CTRL1_INTERLEAVE23_DISABLED                        0
+#define CAL_CTRL1_INTERLEAVE23_PIX1                    1
+#define CAL_CTRL1_INTERLEAVE23_PIX4                    2
+#define CAL_CTRL1_INTERLEAVE23_RESERVED                        3
+
+#define CAL_LINE_NUMBER_EVT_CPORT_MASK         GENMASK(4, 0)
+#define CAL_LINE_NUMBER_EVT_MASK               GENMASK(29, 16)
+
+#define CAL_VPORT_CTRL1_PCLK_MASK              GENMASK(16, 0)
+#define CAL_VPORT_CTRL1_XBLK_MASK              GENMASK(24, 17)
+#define CAL_VPORT_CTRL1_YBLK_MASK              GENMASK(30, 25)
+#define CAL_VPORT_CTRL1_WIDTH_MASK             BIT_MASK(31)
+#define CAL_VPORT_CTRL1_WIDTH_ONE                      0
+#define CAL_VPORT_CTRL1_WIDTH_TWO                      1
+
+#define CAL_VPORT_CTRL2_CPORT_MASK             GENMASK(4, 0)
+#define CAL_VPORT_CTRL2_FREERUNNING_MASK       BIT_MASK(15)
+#define CAL_VPORT_CTRL2_FREERUNNING_GATED              0
+#define CAL_VPORT_CTRL2_FREERUNNING_FREE               1
+#define CAL_VPORT_CTRL2_FS_RESETS_MASK         BIT_MASK(16)
+#define CAL_VPORT_CTRL2_FS_RESETS_NO                   0
+#define CAL_VPORT_CTRL2_FS_RESETS_YES                  1
+#define CAL_VPORT_CTRL2_FSM_RESET_MASK         BIT_MASK(17)
+#define CAL_VPORT_CTRL2_FSM_RESET_NOEFFECT             0
+#define CAL_VPORT_CTRL2_FSM_RESET                      1
+#define CAL_VPORT_CTRL2_RDY_THR_MASK           GENMASK(31, 18)
+
+#define CAL_BYS_CTRL1_PCLK_MASK                        GENMASK(16, 0)
+#define CAL_BYS_CTRL1_XBLK_MASK                        GENMASK(24, 17)
+#define CAL_BYS_CTRL1_YBLK_MASK                        GENMASK(30, 25)
+#define CAL_BYS_CTRL1_BYSINEN_MASK             BIT_MASK(31)
+
+#define CAL_BYS_CTRL2_CPORTIN_MASK             GENMASK(4, 0)
+#define CAL_BYS_CTRL2_CPORTOUT_MASK            GENMASK(9, 5)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_MASK      BIT_MASK(10)
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_NO                        0
+#define CAL_BYS_CTRL2_DUPLICATEDDATA_YES               1
+#define CAL_BYS_CTRL2_FREERUNNING_MASK         BIT_MASK(11)
+#define CAL_BYS_CTRL2_FREERUNNING_NO                   0
+#define CAL_BYS_CTRL2_FREERUNNING_YES                  1
+
+#define CAL_RD_DMA_CTRL_GO_MASK                        BIT_MASK(0)
+#define CAL_RD_DMA_CTRL_GO_DIS                         0
+#define CAL_RD_DMA_CTRL_GO_EN                          1
+#define CAL_RD_DMA_CTRL_GO_IDLE                                0
+#define CAL_RD_DMA_CTRL_GO_BUSY                                1
+#define CAL_RD_DMA_CTRL_INIT_MASK              BIT_MASK(1)
+#define CAL_RD_DMA_CTRL_BW_LIMITER_MASK                GENMASK(10, 2)
+#define CAL_RD_DMA_CTRL_OCP_TAG_CNT_MASK       GENMASK(14, 11)
+#define CAL_RD_DMA_CTRL_PCLK_MASK              GENMASK(31, 15)
+
+#define CAL_RD_DMA_PIX_ADDR_MASK               GENMASK(31, 3)
+
+#define CAL_RD_DMA_PIX_OFST_MASK               GENMASK(31, 4)
+
+#define CAL_RD_DMA_XSIZE_MASK                  GENMASK(31, 19)
+
+#define CAL_RD_DMA_YSIZE_MASK                  GENMASK(29, 16)
+
+#define CAL_RD_DMA_INIT_ADDR_MASK              GENMASK(31, 3)
+
+#define CAL_RD_DMA_INIT_OFST_MASK              GENMASK(31, 3)
+
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_MASK                GENMASK(2, 0)
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_DIS                 0
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_ONE                 1
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_FOUR                        2
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTEEN             3
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_SIXTYFOUR           4
+#define CAL_RD_DMA_CTRL2_CIRC_MODE_RESERVED            5
+#define CAL_RD_DMA_CTRL2_ICM_CSTART_MASK       BIT_MASK(3)
+#define CAL_RD_DMA_CTRL2_PATTERN_MASK          GENMASK(5, 4)
+#define CAL_RD_DMA_CTRL2_PATTERN_LINEAR                        0
+#define CAL_RD_DMA_CTRL2_PATTERN_YUV420                        1
+#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP2              2
+#define CAL_RD_DMA_CTRL2_PATTERN_RD2SKIP4              3
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_MASK   BIT_MASK(6)
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_FREERUNNING    0
+#define CAL_RD_DMA_CTRL2_BYSOUT_LE_WAIT_WAITFORBYSOUT  1
+#define CAL_RD_DMA_CTRL2_CIRC_SIZE_MASK                GENMASK(29, 16)
+
+#define CAL_WR_DMA_CTRL_MODE_MASK              GENMASK(2, 0)
+#define CAL_WR_DMA_CTRL_MODE_DIS                       0
+#define CAL_WR_DMA_CTRL_MODE_SHD                       1
+#define CAL_WR_DMA_CTRL_MODE_CNT                       2
+#define CAL_WR_DMA_CTRL_MODE_CNT_INIT                  3
+#define CAL_WR_DMA_CTRL_MODE_CONST                     4
+#define CAL_WR_DMA_CTRL_MODE_RESERVED                  5
+#define CAL_WR_DMA_CTRL_PATTERN_MASK           GENMASK(4, 3)
+#define CAL_WR_DMA_CTRL_PATTERN_LINEAR                 0
+#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP2               2
+#define CAL_WR_DMA_CTRL_PATTERN_WR2SKIP4               3
+#define CAL_WR_DMA_CTRL_PATTERN_RESERVED               1
+#define CAL_WR_DMA_CTRL_ICM_PSTART_MASK                BIT_MASK(5)
+#define CAL_WR_DMA_CTRL_DTAG_MASK              GENMASK(8, 6)
+#define CAL_WR_DMA_CTRL_DTAG_ATT_HDR                   0
+#define CAL_WR_DMA_CTRL_DTAG_ATT_DAT                   1
+#define CAL_WR_DMA_CTRL_DTAG                           2
+#define CAL_WR_DMA_CTRL_DTAG_PIX_HDR                   3
+#define CAL_WR_DMA_CTRL_DTAG_PIX_DAT                   4
+#define CAL_WR_DMA_CTRL_DTAG_D5                                5
+#define CAL_WR_DMA_CTRL_DTAG_D6                                6
+#define CAL_WR_DMA_CTRL_DTAG_D7                                7
+#define CAL_WR_DMA_CTRL_CPORT_MASK             GENMASK(13, 9)
+#define CAL_WR_DMA_CTRL_STALL_RD_MASK          BIT_MASK(14)
+#define CAL_WR_DMA_CTRL_YSIZE_MASK             GENMASK(31, 18)
+
+#define CAL_WR_DMA_ADDR_MASK                   GENMASK(31, 4)
+
+#define CAL_WR_DMA_OFST_MASK                   GENMASK(18, 4)
+#define CAL_WR_DMA_OFST_CIRC_MODE_MASK         GENMASK(23, 22)
+#define CAL_WR_DMA_OFST_CIRC_MODE_ONE                  1
+#define CAL_WR_DMA_OFST_CIRC_MODE_FOUR                 2
+#define CAL_WR_DMA_OFST_CIRC_MODE_SIXTYFOUR            3
+#define CAL_WR_DMA_OFST_CIRC_MODE_DISABLED             0
+#define CAL_WR_DMA_OFST_CIRC_SIZE_MASK         GENMASK(31, 24)
+
+#define CAL_WR_DMA_XSIZE_XSKIP_MASK            GENMASK(15, 3)
+#define CAL_WR_DMA_XSIZE_MASK                  GENMASK(31, 19)
+
+#define CAL_CSI2_PPI_CTRL_IF_EN_MASK           BIT_MASK(0)
+#define CAL_CSI2_PPI_CTRL_ECC_EN_MASK          BIT_MASK(2)
+#define CAL_CSI2_PPI_CTRL_FRAME_MASK           BIT_MASK(3)
+#define CAL_CSI2_PPI_CTRL_FRAME_IMMEDIATE              0
+#define CAL_CSI2_PPI_CTRL_FRAME                                1
+
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POSITION_MASK     GENMASK(2, 0)
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_5                      5
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_4                      4
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_3                      3
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_2                      2
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_1                      1
+#define CAL_CSI2_COMPLEXIO_CFG_POSITION_NOT_USED               0
+#define CAL_CSI2_COMPLEXIO_CFG_CLOCK_POL_MASK          BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_CFG_POL_PLUSMINUS                   0
+#define CAL_CSI2_COMPLEXIO_CFG_POL_MINUSPLUS                   1
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POSITION_MASK     GENMASK(6, 4)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA1_POL_MASK          BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POSITION_MASK     GENMASK(10, 8)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA2_POL_MASK          BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POSITION_MASK     GENMASK(14, 12)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA3_POL_MASK          BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POSITION_MASK     GENMASK(18, 16)
+#define CAL_CSI2_COMPLEXIO_CFG_DATA4_POL_MASK          BIT_MASK(19)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_AUTO_MASK           BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_MASK         GENMASK(26, 25)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_OFF            0
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ON             1
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_STATUS_STATE_ULP            2
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_MASK            GENMASK(28, 27)
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_OFF               0
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ON                        1
+#define CAL_CSI2_COMPLEXIO_CFG_PWR_CMD_STATE_ULP               2
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_MASK         BIT_MASK(29)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETCOMPLETED       1
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_DONE_RESETONGOING         0
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_MASK         BIT_MASK(30)
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL                      0
+#define CAL_CSI2_COMPLEXIO_CFG_RESET_CTRL_OPERATIONAL          1
+
+#define CAL_CSI2_SHORT_PACKET_MASK     GENMASK(23, 0)
+
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS1_MASK          BIT_MASK(0)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS2_MASK          BIT_MASK(1)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS3_MASK          BIT_MASK(2)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS4_MASK          BIT_MASK(3)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTHS5_MASK          BIT_MASK(4)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS1_MASK      BIT_MASK(5)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS2_MASK      BIT_MASK(6)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS3_MASK      BIT_MASK(7)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS4_MASK      BIT_MASK(8)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRSOTSYNCHS5_MASK      BIT_MASK(9)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC1_MASK            BIT_MASK(10)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC2_MASK            BIT_MASK(11)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC3_MASK            BIT_MASK(12)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC4_MASK            BIT_MASK(13)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRESC5_MASK            BIT_MASK(14)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL1_MASK                BIT_MASK(15)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL2_MASK                BIT_MASK(16)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL3_MASK                BIT_MASK(17)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL4_MASK                BIT_MASK(18)
+#define CAL_CSI2_COMPLEXIO_IRQ_ERRCONTROL5_MASK                BIT_MASK(19)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM1_MASK         BIT_MASK(20)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM2_MASK         BIT_MASK(21)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM3_MASK         BIT_MASK(22)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM4_MASK         BIT_MASK(23)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEULPM5_MASK         BIT_MASK(24)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMENTER_MASK  BIT_MASK(25)
+#define CAL_CSI2_COMPLEXIO_IRQ_STATEALLULPMEXIT_MASK   BIT_MASK(26)
+#define CAL_CSI2_COMPLEXIO_IRQ_FIFO_OVR_MASK           BIT_MASK(27)
+#define CAL_CSI2_COMPLEXIO_IRQ_SHORT_PACKET_MASK       BIT_MASK(28)
+#define CAL_CSI2_COMPLEXIO_IRQ_ECC_NO_CORRECTION_MASK  BIT_MASK(30)
+
+#define CAL_CSI2_TIMING_STOP_STATE_COUNTER_IO1_MASK    GENMASK(12, 0)
+#define CAL_CSI2_TIMING_STOP_STATE_X4_IO1_MASK         BIT_MASK(13)
+#define CAL_CSI2_TIMING_STOP_STATE_X16_IO1_MASK                BIT_MASK(14)
+#define CAL_CSI2_TIMING_FORCE_RX_MODE_IO1_MASK         BIT_MASK(15)
+
+#define CAL_CSI2_VC_IRQ_FS_IRQ_0_MASK                  BIT_MASK(0)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_0_MASK                  BIT_MASK(1)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_0_MASK                  BIT_MASK(2)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_0_MASK                  BIT_MASK(3)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_0_MASK                  BIT_MASK(4)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_0_MASK     BIT_MASK(5)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_1_MASK                  BIT_MASK(8)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_1_MASK                  BIT_MASK(9)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_1_MASK                  BIT_MASK(10)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_1_MASK                  BIT_MASK(11)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_1_MASK                  BIT_MASK(12)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_1_MASK     BIT_MASK(13)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_2_MASK                  BIT_MASK(16)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_2_MASK                  BIT_MASK(17)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_2_MASK                  BIT_MASK(18)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_2_MASK                  BIT_MASK(19)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_2_MASK                  BIT_MASK(20)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_2_MASK     BIT_MASK(21)
+#define CAL_CSI2_VC_IRQ_FS_IRQ_3_MASK                  BIT_MASK(24)
+#define CAL_CSI2_VC_IRQ_FE_IRQ_3_MASK                  BIT_MASK(25)
+#define CAL_CSI2_VC_IRQ_LS_IRQ_3_MASK                  BIT_MASK(26)
+#define CAL_CSI2_VC_IRQ_LE_IRQ_3_MASK                  BIT_MASK(27)
+#define CAL_CSI2_VC_IRQ_CS_IRQ_3_MASK                  BIT_MASK(28)
+#define CAL_CSI2_VC_IRQ_ECC_CORRECTION0_IRQ_3_MASK     BIT_MASK(29)
+
+#define CAL_CSI2_CTX_DT_MASK           GENMASK(5, 0)
+#define CAL_CSI2_CTX_VC_MASK           GENMASK(7, 6)
+#define CAL_CSI2_CTX_CPORT_MASK                GENMASK(12, 8)
+#define CAL_CSI2_CTX_ATT_MASK          BIT_MASK(13)
+#define CAL_CSI2_CTX_ATT_PIX                   0
+#define CAL_CSI2_CTX_ATT                       1
+#define CAL_CSI2_CTX_PACK_MODE_MASK    BIT_MASK(14)
+#define CAL_CSI2_CTX_PACK_MODE_LINE            0
+#define CAL_CSI2_CTX_PACK_MODE_FRAME           1
+#define CAL_CSI2_CTX_LINES_MASK                GENMASK(29, 16)
+
+#define CAL_CSI2_STATUS_FRAME_MASK     GENMASK(15, 0)
+
+#define CAL_CSI2_PHY_REG0_THS_SETTLE_MASK      GENMASK(7, 0)
+#define CAL_CSI2_PHY_REG0_THS_TERM_MASK                GENMASK(15, 8)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_MASK   BIT_MASK(24)
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_DISABLE                1
+#define CAL_CSI2_PHY_REG0_HSCLOCKCONFIG_ENABLE         0
+
+#define CAL_CSI2_PHY_REG1_TCLK_SETTLE_MASK                     GENMASK(7, 0)
+#define CAL_CSI2_PHY_REG1_CTRLCLK_DIV_FACTOR_MASK              GENMASK(9, 8)
+#define CAL_CSI2_PHY_REG1_DPHY_HS_SYNC_PATTERN_MASK            GENMASK(17, 10)
+#define CAL_CSI2_PHY_REG1_TCLK_TERM_MASK                       GENMASK(24, 18)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_MASK      BIT_MASK(25)
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_ERROR             1
+#define CAL_CSI2_PHY_REG1_CLOCK_MISS_DETECTOR_STATUS_SUCCESS           0
+#define CAL_CSI2_PHY_REG1_RESET_DONE_STATUS_MASK               GENMASK(29, 28)
+
+#define CAL_CSI2_PHY_REG2_CCP2_SYNC_PATTERN_MASK               GENMASK(23, 0)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC3_MASK          GENMASK(25, 24)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC2_MASK          GENMASK(27, 26)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC1_MASK          GENMASK(29, 28)
+#define CAL_CSI2_PHY_REG2_TRIGGER_CMD_RXTRIGESC0_MASK          GENMASK(31, 30)
+
+#define CM_CAMERRX_CTRL_CSI1_CTRLCLKEN_MASK                    BIT_MASK(0)
+#define CM_CAMERRX_CTRL_CSI1_CAMMODE_MASK                      GENMASK(2, 1)
+#define CM_CAMERRX_CTRL_CSI1_LANEENABLE_MASK                   GENMASK(4, 3)
+#define CM_CAMERRX_CTRL_CSI1_MODE_MASK                         BIT_MASK(5)
+#define CM_CAMERRX_CTRL_CSI0_CTRLCLKEN_MASK                    BIT_MASK(10)
+#define CM_CAMERRX_CTRL_CSI0_CAMMODE_MASK                      GENMASK(12, 11)
+#define CM_CAMERRX_CTRL_CSI0_LANEENABLE_MASK                   GENMASK(16, 13)
+#define CM_CAMERRX_CTRL_CSI0_MODE_MASK                         BIT_MASK(17)
+
+#endif
index 418113c998013d5fabc083364b662dc17d259e79..c4b5fab836663d2ea07f9c240327d52f80529e83 100644 (file)
@@ -1074,7 +1074,7 @@ static int __init vim2m_init(void)
        if (ret)
                platform_device_unregister(&vim2m_pdev);
 
-       return 0;
+       return ret;
 }
 
 module_init(vim2m_init);
index 9baed6a1033433e9ebd3f23eb35d7d65b7d2f2ca..93fbaee69675a3988033c2af1af9bd3613597d7f 100644 (file)
@@ -418,6 +418,8 @@ static inline void tpg_s_bytesperline(struct tpg_data *tpg, unsigned plane, unsi
 
                tpg->bytesperline[p] = plane_w / tpg->hdownsampling[p];
        }
+       if (tpg_g_interleaved(tpg))
+               tpg->bytesperline[1] = tpg->bytesperline[0];
 }
 
 
index 42dff9d020afaf66c70e528b58d2f005a53203f3..533bc796391ed3053a5f21c92e94eba92cdc46a4 100644 (file)
@@ -256,7 +256,7 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
 
        /* Create links. */
        list_for_each_entry(entity, &vsp1->entities, list_dev) {
-               if (entity->type == VSP1_ENTITY_LIF) {
+               if (entity->type == VSP1_ENTITY_WPF) {
                        ret = vsp1_wpf_create_links(vsp1, entity);
                        if (ret < 0)
                                goto done;
@@ -264,7 +264,10 @@ static int vsp1_create_entities(struct vsp1_device *vsp1)
                        ret = vsp1_rpf_create_links(vsp1, entity);
                        if (ret < 0)
                                goto done;
-               } else {
+               }
+
+               if (entity->type != VSP1_ENTITY_LIF &&
+                   entity->type != VSP1_ENTITY_RPF) {
                        ret = vsp1_create_links(vsp1, entity);
                        if (ret < 0)
                                goto done;
index 637d0d6f79fba5fb511b6fba37fb24875d913dbd..b4dca57d1ae3f6d78a3417251089939a7da15f77 100644 (file)
@@ -515,7 +515,7 @@ static bool vsp1_pipeline_stopped(struct vsp1_pipeline *pipe)
        bool stopped;
 
        spin_lock_irqsave(&pipe->irqlock, flags);
-       stopped = pipe->state == VSP1_PIPELINE_STOPPED,
+       stopped = pipe->state == VSP1_PIPELINE_STOPPED;
        spin_unlock_irqrestore(&pipe->irqlock, flags);
 
        return stopped;
index 859f0c08ee0543c67af2a4ecbf368906722fbba0..271f725b17e85c77017f08ac9181510369f16872 100644 (file)
@@ -1530,11 +1530,11 @@ static int si476x_radio_probe(struct platform_device *pdev)
        if (si476x_core_has_diversity(radio->core)) {
                si476x_ctrls[SI476X_IDX_DIVERSITY_MODE].def =
                        si476x_phase_diversity_mode_to_idx(radio->core->diversity_mode);
-               si476x_radio_add_new_custom(radio, SI476X_IDX_DIVERSITY_MODE);
+               rval = si476x_radio_add_new_custom(radio, SI476X_IDX_DIVERSITY_MODE);
                if (rval < 0)
                        goto exit;
 
-               si476x_radio_add_new_custom(radio, SI476X_IDX_INTERCHIP_LINK);
+               rval = si476x_radio_add_new_custom(radio, SI476X_IDX_INTERCHIP_LINK);
                if (rval < 0)
                        goto exit;
        }
index ebc73b03424960fe65f3db7721ffedb79a0e1d41..3f9e6df7d837ac27d060a1d7a5413de92273f57f 100644 (file)
@@ -68,7 +68,7 @@ MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");
 /* RDS buffer blocks */
 static u32 default_rds_buf = 300;
 module_param(default_rds_buf, uint, 0444);
-MODULE_PARM_DESC(rds_buf, "RDS buffer entries");
+MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");
 
 /* Radio Nr */
 static u32 radio_nr = -1;
index 18adf580f502443bafc7c0e310d2556e5908f7dc..c96da3aaf00b56b0489644b835a466942727bfe9 100644 (file)
@@ -80,17 +80,24 @@ static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
 }
 
 /* enter extended function mode */
-static inline void nvt_efm_enable(struct nvt_dev *nvt)
+static inline int nvt_efm_enable(struct nvt_dev *nvt)
 {
+       if (!request_muxed_region(nvt->cr_efir, 2, NVT_DRIVER_NAME))
+               return -EBUSY;
+
        /* Enabling Extended Function Mode explicitly requires writing 2x */
        outb(EFER_EFM_ENABLE, nvt->cr_efir);
        outb(EFER_EFM_ENABLE, nvt->cr_efir);
+
+       return 0;
 }
 
 /* exit extended function mode */
 static inline void nvt_efm_disable(struct nvt_dev *nvt)
 {
        outb(EFER_EFM_DISABLE, nvt->cr_efir);
+
+       release_region(nvt->cr_efir, 2);
 }
 
 /*
@@ -100,8 +107,25 @@ static inline void nvt_efm_disable(struct nvt_dev *nvt)
  */
 static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
 {
-       outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
-       outb(ldev, nvt->cr_efdr);
+       nvt_cr_write(nvt, ldev, CR_LOGICAL_DEV_SEL);
+}
+
+/* select and enable logical device, entering and exiting EFM mode */
+static inline void nvt_enable_logical_dev(struct nvt_dev *nvt, u8 ldev)
+{
+       nvt_efm_enable(nvt);
+       nvt_select_logical_dev(nvt, ldev);
+       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
+       nvt_efm_disable(nvt);
+}
+
+/* select and disable logical device, entering and exiting EFM mode */
+static inline void nvt_disable_logical_dev(struct nvt_dev *nvt, u8 ldev)
+{
+       nvt_efm_enable(nvt);
+       nvt_select_logical_dev(nvt, ldev);
+       nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
+       nvt_efm_disable(nvt);
 }
 
 /* write val to cir config register */
@@ -137,6 +161,22 @@ static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
        return val;
 }
 
+/* don't override io address if one is set already */
+static void nvt_set_ioaddr(struct nvt_dev *nvt, unsigned long *ioaddr)
+{
+       unsigned long old_addr;
+
+       old_addr = nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8;
+       old_addr |= nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO);
+
+       if (old_addr)
+               *ioaddr = old_addr;
+       else {
+               nvt_cr_write(nvt, *ioaddr >> 8, CR_CIR_BASE_ADDR_HI);
+               nvt_cr_write(nvt, *ioaddr & 0xff, CR_CIR_BASE_ADDR_LO);
+       }
+}
+
 /* dump current cir register contents */
 static void cir_dump_regs(struct nvt_dev *nvt)
 {
@@ -251,7 +291,7 @@ static inline const char *nvt_find_chip(struct nvt_dev *nvt, int id)
 
 
 /* detect hardware features */
-static void nvt_hw_detect(struct nvt_dev *nvt)
+static int nvt_hw_detect(struct nvt_dev *nvt)
 {
        const char *chip_name;
        int chip_id;
@@ -266,10 +306,17 @@ static void nvt_hw_detect(struct nvt_dev *nvt)
                nvt_efm_enable(nvt);
                nvt->chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
        }
-
        nvt->chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
 
+       nvt_efm_disable(nvt);
+
        chip_id = nvt->chip_major << 8 | nvt->chip_minor;
+       if (chip_id == NVT_INVALID) {
+               dev_err(&nvt->pdev->dev,
+                       "No device found on either EFM port\n");
+               return -ENODEV;
+       }
+
        chip_name = nvt_find_chip(nvt, chip_id);
 
        /* warn, but still let the driver load, if we don't know this chip */
@@ -282,7 +329,7 @@ static void nvt_hw_detect(struct nvt_dev *nvt)
                         "found %s or compatible: chip id: 0x%02x 0x%02x",
                         chip_name, nvt->chip_major, nvt->chip_minor);
 
-       nvt_efm_disable(nvt);
+       return 0;
 }
 
 static void nvt_cir_ldev_init(struct nvt_dev *nvt)
@@ -305,12 +352,10 @@ static void nvt_cir_ldev_init(struct nvt_dev *nvt)
        val |= psval;
        nvt_cr_write(nvt, val, psreg);
 
-       /* Select CIR logical device and enable */
+       /* Select CIR logical device */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 
-       nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
-       nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+       nvt_set_ioaddr(nvt, &nvt->cir_addr);
 
        nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);
 
@@ -320,7 +365,7 @@ static void nvt_cir_ldev_init(struct nvt_dev *nvt)
 
 static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
 {
-       /* Select ACPI logical device, enable it and CIR Wake */
+       /* Select ACPI logical device and enable it */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
        nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 
@@ -330,12 +375,10 @@ static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
        /* enable pme interrupt of cir wakeup event */
        nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);
 
-       /* Select CIR Wake logical device and enable */
+       /* Select CIR Wake logical device */
        nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
-       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
 
-       nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
-       nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);
+       nvt_set_ioaddr(nvt, &nvt->cir_wake_addr);
 
        nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);
 
@@ -355,11 +398,19 @@ static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
 /* clear out the hardware's cir wake rx fifo */
 static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
 {
-       u8 val;
+       u8 val, config;
+
+       config = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON);
+
+       /* clearing wake fifo works in learning mode only */
+       nvt_cir_wake_reg_write(nvt, config & ~CIR_WAKE_IRCON_MODE0,
+                              CIR_WAKE_IRCON);
 
        val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
        nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
                               CIR_WAKE_FIFOCON);
+
+       nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
 }
 
 /* clear out the hardware's cir tx fifo */
@@ -408,6 +459,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt)
 
        /* and finally, enable interrupts */
        nvt_set_cir_iren(nvt);
+
+       /* enable the CIR logical device */
+       nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
 }
 
 static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
@@ -442,10 +496,15 @@ static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
 
        /* clear any and all stray interrupts */
        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
+
+       /* enable the CIR WAKE logical device */
+       nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
 }
 
 static void nvt_enable_wake(struct nvt_dev *nvt)
 {
+       unsigned long flags;
+
        nvt_efm_enable(nvt);
 
        nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
@@ -457,12 +516,16 @@ static void nvt_enable_wake(struct nvt_dev *nvt)
 
        nvt_efm_disable(nvt);
 
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
+
        nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
                               CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
                               CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
                               CIR_WAKE_IRCON);
        nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
+
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 }
 
 #if 0 /* Currently unused */
@@ -670,7 +733,6 @@ static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
 /* copy data from hardware rx fifo into driver buffer */
 static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
 {
-       unsigned long flags;
        u8 fifocount, val;
        unsigned int b_idx;
        bool overrun = false;
@@ -689,8 +751,6 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
 
        nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);
 
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
-
        b_idx = nvt->pkts;
 
        /* This should never happen, but let's check anyway... */
@@ -712,8 +772,6 @@ static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
 
        if (overrun)
                nvt_handle_rx_fifo_overrun(nvt);
-
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 }
 
 static void nvt_cir_log_irqs(u8 status, u8 iren)
@@ -736,16 +794,13 @@ static void nvt_cir_log_irqs(u8 status, u8 iren)
 static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
 {
        unsigned long flags;
-       bool tx_inactive;
        u8 tx_state;
 
        spin_lock_irqsave(&nvt->tx.lock, flags);
        tx_state = nvt->tx.tx_state;
        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 
-       tx_inactive = (tx_state == ST_TX_NONE);
-
-       return tx_inactive;
+       return tx_state == ST_TX_NONE;
 }
 
 /* interrupt service routine for incoming and outgoing CIR data */
@@ -757,9 +812,7 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
 
        nvt_dbg_verbose("%s firing", __func__);
 
-       nvt_efm_enable(nvt);
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_efm_disable(nvt);
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
 
        /*
         * Get IR Status register contents. Write 1 to ack/clear
@@ -775,9 +828,14 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
         *   0: CIR_IRSTS_GH  - Min Length Detected
         */
        status = nvt_cir_reg_read(nvt, CIR_IRSTS);
-       if (!status) {
+       iren = nvt_cir_reg_read(nvt, CIR_IREN);
+
+       /* The IRQ may be shared with CIR WAKE, so check for each status
+        * bit whether the corresponding interrupt source is enabled
+        */
+       if (!(status & iren)) {
+               spin_unlock_irqrestore(&nvt->nvt_lock, flags);
                nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
-               nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
                return IRQ_NONE;
        }
 
@@ -785,13 +843,6 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
        nvt_cir_reg_write(nvt, status, CIR_IRSTS);
        nvt_cir_reg_write(nvt, 0, CIR_IRSTS);
 
-       /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
-       iren = nvt_cir_reg_read(nvt, CIR_IREN);
-       if (!iren) {
-               nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
-               return IRQ_NONE;
-       }
-
        nvt_cir_log_irqs(status, iren);
 
        if (status & CIR_IRSTS_RTR) {
@@ -805,16 +856,14 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
                if (nvt_cir_tx_inactive(nvt))
                        nvt_get_rx_ir_data(nvt);
 
-               spin_lock_irqsave(&nvt->nvt_lock, flags);
-
                cur_state = nvt->study_state;
 
-               spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
                if (cur_state == ST_STUDY_NONE)
                        nvt_clear_cir_fifo(nvt);
        }
 
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
        if (status & CIR_IRSTS_TE)
                nvt_clear_tx_fifo(nvt);
 
@@ -863,9 +912,18 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
 
        nvt_dbg_wake("%s firing", __func__);
 
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
+
        status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
-       if (!status)
+       iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
+
+       /* The IRQ may be shared with CIR, so check for each status
+        * bit whether the corresponding interrupt source is enabled
+        */
+       if (!(status & iren)) {
+               spin_unlock_irqrestore(&nvt->nvt_lock, flags);
                return IRQ_NONE;
+       }
 
        if (status & CIR_WAKE_IRSTS_IR_PENDING)
                nvt_clear_cir_wake_fifo(nvt);
@@ -873,13 +931,6 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
        nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
        nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);
 
-       /* Interrupt may be shared with CIR, bail if Wake not enabled */
-       iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
-       if (!iren) {
-               nvt_dbg_wake("%s exiting, wake not enabled", __func__);
-               return IRQ_HANDLED;
-       }
-
        if ((status & CIR_WAKE_IRSTS_PE) &&
            (nvt->wake_state == ST_WAKE_START)) {
                while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
@@ -888,39 +939,21 @@ static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
                }
 
                nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
-               spin_lock_irqsave(&nvt->nvt_lock, flags);
                nvt->wake_state = ST_WAKE_FINISH;
-               spin_unlock_irqrestore(&nvt->nvt_lock, flags);
        }
 
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
+
        nvt_dbg_wake("%s done", __func__);
        return IRQ_HANDLED;
 }
 
-static void nvt_enable_cir(struct nvt_dev *nvt)
+static void nvt_disable_cir(struct nvt_dev *nvt)
 {
-       /* set function enable flags */
-       nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
-                         CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
-                         CIR_IRCON);
-
-       nvt_efm_enable(nvt);
-
-       /* enable the CIR logical device */
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
-
-       nvt_efm_disable(nvt);
-
-       /* clear all pending interrupts */
-       nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+       unsigned long flags;
 
-       /* enable interrupts */
-       nvt_set_cir_iren(nvt);
-}
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
 
-static void nvt_disable_cir(struct nvt_dev *nvt)
-{
        /* disable CIR interrupts */
        nvt_cir_reg_write(nvt, 0, CIR_IREN);
 
@@ -934,13 +967,10 @@ static void nvt_disable_cir(struct nvt_dev *nvt)
        nvt_clear_cir_fifo(nvt);
        nvt_clear_tx_fifo(nvt);
 
-       nvt_efm_enable(nvt);
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 
        /* disable the CIR logical device */
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
-
-       nvt_efm_disable(nvt);
+       nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
 }
 
 static int nvt_open(struct rc_dev *dev)
@@ -949,20 +979,31 @@ static int nvt_open(struct rc_dev *dev)
        unsigned long flags;
 
        spin_lock_irqsave(&nvt->nvt_lock, flags);
-       nvt_enable_cir(nvt);
+
+       /* set function enable flags */
+       nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
+                         CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
+                         CIR_IRCON);
+
+       /* clear all pending interrupts */
+       nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
+
+       /* enable interrupts */
+       nvt_set_cir_iren(nvt);
+
        spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 
+       /* enable the CIR logical device */
+       nvt_enable_logical_dev(nvt, LOGICAL_DEV_CIR);
+
        return 0;
 }
 
 static void nvt_close(struct rc_dev *dev)
 {
        struct nvt_dev *nvt = dev->priv;
-       unsigned long flags;
 
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
        nvt_disable_cir(nvt);
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 }
 
 /* Allocate memory, probe hardware, and initialize everything */
@@ -1024,7 +1065,9 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 
        init_waitqueue_head(&nvt->tx.queue);
 
-       nvt_hw_detect(nvt);
+       ret = nvt_hw_detect(nvt);
+       if (ret)
+               goto exit_free_dev_rdev;
 
        /* Initialize CIR & CIR Wake Logical Devices */
        nvt_efm_enable(nvt);
@@ -1032,7 +1075,10 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
        nvt_cir_wake_ldev_init(nvt);
        nvt_efm_disable(nvt);
 
-       /* Initialize CIR & CIR Wake Config Registers */
+       /*
+        * Initialize CIR & CIR Wake Config Registers
+        * and enable logical devices
+        */
        nvt_cir_regs_init(nvt);
        nvt_cir_wake_regs_init(nvt);
 
@@ -1079,12 +1125,12 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
                goto exit_unregister_device;
 
        if (!devm_request_region(&pdev->dev, nvt->cir_wake_addr,
-                           CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
+                           CIR_IOREG_LENGTH, NVT_DRIVER_NAME "-wake"))
                goto exit_unregister_device;
 
        if (devm_request_irq(&pdev->dev, nvt->cir_wake_irq,
                             nvt_cir_wake_isr, IRQF_SHARED,
-                            NVT_DRIVER_NAME, (void *)nvt))
+                            NVT_DRIVER_NAME "-wake", (void *)nvt))
                goto exit_unregister_device;
 
        device_init_wakeup(&pdev->dev, true);
@@ -1109,15 +1155,11 @@ exit_free_dev_rdev:
 static void nvt_remove(struct pnp_dev *pdev)
 {
        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
-       unsigned long flags;
 
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
-       /* disable CIR */
-       nvt_cir_reg_write(nvt, 0, CIR_IREN);
        nvt_disable_cir(nvt);
+
        /* enable CIR Wake (for IR power-on) */
        nvt_enable_wake(nvt);
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 
        rc_unregister_device(nvt->rdev);
 }
@@ -1129,26 +1171,23 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
 
        nvt_dbg("%s called", __func__);
 
-       /* zero out misc state tracking */
-       spin_lock_irqsave(&nvt->nvt_lock, flags);
-       nvt->study_state = ST_STUDY_NONE;
-       nvt->wake_state = ST_WAKE_NONE;
-       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
-
        spin_lock_irqsave(&nvt->tx.lock, flags);
        nvt->tx.tx_state = ST_TX_NONE;
        spin_unlock_irqrestore(&nvt->tx.lock, flags);
 
+       spin_lock_irqsave(&nvt->nvt_lock, flags);
+
+       /* zero out misc state tracking */
+       nvt->study_state = ST_STUDY_NONE;
+       nvt->wake_state = ST_WAKE_NONE;
+
        /* disable all CIR interrupts */
        nvt_cir_reg_write(nvt, 0, CIR_IREN);
 
-       nvt_efm_enable(nvt);
+       spin_unlock_irqrestore(&nvt->nvt_lock, flags);
 
        /* disable cir logical dev */
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);
-
-       nvt_efm_disable(nvt);
+       nvt_disable_logical_dev(nvt, LOGICAL_DEV_CIR);
 
        /* make sure wake is enabled */
        nvt_enable_wake(nvt);
@@ -1162,16 +1201,6 @@ static int nvt_resume(struct pnp_dev *pdev)
 
        nvt_dbg("%s called", __func__);
 
-       /* open interrupt */
-       nvt_set_cir_iren(nvt);
-
-       /* Enable CIR logical device */
-       nvt_efm_enable(nvt);
-       nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
-       nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);
-
-       nvt_efm_disable(nvt);
-
        nvt_cir_regs_init(nvt);
        nvt_cir_wake_regs_init(nvt);
 
@@ -1181,6 +1210,7 @@ static int nvt_resume(struct pnp_dev *pdev)
 static void nvt_shutdown(struct pnp_dev *pdev)
 {
        struct nvt_dev *nvt = pnp_get_drvdata(pdev);
+
        nvt_enable_wake(nvt);
 }
 
index 0ad15d34e9c93ce8adedd5f114f84543776f3055..4a5650dffa29a67fad87022659c71b3e2c601910 100644 (file)
@@ -68,7 +68,8 @@ enum nvt_chip_ver {
        NVT_W83667HG    = 0xa510,
        NVT_6775F       = 0xb470,
        NVT_6776F       = 0xc330,
-       NVT_6779D       = 0xc560
+       NVT_6779D       = 0xc560,
+       NVT_INVALID     = 0xffff,
 };
 
 struct nvt_chip {
@@ -157,8 +158,8 @@ struct nvt_dev {
 /* total length of CIR and CIR WAKE */
 #define CIR_IOREG_LENGTH       0x0f
 
-/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL (0x7d0 = 2000) */
-#define CIR_RX_LIMIT_COUNT     0x7d0
+/* RX limit length, 8 high bits for SLCH, 8 low bits for SLCL */
+#define CIR_RX_LIMIT_COUNT  (IR_DEFAULT_TIMEOUT / US_TO_NS(SAMPLE_PERIOD))
 
 /* CIR Regs */
 #define CIR_IRCON      0x00
@@ -292,10 +293,7 @@ struct nvt_dev {
 #define CIR_WAKE_IREN_RTR              0x40
 #define CIR_WAKE_IREN_PE               0x20
 #define CIR_WAKE_IREN_RFO              0x10
-#define CIR_WAKE_IREN_TE               0x08
-#define CIR_WAKE_IREN_TTR              0x04
-#define CIR_WAKE_IREN_TFU              0x02
-#define CIR_WAKE_IREN_GH               0x01
+#define CIR_WAKE_IREN_GH               0x08
 
 /* CIR WAKE FIFOCON settings */
 #define CIR_WAKE_FIFOCON_RXFIFOCLR     0x08
index 7359f3d03b647244baeb8c99ad7076e9a11ba74d..585d5e52118d538f7fc764d2793bd98384c72b44 100644 (file)
@@ -16,6 +16,9 @@
 #ifndef _RC_CORE_PRIV
 #define _RC_CORE_PRIV
 
+/* Define the max number of pulse/space transitions to buffer */
+#define MAX_IR_EVENT_SIZE      512
+
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <media/rc-core.h>
@@ -35,7 +38,8 @@ struct ir_raw_event_ctrl {
        struct list_head                list;           /* to keep track of raw clients */
        struct task_struct              *thread;
        spinlock_t                      lock;
-       struct kfifo_rec_ptr_1          kfifo;          /* fifo for the pulse/space durations */
+       /* fifo for the pulse/space durations */
+       DECLARE_KFIFO(kfifo, struct ir_raw_event, MAX_IR_EVENT_SIZE);
        ktime_t                         last_event;     /* when last event occurred */
        enum raw_event_type             last_type;      /* last event type */
        struct rc_dev                   *dev;           /* pointer to the parent rc_dev */
index c69807fe2feff08c47441dc2bf3c780c4440d505..144304c94606169773566ef4f3132f0dc188d4ce 100644 (file)
@@ -20,9 +20,6 @@
 #include <linux/freezer.h>
 #include "rc-core-priv.h"
 
-/* Define the max number of pulse/space transitions to buffer */
-#define MAX_IR_EVENT_SIZE      512
-
 /* Used to keep track of IR raw clients, protected by ir_raw_handler_lock */
 static LIST_HEAD(ir_raw_client_list);
 
@@ -36,14 +33,12 @@ static int ir_raw_event_thread(void *data)
        struct ir_raw_event ev;
        struct ir_raw_handler *handler;
        struct ir_raw_event_ctrl *raw = (struct ir_raw_event_ctrl *)data;
-       int retval;
 
        while (!kthread_should_stop()) {
 
                spin_lock_irq(&raw->lock);
-               retval = kfifo_len(&raw->kfifo);
 
-               if (retval < sizeof(ev)) {
+               if (!kfifo_len(&raw->kfifo)) {
                        set_current_state(TASK_INTERRUPTIBLE);
 
                        if (kthread_should_stop())
@@ -54,7 +49,8 @@ static int ir_raw_event_thread(void *data)
                        continue;
                }
 
-               retval = kfifo_out(&raw->kfifo, &ev, sizeof(ev));
+               if (!kfifo_out(&raw->kfifo, &ev, 1))
+                       dev_err(&raw->dev->dev, "IR event FIFO is empty!\n");
                spin_unlock_irq(&raw->lock);
 
                mutex_lock(&ir_raw_handler_lock);
@@ -87,8 +83,10 @@ int ir_raw_event_store(struct rc_dev *dev, struct ir_raw_event *ev)
        IR_dprintk(2, "sample: (%05dus %s)\n",
                   TO_US(ev->duration), TO_STR(ev->pulse));
 
-       if (kfifo_in(&dev->raw->kfifo, ev, sizeof(*ev)) != sizeof(*ev))
-               return -ENOMEM;
+       if (!kfifo_put(&dev->raw->kfifo, *ev)) {
+               dev_err(&dev->dev, "IR event FIFO is full!\n");
+               return -ENOSPC;
+       }
 
        return 0;
 }
@@ -273,11 +271,7 @@ int ir_raw_event_register(struct rc_dev *dev)
 
        dev->raw->dev = dev;
        dev->change_protocol = change_protocol;
-       rc = kfifo_alloc(&dev->raw->kfifo,
-                        sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
-                        GFP_KERNEL);
-       if (rc < 0)
-               goto out;
+       INIT_KFIFO(dev->raw->kfifo);
 
        spin_lock_init(&dev->raw->lock);
        dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw,
@@ -319,7 +313,6 @@ void ir_raw_event_unregister(struct rc_dev *dev)
                        handler->raw_unregister(dev);
        mutex_unlock(&ir_raw_handler_lock);
 
-       kfifo_free(&dev->raw->kfifo);
        kfree(dev->raw);
        dev->raw = NULL;
 }
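
Note: the rc-ir-raw.c hunks above switch the raw-event buffer from a byte-oriented kfifo_rec_ptr_1 to a statically sized, typed kfifo, so events are stored and fetched whole with kfifo_put()/kfifo_out() and no runtime allocation or kfifo_free() is needed. The fragment below is a minimal, hypothetical kernel-module sketch of that DECLARE_KFIFO pattern, not part of the patch; the demo_event/demo_fifo names and values are illustrative only.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/types.h>

struct demo_event {
        u32 duration;   /* microseconds */
        bool pulse;
};

/* statically allocated, typed FIFO; the element count must be a power of two */
static DECLARE_KFIFO(demo_fifo, struct demo_event, 512);

static int __init demo_init(void)
{
        struct demo_event in = { .duration = 100, .pulse = true };
        struct demo_event out;

        INIT_KFIFO(demo_fifo);

        /* kfifo_put() stores one element and returns 0 if the FIFO is full */
        if (!kfifo_put(&demo_fifo, in))
                pr_err("demo: FIFO full, event dropped\n");

        /* kfifo_out() returns the number of elements actually copied out */
        if (kfifo_out(&demo_fifo, &out, 1))
                pr_info("demo: %u us %s\n", out.duration,
                        out.pulse ? "pulse" : "space");

        return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Typed-kfifo usage sketch");
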
index 504bfbc4027adc4953ebdde65658782491182b34..9f3e0fd4cad9f6c63faf32ed914bba50af29ab6a 100644 (file)
@@ -461,13 +461,12 @@ static int m88rs6000t_sleep(struct dvb_frontend *fe)
        dev_dbg(&dev->client->dev, "%s:\n", __func__);
 
        ret = regmap_write(dev->regmap, 0x07, 0x6d);
-       if (ret)
-               goto err;
-       usleep_range(5000, 10000);
-err:
-       if (ret)
+       if (ret) {
                dev_dbg(&dev->client->dev, "failed=%d\n", ret);
-       return ret;
+               return ret;
+       }
+       usleep_range(5000, 10000);
+       return 0;
 }
 
 static int m88rs6000t_get_frequency(struct dvb_frontend *fe, u32 *frequency)
index a7a8452e99d23b2598e4bcb11bbe61a63d735c31..6ab35e315fe7a2dd11ce4078357b0dffff98d1b8 100644 (file)
@@ -1295,7 +1295,7 @@ static int generic_set_freq(struct dvb_frontend *fe,
                            v4l2_std_id std, u32 delsys)
 {
        struct r820t_priv               *priv = fe->tuner_priv;
-       int                             rc = -EINVAL;
+       int                             rc;
        u32                             lo_freq;
 
        tuner_dbg("should set frequency to %d kHz, bw %d MHz\n",
index 0e1ca2b00e61e3e78e3f30f1f357a2902aebb7d0..3450dfb7c427ad1841cfa31cde2647eee31fde72 100644 (file)
@@ -364,8 +364,8 @@ static int si2157_get_if_frequency(struct dvb_frontend *fe, u32 *frequency)
 static const struct dvb_tuner_ops si2157_ops = {
        .info = {
                .name           = "Silicon Labs Si2146/2147/2148/2157/2158",
-               .frequency_min  = 55000000,
-               .frequency_max  = 862000000,
+               .frequency_min  = 42000000,
+               .frequency_max  = 870000000,
        },
 
        .init = si2157_init,
@@ -458,6 +458,9 @@ static int si2157_remove(struct i2c_client *client)
 
        dev_dbg(&client->dev, "\n");
 
+       /* stop statistics polling */
+       cancel_delayed_work_sync(&dev->stat_work);
+
        memset(&fe->ops.tuner_ops, 0, sizeof(struct dvb_tuner_ops));
        fe->tuner_priv = NULL;
        kfree(dev);
index 4e941f00b6008f286b839d8e2f72c41978cc98e5..082ff5608455c273ea9c53e1824bbb76fad15a0e 100644 (file)
@@ -1403,11 +1403,12 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg)
         * in order to avoid troubles during device release.
         */
        kfree(priv->ctrl.fname);
+       priv->ctrl.fname = NULL;
        memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
        if (p->fname) {
                priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
                if (priv->ctrl.fname == NULL)
-                       rc = -ENOMEM;
+                       return -ENOMEM;
        }
 
        /*
index aee2d76e8dfc996dd3fa4e1f09ca0c55be47e2ed..8def19d9ab921698af30fef7bf7ce5529e097c0b 100644 (file)
@@ -52,7 +52,7 @@ struct as10x_bus_adapter_t {
        struct as10x_cmd_t *cmd, *rsp;
 
        /* bus adapter private ops callback */
-       struct as102_priv_ops_t *ops;
+       const struct as102_priv_ops_t *ops;
 };
 
 struct as102_dev_t {
index 3f669066ccf6837dd8b4022c7f03ab166e3a2f42..0e8030c071b8e74bfdd46e229f258f9b50aa64ca 100644 (file)
@@ -189,7 +189,7 @@ static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap,
        return actual_len;
 }
 
-static struct as102_priv_ops_t as102_priv_ops = {
+static const struct as102_priv_ops_t as102_priv_ops = {
        .upload_fw_pkt  = as102_send_ep1,
        .xfer_cmd       = as102_usb_xfer_cmd,
        .as102_read_ep2 = as102_read_ep2,
index 9e29e70a78d7b20f8b250cbde914c2a589a82052..df2bc3f732b697b359ef4574bd7db1e9f297bd1f 100644 (file)
@@ -276,7 +276,7 @@ static int au0828_create_media_graph(struct au0828_dev *dev)
                return -EINVAL;
 
        if (tuner) {
-               ret = media_create_pad_link(tuner, TUNER_PAD_IF_OUTPUT,
+               ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
                                            decoder, 0,
                                            MEDIA_LNK_FL_ENABLED);
                if (ret)
index 94363a3ba400a3ccdcaa3d6ec88ebc082d1ecdf2..0e174e86061404d6e01f36d8ff2db5e0f856702b 100644 (file)
@@ -181,7 +181,7 @@ static int stop_urb_transfer(struct au0828_dev *dev)
 static int start_urb_transfer(struct au0828_dev *dev)
 {
        struct urb *purb;
-       int i, ret = -ENOMEM;
+       int i, ret;
 
        dprintk(2, "%s()\n", __func__);
 
@@ -194,7 +194,7 @@ static int start_urb_transfer(struct au0828_dev *dev)
 
                dev->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
                if (!dev->urbs[i])
-                       goto err;
+                       return -ENOMEM;
 
                purb = dev->urbs[i];
 
@@ -207,9 +207,10 @@ static int start_urb_transfer(struct au0828_dev *dev)
                if (!purb->transfer_buffer) {
                        usb_free_urb(purb);
                        dev->urbs[i] = NULL;
+                       ret = -ENOMEM;
                        pr_err("%s: failed big buffer allocation, err = %d\n",
                               __func__, ret);
-                       goto err;
+                       return ret;
                }
 
                purb->status = -EINPROGRESS;
@@ -235,10 +236,7 @@ static int start_urb_transfer(struct au0828_dev *dev)
        }
 
        dev->urb_streaming = true;
-       ret = 0;
-
-err:
-       return ret;
+       return 0;
 }
 
 static void au0828_start_transport(struct au0828_dev *dev)
index 0bd96906339227ec8b3df4e78640fc500be4fb87..d4bdba60b0f71436065e4884d5622cc34dffbfe6 100644 (file)
@@ -10,7 +10,7 @@
 /* Version information */
 #define DRIVER_VERSION "0.1"
 #define DRIVER_NAME "Technisat/B2C2 FlexCop II/IIb/III Digital TV USB Driver"
-#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"
+#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@posteo.de>"
 
 /* debug */
 #ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
index 48643b94e69449503618e2d95fc16a8da01c319b..c9320d6c613145888fe41f43034bfb32b88aabb5 100644 (file)
@@ -1382,6 +1382,8 @@ static int cx231xx_bulk_copy(struct cx231xx *dev, struct urb *urb)
        buffer_size = urb->actual_length;
 
        buffer = kmalloc(buffer_size, GFP_ATOMIC);
+       if (!buffer)
+               return -ENOMEM;
 
        memcpy(buffer, dma_q->ps_head, 3);
        memcpy(buffer+3, p_buffer, buffer_size-3);
index de4ae5eb4830bc4ce6c5c1513bf7f3b29bfa09a6..a6a9508418f8eebef0a4ae41c1e312610afb598c 100644 (file)
@@ -499,6 +499,11 @@ static int snd_cx231xx_pcm_close(struct snd_pcm_substream *substream)
        }
 
        dev->adev.users--;
+       if (substream->runtime->dma_area) {
+               dev_dbg(dev->dev, "freeing\n");
+               vfree(substream->runtime->dma_area);
+               substream->runtime->dma_area = NULL;
+       }
        mutex_unlock(&dev->lock);
 
        if (dev->adev.users == 0 && dev->adev.shutdown == 1) {
index 620b83d03f75a1c6952f4982d2a85c53fe0e54aa..54e43fe13e6d35da096e2b0faf08be166cc404b8 100644 (file)
@@ -1259,7 +1259,7 @@ static int cx231xx_create_media_graph(struct cx231xx *dev)
                return 0;
 
        if (tuner) {
-               ret = media_create_pad_link(tuner, TUNER_PAD_IF_OUTPUT, decoder, 0,
+               ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT, decoder, 0,
                                            MEDIA_LNK_FL_ENABLED);
                if (ret < 0)
                        return ret;
index 6e02a15d39ce90a1da6c95f6431fa5d04cdb9ec9..b3c09fe54d9bf7f681fb9a964cc8e65b617b1741 100644 (file)
@@ -684,7 +684,7 @@ static int af9035_download_firmware(struct dvb_usb_device *d,
        if (ret < 0)
                goto err;
 
-       if (tmp == 1 || tmp == 3) {
+       if (tmp == 1 || tmp == 3 || tmp == 5) {
                /* configure gpioh1, reset & power slave demod */
                ret = af9035_wr_reg_mask(d, 0x00d8b0, 0x01, 0x01);
                if (ret < 0)
@@ -823,7 +823,7 @@ static int af9035_read_config(struct dvb_usb_device *d)
        if (ret < 0)
                goto err;
 
-       if (tmp == 1 || tmp == 3)
+       if (tmp == 1 || tmp == 3 || tmp == 5)
                state->dual_mode = true;
 
        dev_dbg(&d->udev->dev, "%s: ts mode=%d dual mode=%d\n", __func__,
index 416a97f05ec8539fbc2c11dad23e1261a819650c..df22001f9e41273d137ab0c92979ecbb9c9b29b3 100644 (file)
@@ -112,9 +112,10 @@ static const u32 clock_lut_it9135[] = {
  * 0  TS
  * 1  DCA + PIP
  * 3  PIP
+ * 5  DCA + PIP
  * n  DCA
  *
- * Values 0 and 3 are seen to this day. 0 for single TS and 3 for dual TS.
+ * Values 0, 3 and 5 are seen to this day. 0 for single TS and 3/5 for dual TS.
  */
 
 #define EEPROM_BASE_AF9035        0x42fd
index 023d91f7e6542f0905af07964648988c252a5ad7..35f27e2e4e284f5a6554fe963c60931034d13864 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DVB USB framework
  *
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
  * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
  *
  *    This program is free software; you can redistribute it and/or modify
index 45f07090d431a25c3c2b2c645d90bf9c5b113c14..a1622bda2a5e00be3529309b1afe18bfa4ee3872 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DVB USB framework
  *
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
  * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
  *
  *    This program is free software; you can redistribute it and/or modify
index f0565bf3673e8687ba16599aafc59152ad85f1da..5ec159f2239974ca14bbf8038ce865b49ac58a4b 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DVB USB framework
  *
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
  * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -1129,7 +1129,7 @@ int dvb_usbv2_reset_resume(struct usb_interface *intf)
 EXPORT_SYMBOL(dvb_usbv2_reset_resume);
 
 MODULE_VERSION("2.0");
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
 MODULE_DESCRIPTION("DVB USB common");
 MODULE_LICENSE("GPL");
index 22bdce15ecf31fcd351309ca7fa5794cb228869d..5bafeb6486beb3c7da3459e63a74ca0de6994957 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * DVB USB framework
  *
- * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2004-6 Patrick Boettcher <patrick.boettcher@posteo.de>
  * Copyright (C) 2012 Antti Palosaari <crope@iki.fi>
  *
  *    This program is free software; you can redistribute it and/or modify
index 1dd962535f972dda834a913cd4e5ae6bd26b9bc7..02dbc6c45423a157b0ae857dbb9963dc483e712a 100644 (file)
@@ -847,10 +847,17 @@ static const struct usb_device_id dvbsky_id_table[] = {
                USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI,
                &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI",
                RC_MAP_TT_1500) },
+       { DVB_USB_DEVICE(USB_VID_TECHNOTREND,
+               USB_PID_TECHNOTREND_CONNECT_CT2_4650_CI_2,
+               &dvbsky_t680c_props, "TechnoTrend TT-connect CT2-4650 CI v1.1",
+               RC_MAP_TT_1500) },
        { DVB_USB_DEVICE(USB_VID_TERRATEC,
                USB_PID_TERRATEC_H7_3,
                &dvbsky_t680c_props, "Terratec H7 Rev.4",
                RC_MAP_TT_1500) },
+       { DVB_USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R4,
+               &dvbsky_s960_props, "Terratec Cinergy S2 Rev.4",
+               RC_MAP_DVBSKY) },
        { }
 };
 MODULE_DEVICE_TABLE(usb, dvbsky_id_table);
index 444579be0b779618d63c61ad23cb7d531faee64e..7d16252dbb71f2c053ac52466592f555487051af 100644 (file)
@@ -36,7 +36,7 @@ MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able)).");
 struct mxl111sf_tuner_state {
        struct mxl111sf_state *mxl_state;
 
-       struct mxl111sf_tuner_config *cfg;
+       const struct mxl111sf_tuner_config *cfg;
 
        enum mxl_if_freq if_freq;
 
@@ -489,8 +489,8 @@ static struct dvb_tuner_ops mxl111sf_tuner_tuner_ops = {
 };
 
 struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
-                                          struct mxl111sf_state *mxl_state,
-                                          struct mxl111sf_tuner_config *cfg)
+                               struct mxl111sf_state *mxl_state,
+                               const struct mxl111sf_tuner_config *cfg)
 {
        struct mxl111sf_tuner_state *state = NULL;
 
index e6caab21a1973e2a918de7d095fb2c548a052e22..509b5507121849fea1c0526dd1cbc4494ec49930 100644 (file)
@@ -63,13 +63,13 @@ struct mxl111sf_tuner_config {
 #if IS_ENABLED(CONFIG_DVB_USB_MXL111SF)
 extern
 struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
-                                          struct mxl111sf_state *mxl_state,
-                                          struct mxl111sf_tuner_config *cfg);
+                               struct mxl111sf_state *mxl_state,
+                               const struct mxl111sf_tuner_config *cfg);
 #else
 static inline
 struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
-                                          struct mxl111sf_state *mxl_state,
-                                          struct mxl111sf_tuner_config *cfg)
+                               struct mxl111sf_state *mxl_state,
+                               const struct mxl111sf_tuner_config *cfg)
 {
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
index b669deccc34c64065e801ee29a68d85b05c8dedc..5d676b533a3ab245956764592047219d035c0069 100644 (file)
@@ -856,7 +856,7 @@ static int mxl111sf_ant_hunt(struct dvb_frontend *fe)
        return 0;
 }
 
-static struct mxl111sf_tuner_config mxl_tuner_config = {
+static const struct mxl111sf_tuner_config mxl_tuner_config = {
        .if_freq         = MXL_IF_6_0, /* applies to external IF output, only */
        .invert_spectrum = 0,
        .read_reg        = mxl111sf_read_reg,
@@ -888,7 +888,7 @@ static int mxl111sf_attach_tuner(struct dvb_usb_adapter *adap)
        state->tuner.function = MEDIA_ENT_F_TUNER;
        state->tuner.name = "mxl111sf tuner";
        state->tuner_pads[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
-       state->tuner_pads[TUNER_PAD_IF_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+       state->tuner_pads[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
 
        ret = media_entity_pads_init(&state->tuner,
                                     TUNER_NUM_PADS, state->tuner_pads);
index eb5787a3191e7afc31e69e6e9757297b745279c0..c4c6e92e8643c0f782617d534ec60b08ddfc01ce 100644 (file)
@@ -259,6 +259,10 @@ static int rtl28xxu_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[],
                ret = -EOPNOTSUPP;
        }
 
+       /* Retry failed I2C messages */
+       if (ret == -EPIPE)
+               ret = -EAGAIN;
+
 err_mutex_unlock:
        mutex_unlock(&d->i2c_mutex);
 
@@ -619,6 +623,10 @@ static int rtl28xxu_identify_state(struct dvb_usb_device *d, const char **name)
        }
        dev_dbg(&d->intf->dev, "chip_id=%u\n", dev->chip_id);
 
+       /* Retry failed I2C messages */
+       d->i2c_adap.retries = 1;
+       d->i2c_adap.timeout = msecs_to_jiffies(10);
+
        return WARM;
 err:
        dev_dbg(&d->intf->dev, "failed=%d\n", ret);
index ca8f3c2b10824c29c28f4be0e54ae931f08a8059..55136cde38f57c8841974b5f734054ee178adbdf 100644 (file)
@@ -1,6 +1,6 @@
 /* usb-urb.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file keeps functions for initializing and handling the
index 83684ed023cd9bba08a20ae9cfe682f636da5a36..7ba975bea96a6d0ba03f5276f53322bfd29715a1 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB framework compliant Linux driver for the AVerMedia AverTV DVB-T
  * USB2.0 (A800) DVB-T receiver.
  *
- * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to
  *   - AVerMedia who kindly provided information and
@@ -185,7 +185,7 @@ static struct usb_driver a800_driver = {
 
 module_usb_driver(a800_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("AVerMedia AverTV DVB-T USB 2.0 (A800)");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index ab715118172878ff52c8ba3e1002ec2357971a8c..907ac01ae2979a56657e6ddc32fafecfbb9c9042 100644 (file)
@@ -13,7 +13,7 @@
  *
  * TODO: Use the cx25840-driver for the analogue part
  *
- * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
  * Copyright (C) 2006 Michael Krufky (mkrufky@linuxtv.org)
  * Copyright (C) 2006, 2007 Chris Pascoe (c.pascoe@itee.uq.edu.au)
  *
@@ -2314,7 +2314,7 @@ static struct usb_driver cxusb_driver = {
 
 module_usb_driver(cxusb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
 MODULE_AUTHOR("Chris Pascoe <c.pascoe@itee.uq.edu.au>");
 MODULE_DESCRIPTION("Driver for Conexant USB2.0 hybrid reference design");
index 0d248ce02a9b2eb02b9da6fecbb6175319110574..c16f999b9d7c9de8c2cc75526256ba30fa69a33c 100644 (file)
@@ -881,7 +881,7 @@ static struct usb_driver dib0700_driver = {
 module_usb_driver(dib0700_driver);
 
 MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
-MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 7ed49646a699dd750fe7eded35632de7fd7cf4a4..ea0391e32d23b16ac848e33d60492a9e9e3dbb22 100644 (file)
@@ -1736,8 +1736,13 @@ static int dib809x_tuner_attach(struct dvb_usb_adapter *adap)
        struct dib0700_adapter_state *st = adap->priv;
        struct i2c_adapter *tun_i2c = st->dib8000_ops.get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_TUNER, 1);
 
-       if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
-               return -ENODEV;
+       if (adap->id == 0) {
+               if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+                       return -ENODEV;
+       } else {
+               if (dvb_attach(dib0090_register, adap->fe_adap[0].fe, tun_i2c, &dib809x_dib0090_config) == NULL)
+                       return -ENODEV;
+       }
 
        st->set_param_save = adap->fe_adap[0].fe->ops.tuner_ops.set_params;
        adap->fe_adap[0].fe->ops.tuner_ops.set_params = dib8096_set_param_override;
@@ -1773,6 +1778,20 @@ static int stk809x_frontend_attach(struct dvb_usb_adapter *adap)
        return adap->fe_adap[0].fe == NULL ?  -ENODEV : 0;
 }
 
+static int stk809x_frontend1_attach(struct dvb_usb_adapter *adap)
+{
+       struct dib0700_adapter_state *state = adap->priv;
+
+       if (!dvb_attach(dib8000_attach, &state->dib8000_ops))
+               return -ENODEV;
+
+       state->dib8000_ops.i2c_enumeration(&adap->dev->i2c_adap, 1, 0x10, 0x82, 0);
+
+       adap->fe_adap[0].fe = state->dib8000_ops.init(&adap->dev->i2c_adap, 0x82, &dib809x_dib8000_config[1]);
+
+       return adap->fe_adap[0].fe == NULL ?  -ENODEV : 0;
+}
+
 static int nim8096md_tuner_attach(struct dvb_usb_adapter *adap)
 {
        struct dib0700_adapter_state *st = adap->priv;
@@ -3794,6 +3813,7 @@ struct usb_device_id dib0700_usb_id_table[] = {
 /* 80 */{ USB_DEVICE(USB_VID_ELGATO,   USB_PID_ELGATO_EYETV_DTT_2) },
        { USB_DEVICE(USB_VID_PCTV,      USB_PID_PCTV_2002E) },
        { USB_DEVICE(USB_VID_PCTV,      USB_PID_PCTV_2002E_SE) },
+       { USB_DEVICE(USB_VID_PCTV,      USB_PID_DIBCOM_STK8096PVR) },
        { 0 }           /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -4959,6 +4979,59 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                                            RC_BIT_NEC,
                        .change_protocol  = dib0700_change_protocol,
                },
+       }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+               .num_adapters = 2,
+               .adapter = {
+                       {
+                               .num_frontends = 1,
+                               .fe = {{
+                                       .caps  = DVB_USB_ADAP_HAS_PID_FILTER |
+                                               DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+                                       .pid_filter_count = 32,
+                                       .pid_filter = stk80xx_pid_filter,
+                                       .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
+                                       .frontend_attach  = stk809x_frontend_attach,
+                                       .tuner_attach     = dib809x_tuner_attach,
+
+                                       DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+                               } },
+                               .size_of_priv =
+                                       sizeof(struct dib0700_adapter_state),
+                       }, {
+                               .num_frontends = 1,
+                               .fe = { {
+                                       .caps  = DVB_USB_ADAP_HAS_PID_FILTER |
+                                               DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+                                       .pid_filter_count = 32,
+                                       .pid_filter = stk80xx_pid_filter,
+                                       .pid_filter_ctrl = stk80xx_pid_filter_ctrl,
+                                       .frontend_attach  = stk809x_frontend1_attach,
+                                       .tuner_attach     = dib809x_tuner_attach,
+
+                                       DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+                               } },
+                               .size_of_priv =
+                                       sizeof(struct dib0700_adapter_state),
+                       },
+               },
+               .num_device_descs = 1,
+               .devices = {
+                       {   "DiBcom STK8096-PVR reference design",
+                               { &dib0700_usb_id_table[83], NULL },
+                               { NULL },
+                       },
+               },
+
+               .rc.core = {
+                       .rc_interval      = DEFAULT_RC_INTERVAL,
+                       .rc_codes         = RC_MAP_DIB0700_RC5_TABLE,
+                       .module_name  = "dib0700",
+                       .rc_query         = dib0700_rc_query_old_firmware,
+                       .allowed_protos   = RC_BIT_RC5 |
+                               RC_BIT_RC6_MCE |
+                               RC_BIT_NEC,
+                       .change_protocol  = dib0700_change_protocol,
+               },
        },
 };
 
index ef3a8f75f82ebe21b4ee5b7963aed9bbf8449c32..35de6095926df2289fa089a5b2a4410a9f0d15d8 100644 (file)
@@ -1,6 +1,6 @@
 /* Common methods for dibusb-based-receivers.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
index a4ac37e0e98b692ccc4a72ea5f93c556bcaf1447..a0057641cc86838d80d694ca9511e8547a37e480 100644 (file)
@@ -1,10 +1,10 @@
 /* DVB USB compliant linux driver for mobile DVB-T USB devices based on
  * reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-B)
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * based on GPL code from DiBcom, which has
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -465,7 +465,7 @@ static struct usb_driver dibusb_driver = {
 
 module_usb_driver(dibusb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for DiBcom USB DVB-T devices (DiB3000M-B based)");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 9d1a59d09c523c3fb8d6ae4be4f6641a0b6c1507..08fb8a3f6e0cc23e87ff150c747361fd5259b1f6 100644 (file)
@@ -1,10 +1,10 @@
 /* DVB USB compliant linux driver for mobile DVB-T USB devices based on
  * reference designs made by DiBcom (http://www.dibcom.fr/) (DiB3000M-C/P)
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * based on GPL code from DiBcom, which has
- * Copyright (C) 2004 Amaury Demol for DiBcom (ademol@dibcom.fr)
+ * Copyright (C) 2004 Amaury Demol for DiBcom
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -143,7 +143,7 @@ static struct usb_driver dibusb_mc_driver = {
 
 module_usb_driver(dibusb_mc_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for DiBcom USB2.0 DVB-T (DiB3000M-C/P based) devices");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 32ab1392313fbdaed6ec67591cbd38e964bde878..3f82163d8ab89061410bdff4b68bea2e1d153c27 100644 (file)
@@ -1,6 +1,6 @@
 /* Header file for all dibusb-based-receivers.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
index 772bde3c5020a63b026567cf6e62ba44f2c8714b..63134335c99406ca7df3d284cd3ba95d4154cae3 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB compliant linux driver for Nebula Electronics uDigiTV DVB-T USB2.0
  * receiver
  *
- * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2005 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * partly based on the SDK published by Nebula Electronics
  *
@@ -348,7 +348,7 @@ static struct usb_driver digitv_driver = {
 
 module_usb_driver(digitv_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for Nebula Electronics uDigiTV DVB-T USB2.0");
 MODULE_VERSION("1.0-alpha");
 MODULE_LICENSE("GPL");
index 8637ad1be6be430895c403ef2382f38d58494f0b..7e72a1bef76a22d6070a20ea71297e6a5da280ec 100644 (file)
@@ -1,7 +1,7 @@
 /* Frontend part of the Linux driver for the WideView/ Yakumo/ Hama/
  * Typhoon/ Yuan DVB-T USB2.0 receiver.
  *
- * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
index c357fb3b0a883c382a6c480a0c9567f665f8d10e..ca3b69aa96882f47692e5bbac10b12943dbba418 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB library compliant Linux driver for the WideView/ Yakumo/ Hama/
  * Typhoon/ Yuan/ Miglia DVB-T USB2.0 receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to Steve Chang from WideView for providing support for the WT-220U.
  *
@@ -362,7 +362,7 @@ static struct usb_driver dtt200u_usb_driver = {
 
 module_usb_driver(dtt200u_usb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for the WideView/Yakumo/Hama/Typhoon/Club3D/Miglia DVB-T USB2.0 devices");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 005b0a7df3585d1653e61d8b8a63a9278a0ab7cf..efccc399b1cba7cdb92f59b48b7033fa1619b7d1 100644 (file)
@@ -1,7 +1,7 @@
 /* Common header file of Linux driver for the WideView/ Yakumo/ Hama/
  * Typhoon/ Yuan DVB-T USB2.0 receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
index 6b7b2a89242e797d9bbe0c959ddef57515a25a8f..7e619d638809b12fce6c5fe9c7d744e0b56ef87a 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-common.h is part of the DVB USB library.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * a header file containing prototypes and types for internal use of the dvb-usb-lib
index 9ddfcab268be9c797e7ce6a276dc4168fe0651df..71de19ba0e01e211fc4d071ddc61b35755953513 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-dvb.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for initializing and handling the
index 733a7ff7b207819bcd05f2a816388fa21e9f69b5..dd048a7c461c8ffe1884b52ca80ff2b35233adaf 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-firmware.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for downloading the firmware to Cypress FX 1 and 2 based devices.
index 88e4a62abc44db45e72ea93bc46334805e0ef2f7..4f0b0adce7f5dfe18d47af28968b9d2d307fb3ff 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-i2c.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for (de-)initializing an I2C adapter.
index 1adf325012f7340f9648ae46b5df72532beeb94c..3896ba9a4179670687eadb63bdd199e251dc904e 100644 (file)
@@ -3,7 +3,7 @@
  *
  * dvb-usb-init.c
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -299,6 +299,6 @@ void dvb_usb_device_exit(struct usb_interface *intf)
 EXPORT_SYMBOL(dvb_usb_device_exit);
 
 MODULE_VERSION("1.0");
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("A library module containing commonly used USB and DVB function USB DVB devices");
 MODULE_LICENSE("GPL");
index 7b5dae3077f6a2550584b2eaada3da0a1d7632ca..c259f9e43542970d87a32ca2e79806d87d193a8e 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-remote.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file contains functions for initializing the input-device and for handling remote-control-queries.
index 5c8f651344fc1fd2a713014a82d14cea8cc3718c..95f9097498cb86259f0209013ab499894ab39839 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb-urb.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file keeps functions for initializing and handling the
index ce4c4e3b58bb7516f9bdff338d1b6b8ed2d0332e..639c4678c65b96c82293f7546919768401f43dbd 100644 (file)
@@ -1,6 +1,6 @@
 /* dvb-usb.h is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * the headerfile, all dvb-usb-drivers have to include.
index 14ef25dc6cd33076a288573ba413eb4e7ded6454..dd46d6c78c4ea27deea934dbb6515b0560a69017 100644 (file)
@@ -1688,6 +1688,7 @@ enum dw2102_table_entry {
        TECHNOTREND_S2_4600,
        TEVII_S482_1,
        TEVII_S482_2,
+       TERRATEC_CINERGY_S2_BOX,
 };
 
 static struct usb_device_id dw2102_table[] = {
@@ -1702,19 +1703,20 @@ static struct usb_device_id dw2102_table[] = {
        [TEVII_S660] = {USB_DEVICE(0x9022, USB_PID_TEVII_S660)},
        [PROF_7500] = {USB_DEVICE(0x3034, 0x7500)},
        [GENIATECH_SU3000] = {USB_DEVICE(0x1f4d, 0x3000)},
-       [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00a8)},
+       [TERRATEC_CINERGY_S2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R1)},
        [TEVII_S480_1] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_1)},
        [TEVII_S480_2] = {USB_DEVICE(0x9022, USB_PID_TEVII_S480_2)},
        [X3M_SPC1400HD] = {USB_DEVICE(0x1f4d, 0x3100)},
        [TEVII_S421] = {USB_DEVICE(0x9022, USB_PID_TEVII_S421)},
        [TEVII_S632] = {USB_DEVICE(0x9022, USB_PID_TEVII_S632)},
-       [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, 0x00b0)},
+       [TERRATEC_CINERGY_S2_R2] = {USB_DEVICE(USB_VID_TERRATEC, USB_PID_TERRATEC_CINERGY_S2_R2)},
        [GOTVIEW_SAT_HD] = {USB_DEVICE(0x1FE1, USB_PID_GOTVIEW_SAT_HD)},
        [GENIATECH_T220] = {USB_DEVICE(0x1f4d, 0xD220)},
        [TECHNOTREND_S2_4600] = {USB_DEVICE(USB_VID_TECHNOTREND,
                USB_PID_TECHNOTREND_CONNECT_S2_4600)},
        [TEVII_S482_1] = {USB_DEVICE(0x9022, 0xd483)},
        [TEVII_S482_2] = {USB_DEVICE(0x9022, 0xd484)},
+       [TERRATEC_CINERGY_S2_BOX] = {USB_DEVICE(USB_VID_TERRATEC, 0x0105)},
        { }
 };
 
@@ -2232,7 +2234,7 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
                } },
                }
        },
-       .num_device_descs = 3,
+       .num_device_descs = 4,
        .devices = {
                { "TechnoTrend TT-connect S2-4600",
                        { &dw2102_table[TECHNOTREND_S2_4600], NULL },
@@ -2246,6 +2248,10 @@ static struct dvb_usb_device_properties tt_s2_4600_properties = {
                        { &dw2102_table[TEVII_S482_2], NULL },
                        { NULL },
                },
+               { "Terratec Cinergy S2 USB BOX",
+                       { &dw2102_table[TERRATEC_CINERGY_S2_BOX], NULL },
+                       { NULL },
+               },
        }
 };
 
index 6c55384e2fca8638d7846241dfeffcda528fa96a..fc7569e2728d2201e14897cddd41c9a9504a9716 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB framework compliant Linux driver for the Hauppauge WinTV-NOVA-T usb2
  * DVB-T receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -227,7 +227,7 @@ static struct usb_driver nova_t_driver = {
 
 module_usb_driver(nova_t_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Hauppauge WinTV-NOVA-T usb2");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 6c3c477229551642fc762d753337a115c3d64b10..51487d2f7764cfcec87cb365bc15817123e68ef0 100644 (file)
@@ -512,7 +512,7 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a)
                        &a->dev->i2c_adap, STV090x_DEMODULATOR_0);
 
        if (a->fe_adap[0].fe) {
-               struct stv6110x_devctl *ctl;
+               const struct stv6110x_devctl *ctl;
 
                ctl = dvb_attach(stv6110x_attach,
                                a->fe_adap[0].fe,
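
The single functional change above makes the local stv6110x devctl pointer const in technisat_usb2_frontend_attach(), presumably to match a constified return type of stv6110x_attach() elsewhere in this series. A purely illustrative sketch (names invented, not driver code) showing that a const ops pointer still supports calls through its members:

/*
 * Illustrative only: reading and calling through a const ops struct is
 * fine; only writing to its members is rejected by the compiler.
 */
struct demo_devctl {
	int (*tuner_init)(void *fe);
};

static int demo_tuner_init(void *fe)
{
	return 0;
}

static const struct demo_devctl demo_ops = {
	.tuner_init = demo_tuner_init,
};

static int demo_attach(void *fe)
{
	const struct demo_devctl *ctl = &demo_ops;	/* same shape as the patch */

	return ctl->tuner_init(fe);
}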
index f10717311e05bb9873c3fa24e40b369b1d9c4237..ecc207fbaf3cfffd54595492bd0e6f7427afa9d2 100644 (file)
@@ -820,7 +820,7 @@ static struct usb_driver ttusb2_driver = {
 
 module_usb_driver(ttusb2_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for Pinnacle PCTV 400e DVB-S USB2.0");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index 9b042292e788db67d5576515e10d54b6e6b6415e..58ad5b4f856c5dd5e1ba0d2a824026dc10cc5e97 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB USB framework compliant Linux driver for the HanfTek UMT-010 USB2.0
  * DVB-T receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  *     This program is free software; you can redistribute it and/or modify it
  *     under the terms of the GNU General Public License as published by the Free
@@ -145,7 +145,7 @@ static struct usb_driver umt_driver = {
 
 module_usb_driver(umt_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index d62ee0f5a1658d64cf165ba1a9fe6dde2a34774a..89173603be6754e1e8e30c68a06affcfa89482fe 100644 (file)
@@ -1,6 +1,6 @@
 /* usb-urb.c is part of the DVB USB library.
  *
- * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@posteo.de)
  * see dvb-usb-init.c for copyright information.
  *
  * This file keeps functions for initializing and handling the
index d361a72ca0fa227d396d64ebe9e187c2aaa6d393..27398c08c69dc4c6cfec28703ebc53d64949fe75 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de>
  *                    Metzler Brothers Systementwicklung GbR
  *
- * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
index ee1e19e364452e142f4e35c393714c53a4e6d7d3..40de33de90a7aef04ae6cf95153c8177596a16ba 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright (C) 2005 Ralph Metzler <rjkm@metzlerbros.de>
  *                    Metzler Brothers Systementwicklung GbR
  *
- * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@desy.de>
+ * Copyright (C) 2005 Patrick Boettcher <patrick.boettcher@posteo.de>
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
@@ -439,7 +439,7 @@ static struct usb_driver vp702x_usb_driver = {
 
 module_usb_driver(vp702x_usb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for Twinhan StarBox DVB-S USB2.0 and clones");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index e708afc6a57fbce136a0b894ba660645552f3c64..7765602ea658c4ecf6ef951949dec5a912c8ae26 100644 (file)
@@ -1,7 +1,7 @@
 /* DVB frontend part of the Linux driver for TwinhanDTV Alpha/MagicBoxII USB2.0
  * DVB-T receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
index d750724132ee372902340fcf8f3bec2e4ef3401a..13340af0d39c34bee810a8b225da511e950dcfc5 100644 (file)
@@ -2,7 +2,7 @@
  *  - TwinhanDTV Alpha/MagicBoxII USB2.0 DVB-T receiver
  *  - DigitalNow TinyUSB2 DVB-t receiver
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
@@ -296,7 +296,7 @@ static struct usb_driver vp7045_usb_driver = {
 
 module_usb_driver(vp7045_usb_driver);
 
-MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
+MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@posteo.de>");
 MODULE_DESCRIPTION("Driver for Twinhan MagicBox/Alpha and DNTV tinyUSB2 DVB-T USB2.0");
 MODULE_VERSION("1.0");
 MODULE_LICENSE("GPL");
index cf5ec46f8bb1bbc40138af764be3f0a79f763aa1..66499932ca767f97156bb9bce08e82a0dfc9d8ef 100644 (file)
@@ -1,7 +1,7 @@
 /* Common header-file of the Linux driver for the TwinhanDTV Alpha/MagicBoxII
  * USB2.0 DVB-T receiver.
  *
- * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@desy.de)
+ * Copyright (C) 2004-5 Patrick Boettcher (patrick.boettcher@posteo.de)
  *
  * Thanks to Twinhan who kindly provided hardware and information.
  *
index b58acd3fcd9918368b2eadac79d61a5f64d80b38..72f3f4d50253a5041573140b6b5afa4b758dd3b8 100644 (file)
@@ -64,6 +64,8 @@ static int em28xx_initialize_mt9m111(struct em28xx *dev)
                i2c_master_send(&dev->i2c_client[dev->def_i2c_bus],
                                &regs[i][0], 3);
 
+       /* FIXME: This won't be creating a sensor at the media graph */
+
        return 0;
 }
 
@@ -91,6 +93,8 @@ static int em28xx_initialize_mt9m001(struct em28xx *dev)
                i2c_master_send(&dev->i2c_client[dev->def_i2c_bus],
                                &regs[i][0], 3);
 
+       /* FIXME: This won't be creating a sensor at the media graph */
+
        return 0;
 }
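
Both FIXME comments above flag the same gap: these sensors are programmed with raw i2c_master_send() transfers rather than being bound as V4L2 subdevices, so no MEDIA_ENT_F_CAM_SENSOR entity ever appears in the media graph that the rest of this series builds. A hedged sketch of the usual route a sensor entity takes into the graph (generic V4L2 helpers, not new em28xx code):

#include <media/v4l2-device.h>

/*
 * Sketch only: once v4l2_dev->mdev points at the board's media_device,
 * registering the sensor as a subdevice also registers its media entity,
 * which is what em28xx_v4l2_create_media_graph() later links to the video
 * node for webcams.
 */
static int example_register_sensor(struct v4l2_device *v4l2_dev,
				   struct v4l2_subdev *sensor)
{
	return v4l2_device_register_subdev(v4l2_dev, sensor);
}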
 
index a1b6ef5894a689a39c6597aa668db2d1d4115b33..ba442c9674155ad94197f0c4f9fbde7446f0d55c 100644 (file)
@@ -570,7 +570,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type    = TUNER_ABSENT,
                .is_webcam     = 1,
                .input         = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = silvercrest_reg_seq,
@@ -583,7 +583,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder      = EM28XX_SAA711X,
                .tuner_type   = TUNER_ABSENT,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -605,7 +605,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type    = TUNER_ABSENT,
                .is_webcam     = 1,
                .input         = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                } },
@@ -616,7 +616,7 @@ struct em28xx_board em28xx_boards[] = {
                .tda9887_conf = TDA9887_PRESENT,
                .decoder      = EM28XX_SAA711X,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -635,7 +635,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -655,7 +655,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -675,7 +675,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -715,7 +715,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -735,7 +735,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -755,7 +755,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -775,7 +775,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -800,7 +800,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE4,
                        .amux     = EM28XX_AMUX_AUX,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE5,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -819,7 +819,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .is_webcam    = 1,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                } },
@@ -829,7 +829,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .is_webcam    = 1,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = silvercrest_reg_seq,
@@ -848,7 +848,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
@@ -863,7 +863,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,   /* Capture only device */
                .decoder      = EM28XX_SAA711X,
                .input        = { {
-                       .type  = EM28XX_VMUX_COMPOSITE1,
+                       .type  = EM28XX_VMUX_COMPOSITE,
                        .vmux  = SAA7115_COMPOSITE0,
                        .amux  = EM28XX_AMUX_LINE_IN,
                }, {
@@ -879,7 +879,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .is_webcam    = 1,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = 0,
                        .amux     = EM28XX_AMUX_VIDEO,
                } },
@@ -889,7 +889,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder      = EM28XX_SAA711X,
                .tuner_type   = TUNER_ABSENT,   /* Capture only device */
                .input        = { {
-                       .type  = EM28XX_VMUX_COMPOSITE1,
+                       .type  = EM28XX_VMUX_COMPOSITE,
                        .vmux  = SAA7115_COMPOSITE0,
                        .amux  = EM28XX_AMUX_LINE_IN,
                }, {
@@ -909,7 +909,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -930,7 +930,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -952,7 +952,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -974,7 +974,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -992,7 +992,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1006,7 +1006,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type    = TUNER_ABSENT,  /* Capture only device */
                .decoder       = EM28XX_TVP5150,
                .input         = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1029,7 +1029,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = pinnacle_hybrid_pro_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = pinnacle_hybrid_pro_analog,
@@ -1100,7 +1100,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = terratec_cinergy_USB_XS_FR_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = terratec_cinergy_USB_XS_FR_analog,
@@ -1186,7 +1186,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1213,7 +1213,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1239,7 +1239,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1265,7 +1265,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1291,7 +1291,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1317,7 +1317,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1343,7 +1343,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = default_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = default_analog,
@@ -1368,7 +1368,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1392,7 +1392,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux      = SAA7115_COMPOSITE4,
                        .amux      = EM28XX_AMUX_VIDEO,
                }, {
-                       .type      = EM28XX_VMUX_COMPOSITE1,
+                       .type      = EM28XX_VMUX_COMPOSITE,
                        .vmux      = SAA7115_COMPOSITE0,
                        .amux      = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1413,7 +1413,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1428,7 +1428,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder    = EM28XX_SAA711X,
                .tuner_type = TUNER_ABSENT, /* capture only board */
                .input      = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1443,7 +1443,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,   /* Capture-only board */
                .decoder      = EM28XX_SAA711X,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = vc211a_enable,
@@ -1465,7 +1465,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1485,7 +1485,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1500,7 +1500,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT, /* capture only board */
                .decoder      = EM28XX_SAA711X,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1520,7 +1520,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = SAA7115_COMPOSITE2,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1541,7 +1541,7 @@ struct em28xx_board em28xx_boards[] = {
                        .aout     = EM28XX_AOUT_MONO |  /* I2S */
                                    EM28XX_AOUT_MASTER, /* Line out pin */
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1555,6 +1555,7 @@ struct em28xx_board em28xx_boards[] = {
                .buttons = std_snapshot_button,
                .tda9887_conf = TDA9887_PRESENT,
                .tuner_type   = TUNER_YMEC_TVF_5533MF,
+               .tuner_addr   = 0x60,
                .decoder      = EM28XX_SAA711X,
                .input        = { {
                        .type     = EM28XX_VMUX_TELEVISION,
@@ -1563,7 +1564,7 @@ struct em28xx_board em28xx_boards[] = {
                        .aout     = EM28XX_AOUT_MONO |  /* I2S */
                                    EM28XX_AOUT_MASTER, /* Line out pin */
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1581,7 +1582,7 @@ struct em28xx_board em28xx_boards[] = {
                        .type     = EM28XX_VMUX_SVIDEO,
                        .vmux     = SAA7115_SVIDEO3,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                } },
        },
@@ -1610,7 +1611,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = em2880_msi_digivox_ad_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = em2880_msi_digivox_ad_analog,
@@ -1633,7 +1634,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = em2880_msi_digivox_ad_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = em2880_msi_digivox_ad_analog,
@@ -1654,7 +1655,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1677,7 +1678,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = default_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = default_analog,
@@ -1708,7 +1709,7 @@ struct em28xx_board em28xx_boards[] = {
                        .gpio = em2882_kworld_315u_analog,
                        .aout = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
                }, {
-                       .type = EM28XX_VMUX_COMPOSITE1,
+                       .type = EM28XX_VMUX_COMPOSITE,
                        .vmux = SAA7115_COMPOSITE0,
                        .amux = EM28XX_AMUX_LINE_IN,
                        .gpio = em2882_kworld_315u_analog1,
@@ -1735,7 +1736,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux = EM28XX_AMUX_VIDEO,
                        .gpio = default_analog,
                }, {
-                       .type = EM28XX_VMUX_COMPOSITE1,
+                       .type = EM28XX_VMUX_COMPOSITE,
                        .vmux = TVP5150_COMPOSITE1,
                        .amux = EM28XX_AMUX_LINE_IN,
                        .gpio = default_analog,
@@ -1758,7 +1759,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = default_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = default_analog,
@@ -1782,7 +1783,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = pinnacle_hybrid_pro_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = pinnacle_hybrid_pro_analog,
@@ -1808,7 +1809,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1834,7 +1835,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1859,7 +1860,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = hauppauge_wintv_hvr_900_analog,
@@ -1904,7 +1905,7 @@ struct em28xx_board em28xx_boards[] = {
                        .gpio     = kworld_330u_analog,
                        .aout     = EM28XX_AOUT_PCM_IN | EM28XX_AOUT_PCM_STEREO,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = kworld_330u_analog,
@@ -1951,7 +1952,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
 
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1970,7 +1971,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .decoder      = EM28XX_SAA711X,
                .input           = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -1990,7 +1991,7 @@ struct em28xx_board em28xx_boards[] = {
                        .vmux     = TVP5150_COMPOSITE0,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, { /* Composite has not been tested yet */
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_VIDEO,
                }, { /* S-video has not been tested yet */
@@ -2006,7 +2007,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder         = EM28XX_SAA711X,
                .xclk            = EM28XX_XCLK_FREQUENCY_12MHZ,
                .input           = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -2023,7 +2024,7 @@ struct em28xx_board em28xx_boards[] = {
                .xclk            = EM28XX_XCLK_FREQUENCY_12MHZ,
                .mute_gpio       = terratec_av350_mute_gpio,
                .input           = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AUDIO_SRC_LINE,
                        .gpio     = terratec_av350_unmute_gpio,
@@ -2041,7 +2042,7 @@ struct em28xx_board em28xx_boards[] = {
                .decoder      = EM28XX_SAA711X,
                .tuner_type   = TUNER_ABSENT,   /* Capture only device */
                .input        = { {
-                       .type  = EM28XX_VMUX_COMPOSITE1,
+                       .type  = EM28XX_VMUX_COMPOSITE,
                        .vmux  = SAA7115_COMPOSITE0,
                        .amux  = EM28XX_AMUX_LINE_IN,
                }, {
@@ -2067,7 +2068,7 @@ struct em28xx_board em28xx_boards[] = {
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = evga_indtube_analog,
                }, {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                        .gpio     = evga_indtube_analog,
@@ -2125,7 +2126,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type          = TUNER_ABSENT,
                .decoder             = EM28XX_SAA711X,
                .input               = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = SAA7115_COMPOSITE0,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -2238,7 +2239,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type   = TUNER_ABSENT,
                .is_webcam    = 1,
                .input        = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .amux     = EM28XX_AMUX_VIDEO,
                        .gpio     = speedlink_vad_laplace_reg_seq,
                } },
@@ -2272,7 +2273,7 @@ struct em28xx_board em28xx_boards[] = {
                .tuner_type    = TUNER_ABSENT,  /* Capture only device */
                .decoder       = EM28XX_TVP5150,
                .input         = { {
-                       .type     = EM28XX_VMUX_COMPOSITE1,
+                       .type     = EM28XX_VMUX_COMPOSITE,
                        .vmux     = TVP5150_COMPOSITE1,
                        .amux     = EM28XX_AMUX_LINE_IN,
                }, {
@@ -3012,6 +3013,48 @@ static void flush_request_modules(struct em28xx *dev)
        flush_work(&dev->request_module_wk);
 }
 
+static int em28xx_media_device_init(struct em28xx *dev,
+                                   struct usb_device *udev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_device *mdev;
+
+       mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+       if (!mdev)
+               return -ENOMEM;
+
+       mdev->dev = &udev->dev;
+
+       if (!dev->name)
+               strlcpy(mdev->model, "unknown em28xx", sizeof(mdev->model));
+       else
+               strlcpy(mdev->model, dev->name, sizeof(mdev->model));
+       if (udev->serial)
+               strlcpy(mdev->serial, udev->serial, sizeof(mdev->serial));
+       strcpy(mdev->bus_info, udev->devpath);
+       mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+       mdev->driver_version = LINUX_VERSION_CODE;
+
+       media_device_init(mdev);
+
+       dev->media_dev = mdev;
+#endif
+       return 0;
+}
+
+static void em28xx_unregister_media_device(struct em28xx *dev)
+{
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+       if (dev->media_dev) {
+               media_device_unregister(dev->media_dev);
+               media_device_cleanup(dev->media_dev);
+               kfree(dev->media_dev);
+               dev->media_dev = NULL;
+       }
+#endif
+}
+
 /*
  * em28xx_release_resources()
  * unregisters the v4l2,i2c and usb devices
@@ -3023,6 +3066,8 @@ static void em28xx_release_resources(struct em28xx *dev)
 
        mutex_lock(&dev->lock);
 
+       em28xx_unregister_media_device(dev);
+
        if (dev->def_i2c_bus)
                em28xx_i2c_unregister(dev, 1);
        em28xx_i2c_unregister(dev, 0);
@@ -3167,6 +3212,8 @@ static int em28xx_init_dev(struct em28xx *dev, struct usb_device *udev,
         */
        snprintf(dev->name, sizeof(dev->name), "%s #%d", chip_name, dev->devno);
 
+       em28xx_media_device_init(dev, udev);
+
        if (dev->is_audio_only) {
                retval = em28xx_audio_setup(dev);
                if (retval)
@@ -3467,7 +3514,7 @@ static int em28xx_usb_probe(struct usb_interface *interface,
        /* save our data pointer in this interface device */
        usb_set_intfdata(interface, dev);
 
-       /* allocate device struct */
+       /* allocate device struct and check if the device is a webcam */
        mutex_init(&dev->lock);
        retval = em28xx_init_dev(dev, udev, interface, nr);
        if (retval) {
@@ -3483,6 +3530,15 @@ static int em28xx_usb_probe(struct usb_interface *interface,
                try_bulk = usb_xfer_mode > 0;
        }
 
+       /* Disable V4L2 if the device doesn't have a decoder */
+       if (has_video &&
+           dev->board.decoder == EM28XX_NODECODER && !dev->board.is_webcam) {
+               printk(DRIVER_NAME
+                      ": Currently, V4L2 is not supported on this model\n");
+               has_video = false;
+               dev->has_video = false;
+       }
+
        /* Select USB transfer types to use */
        if (has_video) {
                if (!dev->analog_ep_isoc || (try_bulk && dev->analog_ep_bulk))
@@ -3501,9 +3557,14 @@ static int em28xx_usb_probe(struct usb_interface *interface,
 
        request_modules(dev);
 
-       /* Should be the last thing to do, to avoid newer udev's to
-          open the device before fully initializing it
+       /*
+        * Do it at the end, to reduce dynamic configuration changes during
+        * the device init. Yet, as request_modules() can be async, the
+        * topology will likely change after the load of the em28xx subdrivers.
         */
+#ifdef CONFIG_MEDIA_CONTROLLER
+       retval = media_device_register(dev->media_dev);
+#endif
 
        return 0;
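
Taken together, the em28xx-cards.c hunks give each probed board a media_device: allocated and initialised early in em28xx_init_dev(), registered only at the end of probe (after request_modules(), since async sub-driver loading will still change the topology), and torn down in em28xx_release_resources(). A condensed sketch of that lifecycle, assuming CONFIG_MEDIA_CONTROLLER and the media controller helpers used in the hunks:

#ifdef CONFIG_MEDIA_CONTROLLER
#include <linux/slab.h>
#include <linux/usb.h>
#include <media/media-device.h>

/* Condensed from the hunks above; model/serial/bus_info setup omitted. */
static struct media_device *example_mdev_create(struct usb_device *udev)
{
	struct media_device *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);

	if (!mdev)
		return NULL;

	mdev->dev = &udev->dev;			/* parent for /dev/mediaX      */
	media_device_init(mdev);		/* prepare graph bookkeeping   */

	/* ... entities and links are added while the sub-drivers bind ... */

	if (media_device_register(mdev) < 0)	/* expose the graph last       */
		pr_warn("media device registration failed\n");
	return mdev;
}

static void example_mdev_destroy(struct media_device *mdev)
{
	media_device_unregister(mdev);
	media_device_cleanup(mdev);
	kfree(mdev);
}
#endif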
 
index bf5c24467c65bec87b43c14b3f2552cb9dce6aab..ea80541d58f065190b738730d93b3e8cd048750f 100644 (file)
@@ -916,6 +916,9 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
                       dev->name, result);
                goto fail_adapter;
        }
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+       dvb->adapter.mdev = dev->media_dev;
+#endif
 
        /* Ensure all frontends negotiate bus access */
        dvb->fe[0]->ops.ts_bus_ctrl = em28xx_dvb_bus_ctrl;
@@ -994,8 +997,15 @@ static int em28xx_register_dvb(struct em28xx_dvb *dvb, struct module *module,
 
        /* register network adapter */
        dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);
+
+       result = dvb_create_media_graph(&dvb->adapter, false);
+       if (result < 0)
+               goto fail_create_graph;
+
        return 0;
 
+fail_create_graph:
+       dvb_net_release(&dvb->net);
 fail_fe_conn:
        dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
 fail_fe_mem:
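
The em28xx-dvb.c changes mirror this on the digital side: the DVB adapter is pointed at the shared media_device (under CONFIG_MEDIA_CONTROLLER_DVB) before the frontends register, and once demux and dvb_net are wired up dvb_create_media_graph() builds the corresponding entities and links, with the new fail_create_graph label releasing the network device on error. A minimal usage sketch, assuming the dvb-core helper keeps the two-argument form used above (adapter plus a flag for creating an RF connector entity):

#ifdef CONFIG_MEDIA_CONTROLLER_DVB
/* Sketch: attach the adapter to the board's media_device, then build the graph. */
static int example_dvb_graph(struct dvb_adapter *adap, struct media_device *mdev)
{
	adap->mdev = mdev;	/* set before frontends/demux register their entities */

	/* ... register frontends, demux and dvb_net as the driver does ... */

	return dvb_create_media_graph(adap, false);	/* false: no RF connector */
}
#endif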
index 0e86ff423c499d196303da2999215e7454d3867f..16a2d4039330307a295d760bef31919c099966b6 100644 (file)
@@ -196,7 +196,6 @@ static void em28xx_wake_i2c(struct em28xx *dev)
        v4l2_device_call_all(v4l2_dev, 0, core,  reset, 0);
        v4l2_device_call_all(v4l2_dev, 0, video, s_routing,
                             INPUT(dev->ctl_input)->vmux, 0, 0);
-       v4l2_device_call_all(v4l2_dev, 0, video, s_stream, 0);
 }
 
 static int em28xx_colorlevels_set_default(struct em28xx *dev)
@@ -867,6 +866,275 @@ static void res_free(struct em28xx *dev, enum v4l2_buf_type f_type)
        em28xx_videodbg("res: put %d\n", res_type);
 }
 
+static void em28xx_v4l2_media_release(struct em28xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+       int i;
+
+       for (i = 0; i < MAX_EM28XX_INPUT; i++) {
+               if (!INPUT(i)->type)
+                       return;
+               media_device_unregister_entity(&dev->input_ent[i]);
+       }
+#endif
+}
+
+/*
+ * Media Controller helper functions
+ */
+
+static int em28xx_v4l2_create_media_graph(struct em28xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct em28xx_v4l2 *v4l2 = dev->v4l2;
+       struct media_device *mdev = dev->media_dev;
+       struct media_entity *entity;
+       struct media_entity *if_vid = NULL, *if_aud = NULL;
+       struct media_entity *tuner = NULL, *decoder = NULL;
+       int i, ret;
+
+       if (!mdev)
+               return 0;
+
+       /* Webcams are really simple */
+       if (dev->board.is_webcam) {
+               media_device_for_each_entity(entity, mdev) {
+                       if (entity->function != MEDIA_ENT_F_CAM_SENSOR)
+                               continue;
+                       ret = media_create_pad_link(entity, 0,
+                                                   &v4l2->vdev.entity, 0,
+                                                   MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+               }
+               return 0;
+       }
+
+       /* Non-webcams have analog TV decoder and other complexities */
+
+       media_device_for_each_entity(entity, mdev) {
+               switch (entity->function) {
+               case MEDIA_ENT_F_IF_VID_DECODER:
+                       if_vid = entity;
+                       break;
+               case MEDIA_ENT_F_IF_AUD_DECODER:
+                       if_aud = entity;
+                       break;
+               case MEDIA_ENT_F_TUNER:
+                       tuner = entity;
+                       break;
+               case MEDIA_ENT_F_ATV_DECODER:
+                       decoder = entity;
+                       break;
+               }
+       }
+
+       /* Analog setup, using tuner as a link */
+
+       /* Something bad happened! */
+       if (!decoder)
+               return -EINVAL;
+
+       if (tuner) {
+               if (if_vid) {
+                       ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
+                                                   if_vid,
+                                                   IF_VID_DEC_PAD_IF_INPUT,
+                                                   MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+                       ret = media_create_pad_link(if_vid, IF_VID_DEC_PAD_OUT,
+                                               decoder, DEMOD_PAD_IF_INPUT,
+                                               MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+               } else {
+                       ret = media_create_pad_link(tuner, TUNER_PAD_OUTPUT,
+                                               decoder, DEMOD_PAD_IF_INPUT,
+                                               MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+               }
+
+               if (if_aud) {
+                       ret = media_create_pad_link(tuner, TUNER_PAD_AUD_OUT,
+                                                   if_aud,
+                                                   IF_AUD_DEC_PAD_IF_INPUT,
+                                                   MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+               } else {
+                       if_aud = tuner;
+               }
+
+       }
+       ret = media_create_pad_link(decoder, DEMOD_PAD_VID_OUT,
+                                   &v4l2->vdev.entity, 0,
+                                   MEDIA_LNK_FL_ENABLED);
+       if (ret)
+               return ret;
+
+       if (em28xx_vbi_supported(dev)) {
+               ret = media_create_pad_link(decoder, DEMOD_PAD_VBI_OUT,
+                                           &v4l2->vbi_dev.entity, 0,
+                                           MEDIA_LNK_FL_ENABLED);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < MAX_EM28XX_INPUT; i++) {
+               struct media_entity *ent = &dev->input_ent[i];
+
+               if (!INPUT(i)->type)
+                       break;
+
+               switch (INPUT(i)->type) {
+               case EM28XX_VMUX_COMPOSITE:
+               case EM28XX_VMUX_SVIDEO:
+                       ret = media_create_pad_link(ent, 0, decoder,
+                                                   DEMOD_PAD_IF_INPUT, 0);
+                       if (ret)
+                               return ret;
+                       break;
+               default: /* EM28XX_VMUX_TELEVISION or EM28XX_RADIO */
+                       if (!tuner)
+                               break;
+
+                       ret = media_create_pad_link(ent, 0, tuner,
+                                                   TUNER_PAD_RF_INPUT,
+                                                   MEDIA_LNK_FL_ENABLED);
+                       if (ret)
+                               return ret;
+                       break;
+               }
+       }
+#endif
+       return 0;
+}
+
+static int em28xx_enable_analog_tuner(struct em28xx *dev)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_device *mdev = dev->media_dev;
+       struct em28xx_v4l2 *v4l2 = dev->v4l2;
+       struct media_entity *source;
+       struct media_link *link, *found_link = NULL;
+       int ret, active_links = 0;
+
+       if (!mdev || !v4l2->decoder)
+               return 0;
+
+       /*
+        * This will find the tuner that is connected into the decoder.
+        * Technically, this is not 100% correct, as the device may be
+        * using an analog input instead of the tuner. However, as we can't
+        * do DVB streaming while the DMA engine is being used for V4L2,
+        * this should be enough for the actual needs.
+        */
+       list_for_each_entry(link, &v4l2->decoder->links, list) {
+               if (link->sink->entity == v4l2->decoder) {
+                       found_link = link;
+                       if (link->flags & MEDIA_LNK_FL_ENABLED)
+                               active_links++;
+                       break;
+               }
+       }
+
+       if (active_links == 1 || !found_link)
+               return 0;
+
+       source = found_link->source->entity;
+       list_for_each_entry(link, &source->links, list) {
+               struct media_entity *sink;
+               int flags = 0;
+
+               sink = link->sink->entity;
+
+               if (sink == v4l2->decoder)
+                       flags = MEDIA_LNK_FL_ENABLED;
+
+               ret = media_entity_setup_link(link, flags);
+               if (ret) {
+                       pr_err("Couldn't change link %s->%s to %s. Error %d\n",
+                              source->name, sink->name,
+                              flags ? "enabled" : "disabled",
+                              ret);
+                       return ret;
+               } else
+                       em28xx_videodbg("link %s->%s was %s\n",
+                                       source->name, sink->name,
+                                       flags ? "ENABLED" : "disabled");
+       }
+#endif
+       return 0;
+}
+
+static const char * const iname[] = {
+       [EM28XX_VMUX_COMPOSITE]  = "Composite",
+       [EM28XX_VMUX_SVIDEO]     = "S-Video",
+       [EM28XX_VMUX_TELEVISION] = "Television",
+       [EM28XX_RADIO]           = "Radio",
+};
+
+static void em28xx_v4l2_create_entities(struct em28xx *dev)
+{
+#if defined(CONFIG_MEDIA_CONTROLLER)
+       struct em28xx_v4l2 *v4l2 = dev->v4l2;
+       int ret, i;
+
+       /* Initialize Video, VBI and Radio pads */
+       v4l2->video_pad.flags = MEDIA_PAD_FL_SINK;
+       ret = media_entity_pads_init(&v4l2->vdev.entity, 1, &v4l2->video_pad);
+       if (ret < 0)
+               pr_err("failed to initialize video media entity!\n");
+
+       if (em28xx_vbi_supported(dev)) {
+               v4l2->vbi_pad.flags = MEDIA_PAD_FL_SINK;
+               ret = media_entity_pads_init(&v4l2->vbi_dev.entity, 1,
+                                            &v4l2->vbi_pad);
+               if (ret < 0)
+                       pr_err("failed to initialize vbi media entity!\n");
+       }
+
+       /* Webcams don't have input connectors */
+       if (dev->board.is_webcam)
+               return;
+
+       /* Create entities for each input connector */
+       for (i = 0; i < MAX_EM28XX_INPUT; i++) {
+               struct media_entity *ent = &dev->input_ent[i];
+
+               if (!INPUT(i)->type)
+                       break;
+
+               ent->name = iname[INPUT(i)->type];
+               ent->flags = MEDIA_ENT_FL_CONNECTOR;
+               dev->input_pad[i].flags = MEDIA_PAD_FL_SOURCE;
+
+               switch (INPUT(i)->type) {
+               case EM28XX_VMUX_COMPOSITE:
+                       ent->function = MEDIA_ENT_F_CONN_COMPOSITE;
+                       break;
+               case EM28XX_VMUX_SVIDEO:
+                       ent->function = MEDIA_ENT_F_CONN_SVIDEO;
+                       break;
+               default: /* EM28XX_VMUX_TELEVISION or EM28XX_RADIO */
+                       ent->function = MEDIA_ENT_F_CONN_RF;
+                       break;
+               }
+
+               ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);
+               if (ret < 0)
+                       pr_err("failed to initialize input pad[%d]!\n", i);
+
+               ret = media_device_register_entity(dev->media_dev, ent);
+               if (ret < 0)
+                       pr_err("failed to register input entity %d!\n", i);
+       }
+#endif
+}
+
+
 /* ------------------------------------------------------------------
        Videobuf2 operations
    ------------------------------------------------------------------*/
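
The large block added above is the analog half of the graph: em28xx_v4l2_create_entities() registers one connector entity per board input, em28xx_v4l2_create_media_graph() wires tuner, IF video/audio decoders, the ATV decoder and the video/VBI nodes together, and em28xx_enable_analog_tuner() re-enables the decoder's source link when analog streaming starts. A stripped-down sketch of the same pad-link pattern (pad numbers here are placeholders, not the em28xx ones):

#ifdef CONFIG_MEDIA_CONTROLLER
#include <media/media-entity.h>

/* Sketch: connector -> decoder -> video node, all links created enabled. */
static int example_link_chain(struct media_entity *conn,
			      struct media_entity *decoder,
			      struct media_entity *video)
{
	int ret;

	/* source pad 0 of the connector feeds sink pad 0 of the decoder */
	ret = media_create_pad_link(conn, 0, decoder, 0, MEDIA_LNK_FL_ENABLED);
	if (ret)
		return ret;

	/* the decoder's output pad feeds the video device entity */
	return media_create_pad_link(decoder, 1, video, 0, MEDIA_LNK_FL_ENABLED);
}
#endif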
@@ -884,6 +1152,9 @@ static int queue_setup(struct vb2_queue *vq,
                return sizes[0] < size ? -EINVAL : 0;
        *nplanes = 1;
        sizes[0] = size;
+
+       em28xx_enable_analog_tuner(dev);
+
        return 0;
 }
 
@@ -962,6 +1233,9 @@ int em28xx_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
                        f.type = V4L2_TUNER_ANALOG_TV;
                v4l2_device_call_all(&v4l2->v4l2_dev,
                                     0, tuner, s_frequency, &f);
+
+               /* Enable video stream at TV decoder */
+               v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 1);
        }
 
        v4l2->streaming_users++;
@@ -981,6 +1255,9 @@ static void em28xx_stop_streaming(struct vb2_queue *vq)
        res_free(dev, vq->type);
 
        if (v4l2->streaming_users-- == 1) {
+               /* Disable video stream at TV decoder */
+               v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 0);
+
                /* Last active user, so shutdown all the URBS */
                em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
        }
@@ -1013,6 +1290,9 @@ void em28xx_stop_vbi_streaming(struct vb2_queue *vq)
        res_free(dev, vq->type);
 
        if (v4l2->streaming_users-- == 1) {
+               /* Disable video stream at TV decoder */
+               v4l2_device_call_all(&v4l2->v4l2_dev, 0, video, s_stream, 0);
+
                /* Last active user, so shutdown all the URBS */
                em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
        }
@@ -1224,6 +1504,12 @@ static void scale_to_size(struct em28xx *dev,
 
        *width = (((unsigned long)maxw) << 12) / (hscale + 4096L);
        *height = (((unsigned long)maxh) << 12) / (vscale + 4096L);
+
+       /* Don't let width or height to be zero */
+       if (*width < 1)
+               *width = 1;
+       if (*height < 1)
+               *height = 1;
 }
 
 /* ------------------------------------------------------------------
@@ -1299,6 +1585,11 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
                v4l_bound_align_image(&width, 48, maxw, 1, &height, 32, maxh,
                                      1, 0);
        }
+       /* Avoid division by zero at size_to_scale */
+       if (width < 1)
+               width = 1;
+       if (height < 1)
+               height = 1;
 
        size_to_scale(dev, width, height, &hscale, &vscale);
        scale_to_size(dev, hscale, vscale, &width, &height);
@@ -1434,18 +1725,6 @@ static int vidioc_s_parm(struct file *file, void *priv,
                                          0, video, s_parm, p);
 }
 
-static const char *iname[] = {
-       [EM28XX_VMUX_COMPOSITE1] = "Composite1",
-       [EM28XX_VMUX_COMPOSITE2] = "Composite2",
-       [EM28XX_VMUX_COMPOSITE3] = "Composite3",
-       [EM28XX_VMUX_COMPOSITE4] = "Composite4",
-       [EM28XX_VMUX_SVIDEO]     = "S-Video",
-       [EM28XX_VMUX_TELEVISION] = "Television",
-       [EM28XX_VMUX_CABLE]      = "Cable TV",
-       [EM28XX_VMUX_DVB]        = "DVB",
-       [EM28XX_VMUX_DEBUG]      = "for debug only",
-};
-
 static int vidioc_enum_input(struct file *file, void *priv,
                             struct v4l2_input *i)
 {
@@ -1463,8 +1742,7 @@ static int vidioc_enum_input(struct file *file, void *priv,
 
        strcpy(i->name, iname[INPUT(n)->type]);
 
-       if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type) ||
-           (EM28XX_VMUX_CABLE == INPUT(n)->type))
+       if ((EM28XX_VMUX_TELEVISION == INPUT(n)->type))
                i->type = V4L2_INPUT_TYPE_TUNER;
 
        i->std = dev->v4l2->vdev.tvnorms;
@@ -1961,6 +2239,8 @@ static int em28xx_v4l2_fini(struct em28xx *dev)
 
        em28xx_uninit_usb_xfer(dev, EM28XX_ANALOG_MODE);
 
+       em28xx_v4l2_media_release(dev);
+
        if (video_is_registered(&v4l2->radio_dev)) {
                em28xx_info("V4L2 device %s deregistered\n",
                            video_device_node_name(&v4l2->radio_dev));
@@ -2284,6 +2564,9 @@ static int em28xx_v4l2_init(struct em28xx *dev)
        v4l2->dev = dev;
        dev->v4l2 = v4l2;
 
+#ifdef CONFIG_MEDIA_CONTROLLER
+       v4l2->v4l2_dev.mdev = dev->media_dev;
+#endif
        ret = v4l2_device_register(&dev->udev->dev, &v4l2->v4l2_dev);
        if (ret < 0) {
                em28xx_errdev("Call to v4l2_device_register() failed!\n");
@@ -2556,6 +2839,16 @@ static int em28xx_v4l2_init(struct em28xx *dev)
                            video_device_node_name(&v4l2->radio_dev));
        }
 
+       /* Init entities at the Media Controller */
+       em28xx_v4l2_create_entities(dev);
+
+       ret = em28xx_v4l2_create_media_graph(dev);
+       if (ret) {
+               em28xx_errdev("failed to create media graph\n");
+               em28xx_v4l2_media_release(dev);
+               goto unregister_dev;
+       }
+
        em28xx_info("V4L2 video device registered as %s\n",
                    video_device_node_name(&v4l2->vdev));
 
@@ -2577,6 +2870,22 @@ static int em28xx_v4l2_init(struct em28xx *dev)
        return 0;
 
 unregister_dev:
+       if (video_is_registered(&v4l2->radio_dev)) {
+               em28xx_info("V4L2 device %s deregistered\n",
+                           video_device_node_name(&v4l2->radio_dev));
+               video_unregister_device(&v4l2->radio_dev);
+       }
+       if (video_is_registered(&v4l2->vbi_dev)) {
+               em28xx_info("V4L2 device %s deregistered\n",
+                           video_device_node_name(&v4l2->vbi_dev));
+               video_unregister_device(&v4l2->vbi_dev);
+       }
+       if (video_is_registered(&v4l2->vdev)) {
+               em28xx_info("V4L2 device %s deregistered\n",
+                           video_device_node_name(&v4l2->vdev));
+               video_unregister_device(&v4l2->vdev);
+       }
+
        v4l2_ctrl_handler_free(&v4l2->ctrl_handler);
        v4l2_device_unregister(&v4l2->v4l2_dev);
 err:
index 8ff066c977d9d3e88b3e32b05cfbd7c6974afaf4..2674449617757a92ec2cd97b9b407f60a32a0a63 100644 (file)
@@ -26,7 +26,7 @@
 #ifndef _EM28XX_H
 #define _EM28XX_H
 
-#define EM28XX_VERSION "0.2.1"
+#define EM28XX_VERSION "0.2.2"
 #define DRIVER_DESC    "Empia em28xx device driver"
 
 #include <linux/workqueue.h>
@@ -291,15 +291,9 @@ struct em28xx_dmaqueue {
 
 #define MAX_EM28XX_INPUT 4
 enum enum28xx_itype {
-       EM28XX_VMUX_COMPOSITE1 = 1,
-       EM28XX_VMUX_COMPOSITE2,
-       EM28XX_VMUX_COMPOSITE3,
-       EM28XX_VMUX_COMPOSITE4,
+       EM28XX_VMUX_COMPOSITE = 1,
        EM28XX_VMUX_SVIDEO,
        EM28XX_VMUX_TELEVISION,
-       EM28XX_VMUX_CABLE,
-       EM28XX_VMUX_DVB,
-       EM28XX_VMUX_DEBUG,
        EM28XX_RADIO,
 };
 
@@ -558,6 +552,11 @@ struct em28xx_v4l2 {
        bool top_field;
        int vbi_read;
        unsigned int field_count;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_pad video_pad, vbi_pad;
+       struct media_entity *decoder;
+#endif
 };
 
 struct em28xx_audio {
@@ -718,6 +717,12 @@ struct em28xx {
        /* Snapshot button input device */
        char snapshot_button_path[30];  /* path of the input dev */
        struct input_dev *sbutton_input_dev;
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+       struct media_device *media_dev;
+       struct media_entity input_ent[MAX_EM28XX_INPUT];
+       struct media_pad input_pad[MAX_EM28XX_INPUT];
+#endif
 };
 
 #define kref_to_dev(d) container_of(d, struct em28xx, ref)
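
The new CONFIG_MEDIA_CONTROLLER members above (video_pad, vbi_pad, decoder, input_ent[], input_pad[]) are the hooks used by the em28xx_v4l2_create_entities() and em28xx_v4l2_create_media_graph() calls earlier in this diff. A hedged sketch of how such fields are typically wired up (the pad numbers and link flags are assumptions for illustration, not taken from this series):

#ifdef CONFIG_MEDIA_CONTROLLER
/* Illustrative sketch only -- not the em28xx implementation */
static int sketch_create_media_graph(struct em28xx *dev)
{
	struct em28xx_v4l2 *v4l2 = dev->v4l2;
	int ret;

	v4l2->video_pad.flags = MEDIA_PAD_FL_SINK;
	ret = media_entity_pads_init(&v4l2->vdev.entity, 1, &v4l2->video_pad);
	if (ret)
		return ret;

	if (!v4l2->decoder)
		return 0;

	/* the decoder's source pad number (1 here) is assumed */
	return media_create_pad_link(v4l2->decoder, 1,
				     &v4l2->vdev.entity, 0,
				     MEDIA_LNK_FL_ENABLED |
				     MEDIA_LNK_FL_IMMUTABLE);
}
#endif
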
index 745185eb060b5b4c104b220add58a89cac0d23b9..bebee8ca9981f85b05b46162f693aa87c4e28da1 100644 (file)
@@ -250,7 +250,7 @@ struct go7007 {
        struct i2c_adapter i2c_adapter;
 
        /* HPI driver */
-       struct go7007_hpi_ops *hpi_ops;
+       const struct go7007_hpi_ops *hpi_ops;
        void *hpi_context;
        int interrupt_available;
        wait_queue_head_t interrupt_waitq;
index 3dbf14c85c5c847ef8544ec78b96936b76677fd4..14d3f8c1ce4a4135193dab8b263dac34d2d9fe03 100644 (file)
@@ -932,7 +932,7 @@ static void go7007_usb_release(struct go7007 *go)
        kfree(go->hpi_context);
 }
 
-static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
+static const struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
        .interface_reset        = go7007_usb_interface_reset,
        .write_interrupt        = go7007_usb_ezusb_write_interrupt,
        .read_interrupt         = go7007_usb_read_interrupt,
@@ -942,7 +942,7 @@ static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
        .release                = go7007_usb_release,
 };
 
-static struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
+static const struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
        .interface_reset        = go7007_usb_interface_reset,
        .write_interrupt        = go7007_usb_onboard_write_interrupt,
        .read_interrupt         = go7007_usb_read_interrupt,
index 3fc64197b4e6a6475c89b61d580bf3a4317e4b35..08f0ca7aa012e9bb4b335dd27fb3fc401717f8d6 100644 (file)
@@ -273,7 +273,9 @@ static int hdpvr_probe(struct usb_interface *interface,
        struct hdpvr_device *dev;
        struct usb_host_interface *iface_desc;
        struct usb_endpoint_descriptor *endpoint;
+#if IS_ENABLED(CONFIG_I2C)
        struct i2c_client *client;
+#endif
        size_t buffer_size;
        int i;
        int retval = -ENOMEM;
index 7dee22deebf3d8b5c85ca95a39e7cb30935b1885..ba7f02270c8391f3d8bc939bcd0af4acd23e837f 100644 (file)
@@ -462,10 +462,8 @@ static ssize_t hdpvr_read(struct file *file, char __user *buffer, size_t count,
                        }
 
                        if (wait_event_interruptible(dev->wait_data,
-                                             buf->status == BUFSTAT_READY)) {
-                               ret = -ERESTARTSYS;
-                               goto err;
-                       }
+                                             buf->status == BUFSTAT_READY))
+                               return -ERESTARTSYS;
                }
 
                if (buf->status != BUFSTAT_READY)
index c104315fdc17217f4fad27b6c86e017fa126b7b2..2d33033682af3635a1237d14e0bef045185e7ef0 100644 (file)
@@ -839,8 +839,6 @@ static int msi2500_set_usb_adc(struct msi2500_dev *dev)
                goto err;
 
        ret = msi2500_ctrl_msg(dev, CMD_WREG, reg3);
-       if (ret)
-               goto err;
 err:
        return ret;
 }
index 086cf1c7bd7d19e0015950bd6af0d02c311bf090..18aed5dd325e024e1db0860ba954ae75ef50fa7e 100644 (file)
@@ -91,6 +91,7 @@ static const struct usb_device_id pwc_device_table [] = {
        { USB_DEVICE(0x0471, 0x0312) },
        { USB_DEVICE(0x0471, 0x0313) }, /* the 'new' 720K */
        { USB_DEVICE(0x0471, 0x0329) }, /* Philips SPC 900NC PC Camera */
+       { USB_DEVICE(0x0471, 0x032C) }, /* Philips SPC 880NC PC Camera */
        { USB_DEVICE(0x069A, 0x0001) }, /* Askey */
        { USB_DEVICE(0x046D, 0x08B0) }, /* Logitech QuickCam Pro 3000 */
        { USB_DEVICE(0x046D, 0x08B1) }, /* Logitech QuickCam Notebook Pro */
@@ -810,6 +811,11 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
                        name = "Philips SPC 900NC webcam";
                        type_id = 740;
                        break;
+               case 0x032C:
+                       PWC_INFO("Philips SPC 880NC USB webcam detected.\n");
+                       name = "Philips SPC 880NC webcam";
+                       type_id = 740;
+                       break;
                default:
                        return -ENODEV;
                        break;
index 46191d5262eb07f3efb9fdf87c75c0665abe6687..6ecb0b48423f3d411cb0874c9c159727241eb3bd 100644 (file)
@@ -98,7 +98,6 @@ void stk1160_buffer_done(struct stk1160 *dev)
 
        buf->vb.sequence = dev->sequence++;
        buf->vb.field = V4L2_FIELD_INTERLACED;
-       buf->vb.vb2_buf.planes[0].bytesused = buf->bytesused;
        buf->vb.vb2_buf.timestamp = ktime_get_ns();
 
        vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->bytesused);
index 4ebb33943f9a03a5922c408cfef628847a1053d7..f6cfad46547ef5951923e95d4005aa3f317c1e56 100644 (file)
@@ -312,20 +312,24 @@ static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
        usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
        usbtv->chunks_done++;
 
-       /* Last chunk in a frame, signalling an end */
-       if (odd && chunk_no == usbtv->n_chunks-1) {
-               int size = vb2_plane_size(&buf->vb.vb2_buf, 0);
-               enum vb2_buffer_state state = usbtv->chunks_done ==
-                                               usbtv->n_chunks ?
-                                               VB2_BUF_STATE_DONE :
-                                               VB2_BUF_STATE_ERROR;
-
-               buf->vb.field = V4L2_FIELD_INTERLACED;
-               buf->vb.sequence = usbtv->sequence++;
-               buf->vb.vb2_buf.timestamp = ktime_get_ns();
-               vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
-               vb2_buffer_done(&buf->vb.vb2_buf, state);
-               list_del(&buf->list);
+       /* Last chunk in a field */
+       if (chunk_no == usbtv->n_chunks-1) {
+               /* Last chunk in a frame, signalling an end */
+               if (odd && !usbtv->last_odd) {
+                       int size = vb2_plane_size(&buf->vb.vb2_buf, 0);
+                       enum vb2_buffer_state state = usbtv->chunks_done ==
+                               usbtv->n_chunks ?
+                               VB2_BUF_STATE_DONE :
+                               VB2_BUF_STATE_ERROR;
+
+                       buf->vb.field = V4L2_FIELD_INTERLACED;
+                       buf->vb.sequence = usbtv->sequence++;
+                       buf->vb.vb2_buf.timestamp = ktime_get_ns();
+                       vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);
+                       vb2_buffer_done(&buf->vb.vb2_buf, state);
+                       list_del(&buf->list);
+               }
+               usbtv->last_odd = odd;
        }
 
        spin_unlock_irqrestore(&usbtv->buflock, flags);
@@ -389,6 +393,10 @@ static struct urb *usbtv_setup_iso_transfer(struct usbtv *usbtv)
        ip->transfer_flags = URB_ISO_ASAP;
        ip->transfer_buffer = kzalloc(size * USBTV_ISOC_PACKETS,
                                                GFP_KERNEL);
+       if (!ip->transfer_buffer) {
+               usb_free_urb(ip);
+               return NULL;
+       }
        ip->complete = usbtv_iso_cb;
        ip->number_of_packets = USBTV_ISOC_PACKETS;
        ip->transfer_buffer_length = size * USBTV_ISOC_PACKETS;
@@ -639,6 +647,7 @@ static int usbtv_start_streaming(struct vb2_queue *vq, unsigned int count)
        if (usbtv->udev == NULL)
                return -ENODEV;
 
+       usbtv->last_odd = 1;
        usbtv->sequence = 0;
        return usbtv_start(usbtv);
 }
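
The last_odd flag initialized above (and consulted in usbtv_image_chunk()) only lets a frame complete once an odd field follows an even one. A minimal standalone sketch of that pairing rule (the function name is illustrative):

/*
 * Sketch of the rule added above: a frame is finished only when the field
 * just completed is odd and the previously completed field was even, so a
 * dropped field can no longer push out a frame with half its lines missing.
 */
static bool sketch_frame_complete(bool odd, bool *last_odd)
{
	bool done = odd && !*last_odd;

	*last_odd = odd;
	return done;
}
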
index 19cb8bf7c4e9b3c8befe5d82597ec2fdb8b258c7..161b38d5cfa0463e939af70fdfddfeb48510980e 100644 (file)
@@ -95,6 +95,7 @@ struct usbtv {
        int width, height;
        int n_chunks;
        int iso_size;
+       int last_odd;
        unsigned int sequence;
        struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
 
index de9ff3bb8edd8e7a94826bf58895d009dc420038..12f5ebbd0436e770022e7501a80f5dda7849f9a1 100644 (file)
@@ -162,8 +162,7 @@ MODULE_ALIAS(DRIVER_ALIAS);
 
 static inline struct usb_usbvision *cd_to_usbvision(struct device *cd)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        return video_get_drvdata(vdev);
 }
 
@@ -177,8 +176,7 @@ static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
 static ssize_t show_model(struct device *cd,
                          struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        return sprintf(buf, "%s\n",
                       usbvision_device_data[usbvision->dev_model].model_string);
@@ -188,8 +186,7 @@ static DEVICE_ATTR(model, S_IRUGO, show_model, NULL);
 static ssize_t show_hue(struct device *cd,
                        struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        struct v4l2_control ctrl;
        ctrl.id = V4L2_CID_HUE;
@@ -203,8 +200,7 @@ static DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL);
 static ssize_t show_contrast(struct device *cd,
                             struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        struct v4l2_control ctrl;
        ctrl.id = V4L2_CID_CONTRAST;
@@ -218,8 +214,7 @@ static DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL);
 static ssize_t show_brightness(struct device *cd,
                               struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        struct v4l2_control ctrl;
        ctrl.id = V4L2_CID_BRIGHTNESS;
@@ -233,8 +228,7 @@ static DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL);
 static ssize_t show_saturation(struct device *cd,
                               struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        struct v4l2_control ctrl;
        ctrl.id = V4L2_CID_SATURATION;
@@ -248,8 +242,7 @@ static DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL);
 static ssize_t show_streaming(struct device *cd,
                              struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        return sprintf(buf, "%s\n",
                       YES_NO(usbvision->streaming == stream_on ? 1 : 0));
@@ -259,8 +252,7 @@ static DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL);
 static ssize_t show_compression(struct device *cd,
                                struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        return sprintf(buf, "%s\n",
                       YES_NO(usbvision->isoc_mode == ISOC_MODE_COMPRESS));
@@ -270,8 +262,7 @@ static DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL);
 static ssize_t show_device_bridge(struct device *cd,
                                  struct device_attribute *attr, char *buf)
 {
-       struct video_device *vdev =
-               container_of(cd, struct video_device, dev);
+       struct video_device *vdev = to_video_device(cd);
        struct usb_usbvision *usbvision = video_get_drvdata(vdev);
        return sprintf(buf, "%d\n", usbvision->bridge_type);
 }
@@ -1156,6 +1147,7 @@ static int usbvision_radio_close(struct file *file)
        usbvision_audio_off(usbvision);
        usbvision->radio = 0;
        usbvision->user--;
+       mutex_unlock(&usbvision->v4l2_lock);
 
        if (usbvision->remove_pending) {
                printk(KERN_INFO "%s: Final disconnect\n", __func__);
@@ -1164,7 +1156,6 @@ static int usbvision_radio_close(struct file *file)
                return 0;
        }
 
-       mutex_unlock(&usbvision->v4l2_lock);
        PDEBUG(DBG_IO, "success");
        return v4l2_fh_release(file);
 }
index 9beece00869bf0e27a99fc641b8f926c0d3bad8f..29b3436d0910fbd0c1a591b0aea408dc34aca74a 100644 (file)
@@ -37,7 +37,6 @@ config VIDEO_PCI_SKELETON
 # Used by drivers that need tuner.ko
 config VIDEO_TUNER
        tristate
-       depends on MEDIA_TUNER
 
 # Used by drivers that need v4l2-mem2mem.ko
 config V4L2_MEM2MEM_DEV
index 76496fd282aa532da3e84960ea7dddeedc013ccf..731487be5baa993147206ec8f85242cd20dbbc91 100644 (file)
@@ -696,16 +696,32 @@ static int tuner_probe(struct i2c_client *client,
        /* Should be just before return */
 register_client:
 #if defined(CONFIG_MEDIA_CONTROLLER)
-       t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
-       t->pad[TUNER_PAD_IF_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
-       t->sd.entity.function = MEDIA_ENT_F_TUNER;
        t->sd.entity.name = t->name;
+       /*
+        * Handle the special case where the tuner has actually
+        * two stages: the PLL to tune into a frequency and the
+        * IF-PLL demodulator (tda988x).
+        */
+       if (t->type == TUNER_TDA9887) {
+               t->pad[IF_VID_DEC_PAD_IF_INPUT].flags = MEDIA_PAD_FL_SINK;
+               t->pad[IF_VID_DEC_PAD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+               ret = media_entity_pads_init(&t->sd.entity,
+                                            IF_VID_DEC_PAD_NUM_PADS,
+                                            &t->pad[0]);
+               t->sd.entity.function = MEDIA_ENT_F_IF_VID_DECODER;
+       } else {
+               t->pad[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
+               t->pad[TUNER_PAD_OUTPUT].flags = MEDIA_PAD_FL_SOURCE;
+               t->pad[TUNER_PAD_AUD_OUT].flags = MEDIA_PAD_FL_SOURCE;
+               ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS,
+                                            &t->pad[0]);
+               t->sd.entity.function = MEDIA_ENT_F_TUNER;
+       }
 
-       ret = media_entity_pads_init(&t->sd.entity, TUNER_NUM_PADS, &t->pad[0]);
        if (ret < 0) {
                tuner_err("failed to initialize media entity!\n");
                kfree(t);
-               return -ENODEV;
+               return ret;
        }
 #endif
        /* Sets a default mode */
index 8fd84a67478a77423cc6bc051e0654276561502c..019644ff627d775f7f4b67e29b46baab9dadabbf 100644 (file)
@@ -415,7 +415,8 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
                get_user(kp->index, &up->index) ||
                get_user(kp->type, &up->type) ||
                get_user(kp->flags, &up->flags) ||
-               get_user(kp->memory, &up->memory))
+               get_user(kp->memory, &up->memory) ||
+               get_user(kp->length, &up->length))
                        return -EFAULT;
 
        if (V4L2_TYPE_IS_OUTPUT(kp->type))
@@ -427,9 +428,6 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
                        return -EFAULT;
 
        if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
-               if (get_user(kp->length, &up->length))
-                       return -EFAULT;
-
                num_planes = kp->length;
                if (num_planes == 0) {
                        kp->m.planes = NULL;
@@ -462,16 +460,14 @@ static int get_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
        } else {
                switch (kp->memory) {
                case V4L2_MEMORY_MMAP:
-                       if (get_user(kp->length, &up->length) ||
-                               get_user(kp->m.offset, &up->m.offset))
+                       if (get_user(kp->m.offset, &up->m.offset))
                                return -EFAULT;
                        break;
                case V4L2_MEMORY_USERPTR:
                        {
                        compat_long_t tmp;
 
-                       if (get_user(kp->length, &up->length) ||
-                           get_user(tmp, &up->m.userptr))
+                       if (get_user(tmp, &up->m.userptr))
                                return -EFAULT;
 
                        kp->m.userptr = (unsigned long)compat_ptr(tmp);
@@ -513,7 +509,8 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
                copy_to_user(&up->timecode, &kp->timecode, sizeof(struct v4l2_timecode)) ||
                put_user(kp->sequence, &up->sequence) ||
                put_user(kp->reserved2, &up->reserved2) ||
-               put_user(kp->reserved, &up->reserved))
+               put_user(kp->reserved, &up->reserved) ||
+               put_user(kp->length, &up->length))
                        return -EFAULT;
 
        if (V4L2_TYPE_IS_MULTIPLANAR(kp->type)) {
@@ -536,13 +533,11 @@ static int put_v4l2_buffer32(struct v4l2_buffer *kp, struct v4l2_buffer32 __user
        } else {
                switch (kp->memory) {
                case V4L2_MEMORY_MMAP:
-                       if (put_user(kp->length, &up->length) ||
-                               put_user(kp->m.offset, &up->m.offset))
+                       if (put_user(kp->m.offset, &up->m.offset))
                                return -EFAULT;
                        break;
                case V4L2_MEMORY_USERPTR:
-                       if (put_user(kp->length, &up->length) ||
-                               put_user(kp->m.userptr, &up->m.userptr))
+                       if (put_user(kp->m.userptr, &up->m.userptr))
                                return -EFAULT;
                        break;
                case V4L2_MEMORY_OVERLAY:
index ec258b73001a26a96dcdb56d8c76e64b5762f1d9..889de0a321529ace36560ddabd682b74f080de8d 100644 (file)
@@ -165,7 +165,8 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
            bt->width > cap->max_width ||
            bt->pixelclock < cap->min_pixelclock ||
            bt->pixelclock > cap->max_pixelclock ||
-           (cap->standards && bt->standards &&
+           (!(caps & V4L2_DV_BT_CAP_CUSTOM) &&
+            cap->standards && bt->standards &&
             !(bt->standards & cap->standards)) ||
            (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
            (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
index b27cbb1f5afe734dd3b17b4e64ac40d1b2871276..93b33681776ca427703dee8bbf6616cca95d7938 100644 (file)
@@ -146,7 +146,7 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node,
  * variable without a low fixed limit. Please use
  * v4l2_of_alloc_parse_endpoint() in new drivers instead.
  *
- * Return: 0.
+ * Return: 0 on success or a negative error code on failure.
  */
 int v4l2_of_parse_endpoint(const struct device_node *node,
                           struct v4l2_of_endpoint *endpoint)
index c5d49d7a0d76d09c1ac5598a21ce7726d03190c1..dab94080ec3ae80941b275d2a226dfe777066b86 100644 (file)
@@ -1063,8 +1063,11 @@ EXPORT_SYMBOL_GPL(vb2_discard_done);
  */
 static int __qbuf_mmap(struct vb2_buffer *vb, const void *pb)
 {
-       int ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
-                       vb, pb, vb->planes);
+       int ret = 0;
+
+       if (pb)
+               ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+                                vb, pb, vb->planes);
        return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
 }
 
@@ -1077,14 +1080,16 @@ static int __qbuf_userptr(struct vb2_buffer *vb, const void *pb)
        struct vb2_queue *q = vb->vb2_queue;
        void *mem_priv;
        unsigned int plane;
-       int ret;
+       int ret = 0;
        enum dma_data_direction dma_dir =
                q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        bool reacquired = vb->planes[0].mem_priv == NULL;
 
        memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
        /* Copy relevant information provided by the userspace */
-       ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+       if (pb)
+               ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+                                vb, pb, planes);
        if (ret)
                return ret;
 
@@ -1192,14 +1197,16 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
        struct vb2_queue *q = vb->vb2_queue;
        void *mem_priv;
        unsigned int plane;
-       int ret;
+       int ret = 0;
        enum dma_data_direction dma_dir =
                q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
        bool reacquired = vb->planes[0].mem_priv == NULL;
 
        memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
        /* Copy relevant information provided by the userspace */
-       ret = call_bufop(vb->vb2_queue, fill_vb2_buffer, vb, pb, planes);
+       if (pb)
+               ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+                                vb, pb, planes);
        if (ret)
                return ret;
 
@@ -1220,6 +1227,7 @@ static int __qbuf_dmabuf(struct vb2_buffer *vb, const void *pb)
                if (planes[plane].length < vb->planes[plane].min_length) {
                        dprintk(1, "invalid dmabuf length for plane %d\n",
                                plane);
+                       dma_buf_put(dbuf);
                        ret = -EINVAL;
                        goto err;
                }
@@ -1520,7 +1528,8 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
        q->waiting_for_buffers = false;
        vb->state = VB2_BUF_STATE_QUEUED;
 
-       call_void_bufop(q, copy_timestamp, vb, pb);
+       if (pb)
+               call_void_bufop(q, copy_timestamp, vb, pb);
 
        trace_vb2_qbuf(q, vb);
 
@@ -1532,7 +1541,8 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb)
                __enqueue_in_driver(vb);
 
        /* Fill buffer information for the userspace */
-       call_void_bufop(q, fill_user_buffer, vb, pb);
+       if (pb)
+               call_void_bufop(q, fill_user_buffer, vb, pb);
 
        /*
         * If streamon has been called, and we haven't yet called
@@ -1731,7 +1741,8 @@ static void __vb2_dqbuf(struct vb2_buffer *vb)
  * The return values from this function are intended to be directly returned
  * from vidioc_dqbuf handler in driver.
  */
-int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+                  bool nonblocking)
 {
        struct vb2_buffer *vb = NULL;
        int ret;
@@ -1754,8 +1765,12 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
 
        call_void_vb_qop(vb, buf_finish, vb);
 
+       if (pindex)
+               *pindex = vb->index;
+
        /* Fill buffer information for the userspace */
-       call_void_bufop(q, fill_user_buffer, vb, pb);
+       if (pb)
+               call_void_bufop(q, fill_user_buffer, vb, pb);
 
        /* Remove from videobuf queue */
        list_del(&vb->queued_entry);
@@ -1828,7 +1843,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
         * that's done in dqbuf, but that's not going to happen when we
         * cancel the whole queue. Note: this code belongs here, not in
         * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
-        * call to __fill_v4l2_buffer() after buf_finish(). That order can't
+        * call to __fill_user_buffer() after buf_finish(). That order can't
         * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
         */
        for (i = 0; i < q->num_buffers; ++i) {
@@ -2357,7 +2372,6 @@ struct vb2_fileio_data {
        unsigned int count;
        unsigned int type;
        unsigned int memory;
-       struct vb2_buffer *b;
        struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
        unsigned int cur_index;
        unsigned int initial_index;
@@ -2410,12 +2424,6 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
        if (fileio == NULL)
                return -ENOMEM;
 
-       fileio->b = kzalloc(q->buf_struct_size, GFP_KERNEL);
-       if (fileio->b == NULL) {
-               kfree(fileio);
-               return -ENOMEM;
-       }
-
        fileio->read_once = q->fileio_read_once;
        fileio->write_immediately = q->fileio_write_immediately;
 
@@ -2460,13 +2468,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
                 * Queue all buffers.
                 */
                for (i = 0; i < q->num_buffers; i++) {
-                       struct vb2_buffer *b = fileio->b;
-
-                       memset(b, 0, q->buf_struct_size);
-                       b->type = q->type;
-                       b->memory = q->memory;
-                       b->index = i;
-                       ret = vb2_core_qbuf(q, i, b);
+                       ret = vb2_core_qbuf(q, i, NULL);
                        if (ret)
                                goto err_reqbufs;
                        fileio->bufs[i].queued = 1;
@@ -2511,7 +2513,6 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
                q->fileio = NULL;
                fileio->count = 0;
                vb2_core_reqbufs(q, fileio->memory, &fileio->count);
-               kfree(fileio->b);
                kfree(fileio);
                dprintk(3, "file io emulator closed\n");
        }
@@ -2539,7 +2540,8 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
         * else is able to provide this information with the write() operation.
         */
        bool copy_timestamp = !read && q->copy_timestamp;
-       int ret, index;
+       unsigned index;
+       int ret;
 
        dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
                read ? "read" : "write", (long)*ppos, count,
@@ -2564,22 +2566,20 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
         */
        index = fileio->cur_index;
        if (index >= q->num_buffers) {
-               struct vb2_buffer *b = fileio->b;
+               struct vb2_buffer *b;
 
                /*
                 * Call vb2_dqbuf to get buffer back.
                 */
-               memset(b, 0, q->buf_struct_size);
-               b->type = q->type;
-               b->memory = q->memory;
-               ret = vb2_core_dqbuf(q, b, nonblock);
+               ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
                dprintk(5, "vb2_dqbuf result: %d\n", ret);
                if (ret)
                        return ret;
                fileio->dq_count += 1;
 
-               fileio->cur_index = index = b->index;
+               fileio->cur_index = index;
                buf = &fileio->bufs[index];
+               b = q->bufs[index];
 
                /*
                 * Get number of bytes filled by the driver
@@ -2630,7 +2630,7 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
         * Queue next buffer if required.
         */
        if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
-               struct vb2_buffer *b = fileio->b;
+               struct vb2_buffer *b = q->bufs[index];
 
                /*
                 * Check if this is the last buffer to read.
@@ -2643,15 +2643,11 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
                /*
                 * Call vb2_qbuf and give buffer to the driver.
                 */
-               memset(b, 0, q->buf_struct_size);
-               b->type = q->type;
-               b->memory = q->memory;
-               b->index = index;
                b->planes[0].bytesused = buf->pos;
 
                if (copy_timestamp)
                        b->timestamp = ktime_get_ns();
-               ret = vb2_core_qbuf(q, index, b);
+               ret = vb2_core_qbuf(q, index, NULL);
                dprintk(5, "vb2_dbuf result: %d\n", ret);
                if (ret)
                        return ret;
@@ -2713,10 +2709,9 @@ static int vb2_thread(void *data)
 {
        struct vb2_queue *q = data;
        struct vb2_threadio_data *threadio = q->threadio;
-       struct vb2_fileio_data *fileio = q->fileio;
        bool copy_timestamp = false;
-       int prequeue = 0;
-       int index = 0;
+       unsigned prequeue = 0;
+       unsigned index = 0;
        int ret = 0;
 
        if (q->is_output) {
@@ -2728,37 +2723,34 @@ static int vb2_thread(void *data)
 
        for (;;) {
                struct vb2_buffer *vb;
-               struct vb2_buffer *b = fileio->b;
 
                /*
                 * Call vb2_dqbuf to get buffer back.
                 */
-               memset(b, 0, q->buf_struct_size);
-               b->type = q->type;
-               b->memory = q->memory;
                if (prequeue) {
-                       b->index = index++;
+                       vb = q->bufs[index++];
                        prequeue--;
                } else {
                        call_void_qop(q, wait_finish, q);
                        if (!threadio->stop)
-                               ret = vb2_core_dqbuf(q, b, 0);
+                               ret = vb2_core_dqbuf(q, &index, NULL, 0);
                        call_void_qop(q, wait_prepare, q);
                        dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
+                       if (!ret)
+                               vb = q->bufs[index];
                }
                if (ret || threadio->stop)
                        break;
                try_to_freeze();
 
-               vb = q->bufs[b->index];
-               if (b->state == VB2_BUF_STATE_DONE)
+               if (vb->state != VB2_BUF_STATE_ERROR)
                        if (threadio->fnc(vb, threadio->priv))
                                break;
                call_void_qop(q, wait_finish, q);
                if (copy_timestamp)
-                       b->timestamp = ktime_get_ns();;
+                       vb->timestamp = ktime_get_ns();
                if (!threadio->stop)
-                       ret = vb2_core_qbuf(q, b->index, b);
+                       ret = vb2_core_qbuf(q, vb->index, NULL);
                call_void_qop(q, wait_prepare, q);
                if (ret || threadio->stop)
                        break;
index c9a28605511a71166af35a4ef11fb95ff0eb8fbd..91f552124050d2b3b91d2190af16fbdf2f9d7243 100644 (file)
@@ -625,7 +625,7 @@ static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b,
                return -EINVAL;
        }
 
-       ret = vb2_core_dqbuf(q, b, nonblocking);
+       ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
 
        return ret;
 }
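
Taken together, the videobuf2 changes above drop the per-fileio shadow buffer: passing pb == NULL skips the fill/copy bufops, and the new pindex argument reports which q->bufs[] entry was dequeued. A condensed sketch of the internal call style this enables (mirrors the hunks above; not a complete function):

/* Condensed from the hunks above; illustrative only */
static int sketch_dqbuf_requeue(struct vb2_queue *q, bool nonblock)
{
	struct vb2_buffer *vb;
	unsigned int index;
	int ret;

	/* pb == NULL: skip fill_user_buffer, just report the index */
	ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
	if (ret)
		return ret;

	vb = q->bufs[index];
	/* ... consume or fill vb ... */

	/* pb == NULL again: no userspace buffer to copy timestamps from */
	return vb2_core_qbuf(q, vb->index, NULL);
}
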
index acd1460cf7871c4ef4cffb0f0e196f41083aae36..2a691da8c1c7c0441c08d97a8d1e711099a32299 100644 (file)
@@ -260,7 +260,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
 
        /* get the Controller level irq */
        fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0);
-       if (fsl_ifc_ctrl_dev->irq == NO_IRQ) {
+       if (fsl_ifc_ctrl_dev->irq == 0) {
                dev_err(&dev->dev, "failed to get irq resource "
                                                        "for IFC\n");
                ret = -ENODEV;
index 9ca66de0c1c1770f4630c35a93c81adfdcee4883..1b8668a4a26d9045680c8b31ecf02011fff766c3 100644 (file)
@@ -203,7 +203,7 @@ config MFD_DA9062
        select MFD_CORE
        select REGMAP_I2C
        select REGMAP_IRQ
-       depends on I2C=y
+       depends on I2C
        help
          Say yes here for support for the Dialog Semiconductor DA9062 PMIC.
          This includes the I2C driver and core APIs.
@@ -215,7 +215,7 @@ config MFD_DA9063
        select MFD_CORE
        select REGMAP_I2C
        select REGMAP_IRQ
-       depends on I2C=y
+       depends on I2C
        help
          Say yes here for support for the Dialog Semiconductor DA9063 PMIC.
          This includes the I2C driver and core APIs.
@@ -224,7 +224,7 @@ config MFD_DA9063
 
 config MFD_DA9150
        tristate "Dialog Semiconductor DA9150 Charger Fuel-Gauge chip"
-       depends on I2C=y
+       depends on I2C
        select MFD_CORE
        select REGMAP_I2C
        select REGMAP_IRQ
@@ -445,7 +445,7 @@ config MFD_KEMPLD
 
 config MFD_88PM800
        tristate "Marvell 88PM800"
-       depends on I2C=y
+       depends on I2C
        select REGMAP_I2C
        select REGMAP_IRQ
        select MFD_CORE
@@ -457,7 +457,7 @@ config MFD_88PM800
 
 config MFD_88PM805
        tristate "Marvell 88PM805"
-       depends on I2C=y
+       depends on I2C
        select REGMAP_I2C
        select REGMAP_IRQ
        select MFD_CORE
@@ -538,7 +538,7 @@ config MFD_MAX77843
 config MFD_MAX8907
        tristate "Maxim Semiconductor MAX8907 PMIC Support"
        select MFD_CORE
-       depends on I2C=y
+       depends on I2C
        select REGMAP_I2C
        select REGMAP_IRQ
        help
@@ -743,7 +743,7 @@ config MFD_RTSX_PCI
 
 config MFD_RT5033
        tristate "Richtek RT5033 Power Management IC"
-       depends on I2C=y
+       depends on I2C
        select MFD_CORE
        select REGMAP_I2C
        select REGMAP_IRQ
@@ -1106,6 +1106,19 @@ config TPS6507X
          This driver can also be built as a module.  If so, the module
          will be called tps6507x.
 
+config MFD_TPS65086
+       tristate "TI TPS65086 Power Management Integrated Chips (PMICs)"
+       select REGMAP
+       select REGMAP_IRQ
+       select REGMAP_I2C
+       depends on I2C
+       help
+         If you say yes here you get support for the TPS65086 series of
+         Power Management chips.
+         This driver provides common support for accessing the device;
+         additional drivers must be enabled in order to use the
+         functionality of the device.
+
 config TPS65911_COMPARATOR
        tristate
 
index 0f230a6103f866e88213ea5315e18e64ca2fb31b..06801aa9d8dcdf5d36061cc519b3187cae2d6da2 100644 (file)
@@ -70,6 +70,7 @@ obj-$(CONFIG_MFD_WM8994)      += wm8994.o
 obj-$(CONFIG_TPS6105X)         += tps6105x.o
 obj-$(CONFIG_TPS65010)         += tps65010.o
 obj-$(CONFIG_TPS6507X)         += tps6507x.o
+obj-$(CONFIG_MFD_TPS65086)     += tps65086.o
 obj-$(CONFIG_MFD_TPS65217)     += tps65217.o
 obj-$(CONFIG_MFD_TPS65218)     += tps65218.o
 obj-$(CONFIG_MFD_TPS65910)     += tps65910.o
index f3d689176fc2229f5dd1be64af2014af798dac60..c03a86ee64e67957971d39ca5de993340a3b493f 100644 (file)
@@ -634,79 +634,41 @@ static const struct mfd_cell ab8500_bm_devs[] = {
 
 static const struct mfd_cell ab8500_devs[] = {
 #ifdef CONFIG_DEBUG_FS
-       {
-               .name = "ab8500-debug",
-               .of_compatible = "stericsson,ab8500-debug",
-       },
+       OF_MFD_CELL("ab8500-debug",
+                   NULL, NULL, 0, "stericsson,ab8500-debug"),
 #endif
-       {
-               .name = "ab8500-sysctrl",
-               .of_compatible = "stericsson,ab8500-sysctrl",
-       },
-       {
-               .name = "ab8500-ext-regulator",
-               .of_compatible = "stericsson,ab8500-ext-regulator",
-       },
-       {
-               .name = "ab8500-regulator",
-               .of_compatible = "stericsson,ab8500-regulator",
-       },
-       {
-               .name = "abx500-clk",
-               .of_compatible = "stericsson,abx500-clk",
-       },
-       {
-               .name = "ab8500-gpadc",
-               .of_compatible = "stericsson,ab8500-gpadc",
-       },
-       {
-               .name = "ab8500-rtc",
-               .of_compatible = "stericsson,ab8500-rtc",
-       },
-       {
-               .name = "ab8500-acc-det",
-               .of_compatible = "stericsson,ab8500-acc-det",
-       },
-       {
-
-               .name = "ab8500-poweron-key",
-               .of_compatible = "stericsson,ab8500-poweron-key",
-       },
-       {
-               .name = "ab8500-pwm",
-               .of_compatible = "stericsson,ab8500-pwm",
-               .id = 1,
-       },
-       {
-               .name = "ab8500-pwm",
-               .of_compatible = "stericsson,ab8500-pwm",
-               .id = 2,
-       },
-       {
-               .name = "ab8500-pwm",
-               .of_compatible = "stericsson,ab8500-pwm",
-               .id = 3,
-       },
-       {
-               .name = "ab8500-denc",
-               .of_compatible = "stericsson,ab8500-denc",
-       },
-       {
-               .name = "pinctrl-ab8500",
-               .of_compatible = "stericsson,ab8500-gpio",
-       },
-       {
-               .name = "abx500-temp",
-               .of_compatible = "stericsson,abx500-temp",
-       },
-       {
-               .name = "ab8500-usb",
-               .of_compatible = "stericsson,ab8500-usb",
-       },
-       {
-               .name = "ab8500-codec",
-               .of_compatible = "stericsson,ab8500-codec",
-       },
+       OF_MFD_CELL("ab8500-sysctrl",
+                   NULL, NULL, 0, "stericsson,ab8500-sysctrl"),
+       OF_MFD_CELL("ab8500-ext-regulator",
+                   NULL, NULL, 0, "stericsson,ab8500-ext-regulator"),
+       OF_MFD_CELL("ab8500-regulator",
+                   NULL, NULL, 0, "stericsson,ab8500-regulator"),
+       OF_MFD_CELL("abx500-clk",
+                   NULL, NULL, 0, "stericsson,abx500-clk"),
+       OF_MFD_CELL("ab8500-gpadc",
+                   NULL, NULL, 0, "stericsson,ab8500-gpadc"),
+       OF_MFD_CELL("ab8500-rtc",
+                   NULL, NULL, 0, "stericsson,ab8500-rtc"),
+       OF_MFD_CELL("ab8500-acc-det",
+                   NULL, NULL, 0, "stericsson,ab8500-acc-det"),
+       OF_MFD_CELL("ab8500-poweron-key",
+                   NULL, NULL, 0, "stericsson,ab8500-poweron-key"),
+       OF_MFD_CELL("ab8500-pwm",
+                   NULL, NULL, 1, "stericsson,ab8500-pwm"),
+       OF_MFD_CELL("ab8500-pwm",
+                   NULL, NULL, 2, "stericsson,ab8500-pwm"),
+       OF_MFD_CELL("ab8500-pwm",
+                   NULL, NULL, 3, "stericsson,ab8500-pwm"),
+       OF_MFD_CELL("ab8500-denc",
+                   NULL, NULL, 0, "stericsson,ab8500-denc"),
+       OF_MFD_CELL("pinctrl-ab8500",
+                   NULL, NULL, 0, "stericsson,ab8500-gpio"),
+       OF_MFD_CELL("abx500-temp",
+                   NULL, NULL, 0, "stericsson,abx500-temp"),
+       OF_MFD_CELL("ab8500-usb",
+                   NULL, NULL, 0, "stericsson,ab8500-usb"),
+       OF_MFD_CELL("ab8500-codec",
+                   NULL, NULL, 0, "stericsson,ab8500-codec"),
 };
 
 static const struct mfd_cell ab9540_devs[] = {
index e6e4bacb09ee5d2636576c3363c97a1a5c5f72c8..c0a86aeb173303fb6c07100137eddad6a2bcf68c 100644 (file)
@@ -739,20 +739,17 @@ int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
        if (!div && !requests[clkout])
                return -EINVAL;
 
-       switch (clkout) {
-       case 0:
+       if (clkout == 0) {
                div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
                mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
                bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
                        (div << PRCM_CLKOCR_CLKODIV0_SHIFT));
-               break;
-       case 1:
+       } else {
                div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
                mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
                        PRCM_CLKOCR_CLK1TYPE);
                bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
                        (div << PRCM_CLKOCR_CLKODIV1_SHIFT));
-               break;
        }
        bits &= mask;
 
@@ -2048,6 +2045,7 @@ int db8500_prcmu_config_hotmon(u8 low, u8 high)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon);
 
 static int config_hot_period(u16 val)
 {
@@ -2074,11 +2072,13 @@ int db8500_prcmu_start_temp_sense(u16 cycles32k)
 
        return config_hot_period(cycles32k);
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense);
 
 int db8500_prcmu_stop_temp_sense(void)
 {
        return config_hot_period(0xFFFF);
 }
+EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense);
 
 static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
 {
index a7136c7ae9fb4b2ed4e51c54534f5f2a99cba8d2..afc7af92db9efcb02a58728a7aa48d25fc637df8 100644 (file)
@@ -112,7 +112,7 @@ static const struct intel_lpss_platform_info bxt_i2c_info = {
 };
 
 static const struct pci_device_id intel_lpss_pci_ids[] = {
-       /* BXT */
+       /* BXT A-Step */
        { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
        { PCI_VDEVICE(INTEL, 0x0aae), (kernel_ulong_t)&bxt_i2c_info },
        { PCI_VDEVICE(INTEL, 0x0ab0), (kernel_ulong_t)&bxt_i2c_info },
@@ -128,6 +128,23 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x0ac4), (kernel_ulong_t)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x0ac6), (kernel_ulong_t)&bxt_info },
        { PCI_VDEVICE(INTEL, 0x0aee), (kernel_ulong_t)&bxt_uart_info },
+       /* BXT B-Step */
+       { PCI_VDEVICE(INTEL, 0x1aac), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x1aae), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x1ab0), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x1ab2), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x1ab4), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x1ab6), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x1ab8), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x1aba), (kernel_ulong_t)&bxt_i2c_info },
+       { PCI_VDEVICE(INTEL, 0x1abc), (kernel_ulong_t)&bxt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x1abe), (kernel_ulong_t)&bxt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x1ac0), (kernel_ulong_t)&bxt_uart_info },
+       { PCI_VDEVICE(INTEL, 0x1ac2), (kernel_ulong_t)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
+       { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
+
        /* APL */
        { PCI_VDEVICE(INTEL, 0x5aac), (kernel_ulong_t)&bxt_i2c_info },
        { PCI_VDEVICE(INTEL, 0x5aae), (kernel_ulong_t)&bxt_i2c_info },
index 1743788f1595671af62102f95979af1f61c5a14d..1bbbe877ba7e7c1702107fa062f208c089151f60 100644 (file)
@@ -453,6 +453,7 @@ int intel_lpss_probe(struct device *dev,
 err_remove_ltr:
        intel_lpss_debugfs_remove(lpss);
        intel_lpss_ltr_hide(lpss);
+       intel_lpss_unregister_clock(lpss);
 
 err_clk_register:
        ida_simple_remove(&intel_lpss_devid_ida, lpss->devid);
index bd3aa45783460e983add77ab46d32124f46c37a0..d5296bd3e88d85b40f15323f259b28943fb25c3f 100644 (file)
@@ -936,7 +936,10 @@ static int lpc_ich_init_gpio(struct pci_dev *dev)
 gpe0_done:
        /* Setup GPIO base register */
        pci_read_config_dword(dev, priv->gbase, &base_addr_cfg);
-       base_addr = base_addr_cfg & 0x0000ff80;
+
+       /* Clear the i/o flag */
+       base_addr = base_addr_cfg & ~BIT(0);
+
        if (!base_addr) {
                dev_notice(&dev->dev, "I/O space for GPIO uninitialized\n");
                ret = -ENODEV;
index b7aabeefab07a2b44ab96b3e371de3d731c0b9cf..2f2225e845efe960a9904fefc1d16fc5cde6b081 100644 (file)
@@ -36,7 +36,7 @@ struct syscon {
        struct list_head list;
 };
 
-static struct regmap_config syscon_regmap_config = {
+static const struct regmap_config syscon_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,
@@ -50,6 +50,7 @@ static struct syscon *of_syscon_register(struct device_node *np)
        u32 reg_io_width;
        int ret;
        struct regmap_config syscon_config = syscon_regmap_config;
+       struct resource res;
 
        if (!of_device_is_compatible(np, "syscon"))
                return ERR_PTR(-EINVAL);
@@ -58,7 +59,12 @@ static struct syscon *of_syscon_register(struct device_node *np)
        if (!syscon)
                return ERR_PTR(-ENOMEM);
 
-       base = of_iomap(np, 0);
+       if (of_address_to_resource(np, 0, &res)) {
+               ret = -ENOMEM;
+               goto err_map;
+       }
+
+       base = ioremap(res.start, resource_size(&res));
        if (!base) {
                ret = -ENOMEM;
                goto err_map;
@@ -81,6 +87,7 @@ static struct syscon *of_syscon_register(struct device_node *np)
 
        syscon_config.reg_stride = reg_io_width;
        syscon_config.val_bits = reg_io_width * 8;
+       syscon_config.max_register = resource_size(&res) - reg_io_width;
 
        regmap = regmap_init_mmio(NULL, base, &syscon_config);
        if (IS_ERR(regmap)) {
@@ -192,6 +199,7 @@ static int syscon_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct syscon_platform_data *pdata = dev_get_platdata(dev);
        struct syscon *syscon;
+       struct regmap_config syscon_config = syscon_regmap_config;
        struct resource *res;
        void __iomem *base;
 
@@ -207,11 +215,10 @@ static int syscon_probe(struct platform_device *pdev)
        if (!base)
                return -ENOMEM;
 
-       syscon_regmap_config.max_register = res->end - res->start - 3;
+       syscon_config.max_register = res->end - res->start - 3;
        if (pdata)
-               syscon_regmap_config.name = pdata->label;
-       syscon->regmap = devm_regmap_init_mmio(dev, base,
-                                       &syscon_regmap_config);
+               syscon_config.name = pdata->label;
+       syscon->regmap = devm_regmap_init_mmio(dev, base, &syscon_config);
        if (IS_ERR(syscon->regmap)) {
                dev_err(dev, "regmap init failed\n");
                return PTR_ERR(syscon->regmap);
index 83e615ed100ae76bf4abd4740f1acb9cce9de862..495e4518fc2992ff1a757858611f7cf1f3c27b55 100644 (file)
@@ -1059,26 +1059,7 @@ EXPORT_SYMBOL(tps65013_set_low_pwr);
 
 static int __init tps_init(void)
 {
-       u32     tries = 3;
-       int     status = -ENODEV;
-
-       printk(KERN_INFO "%s: version %s\n", DRIVER_NAME, DRIVER_VERSION);
-
-       /* some boards have startup glitches */
-       while (tries--) {
-               status = i2c_add_driver(&tps65010_driver);
-               if (the_tps)
-                       break;
-               i2c_del_driver(&tps65010_driver);
-               if (!tries) {
-                       printk(KERN_ERR "%s: no chip?\n", DRIVER_NAME);
-                       return -ENODEV;
-               }
-               pr_debug("%s: re-probe ...\n", DRIVER_NAME);
-               msleep(10);
-       }
-
-       return status;
+       return i2c_add_driver(&tps65010_driver);
 }
 /* NOTE:  this MUST be initialized before the other parts of the system
  * that rely on it ... but after the i2c bus on which this relies.
diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c
new file mode 100644 (file)
index 0000000..43119a6
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *     Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65912 driver
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+
+#include <linux/mfd/tps65086.h>
+
+static const struct mfd_cell tps65086_cells[] = {
+       { .name = "tps65086-regulator", },
+       { .name = "tps65086-gpio", },
+};
+
+static const struct regmap_range tps65086_yes_ranges[] = {
+       regmap_reg_range(TPS65086_IRQ, TPS65086_IRQ),
+       regmap_reg_range(TPS65086_PMICSTAT, TPS65086_SHUTDNSRC),
+       regmap_reg_range(TPS65086_GPOCTRL, TPS65086_GPOCTRL),
+       regmap_reg_range(TPS65086_PG_STATUS1, TPS65086_OC_STATUS),
+};
+
+static const struct regmap_access_table tps65086_volatile_table = {
+       .yes_ranges = tps65086_yes_ranges,
+       .n_yes_ranges = ARRAY_SIZE(tps65086_yes_ranges),
+};
+
+static const struct regmap_config tps65086_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .cache_type = REGCACHE_RBTREE,
+       .volatile_table = &tps65086_volatile_table,
+};
+
+static const struct regmap_irq tps65086_irqs[] = {
+       REGMAP_IRQ_REG(TPS65086_IRQ_DIETEMP, 0, TPS65086_IRQ_DIETEMP_MASK),
+       REGMAP_IRQ_REG(TPS65086_IRQ_SHUTDN, 0, TPS65086_IRQ_SHUTDN_MASK),
+       REGMAP_IRQ_REG(TPS65086_IRQ_FAULT, 0, TPS65086_IRQ_FAULT_MASK),
+};
+
+static struct regmap_irq_chip tps65086_irq_chip = {
+       .name = "tps65086",
+       .status_base = TPS65086_IRQ,
+       .mask_base = TPS65086_IRQ_MASK,
+       .ack_base = TPS65086_IRQ,
+       .init_ack_masked = true,
+       .num_regs = 1,
+       .irqs = tps65086_irqs,
+       .num_irqs = ARRAY_SIZE(tps65086_irqs),
+};
+
+static const struct of_device_id tps65086_of_match_table[] = {
+       { .compatible = "ti,tps65086", },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tps65086_of_match_table);
+
+static int tps65086_probe(struct i2c_client *client,
+                         const struct i2c_device_id *ids)
+{
+       struct tps65086 *tps;
+       unsigned int version;
+       int ret;
+
+       tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL);
+       if (!tps)
+               return -ENOMEM;
+
+       i2c_set_clientdata(client, tps);
+       tps->dev = &client->dev;
+       tps->irq = client->irq;
+
+       tps->regmap = devm_regmap_init_i2c(client, &tps65086_regmap_config);
+       if (IS_ERR(tps->regmap)) {
+               dev_err(tps->dev, "Failed to initialize register map\n");
+               return PTR_ERR(tps->regmap);
+       }
+
+       ret = regmap_read(tps->regmap, TPS65086_DEVICEID, &version);
+       if (ret) {
+               dev_err(tps->dev, "Failed to read revision register\n");
+               return ret;
+       }
+
+       dev_info(tps->dev, "Device: TPS65086%01lX, OTP: %c, Rev: %ld\n",
+                (version & TPS65086_DEVICEID_PART_MASK),
+                (char)((version & TPS65086_DEVICEID_OTP_MASK) >> 4) + 'A',
+                (version & TPS65086_DEVICEID_REV_MASK) >> 6);
+
+       ret = regmap_add_irq_chip(tps->regmap, tps->irq, IRQF_ONESHOT, 0,
+                                 &tps65086_irq_chip, &tps->irq_data);
+       if (ret) {
+               dev_err(tps->dev, "Failed to register IRQ chip\n");
+               return ret;
+       }
+
+       ret = mfd_add_devices(tps->dev, PLATFORM_DEVID_AUTO, tps65086_cells,
+                             ARRAY_SIZE(tps65086_cells), NULL, 0,
+                             regmap_irq_get_domain(tps->irq_data));
+       if (ret) {
+               regmap_del_irq_chip(tps->irq, tps->irq_data);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int tps65086_remove(struct i2c_client *client)
+{
+       struct tps65086 *tps = i2c_get_clientdata(client);
+
+       regmap_del_irq_chip(tps->irq, tps->irq_data);
+
+       return 0;
+}
+
+static const struct i2c_device_id tps65086_id_table[] = {
+       { "tps65086", 0 },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, tps65086_id_table);
+
+static struct i2c_driver tps65086_driver = {
+       .driver         = {
+               .name   = "tps65086",
+               .of_match_table = tps65086_of_match_table,
+       },
+       .probe          = tps65086_probe,
+       .remove         = tps65086_remove,
+       .id_table       = tps65086_id_table,
+};
+module_i2c_driver(tps65086_driver);
+
+MODULE_AUTHOR("Andrew F. Davis <afd@ti.com>");
+MODULE_DESCRIPTION("TPS65086 PMIC Driver");
+MODULE_LICENSE("GPL v2");
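
Since the new driver stores its state with i2c_set_clientdata() and registers its cells with mfd_add_devices(), a child driver (for instance the tps65086-regulator cell listed above) would normally reach the shared regmap through its parent device. A hedged sketch of that convention (the child probe body is illustrative and not part of this series):

/* Illustrative child-cell probe; the parent-drvdata lookup is the point */
static int sketch_tps65086_child_probe(struct platform_device *pdev)
{
	struct tps65086 *tps = dev_get_drvdata(pdev->dev.parent);

	if (!tps)
		return -ENODEV;

	/* tps->regmap and regmap_irq_get_domain(tps->irq_data) are usable here */
	return 0;
}
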
index 4c1903f781fc1793bb7ddf7205d138e60ea2133a..a21403238a4a683a0062dabfea3a0fd3af9847ab 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/delay.h>
 #include <asm/opal.h>
 #include <asm/msi_bitmap.h>
-#include <asm/pci-bridge.h> /* for struct pci_controller */
 #include <asm/pnv-pci.h>
 #include <asm/io.h>
 
index 677d0362f334e842abb2c8c2439260978249b871..80f9afcb13823282a9859e08a96c36f55901efa6 100644 (file)
@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
 {
        struct mei_cl *cl = file->private_data;
 
-       return mei_cl_notify_request(cl, file, request);
+       if (request != MEI_HBM_NOTIFICATION_START &&
+           request != MEI_HBM_NOTIFICATION_STOP)
+               return -EINVAL;
+
+       return mei_cl_notify_request(cl, file, (u8)request);
 }
 
 /**
index 5914263090fc81447e26130baab143802504c088..b3ec4fb0d344c15d550e2d85bd09d0f9d2ed97b7 100644 (file)
@@ -655,8 +655,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
        }
 
        md = mmc_blk_get(bdev->bd_disk);
-       if (!md)
+       if (!md) {
+               err = -EINVAL;
                goto cmd_err;
+       }
 
        card = md->queue.card;
        if (IS_ERR(card)) {
@@ -1363,8 +1365,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
 
        if (brq->data.error) {
                if (need_retune && !brq->retune_retry_done) {
-                       pr_info("%s: retrying because a re-tune was needed\n",
-                               req->rq_disk->disk_name);
+                       pr_debug("%s: retrying because a re-tune was needed\n",
+                                req->rq_disk->disk_name);
                        brq->retune_retry_done = 1;
                        return MMC_BLK_RETRY;
                }
@@ -1525,13 +1527,13 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
        }
        if (rq_data_dir(req) == READ) {
                brq->cmd.opcode = readcmd;
-               brq->data.flags |= MMC_DATA_READ;
+               brq->data.flags = MMC_DATA_READ;
                if (brq->mrq.stop)
                        brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
                                        MMC_CMD_AC;
        } else {
                brq->cmd.opcode = writecmd;
-               brq->data.flags |= MMC_DATA_WRITE;
+               brq->data.flags = MMC_DATA_WRITE;
                if (brq->mrq.stop)
                        brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
                                        MMC_CMD_AC;
@@ -1800,7 +1802,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
 
        brq->data.blksz = 512;
        brq->data.blocks = packed->blocks + hdr_blocks;
-       brq->data.flags |= MMC_DATA_WRITE;
+       brq->data.flags = MMC_DATA_WRITE;
 
        brq->stop.opcode = MMC_STOP_TRANSMISSION;
        brq->stop.arg = 0;
index 7fc9174d46191a13c77fa4e220bb476a32dfe05e..c032eef45762c1dfb2ef810add07e1d3f980eee5 100644 (file)
@@ -2829,6 +2829,7 @@ static int mtf_testlist_show(struct seq_file *sf, void *data)
 
        mutex_lock(&mmc_test_lock);
 
+       seq_printf(sf, "0:\tRun all tests\n");
        for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
                seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
 
index d2de5925b73e6d7cb730b6b687e26d04cc0f92bf..5415056f9aa5b109c67a075fa2fd3bfd7a335acf 100644 (file)
@@ -493,7 +493,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
        if (status & UART_MSR_DCTS) {
                port->icount.cts++;
                tty = tty_port_tty_get(&port->port);
-               if (tty && (tty->termios.c_cflag & CRTSCTS)) {
+               if (tty && C_CRTSCTS(tty)) {
                        int cts = (status & UART_MSR_CTS);
                        if (tty->hw_stopped) {
                                if (cts) {
@@ -648,10 +648,10 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
 
        sdio_uart_change_speed(port, &tty->termios, NULL);
 
-       if (tty->termios.c_cflag & CBAUD)
+       if (C_BAUD(tty))
                sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
 
-       if (tty->termios.c_cflag & CRTSCTS)
+       if (C_CRTSCTS(tty))
                if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
                        tty->hw_stopped = 1;
 
@@ -833,7 +833,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)
 {
        struct sdio_uart_port *port = tty->driver_data;
 
-       if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))
+       if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
                return;
 
        if (sdio_uart_claim_func(port) != 0)
@@ -844,7 +844,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)
                sdio_uart_start_tx(port);
        }
 
-       if (tty->termios.c_cflag & CRTSCTS)
+       if (C_CRTSCTS(tty))
                sdio_uart_clear_mctrl(port, TIOCM_RTS);
 
        sdio_uart_irq(port->func);
@@ -855,7 +855,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
 {
        struct sdio_uart_port *port = tty->driver_data;
 
-       if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))
+       if (!I_IXOFF(tty) && !C_CRTSCTS(tty))
                return;
 
        if (sdio_uart_claim_func(port) != 0)
@@ -870,7 +870,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
                }
        }
 
-       if (tty->termios.c_cflag & CRTSCTS)
+       if (C_CRTSCTS(tty))
                sdio_uart_set_mctrl(port, TIOCM_RTS);
 
        sdio_uart_irq(port->func);
index f95d41ffc766e5038059d0be82eaeb6aa4b0fe7a..41b1e761965f7459c0d8f2849f7feb2c73f2ca30 100644 (file)
@@ -1033,7 +1033,7 @@ static inline void mmc_set_ios(struct mmc_host *host)
                "width %u timing %u\n",
                 mmc_hostname(host), ios->clock, ios->bus_mode,
                 ios->power_mode, ios->chip_select, ios->vdd,
-                ios->bus_width, ios->timing);
+                1 << ios->bus_width, ios->timing);
 
        host->ops->set_ios(host, ios);
 }
@@ -1079,7 +1079,8 @@ int mmc_execute_tuning(struct mmc_card *card)
        err = host->ops->execute_tuning(host, opcode);
 
        if (err)
-               pr_err("%s: tuning execution failed\n", mmc_hostname(host));
+               pr_err("%s: tuning execution failed: %d\n",
+                       mmc_hostname(host), err);
        else
                mmc_retune_enable(host);
 
@@ -1204,8 +1205,9 @@ EXPORT_SYMBOL(mmc_vddrange_to_ocrmask);
  * @np: The device node need to be parsed.
  * @mask: mask of voltages available for MMC/SD/SDIO
  *
- * 1. Return zero on success.
- * 2. Return negative errno: voltage-range is invalid.
+ * Parse the "voltage-ranges" DT property, returning zero if it is not
+ * found, negative errno if the voltage-range specification is invalid,
+ * or one if the voltage-range is specified and successfully parsed.
  */
 int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
 {
@@ -1214,8 +1216,12 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
 
        voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
        num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
-       if (!voltage_ranges || !num_ranges) {
-               pr_info("%s: voltage-ranges unspecified\n", np->full_name);
+       if (!voltage_ranges) {
+               pr_debug("%s: voltage-ranges unspecified\n", np->full_name);
+               return 0;
+       }
+       if (!num_ranges) {
+               pr_err("%s: voltage-ranges empty\n", np->full_name);
                return -EINVAL;
        }
 
@@ -1234,7 +1240,7 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
                *mask |= ocr_mask;
        }
 
-       return 0;
+       return 1;
 }
 EXPORT_SYMBOL(mmc_of_parse_voltage);
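With this change a caller of mmc_of_parse_voltage() has three outcomes to handle; a minimal caller sketch (the fallback mask and surrounding error handling are illustrative only):

ret = mmc_of_parse_voltage(np, &ocr_mask);
if (ret < 0)
	return ret;		/* "voltage-ranges" present but malformed */
if (ret == 0)
	ocr_mask = default_ocr;	/* property absent: fall back to a driver default */
/* ret == 1: ocr_mask was filled from the DT property */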
 
@@ -2532,7 +2538,7 @@ int mmc_detect_card_removed(struct mmc_host *host)
        if (!card)
                return 1;
 
-       if (host->caps & MMC_CAP_NONREMOVABLE)
+       if (!mmc_card_is_removable(host))
                return 0;
 
        ret = mmc_card_removed(card);
@@ -2570,7 +2576,7 @@ void mmc_rescan(struct work_struct *work)
                return;
 
        /* If there is a non-removable card registered, only scan once */
-       if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
+       if (!mmc_card_is_removable(host) && host->rescan_entered)
                return;
        host->rescan_entered = 1;
 
@@ -2587,8 +2593,7 @@ void mmc_rescan(struct work_struct *work)
         * if there is a _removable_ card registered, check whether it is
         * still present
         */
-       if (host->bus_ops && !host->bus_dead
-           && !(host->caps & MMC_CAP_NONREMOVABLE))
+       if (host->bus_ops && !host->bus_dead && mmc_card_is_removable(host))
                host->bus_ops->detect(host);
 
        host->detect_change = 0;
@@ -2613,7 +2618,7 @@ void mmc_rescan(struct work_struct *work)
        mmc_bus_put(host);
 
        mmc_claim_host(host);
-       if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd &&
+       if (mmc_card_is_removable(host) && host->ops->get_cd &&
                        host->ops->get_cd(host) == 0) {
                mmc_power_off(host);
                mmc_release_host(host);
index 65cc0ac9b82d2b9e1eb928131c7a1dea074f2d02..9382a57a5aa496f19c22353521fe475e1d253c91 100644 (file)
@@ -220,7 +220,7 @@ static int mmc_clock_opt_set(void *data, u64 val)
        struct mmc_host *host = data;
 
        /* We need this check because the input value is u64 */
-       if (val > host->f_max)
+       if (val != 0 && (val > host->f_max || val < host->f_min))
                return -EINVAL;
 
        mmc_claim_host(host);
index 0aecd5c00b866571a852c19c91af766591d4025e..1d94607611d888d0e409a152b4f67df6724273df 100644 (file)
@@ -339,6 +339,7 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
        host->class_dev.parent = dev;
        host->class_dev.class = &mmc_host_class;
        device_initialize(&host->class_dev);
+       device_enable_async_suspend(&host->class_dev);
 
        if (mmc_gpio_alloc(host)) {
                put_device(&host->class_dev);
index bf49e44571f20a21b88cda5c1f35a5c222cfcf32..4dbe3df8024b2cf3ed9206f8d76cff1ba660dc7e 100644 (file)
@@ -501,7 +501,7 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
                        card->ext_csd.raw_bkops_status =
                                ext_csd[EXT_CSD_BKOPS_STATUS];
                        if (!card->ext_csd.man_bkops_en)
-                               pr_info("%s: MAN_BKOPS_EN bit is not set\n",
+                               pr_debug("%s: MAN_BKOPS_EN bit is not set\n",
                                        mmc_hostname(card->host));
                }
 
@@ -945,7 +945,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
                        break;
                } else {
                        pr_warn("%s: switch to bus width %d failed\n",
-                               mmc_hostname(host), ext_csd_bits[idx]);
+                               mmc_hostname(host), 1 << bus_width);
                }
        }
 
index 2c90635c89afbb3782a48145fe830828dca4ec27..62355bda608f2ec598597df6b4be800e51eeb7c3 100644 (file)
@@ -90,7 +90,6 @@ int mmc_send_status(struct mmc_card *card, u32 *status)
 
 static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
 {
-       int err;
        struct mmc_command cmd = {0};
 
        BUG_ON(!host);
@@ -105,11 +104,7 @@ static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)
                cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
        }
 
-       err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
-       if (err)
-               return err;
-
-       return 0;
+       return mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 }
 
 int mmc_select_card(struct mmc_card *card)
@@ -244,7 +239,6 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)
 
 int mmc_set_relative_addr(struct mmc_card *card)
 {
-       int err;
        struct mmc_command cmd = {0};
 
        BUG_ON(!card);
@@ -254,11 +248,7 @@ int mmc_set_relative_addr(struct mmc_card *card)
        cmd.arg = card->rca << 16;
        cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
 
-       err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
-       if (err)
-               return err;
-
-       return 0;
+       return mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES);
 }
 
 static int
@@ -743,7 +733,7 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
 
 int mmc_bus_test(struct mmc_card *card, u8 bus_width)
 {
-       int err, width;
+       int width;
 
        if (bus_width == MMC_BUS_WIDTH_8)
                width = 8;
@@ -759,8 +749,7 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
         * is a problem.  This improves chances that the test will work.
         */
        mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width);
-       err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
-       return err;
+       return mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
 }
 
 int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
index aba786daebcadd9e193ffabe71e3a0124dd8ec62..bc173e18b71cd62843bf21a325b35fff7e7d82cf 100644 (file)
@@ -12,7 +12,6 @@
 #include <linux/slab.h>
 #include <linux/device.h>
 #include <linux/err.h>
-#include <linux/of_gpio.h>
 #include <linux/gpio/consumer.h>
 
 #include <linux/mmc/host.h>
index 48d0c93ba25a36590ac6e2c786c4ca923d35cde1..16b774c18e75fbc9b37c982bff30e38c18944304 100644 (file)
@@ -120,7 +120,6 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);
 
 int mmc_app_set_bus_width(struct mmc_card *card, int width)
 {
-       int err;
        struct mmc_command cmd = {0};
 
        BUG_ON(!card);
@@ -140,11 +139,7 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)
                return -EINVAL;
        }
 
-       err = mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
-       if (err)
-               return err;
-
-       return 0;
+       return mmc_wait_for_app_cmd(card->host, card, &cmd, MMC_CMD_RETRIES);
 }
 
 int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
index 62508b457c4f0c8563486f6d4d536135ddf80753..34f6e80153064a9fa963c5784cd3fbec7f7b8673 100644 (file)
@@ -217,7 +217,6 @@ int sdio_reset(struct mmc_host *host)
        else
                abort |= 0x08;
 
-       ret = mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
-       return ret;
+       return mmc_io_rw_direct_host(host, 1, 0, SDIO_CCCR_ABORT, abort, NULL);
 }
 
index 1526b8a10b094e88e52275e46446ff3e8b66afdf..4a35ebf5165d83d6cfdb62162c2ed9cb19961cce 100644 (file)
@@ -318,15 +318,15 @@ config MMC_SDHCI_F_SDH30
          If unsure, say N.
 
 config MMC_SDHCI_IPROC
-       tristate "SDHCI platform support for the iProc SD/MMC Controller"
-       depends on ARCH_BCM_IPROC || COMPILE_TEST
+       tristate "SDHCI support for the BCM2835 & iProc SD/MMC Controller"
+       depends on ARCH_BCM2835 || ARCH_BCM_IPROC || COMPILE_TEST
        depends on MMC_SDHCI_PLTFM
        default ARCH_BCM_IPROC
        select MMC_SDHCI_IO_ACCESSORS
        help
          This selects the iProc SD/MMC controller.
 
-         If you have an IPROC platform with SD or MMC devices,
+         If you have a BCM2835 or IPROC platform with SD or MMC devices,
          say Y or M here.
 
          If unsure, say N.
@@ -673,7 +673,7 @@ config MMC_DW_ROCKCHIP
 
 config MMC_SH_MMCIF
        tristate "SuperH Internal MMCIF support"
-       depends on MMC_BLOCK && HAS_DMA
+       depends on HAS_DMA
        depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
        help
          This selects the MMC Host Interface controller (MMCIF).
@@ -786,3 +786,14 @@ config MMC_MTK
          If you have a machine with an integrated SD/MMC card reader, say Y or M here.
          This is needed if support for any SD/SDIO/MMC devices is required.
          If unsure, say N.
+
+config MMC_SDHCI_MICROCHIP_PIC32
+        tristate "Microchip PIC32MZDA SDHCI support"
+        depends on MMC_SDHCI && PIC32MZDA && MMC_SDHCI_PLTFM
+        help
+          This selects the Secure Digital Host Controller Interface (SDHCI)
+          for the PIC32MZDA platform.
+
+          If you have a controller with this interface, say Y or M here.
+
+          If unsure, say N.
index 3595f83e89dd2caf9d56cdf6ceb875ef85ece719..af918d261ff9625d1d42146da1173031cd9917dd 100644 (file)
@@ -75,6 +75,7 @@ obj-$(CONFIG_MMC_SDHCI_BCM2835)               += sdhci-bcm2835.o
 obj-$(CONFIG_MMC_SDHCI_IPROC)          += sdhci-iproc.o
 obj-$(CONFIG_MMC_SDHCI_MSM)            += sdhci-msm.o
 obj-$(CONFIG_MMC_SDHCI_ST)             += sdhci-st.o
+obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32)        += sdhci-pic32.o
 
 ifeq ($(CONFIG_CB710_DEBUG),y)
        CFLAGS-cb710-mmc        += -DDEBUG
index 851ccd9ac868aa0e208cac5d48ee9d8a4eaa5be4..7f9d3de9f8700b3f1ea3767c5115dff9b43fa54e 100644 (file)
@@ -848,9 +848,7 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
                if (cmd->opcode == SD_IO_RW_EXTENDED) {
                        cmdr |= ATMCI_CMDR_SDIO_BLOCK;
                } else {
-                       if (data->flags & MMC_DATA_STREAM)
-                               cmdr |= ATMCI_CMDR_STREAM;
-                       else if (data->blocks > 1)
+                       if (data->blocks > 1)
                                cmdr |= ATMCI_CMDR_MULTI_BLOCK;
                        else
                                cmdr |= ATMCI_CMDR_BLOCK;
@@ -1371,10 +1369,7 @@ static void atmci_start_request(struct atmel_mci *host,
                host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
                if (!(data->flags & MMC_DATA_WRITE))
                        host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;
-               if (data->flags & MMC_DATA_STREAM)
-                       host->stop_cmdr |= ATMCI_CMDR_STREAM;
-               else
-                       host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
+               host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;
        }
 
        /*
index 2b7f37e82ca94edf7c875c8cd32614675d625a54..526231e385831526c6f64f9dd8cb8d333c514de5 100644 (file)
@@ -126,9 +126,6 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
        length = data->blksz * data->blocks;
        bfin_write_SDH_DATA_LGTH(length);
 
-       if (data->flags & MMC_DATA_STREAM)
-               data_ctl |= DTX_MODE;
-
        if (data->flags & MMC_DATA_READ)
                data_ctl |= DTX_DIR;
        /* Only supports power-of-2 block size */
index ea2a2ebc6b91320d928439518218b36a966396a3..693144e7427b1d9fefbe37c2dd936b602dab2af7 100644 (file)
@@ -346,10 +346,6 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
        if (cmd->data)
                cmd_reg |= MMCCMD_WDATX;
 
-       /* Setting whether stream or block transfer */
-       if (cmd->flags & MMC_DATA_STREAM)
-               cmd_reg |= MMCCMD_STRMTP;
-
        /* Setting whether data read or write */
        if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
                cmd_reg |= MMCCMD_DTRW;
@@ -568,8 +564,7 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
                return;
        }
 
-       dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n",
-               (data->flags & MMC_DATA_STREAM) ? "stream" : "block",
+       dev_dbg(mmc_dev(host->mmc), "%s, %d blocks of %d bytes\n",
                (data->flags & MMC_DATA_WRITE) ? "write" : "read",
                data->blocks, data->blksz);
        dev_dbg(mmc_dev(host->mmc), "  DTO %d cycles + %d ns\n",
@@ -584,22 +579,18 @@ mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req)
        writel(data->blksz, host->base + DAVINCI_MMCBLEN);
 
        /* Configure the FIFO */
-       switch (data->flags & MMC_DATA_WRITE) {
-       case MMC_DATA_WRITE:
+       if (data->flags & MMC_DATA_WRITE) {
                host->data_dir = DAVINCI_MMC_DATADIR_WRITE;
                writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST,
                        host->base + DAVINCI_MMCFIFOCTL);
                writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR,
                        host->base + DAVINCI_MMCFIFOCTL);
-               break;
-
-       default:
+       } else {
                host->data_dir = DAVINCI_MMC_DATADIR_READ;
                writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST,
                        host->base + DAVINCI_MMCFIFOCTL);
                writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD,
                        host->base + DAVINCI_MMCFIFOCTL);
-               break;
        }
 
        host->buffer = NULL;
index 3a7e835a00339a3f1b4165ebc47c28c37d57fc85..8790f2afc057f3909f64b50d0bab558ea74fe53c 100644 (file)
@@ -145,6 +145,16 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
                mci_writel(host, CLKSEL64, clksel);
        else
                mci_writel(host, CLKSEL, clksel);
+
+       /*
+        * Exynos4412 and Exynos5250 extend the use of the CMD register with
+        * bit 29 (which is reserved on standard MSHC controllers) to
+        * optionally bypass the HOLD register for command and data. The
+        * HOLD register should be bypassed when there is no phase shift
+        * applied to the CMD/DATA sent to the card.
+        */
+       if (!SDMMC_CLKSEL_GET_DRV_WD3(clksel))
+               set_bit(DW_MMC_CARD_NO_USE_HOLD, &host->cur_slot->flags);
 }
 
 #ifdef CONFIG_PM_SLEEP
@@ -202,26 +212,6 @@ static int dw_mci_exynos_resume_noirq(struct device *dev)
 #define dw_mci_exynos_resume_noirq     NULL
 #endif /* CONFIG_PM_SLEEP */
 
-static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
-       struct dw_mci_exynos_priv_data *priv = host->priv;
-       /*
-        * Exynos4412 and Exynos5250 extends the use of CMD register with the
-        * use of bit 29 (which is reserved on standard MSHC controllers) for
-        * optionally bypassing the HOLD register for command and data. The
-        * HOLD register should be bypassed in case there is no phase shift
-        * applied on CMD/DATA that is sent to the card.
-        */
-       if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS7 ||
-               priv->ctrl_type == DW_MCI_TYPE_EXYNOS7_SMU) {
-               if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL64)))
-                       *cmdr |= SDMMC_CMD_USE_HOLD_REG;
-        } else {
-               if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL)))
-                       *cmdr |= SDMMC_CMD_USE_HOLD_REG;
-       }
-}
-
 static void dw_mci_exynos_config_hs400(struct dw_mci *host, u32 timing)
 {
        struct dw_mci_exynos_priv_data *priv = host->priv;
@@ -500,7 +490,6 @@ static const struct dw_mci_drv_data exynos_drv_data = {
        .caps                   = exynos_dwmmc_caps,
        .init                   = dw_mci_exynos_priv_init,
        .setup_clock            = dw_mci_exynos_setup_clock,
-       .prepare_command        = dw_mci_exynos_prepare_command,
        .set_ios                = dw_mci_exynos_set_ios,
        .parse_dt               = dw_mci_exynos_parse_dt,
        .execute_tuning         = dw_mci_exynos_execute_tuning,
index 81bdeeb05a4d23426ecf39119a54d544eb8342f9..c0bb0c793e84b2744586db19a5f559f31d207cd2 100644 (file)
 #include "dw_mmc.h"
 #include "dw_mmc-pltfm.h"
 
-static void dw_mci_pltfm_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
-       *cmdr |= SDMMC_CMD_USE_HOLD_REG;
-}
-
-static const struct dw_mci_drv_data socfpga_drv_data = {
-       .prepare_command        = dw_mci_pltfm_prepare_command,
-};
-
-static const struct dw_mci_drv_data pistachio_drv_data = {
-       .prepare_command        = dw_mci_pltfm_prepare_command,
-};
-
 int dw_mci_pltfm_register(struct platform_device *pdev,
                          const struct dw_mci_drv_data *drv_data)
 {
@@ -94,10 +81,8 @@ EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops);
 
 static const struct of_device_id dw_mci_pltfm_match[] = {
        { .compatible = "snps,dw-mshc", },
-       { .compatible = "altr,socfpga-dw-mshc",
-               .data = &socfpga_drv_data },
-       { .compatible = "img,pistachio-dw-mshc",
-               .data = &pistachio_drv_data },
+       { .compatible = "altr,socfpga-dw-mshc", },
+       { .compatible = "img,pistachio-dw-mshc", },
        {},
 };
 MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match);
index d9c92f31da641e1c5d98e6d446f174ee61d6552a..84e50f3a64b69239b44fa11e96001fe953ed0948 100644 (file)
@@ -26,11 +26,6 @@ struct dw_mci_rockchip_priv_data {
        int                     default_sample_phase;
 };
 
-static void dw_mci_rockchip_prepare_command(struct dw_mci *host, u32 *cmdr)
-{
-       *cmdr |= SDMMC_CMD_USE_HOLD_REG;
-}
-
 static int dw_mci_rk3288_setup_clock(struct dw_mci *host)
 {
        host->bus_hz /= RK3288_CLKGEN_DIV;
@@ -240,12 +235,10 @@ static int dw_mci_rockchip_init(struct dw_mci *host)
 }
 
 static const struct dw_mci_drv_data rk2928_drv_data = {
-       .prepare_command        = dw_mci_rockchip_prepare_command,
        .init                   = dw_mci_rockchip_init,
 };
 
 static const struct dw_mci_drv_data rk3288_drv_data = {
-       .prepare_command        = dw_mci_rockchip_prepare_command,
        .set_ios                = dw_mci_rk3288_set_ios,
        .execute_tuning         = dw_mci_rk3288_execute_tuning,
        .parse_dt               = dw_mci_rk3288_parse_dt,
index 712835177e8b7ef0b49ff59155be3fef2bf5c678..242f9a0769bd5ef7edf0f6c245597e8dc2a85aa5 100644 (file)
@@ -234,7 +234,6 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
        struct mmc_data *data;
        struct dw_mci_slot *slot = mmc_priv(mmc);
        struct dw_mci *host = slot->host;
-       const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
        u32 cmdr;
 
        cmd->error = -EINPROGRESS;
@@ -290,14 +289,12 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
        data = cmd->data;
        if (data) {
                cmdr |= SDMMC_CMD_DAT_EXP;
-               if (data->flags & MMC_DATA_STREAM)
-                       cmdr |= SDMMC_CMD_STRM_MODE;
                if (data->flags & MMC_DATA_WRITE)
                        cmdr |= SDMMC_CMD_DAT_WR;
        }
 
-       if (drv_data && drv_data->prepare_command)
-               drv_data->prepare_command(slot->host, &cmdr);
+       if (!test_bit(DW_MMC_CARD_NO_USE_HOLD, &slot->flags))
+               cmdr |= SDMMC_CMD_USE_HOLD_REG;
 
        return cmdr;
 }
@@ -1450,12 +1447,11 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
 {
        int present;
        struct dw_mci_slot *slot = mmc_priv(mmc);
-       struct dw_mci_board *brd = slot->host->pdata;
        struct dw_mci *host = slot->host;
        int gpio_cd = mmc_gpio_get_cd(mmc);
 
        /* Use platform get_cd function, else try onboard card detect */
-       if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
+       if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
            (mmc->caps & MMC_CAP_NONREMOVABLE))
                present = 1;
        else if (!IS_ERR_VALUE(gpio_cd))
@@ -1477,6 +1473,34 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
        return present;
 }
 
+static void dw_mci_hw_reset(struct mmc_host *mmc)
+{
+       struct dw_mci_slot *slot = mmc_priv(mmc);
+       struct dw_mci *host = slot->host;
+       int reset;
+
+       if (host->use_dma == TRANS_MODE_IDMAC)
+               dw_mci_idmac_reset(host);
+
+       if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_DMA_RESET |
+                                    SDMMC_CTRL_FIFO_RESET))
+               return;
+
+       /*
+        * According to eMMC spec, card reset procedure:
+        * tRstW >= 1us:   RST_n pulse width
+        * tRSCA >= 200us: RST_n to Command time
+        * tRSTH >= 1us:   RST_n high period
+        */
+       reset = mci_readl(host, RST_N);
+       reset &= ~(SDMMC_RST_HWACTIVE << slot->id);
+       mci_writel(host, RST_N, reset);
+       usleep_range(1, 2);
+       reset |= SDMMC_RST_HWACTIVE << slot->id;
+       mci_writel(host, RST_N, reset);
+       usleep_range(200, 300);
+}
+
 static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
 {
        struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -1563,6 +1587,7 @@ static const struct mmc_host_ops dw_mci_ops = {
        .set_ios                = dw_mci_set_ios,
        .get_ro                 = dw_mci_get_ro,
        .get_cd                 = dw_mci_get_cd,
+       .hw_reset               = dw_mci_hw_reset,
        .enable_sdio_irq        = dw_mci_enable_sdio_irq,
        .execute_tuning         = dw_mci_execute_tuning,
        .card_busy              = dw_mci_card_busy,
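Wiring .hw_reset into dw_mci_ops lets the MMC core trigger the RST_n sequence above through its standard reset path when the host advertises hardware-reset support; hypothetical core-side usage:

err = mmc_hw_reset(card->host);	/* ends up calling dw_mci_hw_reset() (sketch) */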
@@ -2840,23 +2865,13 @@ static void dw_mci_dto_timer(unsigned long arg)
 }
 
 #ifdef CONFIG_OF
-static struct dw_mci_of_quirks {
-       char *quirk;
-       int id;
-} of_quirks[] = {
-       {
-               .quirk  = "broken-cd",
-               .id     = DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
-       },
-};
-
 static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
 {
        struct dw_mci_board *pdata;
        struct device *dev = host->dev;
        struct device_node *np = dev->of_node;
        const struct dw_mci_drv_data *drv_data = host->drv_data;
-       int idx, ret;
+       int ret;
        u32 clock_frequency;
 
        pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
@@ -2864,17 +2879,7 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
                return ERR_PTR(-ENOMEM);
 
        /* find out number of slots supported */
-       if (of_property_read_u32(dev->of_node, "num-slots",
-                               &pdata->num_slots)) {
-               dev_info(dev,
-                        "num-slots property not found, assuming 1 slot is available\n");
-               pdata->num_slots = 1;
-       }
-
-       /* get quirks */
-       for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++)
-               if (of_get_property(np, of_quirks[idx].quirk, NULL))
-                       pdata->quirks |= of_quirks[idx].id;
+       of_property_read_u32(np, "num-slots", &pdata->num_slots);
 
        if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth))
                dev_info(dev,
@@ -2908,18 +2913,19 @@ static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
 
 static void dw_mci_enable_cd(struct dw_mci *host)
 {
-       struct dw_mci_board *brd = host->pdata;
        unsigned long irqflags;
        u32 temp;
        int i;
+       struct dw_mci_slot *slot;
 
-       /* No need for CD if broken card detection */
-       if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
-               return;
-
-       /* No need for CD if all slots have a non-error GPIO */
+       /*
+        * No need for the CD interrupt if a slot uses polling (broken card
+        * detection) or if all slots have a non-error card-detect GPIO.
+        */
        for (i = 0; i < host->num_slots; i++) {
-               struct dw_mci_slot *slot = host->slot[i];
+               slot = host->slot[i];
+               if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
+                       return;
 
                if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
                        break;
@@ -2949,12 +2955,6 @@ int dw_mci_probe(struct dw_mci *host)
                }
        }
 
-       if (host->pdata->num_slots < 1) {
-               dev_err(host->dev,
-                       "Platform data must supply num_slots.\n");
-               return -ENODEV;
-       }
-
        host->biu_clk = devm_clk_get(host->dev, "biu");
        if (IS_ERR(host->biu_clk)) {
                dev_dbg(host->dev, "biu clock not available\n");
@@ -3052,8 +3052,10 @@ int dw_mci_probe(struct dw_mci *host)
        }
 
        /* Reset all blocks */
-       if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS))
-               return -ENODEV;
+       if (!dw_mci_ctrl_reset(host, SDMMC_CTRL_ALL_RESET_FLAGS)) {
+               ret = -ENODEV;
+               goto err_clk_ciu;
+       }
 
        host->dma_ops = host->pdata->dma_ops;
        dw_mci_init_dma(host);
@@ -3111,13 +3113,20 @@ int dw_mci_probe(struct dw_mci *host)
        if (host->pdata->num_slots)
                host->num_slots = host->pdata->num_slots;
        else
-               host->num_slots = SDMMC_GET_SLOT_NUM(mci_readl(host, HCON));
+               host->num_slots = 1;
+
+       if (host->num_slots < 1 ||
+           host->num_slots > SDMMC_GET_SLOT_NUM(mci_readl(host, HCON))) {
+               dev_err(host->dev,
+                       "Platform data must supply correct num_slots.\n");
+               ret = -ENODEV;
+               goto err_clk_ciu;
+       }
 
        /*
         * Enable interrupts for command done, data over, data empty,
         * receive ready and error such as transmit, receive timeout, crc error
         */
-       mci_writel(host, RINTSTS, 0xFFFFFFFF);
        mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER |
                   SDMMC_INT_TXDR | SDMMC_INT_RXDR |
                   DW_MCI_ERROR_FLAGS);
index f695b58f06135868ce934cae1e322899f3dc6545..68d5da2dfd191f5799f7af985534e00068f7116e 100644 (file)
@@ -46,6 +46,7 @@
 #define SDMMC_VERID            0x06c
 #define SDMMC_HCON             0x070
 #define SDMMC_UHS_REG          0x074
+#define SDMMC_RST_N            0x078
 #define SDMMC_BMOD             0x080
 #define SDMMC_PLDMND           0x084
 #define SDMMC_DBADDR           0x088
 #define SDMMC_IDMAC_ENABLE             BIT(7)
 #define SDMMC_IDMAC_FB                 BIT(1)
 #define SDMMC_IDMAC_SWRESET            BIT(0)
+/* H/W reset */
+#define SDMMC_RST_HWACTIVE             0x1
 /* Version ID register define */
 #define SDMMC_GET_VERID(x)             ((x) & 0xFFFF)
 /* Card read threshold */
@@ -265,6 +268,7 @@ struct dw_mci_slot {
 #define DW_MMC_CARD_PRESENT    0
 #define DW_MMC_CARD_NEED_INIT  1
 #define DW_MMC_CARD_NO_LOW_PWR 2
+#define DW_MMC_CARD_NO_USE_HOLD 3
        int                     id;
        int                     sdio_id;
 };
@@ -274,7 +278,6 @@ struct dw_mci_slot {
  * @caps: mmc subsystem specified capabilities of the controller(s).
  * @init: early implementation specific initialization.
  * @setup_clock: implementation specific clock configuration.
- * @prepare_command: handle CMD register extensions.
  * @set_ios: handle bus specific extensions.
  * @parse_dt: parse implementation specific device tree properties.
  * @execute_tuning: implementation specific tuning procedure.
@@ -287,7 +290,6 @@ struct dw_mci_drv_data {
        unsigned long   *caps;
        int             (*init)(struct dw_mci *host);
        int             (*setup_clock)(struct dw_mci *host);
-       void            (*prepare_command)(struct dw_mci *host, u32 *cmdr);
        void            (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
        int             (*parse_dt)(struct dw_mci *host);
        int             (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode);
index 76e8bce6f46e7015bb6ae9e3dfe082d2cf58db35..03ddf0ecf4023c8ab09b9e516640626659eb1ae5 100644 (file)
@@ -660,8 +660,6 @@ static void jz4740_mmc_send_command(struct jz4740_mmc_host *host,
                cmdat |= JZ_MMC_CMDAT_DATA_EN;
                if (cmd->data->flags & MMC_DATA_WRITE)
                        cmdat |= JZ_MMC_CMDAT_WRITE;
-               if (cmd->data->flags & MMC_DATA_STREAM)
-                       cmdat |= JZ_MMC_CMDAT_STREAM;
                if (host->use_dma)
                        cmdat |= JZ_MMC_CMDAT_DMA_EN;
 
index 1c1b45ef3faf847d4087d61904ed18c1a6f7ce5e..3446097a43c01ca6cf7f8e163cc75dbbb3911f3f 100644 (file)
@@ -925,6 +925,10 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
 
                        dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
                                                PAGE_SIZE, dir);
+                       if (dma_mapping_error(dma_dev, dma_addr)) {
+                               data->error = -EFAULT;
+                               break;
+                       }
                        if (direction == DMA_TO_DEVICE)
                                t->tx_dma = dma_addr + sg->offset;
                        else
@@ -1393,10 +1397,12 @@ static int mmc_spi_probe(struct spi_device *spi)
                host->dma_dev = dev;
                host->ones_dma = dma_map_single(dev, ones,
                                MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+               if (dma_mapping_error(dev, host->ones_dma))
+                       goto fail_ones_dma;
                host->data_dma = dma_map_single(dev, host->data,
                                sizeof(*host->data), DMA_BIDIRECTIONAL);
-
-               /* REVISIT in theory those map operations can fail... */
+               if (dma_mapping_error(dev, host->data_dma))
+                       goto fail_data_dma;
 
                dma_sync_single_for_cpu(host->dma_dev,
                                host->data_dma, sizeof(*host->data),
@@ -1462,6 +1468,11 @@ fail_glue_init:
        if (host->dma_dev)
                dma_unmap_single(host->dma_dev, host->data_dma,
                                sizeof(*host->data), DMA_BIDIRECTIONAL);
+fail_data_dma:
+       if (host->dma_dev)
+               dma_unmap_single(host->dma_dev, host->ones_dma,
+                               MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+fail_ones_dma:
        kfree(host->data);
 
 fail_nobuf1:
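The added checks follow the DMA-API rule that every dma_map_single()/dma_map_page() result must be validated before the address is used; the general pattern (a sketch, not code from this driver) is:

dma_addr_t addr;

addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, addr))
	return -ENOMEM;	/* mapping failed: addr must not be used or unmapped */
/* ... perform the transfer using addr, then ... */
dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);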
index d110f9e98c4b45cc587378f740988acc2b3b3cfa..3d1ea5e0e54952821f730cc26e89a3aeeecb0c78 100644 (file)
@@ -307,9 +307,6 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
        enum dma_transfer_direction slave_dirn;
        int i, nents;
 
-       if (data->flags & MMC_DATA_STREAM)
-               nob = 0xffff;
-
        host->data = data;
        data->bytes_xfered = 0;
 
index b6639ea0bf18dbd37ccf0d639073119353b418b6..43097a76399cde95a813f7aa6ffb6a4dc275ca07 100644 (file)
@@ -503,8 +503,11 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
        host->pbias = devm_regulator_get_optional(host->dev, "pbias");
        if (IS_ERR(host->pbias)) {
                ret = PTR_ERR(host->pbias);
-               if ((ret != -ENODEV) && host->dev->of_node)
+               if ((ret != -ENODEV) && host->dev->of_node) {
+                       dev_err(host->dev,
+                       "SD card detect fail? enable CONFIG_REGULATOR_PBIAS\n");
                        return ret;
+               }
                dev_dbg(host->dev, "unable to get pbias regulator %ld\n",
                        PTR_ERR(host->pbias));
                host->pbias = NULL;
@@ -2159,7 +2162,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
                                                 &rx_req, &pdev->dev, "rx");
 
        if (!host->rx_chan) {
-               dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req);
+               dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel\n");
                ret = -ENXIO;
                goto err_irq;
        }
@@ -2169,7 +2172,7 @@ static int omap_hsmmc_probe(struct platform_device *pdev)
                                                 &tx_req, &pdev->dev, "tx");
 
        if (!host->tx_chan) {
-               dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req);
+               dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel\n");
                ret = -ENXIO;
                goto err_irq;
        }
index ce08896b9d696b00440fe7807aeddf72b922f320..86fac3e8683378acde59d1da87a0fad3878bf481 100644 (file)
@@ -86,7 +86,7 @@ struct pxamci_host {
 static inline void pxamci_init_ocr(struct pxamci_host *host)
 {
 #ifdef CONFIG_REGULATOR
-       host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc");
+       host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc");
 
        if (IS_ERR(host->vcc))
                host->vcc = NULL;
@@ -191,9 +191,6 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
 
        host->data = data;
 
-       if (data->flags & MMC_DATA_STREAM)
-               nob = 0xffff;
-
        writel(nob, host->base + MMC_NOB);
        writel(data->blksz, host->base + MMC_BLKLEN);
 
@@ -443,9 +440,6 @@ static void pxamci_request(struct mmc_host *mmc, struct mmc_request *mrq)
                cmdat |= CMDAT_DATAEN | CMDAT_DMAEN;
                if (mrq->data->flags & MMC_DATA_WRITE)
                        cmdat |= CMDAT_WRITE;
-
-               if (mrq->data->flags & MMC_DATA_STREAM)
-                       cmdat |= CMDAT_STREAM;
        }
 
        pxamci_start_cmd(host, mrq->cmd, cmdat);
@@ -654,12 +648,8 @@ static int pxamci_probe(struct platform_device *pdev)
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
-       if (!r || irq < 0)
-               return -ENXIO;
-
-       r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
-       if (!r)
-               return -EBUSY;
+       if (irq < 0)
+               return irq;
 
        mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
        if (!mmc) {
@@ -695,7 +685,7 @@ static int pxamci_probe(struct platform_device *pdev)
        host->pdata = pdev->dev.platform_data;
        host->clkrt = CLKRT_OFF;
 
-       host->clk = clk_get(&pdev->dev, NULL);
+       host->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(host->clk)) {
                ret = PTR_ERR(host->clk);
                host->clk = NULL;
@@ -727,9 +717,9 @@ static int pxamci_probe(struct platform_device *pdev)
        host->irq = irq;
        host->imask = MMC_I_MASK_ALL;
 
-       host->base = ioremap(r->start, SZ_4K);
-       if (!host->base) {
-               ret = -ENOMEM;
+       host->base = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(host->base)) {
+               ret = PTR_ERR(host->base);
                goto out;
        }
 
@@ -742,7 +732,8 @@ static int pxamci_probe(struct platform_device *pdev)
        writel(64, host->base + MMC_RESTO);
        writel(host->imask, host->base + MMC_I_MASK);
 
-       ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
+       ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0,
+                              DRIVER_NAME, host);
        if (ret)
                goto out;
 
@@ -804,7 +795,7 @@ static int pxamci_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
                goto out;
        } else {
-               mmc->caps |= host->pdata->gpio_card_ro_invert ?
+               mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
                        0 : MMC_CAP2_RO_ACTIVE_HIGH;
        }
 
@@ -833,14 +824,9 @@ out:
                        dma_release_channel(host->dma_chan_rx);
                if (host->dma_chan_tx)
                        dma_release_channel(host->dma_chan_tx);
-               if (host->base)
-                       iounmap(host->base);
-               if (host->clk)
-                       clk_put(host->clk);
        }
        if (mmc)
                mmc_free_host(mmc);
-       release_resource(r);
        return ret;
 }
 
@@ -859,9 +845,6 @@ static int pxamci_remove(struct platform_device *pdev)
                        gpio_ro = host->pdata->gpio_card_ro;
                        gpio_power = host->pdata->gpio_power;
                }
-               if (host->vcc)
-                       regulator_put(host->vcc);
-
                if (host->pdata && host->pdata->exit)
                        host->pdata->exit(&pdev->dev, mmc);
 
@@ -870,16 +853,10 @@ static int pxamci_remove(struct platform_device *pdev)
                       END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
                       host->base + MMC_I_MASK);
 
-               free_irq(host->irq, host);
                dmaengine_terminate_all(host->dma_chan_rx);
                dmaengine_terminate_all(host->dma_chan_tx);
                dma_release_channel(host->dma_chan_rx);
                dma_release_channel(host->dma_chan_tx);
-               iounmap(host->base);
-
-               clk_put(host->clk);
-
-               release_resource(host->res);
 
                mmc_free_host(mmc);
        }
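The conversion above relies on device-managed (devm_*) helpers, which tie each resource to the device's lifetime so the explicit iounmap()/clk_put()/free_irq()/release_resource() calls in the error and remove paths can go away; the general shape (condensed sketch, not the full probe) is:

base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(base))
	return PTR_ERR(base);

clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
	return PTR_ERR(clk);

ret = devm_request_irq(&pdev->dev, irq, pxamci_irq, 0, DRIVER_NAME, host);
if (ret)
	return ret;
/* all of the above are released automatically when the device is unbound */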
index 6291d5042ef2a5004212b1b14aff0d5a2f4c8613..39814f3dc96f652c46fb4abec59a8dc25503de51 100644 (file)
@@ -1014,8 +1014,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
        if (host->bus_width == MMC_BUS_WIDTH_4)
                dcon |= S3C2410_SDIDCON_WIDEBUS;
 
-       if (!(data->flags & MMC_DATA_STREAM))
-               dcon |= S3C2410_SDIDCON_BLOCKMODE;
+       dcon |= S3C2410_SDIDCON_BLOCKMODE;
 
        if (data->flags & MMC_DATA_WRITE) {
                dcon |= S3C2410_SDIDCON_TXAFTERRESP;
index f6047fc9406204d6ee672b74544d0ec6866308cd..3d27f2de0054a51bf2e91e890df7ebaddc08dcd0 100644 (file)
@@ -388,6 +388,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
                pm_runtime_enable(dev);
        }
 
+       device_enable_async_suspend(dev);
+
        return 0;
 
 err_free:
index 3b423b0ad8e7a2ce0d3c9c5a85aee92b42a54143..1110f73b08aa6334b931373e402896a038df8c0a 100644 (file)
@@ -26,6 +26,7 @@ struct sdhci_iproc_data {
        const struct sdhci_pltfm_data *pdata;
        u32 caps;
        u32 caps1;
+       u32 mmc_caps;
 };
 
 struct sdhci_iproc_host {
@@ -165,9 +166,25 @@ static const struct sdhci_iproc_data iproc_data = {
        .pdata = &sdhci_iproc_pltfm_data,
        .caps = 0x05E90000,
        .caps1 = 0x00000064,
+       .mmc_caps = MMC_CAP_1_8V_DDR,
+};
+
+static const struct sdhci_pltfm_data sdhci_bcm2835_pltfm_data = {
+       .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+                 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+                 SDHCI_QUIRK_MISSING_CAPS,
+       .ops = &sdhci_iproc_ops,
+};
+
+static const struct sdhci_iproc_data bcm2835_data = {
+       .pdata = &sdhci_bcm2835_pltfm_data,
+       .caps = SDHCI_CAN_VDD_330,
+       .caps1 = 0x00000000,
+       .mmc_caps = 0x00000000,
 };
 
 static const struct of_device_id sdhci_iproc_of_match[] = {
+       { .compatible = "brcm,bcm2835-sdhci", .data = &bcm2835_data },
        { .compatible = "brcm,sdhci-iproc-cygnus", .data = &iproc_data },
        { }
 };
@@ -199,32 +216,37 @@ static int sdhci_iproc_probe(struct platform_device *pdev)
        mmc_of_parse(host->mmc);
        sdhci_get_of_property(pdev);
 
-       /* Enable EMMC 1/8V DDR capable */
-       host->mmc->caps |= MMC_CAP_1_8V_DDR;
+       host->mmc->caps |= iproc_host->data->mmc_caps;
 
        pltfm_host->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pltfm_host->clk)) {
                ret = PTR_ERR(pltfm_host->clk);
                goto err;
        }
+       ret = clk_prepare_enable(pltfm_host->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to enable host clk\n");
+               goto err;
+       }
 
        if (iproc_host->data->pdata->quirks & SDHCI_QUIRK_MISSING_CAPS) {
                host->caps = iproc_host->data->caps;
                host->caps1 = iproc_host->data->caps1;
        }
 
-       return sdhci_add_host(host);
+       ret = sdhci_add_host(host);
+       if (ret)
+               goto err_clk;
+
+       return 0;
 
+err_clk:
+       clk_disable_unprepare(pltfm_host->clk);
 err:
        sdhci_pltfm_free(pdev);
        return ret;
 }
 
-static int sdhci_iproc_remove(struct platform_device *pdev)
-{
-       return sdhci_pltfm_unregister(pdev);
-}
-
 static struct platform_driver sdhci_iproc_driver = {
        .driver = {
                .name = "sdhci-iproc",
@@ -232,7 +254,7 @@ static struct platform_driver sdhci_iproc_driver = {
                .pm = SDHCI_PLTFM_PMOPS,
        },
        .probe = sdhci_iproc_probe,
-       .remove = sdhci_iproc_remove,
+       .remove = sdhci_pltfm_unregister,
 };
 module_platform_driver(sdhci_iproc_driver);
 
index 75379cb0fb354e7aa7749852ced8fca5973d435e..5d9fdb353446f2792f391281dd6fb1f0a0c22f42 100644 (file)
@@ -172,11 +172,6 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
                goto clk_disable_all;
        }
 
-       if (of_device_is_compatible(pdev->dev.of_node, "arasan,sdhci-4.9a")) {
-               host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
-               host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23;
-       }
-
        sdhci_get_of_property(pdev);
        pltfm_host = sdhci_priv(host);
        pltfm_host->priv = sdhci_arasan;
index 7e7d8f0c9438fe4ac41bfb1f1759ff4fde776089..9cb86fb25976f380530fe0029015bdfaf408d743 100644 (file)
@@ -217,6 +217,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
 pm_runtime_disable:
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
+       pm_runtime_put_noidle(&pdev->dev);
 clocks_disable_unprepare:
        clk_disable_unprepare(priv->gck);
        clk_disable_unprepare(priv->mainck);
diff --git a/drivers/mmc/host/sdhci-pic32.c b/drivers/mmc/host/sdhci-pic32.c
new file mode 100644 (file)
index 0000000..059df70
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pic32.c
@@ -0,0 +1,257 @@
+/*
+ * Support for SDHCI platform devices on Microchip PIC32.
+ *
+ * Copyright (C) 2015 Microchip
+ * Andrei Pistirica, Paul Thacker
+ *
+ * Inspired by sdhci-pltfm.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+#include <linux/mmc/host.h>
+#include <linux/io.h>
+#include "sdhci.h"
+#include "sdhci-pltfm.h"
+#include <linux/platform_data/sdhci-pic32.h>
+
+#define SDH_SHARED_BUS_CTRL            0x000000E0
+#define SDH_SHARED_BUS_NR_CLK_PINS_MASK        0x7
+#define SDH_SHARED_BUS_NR_IRQ_PINS_MASK        0x30
+#define SDH_SHARED_BUS_CLK_PINS                0x10
+#define SDH_SHARED_BUS_IRQ_PINS                0x14
+#define SDH_CAPS_SDH_SLOT_TYPE_MASK    0xC0000000
+#define SDH_SLOT_TYPE_REMOVABLE                0x0
+#define SDH_SLOT_TYPE_EMBEDDED         0x1
+#define SDH_SLOT_TYPE_SHARED_BUS       0x2
+#define SDHCI_CTRL_CDSSEL              0x80
+#define SDHCI_CTRL_CDTLVL              0x40
+
+#define ADMA_FIFO_RD_THSHLD    512
+#define ADMA_FIFO_WR_THSHLD    512
+
+struct pic32_sdhci_priv {
+       struct platform_device  *pdev;
+       struct clk *sys_clk;
+       struct clk *base_clk;
+};
+
+static unsigned int pic32_sdhci_get_max_clock(struct sdhci_host *host)
+{
+       struct pic32_sdhci_priv *sdhci_pdata = sdhci_priv(host);
+
+       return clk_get_rate(sdhci_pdata->base_clk);
+}
+
+static void pic32_sdhci_set_bus_width(struct sdhci_host *host, int width)
+{
+       u8 ctrl;
+
+       ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+       if (width == MMC_BUS_WIDTH_8) {
+               ctrl &= ~SDHCI_CTRL_4BITBUS;
+               if (host->version >= SDHCI_SPEC_300)
+                       ctrl |= SDHCI_CTRL_8BITBUS;
+       } else {
+               if (host->version >= SDHCI_SPEC_300)
+                       ctrl &= ~SDHCI_CTRL_8BITBUS;
+               if (width == MMC_BUS_WIDTH_4)
+                       ctrl |= SDHCI_CTRL_4BITBUS;
+               else
+                       ctrl &= ~SDHCI_CTRL_4BITBUS;
+       }
+
+       /* CD select and test bits must be set for errata workaround. */
+       ctrl &= ~SDHCI_CTRL_CDTLVL;
+       ctrl |= SDHCI_CTRL_CDSSEL;
+       sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+}
+
+static unsigned int pic32_sdhci_get_ro(struct sdhci_host *host)
+{
+       /*
+        * The SDHCI_WRITE_PROTECT bit is unstable on current hardware so we
+        * can't depend on its value in any way.
+        */
+       return 0;
+}
+
+static const struct sdhci_ops pic32_sdhci_ops = {
+       .get_max_clock = pic32_sdhci_get_max_clock,
+       .set_clock = sdhci_set_clock,
+       .set_bus_width = pic32_sdhci_set_bus_width,
+       .reset = sdhci_reset,
+       .set_uhs_signaling = sdhci_set_uhs_signaling,
+       .get_ro = pic32_sdhci_get_ro,
+};
+
+static struct sdhci_pltfm_data sdhci_pic32_pdata = {
+       .ops = &pic32_sdhci_ops,
+       .quirks = SDHCI_QUIRK_NO_HISPD_BIT,
+       .quirks2 = SDHCI_QUIRK2_NO_1_8_V,
+};
+
+static void pic32_sdhci_shared_bus(struct platform_device *pdev)
+{
+       struct sdhci_host *host = platform_get_drvdata(pdev);
+       u32 bus = readl(host->ioaddr + SDH_SHARED_BUS_CTRL);
+       u32 clk_pins = (bus & SDH_SHARED_BUS_NR_CLK_PINS_MASK) >> 0;
+       u32 irq_pins = (bus & SDH_SHARED_BUS_NR_IRQ_PINS_MASK) >> 4;
+
+       /* select first clock */
+       if (clk_pins & 1)
+               bus |= (1 << SDH_SHARED_BUS_CLK_PINS);
+
+       /* select first interrupt */
+       if (irq_pins & 1)
+               bus |= (1 << SDH_SHARED_BUS_IRQ_PINS);
+
+       writel(bus, host->ioaddr + SDH_SHARED_BUS_CTRL);
+}
+
+static int pic32_sdhci_probe_platform(struct platform_device *pdev,
+                                     struct pic32_sdhci_priv *pdata)
+{
+       int ret = 0;
+       u32 caps_slot_type;
+       struct sdhci_host *host = platform_get_drvdata(pdev);
+
+       /* Check whether the card slot is connected on a shared bus. */
+       host->caps = readl(host->ioaddr + SDHCI_CAPABILITIES);
+       caps_slot_type = (host->caps & SDH_CAPS_SDH_SLOT_TYPE_MASK) >> 30;
+       if (caps_slot_type == SDH_SLOT_TYPE_SHARED_BUS)
+               pic32_sdhci_shared_bus(pdev);
+
+       return ret;
+}
+
+static int pic32_sdhci_probe(struct platform_device *pdev)
+{
+       struct sdhci_host *host;
+       struct sdhci_pltfm_host *pltfm_host;
+       struct pic32_sdhci_priv *sdhci_pdata;
+       struct pic32_sdhci_platform_data *plat_data;
+       int ret;
+
+       host = sdhci_pltfm_init(pdev, &sdhci_pic32_pdata,
+                               sizeof(struct pic32_sdhci_priv));
+       if (IS_ERR(host)) {
+               ret = PTR_ERR(host);
+               goto err;
+       }
+
+       pltfm_host = sdhci_priv(host);
+       sdhci_pdata = sdhci_pltfm_priv(pltfm_host);
+
+       plat_data = pdev->dev.platform_data;
+       if (plat_data && plat_data->setup_dma) {
+               ret = plat_data->setup_dma(ADMA_FIFO_RD_THSHLD,
+                                          ADMA_FIFO_WR_THSHLD);
+               if (ret)
+                       goto err_host;
+       }
+
+       sdhci_pdata->sys_clk = devm_clk_get(&pdev->dev, "sys_clk");
+       if (IS_ERR(sdhci_pdata->sys_clk)) {
+               ret = PTR_ERR(sdhci_pdata->sys_clk);
+               dev_err(&pdev->dev, "Error getting clock\n");
+               goto err_host;
+       }
+
+       ret = clk_prepare_enable(sdhci_pdata->sys_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Error enabling clock\n");
+               goto err_host;
+       }
+
+       sdhci_pdata->base_clk = devm_clk_get(&pdev->dev, "base_clk");
+       if (IS_ERR(sdhci_pdata->base_clk)) {
+               ret = PTR_ERR(sdhci_pdata->base_clk);
+               dev_err(&pdev->dev, "Error getting clock\n");
+               goto err_sys_clk;
+       }
+
+       ret = clk_prepare_enable(sdhci_pdata->base_clk);
+       if (ret) {
+               dev_err(&pdev->dev, "Error enabling clock\n");
+               goto err_base_clk;
+       }
+
+       ret = mmc_of_parse(host->mmc);
+       if (ret)
+               goto err_base_clk;
+
+       ret = pic32_sdhci_probe_platform(pdev, sdhci_pdata);
+       if (ret) {
+               dev_err(&pdev->dev, "failed to probe platform!\n");
+               goto err_base_clk;
+       }
+
+       ret = sdhci_add_host(host);
+       if (ret) {
+               dev_err(&pdev->dev, "error adding host\n");
+               goto err_base_clk;
+       }
+
+       dev_info(&pdev->dev, "Successfully added sdhci host\n");
+       return 0;
+
+err_base_clk:
+       clk_disable_unprepare(sdhci_pdata->base_clk);
+err_sys_clk:
+       clk_disable_unprepare(sdhci_pdata->sys_clk);
+err_host:
+       sdhci_pltfm_free(pdev);
+err:
+       dev_err(&pdev->dev, "pic32-sdhci probe failed: %d\n", ret);
+       return ret;
+}
+
+static int pic32_sdhci_remove(struct platform_device *pdev)
+{
+       struct sdhci_host *host = platform_get_drvdata(pdev);
+       struct pic32_sdhci_priv *sdhci_pdata = sdhci_priv(host);
+       u32 scratch;
+
+       scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
+       sdhci_remove_host(host, scratch == (u32)~0);
+       clk_disable_unprepare(sdhci_pdata->base_clk);
+       clk_disable_unprepare(sdhci_pdata->sys_clk);
+       sdhci_pltfm_free(pdev);
+
+       return 0;
+}
+
+static const struct of_device_id pic32_sdhci_id_table[] = {
+       { .compatible = "microchip,pic32mzda-sdhci" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, pic32_sdhci_id_table);
+
+static struct platform_driver pic32_sdhci_driver = {
+       .driver = {
+               .name   = "pic32-sdhci",
+               .owner  = THIS_MODULE,
+               .of_match_table = of_match_ptr(pic32_sdhci_id_table),
+       },
+       .probe          = pic32_sdhci_probe,
+       .remove         = pic32_sdhci_remove,
+};
+
+module_platform_driver(pic32_sdhci_driver);
+
+MODULE_DESCRIPTION("Microchip PIC32 SDHCI driver");
+MODULE_AUTHOR("Pistirica Sorin Andrei & Sandeep Sheriker");
+MODULE_LICENSE("GPL v2");
index b7e305775314fa75ccc58dd67752420bda8494ce..5ff26ab81eb180241c5cccfb58c4073237954e89 100644 (file)
@@ -398,10 +398,10 @@ static struct mmc_host_ops sdricoh_ops = {
 static int sdricoh_init_mmc(struct pci_dev *pci_dev,
                            struct pcmcia_device *pcmcia_dev)
 {
-       int result = 0;
-       void __iomem *iobase = NULL;
-       struct mmc_host *mmc = NULL;
-       struct sdricoh_host *host = NULL;
+       int result;
+       void __iomem *iobase;
+       struct mmc_host *mmc;
+       struct sdricoh_host *host;
        struct device *dev = &pcmcia_dev->dev;
        /* map iomem */
        if (pci_resource_len(pci_dev, SDRICOH_PCI_REGION) !=
@@ -419,7 +419,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
        if (readl(iobase + R104_VERSION) != 0x4000) {
                dev_dbg(dev, "no supported mmc controller found\n");
                result = -ENODEV;
-               goto err;
+               goto unmap_io;
        }
        /* allocate privdata */
        mmc = pcmcia_dev->priv =
@@ -427,7 +427,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
        if (!mmc) {
                dev_err(dev, "mmc_alloc_host failed\n");
                result = -ENOMEM;
-               goto err;
+               goto unmap_io;
        }
        host = mmc_priv(mmc);
 
@@ -451,8 +451,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
        if (sdricoh_reset(host)) {
                dev_dbg(dev, "could not reset\n");
                result = -EIO;
-               goto err;
-
+               goto free_host;
        }
 
        result = mmc_add_host(mmc);
@@ -461,13 +460,10 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,
                dev_dbg(dev, "mmc host registered\n");
                return 0;
        }
-
-err:
-       if (iobase)
-               pci_iounmap(pci_dev, iobase);
-       if (mmc)
-               mmc_free_host(mmc);
-
+free_host:
+       mmc_free_host(mmc);
+unmap_io:
+       pci_iounmap(pci_dev, iobase);
        return result;
 }
 
index 1ca8a1359cbc1efd6a35cefb596c1c4534c77920..8d870ce9f9442ef9c672e3167f59660337c4cbc9 100644 (file)
@@ -445,7 +445,7 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
                                                        pdata->slave_id_rx);
        } else {
                host->chan_tx = dma_request_slave_channel(dev, "tx");
-               host->chan_tx = dma_request_slave_channel(dev, "rx");
+               host->chan_rx = dma_request_slave_channel(dev, "rx");
        }
        dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
                host->chan_rx);
@@ -1395,7 +1395,7 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 
 static void sh_mmcif_timeout_work(struct work_struct *work)
 {
-       struct delayed_work *d = container_of(work, struct delayed_work, work);
+       struct delayed_work *d = to_delayed_work(work);
        struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
        struct mmc_request *mrq = host->mrq;
        struct device *dev = sh_mmcif_host_to_dev(host);
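
The hunk above replaces an open-coded container_of() with to_delayed_work(), which <linux/workqueue.h> provides as exactly that container_of wrapper. A small sketch with a hypothetical host structure, showing the equivalence:

/* Sketch only: hypothetical struct, not the driver code above. */
#include <linux/workqueue.h>

struct example_host {
        struct delayed_work timeout_work;
};

static void example_timeout_work(struct work_struct *work)
{
        /* to_delayed_work(work) == container_of(work, struct delayed_work, work) */
        struct delayed_work *d = to_delayed_work(work);
        struct example_host *host =
                container_of(d, struct example_host, timeout_work);

        (void)host;     /* a real handler would act on host here */
}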
index 354f4f335ed57ab7abed082700264beb11d804b9..557e2b9dadeec74b09803d83fb83a69e7f107257 100644 (file)
@@ -59,7 +59,7 @@ static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = {
 
 static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = {
        .tmio_flags     = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE |
-                         TMIO_MMC_CLK_ACTUAL,
+                         TMIO_MMC_CLK_ACTUAL | TMIO_MMC_FAST_CLK_CHG,
        .capabilities   = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
        .dma_buswidth   = DMA_SLAVE_BUSWIDTH_4_BYTES,
        .dma_rx_offset  = 0x2000,
@@ -163,6 +163,7 @@ static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr)
        case CTL_SD_MEM_CARD_OPT:
        case CTL_TRANSACTION_CTL:
        case CTL_DMA_ENABLE:
+       case EXT_ACC:
                return sh_mobile_sdhi_wait_idle(host);
        }
 
@@ -213,10 +214,8 @@ static int sh_mobile_sdhi_probe(struct platform_device *pdev)
                return -EINVAL;
 
        priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL);
-       if (priv == NULL) {
-               dev_err(&pdev->dev, "kzalloc failed\n");
+       if (!priv)
                return -ENOMEM;
-       }
 
        mmc_data = &priv->mmc_data;
        dma_priv = &priv->dma_priv;
index 83de82bceafc91a6dd6326d393a4285a5b9061ad..8372a413848c19f8ed61e8c38e6a3b8ebb0a4ba7 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
 #include <linux/reset.h>
+#include <linux/regulator/consumer.h>
 
 #include <linux/of_address.h>
 #include <linux/of_gpio.h>
 #define SDXC_CLK_25M           1
 #define SDXC_CLK_50M           2
 #define SDXC_CLK_50M_DDR       3
+#define SDXC_CLK_50M_DDR_8BIT  4
 
 struct sunxi_mmc_clk_delay {
        u32 output;
@@ -256,6 +258,9 @@ struct sunxi_mmc_host {
        struct mmc_request *mrq;
        struct mmc_request *manual_stop_mrq;
        int             ferror;
+
+       /* vqmmc */
+       bool            vqmmc_enabled;
 };
 
 static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host)
@@ -284,16 +289,28 @@ static int sunxi_mmc_init_host(struct mmc_host *mmc)
        if (sunxi_mmc_reset_host(host))
                return -EIO;
 
+       /*
+        * Burst 8 transfers, RX trigger level: 7, TX trigger level: 8
+        *
+        * TODO: sun9i has a larger FIFO and supports higher trigger values
+        */
        mmc_writel(host, REG_FTRGL, 0x20070008);
+       /* Maximum timeout value */
        mmc_writel(host, REG_TMOUT, 0xffffffff);
+       /* Unmask SDIO interrupt if needed */
        mmc_writel(host, REG_IMASK, host->sdio_imask);
+       /* Clear all pending interrupts */
        mmc_writel(host, REG_RINTR, 0xffffffff);
+       /* Debug register? undocumented */
        mmc_writel(host, REG_DBGC, 0xdeb);
+       /* Enable CEATA support */
        mmc_writel(host, REG_FUNS, SDXC_CEATA_ON);
+       /* Set DMA descriptor list base address */
        mmc_writel(host, REG_DLBA, host->sg_dma);
 
        rval = mmc_readl(host, REG_GCTRL);
        rval |= SDXC_INTERRUPT_ENABLE_BIT;
+       /* Undocumented, but found in Allwinner code */
        rval &= ~SDXC_ACCESS_DONE_DIRECT;
        mmc_writel(host, REG_GCTRL, rval);
 
@@ -640,11 +657,17 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
                                  struct mmc_ios *ios)
 {
        u32 rate, oclk_dly, rval, sclk_dly;
+       u32 clock = ios->clock;
        int ret;
 
-       rate = clk_round_rate(host->clk_mmc, ios->clock);
+       /* 8 bit DDR requires a higher module clock */
+       if (ios->timing == MMC_TIMING_MMC_DDR52 &&
+           ios->bus_width == MMC_BUS_WIDTH_8)
+               clock <<= 1;
+
+       rate = clk_round_rate(host->clk_mmc, clock);
        dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %d\n",
-               ios->clock, rate);
+               clock, rate);
 
        /* setting clock rate */
        ret = clk_set_rate(host->clk_mmc, rate);
@@ -661,6 +684,12 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
        /* clear internal divider */
        rval = mmc_readl(host, REG_CLKCR);
        rval &= ~0xff;
+       /* set internal divider for 8 bit eMMC DDR, so card clock is right */
+       if (ios->timing == MMC_TIMING_MMC_DDR52 &&
+           ios->bus_width == MMC_BUS_WIDTH_8) {
+               rval |= 1;
+               rate >>= 1;
+       }
        mmc_writel(host, REG_CLKCR, rval);
 
        /* determine delays */
@@ -670,13 +699,17 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
        } else if (rate <= 25000000) {
                oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
                sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
-       } else if (rate <= 50000000) {
-               if (ios->timing == MMC_TIMING_UHS_DDR50) {
-                       oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
-                       sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
-               } else {
+       } else if (rate <= 52000000) {
+               if (ios->timing != MMC_TIMING_UHS_DDR50 &&
+                   ios->timing != MMC_TIMING_MMC_DDR52) {
                        oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
                        sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
+               } else if (ios->bus_width == MMC_BUS_WIDTH_8) {
+                       oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].output;
+                       sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR_8BIT].sample;
+               } else {
+                       oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+                       sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
                }
        } else {
                return -EINVAL;
@@ -699,7 +732,20 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                break;
 
        case MMC_POWER_UP:
-               mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+               host->ferror = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc,
+                                                    ios->vdd);
+               if (host->ferror)
+                       return;
+
+               if (!IS_ERR(mmc->supply.vqmmc)) {
+                       host->ferror = regulator_enable(mmc->supply.vqmmc);
+                       if (host->ferror) {
+                               dev_err(mmc_dev(mmc),
+                                       "failed to enable vqmmc\n");
+                               return;
+                       }
+                       host->vqmmc_enabled = true;
+               }
 
                host->ferror = sunxi_mmc_init_host(mmc);
                if (host->ferror)
@@ -712,6 +758,9 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                dev_dbg(mmc_dev(mmc), "power off!\n");
                sunxi_mmc_reset_host(host);
                mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+               if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled)
+                       regulator_disable(mmc->supply.vqmmc);
+               host->vqmmc_enabled = false;
                break;
        }
 
@@ -730,7 +779,8 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
        /* set ddr mode */
        rval = mmc_readl(host, REG_GCTRL);
-       if (ios->timing == MMC_TIMING_UHS_DDR50)
+       if (ios->timing == MMC_TIMING_UHS_DDR50 ||
+           ios->timing == MMC_TIMING_MMC_DDR52)
                rval |= SDXC_DDR_MODE;
        else
                rval &= ~SDXC_DDR_MODE;
@@ -743,6 +793,19 @@ static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
        }
 }
 
+static int sunxi_mmc_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+       /* vqmmc regulator is available */
+       if (!IS_ERR(mmc->supply.vqmmc))
+               return mmc_regulator_set_vqmmc(mmc, ios);
+
+       /* no vqmmc regulator, assume fixed regulator at 3/3.3V */
+       if (mmc->ios.signal_voltage == MMC_SIGNAL_VOLTAGE_330)
+               return 0;
+
+       return -EINVAL;
+}
+
 static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 {
        struct sunxi_mmc_host *host = mmc_priv(mmc);
@@ -815,11 +878,6 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
                if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) {
                        cmd_val |= SDXC_DATA_EXPIRE | SDXC_WAIT_PRE_OVER;
-                       if (cmd->data->flags & MMC_DATA_STREAM) {
-                               imask |= SDXC_AUTO_COMMAND_DONE;
-                               cmd_val |= SDXC_SEQUENCE_MODE |
-                                          SDXC_SEND_AUTO_STOP;
-                       }
 
                        if (cmd->data->stop) {
                                imask |= SDXC_AUTO_COMMAND_DONE;
@@ -894,6 +952,7 @@ static struct mmc_host_ops sunxi_mmc_ops = {
        .get_ro          = mmc_gpio_get_ro,
        .get_cd          = mmc_gpio_get_cd,
        .enable_sdio_irq = sunxi_mmc_enable_sdio_irq,
+       .start_signal_voltage_switch = sunxi_mmc_volt_switch,
        .hw_reset        = sunxi_mmc_hw_reset,
        .card_busy       = sunxi_mmc_card_busy,
 };
@@ -903,6 +962,8 @@ static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
        [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
        [SDXC_CLK_50M]          = { .output =  90, .sample = 120 },
        [SDXC_CLK_50M_DDR]      = { .output =  60, .sample = 120 },
+       /* Value from A83T "new timing mode". Works but might not be right. */
+       [SDXC_CLK_50M_DDR_8BIT] = { .output =  90, .sample = 180 },
 };
 
 static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
@@ -910,6 +971,7 @@ static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
        [SDXC_CLK_25M]          = { .output = 180, .sample =  75 },
        [SDXC_CLK_50M]          = { .output = 150, .sample = 120 },
        [SDXC_CLK_50M_DDR]      = { .output =  90, .sample = 120 },
+       [SDXC_CLK_50M_DDR_8BIT] = { .output =  90, .sample = 120 },
 };
 
 static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
@@ -1060,10 +1122,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
        mmc->max_segs           = PAGE_SIZE / sizeof(struct sunxi_idma_des);
        mmc->max_seg_size       = (1 << host->idma_des_size_bits);
        mmc->max_req_size       = mmc->max_seg_size * mmc->max_segs;
-       /* 400kHz ~ 50MHz */
+       /* 400kHz ~ 52MHz */
        mmc->f_min              =   400000;
-       mmc->f_max              = 50000000;
+       mmc->f_max              = 52000000;
        mmc->caps              |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
+                                 MMC_CAP_1_8V_DDR |
                                  MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
 
        ret = mmc_of_parse(mmc);
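
Worked numbers for the 8-bit eMMC DDR path added above, following the patch's own arithmetic (illustration only):

  requested card clock   ios->clock              = 52 MHz
  module clock           clock <<= 1             -> 104 MHz
  internal divider       rval |= 1, rate >>= 1   -> divide by 2
  effective card clock   104 MHz / 2             = 52 MHz

The card still sees 52 MHz, while the controller's module clock runs at the doubled rate that 8-bit DDR requires.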
index 4a0d6b80eaa374e7749081c7f966ce85379ad3e2..675435873823126170602156ff99ca385e451d18 100644 (file)
@@ -94,10 +94,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
                        desc = NULL;
                        ret = cookie;
                }
-               dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-                       __func__, host->sg_len, ret, cookie, host->mrq);
        }
-
 pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
@@ -115,9 +112,6 @@ pio:
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }
-
-       dev_dbg(&host->pdev->dev, "%s(): desc %p, sg[%d]\n", __func__,
-               desc, host->sg_len);
 }
 
 static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
@@ -174,10 +168,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
                        desc = NULL;
                        ret = cookie;
                }
-               dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-                       __func__, host->sg_len, ret, cookie, host->mrq);
        }
-
 pio:
        if (!desc) {
                /* DMA failed, fall back to PIO */
@@ -195,8 +186,6 @@ pio:
                dev_warn(&host->pdev->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
        }
-
-       dev_dbg(&host->pdev->dev, "%s(): desc %p\n", __func__, desc);
 }
 
 void tmio_mmc_start_dma(struct tmio_mmc_host *host,
index a10fde40b6c3ddbed98e05a693336ea6b1bb1c65..03f6e74c190691c74e4845bdd94401710baaed5c 100644 (file)
@@ -1,6 +1,8 @@
 /*
  * linux/drivers/mmc/host/tmio_mmc_pio.c
  *
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
  * Copyright (C) 2011 Guennadi Liakhovetski
  * Copyright (C) 2007 Ian Molton
  * Copyright (C) 2004 Ian Molton
@@ -159,42 +161,44 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host,
 
        if (new_clock) {
                for (clock = host->mmc->f_min, clk = 0x80000080;
-                       new_clock >= (clock<<1); clk >>= 1)
+                    new_clock >= (clock << 1);
+                    clk >>= 1)
                        clock <<= 1;
 
                /* 1/1 clock is option */
                if ((host->pdata->flags & TMIO_MMC_CLK_ACTUAL) &&
-                   ((clk >> 22) & 0x1))
+                  ((clk >> 22) & 0x1))
                        clk |= 0xff;
        }
 
        if (host->set_clk_div)
-               host->set_clk_div(host->pdev, (clk>>22) & 1);
+               host->set_clk_div(host->pdev, (clk >> 22) & 1);
 
-       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff);
-       msleep(10);
+       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
+                       sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
+       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK);
+       if (!(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG))
+               msleep(10);
 }
 
 static void tmio_mmc_clk_stop(struct tmio_mmc_host *host)
 {
-       /* implicit BUG_ON(!res) */
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
                sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000);
                msleep(10);
        }
 
-       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 &
+       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN &
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-       msleep(10);
+       msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 5 : 10);
 }
 
 static void tmio_mmc_clk_start(struct tmio_mmc_host *host)
 {
-       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 |
+       sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, CLK_CTL_SCLKEN |
                sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL));
-       msleep(10);
+       msleep(host->pdata->flags & TMIO_MMC_FAST_CLK_CHG ? 1 : 10);
 
-       /* implicit BUG_ON(!res) */
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) {
                sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100);
                msleep(10);
@@ -205,7 +209,6 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
 {
        /* FIXME - should we set stop clock reg here */
        sd_ctrl_write16(host, CTL_RESET_SD, 0x0000);
-       /* implicit BUG_ON(!res) */
        if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG)
                sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000);
        msleep(10);
index b47122d3e8d8c71b435cdc47a275f349bf102a22..b2752fe711f2956d52e26d8d30c751919fcbde3e 100644 (file)
@@ -1630,7 +1630,7 @@ static irqreturn_t usdhi6_cd(int irq, void *dev_id)
  */
 static void usdhi6_timeout_work(struct work_struct *work)
 {
-       struct delayed_work *d = container_of(work, struct delayed_work, work);
+       struct delayed_work *d = to_delayed_work(work);
        struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
        struct mmc_request *mrq = host->mrq;
        struct mmc_data *data = mrq ? mrq->data : NULL;
index 8282f47bcf5d374e03d343453d62ee0bc4851785..845dd27d9f417a07cb6a55000f44504e55b3451c 100644 (file)
@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
 {
        uint32_t buf;
        size_t bytes_read;
+       int err;
 
-       if (mtd_read(master, offset, sizeof(buf), &bytes_read,
-                    (uint8_t *)&buf) < 0) {
-               pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
-                       offset);
+       err  = mtd_read(master, offset, sizeof(buf), &bytes_read,
+                       (uint8_t *)&buf);
+       if (err && !mtd_is_bitflip(err)) {
+               pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+                       offset, err);
                goto out_default;
        }
 
@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
        int trx_part = -1;
        int last_trx_part = -1;
        int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+       int err;
 
        /*
         * Some really old flashes (like AT45DB*) had smaller erasesize-s, but
@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
        /* Parse block by block looking for magics */
        for (offset = 0; offset <= master->size - blocksize;
             offset += blocksize) {
-               /* Nothing more in higher memory */
-               if (offset >= 0x2000000)
+               /* Nothing more in higher memory on BCM47XX (MIPS) */
+               if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
                        break;
 
                if (curr_part >= BCM47XXPART_MAX_PARTS) {
@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                /* Read beginning of the block */
-               if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
-                            &bytes_read, (uint8_t *)buf) < 0) {
-                       pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
-                              offset);
+               err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+                              &bytes_read, (uint8_t *)buf);
+               if (err && !mtd_is_bitflip(err)) {
+                       pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+                              offset, err);
                        continue;
                }
 
@@ -254,10 +258,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                /* Read middle of the block */
-               if (mtd_read(master, offset + 0x8000, 0x4,
-                            &bytes_read, (uint8_t *)buf) < 0) {
-                       pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
-                              offset);
+               err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+                              (uint8_t *)buf);
+               if (err && !mtd_is_bitflip(err)) {
+                       pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+                              offset, err);
                        continue;
                }
 
@@ -277,10 +282,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
                }
 
                offset = master->size - possible_nvram_sizes[i];
-               if (mtd_read(master, offset, 0x4, &bytes_read,
-                            (uint8_t *)buf) < 0) {
-                       pr_err("mtd_read error while reading at offset 0x%X!\n",
-                              offset);
+               err = mtd_read(master, offset, 0x4, &bytes_read,
+                              (uint8_t *)buf);
+               if (err && !mtd_is_bitflip(err)) {
+                       pr_err("mtd_read error while reading (offset 0x%X): %d\n",
+                              offset, err);
                        continue;
                }
 
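
The bcm47xxpart changes above stop treating ECC-corrected reads as hard failures: mtd_read() returns -EUCLEAN when bitflips were corrected, and mtd_is_bitflip() tests for exactly that case. A minimal sketch of the pattern, with a hypothetical helper:

#include <linux/mtd/mtd.h>

static int example_read_word(struct mtd_info *master, loff_t offset, u32 *val)
{
        size_t bytes_read;
        int err;

        err = mtd_read(master, offset, sizeof(*val), &bytes_read, (u8 *)val);
        if (err && !mtd_is_bitflip(err))
                return err;     /* genuine read failure */

        return 0;               /* clean read, or bitflips already corrected */
}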
index 44093699859321ce4164ed63d368ec8b72c5d49b..cec3188a170d6a504aad659c45cdd9613002c394 100644 (file)
@@ -24,6 +24,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/bcm963xx_tag.h>
 #include <linux/crc32.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/mtd/partitions.h>
 
 #include <asm/mach-bcm63xx/bcm63xx_nvram.h>
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
 #include <asm/mach-bcm63xx/board_bcm963xx.h>
 
-#define BCM63XX_EXTENDED_SIZE  0xBFC00000      /* Extended flash address */
-
 #define BCM63XX_CFE_BLOCK_SIZE SZ_64K          /* always at least 64KiB */
 
 #define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
@@ -123,8 +121,8 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
                pr_info("CFE boot tag found with version %s and board type %s\n",
                        tagversion, boardid);
 
-               kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
-               rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
+               kerneladdr = kerneladdr - BCM963XX_EXTENDED_SIZE;
+               rootfsaddr = rootfsaddr - BCM963XX_EXTENDED_SIZE;
                spareaddr = roundup(totallen, master->erasesize) + cfelen;
 
                if (rootfsaddr < kerneladdr) {
index 20f01b3ec23d7dc29b5b4d4326cd4fc0d766d6be..b253654140d065405bf258ca339c80895b88e77c 100644 (file)
@@ -74,6 +74,7 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR
 config MTD_NAND_GPIO
        tristate "GPIO assisted NAND Flash driver"
        depends on GPIOLIB || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          This enables a NAND flash driver where control signals are
          connected to GPIO pins, and commands and data are communicated
@@ -310,6 +311,7 @@ config MTD_NAND_CAFE
 config MTD_NAND_CS553X
        tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
        depends on X86_32
+       depends on !UML && HAS_IOMEM
        help
          The CS553x companion chips for the AMD Geode processor
          include NAND flash controllers with built-in hardware ECC
@@ -463,6 +465,7 @@ config MTD_NAND_MPC5121_NFC
 config MTD_NAND_VF610_NFC
        tristate "Support for Freescale NFC for VF610/MPC5125"
        depends on (SOC_VF610 || COMPILE_TEST)
+       depends on HAS_IOMEM
        help
          Enables support for NAND Flash Controller on some Freescale
          processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
index bddcf83d6859aecd689797c99d5745569f39b713..affe7a7e9ad7dcefa1e25366b5a21cf07454f3ad 100644 (file)
@@ -825,7 +825,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
                        *(buf + byte_pos) ^= (1 << bit_pos);
 
                        pos = sector_num * host->pmecc_sector_size + byte_pos;
-                       dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+                       dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
                                pos, bit_pos, err_byte, *(buf + byte_pos));
                } else {
                        /* Bit flip in OOB area */
@@ -835,7 +835,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
                        ecc[tmp] ^= (1 << bit_pos);
 
                        pos = tmp + nand_chip->ecc.layout->eccpos[0];
-                       dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+                       dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
                                pos, bit_pos, err_byte, ecc[tmp]);
                }
 
@@ -1486,8 +1486,6 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
                ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
 }
 
-static const struct of_device_id atmel_nand_dt_ids[];
-
 static int atmel_of_init_port(struct atmel_nand_host *host,
                              struct device_node *np)
 {
@@ -1498,7 +1496,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
        enum of_gpio_flags flags = 0;
 
        host->caps = (struct atmel_nand_caps *)
-               of_match_device(atmel_nand_dt_ids, host->dev)->data;
+               of_device_get_match_data(host->dev);
 
        if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
                if (val >= 32) {
@@ -1550,7 +1548,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
                if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
                                (val != 24)) {
                        dev_err(host->dev,
-                               "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+                               "Required ECC strength not supported: %u\n",
                                val);
                        return -EINVAL;
                }
@@ -1560,7 +1558,7 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
        if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
                if ((val != 512) && (val != 1024)) {
                        dev_err(host->dev,
-                               "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+                               "Required ECC sector size not supported: %u\n",
                                val);
                        return -EINVAL;
                }
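
The atmel_nand change above drops the forward declaration of the match table by switching to of_device_get_match_data(), which returns the .data pointer of the matched of_device_id entry (or NULL). A minimal sketch with hypothetical names:

/* Sketch only: hypothetical caps structure and helper. */
#include <linux/errno.h>
#include <linux/of_device.h>

struct example_caps {
        bool has_dma;
};

static int example_get_caps(struct device *dev, const struct example_caps **caps)
{
        /* .data of the matched of_device_id entry, or NULL if nothing matched */
        *caps = of_device_get_match_data(dev);
        if (!*caps)
                return -EINVAL;

        return 0;
}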
index 235ddcb58f39932fbd41aa949170000b9dee8891..8122c699ccf20895e400b8ae9246b39db00b1f00 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Freescale GPMI NAND Flash Driver
  *
- * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
  * Copyright (C) 2008 Embedded Alley Solutions, Inc.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -136,7 +136,7 @@ static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
  *
  * We may have available oob space in this case.
  */
-static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
 {
        struct bch_geometry *geo = &this->bch_geometry;
        struct nand_chip *chip = &this->nand;
@@ -145,7 +145,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
        unsigned int block_mark_bit_offset;
 
        if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
-               return false;
+               return -EINVAL;
 
        switch (chip->ecc_step_ds) {
        case SZ_512:
@@ -158,19 +158,19 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
                dev_err(this->dev,
                        "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
                        chip->ecc_strength_ds, chip->ecc_step_ds);
-               return false;
+               return -EINVAL;
        }
        geo->ecc_chunk_size = chip->ecc_step_ds;
        geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
        if (!gpmi_check_ecc(this))
-               return false;
+               return -EINVAL;
 
        /* Keep the C >= O */
        if (geo->ecc_chunk_size < mtd->oobsize) {
                dev_err(this->dev,
                        "unsupported nand chip. ecc size: %d, oob size : %d\n",
                        chip->ecc_step_ds, mtd->oobsize);
-               return false;
+               return -EINVAL;
        }
 
        /* The default value, see comment in the legacy_set_geometry(). */
@@ -242,7 +242,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
                                + ALIGN(geo->ecc_chunk_count, 4);
 
        if (!this->swap_block_mark)
-               return true;
+               return 0;
 
        /* For bit swap. */
        block_mark_bit_offset = mtd->writesize * 8 -
@@ -251,7 +251,7 @@ static bool set_geometry_by_ecc_info(struct gpmi_nand_data *this)
 
        geo->block_mark_byte_offset = block_mark_bit_offset / 8;
        geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
-       return true;
+       return 0;
 }
 
 static int legacy_set_geometry(struct gpmi_nand_data *this)
@@ -285,7 +285,8 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
        geo->ecc_strength = get_ecc_strength(this);
        if (!gpmi_check_ecc(this)) {
                dev_err(this->dev,
-                       "required ecc strength of the NAND chip: %d is not supported by the GPMI controller (%d)\n",
+                       "ecc strength: %d cannot be supported by the controller (%d)\n"
+                       "try using the minimum ecc strength the NAND chip requires\n",
                        geo->ecc_strength,
                        this->devdata->bch_max_ecc_strength);
                return -EINVAL;
@@ -366,10 +367,11 @@ static int legacy_set_geometry(struct gpmi_nand_data *this)
 
 int common_nfc_set_geometry(struct gpmi_nand_data *this)
 {
-       if (of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc")
-               && set_geometry_by_ecc_info(this))
-               return 0;
-       return legacy_set_geometry(this);
+       if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
+                               || legacy_set_geometry(this))
+               return set_geometry_by_ecc_info(this);
+
+       return 0;
 }
 
 struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
@@ -2033,9 +2035,54 @@ static int gpmi_nand_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int gpmi_pm_suspend(struct device *dev)
+{
+       struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+       release_dma_channels(this);
+       return 0;
+}
+
+static int gpmi_pm_resume(struct device *dev)
+{
+       struct gpmi_nand_data *this = dev_get_drvdata(dev);
+       int ret;
+
+       ret = acquire_dma_channels(this);
+       if (ret < 0)
+               return ret;
+
+       /* re-init the GPMI registers */
+       this->flags &= ~GPMI_TIMING_INIT_OK;
+       ret = gpmi_init(this);
+       if (ret) {
+               dev_err(this->dev, "Error setting GPMI : %d\n", ret);
+               return ret;
+       }
+
+       /* re-init the BCH registers */
+       ret = bch_set_geometry(this);
+       if (ret) {
+               dev_err(this->dev, "Error setting BCH : %d\n", ret);
+               return ret;
+       }
+
+       /* re-init others */
+       gpmi_extra_init(this);
+
+       return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops gpmi_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
+};
+
 static struct platform_driver gpmi_nand_driver = {
        .driver = {
                .name = "gpmi-nand",
+               .pm = &gpmi_pm_ops,
                .of_match_table = gpmi_nand_id_table,
        },
        .probe   = gpmi_nand_probe,
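
The gpmi hunk above wires system-sleep handlers through a dev_pm_ops table. A minimal sketch of the wiring, with hypothetical handlers: the functions are only built when CONFIG_PM_SLEEP is set, and SET_SYSTEM_SLEEP_PM_OPS expands to nothing otherwise, so the driver links either way.

/* Sketch only: hypothetical handlers. */
#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
        /* release what must not be held while suspended (e.g. DMA channels) */
        return 0;
}

static int example_resume(struct device *dev)
{
        /* re-acquire resources and reprogram the controller registers */
        return 0;
}
#endif

static const struct dev_pm_ops example_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
};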
index b19d2a9a5eb97cb36ab64ca4108c5ff9fceb5ca2..673ceb2a0b44b677231e030aa8a0f23a5d412e3a 100644 (file)
@@ -427,9 +427,6 @@ static int jz_nand_probe(struct platform_device *pdev)
        chip->ecc.strength      = 4;
        chip->ecc.options       = NAND_ECC_GENERIC_ERASED_CHECK;
 
-       if (pdata)
-               chip->ecc.layout = pdata->ecc_layout;
-
        chip->chip_delay = 50;
        chip->cmd_ctrl = jz_nand_cmd_ctrl;
        chip->select_chip = jz_nand_select_chip;
index 9bc435d72a861b2a75d650f943a7522dd7e317ac..d8c3e7afcc0bfa74c5d0a87e580ca53a6b28e916 100644 (file)
@@ -750,7 +750,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
        }
 
        nand_chip->ecc.mode = NAND_ECC_HW;
-       nand_chip->ecc.size = mtd->writesize;
+       nand_chip->ecc.size = 512;
        nand_chip->ecc.layout = &lpc32xx_nand_oob;
        host->mlcsubpages = mtd->writesize / 512;
 
index 6b93e899d4e95faf9372689bb290ec0d22b946e6..5d7843ffff6ac0c85c8a5cc9a11901eb603fdd1b 100644 (file)
@@ -626,7 +626,7 @@ static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
 
 static int mpc5121_nfc_probe(struct platform_device *op)
 {
-       struct device_node *rootnode, *dn = op->dev.of_node;
+       struct device_node *dn = op->dev.of_node;
        struct clk *clk;
        struct device *dev = &op->dev;
        struct mpc5121_nfc_prv *prv;
@@ -712,18 +712,15 @@ static int mpc5121_nfc_probe(struct platform_device *op)
        chip->ecc.mode = NAND_ECC_SOFT;
 
        /* Support external chip-select logic on ADS5121 board */
-       rootnode = of_find_node_by_path("/");
-       if (of_device_is_compatible(rootnode, "fsl,mpc5121ads")) {
+       if (of_machine_is_compatible("fsl,mpc5121ads")) {
                retval = ads5121_chipselect_init(mtd);
                if (retval) {
                        dev_err(dev, "Chipselect init error!\n");
-                       of_node_put(rootnode);
                        return retval;
                }
 
                chip->select_chip = ads5121_select_chip;
        }
-       of_node_put(rootnode);
 
        /* Enable NFC clock */
        clk = devm_clk_get(dev, "ipg");
index 4b6a7085b442deaf3a25216415f5a347ae232281..2fbb523df066840d0919ad9e918d02dbd19ac21c 100644 (file)
@@ -1373,5 +1373,3 @@ int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
 
        return ret;
 }
-
-EXPORT_SYMBOL(nand_scan_bbt);
index a8804a3da0760968db677406d30446c5ca3162ff..ccc05f5b2695f935aa374ce4f3bb7b50e07aa879 100644 (file)
@@ -50,8 +50,8 @@ struct nand_flash_dev nand_flash_ids[] = {
                  SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
        {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
                { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
-                 SZ_8K, SZ_8K, SZ_2M, 0, 6, 640, NAND_ECC_INFO(40, SZ_1K),
-                 4 },
+                 SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+                 NAND_ECC_INFO(40, SZ_1K), 4 },
 
        LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
        LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
index 220ddfcf29f52535cd077f99ab5821a572962412..dbc5b571c2bbcca6a570e5855d2907009abd9b0f 100644 (file)
@@ -113,7 +113,7 @@ static int nuc900_check_rb(struct nuc900_nand *nand)
 {
        unsigned int val;
        spin_lock(&nand->lock);
-       val = __raw_readl(REG_SMISR);
+       val = __raw_readl(nand->reg + REG_SMISR);
        val &= READYBUSY;
        spin_unlock(&nand->lock);
 
index a0e26dea1424bb3ef0053836512a0a6a3c085265..e4e50da30444fc2dc2a4c6d936680f74525dac53 100644 (file)
@@ -73,7 +73,6 @@ static int plat_nand_probe(struct platform_device *pdev)
        data->chip.bbt_options |= pdata->chip.bbt_options;
 
        data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
-       data->chip.ecc.layout = pdata->chip.ecclayout;
        data->chip.ecc.mode = NAND_ECC_SOFT;
 
        platform_set_drvdata(pdev, data);
index 86fc245dc71a2395227a6b7283fa47ff1cc2fae1..e42496adda8dbf2a4a149f5197d42c1345ea3ac4 100644 (file)
 #define READ_ID_BYTES          7
 
 /* macros for registers read/write */
-#define nand_writel(info, off, val)    \
-       writel_relaxed((val), (info)->mmio_base + (off))
-
-#define nand_readl(info, off)          \
-       readl_relaxed((info)->mmio_base + (off))
+#define nand_writel(info, off, val)                                    \
+       do {                                                            \
+               dev_vdbg(&info->pdev->dev,                              \
+                        "%s():%d nand_writel(0x%x, 0x%04x)\n",         \
+                        __func__, __LINE__, (val), (off));             \
+               writel_relaxed((val), (info)->mmio_base + (off));       \
+       } while (0)
+
+#define nand_readl(info, off)                                          \
+       ({                                                              \
+               unsigned int _v;                                        \
+               _v = readl_relaxed((info)->mmio_base + (off));          \
+               dev_vdbg(&info->pdev->dev,                              \
+                        "%s():%d nand_readl(0x%04x) = 0x%x\n",         \
+                        __func__, __LINE__, (off), _v);                \
+               _v;                                                     \
+       })
 
 /* error code and state */
 enum {
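
The nand_readl()/nand_writel() tracing macros above rely on GCC's statement-expression extension: a ({ ... }) block is an expression whose value is its last statement, so nand_readl() can log the access and still yield the register value. A tiny stand-alone illustration with a hypothetical macro (user-space C):

#include <stdio.h>

#define traced_square(x)                                \
        ({                                              \
                int _v = (x) * (x);                     \
                printf("square(%d) = %d\n", (x), _v);   \
                _v; /* the block's value */             \
        })

int main(void)
{
        int y = traced_square(3);       /* prints, then yields 9 */

        return y == 9 ? 0 : 1;
}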
index 01ac74fa3b95244d5c2bf455b664f0834c2940ae..9c9397b54b2ca9a826e27fc7625108100ae2fb87 100644 (file)
@@ -861,9 +861,6 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
        chip->ecc.mode      = NAND_ECC_SOFT;
 #endif
 
-       if (set->ecc_layout != NULL)
-               chip->ecc.layout = set->ecc_layout;
-
        if (set->disable_ecc)
                chip->ecc.mode  = NAND_ECC_NONE;
 
index 51e10a35fe08c29d6a27ef94563b4a8036f7e41a..b5ea6b312df08b3467fcc85220a0f1e01cd263c2 100644 (file)
@@ -60,6 +60,7 @@
 #define NFC_REG_ECC_ERR_CNT(x) ((0x0040 + (x)) & ~0x3)
 #define NFC_REG_USER_DATA(x)   (0x0050 + ((x) * 4))
 #define NFC_REG_SPARE_AREA     0x00A0
+#define NFC_REG_PAT_ID         0x00A4
 #define NFC_RAM0_BASE          0x0400
 #define NFC_RAM1_BASE          0x0800
 
@@ -538,6 +539,174 @@ static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
        sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
 }
 
+/* These seed values have been extracted from Allwinner's BSP */
+static const u16 sunxi_nfc_randomizer_page_seeds[] = {
+       0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
+       0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
+       0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
+       0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
+       0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
+       0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
+       0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
+       0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
+       0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
+       0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
+       0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
+       0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
+       0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
+       0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
+       0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
+       0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
+};
+
+/*
+ * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
+ * have been generated using
+ * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
+ * the randomizer engine does internally before de/scrambling OOB data.
+ *
+ * Those tables are statically defined to avoid calculating randomizer state
+ * at runtime.
+ */
+static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
+       0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
+       0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
+       0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
+       0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
+       0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
+       0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
+       0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
+       0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
+       0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
+       0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
+       0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
+       0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
+       0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
+       0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
+       0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
+       0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
+};
+
+static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
+       0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
+       0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
+       0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
+       0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
+       0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
+       0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
+       0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
+       0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
+       0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
+       0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
+       0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
+       0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
+       0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
+       0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
+       0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
+       0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
+};
+
+static u16 sunxi_nfc_randomizer_step(u16 state, int count)
+{
+       state &= 0x7fff;
+
+       /*
+        * This loop is just a simple implementation of a Fibonacci LFSR using
+        * the x16 + x15 + 1 polynomial.
+        */
+       while (count--)
+               state = ((state >> 1) |
+                        (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
+
+       return state;
+}
+
+static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
+{
+       const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
+       int mod = mtd_div_by_ws(mtd->erasesize, mtd);
+
+       if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
+               mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
+
+       if (ecc) {
+               if (mtd->ecc_step_size == 512)
+                       seeds = sunxi_nfc_randomizer_ecc512_seeds;
+               else
+                       seeds = sunxi_nfc_randomizer_ecc1024_seeds;
+       }
+
+       return seeds[page % mod];
+}
+
+static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
+                                       int page, bool ecc)
+{
+       struct nand_chip *nand = mtd->priv;
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+       u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
+       u16 state;
+
+       if (!(nand->options & NAND_NEED_SCRAMBLING))
+               return;
+
+       ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
+       state = sunxi_nfc_randomizer_state(mtd, page, ecc);
+       ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
+       writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
+{
+       struct nand_chip *nand = mtd->priv;
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+       if (!(nand->options & NAND_NEED_SCRAMBLING))
+               return;
+
+       writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
+              nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
+{
+       struct nand_chip *nand = mtd->priv;
+       struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+       if (!(nand->options & NAND_NEED_SCRAMBLING))
+               return;
+
+       writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
+              nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
+{
+       u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
+
+       bbm[0] ^= state;
+       bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
+}
+
+static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
+                                          const uint8_t *buf, int len,
+                                          bool ecc, int page)
+{
+       sunxi_nfc_randomizer_config(mtd, page, ecc);
+       sunxi_nfc_randomizer_enable(mtd);
+       sunxi_nfc_write_buf(mtd, buf, len);
+       sunxi_nfc_randomizer_disable(mtd);
+}
+
+static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
+                                         int len, bool ecc, int page)
+{
+       sunxi_nfc_randomizer_config(mtd, page, ecc);
+       sunxi_nfc_randomizer_enable(mtd);
+       sunxi_nfc_read_buf(mtd, buf, len);
+       sunxi_nfc_randomizer_disable(mtd);
+}
+
 static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
@@ -574,18 +743,20 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
                                       u8 *data, int data_off,
                                       u8 *oob, int oob_off,
                                       int *cur_off,
-                                      unsigned int *max_bitflips)
+                                      unsigned int *max_bitflips,
+                                      bool bbm, int page)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
+       int raw_mode = 0;
        u32 status;
        int ret;
 
        if (*cur_off != data_off)
                nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
 
-       sunxi_nfc_read_buf(mtd, NULL, ecc->size);
+       sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
 
        if (data_off + ecc->size != oob_off)
                nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
@@ -594,25 +765,54 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
        if (ret)
                return ret;
 
+       sunxi_nfc_randomizer_enable(mtd);
        writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
               nfc->regs + NFC_REG_CMD);
 
        ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+       sunxi_nfc_randomizer_disable(mtd);
        if (ret)
                return ret;
 
+       *cur_off = oob_off + ecc->bytes + 4;
+
        status = readl(nfc->regs + NFC_REG_ECC_ST);
+       if (status & NFC_ECC_PAT_FOUND(0)) {
+               u8 pattern = 0xff;
+
+               if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1)))
+                       pattern = 0x0;
+
+               memset(data, pattern, ecc->size);
+               memset(oob, pattern, ecc->bytes + 4);
+
+               return 1;
+       }
+
        ret = NFC_ECC_ERR_CNT(0, readl(nfc->regs + NFC_REG_ECC_ERR_CNT(0)));
 
        memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
 
        nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
-       sunxi_nfc_read_buf(mtd, oob, ecc->bytes + 4);
+       sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4, true, page);
 
        if (status & NFC_ECC_ERR(0)) {
+               /*
+                * Re-read the data with the randomizer disabled to identify
+                * bitflips in erased pages.
+                */
+               if (nand->options & NAND_NEED_SCRAMBLING) {
+                       nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+                       nand->read_buf(mtd, data, ecc->size);
+                       nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+                       nand->read_buf(mtd, oob, ecc->bytes + 4);
+               }
+
                ret = nand_check_erased_ecc_chunk(data, ecc->size,
                                                  oob, ecc->bytes + 4,
                                                  NULL, 0, ecc->strength);
+               if (ret >= 0)
+                       raw_mode = 1;
        } else {
                /*
                 * The engine protects 4 bytes of OOB data per chunk.
@@ -620,6 +820,10 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
                 */
                sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(0)),
                                           oob);
+
+               /* De-randomize the Bad Block Marker. */
+               if (bbm && nand->options & NAND_NEED_SCRAMBLING)
+                       sunxi_nfc_randomize_bbm(mtd, page, oob);
        }
 
        if (ret < 0) {
@@ -629,13 +833,12 @@ static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
                *max_bitflips = max_t(unsigned int, *max_bitflips, ret);
        }
 
-       *cur_off = oob_off + ecc->bytes + 4;
-
-       return 0;
+       return raw_mode;
 }
 
 static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
-                                           u8 *oob, int *cur_off)
+                                           u8 *oob, int *cur_off,
+                                           bool randomize, int page)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -649,7 +852,11 @@ static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
                nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
                              offset + mtd->writesize, -1);
 
-       sunxi_nfc_read_buf(mtd, oob + offset, len);
+       if (!randomize)
+               sunxi_nfc_read_buf(mtd, oob + offset, len);
+       else
+               sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
+                                             false, page);
 
        *cur_off = mtd->oobsize + mtd->writesize;
 }
@@ -662,7 +869,8 @@ static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
 static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
                                        const u8 *data, int data_off,
                                        const u8 *oob, int oob_off,
-                                       int *cur_off)
+                                       int *cur_off, bool bbm,
+                                       int page)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
@@ -672,11 +880,20 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
        if (data_off != *cur_off)
                nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
 
-       sunxi_nfc_write_buf(mtd, data, ecc->size);
+       sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
 
        /* Fill OOB data in */
-       writel(sunxi_nfc_buf_to_user_data(oob),
-              nfc->regs + NFC_REG_USER_DATA(0));
+       if ((nand->options & NAND_NEED_SCRAMBLING) && bbm) {
+               u8 user_data[4];
+
+               memcpy(user_data, oob, 4);
+               sunxi_nfc_randomize_bbm(mtd, page, user_data);
+               writel(sunxi_nfc_buf_to_user_data(user_data),
+                      nfc->regs + NFC_REG_USER_DATA(0));
+       } else {
+               writel(sunxi_nfc_buf_to_user_data(oob),
+                      nfc->regs + NFC_REG_USER_DATA(0));
+       }
 
        if (data_off + ecc->size != oob_off)
                nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
@@ -685,11 +902,13 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
        if (ret)
                return ret;
 
+       sunxi_nfc_randomizer_enable(mtd);
        writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
               NFC_ACCESS_DIR | NFC_ECC_OP,
               nfc->regs + NFC_REG_CMD);
 
        ret = sunxi_nfc_wait_int(nfc, NFC_CMD_INT_FLAG, 0);
+       sunxi_nfc_randomizer_disable(mtd);
        if (ret)
                return ret;
 
@@ -699,7 +918,8 @@ static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
 }
 
 static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
-                                            u8 *oob, int *cur_off)
+                                            u8 *oob, int *cur_off,
+                                            int page)
 {
        struct nand_chip *nand = mtd_to_nand(mtd);
        struct nand_ecc_ctrl *ecc = &nand->ecc;
@@ -713,7 +933,7 @@ static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
                nand->cmdfunc(mtd, NAND_CMD_RNDIN,
                              offset + mtd->writesize, -1);
 
-       sunxi_nfc_write_buf(mtd, oob + offset, len);
+       sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
 
        *cur_off = mtd->oobsize + mtd->writesize;
 }
@@ -725,6 +945,7 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        unsigned int max_bitflips = 0;
        int ret, i, cur_off = 0;
+       bool raw_mode = false;
 
        sunxi_nfc_hw_ecc_enable(mtd);
 
@@ -736,13 +957,17 @@ static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
 
                ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
                                                  oob_off + mtd->writesize,
-                                                 &cur_off, &max_bitflips);
-               if (ret)
+                                                 &cur_off, &max_bitflips,
+                                                 !i, page);
+               if (ret < 0)
                        return ret;
+               else if (ret)
+                       raw_mode = true;
        }
 
        if (oob_required)
-               sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off);
+               sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+                                               !raw_mode, page);
 
        sunxi_nfc_hw_ecc_disable(mtd);
 
@@ -767,13 +992,14 @@ static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
 
                ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
                                                   oob_off + mtd->writesize,
-                                                  &cur_off);
+                                                  &cur_off, !i, page);
                if (ret)
                        return ret;
        }
 
-       if (oob_required)
-               sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off);
+       if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+               sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+                                                &cur_off, page);
 
        sunxi_nfc_hw_ecc_disable(mtd);
 
@@ -788,6 +1014,7 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
        struct nand_ecc_ctrl *ecc = &chip->ecc;
        unsigned int max_bitflips = 0;
        int ret, i, cur_off = 0;
+       bool raw_mode = false;
 
        sunxi_nfc_hw_ecc_enable(mtd);
 
@@ -799,13 +1026,16 @@ static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
 
                ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
                                                  oob_off, &cur_off,
-                                                 &max_bitflips);
-               if (ret)
+                                                 &max_bitflips, !i, page);
+               if (ret < 0)
                        return ret;
+               else if (ret)
+                       raw_mode = true;
        }
 
        if (oob_required)
-               sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off);
+               sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+                                               !raw_mode, page);
 
        sunxi_nfc_hw_ecc_disable(mtd);
 
@@ -829,13 +1059,15 @@ static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
                const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
 
                ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
-                                                  oob, oob_off, &cur_off);
+                                                  oob, oob_off, &cur_off,
+                                                  false, page);
                if (ret)
                        return ret;
        }
 
-       if (oob_required)
-               sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi, &cur_off);
+       if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+               sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+                                                &cur_off, page);
 
        sunxi_nfc_hw_ecc_disable(mtd);
 
@@ -1345,6 +1577,9 @@ static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
        if (nand->bbt_options & NAND_BBT_USE_FLASH)
                nand->bbt_options |= NAND_BBT_NO_OOB;
 
+       if (nand->options & NAND_NEED_SCRAMBLING)
+               nand->options |= NAND_NO_SUBPAGE_WRITE;
+
        ret = sunxi_nand_chip_init_timings(chip, np);
        if (ret) {
                dev_err(dev, "could not configure chip timings: %d\n", ret);
index 034420f313d500634e4ad66e46129535fac619d7..293feb19b0b149c7add9d4e2bf7dc0f2a8313275 100644 (file)
@@ -795,8 +795,6 @@ static int vf610_nfc_probe(struct platform_device *pdev)
                        goto error;
                }
 
-               /* propagate ecc.layout to mtd_info */
-               mtd->ecclayout = chip->ecc.layout;
                chip->ecc.read_page = vf610_nfc_read_page;
                chip->ecc.write_page = vf610_nfc_write_page;
 
index 08d0085f3e939fb277cc9c21b3b3004eab662206..680188a881301ee195af0320694324da9ca2caeb 100644 (file)
@@ -179,7 +179,7 @@ static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
  * by the onenand_release function.
  *
  */
-int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+static int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
 {
        struct onenand_chip *this = mtd->priv;
        struct bbm_info *bbm = this->bbm;
@@ -247,6 +247,3 @@ int onenand_default_bbt(struct mtd_info *mtd)
 
        return onenand_scan_bbt(mtd, bbm->badblock_pattern);
 }
-
-EXPORT_SYMBOL(onenand_scan_bbt);
-EXPORT_SYMBOL(onenand_default_bbt);
index 0dc927540b3d1627fbe77fce5e3bff09414fe4fb..83befab4b5b4a13121b2dc1b4c816baaca6e238a 100644 (file)
@@ -9,6 +9,7 @@ if MTD_SPI_NOR
 
 config MTD_MT81xx_NOR
        tristate "Mediatek MT81xx SPI NOR flash controller"
+       depends on HAS_IOMEM
        help
          This enables access to SPI NOR flash, using MT81xx SPI NOR flash
          controller. This controller does not support generic SPI BUS, it only
index d5f850d035bb93fb3a7c6547901ddfe84c3c6f04..8bed1a4cb79ce585d88623dc7d6784efaf58ccb7 100644 (file)
@@ -371,8 +371,8 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf,
        return ret;
 }
 
-static int __init mtk_nor_init(struct mt8173_nor *mt8173_nor,
-                              struct device_node *flash_node)
+static int mtk_nor_init(struct mt8173_nor *mt8173_nor,
+                       struct device_node *flash_node)
 {
        int ret;
        struct spi_nor *nor;
index 4cbb8b27a891237a2541c6fcf506ae2e0db08686..ee94056dbb2ea6e239905ecb2e46c9b8d737565e 100644 (file)
@@ -357,6 +357,14 @@ static u8 __get_duplex(struct port *port)
        return retval;
 }
 
+static void __ad_actor_update_port(struct port *port)
+{
+       const struct bonding *bond = bond_get_bond_by_slave(port->slave);
+
+       port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
+       port->actor_system_priority = BOND_AD_INFO(bond).system.sys_priority;
+}
+
 /* Conversions */
 
 /**
@@ -1963,9 +1971,7 @@ void bond_3ad_bind_slave(struct slave *slave)
                port->actor_admin_port_key = bond->params.ad_user_port_key << 6;
                ad_update_actor_keys(port, false);
                /* actor system is the bond's system */
-               port->actor_system = BOND_AD_INFO(bond).system.sys_mac_addr;
-               port->actor_system_priority =
-                   BOND_AD_INFO(bond).system.sys_priority;
+               __ad_actor_update_port(port);
                /* tx timer(to verify that no more than MAX_TX_IN_SECOND
                 * lacpdu's are sent in one second)
                 */
@@ -2147,6 +2153,34 @@ out:
        spin_unlock_bh(&bond->mode_lock);
 }
 
+/**
+ * bond_3ad_update_ad_actor_settings - reflect change of actor settings to ports
+ * @bond: bonding struct to work on
+ *
+ * If an ad_actor setting gets changed we need to update the individual port
+ * settings so the bond device will use the new values when it gets upped.
+ */
+void bond_3ad_update_ad_actor_settings(struct bonding *bond)
+{
+       struct list_head *iter;
+       struct slave *slave;
+
+       ASSERT_RTNL();
+
+       BOND_AD_INFO(bond).system.sys_priority = bond->params.ad_actor_sys_prio;
+       if (is_zero_ether_addr(bond->params.ad_actor_system))
+               BOND_AD_INFO(bond).system.sys_mac_addr =
+                   *((struct mac_addr *)bond->dev->dev_addr);
+       else
+               BOND_AD_INFO(bond).system.sys_mac_addr =
+                   *((struct mac_addr *)bond->params.ad_actor_system);
+
+       spin_lock_bh(&bond->mode_lock);
+       bond_for_each_slave(bond, slave, iter)
+               __ad_actor_update_port(&(SLAVE_AD_INFO(slave)->port));
+       spin_unlock_bh(&bond->mode_lock);
+}
+
 /**
  * bond_3ad_state_machine_handler - handle state machines timeout
  * @bond: bonding struct to work on
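
The bonding hunks above factor the two actor-field assignments into __ad_actor_update_port() and add bond_3ad_update_ad_actor_settings(), which recomputes the system MAC and priority and pushes them to every slave port under the mode lock; the ad_actor_sys_prio and ad_actor_system setters further down then call it so, per the added comment, the new values reach each port and are used when the bond comes up. A simplified single-threaded model of that push loop (struct layout and names here are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    struct mac_addr { unsigned char b[6]; };

    struct ad_system {
        int             sys_priority;
        struct mac_addr sys_mac_addr;
    };

    struct port {
        struct mac_addr actor_system;
        int             actor_system_priority;
    };

    /* analogue of __ad_actor_update_port(): copy bond-level actor params */
    static void actor_update_port(struct port *p, const struct ad_system *sys)
    {
        p->actor_system          = sys->sys_mac_addr;
        p->actor_system_priority = sys->sys_priority;
    }

    /* analogue of bond_3ad_update_ad_actor_settings(): refresh every slave */
    static void update_ad_actor_settings(struct ad_system *sys,
                                         struct port *ports, int nports,
                                         int new_prio,
                                         const struct mac_addr *new_mac)
    {
        sys->sys_priority = new_prio;
        sys->sys_mac_addr = *new_mac;
        for (int i = 0; i < nports; i++)
            actor_update_port(&ports[i], sys);
    }

    int main(void)
    {
        struct ad_system sys = { .sys_priority = 65535 };
        struct port ports[2];
        struct mac_addr mac = { { 0x02, 0, 0, 0, 0, 0x01 } };

        memset(ports, 0, sizeof(ports));
        update_ad_actor_settings(&sys, ports, 2, 100, &mac);
        printf("port0 prio=%d mac0=%02x\n",
               ports[0].actor_system_priority, ports[0].actor_system.b[0]);
        return 0;
    }
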
index 56b560558884dc6d87a0081d7d99c96ac4e99a67..705cb0198faa7c065b4724f6019f0f117e99f0d5 100644 (file)
@@ -618,8 +618,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
 static void bond_set_dev_addr(struct net_device *bond_dev,
                              struct net_device *slave_dev)
 {
-       netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
-                  bond_dev, slave_dev, slave_dev->addr_len);
+       netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
+                  bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
        memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
        bond_dev->addr_assign_type = NET_ADDR_STOLEN;
        call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
@@ -928,11 +928,10 @@ void bond_select_active_slave(struct bonding *bond)
                if (!rv)
                        return;
 
-               if (netif_carrier_ok(bond->dev)) {
+               if (netif_carrier_ok(bond->dev))
                        netdev_info(bond->dev, "first active interface up!\n");
-               } else {
+               else
                        netdev_info(bond->dev, "now running without any active interface!\n");
-               }
        }
 }
 
@@ -1178,9 +1177,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
                }
        }
 
-       if (bond_should_deliver_exact_match(skb, slave, bond)) {
+       if (bond_should_deliver_exact_match(skb, slave, bond))
                return RX_HANDLER_EXACT;
-       }
 
        skb->dev = bond->dev;
 
@@ -1241,7 +1239,7 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
 {
        struct slave *slave = NULL;
 
-       slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
+       slave = kzalloc(sizeof(*slave), GFP_KERNEL);
        if (!slave)
                return NULL;
 
@@ -3309,6 +3307,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
                stats->rx_bytes += sstats->rx_bytes - pstats->rx_bytes;
                stats->rx_errors += sstats->rx_errors - pstats->rx_errors;
                stats->rx_dropped += sstats->rx_dropped - pstats->rx_dropped;
+               stats->rx_nohandler += sstats->rx_nohandler - pstats->rx_nohandler;
 
                stats->tx_packets += sstats->tx_packets - pstats->tx_packets;;
                stats->tx_bytes += sstats->tx_bytes - pstats->tx_bytes;
index 55e93b6b6d2150f2687f36bdeebe5db8c4ab2b01..ed0bdae64f5e436e082249b2f38fb51ab694e3a1 100644 (file)
@@ -1392,6 +1392,8 @@ static int bond_option_ad_actor_sys_prio_set(struct bonding *bond,
                    newval->value);
 
        bond->params.ad_actor_sys_prio = newval->value;
+       bond_3ad_update_ad_actor_settings(bond);
+
        return 0;
 }
 
@@ -1418,6 +1420,8 @@ static int bond_option_ad_actor_system_set(struct bonding *bond,
 
        netdev_info(bond->dev, "Setting ad_actor_system to %pM\n", mac);
        ether_addr_copy(bond->params.ad_actor_system, mac);
+       bond_3ad_update_ad_actor_settings(bond);
+
        return 0;
 
 err:
index 9fe33fc3c2b9a54ddbd0db4e8a03d750553b6d7c..cf34681af4f625d07bc43c77d528d632325bdc0d 100644 (file)
@@ -1532,7 +1532,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
 
        /* no PVID with ranges, otherwise it's a bug */
        if (pvid)
-               err = _mv88e6xxx_port_pvid_set(ds, port, vid);
+               err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
 unlock:
        mutex_unlock(&ps->smi_mutex);
 
@@ -2163,7 +2163,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
         * database, and allow every port to egress frames on all other ports.
         */
        reg = BIT(ps->num_ports) - 1; /* all ports */
-       ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg & ~port);
+       reg &= ~BIT(port); /* except itself */
+       ret = _mv88e6xxx_port_vlan_map_set(ds, port, reg);
        if (ret)
                goto abort;
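
Two fixes sit in the mv88e6xxx hunks above: the PVID for a VLAN range is now programmed with vlan->vid_end rather than the local vid, and the per-port VLAN map is built by masking the port's own bit out of the all-ports mask instead of subtracting the port number. The mask arithmetic is easy to get wrong, so here is a small sketch of the corrected computation (names are illustrative):

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    /* allow egress to every port except the one being configured */
    static unsigned int port_vlan_map(int num_ports, int port)
    {
        unsigned int reg = BIT(num_ports) - 1;  /* all ports */
        reg &= ~BIT(port);                      /* except itself */
        return reg;
    }

    int main(void)
    {
        /* 7-port switch, configuring port 2: expect 0x7b (0b1111011) */
        printf("map = 0x%02x\n", port_vlan_map(7, 2));
        return 0;
    }
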
 
index a4799c1fc7d4de55c38a0e1a99c1aa42985d5143..5eb9b20c0eeab09954a909290ac5bd9bf3fdcb06 100644 (file)
@@ -628,6 +628,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
        int ret;
 
        ring = pdata->rx_ring;
+       irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
        ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                               IRQF_SHARED, ring->irq_name, ring);
        if (ret)
@@ -635,6 +636,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
 
        if (pdata->cq_cnt) {
                ring = pdata->tx_ring->cp_ring;
+               irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
                                       IRQF_SHARED, ring->irq_name, ring);
                if (ret) {
@@ -649,15 +651,19 @@ static int xgene_enet_register_irq(struct net_device *ndev)
 static void xgene_enet_free_irq(struct net_device *ndev)
 {
        struct xgene_enet_pdata *pdata;
+       struct xgene_enet_desc_ring *ring;
        struct device *dev;
 
        pdata = netdev_priv(ndev);
        dev = ndev_to_dev(ndev);
-       devm_free_irq(dev, pdata->rx_ring->irq, pdata->rx_ring);
+       ring = pdata->rx_ring;
+       irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+       devm_free_irq(dev, ring->irq, ring);
 
        if (pdata->cq_cnt) {
-               devm_free_irq(dev, pdata->tx_ring->cp_ring->irq,
-                             pdata->tx_ring->cp_ring);
+               ring = pdata->tx_ring->cp_ring;
+               irq_clear_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
+               devm_free_irq(dev, ring->irq, ring);
        }
 }
 
index 70d5b62c125a489ceb5fafaf21b96988cecaab5c..248dfc40a7611a0b298336c0aa552665da165ba7 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/acpi.h>
 #include <linux/clk.h>
 #include <linux/efi.h>
+#include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/of_platform.h>
 #include <linux/of_net.h>
index ecc4a334c50727c7a93b07576b540fc4b5b53d58..f71ab2647a3bc1f62f6b7450b34dd325c710549d 100644 (file)
@@ -302,7 +302,7 @@ static int nb8800_poll(struct napi_struct *napi, int budget)
        nb8800_tx_done(dev);
 
 again:
-       while (work < budget) {
+       do {
                struct nb8800_rx_buf *rxb;
                unsigned int len;
 
@@ -330,7 +330,7 @@ again:
                rxd->report = 0;
                last = next;
                work++;
-       }
+       } while (work < budget);
 
        if (work) {
                priv->rx_descs[last].desc.config |= DESC_EOC;
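
The nb8800 change converts the RX loop from while to do-while; whatever the driver-level motivation, the language-level difference is that the body now runs at least once each time control reaches the loop, where the old form could be skipped entirely if work had already reached budget. That difference in isolation, with nothing driver-specific:

    #include <stdio.h>

    static int run_while(int work, int budget)
    {
        int iterations = 0;
        while (work < budget) {
            iterations++;
            work++;
        }
        return iterations;
    }

    static int run_do_while(int work, int budget)
    {
        int iterations = 0;
        do {
            iterations++;
            work++;
        } while (work < budget);
        return iterations;
    }

    int main(void)
    {
        /* with work already equal to budget, only do-while enters the body */
        printf("while: %d, do-while: %d\n", run_while(4, 4), run_do_while(4, 4));
        return 0;
    }
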
index 8550df189ceb709290c8b1caad4a3132dcb6a810..19f7cd02e08527ec234b1e8de426bad1fd37cadb 100644 (file)
@@ -151,8 +151,11 @@ config BNX2X_VXLAN
 
 config BGMAC
        tristate "BCMA bus GBit core support"
-       depends on BCMA_HOST_SOC && HAS_DMA && (BCM47XX || ARCH_BCM_5301X)
+       depends on BCMA && BCMA_HOST_SOC
+       depends on HAS_DMA
+       depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
        select PHYLIB
+       select FIXED_PHY
        ---help---
          This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
          They can be found on BCM47xx SoCs and provide gigabit ethernet.
index 06f6cffdfaf54a6dd56209f2ae9ac38c6f508fc4..230f8e6209e57f22e1c5fc4c3cf3a6ade4aca95f 100644 (file)
@@ -26,6 +26,17 @@ static const struct bcma_device_id bgmac_bcma_tbl[] = {
 };
 MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
 
+static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac)
+{
+       switch (bgmac->core->bus->chipinfo.id) {
+       case BCMA_CHIP_ID_BCM4707:
+       case BCMA_CHIP_ID_BCM53018:
+               return true;
+       default:
+               return false;
+       }
+}
+
 static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
                             u32 value, int timeout)
 {
@@ -987,11 +998,9 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
 static void bgmac_miiconfig(struct bgmac *bgmac)
 {
        struct bcma_device *core = bgmac->core;
-       struct bcma_chipinfo *ci = &core->bus->chipinfo;
        u8 imode;
 
-       if (ci->id == BCMA_CHIP_ID_BCM4707 ||
-           ci->id == BCMA_CHIP_ID_BCM53018) {
+       if (bgmac_is_bcm4707_family(bgmac)) {
                bcma_awrite32(core, BCMA_IOCTL,
                              bcma_aread32(core, BCMA_IOCTL) | 0x40 |
                              BGMAC_BCMA_IOCTL_SW_CLKEN);
@@ -1055,9 +1064,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
        }
 
        /* Request Misc PLL for corerev > 2 */
-       if (core->id.rev > 2 &&
-           ci->id != BCMA_CHIP_ID_BCM4707 &&
-           ci->id != BCMA_CHIP_ID_BCM53018) {
+       if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) {
                bgmac_set(bgmac, BCMA_CLKCTLST,
                          BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
                bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
@@ -1193,8 +1200,7 @@ static void bgmac_enable(struct bgmac *bgmac)
                break;
        }
 
-       if (ci->id != BCMA_CHIP_ID_BCM4707 &&
-           ci->id != BCMA_CHIP_ID_BCM53018) {
+       if (!bgmac_is_bcm4707_family(bgmac)) {
                rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
                rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
                bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
@@ -1472,14 +1478,12 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
 
 static int bgmac_mii_register(struct bgmac *bgmac)
 {
-       struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
        struct mii_bus *mii_bus;
        struct phy_device *phy_dev;
        char bus_id[MII_BUS_ID_SIZE + 3];
        int err = 0;
 
-       if (ci->id == BCMA_CHIP_ID_BCM4707 ||
-           ci->id == BCMA_CHIP_ID_BCM53018)
+       if (bgmac_is_bcm4707_family(bgmac))
                return bgmac_fixed_phy_register(bgmac);
 
        mii_bus = mdiobus_alloc();
@@ -1539,7 +1543,6 @@ static void bgmac_mii_unregister(struct bgmac *bgmac)
 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
 static int bgmac_probe(struct bcma_device *core)
 {
-       struct bcma_chipinfo *ci = &core->bus->chipinfo;
        struct net_device *net_dev;
        struct bgmac *bgmac;
        struct ssb_sprom *sprom = &core->bus->sprom;
@@ -1620,8 +1623,7 @@ static int bgmac_probe(struct bcma_device *core)
        bgmac_chip_reset(bgmac);
 
        /* For Northstar, we have to take all GMAC core out of reset */
-       if (ci->id == BCMA_CHIP_ID_BCM4707 ||
-           ci->id == BCMA_CHIP_ID_BCM53018) {
+       if (bgmac_is_bcm4707_family(bgmac)) {
                struct bcma_device *ns_core;
                int ns_gmac;
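
The bgmac hunks replace four copies of the "is this BCM4707 or BCM53018?" chip-ID comparison with one bgmac_is_bcm4707_family() helper. A reduced model of the same refactor, showing how a switch-based predicate keeps the call sites readable and leaves one place to extend when a new family member appears (the IDs and messages below are placeholders, not the BCMA constants):

    #include <stdbool.h>
    #include <stdio.h>

    /* placeholder IDs; the real ones are BCMA_CHIP_ID_* constants */
    enum chip_id { CHIP_BCM4707 = 1, CHIP_BCM53018 = 2, CHIP_OTHER = 3 };

    static bool is_bcm4707_family(enum chip_id id)
    {
        switch (id) {
        case CHIP_BCM4707:
        case CHIP_BCM53018:
            return true;
        default:
            return false;
        }
    }

    static void configure(enum chip_id id)
    {
        /* call sites now ask about the family instead of comparing raw IDs */
        if (is_bcm4707_family(id))
            puts("use fixed PHY, skip Misc PLL request");
        else
            puts("register MDIO bus, request Misc PLL");
    }

    int main(void)
    {
        configure(CHIP_BCM4707);
        configure(CHIP_OTHER);
        return 0;
    }
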
 
index df835f5e46d85f374cb1e0d9adf1fd8e16cb40f3..5dc89e527e7deefe04c831d9ec556ed76ac40d26 100644 (file)
@@ -1490,10 +1490,11 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
 
                        last = tx_buf->nr_frags;
                        j += 2;
-                       for (k = 0; k < last; k++, j = NEXT_TX(j)) {
+                       for (k = 0; k < last; k++, j++) {
+                               int ring_idx = j & bp->tx_ring_mask;
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
 
-                               tx_buf = &txr->tx_buf_ring[j];
+                               tx_buf = &txr->tx_buf_ring[ring_idx];
                                dma_unmap_page(
                                        &pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
@@ -3406,7 +3407,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp,
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
        u16 error_code;
 
-       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
+       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
        req.ring_type = ring_type;
        req.ring_id = cpu_to_le16(ring->fw_ring_id);
 
@@ -4819,8 +4820,6 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 
                stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
 
-               stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
-
                stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
        }
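
In bnxt_free_tx_skbs() the fragment walk above keeps j as a running count and derives the ring slot with j & bp->tx_ring_mask, which works because the ring size is a power of two; the hwrm_ring_free change likewise passes the caller's completion-ring ID instead of -1. A quick sketch of the mask-based wraparound (ring size and indices are made up for the demo):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int ring_size = 8;                /* power of two */
        const unsigned int ring_mask = ring_size - 1;    /* like bp->tx_ring_mask */

        /* a producer index that has run past the end of the ring */
        for (unsigned int j = 5; j < 5 + 6; j++)
            printf("j=%u -> slot %u\n", j, j & ring_mask);

        return 0;
    }
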
 
index 0d775964b06016bfcc66352c99c0164c6957d773..457c3bc8cfff49654b45e879495e5cbfa88428d6 100644 (file)
@@ -401,7 +401,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
         * Ethernet MAC ISRs
         */
        if (priv->internal_phy)
-               priv->mii_bus->irq[phydev->mdio.addr] = PHY_IGNORE_INTERRUPT;
+               priv->phydev->irq = PHY_IGNORE_INTERRUPT;
 
        return 0;
 }
index 9293675df7ba99c44b2f77c739fbe12da0e86093..3010080cfeee350a2e523ce0c26aeb427953f21a 100644 (file)
@@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
        return ret;
 }
 
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+       /* Check if we will never have enough descriptors,
+        * as gso_segs can be more than current ring size
+        */
+       return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
 /* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 * vlan encapsulated.
                 */
                if (skb->protocol == htons(ETH_P_8021Q) ||
-                   skb->protocol == htons(ETH_P_8021AD))
-                       return tg3_tso_bug(tp, tnapi, txq, skb);
+                   skb->protocol == htons(ETH_P_8021AD)) {
+                       if (tg3_tso_bug_gso_check(tnapi, skb))
+                               return tg3_tso_bug(tp, tnapi, txq, skb);
+                       goto drop;
+               }
 
                if (!skb_is_gso_v6(skb)) {
                        if (unlikely((ETH_HLEN + hdr_len) > 80) &&
-                           tg3_flag(tp, TSO_BUG))
-                               return tg3_tso_bug(tp, tnapi, txq, skb);
-
+                           tg3_flag(tp, TSO_BUG)) {
+                               if (tg3_tso_bug_gso_check(tnapi, skb))
+                                       return tg3_tso_bug(tp, tnapi, txq, skb);
+                               goto drop;
+                       }
                        ip_csum = iph->check;
                        ip_tot_len = iph->tot_len;
                        iph->check = 0;
@@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (would_hit_hwbug) {
                tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
 
-               if (mss) {
+               if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
                        /* If it's a TSO packet, do GSO instead of
                         * allocating and copying to a large linear SKB
                         */
@@ -12016,7 +12029,7 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
        int ret;
        u32 offset, len, b_offset, odd_len;
        u8 *buf;
-       __be32 start, end;
+       __be32 start = 0, end;
 
        if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
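
tg3_tso_bug_gso_check() above refuses the GSO fallback when, per the added comment, there will never be enough descriptors (gso_segs must stay below a third of tx_pending); callers now drop such packets instead of attempting the workaround. The guard as a standalone predicate, with made-up numbers for the demo:

    #include <stdbool.h>
    #include <stdio.h>

    /* same shape as tg3_tso_bug_gso_check(): can the ring ever hold this GSO? */
    static bool tso_bug_gso_check(unsigned int gso_segs, unsigned int tx_pending)
    {
        return gso_segs < tx_pending / 3;
    }

    int main(void)
    {
        unsigned int tx_pending = 511;   /* example ring size */

        printf("64 segs:  %s\n",
               tso_bug_gso_check(64, tx_pending) ? "segment in software" : "drop");
        printf("200 segs: %s\n",
               tso_bug_gso_check(200, tx_pending) ? "segment in software" : "drop");
        return 0;
    }
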
index 9d9984a87d4227f0b79c01e5ff1a3877da41b7e3..50c94104f19cc5d90b5fb3d4643bae1d4624e127 100644 (file)
@@ -2823,7 +2823,7 @@ static int macb_probe(struct platform_device *pdev)
        struct device_node *np = pdev->dev.of_node;
        struct device_node *phy_node;
        const struct macb_config *macb_config = NULL;
-       struct clk *pclk, *hclk, *tx_clk;
+       struct clk *pclk, *hclk = NULL, *tx_clk = NULL;
        unsigned int queue_mask, num_queues;
        struct macb_platform_data *pdata;
        bool native_io;
index b89504405b7207f12663aee0d6d3ecf78d392235..872765527081ab3d48d7701f8a9d8705553640fc 100644 (file)
@@ -1526,7 +1526,6 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
                                struct timespec64 *ts)
 {
        u64 ns;
-       u32 remainder;
        unsigned long flags;
        struct lio *lio = container_of(ptp, struct lio, ptp_info);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
@@ -1536,8 +1535,7 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
        ns += lio->ptp_adjust;
        spin_unlock_irqrestore(&lio->ptp_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
 
        return 0;
 }
index 1671fa3332c2d3011db8c308a17ea5a9bf8b3fb2..7ba6d530b0c0ab6e3d1c3d4ea21b0b950ebe3c3f 100644 (file)
@@ -33,7 +33,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.3.0.12"
+#define DRV_VERSION            "2.3.0.20"
 #define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
index 1ffd1050860bb5cde576fd291e0651b351d578ff..1fdf5fe12a9562251e3bcee9fe635872053357a2 100644 (file)
@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                          int wait)
 {
        struct devcmd2_controller *dc2c = vdev->devcmd2;
-       struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+       struct devcmd2_result *result;
+       u8 color;
        unsigned int i;
        int delay, err;
        u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
        if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
                return 0;
 
+       result = dc2c->result + dc2c->next_result;
+       color = dc2c->color;
+
+       dc2c->next_result++;
+       if (dc2c->next_result == dc2c->result_size) {
+               dc2c->next_result = 0;
+               dc2c->color = dc2c->color ? 0 : 1;
+       }
+
        for (delay = 0; delay < wait; delay++) {
-               if (result->color == dc2c->color) {
-                       dc2c->next_result++;
-                       if (dc2c->next_result == dc2c->result_size) {
-                               dc2c->next_result = 0;
-                               dc2c->color = dc2c->color ? 0 : 1;
-                       }
+               if (result->color == color) {
                        if (result->error) {
                                err = result->error;
                                if (err != ERR_ECMDUNKNOWN ||
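
The vnic_dev fix above snapshots the expected color bit and advances next_result (toggling dc2c->color on wrap) before it starts polling, so the comparison inside the wait loop uses a stable value rather than state the advance itself may flip. A compact model of color-based completion polling over a small result ring (structure names are simplified, not the driver's):

    #include <stdio.h>

    #define RING_SIZE 4

    struct result { unsigned char color; };

    struct ctrl {
        struct result ring[RING_SIZE];
        unsigned int  next_result;
        unsigned char color;            /* color expected for the next result */
    };

    /* claim one completion slot; returns the slot index we will poll on */
    static unsigned int claim_result(struct ctrl *c, unsigned char *expected)
    {
        unsigned int idx = c->next_result;

        *expected = c->color;           /* snapshot before advancing */
        if (++c->next_result == RING_SIZE) {
            c->next_result = 0;
            c->color ^= 1;              /* device flips color every lap */
        }
        return idx;
    }

    int main(void)
    {
        struct ctrl c = { .color = 1 };
        unsigned char expected;

        for (int i = 0; i < 6; i++) {
            unsigned int idx = claim_result(&c, &expected);
            /* pretend the device completed the command with the right color */
            c.ring[idx].color = expected;
            printf("cmd %d -> slot %u, poll until color == %u (got %u)\n",
                   i, idx, expected, c.ring[idx].color);
        }
        return 0;
    }
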
index cf837831304be2f4d7edaebf68880a44f96bd057..515e206589cca9a20e7bae54ac9529849387c088 100644 (file)
 #define BE3_MAX_TX_QS          16
 #define BE3_MAX_EVT_QS         16
 #define BE3_SRIOV_MAX_EVT_QS   8
+#define SH_VF_MAX_NIC_EQS      3       /* Skyhawk VFs can have a max of 4 EQs
+                                        * and at least 1 is granted to either
+                                        * SURF/DPDK
+                                        */
 
 #define MAX_RSS_IFACES         15
 #define MAX_RX_QS              32
@@ -393,6 +397,10 @@ enum vf_state {
 #define BE_UC_PMAC_COUNT                       30
 #define BE_VF_UC_PMAC_COUNT                    2
 
+#define MAX_ERR_RECOVERY_RETRY_COUNT           3
+#define ERR_DETECTION_DELAY                    1000
+#define ERR_RECOVERY_RETRY_DELAY               30000
+
 /* Ethtool set_dump flags */
 #define LANCER_INITIATE_FW_DUMP                        0x1
 #define LANCER_DELETE_FW_DUMP                  0x2
@@ -530,6 +538,7 @@ struct be_adapter {
        u16 work_counter;
 
        struct delayed_work be_err_detection_work;
+       u8 recovery_retries;
        u8 err_flags;
        u32 flags;
        u32 cmd_privileges;
index b63d8ad2e1157a1d6d9f5344711609d6fc7dbe1d..7d51d4733890fb682eced3aac6e2ae906a11777e 100644 (file)
@@ -65,7 +65,22 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
                CMD_SUBSYSTEM_COMMON,
                BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
                BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
-       }
+       },
+       {
+               OPCODE_LOWLEVEL_HOST_DDR_DMA,
+               CMD_SUBSYSTEM_LOWLEVEL,
+               BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+       },
+       {
+               OPCODE_LOWLEVEL_LOOPBACK_TEST,
+               CMD_SUBSYSTEM_LOWLEVEL,
+               BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+       },
+       {
+               OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+               CMD_SUBSYSTEM_LOWLEVEL,
+               BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
+       },
 };
 
 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -236,7 +251,8 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
 
        if (base_status != MCC_STATUS_SUCCESS &&
            !be_skip_err_log(opcode, base_status, addl_status)) {
-               if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+               if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
+                   addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
                        dev_warn(&adapter->pdev->dev,
                                 "VF is not privileged to issue opcode %d-%d\n",
                                 opcode, subsystem);
@@ -3168,6 +3184,10 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
        struct be_cmd_req_set_lmode *req;
        int status;
 
+       if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+                           CMD_SUBSYSTEM_LOWLEVEL))
+               return -EPERM;
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
@@ -3213,6 +3233,10 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
        struct be_cmd_resp_loopback_test *resp;
        int status;
 
+       if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
+                           CMD_SUBSYSTEM_LOWLEVEL))
+               return -EPERM;
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
@@ -3259,6 +3283,10 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
        int status;
        int i, j = 0;
 
+       if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
+                           CMD_SUBSYSTEM_LOWLEVEL))
+               return -EPERM;
+
        spin_lock_bh(&adapter->mcc_lock);
 
        wrb = wrb_from_mccq(adapter);
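
The be_cmds.c hunks extend cmd_priv_map[] with the three LOWLEVEL opcodes and gate be_cmd_set_loopback(), be_cmd_loopback_test() and be_cmd_ddr_dma_test() on be_cmd_allowed(), so an unprivileged VF gets -EPERM up front instead of sending the command to firmware. A reduced table-driven permission check in the same spirit (opcode numbers and privilege bits are placeholders):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* placeholder opcodes and privileges, not the firmware's real values */
    enum { OP_LOOPBACK_TEST = 1, OP_DDR_DMA = 2 };
    enum { PRIV_DEVCFG = 0x1, PRIV_DEVSEC = 0x2, PRIV_LNKMGMT = 0x4 };

    struct priv_map { int opcode; unsigned int needed; };

    static const struct priv_map priv_map[] = {
        { OP_LOOPBACK_TEST, PRIV_DEVCFG | PRIV_DEVSEC },
        { OP_DDR_DMA,       PRIV_DEVCFG | PRIV_DEVSEC },
    };

    /* same idea as be_cmd_allowed(): unlisted opcodes are always allowed */
    static bool cmd_allowed(unsigned int privileges, int opcode)
    {
        for (size_t i = 0; i < sizeof(priv_map) / sizeof(priv_map[0]); i++)
            if (priv_map[i].opcode == opcode)
                return privileges & priv_map[i].needed;
        return true;
    }

    int main(void)
    {
        unsigned int vf_privs = PRIV_LNKMGMT;              /* typical VF */
        unsigned int pf_privs = PRIV_DEVCFG | PRIV_DEVSEC;

        printf("VF loopback test: %s\n",
               cmd_allowed(vf_privs, OP_LOOPBACK_TEST) ? "issue" : "-EPERM");
        printf("PF loopback test: %s\n",
               cmd_allowed(pf_privs, OP_LOOPBACK_TEST) ? "issue" : "-EPERM");
        return 0;
    }
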
index 241819b36ca72ac6c133875f88d17f4359c0c5d4..f260ef3329a17973ef1d887c450fae1bbb8ae74e 100644 (file)
@@ -68,7 +68,8 @@ enum mcc_addl_status {
        MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a,
        MCC_ADDL_STATUS_INSUFFICIENT_VLANS = 0xab,
        MCC_ADDL_STATUS_INVALID_SIGNATURE = 0x56,
-       MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57
+       MCC_ADDL_STATUS_MISSING_SIGNATURE = 0x57,
+       MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES = 0x60
 };
 
 #define CQE_BASE_STATUS_MASK           0xFFFF
index a19ac441336f7fe52fe991a67837ed9010cd2098..2ff691636dac3b8f518f8c054ce5c90dc8b00ffe 100644 (file)
@@ -720,29 +720,32 @@ static int be_set_phys_id(struct net_device *netdev,
                          enum ethtool_phys_id_state state)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       int status = 0;
 
        switch (state) {
        case ETHTOOL_ID_ACTIVE:
-               be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
-                                       &adapter->beacon_state);
-               return 1;       /* cycle on/off once per second */
+               status = be_cmd_get_beacon_state(adapter, adapter->hba_port_num,
+                                                &adapter->beacon_state);
+               if (status)
+                       return be_cmd_status(status);
+               return 1;       /* cycle on/off once per second */
 
        case ETHTOOL_ID_ON:
-               be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
-                                       BEACON_STATE_ENABLED);
+               status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+                                                0, 0, BEACON_STATE_ENABLED);
                break;
 
        case ETHTOOL_ID_OFF:
-               be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
-                                       BEACON_STATE_DISABLED);
+               status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+                                                0, 0, BEACON_STATE_DISABLED);
                break;
 
        case ETHTOOL_ID_INACTIVE:
-               be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0,
-                                       adapter->beacon_state);
+               status = be_cmd_set_beacon_state(adapter, adapter->hba_port_num,
+                                                0, 0, adapter->beacon_state);
        }
 
-       return 0;
+       return be_cmd_status(status);
 }
 
 static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
index f99de3657ce3b5f58b6f08f1f97f470bb11d7788..9c1fc9dcea250e391526f6a9010985204244b1f5 100644 (file)
@@ -1463,6 +1463,9 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
        if (lancer_chip(adapter) && vid == 0)
                return 0;
 
+       if (!test_bit(vid, adapter->vids))
+               return 0;
+
        clear_bit(vid, adapter->vids);
        adapter->vlans_added--;
 
@@ -1914,8 +1917,7 @@ static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
        if (!aic->enable)
                return 0;
 
-       if (time_before_eq(now, aic->jiffies) ||
-           jiffies_to_msecs(now - aic->jiffies) < 1)
+       if (jiffies_to_msecs(now - aic->jiffies) < 1)
                eqd = aic->prev_eqd;
        else
                eqd = be_get_new_eqd(eqo);
@@ -3789,18 +3791,15 @@ static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
        struct be_resources res = adapter->pool_res;
        u16 num_vf_qs = 1;
 
-       /* Distribute the queue resources equally among the PF and it's VFs
+       /* Distribute the queue resources among the PF and it's VFs
         * Do not distribute queue resources in multi-channel configuration.
         */
        if (num_vfs && !be_is_mc(adapter)) {
-               /* If number of VFs requested is 8 less than max supported,
-                * assign 8 queue pairs to the PF and divide the remaining
-                * resources evenly among the VFs
-                */
-               if (num_vfs < (be_max_vfs(adapter) - 8))
-                       num_vf_qs = (res.max_rss_qs - 8) / num_vfs;
-               else
-                       num_vf_qs = res.max_rss_qs / num_vfs;
+                /* Divide the qpairs evenly among the VFs and the PF, capped
+                 * at VF-EQ-count. Any remainder qpairs belong to the PF.
+                 */
+               num_vf_qs = min(SH_VF_MAX_NIC_EQS,
+                               res.max_rss_qs / (num_vfs + 1));
 
                /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
                 * interfaces per port. Provide RSS on VFs, only if number
@@ -4265,10 +4264,10 @@ static void be_schedule_worker(struct be_adapter *adapter)
        adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
 }
 
-static void be_schedule_err_detection(struct be_adapter *adapter)
+static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
 {
        schedule_delayed_work(&adapter->be_err_detection_work,
-                             msecs_to_jiffies(1000));
+                             msecs_to_jiffies(delay));
        adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
 }
 
@@ -4859,21 +4858,27 @@ static int be_resume(struct be_adapter *adapter)
 
 static int be_err_recover(struct be_adapter *adapter)
 {
-       struct device *dev = &adapter->pdev->dev;
        int status;
 
+       /* Error recovery is supported only Lancer as of now */
+       if (!lancer_chip(adapter))
+               return -EIO;
+
+       /* Wait for adapter to reach quiescent state before
+        * destroying queues
+        */
+       status = be_fw_wait_ready(adapter);
+       if (status)
+               goto err;
+
+       be_cleanup(adapter);
+
        status = be_resume(adapter);
        if (status)
                goto err;
 
-       dev_info(dev, "Adapter recovery successful\n");
        return 0;
 err:
-       if (be_physfn(adapter))
-               dev_err(dev, "Adapter recovery failed\n");
-       else
-               dev_err(dev, "Re-trying adapter recovery\n");
-
        return status;
 }
 
@@ -4882,21 +4887,43 @@ static void be_err_detection_task(struct work_struct *work)
        struct be_adapter *adapter =
                                container_of(work, struct be_adapter,
                                             be_err_detection_work.work);
-       int status = 0;
+       struct device *dev = &adapter->pdev->dev;
+       int recovery_status;
+       int delay = ERR_DETECTION_DELAY;
 
        be_detect_error(adapter);
 
-       if (be_check_error(adapter, BE_ERROR_HW)) {
-               be_cleanup(adapter);
-
-               /* As of now error recovery support is in Lancer only */
-               if (lancer_chip(adapter))
-                       status = be_err_recover(adapter);
+       if (be_check_error(adapter, BE_ERROR_HW))
+               recovery_status = be_err_recover(adapter);
+       else
+               goto reschedule_task;
+
+       if (!recovery_status) {
+               adapter->recovery_retries = 0;
+               dev_info(dev, "Adapter recovery successful\n");
+               goto reschedule_task;
+       } else if (be_virtfn(adapter)) {
+               /* For VFs, check if PF have allocated resources
+                * every second.
+                */
+               dev_err(dev, "Re-trying adapter recovery\n");
+               goto reschedule_task;
+       } else if (adapter->recovery_retries++ <
+                  MAX_ERR_RECOVERY_RETRY_COUNT) {
+               /* In case of another error during recovery, it takes 30 sec
+                * for adapter to come out of error. Retry error recovery after
+                * this time interval.
+                */
+               dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
+               delay = ERR_RECOVERY_RETRY_DELAY;
+               goto reschedule_task;
+       } else {
+               dev_err(dev, "Adapter recovery failed\n");
        }
 
-       /* Always attempt recovery on VFs */
-       if (!status || be_virtfn(adapter))
-               be_schedule_err_detection(adapter);
+       return;
+reschedule_task:
+       be_schedule_err_detection(adapter, delay);
 }
 
 static void be_log_sfp_info(struct be_adapter *adapter)
@@ -5292,7 +5319,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
 
        be_roce_dev_add(adapter);
 
-       be_schedule_err_detection(adapter);
+       be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
 
        /* On Die temperature not supported for VF. */
        if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
@@ -5359,7 +5386,7 @@ static int be_pci_resume(struct pci_dev *pdev)
        if (status)
                return status;
 
-       be_schedule_err_detection(adapter);
+       be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
 
        if (adapter->wol_en)
                be_setup_wol(adapter, false);
@@ -5459,7 +5486,7 @@ static void be_eeh_resume(struct pci_dev *pdev)
        if (status)
                goto err;
 
-       be_schedule_err_detection(adapter);
+       be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
        return;
 err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
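
The be_main.c changes above rework error recovery (a parameterized be_schedule_err_detection(), bounded retries with the 30 s ERR_RECOVERY_RETRY_DELAY back-off) and change the SR-IOV queue split: RSS queues are now divided evenly across the PF plus all VFs and capped at the VF EQ count (SH_VF_MAX_NIC_EQS, 3 per the be.h hunk), with any remainder left to the PF. The split arithmetic, worked through for a few example configurations:

    #include <stdio.h>

    #define SH_VF_MAX_NIC_EQS 3   /* from the be.h hunk above */

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* same formula as be_calculate_vf_qs() after the change */
    static unsigned int vf_queue_pairs(unsigned int max_rss_qs, unsigned int num_vfs)
    {
        return min_u(SH_VF_MAX_NIC_EQS, max_rss_qs / (num_vfs + 1));
    }

    int main(void)
    {
        /* e.g. 32 RSS queues: 7 VFs -> 3 qpairs each, 15 VFs -> 2, 31 VFs -> 1 */
        printf("32 qs, 7 VFs:  %u per VF\n", vf_queue_pairs(32, 7));
        printf("32 qs, 15 VFs: %u per VF\n", vf_queue_pairs(32, 15));
        printf("32 qs, 31 VFs: %u per VF\n", vf_queue_pairs(32, 31));
        return 0;
    }
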
index 48ecbc8aaaea2983fdce3b2c9bb08d0578059f41..b423ad380b6a3ff0b31621cf98152143030c2ed6 100644 (file)
@@ -18,6 +18,7 @@ if NET_VENDOR_EZCHIP
 config EZCHIP_NPS_MANAGEMENT_ENET
        tristate "EZchip NPS management enet support"
        depends on OF_IRQ && OF_NET
+       depends on HAS_IOMEM
        ---help---
          Simple LAN device for debug or management purposes.
          Device supports interrupts for RX and TX(completion).
index 4097c58d17a7527a3654fc6664e575d97575d4fe..cbe21dc7e37ee41dab11425d673ba0d20409074a 100644 (file)
@@ -4,6 +4,9 @@
 
 obj-$(CONFIG_FEC) += fec.o
 fec-objs :=fec_main.o fec_ptp.o
+CFLAGS_fec_main.o := -D__CHECK_ENDIAN__
+CFLAGS_fec_ptp.o := -D__CHECK_ENDIAN__
+
 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
 ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
        obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
index 99d33e2d35e6c2c219fbd06f34546619a01a586d..2106d72c91dc610fdf7b368e6dbf0a99c98c0a3d 100644 (file)
@@ -19,8 +19,7 @@
 #include <linux/timecounter.h>
 
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-    defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
-    defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
 /*
  *     Just figures, Motorola would have to change the offsets for
  *     registers in the same peripheral device on different models
 
 /*
  *     Define the buffer descriptor structure.
+ *
+ *     Evidently, ARM SoCs have the FEC block generated in a
+ *     little endian mode so adjust endianness accordingly.
  */
-#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+#if defined(CONFIG_ARM)
+#define fec32_to_cpu le32_to_cpu
+#define fec16_to_cpu le16_to_cpu
+#define cpu_to_fec32 cpu_to_le32
+#define cpu_to_fec16 cpu_to_le16
+#define __fec32 __le32
+#define __fec16 __le16
+
 struct bufdesc {
-       unsigned short cbd_datlen;      /* Data length */
-       unsigned short cbd_sc;  /* Control and status info */
-       unsigned long cbd_bufaddr;      /* Buffer address */
+       __fec16 cbd_datlen;     /* Data length */
+       __fec16 cbd_sc;         /* Control and status info */
+       __fec32 cbd_bufaddr;    /* Buffer address */
 };
 #else
+#define fec32_to_cpu be32_to_cpu
+#define fec16_to_cpu be16_to_cpu
+#define cpu_to_fec32 cpu_to_be32
+#define cpu_to_fec16 cpu_to_be16
+#define __fec32 __be32
+#define __fec16 __be16
+
 struct bufdesc {
-       unsigned short  cbd_sc;                 /* Control and status info */
-       unsigned short  cbd_datlen;             /* Data length */
-       unsigned long   cbd_bufaddr;            /* Buffer address */
+       __fec16 cbd_sc;         /* Control and status info */
+       __fec16 cbd_datlen;     /* Data length */
+       __fec32 cbd_bufaddr;    /* Buffer address */
 };
 #endif
 
 struct bufdesc_ex {
        struct bufdesc desc;
-       unsigned long cbd_esc;
-       unsigned long cbd_prot;
-       unsigned long cbd_bdu;
-       unsigned long ts;
-       unsigned short res0[4];
+       __fec32 cbd_esc;
+       __fec32 cbd_prot;
+       __fec32 cbd_bdu;
+       __fec32 ts;
+       __fec16 res0[4];
 };
 
 /*
index 502da6f48f95e5e52a43ce5666a5c265fcd9cfe3..41c81f6ec630a289c2c280ae068f429a8da9e21e 100644 (file)
@@ -332,11 +332,13 @@ static void fec_dump(struct net_device *ndev)
        bdp = txq->tx_bd_base;
 
        do {
-               pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+               pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
                        index,
                        bdp == txq->cur_tx ? 'S' : ' ',
                        bdp == txq->dirty_tx ? 'H' : ' ',
-                       bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+                       fec16_to_cpu(bdp->cbd_sc),
+                       fec32_to_cpu(bdp->cbd_bufaddr),
+                       fec16_to_cpu(bdp->cbd_datlen),
                        txq->tx_skbuff[index]);
                bdp = fec_enet_get_nextdesc(bdp, fep, 0);
                index++;
@@ -389,7 +391,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                bdp = fec_enet_get_nextdesc(bdp, fep, queue);
                ebdp = (struct bufdesc_ex *)bdp;
 
-               status = bdp->cbd_sc;
+               status = fec16_to_cpu(bdp->cbd_sc);
                status &= ~BD_ENET_TX_STATS;
                status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
                frag_len = skb_shinfo(skb)->frags[frag].size;
@@ -411,7 +413,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                        ebdp->cbd_bdu = 0;
-                       ebdp->cbd_esc = estatus;
+                       ebdp->cbd_esc = cpu_to_fec32(estatus);
                }
 
                bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
@@ -435,9 +437,9 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
                        goto dma_mapping_error;
                }
 
-               bdp->cbd_bufaddr = addr;
-               bdp->cbd_datlen = frag_len;
-               bdp->cbd_sc = status;
+               bdp->cbd_bufaddr = cpu_to_fec32(addr);
+               bdp->cbd_datlen = cpu_to_fec16(frag_len);
+               bdp->cbd_sc = cpu_to_fec16(status);
        }
 
        return bdp;
@@ -445,8 +447,8 @@ dma_mapping_error:
        bdp = txq->cur_tx;
        for (i = 0; i < frag; i++) {
                bdp = fec_enet_get_nextdesc(bdp, fep, queue);
-               dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-                               bdp->cbd_datlen, DMA_TO_DEVICE);
+               dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
+                                fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
        }
        return ERR_PTR(-ENOMEM);
 }
@@ -483,7 +485,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
        /* Fill in a Tx ring entry */
        bdp = txq->cur_tx;
        last_bdp = bdp;
-       status = bdp->cbd_sc;
+       status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;
 
        /* Set buffer length and buffer pointer */
@@ -539,21 +541,21 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 
                ebdp->cbd_bdu = 0;
-               ebdp->cbd_esc = estatus;
+               ebdp->cbd_esc = cpu_to_fec32(estatus);
        }
 
        index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep);
        /* Save skb pointer */
        txq->tx_skbuff[index] = skb;
 
-       bdp->cbd_datlen = buflen;
-       bdp->cbd_bufaddr = addr;
+       bdp->cbd_datlen = cpu_to_fec16(buflen);
+       bdp->cbd_bufaddr = cpu_to_fec32(addr);
 
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
        status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
-       bdp->cbd_sc = status;
+       bdp->cbd_sc = cpu_to_fec16(status);
 
        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(last_bdp, fep, queue);
@@ -585,7 +587,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
        unsigned int estatus = 0;
        dma_addr_t addr;
 
-       status = bdp->cbd_sc;
+       status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;
 
        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
@@ -607,8 +609,8 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
                return NETDEV_TX_BUSY;
        }
 
-       bdp->cbd_datlen = size;
-       bdp->cbd_bufaddr = addr;
+       bdp->cbd_datlen = cpu_to_fec16(size);
+       bdp->cbd_bufaddr = cpu_to_fec32(addr);
 
        if (fep->bufdesc_ex) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -616,7 +618,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                ebdp->cbd_bdu = 0;
-               ebdp->cbd_esc = estatus;
+               ebdp->cbd_esc = cpu_to_fec32(estatus);
        }
 
        /* Handle the last BD specially */
@@ -625,10 +627,10 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
        if (is_last) {
                status |= BD_ENET_TX_INTR;
                if (fep->bufdesc_ex)
-                       ebdp->cbd_esc |= BD_ENET_TX_INT;
+                       ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
        }
 
-       bdp->cbd_sc = status;
+       bdp->cbd_sc = cpu_to_fec16(status);
 
        return 0;
 }
@@ -647,7 +649,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
        unsigned short status;
        unsigned int estatus = 0;
 
-       status = bdp->cbd_sc;
+       status = fec16_to_cpu(bdp->cbd_sc);
        status &= ~BD_ENET_TX_STATS;
        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
 
@@ -671,8 +673,8 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
                }
        }
 
-       bdp->cbd_bufaddr = dmabuf;
-       bdp->cbd_datlen = hdr_len;
+       bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
+       bdp->cbd_datlen = cpu_to_fec16(hdr_len);
 
        if (fep->bufdesc_ex) {
                if (fep->quirks & FEC_QUIRK_HAS_AVB)
@@ -680,10 +682,10 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
                ebdp->cbd_bdu = 0;
-               ebdp->cbd_esc = estatus;
+               ebdp->cbd_esc = cpu_to_fec32(estatus);
        }
 
-       bdp->cbd_sc = status;
+       bdp->cbd_sc = cpu_to_fec16(status);
 
        return 0;
 }
@@ -823,15 +825,15 @@ static void fec_enet_bd_init(struct net_device *dev)
 
                        /* Initialize the BD for every fragment in the page. */
                        if (bdp->cbd_bufaddr)
-                               bdp->cbd_sc = BD_ENET_RX_EMPTY;
+                               bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
                        else
-                               bdp->cbd_sc = 0;
+                               bdp->cbd_sc = cpu_to_fec16(0);
                        bdp = fec_enet_get_nextdesc(bdp, fep, q);
                }
 
                /* Set the last buffer to wrap */
                bdp = fec_enet_get_prevdesc(bdp, fep, q);
-               bdp->cbd_sc |= BD_SC_WRAP;
+               bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 
                rxq->cur_rx = rxq->rx_bd_base;
        }
@@ -844,18 +846,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 
                for (i = 0; i < txq->tx_ring_size; i++) {
                        /* Initialize the BD for every fragment in the page. */
-                       bdp->cbd_sc = 0;
+                       bdp->cbd_sc = cpu_to_fec16(0);
                        if (txq->tx_skbuff[i]) {
                                dev_kfree_skb_any(txq->tx_skbuff[i]);
                                txq->tx_skbuff[i] = NULL;
                        }
-                       bdp->cbd_bufaddr = 0;
+                       bdp->cbd_bufaddr = cpu_to_fec32(0);
                        bdp = fec_enet_get_nextdesc(bdp, fep, q);
                }
 
                /* Set the last buffer to wrap */
                bdp = fec_enet_get_prevdesc(bdp, fep, q);
-               bdp->cbd_sc |= BD_SC_WRAP;
+               bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
                txq->dirty_tx = bdp;
        }
 }
@@ -947,8 +949,10 @@ fec_restart(struct net_device *ndev)
         */
        if (fep->quirks & FEC_QUIRK_ENET_MAC) {
                memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-               writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-               writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
+               writel((__force u32)cpu_to_be32(temp_mac[0]),
+                      fep->hwp + FEC_ADDR_LOW);
+               writel((__force u32)cpu_to_be32(temp_mac[1]),
+                      fep->hwp + FEC_ADDR_HIGH);
        }
 
        /* Clear any outstanding interrupt. */
@@ -1222,7 +1226,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
        while (bdp != READ_ONCE(txq->cur_tx)) {
                /* Order the load of cur_tx and cbd_sc */
                rmb();
-               status = READ_ONCE(bdp->cbd_sc);
+               status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
                if (status & BD_ENET_TX_READY)
                        break;
 
@@ -1230,10 +1234,12 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
                skb = txq->tx_skbuff[index];
                txq->tx_skbuff[index] = NULL;
-               if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr))
-                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
-                                       bdp->cbd_datlen, DMA_TO_DEVICE);
-               bdp->cbd_bufaddr = 0;
+               if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+                       dma_unmap_single(&fep->pdev->dev,
+                                        fec32_to_cpu(bdp->cbd_bufaddr),
+                                        fec16_to_cpu(bdp->cbd_datlen),
+                                        DMA_TO_DEVICE);
+               bdp->cbd_bufaddr = cpu_to_fec32(0);
                if (!skb) {
                        bdp = fec_enet_get_nextdesc(bdp, fep, queue_id);
                        continue;
@@ -1264,7 +1270,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
                        struct skb_shared_hwtstamps shhwtstamps;
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-                       fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
+                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
                        skb_tstamp_tx(skb, &shhwtstamps);
                }
 
@@ -1324,10 +1330,8 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
        if (off)
                skb_reserve(skb, fep->rx_align + 1 - off);
 
-       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
-                                         FEC_ENET_RX_FRSIZE - fep->rx_align,
-                                         DMA_FROM_DEVICE);
-       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+       bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
+       if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
                if (net_ratelimit())
                        netdev_err(ndev, "Rx DMA memory map failed\n");
                return -ENOMEM;
@@ -1349,7 +1353,8 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
        if (!new_skb)
                return false;
 
-       dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
+       dma_sync_single_for_cpu(&fep->pdev->dev,
+                               fec32_to_cpu(bdp->cbd_bufaddr),
                                FEC_ENET_RX_FRSIZE - fep->rx_align,
                                DMA_FROM_DEVICE);
        if (!swap)
@@ -1396,7 +1401,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
         */
        bdp = rxq->cur_rx;
 
-       while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
+       while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
 
                if (pkt_received >= budget)
                        break;
@@ -1438,7 +1443,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
                /* Process the incoming frame. */
                ndev->stats.rx_packets++;
-               pkt_len = bdp->cbd_datlen;
+               pkt_len = fec16_to_cpu(bdp->cbd_datlen);
                ndev->stats.rx_bytes += pkt_len;
 
                index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep);
@@ -1456,7 +1461,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                                ndev->stats.rx_dropped++;
                                goto rx_processing_done;
                        }
-                       dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+                       dma_unmap_single(&fep->pdev->dev,
+                                        fec32_to_cpu(bdp->cbd_bufaddr),
                                         FEC_ENET_RX_FRSIZE - fep->rx_align,
                                         DMA_FROM_DEVICE);
                }
@@ -1475,7 +1481,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                /* If this is a VLAN packet remove the VLAN Tag */
                vlan_packet_rcvd = false;
                if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
-                       fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) {
+                   fep->bufdesc_ex &&
+                   (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
                        /* Push and remove the vlan tag */
                        struct vlan_hdr *vlan_header =
                                        (struct vlan_hdr *) (data + ETH_HLEN);
@@ -1491,12 +1498,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 
                /* Get receive timestamp from the skb */
                if (fep->hwts_rx_en && fep->bufdesc_ex)
-                       fec_enet_hwtstamp(fep, ebdp->ts,
+                       fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
                                          skb_hwtstamps(skb));
 
                if (fep->bufdesc_ex &&
                    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
-                       if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) {
+                       if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
                                /* don't check it */
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                        } else {
@@ -1513,7 +1520,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
                napi_gro_receive(&fep->napi, skb);
 
                if (is_copybreak) {
-                       dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr,
+                       dma_sync_single_for_device(&fep->pdev->dev,
+                                                  fec32_to_cpu(bdp->cbd_bufaddr),
                                                   FEC_ENET_RX_FRSIZE - fep->rx_align,
                                                   DMA_FROM_DEVICE);
                } else {
@@ -1527,12 +1535,12 @@ rx_processing_done:
 
                /* Mark the buffer empty */
                status |= BD_ENET_RX_EMPTY;
-               bdp->cbd_sc = status;
+               bdp->cbd_sc = cpu_to_fec16(status);
 
                if (fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-                       ebdp->cbd_esc = BD_ENET_RX_INT;
+                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
                        ebdp->cbd_prot = 0;
                        ebdp->cbd_bdu = 0;
                }
@@ -2145,8 +2153,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev)
 
 /* List of registers that can be safely read to dump them with ethtool */
 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
-       defined(CONFIG_M520x) || defined(CONFIG_M532x) ||               \
-       defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
+       defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM)
 static u32 fec_enet_register_offset[] = {
        FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
        FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
@@ -2662,7 +2669,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
                        rxq->rx_skbuff[i] = NULL;
                        if (skb) {
                                dma_unmap_single(&fep->pdev->dev,
-                                                bdp->cbd_bufaddr,
+                                                fec32_to_cpu(bdp->cbd_bufaddr),
                                                 FEC_ENET_RX_FRSIZE - fep->rx_align,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
@@ -2777,11 +2784,11 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
                }
 
                rxq->rx_skbuff[i] = skb;
-               bdp->cbd_sc = BD_ENET_RX_EMPTY;
+               bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
 
                if (fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-                       ebdp->cbd_esc = BD_ENET_RX_INT;
+                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
                }
 
                bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2789,7 +2796,7 @@ fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
 
        /* Set the last buffer to wrap. */
        bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-       bdp->cbd_sc |= BD_SC_WRAP;
+       bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
        return 0;
 
  err_alloc:
@@ -2812,12 +2819,12 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
                if (!txq->tx_bounce[i])
                        goto err_alloc;
 
-               bdp->cbd_sc = 0;
-               bdp->cbd_bufaddr = 0;
+               bdp->cbd_sc = cpu_to_fec16(0);
+               bdp->cbd_bufaddr = cpu_to_fec32(0);
 
                if (fep->bufdesc_ex) {
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-                       ebdp->cbd_esc = BD_ENET_TX_INT;
+                       ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
                }
 
                bdp = fec_enet_get_nextdesc(bdp, fep, queue);
@@ -2825,7 +2832,7 @@ fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
 
        /* Set the last buffer to wrap. */
        bdp = fec_enet_get_prevdesc(bdp, fep, queue);
-       bdp->cbd_sc |= BD_SC_WRAP;
+       bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
 
        return 0;
 
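The fec_main.c hunks above wrap every buffer-descriptor access in fec16_to_cpu()/fec32_to_cpu() and cpu_to_fec16()/cpu_to_fec32(): the descriptor ring lives in DMA memory whose byte order is fixed by the MAC, not by the CPU, so raw loads and stores only worked where the two happened to agree. Below is a minimal sketch of how such helpers can be defined; the helper names follow the diff, but the config test and the trimmed descriptor layout are assumptions for illustration, not the driver's real definitions.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Assumption: ColdFire parts keep descriptors big-endian, i.MX parts
 * little-endian; pick the conversion at build time so every access site
 * can use one helper regardless of CPU endianness.
 */
#if defined(CONFIG_COLDFIRE)
#define fec32_to_cpu(x)	be32_to_cpu(x)
#define fec16_to_cpu(x)	be16_to_cpu(x)
#define cpu_to_fec32(x)	cpu_to_be32(x)
#define cpu_to_fec16(x)	cpu_to_be16(x)
#else
#define fec32_to_cpu(x)	le32_to_cpu(x)
#define fec16_to_cpu(x)	le16_to_cpu(x)
#define cpu_to_fec32(x)	cpu_to_le32(x)
#define cpu_to_fec16(x)	cpu_to_le16(x)
#endif

/* Hypothetical, trimmed descriptor: fields hold device-endian values. */
struct example_bufdesc {
	u16 cbd_sc;
	u16 cbd_datlen;
	u32 cbd_bufaddr;
};

/* Usage mirrors the hunks above: convert on every read and write. */
static inline u16 example_read_status(const struct example_bufdesc *bdp)
{
	return fec16_to_cpu(bdp->cbd_sc);
}

static inline void example_write_status(struct example_bufdesc *bdp, u16 sc)
{
	bdp->cbd_sc = cpu_to_fec16(sc);
}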
index 52e0091b4fb261f983ab0e043d1cb0d48638dc03..1ba359f17ec6e2103d8b1b126ec84a55d30f006e 100644 (file)
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
        cbd_t __iomem *prev_bd;
        cbd_t __iomem *last_tx_bd;
 
-       last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t));
+       last_tx_bd = fep->tx_bd_base + (fpi->tx_ring - 1);
 
        /* get the current bd held in TBPTR  and scan back from this point */
        recheck_bd = curr_tbptr = (cbd_t __iomem *)
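The one-line fs_enet fix above corrects a pointer-arithmetic bug: tx_bd_base already has type cbd_t __iomem *, so adding (fpi->tx_ring - 1) advances by whole descriptors, while the removed form scaled by sizeof(cbd_t) a second time and pointed far past the ring. A small standalone illustration of the scaling rule (the type and sizes here are stand-ins, not the driver's):

#include <stdio.h>

struct bd {			/* stand-in for cbd_t: 8 bytes */
	unsigned short sc, len;
	unsigned int addr;
};

int main(void)
{
	struct bd ring[8];

	/* "ring + 7" advances 7 *elements*, i.e. 7 * sizeof(struct bd) bytes. */
	printf("ring + 7 is %zu bytes past ring\n",
	       (size_t)((char *)(ring + 7) - (char *)ring));	/* prints 56 */

	/* The removed form, ring + 7 * sizeof(struct bd), would have advanced
	 * 7 * 8 = 56 elements (448 bytes) and left the 8-entry ring entirely,
	 * which is exactly what the fix avoids.
	 */
	return 0;
}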
index b3645297477e53309918e6762c44a6bfde031880..3bfe36f9405b60b4822185853ca3d3d35ea563a5 100644 (file)
@@ -95,21 +95,17 @@ static struct hnae_buf_ops hnae_bops = {
 static int __ae_match(struct device *dev, const void *data)
 {
        struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
-       const char *ae_id = data;
 
-       if (!strncmp(ae_id, hdev->name, AE_NAME_SIZE))
-               return 1;
-
-       return 0;
+       return hdev->dev->of_node == data;
 }
 
-static struct hnae_ae_dev *find_ae(const char *ae_id)
+static struct hnae_ae_dev *find_ae(const struct device_node *ae_node)
 {
        struct device *dev;
 
-       WARN_ON(!ae_id);
+       WARN_ON(!ae_node);
 
-       dev = class_find_device(hnae_class, NULL, ae_id, __ae_match);
+       dev = class_find_device(hnae_class, NULL, ae_node, __ae_match);
 
        return dev ? cls_to_ae_dev(dev) : NULL;
 }
@@ -316,7 +312,8 @@ EXPORT_SYMBOL(hnae_reinit_handle);
  * return handle ptr or ERR_PTR
  */
 struct hnae_handle *hnae_get_handle(struct device *owner_dev,
-                                   const char *ae_id, u32 port_id,
+                                   const struct device_node *ae_node,
+                                   u32 port_id,
                                    struct hnae_buf_ops *bops)
 {
        struct hnae_ae_dev *dev;
@@ -324,7 +321,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
        int i, j;
        int ret;
 
-       dev = find_ae(ae_id);
+       dev = find_ae(ae_node);
        if (!dev)
                return ERR_PTR(-ENODEV);
 
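The hnae hunks above replace the string-based AE lookup with a device_node comparison: __ae_match() now matches on hdev->dev->of_node, and hnae_get_handle() takes the node a consumer obtained from its "ae-handle" phandle (the hns_enet hunk further down does exactly that). A hedged sketch of the caller side, assuming the patched hnae.h declarations; the wrapper name and the error handling are illustrative only:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/types.h>
#include "hnae.h"		/* patched hnae_get_handle() prototype */

static struct hnae_handle *example_get_ae_handle(struct device *dev,
						 struct device_node *np,
						 u32 port_id)
{
	struct device_node *ae_node;
	struct hnae_handle *h;

	/* of_parse_phandle() returns NULL (not an ERR_PTR) on failure and
	 * takes a reference on the node it hands back.
	 */
	ae_node = of_parse_phandle(np, "ae-handle", 0);
	if (!ae_node)
		return ERR_PTR(-ENODEV);

	h = hnae_get_handle(dev, ae_node, port_id, NULL);

	/* The lookup itself only needs the node transiently; whether the
	 * caller must keep it pinned longer is a driver-level decision.
	 */
	of_node_put(ae_node);
	return h;
}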
index 6ca94dc3dda3c2447d59e369fee4eadeba6e0f52..1cbcb9fa3fb56b5ea4fee252f3f35294e6e7b6d3 100644 (file)
@@ -524,8 +524,11 @@ struct hnae_handle {
 
 #define ring_to_dev(ring) ((ring)->q->dev->dev)
 
-struct hnae_handle *hnae_get_handle(struct device *owner_dev, const char *ae_id,
-                                   u32 port_id, struct hnae_buf_ops *bops);
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+                                   const struct device_node *ae_node,
+                                   u32 port_id,
+                                   struct hnae_buf_ops *bops);
+
 void hnae_put_handle(struct hnae_handle *handle);
 int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
 void hnae_ae_unregister(struct hnae_ae_dev *dev);
index 522b264866b42e68b65e40cd93e28f991a3e678c..a0070d0e740dae5b3de8f2b2a92a772854b08321 100644 (file)
@@ -847,6 +847,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
 int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
 {
        struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+       static atomic_t id = ATOMIC_INIT(-1);
 
        switch (dsaf_dev->dsaf_ver) {
        case AE_VERSION_1:
@@ -858,6 +859,9 @@ int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
        default:
                break;
        }
+
+       snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
+                (int)atomic_inc_return(&id));
        ae_dev->ops = &hns_dsaf_ops;
        ae_dev->dev = dsaf_dev->dev;
 
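With the "dsa_name" property removed in the following hunk, the DSAF core now names its AE devices itself, which is what the added snprintf()/atomic_inc_return() pair above does. The pattern in isolation (buffer size and prefix below are stand-ins):

#include <linux/atomic.h>
#include <linux/kernel.h>

#define EXAMPLE_NAME_SIZE 16		/* stand-in for AE_NAME_SIZE */

/* Yields "dsaf0", "dsaf1", ...: the counter starts at -1 and
 * atomic_inc_return() hands back the post-increment value, so the first
 * caller gets 0 and concurrent probes still get unique ids.
 */
static void example_make_ae_name(char name[EXAMPLE_NAME_SIZE])
{
	static atomic_t id = ATOMIC_INIT(-1);

	snprintf(name, EXAMPLE_NAME_SIZE, "%s%d", "dsaf",
		 (int)atomic_inc_return(&id));
}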
index 1c33bd06bd5cc484ba320e60ceb5bee113493014..9439f04962e1d96bf480d8aedf6472c912e28813 100644 (file)
@@ -35,7 +35,7 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
        int ret, i;
        u32 desc_num;
        u32 buf_size;
-       const char *name, *mode_str;
+       const char *mode_str;
        struct device_node *np = dsaf_dev->dev->of_node;
 
        if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
@@ -43,14 +43,6 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
        else
                dsaf_dev->dsaf_ver = AE_VERSION_2;
 
-       ret = of_property_read_string(np, "dsa_name", &name);
-       if (ret) {
-               dev_err(dsaf_dev->dev, "get dsaf name fail, ret=%d!\n", ret);
-               return ret;
-       }
-       strncpy(dsaf_dev->ae_dev.name, name, AE_NAME_SIZE);
-       dsaf_dev->ae_dev.name[AE_NAME_SIZE - 1] = '\0';
-
        ret = of_property_read_string(np, "mode", &mode_str);
        if (ret) {
                dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
index 31c312f9826e9d8bc644b0aa1c6d6852ea25e5bd..40205b910f80e66a439c14b53e7f2c907b0fbeb1 100644 (file)
@@ -18,6 +18,7 @@ struct hns_mac_cb;
 
 #define DSAF_DRV_NAME "hns_dsaf"
 #define DSAF_MOD_VERSION "v1.0"
+#define DSAF_DEVICE_NAME "dsaf"
 
 #define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000
 
index 0e30846a24f89b632028211f60ee82018432a7e0..3f77ff77abbc4594f8aeb0ccc40c6691fc122fac 100644 (file)
@@ -1802,7 +1802,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
        int ret;
 
        h = hnae_get_handle(&priv->netdev->dev,
-                           priv->ae_name, priv->port_id, NULL);
+                           priv->ae_node, priv->port_id, NULL);
        if (IS_ERR_OR_NULL(h)) {
                ret = PTR_ERR(h);
                dev_dbg(priv->dev, "has not handle, register notifier!\n");
@@ -1880,13 +1880,16 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
        else
                priv->enet_ver = AE_VERSION_2;
 
-       ret = of_property_read_string(node, "ae-name", &priv->ae_name);
-       if (ret)
-               goto out_read_string_fail;
+       priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
+       if (IS_ERR_OR_NULL(priv->ae_node)) {
+               ret = PTR_ERR(priv->ae_node);
+               dev_err(dev, "not find ae-handle\n");
+               goto out_read_prop_fail;
+       }
 
        ret = of_property_read_u32(node, "port-id", &priv->port_id);
        if (ret)
-               goto out_read_string_fail;
+               goto out_read_prop_fail;
 
        hns_init_mac_addr(ndev);
 
@@ -1945,7 +1948,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
 
 out_notify_fail:
        (void)cancel_work_sync(&priv->service_task);
-out_read_string_fail:
+out_read_prop_fail:
        free_netdev(ndev);
        return ret;
 }
index 4b75270f014e765ba1cc02010c4175cafddda84c..c68ab3d34fc224ef00f43532f929fbd533b2ee80 100644 (file)
@@ -51,7 +51,7 @@ struct hns_nic_ops {
 };
 
 struct hns_nic_priv {
-       const char *ae_name;
+       const struct device_node *ae_node;
        u32 enet_ver;
        u32 port_id;
        int phy_mode;
index 1d5c3e16d8f4f4ad1f3ba580ee3a4a9c48663551..3daf2d4a7ca057a66e9ad797baa581c0d92caeba 100644 (file)
@@ -194,7 +194,6 @@ static const char *hp100_isa_tbl[] = {
 };
 #endif
 
-#ifdef CONFIG_EISA
 static struct eisa_device_id hp100_eisa_tbl[] = {
        { "HWPF180" }, /* HP J2577 rev A */
        { "HWP1920" }, /* HP 27248B */
@@ -205,9 +204,7 @@ static struct eisa_device_id hp100_eisa_tbl[] = {
        { "" }         /* Mandatory final entry ! */
 };
 MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
-#endif
 
-#ifdef CONFIG_PCI
 static const struct pci_device_id hp100_pci_tbl[] = {
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
        {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
@@ -219,7 +216,6 @@ static const struct pci_device_id hp100_pci_tbl[] = {
        {}                      /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, hp100_pci_tbl);
-#endif
 
 static int hp100_rx_ratio = HP100_DEFAULT_RX_RATIO;
 static int hp100_priority_tx = HP100_DEFAULT_PRIORITY_TX;
@@ -2842,7 +2838,6 @@ static void cleanup_dev(struct net_device *d)
        free_netdev(d);
 }
 
-#ifdef CONFIG_EISA
 static int hp100_eisa_probe(struct device *gendev)
 {
        struct net_device *dev = alloc_etherdev(sizeof(struct hp100_private));
@@ -2884,9 +2879,7 @@ static struct eisa_driver hp100_eisa_driver = {
                .remove  = hp100_eisa_remove,
         }
 };
-#endif
 
-#ifdef CONFIG_PCI
 static int hp100_pci_probe(struct pci_dev *pdev,
                           const struct pci_device_id *ent)
 {
@@ -2955,7 +2948,6 @@ static struct pci_driver hp100_pci_driver = {
        .probe          = hp100_pci_probe,
        .remove         = hp100_pci_remove,
 };
-#endif
 
 /*
  *  module section
@@ -3032,23 +3024,17 @@ static int __init hp100_module_init(void)
        err = hp100_isa_init();
        if (err && err != -ENODEV)
                goto out;
-#ifdef CONFIG_EISA
        err = eisa_driver_register(&hp100_eisa_driver);
        if (err && err != -ENODEV)
                goto out2;
-#endif
-#ifdef CONFIG_PCI
        err = pci_register_driver(&hp100_pci_driver);
        if (err && err != -ENODEV)
                goto out3;
-#endif
  out:
        return err;
  out3:
-#ifdef CONFIG_EISA
        eisa_driver_unregister (&hp100_eisa_driver);
  out2:
-#endif
        hp100_isa_cleanup();
        goto out;
 }
@@ -3057,12 +3043,8 @@ static int __init hp100_module_init(void)
 static void __exit hp100_module_exit(void)
 {
        hp100_isa_cleanup();
-#ifdef CONFIG_EISA
        eisa_driver_unregister (&hp100_eisa_driver);
-#endif
-#ifdef CONFIG_PCI
        pci_unregister_driver (&hp100_pci_driver);
-#endif
 }
 
 module_init(hp100_module_init)
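The hp100 hunks above drop the CONFIG_EISA/CONFIG_PCI conditionals around the device tables, probe routines and (un)registration calls; that only works because the bus headers provide inline fallbacks when a bus type is compiled out, so the calls still build and simply report that nothing was found. A hedged sketch of that header pattern with a made-up bus name; the real eisa.h/pci.h stubs may differ in detail:

#include <linux/errno.h>

struct foo_driver;			/* hypothetical bus driver type */

#ifdef CONFIG_FOO_BUS
int foo_driver_register(struct foo_driver *drv);
void foo_driver_unregister(struct foo_driver *drv);
#else
/* Bus support compiled out: registration becomes a harmless no-op, so a
 * caller like hp100_module_init() above can treat "no devices" as
 * non-fatal (note its "err && err != -ENODEV" checks) without #ifdefs.
 */
static inline int foo_driver_register(struct foo_driver *drv)
{
	return -ENODEV;
}
static inline void foo_driver_unregister(struct foo_driver *drv) { }
#endif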
index 68f2204ec6f3aa712ad8768f2985c46c9268df0c..53ed3bdd836311be4b0b4e2c383e8a2e04c311fa 100644 (file)
@@ -339,6 +339,8 @@ struct i40e_pf {
 #define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
 #define I40E_FLAG_GENEVE_OFFLOAD_CAPABLE       BIT_ULL(41)
 #define I40E_FLAG_NO_PCI_LINK_CHECK            BIT_ULL(42)
+#define I40E_FLAG_100M_SGMII_CAPABLE           BIT_ULL(43)
+#define I40E_FLAG_RESTART_AUTONEG              BIT_ULL(44)
 #define I40E_FLAG_PF_MAC                       BIT_ULL(50)
 
        /* tracks features that get auto disabled by errors */
index b22012a446a6e127fc7ed83665e3d629f441e4f2..0e608d2a70d5daef38dd1adb4765b472c9edf75b 100644 (file)
@@ -220,6 +220,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_get_phy_wol_caps           = 0x0621,
        i40e_aqc_opc_set_phy_debug              = 0x0622,
        i40e_aqc_opc_upload_ext_phy_fm          = 0x0625,
+       i40e_aqc_opc_run_phy_activity           = 0x0626,
 
        /* NVM commands */
        i40e_aqc_opc_nvm_read                   = 0x0701,
@@ -402,6 +403,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_OS2BMC_CAP      0x0004
 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
 #define I40E_AQ_CAP_ID_ALTERNATE_RAM   0x0006
+#define I40E_AQ_CAP_ID_WOL_AND_PROXY   0x0008
 #define I40E_AQ_CAP_ID_SRIOV           0x0012
 #define I40E_AQ_CAP_ID_VF              0x0013
 #define I40E_AQ_CAP_ID_VMDQ            0x0014
@@ -422,6 +424,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_LED             0x0061
 #define I40E_AQ_CAP_ID_SDP             0x0062
 #define I40E_AQ_CAP_ID_MDIO            0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT                0x0064
 #define I40E_AQ_CAP_ID_FLEX10          0x00F1
 #define I40E_AQ_CAP_ID_CEM             0x00F2
 
@@ -1257,9 +1260,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT              9
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK               0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN              0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN              0
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC         1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                        2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE             2
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                 3
 
        __le32  tenant_id;
@@ -1755,7 +1758,12 @@ struct i40e_aqc_get_link_status {
        u8      config;
 #define I40E_AQ_CONFIG_CRC_ENA         0x04
 #define I40E_AQ_CONFIG_PACING_MASK     0x78
-       u8      reserved[5];
+       u8      external_power_ability;
+#define I40E_AQ_LINK_POWER_CLASS_1     0x00
+#define I40E_AQ_LINK_POWER_CLASS_2     0x01
+#define I40E_AQ_LINK_POWER_CLASS_3     0x02
+#define I40E_AQ_LINK_POWER_CLASS_4     0x03
+       u8      reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -1823,6 +1831,18 @@ enum i40e_aq_phy_reg_type {
        I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
 };
 
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+       __le16  activity_id;
+       u8      flags;
+       u8      reserved1;
+       __le32  control;
+       __le32  data;
+       u8      reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
 /* NVM Read command (indirect 0x0701)
  * NVM Erase commands (direct 0x0702)
  * NVM Update commands (indirect 0x0703)
index 6a034ddac36a346e916dd11dde595f366bc34e32..3b03a3165ca71d474f06b0871342add07feb452e 100644 (file)
@@ -55,6 +55,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
                case I40E_DEV_ID_20G_KR2_A:
                        hw->mac.type = I40E_MAC_XL710;
                        break;
+               case I40E_DEV_ID_KX_X722:
+               case I40E_DEV_ID_QSFP_X722:
                case I40E_DEV_ID_SFP_X722:
                case I40E_DEV_ID_1G_BASE_T_X722:
                case I40E_DEV_ID_10G_BASE_T_X722:
@@ -2765,35 +2767,6 @@ i40e_aq_erase_nvm_exit:
        return status;
 }
 
-#define I40E_DEV_FUNC_CAP_SWITCH_MODE  0x01
-#define I40E_DEV_FUNC_CAP_MGMT_MODE    0x02
-#define I40E_DEV_FUNC_CAP_NPAR         0x03
-#define I40E_DEV_FUNC_CAP_OS2BMC       0x04
-#define I40E_DEV_FUNC_CAP_VALID_FUNC   0x05
-#define I40E_DEV_FUNC_CAP_SRIOV_1_1    0x12
-#define I40E_DEV_FUNC_CAP_VF           0x13
-#define I40E_DEV_FUNC_CAP_VMDQ         0x14
-#define I40E_DEV_FUNC_CAP_802_1_QBG    0x15
-#define I40E_DEV_FUNC_CAP_802_1_QBH    0x16
-#define I40E_DEV_FUNC_CAP_VSI          0x17
-#define I40E_DEV_FUNC_CAP_DCB          0x18
-#define I40E_DEV_FUNC_CAP_FCOE         0x21
-#define I40E_DEV_FUNC_CAP_ISCSI                0x22
-#define I40E_DEV_FUNC_CAP_RSS          0x40
-#define I40E_DEV_FUNC_CAP_RX_QUEUES    0x41
-#define I40E_DEV_FUNC_CAP_TX_QUEUES    0x42
-#define I40E_DEV_FUNC_CAP_MSIX         0x43
-#define I40E_DEV_FUNC_CAP_MSIX_VF      0x44
-#define I40E_DEV_FUNC_CAP_FLOW_DIRECTOR        0x45
-#define I40E_DEV_FUNC_CAP_IEEE_1588    0x46
-#define I40E_DEV_FUNC_CAP_FLEX10       0xF1
-#define I40E_DEV_FUNC_CAP_CEM          0xF2
-#define I40E_DEV_FUNC_CAP_IWARP                0x51
-#define I40E_DEV_FUNC_CAP_LED          0x61
-#define I40E_DEV_FUNC_CAP_SDP          0x62
-#define I40E_DEV_FUNC_CAP_MDIO         0x63
-#define I40E_DEV_FUNC_CAP_WR_CSR_PROT  0x64
-
 /**
  * i40e_parse_discover_capabilities
  * @hw: pointer to the hw struct
@@ -2832,79 +2805,79 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                major_rev = cap->major_rev;
 
                switch (id) {
-               case I40E_DEV_FUNC_CAP_SWITCH_MODE:
+               case I40E_AQ_CAP_ID_SWITCH_MODE:
                        p->switch_mode = number;
                        break;
-               case I40E_DEV_FUNC_CAP_MGMT_MODE:
+               case I40E_AQ_CAP_ID_MNG_MODE:
                        p->management_mode = number;
                        break;
-               case I40E_DEV_FUNC_CAP_NPAR:
+               case I40E_AQ_CAP_ID_NPAR_ACTIVE:
                        p->npar_enable = number;
                        break;
-               case I40E_DEV_FUNC_CAP_OS2BMC:
+               case I40E_AQ_CAP_ID_OS2BMC_CAP:
                        p->os2bmc = number;
                        break;
-               case I40E_DEV_FUNC_CAP_VALID_FUNC:
+               case I40E_AQ_CAP_ID_FUNCTIONS_VALID:
                        p->valid_functions = number;
                        break;
-               case I40E_DEV_FUNC_CAP_SRIOV_1_1:
+               case I40E_AQ_CAP_ID_SRIOV:
                        if (number == 1)
                                p->sr_iov_1_1 = true;
                        break;
-               case I40E_DEV_FUNC_CAP_VF:
+               case I40E_AQ_CAP_ID_VF:
                        p->num_vfs = number;
                        p->vf_base_id = logical_id;
                        break;
-               case I40E_DEV_FUNC_CAP_VMDQ:
+               case I40E_AQ_CAP_ID_VMDQ:
                        if (number == 1)
                                p->vmdq = true;
                        break;
-               case I40E_DEV_FUNC_CAP_802_1_QBG:
+               case I40E_AQ_CAP_ID_8021QBG:
                        if (number == 1)
                                p->evb_802_1_qbg = true;
                        break;
-               case I40E_DEV_FUNC_CAP_802_1_QBH:
+               case I40E_AQ_CAP_ID_8021QBR:
                        if (number == 1)
                                p->evb_802_1_qbh = true;
                        break;
-               case I40E_DEV_FUNC_CAP_VSI:
+               case I40E_AQ_CAP_ID_VSI:
                        p->num_vsis = number;
                        break;
-               case I40E_DEV_FUNC_CAP_DCB:
+               case I40E_AQ_CAP_ID_DCB:
                        if (number == 1) {
                                p->dcb = true;
                                p->enabled_tcmap = logical_id;
                                p->maxtc = phys_id;
                        }
                        break;
-               case I40E_DEV_FUNC_CAP_FCOE:
+               case I40E_AQ_CAP_ID_FCOE:
                        if (number == 1)
                                p->fcoe = true;
                        break;
-               case I40E_DEV_FUNC_CAP_ISCSI:
+               case I40E_AQ_CAP_ID_ISCSI:
                        if (number == 1)
                                p->iscsi = true;
                        break;
-               case I40E_DEV_FUNC_CAP_RSS:
+               case I40E_AQ_CAP_ID_RSS:
                        p->rss = true;
                        p->rss_table_size = number;
                        p->rss_table_entry_width = logical_id;
                        break;
-               case I40E_DEV_FUNC_CAP_RX_QUEUES:
+               case I40E_AQ_CAP_ID_RXQ:
                        p->num_rx_qp = number;
                        p->base_queue = phys_id;
                        break;
-               case I40E_DEV_FUNC_CAP_TX_QUEUES:
+               case I40E_AQ_CAP_ID_TXQ:
                        p->num_tx_qp = number;
                        p->base_queue = phys_id;
                        break;
-               case I40E_DEV_FUNC_CAP_MSIX:
+               case I40E_AQ_CAP_ID_MSIX:
                        p->num_msix_vectors = number;
                        break;
-               case I40E_DEV_FUNC_CAP_MSIX_VF:
+               case I40E_AQ_CAP_ID_VF_MSIX:
                        p->num_msix_vectors_vf = number;
                        break;
-               case I40E_DEV_FUNC_CAP_FLEX10:
+               case I40E_AQ_CAP_ID_FLEX10:
                        if (major_rev == 1) {
                                if (number == 1) {
                                        p->flex10_enable = true;
@@ -2920,38 +2893,38 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                        p->flex10_mode = logical_id;
                        p->flex10_status = phys_id;
                        break;
-               case I40E_DEV_FUNC_CAP_CEM:
+               case I40E_AQ_CAP_ID_CEM:
                        if (number == 1)
                                p->mgmt_cem = true;
                        break;
-               case I40E_DEV_FUNC_CAP_IWARP:
+               case I40E_AQ_CAP_ID_IWARP:
                        if (number == 1)
                                p->iwarp = true;
                        break;
-               case I40E_DEV_FUNC_CAP_LED:
+               case I40E_AQ_CAP_ID_LED:
                        if (phys_id < I40E_HW_CAP_MAX_GPIO)
                                p->led[phys_id] = true;
                        break;
-               case I40E_DEV_FUNC_CAP_SDP:
+               case I40E_AQ_CAP_ID_SDP:
                        if (phys_id < I40E_HW_CAP_MAX_GPIO)
                                p->sdp[phys_id] = true;
                        break;
-               case I40E_DEV_FUNC_CAP_MDIO:
+               case I40E_AQ_CAP_ID_MDIO:
                        if (number == 1) {
                                p->mdio_port_num = phys_id;
                                p->mdio_port_mode = logical_id;
                        }
                        break;
-               case I40E_DEV_FUNC_CAP_IEEE_1588:
+               case I40E_AQ_CAP_ID_1588:
                        if (number == 1)
                                p->ieee_1588 = true;
                        break;
-               case I40E_DEV_FUNC_CAP_FLOW_DIRECTOR:
+               case I40E_AQ_CAP_ID_FLOW_DIRECTOR:
                        p->fd = true;
                        p->fd_filters_guaranteed = number;
                        p->fd_filters_best_effort = logical_id;
                        break;
-               case I40E_DEV_FUNC_CAP_WR_CSR_PROT:
+               case I40E_AQ_CAP_ID_WSR_PROT:
                        p->wr_csr_prot = (u64)number;
                        p->wr_csr_prot |= (u64)logical_id << 32;
                        break;
index 2691277c0055d2572f2994e24c120066a7bec28b..582daa7ad77621e3dfdcc4941bd3a55adf6961d6 100644 (file)
@@ -814,13 +814,15 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
        struct i40e_aqc_get_cee_dcb_cfg_resp cee_cfg;
        struct i40e_aqc_get_cee_dcb_cfg_v1_resp cee_v1_cfg;
 
-       /* If Firmware version < v4.33 IEEE only */
-       if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
-           (hw->aq.fw_maj_ver < 4))
+       /* If Firmware version < v4.33 on X710/XL710, IEEE only */
+       if ((hw->mac.type == I40E_MAC_XL710) &&
+           (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+             (hw->aq.fw_maj_ver < 4)))
                return i40e_get_ieee_dcb_config(hw);
 
-       /* If Firmware version == v4.33 use old CEE struct */
-       if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
+       /* If Firmware version == v4.33 on X710/XL710, use old CEE struct */
+       if ((hw->mac.type == I40E_MAC_XL710) &&
+           ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33))) {
                ret = i40e_aq_get_cee_dcb_config(hw, &cee_v1_cfg,
                                                 sizeof(cee_v1_cfg), NULL);
                if (!ret) {
index 448ef4c17efbb3d815de7005008e2e8278f28992..f7ce5c7c90031a4af7bf26a97ba058c4fa4ffa4d 100644 (file)
@@ -41,6 +41,8 @@
 #define I40E_DEV_ID_10G_BASE_T4                0x1589
 #define I40E_DEV_ID_VF                 0x154C
 #define I40E_DEV_ID_VF_HV              0x1571
+#define I40E_DEV_ID_KX_X722            0x37CE
+#define I40E_DEV_ID_QSFP_X722          0x37CF
 #define I40E_DEV_ID_SFP_X722           0x37D0
 #define I40E_DEV_ID_1G_BASE_T_X722     0x37D1
 #define I40E_DEV_ID_10G_BASE_T_X722    0x37D2
index 29d5833e24a3ff558c9e26e62e3f6a74e1b4e7dd..45495911c5a4f5f221bac91ced1f284371880c4c 100644 (file)
@@ -340,7 +340,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
                                  SUPPORTED_1000baseT_Full;
                if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
                        ecmd->advertising |= ADVERTISED_1000baseT_Full;
-               if (pf->hw.mac.type == I40E_MAC_X722) {
+               if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
                        ecmd->supported |= SUPPORTED_100baseT_Full;
                        if (hw_link_info->requested_speeds &
                            I40E_LINK_SPEED_100MB)
@@ -411,6 +411,10 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
                if (pf->hw.mac.type == I40E_MAC_X722) {
                        ecmd->supported |= SUPPORTED_100baseT_Full;
                        ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+                               ecmd->supported |= SUPPORTED_100baseT_Full;
+                               ecmd->advertising |= ADVERTISED_100baseT_Full;
+                       }
                }
        }
        if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
@@ -2166,9 +2170,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-                       break;
+                       return -EINVAL;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                               hena |=
+                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+
                        hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
                        break;
                default:
@@ -2178,9 +2185,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case TCP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-                       break;
+                       return -EINVAL;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                               hena |=
+                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+
                        hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
                        break;
                default:
@@ -2190,10 +2200,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V4_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-                       break;
+                       return -EINVAL;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                               hena |=
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+
                        hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
                        break;
@@ -2204,10 +2217,13 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
        case UDP_V6_FLOW:
                switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-                       break;
+                       return -EINVAL;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+                       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE)
+                               hena |=
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+
                        hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
                        break;
index bb4612c159fd197db5b2a750027400b48cbdec77..320b0491abd95db56a7d71028fba3585acd20134 100644 (file)
@@ -51,7 +51,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 8
+#define DRV_VERSION_BUILD 10
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -90,6 +90,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_X722), 0},
+       {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
        {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
@@ -110,6 +112,8 @@ MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static struct workqueue_struct *i40e_wq;
+
 /**
  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw:   pointer to the HW structure
@@ -295,7 +299,7 @@ static void i40e_service_event_schedule(struct i40e_pf *pf)
        if (!test_bit(__I40E_DOWN, &pf->state) &&
            !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
            !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
-               schedule_work(&pf->service_task);
+               queue_work(i40e_wq, &pf->service_task);
 }
 
 /**
@@ -1368,7 +1372,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                f->changed = true;
 
                INIT_LIST_HEAD(&f->list);
-               list_add(&f->list, &vsi->mac_filter_list);
+               list_add_tail(&f->list, &vsi->mac_filter_list);
        }
 
        /* increment counter and add a new flag if needed */
@@ -6889,8 +6893,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                wr32(hw, I40E_REG_MSS, val);
        }
 
-       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-           (pf->hw.aq.fw_maj_ver < 4)) {
+       if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
                msleep(75);
                ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (ret)
@@ -7117,9 +7120,7 @@ static void i40e_service_task(struct work_struct *work)
        i40e_watchdog_subtask(pf);
        i40e_fdir_reinit_subtask(pf);
        i40e_sync_filters_subtask(pf);
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
        i40e_sync_udp_filters_subtask(pf);
-#endif
        i40e_clean_adminq_subtask(pf);
 
        i40e_service_event_complete(pf);
@@ -7937,6 +7938,52 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
        return ret;
 }
 
+/**
+ * i40e_get_rss_aq - Get RSS keys and lut by using AQ commands
+ * @vsi: Pointer to vsi structure
+ * @seed: Buffter to store the hash keys
+ * @lut: Buffer to store the lookup table entries
+ * @lut_size: Size of buffer to store the lookup table entries
+ *
+ * Return 0 on success, negative on failure
+ */
+static int i40e_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+                          u8 *lut, u16 lut_size)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       int ret = 0;
+
+       if (seed) {
+               ret = i40e_aq_get_rss_key(hw, vsi->id,
+                       (struct i40e_aqc_get_set_rss_key_data *)seed);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Cannot get RSS key, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       return ret;
+               }
+       }
+
+       if (lut) {
+               bool pf_lut = vsi->type == I40E_VSI_MAIN ? true : false;
+
+               ret = i40e_aq_get_rss_lut(hw, vsi->id, pf_lut, lut, lut_size);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "Cannot get RSS lut, err %s aq_err %s\n",
+                                i40e_stat_str(&pf->hw, ret),
+                                i40e_aq_str(&pf->hw,
+                                            pf->hw.aq.asq_last_status));
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
 /**
  * i40e_config_rss_reg - Configure RSS keys and lut by writing registers
  * @vsi: Pointer to vsi structure
@@ -8039,7 +8086,12 @@ int i40e_config_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
  */
 int i40e_get_rss(struct i40e_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size)
 {
-       return i40e_get_rss_reg(vsi, seed, lut, lut_size);
+       struct i40e_pf *pf = vsi->back;
+
+       if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
+               return i40e_get_rss_aq(vsi, seed, lut, lut_size);
+       else
+               return i40e_get_rss_reg(vsi, seed, lut, lut_size);
 }
 
 /**
@@ -8369,6 +8421,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
                                 pf->hw.func_caps.fd_filters_best_effort;
        }
 
+       if (((pf->hw.mac.type == I40E_MAC_X710) ||
+            (pf->hw.mac.type == I40E_MAC_XL710)) &&
+           (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
+           (pf->hw.aq.fw_maj_ver < 4)))
+               pf->flags |= I40E_FLAG_RESTART_AUTONEG;
+
        if (pf->hw.func_caps.vmdq) {
                pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
                pf->flags |= I40E_FLAG_VMDQ_ENABLED;
@@ -8395,6 +8453,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
                             I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
                             I40E_FLAG_WB_ON_ITR_CAPABLE |
                             I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
+                            I40E_FLAG_100M_SGMII_CAPABLE |
                             I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
        }
        pf->eeprom_version = 0xDEAD;
@@ -8515,6 +8574,8 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
 }
 
 #endif
+
+#if IS_ENABLED(CONFIG_VXLAN)
 /**
  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
  * @netdev: This physical port's netdev
@@ -8524,7 +8585,6 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
 static void i40e_add_vxlan_port(struct net_device *netdev,
                                sa_family_t sa_family, __be16 port)
 {
-#if IS_ENABLED(CONFIG_VXLAN)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
@@ -8557,7 +8617,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
        pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
        pf->pending_udp_bitmap |= BIT_ULL(next_idx);
        pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
-#endif
 }
 
 /**
@@ -8569,7 +8628,6 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
 static void i40e_del_vxlan_port(struct net_device *netdev,
                                sa_family_t sa_family, __be16 port)
 {
-#if IS_ENABLED(CONFIG_VXLAN)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
@@ -8592,9 +8650,10 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
                netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
                            ntohs(port));
        }
-#endif
 }
+#endif
 
+#if IS_ENABLED(CONFIG_GENEVE)
 /**
  * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
  * @netdev: This physical port's netdev
@@ -8604,7 +8663,6 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 static void i40e_add_geneve_port(struct net_device *netdev,
                                 sa_family_t sa_family, __be16 port)
 {
-#if IS_ENABLED(CONFIG_GENEVE)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
@@ -8639,7 +8697,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
        pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
 
        dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
-#endif
 }
 
 /**
@@ -8651,7 +8708,6 @@ static void i40e_add_geneve_port(struct net_device *netdev,
 static void i40e_del_geneve_port(struct net_device *netdev,
                                 sa_family_t sa_family, __be16 port)
 {
-#if IS_ENABLED(CONFIG_GENEVE)
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
        struct i40e_pf *pf = vsi->back;
@@ -8677,8 +8733,8 @@ static void i40e_del_geneve_port(struct net_device *netdev,
                netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
                            ntohs(port));
        }
-#endif
 }
+#endif
 
 static int i40e_get_phys_port_id(struct net_device *netdev,
                                 struct netdev_phys_item_id *ppid)
@@ -8947,11 +9003,11 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        np = netdev_priv(netdev);
        np->vsi = vsi;
 
-       netdev->hw_enc_features |= NETIF_F_IP_CSUM       |
-                                 NETIF_F_RXCSUM         |
-                                 NETIF_F_GSO_UDP_TUNNEL |
-                                 NETIF_F_GSO_GRE        |
-                                 NETIF_F_TSO;
+       netdev->hw_enc_features |= NETIF_F_IP_CSUM        |
+                                  NETIF_F_GSO_UDP_TUNNEL |
+                                  NETIF_F_GSO_GRE        |
+                                  NETIF_F_TSO            |
+                                  0;
 
        netdev->features = NETIF_F_SG                  |
                           NETIF_F_IP_CSUM             |
@@ -10909,8 +10965,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                wr32(hw, I40E_REG_MSS, val);
        }
 
-       if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-           (pf->hw.aq.fw_maj_ver < 4)) {
+       if (pf->flags & I40E_FLAG_RESTART_AUTONEG) {
                msleep(75);
                err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
                if (err)
@@ -11418,6 +11473,16 @@ static int __init i40e_init_module(void)
                i40e_driver_string, i40e_driver_version_str);
        pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
 
+       /* we will see if single thread per module is enough for now,
+        * it can't be any worse than using the system workqueue which
+        * was already single threaded
+        */
+       i40e_wq = create_singlethread_workqueue(i40e_driver_name);
+       if (!i40e_wq) {
+               pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
+               return -ENOMEM;
+       }
+
        i40e_dbg_init();
        return pci_register_driver(&i40e_driver);
 }
@@ -11432,6 +11497,7 @@ module_init(i40e_init_module);
 static void __exit i40e_exit_module(void)
 {
        pci_unregister_driver(&i40e_driver);
+       destroy_workqueue(i40e_wq);
        i40e_dbg_exit();
 }
 module_exit(i40e_exit_module);
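Both the i40e hunks above and the i40evf hunks below move their deferred work off the system workqueue onto a driver-owned single-threaded workqueue: create it at module init, queue_work() to it from the hot paths, destroy it on exit. A minimal standalone sketch of that lifecycle with made-up names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_service_task(struct work_struct *work)
{
	/* periodic service work would run here */
}
static DECLARE_WORK(example_work, example_service_task);

static int __init example_init(void)
{
	example_wq = create_singlethread_workqueue("example_wq");
	if (!example_wq)
		return -ENOMEM;

	/* Hot paths queue onto the private workqueue instead of schedule_work(). */
	queue_work(example_wq, &example_work);
	return 0;
}

static void __exit example_exit(void)
{
	/* Drains any pending work before freeing the workqueue. */
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");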
index 720516b0e8eec59debf3b41195f9a2617eef169b..47bd8b3145a79a9bf2e299fc303100be4bf8289c 100644 (file)
@@ -2313,8 +2313,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
        struct iphdr *this_ip_hdr;
        u32 network_hdr_len;
        u8 l4_hdr = 0;
-       struct udphdr *oudph;
-       struct iphdr *oiph;
+       struct udphdr *oudph = NULL;
+       struct iphdr *oiph = NULL;
        u32 l4_tunnel = 0;
 
        if (skb->encapsulation) {
index 63e62f9aec6ef45e73e08970d5fca6152ac102c3..659d78270fdbaffe5e99f766d673f01cef98ab6a 100644 (file)
@@ -1213,9 +1213,21 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
        }
 
+       if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
+               if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                       vfres->vf_offload_flags |=
+                               I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
+       }
+
        if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
                vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
 
+       if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
+               if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+                       vfres->vf_offload_flags |=
+                                       I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+       }
+
        vfres->num_vsis = num_vsis;
        vfres->num_queue_pairs = vf->num_queue_pairs;
        vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
index f5b2b369dc7ce883820faae432e1f10947e29d14..578b1780fb08deaeaff6bfdb0e6fa50e5a93311a 100644 (file)
@@ -220,6 +220,7 @@ enum i40e_admin_queue_opc {
        i40e_aqc_opc_get_phy_wol_caps           = 0x0621,
        i40e_aqc_opc_set_phy_debug              = 0x0622,
        i40e_aqc_opc_upload_ext_phy_fm          = 0x0625,
+       i40e_aqc_opc_run_phy_activity           = 0x0626,
 
        /* NVM commands */
        i40e_aqc_opc_nvm_read                   = 0x0701,
@@ -399,6 +400,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_OS2BMC_CAP      0x0004
 #define I40E_AQ_CAP_ID_FUNCTIONS_VALID 0x0005
 #define I40E_AQ_CAP_ID_ALTERNATE_RAM   0x0006
+#define I40E_AQ_CAP_ID_WOL_AND_PROXY   0x0008
 #define I40E_AQ_CAP_ID_SRIOV           0x0012
 #define I40E_AQ_CAP_ID_VF              0x0013
 #define I40E_AQ_CAP_ID_VMDQ            0x0014
@@ -419,6 +421,7 @@ struct i40e_aqc_list_capabilities_element_resp {
 #define I40E_AQ_CAP_ID_LED             0x0061
 #define I40E_AQ_CAP_ID_SDP             0x0062
 #define I40E_AQ_CAP_ID_MDIO            0x0063
+#define I40E_AQ_CAP_ID_WSR_PROT                0x0064
 #define I40E_AQ_CAP_ID_FLEX10          0x00F1
 #define I40E_AQ_CAP_ID_CEM             0x00F2
 
@@ -1254,9 +1257,9 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
 
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT              9
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK               0x1E00
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN              0
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN              0
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC         1
-#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE                        2
+#define I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE             2
 #define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP                 3
 
        __le32  tenant_id;
@@ -1752,7 +1755,12 @@ struct i40e_aqc_get_link_status {
        u8      config;
 #define I40E_AQ_CONFIG_CRC_ENA         0x04
 #define I40E_AQ_CONFIG_PACING_MASK     0x78
-       u8      reserved[5];
+       u8      external_power_ability;
+#define I40E_AQ_LINK_POWER_CLASS_1     0x00
+#define I40E_AQ_LINK_POWER_CLASS_2     0x01
+#define I40E_AQ_LINK_POWER_CLASS_3     0x02
+#define I40E_AQ_LINK_POWER_CLASS_4     0x03
+       u8      reserved[4];
 };
 
 I40E_CHECK_CMD_LENGTH(i40e_aqc_get_link_status);
@@ -1820,6 +1828,18 @@ enum i40e_aq_phy_reg_type {
        I40E_AQC_PHY_REG_EXERNAL_MODULE = 0x3
 };
 
+/* Run PHY Activity (0x0626) */
+struct i40e_aqc_run_phy_activity {
+       __le16  activity_id;
+       u8      flags;
+       u8      reserved1;
+       __le32  control;
+       __le32  data;
+       u8      reserved2[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_run_phy_activity);
+
 /* NVM Read command (indirect 0x0701)
  * NVM Erase commands (direct 0x0702)
  * NVM Update commands (indirect 0x0703)
index 7a00657dacda63634276477b16a0c61e6957aaeb..7d663fb6192756e7168c22e20ba101fea02cf478 100644 (file)
@@ -252,6 +252,22 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
        tx_ring->q_vector->tx.total_bytes += total_bytes;
        tx_ring->q_vector->tx.total_packets += total_packets;
 
+       if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
+               unsigned int j = 0;
+               /* check to see if there are < 4 descriptors
+                * waiting to be written back, then kick the hardware to force
+                * them to be written back in case we stay in NAPI.
+                * In this mode on X722 we do not enable Interrupt.
+                */
+               j = i40evf_get_tx_pending(tx_ring);
+
+               if (budget &&
+                   ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
+                   !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+                   (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+                       tx_ring->arm_wb = true;
+       }
+
        netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
                                                      tx_ring->queue_index),
                                  total_packets, total_bytes);
index be1b72b938882d73d5f522f8aab58b0fda2275bb..9e15f68d9dddec10bab585b10cce367cbdef551d 100644 (file)
@@ -173,6 +173,7 @@ enum i40evf_state_t {
        __I40EVF_RESETTING,             /* in reset */
        /* Below here, watchdog is running */
        __I40EVF_DOWN,                  /* ready, can be opened */
+       __I40EVF_DOWN_PENDING,          /* descending, waiting for watchdog */
        __I40EVF_TESTING,               /* in ethtool self-test */
        __I40EVF_RUNNING,               /* opened, working */
 };
index a4c9feb589e7022619bc705d54ee6b282e9bbdc5..bd1c2728bc5c603887bc6d0a8090706c19ea3143 100644 (file)
@@ -459,6 +459,7 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
                                   struct ethtool_rxnfc *nfc)
 {
        struct i40e_hw *hw = &adapter->hw;
+       u32 flags = adapter->vf_res->vf_offload_flags;
 
        u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
                   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
@@ -477,54 +478,50 @@ static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
 
        switch (nfc->flow_type) {
        case TCP_V4_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                               hena |=
+                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+
                        hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
-                       break;
-               default:
+               } else {
                        return -EINVAL;
                }
                break;
        case TCP_V6_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                               hena |=
+                          BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+
                        hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
-                       break;
-               default:
+               } else {
                        return -EINVAL;
                }
                break;
        case UDP_V4_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                               hena |=
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+
                        hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
-                       break;
-               default:
+               } else {
                        return -EINVAL;
                }
                break;
        case UDP_V6_FLOW:
-               switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
-               case 0:
-                       hena &= ~(BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
-                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-                       break;
-               case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+               if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+                       if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+                               hena |=
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
+                           BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+
                        hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
                                 BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
-                       break;
-               default:
+               } else {
                        return -EINVAL;
                }
                break;
index 94da913b151da615f751d287b33a65de92d95829..66964eb6b7de3d0222b4b19e00c7be5c7ea0871b 100644 (file)
@@ -69,6 +69,8 @@ MODULE_DESCRIPTION("Intel(R) XL710 X710 Virtual Function Network Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+static struct workqueue_struct *i40evf_wq;
+
 /**
  * i40evf_allocate_dma_mem_d - OS specific memory alloc for shared code
  * @hw:   pointer to the HW structure
@@ -182,7 +184,7 @@ static void i40evf_tx_timeout(struct net_device *netdev)
        if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING |
                                I40EVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
-               schedule_work(&adapter->reset_task);
+               queue_work(i40evf_wq, &adapter->reset_task);
        }
 }
 
@@ -1032,7 +1034,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        struct i40evf_mac_filter *f;
 
-       if (adapter->state == __I40EVF_DOWN)
+       if (adapter->state <= __I40EVF_DOWN_PENDING)
                return;
 
        while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
@@ -1122,7 +1124,9 @@ static void i40evf_free_queues(struct i40evf_adapter *adapter)
        if (!adapter->vsi_res)
                return;
        kfree(adapter->tx_rings);
+       adapter->tx_rings = NULL;
        kfree(adapter->rx_rings);
+       adapter->rx_rings = NULL;
 }
 
 /**
@@ -1454,7 +1458,11 @@ static int i40evf_init_rss(struct i40evf_adapter *adapter)
        int ret;
 
        /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
-       hena = I40E_DEFAULT_RSS_HENA;
+       if (adapter->vf_res->vf_offload_flags &
+                                       I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+               hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+       else
+               hena = I40E_DEFAULT_RSS_HENA;
        wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
 
@@ -2142,7 +2150,8 @@ static int i40evf_open(struct net_device *netdev)
                dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
                return -EIO;
        }
-       if (adapter->state != __I40EVF_DOWN || adapter->aq_required)
+
+       if (adapter->state != __I40EVF_DOWN)
                return -EBUSY;
 
        /* allocate transmit descriptors */
@@ -2197,14 +2206,14 @@ static int i40evf_close(struct net_device *netdev)
 {
        struct i40evf_adapter *adapter = netdev_priv(netdev);
 
-       if (adapter->state <= __I40EVF_DOWN)
+       if (adapter->state <= __I40EVF_DOWN_PENDING)
                return 0;
 
 
        set_bit(__I40E_DOWN, &adapter->vsi.state);
 
        i40evf_down(adapter);
-       adapter->state = __I40EVF_DOWN;
+       adapter->state = __I40EVF_DOWN_PENDING;
        i40evf_free_traffic_irqs(adapter);
 
        return 0;
@@ -2504,8 +2513,11 @@ static void i40evf_init_task(struct work_struct *work)
        if (adapter->vf_res->vf_offload_flags &
                    I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
                adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
-       if (!RSS_AQ(adapter))
-               i40evf_init_rss(adapter);
+
+       if (adapter->vf_res->vf_offload_flags &
+           I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+               adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;
+
        err = i40evf_request_misc_irq(adapter);
        if (err)
                goto err_sw_init;
@@ -2885,6 +2897,11 @@ static int __init i40evf_init_module(void)
 
        pr_info("%s\n", i40evf_copyright);
 
+       i40evf_wq = create_singlethread_workqueue(i40evf_driver_name);
+       if (!i40evf_wq) {
+               pr_err("%s: Failed to create workqueue\n", i40evf_driver_name);
+               return -ENOMEM;
+       }
        ret = pci_register_driver(&i40evf_driver);
        return ret;
 }
@@ -2900,6 +2917,7 @@ module_init(i40evf_init_module);
 static void __exit i40evf_exit_module(void)
 {
        pci_unregister_driver(&i40evf_driver);
+       destroy_workqueue(i40evf_wq);
 }
 
 module_exit(i40evf_exit_module);
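
The driver now queues its reset work onto a private single-threaded workqueue created at module load and destroyed at module unload, rather than the system workqueue. A minimal sketch of that pattern, using a hypothetical module rather than the driver itself:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
        pr_info("running on a dedicated single-threaded workqueue\n");
}

static int __init example_init(void)
{
        example_wq = create_singlethread_workqueue("example_wq");
        if (!example_wq)
                return -ENOMEM;
        INIT_WORK(&example_work, example_work_fn);
        queue_work(example_wq, &example_work);  /* instead of schedule_work() */
        return 0;
}

static void __exit example_exit(void)
{
        /* flushes pending work, then frees the workqueue */
        destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");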
index c1c5262837572fdfb00d62f3e322c2f6ac7d14f1..d3739cc5b608488d6b1e91405c7e267d85327db3 100644 (file)
@@ -804,6 +804,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
        case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
                i40evf_free_all_tx_resources(adapter);
                i40evf_free_all_rx_resources(adapter);
+               if (adapter->state == __I40EVF_DOWN_PENDING)
+                       adapter->state = __I40EVF_DOWN;
                break;
        case I40E_VIRTCHNL_OP_VERSION:
        case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
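
Taken together with the close() hunk earlier, this gives a two-phase shutdown: close() parks the adapter in __I40EVF_DOWN_PENDING, and the transition to __I40EVF_DOWN completes only when the PF acknowledges I40E_VIRTCHNL_OP_DISABLE_QUEUES. A compact sketch of that handshake, with hypothetical state and struct names standing in for the driver's own:

/* Illustrative two-phase shutdown modelled on the hunks above. */
enum example_vf_state {
        EX_VF_DOWN,             /* fully quiesced */
        EX_VF_DOWN_PENDING,     /* waiting for the PF to ack DISABLE_QUEUES */
        EX_VF_RUNNING,
};

struct example_vf {
        enum example_vf_state state;
};

static int example_vf_close(struct example_vf *vf)
{
        if (vf->state <= EX_VF_DOWN_PENDING)
                return 0;               /* already down or going down */

        vf->state = EX_VF_DOWN_PENDING;
        /* ... send the DISABLE_QUEUES request to the PF here ... */
        return 0;
}

static void example_vf_disable_queues_done(struct example_vf *vf)
{
        if (vf->state == EX_VF_DOWN_PENDING)
                vf->state = EX_VF_DOWN; /* shutdown is now complete */
}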
index a0c03834a2f7b3d7494b864db6e96ee590b36c81..55831188bc32431e935550a7da5671fa1d24d89a 100644 (file)
@@ -762,10 +762,10 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
 
        if (length <= 8 && (uintptr_t)data & 0x7) {
                /* Copy unaligned small data fragment to TSO header data area */
-               memcpy(txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE,
+               memcpy(txq->tso_hdrs + tx_index * TSO_HEADER_SIZE,
                       data, length);
                desc->buf_ptr = txq->tso_hdrs_dma
-                       + txq->tx_curr_desc * TSO_HEADER_SIZE;
+                       + tx_index * TSO_HEADER_SIZE;
        } else {
                /* Alignment is okay, map buffer and hand off to hardware */
                txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
index fabc8df40392572d607f3e3417e60b54a67b4bf3..662c2ee268c7c7512c7d5f1290e1cb02fa3883dc 100644 (file)
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
 #include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
 #include <linux/inetdevice.h>
-#include <linux/mbus.h>
-#include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/if_vlan.h>
-#include <net/ip.h>
-#include <net/ipv6.h>
 #include <linux/io.h>
-#include <net/tso.h>
+#include <linux/kernel.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/of_irq.h>
 #include <linux/of_mdio.h>
 #include <linux/of_net.h>
-#include <linux/of_address.h>
 #include <linux/phy.h>
-#include <linux/clk.h>
-#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tso.h>
 
 /* Registers */
 #define MVNETA_RXQ_CONFIG_REG(q)                (0x1400 + ((q) << 2))
@@ -373,6 +373,8 @@ struct mvneta_port {
 
        /* Core clock */
        struct clk *clk;
+       /* AXI clock */
+       struct clk *clk_bus;
        u8 mcast_count[256];
        u16 tx_ring_size;
        u16 rx_ring_size;
@@ -3242,26 +3244,25 @@ static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
        const struct mvneta_statistic *s;
        void __iomem *base = pp->base;
        u32 high, low, val;
+       u64 val64;
        int i;
 
        for (i = 0, s = mvneta_statistics;
             s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics);
             s++, i++) {
-               val = 0;
-
                switch (s->type) {
                case T_REG_32:
                        val = readl_relaxed(base + s->offset);
+                       pp->ethtool_stats[i] += val;
                        break;
                case T_REG_64:
                        /* Docs say to read low 32-bit then high */
                        low = readl_relaxed(base + s->offset);
                        high = readl_relaxed(base + s->offset + 4);
-                       val = (u64)high << 32 | low;
+                       val64 = (u64)high << 32 | low;
+                       pp->ethtool_stats[i] += val64;
                        break;
                }
-
-               pp->ethtool_stats[i] += val;
        }
 }
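
The statistics loop now keeps a separate u64 accumulator for T_REG_64 counters so the high word read from offset + 4 is never truncated through the 32-bit val. A short sketch of the low-then-high read pattern used above, against a hypothetical register layout:

#include <linux/io.h>
#include <linux/types.h>

/* Read a 64-bit counter exposed as two 32-bit registers, low word first,
 * as the T_REG_64 case above does.
 */
static u64 example_read_stat64(void __iomem *base, unsigned int offset)
{
        u32 low, high;

        low = readl_relaxed(base + offset);
        high = readl_relaxed(base + offset + 4);
        return (u64)high << 32 | low;
}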
 
@@ -3605,7 +3606,9 @@ static int mvneta_probe(struct platform_device *pdev)
 
        pp->indir[0] = rxq_def;
 
-       pp->clk = devm_clk_get(&pdev->dev, NULL);
+       pp->clk = devm_clk_get(&pdev->dev, "core");
+       if (IS_ERR(pp->clk))
+               pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
                err = PTR_ERR(pp->clk);
                goto err_put_phy_node;
@@ -3613,6 +3616,10 @@ static int mvneta_probe(struct platform_device *pdev)
 
        clk_prepare_enable(pp->clk);
 
+       pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
+       if (!IS_ERR(pp->clk_bus))
+               clk_prepare_enable(pp->clk_bus);
+
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pp->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pp->base)) {
@@ -3724,6 +3731,7 @@ err_free_stats:
 err_free_ports:
        free_percpu(pp->ports);
 err_clk:
+       clk_disable_unprepare(pp->clk_bus);
        clk_disable_unprepare(pp->clk);
 err_put_phy_node:
        of_node_put(phy_node);
@@ -3741,6 +3749,7 @@ static int mvneta_remove(struct platform_device *pdev)
        struct mvneta_port *pp = netdev_priv(dev);
 
        unregister_netdev(dev);
+       clk_disable_unprepare(pp->clk_bus);
        clk_disable_unprepare(pp->clk);
        free_percpu(pp->ports);
        free_percpu(pp->stats);
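
probe() now asks for a named "core" clock, falling back to the unnamed clock for older device trees, and treats the "bus" (AXI) clock as optional; both are released on the error path and in remove(). A minimal sketch of that lookup order for a generic platform driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Optional named clocks with a backwards-compatible fallback, as in the
 * mvneta probe hunk above.
 */
static int example_get_clocks(struct platform_device *pdev,
                              struct clk **core, struct clk **bus)
{
        *core = devm_clk_get(&pdev->dev, "core");
        if (IS_ERR(*core))                      /* old DTs: single unnamed clock */
                *core = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(*core))
                return PTR_ERR(*core);
        clk_prepare_enable(*core);

        *bus = devm_clk_get(&pdev->dev, "bus"); /* optional AXI clock */
        if (!IS_ERR(*bus))
                clk_prepare_enable(*bus);
        return 0;
}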
index 0c5237264e3e29519933180f307a531c8b34c054..bb77e2207804d9c3431b091f8d219297c8f92300 100644 (file)
@@ -1044,6 +1044,92 @@ static inline void mlxsw_reg_sftr_pack(char *payload,
        mlxsw_reg_sftr_port_mask_set(payload, port, 1);
 }
 
+/* SFDF - Switch Filtering DB Flush
+ * --------------------------------
+ * The switch filtering DB flush register is used to flush the FDB.
+ * Note that FDB notifications are flushed as well.
+ */
+#define MLXSW_REG_SFDF_ID 0x2013
+#define MLXSW_REG_SFDF_LEN 0x14
+
+static const struct mlxsw_reg_info mlxsw_reg_sfdf = {
+       .id = MLXSW_REG_SFDF_ID,
+       .len = MLXSW_REG_SFDF_LEN,
+};
+
+/* reg_sfdf_swid
+ * Switch partition ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sfdf, swid, 0x00, 24, 8);
+
+enum mlxsw_reg_sfdf_flush_type {
+       MLXSW_REG_SFDF_FLUSH_PER_SWID,
+       MLXSW_REG_SFDF_FLUSH_PER_FID,
+       MLXSW_REG_SFDF_FLUSH_PER_PORT,
+       MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID,
+       MLXSW_REG_SFDF_FLUSH_PER_LAG,
+       MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID,
+};
+
+/* reg_sfdf_flush_type
+ * Flush type.
+ * 0 - All SWID dynamic entries are flushed.
+ * 1 - All FID dynamic entries are flushed.
+ * 2 - All dynamic entries pointing to port are flushed.
+ * 3 - All FID dynamic entries pointing to port are flushed.
+ * 4 - All dynamic entries pointing to LAG are flushed.
+ * 5 - All FID dynamic entries pointing to LAG are flushed.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_type, 0x04, 28, 4);
+
+/* reg_sfdf_flush_static
+ * Static.
+ * 0 - Flush only dynamic entries.
+ * 1 - Flush both dynamic and static entries.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, flush_static, 0x04, 24, 1);
+
+static inline void mlxsw_reg_sfdf_pack(char *payload,
+                                      enum mlxsw_reg_sfdf_flush_type type)
+{
+       MLXSW_REG_ZERO(sfdf, payload);
+       mlxsw_reg_sfdf_flush_type_set(payload, type);
+       mlxsw_reg_sfdf_flush_static_set(payload, true);
+}
+
+/* reg_sfdf_fid
+ * FID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, fid, 0x0C, 0, 16);
+
+/* reg_sfdf_system_port
+ * Port to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, system_port, 0x0C, 0, 16);
+
+/* reg_sfdf_port_fid_system_port
+ * Port to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, port_fid_system_port, 0x08, 0, 16);
+
+/* reg_sfdf_lag_id
+ * LAG ID to flush.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_id, 0x0C, 0, 10);
+
+/* reg_sfdf_lag_fid_lag_id
+ * LAG ID to flush, pointed to by FID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sfdf, lag_fid_lag_id, 0x08, 0, 10);
+
 /* SLDR - Switch LAG Descriptor Register
  * -----------------------------------------
  * The switch LAG descriptor register is populated by LAG descriptors.
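
The SFDF register introduced in the hunk above selects what to flush through flush_type plus one of the index fields (fid, system_port, lag_id, or the *_fid_* variants). As a usage illustration, a per-FID flush could be packed and written roughly as below. This is a sketch built only from the helpers defined in this hunk and assumes the driver's reg.h/core.h context; error handling is elided.

/* Flush the FDB entries of a single FID (sketch). */
static int example_flush_fid(struct mlxsw_core *core, u16 fid)
{
        char sfdf_pl[MLXSW_REG_SFDF_LEN];

        mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_FID);
        mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);

        return mlxsw_reg_write(core, MLXSW_REG(sfdf), sfdf_pl);
}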
@@ -1701,20 +1787,20 @@ MLXSW_ITEM32(reg, pmlp, width, 0x00, 0, 8);
  * Module number.
  * Access: RW
  */
-MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, module, 0x04, 0, 8, 0x04, 0x00, false);
 
 /* reg_pmlp_tx_lane
  * Tx Lane. When rxtx field is cleared, this field is used for Rx as well.
  * Access: RW
  */
-MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 16, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, tx_lane, 0x04, 16, 2, 0x04, 0x00, false);
 
 /* reg_pmlp_rx_lane
  * Rx Lane. When rxtx field is cleared, this field is ignored and Rx lane is
  * equal to Tx lane.
  * Access: RW
  */
-MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 24, false);
+MLXSW_ITEM32_INDEXED(reg, pmlp, rx_lane, 0x04, 24, 2, 0x04, 0x00, false);
 
 static inline void mlxsw_reg_pmlp_pack(char *payload, u8 local_port)
 {
@@ -3121,6 +3207,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
                return "SFGC";
        case MLXSW_REG_SFTR_ID:
                return "SFTR";
+       case MLXSW_REG_SFDF_ID:
+               return "SFDF";
        case MLXSW_REG_SLDR_ID:
                return "SLDR";
        case MLXSW_REG_SLCR_ID:
index ce6845d534a83f7acea04c0d0c4401103192f0ab..217856bdd400474d400d1e4e3dd275a096f50db5 100644 (file)
@@ -1979,6 +1979,115 @@ static struct mlxsw_driver mlxsw_sp_driver = {
        .profile                = &mlxsw_sp_config_profile,
 };
 
+static int
+mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+       mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
+       mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+                                   u16 fid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+       mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID);
+       mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+       mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
+                                               mlxsw_sp_port->local_port);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+       mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
+       mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+                                     u16 fid)
+{
+       struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       char sfdf_pl[MLXSW_REG_SFDF_LEN];
+
+       mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID);
+       mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
+       mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+
+       return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+}
+
+static int
+__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       int err, last_err = 0;
+       u16 vid;
+
+       for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+               err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
+               if (err)
+                       last_err = err;
+       }
+
+       return last_err;
+}
+
+static int
+__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       int err, last_err = 0;
+       u16 vid;
+
+       for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
+               err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
+               if (err)
+                       last_err = err;
+       }
+
+       return last_err;
+}
+
+static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       if (!list_empty(&mlxsw_sp_port->vports_list))
+               if (mlxsw_sp_port->lagged)
+                       return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
+               else
+                       return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
+       else
+               if (mlxsw_sp_port->lagged)
+                       return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
+               else
+                       return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
+}
+
+static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+       u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
+       u16 fid = mlxsw_sp_vfid_to_fid(vfid);
+
+       if (mlxsw_sp_vport->lagged)
+               return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
+                                                            fid);
+       else
+               return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
+}
+
 static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
 {
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
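
mlxsw_sp_port_fdb_flush() above nests if/else without braces, relying on each else binding to the nearest if. For readability only, the same selection logic with explicit braces would read as follows; it uses the helpers from the hunk above and is not a functional change.

static int example_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
{
        if (!list_empty(&mlxsw_sp_port->vports_list)) {
                if (mlxsw_sp_port->lagged)
                        return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
                else
                        return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
        } else {
                if (mlxsw_sp_port->lagged)
                        return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
                else
                        return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
        }
}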
@@ -2006,10 +2115,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
        return 0;
 }
 
-static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+                                     bool flush_fdb)
 {
        struct net_device *dev = mlxsw_sp_port->dev;
 
+       if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
+               netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
+
        mlxsw_sp_port->learning = 0;
        mlxsw_sp_port->learning_sync = 0;
        mlxsw_sp_port->uc_flood = 0;
@@ -2200,10 +2313,15 @@ err_col_port_enable:
        return err;
 }
 
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+                                      struct net_device *br_dev,
+                                      bool flush_fdb);
+
 static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct net_device *lag_dev)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+       struct mlxsw_sp_port *mlxsw_sp_vport;
        struct mlxsw_sp_upper *lag;
        u16 lag_id = mlxsw_sp_port->lag_id;
        int err;
@@ -2220,7 +2338,32 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
        if (err)
                return err;
 
+       /* When we leave a LAG device that has bridges built on top, their
+        * teardown sequence is never issued, so we need to invoke the
+        * necessary cleanup routines ourselves.
+        */
+       list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
+                           vport.list) {
+               struct net_device *br_dev;
+
+               if (!mlxsw_sp_vport->bridged)
+                       continue;
+
+               br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+               mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
+       }
+
+       if (mlxsw_sp_port->bridged) {
+               mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
+               mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
+
+               if (lag->ref_count == 1)
+                       mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+       }
+
        if (lag->ref_count == 1) {
+               if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
+                       netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
                err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
                if (err)
                        return err;
@@ -2272,9 +2415,6 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
        return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
 }
 
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
-                                      struct net_device *br_dev);
-
 static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
                                   struct net_device *vlan_dev)
 {
@@ -2312,7 +2452,7 @@ static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
                struct net_device *br_dev;
 
                br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
-               mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
+               mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
        }
 
        mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
@@ -2374,7 +2514,8 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
                                }
                                mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
                        } else {
-                               err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
+                               err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
+                                                                true);
                                mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
                                if (err) {
                                        netdev_err(dev, "Failed to leave bridge\n");
@@ -2541,7 +2682,8 @@ static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
-                                      struct net_device *br_dev)
+                                      struct net_device *br_dev,
+                                      bool flush_fdb)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
        u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
@@ -2604,6 +2746,9 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
                goto err_vport_flood_set;
        }
 
+       if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
+               netdev_err(dev, "Failed to flush FDB\n");
+
        /* Switch between the vFIDs and destroy the old one if needed. */
        new_vfid->nr_vports++;
        mlxsw_sp_vport->vport.vfid = new_vfid;
@@ -2777,7 +2922,7 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
                        if (!mlxsw_sp_vport)
                                return NOTIFY_DONE;
                        err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
-                                                         upper_dev);
+                                                         upper_dev, true);
                        if (err) {
                                netdev_err(dev, "Failed to leave bridge\n");
                                return NOTIFY_BAD;
index a23dc610d2593aedce974eddf94789d3aaf181a6..7f42eb1c320e1c02ff12892c94261800e29eed64 100644 (file)
@@ -120,7 +120,6 @@ struct mlxsw_sp {
        } fdb_notify;
 #define MLXSW_SP_DEFAULT_AGEING_TIME 300
        u32 ageing_time;
-       struct mutex fdb_lock;  /* Make sure FDB sessions are atomic. */
        struct mlxsw_sp_upper master_bridge;
        struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
 };
@@ -254,5 +253,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
                           __be16 __always_unused proto, u16 vid);
 int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
                             bool set, bool only_uc);
+void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
 
 #endif
index 45479ef5bcf4f44848f9989f83f46ce66fe4c11b..e492ca2cdecd9953d5f0ec4124f7077e839803dc 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/if_bridge.h>
 #include <linux/workqueue.h>
 #include <linux/jiffies.h>
+#include <linux/rtnetlink.h>
 #include <net/switchdev.h>
 
 #include "spectrum.h"
@@ -124,14 +125,14 @@ static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
        int err;
 
        switch (state) {
-       case BR_STATE_DISABLED: /* fall-through */
        case BR_STATE_FORWARDING:
                spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
                break;
-       case BR_STATE_LISTENING: /* fall-through */
        case BR_STATE_LEARNING:
                spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
                break;
+       case BR_STATE_LISTENING: /* fall-through */
+       case BR_STATE_DISABLED: /* fall-through */
        case BR_STATE_BLOCKING:
                spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
                break;
@@ -936,6 +937,14 @@ static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
                                         vlan->vid_begin, vlan->vid_end, false);
 }
 
+void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+       u16 vid;
+
+       for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
+               __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+}
+
 static int
 mlxsw_sp_port_fdb_static_del(struct mlxsw_sp_port *mlxsw_sp_port,
                             const struct switchdev_obj_port_fdb *fdb)
@@ -1040,10 +1049,12 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
 
 static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct switchdev_obj_port_fdb *fdb,
-                                 switchdev_obj_dump_cb_t *cb)
+                                 switchdev_obj_dump_cb_t *cb,
+                                 struct net_device *orig_dev)
 {
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-       u16 vport_vid = 0, vport_fid = 0;
+       struct mlxsw_sp_port *tmp;
+       u16 vport_fid = 0;
        char *sfd_pl;
        char mac[ETH_ALEN];
        u16 fid;
@@ -1058,13 +1069,11 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
        if (!sfd_pl)
                return -ENOMEM;
 
-       mutex_lock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
        if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
                u16 tmp;
 
                tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
                vport_fid = mlxsw_sp_vfid_to_fid(tmp);
-               vport_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
        }
 
        mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
@@ -1088,12 +1097,13 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
                                mlxsw_reg_sfd_uc_unpack(sfd_pl, i, mac, &fid,
                                                        &local_port);
                                if (local_port == mlxsw_sp_port->local_port) {
-                                       if (vport_fid && vport_fid != fid)
-                                               continue;
-                                       else if (vport_fid)
-                                               fdb->vid = vport_vid;
-                                       else
+                                       if (vport_fid && vport_fid == fid)
+                                               fdb->vid = 0;
+                                       else if (!vport_fid &&
+                                                !mlxsw_sp_fid_is_vfid(fid))
                                                fdb->vid = fid;
+                                       else
+                                               continue;
                                        ether_addr_copy(fdb->addr, mac);
                                        fdb->ndm_state = NUD_REACHABLE;
                                        err = cb(&fdb->obj);
@@ -1104,14 +1114,22 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
                        case MLXSW_REG_SFD_REC_TYPE_UNICAST_LAG:
                                mlxsw_reg_sfd_uc_lag_unpack(sfd_pl, i,
                                                            mac, &fid, &lag_id);
-                               if (mlxsw_sp_port ==
-                                   mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id)) {
-                                       if (vport_fid && vport_fid != fid)
+                               tmp = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
+                               if (tmp && tmp->local_port ==
+                                   mlxsw_sp_port->local_port) {
+                                       /* LAG records can only point to LAG
+                                        * devices or VLAN devices on top.
+                                        */
+                                       if (!netif_is_lag_master(orig_dev) &&
+                                           !is_vlan_dev(orig_dev))
                                                continue;
-                                       else if (vport_fid)
-                                               fdb->vid = vport_vid;
-                                       else
+                                       if (vport_fid && vport_fid == fid)
+                                               fdb->vid = 0;
+                                       else if (!vport_fid &&
+                                                !mlxsw_sp_fid_is_vfid(fid))
                                                fdb->vid = fid;
+                                       else
+                                               continue;
                                        ether_addr_copy(fdb->addr, mac);
                                        fdb->ndm_state = NUD_REACHABLE;
                                        err = cb(&fdb->obj);
@@ -1124,7 +1142,6 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
        } while (num_rec == MLXSW_REG_SFD_REC_MAX_COUNT);
 
 out:
-       mutex_unlock(&mlxsw_sp_port->mlxsw_sp->fdb_lock);
        kfree(sfd_pl);
        return stored_err ? stored_err : err;
 }
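
In both the unicast and unicast-LAG branches above, the reported VID follows one rule: a record whose FID matches the dumping vPort's vFID-mapped FID is reported with vid 0, a record with a plain (non-vFID) FID is reported with that FID as the VID, and anything else is skipped. That rule, factored into a small helper purely for clarity (a sketch, not part of the patch):

#include <linux/types.h>

/* Returns 0 to report the record with *vid, -1 to skip it (sketch). */
static int example_fdb_dump_vid(u16 vport_fid, u16 fid, bool fid_is_vfid,
                                u16 *vid)
{
        if (vport_fid && vport_fid == fid) {
                *vid = 0;               /* record belongs to the dumping vPort */
                return 0;
        }
        if (!vport_fid && !fid_is_vfid) {
                *vid = fid;             /* plain 802.1Q FID maps 1:1 to a VID */
                return 0;
        }
        return -1;                      /* foreign vFID record: skip */
}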
@@ -1176,7 +1193,8 @@ static int mlxsw_sp_port_obj_dump(struct net_device *dev,
                break;
        case SWITCHDEV_OBJ_ID_PORT_FDB:
                err = mlxsw_sp_port_fdb_dump(mlxsw_sp_port,
-                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb);
+                                            SWITCHDEV_OBJ_PORT_FDB(obj), cb,
+                                            obj->orig_dev);
                break;
        default:
                err = -EOPNOTSUPP;
@@ -1194,14 +1212,14 @@ static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = {
        .switchdev_port_obj_dump        = mlxsw_sp_port_obj_dump,
 };
 
-static void mlxsw_sp_fdb_call_notifiers(bool learning, bool learning_sync,
-                                       bool adding, char *mac, u16 vid,
+static void mlxsw_sp_fdb_call_notifiers(bool learning_sync, bool adding,
+                                       char *mac, u16 vid,
                                        struct net_device *dev)
 {
        struct switchdev_notifier_fdb_info info;
        unsigned long notifier_type;
 
-       if (learning && learning_sync) {
+       if (learning_sync) {
                info.addr = mac;
                info.vid = vid;
                notifier_type = adding ? SWITCHDEV_FDB_ADD : SWITCHDEV_FDB_DEL;
@@ -1237,7 +1255,7 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
                        netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
                        goto just_remove;
                }
-               vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+               vid = 0;
                /* Override the physical port with the vPort. */
                mlxsw_sp_port = mlxsw_sp_vport;
        } else {
@@ -1257,8 +1275,7 @@ do_fdb_op:
 
        if (!do_notification)
                return;
-       mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
-                                   mlxsw_sp_port->learning_sync,
+       mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync,
                                    adding, mac, vid, mlxsw_sp_port->dev);
        return;
 
@@ -1273,6 +1290,7 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
                                                bool adding)
 {
        struct mlxsw_sp_port *mlxsw_sp_port;
+       struct net_device *dev;
        char mac[ETH_ALEN];
        u16 lag_vid = 0;
        u16 lag_id;
@@ -1298,11 +1316,13 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
                        goto just_remove;
                }
 
-               vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-               lag_vid = vid;
+               lag_vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+               dev = mlxsw_sp_vport->dev;
+               vid = 0;
                /* Override the physical port with the vPort. */
                mlxsw_sp_port = mlxsw_sp_vport;
        } else {
+               dev = mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev;
                vid = fid;
        }
 
@@ -1319,10 +1339,8 @@ do_fdb_op:
 
        if (!do_notification)
                return;
-       mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning,
-                                   mlxsw_sp_port->learning_sync,
-                                   adding, mac, vid,
-                                   mlxsw_sp_lag_get(mlxsw_sp, lag_id)->dev);
+       mlxsw_sp_fdb_call_notifiers(mlxsw_sp_port->learning_sync, adding, mac,
+                                   vid, dev);
        return;
 
 just_remove:
@@ -1374,7 +1392,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
 
        mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
 
-       mutex_lock(&mlxsw_sp->fdb_lock);
+       rtnl_lock();
        do {
                mlxsw_reg_sfn_pack(sfn_pl);
                err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
@@ -1387,7 +1405,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
                        mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
 
        } while (num_rec);
-       mutex_unlock(&mlxsw_sp->fdb_lock);
+       rtnl_unlock();
 
        kfree(sfn_pl);
        mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
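
The notification worker now serializes against switchdev callers by holding the RTNL lock for each polling pass instead of a driver-private mutex. A reduced sketch of that delayed-work pattern, with hypothetical names and assuming the work item was set up elsewhere with INIT_DELAYED_WORK():

#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

static struct delayed_work example_fdb_dw;

static void example_fdb_notify_work(struct work_struct *work)
{
        rtnl_lock();            /* serialize with switchdev operations */
        /* ... query and process hardware FDB notification records ... */
        rtnl_unlock();

        /* poll again after the learning interval (100 ms here) */
        schedule_delayed_work(&example_fdb_dw, msecs_to_jiffies(100));
}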
@@ -1402,7 +1420,6 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
                dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
                return err;
        }
-       mutex_init(&mlxsw_sp->fdb_lock);
        INIT_DELAYED_WORK(&mlxsw_sp->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
        mlxsw_sp->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
        mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
index a10c928bbd6b9bf5d192dfc6453395312062fe3f..00cfd95ca59d53fe040ef5b002e7d30604d35e96 100644 (file)
 
 #include "moxart_ether.h"
 
+static inline void moxart_desc_write(u32 data, u32 *desc)
+{
+       *desc = cpu_to_le32(data);
+}
+
+static inline u32 moxart_desc_read(u32 *desc)
+{
+       return le32_to_cpu(*desc);
+}
+
 static inline void moxart_emac_write(struct net_device *ndev,
                                     unsigned int reg, unsigned long value)
 {
@@ -112,7 +122,7 @@ static void moxart_mac_enable(struct net_device *ndev)
 static void moxart_mac_setup_desc_ring(struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-       void __iomem *desc;
+       void *desc;
        int i;
 
        for (i = 0; i < TX_DESC_NUM; i++) {
@@ -121,7 +131,7 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
 
                priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
        }
-       writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
+       moxart_desc_write(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
 
        priv->tx_head = 0;
        priv->tx_tail = 0;
@@ -129,8 +139,8 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
        for (i = 0; i < RX_DESC_NUM; i++) {
                desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
                memset(desc, 0, RX_REG_DESC_SIZE);
-               writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
-               writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
+               moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+               moxart_desc_write(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
                       desc + RX_REG_OFFSET_DESC1);
 
                priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
@@ -141,12 +151,12 @@ static void moxart_mac_setup_desc_ring(struct net_device *ndev)
                if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
                        netdev_err(ndev, "DMA mapping error\n");
 
-               writel(priv->rx_mapping[i],
+               moxart_desc_write(priv->rx_mapping[i],
                       desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
-               writel(priv->rx_buf[i],
+               moxart_desc_write((uintptr_t)priv->rx_buf[i],
                       desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
        }
-       writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
+       moxart_desc_write(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
 
        priv->rx_head = 0;
 
@@ -201,14 +211,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                                                      napi);
        struct net_device *ndev = priv->ndev;
        struct sk_buff *skb;
-       void __iomem *desc;
+       void *desc;
        unsigned int desc0, len;
        int rx_head = priv->rx_head;
        int rx = 0;
 
        while (rx < budget) {
                desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
-               desc0 = readl(desc + RX_REG_OFFSET_DESC0);
+               desc0 = moxart_desc_read(desc + RX_REG_OFFSET_DESC0);
+               rmb(); /* ensure desc0 is up to date */
 
                if (desc0 & RX_DESC0_DMA_OWN)
                        break;
@@ -250,7 +261,8 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget)
                        priv->stats.multicast++;
 
 rx_next:
-               writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+               wmb(); /* prevent setting ownership back too early */
+               moxart_desc_write(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
 
                rx_head = RX_NEXT(rx_head);
                priv->rx_head = rx_head;
@@ -310,7 +322,7 @@ static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
 static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct moxart_mac_priv_t *priv = netdev_priv(ndev);
-       void __iomem *desc;
+       void *desc;
        unsigned int len;
        unsigned int tx_head = priv->tx_head;
        u32 txdes1;
@@ -319,11 +331,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
 
        spin_lock_irq(&priv->txlock);
-       if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
+       if (moxart_desc_read(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
                net_dbg_ratelimited("no TX space for packet\n");
                priv->stats.tx_dropped++;
                goto out_unlock;
        }
+       rmb(); /* read descriptor data only after seeing TX_DESC0_DMA_OWN cleared */
 
        len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
 
@@ -337,9 +350,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        priv->tx_len[tx_head] = len;
        priv->tx_skb[tx_head] = skb;
 
-       writel(priv->tx_mapping[tx_head],
+       moxart_desc_write(priv->tx_mapping[tx_head],
               desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
-       writel(skb->data,
+       moxart_desc_write((uintptr_t)skb->data,
               desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
 
        if (skb->len < ETH_ZLEN) {
@@ -354,8 +367,9 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK);
        if (tx_head == TX_DESC_NUM_MASK)
                txdes1 |= TX_DESC1_END;
-       writel(txdes1, desc + TX_REG_OFFSET_DESC1);
-       writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
+       moxart_desc_write(txdes1, desc + TX_REG_OFFSET_DESC1);
+       wmb(); /* flush descriptor before transferring ownership */
+       moxart_desc_write(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
 
        /* start to send packet */
        writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
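
The descriptors here live in cacheable DMA memory rather than MMIO space, which is why the patch drops __iomem/readl/writel in favour of little-endian accessors plus explicit barriers: all descriptor fields are written first, a wmb() orders them, and only then is the ownership bit handed to the DMA engine; on the read side the ownership bit is checked before the rest of the descriptor is trusted. A generic sketch of that handshake, with a hypothetical descriptor layout:

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h>

#define EX_OWN_BIT      0x80000000U     /* illustrative "owned by hardware" flag */

struct example_desc {
        __le32 ctrl;    /* ownership + status */
        __le32 buf;     /* DMA address of the data buffer */
};

static void example_hand_to_hw(struct example_desc *desc, u32 dma_addr)
{
        desc->buf = cpu_to_le32(dma_addr);
        wmb();          /* descriptor body visible before ownership transfer */
        desc->ctrl = cpu_to_le32(EX_OWN_BIT);
}

static bool example_hw_done(struct example_desc *desc)
{
        if (le32_to_cpu(desc->ctrl) & EX_OWN_BIT)
                return false;   /* still owned by the DMA engine */
        rmb();                  /* read the body only after seeing OWN cleared */
        return true;
}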
index 2be9280d608ce9efe320ea7b20b01b1c350a023e..93a9563ac7c6730eec8240ac187a86b9831c9741 100644 (file)
@@ -300,7 +300,7 @@ struct moxart_mac_priv_t {
 
        dma_addr_t rx_base;
        dma_addr_t rx_mapping[RX_DESC_NUM];
-       void __iomem *rx_desc_base;
+       void *rx_desc_base;
        unsigned char *rx_buf_base;
        unsigned char *rx_buf[RX_DESC_NUM];
        unsigned int rx_head;
@@ -308,7 +308,7 @@ struct moxart_mac_priv_t {
 
        dma_addr_t tx_base;
        dma_addr_t tx_mapping[TX_DESC_NUM];
-       void __iomem *tx_desc_base;
+       void *tx_desc_base;
        unsigned char *tx_buf_base;
        unsigned char *tx_buf[RX_DESC_NUM];
        unsigned int tx_head;
index 50d5604833edc8e0ffde1f7f8974002852f8bb97..e0993eba5df3f8081bff6329a97b4753e876cbe3 100644 (file)
@@ -2223,8 +2223,6 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
-#ifdef CONFIG_PCI_MSI
-
 static irqreturn_t vxge_tx_msix_handle(int irq, void *dev_id)
 {
        struct vxge_fifo *fifo = (struct vxge_fifo *)dev_id;
@@ -2442,16 +2440,13 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
        if (vdev->config.intr_type == MSI_X)
                pci_disable_msix(vdev->pdev);
 }
-#endif
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
-#ifdef CONFIG_PCI_MSI
-       if (vdev->config.intr_type == MSI_X) {
+       if (IS_ENABLED(CONFIG_PCI_MSI) &&
+           vdev->config.intr_type == MSI_X) {
                vxge_rem_msix_isr(vdev);
-       } else
-#endif
-       if (vdev->config.intr_type == INTA) {
+       } else if (vdev->config.intr_type == INTA) {
                        synchronize_irq(vdev->pdev->irq);
                        free_irq(vdev->pdev->irq, vdev);
        }
@@ -2460,11 +2455,10 @@ static void vxge_rem_isr(struct vxgedev *vdev)
 static int vxge_add_isr(struct vxgedev *vdev)
 {
        int ret = 0;
-#ifdef CONFIG_PCI_MSI
        int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
        int pci_fun = PCI_FUNC(vdev->pdev->devfn);
 
-       if (vdev->config.intr_type == MSI_X)
+       if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X)
                ret = vxge_enable_msix(vdev);
 
        if (ret) {
@@ -2475,7 +2469,7 @@ static int vxge_add_isr(struct vxgedev *vdev)
                vdev->config.intr_type = INTA;
        }
 
-       if (vdev->config.intr_type == MSI_X) {
+       if (IS_ENABLED(CONFIG_PCI_MSI) && vdev->config.intr_type == MSI_X) {
                for (intr_idx = 0;
                     intr_idx < (vdev->no_of_vpath *
                        VXGE_HW_VPATH_MSIX_ACTIVE); intr_idx++) {
@@ -2576,9 +2570,8 @@ static int vxge_add_isr(struct vxgedev *vdev)
                vdev->vxge_entries[intr_cnt].in_use = 1;
                vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
        }
-INTA_MODE:
-#endif
 
+INTA_MODE:
        if (vdev->config.intr_type == INTA) {
                snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
                        "%s:vxge:INTA", vdev->ndev->name);
@@ -3889,12 +3882,12 @@ static void vxge_device_config_init(struct vxge_hw_device_config *device_config,
        if (max_mac_vpath > VXGE_MAX_MAC_ADDR_COUNT)
                max_mac_vpath = VXGE_MAX_MAC_ADDR_COUNT;
 
-#ifndef CONFIG_PCI_MSI
-       vxge_debug_init(VXGE_ERR,
-               "%s: This Kernel does not support "
-               "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
-       *intr_type = INTA;
-#endif
+       if (!IS_ENABLED(CONFIG_PCI_MSI)) {
+               vxge_debug_init(VXGE_ERR,
+                       "%s: This Kernel does not support "
+                       "MSI-X. Defaulting to INTA", VXGE_DRIVER_NAME);
+               *intr_type = INTA;
+       }
 
        /* Configure whether MSI-X or IRQL. */
        switch (*intr_type) {
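
Replacing the #ifdef CONFIG_PCI_MSI blocks with IS_ENABLED() keeps both branches visible to the compiler, so they are always type-checked, while dead-code elimination still drops the MSI-X path on kernels built without PCI_MSI. In general the conversion looks like this; CONFIG_FOO and the two setup functions are hypothetical stand-ins:

#include <linux/kernel.h>

static void foo_setup(void)      { pr_info("CONFIG_FOO path\n"); }
static void fallback_setup(void) { pr_info("fallback path\n"); }

static void example_setup(void)
{
        /* Both branches are parsed and type-checked; the dead one is
         * removed at compile time, unlike with #ifdef.
         */
        if (IS_ENABLED(CONFIG_FOO))
                foo_setup();
        else
                fallback_setup();
}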
index afa445842f3ea1618a0bde1c5a617d685137a344..52d9a94aebb9af0aacfe021454df32dfbfa14675 100644 (file)
@@ -1038,7 +1038,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
 
        error = register_netdev(dev);
        if (error != 0) {
-               dev_err(&pdev->dev, "Regiter EMC w90p910 FAILED\n");
+               dev_err(&pdev->dev, "Register EMC w90p910 FAILED\n");
                error = -ENODEV;
                goto failed_put_rmiiclk;
        }
index 9fbe92ac225b00f93ed33d187f03f3db2f879f2e..b2160d1b9c7175daee2a2564082dfc38c096c4b2 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
  *
@@ -837,6 +837,8 @@ static inline void ravb_write(struct net_device *ndev, u32 data,
        iowrite32(data, priv->addr + reg);
 }
 
+void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
+                u32 set);
 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
 
 irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
index ac43ed914fcf270653a6101a2f6c736b7ac2ef00..c936682aae68df0808eb68c63c25b42b0e3b4dca 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2014-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  * Based on the SuperH Ethernet driver
  *
                 NETIF_MSG_RX_ERR | \
                 NETIF_MSG_TX_ERR)
 
+void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
+                u32 set)
+{
+       ravb_write(ndev, (ravb_read(ndev, reg) & ~clear) | set, reg);
+}
+
 int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value)
 {
        int i;
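
ravb_modify() above, like sh_eth_modify() later in this diff, centralizes the read-modify-write sequence so callers pass a clear mask and the bits to set instead of open-coding read()/write() pairs. The shape of the conversion, shown on a generic MMIO accessor as a sketch rather than driver code:

#include <linux/io.h>
#include <linux/types.h>

/* Generic clear-then-set update of a 32-bit MMIO register, the pattern
 * that ravb_modify()/sh_eth_modify() introduce in this diff.
 */
static void example_modify(void __iomem *reg, u32 clear, u32 set)
{
        u32 val = ioread32(reg);

        val &= ~clear;
        val |= set;
        iowrite32(val, reg);
}

/* Usage: update one field without touching the other bits, e.g.
 *      example_modify(base + MODE_REG, MODE_MASK, MODE_CONFIG);
 * (register and field names here are placeholders).
 */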
@@ -59,8 +65,7 @@ static int ravb_config(struct net_device *ndev)
        int error;
 
        /* Set config mode */
-       ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_CONFIG,
-                  CCC);
+       ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
        /* Check if the operating mode is changed to the config mode */
        error = ravb_wait(ndev, CSR, CSR_OPS, CSR_OPS_CONFIG);
        if (error)
@@ -72,13 +77,8 @@ static int ravb_config(struct net_device *ndev)
 static void ravb_set_duplex(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
-       u32 ecmr = ravb_read(ndev, ECMR);
 
-       if (priv->duplex)       /* Full */
-               ecmr |=  ECMR_DM;
-       else                    /* Half */
-               ecmr &= ~ECMR_DM;
-       ravb_write(ndev, ecmr, ECMR);
+       ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0);
 }
 
 static void ravb_set_rate(struct net_device *ndev)
@@ -131,13 +131,8 @@ static void ravb_mdio_ctrl(struct mdiobb_ctrl *ctrl, u32 mask, int set)
 {
        struct ravb_private *priv = container_of(ctrl, struct ravb_private,
                                                 mdiobb);
-       u32 pir = ravb_read(priv->ndev, PIR);
 
-       if (set)
-               pir |=  mask;
-       else
-               pir &= ~mask;
-       ravb_write(priv->ndev, pir, PIR);
+       ravb_modify(priv->ndev, PIR, mask, set ? mask : 0);
 }
 
 /* MDC pin control */
@@ -393,9 +388,9 @@ static int ravb_dmac_init(struct net_device *ndev)
        ravb_ring_format(ndev, RAVB_NC);
 
 #if defined(__LITTLE_ENDIAN)
-       ravb_write(ndev, ravb_read(ndev, CCC) & ~CCC_BOC, CCC);
+       ravb_modify(ndev, CCC, CCC_BOC, 0);
 #else
-       ravb_write(ndev, ravb_read(ndev, CCC) | CCC_BOC, CCC);
+       ravb_modify(ndev, CCC, CCC_BOC, CCC_BOC);
 #endif
 
        /* Set AVB RX */
@@ -418,8 +413,7 @@ static int ravb_dmac_init(struct net_device *ndev)
        ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
 
        /* Setting the control will start the AVB-DMAC process. */
-       ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) | CCC_OPC_OPERATION,
-                  CCC);
+       ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
 
        return 0;
 }
@@ -493,7 +487,7 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
                                break;
                        }
                }
-               ravb_write(ndev, ravb_read(ndev, TCCR) | TCCR_TFR, TCCR);
+               ravb_modify(ndev, TCCR, TCCR_TFR, TCCR_TFR);
        }
 }
 
@@ -613,13 +607,13 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
 static void ravb_rcv_snd_disable(struct net_device *ndev)
 {
        /* Disable TX and RX */
-       ravb_write(ndev, ravb_read(ndev, ECMR) & ~(ECMR_RE | ECMR_TE), ECMR);
+       ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
 }
 
 static void ravb_rcv_snd_enable(struct net_device *ndev)
 {
        /* Enable TX and RX */
-       ravb_write(ndev, ravb_read(ndev, ECMR) | ECMR_RE | ECMR_TE, ECMR);
+       ravb_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
 }
 
 /* function for waiting dma process finished */
@@ -812,8 +806,8 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 
        /* Re-enable RX/TX interrupts */
        spin_lock_irqsave(&priv->lock, flags);
-       ravb_write(ndev, ravb_read(ndev, RIC0) | mask, RIC0);
-       ravb_write(ndev, ravb_read(ndev, TIC)  | mask,  TIC);
+       ravb_modify(ndev, RIC0, mask, mask);
+       ravb_modify(ndev, TIC,  mask, mask);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -852,8 +846,7 @@ static void ravb_adjust_link(struct net_device *ndev)
                        ravb_set_rate(ndev);
                }
                if (!priv->link) {
-                       ravb_write(ndev, ravb_read(ndev, ECMR) & ~ECMR_TXF,
-                                  ECMR);
+                       ravb_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = true;
                        priv->link = phydev->link;
                        if (priv->no_avb_link)
@@ -1393,7 +1386,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        desc--;
        desc->die_dt = DT_FSTART;
 
-       ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
+       ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
 
        priv->cur_tx[q] += NUM_TX_DESC;
        if (priv->cur_tx[q] - priv->dirty_tx[q] >
@@ -1468,15 +1461,10 @@ static void ravb_set_rx_mode(struct net_device *ndev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;
-       u32 ecmr;
 
        spin_lock_irqsave(&priv->lock, flags);
-       ecmr = ravb_read(ndev, ECMR);
-       if (ndev->flags & IFF_PROMISC)
-               ecmr |=  ECMR_PRM;
-       else
-               ecmr &= ~ECMR_PRM;
-       ravb_write(ndev, ecmr, ECMR);
+       ravb_modify(ndev, ECMR, ECMR_PRM,
+                   ndev->flags & IFF_PROMISC ? ECMR_PRM : 0);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
 }
@@ -1804,14 +1792,12 @@ static int ravb_probe(struct platform_device *pdev)
 
        /* Set AVB config mode */
        if (chip_id == RCAR_GEN2) {
-               ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
-                          CCC_OPC_CONFIG, CCC);
+               ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
                /* Set CSEL value */
-               ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) |
-                          CCC_CSEL_HPB, CCC);
+               ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
        } else {
-               ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_OPC) |
-                          CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
+               ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
+                           CCC_GAC | CCC_CSEL_HPB);
        }
 
        /* Set CSEL value */
@@ -1824,7 +1810,7 @@ static int ravb_probe(struct platform_device *pdev)
                goto out_release;
 
        /* Request GTI loading */
-       ravb_write(ndev, ravb_read(ndev, GCCR) | GCCR_LTI, GCCR);
+       ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
 
        /* Allocate descriptor base address table */
        priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
index 7a8ce920c49e709b067321ae91306153f4f24390..57992ccc46575dc6ba21bc4d7ce86e73576e46b9 100644 (file)
@@ -2,7 +2,7 @@
  *
  * Copyright (C) 2013-2015 Renesas Electronics Corporation
  * Copyright (C) 2015 Renesas Solutions Corp.
- * Copyright (C) 2015 Cogent Embedded, Inc. <source@cogentembedded.com>
+ * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -21,7 +21,7 @@ static int ravb_ptp_tcr_request(struct ravb_private *priv, u32 request)
        if (error)
                return error;
 
-       ravb_write(ndev, ravb_read(ndev, GCCR) | request, GCCR);
+       ravb_modify(ndev, GCCR, request, request);
        return ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
 }
 
@@ -185,7 +185,6 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
                                                 ptp.info);
        struct net_device *ndev = priv->ndev;
        unsigned long flags;
-       u32 gic;
 
        if (req->index)
                return -EINVAL;
@@ -195,12 +194,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
        priv->ptp.extts[req->index] = on;
 
        spin_lock_irqsave(&priv->lock, flags);
-       gic = ravb_read(ndev, GIC);
-       if (on)
-               gic |= GIC_PTCE;
-       else
-               gic &= ~GIC_PTCE;
-       ravb_write(ndev, gic, GIC);
+       ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -216,7 +210,6 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
        struct ravb_ptp_perout *perout;
        unsigned long flags;
        int error = 0;
-       u32 gic;
 
        if (req->index)
                return -EINVAL;
@@ -248,9 +241,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
                error = ravb_ptp_update_compare(priv, (u32)start_ns);
                if (!error) {
                        /* Unmask interrupt */
-                       gic = ravb_read(ndev, GIC);
-                       gic |= GIC_PTME;
-                       ravb_write(ndev, gic, GIC);
+                       ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
                }
        } else  {
                spin_lock_irqsave(&priv->lock, flags);
@@ -259,9 +250,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
                perout->period = 0;
 
                /* Mask interrupt */
-               gic = ravb_read(ndev, GIC);
-               gic &= ~GIC_PTME;
-               ravb_write(ndev, gic, GIC);
+               ravb_modify(ndev, GIC, GIC_PTME, 0);
        }
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
@@ -331,7 +320,6 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
 {
        struct ravb_private *priv = netdev_priv(ndev);
        unsigned long flags;
-       u32 gccr;
 
        priv->ptp.info = ravb_ptp_info;
 
@@ -340,8 +328,7 @@ void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
 
        spin_lock_irqsave(&priv->lock, flags);
        ravb_wait(ndev, GCCR, GCCR_TCR, GCCR_TCR_NOREQ);
-       gccr = ravb_read(ndev, GCCR) & ~GCCR_TCSS;
-       ravb_write(ndev, gccr | GCCR_TCSS_ADJGPTP, GCCR);
+       ravb_modify(ndev, GCCR, GCCR_TCSS, GCCR_TCSS_ADJGPTP);
        mmiowb();
        spin_unlock_irqrestore(&priv->lock, flags);
 
index dfa9e59c9442884dfe6a52a5c6df0c4c70fa4737..0a150b2289146fe6c996b4ad338b9d6b45f56803 100644 (file)
@@ -3,7 +3,7 @@
  *  Copyright (C) 2014  Renesas Electronics Corporation
  *  Copyright (C) 2006-2012 Nobuhiro Iwamatsu
  *  Copyright (C) 2008-2014 Renesas Solutions Corp.
- *  Copyright (C) 2013-2014 Cogent Embedded, Inc.
+ *  Copyright (C) 2013-2016 Cogent Embedded, Inc.
  *  Copyright (C) 2014 Codethink Limited
  *
  *  This program is free software; you can redistribute it and/or modify it
@@ -428,6 +428,13 @@ static u32 sh_eth_read(struct net_device *ndev, int enum_index)
        return ioread32(mdp->addr + offset);
 }
 
+static void sh_eth_modify(struct net_device *ndev, int enum_index, u32 clear,
+                         u32 set)
+{
+       sh_eth_write(ndev, (sh_eth_read(ndev, enum_index) & ~clear) | set,
+                    enum_index);
+}
+
 static bool sh_eth_is_gether(struct sh_eth_private *mdp)
 {
        return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -467,10 +474,7 @@ static void sh_eth_set_duplex(struct net_device *ndev)
 {
        struct sh_eth_private *mdp = netdev_priv(ndev);
 
-       if (mdp->duplex) /* Full */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
-       else            /* Half */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
+       sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
 }
 
 static void sh_eth_chip_reset(struct net_device *ndev)
@@ -583,10 +587,10 @@ static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
+               sh_eth_modify(ndev, ECMR, ECMR_ELB, 0);
                break;
        case 100:/* 100BASE */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
+               sh_eth_modify(ndev, ECMR, ECMR_ELB, ECMR_ELB);
                break;
        default:
                break;
@@ -649,10 +653,10 @@ static void sh_eth_set_rate_sh7724(struct net_device *ndev)
 
        switch (mdp->speed) {
        case 10: /* 10BASE */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
+               sh_eth_modify(ndev, ECMR, ECMR_RTM, 0);
                break;
        case 100:/* 100BASE */
-               sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
+               sh_eth_modify(ndev, ECMR, ECMR_RTM, ECMR_RTM);
                break;
        default:
                break;
@@ -924,8 +928,7 @@ static int sh_eth_reset(struct net_device *ndev)
 
        if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
                sh_eth_write(ndev, EDSR_ENALL, EDSR);
-               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
-                            EDMR);
+               sh_eth_modify(ndev, EDMR, EDMR_SRST_GETHER, EDMR_SRST_GETHER);
 
                ret = sh_eth_check_reset(ndev);
                if (ret)
@@ -949,11 +952,9 @@ static int sh_eth_reset(struct net_device *ndev)
                if (mdp->cd->select_mii)
                        sh_eth_select_mii(ndev);
        } else {
-               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
-                            EDMR);
+               sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, EDMR_SRST_ETHER);
                mdelay(3);
-               sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
-                            EDMR);
+               sh_eth_modify(ndev, EDMR, EDMR_SRST_ETHER, 0);
        }
 
        return ret;
@@ -1285,7 +1286,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
        sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
                     RFLR);
 
-       sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
+       sh_eth_modify(ndev, EESR, 0, 0);
        if (start) {
                mdp->irq_enabled = true;
                sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
@@ -1532,15 +1533,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 static void sh_eth_rcv_snd_disable(struct net_device *ndev)
 {
        /* disable tx and rx */
-       sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
-               ~(ECMR_RE | ECMR_TE), ECMR);
+       sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, 0);
 }
 
 static void sh_eth_rcv_snd_enable(struct net_device *ndev)
 {
        /* enable tx and rx */
-       sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
-               (ECMR_RE | ECMR_TE), ECMR);
+       sh_eth_modify(ndev, ECMR, ECMR_RE | ECMR_TE, ECMR_RE | ECMR_TE);
 }
 
 /* error control function */
@@ -1569,13 +1568,11 @@ static void sh_eth_error(struct net_device *ndev, u32 intr_status)
                                sh_eth_rcv_snd_disable(ndev);
                        } else {
                                /* Link Up */
-                               sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
-                                                  ~DMAC_M_ECI, EESIPR);
+                               sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0);
                                /* clear int */
-                               sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
-                                            ECSR);
-                               sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
-                                                  DMAC_M_ECI, EESIPR);
+                               sh_eth_modify(ndev, ECSR, 0, 0);
+                               sh_eth_modify(ndev, EESIPR, DMAC_M_ECI,
+                                             DMAC_M_ECI);
                                /* enable tx and rx */
                                sh_eth_rcv_snd_enable(ndev);
                        }
@@ -1765,9 +1762,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
                                mdp->cd->set_rate(ndev);
                }
                if (!mdp->link) {
-                       sh_eth_write(ndev,
-                                    sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
-                                    ECMR);
+                       sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
                        new_state = 1;
                        mdp->link = phydev->link;
                        if (mdp->cd->no_psr || mdp->no_ether_link)
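
The sh_eth.c conversion above funnels every open-coded read/mask/write sequence through the new sh_eth_modify() helper, mirroring the ravb_modify() use in the ravb_ptp hunk earlier: bits named in 'clear' are cleared first, then the bits in 'set' are ORed back in, so passing (mask, mask) sets a field and (mask, 0) clears it. A minimal stand-alone C sketch of that clear/set semantics; names and bit positions here are illustrative, not taken from sh_eth.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Same semantics as sh_eth_modify()/ravb_modify(): clear, then set. */
    static uint32_t reg_modify(uint32_t reg, uint32_t clear, uint32_t set)
    {
            return (reg & ~clear) | set;
    }

    int main(void)
    {
            const uint32_t ECMR_DM = 1u << 1;               /* bit position illustrative */
            uint32_t ecmr = 0;

            ecmr = reg_modify(ecmr, ECMR_DM, ECMR_DM);      /* full duplex: set   */
            ecmr = reg_modify(ecmr, ECMR_DM, 0);            /* half duplex: clear */

            printf("ECMR = 0x%08x\n", ecmr);
            return 0;
    }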
index a4ab71d43e4e9d521e2a58f75aca4a4dff3f9c30..166a7fc87e2f41966ab2b54a641ea1c7bfc46f1d 100644 (file)
@@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
        info.addr = lw->addr;
        info.vid = lw->vid;
 
+       rtnl_lock();
        if (learned && removing)
                call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
                                         lw->rocker_port->dev, &info.info);
        else if (learned && !removing)
                call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
                                         lw->rocker_port->dev, &info.info);
+       rtnl_unlock();
 
        rocker_port_kfree(lw->trans, work);
 }
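
The rocker fix above wraps the deferred FDB notifier calls in rtnl_lock()/rtnl_unlock(), since the switchdev notifier chain is expected to run under RTNL and the learn work executes from a workqueue rather than from a caller that already holds it. The shape of that pattern as a hedged sketch; the handler name is hypothetical and the notifier call is elided:

    #include <linux/rtnetlink.h>
    #include <linux/workqueue.h>

    /* Work handlers do not inherit RTNL from anyone, so take it explicitly
     * around calls that require it.
     */
    static void mydrv_fdb_learn_work(struct work_struct *work)
    {
            rtnl_lock();
            /* call_switchdev_notifiers(...) would go here, under RTNL */
            rtnl_unlock();
    }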
index dcc80b9d4370eb47b1e001cf7c44fa95ebdc8ce1..31e968561d5ce37f2c1a4b0eb3553866a2f0df1e 100644 (file)
@@ -1,4 +1,4 @@
 obj-$(CONFIG_SXGBE_ETH) += samsung-sxgbe.o
 samsung-sxgbe-objs:= sxgbe_platform.o sxgbe_main.o sxgbe_desc.o \
                sxgbe_dma.o sxgbe_core.o sxgbe_mtl.o  sxgbe_mdio.o \
-               sxgbe_ethtool.o sxgbe_xpcs.o $(samsung-sxgbe-y)
+               sxgbe_ethtool.o $(samsung-sxgbe-y)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.c
deleted file mode 100644 (file)
index 51c3219..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/* 10G controller driver for Samsung SoCs
- *
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/bitops.h>
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include "sxgbe_common.h"
-#include "sxgbe_xpcs.h"
-
-static int sxgbe_xpcs_read(struct net_device *ndev, unsigned int reg)
-{
-       u32 value;
-       struct sxgbe_priv_data *priv = netdev_priv(ndev);
-
-       value = readl(priv->ioaddr + XPCS_OFFSET + reg);
-
-       return value;
-}
-
-static int sxgbe_xpcs_write(struct net_device *ndev, int reg, int data)
-{
-       struct sxgbe_priv_data *priv = netdev_priv(ndev);
-
-       writel(data, priv->ioaddr + XPCS_OFFSET + reg);
-
-       return 0;
-}
-
-int sxgbe_xpcs_init(struct net_device *ndev)
-{
-       u32 value;
-
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       /* 10G XAUI mode */
-       sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
-       sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
-       sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, value | BIT(13));
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
-
-       do {
-               value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
-       } while ((value & XPCS_QSEQ_STATE_MPLLOFF) == XPCS_QSEQ_STATE_STABLE);
-
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
-
-       do {
-               value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
-       } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
-
-       return 0;
-}
-
-int sxgbe_xpcs_init_1G(struct net_device *ndev)
-{
-       int value;
-
-       /* 10GBASE-X PCS (1G) mode */
-       sxgbe_xpcs_write(ndev, SR_PCS_CONTROL2, XPCS_TYPE_SEL_X);
-       sxgbe_xpcs_write(ndev, VR_PCS_MMD_XAUI_MODE_CONTROL, XPCS_XAUI_MODE);
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(13));
-
-       value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
-       sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(6));
-       sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value & ~BIT(13));
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value | BIT(11));
-
-       do {
-               value = sxgbe_xpcs_read(ndev, VR_PCS_MMD_DIGITAL_STATUS);
-       } while ((value & XPCS_QSEQ_STATE_MPLLOFF) != XPCS_QSEQ_STATE_STABLE);
-
-       value = sxgbe_xpcs_read(ndev, SR_PCS_MMD_CONTROL1);
-       sxgbe_xpcs_write(ndev, SR_PCS_MMD_CONTROL1, value & ~BIT(11));
-
-       /* Auto Negotiation cluase 37 enable */
-       value = sxgbe_xpcs_read(ndev, SR_MII_MMD_CONTROL);
-       sxgbe_xpcs_write(ndev, SR_MII_MMD_CONTROL, value | BIT(12));
-
-       return 0;
-}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_xpcs.h
deleted file mode 100644 (file)
index 6b26a50..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-/* 10G controller driver for Samsung SoCs
- *
- * Copyright (C) 2013 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * Author: Byungho An <bh74.an@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __SXGBE_XPCS_H__
-#define __SXGBE_XPCS_H__
-
-/* XPCS Registers */
-#define XPCS_OFFSET                    0x1A060000
-#define SR_PCS_MMD_CONTROL1            0x030000
-#define SR_PCS_CONTROL2                        0x030007
-#define VR_PCS_MMD_XAUI_MODE_CONTROL   0x038004
-#define VR_PCS_MMD_DIGITAL_STATUS      0x038010
-#define SR_MII_MMD_CONTROL             0x1F0000
-#define SR_MII_MMD_AN_ADV              0x1F0004
-#define SR_MII_MMD_AN_LINK_PARTNER_BA  0x1F0005
-#define VR_MII_MMD_AN_CONTROL          0x1F8001
-#define VR_MII_MMD_AN_INT_STATUS       0x1F8002
-
-#define XPCS_QSEQ_STATE_STABLE         0x10
-#define XPCS_QSEQ_STATE_MPLLOFF                0x1c
-#define XPCS_TYPE_SEL_R                        0x00
-#define XPCS_TYPE_SEL_X                        0x01
-#define XPCS_TYPE_SEL_W                        0x02
-#define XPCS_XAUI_MODE                 0x00
-#define XPCS_RXAUI_MODE                        0x01
-
-int sxgbe_xpcs_init(struct net_device *ndev);
-int sxgbe_xpcs_init_1G(struct net_device *ndev);
-
-#endif /* __SXGBE_XPCS_H__ */
index e23a642357e7c01d7010ae070b47632b3a926ba9..2437227712dcd0ee5fd0404113e35527646a8b76 100644 (file)
@@ -51,7 +51,6 @@
 #endif
 
 #ifdef CONFIG_PPC_PMAC
-#include <asm/pci-bridge.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
index cc106d892e2975c924eafc8e850a4da8188ef227..942a95db20614ae2e5a9441e2514641a32695eaf 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/mutex.h>
 #include <linux/highmem.h>
 #include <linux/if_vlan.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/sunvnet.h>
 
 #if IS_ENABLED(CONFIG_IPV6)
 #include <linux/icmpv6.h>
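
The sunvnet changes in this file add tracepoints (trace_vnet_rx_one(), trace_vnet_tx_trigger() and friends, used in the hunks below); defining CREATE_TRACE_POINTS before including the trace header, in exactly one compilation unit, is what emits the tracepoint bodies. A minimal, hypothetical trace header showing the convention; this is not the actual trace/events/sunvnet.h, just the usual template:

    /* hypothetical include/trace/events/mydrv.h */
    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM mydrv

    #if !defined(_TRACE_MYDRV_H) || defined(TRACE_HEADER_MULTI_READ)
    #define _TRACE_MYDRV_H

    #include <linux/tracepoint.h>

    TRACE_EVENT(mydrv_rx_one,
            TP_PROTO(int index, int ack),
            TP_ARGS(index, ack),
            TP_STRUCT__entry(
                    __field(int, index)
                    __field(int, ack)
            ),
            TP_fast_assign(
                    __entry->index = index;
                    __entry->ack = ack;
            ),
            TP_printk("index=%d ack=%d", __entry->index, __entry->ack)
    );

    #endif /* _TRACE_MYDRV_H */

    /* This part must stay outside the multi-read guard. */
    #include <trace/define_trace.h>

Other files that want to emit the events just include the header without CREATE_TRACE_POINTS and call trace_mydrv_rx_one(...); only the one file that defines the macro instantiates the tracepoint code.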
@@ -389,17 +391,27 @@ static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
        if (vio_version_after_eq(&port->vio, 1, 8)) {
                struct vio_net_dext *dext = vio_net_ext(desc);
 
+               skb_reset_network_header(skb);
+
                if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
                        if (skb->protocol == ETH_P_IP) {
-                               struct iphdr *iph = (struct iphdr *)skb->data;
+                               struct iphdr *iph = ip_hdr(skb);
 
                                iph->check = 0;
                                ip_send_check(iph);
                        }
                }
                if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
-                   skb->ip_summed == CHECKSUM_NONE)
-                       vnet_fullcsum(skb);
+                   skb->ip_summed == CHECKSUM_NONE) {
+                       if (skb->protocol == htons(ETH_P_IP)) {
+                               struct iphdr *iph = ip_hdr(skb);
+                               int ihl = iph->ihl * 4;
+
+                               skb_reset_transport_header(skb);
+                               skb_set_transport_header(skb, ihl);
+                               vnet_fullcsum(skb);
+                       }
+               }
                if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
                        skb->ip_summed = CHECKSUM_PARTIAL;
                        skb->csum_level = 0;
@@ -530,6 +542,8 @@ static int vnet_walk_rx_one(struct vnet_port *port,
        err = vnet_rx_one(port, desc);
        if (err == -ECONNRESET)
                return err;
+       trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
+                         index, desc->hdr.ack);
        desc->hdr.state = VIO_DESC_DONE;
        err = put_rx_desc(port, dr, desc, index);
        if (err < 0)
@@ -577,9 +591,15 @@ static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
                ack_start = ack_end = vio_dring_prev(dr, start);
        if (send_ack) {
                port->napi_resume = false;
+               trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
+                                              port->vio._peer_sid,
+                                              ack_end, *npkts);
                return vnet_send_ack(port, dr, ack_start, ack_end,
                                     VIO_DRING_STOPPED);
        } else  {
+               trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
+                                               port->vio._peer_sid,
+                                               ack_end, *npkts);
                port->napi_resume = true;
                port->napi_stop_idx = ack_end;
                return 1;
@@ -653,6 +673,8 @@ static int vnet_ack(struct vnet_port *port, void *msgbuf)
        /* sync for race conditions with vnet_start_xmit() and tell xmit it
         * is time to send a trigger.
         */
+       trace_vnet_rx_stopped_ack(port->vio._local_sid,
+                                 port->vio._peer_sid, end);
        dr->cons = vio_dring_next(dr, end);
        desc = vio_dring_entry(dr, dr->cons);
        if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
@@ -876,6 +898,9 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
        int retries = 0;
 
        if (port->stop_rx) {
+               trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
+                                                 port->vio._peer_sid,
+                                                 port->stop_rx_idx, -1);
                err = vnet_send_ack(port,
                                    &port->vio.drings[VIO_DRIVER_RX_RING],
                                    port->stop_rx_idx, -1,
@@ -898,6 +923,8 @@ static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
                if (retries++ > VNET_MAX_RETRIES)
                        break;
        } while (err == -EAGAIN);
+       trace_vnet_tx_trigger(port->vio._local_sid,
+                             port->vio._peer_sid, start, err);
 
        return err;
 }
@@ -1404,8 +1431,11 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * producer to consumer announcement that work is available to the
         * consumer
         */
-       if (!port->start_cons)
-               goto ldc_start_done; /* previous trigger suffices */
+       if (!port->start_cons) { /* previous trigger suffices */
+               trace_vnet_skip_tx_trigger(port->vio._local_sid,
+                                          port->vio._peer_sid, dr->cons);
+               goto ldc_start_done;
+       }
 
        err = __vnet_tx_trigger(port, dr->cons);
        if (unlikely(err < 0)) {
index 70814b7386b3119e55ac8be1a92cfdb8c0740c5c..fc8bbff2d7e37ec19d807008c1e9b70040551ea2 100644 (file)
@@ -1880,9 +1880,9 @@ static int dwceqos_open(struct net_device *ndev)
        }
        netdev_reset_queue(ndev);
 
+       dwceqos_init_hw(lp);
        napi_enable(&lp->napi);
        phy_start(lp->phy_dev);
-       dwceqos_init_hw(lp);
 
        netif_start_queue(ndev);
        tasklet_enable(&lp->tx_bdreclaim_tasklet);
index 657b65bf5cac64254b4db7c845038d2e40549a8c..18bf3a8fdc505ad2ed8a0fa3b81dfeb7d833b94d 100644 (file)
@@ -82,7 +82,7 @@ struct cpdma_desc {
 
 struct cpdma_desc_pool {
        phys_addr_t             phys;
-       u32                     hw_addr;
+       dma_addr_t              hw_addr;
        void __iomem            *iomap;         /* ioremap map */
        void                    *cpumap;        /* dma_alloc map */
        int                     desc_size, mem_size;
@@ -152,7 +152,7 @@ struct cpdma_chan {
  * abstract out these details
  */
 static struct cpdma_desc_pool *
-cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
+cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
                                int size, int align)
 {
        int bitmap_size;
@@ -176,13 +176,13 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
 
        if (phys) {
                pool->phys  = phys;
-               pool->iomap = ioremap(phys, size);
+               pool->iomap = ioremap(phys, size); /* should be memremap? */
                pool->hw_addr = hw_addr;
        } else {
-               pool->cpumap = dma_alloc_coherent(dev, size, &pool->phys,
+               pool->cpumap = dma_alloc_coherent(dev, size, &pool->hw_addr,
                                                  GFP_KERNEL);
-               pool->iomap = pool->cpumap;
-               pool->hw_addr = pool->phys;
+               pool->iomap = (void __iomem __force *)pool->cpumap;
+               pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
        }
 
        if (pool->iomap)
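
The cpdma change above widens hw_addr from u32 to dma_addr_t, which is the type dma_alloc_coherent() actually returns the device-visible address through (it can be wider than 32 bits, for example with LPAE). A minimal sketch of keeping the CPU pointer and the DMA handle side by side without narrowing; the struct and function names here are hypothetical:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>

    struct desc_area {
            void            *cpu;   /* CPU virtual address for the driver */
            dma_addr_t      hw;     /* value to program into DMA registers */
    };

    static int desc_area_alloc(struct device *dev, struct desc_area *a, size_t size)
    {
            a->cpu = dma_alloc_coherent(dev, size, &a->hw, GFP_KERNEL);
            return a->cpu ? 0 : -ENOMEM;
    }

    static void desc_area_free(struct device *dev, struct desc_area *a, size_t size)
    {
            dma_free_coherent(dev, size, a->cpu, a->hw);
    }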
index 3c54a2cae5dfd09e066205b945ae937a3cfabaee..67610270d171a69d5e1ecf5e0c027fe98d053bba 100644 (file)
@@ -48,7 +48,6 @@
 #include <linux/wait.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
-#include <asm/pci-bridge.h>
 #include <net/checksum.h>
 
 #include "spider_net.h"
index 7f975a2c8990fcff4491b4171cae626490d45807..b0de8ecd7fe8ed8ab030e22da50aef82e283e4b2 100644 (file)
@@ -533,8 +533,8 @@ static int dfx_register(struct device *bdev)
        const char *print_name = dev_name(bdev);
        struct net_device *dev;
        DFX_board_t       *bp;                  /* board pointer */
-       resource_size_t bar_start[3];           /* pointers to ports */
-       resource_size_t bar_len[3];             /* resource length */
+       resource_size_t bar_start[3] = {0};     /* pointers to ports */
+       resource_size_t bar_len[3] = {0};       /* resource length */
        int alloc_size;                         /* total buffer size used */
        struct resource *region;
        int err = 0;
@@ -3697,8 +3697,8 @@ static void dfx_unregister(struct device *bdev)
        int dfx_bus_pci = dev_is_pci(bdev);
        int dfx_bus_tc = DFX_BUS_TC(bdev);
        int dfx_use_mmio = DFX_MMIO || dfx_bus_tc;
-       resource_size_t bar_start[3];           /* pointers to ports */
-       resource_size_t bar_len[3];             /* resource lengths */
+       resource_size_t bar_start[3] = {0};     /* pointers to ports */
+       resource_size_t bar_len[3] = {0};       /* resource lengths */
        int             alloc_size;             /* total buffer size used */
 
        unregister_netdev(dev);
index 7456569f53c1a312d5590dc88072505413126889..028e3873c3107960c55f0bcff71d860fac0cad6e 100644 (file)
@@ -980,9 +980,9 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
                        opts = ip_tunnel_info_opts(info);
 
                if (key->tun_flags & TUNNEL_CSUM)
-                       flags |= GENEVE_F_UDP_CSUM;
+                       flags &= ~GENEVE_F_UDP_ZERO_CSUM6_TX;
                else
-                       flags &= ~GENEVE_F_UDP_CSUM;
+                       flags |= GENEVE_F_UDP_ZERO_CSUM6_TX;
 
                err = geneve6_build_skb(dst, skb, key->tun_flags, vni,
                                        info->options_len, opts,
@@ -1039,6 +1039,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
        return geneve_xmit_skb(skb, dev, info);
 }
 
+static int geneve_change_mtu(struct net_device *dev, int new_mtu)
+{
+       /* GENEVE overhead is not fixed, so we can't enforce a more
+        * precise max MTU.
+        */
+       if (new_mtu < 68 || new_mtu > IP_MAX_MTU)
+               return -EINVAL;
+       dev->mtu = new_mtu;
+       return 0;
+}
+
 static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
 {
        struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1083,7 +1094,7 @@ static const struct net_device_ops geneve_netdev_ops = {
        .ndo_stop               = geneve_stop,
        .ndo_start_xmit         = geneve_xmit,
        .ndo_get_stats64        = ip_tunnel_get_stats64,
-       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_change_mtu         = geneve_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_fill_metadata_dst  = geneve_fill_metadata_dst,
@@ -1442,11 +1453,21 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
 
        err = geneve_configure(net, dev, &geneve_remote_unspec,
                               0, 0, 0, htons(dst_port), true, 0);
-       if (err) {
-               free_netdev(dev);
-               return ERR_PTR(err);
-       }
+       if (err)
+               goto err;
+
+       /* openvswitch users expect packet sizes to be unrestricted,
+        * so set the largest MTU we can.
+        */
+       err = geneve_change_mtu(dev, IP_MAX_MTU);
+       if (err)
+               goto err;
+
        return dev;
+
+ err:
+       free_netdev(dev);
+       return ERR_PTR(err);
 }
 EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
 
index f4130af09244fb1c6b24f230c7d79cbdaeb43a22..fcb92c0d0eb967e0deba99282152a25c804b73ea 100644 (file)
@@ -624,6 +624,7 @@ struct nvsp_message {
 #define RNDIS_PKT_ALIGN_DEFAULT 8
 
 struct multi_send_data {
+       struct sk_buff *skb; /* skb containing the pkt */
        struct hv_netvsc_packet *pkt; /* netvsc pkt pending */
        u32 count; /* counter of batched packets */
 };
index 059fc523160107239d72080cadb1f2e7c0d246df..ec313fc08d82a3b6a3d8d79fd7a6527caa30733a 100644 (file)
@@ -841,6 +841,18 @@ static inline int netvsc_send_pkt(
        return ret;
 }
 
+/* Move packet out of multi send data (msd), and clear msd */
+static inline void move_pkt_msd(struct hv_netvsc_packet **msd_send,
+                               struct sk_buff **msd_skb,
+                               struct multi_send_data *msdp)
+{
+       *msd_skb = msdp->skb;
+       *msd_send = msdp->pkt;
+       msdp->skb = NULL;
+       msdp->pkt = NULL;
+       msdp->count = 0;
+}
+
 int netvsc_send(struct hv_device *device,
                struct hv_netvsc_packet *packet,
                struct rndis_message *rndis_msg,
@@ -855,6 +867,7 @@ int netvsc_send(struct hv_device *device,
        unsigned int section_index = NETVSC_INVALID_INDEX;
        struct multi_send_data *msdp;
        struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
+       struct sk_buff *msd_skb = NULL;
        bool try_batch;
        bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
 
@@ -897,10 +910,8 @@ int netvsc_send(struct hv_device *device,
                   net_device->send_section_size) {
                section_index = netvsc_get_next_send_section(net_device);
                if (section_index != NETVSC_INVALID_INDEX) {
-                               msd_send = msdp->pkt;
-                               msdp->pkt = NULL;
-                               msdp->count = 0;
-                               msd_len = 0;
+                       move_pkt_msd(&msd_send, &msd_skb, msdp);
+                       msd_len = 0;
                }
        }
 
@@ -919,31 +930,31 @@ int netvsc_send(struct hv_device *device,
                        packet->total_data_buflen += msd_len;
                }
 
-               if (msdp->pkt)
-                       dev_kfree_skb_any(skb);
+               if (msdp->skb)
+                       dev_kfree_skb_any(msdp->skb);
 
                if (xmit_more && !packet->cp_partial) {
+                       msdp->skb = skb;
                        msdp->pkt = packet;
                        msdp->count++;
                } else {
                        cur_send = packet;
+                       msdp->skb = NULL;
                        msdp->pkt = NULL;
                        msdp->count = 0;
                }
        } else {
-               msd_send = msdp->pkt;
-               msdp->pkt = NULL;
-               msdp->count = 0;
+               move_pkt_msd(&msd_send, &msd_skb, msdp);
                cur_send = packet;
        }
 
        if (msd_send) {
-               m_ret = netvsc_send_pkt(msd_send, net_device, pb, skb);
+               m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
 
                if (m_ret != 0) {
                        netvsc_free_send_slot(net_device,
                                              msd_send->send_buf_index);
-                       dev_kfree_skb_any(skb);
+                       dev_kfree_skb_any(msd_skb);
                }
        }
 
index 1c8db9afdcda8c0356db46db9fcfa4d971050b7b..1d3a66563bacbb0628e90693f8f3409c62018d68 100644 (file)
@@ -196,65 +196,6 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
        return ppi;
 }
 
-union sub_key {
-       u64 k;
-       struct {
-               u8 pad[3];
-               u8 kb;
-               u32 ka;
-       };
-};
-
-/* Toeplitz hash function
- * data: network byte order
- * return: host byte order
- */
-static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
-{
-       union sub_key subk;
-       int k_next = 4;
-       u8 dt;
-       int i, j;
-       u32 ret = 0;
-
-       subk.k = 0;
-       subk.ka = ntohl(*(u32 *)key);
-
-       for (i = 0; i < dlen; i++) {
-               subk.kb = key[k_next];
-               k_next = (k_next + 1) % klen;
-               dt = ((u8 *)data)[i];
-               for (j = 0; j < 8; j++) {
-                       if (dt & 0x80)
-                               ret ^= subk.ka;
-                       dt <<= 1;
-                       subk.k <<= 1;
-               }
-       }
-
-       return ret;
-}
-
-static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
-{
-       struct flow_keys flow;
-       int data_len;
-
-       if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
-           !(flow.basic.n_proto == htons(ETH_P_IP) ||
-             flow.basic.n_proto == htons(ETH_P_IPV6)))
-               return false;
-
-       if (flow.basic.ip_proto == IPPROTO_TCP)
-               data_len = 12;
-       else
-               data_len = 8;
-
-       *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
-
-       return true;
-}
-
 static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
                        void *accel_priv, select_queue_fallback_t fallback)
 {
@@ -267,11 +208,9 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
        if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
                return 0;
 
-       if (netvsc_set_hash(&hash, skb)) {
-               q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
-                       ndev->real_num_tx_queues;
-               skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
-       }
+       hash = skb_get_hash(skb);
+       q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+               ndev->real_num_tx_queues;
 
        if (!nvsc_dev->chn_table[q_idx])
                q_idx = 0;
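
With the private Toeplitz code removed, netvsc_select_queue() relies on the core skb_get_hash() and then applies the two modulo steps visible above: the hash indexes the VRSS send-indirection table, and the table entry is folded into the real queue count. A stand-alone illustration of that mapping; the table size and contents are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define SEND_TAB_SIZE 16        /* size illustrative */

    static unsigned int pick_queue(uint32_t hash, const uint16_t *send_table,
                                   unsigned int real_num_tx_queues)
    {
            return send_table[hash % SEND_TAB_SIZE] % real_num_tx_queues;
    }

    int main(void)
    {
            uint16_t table[SEND_TAB_SIZE] = { 0, 1, 2, 3, 4, 5, 6, 7 };

            printf("queue = %u\n", pick_queue(0x9e3779b9u, table, 4));
            return 0;
    }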
index f94392d07126c12fe7a60e0018b6214e9f72a4e2..7a3b41468a55180b1bdfe1be5d494d16eef701b9 100644 (file)
@@ -468,6 +468,7 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
        ipvlan->dev = dev;
        ipvlan->port = port;
        ipvlan->sfeatures = IPVLAN_FEATURES;
+       ipvlan_adjust_mtu(ipvlan, phy_dev);
        INIT_LIST_HEAD(&ipvlan->addrs);
 
        /* TODO Probably put random address here to be presented to the
index 29cbde8501ed8e09750f771204dfe9fc38503d34..d47cf14bb4a5b43bf5c94e0bdc533e1033f79b2a 100644 (file)
@@ -82,9 +82,6 @@ struct bfin_sir_self {
 
 #define DRIVER_NAME "bfin_sir"
 
-#define port_membase(port)     (((struct bfin_sir_port *)(port))->membase)
-#define get_lsr_cache(port)    (((struct bfin_sir_port *)(port))->lsr)
-#define put_lsr_cache(port, v) (((struct bfin_sir_port *)(port))->lsr = (v))
 #include <asm/bfin_serial.h>
 
 static const unsigned short per[][4] = {
index 696852eb23c3f07a9e31668c7854617943822d11..7a3f990c193546c42888f937497718d18c2b80e5 100644 (file)
@@ -430,16 +430,6 @@ static int irtty_open(struct tty_struct *tty)
 
        /* Module stuff handled via irda_ldisc.owner - Jean II */
 
-       /* First make sure we're not already connected. */
-       if (tty->disc_data != NULL) {
-               priv = tty->disc_data;
-               if (priv && priv->magic == IRTTY_MAGIC) {
-                       ret = -EEXIST;
-                       goto out;
-               }
-               tty->disc_data = NULL;          /* ### */
-       }
-
        /* stop the underlying  driver */
        irtty_stop_receiver(tty, TRUE);
        if (tty->ops->stop)
index 6a57a005e0ca8162d2a0b9334440d1b9cb7d75db..94e688805dd262317327a446d8e2e460a668d75a 100644 (file)
@@ -1323,6 +1323,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 
        list_add_tail_rcu(&vlan->list, &port->vlans);
        netif_stacked_transfer_operstate(lowerdev, dev);
+       linkwatch_fire_event(dev);
 
        return 0;
 
@@ -1522,6 +1523,7 @@ static int macvlan_device_event(struct notifier_block *unused,
        port = macvlan_port_get_rtnl(dev);
 
        switch (event) {
+       case NETDEV_UP:
        case NETDEV_CHANGE:
                list_for_each_entry(vlan, &port->vlans, list)
                        netif_stacked_transfer_operstate(vlan->lowerdev,
index 60994a83a0d68ca2e4c229fe16140b6017a9dd55..f0a77020037af0790f37b4098e4f6937c6a1ebee 100644 (file)
@@ -186,6 +186,7 @@ config MDIO_GPIO
 config MDIO_OCTEON
        tristate "Support for MDIO buses on Octeon and ThunderX SOCs"
        depends on 64BIT
+       depends on HAS_IOMEM
        help
 
          This module provides a driver for the Octeon and ThunderX MDIO
index 180f6995277944923b2aa57bacdf0bc17b9637ad..7a240fce3a7ea09edc9f10c6b93fadd94b2de3f5 100644 (file)
@@ -846,6 +846,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
        struct skb_shared_hwtstamps *shhwtstamps = NULL;
        struct sk_buff *skb;
        unsigned long flags;
+       u8 overflow;
+
+       overflow = (phy_rxts->ns_hi >> 14) & 0x3;
+       if (overflow)
+               pr_debug("rx timestamp queue overflow, count %d\n", overflow);
 
        spin_lock_irqsave(&dp83640->rx_lock, flags);
 
@@ -888,6 +893,7 @@ static void decode_txts(struct dp83640_private *dp83640,
        struct skb_shared_hwtstamps shhwtstamps;
        struct sk_buff *skb;
        u64 ns;
+       u8 overflow;
 
        /* We must already have the skb that triggered this. */
 
@@ -897,6 +903,17 @@ static void decode_txts(struct dp83640_private *dp83640,
                pr_debug("have timestamp but tx_queue empty\n");
                return;
        }
+
+       overflow = (phy_txts->ns_hi >> 14) & 0x3;
+       if (overflow) {
+               pr_debug("tx timestamp queue overflow, count %d\n", overflow);
+               while (skb) {
+                       skb_complete_tx_timestamp(skb, NULL);
+                       skb = skb_dequeue(&dp83640->tx_queue);
+               }
+               return;
+       }
+
        ns = phy2txts(phy_txts);
        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);
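
The dp83640 change reads a 2-bit overflow counter out of bits 15:14 of ns_hi for both RX and TX timestamps, logs it, and on the TX side also drains the queued skbs with skb_complete_tx_timestamp(skb, NULL) so nothing waits forever for a timestamp the PHY dropped. The field extraction itself, in isolation:

    #include <stdint.h>
    #include <stdio.h>

    /* Overflow counter lives in bits 15:14 of ns_hi (per the decode above). */
    static unsigned int ts_overflow(uint16_t ns_hi)
    {
            return (ns_hi >> 14) & 0x3;
    }

    int main(void)
    {
            printf("%u %u\n", ts_overflow(0x0000), ts_overflow(0xc000));    /* 0 3 */
            return 0;
    }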
index 8763bb20988a0edf80091347987fb38086e85496..5590b9c182c967d80a186256162f30690c707506 100644 (file)
@@ -692,25 +692,29 @@ void phy_change(struct work_struct *work)
        struct phy_device *phydev =
                container_of(work, struct phy_device, phy_queue);
 
-       if (phydev->drv->did_interrupt &&
-           !phydev->drv->did_interrupt(phydev))
-               goto ignore;
+       if (phy_interrupt_is_valid(phydev)) {
+               if (phydev->drv->did_interrupt &&
+                   !phydev->drv->did_interrupt(phydev))
+                       goto ignore;
 
-       if (phy_disable_interrupts(phydev))
-               goto phy_err;
+               if (phy_disable_interrupts(phydev))
+                       goto phy_err;
+       }
 
        mutex_lock(&phydev->lock);
        if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
                phydev->state = PHY_CHANGELINK;
        mutex_unlock(&phydev->lock);
 
-       atomic_dec(&phydev->irq_disable);
-       enable_irq(phydev->irq);
+       if (phy_interrupt_is_valid(phydev)) {
+               atomic_dec(&phydev->irq_disable);
+               enable_irq(phydev->irq);
 
-       /* Reenable interrupts */
-       if (PHY_HALTED != phydev->state &&
-           phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
-               goto irq_enable_err;
+               /* Reenable interrupts */
+               if (PHY_HALTED != phydev->state &&
+                   phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED))
+                       goto irq_enable_err;
+       }
 
        /* reschedule state queue work to run as soon as possible */
        cancel_delayed_work_sync(&phydev->state_queue);
@@ -905,10 +909,10 @@ void phy_state_machine(struct work_struct *work)
                phydev->adjust_link(phydev->attached_dev);
                break;
        case PHY_RUNNING:
-               /* Only register a CHANGE if we are polling or ignoring
-                * interrupts and link changed since latest checking.
+               /* Only register a CHANGE if we are polling and link changed
+                * since latest checking.
                 */
-               if (!phy_interrupt_is_valid(phydev)) {
+               if (phydev->irq == PHY_POLL) {
                        old_link = phydev->link;
                        err = phy_read_status(phydev);
                        if (err)
@@ -1000,15 +1004,21 @@ void phy_state_machine(struct work_struct *work)
                   phy_state_to_str(old_state),
                   phy_state_to_str(phydev->state));
 
-       queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
-                          PHY_STATE_TIME * HZ);
+       /* Only re-schedule a PHY state machine change if we are polling the
+        * PHY, if PHY_IGNORE_INTERRUPT is set, then we will be moving
+        * between states from phy_mac_interrupt()
+        */
+       if (phydev->irq == PHY_POLL)
+               queue_delayed_work(system_power_efficient_wq, &phydev->state_queue,
+                                  PHY_STATE_TIME * HZ);
 }
 
 void phy_mac_interrupt(struct phy_device *phydev, int new_link)
 {
-       cancel_work_sync(&phydev->phy_queue);
        phydev->link = new_link;
-       schedule_work(&phydev->phy_queue);
+
+       /* Trigger a state machine change */
+       queue_work(system_power_efficient_wq, &phydev->phy_queue);
 }
 EXPORT_SYMBOL(phy_mac_interrupt);
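
With the phylib changes above, a PHY whose irq is PHY_IGNORE_INTERRUPT no longer has its state machine rescheduled by polling; the MAC driver is expected to report link transitions itself through phy_mac_interrupt(), which now simply queues the state-machine work (the lan78xx hunks further down are converted to do exactly that). A hedged sketch of the caller side; everything except phy_mac_interrupt() and PHY_IGNORE_INTERRUPT is hypothetical:

    #include <linux/phy.h>

    /* Called from the MAC driver's own link-change detection path. */
    static void mydrv_report_link(struct phy_device *phydev, bool up)
    {
            /* phydev->irq was set to PHY_IGNORE_INTERRUPT at connect time */
            phy_mac_interrupt(phydev, up ? 1 : 0);
    }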
 
index e485f2653c8208578d7994284dd12faabcc6c096..2e21e9366f76a184aa2c8d770557e6d9ff0db235 100644 (file)
 #include <linux/netdevice.h>
 #include <linux/smscphy.h>
 
+struct smsc_phy_priv {
+       bool energy_enable;
+};
+
 static int smsc_phy_config_intr(struct phy_device *phydev)
 {
        int rc = phy_write (phydev, MII_LAN83C185_IM,
@@ -43,19 +47,14 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
 
 static int smsc_phy_config_init(struct phy_device *phydev)
 {
-       int __maybe_unused len;
-       struct device *dev __maybe_unused = &phydev->mdio.dev;
-       struct device_node *of_node __maybe_unused = dev->of_node;
+       struct smsc_phy_priv *priv = phydev->priv;
+
        int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
-       int enable_energy = 1;
 
        if (rc < 0)
                return rc;
 
-       if (of_find_property(of_node, "smsc,disable-energy-detect", &len))
-               enable_energy = 0;
-
-       if (enable_energy) {
+       if (priv->energy_enable) {
                /* Enable energy detect mode for this SMSC Transceivers */
                rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
                               rc | MII_LAN83C185_EDPWRDOWN);
@@ -110,10 +109,13 @@ static int lan911x_config_init(struct phy_device *phydev)
  */
 static int lan87xx_read_status(struct phy_device *phydev)
 {
+       struct smsc_phy_priv *priv = phydev->priv;
+
        int err = genphy_read_status(phydev);
-       int i;
 
-       if (!phydev->link) {
+       if (!phydev->link && priv->energy_enable) {
+               int i;
+
                /* Disable EDPD to wake up PHY */
                int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
                if (rc < 0)
@@ -149,6 +151,26 @@ static int lan87xx_read_status(struct phy_device *phydev)
        return err;
 }
 
+static int smsc_phy_probe(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->mdio.dev;
+       struct device_node *of_node = dev->of_node;
+       struct smsc_phy_priv *priv;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->energy_enable = true;
+
+       if (of_property_read_bool(of_node, "smsc,disable-energy-detect"))
+               priv->energy_enable = false;
+
+       phydev->priv = priv;
+
+       return 0;
+}
+
 static struct phy_driver smsc_phy_driver[] = {
 {
        .phy_id         = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -159,6 +181,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -180,6 +204,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -201,6 +227,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = lan87xx_read_status,
@@ -222,6 +250,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
@@ -242,6 +272,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = lan87xx_read_status,
@@ -263,6 +295,8 @@ static struct phy_driver smsc_phy_driver[] = {
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
 
+       .probe          = smsc_phy_probe,
+
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = lan87xx_read_status,
index 05005c660d4d954527d278130824f232383c2d49..f60f7660b451754b37f7f62f53cf4d3bcbed2724 100644 (file)
@@ -42,6 +42,8 @@
  *                    deprecated in 2.6
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -49,7 +51,6 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/mm.h>
 #include <linux/ppp_defs.h>
 #include <linux/ppp-comp.h>
@@ -94,8 +95,8 @@ static inline void sha_pad_init(struct sha_pad *shapad)
  * State for an MPPE (de)compressor.
  */
 struct ppp_mppe_state {
-       struct crypto_blkcipher *arc4;
-       struct crypto_hash *sha1;
+       struct crypto_skcipher *arc4;
+       struct crypto_ahash *sha1;
        unsigned char *sha1_digest;
        unsigned char master_key[MPPE_MAX_KEY_LEN];
        unsigned char session_key[MPPE_MAX_KEY_LEN];
@@ -135,7 +136,7 @@ struct ppp_mppe_state {
  */
 static void get_new_key_from_sha(struct ppp_mppe_state * state)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, state->sha1);
        struct scatterlist sg[4];
        unsigned int nbytes;
 
@@ -148,10 +149,12 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
        nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
                           sizeof(sha_pad->sha_pad2));
 
-       desc.tfm = state->sha1;
-       desc.flags = 0;
+       ahash_request_set_tfm(req, state->sha1);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, state->sha1_digest, nbytes);
 
-       crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
+       crypto_ahash_digest(req);
+       ahash_request_zero(req);
 }
 
 /*
@@ -161,20 +164,23 @@ static void get_new_key_from_sha(struct ppp_mppe_state * state)
 static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
 {
        struct scatterlist sg_in[1], sg_out[1];
-       struct blkcipher_desc desc = { .tfm = state->arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
+
+       skcipher_request_set_tfm(req, state->arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
 
        get_new_key_from_sha(state);
        if (!initial_key) {
-               crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
-                                       state->keylen);
+               crypto_skcipher_setkey(state->arc4, state->sha1_digest,
+                                      state->keylen);
                sg_init_table(sg_in, 1);
                sg_init_table(sg_out, 1);
                setup_sg(sg_in, state->sha1_digest, state->keylen);
                setup_sg(sg_out, state->session_key, state->keylen);
-               if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
-                                            state->keylen) != 0) {
+               skcipher_request_set_crypt(req, sg_in, sg_out, state->keylen,
+                                          NULL);
+               if (crypto_skcipher_encrypt(req))
                    printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
-               }
        } else {
                memcpy(state->session_key, state->sha1_digest, state->keylen);
        }
@@ -184,7 +190,8 @@ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
                state->session_key[1] = 0x26;
                state->session_key[2] = 0x9e;
        }
-       crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
+       crypto_skcipher_setkey(state->arc4, state->session_key, state->keylen);
+       skcipher_request_zero(req);
 }
 
 /*
@@ -204,19 +211,19 @@ static void *mppe_alloc(unsigned char *options, int optlen)
                goto out;
 
 
-       state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       state->arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(state->arc4)) {
                state->arc4 = NULL;
                goto out_free;
        }
 
-       state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
+       state->sha1 = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(state->sha1)) {
                state->sha1 = NULL;
                goto out_free;
        }
 
-       digestsize = crypto_hash_digestsize(state->sha1);
+       digestsize = crypto_ahash_digestsize(state->sha1);
        if (digestsize < MPPE_MAX_KEY_LEN)
                goto out_free;
 
@@ -237,15 +244,12 @@ static void *mppe_alloc(unsigned char *options, int optlen)
 
        return (void *)state;
 
-       out_free:
-           if (state->sha1_digest)
-               kfree(state->sha1_digest);
-           if (state->sha1)
-               crypto_free_hash(state->sha1);
-           if (state->arc4)
-               crypto_free_blkcipher(state->arc4);
-           kfree(state);
-       out:
+out_free:
+       kfree(state->sha1_digest);
+       crypto_free_ahash(state->sha1);
+       crypto_free_skcipher(state->arc4);
+       kfree(state);
+out:
        return NULL;
 }
 
@@ -256,13 +260,10 @@ static void mppe_free(void *arg)
 {
        struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
        if (state) {
-           if (state->sha1_digest)
                kfree(state->sha1_digest);
-           if (state->sha1)
-               crypto_free_hash(state->sha1);
-           if (state->arc4)
-               crypto_free_blkcipher(state->arc4);
-           kfree(state);
+               crypto_free_ahash(state->sha1);
+               crypto_free_skcipher(state->arc4);
+               kfree(state);
        }
 }
 
@@ -368,8 +369,9 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
              int isize, int osize)
 {
        struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
-       struct blkcipher_desc desc = { .tfm = state->arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
        int proto;
+       int err;
        struct scatterlist sg_in[1], sg_out[1];
 
        /*
@@ -426,7 +428,13 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
        sg_init_table(sg_out, 1);
        setup_sg(sg_in, ibuf, isize);
        setup_sg(sg_out, obuf, osize);
-       if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
+
+       skcipher_request_set_tfm(req, state->arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in, sg_out, isize, NULL);
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
+       if (err) {
                printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
                return -1;
        }
@@ -475,7 +483,7 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
                int osize)
 {
        struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
-       struct blkcipher_desc desc = { .tfm = state->arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, state->arc4);
        unsigned ccount;
        int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
        struct scatterlist sg_in[1], sg_out[1];
@@ -609,9 +617,14 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
        sg_init_table(sg_out, 1);
        setup_sg(sg_in, ibuf, 1);
        setup_sg(sg_out, obuf, 1);
-       if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
+
+       skcipher_request_set_tfm(req, state->arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in, sg_out, 1, NULL);
+       if (crypto_skcipher_decrypt(req)) {
                printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
-               return DECOMP_ERROR;
+               osize = DECOMP_ERROR;
+               goto out_zap_req;
        }
 
        /*
@@ -629,9 +642,11 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
        /* And finally, decrypt the rest of the packet. */
        setup_sg(sg_in, ibuf + 1, isize - 1);
        setup_sg(sg_out, obuf + 1, osize - 1);
-       if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
+       skcipher_request_set_crypt(req, sg_in, sg_out, isize - 1, NULL);
+       if (crypto_skcipher_decrypt(req)) {
                printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
-               return DECOMP_ERROR;
+               osize = DECOMP_ERROR;
+               goto out_zap_req;
        }
 
        state->stats.unc_bytes += osize;
@@ -642,6 +657,8 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
        /* good packet credit */
        state->sanity_errors >>= 1;
 
+out_zap_req:
+       skcipher_request_zero(req);
        return osize;
 
 sanity_error:
@@ -714,8 +731,8 @@ static struct compressor ppp_mppe = {
 static int __init ppp_mppe_init(void)
 {
        int answer;
-       if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
-             crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
+       if (!(crypto_has_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
+             crypto_has_ahash("sha1", 0, CRYPTO_ALG_ASYNC)))
                return -ENODEV;
 
        sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
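
The ppp_mppe conversion above moves from the old crypto_blkcipher/crypto_hash interfaces to crypto_skcipher/crypto_ahash: allocate the tfm, put a request on the stack, attach the tfm, callback and scatterlists to it, run the operation, then zero the request. A condensed, self-contained sketch of that calling sequence for the synchronous ecb(arc4) case; the helper itself is hypothetical and error handling is trimmed relative to the driver:

    #include <crypto/skcipher.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>
    #include <linux/types.h>

    static int ecb_arc4_encrypt_inplace(const u8 *key, unsigned int keylen,
                                        u8 *buf, unsigned int len)
    {
            struct crypto_skcipher *tfm;
            struct scatterlist sg;
            int err;

            tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            err = crypto_skcipher_setkey(tfm, key, keylen);
            if (!err) {
                    SKCIPHER_REQUEST_ON_STACK(req, tfm);

                    sg_init_one(&sg, buf, len);
                    skcipher_request_set_tfm(req, tfm);
                    skcipher_request_set_callback(req, 0, NULL, NULL);
                    skcipher_request_set_crypt(req, &sg, &sg, len, NULL);
                    err = crypto_skcipher_encrypt(req);
                    skcipher_request_zero(req);
            }

            crypto_free_skcipher(tfm);
            return err;
    }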
index 90868ca5e3412ffebd28630f571bdddfe389cc35..ae0905ed4a32b55183568e95512c6727458a9f96 100644 (file)
@@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
        return i < MAX_CALLID;
 }
 
-static int add_chan(struct pppox_sock *sock)
+static int add_chan(struct pppox_sock *sock,
+                   struct pptp_addr *sa)
 {
        static int call_id;
 
        spin_lock(&chan_lock);
-       if (!sock->proto.pptp.src_addr.call_id) {
+       if (!sa->call_id)       {
                call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
                if (call_id == MAX_CALLID) {
                        call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
                        if (call_id == MAX_CALLID)
                                goto out_err;
                }
-               sock->proto.pptp.src_addr.call_id = call_id;
-       } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
+               sa->call_id = call_id;
+       } else if (test_bit(sa->call_id, callid_bitmap)) {
                goto out_err;
+       }
 
-       set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
-       rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
+       sock->proto.pptp.src_addr = *sa;
+       set_bit(sa->call_id, callid_bitmap);
+       rcu_assign_pointer(callid_sock[sa->call_id], sock);
        spin_unlock(&chan_lock);
 
        return 0;
@@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
        struct sock *sk = sock->sk;
        struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
        struct pppox_sock *po = pppox_sk(sk);
-       struct pptp_opt *opt = &po->proto.pptp;
        int error = 0;
 
        if (sockaddr_len < sizeof(struct sockaddr_pppox))
@@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
 
        lock_sock(sk);
 
-       opt->src_addr = sp->sa_addr.pptp;
-       if (add_chan(po))
+       if (sk->sk_state & PPPOX_DEAD) {
+               error = -EALREADY;
+               goto out;
+       }
+
+       if (sk->sk_state & PPPOX_BOUND) {
                error = -EBUSY;
+               goto out;
+       }
+
+       if (add_chan(po, &sp->sa_addr.pptp))
+               error = -EBUSY;
+       else
+               sk->sk_state |= PPPOX_BOUND;
 
+out:
        release_sock(sk);
        return error;
 }
@@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
        }
 
        opt->dst_addr = sp->sa_addr.pptp;
-       sk->sk_state = PPPOX_CONNECTED;
+       sk->sk_state |= PPPOX_CONNECTED;
 
  end:
        release_sock(sk);
index 718ceeab4dbcf397adb5bc3a6d8aa8dcf029602c..00558e1395847d51bccd4a35050c9ae1bb79f52a 100644 (file)
@@ -758,6 +758,8 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
                u64_stats_update_end(&pcpu_stats->syncp);
 
                skb->dev = team->dev;
+       } else if (res == RX_HANDLER_EXACT) {
+               this_cpu_inc(team->pcpu_stats->rx_nohandler);
        } else {
                this_cpu_inc(team->pcpu_stats->rx_dropped);
        }
@@ -1807,7 +1809,7 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
        struct team *team = netdev_priv(dev);
        struct team_pcpu_stats *p;
        u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
-       u32 rx_dropped = 0, tx_dropped = 0;
+       u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
        unsigned int start;
        int i;
 
@@ -1828,14 +1830,16 @@ team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->tx_packets       += tx_packets;
                stats->tx_bytes         += tx_bytes;
                /*
-                * rx_dropped & tx_dropped are u32, updated
-                * without syncp protection.
+                * rx_dropped, tx_dropped & rx_nohandler are u32,
+                * updated without syncp protection.
                 */
                rx_dropped      += p->rx_dropped;
                tx_dropped      += p->tx_dropped;
+               rx_nohandler    += p->rx_nohandler;
        }
        stats->rx_dropped       = rx_dropped;
        stats->tx_dropped       = tx_dropped;
+       stats->rx_nohandler     = rx_nohandler;
        return stats;
 }
 
index 2ed53331bfb28de22019754cb46a8a27c35c43ad..1c299b8a162d7243c5ae94b96618c3763083e91e 100644 (file)
@@ -36,7 +36,7 @@
 #define DRIVER_AUTHOR  "WOOJUNG HUH <woojung.huh@microchip.com>"
 #define DRIVER_DESC    "LAN78XX USB 3.0 Gigabit Ethernet Devices"
 #define DRIVER_NAME    "lan78xx"
-#define DRIVER_VERSION "1.0.1"
+#define DRIVER_VERSION "1.0.2"
 
 #define TX_TIMEOUT_JIFFIES             (5 * HZ)
 #define THROTTLE_JIFFIES               (HZ / 8)
@@ -462,32 +462,53 @@ static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
                                   u32 length, u8 *data)
 {
        u32 val;
+       u32 saved;
        int i, ret;
+       int retval;
 
-       ret = lan78xx_eeprom_confirm_not_busy(dev);
-       if (ret)
-               return ret;
+       /* depends on chip, some EEPROM pins are muxed with LED function.
+        * disable & restore LED function to access EEPROM.
+        */
+       ret = lan78xx_read_reg(dev, HW_CFG, &val);
+       saved = val;
+       if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+               val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
+               ret = lan78xx_write_reg(dev, HW_CFG, val);
+       }
+
+       retval = lan78xx_eeprom_confirm_not_busy(dev);
+       if (retval)
+               return retval;
 
        for (i = 0; i < length; i++) {
                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
                ret = lan78xx_write_reg(dev, E2P_CMD, val);
-               if (unlikely(ret < 0))
-                       return -EIO;
+               if (unlikely(ret < 0)) {
+                       retval = -EIO;
+                       goto exit;
+               }
 
-               ret = lan78xx_wait_eeprom(dev);
-               if (ret < 0)
-                       return ret;
+               retval = lan78xx_wait_eeprom(dev);
+               if (retval < 0)
+                       goto exit;
 
                ret = lan78xx_read_reg(dev, E2P_DATA, &val);
-               if (unlikely(ret < 0))
-                       return -EIO;
+               if (unlikely(ret < 0)) {
+                       retval = -EIO;
+                       goto exit;
+               }
 
                data[i] = val & 0xFF;
                offset++;
        }
 
-       return 0;
+       retval = 0;
+exit:
+       if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+               ret = lan78xx_write_reg(dev, HW_CFG, saved);
+
+       return retval;
 }
 
 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
@@ -509,44 +530,67 @@ static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
                                    u32 length, u8 *data)
 {
        u32 val;
+       u32 saved;
        int i, ret;
+       int retval;
 
-       ret = lan78xx_eeprom_confirm_not_busy(dev);
-       if (ret)
-               return ret;
+       /* depends on chip, some EEPROM pins are muxed with LED function.
+        * disable & restore LED function to access EEPROM.
+        */
+       ret = lan78xx_read_reg(dev, HW_CFG, &val);
+       saved = val;
+       if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000) {
+               val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
+               ret = lan78xx_write_reg(dev, HW_CFG, val);
+       }
+
+       retval = lan78xx_eeprom_confirm_not_busy(dev);
+       if (retval)
+               goto exit;
 
        /* Issue write/erase enable command */
        val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
        ret = lan78xx_write_reg(dev, E2P_CMD, val);
-       if (unlikely(ret < 0))
-               return -EIO;
+       if (unlikely(ret < 0)) {
+               retval = -EIO;
+               goto exit;
+       }
 
-       ret = lan78xx_wait_eeprom(dev);
-       if (ret < 0)
-               return ret;
+       retval = lan78xx_wait_eeprom(dev);
+       if (retval < 0)
+               goto exit;
 
        for (i = 0; i < length; i++) {
                /* Fill data register */
                val = data[i];
                ret = lan78xx_write_reg(dev, E2P_DATA, val);
-               if (ret < 0)
-                       return ret;
+               if (ret < 0) {
+                       retval = -EIO;
+                       goto exit;
+               }
 
                /* Send "write" command */
                val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
                val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
                ret = lan78xx_write_reg(dev, E2P_CMD, val);
-               if (ret < 0)
-                       return ret;
+               if (ret < 0) {
+                       retval = -EIO;
+                       goto exit;
+               }
 
-               ret = lan78xx_wait_eeprom(dev);
-               if (ret < 0)
-                       return ret;
+               retval = lan78xx_wait_eeprom(dev);
+               if (retval < 0)
+                       goto exit;
 
                offset++;
        }
 
-       return 0;
+       retval = 0;
+exit:
+       if ((dev->devid & ID_REV_CHIP_ID_MASK_) == 0x78000000)
+               ret = lan78xx_write_reg(dev, HW_CFG, saved);
+
+       return retval;
 }
 
 static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
@@ -904,7 +948,6 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 
        if (!phydev->link && dev->link_on) {
                dev->link_on = false;
-               netif_carrier_off(dev->net);
 
                /* reset MAC */
                ret = lan78xx_read_reg(dev, MAC_CR, &buf);
@@ -914,6 +957,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                ret = lan78xx_write_reg(dev, MAC_CR, buf);
                if (unlikely(ret < 0))
                        return -EIO;
+
+               phy_mac_interrupt(phydev, 0);
        } else if (phydev->link && !dev->link_on) {
                dev->link_on = true;
 
@@ -953,7 +998,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                          ethtool_cmd_speed(&ecmd), ecmd.duplex, ladv, radv);
 
                ret = lan78xx_update_flowcontrol(dev, ecmd.duplex, ladv, radv);
-               netif_carrier_on(dev->net);
+               phy_mac_interrupt(phydev, 1);
        }
 
        return ret;
@@ -1495,7 +1540,6 @@ done:
 static int lan78xx_mdio_init(struct lan78xx_net *dev)
 {
        int ret;
-       int i;
 
        dev->mdiobus = mdiobus_alloc();
        if (!dev->mdiobus) {
@@ -1511,10 +1555,6 @@ static int lan78xx_mdio_init(struct lan78xx_net *dev)
        snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
                 dev->udev->bus->busnum, dev->udev->devnum);
 
-       /* handle our own interrupt */
-       for (i = 0; i < PHY_MAX_ADDR; i++)
-               dev->mdiobus->irq[i] = PHY_IGNORE_INTERRUPT;
-
        switch (dev->devid & ID_REV_CHIP_ID_MASK_) {
        case 0x78000000:
        case 0x78500000:
@@ -1558,6 +1598,16 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
                return -EIO;
        }
 
+       /* Enable PHY interrupts.
+        * We handle the PHY interrupt ourselves.
+        */
+       ret = phy_read(phydev, LAN88XX_INT_STS);
+       ret = phy_write(phydev, LAN88XX_INT_MASK,
+                       LAN88XX_INT_MASK_MDINTPIN_EN_ |
+                       LAN88XX_INT_MASK_LINK_CHANGE_);
+
+       phydev->irq = PHY_IGNORE_INTERRUPT;
+
        ret = phy_connect_direct(dev->net, phydev,
                                 lan78xx_link_status_change,
                                 PHY_INTERFACE_MODE_GMII);
@@ -1580,14 +1630,6 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
                              SUPPORTED_Pause | SUPPORTED_Asym_Pause);
        genphy_config_aneg(phydev);
 
-       /* Workaround to enable PHY interrupt.
-        * phy_start_interrupts() is API for requesting and enabling
-        * PHY interrupt. However, USB-to-Ethernet device can't use
-        * request_irq() called in phy_start_interrupts().
-        * Set PHY to PHY_HALTED and call phy_start()
-        * to make a call to phy_enable_interrupts()
-        */
-       phy_stop(phydev);
        phy_start(phydev);
 
        netif_dbg(dev, ifup, dev->net, "phy initialised successfully");
@@ -2221,7 +2263,9 @@ netdev_tx_t lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
        if (skb2) {
                skb_queue_tail(&dev->txq_pend, skb2);
 
-               if (skb_queue_len(&dev->txq_pend) > 10)
+               /* throttle the TX path when running slower than SuperSpeed USB */
+               if ((dev->udev->speed < USB_SPEED_SUPER) &&
+                   (skb_queue_len(&dev->txq_pend) > 10))
                        netif_stop_queue(net);
        } else {
                netif_dbg(dev, tx_err, dev->net,
index 767ab11a6e9f67ce774d086101daeb1eb91838ce..c9fd52a8e6ec5f05d3459ba26994a449df764ec6 100644 (file)
@@ -146,6 +146,10 @@ struct virtnet_info {
        virtio_net_ctrl_ack ctrl_status;
        u8 ctrl_promisc;
        u8 ctrl_allmulti;
+
+       /* Ethtool settings */
+       u8 duplex;
+       u32 speed;
 };
 
 struct padded_vnet_hdr {
@@ -1376,6 +1380,58 @@ static void virtnet_get_channels(struct net_device *dev,
        channels->other_count = 0;
 }
 
+/* Check if the user is trying to change anything besides speed/duplex */
+static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
+{
+       struct ethtool_cmd diff1 = *cmd;
+       struct ethtool_cmd diff2 = {};
+
+       /* advertising and cmd are usually set, ignore port because we set it */
+       ethtool_cmd_speed_set(&diff1, 0);
+       diff1.advertising = 0;
+       diff1.duplex = 0;
+       diff1.port = 0;
+       diff1.cmd = 0;
+
+       return !memcmp(&diff1, &diff2, sizeof(diff1));
+}
+
+static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       u32 speed;
+
+       speed = ethtool_cmd_speed(cmd);
+       /* reject unrecognized speed/duplex values and changes to other fields */
+       if (!ethtool_validate_speed(speed) ||
+           !ethtool_validate_duplex(cmd->duplex) ||
+           !virtnet_validate_ethtool_cmd(cmd))
+               return -EINVAL;
+       vi->speed = speed;
+       vi->duplex = cmd->duplex;
+
+       return 0;
+}
+
+static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+
+       ethtool_cmd_speed_set(cmd, vi->speed);
+       cmd->duplex = vi->duplex;
+       cmd->port = PORT_OTHER;
+
+       return 0;
+}
+
+static void virtnet_init_settings(struct net_device *dev)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+
+       vi->speed = SPEED_UNKNOWN;
+       vi->duplex = DUPLEX_UNKNOWN;
+}
+
 static const struct ethtool_ops virtnet_ethtool_ops = {
        .get_drvinfo = virtnet_get_drvinfo,
        .get_link = ethtool_op_get_link,
@@ -1383,6 +1439,8 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
        .set_channels = virtnet_set_channels,
        .get_channels = virtnet_get_channels,
        .get_ts_info = ethtool_op_get_ts_info,
+       .get_settings = virtnet_get_settings,
+       .set_settings = virtnet_set_settings,
 };
 
 #define MIN_MTU 68
@@ -1855,6 +1913,8 @@ static int virtnet_probe(struct virtio_device *vdev)
        netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
        netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
 
+       virtnet_init_settings(dev);
+
        err = register_netdev(dev);
        if (err) {
                pr_debug("virtio_net: registering device failed\n");
index 66addb7a7911beb33f17c916caa7bb48ae84507e..76e1fc9d8748e61d2b9cfbab91e3adf862e22de1 100644 (file)
@@ -877,6 +877,24 @@ static int vrf_fillinfo(struct sk_buff *skb,
        return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
 }
 
+static size_t vrf_get_slave_size(const struct net_device *bond_dev,
+                                const struct net_device *slave_dev)
+{
+       return nla_total_size(sizeof(u32));  /* IFLA_VRF_PORT_TABLE */
+}
+
+static int vrf_fill_slave_info(struct sk_buff *skb,
+                              const struct net_device *vrf_dev,
+                              const struct net_device *slave_dev)
+{
+       struct net_vrf *vrf = netdev_priv(vrf_dev);
+
+       if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
        [IFLA_VRF_TABLE] = { .type = NLA_U32 },
 };
@@ -890,6 +908,9 @@ static struct rtnl_link_ops vrf_link_ops __read_mostly = {
        .validate       = vrf_validate,
        .fill_info      = vrf_fillinfo,
 
+       .get_slave_size  = vrf_get_slave_size,
+       .fill_slave_info = vrf_fill_slave_info,
+
        .newlink        = vrf_newlink,
        .dellink        = vrf_dellink,
        .setup          = vrf_setup,
index 2d88c799d2ac32a44c2bc304e19280f2e471ad7b..57d219fc3d644d8a87b63a9e662870e5e1e3adbd 100644 (file)
@@ -73,7 +73,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 static int vxlan_net_id;
 static struct rtnl_link_ops vxlan_link_ops;
 
-static const u8 all_zeros_mac[ETH_ALEN];
+static const u8 all_zeros_mac[ETH_ALEN + 2];
 
 static int vxlan_sock_add(struct vxlan_dev *vxlan);
 
@@ -1684,18 +1684,14 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
        gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
-                          struct sk_buff *skb,
-                          struct net_device *dev, struct in6_addr *saddr,
-                          struct in6_addr *daddr, __u8 prio, __u8 ttl,
-                          __be16 src_port, __be16 dst_port, __be32 vni,
-                          struct vxlan_metadata *md, bool xnet, u32 vxflags)
+static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
+                          int iphdr_len, __be32 vni,
+                          struct vxlan_metadata *md, u32 vxflags,
+                          bool udp_sum)
 {
        struct vxlanhdr *vxh;
        int min_headroom;
        int err;
-       bool udp_sum = !(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX);
        int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
        u16 hdrlen = sizeof(struct vxlanhdr);
 
@@ -1712,93 +1708,8 @@ static int vxlan6_xmit_skb(struct dst_entry *dst, struct sock *sk,
                }
        }
 
-       skb_scrub_packet(skb, xnet);
-
        min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
-                       + VXLAN_HLEN + sizeof(struct ipv6hdr)
-                       + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
-
-       /* Need space for new headers (invalidates iph ptr) */
-       err = skb_cow_head(skb, min_headroom);
-       if (unlikely(err)) {
-               kfree_skb(skb);
-               goto err;
-       }
-
-       skb = vlan_hwaccel_push_inside(skb);
-       if (WARN_ON(!skb)) {
-               err = -ENOMEM;
-               goto err;
-       }
-
-       skb = iptunnel_handle_offloads(skb, udp_sum, type);
-       if (IS_ERR(skb)) {
-               err = -EINVAL;
-               goto err;
-       }
-
-       vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
-       vxh->vx_flags = htonl(VXLAN_HF_VNI);
-       vxh->vx_vni = vni;
-
-       if (type & SKB_GSO_TUNNEL_REMCSUM) {
-               u32 data = (skb_checksum_start_offset(skb) - hdrlen) >>
-                          VXLAN_RCO_SHIFT;
-
-               if (skb->csum_offset == offsetof(struct udphdr, check))
-                       data |= VXLAN_RCO_UDP;
-
-               vxh->vx_vni |= htonl(data);
-               vxh->vx_flags |= htonl(VXLAN_HF_RCO);
-
-               if (!skb_is_gso(skb)) {
-                       skb->ip_summed = CHECKSUM_NONE;
-                       skb->encapsulation = 0;
-               }
-       }
-
-       if (vxflags & VXLAN_F_GBP)
-               vxlan_build_gbp_hdr(vxh, vxflags, md);
-
-       skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
-       udp_tunnel6_xmit_skb(dst, sk, skb, dev, saddr, daddr, prio,
-                            ttl, src_port, dst_port,
-                            !!(vxflags & VXLAN_F_UDP_ZERO_CSUM6_TX));
-       return 0;
-err:
-       dst_release(dst);
-       return err;
-}
-#endif
-
-static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
-                         __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
-                         __be16 src_port, __be16 dst_port, __be32 vni,
-                         struct vxlan_metadata *md, bool xnet, u32 vxflags)
-{
-       struct vxlanhdr *vxh;
-       int min_headroom;
-       int err;
-       bool udp_sum = !!(vxflags & VXLAN_F_UDP_CSUM);
-       int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-       u16 hdrlen = sizeof(struct vxlanhdr);
-
-       if ((vxflags & VXLAN_F_REMCSUM_TX) &&
-           skb->ip_summed == CHECKSUM_PARTIAL) {
-               int csum_start = skb_checksum_start_offset(skb);
-
-               if (csum_start <= VXLAN_MAX_REMCSUM_START &&
-                   !(csum_start & VXLAN_RCO_SHIFT_MASK) &&
-                   (skb->csum_offset == offsetof(struct udphdr, check) ||
-                    skb->csum_offset == offsetof(struct tcphdr, check))) {
-                       udp_sum = false;
-                       type |= SKB_GSO_TUNNEL_REMCSUM;
-               }
-       }
-
-       min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
-                       + VXLAN_HLEN + sizeof(struct iphdr)
+                       + VXLAN_HLEN + iphdr_len
                        + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0);
 
        /* Need space for new headers (invalidates iph ptr) */
@@ -1840,13 +1751,30 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
                vxlan_build_gbp_hdr(vxh, vxflags, md);
 
        skb_set_inner_protocol(skb, htons(ETH_P_TEB));
-
-       udp_tunnel_xmit_skb(rt, sk, skb, src, dst, tos, ttl, df,
-                           src_port, dst_port, xnet,
-                           !(vxflags & VXLAN_F_UDP_CSUM));
        return 0;
 }
 
+static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
+                                     struct sk_buff *skb, int oif, u8 tos,
+                                     __be32 daddr, __be32 *saddr)
+{
+       struct rtable *rt = NULL;
+       struct flowi4 fl4;
+
+       memset(&fl4, 0, sizeof(fl4));
+       fl4.flowi4_oif = oif;
+       fl4.flowi4_tos = RT_TOS(tos);
+       fl4.flowi4_mark = skb->mark;
+       fl4.flowi4_proto = IPPROTO_UDP;
+       fl4.daddr = daddr;
+       fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+
+       rt = ip_route_output_key(vxlan->net, &fl4);
+       if (!IS_ERR(rt))
+               *saddr = fl4.saddr;
+       return rt;
+}
+
 #if IS_ENABLED(CONFIG_IPV6)
 static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
                                          struct sk_buff *skb, int oif,
@@ -1928,7 +1856,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        struct sock *sk;
        struct rtable *rt = NULL;
        const struct iphdr *old_iph;
-       struct flowi4 fl4;
        union vxlan_addr *dst;
        union vxlan_addr remote_ip;
        struct vxlan_metadata _md;
@@ -1939,6 +1866,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        __u8 tos, ttl;
        int err;
        u32 flags = vxlan->flags;
+       bool udp_sum = false;
+       bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
 
        info = skb_tunnel_info(skb);
 
@@ -1985,13 +1914,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                                     vxlan->cfg.port_max, true);
 
        if (info) {
-               if (info->key.tun_flags & TUNNEL_CSUM)
-                       flags |= VXLAN_F_UDP_CSUM;
-               else
-                       flags &= ~VXLAN_F_UDP_CSUM;
-
                ttl = info->key.ttl;
                tos = info->key.tos;
+               udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
 
                if (info->options_len)
                        md = ip_tunnel_info_opts(info);
@@ -2000,22 +1925,22 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        }
 
        if (dst->sa.sa_family == AF_INET) {
+               __be32 saddr;
+
                if (!vxlan->vn4_sock)
                        goto drop;
                sk = vxlan->vn4_sock->sock->sk;
 
-               if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
-                       df = htons(IP_DF);
-
-               memset(&fl4, 0, sizeof(fl4));
-               fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
-               fl4.flowi4_tos = RT_TOS(tos);
-               fl4.flowi4_mark = skb->mark;
-               fl4.flowi4_proto = IPPROTO_UDP;
-               fl4.daddr = dst->sin.sin_addr.s_addr;
-               fl4.saddr = vxlan->cfg.saddr.sin.sin_addr.s_addr;
+               if (info) {
+                       if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
+                               df = htons(IP_DF);
+               } else {
+                       udp_sum = !!(flags & VXLAN_F_UDP_CSUM);
+               }
 
-               rt = ip_route_output_key(vxlan->net, &fl4);
+               rt = vxlan_get_route(vxlan, skb,
+                                    rdst ? rdst->remote_ifindex : 0, tos,
+                                    dst->sin.sin_addr.s_addr, &saddr);
                if (IS_ERR(rt)) {
                        netdev_dbg(dev, "no route to %pI4\n",
                                   &dst->sin.sin_addr.s_addr);
@@ -2047,16 +1972,14 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
 
                tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
                ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
-               err = vxlan_xmit_skb(rt, sk, skb, fl4.saddr,
-                                    dst->sin.sin_addr.s_addr, tos, ttl, df,
-                                    src_port, dst_port, htonl(vni << 8), md,
-                                    !net_eq(vxlan->net, dev_net(vxlan->dev)),
-                                    flags);
-               if (err < 0) {
-                       /* skb is already freed. */
-                       skb = NULL;
-                       goto rt_tx_error;
-               }
+               err = vxlan_build_skb(skb, &rt->dst, sizeof(struct iphdr),
+                                     htonl(vni << 8), md, flags, udp_sum);
+               if (err < 0)
+                       goto xmit_tx_error;
+
+               udp_tunnel_xmit_skb(rt, sk, skb, saddr,
+                                   dst->sin.sin_addr.s_addr, tos, ttl, df,
+                                   src_port, dst_port, xnet, !udp_sum);
 #if IS_ENABLED(CONFIG_IPV6)
        } else {
                struct dst_entry *ndst;
@@ -2101,11 +2024,20 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                        return;
                }
 
+               if (!info)
+                       udp_sum = !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+
                ttl = ttl ? : ip6_dst_hoplimit(ndst);
-               err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
-                                     0, ttl, src_port, dst_port, htonl(vni << 8), md,
-                                     !net_eq(vxlan->net, dev_net(vxlan->dev)),
-                                     flags);
+               skb_scrub_packet(skb, xnet);
+               err = vxlan_build_skb(skb, ndst, sizeof(struct ipv6hdr),
+                                     htonl(vni << 8), md, flags, udp_sum);
+               if (err < 0) {
+                       dst_release(ndst);
+                       return;
+               }
+               udp_tunnel6_xmit_skb(ndst, sk, skb, dev,
+                                    &saddr, &dst->sin6.sin6_addr,
+                                    0, ttl, src_port, dst_port, !udp_sum);
 #endif
        }
 
@@ -2115,6 +2047,9 @@ drop:
        dev->stats.tx_dropped++;
        goto tx_free;
 
+xmit_tx_error:
+       /* skb is already freed. */
+       skb = NULL;
 rt_tx_error:
        ip_rt_put(rt);
 tx_error:
@@ -2358,52 +2293,41 @@ static void vxlan_set_multicast_list(struct net_device *dev)
 {
 }
 
-static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
+static int __vxlan_change_mtu(struct net_device *dev,
+                             struct net_device *lowerdev,
+                             struct vxlan_rdst *dst, int new_mtu, bool strict)
 {
-       struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_rdst *dst = &vxlan->default_dst;
-       struct net_device *lowerdev;
-       int max_mtu;
+       int max_mtu = IP_MAX_MTU;
 
-       lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
-       if (lowerdev == NULL)
-               return eth_change_mtu(dev, new_mtu);
+       if (lowerdev)
+               max_mtu = lowerdev->mtu;
 
        if (dst->remote_ip.sa.sa_family == AF_INET6)
-               max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
+               max_mtu -= VXLAN6_HEADROOM;
        else
-               max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
+               max_mtu -= VXLAN_HEADROOM;
 
-       if (new_mtu < 68 || new_mtu > max_mtu)
+       if (new_mtu < 68)
                return -EINVAL;
 
+       if (new_mtu > max_mtu) {
+               if (strict)
+                       return -EINVAL;
+
+               new_mtu = max_mtu;
+       }
+
        dev->mtu = new_mtu;
        return 0;
 }
 
-static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
-                               struct ip_tunnel_info *info,
-                               __be16 sport, __be16 dport)
+static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct rtable *rt;
-       struct flowi4 fl4;
-
-       memset(&fl4, 0, sizeof(fl4));
-       fl4.flowi4_tos = RT_TOS(info->key.tos);
-       fl4.flowi4_mark = skb->mark;
-       fl4.flowi4_proto = IPPROTO_UDP;
-       fl4.daddr = info->key.u.ipv4.dst;
-
-       rt = ip_route_output_key(vxlan->net, &fl4);
-       if (IS_ERR(rt))
-               return PTR_ERR(rt);
-       ip_rt_put(rt);
-
-       info->key.u.ipv4.src = fl4.saddr;
-       info->key.tp_src = sport;
-       info->key.tp_dst = dport;
-       return 0;
+       struct vxlan_rdst *dst = &vxlan->default_dst;
+       struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+                                                        dst->remote_ifindex);
+       return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
 }
 
 static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
@@ -2417,9 +2341,16 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
        dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
 
        if (ip_tunnel_info_af(info) == AF_INET) {
+               struct rtable *rt;
+
                if (!vxlan->vn4_sock)
                        return -EINVAL;
-               return egress_ipv4_tun_info(dev, skb, info, sport, dport);
+               rt = vxlan_get_route(vxlan, skb, 0, info->key.tos,
+                                    info->key.u.ipv4.dst,
+                                    &info->key.u.ipv4.src);
+               if (IS_ERR(rt))
+                       return PTR_ERR(rt);
+               ip_rt_put(rt);
        } else {
 #if IS_ENABLED(CONFIG_IPV6)
                struct dst_entry *ndst;
@@ -2432,13 +2363,12 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
                if (IS_ERR(ndst))
                        return PTR_ERR(ndst);
                dst_release(ndst);
-
-               info->key.tp_src = sport;
-               info->key.tp_dst = dport;
 #else /* !CONFIG_IPV6 */
                return -EPFNOSUPPORT;
 #endif
        }
+       info->key.tp_src = sport;
+       info->key.tp_dst = dport;
        return 0;
 }
 
@@ -2756,6 +2686,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        int err;
        bool use_ipv6 = false;
        __be16 default_port = vxlan->cfg.dst_port;
+       struct net_device *lowerdev = NULL;
 
        vxlan->net = src_net;
 
@@ -2776,9 +2707,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
        }
 
        if (conf->remote_ifindex) {
-               struct net_device *lowerdev
-                        = __dev_get_by_index(src_net, conf->remote_ifindex);
-
+               lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
                dst->remote_ifindex = conf->remote_ifindex;
 
                if (!lowerdev) {
@@ -2802,6 +2731,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
                needed_headroom = lowerdev->hard_header_len;
        }
 
+       if (conf->mtu) {
+               err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
+               if (err)
+                       return err;
+       }
+
        if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
                needed_headroom += VXLAN6_HEADROOM;
        else
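
A worked example (illustrative, not part of the commit) of the new __vxlan_change_mtu() behaviour, assuming the usual IPv4 encapsulation overhead of VXLAN_HEADROOM = 20 + 8 + 8 + 14 = 50 bytes: with a lower device MTU of 1500, max_mtu is 1450, so a conf->mtu of 9000 passed at link creation (strict = false) is silently clamped to 1450, while the same request later through vxlan_change_mtu() (strict = true) is rejected with -EINVAL.

/* Standalone sketch of the clamp-versus-reject behaviour; constants are assumptions. */
#include <stdio.h>

#define SKETCH_VXLAN_HEADROOM	(20 + 8 + 8 + 14)	/* assumed IPv4 encap overhead */
#define SKETCH_NO_LOWERDEV_MAX	65535			/* stand-in upper bound when no lower device */

static int sketch_change_mtu(int lowerdev_mtu, int new_mtu, int strict)
{
	int max_mtu = (lowerdev_mtu > 0 ? lowerdev_mtu : SKETCH_NO_LOWERDEV_MAX)
		      - SKETCH_VXLAN_HEADROOM;

	if (new_mtu < 68)
		return -1;			/* mirrors -EINVAL */
	if (new_mtu > max_mtu)
		return strict ? -1 : max_mtu;	/* clamp only when !strict */
	return new_mtu;
}

int main(void)
{
	printf("create time (strict=0): %d\n", sketch_change_mtu(1500, 9000, 0));	/* 1450 */
	printf("runtime     (strict=1): %d\n", sketch_change_mtu(1500, 9000, 1));	/* -1 */
	return 0;
}
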
index 6146a293601a7fea098a353aeb11fc803f5523a4..368de5e5a04f64c46003a131a49546724d6fc7a6 100644 (file)
@@ -6366,12 +6366,13 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
-                              enum ieee80211_ampdu_mlme_action action,
-                              struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                              u8 buf_size, bool amsdu)
+                              struct ieee80211_ampdu_params *params)
 {
        struct ath10k *ar = hw->priv;
        struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
 
        ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
                   arvif->vdev_id, sta->addr, tid, action);
index a7afdeee698c690b9decf1e5da83c884071d9690..73fb4232f9f28c09e6a3f44090dc401684be8747 100644 (file)
@@ -150,18 +150,18 @@ int ath9k_hw_nvram_swap_data(struct ath_hw *ah, bool *swap_needed, int size)
                return -EIO;
        }
 
-       if (magic == AR5416_EEPROM_MAGIC) {
-               *swap_needed = false;
-       } else if (swab16(magic) == AR5416_EEPROM_MAGIC) {
+       *swap_needed = false;
+       if (swab16(magic) == AR5416_EEPROM_MAGIC) {
                if (ah->ah_flags & AH_NO_EEP_SWAP) {
                        ath_info(common,
                                 "Ignoring endianness difference in EEPROM magic bytes.\n");
-
-                       *swap_needed = false;
                } else {
                        *swap_needed = true;
                }
-       } else {
+       } else if (magic != AR5416_EEPROM_MAGIC) {
+               if (ath9k_hw_use_flash(ah))
+                       return 0;
+
                ath_err(common,
                        "Invalid EEPROM Magic (0x%04x).\n", magic);
                return -EINVAL;
index fe1fd1a5ae1502a2c5d2947d1a40ff3ee6193d76..639294a9e34df6b2461b4c4c8052af75cbf2b258 100644 (file)
@@ -1657,13 +1657,14 @@ static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw,
 
 static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
-                                 enum ieee80211_ampdu_mlme_action action,
-                                 struct ieee80211_sta *sta,
-                                 u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+                                 struct ieee80211_ampdu_params *params)
 {
        struct ath9k_htc_priv *priv = hw->priv;
        struct ath9k_htc_sta *ista;
        int ret = 0;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
 
        mutex_lock(&priv->mutex);
        ath9k_htc_ps_wakeup(priv);
index c1b33fdcca0875a20cd2be3b54b21e46c37b725c..cf58a304e9f0aa08d577dc00d49361b568f450d3 100644 (file)
@@ -1864,14 +1864,16 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 
 static int ath9k_ampdu_action(struct ieee80211_hw *hw,
                              struct ieee80211_vif *vif,
-                             enum ieee80211_ampdu_mlme_action action,
-                             struct ieee80211_sta *sta,
-                             u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+                             struct ieee80211_ampdu_params *params)
 {
        struct ath_softc *sc = hw->priv;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        bool flush = false;
        int ret = 0;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        mutex_lock(&sc->mutex);
 
index 19d3d64416bf66825eb113590474b4e4a68f8ec2..4d1527a2e292a2ba2d29907554625cf0fbb980f4 100644 (file)
@@ -1413,10 +1413,12 @@ static void carl9170_ampdu_work(struct work_struct *work)
 
 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
-                                   enum ieee80211_ampdu_mlme_action action,
-                                   struct ieee80211_sta *sta,
-                                   u16 tid, u16 *ssn, u8 buf_size, bool amsdu)
+                                   struct ieee80211_ampdu_params *params)
 {
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
        struct ar9170 *ar = hw->priv;
        struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
        struct carl9170_sta_tid *tid_info;
index 7c169abdbafee95ccfe8f461e670fbb8ab5ca5eb..a27279c2c6950913c0b1f14fd4cacdd3bc650428 100644 (file)
@@ -857,12 +857,14 @@ static int wcn36xx_resume(struct ieee80211_hw *hw)
 
 static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
                    struct ieee80211_vif *vif,
-                   enum ieee80211_ampdu_mlme_action action,
-                   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                   u8 buf_size, bool amsdu)
+                   struct ieee80211_ampdu_params *params)
 {
        struct wcn36xx *wcn = hw->priv;
        struct wcn36xx_sta *sta_priv = NULL;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
                    action, tid);
index 20d07ef679e89d467170abae532819f36f283821..1f231cd0813861fa24bafdb378e2e8cbb0ed3ae1 100644 (file)
@@ -422,6 +422,11 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
        if (sme->privacy && !rsn_eid)
                wil_info(wil, "WSC connection\n");
 
+       if (sme->pbss) {
+               wil_err(wil, "connect - PBSS not yet supported\n");
+               return -EOPNOTSUPP;
+       }
+
        bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
                               sme->ssid, sme->ssid_len,
                               IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
@@ -870,6 +875,11 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
                return -EINVAL;
        }
 
+       if (info->pbss) {
+               wil_err(wil, "AP: PBSS not yet supported\n");
+               return -EOPNOTSUPP;
+       }
+
        switch (info->hidden_ssid) {
        case NL80211_HIDDEN_SSID_NOT_IN_USE:
                hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
index ec013fbd6a81cda4a0cce04a74b469c1f3cda532..c279211e49f91440c7cd48a52feb93a9cbd3580d 100644 (file)
@@ -1215,10 +1215,10 @@ void b43_wireless_core_phy_pll_reset(struct b43_wldev *dev)
        case B43_BUS_BCMA:
                bcma_cc = &dev->dev->bdev->bus->drv_cc;
 
-               bcma_cc_write32(bcma_cc, BCMA_CC_CHIPCTL_ADDR, 0);
-               bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4);
-               bcma_cc_set32(bcma_cc, BCMA_CC_CHIPCTL_DATA, 0x4);
-               bcma_cc_mask32(bcma_cc, BCMA_CC_CHIPCTL_DATA, ~0x4);
+               bcma_cc_write32(bcma_cc, BCMA_CC_PMU_CHIPCTL_ADDR, 0);
+               bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4);
+               bcma_cc_set32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, 0x4);
+               bcma_cc_mask32(bcma_cc, BCMA_CC_PMU_CHIPCTL_DATA, ~0x4);
                break;
 #endif
 #ifdef CONFIG_B43_SSB
index 53637399bb99d0743663c50cd674e2a1711cfc3b..b98db8a0a069bdf71e3c09dcf873aa6905700823 100644 (file)
@@ -879,11 +879,24 @@ int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
        return 0;
 }
 
-static void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
+void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev)
 {
+       struct sdio_func *func;
+       struct mmc_host *host;
+       uint max_blocks;
        uint nents;
        int err;
 
+       func = sdiodev->func[2];
+       host = func->card->host;
+       sdiodev->sg_support = host->max_segs > 1;
+       max_blocks = min_t(uint, host->max_blk_count, 511u);
+       sdiodev->max_request_size = min_t(uint, host->max_req_size,
+                                         max_blocks * func->cur_blksize);
+       sdiodev->max_segment_count = min_t(uint, host->max_segs,
+                                          SG_MAX_SINGLE_ALLOC);
+       sdiodev->max_segment_size = host->max_seg_size;
+
        if (!sdiodev->sg_support)
                return;
 
@@ -1021,9 +1034,6 @@ static void brcmf_sdiod_host_fixup(struct mmc_host *host)
 
 static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
 {
-       struct sdio_func *func;
-       struct mmc_host *host;
-       uint max_blocks;
        int ret = 0;
 
        sdiodev->num_funcs = 2;
@@ -1054,26 +1064,6 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
                goto out;
        }
 
-       /*
-        * determine host related variables after brcmf_sdiod_probe()
-        * as func->cur_blksize is properly set and F2 init has been
-        * completed successfully.
-        */
-       func = sdiodev->func[2];
-       host = func->card->host;
-       sdiodev->sg_support = host->max_segs > 1;
-       max_blocks = min_t(uint, host->max_blk_count, 511u);
-       sdiodev->max_request_size = min_t(uint, host->max_req_size,
-                                         max_blocks * func->cur_blksize);
-       sdiodev->max_segment_count = min_t(uint, host->max_segs,
-                                          SG_MAX_SINGLE_ALLOC);
-       sdiodev->max_segment_size = host->max_seg_size;
-
-       /* allocate scatter-gather table. sg support
-        * will be disabled upon allocation failure.
-        */
-       brcmf_sdiod_sgtable_alloc(sdiodev);
-
        ret = brcmf_sdiod_freezer_attach(sdiodev);
        if (ret)
                goto out;
@@ -1084,7 +1074,7 @@ static int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
                ret = -ENODEV;
                goto out;
        }
-       brcmf_sdiod_host_fixup(host);
+       brcmf_sdiod_host_fixup(sdiodev->func[2]->card->host);
 out:
        if (ret)
                brcmf_sdiod_remove(sdiodev);
index 7b01e4ddb315b70ca0432371ecdb695808c2a4ac..d00c5c1d58bf1ce3c13c56cc22d40429be2d4daa 100644 (file)
@@ -247,7 +247,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
        brcmf_dbg(TRACE, "chandef: control %d center %d width %d\n",
                  ch->chan->center_freq, ch->center_freq1, ch->width);
        ch_inf.chnum = ieee80211_frequency_to_channel(ch->center_freq1);
-       primary_offset = ch->center_freq1 - ch->chan->center_freq;
+       primary_offset = ch->chan->center_freq - ch->center_freq1;
        switch (ch->width) {
        case NL80211_CHAN_WIDTH_20:
        case NL80211_CHAN_WIDTH_20_NOHT:
@@ -256,24 +256,21 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
                break;
        case NL80211_CHAN_WIDTH_40:
                ch_inf.bw = BRCMU_CHAN_BW_40;
-               if (primary_offset < 0)
+               if (primary_offset > 0)
                        ch_inf.sb = BRCMU_CHAN_SB_U;
                else
                        ch_inf.sb = BRCMU_CHAN_SB_L;
                break;
        case NL80211_CHAN_WIDTH_80:
                ch_inf.bw = BRCMU_CHAN_BW_80;
-               if (primary_offset < 0) {
-                       if (primary_offset < -CH_10MHZ_APART)
-                               ch_inf.sb = BRCMU_CHAN_SB_UU;
-                       else
-                               ch_inf.sb = BRCMU_CHAN_SB_UL;
-               } else {
-                       if (primary_offset > CH_10MHZ_APART)
-                               ch_inf.sb = BRCMU_CHAN_SB_LL;
-                       else
-                               ch_inf.sb = BRCMU_CHAN_SB_LU;
-               }
+               if (primary_offset == -30)
+                       ch_inf.sb = BRCMU_CHAN_SB_LL;
+               else if (primary_offset == -10)
+                       ch_inf.sb = BRCMU_CHAN_SB_LU;
+               else if (primary_offset == 10)
+                       ch_inf.sb = BRCMU_CHAN_SB_UL;
+               else
+                       ch_inf.sb = BRCMU_CHAN_SB_UU;
                break;
        case NL80211_CHAN_WIDTH_80P80:
        case NL80211_CHAN_WIDTH_160:
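
A worked example (illustrative only, not driver code) of the corrected sign convention above: primary_offset is now the control-channel frequency minus center_freq1, so for an 80 MHz block centred on 5210 MHz (channel 42) the four 20 MHz control channels map onto the sub-bands as follows.

/* Standalone illustration of the 80 MHz sub-band selection shown in the hunk above. */
#include <stdio.h>

static const char *sb_name(int primary_offset)
{
	if (primary_offset == -30)
		return "BRCMU_CHAN_SB_LL";
	else if (primary_offset == -10)
		return "BRCMU_CHAN_SB_LU";
	else if (primary_offset == 10)
		return "BRCMU_CHAN_SB_UL";
	else
		return "BRCMU_CHAN_SB_UU";
}

int main(void)
{
	const int center_freq1 = 5210;	/* 80 MHz block centred on channel 42 */
	const int control[] = { 5180, 5200, 5220, 5240 };
	int i;

	for (i = 0; i < 4; i++)
		printf("control %d MHz -> offset %+d -> %s\n", control[i],
		       control[i] - center_freq1, sb_name(control[i] - center_freq1));
	return 0;
}
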
index 82e4382eb177c98733f79b79f6dc6e78af3066d7..0e8f2a079907d7c9deb8cef9eaafa61877eaf1b3 100644 (file)
@@ -803,7 +803,14 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
                                *eromaddr -= 4;
                                return -EFAULT;
                        }
-               } while (desc != DMP_DESC_ADDRESS);
+               } while (desc != DMP_DESC_ADDRESS &&
+                        desc != DMP_DESC_COMPONENT);
+
+               /* stop if we crossed the current component's border */
+               if (desc == DMP_DESC_COMPONENT) {
+                       *eromaddr -= 4;
+                       return 0;
+               }
 
                /* skip upper 32-bit address descriptor */
                if (val & DMP_DESC_ADDRSIZE_GT32)
@@ -876,7 +883,8 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
                rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
 
                /* need core with ports */
-               if (nmw + nsw == 0)
+               if (nmw + nsw == 0 &&
+                   id != BCMA_CORE_PMU)
                        continue;
 
                /* try to obtain register address info */
@@ -1006,6 +1014,7 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
 {
        struct brcmf_chip *pub;
        struct brcmf_core_priv *cc;
+       struct brcmf_core *pmu;
        u32 base;
        u32 val;
        int ret = 0;
@@ -1017,11 +1026,15 @@ static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
        /* get chipcommon capabilites */
        pub->cc_caps = chip->ops->read32(chip->ctx,
                                         CORE_CC_REG(base, capabilities));
+       pub->cc_caps_ext = chip->ops->read32(chip->ctx,
+                                            CORE_CC_REG(base,
+                                                        capabilities_ext));
 
        /* get pmu caps & rev */
+       pmu = brcmf_chip_get_pmu(pub); /* after reading cc_caps_ext */
        if (pub->cc_caps & CC_CAP_PMU) {
                val = chip->ops->read32(chip->ctx,
-                                       CORE_CC_REG(base, pmucapabilities));
+                                       CORE_CC_REG(pmu->base, pmucapabilities));
                pub->pmurev = val & PCAP_REV_MASK;
                pub->pmucaps = val;
        }
@@ -1120,6 +1133,23 @@ struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
        return &cc->pub;
 }
 
+struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub)
+{
+       struct brcmf_core *cc = brcmf_chip_get_chipcommon(pub);
+       struct brcmf_core *pmu;
+
+       /* See if there is a separate PMU core available */
+       if (cc->rev >= 35 &&
+           pub->cc_caps_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
+               pmu = brcmf_chip_get_core(pub, BCMA_CORE_PMU);
+               if (pmu)
+                       return pmu;
+       }
+
+       /* Fall back to the ChipCommon core on older hardware */
+       return cc;
+}
+
 bool brcmf_chip_iscoreup(struct brcmf_core *pub)
 {
        struct brcmf_core_priv *core;
@@ -1290,6 +1320,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
 {
        u32 base, addr, reg, pmu_cc3_mask = ~0;
        struct brcmf_chip_priv *chip;
+       struct brcmf_core *pmu = brcmf_chip_get_pmu(pub);
 
        brcmf_dbg(TRACE, "Enter\n");
 
@@ -1309,9 +1340,9 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
        case BRCM_CC_4335_CHIP_ID:
        case BRCM_CC_4339_CHIP_ID:
                /* read PMU chipcontrol register 3 */
-               addr = CORE_CC_REG(base, chipcontrol_addr);
+               addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
                chip->ops->write32(chip->ctx, addr, 3);
-               addr = CORE_CC_REG(base, chipcontrol_data);
+               addr = CORE_CC_REG(pmu->base, chipcontrol_data);
                reg = chip->ops->read32(chip->ctx, addr);
                return (reg & pmu_cc3_mask) != 0;
        case BRCM_CC_43430_CHIP_ID:
@@ -1319,12 +1350,12 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
                reg = chip->ops->read32(chip->ctx, addr);
                return reg != 0;
        default:
-               addr = CORE_CC_REG(base, pmucapabilities_ext);
+               addr = CORE_CC_REG(pmu->base, pmucapabilities_ext);
                reg = chip->ops->read32(chip->ctx, addr);
                if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
                        return false;
 
-               addr = CORE_CC_REG(base, retention_ctl);
+               addr = CORE_CC_REG(pmu->base, retention_ctl);
                reg = chip->ops->read32(chip->ctx, addr);
                return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
                               PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
index f6b5feea23d20508abf4fc0e6670adf51b966d33..dd0ec3eba6a98505e92005789bcbed7fa53dedb1 100644 (file)
@@ -27,6 +27,7 @@
  * @chip: chip identifier.
  * @chiprev: chip revision.
  * @cc_caps: chipcommon core capabilities.
+ * @cc_caps_ext: chipcommon core extended capabilities.
  * @pmucaps: PMU capabilities.
  * @pmurev: PMU revision.
  * @rambase: RAM base address (only applicable for ARM CR4 chips).
@@ -38,6 +39,7 @@ struct brcmf_chip {
        u32 chip;
        u32 chiprev;
        u32 cc_caps;
+       u32 cc_caps_ext;
        u32 pmucaps;
        u32 pmurev;
        u32 rambase;
@@ -83,6 +85,7 @@ struct brcmf_chip *brcmf_chip_attach(void *ctx,
 void brcmf_chip_detach(struct brcmf_chip *chip);
 struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
 struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
+struct brcmf_core *brcmf_chip_get_pmu(struct brcmf_chip *pub);
 bool brcmf_chip_iscoreup(struct brcmf_core *core);
 void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
 void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
index 4265b50faa98126400434f1c28995ded21e9e7f5..cfee477a6eb1f4b3f4ce5af01d3d19d37d768fb6 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/string.h>
 #include <linux/netdevice.h>
+#include <linux/module.h>
 #include <brcmu_wifi.h>
 #include <brcmu_utils.h>
 #include "core.h"
index 1365c12b78fc39632f2e30c99092092e7a0256a8..7269056d0044d82727f57cf2b05d60f5f93118a7 100644 (file)
@@ -93,7 +93,7 @@ static enum nvram_parser_state brcmf_nvram_handle_idle(struct nvram_parser *nvp)
        c = nvp->data[nvp->pos];
        if (c == '\n')
                return COMMENT;
-       if (is_whitespace(c))
+       if (is_whitespace(c) || c == '\0')
                goto proceed;
        if (c == '#')
                return COMMENT;
index 0480b70e3eb84b5b5e627dc022a9589455f85b4d..d5f9ef470447f690d7a26c11f27720196ed8198c 100644 (file)
@@ -1951,6 +1951,9 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = {
 
 #define BRCMF_PCIE_DEVICE(dev_id)      { BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
        PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
+#define BRCMF_PCIE_DEVICE_SUB(dev_id, subvend, subdev) { \
+       BRCM_PCIE_VENDOR_ID_BROADCOM, dev_id,\
+       subvend, subdev, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, 0 }
 
 static struct pci_device_id brcmf_pcie_devid_table[] = {
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID),
@@ -1966,6 +1969,7 @@ static struct pci_device_id brcmf_pcie_devid_table[] = {
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID),
+       BRCMF_PCIE_DEVICE_SUB(0x4365, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4365),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_2G_DEVICE_ID),
        BRCMF_PCIE_DEVICE(BRCM_PCIE_4366_5G_DEVICE_ID),
index dd6614332836a86443e505d9ee6394ec26ccfb6b..c790fa89db0591d1e1015b9b21383394d03ea027 100644 (file)
@@ -45,8 +45,8 @@
 #include "chip.h"
 #include "firmware.h"
 
-#define DCMD_RESP_TIMEOUT      msecs_to_jiffies(2000)
-#define CTL_DONE_TIMEOUT       msecs_to_jiffies(2000)
+#define DCMD_RESP_TIMEOUT      msecs_to_jiffies(2500)
+#define CTL_DONE_TIMEOUT       msecs_to_jiffies(2500)
 
 #ifdef DEBUG
 
@@ -3615,7 +3615,6 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
        const struct sdiod_drive_str *str_tab = NULL;
        u32 str_mask;
        u32 str_shift;
-       u32 base;
        u32 i;
        u32 drivestrength_sel = 0;
        u32 cc_data_temp;
@@ -3658,14 +3657,15 @@ brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
        }
 
        if (str_tab != NULL) {
+               struct brcmf_core *pmu = brcmf_chip_get_pmu(ci);
+
                for (i = 0; str_tab[i].strength != 0; i++) {
                        if (drivestrength >= str_tab[i].strength) {
                                drivestrength_sel = str_tab[i].sel;
                                break;
                        }
                }
-               base = brcmf_chip_get_chipcommon(ci)->base;
-               addr = CORE_CC_REG(base, chipcontrol_addr);
+               addr = CORE_CC_REG(pmu->base, chipcontrol_addr);
                brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
                cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
                cc_data_temp &= ~str_mask;
@@ -3835,8 +3835,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
                goto fail;
 
        /* set PMUControl so a backplane reset does PMU state reload */
-       reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
-                              pmucontrol);
+       reg_addr = CORE_CC_REG(brcmf_chip_get_pmu(bus->ci)->base, pmucontrol);
        reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
        if (err)
                goto fail;
@@ -4114,6 +4113,11 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
                goto fail;
        }
 
+       /* allocate scatter-gather table. sg support
+        * will be disabled upon allocation failure.
+        */
+       brcmf_sdiod_sgtable_alloc(bus->sdiodev);
+
        /* Query the F2 block size, set roundup accordingly */
        bus->blocksize = bus->sdiodev->func[2]->cur_blksize;
        bus->roundup = min(max_roundup, bus->blocksize);
index 5ec7a6d876723029f3a98a1047e0da9bcbf53d53..23f223150cef2cf4ec768b196d906603b92f36b2 100644 (file)
@@ -342,6 +342,7 @@ int brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
 
 /* Issue an abort to the specified function */
 int brcmf_sdiod_abort(struct brcmf_sdio_dev *sdiodev, uint fn);
+void brcmf_sdiod_sgtable_alloc(struct brcmf_sdio_dev *sdiodev);
 void brcmf_sdiod_change_state(struct brcmf_sdio_dev *sdiodev,
                              enum brcmf_sdiod_state state);
 #ifdef CONFIG_PM_SLEEP
index bec2dc1ca2e406bcf4ac0ee0d00fa27a704cf7f2..61ae2768132a0b16f0f98a57b3319c82adfda95b 100644 (file)
@@ -818,13 +818,15 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 static int
 brcms_ops_ampdu_action(struct ieee80211_hw *hw,
                    struct ieee80211_vif *vif,
-                   enum ieee80211_ampdu_mlme_action action,
-                   struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                   u8 buf_size, bool amsdu)
+                   struct ieee80211_ampdu_params *params)
 {
        struct brcms_info *wl = hw->priv;
        struct scb *scb = &wl->wlc->pri_scb;
        int status;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u8 buf_size = params->buf_size;
 
        if (WARN_ON(scb->magic != SCB_MAGIC))
                return -EIDRM;
index fd38aa0763e4f5f30384d29249e7bf205b7d5eeb..b75f4ef3cdc7b278ec837f83fd576444f5ce0008 100644 (file)
@@ -5982,12 +5982,14 @@ il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
 int
 il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 * ssn,
-                       u8 buf_size, bool amsdu)
+                       struct ieee80211_ampdu_params *params)
 {
        struct il_priv *il = hw->priv;
        int ret = -EINVAL;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
 
index 8ab8706f942267fcd2467928b1cbdf248a05921f..e432715e02d89dedcce89615d855233662c26d82 100644 (file)
@@ -182,9 +182,7 @@ void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
                                struct ieee80211_sta *sta, u32 iv32,
                                u16 *phase1key);
 int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                           enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 * ssn,
-                           u8 buf_size, bool amsdu);
+                           struct ieee80211_ampdu_params *params);
 int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                       struct ieee80211_sta *sta);
 void
index 866067789330ad4f4eaf0cebd7f807aa9403d1ec..11932d53ea2418678773d5004826d52dd0b1c897 100644 (file)
@@ -99,6 +99,18 @@ config IWLWIFI_UAPSD
 
          If unsure, say N.
 
+config IWLWIFI_PCIE_RTPM
+       bool "Enable runtime power management mode for PCIe devices"
+       depends on IWLMVM && PM
+       default n
+       help
+         Say Y here to enable runtime power management for PCIe
+         devices.  If enabled, the device will go into low power mode
+         when idle for a short period of time, allowing for improved
+         power saving during runtime.
+
+         If unsure, say N.
+
 menu "Debugging Options"
 
 config IWLWIFI_DEBUG
index 1aabb5ec096f5061082508bf4fe6acc0e0d96099..1bbd17ada974723623acc6b1da53b77131f6121b 100644 (file)
@@ -152,11 +152,14 @@ static void iwl_led_brightness_set(struct led_classdev *led_cdev,
 {
        struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
        unsigned long on = 0;
+       unsigned long off = 0;
 
        if (brightness > 0)
                on = IWL_LED_SOLID;
+       else
+               off = IWL_LED_SOLID;
 
-       iwl_led_cmd(priv, on, 0);
+       iwl_led_cmd(priv, on, off);
 }
 
 static int iwl_led_blink_set(struct led_classdev *led_cdev,
index 29ea1c6705b406f5c9bb410ddb5feaad6c59eb86..c63ea79571ff5f0d6b0bbfd07b4a6faba42f1593 100644 (file)
@@ -396,7 +396,7 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
        iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
                    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
 
-       iwl_trans_d3_suspend(priv->trans, false);
+       iwl_trans_d3_suspend(priv->trans, false, true);
 
        goto out;
 
@@ -469,7 +469,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
        /* we'll clear ctx->vif during iwlagn_prepare_restart() */
        vif = ctx->vif;
 
-       ret = iwl_trans_d3_resume(priv->trans, &d3_status, false);
+       ret = iwl_trans_d3_resume(priv->trans, &d3_status, false, true);
        if (ret)
                goto out_unlock;
 
@@ -732,12 +732,15 @@ static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
 
 static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
                                   struct ieee80211_vif *vif,
-                                  enum ieee80211_ampdu_mlme_action action,
-                                  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                                  u8 buf_size, bool amsdu)
+                                  struct ieee80211_ampdu_params *params)
 {
        struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
        int ret = -EINVAL;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+       u8 buf_size = params->buf_size;
        struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
 
        IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
index e60cf141ed796f0bbcaf1c8b68cf7a08676ce86e..fa41a5e1c8902002d3bf8a1d3991a416d6600499 100644 (file)
 #define IWL7260_UCODE_API_MAX  17
 #define IWL7265_UCODE_API_MAX  17
 #define IWL7265D_UCODE_API_MAX 20
+#define IWL3168_UCODE_API_MAX  20
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   13
 #define IWL7265_UCODE_API_OK   13
 #define IWL7265D_UCODE_API_OK  13
+#define IWL3168_UCODE_API_OK   20
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  13
 #define IWL7265_UCODE_API_MIN  13
 #define IWL7265D_UCODE_API_MIN 13
+#define IWL3168_UCODE_API_MIN  20
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
@@ -92,6 +95,8 @@
 #define IWL3160_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL3165_NVM_VERSION            0x709
 #define IWL3165_TX_POWER_VERSION       0xffff /* meaningless */
+#define IWL3168_NVM_VERSION            0xd01
+#define IWL3168_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL7265_NVM_VERSION            0x0a1d
 #define IWL7265_TX_POWER_VERSION       0xffff /* meaningless */
 #define IWL7265D_NVM_VERSION           0x0c11
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
+#define IWL3168_FW_PRE "iwlwifi-3168-"
+#define IWL3168_MODULE_FIRMWARE(api) IWL3168_FW_PRE __stringify(api) ".ucode"
+
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -180,6 +188,12 @@ static const struct iwl_ht_params iwl7000_ht_params = {
        .ucode_api_ok = IWL7265_UCODE_API_OK,                   \
        .ucode_api_min = IWL7265_UCODE_API_MIN
 
+#define IWL_DEVICE_3008                                                \
+       IWL_DEVICE_7000_COMMON,                                 \
+       .ucode_api_max = IWL3168_UCODE_API_MAX,                 \
+       .ucode_api_ok = IWL3168_UCODE_API_OK,                   \
+       .ucode_api_min = IWL3168_UCODE_API_MIN
+
 #define IWL_DEVICE_7005D                                       \
        IWL_DEVICE_7000_COMMON,                                 \
        .ucode_api_max = IWL7265D_UCODE_API_MAX,                \
@@ -299,11 +313,11 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
 
 const struct iwl_cfg iwl3168_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 3168",
-       .fw_name_pre = IWL7265D_FW_PRE,
-       IWL_DEVICE_7000,
+       .fw_name_pre = IWL3168_FW_PRE,
+       IWL_DEVICE_3008,
        .ht_params = &iwl7000_ht_params,
-       .nvm_ver = IWL3165_NVM_VERSION,
-       .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
+       .nvm_ver = IWL3168_NVM_VERSION,
+       .nvm_calib_ver = IWL3168_TX_POWER_VERSION,
        .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
        .dccm_len = IWL7265_DCCM_LEN,
 };
@@ -376,5 +390,6 @@ const struct iwl_cfg iwl7265d_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+MODULE_FIRMWARE(IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
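
For reference, the string pasting behind these macros resolves to a plain firmware file name; a small illustrative expansion using the defines above (the local variable is hypothetical):

#include <linux/stringify.h>

/* IWL3168_MODULE_FIRMWARE(IWL3168_UCODE_API_OK) pastes the pieces above:
 *   "iwlwifi-3168-" __stringify(20) ".ucode"  ->  "iwlwifi-3168-20.ucode"
 * which is the file the driver will ask userspace for.
 */
static const char example_fw_name[] = IWL3168_FW_PRE __stringify(20) ".ucode";
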
index ecbf4822cd697aa9dc1cf0e3a84bf5d376113bd1..4b93404f46a7f049ff2cd23c44f46e5e7e95354c 100644 (file)
@@ -138,7 +138,8 @@ static const struct iwl_tt_params iwl9000_tt_params = {
        .smem_offset = IWL9000_SMEM_OFFSET,                             \
        .smem_len = IWL9000_SMEM_LEN,                                   \
        .thermal_params = &iwl9000_tt_params,                           \
-       .apmg_not_supported = true
+       .apmg_not_supported = true,                                     \
+       .mq_rx_supported = true
 
 const struct iwl_cfg iwl9260_2ac_cfg = {
                .name = "Intel(R) Dual Band Wireless AC 9260",
index f99048135fb996a632675ea426de3d1da34b6224..dad5570d6cc8e33b6eb6e1914bda2e7fdcf37a33 100644 (file)
@@ -311,6 +311,7 @@ struct iwl_pwr_tx_backoff {
  * @dccm2_len: length of the second DCCM
  * @smem_offset: offset from which the SMEM begins
  * @smem_len: the length of SMEM
+ * @mq_rx_supported: multi-queue rx support
  *
  * We enable the driver to be backward compatible wrt. hardware features.
  * API differences in uCode shouldn't be handled here but through TLVs
@@ -362,6 +363,7 @@ struct iwl_cfg {
        const u32 smem_len;
        const struct iwl_tt_params *thermal_params;
        bool apmg_not_supported;
+       bool mq_rx_supported;
 };
 
 /*
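
A hedged sketch of how common code can key off the new flag (the function is hypothetical; MQ_RX_TABLE_SIZE and RX_QUEUE_SIZE come from the iwl-fh.h hunk further down):

/* Illustrative only: pick the RX descriptor table depth from the new
 * capability flag instead of hard-coding the legacy single-queue size.
 */
static int example_rx_table_size(const struct iwl_trans *trans)
{
	return trans->cfg->mq_rx_supported ? MQ_RX_TABLE_SIZE : RX_QUEUE_SIZE;
}
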
index 5cc6be927eab95d7f5c2b616bf5ddebe4380f050..4ab6682ea53ea19108434fa94c117c38462b950f 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -312,6 +314,77 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
 #define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT        28
 #define FH_MEM_TB_MAX_LENGTH                   (0x00020000)
 
+/* 9000 rx series registers */
+
+#define RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */
+#define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define RFH_Q0_FRBDCB_WIDX 0xA08080
+#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
+/* Read index table */
+#define RFH_Q0_FRBDCB_RIDX 0xA080C0
+#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
+/* Used list table */
+#define RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */
+#define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define RFH_Q0_URBDCB_WIDX 0xA08180
+#define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4)
+#define RFH_Q0_URBDCB_VAID 0xA081C0
+#define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4)
+/* stts */
+#define RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /* 64 bit address */

+#define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)
+
+#define RFH_Q0_ORB_WPTR_LSB 0xA08280
+#define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8)
+#define RFH_RBDBUF_RBD0_LSB 0xA08300
+#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
+
+/* DMA configuration */
+#define RFH_RXF_DMA_CFG 0xA09820
+/* RB size */
+#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
+#define RFH_RXF_DMA_RB_SIZE_POS 16
+#define RFH_RXF_DMA_RB_SIZE_1K (0x1 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_2K (0x2 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_4K (0x4 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_8K (0x8 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_12K        (0x9 << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_16K        (0xA << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_20K        (0xB << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_24K        (0xC << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_28K        (0xD << RFH_RXF_DMA_RB_SIZE_POS)
+#define RFH_RXF_DMA_RB_SIZE_32K        (0xE << RFH_RXF_DMA_RB_SIZE_POS)
+/* RB Circular Buffer size: defines the table sizes in RBD units */
+#define RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */
+#define RFH_RXF_DMA_RBDCB_SIZE_POS 20
+#define RFH_RXF_DMA_RBDCB_SIZE_8       (0x3 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_16      (0x4 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_32      (0x5 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_64      (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_128     (0x7 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_256     (0x8 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_512     (0x9 << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_1024    (0xA << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_RBDCB_SIZE_2048    (0xB << RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bits 24-25 */
+#define RFH_RXF_DMA_MIN_RB_SIZE_POS    24
+#define RFH_RXF_DMA_MIN_RB_4_8 (3 << RFH_RXF_DMA_MIN_RB_SIZE_POS)
+#define RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */
+#define RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31 */
+#define RFH_DMA_EN_ENABLE_VAL BIT(31)
+
+#define RFH_RXF_RXQ_ACTIVE 0xA0980C
+
+#define RFH_GEN_CFG    0xA09800
+#define RFH_GEN_CFG_DEFAULT_RXQ_NUM_MASK 0xF00
+#define RFH_GEN_CFG_SERVICE_DMA_SNOOP BIT(0)
+#define RFH_GEN_CFG_RFH_DMA_SNOOP BIT(1)
+#define DEFAULT_RXQ_NUM 8
+
+/* end of 9000 rx series registers */
+
 /* TFDB  Area - TFDs buffer table */
 #define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK      (0xFFFFFFFF)
 #define FH_TFDIB_LOWER_BOUND       (FH_MEM_LOWER_BOUND + 0x900)
@@ -434,6 +507,10 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
  */
 #define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN   (0x00000002)
 
+#define MQ_RX_TABLE_SIZE               512
+#define MQ_RX_TABLE_MASK               (MQ_RX_TABLE_SIZE - 1)
+#define MQ_RX_POOL_SIZE                        MQ_RX_TABLE_MASK
+
 #define RX_QUEUE_SIZE                         256
 #define RX_QUEUE_MASK                         255
 #define RX_QUEUE_SIZE_LOG                     8
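
The RFH_Q_*(q) helpers above simply stride from the queue-0 base register (8 bytes for the 64-bit base-address registers, 4 bytes for the 32-bit index registers). A hedged sketch of walking them, assuming the driver's existing iwl_write_prph() helper; the function and the zeroing loop are illustrative, not the driver's actual multi-queue init path:

static void example_reset_frbd_write_indexes(struct iwl_trans *trans)
{
	int q;

	/* e.g. RFH_Q_FRBDCB_WIDX(2) == 0xA08080 + 2 * 4 == 0xA08088 */
	for (q = 0; q < DEFAULT_RXQ_NUM; q++)
		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(q), 0);
}
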
index a5aaf685370462d97bae6b9c0935c567ad758bfa..8425e1a587d97a5ea00d914ce8f78538df1eb910 100644 (file)
@@ -293,6 +293,8 @@ iwl_fw_error_next_data(struct iwl_fw_error_dump_data *data)
  * @FW_DBG_TX_LATENCY: trigger log collection when the tx latency goes above a
  *     threshold.
  * @FW_DBG_TDLS: trigger log collection upon TDLS related events.
+ * @FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon tx status when
+ *  the firmware sends a tx reply.
  */
 enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_INVALID = 0,
@@ -309,6 +311,7 @@ enum iwl_fw_dbg_trigger {
        FW_DBG_TRIGGER_BA,
        FW_DBG_TRIGGER_TX_LATENCY,
        FW_DBG_TRIGGER_TDLS,
+       FW_DBG_TRIGGER_TX_STATUS,
 
        /* must be last */
        FW_DBG_TRIGGER_MAX,
index 84f8aeb926c8748f443e15ab74c01dfd1a51f77d..e2dbc67a367b9a609d7a63b1e7d26da3c7a77243 100644 (file)
@@ -297,10 +297,12 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
  *     which also implies support for the scheduler configuration command
  * @IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
  * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
  * @IWL_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
  * @IWL_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
  * @IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD
  * @IWL_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
  * @IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
  *     sources for the MCC. This TLV bit is a future replacement to
@@ -313,6 +315,8 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
  * @IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
  * @IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
  *     antenna the beacon should be transmitted
+ * @IWL_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
+ *     from AP and will send it upon d0i3 exit.
  * @IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
  *
  * @NUM_IWL_UCODE_TLV_CAPA: number of bits used
@@ -330,10 +334,12 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT         = (__force iwl_ucode_tlv_capa_t)11,
        IWL_UCODE_TLV_CAPA_DQA_SUPPORT                  = (__force iwl_ucode_tlv_capa_t)12,
        IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH          = (__force iwl_ucode_tlv_capa_t)13,
+       IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG            = (__force iwl_ucode_tlv_capa_t)17,
        IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT              = (__force iwl_ucode_tlv_capa_t)18,
        IWL_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT         = (__force iwl_ucode_tlv_capa_t)19,
        IWL_UCODE_TLV_CAPA_CSUM_SUPPORT                 = (__force iwl_ucode_tlv_capa_t)21,
        IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS           = (__force iwl_ucode_tlv_capa_t)22,
+       IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD         = (__force iwl_ucode_tlv_capa_t)26,
        IWL_UCODE_TLV_CAPA_BT_COEX_PLCR                 = (__force iwl_ucode_tlv_capa_t)28,
        IWL_UCODE_TLV_CAPA_LAR_MULTI_MCC                = (__force iwl_ucode_tlv_capa_t)29,
        IWL_UCODE_TLV_CAPA_BT_COEX_RRC                  = (__force iwl_ucode_tlv_capa_t)30,
@@ -341,7 +347,9 @@ enum iwl_ucode_tlv_capa {
        IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE         = (__force iwl_ucode_tlv_capa_t)64,
        IWL_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS            = (__force iwl_ucode_tlv_capa_t)65,
        IWL_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT             = (__force iwl_ucode_tlv_capa_t)67,
+       IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT       = (__force iwl_ucode_tlv_capa_t)68,
        IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION         = (__force iwl_ucode_tlv_capa_t)71,
+       IWL_UCODE_TLV_CAPA_BEACON_STORING               = (__force iwl_ucode_tlv_capa_t)72,
        IWL_UCODE_TLV_CAPA_LAR_SUPPORT_V2               = (__force iwl_ucode_tlv_capa_t)73,
 
        NUM_IWL_UCODE_TLV_CAPA
@@ -747,6 +755,19 @@ struct iwl_fw_dbg_trigger_tdls {
        u8 reserved[4];
 } __packed;
 
+/**
+ * struct iwl_fw_dbg_trigger_tx_status - configures trigger for tx response
+ *  status.
+ * @statuses: the list of statuses to trigger the collection on
+ */
+struct iwl_fw_dbg_trigger_tx_status {
+       struct tx_status {
+               u8 status;
+               u8 reserved[3];
+       } __packed statuses[16];
+       __le32 reserved[2];
+} __packed;
+
 /**
  * struct iwl_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
  * @id: conf id
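
Capability bits like IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT and IWL_UCODE_TLV_CAPA_BEACON_STORING are probed at runtime through fw_has_capa(), as the d3.c and mvm.h hunks below do; a minimal sketch (the wrapper itself is hypothetical):

static bool example_fw_stores_beacons(const struct iwl_fw *fw)
{
	/* true only if the loaded ucode advertised the new TLV bit */
	return fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BEACON_STORING);
}
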
index fd42f63f5e84a0b7db8c3be5831e956704fe5868..b88ecc7892a903c791dac8131fcb7fa600f2d44e 100644 (file)
@@ -108,6 +108,8 @@ enum iwl_amsdu_size {
  * @power_level: power level, default = 1
  * @debug_level: levels are IWL_DL_*
  * @ant_coupling: antenna coupling in dB, default = 0
+ * @nvm_file: specifies an external NVM file
+ * @uapsd_disable: disable U-APSD, default = 1
  * @d0i3_disable: disable d0i3, default = 1,
  * @d0i3_entry_delay: time to wait after no refs are taken before
  *     entering D0i3 (in msecs)
index 7b89bfc8c8ac3b1025b913e1dfd0bd017853dfa0..50f4cc60cf3e9800fdecfe1fe6c40db199f159fb 100644 (file)
@@ -539,7 +539,7 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                                           struct iwl_nvm_data *data,
                                           const __le16 *mac_override,
                                           const __le16 *nvm_hw,
-                                          u32 mac_addr0, u32 mac_addr1)
+                                          __le32 mac_addr0, __le32 mac_addr1)
 {
        const u8 *hw_addr;
 
@@ -583,7 +583,8 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
 
                if (!is_valid_ether_addr(data->hw_addr))
                        IWL_ERR_DEV(dev,
-                                   "mac address from hw section is not valid\n");
+                                   "mac address (%pM) from hw section is not valid\n",
+                                   data->hw_addr);
 
                return;
        }
@@ -597,7 +598,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
                   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-                  u32 mac_addr0, u32 mac_addr1)
+                  __le32 mac_addr0, __le32 mac_addr1)
 {
        struct iwl_nvm_data *data;
        u32 sku;
index 92466ee72806218a5825da285ddebdc2e6768d9c..4e8e0dc474d49fec9c33d9b8598c1eb050f70cc5 100644 (file)
@@ -79,7 +79,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                   const __le16 *nvm_calib, const __le16 *regulatory,
                   const __le16 *mac_override, const __le16 *phy_sku,
                   u8 tx_chains, u8 rx_chains, bool lar_fw_supported,
-                  u32 mac_addr0, u32 mac_addr1);
+                  __le32 mac_addr0, __le32 mac_addr1);
 
 /**
  * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
index 82fb3a97a46d3f6a165a5623d4a5be83883b092e..0ca0f13b69b0fa66e4d7ee13ed1f4c301bb2a1f6 100644 (file)
@@ -506,7 +506,7 @@ struct iwl_trans_config {
        bool sw_csum_tx;
        const struct iwl_hcmd_arr *command_groups;
        int command_groups_size;
+
        u32 sdio_adma_addr;
 };
 
@@ -618,9 +618,9 @@ struct iwl_trans_ops {
        void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
        void (*stop_device)(struct iwl_trans *trans, bool low_power);
 
-       void (*d3_suspend)(struct iwl_trans *trans, bool test);
+       void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
        int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
-                        bool test);
+                        bool test, bool reset);
 
        int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
 
@@ -736,6 +736,11 @@ enum iwl_plat_pm_mode {
        IWL_PLAT_PM_MODE_D0I3,
 };
 
+/* Max time to wait for trans to become idle/non-idle on d0i3
+ * enter/exit (in msecs).
+ */
+#define IWL_TRANS_IDLE_TIMEOUT 2000
+
 /**
  * struct iwl_trans - transport common data
  *
@@ -920,22 +925,23 @@ static inline void iwl_trans_stop_device(struct iwl_trans *trans)
        _iwl_trans_stop_device(trans, true);
 }
 
-static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
+static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
+                                       bool reset)
 {
        might_sleep();
        if (trans->ops->d3_suspend)
-               trans->ops->d3_suspend(trans, test);
+               trans->ops->d3_suspend(trans, test, reset);
 }
 
 static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
                                      enum iwl_d3_status *status,
-                                     bool test)
+                                     bool test, bool reset)
 {
        might_sleep();
        if (!trans->ops->d3_resume)
                return 0;
 
-       return trans->ops->d3_resume(trans, status, test);
+       return trans->ops->d3_resume(trans, status, test, reset);
 }
 
 static inline void iwl_trans_ref(struct iwl_trans *trans)
index b00c03fcd4473cc670cd1489c3504c767de62c65..4b560e4417ee55fa1da2539dbe7f3c1482fe8790 100644 (file)
@@ -6,7 +6,8 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +33,8 @@
  * BSD LICENSE
  *
  * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2015        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -73,6 +75,7 @@
 #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT      (10 * USEC_PER_MSEC)
 #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT       (2 * 1024) /* defined in TU */
 #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT       (40 * 1024) /* defined in TU */
+#define IWL_MVM_P2P_UAPSD_STANDALONE           0
 #define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE       0
 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
 #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT          (50 * USEC_PER_MSEC)
 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK       1
 #define IWL_MVM_TOF_IS_RESPONDER               0
 #define IWL_MVM_SW_TX_CSUM_OFFLOAD             0
+#define IWL_MVM_COLLECT_FW_ERR_DUMP            1
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE    1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE      2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW   1
index d3e21d95cecec52830163c4cd674d444b6940a41..346376187ef849455ae1b60da0039cc2e48fd633 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016   Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016   Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -851,7 +853,8 @@ iwl_mvm_get_wowlan_config(struct iwl_mvm *mvm,
        wowlan_config_cmd->is_11n_connection =
                                        ap_sta->ht_cap.ht_supported;
        wowlan_config_cmd->flags = ENABLE_L3_FILTERING |
-               ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING;
+               ENABLE_NBNS_FILTERING | ENABLE_DHCP_FILTERING |
+               ENABLE_STORE_BEACON;
 
        /* Query the last used seqno and set it */
        ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
@@ -1023,14 +1026,18 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
                      struct ieee80211_sta *ap_sta)
 {
        int ret;
+       bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+                                        IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
-       ret = iwl_mvm_switch_to_d3(mvm);
-       if (ret)
-               return ret;
+       if (!unified_image) {
+               ret = iwl_mvm_switch_to_d3(mvm);
+               if (ret)
+                       return ret;
 
-       ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
-       if (ret)
-               return ret;
+               ret = iwl_mvm_d3_reprogram(mvm, vif, ap_sta);
+               if (ret)
+                       return ret;
+       }
 
        if (!iwlwifi_mod_params.sw_crypto) {
                /*
@@ -1072,10 +1079,14 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm,
 {
        struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
        int ret;
+       bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+                                        IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
-       ret = iwl_mvm_switch_to_d3(mvm);
-       if (ret)
-               return ret;
+       if (!unified_image) {
+               ret = iwl_mvm_switch_to_d3(mvm);
+               if (ret)
+                       return ret;
+       }
 
        /* rfkill release can be either for wowlan or netdetect */
        if (wowlan->rfkill_release)
@@ -1151,6 +1162,8 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
        };
        int ret;
        int len __maybe_unused;
+       bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+                                        IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
 
        if (!wowlan) {
                /*
@@ -1236,7 +1249,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 
        clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
 
-       iwl_trans_d3_suspend(mvm->trans, test);
+       iwl_trans_d3_suspend(mvm->trans, test, !unified_image);
  out:
        if (ret < 0) {
                iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
@@ -1299,7 +1312,7 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
                __set_bit(D0I3_DEFER_WAKEUP, &mvm->d0i3_suspend_flags);
                mutex_unlock(&mvm->d0i3_suspend_mutex);
 
-               iwl_trans_d3_suspend(trans, false);
+               iwl_trans_d3_suspend(trans, false, false);
 
                return 0;
        }
@@ -2041,9 +2054,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
 static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
 {
        struct ieee80211_vif *vif = NULL;
-       int ret;
+       int ret = 1;
        enum iwl_d3_status d3_status;
        bool keep = false;
+       bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
+                                        IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
+
+       u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+                                   CMD_WAKE_UP_TRANS;
 
        mutex_lock(&mvm->mutex);
 
@@ -2052,7 +2070,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        if (IS_ERR_OR_NULL(vif))
                goto err;
 
-       ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
+       ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test, !unified_image);
        if (ret)
                goto err;
 
@@ -2095,17 +2113,28 @@ out_iterate:
                        iwl_mvm_d3_disconnect_iter, keep ? vif : NULL);
 
 out:
-       /* return 1 to reconfigure the device */
+       if (unified_image && !ret) {
+               ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+               if (!ret) /* D3 ended successfully - no need to reset device */
+                       return 0;
+       }
+
+       /*
+        * Reconfigure the device in one of the following cases:
+        * 1. We are not using a unified image
+        * 2. We are using a unified image but had an error while exiting D3
+        */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
-
-       /* We always return 1, which causes mac80211 to do a reconfig
-        * with IEEE80211_RECONFIG_TYPE_RESTART.  This type of
-        * reconfig calls iwl_mvm_restart_complete(), where we unref
-        * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
-        * reference here.
+       /*
+        * When switching images we return 1, which causes mac80211
+        * to do a reconfig with IEEE80211_RECONFIG_TYPE_RESTART.
+        * This type of reconfig calls iwl_mvm_restart_complete(),
+        * where we unref the IWL_MVM_REF_UCODE_DOWN, so we need
+        * to take the reference here.
         */
        iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
        return 1;
 }
 
@@ -2122,7 +2151,7 @@ static int iwl_mvm_resume_d0i3(struct iwl_mvm *mvm)
        enum iwl_d3_status d3_status;
        struct iwl_trans *trans = mvm->trans;
 
-       iwl_trans_d3_resume(trans, &d3_status, false);
+       iwl_trans_d3_resume(trans, &d3_status, false, false);
 
        /*
         * make sure to clear D0I3_DEFER_WAKEUP before
index 9e0d46368cdd1c0581f218ce757ab8a258ee93e3..14004456bf550db58ff95c511e6dee3203e32fed 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1255,6 +1257,7 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        struct iwl_mvm *mvm = mvmvif->mvm;
+       bool prev;
        u8 value;
        int ret;
 
@@ -1265,7 +1268,9 @@ static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
                return -EINVAL;
 
        mutex_lock(&mvm->mutex);
-       iwl_mvm_update_low_latency(mvm, vif, value);
+       prev = iwl_mvm_vif_low_latency(mvmvif);
+       mvmvif->low_latency_dbgfs = value;
+       iwl_mvm_update_low_latency(mvm, vif, prev);
        mutex_unlock(&mvm->mutex);
 
        return count;
@@ -1277,11 +1282,15 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
 {
        struct ieee80211_vif *vif = file->private_data;
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       char buf[2];
+       char buf[30] = {};
+       int len;
 
-       buf[0] = mvmvif->low_latency ? '1' : '0';
-       buf[1] = '\n';
-       return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+       len = snprintf(buf, sizeof(buf) - 1,
+                      "traffic=%d\ndbgfs=%d\nvcmd=%d\n",
+                      mvmvif->low_latency_traffic,
+                      mvmvif->low_latency_dbgfs,
+                      mvmvif->low_latency_vcmd);
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
 }
 
 static ssize_t iwl_dbgfs_uapsd_misbehaving_read(struct file *file,
@@ -1363,6 +1372,59 @@ static ssize_t iwl_dbgfs_rx_phyinfo_read(struct file *file,
        return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
 }
 
+static void iwl_dbgfs_quota_check(void *data, u8 *mac,
+                                 struct ieee80211_vif *vif)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       int *ret = data;
+
+       if (mvmvif->dbgfs_quota_min)
+               *ret = -EINVAL;
+}
+
+static ssize_t iwl_dbgfs_quota_min_write(struct ieee80211_vif *vif, char *buf,
+                                        size_t count, loff_t *ppos)
+{
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_mvm *mvm = mvmvif->mvm;
+       u16 value;
+       int ret;
+
+       ret = kstrtou16(buf, 0, &value);
+       if (ret)
+               return ret;
+
+       if (value > 95)
+               return -EINVAL;
+
+       mutex_lock(&mvm->mutex);
+
+       mvmvif->dbgfs_quota_min = 0;
+       ieee80211_iterate_interfaces(mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+                                    iwl_dbgfs_quota_check, &ret);
+       if (ret == 0) {
+               mvmvif->dbgfs_quota_min = value;
+               iwl_mvm_update_quotas(mvm, false, NULL);
+       }
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
+                                       char __user *user_buf,
+                                       size_t count, loff_t *ppos)
+{
+       struct ieee80211_vif *vif = file->private_data;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       char buf[10];
+       int len;
+
+       len = snprintf(buf, sizeof(buf), "%d\n", mvmvif->dbgfs_quota_min);
+
+       return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
 #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
        _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
 #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1386,6 +1448,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_req_ext, 32);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
 MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
 
 void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
@@ -1423,6 +1486,8 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                 S_IRUSR | S_IWUSR);
        MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir,
                                 S_IRUSR | S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
+                                S_IRUSR | S_IWUSR);
 
        if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
            mvmvif == mvm->bf_allowed_vif)
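
After this split, the per-vif low_latency debugfs file reports each source on its own line instead of a single flag; a sketch of what a read now returns (values illustrative):

/* Illustrative only: output of the new iwl_dbgfs_low_latency_read(),
 * one line per source, e.g.:
 *
 *   traffic=0
 *   dbgfs=1
 *   vcmd=0
 *
 * iwl_mvm_vif_low_latency() reports the logical OR of the three.
 */
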
index 90500e2d107b60ada39c82adaacedc9a5051cb7e..c529e5355803f10a923b601f5dc46acbd62cfd66 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -261,17 +262,18 @@ static ssize_t iwl_dbgfs_nic_temp_read(struct file *file,
 {
        struct iwl_mvm *mvm = file->private_data;
        char buf[16];
-       int pos, temp;
+       int pos, ret;
+       s32 temp;
 
        if (!mvm->ucode_loaded)
                return -EIO;
 
        mutex_lock(&mvm->mutex);
-       temp = iwl_mvm_get_temp(mvm);
+       ret = iwl_mvm_get_temp(mvm, &temp);
        mutex_unlock(&mvm->mutex);
 
-       if (temp < 0)
-               return temp;
+       if (ret)
+               return -EIO;
 
        pos = scnprintf(buf , sizeof(buf), "%d\n", temp);
 
@@ -942,6 +944,47 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
        return count;
 }
 
+static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
+                                              char *buf, size_t count,
+                                              loff_t *ppos)
+{
+       struct iwl_rss_config_cmd cmd = {
+               .flags = cpu_to_le32(IWL_RSS_ENABLE),
+               .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+                            IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+                            IWL_RSS_HASH_TYPE_IPV6_TCP |
+                            IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+       };
+       int ret, i, num_repeats, nbytes = count / 2;
+
+       ret = hex2bin(cmd.indirection_table, buf, nbytes);
+       if (ret)
+               return ret;
+
+       /*
+        * The input is the redirection table, partial or full.
+        * Repeat the pattern if needed.
+        * For example, input of 01020F will be repeated 42 times,
+        * indirecting RSS hash results to queues 1, 2, 15 (skipping
+        * queues 3 - 14).
+        */
+       num_repeats = ARRAY_SIZE(cmd.indirection_table) / nbytes;
+       for (i = 1; i < num_repeats; i++)
+               memcpy(&cmd.indirection_table[i * nbytes],
+                      cmd.indirection_table, nbytes);
+       /* handle cut in the middle pattern for the last places */
+       memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
+              ARRAY_SIZE(cmd.indirection_table) % nbytes);
+
+       memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key));
+
+       mutex_lock(&mvm->mutex);
+       ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+       mutex_unlock(&mvm->mutex);
+
+       return ret ?: count;
+}
+
 static ssize_t iwl_dbgfs_fw_dbg_conf_read(struct file *file,
                                          char __user *user_buf,
                                          size_t count, loff_t *ppos)
@@ -983,7 +1026,7 @@ static ssize_t iwl_dbgfs_cont_recording_write(struct iwl_mvm *mvm,
            trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return -EOPNOTSUPP;
 
-       ret = kstrtouint(buf, 0, &rec_mode);
+       ret = kstrtoint(buf, 0, &rec_mode);
        if (ret)
                return ret;
 
@@ -1454,6 +1497,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
 MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
 MVM_DEBUGFS_WRITE_FILE_OPS(cont_recording, 8);
+MVM_DEBUGFS_WRITE_FILE_OPS(indirection_tbl, 16);
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
 MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
@@ -1498,11 +1542,15 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
        MVM_DEBUGFS_ADD_FILE(fw_dbg_collect, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(send_echo_cmd, mvm->debugfs_dir, S_IWUSR);
        MVM_DEBUGFS_ADD_FILE(cont_recording, mvm->debugfs_dir, S_IWUSR);
+       MVM_DEBUGFS_ADD_FILE(indirection_tbl, mvm->debugfs_dir, S_IWUSR);
        if (!debugfs_create_bool("enable_scan_iteration_notif",
                                 S_IRUSR | S_IWUSR,
                                 mvm->debugfs_dir,
                                 &mvm->scan_iter_notif_enabled))
                goto err;
+       if (!debugfs_create_bool("drop_bcn_ap_mode", S_IRUSR | S_IWUSR,
+                                mvm->debugfs_dir, &mvm->drop_bcn_ap_mode))
+               goto err;
 
 #ifdef CONFIG_IWLWIFI_BCAST_FILTERING
        if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
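
The repetition logic in iwl_dbgfs_indirection_tbl_write() above fills the 128-entry table from a shorter hex pattern; a standalone sketch of the same fill rule (not driver code) makes the arithmetic for the "01020F" example explicit: 128 / 3 = 42 full copies cover entries 0..125, and the 128 % 3 = 2 leftover entries get the first two pattern bytes.

#include <linux/string.h>
#include <linux/types.h>

/* Standalone illustration of the fill rule for a pattern of nbytes
 * entries and the 128-entry table used by the RSS command.
 */
static void example_fill_indirection(u8 *table, const u8 *pattern, int nbytes)
{
	int i, num_repeats = 128 / nbytes;

	for (i = 0; i < num_repeats; i++)
		memcpy(&table[i * nbytes], pattern, nbytes);
	/* tail: e.g. 128 % 3 == 2 entries for the "01020F" example above */
	memcpy(&table[i * nbytes], pattern, 128 % nbytes);
}
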
index 62b9a0a96700738ad5df1c15a8d02305804fbb0d..eec52c57f71831014d550c917f5452015373d5fd 100644 (file)
@@ -251,6 +251,7 @@ enum iwl_wowlan_flags {
        ENABLE_L3_FILTERING     = BIT(1),
        ENABLE_NBNS_FILTERING   = BIT(2),
        ENABLE_DHCP_FILTERING   = BIT(3),
+       ENABLE_STORE_BEACON     = BIT(4),
 };
 
 struct iwl_wowlan_config_cmd {
index fb6d341d6f3dce33f26fe4e1abe73a531dc58d80..df939f51d9b97e75ddf854fa246662dbbc27f143 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -287,16 +287,13 @@ enum iwl_rx_mpdu_status {
        IWL_RX_MPDU_STATUS_KEY_ERROR            = BIT(4),
        IWL_RX_MPDU_STATUS_ICV_OK               = BIT(5),
        IWL_RX_MPDU_STATUS_MIC_OK               = BIT(6),
-       /* TODO - verify this is the correct value */
        IWL_RX_MPDU_RES_STATUS_TTAK_OK          = BIT(7),
        IWL_RX_MPDU_STATUS_SEC_MASK             = 0x7 << 8,
        IWL_RX_MPDU_STATUS_SEC_NONE             = 0x0 << 8,
        IWL_RX_MPDU_STATUS_SEC_WEP              = 0x1 << 8,
        IWL_RX_MPDU_STATUS_SEC_CCM              = 0x2 << 8,
        IWL_RX_MPDU_STATUS_SEC_TKIP             = 0x3 << 8,
-       /* TODO - define IWL_RX_MPDU_STATUS_SEC_EXT_ENC - this is a stub */
        IWL_RX_MPDU_STATUS_SEC_EXT_ENC          = 0x4 << 8,
-       /* TODO - define IWL_RX_MPDU_STATUS_SEC_GCM - this is a stub */
        IWL_RX_MPDU_STATUS_SEC_GCM              = 0x5 << 8,
        IWL_RX_MPDU_STATUS_DECRYPTED            = BIT(11),
        IWL_RX_MPDU_STATUS_WEP_MATCH            = BIT(12),
@@ -350,11 +347,11 @@ struct iwl_rx_mpdu_desc {
        /* DW8 */
        __le32 filter_match;
        /* DW9 */
-       __le32 gp2_on_air_rise;
-       /* DW10 */
        __le32 rate_n_flags;
+       /* DW10 */
+       u8 energy_a, energy_b, channel, reserved;
        /* DW11 */
-       u8 energy_a, energy_b, energy_c, channel;
+       __le32 gp2_on_air_rise;
        /* DW12 & DW13 */
        __le64 tsf_on_air_rise;
 } __packed;
@@ -365,4 +362,33 @@ struct iwl_frame_release {
        __le16 nssn;
 };
 
+enum iwl_rss_hash_func_en {
+       IWL_RSS_HASH_TYPE_IPV4_TCP,
+       IWL_RSS_HASH_TYPE_IPV4_UDP,
+       IWL_RSS_HASH_TYPE_IPV4_PAYLOAD,
+       IWL_RSS_HASH_TYPE_IPV6_TCP,
+       IWL_RSS_HASH_TYPE_IPV6_UDP,
+       IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+};
+
+#define IWL_RSS_HASH_KEY_CNT 10
+#define IWL_RSS_INDIRECTION_TABLE_SIZE 128
+#define IWL_RSS_ENABLE 1
+
+/**
+ * struct iwl_rss_config_cmd - RSS (Receive Side Scaling) configuration
+ *
+ * @flags: 1 - enable, 0 - disable
+ * @hash_mask: Type of RSS to use. Values are from %iwl_rss_hash_func_en
+ * @secret_key: 320 bit input of random key configuration from driver
+ * @indirection_table: indirection table
+ */
+struct iwl_rss_config_cmd {
+       __le32 flags;
+       u8 hash_mask;
+       u8 reserved[3];
+       __le32 secret_key[IWL_RSS_HASH_KEY_CNT];
+       u8 indirection_table[IWL_RSS_INDIRECTION_TABLE_SIZE];
+} __packed; /* RSS_CONFIG_CMD_API_S_VER_1 */
+
 #endif /* __fw_api_rx_h__ */
index 6fca4fb1d3064d8b090e57c9ed681d33ca744ea7..90d9113948363740ae4cda336bcf19a2649c2e8c 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -253,6 +255,68 @@ struct iwl_mvm_keyinfo {
        __le64 hw_tkip_mic_tx_key;
 } __packed;
 
+#define IWL_ADD_STA_STATUS_MASK        0xFF
+#define IWL_ADD_STA_BAID_MASK  0xFF00
+
+/**
+ * struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: 1: modify existing, 0: add new station
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ *     AMPDU for tid x. Set %STA_MODIFY_TID_DISABLE_TX to change this field.
+ * @mac_id_n_color: the Mac context this station belongs to
+ * @addr[ETH_ALEN]: station's MAC address
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: STA_MODIFY_*, selects which parameters to modify vs. leave
+ *     alone. 1 - modify, 0 - don't change.
+ * @station_flags: look at %iwl_sta_flags
+ * @station_flags_msk: what of %station_flags have changed
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
+ *     Set %STA_MODIFY_ADD_BA_TID to use this field, and also set
+ *     add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx)
+ *     Set %STA_MODIFY_REMOVE_BA_TID to use this field
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ *     add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to station even though it is
+ *     asleep. Used to synchronise PS-poll and u-APSD responses while ucode
+ *     keeps track of STA sleep state.
+ * @sleep_state_flags: Look at %iwl_sta_sleep_flag.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ *     mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwl_mvm_add_sta_cmd_v7 {
+       u8 add_modify;
+       u8 awake_acs;
+       __le16 tid_disable_tx;
+       __le32 mac_id_n_color;
+       u8 addr[ETH_ALEN];      /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+       __le16 reserved2;
+       u8 sta_id;
+       u8 modify_mask;
+       __le16 reserved3;
+       __le32 station_flags;
+       __le32 station_flags_msk;
+       u8 add_immediate_ba_tid;
+       u8 remove_immediate_ba_tid;
+       __le16 add_immediate_ba_ssn;
+       __le16 sleep_tx_count;
+       __le16 sleep_state_flags;
+       __le16 assoc_id;
+       __le16 beamform_flags;
+       __le32 tfd_queue_msk;
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
+
 /**
  * struct iwl_mvm_add_sta_cmd - Add/modify a station in the fw's sta table.
  * ( REPLY_ADD_STA = 0x18 )
@@ -282,6 +346,7 @@ struct iwl_mvm_keyinfo {
  *     mac-addr.
  * @beamform_flags: beam forming controls
  * @tfd_queue_msk: tfd queues used by this station
+ * @rx_ba_window: aggregation window size
  *
  * The device contains an internal table of per-station information, with info
  * on security keys, aggregation parameters, and Tx rates for initial Tx
@@ -310,7 +375,9 @@ struct iwl_mvm_add_sta_cmd {
        __le16 assoc_id;
        __le16 beamform_flags;
        __le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_7 */
+       __le16 rx_ba_window;
+       __le16 reserved;
+} __packed; /* ADD_STA_CMD_API_S_VER_8 */
 
 /**
  * struct iwl_mvm_add_sta_key_cmd - add/modify sta key
index 0036d18334af4ab893b0b45214e972a3836d7885..ba3f0bbddde8874db03a74cd9a0ca01df862bc38 100644 (file)
@@ -510,6 +510,9 @@ struct iwl_mvm_tx_resp {
  * @scd_ssn: the index of the last contiguously sent packet
  * @txed: number of Txed frames in this batch
  * @txed_2_done: number of Acked frames in this batch
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ *     not a copy from the LQ command. Thus, if a rate other than the first
+ *     was used for Tx, the FW will set this value to 0.
  */
 struct iwl_mvm_ba_notif {
        __le32 sta_addr_lo32;
@@ -524,7 +527,8 @@ struct iwl_mvm_ba_notif {
        __le16 scd_ssn;
        u8 txed;
        u8 txed_2_done;
-       __le16 reserved1;
+       u8 reduced_txp;
+       u8 reserved1;
 } __packed;
 
 /*
index 82049bb139c26b2ae68f3d8a8ce191bde5f97de5..f332497e29d161a03a5fe16a26fca2804cb35e43 100644 (file)
@@ -213,6 +213,8 @@ enum {
 
        MFUART_LOAD_NOTIFICATION = 0xb1,
 
+       RSS_CONFIG_CMD = 0xb3,
+
        REPLY_RX_PHY_CMD = 0xc0,
        REPLY_RX_MPDU_CMD = 0xc1,
        FRAME_RELEASE = 0xc3,
@@ -280,11 +282,16 @@ enum iwl_phy_ops_subcmd_ids {
        DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
 };
 
+enum iwl_prot_offload_subcmd_ids {
+       STORED_BEACON_NTF = 0xFF,
+};
+
 /* command groups */
 enum {
        LEGACY_GROUP = 0x0,
        LONG_GROUP = 0x1,
        PHY_OPS_GROUP = 0x4,
+       PROT_OFFLOAD_GROUP = 0xb,
 };
 
 /**
@@ -1851,4 +1858,28 @@ struct iwl_shared_mem_cfg {
        __le32 page_buff_size;
 } __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
 
+#define MAX_STORED_BEACON_SIZE 600
+
+/**
+ * Stored beacon notification
+ *
+ * @system_time: system time on air rise
+ * @tsf: TSF on air rise
+ * @beacon_timestamp: beacon on air rise
+ * @phy_flags: general phy flags: band, modulation, etc.
+ * @channel: channel this beacon was received on
+ * @rates: rate in ucode internal format
+ * @byte_count: frame's byte count
+ */
+struct iwl_stored_beacon_notif {
+       __le32 system_time;
+       __le64 tsf;
+       __le32 beacon_timestamp;
+       __le16 phy_flags;
+       __le16 channel;
+       __le32 rates;
+       __le32 byte_count;
+       u8 data[MAX_STORED_BEACON_SIZE];
+} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
+
 #endif /* __fw_api_h__ */
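
STORED_BEACON_NTF reuses opcode 0xFF inside the new PROT_OFFLOAD_GROUP (0xb), so it only collides with DTS_MEASUREMENT_NOTIF_WIDE by opcode, not by wide command id; a small sketch of the group/opcode composition (the helper is illustrative, the driver's own wide-id macro may be spelled differently):

#include <linux/types.h>

/* Illustrative only: the 16-bit "wide" command id pairs group and opcode,
 * so the two 0xFF notifications stay distinct:
 *
 *   PROT_OFFLOAD_GROUP (0xb) << 8 | STORED_BEACON_NTF        (0xFF) == 0x0bff
 *   PHY_OPS_GROUP      (0x4) << 8 | DTS_MEASUREMENT_NOTIF_WIDE (0xFF) == 0x04ff
 */
static inline u16 example_wide_cmd_id(u8 group, u8 opcode)
{
	return (u16)group << 8 | opcode;
}
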
index 0813f8184e10cbcb2472910847b789e7d57f454d..4856eac120f60d5eff2e9707e828e541761a76ae 100644 (file)
@@ -435,6 +435,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        bool monitor_dump_only = false;
        int i;
 
+       if (!IWL_MVM_COLLECT_FW_ERR_DUMP &&
+           !mvm->trans->dbg_dest_tlv)
+               return;
+
        lockdep_assert_held(&mvm->mutex);
 
        /* there's no point in fw dump if the bus is dead */
@@ -640,8 +644,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
 
        /* Dump fw's virtual image */
        if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
-               u32 i;
-
                for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
                        struct iwl_fw_error_dump_paging *paging;
                        struct page *pages =
index 4ed5180c547bb6ce8af288a6653220b5cf2c61b4..070e2af05ca25bc5545373e3738c8c30b67cd283 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -107,6 +108,24 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
                                    sizeof(tx_ant_cmd), &tx_ant_cmd);
 }
 
+static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
+{
+       int i;
+       struct iwl_rss_config_cmd cmd = {
+               .flags = cpu_to_le32(IWL_RSS_ENABLE),
+               .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+                            IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+                            IWL_RSS_HASH_TYPE_IPV6_TCP |
+                            IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
+       };
+
+       for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
+               cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
+       memcpy(cmd.secret_key, mvm->secret_key, ARRAY_SIZE(cmd.secret_key));
+
+       return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
+}
+
 static void iwl_free_fw_paging(struct iwl_mvm *mvm)
 {
        int i;
@@ -894,6 +913,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
+       /* Init RSS configuration */
+       if (iwl_mvm_has_new_rx_api(mvm)) {
+               ret = iwl_send_rss_cfg_cmd(mvm);
+               if (ret) {
+                       IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
+                               ret);
+                       goto error;
+               }
+       }
+
        /* init the fw <-> mac80211 STA mapping */
        for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
                RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
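
The default indirection table built in iwl_send_rss_cfg_cmd() above just round-robins hash buckets across the available RX queues: with num_rx_queues == 4 the 128 entries run 0,1,2,3,0,1,2,3,..., i.e. 32 buckets per queue. A tiny standalone restatement of that rule (illustrative, not driver code):

#include <linux/types.h>

static void example_default_indirection(u8 *table, int num_rx_queues)
{
	int i;

	for (i = 0; i < IWL_RSS_INDIRECTION_TABLE_SIZE; i++)
		table[i] = i % num_rx_queues;	/* 0,1,2,3,0,1,2,3,... */
}
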
index bf1e5eb5dbdba3657071ae1492e52b7073e6a9cd..535134d639e0ec70498a1a418cce0344ea5656ff 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -744,7 +744,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
                 * wake-ups.
                 */
                cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
-               if (mvmvif->ap_assoc_sta_count) {
+               if (mvmvif->ap_assoc_sta_count || !mvm->drop_bcn_ap_mode) {
                        cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
                        IWL_DEBUG_HC(mvm, "Asking FW to pass beacons\n");
                } else {
@@ -1462,3 +1462,40 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
                                                   iwl_mvm_beacon_loss_iterator,
                                                   mb);
 }
+
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+                                   struct iwl_rx_cmd_buffer *rxb)
+{
+       struct iwl_rx_packet *pkt = rxb_addr(rxb);
+       struct iwl_stored_beacon_notif *sb = (void *)pkt->data;
+       struct ieee80211_rx_status rx_status;
+       struct sk_buff *skb;
+       u32 size = le32_to_cpu(sb->byte_count);
+
+       if (size == 0)
+               return;
+
+       skb = alloc_skb(size, GFP_ATOMIC);
+       if (!skb) {
+               IWL_ERR(mvm, "alloc_skb failed\n");
+               return;
+       }
+
+       /* update rx_status according to the notification's metadata */
+       memset(&rx_status, 0, sizeof(rx_status));
+       rx_status.mactime = le64_to_cpu(sb->tsf);
+       rx_status.device_timestamp = le32_to_cpu(sb->system_time);
+       rx_status.band =
+               (sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
+                               IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+       rx_status.freq =
+               ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
+                                              rx_status.band);
+
+       /* copy the data */
+       memcpy(skb_put(skb, size), sb->data, size);
+       memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
+
+       /* pass it as regular rx to mac80211 */
+       ieee80211_rx_napi(mvm->hw, skb, NULL);
+}
index d70a1716f3e08e8856ec30aa66376b8437ef3108..24c46c2232463d5b42d2314ce30da5aaaf13d964 100644 (file)
@@ -837,13 +837,16 @@ iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
-                                   enum ieee80211_ampdu_mlme_action action,
-                                   struct ieee80211_sta *sta, u16 tid,
-                                   u16 *ssn, u8 buf_size, bool amsdu)
+                                   struct ieee80211_ampdu_params *params)
 {
        struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
        int ret;
        bool tx_agg_ref = false;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+       u8 buf_size = params->buf_size;
 
        IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
                     sta->addr, tid, action);
@@ -884,10 +887,10 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
                        ret = -EINVAL;
                        break;
                }
-               ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true);
+               ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size);
                break;
        case IEEE80211_AMPDU_RX_STOP:
-               ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
+               ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size);
                break;
        case IEEE80211_AMPDU_TX_START:
                if (!iwl_enable_tx_ampdu(mvm->cfg)) {
index 5f3ac8cccf49d2c5a07644f894b1ba920dca8bfc..ebe37bb0ce4c4f42643bc6443d4b55246de132f3 100644 (file)
@@ -346,8 +346,9 @@ struct iwl_mvm_vif_bf_data {
  * @pm_enabled - Indicate if MAC power management is allowed
  * @monitor_active: indicates that monitor context is configured, and that the
  *     interface should get quota etc.
- * @low_latency: indicates that this interface is in low-latency mode
- *     (VMACLowLatencyMode)
+ * @low_latency_traffic: indicates low latency traffic was detected
+ * @low_latency_dbgfs: low latency mode set from debugfs
+ * @low_latency_vcmd: low latency mode set from vendor command
  * @ps_disabled: indicates that this interface requires PS to be disabled
  * @queue_params: QoS params for this MAC
  * @bcast_sta: station used for broadcast packets. Used by the following
@@ -375,7 +376,7 @@ struct iwl_mvm_vif {
        bool ap_ibss_active;
        bool pm_enabled;
        bool monitor_active;
-       bool low_latency;
+       bool low_latency_traffic, low_latency_dbgfs, low_latency_vcmd;
        bool ps_disabled;
        struct iwl_mvm_vif_bf_data bf_data;
 
@@ -432,6 +433,7 @@ struct iwl_mvm_vif {
        struct iwl_dbgfs_pm dbgfs_pm;
        struct iwl_dbgfs_bf dbgfs_bf;
        struct iwl_mac_power_cmd mac_pwr_cmd;
+       int dbgfs_quota_min;
 #endif
 
        enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
@@ -645,6 +647,7 @@ struct iwl_mvm {
        atomic_t pending_frames[IWL_MVM_STATION_COUNT];
        u32 tfd_drained[IWL_MVM_STATION_COUNT];
        u8 rx_ba_sessions;
+       u32 secret_key[IWL_RSS_HASH_KEY_CNT];
 
        /* configured by mac80211 */
        u32 rts_threshold;
@@ -856,6 +859,12 @@ struct iwl_mvm {
 
        u32 ciphers[6];
        struct iwl_mvm_tof_data tof_data;
+
+       /*
+        * Drop beacons from other APs in AP mode when there are no connected
+        * clients.
+        */
+       bool drop_bcn_ap_mode;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -1005,10 +1014,18 @@ static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
                IWL_MVM_BT_COEX_MPLUT;
 }
 
+static inline
+bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
+               IWL_MVM_P2P_UAPSD_STANDALONE;
+}
+
 static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
 {
-       /* firmware flag isn't defined yet */
-       return false;
+       return fw_has_capa(&mvm->fw->ucode_capa,
+                          IWL_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT);
 }
 
 extern const u8 iwl_mvm_ac_to_tx_fifo[];
@@ -1184,6 +1201,8 @@ void iwl_mvm_rx_beacon_notif(struct iwl_mvm *mvm,
                             struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
                                     struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
+                                   struct iwl_rx_cmd_buffer *rxb);
 void iwl_mvm_mac_ctxt_recalc_tsf_id(struct iwl_mvm *mvm,
                                    struct ieee80211_vif *vif);
 unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
@@ -1417,8 +1436,9 @@ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
         * binding, so this has no real impact. For now, just return
         * the current desired low-latency state.
         */
-
-       return mvmvif->low_latency;
+       return mvmvif->low_latency_dbgfs ||
+              mvmvif->low_latency_traffic ||
+              mvmvif->low_latency_vcmd;
 }
 
 /* hw scheduler queue config */
@@ -1481,7 +1501,7 @@ void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
 void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
 void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
 void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
-int iwl_mvm_get_temp(struct iwl_mvm *mvm);
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp);
 
 /* Location Aware Regulatory */
 struct iwl_mcc_update_resp *
index 7a3da2da6fd0bfe280415849764ca84361ea573a..c446e0da97899a45781d87e4b5bcb7341ee0c1fd 100644 (file)
@@ -300,7 +300,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
        struct iwl_nvm_section *sections = mvm->nvm_sections;
        const __le16 *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
        bool lar_enabled;
-       u32 mac_addr0, mac_addr1;
+       __le32 mac_addr0, mac_addr1;
 
        /* Checking for required sections */
        if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
@@ -337,8 +337,10 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
                return NULL;
 
        /* read the mac address from WFMP registers */
-       mac_addr0 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_0);
-       mac_addr1 = iwl_trans_read_prph(mvm->trans, WFMP_MAC_ADDR_1);
+       mac_addr0 = cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                   WFMP_MAC_ADDR_0));
+       mac_addr1 = cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+                                                   WFMP_MAC_ADDR_1));
 
        hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
index 89ea70deeb84410edd0e3ad07ae77abc08476107..09a94a5efb61111d150fa1616e21467a922485d9 100644 (file)
@@ -33,6 +33,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -267,6 +268,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
                   true),
        RX_HANDLER(MFUART_LOAD_NOTIFICATION, iwl_mvm_rx_mfuart_notif, false),
        RX_HANDLER(TOF_NOTIFICATION, iwl_mvm_tof_resp_handler, true),
+       RX_HANDLER_GRP(PROT_OFFLOAD_GROUP, STORED_BEACON_NTF,
+                      iwl_mvm_rx_stored_beacon_notif, false),
 
 };
 #undef RX_HANDLER
@@ -344,6 +347,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
        HCMD_NAME(MAC_PM_POWER_TABLE),
        HCMD_NAME(TDLS_CHANNEL_SWITCH_NOTIFICATION),
        HCMD_NAME(MFUART_LOAD_NOTIFICATION),
+       HCMD_NAME(RSS_CONFIG_CMD),
        HCMD_NAME(SCAN_ITERATION_COMPLETE_UMAC),
        HCMD_NAME(REPLY_RX_PHY_CMD),
        HCMD_NAME(REPLY_RX_MPDU_CMD),
@@ -386,13 +390,20 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
        HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
 };
 
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
+       HCMD_NAME(STORED_BEACON_NTF),
+};
+
 static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
        [LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
        [LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
        [PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
+       [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
 };
 
-
 /* this forward declaration can avoid to export the function */
 static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
 static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
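[Editor's note] The new PROT_OFFLOAD_GROUP name array above carries the same requirement as the other groups: it must stay sorted by command id because lookups are done with a binary search. A self-contained sketch of such a lookup; the struct, array contents and the 0xff id are illustrative, not the driver's:

    #include <stdio.h>

    struct hcmd_name {
            unsigned char cmd_id;   /* array must be sorted ascending by this field */
            const char *name;
    };

    static const struct hcmd_name prot_offload_names[] = {
            { 0xff, "STORED_BEACON_NTF" },  /* id value chosen for illustration */
    };

    static const char *hcmd_name(const struct hcmd_name *arr, int n,
                                 unsigned char id)
    {
            int lo = 0, hi = n - 1;

            while (lo <= hi) {
                    int mid = lo + (hi - lo) / 2;

                    if (arr[mid].cmd_id == id)
                            return arr[mid].name;
                    if (arr[mid].cmd_id < id)
                            lo = mid + 1;
                    else
                            hi = mid - 1;
            }
            return "UNKNOWN";
    }

    int main(void)
    {
            printf("%s\n", hcmd_name(prot_offload_names, 1, 0xff));
            return 0;
    }
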
@@ -481,6 +492,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
        }
        mvm->sf_state = SF_UNINIT;
        mvm->cur_ucode = IWL_UCODE_INIT;
+       mvm->drop_bcn_ap_mode = true;
 
        mutex_init(&mvm->mutex);
        mutex_init(&mvm->d0i3_suspend_mutex);
@@ -641,6 +653,9 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
        iwl_mvm_tof_init(mvm);
 
+       /* init RSS hash key */
+       get_random_bytes(mvm->secret_key, ARRAY_SIZE(mvm->secret_key));
+
        return op_mode;
 
  out_unregister:
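[Editor's note] One detail worth flagging in the RSS key initialization above: get_random_bytes() takes a length in bytes, while ARRAY_SIZE() yields an element count, so for a u32 key array the two differ by a factor of four. A tiny sketch of the distinction; the key length used here is arbitrary, since IWL_RSS_HASH_KEY_CNT is not shown in this diff:

    #include <stdint.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static uint32_t secret_key[10];         /* arbitrary illustrative length */

    /* For a byte-count API such as get_random_bytes(), sizeof() is the
     * quantity that covers the whole key. */
    _Static_assert(ARRAY_SIZE(secret_key) == 10, "element count");
    _Static_assert(sizeof(secret_key) == 10 * sizeof(uint32_t), "byte count");
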
@@ -1196,7 +1211,7 @@ static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
        cmd->is_11n_connection = ap_sta->ht_cap.ht_supported;
        cmd->offloading_tid = iter_data->offloading_tid;
        cmd->flags = ENABLE_L3_FILTERING | ENABLE_NBNS_FILTERING |
-               ENABLE_DHCP_FILTERING;
+               ENABLE_DHCP_FILTERING | ENABLE_STORE_BEACON;
        /*
         * The d0i3 uCode takes care of the nonqos counters,
         * so configure only the qos seq ones.
@@ -1217,8 +1232,7 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
        struct iwl_wowlan_config_cmd wowlan_config_cmd = {
                .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
                                             IWL_WOWLAN_WAKEUP_BEACON_MISS |
-                                            IWL_WOWLAN_WAKEUP_LINK_CHANGE |
-                                            IWL_WOWLAN_WAKEUP_BCN_FILTERING),
+                                            IWL_WOWLAN_WAKEUP_LINK_CHANGE),
        };
        struct iwl_d3_manager_config d3_cfg_cmd = {
                .min_sleep_time = cpu_to_le32(1000),
@@ -1268,6 +1282,12 @@ int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
 
        /* configure wowlan configuration only if needed */
        if (mvm->d0i3_ap_sta_id != IWL_MVM_STATION_COUNT) {
+               /* wake on beacons only if beacon storing isn't supported */
+               if (!fw_has_capa(&mvm->fw->ucode_capa,
+                                IWL_UCODE_TLV_CAPA_BEACON_STORING))
+                       wowlan_config_cmd.wakeup_filter |=
+                               cpu_to_le32(IWL_WOWLAN_WAKEUP_BCN_FILTERING);
+
                iwl_mvm_wowlan_config_key_params(mvm,
                                                 d0i3_iter_data.connected_vif,
                                                 true, flags);
index 9de159f1ef2dd366fbfeb67a75c1547952a26087..f313910cd0269f9e764b527ae886a49b1d9e4ef7 100644 (file)
@@ -7,7 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015        Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -259,6 +259,26 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
                IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
 }
 
+static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
+                                           struct ieee80211_vif *vif)
+{
+       bool *is_p2p_standalone = _data;
+
+       switch (ieee80211_vif_type_p2p(vif)) {
+       case NL80211_IFTYPE_P2P_GO:
+       case NL80211_IFTYPE_AP:
+               *is_p2p_standalone = false;
+               break;
+       case NL80211_IFTYPE_STATION:
+               if (vif->bss_conf.assoc)
+                       *is_p2p_standalone = false;
+               break;
+
+       default:
+               break;
+       }
+}
+
 static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
                                       struct ieee80211_vif *vif)
 {
@@ -268,9 +288,6 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
                    ETH_ALEN))
                return false;
 
-       if (vif->p2p &&
-           !(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD))
-               return false;
        /*
         * Avoid using uAPSD if P2P client is associated to GO that uses
         * opportunistic power save. This is due to current FW limitation.
@@ -287,6 +304,22 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
        if (iwl_mvm_phy_ctx_count(mvm) >= 2)
                return false;
 
+       if (vif->p2p) {
+               /* Allow U-APSD only if p2p is standalone */
+               bool is_p2p_standalone = true;
+
+               if (!iwl_mvm_is_p2p_standalone_uapsd_supported(mvm))
+                       return false;
+
+               ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+                                       IEEE80211_IFACE_ITER_NORMAL,
+                                       iwl_mvm_p2p_standalone_iterator,
+                                       &is_p2p_standalone);
+
+               if (!is_p2p_standalone)
+                       return false;
+       }
+
        return true;
 }
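[Editor's note] For the uAPSD change above, "standalone" means the P2P client shares the device with no AP/GO interface and no associated station interface. A self-contained sketch of that scan over the active interfaces; the struct and enum are illustrative stand-ins for mac80211's types:

    #include <stdbool.h>
    #include <stddef.h>

    enum iftype { IF_STATION, IF_AP, IF_P2P_GO, IF_P2P_CLIENT };

    struct iface {
            enum iftype type;
            bool assoc;             /* meaningful for stations only */
    };

    /* P2P is "standalone" when no AP/GO exists and no station is associated. */
    static bool p2p_is_standalone(const struct iface *ifs, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    switch (ifs[i].type) {
                    case IF_AP:
                    case IF_P2P_GO:
                            return false;
                    case IF_STATION:
                            if (ifs[i].assoc)
                                    return false;
                            break;
                    default:
                            break;
                    }
            }
            return true;
    }
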
 
@@ -544,7 +577,6 @@ void iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
 
 struct iwl_power_vifs {
        struct iwl_mvm *mvm;
-       struct ieee80211_vif *bf_vif;
        struct ieee80211_vif *bss_vif;
        struct ieee80211_vif *p2p_vif;
        struct ieee80211_vif *ap_vif;
@@ -617,11 +649,6 @@ static void iwl_mvm_power_get_vifs_iterator(void *_data, u8 *mac,
                if (mvmvif->phy_ctxt)
                        if (mvmvif->phy_ctxt->id < MAX_PHYS)
                                power_iterator->bss_active = true;
-
-               if (mvmvif->bf_data.bf_enabled &&
-                   !WARN_ON(power_iterator->bf_vif))
-                       power_iterator->bf_vif = vif;
-
                break;
 
        default:
@@ -850,29 +877,9 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
        return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
 }
 
-static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
-                                      struct ieee80211_vif *vif,
-                                      bool enable)
-{
-       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       struct iwl_beacon_filter_cmd cmd = {
-               IWL_BF_CMD_CONFIG_DEFAULTS,
-               .bf_enable_beacon_filter = cpu_to_le32(1),
-       };
-
-       if (!mvmvif->bf_data.bf_enabled)
-               return 0;
-
-       if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
-               cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
-
-       mvmvif->bf_data.ba_enabled = enable;
-       return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
-}
-
-int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
-                                 struct ieee80211_vif *vif,
-                                 u32 flags)
+static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+                                         struct ieee80211_vif *vif,
+                                         u32 flags, bool d0i3)
 {
        struct iwl_beacon_filter_cmd cmd = {};
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -883,12 +890,20 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
 
        ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
 
-       if (!ret)
+       /* don't change bf_enabled in case of temporary d0i3 configuration */
+       if (!ret && !d0i3)
                mvmvif->bf_data.bf_enabled = false;
 
        return ret;
 }
 
+int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
+                                 struct ieee80211_vif *vif,
+                                 u32 flags)
+{
+       return _iwl_mvm_disable_beacon_filter(mvm, vif, flags, false);
+}
+
 static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
 {
        bool disable_ps;
@@ -918,21 +933,26 @@ static int iwl_mvm_power_set_ps(struct iwl_mvm *mvm)
 }
 
 static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
-                               struct iwl_power_vifs *vifs)
+                               struct ieee80211_vif *vif)
 {
-       struct iwl_mvm_vif *mvmvif;
-       bool ba_enable;
+       struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+       struct iwl_beacon_filter_cmd cmd = {
+               IWL_BF_CMD_CONFIG_DEFAULTS,
+               .bf_enable_beacon_filter = cpu_to_le32(1),
+       };
 
-       if (!vifs->bf_vif)
+       if (!mvmvif->bf_data.bf_enabled)
                return 0;
 
-       mvmvif = iwl_mvm_vif_from_mac80211(vifs->bf_vif);
+       if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
+               cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
 
-       ba_enable = !(!mvmvif->pm_enabled || mvm->ps_disabled ||
-                     !vifs->bf_vif->bss_conf.ps ||
-                     iwl_mvm_vif_low_latency(mvmvif));
+       mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
+                                      mvm->ps_disabled ||
+                                      !vif->bss_conf.ps ||
+                                      iwl_mvm_vif_low_latency(mvmvif));
 
-       return iwl_mvm_update_beacon_abort(mvm, vifs->bf_vif, ba_enable);
+       return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, 0, false);
 }
 
 int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
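[Editor's note] The ba_enabled expression above is easier to read after applying De Morgan: beacon abort stays on only while power management is enabled, power save is not globally disabled, the BSS is in PS, and the vif is not low-latency. A sketch of the equivalent positive form (names illustrative):

    #include <stdbool.h>

    static bool beacon_abort_allowed(bool pm_enabled, bool ps_disabled,
                                     bool bss_ps, bool low_latency)
    {
            /* equivalent to !(!pm_enabled || ps_disabled || !bss_ps || low_latency) */
            return pm_enabled && !ps_disabled && bss_ps && !low_latency;
    }
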
@@ -953,7 +973,10 @@ int iwl_mvm_power_update_ps(struct iwl_mvm *mvm)
        if (ret)
                return ret;
 
-       return iwl_mvm_power_set_ba(mvm, &vifs);
+       if (vifs.bss_vif)
+               return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+       return 0;
 }
 
 int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
@@ -988,7 +1011,10 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm)
                        return ret;
        }
 
-       return iwl_mvm_power_set_ba(mvm, &vifs);
+       if (vifs.bss_vif)
+               return iwl_mvm_power_set_ba(mvm, vifs.bss_vif);
+
+       return 0;
 }
 
 int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
@@ -1025,8 +1051,17 @@ int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
                        IWL_BF_CMD_CONFIG_D0I3,
                        .bf_enable_beacon_filter = cpu_to_le32(1),
                };
-               ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
-                                                   flags, true);
+               /*
+                * When beacon storing is supported - disable beacon filtering
+                * altogether - the latest beacon will be sent when exiting d0i3
+                */
+               if (fw_has_capa(&mvm->fw->ucode_capa,
+                               IWL_UCODE_TLV_CAPA_BEACON_STORING))
+                       ret = _iwl_mvm_disable_beacon_filter(mvm, vif, flags,
+                                                            true);
+               else
+                       ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+                                                           flags, true);
        } else {
                if (mvmvif->bf_data.bf_enabled)
                        ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
index 0b762b4f8fadfb26b36f103b64308239edda9c23..2141db5bff82a28cbe2b5cc1280af264a5f0cc06 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016        Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -74,6 +76,9 @@ struct iwl_mvm_quota_iterator_data {
        int n_interfaces[MAX_BINDINGS];
        int colors[MAX_BINDINGS];
        int low_latency[MAX_BINDINGS];
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       int dbgfs_min[MAX_BINDINGS];
+#endif
        int n_low_latency_bindings;
        struct ieee80211_vif *disabled_vif;
 };
@@ -129,6 +134,12 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
 
        data->n_interfaces[id]++;
 
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+       if (mvmvif->dbgfs_quota_min)
+               data->dbgfs_min[id] = max(data->dbgfs_min[id],
+                                         mvmvif->dbgfs_quota_min);
+#endif
+
        if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
                data->n_low_latency_bindings++;
                data->low_latency[id] = true;
@@ -259,6 +270,11 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm,
 
                if (data.n_interfaces[i] <= 0)
                        cmd.quotas[idx].quota = cpu_to_le32(0);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+               else if (data.dbgfs_min[i])
+                       cmd.quotas[idx].quota =
+                               cpu_to_le32(data.dbgfs_min[i] * QUOTA_100 / 100);
+#endif
                else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
                         data.low_latency[i])
                        /*
index 7bb6fd0e4391a92f0200aad6e62362fc20c48802..6e7e78a378794a34174be26f2f9dcf01ba6403b3 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License as
@@ -724,14 +725,28 @@ static int _rs_collect_tx_data(struct iwl_mvm *mvm,
        return 0;
 }
 
-static int rs_collect_tx_data(struct iwl_mvm *mvm,
-                             struct iwl_lq_sta *lq_sta,
-                             struct iwl_scale_tbl_info *tbl,
-                             int scale_index, int attempts, int successes,
-                             u8 reduced_txp)
+static int rs_collect_tpc_data(struct iwl_mvm *mvm,
+                              struct iwl_lq_sta *lq_sta,
+                              struct iwl_scale_tbl_info *tbl,
+                              int scale_index, int attempts, int successes,
+                              u8 reduced_txp)
+{
+       struct iwl_rate_scale_data *window = NULL;
+
+       if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
+               return -EINVAL;
+
+       window = &tbl->tpc_win[reduced_txp];
+       return  _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
+                                   window);
+}
+
+static int rs_collect_tlc_data(struct iwl_mvm *mvm,
+                              struct iwl_lq_sta *lq_sta,
+                              struct iwl_scale_tbl_info *tbl,
+                              int scale_index, int attempts, int successes)
 {
        struct iwl_rate_scale_data *window = NULL;
-       int ret;
 
        if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
                return -EINVAL;
@@ -745,16 +760,6 @@ static int rs_collect_tx_data(struct iwl_mvm *mvm,
 
        /* Select window for current tx bit rate */
        window = &(tbl->win[scale_index]);
-
-       ret = _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
-                                 window);
-       if (ret)
-               return ret;
-
-       if (WARN_ON_ONCE(reduced_txp > TPC_MAX_REDUCTION))
-               return -EINVAL;
-
-       window = &tbl->tpc_win[reduced_txp];
        return _rs_collect_tx_data(mvm, tbl, scale_index, attempts, successes,
                                   window);
 }
@@ -1301,17 +1306,30 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
         * first index into rate scale table.
         */
        if (info->flags & IEEE80211_TX_STAT_AMPDU) {
-               /* ampdu_ack_len = 0 marks no BA was received. In this case
-                * treat it as a single frame loss as we don't want the success
-                * ratio to dip too quickly because a BA wasn't received
+               rs_collect_tpc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+                                   info->status.ampdu_len,
+                                   info->status.ampdu_ack_len,
+                                   reduced_txp);
+
+               /* ampdu_ack_len = 0 marks no BA was received. For TLC, treat
+                * it as a single frame loss as we don't want the success ratio
+                * to dip too quickly because a BA wasn't received.
+                * For TPC, there's no need for this optimisation since we want
+                * to recover very quickly from a bad power reduction and,
+                * therefore we'd like the success ratio to get an immediate hit
+                * when failing to get a BA, so we'd switch back to a lower or
+                * zero power reduction. When FW transmits agg with a rate
+                * different from the initial rate, it will not use reduced txp
+                * and will send BA notification twice (one empty with reduced
+                * txp equal to the value from LQ and one with reduced txp 0).
+                * We need to update counters for each txp level accordingly.
                 */
                if (info->status.ampdu_ack_len == 0)
                        info->status.ampdu_len = 1;
 
-               rs_collect_tx_data(mvm, lq_sta, curr_tbl, lq_rate.index,
-                                  info->status.ampdu_len,
-                                  info->status.ampdu_ack_len,
-                                  reduced_txp);
+               rs_collect_tlc_data(mvm, lq_sta, curr_tbl, lq_rate.index,
+                                   info->status.ampdu_len,
+                                   info->status.ampdu_ack_len);
 
                /* Update success/fail counts if not searching for new mode */
                if (lq_sta->rs_state == RS_STATE_STAY_IN_COLUMN) {
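[Editor's note] A compact illustration of the split described in the comment above: TPC statistics are fed the raw A-MPDU numbers so a missing BA hurts the success ratio immediately, while TLC replaces a BA-less aggregate (ampdu_ack_len == 0) with a single lost frame so its ratio degrades gradually. The window bookkeeping below is a toy stand-in for the driver's rate-scale windows:

    struct win {
            int attempts;
            int successes;
    };

    static void win_collect(struct win *w, int attempts, int successes)
    {
            w->attempts += attempts;
            w->successes += successes;
    }

    /* TPC: raw numbers, so a missing BA hits the ratio at once. */
    static void collect_tpc(struct win *tpc_win, int ampdu_len, int ampdu_ack_len)
    {
            win_collect(tpc_win, ampdu_len, ampdu_ack_len);
    }

    /* TLC: a missing BA counts as one lost frame, not ampdu_len lost frames. */
    static void collect_tlc(struct win *tlc_win, int ampdu_len, int ampdu_ack_len)
    {
            if (ampdu_ack_len == 0)
                    ampdu_len = 1;
            win_collect(tlc_win, ampdu_len, ampdu_ack_len);
    }
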
@@ -1344,9 +1362,13 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                        else
                                continue;
 
-                       rs_collect_tx_data(mvm, lq_sta, tmp_tbl, lq_rate.index,
-                                          1, i < retries ? 0 : legacy_success,
-                                          reduced_txp);
+                       rs_collect_tpc_data(mvm, lq_sta, tmp_tbl,
+                                           lq_rate.index, 1,
+                                           i < retries ? 0 : legacy_success,
+                                           reduced_txp);
+                       rs_collect_tlc_data(mvm, lq_sta, tmp_tbl,
+                                           lq_rate.index, 1,
+                                           i < retries ? 0 : legacy_success);
                }
 
                /* Update success/fail counts if not searching for new mode */
@@ -2040,7 +2062,8 @@ static enum tpc_action rs_get_tpc_action(struct iwl_mvm *mvm,
        }
 
        /* try decreasing first if applicable */
-       if (weak != TPC_INVALID) {
+       if (sr >= RS_PERCENT(IWL_MVM_RS_TPC_SR_NO_INCREASE) &&
+           weak != TPC_INVALID) {
                if (weak_tpt == IWL_INVALID_VALUE &&
                    (strong_tpt == IWL_INVALID_VALUE ||
                     current_tpt >= strong_tpt)) {
index 0c073e02fd4cba9d07c8410c2ec1b4e99c46a8d7..615dea143d4e8df022991512dcac62b1fe48cf6a 100644 (file)
@@ -201,25 +201,22 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
                                        struct iwl_rx_mpdu_desc *desc,
                                        struct ieee80211_rx_status *rx_status)
 {
-       int energy_a, energy_b, energy_c, max_energy;
+       int energy_a, energy_b, max_energy;
 
        energy_a = desc->energy_a;
        energy_a = energy_a ? -energy_a : S8_MIN;
        energy_b = desc->energy_b;
        energy_b = energy_b ? -energy_b : S8_MIN;
-       energy_c = desc->energy_c;
-       energy_c = energy_c ? -energy_c : S8_MIN;
        max_energy = max(energy_a, energy_b);
-       max_energy = max(max_energy, energy_c);
 
-       IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
-                       energy_a, energy_b, energy_c, max_energy);
+       IWL_DEBUG_STATS(mvm, "energy In A %d B %d, and max %d\n",
+                       energy_a, energy_b, max_energy);
 
        rx_status->signal = max_energy;
        rx_status->chains = 0; /* TODO: phy info */
        rx_status->chain_signal[0] = energy_a;
        rx_status->chain_signal[1] = energy_b;
-       rx_status->chain_signal[2] = energy_c;
+       rx_status->chain_signal[2] = S8_MIN;
 }
 
 static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
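[Editor's note] The conversion above maps the firmware's per-chain energy magnitude to a signed signal value; 0 means "no measurement" and is reported as the most negative s8. With only two chains in the new RX descriptor, the third chain is pinned to that same "invalid" value. Minimal sketch:

    #include <stdint.h>

    /* 0 from the firmware means "no measurement"; otherwise the magnitude is
     * negated to get a dBm-style signal level. */
    static int energy_to_signal(unsigned int energy)
    {
            return energy ? -(int)energy : INT8_MIN;
    }

    static int max_signal(unsigned int energy_a, unsigned int energy_b)
    {
            int a = energy_to_signal(energy_a);
            int b = energy_to_signal(energy_b);

            return a > b ? a : b;
    }
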
@@ -294,7 +291,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
        struct ieee80211_rx_status *rx_status;
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
-       struct ieee80211_hdr *hdr = (void *)(desc + 1);
+       struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
        u32 len = le16_to_cpu(desc->mpdu_len);
        u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
        struct ieee80211_sta *sta = NULL;
index 9a15642f80dd28b88dc4ae8326f465a7ec583e35..1e1ab9daaec9d330632455295af7d510cf2e9d5b 100644 (file)
@@ -930,8 +930,11 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        if (WARN_ON(num_channels > mvm->fw->ucode_capa.n_scan_channels))
                return -ENOBUFS;
 
-       if (type == mvm->scan_type)
+       if (type == mvm->scan_type) {
+               IWL_DEBUG_SCAN(mvm,
+                              "Ignoring UMAC scan config of the same type\n");
                return 0;
+       }
 
        cmd_size = sizeof(*scan_config) + mvm->fw->ucode_capa.n_scan_channels;
 
@@ -1109,7 +1112,7 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
        cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params,
                                                                 vif));
 
-       if (type == IWL_MVM_SCAN_SCHED)
+       if (type == IWL_MVM_SCAN_SCHED || type == IWL_MVM_SCAN_NETDETECT)
                cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
 
        if (iwl_mvm_scan_use_ebs(mvm, vif))
@@ -1351,7 +1354,7 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
 
        if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
                hcmd.id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
-               ret = iwl_mvm_scan_umac(mvm, vif, &params, IWL_MVM_SCAN_SCHED);
+               ret = iwl_mvm_scan_umac(mvm, vif, &params, type);
        } else {
                hcmd.id = SCAN_OFFLOAD_REQUEST_CMD;
                ret = iwl_mvm_scan_lmac(mvm, vif, &params);
index b556e33658d734974d8534b271cf5e57ea755be3..4854e79cbda84c2693867f6f9d99071345506b28 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
  *
  * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #include "sta.h"
 #include "rs.h"
 
+/*
+ * The new version of the ADD_STA command added new fields at the end of the
+ * structure, so sending the size of the relevant API's structure is enough to
+ * support both API versions.
+ */
+static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
+{
+       return iwl_mvm_has_new_rx_api(mvm) ?
+               sizeof(struct iwl_mvm_add_sta_cmd) :
+               sizeof(struct iwl_mvm_add_sta_cmd_v7);
+}
+
 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
                                    enum nl80211_iftype iftype)
 {
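[Editor's note] The size helper introduced above relies on the firmware API convention that newer command versions only append fields, so sending a shorter length selects the older layout. A self-contained sketch of the same idea with made-up structures:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical layouts: v8 only appends fields after the v7 prefix. */
    struct add_sta_v7 {
            unsigned char sta_id;
            unsigned char tid_disable_tx[2];
    };

    struct add_sta_v8 {
            struct add_sta_v7 v7;
            unsigned short rx_ba_window;    /* new field appended in v8 */
    };

    static size_t add_sta_cmd_size(bool has_new_rx_api)
    {
            return has_new_rx_api ? sizeof(struct add_sta_v8)
                                  : sizeof(struct add_sta_v7);
    }
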
@@ -187,12 +201,13 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(add_sta_cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &add_sta_cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
                break;
@@ -357,12 +372,13 @@ int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
        cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n",
                               mvmsta->sta_id);
@@ -623,12 +639,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
        if (addr)
                memcpy(cmd.addr, addr, ETH_ALEN);
 
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm, "Internal station added.\n");
                return 0;
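[Editor's note] All the switch statements touched in this file now mask the ADD_STA response before comparing, because newer firmware packs additional flag bits into the same status word. Sketch with illustrative constants:

    #include <stdbool.h>

    #define ADD_STA_STATUS_MASK     0xff    /* low bits carry the status code */
    #define ADD_STA_SUCCESS         0x1

    static bool add_sta_succeeded(unsigned int status)
    {
            return (status & ADD_STA_STATUS_MASK) == ADD_STA_SUCCESS;
    }
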
@@ -819,7 +836,7 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 #define IWL_MAX_RX_BA_SESSIONS 16
 
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                      int tid, u16 ssn, bool start)
+                      int tid, u16 ssn, bool start, u8 buf_size)
 {
        struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
        struct iwl_mvm_add_sta_cmd cmd = {};
@@ -839,6 +856,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (start) {
                cmd.add_immediate_ba_tid = (u8) tid;
                cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+               cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
        } else {
                cmd.remove_immediate_ba_tid = (u8) tid;
        }
@@ -846,12 +864,13 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
                                  STA_MODIFY_REMOVE_BA_TID;
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
                               start ? "start" : "stopp");
@@ -904,12 +923,13 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
 
        status = ADD_STA_SUCCESS;
-       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd),
+       ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+                                         iwl_mvm_add_sta_cmd_size(mvm),
                                          &cmd, &status);
        if (ret)
                return ret;
 
-       switch (status) {
+       switch (status & IWL_ADD_STA_STATUS_MASK) {
        case ADD_STA_SUCCESS:
                break;
        default:
@@ -1640,7 +1660,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
        };
        int ret;
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+                                  iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
@@ -1731,7 +1752,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
 
        ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
                                   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
-                                  sizeof(cmd), &cmd);
+                                  iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
@@ -1766,7 +1787,8 @@ void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
        };
        int ret;
 
-       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, sizeof(cmd), &cmd);
+       ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
+                                  iwl_mvm_add_sta_cmd_size(mvm), &cmd);
        if (ret)
                IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
 }
index 39fdf5224e81741aacccec2f43f4783dd65b78fb..e3b9446ee99558236aa8175332dcd304f9bd93a5 100644 (file)
@@ -401,7 +401,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
 
 /* AMPDU */
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-                      int tid, u16 ssn, bool start);
+                      int tid, u16 ssn, bool start, u8 buf_size);
 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                        struct ieee80211_sta *sta, u16 tid, u16 *ssn);
 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
index fb76004eede4c4f29cc5835e90d1cfbe8262247b..758d05a8c6aad22748c2fb7f812038913b3e77e6 100644 (file)
@@ -194,12 +194,12 @@ static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
        return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
 }
 
-int iwl_mvm_get_temp(struct iwl_mvm *mvm)
+int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
 {
        struct iwl_notification_wait wait_temp_notif;
        static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
                                            DTS_MEASUREMENT_NOTIF_WIDE) };
-       int ret, temp;
+       int ret;
 
        if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_WIDE_CMD_HDR))
                temp_notif[0] = DTS_MEASUREMENT_NOTIFICATION;
@@ -208,7 +208,7 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 
        iwl_init_notification_wait(&mvm->notif_wait, &wait_temp_notif,
                                   temp_notif, ARRAY_SIZE(temp_notif),
-                                  iwl_mvm_temp_notif_wait, &temp);
+                                  iwl_mvm_temp_notif_wait, temp);
 
        ret = iwl_mvm_get_temp_cmd(mvm);
        if (ret) {
@@ -219,12 +219,10 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm)
 
        ret = iwl_wait_notification(&mvm->notif_wait, &wait_temp_notif,
                                    IWL_MVM_TEMP_NOTIF_WAIT_TIMEOUT);
-       if (ret) {
+       if (ret)
                IWL_ERR(mvm, "Getting the temperature timed out\n");
-               return ret;
-       }
 
-       return temp;
+       return ret;
 }
 
 static void check_exit_ctkill(struct work_struct *work)
@@ -233,6 +231,7 @@ static void check_exit_ctkill(struct work_struct *work)
        struct iwl_mvm *mvm;
        u32 duration;
        s32 temp;
+       int ret;
 
        tt = container_of(work, struct iwl_mvm_tt_mgmt, ct_kill_exit.work);
        mvm = container_of(tt, struct iwl_mvm, thermal_throttle);
@@ -250,13 +249,13 @@ static void check_exit_ctkill(struct work_struct *work)
                goto reschedule;
        }
 
-       temp = iwl_mvm_get_temp(mvm);
+       ret = iwl_mvm_get_temp(mvm, &temp);
 
        iwl_mvm_unref(mvm, IWL_MVM_REF_CHECK_CTKILL);
 
        __iwl_mvm_mac_stop(mvm);
 
-       if (temp < 0)
+       if (ret)
                goto reschedule;
 
        IWL_DEBUG_TEMP(mvm, "NIC temperature: %d\n", temp);
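[Editor's note] The signature change above exists because a temperature can legitimately be negative, so returning the reading directly cannot be distinguished from returning an error code. Sketch of the out-parameter pattern:

    #include <errno.h>

    /* Return 0 or a negative errno; the measurement goes through *temp. */
    static int get_temp(int *temp)
    {
            int measured = -12;     /* stand-in for a real device read */

            if (!temp)
                    return -EINVAL;

            *temp = measured;       /* a valid, negative temperature */
            return 0;
    }
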
index 8bf48a7d0f4e99e8297feace5368375b6a16a151..8a1638ec7e28f65a1dc3550ed3464c84d0eed054 100644 (file)
@@ -736,6 +736,37 @@ static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
        iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
 }
 
+static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
+                                           u32 status)
+{
+       struct iwl_fw_dbg_trigger_tlv *trig;
+       struct iwl_fw_dbg_trigger_tx_status *status_trig;
+       int i;
+
+       if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_TX_STATUS))
+               return;
+
+       trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_TX_STATUS);
+       status_trig = (void *)trig->data;
+
+       if (!iwl_fw_dbg_trigger_check_stop(mvm, NULL, trig))
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
+               /* don't collect on status 0 */
+               if (!status_trig->statuses[i].status)
+                       break;
+
+               if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
+                       continue;
+
+               iwl_mvm_fw_dbg_collect_trig(mvm, trig,
+                                           "Tx status %d was received",
+                                           status & TX_STATUS_MSK);
+               break;
+       }
+}
+
 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                                     struct iwl_rx_packet *pkt)
 {
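[Editor's note] The new trigger handler above walks a fixed-size list of configured statuses, treats a zero entry as the end of the list, and fires a debug collection when the masked TX status matches. Self-contained sketch; the mask value is illustrative:

    #include <stdbool.h>
    #include <stddef.h>

    #define TX_STATUS_MSK   0xff

    static bool tx_status_triggers(const unsigned int *wanted, size_t n,
                                   unsigned int status)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    if (!wanted[i])                 /* 0 terminates the list */
                            break;
                    if (wanted[i] == (status & TX_STATUS_MSK))
                            return true;            /* collect firmware debug data */
            }
            return false;
    }
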
@@ -784,6 +815,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
                        break;
                }
 
+               iwl_mvm_tx_status_check_trigger(mvm, status);
+
                info->status.rates[0].count = tx_resp->failure_frame + 1;
                iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
                                            info);
@@ -1029,7 +1062,6 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
                struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
                mvmsta->tid_data[tid].rate_n_flags =
                        le32_to_cpu(tx_resp->initial_rate);
-               mvmsta->tid_data[tid].reduced_tpc = tx_resp->reduced_tpc;
                mvmsta->tid_data[tid].tx_time =
                        le16_to_cpu(tx_resp->wireless_media_time);
        }
@@ -1060,7 +1092,7 @@ static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
        /* TODO: not accounted if the whole A-MPDU failed */
        info->status.tx_time = tid_data->tx_time;
        info->status.status_driver_data[0] =
-               (void *)(uintptr_t)tid_data->reduced_tpc;
+               (void *)(uintptr_t)ba_notif->reduced_txp;
        info->status.status_driver_data[1] =
                (void *)(uintptr_t)tid_data->rate_n_flags;
 }
@@ -1133,6 +1165,8 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
                           scd_flow, ba_resp_scd_ssn, ba_notif->txed,
                           ba_notif->txed_2_done);
 
+       IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
+                          ba_notif->reduced_txp);
        tid_data->next_reclaimed = ba_resp_scd_ssn;
 
        iwl_mvm_check_ratid_empty(mvm, sta, tid);
index 3a989f5c20db2b1a360361be970a97266f52ad25..59453c17658086ac757e2124f46cacfc842c4500 100644 (file)
@@ -937,18 +937,16 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
 }
 
 int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                              bool value)
+                              bool prev)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
        int res;
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (mvmvif->low_latency == value)
+       if (iwl_mvm_vif_low_latency(mvmvif) == prev)
                return 0;
 
-       mvmvif->low_latency = value;
-
        res = iwl_mvm_update_quotas(mvm, false, NULL);
        if (res)
                return res;
index 6261a68cae907d793cdbf62ceb9dda97e5031fee..753ec6785912f7f3eed9cab797d6216633582ad6 100644 (file)
@@ -7,6 +7,7 @@
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -66,6 +67,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/pci.h>
 #include <linux/pci-aspm.h>
 #include <linux/acpi.h>
@@ -378,7 +380,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
 
 /* 3168 Series */
+       {IWL_PCI_DEVICE(0x24FB, 0x2010, iwl3168_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FB, 0x2110, iwl3168_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FB, 0x2050, iwl3168_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FB, 0x2150, iwl3168_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FB, 0x0000, iwl3168_2ac_cfg)},
 
 /* 7265 Series */
@@ -475,6 +480,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
@@ -623,6 +629,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (ret)
                goto out_free_drv;
 
+       /* if RTPM is in use, enable it in our device */
+       if (iwl_trans->runtime_pm_mode != IWL_PLAT_PM_MODE_DISABLED) {
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_set_autosuspend_delay(&pdev->dev,
+                                        iwlwifi_mod_params.d0i3_entry_delay);
+               pm_runtime_use_autosuspend(&pdev->dev);
+               pm_runtime_allow(&pdev->dev);
+       }
+
        return 0;
 
 out_free_drv:
@@ -689,15 +704,132 @@ static int iwl_pci_resume(struct device *device)
        return 0;
 }
 
-static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
+int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret;
+
+       if (test_bit(STATUS_FW_ERROR, &trans->status))
+               return 0;
+
+       set_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+
+       /* config the fw */
+       ret = iwl_op_mode_enter_d0i3(trans->op_mode);
+       if (ret == 1) {
+               IWL_DEBUG_RPM(trans, "aborting d0i3 entrance\n");
+               clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+               return -EBUSY;
+       }
+       if (ret)
+               goto err;
+
+       ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+                                test_bit(STATUS_TRANS_IDLE, &trans->status),
+                                msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+       if (!ret) {
+               IWL_ERR(trans, "Timeout entering D0i3\n");
+               ret = -ETIMEDOUT;
+               goto err;
+       }
+
+       clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+
+       return 0;
+err:
+       clear_bit(STATUS_TRANS_GOING_IDLE, &trans->status);
+       iwl_trans_fw_error(trans);
+       return ret;
+}
+
+int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans)
+{
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       int ret;
+
+       /* sometimes a D0i3 entry is not followed through */
+       if (!test_bit(STATUS_TRANS_IDLE, &trans->status))
+               return 0;
+
+       /* config the fw */
+       ret = iwl_op_mode_exit_d0i3(trans->op_mode);
+       if (ret)
+               goto err;
+
+       /* we clear STATUS_TRANS_IDLE only when D0I3_END command is completed */
+
+       ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+                                !test_bit(STATUS_TRANS_IDLE, &trans->status),
+                                msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+       if (!ret) {
+               IWL_ERR(trans, "Timeout exiting D0i3\n");
+               ret = -ETIMEDOUT;
+               goto err;
+       }
+
+       return 0;
+err:
+       clear_bit(STATUS_TRANS_IDLE, &trans->status);
+       iwl_trans_fw_error(trans);
+       return ret;
+}
+
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+static int iwl_pci_runtime_suspend(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct iwl_trans *trans = pci_get_drvdata(pdev);
+       int ret;
+
+       IWL_DEBUG_RPM(trans, "entering runtime suspend\n");
+
+       if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
+               ret = iwl_pci_fw_enter_d0i3(trans);
+               if (ret < 0)
+                       return ret;
+       }
+
+       trans->system_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+
+       iwl_trans_d3_suspend(trans, false, false);
+
+       return 0;
+}
+
+static int iwl_pci_runtime_resume(struct device *device)
+{
+       struct pci_dev *pdev = to_pci_dev(device);
+       struct iwl_trans *trans = pci_get_drvdata(pdev);
+       enum iwl_d3_status d3_status;
+
+       IWL_DEBUG_RPM(trans, "exiting runtime suspend (resume)\n");
+
+       iwl_trans_d3_resume(trans, &d3_status, false, false);
+
+       if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+               return iwl_pci_fw_exit_d0i3(trans);
+
+       return 0;
+}
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+
+static const struct dev_pm_ops iwl_dev_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(iwl_pci_suspend,
+                               iwl_pci_resume)
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+       SET_RUNTIME_PM_OPS(iwl_pci_runtime_suspend,
+                          iwl_pci_runtime_resume,
+                          NULL)
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+};
 
 #define IWL_PM_OPS     (&iwl_dev_pm_ops)
 
-#else
+#else /* CONFIG_PM_SLEEP */
 
 #define IWL_PM_OPS     NULL
 
-#endif
+#endif /* CONFIG_PM_SLEEP */
 
 static struct pci_driver iwl_pci_driver = {
        .name = DRV_NAME,
index cc3888e2700daf1cfe07ebb4e003ca53bcf6d934..2f959162d045671aaf40723a52c292f9d5e623e4 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
 #define RX_NUM_QUEUES 1
 #define RX_POST_REQ_ALLOC 2
 #define RX_CLAIM_REQ_ALLOC 8
-#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
-#define RX_LOW_WATERMARK 8
+#define RX_PENDING_WATERMARK 16
 
 struct iwl_host_cmd;
 
 /*This file includes the declaration that are internal to the
  * trans_pcie layer */
 
+/**
+ * struct iwl_rx_mem_buffer
+ * @page_dma: bus address of rxb page
+ * @page: driver's pointer to the rxb page
+ * @vid: index of this rxb in the global table
+ */
 struct iwl_rx_mem_buffer {
        dma_addr_t page_dma;
        struct page *page;
+       u16 vid;
        struct list_head list;
 };
 
@@ -90,8 +97,12 @@ struct isr_statistics {
 
 /**
  * struct iwl_rxq - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @id: queue index
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
+ *     Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
  * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
+ * @used_bd_dma: bus address of buffer of used receive buffer descriptors (rbd)
  * @read: Shared index to newest available Rx buffer
  * @write: Shared index to oldest written Rx packet
  * @free_count: Number of pre-allocated buffers in rx_free
@@ -103,32 +114,34 @@ struct isr_statistics {
  * @rb_stts: driver's pointer to receive buffer status
  * @rb_stts_dma: bus address of receive buffer status
  * @lock:
- * @pool: initial pool of iwl_rx_mem_buffer for the queue
- * @queue: actual rx queue
+ * @queue: actual rx queue. Not used for multi-rx queue.
  *
  * NOTE:  rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
  */
 struct iwl_rxq {
-       __le32 *bd;
+       int id;
+       void *bd;
        dma_addr_t bd_dma;
+       __le32 *used_bd;
+       dma_addr_t used_bd_dma;
        u32 read;
        u32 write;
        u32 free_count;
        u32 used_count;
        u32 write_actual;
+       u32 queue_size;
        struct list_head rx_free;
        struct list_head rx_used;
        bool need_update;
        struct iwl_rb_status *rb_stts;
        dma_addr_t rb_stts_dma;
        spinlock_t lock;
-       struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
+       struct napi_struct napi;
        struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
 };
 
 /**
  * struct iwl_rb_allocator - Rx allocator
- * @pool: initial pool of allocator
  * @req_pending: number of requests the allocator has not processed yet
  * @req_ready: number of requests honored and ready for claiming
  * @rbd_allocated: RBDs with pages allocated and ready to be handled to
@@ -140,7 +153,6 @@ struct iwl_rxq {
  * @rx_alloc: work struct for background calls
  */
 struct iwl_rb_allocator {
-       struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
        atomic_t req_pending;
        atomic_t req_ready;
        struct list_head rbd_allocated;
@@ -280,6 +292,7 @@ struct iwl_txq {
        bool ampdu;
        bool block;
        unsigned long wd_timeout;
+       struct sk_buff_head overflow_q;
 };
 
 static inline dma_addr_t
@@ -297,6 +310,8 @@ struct iwl_tso_hdr_page {
 /**
  * struct iwl_trans_pcie - PCIe transport specific data
  * @rxq: all the RX queue data
+ * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
+ * @global_table: table mapping received VID from hw to rxb
  * @rba: allocator for RX replenishing
  * @drv - pointer to iwl_drv
  * @trans: pointer to the generic transport area
@@ -323,13 +338,14 @@ struct iwl_tso_hdr_page {
  * @fw_mon_size: size of the buffer for the firmware monitor
  */
 struct iwl_trans_pcie {
-       struct iwl_rxq rxq;
+       struct iwl_rxq *rxq;
+       struct iwl_rx_mem_buffer rx_pool[MQ_RX_POOL_SIZE];
+       struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
        struct iwl_rb_allocator rba;
        struct iwl_trans *trans;
        struct iwl_drv *drv;
 
        struct net_device napi_dev;
-       struct napi_struct napi;
 
        struct __percpu iwl_tso_hdr_page *tso_hdr_page;
 
@@ -359,6 +375,7 @@ struct iwl_trans_pcie {
        bool ucode_write_complete;
        wait_queue_head_t ucode_write_waitq;
        wait_queue_head_t wait_command_queue;
+       wait_queue_head_t d0i3_waitq;
 
        u8 cmd_queue;
        u8 cmd_fifo;
@@ -579,4 +596,7 @@ static inline int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
 }
 #endif
 
+int iwl_pci_fw_exit_d0i3(struct iwl_trans *trans);
+int iwl_pci_fw_enter_d0i3(struct iwl_trans *trans);
+
 #endif /* __iwl_trans_int_pcie_h__ */
index ccafbd8cf4b3b224649b3fbc87da69207884743c..51314e56209d3c07972dcba4b6b41aa231cd3ad7 100644 (file)
@@ -2,6 +2,7 @@
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
  */
 static int iwl_rxq_space(const struct iwl_rxq *rxq)
 {
-       /* Make sure RX_QUEUE_SIZE is a power of 2 */
-       BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+       /* Make sure rx queue size is a power of 2 */
+       WARN_ON(rxq->queue_size & (rxq->queue_size - 1));
 
        /*
         * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
@@ -149,7 +150,7 @@ static int iwl_rxq_space(const struct iwl_rxq *rxq)
         * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
         * defined for negative dividends.
         */
-       return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
+       return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
 }
 
 /*
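[Editor's note] The space calculation above now uses the per-queue size instead of the compile-time constant; it still relies on the queue size being a power of two so that unsigned subtraction plus a mask yields the free count, with one slot deliberately left empty to tell "full" from "empty". Sketch:

    #include <stdint.h>

    /* size must be a power of two; one slot stays unused on purpose. */
    static uint32_t rxq_space(uint32_t read, uint32_t write, uint32_t size)
    {
            return (read - write - 1) & (size - 1);
    }
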
@@ -160,6 +161,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
        return cpu_to_le32((u32)(dma_addr >> 8));
 }
 
+static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
+{
+       iwl_write_prph(trans, ofs, val & 0xffffffff);
+       iwl_write_prph(trans, ofs + 4, val >> 32);
+}
+
 /*
  * iwl_pcie_rx_stop - stops the Rx DMA
  */
@@ -173,10 +180,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
 /*
  * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
  */
-static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
+static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
+                                   struct iwl_rxq *rxq)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        u32 reg;
 
        lockdep_assert_held(&rxq->lock);
@@ -201,24 +207,73 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans)
        }
 
        rxq->write_actual = round_down(rxq->write, 8);
-       iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+       if (trans->cfg->mq_rx_supported)
+               iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
+                              rxq->write_actual);
+       else
+               iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 }
 
 static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       int i;
 
-       spin_lock(&rxq->lock);
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+               if (!rxq->need_update)
+                       continue;
+               spin_lock(&rxq->lock);
+               iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+               rxq->need_update = false;
+               spin_unlock(&rxq->lock);
+       }
+}
+
+static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
+                                   struct iwl_rxq *rxq)
+{
+       struct iwl_rx_mem_buffer *rxb;
+
+       /*
+        * If the device isn't enabled - no need to try to add buffers...
+        * This can happen when we stop the device and still have an interrupt
+        * pending. We stop the APM before we sync the interrupts because we
+        * have to (see comment there). On the other hand, since the APM is
+        * stopped, we cannot access the HW (in particular not prph).
+        * So don't try to restock if the APM has been already stopped.
+        */
+       if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+               return;
 
-       if (!rxq->need_update)
-               goto exit_unlock;
+       spin_lock(&rxq->lock);
+       while (rxq->free_count) {
+               __le64 *bd = (__le64 *)rxq->bd;
 
-       iwl_pcie_rxq_inc_wr_ptr(trans);
-       rxq->need_update = false;
+               /* Get next free Rx buffer, remove from free list */
+               rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
+                                      list);
+               list_del(&rxb->list);
 
- exit_unlock:
+               /* first 12 bits are expected to be empty */
+               WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+               /* Point to Rx buffer via next RBD in circular buffer */
+               bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+               rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+               rxq->free_count--;
+       }
        spin_unlock(&rxq->lock);
+
+       /*
+        * If we've added more space for the firmware to place data, tell it.
+        * Increment device's write pointer in multiples of 8.
+        */
+       if (rxq->write_actual != (rxq->write & ~0x7)) {
+               spin_lock(&rxq->lock);
+               iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
+               spin_unlock(&rxq->lock);
+       }
 }
 
 /*
@@ -232,10 +287,8 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
  * also updates the memory address in the firmware to reference the new
  * target buffer.
  */
-static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
+static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
 
        /*
@@ -251,6 +304,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
 
        spin_lock(&rxq->lock);
        while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
+               __le32 *bd = (__le32 *)rxq->bd;
                /* The overwritten rxb must be a used one */
                rxb = rxq->queue[rxq->write];
                BUG_ON(rxb && rxb->page);
@@ -261,7 +315,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
                list_del(&rxb->list);
 
                /* Point to Rx buffer via next RBD in circular buffer */
-               rxq->bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
+               bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
                rxq->queue[rxq->write] = rxb;
                rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
                rxq->free_count--;
@@ -272,7 +326,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
         * Increment device's write pointer in multiples of 8. */
        if (rxq->write_actual != (rxq->write & ~0x7)) {
                spin_lock(&rxq->lock);
-               iwl_pcie_rxq_inc_wr_ptr(trans);
+               iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
                spin_unlock(&rxq->lock);
        }
 }
@@ -285,13 +339,9 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
                                           gfp_t priority)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct page *page;
        gfp_t gfp_mask = priority;
 
-       if (rxq->free_count > RX_LOW_WATERMARK)
-               gfp_mask |= __GFP_NOWARN;
-
        if (trans_pcie->rx_page_order > 0)
                gfp_mask |= __GFP_COMP;
 
@@ -301,16 +351,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
                if (net_ratelimit())
                        IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
                                       trans_pcie->rx_page_order);
-               /* Issue an error if the hardware has consumed more than half
-                * of its free buffer list and we don't have enough
-                * pre-allocated buffers.
+               /*
+                * Issue an error if we don't have enough pre-allocated
+                * buffers.
+                */
-               if (rxq->free_count <= RX_LOW_WATERMARK &&
-                   iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
-                   net_ratelimit())
+               if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
                        IWL_CRIT(trans,
-                                "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
-                                rxq->free_count);
+                                "Failed to alloc_pages\n");
                return NULL;
        }
        return page;
@@ -325,10 +372,10 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
+static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+                                  struct iwl_rxq *rxq)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rx_mem_buffer *rxb;
        struct page *page;
 
@@ -372,10 +419,6 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
                        __free_pages(page, trans_pcie->rx_page_order);
                        return;
                }
-               /* dma address must be no more than 36 bits */
-               BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-               /* and also 256 byte aligned! */
-               BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
                spin_lock(&rxq->lock);
 
@@ -386,40 +429,23 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
        }
 }
 
-static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
+static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        int i;
 
-       lockdep_assert_held(&rxq->lock);
-
-       for (i = 0; i < RX_QUEUE_SIZE; i++) {
-               if (!rxq->pool[i].page)
+       for (i = 0; i < MQ_RX_POOL_SIZE; i++) {
+               if (!trans_pcie->rx_pool[i].page)
                        continue;
-               dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
+               dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
                               PAGE_SIZE << trans_pcie->rx_page_order,
                               DMA_FROM_DEVICE);
-               __free_pages(rxq->pool[i].page, trans_pcie->rx_page_order);
-               rxq->pool[i].page = NULL;
+               __free_pages(trans_pcie->rx_pool[i].page,
+                            trans_pcie->rx_page_order);
+               trans_pcie->rx_pool[i].page = NULL;
        }
 }
 
-/*
- * iwl_pcie_rx_replenish - Move all used buffers from rx_used to rx_free
- *
- * When moving to rx_free an page is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_pcie_rxq_restock.
- * This is called only during initialization
- */
-static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
-{
-       iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
-
-       iwl_pcie_rxq_restock(trans);
-}
-
 /*
  * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
  *
@@ -444,6 +470,11 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
        while (pending) {
                int i;
                struct list_head local_allocated;
+               gfp_t gfp_mask = GFP_KERNEL;
+
+               /* Do not post a warning if there are only a few requests */
+               if (pending < RX_PENDING_WATERMARK)
+                       gfp_mask |= __GFP_NOWARN;
 
                INIT_LIST_HEAD(&local_allocated);
 
@@ -463,7 +494,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
                        BUG_ON(rxb->page);
 
                        /* Alloc a new receive buffer */
-                       page = iwl_pcie_rx_alloc_page(trans, GFP_KERNEL);
+                       page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
                        if (!page)
                                continue;
                        rxb->page = page;
@@ -477,10 +508,6 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
                                __free_pages(page, trans_pcie->rx_page_order);
                                continue;
                        }
-                       /* dma address must be no more than 36 bits */
-                       BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
-                       /* and also 256 byte aligned! */
-                       BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
                        /* move the allocated entry to the out list */
                        list_move(&rxb->list, &local_allocated);
@@ -561,38 +588,83 @@ static void iwl_pcie_rx_allocator_work(struct work_struct *data)
 static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
        struct device *dev = trans->dev;
+       int i;
+       int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
+                                                     sizeof(__le32);
+
+       if (WARN_ON(trans_pcie->rxq))
+               return -EINVAL;
 
-       memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
+       trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
+                                 GFP_KERNEL);
+       if (!trans_pcie->rxq)
+               return -EINVAL;
 
-       spin_lock_init(&rxq->lock);
        spin_lock_init(&rba->lock);
 
-       if (WARN_ON(rxq->bd || rxq->rb_stts))
-               return -EINVAL;
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
 
-       /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
-       rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
-                                     &rxq->bd_dma, GFP_KERNEL);
-       if (!rxq->bd)
-               goto err_bd;
+               spin_lock_init(&rxq->lock);
+               if (trans->cfg->mq_rx_supported)
+                       rxq->queue_size = MQ_RX_TABLE_SIZE;
+               else
+                       rxq->queue_size = RX_QUEUE_SIZE;
 
-       /*Allocate the driver's pointer to receive buffer status */
-       rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
-                                          &rxq->rb_stts_dma, GFP_KERNEL);
-       if (!rxq->rb_stts)
-               goto err_rb_stts;
+               /*
+                * Allocate the circular buffer of Read Buffer Descriptors
+                * (RBDs)
+                */
+               rxq->bd = dma_zalloc_coherent(dev,
+                                            free_size * rxq->queue_size,
+                                            &rxq->bd_dma, GFP_KERNEL);
+               if (!rxq->bd)
+                       goto err;
+
+               if (trans->cfg->mq_rx_supported) {
+                       rxq->used_bd = dma_zalloc_coherent(dev,
+                                                          sizeof(__le32) *
+                                                          rxq->queue_size,
+                                                          &rxq->used_bd_dma,
+                                                          GFP_KERNEL);
+                       if (!rxq->used_bd)
+                               goto err;
+               }
 
+               /* Allocate the driver's pointer to receive buffer status */
+               rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+                                                  &rxq->rb_stts_dma,
+                                                  GFP_KERNEL);
+               if (!rxq->rb_stts)
+                       goto err;
+       }
        return 0;
 
-err_rb_stts:
-       dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
-                         rxq->bd, rxq->bd_dma);
-       rxq->bd_dma = 0;
-       rxq->bd = NULL;
-err_bd:
+err:
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+               if (rxq->bd)
+                       dma_free_coherent(dev, free_size * rxq->queue_size,
+                                         rxq->bd, rxq->bd_dma);
+               rxq->bd_dma = 0;
+               rxq->bd = NULL;
+
+               if (rxq->rb_stts)
+                       dma_free_coherent(trans->dev,
+                                         sizeof(struct iwl_rb_status),
+                                         rxq->rb_stts, rxq->rb_stts_dma);
+
+               if (rxq->used_bd)
+                       dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
+                                         rxq->used_bd, rxq->used_bd_dma);
+               rxq->used_bd_dma = 0;
+               rxq->used_bd = NULL;
+       }
+       kfree(trans_pcie->rxq);
+
        return -ENOMEM;
 }
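
The allocation hunk above sizes each queue's rings from two inputs: the per-entry descriptor width (64-bit free RBDs with multi-queue RX, 32-bit otherwise) and the ring depth, and it only allocates the "used" ring in the multi-queue case. A quick user-space sketch of that arithmetic, assuming a 512-entry MQ table (matching the "512 RBDs" comment in the RFH init hunk below) and a 256-entry legacy queue:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MQ_RX_TABLE_SIZE 512U   /* assumed, per the "512 RBDs" comment */
#define RX_QUEUE_SIZE    256U   /* legacy single-queue depth, assumed */

int main(void)
{
	int mq_rx_supported = 1;
	size_t free_size  = mq_rx_supported ? sizeof(uint64_t) : sizeof(uint32_t);
	size_t queue_size = mq_rx_supported ? MQ_RX_TABLE_SIZE : RX_QUEUE_SIZE;

	/* coherent DMA bytes needed for one queue's free-RBD ring */
	printf("free-RBD ring: %zu bytes\n", free_size * queue_size);
	/* the "used" ring only exists with multi-queue RX, 32 bits per entry */
	if (mq_rx_supported)
		printf("used-RBD ring: %zu bytes\n",
		       sizeof(uint32_t) * queue_size);
	return 0;
}
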
 
@@ -659,65 +731,103 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
                iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
 }
 
-static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 {
+       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+       u32 rb_size, enabled = 0;
        int i;
 
-       lockdep_assert_held(&rxq->lock);
-
-       INIT_LIST_HEAD(&rxq->rx_free);
-       INIT_LIST_HEAD(&rxq->rx_used);
-       rxq->free_count = 0;
-       rxq->used_count = 0;
+       switch (trans_pcie->rx_buf_size) {
+       case IWL_AMSDU_4K:
+               rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+               break;
+       case IWL_AMSDU_8K:
+               rb_size = RFH_RXF_DMA_RB_SIZE_8K;
+               break;
+       case IWL_AMSDU_12K:
+               rb_size = RFH_RXF_DMA_RB_SIZE_12K;
+               break;
+       default:
+               WARN_ON(1);
+               rb_size = RFH_RXF_DMA_RB_SIZE_4K;
+       }
 
-       for (i = 0; i < RX_QUEUE_SIZE; i++)
-               list_add(&rxq->pool[i].list, &rxq->rx_used);
-}
+       /* Stop Rx DMA */
+       iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+       /* disable free and used rx queue operation */
+       iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
+
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               /* Tell device where to find RBD free table in DRAM */
+               iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
+                                      (u64)(trans_pcie->rxq[i].bd_dma));
+               /* Tell device where to find RBD used table in DRAM */
+               iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
+                                      (u64)(trans_pcie->rxq[i].used_bd_dma));
+               /* Tell device where in DRAM to update its Rx status */
+               iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
+                                      trans_pcie->rxq[i].rb_stts_dma);
+               /* Reset device index tables */
+               iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+               iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+               iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
+
+               enabled |= BIT(i) | BIT(i + 16);
+       }
 
-static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
-{
-       int i;
+       /* restock default queue */
+       iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);
 
-       lockdep_assert_held(&rba->lock);
+       /*
+        * Enable Rx DMA
+        * Single frame mode
+        * Rx buffer size 4 or 8k or 12k
+        * Min RB size 4 or 8
+        * 512 RBDs
+        */
+       iwl_write_prph(trans, RFH_RXF_DMA_CFG,
+                      RFH_DMA_EN_ENABLE_VAL |
+                      rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
+                      RFH_RXF_DMA_MIN_RB_4_8 |
+                      RFH_RXF_DMA_RBDCB_SIZE_512);
 
-       INIT_LIST_HEAD(&rba->rbd_allocated);
-       INIT_LIST_HEAD(&rba->rbd_empty);
+       iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
+                                         RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+       iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
 
-       for (i = 0; i < RX_POOL_SIZE; i++)
-               list_add(&rba->pool[i].list, &rba->rbd_empty);
+       /* Set interrupt coalescing timer to default (2048 usecs) */
+       iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
 }
 
-static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
+static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 {
-       struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rb_allocator *rba = &trans_pcie->rba;
-       int i;
+       lockdep_assert_held(&rxq->lock);
 
-       lockdep_assert_held(&rba->lock);
+       INIT_LIST_HEAD(&rxq->rx_free);
+       INIT_LIST_HEAD(&rxq->rx_used);
+       rxq->free_count = 0;
+       rxq->used_count = 0;
+}
 
-       for (i = 0; i < RX_POOL_SIZE; i++) {
-               if (!rba->pool[i].page)
-                       continue;
-               dma_unmap_page(trans->dev, rba->pool[i].page_dma,
-                              PAGE_SIZE << trans_pcie->rx_page_order,
-                              DMA_FROM_DEVICE);
-               __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
-               rba->pool[i].page = NULL;
-       }
+static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+{
+       WARN_ON(1);
+       return 0;
 }
 
 int iwl_pcie_rx_init(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rxq *def_rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
-       int i, err;
+       int i, err, num_rbds, allocator_pool_size;
 
-       if (!rxq->bd) {
+       if (!trans_pcie->rxq) {
                err = iwl_pcie_rx_alloc(trans);
                if (err)
                        return err;
        }
+       def_rxq = trans_pcie->rxq;
        if (!rba->alloc_wq)
                rba->alloc_wq = alloc_workqueue("rb_allocator",
                                                WQ_HIGHPRI | WQ_UNBOUND, 1);
@@ -726,34 +836,68 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
        spin_lock(&rba->lock);
        atomic_set(&rba->req_pending, 0);
        atomic_set(&rba->req_ready, 0);
-       /* free all first - we might be reconfigured for a different size */
-       iwl_pcie_rx_free_rba(trans);
-       iwl_pcie_rx_init_rba(rba);
+       INIT_LIST_HEAD(&rba->rbd_allocated);
+       INIT_LIST_HEAD(&rba->rbd_empty);
        spin_unlock(&rba->lock);
 
-       spin_lock(&rxq->lock);
-
        /* free all first - we might be reconfigured for a different size */
-       iwl_pcie_rxq_free_rbs(trans);
-       iwl_pcie_rx_init_rxb_lists(rxq);
+       iwl_pcie_free_rbs_pool(trans);
 
        for (i = 0; i < RX_QUEUE_SIZE; i++)
-               rxq->queue[i] = NULL;
+               def_rxq->queue[i] = NULL;
 
-       /* Set us so that we have processed and used all buffers, but have
-        * not restocked the Rx queue with fresh buffers */
-       rxq->read = rxq->write = 0;
-       rxq->write_actual = 0;
-       memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
-       spin_unlock(&rxq->lock);
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
 
-       iwl_pcie_rx_replenish(trans);
+               rxq->id = i;
 
-       iwl_pcie_rx_hw_init(trans, rxq);
+               spin_lock(&rxq->lock);
+               /*
+                * Set read write pointer to reflect that we have processed
+                * and used all buffers, but have not restocked the Rx queue
+                * with fresh buffers
+                */
+               rxq->read = 0;
+               rxq->write = 0;
+               rxq->write_actual = 0;
+               memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
 
-       spin_lock(&rxq->lock);
-       iwl_pcie_rxq_inc_wr_ptr(trans);
-       spin_unlock(&rxq->lock);
+               iwl_pcie_rx_init_rxb_lists(rxq);
+
+               if (!rxq->napi.poll)
+                       netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
+                                      iwl_pcie_dummy_napi_poll, 64);
+
+               spin_unlock(&rxq->lock);
+       }
+
+       /* move the pool to the default queue and allocator ownerships */
+       num_rbds = trans->cfg->mq_rx_supported ?
+                    MQ_RX_POOL_SIZE : RX_QUEUE_SIZE;
+       allocator_pool_size = trans->num_rx_queues *
+               (RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
+       for (i = 0; i < num_rbds; i++) {
+               struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
+
+               if (i < allocator_pool_size)
+                       list_add(&rxb->list, &rba->rbd_empty);
+               else
+                       list_add(&rxb->list, &def_rxq->rx_used);
+               trans_pcie->global_table[i] = rxb;
+               rxb->vid = (u16)i;
+       }
+
+       iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
+       if (trans->cfg->mq_rx_supported) {
+               iwl_pcie_rx_mq_hw_init(trans);
+       } else {
+               iwl_pcie_rxq_restock(trans, def_rxq);
+               iwl_pcie_rx_hw_init(trans, def_rxq);
+       }
+
+       spin_lock(&def_rxq->lock);
+       iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
+       spin_unlock(&def_rxq->lock);
 
        return 0;
 }
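
iwl_pcie_rx_init above splits the shared buffer pool between the background allocator and the default queue, and registers every buffer in global_table under its vid so the multi-queue completion path can find it later. A compact sketch of that split; the queue count and the RX_CLAIM_REQ_ALLOC/RX_POST_REQ_ALLOC values below are example numbers, not the driver's definitions:

#include <stdio.h>

int main(void)
{
	const int num_rx_queues = 4;            /* assumed example value */
	const int rx_claim_req_alloc = 8;       /* assumed example value */
	const int rx_post_req_alloc = 4;        /* assumed example value */
	const int num_rbds = 512;               /* MQ pool size, assumed */
	int allocator_pool_size =
		num_rx_queues * (rx_claim_req_alloc - rx_post_req_alloc);
	int to_allocator = 0, to_default_queue = 0;
	int i;

	for (i = 0; i < num_rbds; i++) {
		/* global_table[i] would point at buffer i, whose vid is i */
		if (i < allocator_pool_size)
			to_allocator++;         /* seeds the allocator's empty list */
		else
			to_default_queue++;     /* seeds the default queue's used list */
	}
	printf("allocator: %d buffers, default queue: %d buffers\n",
	       to_allocator, to_default_queue);
	return 0;
}
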
@@ -761,12 +905,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 void iwl_pcie_rx_free(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_rb_allocator *rba = &trans_pcie->rba;
+       int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
+                                             sizeof(__le32);
+       int i;
 
-       /*if rxq->bd is NULL, it means that nothing has been allocated,
-        * exit now */
-       if (!rxq->bd) {
+       /*
+        * if rxq is NULL, it means that nothing has been allocated,
+        * exit now
+        */
+       if (!trans_pcie->rxq) {
                IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
                return;
        }
@@ -777,27 +925,37 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
                rba->alloc_wq = NULL;
        }
 
-       spin_lock(&rba->lock);
-       iwl_pcie_rx_free_rba(trans);
-       spin_unlock(&rba->lock);
-
-       spin_lock(&rxq->lock);
-       iwl_pcie_rxq_free_rbs(trans);
-       spin_unlock(&rxq->lock);
-
-       dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
-                         rxq->bd, rxq->bd_dma);
-       rxq->bd_dma = 0;
-       rxq->bd = NULL;
-
-       if (rxq->rb_stts)
-               dma_free_coherent(trans->dev,
-                                 sizeof(struct iwl_rb_status),
-                                 rxq->rb_stts, rxq->rb_stts_dma);
-       else
-               IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
-       rxq->rb_stts_dma = 0;
-       rxq->rb_stts = NULL;
+       iwl_pcie_free_rbs_pool(trans);
+
+       for (i = 0; i < trans->num_rx_queues; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+               if (rxq->bd)
+                       dma_free_coherent(trans->dev,
+                                         free_size * rxq->queue_size,
+                                         rxq->bd, rxq->bd_dma);
+               rxq->bd_dma = 0;
+               rxq->bd = NULL;
+
+               if (rxq->rb_stts)
+                       dma_free_coherent(trans->dev,
+                                         sizeof(struct iwl_rb_status),
+                                         rxq->rb_stts, rxq->rb_stts_dma);
+               else
+                       IWL_DEBUG_INFO(trans,
+                                      "Free rxq->rb_stts which is NULL\n");
+
+               if (rxq->used_bd)
+                       dma_free_coherent(trans->dev,
+                                         sizeof(__le32) * rxq->queue_size,
+                                         rxq->used_bd, rxq->used_bd_dma);
+               rxq->used_bd_dma = 0;
+               rxq->used_bd = NULL;
+
+               if (rxq->napi.poll)
+                       netif_napi_del(&rxq->napi);
+       }
+       kfree(trans_pcie->rxq);
 }
 
 /*
@@ -841,11 +999,11 @@ static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
 }
 
 static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
+                               struct iwl_rxq *rxq,
                                struct iwl_rx_mem_buffer *rxb,
                                bool emergency)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
        struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
        bool page_stolen = false;
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
@@ -911,7 +1069,12 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
                index = SEQ_TO_INDEX(sequence);
                cmd_index = get_cmd_index(&txq->q, index);
 
-               iwl_op_mode_rx(trans->op_mode, &trans_pcie->napi, &rxcb);
+               if (rxq->id == 0)
+                       iwl_op_mode_rx(trans->op_mode, &rxq->napi,
+                                      &rxcb);
+               else
+                       iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
+                                          &rxcb, rxq->id);
 
                if (reclaim) {
                        kzfree(txq->entries[cmd_index].free_buf);
@@ -975,7 +1138,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 static void iwl_pcie_rx_handle(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       struct iwl_rxq *rxq = &trans_pcie->rxq[0];
        u32 r, i, j, count = 0;
        bool emergency = false;
 
@@ -993,16 +1156,26 @@ restart:
        while (i != r) {
                struct iwl_rx_mem_buffer *rxb;
 
-               if (unlikely(rxq->used_count == RX_QUEUE_SIZE / 2))
+               if (unlikely(rxq->used_count == rxq->queue_size / 2))
                        emergency = true;
 
-               rxb = rxq->queue[i];
-               rxq->queue[i] = NULL;
+               if (trans->cfg->mq_rx_supported) {
+                       /*
+                        * used_bd is a 32 bit but only 12 are used to retrieve
+                        * the vid
+                        */
+                       u16 vid = (u16)le32_to_cpu(rxq->used_bd[i]);
+
+                       rxb = trans_pcie->global_table[vid];
+               } else {
+                       rxb = rxq->queue[i];
+                       rxq->queue[i] = NULL;
+               }
 
                IWL_DEBUG_RX(trans, "rxbuf: HW = %d, SW = %d\n", r, i);
-               iwl_pcie_rx_handle_rb(trans, rxb, emergency);
+               iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
 
-               i = (i + 1) & RX_QUEUE_MASK;
+               i = (i + 1) & (rxq->queue_size - 1);
 
                /* If we have RX_CLAIM_REQ_ALLOC released rx buffers -
                 * try to claim the pre-allocated buffers from the allocator */
@@ -1040,10 +1213,10 @@ restart:
                        count++;
                        if (count == 8) {
                                count = 0;
-                               if (rxq->used_count < RX_QUEUE_SIZE / 3)
+                               if (rxq->used_count < rxq->queue_size / 3)
                                        emergency = false;
                                spin_unlock(&rxq->lock);
-                               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+                               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
                                spin_lock(&rxq->lock);
                        }
                }
@@ -1055,7 +1228,10 @@ restart:
                if (rxq->free_count >=  RX_CLAIM_REQ_ALLOC) {
                        rxq->read = i;
                        spin_unlock(&rxq->lock);
-                       iwl_pcie_rxq_restock(trans);
+                       if (trans->cfg->mq_rx_supported)
+                               iwl_pcie_rxq_mq_restock(trans, rxq);
+                       else
+                               iwl_pcie_rxq_restock(trans, rxq);
                        goto restart;
                }
        }
@@ -1077,10 +1253,10 @@ restart:
         * will be restocked by the next call of iwl_pcie_rxq_restock.
         */
        if (unlikely(emergency && count))
-               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC);
+               iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
 
-       if (trans_pcie->napi.poll)
-               napi_gro_flush(&trans_pcie->napi, false);
+       if (rxq->napi.poll)
+               napi_gro_flush(&rxq->napi, false);
 }
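
With multi-queue RX the hardware only reports a buffer id in the used-BD ring, so the handler above resolves it through global_table rather than a per-queue array, and the index advance i = (i + 1) & (queue_size - 1) is correct only because the queue size is a power of two. A minimal sketch of both steps (struct and function names assumed):

#include <stdint.h>

struct example_rxb {
	void *page;
};

/* The 32-bit used BD carries the buffer id ("vid") in its low 12 bits. */
struct example_rxb *lookup_used_rbd(struct example_rxb **global_table,
				    uint32_t used_bd_word)
{
	uint16_t vid = used_bd_word & 0xfff;

	return global_table[vid];
}

/* Advance a ring index; only valid when queue_size is a power of two. */
uint32_t ring_next(uint32_t i, uint32_t queue_size)
{
	return (i + 1) & (queue_size - 1);
}
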
 
 /*
index d60a467a983c6f220384b701953eb42d2cc4c1ae..b796952da644ca0a32ad4bf1cf5851b984fdcf88 100644 (file)
@@ -72,6 +72,7 @@
 #include <linux/bitops.h>
 #include <linux/gfp.h>
 #include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
 
 #include "iwl-drv.h"
 #include "iwl-trans.h"
@@ -1218,11 +1219,12 @@ void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
                _iwl_trans_pcie_stop_device(trans, true);
 }
 
-static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
+static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
+                                     bool reset)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
-       if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
+       if (!reset) {
                /* Enable persistence mode to avoid reset */
                iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
                            CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
@@ -1246,7 +1248,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
        iwl_clear_bit(trans, CSR_GP_CNTRL,
                      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
 
-       if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D3) {
+       if (reset) {
                /*
                 * reset TX queues -- some of their registers reset during S3
                 * so if we don't reset everything here the D3 image would try
@@ -1260,7 +1262,7 @@ static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
 
 static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
                                    enum iwl_d3_status *status,
-                                   bool test)
+                                   bool test,  bool reset)
 {
        u32 val;
        int ret;
@@ -1295,7 +1297,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 
        iwl_pcie_set_pwr(trans, false);
 
-       if (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3) {
+       if (!reset) {
                iwl_clear_bit(trans, CSR_GP_CNTRL,
                              CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
        } else {
@@ -1353,6 +1355,10 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
        /* ... rfkill can call stop_device and set it false if needed */
        iwl_trans_pcie_rf_kill(trans, hw_rfkill);
 
+       /* Make sure we sync here, because we'll need full access later */
+       if (low_power)
+               pm_runtime_resume(trans->dev);
+
        return 0;
 }
 
@@ -1422,12 +1428,6 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
        iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
 }
 
-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
-{
-       WARN_ON(1);
-       return 0;
-}
-
 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
                                     const struct iwl_trans_config *trans_cfg)
 {
@@ -1464,11 +1464,8 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
         * As this function may be called again in some corner cases don't
         * do anything if NAPI was already initialized.
         */
-       if (!trans_pcie->napi.poll) {
+       if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
                init_dummy_netdev(&trans_pcie->napi_dev);
-               netif_napi_add(&trans_pcie->napi_dev, &trans_pcie->napi,
-                              iwl_pcie_dummy_napi_poll, 64);
-       }
 }
 
 void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -1476,6 +1473,9 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int i;
 
+       /* TODO: check if this is really needed */
+       pm_runtime_disable(trans->dev);
+
        synchronize_irq(trans_pcie->pci_dev->irq);
 
        iwl_pcie_tx_free(trans);
@@ -1489,9 +1489,6 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
        pci_release_regions(trans_pcie->pci_dev);
        pci_disable_device(trans_pcie->pci_dev);
 
-       if (trans_pcie->napi.poll)
-               netif_napi_del(&trans_pcie->napi);
-
        iwl_pcie_free_fw_monitor(trans);
 
        for_each_possible_cpu(i) {
@@ -1831,6 +1828,7 @@ void iwl_trans_pcie_ref(struct iwl_trans *trans)
        spin_lock_irqsave(&trans_pcie->ref_lock, flags);
        IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
        trans_pcie->ref_count++;
+       pm_runtime_get(&trans_pcie->pci_dev->dev);
        spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
 }
 
@@ -1849,6 +1847,10 @@ void iwl_trans_pcie_unref(struct iwl_trans *trans)
                return;
        }
        trans_pcie->ref_count--;
+
+       pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
+       pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
+
        spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
 }
 
@@ -2001,29 +2003,48 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 {
        struct iwl_trans *trans = file->private_data;
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
-       char buf[256];
-       int pos = 0;
-       const size_t bufsz = sizeof(buf);
-
-       pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
-                                               rxq->read);
-       pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
-                                               rxq->write);
-       pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
-                                               rxq->write_actual);
-       pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
-                                               rxq->need_update);
-       pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
-                                               rxq->free_count);
-       if (rxq->rb_stts) {
-               pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
-                        le16_to_cpu(rxq->rb_stts->closed_rb_num) &  0x0FFF);
-       } else {
-               pos += scnprintf(buf + pos, bufsz - pos,
-                                       "closed_rb_num: Not Allocated\n");
+       char *buf;
+       int pos = 0, i, ret;
+       size_t bufsz;
+
+       bufsz = sizeof(char) * 121 * trans->num_rx_queues;
+
+       if (!trans_pcie->rxq)
+               return -EAGAIN;
+
+       buf = kzalloc(bufsz, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
+               struct iwl_rxq *rxq = &trans_pcie->rxq[i];
+
+               pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
+                                i);
+               pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
+                                rxq->read);
+               pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
+                                rxq->write);
+               pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
+                                rxq->write_actual);
+               pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
+                                rxq->need_update);
+               pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
+                                rxq->free_count);
+               if (rxq->rb_stts) {
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                                        "\tclosed_rb_num: %u\n",
+                                        le16_to_cpu(rxq->rb_stts->closed_rb_num) &
+                                        0x0FFF);
+               } else {
+                       pos += scnprintf(buf + pos, bufsz - pos,
+                                        "\tclosed_rb_num: Not Allocated\n");
+               }
        }
-       return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+       kfree(buf);
+
+       return ret;
 }
 
 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
@@ -2188,7 +2209,8 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
-       struct iwl_rxq *rxq = &trans_pcie->rxq;
+       /* Dump RBs is supported only for pre-9000 devices (1 queue) */
+       struct iwl_rxq *rxq = &trans_pcie->rxq[0];
        u32 i, r, j, rb_len = 0;
 
        spin_lock(&rxq->lock);
@@ -2383,7 +2405,8 @@ static struct iwl_trans_dump_data
        u32 len, num_rbs;
        u32 monitor_len;
        int i, ptr;
-       bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);
+       bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
+                       !trans->cfg->mq_rx_supported;
 
        /* transport dump header */
        len = sizeof(*dump_data);
@@ -2438,11 +2461,12 @@ static struct iwl_trans_dump_data
        len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
 
        if (dump_rbs) {
+               /* Dump RBs is supported only for pre-9000 devices (1 queue) */
+               struct iwl_rxq *rxq = &trans_pcie->rxq[0];
                /* RBs */
-               num_rbs = le16_to_cpu(ACCESS_ONCE(
-                                     trans_pcie->rxq.rb_stts->closed_rb_num))
+               num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num))
                                      & 0x0FFF;
-               num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
+               num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
                len += num_rbs * (sizeof(*data) +
                                  sizeof(struct iwl_fw_error_dump_rb) +
                                  (PAGE_SIZE << trans_pcie->rx_page_order));
@@ -2493,6 +2517,22 @@ static struct iwl_trans_dump_data
        return dump_data;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
+{
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+               return iwl_pci_fw_enter_d0i3(trans);
+
+       return 0;
+}
+
+static void iwl_trans_pcie_resume(struct iwl_trans *trans)
+{
+       if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3)
+               iwl_pci_fw_exit_d0i3(trans);
+}
+#endif /* CONFIG_PM_SLEEP */
+
 static const struct iwl_trans_ops trans_ops_pcie = {
        .start_hw = iwl_trans_pcie_start_hw,
        .op_mode_leave = iwl_trans_pcie_op_mode_leave,
@@ -2503,6 +2543,11 @@ static const struct iwl_trans_ops trans_ops_pcie = {
        .d3_suspend = iwl_trans_pcie_d3_suspend,
        .d3_resume = iwl_trans_pcie_d3_resume,
 
+#ifdef CONFIG_PM_SLEEP
+       .suspend = iwl_trans_pcie_suspend,
+       .resume = iwl_trans_pcie_resume,
+#endif /* CONFIG_PM_SLEEP */
+
        .send_cmd = iwl_trans_pcie_send_hcmd,
 
        .tx = iwl_trans_pcie_tx,
@@ -2541,7 +2586,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        struct iwl_trans_pcie *trans_pcie;
        struct iwl_trans *trans;
        u16 pci_cmd;
-       int ret;
+       int ret, addr_size;
 
        trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
                                &pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2579,11 +2624,17 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                                       PCIE_LINK_STATE_CLKPM);
        }
 
+       if (cfg->mq_rx_supported)
+               addr_size = 64;
+       else
+               addr_size = 36;
+
        pci_set_master(pdev);
 
-       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
        if (!ret)
-               ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+               ret = pci_set_consistent_dma_mask(pdev,
+                                                 DMA_BIT_MASK(addr_size));
        if (ret) {
                ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (!ret)
@@ -2686,6 +2737,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        /* Initialize the wait queue for commands */
        init_waitqueue_head(&trans_pcie->wait_command_queue);
 
+       init_waitqueue_head(&trans_pcie->d0i3_waitq);
+
        ret = iwl_pcie_alloc_ict(trans);
        if (ret)
                goto out_pci_disable_msi;
@@ -2700,6 +2753,12 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 
        trans_pcie->inta_mask = CSR_INI_SET_MASK;
 
+#ifdef CONFIG_IWLWIFI_PCIE_RTPM
+       trans->runtime_pm_mode = IWL_PLAT_PM_MODE_D0I3;
+#else
+       trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+#endif /* CONFIG_IWLWIFI_PCIE_RTPM */
+
        return trans;
 
 out_free_ict:
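
The probe hunk earlier in this file widens the DMA mask to 64 bits when multi-queue RX is supported (36 bits otherwise) and retries with 32 bits if the host rejects the wider mask. A condensed sketch of that select-then-fall-back sequence, with the helper name assumed:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Illustrative helper only; mirrors the probe-path logic above. */
static int example_set_rx_dma_mask(struct pci_dev *pdev, bool mq_rx_supported)
{
	int addr_size = mq_rx_supported ? 64 : 36;
	int ret;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(addr_size));
	if (ret) {
		/* both masks must succeed; otherwise retry with 32 bits */
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!ret)
			ret = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}
	return ret;
}
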
index 5262028b55055e4ca9243e8d27fee38bd9769f0e..837a7d536874a8ff9651ed10099384411a8bf084 100644 (file)
@@ -1,7 +1,8 @@
 /******************************************************************************
  *
  * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -33,7 +34,6 @@
 #include <linux/sched.h>
 #include <net/ip6_checksum.h>
 #include <net/tso.h>
-#include <net/ip6_checksum.h>
 
 #include "iwl-debug.h"
 #include "iwl-csr.h"
@@ -571,6 +571,7 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
                return ret;
 
        spin_lock_init(&txq->lock);
+       __skb_queue_head_init(&txq->overflow_q);
 
        /*
         * Tell nic where to find circular buffer of Tx Frame Descriptors for
@@ -621,6 +622,13 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
                q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
        }
        txq->active = false;
+
+       while (!skb_queue_empty(&txq->overflow_q)) {
+               struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+               iwl_op_mode_free_skb(trans->op_mode, skb);
+       }
+
        spin_unlock_bh(&txq->lock);
 
        /* just in case - this queue may have been stopped */
@@ -1052,8 +1060,41 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
        iwl_pcie_txq_progress(txq);
 
-       if (iwl_queue_space(&txq->q) > txq->q.low_mark)
-               iwl_wake_queue(trans, txq);
+       if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+           test_bit(txq_id, trans_pcie->queue_stopped)) {
+               struct sk_buff_head skbs;
+
+               __skb_queue_head_init(&skbs);
+               skb_queue_splice_init(&txq->overflow_q, &skbs);
+
+               /*
+                * This is tricky: we are in the reclaim path, which is not
+                * re-entrant, so no one will try to access the txq data
+                * from that path. We stopped tx, so we can't have tx as
+                * well. Bottom line, we can unlock and re-lock later.
+                */
+               spin_unlock_bh(&txq->lock);
+
+               while (!skb_queue_empty(&skbs)) {
+                       struct sk_buff *skb = __skb_dequeue(&skbs);
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+                       u8 dev_cmd_idx = IWL_TRANS_FIRST_DRIVER_DATA + 1;
+                       struct iwl_device_cmd *dev_cmd =
+                               info->driver_data[dev_cmd_idx];
+
+                       /*
+                        * Note that we can very well be overflowing again.
+                        * In that case, iwl_queue_space will be small again
+                        * and we won't wake mac80211's queue.
+                        */
+                       iwl_trans_pcie_tx(trans, skb, dev_cmd, txq_id);
+               }
+               spin_lock_bh(&txq->lock);
+
+               if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+                       iwl_wake_queue(trans, txq);
+       }
 
        if (q->read_ptr == q->write_ptr) {
                IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
@@ -1686,6 +1727,20 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
                wake_up(&trans_pcie->wait_command_queue);
        }
 
+       if (meta->flags & CMD_MAKE_TRANS_IDLE) {
+               IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
+                              iwl_get_cmd_string(trans, cmd->hdr.cmd));
+               set_bit(STATUS_TRANS_IDLE, &trans->status);
+               wake_up(&trans_pcie->d0i3_waitq);
+       }
+
+       if (meta->flags & CMD_WAKE_UP_TRANS) {
+               IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
+                              iwl_get_cmd_string(trans, cmd->hdr.cmd));
+               clear_bit(STATUS_TRANS_IDLE, &trans->status);
+               wake_up(&trans_pcie->d0i3_waitq);
+       }
+
        meta->flags = 0;
 
        spin_unlock_bh(&txq->lock);
@@ -2161,6 +2216,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
                csum = skb_checksum(skb, offs, skb->len - offs, 0);
                *(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
+
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
 
        if (skb_is_nonlinear(skb) &&
@@ -2177,6 +2234,22 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
        spin_lock(&txq->lock);
 
+       if (iwl_queue_space(q) < q->high_mark) {
+               iwl_stop_queue(trans, txq);
+
+               /* don't put the packet on the ring, if there is no room */
+               if (unlikely(iwl_queue_space(q) < 3)) {
+                       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+                       info->driver_data[IWL_TRANS_FIRST_DRIVER_DATA + 1] =
+                               dev_cmd;
+                       __skb_queue_tail(&txq->overflow_q, skb);
+
+                       spin_unlock(&txq->lock);
+                       return 0;
+               }
+       }
+
        /* In AGG mode, the index in the ring must correspond to the WiFi
         * sequence number. This is a HW requirements to help the SCD to parse
         * the BA.
@@ -2281,12 +2354,6 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
         * At this point the frame is "transmitted" successfully
         * and we will get a TX status notification eventually.
         */
-       if (iwl_queue_space(q) < q->high_mark) {
-               if (wait_write_ptr)
-                       iwl_pcie_txq_inc_wr_ptr(trans, txq);
-               else
-                       iwl_stop_queue(trans, txq);
-       }
        spin_unlock(&txq->lock);
        return 0;
 out_err:
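
The tx.c hunks above add an overflow queue: the transmit path stops the mac80211 queue as soon as free space drops below the high-water mark, parks the frame on txq->overflow_q when fewer than three slots remain, and the reclaim path replays the parked frames before deciding whether to wake the queue. A simplified, self-contained model of that hand-off (types, thresholds and the main() scenario are illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct example_txq {
	int space;            /* free slots in the hardware ring */
	int high_mark;        /* stop the upper layer below this */
	int low_mark;         /* wake the upper layer above this */
	int overflow_frames;  /* frames parked instead of dropped */
	bool stopped;
};

/* Enqueue: stop early, park the frame when the ring is almost full. */
int example_tx(struct example_txq *q)
{
	if (q->space < q->high_mark) {
		q->stopped = true;
		if (q->space < 3) {
			q->overflow_frames++;   /* replayed from reclaim later */
			return 0;
		}
	}
	q->space--;                             /* frame goes on the ring */
	return 0;
}

/* Reclaim: free slots, replay parked frames, then maybe wake the queue. */
void example_reclaim(struct example_txq *q, int freed)
{
	q->space += freed;
	while (q->overflow_frames && q->space >= 3) {
		q->overflow_frames--;
		q->space--;                     /* parked frame re-queued */
	}
	if (q->space > q->low_mark)
		q->stopped = false;
}

int main(void)
{
	struct example_txq q = { .space = 2, .high_mark = 8, .low_mark = 4 };

	example_tx(&q);          /* only 2 slots left, so the frame is parked */
	example_reclaim(&q, 16); /* frees space, replays the parked frame */
	printf("stopped=%d overflow=%d space=%d\n",
	       q.stopped, q.overflow_frames, q.space);
	return 0;
}
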
index 6df3ee561d5214725c3c881895a6056861df74e1..515aa3f993f3dd8d1a65936472bfa6b00f4420de 100644 (file)
@@ -836,25 +836,30 @@ static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len,
        spin_lock_bh(&local->baplock);
 
        res = hfa384x_setup_bap(dev, BAP0, rid, 0);
-       if (!res)
-               res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+       if (res)
+               goto unlock;
+
+       res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+       if (res)
+               goto unlock;
 
        if (le16_to_cpu(rec.len) == 0) {
                /* RID not available */
                res = -ENODATA;
+               goto unlock;
        }
 
        rlen = (le16_to_cpu(rec.len) - 1) * 2;
-       if (!res && exact_len && rlen != len) {
+       if (exact_len && rlen != len) {
                printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
                       "rid=0x%04x, len=%d (expected %d)\n",
                       dev->name, rid, rlen, len);
                res = -ENODATA;
        }
 
-       if (!res)
-               res = hfa384x_from_bap(dev, BAP0, buf, len);
+       res = hfa384x_from_bap(dev, BAP0, buf, len);
 
+unlock:
        spin_unlock_bh(&local->baplock);
        mutex_unlock(&local->rid_bap_mtx);
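
The hostap hunk above replaces a chain of "if (!res)" checks with early goto unlock exits so that every failure path releases the BAP lock exactly once. The same shape in miniature, with the callbacks standing in for the BAP accesses:

#include <pthread.h>

/* Single-exit error handling around a lock (callback names assumed). */
int example_get_record(pthread_mutex_t *lock, int (*setup)(void),
		       int (*read_header)(void), int (*read_body)(void))
{
	int res;

	pthread_mutex_lock(lock);

	res = setup();
	if (res)
		goto unlock;

	res = read_header();
	if (res)
		goto unlock;

	res = read_body();

unlock:
	pthread_mutex_unlock(lock);   /* every path drops the lock exactly once */
	return res;
}
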
 
index fce4a843e65694d90ff9d35ca34bb1ea808d0a8f..bc7397d709d3ac5ff85b9a1057e43a93500587fc 100644 (file)
@@ -6,7 +6,7 @@
 #include <linux/string.h>
 #include <linux/if_ether.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 
 #include "orinoco.h"
 #include "mic.h"
@@ -16,7 +16,8 @@
 /********************************************************************/
 int orinoco_mic_init(struct orinoco_private *priv)
 {
-       priv->tx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
+       priv->tx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+                                             CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_mic)) {
                printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
                       "crypto API michael_mic\n");
@@ -24,7 +25,8 @@ int orinoco_mic_init(struct orinoco_private *priv)
                return -ENOMEM;
        }
 
-       priv->rx_tfm_mic = crypto_alloc_hash("michael_mic", 0, 0);
+       priv->rx_tfm_mic = crypto_alloc_ahash("michael_mic", 0,
+                                             CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_mic)) {
                printk(KERN_DEBUG "orinoco_mic_init: could not allocate "
                       "crypto API michael_mic\n");
@@ -38,18 +40,19 @@ int orinoco_mic_init(struct orinoco_private *priv)
 void orinoco_mic_free(struct orinoco_private *priv)
 {
        if (priv->tx_tfm_mic)
-               crypto_free_hash(priv->tx_tfm_mic);
+               crypto_free_ahash(priv->tx_tfm_mic);
        if (priv->rx_tfm_mic)
-               crypto_free_hash(priv->rx_tfm_mic);
+               crypto_free_ahash(priv->rx_tfm_mic);
 }
 
-int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
                u8 *da, u8 *sa, u8 priority,
                u8 *data, size_t data_len, u8 *mic)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm_michael);
        struct scatterlist sg[2];
        u8 hdr[ETH_HLEN + 2]; /* size of header + padding */
+       int err;
 
        if (tfm_michael == NULL) {
                printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
@@ -69,11 +72,13 @@ int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
        sg_set_buf(&sg[0], hdr, sizeof(hdr));
        sg_set_buf(&sg[1], data, data_len);
 
-       if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
+       if (crypto_ahash_setkey(tfm_michael, key, MIC_KEYLEN))
                return -1;
 
-       desc.tfm = tfm_michael;
-       desc.flags = 0;
-       return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr),
-                                 mic);
+       ahash_request_set_tfm(req, tfm_michael);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, mic, data_len + sizeof(hdr));
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
+       return err;
 }
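
The orinoco hunks above port the Michael MIC code from the old crypto_hash interface to the ahash API: the transform is allocated once with crypto_alloc_ahash(), and each digest builds an on-stack request. A stripped-down sketch of the per-digest sequence, using only the calls visible in the diff (the wrapper function itself is hypothetical):

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/types.h>

/* Illustrative only: one Michael MIC digest with the ahash API. */
static int example_michael_mic(struct crypto_ahash *tfm,
			       const u8 *key, unsigned int keylen,
			       struct scatterlist *sg, unsigned int len,
			       u8 *mic)
{
	AHASH_REQUEST_ON_STACK(req, tfm);
	int err;

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		return err;

	ahash_request_set_tfm(req, tfm);
	ahash_request_set_callback(req, 0, NULL, NULL);  /* synchronous use */
	ahash_request_set_crypt(req, sg, mic, len);
	err = crypto_ahash_digest(req);
	ahash_request_zero(req);                         /* wipe the request */
	return err;
}
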
index 04d05bc566d622edb063a15d3724afa418412653..ce731d05cc98cd2d0b6415f66868d3ad517b4753 100644 (file)
 
 /* Forward declarations */
 struct orinoco_private;
-struct crypto_hash;
+struct crypto_ahash;
 
 int orinoco_mic_init(struct orinoco_private *priv);
 void orinoco_mic_free(struct orinoco_private *priv);
-int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key,
+int orinoco_mic(struct crypto_ahash *tfm_michael, u8 *key,
                u8 *da, u8 *sa, u8 priority,
                u8 *data, size_t data_len, u8 *mic);
 
index eebd2be21ee9067e7d90f45f32b227bb07601998..2f0c84b1c440cd1160bdebc3cabcb13be110a73e 100644 (file)
@@ -152,8 +152,8 @@ struct orinoco_private {
        u8 *wpa_ie;
        int wpa_ie_len;
 
-       struct crypto_hash *rx_tfm_mic;
-       struct crypto_hash *tx_tfm_mic;
+       struct crypto_ahash *rx_tfm_mic;
+       struct crypto_ahash *tx_tfm_mic;
 
        unsigned int wpa_enabled:1;
        unsigned int tkip_cm_active:1;
index c32889a1e39cf53d4e2d1ab3a4bfc2b8b492af1e..a723a85f5635716aae4afa86cc1d45657f899af0 100644 (file)
@@ -991,7 +991,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
                goto nla_put_failure;
        }
 
-       if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, hdr->addr2))
+       if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
+                   ETH_ALEN, data->addresses[1].addr))
                goto nla_put_failure;
 
        /* We get the skb->data */
@@ -1333,10 +1334,8 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
        data->tx_bytes += skb->len;
        ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel);
 
-       if (ack && skb->len >= 16) {
-               struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       if (ack && skb->len >= 16)
                mac80211_hwsim_monitor_ack(channel, hdr->addr2);
-       }
 
        ieee80211_tx_info_clear_status(txi);
 
@@ -1845,10 +1844,12 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
 
 static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
-                                      enum ieee80211_ampdu_mlme_action action,
-                                      struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                                      u8 buf_size, bool amsdu)
+                                      struct ieee80211_ampdu_params *params)
 {
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
@@ -2736,7 +2737,7 @@ static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
 
        spin_lock_bh(&hwsim_radio_lock);
        list_for_each_entry(data, &hwsim_radios, list) {
-               if (mac80211_hwsim_addr_match(data, addr)) {
+               if (memcmp(data->addresses[1].addr, addr, ETH_ALEN) == 0) {
                        _found = true;
                        break;
                }
index 86955c416b30ed20ec48a5013d42df1e5257f89b..2eea76a340b7b44087d160304faf0a9767882d12 100644 (file)
@@ -2039,6 +2039,43 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
 
 
 
+int lbs_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
+                      bool enabled, int timeout)
+{
+       struct lbs_private *priv = wiphy_priv(wiphy);
+
+       if  (!(priv->fwcapinfo & FW_CAPINFO_PS)) {
+               if (!enabled)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
+       /* The firmware does not work well with long latencies when power
+        * saving is enabled, so do not enable it if the host is polling
+        * only, with no interrupts (as with some sdio hosts that can only
+        * poll for sdio irqs).
+        */
+       if  (priv->is_polling) {
+               if (!enabled)
+                       return 0;
+               else
+                       return -EINVAL;
+       }
+       if (!enabled) {
+               priv->psmode = LBS802_11POWERMODECAM;
+               if (priv->psstate != PS_STATE_FULL_POWER)
+                       lbs_set_ps_mode(priv,
+                                       PS_MODE_ACTION_EXIT_PS,
+                                       true);
+               return 0;
+       }
+       if (priv->psmode != LBS802_11POWERMODECAM)
+               return 0;
+       priv->psmode = LBS802_11POWERMODEMAX_PSP;
+       if (priv->connect_status == LBS_CONNECTED)
+               lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS, true);
+       return 0;
+}
 
 /*
  * Initialization
@@ -2057,6 +2094,7 @@ static struct cfg80211_ops lbs_cfg80211_ops = {
        .change_virtual_intf = lbs_change_intf,
        .join_ibss = lbs_join_ibss,
        .leave_ibss = lbs_leave_ibss,
+       .set_power_mgmt = lbs_set_power_mgmt,
 };
 
 
index 0387a5b380c80f5eeafd05c53c3ec12fc5b892f5..4ddd0e5a6b85c68102483b7f42e27cab096a2a9f 100644 (file)
@@ -957,7 +957,7 @@ static void lbs_queue_cmd(struct lbs_private *priv,
 
        /* Exit_PS command needs to be queued in the header always. */
        if (le16_to_cpu(cmdnode->cmdbuf->command) == CMD_802_11_PS_MODE) {
-               struct cmd_ds_802_11_ps_mode *psm = (void *) &cmdnode->cmdbuf;
+               struct cmd_ds_802_11_ps_mode *psm = (void *)cmdnode->cmdbuf;
 
                if (psm->action == cpu_to_le16(PS_MODE_ACTION_EXIT_PS)) {
                        if (priv->psstate != PS_STATE_FULL_POWER)
@@ -1387,7 +1387,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
                                 * PS command. Ignore it if it is not Exit_PS.
                                 * otherwise send it down immediately.
                                 */
-                               struct cmd_ds_802_11_ps_mode *psm = (void *)&cmd[1];
+                               struct cmd_ds_802_11_ps_mode *psm = (void *)cmd;
 
                                lbs_deb_host(
                                       "EXEC_NEXT_CMD: PS cmd, action 0x%02x\n",
@@ -1428,40 +1428,14 @@ int lbs_execute_next_command(struct lbs_private *priv)
                 * check if in power save mode, if yes, put the device back
                 * to PS mode
                 */
-#ifdef TODO
-               /*
-                * This was the old code for libertas+wext. Someone that
-                * understands this beast should re-code it in a sane way.
-                *
-                * I actually don't understand why this is related to WPA
-                * and to connection status, shouldn't powering should be
-                * independ of such things?
-                */
                if ((priv->psmode != LBS802_11POWERMODECAM) &&
                    (priv->psstate == PS_STATE_FULL_POWER) &&
-                   ((priv->connect_status == LBS_CONNECTED) ||
-                   lbs_mesh_connected(priv))) {
-                       if (priv->secinfo.WPAenabled ||
-                           priv->secinfo.WPA2enabled) {
-                               /* check for valid WPA group keys */
-                               if (priv->wpa_mcast_key.len ||
-                                   priv->wpa_unicast_key.len) {
-                                       lbs_deb_host(
-                                              "EXEC_NEXT_CMD: WPA enabled and GTK_SET"
-                                              " go back to PS_SLEEP");
-                                       lbs_set_ps_mode(priv,
-                                                       PS_MODE_ACTION_ENTER_PS,
-                                                       false);
-                               }
-                       } else {
-                               lbs_deb_host(
-                                      "EXEC_NEXT_CMD: cmdpendingq empty, "
-                                      "go back to PS_SLEEP");
-                               lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS,
-                                               false);
-                       }
+                   (priv->connect_status == LBS_CONNECTED)) {
+                       lbs_deb_host(
+                               "EXEC_NEXT_CMD: cmdpendingq empty, go back to PS_SLEEP");
+                       lbs_set_ps_mode(priv, PS_MODE_ACTION_ENTER_PS,
+                                       false);
                }
-#endif
        }
 
        ret = 0;
index e5442e8956f7ac3b1182058864816dc700548fe5..c95bf6dc9522eae5bdd4824e4c157ddc15dbc562 100644 (file)
@@ -123,7 +123,10 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
        priv->cmd_timed_out = 0;
 
        if (respcmd == CMD_RET(CMD_802_11_PS_MODE)) {
-               struct cmd_ds_802_11_ps_mode *psmode = (void *) &resp[1];
+               /* struct cmd_ds_802_11_ps_mode also contains
+                * the header
+                */
+               struct cmd_ds_802_11_ps_mode *psmode = (void *)resp;
                u16 action = le16_to_cpu(psmode->action);
 
                lbs_deb_host(
@@ -254,6 +257,10 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
                               "EVENT: in FULL POWER mode, ignoring PS_SLEEP\n");
                        break;
                }
+               if (!list_empty(&priv->cmdpendingq)) {
+                       lbs_deb_cmd("EVENT: commands in queue, do not sleep\n");
+                       break;
+               }
                priv->psstate = PS_STATE_PRE_SLEEP;
 
                lbs_ps_confirm_sleep(priv);
index 6bd1608992b00bec61b596d1c770674df8e11024..edf710bc5e77dedcfcc03fd77054ccd93b88bf90 100644 (file)
@@ -99,6 +99,7 @@ struct lbs_private {
        /* Hardware access */
        void *card;
        bool iface_running;
+       u8 is_polling; /* host has to poll the card irq */
        u8 fw_ready;
        u8 surpriseremoved;
        u8 setup_fw_on_resume;
index 68fd3a9779bdb99a06fad6735445ecda9dca7842..13eae9ff8c354384a6c8a172f73b7d9da2bbddee 100644 (file)
@@ -1267,7 +1267,7 @@ static int if_sdio_probe(struct sdio_func *func,
        priv->reset_card = if_sdio_reset_card;
        priv->power_save = if_sdio_power_save;
        priv->power_restore = if_sdio_power_restore;
-
+       priv->is_polling = !(func->card->host->caps & MMC_CAP_SDIO_IRQ);
        ret = if_sdio_power_on(card);
        if (ret)
                goto err_activate_card;
index dff08a2896a38f644894749c006787e26b1f3ea6..aba0c9995b14b143f716d4a83c0a695f0cc628eb 100644 (file)
@@ -267,6 +267,7 @@ static int if_usb_probe(struct usb_interface *intf,
        priv->enter_deep_sleep = NULL;
        priv->exit_deep_sleep = NULL;
        priv->reset_deep_sleep_wakeup = NULL;
+       priv->is_polling = false;
 #ifdef CONFIG_OLPC
        if (machine_is_olpc())
                priv->reset_card = if_usb_reset_olpc_card;
index 8079560f496581600cb658c4fa09d8e5d1faed81..b35b8bcce24cc0a34081fab73928c6d44b2fd91e 100644 (file)
@@ -1060,7 +1060,12 @@ void lbs_remove_card(struct lbs_private *priv)
 
        if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
                priv->psmode = LBS802_11POWERMODECAM;
-               lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true);
+               /* No need to wake up if already awake; on suspend this
+                * exit-PS command would not be processed and the driver
+                * would hang.
+                */
+               if (priv->psstate != PS_STATE_FULL_POWER)
+                       lbs_set_ps_mode(priv, PS_MODE_ACTION_EXIT_PS, true);
        }
 
        if (priv->is_deep_sleep) {
index 2f0f9b5609d0139a301f7e57866f212cc1daefe1..24e649b1eb2453444967439166f2b9dd8b21e9b6 100644 (file)
@@ -237,4 +237,14 @@ device_dump
 
        cat fw_dump
 
+verext
+       This command is used to get the extended firmware version string
+       using different configuration parameters.
+
+       Usage:
+               echo "[version_str_sel]" > verext
+               cat verext
+
+               [version_str_sel]: the firmware supports several extended
+                                  version string selections: 0/1/10/20/21/99
 ===============================================================================
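
For illustration, the same verext sequence driven from a small userspace C
program instead of echo/cat; the debugfs path below is an assumption and
depends on where debugfs is mounted and on the interface name.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical path; adjust to the actual mwifiex debugfs directory. */
        const char *path = "/sys/kernel/debug/mwifiex/mlan0/verext";
        char buf[256];
        ssize_t n;
        int fd;

        fd = open(path, O_WRONLY);
        if (fd < 0) {
                perror("open for write");
                return 1;
        }
        /* Select extended version string 1 (valid selectors: 0/1/10/20/21/99). */
        if (write(fd, "1", 1) < 0)
                perror("write");
        close(fd);

        fd = open(path, O_RDONLY);
        if (fd < 0) {
                perror("open for read");
                return 1;
        }
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
                buf[n] = '\0';
                fputs(buf, stdout);   /* e.g. "version string: ..." */
        }
        close(fd);
        return 0;
}
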
index e7adef72c05fcfd197f9aadb4eef24e5ef5f9511..f2dce81ba36ec834253791d662780640e03576ea 100644 (file)
@@ -1962,6 +1962,9 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
 {
        struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
 
+       if (!mwifiex_stop_bg_scan(priv))
+               cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+
        if (mwifiex_deauthenticate(priv, NULL))
                return -EFAULT;
 
@@ -2217,6 +2220,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
                    "info: Trying to associate to %s and bssid %pM\n",
                    (char *)sme->ssid, sme->bssid);
 
+       if (!mwifiex_stop_bg_scan(priv))
+               cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+
        ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
                                     priv->bss_mode, sme->channel, sme, 0);
        if (!ret) {
@@ -2420,6 +2426,9 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
                return -EBUSY;
        }
 
+       if (!mwifiex_stop_bg_scan(priv))
+               cfg80211_sched_scan_stopped_rtnl(priv->wdev.wiphy);
+
        user_scan_cfg = kzalloc(sizeof(*user_scan_cfg), GFP_KERNEL);
        if (!user_scan_cfg)
                return -ENOMEM;
@@ -2487,6 +2496,125 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
        return 0;
 }
 
+/* CFG802.11 operation handler for sched_scan_start.
+ *
+ * This function issues a bgscan config request to the firmware based on
+ * the user-specified sched_scan configuration. On successful completion,
+ * the firmware generates a BGSCAN_REPORT event; the driver should then
+ * issue a bgscan query command to fetch the sched_scan results.
+ */
+static int
+mwifiex_cfg80211_sched_scan_start(struct wiphy *wiphy,
+                                 struct net_device *dev,
+                                 struct cfg80211_sched_scan_request *request)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+       int i, offset;
+       struct ieee80211_channel *chan;
+       struct mwifiex_bg_scan_cfg *bgscan_cfg;
+       struct ieee_types_header *ie;
+
+       if (!request || (!request->n_ssids && !request->n_match_sets)) {
+               wiphy_err(wiphy, "%s : Invalid Sched_scan parameters",
+                         __func__);
+               return -EINVAL;
+       }
+
+       wiphy_info(wiphy, "sched_scan start : n_ssids=%d n_match_sets=%d ",
+                  request->n_ssids, request->n_match_sets);
+       wiphy_info(wiphy, "n_channels=%d interval=%d ie_len=%d\n",
+                  request->n_channels, request->scan_plans->interval,
+                  (int)request->ie_len);
+
+       bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL);
+       if (!bgscan_cfg)
+               return -ENOMEM;
+
+       if (priv->scan_request || priv->scan_aborting)
+               bgscan_cfg->start_later = true;
+
+       bgscan_cfg->num_ssids = request->n_match_sets;
+       bgscan_cfg->ssid_list = request->match_sets;
+
+       if (request->ie && request->ie_len) {
+               offset = 0;
+               for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
+                       if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
+                               continue;
+                       priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_BGSCAN;
+                       ie = (struct ieee_types_header *)(request->ie + offset);
+                       memcpy(&priv->vs_ie[i].ie, ie, sizeof(*ie) + ie->len);
+                       offset += sizeof(*ie) + ie->len;
+
+                       if (offset >= request->ie_len)
+                               break;
+               }
+       }
+
+       for (i = 0; i < min_t(u32, request->n_channels,
+                             MWIFIEX_BG_SCAN_CHAN_MAX); i++) {
+               chan = request->channels[i];
+               bgscan_cfg->chan_list[i].chan_number = chan->hw_value;
+               bgscan_cfg->chan_list[i].radio_type = chan->band;
+
+               if ((chan->flags & IEEE80211_CHAN_NO_IR) || !request->n_ssids)
+                       bgscan_cfg->chan_list[i].scan_type =
+                                               MWIFIEX_SCAN_TYPE_PASSIVE;
+               else
+                       bgscan_cfg->chan_list[i].scan_type =
+                                               MWIFIEX_SCAN_TYPE_ACTIVE;
+
+               bgscan_cfg->chan_list[i].scan_time = 0;
+       }
+
+       bgscan_cfg->chan_per_scan = min_t(u32, request->n_channels,
+                                         MWIFIEX_BG_SCAN_CHAN_MAX);
+
+       /* Use at least 15 seconds per scan cycle */
+       bgscan_cfg->scan_interval = (request->scan_plans->interval >
+                                    MWIFIEX_BGSCAN_INTERVAL) ?
+                               request->scan_plans->interval :
+                               MWIFIEX_BGSCAN_INTERVAL;
+
+       bgscan_cfg->repeat_count = MWIFIEX_BGSCAN_REPEAT_COUNT;
+       bgscan_cfg->report_condition = MWIFIEX_BGSCAN_SSID_MATCH |
+                               MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE;
+       bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA;
+       bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET;
+       bgscan_cfg->enable = true;
+       if (request->min_rssi_thold != NL80211_SCAN_RSSI_THOLD_OFF) {
+               bgscan_cfg->report_condition |= MWIFIEX_BGSCAN_SSID_RSSI_MATCH;
+               bgscan_cfg->rssi_threshold = request->min_rssi_thold;
+       }
+
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG,
+                            HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) {
+               kfree(bgscan_cfg);
+               return -EFAULT;
+       }
+
+       priv->sched_scanning = true;
+
+       kfree(bgscan_cfg);
+       return 0;
+}
+
+/* CFG802.11 operation handler for sched_scan_stop.
+ *
+ * This function issues a bgscan config command to disable the
+ * previous bgscan configuration in the firmware.
+ */
+static int mwifiex_cfg80211_sched_scan_stop(struct wiphy *wiphy,
+                                           struct net_device *dev)
+{
+       struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+       wiphy_info(wiphy, "sched scan stop!");
+       mwifiex_stop_bg_scan(priv);
+
+       return 0;
+}
+
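
Two bounds are applied above before the config is handed to the firmware: the
per-cycle interval is clamped to at least MWIFIEX_BGSCAN_INTERVAL (15000 ms)
and the channel list is capped at MWIFIEX_BG_SCAN_CHAN_MAX (38) entries. The
same clamping in isolation, as a self-contained C sketch:

#include <stdio.h>

#define MWIFIEX_BGSCAN_INTERVAL  15000  /* minimum per-cycle interval, ms */
#define MWIFIEX_BG_SCAN_CHAN_MAX 38     /* channels the firmware accepts */

static unsigned int clamp_interval(unsigned int requested_ms)
{
        return requested_ms > MWIFIEX_BGSCAN_INTERVAL ?
               requested_ms : MWIFIEX_BGSCAN_INTERVAL;
}

static unsigned int clamp_channels(unsigned int n_channels)
{
        return n_channels < MWIFIEX_BG_SCAN_CHAN_MAX ?
               n_channels : MWIFIEX_BG_SCAN_CHAN_MAX;
}

int main(void)
{
        printf("interval 10000 -> %u ms\n", clamp_interval(10000));  /* 15000 */
        printf("channels 50    -> %u\n", clamp_channels(50));        /* 38 */
        return 0;
}
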
 static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info,
                                   struct mwifiex_private *priv)
 {
@@ -2848,6 +2976,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
        mwifiex_dev_debugfs_remove(priv);
 #endif
 
+       if (priv->sched_scanning)
+               priv->sched_scanning = false;
+
        mwifiex_stop_net_dev_queue(priv->netdev, adapter);
 
        skb_queue_walk_safe(&priv->bypass_txq, skb, tmp)
@@ -3044,10 +3175,12 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
                                sizeof(byte_seq));
                mef_entry->filter[filt_num].filt_type = TYPE_EQ;
 
-               if (first_pat)
+               if (first_pat) {
                        first_pat = false;
-               else
+                       mwifiex_dbg(priv->adapter, INFO, "Wake on patterns\n");
+               } else {
                        mef_entry->filter[filt_num].filt_action = TYPE_AND;
+               }
 
                filt_num++;
        }
@@ -3073,6 +3206,7 @@ static int mwifiex_set_wowlan_mef_entry(struct mwifiex_private *priv,
                mef_entry->filter[filt_num].offset = 56;
                mef_entry->filter[filt_num].filt_type = TYPE_EQ;
                mef_entry->filter[filt_num].filt_action = TYPE_OR;
+               mwifiex_dbg(priv->adapter, INFO, "Wake on magic packet\n");
        }
        return ret;
 }
@@ -3143,7 +3277,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
 
        priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
 
-       if (!priv->media_connected) {
+       if (!priv->media_connected && !wowlan->nd_config) {
                mwifiex_dbg(adapter, ERROR,
                            "Can not configure WOWLAN in disconnected state\n");
                return 0;
@@ -3155,19 +3289,30 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
                return ret;
        }
 
+       memset(&hs_cfg, 0, sizeof(hs_cfg));
+       hs_cfg.conditions = le32_to_cpu(adapter->hs_cfg.conditions);
+
+       if (wowlan->nd_config) {
+               mwifiex_dbg(adapter, INFO, "Wake on net detect\n");
+               hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT;
+               mwifiex_cfg80211_sched_scan_start(wiphy, priv->netdev,
+                                                 wowlan->nd_config);
+       }
+
        if (wowlan->disconnect) {
-               memset(&hs_cfg, 0, sizeof(hs_cfg));
-               hs_cfg.is_invoke_hostcmd = false;
-               hs_cfg.conditions = HS_CFG_COND_MAC_EVENT;
-               hs_cfg.gpio = adapter->hs_cfg.gpio;
-               hs_cfg.gap = adapter->hs_cfg.gap;
-               ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
-                                           MWIFIEX_SYNC_CMD, &hs_cfg);
-               if (ret) {
-                       mwifiex_dbg(adapter, ERROR,
-                                   "Failed to set HS params\n");
-                       return ret;
-               }
+               hs_cfg.conditions |= HS_CFG_COND_MAC_EVENT;
+               mwifiex_dbg(priv->adapter, INFO, "Wake on device disconnect\n");
+       }
+
+       hs_cfg.is_invoke_hostcmd = false;
+       hs_cfg.gpio = adapter->hs_cfg.gpio;
+       hs_cfg.gap = adapter->hs_cfg.gap;
+       ret = mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
+                                   MWIFIEX_SYNC_CMD, &hs_cfg);
+       if (ret) {
+               mwifiex_dbg(adapter, ERROR,
+                           "Failed to set HS params\n");
+               return ret;
        }
 
        return ret;
@@ -3175,6 +3320,64 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
 
 static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
 {
+       struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+       struct mwifiex_private *priv =
+               mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+       struct mwifiex_ds_wakeup_reason wakeup_reason;
+       struct cfg80211_wowlan_wakeup wakeup_report;
+       int i;
+
+       mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD,
+                                 &wakeup_reason);
+       memset(&wakeup_report, 0, sizeof(struct cfg80211_wowlan_wakeup));
+
+       wakeup_report.pattern_idx = -1;
+
+       switch (wakeup_reason.hs_wakeup_reason) {
+       case NO_HSWAKEUP_REASON:
+               break;
+       case BCAST_DATA_MATCHED:
+               break;
+       case MCAST_DATA_MATCHED:
+               break;
+       case UCAST_DATA_MATCHED:
+               break;
+       case MASKTABLE_EVENT_MATCHED:
+               break;
+       case NON_MASKABLE_EVENT_MATCHED:
+               if (wiphy->wowlan_config->disconnect)
+                       wakeup_report.disconnect = true;
+               if (wiphy->wowlan_config->nd_config)
+                       wakeup_report.net_detect = adapter->nd_info;
+               break;
+       case NON_MASKABLE_CONDITION_MATCHED:
+               break;
+       case MAGIC_PATTERN_MATCHED:
+               if (wiphy->wowlan_config->magic_pkt)
+                       wakeup_report.magic_pkt = true;
+               if (wiphy->wowlan_config->n_patterns)
+                       wakeup_report.pattern_idx = 1;
+               break;
+       case CONTROL_FRAME_MATCHED:
+               break;
+       case MANAGEMENT_FRAME_MATCHED:
+               break;
+       default:
+               break;
+       }
+
+       if ((wakeup_reason.hs_wakeup_reason > 0) &&
+           (wakeup_reason.hs_wakeup_reason <= 7))
+               cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report,
+                                             GFP_KERNEL);
+
+       if (adapter->nd_info) {
+               for (i = 0; i < adapter->nd_info->n_matches; i++)
+                       kfree(adapter->nd_info->matches[i]);
+               kfree(adapter->nd_info);
+               adapter->nd_info = NULL;
+       }
+
        return 0;
 }
 
@@ -3590,8 +3793,8 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
                freq = ieee80211_channel_to_frequency(curr_bss->channel, band);
                chan = ieee80211_get_channel(wiphy, freq);
 
-               if (curr_bss->bcn_ht_oper) {
-                       second_chan_offset = curr_bss->bcn_ht_oper->ht_param &
+               if (priv->ht_param_present) {
+                       second_chan_offset = priv->assoc_resp_ht_param &
                                        IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
                        chan_type = mwifiex_sec_chan_offset_to_chan_type
                                                        (second_chan_offset);
@@ -3701,6 +3904,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
        .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
        .set_antenna = mwifiex_cfg80211_set_antenna,
        .del_station = mwifiex_cfg80211_del_station,
+       .sched_scan_start = mwifiex_cfg80211_sched_scan_start,
+       .sched_scan_stop = mwifiex_cfg80211_sched_scan_stop,
 #ifdef CONFIG_PM
        .suspend = mwifiex_cfg80211_suspend,
        .resume = mwifiex_cfg80211_resume,
@@ -3720,11 +3925,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
 
 #ifdef CONFIG_PM
 static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
-       .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT,
+       .flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
+               WIPHY_WOWLAN_NET_DETECT,
        .n_patterns = MWIFIEX_MEF_MAX_FILTERS,
        .pattern_min_len = 1,
        .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
        .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
+       .max_nd_match_sets = MWIFIEX_MAX_ND_MATCH_SETS,
 };
 #endif
 
@@ -3829,6 +4036,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
        wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
                        WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
                        WIPHY_FLAG_AP_UAPSD |
+                       WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
                        WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
                        WIPHY_FLAG_HAS_CHANNEL_SWITCH |
                        WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -3847,6 +4055,10 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
                                    NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
                                    NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
 
+       wiphy->max_sched_scan_ssids = MWIFIEX_MAX_SSID_LIST_LENGTH;
+       wiphy->max_sched_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
+       wiphy->max_match_sets = MWIFIEX_MAX_SSID_LIST_LENGTH;
+
        wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
        wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
 
index cb25aa7e90db19d54df9b556d677c3a460093d77..a12adee776c6c6ac76c2f24d3bc44b88a1f5a6af 100644 (file)
@@ -1657,3 +1657,16 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
 
        return 0;
 }
+
+/* This function handles the command response of the HS wakeup reason
+ * command.
+ */
+int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv,
+                             struct host_cmd_ds_command *resp,
+                             struct host_cmd_ds_wakeup_reason *wakeup_reason)
+{
+       wakeup_reason->wakeup_reason =
+               resp->params.hs_wakeup_reason.wakeup_reason;
+
+       return 0;
+}
index 0b9c580af988cd502e4ab4b05c351ffc030935ab..df28836a1d1100672050bf878a4dba70f40b87c6 100644 (file)
@@ -95,8 +95,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
 
        mwifiex_drv_get_driver_version(priv->adapter, fmt, sizeof(fmt) - 1);
 
-       if (!priv->version_str[0])
-               mwifiex_get_ver_ext(priv);
+       mwifiex_get_ver_ext(priv, 0);
 
        p += sprintf(p, "driver_name = " "\"mwifiex\"\n");
        p += sprintf(p, "driver_version = %s", fmt);
@@ -583,6 +582,52 @@ done:
        return ret;
 }
 
+/* debugfs verext file write handler.
+ * This function is called when the 'verext' file is opened for writing.
+ */
+static ssize_t
+mwifiex_verext_write(struct file *file, const char __user *ubuf,
+                    size_t count, loff_t *ppos)
+{
+       int ret;
+       u32 versionstrsel;
+       struct mwifiex_private *priv = (void *)file->private_data;
+       char buf[16];
+
+       memset(buf, 0, sizeof(buf));
+
+       if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+               return -EFAULT;
+
+       ret = kstrtou32(buf, 10, &versionstrsel);
+       if (ret)
+               return ret;
+
+       priv->versionstrsel = versionstrsel;
+
+       return count;
+}
+
+/* Proc verext file read handler.
+ * This function is called when the 'verext' file is opened for reading.
+ * It can be used to read the extended version string.
+ */
+static ssize_t
+mwifiex_verext_read(struct file *file, char __user *ubuf,
+                   size_t count, loff_t *ppos)
+{
+       struct mwifiex_private *priv =
+               (struct mwifiex_private *)file->private_data;
+       char buf[256];
+       int ret;
+
+       mwifiex_get_ver_ext(priv, priv->versionstrsel);
+       ret = snprintf(buf, sizeof(buf), "version string: %s\n",
+                      priv->version_str);
+
+       return simple_read_from_buffer(ubuf, count, ppos, buf, ret);
+}
+
 /* Proc memrw file write handler.
  * This function is called when the 'memrw' file is opened for writing
  * This function can be used to write to a memory location.
@@ -940,6 +985,7 @@ MWIFIEX_DFS_FILE_OPS(histogram);
 MWIFIEX_DFS_FILE_OPS(debug_mask);
 MWIFIEX_DFS_FILE_OPS(timeshare_coex);
 MWIFIEX_DFS_FILE_WRITE_OPS(reset);
+MWIFIEX_DFS_FILE_OPS(verext);
 
 /*
  * This function creates the debug FS directory structure and the files.
@@ -968,6 +1014,7 @@ mwifiex_dev_debugfs_init(struct mwifiex_private *priv)
        MWIFIEX_DFS_ADD_FILE(debug_mask);
        MWIFIEX_DFS_ADD_FILE(timeshare_coex);
        MWIFIEX_DFS_ADD_FILE(reset);
+       MWIFIEX_DFS_ADD_FILE(verext);
 }
 
 /*
index d9c15cd36f1277f8a9a4e87a08a5b5fbc4ea9e97..bec300b9c2ea51bf478d6060b19b952360a36f31 100644 (file)
 #define BLOCK_NUMBER_OFFSET            15
 #define SDIO_HEADER_OFFSET             28
 
+#define MWIFIEX_SIZE_4K 0x4000
+
 enum mwifiex_bss_type {
        MWIFIEX_BSS_TYPE_STA = 0,
        MWIFIEX_BSS_TYPE_UAP = 1,
@@ -270,4 +272,26 @@ struct mwifiex_11h_intf_state {
        bool is_11h_enabled;
        bool is_11h_active;
 } __packed;
+
+#define MWIFIEX_FW_DUMP_IDX            0xff
+#define MWIFIEX_FW_DUMP_MAX_MEMSIZE     0x160000
+#define MWIFIEX_DRV_INFO_IDX           20
+#define FW_DUMP_MAX_NAME_LEN           8
+#define FW_DUMP_HOST_READY      0xEE
+#define FW_DUMP_DONE                   0xFF
+#define FW_DUMP_READ_DONE              0xFE
+
+struct memory_type_mapping {
+       u8 mem_name[FW_DUMP_MAX_NAME_LEN];
+       u8 *mem_ptr;
+       u32 mem_size;
+       u8 done_flag;
+};
+
+enum rdwr_status {
+       RDWR_STATUS_SUCCESS = 0,
+       RDWR_STATUS_FAILURE = 1,
+       RDWR_STATUS_DONE = 2
+};
+
 #endif /* !_MWIFIEX_DECL_H_ */
index ced7af2be29a3259eed41f33b0ea7ecea327e047..c134cf8652910b0381421f9e7c6ed746401ac719 100644 (file)
@@ -96,7 +96,7 @@ enum KEY_TYPE_ID {
 #define WAPI_KEY_LEN                   (WLAN_KEY_LEN_SMS4 + PN_LEN + 2)
 
 #define MAX_POLL_TRIES                 100
-#define MAX_FIRMWARE_POLL_TRIES                        100
+#define MAX_FIRMWARE_POLL_TRIES                        150
 
 #define FIRMWARE_READY_SDIO                            0xfedc
 #define FIRMWARE_READY_PCIE                            0xfedcba00
@@ -144,6 +144,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_WILDCARDSSID       (PROPRIETARY_TLV_BASE_ID + 18)
 #define TLV_TYPE_TSFTIMESTAMP       (PROPRIETARY_TLV_BASE_ID + 19)
 #define TLV_TYPE_RSSI_HIGH          (PROPRIETARY_TLV_BASE_ID + 22)
+#define TLV_TYPE_BGSCAN_START_LATER (PROPRIETARY_TLV_BASE_ID + 30)
 #define TLV_TYPE_AUTH_TYPE          (PROPRIETARY_TLV_BASE_ID + 31)
 #define TLV_TYPE_STA_MAC_ADDR       (PROPRIETARY_TLV_BASE_ID + 32)
 #define TLV_TYPE_BSSID              (PROPRIETARY_TLV_BASE_ID + 35)
@@ -177,6 +178,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define TLV_TYPE_TX_PAUSE           (PROPRIETARY_TLV_BASE_ID + 148)
 #define TLV_TYPE_COALESCE_RULE      (PROPRIETARY_TLV_BASE_ID + 154)
 #define TLV_TYPE_KEY_PARAM_V2       (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_REPEAT_COUNT       (PROPRIETARY_TLV_BASE_ID + 176)
 #define TLV_TYPE_MULTI_CHAN_INFO    (PROPRIETARY_TLV_BASE_ID + 183)
 #define TLV_TYPE_MC_GROUP_INFO      (PROPRIETARY_TLV_BASE_ID + 184)
 #define TLV_TYPE_TDLS_IDLE_TIMEOUT  (PROPRIETARY_TLV_BASE_ID + 194)
@@ -331,6 +333,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_802_11_MAC_ADDRESS                0x004D
 #define HostCmd_CMD_802_11D_DOMAIN_INFO               0x005b
 #define HostCmd_CMD_802_11_KEY_MATERIAL               0x005e
+#define HostCmd_CMD_802_11_BG_SCAN_CONFIG             0x006b
 #define HostCmd_CMD_802_11_BG_SCAN_QUERY              0x006c
 #define HostCmd_CMD_WMM_GET_STATUS                    0x0071
 #define HostCmd_CMD_802_11_SUBSCRIBE_EVENT            0x0075
@@ -370,6 +373,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
 #define HostCmd_CMD_MGMT_FRAME_REG                    0x010c
 #define HostCmd_CMD_REMAIN_ON_CHAN                    0x010d
 #define HostCmd_CMD_11AC_CFG                         0x0112
+#define HostCmd_CMD_HS_WAKEUP_REASON                  0x0116
 #define HostCmd_CMD_TDLS_CONFIG                       0x0100
 #define HostCmd_CMD_MC_POLICY                         0x0121
 #define HostCmd_CMD_TDLS_OPER                         0x0122
@@ -523,6 +527,7 @@ enum P2P_MODES {
 #define EVENT_CHANNEL_REPORT_RDY        0x00000054
 #define EVENT_TX_DATA_PAUSE             0x00000055
 #define EVENT_EXT_SCAN_REPORT           0x00000058
+#define EVENT_BG_SCAN_STOPPED           0x00000065
 #define EVENT_REMAIN_ON_CHAN_EXPIRED    0x0000005f
 #define EVENT_MULTI_CHAN_INFO           0x0000006a
 #define EVENT_TX_STATUS_REPORT         0x00000074
@@ -539,6 +544,8 @@ enum P2P_MODES {
 
 #define MWIFIEX_MAX_PATTERN_LEN                40
 #define MWIFIEX_MAX_OFFSET_LEN         100
+#define MWIFIEX_MAX_ND_MATCH_SETS      10
+
 #define STACK_NBYTES                   100
 #define TYPE_DNUM                      1
 #define TYPE_BYTESEQ                   2
@@ -601,6 +608,20 @@ struct mwifiex_ie_types_data {
 #define MWIFIEX_RXPD_FLAGS_TDLS_PACKET      0x01
 #define MWIFIEX_TXPD_FLAGS_REQ_TX_STATUS    0x20
 
+enum HS_WAKEUP_REASON {
+       NO_HSWAKEUP_REASON = 0,
+       BCAST_DATA_MATCHED,
+       MCAST_DATA_MATCHED,
+       UCAST_DATA_MATCHED,
+       MASKTABLE_EVENT_MATCHED,
+       NON_MASKABLE_EVENT_MATCHED,
+       NON_MASKABLE_CONDITION_MATCHED,
+       MAGIC_PATTERN_MATCHED,
+       CONTROL_FRAME_MATCHED,
+       MANAGEMENT_FRAME_MATCHED,
+       RESERVED
+};
+
 struct txpd {
        u8 bss_type;
        u8 bss_num;
@@ -733,6 +754,21 @@ struct mwifiex_ie_types_num_probes {
        __le16 num_probes;
 } __packed;
 
+struct mwifiex_ie_types_repeat_count {
+       struct mwifiex_ie_types_header header;
+       __le16 repeat_count;
+} __packed;
+
+struct mwifiex_ie_types_min_rssi_threshold {
+       struct mwifiex_ie_types_header header;
+       __le16 rssi_threshold;
+} __packed;
+
+struct mwifiex_ie_types_bgscan_start_later {
+       struct mwifiex_ie_types_header header;
+       __le16 start_later;
+} __packed;
+
 struct mwifiex_ie_types_scan_chan_gap {
        struct mwifiex_ie_types_header header;
        /* time gap in TUs to be used between two consecutive channels scan */
@@ -1027,7 +1063,7 @@ struct ieee_types_assoc_rsp {
        __le16 cap_info_bitmap;
        __le16 status_code;
        __le16 a_id;
-       u8 ie_buffer[1];
+       u8 ie_buffer[0];
 } __packed;
 
 struct host_cmd_ds_802_11_associate_rsp {
@@ -1425,6 +1461,36 @@ struct mwifiex_user_scan_cfg {
        u16 scan_chan_gap;
 } __packed;
 
+#define MWIFIEX_BG_SCAN_CHAN_MAX 38
+#define MWIFIEX_BSS_MODE_INFRA 1
+#define MWIFIEX_BGSCAN_ACT_GET     0x0000
+#define MWIFIEX_BGSCAN_ACT_SET     0x0001
+#define MWIFIEX_BGSCAN_ACT_SET_ALL 0xff01
+/** ssid match */
+#define MWIFIEX_BGSCAN_SSID_MATCH          0x0001
+/** ssid match and RSSI exceeded */
+#define MWIFIEX_BGSCAN_SSID_RSSI_MATCH     0x0004
+/** wait for all channel scans to complete before reporting the scan result */
+#define MWIFIEX_BGSCAN_WAIT_ALL_CHAN_DONE  0x80000000
+
+struct mwifiex_bg_scan_cfg {
+       u16 action;
+       u8 enable;
+       u8 bss_type;
+       u8 chan_per_scan;
+       u32 scan_interval;
+       u32 report_condition;
+       u8 num_probes;
+       u8 rssi_threshold;
+       u8 snr_threshold;
+       u16 repeat_count;
+       u16 start_later;
+       struct cfg80211_match_set *ssid_list;
+       u8 num_ssids;
+       struct mwifiex_user_scan_chan chan_list[MWIFIEX_BG_SCAN_CHAN_MAX];
+       u16 scan_chan_gap;
+} __packed;
+
 struct ie_body {
        u8 grp_key_oui[4];
        u8 ptk_cnt[2];
@@ -1470,6 +1536,20 @@ struct mwifiex_ie_types_bss_scan_info {
        __le64 tsf;
 } __packed;
 
+struct host_cmd_ds_802_11_bg_scan_config {
+       __le16 action;
+       u8 enable;
+       u8 bss_type;
+       u8 chan_per_scan;
+       u8 reserved;
+       __le16 reserved1;
+       __le32 scan_interval;
+       __le32 reserved2;
+       __le32 report_condition;
+       __le16 reserved3;
+       u8 tlv[0];
+} __packed;
+
 struct host_cmd_ds_802_11_bg_scan_query {
        u8 flush;
 } __packed;
@@ -2099,6 +2179,10 @@ struct host_cmd_ds_robust_coex {
        __le16 reserved;
 } __packed;
 
+struct host_cmd_ds_wakeup_reason {
+       u16  wakeup_reason;
+} __packed;
+
 struct host_cmd_ds_command {
        __le16 command;
        __le16 size;
@@ -2124,6 +2208,7 @@ struct host_cmd_ds_command {
                struct host_cmd_ds_802_11_scan scan;
                struct host_cmd_ds_802_11_scan_ext ext_scan;
                struct host_cmd_ds_802_11_scan_rsp scan_resp;
+               struct host_cmd_ds_802_11_bg_scan_config bg_scan_config;
                struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
                struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
                struct host_cmd_ds_802_11_associate associate;
@@ -2170,6 +2255,7 @@ struct host_cmd_ds_command {
                struct host_cmd_sdio_sp_rx_aggr_cfg sdio_rx_aggr_cfg;
                struct host_cmd_ds_multi_chan_policy mc_policy;
                struct host_cmd_ds_robust_coex coex;
+               struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
        } params;
 } __packed;
 
index 6f7876ec31b7a6af705593139318df63bfd77a84..517653b3adabd8481edc73cd8eb08046e44f8484 100644 (file)
@@ -741,8 +741,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
        u32 poll_num = 1;
 
        if (adapter->if_ops.check_fw_status) {
-               adapter->winner = 0;
-
                /* check if firmware is already running */
                ret = adapter->if_ops.check_fw_status(adapter, poll_num);
                if (!ret) {
@@ -750,13 +748,23 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
                                    "WLAN FW already running! Skip FW dnld\n");
                        return 0;
                }
+       }
+
+       /* check if we are the winner for downloading FW */
+       if (adapter->if_ops.check_winner_status) {
+               adapter->winner = 0;
+               ret = adapter->if_ops.check_winner_status(adapter);
 
                poll_num = MAX_FIRMWARE_POLL_TRIES;
+               if (ret) {
+                       mwifiex_dbg(adapter, MSG,
+                                   "WLAN read winner status failed!\n");
+                       return ret;
+               }
 
-               /* check if we are the winner for downloading FW */
                if (!adapter->winner) {
                        mwifiex_dbg(adapter, MSG,
-                                   "FW already running! Skip FW dnld\n");
+                                   "WLAN is not the winner! Skip FW dnld\n");
                        goto poll_fw;
                }
        }
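
The hunk above splits firmware bring-up into two interface callbacks:
check_fw_status() answers "is the firmware already running?", and the new
check_winner_status() answers "is this interface elected to download it?". A
self-contained sketch of the resulting control flow, with stubbed callbacks
and illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct iface_ops {
        /* return 0 when the firmware is already up */
        int (*check_fw_status)(void);
        /* set *winner and return 0 on a successful register read */
        int (*check_winner_status)(bool *winner);
};

/* Mirrors the reordered logic in mwifiex_dnld_fw(). */
static int download_fw(const struct iface_ops *ops)
{
        bool winner = false;

        if (ops->check_fw_status && ops->check_fw_status() == 0) {
                printf("FW already running, skip download\n");
                return 0;
        }
        if (ops->check_winner_status) {
                if (ops->check_winner_status(&winner)) {
                        printf("reading winner status failed\n");
                        return -1;
                }
                if (!winner) {
                        printf("not the winner, poll for FW instead\n");
                        return 0;
                }
        }
        printf("winner: download the firmware image\n");
        return 0;
}

static int fw_not_running(void) { return 1; }
static int we_are_winner(bool *winner) { *winner = true; return 0; }

int main(void)
{
        struct iface_ops ops = {
                .check_fw_status = fw_not_running,
                .check_winner_status = we_are_winner,
        };
        return download_fw(&ops);
}
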
index 4f0174c649461ed3ac219419cb676921f31a66e6..14cfa37deb00f9640a7f1816a166eb659a363b8f 100644 (file)
@@ -271,6 +271,10 @@ struct mwifiex_ds_hs_cfg {
        u32 gap;
 };
 
+struct mwifiex_ds_wakeup_reason {
+       u16  hs_wakeup_reason;
+};
+
 #define DEEP_SLEEP_ON  1
 #define DEEP_SLEEP_OFF 0
 #define DEEP_SLEEP_IDLE_TIME   100
@@ -414,6 +418,7 @@ struct mwifiex_ds_mef_cfg {
 #define MWIFIEX_VSIE_MASK_SCAN     0x01
 #define MWIFIEX_VSIE_MASK_ASSOC    0x02
 #define MWIFIEX_VSIE_MASK_ADHOC    0x04
+#define MWIFIEX_VSIE_MASK_BGSCAN   0x08
 
 enum {
        MWIFIEX_FUNC_INIT = 1,
index cc09a81dbf6a8eac3f4b4ac8ee84839bcbafe811..62211fca91b7456ff71fae76be6e0ffe9a01753f 100644 (file)
@@ -644,6 +644,8 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
        struct mwifiex_bssdescriptor *bss_desc;
        bool enable_data = true;
        u16 cap_info, status_code, aid;
+       const u8 *ie_ptr;
+       struct ieee80211_ht_operation *assoc_resp_ht_oper;
 
        assoc_rsp = (struct ieee_types_assoc_rsp *) &resp->params;
 
@@ -733,6 +735,19 @@ int mwifiex_ret_802_11_associate(struct mwifiex_private *priv,
                        = ((bss_desc->wmm_ie.qos_info_bitmap &
                                IEEE80211_WMM_IE_AP_QOSINFO_UAPSD) ? 1 : 0);
 
+       /* Store the bandwidth information from assoc response */
+       ie_ptr = cfg80211_find_ie(WLAN_EID_HT_OPERATION, assoc_rsp->ie_buffer,
+                                 priv->assoc_rsp_size
+                                 - sizeof(struct ieee_types_assoc_rsp));
+       if (ie_ptr) {
+               assoc_resp_ht_oper = (struct ieee80211_ht_operation *)(ie_ptr
+                                       + sizeof(struct ieee_types_header));
+               priv->assoc_resp_ht_param = assoc_resp_ht_oper->ht_param;
+               priv->ht_param_present = true;
+       } else {
+               priv->ht_param_present = false;
+       }
+
        mwifiex_dbg(priv->adapter, INFO,
                    "info: ASSOC_RESP: curr_pkt_filter is %#x\n",
                    priv->curr_pkt_filter);
index 79c16de8743e1945262f49aa82ec18f9022a2d2f..3cfa94677a8e2384bcbcc7ed188baf02814b84c9 100644 (file)
@@ -132,6 +132,13 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
                }
        }
 
+       if (adapter->nd_info) {
+               for (i = 0; i < adapter->nd_info->n_matches; i++)
+                       kfree(adapter->nd_info->matches[i]);
+               kfree(adapter->nd_info);
+               adapter->nd_info = NULL;
+       }
+
        vfree(adapter->chan_stats);
        kfree(adapter);
        return 0;
@@ -746,6 +753,13 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
 
        mwifiex_queue_main_work(priv->adapter);
 
+       if (priv->sched_scanning) {
+               mwifiex_dbg(priv->adapter, INFO,
+                           "aborting bgscan on ndo_stop\n");
+               mwifiex_stop_bg_scan(priv);
+               cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+       }
+
        return 0;
 }
 
index 2f7f478ce04b6d9ea179022bb52ff92c97255026..aea7aee46cf7834f6299148eac5d0ef3b30a2f19 100644 (file)
@@ -198,6 +198,11 @@ do {                                                               \
                               buf, len, false);                \
 } while (0)
 
+/** Minimum BGSCAN interval: 15 seconds */
+#define MWIFIEX_BGSCAN_INTERVAL 15000
+/** default repeat count */
+#define MWIFIEX_BGSCAN_REPEAT_COUNT 6
+
 struct mwifiex_dbg {
        u32 num_cmd_host_to_card_failure;
        u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -293,6 +298,7 @@ struct mwifiex_tid_tbl {
 #define WMM_HIGHEST_PRIORITY           7
 #define HIGH_PRIO_TID                          7
 #define LOW_PRIO_TID                           0
+#define MWIFIEX_WMM_DRV_DELAY_MAX 510
 
 struct mwifiex_wmm_desc {
        struct mwifiex_tid_tbl tid_tbl_ptr[MAX_NUM_TID];
@@ -483,26 +489,6 @@ struct mwifiex_roc_cfg {
        struct ieee80211_channel chan;
 };
 
-#define MWIFIEX_FW_DUMP_IDX            0xff
-#define MWIFIEX_DRV_INFO_IDX           20
-#define FW_DUMP_MAX_NAME_LEN           8
-#define FW_DUMP_HOST_READY             0xEE
-#define FW_DUMP_DONE                   0xFF
-#define FW_DUMP_READ_DONE              0xFE
-
-struct memory_type_mapping {
-       u8 mem_name[FW_DUMP_MAX_NAME_LEN];
-       u8 *mem_ptr;
-       u32 mem_size;
-       u8 done_flag;
-};
-
-enum rdwr_status {
-       RDWR_STATUS_SUCCESS = 0,
-       RDWR_STATUS_FAILURE = 1,
-       RDWR_STATUS_DONE = 2
-};
-
 enum mwifiex_iface_work_flags {
        MWIFIEX_IFACE_WORK_DEVICE_DUMP,
        MWIFIEX_IFACE_WORK_CARD_RESET,
@@ -616,6 +602,7 @@ struct mwifiex_private {
        spinlock_t curr_bcn_buf_lock;
        struct wireless_dev wdev;
        struct mwifiex_chan_freq_power cfp;
+       u32 versionstrsel;
        char version_str[128];
 #ifdef CONFIG_DEBUG_FS
        struct dentry *dfs_dev_dir;
@@ -640,6 +627,7 @@ struct mwifiex_private {
        u32 mgmt_frame_mask;
        struct mwifiex_roc_cfg roc_cfg;
        bool scan_aborting;
+       u8 sched_scanning;
        u8 csa_chan;
        unsigned long csa_expire_time;
        u8 del_list_idx;
@@ -667,6 +655,8 @@ struct mwifiex_private {
        struct mwifiex_ds_mem_rw mem_rw;
        struct sk_buff_head bypass_txq;
        struct mwifiex_user_scan_chan hidden_chan[MWIFIEX_USER_SCAN_CHAN_MAX];
+       u8 assoc_resp_ht_param;
+       bool ht_param_present;
 };
 
 
@@ -791,6 +781,7 @@ struct mwifiex_if_ops {
        int (*init_if) (struct mwifiex_adapter *);
        void (*cleanup_if) (struct mwifiex_adapter *);
        int (*check_fw_status) (struct mwifiex_adapter *, u32);
+       int (*check_winner_status)(struct mwifiex_adapter *);
        int (*prog_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
        int (*register_dev) (struct mwifiex_adapter *);
        void (*unregister_dev) (struct mwifiex_adapter *);
@@ -994,6 +985,7 @@ struct mwifiex_adapter {
        u8 active_scan_triggered;
        bool usb_mc_status;
        bool usb_mc_setup;
+       struct cfg80211_wowlan_nd_info *nd_info;
 };
 
 void mwifiex_process_tx_queue(struct mwifiex_adapter *adapter);
@@ -1196,6 +1188,10 @@ int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv,
                                struct host_cmd_ds_command *resp);
 int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
                                         void *buf);
+int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
+                                     struct host_cmd_ds_command *cmd,
+                                     void *data_buf);
+int mwifiex_stop_bg_scan(struct mwifiex_private *priv);
 
 /*
  * This function checks if the queuing is RA based or not.
@@ -1417,7 +1413,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
 
 int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
 
-int mwifiex_get_ver_ext(struct mwifiex_private *priv);
+int mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel);
 
 int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action,
                               struct ieee80211_channel *chan,
@@ -1586,6 +1582,12 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter);
 void mwifiex_upload_device_dump(struct mwifiex_adapter *adapter);
 void *mwifiex_alloc_dma_align_buf(int rx_len, gfp_t flags);
 void mwifiex_queue_main_work(struct mwifiex_adapter *adapter);
+int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
+                             int cmd_type,
+                             struct mwifiex_ds_wakeup_reason *wakeup_reason);
+int mwifiex_ret_wakeup_reason(struct mwifiex_private *priv,
+                             struct host_cmd_ds_command *resp,
+                             struct host_cmd_ds_wakeup_reason *wakeup_reason);
 void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter);
 void mwifiex_11n_delba(struct mwifiex_private *priv, int tid);
 int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy);
index 6d0dc40e20e5c535f127e69743e046162fb71138..cc072142411aad8a175d3f790e7bc4a8900c21fa 100644 (file)
@@ -37,17 +37,6 @@ static struct mwifiex_if_ops pcie_ops;
 
 static struct semaphore add_remove_card_sem;
 
-static struct memory_type_mapping mem_type_mapping_tbl[] = {
-       {"ITCM", NULL, 0, 0xF0},
-       {"DTCM", NULL, 0, 0xF1},
-       {"SQRAM", NULL, 0, 0xF2},
-       {"IRAM", NULL, 0, 0xF3},
-       {"APU", NULL, 0, 0xF4},
-       {"CIU", NULL, 0, 0xF5},
-       {"ICU", NULL, 0, 0xF6},
-       {"MAC", NULL, 0, 0xF7},
-};
-
 static int
 mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                       size_t size, int flags)
@@ -206,6 +195,8 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
                card->pcie.blksz_fw_dl = data->blksz_fw_dl;
                card->pcie.tx_buf_size = data->tx_buf_size;
                card->pcie.can_dump_fw = data->can_dump_fw;
+               card->pcie.mem_type_mapping_tbl = data->mem_type_mapping_tbl;
+               card->pcie.num_mem_types = data->num_mem_types;
                card->pcie.can_ext_scan = data->can_ext_scan;
        }
 
@@ -323,6 +314,8 @@ static int mwifiex_read_reg(struct mwifiex_adapter *adapter, int reg, u32 *data)
        struct pcie_service_card *card = adapter->card;
 
        *data = ioread32(card->pci_mmap1 + reg);
+       if (*data == 0xffffffff)
+               return 0xffffffff;
 
        return 0;
 }
@@ -2007,14 +2000,12 @@ done:
 
 /*
  * This function checks the firmware status in card.
- *
- * The winner interface is also determined by this function.
  */
 static int
 mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
 {
        int ret = 0;
-       u32 firmware_stat, winner_status;
+       u32 firmware_stat;
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
        u32 tries;
@@ -2054,19 +2045,28 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
                }
        }
 
-       if (ret) {
-               if (mwifiex_read_reg(adapter, reg->fw_status,
-                                    &winner_status))
-                       ret = -1;
-               else if (!winner_status) {
-                       mwifiex_dbg(adapter, INFO,
-                                   "PCI-E is the winner\n");
-                       adapter->winner = 1;
-               } else {
-                       mwifiex_dbg(adapter, ERROR,
-                                   "PCI-E is not the winner <%#x,%d>, exit dnld\n",
-                                   ret, adapter->winner);
-               }
+       return ret;
+}
+
+/* This function checks if WLAN is the winner.
+ */
+static int
+mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
+{
+       u32 winner = 0;
+       int ret = 0;
+       struct pcie_service_card *card = adapter->card;
+       const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+
+       if (mwifiex_read_reg(adapter, reg->fw_status, &winner)) {
+               ret = -1;
+       } else if (!winner) {
+               mwifiex_dbg(adapter, INFO, "PCI-E is the winner\n");
+               adapter->winner = 1;
+       } else {
+               mwifiex_dbg(adapter, ERROR,
+                           "PCI-E is not the winner <%#x,%d>, exit dnld\n",
+                           ret, adapter->winner);
        }
 
        return ret;
@@ -2075,20 +2075,28 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
 /*
  * This function reads the interrupt status from card.
  */
-static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter,
+                                    int msg_id)
 {
        u32 pcie_ireg;
        unsigned long flags;
+       struct pcie_service_card *card = adapter->card;
 
        if (!mwifiex_pcie_ok_to_access_hw(adapter))
                return;
 
-       if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS, &pcie_ireg)) {
-               mwifiex_dbg(adapter, ERROR, "Read register failed\n");
-               return;
-       }
+       if (card->msix_enable && msg_id >= 0) {
+               pcie_ireg = BIT(msg_id);
+       } else {
+               if (mwifiex_read_reg(adapter, PCIE_HOST_INT_STATUS,
+                                    &pcie_ireg)) {
+                       mwifiex_dbg(adapter, ERROR, "Read register failed\n");
+                       return;
+               }
+
+               if ((pcie_ireg == 0xFFFFFFFF) || !pcie_ireg)
+                       return;
 
-       if ((pcie_ireg != 0xFFFFFFFF) && (pcie_ireg)) {
 
                mwifiex_pcie_disable_host_int(adapter);
 
@@ -2099,21 +2107,24 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
                                    "Write register failed\n");
                        return;
                }
-               spin_lock_irqsave(&adapter->int_lock, flags);
-               adapter->int_status |= pcie_ireg;
-               spin_unlock_irqrestore(&adapter->int_lock, flags);
-
-               if (!adapter->pps_uapsd_mode &&
-                   adapter->ps_state == PS_STATE_SLEEP &&
-                   mwifiex_pcie_ok_to_access_hw(adapter)) {
-                               /* Potentially for PCIe we could get other
-                                * interrupts like shared. Don't change power
-                                * state until cookie is set */
-                               adapter->ps_state = PS_STATE_AWAKE;
-                               adapter->pm_wakeup_fw_try = false;
-                               del_timer(&adapter->wakeup_timer);
-               }
        }
+
+       if (!adapter->pps_uapsd_mode &&
+           adapter->ps_state == PS_STATE_SLEEP &&
+           mwifiex_pcie_ok_to_access_hw(adapter)) {
+               /* Potentially for PCIe we could get other
+                * interrupts like shared. Don't change power
+                * state until cookie is set
+                */
+               adapter->ps_state = PS_STATE_AWAKE;
+               adapter->pm_wakeup_fw_try = false;
+               del_timer(&adapter->wakeup_timer);
+       }
+
+       spin_lock_irqsave(&adapter->int_lock, flags);
+       adapter->int_status |= pcie_ireg;
+       spin_unlock_irqrestore(&adapter->int_lock, flags);
+       mwifiex_dbg(adapter, INTR, "ireg: 0x%08x\n", pcie_ireg);
 }
 
 /*
@@ -2124,7 +2135,8 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
  */
 static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
 {
-       struct pci_dev *pdev = (struct pci_dev *)context;
+       struct mwifiex_msix_context *ctx = context;
+       struct pci_dev *pdev = ctx->dev;
        struct pcie_service_card *card;
        struct mwifiex_adapter *adapter;
 
@@ -2144,7 +2156,11 @@ static irqreturn_t mwifiex_pcie_interrupt(int irq, void *context)
        if (adapter->surprise_removed)
                goto exit;
 
-       mwifiex_interrupt_status(adapter);
+       if (card->msix_enable)
+               mwifiex_interrupt_status(adapter, ctx->msg_id);
+       else
+               mwifiex_interrupt_status(adapter, -1);
+
        mwifiex_queue_main_work(adapter);
 
 exit:
@@ -2164,7 +2180,7 @@ exit:
  * In case of Rx packets received, the packets are uploaded from card to
  * host and processed accordingly.
  */
-static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
+static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter)
 {
        int ret;
        u32 pcie_ireg;
@@ -2244,6 +2260,69 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
        return 0;
 }
 
+static int mwifiex_process_msix_int(struct mwifiex_adapter *adapter)
+{
+       int ret;
+       u32 pcie_ireg;
+       unsigned long flags;
+
+       spin_lock_irqsave(&adapter->int_lock, flags);
+       /* Clear out unused interrupts */
+       pcie_ireg = adapter->int_status;
+       adapter->int_status = 0;
+       spin_unlock_irqrestore(&adapter->int_lock, flags);
+
+       if (pcie_ireg & HOST_INTR_DNLD_DONE) {
+               mwifiex_dbg(adapter, INTR,
+                           "info: TX DNLD Done\n");
+               ret = mwifiex_pcie_send_data_complete(adapter);
+               if (ret)
+                       return ret;
+       }
+       if (pcie_ireg & HOST_INTR_UPLD_RDY) {
+               mwifiex_dbg(adapter, INTR,
+                           "info: Rx DATA\n");
+               ret = mwifiex_pcie_process_recv_data(adapter);
+               if (ret)
+                       return ret;
+       }
+       if (pcie_ireg & HOST_INTR_EVENT_RDY) {
+               mwifiex_dbg(adapter, INTR,
+                           "info: Rx EVENT\n");
+               ret = mwifiex_pcie_process_event_ready(adapter);
+               if (ret)
+                       return ret;
+       }
+
+       if (pcie_ireg & HOST_INTR_CMD_DONE) {
+               if (adapter->cmd_sent) {
+                       mwifiex_dbg(adapter, INTR,
+                                   "info: CMD sent Interrupt\n");
+                       adapter->cmd_sent = false;
+               }
+               /* Handle command response */
+               ret = mwifiex_pcie_process_cmd_complete(adapter);
+               if (ret)
+                       return ret;
+       }
+
+       mwifiex_dbg(adapter, INTR,
+                   "info: cmd_sent=%d data_sent=%d\n",
+                   adapter->cmd_sent, adapter->data_sent);
+
+       return 0;
+}
+
+static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
+{
+       struct pcie_service_card *card = adapter->card;
+
+       if (card->msix_enable)
+               return mwifiex_process_msix_int(adapter);
+       else
+               return mwifiex_process_pcie_int(adapter);
+}
+
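
With MSI-X enabled each vector identifies its source directly, so the ISR above
records BIT(msg_id) instead of reading PCIE_HOST_INT_STATUS, and the bottom
half dispatches on the accumulated bits. A minimal sketch of that
accumulate-and-dispatch pattern; the bit names are illustrative, not the
driver's definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative interrupt bits, one per MSI-X message. */
#define INTR_DNLD_DONE  (1u << 0)   /* TX download complete */
#define INTR_UPLD_RDY   (1u << 1)   /* RX data ready */
#define INTR_EVENT_RDY  (1u << 2)   /* firmware event ready */
#define INTR_CMD_DONE   (1u << 3)   /* command response ready */

static uint32_t int_status;         /* accumulated in the ISR */

/* ISR path: the per-vector message id maps straight to one status bit. */
static void isr(int msg_id)
{
        int_status |= 1u << msg_id;
}

/* Bottom half: consume and dispatch the accumulated bits. */
static void process_int_status(void)
{
        uint32_t ireg = int_status;

        int_status = 0;
        if (ireg & INTR_DNLD_DONE)
                printf("TX download done\n");
        if (ireg & INTR_UPLD_RDY)
                printf("RX data\n");
        if (ireg & INTR_EVENT_RDY)
                printf("firmware event\n");
        if (ireg & INTR_CMD_DONE)
                printf("command response\n");
}

int main(void)
{
        isr(1);            /* RX vector fired */
        isr(3);            /* command-done vector fired */
        process_int_status();
        return 0;
}
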
 /*
  * This function downloads data from driver to card.
  *
@@ -2278,10 +2357,15 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
 {
        int ret, tries;
        u8 ctrl_data;
+       u32 fw_status;
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
 
-       ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl, FW_DUMP_HOST_READY);
+       if (mwifiex_read_reg(adapter, reg->fw_status, &fw_status))
+               return RDWR_STATUS_FAILURE;
+
+       ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
+                               reg->fw_dump_host_ready);
        if (ret) {
                mwifiex_dbg(adapter, ERROR,
                            "PCIE write err\n");
@@ -2294,11 +2378,11 @@ mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
                        return RDWR_STATUS_SUCCESS;
                if (doneflag && ctrl_data == doneflag)
                        return RDWR_STATUS_DONE;
-               if (ctrl_data != FW_DUMP_HOST_READY) {
+               if (ctrl_data != reg->fw_dump_host_ready) {
                        mwifiex_dbg(adapter, WARN,
                                    "The ctrl reg was changed, re-try again!\n");
                        ret = mwifiex_write_reg(adapter, reg->fw_dump_ctrl,
-                                               FW_DUMP_HOST_READY);
+                                               reg->fw_dump_host_ready);
                        if (ret) {
                                mwifiex_dbg(adapter, ERROR,
                                            "PCIE write err\n");
@@ -2318,7 +2402,8 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *creg = card->pcie.reg;
        unsigned int reg, reg_start, reg_end;
-       u8 *dbg_ptr, *end_ptr, dump_num, idx, i, read_reg, doneflag = 0;
+       u8 *dbg_ptr, *end_ptr, *tmp_ptr, fw_dump_num, dump_num;
+       u8 idx, i, read_reg, doneflag = 0;
        enum rdwr_status stat;
        u32 memory_size;
        int ret;
@@ -2326,8 +2411,9 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
        if (!card->pcie.can_dump_fw)
                return;
 
-       for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) {
-               struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
+       for (idx = 0; idx < adapter->num_mem_types; idx++) {
+               struct memory_type_mapping *entry =
+                               &adapter->mem_type_mapping_tbl[idx];
 
                if (entry->mem_ptr) {
                        vfree(entry->mem_ptr);
@@ -2336,7 +2422,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
                entry->mem_size = 0;
        }
 
-       mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump start ==\n");
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump start ==\n");
 
        /* Read the number of the memories which will dump */
        stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
@@ -2344,28 +2430,38 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
                return;
 
        reg = creg->fw_dump_start;
-       mwifiex_read_reg_byte(adapter, reg, &dump_num);
+       mwifiex_read_reg_byte(adapter, reg, &fw_dump_num);
+
+       /* The W8997 chipset firmware dump is stored in a single region */
+       if (fw_dump_num == 0)
+               dump_num = 1;
+       else
+               dump_num = fw_dump_num;
 
        /* Read the length of every memory which will dump */
        for (idx = 0; idx < dump_num; idx++) {
-               struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
-
-               stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
-               if (stat == RDWR_STATUS_FAILURE)
-                       return;
-
+               struct memory_type_mapping *entry =
+                               &adapter->mem_type_mapping_tbl[idx];
                memory_size = 0;
-               reg = creg->fw_dump_start;
-               for (i = 0; i < 4; i++) {
-                       mwifiex_read_reg_byte(adapter, reg, &read_reg);
-                       memory_size |= (read_reg << (i * 8));
+               if (fw_dump_num != 0) {
+                       stat = mwifiex_pcie_rdwr_firmware(adapter, doneflag);
+                       if (stat == RDWR_STATUS_FAILURE)
+                               return;
+
+                       reg = creg->fw_dump_start;
+                       for (i = 0; i < 4; i++) {
+                               mwifiex_read_reg_byte(adapter, reg, &read_reg);
+                               memory_size |= (read_reg << (i * 8));
                        reg++;
+                       }
+               } else {
+                       memory_size = MWIFIEX_FW_DUMP_MAX_MEMSIZE;
                }
 
                if (memory_size == 0) {
                        mwifiex_dbg(adapter, MSG, "Firmware dump Finished!\n");
                        ret = mwifiex_write_reg(adapter, creg->fw_dump_ctrl,
-                                               FW_DUMP_READ_DONE);
+                                               creg->fw_dump_read_done);
                        if (ret) {
                                mwifiex_dbg(adapter, ERROR, "PCIE write err\n");
                                return;
@@ -2400,11 +2496,21 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
                                mwifiex_read_reg_byte(adapter, reg, dbg_ptr);
                                if (dbg_ptr < end_ptr) {
                                        dbg_ptr++;
-                               } else {
-                                       mwifiex_dbg(adapter, ERROR,
-                                                   "Allocated buf not enough\n");
-                                       return;
+                                       continue;
                                }
+                               mwifiex_dbg(adapter, ERROR,
+                                           "pre-allocated buf not enough\n");
+                               tmp_ptr =
+                                       vzalloc(memory_size + MWIFIEX_SIZE_4K);
+                               if (!tmp_ptr)
+                                       return;
+                               memcpy(tmp_ptr, entry->mem_ptr, memory_size);
+                               vfree(entry->mem_ptr);
+                               entry->mem_ptr = tmp_ptr;
+                               tmp_ptr = NULL;
+                               dbg_ptr = entry->mem_ptr + memory_size;
+                               memory_size += MWIFIEX_SIZE_4K;
+                               end_ptr = entry->mem_ptr + memory_size;
                        }
 
                        if (stat != RDWR_STATUS_DONE)
@@ -2416,7 +2522,7 @@ static void mwifiex_pcie_fw_dump(struct mwifiex_adapter *adapter)
                        break;
                } while (true);
        }
-       mwifiex_dbg(adapter, DUMP, "== mwifiex firmware dump end ==\n");
+       mwifiex_dbg(adapter, MSG, "== mwifiex firmware dump end ==\n");
 }
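
The hunk above also changes the overflow handling in mwifiex_pcie_fw_dump(): instead of aborting when the pre-allocated dump buffer fills up, the driver now grows the region by MWIFIEX_SIZE_4K with vzalloc()/memcpy() and keeps reading. A minimal sketch of that grow-and-copy step, using a hypothetical helper name and step size (grow_dump_buf and GROW_STEP are not from the patch):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#define GROW_STEP (4 * 1024)    /* stand-in for MWIFIEX_SIZE_4K */

/* Grow a vzalloc()ed dump buffer by GROW_STEP, preserving 'used' bytes;
 * on allocation failure the caller keeps the old buffer untouched. */
static u8 *grow_dump_buf(u8 *old, size_t used, size_t *size)
{
        u8 *nbuf = vzalloc(*size + GROW_STEP);

        if (!nbuf)
                return NULL;
        memcpy(nbuf, old, used);
        vfree(old);
        *size += GROW_STEP;
        return nbuf;
}
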
 
 static void mwifiex_pcie_device_dump_work(struct mwifiex_adapter *adapter)
@@ -2595,10 +2701,43 @@ static void mwifiex_pcie_cleanup(struct mwifiex_adapter *adapter)
 
 static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
 {
-       int ret;
+       int ret, i, j;
        struct pcie_service_card *card = adapter->card;
        struct pci_dev *pdev = card->dev;
 
+       if (card->pcie.reg->msix_support) {
+               for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+                       card->msix_entries[i].entry = i;
+               ret = pci_enable_msix_exact(pdev, card->msix_entries,
+                                           MWIFIEX_NUM_MSIX_VECTORS);
+               if (!ret) {
+                       for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++) {
+                               card->msix_ctx[i].dev = pdev;
+                               card->msix_ctx[i].msg_id = i;
+
+                               ret = request_irq(card->msix_entries[i].vector,
+                                                 mwifiex_pcie_interrupt, 0,
+                                                 "MWIFIEX_PCIE_MSIX",
+                                                 &card->msix_ctx[i]);
+                               if (ret)
+                                       break;
+                       }
+
+                       if (ret) {
+                               mwifiex_dbg(adapter, INFO, "request_irq fail: %d\n",
+                                           ret);
+                               for (j = 0; j < i; j++)
+                                       free_irq(card->msix_entries[j].vector,
+                                                &card->msix_ctx[j]);
+                               pci_disable_msix(pdev);
+                       } else {
+                               mwifiex_dbg(adapter, MSG, "MSIx enabled!\n");
+                               card->msix_enable = 1;
+                               return 0;
+                       }
+               }
+       }
+
        if (pci_enable_msi(pdev) != 0)
                pci_disable_msi(pdev);
        else
@@ -2606,8 +2745,10 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
 
        mwifiex_dbg(adapter, INFO, "msi_enable = %d\n", card->msi_enable);
 
+       card->share_irq_ctx.dev = pdev;
+       card->share_irq_ctx.msg_id = -1;
        ret = request_irq(pdev->irq, mwifiex_pcie_interrupt, IRQF_SHARED,
-                         "MRVL_PCIE", pdev);
+                         "MRVL_PCIE", &card->share_irq_ctx);
        if (ret) {
                pr_err("request_irq failed: ret=%d\n", ret);
                adapter->card = NULL;
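
In the MSI-X path added above, every vector gets its own mwifiex_msix_context with msg_id set to the vector index, while the legacy/MSI path registers share_irq_ctx with msg_id = -1; a single interrupt handler can then tell the two apart from the dev_id it receives. A hedged sketch of that dispatch pattern, with made-up names (my_irq_ctx, my_isr) rather than the driver's real mwifiex_pcie_interrupt():

#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/printk.h>

struct my_irq_ctx {
        struct pci_dev *dev;
        u16 msg_id;             /* vector index, or (u16)-1 for legacy/MSI */
};

static irqreturn_t my_isr(int irq, void *dev_id)
{
        struct my_irq_ctx *ctx = dev_id;

        if (ctx->msg_id == (u16)-1)
                pr_debug("legacy/MSI interrupt on irq %d\n", irq);
        else
                pr_debug("MSI-X vector %u fired\n", ctx->msg_id);

        return IRQ_HANDLED;
}
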
@@ -2635,8 +2776,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
                return -1;
 
        adapter->tx_buf_size = card->pcie.tx_buf_size;
-       adapter->mem_type_mapping_tbl = mem_type_mapping_tbl;
-       adapter->num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl);
+       adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl;
+       adapter->num_mem_types = card->pcie.num_mem_types;
        strcpy(adapter->fw_name, card->pcie.firmware);
        adapter->ext_scan = card->pcie.can_ext_scan;
 
@@ -2653,11 +2794,28 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
 {
        struct pcie_service_card *card = adapter->card;
        const struct mwifiex_pcie_card_reg *reg;
+       struct pci_dev *pdev = card->dev;
+       int i;
 
        if (card) {
-               mwifiex_dbg(adapter, INFO,
-                           "%s(): calling free_irq()\n", __func__);
-               free_irq(card->dev->irq, card->dev);
+               if (card->msix_enable) {
+                       for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+                               synchronize_irq(card->msix_entries[i].vector);
+
+                       for (i = 0; i < MWIFIEX_NUM_MSIX_VECTORS; i++)
+                               free_irq(card->msix_entries[i].vector,
+                                        &card->msix_ctx[i]);
+
+                       card->msix_enable = 0;
+                       pci_disable_msix(pdev);
+               } else {
+                       mwifiex_dbg(adapter, INFO,
+                                   "%s(): calling free_irq()\n", __func__);
+                       free_irq(card->dev->irq, &card->share_irq_ctx);
+
+                       if (card->msi_enable)
+                               pci_disable_msi(pdev);
+               }
 
                reg = card->pcie.reg;
                if (reg->sleep_cookie)
@@ -2675,6 +2833,7 @@ static struct mwifiex_if_ops pcie_ops = {
        .init_if =                      mwifiex_pcie_init,
        .cleanup_if =                   mwifiex_pcie_cleanup,
        .check_fw_status =              mwifiex_check_fw_status,
+       .check_winner_status =          mwifiex_check_winner_status,
        .prog_fw =                      mwifiex_prog_fw_w_helper,
        .register_dev =                 mwifiex_register_dev,
        .unregister_dev =               mwifiex_unregister_dev,
index 6fc28737b576c64b1ac52d0199611cb81c8bad2b..29e58ce877e349649c747080bce913f1063362c8 100644 (file)
@@ -26,6 +26,7 @@
 #include    <linux/pcieport_if.h>
 #include    <linux/interrupt.h>
 
+#include    "decl.h"
 #include    "main.h"
 
 #define PCIE8766_DEFAULT_FW_NAME "mrvl/pcie8766_uapsta.bin"
@@ -135,6 +136,9 @@ struct mwifiex_pcie_card_reg {
        u16 fw_dump_ctrl;
        u16 fw_dump_start;
        u16 fw_dump_end;
+       u8 fw_dump_host_ready;
+       u8 fw_dump_read_done;
+       u8 msix_support;
 };
 
 static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
@@ -166,6 +170,7 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
        .ring_tx_start_ptr = 0,
        .pfu_enabled = 0,
        .sleep_cookie = 1,
+       .msix_support = 0,
 };
 
 static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
@@ -200,6 +205,9 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
        .fw_dump_ctrl = 0xcf4,
        .fw_dump_start = 0xcf8,
        .fw_dump_end = 0xcff,
+       .fw_dump_host_ready = 0xee,
+       .fw_dump_read_done = 0xfe,
+       .msix_support = 0,
 };
 
 static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
@@ -231,6 +239,27 @@ static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
        .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
        .pfu_enabled = 1,
        .sleep_cookie = 0,
+       .fw_dump_ctrl = 0xcf4,
+       .fw_dump_start = 0xcf8,
+       .fw_dump_end = 0xcff,
+       .fw_dump_host_ready = 0xcc,
+       .fw_dump_read_done = 0xdd,
+       .msix_support = 1,
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = {
+       {"ITCM", NULL, 0, 0xF0},
+       {"DTCM", NULL, 0, 0xF1},
+       {"SQRAM", NULL, 0, 0xF2},
+       {"IRAM", NULL, 0, 0xF3},
+       {"APU", NULL, 0, 0xF4},
+       {"CIU", NULL, 0, 0xF5},
+       {"ICU", NULL, 0, 0xF6},
+       {"MAC", NULL, 0, 0xF7},
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
+       {"DUMP", NULL, 0, 0xDD},
 };
 
 struct mwifiex_pcie_device {
@@ -239,6 +268,8 @@ struct mwifiex_pcie_device {
        u16 blksz_fw_dl;
        u16 tx_buf_size;
        bool can_dump_fw;
+       struct memory_type_mapping *mem_type_mapping_tbl;
+       u8 num_mem_types;
        bool can_ext_scan;
 };
 
@@ -257,6 +288,8 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
        .can_dump_fw = true,
+       .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897,
+       .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897),
        .can_ext_scan = true,
 };
 
@@ -265,7 +298,9 @@ static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
        .reg            = &mwifiex_reg_8997,
        .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
        .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
-       .can_dump_fw = false,
+       .can_dump_fw = true,
+       .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997,
+       .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997),
        .can_ext_scan = true,
 };
 
@@ -290,6 +325,13 @@ struct mwifiex_pfu_buf_desc {
        u32 reserved;
 } __packed;
 
+#define MWIFIEX_NUM_MSIX_VECTORS   4
+
+struct mwifiex_msix_context {
+       struct pci_dev *dev;
+       u16 msg_id;
+};
+
 struct pcie_service_card {
        struct pci_dev *dev;
        struct mwifiex_adapter *adapter;
@@ -327,6 +369,12 @@ struct pcie_service_card {
        void __iomem *pci_mmap;
        void __iomem *pci_mmap1;
        int msi_enable;
+       int msix_enable;
+#ifdef CONFIG_PCI
+       struct msix_entry msix_entries[MWIFIEX_NUM_MSIX_VECTORS];
+#endif
+       struct mwifiex_msix_context msix_ctx[MWIFIEX_NUM_MSIX_VECTORS];
+       struct mwifiex_msix_context share_irq_ctx;
 };
 
 static inline int
index c20017ced5667c796abec645ae5cc8ca620b530f..489f7a911a83f225561a2ddf902c637457b6b960 100644 (file)
@@ -547,6 +547,61 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
        return chan_idx;
 }
 
+/* This function creates a channel list tlv for bgscan config, based
+ * on region/band information.
+ */
+static int
+mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv,
+                                  const struct mwifiex_bg_scan_cfg
+                                               *bgscan_cfg_in,
+                                  struct mwifiex_chan_scan_param_set
+                                               *scan_chan_list)
+{
+       enum ieee80211_band band;
+       struct ieee80211_supported_band *sband;
+       struct ieee80211_channel *ch;
+       struct mwifiex_adapter *adapter = priv->adapter;
+       int chan_idx = 0, i;
+
+       for (band = 0; (band < IEEE80211_NUM_BANDS); band++) {
+               if (!priv->wdev.wiphy->bands[band])
+                       continue;
+
+               sband = priv->wdev.wiphy->bands[band];
+
+               for (i = 0; (i < sband->n_channels) ; i++) {
+                       ch = &sband->channels[i];
+                       if (ch->flags & IEEE80211_CHAN_DISABLED)
+                               continue;
+                       scan_chan_list[chan_idx].radio_type = band;
+
+                       if (bgscan_cfg_in->chan_list[0].scan_time)
+                               scan_chan_list[chan_idx].max_scan_time =
+                                       cpu_to_le16((u16)bgscan_cfg_in->
+                                       chan_list[0].scan_time);
+                       else if (ch->flags & IEEE80211_CHAN_NO_IR)
+                               scan_chan_list[chan_idx].max_scan_time =
+                                       cpu_to_le16(adapter->passive_scan_time);
+                       else
+                               scan_chan_list[chan_idx].max_scan_time =
+                                       cpu_to_le16(adapter->
+                                                   specific_scan_time);
+
+                       if (ch->flags & IEEE80211_CHAN_NO_IR)
+                               scan_chan_list[chan_idx].chan_scan_mode_bitmap
+                                       |= MWIFIEX_PASSIVE_SCAN;
+                       else
+                               scan_chan_list[chan_idx].chan_scan_mode_bitmap
+                                       &= ~MWIFIEX_PASSIVE_SCAN;
+
+                       scan_chan_list[chan_idx].chan_number =
+                                                       (u32)ch->hw_value;
+                       chan_idx++;
+               }
+       }
+       return chan_idx;
+}
+
 /* This function appends rate TLV to scan config command. */
 static int
 mwifiex_append_rate_tlv(struct mwifiex_private *priv,
@@ -2037,6 +2092,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
        u8 is_bgscan_resp;
        __le64 fw_tsf = 0;
        u8 *radio_type;
+       struct cfg80211_wowlan_nd_match *pmatch;
+       struct cfg80211_sched_scan_request *nd_config = NULL;
 
        is_bgscan_resp = (le16_to_cpu(resp->command)
                          == HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -2099,6 +2156,21 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                                             (struct mwifiex_ie_types_data **)
                                             &chan_band_tlv);
 
+#ifdef CONFIG_PM
+       if (priv->wdev.wiphy->wowlan_config)
+               nd_config = priv->wdev.wiphy->wowlan_config->nd_config;
+#endif
+
+       if (nd_config) {
+               adapter->nd_info =
+                       kzalloc(sizeof(struct cfg80211_wowlan_nd_match) +
+                               sizeof(struct cfg80211_wowlan_nd_match *) *
+                               scan_rsp->number_of_sets, GFP_ATOMIC);
+
+               if (adapter->nd_info)
+                       adapter->nd_info->n_matches = scan_rsp->number_of_sets;
+       }
+
        for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
                /*
                 * If the TSF TLV was appended to the scan results, save this
@@ -2117,6 +2189,23 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
                        radio_type = NULL;
                }
 
+               if (chan_band_tlv && adapter->nd_info) {
+                       adapter->nd_info->matches[idx] =
+                               kzalloc(sizeof(*pmatch) +
+                               sizeof(u32), GFP_ATOMIC);
+
+                       pmatch = adapter->nd_info->matches[idx];
+
+                       if (pmatch) {
+                               memset(pmatch, 0, sizeof(*pmatch));
+                               if (chan_band_tlv) {
+                                       pmatch->n_channels = 1;
+                                       pmatch->channels[0] =
+                                               chan_band->chan_number;
+                               }
+                       }
+               }
+
                ret = mwifiex_parse_single_response_buf(priv, &bss_info,
                                                        &bytes_left,
                                                        le64_to_cpu(fw_tsf),
@@ -2155,6 +2244,227 @@ int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
        return 0;
 }
 
+/* This function prepares a background scan config command to be sent
+ * to the firmware.
+ */
+int mwifiex_cmd_802_11_bg_scan_config(struct mwifiex_private *priv,
+                                     struct host_cmd_ds_command *cmd,
+                                     void *data_buf)
+{
+       struct host_cmd_ds_802_11_bg_scan_config *bgscan_config =
+                                       &cmd->params.bg_scan_config;
+       struct mwifiex_bg_scan_cfg *bgscan_cfg_in = data_buf;
+       u8 *tlv_pos = bgscan_config->tlv;
+       u8 num_probes;
+       u32 ssid_len, chan_idx, scan_type, scan_dur, chan_num;
+       int i;
+       struct mwifiex_ie_types_num_probes *num_probes_tlv;
+       struct mwifiex_ie_types_repeat_count *repeat_count_tlv;
+       struct mwifiex_ie_types_min_rssi_threshold *rssi_threshold_tlv;
+       struct mwifiex_ie_types_bgscan_start_later *start_later_tlv;
+       struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
+       struct mwifiex_ie_types_chan_list_param_set *chan_list_tlv;
+       struct mwifiex_chan_scan_param_set *temp_chan;
+
+       cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_CONFIG);
+       cmd->size = cpu_to_le16(sizeof(*bgscan_config) + S_DS_GEN);
+
+       bgscan_config->action = cpu_to_le16(bgscan_cfg_in->action);
+       bgscan_config->enable = bgscan_cfg_in->enable;
+       bgscan_config->bss_type = bgscan_cfg_in->bss_type;
+       bgscan_config->scan_interval =
+               cpu_to_le32(bgscan_cfg_in->scan_interval);
+       bgscan_config->report_condition =
+               cpu_to_le32(bgscan_cfg_in->report_condition);
+
+       /*  stop sched scan  */
+       if (!bgscan_config->enable)
+               return 0;
+
+       bgscan_config->chan_per_scan = bgscan_cfg_in->chan_per_scan;
+
+       num_probes = (bgscan_cfg_in->num_probes ? bgscan_cfg_in->
+                     num_probes : priv->adapter->scan_probes);
+
+       if (num_probes) {
+               num_probes_tlv = (struct mwifiex_ie_types_num_probes *)tlv_pos;
+               num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
+               num_probes_tlv->header.len =
+                       cpu_to_le16(sizeof(num_probes_tlv->num_probes));
+               num_probes_tlv->num_probes = cpu_to_le16((u16)num_probes);
+
+               tlv_pos += sizeof(num_probes_tlv->header) +
+                       le16_to_cpu(num_probes_tlv->header.len);
+       }
+
+       if (bgscan_cfg_in->repeat_count) {
+               repeat_count_tlv =
+                       (struct mwifiex_ie_types_repeat_count *)tlv_pos;
+               repeat_count_tlv->header.type =
+                       cpu_to_le16(TLV_TYPE_REPEAT_COUNT);
+               repeat_count_tlv->header.len =
+                       cpu_to_le16(sizeof(repeat_count_tlv->repeat_count));
+               repeat_count_tlv->repeat_count =
+                       cpu_to_le16(bgscan_cfg_in->repeat_count);
+
+               tlv_pos += sizeof(repeat_count_tlv->header) +
+                       le16_to_cpu(repeat_count_tlv->header.len);
+       }
+
+       if (bgscan_cfg_in->rssi_threshold) {
+               rssi_threshold_tlv =
+                       (struct mwifiex_ie_types_min_rssi_threshold *)tlv_pos;
+               rssi_threshold_tlv->header.type =
+                       cpu_to_le16(TLV_TYPE_RSSI_LOW);
+               rssi_threshold_tlv->header.len =
+                       cpu_to_le16(sizeof(rssi_threshold_tlv->rssi_threshold));
+               rssi_threshold_tlv->rssi_threshold =
+                       cpu_to_le16(bgscan_cfg_in->rssi_threshold);
+
+               tlv_pos += sizeof(rssi_threshold_tlv->header) +
+                       le16_to_cpu(rssi_threshold_tlv->header.len);
+       }
+
+       for (i = 0; i < bgscan_cfg_in->num_ssids; i++) {
+               ssid_len = bgscan_cfg_in->ssid_list[i].ssid.ssid_len;
+
+               wildcard_ssid_tlv =
+                       (struct mwifiex_ie_types_wildcard_ssid_params *)tlv_pos;
+               wildcard_ssid_tlv->header.type =
+                               cpu_to_le16(TLV_TYPE_WILDCARDSSID);
+               wildcard_ssid_tlv->header.len = cpu_to_le16(
+                               (u16)(ssid_len + sizeof(wildcard_ssid_tlv->
+                                                        max_ssid_length)));
+
+               /* max_ssid_length = 0 tells firmware to perform
+                * specific scan for the SSID filled, whereas
+                * max_ssid_length = IEEE80211_MAX_SSID_LEN is for
+                * wildcard scan.
+                */
+               if (ssid_len)
+                       wildcard_ssid_tlv->max_ssid_length = 0;
+               else
+                       wildcard_ssid_tlv->max_ssid_length =
+                                               IEEE80211_MAX_SSID_LEN;
+
+               memcpy(wildcard_ssid_tlv->ssid,
+                      bgscan_cfg_in->ssid_list[i].ssid.ssid, ssid_len);
+
+               tlv_pos += (sizeof(wildcard_ssid_tlv->header)
+                               + le16_to_cpu(wildcard_ssid_tlv->header.len));
+       }
+
+       chan_list_tlv = (struct mwifiex_ie_types_chan_list_param_set *)tlv_pos;
+
+       if (bgscan_cfg_in->chan_list[0].chan_number) {
+               dev_dbg(priv->adapter->dev, "info: bgscan: Using supplied channel list\n");
+
+               chan_list_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
+
+               for (chan_idx = 0;
+                    chan_idx < MWIFIEX_BG_SCAN_CHAN_MAX &&
+                    bgscan_cfg_in->chan_list[chan_idx].chan_number;
+                    chan_idx++) {
+                       temp_chan = chan_list_tlv->chan_scan_param + chan_idx;
+
+                       /* Increment the TLV header length by size appended */
+                       le16_add_cpu(&chan_list_tlv->header.len,
+                                    sizeof(chan_list_tlv->chan_scan_param));
+
+                       temp_chan->chan_number =
+                               bgscan_cfg_in->chan_list[chan_idx].chan_number;
+                       temp_chan->radio_type =
+                               bgscan_cfg_in->chan_list[chan_idx].radio_type;
+
+                       scan_type =
+                               bgscan_cfg_in->chan_list[chan_idx].scan_type;
+
+                       if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+                               temp_chan->chan_scan_mode_bitmap
+                                       |= MWIFIEX_PASSIVE_SCAN;
+                       else
+                               temp_chan->chan_scan_mode_bitmap
+                                       &= ~MWIFIEX_PASSIVE_SCAN;
+
+                       if (bgscan_cfg_in->chan_list[chan_idx].scan_time) {
+                               scan_dur = (u16)bgscan_cfg_in->
+                                       chan_list[chan_idx].scan_time;
+                       } else {
+                               scan_dur = (scan_type ==
+                                           MWIFIEX_SCAN_TYPE_PASSIVE) ?
+                                           priv->adapter->passive_scan_time :
+                                           priv->adapter->specific_scan_time;
+                       }
+
+                       temp_chan->min_scan_time = cpu_to_le16(scan_dur);
+                       temp_chan->max_scan_time = cpu_to_le16(scan_dur);
+               }
+       } else {
+               dev_dbg(priv->adapter->dev,
+                       "info: bgscan: Creating full region channel list\n");
+               chan_num =
+                       mwifiex_bgscan_create_channel_list(priv, bgscan_cfg_in,
+                                                          chan_list_tlv->
+                                                          chan_scan_param);
+               le16_add_cpu(&chan_list_tlv->header.len,
+                            chan_num *
+                            sizeof(chan_list_tlv->chan_scan_param[0]));
+       }
+
+       tlv_pos += (sizeof(chan_list_tlv->header)
+                       + le16_to_cpu(chan_list_tlv->header.len));
+
+       if (bgscan_cfg_in->start_later) {
+               start_later_tlv =
+                       (struct mwifiex_ie_types_bgscan_start_later *)tlv_pos;
+               start_later_tlv->header.type =
+                       cpu_to_le16(TLV_TYPE_BGSCAN_START_LATER);
+               start_later_tlv->header.len =
+                       cpu_to_le16(sizeof(start_later_tlv->start_later));
+               start_later_tlv->start_later =
+                       cpu_to_le16(bgscan_cfg_in->start_later);
+
+               tlv_pos += sizeof(start_later_tlv->header) +
+                       le16_to_cpu(start_later_tlv->header.len);
+       }
+
+       /* Append vendor specific IE TLV */
+       mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_BGSCAN, &tlv_pos);
+
+       le16_add_cpu(&cmd->size, tlv_pos - bgscan_config->tlv);
+
+       return 0;
+}
+
+int mwifiex_stop_bg_scan(struct mwifiex_private *priv)
+{
+       struct mwifiex_bg_scan_cfg *bgscan_cfg;
+
+       if (!priv->sched_scanning) {
+               dev_dbg(priv->adapter->dev, "bgscan already stopped!\n");
+               return 0;
+       }
+
+       bgscan_cfg = kzalloc(sizeof(*bgscan_cfg), GFP_KERNEL);
+       if (!bgscan_cfg)
+               return -ENOMEM;
+
+       bgscan_cfg->bss_type = MWIFIEX_BSS_MODE_INFRA;
+       bgscan_cfg->action = MWIFIEX_BGSCAN_ACT_SET;
+       bgscan_cfg->enable = false;
+
+       if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_BG_SCAN_CONFIG,
+                            HostCmd_ACT_GEN_SET, 0, bgscan_cfg, true)) {
+               kfree(bgscan_cfg);
+               return -EFAULT;
+       }
+
+       kfree(bgscan_cfg);
+       priv->sched_scanning = false;
+
+       return 0;
+}
+
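
mwifiex_cmd_802_11_bg_scan_config() above repeats one pattern for every optional parameter: cast the write cursor to the TLV type, fill header.type and header.len, fill the payload, then advance the cursor by sizeof(header) + len. A stripped-down sketch of that append step with a made-up u16 TLV (the struct name and payload are illustrative, not a real firmware TLV):

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>

struct example_u16_tlv {
        __le16 type;
        __le16 len;
        __le16 value;
} __packed;

/* Append one u16 TLV at tlv_pos and return the advanced cursor. */
static u8 *append_u16_tlv(u8 *tlv_pos, u16 type, u16 value)
{
        struct example_u16_tlv *tlv = (struct example_u16_tlv *)tlv_pos;

        tlv->type = cpu_to_le16(type);
        tlv->len = cpu_to_le16(sizeof(tlv->value));
        tlv->value = cpu_to_le16(value);

        return tlv_pos + sizeof(tlv->type) + sizeof(tlv->len) +
               le16_to_cpu(tlv->len);
}
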
 static void
 mwifiex_update_chan_statistics(struct mwifiex_private *priv,
                               struct mwifiex_ietypes_chanstats *tlv_stat)
index 4c8cae682c89c727de4518d3b78d42da388d0ac6..abf15dbdfe08e8e92d97e292c66d8a26e30026ff 100644 (file)
@@ -181,7 +181,7 @@ static int mwifiex_sdio_resume(struct device *dev)
 
        /* Disable Host Sleep */
        mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
-                         MWIFIEX_ASYNC_CMD);
+                         MWIFIEX_SYNC_CMD);
 
        return 0;
 }
@@ -1039,19 +1039,14 @@ done:
 
 /*
  * This function checks the firmware status in card.
- *
- * The winner interface is also determined by this function.
  */
 static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
                                   u32 poll_num)
 {
-       struct sdio_mmc_card *card = adapter->card;
        int ret = 0;
        u16 firmware_stat;
        u32 tries;
-       u8 winner_status;
 
-       /* Wait for firmware initialization event */
        for (tries = 0; tries < poll_num; tries++) {
                ret = mwifiex_sdio_read_fw_status(adapter, &firmware_stat);
                if (ret)
@@ -1065,16 +1060,25 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
                }
        }
 
-       if (ret) {
-               if (mwifiex_read_reg
-                   (adapter, card->reg->status_reg_0, &winner_status))
-                       winner_status = 0;
+       return ret;
+}
+
+/* This function checks if WLAN is the winner.
+ */
+static int mwifiex_check_winner_status(struct mwifiex_adapter *adapter)
+{
+       int ret = 0;
+       u8 winner = 0;
+       struct sdio_mmc_card *card = adapter->card;
+
+       if (mwifiex_read_reg(adapter, card->reg->status_reg_0, &winner))
+               return -1;
+
+       if (winner)
+               adapter->winner = 0;
+       else
+               adapter->winner = 1;
 
-               if (winner_status)
-                       adapter->winner = 0;
-               else
-                       adapter->winner = 1;
-       }
        return ret;
 }
 
@@ -2620,6 +2624,7 @@ static struct mwifiex_if_ops sdio_ops = {
        .init_if = mwifiex_init_sdio,
        .cleanup_if = mwifiex_cleanup_sdio,
        .check_fw_status = mwifiex_check_fw_status,
+       .check_winner_status = mwifiex_check_winner_status,
        .prog_fw = mwifiex_prog_fw_w_helper,
        .register_dev = mwifiex_register_dev,
        .unregister_dev = mwifiex_unregister_dev,
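
Both the PCIe and SDIO hunks split the old combined check into two if_ops callbacks: check_fw_status() now only polls the firmware-ready status, and the new check_winner_status() reads the winner register on its own. A hedged sketch of how a caller might chain the two (a hypothetical wrapper, not mwifiex's actual init path):

/* Hypothetical helper: poll firmware status first, then query which
 * interface won the firmware download via the new optional callback. */
static int check_fw_then_winner(struct mwifiex_adapter *adapter, u32 poll_num)
{
        int ret = adapter->if_ops.check_fw_status(adapter, poll_num);

        if (ret)
                return ret;

        if (adapter->if_ops.check_winner_status)
                ret = adapter->if_ops.check_winner_status(adapter);

        return ret;
}
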
index e486867a4c67520fc2a8a8520368009975fd8878..30f152601c5774e266219066acee03f2cb3b21a8 100644 (file)
@@ -1813,6 +1813,22 @@ static int mwifiex_cmd_sdio_rx_aggr_cfg(struct host_cmd_ds_command *cmd,
        return 0;
 }
 
+/* This function prepares a command to get the HS wakeup reason.
+ *
+ * Preparation includes -
+ *      - Setting command ID, action and proper size
+ *      - Ensuring correct endian-ness
+ */
+static int mwifiex_cmd_get_wakeup_reason(struct mwifiex_private *priv,
+                                        struct host_cmd_ds_command *cmd)
+{
+       cmd->command = cpu_to_le16(HostCmd_CMD_HS_WAKEUP_REASON);
+       cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_wakeup_reason) +
+                               S_DS_GEN);
+
+       return 0;
+}
+
 /*
  * This function prepares the commands before sending them to the firmware.
  *
@@ -1873,6 +1889,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
        case HostCmd_CMD_802_11_SCAN:
                ret = mwifiex_cmd_802_11_scan(cmd_ptr, data_buf);
                break;
+       case HostCmd_CMD_802_11_BG_SCAN_CONFIG:
+               ret = mwifiex_cmd_802_11_bg_scan_config(priv, cmd_ptr,
+                                                       data_buf);
+               break;
        case HostCmd_CMD_802_11_BG_SCAN_QUERY:
                ret = mwifiex_cmd_802_11_bg_scan_query(cmd_ptr);
                break;
@@ -2063,6 +2083,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
                ret = mwifiex_cmd_sdio_rx_aggr_cfg(cmd_ptr, cmd_action,
                                                   data_buf);
                break;
+       case HostCmd_CMD_HS_WAKEUP_REASON:
+               ret = mwifiex_cmd_get_wakeup_reason(priv, cmd_ptr);
+               break;
        case HostCmd_CMD_MC_POLICY:
                ret = mwifiex_cmd_set_mc_policy(priv, cmd_ptr, cmd_action,
                                                data_buf);
index 9ac7aa2431b41533ab68f8495693c36c41f40801..d96523e10eb46ef051a1b40a35f408d42dc0404d 100644 (file)
@@ -1076,9 +1076,12 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
                break;
        case HostCmd_CMD_802_11_BG_SCAN_QUERY:
                ret = mwifiex_ret_802_11_scan(priv, resp);
+               cfg80211_sched_scan_results(priv->wdev.wiphy);
                mwifiex_dbg(adapter, CMD,
                            "info: CMD_RESP: BG_SCAN result is ready!\n");
                break;
+       case HostCmd_CMD_802_11_BG_SCAN_CONFIG:
+               break;
        case HostCmd_CMD_TXPWR_CFG:
                ret = mwifiex_ret_tx_power_cfg(priv, resp);
                break;
@@ -1233,6 +1236,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
        case HostCmd_CMD_SDIO_SP_RX_AGGR_CFG:
                ret = mwifiex_ret_sdio_rx_aggr_cfg(priv, resp);
                break;
+       case HostCmd_CMD_HS_WAKEUP_REASON:
+               ret = mwifiex_ret_wakeup_reason(priv, resp, data_buf);
+               break;
        case HostCmd_CMD_TDLS_CONFIG:
                break;
        case HostCmd_CMD_ROBUST_COEX:
index ff3ee9dfbbd54f51dbd42f8ff5478d78bc63013b..070bce401151a522983a3ce20fe6055ae4113612 100644 (file)
@@ -92,6 +92,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
        priv->is_data_rate_auto = true;
        priv->data_rate = 0;
 
+       priv->assoc_resp_ht_param = 0;
+       priv->ht_param_present = false;
+
        if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA ||
             GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) && priv->hist_data)
                mwifiex_hist_data_reset(priv);
@@ -607,11 +610,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
 
        case EVENT_PS_AWAKE:
                mwifiex_dbg(adapter, EVENT, "info: EVENT: AWAKE\n");
-               if (!adapter->pps_uapsd_mode && priv->port_open &&
+               if (!adapter->pps_uapsd_mode &&
+                   (priv->port_open ||
+                    (priv->bss_mode == NL80211_IFTYPE_ADHOC)) &&
                    priv->media_connected && adapter->sleep_period.period) {
-                               adapter->pps_uapsd_mode = true;
-                               mwifiex_dbg(adapter, EVENT,
-                                           "event: PPS/UAPSD mode activated\n");
+                       adapter->pps_uapsd_mode = true;
+                       mwifiex_dbg(adapter, EVENT,
+                                   "event: PPS/UAPSD mode activated\n");
                }
                adapter->tx_lock_flag = false;
                if (adapter->pps_uapsd_mode && adapter->gen_null_pkt) {
@@ -686,6 +691,13 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
                                       HostCmd_ACT_GEN_GET, 0, NULL, false);
                break;
 
+       case EVENT_BG_SCAN_STOPPED:
+               dev_dbg(adapter->dev, "event: BGS_STOPPED\n");
+               cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+               if (priv->sched_scanning)
+                       priv->sched_scanning = false;
+               break;
+
        case EVENT_PORT_RELEASE:
                mwifiex_dbg(adapter, EVENT, "event: PORT RELEASE\n");
                priv->port_open = true;
index 6a4fc5d183cfec34780d8ac0d6dcd2f30e0b7f0c..5cbee58f8781386d945b70073d385665ea8798e5 100644 (file)
@@ -504,6 +504,20 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
                }
        }
 
+       priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+       if (priv && priv->sched_scanning) {
+#ifdef CONFIG_PM
+               if (!priv->wdev.wiphy->wowlan_config->nd_config) {
+#endif
+                       mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
+                       mwifiex_stop_bg_scan(priv);
+                       cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+#ifdef CONFIG_PM
+               }
+#endif
+       }
+
        if (adapter->hs_activated) {
                mwifiex_dbg(adapter, CMD,
                            "cmd: HS Already activated\n");
@@ -1114,11 +1128,12 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
  * with requisite parameters and calls the IOCTL handler.
  */
 int
-mwifiex_get_ver_ext(struct mwifiex_private *priv)
+mwifiex_get_ver_ext(struct mwifiex_private *priv, u32 version_str_sel)
 {
        struct mwifiex_ver_ext ver_ext;
 
        memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext));
+       ver_ext.version_str_sel = version_str_sel;
        if (mwifiex_send_cmd(priv, HostCmd_CMD_VERSION_EXT,
                             HostCmd_ACT_GEN_GET, 0, &ver_ext, true))
                return -1;
@@ -1450,3 +1465,19 @@ mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
 
        return 0;
 }
+
+/* This function gets the Host Sleep wakeup reason.
+ */
+int mwifiex_get_wakeup_reason(struct mwifiex_private *priv, u16 action,
+                             int cmd_type,
+                             struct mwifiex_ds_wakeup_reason *wakeup_reason)
+{
+       int status = 0;
+
+       status = mwifiex_send_cmd(priv, HostCmd_CMD_HS_WAKEUP_REASON,
+                                 HostCmd_ACT_GEN_GET, 0, wakeup_reason,
+                                 cmd_type == MWIFIEX_SYNC_CMD);
+
+       return status;
+}
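
mwifiex_get_wakeup_reason() above is a thin wrapper around mwifiex_send_cmd() for HostCmd_CMD_HS_WAKEUP_REASON; a synchronous caller would look roughly like the sketch below (query_wakeup_reason() is hypothetical, and the layout of struct mwifiex_ds_wakeup_reason is not shown in this hunk):

/* Hypothetical caller: fetch the HS wakeup reason synchronously. */
static int query_wakeup_reason(struct mwifiex_private *priv)
{
        struct mwifiex_ds_wakeup_reason reason = { };

        return mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET,
                                         MWIFIEX_SYNC_CMD, &reason);
}
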
index acccd6734e3b332cb172789e9b210c077465371f..0eb246502e1d119b03d83874cba3030ac4849092 100644 (file)
@@ -438,6 +438,7 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
                mwifiex_set_ba_params(priv);
                mwifiex_reset_11n_rx_seq_num(priv);
 
+               priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX;
                atomic_set(&priv->wmm.tx_pkts_queued, 0);
                atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
        }
@@ -475,7 +476,8 @@ mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
                priv = adapter->priv[i];
                if (!priv)
                        continue;
-               if (!priv->port_open)
+               if (!priv->port_open &&
+                   (priv->bss_mode != NL80211_IFTYPE_ADHOC))
                        continue;
                if (adapter->if_ops.is_port_ready &&
                    !adapter->if_ops.is_port_ready(priv))
@@ -1099,7 +1101,8 @@ mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
 
                        priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
 
-                       if (!priv_tmp->port_open ||
+                       if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
+                            !priv_tmp->port_open) ||
                            (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
                                continue;
 
index 30e3aaae32e2288ed1f7541b6da7a42eb6a8729a..088429d0a634d8c4372d45e2dc1cd3b7d2fabf1e 100644 (file)
@@ -5421,11 +5421,13 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
 
 static int
 mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                  enum ieee80211_ampdu_mlme_action action,
-                  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                  u8 buf_size, bool amsdu)
+                  struct ieee80211_ampdu_params *params)
 {
-
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+       u8 buf_size = params->buf_size;
        int i, rc = 0;
        struct mwl8k_priv *priv = hw->priv;
        struct mwl8k_ampdu_stream *stream;
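
Every ampdu_action conversion in this series (mwl8k, mt7601u, rt2800, rtl8xxxu, rtlwifi, rsi, cw1200) follows the same recipe: the long argument list collapses into a single struct ieee80211_ampdu_params and the driver pulls out the fields it still needs at the top of the callback. A hedged sketch of the converted callback shape (my_ampdu_action is a placeholder, not any driver's real handler):

#include <net/mac80211.h>

static int my_ampdu_action(struct ieee80211_hw *hw,
                           struct ieee80211_vif *vif,
                           struct ieee80211_ampdu_params *params)
{
        u16 tid = params->tid;

        switch (params->action) {
        case IEEE80211_AMPDU_TX_START:
                /* hardware setup would use params->sta, params->ssn and
                 * params->buf_size exactly as the old arguments did */
                ieee80211_start_tx_ba_cb_irqsafe(vif, params->sta->addr, tid);
                return 0;
        case IEEE80211_AMPDU_TX_STOP_CONT:
                ieee80211_stop_tx_ba_cb_irqsafe(vif, params->sta->addr, tid);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
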
index f715eee398510df829a61ba0f431610d1ab32baa..e70dd95239117f7e0ef8a2d668b6798727a06b46 100644 (file)
@@ -334,11 +334,13 @@ static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
 
 static int
 mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                 enum ieee80211_ampdu_mlme_action action,
-                 struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
-                 bool amsdu)
+                 struct ieee80211_ampdu_params *params)
 {
        struct mt7601u_dev *dev = hw->priv;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
        struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv;
 
        WARN_ON(msta->wcid.idx > GROUP_WCID(0));
index 9a3966cd6fbedb0e41e1f2c741405b10aee92fdc..155f343981fe29679d278b0621d3cf7ce64b2a77 100644 (file)
@@ -273,8 +273,10 @@ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
                           !(filter_flags & FIF_CONTROL));
-       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
        rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg);
index 1a6740b4d396211cd3c00290cd9abb06128c55bf..2553cdd7406623cde30a99531e4d86a74ae4836e 100644 (file)
@@ -274,8 +274,10 @@ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
                           !(filter_flags & FIF_CONTROL));
-       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
        rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
index d26018f30b7dfa9e8381ceb609d5eddd10ff47a8..2d64611de300146d98bcc254171f961e3af26831 100644 (file)
@@ -437,8 +437,10 @@ static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
                           !(filter_flags & FIF_CONTROL));
-       rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
        rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
index 9733b31a780d380fd2bfd550efe155943b0fdb34..7fa0128de7e310be2978b35a97746bc011acb324 100644 (file)
@@ -1490,7 +1490,8 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_FCSFAIL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
                           !(filter_flags & FIF_PLCPFAIL));
-       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
@@ -7935,10 +7936,11 @@ u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
 EXPORT_SYMBOL_GPL(rt2800_get_tsf);
 
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                       u8 buf_size, bool amsdu)
+                       struct ieee80211_ampdu_params *params)
 {
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
        struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv;
        int ret = 0;
 
index 440790b92b19e2927fd9b9d7446cbd3c8eef4f30..83f1a44fb9b481cb5f2a8dda9e9914b8113e91bb 100644 (file)
@@ -218,9 +218,7 @@ int rt2800_conf_tx(struct ieee80211_hw *hw,
                   const struct ieee80211_tx_queue_params *params);
 u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
 int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                       u8 buf_size, bool amsdu);
+                       struct ieee80211_ampdu_params *params);
 int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
                      struct survey_info *survey);
 void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
index 3282ddb766f4224a8e10e037fa7ea4332dfe0e27..6418620f95ff62a7e1a562b7cb29d6e306d2d4ad 100644 (file)
  * amount of bytes needed to move the data.
  */
 #define ALIGN_SIZE(__skb, __header) \
-       (  ((unsigned long)((__skb)->data + (__header))) & 3 )
+       (((unsigned long)((__skb)->data + (__header))) & 3)
 
 /*
  * Constants for extra TX headroom for alignment purposes.
 #define SLOT_TIME              20
 #define SHORT_SLOT_TIME                9
 #define SIFS                   10
-#define PIFS                   ( SIFS + SLOT_TIME )
-#define SHORT_PIFS             ( SIFS + SHORT_SLOT_TIME )
-#define DIFS                   ( PIFS + SLOT_TIME )
-#define SHORT_DIFS             ( SHORT_PIFS + SHORT_SLOT_TIME )
-#define EIFS                   ( SIFS + DIFS + \
-                                 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
-#define SHORT_EIFS             ( SIFS + SHORT_DIFS + \
-                                 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10) )
+#define PIFS                   (SIFS + SLOT_TIME)
+#define SHORT_PIFS             (SIFS + SHORT_SLOT_TIME)
+#define DIFS                   (PIFS + SLOT_TIME)
+#define SHORT_DIFS             (SHORT_PIFS + SHORT_SLOT_TIME)
+#define EIFS                   (SIFS + DIFS + \
+                                 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10))
+#define SHORT_EIFS             (SIFS + SHORT_DIFS + \
+                                 GET_DURATION(IEEE80211_HEADER + ACK_SIZE, 10))
 
 enum rt2x00_chip_intf {
        RT2X00_CHIP_INTF_PCI,
@@ -669,6 +669,7 @@ enum rt2x00_state_flags {
        CONFIG_POWERSAVING,
        CONFIG_HT_DISABLED,
        CONFIG_QOS_DISABLED,
+       CONFIG_MONITORING,
 
        /*
         * Mark we currently are sequentially reading TX_STA_FIFO register
index 7e8bb1198ae9f749dfc3e419f1ae652ffa7a0b8d..6a1f508d472f0e6ac3be534d780d209e90a0278d 100644 (file)
@@ -277,6 +277,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
        else
                clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags);
 
+       if (conf->flags & IEEE80211_CONF_MONITOR)
+               set_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+       else
+               clear_bit(CONFIG_MONITORING, &rt2x00dev->flags);
+
        rt2x00dev->curr_band = conf->chandef.chan->band;
        rt2x00dev->curr_freq = conf->chandef.chan->center_freq;
        rt2x00dev->tx_power = conf->power_level;
index 90fdb02b55e79debd5bd89b31104e90a260c596c..25ee3cb8e982db287ac269cac97666ad7dd60ec6 100644 (file)
@@ -629,7 +629,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
        data += sprintf(data, "register\tbase\twords\twordsize\n");
 #define RT2X00DEBUGFS_SPRINTF_REGISTER(__name)                 \
 {                                                              \
-       if(debug->__name.read)                                  \
+       if (debug->__name.read)                                 \
                data += sprintf(data, __stringify(__name)       \
                                "\t%d\t%d\t%d\n",               \
                                debug->__name.word_base,        \
@@ -699,7 +699,7 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
 
 #define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name)                    \
 ({                                                                             \
-       if(debug->__name.read) {                                                \
+       if (debug->__name.read) {                                               \
                (__intf)->__name##_off_entry =                                  \
                debugfs_create_u32(__stringify(__name) "_offset",               \
                                       S_IRUSR | S_IWUSR,                       \
index 3c26ee65a41513127cbc999416ee90fabc11d890..13da95a24cf77bb366481252226f3c4566352d67 100644 (file)
@@ -385,11 +385,6 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
                        *total_flags |= FIF_PSPOLL;
        }
 
-       /*
-        * Check if there is any work left for us.
-        */
-       if (rt2x00dev->packet_filter == *total_flags)
-               return;
        rt2x00dev->packet_filter = *total_flags;
 
        rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
index c0e730ea1b69058a663f953e73165fa23a850c72..24a3436ef952870a118e0adddf5f06fa3c5ca6f5 100644 (file)
@@ -530,8 +530,10 @@ static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
                           !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
-       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
index 1442075a838218f61366d84d1ec5d40fb45bc7be..ab8641547a1f6a61f763f9ab7dfc2e8e15dd7a4f 100644 (file)
 #define PAIRWISE_TA_TABLE_BASE         0x1a00
 
 #define SHARED_KEY_ENTRY(__idx) \
-       ( SHARED_KEY_TABLE_BASE + \
-               ((__idx) * sizeof(struct hw_key_entry)) )
+       (SHARED_KEY_TABLE_BASE + \
+               ((__idx) * sizeof(struct hw_key_entry)))
 #define PAIRWISE_KEY_ENTRY(__idx) \
-       ( PAIRWISE_KEY_TABLE_BASE + \
-               ((__idx) * sizeof(struct hw_key_entry)) )
+       (PAIRWISE_KEY_TABLE_BASE + \
+               ((__idx) * sizeof(struct hw_key_entry)))
 #define PAIRWISE_TA_ENTRY(__idx) \
-       ( PAIRWISE_TA_TABLE_BASE + \
-               ((__idx) * sizeof(struct hw_pairwise_ta_entry)) )
+       (PAIRWISE_TA_TABLE_BASE + \
+               ((__idx) * sizeof(struct hw_pairwise_ta_entry)))
 
 struct hw_key_entry {
        u8 key[16];
@@ -180,7 +180,7 @@ struct hw_pairwise_ta_entry {
 #define HW_BEACON_BASE3                        0x2f00
 
 #define HW_BEACON_OFFSET(__index) \
-       ( HW_BEACON_BASE0 + (__index * 0x0100) )
+       (HW_BEACON_BASE0 + (__index * 0x0100))
 
 /*
  * HOST-MCU shared memory.
@@ -1287,9 +1287,9 @@ struct hw_pairwise_ta_entry {
 /*
  * DMA descriptor defines.
  */
-#define TXD_DESC_SIZE                  ( 16 * sizeof(__le32) )
-#define TXINFO_SIZE                    ( 6 * sizeof(__le32) )
-#define RXD_DESC_SIZE                  ( 16 * sizeof(__le32) )
+#define TXD_DESC_SIZE                  (16 * sizeof(__le32))
+#define TXINFO_SIZE                    (6 * sizeof(__le32))
+#define RXD_DESC_SIZE                  (16 * sizeof(__le32))
 
 /*
  * TX descriptor format for TX, PRIO and Beacon Ring.
index 7081e13b4fd67ebbaa7fa8af3f15a1d81fbc219e..7bbc869311681988dbbcd50907fd33d84f4f1a85 100644 (file)
@@ -480,8 +480,10 @@ static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
                           !(filter_flags & (FIF_CONTROL | FIF_PSPOLL)));
-       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, 1);
+       rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags));
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
+                          !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) &&
                           !rt2x00dev->intf_ap_count);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
        rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
index 6aed923a709ae3606225307cdcb2b7170442e11d..7d820c3953754260c05a5f7e21478e1cbf14555e 100644 (file)
@@ -5375,13 +5375,13 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
 
 static int
 rtl8xxxu_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-                     enum ieee80211_ampdu_mlme_action action,
-                     struct ieee80211_sta *sta, u16 tid, u16 *ssn, u8 buf_size,
-                     bool amsdu)
+                     struct ieee80211_ampdu_params *params)
 {
        struct rtl8xxxu_priv *priv = hw->priv;
        struct device *dev = &priv->udev->dev;
        u8 ampdu_factor, ampdu_density;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
 
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
index 4ae421ef30d94f4f8bc93d85645eeda4548ba835..f2507610314ba81b94accf2a94cfb0284c11b641 100644 (file)
@@ -1371,11 +1371,13 @@ static void rtl_op_sta_notify(struct ieee80211_hw *hw,
 
 static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
                               struct ieee80211_vif *vif,
-                              enum ieee80211_ampdu_mlme_action action,
-                              struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                              u8 buf_size, bool amsdu)
+                              struct ieee80211_ampdu_params *params)
 {
        struct rtl_priv *rtlpriv = rtl_priv(hw);
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        switch (action) {
        case IEEE80211_AMPDU_TX_START:
index 74c14ce28238eed70f8ad798f0724642147411d3..28f7010e7108bf3c3f8d3a89296b2e0b16f2c555 100644 (file)
@@ -138,6 +138,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
                    ((wireless_mode == WIRELESS_MODE_N_5G) ||
                     (wireless_mode == WIRELESS_MODE_N_24G)))
                        rate->flags |= IEEE80211_TX_RC_MCS;
+               if (sta && sta->vht_cap.vht_supported &&
+                   (wireless_mode == WIRELESS_MODE_AC_5G ||
+                    wireless_mode == WIRELESS_MODE_AC_24G ||
+                    wireless_mode == WIRELESS_MODE_AC_ONLY))
+                       rate->flags |= IEEE80211_TX_RC_VHT_MCS;
        }
 }
 
index a62bf0a65c321bb73553c403c5233ccd30d1f8c7..5be34118e0af22b69392f5caf07f9eef5672c7fe 100644 (file)
@@ -351,7 +351,6 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
        case COUNTRY_CODE_SPAIN:
        case COUNTRY_CODE_FRANCE:
        case COUNTRY_CODE_ISRAEL:
-       case COUNTRY_CODE_WORLD_WIDE_13:
                return &rtl_regdom_12_13;
        case COUNTRY_CODE_MKK:
        case COUNTRY_CODE_MKK1:
@@ -360,6 +359,7 @@ static const struct ieee80211_regdomain *_rtl_regdomain_select(
                return &rtl_regdom_14_60_64;
        case COUNTRY_CODE_GLOBAL_DOMAIN:
                return &rtl_regdom_14;
+       case COUNTRY_CODE_WORLD_WIDE_13:
        case COUNTRY_CODE_WORLD_WIDE_13_5G_ALL:
                return &rtl_regdom_12_13_5g_all;
        default:
index b57cfd9651968a51e2e77ae31b86712e824057fb..95dcbff4673b1490c72fec5ee9bc55bcfa5f7c20 100644 (file)
@@ -626,7 +626,7 @@ static void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
                rtl_dm_dig->min_undec_pwdb_for_dm =
                    rtlpriv->dm.entry_min_undec_sm_pwdb;
                RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
-                        "AP Ext Port or disconnet PWDB = 0x%x\n",
+                        "AP Ext Port or disconnect PWDB = 0x%x\n",
                         rtl_dm_dig->min_undec_pwdb_for_dm);
        }
        RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
index b5bcc933a2a683df412139b1204fee320096a889..4df992de7d0731508a6c42fccf51096928156e9e 100644 (file)
@@ -659,29 +659,24 @@ static int rsi_mac80211_set_key(struct ieee80211_hw *hw,
  *                              informs the f/w regarding this.
  * @hw: Pointer to the ieee80211_hw structure.
  * @vif: Pointer to the ieee80211_vif structure.
- * @action: ieee80211_ampdu_mlme_action enum.
- * @sta: Pointer to the ieee80211_sta structure.
- * @tid: Traffic identifier.
- * @ssn: Pointer to ssn value.
- * @buf_size: Buffer size (for kernel version > 2.6.38).
- * @amsdu: is AMSDU in AMPDU allowed
+ * @params: Pointer to A-MPDU action parameters
  *
  * Return: status: 0 on success, negative error code on failure.
  */
 static int rsi_mac80211_ampdu_action(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif,
-                                    enum ieee80211_ampdu_mlme_action action,
-                                    struct ieee80211_sta *sta,
-                                    unsigned short tid,
-                                    unsigned short *ssn,
-                                    unsigned char buf_size,
-                                    bool amsdu)
+                                    struct ieee80211_ampdu_params *params)
 {
        int status = -EOPNOTSUPP;
        struct rsi_hw *adapter = hw->priv;
        struct rsi_common *common = adapter->priv;
        u16 seq_no = 0;
        u8 ii = 0;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
+       u8 buf_size = params->buf_size;
 
        for (ii = 0; ii < RSI_MAX_VIFS; ii++) {
                if (vif == adapter->vifs[ii])
index 06321c799c902689825c89098c796e875c737079..d0ddcde6c695fb6c5efd9b2283c76e98328fdbdf 100644 (file)
@@ -2129,9 +2129,7 @@ void cw1200_mcast_timeout(unsigned long arg)
 
 int cw1200_ampdu_action(struct ieee80211_hw *hw,
                        struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                       u8 buf_size, bool amsdu)
+                       struct ieee80211_ampdu_params *params)
 {
        /* Aggregation is implemented fully in firmware,
         * including block ack negotiation. Do not allow
index bebb3379017f6d40c3cd58b92ddfaabab1259f59..a0bacaa39b3193f230c61526afe6bdc68f38b7a0 100644 (file)
@@ -109,9 +109,7 @@ void cw1200_bss_info_changed(struct ieee80211_hw *dev,
                             u32 changed);
 int cw1200_ampdu_action(struct ieee80211_hw *hw,
                        struct ieee80211_vif *vif,
-                       enum ieee80211_ampdu_mlme_action action,
-                       struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                       u8 buf_size, bool amsdu);
+                       struct ieee80211_ampdu_params *params);
 
 void cw1200_suspend_resume(struct cw1200_common *priv,
                          struct wsm_suspend_resume *arg);
index 969c9d79bfc8bd92c2c4e48c213c8e88c9611121..8a8f1e7113846af0ac1bec70d19c815dcf60c9b1 100644 (file)
@@ -13,7 +13,7 @@ config WLCORE
 
 config WLCORE_SPI
        tristate "TI wlcore SPI support"
-       depends on WLCORE && SPI_MASTER
+       depends on WLCORE && SPI_MASTER && OF
        select CRC7
        ---help---
          This module adds support for the SPI interface of adapters using
index c96405498bf43c68b997e73d512e88be4b153ec0..4b59f67724dead0621174da68b2e6167506b3b95 100644 (file)
@@ -38,7 +38,7 @@
 
 int wlcore_event_fw_logger(struct wl1271 *wl)
 {
-       u32 ret;
+       int ret;
        struct fw_logger_information fw_log;
        u8  *buffer;
        u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
index d1109c4f0f0d1d7570ec373734db3f93117556f9..45662cf3169f78dd20554cde3373336f84cb6516 100644 (file)
@@ -5187,14 +5187,16 @@ out:
 
 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
-                                 enum ieee80211_ampdu_mlme_action action,
-                                 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                                 u8 buf_size, bool amsdu)
+                                 struct ieee80211_ampdu_params *params)
 {
        struct wl1271 *wl = hw->priv;
        struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
        int ret;
        u8 hlid, *ba_bitmap;
+       struct ieee80211_sta *sta = params->sta;
+       enum ieee80211_ampdu_mlme_action action = params->action;
+       u16 tid = params->tid;
+       u16 *ssn = &params->ssn;
 
        wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
                     tid);
index 44f059f7f34e9473ce4e4b33648a3af9b2e44f1e..020ac1a4b40834131d6355a7464b73129fd93ad1 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/spi/spi.h>
 #include <linux/wl12xx.h>
 #include <linux/platform_device.h>
+#include <linux/of_irq.h>
+#include <linux/regulator/consumer.h>
 
 #include "wlcore.h"
 #include "wl12xx_80211.h"
@@ -81,6 +83,7 @@
 struct wl12xx_spi_glue {
        struct device *dev;
        struct platform_device *core;
+       struct regulator *reg; /* Power regulator */
 };
 
 static void wl12xx_spi_reset(struct device *child)
@@ -318,14 +321,76 @@ static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
        return 0;
 }
 
+/**
+ * wl12xx_spi_set_power - power on/off the wl12xx unit
+ * @child: wl12xx device handle.
+ * @enable: true/false to power on/off the unit.
+ *
+ * Use the WiFi enable regulator to enable/disable the WiFi unit.
+ */
+static int wl12xx_spi_set_power(struct device *child, bool enable)
+{
+       int ret = 0;
+       struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+
+       WARN_ON(!glue->reg);
+
+       /* Update regulator state */
+       if (enable) {
+               ret = regulator_enable(glue->reg);
+               if (ret)
+                       dev_err(child, "Power enable failure\n");
+       } else {
+               ret = regulator_disable(glue->reg);
+               if (ret)
+                       dev_err(child, "Power disable failure\n");
+       }
+
+       return ret;
+}
+
 static struct wl1271_if_operations spi_ops = {
        .read           = wl12xx_spi_raw_read,
        .write          = wl12xx_spi_raw_write,
        .reset          = wl12xx_spi_reset,
        .init           = wl12xx_spi_init,
+       .power          = wl12xx_spi_set_power,
        .set_block_size = NULL,
 };
 
+static const struct of_device_id wlcore_spi_of_match_table[] = {
+       { .compatible = "ti,wl1271" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table);
+
+/**
+ * wlcore_probe_of - DT node parsing.
+ * @spi: SPI slave device parameters.
+ * @glue: wl12xx SPI bus to slave device glue parameters.
+ * @pdev_data: wlcore device parameters.
+ */
+static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
+                          struct wlcore_platdev_data *pdev_data)
+{
+       struct device_node *dt_node = spi->dev.of_node;
+       int ret;
+
+       if (of_find_property(dt_node, "clock-xtal", NULL))
+               pdev_data->ref_clock_xtal = true;
+
+       ret = of_property_read_u32(dt_node, "ref-clock-frequency",
+                                  &pdev_data->ref_clock_freq);
+       if (IS_ERR_VALUE(ret)) {
+               dev_err(glue->dev,
+                       "can't get reference clock frequency (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
 static int wl1271_probe(struct spi_device *spi)
 {
        struct wl12xx_spi_glue *glue;
@@ -335,8 +400,6 @@ static int wl1271_probe(struct spi_device *spi)
 
        memset(&pdev_data, 0x00, sizeof(pdev_data));
 
-       /* TODO: add DT parsing when needed */
-
        pdev_data.if_ops = &spi_ops;
 
        glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL);
@@ -353,6 +416,21 @@ static int wl1271_probe(struct spi_device *spi)
         * comes from the board-peripherals file */
        spi->bits_per_word = 32;
 
+       glue->reg = devm_regulator_get(&spi->dev, "vwlan");
+       if (PTR_ERR(glue->reg) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+       if (IS_ERR(glue->reg)) {
+               dev_err(glue->dev, "can't get regulator\n");
+               return PTR_ERR(glue->reg);
+       }
+
+       ret = wlcore_probe_of(spi, glue, &pdev_data);
+       if (IS_ERR_VALUE(ret)) {
+               dev_err(glue->dev,
+                       "can't get device tree parameters (%d)\n", ret);
+               return ret;
+       }
+
        ret = spi_setup(spi);
        if (ret < 0) {
                dev_err(glue->dev, "spi_setup failed\n");
@@ -370,7 +448,7 @@ static int wl1271_probe(struct spi_device *spi)
        memset(res, 0x00, sizeof(res));
 
        res[0].start = spi->irq;
-       res[0].flags = IORESOURCE_IRQ;
+       res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(spi->irq);
        res[0].name = "irq";
 
        ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
@@ -408,10 +486,10 @@ static int wl1271_remove(struct spi_device *spi)
        return 0;
 }
 
-
 static struct spi_driver wl1271_spi_driver = {
        .driver = {
                .name           = "wl1271_spi",
+               .of_match_table = of_match_ptr(wlcore_spi_of_match_table),
        },
 
        .probe          = wl1271_probe,
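
Taken together, the spi.c hunks above add regulator-based power control and device-tree probing. A condensed sketch of the probe-time sequence, using only the supply and property names visible in the diff ("vwlan", "clock-xtal", "ref-clock-frequency"); the function name and the trimmed error handling are illustrative:

#include <linux/err.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
#include "wlcore.h"              /* struct wlcore_platdev_data, per the diff */

/* Sketch: grab the "vwlan" supply with deferral handling, then read the
 * optional clock properties from the device tree node.
 */
static int example_wlcore_spi_probe_of(struct spi_device *spi,
                                       struct wlcore_platdev_data *pdev_data)
{
        struct device_node *np = spi->dev.of_node;
        struct regulator *reg;
        int ret;

        reg = devm_regulator_get(&spi->dev, "vwlan");
        if (PTR_ERR(reg) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* supply not registered yet */
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        if (of_find_property(np, "clock-xtal", NULL))
                pdev_data->ref_clock_xtal = true;

        ret = of_property_read_u32(np, "ref-clock-frequency",
                                   &pdev_data->ref_clock_freq);
        if (ret)
                return ret;

        /* the unit is later powered via regulator_enable()/_disable() */
        return 0;
}
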
index 0333ab0fd9267f2910398bfc3270f615e76cc686..112825200d4192d6da01ae5b70ba52e41c886ffc 100644 (file)
@@ -251,6 +251,7 @@ struct xenvif {
        unsigned int stalled_queues;
 
        struct xenbus_watch credit_watch;
+       struct xenbus_watch mcast_ctrl_watch;
 
        spinlock_t lock;
 
index 56ebd8267386e6a91cabf506962e15f5d83531ec..39a303de20dd4e0f37bfa00d4c5eaade6590519c 100644 (file)
@@ -327,7 +327,7 @@ static int netback_probe(struct xenbus_device *dev,
                        goto abort_transaction;
                }
 
-               /* We support multicast-control. */
+               /* We support dynamic multicast-control. */
                err = xenbus_printf(xbt, dev->nodename,
                                    "feature-multicast-control", "%d", 1);
                if (err) {
@@ -335,6 +335,14 @@ static int netback_probe(struct xenbus_device *dev,
                        goto abort_transaction;
                }
 
+               err = xenbus_printf(xbt, dev->nodename,
+                                   "feature-dynamic-multicast-control",
+                                   "%d", 1);
+               if (err) {
+                       message = "writing feature-dynamic-multicast-control";
+                       goto abort_transaction;
+               }
+
                err = xenbus_transaction_end(xbt, 0);
        } while (err == -EAGAIN);
 
@@ -683,7 +691,8 @@ static void xen_net_rate_changed(struct xenbus_watch *watch,
        }
 }
 
-static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
+static int xen_register_credit_watch(struct xenbus_device *dev,
+                                    struct xenvif *vif)
 {
        int err = 0;
        char *node;
@@ -708,7 +717,7 @@ static int xen_register_watchers(struct xenbus_device *dev, struct xenvif *vif)
        return err;
 }
 
-static void xen_unregister_watchers(struct xenvif *vif)
+static void xen_unregister_credit_watch(struct xenvif *vif)
 {
        if (vif->credit_watch.node) {
                unregister_xenbus_watch(&vif->credit_watch);
@@ -717,6 +726,75 @@ static void xen_unregister_watchers(struct xenvif *vif)
        }
 }
 
+static void xen_mcast_ctrl_changed(struct xenbus_watch *watch,
+                                  const char **vec, unsigned int len)
+{
+       struct xenvif *vif = container_of(watch, struct xenvif,
+                                         mcast_ctrl_watch);
+       struct xenbus_device *dev = xenvif_to_xenbus_device(vif);
+       int val;
+
+       if (xenbus_scanf(XBT_NIL, dev->otherend,
+                        "request-multicast-control", "%d", &val) < 0)
+               val = 0;
+       vif->multicast_control = !!val;
+}
+
+static int xen_register_mcast_ctrl_watch(struct xenbus_device *dev,
+                                        struct xenvif *vif)
+{
+       int err = 0;
+       char *node;
+       unsigned maxlen = strlen(dev->otherend) +
+               sizeof("/request-multicast-control");
+
+       if (vif->mcast_ctrl_watch.node) {
+               pr_err_ratelimited("Watch is already registered\n");
+               return -EADDRINUSE;
+       }
+
+       node = kmalloc(maxlen, GFP_KERNEL);
+       if (!node) {
+               pr_err("Failed to allocate memory for watch\n");
+               return -ENOMEM;
+       }
+       snprintf(node, maxlen, "%s/request-multicast-control",
+                dev->otherend);
+       vif->mcast_ctrl_watch.node = node;
+       vif->mcast_ctrl_watch.callback = xen_mcast_ctrl_changed;
+       err = register_xenbus_watch(&vif->mcast_ctrl_watch);
+       if (err) {
+               pr_err("Failed to set watcher %s\n",
+                      vif->mcast_ctrl_watch.node);
+               kfree(node);
+               vif->mcast_ctrl_watch.node = NULL;
+               vif->mcast_ctrl_watch.callback = NULL;
+       }
+       return err;
+}
+
+static void xen_unregister_mcast_ctrl_watch(struct xenvif *vif)
+{
+       if (vif->mcast_ctrl_watch.node) {
+               unregister_xenbus_watch(&vif->mcast_ctrl_watch);
+               kfree(vif->mcast_ctrl_watch.node);
+               vif->mcast_ctrl_watch.node = NULL;
+       }
+}
+
+static void xen_register_watchers(struct xenbus_device *dev,
+                                 struct xenvif *vif)
+{
+       xen_register_credit_watch(dev, vif);
+       xen_register_mcast_ctrl_watch(dev, vif);
+}
+
+static void xen_unregister_watchers(struct xenvif *vif)
+{
+       xen_unregister_mcast_ctrl_watch(vif);
+       xen_unregister_credit_watch(vif);
+}
+
 static void unregister_hotplug_status_watch(struct backend_info *be)
 {
        if (be->have_hotplug_status_watch) {
@@ -1030,11 +1108,6 @@ static int read_xenbus_vif_flags(struct backend_info *be)
                val = 0;
        vif->ipv6_csum = !!val;
 
-       if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
-                        "%d", &val) < 0)
-               val = 0;
-       vif->multicast_control = !!val;
-
        return 0;
 }
 
index d6abf191122a3260cbd97f5c1a2dbd1c2cb5c5e2..96ccd4e943db49908b0821f1d515aef36072db15 100644 (file)
@@ -364,6 +364,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
        RING_IDX cons, prod;
        unsigned short id;
        struct sk_buff *skb;
+       bool more_to_do;
 
        BUG_ON(!netif_carrier_ok(queue->info->netdev));
 
@@ -398,18 +399,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 
                queue->tx.rsp_cons = prod;
 
-               /*
-                * Set a new event, then check for race with update of tx_cons.
-                * Note that it is essential to schedule a callback, no matter
-                * how few buffers are pending. Even if there is space in the
-                * transmit ring, higher layers may be blocked because too much
-                * data is outstanding: in such cases notification from Xen is
-                * likely to be the only kick that we'll get.
-                */
-               queue->tx.sring->rsp_event =
-                       prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
-               mb();           /* update shared area */
-       } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
+               RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
+       } while (more_to_do);
 
        xennet_maybe_wake_tx(queue);
 }
index 64a90252c57f24c615223246854c13edeb066142..5f97da1947e396e9252809106ea61fd26d8e1db7 100644 (file)
@@ -19,7 +19,7 @@
 
 #include <linux/completion.h>
 #include <linux/firmware.h>
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <crypto/sha.h>
 
 #include "s3fwrn5.h"
@@ -429,8 +429,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
 {
        struct s3fwrn5_fw_image *fw = &fw_info->fw;
        u8 hash_data[SHA1_DIGEST_SIZE];
-       struct scatterlist sg;
-       struct hash_desc desc;
+       struct crypto_shash *tfm;
        u32 image_size, off;
        int ret;
 
@@ -438,12 +437,31 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
 
        /* Compute SHA of firmware data */
 
-       sg_init_one(&sg, fw->image, image_size);
-       desc.tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
-       crypto_hash_init(&desc);
-       crypto_hash_update(&desc, &sg, image_size);
-       crypto_hash_final(&desc, hash_data);
-       crypto_free_hash(desc.tfm);
+       tfm = crypto_alloc_shash("sha1", 0, 0);
+       if (IS_ERR(tfm)) {
+               ret = PTR_ERR(tfm);
+               dev_err(&fw_info->ndev->nfc_dev->dev,
+                       "Cannot allocate shash (code=%d)\n", ret);
+               goto out;
+       }
+
+       {
+               SHASH_DESC_ON_STACK(desc, tfm);
+
+               desc->tfm = tfm;
+               desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+               ret = crypto_shash_digest(desc, fw->image, image_size,
+                                         hash_data);
+               shash_desc_zero(desc);
+       }
+
+       crypto_free_shash(tfm);
+       if (ret) {
+               dev_err(&fw_info->ndev->nfc_dev->dev,
+                       "Cannot compute hash (code=%d)\n", ret);
+               goto out;
+       }
 
        /* Firmware update process */
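
The conversion above moves the firmware hash from the legacy crypto_hash API to the synchronous shash interface with an on-stack descriptor. A self-contained sketch of the same one-shot digest over a linear buffer; the helper name is illustrative, while the allocation, digest and cleanup calls mirror the diff:

#include <crypto/hash.h>

/* Compute SHA-1 of a contiguous buffer using the synchronous shash API. */
static int example_sha1_digest(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        int ret;

        tfm = crypto_alloc_shash("sha1", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
                ret = crypto_shash_digest(desc, data, len, out);
                shash_desc_zero(desc);  /* scrub the descriptor state */
        }

        crypto_free_shash(tfm);
        return ret;
}
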
 
index b0045a505dc88c9ef56dcc1094a198bd011d24a7..95825b38559addb8a0dc5cca22bc305e228e65f6 100644 (file)
@@ -55,7 +55,7 @@ static int e820_pmem_probe(struct platform_device *pdev)
        for (p = iomem_resource.child; p ; p = p->sibling) {
                struct nd_region_desc ndr_desc;
 
-               if (strncmp(p->name, "Persistent Memory (legacy)", 26) != 0)
+               if (p->desc != IORES_DESC_PERSISTENT_MEMORY_LEGACY)
                        continue;
 
                memset(&ndr_desc, 0, sizeof(ndr_desc));
index 8ebfcaae3f5a06a4aca19611fc7c5abdeb29f16a..9edf7eb7d17cc39713e67e4dd62d4e16c0d93e07 100644 (file)
@@ -1277,10 +1277,12 @@ static ssize_t mode_show(struct device *dev,
 
        device_lock(dev);
        claim = ndns->claim;
-       if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim)))
-               mode = "memory";
-       else if (claim && is_nd_btt(claim))
+       if (claim && is_nd_btt(claim))
                mode = "safe";
+       else if (claim && is_nd_pfn(claim))
+               mode = "memory";
+       else if (!claim && pmem_should_map_pages(dev))
+               mode = "memory";
        else
                mode = "raw";
        rc = sprintf(buf, "%s\n", mode);
index 0cc9048b86e23103900d36f75a0b64746521ee18..ae81a2f1da50b1e9f9cf5a14bd180df19b3ccab1 100644 (file)
@@ -301,10 +301,8 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 
        switch (le32_to_cpu(pfn_sb->mode)) {
        case PFN_MODE_RAM:
-               break;
        case PFN_MODE_PMEM:
-               /* TODO: allocate from PMEM support */
-               return -ENOTTY;
+               break;
        default:
                return -ENXIO;
        }
index 5d6237391dcd4e3851390abe9b1412217d2428d8..2ed30f063a132973beb6f21b15f30b86da1763a7 100644 (file)
@@ -1,6 +1,10 @@
+config NVME_CORE
+       tristate
+
 config BLK_DEV_NVME
        tristate "NVM Express block device"
        depends on PCI && BLOCK
+       select NVME_CORE
        ---help---
          The NVM Express driver is for solid state drives directly
          connected to the PCI or PCI Express bus.  If you know you
@@ -11,7 +15,7 @@ config BLK_DEV_NVME
 
 config BLK_DEV_NVME_SCSI
        bool "SCSI emulation for NVMe device nodes"
-       depends on BLK_DEV_NVME
+       depends on NVME_CORE
        ---help---
          This adds support for the SG_IO ioctl on the NVMe character
          and block device nodes, as well as a translation for a small
index 51bf90871549844c7f4db638a7ec26c4b4b9aa51..9a3ca892b4a722e2a6d51aef48d1397985fa5bc4 100644 (file)
@@ -1,6 +1,8 @@
+obj-$(CONFIG_NVME_CORE)                        += nvme-core.o
+obj-$(CONFIG_BLK_DEV_NVME)             += nvme.o
 
-obj-$(CONFIG_BLK_DEV_NVME)     += nvme.o
+nvme-core-y                            := core.o
+nvme-core-$(CONFIG_BLK_DEV_NVME_SCSI)  += scsi.o
+nvme-core-$(CONFIG_NVM)                        += lightnvm.o
 
-lightnvm-$(CONFIG_NVM)                 := lightnvm.o
-nvme-y                                 += core.o pci.o $(lightnvm-y)
-nvme-$(CONFIG_BLK_DEV_NVME_SCSI)        += scsi.o
+nvme-y                                 += pci.o
index c5bf001af55954e2c9275d34c65cbb5dfa2a037d..07b7ec699e9213f48c2c9624674e5e4147cb7085 100644 (file)
 
 #define NVME_MINORS            (1U << MINORBITS)
 
+unsigned char admin_timeout = 60;
+module_param(admin_timeout, byte, 0644);
+MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
+EXPORT_SYMBOL_GPL(admin_timeout);
+
+unsigned char nvme_io_timeout = 30;
+module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
+MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
+EXPORT_SYMBOL_GPL(nvme_io_timeout);
+
+unsigned char shutdown_timeout = 5;
+module_param(shutdown_timeout, byte, 0644);
+MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
+
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
@@ -40,7 +54,7 @@ static int nvme_char_major;
 module_param(nvme_char_major, int, 0);
 
 static LIST_HEAD(nvme_ctrl_list);
-DEFINE_SPINLOCK(dev_list_lock);
+static DEFINE_SPINLOCK(dev_list_lock);
 
 static struct class *nvme_class;
 
@@ -71,11 +85,21 @@ static struct nvme_ns *nvme_get_ns_from_disk(struct gendisk *disk)
 
        spin_lock(&dev_list_lock);
        ns = disk->private_data;
-       if (ns && !kref_get_unless_zero(&ns->kref))
-               ns = NULL;
+       if (ns) {
+               if (!kref_get_unless_zero(&ns->kref))
+                       goto fail;
+               if (!try_module_get(ns->ctrl->ops->module))
+                       goto fail_put_ns;
+       }
        spin_unlock(&dev_list_lock);
 
        return ns;
+
+fail_put_ns:
+       kref_put(&ns->kref, nvme_free_ns);
+fail:
+       spin_unlock(&dev_list_lock);
+       return NULL;
 }
 
 void nvme_requeue_req(struct request *req)
@@ -88,6 +112,7 @@ void nvme_requeue_req(struct request *req)
                blk_mq_kick_requeue_list(req->q);
        spin_unlock_irqrestore(req->q->queue_lock, flags);
 }
+EXPORT_SYMBOL_GPL(nvme_requeue_req);
 
 struct request *nvme_alloc_request(struct request_queue *q,
                struct nvme_command *cmd, unsigned int flags)
@@ -111,6 +136,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
 
        return req;
 }
+EXPORT_SYMBOL_GPL(nvme_alloc_request);
 
 /*
  * Returns 0 on success.  If the result is negative, it's a Linux error code;
@@ -148,6 +174,7 @@ int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
 {
        return __nvme_submit_sync_cmd(q, cmd, buffer, bufflen, NULL, 0);
 }
+EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd);
 
 int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
                void __user *ubuffer, unsigned bufflen,
@@ -363,6 +390,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
        *count = min(*count, nr_io_queues);
        return 0;
 }
+EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
 {
@@ -499,7 +527,10 @@ static int nvme_open(struct block_device *bdev, fmode_t mode)
 
 static void nvme_release(struct gendisk *disk, fmode_t mode)
 {
-       nvme_put_ns(disk->private_data);
+       struct nvme_ns *ns = disk->private_data;
+
+       module_put(ns->ctrl->ops->module);
+       nvme_put_ns(ns);
 }
 
 static int nvme_getgeo(struct block_device *bdev, struct hd_geometry *geo)
@@ -557,8 +588,8 @@ static int nvme_revalidate_disk(struct gendisk *disk)
        unsigned short bs;
 
        if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
-               dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
-                               __func__, ns->ctrl->instance, ns->ns_id);
+               dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
+                               __func__);
                return -ENODEV;
        }
        if (id->ncap == 0) {
@@ -568,7 +599,7 @@ static int nvme_revalidate_disk(struct gendisk *disk)
 
        if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) {
                if (nvme_nvm_register(ns->queue, disk->disk_name)) {
-                       dev_warn(ns->ctrl->dev,
+                       dev_warn(disk_to_dev(ns->disk),
                                "%s: LightNVM init failure\n", __func__);
                        kfree(id);
                        return -ENODEV;
@@ -741,7 +772,7 @@ static int nvme_wait_ready(struct nvme_ctrl *ctrl, u64 cap, bool enabled)
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (time_after(jiffies, timeout)) {
-                       dev_err(ctrl->dev,
+                       dev_err(ctrl->device,
                                "Device not ready; aborting %s\n", enabled ?
                                                "initialisation" : "reset");
                        return -ENODEV;
@@ -769,6 +800,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
                return ret;
        return nvme_wait_ready(ctrl, cap, false);
 }
+EXPORT_SYMBOL_GPL(nvme_disable_ctrl);
 
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
 {
@@ -781,7 +813,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
        int ret;
 
        if (page_shift < dev_page_min) {
-               dev_err(ctrl->dev,
+               dev_err(ctrl->device,
                        "Minimum device page size %u too large for host (%u)\n",
                        1 << dev_page_min, 1 << page_shift);
                return -ENODEV;
@@ -800,6 +832,7 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
                return ret;
        return nvme_wait_ready(ctrl, cap, true);
 }
+EXPORT_SYMBOL_GPL(nvme_enable_ctrl);
 
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 {
@@ -822,7 +855,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
                if (fatal_signal_pending(current))
                        return -EINTR;
                if (time_after(jiffies, timeout)) {
-                       dev_err(ctrl->dev,
+                       dev_err(ctrl->device,
                                "Device shutdown incomplete; abort shutdown\n");
                        return -ENODEV;
                }
@@ -830,6 +863,7 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl);
 
 /*
  * Initialize the cached copies of the Identify data and various controller
@@ -844,13 +878,13 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 
        ret = ctrl->ops->reg_read32(ctrl, NVME_REG_VS, &ctrl->vs);
        if (ret) {
-               dev_err(ctrl->dev, "Reading VS failed (%d)\n", ret);
+               dev_err(ctrl->device, "Reading VS failed (%d)\n", ret);
                return ret;
        }
 
        ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &cap);
        if (ret) {
-               dev_err(ctrl->dev, "Reading CAP failed (%d)\n", ret);
+               dev_err(ctrl->device, "Reading CAP failed (%d)\n", ret);
                return ret;
        }
        page_shift = NVME_CAP_MPSMIN(cap) + 12;
@@ -860,7 +894,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 
        ret = nvme_identify_ctrl(ctrl, &id);
        if (ret) {
-               dev_err(ctrl->dev, "Identify Controller failed (%d)\n", ret);
+               dev_err(ctrl->device, "Identify Controller failed (%d)\n", ret);
                return -EIO;
        }
 
@@ -891,6 +925,7 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        kfree(id);
        return 0;
 }
+EXPORT_SYMBOL_GPL(nvme_init_identify);
 
 static int nvme_dev_open(struct inode *inode, struct file *file)
 {
@@ -937,13 +972,13 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp)
 
        ns = list_first_entry(&ctrl->namespaces, struct nvme_ns, list);
        if (ns != list_last_entry(&ctrl->namespaces, struct nvme_ns, list)) {
-               dev_warn(ctrl->dev,
+               dev_warn(ctrl->device,
                        "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
                ret = -EINVAL;
                goto out_unlock;
        }
 
-       dev_warn(ctrl->dev,
+       dev_warn(ctrl->device,
                "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
        kref_get(&ns->kref);
        mutex_unlock(&ctrl->namespaces_mutex);
@@ -969,7 +1004,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
-               dev_warn(ctrl->dev, "resetting controller\n");
+               dev_warn(ctrl->device, "resetting controller\n");
                return ctrl->ops->reset_ctrl(ctrl);
        case NVME_IOCTL_SUBSYS_RESET:
                return nvme_reset_subsystem(ctrl);
@@ -1296,6 +1331,7 @@ void nvme_scan_namespaces(struct nvme_ctrl *ctrl)
        mutex_unlock(&ctrl->namespaces_mutex);
        kfree(id);
 }
+EXPORT_SYMBOL_GPL(nvme_scan_namespaces);
 
 void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
 {
@@ -1306,6 +1342,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
                nvme_ns_remove(ns);
        mutex_unlock(&ctrl->namespaces_mutex);
 }
+EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
 
 static DEFINE_IDA(nvme_instance_ida);
 
@@ -1337,13 +1374,14 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl)
 }
 
 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
- {
+{
        device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));
 
        spin_lock(&dev_list_lock);
        list_del(&ctrl->node);
        spin_unlock(&dev_list_lock);
 }
+EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);
 
 static void nvme_free_ctrl(struct kref *kref)
 {
@@ -1359,6 +1397,7 @@ void nvme_put_ctrl(struct nvme_ctrl *ctrl)
 {
        kref_put(&ctrl->kref, nvme_free_ctrl);
 }
+EXPORT_SYMBOL_GPL(nvme_put_ctrl);
 
 /*
  * Initialize a NVMe controller structures.  This needs to be called during
@@ -1383,14 +1422,13 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
 
        ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
                                MKDEV(nvme_char_major, ctrl->instance),
-                               dev, nvme_dev_attr_groups,
+                               ctrl, nvme_dev_attr_groups,
                                "nvme%d", ctrl->instance);
        if (IS_ERR(ctrl->device)) {
                ret = PTR_ERR(ctrl->device);
                goto out_release_instance;
        }
        get_device(ctrl->device);
-       dev_set_drvdata(ctrl->device, ctrl);
 
        spin_lock(&dev_list_lock);
        list_add_tail(&ctrl->node, &nvme_ctrl_list);
@@ -1402,6 +1440,7 @@ out_release_instance:
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(nvme_init_ctrl);
 
 void nvme_stop_queues(struct nvme_ctrl *ctrl)
 {
@@ -1418,6 +1457,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
        }
        mutex_unlock(&ctrl->namespaces_mutex);
 }
+EXPORT_SYMBOL_GPL(nvme_stop_queues);
 
 void nvme_start_queues(struct nvme_ctrl *ctrl)
 {
@@ -1431,6 +1471,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
        }
        mutex_unlock(&ctrl->namespaces_mutex);
 }
+EXPORT_SYMBOL_GPL(nvme_start_queues);
 
 int __init nvme_core_init(void)
 {
@@ -1470,3 +1511,8 @@ void nvme_core_exit(void)
        class_destroy(nvme_class);
        __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
 }
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+module_init(nvme_core_init);
+module_exit(nvme_core_exit);
index 4fb5bb737868ce2db7da41700cd238c8804b086c..63ba8a500ee13e13c8285fee73783cfa66020bce 100644 (file)
@@ -117,6 +117,7 @@ struct nvme_ns {
 };
 
 struct nvme_ctrl_ops {
+       struct module *module;
        int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
        int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
        int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
@@ -265,8 +266,6 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11,
                        dma_addr_t dma_addr, u32 *result);
 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 
-extern spinlock_t dev_list_lock;
-
 struct sg_io_hdr;
 
 int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
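
With nvme-core.ko and nvme.ko now separate modules, the core pins the transport module while a namespace block device is open, hence the new module member in nvme_ctrl_ops above and the try_module_get()/module_put() pairing in the open and release paths. A reduced sketch of that pairing; the helper names are illustrative, and nvme_free_ns()/nvme_put_ns() are core.c's existing internal helpers:

#include <linux/kref.h>
#include <linux/module.h>
#include "nvme.h"

/* Sketch: pin both the namespace and the transport driver's module on open,
 * and drop them in reverse order on release.
 */
static struct nvme_ns *example_open_get_ns(struct nvme_ns *ns)
{
        if (!kref_get_unless_zero(&ns->kref))
                return NULL;
        if (!try_module_get(ns->ctrl->ops->module)) {
                /* undo the kref we just took; nvme_free_ns is core-internal */
                kref_put(&ns->kref, nvme_free_ns);
                return NULL;
        }
        return ns;
}

static void example_release_put_ns(struct nvme_ns *ns)
{
        module_put(ns->ctrl->ops->module);
        nvme_put_ns(ns);
}
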
index 72ef8322d32ac7180912e2ae2cf38245f3713d9a..fec747917690665f68e5a6cf341599fb6e958cbc 100644 (file)
 #define NVME_NR_AEN_COMMANDS   1
 #define NVME_AQ_BLKMQ_DEPTH    (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
 
-unsigned char admin_timeout = 60;
-module_param(admin_timeout, byte, 0644);
-MODULE_PARM_DESC(admin_timeout, "timeout in seconds for admin commands");
-
-unsigned char nvme_io_timeout = 30;
-module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
-MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
-
-unsigned char shutdown_timeout = 5;
-module_param(shutdown_timeout, byte, 0644);
-MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
-
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -77,6 +65,7 @@ module_param(use_cmb_sqes, bool, 0644);
 MODULE_PARM_DESC(use_cmb_sqes, "use controller's memory buffer for I/O SQes");
 
 static LIST_HEAD(dev_list);
+static DEFINE_SPINLOCK(dev_list_lock);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
@@ -299,10 +288,10 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
 
        switch (result & 0xff07) {
        case NVME_AER_NOTICE_NS_CHANGED:
-               dev_info(dev->dev, "rescanning\n");
+               dev_info(dev->ctrl.device, "rescanning\n");
                queue_work(nvme_workq, &dev->scan_work);
        default:
-               dev_warn(dev->dev, "async event result %08x\n", result);
+               dev_warn(dev->ctrl.device, "async event result %08x\n", result);
        }
 }
 
@@ -708,7 +697,7 @@ static void nvme_complete_rq(struct request *req)
        }
 
        if (unlikely(iod->aborted)) {
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                        "completing aborted command with status: %04x\n",
                        req->errors);
        }
@@ -740,7 +729,7 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
                        *tag = -1;
 
                if (unlikely(cqe.command_id >= nvmeq->q_depth)) {
-                       dev_warn(nvmeq->q_dmadev,
+                       dev_warn(nvmeq->dev->ctrl.device,
                                "invalid id %d completed on queue %d\n",
                                cqe.command_id, le16_to_cpu(cqe.sq_id));
                        continue;
@@ -908,7 +897,8 @@ static void abort_endio(struct request *req, int error)
        u32 result = (u32)(uintptr_t)req->special;
        u16 status = req->errors;
 
-       dev_warn(nvmeq->q_dmadev, "Abort status:%x result:%x", status, result);
+       dev_warn(nvmeq->dev->ctrl.device,
+               "Abort status:%x result:%x", status, result);
        atomic_inc(&nvmeq->dev->ctrl.abort_limit);
 
        blk_mq_free_request(req);
@@ -929,7 +919,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
         * shutdown, so we return BLK_EH_HANDLED.
         */
        if (test_bit(NVME_CTRL_RESETTING, &dev->flags)) {
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, disable controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
@@ -943,7 +933,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
         * returned to the driver, or if this is the admin queue.
         */
        if (!nvmeq->qid || iod->aborted) {
-               dev_warn(dev->dev,
+               dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, reset controller\n",
                         req->tag, nvmeq->qid);
                nvme_dev_disable(dev, false);
@@ -969,8 +959,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        cmd.abort.cid = req->tag;
        cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
 
-       dev_warn(nvmeq->q_dmadev, "I/O %d QID %d timeout, aborting\n",
-                                req->tag, nvmeq->qid);
+       dev_warn(nvmeq->dev->ctrl.device,
+               "I/O %d QID %d timeout, aborting\n",
+                req->tag, nvmeq->qid);
 
        abort_req = nvme_alloc_request(dev->ctrl.admin_q, &cmd,
                        BLK_MQ_REQ_NOWAIT);
@@ -999,7 +990,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
        if (!blk_mq_request_started(req))
                return;
 
-       dev_warn(nvmeq->q_dmadev,
+       dev_warn(nvmeq->dev->ctrl.device,
                 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
 
        status = NVME_SC_ABORT_REQ;
@@ -1355,7 +1346,7 @@ static int nvme_kthread(void *data)
                        if ((dev->subsystem && (csts & NVME_CSTS_NSSRO)) ||
                                                        csts & NVME_CSTS_CFS) {
                                if (queue_work(nvme_workq, &dev->reset_work)) {
-                                       dev_warn(dev->dev,
+                                       dev_warn(dev->ctrl.device,
                                                "Failed status: %x, reset controller\n",
                                                readl(dev->bar + NVME_REG_CSTS));
                                }
@@ -1381,7 +1372,7 @@ static int nvme_kthread(void *data)
 
 static int nvme_create_io_queues(struct nvme_dev *dev)
 {
-       unsigned i;
+       unsigned i, max;
        int ret = 0;
 
        for (i = dev->queue_count; i <= dev->max_qid; i++) {
@@ -1391,7 +1382,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev)
                }
        }
 
-       for (i = dev->online_queues; i <= dev->queue_count - 1; i++) {
+       max = min(dev->max_qid, dev->queue_count - 1);
+       for (i = dev->online_queues; i <= max; i++) {
                ret = nvme_create_queue(dev->queues[i], i);
                if (ret) {
                        nvme_free_queues(dev, i);
@@ -1482,7 +1474,8 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
         * access to the admin queue, as that might be only way to fix them up.
         */
        if (result > 0) {
-               dev_err(dev->dev, "Could not set queue count (%d)\n", result);
+               dev_err(dev->ctrl.device,
+                       "Could not set queue count (%d)\n", result);
                nr_io_queues = 0;
                result = 0;
        }
@@ -1548,9 +1541,6 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
                adminq->cq_vector = -1;
                goto free_queues;
        }
-
-       /* Free previously allocated queues that are no longer usable */
-       nvme_free_queues(dev, nr_io_queues + 1);
        return nvme_create_io_queues(dev);
 
  free_queues:
@@ -1684,7 +1674,13 @@ static int nvme_dev_add(struct nvme_dev *dev)
                if (blk_mq_alloc_tag_set(&dev->tagset))
                        return 0;
                dev->ctrl.tagset = &dev->tagset;
+       } else {
+               blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+
+               /* Free previously allocated queues that are no longer usable */
+               nvme_free_queues(dev, dev->online_queues);
        }
+
        queue_work(nvme_workq, &dev->scan_work);
        return 0;
 }
@@ -1943,7 +1939,7 @@ static void nvme_reset_work(struct work_struct *work)
         * any working I/O queue.
         */
        if (dev->online_queues < 2) {
-               dev_warn(dev->dev, "IO queues not created\n");
+               dev_warn(dev->ctrl.device, "IO queues not created\n");
                nvme_remove_namespaces(&dev->ctrl);
        } else {
                nvme_start_queues(&dev->ctrl);
@@ -1980,7 +1976,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
 
 static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
 {
-       dev_warn(dev->dev, "Removing after probe failure\n");
+       dev_warn(dev->ctrl.device, "Removing after probe failure\n");
        kref_get(&dev->ctrl.kref);
        if (!schedule_work(&dev->remove_work))
                nvme_put_ctrl(&dev->ctrl);
@@ -2029,6 +2025,7 @@ static int nvme_pci_reset_ctrl(struct nvme_ctrl *ctrl)
 }
 
 static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
+       .module                 = THIS_MODULE,
        .reg_read32             = nvme_pci_reg_read32,
        .reg_write32            = nvme_pci_reg_write32,
        .reg_read64             = nvme_pci_reg_read64,
@@ -2077,6 +2074,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (result)
                goto release_pools;
 
+       dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
+
        queue_work(nvme_workq, &dev->reset_work);
        return 0;
 
@@ -2160,7 +2159,7 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
         * shutdown the controller to quiesce. The controller will be restarted
         * after the slot reset through driver's slot_reset callback.
         */
-       dev_warn(&pdev->dev, "error detected: state:%d\n", state);
+       dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
        switch (state) {
        case pci_channel_io_normal:
                return PCI_ERS_RESULT_CAN_RECOVER;
@@ -2177,7 +2176,7 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
 
-       dev_info(&pdev->dev, "restart after slot reset\n");
+       dev_info(dev->ctrl.device, "restart after slot reset\n");
        pci_restore_state(pdev);
        queue_work(nvme_workq, &dev->reset_work);
        return PCI_ERS_RESULT_RECOVERED;
@@ -2231,26 +2230,15 @@ static int __init nvme_init(void)
        if (!nvme_workq)
                return -ENOMEM;
 
-       result = nvme_core_init();
-       if (result < 0)
-               goto kill_workq;
-
        result = pci_register_driver(&nvme_driver);
        if (result)
-               goto core_exit;
-       return 0;
-
- core_exit:
-       nvme_core_exit();
- kill_workq:
-       destroy_workqueue(nvme_workq);
+               destroy_workqueue(nvme_workq);
        return result;
 }
 
 static void __exit nvme_exit(void)
 {
        pci_unregister_driver(&nvme_driver);
-       nvme_core_exit();
        destroy_workqueue(nvme_workq);
        BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
        _nvme_check_size();
index 6fd4e5a5ef4a495bbd412ee33b931f4fb3a8a24f..9d11d98373128fef3de3406975d8bcc2ca286b9a 100644 (file)
@@ -70,6 +70,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
        if (pos >= nvmem->size)
                return 0;
 
+       if (count < nvmem->word_size)
+               return -EINVAL;
+
        if (pos + count > nvmem->size)
                count = nvmem->size - pos;
 
@@ -95,6 +98,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
        if (pos >= nvmem->size)
                return 0;
 
+       if (count < nvmem->word_size)
+               return -EINVAL;
+
        if (pos + count > nvmem->size)
                count = nvmem->size - pos;
 
index afb67e7eeee4a89612d56d0404726d7778c12a53..3829e5fbf8c366bf3ae7fa7149e87e5c0ddcf6f3 100644 (file)
@@ -21,6 +21,7 @@ static struct regmap_config qfprom_regmap_config = {
        .reg_bits = 32,
        .val_bits = 8,
        .reg_stride = 1,
+       .val_format_endian = REGMAP_ENDIAN_LITTLE,
 };
 
 static struct nvmem_config econfig = {
index 655f79db7899ffd0628714d51203847630a8075c..d5fbca07620a398a600234e022ad040cffbdeaa9 100644 (file)
@@ -796,14 +796,13 @@ static inline void early_init_dt_check_for_initrd(unsigned long node)
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_SERIAL_EARLYCON
-extern struct of_device_id __earlycon_of_table[];
 
 static int __init early_init_dt_scan_chosen_serial(void)
 {
        int offset;
-       const char *p;
+       const char *p, *q, *options = NULL;
        int l;
-       const struct of_device_id *match = __earlycon_of_table;
+       const struct earlycon_id *match;
        const void *fdt = initial_boot_params;
 
        offset = fdt_path_offset(fdt, "/chosen");
@@ -818,27 +817,26 @@ static int __init early_init_dt_scan_chosen_serial(void)
        if (!p || !l)
                return -ENOENT;
 
-       /* Remove console options if present */
-       l = strchrnul(p, ':') - p;
+       q = strchrnul(p, ':');
+       if (*q != '\0')
+               options = q + 1;
+       l = q - p;
 
        /* Get the node specified by stdout-path */
        offset = fdt_path_offset_namelen(fdt, p, l);
-       if (offset < 0)
-               return -ENODEV;
-
-       while (match->compatible[0]) {
-               u64 addr;
+       if (offset < 0) {
+               pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
+               return 0;
+       }
 
-               if (fdt_node_check_compatible(fdt, offset, match->compatible)) {
-                       match++;
+       for (match = __earlycon_table; match < __earlycon_table_end; match++) {
+               if (!match->compatible[0])
                        continue;
-               }
 
-               addr = fdt_translate_address(fdt, offset);
-               if (addr == OF_BAD_ADDR)
-                       return -ENXIO;
+               if (fdt_node_check_compatible(fdt, offset, match->compatible))
+                       continue;
 
-               of_setup_earlycon(addr, match->data);
+               of_setup_earlycon(match, offset, options);
                return 0;
        }
        return -ENODEV;
@@ -976,13 +974,16 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 }
 
 #ifdef CONFIG_HAVE_MEMBLOCK
+#ifndef MIN_MEMBLOCK_ADDR
+#define MIN_MEMBLOCK_ADDR      __pa(PAGE_OFFSET)
+#endif
 #ifndef MAX_MEMBLOCK_ADDR
 #define MAX_MEMBLOCK_ADDR      ((phys_addr_t)~0)
 #endif
 
 void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-       const u64 phys_offset = __pa(PAGE_OFFSET);
+       const u64 phys_offset = MIN_MEMBLOCK_ADDR;
 
        if (!PAGE_ALIGNED(base)) {
                if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
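
The earlycon rework in the fdt.c hunks above no longer translates the address in the scanner; it hands the matched earlycon_id, the node offset and any console options to of_setup_earlycon(). The options are whatever follows the first ':' in the stdout-path value. A small sketch of that split, assuming nothing beyond strchrnul(); the helper name is illustrative:

#include <linux/string.h>

/* Split "serial0:115200n8" into the node path (length returned via *pathlen)
 * and the option string; NULL means no options were given.
 */
static const char *example_stdout_path_options(const char *p, int *pathlen)
{
        const char *q = strchrnul(p, ':');

        *pathlen = q - p;
        return (*q != '\0') ? q + 1 : NULL;
}
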
index 8d3dc6fbdb7af70a9dbdc35eb8a51cf4f391eb6f..dca8f9b93745a0e500fe32d1f8edaa6dbc856409 100644 (file)
@@ -161,7 +161,7 @@ static int __init fdt_translate_one(const void *blob, int parent,
  * that can be mapped to a cpu physical address). This is not really specified
  * that way, but this is traditionally the way IBM at least do things
  */
-u64 __init fdt_translate_address(const void *blob, int node_offset)
+static u64 __init fdt_translate_address(const void *blob, int node_offset)
 {
        int parent, len;
        const struct of_bus *bus, *pbus;
@@ -239,3 +239,12 @@ u64 __init fdt_translate_address(const void *blob, int node_offset)
  bail:
        return result;
 }
+
+/**
+ * of_flat_dt_translate_address - translate DT addr into CPU phys addr
+ * @node: node in the flat blob
+ */
+u64 __init of_flat_dt_translate_address(unsigned long node)
+{
+       return fdt_translate_address(initial_boot_params, node);
+}
index 706e3ff67f8b02f4403a841c8b92f66f3c0e5678..7ee21ae305ae10d480adce961ad59fe284ca366e 100644 (file)
@@ -679,18 +679,6 @@ u32 of_msi_map_rid(struct device *dev, struct device_node *msi_np, u32 rid_in)
        return __of_msi_map_rid(dev, &msi_np, rid_in);
 }
 
-static struct irq_domain *__of_get_msi_domain(struct device_node *np,
-                                             enum irq_domain_bus_token token)
-{
-       struct irq_domain *d;
-
-       d = irq_find_matching_host(np, token);
-       if (!d)
-               d = irq_find_host(np);
-
-       return d;
-}
-
 /**
  * of_msi_map_get_device_domain - Use msi-map to find the relevant MSI domain
  * @dev: device for which the mapping is to be done.
@@ -706,7 +694,7 @@ struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 rid)
        struct device_node *np = NULL;
 
        __of_msi_map_rid(dev, &np, rid);
-       return __of_get_msi_domain(np, DOMAIN_BUS_PCI_MSI);
+       return irq_find_matching_host(np, DOMAIN_BUS_PCI_MSI);
 }
 
 /**
@@ -730,7 +718,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
        /* Check for a single msi-parent property */
        msi_np = of_parse_phandle(np, "msi-parent", 0);
        if (msi_np && !of_property_read_bool(msi_np, "#msi-cells")) {
-               d = __of_get_msi_domain(msi_np, token);
+               d = irq_find_matching_host(msi_np, token);
                if (!d)
                        of_node_put(msi_np);
                return d;
@@ -744,7 +732,7 @@ struct irq_domain *of_msi_get_domain(struct device *dev,
                while (!of_parse_phandle_with_args(np, "msi-parent",
                                                   "#msi-cells",
                                                   index, &args)) {
-                       d = __of_get_msi_domain(args.np, token);
+                       d = irq_find_matching_host(args.np, token);
                        if (d)
                                return d;
 
index 86829f8064a61eb324a0a9ea28f63f0bf3ceadbd..39c4be41ef83d6cf23302fb9af6270f8857f85de 100644 (file)
@@ -143,11 +143,32 @@ int of_mdio_parse_addr(struct device *dev, const struct device_node *np)
 }
 EXPORT_SYMBOL(of_mdio_parse_addr);
 
+/* The following is a list of PHY compatible strings which appear in
+ * some DTBs. The compatible string is never matched against a PHY
+ * driver, so it is pointless. We only expect devices which are not PHYs
+ * to have a compatible string, so they can be matched to an MDIO
+ * driver.  Encourage users to upgrade their DT blobs to remove these.
+ */
+static const struct of_device_id whitelist_phys[] = {
+       { .compatible = "brcm,40nm-ephy" },
+       { .compatible = "marvell,88E1111", },
+       { .compatible = "marvell,88e1116", },
+       { .compatible = "marvell,88e1118", },
+       { .compatible = "marvell,88e1145", },
+       { .compatible = "marvell,88e1149r", },
+       { .compatible = "marvell,88e1310", },
+       { .compatible = "marvell,88E1510", },
+       { .compatible = "marvell,88E1514", },
+       { .compatible = "moxa,moxart-rtl8201cp", },
+       {}
+};
+
 /*
  * Return true if the child node is for a phy. It must either:
  * o Compatible string of "ethernet-phy-idX.X"
  * o Compatible string of "ethernet-phy-ieee802.3-c45"
  * o Compatible string of "ethernet-phy-ieee802.3-c22"
+ * o In the whitelist above (a warning is issued)
  * o No compatibility string
  *
  * A device which is not a phy is expected to have a compatible string
@@ -166,6 +187,13 @@ static bool of_mdiobus_child_is_phy(struct device_node *child)
        if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22"))
                return true;
 
+       if (of_match_node(whitelist_phys, child)) {
+               pr_warn(FW_WARN
+                       "%s: Whitelisted compatible string. Please remove\n",
+                       child->full_name);
+               return true;
+       }
+
        if (!of_find_property(child, "compatible", NULL))
                return true;
 
@@ -256,11 +284,19 @@ static int of_phy_match(struct device *dev, void *phy_np)
 struct phy_device *of_phy_find_device(struct device_node *phy_np)
 {
        struct device *d;
+       struct mdio_device *mdiodev;
+
        if (!phy_np)
                return NULL;
 
        d = bus_find_device(&mdio_bus_type, NULL, phy_np, of_phy_match);
-       return d ? to_phy_device(d) : NULL;
+       if (d) {
+               mdiodev = to_mdio_device(d);
+               if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
+                       return to_phy_device(d);
+       }
+
+       return NULL;
 }
 EXPORT_SYMBOL(of_phy_find_device);
 
index b1449f71601cb257f1057b77c217a943da92cab8..13f4fed3804850804dfa9ad285c403e68729c150 100644 (file)
@@ -5,7 +5,6 @@
 #include <linux/of_device.h>
 #include <linux/of_pci.h>
 #include <linux/slab.h>
-#include <asm-generic/pci-bridge.h>
 
 static inline int __of_pci_pci_compare(struct device_node *node,
                                       unsigned int data)
index a656d9e8334302c2ec4f013ad6c016034ace5f27..21905fef2cbf0d34259e37069e50140a66101441 100644 (file)
@@ -91,7 +91,7 @@ static int configure_memory(const unsigned char *buf,
        for (i=0;i<HPEE_MEMORY_MAX_ENT;i++) {
                c = get_8(buf+len);
                
-               if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
+               if (NULL != (res = kzalloc(sizeof(struct resource), GFP_KERNEL))) {
                        int result;
                        
                        res->name = name;
@@ -183,7 +183,7 @@ static int configure_port(const unsigned char *buf, struct resource *io_parent,
        for (i=0;i<HPEE_PORT_MAX_ENT;i++) {
                c = get_8(buf+len);
                
-               if (NULL != (res = kmalloc(sizeof(struct resource), GFP_KERNEL))) {
+               if (NULL != (res = kzalloc(sizeof(struct resource), GFP_KERNEL))) {
                        res->name = board;
                        res->start = get_16(buf+len+1);
                        res->end = get_16(buf+len+1)+(c&HPEE_PORT_SIZE_MASK)+1;
index 89b3befc7155325b12f21d4667a675b52423bb6d..f2187d491475a6144cd4b11cff7c9589e1a6c82c 100644 (file)
@@ -291,7 +291,12 @@ void pci_bus_add_device(struct pci_dev *dev)
 
        dev->match_driver = true;
        retval = device_attach(&dev->dev);
-       WARN_ON(retval < 0);
+       if (retval < 0) {
+               dev_warn(&dev->dev, "device attach failed (%d)\n", retval);
+               pci_proc_detach_device(dev);
+               pci_remove_sysfs_dev_files(dev);
+               return;
+       }
 
        dev->is_added = 1;
 }
index fe600964fa50177bc17b28627568268608896010..bd3f7d0ef943d6af6c57ba60c592908fa8caddb9 100644 (file)
@@ -202,6 +202,23 @@ static int pcie_phy_write(void __iomem *dbi_base, int addr, int data)
        return 0;
 }
 
+static void imx6_pcie_reset_phy(struct pcie_port *pp)
+{
+       u32 tmp;
+
+       pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+       tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+               PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+       pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+
+       usleep_range(2000, 3000);
+
+       pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
+       tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
+                 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
+       pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
+}
+
 /*  Added for PCI abort handling */
 static int imx6q_pcie_abort_handler(unsigned long addr,
                unsigned int fsr, struct pt_regs *regs)
@@ -332,16 +349,30 @@ static int imx6_pcie_wait_for_link(struct pcie_port *pp)
 {
        unsigned int retries;
 
+       /*
+        * Test if the PHY reports that the link is up and also that the LTSSM
+        * training finished. There are three possible states of the link when
+        * this code is called:
+        * 1) The link is DOWN (unlikely)
+        *    The link didn't come up yet for some reason. This usually means
+        *    we have a real problem somewhere, if it happens with a peripheral
+        *    connected. This state calls for inspection of the DEBUG registers.
+        * 2) The link is UP, but still in LTSSM training
+        *    Wait for the training to finish, which should take a very short
+        *    time. If the training does not finish, we have a problem and we
+        *    need to inspect the DEBUG registers. If the training does finish,
+        *    the link is up and operating correctly.
+        * 3) The link is UP and no longer in LTSSM training
+        *    The link is up and operating correctly.
+        */
        for (retries = 0; retries < 200; retries++) {
-               if (dw_pcie_link_up(pp))
+               u32 reg = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
+               if ((reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) &&
+                   !(reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
                        return 0;
-               usleep_range(100, 1000);
+               usleep_range(1000, 2000);
        }
 
-       dev_err(pp->dev, "phy link never came up\n");
-       dev_dbg(pp->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
-               readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
-               readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
        return -EINVAL;
 }
 
@@ -390,8 +421,10 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
                        IMX6Q_GPR12_PCIE_CTL_2, 1 << 10);
 
        ret = imx6_pcie_wait_for_link(pp);
-       if (ret)
-               return ret;
+       if (ret) {
+               dev_info(pp->dev, "Link never came up\n");
+               goto err_reset_phy;
+       }
 
        /* Allow Gen2 mode after the link is up. */
        tmp = readl(pp->dbi_base + PCIE_RC_LCR);
@@ -410,19 +443,28 @@ static int imx6_pcie_establish_link(struct pcie_port *pp)
        ret = imx6_pcie_wait_for_speed_change(pp);
        if (ret) {
                dev_err(pp->dev, "Failed to bring link up!\n");
-               return ret;
+               goto err_reset_phy;
        }
 
        /* Make sure link training is finished as well! */
        ret = imx6_pcie_wait_for_link(pp);
        if (ret) {
                dev_err(pp->dev, "Failed to bring link up!\n");
-               return ret;
+               goto err_reset_phy;
        }
 
        tmp = readl(pp->dbi_base + PCIE_RC_LCSR);
        dev_dbg(pp->dev, "Link up, Gen=%i\n", (tmp >> 16) & 0xf);
+
        return 0;
+
+err_reset_phy:
+       dev_dbg(pp->dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n",
+               readl(pp->dbi_base + PCIE_PHY_DEBUG_R0),
+               readl(pp->dbi_base + PCIE_PHY_DEBUG_R1));
+       imx6_pcie_reset_phy(pp);
+
+       return ret;
 }
 
 static void imx6_pcie_host_init(struct pcie_port *pp)
@@ -441,81 +483,10 @@ static void imx6_pcie_host_init(struct pcie_port *pp)
                dw_pcie_msi_init(pp);
 }
 
-static void imx6_pcie_reset_phy(struct pcie_port *pp)
-{
-       u32 tmp;
-
-       pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
-       tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN |
-               PHY_RX_OVRD_IN_LO_RX_PLL_EN);
-       pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
-
-       usleep_range(2000, 3000);
-
-       pcie_phy_read(pp->dbi_base, PHY_RX_OVRD_IN_LO, &tmp);
-       tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN |
-                 PHY_RX_OVRD_IN_LO_RX_PLL_EN);
-       pcie_phy_write(pp->dbi_base, PHY_RX_OVRD_IN_LO, tmp);
-}
-
 static int imx6_pcie_link_up(struct pcie_port *pp)
 {
-       u32 rc, debug_r0, rx_valid;
-       int count = 5;
-
-       /*
-        * Test if the PHY reports that the link is up and also that the LTSSM
-        * training finished. There are three possible states of the link when
-        * this code is called:
-        * 1) The link is DOWN (unlikely)
-        *     The link didn't come up yet for some reason. This usually means
-        *     we have a real problem somewhere. Reset the PHY and exit. This
-        *     state calls for inspection of the DEBUG registers.
-        * 2) The link is UP, but still in LTSSM training
-        *     Wait for the training to finish, which should take a very short
-        *     time. If the training does not finish, we have a problem and we
-        *     need to inspect the DEBUG registers. If the training does finish,
-        *     the link is up and operating correctly.
-        * 3) The link is UP and no longer in LTSSM training
-        *     The link is up and operating correctly.
-        */
-       while (1) {
-               rc = readl(pp->dbi_base + PCIE_PHY_DEBUG_R1);
-               if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP))
-                       break;
-               if (!(rc & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING))
-                       return 1;
-               if (!count--)
-                       break;
-               dev_dbg(pp->dev, "Link is up, but still in training\n");
-               /*
-                * Wait a little bit, then re-check if the link finished
-                * the training.
-                */
-               usleep_range(1000, 2000);
-       }
-       /*
-        * From L0, initiate MAC entry to gen2 if EP/RC supports gen2.
-        * Wait 2ms (LTSSM timeout is 24ms, PHY lock is ~5us in gen2).
-        * If (MAC/LTSSM.state == Recovery.RcvrLock)
-        * && (PHY/rx_valid==0) then pulse PHY/rx_reset. Transition
-        * to gen2 is stuck
-        */
-       pcie_phy_read(pp->dbi_base, PCIE_PHY_RX_ASIC_OUT, &rx_valid);
-       debug_r0 = readl(pp->dbi_base + PCIE_PHY_DEBUG_R0);
-
-       if (rx_valid & PCIE_PHY_RX_ASIC_OUT_VALID)
-               return 0;
-
-       if ((debug_r0 & 0x3f) != 0x0d)
-               return 0;
-
-       dev_err(pp->dev, "transition to gen2 is stuck, reset PHY!\n");
-       dev_dbg(pp->dev, "debug_r0=%08x debug_r1=%08x\n", debug_r0, rc);
-
-       imx6_pcie_reset_phy(pp);
-
-       return 0;
+       return readl(pp->dbi_base + PCIE_PHY_DEBUG_R1) &
+                       PCIE_PHY_DEBUG_R1_XMLH_LINK_UP;
 }
 
 static struct pcie_host_ops imx6_pcie_host_ops = {
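
The new wait loop above open-codes "poll a register until two bits look right, give up after roughly 200-400 ms". For reference only (the driver does not use it), the same wait can be expressed with the readl_poll_timeout() helper from <linux/iopoll.h>:

        u32 reg;
        int ret;

        /* poll every ~1 ms, time out after 200 ms */
        ret = readl_poll_timeout(pp->dbi_base + PCIE_PHY_DEBUG_R1, reg,
                                 (reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_UP) &&
                                 !(reg & PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING),
                                 1000, 200 * 1000);
        if (ret)        /* -ETIMEDOUT: dump DEBUG_R0/R1 and reset the PHY */
                return ret;
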
index 3923bed93c7e06fa8327cafef9429c34d1b063cb..c40d8b2ce3300c4b2e5b075ff3d13d14e7ca1e0e 100644 (file)
@@ -203,6 +203,7 @@ static const struct of_device_id ls_pcie_of_match[] = {
        { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
        { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
        { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+       { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
        { },
 };
 MODULE_DEVICE_TABLE(of, ls_pcie_of_match);
index 53b79c5f055998761b837094b3ad43bd6c56de82..8cb32229a52c96605137a4d68adcf2bb5fefac0b 100644 (file)
@@ -397,11 +397,9 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
                return;
        }
 
-       if (!mvebu_has_ioport(port)) {
-               dev_WARN(&port->pcie->pdev->dev,
-                        "Attempt to set IO when IO is disabled\n");
+       if (dev_WARN(&port->pcie->pdev->dev, !mvebu_has_ioport(port),
+                               "Attempt to set IO when IO is disabled\n"))
                return;
-       }
 
        /*
         * We read the PCI-to-PCI bridge emulated registers, and
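
Both this hunk and the drivers/phy one later in the series fold the warning and the early return into a single expression, which only works if the warning macro evaluates to its condition. The long-standing WARN() macro from <asm-generic/bug.h> has exactly that property; assuming the dev_WARN() form used here behaves the same way, the shape is:

        if (WARN(!mvebu_has_ioport(port),
                 "Attempt to set IO when IO is disabled\n"))
                return;         /* WARN(cond, ...) returns cond, so the bail-out folds in */
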
index 21716827847a8ab02f1123a49ffc23141571ebc9..f85f10d220490308be8413faf9c69aeca26c9c1e 100644 (file)
@@ -517,6 +517,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
        if (pp->ops->host_init)
                pp->ops->host_init(pp);
 
+       /*
+        * If the platform provides ->rd_other_conf, it means the platform
+        * uses its own address translation component rather than ATU, so
+        * we should not program the ATU here.
+        */
        if (!pp->ops->rd_other_conf)
                dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1,
                                          PCIE_ATU_TYPE_MEM, pp->mem_base,
@@ -551,13 +556,11 @@ int dw_pcie_host_init(struct pcie_port *pp)
        pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 #endif
 
-       if (!pci_has_flag(PCI_PROBE_ONLY)) {
-               pci_bus_size_bridges(bus);
-               pci_bus_assign_resources(bus);
+       pci_bus_size_bridges(bus);
+       pci_bus_assign_resources(bus);
 
-               list_for_each_entry(child, &bus->children, node)
-                       pcie_bus_configure_settings(child);
-       }
+       list_for_each_entry(child, &bus->children, node)
+               pcie_bus_configure_settings(child);
 
        pci_bus_add_devices(bus);
        return 0;
index 5816bceddb650c24576464913f9b7044d38be585..a576aeeb22da6cec1a01784328e209a66e96e8df 100644 (file)
@@ -64,7 +64,6 @@
 #define OARR_SIZE_CFG                BIT(OARR_SIZE_CFG_SHIFT)
 
 #define MAX_NUM_OB_WINDOWS           2
-#define MAX_NUM_PAXC_PF              4
 
 #define IPROC_PCIE_REG_INVALID 0xffff
 
@@ -170,20 +169,6 @@ static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
        writel(val, pcie->base + offset + (window * 8));
 }
 
-static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie,
-                                             unsigned int slot,
-                                             unsigned int fn)
-{
-       if (slot > 0)
-               return false;
-
-       /* PAXC can only support limited number of functions */
-       if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF)
-               return false;
-
-       return true;
-}
-
 /**
  * Note access to the configuration registers are protected at the higher layer
  * by 'pci_lock' in drivers/pci/access.c
@@ -199,11 +184,11 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
        u32 val;
        u16 offset;
 
-       if (!iproc_pcie_device_is_valid(pcie, slot, fn))
-               return NULL;
-
        /* root complex access */
        if (busno == 0) {
+               if (slot > 0 || fn > 0)
+                       return NULL;
+
                iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
                                     where & CFG_IND_ADDR_MASK);
                offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
@@ -213,6 +198,14 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
                        return (pcie->base + offset);
        }
 
+       /*
+        * PAXC is connected to an internally emulated EP within the SoC.  It
+        * allows only one device.
+        */
+       if (pcie->type == IPROC_PCIE_PAXC)
+               if (slot > 0)
+                       return NULL;
+
        /* EP device access */
        val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
                (slot << CFG_ADDR_DEV_NUM_SHIFT) |
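
The bus/slot/function packing used for EP accesses above is the usual PCI config-address encoding. A self-contained sketch: PCI_SLOT()/PCI_FUNC() are the real macros from <linux/pci.h>, while the EX_* shift values are placeholders, not iProc's CFG_ADDR_* constants:

        #define EX_BUS_SHIFT    20      /* placeholder shifts */
        #define EX_DEV_SHIFT    15
        #define EX_FN_SHIFT     12

        static u32 ex_cfg_addr(u8 bus, unsigned int devfn, u16 where)
        {
                return (bus << EX_BUS_SHIFT) |
                       (PCI_SLOT(devfn) << EX_DEV_SHIFT) |
                       (PCI_FUNC(devfn) << EX_FN_SHIFT) |
                       (where & ~0x3);  /* dword-aligned register offset */
        }
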
index 4edb5181f4e2ca64dc24d8c2cc1ebec46b2d7384..35092188039b99264f7911a3fdd91005c043d4d0 100644 (file)
@@ -390,9 +390,7 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
 
        rcar_pcie_setup(&res, pcie);
 
-       /* Do not reassign resources if probe only */
-       if (!pci_has_flag(PCI_PROBE_ONLY))
-               pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
+       pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
 
        if (IS_ENABLED(CONFIG_PCI_MSI))
                bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
@@ -408,13 +406,11 @@ static int rcar_pcie_enable(struct rcar_pcie *pcie)
 
        pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);
 
-       if (!pci_has_flag(PCI_PROBE_ONLY)) {
-               pci_bus_size_bridges(bus);
-               pci_bus_assign_resources(bus);
+       pci_bus_size_bridges(bus);
+       pci_bus_assign_resources(bus);
 
-               list_for_each_entry(child, &bus->children, node)
-                       pcie_bus_configure_settings(child);
-       }
+       list_for_each_entry(child, &bus->children, node)
+               pcie_bus_configure_settings(child);
 
        pci_bus_add_devices(bus);
 
index 5f2fda12e0066d9bdd90bdf08139c706a2f1a2f0..fa49f9143b80631108e10ef78e5e45e285c40cf8 100644 (file)
@@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
 {
        pci_lock_rescan_remove();
 
-       if (slot->flags & SLOT_IS_GOING_AWAY)
+       if (slot->flags & SLOT_IS_GOING_AWAY) {
+               pci_unlock_rescan_remove();
                return -ENODEV;
+       }
 
        /* configure all functions */
        if (!(slot->flags & SLOT_ENABLED))
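
The bug fixed here is the classic early return that skips the matching unlock. One way to keep such paths honest is a single exit label that every error takes; a sketch of the shape, not how acpiphp is actually laid out:

        int ret = 0;

        pci_lock_rescan_remove();
        if (slot->flags & SLOT_IS_GOING_AWAY) {
                ret = -ENODEV;
                goto out;               /* every exit funnels through the unlock */
        }
        /* ... enable the slot ... */
out:
        pci_unlock_rescan_remove();
        return ret;
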
index 602eb422351060c611967538359b8409885397a1..64c0a1215f842b2d00e488d0bd8675f4198a1534 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/device.h>
 #include <linux/pm_runtime.h>
 #include <linux/pci_hotplug.h>
-#include <asm-generic/pci-bridge.h>
 #include <asm/setup.h>
 #include <linux/aer.h>
 #include "pci.h"
index 20db790465dd682efae0ade88d7ac42cfd52b060..e2760a39a98ad1f9524bf4ca5b6445c2ee732840 100644 (file)
@@ -124,16 +124,13 @@ static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
 static struct pci_bus_ops *pci_bus_ops_pop(void)
 {
        unsigned long flags;
-       struct pci_bus_ops *bus_ops = NULL;
+       struct pci_bus_ops *bus_ops;
 
        spin_lock_irqsave(&inject_lock, flags);
-       if (list_empty(&pci_bus_ops_list))
-               bus_ops = NULL;
-       else {
-               struct list_head *lh = pci_bus_ops_list.next;
-               list_del(lh);
-               bus_ops = list_entry(lh, struct pci_bus_ops, list);
-       }
+       bus_ops = list_first_entry_or_null(&pci_bus_ops_list,
+                                          struct pci_bus_ops, list);
+       if (bus_ops)
+               list_del(&bus_ops->list);
        spin_unlock_irqrestore(&inject_lock, flags);
        return bus_ops;
 }
@@ -181,14 +178,16 @@ static u32 *find_pci_config_dword(struct aer_error *err, int where,
        return target;
 }
 
-static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
-                       int size, u32 *val)
+static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
+                              int where, int size, u32 *val)
 {
        u32 *sim;
        struct aer_error *err;
        unsigned long flags;
        struct pci_ops *ops;
+       struct pci_ops *my_ops;
        int domain;
+       int rv;
 
        spin_lock_irqsave(&inject_lock, flags);
        if (size != sizeof(u32))
@@ -208,19 +207,32 @@ static int pci_read_aer(struct pci_bus *bus, unsigned int devfn, int where,
        }
 out:
        ops = __find_pci_bus_ops(bus);
+       /*
+        * pci_lock must already be held, so we can directly
+        * manipulate bus->ops.  Many config access functions,
+        * including pci_generic_config_read() require the original
+        * bus->ops be installed to function, so temporarily put them
+        * back.
+        */
+       my_ops = bus->ops;
+       bus->ops = ops;
+       rv = ops->read(bus, devfn, where, size, val);
+       bus->ops = my_ops;
        spin_unlock_irqrestore(&inject_lock, flags);
-       return ops->read(bus, devfn, where, size, val);
+       return rv;
 }
 
-static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
-                        int size, u32 val)
+static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
+                               int where, int size, u32 val)
 {
        u32 *sim;
        struct aer_error *err;
        unsigned long flags;
        int rw1cs;
        struct pci_ops *ops;
+       struct pci_ops *my_ops;
        int domain;
+       int rv;
 
        spin_lock_irqsave(&inject_lock, flags);
        if (size != sizeof(u32))
@@ -243,13 +255,24 @@ static int pci_write_aer(struct pci_bus *bus, unsigned int devfn, int where,
        }
 out:
        ops = __find_pci_bus_ops(bus);
+       /*
+        * pci_lock must already be held, so we can directly
+        * manipulate bus->ops.  Many config access functions,
+        * including pci_generic_config_write() require the original
+        * bus->ops be installed to function, so temporarily put them
+        * back.
+        */
+       my_ops = bus->ops;
+       bus->ops = ops;
+       rv = ops->write(bus, devfn, where, size, val);
+       bus->ops = my_ops;
        spin_unlock_irqrestore(&inject_lock, flags);
-       return ops->write(bus, devfn, where, size, val);
+       return rv;
 }
 
-static struct pci_ops pci_ops_aer = {
-       .read = pci_read_aer,
-       .write = pci_write_aer,
+static struct pci_ops aer_inj_pci_ops = {
+       .read = aer_inj_read_config,
+       .write = aer_inj_write_config,
 };
 
 static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
@@ -270,9 +293,9 @@ static int pci_bus_set_aer_ops(struct pci_bus *bus)
        bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
        if (!bus_ops)
                return -ENOMEM;
-       ops = pci_bus_set_ops(bus, &pci_ops_aer);
+       ops = pci_bus_set_ops(bus, &aer_inj_pci_ops);
        spin_lock_irqsave(&inject_lock, flags);
-       if (ops == &pci_ops_aer)
+       if (ops == &aer_inj_pci_ops)
                goto out;
        pci_bus_ops_init(bus_ops, bus, ops);
        list_add(&bus_ops->list, &pci_bus_ops_list);
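
list_first_entry_or_null() from <linux/list.h> replaces the empty-check/list_entry/list_del dance in pci_bus_ops_pop() above. The same pop-under-a-lock shape, written generically with a made-up element type:

        struct item {                           /* hypothetical element type */
                struct list_head node;
                int payload;
        };

        static struct item *item_pop(struct list_head *head, spinlock_t *lock)
        {
                struct item *it;
                unsigned long flags;

                spin_lock_irqsave(lock, flags);
                it = list_first_entry_or_null(head, struct item, node);
                if (it)
                        list_del(&it->node);    /* unlink while still holding the lock */
                spin_unlock_irqrestore(lock, flags);

                return it;                      /* NULL when the list was empty */
        }
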
index 0bf82a20a0fb479ccfbdb43479bf9b0cf6ecff6a..48d21e0edd568cedc16b1626f78f0a95b22bca1c 100644 (file)
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
        rpc->rpd = dev;
        INIT_WORK(&rpc->dpc_handler, aer_isr);
        mutex_init(&rpc->rpc_mutex);
-       init_waitqueue_head(&rpc->wait_release);
 
        /* Use PCIe bus function to store rpc into PCIe device */
        set_service_data(dev, rpc);
@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
                if (rpc->isr)
                        free_irq(dev->irq, dev);
 
-               wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
-
+               flush_work(&rpc->dpc_handler);
                aer_disable_rootport(rpc);
                kfree(rpc);
                set_service_data(dev, NULL);
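
Swapping the hand-rolled waitqueue for flush_work() lets the workqueue core guarantee that the ISR-scheduled handler is neither queued nor running once the call returns. The teardown ordering that makes the later kfree() safe, spelled out:

        free_irq(dev->irq, dev);        /* 1: no new work can be queued from the ISR */
        flush_work(&rpc->dpc_handler);  /* 2: wait out anything already queued or running */
        aer_disable_rootport(rpc);      /* 3: quiesce the hardware */
        kfree(rpc);                     /* 4: only now is it safe to free the work item's data */
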
index 84420b7c9456ecbb0e43a9e8ca539af5b2ee1a20..945c939a86c5c2919335d85a95dbeee34f9c0bff 100644 (file)
@@ -72,7 +72,6 @@ struct aer_rpc {
                                         * recovery on the same
                                         * root port hierarchy
                                         */
-       wait_queue_head_t wait_release;
 };
 
 struct aer_broadcast_data {
index 712392504ed9b9f1a8992079f801cf2c271a0ae6..521e39c1b66d597f8881d9ab02a4e4b41c1b68a0 100644 (file)
@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
        while (get_e_source(rpc, &e_src))
                aer_isr_one_error(p_device, &e_src);
        mutex_unlock(&rpc->rpc_mutex);
-
-       wake_up(&rpc->wait_release);
 }
 
 /**
index 63fc63911295e7c0be875b39436c889c6850234e..1ae4c73e7a3c3037f5430df3159b606f69287c77 100644 (file)
@@ -396,7 +396,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
 {
        struct pcie_pme_service_data *data = get_service_data(srv);
        struct pci_dev *port = srv->port;
-       bool wakeup;
+       bool wakeup, wake_irq_enabled = false;
        int ret;
 
        if (device_may_wakeup(&port->dev)) {
@@ -409,11 +409,12 @@ static int pcie_pme_suspend(struct pcie_device *srv)
        spin_lock_irq(&data->lock);
        if (wakeup) {
                ret = enable_irq_wake(srv->irq);
-               data->suspend_level = PME_SUSPEND_WAKEUP;
+               if (ret == 0) {
+                       data->suspend_level = PME_SUSPEND_WAKEUP;
+                       wake_irq_enabled = true;
+               }
        }
-       if (!wakeup || ret) {
-               struct pci_dev *port = srv->port;
-
+       if (!wake_irq_enabled) {
                pcie_pme_interrupt_enable(port, false);
                pcie_clear_root_pme_status(port);
                data->suspend_level = PME_SUSPEND_NOIRQ;
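
enable_irq_wake() can legitimately fail (for instance when the irqchip cannot arm that IRQ for wakeup), and the old code handled that by overwriting suspend_level after the fact while also shadowing 'port'. The new wake_irq_enabled flag states the intent directly; condensed, the logic is:

        bool armed = false;

        if (wakeup && enable_irq_wake(srv->irq) == 0)   /* 0 on success */
                armed = true;

        if (!armed) {
                /* no usable wake IRQ: mask PME and take the no-IRQ suspend path */
                pcie_pme_interrupt_enable(port, false);
                pcie_clear_root_pme_status(port);
        }
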
index 6d7ab9bb0d5a6f002debd5a3b83e569f78c44c96..5eb378fbe8496dd2b715164e9fd6fcaaef417bd0 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/pci-aspm.h>
 #include <linux/aer.h>
 #include <linux/acpi.h>
-#include <asm-generic/pci-bridge.h>
 #include "pci.h"
 
 #define CARDBUS_LATENCY_TIMER  176     /* secondary latency timer */
@@ -1803,6 +1802,13 @@ static int only_one_child(struct pci_bus *bus)
                return 0;
        if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
                return 1;
+
+       /*
+        * PCIe downstream ports are bridges that normally lead to only a
+        * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
+        * possible devices, not just device 0.  See PCIe spec r3.0,
+        * sec 7.3.1.
+        */
        if (parent->has_secondary_link &&
            !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
                return 1;
index 0575a1e026b4c4550ee07655910cc64d0e0e9341..85fa6a2a6dd20c8866b9e58f12193b6f6a20a390 100644 (file)
@@ -3832,6 +3832,19 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
 #endif
 }
 
+static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
+{
+       /*
+        * Cavium devices matching this quirk do not perform peer-to-peer
+        * with other functions, allowing masking out these bits as if they
+        * were unimplemented in the ACS capability.
+        */
+       acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+                      PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+
+       return acs_flags ? 0 : 1;
+}
+
 /*
  * Many Intel PCH root ports do provide ACS-like features to disable peer
  * transactions and validate bus numbers in requests, but do not provide an
@@ -3984,6 +3997,8 @@ static const struct pci_dev_acs_enabled {
        { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
        { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
        { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
+       /* Cavium ThunderX */
+       { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
        { 0 }
 };
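
The return convention for these ACS quirks: clear from acs_flags every isolation property the device effectively provides (or that cannot apply to it), then report 1 only if nothing the caller asked for is left over. A generic sketch with an assumed capability set:

        static int example_acs_quirk(struct pci_dev *dev, u16 acs_flags)
        {
                /*
                 * Assume, for this sketch only, that the device provides
                 * source-validation and request-redirect equivalents in
                 * hardware...
                 */
                acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR);

                /* ...so succeed only if no other requested bits remain */
                return acs_flags ? 0 : 1;
        }
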
 
index 7796d0a5befafb958c8bae6193b4bca56da96203..55641a39a3e94f3eab29a5494d943afc9614d3e6 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/ioport.h>
 #include <linux/cache.h>
 #include <linux/slab.h>
-#include <asm-generic/pci-bridge.h>
 #include "pci.h"
 
 unsigned int pci_flags;
index 8c7f27db6ad352260f8ad8675758ed365b6390a6..a931b1a1dda9525b633faf9f4aec9ef0889b5e0c 100644 (file)
@@ -500,10 +500,8 @@ struct phy *phy_get(struct device *dev, const char *string)
        int index = 0;
        struct phy *phy;
 
-       if (string == NULL) {
-               dev_WARN(dev, "missing string\n");
+       if (dev_WARN(dev, string == NULL, "missing string\n"))
                return ERR_PTR(-EINVAL);
-       }
 
        if (dev->of_node) {
                index = of_property_match_string(dev->of_node, "phy-names",
index 1e1e594238892a3bf18d99ee0e4e35d58ac96d1f..005629447b0ce624b24ed02835e3d9314065a50a 100644 (file)
@@ -33,6 +33,9 @@
 #define WMAX_METHOD_BRIGHTNESS         0x3
 #define WMAX_METHOD_ZONE_CONTROL       0x4
 #define WMAX_METHOD_HDMI_CABLE         0x5
+#define WMAX_METHOD_AMPLIFIER_CABLE    0x6
+#define WMAX_METHOD_DEEP_SLEEP_CONTROL 0x0B
+#define WMAX_METHOD_DEEP_SLEEP_STATUS  0x0C
 
 MODULE_AUTHOR("Mario Limonciello <mario_limonciello@dell.com>");
 MODULE_DESCRIPTION("Alienware special feature control");
@@ -60,6 +63,8 @@ enum WMAX_CONTROL_STATES {
 struct quirk_entry {
        u8 num_zones;
        u8 hdmi_mux;
+       u8 amplifier;
+       u8 deepslp;
 };
 
 static struct quirk_entry *quirks;
@@ -67,16 +72,43 @@ static struct quirk_entry *quirks;
 static struct quirk_entry quirk_unknown = {
        .num_zones = 2,
        .hdmi_mux = 0,
+       .amplifier = 0,
+       .deepslp = 0,
 };
 
-static struct quirk_entry quirk_x51_family = {
+static struct quirk_entry quirk_x51_r1_r2 = {
        .num_zones = 3,
-       .hdmi_mux = 0.
+       .hdmi_mux = 0,
+       .amplifier = 0,
+       .deepslp = 0,
+};
+
+static struct quirk_entry quirk_x51_r3 = {
+       .num_zones = 4,
+       .hdmi_mux = 0,
+       .amplifier = 1,
+       .deepslp = 0,
 };
 
 static struct quirk_entry quirk_asm100 = {
        .num_zones = 2,
        .hdmi_mux = 1,
+       .amplifier = 0,
+       .deepslp = 0,
+};
+
+static struct quirk_entry quirk_asm200 = {
+       .num_zones = 2,
+       .hdmi_mux = 1,
+       .amplifier = 0,
+       .deepslp = 1,
+};
+
+static struct quirk_entry quirk_asm201 = {
+       .num_zones = 2,
+       .hdmi_mux = 1,
+       .amplifier = 1,
+       .deepslp = 1,
 };
 
 static int __init dmi_matched(const struct dmi_system_id *dmi)
@@ -88,12 +120,12 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
 static const struct dmi_system_id alienware_quirks[] __initconst = {
        {
         .callback = dmi_matched,
-        .ident = "Alienware X51 R1",
+        .ident = "Alienware X51 R3",
         .matches = {
                     DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
-                    DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R3"),
                     },
-        .driver_data = &quirk_x51_family,
+        .driver_data = &quirk_x51_r3,
         },
        {
         .callback = dmi_matched,
@@ -102,17 +134,44 @@ static const struct dmi_system_id alienware_quirks[] __initconst = {
                     DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
                     DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51 R2"),
                     },
-        .driver_data = &quirk_x51_family,
+        .driver_data = &quirk_x51_r1_r2,
+        },
+       {
+        .callback = dmi_matched,
+        .ident = "Alienware X51 R1",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "Alienware X51"),
+                    },
+        .driver_data = &quirk_x51_r1_r2,
+        },
+       {
+        .callback = dmi_matched,
+        .ident = "Alienware ASM100",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
+                    },
+        .driver_data = &quirk_asm100,
+        },
+       {
+        .callback = dmi_matched,
+        .ident = "Alienware ASM200",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "ASM200"),
+                    },
+        .driver_data = &quirk_asm200,
         },
        {
-               .callback = dmi_matched,
-               .ident = "Alienware ASM100",
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ASM100"),
-               },
-               .driver_data = &quirk_asm100,
-       },
+        .callback = dmi_matched,
+        .ident = "Alienware ASM201",
+        .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+                    DMI_MATCH(DMI_PRODUCT_NAME, "ASM201"),
+                    },
+        .driver_data = &quirk_asm201,
+        },
        {}
 };
 
@@ -133,7 +192,7 @@ struct wmax_brightness_args {
        u32 percentage;
 };
 
-struct hdmi_args {
+struct wmax_basic_args {
        u8 arg;
 };
 
@@ -170,7 +229,7 @@ static u8 global_brightness;
 
 /*
  * Helpers used for zone control
-*/
+ */
 static int parse_rgb(const char *buf, struct platform_zone *zone)
 {
        long unsigned int rgb;
@@ -210,7 +269,7 @@ static struct platform_zone *match_zone(struct device_attribute *attr)
 
 /*
  * Individual RGB zone control
-*/
+ */
 static int alienware_update_led(struct platform_zone *zone)
 {
        int method_id;
@@ -218,16 +277,16 @@ static int alienware_update_led(struct platform_zone *zone)
        char *guid;
        struct acpi_buffer input;
        struct legacy_led_args legacy_args;
-       struct wmax_led_args wmax_args;
+       struct wmax_led_args wmax_basic_args;
        if (interface == WMAX) {
-               wmax_args.led_mask = 1 << zone->location;
-               wmax_args.colors = zone->colors;
-               wmax_args.state = lighting_control_state;
+               wmax_basic_args.led_mask = 1 << zone->location;
+               wmax_basic_args.colors = zone->colors;
+               wmax_basic_args.state = lighting_control_state;
                guid = WMAX_CONTROL_GUID;
                method_id = WMAX_METHOD_ZONE_CONTROL;
 
-               input.length = (acpi_size) sizeof(wmax_args);
-               input.pointer = &wmax_args;
+               input.length = (acpi_size) sizeof(wmax_basic_args);
+               input.pointer = &wmax_basic_args;
        } else {
                legacy_args.colors = zone->colors;
                legacy_args.brightness = global_brightness;
@@ -283,7 +342,7 @@ static ssize_t zone_set(struct device *dev, struct device_attribute *attr,
 
 /*
  * LED Brightness (Global)
-*/
+ */
 static int wmax_brightness(int brightness)
 {
        acpi_status status;
@@ -327,7 +386,7 @@ static struct led_classdev global_led = {
 
 /*
  * Lighting control state device attribute (Global)
-*/
+ */
 static ssize_t show_control_state(struct device *dev,
                                  struct device_attribute *attr, char *buf)
 {
@@ -435,11 +494,7 @@ static void alienware_zone_exit(struct platform_device *dev)
        kfree(zone_attrs);
 }
 
-/*
-       The HDMI mux sysfs node indicates the status of the HDMI input mux.
-       It can toggle between standard system GPU output and HDMI input.
-*/
-static acpi_status alienware_hdmi_command(struct hdmi_args *in_args,
+static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
                                          u32 command, int *out_data)
 {
        acpi_status status;
@@ -467,16 +522,20 @@ static acpi_status alienware_hdmi_command(struct hdmi_args *in_args,
 
 }
 
+/*
+ *     The HDMI mux sysfs node indicates the status of the HDMI input mux.
+ *     It can toggle between standard system GPU output and HDMI input.
+ */
 static ssize_t show_hdmi_cable(struct device *dev,
                               struct device_attribute *attr, char *buf)
 {
        acpi_status status;
        u32 out_data;
-       struct hdmi_args in_args = {
+       struct wmax_basic_args in_args = {
                .arg = 0,
        };
        status =
-           alienware_hdmi_command(&in_args, WMAX_METHOD_HDMI_CABLE,
+           alienware_wmax_command(&in_args, WMAX_METHOD_HDMI_CABLE,
                                   (u32 *) &out_data);
        if (ACPI_SUCCESS(status)) {
                if (out_data == 0)
@@ -495,11 +554,11 @@ static ssize_t show_hdmi_source(struct device *dev,
 {
        acpi_status status;
        u32 out_data;
-       struct hdmi_args in_args = {
+       struct wmax_basic_args in_args = {
                .arg = 0,
        };
        status =
-           alienware_hdmi_command(&in_args, WMAX_METHOD_HDMI_STATUS,
+           alienware_wmax_command(&in_args, WMAX_METHOD_HDMI_STATUS,
                                   (u32 *) &out_data);
 
        if (ACPI_SUCCESS(status)) {
@@ -519,7 +578,7 @@ static ssize_t toggle_hdmi_source(struct device *dev,
                                  const char *buf, size_t count)
 {
        acpi_status status;
-       struct hdmi_args args;
+       struct wmax_basic_args args;
        if (strcmp(buf, "gpu\n") == 0)
                args.arg = 1;
        else if (strcmp(buf, "input\n") == 0)
@@ -528,7 +587,7 @@ static ssize_t toggle_hdmi_source(struct device *dev,
                args.arg = 3;
        pr_debug("alienware-wmi: setting hdmi to %d : %s", args.arg, buf);
 
-       status = alienware_hdmi_command(&args, WMAX_METHOD_HDMI_SOURCE, NULL);
+       status = alienware_wmax_command(&args, WMAX_METHOD_HDMI_SOURCE, NULL);
 
        if (ACPI_FAILURE(status))
                pr_err("alienware-wmi: HDMI toggle failed: results: %u\n",
@@ -563,11 +622,144 @@ static int create_hdmi(struct platform_device *dev)
 
        ret = sysfs_create_group(&dev->dev.kobj, &hdmi_attribute_group);
        if (ret)
-               goto error_create_hdmi;
-       return 0;
+               remove_hdmi(dev);
+       return ret;
+}
 
-error_create_hdmi:
-       remove_hdmi(dev);
+/*
+ * Alienware GFX amplifier support
+ * - Currently supports reading cable status
+ * - Leaving expansion room to possibly support dock/undock events later
+ */
+static ssize_t show_amplifier_status(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       acpi_status status;
+       u32 out_data;
+       struct wmax_basic_args in_args = {
+               .arg = 0,
+       };
+       status =
+           alienware_wmax_command(&in_args, WMAX_METHOD_AMPLIFIER_CABLE,
+                                  (u32 *) &out_data);
+       if (ACPI_SUCCESS(status)) {
+               if (out_data == 0)
+                       return scnprintf(buf, PAGE_SIZE,
+                                        "[unconnected] connected unknown\n");
+               else if (out_data == 1)
+                       return scnprintf(buf, PAGE_SIZE,
+                                        "unconnected [connected] unknown\n");
+       }
+       pr_err("alienware-wmi: unknown amplifier cable status: %d\n", status);
+       return scnprintf(buf, PAGE_SIZE, "unconnected connected [unknown]\n");
+}
+
+static DEVICE_ATTR(status, S_IRUGO, show_amplifier_status, NULL);
+
+static struct attribute *amplifier_attrs[] = {
+       &dev_attr_status.attr,
+       NULL,
+};
+
+static struct attribute_group amplifier_attribute_group = {
+       .name = "amplifier",
+       .attrs = amplifier_attrs,
+};
+
+static void remove_amplifier(struct platform_device *dev)
+{
+       if (quirks->amplifier > 0)
+               sysfs_remove_group(&dev->dev.kobj, &amplifier_attribute_group);
+}
+
+static int create_amplifier(struct platform_device *dev)
+{
+       int ret;
+
+       ret = sysfs_create_group(&dev->dev.kobj, &amplifier_attribute_group);
+       if (ret)
+               remove_amplifier(dev);
+       return ret;
+}
+
+/*
+ * Deep Sleep Control support
+ * - Modifies BIOS setting for deep sleep control allowing extra wakeup events
+ */
+static ssize_t show_deepsleep_status(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       acpi_status status;
+       u32 out_data;
+       struct wmax_basic_args in_args = {
+               .arg = 0,
+       };
+       status = alienware_wmax_command(&in_args, WMAX_METHOD_DEEP_SLEEP_STATUS,
+                                       (u32 *) &out_data);
+       if (ACPI_SUCCESS(status)) {
+               if (out_data == 0)
+                       return scnprintf(buf, PAGE_SIZE,
+                                        "[disabled] s5 s5_s4\n");
+               else if (out_data == 1)
+                       return scnprintf(buf, PAGE_SIZE,
+                                        "disabled [s5] s5_s4\n");
+               else if (out_data == 2)
+                       return scnprintf(buf, PAGE_SIZE,
+                                        "disabled s5 [s5_s4]\n");
+       }
+       pr_err("alienware-wmi: unknown deep sleep status: %d\n", status);
+       return scnprintf(buf, PAGE_SIZE, "disabled s5 s5_s4 [unknown]\n");
+}
+
+static ssize_t toggle_deepsleep(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       acpi_status status;
+       struct wmax_basic_args args;
+
+       if (strcmp(buf, "disabled\n") == 0)
+               args.arg = 0;
+       else if (strcmp(buf, "s5\n") == 0)
+               args.arg = 1;
+       else
+               args.arg = 2;
+       pr_debug("alienware-wmi: setting deep sleep to %d : %s", args.arg, buf);
+
+       status = alienware_wmax_command(&args, WMAX_METHOD_DEEP_SLEEP_CONTROL,
+                                       NULL);
+
+       if (ACPI_FAILURE(status))
+               pr_err("alienware-wmi: deep sleep control failed: results: %u\n",
+                       status);
+       return count;
+}
+
+static DEVICE_ATTR(deepsleep, S_IRUGO | S_IWUSR, show_deepsleep_status, toggle_deepsleep);
+
+static struct attribute *deepsleep_attrs[] = {
+       &dev_attr_deepsleep.attr,
+       NULL,
+};
+
+static struct attribute_group deepsleep_attribute_group = {
+       .name = "deepsleep",
+       .attrs = deepsleep_attrs,
+};
+
+static void remove_deepsleep(struct platform_device *dev)
+{
+       if (quirks->deepslp > 0)
+               sysfs_remove_group(&dev->dev.kobj, &deepsleep_attribute_group);
+}
+
+static int create_deepsleep(struct platform_device *dev)
+{
+       int ret;
+
+       ret = sysfs_create_group(&dev->dev.kobj, &deepsleep_attribute_group);
+       if (ret)
+               remove_deepsleep(dev);
        return ret;
 }
 
@@ -606,6 +798,18 @@ static int __init alienware_wmi_init(void)
                        goto fail_prep_hdmi;
        }
 
+       if (quirks->amplifier > 0) {
+               ret = create_amplifier(platform_device);
+               if (ret)
+                       goto fail_prep_amplifier;
+       }
+
+       if (quirks->deepslp > 0) {
+               ret = create_deepsleep(platform_device);
+               if (ret)
+                       goto fail_prep_deepsleep;
+       }
+
        ret = alienware_zone_init(platform_device);
        if (ret)
                goto fail_prep_zones;
@@ -614,6 +818,8 @@ static int __init alienware_wmi_init(void)
 
 fail_prep_zones:
        alienware_zone_exit(platform_device);
+fail_prep_deepsleep:
+fail_prep_amplifier:
 fail_prep_hdmi:
        platform_device_del(platform_device);
 fail_platform_device2:
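
The two empty labels above work because create_amplifier() and create_deepsleep() already remove their own sysfs group when sysfs_create_group() fails, so the unwind ladder only has to undo the steps that completed earlier. The general shape of such a ladder, with placeholder step names:

        ret = register_base();          /* placeholder: something that needs explicit undo */
        if (ret)
                return ret;

        ret = setup_a();                /* placeholder: cleans up after itself on failure */
        if (ret)
                goto fail_a;

        ret = setup_b();                /* placeholder: also self-cleaning */
        if (ret)
                goto fail_b;

        return 0;

fail_b:                                 /* nothing of b's to undo */
fail_a:                                 /* nothing of a's to undo either */
        unregister_base();              /* undo the one step that still needs it */
        return ret;
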
index f236250ac10668df0368c644dcf099d471e8319f..4034d2d4c50795684611df474755855f7376ef6b 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/acpi.h>
 #include <linux/pnp.h>
 #include <linux/apple_bl.h>
+#include <linux/apple-gmux.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
@@ -57,7 +58,9 @@ struct apple_gmux_data {
        /* switcheroo data */
        acpi_handle dhandle;
        int gpe;
-       enum vga_switcheroo_client_id resume_client_id;
+       enum vga_switcheroo_client_id switch_state_display;
+       enum vga_switcheroo_client_id switch_state_ddc;
+       enum vga_switcheroo_client_id switch_state_external;
        enum vga_switcheroo_state power_state;
        struct completion powerchange_done;
 };
@@ -368,19 +371,70 @@ static const struct backlight_ops gmux_bl_ops = {
  * for the selected GPU.
  */
 
+static void gmux_read_switch_state(struct apple_gmux_data *gmux_data)
+{
+       if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DDC) == 1)
+               gmux_data->switch_state_ddc = VGA_SWITCHEROO_IGD;
+       else
+               gmux_data->switch_state_ddc = VGA_SWITCHEROO_DIS;
+
+       if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2)
+               gmux_data->switch_state_display = VGA_SWITCHEROO_IGD;
+       else
+               gmux_data->switch_state_display = VGA_SWITCHEROO_DIS;
+
+       if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL) == 2)
+               gmux_data->switch_state_external = VGA_SWITCHEROO_IGD;
+       else
+               gmux_data->switch_state_external = VGA_SWITCHEROO_DIS;
+}
+
+static void gmux_write_switch_state(struct apple_gmux_data *gmux_data)
+{
+       if (gmux_data->switch_state_ddc == VGA_SWITCHEROO_IGD)
+               gmux_write8(gmux_data, GMUX_PORT_SWITCH_DDC, 1);
+       else
+               gmux_write8(gmux_data, GMUX_PORT_SWITCH_DDC, 2);
+
+       if (gmux_data->switch_state_display == VGA_SWITCHEROO_IGD)
+               gmux_write8(gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2);
+       else
+               gmux_write8(gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3);
+
+       if (gmux_data->switch_state_external == VGA_SWITCHEROO_IGD)
+               gmux_write8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2);
+       else
+               gmux_write8(gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3);
+}
+
 static int gmux_switchto(enum vga_switcheroo_client_id id)
 {
-       if (id == VGA_SWITCHEROO_IGD) {
+       apple_gmux_data->switch_state_ddc = id;
+       apple_gmux_data->switch_state_display = id;
+       apple_gmux_data->switch_state_external = id;
+
+       gmux_write_switch_state(apple_gmux_data);
+
+       return 0;
+}
+
+static int gmux_switch_ddc(enum vga_switcheroo_client_id id)
+{
+       enum vga_switcheroo_client_id old_ddc_owner =
+               apple_gmux_data->switch_state_ddc;
+
+       if (id == old_ddc_owner)
+               return id;
+
+       pr_debug("Switching DDC from %d to %d\n", old_ddc_owner, id);
+       apple_gmux_data->switch_state_ddc = id;
+
+       if (id == VGA_SWITCHEROO_IGD)
                gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1);
-               gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2);
-               gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2);
-       } else {
+       else
                gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2);
-               gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3);
-               gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3);
-       }
 
-       return 0;
+       return old_ddc_owner;
 }
 
 /**
@@ -440,17 +494,15 @@ static int gmux_get_client_id(struct pci_dev *pdev)
                return VGA_SWITCHEROO_DIS;
 }
 
-static enum vga_switcheroo_client_id
-gmux_active_client(struct apple_gmux_data *gmux_data)
-{
-       if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2)
-               return VGA_SWITCHEROO_IGD;
-
-       return VGA_SWITCHEROO_DIS;
-}
+static const struct vga_switcheroo_handler gmux_handler_indexed = {
+       .switchto = gmux_switchto,
+       .power_state = gmux_set_power_state,
+       .get_client_id = gmux_get_client_id,
+};
 
-static const struct vga_switcheroo_handler gmux_handler = {
+static const struct vga_switcheroo_handler gmux_handler_classic = {
        .switchto = gmux_switchto,
+       .switch_ddc = gmux_switch_ddc,
        .power_state = gmux_set_power_state,
        .get_client_id = gmux_get_client_id,
 };
@@ -513,7 +565,6 @@ static int gmux_suspend(struct device *dev)
        struct pnp_dev *pnp = to_pnp_dev(dev);
        struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
 
-       gmux_data->resume_client_id = gmux_active_client(gmux_data);
        gmux_disable_interrupts(gmux_data);
        return 0;
 }
@@ -524,7 +575,7 @@ static int gmux_resume(struct device *dev)
        struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp);
 
        gmux_enable_interrupts(gmux_data);
-       gmux_switchto(gmux_data->resume_client_id);
+       gmux_write_switch_state(gmux_data);
        if (gmux_data->power_state == VGA_SWITCHEROO_OFF)
                gmux_set_discrete_state(gmux_data, gmux_data->power_state);
        return 0;
@@ -704,9 +755,23 @@ static int gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
        apple_gmux_data = gmux_data;
        init_completion(&gmux_data->powerchange_done);
        gmux_enable_interrupts(gmux_data);
+       gmux_read_switch_state(gmux_data);
 
-       if (vga_switcheroo_register_handler(&gmux_handler)) {
-               ret = -ENODEV;
+       /*
+        * Retina MacBook Pros cannot switch the panel's AUX separately
+        * and need eDP pre-calibration. They are distinguishable from
+        * pre-retinas by having an "indexed" gmux.
+        *
+        * Pre-retina MacBook Pros can switch the panel's DDC separately.
+        */
+       if (gmux_data->indexed)
+               ret = vga_switcheroo_register_handler(&gmux_handler_indexed,
+                                             VGA_SWITCHEROO_NEEDS_EDP_CONFIG);
+       else
+               ret = vga_switcheroo_register_handler(&gmux_handler_classic,
+                                             VGA_SWITCHEROO_CAN_SWITCH_DDC);
+       if (ret) {
+               pr_err("Failed to register vga_switcheroo handler\n");
                goto err_register_handler;
        }
 
@@ -764,7 +829,7 @@ static void gmux_remove(struct pnp_dev *pnp)
 }
 
 static const struct pnp_device_id gmux_device_ids[] = {
-       {"APP000B", 0},
+       {GMUX_ACPI_HID, 0},
        {"", 0}
 };
 
index d28db0e793dfb0a3b103bcfc71a9a6a859506ed4..d78ee151c9e4a703bbeb071cda98032cd03dccee 100644 (file)
@@ -899,6 +899,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"),
                },
        },
+       {
+               .ident = "Lenovo Yoga 700",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
+               },
+       },
        {
                .ident = "Lenovo Yoga 900",
                .matches = {
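
Tables like this are handed to dmi_check_system(), which fires the callback of (and counts) every entry whose matches[] strings are all found in the firmware's DMI fields. A self-contained sketch that matches on the same product string; the table and helper names are made up:

        #include <linux/dmi.h>

        static const struct dmi_system_id example_table[] = {
                {
                        .ident = "Lenovo Yoga 700",
                        .matches = {
                                DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
                                DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
                        },
                },
                { }                     /* terminator */
        };

        static bool on_listed_hardware(void)
        {
                return dmi_check_system(example_table) > 0;
        }
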
index 5b31d1548c07b1221114031eefc966e02538af2d..f5134acd6ff05d8a5eac080eed9f4b26a96847f3 100644 (file)
        } \
 }
 
+#ifdef CONFIG_PM_SLEEP
 static u8 suspend_prep_ok;
 static u32 suspend_shlw_ctr_temp, suspend_deep_ctr_temp;
 static u64 suspend_shlw_res_temp, suspend_deep_res_temp;
+#endif
 
 struct telemetry_susp_stats {
        u32 shlw_swake_ctr;
index a268a7abf8abe754f30fba55941b04fef2ffdef8..e305ab541a2227e1f94ff3e80a0da369ed385946 100644 (file)
@@ -6653,18 +6653,16 @@ static void __init tpacpi_detect_brightness_capabilities(void)
        switch (b) {
        case 16:
                bright_maxlvl = 15;
-               pr_info("detected a 16-level brightness capable ThinkPad\n");
                break;
        case 8:
        case 0:
                bright_maxlvl = 7;
-               pr_info("detected a 8-level brightness capable ThinkPad\n");
                break;
        default:
-               pr_info("Unsupported brightness interface\n");
                tp_features.bright_unkfw = 1;
                bright_maxlvl = b - 1;
        }
+       pr_debug("detected %u brightness levels\n", bright_maxlvl + 1);
 }
 
 static int __init brightness_init(struct ibm_init_struct *iibm)
index 73833079bac89a0b97381037b7eacef125ca15fe..df1f1a76a8629fdf4c1b709ff519040ffad18552 100644 (file)
@@ -36,6 +36,7 @@
 
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/init.h>
 #include <linux/types.h>
 #include <linux/proc_fs.h>
@@ -117,6 +118,7 @@ MODULE_LICENSE("GPL");
 #define HCI_LCD_BRIGHTNESS             0x002a
 #define HCI_WIRELESS                   0x0056
 #define HCI_ACCELEROMETER              0x006d
+#define HCI_COOLING_METHOD             0x007f
 #define HCI_KBD_ILLUMINATION           0x0095
 #define HCI_ECO_MODE                   0x0097
 #define HCI_ACCELEROMETER2             0x00a6
@@ -186,6 +188,7 @@ struct toshiba_acpi_dev {
        int usbsc_bat_level;
        int usbsc_mode_base;
        int hotkey_event_type;
+       int max_cooling_method;
 
        unsigned int illumination_supported:1;
        unsigned int video_supported:1;
@@ -205,6 +208,7 @@ struct toshiba_acpi_dev {
        unsigned int panel_power_on_supported:1;
        unsigned int usb_three_supported:1;
        unsigned int wwan_supported:1;
+       unsigned int cooling_method_supported:1;
        unsigned int sysfs_created:1;
        unsigned int special_functions;
 
@@ -217,6 +221,10 @@ struct toshiba_acpi_dev {
 
 static struct toshiba_acpi_dev *toshiba_acpi;
 
+static bool disable_hotkeys;
+module_param(disable_hotkeys, bool, 0444);
+MODULE_PARM_DESC(disable_hotkeys, "Disables the hotkeys activation");
+
 static const struct acpi_device_id toshiba_device_ids[] = {
        {"TOS6200", 0},
        {"TOS6207", 0},
@@ -1194,6 +1202,53 @@ static int toshiba_wwan_set(struct toshiba_acpi_dev *dev, u32 state)
        return out[0] == TOS_SUCCESS ? 0 : -EIO;
 }
 
+/* Cooling Method */
+static void toshiba_cooling_method_available(struct toshiba_acpi_dev *dev)
+{
+       u32 in[TCI_WORDS] = { HCI_GET, HCI_COOLING_METHOD, 0, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
+
+       dev->cooling_method_supported = 0;
+       dev->max_cooling_method = 0;
+
+       status = tci_raw(dev, in, out);
+       if (ACPI_FAILURE(status))
+               pr_err("ACPI call to get Cooling Method failed\n");
+
+       if (out[0] != TOS_SUCCESS && out[0] != TOS_SUCCESS2)
+               return;
+
+       dev->cooling_method_supported = 1;
+       dev->max_cooling_method = out[3];
+}
+
+static int toshiba_cooling_method_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+       u32 result = hci_read(dev, HCI_COOLING_METHOD, state);
+
+       if (result == TOS_FAILURE)
+               pr_err("ACPI call to get Cooling Method failed\n");
+
+       if (result == TOS_NOT_SUPPORTED)
+               return -ENODEV;
+
+       return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
+static int toshiba_cooling_method_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+       u32 result = hci_write(dev, HCI_COOLING_METHOD, state);
+
+       if (result == TOS_FAILURE)
+               pr_err("ACPI call to set Cooling Method failed\n");
+
+       if (result == TOS_NOT_SUPPORTED)
+               return -ENODEV;
+
+       return (result == TOS_SUCCESS || result == TOS_SUCCESS2) ? 0 : -EIO;
+}
+
 /* Transflective Backlight */
 static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, u32 *status)
 {
@@ -2239,6 +2294,54 @@ static ssize_t usb_three_store(struct device *dev,
 }
 static DEVICE_ATTR_RW(usb_three);
 
+static ssize_t cooling_method_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = toshiba_cooling_method_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d %d\n", state, toshiba->max_cooling_method);
+}
+
+static ssize_t cooling_method_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+
+       /*
+        * Check for supported values
+        * Depending on the laptop model, some only support these two:
+        * 0 - Maximum Performance
+        * 1 - Battery Optimized
+        *
+        * While some others support all three methods:
+        * 0 - Maximum Performance
+        * 1 - Performance
+        * 2 - Battery Optimized
+        */
+       if (state < 0 || state > toshiba->max_cooling_method)
+               return -EINVAL;
+
+       ret = toshiba_cooling_method_set(toshiba, state);
+       if (ret)
+               return ret;
+
+       return count;
+}
+static DEVICE_ATTR_RW(cooling_method);
+
 static struct attribute *toshiba_attributes[] = {
        &dev_attr_version.attr,
        &dev_attr_fan.attr,
@@ -2255,6 +2358,7 @@ static struct attribute *toshiba_attributes[] = {
        &dev_attr_kbd_function_keys.attr,
        &dev_attr_panel_power_on.attr,
        &dev_attr_usb_three.attr,
+       &dev_attr_cooling_method.attr,
        NULL,
 };
 
@@ -2289,6 +2393,8 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
                exists = (drv->panel_power_on_supported) ? true : false;
        else if (attr == &dev_attr_usb_three.attr)
                exists = (drv->usb_three_supported) ? true : false;
+       else if (attr == &dev_attr_cooling_method.attr)
+               exists = (drv->cooling_method_supported) ? true : false;
 
        return exists ? attr->mode : 0;
 }
@@ -2591,6 +2697,11 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
        acpi_handle ec_handle;
        int error;
 
+       if (disable_hotkeys) {
+               pr_info("Hotkeys disabled by module parameter\n");
+               return 0;
+       }
+
        if (wmi_has_guid(TOSHIBA_WMI_EVENT_GUID)) {
                pr_info("WMI event detected, hotkeys will not be monitored\n");
                return 0;
@@ -2779,6 +2890,8 @@ static void print_supported_features(struct toshiba_acpi_dev *dev)
                pr_cont(" usb3");
        if (dev->wwan_supported)
                pr_cont(" wwan");
+       if (dev->cooling_method_supported)
+               pr_cont(" cooling-method");
 
        pr_cont("\n");
 }
@@ -2963,6 +3076,8 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
        if (dev->wwan_supported)
                toshiba_acpi_setup_wwan_rfkill(dev);
 
+       toshiba_cooling_method_available(dev);
+
        print_supported_features(dev);
 
        ret = sysfs_create_group(&dev->acpi_dev->dev.kobj,
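
The new cooling_method attribute is listed unconditionally in toshiba_attributes[] but only appears on machines where toshiba_sysfs_is_visible() keeps it, which is the idiomatic way to ship one attribute array and prune it per device. A stripped-down sketch of the mechanism; the ex_* names and the driver-data field are placeholders:

        static umode_t ex_is_visible(struct kobject *kobj,
                                     struct attribute *attr, int idx)
        {
                struct device *dev = container_of(kobj, struct device, kobj);
                struct ex_drvdata *drv = dev_get_drvdata(dev);  /* placeholder type */

                if (attr == &dev_attr_cooling_method.attr &&
                    !drv->cooling_method_supported)
                        return 0;               /* hide: the node is not created at all */

                return attr->mode;              /* keep the declared permissions */
        }

        static const struct attribute_group ex_group = {
                .attrs = ex_attrs,              /* placeholder attribute array */
                .is_visible = ex_is_visible,
        };
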
index f700723ca5d6b1a8a0c342c568908f9c53aa2435..d28e3ab9479c64e41fe014714c08721fc503faf3 100644 (file)
@@ -342,6 +342,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
 /* Device IDs of parts that have 32KB MCH space */
 static const unsigned int mch_quirk_devices[] = {
        0x0154, /* Ivy Bridge */
+       0x0a04, /* Haswell-ULT */
        0x0c00, /* Haswell */
        0x1604, /* Broadwell */
 };
index 934c139916c609e3647acd5c6229a781ca348d56..ee4f183ef9eed337b012c4088bfeba5ae0c949a3 100644 (file)
@@ -178,7 +178,6 @@ static int ptp_ixp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 {
        u64 ns;
-       u32 remainder;
        unsigned long flags;
        struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
        struct ixp46x_ts_regs *regs = ixp_clock->regs;
@@ -189,8 +188,7 @@ static int ptp_ixp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
 
        spin_unlock_irqrestore(&register_lock, flags);
 
-       ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
-       ts->tv_nsec = remainder;
+       *ts = ns_to_timespec64(ns);
        return 0;
 }
 
@@ -202,8 +200,7 @@ static int ptp_ixp_settime(struct ptp_clock_info *ptp,
        struct ixp_clock *ixp_clock = container_of(ptp, struct ixp_clock, caps);
        struct ixp46x_ts_regs *regs = ixp_clock->regs;
 
-       ns = ts->tv_sec * 1000000000ULL;
-       ns += ts->tv_nsec;
+       ns = timespec64_to_ns(ts);
 
        spin_lock_irqsave(&register_lock, flags);
 
index d7b87c64b7cd261c6ec6a1b22ddce040747679d9..e220edc85c68a12bda653b0177255cf38459c7d2 100644 (file)
@@ -117,7 +117,7 @@ int rio_request_inb_mbox(struct rio_mport *mport,
        if (mport->ops->open_inb_mbox == NULL)
                goto out;
 
-       res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+       res = kzalloc(sizeof(struct resource), GFP_KERNEL);
 
        if (res) {
                rio_init_mbox_res(res, mbox, mbox);
@@ -185,7 +185,7 @@ int rio_request_outb_mbox(struct rio_mport *mport,
        if (mport->ops->open_outb_mbox == NULL)
                goto out;
 
-       res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+       res = kzalloc(sizeof(struct resource), GFP_KERNEL);
 
        if (res) {
                rio_init_mbox_res(res, mbox, mbox);
@@ -285,7 +285,7 @@ int rio_request_inb_dbell(struct rio_mport *mport,
 {
        int rc = 0;
 
-       struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+       struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
 
        if (res) {
                rio_init_dbell_res(res, start, end);
@@ -360,7 +360,7 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
 struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start,
                                        u16 end)
 {
-       struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+       struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
 
        if (res) {
                rio_init_dbell_res(res, start, end);
index 8155e80dd3f8cb1af05636eb07af16e799566127..6d5f4fce1d75fd187f205a05f3ce3421e55f5d12 100644 (file)
@@ -78,6 +78,15 @@ config REGULATOR_ACT8865
          This driver controls a active-semi act8865 voltage output
          regulator via I2C bus.
 
+config REGULATOR_ACT8945A
+       tristate "Active-semi ACT8945A voltage regulator"
+       depends on MFD_ACT8945A
+       help
+         This driver controls an active-semi ACT8945A voltage regulator
+         via I2C bus. The ACT8945A features three step-down DC/DC converters
+         and four low-dropout linear regulators, along with an ActivePath
+         battery charger.
+
 config REGULATOR_AD5398
        tristate "Analog Devices AD5398/AD5821 regulators"
        depends on I2C
@@ -549,6 +558,18 @@ config REGULATOR_QCOM_RPM
          Qualcomm RPM as a module. The module will be named
          "qcom_rpm-regulator".
 
+config REGULATOR_QCOM_SAW
+       tristate "Qualcomm SAW regulator driver"
+       depends on (ARCH_QCOM || COMPILE_TEST) && MFD_SYSCON
+       help
+         If you say yes to this option, support will be included for the
+         regulators providing power to the CPU cores on devices such as
+         APQ8064.
+
+         Say M here if you want to include support for the CPU core voltage
+         regulators as a module. The module will be named
+         "qcom_saw-regulator".
+
 config REGULATOR_QCOM_SMD_RPM
        tristate "Qualcomm SMD based RPM regulator driver"
        depends on QCOM_SMD_RPM
index 980b1943fa8153dd0c7cb1d286c5f5a23aacdbf4..75a0b4a8f1b2e767a300d57c0ebe5f9908f9f549 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
 obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
 obj-$(CONFIG_REGULATOR_AB8500) += ab8500-ext.o ab8500.o
 obj-$(CONFIG_REGULATOR_ACT8865) += act8865-regulator.o
+obj-$(CONFIG_REGULATOR_ACT8945A) += act8945a-regulator.o
 obj-$(CONFIG_REGULATOR_AD5398) += ad5398.o
 obj-$(CONFIG_REGULATOR_ANATOP) += anatop-regulator.o
 obj-$(CONFIG_REGULATOR_ARIZONA) += arizona-micsupp.o arizona-ldo1.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_REGULATOR_MC13XXX_CORE) +=  mc13xxx-regulator-core.o
 obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o
 obj-$(CONFIG_REGULATOR_MT6397) += mt6397-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_RPM) += qcom_rpm-regulator.o
+obj-$(CONFIG_REGULATOR_QCOM_SAW) += qcom_saw-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_SMD_RPM) += qcom_smd-regulator.o
 obj-$(CONFIG_REGULATOR_QCOM_SPMI) += qcom_spmi-regulator.o
 obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
new file mode 100644 (file)
index 0000000..441864b
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Voltage regulation driver for active-semi ACT8945A PMIC
+ *
+ * Copyright (C) 2015 Atmel Corporation
+ *
+ * Author: Wenyou Yang <wenyou.yang@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+/**
+ * ACT8945A Global Register Map.
+ */
+#define ACT8945A_SYS_MODE      0x00
+#define ACT8945A_SYS_CTRL      0x01
+#define ACT8945A_DCDC1_VSET1   0x20
+#define ACT8945A_DCDC1_VSET2   0x21
+#define ACT8945A_DCDC1_CTRL    0x22
+#define ACT8945A_DCDC2_VSET1   0x30
+#define ACT8945A_DCDC2_VSET2   0x31
+#define ACT8945A_DCDC2_CTRL    0x32
+#define ACT8945A_DCDC3_VSET1   0x40
+#define ACT8945A_DCDC3_VSET2   0x41
+#define ACT8945A_DCDC3_CTRL    0x42
+#define ACT8945A_LDO1_VSET     0x50
+#define ACT8945A_LDO1_CTRL     0x51
+#define ACT8945A_LDO2_VSET     0x54
+#define ACT8945A_LDO2_CTRL     0x55
+#define ACT8945A_LDO3_VSET     0x60
+#define ACT8945A_LDO3_CTRL     0x61
+#define ACT8945A_LDO4_VSET     0x64
+#define ACT8945A_LDO4_CTRL     0x65
+
+/**
+ * Field Definitions.
+ */
+#define ACT8945A_ENA           0x80    /* ON - [7] */
+#define ACT8945A_VSEL_MASK     0x3F    /* VSET - [5:0] */
+
+/**
+ * ACT8945A Voltage Number
+ */
+#define ACT8945A_VOLTAGE_NUM   64
+
+enum {
+       ACT8945A_ID_DCDC1,
+       ACT8945A_ID_DCDC2,
+       ACT8945A_ID_DCDC3,
+       ACT8945A_ID_LDO1,
+       ACT8945A_ID_LDO2,
+       ACT8945A_ID_LDO3,
+       ACT8945A_ID_LDO4,
+       ACT8945A_REG_NUM,
+};
+
+static const struct regulator_linear_range act8945a_voltage_ranges[] = {
+       REGULATOR_LINEAR_RANGE(600000, 0, 23, 25000),
+       REGULATOR_LINEAR_RANGE(1200000, 24, 47, 50000),
+       REGULATOR_LINEAR_RANGE(2400000, 48, 63, 100000),
+};
+
+static struct regulator_ops act8945a_ops = {
+       .list_voltage           = regulator_list_voltage_linear_range,
+       .map_voltage            = regulator_map_voltage_linear_range,
+       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
+       .set_voltage_sel        = regulator_set_voltage_sel_regmap,
+       .enable                 = regulator_enable_regmap,
+       .disable                = regulator_disable_regmap,
+       .is_enabled             = regulator_is_enabled_regmap,
+};
+
+#define ACT89xx_REG(_name, _family, _id, _vsel_reg, _supply)           \
+       [_family##_ID_##_id] = {                                        \
+               .name                   = _name,                        \
+               .supply_name            = _supply,                      \
+               .of_match               = of_match_ptr("REG_"#_id),     \
+               .regulators_node        = of_match_ptr("regulators"),   \
+               .id                     = _family##_ID_##_id,           \
+               .type                   = REGULATOR_VOLTAGE,            \
+               .ops                    = &act8945a_ops,                \
+               .n_voltages             = ACT8945A_VOLTAGE_NUM,         \
+               .linear_ranges          = act8945a_voltage_ranges,      \
+               .n_linear_ranges        = ARRAY_SIZE(act8945a_voltage_ranges), \
+               .vsel_reg               = _family##_##_id##_##_vsel_reg, \
+               .vsel_mask              = ACT8945A_VSEL_MASK,           \
+               .enable_reg             = _family##_##_id##_CTRL,       \
+               .enable_mask            = ACT8945A_ENA,                 \
+               .owner                  = THIS_MODULE,                  \
+       }
+
+static const struct regulator_desc act8945a_regulators[] = {
+       ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET1, "vp1"),
+       ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET1, "vp2"),
+       ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET1, "vp3"),
+       ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"),
+       ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"),
+       ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"),
+       ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"),
+};
+
+static const struct regulator_desc act8945a_alt_regulators[] = {
+       ACT89xx_REG("DCDC_REG1", ACT8945A, DCDC1, VSET2, "vp1"),
+       ACT89xx_REG("DCDC_REG2", ACT8945A, DCDC2, VSET2, "vp2"),
+       ACT89xx_REG("DCDC_REG3", ACT8945A, DCDC3, VSET2, "vp3"),
+       ACT89xx_REG("LDO_REG1", ACT8945A, LDO1, VSET, "inl45"),
+       ACT89xx_REG("LDO_REG2", ACT8945A, LDO2, VSET, "inl45"),
+       ACT89xx_REG("LDO_REG3", ACT8945A, LDO3, VSET, "inl67"),
+       ACT89xx_REG("LDO_REG4", ACT8945A, LDO4, VSET, "inl67"),
+};
+
+static int act8945a_pmic_probe(struct platform_device *pdev)
+{
+       struct regulator_config config = { };
+       const struct regulator_desc *regulators;
+       struct regulator_dev *rdev;
+       int i, num_regulators;
+       bool voltage_select;
+
+       voltage_select = of_property_read_bool(pdev->dev.parent->of_node,
+                                              "active-semi,vsel-high");
+
+       if (voltage_select) {
+               regulators = act8945a_alt_regulators;
+               num_regulators = ARRAY_SIZE(act8945a_alt_regulators);
+       } else {
+               regulators = act8945a_regulators;
+               num_regulators = ARRAY_SIZE(act8945a_regulators);
+       }
+
+       config.dev = &pdev->dev;
+       config.dev->of_node = pdev->dev.parent->of_node;
+       for (i = 0; i < num_regulators; i++) {
+               rdev = devm_regulator_register(&pdev->dev, &regulators[i], &config);
+               if (IS_ERR(rdev)) {
+                       dev_err(&pdev->dev,
+                               "failed to register %s regulator\n",
+                               regulators[i].name);
+                       return PTR_ERR(rdev);
+               }
+       }
+
+       return 0;
+}
+
+static struct platform_driver act8945a_pmic_driver = {
+       .driver = {
+               .name = "act8945a-regulator",
+       },
+       .probe = act8945a_pmic_probe,
+};
+module_platform_driver(act8945a_pmic_driver);
+
+MODULE_DESCRIPTION("Active-semi ACT8945A voltage regulator driver");
+MODULE_AUTHOR("Wenyou Yang <wenyou.yang@atmel.com>");
+MODULE_LICENSE("GPL");
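
A worked expansion of act8945a_voltage_ranges from the new driver above; this is not driver code (the core's regulator_list_voltage_linear_range() does it for real), just the selector-to-microvolt mapping the three ranges declare:

#include <linux/errno.h>

/* Sketch only: 64 selectors (ACT8945A_VOLTAGE_NUM) covering 0.6 V to 3.9 V. */
static int example_act8945a_sel_to_uV(unsigned int sel)
{
	if (sel <= 23)
		return 600000 + sel * 25000;		/* 600 mV .. 1175 mV */
	if (sel <= 47)
		return 1200000 + (sel - 24) * 50000;	/* 1200 mV .. 2350 mV */
	if (sel <= 63)
		return 2400000 + (sel - 48) * 100000;	/* 2400 mV .. 3900 mV */
	return -EINVAL;
}
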
index f2e1a39ce0f35258447093fd84223fc25b087e29..7d29893f833c32f2350d6771e65e3c3ded6ae2a6 100644 (file)
@@ -78,8 +78,7 @@
                .ops            = &axp20x_ops,                                  \
        }
 
-#define AXP_DESC_SW(_family, _id, _match, _supply, _min, _max, _step, _vreg,   \
-                   _vmask, _ereg, _emask)                                      \
+#define AXP_DESC_SW(_family, _id, _match, _supply, _ereg, _emask)              \
        [_family##_##_id] = {                                                   \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
                .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = _family##_##_id,                              \
-               .n_voltages     = (((_max) - (_min)) / (_step) + 1),            \
                .owner          = THIS_MODULE,                                  \
-               .min_uV         = (_min) * 1000,                                \
-               .uV_step        = (_step) * 1000,                               \
-               .vsel_reg       = (_vreg),                                      \
-               .vsel_mask      = (_vmask),                                     \
                .enable_reg     = (_ereg),                                      \
                .enable_mask    = (_emask),                                     \
                .ops            = &axp20x_ops_sw,                               \
                .ops            = &axp20x_ops_fixed                             \
        }
 
-#define AXP_DESC_TABLE(_family, _id, _match, _supply, _table, _vreg, _vmask,   \
-                      _ereg, _emask)                                           \
+#define AXP_DESC_RANGES(_family, _id, _match, _supply, _ranges, _n_voltages,   \
+                       _vreg, _vmask, _ereg, _emask)                           \
        [_family##_##_id] = {                                                   \
                .name           = #_id,                                         \
                .supply_name    = (_supply),                                    \
                .regulators_node = of_match_ptr("regulators"),                  \
                .type           = REGULATOR_VOLTAGE,                            \
                .id             = _family##_##_id,                              \
-               .n_voltages     = ARRAY_SIZE(_table),                           \
+               .n_voltages     = (_n_voltages),                                \
                .owner          = THIS_MODULE,                                  \
                .vsel_reg       = (_vreg),                                      \
                .vsel_mask      = (_vmask),                                     \
                .enable_reg     = (_ereg),                                      \
                .enable_mask    = (_emask),                                     \
-               .volt_table     = (_table),                                     \
-               .ops            = &axp20x_ops_table,                            \
+               .linear_ranges  = (_ranges),                                    \
+               .n_linear_ranges = ARRAY_SIZE(_ranges),                         \
+               .ops            = &axp20x_ops_range,                            \
        }
 
-static const int axp20x_ldo4_data[] = { 1250000, 1300000, 1400000, 1500000, 1600000,
-                                       1700000, 1800000, 1900000, 2000000, 2500000,
-                                       2700000, 2800000, 3000000, 3100000, 3200000,
-                                       3300000 };
-
 static struct regulator_ops axp20x_ops_fixed = {
        .list_voltage           = regulator_list_voltage_linear,
 };
 
-static struct regulator_ops axp20x_ops_table = {
+static struct regulator_ops axp20x_ops_range = {
        .set_voltage_sel        = regulator_set_voltage_sel_regmap,
        .get_voltage_sel        = regulator_get_voltage_sel_regmap,
-       .list_voltage           = regulator_list_voltage_table,
-       .map_voltage            = regulator_map_voltage_ascend,
+       .list_voltage           = regulator_list_voltage_linear_range,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
        .is_enabled             = regulator_is_enabled_regmap,
@@ -160,13 +149,17 @@ static struct regulator_ops axp20x_ops = {
 };
 
 static struct regulator_ops axp20x_ops_sw = {
-       .get_voltage_sel        = regulator_get_voltage_sel_regmap,
-       .list_voltage           = regulator_list_voltage_linear,
        .enable                 = regulator_enable_regmap,
        .disable                = regulator_disable_regmap,
        .is_enabled             = regulator_is_enabled_regmap,
 };
 
+static const struct regulator_linear_range axp20x_ldo4_ranges[] = {
+       REGULATOR_LINEAR_RANGE(1250000, 0x0, 0x0, 0),
+       REGULATOR_LINEAR_RANGE(1300000, 0x1, 0x8, 100000),
+       REGULATOR_LINEAR_RANGE(2500000, 0x9, 0xf, 100000),
+};
+
 static const struct regulator_desc axp20x_regulators[] = {
        AXP_DESC(AXP20X, DCDC2, "dcdc2", "vin2", 700, 2275, 25,
                 AXP20X_DCDC2_V_OUT, 0x3f, AXP20X_PWR_OUT_CTRL, 0x10),
@@ -177,8 +170,9 @@ static const struct regulator_desc axp20x_regulators[] = {
                 AXP20X_LDO24_V_OUT, 0xf0, AXP20X_PWR_OUT_CTRL, 0x04),
        AXP_DESC(AXP20X, LDO3, "ldo3", "ldo3in", 700, 3500, 25,
                 AXP20X_LDO3_V_OUT, 0x7f, AXP20X_PWR_OUT_CTRL, 0x40),
-       AXP_DESC_TABLE(AXP20X, LDO4, "ldo4", "ldo24in", axp20x_ldo4_data,
-                      AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL, 0x08),
+       AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in", axp20x_ldo4_ranges,
+                       16, AXP20X_LDO24_V_OUT, 0x0f, AXP20X_PWR_OUT_CTRL,
+                       0x08),
        AXP_DESC_IO(AXP20X, LDO5, "ldo5", "ldo5in", 1800, 3300, 100,
                    AXP20X_LDO5_V_OUT, 0xf0, AXP20X_GPIO0_CTRL, 0x07,
                    AXP20X_IO_ENABLED, AXP20X_IO_DISABLED),
@@ -196,8 +190,8 @@ static const struct regulator_desc axp22x_regulators[] = {
        AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50,
                 AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)),
        /* secondary switchable output of DCDC1 */
-       AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL, 1600, 3400, 100,
-                   AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
+       AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", NULL, AXP22X_PWR_OUT_CTRL2,
+                   BIT(7)),
        /* LDO regulator internally chained to DCDC5 */
        AXP_DESC(AXP22X, DC5LDO, "dc5ldo", NULL, 700, 1400, 100,
                 AXP22X_DC5LDO_V_OUT, 0x7, AXP22X_PWR_OUT_CTRL1, BIT(0)),
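
For the LDO4 change above, the new axp20x_ldo4_ranges table is decoded by regulator_list_voltage_linear_range(); a sketch of what the three entries expand to across the 16 selectors passed to AXP_DESC_RANGES:

#include <linux/errno.h>

/* Sketch only: selector-to-microvolt mapping declared by axp20x_ldo4_ranges. */
static int example_axp20x_ldo4_sel_to_uV(unsigned int sel)
{
	if (sel == 0x0)
		return 1250000;
	if (sel <= 0x8)
		return 1300000 + (sel - 0x1) * 100000;	/* 1.3 V .. 2.0 V */
	if (sel <= 0xf)
		return 2500000 + (sel - 0x9) * 100000;	/* 2.5 V .. 3.1 V */
	return -EINVAL;
}
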
index 744c9889f88d4f83e27c05687ac14427c833c0c5..4405be13eb0e261392ac8952638076e3bdf7e4c3 100644 (file)
@@ -1057,18 +1057,18 @@ static int set_machine_constraints(struct regulator_dev *rdev,
 
        ret = machine_constraints_voltage(rdev, rdev->constraints);
        if (ret != 0)
-               goto out;
+               return ret;
 
        ret = machine_constraints_current(rdev, rdev->constraints);
        if (ret != 0)
-               goto out;
+               return ret;
 
        if (rdev->constraints->ilim_uA && ops->set_input_current_limit) {
                ret = ops->set_input_current_limit(rdev,
                                                   rdev->constraints->ilim_uA);
                if (ret < 0) {
                        rdev_err(rdev, "failed to set input limit\n");
-                       goto out;
+                       return ret;
                }
        }
 
@@ -1077,21 +1077,20 @@ static int set_machine_constraints(struct regulator_dev *rdev,
                ret = suspend_prepare(rdev, rdev->constraints->initial_state);
                if (ret < 0) {
                        rdev_err(rdev, "failed to set suspend state\n");
-                       goto out;
+                       return ret;
                }
        }
 
        if (rdev->constraints->initial_mode) {
                if (!ops->set_mode) {
                        rdev_err(rdev, "no set_mode operation\n");
-                       ret = -EINVAL;
-                       goto out;
+                       return -EINVAL;
                }
 
                ret = ops->set_mode(rdev, rdev->constraints->initial_mode);
                if (ret < 0) {
                        rdev_err(rdev, "failed to set initial mode: %d\n", ret);
-                       goto out;
+                       return ret;
                }
        }
 
@@ -1102,7 +1101,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
                ret = _regulator_do_enable(rdev);
                if (ret < 0 && ret != -EINVAL) {
                        rdev_err(rdev, "failed to enable\n");
-                       goto out;
+                       return ret;
                }
        }
 
@@ -1111,7 +1110,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
                ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
                if (ret < 0) {
                        rdev_err(rdev, "failed to set ramp_delay\n");
-                       goto out;
+                       return ret;
                }
        }
 
@@ -1119,7 +1118,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
                ret = ops->set_pull_down(rdev);
                if (ret < 0) {
                        rdev_err(rdev, "failed to set pull down\n");
-                       goto out;
+                       return ret;
                }
        }
 
@@ -1127,7 +1126,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
                ret = ops->set_soft_start(rdev);
                if (ret < 0) {
                        rdev_err(rdev, "failed to set soft start\n");
-                       goto out;
+                       return ret;
                }
        }
 
@@ -1136,16 +1135,12 @@ static int set_machine_constraints(struct regulator_dev *rdev,
                ret = ops->set_over_current_protection(rdev);
                if (ret < 0) {
                        rdev_err(rdev, "failed to set over current protection\n");
-                       goto out;
+                       return ret;
                }
        }
 
        print_constraints(rdev);
        return 0;
-out:
-       kfree(rdev->constraints);
-       rdev->constraints = NULL;
-       return ret;
 }
 
 /**
@@ -3979,7 +3974,7 @@ unset_supplies:
 
 scrub:
        regulator_ena_gpio_free(rdev);
-       kfree(rdev->constraints);
+
 wash:
        device_unregister(&rdev->dev);
        /* device core frees rdev */
index 8b3cc9f0cd64c0fa323685c4e36842eb52318d54..01c0e3709b6659aded1d79cbbb5423c7d373d5e9 100644 (file)
@@ -132,6 +132,8 @@ static irqreturn_t da9210_irq_handler(int irq, void *data)
        if (error < 0)
                goto error_i2c;
 
+       mutex_lock(&chip->rdev->mutex);
+
        if (val & DA9210_E_OVCURR) {
                regulator_notifier_call_chain(chip->rdev,
                                              REGULATOR_EVENT_OVER_CURRENT,
@@ -155,6 +157,9 @@ static irqreturn_t da9210_irq_handler(int irq, void *data)
                                              NULL);
                handled |= DA9210_E_VMAX;
        }
+
+       mutex_unlock(&chip->rdev->mutex);
+
        if (handled) {
                /* Clear handled events */
                error = regmap_write(chip->regmap, DA9210_REG_EVENT_B, handled);
index 4940e8287df68090104a78cc6239ab90e22423c6..2cb5cc311610a12c467d518336dbdde814665667 100644 (file)
@@ -114,6 +114,22 @@ static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV)
        return 0;
 }
 
+static int fan53555_set_suspend_enable(struct regulator_dev *rdev)
+{
+       struct fan53555_device_info *di = rdev_get_drvdata(rdev);
+
+       return regmap_update_bits(di->regmap, di->sleep_reg,
+                                 VSEL_BUCK_EN, VSEL_BUCK_EN);
+}
+
+static int fan53555_set_suspend_disable(struct regulator_dev *rdev)
+{
+       struct fan53555_device_info *di = rdev_get_drvdata(rdev);
+
+       return regmap_update_bits(di->regmap, di->sleep_reg,
+                                 VSEL_BUCK_EN, 0);
+}
+
 static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode)
 {
        struct fan53555_device_info *di = rdev_get_drvdata(rdev);
@@ -192,6 +208,8 @@ static struct regulator_ops fan53555_regulator_ops = {
        .set_mode = fan53555_set_mode,
        .get_mode = fan53555_get_mode,
        .set_ramp_delay = fan53555_set_ramp,
+       .set_suspend_enable = fan53555_set_suspend_enable,
+       .set_suspend_disable = fan53555_set_suspend_disable,
 };
 
 static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di)
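
The two fan53555 callbacks above work because the sleep-mode VSEL register carries both the suspend voltage selector and a buck-enable bit, so suspend enable/disable is just a masked write alongside the existing fan53555_set_suspend_voltage(). A sketch of that register layout; the mask values here are assumptions for illustration, not taken from fan53555.c:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/regmap.h>

#define EXAMPLE_VSEL_NSEL_MASK	0x3f
#define EXAMPLE_VSEL_BUCK_EN	BIT(7)

/* Sketch only: one register, two fields, each updated with its own mask. */
static int example_write_sleep_reg(struct regmap *map, unsigned int sleep_reg,
				   unsigned int sel, bool enable)
{
	int ret;

	ret = regmap_update_bits(map, sleep_reg, EXAMPLE_VSEL_NSEL_MASK, sel);
	if (ret)
		return ret;

	return regmap_update_bits(map, sleep_reg, EXAMPLE_VSEL_BUCK_EN,
				  enable ? EXAMPLE_VSEL_BUCK_EN : 0);
}
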
index 19d75848655311c4afb7d343a5ce5b1242610627..38992112fd6e2c3838fc95ab49a261abc6eab5bf 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/regmap.h>
 #include <linux/err.h>
 #include <linux/gpio.h>
+#include <linux/delay.h>
 #include <linux/regulator/lp872x.h>
 #include <linux/regulator/driver.h>
 #include <linux/platform_device.h>
@@ -738,10 +739,8 @@ static int lp872x_init_dvs(struct lp872x *lp)
                goto set_default_dvs_mode;
 
        gpio = dvs->gpio;
-       if (!gpio_is_valid(gpio)) {
-               dev_warn(lp->dev, "invalid gpio: %d\n", gpio);
+       if (!gpio_is_valid(gpio))
                goto set_default_dvs_mode;
-       }
 
        pinstate = dvs->init_state;
        ret = devm_gpio_request_one(lp->dev, gpio, pinstate, "LP872X DVS");
@@ -759,6 +758,33 @@ set_default_dvs_mode:
                                default_dvs_mode[lp->chipid]);
 }
 
+static int lp872x_hw_enable(struct lp872x *lp)
+{
+       int ret, gpio;
+
+       if (!lp->pdata)
+               return -EINVAL;
+
+       gpio = lp->pdata->enable_gpio;
+       if (!gpio_is_valid(gpio))
+               return 0;
+
+       /* Always set enable GPIO high. */
+       ret = devm_gpio_request_one(lp->dev, gpio, GPIOF_OUT_INIT_HIGH, "LP872X EN");
+       if (ret) {
+               dev_err(lp->dev, "gpio request err: %d\n", ret);
+               return ret;
+       }
+
+       /* Each chip has a different enable delay. */
+       if (lp->chipid == LP8720)
+               usleep_range(LP8720_ENABLE_DELAY, 1.5 * LP8720_ENABLE_DELAY);
+       else
+               usleep_range(LP8725_ENABLE_DELAY, 1.5 * LP8725_ENABLE_DELAY);
+
+       return 0;
+}
+
 static int lp872x_config(struct lp872x *lp)
 {
        struct lp872x_platform_data *pdata = lp->pdata;
@@ -877,6 +903,8 @@ static struct lp872x_platform_data
        of_property_read_u8(np, "ti,dvs-state", &dvs_state);
        pdata->dvs->init_state = dvs_state ? DVS_HIGH : DVS_LOW;
 
+       pdata->enable_gpio = of_get_named_gpio(np, "enable-gpios", 0);
+
        if (of_get_child_count(np) == 0)
                goto out;
 
@@ -950,6 +978,10 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
        lp->chipid = id->driver_data;
        i2c_set_clientdata(cl, lp);
 
+       ret = lp872x_hw_enable(lp);
+       if (ret)
+               return ret;
+
        ret = lp872x_config(lp);
        if (ret)
                return ret;
index a5b2f4762677c706c59712417a8c44627670773c..17a5b6c2d6a9f5d71c0bafded4f56d26c89fb3ce 100644 (file)
@@ -317,11 +317,25 @@ static int mt6397_regulator_probe(struct platform_device *pdev)
        return 0;
 }
 
+static const struct platform_device_id mt6397_platform_ids[] = {
+       {"mt6397-regulator", 0},
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, mt6397_platform_ids);
+
+static const struct of_device_id mt6397_of_match[] = {
+       { .compatible = "mediatek,mt6397-regulator", },
+       { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mt6397_of_match);
+
 static struct platform_driver mt6397_regulator_driver = {
        .driver = {
                .name = "mt6397-regulator",
+               .of_match_table = of_match_ptr(mt6397_of_match),
        },
        .probe = mt6397_regulator_probe,
+       .id_table = mt6397_platform_ids,
 };
 
 module_platform_driver(mt6397_regulator_driver);
@@ -329,4 +343,3 @@ module_platform_driver(mt6397_regulator_driver);
 MODULE_AUTHOR("Flora Fu <flora.fu@mediatek.com>");
 MODULE_DESCRIPTION("Regulator Driver for MediaTek MT6397 PMIC");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:mt6397-regulator");
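
The mt6397 hunk above adds two ID tables: the of_device_id table lets the regulator bind by compatible string, while the platform_device_id table plus its MODULE_DEVICE_TABLE() keeps name-based binding working and generates the module alias that the removed MODULE_ALIAS() line used to provide. A minimal sketch of the name-based path; the caller and parent device are hypothetical:

#include <linux/platform_device.h>

/* Hypothetical MFD-core-style registration: the device name must match an
 * entry in mt6397_platform_ids above for mt6397_regulator_probe() to run.
 */
static struct platform_device *example_add_mt6397_regulator(struct device *parent)
{
	return platform_device_register_data(parent, "mt6397-regulator",
					     PLATFORM_DEVID_NONE, NULL, 0);
}
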
index f9d74d63be7c7e6bb88d7cfeb8d4d4046d919002..bdb0f14703658ce585bd009f1224bbf911404f15 100644 (file)
@@ -177,7 +177,7 @@ static int pbias_regulator_probe(struct platform_device *pdev)
                        return -EINVAL;
 
                offset = res->start;
-               dev_WARN(&pdev->dev,
+               dev_WARN_ONCE(&pdev->dev, true,
                         "using legacy dt data for pbias offset\n");
        }
 
diff --git a/drivers/regulator/qcom_saw-regulator.c b/drivers/regulator/qcom_saw-regulator.c
new file mode 100644 (file)
index 0000000..c800f16
--- /dev/null
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2016, Linaro Limited. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+
+#define        SPM_REG_STS_1                   0x10
+#define        SPM_REG_VCTL                    0x14
+#define        SPM_REG_PMIC_DATA_0             0x28
+#define        SPM_REG_PMIC_DATA_1             0x2c
+#define        SPM_REG_RST                     0x30
+
+struct saw_vreg {
+       struct device           *dev;
+       struct regmap           *regmap;
+       struct regulator_desc   rdesc;
+       struct regulator_dev    *rdev;
+       unsigned int            sel;
+};
+
+struct spm_vlevel_data {
+       struct saw_vreg *vreg;
+       unsigned int sel;
+};
+
+static int saw_regulator_get_voltage_sel(struct regulator_dev *rdev)
+{
+       struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+
+       return vreg->sel;
+}
+
+static void smp_set_vdd(void *data)
+{
+       struct spm_vlevel_data *vdata = (struct spm_vlevel_data *)data;
+       struct saw_vreg *vreg = vdata->vreg;
+       unsigned long new_sel = vdata->sel;
+       u32 val, new_val;
+       u32 vctl, data0, data1;
+       unsigned long timeout;
+
+       if (vreg->sel == new_sel)
+               return;
+
+       regmap_read(vreg->regmap, SPM_REG_VCTL, &vctl);
+       regmap_read(vreg->regmap, SPM_REG_PMIC_DATA_0, &data0);
+       regmap_read(vreg->regmap, SPM_REG_PMIC_DATA_1, &data1);
+
+       /* select the band */
+       val = 0x80 | new_sel;
+
+       vctl &= ~0xff;
+       vctl |= val;
+
+       data0 &= ~0xff;
+       data0 |= val;
+
+       data1 &= ~0x3f;
+       data1 |= val & 0x3f;
+       data1 &= ~0x3f0000;
+       data1 |= ((val & 0x3f) << 16);
+
+       regmap_write(vreg->regmap, SPM_REG_RST, 1);
+       regmap_write(vreg->regmap, SPM_REG_VCTL, vctl);
+       regmap_write(vreg->regmap, SPM_REG_PMIC_DATA_0, data0);
+       regmap_write(vreg->regmap, SPM_REG_PMIC_DATA_1, data1);
+
+       timeout = jiffies + usecs_to_jiffies(100);
+       do {
+               regmap_read(vreg->regmap, SPM_REG_STS_1, &new_val);
+               new_val &= 0xff;
+               if (new_val == val) {
+                       vreg->sel = new_sel;
+                       return;
+               }
+
+               cpu_relax();
+
+       } while (time_before(jiffies, timeout));
+
+       pr_err("%s: Voltage not changed: %#x\n", __func__, new_val);
+}
+
+static int saw_regulator_set_voltage_sel(struct regulator_dev *rdev,
+                                        unsigned selector)
+{
+       struct saw_vreg *vreg = rdev_get_drvdata(rdev);
+       struct spm_vlevel_data data;
+       int cpu = rdev_get_id(rdev);
+
+       data.vreg = vreg;
+       data.sel = selector;
+
+       return smp_call_function_single(cpu, smp_set_vdd, &data, true);
+}
+
+static struct regulator_ops saw_regulator_ops = {
+       .list_voltage = regulator_list_voltage_linear_range,
+       .set_voltage_sel = saw_regulator_set_voltage_sel,
+       .get_voltage_sel = saw_regulator_get_voltage_sel,
+       .set_voltage_time_sel = regulator_set_voltage_time_sel,
+};
+
+static struct regulator_desc saw_regulator = {
+       .owner = THIS_MODULE,
+       .type = REGULATOR_VOLTAGE,
+       .ops  = &saw_regulator_ops,
+       .linear_ranges = (struct regulator_linear_range[]) {
+               REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500),
+       },
+       .n_linear_ranges = 1,
+       .n_voltages = 57,
+       .ramp_delay = 1250,
+};
+
+static struct saw_vreg *saw_get_drv(struct platform_device *pdev,
+                                   int *vreg_cpu)
+{
+       struct saw_vreg *vreg = NULL;
+       struct device_node *cpu_node, *saw_node;
+       int cpu;
+       bool found;
+
+       for_each_possible_cpu(cpu) {
+               cpu_node = of_cpu_device_node_get(cpu);
+               if (!cpu_node)
+                       continue;
+               saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+               found = (saw_node == pdev->dev.of_node->parent);
+               of_node_put(saw_node);
+               of_node_put(cpu_node);
+               if (found)
+                       break;
+       }
+
+       if (found) {
+               vreg = devm_kzalloc(&pdev->dev, sizeof(*vreg), GFP_KERNEL);
+               if (vreg)
+                       *vreg_cpu = cpu;
+       }
+
+       return vreg;
+}
+
+static const struct of_device_id qcom_saw_regulator_match[] = {
+       { .compatible = "qcom,apq8064-saw2-v1.1-regulator" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, qcom_saw_regulator_match);
+
+static int qcom_saw_regulator_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct device_node *saw_np;
+       struct saw_vreg *vreg;
+       struct regulator_config config = { };
+       int ret = 0, cpu = 0;
+       char name[] = "kraitXX";
+
+       vreg = saw_get_drv(pdev, &cpu);
+       if (!vreg)
+               return -EINVAL;
+
+       saw_np = of_get_parent(np);
+       if (!saw_np)
+               return -ENODEV;
+
+       vreg->regmap = syscon_node_to_regmap(saw_np);
+       of_node_put(saw_np);
+       if (IS_ERR(vreg->regmap))
+               return PTR_ERR(vreg->regmap);
+
+       snprintf(name, sizeof(name), "krait%d", cpu);
+
+       config.regmap = vreg->regmap;
+       config.dev = &pdev->dev;
+       config.of_node = np;
+       config.driver_data = vreg;
+
+       vreg->rdesc = saw_regulator;
+       vreg->rdesc.id = cpu;
+       vreg->rdesc.name = name;
+       config.init_data = of_get_regulator_init_data(&pdev->dev,
+                                                     pdev->dev.of_node,
+                                                     &vreg->rdesc);
+
+       vreg->rdev = devm_regulator_register(&pdev->dev, &vreg->rdesc, &config);
+       if (IS_ERR(vreg->rdev)) {
+               ret = PTR_ERR(vreg->rdev);
+               dev_err(dev, "failed to register SAW regulator: %d\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static struct platform_driver qcom_saw_regulator_driver = {
+       .driver = {
+               .name = "qcom-saw-regulator",
+               .of_match_table = qcom_saw_regulator_match,
+       },
+       .probe = qcom_saw_regulator_probe,
+};
+
+module_platform_driver(qcom_saw_regulator_driver);
+
+MODULE_ALIAS("platform:qcom-saw-regulator");
+MODULE_DESCRIPTION("Qualcomm SAW regulator driver");
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_LICENSE("GPL v2");
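
A worked expansion of the linear range declared in saw_regulator above; this sketch is not driver code (regulator_list_voltage_linear_range() computes it for real), it just shows the 57 selectors spanning 0.7 V to 1.4 V in 12.5 mV steps:

#include <linux/errno.h>

/* Sketch only: mapping implied by REGULATOR_LINEAR_RANGE(700000, 0, 56, 12500)
 * with n_voltages = 57.
 */
static int example_saw_sel_to_uV(unsigned int sel)
{
	if (sel > 56)
		return -EINVAL;

	return 700000 + sel * 12500;	/* sel 56 -> 1400000 uV */
}
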
index 376322f71fd5723c90863298f4b4ed3eb7750d6e..ef456d3cf2655ac466f7671915d0b8e579492a50 100644 (file)
@@ -335,16 +335,6 @@ config RTC_DRV_RK808
          This driver can also be built as a module. If so, the module
          will be called rk808-rtc.
 
-config RTC_DRV_MAX77802
-       tristate "Maxim 77802 RTC"
-       depends on MFD_MAX77686
-       help
-         If you say yes here you will get support for the
-         RTC of Maxim MAX77802 PMIC.
-
-         This driver can also be built as a module. If so, the module
-         will be called rtc-max77802.
-
 config RTC_DRV_RS5C372
        tristate "Ricoh R2025S/D, RS5C372A/B, RV5C386, RV5C387A"
        help
index 62d61b26ca7e600a30c38cf754353457c19723f9..ed4519efa3caf01d5ce331cdddb1db6dd4e0b431 100644 (file)
@@ -86,7 +86,6 @@ obj-$(CONFIG_RTC_DRV_M48T86)  += rtc-m48t86.o
 obj-$(CONFIG_RTC_DRV_MAX6900)  += rtc-max6900.o
 obj-$(CONFIG_RTC_DRV_MAX6902)  += rtc-max6902.o
 obj-$(CONFIG_RTC_DRV_MAX77686) += rtc-max77686.o
-obj-$(CONFIG_RTC_DRV_MAX77802) += rtc-max77802.o
 obj-$(CONFIG_RTC_DRV_MAX8907)  += rtc-max8907.o
 obj-$(CONFIG_RTC_DRV_MAX8925)  += rtc-max8925.o
 obj-$(CONFIG_RTC_DRV_MAX8997)  += rtc-max8997.o
index 7184a0eda79384f8a59a5c8be8155018ba1a792e..0f2965d912ae8b7c6dbbc4bfb9dfe1d628b34d10 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * RTC driver for Maxim MAX77686
+ * RTC driver for Maxim MAX77686 and MAX77802
  *
  * Copyright (C) 2012 Samsung Electronics Co.Ltd
  *
@@ -12,8 +12,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/slab.h>
 #include <linux/rtc.h>
 #include <linux/delay.h>
 #define ALARM_ENABLE_SHIFT             7
 #define ALARM_ENABLE_MASK              (1 << ALARM_ENABLE_SHIFT)
 
-#define MAX77686_RTC_UPDATE_DELAY      16
+#define REG_RTC_NONE                   0xdeadbeef
+
+/*
+ * The MAX77802 has a separate register (RTCAE1) for alarm enable, instead
+ * of using 1 bit from each of the RTC{SEC,MIN,HOUR,DAY,MONTH,YEAR,DATE}
+ * registers as is done in the MAX77686.
+ */
+#define MAX77802_ALARM_ENABLE_VALUE    0x77
 
 enum {
        RTC_SEC = 0,
@@ -54,6 +59,19 @@ enum {
        RTC_NR_TIME
 };
 
+struct max77686_rtc_driver_data {
+       /* Minimum usecs needed for a RTC update */
+       unsigned long           delay;
+       /* Mask used to read RTC registers value */
+       u8                      mask;
+       /* Registers offset to I2C addresses map */
+       const unsigned int      *map;
+       /* Has a separate alarm enable register? */
+       bool                    alarm_enable_reg;
+       /* Has a separate I2C regmap for the RTC? */
+       bool                    separate_i2c_addr;
+};
+
 struct max77686_rtc_info {
        struct device           *dev;
        struct max77686_dev     *max77686;
@@ -63,6 +81,8 @@ struct max77686_rtc_info {
 
        struct regmap           *regmap;
 
+       const struct max77686_rtc_driver_data *drv_data;
+
        int virq;
        int rtc_24hr_mode;
 };
@@ -72,12 +92,120 @@ enum MAX77686_RTC_OP {
        MAX77686_RTC_READ,
 };
 
+/* These are not registers but just offsets that are mapped to addresses */
+enum max77686_rtc_reg_offset {
+       REG_RTC_CONTROLM = 0,
+       REG_RTC_CONTROL,
+       REG_RTC_UPDATE0,
+       REG_WTSR_SMPL_CNTL,
+       REG_RTC_SEC,
+       REG_RTC_MIN,
+       REG_RTC_HOUR,
+       REG_RTC_WEEKDAY,
+       REG_RTC_MONTH,
+       REG_RTC_YEAR,
+       REG_RTC_DATE,
+       REG_ALARM1_SEC,
+       REG_ALARM1_MIN,
+       REG_ALARM1_HOUR,
+       REG_ALARM1_WEEKDAY,
+       REG_ALARM1_MONTH,
+       REG_ALARM1_YEAR,
+       REG_ALARM1_DATE,
+       REG_ALARM2_SEC,
+       REG_ALARM2_MIN,
+       REG_ALARM2_HOUR,
+       REG_ALARM2_WEEKDAY,
+       REG_ALARM2_MONTH,
+       REG_ALARM2_YEAR,
+       REG_ALARM2_DATE,
+       REG_RTC_AE1,
+       REG_RTC_END,
+};
+
+/* Maps RTC registers offset to the MAX77686 register addresses */
+static const unsigned int max77686_map[REG_RTC_END] = {
+       [REG_RTC_CONTROLM]   = MAX77686_RTC_CONTROLM,
+       [REG_RTC_CONTROL]    = MAX77686_RTC_CONTROL,
+       [REG_RTC_UPDATE0]    = MAX77686_RTC_UPDATE0,
+       [REG_WTSR_SMPL_CNTL] = MAX77686_WTSR_SMPL_CNTL,
+       [REG_RTC_SEC]        = MAX77686_RTC_SEC,
+       [REG_RTC_MIN]        = MAX77686_RTC_MIN,
+       [REG_RTC_HOUR]       = MAX77686_RTC_HOUR,
+       [REG_RTC_WEEKDAY]    = MAX77686_RTC_WEEKDAY,
+       [REG_RTC_MONTH]      = MAX77686_RTC_MONTH,
+       [REG_RTC_YEAR]       = MAX77686_RTC_YEAR,
+       [REG_RTC_DATE]       = MAX77686_RTC_DATE,
+       [REG_ALARM1_SEC]     = MAX77686_ALARM1_SEC,
+       [REG_ALARM1_MIN]     = MAX77686_ALARM1_MIN,
+       [REG_ALARM1_HOUR]    = MAX77686_ALARM1_HOUR,
+       [REG_ALARM1_WEEKDAY] = MAX77686_ALARM1_WEEKDAY,
+       [REG_ALARM1_MONTH]   = MAX77686_ALARM1_MONTH,
+       [REG_ALARM1_YEAR]    = MAX77686_ALARM1_YEAR,
+       [REG_ALARM1_DATE]    = MAX77686_ALARM1_DATE,
+       [REG_ALARM2_SEC]     = MAX77686_ALARM2_SEC,
+       [REG_ALARM2_MIN]     = MAX77686_ALARM2_MIN,
+       [REG_ALARM2_HOUR]    = MAX77686_ALARM2_HOUR,
+       [REG_ALARM2_WEEKDAY] = MAX77686_ALARM2_WEEKDAY,
+       [REG_ALARM2_MONTH]   = MAX77686_ALARM2_MONTH,
+       [REG_ALARM2_YEAR]    = MAX77686_ALARM2_YEAR,
+       [REG_ALARM2_DATE]    = MAX77686_ALARM2_DATE,
+       [REG_RTC_AE1]        = REG_RTC_NONE,
+};
+
+static const struct max77686_rtc_driver_data max77686_drv_data = {
+       .delay = 16000,
+       .mask  = 0x7f,
+       .map   = max77686_map,
+       .alarm_enable_reg  = false,
+       .separate_i2c_addr = true,
+};
+
+static const unsigned int max77802_map[REG_RTC_END] = {
+       [REG_RTC_CONTROLM]   = MAX77802_RTC_CONTROLM,
+       [REG_RTC_CONTROL]    = MAX77802_RTC_CONTROL,
+       [REG_RTC_UPDATE0]    = MAX77802_RTC_UPDATE0,
+       [REG_WTSR_SMPL_CNTL] = MAX77802_WTSR_SMPL_CNTL,
+       [REG_RTC_SEC]        = MAX77802_RTC_SEC,
+       [REG_RTC_MIN]        = MAX77802_RTC_MIN,
+       [REG_RTC_HOUR]       = MAX77802_RTC_HOUR,
+       [REG_RTC_WEEKDAY]    = MAX77802_RTC_WEEKDAY,
+       [REG_RTC_MONTH]      = MAX77802_RTC_MONTH,
+       [REG_RTC_YEAR]       = MAX77802_RTC_YEAR,
+       [REG_RTC_DATE]       = MAX77802_RTC_DATE,
+       [REG_ALARM1_SEC]     = MAX77802_ALARM1_SEC,
+       [REG_ALARM1_MIN]     = MAX77802_ALARM1_MIN,
+       [REG_ALARM1_HOUR]    = MAX77802_ALARM1_HOUR,
+       [REG_ALARM1_WEEKDAY] = MAX77802_ALARM1_WEEKDAY,
+       [REG_ALARM1_MONTH]   = MAX77802_ALARM1_MONTH,
+       [REG_ALARM1_YEAR]    = MAX77802_ALARM1_YEAR,
+       [REG_ALARM1_DATE]    = MAX77802_ALARM1_DATE,
+       [REG_ALARM2_SEC]     = MAX77802_ALARM2_SEC,
+       [REG_ALARM2_MIN]     = MAX77802_ALARM2_MIN,
+       [REG_ALARM2_HOUR]    = MAX77802_ALARM2_HOUR,
+       [REG_ALARM2_WEEKDAY] = MAX77802_ALARM2_WEEKDAY,
+       [REG_ALARM2_MONTH]   = MAX77802_ALARM2_MONTH,
+       [REG_ALARM2_YEAR]    = MAX77802_ALARM2_YEAR,
+       [REG_ALARM2_DATE]    = MAX77802_ALARM2_DATE,
+       [REG_RTC_AE1]        = MAX77802_RTC_AE1,
+};
+
+static const struct max77686_rtc_driver_data max77802_drv_data = {
+       .delay = 200,
+       .mask  = 0xff,
+       .map   = max77802_map,
+       .alarm_enable_reg  = true,
+       .separate_i2c_addr = false,
+};
+
 static void max77686_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
-                                  int rtc_24hr_mode)
+                                   struct max77686_rtc_info *info)
 {
-       tm->tm_sec = data[RTC_SEC] & 0x7f;
-       tm->tm_min = data[RTC_MIN] & 0x7f;
-       if (rtc_24hr_mode)
+       u8 mask = info->drv_data->mask;
+
+       tm->tm_sec = data[RTC_SEC] & mask;
+       tm->tm_min = data[RTC_MIN] & mask;
+       if (info->rtc_24hr_mode)
                tm->tm_hour = data[RTC_HOUR] & 0x1f;
        else {
                tm->tm_hour = data[RTC_HOUR] & 0x0f;
@@ -86,15 +214,23 @@ static void max77686_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
        }
 
        /* Only a single bit is set in data[], so fls() would be equivalent */
-       tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0x7f) - 1;
+       tm->tm_wday = ffs(data[RTC_WEEKDAY] & mask) - 1;
        tm->tm_mday = data[RTC_DATE] & 0x1f;
        tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
-       tm->tm_year = (data[RTC_YEAR] & 0x7f) + 100;
+       tm->tm_year = data[RTC_YEAR] & mask;
        tm->tm_yday = 0;
        tm->tm_isdst = 0;
+
+       /*
+        * The MAX77686 reserves one bit of each RTC register for alarm
+        * enable, so its year values are just 0..99; add 100 to cover 2099.
+        */
+       if (!info->drv_data->alarm_enable_reg)
+               tm->tm_year += 100;
 }
 
-static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
+static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data,
+                                  struct max77686_rtc_info *info)
 {
        data[RTC_SEC] = tm->tm_sec;
        data[RTC_MIN] = tm->tm_min;
@@ -102,13 +238,20 @@ static int max77686_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
        data[RTC_WEEKDAY] = 1 << tm->tm_wday;
        data[RTC_DATE] = tm->tm_mday;
        data[RTC_MONTH] = tm->tm_mon + 1;
+
+       if (info->drv_data->alarm_enable_reg) {
+               data[RTC_YEAR] = tm->tm_year;
+               return 0;
+       }
+
        data[RTC_YEAR] = tm->tm_year > 100 ? (tm->tm_year - 100) : 0;
 
        if (tm->tm_year < 100) {
-               pr_warn("RTC cannot handle the year %d.  Assume it's 2000.\n",
+               dev_err(info->dev, "RTC cannot handle the year %d.\n",
                        1900 + tm->tm_year);
                return -EINVAL;
        }
+
        return 0;
 }
 
@@ -117,6 +260,7 @@ static int max77686_rtc_update(struct max77686_rtc_info *info,
 {
        int ret;
        unsigned int data;
+       unsigned long delay = info->drv_data->delay;
 
        if (op == MAX77686_RTC_WRITE)
                data = 1 << RTC_UDR_SHIFT;
@@ -124,13 +268,14 @@ static int max77686_rtc_update(struct max77686_rtc_info *info,
                data = 1 << RTC_RBUDR_SHIFT;
 
        ret = regmap_update_bits(info->max77686->rtc_regmap,
-                                MAX77686_RTC_UPDATE0, data, data);
+                                info->drv_data->map[REG_RTC_UPDATE0],
+                                data, data);
        if (ret < 0)
-               dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
-                               __func__, ret, data);
+               dev_err(info->dev, "Fail to write update reg(ret=%d, data=0x%x)\n",
+                       ret, data);
        else {
-               /* Minimum 16ms delay required before RTC update. */
-               msleep(MAX77686_RTC_UPDATE_DELAY);
+               /* Minimum delay required before RTC update. */
+               usleep_range(delay, delay * 2);
        }
 
        return ret;
@@ -149,13 +294,14 @@ static int max77686_rtc_read_time(struct device *dev, struct rtc_time *tm)
                goto out;
 
        ret = regmap_bulk_read(info->max77686->rtc_regmap,
-                               MAX77686_RTC_SEC, data, RTC_NR_TIME);
+                              info->drv_data->map[REG_RTC_SEC],
+                              data, ARRAY_SIZE(data));
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__, ret);
+               dev_err(info->dev, "Fail to read time reg(%d)\n", ret);
                goto out;
        }
 
-       max77686_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
+       max77686_rtc_data_to_tm(data, tm, info);
 
        ret = rtc_valid_tm(tm);
 
@@ -170,17 +316,17 @@ static int max77686_rtc_set_time(struct device *dev, struct rtc_time *tm)
        u8 data[RTC_NR_TIME];
        int ret;
 
-       ret = max77686_rtc_tm_to_data(tm, data);
+       ret = max77686_rtc_tm_to_data(tm, data, info);
        if (ret < 0)
                return ret;
 
        mutex_lock(&info->lock);
 
        ret = regmap_bulk_write(info->max77686->rtc_regmap,
-                                MAX77686_RTC_SEC, data, RTC_NR_TIME);
+                               info->drv_data->map[REG_RTC_SEC],
+                               data, ARRAY_SIZE(data));
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
-                               ret);
+               dev_err(info->dev, "Fail to write time reg(%d)\n", ret);
                goto out;
        }
 
@@ -196,6 +342,7 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
        struct max77686_rtc_info *info = dev_get_drvdata(dev);
        u8 data[RTC_NR_TIME];
        unsigned int val;
+       const unsigned int *map = info->drv_data->map;
        int i, ret;
 
        mutex_lock(&info->lock);
@@ -205,28 +352,47 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
                goto out;
 
        ret = regmap_bulk_read(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+                              map[REG_ALARM1_SEC], data, ARRAY_SIZE(data));
        if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
-                               __func__, __LINE__, ret);
+               dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
                goto out;
        }
 
-       max77686_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
+       max77686_rtc_data_to_tm(data, &alrm->time, info);
 
        alrm->enabled = 0;
-       for (i = 0; i < RTC_NR_TIME; i++) {
-               if (data[i] & ALARM_ENABLE_MASK) {
+
+       if (info->drv_data->alarm_enable_reg) {
+               if (map[REG_RTC_AE1] == REG_RTC_NONE) {
+                       ret = -EINVAL;
+                       dev_err(info->dev,
+                               "alarm enable register not set(%d)\n", ret);
+                       goto out;
+               }
+
+               ret = regmap_read(info->max77686->regmap,
+                                 map[REG_RTC_AE1], &val);
+               if (ret < 0) {
+                       dev_err(info->dev,
+                               "fail to read alarm enable(%d)\n", ret);
+                       goto out;
+               }
+
+               if (val)
                        alrm->enabled = 1;
-                       break;
+       } else {
+               for (i = 0; i < ARRAY_SIZE(data); i++) {
+                       if (data[i] & ALARM_ENABLE_MASK) {
+                               alrm->enabled = 1;
+                               break;
+                       }
                }
        }
 
        alrm->pending = 0;
        ret = regmap_read(info->max77686->regmap, MAX77686_REG_STATUS2, &val);
        if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
-                               __func__, __LINE__, ret);
+               dev_err(info->dev, "Fail to read status2 reg(%d)\n", ret);
                goto out;
        }
 
@@ -235,7 +401,7 @@ static int max77686_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 
 out:
        mutex_unlock(&info->lock);
-       return 0;
+       return ret;
 }
 
 static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
@@ -243,6 +409,7 @@ static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
        u8 data[RTC_NR_TIME];
        int ret, i;
        struct rtc_time tm;
+       const unsigned int *map = info->drv_data->map;
 
        if (!mutex_is_locked(&info->lock))
                dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
@@ -251,24 +418,36 @@ static int max77686_rtc_stop_alarm(struct max77686_rtc_info *info)
        if (ret < 0)
                goto out;
 
-       ret = regmap_bulk_read(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
-                               __func__, ret);
-               goto out;
-       }
+       if (info->drv_data->alarm_enable_reg) {
+               if (map[REG_RTC_AE1] == REG_RTC_NONE) {
+                       ret = -EINVAL;
+                       dev_err(info->dev,
+                               "alarm enable register not set(%d)\n", ret);
+                       goto out;
+               }
 
-       max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
+               ret = regmap_write(info->max77686->regmap, map[REG_RTC_AE1], 0);
+       } else {
+               ret = regmap_bulk_read(info->max77686->rtc_regmap,
+                                      map[REG_ALARM1_SEC], data,
+                                      ARRAY_SIZE(data));
+               if (ret < 0) {
+                       dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
+                       goto out;
+               }
 
-       for (i = 0; i < RTC_NR_TIME; i++)
-               data[i] &= ~ALARM_ENABLE_MASK;
+               max77686_rtc_data_to_tm(data, &tm, info);
+
+               for (i = 0; i < ARRAY_SIZE(data); i++)
+                       data[i] &= ~ALARM_ENABLE_MASK;
+
+               ret = regmap_bulk_write(info->max77686->rtc_regmap,
+                                       map[REG_ALARM1_SEC], data,
+                                       ARRAY_SIZE(data));
+       }
 
-       ret = regmap_bulk_write(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                               __func__, ret);
+               dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
                goto out;
        }
 
@@ -282,6 +461,7 @@ static int max77686_rtc_start_alarm(struct max77686_rtc_info *info)
        u8 data[RTC_NR_TIME];
        int ret;
        struct rtc_time tm;
+       const unsigned int *map = info->drv_data->map;
 
        if (!mutex_is_locked(&info->lock))
                dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
@@ -290,32 +470,38 @@ static int max77686_rtc_start_alarm(struct max77686_rtc_info *info)
        if (ret < 0)
                goto out;
 
-       ret = regmap_bulk_read(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
-                               __func__, ret);
-               goto out;
-       }
-
-       max77686_rtc_data_to_tm(data, &tm, info->rtc_24hr_mode);
+       if (info->drv_data->alarm_enable_reg) {
+               ret = regmap_write(info->max77686->regmap, map[REG_RTC_AE1],
+                                  MAX77802_ALARM_ENABLE_VALUE);
+       } else {
+               ret = regmap_bulk_read(info->max77686->rtc_regmap,
+                                      map[REG_ALARM1_SEC], data,
+                                      ARRAY_SIZE(data));
+               if (ret < 0) {
+                       dev_err(info->dev, "Fail to read alarm reg(%d)\n", ret);
+                       goto out;
+               }
 
-       data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
-       data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
-       data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
-       data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
-       if (data[RTC_MONTH] & 0xf)
-               data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
-       if (data[RTC_YEAR] & 0x7f)
-               data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
-       if (data[RTC_DATE] & 0x1f)
-               data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+               max77686_rtc_data_to_tm(data, &tm, info);
+
+               data[RTC_SEC] |= (1 << ALARM_ENABLE_SHIFT);
+               data[RTC_MIN] |= (1 << ALARM_ENABLE_SHIFT);
+               data[RTC_HOUR] |= (1 << ALARM_ENABLE_SHIFT);
+               data[RTC_WEEKDAY] &= ~ALARM_ENABLE_MASK;
+               if (data[RTC_MONTH] & 0xf)
+                       data[RTC_MONTH] |= (1 << ALARM_ENABLE_SHIFT);
+               if (data[RTC_YEAR] & info->drv_data->mask)
+                       data[RTC_YEAR] |= (1 << ALARM_ENABLE_SHIFT);
+               if (data[RTC_DATE] & 0x1f)
+                       data[RTC_DATE] |= (1 << ALARM_ENABLE_SHIFT);
+
+               ret = regmap_bulk_write(info->max77686->rtc_regmap,
+                                       map[REG_ALARM1_SEC], data,
+                                       ARRAY_SIZE(data));
+       }
 
-       ret = regmap_bulk_write(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                               __func__, ret);
+               dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
                goto out;
        }
 
@@ -330,7 +516,7 @@ static int max77686_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
        u8 data[RTC_NR_TIME];
        int ret;
 
-       ret = max77686_rtc_tm_to_data(&alrm->time, data);
+       ret = max77686_rtc_tm_to_data(&alrm->time, data, info);
        if (ret < 0)
                return ret;
 
@@ -341,11 +527,11 @@ static int max77686_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
                goto out;
 
        ret = regmap_bulk_write(info->max77686->rtc_regmap,
-                                MAX77686_ALARM1_SEC, data, RTC_NR_TIME);
+                               info->drv_data->map[REG_ALARM1_SEC],
+                               data, ARRAY_SIZE(data));
 
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                               __func__, ret);
+               dev_err(info->dev, "Fail to write alarm reg(%d)\n", ret);
                goto out;
        }
 
@@ -380,7 +566,7 @@ static irqreturn_t max77686_rtc_alarm_irq(int irq, void *data)
 {
        struct max77686_rtc_info *info = data;
 
-       dev_info(info->dev, "%s:irq(%d)\n", __func__, irq);
+       dev_dbg(info->dev, "RTC alarm IRQ: %d\n", irq);
 
        rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
 
@@ -406,10 +592,11 @@ static int max77686_rtc_init_reg(struct max77686_rtc_info *info)
 
        info->rtc_24hr_mode = 1;
 
-       ret = regmap_bulk_write(info->max77686->rtc_regmap, MAX77686_RTC_CONTROLM, data, 2);
+       ret = regmap_bulk_write(info->max77686->rtc_regmap,
+                               info->drv_data->map[REG_RTC_CONTROLM],
+                               data, ARRAY_SIZE(data));
        if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
-                               __func__, ret);
+               dev_err(info->dev, "Fail to write controlm reg(%d)\n", ret);
                return ret;
        }
 
@@ -421,10 +608,9 @@ static int max77686_rtc_probe(struct platform_device *pdev)
 {
        struct max77686_dev *max77686 = dev_get_drvdata(pdev->dev.parent);
        struct max77686_rtc_info *info;
+       const struct platform_device_id *id = platform_get_device_id(pdev);
        int ret;
 
-       dev_info(&pdev->dev, "%s\n", __func__);
-
        info = devm_kzalloc(&pdev->dev, sizeof(struct max77686_rtc_info),
                                GFP_KERNEL);
        if (!info)
@@ -434,6 +620,11 @@ static int max77686_rtc_probe(struct platform_device *pdev)
        info->dev = &pdev->dev;
        info->max77686 = max77686;
        info->rtc = max77686->rtc;
+       info->drv_data = (const struct max77686_rtc_driver_data *)
+               id->driver_data;
+
+       if (!info->drv_data->separate_i2c_addr)
+               info->max77686->rtc_regmap = info->max77686->regmap;
 
        platform_set_drvdata(pdev, info);
 
@@ -446,7 +637,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
 
        device_init_wakeup(&pdev->dev, 1);
 
-       info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max77686-rtc",
+       info->rtc_dev = devm_rtc_device_register(&pdev->dev, id->name,
                                        &max77686_rtc_ops, THIS_MODULE);
 
        if (IS_ERR(info->rtc_dev)) {
@@ -459,13 +650,13 @@ static int max77686_rtc_probe(struct platform_device *pdev)
 
        if (!max77686->rtc_irq_data) {
                ret = -EINVAL;
-               dev_err(&pdev->dev, "%s: no RTC regmap IRQ chip\n", __func__);
+               dev_err(&pdev->dev, "No RTC regmap IRQ chip\n");
                goto err_rtc;
        }
 
        info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
                                         MAX77686_RTCIRQ_RTCA1);
-       if (!info->virq) {
+       if (info->virq <= 0) {
                ret = -ENXIO;
                goto err_rtc;
        }
@@ -508,7 +699,8 @@ static SIMPLE_DEV_PM_OPS(max77686_rtc_pm_ops,
                         max77686_rtc_suspend, max77686_rtc_resume);
 
 static const struct platform_device_id rtc_id[] = {
-       { "max77686-rtc", 0 },
+       { "max77686-rtc", .driver_data = (kernel_ulong_t)&max77686_drv_data, },
+       { "max77802-rtc", .driver_data = (kernel_ulong_t)&max77802_drv_data, },
        {},
 };
 MODULE_DEVICE_TABLE(platform, rtc_id);
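
The rtc-max77686 changes above fold the MAX77802 support into this driver through max77686_rtc_driver_data: the MAX77686 needs a 16 ms update delay, masks register values with 0x7f and stores years as 0..99, while the MAX77802 needs 200 us, uses the full 0xff and stores tm_year directly. A small sketch of the year handling both ways, reusing the driver-data struct defined in the hunks above (for 2016, tm_year is 116):

#include <linux/types.h>

/* Sketch only, mirroring the year handling in max77686_rtc_tm_to_data() and
 * max77686_rtc_data_to_tm(): drv_data->alarm_enable_reg doubles as the
 * "MAX77802-style chip" marker.
 */
static u8 example_year_to_reg(const struct max77686_rtc_driver_data *drv_data,
			      int tm_year)
{
	if (drv_data->alarm_enable_reg)			/* MAX77802: full value */
		return tm_year;
	return tm_year > 100 ? tm_year - 100 : 0;	/* MAX77686: 2000..2099 */
}

static int example_reg_to_year(const struct max77686_rtc_driver_data *drv_data,
			       u8 reg)
{
	int tm_year = reg & drv_data->mask;

	if (!drv_data->alarm_enable_reg)		/* MAX77686: add century back */
		tm_year += 100;
	return tm_year;
}
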
diff --git a/drivers/rtc/rtc-max77802.c b/drivers/rtc/rtc-max77802.c
deleted file mode 100644 (file)
index 82ffcc5..0000000
+++ /dev/null
@@ -1,502 +0,0 @@
-/*
- * RTC driver for Maxim MAX77802
- *
- * Copyright (C) 2013 Google, Inc
- *
- * Copyright (C) 2012 Samsung Electronics Co.Ltd
- *
- *  based on rtc-max8997.c
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/rtc.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/max77686-private.h>
-#include <linux/irqdomain.h>
-#include <linux/regmap.h>
-
-/* RTC Control Register */
-#define BCD_EN_SHIFT                   0
-#define BCD_EN_MASK                    (1 << BCD_EN_SHIFT)
-#define MODEL24_SHIFT                  1
-#define MODEL24_MASK                   (1 << MODEL24_SHIFT)
-/* RTC Update Register1 */
-#define RTC_UDR_SHIFT                  0
-#define RTC_UDR_MASK                   (1 << RTC_UDR_SHIFT)
-#define RTC_RBUDR_SHIFT                        4
-#define RTC_RBUDR_MASK                 (1 << RTC_RBUDR_SHIFT)
-/* RTC Hour register */
-#define HOUR_PM_SHIFT                  6
-#define HOUR_PM_MASK                   (1 << HOUR_PM_SHIFT)
-/* RTC Alarm Enable */
-#define ALARM_ENABLE_SHIFT             7
-#define ALARM_ENABLE_MASK              (1 << ALARM_ENABLE_SHIFT)
-
-/* For the RTCAE1 register, we write this value to enable the alarm */
-#define ALARM_ENABLE_VALUE             0x77
-
-#define MAX77802_RTC_UPDATE_DELAY_US   200
-
-enum {
-       RTC_SEC = 0,
-       RTC_MIN,
-       RTC_HOUR,
-       RTC_WEEKDAY,
-       RTC_MONTH,
-       RTC_YEAR,
-       RTC_DATE,
-       RTC_NR_TIME
-};
-
-struct max77802_rtc_info {
-       struct device           *dev;
-       struct max77686_dev     *max77802;
-       struct i2c_client       *rtc;
-       struct rtc_device       *rtc_dev;
-       struct mutex            lock;
-
-       struct regmap           *regmap;
-
-       int virq;
-       int rtc_24hr_mode;
-};
-
-enum MAX77802_RTC_OP {
-       MAX77802_RTC_WRITE,
-       MAX77802_RTC_READ,
-};
-
-static void max77802_rtc_data_to_tm(u8 *data, struct rtc_time *tm,
-                                  int rtc_24hr_mode)
-{
-       tm->tm_sec = data[RTC_SEC] & 0xff;
-       tm->tm_min = data[RTC_MIN] & 0xff;
-       if (rtc_24hr_mode)
-               tm->tm_hour = data[RTC_HOUR] & 0x1f;
-       else {
-               tm->tm_hour = data[RTC_HOUR] & 0x0f;
-               if (data[RTC_HOUR] & HOUR_PM_MASK)
-                       tm->tm_hour += 12;
-       }
-
-       /* Only a single bit is set in data[], so fls() would be equivalent */
-       tm->tm_wday = ffs(data[RTC_WEEKDAY] & 0xff) - 1;
-       tm->tm_mday = data[RTC_DATE] & 0x1f;
-       tm->tm_mon = (data[RTC_MONTH] & 0x0f) - 1;
-
-       tm->tm_year = data[RTC_YEAR] & 0xff;
-       tm->tm_yday = 0;
-       tm->tm_isdst = 0;
-}
-
-static int max77802_rtc_tm_to_data(struct rtc_time *tm, u8 *data)
-{
-       data[RTC_SEC] = tm->tm_sec;
-       data[RTC_MIN] = tm->tm_min;
-       data[RTC_HOUR] = tm->tm_hour;
-       data[RTC_WEEKDAY] = 1 << tm->tm_wday;
-       data[RTC_DATE] = tm->tm_mday;
-       data[RTC_MONTH] = tm->tm_mon + 1;
-       data[RTC_YEAR] = tm->tm_year;
-
-       return 0;
-}
-
-static int max77802_rtc_update(struct max77802_rtc_info *info,
-       enum MAX77802_RTC_OP op)
-{
-       int ret;
-       unsigned int data;
-
-       if (op == MAX77802_RTC_WRITE)
-               data = 1 << RTC_UDR_SHIFT;
-       else
-               data = 1 << RTC_RBUDR_SHIFT;
-
-       ret = regmap_update_bits(info->max77802->regmap,
-                                MAX77802_RTC_UPDATE0, data, data);
-       if (ret < 0)
-               dev_err(info->dev, "%s: fail to write update reg(ret=%d, data=0x%x)\n",
-                               __func__, ret, data);
-       else {
-               /* Minimum delay required before RTC update. */
-               usleep_range(MAX77802_RTC_UPDATE_DELAY_US,
-                            MAX77802_RTC_UPDATE_DELAY_US * 2);
-       }
-
-       return ret;
-}
-
-static int max77802_rtc_read_time(struct device *dev, struct rtc_time *tm)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       u8 data[RTC_NR_TIME];
-       int ret;
-
-       mutex_lock(&info->lock);
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_bulk_read(info->max77802->regmap,
-                               MAX77802_RTC_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read time reg(%d)\n", __func__,
-                       ret);
-               goto out;
-       }
-
-       max77802_rtc_data_to_tm(data, tm, info->rtc_24hr_mode);
-
-       ret = rtc_valid_tm(tm);
-
-out:
-       mutex_unlock(&info->lock);
-       return ret;
-}
-
-static int max77802_rtc_set_time(struct device *dev, struct rtc_time *tm)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       u8 data[RTC_NR_TIME];
-       int ret;
-
-       ret = max77802_rtc_tm_to_data(tm, data);
-       if (ret < 0)
-               return ret;
-
-       mutex_lock(&info->lock);
-
-       ret = regmap_bulk_write(info->max77802->regmap,
-                                MAX77802_RTC_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write time reg(%d)\n", __func__,
-                       ret);
-               goto out;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-
-out:
-       mutex_unlock(&info->lock);
-       return ret;
-}
-
-static int max77802_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       u8 data[RTC_NR_TIME];
-       unsigned int val;
-       int ret;
-
-       mutex_lock(&info->lock);
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_bulk_read(info->max77802->regmap,
-                                MAX77802_ALARM1_SEC, data, RTC_NR_TIME);
-       if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read alarm reg(%d)\n",
-                               __func__, __LINE__, ret);
-               goto out;
-       }
-
-       max77802_rtc_data_to_tm(data, &alrm->time, info->rtc_24hr_mode);
-
-       alrm->enabled = 0;
-       ret = regmap_read(info->max77802->regmap,
-                         MAX77802_RTC_AE1, &val);
-       if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read alarm enable(%d)\n",
-                       __func__, __LINE__, ret);
-               goto out;
-       }
-       if (val)
-               alrm->enabled = 1;
-
-       alrm->pending = 0;
-       ret = regmap_read(info->max77802->regmap, MAX77802_REG_STATUS2, &val);
-       if (ret < 0) {
-               dev_err(info->dev, "%s:%d fail to read status2 reg(%d)\n",
-                               __func__, __LINE__, ret);
-               goto out;
-       }
-
-       if (val & (1 << 2)) /* RTCA1 */
-               alrm->pending = 1;
-
-out:
-       mutex_unlock(&info->lock);
-       return 0;
-}
-
-static int max77802_rtc_stop_alarm(struct max77802_rtc_info *info)
-{
-       int ret;
-
-       if (!mutex_is_locked(&info->lock))
-               dev_warn(info->dev, "%s: should have mutex locked\n", __func__);
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_write(info->max77802->regmap,
-                          MAX77802_RTC_AE1, 0);
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                       __func__, ret);
-               goto out;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-out:
-       return ret;
-}
-
-static int max77802_rtc_start_alarm(struct max77802_rtc_info *info)
-{
-       int ret;
-
-       if (!mutex_is_locked(&info->lock))
-               dev_warn(info->dev, "%s: should have mutex locked\n",
-                        __func__);
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_READ);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_write(info->max77802->regmap,
-                                  MAX77802_RTC_AE1,
-                                  ALARM_ENABLE_VALUE);
-
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to read alarm reg(%d)\n",
-                               __func__, ret);
-               goto out;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-out:
-       return ret;
-}
-
-static int max77802_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       u8 data[RTC_NR_TIME];
-       int ret;
-
-       ret = max77802_rtc_tm_to_data(&alrm->time, data);
-       if (ret < 0)
-               return ret;
-
-       mutex_lock(&info->lock);
-
-       ret = max77802_rtc_stop_alarm(info);
-       if (ret < 0)
-               goto out;
-
-       ret = regmap_bulk_write(info->max77802->regmap,
-                                MAX77802_ALARM1_SEC, data, RTC_NR_TIME);
-
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write alarm reg(%d)\n",
-                               __func__, ret);
-               goto out;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-       if (ret < 0)
-               goto out;
-
-       if (alrm->enabled)
-               ret = max77802_rtc_start_alarm(info);
-out:
-       mutex_unlock(&info->lock);
-       return ret;
-}
-
-static int max77802_rtc_alarm_irq_enable(struct device *dev,
-                                        unsigned int enabled)
-{
-       struct max77802_rtc_info *info = dev_get_drvdata(dev);
-       int ret;
-
-       mutex_lock(&info->lock);
-       if (enabled)
-               ret = max77802_rtc_start_alarm(info);
-       else
-               ret = max77802_rtc_stop_alarm(info);
-       mutex_unlock(&info->lock);
-
-       return ret;
-}
-
-static irqreturn_t max77802_rtc_alarm_irq(int irq, void *data)
-{
-       struct max77802_rtc_info *info = data;
-
-       dev_dbg(info->dev, "%s:irq(%d)\n", __func__, irq);
-
-       rtc_update_irq(info->rtc_dev, 1, RTC_IRQF | RTC_AF);
-
-       return IRQ_HANDLED;
-}
-
-static const struct rtc_class_ops max77802_rtc_ops = {
-       .read_time = max77802_rtc_read_time,
-       .set_time = max77802_rtc_set_time,
-       .read_alarm = max77802_rtc_read_alarm,
-       .set_alarm = max77802_rtc_set_alarm,
-       .alarm_irq_enable = max77802_rtc_alarm_irq_enable,
-};
-
-static int max77802_rtc_init_reg(struct max77802_rtc_info *info)
-{
-       u8 data[2];
-       int ret;
-
-       max77802_rtc_update(info, MAX77802_RTC_READ);
-
-       /* Set RTC control register : Binary mode, 24hour mdoe */
-       data[0] = (1 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
-       data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
-
-       info->rtc_24hr_mode = 1;
-
-       ret = regmap_bulk_write(info->max77802->regmap,
-                               MAX77802_RTC_CONTROLM, data, ARRAY_SIZE(data));
-       if (ret < 0) {
-               dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
-                               __func__, ret);
-               return ret;
-       }
-
-       ret = max77802_rtc_update(info, MAX77802_RTC_WRITE);
-       return ret;
-}
-
-static int max77802_rtc_probe(struct platform_device *pdev)
-{
-       struct max77686_dev *max77802 = dev_get_drvdata(pdev->dev.parent);
-       struct max77802_rtc_info *info;
-       int ret;
-
-       dev_dbg(&pdev->dev, "%s\n", __func__);
-
-       info = devm_kzalloc(&pdev->dev, sizeof(struct max77802_rtc_info),
-                           GFP_KERNEL);
-       if (!info)
-               return -ENOMEM;
-
-       mutex_init(&info->lock);
-       info->dev = &pdev->dev;
-       info->max77802 = max77802;
-       info->rtc = max77802->i2c;
-
-       platform_set_drvdata(pdev, info);
-
-       ret = max77802_rtc_init_reg(info);
-
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to initialize RTC reg:%d\n", ret);
-               return ret;
-       }
-
-       device_init_wakeup(&pdev->dev, 1);
-
-       info->rtc_dev = devm_rtc_device_register(&pdev->dev, "max77802-rtc",
-                                                &max77802_rtc_ops, THIS_MODULE);
-
-       if (IS_ERR(info->rtc_dev)) {
-               ret = PTR_ERR(info->rtc_dev);
-               dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
-               if (ret == 0)
-                       ret = -EINVAL;
-               return ret;
-       }
-
-       if (!max77802->rtc_irq_data) {
-               dev_err(&pdev->dev, "No RTC regmap IRQ chip\n");
-               return -EINVAL;
-       }
-
-       info->virq = regmap_irq_get_virq(max77802->rtc_irq_data,
-                                        MAX77686_RTCIRQ_RTCA1);
-
-       if (info->virq <= 0) {
-               dev_err(&pdev->dev, "Failed to get virtual IRQ %d\n",
-                       MAX77686_RTCIRQ_RTCA1);
-               return -EINVAL;
-       }
-
-       ret = devm_request_threaded_irq(&pdev->dev, info->virq, NULL,
-                                       max77802_rtc_alarm_irq, 0, "rtc-alarm1",
-                                       info);
-       if (ret < 0)
-               dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
-                       info->virq, ret);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int max77802_rtc_suspend(struct device *dev)
-{
-       if (device_may_wakeup(dev)) {
-               struct max77802_rtc_info *info = dev_get_drvdata(dev);
-
-               return enable_irq_wake(info->virq);
-       }
-
-       return 0;
-}
-
-static int max77802_rtc_resume(struct device *dev)
-{
-       if (device_may_wakeup(dev)) {
-               struct max77802_rtc_info *info = dev_get_drvdata(dev);
-
-               return disable_irq_wake(info->virq);
-       }
-
-       return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(max77802_rtc_pm_ops,
-                        max77802_rtc_suspend, max77802_rtc_resume);
-
-static const struct platform_device_id rtc_id[] = {
-       { "max77802-rtc", 0 },
-       {},
-};
-MODULE_DEVICE_TABLE(platform, rtc_id);
-
-static struct platform_driver max77802_rtc_driver = {
-       .driver         = {
-               .name   = "max77802-rtc",
-               .pm     = &max77802_rtc_pm_ops,
-       },
-       .probe          = max77802_rtc_probe,
-       .id_table       = rtc_id,
-};
-
-module_platform_driver(max77802_rtc_driver);
-
-MODULE_DESCRIPTION("Maxim MAX77802 RTC driver");
-MODULE_AUTHOR("Simon Glass <sjg@chromium.org>");
-MODULE_LICENSE("GPL");
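The driver removed above (and the max77686 driver it is folded into) talks to shadowed time registers: a read is preceded by kicking the read-buffer update bit (RBUDR) and a write is committed by kicking UDR, each followed by a short settle delay. A minimal sketch of that handshake, assuming a regmap handle; the register address and masks below are placeholders, not the chip's real map:

#include <linux/delay.h>
#include <linux/regmap.h>

#define EXAMPLE_RTC_UPDATE0     0x04            /* placeholder update register */
#define EXAMPLE_UDR_MASK        (1 << 0)        /* commit a write to the time regs */
#define EXAMPLE_RBUDR_MASK      (1 << 4)        /* load the shadow regs before a read */

static int example_rtc_kick_update(struct regmap *map, bool write)
{
        unsigned int bit = write ? EXAMPLE_UDR_MASK : EXAMPLE_RBUDR_MASK;
        int ret;

        ret = regmap_update_bits(map, EXAMPLE_RTC_UPDATE0, bit, bit);
        if (ret < 0)
                return ret;

        /* minimum delay before the RTC registers are coherent again */
        usleep_range(200, 400);
        return 0;
}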
index bd911bafb809a2ede1c46c3f2e2877ac7507fc50..85df3b4c2521c420e54bf56d38c811bb9f503fe9 100644 (file)
@@ -7,7 +7,7 @@
  * All rights reserved.
  *
  * Modified by fengjh at rising.com.cn
- * <http://lists.lm-sensors.org/mailman/listinfo/lm-sensors>
+ * <lm-sensors@lm-sensors.org>
  * 2006.11
  *
  * Code cleanup by Sergei Poselenov, <sposelenov@emcraft.com>
index 7d82bbcb12df4f17f9c67bab4493a31707e105d4..e7e078b3c7e6ee19e35db42666bccb23b83cdf30 100644 (file)
@@ -643,7 +643,6 @@ static void raw3215_shutdown(struct raw3215_info *raw)
        if ((raw->flags & RAW3215_WORKING) ||
            raw->queued_write != NULL ||
            raw->queued_read != NULL) {
-               raw->port.flags |= ASYNC_CLOSING;
                add_wait_queue(&raw->empty_wait, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -651,7 +650,7 @@ static void raw3215_shutdown(struct raw3215_info *raw)
                spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
                remove_wait_queue(&raw->empty_wait, &wait);
                set_current_state(TASK_RUNNING);
-               raw->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING);
+               raw->port.flags &= ~ASYNC_INITIALIZED;
        }
        spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
 }
index c692dfebd0bab1c808f460349dff194b028f5c5c..50597f9522fe375ed764aab8842a400e56ef808d 100644 (file)
@@ -139,11 +139,11 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
 
        device = container_of(kobj, struct device, kobj);
        chp = to_channelpath(device);
-       if (!chp->cmg_chars)
+       if (chp->cmg == -1)
                return 0;
 
-       return memory_read_from_buffer(buf, count, &off,
-                               chp->cmg_chars, sizeof(struct cmg_chars));
+       return memory_read_from_buffer(buf, count, &off, &chp->cmg_chars,
+                                      sizeof(chp->cmg_chars));
 }
 
 static struct bin_attribute chp_measurement_chars_attr = {
@@ -416,7 +416,8 @@ static void chp_release(struct device *dev)
  * chp_update_desc - update channel-path description
  * @chp - channel-path
  *
- * Update the channel-path description of the specified channel-path.
+ * Update the channel-path description of the specified channel-path
+ * including channel measurement related information.
  * Return zero on success, non-zero otherwise.
  */
 int chp_update_desc(struct channel_path *chp)
@@ -428,8 +429,10 @@ int chp_update_desc(struct channel_path *chp)
                return rc;
 
        rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1);
+       if (rc)
+               return rc;
 
-       return rc;
+       return chsc_get_channel_measurement_chars(chp);
 }
 
 /**
@@ -466,14 +469,6 @@ int chp_new(struct chp_id chpid)
                ret = -ENODEV;
                goto out_free;
        }
-       /* Get channel-measurement characteristics. */
-       if (css_chsc_characteristics.scmc && css_chsc_characteristics.secm) {
-               ret = chsc_get_channel_measurement_chars(chp);
-               if (ret)
-                       goto out_free;
-       } else {
-               chp->cmg = -1;
-       }
        dev_set_name(&chp->dev, "chp%x.%02x", chpid.cssid, chpid.id);
 
        /* make it known to the system */
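With the measurement characteristics embedded in struct channel_path instead of being kmalloc'ed, the sysfs read above can pass the field straight to memory_read_from_buffer() and use chp->cmg as the validity check. A sketch of that pattern with hypothetical foo_* types; only the bin_attribute read prototype and memory_read_from_buffer() are the real API:

#include <linux/device.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/types.h>

struct foo_chars {
        u32 values[5];
};

struct foo_path {
        struct device dev;
        int cmg;                        /* -1 while no measurement data is valid */
        struct foo_chars chars;         /* embedded, no separate allocation to free */
};

static ssize_t foo_chars_read(struct file *filp, struct kobject *kobj,
                              struct bin_attribute *bin_attr,
                              char *buf, loff_t off, size_t count)
{
        struct foo_path *p = container_of(kobj, struct foo_path, dev.kobj);

        if (p->cmg == -1)
                return 0;

        return memory_read_from_buffer(buf, count, &off, &p->chars,
                                       sizeof(p->chars));
}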
index 4efd5b867cc390780a18c2f983a2215ed05dcd72..af0232290dc4c7c183a65e3fe13d6683bb48cf1e 100644 (file)
@@ -48,7 +48,7 @@ struct channel_path {
        /* Channel-measurement related stuff: */
        int cmg;
        int shared;
-       void *cmg_chars;
+       struct cmg_chars cmg_chars;
 };
 
 /* Return channel_path struct for given chpid. */
index a831d18596a5ef592f88ec5af031f639606c8c0b..c424c0c7367e360acd9ccd1ce899dc80bb575265 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 #include <linux/pci.h>
 
 #include <asm/cio.h>
@@ -224,8 +225,9 @@ out_unreg:
 
 void chsc_chp_offline(struct chp_id chpid)
 {
-       char dbf_txt[15];
+       struct channel_path *chp = chpid_to_chp(chpid);
        struct chp_link link;
+       char dbf_txt[15];
 
        sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
@@ -236,6 +238,11 @@ void chsc_chp_offline(struct chp_id chpid)
        link.chpid = chpid;
        /* Wait until previous actions have settled. */
        css_wait_for_slow_path();
+
+       mutex_lock(&chp->lock);
+       chp_update_desc(chp);
+       mutex_unlock(&chp->lock);
+
        for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
 }
 
@@ -690,8 +697,9 @@ static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 
 void chsc_chp_online(struct chp_id chpid)
 {
-       char dbf_txt[15];
+       struct channel_path *chp = chpid_to_chp(chpid);
        struct chp_link link;
+       char dbf_txt[15];
 
        sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
        CIO_TRACE_EVENT(2, dbf_txt);
@@ -701,6 +709,11 @@ void chsc_chp_online(struct chp_id chpid)
                link.chpid = chpid;
                /* Wait until previous actions have settled. */
                css_wait_for_slow_path();
+
+               mutex_lock(&chp->lock);
+               chp_update_desc(chp);
+               mutex_unlock(&chp->lock);
+
                for_each_subchannel_staged(__s390_process_res_acc, NULL,
                                           &link);
                css_schedule_reprobe();
@@ -967,22 +980,19 @@ static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
                          struct cmg_chars *chars)
 {
-       struct cmg_chars *cmg_chars;
        int i, mask;
 
-       cmg_chars = chp->cmg_chars;
        for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
                mask = 0x80 >> (i + 3);
                if (cmcv & mask)
-                       cmg_chars->values[i] = chars->values[i];
+                       chp->cmg_chars.values[i] = chars->values[i];
                else
-                       cmg_chars->values[i] = 0;
+                       chp->cmg_chars.values[i] = 0;
        }
 }
 
 int chsc_get_channel_measurement_chars(struct channel_path *chp)
 {
-       struct cmg_chars *cmg_chars;
        int ccode, ret;
 
        struct {
@@ -1006,10 +1016,11 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
                u32 data[NR_MEASUREMENT_CHARS];
        } __attribute__ ((packed)) *scmc_area;
 
-       chp->cmg_chars = NULL;
-       cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
-       if (!cmg_chars)
-               return -ENOMEM;
+       chp->shared = -1;
+       chp->cmg = -1;
+
+       if (!css_chsc_characteristics.scmc || !css_chsc_characteristics.secm)
+               return 0;
 
        spin_lock_irq(&chsc_page_lock);
        memset(chsc_page, 0, PAGE_SIZE);
@@ -1031,25 +1042,19 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
                              scmc_area->response.code);
                goto out;
        }
-       if (scmc_area->not_valid) {
-               chp->cmg = -1;
-               chp->shared = -1;
+       if (scmc_area->not_valid)
                goto out;
-       }
+
        chp->cmg = scmc_area->cmg;
        chp->shared = scmc_area->shared;
        if (chp->cmg != 2 && chp->cmg != 3) {
                /* No cmg-dependent data. */
                goto out;
        }
-       chp->cmg_chars = cmg_chars;
        chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
                                  (struct cmg_chars *) &scmc_area->data);
 out:
        spin_unlock_irq(&chsc_page_lock);
-       if (!chp->cmg_chars)
-               kfree(cmg_chars);
-
        return ret;
 }
 
index 7b23f43c7b080cc81b4de94ed9016cc0d8a91546..de1b6c1d172c8c0866dc0fd8fb709d032bdef820 100644 (file)
@@ -112,9 +112,10 @@ static inline int convert_error(struct zcrypt_device *zdev,
                atomic_set(&zcrypt_rescan_req, 1);
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                       zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
+                       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
+                       ehdr->reply_code);
                return -EAGAIN;
        case REP82_ERROR_TRANSPORT_FAIL:
        case REP82_ERROR_MACHINE_FAILURE:
@@ -123,16 +124,18 @@ static inline int convert_error(struct zcrypt_device *zdev,
                atomic_set(&zcrypt_rescan_req, 1);
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                       zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
+                       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
+                       ehdr->reply_code);
                return -EAGAIN;
        default:
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                       zdev->ap_dev->qid, zdev->online, ehdr->reply_code);
+                       AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
+                       ehdr->reply_code);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
index 74edf2934e7c9fd652c33d1d14bb47b9e2bf44b8..eedfaa2cf715f698db447c1a79e857f170727dc5 100644 (file)
@@ -336,9 +336,10 @@ static int convert_type80(struct zcrypt_device *zdev,
                /* The result is too short, the CEX2A card may not do that.. */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                              zdev->ap_dev->qid, zdev->online, t80h->code);
+                              AP_QID_DEVICE(zdev->ap_dev->qid),
+                              zdev->online, t80h->code);
 
                return -EAGAIN; /* repeat the request on a different device. */
        }
@@ -368,9 +369,9 @@ static int convert_response(struct zcrypt_device *zdev,
        default: /* Unknown response type, this should NEVER EVER happen */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
index 9a2dd472c1cc9870d37e98c5692c1c9c1e0ebac3..21959719daef94a00bd3f40282a2022077ec1344 100644 (file)
@@ -572,9 +572,9 @@ static int convert_type86_ica(struct zcrypt_device *zdev,
                        return -EINVAL;
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%drc%d",
-                              zdev->ap_dev->qid, zdev->online,
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online,
                               msg->hdr.reply_code);
                return -EAGAIN; /* repeat the request on a different device. */
        }
@@ -715,9 +715,9 @@ static int convert_response_ica(struct zcrypt_device *zdev,
        default: /* Unknown response type, this should NEVER EVER happen */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
@@ -747,9 +747,9 @@ static int convert_response_xcrb(struct zcrypt_device *zdev,
                xcRB->status = 0x0008044DL; /* HDD_InvalidParm */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
@@ -773,9 +773,9 @@ static int convert_response_ep11_xcrb(struct zcrypt_device *zdev,
        default: /* Unknown response type, this should NEVER EVER happen */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
@@ -800,9 +800,9 @@ static int convert_response_rng(struct zcrypt_device *zdev,
        default: /* Unknown response type, this should NEVER EVER happen */
                zdev->online = 0;
                pr_err("Cryptographic device %x failed and was set offline\n",
-                      zdev->ap_dev->qid);
+                      AP_QID_DEVICE(zdev->ap_dev->qid));
                ZCRYPT_DBF_DEV(DBF_ERR, zdev, "dev%04xo%dfail",
-                              zdev->ap_dev->qid, zdev->online);
+                              AP_QID_DEVICE(zdev->ap_dev->qid), zdev->online);
                return -EAGAIN; /* repeat the request on a different device. */
        }
 }
index 37a0c7156087905b8cb749103e71d92547091e2e..b67661836c9fa26fccd1e23a82508c1d567a5916 100644 (file)
@@ -1,5 +1,7 @@
 config SCSI_HISI_SAS
        tristate "HiSilicon SAS"
+       depends on HAS_DMA
+       depends on ARM64 || COMPILE_TEST
        select SCSI_SAS_LIBSAS
        select BLK_DEV_INTEGRITY
        help
index 0b8af186e70783e9c8318d9158b177ba25890ff3..2e4c82f8329c8ac0db7ea039f87b33f25d7344ff 100644 (file)
  *     Zhenyu Wang
  */
 
+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/blkdev.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/kfifo.h>
 #include <linux/scatterlist.h>
@@ -428,7 +428,7 @@ static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
         * sufficient room.
         */
        if (conn->hdrdgst_en) {
-               iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
+               iscsi_tcp_dgst_header(tcp_sw_conn->tx_hash, hdr, hdrlen,
                                      hdr + hdrlen);
                hdrlen += ISCSI_DIGEST_SIZE;
        }
@@ -454,7 +454,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
 {
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
-       struct hash_desc *tx_hash = NULL;
+       struct ahash_request *tx_hash = NULL;
        unsigned int hdr_spec_len;
 
        ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
@@ -467,7 +467,7 @@ iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
        WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
 
        if (conn->datadgst_en)
-               tx_hash = &tcp_sw_conn->tx_hash;
+               tx_hash = tcp_sw_conn->tx_hash;
 
        return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
                                     sg, count, offset, len,
@@ -480,7 +480,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
 {
        struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
        struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
-       struct hash_desc *tx_hash = NULL;
+       struct ahash_request *tx_hash = NULL;
        unsigned int hdr_spec_len;
 
        ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
@@ -492,7 +492,7 @@ iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
        WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));
 
        if (conn->datadgst_en)
-               tx_hash = &tcp_sw_conn->tx_hash;
+               tx_hash = tcp_sw_conn->tx_hash;
 
        iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
                                data, len, NULL, tx_hash);
@@ -543,6 +543,7 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
        struct iscsi_cls_conn *cls_conn;
        struct iscsi_tcp_conn *tcp_conn;
        struct iscsi_sw_tcp_conn *tcp_sw_conn;
+       struct crypto_ahash *tfm;
 
        cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
                                        conn_idx);
@@ -552,23 +553,28 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
        tcp_conn = conn->dd_data;
        tcp_sw_conn = tcp_conn->dd_data;
 
-       tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-                                                    CRYPTO_ALG_ASYNC);
-       tcp_sw_conn->tx_hash.flags = 0;
-       if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
+       tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
                goto free_conn;
 
-       tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-                                                    CRYPTO_ALG_ASYNC);
-       tcp_sw_conn->rx_hash.flags = 0;
-       if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
-               goto free_tx_tfm;
-       tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;
+       tcp_sw_conn->tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!tcp_sw_conn->tx_hash)
+               goto free_tfm;
+       ahash_request_set_callback(tcp_sw_conn->tx_hash, 0, NULL, NULL);
+
+       tcp_sw_conn->rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!tcp_sw_conn->rx_hash)
+               goto free_tx_hash;
+       ahash_request_set_callback(tcp_sw_conn->rx_hash, 0, NULL, NULL);
+
+       tcp_conn->rx_hash = tcp_sw_conn->rx_hash;
 
        return cls_conn;
 
-free_tx_tfm:
-       crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
+free_tx_hash:
+       ahash_request_free(tcp_sw_conn->tx_hash);
+free_tfm:
+       crypto_free_ahash(tfm);
 free_conn:
        iscsi_conn_printk(KERN_ERR, conn,
                          "Could not create connection due to crc32c "
@@ -607,10 +613,14 @@ static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
 
        iscsi_sw_tcp_release_conn(conn);
 
-       if (tcp_sw_conn->tx_hash.tfm)
-               crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
-       if (tcp_sw_conn->rx_hash.tfm)
-               crypto_free_hash(tcp_sw_conn->rx_hash.tfm);
+       ahash_request_free(tcp_sw_conn->rx_hash);
+       if (tcp_sw_conn->tx_hash) {
+               struct crypto_ahash *tfm;
+
+               tfm = crypto_ahash_reqtfm(tcp_sw_conn->tx_hash);
+               ahash_request_free(tcp_sw_conn->tx_hash);
+               crypto_free_ahash(tfm);
+       }
 
        iscsi_tcp_conn_teardown(cls_conn);
 }
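The hunks above convert the driver from the legacy hash_desc/crypto_hash interface to the ahash API: allocate a synchronous transform once, wrap it in an ahash_request, and drive digest (or init/update/final) calls through that request. A minimal sketch of the one-shot digest variant for crc32c; buf, len and out are caller-supplied placeholders:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_crc32c_digest(const void *buf, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        int ret;

        /* mask out async implementations, as the iSCSI code does */
        tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }
        ahash_request_set_callback(req, 0, NULL, NULL);

        sg_init_one(&sg, buf, len);
        ahash_request_set_crypt(req, &sg, out, len);
        ret = crypto_ahash_digest(req);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return ret;
}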
index f42ecb238af549327d539d822824d87001975fee..06d42d00a32319aa65c5b0c7112b662fe2fbf319 100644 (file)
@@ -45,8 +45,8 @@ struct iscsi_sw_tcp_conn {
        void                    (*old_write_space)(struct sock *);
 
        /* data and header digests */
-       struct hash_desc        tx_hash;        /* CRC32C (Tx) */
-       struct hash_desc        rx_hash;        /* CRC32C (Rx) */
+       struct ahash_request    *tx_hash;       /* CRC32C (Tx) */
+       struct ahash_request    *rx_hash;       /* CRC32C (Rx) */
 
        /* MIB custom statistics */
        uint32_t                sendpage_failures_cnt;
index 60cb6dc3c6f09a405fdc9dc157cc8d0716295134..63a1d69ff5154df7992098ce7e5e7fd4117c8a33 100644 (file)
  *     Zhenyu Wang
  */
 
+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/inet.h>
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/blkdev.h>
-#include <linux/crypto.h>
 #include <linux/delay.h>
 #include <linux/kfifo.h>
 #include <linux/scatterlist.h>
@@ -214,7 +214,8 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
                } else
                        sg_init_one(&sg, segment->data + segment->copied,
                                    copied);
-               crypto_hash_update(segment->hash, &sg, copied);
+               ahash_request_set_crypt(segment->hash, &sg, NULL, copied);
+               crypto_ahash_update(segment->hash);
        }
 
        segment->copied += copied;
@@ -260,7 +261,9 @@ int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
         * is completely handled in hdr done function.
         */
        if (segment->hash) {
-               crypto_hash_final(segment->hash, segment->digest);
+               ahash_request_set_crypt(segment->hash, NULL,
+                                       segment->digest, 0);
+               crypto_ahash_final(segment->hash);
                iscsi_tcp_segment_splice_digest(segment,
                                 recv ? segment->recv_digest : segment->digest);
                return 0;
@@ -310,13 +313,14 @@ iscsi_tcp_segment_recv(struct iscsi_tcp_conn *tcp_conn,
 }
 
 inline void
-iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr, size_t hdrlen,
-                     unsigned char digest[ISCSI_DIGEST_SIZE])
+iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
+                     size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
 {
        struct scatterlist sg;
 
        sg_init_one(&sg, hdr, hdrlen);
-       crypto_hash_digest(hash, &sg, hdrlen, digest);
+       ahash_request_set_crypt(hash, &sg, digest, hdrlen);
+       crypto_ahash_digest(hash);
 }
 EXPORT_SYMBOL_GPL(iscsi_tcp_dgst_header);
 
@@ -341,7 +345,7 @@ iscsi_tcp_dgst_verify(struct iscsi_tcp_conn *tcp_conn,
  */
 static inline void
 __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
-                    iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+                    iscsi_segment_done_fn_t *done, struct ahash_request *hash)
 {
        memset(segment, 0, sizeof(*segment));
        segment->total_size = size;
@@ -349,14 +353,14 @@ __iscsi_segment_init(struct iscsi_segment *segment, size_t size,
 
        if (hash) {
                segment->hash = hash;
-               crypto_hash_init(hash);
+               crypto_ahash_init(hash);
        }
 }
 
 inline void
 iscsi_segment_init_linear(struct iscsi_segment *segment, void *data,
                          size_t size, iscsi_segment_done_fn_t *done,
-                         struct hash_desc *hash)
+                         struct ahash_request *hash)
 {
        __iscsi_segment_init(segment, size, done, hash);
        segment->data = data;
@@ -368,7 +372,8 @@ inline int
 iscsi_segment_seek_sg(struct iscsi_segment *segment,
                      struct scatterlist *sg_list, unsigned int sg_count,
                      unsigned int offset, size_t size,
-                     iscsi_segment_done_fn_t *done, struct hash_desc *hash)
+                     iscsi_segment_done_fn_t *done,
+                     struct ahash_request *hash)
 {
        struct scatterlist *sg;
        unsigned int i;
@@ -431,7 +436,7 @@ static void
 iscsi_tcp_data_recv_prep(struct iscsi_tcp_conn *tcp_conn)
 {
        struct iscsi_conn *conn = tcp_conn->iscsi_conn;
-       struct hash_desc *rx_hash = NULL;
+       struct ahash_request *rx_hash = NULL;
 
        if (conn->datadgst_en &&
            !(conn->session->tt->caps & CAP_DIGEST_OFFLOAD))
@@ -686,7 +691,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 
                if (tcp_conn->in.datalen) {
                        struct iscsi_tcp_task *tcp_task = task->dd_data;
-                       struct hash_desc *rx_hash = NULL;
+                       struct ahash_request *rx_hash = NULL;
                        struct scsi_data_buffer *sdb = scsi_in(task->sc);
 
                        /*
index 1412266314292dfa59bf473cf2ed3fdb2edebe65..a6682c508c4cbb800a19fd8b010a551f404d55b3 100644 (file)
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/pci.h>
 #include <asm/dbdma.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #include <asm/macio.h>
 
 #include <scsi/scsi.h>
index 555367f002282b238f8da84ab20100526ba7b1c1..1753e42826dd99bf7d69e1ea83f54a9b5ba34309 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/interrupt.h>
 #include <linux/reboot.h>
 #include <linux/spinlock.h>
+#include <linux/pci.h>
 #include <asm/dbdma.h>
 #include <asm/io.h>
 #include <asm/pgtable.h>
@@ -38,7 +39,6 @@
 #include <asm/processor.h>
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
-#include <asm/pci-bridge.h>
 #include <asm/macio.h>
 
 #include <scsi/scsi.h>
index 4e08d1cd704d1c261c82a98067649154f433ecd1..bb669d32ccd0daee203a69840313fcb9cf343ee0 100644 (file)
@@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
            sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
            sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
                rw_max = q->limits.io_opt =
-                       logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
+                       sdkp->opt_xfer_blocks * sdp->sector_size;
        else
                rw_max = BLK_DEF_MAX_SECTORS;
 
@@ -3268,8 +3268,8 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
        struct scsi_disk *sdkp = dev_get_drvdata(dev);
        int ret = 0;
 
-       if (!sdkp)
-               return 0;       /* this can happen */
+       if (!sdkp)      /* E.g.: runtime suspend following sd_remove() */
+               return 0;
 
        if (sdkp->WCE && sdkp->media_present) {
                sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
@@ -3308,6 +3308,9 @@ static int sd_resume(struct device *dev)
 {
        struct scsi_disk *sdkp = dev_get_drvdata(dev);
 
+       if (!sdkp)      /* E.g.: runtime resume at the start of sd_probe() */
+               return 0;
+
        if (!sdkp->device->manage_start_stop)
                return 0;
 
index 503ab8b46c0b4e8a73d7e826ba1aaf3be1abd6b0..5e820674432ca38c46ad154b831c8eb19e974d34 100644 (file)
@@ -1261,7 +1261,7 @@ sg_mmap(struct file *filp, struct vm_area_struct *vma)
        }
 
        sfp->mmap_called = 1;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = sfp;
        vma->vm_ops = &sg_mmap_vm_ops;
        return 0;
index 8bd54a64efd6a52aef163f0cbe4f34f5ef9ac71c..64c867405ad4ff2d5e53972a8ba34f785cc81962 100644 (file)
@@ -144,6 +144,9 @@ static int sr_runtime_suspend(struct device *dev)
 {
        struct scsi_cd *cd = dev_get_drvdata(dev);
 
+       if (!cd)        /* E.g.: runtime suspend following sr_remove() */
+               return 0;
+
        if (cd->media_present)
                return -EBUSY;
        else
@@ -985,6 +988,7 @@ static int sr_remove(struct device *dev)
        scsi_autopm_get_device(cd->device);
 
        del_gendisk(cd->disk);
+       dev_set_drvdata(dev, NULL);
 
        mutex_lock(&sr_ref_mutex);
        kref_put(&cd->kref, sr_kref_release);
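Both the sd and sr hunks above follow the same defensive pattern: clear the driver data in remove so a runtime-PM callback that fires afterwards sees NULL and returns early instead of touching a freed object. A generic sketch with hypothetical foo_* names:

#include <linux/device.h>

struct foo_disk;                                /* hypothetical per-device state */
static void foo_teardown(struct foo_disk *fd);
static int foo_quiesce(struct foo_disk *fd);

static int foo_remove(struct device *dev)
{
        struct foo_disk *fd = dev_get_drvdata(dev);

        foo_teardown(fd);
        dev_set_drvdata(dev, NULL);     /* later PM callbacks will see NULL */
        return 0;
}

static int foo_runtime_suspend(struct device *dev)
{
        struct foo_disk *fd = dev_get_drvdata(dev);

        if (!fd)        /* e.g. runtime suspend racing with foo_remove() */
                return 0;

        return foo_quiesce(fd);
}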
index 85cd2564c15773f728e8bd6ae6fcde28867ab2bb..f6e74028d71c3e42335542b83b7bb05d3a6ecc18 100644 (file)
@@ -3751,9 +3751,9 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
        if (!err) {
                dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
                                __func__, tm_function);
-               if (ufshcd_clear_tm_cmd(hba, free_slot))
-                       dev_WARN(hba->dev, "%s: unable clear tm cmd (slot %d) after timeout\n",
-                                       __func__, free_slot);
+               dev_WARN(hba->dev, ufshcd_clear_tm_cmd(hba, free_slot),
+                               "%s: unable clear tm cmd (slot %d) after timeout\n",
+                               __func__, free_slot);
                err = -ETIMEDOUT;
        } else {
                err = ufshcd_task_req_compl(hba, free_slot, tm_response);
index 2d9e7f3d56115aeaddea8eb250008dba7729d98e..bb1fb771213401a8cbf67b98011f464409ecb699 100644 (file)
@@ -66,7 +66,7 @@ int superhyway_add_device(unsigned long base, struct superhyway_device *sdev,
        superhyway_read_vcr(dev, base, &dev->vcr);
 
        if (!dev->resource) {
-               dev->resource = kmalloc(sizeof(struct resource), GFP_KERNEL);
+               dev->resource = kzalloc(sizeof(struct resource), GFP_KERNEL);
                if (!dev->resource) {
                        kfree(dev);
                        return -ENOMEM;
index 88260205a2614c84e8293bf82ca47c49abb2e29d..cb58ef0d9b2c50e071eeb453dca3a7aaac52a8de 100644 (file)
@@ -6,6 +6,7 @@ source "drivers/soc/fsl/qe/Kconfig"
 source "drivers/soc/mediatek/Kconfig"
 source "drivers/soc/qcom/Kconfig"
 source "drivers/soc/rockchip/Kconfig"
+source "drivers/soc/samsung/Kconfig"
 source "drivers/soc/sunxi/Kconfig"
 source "drivers/soc/tegra/Kconfig"
 source "drivers/soc/ti/Kconfig"
index 2afdc74f7491adf08e82937d0b6676e107705e2a..5ade71306ee10d080414aaf2168fc93fbcd6fe4b 100644 (file)
@@ -10,6 +10,7 @@ obj-y                         += fsl/
 obj-$(CONFIG_ARCH_MEDIATEK)    += mediatek/
 obj-$(CONFIG_ARCH_QCOM)                += qcom/
 obj-$(CONFIG_ARCH_ROCKCHIP)            += rockchip/
+obj-$(CONFIG_SOC_SAMSUNG)      += samsung/
 obj-$(CONFIG_ARCH_SUNXI)       += sunxi/
 obj-$(CONFIG_ARCH_TEGRA)       += tegra/
 obj-$(CONFIG_SOC_TI)           += ti/
index 498fd0581a451999b89cbb43805b37f003fdb379..d861eefffcc7c5e1fdf3f3f5ef324ba4a8a98e5f 100644 (file)
@@ -106,9 +106,9 @@ static const struct {
  * @channels:          list of all channels detected on this edge
  * @channels_lock:     guard for modifications of @channels
  * @allocated:         array of bitmaps representing already allocated channels
- * @need_rescan:       flag that the @work needs to scan smem for new channels
  * @smem_available:    last available amount of smem triggering a channel scan
- * @work:              work item for edge house keeping
+ * @scan_work:         work item for discovering new channels
+ * @state_work:                work item for edge state changes
  */
 struct qcom_smd_edge {
        struct qcom_smd *smd;
@@ -123,14 +123,16 @@ struct qcom_smd_edge {
        int ipc_bit;
 
        struct list_head channels;
-       spinlock_t channels_lock;
+       rwlock_t channels_lock;
 
        DECLARE_BITMAP(allocated[SMD_ALLOC_TBL_COUNT], SMD_ALLOC_TBL_SIZE);
 
-       bool need_rescan;
        unsigned smem_available;
 
-       struct work_struct work;
+       wait_queue_head_t new_channel_event;
+
+       struct work_struct scan_work;
+       struct work_struct state_work;
 };
 
 /*
@@ -186,13 +188,14 @@ struct qcom_smd_channel {
        int fifo_size;
 
        void *bounce_buffer;
-       int (*cb)(struct qcom_smd_device *, const void *, size_t);
+       qcom_smd_cb_t cb;
 
        spinlock_t recv_lock;
 
        int pkt_size;
 
        struct list_head list;
+       struct list_head dev_list;
 };
 
 /**
@@ -377,6 +380,19 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
        channel->pkt_size = 0;
 }
 
+/*
+ * Set the callback for a channel, with appropriate locking
+ */
+static void qcom_smd_channel_set_callback(struct qcom_smd_channel *channel,
+                                         qcom_smd_cb_t cb)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&channel->recv_lock, flags);
+       channel->cb = cb;
+       spin_unlock_irqrestore(&channel->recv_lock, flags);
+};
+
 /*
  * Calculate the amount of data available in the rx fifo
  */
@@ -606,13 +622,13 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
        /*
         * Handle state changes or data on each of the channels on this edge
         */
-       spin_lock(&edge->channels_lock);
+       read_lock(&edge->channels_lock);
        list_for_each_entry(channel, &edge->channels, list) {
                spin_lock(&channel->recv_lock);
                kick_worker |= qcom_smd_channel_intr(channel);
                spin_unlock(&channel->recv_lock);
        }
-       spin_unlock(&edge->channels_lock);
+       read_unlock(&edge->channels_lock);
 
        /*
         * Creating a new channel requires allocating an smem entry, so we only
@@ -622,12 +638,11 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
        available = qcom_smem_get_free_space(edge->remote_pid);
        if (available != edge->smem_available) {
                edge->smem_available = available;
-               edge->need_rescan = true;
                kick_worker = true;
        }
 
        if (kick_worker)
-               schedule_work(&edge->work);
+               schedule_work(&edge->scan_work);
 
        return IRQ_HANDLED;
 }
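Turning channels_lock into an rwlock lets the interrupt handler and the state worker walk the channel list concurrently on the read side, while only the scan worker takes the write side to insert newly discovered channels. A generic sketch of that reader/writer split with hypothetical example_* names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_channel {
        struct list_head list;
        /* per-channel state would live here */
};

static DEFINE_RWLOCK(example_channels_lock);
static LIST_HEAD(example_channels);

/* read side: list traversal from IRQ handlers and workers */
static void example_walk_channels(void (*handle)(struct example_channel *))
{
        struct example_channel *ch;

        read_lock(&example_channels_lock);
        list_for_each_entry(ch, &example_channels, list)
                handle(ch);
        read_unlock(&example_channels_lock);
}

/* write side: only taken when a new channel is added to the list */
static void example_add_channel(struct example_channel *ch)
{
        unsigned long flags;

        write_lock_irqsave(&example_channels_lock, flags);
        list_add(&ch->list, &example_channels);
        write_unlock_irqrestore(&example_channels_lock, flags);
}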
@@ -793,18 +808,12 @@ static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
 }
 
 /*
- * Probe the smd client.
- *
- * The remote side have indicated that it want the channel to be opened, so
- * complete the state handshake and probe our client driver.
+ * Helper for opening a channel
  */
-static int qcom_smd_dev_probe(struct device *dev)
+static int qcom_smd_channel_open(struct qcom_smd_channel *channel,
+                                qcom_smd_cb_t cb)
 {
-       struct qcom_smd_device *qsdev = to_smd_device(dev);
-       struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
-       struct qcom_smd_channel *channel = qsdev->channel;
        size_t bb_size;
-       int ret;
 
        /*
         * Packets are maximum 4k, but reduce if the fifo is smaller
@@ -814,12 +823,44 @@ static int qcom_smd_dev_probe(struct device *dev)
        if (!channel->bounce_buffer)
                return -ENOMEM;
 
-       channel->cb = qsdrv->callback;
-
+       qcom_smd_channel_set_callback(channel, cb);
        qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENING);
-
        qcom_smd_channel_set_state(channel, SMD_CHANNEL_OPENED);
 
+       return 0;
+}
+
+/*
+ * Helper for closing and resetting a channel
+ */
+static void qcom_smd_channel_close(struct qcom_smd_channel *channel)
+{
+       qcom_smd_channel_set_callback(channel, NULL);
+
+       kfree(channel->bounce_buffer);
+       channel->bounce_buffer = NULL;
+
+       qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+       qcom_smd_channel_reset(channel);
+}
+
+/*
+ * Probe the smd client.
+ *
+ * The remote side has indicated that it wants the channel to be opened, so
+ * complete the state handshake and probe our client driver.
+ */
+static int qcom_smd_dev_probe(struct device *dev)
+{
+       struct qcom_smd_device *qsdev = to_smd_device(dev);
+       struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
+       struct qcom_smd_channel *channel = qsdev->channel;
+       int ret;
+
+       ret = qcom_smd_channel_open(channel, qsdrv->callback);
+       if (ret)
+               return ret;
+
        ret = qsdrv->probe(qsdev);
        if (ret)
                goto err;
@@ -831,11 +872,7 @@ static int qcom_smd_dev_probe(struct device *dev)
 err:
        dev_err(&qsdev->dev, "probe failed\n");
 
-       channel->cb = NULL;
-       kfree(channel->bounce_buffer);
-       channel->bounce_buffer = NULL;
-
-       qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
+       qcom_smd_channel_close(channel);
        return ret;
 }
 
@@ -850,16 +887,15 @@ static int qcom_smd_dev_remove(struct device *dev)
        struct qcom_smd_device *qsdev = to_smd_device(dev);
        struct qcom_smd_driver *qsdrv = to_smd_driver(dev);
        struct qcom_smd_channel *channel = qsdev->channel;
-       unsigned long flags;
+       struct qcom_smd_channel *tmp;
+       struct qcom_smd_channel *ch;
 
        qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING);
 
        /*
         * Make sure we don't race with the code receiving data.
         */
-       spin_lock_irqsave(&channel->recv_lock, flags);
-       channel->cb = NULL;
-       spin_unlock_irqrestore(&channel->recv_lock, flags);
+       qcom_smd_channel_set_callback(channel, NULL);
 
        /* Wake up any sleepers in qcom_smd_send() */
        wake_up_interruptible(&channel->fblockread_event);
@@ -872,15 +908,14 @@ static int qcom_smd_dev_remove(struct device *dev)
                qsdrv->remove(qsdev);
 
        /*
-        * The client is now gone, cleanup and reset the channel state.
+        * The client is now gone, close and release all channels associated
+        * with this sdev
         */
-       channel->qsdev = NULL;
-       kfree(channel->bounce_buffer);
-       channel->bounce_buffer = NULL;
-
-       qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSED);
-
-       qcom_smd_channel_reset(channel);
+       list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) {
+               qcom_smd_channel_close(ch);
+               list_del(&ch->dev_list);
+               ch->qsdev = NULL;
+       }
 
        return 0;
 }
@@ -1006,6 +1041,76 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *qsdrv)
 }
 EXPORT_SYMBOL(qcom_smd_driver_unregister);
 
+static struct qcom_smd_channel *
+qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name)
+{
+       struct qcom_smd_channel *channel;
+       struct qcom_smd_channel *ret = NULL;
+       unsigned state;
+
+       read_lock(&edge->channels_lock);
+       list_for_each_entry(channel, &edge->channels, list) {
+               if (strcmp(channel->name, name))
+                       continue;
+
+               state = GET_RX_CHANNEL_INFO(channel, state);
+               if (state != SMD_CHANNEL_OPENING &&
+                   state != SMD_CHANNEL_OPENED)
+                       continue;
+
+               ret = channel;
+               break;
+       }
+       read_unlock(&edge->channels_lock);
+
+       return ret;
+}
+
+/**
+ * qcom_smd_open_channel() - claim additional channels on the same edge
+ * @sdev:      smd_device handle
+ * @name:      channel name
+ * @cb:                callback method to use for incoming data
+ *
+ * Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't
+ * ready.
+ */
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev,
+                                              const char *name,
+                                              qcom_smd_cb_t cb)
+{
+       struct qcom_smd_channel *channel;
+       struct qcom_smd_edge *edge = sdev->channel->edge;
+       int ret;
+
+       /* Wait up to HZ for the channel to appear */
+       ret = wait_event_interruptible_timeout(edge->new_channel_event,
+                       (channel = qcom_smd_find_channel(edge, name)) != NULL,
+                       HZ);
+       if (!ret)
+               return ERR_PTR(-ETIMEDOUT);
+
+       if (channel->state != SMD_CHANNEL_CLOSED) {
+               dev_err(&sdev->dev, "channel %s is busy\n", channel->name);
+               return ERR_PTR(-EBUSY);
+       }
+
+       channel->qsdev = sdev;
+       ret = qcom_smd_channel_open(channel, cb);
+       if (ret) {
+               channel->qsdev = NULL;
+               return ERR_PTR(ret);
+       }
+
+       /*
+        * Append the channel to the list of channels associated with the sdev
+        */
+       list_add_tail(&channel->dev_list, &sdev->channel->dev_list);
+
+       return channel;
+}
+EXPORT_SYMBOL(qcom_smd_open_channel);
+
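A hypothetical client of the qcom_smd_open_channel() API added above, claiming a secondary channel on the same edge from its probe callback. The bar_* names and the "bar_ctrl" channel are invented, and the header path is assumed; qcom_smd_device, qcom_smd_channel, qcom_smd_cb_t and qcom_smd_send() come from the driver being patched:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/smd.h>         /* assumed location of the client API */

static int bar_ctrl_cb(struct qcom_smd_device *sdev,
                       const void *data, size_t len)
{
        dev_dbg(&sdev->dev, "%zu bytes on the control channel\n", len);
        return 0;
}

static int bar_probe(struct qcom_smd_device *sdev)
{
        struct qcom_smd_channel *ctrl;

        /* blocks for up to a second waiting for the channel to be advertised */
        ctrl = qcom_smd_open_channel(sdev, "bar_ctrl", bar_ctrl_cb);
        if (IS_ERR(ctrl))
                return PTR_ERR(ctrl);

        /* ctrl can now be used with qcom_smd_send() alongside sdev->channel */
        return 0;
}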
 /*
  * Allocate the qcom_smd_channel object for a newly found smd channel,
  * retrieving and validating the smem items involved.
@@ -1027,6 +1132,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
        if (!channel)
                return ERR_PTR(-ENOMEM);
 
+       INIT_LIST_HEAD(&channel->dev_list);
        channel->edge = edge;
        channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL);
        if (!channel->name)
@@ -1089,8 +1195,9 @@ free_name_and_channel:
  * qcom_smd_create_channel() to create representations of these and add
  * them to the edge's list of channels.
  */
-static void qcom_discover_channels(struct qcom_smd_edge *edge)
+static void qcom_channel_scan_worker(struct work_struct *work)
 {
+       struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work);
        struct qcom_smd_alloc_entry *alloc_tbl;
        struct qcom_smd_alloc_entry *entry;
        struct qcom_smd_channel *channel;
@@ -1134,16 +1241,18 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
                        if (IS_ERR(channel))
                                continue;
 
-                       spin_lock_irqsave(&edge->channels_lock, flags);
+                       write_lock_irqsave(&edge->channels_lock, flags);
                        list_add(&channel->list, &edge->channels);
-                       spin_unlock_irqrestore(&edge->channels_lock, flags);
+                       write_unlock_irqrestore(&edge->channels_lock, flags);
 
                        dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name);
                        set_bit(i, edge->allocated[tbl]);
+
+                       wake_up_interruptible(&edge->new_channel_event);
                }
        }
 
-       schedule_work(&edge->work);
+       schedule_work(&edge->state_work);
 }
 
 /*
@@ -1151,29 +1260,22 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
  * then scans all registered channels for state changes that should be handled
  * by creating or destroying smd client devices for the registered channels.
  *
- * LOCKING: edge->channels_lock is not needed to be held during the traversal
- * of the channels list as it's done synchronously with the only writer.
+ * LOCKING: edge->channels_lock only needs to cover the list operations, as the
+ * worker is killed before any channels are deallocated
  */
 static void qcom_channel_state_worker(struct work_struct *work)
 {
        struct qcom_smd_channel *channel;
        struct qcom_smd_edge *edge = container_of(work,
                                                  struct qcom_smd_edge,
-                                                 work);
+                                                 state_work);
        unsigned remote_state;
 
-       /*
-        * Rescan smem if we have reason to belive that there are new channels.
-        */
-       if (edge->need_rescan) {
-               edge->need_rescan = false;
-               qcom_discover_channels(edge);
-       }
-
        /*
         * Register a device for any closed channel where the remote processor
         * is showing interest in opening the channel.
         */
+       read_lock(&edge->channels_lock);
        list_for_each_entry(channel, &edge->channels, list) {
                if (channel->state != SMD_CHANNEL_CLOSED)
                        continue;
@@ -1183,7 +1285,9 @@ static void qcom_channel_state_worker(struct work_struct *work)
                    remote_state != SMD_CHANNEL_OPENED)
                        continue;
 
+               read_unlock(&edge->channels_lock);
                qcom_smd_create_device(channel);
+               read_lock(&edge->channels_lock);
        }
 
        /*
@@ -1200,8 +1304,11 @@ static void qcom_channel_state_worker(struct work_struct *work)
                    remote_state == SMD_CHANNEL_OPENED)
                        continue;
 
+               read_unlock(&edge->channels_lock);
                qcom_smd_destroy_device(channel);
+               read_lock(&edge->channels_lock);
        }
+       read_unlock(&edge->channels_lock);
 }
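The locking comment above leans on a pattern worth making explicit: the read lock protects only the list linkage, so it may be dropped around calls that can sleep, provided no entry is freed while the worker can still run. A minimal sketch with hypothetical types and names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_entry {
        struct list_head node;
};

struct example_edge {
        rwlock_t lock;                  /* protects only the list linkage */
        struct list_head entries;
};

static void example_handle(struct example_entry *entry)
{
        /* stand-in for work that may sleep, e.g. device registration */
}

static void example_walk(struct example_edge *edge)
{
        struct example_entry *entry;

        read_lock(&edge->lock);
        list_for_each_entry(entry, &edge->entries, node) {
                /* safe only because entries outlive this worker */
                read_unlock(&edge->lock);
                example_handle(entry);
                read_lock(&edge->lock);
        }
        read_unlock(&edge->lock);
}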
 
 /*
@@ -1217,9 +1324,10 @@ static int qcom_smd_parse_edge(struct device *dev,
        int ret;
 
        INIT_LIST_HEAD(&edge->channels);
-       spin_lock_init(&edge->channels_lock);
+       rwlock_init(&edge->channels_lock);
 
-       INIT_WORK(&edge->work, qcom_channel_state_worker);
+       INIT_WORK(&edge->scan_work, qcom_channel_scan_worker);
+       INIT_WORK(&edge->state_work, qcom_channel_state_worker);
 
        edge->of_node = of_node_get(node);
 
@@ -1303,13 +1411,13 @@ static int qcom_smd_probe(struct platform_device *pdev)
        for_each_available_child_of_node(pdev->dev.of_node, node) {
                edge = &smd->edges[i++];
                edge->smd = smd;
+               init_waitqueue_head(&edge->new_channel_event);
 
                ret = qcom_smd_parse_edge(&pdev->dev, node, edge);
                if (ret)
                        continue;
 
-               edge->need_rescan = true;
-               schedule_work(&edge->work);
+               schedule_work(&edge->scan_work);
        }
 
        platform_set_drvdata(pdev, smd);
@@ -1332,8 +1440,10 @@ static int qcom_smd_remove(struct platform_device *pdev)
                edge = &smd->edges[i];
 
                disable_irq(edge->irq);
-               cancel_work_sync(&edge->work);
+               cancel_work_sync(&edge->scan_work);
+               cancel_work_sync(&edge->state_work);
 
+               /* No need to lock here, because the writer is gone */
                list_for_each_entry(channel, &edge->channels, list) {
                        if (!channel->qsdev)
                                continue;
index 5548a31e1a39a100142b45841cbe38e1aa007e38..f324451e0940eeadd10e3d76af56c727e30c6de6 100644 (file)
@@ -2,6 +2,8 @@
  * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
  * Copyright (c) 2014,2015, Linaro Ltd.
  *
+ * SAW power controller driver
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
  * only version 2 as published by the Free Software Foundation.
@@ -12,7 +14,6 @@
  * GNU General Public License for more details.
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/io.h>
@@ -378,8 +379,5 @@ static struct platform_driver spm_driver = {
                .of_match_table = spm_match_table,
        },
 };
-module_platform_driver(spm_driver);
 
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("SAW power controller driver");
-MODULE_ALIAS("platform:saw");
+builtin_platform_driver(spm_driver);
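For context, builtin_platform_driver() registers the driver at device_initcall time and simply omits the module-unload path; as a rough sketch (based on the generic helper in the platform_device header, not code from this patch) it expands to something like:

static int __init spm_driver_init(void)
{
        return platform_driver_register(&spm_driver);
}
device_initcall(spm_driver_init);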
index 534c58937a566205b85e216bf1e230c9186a637f..43155e1f97b92ca7ee8f05b901ad681501041d80 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/regmap.h>
 #include <linux/mfd/syscon.h>
 #include <dt-bindings/power/rk3288-power.h>
+#include <dt-bindings/power/rk3368-power.h>
 
 struct rockchip_domain_info {
        int pwr_mask;
@@ -75,6 +76,9 @@ struct rockchip_pmu {
 #define DOMAIN_RK3288(pwr, status, req)                \
        DOMAIN(pwr, status, req, req, (req) + 16)
 
+#define DOMAIN_RK3368(pwr, status, req)                \
+       DOMAIN(pwr, status, req, (req) + 16, req)
+
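To spell out the difference from the rk3288 wrapper above: for the same (pwr, status, req) arguments the two helpers pass the final two DOMAIN() parameters in swapped order (the DOMAIN() helper itself sits outside this hunk), for example:

/*
 * Illustration only:
 *   DOMAIN_RK3288(13, 12, 6)  expands to  DOMAIN(13, 12, 6,  6, 22)
 *   DOMAIN_RK3368(13, 12, 6)  expands to  DOMAIN(13, 12, 6, 22,  6)
 */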
 static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
 {
        struct rockchip_pmu *pmu = pd->pmu;
@@ -419,6 +423,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev)
                if (error) {
                        dev_err(dev, "failed to handle node %s: %d\n",
                                node->name, error);
+                       of_node_put(node);
                        goto err_out;
                }
        }
@@ -444,6 +449,14 @@ static const struct rockchip_domain_info rk3288_pm_domains[] = {
        [RK3288_PD_GPU]         = DOMAIN_RK3288(9, 9, 2),
 };
 
+static const struct rockchip_domain_info rk3368_pm_domains[] = {
+       [RK3368_PD_PERI]        = DOMAIN_RK3368(13, 12, 6),
+       [RK3368_PD_VIO]         = DOMAIN_RK3368(15, 14, 8),
+       [RK3368_PD_VIDEO]       = DOMAIN_RK3368(14, 13, 7),
+       [RK3368_PD_GPU_0]       = DOMAIN_RK3368(16, 15, 2),
+       [RK3368_PD_GPU_1]       = DOMAIN_RK3368(17, 16, 2),
+};
+
 static const struct rockchip_pmu_info rk3288_pmu = {
        .pwr_offset = 0x08,
        .status_offset = 0x0c,
@@ -461,11 +474,32 @@ static const struct rockchip_pmu_info rk3288_pmu = {
        .domain_info = rk3288_pm_domains,
 };
 
+static const struct rockchip_pmu_info rk3368_pmu = {
+       .pwr_offset = 0x0c,
+       .status_offset = 0x10,
+       .req_offset = 0x3c,
+       .idle_offset = 0x40,
+       .ack_offset = 0x40,
+
+       .core_pwrcnt_offset = 0x48,
+       .gpu_pwrcnt_offset = 0x50,
+
+       .core_power_transition_time = 24,
+       .gpu_power_transition_time = 24,
+
+       .num_domains = ARRAY_SIZE(rk3368_pm_domains),
+       .domain_info = rk3368_pm_domains,
+};
+
 static const struct of_device_id rockchip_pm_domain_dt_match[] = {
        {
                .compatible = "rockchip,rk3288-power-controller",
                .data = (void *)&rk3288_pmu,
        },
+       {
+               .compatible = "rockchip,rk3368-power-controller",
+               .data = (void *)&rk3368_pmu,
+       },
        { /* sentinel */ },
 };
 
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
new file mode 100644 (file)
index 0000000..895f169
--- /dev/null
@@ -0,0 +1,17 @@
+#
+# SAMSUNG SoC drivers
+#
+menu "Samsung SOC driver support"
+
+config SOC_SAMSUNG
+       bool
+
+config EXYNOS_SROM
+       bool
+       depends on ARM && ARCH_EXYNOS
+
+config EXYNOS_PMU
+       bool
+       depends on ARM && ARCH_EXYNOS
+
+endmenu
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
new file mode 100644 (file)
index 0000000..cef7970
--- /dev/null
@@ -0,0 +1,3 @@
+obj-$(CONFIG_EXYNOS_SROM)      += exynos-srom.o
+obj-$(CONFIG_EXYNOS_PMU)       += exynos-pmu.o exynos3250-pmu.o exynos4-pmu.o \
+                                       exynos5250-pmu.o exynos5420-pmu.o
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
new file mode 100644 (file)
index 0000000..0acdfd8
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS - CPU PMU(Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+struct exynos_pmu_context {
+       struct device *dev;
+       const struct exynos_pmu_data *pmu_data;
+};
+
+void __iomem *pmu_base_addr;
+static struct exynos_pmu_context *pmu_context;
+
+void pmu_raw_writel(u32 val, u32 offset)
+{
+       writel_relaxed(val, pmu_base_addr + offset);
+}
+
+u32 pmu_raw_readl(u32 offset)
+{
+       return readl_relaxed(pmu_base_addr + offset);
+}
+
+void exynos_sys_powerdown_conf(enum sys_powerdown mode)
+{
+       unsigned int i;
+       const struct exynos_pmu_data *pmu_data;
+
+       if (!pmu_context)
+               return;
+
+       pmu_data = pmu_context->pmu_data;
+
+       if (pmu_data->powerdown_conf)
+               pmu_data->powerdown_conf(mode);
+
+       if (pmu_data->pmu_config) {
+               for (i = 0; (pmu_data->pmu_config[i].offset != PMU_TABLE_END); i++)
+                       pmu_raw_writel(pmu_data->pmu_config[i].val[mode],
+                                       pmu_data->pmu_config[i].offset);
+       }
+
+       if (pmu_data->powerdown_conf_extra)
+               pmu_data->powerdown_conf_extra(mode);
+
+       if (pmu_data->pmu_config_extra) {
+               for (i = 0; pmu_data->pmu_config_extra[i].offset != PMU_TABLE_END; i++)
+                       pmu_raw_writel(pmu_data->pmu_config_extra[i].val[mode],
+                                       pmu_data->pmu_config_extra[i].offset);
+       }
+}
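For context, platform suspend or cpuidle code is expected to call this helper with the target low-power mode before entering it. A minimal sketch, with a hypothetical hook name and SYS_SLEEP taken from the exynos-pmu header included above:

static int example_suspend_enter(void)
{
        /* walk the PMU_TABLE_END-terminated config tables for sleep */
        exynos_sys_powerdown_conf(SYS_SLEEP);

        /* architecture-specific suspend entry would follow here */
        return 0;
}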
+
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static const struct of_device_id exynos_pmu_of_device_ids[] = {
+       {
+               .compatible = "samsung,exynos3250-pmu",
+               .data = &exynos3250_pmu_data,
+       }, {
+               .compatible = "samsung,exynos4210-pmu",
+               .data = &exynos4210_pmu_data,
+       }, {
+               .compatible = "samsung,exynos4212-pmu",
+               .data = &exynos4212_pmu_data,
+       }, {
+               .compatible = "samsung,exynos4412-pmu",
+               .data = &exynos4412_pmu_data,
+       }, {
+               .compatible = "samsung,exynos5250-pmu",
+               .data = &exynos5250_pmu_data,
+       }, {
+               .compatible = "samsung,exynos5420-pmu",
+               .data = &exynos5420_pmu_data,
+       },
+       { /*sentinel*/ },
+};
+
+static int exynos_pmu_probe(struct platform_device *pdev)
+{
+       const struct of_device_id *match;
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       pmu_base_addr = devm_ioremap_resource(dev, res);
+       if (IS_ERR(pmu_base_addr))
+               return PTR_ERR(pmu_base_addr);
+
+       pmu_context = devm_kzalloc(&pdev->dev,
+                       sizeof(struct exynos_pmu_context),
+                       GFP_KERNEL);
+       if (!pmu_context) {
+               dev_err(dev, "Cannot allocate memory.\n");
+               return -ENOMEM;
+       }
+       pmu_context->dev = dev;
+
+       match = of_match_node(exynos_pmu_of_device_ids, dev->of_node);
+
+       pmu_context->pmu_data = match->data;
+
+       if (pmu_context->pmu_data->pmu_init)
+               pmu_context->pmu_data->pmu_init();
+
+       platform_set_drvdata(pdev, pmu_context);
+
+       dev_dbg(dev, "Exynos PMU Driver probe done\n");
+       return 0;
+}
+
+static struct platform_driver exynos_pmu_driver = {
+       .driver  = {
+               .name   = "exynos-pmu",
+               .of_match_table = exynos_pmu_of_device_ids,
+       },
+       .probe = exynos_pmu_probe,
+};
+
+static int __init exynos_pmu_init(void)
+{
+       return platform_driver_register(&exynos_pmu_driver);
+}
+postcore_initcall(exynos_pmu_init);
diff --git a/drivers/soc/samsung/exynos-pmu.h b/drivers/soc/samsung/exynos-pmu.h
new file mode 100644 (file)
index 0000000..a469e36
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Header for EXYNOS PMU Driver support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __EXYNOS_PMU_H
+#define __EXYNOS_PMU_H
+
+#include <linux/io.h>
+
+#define PMU_TABLE_END  (-1U)
+
+struct exynos_pmu_conf {
+       unsigned int offset;
+       u8 val[NUM_SYS_POWERDOWN];
+};
+
+struct exynos_pmu_data {
+       const struct exynos_pmu_conf *pmu_config;
+       const struct exynos_pmu_conf *pmu_config_extra;
+
+       void (*pmu_init)(void);
+       void (*powerdown_conf)(enum sys_powerdown);
+       void (*powerdown_conf_extra)(enum sys_powerdown);
+};
+
+extern void __iomem *pmu_base_addr;
+/* list of all exported SoC specific data */
+extern const struct exynos_pmu_data exynos3250_pmu_data;
+extern const struct exynos_pmu_data exynos4210_pmu_data;
+extern const struct exynos_pmu_data exynos4212_pmu_data;
+extern const struct exynos_pmu_data exynos4412_pmu_data;
+extern const struct exynos_pmu_data exynos5250_pmu_data;
+extern const struct exynos_pmu_data exynos5420_pmu_data;
+
+extern void pmu_raw_writel(u32 val, u32 offset);
+extern u32 pmu_raw_readl(u32 offset);
+#endif /* __EXYNOS_PMU_H */
diff --git a/drivers/soc/samsung/exynos-srom.c b/drivers/soc/samsung/exynos-srom.c
new file mode 100644 (file)
index 0000000..a4cf547
--- /dev/null
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *           http://www.samsung.com/
+ *
+ * EXYNOS - SROM Controller support
+ * Author: Pankaj Dubey <pankaj.dubey@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "exynos-srom.h"
+
+static const unsigned long exynos_srom_offsets[] = {
+       /* SROM side */
+       EXYNOS_SROM_BW,
+       EXYNOS_SROM_BC0,
+       EXYNOS_SROM_BC1,
+       EXYNOS_SROM_BC2,
+       EXYNOS_SROM_BC3,
+};
+
+/**
+ * struct exynos_srom_reg_dump: register dump of SROM Controller registers.
+ * @offset: srom register offset from the controller base address.
+ * @value: the value of register under the offset.
+ */
+struct exynos_srom_reg_dump {
+       u32     offset;
+       u32     value;
+};
+
+/**
+ * struct exynos_srom: platform data for exynos srom controller driver.
+ * @dev: platform device pointer
+ * @reg_base: srom base address
+ * @reg_offset: exynos_srom_reg_dump pointer to hold offset and its value.
+ */
+struct exynos_srom {
+       struct device *dev;
+       void __iomem *reg_base;
+       struct exynos_srom_reg_dump *reg_offset;
+};
+
+static struct exynos_srom_reg_dump *exynos_srom_alloc_reg_dump(
+               const unsigned long *rdump,
+               unsigned long nr_rdump)
+{
+       struct exynos_srom_reg_dump *rd;
+       unsigned int i;
+
+       rd = kcalloc(nr_rdump, sizeof(*rd), GFP_KERNEL);
+       if (!rd)
+               return NULL;
+
+       for (i = 0; i < nr_rdump; ++i)
+               rd[i].offset = rdump[i];
+
+       return rd;
+}
+
+static int exynos_srom_configure_bank(struct exynos_srom *srom,
+                                     struct device_node *np)
+{
+       u32 bank, width, pmc;
+       u32 timing[6];
+       u32 cs, bw;
+
+       if (of_property_read_u32(np, "reg", &bank))
+               return -EINVAL;
+       if (of_property_read_u32(np, "reg-io-width", &width))
+               width = 1;
+       if (of_property_read_u32(np, "samsung,srom-page-mode", &pmc))
+               pmc = 0;
+       if (of_property_read_u32_array(np, "samsung,srom-timing", timing,
+                                      ARRAY_SIZE(timing)))
+               return -EINVAL;
+
+       bank *= 4; /* Convert bank into shift/offset */
+
+       cs = 1 << EXYNOS_SROM_BW__BYTEENABLE__SHIFT;
+       if (width == 2)
+               cs |= 1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT;
+
+       bw = __raw_readl(srom->reg_base + EXYNOS_SROM_BW);
+       bw = (bw & ~(EXYNOS_SROM_BW__CS_MASK << bank)) | (cs << bank);
+       __raw_writel(bw, srom->reg_base + EXYNOS_SROM_BW);
+
+       __raw_writel((pmc << EXYNOS_SROM_BCX__PMC__SHIFT) |
+                   (timing[0] << EXYNOS_SROM_BCX__TACP__SHIFT) |
+                   (timing[1] << EXYNOS_SROM_BCX__TCAH__SHIFT) |
+                   (timing[2] << EXYNOS_SROM_BCX__TCOH__SHIFT) |
+                   (timing[3] << EXYNOS_SROM_BCX__TACC__SHIFT) |
+                   (timing[4] << EXYNOS_SROM_BCX__TCOS__SHIFT) |
+                   (timing[5] << EXYNOS_SROM_BCX__TACS__SHIFT),
+                   srom->reg_base + EXYNOS_SROM_BC0 + bank);
+
+       return 0;
+}
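To make the register arithmetic concrete, consider a hypothetical bank node with reg = <1>, reg-io-width = <2>, no samsung,srom-page-mode and a six-cell samsung,srom-timing; the function then effectively does:

/*
 * Worked example (hypothetical values):
 *   cs   = (1 << EXYNOS_SROM_BW__BYTEENABLE__SHIFT)            -> 0x8
 *        | (1 << EXYNOS_SROM_BW__DATAWIDTH__SHIFT)             -> 0x9
 *   bank = 1 * 4 = 4, so bits 4-7 of the BW register become 0x9
 *   the packed pmc/timing word goes to EXYNOS_SROM_BC0 + 4,
 *   i.e. EXYNOS_SROM_BC1 (offset 0x8)
 */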
+
+static int exynos_srom_probe(struct platform_device *pdev)
+{
+       struct device_node *np, *child;
+       struct exynos_srom *srom;
+       struct device *dev = &pdev->dev;
+       bool bad_bank_config = false;
+
+       np = dev->of_node;
+       if (!np) {
+               dev_err(&pdev->dev, "could not find device info\n");
+               return -EINVAL;
+       }
+
+       srom = devm_kzalloc(&pdev->dev,
+                       sizeof(struct exynos_srom), GFP_KERNEL);
+       if (!srom)
+               return -ENOMEM;
+
+       srom->dev = dev;
+       srom->reg_base = of_iomap(np, 0);
+       if (!srom->reg_base) {
+               dev_err(&pdev->dev, "iomap of exynos srom controller failed\n");
+               return -ENOMEM;
+       }
+
+       platform_set_drvdata(pdev, srom);
+
+       srom->reg_offset = exynos_srom_alloc_reg_dump(exynos_srom_offsets,
+                       ARRAY_SIZE(exynos_srom_offsets));
+       if (!srom->reg_offset) {
+               iounmap(srom->reg_base);
+               return -ENOMEM;
+       }
+
+       for_each_child_of_node(np, child) {
+               if (exynos_srom_configure_bank(srom, child)) {
+                       dev_err(dev,
+                               "Could not decode bank configuration for %s\n",
+                               child->name);
+                       bad_bank_config = true;
+               }
+       }
+
+       /*
+        * If any bank failed to configure, we still provide suspend/resume,
+        * but do not probe child devices
+        */
+       if (bad_bank_config)
+               return 0;
+
+       return of_platform_populate(np, NULL, NULL, dev);
+}
+
+static int exynos_srom_remove(struct platform_device *pdev)
+{
+       struct exynos_srom *srom = platform_get_drvdata(pdev);
+
+       kfree(srom->reg_offset);
+       iounmap(srom->reg_base);
+
+       return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void exynos_srom_save(void __iomem *base,
+                                   struct exynos_srom_reg_dump *rd,
+                                   unsigned int num_regs)
+{
+       for (; num_regs > 0; --num_regs, ++rd)
+               rd->value = readl(base + rd->offset);
+}
+
+static void exynos_srom_restore(void __iomem *base,
+                                     const struct exynos_srom_reg_dump *rd,
+                                     unsigned int num_regs)
+{
+       for (; num_regs > 0; --num_regs, ++rd)
+               writel(rd->value, base + rd->offset);
+}
+
+static int exynos_srom_suspend(struct device *dev)
+{
+       struct exynos_srom *srom = dev_get_drvdata(dev);
+
+       exynos_srom_save(srom->reg_base, srom->reg_offset,
+                               ARRAY_SIZE(exynos_srom_offsets));
+       return 0;
+}
+
+static int exynos_srom_resume(struct device *dev)
+{
+       struct exynos_srom *srom = dev_get_drvdata(dev);
+
+       exynos_srom_restore(srom->reg_base, srom->reg_offset,
+                               ARRAY_SIZE(exynos_srom_offsets));
+       return 0;
+}
+#endif
+
+static const struct of_device_id of_exynos_srom_ids[] = {
+       {
+               .compatible     = "samsung,exynos-srom",
+       },
+       {},
+};
+MODULE_DEVICE_TABLE(of, of_exynos_srom_ids);
+
+static SIMPLE_DEV_PM_OPS(exynos_srom_pm_ops, exynos_srom_suspend, exynos_srom_resume);
+
+static struct platform_driver exynos_srom_driver = {
+       .probe = exynos_srom_probe,
+       .remove = exynos_srom_remove,
+       .driver = {
+               .name = "exynos-srom",
+               .of_match_table = of_exynos_srom_ids,
+               .pm = &exynos_srom_pm_ops,
+       },
+};
+module_platform_driver(exynos_srom_driver);
+
+MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
+MODULE_DESCRIPTION("Exynos SROM Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/samsung/exynos-srom.h b/drivers/soc/samsung/exynos-srom.h
new file mode 100644 (file)
index 0000000..34660c6
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * Exynos SROMC register definitions
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef __EXYNOS_SROM_H
+#define __EXYNOS_SROM_H __FILE__
+
+#define EXYNOS_SROMREG(x)              (x)
+
+#define EXYNOS_SROM_BW         EXYNOS_SROMREG(0x0)
+#define EXYNOS_SROM_BC0                EXYNOS_SROMREG(0x4)
+#define EXYNOS_SROM_BC1                EXYNOS_SROMREG(0x8)
+#define EXYNOS_SROM_BC2                EXYNOS_SROMREG(0xc)
+#define EXYNOS_SROM_BC3                EXYNOS_SROMREG(0x10)
+#define EXYNOS_SROM_BC4                EXYNOS_SROMREG(0x14)
+#define EXYNOS_SROM_BC5                EXYNOS_SROMREG(0x18)
+
+/* the BW register packs one 4-bit setting per chip select (NCS0 - NCS5) */
+
+#define EXYNOS_SROM_BW__DATAWIDTH__SHIFT       0
+#define EXYNOS_SROM_BW__ADDRMODE__SHIFT                1
+#define EXYNOS_SROM_BW__WAITENABLE__SHIFT      2
+#define EXYNOS_SROM_BW__BYTEENABLE__SHIFT      3
+
+#define EXYNOS_SROM_BW__CS_MASK                        0xf
+
+#define EXYNOS_SROM_BW__NCS0__SHIFT            0
+#define EXYNOS_SROM_BW__NCS1__SHIFT            4
+#define EXYNOS_SROM_BW__NCS2__SHIFT            8
+#define EXYNOS_SROM_BW__NCS3__SHIFT            12
+#define EXYNOS_SROM_BW__NCS4__SHIFT            16
+#define EXYNOS_SROM_BW__NCS5__SHIFT            20
+
+/* the following bit layout applies to each of the BCn registers */
+
+#define EXYNOS_SROM_BCX__PMC__SHIFT            0
+#define EXYNOS_SROM_BCX__TACP__SHIFT           4
+#define EXYNOS_SROM_BCX__TCAH__SHIFT           8
+#define EXYNOS_SROM_BCX__TCOH__SHIFT           12
+#define EXYNOS_SROM_BCX__TACC__SHIFT           16
+#define EXYNOS_SROM_BCX__TCOS__SHIFT           24
+#define EXYNOS_SROM_BCX__TACS__SHIFT           28
+
+#endif /* __EXYNOS_SROM_H */
diff --git a/drivers/soc/samsung/exynos3250-pmu.c b/drivers/soc/samsung/exynos3250-pmu.c
new file mode 100644 (file)
index 0000000..20b3ab8
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS3250 - CPU PMU (Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static struct exynos_pmu_conf exynos3250_pmu_config[] = {
+       /* { .offset = offset, .val = { AFTR, W-AFTR, SLEEP } } */
+       { EXYNOS3_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS3_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS3_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS3_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS3_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
+       { EXYNOS3_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
+       { EXYNOS3_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS3_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
+       { EXYNOS3_ARM_L2_SYS_PWR_REG,                   { 0x0, 0x0, 0x3} },
+       { EXYNOS3_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x1, 0x0} },
+       { EXYNOS3_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x1, 0x0} },
+       { EXYNOS3_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS3_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS3_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS3_LPDDR_PHY_DLL_LOCK_SYS_PWR_REG,       { 0x1, 0x1, 0x1} },
+       { EXYNOS3_CMU_ACLKSTOP_COREBLK_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_SCLKSTOP_COREBLK_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_COREBLK_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS3_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS3_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS3_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS3_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS3_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS3_UPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x1} },
+       { EXYNOS3_EPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_CAM_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_LCD0_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_CLKSTOP_MAUDIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_CAM_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_LCD0_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS3_CMU_RESET_MAUDIO_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS3_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS3_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS3_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x3, 0x3} },
+       { EXYNOS3_TOP_BUS_COREBLK_SYS_PWR_REG,          { 0x3, 0x0, 0x0} },
+       { EXYNOS3_TOP_RETENTION_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x1} },
+       { EXYNOS3_TOP_PWR_COREBLK_SYS_PWR_REG,          { 0x3, 0x3, 0x3} },
+       { EXYNOS3_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS3_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x1, 0x1} },
+       { EXYNOS3_LOGIC_RESET_COREBLK_SYS_PWR_REG,      { 0x1, 0x1, 0x0} },
+       { EXYNOS3_OSCCLK_GATE_COREBLK_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS3_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_MAUDIO_SYS_PWR_REG,     { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_MMC0_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_MMC1_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_MMC2_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_RETENTION_JTAG_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS3_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS3_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x0} },
+       { EXYNOS3_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
+       { EXYNOS3_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS3_EXT_REGULATOR_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
+       { EXYNOS3_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS3_GPIO_MODE_MAUDIO_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS3_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS3_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS3_TOP_ASB_RESET_COREBLK_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
+       { EXYNOS3_TOP_ASB_ISOLATION_COREBLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
+       { EXYNOS3_CAM_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS3_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS3_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS3_LCD0_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
+       { EXYNOS3_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS3_MAUDIO_SYS_PWR_REG,                   { 0x7, 0x0, 0x0} },
+       { EXYNOS3_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos3250_list_feed[] = {
+       EXYNOS3_ARM_CORE_OPTION(0),
+       EXYNOS3_ARM_CORE_OPTION(1),
+       EXYNOS3_ARM_CORE_OPTION(2),
+       EXYNOS3_ARM_CORE_OPTION(3),
+       EXYNOS3_ARM_COMMON_OPTION,
+       EXYNOS3_TOP_PWR_OPTION,
+       EXYNOS3_CORE_TOP_PWR_OPTION,
+       S5P_CAM_OPTION,
+       S5P_MFC_OPTION,
+       S5P_G3D_OPTION,
+       S5P_LCD0_OPTION,
+       S5P_ISP_OPTION,
+};
+
+static void exynos3250_powerdown_conf_extra(enum sys_powerdown mode)
+{
+       unsigned int i;
+       unsigned int tmp;
+
+       /* Enable only SC_FEEDBACK */
+       for (i = 0; i < ARRAY_SIZE(exynos3250_list_feed); i++) {
+               tmp = pmu_raw_readl(exynos3250_list_feed[i]);
+               tmp &= ~(EXYNOS3_OPTION_USE_SC_COUNTER);
+               tmp |= EXYNOS3_OPTION_USE_SC_FEEDBACK;
+               pmu_raw_writel(tmp, exynos3250_list_feed[i]);
+       }
+
+       if (mode != SYS_SLEEP)
+               return;
+
+       pmu_raw_writel(XUSBXTI_DURATION, EXYNOS3_XUSBXTI_DURATION);
+       pmu_raw_writel(XXTI_DURATION, EXYNOS3_XXTI_DURATION);
+       pmu_raw_writel(EXT_REGULATOR_DURATION, EXYNOS3_EXT_REGULATOR_DURATION);
+       pmu_raw_writel(EXT_REGULATOR_COREBLK_DURATION,
+                      EXYNOS3_EXT_REGULATOR_COREBLK_DURATION);
+}
+
+static void exynos3250_pmu_init(void)
+{
+       unsigned int value;
+
+       /*
+        * To prevent the L2 memory system from issuing new bus requests
+        * while a core is powered down, the L2 power-down option must be
+        * set to '1'.
+        */
+       value = pmu_raw_readl(EXYNOS3_ARM_COMMON_OPTION);
+       value |= EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+       pmu_raw_writel(value, EXYNOS3_ARM_COMMON_OPTION);
+
+       /* Enable USE_STANDBY_WFI for all CORE */
+       pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+       /*
+        * Set PSHOLD port for output high
+        */
+       value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+       value |= S5P_PS_HOLD_OUTPUT_HIGH;
+       pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+
+       /*
+        * Enable signal for PSHOLD port
+        */
+       value = pmu_raw_readl(S5P_PS_HOLD_CONTROL);
+       value |= S5P_PS_HOLD_EN;
+       pmu_raw_writel(value, S5P_PS_HOLD_CONTROL);
+}
+
+const struct exynos_pmu_data exynos3250_pmu_data = {
+       .pmu_config     = exynos3250_pmu_config,
+       .pmu_init       = exynos3250_pmu_init,
+       .powerdown_conf_extra   = exynos3250_powerdown_conf_extra,
+};
diff --git a/drivers/soc/samsung/exynos4-pmu.c b/drivers/soc/samsung/exynos4-pmu.c
new file mode 100644 (file)
index 0000000..bc4fa73
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS4 - CPU PMU(Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos4210_pmu_config[] = {
+       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+       { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_CORE1_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE1,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL1,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_COMMON_LOWPWR,                { 0x0, 0x0, 0x2 } },
+       { S5P_L2_0_LOWPWR,                      { 0x2, 0x2, 0x3 } },
+       { S5P_L2_1_LOWPWR,                      { 0x2, 0x2, 0x3 } },
+       { S5P_CMU_ACLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_SCLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_LOWPWR,                 { 0x1, 0x1, 0x0 } },
+       { S5P_APLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_MPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_VPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_EPLL_SYSCLK_LOWPWR,               { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR,     { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_GPSALIVE_LOWPWR,        { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_CAM_LOWPWR,           { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_TV_LOWPWR,            { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_MFC_LOWPWR,           { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_G3D_LOWPWR,           { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_LCD0_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_LCD1_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR,        { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_CLKSTOP_GPS_LOWPWR,           { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_CAM_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_TV_LOWPWR,              { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_MFC_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_G3D_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_LCD0_LOWPWR,            { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_LCD1_LOWPWR,            { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_GPS_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_TOP_BUS_LOWPWR,                   { 0x3, 0x0, 0x0 } },
+       { S5P_TOP_RETENTION_LOWPWR,             { 0x1, 0x0, 0x1 } },
+       { S5P_TOP_PWR_LOWPWR,                   { 0x3, 0x0, 0x3 } },
+       { S5P_LOGIC_RESET_LOWPWR,               { 0x1, 0x1, 0x0 } },
+       { S5P_ONENAND_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_MODIMIF_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_G2D_ACP_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_USBOTG_MEM_LOWPWR,                { 0x3, 0x0, 0x0 } },
+       { S5P_HSMMC_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_CSSYS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_SECSS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_PCIE_MEM_LOWPWR,                  { 0x3, 0x0, 0x0 } },
+       { S5P_SATA_MEM_LOWPWR,                  { 0x3, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_DRAM_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MAUDIO_LOWPWR,      { 0x1, 0x1, 0x0 } },
+       { S5P_PAD_RETENTION_GPIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_UART_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MMCA_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MMCB_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_EBIA_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_EBIB_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_ISOLATION_LOWPWR,   { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_ALV_SEL_LOWPWR,     { 0x1, 0x0, 0x0 } },
+       { S5P_XUSBXTI_LOWPWR,                   { 0x1, 0x1, 0x0 } },
+       { S5P_XXTI_LOWPWR,                      { 0x1, 0x1, 0x0 } },
+       { S5P_EXT_REGULATOR_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_GPIO_MODE_LOWPWR,                 { 0x1, 0x0, 0x0 } },
+       { S5P_GPIO_MODE_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CAM_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_TV_LOWPWR,                        { 0x7, 0x0, 0x0 } },
+       { S5P_MFC_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_G3D_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_LCD0_LOWPWR,                      { 0x7, 0x0, 0x0 } },
+       { S5P_LCD1_LOWPWR,                      { 0x7, 0x0, 0x0 } },
+       { S5P_MAUDIO_LOWPWR,                    { 0x7, 0x7, 0x0 } },
+       { S5P_GPS_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_GPS_ALIVE_LOWPWR,                 { 0x7, 0x0, 0x0 } },
+       { PMU_TABLE_END,},
+};
+
+static const struct exynos_pmu_conf exynos4x12_pmu_config[] = {
+       { S5P_ARM_CORE0_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE0,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL0,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_CORE1_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE1,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL1,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ISP_ARM_LOWPWR,                   { 0x1, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR,     { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR,   { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_COMMON_LOWPWR,                { 0x0, 0x0, 0x2 } },
+       { S5P_L2_0_LOWPWR,                      { 0x0, 0x0, 0x3 } },
+       /* the XXX_OPTION registers are programmed in a different field here */
+       { S5P_ARM_L2_0_OPTION,                  { 0x10, 0x10, 0x0 } },
+       { S5P_L2_1_LOWPWR,                      { 0x0, 0x0, 0x3 } },
+       { S5P_ARM_L2_1_OPTION,                  { 0x10, 0x10, 0x0 } },
+       { S5P_CMU_ACLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_SCLKSTOP_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_LOWPWR,                 { 0x1, 0x1, 0x0 } },
+       { S5P_DRAM_FREQ_DOWN_LOWPWR,            { 0x1, 0x1, 0x1 } },
+       { S5P_DDRPHY_DLLOFF_LOWPWR,             { 0x1, 0x1, 0x1 } },
+       { S5P_LPDDR_PHY_DLL_LOCK_LOWPWR,        { 0x1, 0x1, 0x1 } },
+       { S5P_CMU_ACLKSTOP_COREBLK_LOWPWR,      { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_SCLKSTOP_COREBLK_LOWPWR,      { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_COREBLK_LOWPWR,         { 0x1, 0x1, 0x0 } },
+       { S5P_APLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_MPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_VPLL_SYSCLK_LOWPWR,               { 0x1, 0x0, 0x0 } },
+       { S5P_EPLL_SYSCLK_LOWPWR,               { 0x1, 0x1, 0x0 } },
+       { S5P_MPLLUSER_SYSCLK_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_GPS_ALIVE_LOWPWR,     { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_GPSALIVE_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_CAM_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_TV_LOWPWR,            { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_MFC_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_G3D_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_LCD0_LOWPWR,          { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_ISP_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_MAUDIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_CLKSTOP_GPS_LOWPWR,           { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_CAM_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_TV_LOWPWR,              { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_MFC_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_G3D_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_LCD0_LOWPWR,            { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_ISP_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_RESET_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_CMU_RESET_GPS_LOWPWR,             { 0x1, 0x0, 0x0 } },
+       { S5P_TOP_BUS_LOWPWR,                   { 0x3, 0x0, 0x0 } },
+       { S5P_TOP_RETENTION_LOWPWR,             { 0x1, 0x0, 0x1 } },
+       { S5P_TOP_PWR_LOWPWR,                   { 0x3, 0x0, 0x3 } },
+       { S5P_TOP_BUS_COREBLK_LOWPWR,           { 0x3, 0x0, 0x0 } },
+       { S5P_TOP_RETENTION_COREBLK_LOWPWR,     { 0x1, 0x0, 0x1 } },
+       { S5P_TOP_PWR_COREBLK_LOWPWR,           { 0x3, 0x0, 0x3 } },
+       { S5P_LOGIC_RESET_LOWPWR,               { 0x1, 0x1, 0x0 } },
+       { S5P_OSCCLK_GATE_LOWPWR,               { 0x1, 0x0, 0x1 } },
+       { S5P_LOGIC_RESET_COREBLK_LOWPWR,       { 0x1, 0x1, 0x0 } },
+       { S5P_OSCCLK_GATE_COREBLK_LOWPWR,       { 0x1, 0x0, 0x1 } },
+       { S5P_ONENAND_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_ONENAND_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
+       { S5P_HSI_MEM_LOWPWR,                   { 0x3, 0x0, 0x0 } },
+       { S5P_HSI_MEM_OPTION,                   { 0x10, 0x10, 0x0 } },
+       { S5P_G2D_ACP_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_G2D_ACP_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
+       { S5P_USBOTG_MEM_LOWPWR,                { 0x3, 0x0, 0x0 } },
+       { S5P_USBOTG_MEM_OPTION,                { 0x10, 0x10, 0x0 } },
+       { S5P_HSMMC_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_HSMMC_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
+       { S5P_CSSYS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_CSSYS_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
+       { S5P_SECSS_MEM_LOWPWR,                 { 0x3, 0x0, 0x0 } },
+       { S5P_SECSS_MEM_OPTION,                 { 0x10, 0x10, 0x0 } },
+       { S5P_ROTATOR_MEM_LOWPWR,               { 0x3, 0x0, 0x0 } },
+       { S5P_ROTATOR_MEM_OPTION,               { 0x10, 0x10, 0x0 } },
+       { S5P_PAD_RETENTION_DRAM_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MAUDIO_LOWPWR,      { 0x1, 0x1, 0x0 } },
+       { S5P_PAD_RETENTION_GPIO_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_UART_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MMCA_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_MMCB_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_EBIA_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_EBIB_LOWPWR,        { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_GPIO_COREBLK_LOWPWR, { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_ISOLATION_LOWPWR,   { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_ISOLATION_COREBLK_LOWPWR,     { 0x1, 0x0, 0x0 } },
+       { S5P_PAD_RETENTION_ALV_SEL_LOWPWR,     { 0x1, 0x0, 0x0 } },
+       { S5P_XUSBXTI_LOWPWR,                   { 0x1, 0x1, 0x0 } },
+       { S5P_XXTI_LOWPWR,                      { 0x1, 0x1, 0x0 } },
+       { S5P_EXT_REGULATOR_LOWPWR,             { 0x1, 0x1, 0x0 } },
+       { S5P_GPIO_MODE_LOWPWR,                 { 0x1, 0x0, 0x0 } },
+       { S5P_GPIO_MODE_COREBLK_LOWPWR,         { 0x1, 0x0, 0x0 } },
+       { S5P_GPIO_MODE_MAUDIO_LOWPWR,          { 0x1, 0x1, 0x0 } },
+       { S5P_TOP_ASB_RESET_LOWPWR,             { 0x1, 0x1, 0x1 } },
+       { S5P_TOP_ASB_ISOLATION_LOWPWR,         { 0x1, 0x0, 0x1 } },
+       { S5P_CAM_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_TV_LOWPWR,                        { 0x7, 0x0, 0x0 } },
+       { S5P_MFC_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_G3D_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_LCD0_LOWPWR,                      { 0x7, 0x0, 0x0 } },
+       { S5P_ISP_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_MAUDIO_LOWPWR,                    { 0x7, 0x7, 0x0 } },
+       { S5P_GPS_LOWPWR,                       { 0x7, 0x0, 0x0 } },
+       { S5P_GPS_ALIVE_LOWPWR,                 { 0x7, 0x0, 0x0 } },
+       { S5P_CMU_SYSCLK_ISP_LOWPWR,            { 0x1, 0x0, 0x0 } },
+       { S5P_CMU_SYSCLK_GPS_LOWPWR,            { 0x1, 0x0, 0x0 } },
+       { PMU_TABLE_END,},
+};
+
+static const struct exynos_pmu_conf exynos4412_pmu_config[] = {
+       { S5P_ARM_CORE2_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE2,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL2,                 { 0x0, 0x0, 0x0 } },
+       { S5P_ARM_CORE3_LOWPWR,                 { 0x0, 0x0, 0x2 } },
+       { S5P_DIS_IRQ_CORE3,                    { 0x0, 0x0, 0x0 } },
+       { S5P_DIS_IRQ_CENTRAL3,                 { 0x0, 0x0, 0x0 } },
+       { PMU_TABLE_END,},
+};
+
+const struct exynos_pmu_data exynos4210_pmu_data = {
+       .pmu_config     = exynos4210_pmu_config,
+};
+
+const struct exynos_pmu_data exynos4212_pmu_data = {
+       .pmu_config     = exynos4x12_pmu_config,
+};
+
+const struct exynos_pmu_data exynos4412_pmu_data = {
+       .pmu_config             = exynos4x12_pmu_config,
+       .pmu_config_extra       = exynos4412_pmu_config,
+};
diff --git a/drivers/soc/samsung/exynos5250-pmu.c b/drivers/soc/samsung/exynos5250-pmu.c
new file mode 100644 (file)
index 0000000..3fac425
--- /dev/null
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS5250 - CPU PMU (Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include "exynos-pmu.h"
+
+static const struct exynos_pmu_conf exynos5250_pmu_config[] = {
+       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x2} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5_FSYS_ARM_SYS_PWR_REG,                 { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
+       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_COMMON_SYS_PWR_REG,               { 0x0, 0x0, 0x2} },
+       { EXYNOS5_ARM_L2_SYS_PWR_REG,                   { 0x3, 0x3, 0x3} },
+       { EXYNOS5_ARM_L2_OPTION,                        { 0x10, 0x10, 0x0 } },
+       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,           { 0x1, 0x1, 0x1} },
+       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x0, 0x3} },
+       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x3} },
+       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
+       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,       { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x1} },
+       { EXYNOS5_USBOTG_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_G2D_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_USBDRD_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SDMMC_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_CSSYS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SECSS_MEM_SYS_PWR_REG,                { 0x3, 0x0, 0x0} },
+       { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG,              { 0x3, 0x0, 0x0} },
+       { EXYNOS5_INTRAM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_INTROM_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_JPEG_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
+       { EXYNOS5_JPEG_MEM_OPTION,                      { 0x10, 0x10, 0x0} },
+       { EXYNOS5_HSI_MEM_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG,               { 0x3, 0x0, 0x0} },
+       { EXYNOS5_SATA_MEM_SYS_PWR_REG,                 { 0x3, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x1} },
+       { EXYNOS5_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
+       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x0, 0x1} },
+       { EXYNOS5_GSCL_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
+       { EXYNOS5_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_DISP1_SYS_PWR_REG,                    { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MAU_SYS_PWR_REG,                      { 0x7, 0x7, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG,          { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG,         { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG,           { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,            { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5_list_both_cnt_feed[] = {
+       EXYNOS5_ARM_CORE0_OPTION,
+       EXYNOS5_ARM_CORE1_OPTION,
+       EXYNOS5_ARM_COMMON_OPTION,
+       EXYNOS5_GSCL_OPTION,
+       EXYNOS5_ISP_OPTION,
+       EXYNOS5_MFC_OPTION,
+       EXYNOS5_G3D_OPTION,
+       EXYNOS5_DISP1_OPTION,
+       EXYNOS5_MAU_OPTION,
+       EXYNOS5_TOP_PWR_OPTION,
+       EXYNOS5_TOP_PWR_SYSMEM_OPTION,
+};
+
+static unsigned int const exynos5_list_disable_wfi_wfe[] = {
+       EXYNOS5_ARM_CORE1_OPTION,
+       EXYNOS5_FSYS_ARM_OPTION,
+       EXYNOS5_ISP_ARM_OPTION,
+};
+
+static void exynos5250_pmu_init(void)
+{
+       unsigned int value;
+       /*
+        * When SYS_WDTRESET is set, the watchdog timer reset request
+        * is ignored by the power management unit.
+        */
+       value = pmu_raw_readl(EXYNOS5_AUTO_WDTRESET_DISABLE);
+       value &= ~EXYNOS5_SYS_WDTRESET;
+       pmu_raw_writel(value, EXYNOS5_AUTO_WDTRESET_DISABLE);
+
+       value = pmu_raw_readl(EXYNOS5_MASK_WDTRESET_REQUEST);
+       value &= ~EXYNOS5_SYS_WDTRESET;
+       pmu_raw_writel(value, EXYNOS5_MASK_WDTRESET_REQUEST);
+}
+
+static void exynos5_powerdown_conf(enum sys_powerdown mode)
+{
+       unsigned int i;
+       unsigned int tmp;
+
+       /*
+        * Enable both SC_FEEDBACK and SC_COUNTER
+        */
+       for (i = 0; i < ARRAY_SIZE(exynos5_list_both_cnt_feed); i++) {
+               tmp = pmu_raw_readl(exynos5_list_both_cnt_feed[i]);
+               tmp |= (EXYNOS5_USE_SC_FEEDBACK |
+                       EXYNOS5_USE_SC_COUNTER);
+               pmu_raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
+       }
+
+       /*
+        * Enable the SKIP_DEACTIVATE_ACEACP_IN_PWDN bitfield
+        */
+       tmp = pmu_raw_readl(EXYNOS5_ARM_COMMON_OPTION);
+       tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+       pmu_raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
+
+       /*
+        * Disable WFI/WFE on XXX_OPTION
+        */
+       for (i = 0; i < ARRAY_SIZE(exynos5_list_disable_wfi_wfe); i++) {
+               tmp = pmu_raw_readl(exynos5_list_disable_wfi_wfe[i]);
+               tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
+                        EXYNOS5_OPTION_USE_STANDBYWFI);
+               pmu_raw_writel(tmp, exynos5_list_disable_wfi_wfe[i]);
+       }
+}
+
+const struct exynos_pmu_data exynos5250_pmu_data = {
+       .pmu_config     = exynos5250_pmu_config,
+       .pmu_init       = exynos5250_pmu_init,
+       .powerdown_conf = exynos5_powerdown_conf,
+};
diff --git a/drivers/soc/samsung/exynos5420-pmu.c b/drivers/soc/samsung/exynos5420-pmu.c
new file mode 100644 (file)
index 0000000..b962fb6
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2011-2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com/
+ *
+ * EXYNOS5420 - CPU PMU (Power Management Unit) support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pm.h>
+#include <linux/soc/samsung/exynos-regs-pmu.h>
+#include <linux/soc/samsung/exynos-pmu.h>
+
+#include <asm/cputype.h>
+
+#include "exynos-pmu.h"
+
+static struct exynos_pmu_conf exynos5420_pmu_config[] = {
+       /* { .offset = offset, .val = { AFTR, LPA, SLEEP } } */
+       { EXYNOS5_ARM_CORE0_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_CORE1_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG,  { 0x0, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_ARM_CORE2_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_ARM_CORE3_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_ARM_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_ARM_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_CORE0_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_CORE1_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_CORE2_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE2_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE2_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_CORE3_SYS_PWR_REG,             { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE3_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_DIS_IRQ_KFC_CORE3_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ISP_ARM_SYS_PWR_REG,                  { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG,  { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_ARM_COMMON_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_COMMON_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5_ARM_L2_SYS_PWR_REG,                   { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_KFC_L2_SYS_PWR_REG,                { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG,             { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYS_PWR_REG,                { 0x1, 0x1, 0x0} },
+       { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG,      { 0x1, 0x0, 0x1} },
+       { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG,           { 0x1, 0x0, 0x1} },
+       { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG,           { 0x1, 0x0, 0x1} },
+       { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_DPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_IPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_KPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG,          { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_RPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_SPLL_SYSCLK_SYS_PWR_REG,           { 0x1, 0x0, 0x0} },
+       { EXYNOS5_TOP_BUS_SYS_PWR_REG,                  { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYS_PWR_REG,            { 0x1, 0x1, 0x1} },
+       { EXYNOS5_TOP_PWR_SYS_PWR_REG,                  { 0x3, 0x3, 0x0} },
+       { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
+       { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x1} },
+       { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG,           { 0x3, 0x0, 0x0} },
+       { EXYNOS5_LOGIC_RESET_SYS_PWR_REG,              { 0x1, 0x1, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG,              { 0x1, 0x0, 0x1} },
+       { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_INTRAM_MEM_SYS_PWR_REG,            { 0x3, 0x0, 0x3} },
+       { EXYNOS5420_INTROM_MEM_SYS_PWR_REG,            { 0x3, 0x0, 0x3} },
+       { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG,       { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG,        { 0x1, 0x1, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_JTAG_SYS_PWR_REG,    { 0x1, 0x1, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_DRAM_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_UART_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_MMC0_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_MMC1_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_MMC2_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_HSI_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_EBIA_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_EBIB_SYS_PWR_REG,    { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_SPI_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5420_PAD_RETENTION_DRAM_COREBLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG,     { 0x1, 0x0, 0x0} },
+       { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG,              { 0x1, 0x0, 0x0} },
+       { EXYNOS5_XUSBXTI_SYS_PWR_REG,                  { 0x1, 0x1, 0x0} },
+       { EXYNOS5_XXTI_SYS_PWR_REG,                     { 0x1, 0x1, 0x0} },
+       { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYS_PWR_REG,                { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG,         { 0x1, 0x1, 0x0} },
+       { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG,            { 0x1, 0x1, 0x0} },
+       { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG,        { 0x1, 0x0, 0x0} },
+       { EXYNOS5_GSCL_SYS_PWR_REG,                     { 0x7, 0x0, 0x0} },
+       { EXYNOS5_ISP_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_MFC_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5_G3D_SYS_PWR_REG,                      { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_DISP1_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_MAU_SYS_PWR_REG,                   { 0x7, 0x7, 0x0} },
+       { EXYNOS5420_G2D_SYS_PWR_REG,                   { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_MSC_SYS_PWR_REG,                   { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_FSYS_SYS_PWR_REG,                  { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_FSYS2_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_PSGEN_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_PERIC_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5420_WCORE_SYS_PWR_REG,                 { 0x7, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,         { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,     { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,          { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,           { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG,           { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,           { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,      { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,           { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,            { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,       { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,         { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,         { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,         { 0x0, 0x0, 0x0} },
+       { EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,        { 0x0, 0x0, 0x0} },
+       { PMU_TABLE_END,},
+};
+
+static unsigned int const exynos5420_list_disable_pmu_reg[] = {
+       EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG,
+       EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG,
+       EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_DISP1_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_MAU_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_G2D_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_MSC_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_FSYS_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_PSGEN_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_PERIC_SYS_PWR_REG,
+       EXYNOS5420_CMU_CLKSTOP_WCORE_SYS_PWR_REG,
+       EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG,
+       EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG,
+       EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_DISP1_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_MAU_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_G2D_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_MSC_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_FSYS_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_FSYS2_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_PSGEN_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_PERIC_SYS_PWR_REG,
+       EXYNOS5420_CMU_SYSCLK_WCORE_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_FSYS2_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_PSGEN_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_PERIC_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_WCORE_SYS_PWR_REG,
+       EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG,
+       EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG,
+       EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_DISP1_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_MAU_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_G2D_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_MSC_SYS_PWR_REG,
+       EXYNOS5420_CMU_RESET_FSYS_SYS_PWR_REG,
+};
+
+static void exynos5420_powerdown_conf(enum sys_powerdown mode)
+{
+       u32 this_cluster;
+
+       this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
+
+       /*
+        * Set the cluster ID in the IROM register to ensure that we wake
+        * up on the current cluster.
+        */
+       pmu_raw_writel(this_cluster, EXYNOS_IROM_DATA2);
+}
+
+static void exynos5420_pmu_init(void)
+{
+       unsigned int value;
+       int i;
+
+       /*
+        * Set the CMU_RESET, CMU_SYSCLK and CMU_CLKSTOP registers
+        * for local power blocks to Low initially as per Table 8-4:
+        * "System-Level Power-Down Configuration Registers".
+        */
+       for (i = 0; i < ARRAY_SIZE(exynos5420_list_disable_pmu_reg); i++)
+               pmu_raw_writel(0, exynos5420_list_disable_pmu_reg[i]);
+
+       /* Enable USE_STANDBY_WFI for all CORE */
+       pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);
+
+       value  = pmu_raw_readl(EXYNOS_L2_OPTION(0));
+       value &= ~EXYNOS5_USE_RETENTION;
+       pmu_raw_writel(value, EXYNOS_L2_OPTION(0));
+
+       value = pmu_raw_readl(EXYNOS_L2_OPTION(1));
+       value &= ~EXYNOS5_USE_RETENTION;
+       pmu_raw_writel(value, EXYNOS_L2_OPTION(1));
+
+       /*
+        * If L2_COMMON is turned off, clocks related to ATB async
+        * bridge are gated. Thus, when ISP power is gated, LPI
+        * may get stuck.
+        */
+       value = pmu_raw_readl(EXYNOS5420_LPI_MASK);
+       value |= EXYNOS5420_ATB_ISP_ARM;
+       pmu_raw_writel(value, EXYNOS5420_LPI_MASK);
+
+       value  = pmu_raw_readl(EXYNOS5420_LPI_MASK1);
+       value |= EXYNOS5420_ATB_KFC;
+       pmu_raw_writel(value, EXYNOS5420_LPI_MASK1);
+
+       /* Prevent new bus requests from being issued from the L2 memory */
+       value = pmu_raw_readl(EXYNOS5420_ARM_COMMON_OPTION);
+       value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+       pmu_raw_writel(value, EXYNOS5420_ARM_COMMON_OPTION);
+
+       value = pmu_raw_readl(EXYNOS5420_KFC_COMMON_OPTION);
+       value |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN;
+       pmu_raw_writel(value, EXYNOS5420_KFC_COMMON_OPTION);
+
+       /* This setting is to reduce suspend/resume time */
+       pmu_raw_writel(DUR_WAIT_RESET, EXYNOS5420_LOGIC_RESET_DURATION3);
+
+       /* Serialize the CPU wakeup of the Eagle cores */
+       pmu_raw_writel(SPREAD_ENABLE, EXYNOS5420_ARM_INTR_SPREAD_ENABLE);
+
+       pmu_raw_writel(SPREAD_USE_STANDWFI,
+                       EXYNOS5420_ARM_INTR_SPREAD_USE_STANDBYWFI);
+
+       pmu_raw_writel(0x1, EXYNOS5420_UP_SCHEDULER);
+
+       pr_info("EXYNOS5420 PMU initialized\n");
+}
+
+const struct exynos_pmu_data exynos5420_pmu_data = {
+       .pmu_config     = exynos5420_pmu_config,
+       .pmu_init       = exynos5420_pmu_init,
+       .powerdown_conf = exynos5420_powerdown_conf,
+};
index bc52670c8f4b33a9b8181d19e89566a3c87f3c69..99e354c8f53f8cdff0c34c4285a42cef4021565c 100644 (file)
@@ -117,7 +117,7 @@ static int sunxi_sram_show(struct seq_file *s, void *data)
 
                        val = readl(base + sram_data->reg);
                        val >>= sram_data->offset;
-                       val &= sram_data->width;
+                       val &= GENMASK(sram_data->width - 1, 0);
 
                        for (func = sram_data->func; func->func; func++) {
                                seq_printf(s, "\t\t%s%c\n", func->func,
@@ -208,7 +208,8 @@ int sunxi_sram_claim(struct device *dev)
                return -EBUSY;
        }
 
-       mask = GENMASK(sram_data->offset + sram_data->width, sram_data->offset);
+       mask = GENMASK(sram_data->offset + sram_data->width - 1,
+                      sram_data->offset);
        val = readl(base + sram_data->reg);
        val &= ~mask;
        writel(val | ((device << sram_data->offset) & mask),
index d0c3c3e085e33fcba1fa994b722d226f96d1fbeb..03089ad2fc6574be9af2b236d7c5c3761c971b7e 100644 (file)
@@ -31,7 +31,6 @@ config ARCH_TEGRA_3x_SOC
 config ARCH_TEGRA_114_SOC
        bool "Enable support for Tegra114 family"
        select ARM_ERRATA_798181 if SMP
-       select ARM_L1_CACHE_SHIFT_6
        select HAVE_ARM_ARCH_TIMER
        select PINCTRL_TEGRA114
        select TEGRA_TIMER
@@ -41,7 +40,6 @@ config ARCH_TEGRA_114_SOC
 
 config ARCH_TEGRA_124_SOC
        bool "Enable support for Tegra124 family"
-       select ARM_L1_CACHE_SHIFT_6
        select HAVE_ARM_ARCH_TIMER
        select PINCTRL_TEGRA124
        select TEGRA_TIMER
index bc34cf7482fb559c6fcfdece56a5b4d88c24266b..88c7e506177e89354b9eb3fb533632b1ff050c09 100644 (file)
@@ -113,8 +113,11 @@ struct tegra_pmc_soc {
 
 /**
  * struct tegra_pmc - NVIDIA Tegra PMC
+ * @dev: pointer to PMC device structure
  * @base: pointer to I/O remapped register region
  * @clk: pointer to pclk clock
+ * @soc: pointer to SoC data structure
+ * @debugfs: pointer to debugfs entry
  * @rate: currently configured rate of pclk
  * @suspend_mode: lowest suspend mode available
  * @cpu_good_time: CPU power good time (in microseconds)
@@ -134,6 +137,7 @@ struct tegra_pmc {
        struct device *dev;
        void __iomem *base;
        struct clk *clk;
+       struct dentry *debugfs;
 
        const struct tegra_pmc_soc *soc;
 
@@ -445,11 +449,9 @@ static const struct file_operations powergate_fops = {
 
 static int tegra_powergate_debugfs_init(void)
 {
-       struct dentry *d;
-
-       d = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
-                               &powergate_fops);
-       if (!d)
+       pmc->debugfs = debugfs_create_file("powergate", S_IRUGO, NULL, NULL,
+                                          &powergate_fops);
+       if (!pmc->debugfs)
                return -ENOMEM;
 
        return 0;
@@ -727,7 +729,7 @@ static void tegra_pmc_init(struct tegra_pmc *pmc)
        tegra_pmc_writel(value, PMC_CNTRL);
 }
 
-void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
+static void tegra_pmc_init_tsense_reset(struct tegra_pmc *pmc)
 {
        static const char disabled[] = "emergency thermal reset disabled";
        u32 pmu_addr, ctrl_id, reg_addr, reg_data, pinmux;
@@ -842,6 +844,7 @@ static int tegra_pmc_probe(struct platform_device *pdev)
 
        err = register_restart_handler(&tegra_pmc_restart_handler);
        if (err) {
+               debugfs_remove(pmc->debugfs);
                dev_err(&pdev->dev, "unable to register restart handler, %d\n",
                        err);
                return err;
@@ -1006,17 +1009,13 @@ static const char * const tegra210_powergates[] = {
        [TEGRA_POWERGATE_3D] = "3d",
        [TEGRA_POWERGATE_VENC] = "venc",
        [TEGRA_POWERGATE_PCIE] = "pcie",
-       [TEGRA_POWERGATE_L2] = "l2",
        [TEGRA_POWERGATE_MPE] = "mpe",
-       [TEGRA_POWERGATE_HEG] = "heg",
        [TEGRA_POWERGATE_SATA] = "sata",
        [TEGRA_POWERGATE_CPU1] = "cpu1",
        [TEGRA_POWERGATE_CPU2] = "cpu2",
        [TEGRA_POWERGATE_CPU3] = "cpu3",
-       [TEGRA_POWERGATE_CELP] = "celp",
        [TEGRA_POWERGATE_CPU0] = "cpu0",
        [TEGRA_POWERGATE_C0NC] = "c0nc",
-       [TEGRA_POWERGATE_C1NC] = "c1nc",
        [TEGRA_POWERGATE_SOR] = "sor",
        [TEGRA_POWERGATE_DIS] = "dis",
        [TEGRA_POWERGATE_DISB] = "disb",
index 77064160dd762c26f6c1fd184b252b24360163e3..d71abc75ae5643b34c7ca08cbd1f1e2139b35dcb 100644 (file)
@@ -75,6 +75,22 @@ config SPI_ATMEL
          This selects a driver for the Atmel SPI Controller, present on
          many AT32 (AVR32) and AT91 (ARM) chips.
 
+config SPI_AU1550
+       tristate "Au1550/Au1200/Au1300 SPI Controller"
+       depends on MIPS_ALCHEMY
+       select SPI_BITBANG
+       help
+         If you say yes to this option, support will be included for the
+         PSC SPI controller found on Au1550, Au1200 and Au1300 series.
+
+config SPI_AXI_SPI_ENGINE
+       tristate "Analog Devices AXI SPI Engine controller"
+       depends on HAS_IOMEM
+       help
+         This enables support for the Analog Devices AXI SPI Engine SPI controller.
+         It is part of the SPI Engine framework that is used in some Analog Devices
+         reference designs for FPGAs.
+
 config SPI_BCM2835
        tristate "BCM2835 SPI controller"
        depends on GPIOLIB
@@ -90,8 +106,7 @@ config SPI_BCM2835
 
 config SPI_BCM2835AUX
        tristate "BCM2835 SPI auxiliary controller"
-       depends on ARCH_BCM2835 || COMPILE_TEST
-       depends on GPIOLIB
+       depends on (ARCH_BCM2835 && GPIOLIB) || COMPILE_TEST
        help
          This selects a driver for the Broadcom BCM2835 SPI aux master.
 
@@ -118,14 +133,6 @@ config SPI_BFIN_SPORT
        help
          Enable support for a SPI bus via the Blackfin SPORT peripheral.
 
-config SPI_AU1550
-       tristate "Au1550/Au1200/Au1300 SPI Controller"
-       depends on MIPS_ALCHEMY
-       select SPI_BITBANG
-       help
-         If you say yes to this option, support will be included for the
-         PSC SPI controller found on Au1550, Au1200 and Au1300 series.
-
 config SPI_BCM53XX
        tristate "Broadcom BCM53xx SPI controller"
        depends on ARCH_BCM_5301X
@@ -197,6 +204,23 @@ config SPI_DAVINCI
        help
          SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
 
+config SPI_DESIGNWARE
+       tristate "DesignWare SPI controller core support"
+       help
+         General driver for the SPI controller core from DesignWare.
+
+config SPI_DW_PCI
+       tristate "PCI interface driver for DW SPI core"
+       depends on SPI_DESIGNWARE && PCI
+
+config SPI_DW_MID_DMA
+       bool "DMA support for DW SPI controller on Intel MID platform"
+       depends on SPI_DW_PCI && DW_DMAC_PCI
+
+config SPI_DW_MMIO
+       tristate "Memory-mapped io interface driver for DW SPI core"
+       depends on SPI_DESIGNWARE
+
 config SPI_DLN2
        tristate "Diolan DLN-2 USB SPI adapter"
        depends on MFD_DLN2
@@ -346,6 +370,13 @@ config SPI_MT65XX
          say Y or M here.If you are not sure, say N.
          SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
 
+config SPI_NUC900
+       tristate "Nuvoton NUC900 series SPI"
+       depends on ARCH_W90X900
+       select SPI_BITBANG
+       help
+         SPI driver for Nuvoton NUC900 series ARM SoCs
+
 config SPI_OC_TINY
        tristate "OpenCores tiny SPI"
        depends on GPIOLIB || COMPILE_TEST
@@ -415,10 +446,6 @@ config SPI_PPC4xx
        help
          This selects a driver for the PPC4xx SPI Controller.
 
-config SPI_PXA2XX_DMA
-       def_bool y
-       depends on SPI_PXA2XX
-
 config SPI_PXA2XX
        tristate "PXA2xx SSP SPI master"
        depends on (ARCH_PXA || PCI || ACPI)
@@ -647,34 +674,10 @@ config SPI_ZYNQMP_GQSPI
        help
          Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
 
-config SPI_NUC900
-       tristate "Nuvoton NUC900 series SPI"
-       depends on ARCH_W90X900
-       select SPI_BITBANG
-       help
-         SPI driver for Nuvoton NUC900 series ARM SoCs
-
 #
 # Add new SPI master controllers in alphabetical order above this line
 #
 
-config SPI_DESIGNWARE
-       tristate "DesignWare SPI controller core support"
-       help
-         general driver for SPI controller core from DesignWare
-
-config SPI_DW_PCI
-       tristate "PCI interface driver for DW SPI core"
-       depends on SPI_DESIGNWARE && PCI
-
-config SPI_DW_MID_DMA
-       bool "DMA support for DW SPI controller on Intel MID platform"
-       depends on SPI_DW_PCI && DW_DMAC_PCI
-
-config SPI_DW_MMIO
-       tristate "Memory-mapped io interface driver for DW SPI core"
-       depends on SPI_DESIGNWARE
-
 #
 # There are lots of SPI device types, with sensors and memory
 # being probably the most widely used ones.
index 8991ffce6e12d66682dcbbbcdbecf86f9f08dc3c..85c9704cf888f9fbdd17c2be6bd730bba4845d24 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_SPI_ALTERA)              += spi-altera.o
 obj-$(CONFIG_SPI_ATMEL)                        += spi-atmel.o
 obj-$(CONFIG_SPI_ATH79)                        += spi-ath79.o
 obj-$(CONFIG_SPI_AU1550)               += spi-au1550.o
+obj-$(CONFIG_SPI_AXI_SPI_ENGINE)       += spi-axi-spi-engine.o
 obj-$(CONFIG_SPI_BCM2835)              += spi-bcm2835.o
 obj-$(CONFIG_SPI_BCM2835AUX)           += spi-bcm2835aux.o
 obj-$(CONFIG_SPI_BCM53XX)              += spi-bcm53xx.o
@@ -62,8 +63,7 @@ obj-$(CONFIG_SPI_TI_QSPI)             += spi-ti-qspi.o
 obj-$(CONFIG_SPI_ORION)                        += spi-orion.o
 obj-$(CONFIG_SPI_PL022)                        += spi-pl022.o
 obj-$(CONFIG_SPI_PPC4xx)               += spi-ppc4xx.o
-spi-pxa2xx-platform-objs               := spi-pxa2xx.o
-spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA)   += spi-pxa2xx-dma.o
+spi-pxa2xx-platform-objs               := spi-pxa2xx.o spi-pxa2xx-dma.o
 obj-$(CONFIG_SPI_PXA2XX)               += spi-pxa2xx-platform.o
 obj-$(CONFIG_SPI_PXA2XX_PCI)           += spi-pxa2xx-pci.o
 obj-$(CONFIG_SPI_QUP)                  += spi-qup.o
index aebad36391c93b36658c52222e761366c111216f..8feac599e9ab4bfe886b8da89960f04ed32c1aba 100644 (file)
@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
 
        as->use_cs_gpios = true;
        if (atmel_spi_is_v2(as) &&
+           pdev->dev.of_node &&
            !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
                as->use_cs_gpios = false;
                master->num_chipselect = 4;
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
new file mode 100644 (file)
index 0000000..c968ab2
--- /dev/null
@@ -0,0 +1,591 @@
+/*
+ * SPI-Engine SPI controller driver
+ * Copyright 2015 Analog Devices Inc.
+ *  Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+#define SPI_ENGINE_VERSION_MAJOR(x)    ((x >> 16) & 0xff)
+#define SPI_ENGINE_VERSION_MINOR(x)    ((x >> 8) & 0xff)
+#define SPI_ENGINE_VERSION_PATCH(x)    (x & 0xff)
+
+#define SPI_ENGINE_REG_VERSION                 0x00
+
+#define SPI_ENGINE_REG_RESET                   0x40
+
+#define SPI_ENGINE_REG_INT_ENABLE              0x80
+#define SPI_ENGINE_REG_INT_PENDING             0x84
+#define SPI_ENGINE_REG_INT_SOURCE              0x88
+
+#define SPI_ENGINE_REG_SYNC_ID                 0xc0
+
+#define SPI_ENGINE_REG_CMD_FIFO_ROOM           0xd0
+#define SPI_ENGINE_REG_SDO_FIFO_ROOM           0xd4
+#define SPI_ENGINE_REG_SDI_FIFO_LEVEL          0xd8
+
+#define SPI_ENGINE_REG_CMD_FIFO                        0xe0
+#define SPI_ENGINE_REG_SDO_DATA_FIFO           0xe4
+#define SPI_ENGINE_REG_SDI_DATA_FIFO           0xe8
+#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK      0xec
+
+#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY                BIT(0)
+#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY                BIT(1)
+#define SPI_ENGINE_INT_SDI_ALMOST_FULL         BIT(2)
+#define SPI_ENGINE_INT_SYNC                    BIT(3)
+
+#define SPI_ENGINE_CONFIG_CPHA                 BIT(0)
+#define SPI_ENGINE_CONFIG_CPOL                 BIT(1)
+#define SPI_ENGINE_CONFIG_3WIRE                        BIT(2)
+
+#define SPI_ENGINE_INST_TRANSFER               0x0
+#define SPI_ENGINE_INST_ASSERT                 0x1
+#define SPI_ENGINE_INST_WRITE                  0x2
+#define SPI_ENGINE_INST_MISC                   0x3
+
+#define SPI_ENGINE_CMD_REG_CLK_DIV             0x0
+#define SPI_ENGINE_CMD_REG_CONFIG              0x1
+
+#define SPI_ENGINE_MISC_SYNC                   0x0
+#define SPI_ENGINE_MISC_SLEEP                  0x1
+
+#define SPI_ENGINE_TRANSFER_WRITE              0x1
+#define SPI_ENGINE_TRANSFER_READ               0x2
+
+#define SPI_ENGINE_CMD(inst, arg1, arg2) \
+       (((inst) << 12) | ((arg1) << 8) | (arg2))
+
+#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
+       SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
+#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
+       SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
+#define SPI_ENGINE_CMD_WRITE(reg, val) \
+       SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
+#define SPI_ENGINE_CMD_SLEEP(delay) \
+       SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
+#define SPI_ENGINE_CMD_SYNC(id) \
+       SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
+
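+/* A compiled sequence of 16-bit SPI Engine instructions for one message. */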
+struct spi_engine_program {
+       unsigned int length;
+       uint16_t instructions[];
+};
+
+struct spi_engine {
+       struct clk *clk;
+       struct clk *ref_clk;
+
+       spinlock_t lock;
+
+       void __iomem *base;
+
+       struct spi_message *msg;
+       struct spi_engine_program *p;
+       unsigned cmd_length;
+       const uint16_t *cmd_buf;
+
+       struct spi_transfer *tx_xfer;
+       unsigned int tx_length;
+       const uint8_t *tx_buf;
+
+       struct spi_transfer *rx_xfer;
+       unsigned int rx_length;
+       uint8_t *rx_buf;
+
+       unsigned int sync_id;
+       unsigned int completed_id;
+
+       unsigned int int_enable;
+};
+
+static void spi_engine_program_add_cmd(struct spi_engine_program *p,
+       bool dry, uint16_t cmd)
+{
+       if (!dry)
+               p->instructions[p->length] = cmd;
+       p->length++;
+}
+
+static unsigned int spi_engine_get_config(struct spi_device *spi)
+{
+       unsigned int config = 0;
+
+       if (spi->mode & SPI_CPOL)
+               config |= SPI_ENGINE_CONFIG_CPOL;
+       if (spi->mode & SPI_CPHA)
+               config |= SPI_ENGINE_CONFIG_CPHA;
+       if (spi->mode & SPI_3WIRE)
+               config |= SPI_ENGINE_CONFIG_3WIRE;
+
+       return config;
+}
+
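+/*
+ * Compute the clock divider for the requested transfer speed. The effective
+ * SCLK works out to ref_clk / ((clk_div + 1) * 2), capped by the 8-bit
+ * divider register.
+ */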
+static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
+       struct spi_device *spi, struct spi_transfer *xfer)
+{
+       unsigned int clk_div;
+
+       clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
+               xfer->speed_hz * 2);
+       if (clk_div > 255)
+               clk_div = 255;
+       else if (clk_div > 0)
+               clk_div -= 1;
+
+       return clk_div;
+}
+
+static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
+       struct spi_transfer *xfer)
+{
+       unsigned int len = xfer->len;
+
+       while (len) {
+               unsigned int n = min(len, 256U);
+               unsigned int flags = 0;
+
+               if (xfer->tx_buf)
+                       flags |= SPI_ENGINE_TRANSFER_WRITE;
+               if (xfer->rx_buf)
+                       flags |= SPI_ENGINE_TRANSFER_READ;
+
+               spi_engine_program_add_cmd(p, dry,
+                       SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
+               len -= n;
+       }
+}
+
+static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
+       struct spi_engine *spi_engine, unsigned int clk_div, unsigned int delay)
+{
+       unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
+       unsigned int t;
+
+       if (delay == 0)
+               return;
+
+       t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
+       while (t) {
+               unsigned int n = min(t, 256U);
+
+               spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
+               t -= n;
+       }
+}
+
+static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
+               struct spi_device *spi, bool assert)
+{
+       unsigned int mask = 0xff;
+
+       if (assert)
+               mask ^= BIT(spi->chip_select);
+
+       spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
+}
+
+static int spi_engine_compile_message(struct spi_engine *spi_engine,
+       struct spi_message *msg, bool dry, struct spi_engine_program *p)
+{
+       struct spi_device *spi = msg->spi;
+       struct spi_transfer *xfer;
+       int clk_div, new_clk_div;
+       bool cs_change = true;
+
+       clk_div = -1;
+
+       spi_engine_program_add_cmd(p, dry,
+               SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
+                       spi_engine_get_config(spi)));
+
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
+               if (new_clk_div != clk_div) {
+                       clk_div = new_clk_div;
+                       spi_engine_program_add_cmd(p, dry,
+                               SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
+                                       clk_div));
+               }
+
+               if (cs_change)
+                       spi_engine_gen_cs(p, dry, spi, true);
+
+               spi_engine_gen_xfer(p, dry, xfer);
+               spi_engine_gen_sleep(p, dry, spi_engine, clk_div,
+                       xfer->delay_usecs);
+
+               cs_change = xfer->cs_change;
+               if (list_is_last(&xfer->transfer_list, &msg->transfers))
+                       cs_change = !cs_change;
+
+               if (cs_change)
+                       spi_engine_gen_cs(p, dry, spi, false);
+       }
+
+       return 0;
+}
+
+static void spi_engine_xfer_next(struct spi_engine *spi_engine,
+       struct spi_transfer **_xfer)
+{
+       struct spi_message *msg = spi_engine->msg;
+       struct spi_transfer *xfer = *_xfer;
+
+       if (!xfer) {
+               xfer = list_first_entry(&msg->transfers,
+                       struct spi_transfer, transfer_list);
+       } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
+               xfer = NULL;
+       } else {
+               xfer = list_next_entry(xfer, transfer_list);
+       }
+
+       *_xfer = xfer;
+}
+
+static void spi_engine_tx_next(struct spi_engine *spi_engine)
+{
+       struct spi_transfer *xfer = spi_engine->tx_xfer;
+
+       do {
+               spi_engine_xfer_next(spi_engine, &xfer);
+       } while (xfer && !xfer->tx_buf);
+
+       spi_engine->tx_xfer = xfer;
+       if (xfer) {
+               spi_engine->tx_length = xfer->len;
+               spi_engine->tx_buf = xfer->tx_buf;
+       } else {
+               spi_engine->tx_buf = NULL;
+       }
+}
+
+static void spi_engine_rx_next(struct spi_engine *spi_engine)
+{
+       struct spi_transfer *xfer = spi_engine->rx_xfer;
+
+       do {
+               spi_engine_xfer_next(spi_engine, &xfer);
+       } while (xfer && !xfer->rx_buf);
+
+       spi_engine->rx_xfer = xfer;
+       if (xfer) {
+               spi_engine->rx_length = xfer->len;
+               spi_engine->rx_buf = xfer->rx_buf;
+       } else {
+               spi_engine->rx_buf = NULL;
+       }
+}
+
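+/*
+ * Push queued instructions into the command FIFO as long as there is room;
+ * returns true while instructions remain to be written.
+ */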
+static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
+{
+       void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
+       unsigned int n, m, i;
+       const uint16_t *buf;
+
+       n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
+       while (n && spi_engine->cmd_length) {
+               m = min(n, spi_engine->cmd_length);
+               buf = spi_engine->cmd_buf;
+               for (i = 0; i < m; i++)
+                       writel_relaxed(buf[i], addr);
+               spi_engine->cmd_buf += m;
+               spi_engine->cmd_length -= m;
+               n -= m;
+       }
+
+       return spi_engine->cmd_length != 0;
+}
+
+static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
+{
+       void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
+       unsigned int n, m, i;
+       const uint8_t *buf;
+
+       n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
+       while (n && spi_engine->tx_length) {
+               m = min(n, spi_engine->tx_length);
+               buf = spi_engine->tx_buf;
+               for (i = 0; i < m; i++)
+                       writel_relaxed(buf[i], addr);
+               spi_engine->tx_buf += m;
+               spi_engine->tx_length -= m;
+               n -= m;
+               if (spi_engine->tx_length == 0)
+                       spi_engine_tx_next(spi_engine);
+       }
+
+       return spi_engine->tx_length != 0;
+}
+
+static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
+{
+       void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
+       unsigned int n, m, i;
+       uint8_t *buf;
+
+       n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
+       while (n && spi_engine->rx_length) {
+               m = min(n, spi_engine->rx_length);
+               buf = spi_engine->rx_buf;
+               for (i = 0; i < m; i++)
+                       buf[i] = readl_relaxed(addr);
+               spi_engine->rx_buf += m;
+               spi_engine->rx_length -= m;
+               n -= m;
+               if (spi_engine->rx_length == 0)
+                       spi_engine_rx_next(spi_engine);
+       }
+
+       return spi_engine->rx_length != 0;
+}
+
+static irqreturn_t spi_engine_irq(int irq, void *devid)
+{
+       struct spi_master *master = devid;
+       struct spi_engine *spi_engine = spi_master_get_devdata(master);
+       unsigned int disable_int = 0;
+       unsigned int pending;
+
+       pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+
+       if (pending & SPI_ENGINE_INT_SYNC) {
+               writel_relaxed(SPI_ENGINE_INT_SYNC,
+                       spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+               spi_engine->completed_id = readl_relaxed(
+                       spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
+       }
+
+       spin_lock(&spi_engine->lock);
+
+       if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
+               if (!spi_engine_write_cmd_fifo(spi_engine))
+                       disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+       }
+
+       if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
+               if (!spi_engine_write_tx_fifo(spi_engine))
+                       disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+       }
+
+       if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
+               if (!spi_engine_read_rx_fifo(spi_engine))
+                       disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+       }
+
+       if (pending & SPI_ENGINE_INT_SYNC) {
+               if (spi_engine->msg &&
+                   spi_engine->completed_id == spi_engine->sync_id) {
+                       struct spi_message *msg = spi_engine->msg;
+
+                       kfree(spi_engine->p);
+                       msg->status = 0;
+                       msg->actual_length = msg->frame_length;
+                       spi_engine->msg = NULL;
+                       spi_finalize_current_message(master);
+                       disable_int |= SPI_ENGINE_INT_SYNC;
+               }
+       }
+
+       if (disable_int) {
+               spi_engine->int_enable &= ~disable_int;
+               writel_relaxed(spi_engine->int_enable,
+                       spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+       }
+
+       spin_unlock(&spi_engine->lock);
+
+       return IRQ_HANDLED;
+}
+
+static int spi_engine_transfer_one_message(struct spi_master *master,
+       struct spi_message *msg)
+{
+       struct spi_engine_program p_dry, *p;
+       struct spi_engine *spi_engine = spi_master_get_devdata(master);
+       unsigned int int_enable = 0;
+       unsigned long flags;
+       size_t size;
+
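+       /*
+        * Dry run first to determine the program length, then allocate the
+        * buffer and compile the message for real.
+        */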
+       p_dry.length = 0;
+       spi_engine_compile_message(spi_engine, msg, true, &p_dry);
+
+       size = sizeof(*p->instructions) * (p_dry.length + 1);
+       p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
+       if (!p)
+               return -ENOMEM;
+       spi_engine_compile_message(spi_engine, msg, false, p);
+
+       spin_lock_irqsave(&spi_engine->lock, flags);
+       spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
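+       /*
+        * Terminate the program with a SYNC instruction; when the engine
+        * reaches it the SYNC interrupt fires and the message is finalized
+        * in the interrupt handler.
+        */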
+       spi_engine_program_add_cmd(p, false,
+               SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
+
+       spi_engine->msg = msg;
+       spi_engine->p = p;
+
+       spi_engine->cmd_buf = p->instructions;
+       spi_engine->cmd_length = p->length;
+       if (spi_engine_write_cmd_fifo(spi_engine))
+               int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
+
+       spi_engine_tx_next(spi_engine);
+       if (spi_engine_write_tx_fifo(spi_engine))
+               int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
+
+       spi_engine_rx_next(spi_engine);
+       if (spi_engine->rx_length != 0)
+               int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
+
+       int_enable |= SPI_ENGINE_INT_SYNC;
+
+       writel_relaxed(int_enable,
+               spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+       spi_engine->int_enable = int_enable;
+       spin_unlock_irqrestore(&spi_engine->lock, flags);
+
+       return 0;
+}
+
+static int spi_engine_probe(struct platform_device *pdev)
+{
+       struct spi_engine *spi_engine;
+       struct spi_master *master;
+       unsigned int version;
+       struct resource *res;
+       int irq;
+       int ret;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0)
+               return -ENXIO;
+
+       spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
+       if (!spi_engine)
+               return -ENOMEM;
+
+       master = spi_alloc_master(&pdev->dev, 0);
+       if (!master)
+               return -ENOMEM;
+
+       spi_master_set_devdata(master, spi_engine);
+
+       spin_lock_init(&spi_engine->lock);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       spi_engine->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(spi_engine->base)) {
+               ret = PTR_ERR(spi_engine->base);
+               goto err_put_master;
+       }
+
+       version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
+       if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
+               dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
+                       SPI_ENGINE_VERSION_MAJOR(version),
+                       SPI_ENGINE_VERSION_MINOR(version),
+                       SPI_ENGINE_VERSION_PATCH(version));
+               ret = -ENODEV;
+               goto err_put_master;
+       }
+
+       spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+       if (IS_ERR(spi_engine->clk)) {
+               ret = PTR_ERR(spi_engine->clk);
+               goto err_put_master;
+       }
+
+       spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
+       if (IS_ERR(spi_engine->ref_clk)) {
+               ret = PTR_ERR(spi_engine->ref_clk);
+               goto err_put_master;
+       }
+
+       ret = clk_prepare_enable(spi_engine->clk);
+       if (ret)
+               goto err_put_master;
+
+       ret = clk_prepare_enable(spi_engine->ref_clk);
+       if (ret)
+               goto err_clk_disable;
+
+       writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
+       writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+       writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+
+       ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
+       if (ret)
+               goto err_ref_clk_disable;
+
+       master->dev.parent = &pdev->dev;
+       master->dev.of_node = pdev->dev.of_node;
+       master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
+       master->bits_per_word_mask = SPI_BPW_MASK(8);
+       master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
+       master->transfer_one_message = spi_engine_transfer_one_message;
+       master->num_chipselect = 8;
+
+       ret = spi_register_master(master);
+       if (ret)
+               goto err_free_irq;
+
+       platform_set_drvdata(pdev, master);
+
+       return 0;
+err_free_irq:
+       free_irq(irq, master);
+err_ref_clk_disable:
+       clk_disable_unprepare(spi_engine->ref_clk);
+err_clk_disable:
+       clk_disable_unprepare(spi_engine->clk);
+err_put_master:
+       spi_master_put(master);
+       return ret;
+}
+
+static int spi_engine_remove(struct platform_device *pdev)
+{
+       struct spi_master *master = platform_get_drvdata(pdev);
+       struct spi_engine *spi_engine = spi_master_get_devdata(master);
+       int irq = platform_get_irq(pdev, 0);
+
+       spi_unregister_master(master);
+
+       free_irq(irq, master);
+
+       writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
+       writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
+       writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
+
+       clk_disable_unprepare(spi_engine->ref_clk);
+       clk_disable_unprepare(spi_engine->clk);
+
+       return 0;
+}
+
+static const struct of_device_id spi_engine_match_table[] = {
+       { .compatible = "adi,axi-spi-engine-1.00.a" },
+       { },
+};
+
+static struct platform_driver spi_engine_driver = {
+       .probe = spi_engine_probe,
+       .remove = spi_engine_remove,
+       .driver = {
+               .name = "spi-engine",
+               .of_match_table = spi_engine_match_table,
+       },
+};
+module_platform_driver(spi_engine_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
+MODULE_LICENSE("GPL");
index 7de6f8472a8100656884647ad5447be94604e7bb..d2f0067ff5eadae1aa9f0ee8ede3217ddf91c050 100644 (file)
@@ -73,8 +73,8 @@
 
 /* Bitfields in CNTL1 */
 #define BCM2835_AUX_SPI_CNTL1_CSHIGH   0x00000700
-#define BCM2835_AUX_SPI_CNTL1_IDLE     0x00000080
-#define BCM2835_AUX_SPI_CNTL1_TXEMPTY  0x00000040
+#define BCM2835_AUX_SPI_CNTL1_TXEMPTY  0x00000080
+#define BCM2835_AUX_SPI_CNTL1_IDLE     0x00000040
 #define BCM2835_AUX_SPI_CNTL1_MSBF_IN  0x00000002
 #define BCM2835_AUX_SPI_CNTL1_KEEP_IN  0x00000001
 
@@ -212,6 +212,12 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
                ret = IRQ_HANDLED;
        }
 
+       if (!bs->tx_len) {
+               /* disable tx fifo empty interrupt */
+               bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
+                       BCM2835_AUX_SPI_CNTL1_IDLE);
+       }
+
        /* and if rx_len is 0 then wake up completion and disable spi */
        if (!bs->rx_len) {
                bcm2835aux_spi_reset_hw(bs);
index 9185f6c08459842e68a6292dbae6b6884d53fcde..e31971f91475b1b3d9f1b2011e0b6e4e2ae4697b 100644 (file)
@@ -89,10 +89,10 @@ static void mid_spi_dma_exit(struct dw_spi *dws)
        if (!dws->dma_inited)
                return;
 
-       dmaengine_terminate_all(dws->txchan);
+       dmaengine_terminate_sync(dws->txchan);
        dma_release_channel(dws->txchan);
 
-       dmaengine_terminate_all(dws->rxchan);
+       dmaengine_terminate_sync(dws->rxchan);
        dma_release_channel(dws->rxchan);
 }
 
index a6d7029a85ac8655ce44423751f2ded3b63b8816..447497e9124c922a4e3ecc20af8ab7ee91062e99 100644 (file)
@@ -47,11 +47,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
 
        /* Get basic io resource and map it */
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!mem) {
-               dev_err(&pdev->dev, "no mem resource?\n");
-               return -EINVAL;
-       }
-
        dws->regs = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(dws->regs)) {
                dev_err(&pdev->dev, "SPI region map failed\n");
index 7fd6a4c009d25d8ebfd3f739bd72220f1701320a..7cb0c1921495959dcb6c919c3cb9f1811a5e90bb 100644 (file)
@@ -84,7 +84,7 @@ struct fsl_espi_transfer {
 /* SPCOM register values */
 #define SPCOM_CS(x)            ((x) << 30)
 #define SPCOM_TRANLEN(x)       ((x) << 0)
-#define        SPCOM_TRANLEN_MAX       0xFFFF  /* Max transaction length */
+#define        SPCOM_TRANLEN_MAX       0x10000 /* Max transaction length */
 
 #define AUTOSUSPEND_TIMEOUT 2000
 
@@ -233,7 +233,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
        reinit_completion(&mpc8xxx_spi->done);
 
        /* Set SPCOM[CS] and SPCOM[TRANLEN] field */
-       if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
+       if (t->len > SPCOM_TRANLEN_MAX) {
                dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
                                " beyond the SPCOM[TRANLEN] field\n", t->len);
                return -EINVAL;
index d98c33cb64f9b4825998eb1faa2316b1e4796a7d..6a4ff27f4357eb229815c18b535327487f2df580 100644 (file)
@@ -929,7 +929,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
                                        tx->sgl, tx->nents, DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!desc_tx)
-                       goto no_dma;
+                       goto tx_nodma;
 
                desc_tx->callback = spi_imx_dma_tx_callback;
                desc_tx->callback_param = (void *)spi_imx;
@@ -941,7 +941,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
                                        rx->sgl, rx->nents, DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (!desc_rx)
-                       goto no_dma;
+                       goto rx_nodma;
 
                desc_rx->callback = spi_imx_dma_rx_callback;
                desc_rx->callback_param = (void *)spi_imx;
@@ -1008,7 +1008,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
 
        return ret;
 
-no_dma:
+rx_nodma:
+       dmaengine_terminate_all(master->dma_tx);
+tx_nodma:
        pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
                     dev_driver_string(&master->dev),
                     dev_name(&master->dev));
index 894616f687b0f314168bc17e77d14b7e477a4ec8..cf4bb36bee25c9bdf8ad39a570ceb61fda3a7421 100644 (file)
@@ -761,6 +761,7 @@ static int spi_test_run_iter(struct spi_device *spi,
                test.iterate_transfer_mask = 1;
 
        /* count number of transfers with tx/rx_buf != NULL */
+       rx_count = tx_count = 0;
        for (i = 0; i < test.transfer_count; i++) {
                if (test.transfers[i].tx_buf)
                        tx_count++;
index ab9914ad8365a6b1fb31f107dab21dac4ef94b17..0eb79368eabc1073496fb7ca0372ae94ad65e433 100644 (file)
@@ -65,8 +65,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
 #define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE  BIT(24)
 #define LPSS_CS_CONTROL_SW_MODE                        BIT(0)
 #define LPSS_CS_CONTROL_CS_HIGH                        BIT(1)
-#define LPSS_CS_CONTROL_CS_SEL_SHIFT           8
-#define LPSS_CS_CONTROL_CS_SEL_MASK            (3 << LPSS_CS_CONTROL_CS_SEL_SHIFT)
 #define LPSS_CAPS_CS_EN_SHIFT                  9
 #define LPSS_CAPS_CS_EN_MASK                   (0xf << LPSS_CAPS_CS_EN_SHIFT)
 
@@ -82,6 +80,10 @@ struct lpss_config {
        u32 rx_threshold;
        u32 tx_threshold_lo;
        u32 tx_threshold_hi;
+       /* Chip select control */
+       unsigned cs_sel_shift;
+       unsigned cs_sel_mask;
+       unsigned cs_num;
 };
 
 /* Keep these sorted with enum pxa_ssp_type */
@@ -106,6 +108,19 @@ static const struct lpss_config lpss_platforms[] = {
                .tx_threshold_lo = 160,
                .tx_threshold_hi = 224,
        },
+       {       /* LPSS_BSW_SSP */
+               .offset = 0x400,
+               .reg_general = 0x08,
+               .reg_ssp = 0x0c,
+               .reg_cs_ctrl = 0x18,
+               .reg_capabilities = -1,
+               .rx_threshold = 64,
+               .tx_threshold_lo = 160,
+               .tx_threshold_hi = 224,
+               .cs_sel_shift = 2,
+               .cs_sel_mask = 1 << 2,
+               .cs_num = 2,
+       },
        {       /* LPSS_SPT_SSP */
                .offset = 0x200,
                .reg_general = -1,
@@ -125,6 +140,8 @@ static const struct lpss_config lpss_platforms[] = {
                .rx_threshold = 1,
                .tx_threshold_lo = 16,
                .tx_threshold_hi = 48,
+               .cs_sel_shift = 8,
+               .cs_sel_mask = 3 << 8,
        },
 };
 
@@ -139,6 +156,7 @@ static bool is_lpss_ssp(const struct driver_data *drv_data)
        switch (drv_data->ssp_type) {
        case LPSS_LPT_SSP:
        case LPSS_BYT_SSP:
+       case LPSS_BSW_SSP:
        case LPSS_SPT_SSP:
        case LPSS_BXT_SSP:
                return true;
@@ -288,37 +306,50 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
        }
 }
 
+static void lpss_ssp_select_cs(struct driver_data *drv_data,
+                              const struct lpss_config *config)
+{
+       u32 value, cs;
+
+       if (!config->cs_sel_mask)
+               return;
+
+       value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
+
+       cs = drv_data->cur_msg->spi->chip_select;
+       cs <<= config->cs_sel_shift;
+       if (cs != (value & config->cs_sel_mask)) {
+               /*
+        * When switching another chip select output to active, the
+        * output must be selected first and then wait 2 ssp_clk
+        * cycles before changing its state to active. Otherwise a
+        * short glitch will occur on the previous chip select, since
+        * the output select is latched but the state control is not.
+                */
+               value &= ~config->cs_sel_mask;
+               value |= cs;
+               __lpss_ssp_write_priv(drv_data,
+                                     config->reg_cs_ctrl, value);
+               ndelay(1000000000 /
+                      (drv_data->master->max_speed_hz / 2));
+       }
+}
+
 static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
 {
        const struct lpss_config *config;
-       u32 value, cs;
+       u32 value;
 
        config = lpss_get_config(drv_data);
 
+       if (enable)
+               lpss_ssp_select_cs(drv_data, config);
+
        value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
-       if (enable) {
-               cs = drv_data->cur_msg->spi->chip_select;
-               cs <<= LPSS_CS_CONTROL_CS_SEL_SHIFT;
-               if (cs != (value & LPSS_CS_CONTROL_CS_SEL_MASK)) {
-                       /*
-                        * When switching another chip select output active
-                        * the output must be selected first and wait 2 ssp_clk
-                        * cycles before changing state to active. Otherwise
-                        * a short glitch will occur on the previous chip
-                        * select since output select is latched but state
-                        * control is not.
-                        */
-                       value &= ~LPSS_CS_CONTROL_CS_SEL_MASK;
-                       value |= cs;
-                       __lpss_ssp_write_priv(drv_data,
-                                             config->reg_cs_ctrl, value);
-                       ndelay(1000000000 /
-                              (drv_data->master->max_speed_hz / 2));
-               }
+       if (enable)
                value &= ~LPSS_CS_CONTROL_CS_HIGH;
-       } else {
+       else
                value |= LPSS_CS_CONTROL_CS_HIGH;
-       }
        __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
 }
 
@@ -496,6 +527,7 @@ static void giveback(struct driver_data *drv_data)
 {
        struct spi_transfer* last_transfer;
        struct spi_message *msg;
+       unsigned long timeout;
 
        msg = drv_data->cur_msg;
        drv_data->cur_msg = NULL;
@@ -508,6 +540,12 @@ static void giveback(struct driver_data *drv_data)
        if (last_transfer->delay_usecs)
                udelay(last_transfer->delay_usecs);
 
+       /* Wait until SSP becomes idle before deasserting the CS */
+       timeout = jiffies + msecs_to_jiffies(10);
+       while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY &&
+              !time_after(jiffies, timeout))
+               cpu_relax();
+
        /* Drop chip select UNLESS cs_change is true or we are returning
         * a message with an error, or next message is for another chip
         */
@@ -572,7 +610,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
 
 static void int_transfer_complete(struct driver_data *drv_data)
 {
-       /* Stop SSP */
+       /* Clear and disable interrupts */
        write_SSSR_CS(drv_data, drv_data->clear_sr);
        reset_sccr1(drv_data);
        if (!pxa25x_ssp_comp(drv_data))
@@ -1001,19 +1039,6 @@ static void pump_transfers(unsigned long data)
                                             "pump_transfers: DMA burst size reduced to match bits_per_word\n");
        }
 
-       /* NOTE:  PXA25x_SSP _could_ use external clocking ... */
-       cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
-       if (!pxa25x_ssp_comp(drv_data))
-               dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
-                       drv_data->master->max_speed_hz
-                               / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
-                       chip->enable_dma ? "DMA" : "PIO");
-       else
-               dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
-                       drv_data->master->max_speed_hz / 2
-                               / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
-                       chip->enable_dma ? "DMA" : "PIO");
-
        message->state = RUNNING_STATE;
 
        drv_data->dma_mapped = 0;
@@ -1040,6 +1065,19 @@ static void pump_transfers(unsigned long data)
                write_SSSR_CS(drv_data, drv_data->clear_sr);
        }
 
+       /* NOTE:  PXA25x_SSP _could_ use external clocking ... */
+       cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
+       if (!pxa25x_ssp_comp(drv_data))
+               dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+                       drv_data->master->max_speed_hz
+                               / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
+                       drv_data->dma_mapped ? "DMA" : "PIO");
+       else
+               dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
+                       drv_data->master->max_speed_hz / 2
+                               / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
+                       drv_data->dma_mapped ? "DMA" : "PIO");
+
        if (is_lpss_ssp(drv_data)) {
                if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
                    != chip->lpss_rx_threshold)
@@ -1166,6 +1204,7 @@ static int setup(struct spi_device *spi)
                break;
        case LPSS_LPT_SSP:
        case LPSS_BYT_SSP:
+       case LPSS_BSW_SSP:
        case LPSS_SPT_SSP:
        case LPSS_BXT_SSP:
                config = lpss_get_config(drv_data);
@@ -1313,7 +1352,7 @@ static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
        { "INT3430", LPSS_LPT_SSP },
        { "INT3431", LPSS_LPT_SSP },
        { "80860F0E", LPSS_BYT_SSP },
-       { "8086228E", LPSS_BYT_SSP },
+       { "8086228E", LPSS_BSW_SSP },
        { },
 };
 MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
@@ -1438,6 +1477,29 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
 }
 #endif
 
+static int pxa2xx_spi_fw_translate_cs(struct spi_master *master, unsigned cs)
+{
+       struct driver_data *drv_data = spi_master_get_devdata(master);
+
+       if (has_acpi_companion(&drv_data->pdev->dev)) {
+               switch (drv_data->ssp_type) {
+               /*
+                * For Atoms the ACPI DeviceSelection used by the Windows
+                * driver starts from 1 instead of 0 so translate it here
+                * to match what Linux expects.
+                */
+               case LPSS_BYT_SSP:
+               case LPSS_BSW_SSP:
+                       return cs - 1;
+
+               default:
+                       break;
+               }
+       }
+
+       return cs;
+}
+
 static int pxa2xx_spi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -1490,6 +1552,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
        master->setup = setup;
        master->transfer_one_message = pxa2xx_spi_transfer_one_message;
        master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
+       master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
        master->auto_runtime_pm = true;
 
        drv_data->ssp_type = ssp->type;
@@ -1576,6 +1639,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
                        tmp &= LPSS_CAPS_CS_EN_MASK;
                        tmp >>= LPSS_CAPS_CS_EN_SHIFT;
                        platform_info->num_chipselect = ffz(tmp);
+               } else if (config->cs_num) {
+                       platform_info->num_chipselect = config->cs_num;
                }
        }
        master->num_chipselect = platform_info->num_chipselect;
index 58efa98313aa0bf5c2aca5d8458032fac566a0eb..8e781c8b4374907bac18d0fc80744c736157f39c 100644 (file)
@@ -147,20 +147,9 @@ static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
 extern int pxa2xx_spi_flush(struct driver_data *drv_data);
 extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
 
-/*
- * Select the right DMA implementation.
- */
-#if defined(CONFIG_SPI_PXA2XX_DMA)
-#define SPI_PXA2XX_USE_DMA     1
 #define MAX_DMA_LEN            SZ_64K
 #define DEFAULT_DMA_CR1                (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
-#else
-#undef SPI_PXA2XX_USE_DMA
-#define MAX_DMA_LEN            0
-#define DEFAULT_DMA_CR1                0
-#endif
 
-#ifdef SPI_PXA2XX_USE_DMA
 extern bool pxa2xx_spi_dma_is_possible(size_t len);
 extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
 extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
@@ -173,29 +162,5 @@ extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
                                                  u8 bits_per_word,
                                                  u32 *burst_code,
                                                  u32 *threshold);
-#else
-static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
-static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
-{
-       return 0;
-}
-#define pxa2xx_spi_dma_transfer NULL
-static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
-                                         u32 dma_burst) {}
-static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
-static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
-{
-       return 0;
-}
-static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
-static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
-                                                        struct spi_device *spi,
-                                                        u8 bits_per_word,
-                                                        u32 *burst_code,
-                                                        u32 *threshold)
-{
-       return -ENODEV;
-}
-#endif
 
 #endif /* SPI_PXA2XX_H */
index 79a8bc4f6cec9e32ec9c78627a2e68fc85ab5e12..aa9561f586ab9a047410652f811ad4ebdffbc7ca 100644 (file)
@@ -199,6 +199,7 @@ struct rockchip_spi {
        struct sg_table rx_sg;
        struct rockchip_spi_dma_data dma_rx;
        struct rockchip_spi_dma_data dma_tx;
+       struct dma_slave_caps dma_caps;
 };
 
 static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
@@ -449,7 +450,10 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
                rxconf.direction = rs->dma_rx.direction;
                rxconf.src_addr = rs->dma_rx.addr;
                rxconf.src_addr_width = rs->n_bytes;
-               rxconf.src_maxburst = rs->n_bytes;
+               if (rs->dma_caps.max_burst > 4)
+                       rxconf.src_maxburst = 4;
+               else
+                       rxconf.src_maxburst = 1;
                dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
 
                rxdesc = dmaengine_prep_slave_sg(
@@ -466,7 +470,10 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
                txconf.direction = rs->dma_tx.direction;
                txconf.dst_addr = rs->dma_tx.addr;
                txconf.dst_addr_width = rs->n_bytes;
-               txconf.dst_maxburst = rs->n_bytes;
+               if (rs->dma_caps.max_burst > 4)
+                       txconf.dst_maxburst = 4;
+               else
+                       txconf.dst_maxburst = 1;
                dmaengine_slave_config(rs->dma_tx.ch, &txconf);
 
                txdesc = dmaengine_prep_slave_sg(
@@ -730,6 +737,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
        }
 
        if (rs->dma_tx.ch && rs->dma_rx.ch) {
+               dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
                rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
                rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
                rs->dma_tx.direction = DMA_MEM_TO_DEV;
index 64318fcfacf275c989ebe19d157eb8ff9e523ebf..eac3c960b2decb8c4aa73f73894b16ac3021beba 100644 (file)
@@ -31,6 +31,8 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
 
 #include <linux/spi/spi.h>
 
@@ -44,8 +46,9 @@ struct ti_qspi {
 
        struct spi_master       *master;
        void __iomem            *base;
-       void __iomem            *ctrl_base;
        void __iomem            *mmap_base;
+       struct regmap           *ctrl_base;
+       unsigned int            ctrl_reg;
        struct clk              *fclk;
        struct device           *dev;
 
@@ -55,7 +58,7 @@ struct ti_qspi {
        u32 cmd;
        u32 dc;
 
-       bool ctrl_mod;
+       bool mmap_enabled;
 };
 
 #define QSPI_PID                       (0x0)
@@ -65,11 +68,8 @@ struct ti_qspi {
 #define QSPI_SPI_CMD_REG               (0x48)
 #define QSPI_SPI_STATUS_REG            (0x4c)
 #define QSPI_SPI_DATA_REG              (0x50)
-#define QSPI_SPI_SETUP0_REG            (0x54)
+#define QSPI_SPI_SETUP_REG(n)          ((0x54 + 4 * n))
 #define QSPI_SPI_SWITCH_REG            (0x64)
-#define QSPI_SPI_SETUP1_REG            (0x58)
-#define QSPI_SPI_SETUP2_REG            (0x5c)
-#define QSPI_SPI_SETUP3_REG            (0x60)
 #define QSPI_SPI_DATA_REG_1            (0x68)
 #define QSPI_SPI_DATA_REG_2            (0x6c)
 #define QSPI_SPI_DATA_REG_3            (0x70)
@@ -109,6 +109,17 @@ struct ti_qspi {
 
 #define QSPI_AUTOSUSPEND_TIMEOUT         2000
 
+#define MEM_CS_EN(n)                   ((n + 1) << 8)
+#define MEM_CS_MASK                    (7 << 8)
+
+#define MM_SWITCH                      0x1
+
+#define QSPI_SETUP_RD_NORMAL           (0x0 << 12)
+#define QSPI_SETUP_RD_DUAL             (0x1 << 12)
+#define QSPI_SETUP_RD_QUAD             (0x3 << 12)
+#define QSPI_SETUP_ADDR_SHIFT          8
+#define QSPI_SETUP_DUMMY_SHIFT         10
+
 static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
                unsigned long reg)
 {
@@ -366,6 +377,72 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        return 0;
 }
 
+static void ti_qspi_enable_memory_map(struct spi_device *spi)
+{
+       struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
+
+       ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
+       if (qspi->ctrl_base) {
+               regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
+                                  MEM_CS_EN(spi->chip_select),
+                                  MEM_CS_MASK);
+       }
+       qspi->mmap_enabled = true;
+}
+
+static void ti_qspi_disable_memory_map(struct spi_device *spi)
+{
+       struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
+
+       ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
+       if (qspi->ctrl_base)
+               regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
+                                  0, MEM_CS_MASK);
+       qspi->mmap_enabled = false;
+}
+
+static void ti_qspi_setup_mmap_read(struct spi_device *spi,
+                                   struct spi_flash_read_message *msg)
+{
+       struct ti_qspi  *qspi = spi_master_get_devdata(spi->master);
+       u32 memval = msg->read_opcode;
+
+       switch (msg->data_nbits) {
+       case SPI_NBITS_QUAD:
+               memval |= QSPI_SETUP_RD_QUAD;
+               break;
+       case SPI_NBITS_DUAL:
+               memval |= QSPI_SETUP_RD_DUAL;
+               break;
+       default:
+               memval |= QSPI_SETUP_RD_NORMAL;
+               break;
+       }
+       memval |= ((msg->addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
+                  msg->dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
+       ti_qspi_write(qspi, memval,
+                     QSPI_SPI_SETUP_REG(spi->chip_select));
+}
+
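As a worked example of the SETUP register encoding used above (values hypothetical, not taken from this patch): a quad-output read opcode of 0x6b with three address bytes and one dummy byte would be composed from the bits defined earlier in this file roughly as follows.

    /* Hypothetical illustration only -- not part of this patch. */
    static u32 example_qspi_setup_val(void)
    {
            u32 memval = 0x6b;                              /* placeholder read opcode */

            memval |= QSPI_SETUP_RD_QUAD;                   /* 0x3 << 12 */
            memval |= (3 - 1) << QSPI_SETUP_ADDR_SHIFT;     /* 3 address bytes -> 2 << 8 */
            memval |= 1 << QSPI_SETUP_DUMMY_SHIFT;          /* 1 dummy byte -> 1 << 10 */

            return memval;                                  /* == 0x366b */
    }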
+static int ti_qspi_spi_flash_read(struct  spi_device *spi,
+                                 struct spi_flash_read_message *msg)
+{
+       struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+       int ret = 0;
+
+       mutex_lock(&qspi->list_lock);
+
+       if (!qspi->mmap_enabled)
+               ti_qspi_enable_memory_map(spi);
+       ti_qspi_setup_mmap_read(spi, msg);
+       memcpy_fromio(msg->buf, qspi->mmap_base + msg->from, msg->len);
+       msg->retlen = msg->len;
+
+       mutex_unlock(&qspi->list_lock);
+
+       return ret;
+}
+
 static int ti_qspi_start_transfer_one(struct spi_master *master,
                struct spi_message *m)
 {
@@ -398,6 +475,9 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
 
        mutex_lock(&qspi->list_lock);
 
+       if (qspi->mmap_enabled)
+               ti_qspi_disable_memory_map(spi);
+
        list_for_each_entry(t, &m->transfers, transfer_list) {
                qspi->cmd |= QSPI_WLEN(t->bits_per_word);
 
@@ -441,7 +521,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
 {
        struct  ti_qspi *qspi;
        struct spi_master *master;
-       struct resource         *r, *res_ctrl, *res_mmap;
+       struct resource         *r, *res_mmap;
        struct device_node *np = pdev->dev.of_node;
        u32 max_freq;
        int ret = 0, num_cs, irq;
@@ -487,16 +567,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
                }
        }
 
-       res_ctrl = platform_get_resource_byname(pdev,
-                       IORESOURCE_MEM, "qspi_ctrlmod");
-       if (res_ctrl == NULL) {
-               res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-               if (res_ctrl == NULL) {
-                       dev_dbg(&pdev->dev,
-                               "control module resources not required\n");
-               }
-       }
-
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "no irq resource?\n");
@@ -511,20 +581,31 @@ static int ti_qspi_probe(struct platform_device *pdev)
                goto free_master;
        }
 
-       if (res_ctrl) {
-               qspi->ctrl_mod = true;
-               qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl);
-               if (IS_ERR(qspi->ctrl_base)) {
-                       ret = PTR_ERR(qspi->ctrl_base);
-                       goto free_master;
-               }
-       }
-
        if (res_mmap) {
-               qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
+               qspi->mmap_base = devm_ioremap_resource(&pdev->dev,
+                                                       res_mmap);
+               master->spi_flash_read = ti_qspi_spi_flash_read;
                if (IS_ERR(qspi->mmap_base)) {
-                       ret = PTR_ERR(qspi->mmap_base);
-                       goto free_master;
+                       dev_err(&pdev->dev,
+                               "falling back to PIO mode\n");
+                       master->spi_flash_read = NULL;
+               }
+       }
+       qspi->mmap_enabled = false;
+
+       if (of_property_read_bool(np, "syscon-chipselects")) {
+               qspi->ctrl_base =
+               syscon_regmap_lookup_by_phandle(np,
+                                               "syscon-chipselects");
+               if (IS_ERR(qspi->ctrl_base))
+                       return PTR_ERR(qspi->ctrl_base);
+               ret = of_property_read_u32_index(np,
+                                                "syscon-chipselects",
+                                                1, &qspi->ctrl_reg);
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "couldn't get ctrl_mod reg index\n");
+                       return ret;
                }
        }
 
index 47eff8012a7715c362eb3ac3e706f8a9ea6e71c2..1413a6b027e651d4850b538535bfc793bee7a589 100644 (file)
@@ -144,6 +144,8 @@ SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
 SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
 SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
 
+SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
+
 static struct attribute *spi_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL,
@@ -181,6 +183,7 @@ static struct attribute *spi_device_statistics_attrs[] = {
        &dev_attr_spi_device_transfer_bytes_histo14.attr,
        &dev_attr_spi_device_transfer_bytes_histo15.attr,
        &dev_attr_spi_device_transfer_bytes_histo16.attr,
+       &dev_attr_spi_device_transfers_split_maxsize.attr,
        NULL,
 };
 
@@ -223,6 +226,7 @@ static struct attribute *spi_master_statistics_attrs[] = {
        &dev_attr_spi_master_transfer_bytes_histo14.attr,
        &dev_attr_spi_master_transfer_bytes_histo15.attr,
        &dev_attr_spi_master_transfer_bytes_histo16.attr,
+       &dev_attr_spi_master_transfers_split_maxsize.attr,
        NULL,
 };
 
@@ -1024,6 +1028,8 @@ out:
        if (msg->status && master->handle_err)
                master->handle_err(master, msg);
 
+       spi_res_release(master, msg);
+
        spi_finalize_current_message(master);
 
        return ret;
@@ -1152,6 +1158,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
                }
        }
 
+       mutex_lock(&master->bus_lock_mutex);
        trace_spi_message_start(master->cur_msg);
 
        if (master->prepare_message) {
@@ -1161,6 +1168,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
                                "failed to prepare message: %d\n", ret);
                        master->cur_msg->status = ret;
                        spi_finalize_current_message(master);
+                       mutex_unlock(&master->bus_lock_mutex);
                        return;
                }
                master->cur_msg_prepared = true;
@@ -1170,6 +1178,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
        if (ret) {
                master->cur_msg->status = ret;
                spi_finalize_current_message(master);
+               mutex_unlock(&master->bus_lock_mutex);
                return;
        }
 
@@ -1177,8 +1186,10 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
        if (ret) {
                dev_err(&master->dev,
                        "failed to transfer one message from queue\n");
+               mutex_unlock(&master->bus_lock_mutex);
                return;
        }
+       mutex_unlock(&master->bus_lock_mutex);
 }
 
 /**
@@ -1581,13 +1592,30 @@ static void of_register_spi_devices(struct spi_master *master) { }
 static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
 {
        struct spi_device *spi = data;
+       struct spi_master *master = spi->master;
 
        if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
                struct acpi_resource_spi_serialbus *sb;
 
                sb = &ares->data.spi_serial_bus;
                if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
-                       spi->chip_select = sb->device_selection;
+                       /*
+                        * ACPI DeviceSelection numbering is handled by the
+                        * host controller driver in Windows and can vary
+                        * from driver to driver. In Linux we always expect
+                        * 0 .. max - 1 so we need to ask the driver to
+                        * translate between the two schemes.
+                        */
+                       if (master->fw_translate_cs) {
+                               int cs = master->fw_translate_cs(master,
+                                               sb->device_selection);
+                               if (cs < 0)
+                                       return cs;
+                               spi->chip_select = cs;
+                       } else {
+                               spi->chip_select = sb->device_selection;
+                       }
+
                        spi->max_speed_hz = sb->connection_speed;
 
                        if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
@@ -2013,6 +2041,334 @@ struct spi_master *spi_busnum_to_master(u16 bus_num)
 }
 EXPORT_SYMBOL_GPL(spi_busnum_to_master);
 
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for SPI resource management */
+
+/**
+ * spi_res_alloc - allocate a spi resource that is life-cycle managed
+ *                 during the processing of a spi_message while using
+ *                 spi_transfer_one
+ * @spi:     the spi device for which we allocate memory
+ * @release: the release code to execute for this resource
+ * @size:    size to alloc and return
+ * @gfp:     GFP allocation flags
+ *
+ * Return: the pointer to the allocated data
+ *
+ * This may get enhanced in the future to allocate from a memory pool
+ * of the @spi_device or @spi_master to avoid repeated allocations.
+ */
+void *spi_res_alloc(struct spi_device *spi,
+                   spi_res_release_t release,
+                   size_t size, gfp_t gfp)
+{
+       struct spi_res *sres;
+
+       sres = kzalloc(sizeof(*sres) + size, gfp);
+       if (!sres)
+               return NULL;
+
+       INIT_LIST_HEAD(&sres->entry);
+       sres->release = release;
+
+       return sres->data;
+}
+EXPORT_SYMBOL_GPL(spi_res_alloc);
+
+/**
+ * spi_res_free - free an spi resource
+ * @res: pointer to the custom data of a resource
+ *
+ */
+void spi_res_free(void *res)
+{
+       struct spi_res *sres = container_of(res, struct spi_res, data);
+
+       if (!res)
+               return;
+
+       WARN_ON(!list_empty(&sres->entry));
+       kfree(sres);
+}
+EXPORT_SYMBOL_GPL(spi_res_free);
+
+/**
+ * spi_res_add - add a spi_res to the spi_message
+ * @message: the spi message
+ * @res:     the spi_resource
+ */
+void spi_res_add(struct spi_message *message, void *res)
+{
+       struct spi_res *sres = container_of(res, struct spi_res, data);
+
+       WARN_ON(!list_empty(&sres->entry));
+       list_add_tail(&sres->entry, &message->resources);
+}
+EXPORT_SYMBOL_GPL(spi_res_add);
+
+/**
+ * spi_res_release - release all spi resources for this message
+ * @master:  the @spi_master
+ * @message: the @spi_message
+ */
+void spi_res_release(struct spi_master *master,
+                    struct spi_message *message)
+{
+       struct spi_res *res;
+
+       while (!list_empty(&message->resources)) {
+               res = list_last_entry(&message->resources,
+                                     struct spi_res, entry);
+
+               if (res->release)
+                       res->release(master, message, res->data);
+
+               list_del(&res->entry);
+
+               kfree(res);
+       }
+}
+EXPORT_SYMBOL_GPL(spi_res_release);
+
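A minimal sketch of how this resource API might be used; the helper names and the dummy payload below are hypothetical, and only the spi_res_alloc()/spi_res_add() calls and the spi_res_release_t callback signature come from the code above. The resource is freed, and its release callback run, by spi_res_release() once the message is finished.

    /* Hypothetical illustration only -- not part of this patch. */
    #include <linux/gfp.h>
    #include <linux/spi/spi.h>

    struct my_dummy_state {
            int value;
    };

    /* matches spi_res_release_t: invoked from spi_res_release() */
    static void my_dummy_release(struct spi_master *master,
                                 struct spi_message *msg, void *res)
    {
            struct my_dummy_state *state = res;

            pr_debug("releasing dummy state %d\n", state->value);
    }

    /* attach a life-cycle managed resource to @msg */
    static int my_attach_dummy_state(struct spi_message *msg, int value)
    {
            struct my_dummy_state *state;

            state = spi_res_alloc(msg->spi, my_dummy_release,
                                  sizeof(*state), GFP_KERNEL);
            if (!state)
                    return -ENOMEM;

            state->value = value;
            spi_res_add(msg, state);        /* freed in spi_res_release() */

            return 0;
    }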
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for spi_message alterations */
+
+static void __spi_replace_transfers_release(struct spi_master *master,
+                                           struct spi_message *msg,
+                                           void *res)
+{
+       struct spi_replaced_transfers *rxfer = res;
+       size_t i;
+
+       /* call extra callback if requested */
+       if (rxfer->release)
+               rxfer->release(master, msg, res);
+
+       /* insert replaced transfers back into the message */
+       list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
+
+       /* remove the formerly inserted entries */
+       for (i = 0; i < rxfer->inserted; i++)
+               list_del(&rxfer->inserted_transfers[i].transfer_list);
+}
+
+/**
+ * spi_replace_transfers - replace transfers with several transfers
+ *                         and register change with spi_message.resources
+ * @msg:           the spi_message we work upon
+ * @xfer_first:    the first spi_transfer we want to replace
+ * @remove:        number of transfers to remove
+ * @insert:        the number of transfers we want to insert instead
+ * @release:       extra release code necessary in some circumstances
+ * @extradatasize: extra data to allocate (with alignment guarantees
+ *                 of struct @spi_transfer)
+ * @gfp:           GFP allocation flags
+ *
+ * Return: pointer to @spi_replaced_transfers,
+ *         or an ERR_PTR() in case of errors.
+ */
+struct spi_replaced_transfers *spi_replace_transfers(
+       struct spi_message *msg,
+       struct spi_transfer *xfer_first,
+       size_t remove,
+       size_t insert,
+       spi_replaced_release_t release,
+       size_t extradatasize,
+       gfp_t gfp)
+{
+       struct spi_replaced_transfers *rxfer;
+       struct spi_transfer *xfer;
+       size_t i;
+
+       /* allocate the structure using spi_res */
+       rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
+                             insert * sizeof(struct spi_transfer)
+                             + sizeof(struct spi_replaced_transfers)
+                             + extradatasize,
+                             gfp);
+       if (!rxfer)
+               return ERR_PTR(-ENOMEM);
+
+       /* the release code to invoke before running the generic release */
+       rxfer->release = release;
+
+       /* assign extradata */
+       if (extradatasize)
+               rxfer->extradata =
+                       &rxfer->inserted_transfers[insert];
+
+       /* init the replaced_transfers list */
+       INIT_LIST_HEAD(&rxfer->replaced_transfers);
+
+       /* assign the list_entry after which we should reinsert
+        * the @replaced_transfers - it may be spi_message.transfers!
+        */
+       rxfer->replaced_after = xfer_first->transfer_list.prev;
+
+       /* remove the requested number of transfers */
+       for (i = 0; i < remove; i++) {
+               /* if the entry after replaced_after is msg->transfers
+                * then we have been requested to remove more transfers
+                * than are in the list
+                */
+               if (rxfer->replaced_after->next == &msg->transfers) {
+                       dev_err(&msg->spi->dev,
+                               "requested to remove more spi_transfers than are available\n");
+                       /* insert replaced transfers back into the message */
+                       list_splice(&rxfer->replaced_transfers,
+                                   rxfer->replaced_after);
+
+                       /* free the spi_replace_transfer structure */
+                       spi_res_free(rxfer);
+
+                       /* and return with an error */
+                       return ERR_PTR(-EINVAL);
+               }
+
+               /* remove the entry after replaced_after from list of
+                * transfers and add it to list of replaced_transfers
+                */
+               list_move_tail(rxfer->replaced_after->next,
+                              &rxfer->replaced_transfers);
+       }
+
+       /* create copies of the given xfer with identical settings,
+        * based on the first transfer that is to be removed
+        */
+       for (i = 0; i < insert; i++) {
+               /* we need to run in reverse order */
+               xfer = &rxfer->inserted_transfers[insert - 1 - i];
+
+               /* copy all spi_transfer data */
+               memcpy(xfer, xfer_first, sizeof(*xfer));
+
+               /* add to list */
+               list_add(&xfer->transfer_list, rxfer->replaced_after);
+
+               /* clear cs_change and delay_usecs for all but the last */
+               if (i) {
+                       xfer->cs_change = false;
+                       xfer->delay_usecs = 0;
+               }
+       }
+
+       /* set up inserted */
+       rxfer->inserted = insert;
+
+       /* and register it with spi_res/spi_message */
+       spi_res_add(msg, rxfer);
+
+       return rxfer;
+}
+EXPORT_SYMBOL_GPL(spi_replace_transfers);
+
+int __spi_split_transfer_maxsize(struct spi_master *master,
+                                struct spi_message *msg,
+                                struct spi_transfer **xferp,
+                                size_t maxsize,
+                                gfp_t gfp)
+{
+       struct spi_transfer *xfer = *xferp, *xfers;
+       struct spi_replaced_transfers *srt;
+       size_t offset;
+       size_t count, i;
+
+       /* warn once about the fact that we are splitting a transfer */
+       dev_warn_once(&msg->spi->dev,
+                     "spi_transfer of length %i exceeds max length of %zu - needed to split transfers\n",
+                     xfer->len, maxsize);
+
+       /* calculate how many we have to replace */
+       count = DIV_ROUND_UP(xfer->len, maxsize);
+
+       /* create replacement */
+       srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
+       if (IS_ERR(srt))
+               return PTR_ERR(srt);
+       xfers = srt->inserted_transfers;
+
+       /* now handle each of those newly inserted spi_transfers.
+        * note that the replacement spi_transfers are all preset
+        * to the same values as *xferp, so tx_buf, rx_buf and len
+        * are all identical (as well as most others),
+        * so we just have to fix up len and the pointers.
+        *
+        * this also includes support for the deprecated
+        * spi_message.is_dma_mapped interface
+        */
+
+       /* the first transfer just needs the length modified, so we
+        * run it outside the loop
+        */
+       xfers[0].len = min(maxsize, xfer[0].len);
+
+       /* all the others need rx_buf/tx_buf also set */
+       for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
+               /* update rx_buf, tx_buf and dma */
+               if (xfers[i].rx_buf)
+                       xfers[i].rx_buf += offset;
+               if (xfers[i].rx_dma)
+                       xfers[i].rx_dma += offset;
+               if (xfers[i].tx_buf)
+                       xfers[i].tx_buf += offset;
+               if (xfers[i].tx_dma)
+                       xfers[i].tx_dma += offset;
+
+               /* update length */
+               xfers[i].len = min(maxsize, xfers[i].len - offset);
+       }
+
+       /* we set up xferp to the last entry we have inserted,
+        * so that we skip those already split transfers
+        */
+       *xferp = &xfers[count - 1];
+
+       /* increment statistics counters */
+       SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
+                                      transfers_split_maxsize);
+       SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
+                                      transfers_split_maxsize);
+
+       return 0;
+}
+
+/**
+ * spi_split_transfers_maxsize - split spi transfers into multiple transfers
+ *                               when an individual transfer exceeds a
+ *                               certain size
+ * @master:  the @spi_master for this transfer
+ * @msg:     the @spi_message to transform
+ * @maxsize: the maximum length any individual spi_transfer may have
+ * @gfp:     GFP allocation flags
+ *
+ * Return: status of transformation
+ */
+int spi_split_transfers_maxsize(struct spi_master *master,
+                               struct spi_message *msg,
+                               size_t maxsize,
+                               gfp_t gfp)
+{
+       struct spi_transfer *xfer;
+       int ret;
+
+       /* iterate over the transfer_list,
+        * but note that xfer is advanced to the last transfer inserted
+        * to avoid checking sizes again unnecessarily (also, xfer
+        * may belong to a different list by the time the replacement
+        * has happened)
+        */
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               if (xfer->len > maxsize) {
+                       ret = __spi_split_transfer_maxsize(
+                               master, msg, &xfer, maxsize, gfp);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
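A sketch of how a controller driver could use the new helper to enforce a hardware transfer-length limit; the prepare_message hook is a standard spi_master callback, but the function name and the limit below are assumptions for illustration.

    /* Hypothetical illustration only -- not part of this patch. */
    #include <linux/gfp.h>
    #include <linux/spi/spi.h>

    #define MY_MAX_XFER_LEN         65536   /* assumed controller limit */

    /* split over-long transfers before the message reaches transfer_one() */
    static int my_prepare_message(struct spi_master *master,
                                  struct spi_message *msg)
    {
            return spi_split_transfers_maxsize(master, msg,
                                               MY_MAX_XFER_LEN, GFP_KERNEL);
    }

    /* in probe():  master->prepare_message = my_prepare_message; */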
 
 /*-------------------------------------------------------------------------*/
 
@@ -2351,6 +2707,46 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
 EXPORT_SYMBOL_GPL(spi_async_locked);
 
 
+int spi_flash_read(struct spi_device *spi,
+                  struct spi_flash_read_message *msg)
+
+{
+       struct spi_master *master = spi->master;
+       int ret;
+
+       if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
+            msg->addr_nbits == SPI_NBITS_DUAL) &&
+           !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+               return -EINVAL;
+       if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
+            msg->addr_nbits == SPI_NBITS_QUAD) &&
+           !(spi->mode & SPI_TX_QUAD))
+               return -EINVAL;
+       if (msg->data_nbits == SPI_NBITS_DUAL &&
+           !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+               return -EINVAL;
+       if (msg->data_nbits == SPI_NBITS_QUAD &&
+           !(spi->mode &  SPI_RX_QUAD))
+               return -EINVAL;
+
+       if (master->auto_runtime_pm) {
+               ret = pm_runtime_get_sync(master->dev.parent);
+               if (ret < 0) {
+                       dev_err(&master->dev, "Failed to power device: %d\n",
+                               ret);
+                       return ret;
+               }
+       }
+       mutex_lock(&master->bus_lock_mutex);
+       ret = master->spi_flash_read(spi, msg);
+       mutex_unlock(&master->bus_lock_mutex);
+       if (master->auto_runtime_pm)
+               pm_runtime_put(master->dev.parent);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(spi_flash_read);
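For the caller side, a hedged sketch of how a flash driver might fill in a spi_flash_read_message and use this accelerated path when the controller provides one; the opcode and addressing values are placeholders, and a real driver would derive them from the flash geometry.

    /* Hypothetical illustration only -- not part of this patch. */
    #include <linux/spi/spi.h>

    static int my_flash_read(struct spi_device *spi, loff_t from,
                             size_t len, void *buf)
    {
            struct spi_flash_read_message msg = { 0 };
            int ret;

            if (!spi->master->spi_flash_read)
                    return -EOPNOTSUPP;     /* no accelerated read path */

            msg.buf = buf;
            msg.from = from;
            msg.len = len;
            msg.read_opcode = 0x03;         /* placeholder: normal read */
            msg.addr_width = 3;             /* placeholder: 3-byte addressing */
            msg.dummy_bytes = 0;
            msg.opcode_nbits = SPI_NBITS_SINGLE;
            msg.addr_nbits = SPI_NBITS_SINGLE;
            msg.data_nbits = SPI_NBITS_SINGLE;

            ret = spi_flash_read(spi, &msg);
            if (ret)
                    return ret;

            return msg.retlen == len ? 0 : -EIO;
    }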
+
 /*-------------------------------------------------------------------------*/
 
 /* Utility methods for SPI master protocol drivers, layered on
index be822f7a9ce6262442ce3bad9426ad6ff2fb04e9..aca282d454213addcad7169c2a8c1732a41e195e 100644 (file)
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+#include <linux/bitmap.h>
 #include <linux/delay.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -47,9 +48,9 @@
 #define SPMI_MAPPING_BIT_IS_1_FLAG(X)  (((X) >> 8) & 0x1)
 #define SPMI_MAPPING_BIT_IS_1_RESULT(X)        (((X) >> 0) & 0xFF)
 
-#define SPMI_MAPPING_TABLE_LEN         255
 #define SPMI_MAPPING_TABLE_TREE_DEPTH  16      /* Maximum of 16-bits */
-#define PPID_TO_CHAN_TABLE_SZ          BIT(12) /* PPID is 12bit chan is 1byte*/
+#define PMIC_ARB_MAX_PPID              BIT(12) /* PPID is 12bit */
+#define PMIC_ARB_CHAN_VALID            BIT(15)
 
 /* Ownership Table */
 #define SPMI_OWNERSHIP_TABLE_REG(N)    (0x0700 + (4 * (N)))
@@ -85,9 +86,7 @@ enum pmic_arb_cmd_op_code {
 };
 
 /* Maximum number of support PMIC peripherals */
-#define PMIC_ARB_MAX_PERIPHS           256
-#define PMIC_ARB_MAX_CHNL              128
-#define PMIC_ARB_PERIPH_ID_VALID       (1 << 15)
+#define PMIC_ARB_MAX_PERIPHS           512
 #define PMIC_ARB_TIMEOUT_US            100
 #define PMIC_ARB_MAX_TRANS_BYTES       (8)
 
@@ -125,18 +124,22 @@ struct spmi_pmic_arb_dev {
        void __iomem            *wr_base;
        void __iomem            *intr;
        void __iomem            *cnfg;
+       void __iomem            *core;
+       resource_size_t         core_size;
        raw_spinlock_t          lock;
        u8                      channel;
        int                     irq;
        u8                      ee;
-       u8                      min_apid;
-       u8                      max_apid;
-       u32                     mapping_table[SPMI_MAPPING_TABLE_LEN];
+       u16                     min_apid;
+       u16                     max_apid;
+       u32                     *mapping_table;
+       DECLARE_BITMAP(mapping_table_valid, PMIC_ARB_MAX_PERIPHS);
        struct irq_domain       *domain;
        struct spmi_controller  *spmic;
-       u16                     apid_to_ppid[256];
+       u16                     *apid_to_ppid;
        const struct pmic_arb_ver_ops *ver_ops;
-       u8                      *ppid_to_chan;
+       u16                     *ppid_to_chan;
+       u16                     last_channel;
 };
 
 /**
@@ -158,7 +161,8 @@ struct spmi_pmic_arb_dev {
  */
 struct pmic_arb_ver_ops {
        /* spmi commands (read_cmd, write_cmd, cmd) functionality */
-       u32 (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr);
+       int (*offset)(struct spmi_pmic_arb_dev *dev, u8 sid, u16 addr,
+                     u32 *offset);
        u32 (*fmt_cmd)(u8 opc, u8 sid, u16 addr, u8 bc);
        int (*non_data_cmd)(struct spmi_controller *ctrl, u8 opc, u8 sid);
        /* Interrupts controller functionality (offset of PIC registers) */
@@ -212,7 +216,14 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
        struct spmi_pmic_arb_dev *dev = spmi_controller_get_drvdata(ctrl);
        u32 status = 0;
        u32 timeout = PMIC_ARB_TIMEOUT_US;
-       u32 offset = dev->ver_ops->offset(dev, sid, addr) + PMIC_ARB_STATUS;
+       u32 offset;
+       int rc;
+
+       rc = dev->ver_ops->offset(dev, sid, addr, &offset);
+       if (rc)
+               return rc;
+
+       offset += PMIC_ARB_STATUS;
 
        while (timeout--) {
                status = readl_relaxed(base + offset);
@@ -257,7 +268,11 @@ pmic_arb_non_data_cmd_v1(struct spmi_controller *ctrl, u8 opc, u8 sid)
        unsigned long flags;
        u32 cmd;
        int rc;
-       u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, 0);
+       u32 offset;
+
+       rc = pmic_arb->ver_ops->offset(pmic_arb, sid, 0, &offset);
+       if (rc)
+               return rc;
 
        cmd = ((opc | 0x40) << 27) | ((sid & 0xf) << 20);
 
@@ -297,7 +312,11 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        u8 bc = len - 1;
        u32 cmd;
        int rc;
-       u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+       u32 offset;
+
+       rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+       if (rc)
+               return rc;
 
        if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
                dev_err(&ctrl->dev,
@@ -344,7 +363,11 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
        u8 bc = len - 1;
        u32 cmd;
        int rc;
-       u32 offset = pmic_arb->ver_ops->offset(pmic_arb, sid, addr);
+       u32 offset;
+
+       rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr, &offset);
+       if (rc)
+               return rc;
 
        if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
                dev_err(&ctrl->dev,
@@ -614,6 +637,10 @@ static int search_mapping_table(struct spmi_pmic_arb_dev *pa,
        u32 data;
 
        for (i = 0; i < SPMI_MAPPING_TABLE_TREE_DEPTH; ++i) {
+               if (!test_and_set_bit(index, pa->mapping_table_valid))
+                       mapping_table[index] = readl_relaxed(pa->cnfg +
+                                               SPMI_MAPPING_TABLE_REG(index));
+
                data = mapping_table[index];
 
                if (ppid & (1 << SPMI_MAPPING_BIT_INDEX(data))) {
@@ -701,18 +728,61 @@ static int qpnpint_irq_domain_map(struct irq_domain *d,
 }
 
 /* v1 offset per ee */
-static u32 pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v1(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
 {
-       return 0x800 + 0x80 * pa->channel;
+       *offset = 0x800 + 0x80 * pa->channel;
+       return 0;
 }
 
+static u16 pmic_arb_find_chan(struct spmi_pmic_arb_dev *pa, u16 ppid)
+{
+       u32 regval, offset;
+       u16 chan;
+       u16 id;
+
+       /*
+        * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
+        * ppid_to_chan is an in-memory invert of that table.
+        */
+       for (chan = pa->last_channel; ; chan++) {
+               offset = PMIC_ARB_REG_CHNL(chan);
+               if (offset >= pa->core_size)
+                       break;
+
+               regval = readl_relaxed(pa->core + offset);
+               if (!regval)
+                       continue;
+
+               id = (regval >> 8) & PMIC_ARB_PPID_MASK;
+               pa->ppid_to_chan[id] = chan | PMIC_ARB_CHAN_VALID;
+               if (id == ppid) {
+                       chan |= PMIC_ARB_CHAN_VALID;
+                       break;
+               }
+       }
+       pa->last_channel = chan & ~PMIC_ARB_CHAN_VALID;
+
+       return chan;
+}
+
+
 /* v2 offset per ppid (chan) and per ee */
-static u32 pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr)
+static int
+pmic_arb_offset_v2(struct spmi_pmic_arb_dev *pa, u8 sid, u16 addr, u32 *offset)
 {
        u16 ppid = (sid << 8) | (addr >> 8);
-       u8  chan = pa->ppid_to_chan[ppid];
+       u16 chan;
 
-       return 0x1000 * pa->ee + 0x8000 * chan;
+       chan = pa->ppid_to_chan[ppid];
+       if (!(chan & PMIC_ARB_CHAN_VALID))
+               chan = pmic_arb_find_chan(pa, ppid);
+       if (!(chan & PMIC_ARB_CHAN_VALID))
+               return -ENODEV;
+       chan &= ~PMIC_ARB_CHAN_VALID;
+
+       *offset = 0x1000 * pa->ee + 0x8000 * chan;
+       return 0;
 }
 
 static u32 pmic_arb_fmt_cmd_v1(u8 opc, u8 sid, u16 addr, u8 bc)
@@ -797,7 +867,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
        struct resource *res;
        void __iomem *core;
        u32 channel, ee, hw_ver;
-       int err, i;
+       int err;
        bool is_v1;
 
        ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
@@ -808,6 +878,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
        pa->spmic = ctrl;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+       pa->core_size = resource_size(res);
        core = devm_ioremap_resource(&ctrl->dev, res);
        if (IS_ERR(core)) {
                err = PTR_ERR(core);
@@ -825,10 +896,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                pa->wr_base = core;
                pa->rd_base = core;
        } else {
-               u8  chan;
-               u16 ppid;
-               u32 regval;
-
+               pa->core = core;
                pa->ver_ops = &pmic_arb_v2;
 
                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -847,24 +915,14 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
                        goto err_put_ctrl;
                }
 
-               pa->ppid_to_chan = devm_kzalloc(&ctrl->dev,
-                                       PPID_TO_CHAN_TABLE_SZ, GFP_KERNEL);
+               pa->ppid_to_chan = devm_kcalloc(&ctrl->dev,
+                                               PMIC_ARB_MAX_PPID,
+                                               sizeof(*pa->ppid_to_chan),
+                                               GFP_KERNEL);
                if (!pa->ppid_to_chan) {
                        err = -ENOMEM;
                        goto err_put_ctrl;
                }
-               /*
-                * PMIC_ARB_REG_CHNL is a table in HW mapping channel to ppid.
-                * ppid_to_chan is an in-memory invert of that table.
-                */
-               for (chan = 0; chan < PMIC_ARB_MAX_CHNL; ++chan) {
-                       regval = readl_relaxed(core + PMIC_ARB_REG_CHNL(chan));
-                       if (!regval)
-                               continue;
-
-                       ppid = (regval >> 8) & 0xFFF;
-                       pa->ppid_to_chan[ppid] = chan;
-               }
        }
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr");
@@ -915,9 +973,20 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev)
 
        pa->ee = ee;
 
-       for (i = 0; i < ARRAY_SIZE(pa->mapping_table); ++i)
-               pa->mapping_table[i] = readl_relaxed(
-                               pa->cnfg + SPMI_MAPPING_TABLE_REG(i));
+       pa->apid_to_ppid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS,
+                                           sizeof(*pa->apid_to_ppid),
+                                           GFP_KERNEL);
+       if (!pa->apid_to_ppid) {
+               err = -ENOMEM;
+               goto err_put_ctrl;
+       }
+
+       pa->mapping_table = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PERIPHS - 1,
+                                       sizeof(*pa->mapping_table), GFP_KERNEL);
+       if (!pa->mapping_table) {
+               err = -ENOMEM;
+               goto err_put_ctrl;
+       }
 
        /* Initialize max_apid/min_apid to the opposite bounds, during
         * the irq domain translation, we are sure to update these */
index cde5ff7529eb28b26e684eea53f4e168361937ff..d1a750760cf30b270b44929ac91eb22971d15b22 100644 (file)
@@ -613,9 +613,10 @@ out:
        return err;
 }
 
-static int ssb_bus_register(struct ssb_bus *bus,
-                           ssb_invariants_func_t get_invariants,
-                           unsigned long baseaddr)
+static int __maybe_unused
+ssb_bus_register(struct ssb_bus *bus,
+                ssb_invariants_func_t get_invariants,
+                unsigned long baseaddr)
 {
        int err;
 
index e237e9f3312d6b99e5d2eac4a07f534ce03690c2..0754a37c967495fdafdf5bee4bd67d505c347dda 100644 (file)
@@ -1057,8 +1057,7 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
 {
 }
 
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                       size_t len,
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
 {
        struct ion_buffer *buffer = dmabuf->priv;
@@ -1076,8 +1075,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
        return PTR_ERR_OR_ZERO(vaddr);
 }
 
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                      size_t len,
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                       enum dma_data_direction direction)
 {
        struct ion_buffer *buffer = dmabuf->priv;
index b8dcf5a26cc4ac1250bda0ded24d1ceff3c8cf24..da34bc12cd7cc5f9db3ea74d8e41eccf139873d3 100644 (file)
@@ -109,7 +109,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
        if (offset > dma_buf->size || size > dma_buf->size - offset)
                return -EINVAL;
 
-       ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+       ret = dma_buf_begin_cpu_access(dma_buf, dir);
        if (ret)
                return ret;
 
@@ -139,7 +139,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
                copy_offset = 0;
        }
 err:
-       dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+       dma_buf_end_cpu_access(dma_buf, dir);
        return ret;
 }
 
index bad355100825d569834065a8b6e41aa3d0a2a44b..294c1c83aa4d5b0e414b3d6301f72aefddf45512 100644 (file)
@@ -1530,7 +1530,7 @@ static void dgap_input(struct channel_t *ch)
        if ((bd->state != BOARD_READY) || !tp  ||
            (tp->magic != TTY_MAGIC) ||
            !(ch->ch_tun.un_flags & UN_ISOPEN) ||
-           !(tp->termios.c_cflag & CREAD) ||
+           !C_CREAD(tp) ||
            (ch->ch_tun.un_flags & UN_CLOSING)) {
                writew(head, &bs->rx_tail);
                writeb(1, &bs->idata);
@@ -1665,9 +1665,7 @@ static void dgap_input(struct channel_t *ch)
 }
 
 static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
-                             struct un_t *un, u32 mask,
-                             unsigned long *irq_flags1,
-                             unsigned long *irq_flags2)
+                             struct un_t *un, u32 mask)
 {
        if (!(un->un_flags & mask))
                return;
@@ -1677,17 +1675,7 @@ static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
        if (!(un->un_flags & UN_ISOPEN))
                return;
 
-       if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-           un->un_tty->ldisc->ops->write_wakeup) {
-               spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
-               spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);
-
-               (un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);
-
-               spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
-               spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
-       }
-       wake_up_interruptible(&un->un_tty->write_wait);
+       tty_wakeup(un->un_tty);
        wake_up_interruptible(&un->un_flags_wait);
 }
 
@@ -1952,10 +1940,8 @@ static int dgap_event(struct board_t *bd)
                 * Process Transmit low.
                 */
                if (reason & IFTLW) {
-                       dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
-                                         &lock_flags, &lock_flags2);
-                       dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
-                                         &lock_flags, &lock_flags2);
+                       dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW);
+                       dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW);
                        if (ch->ch_flags & CH_WLOW) {
                                ch->ch_flags &= ~CH_WLOW;
                                wake_up_interruptible(&ch->ch_flags_wait);
@@ -1966,10 +1952,8 @@ static int dgap_event(struct board_t *bd)
                 * Process Transmit empty.
                 */
                if (reason & IFTEM) {
-                       dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
-                                         &lock_flags, &lock_flags2);
-                       dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
-                                         &lock_flags, &lock_flags2);
+                       dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY);
+                       dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY);
                        if (ch->ch_flags & CH_WEMPTY) {
                                ch->ch_flags &= ~CH_WEMPTY;
                                wake_up_interruptible(&ch->ch_flags_wait);
@@ -3171,8 +3155,6 @@ static void dgap_tty_flush_buffer(struct tty_struct *tty)
 
        spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
        spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
-       if (waitqueue_active(&tty->write_wait))
-               wake_up_interruptible(&tty->write_wait);
        tty_wakeup(tty);
 }
 
@@ -4969,10 +4951,6 @@ static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
                        ch->ch_pun.un_flags &= ~(UN_LOW | UN_EMPTY);
                        wake_up_interruptible(&ch->ch_pun.un_flags_wait);
                }
-               if (waitqueue_active(&tty->write_wait))
-                       wake_up_interruptible(&tty->write_wait);
-
-               /* Can't hold any locks when calling tty_wakeup! */
                spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
                spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
                tty_wakeup(tty);
index b79eab084c023864389dc283aec9e703b72177e0..8b1ba65a6984673dd092c2f3fa08673639e8df2c 100644 (file)
@@ -541,7 +541,7 @@ void dgnc_input(struct channel_t *ch)
         */
        if (!tp || (tp->magic != TTY_MAGIC) ||
            !(ch->ch_tun.un_flags & UN_ISOPEN) ||
-           !(tp->termios.c_cflag & CREAD) ||
+           !C_CREAD(tp) ||
            (ch->ch_tun.un_flags & UN_CLOSING)) {
                ch->ch_r_head = tail;
 
@@ -933,14 +933,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
        }
 
        if (ch->ch_tun.un_flags & UN_ISOPEN) {
-               if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-                   ch->ch_tun.un_tty->ldisc->ops->write_wakeup) {
-                       spin_unlock_irqrestore(&ch->ch_lock, flags);
-                       ch->ch_tun.un_tty->ldisc->ops->write_wakeup(ch->ch_tun.un_tty);
-                       spin_lock_irqsave(&ch->ch_lock, flags);
-               }
-
-               wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
+               tty_wakeup(ch->ch_tun.un_tty);
 
                /*
                 * If unit is set to wait until empty, check to make sure
@@ -975,14 +968,7 @@ void dgnc_wakeup_writes(struct channel_t *ch)
        }
 
        if (ch->ch_pun.un_flags & UN_ISOPEN) {
-               if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
-                   ch->ch_pun.un_tty->ldisc->ops->write_wakeup) {
-                       spin_unlock_irqrestore(&ch->ch_lock, flags);
-                       ch->ch_pun.un_tty->ldisc->ops->write_wakeup(ch->ch_pun.un_tty);
-                       spin_lock_irqsave(&ch->ch_lock, flags);
-               }
-
-               wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
+               tty_wakeup(ch->ch_pun.un_tty);
 
                /*
                 * If unit is set to wait until empty, check to make sure
index 58d4517e18369f984fd1fac9bfe53b9cc9ba4a3a..b9519be90fdae94ad5dbd278a336216de2971ce8 100644 (file)
@@ -6,6 +6,7 @@ menu "Analog to digital converters"
 config AD7606
        tristate "Analog Devices AD7606 ADC driver"
        depends on GPIOLIB || COMPILE_TEST
+       depends on HAS_IOMEM
        select IIO_BUFFER
        select IIO_TRIGGERED_BUFFER
        help
index f129039bece3c74b96fb78e70996ea65279502ea..69287108f793bcb81bcf91451b64f1f4f86c2257 100644 (file)
@@ -217,8 +217,12 @@ error_ret:
 static int ade7753_reset(struct device *dev)
 {
        u16 val;
+       int ret;
+
+       ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+       if (ret)
+               return ret;
 
-       ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
        val |= BIT(6); /* Software Chip Reset */
 
        return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
@@ -343,8 +347,12 @@ error_ret:
 static int ade7753_stop_device(struct device *dev)
 {
        u16 val;
+       int ret;
+
+       ret = ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
+       if (ret)
+               return ret;
 
-       ade7753_spi_read_reg_16(dev, ADE7753_MODE, &val);
        val |= BIT(4);  /* AD converters can be turned off */
 
        return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
index 079d50ebfa3acd7c07c6d4d507827eec436d4293..94c01aad844be3a48dfaff19d08c09641e72eb13 100644 (file)
@@ -27,7 +27,7 @@
  * Copyright (c) 2012, Intel Corporation.
  */
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <linux/scatterlist.h>
 #include "../../../include/linux/libcfs/libcfs.h"
 #include "linux-crypto.h"
@@ -38,9 +38,11 @@ static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
 
 static int cfs_crypto_hash_alloc(unsigned char alg_id,
                                 const struct cfs_crypto_hash_type **type,
-                                struct hash_desc *desc, unsigned char *key,
+                                struct ahash_request **req,
+                                unsigned char *key,
                                 unsigned int key_len)
 {
+       struct crypto_ahash *tfm;
        int     err = 0;
 
        *type = cfs_crypto_hash_type(alg_id);
@@ -50,18 +52,23 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
                      alg_id, CFS_HASH_ALG_MAX);
                return -EINVAL;
        }
-       desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);
+       tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
 
-       if (desc->tfm == NULL)
-               return -EINVAL;
-
-       if (IS_ERR(desc->tfm)) {
+       if (IS_ERR(tfm)) {
                CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
                       (*type)->cht_name);
-               return PTR_ERR(desc->tfm);
+               return PTR_ERR(tfm);
        }
 
-       desc->flags = 0;
+       *req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!*req) {
+               CDEBUG(D_INFO, "Failed to alloc ahash_request for %s\n",
+                      (*type)->cht_name);
+               crypto_free_ahash(tfm);
+               return -ENOMEM;
+       }
+
+       ahash_request_set_callback(*req, 0, NULL, NULL);
 
        /** Shash has different logic for initialization than digest
         * shash: crypto_hash_setkey, crypto_hash_init
@@ -70,23 +77,27 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
         * cfs_crypto_hash_alloc.
         */
        if (key != NULL)
-               err = crypto_hash_setkey(desc->tfm, key, key_len);
+               err = crypto_ahash_setkey(tfm, key, key_len);
        else if ((*type)->cht_key != 0)
-               err = crypto_hash_setkey(desc->tfm,
+               err = crypto_ahash_setkey(tfm,
                                         (unsigned char *)&((*type)->cht_key),
                                         (*type)->cht_size);
 
        if (err != 0) {
-               crypto_free_hash(desc->tfm);
+               crypto_free_ahash(tfm);
                return err;
        }
 
        CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
-              (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name,
-              (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name,
+              crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
               cfs_crypto_hash_speeds[alg_id]);
 
-       return crypto_hash_init(desc);
+       err = crypto_ahash_init(*req);
+       if (err) {
+               ahash_request_free(*req);
+               crypto_free_ahash(tfm);
+       }
+       return err;
 }
 
 int cfs_crypto_hash_digest(unsigned char alg_id,
@@ -95,27 +106,29 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
                           unsigned char *hash, unsigned int *hash_len)
 {
        struct scatterlist      sl;
-       struct hash_desc        hdesc;
+       struct ahash_request *req;
        int                     err;
        const struct cfs_crypto_hash_type       *type;
 
        if (buf == NULL || buf_len == 0 || hash_len == NULL)
                return -EINVAL;
 
-       err = cfs_crypto_hash_alloc(alg_id, &type, &hdesc, key, key_len);
+       err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
        if (err != 0)
                return err;
 
        if (hash == NULL || *hash_len < type->cht_size) {
                *hash_len = type->cht_size;
-               crypto_free_hash(hdesc.tfm);
+               crypto_free_ahash(crypto_ahash_reqtfm(req));
+               ahash_request_free(req);
                return -ENOSPC;
        }
        sg_init_one(&sl, buf, buf_len);
 
-       hdesc.flags = 0;
-       err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
-       crypto_free_hash(hdesc.tfm);
+       ahash_request_set_crypt(req, &sl, hash, sl.length);
+       err = crypto_ahash_digest(req);
+       crypto_free_ahash(crypto_ahash_reqtfm(req));
+       ahash_request_free(req);
 
        return err;
 }
@@ -125,22 +138,15 @@ struct cfs_crypto_hash_desc *
        cfs_crypto_hash_init(unsigned char alg_id,
                             unsigned char *key, unsigned int key_len)
 {
-
-       struct  hash_desc       *hdesc;
+       struct ahash_request *req;
        int                  err;
        const struct cfs_crypto_hash_type       *type;
 
-       hdesc = kmalloc(sizeof(*hdesc), 0);
-       if (hdesc == NULL)
-               return ERR_PTR(-ENOMEM);
+       err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
 
-       err = cfs_crypto_hash_alloc(alg_id, &type, hdesc, key, key_len);
-
-       if (err) {
-               kfree(hdesc);
+       if (err)
                return ERR_PTR(err);
-       }
-       return (struct cfs_crypto_hash_desc *)hdesc;
+       return (struct cfs_crypto_hash_desc *)req;
 }
 EXPORT_SYMBOL(cfs_crypto_hash_init);
 
@@ -148,23 +154,27 @@ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
                                struct page *page, unsigned int offset,
                                unsigned int len)
 {
+       struct ahash_request *req = (void *)hdesc;
        struct scatterlist sl;
 
        sg_init_table(&sl, 1);
        sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
 
-       return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+       ahash_request_set_crypt(req, &sl, NULL, sl.length);
+       return crypto_ahash_update(req);
 }
 EXPORT_SYMBOL(cfs_crypto_hash_update_page);
 
 int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
                           const void *buf, unsigned int buf_len)
 {
+       struct ahash_request *req = (void *)hdesc;
        struct scatterlist sl;
 
        sg_init_one(&sl, buf, buf_len);
 
-       return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
+       ahash_request_set_crypt(req, &sl, NULL, sl.length);
+       return crypto_ahash_update(req);
 }
 EXPORT_SYMBOL(cfs_crypto_hash_update);
 
@@ -173,25 +183,27 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
                          unsigned char *hash, unsigned int *hash_len)
 {
        int     err;
-       int     size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);
+       struct ahash_request *req = (void *)hdesc;
+       int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
 
        if (hash_len == NULL) {
-               crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
-               kfree(hdesc);
+               crypto_free_ahash(crypto_ahash_reqtfm(req));
+               ahash_request_free(req);
                return 0;
        }
        if (hash == NULL || *hash_len < size) {
                *hash_len = size;
                return -ENOSPC;
        }
-       err = crypto_hash_final((struct hash_desc *) hdesc, hash);
+       ahash_request_set_crypt(req, NULL, hash, 0);
+       err = crypto_ahash_final(req);
 
        if (err < 0) {
                /* Maybe the caller can fix the error */
                return err;
        }
-       crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
-       kfree(hdesc);
+       crypto_free_ahash(crypto_ahash_reqtfm(req));
+       ahash_request_free(req);
        return err;
 }
 EXPORT_SYMBOL(cfs_crypto_hash_final);
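
This file is converted from the old crypto_hash/hash_desc interface to the ahash API: a crypto_ahash transform plus an ahash_request now carry the state that hash_desc used to hold, and the request doubles as the opaque cfs_crypto_hash_desc handle. A minimal, self-contained sketch of the ahash call sequence the new code relies on, used synchronously with CRYPTO_ALG_ASYNC masked off as in the patch ("sha1" here is just an illustrative algorithm name):

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Allocate the transform and request, hash one buffer, free both. */
static int example_ahash_digest(const void *buf, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sl;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}
	ahash_request_set_callback(req, 0, NULL, NULL);

	sg_init_one(&sl, buf, len);
	ahash_request_set_crypt(req, &sl, out, len);
	err = crypto_ahash_digest(req);	/* init + update + final in one call */

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}

The converted helpers follow the same life cycle, except that init/update/final are split across cfs_crypto_hash_init(), cfs_crypto_hash_update*() and cfs_crypto_hash_final(), with the request pointer passed around as the descriptor.
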
index 7b7e7b26c1e8c525699c04451302586a36bc001a..3cc9be776f8b7a658acc0c1565afed3b39ced7ca 100644 (file)
@@ -538,7 +538,7 @@ struct vpfe_isif_raw_config {
 };
 
 /**********************************************************************
-      IPIPE API Structures
+*      IPIPE API Structures
 **********************************************************************/
 
 /* IPIPE module configurations */
index d009bcb439f0677c977d0af1bc3315797c2bf9fa..68ede6c56e6d2207da0752c38244a70655ed739d 100644 (file)
@@ -193,7 +193,7 @@ static int lirc_claim(void)
                        return 0;
                }
        }
-       out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP);
+       out(LIRC_LP_CONTROL, LP_PSELECP | LP_PINITP);
        is_claimed = 1;
        return 1;
 }
@@ -264,7 +264,7 @@ static void lirc_lirc_irq_handler(void *blah)
                init = 1;
        }
 
-       timeout = timer/10;     /* timeout after 1/10 sec. */
+       timeout = timer / 10;   /* timeout after 1/10 sec. */
        signal = 1;
        level = lirc_get_timer();
        do {
@@ -286,15 +286,15 @@ static void lirc_lirc_irq_handler(void *blah)
                /* adjust value to usecs */
                __u64 helper;
 
-               helper = ((__u64) signal)*1000000;
+               helper = ((__u64)signal) * 1000000;
                do_div(helper, timer);
-               signal = (long) helper;
+               signal = (long)helper;
 
                if (signal > LIRC_SFH506_DELAY)
                        data = signal - LIRC_SFH506_DELAY;
                else
                        data = 1;
-               rbuf_write(PULSE_BIT|data); /* pulse */
+               rbuf_write(PULSE_BIT | data); /* pulse */
        }
        lastkt = ktime_get();
 #else
@@ -331,7 +331,7 @@ static ssize_t lirc_read(struct file *filep, char __user *buf, size_t n,
        set_current_state(TASK_INTERRUPTIBLE);
        while (count < n) {
                if (rptr != wptr) {
-                       if (copy_to_user(buf+count, &rbuf[rptr],
+                       if (copy_to_user(buf + count, &rbuf[rptr],
                                         sizeof(int))) {
                                result = -EFAULT;
                                break;
@@ -393,9 +393,9 @@ static ssize_t lirc_write(struct file *filep, const char __user *buf, size_t n,
        for (i = 0; i < count; i++) {
                __u64 helper;
 
-               helper = ((__u64) wbuf[i])*timer;
+               helper = ((__u64)wbuf[i]) * timer;
                do_div(helper, 1000000);
-               wbuf[i] = (int) helper;
+               wbuf[i] = (int)helper;
        }
 
        local_irq_save(flags);
@@ -647,7 +647,7 @@ static int __init lirc_parallel_init(void)
                goto exit_device_put;
 
        pport = parport_find_base(io);
-       if (pport == NULL) {
+       if (!pport) {
                pr_notice("no port at %x found\n", io);
                result = -ENXIO;
                goto exit_device_put;
@@ -656,7 +656,7 @@ static int __init lirc_parallel_init(void)
                                           pf, kf, lirc_lirc_irq_handler, 0,
                                           NULL);
        parport_put_port(pport);
-       if (ppdevice == NULL) {
+       if (!ppdevice) {
                pr_notice("parport_register_device() failed\n");
                result = -ENXIO;
                goto exit_device_put;
@@ -664,7 +664,7 @@ static int __init lirc_parallel_init(void)
        if (parport_claim(ppdevice) != 0)
                goto skip_init;
        is_claimed = 1;
-       out(LIRC_LP_CONTROL, LP_PSELECP|LP_PINITP);
+       out(LIRC_LP_CONTROL, LP_PSELECP | LP_PINITP);
 
 #ifdef LIRC_TIMER
        if (debug)
@@ -730,7 +730,7 @@ module_param(irq, int, S_IRUGO);
 MODULE_PARM_DESC(irq, "Interrupt (7 or 5)");
 
 module_param(tx_mask, int, S_IRUGO);
-MODULE_PARM_DESC(tx_maxk, "Transmitter mask (default: 0x01)");
+MODULE_PARM_DESC(tx_mask, "Transmitter mask (default: 0x01)");
 
 module_param(debug, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Enable debugging messages");
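
Besides the spacing and cast-style cleanups, the last lirc_parallel hunk fixes a real bug: MODULE_PARM_DESC() was describing a parameter named "tx_maxk" that does not exist, so no description was ever attached to the actual tx_mask parameter. The description is keyed by name and only lines up when both macros use the same identifier; a minimal illustration (the variable and default value mirror the driver, the rest is generic):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* The name passed to MODULE_PARM_DESC() must match module_param()
 * exactly, otherwise modinfo shows a description for a parameter
 * that cannot be set.
 */
static int tx_mask = 0x01;
module_param(tx_mask, int, S_IRUGO);
MODULE_PARM_DESC(tx_mask, "Transmitter mask (default: 0x01)");
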
index ae62975cf44a7bb4fa37ba008191c6e6968666b2..457dc7ffdaf1b5f19de7303bfe92cbb307da22fa 100644 (file)
@@ -78,7 +78,6 @@
 #define BL_ALL_UNLOCKED    0
 
 struct spinand_info {
-       struct nand_ecclayout *ecclayout;
        struct spi_device *spi;
        void *priv;
 };
index 79ac19246548afb5a6e5574a97d09dd7cb8ca110..70b8f4fabfad31af474fae8b7943f7a1404cbb8f 100644 (file)
@@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
        lcd_send_serial(0x1F);  /* R/W=W, RS=0 */
        lcd_send_serial(cmd & 0x0F);
        lcd_send_serial((cmd >> 4) & 0x0F);
-       /* the shortest command takes at least 40 us */
-       usleep_range(40, 100);
+       udelay(40);             /* the shortest command takes at least 40 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
        lcd_send_serial(0x5F);  /* R/W=W, RS=1 */
        lcd_send_serial(data & 0x0F);
        lcd_send_serial((data >> 4) & 0x0F);
-       /* the shortest data takes at least 40 us */
-       usleep_range(40, 100);
+       udelay(40);             /* the shortest data takes at least 40 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
        spin_lock_irq(&pprt_lock);
        /* present the data to the data port */
        w_dtr(pprt, cmd);
-       /* maintain the data during 20 us before the strobe */
-       usleep_range(20, 100);
+       udelay(20);     /* maintain the data during 20 us before the strobe */
 
        bits.e = BIT_SET;
        bits.rs = BIT_CLR;
        bits.rw = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(40, 100);  /* maintain the strobe during 40 us */
+       udelay(40);     /* maintain the strobe during 40 us */
 
        bits.e = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(120, 500); /* the shortest command takes at least 120 us */
+       udelay(120);    /* the shortest command takes at least 120 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
        spin_lock_irq(&pprt_lock);
        /* present the data to the data port */
        w_dtr(pprt, data);
-       /* maintain the data during 20 us before the strobe */
-       usleep_range(20, 100);
+       udelay(20);     /* maintain the data during 20 us before the strobe */
 
        bits.e = BIT_SET;
        bits.rs = BIT_SET;
        bits.rw = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(40, 100);  /* maintain the strobe during 40 us */
+       udelay(40);     /* maintain the strobe during 40 us */
 
        bits.e = BIT_CLR;
        set_ctrl_bits();
 
-       usleep_range(45, 100);  /* the shortest data takes at least 45 us */
+       udelay(45);     /* the shortest data takes at least 45 us */
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
        spin_lock_irq(&pprt_lock);
        /* present the data to the control port */
        w_ctr(pprt, cmd);
-       usleep_range(60, 120);
+       udelay(60);
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
        spin_lock_irq(&pprt_lock);
        /* present the data to the data port */
        w_dtr(pprt, data);
-       usleep_range(60, 120);
+       udelay(60);
        spin_unlock_irq(&pprt_lock);
 }
 
@@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
                lcd_send_serial(0x5F);  /* R/W=W, RS=1 */
                lcd_send_serial(' ' & 0x0F);
                lcd_send_serial((' ' >> 4) & 0x0F);
-               usleep_range(40, 100);  /* the shortest data takes at least 40 us */
+               udelay(40);     /* the shortest data takes at least 40 us */
        }
        spin_unlock_irq(&pprt_lock);
 
@@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
                w_dtr(pprt, ' ');
 
                /* maintain the data during 20 us before the strobe */
-               usleep_range(20, 100);
+               udelay(20);
 
                bits.e = BIT_SET;
                bits.rs = BIT_SET;
@@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
                set_ctrl_bits();
 
                /* maintain the strobe during 40 us */
-               usleep_range(40, 100);
+               udelay(40);
 
                bits.e = BIT_CLR;
                set_ctrl_bits();
 
                /* the shortest data takes at least 45 us */
-               usleep_range(45, 100);
+               udelay(45);
        }
        spin_unlock_irq(&pprt_lock);
 
@@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
        for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
                /* present the data to the data port */
                w_dtr(pprt, ' ');
-               usleep_range(60, 120);
+               udelay(60);
        }
 
        spin_unlock_irq(&pprt_lock);
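
Every delay touched in the panel hunks sits inside a spin_lock_irq()/spin_unlock_irq() critical section, where sleeping is not allowed: usleep_range() may schedule, while udelay() busy-waits, which is presumably why the patch goes back to udelay() for these short waits of tens of microseconds. A hedged sketch of the constraint (the lock name and the 40 us figure are illustrative, the latter mirroring the shortest LCD command time in the driver):

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

/* Inside an atomic (spinlocked) section only busy-wait delays are
 * legal: udelay() spins, usleep_range() could sleep while atomic,
 * which would be a bug.
 */
static void example_atomic_delay(void)
{
	spin_lock_irq(&example_lock);
	udelay(40);		/* OK: busy-wait, no scheduling */
	spin_unlock_irq(&example_lock);
}
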
index ba8765063174c4e05b517cdf45d33c6c5f57cf25..f1f3ecadf0fb0d0e1a0bb11d014d766e1187e55c 100644 (file)
@@ -22,12 +22,6 @@ menuconfig STAGING_RDMA
 # Please keep entries in alphabetic order
 if STAGING_RDMA
 
-source "drivers/staging/rdma/amso1100/Kconfig"
-
-source "drivers/staging/rdma/ehca/Kconfig"
-
 source "drivers/staging/rdma/hfi1/Kconfig"
 
-source "drivers/staging/rdma/ipath/Kconfig"
-
 endif
index 139d78ef2c24388b93ed6a008090190cff6de663..8c7fc1de48a7bf6f86f103ad120f9052ea399c58 100644 (file)
@@ -1,5 +1,2 @@
 # Entries for RDMA_STAGING tree
-obj-$(CONFIG_INFINIBAND_AMSO1100)      += amso1100/
-obj-$(CONFIG_INFINIBAND_EHCA)  += ehca/
 obj-$(CONFIG_INFINIBAND_HFI1)  += hfi1/
-obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
diff --git a/drivers/staging/rdma/amso1100/Kbuild b/drivers/staging/rdma/amso1100/Kbuild
deleted file mode 100644 (file)
index 950dfab..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-ccflags-$(CONFIG_INFINIBAND_AMSO1100_DEBUG) := -DDEBUG
-
-obj-$(CONFIG_INFINIBAND_AMSO1100) += iw_c2.o
-
-iw_c2-y := c2.o c2_provider.o c2_rnic.o c2_alloc.o c2_mq.o c2_ae.o c2_vq.o \
-       c2_intr.o c2_cq.o c2_qp.o c2_cm.o c2_mm.o c2_pd.o
diff --git a/drivers/staging/rdma/amso1100/Kconfig b/drivers/staging/rdma/amso1100/Kconfig
deleted file mode 100644 (file)
index e6ce5f2..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-config INFINIBAND_AMSO1100
-       tristate "Ammasso 1100 HCA support"
-       depends on PCI && INET
-       ---help---
-         This is a low-level driver for the Ammasso 1100 host
-         channel adapter (HCA).
-
-config INFINIBAND_AMSO1100_DEBUG
-       bool "Verbose debugging output"
-       depends on INFINIBAND_AMSO1100
-       default n
-       ---help---
-         This option causes the amso1100 driver to produce a bunch of
-         debug messages.  Select this if you are developing the driver
-         or trying to diagnose a problem.
diff --git a/drivers/staging/rdma/amso1100/TODO b/drivers/staging/rdma/amso1100/TODO
deleted file mode 100644 (file)
index 18b00a5..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-7/2015
-
-The amso1100 driver has been deprecated and moved to drivers/staging.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/amso1100/c2.c b/drivers/staging/rdma/amso1100/c2.c
deleted file mode 100644 (file)
index b46ebd1..0000000
+++ /dev/null
@@ -1,1240 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_provider.h"
-
-MODULE_AUTHOR("Tom Tucker <tom@opengridcomputing.com>");
-MODULE_DESCRIPTION("Ammasso AMSO1100 Low-level iWARP Driver");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(DRV_VERSION);
-
-static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
-    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
-
-static int debug = -1;         /* defaults above */
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-
-static int c2_up(struct net_device *netdev);
-static int c2_down(struct net_device *netdev);
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-static void c2_tx_interrupt(struct net_device *netdev);
-static void c2_rx_interrupt(struct net_device *netdev);
-static irqreturn_t c2_interrupt(int irq, void *dev_id);
-static void c2_tx_timeout(struct net_device *netdev);
-static int c2_change_mtu(struct net_device *netdev, int new_mtu);
-static void c2_reset(struct c2_port *c2_port);
-
-static struct pci_device_id c2_pci_table[] = {
-       { PCI_DEVICE(0x18b8, 0xb001) },
-       { 0 }
-};
-
-MODULE_DEVICE_TABLE(pci, c2_pci_table);
-
-static void c2_set_rxbufsize(struct c2_port *c2_port)
-{
-       struct net_device *netdev = c2_port->netdev;
-
-       if (netdev->mtu > RX_BUF_SIZE)
-               c2_port->rx_buf_size =
-                   netdev->mtu + ETH_HLEN + sizeof(struct c2_rxp_hdr) +
-                   NET_IP_ALIGN;
-       else
-               c2_port->rx_buf_size = sizeof(struct c2_rxp_hdr) + RX_BUF_SIZE;
-}
-
-/*
- * Allocate TX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_tx_ring_alloc(struct c2_ring *tx_ring, void *vaddr,
-                           dma_addr_t base, void __iomem * mmio_txp_ring)
-{
-       struct c2_tx_desc *tx_desc;
-       struct c2_txp_desc __iomem *txp_desc;
-       struct c2_element *elem;
-       int i;
-
-       tx_ring->start = kmalloc_array(tx_ring->count, sizeof(*elem),
-                                      GFP_KERNEL);
-       if (!tx_ring->start)
-               return -ENOMEM;
-
-       elem = tx_ring->start;
-       tx_desc = vaddr;
-       txp_desc = mmio_txp_ring;
-       for (i = 0; i < tx_ring->count; i++, elem++, tx_desc++, txp_desc++) {
-               tx_desc->len = 0;
-               tx_desc->status = 0;
-
-               /* Set TXP_HTXD_UNINIT */
-               __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
-                            (void __iomem *) txp_desc + C2_TXP_ADDR);
-               __raw_writew(0, (void __iomem *) txp_desc + C2_TXP_LEN);
-               __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
-                            (void __iomem *) txp_desc + C2_TXP_FLAGS);
-
-               elem->skb = NULL;
-               elem->ht_desc = tx_desc;
-               elem->hw_desc = txp_desc;
-
-               if (i == tx_ring->count - 1) {
-                       elem->next = tx_ring->start;
-                       tx_desc->next_offset = base;
-               } else {
-                       elem->next = elem + 1;
-                       tx_desc->next_offset =
-                           base + (i + 1) * sizeof(*tx_desc);
-               }
-       }
-
-       tx_ring->to_use = tx_ring->to_clean = tx_ring->start;
-
-       return 0;
-}
-
-/*
- * Allocate RX ring elements and chain them together.
- * One-to-one association of adapter descriptors with ring elements.
- */
-static int c2_rx_ring_alloc(struct c2_ring *rx_ring, void *vaddr,
-                           dma_addr_t base, void __iomem * mmio_rxp_ring)
-{
-       struct c2_rx_desc *rx_desc;
-       struct c2_rxp_desc __iomem *rxp_desc;
-       struct c2_element *elem;
-       int i;
-
-       rx_ring->start = kmalloc_array(rx_ring->count, sizeof(*elem),
-                                      GFP_KERNEL);
-       if (!rx_ring->start)
-               return -ENOMEM;
-
-       elem = rx_ring->start;
-       rx_desc = vaddr;
-       rxp_desc = mmio_rxp_ring;
-       for (i = 0; i < rx_ring->count; i++, elem++, rx_desc++, rxp_desc++) {
-               rx_desc->len = 0;
-               rx_desc->status = 0;
-
-               /* Set RXP_HRXD_UNINIT */
-               __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_OK),
-                      (void __iomem *) rxp_desc + C2_RXP_STATUS);
-               __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_COUNT);
-               __raw_writew(0, (void __iomem *) rxp_desc + C2_RXP_LEN);
-               __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
-                            (void __iomem *) rxp_desc + C2_RXP_ADDR);
-               __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
-                            (void __iomem *) rxp_desc + C2_RXP_FLAGS);
-
-               elem->skb = NULL;
-               elem->ht_desc = rx_desc;
-               elem->hw_desc = rxp_desc;
-
-               if (i == rx_ring->count - 1) {
-                       elem->next = rx_ring->start;
-                       rx_desc->next_offset = base;
-               } else {
-                       elem->next = elem + 1;
-                       rx_desc->next_offset =
-                           base + (i + 1) * sizeof(*rx_desc);
-               }
-       }
-
-       rx_ring->to_use = rx_ring->to_clean = rx_ring->start;
-
-       return 0;
-}
-
-/* Setup buffer for receiving */
-static inline int c2_rx_alloc(struct c2_port *c2_port, struct c2_element *elem)
-{
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_rx_desc *rx_desc = elem->ht_desc;
-       struct sk_buff *skb;
-       dma_addr_t mapaddr;
-       u32 maplen;
-       struct c2_rxp_hdr *rxp_hdr;
-
-       skb = dev_alloc_skb(c2_port->rx_buf_size);
-       if (unlikely(!skb)) {
-               pr_debug("%s: out of memory for receive\n",
-                       c2_port->netdev->name);
-               return -ENOMEM;
-       }
-
-       /* Zero out the rxp hdr in the sk_buff */
-       memset(skb->data, 0, sizeof(*rxp_hdr));
-
-       skb->dev = c2_port->netdev;
-
-       maplen = c2_port->rx_buf_size;
-       mapaddr =
-           pci_map_single(c2dev->pcidev, skb->data, maplen,
-                          PCI_DMA_FROMDEVICE);
-
-       /* Set the sk_buff RXP_header to RXP_HRXD_READY */
-       rxp_hdr = (struct c2_rxp_hdr *) skb->data;
-       rxp_hdr->flags = RXP_HRXD_READY;
-
-       __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-       __raw_writew((__force u16) cpu_to_be16((u16) maplen - sizeof(*rxp_hdr)),
-                    elem->hw_desc + C2_RXP_LEN);
-       __raw_writeq((__force u64) cpu_to_be64(mapaddr), elem->hw_desc + C2_RXP_ADDR);
-       __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
-                    elem->hw_desc + C2_RXP_FLAGS);
-
-       elem->skb = skb;
-       elem->mapaddr = mapaddr;
-       elem->maplen = maplen;
-       rx_desc->len = maplen;
-
-       return 0;
-}
-
-/*
- * Allocate buffers for the Rx ring
- * For receive:  rx_ring.to_clean is next received frame
- */
-static int c2_rx_fill(struct c2_port *c2_port)
-{
-       struct c2_ring *rx_ring = &c2_port->rx_ring;
-       struct c2_element *elem;
-       int ret = 0;
-
-       elem = rx_ring->start;
-       do {
-               if (c2_rx_alloc(c2_port, elem)) {
-                       ret = 1;
-                       break;
-               }
-       } while ((elem = elem->next) != rx_ring->start);
-
-       rx_ring->to_clean = rx_ring->start;
-       return ret;
-}
-
-/* Free all buffers in RX ring, assumes receiver stopped */
-static void c2_rx_clean(struct c2_port *c2_port)
-{
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_ring *rx_ring = &c2_port->rx_ring;
-       struct c2_element *elem;
-       struct c2_rx_desc *rx_desc;
-
-       elem = rx_ring->start;
-       do {
-               rx_desc = elem->ht_desc;
-               rx_desc->len = 0;
-
-               __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-               __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
-               __raw_writew(0, elem->hw_desc + C2_RXP_LEN);
-               __raw_writeq((__force u64) cpu_to_be64(0x99aabbccddeeffULL),
-                            elem->hw_desc + C2_RXP_ADDR);
-               __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_UNINIT),
-                            elem->hw_desc + C2_RXP_FLAGS);
-
-               if (elem->skb) {
-                       pci_unmap_single(c2dev->pcidev, elem->mapaddr,
-                                        elem->maplen, PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb(elem->skb);
-                       elem->skb = NULL;
-               }
-       } while ((elem = elem->next) != rx_ring->start);
-}
-
-static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)
-{
-       struct c2_tx_desc *tx_desc = elem->ht_desc;
-
-       tx_desc->len = 0;
-
-       pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,
-                        PCI_DMA_TODEVICE);
-
-       if (elem->skb) {
-               dev_kfree_skb_any(elem->skb);
-               elem->skb = NULL;
-       }
-
-       return 0;
-}
-
-/* Free all buffers in TX ring, assumes transmitter stopped */
-static void c2_tx_clean(struct c2_port *c2_port)
-{
-       struct c2_ring *tx_ring = &c2_port->tx_ring;
-       struct c2_element *elem;
-       struct c2_txp_desc txp_htxd;
-       int retry;
-       unsigned long flags;
-
-       spin_lock_irqsave(&c2_port->tx_lock, flags);
-
-       elem = tx_ring->start;
-
-       do {
-               retry = 0;
-               do {
-                       txp_htxd.flags =
-                           readw(elem->hw_desc + C2_TXP_FLAGS);
-
-                       if (txp_htxd.flags == TXP_HTXD_READY) {
-                               retry = 1;
-                               __raw_writew(0,
-                                            elem->hw_desc + C2_TXP_LEN);
-                               __raw_writeq(0,
-                                            elem->hw_desc + C2_TXP_ADDR);
-                               __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_DONE),
-                                            elem->hw_desc + C2_TXP_FLAGS);
-                               c2_port->netdev->stats.tx_dropped++;
-                               break;
-                       } else {
-                               __raw_writew(0,
-                                            elem->hw_desc + C2_TXP_LEN);
-                               __raw_writeq((__force u64) cpu_to_be64(0x1122334455667788ULL),
-                                            elem->hw_desc + C2_TXP_ADDR);
-                               __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_UNINIT),
-                                            elem->hw_desc + C2_TXP_FLAGS);
-                       }
-
-                       c2_tx_free(c2_port->c2dev, elem);
-
-               } while ((elem = elem->next) != tx_ring->start);
-       } while (retry);
-
-       c2_port->tx_avail = c2_port->tx_ring.count - 1;
-       c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;
-
-       if (c2_port->tx_avail > MAX_SKB_FRAGS + 1)
-               netif_wake_queue(c2_port->netdev);
-
-       spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-}
-
-/*
- * Process transmit descriptors marked 'DONE' by the firmware,
- * freeing up their unneeded sk_buffs.
- */
-static void c2_tx_interrupt(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_ring *tx_ring = &c2_port->tx_ring;
-       struct c2_element *elem;
-       struct c2_txp_desc txp_htxd;
-
-       spin_lock(&c2_port->tx_lock);
-
-       for (elem = tx_ring->to_clean; elem != tx_ring->to_use;
-            elem = elem->next) {
-               txp_htxd.flags =
-                   be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_FLAGS));
-
-               if (txp_htxd.flags != TXP_HTXD_DONE)
-                       break;
-
-               if (netif_msg_tx_done(c2_port)) {
-                       /* PCI reads are expensive in fast path */
-                       txp_htxd.len =
-                           be16_to_cpu((__force __be16) readw(elem->hw_desc + C2_TXP_LEN));
-                       pr_debug("%s: tx done slot %3Zu status 0x%x len "
-                               "%5u bytes\n",
-                               netdev->name, elem - tx_ring->start,
-                               txp_htxd.flags, txp_htxd.len);
-               }
-
-               c2_tx_free(c2dev, elem);
-               ++(c2_port->tx_avail);
-       }
-
-       tx_ring->to_clean = elem;
-
-       if (netif_queue_stopped(netdev)
-           && c2_port->tx_avail > MAX_SKB_FRAGS + 1)
-               netif_wake_queue(netdev);
-
-       spin_unlock(&c2_port->tx_lock);
-}
-
-static void c2_rx_error(struct c2_port *c2_port, struct c2_element *elem)
-{
-       struct c2_rx_desc *rx_desc = elem->ht_desc;
-       struct c2_rxp_hdr *rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
-
-       if (rxp_hdr->status != RXP_HRXD_OK ||
-           rxp_hdr->len > (rx_desc->len - sizeof(*rxp_hdr))) {
-               pr_debug("BAD RXP_HRXD\n");
-               pr_debug("  rx_desc : %p\n", rx_desc);
-               pr_debug("    index : %Zu\n",
-                       elem - c2_port->rx_ring.start);
-               pr_debug("    len   : %u\n", rx_desc->len);
-               pr_debug("  rxp_hdr : %p [PA %p]\n", rxp_hdr,
-                       (void *) __pa((unsigned long) rxp_hdr));
-               pr_debug("    flags : 0x%x\n", rxp_hdr->flags);
-               pr_debug("    status: 0x%x\n", rxp_hdr->status);
-               pr_debug("    len   : %u\n", rxp_hdr->len);
-               pr_debug("    rsvd  : 0x%x\n", rxp_hdr->rsvd);
-       }
-
-       /* Setup the skb for reuse since we're dropping this pkt */
-       elem->skb->data = elem->skb->head;
-       skb_reset_tail_pointer(elem->skb);
-
-       /* Zero out the rxp hdr in the sk_buff */
-       memset(elem->skb->data, 0, sizeof(*rxp_hdr));
-
-       /* Write the descriptor to the adapter's rx ring */
-       __raw_writew(0, elem->hw_desc + C2_RXP_STATUS);
-       __raw_writew(0, elem->hw_desc + C2_RXP_COUNT);
-       __raw_writew((__force u16) cpu_to_be16((u16) elem->maplen - sizeof(*rxp_hdr)),
-                    elem->hw_desc + C2_RXP_LEN);
-       __raw_writeq((__force u64) cpu_to_be64(elem->mapaddr),
-                    elem->hw_desc + C2_RXP_ADDR);
-       __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
-                    elem->hw_desc + C2_RXP_FLAGS);
-
-       pr_debug("packet dropped\n");
-       c2_port->netdev->stats.rx_dropped++;
-}
-
-static void c2_rx_interrupt(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_ring *rx_ring = &c2_port->rx_ring;
-       struct c2_element *elem;
-       struct c2_rx_desc *rx_desc;
-       struct c2_rxp_hdr *rxp_hdr;
-       struct sk_buff *skb;
-       dma_addr_t mapaddr;
-       u32 maplen, buflen;
-       unsigned long flags;
-
-       spin_lock_irqsave(&c2dev->lock, flags);
-
-       /* Begin where we left off */
-       rx_ring->to_clean = rx_ring->start + c2dev->cur_rx;
-
-       for (elem = rx_ring->to_clean; elem->next != rx_ring->to_clean;
-            elem = elem->next) {
-               rx_desc = elem->ht_desc;
-               mapaddr = elem->mapaddr;
-               maplen = elem->maplen;
-               skb = elem->skb;
-               rxp_hdr = (struct c2_rxp_hdr *) skb->data;
-
-               if (rxp_hdr->flags != RXP_HRXD_DONE)
-                       break;
-               buflen = rxp_hdr->len;
-
-               /* Sanity check the RXP header */
-               if (rxp_hdr->status != RXP_HRXD_OK ||
-                   buflen > (rx_desc->len - sizeof(*rxp_hdr))) {
-                       c2_rx_error(c2_port, elem);
-                       continue;
-               }
-
-               /*
-                * Allocate and map a new skb for replenishing the host
-                * RX desc
-                */
-               if (c2_rx_alloc(c2_port, elem)) {
-                       c2_rx_error(c2_port, elem);
-                       continue;
-               }
-
-               /* Unmap the old skb */
-               pci_unmap_single(c2dev->pcidev, mapaddr, maplen,
-                                PCI_DMA_FROMDEVICE);
-
-               prefetch(skb->data);
-
-               /*
-                * Skip past the leading 8 bytes comprising of the
-                * "struct c2_rxp_hdr", prepended by the adapter
-                * to the usual Ethernet header ("struct ethhdr"),
-                * to the start of the raw Ethernet packet.
-                *
-                * Fix up the various fields in the sk_buff before
-                * passing it up to netif_rx(). The transfer size
-                * (in bytes) specified by the adapter len field of
-                * the "struct rxp_hdr_t" does NOT include the
-                * "sizeof(struct c2_rxp_hdr)".
-                */
-               skb->data += sizeof(*rxp_hdr);
-               skb_set_tail_pointer(skb, buflen);
-               skb->len = buflen;
-               skb->protocol = eth_type_trans(skb, netdev);
-
-               netif_rx(skb);
-
-               netdev->stats.rx_packets++;
-               netdev->stats.rx_bytes += buflen;
-       }
-
-       /* Save where we left off */
-       rx_ring->to_clean = elem;
-       c2dev->cur_rx = elem - rx_ring->start;
-       C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
-       spin_unlock_irqrestore(&c2dev->lock, flags);
-}
-
-/*
- * Handle netisr0 TX & RX interrupts.
- */
-static irqreturn_t c2_interrupt(int irq, void *dev_id)
-{
-       unsigned int netisr0, dmaisr;
-       int handled = 0;
-       struct c2_dev *c2dev = dev_id;
-
-       /* Process CCILNET interrupts */
-       netisr0 = readl(c2dev->regs + C2_NISR0);
-       if (netisr0) {
-
-               /*
-                * There is an issue with the firmware that always
-                * provides the status of RX for both TX & RX
-                * interrupts.  So process both queues here.
-                */
-               c2_rx_interrupt(c2dev->netdev);
-               c2_tx_interrupt(c2dev->netdev);
-
-               /* Clear the interrupt */
-               writel(netisr0, c2dev->regs + C2_NISR0);
-               handled++;
-       }
-
-       /* Process RNIC interrupts */
-       dmaisr = readl(c2dev->regs + C2_DISR);
-       if (dmaisr) {
-               writel(dmaisr, c2dev->regs + C2_DISR);
-               c2_rnic_interrupt(c2dev);
-               handled++;
-       }
-
-       if (handled) {
-               return IRQ_HANDLED;
-       } else {
-               return IRQ_NONE;
-       }
-}
-
-static int c2_up(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_element *elem;
-       struct c2_rxp_hdr *rxp_hdr;
-       struct in_device *in_dev;
-       size_t rx_size, tx_size;
-       int ret, i;
-       unsigned int netimr0;
-
-       if (netif_msg_ifup(c2_port))
-               pr_debug("%s: enabling interface\n", netdev->name);
-
-       /* Set the Rx buffer size based on MTU */
-       c2_set_rxbufsize(c2_port);
-
-       /* Allocate DMA'able memory for Tx/Rx host descriptor rings */
-       rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
-       tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);
-
-       c2_port->mem_size = tx_size + rx_size;
-       c2_port->mem = pci_zalloc_consistent(c2dev->pcidev, c2_port->mem_size,
-                                            &c2_port->dma);
-       if (c2_port->mem == NULL) {
-               pr_debug("Unable to allocate memory for "
-                       "host descriptor rings\n");
-               return -ENOMEM;
-       }
-
-       /* Create the Rx host descriptor ring */
-       if ((ret =
-            c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
-                             c2dev->mmio_rxp_ring))) {
-               pr_debug("Unable to create RX ring\n");
-               goto bail0;
-       }
-
-       /* Allocate Rx buffers for the host descriptor ring */
-       if (c2_rx_fill(c2_port)) {
-               pr_debug("Unable to fill RX ring\n");
-               goto bail1;
-       }
-
-       /* Create the Tx host descriptor ring */
-       if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
-                                   c2_port->dma + rx_size,
-                                   c2dev->mmio_txp_ring))) {
-               pr_debug("Unable to create TX ring\n");
-               goto bail1;
-       }
-
-       /* Set the TX pointer to where we left off */
-       c2_port->tx_avail = c2_port->tx_ring.count - 1;
-       c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
-           c2_port->tx_ring.start + c2dev->cur_tx;
-
-       /* missing: Initialize MAC */
-
-       BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);
-
-       /* Reset the adapter, ensures the driver is in sync with the RXP */
-       c2_reset(c2_port);
-
-       /* Reset the READY bit in the sk_buff RXP headers & adapter HRXDQ */
-       for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
-            i++, elem++) {
-               rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
-               rxp_hdr->flags = 0;
-               __raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
-                            elem->hw_desc + C2_RXP_FLAGS);
-       }
-
-       /* Enable network packets */
-       netif_start_queue(netdev);
-
-       /* Enable IRQ */
-       writel(0, c2dev->regs + C2_IDIS);
-       netimr0 = readl(c2dev->regs + C2_NIMR0);
-       netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
-       writel(netimr0, c2dev->regs + C2_NIMR0);
-
-       /* Tell the stack to ignore arp requests for ipaddrs bound to
-        * other interfaces.  This is needed to prevent the host stack
-        * from responding to arp requests to the ipaddr bound on the
-        * rdma interface.
-        */
-       in_dev = in_dev_get(netdev);
-       IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
-       in_dev_put(in_dev);
-
-       return 0;
-
-bail1:
-       c2_rx_clean(c2_port);
-       kfree(c2_port->rx_ring.start);
-
-bail0:
-       pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
-                           c2_port->dma);
-
-       return ret;
-}
-
-static int c2_down(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-
-       if (netif_msg_ifdown(c2_port))
-               pr_debug("%s: disabling interface\n",
-                       netdev->name);
-
-       /* Wait for all the queued packets to get sent */
-       c2_tx_interrupt(netdev);
-
-       /* Disable network packets */
-       netif_stop_queue(netdev);
-
-       /* Disable IRQs by clearing the interrupt mask */
-       writel(1, c2dev->regs + C2_IDIS);
-       writel(0, c2dev->regs + C2_NIMR0);
-
-       /* missing: Stop transmitter */
-
-       /* missing: Stop receiver */
-
-       /* Reset the adapter, ensures the driver is in sync with the RXP */
-       c2_reset(c2_port);
-
-       /* missing: Turn off LEDs here */
-
-       /* Free all buffers in the host descriptor rings */
-       c2_tx_clean(c2_port);
-       c2_rx_clean(c2_port);
-
-       /* Free the host descriptor rings */
-       kfree(c2_port->rx_ring.start);
-       kfree(c2_port->tx_ring.start);
-       pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
-                           c2_port->dma);
-
-       return 0;
-}
-
-static void c2_reset(struct c2_port *c2_port)
-{
-       struct c2_dev *c2dev = c2_port->c2dev;
-       unsigned int cur_rx = c2dev->cur_rx;
-
-       /* Tell the hardware to quiesce */
-       C2_SET_CUR_RX(c2dev, cur_rx | C2_PCI_HRX_QUI);
-
-       /*
-        * The hardware will reset the C2_PCI_HRX_QUI bit once
-        * the RXP is quiesced.  Wait 2 seconds for this.
-        */
-       ssleep(2);
-
-       cur_rx = C2_GET_CUR_RX(c2dev);
-
-       if (cur_rx & C2_PCI_HRX_QUI)
-               pr_debug("c2_reset: failed to quiesce the hardware!\n");
-
-       cur_rx &= ~C2_PCI_HRX_QUI;
-
-       c2dev->cur_rx = cur_rx;
-
-       pr_debug("Current RX: %u\n", c2dev->cur_rx);
-}
-
-static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-       struct c2_dev *c2dev = c2_port->c2dev;
-       struct c2_ring *tx_ring = &c2_port->tx_ring;
-       struct c2_element *elem;
-       dma_addr_t mapaddr;
-       u32 maplen;
-       unsigned long flags;
-       unsigned int i;
-
-       spin_lock_irqsave(&c2_port->tx_lock, flags);
-
-       if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
-               netif_stop_queue(netdev);
-               spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
-               pr_debug("%s: Tx ring full when queue awake!\n",
-                       netdev->name);
-               return NETDEV_TX_BUSY;
-       }
-
-       maplen = skb_headlen(skb);
-       mapaddr =
-           pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);
-
-       elem = tx_ring->to_use;
-       elem->skb = skb;
-       elem->mapaddr = mapaddr;
-       elem->maplen = maplen;
-
-       /* Tell HW to xmit */
-       __raw_writeq((__force u64) cpu_to_be64(mapaddr),
-                    elem->hw_desc + C2_TXP_ADDR);
-       __raw_writew((__force u16) cpu_to_be16(maplen),
-                    elem->hw_desc + C2_TXP_LEN);
-       __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
-                    elem->hw_desc + C2_TXP_FLAGS);
-
-       netdev->stats.tx_packets++;
-       netdev->stats.tx_bytes += maplen;
-
-       /* Loop thru additional data fragments and queue them */
-       if (skb_shinfo(skb)->nr_frags) {
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-                       maplen = skb_frag_size(frag);
-                       mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
-                                                  0, maplen, DMA_TO_DEVICE);
-                       elem = elem->next;
-                       elem->skb = NULL;
-                       elem->mapaddr = mapaddr;
-                       elem->maplen = maplen;
-
-                       /* Tell HW to xmit */
-                       __raw_writeq((__force u64) cpu_to_be64(mapaddr),
-                                    elem->hw_desc + C2_TXP_ADDR);
-                       __raw_writew((__force u16) cpu_to_be16(maplen),
-                                    elem->hw_desc + C2_TXP_LEN);
-                       __raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
-                                    elem->hw_desc + C2_TXP_FLAGS);
-
-                       netdev->stats.tx_packets++;
-                       netdev->stats.tx_bytes += maplen;
-               }
-       }
-
-       tx_ring->to_use = elem->next;
-       c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);
-
-       if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
-               netif_stop_queue(netdev);
-               if (netif_msg_tx_queued(c2_port))
-                       pr_debug("%s: transmit queue full\n",
-                               netdev->name);
-       }
-
-       spin_unlock_irqrestore(&c2_port->tx_lock, flags);
-
-       netdev->trans_start = jiffies;
-
-       return NETDEV_TX_OK;
-}
-
-static void c2_tx_timeout(struct net_device *netdev)
-{
-       struct c2_port *c2_port = netdev_priv(netdev);
-
-       if (netif_msg_timer(c2_port))
-               pr_debug("%s: tx timeout\n", netdev->name);
-
-       c2_tx_clean(c2_port);
-}
-
-static int c2_change_mtu(struct net_device *netdev, int new_mtu)
-{
-       int ret = 0;
-
-       if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
-               return -EINVAL;
-
-       netdev->mtu = new_mtu;
-
-       if (netif_running(netdev)) {
-               c2_down(netdev);
-
-               c2_up(netdev);
-       }
-
-       return ret;
-}
-
-static const struct net_device_ops c2_netdev = {
-       .ndo_open               = c2_up,
-       .ndo_stop               = c2_down,
-       .ndo_start_xmit         = c2_xmit_frame,
-       .ndo_tx_timeout         = c2_tx_timeout,
-       .ndo_change_mtu         = c2_change_mtu,
-       .ndo_set_mac_address    = eth_mac_addr,
-       .ndo_validate_addr      = eth_validate_addr,
-};
-
-/* Initialize network device */
-static struct net_device *c2_devinit(struct c2_dev *c2dev,
-                                    void __iomem * mmio_addr)
-{
-       struct c2_port *c2_port = NULL;
-       struct net_device *netdev = alloc_etherdev(sizeof(*c2_port));
-
-       if (!netdev) {
-               pr_debug("c2_port etherdev alloc failed");
-               return NULL;
-       }
-
-       SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
-       netdev->netdev_ops = &c2_netdev;
-       netdev->watchdog_timeo = C2_TX_TIMEOUT;
-       netdev->irq = c2dev->pcidev->irq;
-
-       c2_port = netdev_priv(netdev);
-       c2_port->netdev = netdev;
-       c2_port->c2dev = c2dev;
-       c2_port->msg_enable = netif_msg_init(debug, default_msg);
-       c2_port->tx_ring.count = C2_NUM_TX_DESC;
-       c2_port->rx_ring.count = C2_NUM_RX_DESC;
-
-       spin_lock_init(&c2_port->tx_lock);
-
-       /* Copy our 48-bit ethernet hardware address */
-       memcpy_fromio(netdev->dev_addr, mmio_addr + C2_REGS_ENADDR, 6);
-
-       /* Validate the MAC address */
-       if (!is_valid_ether_addr(netdev->dev_addr)) {
-               pr_debug("Invalid MAC Address\n");
-               pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name,
-                        netdev->dev_addr, netdev->irq);
-               free_netdev(netdev);
-               return NULL;
-       }
-
-       c2dev->netdev = netdev;
-
-       return netdev;
-}
-
-static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
-{
-       int ret = 0, i;
-       unsigned long reg0_start, reg0_flags, reg0_len;
-       unsigned long reg2_start, reg2_flags, reg2_len;
-       unsigned long reg4_start, reg4_flags, reg4_len;
-       unsigned kva_map_size;
-       struct net_device *netdev = NULL;
-       struct c2_dev *c2dev = NULL;
-       void __iomem *mmio_regs = NULL;
-
-       printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
-               DRV_VERSION);
-
-       /* Enable PCI device */
-       ret = pci_enable_device(pcidev);
-       if (ret) {
-               printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
-                       pci_name(pcidev));
-               goto bail0;
-       }
-
-       reg0_start = pci_resource_start(pcidev, BAR_0);
-       reg0_len = pci_resource_len(pcidev, BAR_0);
-       reg0_flags = pci_resource_flags(pcidev, BAR_0);
-
-       reg2_start = pci_resource_start(pcidev, BAR_2);
-       reg2_len = pci_resource_len(pcidev, BAR_2);
-       reg2_flags = pci_resource_flags(pcidev, BAR_2);
-
-       reg4_start = pci_resource_start(pcidev, BAR_4);
-       reg4_len = pci_resource_len(pcidev, BAR_4);
-       reg4_flags = pci_resource_flags(pcidev, BAR_4);
-
-       pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
-       pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
-       pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);
-
-       /* Make sure PCI base addr are MMIO */
-       if (!(reg0_flags & IORESOURCE_MEM) ||
-           !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
-               printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
-               ret = -ENODEV;
-               goto bail1;
-       }
-
-       /* Check for weird/broken PCI region reporting */
-       if ((reg0_len < C2_REG0_SIZE) ||
-           (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
-               printk(KERN_ERR PFX "Invalid PCI region sizes\n");
-               ret = -ENODEV;
-               goto bail1;
-       }
-
-       /* Reserve PCI I/O and memory resources */
-       ret = pci_request_regions(pcidev, DRV_NAME);
-       if (ret) {
-               printk(KERN_ERR PFX "%s: Unable to request regions\n",
-                       pci_name(pcidev));
-               goto bail1;
-       }
-
-       if ((sizeof(dma_addr_t) > 4)) {
-               ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
-               if (ret < 0) {
-                       printk(KERN_ERR PFX "64b DMA configuration failed\n");
-                       goto bail2;
-               }
-       } else {
-               ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
-               if (ret < 0) {
-                       printk(KERN_ERR PFX "32b DMA configuration failed\n");
-                       goto bail2;
-               }
-       }
-
-       /* Enables bus-mastering on the device */
-       pci_set_master(pcidev);
-
-       /* Remap the adapter PCI registers in BAR4 */
-       mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
-                                   sizeof(struct c2_adapter_pci_regs));
-       if (!mmio_regs) {
-               printk(KERN_ERR PFX
-                       "Unable to remap adapter PCI registers in BAR4\n");
-               ret = -EIO;
-               goto bail2;
-       }
-
-       /* Validate PCI regs magic */
-       for (i = 0; i < sizeof(c2_magic); i++) {
-               if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
-                       printk(KERN_ERR PFX "Downlevel Firmware boot loader "
-                               "[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
-                              "utility to update your boot loader\n",
-                               i + 1, sizeof(c2_magic),
-                               readb(mmio_regs + C2_REGS_MAGIC + i),
-                               c2_magic[i]);
-                       printk(KERN_ERR PFX "Adapter not claimed\n");
-                       iounmap(mmio_regs);
-                       ret = -EIO;
-                       goto bail2;
-               }
-       }
-
-       /* Validate the adapter version */
-       if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
-               printk(KERN_ERR PFX "Version mismatch "
-                       "[fw=%u, c2=%u], Adapter not claimed\n",
-                       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
-                       C2_VERSION);
-               ret = -EINVAL;
-               iounmap(mmio_regs);
-               goto bail2;
-       }
-
-       /* Validate the adapter IVN */
-       if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
-               printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
-                      "the OpenIB device support kit. "
-                      "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
-                      C2_IVN);
-               ret = -EINVAL;
-               iounmap(mmio_regs);
-               goto bail2;
-       }
-
-       /* Allocate hardware structure */
-       c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
-       if (!c2dev) {
-               printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
-                       pci_name(pcidev));
-               ret = -ENOMEM;
-               iounmap(mmio_regs);
-               goto bail2;
-       }
-
-       memset(c2dev, 0, sizeof(*c2dev));
-       spin_lock_init(&c2dev->lock);
-       c2dev->pcidev = pcidev;
-       c2dev->cur_tx = 0;
-
-       /* Get the last RX index */
-       c2dev->cur_rx =
-           (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
-            0xffffc000) / sizeof(struct c2_rxp_desc);
-
-       /* Request an interrupt line for the driver */
-       ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
-       if (ret) {
-               printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
-                       pci_name(pcidev), pcidev->irq);
-               iounmap(mmio_regs);
-               goto bail3;
-       }
-
-       /* Set driver specific data */
-       pci_set_drvdata(pcidev, c2dev);
-
-       /* Initialize network device */
-       if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
-               ret = -ENOMEM;
-               iounmap(mmio_regs);
-               goto bail4;
-       }
-
-       /* Save off the actual size prior to unmapping mmio_regs */
-       kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));
-
-       /* Unmap the adapter PCI registers in BAR4 */
-       iounmap(mmio_regs);
-
-       /* Register network device */
-       ret = register_netdev(netdev);
-       if (ret) {
-               printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
-                       ret);
-               goto bail5;
-       }
-
-       /* Disable network packets */
-       netif_stop_queue(netdev);
-
-       /* Remap the adapter HRXDQ PA space to kernel VA space */
-       c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
-                                              C2_RXP_HRXDQ_SIZE);
-       if (!c2dev->mmio_rxp_ring) {
-               printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
-               ret = -EIO;
-               goto bail6;
-       }
-
-       /* Remap the adapter HTXDQ PA space to kernel VA space */
-       c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
-                                              C2_TXP_HTXDQ_SIZE);
-       if (!c2dev->mmio_txp_ring) {
-               printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
-               ret = -EIO;
-               goto bail7;
-       }
-
-       /* Save off the current RX index in the last 4 bytes of the TXP Ring */
-       C2_SET_CUR_RX(c2dev, c2dev->cur_rx);
-
-       /* Remap the PCI registers in adapter BAR0 to kernel VA space */
-       c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
-       if (!c2dev->regs) {
-               printk(KERN_ERR PFX "Unable to remap BAR0\n");
-               ret = -EIO;
-               goto bail8;
-       }
-
-       /* Remap the PCI registers in adapter BAR4 to kernel VA space */
-       c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
-       c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
-                                    kva_map_size);
-       if (!c2dev->kva) {
-               printk(KERN_ERR PFX "Unable to remap BAR4\n");
-               ret = -EIO;
-               goto bail9;
-       }
-
-       /* Print out the MAC address */
-       pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr,
-                netdev->irq);
-
-       ret = c2_rnic_init(c2dev);
-       if (ret) {
-               printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
-               goto bail10;
-       }
-
-       ret = c2_register_device(c2dev);
-       if (ret)
-               goto bail10;
-
-       return 0;
-
- bail10:
-       iounmap(c2dev->kva);
-
- bail9:
-       iounmap(c2dev->regs);
-
- bail8:
-       iounmap(c2dev->mmio_txp_ring);
-
- bail7:
-       iounmap(c2dev->mmio_rxp_ring);
-
- bail6:
-       unregister_netdev(netdev);
-
- bail5:
-       free_netdev(netdev);
-
- bail4:
-       free_irq(pcidev->irq, c2dev);
-
- bail3:
-       ib_dealloc_device(&c2dev->ibdev);
-
- bail2:
-       pci_release_regions(pcidev);
-
- bail1:
-       pci_disable_device(pcidev);
-
- bail0:
-       return ret;
-}
-
-static void c2_remove(struct pci_dev *pcidev)
-{
-       struct c2_dev *c2dev = pci_get_drvdata(pcidev);
-       struct net_device *netdev = c2dev->netdev;
-
-       /* Unregister with OpenIB */
-       c2_unregister_device(c2dev);
-
-       /* Clean up the RNIC resources */
-       c2_rnic_term(c2dev);
-
-       /* Remove network device from the kernel */
-       unregister_netdev(netdev);
-
-       /* Free network device */
-       free_netdev(netdev);
-
-       /* Free the interrupt line */
-       free_irq(pcidev->irq, c2dev);
-
-       /* missing: Turn LEDs off here */
-
-       /* Unmap adapter PA space */
-       iounmap(c2dev->kva);
-       iounmap(c2dev->regs);
-       iounmap(c2dev->mmio_txp_ring);
-       iounmap(c2dev->mmio_rxp_ring);
-
-       /* Free the hardware structure */
-       ib_dealloc_device(&c2dev->ibdev);
-
-       /* Release reserved PCI I/O and memory resources */
-       pci_release_regions(pcidev);
-
-       /* Disable PCI device */
-       pci_disable_device(pcidev);
-
-       /* Clear driver specific data */
-       pci_set_drvdata(pcidev, NULL);
-}
-
-static struct pci_driver c2_pci_driver = {
-       .name = DRV_NAME,
-       .id_table = c2_pci_table,
-       .probe = c2_probe,
-       .remove = c2_remove,
-};
-
-module_pci_driver(c2_pci_driver);
diff --git a/drivers/staging/rdma/amso1100/c2.h b/drivers/staging/rdma/amso1100/c2.h
deleted file mode 100644
index 21b565a..0000000
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef __C2_H
-#define __C2_H
-
-#include <linux/netdevice.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/idr.h>
-
-#include "c2_provider.h"
-#include "c2_mq.h"
-#include "c2_status.h"
-
-#define DRV_NAME     "c2"
-#define DRV_VERSION  "1.1"
-#define PFX          DRV_NAME ": "
-
-#define BAR_0                0
-#define BAR_2                2
-#define BAR_4                4
-
-#define RX_BUF_SIZE         (1536 + 8)
-#define ETH_JUMBO_MTU        9000
-#define C2_MAGIC            "CEPHEUS"
-#define C2_VERSION           4
-#define C2_IVN              (18 & 0x7fffffff)
-
-#define C2_REG0_SIZE        (16 * 1024)
-#define C2_REG2_SIZE        (2 * 1024 * 1024)
-#define C2_REG4_SIZE        (256 * 1024 * 1024)
-#define C2_NUM_TX_DESC       341
-#define C2_NUM_RX_DESC       256
-#define C2_PCI_REGS_OFFSET  (0x10000)
-#define C2_RXP_HRXDQ_OFFSET (((C2_REG4_SIZE)/2))
-#define C2_RXP_HRXDQ_SIZE   (4096)
-#define C2_TXP_HTXDQ_OFFSET (((C2_REG4_SIZE)/2) + C2_RXP_HRXDQ_SIZE)
-#define C2_TXP_HTXDQ_SIZE   (4096)
-#define C2_TX_TIMEOUT      (6*HZ)
-
-/* CEPHEUS */
-static const u8 c2_magic[] = {
-       0x43, 0x45, 0x50, 0x48, 0x45, 0x55, 0x53
-};
-
-enum adapter_pci_regs {
-       C2_REGS_MAGIC = 0x0000,
-       C2_REGS_VERS = 0x0008,
-       C2_REGS_IVN = 0x000C,
-       C2_REGS_PCI_WINSIZE = 0x0010,
-       C2_REGS_Q0_QSIZE = 0x0014,
-       C2_REGS_Q0_MSGSIZE = 0x0018,
-       C2_REGS_Q0_POOLSTART = 0x001C,
-       C2_REGS_Q0_SHARED = 0x0020,
-       C2_REGS_Q1_QSIZE = 0x0024,
-       C2_REGS_Q1_MSGSIZE = 0x0028,
-       C2_REGS_Q1_SHARED = 0x0030,
-       C2_REGS_Q2_QSIZE = 0x0034,
-       C2_REGS_Q2_MSGSIZE = 0x0038,
-       C2_REGS_Q2_SHARED = 0x0040,
-       C2_REGS_ENADDR = 0x004C,
-       C2_REGS_RDMA_ENADDR = 0x0054,
-       C2_REGS_HRX_CUR = 0x006C,
-};
-
-struct c2_adapter_pci_regs {
-       char reg_magic[8];
-       u32 version;
-       u32 ivn;
-       u32 pci_window_size;
-       u32 q0_q_size;
-       u32 q0_msg_size;
-       u32 q0_pool_start;
-       u32 q0_shared;
-       u32 q1_q_size;
-       u32 q1_msg_size;
-       u32 q1_pool_start;
-       u32 q1_shared;
-       u32 q2_q_size;
-       u32 q2_msg_size;
-       u32 q2_pool_start;
-       u32 q2_shared;
-       u32 log_start;
-       u32 log_size;
-       u8 host_enaddr[8];
-       u8 rdma_enaddr[8];
-       u32 crash_entry;
-       u32 crash_ready[2];
-       u32 fw_txd_cur;
-       u32 fw_hrxd_cur;
-       u32 fw_rxd_cur;
-};
-
-enum pci_regs {
-       C2_HISR = 0x0000,
-       C2_DISR = 0x0004,
-       C2_HIMR = 0x0008,
-       C2_DIMR = 0x000C,
-       C2_NISR0 = 0x0010,
-       C2_NISR1 = 0x0014,
-       C2_NIMR0 = 0x0018,
-       C2_NIMR1 = 0x001C,
-       C2_IDIS = 0x0020,
-};
-
-enum {
-       C2_PCI_HRX_INT = 1 << 8,
-       C2_PCI_HTX_INT = 1 << 17,
-       C2_PCI_HRX_QUI = 1 << 31,
-};
-
-/*
- * Cepheus registers in BAR0.
- */
-struct c2_pci_regs {
-       u32 hostisr;
-       u32 dmaisr;
-       u32 hostimr;
-       u32 dmaimr;
-       u32 netisr0;
-       u32 netisr1;
-       u32 netimr0;
-       u32 netimr1;
-       u32 int_disable;
-};
-
-/* TXP flags */
-enum c2_txp_flags {
-       TXP_HTXD_DONE = 0,
-       TXP_HTXD_READY = 1 << 0,
-       TXP_HTXD_UNINIT = 1 << 1,
-};
-
-/* RXP flags */
-enum c2_rxp_flags {
-       RXP_HRXD_UNINIT = 0,
-       RXP_HRXD_READY = 1 << 0,
-       RXP_HRXD_DONE = 1 << 1,
-};
-
-/* RXP status */
-enum c2_rxp_status {
-       RXP_HRXD_ZERO = 0,
-       RXP_HRXD_OK = 1 << 0,
-       RXP_HRXD_BUF_OV = 1 << 1,
-};
-
-/* TXP descriptor fields */
-enum txp_desc {
-       C2_TXP_FLAGS = 0x0000,
-       C2_TXP_LEN = 0x0002,
-       C2_TXP_ADDR = 0x0004,
-};
-
-/* RXP descriptor fields */
-enum rxp_desc {
-       C2_RXP_FLAGS = 0x0000,
-       C2_RXP_STATUS = 0x0002,
-       C2_RXP_COUNT = 0x0004,
-       C2_RXP_LEN = 0x0006,
-       C2_RXP_ADDR = 0x0008,
-};
-
-struct c2_txp_desc {
-       u16 flags;
-       u16 len;
-       u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_desc {
-       u16 flags;
-       u16 status;
-       u16 count;
-       u16 len;
-       u64 addr;
-} __attribute__ ((packed));
-
-struct c2_rxp_hdr {
-       u16 flags;
-       u16 status;
-       u16 len;
-       u16 rsvd;
-} __attribute__ ((packed));
-
-struct c2_tx_desc {
-       u32 len;
-       u32 status;
-       dma_addr_t next_offset;
-};
-
-struct c2_rx_desc {
-       u32 len;
-       u32 status;
-       dma_addr_t next_offset;
-};
-
-struct c2_alloc {
-       u32 last;
-       u32 max;
-       spinlock_t lock;
-       unsigned long *table;
-};
-
-struct c2_array {
-       struct {
-               void **page;
-               int used;
-       } *page_list;
-};
-
-/*
- * The MQ shared pointer pool is organized as a linked list of
- * chunks. Each chunk contains a linked list of free shared pointers
- * that can be allocated to a given user mode client.
- *
- */
-struct sp_chunk {
-       struct sp_chunk *next;
-       dma_addr_t dma_addr;
-       DEFINE_DMA_UNMAP_ADDR(mapping);
-       u16 head;
-       u16 shared_ptr[0];
-};
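-/*
- * Within a chunk, shared_ptr[] doubles as the free list: head holds the
- * index of the first free slot, each free slot holds the index of the
- * next free slot, and the value 0xFFFF terminates the list (see
- * c2_alloc_mqsp_chunk() and c2_alloc_mqsp() in c2_alloc.c).
- */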
-
-struct c2_pd_table {
-       u32 last;
-       u32 max;
-       spinlock_t lock;
-       unsigned long *table;
-};
-
-struct c2_qp_table {
-       struct idr idr;
-       spinlock_t lock;
-};
-
-struct c2_element {
-       struct c2_element *next;
-       void *ht_desc;          /* host     descriptor */
-       void __iomem *hw_desc;  /* hardware descriptor */
-       struct sk_buff *skb;
-       dma_addr_t mapaddr;
-       u32 maplen;
-};
-
-struct c2_ring {
-       struct c2_element *to_clean;
-       struct c2_element *to_use;
-       struct c2_element *start;
-       unsigned long count;
-};
-
-struct c2_dev {
-       struct ib_device ibdev;
-       void __iomem *regs;
-       void __iomem *mmio_txp_ring; /* remapped adapter memory for hw rings */
-       void __iomem *mmio_rxp_ring;
-       spinlock_t lock;
-       struct pci_dev *pcidev;
-       struct net_device *netdev;
-       struct net_device *pseudo_netdev;
-       unsigned int cur_tx;
-       unsigned int cur_rx;
-       u32 adapter_handle;
-       int device_cap_flags;
-       void __iomem *kva;      /* KVA device memory */
-       unsigned long pa;       /* PA device memory */
-       void **qptr_array;
-
-       struct kmem_cache *host_msg_cache;
-
-       struct list_head cca_link;              /* adapter list */
-       struct list_head eh_wakeup_list;        /* event wakeup list */
-       wait_queue_head_t req_vq_wo;
-
-       /* Cached RNIC properties */
-       struct ib_device_attr props;
-
-       struct c2_pd_table pd_table;
-       struct c2_qp_table qp_table;
-       int ports;              /* num of GigE ports */
-       int devnum;
-       spinlock_t vqlock;      /* sync vbs req MQ */
-
-       /* Verbs Queues */
-       struct c2_mq req_vq;    /* Verbs Request MQ */
-       struct c2_mq rep_vq;    /* Verbs Reply MQ */
-       struct c2_mq aeq;       /* Async Events MQ */
-
-       /* Kernel client MQs */
-       struct sp_chunk *kern_mqsp_pool;
-
-       /* Device updates these values when posting messages to a host
-        * target queue */
-       u16 req_vq_shared;
-       u16 rep_vq_shared;
-       u16 aeq_shared;
-       u16 irq_claimed;
-
-       /*
-        * Shared host target pages for user-accessible MQs.
-        */
-       int hthead;             /* index of first free entry */
-       void *htpages;          /* kernel vaddr */
-       int htlen;              /* length of htpages memory */
-       void *htuva;            /* user mapped vaddr */
-       spinlock_t htlock;      /* serialize allocation */
-
-       u64 adapter_hint_uva;   /* access to the activity FIFO */
-
-       //      spinlock_t aeq_lock;
-       //      spinlock_t rnic_lock;
-
-       __be16 *hint_count;
-       dma_addr_t hint_count_dma;
-       u16 hints_read;
-
-       int init;               /* TRUE if it's ready */
-       char ae_cache_name[16];
-       char vq_cache_name[16];
-};
-
-struct c2_port {
-       u32 msg_enable;
-       struct c2_dev *c2dev;
-       struct net_device *netdev;
-
-       spinlock_t tx_lock;
-       u32 tx_avail;
-       struct c2_ring tx_ring;
-       struct c2_ring rx_ring;
-
-       void *mem;              /* PCI memory for host rings */
-       dma_addr_t dma;
-       unsigned long mem_size;
-
-       u32 rx_buf_size;
-};
-
-/*
- * Activity FIFO registers in BAR0.
- */
-#define PCI_BAR0_HOST_HINT     0x100
-#define PCI_BAR0_ADAPTER_HINT  0x2000
-
-/*
- * Completion queue arm state flags.
- */
-#define CQ_ARMED       0x01
-#define CQ_WAIT_FOR_DMA        0x80
-
-/*
- * The format of a hint is as follows:
- * Lower 16 bits are the count of hints for the queue.
- * Next 15 bits are the qp_index
- * The uppermost bit depends on who reads it:
- *    If read by producer, then it means Full (1) or Not-Full (0)
- *    If read by consumer, then it means Empty (1) or Not-Empty (0)
- */
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | (hint_count))
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
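-/*
- * For example, C2_HINT_MAKE(3, 5) encodes to 0x00030005, from which
- * C2_HINT_GET_INDEX() recovers the qp_index 3 and C2_HINT_GET_COUNT()
- * recovers the hint count 5.
- */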
-
-
-/*
- * The following defines the offset in SDRAM of the
- * struct c2_adapter_pci_regs block.
- */
-#define C2_ADAPTER_PCI_REGS_OFFSET 0x10000
-
-#ifndef readq
-static inline u64 readq(const void __iomem * addr)
-{
-       u64 ret = readl(addr + 4);
-       ret <<= 32;
-       ret |= readl(addr);
-
-       return ret;
-}
-#endif
-
-#ifndef writeq
-static inline void __raw_writeq(u64 val, void __iomem * addr)
-{
-       __raw_writel((u32) (val), addr);
-       __raw_writel((u32) (val >> 32), (addr + 4));
-}
-#endif
-
-#define C2_SET_CUR_RX(c2dev, cur_rx) \
-       __raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
-
-#define C2_GET_CUR_RX(c2dev) \
-       be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
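-/*
- * The current RX index lives in the last four bytes (offset 4092) of the
- * 4 KB TXP ring window; c2_probe() seeds it with C2_SET_CUR_RX().
- */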
-
-static inline struct c2_dev *to_c2dev(struct ib_device *ibdev)
-{
-       return container_of(ibdev, struct c2_dev, ibdev);
-}
-
-static inline int c2_errno(void *reply)
-{
-       switch (c2_wr_get_result(reply)) {
-       case C2_OK:
-               return 0;
-       case CCERR_NO_BUFS:
-       case CCERR_INSUFFICIENT_RESOURCES:
-       case CCERR_ZERO_RDMA_READ_RESOURCES:
-               return -ENOMEM;
-       case CCERR_MR_IN_USE:
-       case CCERR_QP_IN_USE:
-               return -EBUSY;
-       case CCERR_ADDR_IN_USE:
-               return -EADDRINUSE;
-       case CCERR_ADDR_NOT_AVAIL:
-               return -EADDRNOTAVAIL;
-       case CCERR_CONN_RESET:
-               return -ECONNRESET;
-       case CCERR_NOT_IMPLEMENTED:
-       case CCERR_INVALID_WQE:
-               return -ENOSYS;
-       case CCERR_QP_NOT_PRIVILEGED:
-               return -EPERM;
-       case CCERR_STACK_ERROR:
-               return -EPROTO;
-       case CCERR_ACCESS_VIOLATION:
-       case CCERR_BASE_AND_BOUNDS_VIOLATION:
-               return -EFAULT;
-       case CCERR_STAG_STATE_NOT_INVALID:
-       case CCERR_INVALID_ADDRESS:
-       case CCERR_INVALID_CQ:
-       case CCERR_INVALID_EP:
-       case CCERR_INVALID_MODIFIER:
-       case CCERR_INVALID_MTU:
-       case CCERR_INVALID_PD_ID:
-       case CCERR_INVALID_QP:
-       case CCERR_INVALID_RNIC:
-       case CCERR_INVALID_STAG:
-               return -EINVAL;
-       default:
-               return -EAGAIN;
-       }
-}
-
-/* Device */
-int c2_register_device(struct c2_dev *c2dev);
-void c2_unregister_device(struct c2_dev *c2dev);
-int c2_rnic_init(struct c2_dev *c2dev);
-void c2_rnic_term(struct c2_dev *c2dev);
-void c2_rnic_interrupt(struct c2_dev *c2dev);
-int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask);
-
-/* QPs */
-int c2_alloc_qp(struct c2_dev *c2dev, struct c2_pd *pd,
-                      struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp);
-void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp);
-struct ib_qp *c2_get_qp(struct ib_device *device, int qpn);
-int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
-                       struct ib_qp_attr *attr, int attr_mask);
-int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
-                                int ord, int ird);
-int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
-                       struct ib_send_wr **bad_wr);
-int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
-                          struct ib_recv_wr **bad_wr);
-void c2_init_qp_table(struct c2_dev *c2dev);
-void c2_cleanup_qp_table(struct c2_dev *c2dev);
-void c2_set_qp_state(struct c2_qp *, int);
-struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
-
-/* PDs */
-int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
-void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
-int c2_init_pd_table(struct c2_dev *c2dev);
-void c2_cleanup_pd_table(struct c2_dev *c2dev);
-
-/* CQs */
-int c2_init_cq(struct c2_dev *c2dev, int entries,
-                     struct c2_ucontext *ctx, struct c2_cq *cq);
-void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
-void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
-void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
-int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
-
-/* CM */
-int c2_llp_connect(struct iw_cm_id *cm_id,
-                         struct iw_cm_conn_param *iw_param);
-int c2_llp_accept(struct iw_cm_id *cm_id,
-                        struct iw_cm_conn_param *iw_param);
-int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata,
-                        u8 pdata_len);
-int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog);
-int c2_llp_service_destroy(struct iw_cm_id *cm_id);
-
-/* MM */
-int c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
-                                     int page_size, int pbl_depth, u32 length,
-                                     u32 off, u64 *va, enum c2_acf acf,
-                                     struct c2_mr *mr);
-int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index);
-
-/* AE */
-void c2_ae_event(struct c2_dev *c2dev, u32 mq_index);
-
-/* MQSP Allocator */
-int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
-                            struct sp_chunk **root);
-void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root);
-__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
-                            dma_addr_t *dma_addr, gfp_t gfp_mask);
-void c2_free_mqsp(__be16* mqsp);
-#endif
diff --git a/drivers/staging/rdma/amso1100/c2_ae.c b/drivers/staging/rdma/amso1100/c2_ae.c
deleted file mode 100644
index eb7a92b..0000000
+++ /dev/null
@@ -1,327 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_status.h"
-#include "c2_ae.h"
-
-static int c2_convert_cm_status(u32 c2_status)
-{
-       switch (c2_status) {
-       case C2_CONN_STATUS_SUCCESS:
-               return 0;
-       case C2_CONN_STATUS_REJECTED:
-               return -ENETRESET;
-       case C2_CONN_STATUS_REFUSED:
-               return -ECONNREFUSED;
-       case C2_CONN_STATUS_TIMEDOUT:
-               return -ETIMEDOUT;
-       case C2_CONN_STATUS_NETUNREACH:
-               return -ENETUNREACH;
-       case C2_CONN_STATUS_HOSTUNREACH:
-               return -EHOSTUNREACH;
-       case C2_CONN_STATUS_INVALID_RNIC:
-               return -EINVAL;
-       case C2_CONN_STATUS_INVALID_QP:
-               return -EINVAL;
-       case C2_CONN_STATUS_INVALID_QP_STATE:
-               return -EINVAL;
-       case C2_CONN_STATUS_ADDR_NOT_AVAIL:
-               return -EADDRNOTAVAIL;
-       default:
-               printk(KERN_ERR PFX
-                      "%s - Unable to convert CM status: %d\n",
-                      __func__, c2_status);
-               return -EIO;
-       }
-}
-
-static const char* to_event_str(int event)
-{
-       static const char* event_str[] = {
-               "CCAE_REMOTE_SHUTDOWN",
-               "CCAE_ACTIVE_CONNECT_RESULTS",
-               "CCAE_CONNECTION_REQUEST",
-               "CCAE_LLP_CLOSE_COMPLETE",
-               "CCAE_TERMINATE_MESSAGE_RECEIVED",
-               "CCAE_LLP_CONNECTION_RESET",
-               "CCAE_LLP_CONNECTION_LOST",
-               "CCAE_LLP_SEGMENT_SIZE_INVALID",
-               "CCAE_LLP_INVALID_CRC",
-               "CCAE_LLP_BAD_FPDU",
-               "CCAE_INVALID_DDP_VERSION",
-               "CCAE_INVALID_RDMA_VERSION",
-               "CCAE_UNEXPECTED_OPCODE",
-               "CCAE_INVALID_DDP_QUEUE_NUMBER",
-               "CCAE_RDMA_READ_NOT_ENABLED",
-               "CCAE_RDMA_WRITE_NOT_ENABLED",
-               "CCAE_RDMA_READ_TOO_SMALL",
-               "CCAE_NO_L_BIT",
-               "CCAE_TAGGED_INVALID_STAG",
-               "CCAE_TAGGED_BASE_BOUNDS_VIOLATION",
-               "CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION",
-               "CCAE_TAGGED_INVALID_PD",
-               "CCAE_WRAP_ERROR",
-               "CCAE_BAD_CLOSE",
-               "CCAE_BAD_LLP_CLOSE",
-               "CCAE_INVALID_MSN_RANGE",
-               "CCAE_INVALID_MSN_GAP",
-               "CCAE_IRRQ_OVERFLOW",
-               "CCAE_IRRQ_MSN_GAP",
-               "CCAE_IRRQ_MSN_RANGE",
-               "CCAE_IRRQ_INVALID_STAG",
-               "CCAE_IRRQ_BASE_BOUNDS_VIOLATION",
-               "CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION",
-               "CCAE_IRRQ_INVALID_PD",
-               "CCAE_IRRQ_WRAP_ERROR",
-               "CCAE_CQ_SQ_COMPLETION_OVERFLOW",
-               "CCAE_CQ_RQ_COMPLETION_ERROR",
-               "CCAE_QP_SRQ_WQE_ERROR",
-               "CCAE_QP_LOCAL_CATASTROPHIC_ERROR",
-               "CCAE_CQ_OVERFLOW",
-               "CCAE_CQ_OPERATION_ERROR",
-               "CCAE_SRQ_LIMIT_REACHED",
-               "CCAE_QP_RQ_LIMIT_REACHED",
-               "CCAE_SRQ_CATASTROPHIC_ERROR",
-               "CCAE_RNIC_CATASTROPHIC_ERROR"
-       };
-
-       if (event < CCAE_REMOTE_SHUTDOWN ||
-           event > CCAE_RNIC_CATASTROPHIC_ERROR)
-               return "<invalid event>";
-
-       event -= CCAE_REMOTE_SHUTDOWN;
-       return event_str[event];
-}
-
-static const char *to_qp_state_str(int state)
-{
-       switch (state) {
-       case C2_QP_STATE_IDLE:
-               return "C2_QP_STATE_IDLE";
-       case C2_QP_STATE_CONNECTING:
-               return "C2_QP_STATE_CONNECTING";
-       case C2_QP_STATE_RTS:
-               return "C2_QP_STATE_RTS";
-       case C2_QP_STATE_CLOSING:
-               return "C2_QP_STATE_CLOSING";
-       case C2_QP_STATE_TERMINATE:
-               return "C2_QP_STATE_TERMINATE";
-       case C2_QP_STATE_ERROR:
-               return "C2_QP_STATE_ERROR";
-       default:
-               return "<invalid QP state>";
-       }
-}
-
-void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
-{
-       struct c2_mq *mq = c2dev->qptr_array[mq_index];
-       union c2wr *wr;
-       void *resource_user_context;
-       struct iw_cm_event cm_event;
-       struct ib_event ib_event;
-       enum c2_resource_indicator resource_indicator;
-       enum c2_event_id event_id;
-       unsigned long flags;
-       int status;
-       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
-       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
-
-       /*
-        * retrieve the message
-        */
-       wr = c2_mq_consume(mq);
-       if (!wr)
-               return;
-
-       memset(&ib_event, 0, sizeof(ib_event));
-       memset(&cm_event, 0, sizeof(cm_event));
-
-       event_id = c2_wr_get_id(wr);
-       resource_indicator = be32_to_cpu(wr->ae.ae_generic.resource_type);
-       resource_user_context =
-           (void *) (unsigned long) wr->ae.ae_generic.user_context;
-
-       status = cm_event.status = c2_convert_cm_status(c2_wr_get_result(wr));
-
-       pr_debug("event received c2_dev=%p, event_id=%d, "
-               "resource_indicator=%d, user_context=%p, status = %d\n",
-               c2dev, event_id, resource_indicator, resource_user_context,
-               status);
-
-       switch (resource_indicator) {
-       case C2_RES_IND_QP:{
-
-               struct c2_qp *qp = resource_user_context;
-               struct iw_cm_id *cm_id = qp->cm_id;
-               struct c2wr_ae_active_connect_results *res;
-
-               if (!cm_id) {
-                       pr_debug("event received, but cm_id is <nul>, qp=%p!\n",
-                               qp);
-                       goto ignore_it;
-               }
-               pr_debug("%s: event = %s, user_context=%llx, "
-                       "resource_type=%x, "
-                       "resource=%x, qp_state=%s\n",
-                       __func__,
-                       to_event_str(event_id),
-                       (unsigned long long) wr->ae.ae_generic.user_context,
-                       be32_to_cpu(wr->ae.ae_generic.resource_type),
-                       be32_to_cpu(wr->ae.ae_generic.resource),
-                       to_qp_state_str(be32_to_cpu(wr->ae.ae_generic.qp_state)));
-
-               c2_set_qp_state(qp, be32_to_cpu(wr->ae.ae_generic.qp_state));
-
-               switch (event_id) {
-               case CCAE_ACTIVE_CONNECT_RESULTS:
-                       res = &wr->ae.ae_active_connect_results;
-                       cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
-                       laddr->sin_addr.s_addr = res->laddr;
-                       raddr->sin_addr.s_addr = res->raddr;
-                       laddr->sin_port = res->lport;
-                       raddr->sin_port = res->rport;
-                       if (status == 0) {
-                               cm_event.private_data_len =
-                                       be32_to_cpu(res->private_data_length);
-                               cm_event.private_data = res->private_data;
-                       } else {
-                               spin_lock_irqsave(&qp->lock, flags);
-                               if (qp->cm_id) {
-                                       qp->cm_id->rem_ref(qp->cm_id);
-                                       qp->cm_id = NULL;
-                               }
-                               spin_unlock_irqrestore(&qp->lock, flags);
-                               cm_event.private_data_len = 0;
-                               cm_event.private_data = NULL;
-                       }
-                       if (cm_id->event_handler)
-                               cm_id->event_handler(cm_id, &cm_event);
-                       break;
-               case CCAE_TERMINATE_MESSAGE_RECEIVED:
-               case CCAE_CQ_SQ_COMPLETION_OVERFLOW:
-                       ib_event.device = &c2dev->ibdev;
-                       ib_event.element.qp = &qp->ibqp;
-                       ib_event.event = IB_EVENT_QP_REQ_ERR;
-
-                       if (qp->ibqp.event_handler)
-                               qp->ibqp.event_handler(&ib_event,
-                                                      qp->ibqp.qp_context);
-                       break;
-               case CCAE_BAD_CLOSE:
-               case CCAE_LLP_CLOSE_COMPLETE:
-               case CCAE_LLP_CONNECTION_RESET:
-               case CCAE_LLP_CONNECTION_LOST:
-                       BUG_ON(cm_id->event_handler == (void *) 0x6b6b6b6b);
-
-                       spin_lock_irqsave(&qp->lock, flags);
-                       if (qp->cm_id) {
-                               qp->cm_id->rem_ref(qp->cm_id);
-                               qp->cm_id = NULL;
-                       }
-                       spin_unlock_irqrestore(&qp->lock, flags);
-                       cm_event.event = IW_CM_EVENT_CLOSE;
-                       cm_event.status = 0;
-                       if (cm_id->event_handler)
-                               cm_id->event_handler(cm_id, &cm_event);
-                       break;
-               default:
-                       BUG_ON(1);
-                       pr_debug("%s:%d Unexpected event_id=%d on QP=%p, "
-                               "CM_ID=%p\n",
-                               __func__, __LINE__,
-                               event_id, qp, cm_id);
-                       break;
-               }
-               break;
-       }
-
-       case C2_RES_IND_EP:{
-
-               struct c2wr_ae_connection_request *req =
-                       &wr->ae.ae_connection_request;
-               struct iw_cm_id *cm_id =
-                       resource_user_context;
-
-               pr_debug("C2_RES_IND_EP event_id=%d\n", event_id);
-               if (event_id != CCAE_CONNECTION_REQUEST) {
-                       pr_debug("%s: Invalid event_id: %d\n",
-                               __func__, event_id);
-                       break;
-               }
-               cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
-               cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
-               laddr->sin_addr.s_addr = req->laddr;
-               raddr->sin_addr.s_addr = req->raddr;
-               laddr->sin_port = req->lport;
-               raddr->sin_port = req->rport;
-               cm_event.private_data_len =
-                       be32_to_cpu(req->private_data_length);
-               cm_event.private_data = req->private_data;
-               /*
-                * Until ird/ord negotiation via MPAv2 support is added, send
-                * max supported values
-                */
-               cm_event.ird = cm_event.ord = 128;
-
-               if (cm_id->event_handler)
-                       cm_id->event_handler(cm_id, &cm_event);
-               break;
-       }
-
-       case C2_RES_IND_CQ:{
-               struct c2_cq *cq =
-                   resource_user_context;
-
-               pr_debug("IB_EVENT_CQ_ERR\n");
-               ib_event.device = &c2dev->ibdev;
-               ib_event.element.cq = &cq->ibcq;
-               ib_event.event = IB_EVENT_CQ_ERR;
-
-               if (cq->ibcq.event_handler)
-                       cq->ibcq.event_handler(&ib_event,
-                                              cq->ibcq.cq_context);
-               break;
-       }
-
-       default:
-               printk("Bad resource indicator = %d\n",
-                      resource_indicator);
-               break;
-       }
-
- ignore_it:
-       c2_mq_free(mq);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_ae.h b/drivers/staging/rdma/amso1100/c2_ae.h
deleted file mode 100644
index 3a065c3..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_AE_H_
-#define _C2_AE_H_
-
-/*
- * WARNING: If you change this file, also bump C2_IVN_BASE
- * in common/include/clustercore/c2_ivn.h.
- */
-
-/*
- * Asynchronous Event Identifiers
- *
- * These start at 0x80 only so it's obvious from inspection that
- * they are not work-request statuses.  This isn't critical.
- *
- * NOTE: these event id's must fit in eight bits.
- */
-enum c2_event_id {
-       CCAE_REMOTE_SHUTDOWN = 0x80,
-       CCAE_ACTIVE_CONNECT_RESULTS,
-       CCAE_CONNECTION_REQUEST,
-       CCAE_LLP_CLOSE_COMPLETE,
-       CCAE_TERMINATE_MESSAGE_RECEIVED,
-       CCAE_LLP_CONNECTION_RESET,
-       CCAE_LLP_CONNECTION_LOST,
-       CCAE_LLP_SEGMENT_SIZE_INVALID,
-       CCAE_LLP_INVALID_CRC,
-       CCAE_LLP_BAD_FPDU,
-       CCAE_INVALID_DDP_VERSION,
-       CCAE_INVALID_RDMA_VERSION,
-       CCAE_UNEXPECTED_OPCODE,
-       CCAE_INVALID_DDP_QUEUE_NUMBER,
-       CCAE_RDMA_READ_NOT_ENABLED,
-       CCAE_RDMA_WRITE_NOT_ENABLED,
-       CCAE_RDMA_READ_TOO_SMALL,
-       CCAE_NO_L_BIT,
-       CCAE_TAGGED_INVALID_STAG,
-       CCAE_TAGGED_BASE_BOUNDS_VIOLATION,
-       CCAE_TAGGED_ACCESS_RIGHTS_VIOLATION,
-       CCAE_TAGGED_INVALID_PD,
-       CCAE_WRAP_ERROR,
-       CCAE_BAD_CLOSE,
-       CCAE_BAD_LLP_CLOSE,
-       CCAE_INVALID_MSN_RANGE,
-       CCAE_INVALID_MSN_GAP,
-       CCAE_IRRQ_OVERFLOW,
-       CCAE_IRRQ_MSN_GAP,
-       CCAE_IRRQ_MSN_RANGE,
-       CCAE_IRRQ_INVALID_STAG,
-       CCAE_IRRQ_BASE_BOUNDS_VIOLATION,
-       CCAE_IRRQ_ACCESS_RIGHTS_VIOLATION,
-       CCAE_IRRQ_INVALID_PD,
-       CCAE_IRRQ_WRAP_ERROR,
-       CCAE_CQ_SQ_COMPLETION_OVERFLOW,
-       CCAE_CQ_RQ_COMPLETION_ERROR,
-       CCAE_QP_SRQ_WQE_ERROR,
-       CCAE_QP_LOCAL_CATASTROPHIC_ERROR,
-       CCAE_CQ_OVERFLOW,
-       CCAE_CQ_OPERATION_ERROR,
-       CCAE_SRQ_LIMIT_REACHED,
-       CCAE_QP_RQ_LIMIT_REACHED,
-       CCAE_SRQ_CATASTROPHIC_ERROR,
-       CCAE_RNIC_CATASTROPHIC_ERROR
-/* WARNING If you add more id's, make sure their values fit in eight bits. */
-};
-
-/*
- * Resource Indicators and Identifiers
- */
-enum c2_resource_indicator {
-       C2_RES_IND_QP = 1,
-       C2_RES_IND_EP,
-       C2_RES_IND_CQ,
-       C2_RES_IND_SRQ,
-};
-
-#endif /* _C2_AE_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_alloc.c b/drivers/staging/rdma/amso1100/c2_alloc.c
deleted file mode 100644
index 039872d..0000000
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/errno.h>
-#include <linux/bitmap.h>
-
-#include "c2.h"
-
-static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,
-                              struct sp_chunk **head)
-{
-       int i;
-       struct sp_chunk *new_head;
-       dma_addr_t dma_addr;
-
-       new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,
-                                     &dma_addr, gfp_mask);
-       if (new_head == NULL)
-               return -ENOMEM;
-
-       new_head->dma_addr = dma_addr;
-       dma_unmap_addr_set(new_head, mapping, new_head->dma_addr);
-
-       new_head->next = NULL;
-       new_head->head = 0;
-
-       /* build list where each index is the next free slot */
-       for (i = 0;
-            i < (PAGE_SIZE - sizeof(struct sp_chunk) -
-                 sizeof(u16)) / sizeof(u16) - 1;
-            i++) {
-               new_head->shared_ptr[i] = i + 1;
-       }
-       /* terminate list */
-       new_head->shared_ptr[i] = 0xFFFF;
-
-       *head = new_head;
-       return 0;
-}
-
-int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,
-                     struct sp_chunk **root)
-{
-       return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);
-}
-
-void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)
-{
-       struct sp_chunk *next;
-
-       while (root) {
-               next = root->next;
-               dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,
-                                 dma_unmap_addr(root, mapping));
-               root = next;
-       }
-}
-
-__be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,
-                     dma_addr_t *dma_addr, gfp_t gfp_mask)
-{
-       u16 mqsp;
-
-       while (head) {
-               mqsp = head->head;
-               if (mqsp != 0xFFFF) {
-                       head->head = head->shared_ptr[mqsp];
-                       break;
-               } else if (head->next == NULL) {
-                       if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==
-                           0) {
-                               head = head->next;
-                               mqsp = head->head;
-                               head->head = head->shared_ptr[mqsp];
-                               break;
-                       } else
-                               return NULL;
-               } else
-                       head = head->next;
-       }
-       if (head) {
-               *dma_addr = head->dma_addr +
-                           ((unsigned long) &(head->shared_ptr[mqsp]) -
-                            (unsigned long) head);
-               pr_debug("%s addr %p dma_addr %llx\n", __func__,
-                        &(head->shared_ptr[mqsp]), (unsigned long long) *dma_addr);
-               return (__force __be16 *) &(head->shared_ptr[mqsp]);
-       }
-       return NULL;
-}
-
-void c2_free_mqsp(__be16 *mqsp)
-{
-       struct sp_chunk *head;
-       u16 idx;
-
-       /* The chunk containing this ptr begins at the page boundary */
-       head = (struct sp_chunk *) ((unsigned long) mqsp & PAGE_MASK);
-
-       /* Link head to new mqsp */
-       *mqsp = (__force __be16) head->head;
-
-       /* Compute the shared_ptr index */
-       idx = (offset_in_page(mqsp)) >> 1;
-       idx -= offsetof(struct sp_chunk, shared_ptr) >> 1;
-
-       /* Point this index at the head */
-       head->shared_ptr[idx] = head->head;
-
-       /* Point head at this index */
-       head->head = idx;
-}
diff --git a/drivers/staging/rdma/amso1100/c2_cm.c b/drivers/staging/rdma/amso1100/c2_cm.c
deleted file mode 100644 (file)
index f8dbdb9..0000000
+++ /dev/null
@@ -1,458 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc.  All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_vq.h"
-#include <rdma/iw_cm.h>
-
-int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-       struct c2_dev *c2dev = to_c2dev(cm_id->device);
-       struct ib_qp *ibqp;
-       struct c2_qp *qp;
-       struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
-       struct c2_vq_req *vq_req;
-       int err;
-       struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
-
-       if (cm_id->remote_addr.ss_family != AF_INET)
-               return -ENOSYS;
-
-       ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
-       if (!ibqp)
-               return -EINVAL;
-       qp = to_c2qp(ibqp);
-
-       /* Associate QP <--> CM_ID */
-       cm_id->provider_data = qp;
-       cm_id->add_ref(cm_id);
-       qp->cm_id = cm_id;
-
-       /*
-        * only support the max private_data length
-        */
-       if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
-               err = -EINVAL;
-               goto bail0;
-       }
-       /*
-        * Set the rdma read limits
-        */
-       err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
-       if (err)
-               goto bail0;
-
-       /*
-        * Create and send a WR_QP_CONNECT...
-        */
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       c2_wr_set_id(wr, CCWR_QP_CONNECT);
-       wr->hdr.context = 0;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->qp_handle = qp->adapter_handle;
-
-       wr->remote_addr = raddr->sin_addr.s_addr;
-       wr->remote_port = raddr->sin_port;
-
-       /*
-        * Move any private data from the caller's buf into
-        * the WR.
-        */
-       if (iw_param->private_data) {
-               wr->private_data_length =
-                       cpu_to_be32(iw_param->private_data_len);
-               memcpy(&wr->private_data[0], iw_param->private_data,
-                      iw_param->private_data_len);
-       } else
-               wr->private_data_length = 0;
-
-       /*
-        * Send WR to adapter.  NOTE: There is no synch reply from
-        * the adapter.
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       vq_req_free(c2dev, vq_req);
-
- bail1:
-       kfree(wr);
- bail0:
-       if (err) {
-               /*
-                * If we fail, release reference on QP and
-                * disassociate QP from CM_ID
-                */
-               cm_id->provider_data = NULL;
-               qp->cm_id = NULL;
-               cm_id->rem_ref(cm_id);
-       }
-       return err;
-}
-
-int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
-{
-       struct c2_dev *c2dev;
-       struct c2wr_ep_listen_create_req wr;
-       struct c2wr_ep_listen_create_rep *reply;
-       struct c2_vq_req *vq_req;
-       int err;
-       struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
-
-       if (cm_id->local_addr.ss_family != AF_INET)
-               return -ENOSYS;
-
-       c2dev = to_c2dev(cm_id->device);
-       if (c2dev == NULL)
-               return -EINVAL;
-
-       /*
-        * Allocate verbs request.
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       /*
-        * Build the WR
-        */
-       c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
-       wr.hdr.context = (u64) (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.local_addr = laddr->sin_addr.s_addr;
-       wr.local_port = laddr->sin_port;
-       wr.backlog = cpu_to_be32(backlog);
-       wr.user_context = (u64) (unsigned long) cm_id;
-
-       /*
-        * Reference the request struct.  Dereferenced in the int handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) &wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail0;
-
-       /*
-        * Process reply
-        */
-       reply =
-           (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       if ((err = c2_errno(reply)) != 0)
-               goto bail1;
-
-       /*
-        * Keep the adapter handle. Used in subsequent destroy
-        */
-       cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
-
-       /*
-        * free vq stuff
-        */
-       vq_repbuf_free(c2dev, reply);
-       vq_req_free(c2dev, vq_req);
-
-       return 0;
-
- bail1:
-       vq_repbuf_free(c2dev, reply);
- bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-
-int c2_llp_service_destroy(struct iw_cm_id *cm_id)
-{
-
-       struct c2_dev *c2dev;
-       struct c2wr_ep_listen_destroy_req wr;
-       struct c2wr_ep_listen_destroy_rep *reply;
-       struct c2_vq_req *vq_req;
-       int err;
-
-       c2dev = to_c2dev(cm_id->device);
-       if (c2dev == NULL)
-               return -EINVAL;
-
-       /*
-        * Allocate verbs request.
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       /*
-        * Build the WR
-        */
-       c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
-
-       /*
-        * reference the request struct.  dereferenced in the int handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) &wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail0;
-
-       /*
-        * Process reply
-        */
-       reply = (struct c2wr_ep_listen_destroy_rep *) (unsigned long)
-               vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       vq_repbuf_free(c2dev, reply);
- bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-       struct c2_dev *c2dev = to_c2dev(cm_id->device);
-       struct c2_qp *qp;
-       struct ib_qp *ibqp;
-       struct c2wr_cr_accept_req *wr;  /* variable length WR */
-       struct c2_vq_req *vq_req;
-       struct c2wr_cr_accept_rep *reply;       /* VQ Reply msg ptr. */
-       int err;
-
-       ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
-       if (!ibqp)
-               return -EINVAL;
-       qp = to_c2qp(ibqp);
-
-       /* Set the RDMA read limits */
-       err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
-       if (err)
-               goto bail0;
-
-       /* Allocate verbs request. */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-       vq_req->qp = qp;
-       vq_req->cm_id = cm_id;
-       vq_req->event = IW_CM_EVENT_ESTABLISHED;
-
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       /* Build the WR */
-       c2_wr_set_id(wr, CCWR_CR_ACCEPT);
-       wr->hdr.context = (unsigned long) vq_req;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
-       wr->qp_handle = qp->adapter_handle;
-
-       /* Replace the cr_handle with the QP after accept */
-       cm_id->provider_data = qp;
-       cm_id->add_ref(cm_id);
-       qp->cm_id = cm_id;
-
-       cm_id->provider_data = qp;
-
-       /* Validate private_data length */
-       if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
-               err = -EINVAL;
-               goto bail1;
-       }
-
-       if (iw_param->private_data) {
-               wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
-               memcpy(&wr->private_data[0],
-                      iw_param->private_data, iw_param->private_data_len);
-       } else
-               wr->private_data_length = 0;
-
-       /* Reference the request struct.  Dereferenced in the int handler. */
-       vq_req_get(c2dev, vq_req);
-
-       /* Send WR to adapter */
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       /* Wait for reply from adapter */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       /* Check that reply is present */
-       reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       err = c2_errno(reply);
-       vq_repbuf_free(c2dev, reply);
-
-       if (!err)
-               c2_set_qp_state(qp, C2_QP_STATE_RTS);
- bail1:
-       kfree(wr);
-       vq_req_free(c2dev, vq_req);
- bail0:
-       if (err) {
-               /*
-                * If we fail, release reference on QP and
-                * disassociate QP from CM_ID
-                */
-               cm_id->provider_data = NULL;
-               qp->cm_id = NULL;
-               cm_id->rem_ref(cm_id);
-       }
-       return err;
-}
-
-int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
-       struct c2_dev *c2dev;
-       struct c2wr_cr_reject_req wr;
-       struct c2_vq_req *vq_req;
-       struct c2wr_cr_reject_rep *reply;
-       int err;
-
-       c2dev = to_c2dev(cm_id->device);
-
-       /*
-        * Allocate verbs request.
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       /*
-        * Build the WR
-        */
-       c2_wr_set_id(&wr, CCWR_CR_REJECT);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
-
-       /*
-        * reference the request struct.  dereferenced in the int handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) &wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail0;
-
-       /*
-        * Process reply
-        */
-       reply = (struct c2wr_cr_reject_rep *) (unsigned long)
-               vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-       err = c2_errno(reply);
-       /*
-        * free vq stuff
-        */
-       vq_repbuf_free(c2dev, reply);
-
- bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
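
The two connection-manager entry points above (c2_llp_accept and c2_llp_reject) follow the same verbs-queue handshake: allocate a vq_req, build a work request whose header context points back at it, take a reference for the reply path, post it with vq_send_wr, sleep in vq_wait_for_reply, then translate the adapter status with c2_errno. The stand-alone sketch below models only that control flow; the sketch_* types and the simulate_adapter() helper are invented for illustration and are not part of the driver.

/* Minimal user-space sketch of the request/reply handshake used above
 * (alloc -> build WR -> take reference -> send -> wait -> check status).
 * The adapter side is faked so the program runs on its own.
 */
#include <stdio.h>
#include <stdlib.h>

struct sketch_vq_req {
        int refcount;
        int reply_err;          /* filled in by the "interrupt handler" */
        int reply_ready;
};

static struct sketch_vq_req *sketch_vq_req_alloc(void)
{
        return calloc(1, sizeof(struct sketch_vq_req));
}

static void sketch_vq_req_get(struct sketch_vq_req *r) { r->refcount++; }
static void sketch_vq_req_put(struct sketch_vq_req *r) { r->refcount--; }

/* Stand-in for the adapter posting a reply and waking the waiter. */
static void simulate_adapter(struct sketch_vq_req *r, int err)
{
        r->reply_err = err;
        r->reply_ready = 1;
        sketch_vq_req_put(r);   /* drop the reference taken before send */
}

static int sketch_send_and_wait(int wr_id)
{
        struct sketch_vq_req *req = sketch_vq_req_alloc();
        int err;

        if (!req)
                return -1;

        sketch_vq_req_get(req);         /* dropped by the reply path */
        simulate_adapter(req, 0);       /* pretend the WR was sent and answered */

        err = req->reply_ready ? req->reply_err : -1;
        printf("WR %d completed, err=%d\n", wr_id, err);
        free(req);
        return err;
}

int main(void)
{
        return sketch_send_and_wait(42) ? 1 : 0;
}
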
diff --git a/drivers/staging/rdma/amso1100/c2_cq.c b/drivers/staging/rdma/amso1100/c2_cq.c
deleted file mode 100644 (file)
index 7ad0c08..0000000
+++ /dev/null
@@ -1,437 +0,0 @@
-/*
- * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_CQ_MSG_SIZE ((sizeof(struct c2wr_ce) + 32-1) & ~(32-1))
-
-static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)
-{
-       struct c2_cq *cq;
-       unsigned long flags;
-
-       spin_lock_irqsave(&c2dev->lock, flags);
-       cq = c2dev->qptr_array[cqn];
-       if (!cq) {
-               spin_unlock_irqrestore(&c2dev->lock, flags);
-               return NULL;
-       }
-       atomic_inc(&cq->refcount);
-       spin_unlock_irqrestore(&c2dev->lock, flags);
-       return cq;
-}
-
-static void c2_cq_put(struct c2_cq *cq)
-{
-       if (atomic_dec_and_test(&cq->refcount))
-               wake_up(&cq->wait);
-}
-
-void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)
-{
-       struct c2_cq *cq;
-
-       cq = c2_cq_get(c2dev, mq_index);
-       if (!cq) {
-               printk("discarding events on destroyed CQN=%d\n", mq_index);
-               return;
-       }
-
-       (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-       c2_cq_put(cq);
-}
-
-void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)
-{
-       struct c2_cq *cq;
-       struct c2_mq *q;
-
-       cq = c2_cq_get(c2dev, mq_index);
-       if (!cq)
-               return;
-
-       spin_lock_irq(&cq->lock);
-       q = &cq->mq;
-       if (q && !c2_mq_empty(q)) {
-               u16 priv = q->priv;
-               struct c2wr_ce *msg;
-
-               while (priv != be16_to_cpu(*q->shared)) {
-                       msg = (struct c2wr_ce *)
-                               (q->msg_pool.host + priv * q->msg_size);
-                       if (msg->qp_user_context == (u64) (unsigned long) qp) {
-                               msg->qp_user_context = (u64) 0;
-                       }
-                       priv = (priv + 1) % q->q_size;
-               }
-       }
-       spin_unlock_irq(&cq->lock);
-       c2_cq_put(cq);
-}
-
-static inline enum ib_wc_status c2_cqe_status_to_openib(u8 status)
-{
-       switch (status) {
-       case C2_OK:
-               return IB_WC_SUCCESS;
-       case CCERR_FLUSHED:
-               return IB_WC_WR_FLUSH_ERR;
-       case CCERR_BASE_AND_BOUNDS_VIOLATION:
-               return IB_WC_LOC_PROT_ERR;
-       case CCERR_ACCESS_VIOLATION:
-               return IB_WC_LOC_ACCESS_ERR;
-       case CCERR_TOTAL_LENGTH_TOO_BIG:
-               return IB_WC_LOC_LEN_ERR;
-       case CCERR_INVALID_WINDOW:
-               return IB_WC_MW_BIND_ERR;
-       default:
-               return IB_WC_GENERAL_ERR;
-       }
-}
-
-
-static inline int c2_poll_one(struct c2_dev *c2dev,
-                             struct c2_cq *cq, struct ib_wc *entry)
-{
-       struct c2wr_ce *ce;
-       struct c2_qp *qp;
-       int is_recv = 0;
-
-       ce = c2_mq_consume(&cq->mq);
-       if (!ce) {
-               return -EAGAIN;
-       }
-
-       /*
-        * If the qp returned is NULL then this qp has already
-        * been freed and we are unable to process the completion.
-        * Try pulling the next message.
-        */
-       while ((qp =
-               (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
-               c2_mq_free(&cq->mq);
-               ce = c2_mq_consume(&cq->mq);
-               if (!ce)
-                       return -EAGAIN;
-       }
-
-       entry->status = c2_cqe_status_to_openib(c2_wr_get_result(ce));
-       entry->wr_id = ce->hdr.context;
-       entry->qp = &qp->ibqp;
-       entry->wc_flags = 0;
-       entry->slid = 0;
-       entry->sl = 0;
-       entry->src_qp = 0;
-       entry->dlid_path_bits = 0;
-       entry->pkey_index = 0;
-
-       switch (c2_wr_get_id(ce)) {
-       case C2_WR_TYPE_SEND:
-               entry->opcode = IB_WC_SEND;
-               break;
-       case C2_WR_TYPE_RDMA_WRITE:
-               entry->opcode = IB_WC_RDMA_WRITE;
-               break;
-       case C2_WR_TYPE_RDMA_READ:
-               entry->opcode = IB_WC_RDMA_READ;
-               break;
-       case C2_WR_TYPE_RECV:
-               entry->byte_len = be32_to_cpu(ce->bytes_rcvd);
-               entry->opcode = IB_WC_RECV;
-               is_recv = 1;
-               break;
-       default:
-               break;
-       }
-
-       /* consume the WQEs */
-       if (is_recv)
-               c2_mq_lconsume(&qp->rq_mq, 1);
-       else
-               c2_mq_lconsume(&qp->sq_mq,
-                              be32_to_cpu(c2_wr_get_wqe_count(ce)) + 1);
-
-       /* free the message */
-       c2_mq_free(&cq->mq);
-
-       return 0;
-}
-
-int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
-       struct c2_dev *c2dev = to_c2dev(ibcq->device);
-       struct c2_cq *cq = to_c2cq(ibcq);
-       unsigned long flags;
-       int npolled, err;
-
-       spin_lock_irqsave(&cq->lock, flags);
-
-       for (npolled = 0; npolled < num_entries; ++npolled) {
-
-               err = c2_poll_one(c2dev, cq, entry + npolled);
-               if (err)
-                       break;
-       }
-
-       spin_unlock_irqrestore(&cq->lock, flags);
-
-       return npolled;
-}
-
-int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
-       struct c2_mq_shared __iomem *shared;
-       struct c2_cq *cq;
-       unsigned long flags;
-       int ret = 0;
-
-       cq = to_c2cq(ibcq);
-       shared = cq->mq.peer;
-
-       if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
-               writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
-       else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
-               writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
-       else
-               return -EINVAL;
-
-       writeb(CQ_WAIT_FOR_DMA | CQ_ARMED, &shared->armed);
-
-       /*
-        * Now read back shared->armed to make the PCI
-        * write synchronous.  This is necessary for
-        * correct cq notification semantics.
-        */
-       readb(&shared->armed);
-
-       if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
-               spin_lock_irqsave(&cq->lock, flags);
-               ret = !c2_mq_empty(&cq->mq);
-               spin_unlock_irqrestore(&cq->lock, flags);
-       }
-
-       return ret;
-}
-
-static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
-{
-       dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq->msg_size,
-                         mq->msg_pool.host, dma_unmap_addr(mq, mapping));
-}
-
-static int c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq,
-                          size_t q_size, size_t msg_size)
-{
-       u8 *pool_start;
-
-       if (q_size > SIZE_MAX / msg_size)
-               return -EINVAL;
-
-       pool_start = dma_alloc_coherent(&c2dev->pcidev->dev, q_size * msg_size,
-                                       &mq->host_dma, GFP_KERNEL);
-       if (!pool_start)
-               return -ENOMEM;
-
-       c2_mq_rep_init(mq,
-                      0,               /* index (currently unknown) */
-                      q_size,
-                      msg_size,
-                      pool_start,
-                      NULL,    /* peer (currently unknown) */
-                      C2_MQ_HOST_TARGET);
-
-       dma_unmap_addr_set(mq, mapping, mq->host_dma);
-
-       return 0;
-}
-
-int c2_init_cq(struct c2_dev *c2dev, int entries,
-              struct c2_ucontext *ctx, struct c2_cq *cq)
-{
-       struct c2wr_cq_create_req wr;
-       struct c2wr_cq_create_rep *reply;
-       unsigned long peer_pa;
-       struct c2_vq_req *vq_req;
-       int err;
-
-       might_sleep();
-
-       cq->ibcq.cqe = entries - 1;
-       cq->is_kernel = !ctx;
-
-       /* Allocate a shared pointer */
-       cq->mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                     &cq->mq.shared_dma, GFP_KERNEL);
-       if (!cq->mq.shared)
-               return -ENOMEM;
-
-       /* Allocate pages for the message pool */
-       err = c2_alloc_cq_buf(c2dev, &cq->mq, entries + 1, C2_CQ_MSG_SIZE);
-       if (err)
-               goto bail0;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_CQ_CREATE);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.msg_size = cpu_to_be32(cq->mq.msg_size);
-       wr.depth = cpu_to_be32(cq->mq.q_size);
-       wr.shared_ht = cpu_to_be64(cq->mq.shared_dma);
-       wr.msg_pool = cpu_to_be64(cq->mq.host_dma);
-       wr.user_context = (u64) (unsigned long) (cq);
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *)&wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail2;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail2;
-
-       reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail2;
-       }
-
-       if ((err = c2_errno(reply)) != 0)
-               goto bail3;
-
-       cq->adapter_handle = reply->cq_handle;
-       cq->mq.index = be32_to_cpu(reply->mq_index);
-
-       peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared);
-       cq->mq.peer = ioremap_nocache(peer_pa, PAGE_SIZE);
-       if (!cq->mq.peer) {
-               err = -ENOMEM;
-               goto bail3;
-       }
-
-       vq_repbuf_free(c2dev, reply);
-       vq_req_free(c2dev, vq_req);
-
-       spin_lock_init(&cq->lock);
-       atomic_set(&cq->refcount, 1);
-       init_waitqueue_head(&cq->wait);
-
-       /*
-        * Use the MQ index allocated by the adapter to
-        * store the CQ in the qptr_array
-        */
-       cq->cqn = cq->mq.index;
-       c2dev->qptr_array[cq->cqn] = cq;
-
-       return 0;
-
-bail3:
-       vq_repbuf_free(c2dev, reply);
-bail2:
-       vq_req_free(c2dev, vq_req);
-bail1:
-       c2_free_cq_buf(c2dev, &cq->mq);
-bail0:
-       c2_free_mqsp(cq->mq.shared);
-
-       return err;
-}
-
-void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq)
-{
-       int err;
-       struct c2_vq_req *vq_req;
-       struct c2wr_cq_destroy_req wr;
-       struct c2wr_cq_destroy_rep *reply;
-
-       might_sleep();
-
-       /* Clear CQ from the qptr array */
-       spin_lock_irq(&c2dev->lock);
-       c2dev->qptr_array[cq->mq.index] = NULL;
-       atomic_dec(&cq->refcount);
-       spin_unlock_irq(&c2dev->lock);
-
-       wait_event(cq->wait, !atomic_read(&cq->refcount));
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               goto bail0;
-       }
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_CQ_DESTROY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.cq_handle = cq->adapter_handle;
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *)&wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg);
-       if (reply)
-               vq_repbuf_free(c2dev, reply);
-bail1:
-       vq_req_free(c2dev, vq_req);
-bail0:
-       if (cq->is_kernel) {
-               c2_free_cq_buf(c2dev, &cq->mq);
-       }
-
-       return;
-}
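
c2_poll_cq() above drains at most num_entries completions under the CQ lock, stopping as soon as c2_poll_one() reports an empty queue (the driver uses -EAGAIN for that), and returns how many entries it actually filled. A minimal user-space sketch of that loop follows; poll_one() and the pending[] array are made-up stand-ins, not the real driver code.

/* Sketch of the c2_poll_cq() pattern: poll up to num_entries completions,
 * stop at the first "queue empty" indication, return the count polled.
 */
#include <stdio.h>

#define SKETCH_EAGAIN 11

static int pending[] = { 101, 102, 103 };
static int next_idx;

static int poll_one(int *entry)
{
        if (next_idx >= (int)(sizeof(pending) / sizeof(pending[0])))
                return -SKETCH_EAGAIN;  /* nothing left on the queue */
        *entry = pending[next_idx++];
        return 0;
}

static int poll_cq(int num_entries, int *entries)
{
        int npolled;

        for (npolled = 0; npolled < num_entries; ++npolled) {
                if (poll_one(&entries[npolled]))
                        break;          /* queue drained */
        }
        return npolled;                 /* completions actually returned */
}

int main(void)
{
        int wc[8];
        int n = poll_cq(8, wc);

        printf("polled %d completions, first=%d\n", n, n ? wc[0] : -1);
        return 0;
}
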
diff --git a/drivers/staging/rdma/amso1100/c2_intr.c b/drivers/staging/rdma/amso1100/c2_intr.c
deleted file mode 100644 (file)
index 74b32a9..0000000
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include <rdma/iw_cm.h>
-#include "c2_vq.h"
-
-static void handle_mq(struct c2_dev *c2dev, u32 index);
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
-
-/*
- * Handle RNIC interrupts
- */
-void c2_rnic_interrupt(struct c2_dev *c2dev)
-{
-       unsigned int mq_index;
-
-       while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {
-               mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);
-               if (mq_index & 0x80000000) {
-                       break;
-               }
-
-               c2dev->hints_read++;
-               handle_mq(c2dev, mq_index);
-       }
-
-}
-
-/*
- * Top level MQ handler
- */
-static void handle_mq(struct c2_dev *c2dev, u32 mq_index)
-{
-       if (c2dev->qptr_array[mq_index] == NULL) {
-               pr_debug("handle_mq: stray activity for mq_index=%d\n",
-                        mq_index);
-               return;
-       }
-
-       switch (mq_index) {
-       case (0):
-               /*
-                * An index of 0 in the activity queue
-                * indicates the req vq now has messages
-                * available...
-                *
-                * Wake up any waiters waiting on req VQ
-                * message availability.
-                */
-               wake_up(&c2dev->req_vq_wo);
-               break;
-       case (1):
-               handle_vq(c2dev, mq_index);
-               break;
-       case (2):
-               /* We have to purge the VQ in case there are pending
-                * accept reply requests that would result in the
-                * generation of an ESTABLISHED event. If we don't
-                * generate these first, a CLOSE event could end up
-                * being delivered before the ESTABLISHED event.
-                */
-               handle_vq(c2dev, 1);
-
-               c2_ae_event(c2dev, mq_index);
-               break;
-       default:
-               /* There is no event synchronization between CQ events
-                * and AE or CM events. In fact, CQE could be
-                * delivered for all of the I/O up to and including the
-                * FLUSH for a peer disconenct prior to the ESTABLISHED
-                * FLUSH for a peer disconnect prior to the ESTABLISHED
-                * event being delivered to the app. The reason for this
-                * is that CM events are delivered on a thread, while AE
-                * and CQ events are delivered in interrupt context.
-               c2_cq_event(c2dev, mq_index);
-               break;
-       }
-
-       return;
-}
-
-/*
- * Handles verbs WR replies.
- */
-static void handle_vq(struct c2_dev *c2dev, u32 mq_index)
-{
-       void *adapter_msg, *reply_msg;
-       struct c2wr_hdr *host_msg;
-       struct c2wr_hdr tmp;
-       struct c2_mq *reply_vq;
-       struct c2_vq_req *req;
-       struct iw_cm_event cm_event;
-       int err;
-
-       reply_vq = c2dev->qptr_array[mq_index];
-
-       /*
-        * get next msg from mq_index into adapter_msg.
-        * don't free it yet.
-        */
-       adapter_msg = c2_mq_consume(reply_vq);
-       if (adapter_msg == NULL) {
-               return;
-       }
-
-       host_msg = vq_repbuf_alloc(c2dev);
-
-       /*
-        * If we can't get a host buffer, then we'll still
-        * wake up the waiter; we just won't give it the msg.
-        * It is assumed the waiter will deal with this...
-        */
-       if (!host_msg) {
-               pr_debug("handle_vq: no repbufs!\n");
-
-               /*
-                * just copy the WR header into a local variable.
-                * this allows us to still demux on the context
-                */
-               host_msg = &tmp;
-               memcpy(host_msg, adapter_msg, sizeof(tmp));
-               reply_msg = NULL;
-       } else {
-               memcpy(host_msg, adapter_msg, reply_vq->msg_size);
-               reply_msg = host_msg;
-       }
-
-       /*
-        * consume the msg from the MQ
-        */
-       c2_mq_free(reply_vq);
-
-       /*
-        * wakeup the waiter.
-        */
-       req = (struct c2_vq_req *) (unsigned long) host_msg->context;
-       if (req == NULL) {
-               /*
-                * We should never get here, as the adapter should
-                * never send us a reply that we're not expecting.
-                */
-               if (reply_msg != NULL)
-                       vq_repbuf_free(c2dev, host_msg);
-               pr_debug("handle_vq: UNEXPECTEDLY got NULL req\n");
-               return;
-       }
-
-       if (reply_msg)
-               err = c2_errno(reply_msg);
-       else
-               err = -ENOMEM;
-
-       if (!err) switch (req->event) {
-       case IW_CM_EVENT_ESTABLISHED:
-               c2_set_qp_state(req->qp,
-                               C2_QP_STATE_RTS);
-               /*
-                * Until ird/ord negotiation via MPAv2 support is added, send
-                * max supported values
-                */
-               cm_event.ird = cm_event.ord = 128;
-       case IW_CM_EVENT_CLOSE:
-
-               /*
-                * Deliver the event to the CM.  ESTABLISHED falls
-                * through to here after the QP has been moved to RTS
-                * above.
-                */
-               cm_event.event = req->event;
-               cm_event.status = 0;
-               cm_event.local_addr = req->cm_id->local_addr;
-               cm_event.remote_addr = req->cm_id->remote_addr;
-               cm_event.private_data = NULL;
-               cm_event.private_data_len = 0;
-               req->cm_id->event_handler(req->cm_id, &cm_event);
-               break;
-       default:
-               break;
-       }
-
-       req->reply_msg = (u64) (unsigned long) (reply_msg);
-       atomic_set(&req->reply_ready, 1);
-       wake_up(&req->wait_object);
-
-       /*
-        * If the request was cancelled, then this put will
-        * free the vq_req memory...and reply_msg!!!
-        */
-       vq_req_put(c2dev, req);
-}
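
c2_rnic_interrupt() above keeps consuming MQ indexes from the hint register until its cached hints_read counter catches up with the count the adapter publishes, and handle_mq() dispatches on that index (0 = request VQ has space, 1 = reply VQ, 2 = async events, anything else = a CQ). The sketch below models just that dispatch loop in user space; shared_hint_count, read_hint() and the hints[] table are invented placeholders for the real shared memory and register read.

/* Sketch of the hint-count loop in c2_rnic_interrupt(): keep reading MQ
 * indexes until the locally cached hints_read catches up with the count
 * the "adapter" advertises, dispatching each index as it arrives.
 */
#include <stdio.h>

static unsigned short shared_hint_count = 3;    /* "adapter" side  */
static unsigned short hints_read;               /* host-side cache */

static unsigned int read_hint(void)
{
        static const unsigned int hints[] = { 0, 2, 5 };
        return hints[hints_read];
}

static void handle_mq(unsigned int mq_index)
{
        if (mq_index == 0)
                printf("request VQ has space again, wake senders\n");
        else if (mq_index == 1)
                printf("reply VQ message, wake the blocked verbs caller\n");
        else if (mq_index == 2)
                printf("async event queue activity\n");
        else
                printf("completion event on CQ mapped to MQ %u\n", mq_index);
}

int main(void)
{
        while (hints_read != shared_hint_count) {
                unsigned int mq_index = read_hint();

                hints_read++;
                handle_mq(mq_index);
        }
        return 0;
}
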
diff --git a/drivers/staging/rdma/amso1100/c2_mm.c b/drivers/staging/rdma/amso1100/c2_mm.c
deleted file mode 100644 (file)
index 25081e2..0000000
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-
-#define PBL_VIRT 1
-#define PBL_PHYS 2
-
-/*
- * Send all the PBL messages to convey the remainder of the PBL
- * Wait for the adapter's reply on the last one.
- * This is indicated by setting the MEM_PBL_COMPLETE in the flags.
- *
- * NOTE:  vq_req is _not_ freed by this function.  The VQ Host
- *       Reply buffer _is_ freed by this function.
- */
-static int
-send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,
-                 unsigned long va, u32 pbl_depth,
-                 struct c2_vq_req *vq_req, int pbl_type)
-{
-       u32 pbe_count;          /* amt that fits in a PBL msg */
-       u32 count;              /* amt in this PBL MSG. */
-       struct c2wr_nsmr_pbl_req *wr;   /* PBL WR ptr */
-       struct c2wr_nsmr_pbl_rep *reply;        /* reply ptr */
-       int err, pbl_virt, pbl_index, i;
-
-       switch (pbl_type) {
-       case PBL_VIRT:
-               pbl_virt = 1;
-               break;
-       case PBL_PHYS:
-               pbl_virt = 0;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       pbe_count = (c2dev->req_vq.msg_size -
-                    sizeof(struct c2wr_nsmr_pbl_req)) / sizeof(u64);
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               return -ENOMEM;
-       }
-       c2_wr_set_id(wr, CCWR_NSMR_PBL);
-
-       /*
-        * Only the last PBL message will generate a reply from the verbs,
-        * so we set the context to 0 indicating there is no kernel verbs
-        * handler blocked awaiting this reply.
-        */
-       wr->hdr.context = 0;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->stag_index = stag_index;    /* already swapped */
-       wr->flags = 0;
-       pbl_index = 0;
-       while (pbl_depth) {
-               count = min(pbe_count, pbl_depth);
-               wr->addrs_length = cpu_to_be32(count);
-
-               /*
-                *  If this is the last message, then reference the
-                *  vq request struct because we're going to wait for a reply.
-                *  Also mark this PBL msg as the last one.
-                */
-               if (count == pbl_depth) {
-                       /*
-                        * reference the request struct.  dereferenced in the
-                        * int handler.
-                        */
-                       vq_req_get(c2dev, vq_req);
-                       wr->flags = cpu_to_be32(MEM_PBL_COMPLETE);
-
-                       /*
-                        * This is the last PBL message.
-                        * Set the context to our VQ Request Object so we can
-                        * wait for the reply.
-                        */
-                       wr->hdr.context = (unsigned long) vq_req;
-               }
-
-               /*
-                * If pbl_virt is set then va is a virtual address
-                * that describes a virtually contiguous memory
-                * allocation. The wr needs the start of each virtual page
-                * to be converted to the corresponding physical address
-                * of the page. If pbl_virt is not set then va is an array
-                * of physical addresses and there is no conversion to do.
-                * Just fill in the wr with what is in the array.
-                */
-               for (i = 0; i < count; i++) {
-                       if (pbl_virt) {
-                               va += PAGE_SIZE;
-                       } else {
-                               wr->paddrs[i] =
-                                   cpu_to_be64(((u64 *)va)[pbl_index + i]);
-                       }
-               }
-
-               /*
-                * Send WR to adapter
-                */
-               err = vq_send_wr(c2dev, (union c2wr *) wr);
-               if (err) {
-                       if (count <= pbe_count) {
-                               vq_req_put(c2dev, vq_req);
-                       }
-                       goto bail0;
-               }
-               pbl_depth -= count;
-               pbl_index += count;
-       }
-
-       /*
-        *  Now wait for the reply...
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       /*
-        * Process reply
-        */
-       reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       err = c2_errno(reply);
-
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       kfree(wr);
-       return err;
-}
-
-#define C2_PBL_MAX_DEPTH 131072
-int
-c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,
-                          int page_size, int pbl_depth, u32 length,
-                          u32 offset, u64 *va, enum c2_acf acf,
-                          struct c2_mr *mr)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_nsmr_register_req *wr;
-       struct c2wr_nsmr_register_rep *reply;
-       u16 flags;
-       int i, pbe_count, count;
-       int err;
-
-       if (!va || !length || !addr_list || !pbl_depth)
-               return -EINTR;
-
-       /*
-        * Verify PBL depth is within rnic max
-        */
-       if (pbl_depth > C2_PBL_MAX_DEPTH) {
-               return -EINTR;
-       }
-
-       /*
-        * allocate verbs request object
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       /*
-        * build the WR
-        */
-       c2_wr_set_id(wr, CCWR_NSMR_REGISTER);
-       wr->hdr.context = (unsigned long) vq_req;
-       wr->rnic_handle = c2dev->adapter_handle;
-
-       flags = (acf | MEM_VA_BASED | MEM_REMOTE);
-
-       /*
-        * compute how many pbes can fit in the message
-        */
-       pbe_count = (c2dev->req_vq.msg_size -
-                    sizeof(struct c2wr_nsmr_register_req)) / sizeof(u64);
-
-       if (pbl_depth <= pbe_count) {
-               flags |= MEM_PBL_COMPLETE;
-       }
-       wr->flags = cpu_to_be16(flags);
-       wr->stag_key = 0;       //stag_key;
-       wr->va = cpu_to_be64(*va);
-       wr->pd_id = mr->pd->pd_id;
-       wr->pbe_size = cpu_to_be32(page_size);
-       wr->length = cpu_to_be32(length);
-       wr->pbl_depth = cpu_to_be32(pbl_depth);
-       wr->fbo = cpu_to_be32(offset);
-       count = min(pbl_depth, pbe_count);
-       wr->addrs_length = cpu_to_be32(count);
-
-       /*
-        * fill out the PBL for this message
-        */
-       for (i = 0; i < count; i++) {
-               wr->paddrs[i] = cpu_to_be64(addr_list[i]);
-       }
-
-       /*
-        * reference the request struct
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * send the WR to the adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       /*
-        * wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail1;
-       }
-
-       /*
-        * process reply
-        */
-       reply =
-           (struct c2wr_nsmr_register_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-       if ((err = c2_errno(reply))) {
-               goto bail2;
-       }
-       //*p_pb_entries = be32_to_cpu(reply->pbl_depth);
-       mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index);
-       vq_repbuf_free(c2dev, reply);
-
-       /*
-        * if there are still more PBEs we need to send them to
-        * the adapter and wait for a reply on the final one.
-        * reuse vq_req for this purpose.
-        */
-       pbl_depth -= count;
-       if (pbl_depth) {
-
-               vq_req->reply_msg = (unsigned long) NULL;
-               atomic_set(&vq_req->reply_ready, 0);
-               err = send_pbl_messages(c2dev,
-                                       cpu_to_be32(mr->ibmr.lkey),
-                                       (unsigned long) &addr_list[i],
-                                       pbl_depth, vq_req, PBL_PHYS);
-               if (err) {
-                       goto bail1;
-               }
-       }
-
-       vq_req_free(c2dev, vq_req);
-       kfree(wr);
-
-       return err;
-
-bail2:
-       vq_repbuf_free(c2dev, reply);
-bail1:
-       kfree(wr);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-int c2_stag_dealloc(struct c2_dev *c2dev, u32 stag_index)
-{
-       struct c2_vq_req *vq_req;       /* verbs request object */
-       struct c2wr_stag_dealloc_req wr;        /* work request */
-       struct c2wr_stag_dealloc_rep *reply;    /* WR reply  */
-       int err;
-
-
-       /*
-        * allocate verbs request object
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               return -ENOMEM;
-       }
-
-       /*
-        * Build the WR
-        */
-       c2_wr_set_id(&wr, CCWR_STAG_DEALLOC);
-       wr.hdr.context = (u64) (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.stag_index = cpu_to_be32(stag_index);
-
-       /*
-        * reference the request struct.  dereferenced in the int handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *)&wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       /*
-        * Process reply
-        */
-       reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       err = c2_errno(reply);
-
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
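
send_pbl_messages() and c2_nsmr_register_phys_kern() above split a long page-buffer list across multiple work requests: they compute how many 64-bit entries fit after the request header, send full chunks, and flag only the final chunk with MEM_PBL_COMPLETE so the adapter replies exactly once. That arithmetic is sketched below with arbitrary example values; msg_size and hdr_size here are illustrative, not the real c2wr layouts.

/* Sketch of the PBL chunking arithmetic: pbe_count entries per message,
 * last chunk flagged so only it generates an adapter reply.
 */
#include <stdio.h>

int main(void)
{
        unsigned int msg_size = 1024;           /* example req_vq.msg_size */
        unsigned int hdr_size = 64;             /* example WR header size  */
        unsigned int pbe_count = (msg_size - hdr_size) / sizeof(unsigned long long);
        unsigned int pbl_depth = 300;           /* entries left to send    */
        unsigned int pbl_index = 0;

        while (pbl_depth) {
                unsigned int count = pbl_depth < pbe_count ? pbl_depth : pbe_count;
                int last = (count == pbl_depth);

                printf("message: entries %u..%u%s\n", pbl_index,
                       pbl_index + count - 1,
                       last ? " (MEM_PBL_COMPLETE)" : "");
                pbl_depth -= count;
                pbl_index += count;
        }
        return 0;
}
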
diff --git a/drivers/staging/rdma/amso1100/c2_mq.c b/drivers/staging/rdma/amso1100/c2_mq.c
deleted file mode 100644 (file)
index 7827fb8..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include "c2.h"
-#include "c2_mq.h"
-
-void *c2_mq_alloc(struct c2_mq *q)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
-       if (c2_mq_full(q)) {
-               return NULL;
-       } else {
-#ifdef DEBUG
-               struct c2wr_hdr *m =
-                   (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
-               BUG_ON(m->magic != be32_to_cpu(~CCWR_MAGIC));
-               m->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
-               return m;
-#else
-               return q->msg_pool.host + q->priv * q->msg_size;
-#endif
-       }
-}
-
-void c2_mq_produce(struct c2_mq *q)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
-       if (!c2_mq_full(q)) {
-               q->priv = (q->priv + 1) % q->q_size;
-               q->hint_count++;
-               /* Update peer's offset. */
-               __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
-       }
-}
-
-void *c2_mq_consume(struct c2_mq *q)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
-       if (c2_mq_empty(q)) {
-               return NULL;
-       } else {
-#ifdef DEBUG
-               struct c2wr_hdr *m = (struct c2wr_hdr *)
-                   (q->msg_pool.host + q->priv * q->msg_size);
-#ifdef CCMSGMAGIC
-               BUG_ON(m->magic != be32_to_cpu(CCWR_MAGIC));
-#endif
-               return m;
-#else
-               return q->msg_pool.host + q->priv * q->msg_size;
-#endif
-       }
-}
-
-void c2_mq_free(struct c2_mq *q)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_HOST_TARGET);
-
-       if (!c2_mq_empty(q)) {
-
-#ifdef CCMSGMAGIC
-               {
-                       struct c2wr_hdr __iomem *m = (struct c2wr_hdr __iomem *)
-                           (q->msg_pool.adapter + q->priv * q->msg_size);
-                       __raw_writel(cpu_to_be32(~CCWR_MAGIC), &m->magic);
-               }
-#endif
-               q->priv = (q->priv + 1) % q->q_size;
-               /* Update peer's offset. */
-               __raw_writew((__force u16) cpu_to_be16(q->priv), &q->peer->shared);
-       }
-}
-
-
-void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count)
-{
-       BUG_ON(q->magic != C2_MQ_MAGIC);
-       BUG_ON(q->type != C2_MQ_ADAPTER_TARGET);
-
-       while (wqe_count--) {
-               BUG_ON(c2_mq_empty(q));
-               *q->shared = cpu_to_be16((be16_to_cpu(*q->shared)+1) % q->q_size);
-       }
-}
-
-#if 0
-u32 c2_mq_count(struct c2_mq *q)
-{
-       s32 count;
-
-       if (q->type == C2_MQ_HOST_TARGET)
-               count = be16_to_cpu(*q->shared) - q->priv;
-       else
-               count = q->priv - be16_to_cpu(*q->shared);
-
-       if (count < 0)
-               count += q->q_size;
-
-       return (u32) count;
-}
-#endif  /*  0  */
-
-void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-                   u8 __iomem *pool_start, u16 __iomem *peer, u32 type)
-{
-       BUG_ON(!q->shared);
-
-       /* This code assumes the byte swapping has already been done! */
-       q->index = index;
-       q->q_size = q_size;
-       q->msg_size = msg_size;
-       q->msg_pool.adapter = pool_start;
-       q->peer = (struct c2_mq_shared __iomem *) peer;
-       q->magic = C2_MQ_MAGIC;
-       q->type = type;
-       q->priv = 0;
-       q->hint_count = 0;
-       return;
-}
-
-void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-                   u8 *pool_start, u16 __iomem *peer, u32 type)
-{
-       BUG_ON(!q->shared);
-
-       /* This code assumes the byte swapping has already been done! */
-       q->index = index;
-       q->q_size = q_size;
-       q->msg_size = msg_size;
-       q->msg_pool.host = pool_start;
-       q->peer = (struct c2_mq_shared __iomem *) peer;
-       q->magic = C2_MQ_MAGIC;
-       q->type = type;
-       q->priv = 0;
-       q->hint_count = 0;
-       return;
-}
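
The MQ code above, together with the c2_mq_empty()/c2_mq_full() helpers from c2_mq.h, is a single-producer ring: the local priv index chases the peer's shared index modulo q_size, and one slot is kept unused so that "full" and "empty" remain distinguishable. The user-space model below reproduces only that index arithmetic; Q_SIZE and the two plain variables stand in for the real host/adapter shared memory.

/* Sketch of the circular-index bookkeeping in c2_mq.c: empty when the two
 * indexes meet, full when priv is one slot behind shared (mod q_size).
 */
#include <stdio.h>

#define Q_SIZE 4

static unsigned short priv_idx;         /* our next slot        */
static unsigned short shared_idx;       /* peer's consume point */

static int mq_empty(void)
{
        return priv_idx == shared_idx;
}

static int mq_full(void)
{
        return priv_idx == (unsigned short)((shared_idx + Q_SIZE - 1) % Q_SIZE);
}

static void produce(void)
{
        if (!mq_full())
                priv_idx = (priv_idx + 1) % Q_SIZE;     /* advance producer */
}

int main(void)
{
        int i;

        for (i = 0; i < Q_SIZE; i++)
                produce();      /* last attempt is a no-op: ring is full */
        printf("empty=%d full=%d priv=%u shared=%u\n",
               mq_empty(), mq_full(), priv_idx, shared_idx);
        return 0;
}
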
diff --git a/drivers/staging/rdma/amso1100/c2_mq.h b/drivers/staging/rdma/amso1100/c2_mq.h
deleted file mode 100644 (file)
index 8e1b4d1..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _C2_MQ_H_
-#define _C2_MQ_H_
-#include <linux/kernel.h>
-#include <linux/dma-mapping.h>
-#include "c2_wr.h"
-
-enum c2_shared_regs {
-
-       C2_SHARED_ARMED = 0x10,
-       C2_SHARED_NOTIFY = 0x18,
-       C2_SHARED_SHARED = 0x40,
-};
-
-struct c2_mq_shared {
-       u16 unused1;
-       u8 armed;
-       u8 notification_type;
-       u32 unused2;
-       u16 shared;
-       /* Pad to 64 bytes. */
-       u8 pad[64 - sizeof(u16) - 2 * sizeof(u8) - sizeof(u32) - sizeof(u16)];
-};
-
-enum c2_mq_type {
-       C2_MQ_HOST_TARGET = 1,
-       C2_MQ_ADAPTER_TARGET = 2,
-};
-
-/*
- * c2_mq_t is for kernel-mode MQs like the VQs and the AEQ.
- * c2_user_mq_t (which is the same format) is for user-mode MQs...
- */
-#define C2_MQ_MAGIC 0x4d512020 /* 'MQ  ' */
-struct c2_mq {
-       u32 magic;
-       union {
-               u8 *host;
-               u8 __iomem *adapter;
-       } msg_pool;
-       dma_addr_t host_dma;
-       DEFINE_DMA_UNMAP_ADDR(mapping);
-       u16 hint_count;
-       u16 priv;
-       struct c2_mq_shared __iomem *peer;
-       __be16 *shared;
-       dma_addr_t shared_dma;
-       u32 q_size;
-       u32 msg_size;
-       u32 index;
-       enum c2_mq_type type;
-};
-
-static __inline__ int c2_mq_empty(struct c2_mq *q)
-{
-       return q->priv == be16_to_cpu(*q->shared);
-}
-
-static __inline__ int c2_mq_full(struct c2_mq *q)
-{
-       return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size;
-}
-
-void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
-void *c2_mq_alloc(struct c2_mq *q);
-void c2_mq_produce(struct c2_mq *q);
-void *c2_mq_consume(struct c2_mq *q);
-void c2_mq_free(struct c2_mq *q);
-void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-                      u8 __iomem *pool_start, u16 __iomem *peer, u32 type);
-void c2_mq_rep_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
-                          u8 *pool_start, u16 __iomem *peer, u32 type);
-
-#endif                         /* _C2_MQ_H_ */
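
struct c2_mq_shared above pads itself out to 64 bytes by subtracting the sizes of the named fields from 64 in the pad[] bound. A user-space check of that idiom is sketched below; the field types mirror the header, but the struct name and the static assertion are additions for illustration, and they assume the compiler inserts no padding between these fields (which the driver also relies on).

/* Stand-alone check of the "pad the struct to 64 bytes" idiom used by
 * struct c2_mq_shared.  The _Static_assert is not in the driver.
 */
#include <stdint.h>
#include <stdio.h>

struct sketch_mq_shared {
        uint16_t unused1;
        uint8_t  armed;
        uint8_t  notification_type;
        uint32_t unused2;
        uint16_t shared;
        /* Pad to 64 bytes, computed the same way as in c2_mq.h. */
        uint8_t  pad[64 - sizeof(uint16_t) - 2 * sizeof(uint8_t)
                     - sizeof(uint32_t) - sizeof(uint16_t)];
};

_Static_assert(sizeof(struct sketch_mq_shared) == 64,
               "shared MQ control block must be exactly 64 bytes");

int main(void)
{
        printf("sizeof(struct sketch_mq_shared) = %zu\n",
               sizeof(struct sketch_mq_shared));
        return 0;
}
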
diff --git a/drivers/staging/rdma/amso1100/c2_pd.c b/drivers/staging/rdma/amso1100/c2_pd.c
deleted file mode 100644 (file)
index f3e81dc..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-
-#include "c2.h"
-#include "c2_provider.h"
-
-int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)
-{
-       u32 obj;
-       int ret = 0;
-
-       spin_lock(&c2dev->pd_table.lock);
-       obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,
-                                c2dev->pd_table.last);
-       if (obj >= c2dev->pd_table.max)
-               obj = find_first_zero_bit(c2dev->pd_table.table,
-                                         c2dev->pd_table.max);
-       if (obj < c2dev->pd_table.max) {
-               pd->pd_id = obj;
-               __set_bit(obj, c2dev->pd_table.table);
-               c2dev->pd_table.last = obj+1;
-               if (c2dev->pd_table.last >= c2dev->pd_table.max)
-                       c2dev->pd_table.last = 0;
-       } else
-               ret = -ENOMEM;
-       spin_unlock(&c2dev->pd_table.lock);
-       return ret;
-}
-
-void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
-{
-       spin_lock(&c2dev->pd_table.lock);
-       __clear_bit(pd->pd_id, c2dev->pd_table.table);
-       spin_unlock(&c2dev->pd_table.lock);
-}
-
-int c2_init_pd_table(struct c2_dev *c2dev)
-{
-
-       c2dev->pd_table.last = 0;
-       c2dev->pd_table.max = c2dev->props.max_pd;
-       spin_lock_init(&c2dev->pd_table.lock);
-       c2dev->pd_table.table = kmalloc(BITS_TO_LONGS(c2dev->props.max_pd) *
-                                       sizeof(long), GFP_KERNEL);
-       if (!c2dev->pd_table.table)
-               return -ENOMEM;
-       bitmap_zero(c2dev->pd_table.table, c2dev->props.max_pd);
-       return 0;
-}
-
-void c2_cleanup_pd_table(struct c2_dev *c2dev)
-{
-       kfree(c2dev->pd_table.table);
-}
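
c2_pd_alloc() above hands out protection-domain IDs with a round-robin bitmap scan: search upward from the last allocation point, wrap to the start if nothing is free above it, and remember where to resume next time. A small user-space equivalent is sketched below; it uses a char array in place of the kernel bitmap helpers (find_next_zero_bit and friends), so it is only a model of the allocation policy.

/* Sketch of the round-robin ID allocator used by c2_pd_alloc(). */
#include <stdio.h>

#define MAX_IDS 8

static char table[MAX_IDS];     /* 0 = free, 1 = in use */
static unsigned int last;       /* where the next search starts */

static int id_alloc(void)
{
        unsigned int i, idx;

        for (i = 0; i < MAX_IDS; i++) {
                idx = (last + i) % MAX_IDS;     /* wrap past the end */
                if (!table[idx]) {
                        table[idx] = 1;
                        last = (idx + 1) % MAX_IDS;
                        return (int)idx;
                }
        }
        return -1;      /* exhausted; c2_pd_alloc returns -ENOMEM here */
}

static void id_free(int idx)
{
        table[idx] = 0;
}

int main(void)
{
        int a = id_alloc(), b = id_alloc();

        id_free(a);
        printf("a=%d b=%d next=%d\n", a, b, id_alloc());
        return 0;
}
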
diff --git a/drivers/staging/rdma/amso1100/c2_provider.c b/drivers/staging/rdma/amso1100/c2_provider.c
deleted file mode 100644 (file)
index de8d10e..0000000
+++ /dev/null
@@ -1,862 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/inetdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/if_arp.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_umem.h>
-#include <rdma/ib_user_verbs.h>
-#include "c2.h"
-#include "c2_provider.h"
-#include "c2_user.h"
-
-static int c2_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-                          struct ib_udata *uhw)
-{
-       struct c2_dev *c2dev = to_c2dev(ibdev);
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       if (uhw->inlen || uhw->outlen)
-               return -EINVAL;
-
-       *props = c2dev->props;
-       return 0;
-}
-
-static int c2_query_port(struct ib_device *ibdev,
-                        u8 port, struct ib_port_attr *props)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       props->max_mtu = IB_MTU_4096;
-       props->lid = 0;
-       props->lmc = 0;
-       props->sm_lid = 0;
-       props->sm_sl = 0;
-       props->state = IB_PORT_ACTIVE;
-       props->phys_state = 0;
-       props->port_cap_flags =
-           IB_PORT_CM_SUP |
-           IB_PORT_REINIT_SUP |
-           IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
-       props->gid_tbl_len = 1;
-       props->pkey_tbl_len = 1;
-       props->qkey_viol_cntr = 0;
-       props->active_width = 1;
-       props->active_speed = IB_SPEED_SDR;
-
-       return 0;
-}
-
-static int c2_query_pkey(struct ib_device *ibdev,
-                        u8 port, u16 index, u16 * pkey)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       *pkey = 0;
-       return 0;
-}
-
-static int c2_query_gid(struct ib_device *ibdev, u8 port,
-                       int index, union ib_gid *gid)
-{
-       struct c2_dev *c2dev = to_c2dev(ibdev);
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       memset(&(gid->raw[0]), 0, sizeof(gid->raw));
-       memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);
-
-       return 0;
-}
-
-/* Allocate the user context data structure. This keeps track
- * of all objects associated with a particular user-mode client.
- */
-static struct ib_ucontext *c2_alloc_ucontext(struct ib_device *ibdev,
-                                            struct ib_udata *udata)
-{
-       struct c2_ucontext *context;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       context = kmalloc(sizeof(*context), GFP_KERNEL);
-       if (!context)
-               return ERR_PTR(-ENOMEM);
-
-       return &context->ibucontext;
-}
-
-static int c2_dealloc_ucontext(struct ib_ucontext *context)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       kfree(context);
-       return 0;
-}
-
-static int c2_mmap_uar(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static struct ib_pd *c2_alloc_pd(struct ib_device *ibdev,
-                                struct ib_ucontext *context,
-                                struct ib_udata *udata)
-{
-       struct c2_pd *pd;
-       int err;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       pd = kmalloc(sizeof(*pd), GFP_KERNEL);
-       if (!pd)
-               return ERR_PTR(-ENOMEM);
-
-       err = c2_pd_alloc(to_c2dev(ibdev), !context, pd);
-       if (err) {
-               kfree(pd);
-               return ERR_PTR(err);
-       }
-
-       if (context) {
-               if (ib_copy_to_udata(udata, &pd->pd_id, sizeof(__u32))) {
-                       c2_pd_free(to_c2dev(ibdev), pd);
-                       kfree(pd);
-                       return ERR_PTR(-EFAULT);
-               }
-       }
-
-       return &pd->ibpd;
-}
-
-static int c2_dealloc_pd(struct ib_pd *pd)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       c2_pd_free(to_c2dev(pd->device), to_c2pd(pd));
-       kfree(pd);
-
-       return 0;
-}
-
-static struct ib_ah *c2_ah_create(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return ERR_PTR(-ENOSYS);
-}
-
-static int c2_ah_destroy(struct ib_ah *ah)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static void c2_add_ref(struct ib_qp *ibqp)
-{
-       struct c2_qp *qp;
-       BUG_ON(!ibqp);
-       qp = to_c2qp(ibqp);
-       atomic_inc(&qp->refcount);
-}
-
-static void c2_rem_ref(struct ib_qp *ibqp)
-{
-       struct c2_qp *qp;
-       BUG_ON(!ibqp);
-       qp = to_c2qp(ibqp);
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-}
-
-struct ib_qp *c2_get_qp(struct ib_device *device, int qpn)
-{
-       struct c2_dev* c2dev = to_c2dev(device);
-       struct c2_qp *qp;
-
-       qp = c2_find_qpn(c2dev, qpn);
-       pr_debug("%s Returning QP=%p for QPN=%d, device=%p, refcount=%d\n",
-               __func__, qp, qpn, device,
-               (qp?atomic_read(&qp->refcount):0));
-
-       return (qp?&qp->ibqp:NULL);
-}
-
-static struct ib_qp *c2_create_qp(struct ib_pd *pd,
-                                 struct ib_qp_init_attr *init_attr,
-                                 struct ib_udata *udata)
-{
-       struct c2_qp *qp;
-       int err;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       if (init_attr->create_flags)
-               return ERR_PTR(-EINVAL);
-
-       switch (init_attr->qp_type) {
-       case IB_QPT_RC:
-               qp = kzalloc(sizeof(*qp), GFP_KERNEL);
-               if (!qp) {
-                       pr_debug("%s: Unable to allocate QP\n", __func__);
-                       return ERR_PTR(-ENOMEM);
-               }
-               spin_lock_init(&qp->lock);
-               if (pd->uobject) {
-                       /* userspace specific */
-               }
-
-               err = c2_alloc_qp(to_c2dev(pd->device),
-                                 to_c2pd(pd), init_attr, qp);
-
-               if (err && pd->uobject) {
-                       /* userspace specific */
-               }
-
-               break;
-       default:
-               pr_debug("%s: Invalid QP type: %d\n", __func__,
-                       init_attr->qp_type);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (err) {
-               kfree(qp);
-               return ERR_PTR(err);
-       }
-
-       return &qp->ibqp;
-}
-
-static int c2_destroy_qp(struct ib_qp *ib_qp)
-{
-       struct c2_qp *qp = to_c2qp(ib_qp);
-
-       pr_debug("%s:%u qp=%p,qp->state=%d\n",
-               __func__, __LINE__, ib_qp, qp->state);
-       c2_free_qp(to_c2dev(ib_qp->device), qp);
-       kfree(qp);
-       return 0;
-}
-
-static struct ib_cq *c2_create_cq(struct ib_device *ibdev,
-                                 const struct ib_cq_init_attr *attr,
-                                 struct ib_ucontext *context,
-                                 struct ib_udata *udata)
-{
-       int entries = attr->cqe;
-       struct c2_cq *cq;
-       int err;
-
-       if (attr->flags)
-               return ERR_PTR(-EINVAL);
-
-       cq = kmalloc(sizeof(*cq), GFP_KERNEL);
-       if (!cq) {
-               pr_debug("%s: Unable to allocate CQ\n", __func__);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       err = c2_init_cq(to_c2dev(ibdev), entries, NULL, cq);
-       if (err) {
-               pr_debug("%s: error initializing CQ\n", __func__);
-               kfree(cq);
-               return ERR_PTR(err);
-       }
-
-       return &cq->ibcq;
-}
-
-static int c2_destroy_cq(struct ib_cq *ib_cq)
-{
-       struct c2_cq *cq = to_c2cq(ib_cq);
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       c2_free_cq(to_c2dev(ib_cq->device), cq);
-       kfree(cq);
-
-       return 0;
-}
-
-static inline u32 c2_convert_access(int acc)
-{
-       return (acc & IB_ACCESS_REMOTE_WRITE ? C2_ACF_REMOTE_WRITE : 0) |
-           (acc & IB_ACCESS_REMOTE_READ ? C2_ACF_REMOTE_READ : 0) |
-           (acc & IB_ACCESS_LOCAL_WRITE ? C2_ACF_LOCAL_WRITE : 0) |
-           C2_ACF_LOCAL_READ | C2_ACF_WINDOW_BIND;
-}
-
-static struct ib_mr *c2_get_dma_mr(struct ib_pd *pd, int acc)
-{
-       struct c2_mr *mr;
-       u64 *page_list;
-       const u32 total_len = 0xffffffff;       /* AMSO1100 limit */
-       int err, page_shift, pbl_depth, i;
-       u64 kva = 0;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       /*
-        * This is a map of all phy mem...use a 32k page_shift.
-        */
-       page_shift = PAGE_SHIFT + 3;
-       pbl_depth = ALIGN(total_len, BIT(page_shift)) >> page_shift;
-
-       page_list = vmalloc(sizeof(u64) * pbl_depth);
-       if (!page_list) {
-               pr_debug("couldn't vmalloc page_list of size %zd\n",
-                       (sizeof(u64) * pbl_depth));
-               return ERR_PTR(-ENOMEM);
-       }
-
-       for (i = 0; i < pbl_depth; i++)
-               page_list[i] = (i << page_shift);
-
-       mr = kmalloc(sizeof(*mr), GFP_KERNEL);
-       if (!mr) {
-               vfree(page_list);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       mr->pd = to_c2pd(pd);
-       mr->umem = NULL;
-       pr_debug("%s - page shift %d, pbl_depth %d, total_len %u, "
-               "*iova_start %llx, first pa %llx, last pa %llx\n",
-               __func__, page_shift, pbl_depth, total_len,
-               (unsigned long long) kva,
-               (unsigned long long) page_list[0],
-               (unsigned long long) page_list[pbl_depth-1]);
-       err = c2_nsmr_register_phys_kern(to_c2dev(pd->device), page_list,
-                                        BIT(page_shift), pbl_depth,
-                                        total_len, 0, &kva,
-                                        c2_convert_access(acc), mr);
-       vfree(page_list);
-       if (err) {
-               kfree(mr);
-               return ERR_PTR(err);
-       }
-
-       return &mr->ibmr;
-}
-
-static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                                   u64 virt, int acc, struct ib_udata *udata)
-{
-       u64 *pages;
-       u64 kva = 0;
-       int shift, n, len;
-       int i, k, entry;
-       int err = 0;
-       struct scatterlist *sg;
-       struct c2_pd *c2pd = to_c2pd(pd);
-       struct c2_mr *c2mr;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       c2mr = kmalloc(sizeof(*c2mr), GFP_KERNEL);
-       if (!c2mr)
-               return ERR_PTR(-ENOMEM);
-       c2mr->pd = c2pd;
-
-       c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
-       if (IS_ERR(c2mr->umem)) {
-               err = PTR_ERR(c2mr->umem);
-               kfree(c2mr);
-               return ERR_PTR(err);
-       }
-
-       shift = ffs(c2mr->umem->page_size) - 1;
-       n = c2mr->umem->nmap;
-
-       pages = kmalloc_array(n, sizeof(u64), GFP_KERNEL);
-       if (!pages) {
-               err = -ENOMEM;
-               goto err;
-       }
-
-       i = 0;
-       for_each_sg(c2mr->umem->sg_head.sgl, sg, c2mr->umem->nmap, entry) {
-               len = sg_dma_len(sg) >> shift;
-               for (k = 0; k < len; ++k) {
-                       pages[i++] =
-                               sg_dma_address(sg) +
-                               (c2mr->umem->page_size * k);
-               }
-       }
-
-       kva = virt;
-       err = c2_nsmr_register_phys_kern(to_c2dev(pd->device),
-                                        pages,
-                                        c2mr->umem->page_size,
-                                        i,
-                                        length,
-                                        ib_umem_offset(c2mr->umem),
-                                        &kva,
-                                        c2_convert_access(acc),
-                                        c2mr);
-       kfree(pages);
-       if (err)
-               goto err;
-       return &c2mr->ibmr;
-
-err:
-       ib_umem_release(c2mr->umem);
-       kfree(c2mr);
-       return ERR_PTR(err);
-}
-
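The loop in c2_reg_user_mr above flattens the umem's DMA segments into one page-address array before handing it to the adapter. A minimal userspace sketch of the same expansion (the seg struct and the 4 KiB page size are assumptions for illustration, not the kernel scatterlist API):

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t addr; uint64_t len; };    /* stand-in for one DMA-mapped sg entry */

int main(void)
{
        struct seg segs[] = { { 0x100000, 0x3000 }, { 0x200000, 0x1000 } };
        const int shift = 12;                   /* log2 of the assumed 4 KiB page size */
        const uint64_t page_size = 1ULL << shift;
        uint64_t pages[16];
        int i = 0;

        /* Same idea as the for_each_sg() loop: one entry per page of each segment. */
        for (unsigned int s = 0; s < sizeof(segs) / sizeof(segs[0]); s++) {
                uint64_t npages = segs[s].len >> shift;
                for (uint64_t k = 0; k < npages; k++)
                        pages[i++] = segs[s].addr + page_size * k;
        }

        for (int j = 0; j < i; j++)
                printf("page[%d] = 0x%llx\n", j, (unsigned long long)pages[j]);
        return 0;
}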
-static int c2_dereg_mr(struct ib_mr *ib_mr)
-{
-       struct c2_mr *mr = to_c2mr(ib_mr);
-       int err;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       err = c2_stag_dealloc(to_c2dev(ib_mr->device), ib_mr->lkey);
-       if (err)
-               pr_debug("c2_stag_dealloc failed: %d\n", err);
-       else {
-               if (mr->umem)
-                       ib_umem_release(mr->umem);
-               kfree(mr);
-       }
-
-       return err;
-}
-
-static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
-                       char *buf)
-{
-       struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return sprintf(buf, "%x\n", c2dev->props.hw_ver);
-}
-
-static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
-                          char *buf)
-{
-       struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return sprintf(buf, "%x.%x.%x\n",
-                      (int) (c2dev->props.fw_ver >> 32),
-                      (int) (c2dev->props.fw_ver >> 16) & 0xffff,
-                      (int) (c2dev->props.fw_ver & 0xffff));
-}
-
-static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
-                       char *buf)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return sprintf(buf, "AMSO1100\n");
-}
-
-static ssize_t show_board(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return sprintf(buf, "%.*s\n", 32, "AMSO1100 Board ID");
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);
-
-static struct device_attribute *c2_dev_attributes[] = {
-       &dev_attr_hw_rev,
-       &dev_attr_fw_ver,
-       &dev_attr_hca_type,
-       &dev_attr_board_id
-};
-
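show_fw_ver above assumes the firmware version is packed into one 64-bit word: major in bits 63:32, minor in bits 31:16, patch in bits 15:0. A tiny standalone check of that unpacking (the sample value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t fw_ver = ((uint64_t)3 << 32) | (2 << 16) | 7;  /* made-up 3.2.7 */

        printf("%d.%d.%d\n",
               (int)(fw_ver >> 32),
               (int)(fw_ver >> 16) & 0xffff,
               (int)(fw_ver & 0xffff));         /* prints 3.2.7 */
        return 0;
}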
-static int c2_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                       int attr_mask, struct ib_udata *udata)
-{
-       int err;
-
-       err =
-           c2_qp_modify(to_c2dev(ibqp->device), to_c2qp(ibqp), attr,
-                        attr_mask);
-
-       return err;
-}
-
-static int c2_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static int c2_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static int c2_process_mad(struct ib_device *ibdev,
-                         int mad_flags,
-                         u8 port_num,
-                         const struct ib_wc *in_wc,
-                         const struct ib_grh *in_grh,
-                         const struct ib_mad_hdr *in_mad,
-                         size_t in_mad_size,
-                         struct ib_mad_hdr *out_mad,
-                         size_t *out_mad_size,
-                         u16 *out_mad_pkey_index)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       return -ENOSYS;
-}
-
-static int c2_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       /* Request a connection */
-       return c2_llp_connect(cm_id, iw_param);
-}
-
-static int c2_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       /* Accept the new connection */
-       return c2_llp_accept(cm_id, iw_param);
-}
-
-static int c2_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       return c2_llp_reject(cm_id, pdata, pdata_len);
-}
-
-static int c2_service_create(struct iw_cm_id *cm_id, int backlog)
-{
-       int err;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       err = c2_llp_service_create(cm_id, backlog);
-       pr_debug("%s:%u err=%d\n",
-               __func__, __LINE__,
-               err);
-       return err;
-}
-
-static int c2_service_destroy(struct iw_cm_id *cm_id)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-
-       return c2_llp_service_destroy(cm_id);
-}
-
-static int c2_pseudo_up(struct net_device *netdev)
-{
-       struct in_device *ind;
-       struct c2_dev *c2dev = netdev->ml_priv;
-
-       ind = in_dev_get(netdev);
-       if (!ind)
-               return 0;
-
-       pr_debug("adding...\n");
-       for_ifa(ind) {
-#ifdef DEBUG
-               u8 *ip = (u8 *) & ifa->ifa_address;
-
-               pr_debug("%s: %d.%d.%d.%d\n",
-                      ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
-               c2_add_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
-       }
-       endfor_ifa(ind);
-       in_dev_put(ind);
-
-       return 0;
-}
-
-static int c2_pseudo_down(struct net_device *netdev)
-{
-       struct in_device *ind;
-       struct c2_dev *c2dev = netdev->ml_priv;
-
-       ind = in_dev_get(netdev);
-       if (!ind)
-               return 0;
-
-       pr_debug("deleting...\n");
-       for_ifa(ind) {
-#ifdef DEBUG
-               u8 *ip = (u8 *) & ifa->ifa_address;
-
-               pr_debug("%s: %d.%d.%d.%d\n",
-                      ifa->ifa_label, ip[0], ip[1], ip[2], ip[3]);
-#endif
-               c2_del_addr(c2dev, ifa->ifa_address, ifa->ifa_mask);
-       }
-       endfor_ifa(ind);
-       in_dev_put(ind);
-
-       return 0;
-}
-
-static int c2_pseudo_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
-{
-       kfree_skb(skb);
-       return NETDEV_TX_OK;
-}
-
-static int c2_pseudo_change_mtu(struct net_device *netdev, int new_mtu)
-{
-       if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
-               return -EINVAL;
-
-       netdev->mtu = new_mtu;
-
-       /* TODO: Tell the RNIC about the new RDMA interface MTU */
-       return 0;
-}
-
-static const struct net_device_ops c2_pseudo_netdev_ops = {
-       .ndo_open               = c2_pseudo_up,
-       .ndo_stop               = c2_pseudo_down,
-       .ndo_start_xmit         = c2_pseudo_xmit_frame,
-       .ndo_change_mtu         = c2_pseudo_change_mtu,
-       .ndo_validate_addr      = eth_validate_addr,
-};
-
-static void setup(struct net_device *netdev)
-{
-       netdev->netdev_ops = &c2_pseudo_netdev_ops;
-
-       netdev->watchdog_timeo = 0;
-       netdev->type = ARPHRD_ETHER;
-       netdev->mtu = 1500;
-       netdev->hard_header_len = ETH_HLEN;
-       netdev->addr_len = ETH_ALEN;
-       netdev->tx_queue_len = 0;
-       netdev->flags |= IFF_NOARP;
-}
-
-static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
-{
-       char name[IFNAMSIZ];
-       struct net_device *netdev;
-
-       /* change ethxxx to iwxxx */
-       strcpy(name, "iw");
-       strcat(name, &c2dev->netdev->name[3]);
-       netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
-       if (!netdev) {
-               printk(KERN_ERR PFX "%s - etherdev alloc failed\n",
-                       __func__);
-               return NULL;
-       }
-
-       netdev->ml_priv = c2dev;
-
-       SET_NETDEV_DEV(netdev, &c2dev->pcidev->dev);
-
-       memcpy_fromio(netdev->dev_addr, c2dev->kva + C2_REGS_RDMA_ENADDR, 6);
-
-       /* Print out the MAC address */
-       pr_debug("%s: MAC %pM\n", netdev->name, netdev->dev_addr);
-
-#if 0
-       /* Disable network packets */
-       netif_stop_queue(netdev);
-#endif
-       return netdev;
-}
-
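c2_pseudo_netdev_init derives the pseudo device's name by replacing the "eth" prefix of the parent netdev with "iw". A bounds-checked userspace sketch of that renaming (the parent name here is just an example):

#include <stdio.h>

int main(void)
{
        const char *eth_name = "eth0";          /* example parent device name */
        char iw_name[16];

        /* "ethX" -> "iwX", but with snprintf instead of strcpy/strcat. */
        snprintf(iw_name, sizeof(iw_name), "iw%s", eth_name + 3);
        printf("%s\n", iw_name);                /* prints iw0 */
        return 0;
}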
-static int c2_port_immutable(struct ib_device *ibdev, u8 port_num,
-                            struct ib_port_immutable *immutable)
-{
-       struct ib_port_attr attr;
-       int err;
-
-       err = c2_query_port(ibdev, port_num, &attr);
-       if (err)
-               return err;
-
-       immutable->pkey_tbl_len = attr.pkey_tbl_len;
-       immutable->gid_tbl_len = attr.gid_tbl_len;
-       immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
-
-       return 0;
-}
-
-int c2_register_device(struct c2_dev *dev)
-{
-       int ret = -ENOMEM;
-       int i;
-
-       /* Register pseudo network device */
-       dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
-       if (!dev->pseudo_netdev)
-               goto out;
-
-       ret = register_netdev(dev->pseudo_netdev);
-       if (ret)
-               goto out_free_netdev;
-
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
-       dev->ibdev.owner = THIS_MODULE;
-       dev->ibdev.uverbs_cmd_mask =
-           (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
-           (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
-           (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
-           (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
-           (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
-           (1ull << IB_USER_VERBS_CMD_REG_MR) |
-           (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
-           (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
-           (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
-           (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
-           (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
-           (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
-           (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
-           (1ull << IB_USER_VERBS_CMD_POLL_CQ) |
-           (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
-           (1ull << IB_USER_VERBS_CMD_POST_SEND) |
-           (1ull << IB_USER_VERBS_CMD_POST_RECV);
-
-       dev->ibdev.node_type = RDMA_NODE_RNIC;
-       memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
-       memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
-       dev->ibdev.phys_port_cnt = 1;
-       dev->ibdev.num_comp_vectors = 1;
-       dev->ibdev.dma_device = &dev->pcidev->dev;
-       dev->ibdev.query_device = c2_query_device;
-       dev->ibdev.query_port = c2_query_port;
-       dev->ibdev.query_pkey = c2_query_pkey;
-       dev->ibdev.query_gid = c2_query_gid;
-       dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
-       dev->ibdev.dealloc_ucontext = c2_dealloc_ucontext;
-       dev->ibdev.mmap = c2_mmap_uar;
-       dev->ibdev.alloc_pd = c2_alloc_pd;
-       dev->ibdev.dealloc_pd = c2_dealloc_pd;
-       dev->ibdev.create_ah = c2_ah_create;
-       dev->ibdev.destroy_ah = c2_ah_destroy;
-       dev->ibdev.create_qp = c2_create_qp;
-       dev->ibdev.modify_qp = c2_modify_qp;
-       dev->ibdev.destroy_qp = c2_destroy_qp;
-       dev->ibdev.create_cq = c2_create_cq;
-       dev->ibdev.destroy_cq = c2_destroy_cq;
-       dev->ibdev.poll_cq = c2_poll_cq;
-       dev->ibdev.get_dma_mr = c2_get_dma_mr;
-       dev->ibdev.reg_user_mr = c2_reg_user_mr;
-       dev->ibdev.dereg_mr = c2_dereg_mr;
-       dev->ibdev.get_port_immutable = c2_port_immutable;
-
-       dev->ibdev.alloc_fmr = NULL;
-       dev->ibdev.unmap_fmr = NULL;
-       dev->ibdev.dealloc_fmr = NULL;
-       dev->ibdev.map_phys_fmr = NULL;
-
-       dev->ibdev.attach_mcast = c2_multicast_attach;
-       dev->ibdev.detach_mcast = c2_multicast_detach;
-       dev->ibdev.process_mad = c2_process_mad;
-
-       dev->ibdev.req_notify_cq = c2_arm_cq;
-       dev->ibdev.post_send = c2_post_send;
-       dev->ibdev.post_recv = c2_post_receive;
-
-       dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
-       if (dev->ibdev.iwcm == NULL) {
-               ret = -ENOMEM;
-               goto out_unregister_netdev;
-       }
-       dev->ibdev.iwcm->add_ref = c2_add_ref;
-       dev->ibdev.iwcm->rem_ref = c2_rem_ref;
-       dev->ibdev.iwcm->get_qp = c2_get_qp;
-       dev->ibdev.iwcm->connect = c2_connect;
-       dev->ibdev.iwcm->accept = c2_accept;
-       dev->ibdev.iwcm->reject = c2_reject;
-       dev->ibdev.iwcm->create_listen = c2_service_create;
-       dev->ibdev.iwcm->destroy_listen = c2_service_destroy;
-
-       ret = ib_register_device(&dev->ibdev, NULL);
-       if (ret)
-               goto out_free_iwcm;
-
-       for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
-               ret = device_create_file(&dev->ibdev.dev,
-                                              c2_dev_attributes[i]);
-               if (ret)
-                       goto out_unregister_ibdev;
-       }
-       goto out;
-
-out_unregister_ibdev:
-       ib_unregister_device(&dev->ibdev);
-out_free_iwcm:
-       kfree(dev->ibdev.iwcm);
-out_unregister_netdev:
-       unregister_netdev(dev->pseudo_netdev);
-out_free_netdev:
-       free_netdev(dev->pseudo_netdev);
-out:
-       pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
-       return ret;
-}
-
-void c2_unregister_device(struct c2_dev *dev)
-{
-       pr_debug("%s:%u\n", __func__, __LINE__);
-       unregister_netdev(dev->pseudo_netdev);
-       free_netdev(dev->pseudo_netdev);
-       ib_unregister_device(&dev->ibdev);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_provider.h b/drivers/staging/rdma/amso1100/c2_provider.h
deleted file mode 100644 (file)
index bf18998..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_PROVIDER_H
-#define C2_PROVIDER_H
-#include <linux/inetdevice.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_pack.h>
-
-#include "c2_mq.h"
-#include <rdma/iw_cm.h>
-
-#define C2_MPT_FLAG_ATOMIC        (1 << 14)
-#define C2_MPT_FLAG_REMOTE_WRITE  (1 << 13)
-#define C2_MPT_FLAG_REMOTE_READ   (1 << 12)
-#define C2_MPT_FLAG_LOCAL_WRITE   (1 << 11)
-#define C2_MPT_FLAG_LOCAL_READ    (1 << 10)
-
-struct c2_buf_list {
-       void *buf;
-       DEFINE_DMA_UNMAP_ADDR(mapping);
-};
-
-
-/* The user context keeps track of objects allocated for a
- * particular user-mode client. */
-struct c2_ucontext {
-       struct ib_ucontext ibucontext;
-};
-
-struct c2_mtt;
-
-/* All objects associated with a PD are kept in the
- * associated user context if present.
- */
-struct c2_pd {
-       struct ib_pd ibpd;
-       u32 pd_id;
-};
-
-struct c2_mr {
-       struct ib_mr ibmr;
-       struct c2_pd *pd;
-       struct ib_umem *umem;
-};
-
-struct c2_av;
-
-enum c2_ah_type {
-       C2_AH_ON_HCA,
-       C2_AH_PCI_POOL,
-       C2_AH_KMALLOC
-};
-
-struct c2_ah {
-       struct ib_ah ibah;
-};
-
-struct c2_cq {
-       struct ib_cq ibcq;
-       spinlock_t lock;
-       atomic_t refcount;
-       int cqn;
-       int is_kernel;
-       wait_queue_head_t wait;
-
-       u32 adapter_handle;
-       struct c2_mq mq;
-};
-
-struct c2_wq {
-       spinlock_t lock;
-};
-struct iw_cm_id;
-struct c2_qp {
-       struct ib_qp ibqp;
-       struct iw_cm_id *cm_id;
-       spinlock_t lock;
-       atomic_t refcount;
-       wait_queue_head_t wait;
-       int qpn;
-
-       u32 adapter_handle;
-       u32 send_sgl_depth;
-       u32 recv_sgl_depth;
-       u32 rdma_write_sgl_depth;
-       u8 state;
-
-       struct c2_mq sq_mq;
-       struct c2_mq rq_mq;
-};
-
-struct c2_cr_query_attrs {
-       u32 local_addr;
-       u32 remote_addr;
-       u16 local_port;
-       u16 remote_port;
-};
-
-static inline struct c2_pd *to_c2pd(struct ib_pd *ibpd)
-{
-       return container_of(ibpd, struct c2_pd, ibpd);
-}
-
-static inline struct c2_ucontext *to_c2ucontext(struct ib_ucontext *ibucontext)
-{
-       return container_of(ibucontext, struct c2_ucontext, ibucontext);
-}
-
-static inline struct c2_mr *to_c2mr(struct ib_mr *ibmr)
-{
-       return container_of(ibmr, struct c2_mr, ibmr);
-}
-
-
-static inline struct c2_ah *to_c2ah(struct ib_ah *ibah)
-{
-       return container_of(ibah, struct c2_ah, ibah);
-}
-
-static inline struct c2_cq *to_c2cq(struct ib_cq *ibcq)
-{
-       return container_of(ibcq, struct c2_cq, ibcq);
-}
-
-static inline struct c2_qp *to_c2qp(struct ib_qp *ibqp)
-{
-       return container_of(ibqp, struct c2_qp, ibqp);
-}
-
-static inline int is_rnic_addr(struct net_device *netdev, u32 addr)
-{
-       struct in_device *ind;
-       int ret = 0;
-
-       ind = in_dev_get(netdev);
-       if (!ind)
-               return 0;
-
-       for_ifa(ind) {
-               if (ifa->ifa_address == addr) {
-                       ret = 1;
-                       break;
-               }
-       }
-       endfor_ifa(ind);
-       in_dev_put(ind);
-       return ret;
-}
-#endif                         /* C2_PROVIDER_H */
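The to_c2pd()/to_c2cq()/to_c2qp() helpers above all use the same embed-and-recover idiom: the core ib_* object is embedded in the driver struct, and container_of() walks back from the embedded member to the wrapper. A self-contained userspace sketch of that pattern (the struct names are stand-ins, not the real ib_verbs types):

#include <stddef.h>
#include <stdio.h>

/* Userspace equivalent of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_qp_stub { int qp_num; };              /* stand-in for struct ib_qp */

struct my_qp {
        struct ib_qp_stub ibqp;                 /* embedded core object */
        int private_state;                      /* driver-private field */
};

static struct my_qp *to_my_qp(struct ib_qp_stub *ibqp)
{
        return container_of(ibqp, struct my_qp, ibqp);
}

int main(void)
{
        struct my_qp qp = { .ibqp = { .qp_num = 7 }, .private_state = 42 };
        struct ib_qp_stub *core = &qp.ibqp;     /* what the core layer hands back */

        printf("%d\n", to_my_qp(core)->private_state);  /* prints 42 */
        return 0;
}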
diff --git a/drivers/staging/rdma/amso1100/c2_qp.c b/drivers/staging/rdma/amso1100/c2_qp.c
deleted file mode 100644 (file)
index ca364db..0000000
+++ /dev/null
@@ -1,1024 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#include <linux/delay.h>
-#include <linux/gfp.h>
-
-#include "c2.h"
-#include "c2_vq.h"
-#include "c2_status.h"
-
-#define C2_MAX_ORD_PER_QP 128
-#define C2_MAX_IRD_PER_QP 128
-
-#define C2_HINT_MAKE(q_index, hint_count) (((q_index) << 16) | hint_count)
-#define C2_HINT_GET_INDEX(hint) (((hint) & 0x7FFF0000) >> 16)
-#define C2_HINT_GET_COUNT(hint) ((hint) & 0x0000FFFF)
-
-#define NO_SUPPORT -1
-static const u8 c2_opcode[] = {
-       [IB_WR_SEND] = C2_WR_TYPE_SEND,
-       [IB_WR_SEND_WITH_IMM] = NO_SUPPORT,
-       [IB_WR_RDMA_WRITE] = C2_WR_TYPE_RDMA_WRITE,
-       [IB_WR_RDMA_WRITE_WITH_IMM] = NO_SUPPORT,
-       [IB_WR_RDMA_READ] = C2_WR_TYPE_RDMA_READ,
-       [IB_WR_ATOMIC_CMP_AND_SWP] = NO_SUPPORT,
-       [IB_WR_ATOMIC_FETCH_AND_ADD] = NO_SUPPORT,
-};
-
-static int to_c2_state(enum ib_qp_state ib_state)
-{
-       switch (ib_state) {
-       case IB_QPS_RESET:
-               return C2_QP_STATE_IDLE;
-       case IB_QPS_RTS:
-               return C2_QP_STATE_RTS;
-       case IB_QPS_SQD:
-               return C2_QP_STATE_CLOSING;
-       case IB_QPS_SQE:
-               return C2_QP_STATE_CLOSING;
-       case IB_QPS_ERR:
-               return C2_QP_STATE_ERROR;
-       default:
-               return -1;
-       }
-}
-
-static int to_ib_state(enum c2_qp_state c2_state)
-{
-       switch (c2_state) {
-       case C2_QP_STATE_IDLE:
-               return IB_QPS_RESET;
-       case C2_QP_STATE_CONNECTING:
-               return IB_QPS_RTR;
-       case C2_QP_STATE_RTS:
-               return IB_QPS_RTS;
-       case C2_QP_STATE_CLOSING:
-               return IB_QPS_SQD;
-       case C2_QP_STATE_ERROR:
-               return IB_QPS_ERR;
-       case C2_QP_STATE_TERMINATE:
-               return IB_QPS_SQE;
-       default:
-               return -1;
-       }
-}
-
-static const char *to_ib_state_str(int ib_state)
-{
-       static const char *state_str[] = {
-               "IB_QPS_RESET",
-               "IB_QPS_INIT",
-               "IB_QPS_RTR",
-               "IB_QPS_RTS",
-               "IB_QPS_SQD",
-               "IB_QPS_SQE",
-               "IB_QPS_ERR"
-       };
-       if (ib_state < IB_QPS_RESET ||
-           ib_state > IB_QPS_ERR)
-               return "<invalid IB QP state>";
-
-       ib_state -= IB_QPS_RESET;
-       return state_str[ib_state];
-}
-
-void c2_set_qp_state(struct c2_qp *qp, int c2_state)
-{
-       int new_state = to_ib_state(c2_state);
-
-       pr_debug("%s: qp[%p] state modify %s --> %s\n",
-              __func__,
-               qp,
-               to_ib_state_str(qp->state),
-               to_ib_state_str(new_state));
-       qp->state = new_state;
-}
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,
-                struct ib_qp_attr *attr, int attr_mask)
-{
-       struct c2wr_qp_modify_req wr;
-       struct c2wr_qp_modify_rep *reply;
-       struct c2_vq_req *vq_req;
-       unsigned long flags;
-       u8 next_state;
-       int err;
-
-       pr_debug("%s:%d qp=%p, %s --> %s\n",
-               __func__, __LINE__,
-               qp,
-               to_ib_state_str(qp->state),
-               to_ib_state_str(attr->qp_state));
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       c2_wr_set_id(&wr, CCWR_QP_MODIFY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.qp_handle = qp->adapter_handle;
-       wr.ord = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.ird = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
-       if (attr_mask & IB_QP_STATE) {
-               /* Ensure the state is valid */
-               if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR) {
-                       err = -EINVAL;
-                       goto bail0;
-               }
-
-               wr.next_qp_state = cpu_to_be32(to_c2_state(attr->qp_state));
-
-               if (attr->qp_state == IB_QPS_ERR) {
-                       spin_lock_irqsave(&qp->lock, flags);
-                       if (qp->cm_id && qp->state == IB_QPS_RTS) {
-                               pr_debug("Generating CLOSE event for QP-->ERR, "
-                                       "qp=%p, cm_id=%p\n",qp,qp->cm_id);
-                               /* Generate a CLOSE event */
-                               vq_req->cm_id = qp->cm_id;
-                               vq_req->event = IW_CM_EVENT_CLOSE;
-                       }
-                       spin_unlock_irqrestore(&qp->lock, flags);
-               }
-               next_state =  attr->qp_state;
-
-       } else if (attr_mask & IB_QP_CUR_STATE) {
-
-               if (attr->cur_qp_state != IB_QPS_RTR &&
-                   attr->cur_qp_state != IB_QPS_RTS &&
-                   attr->cur_qp_state != IB_QPS_SQD &&
-                   attr->cur_qp_state != IB_QPS_SQE) {
-                       err = -EINVAL;
-                       goto bail0;
-               } else
-                       wr.next_qp_state =
-                           cpu_to_be32(to_c2_state(attr->cur_qp_state));
-
-               next_state = attr->cur_qp_state;
-
-       } else {
-               err = 0;
-               goto bail0;
-       }
-
-       /* reference the request struct */
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail0;
-
-       reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       err = c2_errno(reply);
-       if (!err)
-               qp->state = next_state;
-#ifdef DEBUG
-       else
-               pr_debug("%s: c2_errno=%d\n", __func__, err);
-#endif
-       /*
-        * If we are moving the QP to error and generating the CM event
-        * here, then remove the reference now, because the adapter will
-        * not generate a close event of its own.
-        */
-       spin_lock_irqsave(&qp->lock, flags);
-       if (vq_req->event==IW_CM_EVENT_CLOSE && qp->cm_id) {
-               qp->cm_id->rem_ref(qp->cm_id);
-               qp->cm_id = NULL;
-       }
-       spin_unlock_irqrestore(&qp->lock, flags);
-
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-
-       pr_debug("%s:%d qp=%p, cur_state=%s\n",
-               __func__, __LINE__,
-               qp,
-               to_ib_state_str(qp->state));
-       return err;
-}
-
-int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,
-                         int ord, int ird)
-{
-       struct c2wr_qp_modify_req wr;
-       struct c2wr_qp_modify_rep *reply;
-       struct c2_vq_req *vq_req;
-       int err;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       c2_wr_set_id(&wr, CCWR_QP_MODIFY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.qp_handle = qp->adapter_handle;
-       wr.ord = cpu_to_be32(ord);
-       wr.ird = cpu_to_be32(ird);
-       wr.sq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.rq_depth = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-       wr.next_qp_state = cpu_to_be32(C2_QP_NO_ATTR_CHANGE);
-
-       /* reference the request struct */
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
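c2_convert_access above is a straight bit-by-bit translation from the IB access flags to the adapter's ACF bits, with local read and window bind always granted. A throwaway userspace version of the same translation pattern (all flag values here are invented for illustration):

#include <stdio.h>

/* Invented flag values -- only the translation pattern matters. */
#define IB_REMOTE_WRITE  (1 << 0)
#define IB_REMOTE_READ   (1 << 1)
#define IB_LOCAL_WRITE   (1 << 2)

#define HW_REMOTE_WRITE  (1 << 4)
#define HW_REMOTE_READ   (1 << 5)
#define HW_LOCAL_WRITE   (1 << 6)
#define HW_LOCAL_READ    (1 << 7)               /* always granted, as above */

static unsigned int convert_access(int acc)
{
        return (acc & IB_REMOTE_WRITE ? HW_REMOTE_WRITE : 0) |
               (acc & IB_REMOTE_READ  ? HW_REMOTE_READ  : 0) |
               (acc & IB_LOCAL_WRITE  ? HW_LOCAL_WRITE  : 0) |
               HW_LOCAL_READ;
}

int main(void)
{
        printf("0x%x\n", convert_access(IB_REMOTE_READ | IB_LOCAL_WRITE));  /* 0xe0 */
        return 0;
}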
-               goto bail0;
-
-       reply = (struct c2wr_qp_modify_rep *) (unsigned long)
-               vq_req->reply_msg;
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       err = c2_errno(reply);
-       vq_repbuf_free(c2dev, reply);
-        * This is a map of all physical memory... use a 32 KiB page shift.
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_qp_destroy_req wr;
-       struct c2wr_qp_destroy_rep *reply;
-       unsigned long flags;
-       int err;
-
-       /*
-        * Allocate a verb request message
-        */
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req) {
-               return -ENOMEM;
-       }
-
-       /*
-        * Initialize the WR
-        */
-       c2_wr_set_id(&wr, CCWR_QP_DESTROY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.qp_handle = qp->adapter_handle;
-
-       /*
-        * reference the request struct.  dereferenced in the int handler.
-        */
-       vq_req_get(c2dev, vq_req);
-
-       spin_lock_irqsave(&qp->lock, flags);
-       if (qp->cm_id && qp->state == IB_QPS_RTS) {
-               pr_debug("destroy_qp: generating CLOSE event for QP-->ERR, "
-                       "qp=%p, cm_id=%p\n",qp,qp->cm_id);
-                       /* Generate a CLOSE event */
-               vq_req->qp = qp;
-               vq_req->cm_id = qp->cm_id;
-               vq_req->event = IW_CM_EVENT_CLOSE;
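The page-list sizing in c2_get_dma_mr is plain round-up-and-shift: align total_len to one 32 KiB page (PAGE_SHIFT + 3 with 4 KiB pages) and divide by the page size. A standalone version of that arithmetic, done in 64 bits so a length near UINT32_MAX cannot wrap during the round-up:

#include <stdint.h>
#include <stdio.h>

/* Number of 2^shift-byte pages needed to cover len bytes. */
static uint64_t pbl_depth(uint64_t len, unsigned int shift)
{
        uint64_t page = 1ULL << shift;

        return (len + page - 1) >> shift;       /* ALIGN(len, page) >> shift */
}

int main(void)
{
        /* 0xffffffff bytes with 32 KiB pages, as in c2_get_dma_mr. */
        printf("%llu\n", (unsigned long long)pbl_depth(0xffffffffULL, 15)); /* 131072 */
        return 0;
}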
-       }
-       spin_unlock_irqrestore(&qp->lock, flags);
-
-       /*
-        * Send WR to adapter
-        */
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       /*
-        * Wait for reply from adapter
-        */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       /*
-        * Process reply
-        */
-       reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       spin_lock_irqsave(&qp->lock, flags);
-       if (qp->cm_id) {
-               qp->cm_id->rem_ref(qp->cm_id);
-               qp->cm_id = NULL;
-       }
-       spin_unlock_irqrestore(&qp->lock, flags);
-
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
-{
-       int ret;
-
-       idr_preload(GFP_KERNEL);
-       spin_lock_irq(&c2dev->qp_table.lock);
-
-       ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
-       if (ret >= 0)
-               qp->qpn = ret;
-
-       spin_unlock_irq(&c2dev->qp_table.lock);
-       idr_preload_end();
-       return ret < 0 ? ret : 0;
-}
-
-static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
-{
-       spin_lock_irq(&c2dev->qp_table.lock);
-       idr_remove(&c2dev->qp_table.idr, qpn);
-       spin_unlock_irq(&c2dev->qp_table.lock);
-}
-
-struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
-{
-       unsigned long flags;
-       struct c2_qp *qp;
-
-       spin_lock_irqsave(&c2dev->qp_table.lock, flags);
-       qp = idr_find(&c2dev->qp_table.idr, qpn);
-       spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
-       return qp;
-}
-
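c2_alloc_qpn/c2_find_qpn/c2_free_qpn above implement a spinlock-protected handle table: a small integer QPN maps to the QP pointer through the device's idr. A rough userspace analog using a fixed array and a pthread mutex instead of the kernel idr and spinlock (sizes and names are arbitrary):

#include <pthread.h>
#include <stdio.h>

#define MAX_QPS 64

struct qp { int dummy; };

static struct qp *table[MAX_QPS];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int alloc_qpn(struct qp *qp)
{
        int qpn = -1;

        pthread_mutex_lock(&table_lock);
        for (int i = 0; i < MAX_QPS; i++) {
                if (!table[i]) {
                        table[i] = qp;
                        qpn = i;
                        break;
                }
        }
        pthread_mutex_unlock(&table_lock);
        return qpn;                             /* -1 if the table is full */
}

static struct qp *find_qpn(int qpn)
{
        struct qp *qp;

        pthread_mutex_lock(&table_lock);
        qp = (qpn >= 0 && qpn < MAX_QPS) ? table[qpn] : NULL;
        pthread_mutex_unlock(&table_lock);
        return qp;
}

static void free_qpn(int qpn)
{
        pthread_mutex_lock(&table_lock);
        if (qpn >= 0 && qpn < MAX_QPS)
                table[qpn] = NULL;
        pthread_mutex_unlock(&table_lock);
}

int main(void)
{
        struct qp a;
        int qpn = alloc_qpn(&a);

        printf("qpn=%d found=%d\n", qpn, find_qpn(qpn) == &a);
        free_qpn(qpn);
        return 0;
}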
-int c2_alloc_qp(struct c2_dev *c2dev,
-               struct c2_pd *pd,
-               struct ib_qp_init_attr *qp_attrs, struct c2_qp *qp)
-{
-       struct c2wr_qp_create_req wr;
-       struct c2wr_qp_create_rep *reply;
-       struct c2_vq_req *vq_req;
-       struct c2_cq *send_cq = to_c2cq(qp_attrs->send_cq);
-       struct c2_cq *recv_cq = to_c2cq(qp_attrs->recv_cq);
-       unsigned long peer_pa;
-       u32 q_size, msg_size, mmap_size;
-       void __iomem *mmap;
-       int err;
-
-       err = c2_alloc_qpn(c2dev, qp);
-       if (err)
-               return err;
-       qp->ibqp.qp_num = qp->qpn;
-       qp->ibqp.qp_type = IB_QPT_RC;
-
-       /* Allocate the SQ and RQ shared pointers */
-       qp->sq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                        &qp->sq_mq.shared_dma, GFP_KERNEL);
-       if (!qp->sq_mq.shared) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       qp->rq_mq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                        &qp->rq_mq.shared_dma, GFP_KERNEL);
-       if (!qp->rq_mq.shared) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       /* Allocate the verbs request */
-       vq_req = vq_req_alloc(c2dev);
-       if (vq_req == NULL) {
-               err = -ENOMEM;
-               goto bail2;
-       }
-
-       /* Initialize the work request */
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_QP_CREATE);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-       wr.sq_cq_handle = send_cq->adapter_handle;
-       wr.rq_cq_handle = recv_cq->adapter_handle;
-       wr.sq_depth = cpu_to_be32(qp_attrs->cap.max_send_wr + 1);
-       wr.rq_depth = cpu_to_be32(qp_attrs->cap.max_recv_wr + 1);
-       wr.srq_handle = 0;
-       wr.flags = cpu_to_be32(QP_RDMA_READ | QP_RDMA_WRITE | QP_MW_BIND |
-                              QP_ZERO_STAG | QP_RDMA_READ_RESPONSE);
-       wr.send_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
-       wr.recv_sgl_depth = cpu_to_be32(qp_attrs->cap.max_recv_sge);
-       wr.rdma_write_sgl_depth = cpu_to_be32(qp_attrs->cap.max_send_sge);
-       wr.shared_sq_ht = cpu_to_be64(qp->sq_mq.shared_dma);
-       wr.shared_rq_ht = cpu_to_be64(qp->rq_mq.shared_dma);
-       wr.ord = cpu_to_be32(C2_MAX_ORD_PER_QP);
-       wr.ird = cpu_to_be32(C2_MAX_IRD_PER_QP);
-       wr.pd_id = pd->pd_id;
-       wr.user_context = (unsigned long) qp;
-
-       vq_req_get(c2dev, vq_req);
-
-       /* Send the WR to the adapter */
-       err = vq_send_wr(c2dev, (union c2wr *) & wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail3;
-       }
-
-       /* Wait for the verb reply  */
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail3;
-       }
-
-       /* Process the reply */
-       reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail3;
-       }
-
-       if ((err = c2_wr_get_result(reply)) != 0) {
-               goto bail4;
-       }
-
-       /* Fill in the kernel QP struct */
-       atomic_set(&qp->refcount, 1);
-       qp->adapter_handle = reply->qp_handle;
-       qp->state = IB_QPS_RESET;
-       qp->send_sgl_depth = qp_attrs->cap.max_send_sge;
-       qp->rdma_write_sgl_depth = qp_attrs->cap.max_send_sge;
-       qp->recv_sgl_depth = qp_attrs->cap.max_recv_sge;
-       init_waitqueue_head(&qp->wait);
-
-       /* Initialize the SQ MQ */
-       q_size = be32_to_cpu(reply->sq_depth);
-       msg_size = be32_to_cpu(reply->sq_msg_size);
-       peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start);
-       mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
-       mmap = ioremap_nocache(peer_pa, mmap_size);
-       if (!mmap) {
-               err = -ENOMEM;
-               goto bail5;
-       }
-
-       c2_mq_req_init(&qp->sq_mq,
-                      be32_to_cpu(reply->sq_mq_index),
-                      q_size,
-                      msg_size,
-                      mmap + sizeof(struct c2_mq_shared),      /* pool start */
-                      mmap,                            /* peer */
-                      C2_MQ_ADAPTER_TARGET);
-
-       /* Initialize the RQ mq */
-       q_size = be32_to_cpu(reply->rq_depth);
-       msg_size = be32_to_cpu(reply->rq_msg_size);
-       peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start);
-       mmap_size = PAGE_ALIGN(sizeof(struct c2_mq_shared) + msg_size * q_size);
-       mmap = ioremap_nocache(peer_pa, mmap_size);
-       if (!mmap) {
-               err = -ENOMEM;
-               goto bail6;
-       }
-
-       c2_mq_req_init(&qp->rq_mq,
-                      be32_to_cpu(reply->rq_mq_index),
-                      q_size,
-                      msg_size,
-                      mmap + sizeof(struct c2_mq_shared),      /* pool start */
-                      mmap,                            /* peer */
-                      C2_MQ_ADAPTER_TARGET);
-
-       vq_repbuf_free(c2dev, reply);
-       vq_req_free(c2dev, vq_req);
-
-       return 0;
-
-bail6:
-       iounmap(qp->sq_mq.peer);
-bail5:
-       destroy_qp(c2dev, qp);
-bail4:
-       vq_repbuf_free(c2dev, reply);
-bail3:
-       vq_req_free(c2dev, vq_req);
-bail2:
-       c2_free_mqsp(qp->rq_mq.shared);
-bail1:
-       c2_free_mqsp(qp->sq_mq.shared);
-bail0:
-       c2_free_qpn(c2dev, qp->qpn);
-       return err;
-}
-
-static inline void c2_lock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
-       if (send_cq == recv_cq)
-               spin_lock_irq(&send_cq->lock);
-       else if (send_cq > recv_cq) {
-               spin_lock_irq(&send_cq->lock);
-               spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
-       } else {
-               spin_lock_irq(&recv_cq->lock);
-               spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
-       }
-}
-
-static inline void c2_unlock_cqs(struct c2_cq *send_cq, struct c2_cq *recv_cq)
-{
-       if (send_cq == recv_cq)
-               spin_unlock_irq(&send_cq->lock);
-       else if (send_cq > recv_cq) {
-               spin_unlock(&recv_cq->lock);
-               spin_unlock_irq(&send_cq->lock);
-       } else {
-               spin_unlock(&send_cq->lock);
-               spin_unlock_irq(&recv_cq->lock);
-       }
-}
-
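c2_lock_cqs/c2_unlock_cqs avoid an AB-BA deadlock by always taking the two CQ locks in a fixed order, chosen by comparing the CQ pointers. A userspace sketch of the same discipline with pthread mutexes (comparing unrelated pointers like this is the same idiom the driver uses above):

#include <pthread.h>
#include <stdio.h>

/* Always acquire the two locks in address order so two threads can never
 * grab them in opposite orders and deadlock. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a == b) {
                pthread_mutex_lock(a);
        } else if (a < b) {
                pthread_mutex_lock(a);
                pthread_mutex_lock(b);
        } else {
                pthread_mutex_lock(b);
                pthread_mutex_lock(a);
        }
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
        pthread_mutex_unlock(a);
        if (a != b)
                pthread_mutex_unlock(b);
}

int main(void)
{
        pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

        lock_pair(&m1, &m2);
        puts("both locks held");
        unlock_pair(&m1, &m2);
        return 0;
}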
-void c2_free_qp(struct c2_dev *c2dev, struct c2_qp *qp)
-{
-       struct c2_cq *send_cq;
-       struct c2_cq *recv_cq;
-
-       send_cq = to_c2cq(qp->ibqp.send_cq);
-       recv_cq = to_c2cq(qp->ibqp.recv_cq);
-
-       /*
-        * Lock CQs here, so that CQ polling code can do QP lookup
-        * without taking a lock.
-        */
-       c2_lock_cqs(send_cq, recv_cq);
-       c2_free_qpn(c2dev, qp->qpn);
-       c2_unlock_cqs(send_cq, recv_cq);
-
-       /*
-        * Destroy qp in the rnic...
-        */
-       destroy_qp(c2dev, qp);
-
-       /*
-        * Mark any unreaped CQEs as null and void.
-        */
-       c2_cq_clean(c2dev, qp, send_cq->cqn);
-       if (send_cq != recv_cq)
-               c2_cq_clean(c2dev, qp, recv_cq->cqn);
-       /*
-        * Unmap the MQs and return the shared pointers
-        * to the message pool.
-        */
-       iounmap(qp->sq_mq.peer);
-       iounmap(qp->rq_mq.peer);
-       c2_free_mqsp(qp->sq_mq.shared);
-       c2_free_mqsp(qp->rq_mq.shared);
-
-       atomic_dec(&qp->refcount);
-       wait_event(qp->wait, !atomic_read(&qp->refcount));
-}
-
-/*
- * Function: move_sgl
- *
- * Description:
- * Move an SGL from the user's work request struct into a CCIL Work Request
- * message, swapping to WR byte order and ensuring the total length doesn't
- * overflow.
- *
- * IN:
- * dst         - ptr to CCIL Work Request message SGL memory.
- * src         - ptr to the consumer's SGL memory.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int
-move_sgl(struct c2_data_addr * dst, struct ib_sge *src, int count, u32 * p_len,
-        u8 * actual_count)
-{
-       u32 tot = 0;            /* running total */
-       u8 acount = 0;          /* running total non-0 len sge's */
-
-       while (count > 0) {
-               /*
-                * If the addition of this SGE causes the
-                * total SGL length to exceed 2^32-1, then
-                * fail-n-bail.
-                *
-                * If the current total plus the next element length
-                * wraps, then it will go negative and be less than the
-                * current total...
-                */
-               if ((tot + src->length) < tot) {
-                       return -EINVAL;
-               }
-               /*
-                * Bug: 1456 (as well as 1498 & 1643)
-                * Skip over any sge's supplied with len=0
-                */
-               if (src->length) {
-                       tot += src->length;
-                       dst->stag = cpu_to_be32(src->lkey);
-                       dst->to = cpu_to_be64(src->addr);
-                       dst->length = cpu_to_be32(src->length);
-                       dst++;
-                       acount++;
-               }
-               src++;
-               count--;
-       }
-
-       if (acount == 0) {
-               /*
-                * Bug: 1476 (as well as 1498, 1456 and 1643)
-                * Set up the SGL in the WR to make it easier for the RNIC.
-                * This way, the FW doesn't have to deal with special cases.
-                * Setting length=0 should be sufficient.
-                */
-               dst->stag = 0;
-               dst->to = 0;
-               dst->length = 0;
-       }
-
-       *p_len = tot;
-       *actual_count = acount;
-       return 0;
-}
-
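The (tot + src->length) < tot test in move_sgl is the usual unsigned-wraparound check: if adding the next length makes the running total smaller, the 32-bit sum overflowed. A two-line demonstration:

#include <stdint.h>
#include <stdio.h>

static int add_would_wrap(uint32_t tot, uint32_t len)
{
        return (uint32_t)(tot + len) < tot;
}

int main(void)
{
        printf("%d\n", add_would_wrap(0xfffffff0u, 0x0f));      /* 0: still fits */
        printf("%d\n", add_would_wrap(0xfffffff0u, 0x11));      /* 1: would wrap */
        return 0;
}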
-/*
- * Function: c2_activity (private function)
- *
- * Description:
- * Post an mq index to the host->adapter activity fifo.
- *
- * IN:
- * c2dev       - ptr to c2dev structure
- * mq_index    - mq index to post
- * shared      - value most recently written to shared
- *
- * OUT:
- *
- * Return:
- * none
- */
-static inline void c2_activity(struct c2_dev *c2dev, u32 mq_index, u16 shared)
-{
-       /*
-        * First read the register to see if the FIFO is full, and if so,
-        * spin until it's not.  This isn't perfect -- there is no
-        * synchronization among the clients of the register, but in
- * practice it prevents multiple CPUs from hammering the bus
-        * with PCI RETRY. Note that when this does happen, the card
-        * cannot get on the bus and the card and system hang in a
-        * deadlock -- thus the need for this code. [TOT]
-        */
-       while (readl(c2dev->regs + PCI_BAR0_ADAPTER_HINT) & 0x80000000)
-               udelay(10);
-
-       __raw_writel(C2_HINT_MAKE(mq_index, shared),
-                    c2dev->regs + PCI_BAR0_ADAPTER_HINT);
-}
-
-/*
- * Function: qp_wr_post
- *
- * Description:
- * This in-line function allocates an MQ msg, then moves the host-copy of
- * the completed WR into msg.  Then it posts the message.
- *
- * IN:
- * q           - ptr to user MQ.
- * wr          - ptr to host-copy of the WR.
- * qp          - ptr to user qp
- * size                - Number of bytes to post.  Assumed to be divisible by 4.
- *
- * OUT: none
- *
- * Return:
- * CCIL status codes.
- */
-static int qp_wr_post(struct c2_mq *q, union c2wr * wr, struct c2_qp *qp, u32 size)
-{
-       union c2wr *msg;
-
-       msg = c2_mq_alloc(q);
-       if (msg == NULL) {
-               return -EINVAL;
-       }
-#ifdef CCMSGMAGIC
-       ((c2wr_hdr_t *) wr)->magic = cpu_to_be32(CCWR_MAGIC);
-#endif
-
-       /*
-        * Since all header fields in the WR are the same as the
-        * CQE, set the following so the adapter need not.
-        */
-       c2_wr_set_result(wr, CCERR_PENDING);
-
-       /*
-        * Copy the wr down to the adapter
-        */
-       memcpy((void *) msg, (void *) wr, size);
-
-       c2_mq_produce(q);
-       return 0;
-}
-
-
-int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
-                struct ib_send_wr **bad_wr)
-{
-       struct c2_dev *c2dev = to_c2dev(ibqp->device);
-       struct c2_qp *qp = to_c2qp(ibqp);
-       union c2wr wr;
-       unsigned long lock_flags;
-       int err = 0;
-
-       u32 flags;
-       u32 tot_len;
-       u8 actual_sge_count;
-       u32 msg_size;
-
-       if (qp->state > IB_QPS_RTS) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       while (ib_wr) {
-
-               flags = 0;
-               wr.sqwr.sq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
-               if (ib_wr->send_flags & IB_SEND_SIGNALED) {
-                       flags |= SQ_SIGNALED;
-               }
-
-               switch (ib_wr->opcode) {
-               case IB_WR_SEND:
-               case IB_WR_SEND_WITH_INV:
-                       if (ib_wr->opcode == IB_WR_SEND) {
-                               if (ib_wr->send_flags & IB_SEND_SOLICITED)
-                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE);
-                               else
-                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND);
-                               wr.sqwr.send.remote_stag = 0;
-                       } else {
-                               if (ib_wr->send_flags & IB_SEND_SOLICITED)
-                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND_SE_INV);
-                               else
-                                       c2_wr_set_id(&wr, C2_WR_TYPE_SEND_INV);
-                               wr.sqwr.send.remote_stag =
-                                       cpu_to_be32(ib_wr->ex.invalidate_rkey);
-                       }
-
-                       msg_size = sizeof(struct c2wr_send_req) +
-                               sizeof(struct c2_data_addr) * ib_wr->num_sge;
-                       if (ib_wr->num_sge > qp->send_sgl_depth) {
-                               err = -EINVAL;
-                               break;
-                       }
-                       if (ib_wr->send_flags & IB_SEND_FENCE) {
-                               flags |= SQ_READ_FENCE;
-                       }
-                       err = move_sgl((struct c2_data_addr *) & (wr.sqwr.send.data),
-                                      ib_wr->sg_list,
-                                      ib_wr->num_sge,
-                                      &tot_len, &actual_sge_count);
-                       wr.sqwr.send.sge_len = cpu_to_be32(tot_len);
-                       c2_wr_set_sge_count(&wr, actual_sge_count);
-                       break;
-               case IB_WR_RDMA_WRITE:
-                       c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_WRITE);
-                       msg_size = sizeof(struct c2wr_rdma_write_req) +
-                           (sizeof(struct c2_data_addr) * ib_wr->num_sge);
-                       if (ib_wr->num_sge > qp->rdma_write_sgl_depth) {
-                               err = -EINVAL;
-                               break;
-                       }
-                       if (ib_wr->send_flags & IB_SEND_FENCE) {
-                               flags |= SQ_READ_FENCE;
-                       }
-                       wr.sqwr.rdma_write.remote_stag =
-                           cpu_to_be32(rdma_wr(ib_wr)->rkey);
-                       wr.sqwr.rdma_write.remote_to =
-                           cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
-                       err = move_sgl((struct c2_data_addr *)
-                                      & (wr.sqwr.rdma_write.data),
-                                      ib_wr->sg_list,
-                                      ib_wr->num_sge,
-                                      &tot_len, &actual_sge_count);
-                       wr.sqwr.rdma_write.sge_len = cpu_to_be32(tot_len);
-                       c2_wr_set_sge_count(&wr, actual_sge_count);
-                       break;
-               case IB_WR_RDMA_READ:
-                       c2_wr_set_id(&wr, C2_WR_TYPE_RDMA_READ);
-                       msg_size = sizeof(struct c2wr_rdma_read_req);
-
-                       /* iWARP only supports 1 SGE for RDMA reads */
-                       if (ib_wr->num_sge > 1) {
-                               err = -EINVAL;
-                               break;
-                       }
-
-                       /*
-                        * Move the local and remote stag/to/len into the WR.
-                        */
-                       wr.sqwr.rdma_read.local_stag =
-                           cpu_to_be32(ib_wr->sg_list->lkey);
-                       wr.sqwr.rdma_read.local_to =
-                           cpu_to_be64(ib_wr->sg_list->addr);
-                       wr.sqwr.rdma_read.remote_stag =
-                           cpu_to_be32(rdma_wr(ib_wr)->rkey);
-                       wr.sqwr.rdma_read.remote_to =
-                           cpu_to_be64(rdma_wr(ib_wr)->remote_addr);
-                       wr.sqwr.rdma_read.length =
-                           cpu_to_be32(ib_wr->sg_list->length);
-                       break;
-               default:
-                       /* error */
-                       msg_size = 0;
-                       err = -EINVAL;
-                       break;
-               }
-
-               /*
-                * If we had an error on the last wr build, then
-                * break out.  Possible errors include bogus WR
-                * type, and a bogus SGL length...
-                */
-               if (err) {
-                       break;
-               }
-
-               /*
-                * Store flags
-                */
-               c2_wr_set_flags(&wr, flags);
-
-               /*
-                * Post the puppy!
-                */
-               spin_lock_irqsave(&qp->lock, lock_flags);
-               err = qp_wr_post(&qp->sq_mq, &wr, qp, msg_size);
-               if (err) {
-                       spin_unlock_irqrestore(&qp->lock, lock_flags);
-                       break;
-               }
-
-               /*
-                * Enqueue mq index to activity FIFO.
-                */
-               c2_activity(c2dev, qp->sq_mq.index, qp->sq_mq.hint_count);
-               spin_unlock_irqrestore(&qp->lock, lock_flags);
-
-               ib_wr = ib_wr->next;
-       }
-
-out:
-       if (err)
-               *bad_wr = ib_wr;
-       return err;
-}
-
-int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
-                   struct ib_recv_wr **bad_wr)
-{
-       struct c2_dev *c2dev = to_c2dev(ibqp->device);
-       struct c2_qp *qp = to_c2qp(ibqp);
-       union c2wr wr;
-       unsigned long lock_flags;
-       int err = 0;
-
-       if (qp->state > IB_QPS_RTS) {
-               err = -EINVAL;
-               goto out;
-       }
-
-       /*
-        * Try and post each work request
-        */
-       while (ib_wr) {
-               u32 tot_len;
-               u8 actual_sge_count;
-
-               if (ib_wr->num_sge > qp->recv_sgl_depth) {
-                       err = -EINVAL;
-                       break;
-               }
-
-               /*
-                * Create local host-copy of the WR
-                */
-               wr.rqwr.rq_hdr.user_hdr.hdr.context = ib_wr->wr_id;
-               c2_wr_set_id(&wr, CCWR_RECV);
-               c2_wr_set_flags(&wr, 0);
-
-               /* sge_count is limited to eight bits. */
-               BUG_ON(ib_wr->num_sge >= 256);
-               err = move_sgl((struct c2_data_addr *) & (wr.rqwr.data),
-                              ib_wr->sg_list,
-                              ib_wr->num_sge, &tot_len, &actual_sge_count);
-               c2_wr_set_sge_count(&wr, actual_sge_count);
-
-               /*
-                * If we had an error on the last wr build, then
-                * break out.  Possible errors include bogus WR
-                * type, and a bogus SGL length...
-                */
-               if (err) {
-                       break;
-               }
-
-               spin_lock_irqsave(&qp->lock, lock_flags);
-               err = qp_wr_post(&qp->rq_mq, &wr, qp, qp->rq_mq.msg_size);
-               if (err) {
-                       spin_unlock_irqrestore(&qp->lock, lock_flags);
-                       break;
-               }
-
-               /*
-                * Enqueue mq index to activity FIFO
-                */
-               c2_activity(c2dev, qp->rq_mq.index, qp->rq_mq.hint_count);
-               spin_unlock_irqrestore(&qp->lock, lock_flags);
-
-               ib_wr = ib_wr->next;
-       }
-
-out:
-       if (err)
-               *bad_wr = ib_wr;
-       return err;
-}
-
-void c2_init_qp_table(struct c2_dev *c2dev)
-{
-       spin_lock_init(&c2dev->qp_table.lock);
-       idr_init(&c2dev->qp_table.idr);
-}
-
-void c2_cleanup_qp_table(struct c2_dev *c2dev)
-{
-       idr_destroy(&c2dev->qp_table.idr);
-}
diff --git a/drivers/staging/rdma/amso1100/c2_rnic.c b/drivers/staging/rdma/amso1100/c2_rnic.c
deleted file mode 100644 (file)
index 5e65c6d..0000000
+++ /dev/null
@@ -1,652 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-#include <linux/ethtool.h>
-#include <linux/mii.h>
-#include <linux/if_vlan.h>
-#include <linux/crc32.h>
-#include <linux/in.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/inet.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-
-#include <linux/route.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <rdma/ib_smi.h>
-#include "c2.h"
-#include "c2_vq.h"
-
-/* Device capabilities */
-#define C2_MIN_PAGESIZE  1024
-
-#define C2_MAX_MRS       32768
-#define C2_MAX_QPS       16000
-#define C2_MAX_WQE_SZ    256
-#define C2_MAX_QP_WR     ((128*1024)/C2_MAX_WQE_SZ)
-#define C2_MAX_SGES      4
-#define C2_MAX_SGE_RD    1
-#define C2_MAX_CQS       32768
-#define C2_MAX_CQES      4096
-#define C2_MAX_PDS       16384
-
-/*
- * Send the adapter INIT message to the amso1100
- */
-static int c2_adapter_init(struct c2_dev *c2dev)
-{
-       struct c2wr_init_req wr;
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_INIT);
-       wr.hdr.context = 0;
-       wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);
-       wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);
-       wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);
-       wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);
-       wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);
-       wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);
-
-       /* Post the init message */
-       return vq_send_wr(c2dev, (union c2wr *) & wr);
-}
-
-/*
- * Send the adapter TERM message to the amso1100
- */
-static void c2_adapter_term(struct c2_dev *c2dev)
-{
-       struct c2wr_init_req wr;
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_TERM);
-       wr.hdr.context = 0;
-
-       /* Post the term message */
-       vq_send_wr(c2dev, (union c2wr *) & wr);
-       c2dev->init = 0;
-
-       return;
-}
-
-/*
- * Query the adapter
- */
-static int c2_rnic_query(struct c2_dev *c2dev, struct ib_device_attr *props)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_rnic_query_req wr;
-       struct c2wr_rnic_query_rep *reply;
-       int err;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       c2_wr_set_id(&wr, CCWR_RNIC_QUERY);
-       wr.hdr.context = (unsigned long) vq_req;
-       wr.rnic_handle = c2dev->adapter_handle;
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) &wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       reply =
-           (struct c2wr_rnic_query_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply)
-               err = -ENOMEM;
-       else
-               err = c2_errno(reply);
-       if (err)
-               goto bail2;
-
-       props->fw_ver =
-               ((u64)be32_to_cpu(reply->fw_ver_major) << 32) |
-               ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) |
-               (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF);
-       memcpy(&props->sys_image_guid, c2dev->netdev->dev_addr, 6);
-       props->max_mr_size         = 0xFFFFFFFF;
-       props->page_size_cap       = ~(C2_MIN_PAGESIZE-1);
-       props->vendor_id           = be32_to_cpu(reply->vendor_id);
-       props->vendor_part_id      = be32_to_cpu(reply->part_number);
-       props->hw_ver              = be32_to_cpu(reply->hw_version);
-       props->max_qp              = be32_to_cpu(reply->max_qps);
-       props->max_qp_wr           = be32_to_cpu(reply->max_qp_depth);
-       props->device_cap_flags    = c2dev->device_cap_flags;
-       props->max_sge             = C2_MAX_SGES;
-       props->max_sge_rd          = C2_MAX_SGE_RD;
-       props->max_cq              = be32_to_cpu(reply->max_cqs);
-       props->max_cqe             = be32_to_cpu(reply->max_cq_depth);
-       props->max_mr              = be32_to_cpu(reply->max_mrs);
-       props->max_pd              = be32_to_cpu(reply->max_pds);
-       props->max_qp_rd_atom      = be32_to_cpu(reply->max_qp_ird);
-       props->max_ee_rd_atom      = 0;
-       props->max_res_rd_atom     = be32_to_cpu(reply->max_global_ird);
-       props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord);
-       props->max_ee_init_rd_atom = 0;
-       props->atomic_cap          = IB_ATOMIC_NONE;
-       props->max_ee              = 0;
-       props->max_rdd             = 0;
-       props->max_mw              = be32_to_cpu(reply->max_mws);
-       props->max_raw_ipv6_qp     = 0;
-       props->max_raw_ethy_qp     = 0;
-       props->max_mcast_grp       = 0;
-       props->max_mcast_qp_attach = 0;
-       props->max_total_mcast_qp_attach = 0;
-       props->max_ah              = 0;
-       props->max_fmr             = 0;
-       props->max_map_per_fmr     = 0;
-       props->max_srq             = 0;
-       props->max_srq_wr          = 0;
-       props->max_srq_sge         = 0;
-       props->max_pkeys           = 0;
-       props->local_ca_ack_delay  = 0;
-
- bail2:
-       vq_repbuf_free(c2dev, reply);
-
- bail1:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-/*
- * Add an IP address to the RNIC interface
- */
-int c2_add_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_rnic_setconfig_req *wr;
-       struct c2wr_rnic_setconfig_rep *reply;
-       struct c2_netaddr netaddr;
-       int err, len;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       len = sizeof(struct c2_netaddr);
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
-       wr->hdr.context = (unsigned long) vq_req;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->option = cpu_to_be32(C2_CFG_ADD_ADDR);
-
-       netaddr.ip_addr = inaddr;
-       netaddr.netmask = inmask;
-       netaddr.mtu = 0;
-
-       memcpy(wr->data, &netaddr, len);
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       reply =
-           (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       err = c2_errno(reply);
-       vq_repbuf_free(c2dev, reply);
-
-bail1:
-       kfree(wr);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-/*
- * Delete an IP address from the RNIC interface
- */
-int c2_del_addr(struct c2_dev *c2dev, __be32 inaddr, __be32 inmask)
-{
-       struct c2_vq_req *vq_req;
-       struct c2wr_rnic_setconfig_req *wr;
-       struct c2wr_rnic_setconfig_rep *reply;
-       struct c2_netaddr netaddr;
-       int err, len;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (!vq_req)
-               return -ENOMEM;
-
-       len = sizeof(struct c2_netaddr);
-       wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
-       if (!wr) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       c2_wr_set_id(wr, CCWR_RNIC_SETCONFIG);
-       wr->hdr.context = (unsigned long) vq_req;
-       wr->rnic_handle = c2dev->adapter_handle;
-       wr->option = cpu_to_be32(C2_CFG_DEL_ADDR);
-
-       netaddr.ip_addr = inaddr;
-       netaddr.netmask = inmask;
-       netaddr.mtu = 0;
-
-       memcpy(wr->data, &netaddr, len);
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, (union c2wr *) wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail1;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err)
-               goto bail1;
-
-       reply =
-           (struct c2wr_rnic_setconfig_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       err = c2_errno(reply);
-       vq_repbuf_free(c2dev, reply);
-
-bail1:
-       kfree(wr);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-/*
- * Open a single RNIC instance to use with all
- * low level openib calls
- */
-static int c2_rnic_open(struct c2_dev *c2dev)
-{
-       struct c2_vq_req *vq_req;
-       union c2wr wr;
-       struct c2wr_rnic_open_rep *reply;
-       int err;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (vq_req == NULL) {
-               return -ENOMEM;
-       }
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_RNIC_OPEN);
-       wr.rnic_open.req.hdr.context = (unsigned long) (vq_req);
-       wr.rnic_open.req.flags = cpu_to_be16(RNIC_PRIV_MODE);
-       wr.rnic_open.req.port_num = cpu_to_be16(0);
-       wr.rnic_open.req.user_context = (unsigned long) c2dev;
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, &wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       if ((err = c2_errno(reply)) != 0) {
-               goto bail1;
-       }
-
-       c2dev->adapter_handle = reply->rnic_handle;
-
-bail1:
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-/*
- * Close the RNIC instance
- */
-static int c2_rnic_close(struct c2_dev *c2dev)
-{
-       struct c2_vq_req *vq_req;
-       union c2wr wr;
-       struct c2wr_rnic_close_rep *reply;
-       int err;
-
-       vq_req = vq_req_alloc(c2dev);
-       if (vq_req == NULL) {
-               return -ENOMEM;
-       }
-
-       memset(&wr, 0, sizeof(wr));
-       c2_wr_set_id(&wr, CCWR_RNIC_CLOSE);
-       wr.rnic_close.req.hdr.context = (unsigned long) vq_req;
-       wr.rnic_close.req.rnic_handle = c2dev->adapter_handle;
-
-       vq_req_get(c2dev, vq_req);
-
-       err = vq_send_wr(c2dev, &wr);
-       if (err) {
-               vq_req_put(c2dev, vq_req);
-               goto bail0;
-       }
-
-       err = vq_wait_for_reply(c2dev, vq_req);
-       if (err) {
-               goto bail0;
-       }
-
-       reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg);
-       if (!reply) {
-               err = -ENOMEM;
-               goto bail0;
-       }
-
-       if ((err = c2_errno(reply)) != 0) {
-               goto bail1;
-       }
-
-       c2dev->adapter_handle = 0;
-
-bail1:
-       vq_repbuf_free(c2dev, reply);
-bail0:
-       vq_req_free(c2dev, vq_req);
-       return err;
-}
-
-/*
- * Called by c2_probe to initialize the RNIC. This principally
- * involves initializing the various limits and resource pools that
- * comprise the RNIC instance.
- */
-int c2_rnic_init(struct c2_dev *c2dev)
-{
-       int err;
-       u32 qsize, msgsize;
-       void *q1_pages;
-       void *q2_pages;
-       void __iomem *mmio_regs;
-
-       /* Device capabilities */
-       c2dev->device_cap_flags =
-           (IB_DEVICE_RESIZE_MAX_WR |
-            IB_DEVICE_CURR_QP_STATE_MOD |
-            IB_DEVICE_SYS_IMAGE_GUID |
-            IB_DEVICE_LOCAL_DMA_LKEY |
-            IB_DEVICE_MEM_WINDOW);
-
-       /* Allocate the qptr_array */
-       c2dev->qptr_array = vzalloc(C2_MAX_CQS * sizeof(void *));
-       if (!c2dev->qptr_array) {
-               return -ENOMEM;
-       }
-
-       /* Initialize the qptr_array */
-       c2dev->qptr_array[0] = (void *) &c2dev->req_vq;
-       c2dev->qptr_array[1] = (void *) &c2dev->rep_vq;
-       c2dev->qptr_array[2] = (void *) &c2dev->aeq;
-
-       /* Initialize data structures */
-       init_waitqueue_head(&c2dev->req_vq_wo);
-       spin_lock_init(&c2dev->vqlock);
-       spin_lock_init(&c2dev->lock);
-
-       /* Allocate MQ shared pointer pool for kernel clients. User
-        * mode client pools are hung off the user context
-        */
-       err = c2_init_mqsp_pool(c2dev, GFP_KERNEL, &c2dev->kern_mqsp_pool);
-       if (err) {
-               goto bail0;
-       }
-
-       /* Allocate shared pointers for Q0, Q1, and Q2 from
-        * the shared pointer pool.
-        */
-
-       c2dev->hint_count = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                            &c2dev->hint_count_dma,
-                                            GFP_KERNEL);
-       c2dev->req_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                            &c2dev->req_vq.shared_dma,
-                                            GFP_KERNEL);
-       c2dev->rep_vq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                            &c2dev->rep_vq.shared_dma,
-                                            GFP_KERNEL);
-       c2dev->aeq.shared = c2_alloc_mqsp(c2dev, c2dev->kern_mqsp_pool,
-                                         &c2dev->aeq.shared_dma, GFP_KERNEL);
-       if (!c2dev->hint_count || !c2dev->req_vq.shared ||
-           !c2dev->rep_vq.shared || !c2dev->aeq.shared) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-
-       mmio_regs = c2dev->kva;
-       /* Initialize the Verbs Request Queue */
-       c2_mq_req_init(&c2dev->req_vq, 0,
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_QSIZE)),
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_MSGSIZE)),
-                      mmio_regs +
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_POOLSTART)),
-                      mmio_regs +
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q0_SHARED)),
-                      C2_MQ_ADAPTER_TARGET);
-
-       /* Initialize the Verbs Reply Queue */
-       qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_QSIZE));
-       msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_MSGSIZE));
-       q1_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
-                                     &c2dev->rep_vq.host_dma, GFP_KERNEL);
-       if (!q1_pages) {
-               err = -ENOMEM;
-               goto bail1;
-       }
-       dma_unmap_addr_set(&c2dev->rep_vq, mapping, c2dev->rep_vq.host_dma);
-       pr_debug("%s rep_vq va %p dma %llx\n", __func__, q1_pages,
-                (unsigned long long) c2dev->rep_vq.host_dma);
-       c2_mq_rep_init(&c2dev->rep_vq,
-                  1,
-                  qsize,
-                  msgsize,
-                  q1_pages,
-                  mmio_regs +
-                  be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q1_SHARED)),
-                  C2_MQ_HOST_TARGET);
-
-       /* Initialize the Asynchronous Event Queue */
-       qsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_QSIZE));
-       msgsize = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_MSGSIZE));
-       q2_pages = dma_alloc_coherent(&c2dev->pcidev->dev, qsize * msgsize,
-                                     &c2dev->aeq.host_dma, GFP_KERNEL);
-       if (!q2_pages) {
-               err = -ENOMEM;
-               goto bail2;
-       }
-       dma_unmap_addr_set(&c2dev->aeq, mapping, c2dev->aeq.host_dma);
-       pr_debug("%s aeq va %p dma %llx\n", __func__, q2_pages,
-                (unsigned long long) c2dev->aeq.host_dma);
-       c2_mq_rep_init(&c2dev->aeq,
-                      2,
-                      qsize,
-                      msgsize,
-                      q2_pages,
-                      mmio_regs +
-                      be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_Q2_SHARED)),
-                      C2_MQ_HOST_TARGET);
-
-       /* Initialize the verbs request allocator */
-       err = vq_init(c2dev);
-       if (err)
-               goto bail3;
-
-       /* Enable interrupts on the adapter */
-       writel(0, c2dev->regs + C2_IDIS);
-
-       /* create the WR init message */
-       err = c2_adapter_init(c2dev);
-       if (err)
-               goto bail4;
-       c2dev->init++;
-
-       /* open an adapter instance */
-       err = c2_rnic_open(c2dev);
-       if (err)
-               goto bail4;
-
-       /* Initialize the cached adapter limits */
-       err = c2_rnic_query(c2dev, &c2dev->props);
-       if (err)
-               goto bail5;
-
-       /* Initialize the PD pool */
-       err = c2_init_pd_table(c2dev);
-       if (err)
-               goto bail5;
-
-       /* Initialize the QP pool */
-       c2_init_qp_table(c2dev);
-       return 0;
-
-bail5:
-       c2_rnic_close(c2dev);
-bail4:
-       vq_term(c2dev);
-bail3:
-       dma_free_coherent(&c2dev->pcidev->dev,
-                         c2dev->aeq.q_size * c2dev->aeq.msg_size,
-                         q2_pages, dma_unmap_addr(&c2dev->aeq, mapping));
-bail2:
-       dma_free_coherent(&c2dev->pcidev->dev,
-                         c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-                         q1_pages, dma_unmap_addr(&c2dev->rep_vq, mapping));
-bail1:
-       c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
-bail0:
-       vfree(c2dev->qptr_array);
-
-       return err;
-}
-
-/*
- * Called by c2_remove to cleanup the RNIC resources.
- */
-void c2_rnic_term(struct c2_dev *c2dev)
-{
-
-       /* Close the open adapter instance */
-       c2_rnic_close(c2dev);
-
-       /* Send the TERM message to the adapter */
-       c2_adapter_term(c2dev);
-
-       /* Disable interrupts on the adapter */
-       writel(1, c2dev->regs + C2_IDIS);
-
-       /* Free the QP pool */
-       c2_cleanup_qp_table(c2dev);
-
-       /* Free the PD pool */
-       c2_cleanup_pd_table(c2dev);
-
-       /* Free the verbs request allocator */
-       vq_term(c2dev);
-
-       /* Free the asynchronous event queue */
-       dma_free_coherent(&c2dev->pcidev->dev,
-                         c2dev->aeq.q_size * c2dev->aeq.msg_size,
-                         c2dev->aeq.msg_pool.host,
-                         dma_unmap_addr(&c2dev->aeq, mapping));
-
-       /* Free the verbs reply queue */
-       dma_free_coherent(&c2dev->pcidev->dev,
-                         c2dev->rep_vq.q_size * c2dev->rep_vq.msg_size,
-                         c2dev->rep_vq.msg_pool.host,
-                         dma_unmap_addr(&c2dev->rep_vq, mapping));
-
-       /* Free the MQ shared pointer pool */
-       c2_free_mqsp_pool(c2dev, c2dev->kern_mqsp_pool);
-
-       /* Free the qptr_array */
-       vfree(c2dev->qptr_array);
-
-       return;
-}
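
c2_rnic_init() above uses the common kernel pattern of staged initialization with bail labels: a failure at stage N unwinds exactly the stages that already succeeded, in reverse order. The userspace sketch below reproduces only that shape with invented stage names; it says nothing about the driver's actual resources.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *stage_alloc(const char *name, int force_fail)
{
        if (force_fail)
                return NULL;
        printf("alloc %s\n", name);
        return malloc(1);
}

static void stage_free(const char *name, void *p)
{
        printf("free  %s\n", name);
        free(p);
}

/* fail_at selects which stage to simulate as failing (0 = none). */
static int rnic_init_like(int fail_at)
{
        void *a, *b, *c;

        a = stage_alloc("qptr_array", fail_at == 1);
        if (!a)
                return -ENOMEM;                 /* nothing to unwind yet */

        b = stage_alloc("mqsp_pool", fail_at == 2);
        if (!b)
                goto bail0;

        c = stage_alloc("reply_queue", fail_at == 3);
        if (!c)
                goto bail1;

        /* Success: a real driver would keep these until its _term() call. */
        stage_free("reply_queue", c);
        stage_free("mqsp_pool", b);
        stage_free("qptr_array", a);
        return 0;

bail1:
        stage_free("mqsp_pool", b);             /* undo stage 2 ...            */
bail0:
        stage_free("qptr_array", a);            /* ... then stage 1, then fail */
        return -ENOMEM;
}

int main(void)
{
        printf("simulating failure at stage 3 -> %d\n", rnic_init_like(3));
        return 0;
}
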
diff --git a/drivers/staging/rdma/amso1100/c2_status.h b/drivers/staging/rdma/amso1100/c2_status.h
deleted file mode 100644 (file)
index 6ee4aa9..0000000
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef        _C2_STATUS_H_
-#define _C2_STATUS_H_
-
-/*
- * Verbs Status Codes
- */
-enum c2_status {
-       C2_OK = 0,              /* This must be zero */
-       CCERR_INSUFFICIENT_RESOURCES = 1,
-       CCERR_INVALID_MODIFIER = 2,
-       CCERR_INVALID_MODE = 3,
-       CCERR_IN_USE = 4,
-       CCERR_INVALID_RNIC = 5,
-       CCERR_INTERRUPTED_OPERATION = 6,
-       CCERR_INVALID_EH = 7,
-       CCERR_INVALID_CQ = 8,
-       CCERR_CQ_EMPTY = 9,
-       CCERR_NOT_IMPLEMENTED = 10,
-       CCERR_CQ_DEPTH_TOO_SMALL = 11,
-       CCERR_PD_IN_USE = 12,
-       CCERR_INVALID_PD = 13,
-       CCERR_INVALID_SRQ = 14,
-       CCERR_INVALID_ADDRESS = 15,
-       CCERR_INVALID_NETMASK = 16,
-       CCERR_INVALID_QP = 17,
-       CCERR_INVALID_QP_STATE = 18,
-       CCERR_TOO_MANY_WRS_POSTED = 19,
-       CCERR_INVALID_WR_TYPE = 20,
-       CCERR_INVALID_SGL_LENGTH = 21,
-       CCERR_INVALID_SQ_DEPTH = 22,
-       CCERR_INVALID_RQ_DEPTH = 23,
-       CCERR_INVALID_ORD = 24,
-       CCERR_INVALID_IRD = 25,
-       CCERR_QP_ATTR_CANNOT_CHANGE = 26,
-       CCERR_INVALID_STAG = 27,
-       CCERR_QP_IN_USE = 28,
-       CCERR_OUTSTANDING_WRS = 29,
-       CCERR_STAG_IN_USE = 30,
-       CCERR_INVALID_STAG_INDEX = 31,
-       CCERR_INVALID_SGL_FORMAT = 32,
-       CCERR_ADAPTER_TIMEOUT = 33,
-       CCERR_INVALID_CQ_DEPTH = 34,
-       CCERR_INVALID_PRIVATE_DATA_LENGTH = 35,
-       CCERR_INVALID_EP = 36,
-       CCERR_MR_IN_USE = CCERR_STAG_IN_USE,
-       CCERR_FLUSHED = 38,
-       CCERR_INVALID_WQE = 39,
-       CCERR_LOCAL_QP_CATASTROPHIC_ERROR = 40,
-       CCERR_REMOTE_TERMINATION_ERROR = 41,
-       CCERR_BASE_AND_BOUNDS_VIOLATION = 42,
-       CCERR_ACCESS_VIOLATION = 43,
-       CCERR_INVALID_PD_ID = 44,
-       CCERR_WRAP_ERROR = 45,
-       CCERR_INV_STAG_ACCESS_ERROR = 46,
-       CCERR_ZERO_RDMA_READ_RESOURCES = 47,
-       CCERR_QP_NOT_PRIVILEGED = 48,
-       CCERR_STAG_STATE_NOT_INVALID = 49,
-       CCERR_INVALID_PAGE_SIZE = 50,
-       CCERR_INVALID_BUFFER_SIZE = 51,
-       CCERR_INVALID_PBE = 52,
-       CCERR_INVALID_FBO = 53,
-       CCERR_INVALID_LENGTH = 54,
-       CCERR_INVALID_ACCESS_RIGHTS = 55,
-       CCERR_PBL_TOO_BIG = 56,
-       CCERR_INVALID_VA = 57,
-       CCERR_INVALID_REGION = 58,
-       CCERR_INVALID_WINDOW = 59,
-       CCERR_TOTAL_LENGTH_TOO_BIG = 60,
-       CCERR_INVALID_QP_ID = 61,
-       CCERR_ADDR_IN_USE = 62,
-       CCERR_ADDR_NOT_AVAIL = 63,
-       CCERR_NET_DOWN = 64,
-       CCERR_NET_UNREACHABLE = 65,
-       CCERR_CONN_ABORTED = 66,
-       CCERR_CONN_RESET = 67,
-       CCERR_NO_BUFS = 68,
-       CCERR_CONN_TIMEDOUT = 69,
-       CCERR_CONN_REFUSED = 70,
-       CCERR_HOST_UNREACHABLE = 71,
-       CCERR_INVALID_SEND_SGL_DEPTH = 72,
-       CCERR_INVALID_RECV_SGL_DEPTH = 73,
-       CCERR_INVALID_RDMA_WRITE_SGL_DEPTH = 74,
-       CCERR_INSUFFICIENT_PRIVILEGES = 75,
-       CCERR_STACK_ERROR = 76,
-       CCERR_INVALID_VERSION = 77,
-       CCERR_INVALID_MTU = 78,
-       CCERR_INVALID_IMAGE = 79,
-       CCERR_PENDING = 98,     /* not an error; used internally by adapter */
-       CCERR_DEFER = 99,       /* not an error; used internally by adapter */
-       CCERR_FAILED_WRITE = 100,
-       CCERR_FAILED_ERASE = 101,
-       CCERR_FAILED_VERIFICATION = 102,
-       CCERR_NOT_FOUND = 103,
-
-};
-
-/*
- * CCAE_ACTIVE_CONNECT_RESULTS status result codes.
- */
-enum c2_connect_status {
-       C2_CONN_STATUS_SUCCESS = C2_OK,
-       C2_CONN_STATUS_NO_MEM = CCERR_INSUFFICIENT_RESOURCES,
-       C2_CONN_STATUS_TIMEDOUT = CCERR_CONN_TIMEDOUT,
-       C2_CONN_STATUS_REFUSED = CCERR_CONN_REFUSED,
-       C2_CONN_STATUS_NETUNREACH = CCERR_NET_UNREACHABLE,
-       C2_CONN_STATUS_HOSTUNREACH = CCERR_HOST_UNREACHABLE,
-       C2_CONN_STATUS_INVALID_RNIC = CCERR_INVALID_RNIC,
-       C2_CONN_STATUS_INVALID_QP = CCERR_INVALID_QP,
-       C2_CONN_STATUS_INVALID_QP_STATE = CCERR_INVALID_QP_STATE,
-       C2_CONN_STATUS_REJECTED = CCERR_CONN_RESET,
-       C2_CONN_STATUS_ADDR_NOT_AVAIL = CCERR_ADDR_NOT_AVAIL,
-};
-
-/*
- * Flash programming status codes.
- */
-enum c2_flash_status {
-       C2_FLASH_STATUS_SUCCESS = 0x0000,
-       C2_FLASH_STATUS_VERIFY_ERR = 0x0002,
-       C2_FLASH_STATUS_IMAGE_ERR = 0x0004,
-       C2_FLASH_STATUS_ECLBS = 0x0400,
-       C2_FLASH_STATUS_PSLBS = 0x0800,
-       C2_FLASH_STATUS_VPENS = 0x1000,
-};
-
-#endif                         /* _C2_STATUS_H_ */
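
Callers shown earlier in this diff (c2_rnic_query(), c2_add_addr(), and friends) pass these adapter status codes through c2_errno() to get ordinary negative errno values. c2_errno() itself is defined elsewhere in the driver, so the switch below is only a guessed-at shape of such a mapping over a tiny subset of the codes, not the driver's real table.

#include <errno.h>
#include <stdio.h>

enum sketch_status {                    /* small subset of enum c2_status */
        SK_OK = 0,
        SK_INSUFFICIENT_RESOURCES = 1,
        SK_INVALID_QP_STATE = 18,
        SK_CONN_TIMEDOUT = 69,
};

static int sketch_errno(enum sketch_status s)
{
        switch (s) {
        case SK_OK:                     return 0;
        case SK_INSUFFICIENT_RESOURCES: return -ENOMEM;
        case SK_INVALID_QP_STATE:       return -EINVAL;
        case SK_CONN_TIMEDOUT:          return -ETIMEDOUT;
        default:                        return -EIO;    /* anything unrecognized */
        }
}

int main(void)
{
        printf("SK_CONN_TIMEDOUT -> %d\n", sketch_errno(SK_CONN_TIMEDOUT));
        return 0;
}
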
diff --git a/drivers/staging/rdma/amso1100/c2_user.h b/drivers/staging/rdma/amso1100/c2_user.h
deleted file mode 100644 (file)
index 7e9e7ad..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) 2005 Topspin Communications.  All rights reserved.
- * Copyright (c) 2005 Cisco Systems.  All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef C2_USER_H
-#define C2_USER_H
-
-#include <linux/types.h>
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct c2_alloc_ucontext_resp {
-       __u32 qp_tab_size;
-       __u32 uarc_size;
-};
-
-struct c2_alloc_pd_resp {
-       __u32 pdn;
-       __u32 reserved;
-};
-
-struct c2_create_cq {
-       __u32 lkey;
-       __u32 pdn;
-       __u64 arm_db_page;
-       __u64 set_db_page;
-       __u32 arm_db_index;
-       __u32 set_db_index;
-};
-
-struct c2_create_cq_resp {
-       __u32 cqn;
-       __u32 reserved;
-};
-
-struct c2_create_qp {
-       __u32 lkey;
-       __u32 reserved;
-       __u64 sq_db_page;
-       __u64 rq_db_page;
-       __u32 sq_db_index;
-       __u32 rq_db_index;
-};
-
-#endif                         /* C2_USER_H */
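
The comment in c2_user.h above asks userspace to pass pointers in __u64 fields so the structs lay out identically for 32-bit and 64-bit processes. The sketch below shows that round trip with a hypothetical command struct; the field names are invented and only the uint64_t/uintptr_t casting idiom is the point.

#include <stdint.h>
#include <stdio.h>

struct create_cmd {             /* stand-in for a c2_create_* request */
        uint32_t lkey;
        uint32_t reserved;      /* explicit padding keeps 32/64-bit layout identical */
        uint64_t db_page;       /* a user pointer travels here, never as void * */
};

int main(void)
{
        char page[4096];
        struct create_cmd cmd = {
                .lkey    = 42,
                .db_page = (uint64_t)(uintptr_t)page,   /* pack the pointer */
        };
        void *back = (void *)(uintptr_t)cmd.db_page;    /* unpack on the other side */

        printf("round trip ok: %d\n", back == (void *)page);
        return 0;
}
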
diff --git a/drivers/staging/rdma/amso1100/c2_vq.c b/drivers/staging/rdma/amso1100/c2_vq.c
deleted file mode 100644 (file)
index 2ec716f..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-
-#include "c2_vq.h"
-#include "c2_provider.h"
-
-/*
- * Verbs Request Objects:
- *
- * VQ Request Objects are allocated by the kernel verbs handlers.
- * They contain a wait object, a refcnt, an atomic bool indicating that the
- * adapter has replied, and a copy of the verb reply work request.
- * A pointer to the VQ Request Object is passed down in the context
- * field of the work request message, and reflected back by the adapter
- * in the verbs reply message.  The function handle_vq() in the interrupt
- * path will use this pointer to:
- *     1) append a copy of the verbs reply message
- *     2) mark that the reply is ready
- *     3) wake up the kernel verbs handler blocked awaiting the reply.
- *
- *
- * The kernel verbs handlers do a "get" to put a 2nd reference on the
- * VQ Request object.  If the kernel verbs handler exits before the adapter
- * can respond, this extra reference will keep the VQ Request object around
- * until the adapter's reply can be processed.  The reason we need this is
- * because a pointer to this object is stuffed into the context field of
- * the verbs work request message, and reflected back in the reply message.
- * It is used in the interrupt handler (handle_vq()) to wake up the appropriate
- * kernel verb handler that is blocked awaiting the verb reply.
- * So handle_vq() will do a "put" on the object when it's done accessing it.
- * NOTE:  If we guarantee that the kernel verb handler will never bail before
- *        getting the reply, then we don't need these refcnts.
- *
- *
- * VQ Request objects are freed by the kernel verbs handlers only
- * after the verb has been processed, or when the adapter fails and
- * does not reply.
- *
- *
- * Verbs Reply Buffers:
- *
- * VQ Reply bufs are local host memory copies of an
- * outstanding Verb Request reply
- * message.  They are always allocated by the kernel verbs handlers, and _may_ be
- * freed by either the kernel verbs handler -or- the interrupt handler.  The
- * kernel verbs handler _must_ free the repbuf, then free the vq request object
- * in that order.
- */
-
-int vq_init(struct c2_dev *c2dev)
-{
-       sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",
-               (char) ('0' + c2dev->devnum));
-       c2dev->host_msg_cache =
-           kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,
-                             SLAB_HWCACHE_ALIGN, NULL);
-       if (c2dev->host_msg_cache == NULL) {
-               return -ENOMEM;
-       }
-       return 0;
-}
-
-void vq_term(struct c2_dev *c2dev)
-{
-       kmem_cache_destroy(c2dev->host_msg_cache);
-}
-
-/* vq_req_alloc - allocate a VQ Request Object and initialize it.
- * The refcnt is set to 1.
- */
-struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
-{
-       struct c2_vq_req *r;
-
-       r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
-       if (r) {
-               init_waitqueue_head(&r->wait_object);
-               r->reply_msg = 0;
-               r->event = 0;
-               r->cm_id = NULL;
-               r->qp = NULL;
-               atomic_set(&r->refcnt, 1);
-               atomic_set(&r->reply_ready, 0);
-       }
-       return r;
-}
-
-
-/* vq_req_free - free the VQ Request Object.  It is assumed the verbs handler
- * has already freed the VQ Reply Buffer if it existed.
- */
-void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
-       r->reply_msg = 0;
-       if (atomic_dec_and_test(&r->refcnt)) {
-               kfree(r);
-       }
-}
-
-/* vq_req_get - reference a VQ Request Object.  Done
- * only in the kernel verbs handlers.
- */
-void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
-       atomic_inc(&r->refcnt);
-}
-
-
-/* vq_req_put - dereference and potentially free a VQ Request Object.
- *
- * This is only called by handle_vq() on the
- * interrupt when it is done processing
- * a verb reply message.  If the associated
- * kernel verbs handler has already bailed,
- * then this put will actually free the VQ
- * Request object _and_ the VQ Reply Buffer
- * if it exists.
- */
-void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
-{
-       if (atomic_dec_and_test(&r->refcnt)) {
-               if (r->reply_msg != 0)
-                       vq_repbuf_free(c2dev,
-                                      (void *) (unsigned long) r->reply_msg);
-               kfree(r);
-       }
-}
-
-
-/*
- * vq_repbuf_alloc - allocate a VQ Reply Buffer.
- */
-void *vq_repbuf_alloc(struct c2_dev *c2dev)
-{
-       return kmem_cache_alloc(c2dev->host_msg_cache, GFP_ATOMIC);
-}
-
-/*
- * vq_send_wr - post a verbs request message to the Verbs Request Queue.
- * If a message is not available in the MQ, then block until one is available.
- * NOTE: handle_mq() in interrupt context will wake up threads blocked here.
- * When the adapter drains the Verbs Request Queue,
- * it inserts MQ index 0 into the
- * adapter->host activity fifo and interrupts the host.
- */
-int vq_send_wr(struct c2_dev *c2dev, union c2wr *wr)
-{
-       void *msg;
-       wait_queue_t __wait;
-
-       /*
-        * grab adapter vq lock
-        */
-       spin_lock(&c2dev->vqlock);
-
-       /*
-        * allocate msg
-        */
-       msg = c2_mq_alloc(&c2dev->req_vq);
-
-       /*
-        * If we cannot get a msg, then we'll wait.
-        * When a message becomes available, the int handler will wake_up()
-        * any waiters.
-        */
-       while (msg == NULL) {
-               pr_debug("%s:%d no available msg in VQ, waiting...\n",
-                      __func__, __LINE__);
-               init_waitqueue_entry(&__wait, current);
-               add_wait_queue(&c2dev->req_vq_wo, &__wait);
-               spin_unlock(&c2dev->vqlock);
-               for (;;) {
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       if (!c2_mq_full(&c2dev->req_vq)) {
-                               break;
-                       }
-                       if (!signal_pending(current)) {
-                               schedule_timeout(1 * HZ);       /* 1 second... */
-                               continue;
-                       }
-                       set_current_state(TASK_RUNNING);
-                       remove_wait_queue(&c2dev->req_vq_wo, &__wait);
-                       return -EINTR;
-               }
-               set_current_state(TASK_RUNNING);
-               remove_wait_queue(&c2dev->req_vq_wo, &__wait);
-               spin_lock(&c2dev->vqlock);
-               msg = c2_mq_alloc(&c2dev->req_vq);
-       }
-
-       /*
-        * copy wr into adapter msg
-        */
-       memcpy(msg, wr, c2dev->req_vq.msg_size);
-
-       /*
-        * post msg
-        */
-       c2_mq_produce(&c2dev->req_vq);
-
-       /*
-        * release adapter vq lock
-        */
-       spin_unlock(&c2dev->vqlock);
-       return 0;
-}
-
-
-/*
- * vq_wait_for_reply - block until the adapter posts a Verb Reply Message.
- */
-int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req)
-{
-       if (!wait_event_timeout(req->wait_object,
-                               atomic_read(&req->reply_ready),
-                               60*HZ))
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
-/*
- * vq_repbuf_free - Free a Verbs Reply Buffer.
- */
-void vq_repbuf_free(struct c2_dev *c2dev, void *reply)
-{
-       kmem_cache_free(c2dev->host_msg_cache, reply);
-}
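
The block comment at the top of c2_vq.c above describes a two-reference protocol: vq_req_alloc() starts the object at refcnt 1, the verbs handler takes a second reference with vq_req_get() before sending, handle_vq() drops one reference from the interrupt path with vq_req_put(), and the handler drops the last with vq_req_free(). The single-threaded userspace sketch below mimics only the counting with a plain int and invented names; the real code uses atomic_t and a wait queue.

#include <stdio.h>
#include <stdlib.h>

struct vq_req_like {
        int refcnt;
        int reply_ready;
};

static struct vq_req_like *req_alloc(void)
{
        struct vq_req_like *r = calloc(1, sizeof(*r));
        if (r)
                r->refcnt = 1;                  /* vq_req_alloc(): refcnt starts at 1 */
        return r;
}

static void req_put(struct vq_req_like *r)
{
        if (--r->refcnt == 0)                   /* last reference frees the object */
                free(r);
}

int main(void)
{
        struct vq_req_like *r = req_alloc();

        if (!r)
                return 1;

        r->refcnt++;            /* vq_req_get(): pin across the round trip          */
        /* ... vq_send_wr() here, then vq_wait_for_reply() would block ...          */
        r->reply_ready = 1;     /* handle_vq() marks the reply ready and ...         */
        req_put(r);             /* ... drops the interrupt path's reference          */
        req_put(r);             /* verbs handler's vq_req_free(): frees the object   */
        printf("lifecycle complete\n");
        return 0;
}
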
diff --git a/drivers/staging/rdma/amso1100/c2_vq.h b/drivers/staging/rdma/amso1100/c2_vq.h
deleted file mode 100644 (file)
index c1f6cef..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_VQ_H_
-#define _C2_VQ_H_
-#include <linux/sched.h>
-#include "c2.h"
-#include "c2_wr.h"
-#include "c2_provider.h"
-
-struct c2_vq_req {
-       u64 reply_msg;          /* ptr to reply msg */
-       wait_queue_head_t wait_object;  /* wait object for vq reqs */
-       atomic_t reply_ready;   /* set when reply is ready */
-       atomic_t refcnt;        /* used to cancel WRs... */
-       int event;
-       struct iw_cm_id *cm_id;
-       struct c2_qp *qp;
-};
-
-int vq_init(struct c2_dev *c2dev);
-void vq_term(struct c2_dev *c2dev);
-
-struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
-void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
-void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
-void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
-int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
-
-void *vq_repbuf_alloc(struct c2_dev *c2dev);
-void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
-
-int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
-#endif                         /* _C2_VQ_H_ */
diff --git a/drivers/staging/rdma/amso1100/c2_wr.h b/drivers/staging/rdma/amso1100/c2_wr.h
deleted file mode 100644 (file)
index 8d4b4ca..0000000
+++ /dev/null
@@ -1,1520 +0,0 @@
-/*
- * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef _C2_WR_H_
-#define _C2_WR_H_
-
-#ifdef CCDEBUG
-#define CCWR_MAGIC             0xb07700b0
-#endif
-
-#define C2_QP_NO_ATTR_CHANGE 0xFFFFFFFF
-
-/* Maximum allowed size in bytes of private_data exchange
- * on connect.
- */
-#define C2_MAX_PRIVATE_DATA_SIZE 200
-
-/*
- * These types are shared among the adapter, host, and CCIL consumer.
- */
-enum c2_cq_notification_type {
-       C2_CQ_NOTIFICATION_TYPE_NONE = 1,
-       C2_CQ_NOTIFICATION_TYPE_NEXT,
-       C2_CQ_NOTIFICATION_TYPE_NEXT_SE
-};
-
-enum c2_setconfig_cmd {
-       C2_CFG_ADD_ADDR = 1,
-       C2_CFG_DEL_ADDR = 2,
-       C2_CFG_ADD_ROUTE = 3,
-       C2_CFG_DEL_ROUTE = 4
-};
-
-enum c2_getconfig_cmd {
-       C2_GETCONFIG_ROUTES = 1,
-       C2_GETCONFIG_ADDRS
-};
-
-/*
- *  CCIL Work Request Identifiers
- */
-enum c2wr_ids {
-       CCWR_RNIC_OPEN = 1,
-       CCWR_RNIC_QUERY,
-       CCWR_RNIC_SETCONFIG,
-       CCWR_RNIC_GETCONFIG,
-       CCWR_RNIC_CLOSE,
-       CCWR_CQ_CREATE,
-       CCWR_CQ_QUERY,
-       CCWR_CQ_MODIFY,
-       CCWR_CQ_DESTROY,
-       CCWR_QP_CONNECT,
-       CCWR_PD_ALLOC,
-       CCWR_PD_DEALLOC,
-       CCWR_SRQ_CREATE,
-       CCWR_SRQ_QUERY,
-       CCWR_SRQ_MODIFY,
-       CCWR_SRQ_DESTROY,
-       CCWR_QP_CREATE,
-       CCWR_QP_QUERY,
-       CCWR_QP_MODIFY,
-       CCWR_QP_DESTROY,
-       CCWR_NSMR_STAG_ALLOC,
-       CCWR_NSMR_REGISTER,
-       CCWR_NSMR_PBL,
-       CCWR_STAG_DEALLOC,
-       CCWR_NSMR_REREGISTER,
-       CCWR_SMR_REGISTER,
-       CCWR_MR_QUERY,
-       CCWR_MW_ALLOC,
-       CCWR_MW_QUERY,
-       CCWR_EP_CREATE,
-       CCWR_EP_GETOPT,
-       CCWR_EP_SETOPT,
-       CCWR_EP_DESTROY,
-       CCWR_EP_BIND,
-       CCWR_EP_CONNECT,
-       CCWR_EP_LISTEN,
-       CCWR_EP_SHUTDOWN,
-       CCWR_EP_LISTEN_CREATE,
-       CCWR_EP_LISTEN_DESTROY,
-       CCWR_EP_QUERY,
-       CCWR_CR_ACCEPT,
-       CCWR_CR_REJECT,
-       CCWR_CONSOLE,
-       CCWR_TERM,
-       CCWR_FLASH_INIT,
-       CCWR_FLASH,
-       CCWR_BUF_ALLOC,
-       CCWR_BUF_FREE,
-       CCWR_FLASH_WRITE,
-       CCWR_INIT,              /* WARNING: Don't move this ever again! */
-
-
-
-       /* Add new IDs here */
-
-
-
-       /*
-        * WARNING: CCWR_LAST must always be the last verbs id defined!
-        *          All the preceding IDs are fixed, and must not change.
-        *          You can add new IDs, but must not remove or reorder
-        *          any IDs. If you do, YOU will ruin any hope of
-        *          compatibility between versions.
-        */
-       CCWR_LAST,
-
-       /*
-        * Start over at 1 so that arrays indexed by user wr id's
-        * begin at 1.  This is OK since the verbs and user wr id's
-        * are always used on disjoint sets of queues.
-        */
-       /*
-        * The order of the CCWR_SEND_XX verbs must
-        * match the order of the RDMA_OPs
-        */
-       CCWR_SEND = 1,
-       CCWR_SEND_INV,
-       CCWR_SEND_SE,
-       CCWR_SEND_SE_INV,
-       CCWR_RDMA_WRITE,
-       CCWR_RDMA_READ,
-       CCWR_RDMA_READ_INV,
-       CCWR_MW_BIND,
-       CCWR_NSMR_FASTREG,
-       CCWR_STAG_INVALIDATE,
-       CCWR_RECV,
-       CCWR_NOP,
-       CCWR_UNIMPL,
-/* WARNING: This must always be the last user wr id defined! */
-};
-#define RDMA_SEND_OPCODE_FROM_WR_ID(x)   (x+2)
-
-/*
- * SQ/RQ Work Request Types
- */
-enum c2_wr_type {
-       C2_WR_TYPE_SEND = CCWR_SEND,
-       C2_WR_TYPE_SEND_SE = CCWR_SEND_SE,
-       C2_WR_TYPE_SEND_INV = CCWR_SEND_INV,
-       C2_WR_TYPE_SEND_SE_INV = CCWR_SEND_SE_INV,
-       C2_WR_TYPE_RDMA_WRITE = CCWR_RDMA_WRITE,
-       C2_WR_TYPE_RDMA_READ = CCWR_RDMA_READ,
-       C2_WR_TYPE_RDMA_READ_INV_STAG = CCWR_RDMA_READ_INV,
-       C2_WR_TYPE_BIND_MW = CCWR_MW_BIND,
-       C2_WR_TYPE_FASTREG_NSMR = CCWR_NSMR_FASTREG,
-       C2_WR_TYPE_INV_STAG = CCWR_STAG_INVALIDATE,
-       C2_WR_TYPE_RECV = CCWR_RECV,
-       C2_WR_TYPE_NOP = CCWR_NOP,
-};
-
-struct c2_netaddr {
-       __be32 ip_addr;
-       __be32 netmask;
-       u32 mtu;
-};
-
-struct c2_route {
-       u32 ip_addr;            /* 0 indicates the default route */
-       u32 netmask;            /* netmask associated with dst */
-       u32 flags;
-       union {
-               u32 ipaddr;     /* address of the nexthop interface */
-               u8 enaddr[6];
-       } nexthop;
-};
-
-/*
- * A Scatter Gather Entry.
- */
-struct c2_data_addr {
-       __be32 stag;
-       __be32 length;
-       __be64 to;
-};
-
-/*
- * MR and MW flags used by the consumer, RI, and RNIC.
- */
-enum c2_mm_flags {
-       MEM_REMOTE = 0x0001,    /* allow mw binds with remote access. */
-       MEM_VA_BASED = 0x0002,  /* Not Zero-based */
-       MEM_PBL_COMPLETE = 0x0004,      /* PBL array is complete in this msg */
-       MEM_LOCAL_READ = 0x0008,        /* allow local reads */
-       MEM_LOCAL_WRITE = 0x0010,       /* allow local writes */
-       MEM_REMOTE_READ = 0x0020,       /* allow remote reads */
-       MEM_REMOTE_WRITE = 0x0040,      /* allow remote writes */
-       MEM_WINDOW_BIND = 0x0080,       /* binds allowed */
-       MEM_SHARED = 0x0100,    /* set if MR is shared */
-       MEM_STAG_VALID = 0x0200 /* set if STAG is in valid state */
-};
-
-/*
- * CCIL API ACF flags defined in terms of the low level mem flags.
- * This minimizes translation needed in the user API
- */
-enum c2_acf {
-       C2_ACF_LOCAL_READ = MEM_LOCAL_READ,
-       C2_ACF_LOCAL_WRITE = MEM_LOCAL_WRITE,
-       C2_ACF_REMOTE_READ = MEM_REMOTE_READ,
-       C2_ACF_REMOTE_WRITE = MEM_REMOTE_WRITE,
-       C2_ACF_WINDOW_BIND = MEM_WINDOW_BIND
-};
-
-/*
- * Image types of objects written to flash
- */
-#define C2_FLASH_IMG_BITFILE 1
-#define C2_FLASH_IMG_OPTION_ROM 2
-#define C2_FLASH_IMG_VPD 3
-
-/*
- *  To fix bug 1815 we define the maximum allowable size of the
- *  terminate message (per the IETF spec; refer to the IETF
- *  protocol specification, section 12.1.6, page 64).
- *  The message is prefixed by 20 bytes of DDP info.
- *
- *  Then the message has 6 bytes for the terminate control
- *  and DDP segment length info plus a DDP header (either
- *  14 or 18 bytes) plus 28 bytes for the RDMA header.
- *  Thus the max size is:
- *  20 + (6 + 18 + 28) = 72
- */
-#define C2_MAX_TERMINATE_MESSAGE_SIZE (72)
-
-/*
- * Build String Length.  It must be the same as C2_BUILD_STR_LEN in ccil_api.h
- */
-#define WR_BUILD_STR_LEN 64
-
-/*
- * WARNING:  All of these structs need to align any 64bit types on
- * 64 bit boundaries!  64bit types include u64 and __be64.
- */
-
-/*
- * Clustercore Work Request Header.  Be sensitive to field layout
- * and alignment.
- */
-struct c2wr_hdr {
-       /* wqe_count is part of the cqe.  It is put here so the
-        * adapter can write to it while the wr is pending without
-        * clobbering part of the wr.  This word need not be dma'd
-        * from the host to adapter by libccil, but we copy it anyway
-        * to make the memcpy to the adapter better aligned.
-        */
-       __be32 wqe_count;
-
-       /* Put these fields next so that later 32- and 64-bit
-        * quantities are naturally aligned.
-        */
-       u8 id;
-       u8 result;              /* adapter -> host */
-       u8 sge_count;           /* host -> adapter */
-       u8 flags;               /* host -> adapter */
-
-       u64 context;
-#ifdef CCMSGMAGIC
-       u32 magic;
-       u32 pad;
-#endif
-} __attribute__((packed));
-
-/*
- *------------------------ RNIC ------------------------
- */
-
-/*
- * WR_RNIC_OPEN
- */
-
-/*
- * Flags for the RNIC WRs
- */
-enum c2_rnic_flags {
-       RNIC_IRD_STATIC = 0x0001,
-       RNIC_ORD_STATIC = 0x0002,
-       RNIC_QP_STATIC = 0x0004,
-       RNIC_SRQ_SUPPORTED = 0x0008,
-       RNIC_PBL_BLOCK_MODE = 0x0010,
-       RNIC_SRQ_MODEL_ARRIVAL = 0x0020,
-       RNIC_CQ_OVF_DETECTED = 0x0040,
-       RNIC_PRIV_MODE = 0x0080
-};
-
-struct c2wr_rnic_open_req {
-       struct c2wr_hdr hdr;
-       u64 user_context;
-       __be16 flags;           /* See enum c2_rnic_flags */
-       __be16 port_num;
-} __attribute__((packed));
-
-struct c2wr_rnic_open_rep {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-} __attribute__((packed));
-
-union c2wr_rnic_open {
-       struct c2wr_rnic_open_req req;
-       struct c2wr_rnic_open_rep rep;
-} __attribute__((packed));
-
-struct c2wr_rnic_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_QUERY
- */
-struct c2wr_rnic_query_rep {
-       struct c2wr_hdr hdr;
-       u64 user_context;
-       __be32 vendor_id;
-       __be32 part_number;
-       __be32 hw_version;
-       __be32 fw_ver_major;
-       __be32 fw_ver_minor;
-       __be32 fw_ver_patch;
-       char fw_ver_build_str[WR_BUILD_STR_LEN];
-       __be32 max_qps;
-       __be32 max_qp_depth;
-       u32 max_srq_depth;
-       u32 max_send_sgl_depth;
-       u32 max_rdma_sgl_depth;
-       __be32 max_cqs;
-       __be32 max_cq_depth;
-       u32 max_cq_event_handlers;
-       __be32 max_mrs;
-       u32 max_pbl_depth;
-       __be32 max_pds;
-       __be32 max_global_ird;
-       u32 max_global_ord;
-       __be32 max_qp_ird;
-       __be32 max_qp_ord;
-       u32 flags;
-       __be32 max_mws;
-       u32 pbe_range_low;
-       u32 pbe_range_high;
-       u32 max_srqs;
-       u32 page_size;
-} __attribute__((packed));
-
-union c2wr_rnic_query {
-       struct c2wr_rnic_query_req req;
-       struct c2wr_rnic_query_rep rep;
-} __attribute__((packed));
-
-/*
- * WR_RNIC_GETCONFIG
- */
-
-struct c2wr_rnic_getconfig_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 option;             /* see c2_getconfig_cmd_t */
-       u64 reply_buf;
-       u32 reply_buf_len;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_getconfig_rep {
-       struct c2wr_hdr hdr;
-       u32 option;             /* see c2_getconfig_cmd_t */
-       u32 count_len;          /* length of the number of addresses configured */
-} __attribute__((packed)) ;
-
-union c2wr_rnic_getconfig {
-       struct c2wr_rnic_getconfig_req req;
-       struct c2wr_rnic_getconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_SETCONFIG
- */
-struct c2wr_rnic_setconfig_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       __be32 option;          /* See c2_setconfig_cmd_t */
-       /* variable data and pad. See c2_netaddr and c2_route */
-       u8 data[0];
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_setconfig_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_setconfig {
-       struct c2wr_rnic_setconfig_req req;
-       struct c2wr_rnic_setconfig_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * WR_RNIC_CLOSE
- */
-struct c2wr_rnic_close_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_rnic_close_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_rnic_close {
-       struct c2wr_rnic_close_req req;
-       struct c2wr_rnic_close_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ CQ ------------------------
- */
-struct c2wr_cq_create_req {
-       struct c2wr_hdr hdr;
-       __be64 shared_ht;
-       u64 user_context;
-       __be64 msg_pool;
-       u32 rnic_handle;
-       __be32 msg_size;
-       __be32 depth;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_create_rep {
-       struct c2wr_hdr hdr;
-       __be32 mq_index;
-       __be32 adapter_shared;
-       u32 cq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_cq_create {
-       struct c2wr_cq_create_req req;
-       struct c2wr_cq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 cq_handle;
-       u32 new_depth;
-       u64 new_msg_pool;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_modify_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_modify {
-       struct c2wr_cq_modify_req req;
-       struct c2wr_cq_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 cq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_cq_destroy_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_cq_destroy {
-       struct c2wr_cq_destroy_req req;
-       struct c2wr_cq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ PD ------------------------
- */
-struct c2wr_pd_alloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_alloc_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_alloc {
-       struct c2wr_pd_alloc_req req;
-       struct c2wr_pd_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_pd_dealloc_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_pd_dealloc {
-       struct c2wr_pd_dealloc_req req;
-       struct c2wr_pd_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ SRQ ------------------------
- */
-struct c2wr_srq_create_req {
-       struct c2wr_hdr hdr;
-       u64 shared_ht;
-       u64 user_context;
-       u32 rnic_handle;
-       u32 srq_depth;
-       u32 srq_limit;
-       u32 sgl_depth;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_create_rep {
-       struct c2wr_hdr hdr;
-       u32 srq_depth;
-       u32 sgl_depth;
-       u32 msg_size;
-       u32 mq_index;
-       u32 mq_start;
-       u32 srq_handle;
-} __attribute__((packed)) ;
-
-union c2wr_srq_create {
-       struct c2wr_srq_create_req req;
-       struct c2wr_srq_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 srq_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_srq_destroy_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_srq_destroy {
-       struct c2wr_srq_destroy_req req;
-       struct c2wr_srq_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ QP ------------------------
- */
-enum c2wr_qp_flags {
-       QP_RDMA_READ = 0x00000001,      /* RDMA read enabled? */
-       QP_RDMA_WRITE = 0x00000002,     /* RDMA write enabled? */
-       QP_MW_BIND = 0x00000004,        /* MWs enabled */
-       QP_ZERO_STAG = 0x00000008,      /* enabled? */
-       QP_REMOTE_TERMINATION = 0x00000010,     /* remote end terminated */
-       QP_RDMA_READ_RESPONSE = 0x00000020      /* Remote RDMA read  */
-           /* enabled? */
-};
-
-struct c2wr_qp_create_req {
-       struct c2wr_hdr hdr;
-       __be64 shared_sq_ht;
-       __be64 shared_rq_ht;
-       u64 user_context;
-       u32 rnic_handle;
-       u32 sq_cq_handle;
-       u32 rq_cq_handle;
-       __be32 sq_depth;
-       __be32 rq_depth;
-       u32 srq_handle;
-       u32 srq_limit;
-       __be32 flags;           /* see enum c2wr_qp_flags */
-       __be32 send_sgl_depth;
-       __be32 recv_sgl_depth;
-       __be32 rdma_write_sgl_depth;
-       __be32 ord;
-       __be32 ird;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_create_rep {
-       struct c2wr_hdr hdr;
-       __be32 sq_depth;
-       __be32 rq_depth;
-       u32 send_sgl_depth;
-       u32 recv_sgl_depth;
-       u32 rdma_write_sgl_depth;
-       u32 ord;
-       u32 ird;
-       __be32 sq_msg_size;
-       __be32 sq_mq_index;
-       __be32 sq_mq_start;
-       __be32 rq_msg_size;
-       __be32 rq_mq_index;
-       __be32 rq_mq_start;
-       u32 qp_handle;
-} __attribute__((packed)) ;
-
-union c2wr_qp_create {
-       struct c2wr_qp_create_req req;
-       struct c2wr_qp_create_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_query_rep {
-       struct c2wr_hdr hdr;
-       u64 user_context;
-       u32 rnic_handle;
-       u32 sq_depth;
-       u32 rq_depth;
-       u32 send_sgl_depth;
-       u32 rdma_write_sgl_depth;
-       u32 recv_sgl_depth;
-       u32 ord;
-       u32 ird;
-       u16 qp_state;
-       u16 flags;              /* see c2wr_qp_flags_t */
-       u32 qp_id;
-       u32 local_addr;
-       u32 remote_addr;
-       u16 local_port;
-       u16 remote_port;
-       u32 terminate_msg_length;       /* 0 if not present */
-       u8 data[0];
-       /* Terminate Message in-line here. */
-} __attribute__((packed)) ;
-
-union c2wr_qp_query {
-       struct c2wr_qp_query_req req;
-       struct c2wr_qp_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_req {
-       struct c2wr_hdr hdr;
-       u64 stream_msg;
-       u32 stream_msg_length;
-       u32 rnic_handle;
-       u32 qp_handle;
-       __be32 next_qp_state;
-       __be32 ord;
-       __be32 ird;
-       __be32 sq_depth;
-       __be32 rq_depth;
-       u32 llp_ep_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_modify_rep {
-       struct c2wr_hdr hdr;
-       u32 ord;
-       u32 ird;
-       u32 sq_depth;
-       u32 rq_depth;
-       u32 sq_msg_size;
-       u32 sq_mq_index;
-       u32 sq_mq_start;
-       u32 rq_msg_size;
-       u32 rq_mq_index;
-       u32 rq_mq_start;
-} __attribute__((packed)) ;
-
-union c2wr_qp_modify {
-       struct c2wr_qp_modify_req req;
-       struct c2wr_qp_modify_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 qp_handle;
-} __attribute__((packed)) ;
-
-struct c2wr_qp_destroy_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_qp_destroy {
-       struct c2wr_qp_destroy_req req;
-       struct c2wr_qp_destroy_rep rep;
-} __attribute__((packed)) ;
-
-/*
- * The CCWR_QP_CONNECT msg is posted on the verbs request queue.  It can
- * only be posted when a QP is in IDLE state.  After the connect request is
- * submitted to the LLP, the adapter moves the QP to CONNECT_PENDING state.
- * No synchronous reply from adapter to this WR.  The results of
- * connection are passed back in an async event CCAE_ACTIVE_CONNECT_RESULTS
- * See c2wr_ae_active_connect_results_t
- */
-struct c2wr_qp_connect_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 qp_handle;
-       __be32 remote_addr;
-       __be16 remote_port;
-       u16 pad;
-       __be32 private_data_length;
-       u8 private_data[0];     /* Private data in-line. */
-} __attribute__((packed)) ;
-
-struct c2wr_qp_connect {
-       struct c2wr_qp_connect_req req;
-       /* no synchronous reply.         */
-} __attribute__((packed)) ;
-
-
-/*
- *------------------------ MM ------------------------
- */
-
-struct c2wr_nsmr_stag_alloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 pbl_depth;
-       u32 pd_id;
-       u32 flags;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_stag_alloc_rep {
-       struct c2wr_hdr hdr;
-       u32 pbl_depth;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_stag_alloc {
-       struct c2wr_nsmr_stag_alloc_req req;
-       struct c2wr_nsmr_stag_alloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_req {
-       struct c2wr_hdr hdr;
-       __be64 va;
-       u32 rnic_handle;
-       __be16 flags;
-       u8 stag_key;
-       u8 pad;
-       u32 pd_id;
-       __be32 pbl_depth;
-       __be32 pbe_size;
-       __be32 fbo;
-       __be32 length;
-       __be32 addrs_length;
-       /* array of paddrs (must be aligned on a 64bit boundary) */
-       __be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_register_rep {
-       struct c2wr_hdr hdr;
-       u32 pbl_depth;
-       __be32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_register {
-       struct c2wr_nsmr_register_req req;
-       struct c2wr_nsmr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       __be32 flags;
-       __be32 stag_index;
-       __be32 addrs_length;
-       /* array of paddrs (must be aligned on a 64bit boundary) */
-       __be64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_pbl_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_pbl {
-       struct c2wr_nsmr_pbl_req req;
-       struct c2wr_nsmr_pbl_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mr_query_rep {
-       struct c2wr_hdr hdr;
-       u8 stag_key;
-       u8 pad[3];
-       u32 pd_id;
-       u32 flags;
-       u32 pbl_depth;
-} __attribute__((packed)) ;
-
-union c2wr_mr_query {
-       struct c2wr_mr_query_req req;
-       struct c2wr_mr_query_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_query_rep {
-       struct c2wr_hdr hdr;
-       u8 stag_key;
-       u8 pad[3];
-       u32 pd_id;
-       u32 flags;
-} __attribute__((packed)) ;
-
-union c2wr_mw_query {
-       struct c2wr_mw_query_req req;
-       struct c2wr_mw_query_rep rep;
-} __attribute__((packed)) ;
-
-
-struct c2wr_stag_dealloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       __be32 stag_index;
-} __attribute__((packed)) ;
-
-struct c2wr_stag_dealloc_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed)) ;
-
-union c2wr_stag_dealloc {
-       struct c2wr_stag_dealloc_req req;
-       struct c2wr_stag_dealloc_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_req {
-       struct c2wr_hdr hdr;
-       u64 va;
-       u32 rnic_handle;
-       u16 flags;
-       u8 stag_key;
-       u8 pad;
-       u32 stag_index;
-       u32 pd_id;
-       u32 pbl_depth;
-       u32 pbe_size;
-       u32 fbo;
-       u32 length;
-       u32 addrs_length;
-       u32 pad1;
-       /* array of paddrs (must be aligned on a 64bit boundary) */
-       u64 paddrs[0];
-} __attribute__((packed)) ;
-
-struct c2wr_nsmr_reregister_rep {
-       struct c2wr_hdr hdr;
-       u32 pbl_depth;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_nsmr_reregister {
-       struct c2wr_nsmr_reregister_req req;
-       struct c2wr_nsmr_reregister_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_req {
-       struct c2wr_hdr hdr;
-       u64 va;
-       u32 rnic_handle;
-       u16 flags;
-       u8 stag_key;
-       u8 pad;
-       u32 stag_index;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_smr_register_rep {
-       struct c2wr_hdr hdr;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_smr_register {
-       struct c2wr_smr_register_req req;
-       struct c2wr_smr_register_rep rep;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 pd_id;
-} __attribute__((packed)) ;
-
-struct c2wr_mw_alloc_rep {
-       struct c2wr_hdr hdr;
-       u32 stag_index;
-} __attribute__((packed)) ;
-
-union c2wr_mw_alloc {
-       struct c2wr_mw_alloc_req req;
-       struct c2wr_mw_alloc_rep rep;
-} __attribute__((packed)) ;
-
-/*
- *------------------------ WRs -----------------------
- */
-
-struct c2wr_user_hdr {
-       struct c2wr_hdr hdr;            /* Has status and WR Type */
-} __attribute__((packed)) ;
-
-enum c2_qp_state {
-       C2_QP_STATE_IDLE = 0x01,
-       C2_QP_STATE_CONNECTING = 0x02,
-       C2_QP_STATE_RTS = 0x04,
-       C2_QP_STATE_CLOSING = 0x08,
-       C2_QP_STATE_TERMINATE = 0x10,
-       C2_QP_STATE_ERROR = 0x20,
-};
-
-/* Completion queue entry. */
-struct c2wr_ce {
-       struct c2wr_hdr hdr;            /* Has status and WR Type */
-       u64 qp_user_context;    /* c2_user_qp_t * */
-       u32 qp_state;           /* Current QP State */
-       u32 handle;             /* QPID or EP Handle */
-       __be32 bytes_rcvd;              /* valid for RECV WCs */
-       u32 stag;
-} __attribute__((packed)) ;
-
-
-/*
- * Flags used for all post-sq WRs.  These must fit in the flags
- * field of the struct c2wr_hdr (eight bits).
- */
-enum {
-       SQ_SIGNALED = 0x01,
-       SQ_READ_FENCE = 0x02,
-       SQ_FENCE = 0x04,
-};
-
-/*
- * Common fields for all post-sq WRs.  Namely the standard header and a
- * secondary header with fields common to all post-sq WRs.
- */
-struct c2_sq_hdr {
-       struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * Same as above but for post-rq WRs.
- */
-struct c2_rq_hdr {
-       struct c2wr_user_hdr user_hdr;
-} __attribute__((packed));
-
-/*
- * use the same struct for all sends.
- */
-struct c2wr_send_req {
-       struct c2_sq_hdr sq_hdr;
-       __be32 sge_len;
-       __be32 remote_stag;
-       u8 data[0];             /* SGE array */
-} __attribute__((packed));
-
-union c2wr_send {
-       struct c2wr_send_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_write_req {
-       struct c2_sq_hdr sq_hdr;
-       __be64 remote_to;
-       __be32 remote_stag;
-       __be32 sge_len;
-       u8 data[0];             /* SGE array */
-} __attribute__((packed));
-
-union c2wr_rdma_write {
-       struct c2wr_rdma_write_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_rdma_read_req {
-       struct c2_sq_hdr sq_hdr;
-       __be64 local_to;
-       __be64 remote_to;
-       __be32 local_stag;
-       __be32 remote_stag;
-       __be32 length;
-} __attribute__((packed));
-
-union c2wr_rdma_read {
-       struct c2wr_rdma_read_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_mw_bind_req {
-       struct c2_sq_hdr sq_hdr;
-       u64 va;
-       u8 stag_key;
-       u8 pad[3];
-       u32 mw_stag_index;
-       u32 mr_stag_index;
-       u32 length;
-       u32 flags;
-} __attribute__((packed));
-
-union c2wr_mw_bind {
-       struct c2wr_mw_bind_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_nsmr_fastreg_req {
-       struct c2_sq_hdr sq_hdr;
-       u64 va;
-       u8 stag_key;
-       u8 pad[3];
-       u32 stag_index;
-       u32 pbe_size;
-       u32 fbo;
-       u32 length;
-       u32 addrs_length;
-       /* array of paddrs (must be aligned on a 64bit boundary) */
-       u64 paddrs[0];
-} __attribute__((packed));
-
-union c2wr_nsmr_fastreg {
-       struct c2wr_nsmr_fastreg_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_stag_invalidate_req {
-       struct c2_sq_hdr sq_hdr;
-       u8 stag_key;
-       u8 pad[3];
-       u32 stag_index;
-} __attribute__((packed));
-
-union c2wr_stag_invalidate {
-       struct c2wr_stag_invalidate_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-union c2wr_sqwr {
-       struct c2_sq_hdr sq_hdr;
-       struct c2wr_send_req send;
-       struct c2wr_send_req send_se;
-       struct c2wr_send_req send_inv;
-       struct c2wr_send_req send_se_inv;
-       struct c2wr_rdma_write_req rdma_write;
-       struct c2wr_rdma_read_req rdma_read;
-       struct c2wr_mw_bind_req mw_bind;
-       struct c2wr_nsmr_fastreg_req nsmr_fastreg;
-       struct c2wr_stag_invalidate_req stag_inv;
-} __attribute__((packed));
-
-
-/*
- * RQ WRs
- */
-struct c2wr_rqwr {
-       struct c2_rq_hdr rq_hdr;
-       u8 data[0];             /* array of SGEs */
-} __attribute__((packed));
-
-union c2wr_recv {
-       struct c2wr_rqwr req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-/*
- * All AEs start with this header.  Most AEs only need to convey the
- * information in the header.  Some, like LLP connection events, need
- * more info.  The union typedef c2wr_ae_t has all the possible AEs.
- *
- * hdr.context is the user_context from the rnic_open WR.  NULL if this
- * is not affiliated with an rnic.
- *
- * hdr.id is the AE identifier (e.g. CCAE_REMOTE_SHUTDOWN,
- * CCAE_LLP_CLOSE_COMPLETE)
- *
- * resource_type is one of:  C2_RES_IND_QP, C2_RES_IND_CQ, C2_RES_IND_SRQ
- *
- * user_context is the context passed down when the host created the resource.
- */
-struct c2wr_ae_hdr {
-       struct c2wr_hdr hdr;
-       u64 user_context;       /* user context for this res. */
-       __be32 resource_type;   /* see enum c2_resource_indicator */
-       __be32 resource;        /* handle for resource */
-       __be32 qp_state;        /* current QP State */
-} __attribute__((packed));
-
-/*
- * After submitting the CCAE_ACTIVE_CONNECT_RESULTS message on the AEQ,
- * the adapter moves the QP into RTS state
- */
-struct c2wr_ae_active_connect_results {
-       struct c2wr_ae_hdr ae_hdr;
-       __be32 laddr;
-       __be32 raddr;
-       __be16 lport;
-       __be16 rport;
-       __be32 private_data_length;
-       u8 private_data[0];     /* data is in-line in the msg. */
-} __attribute__((packed));
-
-/*
- * When connections are established by the stack (and the private data
- * MPA frame is received), the adapter will generate an event to the host.
- * The details of the connection, any private data, and the new connection
- * request handle is passed up via the CCAE_CONNECTION_REQUEST msg on the
- * AE queue:
- */
-struct c2wr_ae_connection_request {
-       struct c2wr_ae_hdr ae_hdr;
-       u32 cr_handle;          /* connreq handle (sock ptr) */
-       __be32 laddr;
-       __be32 raddr;
-       __be16 lport;
-       __be16 rport;
-       __be32 private_data_length;
-       u8 private_data[0];     /* data is in-line in the msg. */
-} __attribute__((packed));
-
-union c2wr_ae {
-       struct c2wr_ae_hdr ae_generic;
-       struct c2wr_ae_active_connect_results ae_active_connect_results;
-       struct c2wr_ae_connection_request ae_connection_request;
-} __attribute__((packed));
-
-struct c2wr_init_req {
-       struct c2wr_hdr hdr;
-       __be64 hint_count;
-       __be64 q0_host_shared;
-       __be64 q1_host_shared;
-       __be64 q1_host_msg_pool;
-       __be64 q2_host_shared;
-       __be64 q2_host_msg_pool;
-} __attribute__((packed));
-
-struct c2wr_init_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_init {
-       struct c2wr_init_req req;
-       struct c2wr_init_rep rep;
-} __attribute__((packed));
-
-/*
- * For upgrading flash.
- */
-
-struct c2wr_flash_init_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-} __attribute__((packed));
-
-struct c2wr_flash_init_rep {
-       struct c2wr_hdr hdr;
-       u32 adapter_flash_buf_offset;
-       u32 adapter_flash_len;
-} __attribute__((packed));
-
-union c2wr_flash_init {
-       struct c2wr_flash_init_req req;
-       struct c2wr_flash_init_rep rep;
-} __attribute__((packed));
-
-struct c2wr_flash_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 len;
-} __attribute__((packed));
-
-struct c2wr_flash_rep {
-       struct c2wr_hdr hdr;
-       u32 status;
-} __attribute__((packed));
-
-union c2wr_flash {
-       struct c2wr_flash_req req;
-       struct c2wr_flash_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 size;
-} __attribute__((packed));
-
-struct c2wr_buf_alloc_rep {
-       struct c2wr_hdr hdr;
-       u32 offset;             /* 0 if mem not available */
-       u32 size;               /* 0 if mem not available */
-} __attribute__((packed));
-
-union c2wr_buf_alloc {
-       struct c2wr_buf_alloc_req req;
-       struct c2wr_buf_alloc_rep rep;
-} __attribute__((packed));
-
-struct c2wr_buf_free_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 offset;             /* Must match value from alloc */
-       u32 size;               /* Must match value from alloc */
-} __attribute__((packed));
-
-struct c2wr_buf_free_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_buf_free {
-       struct c2wr_buf_free_req req;
-       struct c2wr_ce rep;
-} __attribute__((packed));
-
-struct c2wr_flash_write_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 offset;
-       u32 size;
-       u32 type;
-       u32 flags;
-} __attribute__((packed));
-
-struct c2wr_flash_write_rep {
-       struct c2wr_hdr hdr;
-       u32 status;
-} __attribute__((packed));
-
-union c2wr_flash_write {
-       struct c2wr_flash_write_req req;
-       struct c2wr_flash_write_rep rep;
-} __attribute__((packed));
-
-/*
- * Messages for LLP connection setup.
- */
-
-/*
- * Listen Request.  This allocates a listening endpoint to allow passive
- * connection setup.  Newly established LLP connections are passed up
- * via an AE.  See c2wr_ae_connection_request_t
- */
-struct c2wr_ep_listen_create_req {
-       struct c2wr_hdr hdr;
-       u64 user_context;       /* returned in AEs. */
-       u32 rnic_handle;
-       __be32 local_addr;              /* local addr, or 0  */
-       __be16 local_port;              /* 0 means "pick one" */
-       u16 pad;
-       __be32 backlog;         /* traditional TCP listen backlog */
-} __attribute__((packed));
-
-struct c2wr_ep_listen_create_rep {
-       struct c2wr_hdr hdr;
-       u32 ep_handle;          /* handle to new listening ep */
-       u16 local_port;         /* resulting port... */
-       u16 pad;
-} __attribute__((packed));
-
-union c2wr_ep_listen_create {
-       struct c2wr_ep_listen_create_req req;
-       struct c2wr_ep_listen_create_rep rep;
-} __attribute__((packed));
-
-struct c2wr_ep_listen_destroy_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_listen_destroy_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_ep_listen_destroy {
-       struct c2wr_ep_listen_destroy_req req;
-       struct c2wr_ep_listen_destroy_rep rep;
-} __attribute__((packed));
-
-struct c2wr_ep_query_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 ep_handle;
-} __attribute__((packed));
-
-struct c2wr_ep_query_rep {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 local_addr;
-       u32 remote_addr;
-       u16 local_port;
-       u16 remote_port;
-} __attribute__((packed));
-
-union c2wr_ep_query {
-       struct c2wr_ep_query_req req;
-       struct c2wr_ep_query_rep rep;
-} __attribute__((packed));
-
-
-/*
- * The host passes this down to indicate acceptance of a pending iWARP
- * connection.  The cr_handle was obtained from the CONNECTION_REQUEST
- * AE passed up by the adapter.  See c2wr_ae_connection_request_t.
- */
-struct c2wr_cr_accept_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 qp_handle;          /* QP to bind to this LLP conn */
-       u32 ep_handle;          /* LLP  handle to accept */
-       __be32 private_data_length;
-       u8 private_data[0];     /* data in-line in msg. */
-} __attribute__((packed));
-
-/*
- * adapter sends reply when private data is successfully submitted to
- * the LLP.
- */
-struct c2wr_cr_accept_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_accept {
-       struct c2wr_cr_accept_req req;
-       struct c2wr_cr_accept_rep rep;
-} __attribute__((packed));
-
-/*
- * The host sends this down if a given iWARP connection request was
- * rejected by the consumer.  The cr_handle was obtained from a
- * previous c2wr_ae_connection_request_t AE sent by the adapter.
- */
-struct  c2wr_cr_reject_req {
-       struct c2wr_hdr hdr;
-       u32 rnic_handle;
-       u32 ep_handle;          /* LLP handle to reject */
-} __attribute__((packed));
-
-/*
- * Dunno if this is needed, but we'll add it for now.  The adapter will
- * send the reject_reply after the LLP endpoint has been destroyed.
- */
-struct  c2wr_cr_reject_rep {
-       struct c2wr_hdr hdr;
-} __attribute__((packed));
-
-union c2wr_cr_reject {
-       struct c2wr_cr_reject_req req;
-       struct c2wr_cr_reject_rep rep;
-} __attribute__((packed));
-
-/*
- * console command.  Used to implement a debug console over the verbs
- * request and reply queues.
- */
-
-/*
- * Console request message.  It contains:
- *     - message hdr with id = CCWR_CONSOLE
- *     - the physaddr/len of host memory to be used for the reply.
- *     - the command string.  eg:  "netstat -s" or "zoneinfo"
- */
-struct c2wr_console_req {
-       struct c2wr_hdr hdr;            /* id = CCWR_CONSOLE */
-       u64 reply_buf;          /* pinned host buf for reply */
-       u32 reply_buf_len;      /* length of reply buffer */
-       u8 command[0];          /* NUL terminated ascii string */
-       /* containing the command req */
-} __attribute__((packed));
-
-/*
- * flags used in the console reply.
- */
-enum c2_console_flags {
-       CONS_REPLY_TRUNCATED = 0x00000001       /* reply was truncated */
-} __attribute__((packed));
-
-/*
- * Console reply message.
- * hdr.result contains the c2_status_t error if the reply was _not_ generated,
- * or C2_OK if the reply was generated.
- */
-struct c2wr_console_rep {
-       struct c2wr_hdr hdr;            /* id = CCWR_CONSOLE */
-       u32 flags;
-} __attribute__((packed));
-
-union c2wr_console {
-       struct c2wr_console_req req;
-       struct c2wr_console_rep rep;
-} __attribute__((packed));
-
-
-/*
- * Giant union with all WRs.  Makes life easier...
- */
-union c2wr {
-       struct c2wr_hdr hdr;
-       struct c2wr_user_hdr user_hdr;
-       union c2wr_rnic_open rnic_open;
-       union c2wr_rnic_query rnic_query;
-       union c2wr_rnic_getconfig rnic_getconfig;
-       union c2wr_rnic_setconfig rnic_setconfig;
-       union c2wr_rnic_close rnic_close;
-       union c2wr_cq_create cq_create;
-       union c2wr_cq_modify cq_modify;
-       union c2wr_cq_destroy cq_destroy;
-       union c2wr_pd_alloc pd_alloc;
-       union c2wr_pd_dealloc pd_dealloc;
-       union c2wr_srq_create srq_create;
-       union c2wr_srq_destroy srq_destroy;
-       union c2wr_qp_create qp_create;
-       union c2wr_qp_query qp_query;
-       union c2wr_qp_modify qp_modify;
-       union c2wr_qp_destroy qp_destroy;
-       struct c2wr_qp_connect qp_connect;
-       union c2wr_nsmr_stag_alloc nsmr_stag_alloc;
-       union c2wr_nsmr_register nsmr_register;
-       union c2wr_nsmr_pbl nsmr_pbl;
-       union c2wr_mr_query mr_query;
-       union c2wr_mw_query mw_query;
-       union c2wr_stag_dealloc stag_dealloc;
-       union c2wr_sqwr sqwr;
-       struct c2wr_rqwr rqwr;
-       struct c2wr_ce ce;
-       union c2wr_ae ae;
-       union c2wr_init init;
-       union c2wr_ep_listen_create ep_listen_create;
-       union c2wr_ep_listen_destroy ep_listen_destroy;
-       union c2wr_cr_accept cr_accept;
-       union c2wr_cr_reject cr_reject;
-       union c2wr_console console;
-       union c2wr_flash_init flash_init;
-       union c2wr_flash flash;
-       union c2wr_buf_alloc buf_alloc;
-       union c2wr_buf_free buf_free;
-       union c2wr_flash_write flash_write;
-} __attribute__((packed));
-
-
-/*
- * Accessors for the wr fields that are packed together tightly to
- * reduce the wr message size.  The wr arguments are void* so that
- * either a union c2wr*, a struct c2wr_hdr*, or a pointer to any of the types
- * in the union c2wr can be passed in.
- */
-static __inline__ u8 c2_wr_get_id(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->id;
-}
-static __inline__ void c2_wr_set_id(void *wr, u8 id)
-{
-       ((struct c2wr_hdr *) wr)->id = id;
-}
-static __inline__ u8 c2_wr_get_result(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->result;
-}
-static __inline__ void c2_wr_set_result(void *wr, u8 result)
-{
-       ((struct c2wr_hdr *) wr)->result = result;
-}
-static __inline__ u8 c2_wr_get_flags(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->flags;
-}
-static __inline__ void c2_wr_set_flags(void *wr, u8 flags)
-{
-       ((struct c2wr_hdr *) wr)->flags = flags;
-}
-static __inline__ u8 c2_wr_get_sge_count(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->sge_count;
-}
-static __inline__ void c2_wr_set_sge_count(void *wr, u8 sge_count)
-{
-       ((struct c2wr_hdr *) wr)->sge_count = sge_count;
-}
-static __inline__ __be32 c2_wr_get_wqe_count(void *wr)
-{
-       return ((struct c2wr_hdr *) wr)->wqe_count;
-}
-static __inline__ void c2_wr_set_wqe_count(void *wr, u32 wqe_count)
-{
-       ((struct c2wr_hdr *) wr)->wqe_count = wqe_count;
-}
-
-#endif                         /* _C2_WR_H_ */
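
A minimal usage sketch for the header above (illustrative only, not taken from the c2 driver): the hypothetical helper below fills in a CCWR_CONSOLE request, using the c2_wr_* accessors for the packed c2wr_hdr fields and writing the request-specific fields directly. It assumes c2_wr.h as shown, including the CCWR_CONSOLE id defined earlier in that header.

    #include <linux/string.h>
    #include "c2_wr.h"

    /* Hypothetical helper: build a console request in a caller-provided
     * buffer.  The caller must allocate room for the command string,
     * since command[] is a flexible array at the end of the struct. */
    static void c2_fill_console_req(struct c2wr_console_req *wr,
                                    u64 reply_buf_pa, u32 reply_buf_len,
                                    const char *cmd)
    {
            c2_wr_set_id(wr, CCWR_CONSOLE);    /* id lives in the packed c2wr_hdr */
            wr->reply_buf = reply_buf_pa;      /* pinned host buffer for the reply */
            wr->reply_buf_len = reply_buf_len;
            strcpy((char *)wr->command, cmd);  /* NUL-terminated command, e.g. "netstat -s" */
    }

The same pattern applies to the other request unions: request-specific fields are written directly, while id, result, flags, sge_count and wqe_count always go through the accessors, so callers never depend on the packed header layout.
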
diff --git a/drivers/staging/rdma/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
deleted file mode 100644 (file)
index 3fadd2a..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-config INFINIBAND_EHCA
-       tristate "eHCA support"
-       depends on IBMEBUS
-       ---help---
-       This driver supports the deprecated IBM pSeries eHCA InfiniBand
-       adapter.
-
-       To compile the driver as a module, choose M here. The module
-       will be called ib_ehca.
-
diff --git a/drivers/staging/rdma/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
deleted file mode 100644 (file)
index 74d284e..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-#  Authors: Heiko J Schick <schickhj@de.ibm.com>
-#           Christoph Raisch <raisch@de.ibm.com>
-#           Joachim Fenkes <fenkes@de.ibm.com>
-#
-#  Copyright (c) 2005 IBM Corporation
-#
-#  All rights reserved.
-#
-#  This source code is distributed under a dual license of GPL v2.0 and OpenIB BSD.
-
-obj-$(CONFIG_INFINIBAND_EHCA) += ib_ehca.o
-
-ib_ehca-objs  = ehca_main.o ehca_hca.o ehca_mcast.o ehca_pd.o ehca_av.o ehca_eq.o \
-               ehca_cq.o ehca_qp.o ehca_sqp.o ehca_mrmw.o ehca_reqs.o ehca_irq.o \
-               ehca_uverbs.o ipz_pt_fn.o hcp_if.o hcp_phyp.o
-
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
deleted file mode 100644 (file)
index 199a4a6..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-9/2015
-
-The ehca driver has been deprecated and moved to drivers/staging/rdma.
-It will be removed in the 4.6 merge window.
diff --git a/drivers/staging/rdma/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
deleted file mode 100644 (file)
index 94e088c..0000000
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  address vector functions
- *
- *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Khadija Souissi <souissik@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *av_cache;
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
-                 enum ib_rate path_rate, u32 *ipd)
-{
-       int path = ib_rate_to_mult(path_rate);
-       int link, ret;
-       struct ib_port_attr pa;
-
-       if (path_rate == IB_RATE_PORT_CURRENT) {
-               *ipd = 0;
-               return 0;
-       }
-
-       if (unlikely(path < 0)) {
-               ehca_err(&shca->ib_device, "Invalid static rate! path_rate=%x",
-                        path_rate);
-               return -EINVAL;
-       }
-
-       ret = ehca_query_port(&shca->ib_device, port, &pa);
-       if (unlikely(ret < 0)) {
-               ehca_err(&shca->ib_device, "Failed to query port  ret=%i", ret);
-               return ret;
-       }
-
-       link = ib_width_enum_to_int(pa.active_width) * pa.active_speed;
-
-       if (path >= link)
-               /* no need to throttle if path faster than link */
-               *ipd = 0;
-       else
-               /* IPD = round(link / path) - 1 */
-               *ipd = ((link + (path >> 1)) / path) - 1;
-
-       return 0;
-}
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
-{
-       int ret;
-       struct ehca_av *av;
-       struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
-                                             ib_device);
-
-       av = kmem_cache_alloc(av_cache, GFP_KERNEL);
-       if (!av) {
-               ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
-                        pd, ah_attr);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       av->av.sl = ah_attr->sl;
-       av->av.dlid = ah_attr->dlid;
-       av->av.slid_path_bits = ah_attr->src_path_bits;
-
-       if (ehca_static_rate < 0) {
-               u32 ipd;
-
-               if (ehca_calc_ipd(shca, ah_attr->port_num,
-                                 ah_attr->static_rate, &ipd)) {
-                       ret = -EINVAL;
-                       goto create_ah_exit1;
-               }
-               av->av.ipd = ipd;
-       } else
-               av->av.ipd = ehca_static_rate;
-
-       av->av.lnh = ah_attr->ah_flags;
-       av->av.grh.word_0 = EHCA_BMASK_SET(GRH_IPVERSION_MASK, 6);
-       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_TCLASS_MASK,
-                                           ah_attr->grh.traffic_class);
-       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
-                                           ah_attr->grh.flow_label);
-       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
-                                           ah_attr->grh.hop_limit);
-       av->av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1B);
-       /* set sgid in grh.word_1 */
-       if (ah_attr->ah_flags & IB_AH_GRH) {
-               int rc;
-               struct ib_port_attr port_attr;
-               union ib_gid gid;
-
-               memset(&port_attr, 0, sizeof(port_attr));
-               rc = ehca_query_port(pd->device, ah_attr->port_num,
-                                    &port_attr);
-               if (rc) { /* invalid port number */
-                       ret = -EINVAL;
-                       ehca_err(pd->device, "Invalid port number "
-                                "ehca_query_port() returned %x "
-                                "pd=%p ah_attr=%p", rc, pd, ah_attr);
-                       goto create_ah_exit1;
-               }
-               memset(&gid, 0, sizeof(gid));
-               rc = ehca_query_gid(pd->device,
-                                   ah_attr->port_num,
-                                   ah_attr->grh.sgid_index, &gid);
-               if (rc) {
-                       ret = -EINVAL;
-                       ehca_err(pd->device, "Failed to retrieve sgid "
-                                "ehca_query_gid() returned %x "
-                                "pd=%p ah_attr=%p", rc, pd, ah_attr);
-                       goto create_ah_exit1;
-               }
-               memcpy(&av->av.grh.word_1, &gid, sizeof(gid));
-       }
-       av->av.pmtu = shca->max_mtu;
-
-       /* dgid comes in grh.word_3 */
-       memcpy(&av->av.grh.word_3, &ah_attr->grh.dgid,
-              sizeof(ah_attr->grh.dgid));
-
-       return &av->ib_ah;
-
-create_ah_exit1:
-       kmem_cache_free(av_cache, av);
-
-       return ERR_PTR(ret);
-}
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
-       struct ehca_av *av;
-       struct ehca_ud_av new_ehca_av;
-       struct ehca_shca *shca = container_of(ah->pd->device, struct ehca_shca,
-                                             ib_device);
-
-       memset(&new_ehca_av, 0, sizeof(new_ehca_av));
-       new_ehca_av.sl = ah_attr->sl;
-       new_ehca_av.dlid = ah_attr->dlid;
-       new_ehca_av.slid_path_bits = ah_attr->src_path_bits;
-       new_ehca_av.ipd = ah_attr->static_rate;
-       new_ehca_av.lnh = EHCA_BMASK_SET(GRH_FLAG_MASK,
-                                        (ah_attr->ah_flags & IB_AH_GRH) > 0);
-       new_ehca_av.grh.word_0 = EHCA_BMASK_SET(GRH_TCLASS_MASK,
-                                               ah_attr->grh.traffic_class);
-       new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_FLOWLABEL_MASK,
-                                                ah_attr->grh.flow_label);
-       new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_HOPLIMIT_MASK,
-                                                ah_attr->grh.hop_limit);
-       new_ehca_av.grh.word_0 |= EHCA_BMASK_SET(GRH_NEXTHEADER_MASK, 0x1b);
-
-       /* set sgid in grh.word_1 */
-       if (ah_attr->ah_flags & IB_AH_GRH) {
-               int rc;
-               struct ib_port_attr port_attr;
-               union ib_gid gid;
-
-               memset(&port_attr, 0, sizeof(port_attr));
-               rc = ehca_query_port(ah->device, ah_attr->port_num,
-                                    &port_attr);
-               if (rc) { /* invalid port number */
-                       ehca_err(ah->device, "Invalid port number "
-                                "ehca_query_port() returned %x "
-                                "ah=%p ah_attr=%p port_num=%x",
-                                rc, ah, ah_attr, ah_attr->port_num);
-                       return -EINVAL;
-               }
-               memset(&gid, 0, sizeof(gid));
-               rc = ehca_query_gid(ah->device,
-                                   ah_attr->port_num,
-                                   ah_attr->grh.sgid_index, &gid);
-               if (rc) {
-                       ehca_err(ah->device, "Failed to retrieve sgid "
-                                "ehca_query_gid() returned %x "
-                                "ah=%p ah_attr=%p port_num=%x "
-                                "sgid_index=%x",
-                                rc, ah, ah_attr, ah_attr->port_num,
-                                ah_attr->grh.sgid_index);
-                       return -EINVAL;
-               }
-               memcpy(&new_ehca_av.grh.word_1, &gid, sizeof(gid));
-       }
-
-       new_ehca_av.pmtu = shca->max_mtu;
-
-       memcpy(&new_ehca_av.grh.word_3, &ah_attr->grh.dgid,
-              sizeof(ah_attr->grh.dgid));
-
-       av = container_of(ah, struct ehca_av, ib_ah);
-       av->av = new_ehca_av;
-
-       return 0;
-}
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
-{
-       struct ehca_av *av = container_of(ah, struct ehca_av, ib_ah);
-
-       memcpy(&ah_attr->grh.dgid, &av->av.grh.word_3,
-              sizeof(ah_attr->grh.dgid));
-       ah_attr->sl = av->av.sl;
-
-       ah_attr->dlid = av->av.dlid;
-
-       ah_attr->src_path_bits = av->av.slid_path_bits;
-       ah_attr->static_rate = av->av.ipd;
-       ah_attr->ah_flags = EHCA_BMASK_GET(GRH_FLAG_MASK, av->av.lnh);
-       ah_attr->grh.traffic_class = EHCA_BMASK_GET(GRH_TCLASS_MASK,
-                                                   av->av.grh.word_0);
-       ah_attr->grh.hop_limit = EHCA_BMASK_GET(GRH_HOPLIMIT_MASK,
-                                               av->av.grh.word_0);
-       ah_attr->grh.flow_label = EHCA_BMASK_GET(GRH_FLOWLABEL_MASK,
-                                                av->av.grh.word_0);
-
-       return 0;
-}
-
-int ehca_destroy_ah(struct ib_ah *ah)
-{
-       kmem_cache_free(av_cache, container_of(ah, struct ehca_av, ib_ah));
-
-       return 0;
-}
-
-int ehca_init_av_cache(void)
-{
-       av_cache = kmem_cache_create("ehca_cache_av",
-                                  sizeof(struct ehca_av), 0,
-                                  SLAB_HWCACHE_ALIGN,
-                                  NULL);
-       if (!av_cache)
-               return -ENOMEM;
-       return 0;
-}
-
-void ehca_cleanup_av_cache(void)
-{
-       kmem_cache_destroy(av_cache);
-}
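
A worked example of the ehca_calc_ipd() arithmetic above, with illustrative numbers not taken from the driver: on a 4x SDR port, ib_width_enum_to_int(pa.active_width) is 4 and pa.active_speed is 1, so link = 4; a requested 1x static rate gives path = 1, and since path < link the function returns ipd = ((4 + (1 >> 1)) / 1) - 1 = 3, i.e. the adapter inserts three idle cycles per data cycle and throttles the path to one quarter of the link rate.
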
diff --git a/drivers/staging/rdma/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
deleted file mode 100644 (file)
index e8c3387..0000000
+++ /dev/null
@@ -1,481 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Struct definition for eHCA internal structures
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_H__
-#define __EHCA_CLASSES_H__
-
-struct ehca_module;
-struct ehca_qp;
-struct ehca_cq;
-struct ehca_eq;
-struct ehca_mr;
-struct ehca_mw;
-struct ehca_pd;
-struct ehca_av;
-
-#include <linux/wait.h>
-#include <linux/mutex.h>
-
-#include <rdma/ib_verbs.h>
-#include <rdma/ib_user_verbs.h>
-
-#ifdef CONFIG_PPC64
-#include "ehca_classes_pSeries.h"
-#endif
-#include "ipz_pt_fn.h"
-#include "ehca_qes.h"
-#include "ehca_irq.h"
-
-#define EHCA_EQE_CACHE_SIZE 20
-#define EHCA_MAX_NUM_QUEUES 0xffff
-
-struct ehca_eqe_cache_entry {
-       struct ehca_eqe *eqe;
-       struct ehca_cq *cq;
-};
-
-struct ehca_eq {
-       u32 length;
-       struct ipz_queue ipz_queue;
-       struct ipz_eq_handle ipz_eq_handle;
-       struct work_struct work;
-       struct h_galpas galpas;
-       int is_initialized;
-       struct ehca_pfeq pf;
-       spinlock_t spinlock;
-       struct tasklet_struct interrupt_task;
-       u32 ist;
-       spinlock_t irq_spinlock;
-       struct ehca_eqe_cache_entry eqe_cache[EHCA_EQE_CACHE_SIZE];
-};
-
-struct ehca_sma_attr {
-       u16 lid, lmc, sm_sl, sm_lid;
-       u16 pkey_tbl_len, pkeys[16];
-};
-
-struct ehca_sport {
-       struct ib_cq *ibcq_aqp1;
-       struct ib_qp *ibqp_sqp[2];
-       /* lock to serialize modify_qp() calls for sqp in normal
-        * and irq path (when event PORT_ACTIVE is received first time)
-        */
-       spinlock_t mod_sqp_lock;
-       enum ib_port_state port_state;
-       struct ehca_sma_attr saved_attr;
-       u32 pma_qp_nr;
-};
-
-#define HCA_CAP_MR_PGSIZE_4K  0x80000000
-#define HCA_CAP_MR_PGSIZE_64K 0x40000000
-#define HCA_CAP_MR_PGSIZE_1M  0x20000000
-#define HCA_CAP_MR_PGSIZE_16M 0x10000000
-
-struct ehca_shca {
-       struct ib_device ib_device;
-       struct platform_device *ofdev;
-       u8 num_ports;
-       int hw_level;
-       struct list_head shca_list;
-       struct ipz_adapter_handle ipz_hca_handle;
-       struct ehca_sport sport[2];
-       struct ehca_eq eq;
-       struct ehca_eq neq;
-       struct ehca_mr *maxmr;
-       struct ehca_pd *pd;
-       struct h_galpas galpas;
-       struct mutex modify_mutex;
-       u64 hca_cap;
-       /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
-       u32 hca_cap_mr_pgsize;
-       int max_mtu;
-       int max_num_qps;
-       int max_num_cqs;
-       atomic_t num_cqs;
-       atomic_t num_qps;
-};
-
-struct ehca_pd {
-       struct ib_pd ib_pd;
-       struct ipz_pd fw_pd;
-       /* small queue mgmt */
-       struct mutex lock;
-       struct list_head free[2];
-       struct list_head full[2];
-};
-
-enum ehca_ext_qp_type {
-       EQPT_NORMAL    = 0,
-       EQPT_LLQP      = 1,
-       EQPT_SRQBASE   = 2,
-       EQPT_SRQ       = 3,
-};
-
-/* struct to cache modify_qp()'s parms for GSI/SMI qp */
-struct ehca_mod_qp_parm {
-       int mask;
-       struct ib_qp_attr attr;
-};
-
-#define EHCA_MOD_QP_PARM_MAX 4
-
-#define QMAP_IDX_MASK 0xFFFFULL
-
-/* struct for tracking if cqes have been reported to the application */
-struct ehca_qmap_entry {
-       u16 app_wr_id;
-       u8 reported;
-       u8 cqe_req;
-};
-
-struct ehca_queue_map {
-       struct ehca_qmap_entry *map;
-       unsigned int entries;
-       unsigned int tail;
-       unsigned int left_to_poll;
-       unsigned int next_wqe_idx;   /* Idx to first wqe to be flushed */
-};
-
-/* function to calculate the next index for the qmap */
-static inline unsigned int next_index(unsigned int cur_index, unsigned int limit)
-{
-       unsigned int temp = cur_index + 1;
-       return (temp == limit) ? 0 : temp;
-}
-
-struct ehca_qp {
-       union {
-               struct ib_qp ib_qp;
-               struct ib_srq ib_srq;
-       };
-       u32 qp_type;
-       enum ehca_ext_qp_type ext_type;
-       enum ib_qp_state state;
-       struct ipz_queue ipz_squeue;
-       struct ehca_queue_map sq_map;
-       struct ipz_queue ipz_rqueue;
-       struct ehca_queue_map rq_map;
-       struct h_galpas galpas;
-       u32 qkey;
-       u32 real_qp_num;
-       u32 token;
-       spinlock_t spinlock_s;
-       spinlock_t spinlock_r;
-       u32 sq_max_inline_data_size;
-       struct ipz_qp_handle ipz_qp_handle;
-       struct ehca_pfqp pf;
-       struct ib_qp_init_attr init_attr;
-       struct ehca_cq *send_cq;
-       struct ehca_cq *recv_cq;
-       unsigned int sqerr_purgeflag;
-       struct hlist_node list_entries;
-       /* array to cache modify_qp()'s parms for GSI/SMI qp */
-       struct ehca_mod_qp_parm *mod_qp_parm;
-       int mod_qp_parm_idx;
-       /* mmap counter for resources mapped into user space */
-       u32 mm_count_squeue;
-       u32 mm_count_rqueue;
-       u32 mm_count_galpa;
-       /* unsolicited ack circumvention */
-       int unsol_ack_circ;
-       int mtu_shift;
-       u32 message_count;
-       u32 packet_count;
-       atomic_t nr_events; /* events seen */
-       wait_queue_head_t wait_completion;
-       int mig_armed;
-       struct list_head sq_err_node;
-       struct list_head rq_err_node;
-};
-
-#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
-#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
-#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
-
-/* must be power of 2 */
-#define QP_HASHTAB_LEN 8
-
-struct ehca_cq {
-       struct ib_cq ib_cq;
-       struct ipz_queue ipz_queue;
-       struct h_galpas galpas;
-       spinlock_t spinlock;
-       u32 cq_number;
-       u32 token;
-       u32 nr_of_entries;
-       struct ipz_cq_handle ipz_cq_handle;
-       struct ehca_pfcq pf;
-       spinlock_t cb_lock;
-       struct hlist_head qp_hashtab[QP_HASHTAB_LEN];
-       struct list_head entry;
-       u32 nr_callbacks;   /* #events assigned to cpu by scaling code */
-       atomic_t nr_events; /* #events seen */
-       wait_queue_head_t wait_completion;
-       spinlock_t task_lock;
-       /* mmap counter for resources mapped into user space */
-       u32 mm_count_queue;
-       u32 mm_count_galpa;
-       struct list_head sqp_err_list;
-       struct list_head rqp_err_list;
-};
-
-enum ehca_mr_flag {
-       EHCA_MR_FLAG_FMR = 0x80000000,   /* FMR, created with ehca_alloc_fmr */
-       EHCA_MR_FLAG_MAXMR = 0x40000000, /* max-MR                           */
-};
-
-struct ehca_mr {
-       union {
-               struct ib_mr ib_mr;     /* must always be first in ehca_mr */
-               struct ib_fmr ib_fmr;   /* must always be first in ehca_mr */
-       } ib;
-       struct ib_umem *umem;
-       spinlock_t mrlock;
-
-       enum ehca_mr_flag flags;
-       u32 num_kpages;         /* number of kernel pages */
-       u32 num_hwpages;        /* number of hw pages to form MR */
-       u64 hwpage_size;        /* hw page size used for this MR */
-       int acl;                /* ACL (stored here for usage in reregister) */
-       u64 *start;             /* virtual start address (stored here for */
-                               /* usage in reregister) */
-       u64 size;               /* size (stored here for usage in reregister) */
-       u32 fmr_page_size;      /* page size for FMR */
-       u32 fmr_max_pages;      /* max pages for FMR */
-       u32 fmr_max_maps;       /* max outstanding maps for FMR */
-       u32 fmr_map_cnt;        /* map counter for FMR */
-       /* fw specific data */
-       struct ipz_mrmw_handle ipz_mr_handle;   /* MR handle for h-calls */
-       struct h_galpas galpas;
-};
-
-struct ehca_mw {
-       struct ib_mw ib_mw;     /* gen2 mw, must always be first in ehca_mw */
-       spinlock_t mwlock;
-
-       u8 never_bound;         /* indication MW was never bound */
-       struct ipz_mrmw_handle ipz_mw_handle;   /* MW handle for h-calls */
-       struct h_galpas galpas;
-};
-
-enum ehca_mr_pgi_type {
-       EHCA_MR_PGI_PHYS   = 1,  /* type of ehca_reg_phys_mr,
-                                 * ehca_rereg_phys_mr,
-                                 * ehca_reg_internal_maxmr */
-       EHCA_MR_PGI_USER   = 2,  /* type of ehca_reg_user_mr */
-       EHCA_MR_PGI_FMR    = 3   /* type of ehca_map_phys_fmr */
-};
-
-struct ehca_mr_pginfo {
-       enum ehca_mr_pgi_type type;
-       u64 num_kpages;
-       u64 kpage_cnt;
-       u64 hwpage_size;     /* hw page size used for this MR */
-       u64 num_hwpages;     /* number of hw pages */
-       u64 hwpage_cnt;      /* counter for hw pages */
-       u64 next_hwpage;     /* next hw page in buffer/chunk/listelem */
-
-       union {
-               struct { /* type EHCA_MR_PGI_PHYS section */
-                       u64 addr;
-                       u16 size;
-               } phy;
-               struct { /* type EHCA_MR_PGI_USER section */
-                       struct ib_umem *region;
-                       struct scatterlist *next_sg;
-                       u64 next_nmap;
-               } usr;
-               struct { /* type EHCA_MR_PGI_FMR section */
-                       u64 fmr_pgsize;
-                       u64 *page_list;
-                       u64 next_listelem;
-               } fmr;
-       } u;
-};
-
-/* output parameters for MR/FMR hipz calls */
-struct ehca_mr_hipzout_parms {
-       struct ipz_mrmw_handle handle;
-       u32 lkey;
-       u32 rkey;
-       u64 len;
-       u64 vaddr;
-       u32 acl;
-};
-
-/* output parameters for MW hipz calls */
-struct ehca_mw_hipzout_parms {
-       struct ipz_mrmw_handle handle;
-       u32 rkey;
-};
-
-struct ehca_av {
-       struct ib_ah ib_ah;
-       struct ehca_ud_av av;
-};
-
-struct ehca_ucontext {
-       struct ib_ucontext ib_ucontext;
-};
-
-int ehca_init_pd_cache(void);
-void ehca_cleanup_pd_cache(void);
-int ehca_init_cq_cache(void);
-void ehca_cleanup_cq_cache(void);
-int ehca_init_qp_cache(void);
-void ehca_cleanup_qp_cache(void);
-int ehca_init_av_cache(void);
-void ehca_cleanup_av_cache(void);
-int ehca_init_mrmw_cache(void);
-void ehca_cleanup_mrmw_cache(void);
-int ehca_init_small_qp_cache(void);
-void ehca_cleanup_small_qp_cache(void);
-
-extern rwlock_t ehca_qp_idr_lock;
-extern rwlock_t ehca_cq_idr_lock;
-extern struct idr ehca_qp_idr;
-extern struct idr ehca_cq_idr;
-extern spinlock_t shca_list_lock;
-
-extern int ehca_static_rate;
-extern int ehca_port_act_time;
-extern bool ehca_use_hp_mr;
-extern bool ehca_scaling_code;
-extern int ehca_lock_hcalls;
-extern int ehca_nr_ports;
-extern int ehca_max_cq;
-extern int ehca_max_qp;
-
-struct ipzu_queue_resp {
-       u32 qe_size;      /* queue entry size */
-       u32 act_nr_of_sg;
-       u32 queue_length; /* queue length allocated in bytes */
-       u32 pagesize;
-       u32 toggle_state;
-       u32 offset; /* save offset within a page for small_qp */
-};
-
-struct ehca_create_cq_resp {
-       u32 cq_number;
-       u32 token;
-       struct ipzu_queue_resp ipz_queue;
-       u32 fw_handle_ofs;
-       u32 dummy;
-};
-
-struct ehca_create_qp_resp {
-       u32 qp_num;
-       u32 token;
-       u32 qp_type;
-       u32 ext_type;
-       u32 qkey;
-       /* qp_num assigned by ehca: sqp0/1 may have got different numbers */
-       u32 real_qp_num;
-       u32 fw_handle_ofs;
-       u32 dummy;
-       struct ipzu_queue_resp ipz_squeue;
-       struct ipzu_queue_resp ipz_rqueue;
-};
-
-struct ehca_alloc_cq_parms {
-       u32 nr_cqe;
-       u32 act_nr_of_entries;
-       u32 act_pages;
-       struct ipz_eq_handle eq_handle;
-};
-
-enum ehca_service_type {
-       ST_RC  = 0,
-       ST_UC  = 1,
-       ST_RD  = 2,
-       ST_UD  = 3,
-};
-
-enum ehca_ll_comp_flags {
-       LLQP_SEND_COMP = 0x20,
-       LLQP_RECV_COMP = 0x40,
-       LLQP_COMP_MASK = 0x60,
-};
-
-struct ehca_alloc_queue_parms {
-       /* input parameters */
-       int max_wr;
-       int max_sge;
-       int page_size;
-       int is_small;
-
-       /* output parameters */
-       u16 act_nr_wqes;
-       u8  act_nr_sges;
-       u32 queue_size; /* bytes for small queues, pages otherwise */
-};
-
-struct ehca_alloc_qp_parms {
-       struct ehca_alloc_queue_parms squeue;
-       struct ehca_alloc_queue_parms rqueue;
-
-       /* input parameters */
-       enum ehca_service_type servicetype;
-       int qp_storage;
-       int sigtype;
-       enum ehca_ext_qp_type ext_type;
-       enum ehca_ll_comp_flags ll_comp_flags;
-       int ud_av_l_key_ctl;
-
-       u32 token;
-       struct ipz_eq_handle eq_handle;
-       struct ipz_pd pd;
-       struct ipz_cq_handle send_cq_handle, recv_cq_handle;
-
-       u32 srq_qpn, srq_token, srq_limit;
-
-       /* output parameters */
-       u32 real_qp_num;
-       struct ipz_qp_handle qp_handle;
-       struct h_galpas galpas;
-};
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int qp_num);
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int qp_num);
-
-#endif
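
The next_index() helper above treats the queue maps as fixed-size rings. A short hypothetical walker (not part of the driver) shows the intended wrap-around pattern, assuming the struct ehca_queue_map layout from this header:

    /* Hypothetical: mark every qmap slot as reported, starting at tail
     * and wrapping at qmap->entries so each slot is visited exactly once. */
    static void qmap_mark_all_reported(struct ehca_queue_map *qmap)
    {
            unsigned int i = qmap->tail;

            do {
                    qmap->map[i].reported = 1;
                    i = next_index(i, qmap->entries);
            } while (i != qmap->tail);
    }
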
diff --git a/drivers/staging/rdma/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
deleted file mode 100644 (file)
index 689c357..0000000
+++ /dev/null
@@ -1,208 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  pSeries interface definitions
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_CLASSES_PSERIES_H__
-#define __EHCA_CLASSES_PSERIES_H__
-
-#include "hcp_phyp.h"
-#include "ipz_pt_fn.h"
-
-
-struct ehca_pfqp {
-       struct ipz_qpt sqpt;
-       struct ipz_qpt rqpt;
-};
-
-struct ehca_pfcq {
-       struct ipz_qpt qpt;
-       u32 cqnr;
-};
-
-struct ehca_pfeq {
-       struct ipz_qpt qpt;
-       struct h_galpa galpa;
-       u32 eqnr;
-};
-
-struct ipz_adapter_handle {
-       u64 handle;
-};
-
-struct ipz_cq_handle {
-       u64 handle;
-};
-
-struct ipz_eq_handle {
-       u64 handle;
-};
-
-struct ipz_qp_handle {
-       u64 handle;
-};
-struct ipz_mrmw_handle {
-       u64 handle;
-};
-
-struct ipz_pd {
-       u32 value;
-};
-
-struct hcp_modify_qp_control_block {
-       u32 qkey;                      /* 00 */
-       u32 rdd;                       /* 01 (reliable datagram domain) */
-       u32 send_psn;                  /* 02 */
-       u32 receive_psn;               /* 03 */
-       u32 prim_phys_port;            /* 04 */
-       u32 alt_phys_port;             /* 05 */
-       u32 prim_p_key_idx;            /* 06 */
-       u32 alt_p_key_idx;             /* 07 */
-       u32 rdma_atomic_ctrl;          /* 08 */
-       u32 qp_state;                  /* 09 */
-       u32 reserved_10;               /* 10 */
-       u32 rdma_nr_atomic_resp_res;   /* 11 */
-       u32 path_migration_state;      /* 12 */
-       u32 rdma_atomic_outst_dest_qp; /* 13 */
-       u32 dest_qp_nr;                /* 14 */
-       u32 min_rnr_nak_timer_field;   /* 15 */
-       u32 service_level;             /* 16 */
-       u32 send_grh_flag;             /* 17 */
-       u32 retry_count;               /* 18 */
-       u32 timeout;                   /* 19 */
-       u32 path_mtu;                  /* 20 */
-       u32 max_static_rate;           /* 21 */
-       u32 dlid;                      /* 22 */
-       u32 rnr_retry_count;           /* 23 */
-       u32 source_path_bits;          /* 24 */
-       u32 traffic_class;             /* 25 */
-       u32 hop_limit;                 /* 26 */
-       u32 source_gid_idx;            /* 27 */
-       u32 flow_label;                /* 28 */
-       u32 reserved_29;               /* 29 */
-       union {                        /* 30 */
-               u64 dw[2];
-               u8 byte[16];
-       } dest_gid;
-       u32 service_level_al;          /* 34 */
-       u32 send_grh_flag_al;          /* 35 */
-       u32 retry_count_al;            /* 36 */
-       u32 timeout_al;                /* 37 */
-       u32 max_static_rate_al;        /* 38 */
-       u32 dlid_al;                   /* 39 */
-       u32 rnr_retry_count_al;        /* 40 */
-       u32 source_path_bits_al;       /* 41 */
-       u32 traffic_class_al;          /* 42 */
-       u32 hop_limit_al;              /* 43 */
-       u32 source_gid_idx_al;         /* 44 */
-       u32 flow_label_al;             /* 45 */
-       u32 reserved_46;               /* 46 */
-       u32 reserved_47;               /* 47 */
-       union {                        /* 48 */
-               u64 dw[2];
-               u8 byte[16];
-       } dest_gid_al;
-       u32 max_nr_outst_send_wr;      /* 52 */
-       u32 max_nr_outst_recv_wr;      /* 53 */
-       u32 disable_ete_credit_check;  /* 54 */
-       u32 qp_number;                 /* 55 */
-       u64 send_queue_handle;         /* 56 */
-       u64 recv_queue_handle;         /* 58 */
-       u32 actual_nr_sges_in_sq_wqe;  /* 60 */
-       u32 actual_nr_sges_in_rq_wqe;  /* 61 */
-       u32 qp_enable;                 /* 62 */
-       u32 curr_srq_limit;            /* 63 */
-       u64 qp_aff_asyn_ev_log_reg;    /* 64 */
-       u64 shared_rq_hndl;            /* 66 */
-       u64 trigg_doorbell_qp_hndl;    /* 68 */
-       u32 reserved_70_127[58];       /* 70 */
-};
-
-#define MQPCB_MASK_QKEY                         EHCA_BMASK_IBM( 0,  0)
-#define MQPCB_MASK_SEND_PSN                     EHCA_BMASK_IBM( 2,  2)
-#define MQPCB_MASK_RECEIVE_PSN                  EHCA_BMASK_IBM( 3,  3)
-#define MQPCB_MASK_PRIM_PHYS_PORT               EHCA_BMASK_IBM( 4,  4)
-#define MQPCB_PRIM_PHYS_PORT                    EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_PHYS_PORT                EHCA_BMASK_IBM( 5,  5)
-#define MQPCB_MASK_PRIM_P_KEY_IDX               EHCA_BMASK_IBM( 6,  6)
-#define MQPCB_PRIM_P_KEY_IDX                    EHCA_BMASK_IBM(24, 31)
-#define MQPCB_MASK_ALT_P_KEY_IDX                EHCA_BMASK_IBM( 7,  7)
-#define MQPCB_MASK_RDMA_ATOMIC_CTRL             EHCA_BMASK_IBM( 8,  8)
-#define MQPCB_MASK_QP_STATE                     EHCA_BMASK_IBM( 9,  9)
-#define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES      EHCA_BMASK_IBM(11, 11)
-#define MQPCB_MASK_PATH_MIGRATION_STATE         EHCA_BMASK_IBM(12, 12)
-#define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP    EHCA_BMASK_IBM(13, 13)
-#define MQPCB_MASK_DEST_QP_NR                   EHCA_BMASK_IBM(14, 14)
-#define MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD      EHCA_BMASK_IBM(15, 15)
-#define MQPCB_MASK_SERVICE_LEVEL                EHCA_BMASK_IBM(16, 16)
-#define MQPCB_MASK_SEND_GRH_FLAG                EHCA_BMASK_IBM(17, 17)
-#define MQPCB_MASK_RETRY_COUNT                  EHCA_BMASK_IBM(18, 18)
-#define MQPCB_MASK_TIMEOUT                      EHCA_BMASK_IBM(19, 19)
-#define MQPCB_MASK_PATH_MTU                     EHCA_BMASK_IBM(20, 20)
-#define MQPCB_MASK_MAX_STATIC_RATE              EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MASK_DLID                         EHCA_BMASK_IBM(22, 22)
-#define MQPCB_MASK_RNR_RETRY_COUNT              EHCA_BMASK_IBM(23, 23)
-#define MQPCB_MASK_SOURCE_PATH_BITS             EHCA_BMASK_IBM(24, 24)
-#define MQPCB_MASK_TRAFFIC_CLASS                EHCA_BMASK_IBM(25, 25)
-#define MQPCB_MASK_HOP_LIMIT                    EHCA_BMASK_IBM(26, 26)
-#define MQPCB_MASK_SOURCE_GID_IDX               EHCA_BMASK_IBM(27, 27)
-#define MQPCB_MASK_FLOW_LABEL                   EHCA_BMASK_IBM(28, 28)
-#define MQPCB_MASK_DEST_GID                     EHCA_BMASK_IBM(30, 30)
-#define MQPCB_MASK_SERVICE_LEVEL_AL             EHCA_BMASK_IBM(31, 31)
-#define MQPCB_MASK_SEND_GRH_FLAG_AL             EHCA_BMASK_IBM(32, 32)
-#define MQPCB_MASK_RETRY_COUNT_AL               EHCA_BMASK_IBM(33, 33)
-#define MQPCB_MASK_TIMEOUT_AL                   EHCA_BMASK_IBM(34, 34)
-#define MQPCB_MASK_MAX_STATIC_RATE_AL           EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MASK_DLID_AL                      EHCA_BMASK_IBM(36, 36)
-#define MQPCB_MASK_RNR_RETRY_COUNT_AL           EHCA_BMASK_IBM(37, 37)
-#define MQPCB_MASK_SOURCE_PATH_BITS_AL          EHCA_BMASK_IBM(38, 38)
-#define MQPCB_MASK_TRAFFIC_CLASS_AL             EHCA_BMASK_IBM(39, 39)
-#define MQPCB_MASK_HOP_LIMIT_AL                 EHCA_BMASK_IBM(40, 40)
-#define MQPCB_MASK_SOURCE_GID_IDX_AL            EHCA_BMASK_IBM(41, 41)
-#define MQPCB_MASK_FLOW_LABEL_AL                EHCA_BMASK_IBM(42, 42)
-#define MQPCB_MASK_DEST_GID_AL                  EHCA_BMASK_IBM(44, 44)
-#define MQPCB_MASK_MAX_NR_OUTST_SEND_WR         EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MASK_MAX_NR_OUTST_RECV_WR         EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK     EHCA_BMASK_IBM(47, 47)
-#define MQPCB_MASK_QP_ENABLE                    EHCA_BMASK_IBM(48, 48)
-#define MQPCB_MASK_CURR_SRQ_LIMIT               EHCA_BMASK_IBM(49, 49)
-#define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG       EHCA_BMASK_IBM(50, 50)
-#define MQPCB_MASK_SHARED_RQ_HNDL               EHCA_BMASK_IBM(51, 51)
-
-#endif /* __EHCA_CLASSES_PSERIES_H__ */
diff --git a/drivers/staging/rdma/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
deleted file mode 100644
index 1aa7931..0000000
--- a/drivers/staging/rdma/ehca/ehca_cq.c
+++ /dev/null
@@ -1,397 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Completion queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "hcp_if.h"
-
-static struct kmem_cache *cq_cache;
-
-int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp)
-{
-       unsigned int qp_num = qp->real_qp_num;
-       unsigned int key = qp_num & (QP_HASHTAB_LEN-1);
-       unsigned long flags;
-
-       spin_lock_irqsave(&cq->spinlock, flags);
-       hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]);
-       spin_unlock_irqrestore(&cq->spinlock, flags);
-
-       ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x",
-                cq->cq_number, qp_num);
-
-       return 0;
-}
-
-int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num)
-{
-       int ret = -EINVAL;
-       unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
-       struct hlist_node *iter;
-       struct ehca_qp *qp;
-       unsigned long flags;
-
-       spin_lock_irqsave(&cq->spinlock, flags);
-       hlist_for_each(iter, &cq->qp_hashtab[key]) {
-               qp = hlist_entry(iter, struct ehca_qp, list_entries);
-               if (qp->real_qp_num == real_qp_num) {
-                       hlist_del(iter);
-                       ehca_dbg(cq->ib_cq.device,
-                                "removed qp from cq .cq_num=%x real_qp_num=%x",
-                                cq->cq_number, real_qp_num);
-                       ret = 0;
-                       break;
-               }
-       }
-       spin_unlock_irqrestore(&cq->spinlock, flags);
-       if (ret)
-               ehca_err(cq->ib_cq.device,
-                        "qp not found cq_num=%x real_qp_num=%x",
-                        cq->cq_number, real_qp_num);
-
-       return ret;
-}
-
-struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num)
-{
-       struct ehca_qp *ret = NULL;
-       unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1);
-       struct hlist_node *iter;
-       struct ehca_qp *qp;
-       hlist_for_each(iter, &cq->qp_hashtab[key]) {
-               qp = hlist_entry(iter, struct ehca_qp, list_entries);
-               if (qp->real_qp_num == real_qp_num) {
-                       ret = qp;
-                       break;
-               }
-       }
-       return ret;
-}
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
-                            const struct ib_cq_init_attr *attr,
-                            struct ib_ucontext *context,
-                            struct ib_udata *udata)
-{
-       int cqe = attr->cqe;
-       static const u32 additional_cqe = 20;
-       struct ib_cq *cq;
-       struct ehca_cq *my_cq;
-       struct ehca_shca *shca =
-               container_of(device, struct ehca_shca, ib_device);
-       struct ipz_adapter_handle adapter_handle;
-       struct ehca_alloc_cq_parms param; /* h_call's out parameters */
-       struct h_galpa gal;
-       void *vpage;
-       u32 counter;
-       u64 rpage, cqx_fec, h_ret;
-       int rc, i;
-       unsigned long flags;
-
-       if (attr->flags)
-               return ERR_PTR(-EINVAL);
-
-       if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
-               return ERR_PTR(-EINVAL);
-
-       if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) {
-               ehca_err(device, "Unable to create CQ, max number of %i "
-                       "CQs reached.", shca->max_num_cqs);
-               ehca_err(device, "To increase the maximum number of CQs "
-                       "use the number_of_cqs module parameter.\n");
-               return ERR_PTR(-ENOSPC);
-       }
-
-       my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
-       if (!my_cq) {
-               ehca_err(device, "Out of memory for ehca_cq struct device=%p",
-                        device);
-               atomic_dec(&shca->num_cqs);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       memset(&param, 0, sizeof(struct ehca_alloc_cq_parms));
-
-       spin_lock_init(&my_cq->spinlock);
-       spin_lock_init(&my_cq->cb_lock);
-       spin_lock_init(&my_cq->task_lock);
-       atomic_set(&my_cq->nr_events, 0);
-       init_waitqueue_head(&my_cq->wait_completion);
-
-       cq = &my_cq->ib_cq;
-
-       adapter_handle = shca->ipz_hca_handle;
-       param.eq_handle = shca->eq.ipz_eq_handle;
-
-       idr_preload(GFP_KERNEL);
-       write_lock_irqsave(&ehca_cq_idr_lock, flags);
-       rc = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT);
-       write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-       idr_preload_end();
-
-       if (rc < 0) {
-               cq = ERR_PTR(-ENOMEM);
-               ehca_err(device, "Can't allocate new idr entry. device=%p",
-                        device);
-               goto create_cq_exit1;
-       }
-       my_cq->token = rc;
-
-       /*
-        * CQs maximum depth is 4GB-64, but we need additional 20 as buffer
-        * for receiving errors CQEs.
-        */
-       param.nr_cqe = cqe + additional_cqe;
-       h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param);
-
-       if (h_ret != H_SUCCESS) {
-               ehca_err(device, "hipz_h_alloc_resource_cq() failed "
-                        "h_ret=%lli device=%p", h_ret, device);
-               cq = ERR_PTR(ehca2ib_return_code(h_ret));
-               goto create_cq_exit2;
-       }
-
-       rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages,
-                               EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0);
-       if (!rc) {
-               ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p",
-                        rc, device);
-               cq = ERR_PTR(-EINVAL);
-               goto create_cq_exit3;
-       }
-
-       for (counter = 0; counter < param.act_pages; counter++) {
-               vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
-               if (!vpage) {
-                       ehca_err(device, "ipz_qpageit_get_inc() "
-                                "returns NULL device=%p", device);
-                       cq = ERR_PTR(-EAGAIN);
-                       goto create_cq_exit4;
-               }
-               rpage = __pa(vpage);
-
-               h_ret = hipz_h_register_rpage_cq(adapter_handle,
-                                                my_cq->ipz_cq_handle,
-                                                &my_cq->pf,
-                                                0,
-                                                0,
-                                                rpage,
-                                                1,
-                                                my_cq->galpas.
-                                                kernel);
-
-               if (h_ret < H_SUCCESS) {
-                       ehca_err(device, "hipz_h_register_rpage_cq() failed "
-                                "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i "
-                                "act_pages=%i", my_cq, my_cq->cq_number,
-                                h_ret, counter, param.act_pages);
-                       cq = ERR_PTR(-EINVAL);
-                       goto create_cq_exit4;
-               }
-
-               if (counter == (param.act_pages - 1)) {
-                       vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue);
-                       if ((h_ret != H_SUCCESS) || vpage) {
-                               ehca_err(device, "Registration of pages not "
-                                        "complete ehca_cq=%p cq_num=%x "
-                                        "h_ret=%lli", my_cq, my_cq->cq_number,
-                                        h_ret);
-                               cq = ERR_PTR(-EAGAIN);
-                               goto create_cq_exit4;
-                       }
-               } else {
-                       if (h_ret != H_PAGE_REGISTERED) {
-                               ehca_err(device, "Registration of page failed "
-                                        "ehca_cq=%p cq_num=%x h_ret=%lli "
-                                        "counter=%i act_pages=%i",
-                                        my_cq, my_cq->cq_number,
-                                        h_ret, counter, param.act_pages);
-                               cq = ERR_PTR(-ENOMEM);
-                               goto create_cq_exit4;
-                       }
-               }
-       }
-
-       ipz_qeit_reset(&my_cq->ipz_queue);
-
-       gal = my_cq->galpas.kernel;
-       cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec));
-       ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx",
-                my_cq, my_cq->cq_number, cqx_fec);
-
-       my_cq->ib_cq.cqe = my_cq->nr_of_entries =
-               param.act_nr_of_entries - additional_cqe;
-       my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff;
-
-       for (i = 0; i < QP_HASHTAB_LEN; i++)
-               INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]);
-
-       INIT_LIST_HEAD(&my_cq->sqp_err_list);
-       INIT_LIST_HEAD(&my_cq->rqp_err_list);
-
-       if (context) {
-               struct ipz_queue *ipz_queue = &my_cq->ipz_queue;
-               struct ehca_create_cq_resp resp;
-               memset(&resp, 0, sizeof(resp));
-               resp.cq_number = my_cq->cq_number;
-               resp.token = my_cq->token;
-               resp.ipz_queue.qe_size = ipz_queue->qe_size;
-               resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg;
-               resp.ipz_queue.queue_length = ipz_queue->queue_length;
-               resp.ipz_queue.pagesize = ipz_queue->pagesize;
-               resp.ipz_queue.toggle_state = ipz_queue->toggle_state;
-               resp.fw_handle_ofs = (u32)
-                       (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1));
-               if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-                       ehca_err(device, "Copy to udata failed.");
-                       cq = ERR_PTR(-EFAULT);
-                       goto create_cq_exit4;
-               }
-       }
-
-       return cq;
-
-create_cq_exit4:
-       ipz_queue_dtor(NULL, &my_cq->ipz_queue);
-
-create_cq_exit3:
-       h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
-       if (h_ret != H_SUCCESS)
-               ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p "
-                        "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret);
-
-create_cq_exit2:
-       write_lock_irqsave(&ehca_cq_idr_lock, flags);
-       idr_remove(&ehca_cq_idr, my_cq->token);
-       write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-create_cq_exit1:
-       kmem_cache_free(cq_cache, my_cq);
-
-       atomic_dec(&shca->num_cqs);
-       return cq;
-}
-
-int ehca_destroy_cq(struct ib_cq *cq)
-{
-       u64 h_ret;
-       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-       int cq_num = my_cq->cq_number;
-       struct ib_device *device = cq->device;
-       struct ehca_shca *shca = container_of(device, struct ehca_shca,
-                                             ib_device);
-       struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-       unsigned long flags;
-
-       if (cq->uobject) {
-               if (my_cq->mm_count_galpa || my_cq->mm_count_queue) {
-                       ehca_err(device, "Resources still referenced in "
-                                "user space cq_num=%x", my_cq->cq_number);
-                       return -EINVAL;
-               }
-       }
-
-       /*
-        * remove the CQ from the idr first to make sure
-        * no more interrupt tasklets will touch this CQ
-        */
-       write_lock_irqsave(&ehca_cq_idr_lock, flags);
-       idr_remove(&ehca_cq_idr, my_cq->token);
-       write_unlock_irqrestore(&ehca_cq_idr_lock, flags);
-
-       /* now wait until all pending events have completed */
-       wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events));
-
-       /* nobody's using our CQ any longer -- we can destroy it */
-       h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0);
-       if (h_ret == H_R_STATE) {
-               /* cq in err: read err data and destroy it forcibly */
-               ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err "
-                        "state. Try to delete it forcibly.",
-                        my_cq, cq_num, my_cq->ipz_cq_handle.handle);
-               ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle);
-               h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1);
-               if (h_ret == H_SUCCESS)
-                       ehca_dbg(device, "cq_num=%x deleted successfully.",
-                                cq_num);
-       }
-       if (h_ret != H_SUCCESS) {
-               ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli "
-                        "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num);
-               return ehca2ib_return_code(h_ret);
-       }
-       ipz_queue_dtor(NULL, &my_cq->ipz_queue);
-       kmem_cache_free(cq_cache, my_cq);
-
-       atomic_dec(&shca->num_cqs);
-       return 0;
-}
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
-{
-       /* TODO: proper resize needs to be done */
-       ehca_err(cq->device, "not implemented yet");
-
-       return -EFAULT;
-}
-
-int ehca_init_cq_cache(void)
-{
-       cq_cache = kmem_cache_create("ehca_cache_cq",
-                                    sizeof(struct ehca_cq), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!cq_cache)
-               return -ENOMEM;
-       return 0;
-}
-
-void ehca_cleanup_cq_cache(void)
-{
-       kmem_cache_destroy(cq_cache);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
deleted file mode 100644
index 90da674..0000000
--- a/drivers/staging/rdma/ehca/ehca_eq.c
+++ /dev/null
@@ -1,189 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Event queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_qes.h"
-#include "hcp_if.h"
-#include "ipz_pt_fn.h"
-
-int ehca_create_eq(struct ehca_shca *shca,
-                  struct ehca_eq *eq,
-                  const enum ehca_eq_type type, const u32 length)
-{
-       int ret;
-       u64 h_ret;
-       u32 nr_pages;
-       u32 i;
-       void *vpage;
-       struct ib_device *ib_dev = &shca->ib_device;
-
-       spin_lock_init(&eq->spinlock);
-       spin_lock_init(&eq->irq_spinlock);
-       eq->is_initialized = 0;
-
-       if (type != EHCA_EQ && type != EHCA_NEQ) {
-               ehca_err(ib_dev, "Invalid EQ type %x. eq=%p", type, eq);
-               return -EINVAL;
-       }
-       if (!length) {
-               ehca_err(ib_dev, "EQ length must not be zero. eq=%p", eq);
-               return -EINVAL;
-       }
-
-       h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
-                                        &eq->pf,
-                                        type,
-                                        length,
-                                        &eq->ipz_eq_handle,
-                                        &eq->length,
-                                        &nr_pages, &eq->ist);
-
-       if (h_ret != H_SUCCESS) {
-               ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
-               return -EINVAL;
-       }
-
-       ret = ipz_queue_ctor(NULL, &eq->ipz_queue, nr_pages,
-                            EHCA_PAGESIZE, sizeof(struct ehca_eqe), 0, 0);
-       if (!ret) {
-               ehca_err(ib_dev, "Can't allocate EQ pages eq=%p", eq);
-               goto create_eq_exit1;
-       }
-
-       for (i = 0; i < nr_pages; i++) {
-               u64 rpage;
-
-               vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
-               if (!vpage)
-                       goto create_eq_exit2;
-
-               rpage = __pa(vpage);
-               h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
-                                                eq->ipz_eq_handle,
-                                                &eq->pf,
-                                                0, 0, rpage, 1);
-
-               if (i == (nr_pages - 1)) {
-                       /* last page */
-                       vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
-                       if (h_ret != H_SUCCESS || vpage)
-                               goto create_eq_exit2;
-               } else {
-                       if (h_ret != H_PAGE_REGISTERED)
-                               goto create_eq_exit2;
-               }
-       }
-
-       ipz_qeit_reset(&eq->ipz_queue);
-
-       /* register interrupt handlers and initialize work queues */
-       if (type == EHCA_EQ) {
-               tasklet_init(&eq->interrupt_task, ehca_tasklet_eq, (long)shca);
-
-               ret = ibmebus_request_irq(eq->ist, ehca_interrupt_eq,
-                                         0, "ehca_eq",
-                                         (void *)shca);
-               if (ret < 0)
-                       ehca_err(ib_dev, "Can't map interrupt handler.");
-       } else if (type == EHCA_NEQ) {
-               tasklet_init(&eq->interrupt_task, ehca_tasklet_neq, (long)shca);
-
-               ret = ibmebus_request_irq(eq->ist, ehca_interrupt_neq,
-                                         0, "ehca_neq",
-                                         (void *)shca);
-               if (ret < 0)
-                       ehca_err(ib_dev, "Can't map interrupt handler.");
-       }
-
-       eq->is_initialized = 1;
-
-       return 0;
-
-create_eq_exit2:
-       ipz_queue_dtor(NULL, &eq->ipz_queue);
-
-create_eq_exit1:
-       hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
-       return -EINVAL;
-}
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
-       unsigned long flags;
-       void *eqe;
-
-       spin_lock_irqsave(&eq->spinlock, flags);
-       eqe = ipz_eqit_eq_get_inc_valid(&eq->ipz_queue);
-       spin_unlock_irqrestore(&eq->spinlock, flags);
-
-       return eqe;
-}
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq)
-{
-       unsigned long flags;
-       u64 h_ret;
-
-       ibmebus_free_irq(eq->ist, (void *)shca);
-
-       spin_lock_irqsave(&shca_list_lock, flags);
-       eq->is_initialized = 0;
-       spin_unlock_irqrestore(&shca_list_lock, flags);
-
-       tasklet_kill(&eq->interrupt_task);
-
-       h_ret = hipz_h_destroy_eq(shca->ipz_hca_handle, eq);
-
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't free EQ resources.");
-               return -EINVAL;
-       }
-       ipz_queue_dtor(NULL, &eq->ipz_queue);
-
-       return 0;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
deleted file mode 100644
index e8b1bb6..0000000
--- a/drivers/staging/rdma/ehca/ehca_hca.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  HCA query functions
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/gfp.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-static unsigned int limit_uint(unsigned int value)
-{
-       return min_t(unsigned int, value, INT_MAX);
-}
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-                     struct ib_udata *uhw)
-{
-       int i, ret = 0;
-       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
-                                             ib_device);
-       struct hipz_query_hca *rblock;
-
-       static const u32 cap_mapping[] = {
-               IB_DEVICE_RESIZE_MAX_WR,      HCA_CAP_WQE_RESIZE,
-               IB_DEVICE_BAD_PKEY_CNTR,      HCA_CAP_BAD_P_KEY_CTR,
-               IB_DEVICE_BAD_QKEY_CNTR,      HCA_CAP_Q_KEY_VIOL_CTR,
-               IB_DEVICE_RAW_MULTI,          HCA_CAP_RAW_PACKET_MCAST,
-               IB_DEVICE_AUTO_PATH_MIG,      HCA_CAP_AUTO_PATH_MIG,
-               IB_DEVICE_CHANGE_PHY_PORT,    HCA_CAP_SQD_RTS_PORT_CHANGE,
-               IB_DEVICE_UD_AV_PORT_ENFORCE, HCA_CAP_AH_PORT_NR_CHECK,
-               IB_DEVICE_CURR_QP_STATE_MOD,  HCA_CAP_CUR_QP_STATE_MOD,
-               IB_DEVICE_SHUTDOWN_PORT,      HCA_CAP_SHUTDOWN_PORT,
-               IB_DEVICE_INIT_TYPE,          HCA_CAP_INIT_TYPE,
-               IB_DEVICE_PORT_ACTIVE_EVENT,  HCA_CAP_PORT_ACTIVE_EVENT,
-       };
-
-       if (uhw->inlen || uhw->outlen)
-               return -EINVAL;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query device properties");
-               ret = -EINVAL;
-               goto query_device1;
-       }
-
-       memset(props, 0, sizeof(struct ib_device_attr));
-       props->page_size_cap   = shca->hca_cap_mr_pgsize;
-       props->fw_ver          = rblock->hw_ver;
-       props->max_mr_size     = rblock->max_mr_size;
-       props->vendor_id       = rblock->vendor_id >> 8;
-       props->vendor_part_id  = rblock->vendor_part_id >> 16;
-       props->hw_ver          = rblock->hw_ver;
-       props->max_qp          = limit_uint(rblock->max_qp);
-       props->max_qp_wr       = limit_uint(rblock->max_wqes_wq);
-       props->max_sge         = limit_uint(rblock->max_sge);
-       props->max_sge_rd      = limit_uint(rblock->max_sge_rd);
-       props->max_cq          = limit_uint(rblock->max_cq);
-       props->max_cqe         = limit_uint(rblock->max_cqe);
-       props->max_mr          = limit_uint(rblock->max_mr);
-       props->max_mw          = limit_uint(rblock->max_mw);
-       props->max_pd          = limit_uint(rblock->max_pd);
-       props->max_ah          = limit_uint(rblock->max_ah);
-       props->max_ee          = limit_uint(rblock->max_rd_ee_context);
-       props->max_rdd         = limit_uint(rblock->max_rd_domain);
-       props->max_fmr         = limit_uint(rblock->max_mr);
-       props->max_qp_rd_atom  = limit_uint(rblock->max_rr_qp);
-       props->max_ee_rd_atom  = limit_uint(rblock->max_rr_ee_context);
-       props->max_res_rd_atom = limit_uint(rblock->max_rr_hca);
-       props->max_qp_init_rd_atom = limit_uint(rblock->max_act_wqs_qp);
-       props->max_ee_init_rd_atom = limit_uint(rblock->max_act_wqs_ee_context);
-
-       if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
-               props->max_srq         = limit_uint(props->max_qp);
-               props->max_srq_wr      = limit_uint(props->max_qp_wr);
-               props->max_srq_sge     = 3;
-       }
-
-       props->max_pkeys           = 16;
-       /* Some FW versions say 0 here; insert sensible value in that case */
-       props->local_ca_ack_delay  = rblock->local_ca_ack_delay ?
-               min_t(u8, rblock->local_ca_ack_delay, 255) : 12;
-       props->max_raw_ipv6_qp     = limit_uint(rblock->max_raw_ipv6_qp);
-       props->max_raw_ethy_qp     = limit_uint(rblock->max_raw_ethy_qp);
-       props->max_mcast_grp       = limit_uint(rblock->max_mcast_grp);
-       props->max_mcast_qp_attach = limit_uint(rblock->max_mcast_qp_attach);
-       props->max_total_mcast_qp_attach
-               = limit_uint(rblock->max_total_mcast_qp_attach);
-
-       /* translate device capabilities */
-       props->device_cap_flags = IB_DEVICE_SYS_IMAGE_GUID |
-               IB_DEVICE_RC_RNR_NAK_GEN | IB_DEVICE_N_NOTIFY_CQ;
-       for (i = 0; i < ARRAY_SIZE(cap_mapping); i += 2)
-               if (rblock->hca_cap_indicators & cap_mapping[i + 1])
-                       props->device_cap_flags |= cap_mapping[i];
-
-query_device1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-static enum ib_mtu map_mtu(struct ehca_shca *shca, u32 fw_mtu)
-{
-       switch (fw_mtu) {
-       case 0x1:
-               return IB_MTU_256;
-       case 0x2:
-               return IB_MTU_512;
-       case 0x3:
-               return IB_MTU_1024;
-       case 0x4:
-               return IB_MTU_2048;
-       case 0x5:
-               return IB_MTU_4096;
-       default:
-               ehca_err(&shca->ib_device, "Unknown MTU size: %x.",
-                        fw_mtu);
-               return 0;
-       }
-}
-
-static u8 map_number_of_vls(struct ehca_shca *shca, u32 vl_cap)
-{
-       switch (vl_cap) {
-       case 0x1:
-               return 1;
-       case 0x2:
-               return 2;
-       case 0x3:
-               return 4;
-       case 0x4:
-               return 8;
-       case 0x5:
-               return 15;
-       default:
-               ehca_err(&shca->ib_device, "invalid Vl Capability: %x.",
-                        vl_cap);
-               return 0;
-       }
-}
-
-int ehca_query_port(struct ib_device *ibdev,
-                   u8 port, struct ib_port_attr *props)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
-                                             ib_device);
-       struct hipz_query_port *rblock;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto query_port1;
-       }
-
-       memset(props, 0, sizeof(struct ib_port_attr));
-
-       props->active_mtu = props->max_mtu = map_mtu(shca, rblock->max_mtu);
-       props->port_cap_flags  = rblock->capability_mask;
-       props->gid_tbl_len     = rblock->gid_tbl_len;
-       if (rblock->max_msg_sz)
-               props->max_msg_sz      = rblock->max_msg_sz;
-       else
-               props->max_msg_sz      = 0x1 << 31;
-       props->bad_pkey_cntr   = rblock->bad_pkey_cntr;
-       props->qkey_viol_cntr  = rblock->qkey_viol_cntr;
-       props->pkey_tbl_len    = rblock->pkey_tbl_len;
-       props->lid             = rblock->lid;
-       props->sm_lid          = rblock->sm_lid;
-       props->lmc             = rblock->lmc;
-       props->sm_sl           = rblock->sm_sl;
-       props->subnet_timeout  = rblock->subnet_timeout;
-       props->init_type_reply = rblock->init_type_reply;
-       props->max_vl_num      = map_number_of_vls(shca, rblock->vl_cap);
-
-       if (rblock->state && rblock->phys_width) {
-               props->phys_state      = rblock->phys_pstate;
-               props->state           = rblock->phys_state;
-               props->active_width    = rblock->phys_width;
-               props->active_speed    = rblock->phys_speed;
-       } else {
-               /* old firmware releases don't report physical
-                * port info, so use default values
-                */
-               props->phys_state      = 5;
-               props->state           = rblock->state;
-               props->active_width    = IB_WIDTH_12X;
-               props->active_speed    = IB_SPEED_SDR;
-       }
-
-query_port1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-int ehca_query_sma_attr(struct ehca_shca *shca,
-                       u8 port, struct ehca_sma_attr *attr)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct hipz_query_port *rblock;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto query_sma_attr1;
-       }
-
-       memset(attr, 0, sizeof(struct ehca_sma_attr));
-
-       attr->lid    = rblock->lid;
-       attr->lmc    = rblock->lmc;
-       attr->sm_sl  = rblock->sm_sl;
-       attr->sm_lid = rblock->sm_lid;
-
-       attr->pkey_tbl_len = rblock->pkey_tbl_len;
-       memcpy(attr->pkeys, rblock->pkey_entries, sizeof(attr->pkeys));
-
-query_sma_attr1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_shca *shca;
-       struct hipz_query_port *rblock;
-
-       shca = container_of(ibdev, struct ehca_shca, ib_device);
-       if (index > 16) {
-               ehca_err(&shca->ib_device, "Invalid index: %x.", index);
-               return -EINVAL;
-       }
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto query_pkey1;
-       }
-
-       memcpy(pkey, &rblock->pkey_entries + index, sizeof(u16));
-
-query_pkey1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port,
-                  int index, union ib_gid *gid)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_shca *shca = container_of(ibdev, struct ehca_shca,
-                                             ib_device);
-       struct hipz_query_port *rblock;
-
-       if (index < 0 || index > 255) {
-               ehca_err(&shca->ib_device, "Invalid index: %x.", index);
-               return -EINVAL;
-       }
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto query_gid1;
-       }
-
-       memcpy(&gid->raw[0], &rblock->gid_prefix, sizeof(u64));
-       memcpy(&gid->raw[8], &rblock->guid_entries[index], sizeof(u64));
-
-query_gid1:
-       ehca_free_fw_ctrlblock(rblock);
-
-       return ret;
-}
-
-static const u32 allowed_port_caps = (
-       IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
-       IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
-       IB_PORT_VENDOR_CLASS_SUP);
-
-int ehca_modify_port(struct ib_device *ibdev,
-                    u8 port, int port_modify_mask,
-                    struct ib_port_modify *props)
-{
-       int ret = 0;
-       struct ehca_shca *shca;
-       struct hipz_query_port *rblock;
-       u32 cap;
-       u64 hret;
-
-       shca = container_of(ibdev, struct ehca_shca, ib_device);
-       if ((props->set_port_cap_mask | props->clr_port_cap_mask)
-           & ~allowed_port_caps) {
-               ehca_err(&shca->ib_device, "Non-changeable bits set in masks  "
-                        "set=%x  clr=%x  allowed=%x", props->set_port_cap_mask,
-                        props->clr_port_cap_mask, allowed_port_caps);
-               return -EINVAL;
-       }
-
-       if (mutex_lock_interruptible(&shca->modify_mutex))
-               return -ERESTARTSYS;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device,  "Can't allocate rblock memory.");
-               ret = -ENOMEM;
-               goto modify_port1;
-       }
-
-       hret = hipz_h_query_port(shca->ipz_hca_handle, port, rblock);
-       if (hret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query port properties");
-               ret = -EINVAL;
-               goto modify_port2;
-       }
-
-       cap = (rblock->capability_mask | props->set_port_cap_mask)
-               & ~props->clr_port_cap_mask;
-
-       hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
-                                 cap, props->init_type, port_modify_mask);
-       if (hret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Modify port failed  h_ret=%lli",
-                        hret);
-               ret = -EINVAL;
-       }
-
-modify_port2:
-       ehca_free_fw_ctrlblock(rblock);
-
-modify_port1:
-       mutex_unlock(&shca->modify_mutex);
-
-       return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
deleted file mode 100644
index 8615d7c..0000000
--- a/drivers/staging/rdma/ehca/ehca_irq.c
+++ /dev/null
@@ -1,870 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Functions for EQs, NEQs and interrupts
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <linux/smpboot.h>
-
-#include "ehca_classes.h"
-#include "ehca_irq.h"
-#include "ehca_iverbs.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-#include "ipz_pt_fn.h"
-
-#define EQE_COMPLETION_EVENT   EHCA_BMASK_IBM( 1,  1)
-#define EQE_CQ_QP_NUMBER       EHCA_BMASK_IBM( 8, 31)
-#define EQE_EE_IDENTIFIER      EHCA_BMASK_IBM( 2,  7)
-#define EQE_CQ_NUMBER          EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_NUMBER          EHCA_BMASK_IBM( 8, 31)
-#define EQE_QP_TOKEN           EHCA_BMASK_IBM(32, 63)
-#define EQE_CQ_TOKEN           EHCA_BMASK_IBM(32, 63)
-
-#define NEQE_COMPLETION_EVENT  EHCA_BMASK_IBM( 1,  1)
-#define NEQE_EVENT_CODE        EHCA_BMASK_IBM( 2,  7)
-#define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
-#define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
-#define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
-#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)
-
-#define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
-#define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0,  7)
-
-static void queue_comp_task(struct ehca_cq *__cq);
-
-static struct ehca_comp_pool *pool;
-
-static inline void comp_event_callback(struct ehca_cq *cq)
-{
-       if (!cq->ib_cq.comp_handler)
-               return;
-
-       spin_lock(&cq->cb_lock);
-       cq->ib_cq.comp_handler(&cq->ib_cq, cq->ib_cq.cq_context);
-       spin_unlock(&cq->cb_lock);
-
-       return;
-}
-
-static void print_error_data(struct ehca_shca *shca, void *data,
-                            u64 *rblock, int length)
-{
-       u64 type = EHCA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
-       u64 resource = rblock[1];
-
-       switch (type) {
-       case 0x1: /* Queue Pair */
-       {
-               struct ehca_qp *qp = (struct ehca_qp *)data;
-
-               /* only print error data if AER is set */
-               if (rblock[6] == 0)
-                       return;
-
-               ehca_err(&shca->ib_device,
-                        "QP 0x%x (resource=%llx) has errors.",
-                        qp->ib_qp.qp_num, resource);
-               break;
-       }
-       case 0x4: /* Completion Queue */
-       {
-               struct ehca_cq *cq = (struct ehca_cq *)data;
-
-               ehca_err(&shca->ib_device,
-                        "CQ 0x%x (resource=%llx) has errors.",
-                        cq->cq_number, resource);
-               break;
-       }
-       default:
-               ehca_err(&shca->ib_device,
-                        "Unknown error type: %llx on %s.",
-                        type, shca->ib_device.name);
-               break;
-       }
-
-       ehca_err(&shca->ib_device, "Error data is available: %llx.", resource);
-       ehca_err(&shca->ib_device, "EHCA ----- error data begin "
-                "---------------------------------------------------");
-       ehca_dmp(rblock, length, "resource=%llx", resource);
-       ehca_err(&shca->ib_device, "EHCA ----- error data end "
-                "----------------------------------------------------");
-
-       return;
-}
-
-int ehca_error_data(struct ehca_shca *shca, void *data,
-                   u64 resource)
-{
-
-       unsigned long ret;
-       u64 *rblock;
-       unsigned long block_count;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Cannot allocate rblock memory.");
-               ret = -ENOMEM;
-               goto error_data1;
-       }
-
-       /* rblock must be 4K aligned and should be 4K large */
-       ret = hipz_h_error_data(shca->ipz_hca_handle,
-                               resource,
-                               rblock,
-                               &block_count);
-
-       if (ret == H_R_STATE)
-               ehca_err(&shca->ib_device,
-                        "No error data is available: %llx.", resource);
-       else if (ret == H_SUCCESS) {
-               int length;
-
-               length = EHCA_BMASK_GET(ERROR_DATA_LENGTH, rblock[0]);
-
-               if (length > EHCA_PAGESIZE)
-                       length = EHCA_PAGESIZE;
-
-               print_error_data(shca, data, rblock, length);
-       } else
-               ehca_err(&shca->ib_device,
-                        "Error data could not be fetched: %llx", resource);
-
-       ehca_free_fw_ctrlblock(rblock);
-
-error_data1:
-       return ret;
-
-}
-
-static void dispatch_qp_event(struct ehca_shca *shca, struct ehca_qp *qp,
-                             enum ib_event_type event_type)
-{
-       struct ib_event event;
-
-       /* PATH_MIG without the QP ever having been armed is false alarm */
-       if (event_type == IB_EVENT_PATH_MIG && !qp->mig_armed)
-               return;
-
-       event.device = &shca->ib_device;
-       event.event = event_type;
-
-       if (qp->ext_type == EQPT_SRQ) {
-               if (!qp->ib_srq.event_handler)
-                       return;
-
-               event.element.srq = &qp->ib_srq;
-               qp->ib_srq.event_handler(&event, qp->ib_srq.srq_context);
-       } else {
-               if (!qp->ib_qp.event_handler)
-                       return;
-
-               event.element.qp = &qp->ib_qp;
-               qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
-       }
-}
-
-static void qp_event_callback(struct ehca_shca *shca, u64 eqe,
-                             enum ib_event_type event_type, int fatal)
-{
-       struct ehca_qp *qp;
-       u32 token = EHCA_BMASK_GET(EQE_QP_TOKEN, eqe);
-
-       read_lock(&ehca_qp_idr_lock);
-       qp = idr_find(&ehca_qp_idr, token);
-       if (qp)
-               atomic_inc(&qp->nr_events);
-       read_unlock(&ehca_qp_idr_lock);
-
-       if (!qp)
-               return;
-
-       if (fatal)
-               ehca_error_data(shca, qp, qp->ipz_qp_handle.handle);
-
-       dispatch_qp_event(shca, qp, fatal && qp->ext_type == EQPT_SRQ ?
-                         IB_EVENT_SRQ_ERR : event_type);
-
-       /*
-        * eHCA only processes one WQE at a time for SRQ base QPs,
-        * so the last WQE has been processed as soon as the QP enters
-        * error state.
-        */
-       if (fatal && qp->ext_type == EQPT_SRQBASE)
-               dispatch_qp_event(shca, qp, IB_EVENT_QP_LAST_WQE_REACHED);
-
-       if (atomic_dec_and_test(&qp->nr_events))
-               wake_up(&qp->wait_completion);
-       return;
-}
-
-static void cq_event_callback(struct ehca_shca *shca,
-                             u64 eqe)
-{
-       struct ehca_cq *cq;
-       u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);
-
-       read_lock(&ehca_cq_idr_lock);
-       cq = idr_find(&ehca_cq_idr, token);
-       if (cq)
-               atomic_inc(&cq->nr_events);
-       read_unlock(&ehca_cq_idr_lock);
-
-       if (!cq)
-               return;
-
-       ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);
-
-       if (atomic_dec_and_test(&cq->nr_events))
-               wake_up(&cq->wait_completion);
-
-       return;
-}
-
-static void parse_identifier(struct ehca_shca *shca, u64 eqe)
-{
-       u8 identifier = EHCA_BMASK_GET(EQE_EE_IDENTIFIER, eqe);
-
-       switch (identifier) {
-       case 0x02: /* path migrated */
-               qp_event_callback(shca, eqe, IB_EVENT_PATH_MIG, 0);
-               break;
-       case 0x03: /* communication established */
-               qp_event_callback(shca, eqe, IB_EVENT_COMM_EST, 0);
-               break;
-       case 0x04: /* send queue drained */
-               qp_event_callback(shca, eqe, IB_EVENT_SQ_DRAINED, 0);
-               break;
-       case 0x05: /* QP error */
-       case 0x06: /* QP error */
-               qp_event_callback(shca, eqe, IB_EVENT_QP_FATAL, 1);
-               break;
-       case 0x07: /* CQ error */
-       case 0x08: /* CQ error */
-               cq_event_callback(shca, eqe);
-               break;
-       case 0x09: /* MRMWPTE error */
-               ehca_err(&shca->ib_device, "MRMWPTE error.");
-               break;
-       case 0x0A: /* port event */
-               ehca_err(&shca->ib_device, "Port event.");
-               break;
-       case 0x0B: /* MR access error */
-               ehca_err(&shca->ib_device, "MR access error.");
-               break;
-       case 0x0C: /* EQ error */
-               ehca_err(&shca->ib_device, "EQ error.");
-               break;
-       case 0x0D: /* P/Q_Key mismatch */
-               ehca_err(&shca->ib_device, "P/Q_Key mismatch.");
-               break;
-       case 0x10: /* sampling complete */
-               ehca_err(&shca->ib_device, "Sampling complete.");
-               break;
-       case 0x11: /* unaffiliated access error */
-               ehca_err(&shca->ib_device, "Unaffiliated access error.");
-               break;
-       case 0x12: /* path migrating */
-               ehca_err(&shca->ib_device, "Path migrating.");
-               break;
-       case 0x13: /* interface trace stopped */
-               ehca_err(&shca->ib_device, "Interface trace stopped.");
-               break;
-       case 0x14: /* first error capture info available */
-               ehca_info(&shca->ib_device, "First error capture available");
-               break;
-       case 0x15: /* SRQ limit reached */
-               qp_event_callback(shca, eqe, IB_EVENT_SRQ_LIMIT_REACHED, 0);
-               break;
-       default:
-               ehca_err(&shca->ib_device, "Unknown identifier: %x on %s.",
-                        identifier, shca->ib_device.name);
-               break;
-       }
-
-       return;
-}
-
-static void dispatch_port_event(struct ehca_shca *shca, int port_num,
-                               enum ib_event_type type, const char *msg)
-{
-       struct ib_event event;
-
-       ehca_info(&shca->ib_device, "port %d %s.", port_num, msg);
-       event.device = &shca->ib_device;
-       event.event = type;
-       event.element.port_num = port_num;
-       ib_dispatch_event(&event);
-}
-
-static void notify_port_conf_change(struct ehca_shca *shca, int port_num)
-{
-       struct ehca_sma_attr  new_attr;
-       struct ehca_sma_attr *old_attr = &shca->sport[port_num - 1].saved_attr;
-
-       ehca_query_sma_attr(shca, port_num, &new_attr);
-
-       if (new_attr.sm_sl  != old_attr->sm_sl ||
-           new_attr.sm_lid != old_attr->sm_lid)
-               dispatch_port_event(shca, port_num, IB_EVENT_SM_CHANGE,
-                                   "SM changed");
-
-       if (new_attr.lid != old_attr->lid ||
-           new_attr.lmc != old_attr->lmc)
-               dispatch_port_event(shca, port_num, IB_EVENT_LID_CHANGE,
-                                   "LID changed");
-
-       if (new_attr.pkey_tbl_len != old_attr->pkey_tbl_len ||
-           memcmp(new_attr.pkeys, old_attr->pkeys,
-                  sizeof(u16) * new_attr.pkey_tbl_len))
-               dispatch_port_event(shca, port_num, IB_EVENT_PKEY_CHANGE,
-                                   "P_Key changed");
-
-       *old_attr = new_attr;
-}
-
-/* replay modify_qp for sqps -- return 0 if all is well, 1 if AQP1 destroyed */
-static int replay_modify_qp(struct ehca_sport *sport)
-{
-       int aqp1_destroyed;
-       unsigned long flags;
-
-       spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-
-       aqp1_destroyed = !sport->ibqp_sqp[IB_QPT_GSI];
-
-       if (sport->ibqp_sqp[IB_QPT_SMI])
-               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
-       if (!aqp1_destroyed)
-               ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
-
-       spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-
-       return aqp1_destroyed;
-}
-
-static void parse_ec(struct ehca_shca *shca, u64 eqe)
-{
-       u8 ec   = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
-       u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
-       u8 spec_event;
-       struct ehca_sport *sport = &shca->sport[port - 1];
-
-       switch (ec) {
-       case 0x30: /* port availability change */
-               if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-                       /* only replay modify_qp calls in autodetect mode;
-                        * if AQP1 was destroyed, the port is already down
-                        * again and we can drop the event.
-                        */
-                       if (ehca_nr_ports < 0)
-                               if (replay_modify_qp(sport))
-                                       break;
-
-                       sport->port_state = IB_PORT_ACTIVE;
-                       dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
-                                           "is active");
-                       ehca_query_sma_attr(shca, port, &sport->saved_attr);
-               } else {
-                       sport->port_state = IB_PORT_DOWN;
-                       dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
-                                           "is inactive");
-               }
-               break;
-       case 0x31:
-               /* port configuration change
-                * disruptive change is caused by
-                * LID, PKEY or SM change
-                */
-               if (EHCA_BMASK_GET(NEQE_DISRUPTIVE, eqe)) {
-                       ehca_warn(&shca->ib_device, "disruptive port "
-                                 "%d configuration change", port);
-
-                       sport->port_state = IB_PORT_DOWN;
-                       dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
-                                           "is inactive");
-
-                       sport->port_state = IB_PORT_ACTIVE;
-                       dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
-                                           "is active");
-                       ehca_query_sma_attr(shca, port,
-                                           &sport->saved_attr);
-               } else
-                       notify_port_conf_change(shca, port);
-               break;
-       case 0x32: /* adapter malfunction */
-               ehca_err(&shca->ib_device, "Adapter malfunction.");
-               break;
-       case 0x33:  /* trace stopped */
-               ehca_err(&shca->ib_device, "Trace stopped.");
-               break;
-       case 0x34: /* util async event */
-               spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
-               if (spec_event == 0x80) /* client reregister required */
-                       dispatch_port_event(shca, port,
-                                           IB_EVENT_CLIENT_REREGISTER,
-                                           "client reregister req.");
-               else
-                       ehca_warn(&shca->ib_device, "Unknown util async "
-                                 "event %x on port %x", spec_event, port);
-               break;
-       default:
-               ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
-                        ec, shca->ib_device.name);
-               break;
-       }
-
-       return;
-}
-
-static inline void reset_eq_pending(struct ehca_cq *cq)
-{
-       u64 CQx_EP;
-       struct h_galpa gal = cq->galpas.kernel;
-
-       hipz_galpa_store_cq(gal, cqx_ep, 0x0);
-       CQx_EP = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_ep));
-
-       return;
-}
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id)
-{
-       struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
-       tasklet_hi_schedule(&shca->neq.interrupt_task);
-
-       return IRQ_HANDLED;
-}
-
-void ehca_tasklet_neq(unsigned long data)
-{
-       struct ehca_shca *shca = (struct ehca_shca*)data;
-       struct ehca_eqe *eqe;
-       u64 ret;
-
-       eqe = ehca_poll_eq(shca, &shca->neq);
-
-       while (eqe) {
-               if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
-                       parse_ec(shca, eqe->entry);
-
-               eqe = ehca_poll_eq(shca, &shca->neq);
-       }
-
-       ret = hipz_h_reset_event(shca->ipz_hca_handle,
-                                shca->neq.ipz_eq_handle, 0xFFFFFFFFFFFFFFFFL);
-
-       if (ret != H_SUCCESS)
-               ehca_err(&shca->ib_device, "Can't clear notification events.");
-
-       return;
-}
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id)
-{
-       struct ehca_shca *shca = (struct ehca_shca*)dev_id;
-
-       tasklet_hi_schedule(&shca->eq.interrupt_task);
-
-       return IRQ_HANDLED;
-}
-
-
-static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
-{
-       u64 eqe_value;
-       u32 token;
-       struct ehca_cq *cq;
-
-       eqe_value = eqe->entry;
-       ehca_dbg(&shca->ib_device, "eqe_value=%llx", eqe_value);
-       if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-               ehca_dbg(&shca->ib_device, "Got completion event");
-               token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-               read_lock(&ehca_cq_idr_lock);
-               cq = idr_find(&ehca_cq_idr, token);
-               if (cq)
-                       atomic_inc(&cq->nr_events);
-               read_unlock(&ehca_cq_idr_lock);
-               if (cq == NULL) {
-                       ehca_err(&shca->ib_device,
-                                "Invalid eqe for non-existing cq token=%x",
-                                token);
-                       return;
-               }
-               reset_eq_pending(cq);
-               if (ehca_scaling_code)
-                       queue_comp_task(cq);
-               else {
-                       comp_event_callback(cq);
-                       if (atomic_dec_and_test(&cq->nr_events))
-                               wake_up(&cq->wait_completion);
-               }
-       } else {
-               ehca_dbg(&shca->ib_device, "Got non completion event");
-               parse_identifier(shca, eqe_value);
-       }
-}
-
-void ehca_process_eq(struct ehca_shca *shca, int is_irq)
-{
-       struct ehca_eq *eq = &shca->eq;
-       struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
-       u64 eqe_value, ret;
-       int eqe_cnt, i;
-       int eq_empty = 0;
-
-       spin_lock(&eq->irq_spinlock);
-       if (is_irq) {
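-               /* poll the adapter's interrupt state until it clears, giving
-                * up after max_query_cnt attempts
-                */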
-               const int max_query_cnt = 100;
-               int query_cnt = 0;
-               int int_state = 1;
-               do {
-                       int_state = hipz_h_query_int_state(
-                               shca->ipz_hca_handle, eq->ist);
-                       query_cnt++;
-                       iosync();
-               } while (int_state && query_cnt < max_query_cnt);
-               if (unlikely((query_cnt == max_query_cnt)))
-                       ehca_dbg(&shca->ib_device, "int_state=%x query_cnt=%x",
-                                int_state, query_cnt);
-       }
-
-       /* read out all eqes */
-       eqe_cnt = 0;
-       do {
-               u32 token;
-               eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
-               if (!eqe_cache[eqe_cnt].eqe)
-                       break;
-               eqe_value = eqe_cache[eqe_cnt].eqe->entry;
-               if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
-                       token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
-                       read_lock(&ehca_cq_idr_lock);
-                       eqe_cache[eqe_cnt].cq = idr_find(&ehca_cq_idr, token);
-                       if (eqe_cache[eqe_cnt].cq)
-                               atomic_inc(&eqe_cache[eqe_cnt].cq->nr_events);
-                       read_unlock(&ehca_cq_idr_lock);
-                       if (!eqe_cache[eqe_cnt].cq) {
-                               ehca_err(&shca->ib_device,
-                                        "Invalid eqe for non-existing cq "
-                                        "token=%x", token);
-                               continue;
-                       }
-               } else
-                       eqe_cache[eqe_cnt].cq = NULL;
-               eqe_cnt++;
-       } while (eqe_cnt < EHCA_EQE_CACHE_SIZE);
-       if (!eqe_cnt) {
-               if (is_irq)
-                       ehca_dbg(&shca->ib_device,
-                                "No eqe found for irq event");
-               goto unlock_irq_spinlock;
-       } else if (!is_irq) {
-               ret = hipz_h_eoi(eq->ist);
-               if (ret != H_SUCCESS)
-                       ehca_err(&shca->ib_device,
-                                "bad return code EOI -rc = %lld\n", ret);
-               ehca_dbg(&shca->ib_device, "deadman found %x eqe", eqe_cnt);
-       }
-       if (unlikely(eqe_cnt == EHCA_EQE_CACHE_SIZE))
-               ehca_dbg(&shca->ib_device, "too many eqes for one irq event");
-       /* enable irq for new packets */
-       for (i = 0; i < eqe_cnt; i++) {
-               if (eq->eqe_cache[i].cq)
-                       reset_eq_pending(eq->eqe_cache[i].cq);
-       }
-       /* check eq */
-       spin_lock(&eq->spinlock);
-       eq_empty = (!ipz_eqit_eq_peek_valid(&shca->eq.ipz_queue));
-       spin_unlock(&eq->spinlock);
-       /* call completion handler for cached eqes */
-       for (i = 0; i < eqe_cnt; i++)
-               if (eq->eqe_cache[i].cq) {
-                       if (ehca_scaling_code)
-                               queue_comp_task(eq->eqe_cache[i].cq);
-                       else {
-                               struct ehca_cq *cq = eq->eqe_cache[i].cq;
-                               comp_event_callback(cq);
-                               if (atomic_dec_and_test(&cq->nr_events))
-                                       wake_up(&cq->wait_completion);
-                       }
-               } else {
-                       ehca_dbg(&shca->ib_device, "Got non completion event");
-                       parse_identifier(shca, eq->eqe_cache[i].eqe->entry);
-               }
-       /* poll eq if not empty */
-       if (eq_empty)
-               goto unlock_irq_spinlock;
-       do {
-               struct ehca_eqe *eqe;
-               eqe = ehca_poll_eq(shca, &shca->eq);
-               if (!eqe)
-                       break;
-               process_eqe(shca, eqe);
-       } while (1);
-
-unlock_irq_spinlock:
-       spin_unlock(&eq->irq_spinlock);
-}
-
-void ehca_tasklet_eq(unsigned long data)
-{
-       ehca_process_eq((struct ehca_shca*)data, 1);
-}
-
-static int find_next_online_cpu(struct ehca_comp_pool *pool)
-{
-       int cpu;
-       unsigned long flags;
-
-       WARN_ON_ONCE(!in_interrupt());
-       if (ehca_debug_level >= 3)
-               ehca_dmp(cpu_online_mask, cpumask_size(), "");
-
-       spin_lock_irqsave(&pool->last_cpu_lock, flags);
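-       /* round-robin: advance from last_cpu to the next online CPU whose
-        * completion task is marked active
-        */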
-       do {
-               cpu = cpumask_next(pool->last_cpu, cpu_online_mask);
-               if (cpu >= nr_cpu_ids)
-                       cpu = cpumask_first(cpu_online_mask);
-               pool->last_cpu = cpu;
-       } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active);
-       spin_unlock_irqrestore(&pool->last_cpu_lock, flags);
-
-       return cpu;
-}
-
-static void __queue_comp_task(struct ehca_cq *__cq,
-                             struct ehca_cpu_comp_task *cct,
-                             struct task_struct *thread)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&cct->task_lock, flags);
-       spin_lock(&__cq->task_lock);
-
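-       /* first callback for this CQ: put it on the per-CPU list and wake the
-        * thread; otherwise the CQ is already queued, so just count the event
-        */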
-       if (__cq->nr_callbacks == 0) {
-               __cq->nr_callbacks++;
-               list_add_tail(&__cq->entry, &cct->cq_list);
-               cct->cq_jobs++;
-               wake_up_process(thread);
-       } else
-               __cq->nr_callbacks++;
-
-       spin_unlock(&__cq->task_lock);
-       spin_unlock_irqrestore(&cct->task_lock, flags);
-}
-
-static void queue_comp_task(struct ehca_cq *__cq)
-{
-       int cpu_id;
-       struct ehca_cpu_comp_task *cct;
-       struct task_struct *thread;
-       int cq_jobs;
-       unsigned long flags;
-
-       cpu_id = find_next_online_cpu(pool);
-       BUG_ON(!cpu_online(cpu_id));
-
-       cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
-       thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
-       BUG_ON(!cct || !thread);
-
-       spin_lock_irqsave(&cct->task_lock, flags);
-       cq_jobs = cct->cq_jobs;
-       spin_unlock_irqrestore(&cct->task_lock, flags);
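-       /* if the chosen CPU already has jobs queued, pick the next online CPU
-        * to spread the completion load
-        */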
-       if (cq_jobs > 0) {
-               cpu_id = find_next_online_cpu(pool);
-               cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id);
-               thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id);
-               BUG_ON(!cct || !thread);
-       }
-       __queue_comp_task(__cq, cct, thread);
-}
-
-static void run_comp_task(struct ehca_cpu_comp_task *cct)
-{
-       struct ehca_cq *cq;
-
-       while (!list_empty(&cct->cq_list)) {
-               cq = list_entry(cct->cq_list.next, struct ehca_cq, entry);
-               spin_unlock_irq(&cct->task_lock);
-
-               comp_event_callback(cq);
-               if (atomic_dec_and_test(&cq->nr_events))
-                       wake_up(&cq->wait_completion);
-
-               spin_lock_irq(&cct->task_lock);
-               spin_lock(&cq->task_lock);
-               cq->nr_callbacks--;
-               if (!cq->nr_callbacks) {
-                       list_del_init(cct->cq_list.next);
-                       cct->cq_jobs--;
-               }
-               spin_unlock(&cq->task_lock);
-       }
-}
-
-static void comp_task_park(unsigned int cpu)
-{
-       struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-       struct ehca_cpu_comp_task *target;
-       struct task_struct *thread;
-       struct ehca_cq *cq, *tmp;
-       LIST_HEAD(list);
-
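-       /* the CPU is being parked: detach its pending CQs and requeue them on
-        * another online CPU
-        */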
-       spin_lock_irq(&cct->task_lock);
-       cct->cq_jobs = 0;
-       cct->active = 0;
-       list_splice_init(&cct->cq_list, &list);
-       spin_unlock_irq(&cct->task_lock);
-
-       cpu = find_next_online_cpu(pool);
-       target = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-       thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu);
-       spin_lock_irq(&target->task_lock);
-       list_for_each_entry_safe(cq, tmp, &list, entry) {
-               list_del(&cq->entry);
-               __queue_comp_task(cq, target, thread);
-       }
-       spin_unlock_irq(&target->task_lock);
-}
-
-static void comp_task_stop(unsigned int cpu, bool online)
-{
-       struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
-       spin_lock_irq(&cct->task_lock);
-       cct->cq_jobs = 0;
-       cct->active = 0;
-       WARN_ON(!list_empty(&cct->cq_list));
-       spin_unlock_irq(&cct->task_lock);
-}
-
-static int comp_task_should_run(unsigned int cpu)
-{
-       struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-
-       return cct->cq_jobs;
-}
-
-static void comp_task(unsigned int cpu)
-{
-       struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks);
-       int cql_empty;
-
-       spin_lock_irq(&cct->task_lock);
-       cql_empty = list_empty(&cct->cq_list);
-       if (!cql_empty) {
-               __set_current_state(TASK_RUNNING);
-               run_comp_task(cct);
-       }
-       spin_unlock_irq(&cct->task_lock);
-}
-
-static struct smp_hotplug_thread comp_pool_threads = {
-       .thread_should_run      = comp_task_should_run,
-       .thread_fn              = comp_task,
-       .thread_comm            = "ehca_comp/%u",
-       .cleanup                = comp_task_stop,
-       .park                   = comp_task_park,
-};
-
-int ehca_create_comp_pool(void)
-{
-       int cpu, ret = -ENOMEM;
-
-       if (!ehca_scaling_code)
-               return 0;
-
-       pool = kzalloc(sizeof(struct ehca_comp_pool), GFP_KERNEL);
-       if (pool == NULL)
-               return -ENOMEM;
-
-       spin_lock_init(&pool->last_cpu_lock);
-       pool->last_cpu = cpumask_any(cpu_online_mask);
-
-       pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task);
-       if (!pool->cpu_comp_tasks)
-               goto out_pool;
-
-       pool->cpu_comp_threads = alloc_percpu(struct task_struct *);
-       if (!pool->cpu_comp_threads)
-               goto out_tasks;
-
-       for_each_present_cpu(cpu) {
-               struct ehca_cpu_comp_task *cct;
-
-               cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu);
-               spin_lock_init(&cct->task_lock);
-               INIT_LIST_HEAD(&cct->cq_list);
-       }
-
-       comp_pool_threads.store = pool->cpu_comp_threads;
-       ret = smpboot_register_percpu_thread(&comp_pool_threads);
-       if (ret)
-               goto out_threads;
-
-       pr_info("eHCA scaling code enabled\n");
-       return ret;
-
-out_threads:
-       free_percpu(pool->cpu_comp_threads);
-out_tasks:
-       free_percpu(pool->cpu_comp_tasks);
-out_pool:
-       kfree(pool);
-       return ret;
-}
-
-void ehca_destroy_comp_pool(void)
-{
-       if (!ehca_scaling_code)
-               return;
-
-       smpboot_unregister_percpu_thread(&comp_pool_threads);
-
-       free_percpu(pool->cpu_comp_threads);
-       free_percpu(pool->cpu_comp_tasks);
-       kfree(pool);
-}
diff --git a/drivers/staging/rdma/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
deleted file mode 100644 (file)
index 5370199..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Function definitions and structs for EQs, NEQs and interrupts
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Khadija Souissi <souissi@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IRQ_H
-#define __EHCA_IRQ_H
-
-
-struct ehca_shca;
-
-#include <linux/interrupt.h>
-#include <linux/types.h>
-
-int ehca_error_data(struct ehca_shca *shca, void *data, u64 resource);
-
-irqreturn_t ehca_interrupt_neq(int irq, void *dev_id);
-void ehca_tasklet_neq(unsigned long data);
-
-irqreturn_t ehca_interrupt_eq(int irq, void *dev_id);
-void ehca_tasklet_eq(unsigned long data);
-void ehca_process_eq(struct ehca_shca *shca, int is_irq);
-
-struct ehca_cpu_comp_task {
-       struct list_head cq_list;
-       spinlock_t task_lock;
-       int cq_jobs;
-       int active;
-};
-
-struct ehca_comp_pool {
-       struct ehca_cpu_comp_task __percpu *cpu_comp_tasks;
-       struct task_struct * __percpu *cpu_comp_threads;
-       int last_cpu;
-       spinlock_t last_cpu_lock;
-};
-
-int ehca_create_comp_pool(void);
-void ehca_destroy_comp_pool(void);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
deleted file mode 100644 (file)
index cca5933..0000000
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Function definitions for internal functions
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Dietmar Decker <ddecker@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __EHCA_IVERBS_H__
-#define __EHCA_IVERBS_H__
-
-#include "ehca_classes.h"
-
-int ehca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-                     struct ib_udata *uhw);
-
-int ehca_query_port(struct ib_device *ibdev, u8 port,
-                   struct ib_port_attr *props);
-
-enum rdma_protocol_type
-ehca_query_protocol(struct ib_device *device, u8 port_num);
-
-int ehca_query_sma_attr(struct ehca_shca *shca, u8 port,
-                       struct ehca_sma_attr *attr);
-
-int ehca_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey);
-
-int ehca_query_gid(struct ib_device *ibdev, u8 port, int index,
-                  union ib_gid *gid);
-
-int ehca_modify_port(struct ib_device *ibdev, u8 port, int port_modify_mask,
-                    struct ib_port_modify *props);
-
-struct ib_pd *ehca_alloc_pd(struct ib_device *device,
-                           struct ib_ucontext *context,
-                           struct ib_udata *udata);
-
-int ehca_dealloc_pd(struct ib_pd *pd);
-
-struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
-
-int ehca_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
-
-int ehca_destroy_ah(struct ib_ah *ah);
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                              u64 virt, int mr_access_flags,
-                              struct ib_udata *udata);
-
-int ehca_dereg_mr(struct ib_mr *mr);
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);
-
-int ehca_dealloc_mw(struct ib_mw *mw);
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
-                             int mr_access_flags,
-                             struct ib_fmr_attr *fmr_attr);
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
-                     u64 *page_list, int list_len, u64 iova);
-
-int ehca_unmap_fmr(struct list_head *fmr_list);
-
-int ehca_dealloc_fmr(struct ib_fmr *fmr);
-
-enum ehca_eq_type {
-       EHCA_EQ = 0, /* Event Queue              */
-       EHCA_NEQ     /* Notification Event Queue */
-};
-
-int ehca_create_eq(struct ehca_shca *shca, struct ehca_eq *eq,
-                  enum ehca_eq_type type, const u32 length);
-
-int ehca_destroy_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-void *ehca_poll_eq(struct ehca_shca *shca, struct ehca_eq *eq);
-
-
-struct ib_cq *ehca_create_cq(struct ib_device *device,
-                            const struct ib_cq_init_attr *attr,
-                            struct ib_ucontext *context,
-                            struct ib_udata *udata);
-
-int ehca_destroy_cq(struct ib_cq *cq);
-
-int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
-
-int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
-
-int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
-
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
-
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
-                            struct ib_qp_init_attr *init_attr,
-                            struct ib_udata *udata);
-
-int ehca_destroy_qp(struct ib_qp *qp);
-
-int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-                  struct ib_udata *udata);
-
-int ehca_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
-                 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
-                  struct ib_send_wr **bad_send_wr);
-
-int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
-                  struct ib_recv_wr **bad_recv_wr);
-
-int ehca_post_srq_recv(struct ib_srq *srq,
-                      struct ib_recv_wr *recv_wr,
-                      struct ib_recv_wr **bad_recv_wr);
-
-struct ib_srq *ehca_create_srq(struct ib_pd *pd,
-                              struct ib_srq_init_attr *init_attr,
-                              struct ib_udata *udata);
-
-int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
-                   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
-
-int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
-
-int ehca_destroy_srq(struct ib_srq *srq);
-
-u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
-                   struct ib_qp_init_attr *qp_init_attr);
-
-int ehca_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-int ehca_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
-
-struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
-                                       struct ib_udata *udata);
-
-int ehca_dealloc_ucontext(struct ib_ucontext *context);
-
-int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-                    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                    const struct ib_mad_hdr *in, size_t in_mad_size,
-                    struct ib_mad_hdr *out, size_t *out_mad_size,
-                    u16 *out_mad_pkey_index);
-
-void ehca_poll_eqs(unsigned long data);
-
-int ehca_calc_ipd(struct ehca_shca *shca, int port,
-                 enum ib_rate path_rate, u32 *ipd);
-
-void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq);
-
-#ifdef CONFIG_PPC_64K_PAGES
-void *ehca_alloc_fw_ctrlblock(gfp_t flags);
-void ehca_free_fw_ctrlblock(void *ptr);
-#else
-#define ehca_alloc_fw_ctrlblock(flags) ((void *)get_zeroed_page(flags))
-#define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
-#endif
-
-void ehca_recover_sqp(struct ib_qp *sqp);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
deleted file mode 100644 (file)
index 832f22f..0000000
+++ /dev/null
@@ -1,1118 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  module start stop, hca detection
- *
- *  Authors: Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifdef CONFIG_PPC_64K_PAGES
-#include <linux/slab.h>
-#endif
-
-#include <linux/notifier.h>
-#include <linux/memory.h>
-#include <rdma/ib_mad.h>
-#include "ehca_classes.h"
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-
-#define HCAD_VERSION "0029"
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
-MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION(HCAD_VERSION);
-
-static bool ehca_open_aqp1    = 0;
-static int ehca_hw_level      = 0;
-static bool ehca_poll_all_eqs = 1;
-
-int ehca_debug_level   = 0;
-int ehca_nr_ports      = -1;
-bool ehca_use_hp_mr    = 0;
-int ehca_port_act_time = 30;
-int ehca_static_rate   = -1;
-bool ehca_scaling_code = 0;
-int ehca_lock_hcalls   = -1;
-int ehca_max_cq        = -1;
-int ehca_max_qp        = -1;
-
-module_param_named(open_aqp1,     ehca_open_aqp1,     bool, S_IRUGO);
-module_param_named(debug_level,   ehca_debug_level,   int,  S_IRUGO);
-module_param_named(hw_level,      ehca_hw_level,      int,  S_IRUGO);
-module_param_named(nr_ports,      ehca_nr_ports,      int,  S_IRUGO);
-module_param_named(use_hp_mr,     ehca_use_hp_mr,     bool, S_IRUGO);
-module_param_named(port_act_time, ehca_port_act_time, int,  S_IRUGO);
-module_param_named(poll_all_eqs,  ehca_poll_all_eqs,  bool, S_IRUGO);
-module_param_named(static_rate,   ehca_static_rate,   int,  S_IRUGO);
-module_param_named(scaling_code,  ehca_scaling_code,  bool, S_IRUGO);
-module_param_named(lock_hcalls,   ehca_lock_hcalls,   bint, S_IRUGO);
-module_param_named(number_of_cqs, ehca_max_cq,        int,  S_IRUGO);
-module_param_named(number_of_qps, ehca_max_qp,        int,  S_IRUGO);
-
-MODULE_PARM_DESC(open_aqp1,
-                "Open AQP1 on startup (default: no)");
-MODULE_PARM_DESC(debug_level,
-                "Amount of debug output (0: none (default), 1: traces, "
-                "2: some dumps, 3: lots)");
-MODULE_PARM_DESC(hw_level,
-                "Hardware level (0: autosensing (default), "
-                "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
-MODULE_PARM_DESC(nr_ports,
-                "number of connected ports (-1: autodetect (default), "
-                "1: port one only, 2: two ports)");
-MODULE_PARM_DESC(use_hp_mr,
-                "Use high performance MRs (default: no)");
-MODULE_PARM_DESC(port_act_time,
-                "Time to wait for port activation (default: 30 sec)");
-MODULE_PARM_DESC(poll_all_eqs,
-                "Poll all event queues periodically (default: yes)");
-MODULE_PARM_DESC(static_rate,
-                "Set permanent static rate (default: no static rate)");
-MODULE_PARM_DESC(scaling_code,
-                "Enable scaling code (default: no)");
-MODULE_PARM_DESC(lock_hcalls,
-                "Serialize all hCalls made by the driver "
-                "(default: autodetect)");
-MODULE_PARM_DESC(number_of_cqs,
-               "Max number of CQs which can be allocated "
-               "(default: autodetect)");
-MODULE_PARM_DESC(number_of_qps,
-               "Max number of QPs which can be allocated "
-               "(default: autodetect)");
-
-DEFINE_RWLOCK(ehca_qp_idr_lock);
-DEFINE_RWLOCK(ehca_cq_idr_lock);
-DEFINE_IDR(ehca_qp_idr);
-DEFINE_IDR(ehca_cq_idr);
-
-static LIST_HEAD(shca_list); /* list of all registered ehcas */
-DEFINE_SPINLOCK(shca_list_lock);
-
-static struct timer_list poll_eqs_timer;
-
-#ifdef CONFIG_PPC_64K_PAGES
-static struct kmem_cache *ctblk_cache;
-
-void *ehca_alloc_fw_ctrlblock(gfp_t flags)
-{
-       void *ret = kmem_cache_zalloc(ctblk_cache, flags);
-       if (!ret)
-               ehca_gen_err("Out of memory for ctblk");
-       return ret;
-}
-
-void ehca_free_fw_ctrlblock(void *ptr)
-{
-       if (ptr)
-               kmem_cache_free(ctblk_cache, ptr);
-
-}
-#endif
-
-int ehca2ib_return_code(u64 ehca_rc)
-{
-       switch (ehca_rc) {
-       case H_SUCCESS:
-               return 0;
-       case H_RESOURCE:             /* Resource in use */
-       case H_BUSY:
-               return -EBUSY;
-       case H_NOT_ENOUGH_RESOURCES: /* insufficient resources */
-       case H_CONSTRAINED:          /* resource constraint */
-       case H_NO_MEM:
-               return -ENOMEM;
-       default:
-               return -EINVAL;
-       }
-}
-
-static int ehca_create_slab_caches(void)
-{
-       int ret;
-
-       ret = ehca_init_pd_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create PD SLAB cache.");
-               return ret;
-       }
-
-       ret = ehca_init_cq_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create CQ SLAB cache.");
-               goto create_slab_caches2;
-       }
-
-       ret = ehca_init_qp_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create QP SLAB cache.");
-               goto create_slab_caches3;
-       }
-
-       ret = ehca_init_av_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create AV SLAB cache.");
-               goto create_slab_caches4;
-       }
-
-       ret = ehca_init_mrmw_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create MR&MW SLAB cache.");
-               goto create_slab_caches5;
-       }
-
-       ret = ehca_init_small_qp_cache();
-       if (ret) {
-               ehca_gen_err("Cannot create small queue SLAB cache.");
-               goto create_slab_caches6;
-       }
-
-#ifdef CONFIG_PPC_64K_PAGES
-       ctblk_cache = kmem_cache_create("ehca_cache_ctblk",
-                                       EHCA_PAGESIZE, H_CB_ALIGNMENT,
-                                       SLAB_HWCACHE_ALIGN,
-                                       NULL);
-       if (!ctblk_cache) {
-               ehca_gen_err("Cannot create ctblk SLAB cache.");
-               ehca_cleanup_small_qp_cache();
-               ret = -ENOMEM;
-               goto create_slab_caches6;
-       }
-#endif
-       return 0;
-
-create_slab_caches6:
-       ehca_cleanup_mrmw_cache();
-
-create_slab_caches5:
-       ehca_cleanup_av_cache();
-
-create_slab_caches4:
-       ehca_cleanup_qp_cache();
-
-create_slab_caches3:
-       ehca_cleanup_cq_cache();
-
-create_slab_caches2:
-       ehca_cleanup_pd_cache();
-
-       return ret;
-}
-
-static void ehca_destroy_slab_caches(void)
-{
-       ehca_cleanup_small_qp_cache();
-       ehca_cleanup_mrmw_cache();
-       ehca_cleanup_av_cache();
-       ehca_cleanup_qp_cache();
-       ehca_cleanup_cq_cache();
-       ehca_cleanup_pd_cache();
-#ifdef CONFIG_PPC_64K_PAGES
-       kmem_cache_destroy(ctblk_cache);
-#endif
-}
-
-#define EHCA_HCAAVER  EHCA_BMASK_IBM(32, 39)
-#define EHCA_REVID    EHCA_BMASK_IBM(40, 63)
-
-static struct cap_descr {
-       u64 mask;
-       char *descr;
-} hca_cap_descr[] = {
-       { HCA_CAP_AH_PORT_NR_CHECK, "HCA_CAP_AH_PORT_NR_CHECK" },
-       { HCA_CAP_ATOMIC, "HCA_CAP_ATOMIC" },
-       { HCA_CAP_AUTO_PATH_MIG, "HCA_CAP_AUTO_PATH_MIG" },
-       { HCA_CAP_BAD_P_KEY_CTR, "HCA_CAP_BAD_P_KEY_CTR" },
-       { HCA_CAP_SQD_RTS_PORT_CHANGE, "HCA_CAP_SQD_RTS_PORT_CHANGE" },
-       { HCA_CAP_CUR_QP_STATE_MOD, "HCA_CAP_CUR_QP_STATE_MOD" },
-       { HCA_CAP_INIT_TYPE, "HCA_CAP_INIT_TYPE" },
-       { HCA_CAP_PORT_ACTIVE_EVENT, "HCA_CAP_PORT_ACTIVE_EVENT" },
-       { HCA_CAP_Q_KEY_VIOL_CTR, "HCA_CAP_Q_KEY_VIOL_CTR" },
-       { HCA_CAP_WQE_RESIZE, "HCA_CAP_WQE_RESIZE" },
-       { HCA_CAP_RAW_PACKET_MCAST, "HCA_CAP_RAW_PACKET_MCAST" },
-       { HCA_CAP_SHUTDOWN_PORT, "HCA_CAP_SHUTDOWN_PORT" },
-       { HCA_CAP_RC_LL_QP, "HCA_CAP_RC_LL_QP" },
-       { HCA_CAP_SRQ, "HCA_CAP_SRQ" },
-       { HCA_CAP_UD_LL_QP, "HCA_CAP_UD_LL_QP" },
-       { HCA_CAP_RESIZE_MR, "HCA_CAP_RESIZE_MR" },
-       { HCA_CAP_MINI_QP, "HCA_CAP_MINI_QP" },
-       { HCA_CAP_H_ALLOC_RES_SYNC, "HCA_CAP_H_ALLOC_RES_SYNC" },
-};
-
-static int ehca_sense_attributes(struct ehca_shca *shca)
-{
-       int i, ret = 0;
-       u64 h_ret;
-       struct hipz_query_hca *rblock;
-       struct hipz_query_port *port;
-       const char *loc_code;
-
-       static const u32 pgsize_map[] = {
-               HCA_CAP_MR_PGSIZE_4K,  0x1000,
-               HCA_CAP_MR_PGSIZE_64K, 0x10000,
-               HCA_CAP_MR_PGSIZE_1M,  0x100000,
-               HCA_CAP_MR_PGSIZE_16M, 0x1000000,
-       };
-
-       ehca_gen_dbg("Probing adapter %s...",
-                    shca->ofdev->dev.of_node->full_name);
-       loc_code = of_get_property(shca->ofdev->dev.of_node, "ibm,loc-code",
-                                  NULL);
-       if (loc_code)
-               ehca_gen_dbg(" ... location code=%s", loc_code);
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_gen_err("Cannot allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_hca(shca->ipz_hca_handle, rblock);
-       if (h_ret != H_SUCCESS) {
-               ehca_gen_err("Cannot query device properties. h_ret=%lli",
-                            h_ret);
-               ret = -EPERM;
-               goto sense_attributes1;
-       }
-
-       if (ehca_nr_ports == 1)
-               shca->num_ports = 1;
-       else
-               shca->num_ports = (u8)rblock->num_ports;
-
-       ehca_gen_dbg(" ... found %x ports", rblock->num_ports);
-
-       if (ehca_hw_level == 0) {
-               u32 hcaaver;
-               u32 revid;
-
-               hcaaver = EHCA_BMASK_GET(EHCA_HCAAVER, rblock->hw_ver);
-               revid   = EHCA_BMASK_GET(EHCA_REVID, rblock->hw_ver);
-
-               ehca_gen_dbg(" ... hardware version=%x:%x", hcaaver, revid);
-
-               if (hcaaver == 1) {
-                       if (revid <= 3)
-                               shca->hw_level = 0x10 | (revid + 1);
-                       else
-                               shca->hw_level = 0x14;
-               } else if (hcaaver == 2) {
-                       if (revid == 0)
-                               shca->hw_level = 0x21;
-                       else if (revid == 0x10)
-                               shca->hw_level = 0x22;
-                       else if (revid == 0x20 || revid == 0x21)
-                               shca->hw_level = 0x23;
-               }
-
-               if (!shca->hw_level) {
-                       ehca_gen_warn("unknown hardware version"
-                                     " - assuming default level");
-                       shca->hw_level = 0x22;
-               }
-       } else
-               shca->hw_level = ehca_hw_level;
-       ehca_gen_dbg(" ... hardware level=%x", shca->hw_level);
-
-       shca->hca_cap = rblock->hca_cap_indicators;
-       ehca_gen_dbg(" ... HCA capabilities:");
-       for (i = 0; i < ARRAY_SIZE(hca_cap_descr); i++)
-               if (EHCA_BMASK_GET(hca_cap_descr[i].mask, shca->hca_cap))
-                       ehca_gen_dbg("   %s", hca_cap_descr[i].descr);
-
-       /* Autodetect hCall locking -- the "H_ALLOC_RESOURCE synced" flag is
-        * a firmware property, so it's valid across all adapters
-        */
-       if (ehca_lock_hcalls == -1)
-               ehca_lock_hcalls = !EHCA_BMASK_GET(HCA_CAP_H_ALLOC_RES_SYNC,
-                                       shca->hca_cap);
-
-       /* translate supported MR page sizes; always support 4K */
-       shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
-       for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
-               if (rblock->memory_page_size_supported & pgsize_map[i])
-                       shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
-
-       /* Set maximum number of CQs and QPs to calculate EQ size */
-       if (shca->max_num_qps == -1)
-               shca->max_num_qps = min_t(int, rblock->max_qp,
-                                         EHCA_MAX_NUM_QUEUES);
-       else if (shca->max_num_qps < 1 || shca->max_num_qps > rblock->max_qp) {
-               ehca_gen_warn("The requested number of QPs is out of range "
-                             "(1 - %i) specified by HW. Value is set to %i",
-                             rblock->max_qp, rblock->max_qp);
-               shca->max_num_qps = rblock->max_qp;
-       }
-
-       if (shca->max_num_cqs == -1)
-               shca->max_num_cqs = min_t(int, rblock->max_cq,
-                                         EHCA_MAX_NUM_QUEUES);
-       else if (shca->max_num_cqs < 1 || shca->max_num_cqs > rblock->max_cq) {
-               ehca_gen_warn("The requested number of CQs is out of range "
-                             "(1 - %i) specified by HW. Value is set to %i",
-                             rblock->max_cq, rblock->max_cq);
-               shca->max_num_cqs = rblock->max_cq;
-       }
-
-       /* query max MTU from first port -- it's the same for all ports */
-       port = (struct hipz_query_port *)rblock;
-       h_ret = hipz_h_query_port(shca->ipz_hca_handle, 1, port);
-       if (h_ret != H_SUCCESS) {
-               ehca_gen_err("Cannot query port properties. h_ret=%lli",
-                            h_ret);
-               ret = -EPERM;
-               goto sense_attributes1;
-       }
-
-       shca->max_mtu = port->max_mtu;
-
-sense_attributes1:
-       ehca_free_fw_ctrlblock(rblock);
-       return ret;
-}
-
-static int init_node_guid(struct ehca_shca *shca)
-{
-       int ret = 0;
-       struct hipz_query_hca *rblock;
-
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!rblock) {
-               ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
-               return -ENOMEM;
-       }
-
-       if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "Can't query device properties");
-               ret = -EINVAL;
-               goto init_node_guid1;
-       }
-
-       memcpy(&shca->ib_device.node_guid, &rblock->node_guid, sizeof(u64));
-
-init_node_guid1:
-       ehca_free_fw_ctrlblock(rblock);
-       return ret;
-}
-
-static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num,
-                              struct ib_port_immutable *immutable)
-{
-       struct ib_port_attr attr;
-       int err;
-
-       err = ehca_query_port(ibdev, port_num, &attr);
-       if (err)
-               return err;
-
-       immutable->pkey_tbl_len = attr.pkey_tbl_len;
-       immutable->gid_tbl_len = attr.gid_tbl_len;
-       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
-       immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
-       return 0;
-}
-
-static int ehca_init_device(struct ehca_shca *shca)
-{
-       int ret;
-
-       ret = init_node_guid(shca);
-       if (ret)
-               return ret;
-
-       strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
-       shca->ib_device.owner               = THIS_MODULE;
-
-       shca->ib_device.uverbs_abi_ver      = 8;
-       shca->ib_device.uverbs_cmd_mask     =
-               (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
-               (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
-               (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
-               (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
-               (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
-               (1ull << IB_USER_VERBS_CMD_REG_MR)              |
-               (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
-               (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
-               (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
-               (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
-               (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
-               (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
-               (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
-               (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
-
-       shca->ib_device.node_type           = RDMA_NODE_IB_CA;
-       shca->ib_device.phys_port_cnt       = shca->num_ports;
-       shca->ib_device.num_comp_vectors    = 1;
-       shca->ib_device.dma_device          = &shca->ofdev->dev;
-       shca->ib_device.query_device        = ehca_query_device;
-       shca->ib_device.query_port          = ehca_query_port;
-       shca->ib_device.query_gid           = ehca_query_gid;
-       shca->ib_device.query_pkey          = ehca_query_pkey;
-       /* shca->ib_device.modify_device    = ehca_modify_device    */
-       shca->ib_device.modify_port         = ehca_modify_port;
-       shca->ib_device.alloc_ucontext      = ehca_alloc_ucontext;
-       shca->ib_device.dealloc_ucontext    = ehca_dealloc_ucontext;
-       shca->ib_device.alloc_pd            = ehca_alloc_pd;
-       shca->ib_device.dealloc_pd          = ehca_dealloc_pd;
-       shca->ib_device.create_ah           = ehca_create_ah;
-       /* shca->ib_device.modify_ah        = ehca_modify_ah;       */
-       shca->ib_device.query_ah            = ehca_query_ah;
-       shca->ib_device.destroy_ah          = ehca_destroy_ah;
-       shca->ib_device.create_qp           = ehca_create_qp;
-       shca->ib_device.modify_qp           = ehca_modify_qp;
-       shca->ib_device.query_qp            = ehca_query_qp;
-       shca->ib_device.destroy_qp          = ehca_destroy_qp;
-       shca->ib_device.post_send           = ehca_post_send;
-       shca->ib_device.post_recv           = ehca_post_recv;
-       shca->ib_device.create_cq           = ehca_create_cq;
-       shca->ib_device.destroy_cq          = ehca_destroy_cq;
-       shca->ib_device.resize_cq           = ehca_resize_cq;
-       shca->ib_device.poll_cq             = ehca_poll_cq;
-       /* shca->ib_device.peek_cq          = ehca_peek_cq;         */
-       shca->ib_device.req_notify_cq       = ehca_req_notify_cq;
-       /* shca->ib_device.req_ncomp_notif  = ehca_req_ncomp_notif; */
-       shca->ib_device.get_dma_mr          = ehca_get_dma_mr;
-       shca->ib_device.reg_user_mr         = ehca_reg_user_mr;
-       shca->ib_device.dereg_mr            = ehca_dereg_mr;
-       shca->ib_device.alloc_mw            = ehca_alloc_mw;
-       shca->ib_device.dealloc_mw          = ehca_dealloc_mw;
-       shca->ib_device.alloc_fmr           = ehca_alloc_fmr;
-       shca->ib_device.map_phys_fmr        = ehca_map_phys_fmr;
-       shca->ib_device.unmap_fmr           = ehca_unmap_fmr;
-       shca->ib_device.dealloc_fmr         = ehca_dealloc_fmr;
-       shca->ib_device.attach_mcast        = ehca_attach_mcast;
-       shca->ib_device.detach_mcast        = ehca_detach_mcast;
-       shca->ib_device.process_mad         = ehca_process_mad;
-       shca->ib_device.mmap                = ehca_mmap;
-       shca->ib_device.dma_ops             = &ehca_dma_mapping_ops;
-       shca->ib_device.get_port_immutable  = ehca_port_immutable;
-
-       if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
-               shca->ib_device.uverbs_cmd_mask |=
-                       (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
-                       (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
-                       (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
-                       (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
-
-               shca->ib_device.create_srq          = ehca_create_srq;
-               shca->ib_device.modify_srq          = ehca_modify_srq;
-               shca->ib_device.query_srq           = ehca_query_srq;
-               shca->ib_device.destroy_srq         = ehca_destroy_srq;
-               shca->ib_device.post_srq_recv       = ehca_post_srq_recv;
-       }
-
-       return ret;
-}
-
-static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
-{
-       struct ehca_sport *sport = &shca->sport[port - 1];
-       struct ib_cq *ibcq;
-       struct ib_qp *ibqp;
-       struct ib_qp_init_attr qp_init_attr;
-       struct ib_cq_init_attr cq_attr = {};
-       int ret;
-
-       if (sport->ibcq_aqp1) {
-               ehca_err(&shca->ib_device, "AQP1 CQ is already created.");
-               return -EPERM;
-       }
-
-       cq_attr.cqe = 10;
-       ibcq = ib_create_cq(&shca->ib_device, NULL, NULL, (void *)(-1),
-                           &cq_attr);
-       if (IS_ERR(ibcq)) {
-               ehca_err(&shca->ib_device, "Cannot create AQP1 CQ.");
-               return PTR_ERR(ibcq);
-       }
-       sport->ibcq_aqp1 = ibcq;
-
-       if (sport->ibqp_sqp[IB_QPT_GSI]) {
-               ehca_err(&shca->ib_device, "AQP1 QP is already created.");
-               ret = -EPERM;
-               goto create_aqp1;
-       }
-
-       memset(&qp_init_attr, 0, sizeof(struct ib_qp_init_attr));
-       qp_init_attr.send_cq          = ibcq;
-       qp_init_attr.recv_cq          = ibcq;
-       qp_init_attr.sq_sig_type      = IB_SIGNAL_ALL_WR;
-       qp_init_attr.cap.max_send_wr  = 100;
-       qp_init_attr.cap.max_recv_wr  = 100;
-       qp_init_attr.cap.max_send_sge = 2;
-       qp_init_attr.cap.max_recv_sge = 1;
-       qp_init_attr.qp_type          = IB_QPT_GSI;
-       qp_init_attr.port_num         = port;
-       qp_init_attr.qp_context       = NULL;
-       qp_init_attr.event_handler    = NULL;
-       qp_init_attr.srq              = NULL;
-
-       ibqp = ib_create_qp(&shca->pd->ib_pd, &qp_init_attr);
-       if (IS_ERR(ibqp)) {
-               ehca_err(&shca->ib_device, "Cannot create AQP1 QP.");
-               ret = PTR_ERR(ibqp);
-               goto create_aqp1;
-       }
-       sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
-
-       return 0;
-
-create_aqp1:
-       ib_destroy_cq(sport->ibcq_aqp1);
-       return ret;
-}
-
-static int ehca_destroy_aqp1(struct ehca_sport *sport)
-{
-       int ret;
-
-       ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
-       if (ret) {
-               ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
-               return ret;
-       }
-
-       ret = ib_destroy_cq(sport->ibcq_aqp1);
-       if (ret)
-               ehca_gen_err("Cannot destroy AQP1 CQ. ret=%i", ret);
-
-       return ret;
-}
-
-static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
-{
-       return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
-}
-
-static ssize_t ehca_store_debug_level(struct device_driver *ddp,
-                                     const char *buf, size_t count)
-{
-       int value = (*buf) - '0';
-       if (value >= 0 && value <= 9)
-               ehca_debug_level = value;
-       return 1;
-}
-
-static DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
-                  ehca_show_debug_level, ehca_store_debug_level);
-
-static struct attribute *ehca_drv_attrs[] = {
-       &driver_attr_debug_level.attr,
-       NULL
-};
-
-static struct attribute_group ehca_drv_attr_grp = {
-       .attrs = ehca_drv_attrs
-};
-
-static const struct attribute_group *ehca_drv_attr_groups[] = {
-       &ehca_drv_attr_grp,
-       NULL,
-};
-
-#define EHCA_RESOURCE_ATTR(name)                                           \
-static ssize_t  ehca_show_##name(struct device *dev,                       \
-                                struct device_attribute *attr,            \
-                                char *buf)                                \
-{                                                                         \
-       struct ehca_shca *shca;                                            \
-       struct hipz_query_hca *rblock;                                     \
-       int data;                                                          \
-                                                                          \
-       shca = dev_get_drvdata(dev);                                       \
-                                                                          \
-       rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);                      \
-       if (!rblock) {                                                     \
-               dev_err(dev, "Can't allocate rblock memory.\n");           \
-               return 0;                                                  \
-       }                                                                  \
-                                                                          \
-       if (hipz_h_query_hca(shca->ipz_hca_handle, rblock) != H_SUCCESS) { \
-               dev_err(dev, "Can't query device properties\n");           \
-               ehca_free_fw_ctrlblock(rblock);                            \
-               return 0;                                                  \
-       }                                                                  \
-                                                                          \
-       data = rblock->name;                                               \
-       ehca_free_fw_ctrlblock(rblock);                                    \
-                                                                          \
-       if ((strcmp(#name, "num_ports") == 0) && (ehca_nr_ports == 1))     \
-               return snprintf(buf, 256, "1\n");                          \
-       else                                                               \
-               return snprintf(buf, 256, "%d\n", data);                   \
-                                                                          \
-}                                                                         \
-static DEVICE_ATTR(name, S_IRUGO, ehca_show_##name, NULL);
-
-EHCA_RESOURCE_ATTR(num_ports);
-EHCA_RESOURCE_ATTR(hw_ver);
-EHCA_RESOURCE_ATTR(max_eq);
-EHCA_RESOURCE_ATTR(cur_eq);
-EHCA_RESOURCE_ATTR(max_cq);
-EHCA_RESOURCE_ATTR(cur_cq);
-EHCA_RESOURCE_ATTR(max_qp);
-EHCA_RESOURCE_ATTR(cur_qp);
-EHCA_RESOURCE_ATTR(max_mr);
-EHCA_RESOURCE_ATTR(cur_mr);
-EHCA_RESOURCE_ATTR(max_mw);
-EHCA_RESOURCE_ATTR(cur_mw);
-EHCA_RESOURCE_ATTR(max_pd);
-EHCA_RESOURCE_ATTR(max_ah);
-
-static ssize_t ehca_show_adapter_handle(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
-{
-       struct ehca_shca *shca = dev_get_drvdata(dev);
-
-       return sprintf(buf, "%llx\n", shca->ipz_hca_handle.handle);
-
-}
-static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
-
-static struct attribute *ehca_dev_attrs[] = {
-       &dev_attr_adapter_handle.attr,
-       &dev_attr_num_ports.attr,
-       &dev_attr_hw_ver.attr,
-       &dev_attr_max_eq.attr,
-       &dev_attr_cur_eq.attr,
-       &dev_attr_max_cq.attr,
-       &dev_attr_cur_cq.attr,
-       &dev_attr_max_qp.attr,
-       &dev_attr_cur_qp.attr,
-       &dev_attr_max_mr.attr,
-       &dev_attr_cur_mr.attr,
-       &dev_attr_max_mw.attr,
-       &dev_attr_cur_mw.attr,
-       &dev_attr_max_pd.attr,
-       &dev_attr_max_ah.attr,
-       NULL
-};
-
-static struct attribute_group ehca_dev_attr_grp = {
-       .attrs = ehca_dev_attrs
-};
-
-static int ehca_probe(struct platform_device *dev)
-{
-       struct ehca_shca *shca;
-       const u64 *handle;
-       struct ib_pd *ibpd;
-       int ret, i, eq_size;
-       unsigned long flags;
-
-       handle = of_get_property(dev->dev.of_node, "ibm,hca-handle", NULL);
-       if (!handle) {
-               ehca_gen_err("Cannot get eHCA handle for adapter: %s.",
-                            dev->dev.of_node->full_name);
-               return -ENODEV;
-       }
-
-       if (!(*handle)) {
-               ehca_gen_err("Wrong eHCA handle for adapter: %s.",
-                            dev->dev.of_node->full_name);
-               return -ENODEV;
-       }
-
-       shca = (struct ehca_shca *)ib_alloc_device(sizeof(*shca));
-       if (!shca) {
-               ehca_gen_err("Cannot allocate shca memory.");
-               return -ENOMEM;
-       }
-
-       mutex_init(&shca->modify_mutex);
-       atomic_set(&shca->num_cqs, 0);
-       atomic_set(&shca->num_qps, 0);
-       shca->max_num_qps = ehca_max_qp;
-       shca->max_num_cqs = ehca_max_cq;
-
-       for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
-               spin_lock_init(&shca->sport[i].mod_sqp_lock);
-
-       shca->ofdev = dev;
-       shca->ipz_hca_handle.handle = *handle;
-       dev_set_drvdata(&dev->dev, shca);
-
-       ret = ehca_sense_attributes(shca);
-       if (ret < 0) {
-               ehca_gen_err("Cannot sense eHCA attributes.");
-               goto probe1;
-       }
-
-       ret = ehca_init_device(shca);
-       if (ret) {
-               ehca_gen_err("Cannot init ehca device struct");
-               goto probe1;
-       }
-
-       eq_size = 2 * shca->max_num_cqs + 4 * shca->max_num_qps;
-       /* create event queues */
-       ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
-       if (ret) {
-               ehca_err(&shca->ib_device, "Cannot create EQ.");
-               goto probe1;
-       }
-
-       ret = ehca_create_eq(shca, &shca->neq, EHCA_NEQ, 513);
-       if (ret) {
-               ehca_err(&shca->ib_device, "Cannot create NEQ.");
-               goto probe3;
-       }
-
-       /* create internal protection domain */
-       ibpd = ehca_alloc_pd(&shca->ib_device, (void *)(-1), NULL);
-       if (IS_ERR(ibpd)) {
-               ehca_err(&shca->ib_device, "Cannot create internal PD.");
-               ret = PTR_ERR(ibpd);
-               goto probe4;
-       }
-
-       shca->pd = container_of(ibpd, struct ehca_pd, ib_pd);
-       shca->pd->ib_pd.device = &shca->ib_device;
-
-       /* create internal max MR */
-       ret = ehca_reg_internal_maxmr(shca, shca->pd, &shca->maxmr);
-
-       if (ret) {
-               ehca_err(&shca->ib_device, "Cannot create internal MR ret=%i",
-                        ret);
-               goto probe5;
-       }
-
-       ret = ib_register_device(&shca->ib_device, NULL);
-       if (ret) {
-               ehca_err(&shca->ib_device,
-                        "ib_register_device() failed ret=%i", ret);
-               goto probe6;
-       }
-
-       /* create AQP1 for port 1 */
-       if (ehca_open_aqp1 == 1) {
-               shca->sport[0].port_state = IB_PORT_DOWN;
-               ret = ehca_create_aqp1(shca, 1);
-               if (ret) {
-                       ehca_err(&shca->ib_device,
-                                "Cannot create AQP1 for port 1.");
-                       goto probe7;
-               }
-       }
-
-       /* create AQP1 for port 2 */
-       if ((ehca_open_aqp1 == 1) && (shca->num_ports == 2)) {
-               shca->sport[1].port_state = IB_PORT_DOWN;
-               ret = ehca_create_aqp1(shca, 2);
-               if (ret) {
-                       ehca_err(&shca->ib_device,
-                                "Cannot create AQP1 for port 2.");
-                       goto probe8;
-               }
-       }
-
-       ret = sysfs_create_group(&dev->dev.kobj, &ehca_dev_attr_grp);
-       if (ret) /* only complain; we can live without attributes */
-               ehca_err(&shca->ib_device,
-                        "Cannot create device attributes ret=%d", ret);
-
-       spin_lock_irqsave(&shca_list_lock, flags);
-       list_add(&shca->shca_list, &shca_list);
-       spin_unlock_irqrestore(&shca_list_lock, flags);
-
-       return 0;
-
-probe8:
-       ret = ehca_destroy_aqp1(&shca->sport[0]);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy AQP1 for port 1. ret=%i", ret);
-
-probe7:
-       ib_unregister_device(&shca->ib_device);
-
-probe6:
-       ret = ehca_dereg_internal_maxmr(shca);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy internal MR. ret=%x", ret);
-
-probe5:
-       ret = ehca_dealloc_pd(&shca->pd->ib_pd);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy internal PD. ret=%x", ret);
-
-probe4:
-       ret = ehca_destroy_eq(shca, &shca->neq);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy NEQ. ret=%x", ret);
-
-probe3:
-       ret = ehca_destroy_eq(shca, &shca->eq);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy EQ. ret=%x", ret);
-
-probe1:
-       ib_dealloc_device(&shca->ib_device);
-
-       return -EINVAL;
-}
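ehca_probe() above unwinds partially completed initialization through cascading goto labels, each label undoing only the steps that already succeeded. A stripped-down, self-contained sketch of the same pattern; setup_eq/setup_pd/teardown_eq are illustrative stand-ins, not driver functions.

#include <stdio.h>

/* Illustrative stand-ins for the real init steps. */
static int setup_eq(void)  { return 0; }
static int setup_pd(void)  { return -1; /* simulate a failure */ }
static void teardown_eq(void) { puts("teardown_eq"); }

static int example_probe(void)
{
        int ret;

        ret = setup_eq();
        if (ret)
                goto err_eq;

        ret = setup_pd();
        if (ret)
                goto err_pd;

        return 0;

err_pd:
        teardown_eq();          /* undo only the steps that succeeded */
err_eq:
        return ret;
}

int main(void)
{
        printf("probe returned %d\n", example_probe());
        return 0;
}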
-
-static int ehca_remove(struct platform_device *dev)
-{
-       struct ehca_shca *shca = dev_get_drvdata(&dev->dev);
-       unsigned long flags;
-       int ret;
-
-       sysfs_remove_group(&dev->dev.kobj, &ehca_dev_attr_grp);
-
-       if (ehca_open_aqp1 == 1) {
-               int i;
-               for (i = 0; i < shca->num_ports; i++) {
-                       ret = ehca_destroy_aqp1(&shca->sport[i]);
-                       if (ret)
-                               ehca_err(&shca->ib_device,
-                                        "Cannot destroy AQP1 for port %x "
-                                        "ret=%i", i, ret);
-               }
-       }
-
-       ib_unregister_device(&shca->ib_device);
-
-       ret = ehca_dereg_internal_maxmr(shca);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy internal MR. ret=%i", ret);
-
-       ret = ehca_dealloc_pd(&shca->pd->ib_pd);
-       if (ret)
-               ehca_err(&shca->ib_device,
-                        "Cannot destroy internal PD. ret=%i", ret);
-
-       ret = ehca_destroy_eq(shca, &shca->eq);
-       if (ret)
-               ehca_err(&shca->ib_device, "Cannot destroy EQ. ret=%i", ret);
-
-       ret = ehca_destroy_eq(shca, &shca->neq);
-       if (ret)
-               ehca_err(&shca->ib_device, "Cannot destroy NEQ. ret=%i", ret);
-
-       ib_dealloc_device(&shca->ib_device);
-
-       spin_lock_irqsave(&shca_list_lock, flags);
-       list_del(&shca->shca_list);
-       spin_unlock_irqrestore(&shca_list_lock, flags);
-
-       return ret;
-}
-
-static const struct of_device_id ehca_device_table[] = {
-       {
-               .name       = "lhca",
-               .compatible = "IBM,lhca",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, ehca_device_table);
-
-static struct platform_driver ehca_driver = {
-       .probe       = ehca_probe,
-       .remove      = ehca_remove,
-       .driver = {
-               .name = "ehca",
-               .owner = THIS_MODULE,
-               .groups = ehca_drv_attr_groups,
-               .of_match_table = ehca_device_table,
-       },
-};
-
-void ehca_poll_eqs(unsigned long data)
-{
-       struct ehca_shca *shca;
-
-       spin_lock(&shca_list_lock);
-       list_for_each_entry(shca, &shca_list, shca_list) {
-               if (shca->eq.is_initialized) {
-                       /* call deadman proc only if the eq offset does not change */
-                       struct ehca_eq *eq = &shca->eq;
-                       int max = 3;
-                       volatile u64 q_ofs, q_ofs2;
-                       unsigned long flags;
-                       spin_lock_irqsave(&eq->spinlock, flags);
-                       q_ofs = eq->ipz_queue.current_q_offset;
-                       spin_unlock_irqrestore(&eq->spinlock, flags);
-                       do {
-                               spin_lock_irqsave(&eq->spinlock, flags);
-                               q_ofs2 = eq->ipz_queue.current_q_offset;
-                               spin_unlock_irqrestore(&eq->spinlock, flags);
-                               max--;
-                       } while (q_ofs == q_ofs2 && max > 0);
-                       if (q_ofs == q_ofs2)
-                               ehca_process_eq(shca, 0);
-               }
-       }
-       mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
-       spin_unlock(&shca_list_lock);
-}
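The loop above is a deadman check: the EQ offset is sampled up to three times, and the EQ is processed by hand only if the offset never moves, i.e. interrupts appear to have stopped. A condensed userspace model of that check; sample_offset() stands in for reading eq->ipz_queue.current_q_offset under the EQ spinlock.

#include <stdio.h>

static unsigned long long sample_offset(void)
{
        static unsigned long long ofs = 42;     /* pretend the EQ is idle */
        return ofs;
}

int main(void)
{
        unsigned long long q_ofs = sample_offset(), q_ofs2;
        int max = 3;

        do {
                q_ofs2 = sample_offset();
                max--;
        } while (q_ofs == q_ofs2 && max > 0);

        if (q_ofs == q_ofs2)
                printf("offset stuck at %llu: process EQ by hand\n", q_ofs2);
        else
                printf("EQ is making progress\n");
        return 0;
}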
-
-static int ehca_mem_notifier(struct notifier_block *nb,
-                            unsigned long action, void *data)
-{
-       static unsigned long ehca_dmem_warn_time;
-       unsigned long flags;
-
-       switch (action) {
-       case MEM_CANCEL_OFFLINE:
-       case MEM_CANCEL_ONLINE:
-       case MEM_ONLINE:
-       case MEM_OFFLINE:
-               return NOTIFY_OK;
-       case MEM_GOING_ONLINE:
-       case MEM_GOING_OFFLINE:
-               /* only ok if no hca is attached to the lpar */
-               spin_lock_irqsave(&shca_list_lock, flags);
-               if (list_empty(&shca_list)) {
-                       spin_unlock_irqrestore(&shca_list_lock, flags);
-                       return NOTIFY_OK;
-               } else {
-                       spin_unlock_irqrestore(&shca_list_lock, flags);
-                       if (printk_timed_ratelimit(&ehca_dmem_warn_time,
-                                                  30 * 1000))
-                               ehca_gen_err("DMEM operations are not allowed "
-                                            "in conjunction with eHCA");
-                       return NOTIFY_BAD;
-               }
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block ehca_mem_nb = {
-       .notifier_call = ehca_mem_notifier,
-};
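Returning NOTIFY_BAD from the GOING_ONLINE/GOING_OFFLINE cases above vetoes the memory hotplug operation while an eHCA is bound, and printk_timed_ratelimit() keeps the warning to at most one message per 30 seconds. A rough userspace model of that rate limiting, using a monotonic clock in place of jiffies.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Return true at most once per interval_ms (model, not the kernel helper). */
static bool timed_ratelimit(long long *last_ms, long long interval_ms)
{
        struct timespec ts;
        long long now_ms;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        now_ms = ts.tv_sec * 1000LL + ts.tv_nsec / 1000000LL;

        if (*last_ms == 0 || now_ms - *last_ms >= interval_ms) {
                *last_ms = now_ms;
                return true;
        }
        return false;
}

int main(void)
{
        static long long warn_time;
        int i;

        for (i = 0; i < 3; i++)
                if (timed_ratelimit(&warn_time, 30 * 1000))
                        puts("DMEM operations are not allowed in conjunction with eHCA");
        return 0;
}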
-
-static int __init ehca_module_init(void)
-{
-       int ret;
-
-       printk(KERN_INFO "eHCA Infiniband Device Driver "
-              "(Version " HCAD_VERSION ")\n");
-
-       ret = ehca_create_comp_pool();
-       if (ret) {
-               ehca_gen_err("Cannot create comp pool.");
-               return ret;
-       }
-
-       ret = ehca_create_slab_caches();
-       if (ret) {
-               ehca_gen_err("Cannot create SLAB caches");
-               ret = -ENOMEM;
-               goto module_init1;
-       }
-
-       ret = ehca_create_busmap();
-       if (ret) {
-               ehca_gen_err("Cannot create busmap.");
-               goto module_init2;
-       }
-
-       ret = ibmebus_register_driver(&ehca_driver);
-       if (ret) {
-               ehca_gen_err("Cannot register eHCA device driver");
-               ret = -EINVAL;
-               goto module_init3;
-       }
-
-       ret = register_memory_notifier(&ehca_mem_nb);
-       if (ret) {
-               ehca_gen_err("Failed registering memory add/remove notifier");
-               goto module_init4;
-       }
-
-       if (ehca_poll_all_eqs != 1) {
-               ehca_gen_err("WARNING!!!");
-               ehca_gen_err("It is possible to lose interrupts.");
-       } else {
-               init_timer(&poll_eqs_timer);
-               poll_eqs_timer.function = ehca_poll_eqs;
-               poll_eqs_timer.expires = jiffies + HZ;
-               add_timer(&poll_eqs_timer);
-       }
-
-       return 0;
-
-module_init4:
-       ibmebus_unregister_driver(&ehca_driver);
-
-module_init3:
-       ehca_destroy_busmap();
-
-module_init2:
-       ehca_destroy_slab_caches();
-
-module_init1:
-       ehca_destroy_comp_pool();
-       return ret;
-}
-
-static void __exit ehca_module_exit(void)
-{
-       if (ehca_poll_all_eqs == 1)
-               del_timer_sync(&poll_eqs_timer);
-
-       ibmebus_unregister_driver(&ehca_driver);
-
-       unregister_memory_notifier(&ehca_mem_nb);
-
-       ehca_destroy_busmap();
-
-       ehca_destroy_slab_caches();
-
-       ehca_destroy_comp_pool();
-
-       idr_destroy(&ehca_cq_idr);
-       idr_destroy(&ehca_qp_idr);
-}
-
-module_init(ehca_module_init);
-module_exit(ehca_module_exit);
diff --git a/drivers/staging/rdma/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
deleted file mode 100644 (file)
index cec1815..0000000
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  mcast  functions
- *
- *  Authors: Khadija Souissi <souissik@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-#define MAX_MC_LID 0xFFFE
-#define MIN_MC_LID 0xC000      /* Multicast limits */
-#define EHCA_VALID_MULTICAST_GID(gid)  ((gid)[0] == 0xFF)
-#define EHCA_VALID_MULTICAST_LID(lid) \
-       (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
-
-int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-       struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
-                                             ib_device);
-       union ib_gid my_gid;
-       u64 subnet_prefix, interface_id, h_ret;
-
-       if (ibqp->qp_type != IB_QPT_UD) {
-               ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type);
-               return -EINVAL;
-       }
-
-       if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-               ehca_err(ibqp->device, "invalid multicast gid");
-               return -EINVAL;
-       } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-               ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
-               return -EINVAL;
-       }
-
-       memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
-       subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
-       interface_id = be64_to_cpu(my_gid.global.interface_id);
-       h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle,
-                                  my_qp->ipz_qp_handle,
-                                  my_qp->galpas.kernel,
-                                  lid, subnet_prefix, interface_id);
-       if (h_ret != H_SUCCESS)
-               ehca_err(ibqp->device,
-                        "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed "
-                        "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
-       return ehca2ib_return_code(h_ret);
-}
-
-int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-       struct ehca_shca *shca = container_of(ibqp->pd->device,
-                                             struct ehca_shca, ib_device);
-       union ib_gid my_gid;
-       u64 subnet_prefix, interface_id, h_ret;
-
-       if (ibqp->qp_type != IB_QPT_UD) {
-               ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type);
-               return -EINVAL;
-       }
-
-       if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) {
-               ehca_err(ibqp->device, "invalid multicast gid");
-               return -EINVAL;
-       } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) {
-               ehca_err(ibqp->device, "invalid multicast lid=%x", lid);
-               return -EINVAL;
-       }
-
-       memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
-
-       subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
-       interface_id = be64_to_cpu(my_gid.global.interface_id);
-       h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle,
-                                  my_qp->ipz_qp_handle,
-                                  my_qp->galpas.kernel,
-                                  lid, subnet_prefix, interface_id);
-       if (h_ret != H_SUCCESS)
-               ehca_err(ibqp->device,
-                        "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed "
-                        "h_ret=%lli", my_qp, ibqp->qp_num, h_ret);
-
-       return ehca2ib_return_code(h_ret);
-}
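These two entry points back the verbs multicast calls; from a userspace consumer the same operation goes through libibverbs. A hedged sketch, assuming a UD QP already created by the caller; the GID and LID values are placeholders, chosen only to satisfy the validity checks above.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <infiniband/verbs.h>

/* Attach an existing UD QP to a multicast group and detach it again. */
static int join_example(struct ibv_qp *qp)
{
        union ibv_gid mgid;
        uint16_t mlid = 0xc001;         /* inside the MIN_MC_LID..MAX_MC_LID window */

        memset(&mgid, 0, sizeof(mgid));
        mgid.raw[0] = 0xff;             /* multicast GIDs start with 0xFF */

        if (ibv_attach_mcast(qp, &mgid, mlid)) {
                perror("ibv_attach_mcast");
                return -1;
        }
        /* ... post receives, handle multicast traffic ... */
        return ibv_detach_mcast(qp, &mgid, mlid);
}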
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
deleted file mode 100644 (file)
index 3367205..0000000
+++ /dev/null
@@ -1,2202 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  MR/MW functions
- *
- *  Authors: Dietmar Decker <ddecker@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-#include <rdma/ib_umem.h>
-
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "hcp_if.h"
-#include "hipz_hw.h"
-
-#define NUM_CHUNKS(length, chunk_size) \
-       (((length) + (chunk_size) - 1) / (chunk_size))
-
-/* max number of rpages (per hcall register_rpages) */
-#define MAX_RPAGES 512
-
-/* DMEM toleration management */
-#define EHCA_SECTSHIFT        SECTION_SIZE_BITS
-#define EHCA_SECTSIZE          (1UL << EHCA_SECTSHIFT)
-#define EHCA_HUGEPAGESHIFT     34
-#define EHCA_HUGEPAGE_SIZE     (1UL << EHCA_HUGEPAGESHIFT)
-#define EHCA_HUGEPAGE_PFN_MASK ((EHCA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
-#define EHCA_INVAL_ADDR        0xFFFFFFFFFFFFFFFFULL
-#define EHCA_DIR_INDEX_SHIFT 13                   /* 8k Entries in 64k block */
-#define EHCA_TOP_INDEX_SHIFT (EHCA_DIR_INDEX_SHIFT * 2)
-#define EHCA_MAP_ENTRIES (1 << EHCA_DIR_INDEX_SHIFT)
-#define EHCA_TOP_MAP_SIZE (0x10000)               /* currently fixed map size */
-#define EHCA_DIR_MAP_SIZE (0x10000)
-#define EHCA_ENT_MAP_SIZE (0x10000)
-#define EHCA_INDEX_MASK (EHCA_MAP_ENTRIES - 1)
-
-static unsigned long ehca_mr_len;
-
-/*
- * Memory map data structures
- */
-struct ehca_dir_bmap {
-       u64 ent[EHCA_MAP_ENTRIES];
-};
-struct ehca_top_bmap {
-       struct ehca_dir_bmap *dir[EHCA_MAP_ENTRIES];
-};
-struct ehca_bmap {
-       struct ehca_top_bmap *top[EHCA_MAP_ENTRIES];
-};
-
-static struct ehca_bmap *ehca_bmap;
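The constants and structs above describe a three-level map (top/dir/ent) with 8K entries per level, indexed by pieces of the memory-section number. A standalone sketch of that index split; EX_SECTSHIFT is an assumed value standing in for SECTION_SIZE_BITS.

#include <stdio.h>

#define EX_SECTSHIFT            24      /* assumed stand-in for SECTION_SIZE_BITS */
#define EX_DIR_INDEX_SHIFT      13      /* 8k entries per 64k block */
#define EX_TOP_INDEX_SHIFT      (EX_DIR_INDEX_SHIFT * 2)
#define EX_MAP_ENTRIES          (1 << EX_DIR_INDEX_SHIFT)
#define EX_INDEX_MASK           (EX_MAP_ENTRIES - 1)

int main(void)
{
        unsigned long long addr = 0x123456789000ULL;    /* arbitrary example */
        unsigned long long sect = addr >> EX_SECTSHIFT;

        unsigned top = (sect >> EX_TOP_INDEX_SHIFT) & EX_INDEX_MASK;
        unsigned dir = (sect >> EX_DIR_INDEX_SHIFT) & EX_INDEX_MASK;
        unsigned ent = sect & EX_INDEX_MASK;

        printf("top=%u dir=%u ent=%u\n", top, dir, ent);
        return 0;
}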
-
-static struct kmem_cache *mr_cache;
-static struct kmem_cache *mw_cache;
-
-enum ehca_mr_pgsize {
-       EHCA_MR_PGSIZE4K  = 0x1000L,
-       EHCA_MR_PGSIZE64K = 0x10000L,
-       EHCA_MR_PGSIZE1M  = 0x100000L,
-       EHCA_MR_PGSIZE16M = 0x1000000L
-};
-
-#define EHCA_MR_PGSHIFT4K  12
-#define EHCA_MR_PGSHIFT64K 16
-#define EHCA_MR_PGSHIFT1M  20
-#define EHCA_MR_PGSHIFT16M 24
-
-static u64 ehca_map_vaddr(void *caddr);
-
-static u32 ehca_encode_hwpage_size(u32 pgsize)
-{
-       int log = ilog2(pgsize);
-       WARN_ON(log < 12 || log > 24 || log & 3);
-       return (log - 12) / 4;
-}
-
-static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
-{
-       return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
-}
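ehca_encode_hwpage_size() maps the four supported page sizes onto the codes 0..3 by rebasing log2 at 4K and dividing by four. A self-contained model of that encoding, with ilog2() replaced by a small loop.

#include <stdio.h>

static unsigned encode_hwpage_size(unsigned long pgsize)
{
        unsigned log = 0;

        while ((1UL << log) < pgsize)
                log++;
        return (log - 12) / 4;          /* 4K/64K/1M/16M -> 0/1/2/3 */
}

int main(void)
{
        unsigned long sizes[] = { 0x1000UL, 0x10000UL, 0x100000UL, 0x1000000UL };
        int i;

        for (i = 0; i < 4; i++)
                printf("%#lx -> %u\n", sizes[i], encode_hwpage_size(sizes[i]));
        return 0;
}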
-
-static struct ehca_mr *ehca_mr_new(void)
-{
-       struct ehca_mr *me;
-
-       me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
-       if (me)
-               spin_lock_init(&me->mrlock);
-       else
-               ehca_gen_err("alloc failed");
-
-       return me;
-}
-
-static void ehca_mr_delete(struct ehca_mr *me)
-{
-       kmem_cache_free(mr_cache, me);
-}
-
-static struct ehca_mw *ehca_mw_new(void)
-{
-       struct ehca_mw *me;
-
-       me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
-       if (me)
-               spin_lock_init(&me->mwlock);
-       else
-               ehca_gen_err("alloc failed");
-
-       return me;
-}
-
-static void ehca_mw_delete(struct ehca_mw *me)
-{
-       kmem_cache_free(mw_cache, me);
-}
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
-{
-       struct ib_mr *ib_mr;
-       int ret;
-       struct ehca_mr *e_maxmr;
-       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_shca *shca =
-               container_of(pd->device, struct ehca_shca, ib_device);
-
-       if (shca->maxmr) {
-               e_maxmr = ehca_mr_new();
-               if (!e_maxmr) {
-                       ehca_err(&shca->ib_device, "out of memory");
-                       ib_mr = ERR_PTR(-ENOMEM);
-                       goto get_dma_mr_exit0;
-               }
-
-               ret = ehca_reg_maxmr(shca, e_maxmr,
-                                    (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)),
-                                    mr_access_flags, e_pd,
-                                    &e_maxmr->ib.ib_mr.lkey,
-                                    &e_maxmr->ib.ib_mr.rkey);
-               if (ret) {
-                       ehca_mr_delete(e_maxmr);
-                       ib_mr = ERR_PTR(ret);
-                       goto get_dma_mr_exit0;
-               }
-               ib_mr = &e_maxmr->ib.ib_mr;
-       } else {
-               ehca_err(&shca->ib_device, "no internal max-MR exists!");
-               ib_mr = ERR_PTR(-EINVAL);
-               goto get_dma_mr_exit0;
-       }
-
-get_dma_mr_exit0:
-       if (IS_ERR(ib_mr))
-               ehca_err(&shca->ib_device, "h_ret=%li pd=%p mr_access_flags=%x",
-                        PTR_ERR(ib_mr), pd, mr_access_flags);
-       return ib_mr;
-} /* end ehca_get_dma_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                              u64 virt, int mr_access_flags,
-                              struct ib_udata *udata)
-{
-       struct ib_mr *ib_mr;
-       struct ehca_mr *e_mr;
-       struct ehca_shca *shca =
-               container_of(pd->device, struct ehca_shca, ib_device);
-       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_mr_pginfo pginfo;
-       int ret, page_shift;
-       u32 num_kpages;
-       u32 num_hwpages;
-       u64 hwpage_size;
-
-       if (!pd) {
-               ehca_gen_err("bad pd=%p", pd);
-               return ERR_PTR(-EFAULT);
-       }
-
-       if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
-            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
-           ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
-            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
-               /*
-                * Remote Write Access requires Local Write Access
-                * Remote Atomic Access requires Local Write Access
-                */
-               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
-                        mr_access_flags);
-               ib_mr = ERR_PTR(-EINVAL);
-               goto reg_user_mr_exit0;
-       }
-
-       if (length == 0 || virt + length < virt) {
-               ehca_err(pd->device, "bad input values: length=%llx "
-                        "virt_base=%llx", length, virt);
-               ib_mr = ERR_PTR(-EINVAL);
-               goto reg_user_mr_exit0;
-       }
-
-       e_mr = ehca_mr_new();
-       if (!e_mr) {
-               ehca_err(pd->device, "out of memory");
-               ib_mr = ERR_PTR(-ENOMEM);
-               goto reg_user_mr_exit0;
-       }
-
-       e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
-                                mr_access_flags, 0);
-       if (IS_ERR(e_mr->umem)) {
-               ib_mr = (void *)e_mr->umem;
-               goto reg_user_mr_exit1;
-       }
-
-       if (e_mr->umem->page_size != PAGE_SIZE) {
-               ehca_err(pd->device, "page size not supported, "
-                        "e_mr->umem->page_size=%x", e_mr->umem->page_size);
-               ib_mr = ERR_PTR(-EINVAL);
-               goto reg_user_mr_exit2;
-       }
-
-       /* determine number of MR pages */
-       num_kpages = NUM_CHUNKS((virt % PAGE_SIZE) + length, PAGE_SIZE);
-       /* select proper hw_pgsize */
-       page_shift = PAGE_SHIFT;
-       if (e_mr->umem->hugetlb) {
-               /* determine page_shift, clamp between 4K and 16M */
-               page_shift = (fls64(length - 1) + 3) & ~3;
-               page_shift = min(max(page_shift, EHCA_MR_PGSHIFT4K),
-                                EHCA_MR_PGSHIFT16M);
-       }
-       hwpage_size = 1UL << page_shift;
-
-       /* now that we have the desired page size, shift until it's
-        * supported, too. 4K is always supported, so this terminates.
-        */
-       while (!(hwpage_size & shca->hca_cap_mr_pgsize))
-               hwpage_size >>= 4;
-
-reg_user_mr_fallback:
-       num_hwpages = NUM_CHUNKS((virt % hwpage_size) + length, hwpage_size);
-       /* register MR on HCA */
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.type = EHCA_MR_PGI_USER;
-       pginfo.hwpage_size = hwpage_size;
-       pginfo.num_kpages = num_kpages;
-       pginfo.num_hwpages = num_hwpages;
-       pginfo.u.usr.region = e_mr->umem;
-       pginfo.next_hwpage = ib_umem_offset(e_mr->umem) / hwpage_size;
-       pginfo.u.usr.next_sg = pginfo.u.usr.region->sg_head.sgl;
-       ret = ehca_reg_mr(shca, e_mr, (u64 *)virt, length, mr_access_flags,
-                         e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
-                         &e_mr->ib.ib_mr.rkey, EHCA_REG_MR);
-       if (ret == -EINVAL && pginfo.hwpage_size > PAGE_SIZE) {
-               ehca_warn(pd->device, "failed to register mr "
-                         "with hwpage_size=%llx", hwpage_size);
-               ehca_info(pd->device, "try to register mr with "
-                         "kpage_size=%lx", PAGE_SIZE);
-               /*
-                * this means kpages are not contiguous for a hw page
-                * try kernel page size as fallback solution
-                */
-               hwpage_size = PAGE_SIZE;
-               goto reg_user_mr_fallback;
-       }
-       if (ret) {
-               ib_mr = ERR_PTR(ret);
-               goto reg_user_mr_exit2;
-       }
-
-       /* successful registration of all pages */
-       return &e_mr->ib.ib_mr;
-
-reg_user_mr_exit2:
-       ib_umem_release(e_mr->umem);
-reg_user_mr_exit1:
-       ehca_mr_delete(e_mr);
-reg_user_mr_exit0:
-       if (IS_ERR(ib_mr))
-               ehca_err(pd->device, "rc=%li pd=%p mr_access_flags=%x udata=%p",
-                        PTR_ERR(ib_mr), pd, mr_access_flags, udata);
-       return ib_mr;
-} /* end ehca_reg_user_mr() */
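The hugetlb branch above picks the hardware page size by rounding the MR length up to the next multiple-of-four power-of-two shift, clamping it to the 4K..16M range, and then shifting down until the HCA capability mask allows it. A self-contained model of that selection; the capability mask value is an assumption for the example and fls64() is re-implemented locally.

#include <stdio.h>

static int fls64_model(unsigned long long x)
{
        int n = 0;

        while (x) {
                x >>= 1;
                n++;
        }
        return n;
}

int main(void)
{
        unsigned long long length = 3ULL << 20;         /* 3 MiB region */
        unsigned long long cap = 0x1000 | 0x10000;      /* assume 4K + 64K supported */
        int page_shift = (fls64_model(length - 1) + 3) & ~3;
        unsigned long long hwpage_size;

        if (page_shift < 12)
                page_shift = 12;
        if (page_shift > 24)
                page_shift = 24;

        hwpage_size = 1ULL << page_shift;
        while (!(hwpage_size & cap))
                hwpage_size >>= 4;      /* 4K is always supported, so this terminates */

        printf("page_shift=%d hwpage_size=%#llx\n", page_shift, hwpage_size);
        return 0;
}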
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dereg_mr(struct ib_mr *mr)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_shca *shca =
-               container_of(mr->device, struct ehca_shca, ib_device);
-       struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
-
-       if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
-               ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
-                        "e_mr->flags=%x", mr, e_mr, e_mr->flags);
-               ret = -EINVAL;
-               goto dereg_mr_exit0;
-       } else if (e_mr == shca->maxmr) {
-               /* should be impossible; reject anyway to be sure */
-               ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
-                        "shca->maxmr=%p mr->lkey=%x",
-                        mr, shca->maxmr, mr->lkey);
-               ret = -EINVAL;
-               goto dereg_mr_exit0;
-       }
-
-       /* TODO: BUSY: MR still has bound window(s) */
-       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lli shca=%p "
-                        "e_mr=%p hca_hndl=%llx mr_hndl=%llx mr->lkey=%x",
-                        h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
-                        e_mr->ipz_mr_handle.handle, mr->lkey);
-               ret = ehca2ib_return_code(h_ret);
-               goto dereg_mr_exit0;
-       }
-
-       if (e_mr->umem)
-               ib_umem_release(e_mr->umem);
-
-       /* successful deregistration */
-       ehca_mr_delete(e_mr);
-
-dereg_mr_exit0:
-       if (ret)
-               ehca_err(mr->device, "ret=%i mr=%p", ret, mr);
-       return ret;
-} /* end ehca_dereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
-{
-       struct ib_mw *ib_mw;
-       u64 h_ret;
-       struct ehca_mw *e_mw;
-       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_shca *shca =
-               container_of(pd->device, struct ehca_shca, ib_device);
-       struct ehca_mw_hipzout_parms hipzout;
-
-       if (type != IB_MW_TYPE_1)
-               return ERR_PTR(-EINVAL);
-
-       e_mw = ehca_mw_new();
-       if (!e_mw) {
-               ib_mw = ERR_PTR(-ENOMEM);
-               goto alloc_mw_exit0;
-       }
-
-       h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
-                                        e_pd->fw_pd, &hipzout);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lli "
-                        "shca=%p hca_hndl=%llx mw=%p",
-                        h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
-               ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
-               goto alloc_mw_exit1;
-       }
-       /* successful MW allocation */
-       e_mw->ipz_mw_handle = hipzout.handle;
-       e_mw->ib_mw.rkey    = hipzout.rkey;
-       return &e_mw->ib_mw;
-
-alloc_mw_exit1:
-       ehca_mw_delete(e_mw);
-alloc_mw_exit0:
-       if (IS_ERR(ib_mw))
-               ehca_err(pd->device, "h_ret=%li pd=%p", PTR_ERR(ib_mw), pd);
-       return ib_mw;
-} /* end ehca_alloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dealloc_mw(struct ib_mw *mw)
-{
-       u64 h_ret;
-       struct ehca_shca *shca =
-               container_of(mw->device, struct ehca_shca, ib_device);
-       struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
-
-       h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lli shca=%p "
-                        "mw=%p rkey=%x hca_hndl=%llx mw_hndl=%llx",
-                        h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
-                        e_mw->ipz_mw_handle.handle);
-               return ehca2ib_return_code(h_ret);
-       }
-       /* successful deallocation */
-       ehca_mw_delete(e_mw);
-       return 0;
-} /* end ehca_dealloc_mw() */
-
-/*----------------------------------------------------------------------*/
-
-struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
-                             int mr_access_flags,
-                             struct ib_fmr_attr *fmr_attr)
-{
-       struct ib_fmr *ib_fmr;
-       struct ehca_shca *shca =
-               container_of(pd->device, struct ehca_shca, ib_device);
-       struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_mr *e_fmr;
-       int ret;
-       u32 tmp_lkey, tmp_rkey;
-       struct ehca_mr_pginfo pginfo;
-       u64 hw_pgsize;
-
-       /* check other parameters */
-       if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
-            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
-           ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
-            !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
-               /*
-                * Remote Write Access requires Local Write Access
-                * Remote Atomic Access requires Local Write Access
-                */
-               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
-                        mr_access_flags);
-               ib_fmr = ERR_PTR(-EINVAL);
-               goto alloc_fmr_exit0;
-       }
-       if (mr_access_flags & IB_ACCESS_MW_BIND) {
-               ehca_err(pd->device, "bad input values: mr_access_flags=%x",
-                        mr_access_flags);
-               ib_fmr = ERR_PTR(-EINVAL);
-               goto alloc_fmr_exit0;
-       }
-       if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
-               ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
-                        "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
-                        fmr_attr->max_pages, fmr_attr->max_maps,
-                        fmr_attr->page_shift);
-               ib_fmr = ERR_PTR(-EINVAL);
-               goto alloc_fmr_exit0;
-       }
-
-       hw_pgsize = 1 << fmr_attr->page_shift;
-       if (!(hw_pgsize & shca->hca_cap_mr_pgsize)) {
-               ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
-                        fmr_attr->page_shift);
-               ib_fmr = ERR_PTR(-EINVAL);
-               goto alloc_fmr_exit0;
-       }
-
-       e_fmr = ehca_mr_new();
-       if (!e_fmr) {
-               ib_fmr = ERR_PTR(-ENOMEM);
-               goto alloc_fmr_exit0;
-       }
-       e_fmr->flags |= EHCA_MR_FLAG_FMR;
-
-       /* register MR on HCA */
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.hwpage_size = hw_pgsize;
-       /*
-        * pginfo.num_hwpages==0, i.e. register_rpages() will not be called
-        * but deferred to map_phys_fmr()
-        */
-       ret = ehca_reg_mr(shca, e_fmr, NULL,
-                         fmr_attr->max_pages * (1 << fmr_attr->page_shift),
-                         mr_access_flags, e_pd, &pginfo,
-                         &tmp_lkey, &tmp_rkey, EHCA_REG_MR);
-       if (ret) {
-               ib_fmr = ERR_PTR(ret);
-               goto alloc_fmr_exit1;
-       }
-
-       /* successful */
-       e_fmr->hwpage_size = hw_pgsize;
-       e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
-       e_fmr->fmr_max_pages = fmr_attr->max_pages;
-       e_fmr->fmr_max_maps = fmr_attr->max_maps;
-       e_fmr->fmr_map_cnt = 0;
-       return &e_fmr->ib.ib_fmr;
-
-alloc_fmr_exit1:
-       ehca_mr_delete(e_fmr);
-alloc_fmr_exit0:
-       return ib_fmr;
-} /* end ehca_alloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_map_phys_fmr(struct ib_fmr *fmr,
-                     u64 *page_list,
-                     int list_len,
-                     u64 iova)
-{
-       int ret;
-       struct ehca_shca *shca =
-               container_of(fmr->device, struct ehca_shca, ib_device);
-       struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-       struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
-       struct ehca_mr_pginfo pginfo;
-       u32 tmp_lkey, tmp_rkey;
-
-       if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-               ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
-                        e_fmr, e_fmr->flags);
-               ret = -EINVAL;
-               goto map_phys_fmr_exit0;
-       }
-       ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
-       if (ret)
-               goto map_phys_fmr_exit0;
-       if (iova % e_fmr->fmr_page_size) {
-               /* only whole-numbered pages */
-               ehca_err(fmr->device, "bad iova, iova=%llx fmr_page_size=%x",
-                        iova, e_fmr->fmr_page_size);
-               ret = -EINVAL;
-               goto map_phys_fmr_exit0;
-       }
-       if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
-               /* HCAD does not limit the maps; trace this anyway */
-               ehca_info(fmr->device, "map limit exceeded, fmr=%p "
-                         "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
-                         fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
-       }
-
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.type = EHCA_MR_PGI_FMR;
-       pginfo.num_kpages = list_len;
-       pginfo.hwpage_size = e_fmr->hwpage_size;
-       pginfo.num_hwpages =
-               list_len * e_fmr->fmr_page_size / pginfo.hwpage_size;
-       pginfo.u.fmr.page_list = page_list;
-       pginfo.next_hwpage =
-               (iova & (e_fmr->fmr_page_size-1)) / pginfo.hwpage_size;
-       pginfo.u.fmr.fmr_pgsize = e_fmr->fmr_page_size;
-
-       ret = ehca_rereg_mr(shca, e_fmr, (u64 *)iova,
-                           list_len * e_fmr->fmr_page_size,
-                           e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
-       if (ret)
-               goto map_phys_fmr_exit0;
-
-       /* successful reregistration */
-       e_fmr->fmr_map_cnt++;
-       e_fmr->ib.ib_fmr.lkey = tmp_lkey;
-       e_fmr->ib.ib_fmr.rkey = tmp_rkey;
-       return 0;
-
-map_phys_fmr_exit0:
-       if (ret)
-               ehca_err(fmr->device, "ret=%i fmr=%p page_list=%p list_len=%x "
-                        "iova=%llx", ret, fmr, page_list, list_len, iova);
-       return ret;
-} /* end ehca_map_phys_fmr() */
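The pginfo setup above converts the caller's FMR page list into hardware pages. A worked example of that arithmetic with assumed page sizes; note that the alignment check earlier in the function forces iova to be a multiple of fmr_page_size, so next_hwpage comes out as zero here.

#include <stdio.h>

int main(void)
{
        unsigned long long fmr_page_size = 0x10000;     /* 64K FMR pages (assumed) */
        unsigned long long hwpage_size   = 0x1000;      /* 4K hardware pages (assumed) */
        unsigned long long iova          = 0x80000000;  /* fmr_page_size aligned */
        unsigned long long list_len      = 8;           /* caller-supplied pages */

        unsigned long long num_kpages  = list_len;
        unsigned long long num_hwpages = list_len * fmr_page_size / hwpage_size;
        unsigned long long next_hwpage =
                (iova & (fmr_page_size - 1)) / hwpage_size;

        printf("num_kpages=%llu num_hwpages=%llu next_hwpage=%llu\n",
               num_kpages, num_hwpages, next_hwpage);
        return 0;
}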
-
-/*----------------------------------------------------------------------*/
-
-int ehca_unmap_fmr(struct list_head *fmr_list)
-{
-       int ret = 0;
-       struct ib_fmr *ib_fmr;
-       struct ehca_shca *shca = NULL;
-       struct ehca_shca *prev_shca;
-       struct ehca_mr *e_fmr;
-       u32 num_fmr = 0;
-       u32 unmap_fmr_cnt = 0;
-
-       /* check all FMR belong to same SHCA, and check internal flag */
-       list_for_each_entry(ib_fmr, fmr_list, list) {
-               prev_shca = shca;
-               shca = container_of(ib_fmr->device, struct ehca_shca,
-                                   ib_device);
-               e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
-               if ((shca != prev_shca) && prev_shca) {
-                       ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
-                                "prev_shca=%p e_fmr=%p",
-                                shca, prev_shca, e_fmr);
-                       ret = -EINVAL;
-                       goto unmap_fmr_exit0;
-               }
-               if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-                       ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
-                                "e_fmr->flags=%x", e_fmr, e_fmr->flags);
-                       ret = -EINVAL;
-                       goto unmap_fmr_exit0;
-               }
-               num_fmr++;
-       }
-
-       /* loop over all FMRs to unmap */
-       list_for_each_entry(ib_fmr, fmr_list, list) {
-               unmap_fmr_cnt++;
-               e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
-               shca = container_of(ib_fmr->device, struct ehca_shca,
-                                   ib_device);
-               ret = ehca_unmap_one_fmr(shca, e_fmr);
-               if (ret) {
-                       /* unmap failed, stop unmapping of rest of FMRs */
-                       ehca_err(&shca->ib_device, "unmap of one FMR failed, "
-                                "stop rest, e_fmr=%p num_fmr=%x "
-                                "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
-                                unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
-                       goto unmap_fmr_exit0;
-               }
-       }
-
-unmap_fmr_exit0:
-       if (ret)
-               ehca_gen_err("ret=%i fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
-                            ret, fmr_list, num_fmr, unmap_fmr_cnt);
-       return ret;
-} /* end ehca_unmap_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dealloc_fmr(struct ib_fmr *fmr)
-{
-       int ret;
-       u64 h_ret;
-       struct ehca_shca *shca =
-               container_of(fmr->device, struct ehca_shca, ib_device);
-       struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
-
-       if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
-               ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
-                        e_fmr, e_fmr->flags);
-               ret = -EINVAL;
-               goto free_fmr_exit0;
-       }
-
-       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lli e_fmr=%p "
-                        "hca_hndl=%llx fmr_hndl=%llx fmr->lkey=%x",
-                        h_ret, e_fmr, shca->ipz_hca_handle.handle,
-                        e_fmr->ipz_mr_handle.handle, fmr->lkey);
-               ret = ehca2ib_return_code(h_ret);
-               goto free_fmr_exit0;
-       }
-       /* successful deregistration */
-       ehca_mr_delete(e_fmr);
-       return 0;
-
-free_fmr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i fmr=%p", ret, fmr);
-       return ret;
-} /* end ehca_dealloc_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
-                                  struct ehca_mr *e_mr,
-                                  struct ehca_mr_pginfo *pginfo);
-
-int ehca_reg_mr(struct ehca_shca *shca,
-               struct ehca_mr *e_mr,
-               u64 *iova_start,
-               u64 size,
-               int acl,
-               struct ehca_pd *e_pd,
-               struct ehca_mr_pginfo *pginfo,
-               u32 *lkey, /*OUT*/
-               u32 *rkey, /*OUT*/
-               enum ehca_reg_type reg_type)
-{
-       int ret;
-       u64 h_ret;
-       u32 hipz_acl;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       ehca_mrmw_map_acl(acl, &hipz_acl);
-       ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
-       if (ehca_use_hp_mr == 1)
-               hipz_acl |= 0x00000001;
-
-       h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
-                                        (u64)iova_start, size, hipz_acl,
-                                        e_pd->fw_pd, &hipzout);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lli "
-                        "hca_hndl=%llx", h_ret, shca->ipz_hca_handle.handle);
-               ret = ehca2ib_return_code(h_ret);
-               goto ehca_reg_mr_exit0;
-       }
-
-       e_mr->ipz_mr_handle = hipzout.handle;
-
-       if (reg_type == EHCA_REG_BUSMAP_MR)
-               ret = ehca_reg_bmap_mr_rpages(shca, e_mr, pginfo);
-       else if (reg_type == EHCA_REG_MR)
-               ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
-       else
-               ret = -EINVAL;
-
-       if (ret)
-               goto ehca_reg_mr_exit1;
-
-       /* successful registration */
-       e_mr->num_kpages = pginfo->num_kpages;
-       e_mr->num_hwpages = pginfo->num_hwpages;
-       e_mr->hwpage_size = pginfo->hwpage_size;
-       e_mr->start = iova_start;
-       e_mr->size = size;
-       e_mr->acl = acl;
-       *lkey = hipzout.lkey;
-       *rkey = hipzout.rkey;
-       return 0;
-
-ehca_reg_mr_exit1:
-       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "h_ret=%lli shca=%p e_mr=%p "
-                        "iova_start=%p size=%llx acl=%x e_pd=%p lkey=%x "
-                        "pginfo=%p num_kpages=%llx num_hwpages=%llx ret=%i",
-                        h_ret, shca, e_mr, iova_start, size, acl, e_pd,
-                        hipzout.lkey, pginfo, pginfo->num_kpages,
-                        pginfo->num_hwpages, ret);
-               ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
-                        "not recoverable");
-       }
-ehca_reg_mr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-                        "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
-                        "num_kpages=%llx num_hwpages=%llx",
-                        ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
-                        pginfo->num_kpages, pginfo->num_hwpages);
-       return ret;
-} /* end ehca_reg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_mr_rpages(struct ehca_shca *shca,
-                      struct ehca_mr *e_mr,
-                      struct ehca_mr_pginfo *pginfo)
-{
-       int ret = 0;
-       u64 h_ret;
-       u32 rnum;
-       u64 rpage;
-       u32 i;
-       u64 *kpage;
-
-       if (!pginfo->num_hwpages) /* in case of fmr */
-               return 0;
-
-       kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!kpage) {
-               ehca_err(&shca->ib_device, "kpage alloc failed");
-               ret = -ENOMEM;
-               goto ehca_reg_mr_rpages_exit0;
-       }
-
-       /* max MAX_RPAGES ehca mr pages per register call */
-       for (i = 0; i < NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES); i++) {
-
-               if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
-                       rnum = pginfo->num_hwpages % MAX_RPAGES; /* last shot */
-                       if (rnum == 0)
-                               rnum = MAX_RPAGES;      /* last shot is full */
-               } else
-                       rnum = MAX_RPAGES;
-
-               ret = ehca_set_pagebuf(pginfo, rnum, kpage);
-               if (ret) {
-                       ehca_err(&shca->ib_device, "ehca_set_pagebuf "
-                                "bad rc, ret=%i rnum=%x kpage=%p",
-                                ret, rnum, kpage);
-                       goto ehca_reg_mr_rpages_exit1;
-               }
-
-               if (rnum > 1) {
-                       rpage = __pa(kpage);
-                       if (!rpage) {
-                               ehca_err(&shca->ib_device, "kpage=%p i=%x",
-                                        kpage, i);
-                               ret = -EFAULT;
-                               goto ehca_reg_mr_rpages_exit1;
-                       }
-               } else
-                       rpage = *kpage;
-
-               h_ret = hipz_h_register_rpage_mr(
-                       shca->ipz_hca_handle, e_mr,
-                       ehca_encode_hwpage_size(pginfo->hwpage_size),
-                       0, rpage, rnum);
-
-               if (i == NUM_CHUNKS(pginfo->num_hwpages, MAX_RPAGES) - 1) {
-                       /*
-                        * check for 'registration complete'==H_SUCCESS
-                        * and for 'page registered'==H_PAGE_REGISTERED
-                        */
-                       if (h_ret != H_SUCCESS) {
-                               ehca_err(&shca->ib_device, "last "
-                                        "hipz_reg_rpage_mr failed, h_ret=%lli "
-                                        "e_mr=%p i=%x hca_hndl=%llx mr_hndl=%llx"
-                                        " lkey=%x", h_ret, e_mr, i,
-                                        shca->ipz_hca_handle.handle,
-                                        e_mr->ipz_mr_handle.handle,
-                                        e_mr->ib.ib_mr.lkey);
-                               ret = ehca2ib_return_code(h_ret);
-                               break;
-                       } else
-                               ret = 0;
-               } else if (h_ret != H_PAGE_REGISTERED) {
-                       ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
-                                "h_ret=%lli e_mr=%p i=%x lkey=%x hca_hndl=%llx "
-                                "mr_hndl=%llx", h_ret, e_mr, i,
-                                e_mr->ib.ib_mr.lkey,
-                                shca->ipz_hca_handle.handle,
-                                e_mr->ipz_mr_handle.handle);
-                       ret = ehca2ib_return_code(h_ret);
-                       break;
-               } else
-                       ret = 0;
-       } /* end for(i) */
-
-
-ehca_reg_mr_rpages_exit1:
-       ehca_free_fw_ctrlblock(kpage);
-ehca_reg_mr_rpages_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p pginfo=%p "
-                        "num_kpages=%llx num_hwpages=%llx", ret, shca, e_mr,
-                        pginfo, pginfo->num_kpages, pginfo->num_hwpages);
-       return ret;
-} /* end ehca_reg_mr_rpages() */
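Hardware pages are handed to the firmware in chunks of at most MAX_RPAGES per hcall, with the final chunk allowed to be partially filled (and H_SUCCESS expected only on that last call). A small worked example of the chunking arithmetic, reusing the NUM_CHUNKS macro defined near the top of this file.

#include <stdio.h>

#define MAX_RPAGES 512
#define NUM_CHUNKS(length, chunk_size) \
        (((length) + (chunk_size) - 1) / (chunk_size))

int main(void)
{
        unsigned long long num_hwpages = 1300;  /* example MR size in hw pages */
        unsigned long long chunks = NUM_CHUNKS(num_hwpages, MAX_RPAGES);
        unsigned long long i;

        for (i = 0; i < chunks; i++) {
                unsigned rnum = MAX_RPAGES;

                if (i == chunks - 1) {
                        rnum = num_hwpages % MAX_RPAGES;        /* last shot */
                        if (rnum == 0)
                                rnum = MAX_RPAGES;              /* last shot is full */
                }
                printf("hcall %llu registers %u rpages\n", i, rnum);
        }
        return 0;
}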
-
-/*----------------------------------------------------------------------*/
-
-inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
-                               struct ehca_mr *e_mr,
-                               u64 *iova_start,
-                               u64 size,
-                               u32 acl,
-                               struct ehca_pd *e_pd,
-                               struct ehca_mr_pginfo *pginfo,
-                               u32 *lkey, /*OUT*/
-                               u32 *rkey) /*OUT*/
-{
-       int ret;
-       u64 h_ret;
-       u32 hipz_acl;
-       u64 *kpage;
-       u64 rpage;
-       struct ehca_mr_pginfo pginfo_save;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       ehca_mrmw_map_acl(acl, &hipz_acl);
-       ehca_mrmw_set_pgsize_hipz_acl(pginfo->hwpage_size, &hipz_acl);
-
-       kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!kpage) {
-               ehca_err(&shca->ib_device, "kpage alloc failed");
-               ret = -ENOMEM;
-               goto ehca_rereg_mr_rereg1_exit0;
-       }
-
-       pginfo_save = *pginfo;
-       ret = ehca_set_pagebuf(pginfo, pginfo->num_hwpages, kpage);
-       if (ret) {
-               ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
-                        "pginfo=%p type=%x num_kpages=%llx num_hwpages=%llx "
-                        "kpage=%p", e_mr, pginfo, pginfo->type,
-                        pginfo->num_kpages, pginfo->num_hwpages, kpage);
-               goto ehca_rereg_mr_rereg1_exit1;
-       }
-       rpage = __pa(kpage);
-       if (!rpage) {
-               ehca_err(&shca->ib_device, "kpage=%p", kpage);
-               ret = -EFAULT;
-               goto ehca_rereg_mr_rereg1_exit1;
-       }
-       h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
-                                     (u64)iova_start, size, hipz_acl,
-                                     e_pd->fw_pd, rpage, &hipzout);
-       if (h_ret != H_SUCCESS) {
-               /*
-                * reregistration unsuccessful, try it again with the 3 hCalls;
-                * this is required e.g. in case of H_MR_CONDITION
-                * (MW bound or MR is shared)
-                */
-               ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
-                         "(Rereg1), h_ret=%lli e_mr=%p", h_ret, e_mr);
-               *pginfo = pginfo_save;
-               ret = -EAGAIN;
-       } else if ((u64 *)hipzout.vaddr != iova_start) {
-               ehca_err(&shca->ib_device, "PHYP changed iova_start in "
-                        "rereg_pmr, iova_start=%p iova_start_out=%llx e_mr=%p "
-                        "mr_handle=%llx lkey=%x lkey_out=%x", iova_start,
-                        hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
-                        e_mr->ib.ib_mr.lkey, hipzout.lkey);
-               ret = -EFAULT;
-       } else {
-               /*
-                * successful reregistration
-                * note: start and start_out are identical for eServer HCAs
-                */
-               e_mr->num_kpages = pginfo->num_kpages;
-               e_mr->num_hwpages = pginfo->num_hwpages;
-               e_mr->hwpage_size = pginfo->hwpage_size;
-               e_mr->start = iova_start;
-               e_mr->size = size;
-               e_mr->acl = acl;
-               *lkey = hipzout.lkey;
-               *rkey = hipzout.rkey;
-       }
-
-ehca_rereg_mr_rereg1_exit1:
-       ehca_free_fw_ctrlblock(kpage);
-ehca_rereg_mr_rereg1_exit0:
-       if (ret && (ret != -EAGAIN))
-               ehca_err(&shca->ib_device, "ret=%i lkey=%x rkey=%x "
-                        "pginfo=%p num_kpages=%llx num_hwpages=%llx",
-                        ret, *lkey, *rkey, pginfo, pginfo->num_kpages,
-                        pginfo->num_hwpages);
-       return ret;
-} /* end ehca_rereg_mr_rereg1() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_rereg_mr(struct ehca_shca *shca,
-                 struct ehca_mr *e_mr,
-                 u64 *iova_start,
-                 u64 size,
-                 int acl,
-                 struct ehca_pd *e_pd,
-                 struct ehca_mr_pginfo *pginfo,
-                 u32 *lkey,
-                 u32 *rkey)
-{
-       int ret = 0;
-       u64 h_ret;
-       int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
-       int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */
-
-       /* first determine reregistration hCall(s) */
-       if ((pginfo->num_hwpages > MAX_RPAGES) ||
-           (e_mr->num_hwpages > MAX_RPAGES) ||
-           (pginfo->num_hwpages > e_mr->num_hwpages)) {
-               ehca_dbg(&shca->ib_device, "Rereg3 case, "
-                        "pginfo->num_hwpages=%llx e_mr->num_hwpages=%x",
-                        pginfo->num_hwpages, e_mr->num_hwpages);
-               rereg_1_hcall = 0;
-               rereg_3_hcall = 1;
-       }
-
-       if (e_mr->flags & EHCA_MR_FLAG_MAXMR) { /* check for max-MR */
-               rereg_1_hcall = 0;
-               rereg_3_hcall = 1;
-               e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
-               ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
-                        e_mr);
-       }
-
-       if (rereg_1_hcall) {
-               ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
-                                          acl, e_pd, pginfo, lkey, rkey);
-               if (ret) {
-                       if (ret == -EAGAIN)
-                               rereg_3_hcall = 1;
-                       else
-                               goto ehca_rereg_mr_exit0;
-               }
-       }
-
-       if (rereg_3_hcall) {
-               struct ehca_mr save_mr;
-
-               /* first deregister old MR */
-               h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
-               if (h_ret != H_SUCCESS) {
-                       ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-                                "h_ret=%lli e_mr=%p hca_hndl=%llx mr_hndl=%llx "
-                                "mr->lkey=%x",
-                                h_ret, e_mr, shca->ipz_hca_handle.handle,
-                                e_mr->ipz_mr_handle.handle,
-                                e_mr->ib.ib_mr.lkey);
-                       ret = ehca2ib_return_code(h_ret);
-                       goto ehca_rereg_mr_exit0;
-               }
-               /* clean ehca_mr_t, without changing struct ib_mr and lock */
-               save_mr = *e_mr;
-               ehca_mr_deletenew(e_mr);
-
-               /* set some MR values */
-               e_mr->flags = save_mr.flags;
-               e_mr->hwpage_size = save_mr.hwpage_size;
-               e_mr->fmr_page_size = save_mr.fmr_page_size;
-               e_mr->fmr_max_pages = save_mr.fmr_max_pages;
-               e_mr->fmr_max_maps = save_mr.fmr_max_maps;
-               e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;
-
-               ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
-                                 e_pd, pginfo, lkey, rkey, EHCA_REG_MR);
-               if (ret) {
-                       u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
-                       memcpy(&e_mr->flags, &(save_mr.flags),
-                              sizeof(struct ehca_mr) - offset);
-                       goto ehca_rereg_mr_exit0;
-               }
-       }
-
-ehca_rereg_mr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_mr=%p "
-                        "iova_start=%p size=%llx acl=%x e_pd=%p pginfo=%p "
-                        "num_kpages=%llx lkey=%x rkey=%x rereg_1_hcall=%x "
-                        "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
-                        acl, e_pd, pginfo, pginfo->num_kpages, *lkey, *rkey,
-                        rereg_1_hcall, rereg_3_hcall);
-       return ret;
-} /* end ehca_rereg_mr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_unmap_one_fmr(struct ehca_shca *shca,
-                      struct ehca_mr *e_fmr)
-{
-       int ret = 0;
-       u64 h_ret;
-       struct ehca_pd *e_pd =
-               container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
-       struct ehca_mr save_fmr;
-       u32 tmp_lkey, tmp_rkey;
-       struct ehca_mr_pginfo pginfo;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       if (e_fmr->fmr_max_pages <= MAX_RPAGES) {
-               /*
-                * note: after using rereg hcall with len=0,
-                * rereg hcall must be used again for registering pages
-                */
-               h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
-                                             0, 0, e_pd->fw_pd, 0, &hipzout);
-               if (h_ret == H_SUCCESS) {
-                       /* successful reregistration */
-                       e_fmr->start = NULL;
-                       e_fmr->size = 0;
-                       tmp_lkey = hipzout.lkey;
-                       tmp_rkey = hipzout.rkey;
-                       return 0;
-               }
-               /*
-                * should not happen: the length was checked above,
-                * FMRs are not shared, and no MW is bound to an FMR
-                */
-               ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
-                        "(Rereg1), h_ret=%lli e_fmr=%p hca_hndl=%llx "
-                        "mr_hndl=%llx lkey=%x lkey_out=%x",
-                        h_ret, e_fmr, shca->ipz_hca_handle.handle,
-                        e_fmr->ipz_mr_handle.handle,
-                        e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
-               /* try free and rereg */
-       }
-
-       /* first free old FMR */
-       h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_free_mr failed, "
-                        "h_ret=%lli e_fmr=%p hca_hndl=%llx mr_hndl=%llx "
-                        "lkey=%x",
-                        h_ret, e_fmr, shca->ipz_hca_handle.handle,
-                        e_fmr->ipz_mr_handle.handle,
-                        e_fmr->ib.ib_fmr.lkey);
-               ret = ehca2ib_return_code(h_ret);
-               goto ehca_unmap_one_fmr_exit0;
-       }
-       /* clean ehca_mr_t, without changing lock */
-       save_fmr = *e_fmr;
-       ehca_mr_deletenew(e_fmr);
-
-       /* set some MR values */
-       e_fmr->flags = save_fmr.flags;
-       e_fmr->hwpage_size = save_fmr.hwpage_size;
-       e_fmr->fmr_page_size = save_fmr.fmr_page_size;
-       e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
-       e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
-       e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
-       e_fmr->acl = save_fmr.acl;
-
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.type = EHCA_MR_PGI_FMR;
-       ret = ehca_reg_mr(shca, e_fmr, NULL,
-                         (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
-                         e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
-                         &tmp_rkey, EHCA_REG_MR);
-       if (ret) {
-               u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
-               memcpy(&e_fmr->flags, &(save_fmr.flags),
-                      sizeof(struct ehca_mr) - offset);
-       }
-
-ehca_unmap_one_fmr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i tmp_lkey=%x tmp_rkey=%x "
-                        "fmr_max_pages=%x",
-                        ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages);
-       return ret;
-} /* end ehca_unmap_one_fmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_smr(struct ehca_shca *shca,
-                struct ehca_mr *e_origmr,
-                struct ehca_mr *e_newmr,
-                u64 *iova_start,
-                int acl,
-                struct ehca_pd *e_pd,
-                u32 *lkey, /*OUT*/
-                u32 *rkey) /*OUT*/
-{
-       int ret = 0;
-       u64 h_ret;
-       u32 hipz_acl;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       ehca_mrmw_map_acl(acl, &hipz_acl);
-       ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
-       h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
-                                   (u64)iova_start, hipz_acl, e_pd->fw_pd,
-                                   &hipzout);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
-                        "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
-                        "e_pd=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
-                        h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
-                        shca->ipz_hca_handle.handle,
-                        e_origmr->ipz_mr_handle.handle,
-                        e_origmr->ib.ib_mr.lkey);
-               ret = ehca2ib_return_code(h_ret);
-               goto ehca_reg_smr_exit0;
-       }
-       /* successful registration */
-       e_newmr->num_kpages = e_origmr->num_kpages;
-       e_newmr->num_hwpages = e_origmr->num_hwpages;
-       e_newmr->hwpage_size   = e_origmr->hwpage_size;
-       e_newmr->start = iova_start;
-       e_newmr->size = e_origmr->size;
-       e_newmr->acl = acl;
-       e_newmr->ipz_mr_handle = hipzout.handle;
-       *lkey = hipzout.lkey;
-       *rkey = hipzout.rkey;
-       return 0;
-
-ehca_reg_smr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_origmr=%p "
-                        "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
-                        ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
-       return ret;
-} /* end ehca_reg_smr() */
-
-/*----------------------------------------------------------------------*/
-static inline void *ehca_calc_sectbase(int top, int dir, int idx)
-{
-       unsigned long ret = idx;
-       ret |= dir << EHCA_DIR_INDEX_SHIFT;
-       ret |= top << EHCA_TOP_INDEX_SHIFT;
-       return __va(ret << SECTION_SIZE_BITS);
-}
-
-#define ehca_bmap_valid(entry) \
-       ((u64)entry != (u64)EHCA_INVAL_ADDR)
-
-static u64 ehca_reg_mr_section(int top, int dir, int idx, u64 *kpage,
-                              struct ehca_shca *shca, struct ehca_mr *mr,
-                              struct ehca_mr_pginfo *pginfo)
-{
-       u64 h_ret = 0;
-       unsigned long page = 0;
-       u64 rpage = __pa(kpage);
-       int page_count;
-
-       void *sectbase = ehca_calc_sectbase(top, dir, idx);
-       if ((unsigned long)sectbase & (pginfo->hwpage_size - 1)) {
-               ehca_err(&shca->ib_device, "reg_mr_section will probably fail: "
-                                          "hwpage_size does not match the "
-                                          "section start address");
-       }
-       page_count = EHCA_SECTSIZE / pginfo->hwpage_size;
-
-       while (page < page_count) {
-               u64 rnum;
-               for (rnum = 0; (rnum < MAX_RPAGES) && (page < page_count);
-                    rnum++) {
-                       void *pg = sectbase + ((page++) * pginfo->hwpage_size);
-                       kpage[rnum] = __pa(pg);
-               }
-
-               h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, mr,
-                       ehca_encode_hwpage_size(pginfo->hwpage_size),
-                       0, rpage, rnum);
-
-               if ((h_ret != H_SUCCESS) && (h_ret != H_PAGE_REGISTERED)) {
-                       ehca_err(&shca->ib_device, "register_rpage_mr failed");
-                       return h_ret;
-               }
-       }
-       return h_ret;
-}
-
-static u64 ehca_reg_mr_sections(int top, int dir, u64 *kpage,
-                               struct ehca_shca *shca, struct ehca_mr *mr,
-                               struct ehca_mr_pginfo *pginfo)
-{
-       u64 hret = H_SUCCESS;
-       int idx;
-
-       for (idx = 0; idx < EHCA_MAP_ENTRIES; idx++) {
-               if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]->ent[idx]))
-                       continue;
-
-               hret = ehca_reg_mr_section(top, dir, idx, kpage, shca, mr,
-                                          pginfo);
-               if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
-                               return hret;
-       }
-       return hret;
-}
-
-static u64 ehca_reg_mr_dir_sections(int top, u64 *kpage, struct ehca_shca *shca,
-                                   struct ehca_mr *mr,
-                                   struct ehca_mr_pginfo *pginfo)
-{
-       u64 hret = H_SUCCESS;
-       int dir;
-
-       for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
-               if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
-                       continue;
-
-               hret = ehca_reg_mr_sections(top, dir, kpage, shca, mr, pginfo);
-               if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
-                               return hret;
-       }
-       return hret;
-}
-
-/* register internal max-MR to internal SHCA */
-int ehca_reg_internal_maxmr(
-       struct ehca_shca *shca,
-       struct ehca_pd *e_pd,
-       struct ehca_mr **e_maxmr)  /*OUT*/
-{
-       int ret;
-       struct ehca_mr *e_mr;
-       u64 *iova_start;
-       u64 size_maxmr;
-       struct ehca_mr_pginfo pginfo;
-       u32 num_kpages;
-       u32 num_hwpages;
-       u64 hw_pgsize;
-
-       if (!ehca_bmap) {
-               ret = -EFAULT;
-               goto ehca_reg_internal_maxmr_exit0;
-       }
-
-       e_mr = ehca_mr_new();
-       if (!e_mr) {
-               ehca_err(&shca->ib_device, "out of memory");
-               ret = -ENOMEM;
-               goto ehca_reg_internal_maxmr_exit0;
-       }
-       e_mr->flags |= EHCA_MR_FLAG_MAXMR;
-
-       /* register internal max-MR on HCA */
-       size_maxmr = ehca_mr_len;
-       iova_start = (u64 *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START));
-       num_kpages = NUM_CHUNKS(((u64)iova_start % PAGE_SIZE) + size_maxmr,
-                               PAGE_SIZE);
-       hw_pgsize = ehca_get_max_hwpage_size(shca);
-       num_hwpages = NUM_CHUNKS(((u64)iova_start % hw_pgsize) + size_maxmr,
-                                hw_pgsize);
-
-       memset(&pginfo, 0, sizeof(pginfo));
-       pginfo.type = EHCA_MR_PGI_PHYS;
-       pginfo.num_kpages = num_kpages;
-       pginfo.num_hwpages = num_hwpages;
-       pginfo.hwpage_size = hw_pgsize;
-       pginfo.u.phy.addr = 0;
-       pginfo.u.phy.size = size_maxmr;
-
-       ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
-                         &pginfo, &e_mr->ib.ib_mr.lkey,
-                         &e_mr->ib.ib_mr.rkey, EHCA_REG_BUSMAP_MR);
-       if (ret) {
-               ehca_err(&shca->ib_device, "reg of internal max MR failed, "
-                        "e_mr=%p iova_start=%p size_maxmr=%llx num_kpages=%x "
-                        "num_hwpages=%x", e_mr, iova_start, size_maxmr,
-                        num_kpages, num_hwpages);
-               goto ehca_reg_internal_maxmr_exit1;
-       }
-
-       /* successful registration of all pages */
-       e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
-       e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
-       e_mr->ib.ib_mr.uobject = NULL;
-       atomic_inc(&(e_pd->ib_pd.usecnt));
-       *e_maxmr = e_mr;
-       return 0;
-
-ehca_reg_internal_maxmr_exit1:
-       ehca_mr_delete(e_mr);
-ehca_reg_internal_maxmr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p e_pd=%p e_maxmr=%p",
-                        ret, shca, e_pd, e_maxmr);
-       return ret;
-} /* end ehca_reg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_reg_maxmr(struct ehca_shca *shca,
-                  struct ehca_mr *e_newmr,
-                  u64 *iova_start,
-                  int acl,
-                  struct ehca_pd *e_pd,
-                  u32 *lkey,
-                  u32 *rkey)
-{
-       u64 h_ret;
-       struct ehca_mr *e_origmr = shca->maxmr;
-       u32 hipz_acl;
-       struct ehca_mr_hipzout_parms hipzout;
-
-       ehca_mrmw_map_acl(acl, &hipz_acl);
-       ehca_mrmw_set_pgsize_hipz_acl(e_origmr->hwpage_size, &hipz_acl);
-
-       h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
-                                   (u64)iova_start, hipz_acl, e_pd->fw_pd,
-                                   &hipzout);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lli "
-                        "e_origmr=%p hca_hndl=%llx mr_hndl=%llx lkey=%x",
-                        h_ret, e_origmr, shca->ipz_hca_handle.handle,
-                        e_origmr->ipz_mr_handle.handle,
-                        e_origmr->ib.ib_mr.lkey);
-               return ehca2ib_return_code(h_ret);
-       }
-       /* successful registration */
-       e_newmr->num_kpages = e_origmr->num_kpages;
-       e_newmr->num_hwpages = e_origmr->num_hwpages;
-       e_newmr->hwpage_size = e_origmr->hwpage_size;
-       e_newmr->start = iova_start;
-       e_newmr->size = e_origmr->size;
-       e_newmr->acl = acl;
-       e_newmr->ipz_mr_handle = hipzout.handle;
-       *lkey = hipzout.lkey;
-       *rkey = hipzout.rkey;
-       return 0;
-} /* end ehca_reg_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
-{
-       int ret;
-       struct ehca_mr *e_maxmr;
-       struct ib_pd *ib_pd;
-
-       if (!shca->maxmr) {
-               ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
-               ret = -EINVAL;
-               goto ehca_dereg_internal_maxmr_exit0;
-       }
-
-       e_maxmr = shca->maxmr;
-       ib_pd = e_maxmr->ib.ib_mr.pd;
-       shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
-
-       ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
-       if (ret) {
-               ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
-                        "ret=%i e_maxmr=%p shca=%p lkey=%x",
-                        ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
-               shca->maxmr = e_maxmr;
-               goto ehca_dereg_internal_maxmr_exit0;
-       }
-
-       atomic_dec(&ib_pd->usecnt);
-
-ehca_dereg_internal_maxmr_exit0:
-       if (ret)
-               ehca_err(&shca->ib_device, "ret=%i shca=%p shca->maxmr=%p",
-                        ret, shca, shca->maxmr);
-       return ret;
-} /* end ehca_dereg_internal_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-/* check the page list of the map FMR verb for validity */
-int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
-                            u64 *page_list,
-                            int list_len)
-{
-       u32 i;
-       u64 *page;
-
-       if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
-               ehca_gen_err("bad list_len, list_len=%x "
-                            "e_fmr->fmr_max_pages=%x fmr=%p",
-                            list_len, e_fmr->fmr_max_pages, e_fmr);
-               return -EINVAL;
-       }
-
-       /* each page must be aligned */
-       page = page_list;
-       for (i = 0; i < list_len; i++) {
-               if (*page % e_fmr->fmr_page_size) {
-                       ehca_gen_err("bad page, i=%x *page=%llx page=%p fmr=%p "
-                                    "fmr_page_size=%x", i, *page, page, e_fmr,
-                                    e_fmr->fmr_page_size);
-                       return -EINVAL;
-               }
-               page++;
-       }
-
-       return 0;
-} /* end ehca_fmr_check_page_list() */
-
-/*----------------------------------------------------------------------*/
-
-/* PAGE_SIZE >= pginfo->hwpage_size */
-static int ehca_set_pagebuf_user1(struct ehca_mr_pginfo *pginfo,
-                                 u32 number,
-                                 u64 *kpage)
-{
-       int ret = 0;
-       u64 pgaddr;
-       u32 j = 0;
-       int hwpages_per_kpage = PAGE_SIZE / pginfo->hwpage_size;
-       struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
-       while (*sg != NULL) {
-               pgaddr = page_to_pfn(sg_page(*sg))
-                       << PAGE_SHIFT;
-               *kpage = pgaddr + (pginfo->next_hwpage *
-                                  pginfo->hwpage_size);
-               if (!(*kpage)) {
-                       ehca_gen_err("pgaddr=%llx "
-                                    "sg_dma_address=%llx "
-                                    "entry=%llx next_hwpage=%llx",
-                                    pgaddr, (u64)sg_dma_address(*sg),
-                                    pginfo->u.usr.next_nmap,
-                                    pginfo->next_hwpage);
-                       return -EFAULT;
-               }
-               (pginfo->hwpage_cnt)++;
-               (pginfo->next_hwpage)++;
-               kpage++;
-               if (pginfo->next_hwpage % hwpages_per_kpage == 0) {
-                       (pginfo->kpage_cnt)++;
-                       (pginfo->u.usr.next_nmap)++;
-                       pginfo->next_hwpage = 0;
-                       *sg = sg_next(*sg);
-               }
-               j++;
-               if (j >= number)
-                       break;
-       }
-
-       return ret;
-}
-
-/*
- * check the given pages for a contiguous layout;
- * the last page address is returned in prev_pgaddr for further checks
- */
-static int ehca_check_kpages_per_ate(struct scatterlist **sg,
-                                    int num_pages,
-                                    u64 *prev_pgaddr)
-{
-       for (; *sg && num_pages > 0; *sg = sg_next(*sg), num_pages--) {
-               u64 pgaddr = page_to_pfn(sg_page(*sg)) << PAGE_SHIFT;
-               if (ehca_debug_level >= 3)
-                       ehca_gen_dbg("chunk_page=%llx value=%016llx", pgaddr,
-                                    *(u64 *)__va(pgaddr));
-               if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
-                       ehca_gen_err("uncontiguous page found pgaddr=%llx "
-                                    "prev_pgaddr=%llx entries_left_in_hwpage=%x",
-                                    pgaddr, *prev_pgaddr, num_pages);
-                       return -EINVAL;
-               }
-               *prev_pgaddr = pgaddr;
-       }
-       return 0;
-}
-
-/* PAGE_SIZE < pginfo->hwpage_size */
-static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
-                                 u32 number,
-                                 u64 *kpage)
-{
-       int ret = 0;
-       u64 pgaddr, prev_pgaddr;
-       u32 j = 0;
-       int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
-       int nr_kpages = kpages_per_hwpage;
-       struct scatterlist **sg = &pginfo->u.usr.next_sg;
-
-       while (*sg != NULL) {
-
-               if (nr_kpages == kpages_per_hwpage) {
-                       pgaddr = (page_to_pfn(sg_page(*sg))
-                                  << PAGE_SHIFT);
-                       *kpage = pgaddr;
-                       if (!(*kpage)) {
-                               ehca_gen_err("pgaddr=%llx entry=%llx",
-                                            pgaddr, pginfo->u.usr.next_nmap);
-                               ret = -EFAULT;
-                               return ret;
-                       }
-                       /*
-                        * The first page in a hwpage must be aligned;
-                        * the first MR page is exempt from this rule.
-                        */
-                       if (pgaddr & (pginfo->hwpage_size - 1)) {
-                               if (pginfo->hwpage_cnt) {
-                                       ehca_gen_err(
-                                               "invalid alignment "
-                                               "pgaddr=%llx entry=%llx "
-                                               "mr_pgsize=%llx",
-                                               pgaddr, pginfo->u.usr.next_nmap,
-                                               pginfo->hwpage_size);
-                                       ret = -EFAULT;
-                                       return ret;
-                               }
-                               /* first MR page */
-                               pginfo->kpage_cnt =
-                                       (pgaddr &
-                                        (pginfo->hwpage_size - 1)) >>
-                                       PAGE_SHIFT;
-                               nr_kpages -= pginfo->kpage_cnt;
-                               *kpage = pgaddr &
-                                        ~(pginfo->hwpage_size - 1);
-                       }
-                       if (ehca_debug_level >= 3) {
-                               u64 val = *(u64 *)__va(pgaddr);
-                               ehca_gen_dbg("kpage=%llx page=%llx "
-                                            "value=%016llx",
-                                            *kpage, pgaddr, val);
-                       }
-                       prev_pgaddr = pgaddr;
-                       *sg = sg_next(*sg);
-                       pginfo->kpage_cnt++;
-                       pginfo->u.usr.next_nmap++;
-                       nr_kpages--;
-                       if (!nr_kpages)
-                               goto next_kpage;
-                       continue;
-               }
-
-               ret = ehca_check_kpages_per_ate(sg, nr_kpages,
-                                               &prev_pgaddr);
-               if (ret)
-                       return ret;
-               pginfo->kpage_cnt += nr_kpages;
-               pginfo->u.usr.next_nmap += nr_kpages;
-
-next_kpage:
-               nr_kpages = kpages_per_hwpage;
-               (pginfo->hwpage_cnt)++;
-               kpage++;
-               j++;
-               if (j >= number)
-                       break;
-       }
-
-       return ret;
-}
-
-static int ehca_set_pagebuf_phys(struct ehca_mr_pginfo *pginfo,
-                                u32 number, u64 *kpage)
-{
-       int ret = 0;
-       u64 addr = pginfo->u.phy.addr;
-       u64 size = pginfo->u.phy.size;
-       u64 num_hw, offs_hw;
-       u32 i = 0;
-
-       num_hw  = NUM_CHUNKS((addr % pginfo->hwpage_size) + size,
-                               pginfo->hwpage_size);
-       offs_hw = (addr & ~(pginfo->hwpage_size - 1)) / pginfo->hwpage_size;
-
-       while (pginfo->next_hwpage < offs_hw + num_hw) {
-               /* sanity check */
-               if ((pginfo->kpage_cnt >= pginfo->num_kpages) ||
-                   (pginfo->hwpage_cnt >= pginfo->num_hwpages)) {
-                       ehca_gen_err("kpage_cnt >= num_kpages, "
-                                    "kpage_cnt=%llx num_kpages=%llx "
-                                    "hwpage_cnt=%llx "
-                                    "num_hwpages=%llx i=%x",
-                                    pginfo->kpage_cnt,
-                                    pginfo->num_kpages,
-                                    pginfo->hwpage_cnt,
-                                    pginfo->num_hwpages, i);
-                       return -EFAULT;
-               }
-               *kpage = (addr & ~(pginfo->hwpage_size - 1)) +
-                        (pginfo->next_hwpage * pginfo->hwpage_size);
-               if (!(*kpage) && addr) {
-                       ehca_gen_err("addr=%llx size=%llx "
-                                    "next_hwpage=%llx", addr,
-                                    size, pginfo->next_hwpage);
-                       return -EFAULT;
-               }
-               (pginfo->hwpage_cnt)++;
-               (pginfo->next_hwpage)++;
-               if (PAGE_SIZE >= pginfo->hwpage_size) {
-                       if (pginfo->next_hwpage %
-                           (PAGE_SIZE / pginfo->hwpage_size) == 0)
-                               (pginfo->kpage_cnt)++;
-               } else
-                       pginfo->kpage_cnt += pginfo->hwpage_size /
-                               PAGE_SIZE;
-               kpage++;
-               i++;
-               if (i >= number)
-                       break;
-       }
-       if (pginfo->next_hwpage >= offs_hw + num_hw) {
-               pginfo->next_hwpage = 0;
-       }
-
-       return ret;
-}
-
-static int ehca_set_pagebuf_fmr(struct ehca_mr_pginfo *pginfo,
-                               u32 number, u64 *kpage)
-{
-       int ret = 0;
-       u64 *fmrlist;
-       u32 i;
-
-       /* loop over desired page_list entries */
-       fmrlist = pginfo->u.fmr.page_list + pginfo->u.fmr.next_listelem;
-       for (i = 0; i < number; i++) {
-               *kpage = (*fmrlist & ~(pginfo->hwpage_size - 1)) +
-                          pginfo->next_hwpage * pginfo->hwpage_size;
-               if (!(*kpage)) {
-                       ehca_gen_err("*fmrlist=%llx fmrlist=%p "
-                                    "next_listelem=%llx next_hwpage=%llx",
-                                    *fmrlist, fmrlist,
-                                    pginfo->u.fmr.next_listelem,
-                                    pginfo->next_hwpage);
-                       return -EFAULT;
-               }
-               (pginfo->hwpage_cnt)++;
-               if (pginfo->u.fmr.fmr_pgsize >= pginfo->hwpage_size) {
-                       if (pginfo->next_hwpage %
-                           (pginfo->u.fmr.fmr_pgsize /
-                            pginfo->hwpage_size) == 0) {
-                               (pginfo->kpage_cnt)++;
-                               (pginfo->u.fmr.next_listelem)++;
-                               fmrlist++;
-                               pginfo->next_hwpage = 0;
-                       } else
-                               (pginfo->next_hwpage)++;
-               } else {
-                       unsigned int cnt_per_hwpage = pginfo->hwpage_size /
-                               pginfo->u.fmr.fmr_pgsize;
-                       unsigned int j;
-                       u64 prev = *kpage;
-                       /* check if adrs are contiguous */
-                       for (j = 1; j < cnt_per_hwpage; j++) {
-                               u64 p = fmrlist[j] & ~(pginfo->hwpage_size - 1);
-                               if (prev + pginfo->u.fmr.fmr_pgsize != p) {
-                                       ehca_gen_err("uncontiguous fmr pages "
-                                                    "found prev=%llx p=%llx "
-                                                    "idx=%x", prev, p, i + j);
-                                       return -EINVAL;
-                               }
-                               prev = p;
-                       }
-                       pginfo->kpage_cnt += cnt_per_hwpage;
-                       pginfo->u.fmr.next_listelem += cnt_per_hwpage;
-                       fmrlist += cnt_per_hwpage;
-               }
-               kpage++;
-       }
-       return ret;
-}
-
-/* setup page buffer from page info */
-int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
-                    u32 number,
-                    u64 *kpage)
-{
-       int ret;
-
-       switch (pginfo->type) {
-       case EHCA_MR_PGI_PHYS:
-               ret = ehca_set_pagebuf_phys(pginfo, number, kpage);
-               break;
-       case EHCA_MR_PGI_USER:
-               ret = PAGE_SIZE >= pginfo->hwpage_size ?
-                       ehca_set_pagebuf_user1(pginfo, number, kpage) :
-                       ehca_set_pagebuf_user2(pginfo, number, kpage);
-               break;
-       case EHCA_MR_PGI_FMR:
-               ret = ehca_set_pagebuf_fmr(pginfo, number, kpage);
-               break;
-       default:
-               ehca_gen_err("bad pginfo->type=%x", pginfo->type);
-               ret = -EFAULT;
-               break;
-       }
-       return ret;
-} /* end ehca_set_pagebuf() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * check whether an MR is a max-MR, i.e. one that covers all of memory;
- * returns 1 if it is a max-MR, else 0
- */
-int ehca_mr_is_maxmr(u64 size,
-                    u64 *iova_start)
-{
-       /* an MR is treated as a max-MR only if it satisfies the following: */
-       if ((size == ehca_mr_len) &&
-           (iova_start == (void *)ehca_map_vaddr((void *)(KERNELBASE + PHYSICAL_START)))) {
-               ehca_gen_dbg("this is a max-MR");
-               return 1;
-       } else
-               return 0;
-} /* end ehca_mr_is_maxmr() */
-
-/*----------------------------------------------------------------------*/
-
-/* map access control for MR/MW. This routine is used for MR and MW. */
-void ehca_mrmw_map_acl(int ib_acl,
-                      u32 *hipz_acl)
-{
-       *hipz_acl = 0;
-       if (ib_acl & IB_ACCESS_REMOTE_READ)
-               *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
-       if (ib_acl & IB_ACCESS_REMOTE_WRITE)
-               *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
-       if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
-               *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
-       if (ib_acl & IB_ACCESS_LOCAL_WRITE)
-               *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
-       if (ib_acl & IB_ACCESS_MW_BIND)
-               *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
-} /* end ehca_mrmw_map_acl() */
-
-/*----------------------------------------------------------------------*/
-
-/* sets page size in hipz access control for MR/MW. */
-void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl) /*INOUT*/
-{
-       *hipz_acl |= (ehca_encode_hwpage_size(pgsize) << 24);
-} /* end ehca_mrmw_set_pgsize_hipz_acl() */
-
-/*----------------------------------------------------------------------*/
-
-/*
- * reverse map access control for MR/MW.
- * This routine is used for MR and MW.
- */
-void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
-                              int *ib_acl) /*OUT*/
-{
-       *ib_acl = 0;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
-               *ib_acl |= IB_ACCESS_REMOTE_READ;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
-               *ib_acl |= IB_ACCESS_REMOTE_WRITE;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
-               *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
-               *ib_acl |= IB_ACCESS_LOCAL_WRITE;
-       if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
-               *ib_acl |= IB_ACCESS_MW_BIND;
-} /* end ehca_mrmw_reverse_map_acl() */
-
-
-/*----------------------------------------------------------------------*/
-
-/*
- * MR destructor and constructor;
- * used by the reregister MR verb: sets all fields of struct ehca_mr to 0,
- * except struct ib_mr and the spinlock
- */
-void ehca_mr_deletenew(struct ehca_mr *mr)
-{
-       mr->flags = 0;
-       mr->num_kpages = 0;
-       mr->num_hwpages = 0;
-       mr->acl = 0;
-       mr->start = NULL;
-       mr->fmr_page_size = 0;
-       mr->fmr_max_pages = 0;
-       mr->fmr_max_maps = 0;
-       mr->fmr_map_cnt = 0;
-       memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
-       memset(&mr->galpas, 0, sizeof(mr->galpas));
-} /* end ehca_mr_deletenew() */
-
-int ehca_init_mrmw_cache(void)
-{
-       mr_cache = kmem_cache_create("ehca_cache_mr",
-                                    sizeof(struct ehca_mr), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!mr_cache)
-               return -ENOMEM;
-       mw_cache = kmem_cache_create("ehca_cache_mw",
-                                    sizeof(struct ehca_mw), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!mw_cache) {
-               kmem_cache_destroy(mr_cache);
-               mr_cache = NULL;
-               return -ENOMEM;
-       }
-       return 0;
-}
-
-void ehca_cleanup_mrmw_cache(void)
-{
-       kmem_cache_destroy(mr_cache);
-       kmem_cache_destroy(mw_cache);
-}
-
-static inline int ehca_init_top_bmap(struct ehca_top_bmap *ehca_top_bmap,
-                                    int dir)
-{
-       if (!ehca_bmap_valid(ehca_top_bmap->dir[dir])) {
-               ehca_top_bmap->dir[dir] =
-                       kmalloc(sizeof(struct ehca_dir_bmap), GFP_KERNEL);
-               if (!ehca_top_bmap->dir[dir])
-                       return -ENOMEM;
-               /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
-               memset(ehca_top_bmap->dir[dir], 0xFF, EHCA_ENT_MAP_SIZE);
-       }
-       return 0;
-}
-
-static inline int ehca_init_bmap(struct ehca_bmap *ehca_bmap, int top, int dir)
-{
-       if (!ehca_bmap_valid(ehca_bmap->top[top])) {
-               ehca_bmap->top[top] =
-                       kmalloc(sizeof(struct ehca_top_bmap), GFP_KERNEL);
-               if (!ehca_bmap->top[top])
-                       return -ENOMEM;
-               /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
-               memset(ehca_bmap->top[top], 0xFF, EHCA_DIR_MAP_SIZE);
-       }
-       return ehca_init_top_bmap(ehca_bmap->top[top], dir);
-}
-
-static inline int ehca_calc_index(unsigned long i, unsigned long s)
-{
-       return (i >> s) & EHCA_INDEX_MASK;
-}
-
-void ehca_destroy_busmap(void)
-{
-       int top, dir;
-
-       if (!ehca_bmap)
-               return;
-
-       for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
-               if (!ehca_bmap_valid(ehca_bmap->top[top]))
-                       continue;
-               for (dir = 0; dir < EHCA_MAP_ENTRIES; dir++) {
-                       if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
-                               continue;
-
-                       kfree(ehca_bmap->top[top]->dir[dir]);
-               }
-
-               kfree(ehca_bmap->top[top]);
-       }
-
-       kfree(ehca_bmap);
-       ehca_bmap = NULL;
-}
-
-static int ehca_update_busmap(unsigned long pfn, unsigned long nr_pages)
-{
-       unsigned long i, start_section, end_section;
-       int top, dir, idx;
-
-       if (!nr_pages)
-               return 0;
-
-       if (!ehca_bmap) {
-               ehca_bmap = kmalloc(sizeof(struct ehca_bmap), GFP_KERNEL);
-               if (!ehca_bmap)
-                       return -ENOMEM;
-               /* Set map block to 0xFF according to EHCA_INVAL_ADDR */
-               memset(ehca_bmap, 0xFF, EHCA_TOP_MAP_SIZE);
-       }
-
-       start_section = (pfn * PAGE_SIZE) / EHCA_SECTSIZE;
-       end_section = ((pfn + nr_pages) * PAGE_SIZE) / EHCA_SECTSIZE;
-       for (i = start_section; i < end_section; i++) {
-               int ret;
-               top = ehca_calc_index(i, EHCA_TOP_INDEX_SHIFT);
-               dir = ehca_calc_index(i, EHCA_DIR_INDEX_SHIFT);
-               idx = i & EHCA_INDEX_MASK;
-
-               ret = ehca_init_bmap(ehca_bmap, top, dir);
-               if (ret) {
-                       ehca_destroy_busmap();
-                       return ret;
-               }
-               ehca_bmap->top[top]->dir[dir]->ent[idx] = ehca_mr_len;
-               ehca_mr_len += EHCA_SECTSIZE;
-       }
-       return 0;
-}
-
-static int ehca_is_hugepage(unsigned long pfn)
-{
-       int page_order;
-
-       if (pfn & EHCA_HUGEPAGE_PFN_MASK)
-               return 0;
-
-       page_order = compound_order(pfn_to_page(pfn));
-       if (page_order + PAGE_SHIFT != EHCA_HUGEPAGESHIFT)
-               return 0;
-
-       return 1;
-}
-
-static int ehca_create_busmap_callback(unsigned long initial_pfn,
-                                      unsigned long total_nr_pages, void *arg)
-{
-       int ret;
-       unsigned long pfn, start_pfn, end_pfn, nr_pages;
-
-       if ((total_nr_pages * PAGE_SIZE) < EHCA_HUGEPAGE_SIZE)
-               return ehca_update_busmap(initial_pfn, total_nr_pages);
-
-       /* Given chunk is >= 16GB -> check for hugepages */
-       start_pfn = initial_pfn;
-       end_pfn = initial_pfn + total_nr_pages;
-       pfn = start_pfn;
-
-       while (pfn < end_pfn) {
-               if (ehca_is_hugepage(pfn)) {
-                       /* Add mem found in front of the hugepage */
-                       nr_pages = pfn - start_pfn;
-                       ret = ehca_update_busmap(start_pfn, nr_pages);
-                       if (ret)
-                               return ret;
-                       /* Skip the hugepage */
-                       pfn += (EHCA_HUGEPAGE_SIZE / PAGE_SIZE);
-                       start_pfn = pfn;
-               } else
-                       pfn += (EHCA_SECTSIZE / PAGE_SIZE);
-       }
-
-       /* Add mem found behind the hugepage(s)  */
-       nr_pages = pfn - start_pfn;
-       return ehca_update_busmap(start_pfn, nr_pages);
-}
-
-int ehca_create_busmap(void)
-{
-       int ret;
-
-       ehca_mr_len = 0;
-       ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
-                                  ehca_create_busmap_callback);
-       return ret;
-}
-
-static int ehca_reg_bmap_mr_rpages(struct ehca_shca *shca,
-                                  struct ehca_mr *e_mr,
-                                  struct ehca_mr_pginfo *pginfo)
-{
-       int top;
-       u64 hret = H_SUCCESS, *kpage; /* H_SUCCESS also covers an empty map */
-
-       kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!kpage) {
-               ehca_err(&shca->ib_device, "kpage alloc failed");
-               return -ENOMEM;
-       }
-       for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
-               if (!ehca_bmap_valid(ehca_bmap->top[top]))
-                       continue;
-               hret = ehca_reg_mr_dir_sections(top, kpage, shca, e_mr, pginfo);
-               if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
-                       break;
-       }
-
-       ehca_free_fw_ctrlblock(kpage);
-
-       if (hret == H_SUCCESS)
-               return 0; /* Everything is fine */
-       else {
-               ehca_err(&shca->ib_device, "ehca_reg_bmap_mr_rpages failed, "
-                                "h_ret=%lli e_mr=%p top=%x lkey=%x "
-                                "hca_hndl=%llx mr_hndl=%llx", hret, e_mr, top,
-                                e_mr->ib.ib_mr.lkey,
-                                shca->ipz_hca_handle.handle,
-                                e_mr->ipz_mr_handle.handle);
-               return ehca2ib_return_code(hret);
-       }
-}
-
-static u64 ehca_map_vaddr(void *caddr)
-{
-       int top, dir, idx;
-       unsigned long abs_addr, offset;
-       u64 entry;
-
-       if (!ehca_bmap)
-               return EHCA_INVAL_ADDR;
-
-       abs_addr = __pa(caddr);
-       top = ehca_calc_index(abs_addr, EHCA_TOP_INDEX_SHIFT + EHCA_SECTSHIFT);
-       if (!ehca_bmap_valid(ehca_bmap->top[top]))
-               return EHCA_INVAL_ADDR;
-
-       dir = ehca_calc_index(abs_addr, EHCA_DIR_INDEX_SHIFT + EHCA_SECTSHIFT);
-       if (!ehca_bmap_valid(ehca_bmap->top[top]->dir[dir]))
-               return EHCA_INVAL_ADDR;
-
-       idx = ehca_calc_index(abs_addr, EHCA_SECTSHIFT);
-
-       entry = ehca_bmap->top[top]->dir[dir]->ent[idx];
-       if (ehca_bmap_valid(entry)) {
-               offset = (unsigned long)caddr & (EHCA_SECTSIZE - 1);
-               return entry | offset;
-       } else
-               return EHCA_INVAL_ADDR;
-}
-
-static int ehca_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
-       return dma_addr == EHCA_INVAL_ADDR;
-}
-
-static u64 ehca_dma_map_single(struct ib_device *dev, void *cpu_addr,
-                              size_t size, enum dma_data_direction direction)
-{
-       if (cpu_addr)
-               return ehca_map_vaddr(cpu_addr);
-       else
-               return EHCA_INVAL_ADDR;
-}
-
-static void ehca_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
-                                 enum dma_data_direction direction)
-{
-       /* This is only a stub; nothing to be done here */
-}
-
-static u64 ehca_dma_map_page(struct ib_device *dev, struct page *page,
-                            unsigned long offset, size_t size,
-                            enum dma_data_direction direction)
-{
-       u64 addr;
-
-       if (offset + size > PAGE_SIZE)
-               return EHCA_INVAL_ADDR;
-
-       addr = ehca_map_vaddr(page_address(page));
-       if (!ehca_dma_mapping_error(dev, addr))
-               addr += offset;
-
-       return addr;
-}
-
-static void ehca_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
-                               enum dma_data_direction direction)
-{
-       /* This is only a stub; nothing to be done here */
-}
-
-static int ehca_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
-                          int nents, enum dma_data_direction direction)
-{
-       struct scatterlist *sg;
-       int i;
-
-       for_each_sg(sgl, sg, nents, i) {
-               u64 addr;
-               addr = ehca_map_vaddr(sg_virt(sg));
-               if (ehca_dma_mapping_error(dev, addr))
-                       return 0;
-
-               sg->dma_address = addr;
-               sg->dma_length = sg->length;
-       }
-       return nents;
-}
-
-static void ehca_dma_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
-                             int nents, enum dma_data_direction direction)
-{
-       /* This is only a stub; nothing to be done here */
-}
-
-static void ehca_dma_sync_single_for_cpu(struct ib_device *dev, u64 addr,
-                                        size_t size,
-                                        enum dma_data_direction dir)
-{
-       dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
-}
-
-static void ehca_dma_sync_single_for_device(struct ib_device *dev, u64 addr,
-                                           size_t size,
-                                           enum dma_data_direction dir)
-{
-       dma_sync_single_for_device(dev->dma_device, addr, size, dir);
-}
-
-static void *ehca_dma_alloc_coherent(struct ib_device *dev, size_t size,
-                                    u64 *dma_handle, gfp_t flag)
-{
-       struct page *p;
-       void *addr = NULL;
-       u64 dma_addr;
-
-       p = alloc_pages(flag, get_order(size));
-       if (p) {
-               addr = page_address(p);
-               dma_addr = ehca_map_vaddr(addr);
-               if (ehca_dma_mapping_error(dev, dma_addr)) {
-                       free_pages((unsigned long)addr, get_order(size));
-                       return NULL;
-               }
-               if (dma_handle)
-                       *dma_handle = dma_addr;
-               return addr;
-       }
-       return NULL;
-}
-
-static void ehca_dma_free_coherent(struct ib_device *dev, size_t size,
-                                  void *cpu_addr, u64 dma_handle)
-{
-       if (cpu_addr && size)
-               free_pages((unsigned long)cpu_addr, get_order(size));
-}
-
-
-struct ib_dma_mapping_ops ehca_dma_mapping_ops = {
-       .mapping_error          = ehca_dma_mapping_error,
-       .map_single             = ehca_dma_map_single,
-       .unmap_single           = ehca_dma_unmap_single,
-       .map_page               = ehca_dma_map_page,
-       .unmap_page             = ehca_dma_unmap_page,
-       .map_sg                 = ehca_dma_map_sg,
-       .unmap_sg               = ehca_dma_unmap_sg,
-       .sync_single_for_cpu    = ehca_dma_sync_single_for_cpu,
-       .sync_single_for_device = ehca_dma_sync_single_for_device,
-       .alloc_coherent         = ehca_dma_alloc_coherent,
-       .free_coherent          = ehca_dma_free_coherent,
-};
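
The bus-map code above (ehca_calc_index(), ehca_update_busmap() and ehca_map_vaddr()) resolves an address through a three-level top/dir/idx table keyed per memory section, and ehca_dma_map_single() and friends then simply feed kernel virtual addresses through that lookup instead of a real IOMMU mapping. The standalone C sketch below models only the lookup; the shift widths, table sizes and the NULL checks are illustrative stand-ins for the driver's EHCA_* constants and its 0xFF invalid-entry fill, not the real values.

/* Minimal model of the three-level bus-map lookup; all constants below are
 * hypothetical stand-ins, not the driver's EHCA_* definitions. */
#include <stdint.h>
#include <stddef.h>

#define SECTSHIFT    24                          /* assumed 16 MB sections    */
#define INDEX_BITS   8                           /* assumed 256 entries/level */
#define INDEX_MASK   ((1UL << INDEX_BITS) - 1)
#define INVAL_ADDR   (~0ULL)

struct ent_map { uint64_t ent[1 << INDEX_BITS]; };
struct dir_map { struct ent_map *dir[1 << INDEX_BITS]; };
struct top_map { struct dir_map *top[1 << INDEX_BITS]; };

/* mirrors ehca_calc_index(): pick one index field out of the address */
static unsigned long calc_index(unsigned long addr, unsigned long shift)
{
        return (addr >> shift) & INDEX_MASK;
}

/* Walk top -> dir -> ent and splice the in-section offset back on, the way
 * ehca_map_vaddr() builds the contiguous "bus" address handed to the HCA;
 * NULL stands in for the driver's 0xFF "invalid" fill. */
static uint64_t map_paddr(const struct top_map *bmap, unsigned long pa)
{
        unsigned long top = calc_index(pa, 2 * INDEX_BITS + SECTSHIFT);
        unsigned long dir = calc_index(pa, INDEX_BITS + SECTSHIFT);
        unsigned long idx = calc_index(pa, SECTSHIFT);

        if (!bmap || !bmap->top[top] || !bmap->top[top]->dir[dir])
                return INVAL_ADDR;
        if (bmap->top[top]->dir[dir]->ent[idx] == INVAL_ADDR)
                return INVAL_ADDR;

        return bmap->top[top]->dir[dir]->ent[idx] |
               (pa & ((1UL << SECTSHIFT) - 1));
}
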
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
deleted file mode 100644 (file)
index 52bfa95..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  MR/MW declarations and inline functions
- *
- *  Authors: Dietmar Decker <ddecker@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _EHCA_MRMW_H_
-#define _EHCA_MRMW_H_
-
-enum ehca_reg_type {
-       EHCA_REG_MR,
-       EHCA_REG_BUSMAP_MR
-};
-
-int ehca_reg_mr(struct ehca_shca *shca,
-               struct ehca_mr *e_mr,
-               u64 *iova_start,
-               u64 size,
-               int acl,
-               struct ehca_pd *e_pd,
-               struct ehca_mr_pginfo *pginfo,
-               u32 *lkey,
-               u32 *rkey,
-               enum ehca_reg_type reg_type);
-
-int ehca_reg_mr_rpages(struct ehca_shca *shca,
-                      struct ehca_mr *e_mr,
-                      struct ehca_mr_pginfo *pginfo);
-
-int ehca_rereg_mr(struct ehca_shca *shca,
-                 struct ehca_mr *e_mr,
-                 u64 *iova_start,
-                 u64 size,
-                 int mr_access_flags,
-                 struct ehca_pd *e_pd,
-                 struct ehca_mr_pginfo *pginfo,
-                 u32 *lkey,
-                 u32 *rkey);
-
-int ehca_unmap_one_fmr(struct ehca_shca *shca,
-                      struct ehca_mr *e_fmr);
-
-int ehca_reg_smr(struct ehca_shca *shca,
-                struct ehca_mr *e_origmr,
-                struct ehca_mr *e_newmr,
-                u64 *iova_start,
-                int acl,
-                struct ehca_pd *e_pd,
-                u32 *lkey,
-                u32 *rkey);
-
-int ehca_reg_internal_maxmr(struct ehca_shca *shca,
-                           struct ehca_pd *e_pd,
-                           struct ehca_mr **maxmr);
-
-int ehca_reg_maxmr(struct ehca_shca *shca,
-                  struct ehca_mr *e_newmr,
-                  u64 *iova_start,
-                  int acl,
-                  struct ehca_pd *e_pd,
-                  u32 *lkey,
-                  u32 *rkey);
-
-int ehca_dereg_internal_maxmr(struct ehca_shca *shca);
-
-int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
-                            u64 *page_list,
-                            int list_len);
-
-int ehca_set_pagebuf(struct ehca_mr_pginfo *pginfo,
-                    u32 number,
-                    u64 *kpage);
-
-int ehca_mr_is_maxmr(u64 size,
-                    u64 *iova_start);
-
-void ehca_mrmw_map_acl(int ib_acl,
-                      u32 *hipz_acl);
-
-void ehca_mrmw_set_pgsize_hipz_acl(u32 pgsize, u32 *hipz_acl);
-
-void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
-                              int *ib_acl);
-
-void ehca_mr_deletenew(struct ehca_mr *mr);
-
-int ehca_create_busmap(void);
-
-void ehca_destroy_busmap(void);
-
-extern struct ib_dma_mapping_ops ehca_dma_mapping_ops;
-#endif  /*_EHCA_MRMW_H_*/
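
The registration entry points declared above take an iova_start/size pair together with a struct ehca_mr_pginfo whose num_kpages and num_hwpages fields the callers pre-compute, as in ehca_reg_internal_maxmr() earlier in this diff. A small, self-contained sketch of that sizing arithmetic follows; NUM_CHUNKS() is redefined locally as the usual round-up division, and both page sizes are example values, not queried from a real HCA.

/* Sketch: sizing a registration in kernel pages vs. hardware pages. */
#include <stdio.h>
#include <stdint.h>

#define NUM_CHUNKS(length, chunk_size) \
        (((length) + (chunk_size) - 1) / (chunk_size))

int main(void)
{
        uint64_t kpage_size  = 4096;         /* PAGE_SIZE                         */
        uint64_t hwpage_size = 64 * 1024;    /* assumed max HCA page size         */
        uint64_t iova_start  = 0x1000f234;   /* arbitrary, deliberately unaligned */
        uint64_t size        = 1 << 20;      /* 1 MiB region                      */

        /* the leading misalignment is added before rounding up, as in
         * ehca_reg_internal_maxmr() */
        uint64_t num_kpages  = NUM_CHUNKS((iova_start % kpage_size) + size,
                                          kpage_size);
        uint64_t num_hwpages = NUM_CHUNKS((iova_start % hwpage_size) + size,
                                          hwpage_size);

        printf("num_kpages=%llu num_hwpages=%llu\n",
               (unsigned long long)num_kpages,
               (unsigned long long)num_hwpages);
        return 0;
}

With these example values the sketch prints num_kpages=257 and num_hwpages=17 for the same 1 MiB region, which is why the two counts are tracked separately in the pginfo structure.
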
diff --git a/drivers/staging/rdma/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
deleted file mode 100644 (file)
index 2a8aae4..0000000
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  PD functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-
-static struct kmem_cache *pd_cache;
-
-struct ib_pd *ehca_alloc_pd(struct ib_device *device,
-                           struct ib_ucontext *context, struct ib_udata *udata)
-{
-       struct ehca_pd *pd;
-       int i;
-
-       pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
-       if (!pd) {
-               ehca_err(device, "device=%p context=%p out of memory",
-                        device, context);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       for (i = 0; i < 2; i++) {
-               INIT_LIST_HEAD(&pd->free[i]);
-               INIT_LIST_HEAD(&pd->full[i]);
-       }
-       mutex_init(&pd->lock);
-
-       /*
-        * Kernel PD: context == NULL
-        * User   PD: context != NULL
-        */
-       if (!context) {
-               /*
-                * After init, kernel PDs always reuse
-                * the one created in ehca_shca_reopen()
-                */
-               struct ehca_shca *shca = container_of(device, struct ehca_shca,
-                                                     ib_device);
-               pd->fw_pd.value = shca->pd->fw_pd.value;
-       } else
-               pd->fw_pd.value = (u64)pd;
-
-       return &pd->ib_pd;
-}
-
-int ehca_dealloc_pd(struct ib_pd *pd)
-{
-       struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
-       int i, leftovers = 0;
-       struct ipz_small_queue_page *page, *tmp;
-
-       for (i = 0; i < 2; i++) {
-               list_splice(&my_pd->full[i], &my_pd->free[i]);
-               list_for_each_entry_safe(page, tmp, &my_pd->free[i], list) {
-                       leftovers = 1;
-                       free_page(page->page);
-                       kmem_cache_free(small_qp_cache, page);
-               }
-       }
-
-       if (leftovers)
-               ehca_warn(pd->device,
-                         "Some small queue pages were not freed");
-
-       kmem_cache_free(pd_cache, my_pd);
-
-       return 0;
-}
-
-int ehca_init_pd_cache(void)
-{
-       pd_cache = kmem_cache_create("ehca_cache_pd",
-                                    sizeof(struct ehca_pd), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!pd_cache)
-               return -ENOMEM;
-       return 0;
-}
-
-void ehca_cleanup_pd_cache(void)
-{
-       kmem_cache_destroy(pd_cache);
-}
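
ehca_alloc_pd() and ehca_dealloc_pd() above move between the generic struct ib_pd the verbs layer sees and the driver-private struct ehca_pd that embeds it, using container_of() on the way back in. A minimal userspace sketch of that pattern, with simplified stand-in structs rather than the real ib_verbs and driver types:

/* Userspace model of the container_of() recovery used by ehca_dealloc_pd(). */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_pd   { void *device; };                      /* generic core object */
struct ehca_pd { unsigned long fw_pd_value; struct ib_pd ib_pd; };

int main(void)
{
        struct ehca_pd my_pd = { .fw_pd_value = 42 };
        struct ib_pd *pd = &my_pd.ib_pd;   /* what the verbs layer hands back */

        /* recover the driver-private wrapper from the embedded member */
        struct ehca_pd *recovered = container_of(pd, struct ehca_pd, ib_pd);
        printf("fw_pd_value=%lu\n", recovered->fw_pd_value);  /* prints 42 */
        return 0;
}
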
diff --git a/drivers/staging/rdma/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h
deleted file mode 100644 (file)
index 90c4efa..0000000
+++ /dev/null
@@ -1,260 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Hardware request structures
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef _EHCA_QES_H_
-#define _EHCA_QES_H_
-
-#include "ehca_tools.h"
-
-/* virtual scatter gather entry to specify remote addresses with length */
-struct ehca_vsgentry {
-       u64 vaddr;
-       u32 lkey;
-       u32 length;
-};
-
-#define GRH_FLAG_MASK        EHCA_BMASK_IBM( 7,  7)
-#define GRH_IPVERSION_MASK   EHCA_BMASK_IBM( 0,  3)
-#define GRH_TCLASS_MASK      EHCA_BMASK_IBM( 4, 12)
-#define GRH_FLOWLABEL_MASK   EHCA_BMASK_IBM(13, 31)
-#define GRH_PAYLEN_MASK      EHCA_BMASK_IBM(32, 47)
-#define GRH_NEXTHEADER_MASK  EHCA_BMASK_IBM(48, 55)
-#define GRH_HOPLIMIT_MASK    EHCA_BMASK_IBM(56, 63)
-
-/*
- * Unreliable Datagram Address Vector Format
- * see IBTA Vol1 chapter 8.3 Global Routing Header
- */
-struct ehca_ud_av {
-       u8 sl;
-       u8 lnh;
-       u16 dlid;
-       u8 reserved1;
-       u8 reserved2;
-       u8 reserved3;
-       u8 slid_path_bits;
-       u8 reserved4;
-       u8 ipd;
-       u8 reserved5;
-       u8 pmtu;
-       u32 reserved6;
-       u64 reserved7;
-       union {
-               struct {
-                       u64 word_0; /* always set to 6  */
-                       /* should be 0x1B for IB transport */
-                       u64 word_1;
-                       u64 word_2;
-                       u64 word_3;
-                       u64 word_4;
-               } grh;
-               struct {
-                       u32 wd_0;
-                       u32 wd_1;
-                       /* DWord_1 --> SGID */
-
-                       u32 sgid_wd3;
-                       u32 sgid_wd2;
-
-                       u32 sgid_wd1;
-                       u32 sgid_wd0;
-                       /* DWord_3 --> DGID */
-
-                       u32 dgid_wd3;
-                       u32 dgid_wd2;
-
-                       u32 dgid_wd1;
-                       u32 dgid_wd0;
-               } grh_l;
-       };
-};
-
-/* maximum number of sg entries allowed in a WQE */
-#define MAX_WQE_SG_ENTRIES 252
-
-#define WQE_OPTYPE_SEND             0x80
-#define WQE_OPTYPE_RDMAREAD         0x40
-#define WQE_OPTYPE_RDMAWRITE        0x20
-#define WQE_OPTYPE_CMPSWAP          0x10
-#define WQE_OPTYPE_FETCHADD         0x08
-#define WQE_OPTYPE_BIND             0x04
-
-#define WQE_WRFLAG_REQ_SIGNAL_COM   0x80
-#define WQE_WRFLAG_FENCE            0x40
-#define WQE_WRFLAG_IMM_DATA_PRESENT 0x20
-#define WQE_WRFLAG_SOLIC_EVENT      0x10
-
-#define WQEF_CACHE_HINT             0x80
-#define WQEF_CACHE_HINT_RD_WR       0x40
-#define WQEF_TIMED_WQE              0x20
-#define WQEF_PURGE                  0x08
-#define WQEF_HIGH_NIBBLE            0xF0
-
-#define MW_BIND_ACCESSCTRL_R_WRITE   0x40
-#define MW_BIND_ACCESSCTRL_R_READ    0x20
-#define MW_BIND_ACCESSCTRL_R_ATOMIC  0x10
-
-struct ehca_wqe {
-       u64 work_request_id;
-       u8 optype;
-       u8 wr_flag;
-       u16 pkeyi;
-       u8 wqef;
-       u8 nr_of_data_seg;
-       u16 wqe_provided_slid;
-       u32 destination_qp_number;
-       u32 resync_psn_sqp;
-       u32 local_ee_context_qkey;
-       u32 immediate_data;
-       union {
-               struct {
-                       u64 remote_virtual_address;
-                       u32 rkey;
-                       u32 reserved;
-                       u64 atomic_1st_op_dma_len;
-                       u64 atomic_2nd_op;
-                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-
-               } nud;
-               struct {
-                       u64 ehca_ud_av_ptr;
-                       u64 reserved1;
-                       u64 reserved2;
-                       u64 reserved3;
-                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-               } ud_avp;
-               struct {
-                       struct ehca_ud_av ud_av;
-                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES -
-                                                    2];
-               } ud_av;
-               struct {
-                       u64 reserved0;
-                       u64 reserved1;
-                       u64 reserved2;
-                       u64 reserved3;
-                       struct ehca_vsgentry sg_list[MAX_WQE_SG_ENTRIES];
-               } all_rcv;
-
-               struct {
-                       u64 reserved;
-                       u32 rkey;
-                       u32 old_rkey;
-                       u64 reserved1;
-                       u64 reserved2;
-                       u64 virtual_address;
-                       u32 reserved3;
-                       u32 length;
-                       u32 reserved4;
-                       u16 reserved5;
-                       u8 reserved6;
-                       u8 lr_ctl;
-                       u32 lkey;
-                       u32 reserved7;
-                       u64 reserved8;
-                       u64 reserved9;
-                       u64 reserved10;
-                       u64 reserved11;
-               } bind;
-               struct {
-                       u64 reserved12;
-                       u64 reserved13;
-                       u32 size;
-                       u32 start;
-               } inline_data;
-       } u;
-
-};
-
-#define WC_SEND_RECEIVE EHCA_BMASK_IBM(0, 0)
-#define WC_IMM_DATA     EHCA_BMASK_IBM(1, 1)
-#define WC_GRH_PRESENT  EHCA_BMASK_IBM(2, 2)
-#define WC_SE_BIT       EHCA_BMASK_IBM(3, 3)
-#define WC_STATUS_ERROR_BIT 0x80000000
-#define WC_STATUS_REMOTE_ERROR_FLAGS 0x0000F800
-#define WC_STATUS_PURGE_BIT 0x10
-#define WC_SEND_RECEIVE_BIT 0x80
-
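Judging only by the constant names above, 0x80000000 in a CQE's status word flags an error and 0x10 flags a purged WQE. A tiny stand-alone sketch of testing those bits; the DEMO_* names are stand-ins copied from the WC_STATUS_* values in this header.

/* Illustrative check of the completion status bits defined above. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_WC_STATUS_ERROR_BIT 0x80000000u
#define DEMO_WC_STATUS_PURGE_BIT 0x10u

static void classify_cqe(uint32_t status)
{
	if (status & DEMO_WC_STATUS_ERROR_BIT)
		printf("completion finished in error (status=0x%08x)\n", status);
	else if (status & DEMO_WC_STATUS_PURGE_BIT)
		printf("completion was purged (status=0x%08x)\n", status);
	else
		printf("successful completion (status=0x%08x)\n", status);
}

int main(void)
{
	classify_cqe(0x00000000);
	classify_cqe(0x80000010);
	return 0;
}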
-struct ehca_cqe {
-       u64 work_request_id;
-       u8 optype;
-       u8 w_completion_flags;
-       u16 reserved1;
-       u32 nr_bytes_transferred;
-       u32 immediate_data;
-       u32 local_qp_number;
-       u8 freed_resource_count;
-       u8 service_level;
-       u16 wqe_count;
-       u32 qp_token;
-       u32 qkey_ee_token;
-       u32 remote_qp_number;
-       u16 dlid;
-       u16 rlid;
-       u16 reserved2;
-       u16 pkey_index;
-       u32 cqe_timestamp;
-       u32 wqe_timestamp;
-       u8 wqe_timestamp_valid;
-       u8 reserved3;
-       u8 reserved4;
-       u8 cqe_flags;
-       u32 status;
-};
-
-struct ehca_eqe {
-       u64 entry;
-};
-
-struct ehca_mrte {
-       u64 starting_va;
-       u64 length; /* length of memory region in bytes*/
-       u32 pd;
-       u8 key_instance;
-       u8 pagesize;
-       u8 mr_control;
-       u8 local_remote_access_ctrl;
-       u8 reserved[0x20 - 0x18];
-       u64 at_pointer[4];
-};
-#endif /*_EHCA_QES_H_*/
diff --git a/drivers/staging/rdma/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
deleted file mode 100644 (file)
index 896c01f..0000000
+++ /dev/null
@@ -1,2256 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  QP functions
- *
- *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
- *           Stefan Roscher <stefan.roscher@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-
-static struct kmem_cache *qp_cache;
-
-/*
- * attributes not supported by query qp
- */
-#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS       | \
-                                    IB_QP_EN_SQD_ASYNC_NOTIFY)
-
-/*
- * ehca (internal) qp state values
- */
-enum ehca_qp_state {
-       EHCA_QPS_RESET = 1,
-       EHCA_QPS_INIT = 2,
-       EHCA_QPS_RTR = 3,
-       EHCA_QPS_RTS = 5,
-       EHCA_QPS_SQD = 6,
-       EHCA_QPS_SQE = 8,
-       EHCA_QPS_ERR = 128
-};
-
-/*
- * qp state transitions as defined by IB Arch Rel 1.1 page 431
- */
-enum ib_qp_statetrans {
-       IB_QPST_ANY2RESET,
-       IB_QPST_ANY2ERR,
-       IB_QPST_RESET2INIT,
-       IB_QPST_INIT2RTR,
-       IB_QPST_INIT2INIT,
-       IB_QPST_RTR2RTS,
-       IB_QPST_RTS2SQD,
-       IB_QPST_RTS2RTS,
-       IB_QPST_SQD2RTS,
-       IB_QPST_SQE2RTS,
-       IB_QPST_SQD2SQD,
-       IB_QPST_MAX     /* nr of transitions, this must be last!!! */
-};
-
-/*
- * ib2ehca_qp_state maps IB to ehca qp_state
- * returns ehca qp state corresponding to given ib qp state
- */
-static inline enum ehca_qp_state ib2ehca_qp_state(enum ib_qp_state ib_qp_state)
-{
-       switch (ib_qp_state) {
-       case IB_QPS_RESET:
-               return EHCA_QPS_RESET;
-       case IB_QPS_INIT:
-               return EHCA_QPS_INIT;
-       case IB_QPS_RTR:
-               return EHCA_QPS_RTR;
-       case IB_QPS_RTS:
-               return EHCA_QPS_RTS;
-       case IB_QPS_SQD:
-               return EHCA_QPS_SQD;
-       case IB_QPS_SQE:
-               return EHCA_QPS_SQE;
-       case IB_QPS_ERR:
-               return EHCA_QPS_ERR;
-       default:
-               ehca_gen_err("invalid ib_qp_state=%x", ib_qp_state);
-               return -EINVAL;
-       }
-}
-
-/*
- * ehca2ib_qp_state maps ehca to IB qp_state
- * returns ib qp state corresponding to given ehca qp state
- */
-static inline enum ib_qp_state ehca2ib_qp_state(enum ehca_qp_state
-                                               ehca_qp_state)
-{
-       switch (ehca_qp_state) {
-       case EHCA_QPS_RESET:
-               return IB_QPS_RESET;
-       case EHCA_QPS_INIT:
-               return IB_QPS_INIT;
-       case EHCA_QPS_RTR:
-               return IB_QPS_RTR;
-       case EHCA_QPS_RTS:
-               return IB_QPS_RTS;
-       case EHCA_QPS_SQD:
-               return IB_QPS_SQD;
-       case EHCA_QPS_SQE:
-               return IB_QPS_SQE;
-       case EHCA_QPS_ERR:
-               return IB_QPS_ERR;
-       default:
-               ehca_gen_err("invalid ehca_qp_state=%x", ehca_qp_state);
-               return -EINVAL;
-       }
-}
-
-/*
- * ehca_qp_type used as index for req_attr and opt_attr of
- * struct ehca_modqp_statetrans
- */
-enum ehca_qp_type {
-       QPT_RC = 0,
-       QPT_UC = 1,
-       QPT_UD = 2,
-       QPT_SQP = 3,
-       QPT_MAX
-};
-
-/*
- * ib2ehcaqptype maps Ib to ehca qp_type
- * returns ehca qp type corresponding to ib qp type
- */
-static inline enum ehca_qp_type ib2ehcaqptype(enum ib_qp_type ibqptype)
-{
-       switch (ibqptype) {
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               return QPT_SQP;
-       case IB_QPT_RC:
-               return QPT_RC;
-       case IB_QPT_UC:
-               return QPT_UC;
-       case IB_QPT_UD:
-               return QPT_UD;
-       default:
-               ehca_gen_err("Invalid ibqptype=%x", ibqptype);
-               return -EINVAL;
-       }
-}
-
-static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
-                                                        int ib_tostate)
-{
-       int index = -EINVAL;
-       switch (ib_tostate) {
-       case IB_QPS_RESET:
-               index = IB_QPST_ANY2RESET;
-               break;
-       case IB_QPS_INIT:
-               switch (ib_fromstate) {
-               case IB_QPS_RESET:
-                       index = IB_QPST_RESET2INIT;
-                       break;
-               case IB_QPS_INIT:
-                       index = IB_QPST_INIT2INIT;
-                       break;
-               }
-               break;
-       case IB_QPS_RTR:
-               if (ib_fromstate == IB_QPS_INIT)
-                       index = IB_QPST_INIT2RTR;
-               break;
-       case IB_QPS_RTS:
-               switch (ib_fromstate) {
-               case IB_QPS_RTR:
-                       index = IB_QPST_RTR2RTS;
-                       break;
-               case IB_QPS_RTS:
-                       index = IB_QPST_RTS2RTS;
-                       break;
-               case IB_QPS_SQD:
-                       index = IB_QPST_SQD2RTS;
-                       break;
-               case IB_QPS_SQE:
-                       index = IB_QPST_SQE2RTS;
-                       break;
-               }
-               break;
-       case IB_QPS_SQD:
-               if (ib_fromstate == IB_QPS_RTS)
-                       index = IB_QPST_RTS2SQD;
-               break;
-       case IB_QPS_SQE:
-               break;
-       case IB_QPS_ERR:
-               index = IB_QPST_ANY2ERR;
-               break;
-       default:
-               break;
-       }
-       return index;
-}
-
-/*
- * ibqptype2servicetype returns hcp service type corresponding to given
- * ib qp type used by create_qp()
- */
-static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
-{
-       switch (ibqptype) {
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               return ST_UD;
-       case IB_QPT_RC:
-               return ST_RC;
-       case IB_QPT_UC:
-               return ST_UC;
-       case IB_QPT_UD:
-               return ST_UD;
-       case IB_QPT_RAW_IPV6:
-               return -EINVAL;
-       case IB_QPT_RAW_ETHERTYPE:
-               return -EINVAL;
-       default:
-               ehca_gen_err("Invalid ibqptype=%x", ibqptype);
-               return -EINVAL;
-       }
-}
-
-/*
- * init userspace queue info from ipz_queue data
- */
-static inline void queue2resp(struct ipzu_queue_resp *resp,
-                             struct ipz_queue *queue)
-{
-       resp->qe_size = queue->qe_size;
-       resp->act_nr_of_sg = queue->act_nr_of_sg;
-       resp->queue_length = queue->queue_length;
-       resp->pagesize = queue->pagesize;
-       resp->toggle_state = queue->toggle_state;
-       resp->offset = queue->offset;
-}
-
-/*
- * init_qp_queue initializes/constructs r/squeue and registers queue pages.
- */
-static inline int init_qp_queue(struct ehca_shca *shca,
-                               struct ehca_pd *pd,
-                               struct ehca_qp *my_qp,
-                               struct ipz_queue *queue,
-                               int q_type,
-                               u64 expected_hret,
-                               struct ehca_alloc_queue_parms *parms,
-                               int wqe_size)
-{
-       int ret, cnt, ipz_rc, nr_q_pages;
-       void *vpage;
-       u64 rpage, h_ret;
-       struct ib_device *ib_dev = &shca->ib_device;
-       struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
-
-       if (!parms->queue_size)
-               return 0;
-
-       if (parms->is_small) {
-               nr_q_pages = 1;
-               ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
-                                       128 << parms->page_size,
-                                       wqe_size, parms->act_nr_sges, 1);
-       } else {
-               nr_q_pages = parms->queue_size;
-               ipz_rc = ipz_queue_ctor(pd, queue, nr_q_pages,
-                                       EHCA_PAGESIZE, wqe_size,
-                                       parms->act_nr_sges, 0);
-       }
-
-       if (!ipz_rc) {
-               ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%i",
-                        ipz_rc);
-               return -EBUSY;
-       }
-
-       /* register queue pages */
-       for (cnt = 0; cnt < nr_q_pages; cnt++) {
-               vpage = ipz_qpageit_get_inc(queue);
-               if (!vpage) {
-                       ehca_err(ib_dev, "ipz_qpageit_get_inc() "
-                                "failed p_vpage= %p", vpage);
-                       ret = -EINVAL;
-                       goto init_qp_queue1;
-               }
-               rpage = __pa(vpage);
-
-               h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
-                                                my_qp->ipz_qp_handle,
-                                                NULL, 0, q_type,
-                                                rpage, parms->is_small ? 0 : 1,
-                                                my_qp->galpas.kernel);
-               if (cnt == (nr_q_pages - 1)) {  /* last page! */
-                       if (h_ret != expected_hret) {
-                               ehca_err(ib_dev, "hipz_qp_register_rpage() "
-                                        "h_ret=%lli", h_ret);
-                               ret = ehca2ib_return_code(h_ret);
-                               goto init_qp_queue1;
-                       }
-                       vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
-                       if (vpage) {
-                               ehca_err(ib_dev, "ipz_qpageit_get_inc() "
-                                        "should not succeed vpage=%p", vpage);
-                               ret = -EINVAL;
-                               goto init_qp_queue1;
-                       }
-               } else {
-                       if (h_ret != H_PAGE_REGISTERED) {
-                               ehca_err(ib_dev, "hipz_qp_register_rpage() "
-                                        "h_ret=%lli", h_ret);
-                               ret = ehca2ib_return_code(h_ret);
-                               goto init_qp_queue1;
-                       }
-               }
-       }
-
-       ipz_qeit_reset(queue);
-
-       return 0;
-
-init_qp_queue1:
-       ipz_queue_dtor(pd, queue);
-       return ret;
-}
-
-static inline int ehca_calc_wqe_size(int act_nr_sge, int is_llqp)
-{
-       if (is_llqp)
-               return 128 << act_nr_sge;
-       else
-               return offsetof(struct ehca_wqe,
-                               u.nud.sg_list[act_nr_sge]);
-}
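The non-LLQP branch above sizes a WQE by where the requested scatter/gather entries end inside struct ehca_wqe; the LLQP branch simply uses 128 << act_nr_sge. A stand-alone sketch of the same sizing technique, using a stand-in struct rather than the real ehca_wqe layout:

/*
 * Stand-alone sketch: size a work request by the end of its SG list,
 * equivalent to the driver's offsetof(struct ehca_wqe, u.nud.sg_list[n]).
 * 'struct demo_wqe' is a stand-in for illustration only.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_sge {
	uint64_t vaddr;
	uint32_t lkey;
	uint32_t length;
};

struct demo_wqe {
	uint64_t work_request_id;
	uint32_t flags;
	uint32_t reserved;
	struct demo_sge sg_list[252];	/* same cap as MAX_WQE_SG_ENTRIES */
};

static size_t demo_wqe_size(int act_nr_sge, int is_llqp)
{
	if (is_llqp)
		return (size_t)128 << act_nr_sge;
	return offsetof(struct demo_wqe, sg_list)
	       + (size_t)act_nr_sge * sizeof(struct demo_sge);
}

int main(void)
{
	printf("4 SGEs, normal QP: %zu bytes\n", demo_wqe_size(4, 0));
	printf("2 SGEs, LLQP:      %zu bytes\n", demo_wqe_size(2, 1));
	return 0;
}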
-
-static void ehca_determine_small_queue(struct ehca_alloc_queue_parms *queue,
-                                      int req_nr_sge, int is_llqp)
-{
-       u32 wqe_size, q_size;
-       int act_nr_sge = req_nr_sge;
-
-       if (!is_llqp)
-               /* round up #SGEs so WQE size is a power of 2 */
-               for (act_nr_sge = 4; act_nr_sge <= 252;
-                    act_nr_sge = 4 + 2 * act_nr_sge)
-                       if (act_nr_sge >= req_nr_sge)
-                               break;
-
-       wqe_size = ehca_calc_wqe_size(act_nr_sge, is_llqp);
-       q_size = wqe_size * (queue->max_wr + 1);
-
-       if (q_size <= 512)
-               queue->page_size = 2;
-       else if (q_size <= 1024)
-               queue->page_size = 3;
-       else
-               queue->page_size = 0;
-
-       queue->is_small = (queue->page_size != 0);
-}
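The loop above rounds the requested SGE count up through 4, 12, 28, 60, 124, 252 so the resulting WQE size is a power of two, then classifies the queue: page_size code 2 for up to 512 bytes of queue, 3 for up to 1 KB, and 0 (regular pages, not a small queue) otherwise. A stand-alone sketch of just that rounding and classification; the per-WQE size is left as a parameter, since it depends on the real struct layout.

/* Stand-alone sketch of the SGE rounding and small-queue decision above. */
#include <stdio.h>

static int round_up_sges(int req_nr_sge)
{
	int act;

	/* same progression as the driver: 4, 12, 28, 60, 124, 252 */
	for (act = 4; act <= 252; act = 4 + 2 * act)
		if (act >= req_nr_sge)
			break;
	return act;
}

static int pick_page_size(unsigned int q_size)
{
	if (q_size <= 512)
		return 2;
	if (q_size <= 1024)
		return 3;
	return 0;	/* not a small queue */
}

int main(void)
{
	unsigned int wqe_size = 128;	/* assumed per-WQE size for the example */
	int max_wr = 3;
	int act = round_up_sges(5);
	unsigned int q_size = wqe_size * (max_wr + 1);

	printf("req=5 SGEs -> act=%d, q_size=%u, page_size code=%d\n",
	       act, q_size, pick_page_size(q_size));
	return 0;
}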
-
-/* needs to be called with cq->spinlock held */
-void ehca_add_to_err_list(struct ehca_qp *qp, int on_sq)
-{
-       struct list_head *list, *node;
-
-       /* TODO: support low latency QPs */
-       if (qp->ext_type == EQPT_LLQP)
-               return;
-
-       if (on_sq) {
-               list = &qp->send_cq->sqp_err_list;
-               node = &qp->sq_err_node;
-       } else {
-               list = &qp->recv_cq->rqp_err_list;
-               node = &qp->rq_err_node;
-       }
-
-       if (list_empty(node))
-               list_add_tail(node, list);
-
-       return;
-}
-
-static void del_from_err_list(struct ehca_cq *cq, struct list_head *node)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&cq->spinlock, flags);
-
-       if (!list_empty(node))
-               list_del_init(node);
-
-       spin_unlock_irqrestore(&cq->spinlock, flags);
-}
-
-static void reset_queue_map(struct ehca_queue_map *qmap)
-{
-       int i;
-
-       qmap->tail = qmap->entries - 1;
-       qmap->left_to_poll = 0;
-       qmap->next_wqe_idx = 0;
-       for (i = 0; i < qmap->entries; i++) {
-               qmap->map[i].reported = 1;
-               qmap->map[i].cqe_req = 0;
-       }
-}
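reset_queue_map() re-arms the software bookkeeping that mirrors a hardware queue: the tail points at the last slot, nothing is left to poll, and every slot is marked as already reported so no stale flush CQEs are produced. The map itself is sized as queue_length / qe_size (see the sq_map/rq_map setup further down in internal_create_qp()). A small stand-alone sketch of that bookkeeping, with demo_* names standing in for the driver's structures:

/*
 * Stand-alone sketch of the per-WQE bookkeeping reset above: size the map
 * from the queue geometry and mark every slot as already reported.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_qmap_entry {
	int reported;
	int cqe_req;
};

struct demo_queue_map {
	unsigned int entries;
	unsigned int tail;
	unsigned int left_to_poll;
	unsigned int next_wqe_idx;
	struct demo_qmap_entry *map;
};

static int demo_qmap_init(struct demo_queue_map *qmap,
			  unsigned int queue_length, unsigned int qe_size)
{
	unsigned int i;

	qmap->entries = queue_length / qe_size;
	qmap->map = calloc(qmap->entries, sizeof(*qmap->map));
	if (!qmap->map)
		return -1;

	qmap->tail = qmap->entries - 1;
	qmap->left_to_poll = 0;
	qmap->next_wqe_idx = 0;
	for (i = 0; i < qmap->entries; i++) {
		qmap->map[i].reported = 1;	/* nothing owes a flush CQE */
		qmap->map[i].cqe_req = 0;
	}
	return 0;
}

int main(void)
{
	struct demo_queue_map qmap;

	if (demo_qmap_init(&qmap, 4096, 128))
		return 1;
	printf("entries=%u tail=%u\n", qmap.entries, qmap.tail);
	free(qmap.map);
	return 0;
}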
-
-/*
- * Create an ib_qp struct that is either a QP or an SRQ, depending on
- * the value of the is_srq parameter. If init_attr and srq_init_attr share
- * fields, the value from init_attr is used.
- */
-static struct ehca_qp *internal_create_qp(
-       struct ib_pd *pd,
-       struct ib_qp_init_attr *init_attr,
-       struct ib_srq_init_attr *srq_init_attr,
-       struct ib_udata *udata, int is_srq)
-{
-       struct ehca_qp *my_qp, *my_srq = NULL;
-       struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
-       struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
-                                             ib_device);
-       struct ib_ucontext *context = NULL;
-       u64 h_ret;
-       int is_llqp = 0, has_srq = 0, is_user = 0;
-       int qp_type, max_send_sge, max_recv_sge, ret;
-
-       /* h_call's out parameters */
-       struct ehca_alloc_qp_parms parms;
-       u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
-       unsigned long flags;
-
-       if (!atomic_add_unless(&shca->num_qps, 1, shca->max_num_qps)) {
-               ehca_err(pd->device, "Unable to create QP, max number of %i "
-                        "QPs reached.", shca->max_num_qps);
-               ehca_err(pd->device, "To increase the maximum number of QPs "
-                        "use the number_of_qps module parameter.\n");
-               return ERR_PTR(-ENOSPC);
-       }
-
-       if (init_attr->create_flags) {
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-EINVAL);
-       }
-
-       memset(&parms, 0, sizeof(parms));
-       qp_type = init_attr->qp_type;
-
-       if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
-               init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
-               ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
-                        init_attr->sq_sig_type);
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-EINVAL);
-       }
-
-       /* save LLQP info */
-       if (qp_type & 0x80) {
-               is_llqp = 1;
-               parms.ext_type = EQPT_LLQP;
-               parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
-       }
-       qp_type &= 0x1F;
-       init_attr->qp_type &= 0x1F;
-
-       /* handle SRQ base QPs */
-       if (init_attr->srq) {
-               my_srq = container_of(init_attr->srq, struct ehca_qp, ib_srq);
-
-               if (qp_type == IB_QPT_UC) {
-                       ehca_err(pd->device, "UC with SRQ not supported");
-                       atomic_dec(&shca->num_qps);
-                       return ERR_PTR(-EINVAL);
-               }
-
-               has_srq = 1;
-               parms.ext_type = EQPT_SRQBASE;
-               parms.srq_qpn = my_srq->real_qp_num;
-       }
-
-       if (is_llqp && has_srq) {
-               ehca_err(pd->device, "LLQPs can't have an SRQ");
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-EINVAL);
-       }
-
-       /* handle SRQs */
-       if (is_srq) {
-               parms.ext_type = EQPT_SRQ;
-               parms.srq_limit = srq_init_attr->attr.srq_limit;
-               if (init_attr->cap.max_recv_sge > 3) {
-                       ehca_err(pd->device, "no more than three SGEs "
-                                "supported for SRQ  pd=%p  max_sge=%x",
-                                pd, init_attr->cap.max_recv_sge);
-                       atomic_dec(&shca->num_qps);
-                       return ERR_PTR(-EINVAL);
-               }
-       }
-
-       /* check QP type */
-       if (qp_type != IB_QPT_UD &&
-           qp_type != IB_QPT_UC &&
-           qp_type != IB_QPT_RC &&
-           qp_type != IB_QPT_SMI &&
-           qp_type != IB_QPT_GSI) {
-               ehca_err(pd->device, "wrong QP Type=%x", qp_type);
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-EINVAL);
-       }
-
-       if (is_llqp) {
-               switch (qp_type) {
-               case IB_QPT_RC:
-                       if ((init_attr->cap.max_send_wr > 255) ||
-                           (init_attr->cap.max_recv_wr > 255)) {
-                               ehca_err(pd->device,
-                                        "Invalid Number of max_sq_wr=%x "
-                                        "or max_rq_wr=%x for RC LLQP",
-                                        init_attr->cap.max_send_wr,
-                                        init_attr->cap.max_recv_wr);
-                               atomic_dec(&shca->num_qps);
-                               return ERR_PTR(-EINVAL);
-                       }
-                       break;
-               case IB_QPT_UD:
-                       if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
-                               ehca_err(pd->device, "UD LLQP not supported "
-                                        "by this adapter");
-                               atomic_dec(&shca->num_qps);
-                               return ERR_PTR(-ENOSYS);
-                       }
-                       if (!(init_attr->cap.max_send_sge <= 5
-                           && init_attr->cap.max_send_sge >= 1
-                           && init_attr->cap.max_recv_sge <= 5
-                           && init_attr->cap.max_recv_sge >= 1)) {
-                               ehca_err(pd->device,
-                                        "Invalid Number of max_send_sge=%x "
-                                        "or max_recv_sge=%x for UD LLQP",
-                                        init_attr->cap.max_send_sge,
-                                        init_attr->cap.max_recv_sge);
-                               atomic_dec(&shca->num_qps);
-                               return ERR_PTR(-EINVAL);
-                       } else if (init_attr->cap.max_send_wr > 255) {
-                               ehca_err(pd->device,
-                                        "Invalid Number of "
-                                        "max_send_wr=%x for UD QP_TYPE=%x",
-                                        init_attr->cap.max_send_wr, qp_type);
-                               atomic_dec(&shca->num_qps);
-                               return ERR_PTR(-EINVAL);
-                       }
-                       break;
-               default:
-                       ehca_err(pd->device, "unsupported LL QP Type=%x",
-                                qp_type);
-                       atomic_dec(&shca->num_qps);
-                       return ERR_PTR(-EINVAL);
-               }
-       } else {
-               int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
-                              || qp_type == IB_QPT_GSI) ? 250 : 252;
-
-               if (init_attr->cap.max_send_sge > max_sge
-                   || init_attr->cap.max_recv_sge > max_sge) {
-                       ehca_err(pd->device, "Invalid number of SGEs requested "
-                                "send_sge=%x recv_sge=%x max_sge=%x",
-                                init_attr->cap.max_send_sge,
-                                init_attr->cap.max_recv_sge, max_sge);
-                       atomic_dec(&shca->num_qps);
-                       return ERR_PTR(-EINVAL);
-               }
-       }
-
-       my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
-       if (!my_qp) {
-               ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
-               atomic_dec(&shca->num_qps);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       if (pd->uobject && udata) {
-               is_user = 1;
-               context = pd->uobject->context;
-       }
-
-       atomic_set(&my_qp->nr_events, 0);
-       init_waitqueue_head(&my_qp->wait_completion);
-       spin_lock_init(&my_qp->spinlock_s);
-       spin_lock_init(&my_qp->spinlock_r);
-       my_qp->qp_type = qp_type;
-       my_qp->ext_type = parms.ext_type;
-       my_qp->state = IB_QPS_RESET;
-
-       if (init_attr->recv_cq)
-               my_qp->recv_cq =
-                       container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
-       if (init_attr->send_cq)
-               my_qp->send_cq =
-                       container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
-
-       idr_preload(GFP_KERNEL);
-       write_lock_irqsave(&ehca_qp_idr_lock, flags);
-
-       ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT);
-       if (ret >= 0)
-               my_qp->token = ret;
-
-       write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-       idr_preload_end();
-       if (ret < 0) {
-               if (ret == -ENOSPC) {
-                       ret = -EINVAL;
-                       ehca_err(pd->device, "Invalid number of qp");
-               } else {
-                       ret = -ENOMEM;
-                       ehca_err(pd->device, "Can't allocate new idr entry.");
-               }
-               goto create_qp_exit0;
-       }
-
-       if (has_srq)
-               parms.srq_token = my_qp->token;
-
-       parms.servicetype = ibqptype2servicetype(qp_type);
-       if (parms.servicetype < 0) {
-               ret = -EINVAL;
-               ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
-               goto create_qp_exit1;
-       }
-
-       /* Always signal by WQE so we can hide circ. WQEs */
-       parms.sigtype = HCALL_SIGT_BY_WQE;
-
-       /* UD_AV CIRCUMVENTION */
-       max_send_sge = init_attr->cap.max_send_sge;
-       max_recv_sge = init_attr->cap.max_recv_sge;
-       if (parms.servicetype == ST_UD && !is_llqp) {
-               max_send_sge += 2;
-               max_recv_sge += 2;
-       }
-
-       parms.token = my_qp->token;
-       parms.eq_handle = shca->eq.ipz_eq_handle;
-       parms.pd = my_pd->fw_pd;
-       if (my_qp->send_cq)
-               parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
-       if (my_qp->recv_cq)
-               parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
-
-       parms.squeue.max_wr = init_attr->cap.max_send_wr;
-       parms.rqueue.max_wr = init_attr->cap.max_recv_wr;
-       parms.squeue.max_sge = max_send_sge;
-       parms.rqueue.max_sge = max_recv_sge;
-
-       /* RC QPs need one more SWQE for unsolicited ack circumvention */
-       if (qp_type == IB_QPT_RC)
-               parms.squeue.max_wr++;
-
-       if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
-               if (HAS_SQ(my_qp))
-                       ehca_determine_small_queue(
-                               &parms.squeue, max_send_sge, is_llqp);
-               if (HAS_RQ(my_qp))
-                       ehca_determine_small_queue(
-                               &parms.rqueue, max_recv_sge, is_llqp);
-               parms.qp_storage =
-                       (parms.squeue.is_small || parms.rqueue.is_small);
-       }
-
-       h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
-                        h_ret);
-               ret = ehca2ib_return_code(h_ret);
-               goto create_qp_exit1;
-       }
-
-       ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
-       my_qp->ipz_qp_handle = parms.qp_handle;
-       my_qp->galpas = parms.galpas;
-
-       swqe_size = ehca_calc_wqe_size(parms.squeue.act_nr_sges, is_llqp);
-       rwqe_size = ehca_calc_wqe_size(parms.rqueue.act_nr_sges, is_llqp);
-
-       switch (qp_type) {
-       case IB_QPT_RC:
-               if (is_llqp) {
-                       parms.squeue.act_nr_sges = 1;
-                       parms.rqueue.act_nr_sges = 1;
-               }
-               /* hide the extra WQE */
-               parms.squeue.act_nr_wqes--;
-               break;
-       case IB_QPT_UD:
-       case IB_QPT_GSI:
-       case IB_QPT_SMI:
-               /* UD circumvention */
-               if (is_llqp) {
-                       parms.squeue.act_nr_sges = 1;
-                       parms.rqueue.act_nr_sges = 1;
-               } else {
-                       parms.squeue.act_nr_sges -= 2;
-                       parms.rqueue.act_nr_sges -= 2;
-               }
-
-               if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
-                       parms.squeue.act_nr_wqes = init_attr->cap.max_send_wr;
-                       parms.rqueue.act_nr_wqes = init_attr->cap.max_recv_wr;
-                       parms.squeue.act_nr_sges = init_attr->cap.max_send_sge;
-                       parms.rqueue.act_nr_sges = init_attr->cap.max_recv_sge;
-                       ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
-               }
-
-               break;
-
-       default:
-               break;
-       }
-
-       /* initialize r/squeue and register queue pages */
-       if (HAS_SQ(my_qp)) {
-               ret = init_qp_queue(
-                       shca, my_pd, my_qp, &my_qp->ipz_squeue, 0,
-                       HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
-                       &parms.squeue, swqe_size);
-               if (ret) {
-                       ehca_err(pd->device, "Couldn't initialize squeue "
-                                "and pages ret=%i", ret);
-                       goto create_qp_exit2;
-               }
-
-               if (!is_user) {
-                       my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-                               my_qp->ipz_squeue.qe_size;
-                       my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-                                                   sizeof(struct ehca_qmap_entry));
-                       if (!my_qp->sq_map.map) {
-                               ehca_err(pd->device, "Couldn't allocate squeue "
-                                        "map ret=%i", ret);
-                               goto create_qp_exit3;
-                       }
-                       INIT_LIST_HEAD(&my_qp->sq_err_node);
-                       /* to avoid the generation of bogus flush CQEs */
-                       reset_queue_map(&my_qp->sq_map);
-               }
-       }
-
-       if (HAS_RQ(my_qp)) {
-               ret = init_qp_queue(
-                       shca, my_pd, my_qp, &my_qp->ipz_rqueue, 1,
-                       H_SUCCESS, &parms.rqueue, rwqe_size);
-               if (ret) {
-                       ehca_err(pd->device, "Couldn't initialize rqueue "
-                                "and pages ret=%i", ret);
-                       goto create_qp_exit4;
-               }
-               if (!is_user) {
-                       my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
-                               my_qp->ipz_rqueue.qe_size;
-                       my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
-                                                   sizeof(struct ehca_qmap_entry));
-                       if (!my_qp->rq_map.map) {
-                               ehca_err(pd->device, "Couldn't allocate rqueue "
-                                        "map ret=%i", ret);
-                               goto create_qp_exit5;
-                       }
-                       INIT_LIST_HEAD(&my_qp->rq_err_node);
-                       /* to avoid the generation of bogus flush CQEs */
-                       reset_queue_map(&my_qp->rq_map);
-               }
-       } else if (init_attr->srq && !is_user) {
-               /* this is a base QP, use the queue map of the SRQ */
-               my_qp->rq_map = my_srq->rq_map;
-               INIT_LIST_HEAD(&my_qp->rq_err_node);
-
-               my_qp->ipz_rqueue = my_srq->ipz_rqueue;
-       }
-
-       if (is_srq) {
-               my_qp->ib_srq.pd = &my_pd->ib_pd;
-               my_qp->ib_srq.device = my_pd->ib_pd.device;
-
-               my_qp->ib_srq.srq_context = init_attr->qp_context;
-               my_qp->ib_srq.event_handler = init_attr->event_handler;
-       } else {
-               my_qp->ib_qp.qp_num = ib_qp_num;
-               my_qp->ib_qp.pd = &my_pd->ib_pd;
-               my_qp->ib_qp.device = my_pd->ib_pd.device;
-
-               my_qp->ib_qp.recv_cq = init_attr->recv_cq;
-               my_qp->ib_qp.send_cq = init_attr->send_cq;
-
-               my_qp->ib_qp.qp_type = qp_type;
-               my_qp->ib_qp.srq = init_attr->srq;
-
-               my_qp->ib_qp.qp_context = init_attr->qp_context;
-               my_qp->ib_qp.event_handler = init_attr->event_handler;
-       }
-
-       init_attr->cap.max_inline_data = 0; /* not supported yet */
-       init_attr->cap.max_recv_sge = parms.rqueue.act_nr_sges;
-       init_attr->cap.max_recv_wr = parms.rqueue.act_nr_wqes;
-       init_attr->cap.max_send_sge = parms.squeue.act_nr_sges;
-       init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
-       my_qp->init_attr = *init_attr;
-
-       if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
-               shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
-                       &my_qp->ib_qp;
-               if (ehca_nr_ports < 0) {
-                       /* alloc array to cache subsequent modify qp parms
-                        * for autodetect mode
-                        */
-                       my_qp->mod_qp_parm =
-                               kzalloc(EHCA_MOD_QP_PARM_MAX *
-                                       sizeof(*my_qp->mod_qp_parm),
-                                       GFP_KERNEL);
-                       if (!my_qp->mod_qp_parm) {
-                               ehca_err(pd->device,
-                                        "Could not alloc mod_qp_parm");
-                               goto create_qp_exit5;
-                       }
-               }
-       }
-
-       /* NOTE: define_apq0() not supported yet */
-       if (qp_type == IB_QPT_GSI) {
-               h_ret = ehca_define_sqp(shca, my_qp, init_attr);
-               if (h_ret != H_SUCCESS) {
-                       kfree(my_qp->mod_qp_parm);
-                       my_qp->mod_qp_parm = NULL;
-                       /* the QP pointer is no longer valid */
-                       shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
-                               NULL;
-                       ret = ehca2ib_return_code(h_ret);
-                       goto create_qp_exit6;
-               }
-       }
-
-       if (my_qp->send_cq) {
-               ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
-               if (ret) {
-                       ehca_err(pd->device,
-                                "Couldn't assign qp to send_cq ret=%i", ret);
-                       goto create_qp_exit7;
-               }
-       }
-
-       /* copy queues, galpa data to user space */
-       if (context && udata) {
-               struct ehca_create_qp_resp resp;
-               memset(&resp, 0, sizeof(resp));
-
-               resp.qp_num = my_qp->real_qp_num;
-               resp.token = my_qp->token;
-               resp.qp_type = my_qp->qp_type;
-               resp.ext_type = my_qp->ext_type;
-               resp.qkey = my_qp->qkey;
-               resp.real_qp_num = my_qp->real_qp_num;
-
-               if (HAS_SQ(my_qp))
-                       queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
-               if (HAS_RQ(my_qp))
-                       queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
-               resp.fw_handle_ofs = (u32)
-                       (my_qp->galpas.user.fw_handle & (PAGE_SIZE - 1));
-
-               if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
-                       ehca_err(pd->device, "Copy to udata failed");
-                       ret = -EINVAL;
-                       goto create_qp_exit8;
-               }
-       }
-
-       return my_qp;
-
-create_qp_exit8:
-       ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
-
-create_qp_exit7:
-       kfree(my_qp->mod_qp_parm);
-
-create_qp_exit6:
-       if (HAS_RQ(my_qp) && !is_user)
-               vfree(my_qp->rq_map.map);
-
-create_qp_exit5:
-       if (HAS_RQ(my_qp))
-               ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-create_qp_exit4:
-       if (HAS_SQ(my_qp) && !is_user)
-               vfree(my_qp->sq_map.map);
-
-create_qp_exit3:
-       if (HAS_SQ(my_qp))
-               ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-create_qp_exit2:
-       hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
-
-create_qp_exit1:
-       write_lock_irqsave(&ehca_qp_idr_lock, flags);
-       idr_remove(&ehca_qp_idr, my_qp->token);
-       write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
-create_qp_exit0:
-       kmem_cache_free(qp_cache, my_qp);
-       atomic_dec(&shca->num_qps);
-       return ERR_PTR(ret);
-}
-
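internal_create_qp() unwinds its error paths through the numbered create_qp_exitN labels, each label releasing exactly the resources acquired before the failing step, in reverse order of acquisition. A minimal, self-contained sketch of that goto-unwind idiom in isolation (the allocations here are placeholders, not the driver's resources):

/*
 * Minimal sketch of the goto-unwind idiom used above: acquire resources in
 * order, and on failure jump to the label that releases everything acquired
 * so far, in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

static int setup_two_buffers(size_t n)
{
	int ret = 0;
	char *a, *b;

	a = malloc(n);
	if (!a) {
		ret = -1;
		goto exit0;	/* nothing acquired yet */
	}

	b = malloc(n);
	if (!b) {
		ret = -1;
		goto exit1;	/* only 'a' needs to be released */
	}

	/* ... use a and b ... */
	free(b);
	free(a);
	return 0;

exit1:
	free(a);
exit0:
	return ret;
}

int main(void)
{
	return setup_two_buffers(64) ? EXIT_FAILURE : EXIT_SUCCESS;
}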
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
-                            struct ib_qp_init_attr *qp_init_attr,
-                            struct ib_udata *udata)
-{
-       struct ehca_qp *ret;
-
-       ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
-       return IS_ERR(ret) ? (struct ib_qp *)ret : &ret->ib_qp;
-}
-
-static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
-                              struct ib_uobject *uobject);
-
-struct ib_srq *ehca_create_srq(struct ib_pd *pd,
-                              struct ib_srq_init_attr *srq_init_attr,
-                              struct ib_udata *udata)
-{
-       struct ib_qp_init_attr qp_init_attr;
-       struct ehca_qp *my_qp;
-       struct ib_srq *ret;
-       struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
-                                             ib_device);
-       struct hcp_modify_qp_control_block *mqpcb;
-       u64 hret, update_mask;
-
-       if (srq_init_attr->srq_type != IB_SRQT_BASIC)
-               return ERR_PTR(-ENOSYS);
-
-       /* For common attributes, internal_create_qp() takes its info
-        * out of qp_init_attr, so copy all common attrs there.
-        */
-       memset(&qp_init_attr, 0, sizeof(qp_init_attr));
-       qp_init_attr.event_handler = srq_init_attr->event_handler;
-       qp_init_attr.qp_context = srq_init_attr->srq_context;
-       qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
-       qp_init_attr.qp_type = IB_QPT_RC;
-       qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
-       qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;
-
-       my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
-       if (IS_ERR(my_qp))
-               return (struct ib_srq *)my_qp;
-
-       /* copy back return values */
-       srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
-       srq_init_attr->attr.max_sge = 3;
-
-       /* drive SRQ into RTR state */
-       mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!mqpcb) {
-               ehca_err(pd->device, "Could not get zeroed page for mqpcb "
-                        "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
-               ret = ERR_PTR(-ENOMEM);
-               goto create_srq1;
-       }
-
-       mqpcb->qp_state = EHCA_QPS_INIT;
-       mqpcb->prim_phys_port = 1;
-       update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
-       hret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               update_mask,
-                               mqpcb, my_qp->galpas.kernel);
-       if (hret != H_SUCCESS) {
-               ehca_err(pd->device, "Could not modify SRQ to INIT "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, my_qp->real_qp_num, hret);
-               goto create_srq2;
-       }
-
-       mqpcb->qp_enable = 1;
-       update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
-       hret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               update_mask,
-                               mqpcb, my_qp->galpas.kernel);
-       if (hret != H_SUCCESS) {
-               ehca_err(pd->device, "Could not enable SRQ "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, my_qp->real_qp_num, hret);
-               goto create_srq2;
-       }
-
-       mqpcb->qp_state  = EHCA_QPS_RTR;
-       update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
-       hret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               update_mask,
-                               mqpcb, my_qp->galpas.kernel);
-       if (hret != H_SUCCESS) {
-               ehca_err(pd->device, "Could not modify SRQ to RTR "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, my_qp->real_qp_num, hret);
-               goto create_srq2;
-       }
-
-       ehca_free_fw_ctrlblock(mqpcb);
-
-       return &my_qp->ib_srq;
-
-create_srq2:
-       ret = ERR_PTR(ehca2ib_return_code(hret));
-       ehca_free_fw_ctrlblock(mqpcb);
-
-create_srq1:
-       internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);
-
-       return ret;
-}
-
-/*
- * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
- * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
- * returns total number of bad wqes in bad_wqe_cnt
- */
-static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
-                          int *bad_wqe_cnt)
-{
-       u64 h_ret;
-       struct ipz_queue *squeue;
-       void *bad_send_wqe_p, *bad_send_wqe_v;
-       u64 q_ofs;
-       struct ehca_wqe *wqe;
-       int qp_num = my_qp->ib_qp.qp_num;
-
-       /* get send wqe pointer */
-       h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
-                                          my_qp->ipz_qp_handle, &my_qp->pf,
-                                          &bad_send_wqe_p, NULL, 2);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(&shca->ib_device, "hipz_h_disable_and_get_wqe() failed"
-                        " ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, qp_num, h_ret);
-               return ehca2ib_return_code(h_ret);
-       }
-       bad_send_wqe_p = (void *)((u64)bad_send_wqe_p & (~(1L << 63)));
-       ehca_dbg(&shca->ib_device, "qp_num=%x bad_send_wqe_p=%p",
-                qp_num, bad_send_wqe_p);
-       /* convert wqe pointer to vadr */
-       bad_send_wqe_v = __va((u64)bad_send_wqe_p);
-       if (ehca_debug_level >= 2)
-               ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
-       squeue = &my_qp->ipz_squeue;
-       if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
-               ehca_err(&shca->ib_device, "failed to get wqe offset qp_num=%x"
-                        " bad_send_wqe_p=%p", qp_num, bad_send_wqe_p);
-               return -EFAULT;
-       }
-
-       /* loop sets wqe's purge bit */
-       wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
-       *bad_wqe_cnt = 0;
-       while (wqe->optype != 0xff && wqe->wqef != 0xff) {
-               if (ehca_debug_level >= 2)
-                       ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
-               wqe->nr_of_data_seg = 0; /* suppress data access */
-               wqe->wqef = WQEF_PURGE; /* WQE to be purged */
-               q_ofs = ipz_queue_advance_offset(squeue, q_ofs);
-               wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
-               *bad_wqe_cnt = (*bad_wqe_cnt)+1;
-       }
-       /*
-        * bad wqe will be reprocessed and ignored when poll_cq() is called,
-        *  i.e. nr of wqes with flush error status is one less
-        */
-       ehca_dbg(&shca->ib_device, "qp_num=%x flusherr_wqe_cnt=%x",
-                qp_num, (*bad_wqe_cnt)-1);
-       wqe->wqef = 0;
-
-       return 0;
-}
-
-static int calc_left_cqes(u64 wqe_p, struct ipz_queue *ipz_queue,
-                         struct ehca_queue_map *qmap)
-{
-       void *wqe_v;
-       u64 q_ofs;
-       u32 wqe_idx;
-       unsigned int tail_idx;
-
-       /* convert real to abs address */
-       wqe_p = wqe_p & (~(1UL << 63));
-
-       wqe_v = __va(wqe_p);
-
-       if (ipz_queue_abs_to_offset(ipz_queue, wqe_p, &q_ofs)) {
-               ehca_gen_err("Invalid offset for calculating left cqes "
-                               "wqe_p=%#llx wqe_v=%p\n", wqe_p, wqe_v);
-               return -EFAULT;
-       }
-
-       tail_idx = next_index(qmap->tail, qmap->entries);
-       wqe_idx = q_ofs / ipz_queue->qe_size;
-
-       /* check all processed wqes, whether a cqe is requested or not */
-       while (tail_idx != wqe_idx) {
-               if (qmap->map[tail_idx].cqe_req)
-                       qmap->left_to_poll++;
-               tail_idx = next_index(tail_idx, qmap->entries);
-       }
-       /* save index in queue, where we have to start flushing */
-       qmap->next_wqe_idx = wqe_idx;
-       return 0;
-}
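calc_left_cqes() walks the queue map from the slot after the last polled tail up to the WQE the firmware reported, counting every processed slot that had requested a completion. next_index() is defined elsewhere in the driver; the sketch below assumes it is a plain modular increment over qmap->entries, which matches how it is used here. The demo_* names are stand-ins.

/*
 * Stand-alone sketch of the tail walk in calc_left_cqes(): count how many
 * already-processed slots still owe a completion, assuming
 * next_index(i, n) == (i + 1) % n.
 */
#include <stdio.h>

struct demo_map_entry {
	int cqe_req;	/* did this WQE request a completion? */
};

static unsigned int next_index(unsigned int idx, unsigned int entries)
{
	return (idx + 1) % entries;
}

static unsigned int count_left_to_poll(const struct demo_map_entry *map,
				       unsigned int entries,
				       unsigned int tail, unsigned int wqe_idx)
{
	unsigned int left = 0;
	unsigned int i = next_index(tail, entries);

	while (i != wqe_idx) {
		if (map[i].cqe_req)
			left++;
		i = next_index(i, entries);
	}
	return left;
}

int main(void)
{
	struct demo_map_entry map[8] = {
		[1] = { 1 }, [2] = { 0 }, [3] = { 1 }, [4] = { 1 },
	};

	/* last polled slot was 0, the hardware stopped at slot 4 */
	printf("left_to_poll=%u\n", count_left_to_poll(map, 8, 0, 4));
	return 0;
}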
-
-static int check_for_left_cqes(struct ehca_qp *my_qp, struct ehca_shca *shca)
-{
-       u64 h_ret;
-       void *send_wqe_p, *recv_wqe_p;
-       int ret;
-       unsigned long flags;
-       int qp_num = my_qp->ib_qp.qp_num;
-
-       /* this hcall is not supported on base QPs */
-       if (my_qp->ext_type != EQPT_SRQBASE) {
-               /* get send and receive wqe pointer */
-               h_ret = hipz_h_disable_and_get_wqe(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle, &my_qp->pf,
-                               &send_wqe_p, &recv_wqe_p, 4);
-               if (h_ret != H_SUCCESS) {
-                       ehca_err(&shca->ib_device, "disable_and_get_wqe() "
-                                "failed ehca_qp=%p qp_num=%x h_ret=%lli",
-                                my_qp, qp_num, h_ret);
-                       return ehca2ib_return_code(h_ret);
-               }
-
-               /*
-                * acquire the lock to ensure that nobody is polling the cq;
-                * otherwise the qmap->tail pointer could be in an
-                * inconsistent state.
-                */
-               spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
-               ret = calc_left_cqes((u64)send_wqe_p, &my_qp->ipz_squeue,
-                               &my_qp->sq_map);
-               spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-               if (ret)
-                       return ret;
-
-
-               spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
-               ret = calc_left_cqes((u64)recv_wqe_p, &my_qp->ipz_rqueue,
-                               &my_qp->rq_map);
-               spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
-               if (ret)
-                       return ret;
-       } else {
-               spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
-               my_qp->sq_map.left_to_poll = 0;
-               my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
-                                                       my_qp->sq_map.entries);
-               spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-
-               spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
-               my_qp->rq_map.left_to_poll = 0;
-               my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
-                                                       my_qp->rq_map.entries);
-               spin_unlock_irqrestore(&my_qp->recv_cq->spinlock, flags);
-       }
-
-       /* this ensures that flush CQEs are generated only for pending WQEs */
-       if ((my_qp->sq_map.left_to_poll == 0) &&
-                               (my_qp->rq_map.left_to_poll == 0)) {
-               spin_lock_irqsave(&my_qp->send_cq->spinlock, flags);
-               ehca_add_to_err_list(my_qp, 1);
-               spin_unlock_irqrestore(&my_qp->send_cq->spinlock, flags);
-
-               if (HAS_RQ(my_qp)) {
-                       spin_lock_irqsave(&my_qp->recv_cq->spinlock, flags);
-                       ehca_add_to_err_list(my_qp, 0);
-                       spin_unlock_irqrestore(&my_qp->recv_cq->spinlock,
-                                       flags);
-               }
-       }
-
-       return 0;
-}
-
-/*
- * internal_modify_qp with circumvention to handle aqp0 properly
- * smi_reset2init indicates if this is an internal reset-to-init-call for
- * smi. This flag must always be zero if called from ehca_modify_qp()!
- * This internal function was introduced to avoid recursion of ehca_modify_qp()!
- */
-static int internal_modify_qp(struct ib_qp *ibqp,
-                             struct ib_qp_attr *attr,
-                             int attr_mask, int smi_reset2init)
-{
-       enum ib_qp_state qp_cur_state, qp_new_state;
-       int cnt, qp_attr_idx, ret = 0;
-       enum ib_qp_statetrans statetrans;
-       struct hcp_modify_qp_control_block *mqpcb;
-       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-       struct ehca_shca *shca =
-               container_of(ibqp->pd->device, struct ehca_shca, ib_device);
-       u64 update_mask;
-       u64 h_ret;
-       int bad_wqe_cnt = 0;
-       int is_user = 0;
-       int squeue_locked = 0;
-       unsigned long flags = 0;
-
-       /* do query_qp to obtain current attr values */
-       mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
-       if (!mqpcb) {
-               ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
-                        "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_qp(shca->ipz_hca_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               mqpcb, my_qp->galpas.kernel);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(ibqp->device, "hipz_h_query_qp() failed "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, ibqp->qp_num, h_ret);
-               ret = ehca2ib_return_code(h_ret);
-               goto modify_qp_exit1;
-       }
-       if (ibqp->uobject)
-               is_user = 1;
-
-       qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
-
-       if (qp_cur_state == -EINVAL) {  /* invalid qp state */
-               ret = -EINVAL;
-               ehca_err(ibqp->device, "Invalid current ehca_qp_state=%x "
-                        "ehca_qp=%p qp_num=%x",
-                        mqpcb->qp_state, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-       /*
-        * circumvention to set aqp0 initial state to init
-        * as expected by IB spec
-        */
-       if (smi_reset2init == 0 &&
-           ibqp->qp_type == IB_QPT_SMI &&
-           qp_cur_state == IB_QPS_RESET &&
-           (attr_mask & IB_QP_STATE) &&
-           attr->qp_state == IB_QPS_INIT) { /* RESET -> INIT */
-               struct ib_qp_attr smiqp_attr = {
-                       .qp_state = IB_QPS_INIT,
-                       .port_num = my_qp->init_attr.port_num,
-                       .pkey_index = 0,
-                       .qkey = 0
-               };
-               int smiqp_attr_mask = IB_QP_STATE | IB_QP_PORT |
-                       IB_QP_PKEY_INDEX | IB_QP_QKEY;
-               int smirc = internal_modify_qp(
-                       ibqp, &smiqp_attr, smiqp_attr_mask, 1);
-               if (smirc) {
-                       ehca_err(ibqp->device, "SMI RESET -> INIT failed. "
-                                "ehca_modify_qp() rc=%i", smirc);
-                       ret = H_PARAMETER;
-                       goto modify_qp_exit1;
-               }
-               qp_cur_state = IB_QPS_INIT;
-               ehca_dbg(ibqp->device, "SMI RESET -> INIT succeeded");
-       }
-       /* is the transmitted current state equal to the "real" current state? */
-       if ((attr_mask & IB_QP_CUR_STATE) &&
-           qp_cur_state != attr->cur_qp_state) {
-               ret = -EINVAL;
-               ehca_err(ibqp->device,
-                        "Invalid IB_QP_CUR_STATE attr->curr_qp_state=%x <>"
-                        " actual cur_qp_state=%x. ehca_qp=%p qp_num=%x",
-                        attr->cur_qp_state, qp_cur_state, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-
-       ehca_dbg(ibqp->device, "ehca_qp=%p qp_num=%x current qp_state=%x "
-                "new qp_state=%x attribute_mask=%x",
-                my_qp, ibqp->qp_num, qp_cur_state, attr->qp_state, attr_mask);
-
-       qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
-       if (!smi_reset2init &&
-           !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
-                               attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
-               ret = -EINVAL;
-               ehca_err(ibqp->device,
-                        "Invalid qp transition new_state=%x cur_state=%x "
-                        "ehca_qp=%p qp_num=%x attr_mask=%x", qp_new_state,
-                        qp_cur_state, my_qp, ibqp->qp_num, attr_mask);
-               goto modify_qp_exit1;
-       }
-
-       mqpcb->qp_state = ib2ehca_qp_state(qp_new_state);
-       if (mqpcb->qp_state)
-               update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
-       else {
-               ret = -EINVAL;
-               ehca_err(ibqp->device, "Invalid new qp state=%x "
-                        "ehca_qp=%p qp_num=%x",
-                        qp_new_state, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-
-       /* retrieve state transition struct to get req and opt attrs */
-       statetrans = get_modqp_statetrans(qp_cur_state, qp_new_state);
-       if (statetrans < 0) {
-               ret = -EINVAL;
-               ehca_err(ibqp->device, "<INVALID STATE CHANGE> qp_cur_state=%x "
-                        "new_qp_state=%x State_xsition=%x ehca_qp=%p "
-                        "qp_num=%x", qp_cur_state, qp_new_state,
-                        statetrans, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-
-       qp_attr_idx = ib2ehcaqptype(ibqp->qp_type);
-
-       if (qp_attr_idx < 0) {
-               ret = qp_attr_idx;
-               ehca_err(ibqp->device,
-                        "Invalid QP type=%x ehca_qp=%p qp_num=%x",
-                        ibqp->qp_type, my_qp, ibqp->qp_num);
-               goto modify_qp_exit1;
-       }
-
-       ehca_dbg(ibqp->device,
-                "ehca_qp=%p qp_num=%x <VALID STATE CHANGE> qp_state_xsit=%x",
-                my_qp, ibqp->qp_num, statetrans);
-
-       /* eHCA2 rev2 and higher require the SEND_GRH_FLAG to be set
-        * in non-LL UD QPs.
-        */
-       if ((my_qp->qp_type == IB_QPT_UD) &&
-           (my_qp->ext_type != EQPT_LLQP) &&
-           (statetrans == IB_QPST_INIT2RTR) &&
-           (shca->hw_level >= 0x22)) {
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
-               mqpcb->send_grh_flag = 1;
-       }
-
-       /* sqe -> rts: set purge bit of bad wqe before actual trans */
-       if ((my_qp->qp_type == IB_QPT_UD ||
-            my_qp->qp_type == IB_QPT_GSI ||
-            my_qp->qp_type == IB_QPT_SMI) &&
-           statetrans == IB_QPST_SQE2RTS) {
-               /* mark next free wqe if kernel */
-               if (!ibqp->uobject) {
-                       struct ehca_wqe *wqe;
-                       /* lock send queue */
-                       spin_lock_irqsave(&my_qp->spinlock_s, flags);
-                       squeue_locked = 1;
-                       /* mark next free wqe */
-                       wqe = (struct ehca_wqe *)
-                               ipz_qeit_get(&my_qp->ipz_squeue);
-                       wqe->optype = wqe->wqef = 0xff;
-                       ehca_dbg(ibqp->device, "qp_num=%x next_free_wqe=%p",
-                                ibqp->qp_num, wqe);
-               }
-               ret = prepare_sqe_rts(my_qp, shca, &bad_wqe_cnt);
-               if (ret) {
-                       ehca_err(ibqp->device, "prepare_sqe_rts() failed "
-                                "ehca_qp=%p qp_num=%x ret=%i",
-                                my_qp, ibqp->qp_num, ret);
-                       goto modify_qp_exit2;
-               }
-       }
-
-       /*
-        * enable RDMA_Atomic_Control if reset->init and reliable connection;
-        * this is necessary since gen2 does not provide that flag,
-        * but pHyp requires it
-        */
-       if (statetrans == IB_QPST_RESET2INIT &&
-           (ibqp->qp_type == IB_QPT_RC || ibqp->qp_type == IB_QPT_UC)) {
-               mqpcb->rdma_atomic_ctrl = 3;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RDMA_ATOMIC_CTRL, 1);
-       }
-       /* circumvention: pHyp requires #RDMA/Atomic Resp Res for UC INIT -> RTR */
-       if (statetrans == IB_QPST_INIT2RTR &&
-           (ibqp->qp_type == IB_QPT_UC) &&
-           !(attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)) {
-               mqpcb->rdma_nr_atomic_resp_res = 1; /* default to 1 */
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
-       }
-
-       if (attr_mask & IB_QP_PKEY_INDEX) {
-               if (attr->pkey_index >= 16) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid pkey_index=%x. "
-                                "ehca_qp=%p qp_num=%x max_pkey_index=f",
-                                attr->pkey_index, my_qp, ibqp->qp_num);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->prim_p_key_idx = attr->pkey_index;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
-       }
-       if (attr_mask & IB_QP_PORT) {
-               struct ehca_sport *sport;
-               struct ehca_qp *aqp1;
-               if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid port=%x. "
-                                "ehca_qp=%p qp_num=%x num_ports=%x",
-                                attr->port_num, my_qp, ibqp->qp_num,
-                                shca->num_ports);
-                       goto modify_qp_exit2;
-               }
-               sport = &shca->sport[attr->port_num - 1];
-               if (!sport->ibqp_sqp[IB_QPT_GSI]) {
-                       /* should not occur */
-                       ret = -EFAULT;
-                       ehca_err(ibqp->device, "AQP1 was not created for "
-                                "port=%x", attr->port_num);
-                       goto modify_qp_exit2;
-               }
-               aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
-                                   struct ehca_qp, ib_qp);
-               if (ibqp->qp_type != IB_QPT_GSI &&
-                   ibqp->qp_type != IB_QPT_SMI &&
-                   aqp1->mod_qp_parm) {
-                       /*
-                        * firmware will reject this modify_qp() because
-                        * port is not activated/initialized fully
-                        */
-                       ret = -EFAULT;
-                       ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
-                                 "either port is being activated (try again) "
-                                 "or cabling issue", attr->port_num);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->prim_phys_port = attr->port_num;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
-       }
-       if (attr_mask & IB_QP_QKEY) {
-               mqpcb->qkey = attr->qkey;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_QKEY, 1);
-       }
-       if (attr_mask & IB_QP_AV) {
-               mqpcb->dlid = attr->ah_attr.dlid;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DLID, 1);
-               mqpcb->source_path_bits = attr->ah_attr.src_path_bits;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS, 1);
-               mqpcb->service_level = attr->ah_attr.sl;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL, 1);
-
-               if (ehca_calc_ipd(shca, mqpcb->prim_phys_port,
-                                 attr->ah_attr.static_rate,
-                                 &mqpcb->max_static_rate)) {
-                       ret = -EINVAL;
-                       goto modify_qp_exit2;
-               }
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
-
-               /*
-                * Always supply the GRH flag, even if it's zero, to give the
-                * hypervisor a clear "yes" or "no" instead of a "perhaps"
-                */
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
-
-               /*
-                * only if GRH is TRUE may we consider SOURCE_GID_IDX
-                * and DEST_GID; otherwise pHyp will return H_ATTR_PARM!
-                */
-               if (attr->ah_attr.ah_flags == IB_AH_GRH) {
-                       mqpcb->send_grh_flag = 1;
-
-                       mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
-                       update_mask |=
-                               EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
-
-                       for (cnt = 0; cnt < 16; cnt++)
-                               mqpcb->dest_gid.byte[cnt] =
-                                       attr->ah_attr.grh.dgid.raw[cnt];
-
-                       update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_GID, 1);
-                       mqpcb->flow_label = attr->ah_attr.grh.flow_label;
-                       update_mask |= EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL, 1);
-                       mqpcb->hop_limit = attr->ah_attr.grh.hop_limit;
-                       update_mask |= EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT, 1);
-                       mqpcb->traffic_class = attr->ah_attr.grh.traffic_class;
-                       update_mask |=
-                               EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS, 1);
-               }
-       }
-
-       if (attr_mask & IB_QP_PATH_MTU) {
-               /* store log2(MTU) */
-               my_qp->mtu_shift = attr->path_mtu + 7;
-               mqpcb->path_mtu = attr->path_mtu;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
-       }
-       if (attr_mask & IB_QP_TIMEOUT) {
-               mqpcb->timeout = attr->timeout;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT, 1);
-       }
-       if (attr_mask & IB_QP_RETRY_CNT) {
-               mqpcb->retry_count = attr->retry_cnt;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT, 1);
-       }
-       if (attr_mask & IB_QP_RNR_RETRY) {
-               mqpcb->rnr_retry_count = attr->rnr_retry;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT, 1);
-       }
-       if (attr_mask & IB_QP_RQ_PSN) {
-               mqpcb->receive_psn = attr->rq_psn;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_RECEIVE_PSN, 1);
-       }
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-               mqpcb->rdma_nr_atomic_resp_res = attr->max_dest_rd_atomic < 3 ?
-                       attr->max_dest_rd_atomic : 2;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES, 1);
-       }
-       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-               mqpcb->rdma_atomic_outst_dest_qp = attr->max_rd_atomic < 3 ?
-                       attr->max_rd_atomic : 2;
-               update_mask |=
-                       EHCA_BMASK_SET
-                       (MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP, 1);
-       }
-       if (attr_mask & IB_QP_ALT_PATH) {
-               if (attr->alt_port_num < 1
-                   || attr->alt_port_num > shca->num_ports) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid alt_port=%x. "
-                                "ehca_qp=%p qp_num=%x num_ports=%x",
-                                attr->alt_port_num, my_qp, ibqp->qp_num,
-                                shca->num_ports);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->alt_phys_port = attr->alt_port_num;
-
-               if (attr->alt_pkey_index >= 16) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid alt_pkey_index=%x. "
-                                "ehca_qp=%p qp_num=%x max_pkey_index=f",
-                                attr->alt_pkey_index, my_qp, ibqp->qp_num);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->alt_p_key_idx = attr->alt_pkey_index;
-
-               mqpcb->timeout_al = attr->alt_timeout;
-               mqpcb->dlid_al = attr->alt_ah_attr.dlid;
-               mqpcb->source_path_bits_al = attr->alt_ah_attr.src_path_bits;
-               mqpcb->service_level_al = attr->alt_ah_attr.sl;
-
-               if (ehca_calc_ipd(shca, mqpcb->alt_phys_port,
-                                 attr->alt_ah_attr.static_rate,
-                                 &mqpcb->max_static_rate_al)) {
-                       ret = -EINVAL;
-                       goto modify_qp_exit2;
-               }
-
-               /* OpenIB doesn't support alternate retry counts - copy them */
-               mqpcb->retry_count_al = mqpcb->retry_count;
-               mqpcb->rnr_retry_count_al = mqpcb->rnr_retry_count;
-
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_ALT_PHYS_PORT, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_ALT_P_KEY_IDX, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_TIMEOUT_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_DLID_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_SOURCE_PATH_BITS_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_SERVICE_LEVEL_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_RETRY_COUNT_AL, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_RNR_RETRY_COUNT_AL, 1);
-
-               /*
-                * Always supply the GRH flag, even if it's zero, to give the
-                * hypervisor a clear "yes" or "no" instead of a "perhaps"
-                */
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG_AL, 1);
-
-               /*
-                * only if GRH is TRUE may we consider SOURCE_GID_IDX
-                * and DEST_GID; otherwise pHyp will return H_ATTR_PARM!
-                */
-               if (attr->alt_ah_attr.ah_flags == IB_AH_GRH) {
-                       mqpcb->send_grh_flag_al = 1;
-
-                       for (cnt = 0; cnt < 16; cnt++)
-                               mqpcb->dest_gid_al.byte[cnt] =
-                                       attr->alt_ah_attr.grh.dgid.raw[cnt];
-                       mqpcb->source_gid_idx_al =
-                               attr->alt_ah_attr.grh.sgid_index;
-                       mqpcb->flow_label_al = attr->alt_ah_attr.grh.flow_label;
-                       mqpcb->hop_limit_al = attr->alt_ah_attr.grh.hop_limit;
-                       mqpcb->traffic_class_al =
-                               attr->alt_ah_attr.grh.traffic_class;
-
-                       update_mask |=
-                               EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX_AL, 1)
-                               | EHCA_BMASK_SET(MQPCB_MASK_DEST_GID_AL, 1)
-                               | EHCA_BMASK_SET(MQPCB_MASK_FLOW_LABEL_AL, 1)
-                               | EHCA_BMASK_SET(MQPCB_MASK_HOP_LIMIT_AL, 1) |
-                               EHCA_BMASK_SET(MQPCB_MASK_TRAFFIC_CLASS_AL, 1);
-               }
-       }
-
-       if (attr_mask & IB_QP_MIN_RNR_TIMER) {
-               mqpcb->min_rnr_nak_timer_field = attr->min_rnr_timer;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_MIN_RNR_NAK_TIMER_FIELD, 1);
-       }
-
-       if (attr_mask & IB_QP_SQ_PSN) {
-               mqpcb->send_psn = attr->sq_psn;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_PSN, 1);
-       }
-
-       if (attr_mask & IB_QP_DEST_QPN) {
-               mqpcb->dest_qp_nr = attr->dest_qp_num;
-               update_mask |= EHCA_BMASK_SET(MQPCB_MASK_DEST_QP_NR, 1);
-       }
-
-       if (attr_mask & IB_QP_PATH_MIG_STATE) {
-               if (attr->path_mig_state != IB_MIG_REARM
-                   && attr->path_mig_state != IB_MIG_MIGRATED) {
-                       ret = -EINVAL;
-                       ehca_err(ibqp->device, "Invalid mig_state=%x",
-                                attr->path_mig_state);
-                       goto modify_qp_exit2;
-               }
-               mqpcb->path_migration_state = attr->path_mig_state + 1;
-               if (attr->path_mig_state == IB_MIG_REARM)
-                       my_qp->mig_armed = 1;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_PATH_MIGRATION_STATE, 1);
-       }
-
-       if (attr_mask & IB_QP_CAP) {
-               mqpcb->max_nr_outst_send_wr = attr->cap.max_send_wr+1;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_SEND_WR, 1);
-               mqpcb->max_nr_outst_recv_wr = attr->cap.max_recv_wr+1;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_MAX_NR_OUTST_RECV_WR, 1);
-               /* no support for max_send/recv_sge yet */
-       }
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
-
-       h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                                my_qp->ipz_qp_handle,
-                                &my_qp->pf,
-                                update_mask,
-                                mqpcb, my_qp->galpas.kernel);
-
-       if (h_ret != H_SUCCESS) {
-               ret = ehca2ib_return_code(h_ret);
-               ehca_err(ibqp->device, "hipz_h_modify_qp() failed h_ret=%lli "
-                        "ehca_qp=%p qp_num=%x", h_ret, my_qp, ibqp->qp_num);
-               goto modify_qp_exit2;
-       }
-
-       if ((my_qp->qp_type == IB_QPT_UD ||
-            my_qp->qp_type == IB_QPT_GSI ||
-            my_qp->qp_type == IB_QPT_SMI) &&
-           statetrans == IB_QPST_SQE2RTS) {
-               /* ring doorbell to reprocess the WQEs */
-               iosync(); /* serialize GAL register access */
-               hipz_update_sqa(my_qp, bad_wqe_cnt-1);
-               ehca_gen_dbg("doorbell for %x wqes", bad_wqe_cnt);
-       }
-
-       if (statetrans == IB_QPST_RESET2INIT ||
-           statetrans == IB_QPST_INIT2INIT) {
-               mqpcb->qp_enable = 1;
-               mqpcb->qp_state = EHCA_QPS_INIT;
-               update_mask = 0;
-               update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
-
-               h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
-                                        my_qp->ipz_qp_handle,
-                                        &my_qp->pf,
-                                        update_mask,
-                                        mqpcb,
-                                        my_qp->galpas.kernel);
-
-               if (h_ret != H_SUCCESS) {
-                       ret = ehca2ib_return_code(h_ret);
-                       ehca_err(ibqp->device, "ENABLE in context of "
-                                "RESET_2_INIT failed! Maybe you didn't get "
-                                "a LID h_ret=%lli ehca_qp=%p qp_num=%x",
-                                h_ret, my_qp, ibqp->qp_num);
-                       goto modify_qp_exit2;
-               }
-       }
-       if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
-           && !is_user) {
-               ret = check_for_left_cqes(my_qp, shca);
-               if (ret)
-                       goto modify_qp_exit2;
-       }
-
-       if (statetrans == IB_QPST_ANY2RESET) {
-               ipz_qeit_reset(&my_qp->ipz_rqueue);
-               ipz_qeit_reset(&my_qp->ipz_squeue);
-
-               if (qp_cur_state == IB_QPS_ERR && !is_user) {
-                       del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
-
-                       if (HAS_RQ(my_qp))
-                               del_from_err_list(my_qp->recv_cq,
-                                                 &my_qp->rq_err_node);
-               }
-               if (!is_user)
-                       reset_queue_map(&my_qp->sq_map);
-
-               if (HAS_RQ(my_qp) && !is_user)
-                       reset_queue_map(&my_qp->rq_map);
-       }
-
-       if (attr_mask & IB_QP_QKEY)
-               my_qp->qkey = attr->qkey;
-
-modify_qp_exit2:
-       if (squeue_locked) { /* this means: sqe -> rts */
-               spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
-               my_qp->sqerr_purgeflag = 1;
-       }
-
-modify_qp_exit1:
-       ehca_free_fw_ctrlblock(mqpcb);
-
-       return ret;
-}
-
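internal_modify_qp() above collects every requested attribute into mqpcb and ORs the matching bit into update_mask, then commits the whole set to firmware with a single hipz_h_modify_qp() hcall. Below is a minimal standalone sketch of that accumulate-then-commit pattern; the EX_* names and bit positions are invented for illustration and are not the driver's EHCA_BMASK_*/MQPCB_MASK_* definitions.

	/* Sketch only: invented field names and bit positions. */
	#include <stdint.h>

	#define EX_MASK_QP_STATE        (1ULL << 0)
	#define EX_MASK_PRIM_P_KEY_IDX  (1ULL << 1)
	#define EX_MASK_QKEY            (1ULL << 2)

	struct ex_ctrlblock {
		uint32_t qp_state;
		uint32_t prim_p_key_idx;
		uint32_t qkey;
	};

	/*
	 * Fill in only the attributes the caller asked to change and return
	 * the accumulated mask, which would then go to firmware in one call.
	 */
	static uint64_t ex_build_update(struct ex_ctrlblock *cb, uint32_t new_state,
					int set_pkey, uint32_t pkey_idx,
					int set_qkey, uint32_t qkey)
	{
		uint64_t update_mask = 0;

		cb->qp_state = new_state;
		update_mask |= EX_MASK_QP_STATE;	/* state is always written */

		if (set_pkey) {
			cb->prim_p_key_idx = pkey_idx;
			update_mask |= EX_MASK_PRIM_P_KEY_IDX;
		}
		if (set_qkey) {
			cb->qkey = qkey;
			update_mask |= EX_MASK_QKEY;
		}
		return update_mask;
	}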
-int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
-                  struct ib_udata *udata)
-{
-       int ret = 0;
-
-       struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
-                                             ib_device);
-       struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-
-       /* The if-block below caches qp_attr to be modified for GSI and SMI
-        * qps during initialization by ib_mad. When the respective port
-        * is activated, i.e. we get a PORT_ACTIVE event, we replay the
-        * cached sequence of modify calls, see ehca_recover_sqp() below.
-        * Why that is required:
-        * 1) If only one port is connected, older code required it to be
-        *    port one and the module option nr_ports=1 to be given by the
-        *    user, which is very inconvenient for the end user.
-        * 2) Firmware accepts modify_qp() only once the respective port has
-        *    become active. Older code had a wait loop of 30sec in
-        *    create_qp()/define_aqp1(), which is not appropriate in practice.
-        *    This code removes that wait loop, see define_aqp1(), and always
-        *    reports all ports to ib_mad and hence to its users. Only
-        *    activated ports will then be usable.
-        */
-       if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
-               int port = my_qp->init_attr.port_num;
-               struct ehca_sport *sport = &shca->sport[port - 1];
-               unsigned long flags;
-               spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-               /* cache qp_attr only during init */
-               if (my_qp->mod_qp_parm) {
-                       struct ehca_mod_qp_parm *p;
-                       if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
-                               ehca_err(&shca->ib_device,
-                                        "mod_qp_parm overflow state=%x port=%x"
-                                        " type=%x", attr->qp_state,
-                                        my_qp->init_attr.port_num,
-                                        ibqp->qp_type);
-                               spin_unlock_irqrestore(&sport->mod_sqp_lock,
-                                                      flags);
-                               return -EINVAL;
-                       }
-                       p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
-                       p->mask = attr_mask;
-                       p->attr = *attr;
-                       my_qp->mod_qp_parm_idx++;
-                       ehca_dbg(&shca->ib_device,
-                                "Saved qp_attr for state=%x port=%x type=%x",
-                                attr->qp_state, my_qp->init_attr.port_num,
-                                ibqp->qp_type);
-                       spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-                       goto out;
-               }
-               spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-       }
-
-       ret = internal_modify_qp(ibqp, attr, attr_mask, 0);
-
-out:
-       if ((ret == 0) && (attr_mask & IB_QP_STATE))
-               my_qp->state = attr->qp_state;
-
-       return ret;
-}
-
-void ehca_recover_sqp(struct ib_qp *sqp)
-{
-       struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
-       int port = my_sqp->init_attr.port_num;
-       struct ib_qp_attr attr;
-       struct ehca_mod_qp_parm *qp_parm;
-       int i, qp_parm_idx, ret;
-       unsigned long flags, wr_cnt;
-
-       if (!my_sqp->mod_qp_parm)
-               return;
-       ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
-
-       qp_parm = my_sqp->mod_qp_parm;
-       qp_parm_idx = my_sqp->mod_qp_parm_idx;
-       for (i = 0; i < qp_parm_idx; i++) {
-               attr = qp_parm[i].attr;
-               ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
-               if (ret) {
-                       ehca_err(sqp->device, "Could not modify SQP port=%x "
-                                "qp_num=%x ret=%x", port, sqp->qp_num, ret);
-                       goto free_qp_parm;
-               }
-               ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
-                        port, sqp->qp_num, attr.qp_state);
-       }
-
-       /* re-trigger posted recv wrs */
-       wr_cnt =  my_sqp->ipz_rqueue.current_q_offset /
-               my_sqp->ipz_rqueue.qe_size;
-       if (wr_cnt) {
-               spin_lock_irqsave(&my_sqp->spinlock_r, flags);
-               hipz_update_rqa(my_sqp, wr_cnt);
-               spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
-               ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
-                        port, sqp->qp_num, wr_cnt);
-       }
-
-free_qp_parm:
-       kfree(qp_parm);
-       /* this prevents subsequent calls to modify_qp() from caching qp_attr */
-       my_sqp->mod_qp_parm = NULL;
-}
-
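Taken together, ehca_modify_qp() and ehca_recover_sqp() implement a record-and-replay scheme for the special QPs: modify requests that arrive before the port is active are saved, then replayed once PORT_ACTIVE fires. The sketch below shows just that control flow with invented ex_* types; the real driver keeps struct ehca_mod_qp_parm entries under mod_sqp_lock.

	#include <rdma/ib_verbs.h>

	#define EX_MAX_SAVED 4	/* arbitrary bound for this sketch */

	struct ex_saved_modify {
		int attr_mask;
		struct ib_qp_attr attr;
	};

	struct ex_sqp_ctx {
		struct ex_saved_modify saved[EX_MAX_SAVED];
		int nsaved;
		bool port_active;
	};

	/* Before the port is active, remember the request instead of issuing it. */
	static int ex_modify(struct ex_sqp_ctx *ctx, struct ib_qp *qp,
			     struct ib_qp_attr *attr, int attr_mask)
	{
		if (!ctx->port_active) {
			if (ctx->nsaved >= EX_MAX_SAVED)
				return -EINVAL;
			ctx->saved[ctx->nsaved].attr = *attr;
			ctx->saved[ctx->nsaved].attr_mask = attr_mask;
			ctx->nsaved++;
			return 0;
		}
		return ib_modify_qp(qp, attr, attr_mask);
	}

	/* Called once the PORT_ACTIVE event arrives: replay in original order. */
	static void ex_replay(struct ex_sqp_ctx *ctx, struct ib_qp *qp)
	{
		int i;

		for (i = 0; i < ctx->nsaved; i++)
			ib_modify_qp(qp, &ctx->saved[i].attr,
				     ctx->saved[i].attr_mask);
		ctx->nsaved = 0;
		ctx->port_active = true;
	}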
-int ehca_query_qp(struct ib_qp *qp,
-                 struct ib_qp_attr *qp_attr,
-                 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
-{
-       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-       struct ehca_shca *shca = container_of(qp->device, struct ehca_shca,
-                                             ib_device);
-       struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-       struct hcp_modify_qp_control_block *qpcb;
-       int cnt, ret = 0;
-       u64 h_ret;
-
-       if (qp_attr_mask & QP_ATTR_QUERY_NOT_SUPPORTED) {
-               ehca_err(qp->device, "Invalid attribute mask "
-                        "ehca_qp=%p qp_num=%x qp_attr_mask=%x ",
-                        my_qp, qp->qp_num, qp_attr_mask);
-               return -EINVAL;
-       }
-
-       qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!qpcb) {
-               ehca_err(qp->device, "Out of memory for qpcb "
-                        "ehca_qp=%p qp_num=%x", my_qp, qp->qp_num);
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_qp(adapter_handle,
-                               my_qp->ipz_qp_handle,
-                               &my_qp->pf,
-                               qpcb, my_qp->galpas.kernel);
-
-       if (h_ret != H_SUCCESS) {
-               ret = ehca2ib_return_code(h_ret);
-               ehca_err(qp->device, "hipz_h_query_qp() failed "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, qp->qp_num, h_ret);
-               goto query_qp_exit1;
-       }
-
-       qp_attr->cur_qp_state = ehca2ib_qp_state(qpcb->qp_state);
-       qp_attr->qp_state = qp_attr->cur_qp_state;
-
-       if (qp_attr->cur_qp_state == -EINVAL) {
-               ret = -EINVAL;
-               ehca_err(qp->device, "Got invalid ehca_qp_state=%x "
-                        "ehca_qp=%p qp_num=%x",
-                        qpcb->qp_state, my_qp, qp->qp_num);
-               goto query_qp_exit1;
-       }
-
-       if (qp_attr->qp_state == IB_QPS_SQD)
-               qp_attr->sq_draining = 1;
-
-       qp_attr->qkey = qpcb->qkey;
-       qp_attr->path_mtu = qpcb->path_mtu;
-       qp_attr->path_mig_state = qpcb->path_migration_state - 1;
-       qp_attr->rq_psn = qpcb->receive_psn;
-       qp_attr->sq_psn = qpcb->send_psn;
-       qp_attr->min_rnr_timer = qpcb->min_rnr_nak_timer_field;
-       qp_attr->cap.max_send_wr = qpcb->max_nr_outst_send_wr-1;
-       qp_attr->cap.max_recv_wr = qpcb->max_nr_outst_recv_wr-1;
-       /* UD_AV CIRCUMVENTION */
-       if (my_qp->qp_type == IB_QPT_UD) {
-               qp_attr->cap.max_send_sge =
-                       qpcb->actual_nr_sges_in_sq_wqe - 2;
-               qp_attr->cap.max_recv_sge =
-                       qpcb->actual_nr_sges_in_rq_wqe - 2;
-       } else {
-               qp_attr->cap.max_send_sge =
-                       qpcb->actual_nr_sges_in_sq_wqe;
-               qp_attr->cap.max_recv_sge =
-                       qpcb->actual_nr_sges_in_rq_wqe;
-       }
-
-       qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
-       qp_attr->dest_qp_num = qpcb->dest_qp_nr;
-
-       qp_attr->pkey_index = qpcb->prim_p_key_idx;
-       qp_attr->port_num = qpcb->prim_phys_port;
-       qp_attr->timeout = qpcb->timeout;
-       qp_attr->retry_cnt = qpcb->retry_count;
-       qp_attr->rnr_retry = qpcb->rnr_retry_count;
-
-       qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
-       qp_attr->alt_port_num = qpcb->alt_phys_port;
-       qp_attr->alt_timeout = qpcb->timeout_al;
-
-       qp_attr->max_dest_rd_atomic = qpcb->rdma_nr_atomic_resp_res;
-       qp_attr->max_rd_atomic = qpcb->rdma_atomic_outst_dest_qp;
-
-       /* primary av */
-       qp_attr->ah_attr.sl = qpcb->service_level;
-
-       if (qpcb->send_grh_flag) {
-               qp_attr->ah_attr.ah_flags = IB_AH_GRH;
-       }
-
-       qp_attr->ah_attr.static_rate = qpcb->max_static_rate;
-       qp_attr->ah_attr.dlid = qpcb->dlid;
-       qp_attr->ah_attr.src_path_bits = qpcb->source_path_bits;
-       qp_attr->ah_attr.port_num = qp_attr->port_num;
-
-       /* primary GRH */
-       qp_attr->ah_attr.grh.traffic_class = qpcb->traffic_class;
-       qp_attr->ah_attr.grh.hop_limit = qpcb->hop_limit;
-       qp_attr->ah_attr.grh.sgid_index = qpcb->source_gid_idx;
-       qp_attr->ah_attr.grh.flow_label = qpcb->flow_label;
-
-       for (cnt = 0; cnt < 16; cnt++)
-               qp_attr->ah_attr.grh.dgid.raw[cnt] =
-                       qpcb->dest_gid.byte[cnt];
-
-       /* alternate AV */
-       qp_attr->alt_ah_attr.sl = qpcb->service_level_al;
-       if (qpcb->send_grh_flag_al) {
-               qp_attr->alt_ah_attr.ah_flags = IB_AH_GRH;
-       }
-
-       qp_attr->alt_ah_attr.static_rate = qpcb->max_static_rate_al;
-       qp_attr->alt_ah_attr.dlid = qpcb->dlid_al;
-       qp_attr->alt_ah_attr.src_path_bits = qpcb->source_path_bits_al;
-
-       /* alternate GRH */
-       qp_attr->alt_ah_attr.grh.traffic_class = qpcb->traffic_class_al;
-       qp_attr->alt_ah_attr.grh.hop_limit = qpcb->hop_limit_al;
-       qp_attr->alt_ah_attr.grh.sgid_index = qpcb->source_gid_idx_al;
-       qp_attr->alt_ah_attr.grh.flow_label = qpcb->flow_label_al;
-
-       for (cnt = 0; cnt < 16; cnt++)
-               qp_attr->alt_ah_attr.grh.dgid.raw[cnt] =
-                       qpcb->dest_gid_al.byte[cnt];
-
-       /* return init attributes given in ehca_create_qp */
-       if (qp_init_attr)
-               *qp_init_attr = my_qp->init_attr;
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
-
-query_qp_exit1:
-       ehca_free_fw_ctrlblock(qpcb);
-
-       return ret;
-}
-
-int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-                   enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
-{
-       struct ehca_qp *my_qp =
-               container_of(ibsrq, struct ehca_qp, ib_srq);
-       struct ehca_shca *shca =
-               container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
-       struct hcp_modify_qp_control_block *mqpcb;
-       u64 update_mask;
-       u64 h_ret;
-       int ret = 0;
-
-       mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!mqpcb) {
-               ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
-                        "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
-               return -ENOMEM;
-       }
-
-       update_mask = 0;
-       if (attr_mask & IB_SRQ_LIMIT) {
-               attr_mask &= ~IB_SRQ_LIMIT;
-               update_mask |=
-                       EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
-                       | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
-               mqpcb->curr_srq_limit = attr->srq_limit;
-               mqpcb->qp_aff_asyn_ev_log_reg =
-                       EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
-       }
-
-       /* by now, all bits in attr_mask should have been cleared */
-       if (attr_mask) {
-               ehca_err(ibsrq->device, "invalid attribute mask bits set  "
-                        "attr_mask=%x", attr_mask);
-               ret = -EINVAL;
-               goto modify_srq_exit0;
-       }
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
-
-       h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
-                                NULL, update_mask, mqpcb,
-                                my_qp->galpas.kernel);
-
-       if (h_ret != H_SUCCESS) {
-               ret = ehca2ib_return_code(h_ret);
-               ehca_err(ibsrq->device, "hipz_h_modify_qp() failed h_ret=%lli "
-                        "ehca_qp=%p qp_num=%x",
-                        h_ret, my_qp, my_qp->real_qp_num);
-       }
-
-modify_srq_exit0:
-       ehca_free_fw_ctrlblock(mqpcb);
-
-       return ret;
-}
-
-int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
-{
-       struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
-       struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
-                                             ib_device);
-       struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
-       struct hcp_modify_qp_control_block *qpcb;
-       int ret = 0;
-       u64 h_ret;
-
-       qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
-       if (!qpcb) {
-               ehca_err(srq->device, "Out of memory for qpcb "
-                        "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
-               return -ENOMEM;
-       }
-
-       h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
-                               NULL, qpcb, my_qp->galpas.kernel);
-
-       if (h_ret != H_SUCCESS) {
-               ret = ehca2ib_return_code(h_ret);
-               ehca_err(srq->device, "hipz_h_query_qp() failed "
-                        "ehca_qp=%p qp_num=%x h_ret=%lli",
-                        my_qp, my_qp->real_qp_num, h_ret);
-               goto query_srq_exit1;
-       }
-
-       srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
-       srq_attr->max_sge = 3;
-       srq_attr->srq_limit = qpcb->curr_srq_limit;
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
-
-query_srq_exit1:
-       ehca_free_fw_ctrlblock(qpcb);
-
-       return ret;
-}
-
-static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
-                              struct ib_uobject *uobject)
-{
-       struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
-       struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
-                                            ib_pd);
-       struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
-       u32 qp_num = my_qp->real_qp_num;
-       int ret;
-       u64 h_ret;
-       u8 port_num;
-       int is_user = 0;
-       enum ib_qp_type qp_type;
-       unsigned long flags;
-
-       if (uobject) {
-               is_user = 1;
-               if (my_qp->mm_count_galpa ||
-                   my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
-                       ehca_err(dev, "Resources still referenced in "
-                                "user space qp_num=%x", qp_num);
-                       return -EINVAL;
-               }
-       }
-
-       if (my_qp->send_cq) {
-               ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
-               if (ret) {
-                       ehca_err(dev, "Couldn't unassign qp from "
-                                "send_cq ret=%i qp_num=%x cq_num=%x", ret,
-                                qp_num, my_qp->send_cq->cq_number);
-                       return ret;
-               }
-       }
-
-       write_lock_irqsave(&ehca_qp_idr_lock, flags);
-       idr_remove(&ehca_qp_idr, my_qp->token);
-       write_unlock_irqrestore(&ehca_qp_idr_lock, flags);
-
-       /*
-        * SRQs will never get into an error list and do not have a recv_cq,
-        * so we need to skip them here.
-        */
-       if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
-               del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
-
-       if (HAS_SQ(my_qp) && !is_user)
-               del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
-
-       /* now wait until all pending events have completed */
-       wait_event(my_qp->wait_completion, !atomic_read(&my_qp->nr_events));
-
-       h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
-       if (h_ret != H_SUCCESS) {
-               ehca_err(dev, "hipz_h_destroy_qp() failed h_ret=%lli "
-                        "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
-               return ehca2ib_return_code(h_ret);
-       }
-
-       port_num = my_qp->init_attr.port_num;
-       qp_type  = my_qp->init_attr.qp_type;
-
-       if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
-               spin_lock_irqsave(&sport->mod_sqp_lock, flags);
-               kfree(my_qp->mod_qp_parm);
-               my_qp->mod_qp_parm = NULL;
-               shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
-               spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
-       }
-
-       /* no support for IB_QPT_SMI yet */
-       if (qp_type == IB_QPT_GSI) {
-               struct ib_event event;
-               ehca_info(dev, "device %s: port %x is inactive.",
-                               shca->ib_device.name, port_num);
-               event.device = &shca->ib_device;
-               event.event = IB_EVENT_PORT_ERR;
-               event.element.port_num = port_num;
-               shca->sport[port_num - 1].port_state = IB_PORT_DOWN;
-               ib_dispatch_event(&event);
-       }
-
-       if (HAS_RQ(my_qp)) {
-               ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-               if (!is_user)
-                       vfree(my_qp->rq_map.map);
-       }
-       if (HAS_SQ(my_qp)) {
-               ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-               if (!is_user)
-                       vfree(my_qp->sq_map.map);
-       }
-       kmem_cache_free(qp_cache, my_qp);
-       atomic_dec(&shca->num_qps);
-       return 0;
-}
-
-int ehca_destroy_qp(struct ib_qp *qp)
-{
-       return internal_destroy_qp(qp->device,
-                                  container_of(qp, struct ehca_qp, ib_qp),
-                                  qp->uobject);
-}
-
-int ehca_destroy_srq(struct ib_srq *srq)
-{
-       return internal_destroy_qp(srq->device,
-                                  container_of(srq, struct ehca_qp, ib_srq),
-                                  srq->uobject);
-}
-
-int ehca_init_qp_cache(void)
-{
-       qp_cache = kmem_cache_create("ehca_cache_qp",
-                                    sizeof(struct ehca_qp), 0,
-                                    SLAB_HWCACHE_ALIGN,
-                                    NULL);
-       if (!qp_cache)
-               return -ENOMEM;
-       return 0;
-}
-
-void ehca_cleanup_qp_cache(void)
-{
-       kmem_cache_destroy(qp_cache);
-}
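The two helpers above are the usual slab-cache bookends: create the cache when the module loads, allocate one object per QP from it, free objects back on destroy, and tear the cache down on unload. A generic sketch of that lifecycle follows; ex_obj is a hypothetical object type, not the driver's struct ehca_qp.

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/slab.h>

	struct ex_obj {
		int id;
	};

	static struct kmem_cache *ex_cache;

	static int __init ex_cache_init(void)
	{
		ex_cache = kmem_cache_create("ex_cache", sizeof(struct ex_obj),
					     0, SLAB_HWCACHE_ALIGN, NULL);
		return ex_cache ? 0 : -ENOMEM;
	}

	/* One object per logical resource, zero-initialized. */
	static struct ex_obj *ex_obj_alloc(void)
	{
		return kmem_cache_zalloc(ex_cache, GFP_KERNEL);
	}

	static void ex_obj_free(struct ex_obj *obj)
	{
		kmem_cache_free(ex_cache, obj);
	}

	static void __exit ex_cache_exit(void)
	{
		kmem_cache_destroy(ex_cache);
	}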
diff --git a/drivers/staging/rdma/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
deleted file mode 100644 (file)
index 11813b8..0000000
+++ /dev/null
@@ -1,953 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  post_send/recv, poll_cq, req_notify
- *
- *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-#include "hipz_fns.h"
-
-/* in RC traffic, insert an empty RDMA READ after this many packets */
-#define ACK_CIRC_THRESHOLD 2000000
-
-static u64 replace_wr_id(u64 wr_id, u16 idx)
-{
-       u64 ret;
-
-       ret = wr_id & ~QMAP_IDX_MASK;
-       ret |= idx & QMAP_IDX_MASK;
-
-       return ret;
-}
-
-static u16 get_app_wr_id(u64 wr_id)
-{
-       return wr_id & QMAP_IDX_MASK;
-}
-
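The pair above lets the driver keep its own bookkeeping index in the low bits of the 64-bit wr_id while preserving the caller's value: the caller's low bits are stashed in the queue-map entry, and the WQE's wr_id carries the map slot instead, so the completion path can find the entry again. A worked example, assuming for illustration that QMAP_IDX_MASK covers the low 16 bits:

	u64 app_wr_id  = 0x1122334455660007ULL;	/* wr_id chosen by the caller */
	u16 sq_map_idx = 0x0042;		/* slot in the driver's sq_map */

	/* Low bits of the caller's id go into the map entry... */
	u16 app_low = get_app_wr_id(app_wr_id);		/* == 0x0007 */

	/* ...while the WQE's wr_id carries the map index in those bits. */
	u64 wqe_wr_id = replace_wr_id(app_wr_id, sq_map_idx);
	/* wqe_wr_id == 0x1122334455660042 under the 16-bit-mask assumption */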
-static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
-                                 struct ehca_wqe *wqe_p,
-                                 struct ib_recv_wr *recv_wr,
-                                 u32 rq_map_idx)
-{
-       u8 cnt_ds;
-       if (unlikely((recv_wr->num_sge < 0) ||
-                    (recv_wr->num_sge > ipz_rqueue->act_nr_of_sg))) {
-               ehca_gen_err("Invalid number of WQE SGE. "
-                        "num_sge=%x max_nr_of_sg=%x",
-                        recv_wr->num_sge, ipz_rqueue->act_nr_of_sg);
-               return -EINVAL; /* invalid SG list length */
-       }
-
-       /* clear wqe header until sglist */
-       memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
-
-       wqe_p->work_request_id = replace_wr_id(recv_wr->wr_id, rq_map_idx);
-       wqe_p->nr_of_data_seg = recv_wr->num_sge;
-
-       for (cnt_ds = 0; cnt_ds < recv_wr->num_sge; cnt_ds++) {
-               wqe_p->u.all_rcv.sg_list[cnt_ds].vaddr =
-                       recv_wr->sg_list[cnt_ds].addr;
-               wqe_p->u.all_rcv.sg_list[cnt_ds].lkey =
-                       recv_wr->sg_list[cnt_ds].lkey;
-               wqe_p->u.all_rcv.sg_list[cnt_ds].length =
-                       recv_wr->sg_list[cnt_ds].length;
-       }
-
-       if (ehca_debug_level >= 3) {
-               ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
-                            ipz_rqueue);
-               ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
-       }
-
-       return 0;
-}
-
-#if defined(DEBUG_GSI_SEND_WR)
-
-/* need ib_mad struct */
-#include <rdma/ib_mad.h>
-
-static void trace_ud_wr(const struct ib_ud_wr *ud_wr)
-{
-       int idx = 0;
-       int j;
-       while (ud_wr) {
-               struct ib_mad_hdr *mad_hdr = ud_wr->wr.num_sge ?
-                       __va(ud_wr->wr.sg_list->addr) : NULL;
-               struct ib_sge *sge = ud_wr->wr.sg_list;
-               ehca_gen_dbg("ud_wr#%x wr_id=%lx num_sge=%x "
-                            "send_flags=%x opcode=%x", idx, ud_wr->wr.wr_id,
-                            ud_wr->wr.num_sge, ud_wr->wr.send_flags,
-                            ud_wr->wr.opcode);
-               if (mad_hdr) {
-                       ehca_gen_dbg("ud_wr#%x mad_hdr base_version=%x "
-                                    "mgmt_class=%x class_version=%x method=%x "
-                                    "status=%x class_specific=%x tid=%lx "
-                                    "attr_id=%x resv=%x attr_mod=%x",
-                                    idx, mad_hdr->base_version,
-                                    mad_hdr->mgmt_class,
-                                    mad_hdr->class_version, mad_hdr->method,
-                                    mad_hdr->status, mad_hdr->class_specific,
-                                    mad_hdr->tid, mad_hdr->attr_id,
-                                    mad_hdr->resv,
-                                    mad_hdr->attr_mod);
-               }
-               for (j = 0; j < ud_wr->wr.num_sge; j++) {
-                       u8 *data = __va(sge->addr);
-                       ehca_gen_dbg("ud_wr#%x sge#%x addr=%p length=%x "
-                                    "lkey=%x",
-                                    idx, j, data, sge->length, sge->lkey);
-                       /* assume length is n*16 */
-                       ehca_dmp(data, sge->length, "ud_wr#%x sge#%x",
-                                idx, j);
-                       sge++;
-               } /* eof for j */
-               idx++;
-               ud_wr = ud_wr->wr.next ?
-                       container_of(ud_wr->wr.next, struct ib_ud_wr, wr) :
-                       NULL;
-       } /* eof while ud_wr */
-}
-
-#endif /* DEBUG_GSI_SEND_WR */
-
-static inline int ehca_write_swqe(struct ehca_qp *qp,
-                                 struct ehca_wqe *wqe_p,
-                                 struct ib_send_wr *send_wr,
-                                 u32 sq_map_idx,
-                                 int hidden)
-{
-       u32 idx;
-       u64 dma_length;
-       struct ehca_av *my_av;
-       u32 remote_qkey;
-       struct ehca_qmap_entry *qmap_entry = &qp->sq_map.map[sq_map_idx];
-
-       if (unlikely((send_wr->num_sge < 0) ||
-                    (send_wr->num_sge > qp->ipz_squeue.act_nr_of_sg))) {
-               ehca_gen_err("Invalid number of WQE SGE. "
-                        "num_sge=%x max_nr_of_sg=%x",
-                        send_wr->num_sge, qp->ipz_squeue.act_nr_of_sg);
-               return -EINVAL; /* invalid SG list length */
-       }
-
-       /* clear wqe header until sglist */
-       memset(wqe_p, 0, offsetof(struct ehca_wqe, u.ud_av.sg_list));
-
-       wqe_p->work_request_id = replace_wr_id(send_wr->wr_id, sq_map_idx);
-
-       qmap_entry->app_wr_id = get_app_wr_id(send_wr->wr_id);
-       qmap_entry->reported = 0;
-       qmap_entry->cqe_req = 0;
-
-       switch (send_wr->opcode) {
-       case IB_WR_SEND:
-       case IB_WR_SEND_WITH_IMM:
-               wqe_p->optype = WQE_OPTYPE_SEND;
-               break;
-       case IB_WR_RDMA_WRITE:
-       case IB_WR_RDMA_WRITE_WITH_IMM:
-               wqe_p->optype = WQE_OPTYPE_RDMAWRITE;
-               break;
-       case IB_WR_RDMA_READ:
-               wqe_p->optype = WQE_OPTYPE_RDMAREAD;
-               break;
-       default:
-               ehca_gen_err("Invalid opcode=%x", send_wr->opcode);
-               return -EINVAL; /* invalid opcode */
-       }
-
-       wqe_p->wqef = (send_wr->opcode) & WQEF_HIGH_NIBBLE;
-
-       wqe_p->wr_flag = 0;
-
-       if ((send_wr->send_flags & IB_SEND_SIGNALED ||
-           qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
-           && !hidden) {
-               wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
-               qmap_entry->cqe_req = 1;
-       }
-
-       if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
-           send_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
-               /* this might not work as long as HW does not support it */
-               wqe_p->immediate_data = be32_to_cpu(send_wr->ex.imm_data);
-               wqe_p->wr_flag |= WQE_WRFLAG_IMM_DATA_PRESENT;
-       }
-
-       wqe_p->nr_of_data_seg = send_wr->num_sge;
-
-       switch (qp->qp_type) {
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               /* no break is intentional here */
-       case IB_QPT_UD:
-               /* IB 1.2 spec C10-15 compliance */
-               remote_qkey = ud_wr(send_wr)->remote_qkey;
-               if (remote_qkey & 0x80000000)
-                       remote_qkey = qp->qkey;
-
-               wqe_p->destination_qp_number = ud_wr(send_wr)->remote_qpn << 8;
-               wqe_p->local_ee_context_qkey = remote_qkey;
-               if (unlikely(!ud_wr(send_wr)->ah)) {
-                       ehca_gen_err("ud_wr(send_wr) is NULL. qp=%p", qp);
-                       return -EINVAL;
-               }
-               if (unlikely(ud_wr(send_wr)->remote_qpn == 0)) {
-                       ehca_gen_err("dest QP# is 0. qp=%x", qp->real_qp_num);
-                       return -EINVAL;
-               }
-               my_av = container_of(ud_wr(send_wr)->ah, struct ehca_av, ib_ah);
-               wqe_p->u.ud_av.ud_av = my_av->av;
-
-               /*
-                * omitted check of IB_SEND_INLINE
-                * since HW does not support it
-                */
-               for (idx = 0; idx < send_wr->num_sge; idx++) {
-                       wqe_p->u.ud_av.sg_list[idx].vaddr =
-                               send_wr->sg_list[idx].addr;
-                       wqe_p->u.ud_av.sg_list[idx].lkey =
-                               send_wr->sg_list[idx].lkey;
-                       wqe_p->u.ud_av.sg_list[idx].length =
-                               send_wr->sg_list[idx].length;
-               } /* eof for idx */
-               if (qp->qp_type == IB_QPT_SMI ||
-                   qp->qp_type == IB_QPT_GSI)
-                       wqe_p->u.ud_av.ud_av.pmtu = 1;
-               if (qp->qp_type == IB_QPT_GSI) {
-                       wqe_p->pkeyi = ud_wr(send_wr)->pkey_index;
-#ifdef DEBUG_GSI_SEND_WR
-                       trace_ud_wr(ud_wr(send_wr));
-#endif /* DEBUG_GSI_SEND_WR */
-               }
-               break;
-
-       case IB_QPT_UC:
-               if (send_wr->send_flags & IB_SEND_FENCE)
-                       wqe_p->wr_flag |= WQE_WRFLAG_FENCE;
-               /* no break is intentional here */
-       case IB_QPT_RC:
-               /* TODO: atomic not implemented */
-               wqe_p->u.nud.remote_virtual_address =
-                       rdma_wr(send_wr)->remote_addr;
-               wqe_p->u.nud.rkey = rdma_wr(send_wr)->rkey;
-
-               /*
-                * omitted checking of IB_SEND_INLINE
-                * since HW does not support it
-                */
-               dma_length = 0;
-               for (idx = 0; idx < send_wr->num_sge; idx++) {
-                       wqe_p->u.nud.sg_list[idx].vaddr =
-                               send_wr->sg_list[idx].addr;
-                       wqe_p->u.nud.sg_list[idx].lkey =
-                               send_wr->sg_list[idx].lkey;
-                       wqe_p->u.nud.sg_list[idx].length =
-                               send_wr->sg_list[idx].length;
-                       dma_length += send_wr->sg_list[idx].length;
-               } /* eof idx */
-               wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
-
-               /* unsolicited ack circumvention */
-               if (send_wr->opcode == IB_WR_RDMA_READ) {
-                       /* on RDMA read, switch on and reset counters */
-                       qp->message_count = qp->packet_count = 0;
-                       qp->unsol_ack_circ = 1;
-               } else
-                       /* else estimate #packets */
-                       qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
-
-               break;
-
-       default:
-               ehca_gen_err("Invalid qptype=%x", qp->qp_type);
-               return -EINVAL;
-       }
-
-       if (ehca_debug_level >= 3) {
-               ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
-               ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
-       }
-       return 0;
-}
-
-/* map_ib_wc_status converts raw cqe_status to ib_wc_status */
-static inline void map_ib_wc_status(u32 cqe_status,
-                                   enum ib_wc_status *wc_status)
-{
-       if (unlikely(cqe_status & WC_STATUS_ERROR_BIT)) {
-               switch (cqe_status & 0x3F) {
-               case 0x01:
-               case 0x21:
-                       *wc_status = IB_WC_LOC_LEN_ERR;
-                       break;
-               case 0x02:
-               case 0x22:
-                       *wc_status = IB_WC_LOC_QP_OP_ERR;
-                       break;
-               case 0x03:
-               case 0x23:
-                       *wc_status = IB_WC_LOC_EEC_OP_ERR;
-                       break;
-               case 0x04:
-               case 0x24:
-                       *wc_status = IB_WC_LOC_PROT_ERR;
-                       break;
-               case 0x05:
-               case 0x25:
-                       *wc_status = IB_WC_WR_FLUSH_ERR;
-                       break;
-               case 0x06:
-                       *wc_status = IB_WC_MW_BIND_ERR;
-                       break;
-               case 0x07: /* remote error - look into bits 20:24 */
-                       switch ((cqe_status
-                                & WC_STATUS_REMOTE_ERROR_FLAGS) >> 11) {
-                       case 0x0:
-                               /*
-                                * PSN Sequence Error!
-                                * couldn't find a matching status!
-                                */
-                               *wc_status = IB_WC_GENERAL_ERR;
-                               break;
-                       case 0x1:
-                               *wc_status = IB_WC_REM_INV_REQ_ERR;
-                               break;
-                       case 0x2:
-                               *wc_status = IB_WC_REM_ACCESS_ERR;
-                               break;
-                       case 0x3:
-                               *wc_status = IB_WC_REM_OP_ERR;
-                               break;
-                       case 0x4:
-                               *wc_status = IB_WC_REM_INV_RD_REQ_ERR;
-                               break;
-                       }
-                       break;
-               case 0x08:
-                       *wc_status = IB_WC_RETRY_EXC_ERR;
-                       break;
-               case 0x09:
-                       *wc_status = IB_WC_RNR_RETRY_EXC_ERR;
-                       break;
-               case 0x0A:
-               case 0x2D:
-                       *wc_status = IB_WC_REM_ABORT_ERR;
-                       break;
-               case 0x0B:
-               case 0x2E:
-                       *wc_status = IB_WC_INV_EECN_ERR;
-                       break;
-               case 0x0C:
-               case 0x2F:
-                       *wc_status = IB_WC_INV_EEC_STATE_ERR;
-                       break;
-               case 0x0D:
-                       *wc_status = IB_WC_BAD_RESP_ERR;
-                       break;
-               case 0x10:
-                       /* WQE purged */
-                       *wc_status = IB_WC_WR_FLUSH_ERR;
-                       break;
-               default:
-                       *wc_status = IB_WC_FATAL_ERR;
-
-               }
-       } else
-               *wc_status = IB_WC_SUCCESS;
-}
-
-static inline int post_one_send(struct ehca_qp *my_qp,
-                        struct ib_send_wr *cur_send_wr,
-                        int hidden)
-{
-       struct ehca_wqe *wqe_p;
-       int ret;
-       u32 sq_map_idx;
-       u64 start_offset = my_qp->ipz_squeue.current_q_offset;
-
-       /* get pointer next to free WQE */
-       wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
-       if (unlikely(!wqe_p)) {
-               /* too many posted work requests: queue overflow */
-               ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
-                        "qp_num=%x", my_qp->ib_qp.qp_num);
-               return -ENOMEM;
-       }
-
-       /*
-        * Get the index of the WQE in the send queue. The same index is used
-        * for writing into the sq_map.
-        */
-       sq_map_idx = start_offset / my_qp->ipz_squeue.qe_size;
-
-       /* write a SEND WQE into the QUEUE */
-       ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, sq_map_idx, hidden);
-       /*
-        * if something failed,
-        * reset the free entry pointer to the start value
-        */
-       if (unlikely(ret)) {
-               my_qp->ipz_squeue.current_q_offset = start_offset;
-               ehca_err(my_qp->ib_qp.device, "Could not write WQE "
-                        "qp_num=%x", my_qp->ib_qp.qp_num);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
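post_one_send() captures the queue offset before ipz_qeit_get_inc() advances it, and dividing that byte offset by the fixed entry size yields the sq_map slot for the new WQE. For example, with a hypothetical 512-byte send queue entry:

	/* Numbers are illustrative; qe_size is fixed when the queue is created. */
	u64 start_offset = 0x2400;	/* byte offset of the WQE just handed out */
	u32 qe_size      = 512;		/* size of one send queue entry */

	u32 sq_map_idx = start_offset / qe_size;	/* 9216 / 512 == 18 */
	/* sq_map.map[18] then records app_wr_id and cqe_req for this WQE. */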
-int ehca_post_send(struct ib_qp *qp,
-                  struct ib_send_wr *send_wr,
-                  struct ib_send_wr **bad_send_wr)
-{
-       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-       int wqe_cnt = 0;
-       int ret = 0;
-       unsigned long flags;
-
-       /* Reject WR if QP is in RESET, INIT or RTR state */
-       if (unlikely(my_qp->state < IB_QPS_RTS)) {
-               ehca_err(qp->device, "Invalid QP state  qp_state=%d qpn=%x",
-                        my_qp->state, qp->qp_num);
-               ret = -EINVAL;
-               goto out;
-       }
-
-       /* LOCK the QUEUE */
-       spin_lock_irqsave(&my_qp->spinlock_s, flags);
-
-       /* Send an empty extra RDMA read if:
-        *  1) there has been an RDMA read on this connection before
-        *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
-        *  3) we can be sure that any previous extra RDMA read has been
-        *     processed so we don't overflow the SQ
-        */
-       if (unlikely(my_qp->unsol_ack_circ &&
-                    my_qp->packet_count > ACK_CIRC_THRESHOLD &&
-                    my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
-               /* insert an empty RDMA READ to fix up the remote QP state */
-               struct ib_send_wr circ_wr;
-               memset(&circ_wr, 0, sizeof(circ_wr));
-               circ_wr.opcode = IB_WR_RDMA_READ;
-               post_one_send(my_qp, &circ_wr, 1); /* ignore retcode */
-               wqe_cnt++;
-               ehca_dbg(qp->device, "posted circ wr  qp_num=%x", qp->qp_num);
-               my_qp->message_count = my_qp->packet_count = 0;
-       }
-
-       /* loop processes list of send reqs */
-       while (send_wr) {
-               ret = post_one_send(my_qp, send_wr, 0);
-               if (unlikely(ret)) {
-                       goto post_send_exit0;
-               }
-               wqe_cnt++;
-               send_wr = send_wr->next;
-       }
-
-post_send_exit0:
-       iosync(); /* serialize GAL register access */
-       hipz_update_sqa(my_qp, wqe_cnt);
-       if (unlikely(ret || ehca_debug_level >= 2))
-               ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
-                        my_qp, qp->qp_num, wqe_cnt, ret);
-       my_qp->message_count += wqe_cnt;
-       spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
-
-out:
-       if (ret)
-               *bad_send_wr = send_wr;
-       return ret;
-}
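A hedged caller-side sketch of how a chain of send work requests and the bad_send_wr out-parameter are typically used with a post_send entry point shaped like ehca_post_send() above. It assumes the in-kernel verbs API of this kernel generation (struct ib_send_wr, ib_post_send()); the zero-length sends and the function name example_post_two_sends are purely illustrative.

#include <linux/string.h>
#include <linux/printk.h>
#include <rdma/ib_verbs.h>

static int example_post_two_sends(struct ib_qp *qp)
{
	struct ib_send_wr wr[2], *bad_wr = NULL;
	int ret;

	memset(wr, 0, sizeof(wr));

	wr[0].wr_id   = 1;
	wr[0].opcode  = IB_WR_SEND;
	wr[0].num_sge = 0;              /* no payload in this illustration */
	wr[0].next    = &wr[1];

	wr[1].wr_id   = 2;
	wr[1].opcode  = IB_WR_SEND;
	wr[1].num_sge = 0;
	wr[1].next    = NULL;

	ret = ib_post_send(qp, &wr[0], &bad_wr);
	if (ret)
		/* bad_wr points at the first WR that was not posted */
		pr_err("post_send failed at wr_id=%llu ret=%d\n",
		       bad_wr ? bad_wr->wr_id : 0ULL, ret);
	return ret;
}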
-
-static int internal_post_recv(struct ehca_qp *my_qp,
-                             struct ib_device *dev,
-                             struct ib_recv_wr *recv_wr,
-                             struct ib_recv_wr **bad_recv_wr)
-{
-       struct ehca_wqe *wqe_p;
-       int wqe_cnt = 0;
-       int ret = 0;
-       u32 rq_map_idx;
-       unsigned long flags;
-       struct ehca_qmap_entry *qmap_entry;
-
-       if (unlikely(!HAS_RQ(my_qp))) {
-               ehca_err(dev, "QP has no RQ  ehca_qp=%p qp_num=%x ext_type=%d",
-                        my_qp, my_qp->real_qp_num, my_qp->ext_type);
-               ret = -ENODEV;
-               goto out;
-       }
-
-       /* LOCK the QUEUE */
-       spin_lock_irqsave(&my_qp->spinlock_r, flags);
-
-       /* loop processes list of recv reqs */
-       while (recv_wr) {
-               u64 start_offset = my_qp->ipz_rqueue.current_q_offset;
-               /* get pointer next to free WQE */
-               wqe_p = ipz_qeit_get_inc(&my_qp->ipz_rqueue);
-               if (unlikely(!wqe_p)) {
-                       /* too many posted work requests: queue overflow */
-                       ret = -ENOMEM;
-                       ehca_err(dev, "Too many posted WQEs "
-                               "qp_num=%x", my_qp->real_qp_num);
-                       goto post_recv_exit0;
-               }
-               /*
-                * Get the index of the WQE in the recv queue. The same index
-                * is used for writing into the rq_map.
-                */
-               rq_map_idx = start_offset / my_qp->ipz_rqueue.qe_size;
-
-               /* write a RECV WQE into the QUEUE */
-               ret = ehca_write_rwqe(&my_qp->ipz_rqueue, wqe_p, recv_wr,
-                               rq_map_idx);
-               /*
-                * if something failed,
-                * reset the free entry pointer to the start value
-                */
-               if (unlikely(ret)) {
-                       my_qp->ipz_rqueue.current_q_offset = start_offset;
-                       ret = -EINVAL;
-                       ehca_err(dev, "Could not write WQE "
-                               "qp_num=%x", my_qp->real_qp_num);
-                       goto post_recv_exit0;
-               }
-
-               qmap_entry = &my_qp->rq_map.map[rq_map_idx];
-               qmap_entry->app_wr_id = get_app_wr_id(recv_wr->wr_id);
-               qmap_entry->reported = 0;
-               qmap_entry->cqe_req = 1;
-
-               wqe_cnt++;
-               recv_wr = recv_wr->next;
-       } /* eof for recv_wr */
-
-post_recv_exit0:
-       iosync(); /* serialize GAL register access */
-       hipz_update_rqa(my_qp, wqe_cnt);
-       if (unlikely(ret || ehca_debug_level >= 2))
-           ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
-                    my_qp, my_qp->real_qp_num, wqe_cnt, ret);
-       spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
-
-out:
-       if (ret)
-               *bad_recv_wr = recv_wr;
-
-       return ret;
-}
-
-int ehca_post_recv(struct ib_qp *qp,
-                  struct ib_recv_wr *recv_wr,
-                  struct ib_recv_wr **bad_recv_wr)
-{
-       struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
-
-       /* Reject WR if QP is in RESET state */
-       if (unlikely(my_qp->state == IB_QPS_RESET)) {
-               ehca_err(qp->device, "Invalid QP state  qp_state=%d qpn=%x",
-                        my_qp->state, qp->qp_num);
-               *bad_recv_wr = recv_wr;
-               return -EINVAL;
-       }
-
-       return internal_post_recv(my_qp, qp->device, recv_wr, bad_recv_wr);
-}
-
-int ehca_post_srq_recv(struct ib_srq *srq,
-                      struct ib_recv_wr *recv_wr,
-                      struct ib_recv_wr **bad_recv_wr)
-{
-       return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
-                                 srq->device, recv_wr, bad_recv_wr);
-}
-
-/*
- * The ib_wc_opcode table converts eHCA WC opcodes to IB WC opcodes.
- * Zero marks an invalid opcode, so each stored value is the IB opcode
- * plus one and must be decremented after the lookup.
- */
-static const u8 ib_wc_opcode[255] = {
-       [0x01] = IB_WC_RECV+1,
-       [0x02] = IB_WC_RECV_RDMA_WITH_IMM+1,
-       [0x08] = IB_WC_FETCH_ADD+1,
-       [0x10] = IB_WC_COMP_SWAP+1,
-       [0x20] = IB_WC_RDMA_WRITE+1,
-       [0x40] = IB_WC_RDMA_READ+1,
-       [0x80] = IB_WC_SEND+1
-};
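A stand-alone illustration of the "+1 sentinel" trick used by the ib_wc_opcode[] table above: a zero-initialised array can distinguish "no mapping" from a mapping to value 0 by storing real values incremented by one. The enum values and hardware codes below are arbitrary examples, not the driver's.

#include <stdio.h>

enum demo_opcode { DEMO_SEND = 0, DEMO_RECV = 1, DEMO_RDMA_WRITE = 2 };

static const unsigned char demo_map[256] = {
	[0x80] = DEMO_SEND + 1,        /* hardware code 0x80 -> DEMO_SEND */
	[0x01] = DEMO_RECV + 1,
	[0x20] = DEMO_RDMA_WRITE + 1,
	/* every other slot stays 0, meaning "invalid hardware code" */
};

static int demo_lookup(unsigned char hw_code)
{
	return (int)demo_map[hw_code] - 1;    /* -1 signals an invalid code */
}

int main(void)
{
	printf("0x80 -> %d\n", demo_lookup(0x80));   /* 0 (DEMO_SEND) */
	printf("0x02 -> %d\n", demo_lookup(0x02));   /* -1 (invalid)  */
	return 0;
}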
-
-/* internal function to poll one entry of cq */
-static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
-{
-       int ret = 0, qmap_tail_idx;
-       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-       struct ehca_cqe *cqe;
-       struct ehca_qp *my_qp;
-       struct ehca_qmap_entry *qmap_entry;
-       struct ehca_queue_map *qmap;
-       int cqe_count = 0, is_error;
-
-repoll:
-       cqe = (struct ehca_cqe *)
-               ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
-       if (!cqe) {
-               ret = -EAGAIN;
-               if (ehca_debug_level >= 3)
-                       ehca_dbg(cq->device, "Completion queue is empty  "
-                                "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
-               goto poll_cq_one_exit0;
-       }
-
-       /* prevents loads being reordered across this point */
-       rmb();
-
-       cqe_count++;
-       if (unlikely(cqe->status & WC_STATUS_PURGE_BIT)) {
-               struct ehca_qp *qp;
-               int purgeflag;
-               unsigned long flags;
-
-               qp = ehca_cq_get_qp(my_cq, cqe->local_qp_number);
-               if (!qp) {
-                       ehca_err(cq->device, "cq_num=%x qp_num=%x "
-                                "could not find qp -> ignore cqe",
-                                my_cq->cq_number, cqe->local_qp_number);
-                       ehca_dmp(cqe, 64, "cq_num=%x qp_num=%x",
-                                my_cq->cq_number, cqe->local_qp_number);
-                       /* ignore this purged cqe */
-                       goto repoll;
-               }
-               spin_lock_irqsave(&qp->spinlock_s, flags);
-               purgeflag = qp->sqerr_purgeflag;
-               spin_unlock_irqrestore(&qp->spinlock_s, flags);
-
-               if (purgeflag) {
-                       ehca_dbg(cq->device,
-                                "Got CQE with purged bit qp_num=%x src_qp=%x",
-                                cqe->local_qp_number, cqe->remote_qp_number);
-                       if (ehca_debug_level >= 2)
-                               ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
-                                        cqe->local_qp_number,
-                                        cqe->remote_qp_number);
-                       /*
-                        * ignore this CQE to avoid a duplicate completion
-                        * for the bad WQE that caused the SQE, and clear
-                        * the purge flag
-                        */
-                       qp->sqerr_purgeflag = 0;
-                       goto repoll;
-               }
-       }
-
-       is_error = cqe->status & WC_STATUS_ERROR_BIT;
-
-       /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
-       if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
-               ehca_dbg(cq->device,
-                        "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
-                        is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
-               ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
-                        my_cq, my_cq->cq_number);
-               ehca_dbg(cq->device,
-                        "ehca_cq=%p cq_num=%x -------------------------",
-                        my_cq, my_cq->cq_number);
-       }
-
-       read_lock(&ehca_qp_idr_lock);
-       my_qp = idr_find(&ehca_qp_idr, cqe->qp_token);
-       read_unlock(&ehca_qp_idr_lock);
-       if (!my_qp)
-               goto repoll;
-       wc->qp = &my_qp->ib_qp;
-
-       qmap_tail_idx = get_app_wr_id(cqe->work_request_id);
-       if (!(cqe->w_completion_flags & WC_SEND_RECEIVE_BIT))
-               /* We got a send completion. */
-               qmap = &my_qp->sq_map;
-       else
-               /* We got a receive completion. */
-               qmap = &my_qp->rq_map;
-
-       /* advance the tail pointer */
-       qmap->tail = qmap_tail_idx;
-
-       if (is_error) {
-               /*
-                * set left_to_poll to 0 because in error state, we will not
-                * get any additional CQEs
-                */
-               my_qp->sq_map.next_wqe_idx = next_index(my_qp->sq_map.tail,
-                                                       my_qp->sq_map.entries);
-               my_qp->sq_map.left_to_poll = 0;
-               ehca_add_to_err_list(my_qp, 1);
-
-               my_qp->rq_map.next_wqe_idx = next_index(my_qp->rq_map.tail,
-                                                       my_qp->rq_map.entries);
-               my_qp->rq_map.left_to_poll = 0;
-               if (HAS_RQ(my_qp))
-                       ehca_add_to_err_list(my_qp, 0);
-       }
-
-       qmap_entry = &qmap->map[qmap_tail_idx];
-       if (qmap_entry->reported) {
-               ehca_warn(cq->device, "Double cqe on qp_num=%#x",
-                               my_qp->real_qp_num);
-               /* found a double cqe, discard it and read next one */
-               goto repoll;
-       }
-
-       wc->wr_id = replace_wr_id(cqe->work_request_id, qmap_entry->app_wr_id);
-       qmap_entry->reported = 1;
-
-       /* if left_to_poll is decremented to 0, add the QP to the error list */
-       if (qmap->left_to_poll > 0) {
-               qmap->left_to_poll--;
-               if ((my_qp->sq_map.left_to_poll == 0) &&
-                               (my_qp->rq_map.left_to_poll == 0)) {
-                       ehca_add_to_err_list(my_qp, 1);
-                       if (HAS_RQ(my_qp))
-                               ehca_add_to_err_list(my_qp, 0);
-               }
-       }
-
-       /* eval ib_wc_opcode */
-       wc->opcode = ib_wc_opcode[cqe->optype]-1;
-       if (unlikely(wc->opcode == -1)) {
-               ehca_err(cq->device, "Invalid cqe->OPType=%x cqe->status=%x "
-                        "ehca_cq=%p cq_num=%x",
-                        cqe->optype, cqe->status, my_cq, my_cq->cq_number);
-               /* dump cqe for other infos */
-               ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
-                        my_cq, my_cq->cq_number);
-               /* still update the queue adder so this entry is discarded */
-               goto repoll;
-       }
-
-       /* eval ib_wc_status */
-       if (unlikely(is_error)) {
-               /* complete with errors */
-               map_ib_wc_status(cqe->status, &wc->status);
-               wc->vendor_err = wc->status;
-       } else
-               wc->status = IB_WC_SUCCESS;
-
-       wc->byte_len = cqe->nr_bytes_transferred;
-       wc->pkey_index = cqe->pkey_index;
-       wc->slid = cqe->rlid;
-       wc->dlid_path_bits = cqe->dlid;
-       wc->src_qp = cqe->remote_qp_number;
-       /*
-        * HW has "Immed data present" and "GRH present" in bits 6 and 5.
-        * SW defines those in bits 1 and 0, so we can just shift and mask.
-        */
-       wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
-       wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
-       wc->sl = cqe->service_level;
-
-poll_cq_one_exit0:
-       if (cqe_count > 0)
-               hipz_update_feca(my_cq, cqe_count);
-
-       return ret;
-}
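A quick stand-alone check of the flag relocation done near the end of ehca_poll_cq_one(): the hardware reports "immediate data present" and "GRH present" in bits 6 and 5, the software flags expect them in bits 1 and 0, so one shift-and-mask moves both at once. The HW_*/SW_* names are illustrative only.

#include <assert.h>
#include <stdio.h>

#define HW_IMM_PRESENT  (1u << 6)
#define HW_GRH_PRESENT  (1u << 5)
#define SW_IMM_FLAG     (1u << 1)
#define SW_GRH_FLAG     (1u << 0)

static unsigned int hw_to_sw_flags(unsigned int hw)
{
	return (hw >> 5) & 3;      /* same expression as in the driver */
}

int main(void)
{
	assert(hw_to_sw_flags(HW_IMM_PRESENT) == SW_IMM_FLAG);
	assert(hw_to_sw_flags(HW_GRH_PRESENT) == SW_GRH_FLAG);
	assert(hw_to_sw_flags(HW_IMM_PRESENT | HW_GRH_PRESENT) ==
	       (SW_IMM_FLAG | SW_GRH_FLAG));
	printf("flag relocation checks passed\n");
	return 0;
}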
-
-static int generate_flush_cqes(struct ehca_qp *my_qp, struct ib_cq *cq,
-                              struct ib_wc *wc, int num_entries,
-                              struct ipz_queue *ipz_queue, int on_sq)
-{
-       int nr = 0;
-       struct ehca_wqe *wqe;
-       u64 offset;
-       struct ehca_queue_map *qmap;
-       struct ehca_qmap_entry *qmap_entry;
-
-       if (on_sq)
-               qmap = &my_qp->sq_map;
-       else
-               qmap = &my_qp->rq_map;
-
-       qmap_entry = &qmap->map[qmap->next_wqe_idx];
-
-       while ((nr < num_entries) && (qmap_entry->reported == 0)) {
-               /* generate flush CQE */
-
-               memset(wc, 0, sizeof(*wc));
-
-               offset = qmap->next_wqe_idx * ipz_queue->qe_size;
-               wqe = (struct ehca_wqe *)ipz_qeit_calc(ipz_queue, offset);
-               if (!wqe) {
-                       ehca_err(cq->device, "Invalid wqe offset=%#llx on "
-                                "qp_num=%#x", offset, my_qp->real_qp_num);
-                       return nr;
-               }
-
-               wc->wr_id = replace_wr_id(wqe->work_request_id,
-                                         qmap_entry->app_wr_id);
-
-               if (on_sq) {
-                       switch (wqe->optype) {
-                       case WQE_OPTYPE_SEND:
-                               wc->opcode = IB_WC_SEND;
-                               break;
-                       case WQE_OPTYPE_RDMAWRITE:
-                               wc->opcode = IB_WC_RDMA_WRITE;
-                               break;
-                       case WQE_OPTYPE_RDMAREAD:
-                               wc->opcode = IB_WC_RDMA_READ;
-                               break;
-                       default:
-                               ehca_err(cq->device, "Invalid optype=%x",
-                                               wqe->optype);
-                               return nr;
-                       }
-               } else
-                       wc->opcode = IB_WC_RECV;
-
-               if (wqe->wr_flag & WQE_WRFLAG_IMM_DATA_PRESENT) {
-                       wc->ex.imm_data = wqe->immediate_data;
-                       wc->wc_flags |= IB_WC_WITH_IMM;
-               }
-
-               wc->status = IB_WC_WR_FLUSH_ERR;
-
-               wc->qp = &my_qp->ib_qp;
-
-               /* mark as reported and advance next_wqe pointer */
-               qmap_entry->reported = 1;
-               qmap->next_wqe_idx = next_index(qmap->next_wqe_idx,
-                                               qmap->entries);
-               qmap_entry = &qmap->map[qmap->next_wqe_idx];
-
-               wc++; nr++;
-       }
-
-       return nr;
-
-}
-
-int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
-{
-       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-       int nr;
-       struct ehca_qp *err_qp;
-       struct ib_wc *current_wc = wc;
-       int ret = 0;
-       unsigned long flags;
-       int entries_left = num_entries;
-
-       if (num_entries < 1) {
-               ehca_err(cq->device, "Invalid num_entries=%d ehca_cq=%p "
-                        "cq_num=%x", num_entries, my_cq, my_cq->cq_number);
-               ret = -EINVAL;
-               goto poll_cq_exit0;
-       }
-
-       spin_lock_irqsave(&my_cq->spinlock, flags);
-
-       /* generate flush cqes for send queues */
-       list_for_each_entry(err_qp, &my_cq->sqp_err_list, sq_err_node) {
-               nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
-                               &err_qp->ipz_squeue, 1);
-               entries_left -= nr;
-               current_wc += nr;
-
-               if (entries_left == 0)
-                       break;
-       }
-
-       /* generate flush cqes for receive queues */
-       list_for_each_entry(err_qp, &my_cq->rqp_err_list, rq_err_node) {
-               nr = generate_flush_cqes(err_qp, cq, current_wc, entries_left,
-                               &err_qp->ipz_rqueue, 0);
-               entries_left -= nr;
-               current_wc += nr;
-
-               if (entries_left == 0)
-                       break;
-       }
-
-       for (nr = 0; nr < entries_left; nr++) {
-               ret = ehca_poll_cq_one(cq, current_wc);
-               if (ret)
-                       break;
-               current_wc++;
-       } /* eof for nr */
-       entries_left -= nr;
-
-       spin_unlock_irqrestore(&my_cq->spinlock, flags);
-       if (ret == -EAGAIN  || !ret)
-               ret = num_entries - entries_left;
-
-poll_cq_exit0:
-       return ret;
-}
-
-int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
-{
-       struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
-       int ret = 0;
-
-       switch (notify_flags & IB_CQ_SOLICITED_MASK) {
-       case IB_CQ_SOLICITED:
-               hipz_set_cqx_n0(my_cq, 1);
-               break;
-       case IB_CQ_NEXT_COMP:
-               hipz_set_cqx_n1(my_cq, 1);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
-               unsigned long spl_flags;
-               spin_lock_irqsave(&my_cq->spinlock, spl_flags);
-               ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
-               spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
-       }
-
-       return ret;
-}
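A hedged sketch of the usual consumer pattern around ehca_poll_cq() and ehca_req_notify_cq(): drain the CQ, arm the notification, and drain again if arming with IB_CQ_REPORT_MISSED_EVENTS reports that completions slipped in meanwhile. It assumes the generic ib_poll_cq()/ib_req_notify_cq() wrappers, which dispatch to the driver routines above; example_drain_and_rearm is a made-up name.

#include <rdma/ib_verbs.h>

static void example_drain_and_rearm(struct ib_cq *cq)
{
	struct ib_wc wc;

	do {
		/* consume everything that is currently queued */
		while (ib_poll_cq(cq, 1, &wc) > 0)
			;   /* handle wc here, e.g. inspect wc.status/wc.wr_id */

		/*
		 * Arm the CQ; a positive return with REPORT_MISSED_EVENTS
		 * means new CQEs arrived before the arm took effect, so
		 * loop and poll once more instead of waiting for an event.
		 */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				      IB_CQ_REPORT_MISSED_EVENTS) > 0);
}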
diff --git a/drivers/staging/rdma/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c
deleted file mode 100644 (file)
index 376b031..0000000
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  SQP functions
- *
- *  Authors: Khadija Souissi <souissi@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <rdma/ib_mad.h>
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "ehca_iverbs.h"
-#include "hcp_if.h"
-
-#define IB_MAD_STATUS_REDIRECT         cpu_to_be16(0x0002)
-#define IB_MAD_STATUS_UNSUP_VERSION    cpu_to_be16(0x0004)
-#define IB_MAD_STATUS_UNSUP_METHOD     cpu_to_be16(0x0008)
-
-#define IB_PMA_CLASS_PORT_INFO         cpu_to_be16(0x0001)
-
-/**
- * ehca_define_sqp - Defines special queue pair 1 (the GSI QP). When the
- * special queue pair is created successfully, the corresponding port
- * becomes active.
- *
- * Defining special queue pair 0 (the SMI QP) is not yet supported.
- *
- * @qp_init_attr: Queue pair init attributes with port and queue pair type
- */
-
-u64 ehca_define_sqp(struct ehca_shca *shca,
-                   struct ehca_qp *ehca_qp,
-                   struct ib_qp_init_attr *qp_init_attr)
-{
-       u32 pma_qp_nr, bma_qp_nr;
-       u64 ret;
-       u8 port = qp_init_attr->port_num;
-       int counter;
-
-       shca->sport[port - 1].port_state = IB_PORT_DOWN;
-
-       switch (qp_init_attr->qp_type) {
-       case IB_QPT_SMI:
-               /* function not supported yet */
-               break;
-       case IB_QPT_GSI:
-               ret = hipz_h_define_aqp1(shca->ipz_hca_handle,
-                                        ehca_qp->ipz_qp_handle,
-                                        ehca_qp->galpas.kernel,
-                                        (u32) qp_init_attr->port_num,
-                                        &pma_qp_nr, &bma_qp_nr);
-
-               if (ret != H_SUCCESS) {
-                       ehca_err(&shca->ib_device,
-                                "Can't define AQP1 for port %x. h_ret=%lli",
-                                port, ret);
-                       return ret;
-               }
-               shca->sport[port - 1].pma_qp_nr = pma_qp_nr;
-               ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x",
-                        port, pma_qp_nr);
-               break;
-       default:
-               ehca_err(&shca->ib_device, "invalid qp_type=%x",
-                        qp_init_attr->qp_type);
-               return H_PARAMETER;
-       }
-
-       if (ehca_nr_ports < 0) /* autodetect mode */
-               return H_SUCCESS;
-
-       for (counter = 0;
-            shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
-                    counter < ehca_port_act_time;
-            counter++) {
-               ehca_dbg(&shca->ib_device, "... wait until port %x is active",
-                        port);
-               msleep_interruptible(1000);
-       }
-
-       if (counter == ehca_port_act_time) {
-               ehca_err(&shca->ib_device, "Port %x is not active.", port);
-               return H_HARDWARE;
-       }
-
-       return H_SUCCESS;
-}
-
-struct ib_perf {
-       struct ib_mad_hdr mad_hdr;
-       u8 reserved[40];
-       u8 data[192];
-} __attribute__ ((packed));
-
-/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
-struct tcslfl {
-       u32 tc:8;
-       u32 sl:4;
-       u32 fl:20;
-} __attribute__ ((packed));
-
-/* IP Version/TC/FL packed into 32 bits, as in GRH */
-struct vertcfl {
-       u32 ver:4;
-       u32 tc:8;
-       u32 fl:20;
-} __attribute__ ((packed));
-
-static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
-                            const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                            const struct ib_mad *in_mad, struct ib_mad *out_mad)
-{
-       const struct ib_perf *in_perf = (const struct ib_perf *)in_mad;
-       struct ib_perf *out_perf = (struct ib_perf *)out_mad;
-       struct ib_class_port_info *poi =
-               (struct ib_class_port_info *)out_perf->data;
-       struct tcslfl *tcslfl =
-               (struct tcslfl *)&poi->redirect_tcslfl;
-       struct ehca_shca *shca =
-               container_of(ibdev, struct ehca_shca, ib_device);
-       struct ehca_sport *sport = &shca->sport[port_num - 1];
-
-       ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method);
-
-       *out_mad = *in_mad;
-
-       if (in_perf->mad_hdr.class_version != 1) {
-               ehca_warn(ibdev, "Unsupported class_version=%x",
-                         in_perf->mad_hdr.class_version);
-               out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION;
-               goto perf_reply;
-       }
-
-       switch (in_perf->mad_hdr.method) {
-       case IB_MGMT_METHOD_GET:
-       case IB_MGMT_METHOD_SET:
-               /* set class port info for redirection */
-               out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO;
-               out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT;
-               memset(poi, 0, sizeof(*poi));
-               poi->base_version = 1;
-               poi->class_version = 1;
-               poi->resp_time_value = 18;
-
-               /* copy local routing information from WC where applicable */
-               tcslfl->sl         = in_wc->sl;
-               poi->redirect_lid  =
-                       sport->saved_attr.lid | in_wc->dlid_path_bits;
-               poi->redirect_qp   = sport->pma_qp_nr;
-               poi->redirect_qkey = IB_QP1_QKEY;
-
-               ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
-                               &poi->redirect_pkey);
-
-               /* if request was globally routed, copy route info */
-               if (in_grh) {
-                       const struct vertcfl *vertcfl =
-                               (const struct vertcfl *)&in_grh->version_tclass_flow;
-                       memcpy(poi->redirect_gid, in_grh->dgid.raw,
-                              sizeof(poi->redirect_gid));
-                       tcslfl->tc        = vertcfl->tc;
-                       tcslfl->fl        = vertcfl->fl;
-               } else
-                       /* else only fill in default GID */
-                       ehca_query_gid(ibdev, port_num, 0,
-                                      (union ib_gid *)&poi->redirect_gid);
-
-               ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
-                        sport->saved_attr.lid, sport->pma_qp_nr);
-               break;
-
-       case IB_MGMT_METHOD_GET_RESP:
-               return IB_MAD_RESULT_FAILURE;
-
-       default:
-               out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD;
-               break;
-       }
-
-perf_reply:
-       out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
-
-       return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-                    const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                    const struct ib_mad_hdr *in, size_t in_mad_size,
-                    struct ib_mad_hdr *out, size_t *out_mad_size,
-                    u16 *out_mad_pkey_index)
-{
-       int ret;
-       const struct ib_mad *in_mad = (const struct ib_mad *)in;
-       struct ib_mad *out_mad = (struct ib_mad *)out;
-
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
-
-       if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
-               return IB_MAD_RESULT_FAILURE;
-
-       /* accept only pma request */
-       if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
-               return IB_MAD_RESULT_SUCCESS;
-
-       ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
-       ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
-                               in_mad, out_mad);
-
-       return ret;
-}
diff --git a/drivers/staging/rdma/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h
deleted file mode 100644 (file)
index d280b12..0000000
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  auxiliary functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Khadija Souissi <souissik@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-
-#ifndef EHCA_TOOLS_H
-#define EHCA_TOOLS_H
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/idr.h>
-#include <linux/kthread.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/vmalloc.h>
-#include <linux/notifier.h>
-#include <linux/cpu.h>
-#include <linux/device.h>
-
-#include <linux/atomic.h>
-#include <asm/ibmebus.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/hvcall.h>
-
-extern int ehca_debug_level;
-
-#define ehca_dbg(ib_dev, format, arg...) \
-       do { \
-               if (unlikely(ehca_debug_level)) \
-                       dev_printk(KERN_DEBUG, (ib_dev)->dma_device, \
-                                  "PU%04x EHCA_DBG:%s " format "\n", \
-                                  raw_smp_processor_id(), __func__, \
-                                  ## arg); \
-       } while (0)
-
-#define ehca_info(ib_dev, format, arg...) \
-       dev_info((ib_dev)->dma_device, "PU%04x EHCA_INFO:%s " format "\n", \
-                raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_warn(ib_dev, format, arg...) \
-       dev_warn((ib_dev)->dma_device, "PU%04x EHCA_WARN:%s " format "\n", \
-                raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_err(ib_dev, format, arg...) \
-       dev_err((ib_dev)->dma_device, "PU%04x EHCA_ERR:%s " format "\n", \
-               raw_smp_processor_id(), __func__, ## arg)
-
-/* use this one only if no ib_dev available */
-#define ehca_gen_dbg(format, arg...) \
-       do { \
-               if (unlikely(ehca_debug_level)) \
-                       printk(KERN_DEBUG "PU%04x EHCA_DBG:%s " format "\n", \
-                              raw_smp_processor_id(), __func__, ## arg); \
-       } while (0)
-
-#define ehca_gen_warn(format, arg...) \
-       printk(KERN_INFO "PU%04x EHCA_WARN:%s " format "\n", \
-              raw_smp_processor_id(), __func__, ## arg)
-
-#define ehca_gen_err(format, arg...) \
-       printk(KERN_ERR "PU%04x EHCA_ERR:%s " format "\n", \
-              raw_smp_processor_id(), __func__, ## arg)
-
-/**
- * ehca_dmp - printk a memory block, whose length is n*8 bytes.
- * Each line has the following layout:
- * <format string> adr=X ofs=Y <8 bytes hex> <8 bytes hex>
- */
-#define ehca_dmp(adr, len, format, args...) \
-       do { \
-               unsigned int x; \
-               unsigned int l = (unsigned int)(len); \
-               unsigned char *deb = (unsigned char *)(adr); \
-               for (x = 0; x < l; x += 16) { \
-                       printk(KERN_INFO "EHCA_DMP:%s " format \
-                              " adr=%p ofs=%04x %016llx %016llx\n", \
-                              __func__, ##args, deb, x, \
-                              *((u64 *)&deb[0]), *((u64 *)&deb[8])); \
-                       deb += 16; \
-               } \
-       } while (0)
-
-/* define a bitmask, little endian version */
-#define EHCA_BMASK(pos, length) (((pos) << 16) + (length))
-
-/* define a bitmask, the ibm way... */
-#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
-
-/* internal function, don't use */
-#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
-
-/* internal function, don't use */
-#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
-
-/**
- * EHCA_BMASK_SET - return the value shifted and masked according to mask
- * variable |= EHCA_BMASK_SET(MY_MASK, 0x4711) ORs the field bits into variable
- * variable &= ~EHCA_BMASK_SET(MY_MASK, -1) clears the field bits in variable
- */
-#define EHCA_BMASK_SET(mask, value) \
-       ((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
-
-/**
- * EHCA_BMASK_GET - extract a parameter from value by mask
- */
-#define EHCA_BMASK_GET(mask, value) \
-       (EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))
-
-/* Converts ehca to ib return code */
-int ehca2ib_return_code(u64 ehca_rc);
-
-#endif /* EHCA_TOOLS_H */
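A stand-alone worked example of the EHCA_BMASK helpers above, buildable in user space with the macro definitions copied verbatim from this header. EXAMPLE_FIELD mirrors H_ALL_RES_QP_SERVICE_TYPE from hcp_if.c (IBM bits 13..15, i.e. a 3-bit field whose least significant bit sits at bit 48 in little-endian numbering); the typedef and test values are assumptions for the sketch.

#include <assert.h>
#include <stdio.h>

typedef unsigned long long u64;

#define EHCA_BMASK_IBM(from, to) (((63 - to) << 16) + ((to) - (from) + 1))
#define EHCA_BMASK_SHIFTPOS(mask) (((mask) >> 16) & 0xffff)
#define EHCA_BMASK_MASK(mask) (~0ULL >> ((64 - (mask)) & 0xffff))
#define EHCA_BMASK_SET(mask, value) \
	((EHCA_BMASK_MASK(mask) & ((u64)(value))) << EHCA_BMASK_SHIFTPOS(mask))
#define EHCA_BMASK_GET(mask, value) \
	(EHCA_BMASK_MASK(mask) & (((u64)(value)) >> EHCA_BMASK_SHIFTPOS(mask)))

#define EXAMPLE_FIELD EHCA_BMASK_IBM(13, 15)   /* shift 48, width 3 */

int main(void)
{
	u64 reg = 0;

	reg |= EHCA_BMASK_SET(EXAMPLE_FIELD, 5);   /* place 0b101 at bits 50..48 */
	assert(reg == 0x0005000000000000ULL);
	assert(EHCA_BMASK_GET(EXAMPLE_FIELD, reg) == 5);

	printf("reg=%#llx field=%llu\n", reg, EHCA_BMASK_GET(EXAMPLE_FIELD, reg));
	return 0;
}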
diff --git a/drivers/staging/rdma/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c
deleted file mode 100644 (file)
index 1a1d5d9..0000000
+++ /dev/null
@@ -1,309 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  userspace support verbs
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_classes.h"
-#include "ehca_iverbs.h"
-#include "ehca_mrmw.h"
-#include "ehca_tools.h"
-#include "hcp_if.h"
-
-struct ib_ucontext *ehca_alloc_ucontext(struct ib_device *device,
-                                       struct ib_udata *udata)
-{
-       struct ehca_ucontext *my_context;
-
-       my_context = kzalloc(sizeof *my_context, GFP_KERNEL);
-       if (!my_context) {
-               ehca_err(device, "Out of memory device=%p", device);
-               return ERR_PTR(-ENOMEM);
-       }
-
-       return &my_context->ib_ucontext;
-}
-
-int ehca_dealloc_ucontext(struct ib_ucontext *context)
-{
-       kfree(container_of(context, struct ehca_ucontext, ib_ucontext));
-       return 0;
-}
-
-static void ehca_mm_open(struct vm_area_struct *vma)
-{
-       u32 *count = (u32 *)vma->vm_private_data;
-       if (!count) {
-               ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
-                            vma->vm_start, vma->vm_end);
-               return;
-       }
-       (*count)++;
-       if (!(*count))
-               ehca_gen_err("Use count overflow vm_start=%lx vm_end=%lx",
-                            vma->vm_start, vma->vm_end);
-       ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
-                    vma->vm_start, vma->vm_end, *count);
-}
-
-static void ehca_mm_close(struct vm_area_struct *vma)
-{
-       u32 *count = (u32 *)vma->vm_private_data;
-       if (!count) {
-               ehca_gen_err("Invalid vma struct vm_start=%lx vm_end=%lx",
-                            vma->vm_start, vma->vm_end);
-               return;
-       }
-       (*count)--;
-       ehca_gen_dbg("vm_start=%lx vm_end=%lx count=%x",
-                    vma->vm_start, vma->vm_end, *count);
-}
-
-static const struct vm_operations_struct vm_ops = {
-       .open = ehca_mm_open,
-       .close = ehca_mm_close,
-};
-
-static int ehca_mmap_fw(struct vm_area_struct *vma, struct h_galpas *galpas,
-                       u32 *mm_count)
-{
-       int ret;
-       u64 vsize, physical;
-
-       vsize = vma->vm_end - vma->vm_start;
-       if (vsize < EHCA_PAGESIZE) {
-               ehca_gen_err("invalid vsize=%lx", vma->vm_end - vma->vm_start);
-               return -EINVAL;
-       }
-
-       physical = galpas->user.fw_handle;
-       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-       ehca_gen_dbg("vsize=%llx physical=%llx", vsize, physical);
-       /* VM_IO | VM_DONTEXPAND | VM_DONTDUMP are set by remap_pfn_range() */
-       ret = remap_4k_pfn(vma, vma->vm_start, physical >> EHCA_PAGESHIFT,
-                          vma->vm_page_prot);
-       if (unlikely(ret)) {
-               ehca_gen_err("remap_pfn_range() failed ret=%i", ret);
-               return -ENOMEM;
-       }
-
-       vma->vm_private_data = mm_count;
-       (*mm_count)++;
-       vma->vm_ops = &vm_ops;
-
-       return 0;
-}
-
-static int ehca_mmap_queue(struct vm_area_struct *vma, struct ipz_queue *queue,
-                          u32 *mm_count)
-{
-       int ret;
-       u64 start, ofs;
-       struct page *page;
-
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       start = vma->vm_start;
-       for (ofs = 0; ofs < queue->queue_length; ofs += PAGE_SIZE) {
-               u64 virt_addr = (u64)ipz_qeit_calc(queue, ofs);
-               page = virt_to_page(virt_addr);
-               ret = vm_insert_page(vma, start, page);
-               if (unlikely(ret)) {
-                       ehca_gen_err("vm_insert_page() failed rc=%i", ret);
-                       return ret;
-               }
-               start += PAGE_SIZE;
-       }
-       vma->vm_private_data = mm_count;
-       (*mm_count)++;
-       vma->vm_ops = &vm_ops;
-
-       return 0;
-}
-
-static int ehca_mmap_cq(struct vm_area_struct *vma, struct ehca_cq *cq,
-                       u32 rsrc_type)
-{
-       int ret;
-
-       switch (rsrc_type) {
-       case 0: /* galpa fw handle */
-               ehca_dbg(cq->ib_cq.device, "cq_num=%x fw", cq->cq_number);
-               ret = ehca_mmap_fw(vma, &cq->galpas, &cq->mm_count_galpa);
-               if (unlikely(ret)) {
-                       ehca_err(cq->ib_cq.device,
-                                "ehca_mmap_fw() failed rc=%i cq_num=%x",
-                                ret, cq->cq_number);
-                       return ret;
-               }
-               break;
-
-       case 1: /* cq queue_addr */
-               ehca_dbg(cq->ib_cq.device, "cq_num=%x queue", cq->cq_number);
-               ret = ehca_mmap_queue(vma, &cq->ipz_queue, &cq->mm_count_queue);
-               if (unlikely(ret)) {
-                       ehca_err(cq->ib_cq.device,
-                                "ehca_mmap_queue() failed rc=%i cq_num=%x",
-                                ret, cq->cq_number);
-                       return ret;
-               }
-               break;
-
-       default:
-               ehca_err(cq->ib_cq.device, "bad resource type=%x cq_num=%x",
-                        rsrc_type, cq->cq_number);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
-                       u32 rsrc_type)
-{
-       int ret;
-
-       switch (rsrc_type) {
-       case 0: /* galpa fw handle */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x fw", qp->ib_qp.qp_num);
-               ret = ehca_mmap_fw(vma, &qp->galpas, &qp->mm_count_galpa);
-               if (unlikely(ret)) {
-                       ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_fw() failed ret=%i qp_num=%x",
-                                ret, qp->ib_qp.qp_num);
-                       return -ENOMEM;
-               }
-               break;
-
-       case 1: /* qp rqueue_addr */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
-               ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
-                                     &qp->mm_count_rqueue);
-               if (unlikely(ret)) {
-                       ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_queue(rq) failed rc=%i qp_num=%x",
-                                ret, qp->ib_qp.qp_num);
-                       return ret;
-               }
-               break;
-
-       case 2: /* qp squeue_addr */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
-               ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
-                                     &qp->mm_count_squeue);
-               if (unlikely(ret)) {
-                       ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_queue(sq) failed rc=%i qp_num=%x",
-                                ret, qp->ib_qp.qp_num);
-                       return ret;
-               }
-               break;
-
-       default:
-               ehca_err(qp->ib_qp.device, "bad resource type=%x qp_num=%x",
-                        rsrc_type, qp->ib_qp.qp_num);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
-       u64 fileoffset = vma->vm_pgoff;
-       u32 idr_handle = fileoffset & 0x1FFFFFF;
-       u32 q_type = (fileoffset >> 27) & 0x1;    /* CQ, QP,...        */
-       u32 rsrc_type = (fileoffset >> 25) & 0x3; /* sq,rq,cmnd_window */
-       u32 ret;
-       struct ehca_cq *cq;
-       struct ehca_qp *qp;
-       struct ib_uobject *uobject;
-
-       switch (q_type) {
-       case  0: /* CQ */
-               read_lock(&ehca_cq_idr_lock);
-               cq = idr_find(&ehca_cq_idr, idr_handle);
-               read_unlock(&ehca_cq_idr_lock);
-
-               /* make sure this mmap really belongs to the authorized user */
-               if (!cq)
-                       return -EINVAL;
-
-               if (!cq->ib_cq.uobject || cq->ib_cq.uobject->context != context)
-                       return -EINVAL;
-
-               ret = ehca_mmap_cq(vma, cq, rsrc_type);
-               if (unlikely(ret)) {
-                       ehca_err(cq->ib_cq.device,
-                                "ehca_mmap_cq() failed rc=%i cq_num=%x",
-                                ret, cq->cq_number);
-                       return ret;
-               }
-               break;
-
-       case 1: /* QP */
-               read_lock(&ehca_qp_idr_lock);
-               qp = idr_find(&ehca_qp_idr, idr_handle);
-               read_unlock(&ehca_qp_idr_lock);
-
-               /* make sure this mmap really belongs to the authorized user */
-               if (!qp)
-                       return -EINVAL;
-
-               uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
-               if (!uobject || uobject->context != context)
-                       return -EINVAL;
-
-               ret = ehca_mmap_qp(vma, qp, rsrc_type);
-               if (unlikely(ret)) {
-                       ehca_err(qp->ib_qp.device,
-                                "ehca_mmap_qp() failed rc=%i qp_num=%x",
-                                ret, qp->ib_qp.qp_num);
-                       return ret;
-               }
-               break;
-
-       default:
-               ehca_gen_err("bad queue type %x", q_type);
-               return -EINVAL;
-       }
-
-       return 0;
-}
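A stand-alone round-trip check of the vm_pgoff encoding decoded at the top of ehca_mmap(): bits 0..24 carry the idr handle, bits 25..26 the resource type (fw handle, rqueue, squeue) and bit 27 the queue type (CQ or QP). The encode_pgoff helper is hypothetical; presumably user space passes this value scaled to pages as the mmap offset, but only the decode expressions below are taken from the driver.

#include <assert.h>
#include <stdio.h>

static unsigned long encode_pgoff(unsigned int q_type, unsigned int rsrc_type,
				  unsigned int idr_handle)
{
	return ((unsigned long)(q_type & 0x1) << 27) |
	       ((unsigned long)(rsrc_type & 0x3) << 25) |
	       (idr_handle & 0x1FFFFFF);
}

int main(void)
{
	unsigned long fileoffset = encode_pgoff(1 /* QP */, 2 /* squeue */, 0x1234);

	/* same expressions as in ehca_mmap() */
	unsigned int idr_handle = fileoffset & 0x1FFFFFF;
	unsigned int q_type     = (fileoffset >> 27) & 0x1;
	unsigned int rsrc_type  = (fileoffset >> 25) & 0x3;

	assert(idr_handle == 0x1234 && q_type == 1 && rsrc_type == 2);
	printf("pgoff=%#lx -> q_type=%u rsrc_type=%u handle=%#x\n",
	       fileoffset, q_type, rsrc_type, idr_handle);
	return 0;
}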
diff --git a/drivers/staging/rdma/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c
deleted file mode 100644 (file)
index 89517ff..0000000
+++ /dev/null
@@ -1,949 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Firmware Infiniband Interface code for POWER
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Joachim Fenkes <fenkes@de.ibm.com>
- *           Gerd Bayer <gerd.bayer@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <asm/hvcall.h>
-#include "ehca_tools.h"
-#include "hcp_if.h"
-#include "hcp_phyp.h"
-#include "hipz_fns.h"
-#include "ipz_pt_fn.h"
-
-#define H_ALL_RES_QP_ENHANCED_OPS       EHCA_BMASK_IBM(9, 11)
-#define H_ALL_RES_QP_PTE_PIN            EHCA_BMASK_IBM(12, 12)
-#define H_ALL_RES_QP_SERVICE_TYPE       EHCA_BMASK_IBM(13, 15)
-#define H_ALL_RES_QP_STORAGE            EHCA_BMASK_IBM(16, 17)
-#define H_ALL_RES_QP_LL_RQ_CQE_POSTING  EHCA_BMASK_IBM(18, 18)
-#define H_ALL_RES_QP_LL_SQ_CQE_POSTING  EHCA_BMASK_IBM(19, 21)
-#define H_ALL_RES_QP_SIGNALING_TYPE     EHCA_BMASK_IBM(22, 23)
-#define H_ALL_RES_QP_UD_AV_LKEY_CTRL    EHCA_BMASK_IBM(31, 31)
-#define H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE EHCA_BMASK_IBM(32, 35)
-#define H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE EHCA_BMASK_IBM(36, 39)
-#define H_ALL_RES_QP_RESOURCE_TYPE      EHCA_BMASK_IBM(56, 63)
-
-#define H_ALL_RES_QP_MAX_OUTST_SEND_WR  EHCA_BMASK_IBM(0, 15)
-#define H_ALL_RES_QP_MAX_OUTST_RECV_WR  EHCA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
-#define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)
-
-#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
-#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
-#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
-#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)
-
-#define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
-#define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
-#define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
-#define H_ALL_RES_QP_ACT_RECV_SGE       EHCA_BMASK_IBM(24, 31)
-
-#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(0, 31)
-#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES  EHCA_BMASK_IBM(32, 63)
-
-#define H_MP_INIT_TYPE                  EHCA_BMASK_IBM(44, 47)
-#define H_MP_SHUTDOWN                   EHCA_BMASK_IBM(48, 48)
-#define H_MP_RESET_QKEY_CTR             EHCA_BMASK_IBM(49, 49)
-
-#define HCALL4_REGS_FORMAT "r4=%lx r5=%lx r6=%lx r7=%lx"
-#define HCALL7_REGS_FORMAT HCALL4_REGS_FORMAT " r8=%lx r9=%lx r10=%lx"
-#define HCALL9_REGS_FORMAT HCALL7_REGS_FORMAT " r11=%lx r12=%lx"
-
-static DEFINE_SPINLOCK(hcall_lock);
-
-static long ehca_plpar_hcall_norets(unsigned long opcode,
-                                   unsigned long arg1,
-                                   unsigned long arg2,
-                                   unsigned long arg3,
-                                   unsigned long arg4,
-                                   unsigned long arg5,
-                                   unsigned long arg6,
-                                   unsigned long arg7)
-{
-       long ret;
-       int i, sleep_msecs;
-       unsigned long flags = 0;
-
-       if (unlikely(ehca_debug_level >= 2))
-               ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
-                            opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
-
-       for (i = 0; i < 5; i++) {
-               /* serialize hCalls to work around firmware issue */
-               if (ehca_lock_hcalls)
-                       spin_lock_irqsave(&hcall_lock, flags);
-
-               ret = plpar_hcall_norets(opcode, arg1, arg2, arg3, arg4,
-                                        arg5, arg6, arg7);
-
-               if (ehca_lock_hcalls)
-                       spin_unlock_irqrestore(&hcall_lock, flags);
-
-               if (H_IS_LONG_BUSY(ret)) {
-                       sleep_msecs = get_longbusy_msecs(ret);
-                       msleep_interruptible(sleep_msecs);
-                       continue;
-               }
-
-               if (ret < H_SUCCESS)
-                       ehca_gen_err("opcode=%lx ret=%li " HCALL7_REGS_FORMAT,
-                                    opcode, ret, arg1, arg2, arg3,
-                                    arg4, arg5, arg6, arg7);
-               else
-                       if (unlikely(ehca_debug_level >= 2))
-                               ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
-
-               return ret;
-       }
-
-       return H_BUSY;
-}
-
-static long ehca_plpar_hcall9(unsigned long opcode,
-                             unsigned long *outs, /* array of 9 outputs */
-                             unsigned long arg1,
-                             unsigned long arg2,
-                             unsigned long arg3,
-                             unsigned long arg4,
-                             unsigned long arg5,
-                             unsigned long arg6,
-                             unsigned long arg7,
-                             unsigned long arg8,
-                             unsigned long arg9)
-{
-       long ret;
-       int i, sleep_msecs;
-       unsigned long flags = 0;
-
-       if (unlikely(ehca_debug_level >= 2))
-               ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
-                            arg1, arg2, arg3, arg4, arg5,
-                            arg6, arg7, arg8, arg9);
-
-       for (i = 0; i < 5; i++) {
-               /* serialize hCalls to work around firmware issue */
-               if (ehca_lock_hcalls)
-                       spin_lock_irqsave(&hcall_lock, flags);
-
-               ret = plpar_hcall9(opcode, outs,
-                                  arg1, arg2, arg3, arg4, arg5,
-                                  arg6, arg7, arg8, arg9);
-
-               if (ehca_lock_hcalls)
-                       spin_unlock_irqrestore(&hcall_lock, flags);
-
-               if (H_IS_LONG_BUSY(ret)) {
-                       sleep_msecs = get_longbusy_msecs(ret);
-                       msleep_interruptible(sleep_msecs);
-                       continue;
-               }
-
-               if (ret < H_SUCCESS) {
-                       ehca_gen_err("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT,
-                                    opcode, arg1, arg2, arg3, arg4, arg5,
-                                    arg6, arg7, arg8, arg9);
-                       ehca_gen_err("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
-                                    ret, outs[0], outs[1], outs[2], outs[3],
-                                    outs[4], outs[5], outs[6], outs[7],
-                                    outs[8]);
-               } else if (unlikely(ehca_debug_level >= 2))
-                       ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
-                                    ret, outs[0], outs[1], outs[2], outs[3],
-                                    outs[4], outs[5], outs[6], outs[7],
-                                    outs[8]);
-               return ret;
-       }
-
-       return H_BUSY;
-}
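A generic, stand-alone sketch of the retry discipline used by the two hcall wrappers above: attempt the operation, and if it reports "long busy" sleep for the suggested interval and try again, giving up after a fixed number of rounds. try_hcall(), its status codes and the sleep interval are all hypothetical placeholders for the firmware interface, not the real hypervisor calls.

#include <stdio.h>
#include <unistd.h>

#define MAX_TRIES   5
#define ST_SUCCESS  0
#define ST_LONGBUSY 1          /* "come back later", with a hint in *wait_ms */
#define ST_BUSY     2          /* still busy after all retries */

static int try_hcall(int attempt, int *wait_ms)
{
	if (attempt < 2) {              /* pretend firmware is busy twice */
		*wait_ms = 10;
		return ST_LONGBUSY;
	}
	return ST_SUCCESS;
}

static int hcall_with_retry(void)
{
	int i, wait_ms, ret;

	for (i = 0; i < MAX_TRIES; i++) {
		ret = try_hcall(i, &wait_ms);
		if (ret == ST_LONGBUSY) {
			usleep(wait_ms * 1000);     /* back off as advised */
			continue;
		}
		return ret;                         /* success or hard error */
	}
	return ST_BUSY;                             /* retries exhausted */
}

int main(void)
{
	printf("hcall status=%d\n", hcall_with_retry());
	return 0;
}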
-
-u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_pfeq *pfeq,
-                            const u32 neq_control,
-                            const u32 number_of_entries,
-                            struct ipz_eq_handle *eq_handle,
-                            u32 *act_nr_of_entries,
-                            u32 *act_pages,
-                            u32 *eq_ist)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-       u64 allocate_controls;
-
-       /* resource type */
-       allocate_controls = 3ULL;
-
-       /* ISN is associated */
-       if (neq_control != 1)
-               allocate_controls = (1ULL << (63 - 7)) | allocate_controls;
-       else /* notification event queue */
-               allocate_controls = (1ULL << 63) | allocate_controls;
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,  /* r4 */
-                               allocate_controls,      /* r5 */
-                               number_of_entries,      /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       eq_handle->handle = outs[0];
-       *act_nr_of_entries = (u32)outs[3];
-       *act_pages = (u32)outs[4];
-       *eq_ist = (u32)outs[5];
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Not enough resource - ret=%lli ", ret);
-
-       return ret;
-}
-
-u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
-                      struct ipz_eq_handle eq_handle,
-                      const u64 event_mask)
-{
-       return ehca_plpar_hcall_norets(H_RESET_EVENTS,
-                                      adapter_handle.handle, /* r4 */
-                                      eq_handle.handle,      /* r5 */
-                                      event_mask,            /* r6 */
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_cq *cq,
-                            struct ehca_alloc_cq_parms *param)
-{
-       int rc;
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,   /* r4  */
-                               2,                       /* r5  */
-                               param->eq_handle.handle, /* r6  */
-                               cq->token,               /* r7  */
-                               param->nr_cqe,           /* r8  */
-                               0, 0, 0, 0);
-       cq->ipz_cq_handle.handle = outs[0];
-       param->act_nr_of_entries = (u32)outs[3];
-       param->act_pages = (u32)outs[4];
-
-       if (ret == H_SUCCESS) {
-               rc = hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
-               if (rc) {
-                       ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
-                                    rc, outs[5]);
-
-                       ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                               adapter_handle.handle,     /* r4 */
-                                               cq->ipz_cq_handle.handle,  /* r5 */
-                                               0, 0, 0, 0, 0);
-                       ret = H_NO_MEM;
-               }
-       }
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_alloc_qp_parms *parms, int is_user)
-{
-       int rc;
-       u64 ret;
-       u64 allocate_controls, max_r10_reg, r11, r12;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       allocate_controls =
-               EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_STORAGE, parms->qp_storage)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_SQ_PAGE_SIZE,
-                                parms->squeue.page_size)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_SMALL_RQ_PAGE_SIZE,
-                                parms->rqueue.page_size)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
-                                !!(parms->ll_comp_flags & LLQP_RECV_COMP))
-               | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
-                                !!(parms->ll_comp_flags & LLQP_SEND_COMP))
-               | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
-                                parms->ud_av_l_key_ctl)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
-
-       max_r10_reg =
-               EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
-                              parms->squeue.max_wr + 1)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
-                                parms->rqueue.max_wr + 1)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
-                                parms->squeue.max_sge)
-               | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
-                                parms->rqueue.max_sge);
-
-       r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
-
-       if (parms->ext_type == EQPT_SRQ)
-               r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
-       else
-               r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,             /* r4  */
-                               allocate_controls,                 /* r5  */
-                               parms->send_cq_handle.handle,
-                               parms->recv_cq_handle.handle,
-                               parms->eq_handle.handle,
-                               ((u64)parms->token << 32) | parms->pd.value,
-                               max_r10_reg, r11, r12);
-
-       parms->qp_handle.handle = outs[0];
-       parms->real_qp_num = (u32)outs[1];
-       parms->squeue.act_nr_wqes =
-               (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
-       parms->rqueue.act_nr_wqes =
-               (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_RECV_WR, outs[2]);
-       parms->squeue.act_nr_sges =
-               (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_SEND_SGE, outs[3]);
-       parms->rqueue.act_nr_sges =
-               (u8)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_RECV_SGE, outs[3]);
-       parms->squeue.queue_size =
-               (u32)EHCA_BMASK_GET(H_ALL_RES_QP_SQUEUE_SIZE_PAGES, outs[4]);
-       parms->rqueue.queue_size =
-               (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
-
-       if (ret == H_SUCCESS) {
-               rc = hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
-               if (rc) {
-                       ehca_gen_err("Could not establish HW access. rc=%d paddr=%#lx",
-                                    rc, outs[6]);
-
-                       ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                               adapter_handle.handle,     /* r4 */
-                                               parms->qp_handle.handle,  /* r5 */
-                                               0, 0, 0, 0, 0);
-                       ret = H_NO_MEM;
-               }
-       }
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
-                     const u8 port_id,
-                     struct hipz_query_port *query_port_response_block)
-{
-       u64 ret;
-       u64 r_cb = __pa(query_port_response_block);
-
-       if (r_cb & (EHCA_PAGESIZE-1)) {
-               ehca_gen_err("response block not page aligned");
-               return H_PARAMETER;
-       }
-
-       ret = ehca_plpar_hcall_norets(H_QUERY_PORT,
-                                     adapter_handle.handle, /* r4 */
-                                     port_id,               /* r5 */
-                                     r_cb,                  /* r6 */
-                                     0, 0, 0, 0);
-
-       if (ehca_debug_level >= 2)
-               ehca_dmp(query_port_response_block, 64, "response_block");
-
-       return ret;
-}
-
-u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
-                      const u8 port_id, const u32 port_cap,
-                      const u8 init_type, const int modify_mask)
-{
-       u64 port_attributes = port_cap;
-
-       if (modify_mask & IB_PORT_SHUTDOWN)
-               port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
-       if (modify_mask & IB_PORT_INIT_TYPE)
-               port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
-       if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
-               port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
-
-       return ehca_plpar_hcall_norets(H_MODIFY_PORT,
-                                      adapter_handle.handle, /* r4 */
-                                      port_id,               /* r5 */
-                                      port_attributes,       /* r6 */
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
-                    struct hipz_query_hca *query_hca_rblock)
-{
-       u64 r_cb = __pa(query_hca_rblock);
-
-       if (r_cb & (EHCA_PAGESIZE-1)) {
-               ehca_gen_err("response_block=%p not page aligned",
-                            query_hca_rblock);
-               return H_PARAMETER;
-       }
-
-       return ehca_plpar_hcall_norets(H_QUERY_HCA,
-                                      adapter_handle.handle, /* r4 */
-                                      r_cb,                  /* r5 */
-                                      0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
-                         const u8 pagesize,
-                         const u8 queue_type,
-                         const u64 resource_handle,
-                         const u64 logical_address_of_page,
-                         u64 count)
-{
-       return ehca_plpar_hcall_norets(H_REGISTER_RPAGES,
-                                      adapter_handle.handle,      /* r4  */
-                                      (u64)queue_type | ((u64)pagesize) << 8,
-                                      /* r5  */
-                                      resource_handle,            /* r6  */
-                                      logical_address_of_page,    /* r7  */
-                                      count,                      /* r8  */
-                                      0, 0);
-}
-
-u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_eq_handle eq_handle,
-                            struct ehca_pfeq *pfeq,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count)
-{
-       if (count != 1) {
-               ehca_gen_err("Page counter=%llx", count);
-               return H_PARAMETER;
-       }
-       return hipz_h_register_rpage(adapter_handle,
-                                    pagesize,
-                                    queue_type,
-                                    eq_handle.handle,
-                                    logical_address_of_page, count);
-}
-
-u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
-                          u32 ist)
-{
-       u64 ret;
-       ret = ehca_plpar_hcall_norets(H_QUERY_INT_STATE,
-                                     adapter_handle.handle, /* r4 */
-                                     ist,                   /* r5 */
-                                     0, 0, 0, 0, 0);
-
-       if (ret != H_SUCCESS && ret != H_BUSY)
-               ehca_gen_err("Could not query interrupt state.");
-
-       return ret;
-}
-
-u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_cq_handle cq_handle,
-                            struct ehca_pfcq *pfcq,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count,
-                            const struct h_galpa gal)
-{
-       if (count != 1) {
-               ehca_gen_err("Page counter=%llx", count);
-               return H_PARAMETER;
-       }
-
-       return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
-                                    cq_handle.handle, logical_address_of_page,
-                                    count);
-}
-
-u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_qp_handle qp_handle,
-                            struct ehca_pfqp *pfqp,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count,
-                            const struct h_galpa galpa)
-{
-       if (count > 1) {
-               ehca_gen_err("Page counter=%llx", count);
-               return H_PARAMETER;
-       }
-
-       return hipz_h_register_rpage(adapter_handle, pagesize, queue_type,
-                                    qp_handle.handle, logical_address_of_page,
-                                    count);
-}
-
-u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
-                              const struct ipz_qp_handle qp_handle,
-                              struct ehca_pfqp *pfqp,
-                              void **log_addr_next_sq_wqe2processed,
-                              void **log_addr_next_rq_wqe2processed,
-                              int dis_and_get_function_code)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
-                               adapter_handle.handle,     /* r4 */
-                               dis_and_get_function_code, /* r5 */
-                               qp_handle.handle,          /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       if (log_addr_next_sq_wqe2processed)
-               *log_addr_next_sq_wqe2processed = (void *)outs[0];
-       if (log_addr_next_rq_wqe2processed)
-               *log_addr_next_rq_wqe2processed = (void *)outs[1];
-
-       return ret;
-}
-
-u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
-                    const struct ipz_qp_handle qp_handle,
-                    struct ehca_pfqp *pfqp,
-                    const u64 update_mask,
-                    struct hcp_modify_qp_control_block *mqpcb,
-                    struct h_galpa gal)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-       ret = ehca_plpar_hcall9(H_MODIFY_QP, outs,
-                               adapter_handle.handle, /* r4 */
-                               qp_handle.handle,      /* r5 */
-                               update_mask,           /* r6 */
-                               __pa(mqpcb),           /* r7 */
-                               0, 0, 0, 0, 0);
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Insufficient resources ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
-                   const struct ipz_qp_handle qp_handle,
-                   struct ehca_pfqp *pfqp,
-                   struct hcp_modify_qp_control_block *qqpcb,
-                   struct h_galpa gal)
-{
-       return ehca_plpar_hcall_norets(H_QUERY_QP,
-                                      adapter_handle.handle, /* r4 */
-                                      qp_handle.handle,      /* r5 */
-                                      __pa(qqpcb),           /* r6 */
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_qp *qp)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = hcp_galpas_dtor(&qp->galpas);
-       if (ret) {
-               ehca_gen_err("Could not destruct qp->galpas");
-               return H_RESOURCE;
-       }
-       ret = ehca_plpar_hcall9(H_DISABLE_AND_GETC, outs,
-                               adapter_handle.handle,     /* r4 */
-                               /* function code */
-                               1,                         /* r5 */
-                               qp->ipz_qp_handle.handle,  /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       if (ret == H_HARDWARE)
-               ehca_gen_err("HCA not operational. ret=%lli", ret);
-
-       ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                     adapter_handle.handle,     /* r4 */
-                                     qp->ipz_qp_handle.handle,  /* r5 */
-                                     0, 0, 0, 0, 0);
-
-       if (ret == H_RESOURCE)
-               ehca_gen_err("Resource still in use. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u32 port)
-{
-       return ehca_plpar_hcall_norets(H_DEFINE_AQP0,
-                                      adapter_handle.handle, /* r4 */
-                                      qp_handle.handle,      /* r5 */
-                                      port,                  /* r6 */
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u32 port, u32 *pma_qp_nr,
-                      u32 *bma_qp_nr)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_DEFINE_AQP1, outs,
-                               adapter_handle.handle, /* r4 */
-                               qp_handle.handle,      /* r5 */
-                               port,                  /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       *pma_qp_nr = (u32)outs[0];
-       *bma_qp_nr = (u32)outs[1];
-
-       if (ret == H_ALIAS_EXIST)
-               ehca_gen_err("AQP1 already exists. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u16 mcg_dlid,
-                      u64 subnet_prefix, u64 interface_id)
-{
-       u64 ret;
-
-       ret = ehca_plpar_hcall_norets(H_ATTACH_MCQP,
-                                     adapter_handle.handle,  /* r4 */
-                                     qp_handle.handle,       /* r5 */
-                                     mcg_dlid,               /* r6 */
-                                     interface_id,           /* r7 */
-                                     subnet_prefix,          /* r8 */
-                                     0, 0);
-
-       if (ret == H_NOT_ENOUGH_RESOURCES)
-               ehca_gen_err("Not enough resources. ret=%lli", ret);
-
-       return ret;
-}
-
-u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u16 mcg_dlid,
-                      u64 subnet_prefix, u64 interface_id)
-{
-       return ehca_plpar_hcall_norets(H_DETACH_MCQP,
-                                      adapter_handle.handle, /* r4 */
-                                      qp_handle.handle,      /* r5 */
-                                      mcg_dlid,              /* r6 */
-                                      interface_id,          /* r7 */
-                                      subnet_prefix,         /* r8 */
-                                      0, 0);
-}
-
-u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_cq *cq,
-                     u8 force_flag)
-{
-       u64 ret;
-
-       ret = hcp_galpas_dtor(&cq->galpas);
-       if (ret) {
-               ehca_gen_err("Could not destruct cq->galpas");
-               return H_RESOURCE;
-       }
-
-       ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                     adapter_handle.handle,     /* r4 */
-                                     cq->ipz_cq_handle.handle,  /* r5 */
-                                     force_flag != 0 ? 1L : 0L, /* r6 */
-                                     0, 0, 0, 0);
-
-       if (ret == H_RESOURCE)
-               ehca_gen_err("H_FREE_RESOURCE failed ret=%lli ", ret);
-
-       return ret;
-}
-
-u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_eq *eq)
-{
-       u64 ret;
-
-       ret = hcp_galpas_dtor(&eq->galpas);
-       if (ret) {
-               ehca_gen_err("Could not destruct eq->galpas");
-               return H_RESOURCE;
-       }
-
-       ret = ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                     adapter_handle.handle,     /* r4 */
-                                     eq->ipz_eq_handle.handle,  /* r5 */
-                                     0, 0, 0, 0, 0);
-
-       if (ret == H_RESOURCE)
-               ehca_gen_err("Resource in use. ret=%lli ", ret);
-
-       return ret;
-}
-
-u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mr *mr,
-                            const u64 vaddr,
-                            const u64 length,
-                            const u32 access_ctrl,
-                            const struct ipz_pd pd,
-                            struct ehca_mr_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,            /* r4 */
-                               5,                                /* r5 */
-                               vaddr,                            /* r6 */
-                               length,                           /* r7 */
-                               (((u64)access_ctrl) << 32ULL),    /* r8 */
-                               pd.value,                         /* r9 */
-                               0, 0, 0);
-       outparms->handle.handle = outs[0];
-       outparms->lkey = (u32)outs[2];
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mr *mr,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count)
-{
-       u64 ret;
-
-       if (unlikely(ehca_debug_level >= 3)) {
-               if (count > 1) {
-                       u64 *kpage;
-                       int i;
-                       kpage = __va(logical_address_of_page);
-                       for (i = 0; i < count; i++)
-                               ehca_gen_dbg("kpage[%d]=%p",
-                                            i, (void *)kpage[i]);
-               } else
-                       ehca_gen_dbg("kpage=%p",
-                                    (void *)logical_address_of_page);
-       }
-
-       if ((count > 1) && (logical_address_of_page & (EHCA_PAGESIZE-1))) {
-               ehca_gen_err("logical_address_of_page not on a 4k boundary "
-                            "adapter_handle=%llx mr=%p mr_handle=%llx "
-                            "pagesize=%x queue_type=%x "
-                            "logical_address_of_page=%llx count=%llx",
-                            adapter_handle.handle, mr,
-                            mr->ipz_mr_handle.handle, pagesize, queue_type,
-                            logical_address_of_page, count);
-               ret = H_PARAMETER;
-       } else
-               ret = hipz_h_register_rpage(adapter_handle, pagesize,
-                                           queue_type,
-                                           mr->ipz_mr_handle.handle,
-                                           logical_address_of_page, count);
-       return ret;
-}
-
-u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
-                   const struct ehca_mr *mr,
-                   struct ehca_mr_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_QUERY_MR, outs,
-                               adapter_handle.handle,     /* r4 */
-                               mr->ipz_mr_handle.handle,  /* r5 */
-                               0, 0, 0, 0, 0, 0, 0);
-       outparms->len = outs[0];
-       outparms->vaddr = outs[1];
-       outparms->acl  = outs[4] >> 32;
-       outparms->lkey = (u32)(outs[5] >> 32);
-       outparms->rkey = (u32)(outs[5] & (0xffffffff));
-
-       return ret;
-}
-
-u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
-                           const struct ehca_mr *mr)
-{
-       return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                      adapter_handle.handle,    /* r4 */
-                                      mr->ipz_mr_handle.handle, /* r5 */
-                                      0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
-                         const struct ehca_mr *mr,
-                         const u64 vaddr_in,
-                         const u64 length,
-                         const u32 access_ctrl,
-                         const struct ipz_pd pd,
-                         const u64 mr_addr_cb,
-                         struct ehca_mr_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_REREGISTER_PMR, outs,
-                               adapter_handle.handle,    /* r4 */
-                               mr->ipz_mr_handle.handle, /* r5 */
-                               vaddr_in,                 /* r6 */
-                               length,                   /* r7 */
-                               /* r8 */
-                               ((((u64)access_ctrl) << 32ULL) | pd.value),
-                               mr_addr_cb,               /* r9 */
-                               0, 0, 0);
-       outparms->vaddr = outs[1];
-       outparms->lkey = (u32)outs[2];
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
-                       const struct ehca_mr *mr,
-                       const struct ehca_mr *orig_mr,
-                       const u64 vaddr_in,
-                       const u32 access_ctrl,
-                       const struct ipz_pd pd,
-                       struct ehca_mr_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_REGISTER_SMR, outs,
-                               adapter_handle.handle,            /* r4 */
-                               orig_mr->ipz_mr_handle.handle,    /* r5 */
-                               vaddr_in,                         /* r6 */
-                               (((u64)access_ctrl) << 32ULL),    /* r7 */
-                               pd.value,                         /* r8 */
-                               0, 0, 0, 0);
-       outparms->handle.handle = outs[0];
-       outparms->lkey = (u32)outs[2];
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mw *mw,
-                            const struct ipz_pd pd,
-                            struct ehca_mw_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
-                               adapter_handle.handle,      /* r4 */
-                               6,                          /* r5 */
-                               pd.value,                   /* r6 */
-                               0, 0, 0, 0, 0, 0);
-       outparms->handle.handle = outs[0];
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
-                   const struct ehca_mw *mw,
-                   struct ehca_mw_hipzout_parms *outparms)
-{
-       u64 ret;
-       unsigned long outs[PLPAR_HCALL9_BUFSIZE];
-
-       ret = ehca_plpar_hcall9(H_QUERY_MW, outs,
-                               adapter_handle.handle,    /* r4 */
-                               mw->ipz_mw_handle.handle, /* r5 */
-                               0, 0, 0, 0, 0, 0, 0);
-       outparms->rkey = (u32)outs[3];
-
-       return ret;
-}
-
-u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
-                           const struct ehca_mw *mw)
-{
-       return ehca_plpar_hcall_norets(H_FREE_RESOURCE,
-                                      adapter_handle.handle,    /* r4 */
-                                      mw->ipz_mw_handle.handle, /* r5 */
-                                      0, 0, 0, 0, 0);
-}
-
-u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
-                     const u64 ressource_handle,
-                     void *rblock,
-                     unsigned long *byte_count)
-{
-       u64 r_cb = __pa(rblock);
-
-       if (r_cb & (EHCA_PAGESIZE-1)) {
-               ehca_gen_err("rblock not page aligned.");
-               return H_PARAMETER;
-       }
-
-       return ehca_plpar_hcall_norets(H_ERROR_DATA,
-                                      adapter_handle.handle,
-                                      ressource_handle,
-                                      r_cb,
-                                      0, 0, 0, 0);
-}
-
-u64 hipz_h_eoi(int irq)
-{
-       unsigned long xirr;
-
-       iosync();
-       xirr = (0xffULL << 24) | irq;
-
-       return plpar_hcall_norets(H_EOI, xirr);
-}
diff --git a/drivers/staging/rdma/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h
deleted file mode 100644 (file)
index a46e514..0000000
+++ /dev/null
@@ -1,265 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Firmware Infiniband Interface code for POWER
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Gerd Bayer <gerd.bayer@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HCP_IF_H__
-#define __HCP_IF_H__
-
-#include "ehca_classes.h"
-#include "ehca_tools.h"
-#include "hipz_hw.h"
-
-/*
- * hipz_h_alloc_resource_eq allocates EQ resources in HW and FW, initializes
- * the resources and creates the empty EQPT (ring).
- */
-u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_pfeq *pfeq,
-                            const u32 neq_control,
-                            const u32 number_of_entries,
-                            struct ipz_eq_handle *eq_handle,
-                            u32 *act_nr_of_entries,
-                            u32 *act_pages,
-                            u32 *eq_ist);
-
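For orientation, the two EQ calls were consumed as a pair: allocate the EQ, learn how many queue pages firmware expects, then register one kernel page per required queue page (each registration must pass count == 1, as enforced in hipz_h_register_rpage_eq). A condensed, hypothetical caller is sketched below; the adapter handle, pfeq context and page list are assumed to be prepared elsewhere, and pagesize/queue_type of 0 are used purely for illustration.

/* sketch only; relies on the prototypes in this header */
static u64 example_create_eq(const struct ipz_adapter_handle adapter_handle,
                             struct ehca_pfeq *pfeq, u32 nr_entries,
                             void *queue_pages[],
                             struct ipz_eq_handle *eq_handle)
{
        u32 act_entries, act_pages, eq_ist, i;
        u64 h_ret;

        /* 1) allocate the EQ; neq_control == 1 would select the NEQ */
        h_ret = hipz_h_alloc_resource_eq(adapter_handle, pfeq, 0, nr_entries,
                                         eq_handle, &act_entries,
                                         &act_pages, &eq_ist);
        if (h_ret != H_SUCCESS)
                return h_ret;

        /* 2) hand firmware one kernel page per required queue page */
        for (i = 0; i < act_pages; i++) {
                h_ret = hipz_h_register_rpage_eq(adapter_handle, *eq_handle,
                                                 pfeq, 0, 0,
                                                 __pa(queue_pages[i]), 1);
                if (h_ret != H_SUCCESS)
                        return h_ret;
        }

        return H_SUCCESS;
}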
-u64 hipz_h_reset_event(const struct ipz_adapter_handle adapter_handle,
-                      struct ipz_eq_handle eq_handle,
-                      const u64 event_mask);
-/*
- * hipz_h_alloc_resource_cq allocates CQ resources in HW and FW, initializes
- * the resources and creates the empty CQPT (ring).
- */
-u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_cq *cq,
-                            struct ehca_alloc_cq_parms *param);
-
-
-/*
- * hipz_h_alloc_resource_qp allocates QP resources in HW and FW,
- * initializes the resources and creates empty QPPTs (2 rings).
- */
-u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_alloc_qp_parms *parms, int is_user);
-
-u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
-                     const u8 port_id,
-                     struct hipz_query_port *query_port_response_block);
-
-u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
-                      const u8 port_id, const u32 port_cap,
-                      const u8 init_type, const int modify_mask);
-
-u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
-                    struct hipz_query_hca *query_hca_rblock);
-
-/*
- * hipz_h_register_rpage is the internal helper in hcp_if.h behind all
- * H_REGISTER_RPAGE hcalls.
- */
-u64 hipz_h_register_rpage(const struct ipz_adapter_handle adapter_handle,
-                         const u8 pagesize,
-                         const u8 queue_type,
-                         const u64 resource_handle,
-                         const u64 logical_address_of_page,
-                         u64 count);
-
-u64 hipz_h_register_rpage_eq(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_eq_handle eq_handle,
-                            struct ehca_pfeq *pfeq,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count);
-
-u64 hipz_h_query_int_state(const struct ipz_adapter_handle adapter_handle,
-                          u32 ist);
-
-u64 hipz_h_register_rpage_cq(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_cq_handle cq_handle,
-                            struct ehca_pfcq *pfcq,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count,
-                            const struct h_galpa gal);
-
-u64 hipz_h_register_rpage_qp(const struct ipz_adapter_handle adapter_handle,
-                            const struct ipz_qp_handle qp_handle,
-                            struct ehca_pfqp *pfqp,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count,
-                            const struct h_galpa galpa);
-
-u64 hipz_h_disable_and_get_wqe(const struct ipz_adapter_handle adapter_handle,
-                              const struct ipz_qp_handle qp_handle,
-                              struct ehca_pfqp *pfqp,
-                              void **log_addr_next_sq_wqe_tb_processed,
-                              void **log_addr_next_rq_wqe_tb_processed,
-                              int dis_and_get_function_code);
-enum hcall_sigt {
-       HCALL_SIGT_NO_CQE = 0,
-       HCALL_SIGT_BY_WQE = 1,
-       HCALL_SIGT_EVERY = 2
-};
-
-u64 hipz_h_modify_qp(const struct ipz_adapter_handle adapter_handle,
-                    const struct ipz_qp_handle qp_handle,
-                    struct ehca_pfqp *pfqp,
-                    const u64 update_mask,
-                    struct hcp_modify_qp_control_block *mqpcb,
-                    struct h_galpa gal);
-
-u64 hipz_h_query_qp(const struct ipz_adapter_handle adapter_handle,
-                   const struct ipz_qp_handle qp_handle,
-                   struct ehca_pfqp *pfqp,
-                   struct hcp_modify_qp_control_block *qqpcb,
-                   struct h_galpa gal);
-
-u64 hipz_h_destroy_qp(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_qp *qp);
-
-u64 hipz_h_define_aqp0(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u32 port);
-
-u64 hipz_h_define_aqp1(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u32 port, u32 *pma_qp_nr,
-                      u32 *bma_qp_nr);
-
-u64 hipz_h_attach_mcqp(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u16 mcg_dlid,
-                      u64 subnet_prefix, u64 interface_id);
-
-u64 hipz_h_detach_mcqp(const struct ipz_adapter_handle adapter_handle,
-                      const struct ipz_qp_handle qp_handle,
-                      struct h_galpa gal,
-                      u16 mcg_dlid,
-                      u64 subnet_prefix, u64 interface_id);
-
-u64 hipz_h_destroy_cq(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_cq *cq,
-                     u8 force_flag);
-
-u64 hipz_h_destroy_eq(const struct ipz_adapter_handle adapter_handle,
-                     struct ehca_eq *eq);
-
-/*
- * hipz_h_alloc_resource_mr allocates MR resources in HW and FW and
- * initializes the resources.
- */
-u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mr *mr,
-                            const u64 vaddr,
-                            const u64 length,
-                            const u32 access_ctrl,
-                            const struct ipz_pd pd,
-                            struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_register_rpage_mr registers MR resource pages in HW and FW */
-u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mr *mr,
-                            const u8 pagesize,
-                            const u8 queue_type,
-                            const u64 logical_address_of_page,
-                            const u64 count);
-
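The MR calls follow the same allocate-then-feed-pages pattern: hipz_h_alloc_resource_mr reserves the MR object (the new lkey/rkey come back in outparms), after which the backing pages are handed over with hipz_h_register_rpage_mr; with count > 1 a single call may instead pass one kernel page that holds a list of page addresses, as the debug dump in the .c file above shows. A hypothetical caller is sketched below; the page list, page-size encoding and protection domain are assumed to be prepared elsewhere.

/* sketch only; relies on the prototypes in this header */
static u64 example_register_mr(const struct ipz_adapter_handle adapter_handle,
                               struct ehca_mr *mr, u64 vaddr, u64 length,
                               u32 access_ctrl, struct ipz_pd pd, u8 pagesize,
                               u64 rpage_list[], u32 nr_rpages)
{
        struct ehca_mr_hipzout_parms hipzout;
        u64 h_ret;
        u32 i;

        /* reserve the MR object; keys are reported back in hipzout */
        h_ret = hipz_h_alloc_resource_mr(adapter_handle, mr, vaddr, length,
                                         access_ctrl, pd, &hipzout);
        if (h_ret != H_SUCCESS)
                return h_ret;

        /* register the backing pages, one physical page per call here */
        for (i = 0; i < nr_rpages; i++) {
                h_ret = hipz_h_register_rpage_mr(adapter_handle, mr, pagesize,
                                                 0 /* queue_type */,
                                                 rpage_list[i], 1);
                if (h_ret != H_SUCCESS)
                        return h_ret;
        }

        /* hipzout.handle, hipzout.lkey and hipzout.rkey describe the MR */
        return H_SUCCESS;
}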
-/* hipz_h_query_mr queries MR in HW and FW */
-u64 hipz_h_query_mr(const struct ipz_adapter_handle adapter_handle,
-                   const struct ehca_mr *mr,
-                   struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_free_resource_mr frees MR resources in HW and FW */
-u64 hipz_h_free_resource_mr(const struct ipz_adapter_handle adapter_handle,
-                           const struct ehca_mr *mr);
-
-/* hipz_h_reregister_pmr reregisters MR in HW and FW */
-u64 hipz_h_reregister_pmr(const struct ipz_adapter_handle adapter_handle,
-                         const struct ehca_mr *mr,
-                         const u64 vaddr_in,
-                         const u64 length,
-                         const u32 access_ctrl,
-                         const struct ipz_pd pd,
-                         const u64 mr_addr_cb,
-                         struct ehca_mr_hipzout_parms *outparms);
-
-/* hipz_h_register_smr registers a shared MR in HW and FW */
-u64 hipz_h_register_smr(const struct ipz_adapter_handle adapter_handle,
-                       const struct ehca_mr *mr,
-                       const struct ehca_mr *orig_mr,
-                       const u64 vaddr_in,
-                       const u32 access_ctrl,
-                       const struct ipz_pd pd,
-                       struct ehca_mr_hipzout_parms *outparms);
-
-/*
- * hipz_h_alloc_resource_mw allocates MW resources in HW and FW and
- * initializes the resources.
- */
-u64 hipz_h_alloc_resource_mw(const struct ipz_adapter_handle adapter_handle,
-                            const struct ehca_mw *mw,
-                            const struct ipz_pd pd,
-                            struct ehca_mw_hipzout_parms *outparms);
-
-/* hipz_h_query_mw queries MW in HW and FW */
-u64 hipz_h_query_mw(const struct ipz_adapter_handle adapter_handle,
-                   const struct ehca_mw *mw,
-                   struct ehca_mw_hipzout_parms *outparms);
-
-/* hipz_h_free_resource_mw frees MW resources in HW and FW */
-u64 hipz_h_free_resource_mw(const struct ipz_adapter_handle adapter_handle,
-                           const struct ehca_mw *mw);
-
-u64 hipz_h_error_data(const struct ipz_adapter_handle adapter_handle,
-                     const u64 ressource_handle,
-                     void *rblock,
-                     unsigned long *byte_count);
-u64 hipz_h_eoi(int irq);
-
-#endif /* __HCP_IF_H__ */
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c
deleted file mode 100644 (file)
index 077376f..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *   load store abstraction for ehca register access with tracing
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "ehca_classes.h"
-#include "hipz_hw.h"
-
-u64 hcall_map_page(u64 physaddr)
-{
-       return (u64)ioremap(physaddr, EHCA_PAGESIZE);
-}
-
-int hcall_unmap_page(u64 mapaddr)
-{
-       iounmap((volatile void __iomem *) mapaddr);
-       return 0;
-}
-
-int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
-                   u64 paddr_kernel, u64 paddr_user)
-{
-       if (!is_user) {
-               galpas->kernel.fw_handle = hcall_map_page(paddr_kernel);
-               if (!galpas->kernel.fw_handle)
-                       return -ENOMEM;
-       } else
-               galpas->kernel.fw_handle = 0;
-
-       galpas->user.fw_handle = paddr_user;
-
-       return 0;
-}
-
-int hcp_galpas_dtor(struct h_galpas *galpas)
-{
-       if (galpas->kernel.fw_handle) {
-               int ret = hcall_unmap_page(galpas->kernel.fw_handle);
-               if (ret)
-                       return ret;
-       }
-
-       galpas->user.fw_handle = galpas->kernel.fw_handle = 0;
-
-       return 0;
-}
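hcp_galpas_ctor/hcp_galpas_dtor form a constructor/destructor pair around the firmware register page: for a kernel consumer the kernel-side physical address is ioremap()ed once and must be unmapped again on teardown, while a user-space consumer only records the user-side address. A small, hypothetical usage sketch follows; the two physical addresses are assumed to come from the resource-allocation hcalls.

static int example_map_galpas(struct h_galpas *galpas,
                              u64 paddr_kernel, u64 paddr_user)
{
        int rc;

        /* is_user == 0: map paddr_kernel into the kernel address space */
        rc = hcp_galpas_ctor(galpas, 0, paddr_kernel, paddr_user);
        if (rc)
                return rc;      /* -ENOMEM if the ioremap() failed */

        /* ... access adapter registers through galpas->kernel.fw_handle ... */

        /* teardown: iounmap() the kernel mapping and clear both handles */
        return hcp_galpas_dtor(galpas);
}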
diff --git a/drivers/staging/rdma/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h
deleted file mode 100644 (file)
index d1b0299..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  Firmware calls
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Waleri Fomin <fomin@de.ibm.com>
- *           Gerd Bayer <gerd.bayer@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HCP_PHYP_H__
-#define __HCP_PHYP_H__
-
-
-/*
- * eHCA page (mapped into memory)
- * resource to access eHCA register pages in CPU address space
- */
-struct h_galpa {
-       u64 fw_handle;
-       /* for pSeries this is a 64bit memory address where
-          I/O memory is mapped into CPU address space (kv) */
-};
-
-/*
- * resource to access eHCA address space registers, all types
- */
-struct h_galpas {
-       u32 pid;                /* PID of userspace galpa checking */
-       struct h_galpa user;    /* user space accessible resource,
-                                  set to 0 if unused */
-       struct h_galpa kernel;  /* kernel space accessible resource,
-                                  set to 0 if unused */
-};
-
-static inline u64 hipz_galpa_load(struct h_galpa galpa, u32 offset)
-{
-       u64 addr = galpa.fw_handle + offset;
-       return *(volatile u64 __force *)addr;
-}
-
-static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
-{
-       u64 addr = galpa.fw_handle + offset;
-       *(volatile u64 __force *)addr = value;
-}
-
-int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
-                   u64 paddr_kernel, u64 paddr_user);
-
-int hcp_galpas_dtor(struct h_galpas *galpas);
-
-u64 hcall_map_page(u64 physaddr);
-
-int hcall_unmap_page(u64 mapaddr);
-
-#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h
deleted file mode 100644 (file)
index 9dac93d..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  HW abstraction register functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_FNS_H__
-#define __HIPZ_FNS_H__
-
-#include "ehca_classes.h"
-#include "hipz_hw.h"
-
-#include "hipz_fns_core.h"
-
-#define hipz_galpa_store_eq(gal, offset, value) \
-       hipz_galpa_store(gal, EQTEMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_eq(gal, offset) \
-       hipz_galpa_load(gal, EQTEMM_OFFSET(offset))
-
-#define hipz_galpa_store_qped(gal, offset, value) \
-       hipz_galpa_store(gal, QPEDMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_qped(gal, offset) \
-       hipz_galpa_load(gal, QPEDMM_OFFSET(offset))
-
-#define hipz_galpa_store_mrmw(gal, offset, value) \
-       hipz_galpa_store(gal, MRMWMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_mrmw(gal, offset) \
-       hipz_galpa_load(gal, MRMWMM_OFFSET(offset))
-
-#endif
diff --git a/drivers/staging/rdma/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h
deleted file mode 100644 (file)
index 868735f..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  HW abstraction register functions
- *
- *  Authors: Christoph Raisch <raisch@de.ibm.com>
- *           Heiko J Schick <schickhj@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_FNS_CORE_H__
-#define __HIPZ_FNS_CORE_H__
-
-#include "hcp_phyp.h"
-#include "hipz_hw.h"
-
-#define hipz_galpa_store_cq(gal, offset, value) \
-       hipz_galpa_store(gal, CQTEMM_OFFSET(offset), value)
-
-#define hipz_galpa_load_cq(gal, offset) \
-       hipz_galpa_load(gal, CQTEMM_OFFSET(offset))
-
-#define hipz_galpa_store_qp(gal, offset, value) \
-       hipz_galpa_store(gal, QPTEMM_OFFSET(offset), value)
-#define hipz_galpa_load_qp(gal, offset) \
-       hipz_galpa_load(gal, QPTEMM_OFFSET(offset))
-
-static inline void hipz_update_sqa(struct ehca_qp *qp, u16 nr_wqes)
-{
-       /*  ringing doorbell :-) */
-       hipz_galpa_store_qp(qp->galpas.kernel, qpx_sqa,
-                           EHCA_BMASK_SET(QPX_SQADDER, nr_wqes));
-}
-
-static inline void hipz_update_rqa(struct ehca_qp *qp, u16 nr_wqes)
-{
-       /*  ringing doorbell :-) */
-       hipz_galpa_store_qp(qp->galpas.kernel, qpx_rqa,
-                           EHCA_BMASK_SET(QPX_RQADDER, nr_wqes));
-}
-
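These two helpers are the doorbell write at the end of the post-send and post-receive paths: after new WQEs have been copied into the queue, the adder register is bumped by the number of entries just added. An illustrative fragment (my_qp and the WQE counts are assumptions):

        /* after writing nr_send_wqes new WQEs into the send queue */
        hipz_update_sqa(my_qp, nr_send_wqes);

        /* ... and likewise after posting nr_recv_wqes receive WQEs */
        hipz_update_rqa(my_qp, nr_recv_wqes);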
-static inline void hipz_update_feca(struct ehca_cq *cq, u32 nr_cqes)
-{
-       hipz_galpa_store_cq(cq->galpas.kernel, cqx_feca,
-                           EHCA_BMASK_SET(CQX_FECADDER, nr_cqes));
-}
-
-static inline void hipz_set_cqx_n0(struct ehca_cq *cq, u32 value)
-{
-       u64 cqx_n0_reg;
-
-       hipz_galpa_store_cq(cq->galpas.kernel, cqx_n0,
-                           EHCA_BMASK_SET(CQX_N0_GENERATE_SOLICITED_COMP_EVENT,
-                                          value));
-       cqx_n0_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n0);
-}
-
-static inline void hipz_set_cqx_n1(struct ehca_cq *cq, u32 value)
-{
-       u64 cqx_n1_reg;
-
-       hipz_galpa_store_cq(cq->galpas.kernel, cqx_n1,
-                           EHCA_BMASK_SET(CQX_N1_GENERATE_COMP_EVENT, value));
-       cqx_n1_reg = hipz_galpa_load_cq(cq->galpas.kernel, cqx_n1);
-}
-
-#endif /* __HIPZ_FNS_CORE_H__ */
diff --git a/drivers/staging/rdma/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h
deleted file mode 100644 (file)
index bf996c7..0000000
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  eHCA register definitions
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __HIPZ_HW_H__
-#define __HIPZ_HW_H__
-
-#include "ehca_tools.h"
-
-#define EHCA_MAX_MTU 4
-
-/* QP Table Entry Memory Map */
-struct hipz_qptemm {
-       u64 qpx_hcr;
-       u64 qpx_c;
-       u64 qpx_herr;
-       u64 qpx_aer;
-/* 0x20*/
-       u64 qpx_sqa;
-       u64 qpx_sqc;
-       u64 qpx_rqa;
-       u64 qpx_rqc;
-/* 0x40*/
-       u64 qpx_st;
-       u64 qpx_pmstate;
-       u64 qpx_pmfa;
-       u64 qpx_pkey;
-/* 0x60*/
-       u64 qpx_pkeya;
-       u64 qpx_pkeyb;
-       u64 qpx_pkeyc;
-       u64 qpx_pkeyd;
-/* 0x80*/
-       u64 qpx_qkey;
-       u64 qpx_dqp;
-       u64 qpx_dlidp;
-       u64 qpx_portp;
-/* 0xa0*/
-       u64 qpx_slidp;
-       u64 qpx_slidpp;
-       u64 qpx_dlida;
-       u64 qpx_porta;
-/* 0xc0*/
-       u64 qpx_slida;
-       u64 qpx_slidpa;
-       u64 qpx_slvl;
-       u64 qpx_ipd;
-/* 0xe0*/
-       u64 qpx_mtu;
-       u64 qpx_lato;
-       u64 qpx_rlimit;
-       u64 qpx_rnrlimit;
-/* 0x100*/
-       u64 qpx_t;
-       u64 qpx_sqhp;
-       u64 qpx_sqptp;
-       u64 qpx_nspsn;
-/* 0x120*/
-       u64 qpx_nspsnhwm;
-       u64 reserved1;
-       u64 qpx_sdsi;
-       u64 qpx_sdsbc;
-/* 0x140*/
-       u64 qpx_sqwsize;
-       u64 qpx_sqwts;
-       u64 qpx_lsn;
-       u64 qpx_nssn;
-/* 0x160 */
-       u64 qpx_mor;
-       u64 qpx_cor;
-       u64 qpx_sqsize;
-       u64 qpx_erc;
-/* 0x180*/
-       u64 qpx_rnrrc;
-       u64 qpx_ernrwt;
-       u64 qpx_rnrresp;
-       u64 qpx_lmsna;
-/* 0x1a0 */
-       u64 qpx_sqhpc;
-       u64 qpx_sqcptp;
-       u64 qpx_sigt;
-       u64 qpx_wqecnt;
-/* 0x1c0*/
-       u64 qpx_rqhp;
-       u64 qpx_rqptp;
-       u64 qpx_rqsize;
-       u64 qpx_nrr;
-/* 0x1e0*/
-       u64 qpx_rdmac;
-       u64 qpx_nrpsn;
-       u64 qpx_lapsn;
-       u64 qpx_lcr;
-/* 0x200*/
-       u64 qpx_rwc;
-       u64 qpx_rwva;
-       u64 qpx_rdsi;
-       u64 qpx_rdsbc;
-/* 0x220*/
-       u64 qpx_rqwsize;
-       u64 qpx_crmsn;
-       u64 qpx_rdd;
-       u64 qpx_larpsn;
-/* 0x240*/
-       u64 qpx_pd;
-       u64 qpx_scqn;
-       u64 qpx_rcqn;
-       u64 qpx_aeqn;
-/* 0x260*/
-       u64 qpx_aaelog;
-       u64 qpx_ram;
-       u64 qpx_rdmaqe0;
-       u64 qpx_rdmaqe1;
-/* 0x280*/
-       u64 qpx_rdmaqe2;
-       u64 qpx_rdmaqe3;
-       u64 qpx_nrpsnhwm;
-/* 0x298*/
-       u64 reserved[(0x400 - 0x298) / 8];
-/* 0x400 extended data */
-       u64 reserved_ext[(0x500 - 0x400) / 8];
-/* 0x500 */
-       u64 reserved2[(0x1000 - 0x500) / 8];
-/* 0x1000      */
-};
-
-#define QPX_SQADDER EHCA_BMASK_IBM(48, 63)
-#define QPX_RQADDER EHCA_BMASK_IBM(48, 63)
-#define QPX_AAELOG_RESET_SRQ_LIMIT EHCA_BMASK_IBM(3, 3)
-
-#define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm, x)
-
-/* MRMWPT Entry Memory Map */
-struct hipz_mrmwmm {
-       /* 0x00 */
-       u64 mrx_hcr;
-
-       u64 mrx_c;
-       u64 mrx_herr;
-       u64 mrx_aer;
-       /* 0x20 */
-       u64 mrx_pp;
-       u64 reserved1;
-       u64 reserved2;
-       u64 reserved3;
-       /* 0x40 */
-       u64 reserved4[(0x200 - 0x40) / 8];
-       /* 0x200 */
-       u64 mrx_ctl[64];
-
-};
-
-#define MRMWMM_OFFSET(x) offsetof(struct hipz_mrmwmm, x)
-
-struct hipz_qpedmm {
-       /* 0x00 */
-       u64 reserved0[(0x400) / 8];
-       /* 0x400 */
-       u64 qpedx_phh;
-       u64 qpedx_ppsgp;
-       /* 0x410 */
-       u64 qpedx_ppsgu;
-       u64 qpedx_ppdgp;
-       /* 0x420 */
-       u64 qpedx_ppdgu;
-       u64 qpedx_aph;
-       /* 0x430 */
-       u64 qpedx_apsgp;
-       u64 qpedx_apsgu;
-       /* 0x440 */
-       u64 qpedx_apdgp;
-       u64 qpedx_apdgu;
-       /* 0x450 */
-       u64 qpedx_apav;
-       u64 qpedx_apsav;
-       /* 0x460  */
-       u64 qpedx_hcr;
-       u64 reserved1[4];
-       /* 0x488 */
-       u64 qpedx_rrl0;
-       /* 0x490 */
-       u64 qpedx_rrrkey0;
-       u64 qpedx_rrva0;
-       /* 0x4a0 */
-       u64 reserved2;
-       u64 qpedx_rrl1;
-       /* 0x4b0 */
-       u64 qpedx_rrrkey1;
-       u64 qpedx_rrva1;
-       /* 0x4c0 */
-       u64 reserved3;
-       u64 qpedx_rrl2;
-       /* 0x4d0 */
-       u64 qpedx_rrrkey2;
-       u64 qpedx_rrva2;
-       /* 0x4e0 */
-       u64 reserved4;
-       u64 qpedx_rrl3;
-       /* 0x4f0 */
-       u64 qpedx_rrrkey3;
-       u64 qpedx_rrva3;
-};
-
-#define QPEDMM_OFFSET(x) offsetof(struct hipz_qpedmm, x)
-
-/* CQ Table Entry Memory Map */
-struct hipz_cqtemm {
-       u64 cqx_hcr;
-       u64 cqx_c;
-       u64 cqx_herr;
-       u64 cqx_aer;
-/* 0x20  */
-       u64 cqx_ptp;
-       u64 cqx_tp;
-       u64 cqx_fec;
-       u64 cqx_feca;
-/* 0x40  */
-       u64 cqx_ep;
-       u64 cqx_eq;
-/* 0x50  */
-       u64 reserved1;
-       u64 cqx_n0;
-/* 0x60  */
-       u64 cqx_n1;
-       u64 reserved2[(0x1000 - 0x60) / 8];
-/* 0x1000 */
-};
-
-#define CQX_FEC_CQE_CNT           EHCA_BMASK_IBM(32, 63)
-#define CQX_FECADDER              EHCA_BMASK_IBM(32, 63)
-#define CQX_N0_GENERATE_SOLICITED_COMP_EVENT EHCA_BMASK_IBM(0, 0)
-#define CQX_N1_GENERATE_COMP_EVENT EHCA_BMASK_IBM(0, 0)
-
-#define CQTEMM_OFFSET(x) offsetof(struct hipz_cqtemm, x)
-
-/* EQ Table Entry Memory Map */
-struct hipz_eqtemm {
-       u64 eqx_hcr;
-       u64 eqx_c;
-
-       u64 eqx_herr;
-       u64 eqx_aer;
-/* 0x20 */
-       u64 eqx_ptp;
-       u64 eqx_tp;
-       u64 eqx_ssba;
-       u64 eqx_psba;
-
-/* 0x40 */
-       u64 eqx_cec;
-       u64 eqx_meql;
-       u64 eqx_xisbi;
-       u64 eqx_xisc;
-/* 0x60 */
-       u64 eqx_it;
-
-};
-
-#define EQTEMM_OFFSET(x) offsetof(struct hipz_eqtemm, x)
-
-/* access control defines for MR/MW */
-#define HIPZ_ACCESSCTRL_L_WRITE  0x00800000
-#define HIPZ_ACCESSCTRL_R_WRITE  0x00400000
-#define HIPZ_ACCESSCTRL_R_READ   0x00200000
-#define HIPZ_ACCESSCTRL_R_ATOMIC 0x00100000
-#define HIPZ_ACCESSCTRL_MW_BIND  0x00080000
-
-/* query hca response block */
-struct hipz_query_hca {
-       u32 cur_reliable_dg;
-       u32 cur_qp;
-       u32 cur_cq;
-       u32 cur_eq;
-       u32 cur_mr;
-       u32 cur_mw;
-       u32 cur_ee_context;
-       u32 cur_mcast_grp;
-       u32 cur_qp_attached_mcast_grp;
-       u32 reserved1;
-       u32 cur_ipv6_qp;
-       u32 cur_eth_qp;
-       u32 cur_hp_mr;
-       u32 reserved2[3];
-       u32 max_rd_domain;
-       u32 max_qp;
-       u32 max_cq;
-       u32 max_eq;
-       u32 max_mr;
-       u32 max_hp_mr;
-       u32 max_mw;
-       u32 max_mrwpte;
-       u32 max_special_mrwpte;
-       u32 max_rd_ee_context;
-       u32 max_mcast_grp;
-       u32 max_total_mcast_qp_attach;
-       u32 max_mcast_qp_attach;
-       u32 max_raw_ipv6_qp;
-       u32 max_raw_ethy_qp;
-       u32 internal_clock_frequency;
-       u32 max_pd;
-       u32 max_ah;
-       u32 max_cqe;
-       u32 max_wqes_wq;
-       u32 max_partitions;
-       u32 max_rr_ee_context;
-       u32 max_rr_qp;
-       u32 max_rr_hca;
-       u32 max_act_wqs_ee_context;
-       u32 max_act_wqs_qp;
-       u32 max_sge;
-       u32 max_sge_rd;
-       u32 memory_page_size_supported;
-       u64 max_mr_size;
-       u32 local_ca_ack_delay;
-       u32 num_ports;
-       u32 vendor_id;
-       u32 vendor_part_id;
-       u32 hw_ver;
-       u64 node_guid;
-       u64 hca_cap_indicators;
-       u32 data_counter_register_size;
-       u32 max_shared_rq;
-       u32 max_isns_eq;
-       u32 max_neq;
-} __attribute__ ((packed));
-
-#define HCA_CAP_AH_PORT_NR_CHECK      EHCA_BMASK_IBM( 0,  0)
-#define HCA_CAP_ATOMIC                EHCA_BMASK_IBM( 1,  1)
-#define HCA_CAP_AUTO_PATH_MIG         EHCA_BMASK_IBM( 2,  2)
-#define HCA_CAP_BAD_P_KEY_CTR         EHCA_BMASK_IBM( 3,  3)
-#define HCA_CAP_SQD_RTS_PORT_CHANGE   EHCA_BMASK_IBM( 4,  4)
-#define HCA_CAP_CUR_QP_STATE_MOD      EHCA_BMASK_IBM( 5,  5)
-#define HCA_CAP_INIT_TYPE             EHCA_BMASK_IBM( 6,  6)
-#define HCA_CAP_PORT_ACTIVE_EVENT     EHCA_BMASK_IBM( 7,  7)
-#define HCA_CAP_Q_KEY_VIOL_CTR        EHCA_BMASK_IBM( 8,  8)
-#define HCA_CAP_WQE_RESIZE            EHCA_BMASK_IBM( 9,  9)
-#define HCA_CAP_RAW_PACKET_MCAST      EHCA_BMASK_IBM(10, 10)
-#define HCA_CAP_SHUTDOWN_PORT         EHCA_BMASK_IBM(11, 11)
-#define HCA_CAP_RC_LL_QP              EHCA_BMASK_IBM(12, 12)
-#define HCA_CAP_SRQ                   EHCA_BMASK_IBM(13, 13)
-#define HCA_CAP_UD_LL_QP              EHCA_BMASK_IBM(16, 16)
-#define HCA_CAP_RESIZE_MR             EHCA_BMASK_IBM(17, 17)
-#define HCA_CAP_MINI_QP               EHCA_BMASK_IBM(18, 18)
-#define HCA_CAP_H_ALLOC_RES_SYNC      EHCA_BMASK_IBM(19, 19)
-
-/* query port response block */
-struct hipz_query_port {
-       u32 state;
-       u32 bad_pkey_cntr;
-       u32 lmc;
-       u32 lid;
-       u32 subnet_timeout;
-       u32 qkey_viol_cntr;
-       u32 sm_sl;
-       u32 sm_lid;
-       u32 capability_mask;
-       u32 init_type_reply;
-       u32 pkey_tbl_len;
-       u32 gid_tbl_len;
-       u64 gid_prefix;
-       u32 port_nr;
-       u16 pkey_entries[16];
-       u8  reserved1[32];
-       u32 trent_size;
-       u32 trbuf_size;
-       u64 max_msg_sz;
-       u32 max_mtu;
-       u32 vl_cap;
-       u32 phys_pstate;
-       u32 phys_state;
-       u32 phys_speed;
-       u32 phys_width;
-       u8  reserved2[1884];
-       u64 guid_entries[255];
-} __attribute__ ((packed));
-
-#endif
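
The memory maps above (hipz_qptemm, hipz_mrmwmm, hipz_qpedmm, hipz_cqtemm, hipz_eqtemm) mirror the eHCA register blocks field for field: the /* 0x20 */-style comments record the running byte offset, reserved arrays sized by offset arithmetic keep later fields at their documented positions (the QP and CQ maps are padded out to 0x1000), and QPTEMM_OFFSET()/MRMWMM_OFFSET()/CQTEMM_OFFSET()/EQTEMM_OFFSET() turn a field name into that offset via offsetof(). A standalone sketch of the same pattern, using a made-up miniature map rather than any of the real structs:

    /* Illustrative sketch: a hardware memory map expressed as a C struct,
     * paired with an offsetof() accessor like QPTEMM_OFFSET()/CQTEMM_OFFSET().
     * The struct below is a made-up miniature, not the real hipz_cqtemm. */
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_cqtemm {
            uint64_t cqx_hcr;   /* 0x00 */
            uint64_t cqx_c;     /* 0x08 */
            uint64_t cqx_herr;  /* 0x10 */
            uint64_t cqx_aer;   /* 0x18 */
            uint64_t cqx_ptp;   /* 0x20 */
            uint64_t reserved[(0x1000 - 0x28) / 8];  /* pad map out to 4 KiB */
    };

    #define DEMO_CQTEMM_OFFSET(x) offsetof(struct demo_cqtemm, x)

    int main(void)
    {
            /* the register block must span exactly one 4 KiB page */
            _Static_assert(sizeof(struct demo_cqtemm) == 0x1000, "bad map size");

            printf("cqx_ptp lives at offset 0x%zx\n", DEMO_CQTEMM_OFFSET(cqx_ptp));
            return 0;
    }
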
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c
deleted file mode 100644 (file)
index 7ffc748..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  internal queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/slab.h>
-
-#include "ehca_tools.h"
-#include "ipz_pt_fn.h"
-#include "ehca_classes.h"
-
-#define PAGES_PER_KPAGE (PAGE_SIZE >> EHCA_PAGESHIFT)
-
-struct kmem_cache *small_qp_cache;
-
-void *ipz_qpageit_get_inc(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       queue->current_q_offset += queue->pagesize;
-       if (queue->current_q_offset > queue->queue_length) {
-               queue->current_q_offset -= queue->pagesize;
-               ret = NULL;
-       }
-       if (((u64)ret) % queue->pagesize) {
-               ehca_gen_err("ERROR!! not at PAGE-Boundary");
-               return NULL;
-       }
-       return ret;
-}
-
-void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       u64 last_entry_in_q = queue->queue_length - queue->qe_size;
-
-       queue->current_q_offset += queue->qe_size;
-       if (queue->current_q_offset > last_entry_in_q) {
-               queue->current_q_offset = 0;
-               queue->toggle_state = (~queue->toggle_state) & 1;
-       }
-
-       return ret;
-}
-
-int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset)
-{
-       int i;
-       for (i = 0; i < queue->queue_length / queue->pagesize; i++) {
-               u64 page = __pa(queue->queue_pages[i]);
-               if (addr >= page && addr < page + queue->pagesize) {
-                       *q_offset = addr - page + i * queue->pagesize;
-                       return 0;
-               }
-       }
-       return -EINVAL;
-}
-
-#if PAGE_SHIFT < EHCA_PAGESHIFT
-#error Kernel pages must be at least as large as eHCA pages (4K)!
-#endif
-
-/*
- * allocate pages for queue:
- * outer loop allocates whole kernel pages (page aligned) and
- * inner loop divides a kernel page into smaller hca queue pages
- */
-static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages)
-{
-       int k, f = 0;
-       u8 *kpage;
-
-       while (f < nr_of_pages) {
-               kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
-               if (!kpage)
-                       goto out;
-
-               for (k = 0; k < PAGES_PER_KPAGE && f < nr_of_pages; k++) {
-                       queue->queue_pages[f] = (struct ipz_page *)kpage;
-                       kpage += EHCA_PAGESIZE;
-                       f++;
-               }
-       }
-       return 1;
-
-out:
-       for (f = 0; f < nr_of_pages && queue->queue_pages[f];
-            f += PAGES_PER_KPAGE)
-               free_page((unsigned long)(queue->queue_pages)[f]);
-       return 0;
-}
-
-static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
-{
-       int order = ilog2(queue->pagesize) - 9;
-       struct ipz_small_queue_page *page;
-       unsigned long bit;
-
-       mutex_lock(&pd->lock);
-
-       if (!list_empty(&pd->free[order]))
-               page = list_entry(pd->free[order].next,
-                                 struct ipz_small_queue_page, list);
-       else {
-               page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);
-               if (!page)
-                       goto out;
-
-               page->page = get_zeroed_page(GFP_KERNEL);
-               if (!page->page) {
-                       kmem_cache_free(small_qp_cache, page);
-                       goto out;
-               }
-
-               list_add(&page->list, &pd->free[order]);
-       }
-
-       bit = find_first_zero_bit(page->bitmap, IPZ_SPAGE_PER_KPAGE >> order);
-       __set_bit(bit, page->bitmap);
-       page->fill++;
-
-       if (page->fill == IPZ_SPAGE_PER_KPAGE >> order)
-               list_move(&page->list, &pd->full[order]);
-
-       mutex_unlock(&pd->lock);
-
-       queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9)));
-       queue->small_page = page;
-       queue->offset = bit << (order + 9);
-       return 1;
-
-out:
-       ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
-       mutex_unlock(&pd->lock);
-       return 0;
-}
-
-static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd)
-{
-       int order = ilog2(queue->pagesize) - 9;
-       struct ipz_small_queue_page *page = queue->small_page;
-       unsigned long bit;
-       int free_page = 0;
-
-       bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK)
-               >> (order + 9);
-
-       mutex_lock(&pd->lock);
-
-       __clear_bit(bit, page->bitmap);
-       page->fill--;
-
-       if (page->fill == 0) {
-               list_del(&page->list);
-               free_page = 1;
-       }
-
-       if (page->fill == (IPZ_SPAGE_PER_KPAGE >> order) - 1)
-               /* the page was full until we freed the chunk */
-               list_move_tail(&page->list, &pd->free[order]);
-
-       mutex_unlock(&pd->lock);
-
-       if (free_page) {
-               free_page(page->page);
-               kmem_cache_free(small_qp_cache, page);
-       }
-}
-
-int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
-                  const u32 nr_of_pages, const u32 pagesize,
-                  const u32 qe_size, const u32 nr_of_sg,
-                  int is_small)
-{
-       if (pagesize > PAGE_SIZE) {
-               ehca_gen_err("FATAL ERROR: pagesize=%x "
-                            "is greater than kernel page size", pagesize);
-               return 0;
-       }
-
-       /* init queue fields */
-       queue->queue_length = nr_of_pages * pagesize;
-       queue->pagesize = pagesize;
-       queue->qe_size = qe_size;
-       queue->act_nr_of_sg = nr_of_sg;
-       queue->current_q_offset = 0;
-       queue->toggle_state = 1;
-       queue->small_page = NULL;
-
-       /* allocate queue page pointers */
-       queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *),
-                                    GFP_KERNEL | __GFP_NOWARN);
-       if (!queue->queue_pages) {
-               queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *));
-               if (!queue->queue_pages) {
-                       ehca_gen_err("Couldn't allocate queue page list");
-                       return 0;
-               }
-       }
-
-       /* allocate actual queue pages */
-       if (is_small) {
-               if (!alloc_small_queue_page(queue, pd))
-                       goto ipz_queue_ctor_exit0;
-       } else
-               if (!alloc_queue_pages(queue, nr_of_pages))
-                       goto ipz_queue_ctor_exit0;
-
-       return 1;
-
-ipz_queue_ctor_exit0:
-       ehca_gen_err("Couldn't alloc pages queue=%p "
-                "nr_of_pages=%x",  queue, nr_of_pages);
-       kvfree(queue->queue_pages);
-
-       return 0;
-}
-
-int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
-{
-       int i, nr_pages;
-
-       if (!queue || !queue->queue_pages) {
-               ehca_gen_dbg("queue or queue_pages is NULL");
-               return 0;
-       }
-
-       if (queue->small_page)
-               free_small_queue_page(queue, pd);
-       else {
-               nr_pages = queue->queue_length / queue->pagesize;
-               for (i = 0; i < nr_pages; i += PAGES_PER_KPAGE)
-                       free_page((unsigned long)queue->queue_pages[i]);
-       }
-
-       kvfree(queue->queue_pages);
-
-       return 1;
-}
-
-int ehca_init_small_qp_cache(void)
-{
-       small_qp_cache = kmem_cache_create("ehca_cache_small_qp",
-                                          sizeof(struct ipz_small_queue_page),
-                                          0, SLAB_HWCACHE_ALIGN, NULL);
-       if (!small_qp_cache)
-               return -ENOMEM;
-
-       return 0;
-}
-
-void ehca_cleanup_small_qp_cache(void)
-{
-       kmem_cache_destroy(small_qp_cache);
-}
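
alloc_small_queue_page()/free_small_queue_page() above let several small queues share one kernel page: order = ilog2(pagesize) - 9 picks a chunk size of 512 << order bytes, the per-page bitmap in struct ipz_small_queue_page records which chunks are handed out, and a queue's byte offset inside the page is bit << (order + 9). A self-contained model of that bookkeeping, with made-up names and without the per-PD free/full lists and locking the driver uses:

    /* Illustrative sketch of the small-queue-page bookkeeping: a 4 KiB kernel
     * page is carved into chunks of (512 << order) bytes, a bitmap tracks
     * which chunks are in use, and a queue's offset is bit << (order + 9). */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PAGE_SIZE 4096u

    struct demo_small_page {
            uint64_t bitmap;   /* one bit per chunk */
            unsigned fill;
    };

    /* returns the byte offset of the allocated chunk, or -1 if the page is full */
    static int demo_alloc_chunk(struct demo_small_page *pg, unsigned pagesize)
    {
            unsigned order = 0, chunks, bit;

            while ((512u << order) < pagesize)       /* order = ilog2(pagesize) - 9 */
                    order++;
            chunks = DEMO_PAGE_SIZE / (512u << order);

            for (bit = 0; bit < chunks; bit++) {
                    if (!(pg->bitmap & (1ULL << bit))) {
                            pg->bitmap |= 1ULL << bit;
                            pg->fill++;
                            return bit << (order + 9);   /* chunk's byte offset */
                    }
            }
            return -1;
    }

    int main(void)
    {
            struct demo_small_page pg = { 0, 0 };

            /* two 512-byte queues share one kernel page at offsets 0x000, 0x200 */
            printf("first:  0x%03x\n", (unsigned)demo_alloc_chunk(&pg, 512));
            printf("second: 0x%03x\n", (unsigned)demo_alloc_chunk(&pg, 512));
            return 0;
    }
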
diff --git a/drivers/staging/rdma/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h
deleted file mode 100644 (file)
index a801274..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/*
- *  IBM eServer eHCA Infiniband device driver for Linux on POWER
- *
- *  internal queue handling
- *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Reinhard Ernst <rernst@de.ibm.com>
- *           Christoph Raisch <raisch@de.ibm.com>
- *
- *  Copyright (c) 2005 IBM Corporation
- *
- *  All rights reserved.
- *
- *  This source code is distributed under a dual license of GPL v2.0 and OpenIB
- *  BSD.
- *
- * OpenIB BSD License
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice, this
- * list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials
- * provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
- * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
- * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __IPZ_PT_FN_H__
-#define __IPZ_PT_FN_H__
-
-#define EHCA_PAGESHIFT   12
-#define EHCA_PAGESIZE   4096UL
-#define EHCA_PAGEMASK   (~(EHCA_PAGESIZE-1))
-#define EHCA_PT_ENTRIES 512UL
-
-#include "ehca_tools.h"
-#include "ehca_qes.h"
-
-struct ehca_pd;
-struct ipz_small_queue_page;
-
-extern struct kmem_cache *small_qp_cache;
-
-/* struct generic ehca page */
-struct ipz_page {
-       u8 entries[EHCA_PAGESIZE];
-};
-
-#define IPZ_SPAGE_PER_KPAGE (PAGE_SIZE / 512)
-
-struct ipz_small_queue_page {
-       unsigned long page;
-       unsigned long bitmap[IPZ_SPAGE_PER_KPAGE / BITS_PER_LONG];
-       int fill;
-       void *mapped_addr;
-       u32 mmap_count;
-       struct list_head list;
-};
-
-/* struct generic queue in linux kernel virtual memory (kv) */
-struct ipz_queue {
-       u64 current_q_offset;   /* current queue entry */
-
-       struct ipz_page **queue_pages;  /* array of pages belonging to queue */
-       u32 qe_size;            /* queue entry size */
-       u32 act_nr_of_sg;
-       u32 queue_length;       /* queue length allocated in bytes */
-       u32 pagesize;
-       u32 toggle_state;       /* toggle flag - per page */
-       u32 offset; /* save offset within page for small_qp */
-       struct ipz_small_queue_page *small_page;
-};
-
-/*
- * return current Queue Entry for a certain q_offset
- * returns address (kv) of Queue Entry
- */
-static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)
-{
-       struct ipz_page *current_page;
-       if (q_offset >= queue->queue_length)
-               return NULL;
-       current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];
-       return &current_page->entries[q_offset & (EHCA_PAGESIZE - 1)];
-}
-
-/*
- * return current Queue Entry
- * returns address (kv) of Queue Entry
- */
-static inline void *ipz_qeit_get(struct ipz_queue *queue)
-{
-       return ipz_qeit_calc(queue, queue->current_q_offset);
-}
-
-/*
- * return current Queue Page, increment Queue Page iterator from
- * page to page in struct ipz_queue; the last increment returns NULL and
- * does NOT wrap
- * returns address (kv) of Queue Page
- * warning don't use in parallel with ipz_QE_get_inc()
- */
-void *ipz_qpageit_get_inc(struct ipz_queue *queue);
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * warning don't use in parallel with ipz_qpageit_get_inc()
- */
-static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       queue->current_q_offset += queue->qe_size;
-       if (queue->current_q_offset >= queue->queue_length) {
-               queue->current_q_offset = 0;
-               /* toggle the valid flag */
-               queue->toggle_state = (~queue->toggle_state) & 1;
-       }
-
-       return ret;
-}
-
-/*
- * return a bool indicating whether current Queue Entry is valid
- */
-static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
-{
-       struct ehca_cqe *cqe = ipz_qeit_get(queue);
-       return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1));
-}
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * returns 0 and does not increment, if wrong valid state
- * warning don't use in parallel with ipz_qpageit_get_inc()
- */
-static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
-{
-       return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL;
-}
-
-/*
- * returns and resets Queue Entry iterator
- * returns address (kv) of first Queue Entry
- */
-static inline void *ipz_qeit_reset(struct ipz_queue *queue)
-{
-       queue->current_q_offset = 0;
-       return ipz_qeit_get(queue);
-}
-
-/*
- * return the q_offset corresponding to an absolute address
- */
-int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
-
-/*
- * return the next queue offset. don't modify the queue.
- */
-static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset)
-{
-       offset += queue->qe_size;
-       if (offset >= queue->queue_length) offset = 0;
-       return offset;
-}
-
-/* struct generic page table */
-struct ipz_pt {
-       u64 entries[EHCA_PT_ENTRIES];
-};
-
-/* struct page table for a queue, only to be used in pf */
-struct ipz_qpt {
-       /* queue page tables (kv), use u64 because we know the element length */
-       u64 *qpts;
-       u32 n_qpts;
-       u32 n_ptes;       /*  number of page table entries */
-       u64 *current_pte_addr;
-};
-
-/*
- * constructor for an ipz_queue_t, placement new for ipz_queue_t,
- * new for all dependent data structures
- * all QP Tables are the same
- * flow:
- *    allocate+pin queue
- * see ipz_qpt_ctor()
- * returns true if ok, false if out of memory
- */
-int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
-                  const u32 nr_of_pages, const u32 pagesize,
-                  const u32 qe_size, const u32 nr_of_sg,
-                  int is_small);
-
-/*
- * destructor for a ipz_queue_t
- *  -# free queue
- *  see ipz_queue_ctor()
- *  returns true if ok, false if queue was NULL-ptr or free failed
- */
-int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
-
-/*
- * constructor for an ipz_qpt_t,
- * placement new for struct ipz_queue, new for all dependent data structures
- * all QP Tables are the same,
- * flow:
- * -# allocate+pin queue
- * -# initialise ptcb
- * -# allocate+pin PTs
- * -# link PTs to a ring, according to HCA Arch, set bit62 if needed
- * -# the ring must have room for exactly nr_of_PTEs
- * see ipz_qpt_ctor()
- */
-void ipz_qpt_ctor(struct ipz_qpt *qpt,
-                 const u32 nr_of_qes,
-                 const u32 pagesize,
-                 const u32 qe_size,
-                 const u8 lowbyte, const u8 toggle,
-                 u32 * act_nr_of_QEs, u32 * act_nr_of_pages);
-
-/*
- * return current Queue Entry, increment Queue Entry iterator by one
- * step in struct ipz_queue, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * warning don't use in parallel with ipz_qpageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
- * fix EQ page problems
- */
-void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
-
-/*
- * return current Event Queue Entry, increment Queue Entry iterator
- * by one step in struct ipz_queue if valid, will wrap in ringbuffer
- * returns address (kv) of Queue Entry BEFORE increment
- * returns 0 and does not increment, if wrong valid state
- * warning don't use in parallel with ipz_queue_QPageit_get_inc()
- * warning unpredictable results may occur if steps>act_nr_of_queue_entries
- */
-static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       u32 qe = *(u8 *)ret;
-       if ((qe >> 7) != (queue->toggle_state & 1))
-               return NULL;
-       ipz_qeit_eq_get_inc(queue); /* this is a good one */
-       return ret;
-}
-
-static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue)
-{
-       void *ret = ipz_qeit_get(queue);
-       u32 qe = *(u8 *)ret;
-       if ((qe >> 7) != (queue->toggle_state & 1))
-               return NULL;
-       return ret;
-}
-
-/* returns address (GX) of first queue entry */
-static inline u64 ipz_qpt_get_firstpage(struct ipz_qpt *qpt)
-{
-       return be64_to_cpu(qpt->qpts[0]);
-}
-
-/* returns address (kv) of first page of queue page table */
-static inline void *ipz_qpt_get_qpt(struct ipz_qpt *qpt)
-{
-       return qpt->qpts;
-}
-
-#endif                         /* __IPZ_PT_FN_H__ */
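
The iterators above keep no separate producer index: ipz_qeit_get_inc() flips queue->toggle_state each time the offset wraps, and ipz_qeit_is_valid() treats an entry as new only while the valid bit the adapter wrote (bit 7 of cqe_flags) matches the current toggle state. A self-contained model of that convention, with made-up names and a plain array standing in for the mapped queue pages:

    /* Illustrative sketch of the toggle-state ring: the consumer flips its
     * toggle bit on every wrap, and an entry is "new" only while the
     * producer-written valid bit matches the consumer's current toggle. */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_QLEN 4

    struct demo_queue {
            uint8_t  valid_bit[DEMO_QLEN];  /* written by the "hardware" producer */
            unsigned head;                  /* consumer offset */
            unsigned toggle;                /* consumer toggle state, starts at 1 */
    };

    /* returns the index of the next valid entry, or -1 if nothing new */
    static int demo_consume(struct demo_queue *q)
    {
            if (q->valid_bit[q->head] != q->toggle)
                    return -1;                   /* entry not (yet) valid */

            int idx = q->head;
            if (++q->head == DEMO_QLEN) {        /* wrap: flip the toggle */
                    q->head = 0;
                    q->toggle ^= 1;
            }
            return idx;
    }

    int main(void)
    {
            struct demo_queue q = { .head = 0, .toggle = 1 };

            /* producer posts two entries in the first pass with valid bit 1 */
            q.valid_bit[0] = 1;
            q.valid_bit[1] = 1;

            printf("%d\n", demo_consume(&q));   /* 0 */
            printf("%d\n", demo_consume(&q));   /* 1 */
            printf("%d\n", demo_consume(&q));   /* -1: entry 2 still stale */
            return 0;
    }
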
diff --git a/drivers/staging/rdma/ipath/Kconfig b/drivers/staging/rdma/ipath/Kconfig
deleted file mode 100644 (file)
index 041ce06..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-config INFINIBAND_IPATH
-       tristate "QLogic HTX HCA support"
-       depends on 64BIT && NET && HT_IRQ
-       ---help---
-       This is a driver for the deprecated QLogic Hyper-Transport
-       IB host channel adapter (model QHT7140),
-       including InfiniBand verbs support.  This driver allows these
-       devices to be used both with kernel upper level protocols such
-       as IP-over-InfiniBand and with userspace applications
-       (in conjunction with InfiniBand userspace access).
-       For QLogic PCIe QLE based cards, use the QIB driver instead.
-
-       If you have this hardware you will need to boot with PAT disabled
-       on your x86-64 systems; use the nopat kernel parameter.
-
-       Note that this driver will soon be removed entirely from the kernel.
diff --git a/drivers/staging/rdma/ipath/Makefile b/drivers/staging/rdma/ipath/Makefile
deleted file mode 100644 (file)
index 4496f28..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-ccflags-y := -DIPATH_IDSTR='"QLogic kernel.org driver"' \
-       -DIPATH_KERN_TYPE=0
-
-obj-$(CONFIG_INFINIBAND_IPATH) += ib_ipath.o
-
-ib_ipath-y := \
-       ipath_cq.o \
-       ipath_diag.o \
-       ipath_dma.o \
-       ipath_driver.o \
-       ipath_eeprom.o \
-       ipath_file_ops.o \
-       ipath_fs.o \
-       ipath_init_chip.o \
-       ipath_intr.o \
-       ipath_keys.o \
-       ipath_mad.o \
-       ipath_mmap.o \
-       ipath_mr.o \
-       ipath_qp.o \
-       ipath_rc.o \
-       ipath_ruc.o \
-       ipath_sdma.o \
-       ipath_srq.o \
-       ipath_stats.o \
-       ipath_sysfs.o \
-       ipath_uc.o \
-       ipath_ud.o \
-       ipath_user_pages.o \
-       ipath_user_sdma.o \
-       ipath_verbs_mcast.o \
-       ipath_verbs.o
-
-ib_ipath-$(CONFIG_HT_IRQ) += ipath_iba6110.o
-
-ib_ipath-$(CONFIG_X86_64) += ipath_wc_x86_64.o
-ib_ipath-$(CONFIG_PPC64) += ipath_wc_ppc64.o
diff --git a/drivers/staging/rdma/ipath/TODO b/drivers/staging/rdma/ipath/TODO
deleted file mode 100644 (file)
index cb00158..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-The ipath driver has been moved to staging in preparation for its removal in a
-few releases. The driver will be deleted during the 4.6 merge window.
-
-Contact Dennis Dalessandro <dennis.dalessandro@intel.com> and
-Cc: linux-rdma@vger.kernel.org
diff --git a/drivers/staging/rdma/ipath/ipath_common.h b/drivers/staging/rdma/ipath/ipath_common.h
deleted file mode 100644 (file)
index 28cfe97..0000000
+++ /dev/null
@@ -1,851 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_COMMON_H
-#define _IPATH_COMMON_H
-
-/*
- * This file contains defines, structures, etc. that are used
- * to communicate between kernel and user code.
- */
-
-
-/* This is the IEEE-assigned OUI for QLogic Inc. InfiniPath */
-#define IPATH_SRC_OUI_1 0x00
-#define IPATH_SRC_OUI_2 0x11
-#define IPATH_SRC_OUI_3 0x75
-
-/* version of protocol header (known to chip also). In the long run,
- * we should be able to generate and accept a range of version numbers;
- * for now we only accept one, and it's compiled in.
- */
-#define IPS_PROTO_VERSION 2
-
-/*
- * These are compile time constants that you may want to enable or disable
- * if you are trying to debug problems with code or performance.
- * IPATH_VERBOSE_TRACING define as 1 if you want additional tracing in
- * fastpath code
- * IPATH_TRACE_REGWRITES define as 1 if you want register writes to be
- * traced in fastpath code
- * _IPATH_TRACING define as 0 if you want to remove all tracing in a
- * compilation unit
- * _IPATH_DEBUGGING define as 0 if you want to remove debug prints
- */
-
-/*
- * The value in the BTH QP field that InfiniPath uses to differentiate
- * an infinipath protocol IB packet vs standard IB transport
- */
-#define IPATH_KD_QP 0x656b79
-
-/*
- * valid states passed to ipath_set_linkstate() user call
- */
-#define IPATH_IB_LINKDOWN              0
-#define IPATH_IB_LINKARM               1
-#define IPATH_IB_LINKACTIVE            2
-#define IPATH_IB_LINKDOWN_ONLY         3
-#define IPATH_IB_LINKDOWN_SLEEP                4
-#define IPATH_IB_LINKDOWN_DISABLE      5
-#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
-#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
-#define IPATH_IB_LINK_NO_HRTBT 8 /* disable Heartbeat, e.g. for loopback */
-#define IPATH_IB_LINK_HRTBT    9 /* enable heartbeat, normal, non-loopback */
-
-/*
- * These 3 values (SDR and DDR may be ORed for auto-speed
- * negotiation) are used for the 3rd argument to path_f_set_ib_cfg
- * with cmd IPATH_IB_CFG_SPD_ENB, by direct calls or via sysfs.  They
- * are also the possible values for ipath_link_speed_enabled and active.
- * The values were chosen to match values used within the IB spec.
- */
-#define IPATH_IB_SDR 1
-#define IPATH_IB_DDR 2
-
-/*
- * stats maintained by the driver.  For now, at least, this is global
- * to all minor devices.
- */
-struct infinipath_stats {
-       /* number of interrupts taken */
-       __u64 sps_ints;
-       /* number of interrupts for errors */
-       __u64 sps_errints;
-       /* number of errors from chip (not incl. packet errors or CRC) */
-       __u64 sps_errs;
-       /* number of packet errors from chip other than CRC */
-       __u64 sps_pkterrs;
-       /* number of packets with CRC errors (ICRC and VCRC) */
-       __u64 sps_crcerrs;
-       /* number of hardware errors reported (parity, etc.) */
-       __u64 sps_hwerrs;
-       /* number of times IB link changed state unexpectedly */
-       __u64 sps_iblink;
-       __u64 sps_unused; /* was fastrcvint, no longer implemented */
-       /* number of kernel (port0) packets received */
-       __u64 sps_port0pkts;
-       /* number of "ethernet" packets sent by driver */
-       __u64 sps_ether_spkts;
-       /* number of "ethernet" packets received by driver */
-       __u64 sps_ether_rpkts;
-       /* number of SMA packets sent by driver. Obsolete. */
-       __u64 sps_sma_spkts;
-       /* number of SMA packets received by driver. Obsolete. */
-       __u64 sps_sma_rpkts;
-       /* number of times all ports rcvhdrq was full and packet dropped */
-       __u64 sps_hdrqfull;
-       /* number of times all ports egrtid was full and packet dropped */
-       __u64 sps_etidfull;
-       /*
-        * number of times we tried to send from driver, but no pio buffers
-        * avail
-        */
-       __u64 sps_nopiobufs;
-       /* number of ports currently open */
-       __u64 sps_ports;
-       /* list of pkeys (other than default) accepted (0 means not set) */
-       __u16 sps_pkeys[4];
-       __u16 sps_unused16[4]; /* available; maintaining compatible layout */
-       /* number of user ports per chip (not IB ports) */
-       __u32 sps_nports;
-       /* not our interrupt, or already handled */
-       __u32 sps_nullintr;
-       /* max number of packets handled per receive call */
-       __u32 sps_maxpkts_call;
-       /* avg number of packets handled per receive call */
-       __u32 sps_avgpkts_call;
-       /* total number of pages locked */
-       __u64 sps_pagelocks;
-       /* total number of pages unlocked */
-       __u64 sps_pageunlocks;
-       /*
-        * Number of packets dropped in kernel other than errors (ether
-        * packets if ipath not configured, etc.)
-        */
-       __u64 sps_krdrops;
-       __u64 sps_txeparity; /* PIO buffer parity error, recovered */
-       /* pad for future growth */
-       __u64 __sps_pad[45];
-};
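
Because this header is shared with user space, struct infinipath_stats is treated as a fixed ABI: retired counters are renamed rather than removed (sps_unused, sps_unused16) and the __sps_pad[] tail reserves space so new counters can be added without changing the overall size. A sketch of how such a layout can be guarded at compile time; the struct and the 512-byte figure below are illustrative assumptions, not the real ipath layout:

    /* Illustrative sketch: keeping a user-visible stats struct at a fixed size
     * by carving new fields out of a pad array.  Struct and size are made up. */
    #include <stdint.h>
    #include <stdio.h>

    struct demo_stats {
            uint64_t ints;
            uint64_t errs;
            uint64_t crc_errs;
            uint64_t new_counter;          /* added later: taken out of the pad */
            uint64_t pad[64 - 4];          /* keeps sizeof() at 64 * 8 = 512 */
    };

    int main(void)
    {
            /* adding a counter must shrink pad[] so old binaries still match */
            _Static_assert(sizeof(struct demo_stats) == 512, "ABI size changed");

            printf("sizeof(struct demo_stats) = %zu\n", sizeof(struct demo_stats));
            return 0;
    }
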
-
-/*
- * These are the status bits readable (in ascii form, 64bit value)
- * from the "status" sysfs file.
- */
-#define IPATH_STATUS_INITTED       0x1 /* basic initialization done */
-#define IPATH_STATUS_DISABLED      0x2 /* hardware disabled */
-/* Device has been disabled via admin request */
-#define IPATH_STATUS_ADMIN_DISABLED    0x4
-/* Chip has been found and initted */
-#define IPATH_STATUS_CHIP_PRESENT 0x20
-/* IB link is at ACTIVE, usable for data traffic */
-#define IPATH_STATUS_IB_READY     0x40
-/* link is configured, LID, MTU, etc. have been set */
-#define IPATH_STATUS_IB_CONF      0x80
-/* no link established, probably no cable */
-#define IPATH_STATUS_IB_NOCABLE  0x100
-/* A Fatal hardware error has occurred. */
-#define IPATH_STATUS_HWERROR     0x200
-
-/*
- * The list of usermode accessible registers.  Also see Reg_* later in file.
- */
-typedef enum _ipath_ureg {
-       /* (RO)  DMA RcvHdr to be used next. */
-       ur_rcvhdrtail = 0,
-       /* (RW)  RcvHdr entry to be processed next by host. */
-       ur_rcvhdrhead = 1,
-       /* (RO)  Index of next Eager index to use. */
-       ur_rcvegrindextail = 2,
-       /* (RW)  Eager TID to be processed next */
-       ur_rcvegrindexhead = 3,
-       /* For internal use only; max register number. */
-       _IPATH_UregMax
-} ipath_ureg;
-
-/* bit values for spi_runtime_flags */
-#define IPATH_RUNTIME_HT       0x1
-#define IPATH_RUNTIME_PCIE     0x2
-#define IPATH_RUNTIME_FORCE_WC_ORDER   0x4
-#define IPATH_RUNTIME_RCVHDR_COPY      0x8
-#define IPATH_RUNTIME_MASTER   0x10
-#define IPATH_RUNTIME_NODMA_RTAIL 0x80
-#define IPATH_RUNTIME_SDMA           0x200
-#define IPATH_RUNTIME_FORCE_PIOAVAIL 0x400
-#define IPATH_RUNTIME_PIO_REGSWAPPED 0x800
-
-/*
- * This structure is returned by ipath_userinit() immediately after
- * open to get implementation-specific info, and info specific to this
- * instance.
- *
- * This struct must have explicit pad fields where type sizes
- * may result in different alignments between 32 and 64 bit
- * programs, since the 64 bit kernel requires the user code
- * to have matching offsets
- */
-struct ipath_base_info {
-       /* version of hardware, for feature checking. */
-       __u32 spi_hw_version;
-       /* version of software, for feature checking. */
-       __u32 spi_sw_version;
-       /* InfiniPath port assigned, goes into sent packets */
-       __u16 spi_port;
-       __u16 spi_subport;
-       /*
-        * IB MTU; a packet's IB data must be less than this.
-        * The MTU is in bytes, and will be a multiple of 4 bytes.
-        */
-       __u32 spi_mtu;
-       /*
-        * Size of a PIO buffer.  Any given packet's total size must be less
-        * than this (in words).  Included is the starting control word, so
-        * if 513 is returned, then total pkt size is 512 words or less.
-        */
-       __u32 spi_piosize;
-       /* size of the TID cache in infinipath, in entries */
-       __u32 spi_tidcnt;
-       /* size of the TID Eager list in infinipath, in entries */
-       __u32 spi_tidegrcnt;
-       /* size of a single receive header queue entry in words. */
-       __u32 spi_rcvhdrent_size;
-       /*
-        * Count of receive header queue entries allocated.
-        * This may be less than the spu_rcvhdrcnt passed in.
-        */
-       __u32 spi_rcvhdr_cnt;
-
-       /* per-chip and other runtime features bitmap (IPATH_RUNTIME_*) */
-       __u32 spi_runtime_flags;
-
-       /* address where receive buffer queue is mapped into */
-       __u64 spi_rcvhdr_base;
-
-       /* user program. */
-
-       /* base address of eager TID receive buffers. */
-       __u64 spi_rcv_egrbufs;
-
-       /* Allocated by initialization code, not by protocol. */
-
-       /*
-        * Size of each TID buffer in host memory, starting at
-        * spi_rcv_egrbufs.  The buffers are virtually contiguous.
-        */
-       __u32 spi_rcv_egrbufsize;
-       /*
-        * The special QP (queue pair) value that identifies an infinipath
-        * protocol packet from standard IB packets.  More, probably much
-        * more, to be added.
-        */
-       __u32 spi_qpair;
-
-       /*
-        * User register base for init code, not to be used directly by
-        * protocol or applications.
-        */
-       __u64 __spi_uregbase;
-       /*
-        * Maximum buffer size in bytes that can be used in a single TID
-        * entry (assuming the buffer is aligned to this boundary).  This is
-        * the minimum of what the hardware and software support.  Guaranteed
-        * to be a power of 2.
-        */
-       __u32 spi_tid_maxsize;
-       /*
-        * alignment of each pio send buffer (byte count
-        * to add to spi_piobufbase to get to second buffer)
-        */
-       __u32 spi_pioalign;
-       /*
-        * The index of the first pio buffer available to this process;
-        * needed to do lookup in spi_pioavailaddr; not added to
-        * spi_piobufbase.
-        */
-       __u32 spi_pioindex;
-        /* number of buffers mapped for this process */
-       __u32 spi_piocnt;
-
-       /*
-        * Base address of writeonly pio buffers for this process.
-        * Each buffer has spi_piosize words, and is aligned on spi_pioalign
-        * boundaries.  spi_piocnt buffers are mapped from this address
-        */
-       __u64 spi_piobufbase;
-
-       /*
-        * Base address of readonly memory copy of the pioavail registers.
-        * There are 2 bits for each buffer.
-        */
-       __u64 spi_pioavailaddr;
-
-       /*
-        * Address where driver updates a copy of the interface and driver
-        * status (IPATH_STATUS_*) as a 64 bit value.  It's followed by a
-        * string indicating hardware error, if there was one.
-        */
-       __u64 spi_status;
-
-       /* number of chip ports available to user processes */
-       __u32 spi_nports;
-       /* unit number of chip we are using */
-       __u32 spi_unit;
-       /* num bufs in each contiguous set */
-       __u32 spi_rcv_egrperchunk;
-       /* size in bytes of each contiguous set */
-       __u32 spi_rcv_egrchunksize;
-       /* total size of mmap to cover full rcvegrbuffers */
-       __u32 spi_rcv_egrbuftotlen;
-       __u32 spi_filler_for_align;
-       /* address of readonly memory copy of the rcvhdrq tail register. */
-       __u64 spi_rcvhdr_tailaddr;
-
-       /* shared memory pages for subports if port is shared */
-       __u64 spi_subport_uregbase;
-       __u64 spi_subport_rcvegrbuf;
-       __u64 spi_subport_rcvhdr_base;
-
-       /* shared memory page for hardware port if it is shared */
-       __u64 spi_port_uregbase;
-       __u64 spi_port_rcvegrbuf;
-       __u64 spi_port_rcvhdr_base;
-       __u64 spi_port_rcvhdr_tailaddr;
-
-} __attribute__ ((aligned(8)));
-
-
-/*
- * This version number is given to the driver by the user code during
- * initialization in the spu_userversion field of ipath_user_info, so
- * the driver can check for compatibility with user code.
- *
- * The major version changes when data structures
- * change in an incompatible way.  The driver must be the same or higher
- * for initialization to succeed.  In some cases, a higher version
- * driver will not interoperate with older software, and initialization
- * will return an error.
- */
-#define IPATH_USER_SWMAJOR 1
-
-/*
- * Minor version differences are always compatible
- * within a major version; however, if user software is newer
- * than driver software, some new features and/or structure fields
- * may not be implemented; the user code must deal with this if it
- * cares, or it must abort after initialization reports the difference.
- */
-#define IPATH_USER_SWMINOR 6
-
-#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
-
-#define IPATH_KERN_TYPE 0
-
-/*
- * Similarly, this is the kernel version going back to the user.  It's
- * slightly different, in that we want to tell if the driver was built as
- * part of a QLogic release, or from the driver from openfabrics.org,
- * kernel.org, or a standard distribution, for support reasons.
- * The high bit is 0 for non-QLogic and 1 for QLogic-built/supplied.
- *
- * It's returned by the driver to the user code during initialization in the
- * spi_sw_version field of ipath_base_info, so the user code can in turn
- * check for compatibility with the kernel.
-*/
-#define IPATH_KERN_SWVERSION ((IPATH_KERN_TYPE<<31) | IPATH_USER_SWVERSION)
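
IPATH_USER_SWVERSION packs the major version into the upper 16 bits and the minor into the lower 16, and IPATH_KERN_SWVERSION additionally carries the QLogic-built flag in bit 31. Per the comments above, the driver's major version must be the same or higher than the user code's for initialization to succeed. A small standalone sketch of that pack-and-compare scheme; the names and the exact check are stand-ins, and the real driver may apply stricter rules:

    /* Illustrative sketch of the major<<16 | minor version word used above. */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_SWMAJOR   1
    #define DEMO_SWMINOR   6
    #define DEMO_KERN_TYPE 0   /* high bit: 1 = vendor-built, 0 = mainline-style */

    #define DEMO_USER_SWVERSION ((DEMO_SWMAJOR << 16) | DEMO_SWMINOR)
    #define DEMO_KERN_SWVERSION ((DEMO_KERN_TYPE << 31) | DEMO_USER_SWVERSION)

    static int demo_compatible(uint32_t user_ver, uint32_t kern_ver)
    {
            uint32_t user_major = user_ver >> 16;
            uint32_t kern_major = (kern_ver >> 16) & 0x7fff; /* drop the type bit */

            /* mirrors the rule spelled out above: the driver's major version
             * must be the same or higher (the real driver may be stricter) */
            return kern_major >= user_major;
    }

    int main(void)
    {
            printf("user 0x%08x / kernel 0x%08x -> %s\n",
                   DEMO_USER_SWVERSION, DEMO_KERN_SWVERSION,
                   demo_compatible(DEMO_USER_SWVERSION, DEMO_KERN_SWVERSION)
                   ? "compatible" : "reject");
            return 0;
    }
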
-
-/*
- * This structure is passed to ipath_userinit() to tell the driver where
- * user code buffers are, sizes, etc.   The offsets and sizes of the
- * fields must remain unchanged, for binary compatibility.  It can
- * be extended, if userversion is changed so user code can tell, if needed
- */
-struct ipath_user_info {
-       /*
-        * version of user software, to detect compatibility issues.
-        * Should be set to IPATH_USER_SWVERSION.
-        */
-       __u32 spu_userversion;
-
-       /* desired number of receive header queue entries */
-       __u32 spu_rcvhdrcnt;
-
-       /* size of struct base_info to write to */
-       __u32 spu_base_info_size;
-
-       /*
-        * number of words in KD protocol header
-        * This tells InfiniPath how many words to copy to rcvhdrq.  If 0,
-        * kernel uses a default.  Once set, attempts to set any other value
-        * are an error (EAGAIN) until driver is reloaded.
-        */
-       __u32 spu_rcvhdrsize;
-
-       /*
-        * If two or more processes wish to share a port, each process
-        * must set the spu_subport_cnt and spu_subport_id to the same
-        * values.  The only restriction on the spu_subport_id is that
-        * it be unique for a given node.
-        */
-       __u16 spu_subport_cnt;
-       __u16 spu_subport_id;
-
-       __u32 spu_unused; /* kept for compatible layout */
-
-       /*
-        * address of struct base_info to write to
-        */
-       __u64 spu_base_info;
-
-} __attribute__ ((aligned(8)));
-
-/* User commands. */
-
-#define IPATH_CMD_MIN          16
-
-#define __IPATH_CMD_USER_INIT  16      /* old set up userspace (for old user code) */
-#define IPATH_CMD_PORT_INFO    17      /* find out what resources we got */
-#define IPATH_CMD_RECV_CTRL    18      /* control receipt of packets */
-#define IPATH_CMD_TID_UPDATE   19      /* update expected TID entries */
-#define IPATH_CMD_TID_FREE     20      /* free expected TID entries */
-#define IPATH_CMD_SET_PART_KEY 21      /* add partition key */
-#define __IPATH_CMD_SLAVE_INFO 22      /* return info on slave processes (for old user code) */
-#define IPATH_CMD_ASSIGN_PORT  23      /* allocate HCA and port */
-#define IPATH_CMD_USER_INIT    24      /* set up userspace */
-#define IPATH_CMD_UNUSED_1     25
-#define IPATH_CMD_UNUSED_2     26
-#define IPATH_CMD_PIOAVAILUPD  27      /* force an update of PIOAvail reg */
-#define IPATH_CMD_POLL_TYPE    28      /* set the kind of polling we want */
-#define IPATH_CMD_ARMLAUNCH_CTRL       29 /* armlaunch detection control */
-/* 30 is unused */
-#define IPATH_CMD_SDMA_INFLIGHT 31     /* sdma inflight counter request */
-#define IPATH_CMD_SDMA_COMPLETE 32     /* sdma completion counter request */
-
-/*
- * Poll types
- */
-#define IPATH_POLL_TYPE_URGENT  0x01
-#define IPATH_POLL_TYPE_OVERFLOW 0x02
-
-struct ipath_port_info {
-       __u32 num_active;       /* number of active units */
-       __u32 unit;             /* unit (chip) assigned to caller */
-       __u16 port;             /* port on unit assigned to caller */
-       __u16 subport;          /* subport on unit assigned to caller */
-       __u16 num_ports;        /* number of ports available on unit */
-       __u16 num_subports;     /* number of subports opened on port */
-};
-
-struct ipath_tid_info {
-       __u32 tidcnt;
-       /* make structure same size in 32 and 64 bit */
-       __u32 tid__unused;
-       /* virtual address of first page in transfer */
-       __u64 tidvaddr;
-       /* pointer (same size 32/64 bit) to __u16 tid array */
-       __u64 tidlist;
-
-       /*
-        * pointer (same size 32/64 bit) to bitmap of TIDs used
-        * for this call; checked for being large enough at open
-        */
-       __u64 tidmap;
-};
-
-struct ipath_cmd {
-       __u32 type;                     /* command type */
-       union {
-               struct ipath_tid_info tid_info;
-               struct ipath_user_info user_info;
-
-               /*
-                * address in userspace where we should put the sdma
-                * inflight counter
-                */
-               __u64 sdma_inflight;
-               /*
-                * address in userspace where we should put the sdma
-                * completion counter
-                */
-               __u64 sdma_complete;
-               /* address in userspace of struct ipath_port_info to
-                  write result to */
-               __u64 port_info;
-               /* enable/disable receipt of packets */
-               __u32 recv_ctrl;
-               /* enable/disable armlaunch errors (non-zero to enable) */
-               __u32 armlaunch_ctrl;
-               /* partition key to set */
-               __u16 part_key;
-               /* user address of __u32 bitmask of active slaves */
-               __u64 slave_mask_addr;
-               /* type of polling we want */
-               __u16 poll_type;
-       } cmd;
-};
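
struct ipath_cmd above is the classic discriminated-union command block: user space fills type with one of the IPATH_CMD_* values and only the matching member of the union is meaningful to the driver. A small sketch of how such a command is filled in and dispatched; the names and values below are stand-ins, not the driver's real handler:

    /* Illustrative sketch of the type + union command pattern: the dispatcher
     * switches on the type field and only reads the matching union member. */
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_CMD_RECV_CTRL 18
    #define DEMO_CMD_PART_KEY  21

    struct demo_cmd {
            uint32_t type;
            union {
                    uint32_t recv_ctrl;   /* enable/disable packet receipt */
                    uint16_t part_key;    /* partition key to set */
            } cmd;
    };

    static void demo_dispatch(const struct demo_cmd *c)
    {
            switch (c->type) {
            case DEMO_CMD_RECV_CTRL:
                    printf("recv_ctrl = %u\n", c->cmd.recv_ctrl);
                    break;
            case DEMO_CMD_PART_KEY:
                    printf("part_key = 0x%04x\n", c->cmd.part_key);
                    break;
            default:
                    printf("unknown command %u\n", c->type);
            }
    }

    int main(void)
    {
            struct demo_cmd c = { .type = DEMO_CMD_PART_KEY, .cmd.part_key = 0x8001 };

            demo_dispatch(&c);
            return 0;
    }
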
-
-struct ipath_iovec {
-       /* Pointer to data, but same size 32 and 64 bit */
-       __u64 iov_base;
-
-       /*
-        * Length of data; don't need 64 bits, but want
-        * ipath_sendpkt to remain same size as before 32 bit changes, so...
-        */
-       __u64 iov_len;
-};
-
-/*
- * Describes a single packet for send.  Each packet can have one or more
- * buffers, but the total length (exclusive of IB headers) must be less
- * than the MTU, and if using the PIO method, entire packet length,
- * including IB headers, must be less than the ipath_piosize value (words).
- * Use of this necessitates including sys/uio.h
- */
-struct __ipath_sendpkt {
-       __u32 sps_flags;        /* flags for packet (TBD) */
-       __u32 sps_cnt;          /* number of entries to use in sps_iov */
-       /* array of iov's describing packet. TEMPORARY */
-       struct ipath_iovec sps_iov[4];
-};
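
struct ipath_iovec and struct __ipath_sendpkt above carry user pointers in __u64 fields so the layout is identical for 32-bit and 64-bit user programs talking to a 64-bit kernel. A self-contained demo of that fixed-width pointer-passing pattern, with stand-in types:

    /* Sketch of the fixed-width iovec pattern: pointers travel in 64-bit
     * fields so the struct layout does not depend on the user ABI. */
    #include <stdint.h>
    #include <stdio.h>

    struct demo_iovec {
            uint64_t iov_base;   /* pointer value, widened to 64 bits */
            uint64_t iov_len;
    };

    int main(void)
    {
            char payload[] = "hello";
            struct demo_iovec iov;

            iov.iov_base = (uint64_t)(uintptr_t)payload;  /* widen, don't truncate */
            iov.iov_len  = sizeof(payload);

            /* the consumer converts back the same way */
            printf("%s (%llu bytes)\n",
                   (const char *)(uintptr_t)iov.iov_base,
                   (unsigned long long)iov.iov_len);
            return 0;
    }
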
-
-/*
- * diagnostics can send a packet by "writing" one of the following
- * two structs to diag data special file
- * The first is the legacy version for backward compatibility
- */
-struct ipath_diag_pkt {
-       __u32 unit;
-       __u64 data;
-       __u32 len;
-};
-
-/* The second diag_pkt struct is the expanded version that allows
- * more control over the packet, specifically, by allowing a custom
- * pbc (+ static rate) qword, so that special modes and deliberate
- * changes to CRCs can be used. The elements were also re-ordered
- * for better alignment and to avoid padding issues.
- */
-struct ipath_diag_xpkt {
-       __u64 data;
-       __u64 pbc_wd;
-       __u32 unit;
-       __u32 len;
-};
-
-/*
- * Data layout in I2C flash (for GUID, etc.)
- * All fields are little-endian binary unless otherwise stated
- */
-#define IPATH_FLASH_VERSION 2
-struct ipath_flash {
-       /* flash layout version (IPATH_FLASH_VERSION) */
-       __u8 if_fversion;
-       /* checksum protecting if_length bytes */
-       __u8 if_csum;
-       /*
-        * valid length (in use, protected by if_csum), including
-        * if_fversion and if_csum themselves
-        */
-       __u8 if_length;
-       /* the GUID, in network order */
-       __u8 if_guid[8];
-       /* number of GUIDs to use, starting from if_guid */
-       __u8 if_numguid;
-       /* the (last 10 characters of) board serial number, in ASCII */
-       char if_serial[12];
-       /* board mfg date (YYYYMMDD ASCII) */
-       char if_mfgdate[8];
-       /* last board rework/test date (YYYYMMDD ASCII) */
-       char if_testdate[8];
-       /* logging of error counts, TBD */
-       __u8 if_errcntp[4];
-       /* powered on hours, updated at driver unload */
-       __u8 if_powerhour[2];
-       /* ASCII free-form comment field */
-       char if_comment[32];
-       /* Backwards compatible prefix for longer QLogic Serial Numbers */
-       char if_sprefix[4];
-       /* 82 bytes used, min flash size is 128 bytes */
-       __u8 if_future[46];
-};
-
-/*
- * These are the counters implemented in the chip, and are listed in order.
- * The InterCaps naming is taken straight from the chip spec.
- */
-struct infinipath_counters {
-       __u64 LBIntCnt;
-       __u64 LBFlowStallCnt;
-       __u64 TxSDmaDescCnt;    /* was Reserved1 */
-       __u64 TxUnsupVLErrCnt;
-       __u64 TxDataPktCnt;
-       __u64 TxFlowPktCnt;
-       __u64 TxDwordCnt;
-       __u64 TxLenErrCnt;
-       __u64 TxMaxMinLenErrCnt;
-       __u64 TxUnderrunCnt;
-       __u64 TxFlowStallCnt;
-       __u64 TxDroppedPktCnt;
-       __u64 RxDroppedPktCnt;
-       __u64 RxDataPktCnt;
-       __u64 RxFlowPktCnt;
-       __u64 RxDwordCnt;
-       __u64 RxLenErrCnt;
-       __u64 RxMaxMinLenErrCnt;
-       __u64 RxICRCErrCnt;
-       __u64 RxVCRCErrCnt;
-       __u64 RxFlowCtrlErrCnt;
-       __u64 RxBadFormatCnt;
-       __u64 RxLinkProblemCnt;
-       __u64 RxEBPCnt;
-       __u64 RxLPCRCErrCnt;
-       __u64 RxBufOvflCnt;
-       __u64 RxTIDFullErrCnt;
-       __u64 RxTIDValidErrCnt;
-       __u64 RxPKeyMismatchCnt;
-       __u64 RxP0HdrEgrOvflCnt;
-       __u64 RxP1HdrEgrOvflCnt;
-       __u64 RxP2HdrEgrOvflCnt;
-       __u64 RxP3HdrEgrOvflCnt;
-       __u64 RxP4HdrEgrOvflCnt;
-       __u64 RxP5HdrEgrOvflCnt;
-       __u64 RxP6HdrEgrOvflCnt;
-       __u64 RxP7HdrEgrOvflCnt;
-       __u64 RxP8HdrEgrOvflCnt;
-       __u64 RxP9HdrEgrOvflCnt;        /* was Reserved6 */
-       __u64 RxP10HdrEgrOvflCnt;       /* was Reserved7 */
-       __u64 RxP11HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP12HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP13HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP14HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP15HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 RxP16HdrEgrOvflCnt;       /* new for IBA7220 */
-       __u64 IBStatusChangeCnt;
-       __u64 IBLinkErrRecoveryCnt;
-       __u64 IBLinkDownedCnt;
-       __u64 IBSymbolErrCnt;
-       /* The following are new for IBA7220 */
-       __u64 RxVL15DroppedPktCnt;
-       __u64 RxOtherLocalPhyErrCnt;
-       __u64 PcieRetryBufDiagQwordCnt;
-       __u64 ExcessBufferOvflCnt;
-       __u64 LocalLinkIntegrityErrCnt;
-       __u64 RxVlErrCnt;
-       __u64 RxDlidFltrCnt;
-};
-
-/*
- * The next set of defines are for packet headers, and chip register
- * and memory bits that are visible to and/or used by user-mode software
- * The other bits that are used only by the driver or diags are in
- * ipath_registers.h
- */
-
-/* RcvHdrFlags bits */
-#define INFINIPATH_RHF_LENGTH_MASK 0x7FF
-#define INFINIPATH_RHF_LENGTH_SHIFT 0
-#define INFINIPATH_RHF_RCVTYPE_MASK 0x7
-#define INFINIPATH_RHF_RCVTYPE_SHIFT 11
-#define INFINIPATH_RHF_EGRINDEX_MASK 0xFFF
-#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
-#define INFINIPATH_RHF_SEQ_MASK 0xF
-#define INFINIPATH_RHF_SEQ_SHIFT 0
-#define INFINIPATH_RHF_HDRQ_OFFSET_MASK 0x7FF
-#define INFINIPATH_RHF_HDRQ_OFFSET_SHIFT 4
-#define INFINIPATH_RHF_H_ICRCERR   0x80000000
-#define INFINIPATH_RHF_H_VCRCERR   0x40000000
-#define INFINIPATH_RHF_H_PARITYERR 0x20000000
-#define INFINIPATH_RHF_H_LENERR    0x10000000
-#define INFINIPATH_RHF_H_MTUERR    0x08000000
-#define INFINIPATH_RHF_H_IHDRERR   0x04000000
-#define INFINIPATH_RHF_H_TIDERR    0x02000000
-#define INFINIPATH_RHF_H_MKERR     0x01000000
-#define INFINIPATH_RHF_H_IBERR     0x00800000
-#define INFINIPATH_RHF_H_ERR_MASK  0xFF800000
-#define INFINIPATH_RHF_L_USE_EGR   0x80000000
-#define INFINIPATH_RHF_L_SWA       0x00008000
-#define INFINIPATH_RHF_L_SWB       0x00004000
-
-/* infinipath header fields */
-#define INFINIPATH_I_VERS_MASK 0xF
-#define INFINIPATH_I_VERS_SHIFT 28
-#define INFINIPATH_I_PORT_MASK 0xF
-#define INFINIPATH_I_PORT_SHIFT 24
-#define INFINIPATH_I_TID_MASK 0x7FF
-#define INFINIPATH_I_TID_SHIFT 13
-#define INFINIPATH_I_OFFSET_MASK 0x1FFF
-#define INFINIPATH_I_OFFSET_SHIFT 0
-
-/* K_PktFlags bits */
-#define INFINIPATH_KPF_INTR 0x1
-#define INFINIPATH_KPF_SUBPORT_MASK 0x3
-#define INFINIPATH_KPF_SUBPORT_SHIFT 1
-
-#define INFINIPATH_MAX_SUBPORT 4
-
-/* SendPIO per-buffer control */
-#define INFINIPATH_SP_TEST    0x40
-#define INFINIPATH_SP_TESTEBP 0x20
-#define INFINIPATH_SP_TRIGGER_SHIFT  15
-
-/* SendPIOAvail bits */
-#define INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT 1
-#define INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT 0
-
-/* infinipath header format */
-struct ipath_header {
-       /*
-        * Version - 4 bits, Port - 4 bits, TID - 10 bits and Offset -
-        * 14 bits before ECO change ~28 Dec 03.  After that, Vers 4,
-        * Port 4, TID 11, offset 13.
-        */
-       __le32 ver_port_tid_offset;
-       __le16 chksum;
-       __le16 pkt_flags;
-};
-
-/* infinipath user message header format.
- * This structure contains the first 4 fields common to all protocols
- * that employ infinipath.
- */
-struct ipath_message_header {
-       __be16 lrh[4];
-       __be32 bth[3];
-       /* fields below this point are in host byte order */
-       struct ipath_header iph;
-       __u8 sub_opcode;
-};
-
-/* infinipath ethernet header format */
-struct ether_header {
-       __be16 lrh[4];
-       __be32 bth[3];
-       struct ipath_header iph;
-       __u8 sub_opcode;
-       __u8 cmd;
-       __be16 lid;
-       __u16 mac[3];
-       __u8 frag_num;
-       __u8 seq_num;
-       __le32 len;
-       /* MUST be of word size due to PIO write requirements */
-       __le32 csum;
-       __le16 csum_offset;
-       __le16 flags;
-       __u16 first_2_bytes;
-       __u8 unused[2];         /* currently unused */
-};
-
-
-/* IB - LRH header consts */
-#define IPATH_LRH_GRH 0x0003   /* 1. word of IB LRH - next header: GRH */
-#define IPATH_LRH_BTH 0x0002   /* 1. word of IB LRH - next header: BTH */
-
-/* misc. */
-#define SIZE_OF_CRC 1
-
-#define IPATH_DEFAULT_P_KEY 0xFFFF
-#define IPATH_PERMISSIVE_LID 0xFFFF
-#define IPATH_AETH_CREDIT_SHIFT 24
-#define IPATH_AETH_CREDIT_MASK 0x1F
-#define IPATH_AETH_CREDIT_INVAL 0x1F
-#define IPATH_PSN_MASK 0xFFFFFF
-#define IPATH_MSN_MASK 0xFFFFFF
-#define IPATH_QPN_MASK 0xFFFFFF
-#define IPATH_MULTICAST_LID_BASE 0xC000
-#define IPATH_EAGER_TID_ID INFINIPATH_I_TID_MASK
-#define IPATH_MULTICAST_QPN 0xFFFFFF
-
-/* Receive Header Queue: receive type (from infinipath) */
-#define RCVHQ_RCV_TYPE_EXPECTED  0
-#define RCVHQ_RCV_TYPE_EAGER     1
-#define RCVHQ_RCV_TYPE_NON_KD    2
-#define RCVHQ_RCV_TYPE_ERROR     3
-
-
-/* sub OpCodes - ith4x  */
-#define IPATH_ITH4X_OPCODE_ENCAP 0x81
-#define IPATH_ITH4X_OPCODE_LID_ARP 0x82
-
-#define IPATH_HEADER_QUEUE_WORDS 9
-
-/* functions for extracting fields from rcvhdrq entries for the driver.
- */
-static inline __u32 ipath_hdrget_err_flags(const __le32 * rbuf)
-{
-       return __le32_to_cpu(rbuf[1]) & INFINIPATH_RHF_H_ERR_MASK;
-}
-
-static inline __u32 ipath_hdrget_rcv_type(const __le32 * rbuf)
-{
-       return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_RCVTYPE_SHIFT)
-           & INFINIPATH_RHF_RCVTYPE_MASK;
-}
-
-static inline __u32 ipath_hdrget_length_in_bytes(const __le32 * rbuf)
-{
-       return ((__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_LENGTH_SHIFT)
-               & INFINIPATH_RHF_LENGTH_MASK) << 2;
-}
-
-static inline __u32 ipath_hdrget_index(const __le32 * rbuf)
-{
-       return (__le32_to_cpu(rbuf[0]) >> INFINIPATH_RHF_EGRINDEX_SHIFT)
-           & INFINIPATH_RHF_EGRINDEX_MASK;
-}
-
-static inline __u32 ipath_hdrget_seq(const __le32 *rbuf)
-{
-       return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_SEQ_SHIFT)
-               & INFINIPATH_RHF_SEQ_MASK;
-}
-
-static inline __u32 ipath_hdrget_offset(const __le32 *rbuf)
-{
-       return (__le32_to_cpu(rbuf[1]) >> INFINIPATH_RHF_HDRQ_OFFSET_SHIFT)
-               & INFINIPATH_RHF_HDRQ_OFFSET_MASK;
-}
-
-static inline __u32 ipath_hdrget_use_egr_buf(const __le32 *rbuf)
-{
-       return __le32_to_cpu(rbuf[0]) & INFINIPATH_RHF_L_USE_EGR;
-}
-
-static inline __u32 ipath_hdrget_ipath_ver(__le32 hdrword)
-{
-       return (__le32_to_cpu(hdrword) >> INFINIPATH_I_VERS_SHIFT)
-           & INFINIPATH_I_VERS_MASK;
-}
-
-#endif                         /* _IPATH_COMMON_H */
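
For context on the bit-field helpers above: a receive-header-queue entry is decoded purely with the RHF masks and shifts, and the inline ipath_hdrget_*() helpers are thin wrappers around that arithmetic. Below is a minimal, self-contained user-space sketch of the same decoding; the two RHF words and their field values are hypothetical, chosen only to exercise the masks, and the program is illustrative rather than part of the driver.

#include <stdio.h>
#include <stdint.h>

#define INFINIPATH_RHF_LENGTH_MASK    0x7FF
#define INFINIPATH_RHF_LENGTH_SHIFT   0
#define INFINIPATH_RHF_RCVTYPE_MASK   0x7
#define INFINIPATH_RHF_RCVTYPE_SHIFT  11
#define INFINIPATH_RHF_EGRINDEX_MASK  0xFFF
#define INFINIPATH_RHF_EGRINDEX_SHIFT 16
#define INFINIPATH_RHF_H_ERR_MASK     0xFF800000

int main(void)
{
	/* hypothetical RHF words, already converted to CPU byte order */
	uint32_t rhf0 = (5u  << INFINIPATH_RHF_EGRINDEX_SHIFT) |  /* eager buffer index 5 */
			(1u  << INFINIPATH_RHF_RCVTYPE_SHIFT)  |  /* RCVHQ_RCV_TYPE_EAGER */
			(16u << INFINIPATH_RHF_LENGTH_SHIFT);     /* length field, in dwords */
	uint32_t rhf1 = 0;                                        /* no error bits set */

	/* same arithmetic as ipath_hdrget_rcv_type/length_in_bytes/index/err_flags */
	printf("rcv type   %u\n", (rhf0 >> INFINIPATH_RHF_RCVTYPE_SHIFT) & INFINIPATH_RHF_RCVTYPE_MASK);
	printf("length     %u bytes\n",
	       ((rhf0 >> INFINIPATH_RHF_LENGTH_SHIFT) & INFINIPATH_RHF_LENGTH_MASK) << 2);
	printf("egr index  %u\n", (rhf0 >> INFINIPATH_RHF_EGRINDEX_SHIFT) & INFINIPATH_RHF_EGRINDEX_MASK);
	printf("err flags  0x%x\n", rhf1 & INFINIPATH_RHF_H_ERR_MASK);
	return 0;
}
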
diff --git a/drivers/staging/rdma/ipath/ipath_cq.c b/drivers/staging/rdma/ipath/ipath_cq.c
deleted file mode 100644 (file)
index e9dd911..0000000
+++ /dev/null
@@ -1,483 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_cq_enter - add a new entry to the completion queue
- * @cq: completion queue
- * @entry: work completion entry to add
- * @solicited: true if @entry is a solicited entry
- *
- * This may be called with qp->s_lock held.
- */
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
-{
-       struct ipath_cq_wc *wc;
-       unsigned long flags;
-       u32 head;
-       u32 next;
-
-       spin_lock_irqsave(&cq->lock, flags);
-
-       /*
-        * Note that the head pointer might be writable by user processes.
-        * Take care to verify it is a sane value.
-        */
-       wc = cq->queue;
-       head = wc->head;
-       if (head >= (unsigned) cq->ibcq.cqe) {
-               head = cq->ibcq.cqe;
-               next = 0;
-       } else
-               next = head + 1;
-       if (unlikely(next == wc->tail)) {
-               spin_unlock_irqrestore(&cq->lock, flags);
-               if (cq->ibcq.event_handler) {
-                       struct ib_event ev;
-
-                       ev.device = cq->ibcq.device;
-                       ev.element.cq = &cq->ibcq;
-                       ev.event = IB_EVENT_CQ_ERR;
-                       cq->ibcq.event_handler(&ev, cq->ibcq.cq_context);
-               }
-               return;
-       }
-       if (cq->ip) {
-               wc->uqueue[head].wr_id = entry->wr_id;
-               wc->uqueue[head].status = entry->status;
-               wc->uqueue[head].opcode = entry->opcode;
-               wc->uqueue[head].vendor_err = entry->vendor_err;
-               wc->uqueue[head].byte_len = entry->byte_len;
-               wc->uqueue[head].ex.imm_data = (__u32 __force) entry->ex.imm_data;
-               wc->uqueue[head].qp_num = entry->qp->qp_num;
-               wc->uqueue[head].src_qp = entry->src_qp;
-               wc->uqueue[head].wc_flags = entry->wc_flags;
-               wc->uqueue[head].pkey_index = entry->pkey_index;
-               wc->uqueue[head].slid = entry->slid;
-               wc->uqueue[head].sl = entry->sl;
-               wc->uqueue[head].dlid_path_bits = entry->dlid_path_bits;
-               wc->uqueue[head].port_num = entry->port_num;
-               /* Make sure entry is written before the head index. */
-               smp_wmb();
-       } else
-               wc->kqueue[head] = *entry;
-       wc->head = next;
-
-       if (cq->notify == IB_CQ_NEXT_COMP ||
-           (cq->notify == IB_CQ_SOLICITED && solicited)) {
-               cq->notify = IB_CQ_NONE;
-               cq->triggered++;
-               /*
-                * This will cause send_complete() to be called in
-                * another thread.
-                */
-               tasklet_hi_schedule(&cq->comptask);
-       }
-
-       spin_unlock_irqrestore(&cq->lock, flags);
-
-       if (entry->status != IB_WC_SUCCESS)
-               to_idev(cq->ibcq.device)->n_wqe_errs++;
-}
-
-/**
- * ipath_poll_cq - poll for work completion entries
- * @ibcq: the completion queue to poll
- * @num_entries: the maximum number of entries to return
- * @entry: pointer to array where work completions are placed
- *
- * Returns the number of completion entries polled.
- *
- * This may be called from interrupt context.  Also called by ib_poll_cq()
- * in the generic verbs code.
- */
-int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
-{
-       struct ipath_cq *cq = to_icq(ibcq);
-       struct ipath_cq_wc *wc;
-       unsigned long flags;
-       int npolled;
-       u32 tail;
-
-       /* The kernel can only poll a kernel completion queue */
-       if (cq->ip) {
-               npolled = -EINVAL;
-               goto bail;
-       }
-
-       spin_lock_irqsave(&cq->lock, flags);
-
-       wc = cq->queue;
-       tail = wc->tail;
-       if (tail > (u32) cq->ibcq.cqe)
-               tail = (u32) cq->ibcq.cqe;
-       for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
-               if (tail == wc->head)
-                       break;
-               /* The kernel doesn't need a RMB since it has the lock. */
-               *entry = wc->kqueue[tail];
-               if (tail >= cq->ibcq.cqe)
-                       tail = 0;
-               else
-                       tail++;
-       }
-       wc->tail = tail;
-
-       spin_unlock_irqrestore(&cq->lock, flags);
-
-bail:
-       return npolled;
-}
-
-static void send_complete(unsigned long data)
-{
-       struct ipath_cq *cq = (struct ipath_cq *)data;
-
-       /*
-        * The completion handler will most likely rearm the notification
-        * and poll for all pending entries.  If a new completion entry
-        * is added while we are in this routine, tasklet_hi_schedule()
-        * won't call us again until we return so we check triggered to
-        * see if we need to call the handler again.
-        */
-       for (;;) {
-               u8 triggered = cq->triggered;
-
-               cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
-
-               if (cq->triggered == triggered)
-                       return;
-       }
-}
-
-/**
- * ipath_create_cq - create a completion queue
- * @ibdev: the device this completion queue is attached to
- * @attr: creation attributes
- * @context: user context, used when mapping the queue into user space
- * @udata: user data; when present, receives the mmap offset of the queue
- *
- * Returns a pointer to the completion queue or negative errno values
- * for failure.
- *
- * Called by ib_create_cq() in the generic verbs code.
- */
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
-                             const struct ib_cq_init_attr *attr,
-                             struct ib_ucontext *context,
-                             struct ib_udata *udata)
-{
-       int entries = attr->cqe;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cq *cq;
-       struct ipath_cq_wc *wc;
-       struct ib_cq *ret;
-       u32 sz;
-
-       if (attr->flags)
-               return ERR_PTR(-EINVAL);
-
-       if (entries < 1 || entries > ib_ipath_max_cqes) {
-               ret = ERR_PTR(-EINVAL);
-               goto done;
-       }
-
-       /* Allocate the completion queue structure. */
-       cq = kmalloc(sizeof(*cq), GFP_KERNEL);
-       if (!cq) {
-               ret = ERR_PTR(-ENOMEM);
-               goto done;
-       }
-
-       /*
-        * Allocate the completion queue entries and head/tail pointers.
-        * This is allocated separately so that it can be resized and
-        * also mapped into user space.
-        * We need to use vmalloc() in order to support mmap and large
-        * numbers of entries.
-        */
-       sz = sizeof(*wc);
-       if (udata && udata->outlen >= sizeof(__u64))
-               sz += sizeof(struct ib_uverbs_wc) * (entries + 1);
-       else
-               sz += sizeof(struct ib_wc) * (entries + 1);
-       wc = vmalloc_user(sz);
-       if (!wc) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_cq;
-       }
-
-       /*
-        * Return the address of the WC as the offset to mmap.
-        * See ipath_mmap() for details.
-        */
-       if (udata && udata->outlen >= sizeof(__u64)) {
-               int err;
-
-               cq->ip = ipath_create_mmap_info(dev, sz, context, wc);
-               if (!cq->ip) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail_wc;
-               }
-
-               err = ib_copy_to_udata(udata, &cq->ip->offset,
-                                      sizeof(cq->ip->offset));
-               if (err) {
-                       ret = ERR_PTR(err);
-                       goto bail_ip;
-               }
-       } else
-               cq->ip = NULL;
-
-       spin_lock(&dev->n_cqs_lock);
-       if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
-               spin_unlock(&dev->n_cqs_lock);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_ip;
-       }
-
-       dev->n_cqs_allocated++;
-       spin_unlock(&dev->n_cqs_lock);
-
-       if (cq->ip) {
-               spin_lock_irq(&dev->pending_lock);
-               list_add(&cq->ip->pending_mmaps, &dev->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-       }
-
-       /*
-        * ib_create_cq() will initialize cq->ibcq except for cq->ibcq.cqe.
-        * The number of entries should be >= the number requested or return
-        * an error.
-        */
-       cq->ibcq.cqe = entries;
-       cq->notify = IB_CQ_NONE;
-       cq->triggered = 0;
-       spin_lock_init(&cq->lock);
-       tasklet_init(&cq->comptask, send_complete, (unsigned long)cq);
-       wc->head = 0;
-       wc->tail = 0;
-       cq->queue = wc;
-
-       ret = &cq->ibcq;
-
-       goto done;
-
-bail_ip:
-       kfree(cq->ip);
-bail_wc:
-       vfree(wc);
-bail_cq:
-       kfree(cq);
-done:
-       return ret;
-}
-
-/**
- * ipath_destroy_cq - destroy a completion queue
- * @ibcq: the completion queue to destroy.
- *
- * Returns 0 for success.
- *
- * Called by ib_destroy_cq() in the generic verbs code.
- */
-int ipath_destroy_cq(struct ib_cq *ibcq)
-{
-       struct ipath_ibdev *dev = to_idev(ibcq->device);
-       struct ipath_cq *cq = to_icq(ibcq);
-
-       tasklet_kill(&cq->comptask);
-       spin_lock(&dev->n_cqs_lock);
-       dev->n_cqs_allocated--;
-       spin_unlock(&dev->n_cqs_lock);
-       if (cq->ip)
-               kref_put(&cq->ip->ref, ipath_release_mmap_info);
-       else
-               vfree(cq->queue);
-       kfree(cq);
-
-       return 0;
-}
-
-/**
- * ipath_req_notify_cq - change the notification type for a completion queue
- * @ibcq: the completion queue
- * @notify_flags: the type of notification to request
- *
- * Returns 0 for success.
- *
- * This may be called from interrupt context.  Also called by
- * ib_req_notify_cq() in the generic verbs code.
- */
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
-{
-       struct ipath_cq *cq = to_icq(ibcq);
-       unsigned long flags;
-       int ret = 0;
-
-       spin_lock_irqsave(&cq->lock, flags);
-       /*
-        * Don't change IB_CQ_NEXT_COMP to IB_CQ_SOLICITED but allow
-        * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
-        */
-       if (cq->notify != IB_CQ_NEXT_COMP)
-               cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
-
-       if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
-           cq->queue->head != cq->queue->tail)
-               ret = 1;
-
-       spin_unlock_irqrestore(&cq->lock, flags);
-
-       return ret;
-}
-
-/**
- * ipath_resize_cq - change the size of the CQ
- * @ibcq: the completion queue
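- * @cqe: the requested new number of completion queue entries
- * @udata: user data; when present, receives the new mmap offset of the queue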
- *
- * Returns 0 for success.
- */
-int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
-{
-       struct ipath_cq *cq = to_icq(ibcq);
-       struct ipath_cq_wc *old_wc;
-       struct ipath_cq_wc *wc;
-       u32 head, tail, n;
-       int ret;
-       u32 sz;
-
-       if (cqe < 1 || cqe > ib_ipath_max_cqes) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       /*
-        * Need to use vmalloc() if we want to support large #s of entries.
-        */
-       sz = sizeof(*wc);
-       if (udata && udata->outlen >= sizeof(__u64))
-               sz += sizeof(struct ib_uverbs_wc) * (cqe + 1);
-       else
-               sz += sizeof(struct ib_wc) * (cqe + 1);
-       wc = vmalloc_user(sz);
-       if (!wc) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       /* Check that we can write the offset to mmap. */
-       if (udata && udata->outlen >= sizeof(__u64)) {
-               __u64 offset = 0;
-
-               ret = ib_copy_to_udata(udata, &offset, sizeof(offset));
-               if (ret)
-                       goto bail_free;
-       }
-
-       spin_lock_irq(&cq->lock);
-       /*
-        * Make sure head and tail are sane since they
-        * might be user writable.
-        */
-       old_wc = cq->queue;
-       head = old_wc->head;
-       if (head > (u32) cq->ibcq.cqe)
-               head = (u32) cq->ibcq.cqe;
-       tail = old_wc->tail;
-       if (tail > (u32) cq->ibcq.cqe)
-               tail = (u32) cq->ibcq.cqe;
-       if (head < tail)
-               n = cq->ibcq.cqe + 1 + head - tail;
-       else
-               n = head - tail;
-       if (unlikely((u32)cqe < n)) {
-               ret = -EINVAL;
-               goto bail_unlock;
-       }
-       for (n = 0; tail != head; n++) {
-               if (cq->ip)
-                       wc->uqueue[n] = old_wc->uqueue[tail];
-               else
-                       wc->kqueue[n] = old_wc->kqueue[tail];
-               if (tail == (u32) cq->ibcq.cqe)
-                       tail = 0;
-               else
-                       tail++;
-       }
-       cq->ibcq.cqe = cqe;
-       wc->head = n;
-       wc->tail = 0;
-       cq->queue = wc;
-       spin_unlock_irq(&cq->lock);
-
-       vfree(old_wc);
-
-       if (cq->ip) {
-               struct ipath_ibdev *dev = to_idev(ibcq->device);
-               struct ipath_mmap_info *ip = cq->ip;
-
-               ipath_update_mmap_info(dev, ip, sz, wc);
-
-               /*
-                * Return the offset to mmap.
-                * See ipath_mmap() for details.
-                */
-               if (udata && udata->outlen >= sizeof(__u64)) {
-                       ret = ib_copy_to_udata(udata, &ip->offset,
-                                              sizeof(ip->offset));
-                       if (ret)
-                               goto bail;
-               }
-
-               spin_lock_irq(&dev->pending_lock);
-               if (list_empty(&ip->pending_mmaps))
-                       list_add(&ip->pending_mmaps, &dev->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-       }
-
-       ret = 0;
-       goto bail;
-
-bail_unlock:
-       spin_unlock_irq(&cq->lock);
-bail_free:
-       vfree(wc);
-bail:
-       return ret;
-}
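
The completion-queue code above shares one ring-buffer convention: the queue holds cq->ibcq.cqe + 1 slots, the producer (ipath_cq_enter()) refuses to advance head onto tail, and ipath_resize_cq() computes occupancy with the same wrap-around arithmetic. Below is a minimal stand-alone sketch of just that arithmetic, using hypothetical head/tail/cqe values; it is illustrative, not driver code.

#include <stdio.h>

/* Advance an index over a ring of (cqe + 1) slots, as the CQ code does. */
static unsigned int advance(unsigned int idx, unsigned int cqe)
{
	return (idx >= cqe) ? 0 : idx + 1;
}

/* Number of completions currently queued, given head and tail. */
static unsigned int occupancy(unsigned int head, unsigned int tail, unsigned int cqe)
{
	return (head >= tail) ? head - tail : cqe + 1 + head - tail;
}

int main(void)
{
	unsigned int cqe = 7, head = 2, tail = 5;	/* hypothetical queue state */

	printf("queued entries: %u\n", occupancy(head, tail, cqe));	/* prints 5 */

	/* the producer may not advance head onto tail: that is the overflow case */
	if (advance(head, cqe) == tail)
		printf("queue full, would raise IB_EVENT_CQ_ERR\n");
	else
		printf("room for another completion at slot %u\n", head);
	return 0;
}
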
diff --git a/drivers/staging/rdma/ipath/ipath_debug.h b/drivers/staging/rdma/ipath/ipath_debug.h
deleted file mode 100644 (file)
index 65926cd..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_DEBUG_H
-#define _IPATH_DEBUG_H
-
-#ifndef _IPATH_DEBUGGING       /* debugging enabled or not */
-#define _IPATH_DEBUGGING 1
-#endif
-
-#if _IPATH_DEBUGGING
-
-/*
- * Mask values for debugging.  The scheme allows us to compile out any
- * of the debug tracing stuff, and if compiled in, to enable or disable
- * dynamically.  This can be set at modprobe time also:
- *      modprobe infinipath.ko infinipath_debug=7
- */
-
-#define __IPATH_INFO        0x1        /* generic low verbosity stuff */
-#define __IPATH_DBG         0x2        /* generic debug */
-#define __IPATH_TRSAMPLE    0x8        /* generate trace buffer sample entries */
-/* leave some low verbosity spots open */
-#define __IPATH_VERBDBG     0x40       /* very verbose debug */
-#define __IPATH_PKTDBG      0x80       /* print packet data */
-/* print process startup (init)/exit messages */
-#define __IPATH_PROCDBG     0x100
-/* print mmap/fault stuff, not using VDBG any more */
-#define __IPATH_MMDBG       0x200
-#define __IPATH_ERRPKTDBG   0x400
-#define __IPATH_USER_SEND   0x1000     /* use user mode send */
-#define __IPATH_KERNEL_SEND 0x2000     /* use kernel mode send */
-#define __IPATH_EPKTDBG     0x4000     /* print ethernet packet data */
-#define __IPATH_IPATHDBG    0x10000    /* Ethernet (IPATH) gen debug */
-#define __IPATH_IPATHWARN   0x20000    /* Ethernet (IPATH) warnings */
-#define __IPATH_IPATHERR    0x40000    /* Ethernet (IPATH) errors */
-#define __IPATH_IPATHPD     0x80000    /* Ethernet (IPATH) packet dump */
-#define __IPATH_IPATHTABLE  0x100000   /* Ethernet (IPATH) table dump */
-#define __IPATH_LINKVERBDBG 0x200000   /* very verbose linkchange debug */
-
-#else                          /* _IPATH_DEBUGGING */
-
-/*
- * define all of these even with debugging off, for the few places that do
- * if(infinipath_debug & _IPATH_xyzzy), but in a way that will make the
- * compiler eliminate the code
- */
-
-#define __IPATH_INFO      0x0  /* generic low verbosity stuff */
-#define __IPATH_DBG       0x0  /* generic debug */
-#define __IPATH_TRSAMPLE  0x0  /* generate trace buffer sample entries */
-#define __IPATH_VERBDBG   0x0  /* very verbose debug */
-#define __IPATH_PKTDBG    0x0  /* print packet data */
-#define __IPATH_PROCDBG   0x0  /* process startup (init)/exit messages */
-/* print mmap/fault stuff, not using VDBG any more */
-#define __IPATH_MMDBG     0x0
-#define __IPATH_EPKTDBG   0x0  /* print ethernet packet data */
-#define __IPATH_IPATHDBG  0x0  /* Ethernet (IPATH) gen debug on  */
-#define __IPATH_IPATHWARN 0x0  /* Ethernet (IPATH) warnings on   */
-#define __IPATH_IPATHERR  0x0  /* Ethernet (IPATH) errors on   */
-#define __IPATH_IPATHPD   0x0  /* Ethernet (IPATH) packet dump on   */
-#define __IPATH_IPATHTABLE 0x0 /* Ethernet (IPATH) table dump on   */
-#define __IPATH_LINKVERBDBG 0x0        /* very verbose linkchange debug */
-
-#endif                         /* _IPATH_DEBUGGING */
-
-#define __IPATH_VERBOSEDBG __IPATH_VERBDBG
-
-#endif                         /* _IPATH_DEBUG_H */
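
The debug masks above are meant to be OR'd into a single module parameter and tested with a bitwise AND, so that with _IPATH_DEBUGGING disabled every mask collapses to 0 and the compiler can eliminate the guarded code. Below is a small illustrative sketch of that pattern; the ipath_cdbg() macro shown here is a simplified stand-in, not the driver's actual implementation, and the mask values mirror the header above.

#include <stdio.h>

#define __IPATH_INFO   0x1	/* generic low verbosity stuff */
#define __IPATH_DBG    0x2	/* generic debug */
#define __IPATH_PKTDBG 0x80	/* print packet data */

/* e.g. what "debug=3" at modprobe time would select */
static unsigned int ipath_debug = __IPATH_INFO | __IPATH_DBG;

/* simplified stand-in for the driver's conditional debug print */
#define ipath_cdbg(which, fmt, ...) \
	do { \
		if (ipath_debug & __IPATH_##which) \
			printf("ipath: " fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	ipath_cdbg(DBG, "link state changed to %d\n", 2);	/* printed: DBG bit is set */
	ipath_cdbg(PKTDBG, "dumping %d bytes\n", 64);		/* suppressed: PKTDBG bit clear */
	return 0;
}
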
diff --git a/drivers/staging/rdma/ipath/ipath_diag.c b/drivers/staging/rdma/ipath/ipath_diag.c
deleted file mode 100644 (file)
index 45802e9..0000000
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains support for diagnostic functions.  It is accessed by
- * opening the ipath_diag device, normally minor number 129.  Diagnostic use
- * of the InfiniPath chip may render the chip or board unusable until the
- * driver is unloaded, or in some cases, until the system is rebooted.
- *
- * Accesses to the chip through this interface are not similar to going
- * through the /sys/bus/pci resource mmap interface.
- */
-
-#include <linux/io.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/export.h>
-#include <asm/uaccess.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-
-int ipath_diag_inuse;
-static int diag_set_link;
-
-static int ipath_diag_open(struct inode *in, struct file *fp);
-static int ipath_diag_release(struct inode *in, struct file *fp);
-static ssize_t ipath_diag_read(struct file *fp, char __user *data,
-                              size_t count, loff_t *off);
-static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
-                               size_t count, loff_t *off);
-
-static const struct file_operations diag_file_ops = {
-       .owner = THIS_MODULE,
-       .write = ipath_diag_write,
-       .read = ipath_diag_read,
-       .open = ipath_diag_open,
-       .release = ipath_diag_release,
-       .llseek = default_llseek,
-};
-
-static ssize_t ipath_diagpkt_write(struct file *fp,
-                                  const char __user *data,
-                                  size_t count, loff_t *off);
-
-static const struct file_operations diagpkt_file_ops = {
-       .owner = THIS_MODULE,
-       .write = ipath_diagpkt_write,
-       .llseek = noop_llseek,
-};
-
-static atomic_t diagpkt_count = ATOMIC_INIT(0);
-static struct cdev *diagpkt_cdev;
-static struct device *diagpkt_dev;
-
-int ipath_diag_add(struct ipath_devdata *dd)
-{
-       char name[16];
-       int ret = 0;
-
-       if (atomic_inc_return(&diagpkt_count) == 1) {
-               ret = ipath_cdev_init(IPATH_DIAGPKT_MINOR,
-                                     "ipath_diagpkt", &diagpkt_file_ops,
-                                     &diagpkt_cdev, &diagpkt_dev);
-
-               if (ret) {
-                       ipath_dev_err(dd, "Couldn't create ipath_diagpkt "
-                                     "device: %d", ret);
-                       goto done;
-               }
-       }
-
-       snprintf(name, sizeof(name), "ipath_diag%d", dd->ipath_unit);
-
-       ret = ipath_cdev_init(IPATH_DIAG_MINOR_BASE + dd->ipath_unit, name,
-                             &diag_file_ops, &dd->diag_cdev,
-                             &dd->diag_dev);
-       if (ret)
-               ipath_dev_err(dd, "Couldn't create %s device: %d",
-                             name, ret);
-
-done:
-       return ret;
-}
-
-void ipath_diag_remove(struct ipath_devdata *dd)
-{
-       if (atomic_dec_and_test(&diagpkt_count))
-               ipath_cdev_cleanup(&diagpkt_cdev, &diagpkt_dev);
-
-       ipath_cdev_cleanup(&dd->diag_cdev, &dd->diag_dev);
-}
-
-/**
- * ipath_read_umem64 - read a 64-bit quantity from the chip into user space
- * @dd: the infinipath device
- * @uaddr: the location to store the data in user memory
- * @caddr: the source chip address (full pointer, not offset)
- * @count: number of bytes to copy (multiple of 32 bits)
- *
- * This function also localizes all chip memory accesses.
- * The copy should be written such that we read full cacheline packets
- * from the chip.  This is usually used for a single qword
- *
- * NOTE:  This assumes the chip address is 64-bit aligned.
- */
-static int ipath_read_umem64(struct ipath_devdata *dd, void __user *uaddr,
-                            const void __iomem *caddr, size_t count)
-{
-       const u64 __iomem *reg_addr = caddr;
-       const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
-       int ret;
-
-       /* not very efficient, but it works for now */
-       if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       while (reg_addr < reg_end) {
-               u64 data = readq(reg_addr);
-               if (copy_to_user(uaddr, &data, sizeof(u64))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-               reg_addr++;
-               uaddr += sizeof(u64);
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
-/**
- * ipath_write_umem64 - write a 64-bit quantity to the chip from user space
- * @dd: the infinipath device
- * @caddr: the destination chip address (full pointer, not offset)
- * @uaddr: the source of the data in user memory
- * @count: the number of bytes to copy (multiple of 32 bits)
- *
- * This is usually used for a single qword
- * NOTE:  This assumes the chip address is 64-bit aligned.
- */
-
-static int ipath_write_umem64(struct ipath_devdata *dd, void __iomem *caddr,
-                             const void __user *uaddr, size_t count)
-{
-       u64 __iomem *reg_addr = caddr;
-       const u64 __iomem *reg_end = reg_addr + (count / sizeof(u64));
-       int ret;
-
-       /* not very efficient, but it works for now */
-       if (reg_addr < dd->ipath_kregbase || reg_end > dd->ipath_kregend) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       while (reg_addr < reg_end) {
-               u64 data;
-               if (copy_from_user(&data, uaddr, sizeof(data))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-               writeq(data, reg_addr);
-
-               reg_addr++;
-               uaddr += sizeof(u64);
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
-/**
- * ipath_read_umem32 - read a 32-bit quantity from the chip into user space
- * @dd: the infinipath device
- * @uaddr: the location to store the data in user memory
- * @caddr: the source chip address (full pointer, not offset)
- * @count: number of bytes to copy
- *
- * read 32 bit values, not 64 bit; for memories that only
- * support 32 bit reads; usually a single dword.
- */
-static int ipath_read_umem32(struct ipath_devdata *dd, void __user *uaddr,
-                            const void __iomem *caddr, size_t count)
-{
-       const u32 __iomem *reg_addr = caddr;
-       const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
-       int ret;
-
-       if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
-           reg_end > (u32 __iomem *) dd->ipath_kregend) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       /* not very efficient, but it works for now */
-       while (reg_addr < reg_end) {
-               u32 data = readl(reg_addr);
-               if (copy_to_user(uaddr, &data, sizeof(data))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-
-               reg_addr++;
-               uaddr += sizeof(u32);
-
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
-/**
- * ipath_write_umem32 - write a 32-bit quantity to the chip from user space
- * @dd: the infinipath device
- * @caddr: the destination chip address (full pointer, not offset)
- * @uaddr: the source of the data in user memory
- * @count: number of bytes to copy
- *
- * write 32 bit values, not 64 bit; for memories that only
- * support 32 bit write; usually a single dword.
- */
-
-static int ipath_write_umem32(struct ipath_devdata *dd, void __iomem *caddr,
-                             const void __user *uaddr, size_t count)
-{
-       u32 __iomem *reg_addr = caddr;
-       const u32 __iomem *reg_end = reg_addr + (count / sizeof(u32));
-       int ret;
-
-       if (reg_addr < (u32 __iomem *) dd->ipath_kregbase ||
-           reg_end > (u32 __iomem *) dd->ipath_kregend) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       while (reg_addr < reg_end) {
-               u32 data;
-               if (copy_from_user(&data, uaddr, sizeof(data))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-               writel(data, reg_addr);
-
-               reg_addr++;
-               uaddr += sizeof(u32);
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
-static int ipath_diag_open(struct inode *in, struct file *fp)
-{
-       int unit = iminor(in) - IPATH_DIAG_MINOR_BASE;
-       struct ipath_devdata *dd;
-       int ret;
-
-       mutex_lock(&ipath_mutex);
-
-       if (ipath_diag_inuse) {
-               ret = -EBUSY;
-               goto bail;
-       }
-
-       dd = ipath_lookup(unit);
-
-       if (dd == NULL || !(dd->ipath_flags & IPATH_PRESENT) ||
-           !dd->ipath_kregbase) {
-               ret = -ENODEV;
-               goto bail;
-       }
-
-       fp->private_data = dd;
-       ipath_diag_inuse = -2;
-       diag_set_link = 0;
-       ret = 0;
-
-       /* Only expose a way to reset the device if we
-          make it into diag mode. */
-       ipath_expose_reset(&dd->pcidev->dev);
-
-bail:
-       mutex_unlock(&ipath_mutex);
-
-       return ret;
-}
-
-/**
- * ipath_diagpkt_write - write an IB packet
- * @fp: the diag data device file pointer
- * @data: ipath_diag_pkt structure saying where to get the packet
- * @count: size of data to write
- * @off: unused by this code
- */
-static ssize_t ipath_diagpkt_write(struct file *fp,
-                                  const char __user *data,
-                                  size_t count, loff_t *off)
-{
-       u32 __iomem *piobuf;
-       u32 plen, pbufn, maxlen_reserve;
-       struct ipath_diag_pkt odp;
-       struct ipath_diag_xpkt dp;
-       u32 *tmpbuf = NULL;
-       struct ipath_devdata *dd;
-       ssize_t ret = 0;
-       u64 val;
-       u32 l_state, lt_state; /* LinkState, LinkTrainingState */
-
-
-       if (count == sizeof(dp)) {
-               if (copy_from_user(&dp, data, sizeof(dp))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-       } else if (count == sizeof(odp)) {
-               if (copy_from_user(&odp, data, sizeof(odp))) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-               dp.len = odp.len;
-               dp.unit = odp.unit;
-               dp.data = odp.data;
-               dp.pbc_wd = 0;
-       } else {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       /* send count must be an exact number of dwords */
-       if (dp.len & 3) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       plen = dp.len >> 2;
-
-       dd = ipath_lookup(dp.unit);
-       if (!dd || !(dd->ipath_flags & IPATH_PRESENT) ||
-           !dd->ipath_kregbase) {
-               ipath_cdbg(VERBOSE, "illegal unit %u for diag data send\n",
-                          dp.unit);
-               ret = -ENODEV;
-               goto bail;
-       }
-
-       if (ipath_diag_inuse && !diag_set_link &&
-           !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-               diag_set_link = 1;
-               ipath_cdbg(VERBOSE, "Trying to set link active for "
-                          "diag pkt\n");
-               ipath_set_linkstate(dd, IPATH_IB_LINKARM);
-               ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
-       }
-
-       if (!(dd->ipath_flags & IPATH_INITTED)) {
-               /* no hardware, freeze, etc. */
-               ipath_cdbg(VERBOSE, "unit %u not usable\n", dd->ipath_unit);
-               ret = -ENODEV;
-               goto bail;
-       }
-       /*
-        * Want to skip check for l_state if using custom PBC,
-        * because we might be trying to force an SM packet out.
-        * first-cut, skip _all_ state checking in that case.
-        */
-       val = ipath_ib_state(dd, dd->ipath_lastibcstat);
-       lt_state = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
-       l_state = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
-       if (!dp.pbc_wd && (lt_state != INFINIPATH_IBCS_LT_STATE_LINKUP ||
-           (val != dd->ib_init && val != dd->ib_arm &&
-           val != dd->ib_active))) {
-               ipath_cdbg(VERBOSE, "unit %u not ready (state %llx)\n",
-                          dd->ipath_unit, (unsigned long long) val);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       /*
-        * need total length before first word written, plus 2 Dwords. One Dword
-        * is for padding so we get the full user data when not aligned on
-        * a word boundary. The other Dword is to make sure we have room for the
-        * ICRC which gets tacked on later.
-        */
-       maxlen_reserve = 2 * sizeof(u32);
-       if (dp.len > dd->ipath_ibmaxlen - maxlen_reserve) {
-               ipath_dbg("Pkt len 0x%x > ibmaxlen %x\n",
-                         dp.len, dd->ipath_ibmaxlen);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       plen = sizeof(u32) + dp.len;
-
-       tmpbuf = vmalloc(plen);
-       if (!tmpbuf) {
-               dev_info(&dd->pcidev->dev, "Unable to allocate tmp buffer, "
-                        "failing\n");
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       if (copy_from_user(tmpbuf,
-                          (const void __user *) (unsigned long) dp.data,
-                          dp.len)) {
-               ret = -EFAULT;
-               goto bail;
-       }
-
-       plen >>= 2;             /* in dwords */
-
-       piobuf = ipath_getpiobuf(dd, plen, &pbufn);
-       if (!piobuf) {
-               ipath_cdbg(VERBOSE, "No PIO buffers avail for unit %u\n",
-                          dd->ipath_unit);
-               ret = -EBUSY;
-               goto bail;
-       }
-       /* disarm it just to be extra sure */
-       ipath_disarm_piobufs(dd, pbufn, 1);
-
-       if (ipath_debug & __IPATH_PKTDBG)
-               ipath_cdbg(VERBOSE, "unit %u 0x%x+1w pio%d\n",
-                          dd->ipath_unit, plen - 1, pbufn);
-
-       if (dp.pbc_wd == 0)
-               dp.pbc_wd = plen;
-       writeq(dp.pbc_wd, piobuf);
-       /*
-        * Copy all but the trigger word, then flush, so it's written
-        * to chip before trigger word, then write trigger word, then
-        * flush again, so packet is sent.
-        */
-       if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
-               ipath_flush_wc();
-               __iowrite32_copy(piobuf + 2, tmpbuf, plen - 1);
-               ipath_flush_wc();
-               __raw_writel(tmpbuf[plen - 1], piobuf + plen + 1);
-       } else
-               __iowrite32_copy(piobuf + 2, tmpbuf, plen);
-
-       ipath_flush_wc();
-
-       ret = sizeof(dp);
-
-bail:
-       vfree(tmpbuf);
-       return ret;
-}
-
-static int ipath_diag_release(struct inode *in, struct file *fp)
-{
-       mutex_lock(&ipath_mutex);
-       ipath_diag_inuse = 0;
-       fp->private_data = NULL;
-       mutex_unlock(&ipath_mutex);
-       return 0;
-}
-
-static ssize_t ipath_diag_read(struct file *fp, char __user *data,
-                              size_t count, loff_t *off)
-{
-       struct ipath_devdata *dd = fp->private_data;
-       void __iomem *kreg_base;
-       ssize_t ret;
-
-       kreg_base = dd->ipath_kregbase;
-
-       if (count == 0)
-               ret = 0;
-       else if ((count % 4) || (*off % 4))
-               /* address or length is not 32-bit aligned, hence invalid */
-               ret = -EINVAL;
-       else if (ipath_diag_inuse < 1 && (*off || count != 8))
-               ret = -EINVAL;  /* prevent cat /dev/ipath_diag* */
-       else if ((count % 8) || (*off % 8))
-               /* address or length not 64-bit aligned; do 32-bit reads */
-               ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
-       else
-               ret = ipath_read_umem64(dd, data, kreg_base + *off, count);
-
-       if (ret >= 0) {
-               *off += count;
-               ret = count;
-               if (ipath_diag_inuse == -2)
-                       ipath_diag_inuse++;
-       }
-
-       return ret;
-}
-
-static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
-                               size_t count, loff_t *off)
-{
-       struct ipath_devdata *dd = fp->private_data;
-       void __iomem *kreg_base;
-       ssize_t ret;
-
-       kreg_base = dd->ipath_kregbase;
-
-       if (count == 0)
-               ret = 0;
-       else if ((count % 4) || (*off % 4))
-               /* address or length is not 32-bit aligned, hence invalid */
-               ret = -EINVAL;
-       else if ((ipath_diag_inuse == -1 && (*off || count != 8)) ||
-                ipath_diag_inuse == -2)  /* read qw off 0, write qw off 0 */
-               ret = -EINVAL;  /* before any other write allowed */
-       else if ((count % 8) || (*off % 8))
-               /* address or length not 64-bit aligned; do 32-bit writes */
-               ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
-       else
-               ret = ipath_write_umem64(dd, kreg_base + *off, data, count);
-
-       if (ret >= 0) {
-               *off += count;
-               ret = count;
-               if (ipath_diag_inuse == -1)
-                       ipath_diag_inuse = 1; /* all read/write OK now */
-       }
-
-       return ret;
-}
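
ipath_diag_read() and ipath_diag_write() above pick an access width from the alignment of the requested offset and length: anything not 32-bit aligned is rejected, anything 32-bit but not 64-bit aligned goes through the umem32 helpers, and fully 64-bit-aligned requests use the umem64 helpers. Below is a minimal sketch of just that dispatch, with hypothetical (count, offset) pairs; it only classifies the request and performs no chip access.

#include <stdio.h>
#include <stddef.h>

/* Mirror of the alignment checks in ipath_diag_read()/ipath_diag_write(). */
static const char *diag_access_width(size_t count, long long off)
{
	if (count == 0)
		return "nothing to do";
	if ((count % 4) || (off % 4))
		return "rejected: not 32-bit aligned";
	if ((count % 8) || (off % 8))
		return "32-bit chip accesses";
	return "64-bit chip accesses";
}

int main(void)
{
	/* hypothetical (count, offset) pairs */
	printf("%s\n", diag_access_width(8, 0));	/* 64-bit chip accesses */
	printf("%s\n", diag_access_width(4, 12));	/* 32-bit chip accesses */
	printf("%s\n", diag_access_width(6, 0));	/* rejected: not 32-bit aligned */
	return 0;
}
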
diff --git a/drivers/staging/rdma/ipath/ipath_dma.c b/drivers/staging/rdma/ipath/ipath_dma.c
deleted file mode 100644 (file)
index 123a8c0..0000000
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) 2006 QLogic, Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/scatterlist.h>
-#include <linux/gfp.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_verbs.h"
-
-#define BAD_DMA_ADDRESS ((u64) 0)
-
-/*
- * The following functions implement driver specific replacements
- * for the ib_dma_*() functions.
- *
- * These functions return kernel virtual addresses instead of
- * device bus addresses since the driver uses the CPU to copy
- * data instead of using hardware DMA.
- */
-
-static int ipath_mapping_error(struct ib_device *dev, u64 dma_addr)
-{
-       return dma_addr == BAD_DMA_ADDRESS;
-}
-
-static u64 ipath_dma_map_single(struct ib_device *dev,
-                               void *cpu_addr, size_t size,
-                               enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-       return (u64) cpu_addr;
-}
-
-static void ipath_dma_unmap_single(struct ib_device *dev,
-                                  u64 addr, size_t size,
-                                  enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-}
-
-static u64 ipath_dma_map_page(struct ib_device *dev,
-                             struct page *page,
-                             unsigned long offset,
-                             size_t size,
-                             enum dma_data_direction direction)
-{
-       u64 addr;
-
-       BUG_ON(!valid_dma_direction(direction));
-
-       if (offset + size > PAGE_SIZE) {
-               addr = BAD_DMA_ADDRESS;
-               goto done;
-       }
-
-       addr = (u64) page_address(page);
-       if (addr)
-               addr += offset;
-       /* TODO: handle highmem pages */
-
-done:
-       return addr;
-}
-
-static void ipath_dma_unmap_page(struct ib_device *dev,
-                                u64 addr, size_t size,
-                                enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-}
-
-static int ipath_map_sg(struct ib_device *dev, struct scatterlist *sgl,
-                       int nents, enum dma_data_direction direction)
-{
-       struct scatterlist *sg;
-       u64 addr;
-       int i;
-       int ret = nents;
-
-       BUG_ON(!valid_dma_direction(direction));
-
-       for_each_sg(sgl, sg, nents, i) {
-               addr = (u64) page_address(sg_page(sg));
-               /* TODO: handle highmem pages */
-               if (!addr) {
-                       ret = 0;
-                       break;
-               }
-               sg->dma_address = addr + sg->offset;
-#ifdef CONFIG_NEED_SG_DMA_LENGTH
-               sg->dma_length = sg->length;
-#endif
-       }
-       return ret;
-}
-
-static void ipath_unmap_sg(struct ib_device *dev,
-                          struct scatterlist *sg, int nents,
-                          enum dma_data_direction direction)
-{
-       BUG_ON(!valid_dma_direction(direction));
-}
-
-static void ipath_sync_single_for_cpu(struct ib_device *dev,
-                                     u64 addr,
-                                     size_t size,
-                                     enum dma_data_direction dir)
-{
-}
-
-static void ipath_sync_single_for_device(struct ib_device *dev,
-                                        u64 addr,
-                                        size_t size,
-                                        enum dma_data_direction dir)
-{
-}
-
-static void *ipath_dma_alloc_coherent(struct ib_device *dev, size_t size,
-                                     u64 *dma_handle, gfp_t flag)
-{
-       struct page *p;
-       void *addr = NULL;
-
-       p = alloc_pages(flag, get_order(size));
-       if (p)
-               addr = page_address(p);
-       if (dma_handle)
-               *dma_handle = (u64) addr;
-       return addr;
-}
-
-static void ipath_dma_free_coherent(struct ib_device *dev, size_t size,
-                                   void *cpu_addr, u64 dma_handle)
-{
-       free_pages((unsigned long) cpu_addr, get_order(size));
-}
-
-struct ib_dma_mapping_ops ipath_dma_mapping_ops = {
-       .mapping_error = ipath_mapping_error,
-       .map_single = ipath_dma_map_single,
-       .unmap_single = ipath_dma_unmap_single,
-       .map_page = ipath_dma_map_page,
-       .unmap_page = ipath_dma_unmap_page,
-       .map_sg = ipath_map_sg,
-       .unmap_sg = ipath_unmap_sg,
-       .sync_single_for_cpu = ipath_sync_single_for_cpu,
-       .sync_single_for_device = ipath_sync_single_for_device,
-       .alloc_coherent = ipath_dma_alloc_coherent,
-       .free_coherent = ipath_dma_free_coherent
-};
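
As the comment block in ipath_dma.c notes, these ib_dma_mapping_ops return kernel virtual addresses because the driver copies data with the CPU instead of programming hardware DMA; a mapping only "fails" when the request would run past the page or when page_address() cannot produce a direct-mapped address (highmem handling is left as a TODO). Below is a minimal sketch of the same bounds check in plain C, using a hypothetical page size and a hypothetical direct-mapped address; it is illustrative only.

#include <stdio.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u
#define BAD_DMA_ADDRESS  ((uint64_t)0)

/*
 * Same shape as ipath_dma_map_page(): refuse anything that would run past
 * the page, otherwise return the (virtual) base address plus the offset.
 */
static uint64_t map_page_sketch(uint64_t page_vaddr, unsigned long offset, size_t size)
{
	if (offset + size > SKETCH_PAGE_SIZE)
		return BAD_DMA_ADDRESS;
	return page_vaddr + offset;
}

int main(void)
{
	uint64_t page = 0xffff880012345000ull;	/* hypothetical direct-mapped page address */

	printf("ok:  0x%llx\n", (unsigned long long)map_page_sketch(page, 128, 256));
	printf("bad: 0x%llx\n", (unsigned long long)map_page_sketch(page, 4000, 256));	/* crosses the page */
	return 0;
}
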
diff --git a/drivers/staging/rdma/ipath/ipath_driver.c b/drivers/staging/rdma/ipath/ipath_driver.c
deleted file mode 100644 (file)
index 2ab22f9..0000000
+++ /dev/null
@@ -1,2784 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/spinlock.h>
-#include <linux/idr.h>
-#include <linux/pci.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/bitmap.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#ifdef CONFIG_X86_64
-#include <asm/pat.h>
-#endif
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-
-static void ipath_update_pio_bufs(struct ipath_devdata *);
-
-const char *ipath_get_unit_name(int unit)
-{
-       static char iname[16];
-       snprintf(iname, sizeof iname, "infinipath%u", unit);
-       return iname;
-}
-
-#define DRIVER_LOAD_MSG "QLogic " IPATH_DRV_NAME " loaded: "
-#define PFX IPATH_DRV_NAME ": "
-
-/*
- * The size has to be longer than this string, so we can append
- * board/chip information to it in the init code.
- */
-const char ib_ipath_version[] = IPATH_IDSTR "\n";
-
-static struct idr unit_table;
-DEFINE_SPINLOCK(ipath_devs_lock);
-LIST_HEAD(ipath_dev_list);
-
-wait_queue_head_t ipath_state_wait;
-
-unsigned ipath_debug = __IPATH_INFO;
-
-module_param_named(debug, ipath_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, "mask for debug prints");
-EXPORT_SYMBOL_GPL(ipath_debug);
-
-unsigned ipath_mtu4096 = 1; /* max 4KB IB mtu by default, if supported */
-module_param_named(mtu4096, ipath_mtu4096, uint, S_IRUGO);
-MODULE_PARM_DESC(mtu4096, "enable MTU of 4096 bytes, if supported");
-
-static unsigned ipath_hol_timeout_ms = 13000;
-module_param_named(hol_timeout_ms, ipath_hol_timeout_ms, uint, S_IRUGO);
-MODULE_PARM_DESC(hol_timeout_ms,
-       "duration of user app suspension after link failure");
-
-unsigned ipath_linkrecovery = 1;
-module_param_named(linkrecovery, ipath_linkrecovery, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(linkrecovery, "enable workaround for link recovery issue");
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("QLogic <support@qlogic.com>");
-MODULE_DESCRIPTION("QLogic InfiniPath driver");
-
-/*
- * Table to translate the LINKTRAININGSTATE portion of
- * IBCStatus to a human-readable form.
- */
-const char *ipath_ibcstatus_str[] = {
-       "Disabled",
-       "LinkUp",
-       "PollActive",
-       "PollQuiet",
-       "SleepDelay",
-       "SleepQuiet",
-       "LState6",              /* unused */
-       "LState7",              /* unused */
-       "CfgDebounce",
-       "CfgRcvfCfg",
-       "CfgWaitRmt",
-       "CfgIdle",
-       "RecovRetrain",
-       "CfgTxRevLane",         /* unused before IBA7220 */
-       "RecovWaitRmt",
-       "RecovIdle",
-       /* below were added for IBA7220 */
-       "CfgEnhanced",
-       "CfgTest",
-       "CfgWaitRmtTest",
-       "CfgWaitCfgEnhanced",
-       "SendTS_T",
-       "SendTstIdles",
-       "RcvTS_T",
-       "SendTst_TS1s",
-       "LTState18", "LTState19", "LTState1A", "LTState1B",
-       "LTState1C", "LTState1D", "LTState1E", "LTState1F"
-};
-
-static void ipath_remove_one(struct pci_dev *);
-static int ipath_init_one(struct pci_dev *, const struct pci_device_id *);
-
-/* Only needed for registration, nothing else needs this info */
-#define PCI_VENDOR_ID_PATHSCALE 0x1fc1
-#define PCI_DEVICE_ID_INFINIPATH_HT 0xd
-
-/* Number of seconds before our card status check...  */
-#define STATUS_TIMEOUT 60
-
-static const struct pci_device_id ipath_pci_tbl[] = {
-       { PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_INFINIPATH_HT) },
-       { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);
-
-static struct pci_driver ipath_driver = {
-       .name = IPATH_DRV_NAME,
-       .probe = ipath_init_one,
-       .remove = ipath_remove_one,
-       .id_table = ipath_pci_tbl,
-       .driver = {
-               .groups = ipath_driver_attr_groups,
-       },
-};
-
-static inline void read_bars(struct ipath_devdata *dd, struct pci_dev *dev,
-                            u32 *bar0, u32 *bar1)
-{
-       int ret;
-
-       ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, bar0);
-       if (ret)
-               ipath_dev_err(dd, "failed to read bar0 before enable: "
-                             "error %d\n", -ret);
-
-       ret = pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, bar1);
-       if (ret)
-               ipath_dev_err(dd, "failed to read bar1 before enable: "
-                             "error %d\n", -ret);
-
-       ipath_dbg("Read bar0 %x bar1 %x\n", *bar0, *bar1);
-}
-
-static void ipath_free_devdata(struct pci_dev *pdev,
-                              struct ipath_devdata *dd)
-{
-       unsigned long flags;
-
-       pci_set_drvdata(pdev, NULL);
-
-       if (dd->ipath_unit != -1) {
-               spin_lock_irqsave(&ipath_devs_lock, flags);
-               idr_remove(&unit_table, dd->ipath_unit);
-               list_del(&dd->ipath_list);
-               spin_unlock_irqrestore(&ipath_devs_lock, flags);
-       }
-       vfree(dd);
-}
-
-static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)
-{
-       unsigned long flags;
-       struct ipath_devdata *dd;
-       int ret;
-
-       dd = vzalloc(sizeof(*dd));
-       if (!dd) {
-               dd = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-       dd->ipath_unit = -1;
-
-       idr_preload(GFP_KERNEL);
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-
-       ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Could not allocate unit ID: error %d\n", -ret);
-               ipath_free_devdata(pdev, dd);
-               dd = ERR_PTR(ret);
-               goto bail_unlock;
-       }
-       dd->ipath_unit = ret;
-
-       dd->pcidev = pdev;
-       pci_set_drvdata(pdev, dd);
-
-       list_add(&dd->ipath_list, &ipath_dev_list);
-
-bail_unlock:
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-       idr_preload_end();
-bail:
-       return dd;
-}
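The routine above uses the standard idr_preload()/idr_alloc() pairing: memory is preallocated with GFP_KERNEL before the spinlock is taken, and the ID itself is allocated with GFP_NOWAIT while the lock is held. Below is a minimal kernel-style sketch of just that pattern; the names my_table, my_lock and my_assign_id are placeholders, and it is a fragment rather than a standalone program.

#include <linux/idr.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_table);
static DEFINE_SPINLOCK(my_lock);

/* Assign a small integer ID to obj; returns the ID or a negative errno. */
static int my_assign_id(void *obj)
{
        int id;

        idr_preload(GFP_KERNEL);                /* may sleep: preallocate here */
        spin_lock(&my_lock);
        id = idr_alloc(&my_table, obj, 0, 0, GFP_NOWAIT); /* lock held: no sleeping */
        spin_unlock(&my_lock);
        idr_preload_end();

        return id;
}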
-
-static inline struct ipath_devdata *__ipath_lookup(int unit)
-{
-       return idr_find(&unit_table, unit);
-}
-
-struct ipath_devdata *ipath_lookup(int unit)
-{
-       struct ipath_devdata *dd;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-       dd = __ipath_lookup(unit);
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-       return dd;
-}
-
-int ipath_count_units(int *npresentp, int *nupp, int *maxportsp)
-{
-       int nunits, npresent, nup;
-       struct ipath_devdata *dd;
-       unsigned long flags;
-       int maxports;
-
-       nunits = npresent = nup = maxports = 0;
-
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-
-       list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
-               nunits++;
-               if ((dd->ipath_flags & IPATH_PRESENT) && dd->ipath_kregbase)
-                       npresent++;
-               if (dd->ipath_lid &&
-                   !(dd->ipath_flags & (IPATH_DISABLED | IPATH_LINKDOWN
-                                        | IPATH_LINKUNK)))
-                       nup++;
-               if (dd->ipath_cfgports > maxports)
-                       maxports = dd->ipath_cfgports;
-       }
-
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-       if (npresentp)
-               *npresentp = npresent;
-       if (nupp)
-               *nupp = nup;
-       if (maxportsp)
-               *maxportsp = maxports;
-
-       return nunits;
-}
-
-/*
- * These next two routines are placeholders in case we don't have per-arch
- * code for controlling write combining.  If explicit control of write
- * combining is not available, performance will probably be awful.
- */
-
-int __attribute__((weak)) ipath_enable_wc(struct ipath_devdata *dd)
-{
-       return -EOPNOTSUPP;
-}
-
-void __attribute__((weak)) ipath_disable_wc(struct ipath_devdata *dd)
-{
-}
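These two placeholders rely on weak linkage, so an architecture-specific file can provide a strong definition that silently replaces them. A small self-contained illustration of the same technique in user-space C; the function name enable_wc is invented for the example.

/* weak.c: build with gcc weak.c -o weak */
#include <stdio.h>

/* Default used only when no strong definition is linked in elsewhere. */
int __attribute__((weak)) enable_wc(void)
{
        return -1;      /* "not supported" */
}

int main(void)
{
        printf("enable_wc() -> %d\n", enable_wc());
        return 0;
}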
-
-/*
- * Perform a PIO buffer bandwidth write test, to verify proper system
- * configuration.  Even when all the setup calls work, occasionally
- * BIOS or other issues can prevent write combining from working, or
- * can cause other bandwidth problems to the chip.
- *
- * This test simply writes the same buffer over and over again, and
- * measures close to the peak bandwidth to the chip (not testing
- * data bandwidth to the wire).   On chips that use an address-based
- * trigger to send packets to the wire, this is easy.  On chips that
- * use a count to trigger, we want to make sure that the packet doesn't
- * go out on the wire, or trigger flow control checks.
- */
-static void ipath_verify_pioperf(struct ipath_devdata *dd)
-{
-       u32 pbnum, cnt, lcnt;
-       u32 __iomem *piobuf;
-       u32 *addr;
-       u64 msecs, emsecs;
-
-       piobuf = ipath_getpiobuf(dd, 0, &pbnum);
-       if (!piobuf) {
-               dev_info(&dd->pcidev->dev,
-                       "No PIObufs for checking perf, skipping\n");
-               return;
-       }
-
-       /*
-        * Enough to give us a reasonable test, less than piobuf size, and
-        * likely a multiple of the store buffer length.
-        */
-       cnt = 1024;
-
-       addr = vmalloc(cnt);
-       if (!addr) {
-               dev_info(&dd->pcidev->dev,
-                       "Couldn't get memory for checking PIO perf,"
-                       " skipping\n");
-               goto done;
-       }
-
-       preempt_disable();  /* we want reasonably accurate elapsed time */
-       msecs = 1 + jiffies_to_msecs(jiffies);
-       for (lcnt = 0; lcnt < 10000U; lcnt++) {
-               /* wait until we cross msec boundary */
-               if (jiffies_to_msecs(jiffies) >= msecs)
-                       break;
-               udelay(1);
-       }
-
-       ipath_disable_armlaunch(dd);
-
-       /*
-        * length 0, no dwords actually sent, and mark as VL15
-        * on chips where that may matter (due to IB flowcontrol)
-        */
-       if ((dd->ipath_flags & IPATH_HAS_PBC_CNT))
-               writeq(1UL << 63, piobuf);
-       else
-               writeq(0, piobuf);
-       ipath_flush_wc();
-
-       /*
-        * this is only roughly accurate, since even with preempt we
-        * still take interrupts that could take a while.   Running for
-        * >= 5 msec seems to get us "close enough" to accurate values
-        */
-       msecs = jiffies_to_msecs(jiffies);
-       for (emsecs = lcnt = 0; emsecs <= 5UL; lcnt++) {
-               __iowrite32_copy(piobuf + 64, addr, cnt >> 2);
-               emsecs = jiffies_to_msecs(jiffies) - msecs;
-       }
-
-       /* 1 GiB/sec, slightly over IB SDR line rate */
-       if (lcnt < (emsecs * 1024U))
-               ipath_dev_err(dd,
-                       "Performance problem: bandwidth to PIO buffers is "
-                       "only %u MiB/sec\n",
-                       lcnt / (u32) emsecs);
-       else
-               ipath_dbg("PIO buffer bandwidth %u MiB/sec is OK\n",
-                       lcnt / (u32) emsecs);
-
-       preempt_enable();
-
-       vfree(addr);
-
-done:
-       /* disarm piobuf, so it's available again */
-       ipath_disarm_piobufs(dd, pbnum, 1);
-       ipath_enable_armlaunch(dd);
-}
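The bandwidth estimate above is simple arithmetic: each loop iteration copies cnt = 1024 bytes, so lcnt iterations in emsecs milliseconds is roughly lcnt/emsecs MiB/sec (1024 bytes per millisecond is about 1 MiB/sec, ignoring the small 1000-versus-1024 difference, as the driver does), and the check lcnt < emsecs * 1024U is therefore a 1 GiB/sec threshold. A tiny standalone sketch of that arithmetic with made-up numbers:

/* mibps.c: build with gcc mibps.c -o mibps */
#include <stdio.h>

/* lcnt copies of 1024 bytes in emsecs milliseconds ~= lcnt/emsecs MiB/sec */
static unsigned approx_mibps(unsigned lcnt, unsigned emsecs)
{
        return lcnt / emsecs;
}

int main(void)
{
        /* e.g. 10240 copies in 5 ms -> ~2048 MiB/sec, above the 1024 MiB/sec bar */
        printf("%u MiB/sec\n", approx_mibps(10240, 5));
        return 0;
}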
-
-static void cleanup_device(struct ipath_devdata *dd);
-
-static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       int ret, len, j;
-       struct ipath_devdata *dd;
-       unsigned long long addr;
-       u32 bar0 = 0, bar1 = 0;
-
-#ifdef CONFIG_X86_64
-       if (pat_enabled()) {
-               pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
-               ret = -ENODEV;
-               goto bail;
-       }
-#endif
-
-       dd = ipath_alloc_devdata(pdev);
-       if (IS_ERR(dd)) {
-               ret = PTR_ERR(dd);
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Could not allocate devdata: error %d\n", -ret);
-               goto bail;
-       }
-
-       ipath_cdbg(VERBOSE, "initializing unit #%u\n", dd->ipath_unit);
-
-       ret = pci_enable_device(pdev);
-       if (ret) {
-               /* This can happen iff:
-                *
-                * We did a chip reset, and then failed to reprogram the
-                * BAR, or the chip reset due to an internal error.  We then
-                * unloaded the driver and reloaded it.
-                *
-                * Both reset cases set the BAR back to initial state.  For
-                * the latter case, the AER sticky error bit at offset 0x718
-                * should be set, but the Linux kernel doesn't yet know
-                * about that, it appears.  If the original BAR was retained
-                * in the kernel data structures, this may be OK.
-                */
-               ipath_dev_err(dd, "enable unit %d failed: error %d\n",
-                             dd->ipath_unit, -ret);
-               goto bail_devdata;
-       }
-       addr = pci_resource_start(pdev, 0);
-       len = pci_resource_len(pdev, 0);
-       ipath_cdbg(VERBOSE, "regbase (0) %llx len %d irq %d, vend %x/%x "
-                  "driver_data %lx\n", addr, len, pdev->irq, ent->vendor,
-                  ent->device, ent->driver_data);
-
-       read_bars(dd, pdev, &bar0, &bar1);
-
-       if (!bar1 && !(bar0 & ~0xf)) {
-               if (addr) {
-                       dev_info(&pdev->dev, "BAR is 0 (probable RESET), "
-                                "rewriting as %llx\n", addr);
-                       ret = pci_write_config_dword(
-                               pdev, PCI_BASE_ADDRESS_0, addr);
-                       if (ret) {
-                               ipath_dev_err(dd, "rewrite of BAR0 "
-                                             "failed: err %d\n", -ret);
-                               goto bail_disable;
-                       }
-                       ret = pci_write_config_dword(
-                               pdev, PCI_BASE_ADDRESS_1, addr >> 32);
-                       if (ret) {
-                               ipath_dev_err(dd, "rewrite of BAR1 "
-                                             "failed: err %d\n", -ret);
-                               goto bail_disable;
-                       }
-               } else {
-                       ipath_dev_err(dd, "BAR is 0 (probable RESET), "
-                                     "not usable until reboot\n");
-                       ret = -ENODEV;
-                       goto bail_disable;
-               }
-       }
-
-       ret = pci_request_regions(pdev, IPATH_DRV_NAME);
-       if (ret) {
-               dev_info(&pdev->dev, "pci_request_regions unit %u fails: "
-                        "err %d\n", dd->ipath_unit, -ret);
-               goto bail_disable;
-       }
-
-       ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
-       if (ret) {
-               /*
-                * If the 64 bit setup fails, try 32 bit.  Some systems
-                * do not set up 64 bit maps when 2GB or less memory is
-                * installed.
-                */
-               ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (ret) {
-                       dev_info(&pdev->dev,
-                               "Unable to set DMA mask for unit %u: %d\n",
-                               dd->ipath_unit, ret);
-                       goto bail_regions;
-               } else {
-                       ipath_dbg("No 64bit DMA mask, used 32 bit mask\n");
-                       ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-                       if (ret)
-                               dev_info(&pdev->dev,
-                                       "Unable to set DMA consistent mask "
-                                       "for unit %u: %d\n",
-                                       dd->ipath_unit, ret);
-
-               }
-       } else {
-               ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-               if (ret)
-                       dev_info(&pdev->dev,
-                               "Unable to set DMA consistent mask "
-                               "for unit %u: %d\n",
-                               dd->ipath_unit, ret);
-       }
-
-       pci_set_master(pdev);
-
-       /*
-        * Save BARs to rewrite after device reset.  Save all 64 bits of
-        * BAR, just in case.
-        */
-       dd->ipath_pcibar0 = addr;
-       dd->ipath_pcibar1 = addr >> 32;
-       dd->ipath_deviceid = ent->device;       /* save for later use */
-       dd->ipath_vendorid = ent->vendor;
-
-       /* setup the chip-specific functions, as early as possible. */
-       switch (ent->device) {
-       case PCI_DEVICE_ID_INFINIPATH_HT:
-               ipath_init_iba6110_funcs(dd);
-               break;
-
-       default:
-               ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
-                             "failing\n", ent->device);
-               return -ENODEV;
-       }
-
-       for (j = 0; j < 6; j++) {
-               if (!pdev->resource[j].start)
-                       continue;
-               ipath_cdbg(VERBOSE, "BAR %d %pR, len %llx\n",
-                          j, &pdev->resource[j],
-                          (unsigned long long)pci_resource_len(pdev, j));
-       }
-
-       if (!addr) {
-               ipath_dev_err(dd, "No valid address in BAR 0!\n");
-               ret = -ENODEV;
-               goto bail_regions;
-       }
-
-       dd->ipath_pcirev = pdev->revision;
-
-#if defined(__powerpc__)
-       /* There isn't a generic way to specify writethrough mappings */
-       dd->ipath_kregbase = __ioremap(addr, len,
-               (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
-#else
-       /* XXX: split this properly to enable on PAT */
-       dd->ipath_kregbase = ioremap_nocache(addr, len);
-#endif
-
-       if (!dd->ipath_kregbase) {
-               ipath_dbg("Unable to map io addr %llx to kvirt, failing\n",
-                         addr);
-               ret = -ENOMEM;
-               goto bail_iounmap;
-       }
-       dd->ipath_kregend = (u64 __iomem *)
-               ((void __iomem *)dd->ipath_kregbase + len);
-       dd->ipath_physaddr = addr;      /* used for io_remap, etc. */
-       /* for user mmap */
-       ipath_cdbg(VERBOSE, "mapped io addr %llx to kregbase %p\n",
-                  addr, dd->ipath_kregbase);
-
-       if (dd->ipath_f_bus(dd, pdev))
-               ipath_dev_err(dd, "Failed to setup config space; "
-                             "continuing anyway\n");
-
-       /*
-        * Set up our interrupt handler; IRQF_SHARED is probably not needed,
-        * since MSI interrupts shouldn't be shared, but it won't hurt for now.
-        * Check for a zero irq after we return from chip-specific bus setup,
-        * since that setup can change the irq.
-        */
-       if (!dd->ipath_irq)
-               ipath_dev_err(dd, "irq is 0, BIOS error?  Interrupts won't "
-                             "work\n");
-       else {
-               ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
-                                 IPATH_DRV_NAME, dd);
-               if (ret) {
-                       ipath_dev_err(dd, "Couldn't setup irq handler, "
-                                     "irq=%d: %d\n", dd->ipath_irq, ret);
-                       goto bail_iounmap;
-               }
-       }
-
-       ret = ipath_init_chip(dd, 0);   /* do the chip-specific init */
-       if (ret)
-               goto bail_irqsetup;
-
-       ret = ipath_enable_wc(dd);
-
-       if (ret)
-               ret = 0;
-
-       ipath_verify_pioperf(dd);
-
-       ipath_device_create_group(&pdev->dev, dd);
-       ipathfs_add_device(dd);
-       ipath_user_add(dd);
-       ipath_diag_add(dd);
-       ipath_register_ib_device(dd);
-
-       goto bail;
-
-bail_irqsetup:
-       cleanup_device(dd);
-
-       if (dd->ipath_irq)
-               dd->ipath_f_free_irq(dd);
-
-       if (dd->ipath_f_cleanup)
-               dd->ipath_f_cleanup(dd);
-
-bail_iounmap:
-       iounmap((volatile void __iomem *) dd->ipath_kregbase);
-
-bail_regions:
-       pci_release_regions(pdev);
-
-bail_disable:
-       pci_disable_device(pdev);
-
-bail_devdata:
-       ipath_free_devdata(pdev, dd);
-
-bail:
-       return ret;
-}
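The DMA-mask handling in the probe routine above follows the usual try-64-bit-then-fall-back-to-32-bit pattern. A condensed kernel-style sketch of only that step, using the same pci_set_dma_mask()/pci_set_consistent_dma_mask() calls; the function name and the -EIO return value are illustrative, and this is not a standalone program.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Prefer 64-bit DMA, fall back to 32-bit if the platform refuses it. */
static int setup_dma_masks(struct pci_dev *pdev)
{
        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
                return 0;

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
                return 0;

        return -EIO;    /* neither mask was accepted */
}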
-
-static void cleanup_device(struct ipath_devdata *dd)
-{
-       int port;
-       struct ipath_portdata **tmp;
-       unsigned long flags;
-
-       if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
-               /* can't do anything more with chip; needs re-init */
-               *dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
-               if (dd->ipath_kregbase) {
-                       /*
-                        * if we haven't already cleaned up, clear these now
-                        * to ensure any register reads/writes "fail" until
-                        * re-init
-                        */
-                       dd->ipath_kregbase = NULL;
-                       dd->ipath_uregbase = 0;
-                       dd->ipath_sregbase = 0;
-                       dd->ipath_cregbase = 0;
-                       dd->ipath_kregsize = 0;
-               }
-               ipath_disable_wc(dd);
-       }
-
-       if (dd->ipath_spectriggerhit)
-               dev_info(&dd->pcidev->dev, "%lu special trigger hits\n",
-                        dd->ipath_spectriggerhit);
-
-       if (dd->ipath_pioavailregs_dma) {
-               dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-                                 (void *) dd->ipath_pioavailregs_dma,
-                                 dd->ipath_pioavailregs_phys);
-               dd->ipath_pioavailregs_dma = NULL;
-       }
-       if (dd->ipath_dummy_hdrq) {
-               dma_free_coherent(&dd->pcidev->dev,
-                       dd->ipath_pd[0]->port_rcvhdrq_size,
-                       dd->ipath_dummy_hdrq, dd->ipath_dummy_hdrq_phys);
-               dd->ipath_dummy_hdrq = NULL;
-       }
-
-       if (dd->ipath_pageshadow) {
-               struct page **tmpp = dd->ipath_pageshadow;
-               dma_addr_t *tmpd = dd->ipath_physshadow;
-               int i, cnt = 0;
-
-               ipath_cdbg(VERBOSE, "Unlocking any expTID pages still "
-                          "locked\n");
-               for (port = 0; port < dd->ipath_cfgports; port++) {
-                       int port_tidbase = port * dd->ipath_rcvtidcnt;
-                       int maxtid = port_tidbase + dd->ipath_rcvtidcnt;
-                       for (i = port_tidbase; i < maxtid; i++) {
-                               if (!tmpp[i])
-                                       continue;
-                               pci_unmap_page(dd->pcidev, tmpd[i],
-                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                               ipath_release_user_pages(&tmpp[i], 1);
-                               tmpp[i] = NULL;
-                               cnt++;
-                       }
-               }
-               if (cnt) {
-                       ipath_stats.sps_pageunlocks += cnt;
-                       ipath_cdbg(VERBOSE, "There were still %u expTID "
-                                  "entries locked\n", cnt);
-               }
-               if (ipath_stats.sps_pagelocks ||
-                   ipath_stats.sps_pageunlocks)
-                       ipath_cdbg(VERBOSE, "%llu pages locked, %llu "
-                                  "unlocked via ipath_m{un}lock\n",
-                                  (unsigned long long)
-                                  ipath_stats.sps_pagelocks,
-                                  (unsigned long long)
-                                  ipath_stats.sps_pageunlocks);
-
-               ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
-                          dd->ipath_pageshadow);
-               tmpp = dd->ipath_pageshadow;
-               dd->ipath_pageshadow = NULL;
-               vfree(tmpp);
-
-               dd->ipath_egrtidbase = NULL;
-       }
-
-       /*
-        * free any resources still in use (usually just kernel ports)
-        * at unload; we loop over portcnt, because that's what we allocate.
-        * We acquire the lock to be really paranoid that ipath_pd isn't being
-        * accessed from some interrupt-related code (that should not happen,
-        * but best to be sure).
-        */
-       spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-       tmp = dd->ipath_pd;
-       dd->ipath_pd = NULL;
-       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-       for (port = 0; port < dd->ipath_portcnt; port++) {
-               struct ipath_portdata *pd = tmp[port];
-               tmp[port] = NULL; /* debugging paranoia */
-               ipath_free_pddata(dd, pd);
-       }
-       kfree(tmp);
-}
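cleanup_device() detaches the ipath_pd array while holding ipath_uctxt_lock and only frees the entries after dropping it, so anything racing with teardown sees either the old table or NULL, never a half-freed one. The same idea in plain user-space C with a pthread mutex; the struct and function names are invented for the example.

/* teardown.c: build with gcc -pthread teardown.c -o teardown */
#include <pthread.h>
#include <stdlib.h>

struct table {
        pthread_mutex_t lock;
        void **entries;
        unsigned count;
};

/* Detach the array under the lock, then free it outside the lock. */
static void table_teardown(struct table *t)
{
        void **tmp;
        unsigned i;

        pthread_mutex_lock(&t->lock);
        tmp = t->entries;
        t->entries = NULL;
        pthread_mutex_unlock(&t->lock);

        if (!tmp)
                return;
        for (i = 0; i < t->count; i++)
                free(tmp[i]);
        free(tmp);
}

int main(void)
{
        struct table t = { .count = 4 };

        pthread_mutex_init(&t.lock, NULL);
        t.entries = calloc(t.count, sizeof(void *));
        table_teardown(&t);
        pthread_mutex_destroy(&t.lock);
        return 0;
}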
-
-static void ipath_remove_one(struct pci_dev *pdev)
-{
-       struct ipath_devdata *dd = pci_get_drvdata(pdev);
-
-       ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
-
-       /*
-        * disable the IB link early, to be sure no new packets arrive, which
-        * complicates the shutdown process
-        */
-       ipath_shutdown_device(dd);
-
-       flush_workqueue(ib_wq);
-
-       if (dd->verbs_dev)
-               ipath_unregister_ib_device(dd->verbs_dev);
-
-       ipath_diag_remove(dd);
-       ipath_user_remove(dd);
-       ipathfs_remove_device(dd);
-       ipath_device_remove_group(&pdev->dev, dd);
-
-       ipath_cdbg(VERBOSE, "Releasing pci memory regions, dd %p, "
-                  "unit %u\n", dd, (u32) dd->ipath_unit);
-
-       cleanup_device(dd);
-
-       /*
-        * turn off rcv, send, and interrupts for all ports, all drivers
-        * should also hard reset the chip here?
-        * free up port 0 (kernel) rcvhdr, egr bufs, and eventually tid bufs
-        * for all versions of the driver, if they were allocated
-        */
-       if (dd->ipath_irq) {
-               ipath_cdbg(VERBOSE, "unit %u free irq %d\n",
-                          dd->ipath_unit, dd->ipath_irq);
-               dd->ipath_f_free_irq(dd);
-       } else
-               ipath_dbg("irq is 0, not doing free_irq "
-                         "for unit %u\n", dd->ipath_unit);
-       /*
-        * we check for NULL here, because it's outside
-        * the kregbase check, and we need to call it
-        * after the free_irq.  Thus it's possible that
-        * the function pointers were never initialized.
-        */
-       if (dd->ipath_f_cleanup)
-               /* clean up chip-specific stuff */
-               dd->ipath_f_cleanup(dd);
-
-       ipath_cdbg(VERBOSE, "Unmapping kregbase %p\n", dd->ipath_kregbase);
-       iounmap((volatile void __iomem *) dd->ipath_kregbase);
-       pci_release_regions(pdev);
-       ipath_cdbg(VERBOSE, "calling pci_disable_device\n");
-       pci_disable_device(pdev);
-
-       ipath_free_devdata(pdev, dd);
-}
-
-/* general driver use */
-DEFINE_MUTEX(ipath_mutex);
-
-static DEFINE_SPINLOCK(ipath_pioavail_lock);
-
-/**
- * ipath_disarm_piobufs - cancel a range of PIO buffers
- * @dd: the infinipath device
- * @first: the first PIO buffer to cancel
- * @cnt: the number of PIO buffers to cancel
- *
- * Cancel a range of PIO buffers, used when they might be armed, but
- * not triggered.  Used at init to ensure buffer state, at user
- * process close in case it died while writing to a PIO buffer,
- * and also after errors.
- */
-void ipath_disarm_piobufs(struct ipath_devdata *dd, unsigned first,
-                         unsigned cnt)
-{
-       unsigned i, last = first + cnt;
-       unsigned long flags;
-
-       ipath_cdbg(PKT, "disarm %u PIObufs first=%u\n", cnt, first);
-       for (i = first; i < last; i++) {
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               /*
-                * The disarm-related bits are write-only, so it
-                * is ok to OR them in with our copy of sendctrl
-                * while we hold the lock.
-                */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl | INFINIPATH_S_DISARM |
-                       (i << INFINIPATH_S_DISARMPIOBUF_SHIFT));
-               /* can't disarm bufs back-to-back per iba7220 spec */
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-       }
-       /* on some older chips, update may not happen after cancel */
-       ipath_force_pio_avail_update(dd);
-}
-
-/**
- * ipath_wait_linkstate - wait for an IB link state change to occur
- * @dd: the infinipath device
- * @state: the state to wait for
- * @msecs: the number of milliseconds to wait
- *
- * Wait up to msecs milliseconds for an IB link state change to occur.
- * For now, take the easy polling route.  Currently used only by
- * ipath_set_linkstate.  Returns 0 if the state is reached, otherwise
- * -ETIMEDOUT.  The state argument can have multiple state bits set, to
- * match any of several transitions.
- */
-int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state, int msecs)
-{
-       dd->ipath_state_wanted = state;
-       wait_event_interruptible_timeout(ipath_state_wait,
-                                        (dd->ipath_flags & state),
-                                        msecs_to_jiffies(msecs));
-       dd->ipath_state_wanted = 0;
-
-       if (!(dd->ipath_flags & state)) {
-               u64 val;
-               ipath_cdbg(VERBOSE, "Didn't reach linkstate %s within %u"
-                          " ms\n",
-                          /* test INIT ahead of DOWN, both can be set */
-                          (state & IPATH_LINKINIT) ? "INIT" :
-                          ((state & IPATH_LINKDOWN) ? "DOWN" :
-                           ((state & IPATH_LINKARMED) ? "ARM" : "ACTIVE")),
-                          msecs);
-               val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-               ipath_cdbg(VERBOSE, "ibcc=%llx ibcstatus=%llx (%s)\n",
-                          (unsigned long long) ipath_read_kreg64(
-                                  dd, dd->ipath_kregs->kr_ibcctrl),
-                          (unsigned long long) val,
-                          ipath_ibcstatus_str[val & dd->ibcs_lts_mask]);
-       }
-       return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
-}
-
-static void decode_sdma_errs(struct ipath_devdata *dd, ipath_err_t err,
-       char *buf, size_t blen)
-{
-       static const struct {
-               ipath_err_t err;
-               const char *msg;
-       } errs[] = {
-               { INFINIPATH_E_SDMAGENMISMATCH, "SDmaGenMismatch" },
-               { INFINIPATH_E_SDMAOUTOFBOUND, "SDmaOutOfBound" },
-               { INFINIPATH_E_SDMATAILOUTOFBOUND, "SDmaTailOutOfBound" },
-               { INFINIPATH_E_SDMABASE, "SDmaBase" },
-               { INFINIPATH_E_SDMA1STDESC, "SDma1stDesc" },
-               { INFINIPATH_E_SDMARPYTAG, "SDmaRpyTag" },
-               { INFINIPATH_E_SDMADWEN, "SDmaDwEn" },
-               { INFINIPATH_E_SDMAMISSINGDW, "SDmaMissingDw" },
-               { INFINIPATH_E_SDMAUNEXPDATA, "SDmaUnexpData" },
-               { INFINIPATH_E_SDMADESCADDRMISALIGN, "SDmaDescAddrMisalign" },
-               { INFINIPATH_E_SENDBUFMISUSE, "SendBufMisuse" },
-               { INFINIPATH_E_SDMADISABLED, "SDmaDisabled" },
-       };
-       int i;
-       int expected;
-       size_t bidx = 0;
-
-       for (i = 0; i < ARRAY_SIZE(errs); i++) {
-               expected = (errs[i].err != INFINIPATH_E_SDMADISABLED) ? 0 :
-                       test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-               if ((err & errs[i].err) && !expected)
-                       bidx += snprintf(buf + bidx, blen - bidx,
-                                        "%s ", errs[i].msg);
-       }
-}
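decode_sdma_errs() is a table-driven bitmask-to-string decoder: it walks a table of (bit, name) pairs and appends the name of every bit that is set. A standalone sketch of that pattern with invented flag names:

/* decode.c: build with gcc decode.c -o decode */
#include <stdio.h>
#include <string.h>

#define ERR_A 0x1u
#define ERR_B 0x2u
#define ERR_C 0x4u

static const struct { unsigned bit; const char *msg; } errs[] = {
        { ERR_A, "ErrA" }, { ERR_B, "ErrB" }, { ERR_C, "ErrC" },
};

/* Append the name of every set bit to buf, space separated. */
static void decode(unsigned err, char *buf, size_t blen)
{
        size_t i, used;

        buf[0] = '\0';
        for (i = 0; i < sizeof(errs) / sizeof(errs[0]); i++) {
                if (!(err & errs[i].bit))
                        continue;
                used = strlen(buf);
                snprintf(buf + used, blen - used, "%s ", errs[i].msg);
        }
}

int main(void)
{
        char buf[64];

        decode(ERR_A | ERR_C, buf, sizeof(buf));
        printf("%s\n", buf);    /* prints "ErrA ErrC " */
        return 0;
}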
-
-/*
- * Decode the error status into strings, deciding whether to always
- * print it or not depending on "normal packet errors" vs everything
- * else.  Return 1 for "real" errors, otherwise 0 if only packet
- * errors, so the caller can decide what to print with the string.
- */
-int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
-       ipath_err_t err)
-{
-       int iserr = 1;
-       *buf = '\0';
-       if (err & INFINIPATH_E_PKTERRS) {
-               if (!(err & ~INFINIPATH_E_PKTERRS))
-                       iserr = 0; // if only packet errors.
-               if (ipath_debug & __IPATH_ERRPKTDBG) {
-                       if (err & INFINIPATH_E_REBP)
-                               strlcat(buf, "EBP ", blen);
-                       if (err & INFINIPATH_E_RVCRC)
-                               strlcat(buf, "VCRC ", blen);
-                       if (err & INFINIPATH_E_RICRC) {
-                               strlcat(buf, "CRC ", blen);
-                               // clear for check below, so only once
-                               err &= INFINIPATH_E_RICRC;
-                       }
-                       if (err & INFINIPATH_E_RSHORTPKTLEN)
-                               strlcat(buf, "rshortpktlen ", blen);
-                       if (err & INFINIPATH_E_SDROPPEDDATAPKT)
-                               strlcat(buf, "sdroppeddatapkt ", blen);
-                       if (err & INFINIPATH_E_SPKTLEN)
-                               strlcat(buf, "spktlen ", blen);
-               }
-               if ((err & INFINIPATH_E_RICRC) &&
-                       !(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
-                       strlcat(buf, "CRC ", blen);
-               if (!iserr)
-                       goto done;
-       }
-       if (err & INFINIPATH_E_RHDRLEN)
-               strlcat(buf, "rhdrlen ", blen);
-       if (err & INFINIPATH_E_RBADTID)
-               strlcat(buf, "rbadtid ", blen);
-       if (err & INFINIPATH_E_RBADVERSION)
-               strlcat(buf, "rbadversion ", blen);
-       if (err & INFINIPATH_E_RHDR)
-               strlcat(buf, "rhdr ", blen);
-       if (err & INFINIPATH_E_SENDSPECIALTRIGGER)
-               strlcat(buf, "sendspecialtrigger ", blen);
-       if (err & INFINIPATH_E_RLONGPKTLEN)
-               strlcat(buf, "rlongpktlen ", blen);
-       if (err & INFINIPATH_E_RMAXPKTLEN)
-               strlcat(buf, "rmaxpktlen ", blen);
-       if (err & INFINIPATH_E_RMINPKTLEN)
-               strlcat(buf, "rminpktlen ", blen);
-       if (err & INFINIPATH_E_SMINPKTLEN)
-               strlcat(buf, "sminpktlen ", blen);
-       if (err & INFINIPATH_E_RFORMATERR)
-               strlcat(buf, "rformaterr ", blen);
-       if (err & INFINIPATH_E_RUNSUPVL)
-               strlcat(buf, "runsupvl ", blen);
-       if (err & INFINIPATH_E_RUNEXPCHAR)
-               strlcat(buf, "runexpchar ", blen);
-       if (err & INFINIPATH_E_RIBFLOW)
-               strlcat(buf, "ribflow ", blen);
-       if (err & INFINIPATH_E_SUNDERRUN)
-               strlcat(buf, "sunderrun ", blen);
-       if (err & INFINIPATH_E_SPIOARMLAUNCH)
-               strlcat(buf, "spioarmlaunch ", blen);
-       if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
-               strlcat(buf, "sunexperrpktnum ", blen);
-       if (err & INFINIPATH_E_SDROPPEDSMPPKT)
-               strlcat(buf, "sdroppedsmppkt ", blen);
-       if (err & INFINIPATH_E_SMAXPKTLEN)
-               strlcat(buf, "smaxpktlen ", blen);
-       if (err & INFINIPATH_E_SUNSUPVL)
-               strlcat(buf, "sunsupVL ", blen);
-       if (err & INFINIPATH_E_INVALIDADDR)
-               strlcat(buf, "invalidaddr ", blen);
-       if (err & INFINIPATH_E_RRCVEGRFULL)
-               strlcat(buf, "rcvegrfull ", blen);
-       if (err & INFINIPATH_E_RRCVHDRFULL)
-               strlcat(buf, "rcvhdrfull ", blen);
-       if (err & INFINIPATH_E_IBSTATUSCHANGED)
-               strlcat(buf, "ibcstatuschg ", blen);
-       if (err & INFINIPATH_E_RIBLOSTLINK)
-               strlcat(buf, "riblostlink ", blen);
-       if (err & INFINIPATH_E_HARDWARE)
-               strlcat(buf, "hardware ", blen);
-       if (err & INFINIPATH_E_RESET)
-               strlcat(buf, "reset ", blen);
-       if (err & INFINIPATH_E_SDMAERRS)
-               decode_sdma_errs(dd, err, buf, blen);
-       if (err & INFINIPATH_E_INVALIDEEPCMD)
-               strlcat(buf, "invalideepromcmd ", blen);
-done:
-       return iserr;
-}
-
-/**
- * get_rhf_errstring - decode RHF errors
- * @err: the err number
- * @msg: the output buffer
- * @len: the length of the output buffer
- *
- * only used one place now, may want more later
- */
-static void get_rhf_errstring(u32 err, char *msg, size_t len)
-{
-       /* start with an empty string; if no errors are set it stays empty */
-       *msg = '\0';
-
-       if (err & INFINIPATH_RHF_H_ICRCERR)
-               strlcat(msg, "icrcerr ", len);
-       if (err & INFINIPATH_RHF_H_VCRCERR)
-               strlcat(msg, "vcrcerr ", len);
-       if (err & INFINIPATH_RHF_H_PARITYERR)
-               strlcat(msg, "parityerr ", len);
-       if (err & INFINIPATH_RHF_H_LENERR)
-               strlcat(msg, "lenerr ", len);
-       if (err & INFINIPATH_RHF_H_MTUERR)
-               strlcat(msg, "mtuerr ", len);
-       if (err & INFINIPATH_RHF_H_IHDRERR)
-               /* infinipath hdr checksum error */
-               strlcat(msg, "ipathhdrerr ", len);
-       if (err & INFINIPATH_RHF_H_TIDERR)
-               strlcat(msg, "tiderr ", len);
-       if (err & INFINIPATH_RHF_H_MKERR)
-               /* bad port, offset, etc. */
-               strlcat(msg, "invalid ipathhdr ", len);
-       if (err & INFINIPATH_RHF_H_IBERR)
-               strlcat(msg, "iberr ", len);
-       if (err & INFINIPATH_RHF_L_SWA)
-               strlcat(msg, "swA ", len);
-       if (err & INFINIPATH_RHF_L_SWB)
-               strlcat(msg, "swB ", len);
-}
-
-/**
- * ipath_get_egrbuf - get an eager buffer
- * @dd: the infinipath device
- * @bufnum: the eager buffer to get
- *
- * must only be called if ipath_pd[port] is known to be allocated
- */
-static inline void *ipath_get_egrbuf(struct ipath_devdata *dd, u32 bufnum)
-{
-       return dd->ipath_port0_skbinfo ?
-               (void *) dd->ipath_port0_skbinfo[bufnum].skb->data : NULL;
-}
-
-/**
- * ipath_alloc_skb - allocate an skb and buffer with possible constraints
- * @dd: the infinipath device
- * @gfp_mask: the sk_buff allocation (GFP) mask
- */
-struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd,
-                               gfp_t gfp_mask)
-{
-       struct sk_buff *skb;
-       u32 len;
-
-       /*
-        * The only fully supported way to handle this is to allocate lots
-        * of extra space, align as needed, and then do skb_reserve().  That wastes
-        * a lot of memory...  I'll have to hack this into infinipath_copy
-        * also.
-        */
-
-       /*
-        * We need 2 extra bytes for ipath_ether data sent in the
-        * key header.  In order to keep everything dword aligned,
-        * we'll reserve 4 bytes.
-        */
-       len = dd->ipath_ibmaxlen + 4;
-
-       if (dd->ipath_flags & IPATH_4BYTE_TID) {
-               /* We need a 2KB multiple alignment, and there is no way
-                * to do it except to allocate extra and then skb_reserve
-                * enough to bring it up to the right alignment.
-                */
-               len += 2047;
-       }
-
-       skb = __dev_alloc_skb(len, gfp_mask);
-       if (!skb) {
-               ipath_dev_err(dd, "Failed to allocate skbuff, length %u\n",
-                             len);
-               goto bail;
-       }
-
-       skb_reserve(skb, 4);
-
-       if (dd->ipath_flags & IPATH_4BYTE_TID) {
-               u32 una = (unsigned long)skb->data & 2047;
-               if (una)
-                       skb_reserve(skb, 2048 - una);
-       }
-
-bail:
-       return skb;
-}
-
-static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
-                            u32 eflags,
-                            u32 l,
-                            u32 etail,
-                            __le32 *rhf_addr,
-                            struct ipath_message_header *hdr)
-{
-       char emsg[128];
-
-       get_rhf_errstring(eflags, emsg, sizeof emsg);
-       ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
-                  "tlen=%x opcode=%x egridx=%x: %s\n",
-                  eflags, l,
-                  ipath_hdrget_rcv_type(rhf_addr),
-                  ipath_hdrget_length_in_bytes(rhf_addr),
-                  be32_to_cpu(hdr->bth[0]) >> 24,
-                  etail, emsg);
-
-       /* Count local link integrity errors. */
-       if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
-               u8 n = (dd->ipath_ibcctrl >>
-                       INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-                       INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-
-               if (++dd->ipath_lli_counter > n) {
-                       dd->ipath_lli_counter = 0;
-                       dd->ipath_lli_errors++;
-               }
-       }
-}
-
-/*
- * ipath_kreceive - receive a packet
- * @pd: the infinipath port
- *
- * called from interrupt handler for errors or receive interrupt
- */
-void ipath_kreceive(struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       __le32 *rhf_addr;
-       void *ebuf;
-       const u32 rsize = dd->ipath_rcvhdrentsize;      /* words */
-       const u32 maxcnt = dd->ipath_rcvhdrcnt * rsize; /* words */
-       u32 etail = -1, l, hdrqtail;
-       struct ipath_message_header *hdr;
-       u32 eflags, i, etype, tlen, pkttot = 0, updegr = 0, reloop = 0;
-       static u64 totcalls;    /* stats, may eventually remove */
-       int last;
-
-       l = pd->port_head;
-       rhf_addr = (__le32 *) pd->port_rcvhdrq + l + dd->ipath_rhf_offset;
-       if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
-               u32 seq = ipath_hdrget_seq(rhf_addr);
-
-               if (seq != pd->port_seq_cnt)
-                       goto bail;
-               hdrqtail = 0;
-       } else {
-               hdrqtail = ipath_get_rcvhdrtail(pd);
-               if (l == hdrqtail)
-                       goto bail;
-               smp_rmb();
-       }
-
-reloop:
-       for (last = 0, i = 1; !last; i += !last) {
-               hdr = dd->ipath_f_get_msgheader(dd, rhf_addr);
-               eflags = ipath_hdrget_err_flags(rhf_addr);
-               etype = ipath_hdrget_rcv_type(rhf_addr);
-               /* total length */
-               tlen = ipath_hdrget_length_in_bytes(rhf_addr);
-               ebuf = NULL;
-               if ((dd->ipath_flags & IPATH_NODMA_RTAIL) ?
-                   ipath_hdrget_use_egr_buf(rhf_addr) :
-                   (etype != RCVHQ_RCV_TYPE_EXPECTED)) {
-                       /*
-                        * It turns out that the chip uses an eager buffer
-                        * for all non-expected packets, whether it "needs"
-                        * one or not.  So always get the index, but don't
-                        * set ebuf (so we try to copy data) unless the
-                        * length requires it.
-                        */
-                       etail = ipath_hdrget_index(rhf_addr);
-                       updegr = 1;
-                       if (tlen > sizeof(*hdr) ||
-                           etype == RCVHQ_RCV_TYPE_NON_KD)
-                               ebuf = ipath_get_egrbuf(dd, etail);
-               }
-
-               /*
-                * both tiderr and ipathhdrerr are set for all plain IB
-                * packets; only ipathhdrerr should be set.
-                */
-
-               if (etype != RCVHQ_RCV_TYPE_NON_KD &&
-                   etype != RCVHQ_RCV_TYPE_ERROR &&
-                   ipath_hdrget_ipath_ver(hdr->iph.ver_port_tid_offset) !=
-                   IPS_PROTO_VERSION)
-                       ipath_cdbg(PKT, "Bad InfiniPath protocol version "
-                                  "%x\n", etype);
-
-               if (unlikely(eflags))
-                       ipath_rcv_hdrerr(dd, eflags, l, etail, rhf_addr, hdr);
-               else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
-                       ipath_ib_rcv(dd->verbs_dev, (u32 *)hdr, ebuf, tlen);
-                       if (dd->ipath_lli_counter)
-                               dd->ipath_lli_counter--;
-               } else if (etype == RCVHQ_RCV_TYPE_EAGER) {
-                       u8 opcode = be32_to_cpu(hdr->bth[0]) >> 24;
-                       u32 qp = be32_to_cpu(hdr->bth[1]) & 0xffffff;
-                       ipath_cdbg(PKT, "typ %x, opcode %x (eager, "
-                                  "qp=%x), len %x; ignored\n",
-                                  etype, opcode, qp, tlen);
-               } else if (etype == RCVHQ_RCV_TYPE_EXPECTED) {
-                       ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
-                                 be32_to_cpu(hdr->bth[0]) >> 24);
-               } else {
-                       /*
-                        * error packet, type of error unknown.
-                        * Probably type 3, but we don't know, so don't
-                        * even try to print the opcode, etc.
-                        * Usually caused by a "bad packet" that has no
-                        * BTH even though the LRH says it should.
-                        */
-                       ipath_cdbg(ERRPKT, "Error Pkt, but no eflags! egrbuf"
-                                 " %x, len %x hdrq+%x rhf: %Lx\n",
-                                 etail, tlen, l, (unsigned long long)
-                                 le64_to_cpu(*(__le64 *) rhf_addr));
-                       if (ipath_debug & __IPATH_ERRPKTDBG) {
-                               u32 j, *d, dw = rsize-2;
-                               if (rsize > (tlen>>2))
-                                       dw = tlen>>2;
-                               d = (u32 *)hdr;
-                               printk(KERN_DEBUG "EPkt rcvhdr(%x dw):\n",
-                                       dw);
-                               for (j = 0; j < dw; j++)
-                                       printk(KERN_DEBUG "%8x%s", d[j],
-                                               (j%8) == 7 ? "\n" : " ");
-                               printk(KERN_DEBUG ".\n");
-                       }
-               }
-               l += rsize;
-               if (l >= maxcnt)
-                       l = 0;
-               rhf_addr = (__le32 *) pd->port_rcvhdrq +
-                       l + dd->ipath_rhf_offset;
-               if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
-                       u32 seq = ipath_hdrget_seq(rhf_addr);
-
-                       if (++pd->port_seq_cnt > 13)
-                               pd->port_seq_cnt = 1;
-                       if (seq != pd->port_seq_cnt)
-                               last = 1;
-               } else if (l == hdrqtail) {
-                       last = 1;
-               }
-               /*
-                * update head regs on last packet, and every 16 packets.
-                * Reduce bus traffic, while still trying to prevent
-                * rcvhdrq overflows, for when the queue is nearly full
-                */
-               if (last || !(i & 0xf)) {
-                       u64 lval = l;
-
-                       /* request IBA6120 and 7220 interrupt only on last */
-                       if (last)
-                               lval |= dd->ipath_rhdrhead_intr_off;
-                       ipath_write_ureg(dd, ur_rcvhdrhead, lval,
-                               pd->port_port);
-                       if (updegr) {
-                               ipath_write_ureg(dd, ur_rcvegrindexhead,
-                                                etail, pd->port_port);
-                               updegr = 0;
-                       }
-               }
-       }
-
-       if (!dd->ipath_rhdrhead_intr_off && !reloop &&
-           !(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-               /* IBA6110 workaround; we can have a race clearing chip
-                * interrupt with another interrupt about to be delivered,
-                * and can clear it before it is delivered on the GPIO
-                * workaround.  By doing the extra check here for the
-                * in-memory tail register updating while we were doing
-                * earlier packets, we "almost" guarantee we have covered
-                * that case.
-                */
-               u32 hqtail = ipath_get_rcvhdrtail(pd);
-               if (hqtail != hdrqtail) {
-                       hdrqtail = hqtail;
-                       reloop = 1; /* loop 1 extra time at most */
-                       goto reloop;
-               }
-       }
-
-       pkttot += i;
-
-       pd->port_head = l;
-
-       if (pkttot > ipath_stats.sps_maxpkts_call)
-               ipath_stats.sps_maxpkts_call = pkttot;
-       ipath_stats.sps_port0pkts += pkttot;
-       ipath_stats.sps_avgpkts_call =
-               ipath_stats.sps_port0pkts / ++totcalls;
-
-bail:;
-}
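The receive loop above advances a head index through a circular header queue and writes the head back to the chip only on the last packet or every 16th packet, to cut bus traffic. A toy standalone model of just that head-update policy; all the numbers are illustrative.

/* head.c: build with gcc head.c -o head */
#include <stdio.h>

int main(void)
{
        const unsigned rsize = 8;               /* words per header entry */
        const unsigned maxcnt = 8 * 64;         /* words in the whole queue */
        unsigned head = 0, i;

        for (i = 1; i <= 40; i++) {
                int last = (i == 40);

                head += rsize;                  /* consume one entry */
                if (head >= maxcnt)
                        head = 0;               /* wrap */
                if (last || !(i & 0xf))         /* publish sparingly */
                        printf("publish head=%u after %u packets\n", head, i);
        }
        return 0;
}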
-
-/**
- * ipath_update_pio_bufs - update shadow copy of the PIO availability map
- * @dd: the infinipath device
- *
- * called whenever our local copy indicates we have run out of send buffers.
- * NOTE: This can be called from interrupt context by some code
- * and from non-interrupt context by ipath_getpiobuf().
- */
-
-static void ipath_update_pio_bufs(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-       int i;
-       const unsigned piobregs = (unsigned)dd->ipath_pioavregs;
-
-       /* If the generation (check) bits have changed, then we update the
-        * busy bit for the corresponding PIO buffer.  This algorithm will
-        * modify positions to the value they already have in some cases
-        * (i.e., no change), but it's faster than changing only the bits
-        * that have changed.
-        *
-        * We would like to do this atomically, to avoid spinlocks in the
-        * critical send path, but that's not really possible, given the
-        * type of changes, and that this routine could be called on
-        * multiple CPUs simultaneously, so we lock in this routine only,
-        * to avoid conflicting updates; all we change is the shadow, and
-        * it's a single 64 bit memory location, so by definition the update
-        * is atomic in terms of what other CPUs can see in testing the
-        * bits.  The spin_lock overhead isn't too bad, since it only
-        * happens when all buffers are in use, so only CPU overhead, not
-        * latency or bandwidth, is affected.
-        */
-       if (!dd->ipath_pioavailregs_dma) {
-               ipath_dbg("Update shadow pioavail, but regs_dma NULL!\n");
-               return;
-       }
-       if (ipath_debug & __IPATH_VERBDBG) {
-               /* only if packet debug and verbose */
-               volatile __le64 *dma = dd->ipath_pioavailregs_dma;
-               unsigned long *shadow = dd->ipath_pioavailshadow;
-
-               ipath_cdbg(PKT, "Refill avail, dma0=%llx shad0=%lx, "
-                          "d1=%llx s1=%lx, d2=%llx s2=%lx, d3=%llx "
-                          "s3=%lx\n",
-                          (unsigned long long) le64_to_cpu(dma[0]),
-                          shadow[0],
-                          (unsigned long long) le64_to_cpu(dma[1]),
-                          shadow[1],
-                          (unsigned long long) le64_to_cpu(dma[2]),
-                          shadow[2],
-                          (unsigned long long) le64_to_cpu(dma[3]),
-                          shadow[3]);
-               if (piobregs > 4)
-                       ipath_cdbg(
-                               PKT, "2nd group, dma4=%llx shad4=%lx, "
-                               "d5=%llx s5=%lx, d6=%llx s6=%lx, "
-                               "d7=%llx s7=%lx\n",
-                               (unsigned long long) le64_to_cpu(dma[4]),
-                               shadow[4],
-                               (unsigned long long) le64_to_cpu(dma[5]),
-                               shadow[5],
-                               (unsigned long long) le64_to_cpu(dma[6]),
-                               shadow[6],
-                               (unsigned long long) le64_to_cpu(dma[7]),
-                               shadow[7]);
-       }
-       spin_lock_irqsave(&ipath_pioavail_lock, flags);
-       for (i = 0; i < piobregs; i++) {
-               u64 pchbusy, pchg, piov, pnew;
-               /*
-                * Chip Errata: bug 6641; even and odd qwords>3 are swapped
-                */
-               if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-                       piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i ^ 1]);
-               else
-                       piov = le64_to_cpu(dd->ipath_pioavailregs_dma[i]);
-               pchg = dd->ipath_pioavailkernel[i] &
-                       ~(dd->ipath_pioavailshadow[i] ^ piov);
-               pchbusy = pchg << INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT;
-               if (pchg && (pchbusy & dd->ipath_pioavailshadow[i])) {
-                       pnew = dd->ipath_pioavailshadow[i] & ~pchbusy;
-                       pnew |= piov & pchbusy;
-                       dd->ipath_pioavailshadow[i] = pnew;
-               }
-       }
-       spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-}
-
-/*
- * used to force update of pioavailshadow if we can't get a pio buffer.
- * Needed primarily due to exiting freeze mode after recovering
- * from errors.  Done lazily, because it's safer (known to not
- * be writing pio buffers).
- */
-static void ipath_reset_availshadow(struct ipath_devdata *dd)
-{
-       int i, im;
-       unsigned long flags;
-
-       spin_lock_irqsave(&ipath_pioavail_lock, flags);
-       for (i = 0; i < dd->ipath_pioavregs; i++) {
-               u64 val, oldval;
-               /* deal with 6110 chip bug on high register #s */
-               im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
-                       i ^ 1 : i;
-               val = le64_to_cpu(dd->ipath_pioavailregs_dma[im]);
-               /*
-                * busy out the buffers not in the kernel avail list,
-                * without changing the generation bits.
-                */
-               oldval = dd->ipath_pioavailshadow[i];
-               dd->ipath_pioavailshadow[i] = val |
-                       ((~dd->ipath_pioavailkernel[i] <<
-                       INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT) &
-                       0xaaaaaaaaaaaaaaaaULL); /* All BUSY bits in qword */
-               if (oldval != dd->ipath_pioavailshadow[i])
-                       ipath_dbg("shadow[%d] was %Lx, now %lx\n",
-                               i, (unsigned long long) oldval,
-                               dd->ipath_pioavailshadow[i]);
-       }
-       spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-}
-
-/**
- * ipath_setrcvhdrsize - set the receive header size
- * @dd: the infinipath device
- * @rhdrsize: the receive header size
- *
- * called from user init code, and also layered driver init
- */
-int ipath_setrcvhdrsize(struct ipath_devdata *dd, unsigned rhdrsize)
-{
-       int ret = 0;
-
-       if (dd->ipath_flags & IPATH_RCVHDRSZ_SET) {
-               if (dd->ipath_rcvhdrsize != rhdrsize) {
-                       dev_info(&dd->pcidev->dev,
-                                "Error: can't set protocol header "
-                                "size %u, already %u\n",
-                                rhdrsize, dd->ipath_rcvhdrsize);
-                       ret = -EAGAIN;
-               } else
-                       ipath_cdbg(VERBOSE, "Reuse same protocol header "
-                                  "size %u\n", dd->ipath_rcvhdrsize);
-       } else if (rhdrsize > (dd->ipath_rcvhdrentsize -
-                              (sizeof(u64) / sizeof(u32)))) {
-               ipath_dbg("Error: can't set protocol header size %u "
-                         "(> max %u)\n", rhdrsize,
-                         dd->ipath_rcvhdrentsize -
-                         (u32) (sizeof(u64) / sizeof(u32)));
-               ret = -EOVERFLOW;
-       } else {
-               dd->ipath_flags |= IPATH_RCVHDRSZ_SET;
-               dd->ipath_rcvhdrsize = rhdrsize;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
-                                dd->ipath_rcvhdrsize);
-               ipath_cdbg(VERBOSE, "Set protocol header size to %u\n",
-                          dd->ipath_rcvhdrsize);
-       }
-       return ret;
-}
-
-/*
- * debugging code and stats updates if no pio buffers available.
- */
-static noinline void no_pio_bufs(struct ipath_devdata *dd)
-{
-       unsigned long *shadow = dd->ipath_pioavailshadow;
-       __le64 *dma = (__le64 *)dd->ipath_pioavailregs_dma;
-
-       dd->ipath_upd_pio_shadow = 1;
-
-       /*
-        * not atomic, but if we lose a stat count once in a while, that's OK
-        */
-       ipath_stats.sps_nopiobufs++;
-       if (!(++dd->ipath_consec_nopiobuf % 100000)) {
-               ipath_force_pio_avail_update(dd); /* at start */
-               ipath_dbg("%u tries no piobufavail ts%lx; dmacopy: "
-                       "%llx %llx %llx %llx\n"
-                       "ipath  shadow:  %lx %lx %lx %lx\n",
-                       dd->ipath_consec_nopiobuf,
-                       (unsigned long)get_cycles(),
-                       (unsigned long long) le64_to_cpu(dma[0]),
-                       (unsigned long long) le64_to_cpu(dma[1]),
-                       (unsigned long long) le64_to_cpu(dma[2]),
-                       (unsigned long long) le64_to_cpu(dma[3]),
-                       shadow[0], shadow[1], shadow[2], shadow[3]);
-               /*
-                * 4 buffers per byte, 4 registers above, cover rest
-                * below
-                */
-               if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) >
-                   (sizeof(shadow[0]) * 4 * 4))
-                       ipath_dbg("2nd group: dmacopy: "
-                                 "%llx %llx %llx %llx\n"
-                                 "ipath  shadow:  %lx %lx %lx %lx\n",
-                                 (unsigned long long)le64_to_cpu(dma[4]),
-                                 (unsigned long long)le64_to_cpu(dma[5]),
-                                 (unsigned long long)le64_to_cpu(dma[6]),
-                                 (unsigned long long)le64_to_cpu(dma[7]),
-                                 shadow[4], shadow[5], shadow[6], shadow[7]);
-
-               /* at end, so update likely happened */
-               ipath_reset_availshadow(dd);
-       }
-}
-
-/*
- * common code for normal driver pio buffer allocation, and reserved
- * allocation.
- *
- * do appropriate marking as busy, etc.
- * returns a pointer to the buffer if one is found (the buffer number is
- * returned via pbufnum), otherwise NULL.
- */
-static u32 __iomem *ipath_getpiobuf_range(struct ipath_devdata *dd,
-       u32 *pbufnum, u32 first, u32 last, u32 firsti)
-{
-       int i, j, updated = 0;
-       unsigned piobcnt;
-       unsigned long flags;
-       unsigned long *shadow = dd->ipath_pioavailshadow;
-       u32 __iomem *buf;
-
-       piobcnt = last - first;
-       if (dd->ipath_upd_pio_shadow) {
-               /*
-                * Minor optimization.  If we had no buffers on last call,
-                * start out by doing the update; continue and do the scan
-                * even if no buffers were updated, to be paranoid.
-                */
-               ipath_update_pio_bufs(dd);
-               updated++;
-               i = first;
-       } else
-               i = firsti;
-rescan:
-       /*
-        * while test_and_set_bit() is atomic, we do that and then the
-        * change_bit(), and the pair is not.  See if this is the cause
-        * of the remaining armlaunch errors.
-        */
-       spin_lock_irqsave(&ipath_pioavail_lock, flags);
-       for (j = 0; j < piobcnt; j++, i++) {
-               if (i >= last)
-                       i = first;
-               if (__test_and_set_bit((2 * i) + 1, shadow))
-                       continue;
-               /* flip generation bit */
-               __change_bit(2 * i, shadow);
-               break;
-       }
-       spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-
-       if (j == piobcnt) {
-               if (!updated) {
-                       /*
-                        * first time through; shadow exhausted, but may be
-                        * buffers available, try an update and then rescan.
-                        */
-                       ipath_update_pio_bufs(dd);
-                       updated++;
-                       i = first;
-                       goto rescan;
-               } else if (updated == 1 && piobcnt <=
-                       ((dd->ipath_sendctrl
-                       >> INFINIPATH_S_UPDTHRESH_SHIFT) &
-                       INFINIPATH_S_UPDTHRESH_MASK)) {
-                       /*
-                        * for chips supporting and using the update
-                        * threshold, we need to force an update of the
-                        * in-memory copy if the count is less than the
-                        * threshold, then check one more time.
-                        */
-                       ipath_force_pio_avail_update(dd);
-                       ipath_update_pio_bufs(dd);
-                       updated++;
-                       i = first;
-                       goto rescan;
-               }
-
-               no_pio_bufs(dd);
-               buf = NULL;
-       } else {
-               if (i < dd->ipath_piobcnt2k)
-                       buf = (u32 __iomem *) (dd->ipath_pio2kbase +
-                                              i * dd->ipath_palign);
-               else
-                       buf = (u32 __iomem *)
-                               (dd->ipath_pio4kbase +
-                                (i - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
-               if (pbufnum)
-                       *pbufnum = i;
-       }
-
-       return buf;
-}
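The availability shadow manipulated above keeps two bits per PIO buffer: bit 2*i+1 is the busy bit and bit 2*i is a generation bit flipped each time the buffer is handed out. A minimal user-space model of the claim step, non-atomic and purely to show the bit layout; the names are invented.

/* shadow.c: build with gcc shadow.c -o shadow */
#include <stdio.h>

#define NBUFS 16
#define BITS_PER_WORD (8 * sizeof(unsigned long))

static unsigned long shadow[(2 * NBUFS + BITS_PER_WORD - 1) / BITS_PER_WORD];

static int test_and_set(unsigned nr)
{
        unsigned long mask = 1UL << (nr % BITS_PER_WORD);
        unsigned long *p = &shadow[nr / BITS_PER_WORD];
        int old = !!(*p & mask);

        *p |= mask;
        return old;
}

static void flip(unsigned nr)
{
        shadow[nr / BITS_PER_WORD] ^= 1UL << (nr % BITS_PER_WORD);
}

/* Claim buffer i: set the busy bit (2*i + 1); if it was already set the
 * caller must try another buffer; otherwise flip the generation bit (2*i). */
static int claim(unsigned i)
{
        if (test_and_set(2 * i + 1))
                return -1;
        flip(2 * i);
        return 0;
}

int main(void)
{
        printf("first claim of buf 3: %d\n", claim(3));   /* 0: success */
        printf("second claim of buf 3: %d\n", claim(3));  /* -1: busy */
        return 0;
}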
-
-/**
- * ipath_getpiobuf - find an available pio buffer
- * @dd: the infinipath device
- * @plen: the size of the PIO buffer needed in 32-bit words
- * @pbufnum: the buffer number is placed here
- */
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *dd, u32 plen, u32 *pbufnum)
-{
-       u32 __iomem *buf;
-       u32 pnum, nbufs;
-       u32 first, lasti;
-
-       if (plen + 1 >= IPATH_SMALLBUF_DWORDS) {
-               first = dd->ipath_piobcnt2k;
-               lasti = dd->ipath_lastpioindexl;
-       } else {
-               first = 0;
-               lasti = dd->ipath_lastpioindex;
-       }
-       nbufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-       buf = ipath_getpiobuf_range(dd, &pnum, first, nbufs, lasti);
-
-       if (buf) {
-               /*
-                * Set next starting place.  It's just an optimization,
-                * it doesn't matter who wins on this, so no locking
-                */
-               if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
-                       dd->ipath_lastpioindexl = pnum + 1;
-               else
-                       dd->ipath_lastpioindex = pnum + 1;
-               if (dd->ipath_upd_pio_shadow)
-                       dd->ipath_upd_pio_shadow = 0;
-               if (dd->ipath_consec_nopiobuf)
-                       dd->ipath_consec_nopiobuf = 0;
-               ipath_cdbg(VERBOSE, "Return piobuf%u %uk @ %p\n",
-                          pnum, (pnum < dd->ipath_piobcnt2k) ? 2 : 4, buf);
-               if (pbufnum)
-                       *pbufnum = pnum;
-
-       }
-       return buf;
-}
-
-/**
- * ipath_chg_pioavailkernel - change which send buffers are available for kernel
- * @dd: the infinipath device
- * @start: the starting send buffer number
- * @len: the number of send buffers
- * @avail: true if the buffers are available for kernel use, false otherwise
- */
-void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
-                             unsigned len, int avail)
-{
-       unsigned long flags;
-       unsigned end, cnt = 0;
-
-       /* There are two bits per send buffer (busy and generation) */
-       start *= 2;
-       end = start + len * 2;
-
-       spin_lock_irqsave(&ipath_pioavail_lock, flags);
-       /* Set or clear the busy bit in the shadow. */
-       while (start < end) {
-               if (avail) {
-                       unsigned long dma;
-                       int i, im;
-                       /*
-                        * the BUSY bit will never be set, because we disarm
-                        * the user buffers before we hand them back to the
-                        * kernel.  We do have to make sure the generation
-                        * bit is set correctly in shadow, since it could
-                        * have changed many times while allocated to user.
-                        * We can't use the bitmap functions on the full
-                        * dma array because it is always little-endian, so
-                        * we have to flip to host-order first.
-                        * BITS_PER_LONG is slightly wrong, since it's
-                        * always 64 bits per register in chip...
-                        * We only work on 64 bit kernels, so that's OK.
-                        */
-                       /* deal with 6110 chip bug on high register #s */
-                       i = start / BITS_PER_LONG;
-                       im = (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS)) ?
-                               i ^ 1 : i;
-                       __clear_bit(INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT
-                               + start, dd->ipath_pioavailshadow);
-                       dma = (unsigned long) le64_to_cpu(
-                               dd->ipath_pioavailregs_dma[im]);
-                       if (test_bit((INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
-                               + start) % BITS_PER_LONG, &dma))
-                               __set_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
-                                       + start, dd->ipath_pioavailshadow);
-                       else
-                               __clear_bit(INFINIPATH_SENDPIOAVAIL_CHECK_SHIFT
-                                       + start, dd->ipath_pioavailshadow);
-                       __set_bit(start, dd->ipath_pioavailkernel);
-               } else {
-                       __set_bit(start + INFINIPATH_SENDPIOAVAIL_BUSY_SHIFT,
-                               dd->ipath_pioavailshadow);
-                       __clear_bit(start, dd->ipath_pioavailkernel);
-               }
-               start += 2;
-       }
-
-       if (dd->ipath_pioupd_thresh) {
-               end = 2 * (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
-               cnt = bitmap_weight(dd->ipath_pioavailkernel, end);
-       }
-       spin_unlock_irqrestore(&ipath_pioavail_lock, flags);
-
-       /*
-        * When moving buffers from kernel to user, if number assigned to
-        * the user is less than the pio update threshold, and threshold
-        * is supported (cnt was computed > 0), drop the update threshold
-        * so we update at least once per allocated number of buffers.
-        * In any case, if the kernel buffers are less than the threshold,
-        * drop the threshold.  We don't bother increasing it, having once
-        * decreased it, since it would typically just cycle back and forth.
-        * If we don't decrease below buffers in use, we can wait a long
-        * time for an update, until some other context uses PIO buffers.
-        */
-       if (!avail && len < cnt)
-               cnt = len;
-       if (cnt < dd->ipath_pioupd_thresh) {
-               dd->ipath_pioupd_thresh = cnt;
-               ipath_dbg("Decreased pio update threshold to %u\n",
-                       dd->ipath_pioupd_thresh);
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl &= ~(INFINIPATH_S_UPDTHRESH_MASK
-                       << INFINIPATH_S_UPDTHRESH_SHIFT);
-               dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
-                       << INFINIPATH_S_UPDTHRESH_SHIFT;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-       }
-}
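
The comment above notes that the chip's pioavail DMA image is always little-endian, so a bit can only be tested after converting the 64-bit register image to host order. A small userspace-flavoured sketch of just that step, assuming glibc's le64toh() from <endian.h> in place of the kernel's le64_to_cpu() (illustration only, not driver code):

        #include <endian.h>     /* le64toh(); assumed glibc-style helper */
        #include <stdbool.h>
        #include <stdint.h>

        /* dma_regs[] stands in for the chip's little-endian pioavail image. */
        static bool dma_bit_set(const uint64_t *dma_regs, unsigned int bit)
        {
                uint64_t host = le64toh(dma_regs[bit / 64]);    /* host order first */

                return (host >> (bit % 64)) & 1;
        }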
-
-/**
- * ipath_create_rcvhdrq - create a receive header queue
- * @dd: the infinipath device
- * @pd: the port data
- *
- * this must be contiguous memory (from an i/o perspective), and must be
- * DMA'able (which means for some systems, it will go through an IOMMU,
- * or be forced into a low address range).
- */
-int ipath_create_rcvhdrq(struct ipath_devdata *dd,
-                        struct ipath_portdata *pd)
-{
-       int ret = 0;
-
-       if (!pd->port_rcvhdrq) {
-               dma_addr_t phys_hdrqtail;
-               gfp_t gfp_flags = GFP_USER | __GFP_COMP;
-               int amt = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-                               sizeof(u32), PAGE_SIZE);
-
-               pd->port_rcvhdrq = dma_alloc_coherent(
-                       &dd->pcidev->dev, amt, &pd->port_rcvhdrq_phys,
-                       gfp_flags);
-
-               if (!pd->port_rcvhdrq) {
-                       ipath_dev_err(dd, "attempt to allocate %d bytes "
-                                     "for port %u rcvhdrq failed\n",
-                                     amt, pd->port_port);
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-
-               if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-                       pd->port_rcvhdrtail_kvaddr = dma_alloc_coherent(
-                               &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail,
-                               GFP_KERNEL);
-                       if (!pd->port_rcvhdrtail_kvaddr) {
-                               ipath_dev_err(dd, "attempt to allocate 1 page "
-                                       "for port %u rcvhdrqtailaddr "
-                                       "failed\n", pd->port_port);
-                               ret = -ENOMEM;
-                               dma_free_coherent(&dd->pcidev->dev, amt,
-                                       pd->port_rcvhdrq,
-                                       pd->port_rcvhdrq_phys);
-                               pd->port_rcvhdrq = NULL;
-                               goto bail;
-                       }
-                       pd->port_rcvhdrqtailaddr_phys = phys_hdrqtail;
-                       ipath_cdbg(VERBOSE, "port %d hdrtailaddr, %llx "
-                                  "physical\n", pd->port_port,
-                                  (unsigned long long) phys_hdrqtail);
-               }
-
-               pd->port_rcvhdrq_size = amt;
-
-               ipath_cdbg(VERBOSE, "%d pages at %p (phys %lx) size=%lu "
-                          "for port %u rcvhdr Q\n",
-                          amt >> PAGE_SHIFT, pd->port_rcvhdrq,
-                          (unsigned long) pd->port_rcvhdrq_phys,
-                          (unsigned long) pd->port_rcvhdrq_size,
-                          pd->port_port);
-       } else {
-               ipath_cdbg(VERBOSE, "reuse port %d rcvhdrq @%p %llx phys; "
-                          "hdrtailaddr@%p %llx physical\n",
-                          pd->port_port, pd->port_rcvhdrq,
-                          (unsigned long long) pd->port_rcvhdrq_phys,
-                          pd->port_rcvhdrtail_kvaddr, (unsigned long long)
-                          pd->port_rcvhdrqtailaddr_phys);
-       }
-       /* clear for security and sanity on each use */
-       memset(pd->port_rcvhdrq, 0, pd->port_rcvhdrq_size);
-       if (pd->port_rcvhdrtail_kvaddr)
-               memset(pd->port_rcvhdrtail_kvaddr, 0, PAGE_SIZE);
-
-       /*
-        * tell chip each time we init it, even if we are re-using previous
-        * memory (we zero the register at process close)
-        */
-       ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
-                             pd->port_port, pd->port_rcvhdrqtailaddr_phys);
-       ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
-                             pd->port_port, pd->port_rcvhdrq_phys);
-
-bail:
-       return ret;
-}
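
As a side note on the allocation size above: it is simply the header count times the per-header entry size (counted in 32-bit words), rounded up to whole pages before being handed to dma_alloc_coherent(). A hypothetical standalone helper for the same arithmetic, with an assumed 4 KiB page size:

        #include <stdint.h>

        #define EXAMPLE_PAGE_SIZE 4096UL        /* assumed page size, illustration only */

        /* rcvhdrq footprint: entry count * entry size (in 32-bit words),
         * rounded up to whole pages, matching the ALIGN() above. */
        static unsigned long rcvhdrq_bytes(uint32_t hdrcnt, uint32_t entsize_dwords)
        {
                unsigned long amt = (unsigned long)hdrcnt * entsize_dwords * sizeof(uint32_t);

                return (amt + EXAMPLE_PAGE_SIZE - 1) & ~(EXAMPLE_PAGE_SIZE - 1);
        }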
-
-
-/*
- * Flush all sends that might be in the ready to send state, as well as any
- * that are in the process of being sent.   Used whenever we need to be
- * sure the send side is idle.  Cleans up all buffer state by canceling
- * all pio buffers, and issuing an abort, which cleans up anything in the
- * launch fifo.  The cancel is superfluous on some chip versions, but
- * it's safer to always do it.
- * PIOAvail bits are updated by the chip as if normal send had happened.
- */
-void ipath_cancel_sends(struct ipath_devdata *dd, int restore_sendctrl)
-{
-       unsigned long flags;
-
-       if (dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) {
-               ipath_cdbg(VERBOSE, "Ignore while in autonegotiation\n");
-               goto bail;
-       }
-       /*
-        * If we have SDMA, and it's not disabled, we have to kick off the
-        * abort state machine, provided we aren't already aborting.
-        * If we are in the process of aborting SDMA (!DISABLED, but ABORTING),
-        * we skip the rest of this routine. It is already "in progress"
-        */
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA) {
-               int skip_cancel;
-               unsigned long *statp = &dd->ipath_sdma_status;
-
-               spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-               skip_cancel =
-                       test_and_set_bit(IPATH_SDMA_ABORTING, statp)
-                       && !test_bit(IPATH_SDMA_DISABLED, statp);
-               spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-               if (skip_cancel)
-                       goto bail;
-       }
-
-       ipath_dbg("Cancelling all in-progress send buffers\n");
-
-       /* skip armlaunch errs for a while */
-       dd->ipath_lastcancel = jiffies + HZ / 2;
-
-       /*
-        * The abort bit is auto-clearing.  We also don't want pioavail
-        * update happening during this, and we don't want any other
-        * sends going out, so turn those off for the duration.  We read
-        * the scratch register to be sure that cancels and the abort
-        * have taken effect in the chip.  Otherwise the two parts are the
-        * same as ipath_force_pio_avail_update().
-        */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl &= ~(INFINIPATH_S_PIOBUFAVAILUPD
-               | INFINIPATH_S_PIOENABLE);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-               dd->ipath_sendctrl | INFINIPATH_S_ABORT);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       /* disarm all send buffers */
-       ipath_disarm_piobufs(dd, 0,
-               dd->ipath_piobcnt2k + dd->ipath_piobcnt4k);
-
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-               set_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
-
-       if (restore_sendctrl) {
-               /* else done by caller later if needed */
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl |= INFINIPATH_S_PIOBUFAVAILUPD |
-                       INFINIPATH_S_PIOENABLE;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl);
-               /* and again, be sure all have hit the chip */
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-       }
-
-       if ((dd->ipath_flags & IPATH_HAS_SEND_DMA) &&
-           !test_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status) &&
-           test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)) {
-               spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-               /* only wait so long for intr */
-               dd->ipath_sdma_abort_intr_timeout = jiffies + HZ;
-               dd->ipath_sdma_reset_wait = 200;
-               if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-                       tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
-               spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-       }
-bail:;
-}
-
-/*
- * Force an update of in-memory copy of the pioavail registers, when
- * needed for any of a variety of reasons.  We read the scratch register
- * to make it highly likely that the update will have happened by the
- * time we return.  If already off (as in cancel_sends above), this
- * routine is a nop, on the assumption that the caller will "do the
- * right thing".
- */
-void ipath_force_pio_avail_update(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       if (dd->ipath_sendctrl & INFINIPATH_S_PIOBUFAVAILUPD) {
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                       dd->ipath_sendctrl);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       }
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-}
-
-static void ipath_set_ib_lstate(struct ipath_devdata *dd, int linkcmd,
-                               int linitcmd)
-{
-       u64 mod_wd;
-       static const char *what[4] = {
-               [0] = "NOP",
-               [INFINIPATH_IBCC_LINKCMD_DOWN] = "DOWN",
-               [INFINIPATH_IBCC_LINKCMD_ARMED] = "ARMED",
-               [INFINIPATH_IBCC_LINKCMD_ACTIVE] = "ACTIVE"
-       };
-
-       if (linitcmd == INFINIPATH_IBCC_LINKINITCMD_DISABLE) {
-               /*
-                * If we are told to disable, note that so link-recovery
-                * code does not attempt to bring us back up.
-                */
-               preempt_disable();
-               dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
-               preempt_enable();
-       } else if (linitcmd) {
-               /*
-                * Any other linkinitcmd will lead to LINKDOWN and then
-                * to INIT (if all is well), so clear flag to let
-                * link-recovery code attempt to bring us back up.
-                */
-               preempt_disable();
-               dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
-               preempt_enable();
-       }
-
-       mod_wd = (linkcmd << dd->ibcc_lc_shift) |
-               (linitcmd << INFINIPATH_IBCC_LINKINITCMD_SHIFT);
-       ipath_cdbg(VERBOSE,
-               "Moving unit %u to %s (initcmd=0x%x), current ltstate is %s\n",
-               dd->ipath_unit, what[linkcmd], linitcmd,
-               ipath_ibcstatus_str[ipath_ib_linktrstate(dd,
-                       ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus))]);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                        dd->ipath_ibcctrl | mod_wd);
-       /* read from chip so write is flushed */
-       (void) ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-}
-
-int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
-{
-       u32 lstate;
-       int ret;
-
-       switch (newstate) {
-       case IPATH_IB_LINKDOWN_ONLY:
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN, 0);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINKDOWN:
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
-                                       INFINIPATH_IBCC_LINKINITCMD_POLL);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINKDOWN_SLEEP:
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
-                                       INFINIPATH_IBCC_LINKINITCMD_SLEEP);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINKDOWN_DISABLE:
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_DOWN,
-                                       INFINIPATH_IBCC_LINKINITCMD_DISABLE);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINKARM:
-               if (dd->ipath_flags & IPATH_LINKARMED) {
-                       ret = 0;
-                       goto bail;
-               }
-               if (!(dd->ipath_flags &
-                     (IPATH_LINKINIT | IPATH_LINKACTIVE))) {
-                       ret = -EINVAL;
-                       goto bail;
-               }
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ARMED, 0);
-
-               /*
-                * Since the port can transition to ACTIVE by receiving
-                * a non VL 15 packet, wait for either state.
-                */
-               lstate = IPATH_LINKARMED | IPATH_LINKACTIVE;
-               break;
-
-       case IPATH_IB_LINKACTIVE:
-               if (dd->ipath_flags & IPATH_LINKACTIVE) {
-                       ret = 0;
-                       goto bail;
-               }
-               if (!(dd->ipath_flags & IPATH_LINKARMED)) {
-                       ret = -EINVAL;
-                       goto bail;
-               }
-               ipath_set_ib_lstate(dd, INFINIPATH_IBCC_LINKCMD_ACTIVE, 0);
-               lstate = IPATH_LINKACTIVE;
-               break;
-
-       case IPATH_IB_LINK_LOOPBACK:
-               dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
-               dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-
-               /* turn heartbeat off, as it causes loopback to fail */
-               dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-                                      IPATH_IB_HRTBT_OFF);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       case IPATH_IB_LINK_EXTERNAL:
-               dev_info(&dd->pcidev->dev,
-                       "Disabling IB local loopback (normal)\n");
-               dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-                                      IPATH_IB_HRTBT_ON);
-               dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-               /* don't wait */
-               ret = 0;
-               goto bail;
-
-       /*
-        * Heartbeat can be explicitly enabled by the user via
-        * "hrtbt_enable" "file", and if disabled, trying to enable here
-        * will have no effect.  Implicit changes (heartbeat off when
-        * loopback on, and vice versa) are included to ease testing.
-        */
-       case IPATH_IB_LINK_HRTBT:
-               ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-                       IPATH_IB_HRTBT_ON);
-               goto bail;
-
-       case IPATH_IB_LINK_NO_HRTBT:
-               ret = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
-                       IPATH_IB_HRTBT_OFF);
-               goto bail;
-
-       default:
-               ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
-               ret = -EINVAL;
-               goto bail;
-       }
-       ret = ipath_wait_linkstate(dd, lstate, 2000);
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_set_mtu - set the MTU
- * @dd: the infinipath device
- * @arg: the new MTU
- *
- * we can handle "any" incoming size; the issue here is whether we
- * need to restrict our outgoing size.   For now, we don't do any
- * sanity checking on this, and we don't deal with what happens to
- * programs that are already running when the size changes.
- * NOTE: changing the MTU will usually cause the IBC to go back to
- * link INIT state...
- */
-int ipath_set_mtu(struct ipath_devdata *dd, u16 arg)
-{
-       u32 piosize;
-       int changed = 0;
-       int ret;
-
-       /*
-        * mtu is IB data payload max.  It's the largest power of 2 less
-        * than piosize (or even larger, since it only really controls the
-        * largest we can receive; we can send the max of the mtu and
-        * piosize).  We check that it's one of the valid IB sizes.
-        */
-       if (arg != 256 && arg != 512 && arg != 1024 && arg != 2048 &&
-           (arg != 4096 || !ipath_mtu4096)) {
-               ipath_dbg("Trying to set invalid mtu %u, failing\n", arg);
-               ret = -EINVAL;
-               goto bail;
-       }
-       if (dd->ipath_ibmtu == arg) {
-               ret = 0;        /* same as current */
-               goto bail;
-       }
-
-       piosize = dd->ipath_ibmaxlen;
-       dd->ipath_ibmtu = arg;
-
-       if (arg >= (piosize - IPATH_PIO_MAXIBHDR)) {
-               /* Only if it's not the initial value (or reset to it) */
-               if (piosize != dd->ipath_init_ibmaxlen) {
-                       if (arg > piosize && arg <= dd->ipath_init_ibmaxlen)
-                               piosize = dd->ipath_init_ibmaxlen;
-                       dd->ipath_ibmaxlen = piosize;
-                       changed = 1;
-               }
-       } else if ((arg + IPATH_PIO_MAXIBHDR) != dd->ipath_ibmaxlen) {
-               piosize = arg + IPATH_PIO_MAXIBHDR;
-               ipath_cdbg(VERBOSE, "ibmaxlen was 0x%x, setting to 0x%x "
-                          "(mtu 0x%x)\n", dd->ipath_ibmaxlen, piosize,
-                          arg);
-               dd->ipath_ibmaxlen = piosize;
-               changed = 1;
-       }
-
-       if (changed) {
-               u64 ibc = dd->ipath_ibcctrl, ibdw;
-               /*
-                * update our housekeeping variables, and set IBC max
-                * size, same as init code; max IBC is max we allow in
-                * buffer, less the qword pbc, plus 1 for ICRC, in dwords
-                */
-               dd->ipath_ibmaxlen = piosize - 2 * sizeof(u32);
-               ibdw = (dd->ipath_ibmaxlen >> 2) + 1;
-               ibc &= ~(INFINIPATH_IBCC_MAXPKTLEN_MASK <<
-                        dd->ibcc_mpl_shift);
-               ibc |= ibdw << dd->ibcc_mpl_shift;
-               dd->ipath_ibcctrl = ibc;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-               dd->ipath_f_tidtemplate(dd);
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
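
To make the packet-length arithmetic in the comment above concrete, here is a throwaway check with a hypothetical 2 KB send buffer: the IBC field works out to (2048 - 8) / 4 + 1 = 511 dwords (buffer minus the 8-byte PBC, in dwords, plus one for the ICRC).

        #include <stdio.h>

        int main(void)
        {
                unsigned int piosize = 2048;            /* hypothetical 2 KB send buffer */
                unsigned int ibmaxlen = piosize - 2 * 4;        /* less the two-dword (qword) PBC */
                unsigned int ibdw = (ibmaxlen >> 2) + 1;        /* in dwords, plus one for the ICRC */

                printf("IBC max packet length field = %u dwords\n", ibdw);      /* prints 511 */
                return 0;
        }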
-
-int ipath_set_lid(struct ipath_devdata *dd, u32 lid, u8 lmc)
-{
-       dd->ipath_lid = lid;
-       dd->ipath_lmc = lmc;
-
-       dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LIDLMC, lid |
-               (~((1U << lmc) - 1)) << 16);
-
-       dev_info(&dd->pcidev->dev, "We got a lid: 0x%x\n", lid);
-
-       return 0;
-}
-
-
-/**
- * ipath_write_kreg_port - write a device's per-port 64-bit kernel register
- * @dd: the infinipath device
- * @regno: the register number to write
- * @port: the port containing the register
- * @value: the value to write
- *
- * Registers that vary with the chip implementation constants (port)
- * use this routine.
- */
-void ipath_write_kreg_port(const struct ipath_devdata *dd, ipath_kreg regno,
-                         unsigned port, u64 value)
-{
-       u16 where;
-
-       if (port < dd->ipath_portcnt &&
-           (regno == dd->ipath_kregs->kr_rcvhdraddr ||
-            regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
-               where = regno + port;
-       else
-               where = -1;
-
-       ipath_write_kreg(dd, where, value);
-}
-
-/*
- * Following deal with the "obviously simple" task of overriding the state
- * of the LEDS, which normally indicate link physical and logical status.
- * The complications arise in dealing with different hardware mappings
- * and the board-dependent routine being called from interrupts.
- * And then there's the requirement to _flash_ them.
- */
-#define LED_OVER_FREQ_SHIFT 8
-#define LED_OVER_FREQ_MASK (0xFF<<LED_OVER_FREQ_SHIFT)
-/* Below is "non-zero" to force override, but both actual LEDs are off */
-#define LED_OVER_BOTH_OFF (8)
-
-static void ipath_run_led_override(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-       int timeoff;
-       int pidx;
-       u64 lstate, ltstate, val;
-
-       if (!(dd->ipath_flags & IPATH_INITTED))
-               return;
-
-       pidx = dd->ipath_led_override_phase++ & 1;
-       dd->ipath_led_override = dd->ipath_led_override_vals[pidx];
-       timeoff = dd->ipath_led_override_timeoff;
-
-       /*
-        * Below potentially restores the LED values per current status;
-        * it should also possibly set up the traffic-blink register,
-        * but leave that to per-chip functions.
-        */
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-       ltstate = ipath_ib_linktrstate(dd, val);
-       lstate = ipath_ib_linkstate(dd, val);
-
-       dd->ipath_f_setextled(dd, lstate, ltstate);
-       mod_timer(&dd->ipath_led_override_timer, jiffies + timeoff);
-}
-
-void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val)
-{
-       int timeoff, freq;
-
-       if (!(dd->ipath_flags & IPATH_INITTED))
-               return;
-
-       /* First check if we are blinking. If not, use 1HZ polling */
-       timeoff = HZ;
-       freq = (val & LED_OVER_FREQ_MASK) >> LED_OVER_FREQ_SHIFT;
-
-       if (freq) {
-               /* For blink, set each phase from one nybble of val */
-               dd->ipath_led_override_vals[0] = val & 0xF;
-               dd->ipath_led_override_vals[1] = (val >> 4) & 0xF;
-               timeoff = (HZ << 4)/freq;
-       } else {
-               /* Non-blink set both phases the same. */
-               dd->ipath_led_override_vals[0] = val & 0xF;
-               dd->ipath_led_override_vals[1] = val & 0xF;
-       }
-       dd->ipath_led_override_timeoff = timeoff;
-
-       /*
-        * If the timer has not already been started, do so. Use a "quick"
-        * timeout so the function will be called soon, to look at our request.
-        */
-       if (atomic_inc_return(&dd->ipath_led_override_timer_active) == 1) {
-               /* Need to start timer */
-               setup_timer(&dd->ipath_led_override_timer,
-                               ipath_run_led_override, (unsigned long)dd);
-
-               dd->ipath_led_override_timer.expires = jiffies + 1;
-               add_timer(&dd->ipath_led_override_timer);
-       } else
-               atomic_dec(&dd->ipath_led_override_timer_active);
-}
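
The override word handled above packs the two blink phases into the low two nybbles and an optional blink frequency into bits 8-15. A tiny hypothetical decoder of the same encoding (EXAMPLE_HZ is an assumed tick rate, not the kernel's HZ):

        #include <stdio.h>

        #define EXAMPLE_HZ 250          /* assumed tick rate, illustration only */
        #define FREQ_SHIFT 8
        #define FREQ_MASK (0xFF << FREQ_SHIFT)

        int main(void)
        {
                unsigned int val = (4 << FREQ_SHIFT) | 0x2A;    /* freq=4, phases 0xA and 0x2 */
                unsigned int phase0 = val & 0xF;                /* LEDs for the first blink phase */
                unsigned int phase1 = (val >> 4) & 0xF;         /* LEDs for the second phase */
                unsigned int freq = (val & FREQ_MASK) >> FREQ_SHIFT;
                unsigned int timeoff = freq ? (EXAMPLE_HZ << 4) / freq : EXAMPLE_HZ;

                printf("phase0=0x%X phase1=0x%X timer=%u jiffies\n", phase0, phase1, timeoff);
                return 0;
        }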
-
-/**
- * ipath_shutdown_device - shut down a device
- * @dd: the infinipath device
- *
- * This is called to make the device quiet when we are about to
- * unload the driver, and also when the device is administratively
- * disabled.   It does not free any data structures.
- * Everything it does has to be setup again by ipath_init_chip(dd,1)
- */
-void ipath_shutdown_device(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-
-       ipath_dbg("Shutting down the device\n");
-
-       ipath_hol_up(dd); /* make sure user processes aren't suspended */
-
-       dd->ipath_flags |= IPATH_LINKUNK;
-       dd->ipath_flags &= ~(IPATH_INITTED | IPATH_LINKDOWN |
-                            IPATH_LINKINIT | IPATH_LINKARMED |
-                            IPATH_LINKACTIVE);
-       *dd->ipath_statusp &= ~(IPATH_STATUS_IB_CONF |
-                               IPATH_STATUS_IB_READY);
-
-       /* mask interrupts, but not errors */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-
-       dd->ipath_rcvctrl = 0;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-               teardown_sdma(dd);
-
-       /*
-        * gracefully stop all sends allowing any in progress to trickle out
-        * first.
-        */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl = 0;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       /* flush it */
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       /*
-        * Wait long enough for anything that's going to trickle out to have
-        * actually done so.
-        */
-       udelay(5);
-
-       dd->ipath_f_setextled(dd, 0, 0); /* make sure LEDs are off */
-
-       ipath_set_ib_lstate(dd, 0, INFINIPATH_IBCC_LINKINITCMD_DISABLE);
-       ipath_cancel_sends(dd, 0);
-
-       /*
-        * we are shutting down, so tell components that care.  We don't do
-        * this on just a link state change; much as with ethernet, a cable
-        * unplug, etc. doesn't change driver state.
-        */
-       signal_ib_event(dd, IB_EVENT_PORT_ERR);
-
-       /* disable IBC */
-       dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control | INFINIPATH_C_FREEZEMODE);
-
-       /*
-        * clear SerdesEnable and turn the LEDs off; do this here because
-        * we are unloading and can't count on interrupts to move things
-        * along.  Turn the LEDs off explicitly for the same reason.
-        */
-       dd->ipath_f_quiet_serdes(dd);
-
-       /* stop all the timers that might still be running */
-       del_timer_sync(&dd->ipath_hol_timer);
-       if (dd->ipath_stats_timer_active) {
-               del_timer_sync(&dd->ipath_stats_timer);
-               dd->ipath_stats_timer_active = 0;
-       }
-       if (dd->ipath_intrchk_timer.data) {
-               del_timer_sync(&dd->ipath_intrchk_timer);
-               dd->ipath_intrchk_timer.data = 0;
-       }
-       if (atomic_read(&dd->ipath_led_override_timer_active)) {
-               del_timer_sync(&dd->ipath_led_override_timer);
-               atomic_set(&dd->ipath_led_override_timer_active, 0);
-       }
-
-       /*
-        * clear all interrupts and errors, so that the next time the driver
-        * is loaded or device is enabled, we know that whatever is set
-        * happened while we were unloaded
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                        ~0ULL & ~INFINIPATH_HWE_MEMBISTFAILED);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
-
-       ipath_cdbg(VERBOSE, "Flush time and errors to EEPROM\n");
-       ipath_update_eeprom_log(dd);
-}
-
-/**
- * ipath_free_pddata - free a port's allocated data
- * @dd: the infinipath device
- * @pd: the portdata structure
- *
- * free up any allocated data for a port
- * This should not touch anything that would affect a simultaneous
- * re-allocation of port data, because it is called after ipath_mutex
- * is released (and can be called from reinit as well).
- * It should never change any chip state, or global driver state.
- * (The only exception to global state is freeing the port0 port0_skbs.)
- */
-void ipath_free_pddata(struct ipath_devdata *dd, struct ipath_portdata *pd)
-{
-       if (!pd)
-               return;
-
-       if (pd->port_rcvhdrq) {
-               ipath_cdbg(VERBOSE, "free closed port %d rcvhdrq @ %p "
-                          "(size=%lu)\n", pd->port_port, pd->port_rcvhdrq,
-                          (unsigned long) pd->port_rcvhdrq_size);
-               dma_free_coherent(&dd->pcidev->dev, pd->port_rcvhdrq_size,
-                                 pd->port_rcvhdrq, pd->port_rcvhdrq_phys);
-               pd->port_rcvhdrq = NULL;
-               if (pd->port_rcvhdrtail_kvaddr) {
-                       dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-                                        pd->port_rcvhdrtail_kvaddr,
-                                        pd->port_rcvhdrqtailaddr_phys);
-                       pd->port_rcvhdrtail_kvaddr = NULL;
-               }
-       }
-       if (pd->port_port && pd->port_rcvegrbuf) {
-               unsigned e;
-
-               for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-                       void *base = pd->port_rcvegrbuf[e];
-                       size_t size = pd->port_rcvegrbuf_size;
-
-                       ipath_cdbg(VERBOSE, "egrbuf free(%p, %lu), "
-                                  "chunk %u/%u\n", base,
-                                  (unsigned long) size,
-                                  e, pd->port_rcvegrbuf_chunks);
-                       dma_free_coherent(&dd->pcidev->dev, size,
-                               base, pd->port_rcvegrbuf_phys[e]);
-               }
-               kfree(pd->port_rcvegrbuf);
-               pd->port_rcvegrbuf = NULL;
-               kfree(pd->port_rcvegrbuf_phys);
-               pd->port_rcvegrbuf_phys = NULL;
-               pd->port_rcvegrbuf_chunks = 0;
-       } else if (pd->port_port == 0 && dd->ipath_port0_skbinfo) {
-               unsigned e;
-               struct ipath_skbinfo *skbinfo = dd->ipath_port0_skbinfo;
-
-               dd->ipath_port0_skbinfo = NULL;
-               ipath_cdbg(VERBOSE, "free closed port %d "
-                          "ipath_port0_skbinfo @ %p\n", pd->port_port,
-                          skbinfo);
-               for (e = 0; e < dd->ipath_p0_rcvegrcnt; e++)
-                       if (skbinfo[e].skb) {
-                               pci_unmap_single(dd->pcidev, skbinfo[e].phys,
-                                                dd->ipath_ibmaxlen,
-                                                PCI_DMA_FROMDEVICE);
-                               dev_kfree_skb(skbinfo[e].skb);
-                       }
-               vfree(skbinfo);
-       }
-       kfree(pd->port_tid_pg_list);
-       vfree(pd->subport_uregbase);
-       vfree(pd->subport_rcvegrbuf);
-       vfree(pd->subport_rcvhdr_base);
-       kfree(pd);
-}
-
-static int __init infinipath_init(void)
-{
-       int ret;
-
-       if (ipath_debug & __IPATH_DBG)
-               printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
-
-       /*
-        * These must be called before the driver is registered with
-        * the PCI subsystem.
-        */
-       idr_init(&unit_table);
-
-       ret = pci_register_driver(&ipath_driver);
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Unable to register driver: error %d\n", -ret);
-               goto bail_unit;
-       }
-
-       ret = ipath_init_ipathfs();
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME ": Unable to create "
-                      "ipathfs: error %d\n", -ret);
-               goto bail_pci;
-       }
-
-       goto bail;
-
-bail_pci:
-       pci_unregister_driver(&ipath_driver);
-
-bail_unit:
-       idr_destroy(&unit_table);
-
-bail:
-       return ret;
-}
-
-static void __exit infinipath_cleanup(void)
-{
-       ipath_exit_ipathfs();
-
-       ipath_cdbg(VERBOSE, "Unregistering pci driver\n");
-       pci_unregister_driver(&ipath_driver);
-
-       idr_destroy(&unit_table);
-}
-
-/**
- * ipath_reset_device - reset the chip if possible
- * @unit: the device to reset
- *
- * Whether or not reset is successful, we attempt to re-initialize the chip
- * (that is, much like a driver unload/reload).  We clear the INITTED flag
- * so that the various entry points will fail until we reinitialize.  For
- * now, we only allow this if no user ports are open that use chip resources
- */
-int ipath_reset_device(int unit)
-{
-       int ret, i;
-       struct ipath_devdata *dd = ipath_lookup(unit);
-       unsigned long flags;
-
-       if (!dd) {
-               ret = -ENODEV;
-               goto bail;
-       }
-
-       if (atomic_read(&dd->ipath_led_override_timer_active)) {
-               /* Need to stop LED timer, _then_ shut off LEDs */
-               del_timer_sync(&dd->ipath_led_override_timer);
-               atomic_set(&dd->ipath_led_override_timer_active, 0);
-       }
-
-       /* Shut off LEDs after we are sure timer is not running */
-       dd->ipath_led_override = LED_OVER_BOTH_OFF;
-       dd->ipath_f_setextled(dd, 0, 0);
-
-       dev_info(&dd->pcidev->dev, "Reset on unit %u requested\n", unit);
-
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT)) {
-               dev_info(&dd->pcidev->dev, "Invalid unit number %u or "
-                        "not initialized or not present\n", unit);
-               ret = -ENXIO;
-               goto bail;
-       }
-
-       spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-       if (dd->ipath_pd)
-               for (i = 1; i < dd->ipath_cfgports; i++) {
-                       if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
-                               continue;
-                       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-                       ipath_dbg("unit %u port %d is in use "
-                                 "(PID %u cmd %s), can't reset\n",
-                                 unit, i,
-                                 pid_nr(dd->ipath_pd[i]->port_pid),
-                                 dd->ipath_pd[i]->port_comm);
-                       ret = -EBUSY;
-                       goto bail;
-               }
-       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-               teardown_sdma(dd);
-
-       dd->ipath_flags &= ~IPATH_INITTED;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-       ret = dd->ipath_f_reset(dd);
-       if (ret == 1) {
-               ipath_dbg("Reinitializing unit %u after reset attempt\n",
-                         unit);
-               ret = ipath_init_chip(dd, 1);
-       } else
-               ret = -EAGAIN;
-       if (ret)
-               ipath_dev_err(dd, "Reinitialize unit %u after "
-                             "reset failed with %d\n", unit, ret);
-       else
-               dev_info(&dd->pcidev->dev, "Reinitialized unit %u after "
-                        "resetting\n", unit);
-
-bail:
-       return ret;
-}
-
-/*
- * send a signal to all the processes that have the driver open
- * through the normal interfaces (i.e., everything other than diags
- * interface).  Returns number of signalled processes.
- */
-static int ipath_signal_procs(struct ipath_devdata *dd, int sig)
-{
-       int i, sub, any = 0;
-       struct pid *pid;
-       unsigned long flags;
-
-       if (!dd->ipath_pd)
-               return 0;
-
-       spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-       for (i = 1; i < dd->ipath_cfgports; i++) {
-               if (!dd->ipath_pd[i] || !dd->ipath_pd[i]->port_cnt)
-                       continue;
-               pid = dd->ipath_pd[i]->port_pid;
-               if (!pid)
-                       continue;
-
-               dev_info(&dd->pcidev->dev, "context %d in use "
-                         "(PID %u), sending signal %d\n",
-                         i, pid_nr(pid), sig);
-               kill_pid(pid, sig, 1);
-               any++;
-               for (sub = 0; sub < INFINIPATH_MAX_SUBPORT; sub++) {
-                       pid = dd->ipath_pd[i]->port_subpid[sub];
-                       if (!pid)
-                               continue;
-                       dev_info(&dd->pcidev->dev, "sub-context "
-                               "%d:%d in use (PID %u), sending "
-                               "signal %d\n", i, sub, pid_nr(pid), sig);
-                       kill_pid(pid, sig, 1);
-                       any++;
-               }
-       }
-       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-       return any;
-}
-
-static void ipath_hol_signal_down(struct ipath_devdata *dd)
-{
-       if (ipath_signal_procs(dd, SIGSTOP))
-               ipath_dbg("Stopped some processes\n");
-       ipath_cancel_sends(dd, 1);
-}
-
-
-static void ipath_hol_signal_up(struct ipath_devdata *dd)
-{
-       if (ipath_signal_procs(dd, SIGCONT))
-               ipath_dbg("Continued some processes\n");
-}
-
-/*
- * Link is down: stop any user processes, and flush pending sends
- * to prevent HoL blocking, then start the HoL timer that
- * periodically continues, then stops procs, so they can detect
- * link down if they want, and do something about it.
- * Timer may already be running, so use mod_timer, not add_timer.
- */
-void ipath_hol_down(struct ipath_devdata *dd)
-{
-       dd->ipath_hol_state = IPATH_HOL_DOWN;
-       ipath_hol_signal_down(dd);
-       dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
-       dd->ipath_hol_timer.expires = jiffies +
-               msecs_to_jiffies(ipath_hol_timeout_ms);
-       mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
-}
-
-/*
- * link is up, continue any user processes, and ensure timer
- * is a nop, if running.  Let timer keep running, if set; it
- * will nop when it sees the link is up
- */
-void ipath_hol_up(struct ipath_devdata *dd)
-{
-       ipath_hol_signal_up(dd);
-       dd->ipath_hol_state = IPATH_HOL_UP;
-}
-
-/*
- * toggle the running/not running state of user processes
- * to prevent HoL blocking on chip resources, but still allow
- * user processes to do link down special case handling.
- * Should only be called via the timer
- */
-void ipath_hol_event(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
-       if (dd->ipath_hol_next == IPATH_HOL_DOWNSTOP
-               && dd->ipath_hol_state != IPATH_HOL_UP) {
-               dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
-               ipath_dbg("Stopping processes\n");
-               ipath_hol_signal_down(dd);
-       } else { /* may do "extra" if also in ipath_hol_up() */
-               dd->ipath_hol_next = IPATH_HOL_DOWNSTOP;
-               ipath_dbg("Continuing processes\n");
-               ipath_hol_signal_up(dd);
-       }
-       if (dd->ipath_hol_state == IPATH_HOL_UP)
-               ipath_dbg("link's up, don't resched timer\n");
-       else {
-               dd->ipath_hol_timer.expires = jiffies +
-                       msecs_to_jiffies(ipath_hol_timeout_ms);
-               mod_timer(&dd->ipath_hol_timer,
-                       dd->ipath_hol_timer.expires);
-       }
-}
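
In plain POSIX terms, the HoL machinery above just alternates SIGSTOP and SIGCONT across the processes holding chip contexts (kill_pid() in the kernel, kill() below). A hypothetical standalone demo of that one step, not driver code:

        #include <signal.h>
        #include <stdio.h>
        #include <sys/types.h>

        /* Freeze or thaw one process, the way ipath_hol_signal_down()/_up()
         * do for every open context. */
        static int toggle_proc(pid_t pid, int freeze)
        {
                int sig = freeze ? SIGSTOP : SIGCONT;

                if (kill(pid, sig) != 0) {
                        perror("kill");
                        return -1;
                }
                printf("sent %s to %d\n", freeze ? "SIGSTOP" : "SIGCONT", (int)pid);
                return 0;
        }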
-
-int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv)
-{
-       u64 val;
-
-       if (new_pol_inv > INFINIPATH_XGXS_RX_POL_MASK)
-               return -1;
-       if (dd->ipath_rx_pol_inv != new_pol_inv) {
-               dd->ipath_rx_pol_inv = new_pol_inv;
-               val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-               val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
-                        INFINIPATH_XGXS_RX_POL_SHIFT);
-               val |= ((u64)dd->ipath_rx_pol_inv) <<
-                       INFINIPATH_XGXS_RX_POL_SHIFT;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-       }
-       return 0;
-}
-
-/*
- * Disable and enable the armlaunch error.  Used for PIO bandwidth testing on
- * the 7220, which is count-based, rather than trigger-based.  Safe for the
- * driver check, since it's at init.   Not completely safe when used for
- * user-mode checking, since some error checking can be lost, but not
- * particularly risky, and only has problematic side-effects in the face of
- * very buggy user code.  There is no reference counting, but that's also
- * fine, given the intended use.
- */
-void ipath_enable_armlaunch(struct ipath_devdata *dd)
-{
-       dd->ipath_lasterror &= ~INFINIPATH_E_SPIOARMLAUNCH;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-               INFINIPATH_E_SPIOARMLAUNCH);
-       dd->ipath_errormask |= INFINIPATH_E_SPIOARMLAUNCH;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-               dd->ipath_errormask);
-}
-
-void ipath_disable_armlaunch(struct ipath_devdata *dd)
-{
-       /* so don't re-enable if already set */
-       dd->ipath_maskederrs &= ~INFINIPATH_E_SPIOARMLAUNCH;
-       dd->ipath_errormask &= ~INFINIPATH_E_SPIOARMLAUNCH;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-               dd->ipath_errormask);
-}
-
-module_init(infinipath_init);
-module_exit(infinipath_cleanup);
diff --git a/drivers/staging/rdma/ipath/ipath_eeprom.c b/drivers/staging/rdma/ipath/ipath_eeprom.c
deleted file mode 100644 (file)
index ef84107..0000000
+++ /dev/null
@@ -1,1183 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_kernel.h"
-
-/*
- * InfiniPath I2C driver for a serial eeprom.  This is not a generic
- * I2C interface.  For a start, the device we're using (Atmel AT24C11)
- * doesn't work like a regular I2C device.  It looks like one
- * electrically, but not logically.  Normal I2C devices have a single
- * 7-bit or 10-bit I2C address that they respond to.  Valid 7-bit
- * addresses range from 0x03 to 0x77.  Addresses 0x00 to 0x02 and 0x78
- * to 0x7F are special reserved addresses (e.g. 0x00 is the "general
- * call" address.)  The Atmel device, on the other hand, responds to ALL
- * 7-bit addresses.  It's designed to be the only device on a given I2C
- * bus.  A 7-bit address corresponds to the memory address within the
- * Atmel device itself.
- *
- * Also, the timing requirements mean more than simple software
- * bitbanging, with readbacks from chip to ensure timing (simple udelay
- * is not enough).
- *
- * This all means that accessing the device is specialized enough
- * that using the standard kernel I2C bitbanging interface would be
- * impossible.  For example, the core I2C eeprom driver expects to find
- * a device at one or more of a limited set of addresses only.  It doesn't
- * allow writing to an eeprom.  It also doesn't provide any means of
- * accessing eeprom contents from within the kernel, only via sysfs.
- */
-
-/* Added functionality for IBA7220-based cards */
-#define IPATH_EEPROM_DEV_V1 0xA0
-#define IPATH_EEPROM_DEV_V2 0xA2
-#define IPATH_TEMP_DEV 0x98
-#define IPATH_BAD_DEV (IPATH_EEPROM_DEV_V2+2)
-#define IPATH_NO_DEV (0xFF)
-
-/*
- * The number of I2C chains is proliferating. The table below brings
- * some order to the madness. The basic principle is that the
- * table is scanned from the top, and a "probe" is made to the
- * device probe_dev. If that succeeds, the chain is considered
- * to be of that type, and dd->i2c_chain_type is set to the index+1
- * of the entry.
- * The +1 is so static initialization can mean "unknown, do probe."
- */
-static struct i2c_chain_desc {
-       u8 probe_dev;   /* If seen at probe, chain is this type */
-       u8 eeprom_dev;  /* Dev addr (if any) for EEPROM */
-       u8 temp_dev;    /* Dev Addr (if any) for Temp-sense */
-} i2c_chains[] = {
-       { IPATH_BAD_DEV, IPATH_NO_DEV, IPATH_NO_DEV }, /* pre-iba7220 bds */
-       { IPATH_EEPROM_DEV_V1, IPATH_EEPROM_DEV_V1, IPATH_TEMP_DEV}, /* V1 */
-       { IPATH_EEPROM_DEV_V2, IPATH_EEPROM_DEV_V2, IPATH_TEMP_DEV}, /* V2 */
-       { IPATH_NO_DEV }
-};
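
A hedged sketch of the scan described in the comment above (not the driver's actual probe routine): walk the table from the top, probe each probe_dev, and memoize the winning index plus one so a static zero can still mean "unknown, do probe":

        #define NO_DEV 0xFF     /* mirrors IPATH_NO_DEV above */

        struct chain_desc {
                unsigned char probe_dev, eeprom_dev, temp_dev;
        };

        /* probe() is a caller-supplied stand-in for i2c_probe(); 0 == answered. */
        static int find_chain_type(const struct chain_desc *table,
                                   int (*probe)(unsigned char devaddr))
        {
                int i;

                for (i = 0; table[i].probe_dev != NO_DEV; i++)
                        if (probe(table[i].probe_dev) == 0)
                                return i + 1;   /* chain type is index + 1 */
                return 0;                       /* unknown: probe again later */
        }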
-
-enum i2c_type {
-       i2c_line_scl = 0,
-       i2c_line_sda
-};
-
-enum i2c_state {
-       i2c_line_low = 0,
-       i2c_line_high
-};
-
-#define READ_CMD 1
-#define WRITE_CMD 0
-
-/**
- * i2c_gpio_set - set a GPIO line
- * @dd: the infinipath device
- * @line: the line to set
- * @new_line_state: the state to set
- *
- * Returns 0 if the line was set to the new state successfully, non-zero
- * on error.
- */
-static int i2c_gpio_set(struct ipath_devdata *dd,
-                       enum i2c_type line,
-                       enum i2c_state new_line_state)
-{
-       u64 out_mask, dir_mask, *gpioval;
-       unsigned long flags = 0;
-
-       gpioval = &dd->ipath_gpio_out;
-
-       if (line == i2c_line_scl) {
-               dir_mask = dd->ipath_gpio_scl;
-               out_mask = (1UL << dd->ipath_gpio_scl_num);
-       } else {
-               dir_mask = dd->ipath_gpio_sda;
-               out_mask = (1UL << dd->ipath_gpio_sda_num);
-       }
-
-       spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-       if (new_line_state == i2c_line_high) {
-               /* tri-state the output rather than force high */
-               dd->ipath_extctrl &= ~dir_mask;
-       } else {
-               /* config line to be an output */
-               dd->ipath_extctrl |= dir_mask;
-       }
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
-
-       /* set output as well (no real verify) */
-       if (new_line_state == i2c_line_high)
-               *gpioval |= out_mask;
-       else
-               *gpioval &= ~out_mask;
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_out, *gpioval);
-       spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-       return 0;
-}
-
-/**
- * i2c_gpio_get - get a GPIO line state
- * @dd: the infinipath device
- * @line: the line to get
- * @curr_statep: where to put the line state
- *
- * Returns 0 if the line was set to the new state successfully, non-zero
- * on error.  curr_state is not set on error.
- */
-static int i2c_gpio_get(struct ipath_devdata *dd,
-                       enum i2c_type line,
-                       enum i2c_state *curr_statep)
-{
-       u64 read_val, mask;
-       int ret;
-       unsigned long flags = 0;
-
-       /* check args */
-       if (curr_statep == NULL) {
-               ret = 1;
-               goto bail;
-       }
-
-       /* config line to be an input */
-       if (line == i2c_line_scl)
-               mask = dd->ipath_gpio_scl;
-       else
-               mask = dd->ipath_gpio_sda;
-
-       spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-       dd->ipath_extctrl &= ~mask;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, dd->ipath_extctrl);
-       /*
-        * Below is very unlikely to reflect true input state if Output
-        * Enable actually changed.
-        */
-       read_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-       spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-       if (read_val & mask)
-               *curr_statep = i2c_line_high;
-       else
-               *curr_statep = i2c_line_low;
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * i2c_wait_for_writes - wait for a write
- * @dd: the infinipath device
- *
- * We use this instead of udelay directly, so we can make sure
- * that previous register writes have been flushed all the way
- * to the chip.  Since we are delaying anyway, the cost doesn't
- * hurt, and makes the bit twiddling more regular
- */
-static void i2c_wait_for_writes(struct ipath_devdata *dd)
-{
-       (void)ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-       rmb();
-}
-
-static void scl_out(struct ipath_devdata *dd, u8 bit)
-{
-       udelay(1);
-       i2c_gpio_set(dd, i2c_line_scl, bit ? i2c_line_high : i2c_line_low);
-
-       i2c_wait_for_writes(dd);
-}
-
-static void sda_out(struct ipath_devdata *dd, u8 bit)
-{
-       i2c_gpio_set(dd, i2c_line_sda, bit ? i2c_line_high : i2c_line_low);
-
-       i2c_wait_for_writes(dd);
-}
-
-static u8 sda_in(struct ipath_devdata *dd, int wait)
-{
-       enum i2c_state bit;
-
-       if (i2c_gpio_get(dd, i2c_line_sda, &bit))
-               ipath_dbg("get bit failed!\n");
-
-       if (wait)
-               i2c_wait_for_writes(dd);
-
-       return bit == i2c_line_high ? 1U : 0;
-}
-
-/**
- * i2c_ackrcv - see if ack following write is true
- * @dd: the infinipath device
- */
-static int i2c_ackrcv(struct ipath_devdata *dd)
-{
-       u8 ack_received;
-
-       /* AT ENTRY SCL = LOW */
-       /* change direction, ignore data */
-       ack_received = sda_in(dd, 1);
-       scl_out(dd, i2c_line_high);
-       ack_received = sda_in(dd, 1) == 0;
-       scl_out(dd, i2c_line_low);
-       return ack_received;
-}
-
-/**
- * rd_byte - read a byte, leaving ACK, STOP, etc up to caller
- * @dd: the infinipath device
- *
- * Returns byte shifted out of device
- */
-static int rd_byte(struct ipath_devdata *dd)
-{
-       int bit_cntr, data;
-
-       data = 0;
-
-       for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
-               data <<= 1;
-               scl_out(dd, i2c_line_high);
-               data |= sda_in(dd, 0);
-               scl_out(dd, i2c_line_low);
-       }
-       return data;
-}
-
-/**
- * wr_byte - write a byte, one bit at a time
- * @dd: the infinipath device
- * @data: the byte to write
- *
- * Returns 0 if we got the following ack, otherwise 1
- */
-static int wr_byte(struct ipath_devdata *dd, u8 data)
-{
-       int bit_cntr;
-       u8 bit;
-
-       for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
-               bit = (data >> bit_cntr) & 1;
-               sda_out(dd, bit);
-               scl_out(dd, i2c_line_high);
-               scl_out(dd, i2c_line_low);
-       }
-       return (!i2c_ackrcv(dd)) ? 1 : 0;
-}
-
-static void send_ack(struct ipath_devdata *dd)
-{
-       sda_out(dd, i2c_line_low);
-       scl_out(dd, i2c_line_high);
-       scl_out(dd, i2c_line_low);
-       sda_out(dd, i2c_line_high);
-}
-
-/**
- * i2c_startcmd - transmit the start condition, followed by address/cmd
- * @dd: the infinipath device
- * @offset_dir: direction byte
- *
- *      (both clock/data high, clock high, data low while clock is high)
- */
-static int i2c_startcmd(struct ipath_devdata *dd, u8 offset_dir)
-{
-       int res;
-
-       /* issue start sequence */
-       sda_out(dd, i2c_line_high);
-       scl_out(dd, i2c_line_high);
-       sda_out(dd, i2c_line_low);
-       scl_out(dd, i2c_line_low);
-
-       /* issue length and direction byte */
-       res = wr_byte(dd, offset_dir);
-
-       if (res)
-               ipath_cdbg(VERBOSE, "No ack to complete start\n");
-
-       return res;
-}
-
-/**
- * stop_cmd - transmit the stop condition
- * @dd: the infinipath device
- *
- * (both clock/data low, clock high, data high while clock is high)
- */
-static void stop_cmd(struct ipath_devdata *dd)
-{
-       scl_out(dd, i2c_line_low);
-       sda_out(dd, i2c_line_low);
-       scl_out(dd, i2c_line_high);
-       sda_out(dd, i2c_line_high);
-       udelay(2);
-}
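/*
 * Illustrative sketch, not part of the original file: how the bit-banged
 * primitives above compose into one complete transaction.  "devaddr" is a
 * hypothetical device address already shifted into the upper seven bits,
 * as the callers below pass it; READ_CMD is the read bit defined earlier
 * in this file.  Returns 0 on success, 1 if the device did not ACK.
 */
static int __maybe_unused i2c_read_one_byte_sketch(struct ipath_devdata *dd,
                                                   u8 devaddr, u8 *val)
{
        /* START condition followed by the address byte with the read bit set */
        if (i2c_startcmd(dd, devaddr | READ_CMD))
                return 1;
        /* clock one byte out of the device, then release the bus */
        *val = rd_byte(dd);
        stop_cmd(dd);
        return 0;
}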
-
-/**
- * eeprom_reset - reset I2C communication
- * @dd: the infinipath device
- */
-
-static int eeprom_reset(struct ipath_devdata *dd)
-{
-       int clock_cycles_left = 9;
-       u64 *gpioval = &dd->ipath_gpio_out;
-       int ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-       /* Make sure shadows are consistent */
-       dd->ipath_extctrl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extctrl);
-       *gpioval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_out);
-       spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-
-       ipath_cdbg(VERBOSE, "Resetting i2c eeprom; initial gpioout reg "
-                  "is %llx\n", (unsigned long long) *gpioval);
-
-       /*
-        * This is to get the i2c into a known state, by first going low,
-        * then tristate sda (and then tristate scl as first thing
-        * in loop)
-        */
-       scl_out(dd, i2c_line_low);
-       sda_out(dd, i2c_line_high);
-
-       /* Clock up to 9 cycles looking for SDA hi, then issue START and STOP */
-       while (clock_cycles_left--) {
-               scl_out(dd, i2c_line_high);
-
-               /* SDA seen high, issue START by dropping it while SCL high */
-               if (sda_in(dd, 0)) {
-                       sda_out(dd, i2c_line_low);
-                       scl_out(dd, i2c_line_low);
-                       /* ATMEL spec says must be followed by STOP. */
-                       scl_out(dd, i2c_line_high);
-                       sda_out(dd, i2c_line_high);
-                       ret = 0;
-                       goto bail;
-               }
-
-               scl_out(dd, i2c_line_low);
-       }
-
-       ret = 1;
-
-bail:
-       return ret;
-}
-
-/*
- * Probe for I2C device at specified address. Returns 0 for "success"
- * to match rest of this file.
- * Leave bus in "reasonable" state for further commands.
- */
-static int i2c_probe(struct ipath_devdata *dd, int devaddr)
-{
-       int ret;
-
-       ret = eeprom_reset(dd);
-       if (ret) {
-               ipath_dev_err(dd, "Failed reset probing device 0x%02X\n",
-                             devaddr);
-               return ret;
-       }
-       /*
-        * Reset no longer leaves bus in start condition, so normal
-        * i2c_startcmd() will do.
-        */
-       ret = i2c_startcmd(dd, devaddr | READ_CMD);
-       if (ret)
-               ipath_cdbg(VERBOSE, "Failed startcmd for device 0x%02X\n",
-                          devaddr);
-       else {
-               /*
-                * Device did respond. Complete a single-byte read, because some
-                * devices apparently cannot handle STOP immediately after they
-                * ACK the start-cmd.
-                */
-               int data;
-               data = rd_byte(dd);
-               stop_cmd(dd);
-               ipath_cdbg(VERBOSE, "Response from device 0x%02X\n", devaddr);
-       }
-       return ret;
-}
-
-/*
- * Returns the "i2c type". This is a pointer to a struct that describes
- * the I2C chain on this board. To minimize impact on struct ipath_devdata,
- * the (small integer) index into the table is actually memoized, rather
- * than the pointer.
- * Memoization is because the type is determined on the first call per chip.
- * An alternative would be to move type determination to early
- * init code.
- */
-static struct i2c_chain_desc *ipath_i2c_type(struct ipath_devdata *dd)
-{
-       int idx;
-
-       /* Get memoized index, from previous successful probes */
-       idx = dd->ipath_i2c_chain_type - 1;
-       if (idx >= 0 && idx < (ARRAY_SIZE(i2c_chains) - 1))
-               goto done;
-
-       idx = 0;
-       while (i2c_chains[idx].probe_dev != IPATH_NO_DEV) {
-               /* if probe succeeds, this is type */
-               if (!i2c_probe(dd, i2c_chains[idx].probe_dev))
-                       break;
-               ++idx;
-       }
-
-       /*
-        * Old EEPROM (first entry) may require a reset after probe,
-        * rather than being able to "start" after "stop"
-        */
-       if (idx == 0)
-               eeprom_reset(dd);
-
-       if (i2c_chains[idx].probe_dev == IPATH_NO_DEV)
-               idx = -1;
-       else
-               dd->ipath_i2c_chain_type = idx + 1;
-done:
-       return (idx >= 0) ? i2c_chains + idx : NULL;
-}
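/*
 * Illustrative sketch, not part of the original file: the "+1" memoization
 * encoding used above, where a stored ipath_i2c_chain_type of 0 means
 * "not yet probed" and a stored value of idx + 1 records a successful
 * probe of i2c_chains[idx].
 */
static inline int __maybe_unused i2c_chain_stored_to_idx_sketch(u8 stored)
{
        return stored ? stored - 1 : -1;        /* -1: chain type unknown */
}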
-
-static int ipath_eeprom_internal_read(struct ipath_devdata *dd,
-                                       u8 eeprom_offset, void *buffer, int len)
-{
-       int ret;
-       struct i2c_chain_desc *icd;
-       u8 *bp = buffer;
-
-       ret = 1;
-       icd = ipath_i2c_type(dd);
-       if (!icd)
-               goto bail;
-
-       if (icd->eeprom_dev == IPATH_NO_DEV) {
-               /* legacy not-really-I2C */
-               ipath_cdbg(VERBOSE, "Start command only address\n");
-               eeprom_offset = (eeprom_offset << 1) | READ_CMD;
-               ret = i2c_startcmd(dd, eeprom_offset);
-       } else {
-               /* Actual I2C */
-               ipath_cdbg(VERBOSE, "Start command uses devaddr\n");
-               if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
-                       ipath_dbg("Failed EEPROM startcmd\n");
-                       stop_cmd(dd);
-                       ret = 1;
-                       goto bail;
-               }
-               ret = wr_byte(dd, eeprom_offset);
-               stop_cmd(dd);
-               if (ret) {
-                       ipath_dev_err(dd, "Failed to write EEPROM address\n");
-                       ret = 1;
-                       goto bail;
-               }
-               ret = i2c_startcmd(dd, icd->eeprom_dev | READ_CMD);
-       }
-       if (ret) {
-               ipath_dbg("Failed startcmd for dev %02X\n", icd->eeprom_dev);
-               stop_cmd(dd);
-               ret = 1;
-               goto bail;
-       }
-
-       /*
-        * eeprom keeps clocking data out as long as we ack, automatically
-        * incrementing the address.
-        */
-       while (len-- > 0) {
-               /* get and store data */
-               *bp++ = rd_byte(dd);
-               /* send ack if not the last byte */
-               if (len)
-                       send_ack(dd);
-       }
-
-       stop_cmd(dd);
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-static int ipath_eeprom_internal_write(struct ipath_devdata *dd, u8 eeprom_offset,
-                                      const void *buffer, int len)
-{
-       int sub_len;
-       const u8 *bp = buffer;
-       int max_wait_time, i;
-       int ret;
-       struct i2c_chain_desc *icd;
-
-       ret = 1;
-       icd = ipath_i2c_type(dd);
-       if (!icd)
-               goto bail;
-
-       while (len > 0) {
-               if (icd->eeprom_dev == IPATH_NO_DEV) {
-                       if (i2c_startcmd(dd,
-                                        (eeprom_offset << 1) | WRITE_CMD)) {
-                               ipath_dbg("Failed to start cmd offset %u\n",
-                                       eeprom_offset);
-                               goto failed_write;
-                       }
-               } else {
-                       /* Real I2C */
-                       if (i2c_startcmd(dd, icd->eeprom_dev | WRITE_CMD)) {
-                               ipath_dbg("Failed EEPROM startcmd\n");
-                               goto failed_write;
-                       }
-                       ret = wr_byte(dd, eeprom_offset);
-                       if (ret) {
-                               ipath_dev_err(dd, "Failed to write EEPROM "
-                                             "address\n");
-                               goto failed_write;
-                       }
-               }
-
-               sub_len = min(len, 4);
-               eeprom_offset += sub_len;
-               len -= sub_len;
-
-               for (i = 0; i < sub_len; i++) {
-                       if (wr_byte(dd, *bp++)) {
-                               ipath_dbg("no ack after byte %u/%u (%u "
-                                         "total remain)\n", i, sub_len,
-                                         len + sub_len - i);
-                               goto failed_write;
-                       }
-               }
-
-               stop_cmd(dd);
-
-               /*
-                * wait for write complete by waiting for a successful
-                * read (the chip replies with a zero after the write
-                * cmd completes, and before it writes to the eeprom).
-                * The startcmd for the read will fail the ack until
-                * the writes have completed.   We do this inline to avoid
-                * the debug prints that are in the real read routine
-                * if the startcmd fails.
-                * We also use the proper device address, so it doesn't matter
-                * whether we have real eeprom_dev. legacy likes any address.
-                */
-               max_wait_time = 100;
-               while (i2c_startcmd(dd, icd->eeprom_dev | READ_CMD)) {
-                       stop_cmd(dd);
-                       if (!--max_wait_time) {
-                               ipath_dbg("Did not get successful read to "
-                                         "complete write\n");
-                               goto failed_write;
-                       }
-               }
-               /* now read (and ignore) the resulting byte */
-               rd_byte(dd);
-               stop_cmd(dd);
-       }
-
-       ret = 0;
-       goto bail;
-
-failed_write:
-       stop_cmd(dd);
-       ret = 1;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_eeprom_read - receives bytes from the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: address to read from
- * @buffer: where to store result
- * @len: number of bytes to receive
- */
-int ipath_eeprom_read(struct ipath_devdata *dd, u8 eeprom_offset,
-                       void *buff, int len)
-{
-       int ret;
-
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (!ret) {
-               ret = ipath_eeprom_internal_read(dd, eeprom_offset, buff, len);
-               mutex_unlock(&dd->ipath_eep_lock);
-       }
-
-       return ret;
-}
-
-/**
- * ipath_eeprom_write - writes data to the eeprom via I2C
- * @dd: the infinipath device
- * @eeprom_offset: where to place data
- * @buffer: data to write
- * @len: number of bytes to write
- */
-int ipath_eeprom_write(struct ipath_devdata *dd, u8 eeprom_offset,
-                       const void *buff, int len)
-{
-       int ret;
-
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (!ret) {
-               ret = ipath_eeprom_internal_write(dd, eeprom_offset, buff, len);
-               mutex_unlock(&dd->ipath_eep_lock);
-       }
-
-       return ret;
-}
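/*
 * Illustrative sketch, not part of the original file: a caller doing a
 * read-modify-write of one byte through the locked accessors above.
 * "offset" and "set_bits" are hypothetical; either call may return
 * nonzero on I2C failure, or -EINTR if the mutex wait is interrupted.
 */
static int __maybe_unused eeprom_rmw_byte_sketch(struct ipath_devdata *dd,
                                                 u8 offset, u8 set_bits)
{
        u8 val;
        int ret;

        ret = ipath_eeprom_read(dd, offset, &val, 1);
        if (ret)
                return ret;
        val |= set_bits;
        return ipath_eeprom_write(dd, offset, &val, 1);
}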
-
-static u8 flash_csum(struct ipath_flash *ifp, int adjust)
-{
-       u8 *ip = (u8 *) ifp;
-       u8 csum = 0, len;
-
-       /*
-        * Limit length checksummed to max length of actual data.
-        * Checksum of erased eeprom will still be bad, but we avoid
-        * reading past the end of the buffer we were passed.
-        */
-       len = ifp->if_length;
-       if (len > sizeof(struct ipath_flash))
-               len = sizeof(struct ipath_flash);
-       while (len--)
-               csum += *ip++;
-       csum -= ifp->if_csum;
-       csum = ~csum;
-       if (adjust)
-               ifp->if_csum = csum;
-
-       return csum;
-}
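/*
 * Illustrative sketch, not part of the original file: the checksum
 * convention implemented by flash_csum() expressed over a raw byte
 * buffer.  The stored checksum is the ones' complement of the 8-bit sum
 * of every byte in the checksummed region except the checksum byte
 * itself.  "len" is assumed to be the already-clamped if_length value
 * and "csum_off" the offset of if_csum within the buffer.
 */
static int __maybe_unused flash_csum_ok_sketch(const u8 *buf, u8 len,
                                               unsigned int csum_off)
{
        u8 sum = 0;
        unsigned int i;

        for (i = 0; i < len; i++)
                if (i != csum_off)
                        sum += buf[i];
        return (u8)~sum == buf[csum_off];
}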
-
-/**
- * ipath_get_eeprom_info - get the GUID from the i2c device
- * @dd: the infinipath device
- *
- * We have the capability to use the ipath_nguid field, and get
- * the guid from the first chip's flash, to use for all of them.
- */
-void ipath_get_eeprom_info(struct ipath_devdata *dd)
-{
-       void *buf;
-       struct ipath_flash *ifp;
-       __be64 guid;
-       int len, eep_stat;
-       u8 csum, *bguid;
-       int t = dd->ipath_unit;
-       struct ipath_devdata *dd0 = ipath_lookup(0);
-
-       if (t && dd0->ipath_nguid > 1 && t <= dd0->ipath_nguid) {
-               u8 oguid;
-               dd->ipath_guid = dd0->ipath_guid;
-               bguid = (u8 *) & dd->ipath_guid;
-
-               oguid = bguid[7];
-               bguid[7] += t;
-               if (oguid > bguid[7]) {
-                       if (bguid[6] == 0xff) {
-                               if (bguid[5] == 0xff) {
-                                       ipath_dev_err(
-                                               dd,
-                                               "Can't set %s GUID from "
-                                               "base, wraps to OUI!\n",
-                                               ipath_get_unit_name(t));
-                                       dd->ipath_guid = 0;
-                                       goto bail;
-                               }
-                               bguid[5]++;
-                       }
-                       bguid[6]++;
-               }
-               dd->ipath_nguid = 1;
-
-               ipath_dbg("nguid %u, so adding %u to device 0 guid, "
-                         "for %llx\n",
-                         dd0->ipath_nguid, t,
-                         (unsigned long long) be64_to_cpu(dd->ipath_guid));
-               goto bail;
-       }
-
-       /*
-        * read full flash, not just currently used part, since it may have
-        * been written with a newer definition
-        */
-       len = sizeof(struct ipath_flash);
-       buf = vmalloc(len);
-       if (!buf) {
-               ipath_dev_err(dd, "Couldn't allocate memory to read %u "
-                             "bytes from eeprom for GUID\n", len);
-               goto bail;
-       }
-
-       mutex_lock(&dd->ipath_eep_lock);
-       eep_stat = ipath_eeprom_internal_read(dd, 0, buf, len);
-       mutex_unlock(&dd->ipath_eep_lock);
-
-       if (eep_stat) {
-               ipath_dev_err(dd, "Failed reading GUID from eeprom\n");
-               goto done;
-       }
-       ifp = (struct ipath_flash *)buf;
-
-       csum = flash_csum(ifp, 0);
-       if (csum != ifp->if_csum) {
-               dev_info(&dd->pcidev->dev, "Bad I2C flash checksum: "
-                        "0x%x, not 0x%x\n", csum, ifp->if_csum);
-               goto done;
-       }
-       if (*(__be64 *) ifp->if_guid == cpu_to_be64(0) ||
-           *(__be64 *) ifp->if_guid == ~cpu_to_be64(0)) {
-               ipath_dev_err(dd, "Invalid GUID %llx from flash; "
-                             "ignoring\n",
-                             *(unsigned long long *) ifp->if_guid);
-               /* don't allow GUID if all 0 or all 1's */
-               goto done;
-       }
-
-       /* complain, but allow it */
-       if (*(u64 *) ifp->if_guid == 0x100007511000000ULL)
-               dev_info(&dd->pcidev->dev, "Warning, GUID %llx is "
-                        "default, probably not correct!\n",
-                        *(unsigned long long *) ifp->if_guid);
-
-       bguid = ifp->if_guid;
-       if (!bguid[0] && !bguid[1] && !bguid[2]) {
-               /* original incorrect GUID format in flash; fix in
-                * core copy, by shifting up 2 octets; don't need to
-                * change top octet, since both it and shifted are
-                * 0.. */
-               bguid[1] = bguid[3];
-               bguid[2] = bguid[4];
-               bguid[3] = bguid[4] = 0;
-               guid = *(__be64 *) ifp->if_guid;
-               ipath_cdbg(VERBOSE, "Old GUID format in flash, top 3 zero, "
-                          "shifting 2 octets\n");
-       } else
-               guid = *(__be64 *) ifp->if_guid;
-       dd->ipath_guid = guid;
-       dd->ipath_nguid = ifp->if_numguid;
-       /*
-        * Things are slightly complicated by the desire to transparently
-        * support both the Pathscale 10-digit serial number and the QLogic
-        * 13-character version.
-        */
-       if ((ifp->if_fversion > 1) && ifp->if_sprefix[0]
-               && ((u8 *)ifp->if_sprefix)[0] != 0xFF) {
-               /* This board has a Serial-prefix, which is stored
-                * elsewhere for backward-compatibility.
-                */
-               char *snp = dd->ipath_serial;
-               memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
-               snp[sizeof ifp->if_sprefix] = '\0';
-               len = strlen(snp);
-               snp += len;
-               len = (sizeof dd->ipath_serial) - len;
-               if (len > sizeof ifp->if_serial) {
-                       len = sizeof ifp->if_serial;
-               }
-               memcpy(snp, ifp->if_serial, len);
-       } else
-               memcpy(dd->ipath_serial, ifp->if_serial,
-                      sizeof ifp->if_serial);
-       if (!strstr(ifp->if_comment, "Tested successfully"))
-               ipath_dev_err(dd, "Board SN %s did not pass functional "
-                       "test: %s\n", dd->ipath_serial,
-                       ifp->if_comment);
-
-       ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
-                  (unsigned long long) be64_to_cpu(dd->ipath_guid));
-
-       memcpy(&dd->ipath_eep_st_errs, &ifp->if_errcntp, IPATH_EEP_LOG_CNT);
-       /*
-        * Power-on (actually "active") hours are kept as little-endian value
-        * in EEPROM, but as seconds in a (possibly as small as 24-bit)
-        * atomic_t while running.
-        */
-       atomic_set(&dd->ipath_active_time, 0);
-       dd->ipath_eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
-done:
-       vfree(buf);
-
-bail:;
-}
-
-/**
- * ipath_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the infinipath device
- *
- * Although the time is kept as seconds in the ipath_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct ipath_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-
-int ipath_update_eeprom_log(struct ipath_devdata *dd)
-{
-       void *buf;
-       struct ipath_flash *ifp;
-       int len, hi_water;
-       uint32_t new_time, new_hrs;
-       u8 csum;
-       int ret, idx;
-       unsigned long flags;
-
-       /* first, check if we actually need to do anything. */
-       ret = 0;
-       for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
-               if (dd->ipath_eep_st_new_errs[idx]) {
-                       ret = 1;
-                       break;
-               }
-       }
-       new_time = atomic_read(&dd->ipath_active_time);
-
-       if (ret == 0 && new_time < 3600)
-               return 0;
-
-       /*
-        * The quick-check above determined that there is something worthy
-        * of logging, so get current contents and do a more detailed idea.
-        * read full flash, not just currently used part, since it may have
-        * been written with a newer definition
-        */
-       len = sizeof(struct ipath_flash);
-       buf = vmalloc(len);
-       ret = 1;
-       if (!buf) {
-               ipath_dev_err(dd, "Couldn't allocate memory to read %u "
-                               "bytes from eeprom for logging\n", len);
-               goto bail;
-       }
-
-       /* Grab semaphore and read current EEPROM. If we get an
-        * error, let go, but if not, keep it until we finish write.
-        */
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (ret) {
-               ipath_dev_err(dd, "Unable to acquire EEPROM for logging\n");
-               goto free_bail;
-       }
-       ret = ipath_eeprom_internal_read(dd, 0, buf, len);
-       if (ret) {
-               mutex_unlock(&dd->ipath_eep_lock);
-               ipath_dev_err(dd, "Unable to read EEPROM for logging\n");
-               goto free_bail;
-       }
-       ifp = (struct ipath_flash *)buf;
-
-       csum = flash_csum(ifp, 0);
-       if (csum != ifp->if_csum) {
-               mutex_unlock(&dd->ipath_eep_lock);
-               ipath_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
-                               csum, ifp->if_csum);
-               ret = 1;
-               goto free_bail;
-       }
-       hi_water = 0;
-       spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
-       for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
-               int new_val = dd->ipath_eep_st_new_errs[idx];
-               if (new_val) {
-                       /*
-                        * If we have seen any errors, add them to the EEPROM values.
-                        * We need to saturate at 0xFF (255), and we also
-                        * would need to adjust the checksum if we were
-                        * trying to minimize EEPROM traffic
-                        * Note that we add to actual current count in EEPROM,
-                        * in case it was altered while we were running.
-                        */
-                       new_val += ifp->if_errcntp[idx];
-                       if (new_val > 0xFF)
-                               new_val = 0xFF;
-                       if (ifp->if_errcntp[idx] != new_val) {
-                               ifp->if_errcntp[idx] = new_val;
-                               hi_water = offsetof(struct ipath_flash,
-                                               if_errcntp) + idx;
-                       }
-                       /*
-                        * update our shadow (used to minimize EEPROM
-                        * traffic), to match what we are about to write.
-                        */
-                       dd->ipath_eep_st_errs[idx] = new_val;
-                       dd->ipath_eep_st_new_errs[idx] = 0;
-               }
-       }
-       /*
-        * now update active-time. We would like to round to the nearest hour
-        * but unless atomic_t are sure to be proper signed ints we cannot,
-        * because we need to account for what we "transfer" to EEPROM and
-        * if we log an hour at 31 minutes, then we would need to set
-        * active_time to -29 to accurately count the _next_ hour.
-        */
-       if (new_time >= 3600) {
-               new_hrs = new_time / 3600;
-               atomic_sub((new_hrs * 3600), &dd->ipath_active_time);
-               new_hrs += dd->ipath_eep_hrs;
-               if (new_hrs > 0xFFFF)
-                       new_hrs = 0xFFFF;
-               dd->ipath_eep_hrs = new_hrs;
-               if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
-                       ifp->if_powerhour[0] = new_hrs & 0xFF;
-                       hi_water = offsetof(struct ipath_flash, if_powerhour);
-               }
-               if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
-                       ifp->if_powerhour[1] = new_hrs >> 8;
-                       hi_water = offsetof(struct ipath_flash, if_powerhour)
-                                       + 1;
-               }
-       }
-       /*
-        * There is a tiny possibility that we could somehow fail to write
-        * the EEPROM after updating our shadows, but problems from holding
-        * the spinlock too long are a much bigger issue.
-        */
-       spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-       if (hi_water) {
-               /* we made some change to the data, update cksum and write */
-               csum = flash_csum(ifp, 1);
-               ret = ipath_eeprom_internal_write(dd, 0, buf, hi_water + 1);
-       }
-       mutex_unlock(&dd->ipath_eep_lock);
-       if (ret)
-               ipath_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
-       vfree(buf);
-bail:
-       return ret;
-
-}
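/*
 * Illustrative sketch, not part of the original file: the active-time
 * bookkeeping above in isolation.  Only whole hours are transferred to
 * the EEPROM; the sub-hour remainder stays in the running seconds
 * counter.  E.g. 12660 s of uptime logs 3 hours and carries 1860 s
 * forward, so no time is lost to the rounding.
 */
static u32 __maybe_unused hours_to_log_sketch(u32 active_seconds,
                                              u32 *carry_seconds)
{
        u32 hrs = active_seconds / 3600;

        *carry_seconds = active_seconds - hrs * 3600;
        return hrs;
}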
-
-/**
- * ipath_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the infinipath device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever ipath_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-
-void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr)
-{
-       uint new_val;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
-       new_val = dd->ipath_eep_st_new_errs[eidx] + incr;
-       if (new_val > 255)
-               new_val = 255;
-       dd->ipath_eep_st_new_errs[eidx] = new_val;
-       spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-       return;
-}
-
-static int ipath_tempsense_internal_read(struct ipath_devdata *dd, u8 regnum)
-{
-       int ret;
-       struct i2c_chain_desc *icd;
-
-       ret = -ENOENT;
-
-       icd = ipath_i2c_type(dd);
-       if (!icd)
-               goto bail;
-
-       if (icd->temp_dev == IPATH_NO_DEV) {
-               /* tempsense only exists on new, real-I2C boards */
-               ret = -ENXIO;
-               goto bail;
-       }
-
-       if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
-               ipath_dbg("Failed tempsense startcmd\n");
-               stop_cmd(dd);
-               ret = -ENXIO;
-               goto bail;
-       }
-       ret = wr_byte(dd, regnum);
-       stop_cmd(dd);
-       if (ret) {
-               ipath_dev_err(dd, "Failed tempsense WR command %02X\n",
-                             regnum);
-               ret = -ENXIO;
-               goto bail;
-       }
-       if (i2c_startcmd(dd, icd->temp_dev | READ_CMD)) {
-               ipath_dbg("Failed tempsense RD startcmd\n");
-               stop_cmd(dd);
-               ret = -ENXIO;
-               goto bail;
-       }
-       /*
-        * We can only clock out one byte per command, sensibly
-        */
-       ret = rd_byte(dd);
-       stop_cmd(dd);
-
-bail:
-       return ret;
-}
-
-#define VALID_TS_RD_REG_MASK 0xBF
-
-/**
- * ipath_tempsense_read - read register of temp sensor via I2C
- * @dd: the infinipath device
- * @regnum: register to read from
- *
- * returns reg contents (0..255) or < 0 for error
- */
-int ipath_tempsense_read(struct ipath_devdata *dd, u8 regnum)
-{
-       int ret;
-
-       if (regnum > 7)
-               return -EINVAL;
-
-       /* return a bogus value for (the one) register we do not have */
-       if (!((1 << regnum) & VALID_TS_RD_REG_MASK))
-               return 0;
-
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (!ret) {
-               ret = ipath_tempsense_internal_read(dd, regnum);
-               mutex_unlock(&dd->ipath_eep_lock);
-       }
-
-       /*
-        * There are three possibilities here:
-        * ret is actual value (0..255)
-        * ret is -ENXIO or -EINVAL from code in this file
-        * ret is -EINTR from mutex_lock_interruptible.
-        */
-       return ret;
-}
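/*
 * Illustrative sketch, not part of the original file: which registers the
 * mask above admits.  0xBF is 0b10111111, so within the accepted range
 * 0..7 only regnum 6 fails the mask test and is answered with the bogus
 * value 0; every other register is actually fetched over I2C.
 */
static inline int __maybe_unused ts_reg_readable_sketch(u8 regnum)
{
        return regnum <= 7 && ((1 << regnum) & VALID_TS_RD_REG_MASK);
}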
-
-static int ipath_tempsense_internal_write(struct ipath_devdata *dd,
-                                         u8 regnum, u8 data)
-{
-       int ret = -ENOENT;
-       struct i2c_chain_desc *icd;
-
-       icd = ipath_i2c_type(dd);
-       if (!icd)
-               goto bail;
-
-       if (icd->temp_dev == IPATH_NO_DEV) {
-               /* tempsense only exists on new, real-I2C boards */
-               ret = -ENXIO;
-               goto bail;
-       }
-       if (i2c_startcmd(dd, icd->temp_dev | WRITE_CMD)) {
-               ipath_dbg("Failed tempsense startcmd\n");
-               stop_cmd(dd);
-               ret = -ENXIO;
-               goto bail;
-       }
-       ret = wr_byte(dd, regnum);
-       if (ret) {
-               stop_cmd(dd);
-               ipath_dev_err(dd, "Failed to write tempsense command %02X\n",
-                             regnum);
-               ret = -ENXIO;
-               goto bail;
-       }
-       ret = wr_byte(dd, data);
-       stop_cmd(dd);
-       ret = i2c_startcmd(dd, icd->temp_dev | READ_CMD);
-       if (ret) {
-               ipath_dev_err(dd, "Failed tempsense data wrt to %02X\n",
-                             regnum);
-               ret = -ENXIO;
-       }
-
-bail:
-       return ret;
-}
-
-#define VALID_TS_WR_REG_MASK ((1 << 9) | (1 << 0xB) | (1 << 0xD))
-
-/**
- * ipath_tempsense_write - write register of temp sensor via I2C
- * @dd: the infinipath device
- * @regnum: register to write
- * @data: data to write
- *
- * returns 0 for success or < 0 for error
- */
-int ipath_tempsense_write(struct ipath_devdata *dd, u8 regnum, u8 data)
-{
-       int ret;
-
-       if (regnum > 15 || !((1 << regnum) & VALID_TS_WR_REG_MASK))
-               return -EINVAL;
-
-       ret = mutex_lock_interruptible(&dd->ipath_eep_lock);
-       if (!ret) {
-               ret = ipath_tempsense_internal_write(dd, regnum, data);
-               mutex_unlock(&dd->ipath_eep_lock);
-       }
-
-       /*
-        * There are three possibilities here:
-        * ret is 0 for success
-        * ret is -ENXIO or -EINVAL from code in this file
-        * ret is -EINTR from mutex_lock_interruptible.
-        */
-       return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_file_ops.c b/drivers/staging/rdma/ipath/ipath_file_ops.c
deleted file mode 100644 (file)
index 6187b84..0000000
+++ /dev/null
@@ -1,2619 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/poll.h>
-#include <linux/cdev.h>
-#include <linux/swap.h>
-#include <linux/export.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/cpu.h>
-#include <linux/uio.h>
-#include <asm/pgtable.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-#include "ipath_user_sdma.h"
-
-static int ipath_open(struct inode *, struct file *);
-static int ipath_close(struct inode *, struct file *);
-static ssize_t ipath_write(struct file *, const char __user *, size_t,
-                          loff_t *);
-static ssize_t ipath_write_iter(struct kiocb *, struct iov_iter *from);
-static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
-static int ipath_mmap(struct file *, struct vm_area_struct *);
-
-/*
- * This is really, really weird shit - write() and writev() here
- * have completely unrelated semantics.  Sucky userland ABI,
- * film at 11.
- */
-static const struct file_operations ipath_file_ops = {
-       .owner = THIS_MODULE,
-       .write = ipath_write,
-       .write_iter = ipath_write_iter,
-       .open = ipath_open,
-       .release = ipath_close,
-       .poll = ipath_poll,
-       .mmap = ipath_mmap,
-       .llseek = noop_llseek,
-};
-
-/*
- * Convert kernel virtual addresses to physical addresses so they don't
- * potentially conflict with the chip addresses used as mmap offsets.
- * It doesn't really matter what mmap offset we use as long as we can
- * interpret it correctly.
- */
-static u64 cvt_kvaddr(void *p)
-{
-       struct page *page;
-       u64 paddr = 0;
-
-       page = vmalloc_to_page(p);
-       if (page)
-               paddr = page_to_pfn(page) << PAGE_SHIFT;
-
-       return paddr;
-}
-
-static int ipath_get_base_info(struct file *fp,
-                              void __user *ubase, size_t ubase_size)
-{
-       struct ipath_portdata *pd = port_fp(fp);
-       int ret = 0;
-       struct ipath_base_info *kinfo = NULL;
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned subport_cnt;
-       int shared, master;
-       size_t sz;
-
-       subport_cnt = pd->port_subport_cnt;
-       if (!subport_cnt) {
-               shared = 0;
-               master = 0;
-               subport_cnt = 1;
-       } else {
-               shared = 1;
-               master = !subport_fp(fp);
-       }
-
-       sz = sizeof(*kinfo);
-       /* If port sharing is not requested, allow the old size structure */
-       if (!shared)
-               sz -= 7 * sizeof(u64);
-       if (ubase_size < sz) {
-               ipath_cdbg(PROC,
-                          "Base size %zu, need %zu (version mismatch?)\n",
-                          ubase_size, sz);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
-       if (kinfo == NULL) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       ret = dd->ipath_f_get_base_info(pd, kinfo);
-       if (ret < 0)
-               goto bail;
-
-       kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
-       kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
-       kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
-       kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
-       /*
-        * have to mmap whole thing
-        */
-       kinfo->spi_rcv_egrbuftotlen =
-               pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
-       kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
-       kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
-               pd->port_rcvegrbuf_chunks;
-       kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
-       if (master)
-               kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
-       /*
-        * for this use, may be ipath_cfgports summed over all chips that
-        * are configured and present
-        */
-       kinfo->spi_nports = dd->ipath_cfgports;
-       /* unit (chip/board) our port is on */
-       kinfo->spi_unit = dd->ipath_unit;
-       /* for now, only a single page */
-       kinfo->spi_tid_maxsize = PAGE_SIZE;
-
-       /*
-        * Doing this per port, and based on the skip value, etc.  This has
-        * to be the actual buffer size, since the protocol code treats it
-        * as an array.
-        *
-        * These have to be set to user addresses in the user code via mmap.
-        * These values are used on return to user code for the mmap target
-        * addresses only.  For 32 bit, same 44 bit address problem, so use
-        * the physical address, not virtual.  Before 2.6.11, using the
-        * page_address() macro worked, but in 2.6.11, even that returns the
-        * full 64 bit address (upper bits all 1's).  So far, using the
-        * physical addresses (or chip offsets, for chip mapping) works, but
-        * no doubt some future kernel release will change that, and we'll be
-        * on to yet another method of dealing with this.
-        */
-       kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
-       kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
-       kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
-       kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
-       kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
-               (void *) dd->ipath_statusp -
-               (void *) dd->ipath_pioavailregs_dma;
-       if (!shared) {
-               kinfo->spi_piocnt = pd->port_piocnt;
-               kinfo->spi_piobufbase = (u64) pd->port_piobufs;
-               kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
-                       dd->ipath_ureg_align * pd->port_port;
-       } else if (master) {
-               kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
-                                   (pd->port_piocnt % subport_cnt);
-               /* Master's PIO buffers are after all the slave's */
-               kinfo->spi_piobufbase = (u64) pd->port_piobufs +
-                       dd->ipath_palign *
-                       (pd->port_piocnt - kinfo->spi_piocnt);
-       } else {
-               unsigned slave = subport_fp(fp) - 1;
-
-               kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
-               kinfo->spi_piobufbase = (u64) pd->port_piobufs +
-                       dd->ipath_palign * kinfo->spi_piocnt * slave;
-       }
-
-       if (shared) {
-               kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
-                       dd->ipath_ureg_align * pd->port_port;
-               kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
-               kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
-               kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
-
-               kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
-                       PAGE_SIZE * subport_fp(fp));
-
-               kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
-                       pd->port_rcvhdrq_size * subport_fp(fp));
-               kinfo->spi_rcvhdr_tailaddr = 0;
-               kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
-                       pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
-                       subport_fp(fp));
-
-               kinfo->spi_subport_uregbase =
-                       cvt_kvaddr(pd->subport_uregbase);
-               kinfo->spi_subport_rcvegrbuf =
-                       cvt_kvaddr(pd->subport_rcvegrbuf);
-               kinfo->spi_subport_rcvhdr_base =
-                       cvt_kvaddr(pd->subport_rcvhdr_base);
-               ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
-                       kinfo->spi_port, kinfo->spi_runtime_flags,
-                       (unsigned long long) kinfo->spi_subport_uregbase,
-                       (unsigned long long) kinfo->spi_subport_rcvegrbuf,
-                       (unsigned long long) kinfo->spi_subport_rcvhdr_base);
-       }
-
-       /*
-        * All user buffers are 2KB buffers.  If we ever support
-        * giving 4KB buffers to user processes, this will need some
-        * work.
-        */
-       kinfo->spi_pioindex = (kinfo->spi_piobufbase -
-               (dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
-       kinfo->spi_pioalign = dd->ipath_palign;
-
-       kinfo->spi_qpair = IPATH_KD_QP;
-       /*
-        * user mode PIO buffers are always 2KB, even when 4KB can
-        * be received, and sent via the kernel; this is ibmaxlen
-        * for 2K MTU.
-        */
-       kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
-       kinfo->spi_mtu = dd->ipath_ibmaxlen;    /* maxlen, not ibmtu */
-       kinfo->spi_port = pd->port_port;
-       kinfo->spi_subport = subport_fp(fp);
-       kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
-       kinfo->spi_hw_version = dd->ipath_revision;
-
-       if (master) {
-               kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
-       }
-
-       sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
-       if (copy_to_user(ubase, kinfo, sz))
-               ret = -EFAULT;
-
-bail:
-       kfree(kinfo);
-       return ret;
-}
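/*
 * Illustrative sketch, not part of the original file: the PIO buffer split
 * computed above for shared ports.  With port_piocnt = 10 and a subport
 * count of 3 (one master plus two slaves), each slave gets 3 buffers and
 * the master gets the remaining 4, located after the slaves' buffers.
 */
static void __maybe_unused pio_split_sketch(unsigned int piocnt,
                                            unsigned int subports,
                                            unsigned int *master_cnt,
                                            unsigned int *slave_cnt)
{
        *slave_cnt = piocnt / subports;
        *master_cnt = *slave_cnt + piocnt % subports;
}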
-
-/**
- * ipath_tid_update - update a port TID
- * @pd: the port
- * @fp: the ipath device file
- * @ti: the TID information
- *
- * The new implementation as of Oct 2004 is that the driver assigns
- * the tid and returns it to the caller.   To make it easier to
- * catch bugs, and to reduce search time, we keep a cursor for
- * each port, walking the shadow tid array to find one that's not
- * in use.
- *
- * For now, if we can't allocate the full list, we fail, although
- * in the long run, we'll allocate as many as we can, and the
- * caller will deal with that by trying the remaining pages later.
- * That means that when we fail, we have to mark the tids as not in
- * use again, in our shadow copy.
- *
- * It's up to the caller to free the tids when they are done.
- * We'll unlock the pages as they free them.
- *
- * Also, right now we are locking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.
- */
-static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
-                           const struct ipath_tid_info *ti)
-{
-       int ret = 0, ntids;
-       u32 tid, porttid, cnt, i, tidcnt, tidoff;
-       u16 *tidlist;
-       struct ipath_devdata *dd = pd->port_dd;
-       u64 physaddr;
-       unsigned long vaddr;
-       u64 __iomem *tidbase;
-       unsigned long tidmap[8];
-       struct page **pagep = NULL;
-       unsigned subport = subport_fp(fp);
-
-       if (!dd->ipath_pageshadow) {
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       cnt = ti->tidcnt;
-       if (!cnt) {
-               ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
-                         (unsigned long long) ti->tidlist);
-               /*
-                * Should we treat as success?  likely a bug
-                */
-               ret = -EFAULT;
-               goto done;
-       }
-       porttid = pd->port_port * dd->ipath_rcvtidcnt;
-       if (!pd->port_subport_cnt) {
-               tidcnt = dd->ipath_rcvtidcnt;
-               tid = pd->port_tidcursor;
-               tidoff = 0;
-       } else if (!subport) {
-               tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
-                        (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
-               tidoff = dd->ipath_rcvtidcnt - tidcnt;
-               porttid += tidoff;
-               tid = tidcursor_fp(fp);
-       } else {
-               tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
-               tidoff = tidcnt * (subport - 1);
-               porttid += tidoff;
-               tid = tidcursor_fp(fp);
-       }
-       if (cnt > tidcnt) {
-               /* make sure it all fits in port_tid_pg_list */
-               dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
-                        "TIDs, only trying max (%u)\n", cnt, tidcnt);
-               cnt = tidcnt;
-       }
-       pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
-       tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];
-
-       memset(tidmap, 0, sizeof(tidmap));
-       /* before decrement; chip actual # */
-       ntids = tidcnt;
-       tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
-                                  dd->ipath_rcvtidbase +
-                                  porttid * sizeof(*tidbase));
-
-       ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
-                  pd->port_port, cnt, tid, tidbase);
-
-       /* virtual address of first page in transfer */
-       vaddr = ti->tidvaddr;
-       if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
-                      cnt * PAGE_SIZE)) {
-               ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
-                         (void *)vaddr, cnt);
-               ret = -EFAULT;
-               goto done;
-       }
-       ret = ipath_get_user_pages(vaddr, cnt, pagep);
-       if (ret) {
-               if (ret == -EBUSY) {
-                       ipath_dbg("Failed to lock addr %p, %u pages "
-                                 "(already locked)\n",
-                                 (void *) vaddr, cnt);
-                       /*
-                        * for now, continue, and see what happens but with
-                        * the new implementation, this should never happen,
-                        * unless perhaps the user has mpin'ed the pages
-                        * themselves (something we need to test)
-                        */
-                       ret = 0;
-               } else {
-                       dev_info(&dd->pcidev->dev,
-                                "Failed to lock addr %p, %u pages: "
-                                "errno %d\n", (void *) vaddr, cnt, -ret);
-                       goto done;
-               }
-       }
-       for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
-               for (; ntids--; tid++) {
-                       if (tid == tidcnt)
-                               tid = 0;
-                       if (!dd->ipath_pageshadow[porttid + tid])
-                               break;
-               }
-               if (ntids < 0) {
-                       /*
-                        * oops, wrapped all the way through their TIDs,
-                        * and didn't have enough free; see comments at
-                        * start of routine
-                        */
-                       ipath_dbg("Not enough free TIDs for %u pages "
-                                 "(index %d), failing\n", cnt, i);
-                       i--;    /* last tidlist[i] not filled in */
-                       ret = -ENOMEM;
-                       break;
-               }
-               tidlist[i] = tid + tidoff;
-               ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
-                          "vaddr %lx\n", i, tid + tidoff, vaddr);
-               /* we "know" system pages and TID pages are same size */
-               dd->ipath_pageshadow[porttid + tid] = pagep[i];
-               dd->ipath_physshadow[porttid + tid] = ipath_map_page(
-                       dd->pcidev, pagep[i], 0, PAGE_SIZE,
-                       PCI_DMA_FROMDEVICE);
-               /*
-                * don't need atomic here, or its overhead
-                */
-               __set_bit(tid, tidmap);
-               physaddr = dd->ipath_physshadow[porttid + tid];
-               ipath_stats.sps_pagelocks++;
-               ipath_cdbg(VERBOSE,
-                          "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
-                          tid, vaddr, (unsigned long long) physaddr,
-                          pagep[i]);
-               dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
-                                   physaddr);
-               /*
-                * don't check this tid in ipath_portshadow, since we
-                * just filled it in; start with the next one.
-                */
-               tid++;
-       }
-
-       if (ret) {
-               u32 limit;
-       cleanup:
-               /* jump here if copy out of updated info failed... */
-               ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
-                         -ret, i, cnt);
-               /* same code that's in ipath_free_tid() */
-               limit = sizeof(tidmap) * BITS_PER_BYTE;
-               if (limit > tidcnt)
-                       /* just in case size changes in future */
-                       limit = tidcnt;
-               tid = find_first_bit((const unsigned long *)tidmap, limit);
-               for (; tid < limit; tid++) {
-                       if (!test_bit(tid, tidmap))
-                               continue;
-                       if (dd->ipath_pageshadow[porttid + tid]) {
-                               ipath_cdbg(VERBOSE, "Freeing TID %u\n",
-                                          tid);
-                               dd->ipath_f_put_tid(dd, &tidbase[tid],
-                                                   RCVHQ_RCV_TYPE_EXPECTED,
-                                                   dd->ipath_tidinvalid);
-                               pci_unmap_page(dd->pcidev,
-                                       dd->ipath_physshadow[porttid + tid],
-                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                               dd->ipath_pageshadow[porttid + tid] = NULL;
-                               ipath_stats.sps_pageunlocks++;
-                       }
-               }
-               ipath_release_user_pages(pagep, cnt);
-       } else {
-               /*
-                * Copy the updated array, with ipath_tid's filled in, back
-                * to user.  Since we did the copy in already, this "should
-                * never fail".  If it does, we have to clean up...
-                */
-               if (copy_to_user((void __user *)
-                                (unsigned long) ti->tidlist,
-                                tidlist, cnt * sizeof(*tidlist))) {
-                       ret = -EFAULT;
-                       goto cleanup;
-               }
-               if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
-                                tidmap, sizeof tidmap)) {
-                       ret = -EFAULT;
-                       goto cleanup;
-               }
-               if (tid == tidcnt)
-                       tid = 0;
-               if (!pd->port_subport_cnt)
-                       pd->port_tidcursor = tid;
-               else
-                       tidcursor_fp(fp) = tid;
-       }
-
-done:
-       if (ret)
-               ipath_dbg("Failed to map %u TID pages, failing with %d\n",
-                         ti->tidcnt, -ret);
-       return ret;
-}
-
-/**
- * ipath_tid_free - free a port TID
- * @pd: the port
- * @subport: the subport
- * @ti: the TID info
- *
- * right now we are unlocking one page at a time, but since
- * the intended use of this routine is for a single group of
- * virtually contiguous pages, that should change to improve
- * performance.  We check that the TID is in range for this port
- * but otherwise don't check validity; if user has an error and
- * frees the wrong tid, it's only their own data that can thereby
- * be corrupted.  We do check that the TID was in use, for sanity
- * We always use our idea of the saved address, not the address that
- * they pass in to us.
- */
-
-static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
-                         const struct ipath_tid_info *ti)
-{
-       int ret = 0;
-       u32 tid, porttid, cnt, limit, tidcnt;
-       struct ipath_devdata *dd = pd->port_dd;
-       u64 __iomem *tidbase;
-       unsigned long tidmap[8];
-
-       if (!dd->ipath_pageshadow) {
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
-                          sizeof tidmap)) {
-               ret = -EFAULT;
-               goto done;
-       }
-
-       porttid = pd->port_port * dd->ipath_rcvtidcnt;
-       if (!pd->port_subport_cnt)
-               tidcnt = dd->ipath_rcvtidcnt;
-       else if (!subport) {
-               tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
-                        (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
-               porttid += dd->ipath_rcvtidcnt - tidcnt;
-       } else {
-               tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
-               porttid += tidcnt * (subport - 1);
-       }
-       tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-                                  dd->ipath_rcvtidbase +
-                                  porttid * sizeof(*tidbase));
-
-       limit = sizeof(tidmap) * BITS_PER_BYTE;
-       if (limit > tidcnt)
-               /* just in case size changes in future */
-               limit = tidcnt;
-       tid = find_first_bit(tidmap, limit);
-       ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
-                  "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
-                  limit, tid, porttid);
-       for (cnt = 0; tid < limit; tid++) {
-               /*
-                * small optimization; if we detect a run of 3 or so without
-                * any set, use find_first_bit again.  That's mainly to
-                * accelerate the case where we wrapped, so we have some at
-                * the beginning, and some at the end, and a big gap
-                * in the middle.
-                */
-               if (!test_bit(tid, tidmap))
-                       continue;
-               cnt++;
-               if (dd->ipath_pageshadow[porttid + tid]) {
-                       struct page *p;
-                       p = dd->ipath_pageshadow[porttid + tid];
-                       dd->ipath_pageshadow[porttid + tid] = NULL;
-                       ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
-                                  pid_nr(pd->port_pid), tid);
-                       dd->ipath_f_put_tid(dd, &tidbase[tid],
-                                           RCVHQ_RCV_TYPE_EXPECTED,
-                                           dd->ipath_tidinvalid);
-                       pci_unmap_page(dd->pcidev,
-                               dd->ipath_physshadow[porttid + tid],
-                               PAGE_SIZE, PCI_DMA_FROMDEVICE);
-                       ipath_release_user_pages(&p, 1);
-                       ipath_stats.sps_pageunlocks++;
-               } else
-                       ipath_dbg("Unused tid %u, ignoring\n", tid);
-       }
-       if (cnt != ti->tidcnt)
-               ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
-                         ti->tidcnt, cnt);
-done:
-       if (ret)
-               ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
-                         ti->tidcnt, -ret);
-       return ret;
-}
-
-/**
- * ipath_set_part_key - set a partition key
- * @pd: the port
- * @key: the key
- *
- * We can have up to 4 active at a time (other than the default, which is
- * always allowed).  This is somewhat tricky, since multiple ports may set
- * the same key, so we reference count them, and clean up at exit.  All 4
- * partition keys are packed into a single infinipath register.  It's an
- * error for a process to set the same pkey multiple times.  We provide no
- * mechanism to de-allocate a pkey at this time, we may eventually need to
- * do that.  I've used the atomic operations, and no locking, and only make
- * a single pass through what's available.  This should be more than
- * adequate for some time. I'll think about spinlocks or the like if and as
- * it's necessary.
- */
-static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       int i, any = 0, pidx = -1;
-       u16 lkey = key & 0x7FFF;
-       int ret;
-
-       if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
-               /* nothing to do; this key always valid */
-               ret = 0;
-               goto bail;
-       }
-
-       ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
-                  "%hx:%x %hx:%x %hx:%x %hx:%x\n",
-                  pd->port_port, key, dd->ipath_pkeys[0],
-                  atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
-                  atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
-                  atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
-                  atomic_read(&dd->ipath_pkeyrefs[3]));
-
-       if (!lkey) {
-               ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
-                          pd->port_port);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       /*
-        * Set the full membership bit, because it has to be
-        * set in the register or the packet, and it seems
-        * cleaner to set in the register than to force all
-        * callers to set it. (see bug 4331)
-        */
-       key |= 0x8000;
-
-       for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-               if (!pd->port_pkeys[i] && pidx == -1)
-                       pidx = i;
-               if (pd->port_pkeys[i] == key) {
-                       ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
-                                  "(%x) more than once\n",
-                                  pd->port_port, key);
-                       ret = -EEXIST;
-                       goto bail;
-               }
-       }
-       if (pidx == -1) {
-               ipath_dbg("All pkeys for port %u already in use, "
-                         "can't set %x\n", pd->port_port, key);
-               ret = -EBUSY;
-               goto bail;
-       }
-       for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (!dd->ipath_pkeys[i]) {
-                       any++;
-                       continue;
-               }
-               if (dd->ipath_pkeys[i] == key) {
-                       atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];
-
-                       if (atomic_inc_return(pkrefs) > 1) {
-                               pd->port_pkeys[pidx] = key;
-                               ipath_cdbg(VERBOSE, "p%u set key %x "
-                                          "matches #%d, count now %d\n",
-                                          pd->port_port, key, i,
-                                          atomic_read(pkrefs));
-                               ret = 0;
-                               goto bail;
-                       } else {
-                               /*
-                                * lost race, decrement count, catch below
-                                */
-                               atomic_dec(pkrefs);
-                               ipath_cdbg(VERBOSE, "Lost race, count was "
-                                          "0, after dec, it's %d\n",
-                                          atomic_read(pkrefs));
-                               any++;
-                       }
-               }
-               if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
-                       /*
-                        * It makes no sense to have both the limited and
-                        * full membership PKEY set at the same time since
-                        * the unlimited one will disable the limited one.
-                        */
-                       ret = -EEXIST;
-                       goto bail;
-               }
-       }
-       if (!any) {
-               ipath_dbg("port %u, all pkeys already in use, "
-                         "can't set %x\n", pd->port_port, key);
-               ret = -EBUSY;
-               goto bail;
-       }
-       for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (!dd->ipath_pkeys[i] &&
-                   atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
-                       u64 pkey;
-
-                       /* for ipathstats, etc. */
-                       ipath_stats.sps_pkeys[i] = lkey;
-                       pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
-                       pkey =
-                               (u64) dd->ipath_pkeys[0] |
-                               ((u64) dd->ipath_pkeys[1] << 16) |
-                               ((u64) dd->ipath_pkeys[2] << 32) |
-                               ((u64) dd->ipath_pkeys[3] << 48);
-                       ipath_cdbg(PROC, "p%u set key %x in #%d, "
-                                  "portidx %d, new pkey reg %llx\n",
-                                  pd->port_port, key, i, pidx,
-                                  (unsigned long long) pkey);
-                       ipath_write_kreg(
-                               dd, dd->ipath_kregs->kr_partitionkey, pkey);
-
-                       ret = 0;
-                       goto bail;
-               }
-       }
-       ipath_dbg("port %u, all pkeys already in use 2nd pass, "
-                 "can't set %x\n", pd->port_port, key);
-       ret = -EBUSY;
-
-bail:
-       return ret;
-}
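/*
 * Editorial sketch (not part of the removed driver code): ipath_set_part_key()
 * and ipath_clean_part_key() both pack the four 16-bit partition keys into the
 * single 64-bit kr_partitionkey register.  The standalone helper below mirrors
 * that packing; pack_pkeys() is a hypothetical name used for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_pkeys(const uint16_t pkeys[4])
{
        return (uint64_t)pkeys[0] |
               ((uint64_t)pkeys[1] << 16) |
               ((uint64_t)pkeys[2] << 32) |
               ((uint64_t)pkeys[3] << 48);
}

int main(void)
{
        /* Example: a full-membership key in slot 0 and one extra key set. */
        uint16_t pkeys[4] = { 0xffff, 0x8001, 0, 0 };

        printf("pkey reg %llx\n", (unsigned long long)pack_pkeys(pkeys));
        return 0;
}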
-
-/**
- * ipath_manage_rcvq - manage a port's receive queue
- * @pd: the port
- * @subport: the subport
- * @start_stop: action to carry out
- *
- * start_stop == 0 disables receive on the port, for use in queue
- * overflow conditions.  start_stop == 1 re-enables, to be used to
- * re-init the software copy of the head register.
- */
-static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
-                            int start_stop)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-
-       ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
-                  start_stop ? "en" : "dis", dd->ipath_unit,
-                  pd->port_port, subport);
-       if (subport)
-               goto bail;
-       /* atomically set or clear the receive enable bit for this port */
-       if (start_stop) {
-               /*
-                * On enable, force in-memory copy of the tail register to
-                * 0, so that protocol code doesn't have to worry about
-                * whether or not the chip has yet updated the in-memory
-                * copy or not on return from the system call. The chip
-                * always resets its tail register back to 0 on a
-                * transition from disabled to enabled.  This could cause a
-                * problem if software was broken, and did the enable w/o
-                * the disable, but eventually the in-memory copy will be
-                * updated and correct itself, even in the face of software
-                * bugs.
-                */
-               if (pd->port_rcvhdrtail_kvaddr)
-                       ipath_clear_rcvhdrtail(pd);
-               set_bit(dd->ipath_r_portenable_shift + pd->port_port,
-                       &dd->ipath_rcvctrl);
-       } else
-               clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
-                         &dd->ipath_rcvctrl);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-       /* now be sure chip saw it before we return */
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       if (start_stop) {
-               /*
-                * And try to be sure that tail reg update has happened too.
-                * This should in theory interlock with the RXE changes to
-                * the tail register.  Don't assign it to the tail register
-                * in memory copy, since we could overwrite an update by the
-                * chip if we did.
-                */
-               ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-       }
-       /* always; new head should be equal to new tail; see above */
-bail:
-       return 0;
-}
-
-static void ipath_clean_part_key(struct ipath_portdata *pd,
-                                struct ipath_devdata *dd)
-{
-       int i, j, pchanged = 0;
-       u64 oldpkey;
-
-       /* for debugging only */
-       oldpkey = (u64) dd->ipath_pkeys[0] |
-               ((u64) dd->ipath_pkeys[1] << 16) |
-               ((u64) dd->ipath_pkeys[2] << 32) |
-               ((u64) dd->ipath_pkeys[3] << 48);
-
-       for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-               if (!pd->port_pkeys[i])
-                       continue;
-               ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
-                          pd->port_pkeys[i]);
-               for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
-                       /* check for match independent of the global bit */
-                       if ((dd->ipath_pkeys[j] & 0x7fff) !=
-                           (pd->port_pkeys[i] & 0x7fff))
-                               continue;
-                       if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
-                               ipath_cdbg(VERBOSE, "p%u clear key "
-                                          "%x matches #%d\n",
-                                          pd->port_port,
-                                          pd->port_pkeys[i], j);
-                               ipath_stats.sps_pkeys[j] =
-                                       dd->ipath_pkeys[j] = 0;
-                               pchanged++;
-                       } else {
-                               ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "
-                                          "but ref still %d\n", pd->port_port,
-                                          pd->port_pkeys[i], j,
-                                          atomic_read(&dd->ipath_pkeyrefs[j]));
-                               break;
-                       }
-               }
-               pd->port_pkeys[i] = 0;
-       }
-       if (pchanged) {
-               u64 pkey = (u64) dd->ipath_pkeys[0] |
-                       ((u64) dd->ipath_pkeys[1] << 16) |
-                       ((u64) dd->ipath_pkeys[2] << 32) |
-                       ((u64) dd->ipath_pkeys[3] << 48);
-               ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
-                          "new pkey reg %llx\n", pd->port_port,
-                          (unsigned long long) oldpkey,
-                          (unsigned long long) pkey);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
-                                pkey);
-       }
-}
-
-/*
- * Initialize the port data with the receive buffer sizes
- * so this can be done while the master port is locked.
- * Otherwise, there is a race with a slave opening the port
- * and seeing these fields uninitialized.
- */
-static void init_user_egr_sizes(struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned egrperchunk, egrcnt, size;
-
-       /*
-        * To avoid wasting a lot of memory, we allocate 32KB chunks of
-        * physically contiguous memory, advance through it until used up,
-        * and then allocate more.  Of course, we now need memory to store
-        * those extra pointers.  We started out with 256KB, but under heavy
-        * memory pressure (creating large files and then copying them over
-        * NFS while doing lots of MPI jobs), we hit some allocation
-        * failures, even though we can sleep (observed on 2.6.10).  We
-        * still got failures at 64KB; 32KB is the lowest we can go without
-        * wasting additional memory.
-        */
-       size = 0x8000;
-       egrperchunk = size / dd->ipath_rcvegrbufsize;
-       egrcnt = dd->ipath_rcvegrcnt;
-       pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
-       pd->port_rcvegrbufs_perchunk = egrperchunk;
-       pd->port_rcvegrbuf_size = size;
-}
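/*
 * Editorial sketch (not driver code) of the eager-buffer chunking math in
 * init_user_egr_sizes() above: 32KB chunks, egrperchunk buffers per chunk,
 * and a rounded-up chunk count.  The buffer size and eager count below are
 * illustrative assumptions, not chip defaults.
 */
#include <stdio.h>

int main(void)
{
        unsigned size = 0x8000;                /* 32KB chunk */
        unsigned rcvegrbufsize = 2048;         /* assumed per-buffer size */
        unsigned rcvegrcnt = 100;              /* assumed eager TID count */
        unsigned egrperchunk = size / rcvegrbufsize;
        unsigned chunks = (rcvegrcnt + egrperchunk - 1) / egrperchunk;

        printf("%u buffers per chunk, %u chunks\n", egrperchunk, chunks);
        return 0;
}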
-
-/**
- * ipath_create_user_egr - allocate eager TID buffers
- * @pd: the port to allocate TID buffers for
- *
- * This routine is now quite different for user and kernel, because
- * the kernel uses skbs for accelerated network performance.
- * This is the user port version.
- *
- * Allocate the eager TID buffers and program them into infinipath.
- * They are no longer completely contiguous; we do multiple allocation
- * calls.
- */
-static int ipath_create_user_egr(struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
-       size_t size;
-       int ret;
-       gfp_t gfp_flags;
-
-       /*
-        * GFP_USER, but without GFP_FS, so buffer cache can be
-        * coalesced (we hope); otherwise, even at order 4,
-        * heavy filesystem activity makes these fail, and we can
-        * use compound pages.
-        */
-       gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP;
-
-       egrcnt = dd->ipath_rcvegrcnt;
-       /* TID number offset for this port */
-       egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
-       egrsize = dd->ipath_rcvegrbufsize;
-       ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
-                  "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);
-
-       chunk = pd->port_rcvegrbuf_chunks;
-       egrperchunk = pd->port_rcvegrbufs_perchunk;
-       size = pd->port_rcvegrbuf_size;
-       pd->port_rcvegrbuf = kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf[0]),
-                                          GFP_KERNEL);
-       if (!pd->port_rcvegrbuf) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-       pd->port_rcvegrbuf_phys =
-               kmalloc_array(chunk, sizeof(pd->port_rcvegrbuf_phys[0]),
-                             GFP_KERNEL);
-       if (!pd->port_rcvegrbuf_phys) {
-               ret = -ENOMEM;
-               goto bail_rcvegrbuf;
-       }
-       for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
-
-               pd->port_rcvegrbuf[e] = dma_alloc_coherent(
-                       &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
-                       gfp_flags);
-
-               if (!pd->port_rcvegrbuf[e]) {
-                       ret = -ENOMEM;
-                       goto bail_rcvegrbuf_phys;
-               }
-       }
-
-       pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];
-
-       for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
-               dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
-               unsigned i;
-
-               for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
-                       dd->ipath_f_put_tid(dd, e + egroff +
-                                           (u64 __iomem *)
-                                           ((char __iomem *)
-                                            dd->ipath_kregbase +
-                                            dd->ipath_rcvegrbase),
-                                           RCVHQ_RCV_TYPE_EAGER, pa);
-                       pa += egrsize;
-               }
-               cond_resched(); /* don't hog the cpu */
-       }
-
-       ret = 0;
-       goto bail;
-
-bail_rcvegrbuf_phys:
-       for (e = 0; e < pd->port_rcvegrbuf_chunks &&
-               pd->port_rcvegrbuf[e]; e++) {
-               dma_free_coherent(&dd->pcidev->dev, size,
-                                 pd->port_rcvegrbuf[e],
-                                 pd->port_rcvegrbuf_phys[e]);
-
-       }
-       kfree(pd->port_rcvegrbuf_phys);
-       pd->port_rcvegrbuf_phys = NULL;
-bail_rcvegrbuf:
-       kfree(pd->port_rcvegrbuf);
-       pd->port_rcvegrbuf = NULL;
-bail:
-       return ret;
-}
-
-
-/* common code for the mappings on dma_alloc_coherent mem */
-static int ipath_mmap_mem(struct vm_area_struct *vma,
-       struct ipath_portdata *pd, unsigned len, int write_ok,
-       void *kvaddr, char *what)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned long pfn;
-       int ret;
-
-       if ((vma->vm_end - vma->vm_start) > len) {
-               dev_info(&dd->pcidev->dev,
-                        "FAIL on %s: len %lx > %x\n", what,
-                        vma->vm_end - vma->vm_start, len);
-               ret = -EFAULT;
-               goto bail;
-       }
-
-       if (!write_ok) {
-               if (vma->vm_flags & VM_WRITE) {
-                       dev_info(&dd->pcidev->dev,
-                                "%s must be mapped readonly\n", what);
-                       ret = -EPERM;
-                       goto bail;
-               }
-
-               /* don't allow them to later change with mprotect */
-               vma->vm_flags &= ~VM_MAYWRITE;
-       }
-
-       pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
-       ret = remap_pfn_range(vma, vma->vm_start, pfn,
-                             len, vma->vm_page_prot);
-       if (ret)
-               dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
-                        "bytes r%c failed: %d\n", what, pd->port_port,
-                        pfn, len, write_ok?'w':'o', ret);
-       else
-               ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
-                          "r%c\n", what, pd->port_port, pfn, len,
-                          write_ok?'w':'o');
-bail:
-       return ret;
-}
-
-static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
-                    u64 ureg)
-{
-       unsigned long phys;
-       int ret;
-
-       /*
-        * This is real hardware, so use io_remap.  This is the mechanism
-        * for the user process to update the head registers for their port
-        * in the chip.
-        */
-       if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
-               dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
-                        "%lx > PAGE\n", vma->vm_end - vma->vm_start);
-               ret = -EFAULT;
-       } else {
-               phys = dd->ipath_physaddr + ureg;
-               vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
-               vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
-               ret = io_remap_pfn_range(vma, vma->vm_start,
-                                        phys >> PAGE_SHIFT,
-                                        vma->vm_end - vma->vm_start,
-                                        vma->vm_page_prot);
-       }
-       return ret;
-}
-
-static int mmap_piobufs(struct vm_area_struct *vma,
-                       struct ipath_devdata *dd,
-                       struct ipath_portdata *pd,
-                       unsigned piobufs, unsigned piocnt)
-{
-       unsigned long phys;
-       int ret;
-
-       /*
-        * When we map the PIO buffers in the chip, we want to map them as
-        * write-only, with no read possible.  This prevents access to previous
-        * process data, and catches users who might try to read the i/o
-        * space due to a bug.
-        */
-       if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
-               dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
-                        "reqlen %lx > PAGE\n",
-                        vma->vm_end - vma->vm_start);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       phys = dd->ipath_physaddr + piobufs;
-
-#if defined(__powerpc__)
-       /* There isn't a generic way to specify writethrough mappings */
-       pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
-       pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
-       pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
-#endif
-
-       /*
-        * Don't allow the mapping to later become readable with mprotect()
-        * (when it was not initially mapped readable, as is normally the case).
-        */
-       vma->vm_flags &= ~VM_MAYREAD;
-       vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
-
-       ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
-                                vma->vm_end - vma->vm_start,
-                                vma->vm_page_prot);
-bail:
-       return ret;
-}
-
-static int mmap_rcvegrbufs(struct vm_area_struct *vma,
-                          struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       unsigned long start, size;
-       size_t total_size, i;
-       unsigned long pfn;
-       int ret;
-
-       size = pd->port_rcvegrbuf_size;
-       total_size = pd->port_rcvegrbuf_chunks * size;
-       if ((vma->vm_end - vma->vm_start) > total_size) {
-               dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
-                        "reqlen %lx > actual %lx\n",
-                        vma->vm_end - vma->vm_start,
-                        (unsigned long) total_size);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (vma->vm_flags & VM_WRITE) {
-               dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
-                        "writable (flags=%lx)\n", vma->vm_flags);
-               ret = -EPERM;
-               goto bail;
-       }
-       /* don't allow them to later change to writeable with mprotect */
-       vma->vm_flags &= ~VM_MAYWRITE;
-
-       start = vma->vm_start;
-
-       for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
-               pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
-               ret = remap_pfn_range(vma, start, pfn, size,
-                                     vma->vm_page_prot);
-               if (ret < 0)
-                       goto bail;
-       }
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/*
- * ipath_file_vma_fault - handle a VMA page fault.
- */
-static int ipath_file_vma_fault(struct vm_area_struct *vma,
-                                       struct vm_fault *vmf)
-{
-       struct page *page;
-
-       page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
-       if (!page)
-               return VM_FAULT_SIGBUS;
-       get_page(page);
-       vmf->page = page;
-
-       return 0;
-}
-
-static const struct vm_operations_struct ipath_file_vm_ops = {
-       .fault = ipath_file_vma_fault,
-};
-
-static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
-                      struct ipath_portdata *pd, unsigned subport)
-{
-       unsigned long len;
-       struct ipath_devdata *dd;
-       void *addr;
-       size_t size;
-       int ret = 0;
-
-       /* If the port is not shared, all addresses should be physical */
-       if (!pd->port_subport_cnt)
-               goto bail;
-
-       dd = pd->port_dd;
-       size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
-
-       /*
-        * Each process has all the subport uregbase, rcvhdrq, and
-        * rcvegrbufs mmapped - as an array for all the processes,
-        * and also separately for this process.
-        */
-       if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
-               addr = pd->subport_uregbase;
-               size = PAGE_SIZE * pd->port_subport_cnt;
-       } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
-               addr = pd->subport_rcvhdr_base;
-               size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
-       } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
-               addr = pd->subport_rcvegrbuf;
-               size *= pd->port_subport_cnt;
-        } else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
-                                        PAGE_SIZE * subport)) {
-                addr = pd->subport_uregbase + PAGE_SIZE * subport;
-                size = PAGE_SIZE;
-        } else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
-                                pd->port_rcvhdrq_size * subport)) {
-                addr = pd->subport_rcvhdr_base +
-                        pd->port_rcvhdrq_size * subport;
-                size = pd->port_rcvhdrq_size;
-        } else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
-                               size * subport)) {
-                addr = pd->subport_rcvegrbuf + size * subport;
-                /* rcvegrbufs are read-only on the slave */
-                if (vma->vm_flags & VM_WRITE) {
-                        dev_info(&dd->pcidev->dev,
-                                 "Can't map eager buffers as "
-                                 "writable (flags=%lx)\n", vma->vm_flags);
-                        ret = -EPERM;
-                        goto bail;
-                }
-                /*
-                 * Don't allow permission to later change to writeable
-                 * with mprotect.
-                 */
-                vma->vm_flags &= ~VM_MAYWRITE;
-       } else {
-               goto bail;
-       }
-       len = vma->vm_end - vma->vm_start;
-       if (len > size) {
-               ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
-       vma->vm_ops = &ipath_file_vm_ops;
-       vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-       ret = 1;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_mmap - mmap various structures into user space
- * @fp: the file pointer
- * @vma: the VM area
- *
- * We use this to have a shared buffer between the kernel and the user code
- * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
- * buffers in the chip.  We have the open and close entries so we can bump
- * the ref count and keep the driver from being unloaded while still mapped.
- */
-static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
-{
-       struct ipath_portdata *pd;
-       struct ipath_devdata *dd;
-       u64 pgaddr, ureg;
-       unsigned piobufs, piocnt;
-       int ret;
-
-       pd = port_fp(fp);
-       if (!pd) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       dd = pd->port_dd;
-
-       /*
-        * This is the ipath_do_user_init() code, mapping the shared buffers
-        * into the user process. The address referred to by vm_pgoff is the
-        * file offset passed via mmap().  For shared ports, this is the
-        * kernel vmalloc() address of the pages to share with the master.
-        * For non-shared or master ports, this is a physical address.
-        * We only do one mmap for each space mapped.
-        */
-       pgaddr = vma->vm_pgoff << PAGE_SHIFT;
-
-       /*
-        * Check for 0 in case one of the allocations failed, but user
-        * called mmap anyway.
-        */
-       if (!pgaddr)  {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
-                  (unsigned long long) pgaddr, vma->vm_start,
-                  vma->vm_end - vma->vm_start, dd->ipath_unit,
-                  pd->port_port, subport_fp(fp));
-
-       /*
-        * Physical addresses must fit in 40 bits for our hardware.
-        * Check for kernel virtual addresses first; anything else must
-        * match a HW or memory address.
-        */
-       ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
-       if (ret) {
-               if (ret > 0)
-                       ret = 0;
-               goto bail;
-       }
-
-       ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
-       if (!pd->port_subport_cnt) {
-               /* port is not shared */
-               piocnt = pd->port_piocnt;
-               piobufs = pd->port_piobufs;
-       } else if (!subport_fp(fp)) {
-               /* caller is the master */
-               piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
-                        (pd->port_piocnt % pd->port_subport_cnt);
-               piobufs = pd->port_piobufs +
-                       dd->ipath_palign * (pd->port_piocnt - piocnt);
-       } else {
-               unsigned slave = subport_fp(fp) - 1;
-
-               /* caller is a slave */
-               piocnt = pd->port_piocnt / pd->port_subport_cnt;
-               piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
-       }
-
-       if (pgaddr == ureg)
-               ret = mmap_ureg(vma, dd, ureg);
-       else if (pgaddr == piobufs)
-               ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
-       else if (pgaddr == dd->ipath_pioavailregs_phys)
-               /* in-memory copy of pioavail registers */
-               ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
-                                    (void *) dd->ipath_pioavailregs_dma,
-                                    "pioavail registers");
-       else if (pgaddr == pd->port_rcvegr_phys)
-               ret = mmap_rcvegrbufs(vma, pd);
-       else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
-               /*
-                * The rcvhdrq itself; readonly except on HT (so have
-                * to allow writable mapping), multiple pages, contiguous
-                * from an i/o perspective.
-                */
-               ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
-                                    pd->port_rcvhdrq,
-                                    "rcvhdrq");
-       else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
-               /* in-memory copy of rcvhdrq tail register */
-               ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
-                                    pd->port_rcvhdrtail_kvaddr,
-                                    "rcvhdrq tail");
-       else
-               ret = -EINVAL;
-
-       vma->vm_private_data = NULL;
-
-       if (ret < 0)
-               dev_info(&dd->pcidev->dev,
-                        "Failure %d on off %llx len %lx\n",
-                        -ret, (unsigned long long)pgaddr,
-                        vma->vm_end - vma->vm_start);
-bail:
-       return ret;
-}
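/*
 * Editorial sketch (not driver code) of the userspace side of ipath_mmap():
 * the driver keys off vm_pgoff << PAGE_SHIFT, i.e. the offset passed to
 * mmap(), which for non-shared ports is a physical/bus address published by
 * the driver.  The function and parameter names below are hypothetical; the
 * offset would come from the driver's port-info exchange, not from this file.
 */
#include <stdint.h>
#include <sys/mman.h>
#include <sys/types.h>

static void *map_pioavail(int fd, uint64_t pioavail_phys, size_t pagesize)
{
        /* Read-only shared mapping of the in-memory pioavail register copy. */
        return mmap(NULL, pagesize, PROT_READ, MAP_SHARED, fd,
                    (off_t)pioavail_phys);
}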
-
-static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
-{
-       unsigned pollflag = 0;
-
-       if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
-           pd->port_hdrqfull != pd->port_hdrqfull_poll) {
-               pollflag |= POLLIN | POLLRDNORM;
-               pd->port_hdrqfull_poll = pd->port_hdrqfull;
-       }
-
-       return pollflag;
-}
-
-static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
-                                     struct file *fp,
-                                     struct poll_table_struct *pt)
-{
-       unsigned pollflag = 0;
-       struct ipath_devdata *dd;
-
-       dd = pd->port_dd;
-
-       /* variable access in ipath_poll_hdrqfull() needs this */
-       rmb();
-       pollflag = ipath_poll_hdrqfull(pd);
-
-       if (pd->port_urgent != pd->port_urgent_poll) {
-               pollflag |= POLLIN | POLLRDNORM;
-               pd->port_urgent_poll = pd->port_urgent;
-       }
-
-       if (!pollflag) {
-               /* this saves a spin_lock/unlock in interrupt handler... */
-               set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
-               /* flush waiting flag so we don't miss an event... */
-               wmb();
-               poll_wait(fp, &pd->port_wait, pt);
-       }
-
-       return pollflag;
-}
-
-static unsigned int ipath_poll_next(struct ipath_portdata *pd,
-                                   struct file *fp,
-                                   struct poll_table_struct *pt)
-{
-       u32 head;
-       u32 tail;
-       unsigned pollflag = 0;
-       struct ipath_devdata *dd;
-
-       dd = pd->port_dd;
-
-       /* variable access in ipath_poll_hdrqfull() needs this */
-       rmb();
-       pollflag = ipath_poll_hdrqfull(pd);
-
-       head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
-       if (pd->port_rcvhdrtail_kvaddr)
-               tail = ipath_get_rcvhdrtail(pd);
-       else
-               tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
-
-       if (head != tail)
-               pollflag |= POLLIN | POLLRDNORM;
-       else {
-               /* this saves a spin_lock/unlock in interrupt handler */
-               set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
-               /* flush waiting flag so we don't miss an event */
-               wmb();
-
-               set_bit(pd->port_port + dd->ipath_r_intravail_shift,
-                       &dd->ipath_rcvctrl);
-
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                                dd->ipath_rcvctrl);
-
-               if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
-                       ipath_write_ureg(dd, ur_rcvhdrhead,
-                                        dd->ipath_rhdrhead_intr_off | head,
-                                        pd->port_port);
-
-               poll_wait(fp, &pd->port_wait, pt);
-       }
-
-       return pollflag;
-}
-
-static unsigned int ipath_poll(struct file *fp,
-                              struct poll_table_struct *pt)
-{
-       struct ipath_portdata *pd;
-       unsigned pollflag;
-
-       pd = port_fp(fp);
-       if (!pd)
-               pollflag = 0;
-       else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
-               pollflag = ipath_poll_urgent(pd, fp, pt);
-       else
-               pollflag = ipath_poll_next(pd, fp, pt);
-
-       return pollflag;
-}
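/*
 * Editorial sketch (not driver code) of the userspace side of ipath_poll():
 * a process sleeping until receive or urgent data is pending on its open
 * port fd.  This is plain poll(2) usage and assumes nothing beyond what the
 * poll handlers above report (POLLIN | POLLRDNORM).
 */
#include <poll.h>

static int wait_for_rcv(int fd, int timeout_ms)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        /* > 0 means data or an urgent event is pending; 0 means timeout. */
        return poll(&pfd, 1, timeout_ms);
}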
-
-static int ipath_supports_subports(int user_swmajor, int user_swminor)
-{
-       /* no subport implementation prior to software version 1.3 */
-       return (user_swmajor > 1) || (user_swminor >= 3);
-}
-
-static int ipath_compatible_subports(int user_swmajor, int user_swminor)
-{
-       /* this code is written long-hand for clarity */
-       if (IPATH_USER_SWMAJOR != user_swmajor) {
-               /* no promise of compatibility if major mismatch */
-               return 0;
-       }
-       if (IPATH_USER_SWMAJOR == 1) {
-               switch (IPATH_USER_SWMINOR) {
-               case 0:
-               case 1:
-               case 2:
-                       /* no subport implementation so cannot be compatible */
-                       return 0;
-               case 3:
-                       /* 3 is only compatible with itself */
-                       return user_swminor == 3;
-               default:
-                       /* >= 4 are compatible (or are expected to be) */
-                       return user_swminor >= 4;
-               }
-       }
-       /* make no promises yet for future major versions */
-       return 0;
-}
-
-static int init_subports(struct ipath_devdata *dd,
-                        struct ipath_portdata *pd,
-                        const struct ipath_user_info *uinfo)
-{
-       int ret = 0;
-       unsigned num_subports;
-       size_t size;
-
-       /*
-        * If the user is requesting zero subports,
-        * skip the subport allocation.
-        */
-       if (uinfo->spu_subport_cnt <= 0)
-               goto bail;
-
-       /* Self-consistency check for ipath_compatible_subports() */
-       if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
-           !ipath_compatible_subports(IPATH_USER_SWMAJOR,
-                                      IPATH_USER_SWMINOR)) {
-               dev_info(&dd->pcidev->dev,
-                        "Inconsistent ipath_compatible_subports()\n");
-               goto bail;
-       }
-
-       /* Check for subport compatibility */
-       if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
-                                      uinfo->spu_userversion & 0xffff)) {
-               dev_info(&dd->pcidev->dev,
-                        "Mismatched user version (%d.%d) and driver "
-                        "version (%d.%d) while port sharing. Ensure "
-                         "that driver and library are from the same "
-                         "release.\n",
-                        (int) (uinfo->spu_userversion >> 16),
-                         (int) (uinfo->spu_userversion & 0xffff),
-                        IPATH_USER_SWMAJOR,
-                        IPATH_USER_SWMINOR);
-               goto bail;
-       }
-       if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       num_subports = uinfo->spu_subport_cnt;
-       pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
-       if (!pd->subport_uregbase) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-       /* Note: pd->port_rcvhdrq_size isn't initialized yet. */
-       size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
-                    sizeof(u32), PAGE_SIZE) * num_subports;
-       pd->subport_rcvhdr_base = vzalloc(size);
-       if (!pd->subport_rcvhdr_base) {
-               ret = -ENOMEM;
-               goto bail_ureg;
-       }
-
-       pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
-                                       pd->port_rcvegrbuf_size *
-                                       num_subports);
-       if (!pd->subport_rcvegrbuf) {
-               ret = -ENOMEM;
-               goto bail_rhdr;
-       }
-
-       pd->port_subport_cnt = uinfo->spu_subport_cnt;
-       pd->port_subport_id = uinfo->spu_subport_id;
-       pd->active_slaves = 1;
-       set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-       goto bail;
-
-bail_rhdr:
-       vfree(pd->subport_rcvhdr_base);
-bail_ureg:
-       vfree(pd->subport_uregbase);
-       pd->subport_uregbase = NULL;
-bail:
-       return ret;
-}
-
-static int try_alloc_port(struct ipath_devdata *dd, int port,
-                         struct file *fp,
-                         const struct ipath_user_info *uinfo)
-{
-       struct ipath_portdata *pd;
-       int ret;
-
-       if (!(pd = dd->ipath_pd[port])) {
-               void *ptmp;
-
-               pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);
-
-               /*
-                * Allocate memory for use in ipath_tid_update() just once
-                * at open, not per call.  Reduces cost of expected send
-                * setup.
-                */
-               ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
-                              dd->ipath_rcvtidcnt * sizeof(struct page **),
-                              GFP_KERNEL);
-               if (!pd || !ptmp) {
-                       ipath_dev_err(dd, "Unable to allocate portdata "
-                                     "memory, failing open\n");
-                       ret = -ENOMEM;
-                       kfree(pd);
-                       kfree(ptmp);
-                       goto bail;
-               }
-               dd->ipath_pd[port] = pd;
-               dd->ipath_pd[port]->port_port = port;
-               dd->ipath_pd[port]->port_dd = dd;
-               dd->ipath_pd[port]->port_tid_pg_list = ptmp;
-               init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
-       }
-       if (!pd->port_cnt) {
-               pd->userversion = uinfo->spu_userversion;
-               init_user_egr_sizes(pd);
-               if ((ret = init_subports(dd, pd, uinfo)) != 0)
-                       goto bail;
-               ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
-                          current->comm, current->pid, dd->ipath_unit,
-                          port);
-               pd->port_cnt = 1;
-               port_fp(fp) = pd;
-               pd->port_pid = get_pid(task_pid(current));
-               strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
-               ipath_stats.sps_ports++;
-               ret = 0;
-       } else
-               ret = -EBUSY;
-
-bail:
-       return ret;
-}
-
-static inline int usable(struct ipath_devdata *dd)
-{
-       return dd &&
-               (dd->ipath_flags & IPATH_PRESENT) &&
-               dd->ipath_kregbase &&
-               dd->ipath_lid &&
-               !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
-                                    | IPATH_LINKUNK));
-}
-
-static int find_free_port(int unit, struct file *fp,
-                         const struct ipath_user_info *uinfo)
-{
-       struct ipath_devdata *dd = ipath_lookup(unit);
-       int ret, i;
-
-       if (!dd) {
-               ret = -ENODEV;
-               goto bail;
-       }
-
-       if (!usable(dd)) {
-               ret = -ENETDOWN;
-               goto bail;
-       }
-
-       for (i = 1; i < dd->ipath_cfgports; i++) {
-               ret = try_alloc_port(dd, i, fp, uinfo);
-               if (ret != -EBUSY)
-                       goto bail;
-       }
-       ret = -EBUSY;
-
-bail:
-       return ret;
-}
-
-static int find_best_unit(struct file *fp,
-                         const struct ipath_user_info *uinfo)
-{
-       int ret = 0, i, prefunit = -1, devmax;
-       int maxofallports, npresent, nup;
-       int ndev;
-
-       devmax = ipath_count_units(&npresent, &nup, &maxofallports);
-
-       /*
-        * This code is present to allow a knowledgeable person to
-        * specify the layout of processes to processors before opening
-        * this driver, and then we'll assign the process to the "closest"
-        * InfiniPath chip to that processor (we assume reasonable connectivity,
-        * for now).  This code assumes that if affinity has been set
-        * before this point, at most one cpu is set; for now this
-        * is reasonable.  I check for both cpumask_empty() and cpumask_full(),
-        * in case some kernel variant sets none of the bits when no
-        * affinity is set.  2.6.11 and 12 kernels have all present
-        * cpus set.  Some day we'll have to fix it up further to handle
-        * a cpu subset.  This algorithm fails for two HT chips connected
-        * in tunnel fashion.  Eventually this needs real topology
-        * information.  There may be some issues with dual core numbering
-        * as well.  This needs more work prior to release.
-        */
-       if (!cpumask_empty(tsk_cpus_allowed(current)) &&
-           !cpumask_full(tsk_cpus_allowed(current))) {
-               int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
-               get_online_cpus();
-               for_each_online_cpu(i)
-                       if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) {
-                               ipath_cdbg(PROC, "%s[%u] affinity set for "
-                                          "cpu %d/%d\n", current->comm,
-                                          current->pid, i, ncpus);
-                               curcpu = i;
-                               nset++;
-                       }
-               put_online_cpus();
-               if (curcpu != -1 && nset != ncpus) {
-                       if (npresent) {
-                               prefunit = curcpu / (ncpus / npresent);
-                               ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
-                                         "%d cpus/chip, select unit %d\n",
-                                         current->comm, current->pid,
-                                         npresent, ncpus, ncpus / npresent,
-                                         prefunit);
-                       }
-               }
-       }
-
-       /*
-        * User ports start at 1; the kernel port is 0.
-        * For now, we do round-robin access across all chips.
-        */
-
-       if (prefunit != -1)
-               devmax = prefunit + 1;
-recheck:
-       for (i = 1; i < maxofallports; i++) {
-               for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
-                    ndev++) {
-                       struct ipath_devdata *dd = ipath_lookup(ndev);
-
-                       if (!usable(dd))
-                               continue; /* can't use this unit */
-                       if (i >= dd->ipath_cfgports)
-                               /*
-                                * Maxed out on users of this unit. Try
-                                * next.
-                                */
-                               continue;
-                       ret = try_alloc_port(dd, i, fp, uinfo);
-                       if (!ret)
-                               goto done;
-               }
-       }
-
-       if (npresent) {
-               if (nup == 0) {
-                       ret = -ENETDOWN;
-                       ipath_dbg("No ports available (none initialized "
-                                 "and ready)\n");
-               } else {
-                       if (prefunit > 0) {
-                               /* if started above 0, retry from 0 */
-                               ipath_cdbg(PROC,
-                                          "%s[%u] no ports on prefunit "
-                                          "%d, clear and re-check\n",
-                                          current->comm, current->pid,
-                                          prefunit);
-                               devmax = ipath_count_units(NULL, NULL,
-                                                          NULL);
-                               prefunit = -1;
-                               goto recheck;
-                       }
-                       ret = -EBUSY;
-                       ipath_dbg("No ports available\n");
-               }
-       } else {
-               ret = -ENXIO;
-               ipath_dbg("No boards found\n");
-       }
-
-done:
-       return ret;
-}
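/*
 * Editorial sketch (not driver code) of the affinity heuristic in
 * find_best_unit(): with the example values below (assumptions, not measured
 * numbers), a process pinned to CPU 5 on an 8-CPU box with 2 chips present
 * prefers unit 5 / (8 / 2) = 1.
 */
#include <stdio.h>

int main(void)
{
        int ncpus = 8, npresent = 2, curcpu = 5;   /* assumed example values */
        int prefunit = curcpu / (ncpus / npresent);

        printf("prefer unit %d\n", prefunit);      /* prints "prefer unit 1" */
        return 0;
}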
-
-static int find_shared_port(struct file *fp,
-                           const struct ipath_user_info *uinfo)
-{
-       int devmax, ndev, i;
-       int ret = 0;
-
-       devmax = ipath_count_units(NULL, NULL, NULL);
-
-       for (ndev = 0; ndev < devmax; ndev++) {
-               struct ipath_devdata *dd = ipath_lookup(ndev);
-
-               if (!usable(dd))
-                       continue;
-               for (i = 1; i < dd->ipath_cfgports; i++) {
-                       struct ipath_portdata *pd = dd->ipath_pd[i];
-
-                       /* Skip ports which are not yet open */
-                       if (!pd || !pd->port_cnt)
-                               continue;
-                       /* Skip port if it doesn't match the requested one */
-                       if (pd->port_subport_id != uinfo->spu_subport_id)
-                               continue;
-                       /* Verify the sharing process matches the master */
-                       if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
-                           pd->userversion != uinfo->spu_userversion ||
-                           pd->port_cnt >= pd->port_subport_cnt) {
-                               ret = -EINVAL;
-                               goto done;
-                       }
-                       port_fp(fp) = pd;
-                       subport_fp(fp) = pd->port_cnt++;
-                       pd->port_subpid[subport_fp(fp)] =
-                               get_pid(task_pid(current));
-                       tidcursor_fp(fp) = 0;
-                       pd->active_slaves |= 1 << subport_fp(fp);
-                       ipath_cdbg(PROC,
-                                  "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
-                                  current->comm, current->pid,
-                                  subport_fp(fp),
-                                  pd->port_comm, pid_nr(pd->port_pid),
-                                  dd->ipath_unit, pd->port_port);
-                       ret = 1;
-                       goto done;
-               }
-       }
-
-done:
-       return ret;
-}
-
-static int ipath_open(struct inode *in, struct file *fp)
-{
-       /* The real work is performed later in ipath_assign_port() */
-       fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
-       return fp->private_data ? 0 : -ENOMEM;
-}
-
-/* Get the port early, so we can set affinity prior to memory allocation */
-static int ipath_assign_port(struct file *fp,
-                             const struct ipath_user_info *uinfo)
-{
-       int ret;
-       int i_minor;
-       unsigned swmajor, swminor;
-
-       /* Check to be sure we haven't already initialized this file */
-       if (port_fp(fp)) {
-               ret = -EINVAL;
-               goto done;
-       }
-
-       /* for now, if major version is different, bail */
-       swmajor = uinfo->spu_userversion >> 16;
-       if (swmajor != IPATH_USER_SWMAJOR) {
-               ipath_dbg("User major version %d not same as driver "
-                         "major %d\n", uinfo->spu_userversion >> 16,
-                         IPATH_USER_SWMAJOR);
-               ret = -ENODEV;
-               goto done;
-       }
-
-       swminor = uinfo->spu_userversion & 0xffff;
-       if (swminor != IPATH_USER_SWMINOR)
-               ipath_dbg("User minor version %d not same as driver "
-                         "minor %d\n", swminor, IPATH_USER_SWMINOR);
-
-       mutex_lock(&ipath_mutex);
-
-       if (ipath_compatible_subports(swmajor, swminor) &&
-           uinfo->spu_subport_cnt &&
-           (ret = find_shared_port(fp, uinfo))) {
-               if (ret > 0)
-                       ret = 0;
-               goto done_chk_sdma;
-       }
-
-       i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;
-       ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
-                  (long)file_inode(fp)->i_rdev, i_minor);
-
-       if (i_minor)
-               ret = find_free_port(i_minor - 1, fp, uinfo);
-       else
-               ret = find_best_unit(fp, uinfo);
-
-done_chk_sdma:
-       if (!ret) {
-               struct ipath_filedata *fd = fp->private_data;
-               const struct ipath_portdata *pd = fd->pd;
-               const struct ipath_devdata *dd = pd->port_dd;
-
-               fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
-                                                     dd->ipath_unit,
-                                                     pd->port_port,
-                                                     fd->subport);
-
-               if (!fd->pq)
-                       ret = -ENOMEM;
-       }
-
-       mutex_unlock(&ipath_mutex);
-
-done:
-       return ret;
-}
-
-
-static int ipath_do_user_init(struct file *fp,
-                             const struct ipath_user_info *uinfo)
-{
-       int ret;
-       struct ipath_portdata *pd = port_fp(fp);
-       struct ipath_devdata *dd;
-       u32 head32;
-
-       /* Subports don't need to initialize anything since master did it. */
-       if (subport_fp(fp)) {
-               ret = wait_event_interruptible(pd->port_wait,
-                       !test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
-               goto done;
-       }
-
-       dd = pd->port_dd;
-
-       if (uinfo->spu_rcvhdrsize) {
-               ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
-               if (ret)
-                       goto done;
-       }
-
-       /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */
-
-       /* some ports may get extra buffers, calculate that here */
-       if (pd->port_port <= dd->ipath_ports_extrabuf)
-               pd->port_piocnt = dd->ipath_pbufsport + 1;
-       else
-               pd->port_piocnt = dd->ipath_pbufsport;
-
-       /* for right now, kernel piobufs are at end, so port 1 is at 0 */
-       if (pd->port_port <= dd->ipath_ports_extrabuf)
-               pd->port_pio_base = (dd->ipath_pbufsport + 1)
-                       * (pd->port_port - 1);
-       else
-               pd->port_pio_base = dd->ipath_ports_extrabuf +
-                       dd->ipath_pbufsport * (pd->port_port - 1);
-       pd->port_piobufs = dd->ipath_piobufbase +
-               pd->port_pio_base * dd->ipath_palign;
-       ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
-               " first pio %u\n", pd->port_port, pd->port_piobufs,
-               pd->port_piocnt, pd->port_pio_base);
-       ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);
-
-       /*
-        * Now allocate the rcvhdr Q and eager TIDs; skip the TID
-        * array for the time being.  If pd->port_port > chip-supported,
-        * we will someday need extra logic here to handle overflow
-        * through port 0.
-        */
-       ret = ipath_create_rcvhdrq(dd, pd);
-       if (!ret)
-               ret = ipath_create_user_egr(pd);
-       if (ret)
-               goto done;
-
-       /*
-        * set the eager head register for this port to the current values
-        * of the tail pointers, since we don't know if they were
-        * updated on last use of the port.
-        */
-       head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
-       ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
-       pd->port_lastrcvhdrqtail = -1;
-       ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
-               pd->port_port, head32);
-       pd->port_tidcursor = 0; /* start at beginning after open */
-
-       /* initialize poll variables... */
-       pd->port_urgent = 0;
-       pd->port_urgent_poll = 0;
-       pd->port_hdrqfull_poll = pd->port_hdrqfull;
-
-       /*
-        * Now enable the port for receive.
-        * Chips may be configured to DMA the tail register to memory
-        * when it changes (and when the update bit transitions from
-        * 0 to 1); for those chips, we turn the update off and then back on.
-        * This will (very briefly) affect any other open ports, but the
-        * duration is very short, and therefore isn't an issue.  We
-        * explicitly set the in-memory tail copy to 0 beforehand, so we
-        * don't have to wait to be sure the DMA update has happened
-        * (chip resets head/tail to 0 on transition to enable).
-        */
-       set_bit(dd->ipath_r_portenable_shift + pd->port_port,
-               &dd->ipath_rcvctrl);
-       if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
-               if (pd->port_rcvhdrtail_kvaddr)
-                       ipath_clear_rcvhdrtail(pd);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                       dd->ipath_rcvctrl &
-                       ~(1ULL << dd->ipath_r_tailupd_shift));
-       }
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-       /* Notify any waiting slaves */
-       if (pd->port_subport_cnt) {
-               clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
-               wake_up(&pd->port_wait);
-       }
-done:
-       return ret;
-}
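/*
 * Editorial sketch (not driver code) of the PIO buffer accounting in
 * ipath_do_user_init(): the first ipath_ports_extrabuf ports get one extra
 * buffer each, and each port's first buffer index follows from that.  The
 * per-port buffer count and number of "extra" ports below are assumptions
 * for illustration only.
 */
#include <stdio.h>

int main(void)
{
        unsigned pbufsport = 32, extrabuf = 2;     /* assumed layout */
        unsigned port;

        for (port = 1; port <= 4; port++) {
                unsigned piocnt = (port <= extrabuf) ? pbufsport + 1 : pbufsport;
                unsigned base = (port <= extrabuf) ?
                        (pbufsport + 1) * (port - 1) :
                        extrabuf + pbufsport * (port - 1);

                printf("port %u: pio base %u, count %u\n", port, base, piocnt);
        }
        return 0;
}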
-
-/**
- * unlock_expected_tids - unlock any expected TID entries the port still had in use
- * @pd: port
- *
- * We don't actually update the chip here, because we do a bulk update
- * below, using ipath_f_clear_tids.
- */
-static void unlock_expected_tids(struct ipath_portdata *pd)
-{
-       struct ipath_devdata *dd = pd->port_dd;
-       int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
-       int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;
-
-       ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
-                  pd->port_port);
-       for (i = port_tidbase; i < maxtid; i++) {
-               struct page *ps = dd->ipath_pageshadow[i];
-
-               if (!ps)
-                       continue;
-
-               dd->ipath_pageshadow[i] = NULL;
-               pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
-                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
-               ipath_release_user_pages_on_close(&ps, 1);
-               cnt++;
-               ipath_stats.sps_pageunlocks++;
-       }
-       if (cnt)
-               ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
-                          pd->port_port, cnt);
-
-       if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
-               ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
-                          (unsigned long long) ipath_stats.sps_pagelocks,
-                          (unsigned long long)
-                          ipath_stats.sps_pageunlocks);
-}
-
-static int ipath_close(struct inode *in, struct file *fp)
-{
-       struct ipath_filedata *fd;
-       struct ipath_portdata *pd;
-       struct ipath_devdata *dd;
-       unsigned long flags;
-       unsigned port;
-       struct pid *pid;
-
-       ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
-                  (long)in->i_rdev, fp->private_data);
-
-       mutex_lock(&ipath_mutex);
-
-       fd = fp->private_data;
-       fp->private_data = NULL;
-       pd = fd->pd;
-       if (!pd) {
-               mutex_unlock(&ipath_mutex);
-               goto bail;
-       }
-
-       dd = pd->port_dd;
-
-       /* drain user sdma queue */
-       ipath_user_sdma_queue_drain(dd, fd->pq);
-       ipath_user_sdma_queue_destroy(fd->pq);
-
-       if (--pd->port_cnt) {
-               /*
-                * XXX If the master closes the port before the slave(s),
-                * revoke the mmap for the eager receive queue so
-                * the slave(s) don't wait for receive data forever.
-                */
-               pd->active_slaves &= ~(1 << fd->subport);
-               put_pid(pd->port_subpid[fd->subport]);
-               pd->port_subpid[fd->subport] = NULL;
-               mutex_unlock(&ipath_mutex);
-               goto bail;
-       }
-       /* early; no interrupt users after this */
-       spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
-       port = pd->port_port;
-       dd->ipath_pd[port] = NULL;
-       pid = pd->port_pid;
-       pd->port_pid = NULL;
-       spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);
-
-       if (pd->port_rcvwait_to || pd->port_piowait_to
-           || pd->port_rcvnowait || pd->port_pionowait) {
-               ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
-                          "%u rcv, %u pio already\n",
-                          pd->port_port, pd->port_rcvwait_to,
-                          pd->port_piowait_to, pd->port_rcvnowait,
-                          pd->port_pionowait);
-               pd->port_rcvwait_to = pd->port_piowait_to =
-                       pd->port_rcvnowait = pd->port_pionowait = 0;
-       }
-       if (pd->port_flag) {
-               ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
-                         pd->port_port, pd->port_flag);
-               pd->port_flag = 0;
-       }
-
-       if (dd->ipath_kregbase) {
-               /* atomically clear receive enable port and intr avail. */
-               clear_bit(dd->ipath_r_portenable_shift + port,
-                         &dd->ipath_rcvctrl);
-               clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
-                         &dd->ipath_rcvctrl);
-               ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
-                       dd->ipath_rcvctrl);
-               /* and read back from chip to be sure that nothing
-                * else is in flight when we do the rest */
-               (void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-               /* clean up the pkeys for this port user */
-               ipath_clean_part_key(pd, dd);
-               /*
-                * be paranoid, and never write 0's to these, just use an
-                * unused part of the port 0 tail page.  Of course,
-                * rcvhdraddr points to a large chunk of memory, so this
-                * could still trash things, but at least it won't trash
-                * page 0, and by disabling the port, it should stop "soon",
-                * even if a packet or two is already in flight after we
-                * disabled the port.
-                */
-               ipath_write_kreg_port(dd,
-                       dd->ipath_kregs->kr_rcvhdrtailaddr, port,
-                       dd->ipath_dummy_hdrq_phys);
-               ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
-                       pd->port_port, dd->ipath_dummy_hdrq_phys);
-
-               ipath_disarm_piobufs(dd, pd->port_pio_base, pd->port_piocnt);
-               ipath_chg_pioavailkernel(dd, pd->port_pio_base,
-                       pd->port_piocnt, 1);
-
-               dd->ipath_f_clear_tids(dd, pd->port_port);
-
-               if (dd->ipath_pageshadow)
-                       unlock_expected_tids(pd);
-               ipath_stats.sps_ports--;
-               ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
-                          pd->port_comm, pid_nr(pid),
-                          dd->ipath_unit, port);
-       }
-
-       put_pid(pid);
-       mutex_unlock(&ipath_mutex);
-       ipath_free_pddata(dd, pd); /* after releasing the mutex */
-
-bail:
-       kfree(fd);
-       return 0;
-}
-
-static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
-                          struct ipath_port_info __user *uinfo)
-{
-       struct ipath_port_info info;
-       int nup;
-       int ret;
-       size_t sz;
-
-       (void) ipath_count_units(NULL, &nup, NULL);
-       info.num_active = nup;
-       info.unit = pd->port_dd->ipath_unit;
-       info.port = pd->port_port;
-       info.subport = subport;
-       /* Don't return new fields if old library opened the port. */
-       if (ipath_supports_subports(pd->userversion >> 16,
-                                   pd->userversion & 0xffff)) {
-               /* Number of user ports available for this device. */
-               info.num_ports = pd->port_dd->ipath_cfgports - 1;
-               info.num_subports = pd->port_subport_cnt;
-               sz = sizeof(info);
-       } else
-               sz = sizeof(info) - 2 * sizeof(u16);
-
-       if (copy_to_user(uinfo, &info, sz)) {
-               ret = -EFAULT;
-               goto bail;
-       }
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-static int ipath_get_slave_info(struct ipath_portdata *pd,
-                               void __user *slave_mask_addr)
-{
-       int ret = 0;
-
-       if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
-               ret = -EFAULT;
-       return ret;
-}
-
-static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
-                                  u32 __user *inflightp)
-{
-       const u32 val = ipath_user_sdma_inflight_counter(pq);
-
-       if (put_user(val, inflightp))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int ipath_sdma_get_complete(struct ipath_devdata *dd,
-                                  struct ipath_user_sdma_queue *pq,
-                                  u32 __user *completep)
-{
-       u32 val;
-       int err;
-
-       err = ipath_user_sdma_make_progress(dd, pq);
-       if (err < 0)
-               return err;
-
-       val = ipath_user_sdma_complete_counter(pq);
-       if (put_user(val, completep))
-               return -EFAULT;
-
-       return 0;
-}
-
-static ssize_t ipath_write(struct file *fp, const char __user *data,
-                          size_t count, loff_t *off)
-{
-       const struct ipath_cmd __user *ucmd;
-       struct ipath_portdata *pd;
-       const void __user *src;
-       size_t consumed, copy;
-       struct ipath_cmd cmd;
-       ssize_t ret = 0;
-       void *dest;
-
-       if (count < sizeof(cmd.type)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       ucmd = (const struct ipath_cmd __user *) data;
-
-       if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
-               ret = -EFAULT;
-               goto bail;
-       }
-
-       consumed = sizeof(cmd.type);
-
-       switch (cmd.type) {
-       case IPATH_CMD_ASSIGN_PORT:
-       case __IPATH_CMD_USER_INIT:
-       case IPATH_CMD_USER_INIT:
-               copy = sizeof(cmd.cmd.user_info);
-               dest = &cmd.cmd.user_info;
-               src = &ucmd->cmd.user_info;
-               break;
-       case IPATH_CMD_RECV_CTRL:
-               copy = sizeof(cmd.cmd.recv_ctrl);
-               dest = &cmd.cmd.recv_ctrl;
-               src = &ucmd->cmd.recv_ctrl;
-               break;
-       case IPATH_CMD_PORT_INFO:
-               copy = sizeof(cmd.cmd.port_info);
-               dest = &cmd.cmd.port_info;
-               src = &ucmd->cmd.port_info;
-               break;
-       case IPATH_CMD_TID_UPDATE:
-       case IPATH_CMD_TID_FREE:
-               copy = sizeof(cmd.cmd.tid_info);
-               dest = &cmd.cmd.tid_info;
-               src = &ucmd->cmd.tid_info;
-               break;
-       case IPATH_CMD_SET_PART_KEY:
-               copy = sizeof(cmd.cmd.part_key);
-               dest = &cmd.cmd.part_key;
-               src = &ucmd->cmd.part_key;
-               break;
-       case __IPATH_CMD_SLAVE_INFO:
-               copy = sizeof(cmd.cmd.slave_mask_addr);
-               dest = &cmd.cmd.slave_mask_addr;
-               src = &ucmd->cmd.slave_mask_addr;
-               break;
-       case IPATH_CMD_PIOAVAILUPD:     /* force an update of PIOAvail reg */
-               copy = 0;
-               src = NULL;
-               dest = NULL;
-               break;
-       case IPATH_CMD_POLL_TYPE:
-               copy = sizeof(cmd.cmd.poll_type);
-               dest = &cmd.cmd.poll_type;
-               src = &ucmd->cmd.poll_type;
-               break;
-       case IPATH_CMD_ARMLAUNCH_CTRL:
-               copy = sizeof(cmd.cmd.armlaunch_ctrl);
-               dest = &cmd.cmd.armlaunch_ctrl;
-               src = &ucmd->cmd.armlaunch_ctrl;
-               break;
-       case IPATH_CMD_SDMA_INFLIGHT:
-               copy = sizeof(cmd.cmd.sdma_inflight);
-               dest = &cmd.cmd.sdma_inflight;
-               src = &ucmd->cmd.sdma_inflight;
-               break;
-       case IPATH_CMD_SDMA_COMPLETE:
-               copy = sizeof(cmd.cmd.sdma_complete);
-               dest = &cmd.cmd.sdma_complete;
-               src = &ucmd->cmd.sdma_complete;
-               break;
-       default:
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (copy) {
-               if ((count - consumed) < copy) {
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               if (copy_from_user(dest, src, copy)) {
-                       ret = -EFAULT;
-                       goto bail;
-               }
-
-               consumed += copy;
-       }
-
-       pd = port_fp(fp);
-       if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
-               cmd.type != IPATH_CMD_ASSIGN_PORT) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       switch (cmd.type) {
-       case IPATH_CMD_ASSIGN_PORT:
-               ret = ipath_assign_port(fp, &cmd.cmd.user_info);
-               if (ret)
-                       goto bail;
-               break;
-       case __IPATH_CMD_USER_INIT:
-               /* backwards compatibility, get port first */
-               ret = ipath_assign_port(fp, &cmd.cmd.user_info);
-               if (ret)
-                       goto bail;
-               /* and fall through to current version. */
-       case IPATH_CMD_USER_INIT:
-               ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
-               if (ret)
-                       goto bail;
-               ret = ipath_get_base_info(
-                       fp, (void __user *) (unsigned long)
-                       cmd.cmd.user_info.spu_base_info,
-                       cmd.cmd.user_info.spu_base_info_size);
-               break;
-       case IPATH_CMD_RECV_CTRL:
-               ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
-               break;
-       case IPATH_CMD_PORT_INFO:
-               ret = ipath_port_info(pd, subport_fp(fp),
-                                     (struct ipath_port_info __user *)
-                                     (unsigned long) cmd.cmd.port_info);
-               break;
-       case IPATH_CMD_TID_UPDATE:
-               ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
-               break;
-       case IPATH_CMD_TID_FREE:
-               ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
-               break;
-       case IPATH_CMD_SET_PART_KEY:
-               ret = ipath_set_part_key(pd, cmd.cmd.part_key);
-               break;
-       case __IPATH_CMD_SLAVE_INFO:
-               ret = ipath_get_slave_info(pd,
-                                          (void __user *) (unsigned long)
-                                          cmd.cmd.slave_mask_addr);
-               break;
-       case IPATH_CMD_PIOAVAILUPD:
-               ipath_force_pio_avail_update(pd->port_dd);
-               break;
-       case IPATH_CMD_POLL_TYPE:
-               pd->poll_type = cmd.cmd.poll_type;
-               break;
-       case IPATH_CMD_ARMLAUNCH_CTRL:
-               if (cmd.cmd.armlaunch_ctrl)
-                       ipath_enable_armlaunch(pd->port_dd);
-               else
-                       ipath_disable_armlaunch(pd->port_dd);
-               break;
-       case IPATH_CMD_SDMA_INFLIGHT:
-               ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
-                                             (u32 __user *) (unsigned long)
-                                             cmd.cmd.sdma_inflight);
-               break;
-       case IPATH_CMD_SDMA_COMPLETE:
-               ret = ipath_sdma_get_complete(pd->port_dd,
-                                             user_sdma_queue_fp(fp),
-                                             (u32 __user *) (unsigned long)
-                                             cmd.cmd.sdma_complete);
-               break;
-       }
-
-       if (ret >= 0)
-               ret = consumed;
-
-bail:
-       return ret;
-}
-
-static ssize_t ipath_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
-       struct file *filp = iocb->ki_filp;
-       struct ipath_filedata *fp = filp->private_data;
-       struct ipath_portdata *pd = port_fp(filp);
-       struct ipath_user_sdma_queue *pq = fp->pq;
-
-       if (!iter_is_iovec(from) || !from->nr_segs)
-               return -EINVAL;
-
-       return ipath_user_sdma_writev(pd->port_dd, pq, from->iov, from->nr_segs);
-}
-
-static struct class *ipath_class;
-
-static int init_cdev(int minor, char *name, const struct file_operations *fops,
-                    struct cdev **cdevp, struct device **devp)
-{
-       const dev_t dev = MKDEV(IPATH_MAJOR, minor);
-       struct cdev *cdev = NULL;
-       struct device *device = NULL;
-       int ret;
-
-       cdev = cdev_alloc();
-       if (!cdev) {
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Could not allocate cdev for minor %d, %s\n",
-                      minor, name);
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       cdev->owner = THIS_MODULE;
-       cdev->ops = fops;
-       kobject_set_name(&cdev->kobj, name);
-
-       ret = cdev_add(cdev, dev, 1);
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME
-                      ": Could not add cdev for minor %d, %s (err %d)\n",
-                      minor, name, -ret);
-               goto err_cdev;
-       }
-
-       device = device_create(ipath_class, NULL, dev, NULL, name);
-
-       if (IS_ERR(device)) {
-               ret = PTR_ERR(device);
-               printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
-                      "device for minor %d, %s (err %d)\n",
-                      minor, name, -ret);
-               goto err_cdev;
-       }
-
-       goto done;
-
-err_cdev:
-       cdev_del(cdev);
-       cdev = NULL;
-
-done:
-       if (ret >= 0) {
-               *cdevp = cdev;
-               *devp = device;
-       } else {
-               *cdevp = NULL;
-               *devp = NULL;
-       }
-
-       return ret;
-}
-
-int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
-                   struct cdev **cdevp, struct device **devp)
-{
-       return init_cdev(minor, name, fops, cdevp, devp);
-}
-
-static void cleanup_cdev(struct cdev **cdevp,
-                        struct device **devp)
-{
-       struct device *dev = *devp;
-
-       if (dev) {
-               device_unregister(dev);
-               *devp = NULL;
-       }
-
-       if (*cdevp) {
-               cdev_del(*cdevp);
-               *cdevp = NULL;
-       }
-}
-
-void ipath_cdev_cleanup(struct cdev **cdevp,
-                       struct device **devp)
-{
-       cleanup_cdev(cdevp, devp);
-}
-
-static struct cdev *wildcard_cdev;
-static struct device *wildcard_dev;
-
-static const dev_t dev = MKDEV(IPATH_MAJOR, 0);
-
-static int user_init(void)
-{
-       int ret;
-
-       ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
-       if (ret < 0) {
-               printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
-                      "chrdev region (err %d)\n", -ret);
-               goto done;
-       }
-
-       ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);
-
-       if (IS_ERR(ipath_class)) {
-               ret = PTR_ERR(ipath_class);
-               printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
-                      "device class (err %d)\n", -ret);
-               goto bail;
-       }
-
-       goto done;
-bail:
-       unregister_chrdev_region(dev, IPATH_NMINORS);
-done:
-       return ret;
-}
-
-static void user_cleanup(void)
-{
-       if (ipath_class) {
-               class_destroy(ipath_class);
-               ipath_class = NULL;
-       }
-
-       unregister_chrdev_region(dev, IPATH_NMINORS);
-}
-
-static atomic_t user_count = ATOMIC_INIT(0);
-static atomic_t user_setup = ATOMIC_INIT(0);
-
-int ipath_user_add(struct ipath_devdata *dd)
-{
-       char name[10];
-       int ret;
-
-       if (atomic_inc_return(&user_count) == 1) {
-               ret = user_init();
-               if (ret < 0) {
-                       ipath_dev_err(dd, "Unable to set up user support: "
-                                     "error %d\n", -ret);
-                       goto bail;
-               }
-               ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
-                               &wildcard_dev);
-               if (ret < 0) {
-                       ipath_dev_err(dd, "Could not create wildcard "
-                                     "minor: error %d\n", -ret);
-                       goto bail_user;
-               }
-
-               atomic_set(&user_setup, 1);
-       }
-
-       snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);
-
-       ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
-                       &dd->user_cdev, &dd->user_dev);
-       if (ret < 0)
-               ipath_dev_err(dd, "Could not create user minor %d, %s\n",
-                             dd->ipath_unit + 1, name);
-
-       goto bail;
-
-bail_user:
-       user_cleanup();
-bail:
-       return ret;
-}
-
-void ipath_user_remove(struct ipath_devdata *dd)
-{
-       cleanup_cdev(&dd->user_cdev, &dd->user_dev);
-
-       if (atomic_dec_return(&user_count) == 0) {
-               if (atomic_read(&user_setup) == 0)
-                       goto bail;
-
-               cleanup_cdev(&wildcard_cdev, &wildcard_dev);
-               user_cleanup();
-
-               atomic_set(&user_setup, 0);
-       }
-bail:
-       return;
-}
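
For reference, a minimal userspace-side sketch of the write()-based command ABI that ipath_write() above dispatches.  The header name and the way the device node is opened are assumptions; the command type and field usage follow the IPATH_CMD_PORT_INFO case in the switch above.

#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include "ipath_user.h"         /* assumed userspace copy of struct ipath_cmd */

/* 'fd' is an already-opened ipath device node (path intentionally omitted). */
static int query_port_info(int fd, struct ipath_port_info *info)
{
        struct ipath_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.type = IPATH_CMD_PORT_INFO;
        /* ipath_write() casts this back to a __user pointer for copy_to_user() */
        cmd.cmd.port_info = (uint64_t)(unsigned long)info;

        /* on success the driver returns the number of command bytes it consumed */
        return write(fd, &cmd, sizeof(cmd)) < 0 ? -1 : 0;
}
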
diff --git a/drivers/staging/rdma/ipath/ipath_fs.c b/drivers/staging/rdma/ipath/ipath_fs.c
deleted file mode 100644 (file)
index 476fcdf..0000000
+++ /dev/null
@@ -1,415 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/init.h>
-#include <linux/namei.h>
-#include <linux/slab.h>
-
-#include "ipath_kernel.h"
-
-#define IPATHFS_MAGIC 0x726a77
-
-static struct super_block *ipath_super;
-
-static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
-                        umode_t mode, const struct file_operations *fops,
-                        void *data)
-{
-       int error;
-       struct inode *inode = new_inode(dir->i_sb);
-
-       if (!inode) {
-               error = -EPERM;
-               goto bail;
-       }
-
-       inode->i_ino = get_next_ino();
-       inode->i_mode = mode;
-       inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-       inode->i_private = data;
-       if (S_ISDIR(mode)) {
-               inode->i_op = &simple_dir_inode_operations;
-               inc_nlink(inode);
-               inc_nlink(dir);
-       }
-
-       inode->i_fop = fops;
-
-       d_instantiate(dentry, inode);
-       error = 0;
-
-bail:
-       return error;
-}
-
-static int create_file(const char *name, umode_t mode,
-                      struct dentry *parent, struct dentry **dentry,
-                      const struct file_operations *fops, void *data)
-{
-       int error;
-
-       inode_lock(d_inode(parent));
-       *dentry = lookup_one_len(name, parent, strlen(name));
-       if (!IS_ERR(*dentry))
-               error = ipathfs_mknod(d_inode(parent), *dentry,
-                                     mode, fops, data);
-       else
-               error = PTR_ERR(*dentry);
-       inode_unlock(d_inode(parent));
-
-       return error;
-}
-
-static ssize_t atomic_stats_read(struct file *file, char __user *buf,
-                                size_t count, loff_t *ppos)
-{
-       return simple_read_from_buffer(buf, count, ppos, &ipath_stats,
-                                      sizeof ipath_stats);
-}
-
-static const struct file_operations atomic_stats_ops = {
-       .read = atomic_stats_read,
-       .llseek = default_llseek,
-};
-
-static ssize_t atomic_counters_read(struct file *file, char __user *buf,
-                                   size_t count, loff_t *ppos)
-{
-       struct infinipath_counters counters;
-       struct ipath_devdata *dd;
-
-       dd = file_inode(file)->i_private;
-       dd->ipath_f_read_counters(dd, &counters);
-
-       return simple_read_from_buffer(buf, count, ppos, &counters,
-                                      sizeof counters);
-}
-
-static const struct file_operations atomic_counters_ops = {
-       .read = atomic_counters_read,
-       .llseek = default_llseek,
-};
-
-static ssize_t flash_read(struct file *file, char __user *buf,
-                         size_t count, loff_t *ppos)
-{
-       struct ipath_devdata *dd;
-       ssize_t ret;
-       loff_t pos;
-       char *tmp;
-
-       pos = *ppos;
-
-       if (pos < 0) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (pos >= sizeof(struct ipath_flash)) {
-               ret = 0;
-               goto bail;
-       }
-
-       if (count > sizeof(struct ipath_flash) - pos)
-               count = sizeof(struct ipath_flash) - pos;
-
-       tmp = kmalloc(count, GFP_KERNEL);
-       if (!tmp) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       dd = file_inode(file)->i_private;
-       if (ipath_eeprom_read(dd, pos, tmp, count)) {
-               ipath_dev_err(dd, "failed to read from flash\n");
-               ret = -ENXIO;
-               goto bail_tmp;
-       }
-
-       if (copy_to_user(buf, tmp, count)) {
-               ret = -EFAULT;
-               goto bail_tmp;
-       }
-
-       *ppos = pos + count;
-       ret = count;
-
-bail_tmp:
-       kfree(tmp);
-
-bail:
-       return ret;
-}
-
-static ssize_t flash_write(struct file *file, const char __user *buf,
-                          size_t count, loff_t *ppos)
-{
-       struct ipath_devdata *dd;
-       ssize_t ret;
-       loff_t pos;
-       char *tmp;
-
-       pos = *ppos;
-
-       if (pos != 0) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (count != sizeof(struct ipath_flash)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       tmp = memdup_user(buf, count);
-       if (IS_ERR(tmp))
-               return PTR_ERR(tmp);
-
-       dd = file_inode(file)->i_private;
-       if (ipath_eeprom_write(dd, pos, tmp, count)) {
-               ret = -ENXIO;
-               ipath_dev_err(dd, "failed to write to flash\n");
-               goto bail_tmp;
-       }
-
-       *ppos = pos + count;
-       ret = count;
-
-bail_tmp:
-       kfree(tmp);
-
-bail:
-       return ret;
-}
-
-static const struct file_operations flash_ops = {
-       .read = flash_read,
-       .write = flash_write,
-       .llseek = default_llseek,
-};
-
-static int create_device_files(struct super_block *sb,
-                              struct ipath_devdata *dd)
-{
-       struct dentry *dir, *tmp;
-       char unit[10];
-       int ret;
-
-       snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
-       ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
-                         &simple_dir_operations, dd);
-       if (ret) {
-               printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret);
-               goto bail;
-       }
-
-       ret = create_file("atomic_counters", S_IFREG|S_IRUGO, dir, &tmp,
-                         &atomic_counters_ops, dd);
-       if (ret) {
-               printk(KERN_ERR "create_file(%s/atomic_counters) "
-                      "failed: %d\n", unit, ret);
-               goto bail;
-       }
-
-       ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp,
-                         &flash_ops, dd);
-       if (ret) {
-               printk(KERN_ERR "create_file(%s/flash) "
-                      "failed: %d\n", unit, ret);
-               goto bail;
-       }
-
-bail:
-       return ret;
-}
-
-static int remove_file(struct dentry *parent, char *name)
-{
-       struct dentry *tmp;
-       int ret;
-
-       tmp = lookup_one_len(name, parent, strlen(name));
-
-       if (IS_ERR(tmp)) {
-               ret = PTR_ERR(tmp);
-               goto bail;
-       }
-
-       spin_lock(&tmp->d_lock);
-       if (simple_positive(tmp)) {
-               dget_dlock(tmp);
-               __d_drop(tmp);
-               spin_unlock(&tmp->d_lock);
-               simple_unlink(d_inode(parent), tmp);
-       } else
-               spin_unlock(&tmp->d_lock);
-
-       ret = 0;
-bail:
-       /*
-        * We don't expect clients to care about the return value, but
-        * it's there if they need it.
-        */
-       return ret;
-}
-
-static int remove_device_files(struct super_block *sb,
-                              struct ipath_devdata *dd)
-{
-       struct dentry *dir, *root;
-       char unit[10];
-       int ret;
-
-       root = dget(sb->s_root);
-       inode_lock(d_inode(root));
-       snprintf(unit, sizeof unit, "%02d", dd->ipath_unit);
-       dir = lookup_one_len(unit, root, strlen(unit));
-
-       if (IS_ERR(dir)) {
-               ret = PTR_ERR(dir);
-               printk(KERN_ERR "Lookup of %s failed\n", unit);
-               goto bail;
-       }
-
-       remove_file(dir, "flash");
-       remove_file(dir, "atomic_counters");
-       d_delete(dir);
-       ret = simple_rmdir(d_inode(root), dir);
-
-bail:
-       inode_unlock(d_inode(root));
-       dput(root);
-       return ret;
-}
-
-static int ipathfs_fill_super(struct super_block *sb, void *data,
-                             int silent)
-{
-       struct ipath_devdata *dd, *tmp;
-       unsigned long flags;
-       int ret;
-
-       static struct tree_descr files[] = {
-               [2] = {"atomic_stats", &atomic_stats_ops, S_IRUGO},
-               {""},
-       };
-
-       ret = simple_fill_super(sb, IPATHFS_MAGIC, files);
-       if (ret) {
-               printk(KERN_ERR "simple_fill_super failed: %d\n", ret);
-               goto bail;
-       }
-
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-
-       list_for_each_entry_safe(dd, tmp, &ipath_dev_list, ipath_list) {
-               spin_unlock_irqrestore(&ipath_devs_lock, flags);
-               ret = create_device_files(sb, dd);
-               if (ret)
-                       goto bail;
-               spin_lock_irqsave(&ipath_devs_lock, flags);
-       }
-
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-bail:
-       return ret;
-}
-
-static struct dentry *ipathfs_mount(struct file_system_type *fs_type,
-                       int flags, const char *dev_name, void *data)
-{
-       struct dentry *ret;
-       ret = mount_single(fs_type, flags, data, ipathfs_fill_super);
-       if (!IS_ERR(ret))
-               ipath_super = ret->d_sb;
-       return ret;
-}
-
-static void ipathfs_kill_super(struct super_block *s)
-{
-       kill_litter_super(s);
-       ipath_super = NULL;
-}
-
-int ipathfs_add_device(struct ipath_devdata *dd)
-{
-       int ret;
-
-       if (ipath_super == NULL) {
-               ret = 0;
-               goto bail;
-       }
-
-       ret = create_device_files(ipath_super, dd);
-
-bail:
-       return ret;
-}
-
-int ipathfs_remove_device(struct ipath_devdata *dd)
-{
-       int ret;
-
-       if (ipath_super == NULL) {
-               ret = 0;
-               goto bail;
-       }
-
-       ret = remove_device_files(ipath_super, dd);
-
-bail:
-       return ret;
-}
-
-static struct file_system_type ipathfs_fs_type = {
-       .owner =        THIS_MODULE,
-       .name =         "ipathfs",
-       .mount =        ipathfs_mount,
-       .kill_sb =      ipathfs_kill_super,
-};
-MODULE_ALIAS_FS("ipathfs");
-
-int __init ipath_init_ipathfs(void)
-{
-       return register_filesystem(&ipathfs_fs_type);
-}
-
-void __exit ipath_exit_ipathfs(void)
-{
-       unregister_filesystem(&ipathfs_fs_type);
-}
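
A brief, illustrative sketch of how the two entry points above are meant to be called from the device bring-up and teardown paths; the wrapper names are hypothetical (in the tree the real callers live in ipath_driver.c, which is not part of this hunk).

/* Hypothetical wrappers showing the intended call order. */
static int example_register_device(struct ipath_devdata *dd)
{
        int ret = ipathfs_add_device(dd);  /* creates <unit>/atomic_counters and <unit>/flash */

        if (ret)
                ipath_dev_err(dd, "Could not create ipathfs files: %d\n", ret);
        return ret;
}

static void example_unregister_device(struct ipath_devdata *dd)
{
        ipathfs_remove_device(dd);         /* tears the per-unit directory back down */
}

Note that the filesystem only becomes visible once an administrator mounts type "ipathfs"; until then ipath_super is NULL and both helpers return 0 without doing anything.
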
diff --git a/drivers/staging/rdma/ipath/ipath_iba6110.c b/drivers/staging/rdma/ipath/ipath_iba6110.c
deleted file mode 100644 (file)
index 5f13572..0000000
+++ /dev/null
@@ -1,1939 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file contains all of the code that is specific to the InfiniPath
- * HT chip.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/htirq.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_kernel.h"
-#include "ipath_registers.h"
-
-static void ipath_setup_ht_setextled(struct ipath_devdata *, u64, u64);
-
-
-/*
- * This lists the InfiniPath registers, in the actual chip layout.
- * This structure should never be directly accessed.
- *
- * The names are in InterCap form because they're taken straight from
- * the chip specification.  Since they're only used in this file, they
- * don't pollute the rest of the source.
-*/
-
-struct _infinipath_do_not_use_kernel_regs {
-       unsigned long long Revision;
-       unsigned long long Control;
-       unsigned long long PageAlign;
-       unsigned long long PortCnt;
-       unsigned long long DebugPortSelect;
-       unsigned long long DebugPort;
-       unsigned long long SendRegBase;
-       unsigned long long UserRegBase;
-       unsigned long long CounterRegBase;
-       unsigned long long Scratch;
-       unsigned long long ReservedMisc1;
-       unsigned long long InterruptConfig;
-       unsigned long long IntBlocked;
-       unsigned long long IntMask;
-       unsigned long long IntStatus;
-       unsigned long long IntClear;
-       unsigned long long ErrorMask;
-       unsigned long long ErrorStatus;
-       unsigned long long ErrorClear;
-       unsigned long long HwErrMask;
-       unsigned long long HwErrStatus;
-       unsigned long long HwErrClear;
-       unsigned long long HwDiagCtrl;
-       unsigned long long MDIO;
-       unsigned long long IBCStatus;
-       unsigned long long IBCCtrl;
-       unsigned long long ExtStatus;
-       unsigned long long ExtCtrl;
-       unsigned long long GPIOOut;
-       unsigned long long GPIOMask;
-       unsigned long long GPIOStatus;
-       unsigned long long GPIOClear;
-       unsigned long long RcvCtrl;
-       unsigned long long RcvBTHQP;
-       unsigned long long RcvHdrSize;
-       unsigned long long RcvHdrCnt;
-       unsigned long long RcvHdrEntSize;
-       unsigned long long RcvTIDBase;
-       unsigned long long RcvTIDCnt;
-       unsigned long long RcvEgrBase;
-       unsigned long long RcvEgrCnt;
-       unsigned long long RcvBufBase;
-       unsigned long long RcvBufSize;
-       unsigned long long RxIntMemBase;
-       unsigned long long RxIntMemSize;
-       unsigned long long RcvPartitionKey;
-       unsigned long long ReservedRcv[10];
-       unsigned long long SendCtrl;
-       unsigned long long SendPIOBufBase;
-       unsigned long long SendPIOSize;
-       unsigned long long SendPIOBufCnt;
-       unsigned long long SendPIOAvailAddr;
-       unsigned long long TxIntMemBase;
-       unsigned long long TxIntMemSize;
-       unsigned long long ReservedSend[9];
-       unsigned long long SendBufferError;
-       unsigned long long SendBufferErrorCONT1;
-       unsigned long long SendBufferErrorCONT2;
-       unsigned long long SendBufferErrorCONT3;
-       unsigned long long ReservedSBE[4];
-       unsigned long long RcvHdrAddr0;
-       unsigned long long RcvHdrAddr1;
-       unsigned long long RcvHdrAddr2;
-       unsigned long long RcvHdrAddr3;
-       unsigned long long RcvHdrAddr4;
-       unsigned long long RcvHdrAddr5;
-       unsigned long long RcvHdrAddr6;
-       unsigned long long RcvHdrAddr7;
-       unsigned long long RcvHdrAddr8;
-       unsigned long long ReservedRHA[7];
-       unsigned long long RcvHdrTailAddr0;
-       unsigned long long RcvHdrTailAddr1;
-       unsigned long long RcvHdrTailAddr2;
-       unsigned long long RcvHdrTailAddr3;
-       unsigned long long RcvHdrTailAddr4;
-       unsigned long long RcvHdrTailAddr5;
-       unsigned long long RcvHdrTailAddr6;
-       unsigned long long RcvHdrTailAddr7;
-       unsigned long long RcvHdrTailAddr8;
-       unsigned long long ReservedRHTA[7];
-       unsigned long long Sync;        /* Software only */
-       unsigned long long Dump;        /* Software only */
-       unsigned long long SimVer;      /* Software only */
-       unsigned long long ReservedSW[5];
-       unsigned long long SerdesConfig0;
-       unsigned long long SerdesConfig1;
-       unsigned long long SerdesStatus;
-       unsigned long long XGXSConfig;
-       unsigned long long ReservedSW2[4];
-};
-
-struct _infinipath_do_not_use_counters {
-       __u64 LBIntCnt;
-       __u64 LBFlowStallCnt;
-       __u64 Reserved1;
-       __u64 TxUnsupVLErrCnt;
-       __u64 TxDataPktCnt;
-       __u64 TxFlowPktCnt;
-       __u64 TxDwordCnt;
-       __u64 TxLenErrCnt;
-       __u64 TxMaxMinLenErrCnt;
-       __u64 TxUnderrunCnt;
-       __u64 TxFlowStallCnt;
-       __u64 TxDroppedPktCnt;
-       __u64 RxDroppedPktCnt;
-       __u64 RxDataPktCnt;
-       __u64 RxFlowPktCnt;
-       __u64 RxDwordCnt;
-       __u64 RxLenErrCnt;
-       __u64 RxMaxMinLenErrCnt;
-       __u64 RxICRCErrCnt;
-       __u64 RxVCRCErrCnt;
-       __u64 RxFlowCtrlErrCnt;
-       __u64 RxBadFormatCnt;
-       __u64 RxLinkProblemCnt;
-       __u64 RxEBPCnt;
-       __u64 RxLPCRCErrCnt;
-       __u64 RxBufOvflCnt;
-       __u64 RxTIDFullErrCnt;
-       __u64 RxTIDValidErrCnt;
-       __u64 RxPKeyMismatchCnt;
-       __u64 RxP0HdrEgrOvflCnt;
-       __u64 RxP1HdrEgrOvflCnt;
-       __u64 RxP2HdrEgrOvflCnt;
-       __u64 RxP3HdrEgrOvflCnt;
-       __u64 RxP4HdrEgrOvflCnt;
-       __u64 RxP5HdrEgrOvflCnt;
-       __u64 RxP6HdrEgrOvflCnt;
-       __u64 RxP7HdrEgrOvflCnt;
-       __u64 RxP8HdrEgrOvflCnt;
-       __u64 Reserved6;
-       __u64 Reserved7;
-       __u64 IBStatusChangeCnt;
-       __u64 IBLinkErrRecoveryCnt;
-       __u64 IBLinkDownedCnt;
-       __u64 IBSymbolErrCnt;
-};
-
-#define IPATH_KREG_OFFSET(field) (offsetof( \
-       struct _infinipath_do_not_use_kernel_regs, field) / sizeof(u64))
-#define IPATH_CREG_OFFSET(field) (offsetof( \
-       struct _infinipath_do_not_use_counters, field) / sizeof(u64))
-
-static const struct ipath_kregs ipath_ht_kregs = {
-       .kr_control = IPATH_KREG_OFFSET(Control),
-       .kr_counterregbase = IPATH_KREG_OFFSET(CounterRegBase),
-       .kr_debugport = IPATH_KREG_OFFSET(DebugPort),
-       .kr_debugportselect = IPATH_KREG_OFFSET(DebugPortSelect),
-       .kr_errorclear = IPATH_KREG_OFFSET(ErrorClear),
-       .kr_errormask = IPATH_KREG_OFFSET(ErrorMask),
-       .kr_errorstatus = IPATH_KREG_OFFSET(ErrorStatus),
-       .kr_extctrl = IPATH_KREG_OFFSET(ExtCtrl),
-       .kr_extstatus = IPATH_KREG_OFFSET(ExtStatus),
-       .kr_gpio_clear = IPATH_KREG_OFFSET(GPIOClear),
-       .kr_gpio_mask = IPATH_KREG_OFFSET(GPIOMask),
-       .kr_gpio_out = IPATH_KREG_OFFSET(GPIOOut),
-       .kr_gpio_status = IPATH_KREG_OFFSET(GPIOStatus),
-       .kr_hwdiagctrl = IPATH_KREG_OFFSET(HwDiagCtrl),
-       .kr_hwerrclear = IPATH_KREG_OFFSET(HwErrClear),
-       .kr_hwerrmask = IPATH_KREG_OFFSET(HwErrMask),
-       .kr_hwerrstatus = IPATH_KREG_OFFSET(HwErrStatus),
-       .kr_ibcctrl = IPATH_KREG_OFFSET(IBCCtrl),
-       .kr_ibcstatus = IPATH_KREG_OFFSET(IBCStatus),
-       .kr_intblocked = IPATH_KREG_OFFSET(IntBlocked),
-       .kr_intclear = IPATH_KREG_OFFSET(IntClear),
-       .kr_interruptconfig = IPATH_KREG_OFFSET(InterruptConfig),
-       .kr_intmask = IPATH_KREG_OFFSET(IntMask),
-       .kr_intstatus = IPATH_KREG_OFFSET(IntStatus),
-       .kr_mdio = IPATH_KREG_OFFSET(MDIO),
-       .kr_pagealign = IPATH_KREG_OFFSET(PageAlign),
-       .kr_partitionkey = IPATH_KREG_OFFSET(RcvPartitionKey),
-       .kr_portcnt = IPATH_KREG_OFFSET(PortCnt),
-       .kr_rcvbthqp = IPATH_KREG_OFFSET(RcvBTHQP),
-       .kr_rcvbufbase = IPATH_KREG_OFFSET(RcvBufBase),
-       .kr_rcvbufsize = IPATH_KREG_OFFSET(RcvBufSize),
-       .kr_rcvctrl = IPATH_KREG_OFFSET(RcvCtrl),
-       .kr_rcvegrbase = IPATH_KREG_OFFSET(RcvEgrBase),
-       .kr_rcvegrcnt = IPATH_KREG_OFFSET(RcvEgrCnt),
-       .kr_rcvhdrcnt = IPATH_KREG_OFFSET(RcvHdrCnt),
-       .kr_rcvhdrentsize = IPATH_KREG_OFFSET(RcvHdrEntSize),
-       .kr_rcvhdrsize = IPATH_KREG_OFFSET(RcvHdrSize),
-       .kr_rcvintmembase = IPATH_KREG_OFFSET(RxIntMemBase),
-       .kr_rcvintmemsize = IPATH_KREG_OFFSET(RxIntMemSize),
-       .kr_rcvtidbase = IPATH_KREG_OFFSET(RcvTIDBase),
-       .kr_rcvtidcnt = IPATH_KREG_OFFSET(RcvTIDCnt),
-       .kr_revision = IPATH_KREG_OFFSET(Revision),
-       .kr_scratch = IPATH_KREG_OFFSET(Scratch),
-       .kr_sendbuffererror = IPATH_KREG_OFFSET(SendBufferError),
-       .kr_sendctrl = IPATH_KREG_OFFSET(SendCtrl),
-       .kr_sendpioavailaddr = IPATH_KREG_OFFSET(SendPIOAvailAddr),
-       .kr_sendpiobufbase = IPATH_KREG_OFFSET(SendPIOBufBase),
-       .kr_sendpiobufcnt = IPATH_KREG_OFFSET(SendPIOBufCnt),
-       .kr_sendpiosize = IPATH_KREG_OFFSET(SendPIOSize),
-       .kr_sendregbase = IPATH_KREG_OFFSET(SendRegBase),
-       .kr_txintmembase = IPATH_KREG_OFFSET(TxIntMemBase),
-       .kr_txintmemsize = IPATH_KREG_OFFSET(TxIntMemSize),
-       .kr_userregbase = IPATH_KREG_OFFSET(UserRegBase),
-       .kr_serdesconfig0 = IPATH_KREG_OFFSET(SerdesConfig0),
-       .kr_serdesconfig1 = IPATH_KREG_OFFSET(SerdesConfig1),
-       .kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
-       .kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
-       /*
-        * These should not be used directly via ipath_write_kreg64(),
-        * use them with ipath_write_kreg64_port(),
-        */
-       .kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
-       .kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
-};
-
-static const struct ipath_cregs ipath_ht_cregs = {
-       .cr_badformatcnt = IPATH_CREG_OFFSET(RxBadFormatCnt),
-       .cr_erricrccnt = IPATH_CREG_OFFSET(RxICRCErrCnt),
-       .cr_errlinkcnt = IPATH_CREG_OFFSET(RxLinkProblemCnt),
-       .cr_errlpcrccnt = IPATH_CREG_OFFSET(RxLPCRCErrCnt),
-       .cr_errpkey = IPATH_CREG_OFFSET(RxPKeyMismatchCnt),
-       .cr_errrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowCtrlErrCnt),
-       .cr_err_rlencnt = IPATH_CREG_OFFSET(RxLenErrCnt),
-       .cr_errslencnt = IPATH_CREG_OFFSET(TxLenErrCnt),
-       .cr_errtidfull = IPATH_CREG_OFFSET(RxTIDFullErrCnt),
-       .cr_errtidvalid = IPATH_CREG_OFFSET(RxTIDValidErrCnt),
-       .cr_errvcrccnt = IPATH_CREG_OFFSET(RxVCRCErrCnt),
-       .cr_ibstatuschange = IPATH_CREG_OFFSET(IBStatusChangeCnt),
-       /* calc from Reg_CounterRegBase + offset */
-       .cr_intcnt = IPATH_CREG_OFFSET(LBIntCnt),
-       .cr_invalidrlencnt = IPATH_CREG_OFFSET(RxMaxMinLenErrCnt),
-       .cr_invalidslencnt = IPATH_CREG_OFFSET(TxMaxMinLenErrCnt),
-       .cr_lbflowstallcnt = IPATH_CREG_OFFSET(LBFlowStallCnt),
-       .cr_pktrcvcnt = IPATH_CREG_OFFSET(RxDataPktCnt),
-       .cr_pktrcvflowctrlcnt = IPATH_CREG_OFFSET(RxFlowPktCnt),
-       .cr_pktsendcnt = IPATH_CREG_OFFSET(TxDataPktCnt),
-       .cr_pktsendflowcnt = IPATH_CREG_OFFSET(TxFlowPktCnt),
-       .cr_portovflcnt = IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt),
-       .cr_rcvebpcnt = IPATH_CREG_OFFSET(RxEBPCnt),
-       .cr_rcvovflcnt = IPATH_CREG_OFFSET(RxBufOvflCnt),
-       .cr_senddropped = IPATH_CREG_OFFSET(TxDroppedPktCnt),
-       .cr_sendstallcnt = IPATH_CREG_OFFSET(TxFlowStallCnt),
-       .cr_sendunderruncnt = IPATH_CREG_OFFSET(TxUnderrunCnt),
-       .cr_wordrcvcnt = IPATH_CREG_OFFSET(RxDwordCnt),
-       .cr_wordsendcnt = IPATH_CREG_OFFSET(TxDwordCnt),
-       .cr_unsupvlcnt = IPATH_CREG_OFFSET(TxUnsupVLErrCnt),
-       .cr_rxdroppktcnt = IPATH_CREG_OFFSET(RxDroppedPktCnt),
-       .cr_iblinkerrrecovcnt = IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt),
-       .cr_iblinkdowncnt = IPATH_CREG_OFFSET(IBLinkDownedCnt),
-       .cr_ibsymbolerrcnt = IPATH_CREG_OFFSET(IBSymbolErrCnt)
-};
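
These offset tables are consumed only through the generic register accessors seen earlier in this diff, so chip-specific code never dereferences the layout structs directly.  An illustrative use (dd->ipath_kregs points at ipath_ht_kregs on this chip):

        u64 scratch = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
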
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_RCVURG_MASK ((1U<<9)-1)
-#define INFINIPATH_I_RCVURG_SHIFT 0
-#define INFINIPATH_I_RCVAVAIL_MASK ((1U<<9)-1)
-#define INFINIPATH_I_RCVAVAIL_SHIFT 12
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-#define INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT 0
-#define INFINIPATH_HWE_HTCMEMPARITYERR_MASK 0x3FFFFFULL
-#define INFINIPATH_HWE_HTCLNKABYTE0CRCERR   0x0000000000800000ULL
-#define INFINIPATH_HWE_HTCLNKABYTE1CRCERR   0x0000000001000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE0CRCERR   0x0000000002000000ULL
-#define INFINIPATH_HWE_HTCLNKBBYTE1CRCERR   0x0000000004000000ULL
-#define INFINIPATH_HWE_HTCMISCERR4          0x0000000008000000ULL
-#define INFINIPATH_HWE_HTCMISCERR5          0x0000000010000000ULL
-#define INFINIPATH_HWE_HTCMISCERR6          0x0000000020000000ULL
-#define INFINIPATH_HWE_HTCMISCERR7          0x0000000040000000ULL
-#define INFINIPATH_HWE_HTCBUSTREQPARITYERR  0x0000000080000000ULL
-#define INFINIPATH_HWE_HTCBUSTRESPPARITYERR 0x0000000100000000ULL
-#define INFINIPATH_HWE_HTCBUSIREQPARITYERR  0x0000000200000000ULL
-#define INFINIPATH_HWE_COREPLL_FBSLIP       0x0080000000000000ULL
-#define INFINIPATH_HWE_COREPLL_RFSLIP       0x0100000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_FBSLIP        0x0200000000000000ULL
-#define INFINIPATH_HWE_HTBPLL_RFSLIP        0x0400000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_FBSLIP        0x0800000000000000ULL
-#define INFINIPATH_HWE_HTAPLL_RFSLIP        0x1000000000000000ULL
-#define INFINIPATH_HWE_SERDESPLLFAILED      0x2000000000000000ULL
-
-#define IBA6110_IBCS_LINKTRAININGSTATE_MASK 0xf
-#define IBA6110_IBCS_LINKSTATE_SHIFT 4
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_FREQSEL 0x2
-#define INFINIPATH_EXTS_SERDESSEL 0x4
-#define INFINIPATH_EXTS_MEMBIST_ENDTEST     0x0000000000004000
-#define INFINIPATH_EXTS_MEMBIST_CORRECT     0x0000000000008000
-
-
-/* TID entries (memory), HT-only */
-#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL        /* 40 bits valid */
-#define INFINIPATH_RT_VALID 0x8000000000000000ULL
-#define INFINIPATH_RT_ADDR_SHIFT 0
-#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
-#define INFINIPATH_RT_BUFSIZE_SHIFT 48
-
-#define INFINIPATH_R_INTRAVAIL_SHIFT 16
-#define INFINIPATH_R_TAILUPD_SHIFT 31
-
-/* kr_xgxsconfig bits */
-#define INFINIPATH_XGXS_RESET          0x7ULL
-
-/*
- * masks and bits that are different in different chips, or present only
- * in one
- */
-static const ipath_err_t infinipath_hwe_htcmemparityerr_mask =
-    INFINIPATH_HWE_HTCMEMPARITYERR_MASK;
-static const ipath_err_t infinipath_hwe_htcmemparityerr_shift =
-    INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT;
-
-static const ipath_err_t infinipath_hwe_htclnkabyte0crcerr =
-    INFINIPATH_HWE_HTCLNKABYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkabyte1crcerr =
-    INFINIPATH_HWE_HTCLNKABYTE1CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte0crcerr =
-    INFINIPATH_HWE_HTCLNKBBYTE0CRCERR;
-static const ipath_err_t infinipath_hwe_htclnkbbyte1crcerr =
-    INFINIPATH_HWE_HTCLNKBBYTE1CRCERR;
-
-#define _IPATH_GPIO_SDA_NUM 1
-#define _IPATH_GPIO_SCL_NUM 0
-
-#define IPATH_GPIO_SDA \
-       (1ULL << (_IPATH_GPIO_SDA_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-#define IPATH_GPIO_SCL \
-       (1ULL << (_IPATH_GPIO_SCL_NUM+INFINIPATH_EXTC_GPIOOE_SHIFT))
-
-/* keep the code below somewhat more readable; not used elsewhere */
-#define _IPATH_HTLINK0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |    \
-                               infinipath_hwe_htclnkabyte1crcerr)
-#define _IPATH_HTLINK1_CRCBITS (infinipath_hwe_htclnkbbyte0crcerr |    \
-                               infinipath_hwe_htclnkbbyte1crcerr)
-#define _IPATH_HTLANE0_CRCBITS (infinipath_hwe_htclnkabyte0crcerr |    \
-                               infinipath_hwe_htclnkbbyte0crcerr)
-#define _IPATH_HTLANE1_CRCBITS (infinipath_hwe_htclnkabyte1crcerr |    \
-                               infinipath_hwe_htclnkbbyte1crcerr)
-
-static void hwerr_crcbits(struct ipath_devdata *dd, ipath_err_t hwerrs,
-                         char *msg, size_t msgl)
-{
-       char bitsmsg[64];
-       ipath_err_t crcbits = hwerrs &
-               (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS);
-       /* don't check if 8bit HT */
-       if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
-               crcbits &= ~infinipath_hwe_htclnkabyte1crcerr;
-       /* don't check if 8bit HT */
-       if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
-               crcbits &= ~infinipath_hwe_htclnkbbyte1crcerr;
-       /*
-        * We'll want to ignore link errors on a link that is not in use,
-        * if any.  For now, complain about both.
-        */
-       if (crcbits) {
-               u16 ctrl0, ctrl1;
-               snprintf(bitsmsg, sizeof bitsmsg,
-                        "[HT%s lane %s CRC (%llx); powercycle to completely clear]",
-                        !(crcbits & _IPATH_HTLINK1_CRCBITS) ?
-                        "0 (A)" : (!(crcbits & _IPATH_HTLINK0_CRCBITS)
-                                   ? "1 (B)" : "0+1 (A+B)"),
-                        !(crcbits & _IPATH_HTLANE1_CRCBITS) ? "0"
-                        : (!(crcbits & _IPATH_HTLANE0_CRCBITS) ? "1" :
-                           "0+1"), (unsigned long long) crcbits);
-               strlcat(msg, bitsmsg, msgl);
-
-               /*
-                * print extra info for debugging.  slave/primary
-                * config word 4, 8 (link control 0, 1)
-                */
-
-               if (pci_read_config_word(dd->pcidev,
-                                        dd->ipath_ht_slave_off + 0x4,
-                                        &ctrl0))
-                       dev_info(&dd->pcidev->dev, "Couldn't read "
-                                "linkctrl0 of slave/primary "
-                                "config block\n");
-               else if (!(ctrl0 & 1 << 6))
-                       /* not if EOC bit set */
-                       ipath_dbg("HT linkctrl0 0x%x%s%s\n", ctrl0,
-                                 ((ctrl0 >> 8) & 7) ? " CRC" : "",
-                                 ((ctrl0 >> 4) & 1) ? "linkfail" :
-                                 "");
-               if (pci_read_config_word(dd->pcidev,
-                                        dd->ipath_ht_slave_off + 0x8,
-                                        &ctrl1))
-                       dev_info(&dd->pcidev->dev, "Couldn't read "
-                                "linkctrl1 of slave/primary "
-                                "config block\n");
-               else if (!(ctrl1 & 1 << 6))
-                       /* not if EOC bit set */
-                       ipath_dbg("HT linkctrl1 0x%x%s%s\n", ctrl1,
-                                 ((ctrl1 >> 8) & 7) ? " CRC" : "",
-                                 ((ctrl1 >> 4) & 1) ? "linkfail" :
-                                 "");
-
-               /* disable until driver reloaded */
-               dd->ipath_hwerrmask &= ~crcbits;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                                dd->ipath_hwerrmask);
-               ipath_dbg("HT crc errs: %s\n", msg);
-       } else
-               ipath_dbg("ignoring HT crc errors 0x%llx, "
-                         "not in use\n", (unsigned long long)
-                         (hwerrs & (_IPATH_HTLINK0_CRCBITS |
-                                    _IPATH_HTLINK1_CRCBITS)));
-}
-
-/* 6110 specific hardware errors... */
-static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
-       INFINIPATH_HWE_MSG(HTCBUSIREQPARITYERR, "HTC Ireq Parity"),
-       INFINIPATH_HWE_MSG(HTCBUSTREQPARITYERR, "HTC Treq Parity"),
-       INFINIPATH_HWE_MSG(HTCBUSTRESPPARITYERR, "HTC Tresp Parity"),
-       INFINIPATH_HWE_MSG(HTCMISCERR5, "HT core Misc5"),
-       INFINIPATH_HWE_MSG(HTCMISCERR6, "HT core Misc6"),
-       INFINIPATH_HWE_MSG(HTCMISCERR7, "HT core Misc7"),
-       INFINIPATH_HWE_MSG(RXDSYNCMEMPARITYERR, "Rx Dsync"),
-       INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
-};
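
The table above is walked by ipath_format_hwerrors(), called from ipath_ht_handle_hwerrors() below, which appends each entry's message text for every matching bit in "hwerrs"; a new 6110-specific error bit therefore only needs an extra INFINIPATH_HWE_MSG() line here.  A minimal illustration of the call, mirroring the one later in this file:

        char msg[128] = "";

        ipath_format_hwerrors(hwerrs, ipath_6110_hwerror_msgs,
                              ARRAY_SIZE(ipath_6110_hwerror_msgs),
                              msg, sizeof(msg));
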
-
-#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
-                       INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
-                       << INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
-#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
-                         << INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
-
-static void ipath_ht_txe_recover(struct ipath_devdata *dd)
-{
-       ++ipath_stats.sps_txeparity;
-       dev_info(&dd->pcidev->dev,
-               "Recovering from TXE PIO parity error\n");
-}
-
-
-/**
- * ipath_ht_handle_hwerrors - display hardware errors.
- * @dd: the infinipath device
- * @msg: the output buffer
- * @msgl: the size of the output buffer
- *
- * Most hardware errors are catastrophic, but for right now we'll print
- * them and continue.  We reuse the same message buffer as
- * ipath_handle_errors() to avoid excessive stack usage.
- */
-static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
-                                    size_t msgl)
-{
-       ipath_err_t hwerrs;
-       u32 bits, ctrl;
-       int isfatal = 0;
-       char bitsmsg[64];
-       int log_idx;
-
-       hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-
-       if (!hwerrs) {
-               ipath_cdbg(VERBOSE, "Called but no hardware errors set\n");
-               /*
-                * Better than printing confusing messages; this seems to
-                * be related to clearing the CRC error or the PLL error
-                * during init.
-                */
-               goto bail;
-       } else if (hwerrs == -1LL) {
-               ipath_dev_err(dd, "Read of hardware error status failed "
-                             "(all bits set); ignoring\n");
-               goto bail;
-       }
-       ipath_stats.sps_hwerrs++;
-
-       /* Always clear the error status register, except MEMBISTFAILED,
-        * regardless of whether we continue or stop using the chip.
-        * We want that set so we know it failed, even across driver reload.
-        * We'll still ignore it in the hwerrmask.  We do this partly for
-        * diagnostics, but also for support */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                        hwerrs&~INFINIPATH_HWE_MEMBISTFAILED);
-
-       hwerrs &= dd->ipath_hwerrmask;
-
-       /* We log some errors to EEPROM, check if we have any of those. */
-       for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx)
-               if (hwerrs & dd->ipath_eep_st_masks[log_idx].hwerrs_to_log)
-                       ipath_inc_eeprom_err(dd, log_idx, 1);
-
-       /*
-        * make sure we get this much out, unless told to be quiet,
-        * it's a parity error we may recover from,
-        * or it's occurred within the last 5 seconds
-        */
-       if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
-               RXE_EAGER_PARITY)) ||
-               (ipath_debug & __IPATH_VERBDBG))
-               dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
-                        "(cleared)\n", (unsigned long long) hwerrs);
-       dd->ipath_lasthwerror |= hwerrs;
-
-       if (hwerrs & ~dd->ipath_hwe_bitsextant)
-               ipath_dev_err(dd, "hwerror interrupt with unknown errors "
-                             "%llx set\n", (unsigned long long)
-                             (hwerrs & ~dd->ipath_hwe_bitsextant));
-
-       ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-       if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
-               /*
-                * Parity errors in send memory are recoverable: just
-                * cancel the send (if indicated in sendbuffererror),
-                * count the occurrence, unfreeze (if no other handled
-                * hardware error bits are set), and continue. They can
-                * occur if a processor speculative read is done to the PIO
-                * buffer while we are sending a packet, for example.
-                */
-               if (hwerrs & TXE_PIO_PARITY) {
-                       ipath_ht_txe_recover(dd);
-                       hwerrs &= ~TXE_PIO_PARITY;
-               }
-
-               if (!hwerrs) {
-                       ipath_dbg("Clearing freezemode on ignored or "
-                                 "recovered hardware error\n");
-                       ipath_clear_freeze(dd);
-               }
-       }
-
-       *msg = '\0';
-
-       /*
-        * may someday want to decode into which bits are which
-        * functional area for parity errors, etc.
-        */
-       if (hwerrs & (infinipath_hwe_htcmemparityerr_mask
-                     << INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT)) {
-               bits = (u32) ((hwerrs >>
-                              INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) &
-                             INFINIPATH_HWE_HTCMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof bitsmsg, "[HTC Parity Errs %x] ",
-                        bits);
-               strlcat(msg, bitsmsg, msgl);
-       }
-
-       ipath_format_hwerrors(hwerrs,
-                             ipath_6110_hwerror_msgs,
-                             ARRAY_SIZE(ipath_6110_hwerror_msgs),
-                             msg, msgl);
-
-       if (hwerrs & (_IPATH_HTLINK0_CRCBITS | _IPATH_HTLINK1_CRCBITS))
-               hwerr_crcbits(dd, hwerrs, msg, msgl);
-
-       if (hwerrs & INFINIPATH_HWE_MEMBISTFAILED) {
-               strlcat(msg, "[Memory BIST test failed, InfiniPath hardware unusable]",
-                       msgl);
-               /* ignore from now on, so disable until driver reloaded */
-               dd->ipath_hwerrmask &= ~INFINIPATH_HWE_MEMBISTFAILED;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                                dd->ipath_hwerrmask);
-       }
-#define _IPATH_PLL_FAIL (INFINIPATH_HWE_COREPLL_FBSLIP |       \
-                        INFINIPATH_HWE_COREPLL_RFSLIP |        \
-                        INFINIPATH_HWE_HTBPLL_FBSLIP |         \
-                        INFINIPATH_HWE_HTBPLL_RFSLIP |         \
-                        INFINIPATH_HWE_HTAPLL_FBSLIP |         \
-                        INFINIPATH_HWE_HTAPLL_RFSLIP)
-
-       if (hwerrs & _IPATH_PLL_FAIL) {
-               snprintf(bitsmsg, sizeof bitsmsg,
-                        "[PLL failed (%llx), InfiniPath hardware unusable]",
-                        (unsigned long long) (hwerrs & _IPATH_PLL_FAIL));
-               strlcat(msg, bitsmsg, msgl);
-               /* ignore from now on, so disable until driver reloaded */
-               dd->ipath_hwerrmask &= ~(hwerrs & _IPATH_PLL_FAIL);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                                dd->ipath_hwerrmask);
-       }
-
-       if (hwerrs & INFINIPATH_HWE_SERDESPLLFAILED) {
-               /*
-                * If it occurs, it is left masked since the external
-                * interface is unused.
-                */
-               dd->ipath_hwerrmask &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                                dd->ipath_hwerrmask);
-       }
-
-       if (hwerrs) {
-               /*
-                * If any bits are set that we aren't ignoring, only make
-                * the complaint once, in case it's stuck or recurring and
-                * we get here multiple times.  Force the link down so the
-                * switch knows, and the LEDs are turned off.
-                */
-               if (dd->ipath_flags & IPATH_INITTED) {
-                       ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
-                       ipath_setup_ht_setextled(dd,
-                               INFINIPATH_IBCS_L_STATE_DOWN,
-                               INFINIPATH_IBCS_LT_STATE_DISABLED);
-                       ipath_dev_err(dd, "Fatal Hardware Error (freeze "
-                                         "mode), no longer usable, SN %.16s\n",
-                                         dd->ipath_serial);
-                       isfatal = 1;
-               }
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-               /* mark as having had error */
-               *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-               /*
-                * mark as not usable, at a minimum until driver
-                * is reloaded, probably until reboot, since no
-                * other reset is possible.
-                */
-               dd->ipath_flags &= ~IPATH_INITTED;
-       } else {
-               *msg = 0; /* recovered from all of them */
-       }
-       if (*msg)
-               ipath_dev_err(dd, "%s hardware error\n", msg);
-       if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
-               /*
-                * for status file; if no trailing brace is copied,
-                * we'll know it was truncated.
-                */
-               snprintf(dd->ipath_freezemsg,
-                        dd->ipath_freezelen, "{%s}", msg);
-
-bail:;
-}
-
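The freeze-message comment above relies on a small trick: snprintf() wraps the message in braces, so a reader of the status file can recognize a truncated copy by the missing closing brace. A standalone sketch of that technique follows; the buffer names and sizes are illustrative, not the driver's own.

#include <stdio.h>
#include <string.h>

/*
 * If the destination buffer is too small, snprintf() truncates and the
 * closing brace is lost, so truncation is detectable after the fact.
 */
static int message_was_truncated(const char *buf)
{
        size_t len = strlen(buf);

        return len == 0 || buf[len - 1] != '}';
}

int main(void)
{
        char big[64], small[8];
        const char *msg = "PLL failed, hardware unusable";

        snprintf(big, sizeof(big), "{%s}", msg);
        snprintf(small, sizeof(small), "{%s}", msg);

        printf("big:   %s (truncated: %d)\n", big, message_was_truncated(big));
        printf("small: %s (truncated: %d)\n", small, message_was_truncated(small));
        return 0;
}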
-/**
- * ipath_ht_boardname - fill in the board name
- * @dd: the infinipath device
- * @name: the output buffer
- * @namelen: the size of the output buffer
- *
- * fill in the board name, based on the board revision register
- */
-static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
-                             size_t namelen)
-{
-       char *n = NULL;
-       u8 boardrev = dd->ipath_boardrev;
-       int ret = 0;
-
-       switch (boardrev) {
-       case 5:
-               /*
-                * original production board; two production levels, with
-                * different serial number ranges.   See ipath_ht_early_init() for
-                * case where we enable IPATH_GPIO_INTR for later serial # range.
-                * Original 112* serial number is no longer supported.
-                */
-               n = "InfiniPath_QHT7040";
-               break;
-       case 7:
-               /* small form factor production board */
-               n = "InfiniPath_QHT7140";
-               break;
-       default:                /* don't know, just print the number */
-               ipath_dev_err(dd, "Don't yet know about board "
-                             "with ID %u\n", boardrev);
-               snprintf(name, namelen, "Unknown_InfiniPath_QHT7xxx_%u",
-                        boardrev);
-               break;
-       }
-       if (n)
-               snprintf(name, namelen, "%s", n);
-
-       if (ret) {
-               ipath_dev_err(dd, "Unsupported InfiniPath board %s!\n", name);
-               goto bail;
-       }
-       if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 ||
-               dd->ipath_minrev > 4)) {
-               /*
-                * This version of the driver only supports Rev 3.2 - 3.4
-                */
-               ipath_dev_err(dd,
-                             "Unsupported InfiniPath hardware revision %u.%u!\n",
-                             dd->ipath_majrev, dd->ipath_minrev);
-               ret = 1;
-               goto bail;
-       }
-       /*
-        * pkt/word counters are 32 bit, and therefore wrap fast enough
-        * that we snapshot them from a timer, and maintain 64 bit shadow
-        * copies
-        */
-       dd->ipath_flags |= IPATH_32BITCOUNTERS;
-       dd->ipath_flags |= IPATH_GPIO_INTR;
-       if (dd->ipath_lbus_speed != 800)
-               ipath_dev_err(dd,
-                             "Incorrectly configured for HT @ %uMHz\n",
-                             dd->ipath_lbus_speed);
-
-       /*
-        * set here, not in ipath_init_*_funcs because we have to do
-        * it after we can read chip registers.
-        */
-       dd->ipath_ureg_align =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-
-bail:
-       return ret;
-}
-
-static void ipath_check_htlink(struct ipath_devdata *dd)
-{
-       u8 linkerr, link_off, i;
-
-       for (i = 0; i < 2; i++) {
-               link_off = dd->ipath_ht_slave_off + i * 4 + 0xd;
-               if (pci_read_config_byte(dd->pcidev, link_off, &linkerr))
-                       dev_info(&dd->pcidev->dev, "Couldn't read "
-                                "linkerror%d of HT slave/primary block\n",
-                                i);
-               else if (linkerr & 0xf0) {
-                       ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
-                                  "clearing\n", i, linkerr >> 4);
-                       /*
-                        * writing the linkerr bits that are set should
-                        * clear them
-                        */
-                       if (pci_write_config_byte(dd->pcidev, link_off,
-                                                 linkerr))
-                               ipath_dbg("Failed write to clear HT "
-                                         "linkerror%d\n", i);
-                       if (pci_read_config_byte(dd->pcidev, link_off,
-                                                &linkerr))
-                               dev_info(&dd->pcidev->dev,
-                                        "Couldn't reread linkerror%d of "
-                                        "HT slave/primary block\n", i);
-                       else if (linkerr & 0xf0)
-                               dev_info(&dd->pcidev->dev,
-                                        "HT linkerror%d bits 0x%x "
-                                        "couldn't be cleared\n",
-                                        i, linkerr >> 4);
-               }
-       }
-}
-
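ipath_check_htlink() above clears latched HT link errors by writing back the error bits that read as set (a write-1-to-clear register). A minimal standalone sketch of that pattern, using a plain variable in place of the PCI config byte; the register model and the example value are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>

/* stand-in for the config-space byte; bits 4, 5 and 7 latched, low bits are other status */
static uint8_t linkerr_reg = 0xb3;

static uint8_t read_linkerr(void)
{
        return linkerr_reg;
}

static void write_linkerr(uint8_t val)
{
        /* writing a 1 clears the corresponding latched error bit */
        linkerr_reg &= ~(val & 0xf0);
}

int main(void)
{
        uint8_t v = read_linkerr();

        if (v & 0xf0) {
                printf("linkerr bits 0x%x set, clearing\n", v >> 4);
                write_linkerr(v);
        }
        printf("after clear: 0x%x\n", read_linkerr() >> 4);
        return 0;
}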
-static int ipath_setup_ht_reset(struct ipath_devdata *dd)
-{
-       ipath_dbg("No reset possible for this InfiniPath hardware\n");
-       return 0;
-}
-
-#define HT_INTR_DISC_CONFIG  0x80      /* HT interrupt and discovery cap */
-#define HT_INTR_REG_INDEX    2 /* intconfig requires indirect accesses */
-
-/*
- * Bits 13-15 of the command register being 0 identifies the
- * slave/primary block.  Clear any HT CRC errors.  We only bother to
- * do this at load time, because it's OK if it happened before we were
- * loaded (first time after boot/reset), but any time after that it's
- * fatal anyway.  We also must not check for upper-byte errors if we
- * are in 8 bit mode, so figure out our width.  For now, at least,
- * also complain if it's 8 bit.
- */
-static void slave_or_pri_blk(struct ipath_devdata *dd, struct pci_dev *pdev,
-                            int pos, u8 cap_type)
-{
-       u8 linkwidth = 0, linkerr, link_a_b_off, link_off;
-       u16 linkctrl = 0;
-       int i;
-
-       dd->ipath_ht_slave_off = pos;
-       /* command word, master_host bit */
-       /* master host || slave */
-       if ((cap_type >> 2) & 1)
-               link_a_b_off = 4;
-       else
-               link_a_b_off = 0;
-       ipath_cdbg(VERBOSE, "HT%u (Link %c) connected to processor\n",
-                  link_a_b_off ? 1 : 0,
-                  link_a_b_off ? 'B' : 'A');
-
-       link_a_b_off += pos;
-
-       /*
-        * check both link control registers; clear both HT CRC sets if
-        * necessary.
-        */
-       for (i = 0; i < 2; i++) {
-               link_off = pos + i * 4 + 0x4;
-               if (pci_read_config_word(pdev, link_off, &linkctrl))
-                       ipath_dev_err(dd, "Couldn't read HT link control%d "
-                                     "register\n", i);
-               else if (linkctrl & (0xf << 8)) {
-                       ipath_cdbg(VERBOSE, "Clear linkctrl%d CRC Error "
-                                  "bits %x\n", i, linkctrl & (0xf << 8));
-                       /*
-                        * now write them back to clear the error.
-                        */
-                       pci_write_config_word(pdev, link_off,
-                                             linkctrl & (0xf << 8));
-               }
-       }
-
-       /*
-        * As with HT CRC bits, same for protocol errors that might occur
-        * during boot.
-        */
-       for (i = 0; i < 2; i++) {
-               link_off = pos + i * 4 + 0xd;
-               if (pci_read_config_byte(pdev, link_off, &linkerr))
-                       dev_info(&pdev->dev, "Couldn't read linkerror%d "
-                                "of HT slave/primary block\n", i);
-               else if (linkerr & 0xf0) {
-                       ipath_cdbg(VERBOSE, "HT linkerr%d bits 0x%x set, "
-                                  "clearing\n", i, linkerr >> 4);
-                       /*
-                        * writing the linkerr bits that are set will clear
-                        * them
-                        */
-                       if (pci_write_config_byte
-                           (pdev, link_off, linkerr))
-                               ipath_dbg("Failed write to clear HT "
-                                         "linkerror%d\n", i);
-                       if (pci_read_config_byte(pdev, link_off, &linkerr))
-                               dev_info(&pdev->dev, "Couldn't reread "
-                                        "linkerror%d of HT slave/primary "
-                                        "block\n", i);
-                       else if (linkerr & 0xf0)
-                               dev_info(&pdev->dev, "HT linkerror%d bits "
-                                        "0x%x couldn't be cleared\n",
-                                        i, linkerr >> 4);
-               }
-       }
-
-       /*
-        * this is just for our link to the host, not devices connected
-        * through tunnel.
-        */
-
-       if (pci_read_config_byte(pdev, link_a_b_off + 7, &linkwidth))
-               ipath_dev_err(dd, "Couldn't read HT link width "
-                             "config register\n");
-       else {
-               u32 width;
-               switch (linkwidth & 7) {
-               case 5:
-                       width = 4;
-                       break;
-               case 4:
-                       width = 2;
-                       break;
-               case 3:
-                       width = 32;
-                       break;
-               case 1:
-                       width = 16;
-                       break;
-               case 0:
-               default:        /* if wrong, assume 8 bit */
-                       width = 8;
-                       break;
-               }
-
-               dd->ipath_lbus_width = width;
-
-               if (linkwidth != 0x11) {
-                       ipath_dev_err(dd, "Not configured for 16 bit HT "
-                                     "(%x)\n", linkwidth);
-                       if (!(linkwidth & 0xf)) {
-                               ipath_dbg("Will ignore HT lane1 errors\n");
-                               dd->ipath_flags |= IPATH_8BIT_IN_HT0;
-                       }
-               }
-       }
-
-       /*
-        * this is just for our link to the host, not devices connected
-        * through tunnel.
-        */
-       if (pci_read_config_byte(pdev, link_a_b_off + 0xd, &linkwidth))
-               ipath_dev_err(dd, "Couldn't read HT link frequency "
-                             "config register\n");
-       else {
-               u32 speed;
-               switch (linkwidth & 0xf) {
-               case 6:
-                       speed = 1000;
-                       break;
-               case 5:
-                       speed = 800;
-                       break;
-               case 4:
-                       speed = 600;
-                       break;
-               case 3:
-                       speed = 500;
-                       break;
-               case 2:
-                       speed = 400;
-                       break;
-               case 1:
-                       speed = 300;
-                       break;
-               default:
-                       /*
-                        * assume reserved and vendor-specific are 200...
-                        */
-               case 0:
-                       speed = 200;
-                       break;
-               }
-               dd->ipath_lbus_speed = speed;
-       }
-
-       snprintf(dd->ipath_lbus_info, sizeof(dd->ipath_lbus_info),
-               "HyperTransport,%uMHz,x%u\n",
-               dd->ipath_lbus_speed,
-               dd->ipath_lbus_width);
-}
-
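slave_or_pri_blk() above decodes the HT link-width and link-frequency configuration bytes with two switch statements. The same decode is repeated here as a standalone sketch so the encodings can be checked in isolation; the helper names are illustrative, not the driver's.

#include <stdio.h>
#include <stdint.h>

/* width decode: 3-bit field, unknown values fall back to 8 bits */
static unsigned decode_ht_width(uint8_t linkwidth)
{
        switch (linkwidth & 7) {
        case 5: return 4;
        case 4: return 2;
        case 3: return 32;
        case 1: return 16;
        case 0:
        default: return 8;
        }
}

/* frequency decode: 4-bit field, in MHz; reserved/vendor-specific treated as 200 */
static unsigned decode_ht_speed(uint8_t linkfreq)
{
        switch (linkfreq & 0xf) {
        case 6: return 1000;
        case 5: return 800;
        case 4: return 600;
        case 3: return 500;
        case 2: return 400;
        case 1: return 300;
        default: return 200;
        }
}

int main(void)
{
        /* 0x11 is the "16 bit in both directions" value the driver checks for */
        printf("width byte 0x11 -> x%u\n", decode_ht_width(0x11));
        printf("freq nibble 0x5 -> %u MHz\n", decode_ht_speed(0x05));
        return 0;
}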
-static int ipath_ht_intconfig(struct ipath_devdata *dd)
-{
-       int ret;
-
-       if (dd->ipath_intconfig) {
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_interruptconfig,
-                                dd->ipath_intconfig);  /* interrupt address */
-               ret = 0;
-       } else {
-               ipath_dev_err(dd, "No interrupts enabled, couldn't setup "
-                             "interrupt address\n");
-               ret = -EINVAL;
-       }
-
-       return ret;
-}
-
-static void ipath_ht_irq_update(struct pci_dev *dev, int irq,
-                               struct ht_irq_msg *msg)
-{
-       struct ipath_devdata *dd = pci_get_drvdata(dev);
-       u64 prev_intconfig = dd->ipath_intconfig;
-
-       dd->ipath_intconfig = msg->address_lo;
-       dd->ipath_intconfig |= ((u64) msg->address_hi) << 32;
-
-       /*
-        * If the previous value of dd->ipath_intconfig is zero, we're
-        * getting configured for the first time, and must not program the
-        * intconfig register here (it will be programmed later, when the
-        * hardware is ready).  Otherwise, we should.
-        */
-       if (prev_intconfig)
-               ipath_ht_intconfig(dd);
-}
-
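ipath_ht_irq_update() above only splices the two 32-bit halves of the HT interrupt message address into the 64-bit intconfig value (and defers programming it on the first configuration). A minimal sketch of that composition; the example address is chosen purely for illustration.

#include <stdio.h>
#include <stdint.h>

static uint64_t combine_intr_addr(uint32_t address_lo, uint32_t address_hi)
{
        return (uint64_t)address_lo | ((uint64_t)address_hi << 32);
}

int main(void)
{
        uint64_t intconfig = combine_intr_addr(0xfee00000u, 0x1u);

        printf("intconfig = 0x%llx\n", (unsigned long long)intconfig);
        return 0;
}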
-/**
- * ipath_setup_ht_config - setup the interruptconfig register
- * @dd: the infinipath device
- * @pdev: the PCI device
- *
- * setup the interruptconfig register from the HT config info.
- * Also clear CRC errors in HT linkcontrol, if necessary.
- * This is done only for the real hardware.  It is done before
- * chip address space is initted, so can't touch infinipath registers
- */
-static int ipath_setup_ht_config(struct ipath_devdata *dd,
-                                struct pci_dev *pdev)
-{
-       int pos, ret;
-
-       ret = __ht_create_irq(pdev, 0, ipath_ht_irq_update);
-       if (ret < 0) {
-               ipath_dev_err(dd, "Couldn't create interrupt handler: "
-                             "err %d\n", ret);
-               goto bail;
-       }
-       dd->ipath_irq = ret;
-       ret = 0;
-
-       /*
-        * Handle clearing CRC errors in linkctrl register if necessary.  We
-        * do this early, before we ever enable errors or hardware errors,
-        * mostly to avoid causing the chip to enter freeze mode.
-        */
-       pos = pci_find_capability(pdev, PCI_CAP_ID_HT);
-       if (!pos) {
-               ipath_dev_err(dd, "Couldn't find HyperTransport "
-                             "capability; no interrupts\n");
-               ret = -ENODEV;
-               goto bail;
-       }
-       do {
-               u8 cap_type;
-
-               /*
-                * The HT capability type byte is 3 bytes after the
-                * capability byte.
-                */
-               if (pci_read_config_byte(pdev, pos + 3, &cap_type)) {
-                       dev_info(&pdev->dev, "Couldn't read config "
-                                "command @ %d\n", pos);
-                       continue;
-               }
-               if (!(cap_type & 0xE0))
-                       slave_or_pri_blk(dd, pdev, pos, cap_type);
-       } while ((pos = pci_find_next_capability(pdev, pos,
-                                                PCI_CAP_ID_HT)));
-
-       dd->ipath_flags |= IPATH_SWAP_PIOBUFS;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_setup_ht_cleanup - clean up any chip-specific stuff
- * @dd: the infinipath device
- *
- * Called during driver unload.
- * This is currently a nop for the HT chip, though not for all chips.
- */
-static void ipath_setup_ht_cleanup(struct ipath_devdata *dd)
-{
-}
-
-/**
- * ipath_setup_ht_setextled - set the state of the two external LEDs
- * @dd: the infinipath device
- * @lst: the L state
- * @ltst: the LT state
- *
- * Set the state of the two external LEDs, to indicate physical and
- * logical state of IB link.   For this chip (at least with recommended
- * board pinouts), LED1 is Green (physical state), and LED2 is Yellow
- * (logical state)
- *
- * Note:  We try to match the Mellanox HCA LED behavior as best
- * we can.  Green indicates physical link state is OK (something is
- * plugged in, and we can train).
- * Amber indicates the link is logically up (ACTIVE).
- * Mellanox further blinks the amber LED to indicate data packet
- * activity, but we have no hardware support for that, so it would
- * require waking up every 10-20 msecs and checking the counters
- * on the chip, and then turning the LED off if appropriate.  That's
- * visible overhead, so not something we will do.
- *
- */
-static void ipath_setup_ht_setextled(struct ipath_devdata *dd,
-                                    u64 lst, u64 ltst)
-{
-       u64 extctl;
-       unsigned long flags = 0;
-
-       /* the diags use the LED to indicate diag info, so we leave
-        * the external LED alone when the diags are running */
-       if (ipath_diag_inuse)
-               return;
-
-       /* Allow override of LED display for, e.g. Locating system in rack */
-       if (dd->ipath_led_override) {
-               ltst = (dd->ipath_led_override & IPATH_LED_PHYS)
-                       ? INFINIPATH_IBCS_LT_STATE_LINKUP
-                       : INFINIPATH_IBCS_LT_STATE_DISABLED;
-               lst = (dd->ipath_led_override & IPATH_LED_LOG)
-                       ? INFINIPATH_IBCS_L_STATE_ACTIVE
-                       : INFINIPATH_IBCS_L_STATE_DOWN;
-       }
-
-       spin_lock_irqsave(&dd->ipath_gpio_lock, flags);
-       /*
-        * start by setting both LED control bits to off, then turn
-        * on the appropriate bit(s).
-        */
-       if (dd->ipath_boardrev == 8) { /* LS/X-1 uses different pins */
-               /*
-                * major difference is that INFINIPATH_EXTC_LEDGBLERR_OFF
-                * is inverted,  because it is normally used to indicate
-                * a hardware fault at reset, if there were errors
-                */
-               extctl = (dd->ipath_extctrl & ~INFINIPATH_EXTC_LEDGBLOK_ON)
-                       | INFINIPATH_EXTC_LEDGBLERR_OFF;
-               if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
-                       extctl &= ~INFINIPATH_EXTC_LEDGBLERR_OFF;
-               if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-                       extctl |= INFINIPATH_EXTC_LEDGBLOK_ON;
-       } else {
-               extctl = dd->ipath_extctrl &
-                       ~(INFINIPATH_EXTC_LED1PRIPORT_ON |
-                         INFINIPATH_EXTC_LED2PRIPORT_ON);
-               if (ltst == INFINIPATH_IBCS_LT_STATE_LINKUP)
-                       extctl |= INFINIPATH_EXTC_LED1PRIPORT_ON;
-               if (lst == INFINIPATH_IBCS_L_STATE_ACTIVE)
-                       extctl |= INFINIPATH_EXTC_LED2PRIPORT_ON;
-       }
-       dd->ipath_extctrl = extctl;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_extctrl, extctl);
-       spin_unlock_irqrestore(&dd->ipath_gpio_lock, flags);
-}
-
-static void ipath_init_ht_variables(struct ipath_devdata *dd)
-{
-       /*
-        * setup the register offsets, since they are different for each
-        * chip
-        */
-       dd->ipath_kregs = &ipath_ht_kregs;
-       dd->ipath_cregs = &ipath_ht_cregs;
-
-       dd->ipath_gpio_sda_num = _IPATH_GPIO_SDA_NUM;
-       dd->ipath_gpio_scl_num = _IPATH_GPIO_SCL_NUM;
-       dd->ipath_gpio_sda = IPATH_GPIO_SDA;
-       dd->ipath_gpio_scl = IPATH_GPIO_SCL;
-
-       /*
-        * Fill in data for field-values that change in newer chips.
-        * We dynamically specify only the mask for LINKTRAININGSTATE
-        * and only the shift for LINKSTATE, as they are the only ones
-        * that change.  Also precalculate the 3 link states of interest
-        * and the combined mask.
-        */
-       dd->ibcs_ls_shift = IBA6110_IBCS_LINKSTATE_SHIFT;
-       dd->ibcs_lts_mask = IBA6110_IBCS_LINKTRAININGSTATE_MASK;
-       dd->ibcs_mask = (INFINIPATH_IBCS_LINKSTATE_MASK <<
-               dd->ibcs_ls_shift) | dd->ibcs_lts_mask;
-       dd->ib_init = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-               INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-               (INFINIPATH_IBCS_L_STATE_INIT << dd->ibcs_ls_shift);
-       dd->ib_arm = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-               INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-               (INFINIPATH_IBCS_L_STATE_ARM << dd->ibcs_ls_shift);
-       dd->ib_active = (INFINIPATH_IBCS_LT_STATE_LINKUP <<
-               INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) |
-               (INFINIPATH_IBCS_L_STATE_ACTIVE << dd->ibcs_ls_shift);
-
-       /*
-        * Fill in data for ibcc field-values that change in newer chips.
-        * We dynamically specify only the mask for LINKINITCMD
-        * and only the shift for LINKCMD and MAXPKTLEN, as they are
-        * the only ones that change.
-        */
-       dd->ibcc_lic_mask = INFINIPATH_IBCC_LINKINITCMD_MASK;
-       dd->ibcc_lc_shift = INFINIPATH_IBCC_LINKCMD_SHIFT;
-       dd->ibcc_mpl_shift = INFINIPATH_IBCC_MAXPKTLEN_SHIFT;
-
-       /* Fill in shifts for RcvCtrl. */
-       dd->ipath_r_portenable_shift = INFINIPATH_R_PORTENABLE_SHIFT;
-       dd->ipath_r_intravail_shift = INFINIPATH_R_INTRAVAIL_SHIFT;
-       dd->ipath_r_tailupd_shift = INFINIPATH_R_TAILUPD_SHIFT;
-       dd->ipath_r_portcfg_shift = 0; /* Not on IBA6110 */
-
-       dd->ipath_i_bitsextant =
-               (INFINIPATH_I_RCVURG_MASK << INFINIPATH_I_RCVURG_SHIFT) |
-               (INFINIPATH_I_RCVAVAIL_MASK <<
-                INFINIPATH_I_RCVAVAIL_SHIFT) |
-               INFINIPATH_I_ERROR | INFINIPATH_I_SPIOSENT |
-               INFINIPATH_I_SPIOBUFAVAIL | INFINIPATH_I_GPIO;
-
-       dd->ipath_e_bitsextant =
-               INFINIPATH_E_RFORMATERR | INFINIPATH_E_RVCRC |
-               INFINIPATH_E_RICRC | INFINIPATH_E_RMINPKTLEN |
-               INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RLONGPKTLEN |
-               INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RUNEXPCHAR |
-               INFINIPATH_E_RUNSUPVL | INFINIPATH_E_REBP |
-               INFINIPATH_E_RIBFLOW | INFINIPATH_E_RBADVERSION |
-               INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-               INFINIPATH_E_RBADTID | INFINIPATH_E_RHDRLEN |
-               INFINIPATH_E_RHDR | INFINIPATH_E_RIBLOSTLINK |
-               INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SMAXPKTLEN |
-               INFINIPATH_E_SUNDERRUN | INFINIPATH_E_SPKTLEN |
-               INFINIPATH_E_SDROPPEDSMPPKT | INFINIPATH_E_SDROPPEDDATAPKT |
-               INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM |
-               INFINIPATH_E_SUNSUPVL | INFINIPATH_E_IBSTATUSCHANGED |
-               INFINIPATH_E_INVALIDADDR | INFINIPATH_E_RESET |
-               INFINIPATH_E_HARDWARE;
-
-       dd->ipath_hwe_bitsextant =
-               (INFINIPATH_HWE_HTCMEMPARITYERR_MASK <<
-                INFINIPATH_HWE_HTCMEMPARITYERR_SHIFT) |
-               (INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-                INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) |
-               (INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-                INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) |
-               INFINIPATH_HWE_HTCLNKABYTE0CRCERR |
-               INFINIPATH_HWE_HTCLNKABYTE1CRCERR |
-               INFINIPATH_HWE_HTCLNKBBYTE0CRCERR |
-               INFINIPATH_HWE_HTCLNKBBYTE1CRCERR |
-               INFINIPATH_HWE_HTCMISCERR4 |
-               INFINIPATH_HWE_HTCMISCERR5 | INFINIPATH_HWE_HTCMISCERR6 |
-               INFINIPATH_HWE_HTCMISCERR7 |
-               INFINIPATH_HWE_HTCBUSTREQPARITYERR |
-               INFINIPATH_HWE_HTCBUSTRESPPARITYERR |
-               INFINIPATH_HWE_HTCBUSIREQPARITYERR |
-               INFINIPATH_HWE_RXDSYNCMEMPARITYERR |
-               INFINIPATH_HWE_MEMBISTFAILED |
-               INFINIPATH_HWE_COREPLL_FBSLIP |
-               INFINIPATH_HWE_COREPLL_RFSLIP |
-               INFINIPATH_HWE_HTBPLL_FBSLIP |
-               INFINIPATH_HWE_HTBPLL_RFSLIP |
-               INFINIPATH_HWE_HTAPLL_FBSLIP |
-               INFINIPATH_HWE_HTAPLL_RFSLIP |
-               INFINIPATH_HWE_SERDESPLLFAILED |
-               INFINIPATH_HWE_IBCBUSTOSPCPARITYERR |
-               INFINIPATH_HWE_IBCBUSFRSPCPARITYERR;
-
-       dd->ipath_i_rcvavail_mask = INFINIPATH_I_RCVAVAIL_MASK;
-       dd->ipath_i_rcvurg_mask = INFINIPATH_I_RCVURG_MASK;
-       dd->ipath_i_rcvavail_shift = INFINIPATH_I_RCVAVAIL_SHIFT;
-       dd->ipath_i_rcvurg_shift = INFINIPATH_I_RCVURG_SHIFT;
-
-       /*
-        * EEPROM error log 0 is TXE Parity errors. 1 is RXE Parity.
-        * 2 is Some Misc, 3 is reserved for future.
-        */
-       dd->ipath_eep_st_masks[0].hwerrs_to_log =
-               INFINIPATH_HWE_TXEMEMPARITYERR_MASK <<
-               INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT;
-
-       dd->ipath_eep_st_masks[1].hwerrs_to_log =
-               INFINIPATH_HWE_RXEMEMPARITYERR_MASK <<
-               INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT;
-
-       dd->ipath_eep_st_masks[2].errs_to_log = INFINIPATH_E_RESET;
-
-       dd->delay_mult = 2; /* SDR, 4X, can't change */
-
-       dd->ipath_link_width_supported = IB_WIDTH_1X | IB_WIDTH_4X;
-       dd->ipath_link_speed_supported = IPATH_IB_SDR;
-       dd->ipath_link_width_enabled = IB_WIDTH_4X;
-       dd->ipath_link_speed_enabled = dd->ipath_link_speed_supported;
-       /* these can't change for this chip, so set once */
-       dd->ipath_link_width_active = dd->ipath_link_width_enabled;
-       dd->ipath_link_speed_active = dd->ipath_link_speed_enabled;
-}
-
-/**
- * ipath_ht_init_hwerrors - enable hardware errors
- * @dd: the infinipath device
- *
- * now that we have finished initializing everything that might reasonably
- * cause a hardware error, and cleared those error bits as they occur,
- * we can enable hardware errors in the mask (potentially enabling
- * freeze mode), and enable hardware errors as errors (along with
- * everything else) in errormask
- */
-static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
-{
-       ipath_err_t val;
-       u64 extsval;
-
-       extsval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_extstatus);
-
-       if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
-               ipath_dev_err(dd, "MemBIST did not complete!\n");
-       if (extsval & INFINIPATH_EXTS_MEMBIST_CORRECT)
-               ipath_dbg("MemBIST corrected\n");
-
-       ipath_check_htlink(dd);
-
-       /* barring bugs, all hwerrors become interrupts */
-       val = -1LL;
-       /* don't look at crc lane1 if 8 bit */
-       if (dd->ipath_flags & IPATH_8BIT_IN_HT0)
-               val &= ~infinipath_hwe_htclnkabyte1crcerr;
-       /* don't look at crc lane1 if 8 bit */
-       if (dd->ipath_flags & IPATH_8BIT_IN_HT1)
-               val &= ~infinipath_hwe_htclnkbbyte1crcerr;
-
-       /*
-        * disable RXDSYNCMEMPARITY because external serdes is unused,
-        * and therefore the logic will never be used or initialized,
-        * and uninitialized state will normally result in this error
-                * being asserted.  Similarly for the external serdes pll
-        * lock signal.
-        */
-       val &= ~(INFINIPATH_HWE_SERDESPLLFAILED |
-                INFINIPATH_HWE_RXDSYNCMEMPARITYERR);
-
-       /*
-        * Disable MISCERR4 because of an inversion in the HT core
-        * logic checking for errors that cause this bit to be set.
-        * The errata can also cause the protocol error bit to be set
-        * in the HT config space linkerror register(s).
-        */
-       val &= ~INFINIPATH_HWE_HTCMISCERR4;
-
-       /*
-        * PLL ignored because unused MDIO interface has a logic problem
-        */
-       if (dd->ipath_boardrev == 4 || dd->ipath_boardrev == 9)
-               val &= ~INFINIPATH_HWE_SERDESPLLFAILED;
-       dd->ipath_hwerrmask = val;
-}
-
-
-
-
-/**
- * ipath_ht_bringup_serdes - bring up the serdes
- * @dd: the infinipath device
- */
-static int ipath_ht_bringup_serdes(struct ipath_devdata *dd)
-{
-       u64 val, config1;
-       int ret = 0, change = 0;
-
-       ipath_dbg("Trying to bringup serdes\n");
-
-       if (ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus) &
-           INFINIPATH_HWE_SERDESPLLFAILED)
-       {
-               ipath_dbg("At start, serdes PLL failed bit set in "
-                         "hwerrstatus, clearing and continuing\n");
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                                INFINIPATH_HWE_SERDESPLLFAILED);
-       }
-
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-       config1 = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig1);
-
-       ipath_cdbg(VERBOSE, "Initial serdes status is config0=%llx "
-                  "config1=%llx, sstatus=%llx xgxs %llx\n",
-                  (unsigned long long) val, (unsigned long long) config1,
-                  (unsigned long long)
-                  ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-                  (unsigned long long)
-                  ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-       /* force reset on */
-       val |= INFINIPATH_SERDC0_RESET_PLL
-               /* | INFINIPATH_SERDC0_RESET_MASK */
-               ;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-       udelay(15);             /* need pll reset set at least for a bit */
-
-       if (val & INFINIPATH_SERDC0_RESET_PLL) {
-               u64 val2 = val &= ~INFINIPATH_SERDC0_RESET_PLL;
-               /* set lane resets, and tx idle, during pll reset */
-               val2 |= INFINIPATH_SERDC0_RESET_MASK |
-                       INFINIPATH_SERDC0_TXIDLE;
-               ipath_cdbg(VERBOSE, "Clearing serdes PLL reset (writing "
-                          "%llx)\n", (unsigned long long) val2);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
-                                val2);
-               /*
-                * be sure chip saw it
-                */
-               val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               /*
-                * need pll reset clear at least 11 usec before lane
-                * resets cleared; give it a few more
-                */
-               udelay(15);
-               val = val2;     /* for check below */
-       }
-
-       if (val & (INFINIPATH_SERDC0_RESET_PLL |
-                  INFINIPATH_SERDC0_RESET_MASK |
-                  INFINIPATH_SERDC0_TXIDLE)) {
-               val &= ~(INFINIPATH_SERDC0_RESET_PLL |
-                        INFINIPATH_SERDC0_RESET_MASK |
-                        INFINIPATH_SERDC0_TXIDLE);
-               /* clear them */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0,
-                                val);
-       }
-
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-       if (val & INFINIPATH_XGXS_RESET) {
-               /* normally true after boot */
-               val &= ~INFINIPATH_XGXS_RESET;
-               change = 1;
-       }
-       if (((val >> INFINIPATH_XGXS_RX_POL_SHIFT) &
-            INFINIPATH_XGXS_RX_POL_MASK) != dd->ipath_rx_pol_inv ) {
-               /* need to compensate for Tx inversion in partner */
-               val &= ~(INFINIPATH_XGXS_RX_POL_MASK <<
-                        INFINIPATH_XGXS_RX_POL_SHIFT);
-               val |= dd->ipath_rx_pol_inv <<
-                       INFINIPATH_XGXS_RX_POL_SHIFT;
-               change = 1;
-       }
-       if (change)
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-       /* clear current and de-emphasis bits */
-       config1 &= ~0x0ffffffff00ULL;
-       /* set current to 20ma */
-       config1 |= 0x00000000000ULL;
-       /* set de-emphasis to -5.68dB */
-       config1 |= 0x0cccc000000ULL;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig1, config1);
-
-       ipath_cdbg(VERBOSE, "After setup: serdes status is config0=%llx "
-                  "config1=%llx, sstatus=%llx xgxs %llx\n",
-                  (unsigned long long) val, (unsigned long long) config1,
-                  (unsigned long long)
-                  ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesstatus),
-                  (unsigned long long)
-                  ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig));
-
-       return ret;             /* for now, say we always succeeded */
-}
-
-/**
- * ipath_ht_quiet_serdes - set serdes to txidle
- * @dd: the infinipath device
- * Called when the driver is being unloaded.
- */
-static void ipath_ht_quiet_serdes(struct ipath_devdata *dd)
-{
-       u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
-
-       val |= INFINIPATH_SERDC0_TXIDLE;
-       ipath_dbg("Setting TxIdleEn on serdes (config0 = %llx)\n",
-                 (unsigned long long) val);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_serdesconfig0, val);
-}
-
-/**
- * ipath_ht_put_tid - write a TID in chip
- * @dd: the infinipath device
- * @tidptr: pointer to the expected TID (in chip) to update
- * @type: RCVHQ_RCV_TYPE_EAGER (1) for eager, RCVHQ_RCV_TYPE_EXPECTED (0) for expected
- * @pa: physical address of in memory buffer; ipath_tidinvalid if freeing
- *
- * This exists as a separate routine to allow for special locking etc.
- * It's used both for the full cleanup on exit and for the normal
- * setup and teardown.
- */
-static void ipath_ht_put_tid(struct ipath_devdata *dd,
-                            u64 __iomem *tidptr, u32 type,
-                            unsigned long pa)
-{
-       if (!dd->ipath_kregbase)
-               return;
-
-       if (pa != dd->ipath_tidinvalid) {
-               if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
-                       dev_info(&dd->pcidev->dev,
-                                "physaddr %lx has more than "
-                                "40 bits, using only 40!!!\n", pa);
-                       pa &= INFINIPATH_RT_ADDR_MASK;
-               }
-               if (type == RCVHQ_RCV_TYPE_EAGER)
-                       pa |= dd->ipath_tidtemplate;
-               else {
-                       /* in words (fixed, full page).  */
-                       u64 lenvalid = PAGE_SIZE >> 2;
-                       lenvalid <<= INFINIPATH_RT_BUFSIZE_SHIFT;
-                       pa |= lenvalid | INFINIPATH_RT_VALID;
-               }
-       }
-
-       writeq(pa, tidptr);
-}
-
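ipath_ht_put_tid() above builds an expected-TID entry from a DMA address limited to 40 bits, a length-in-words field, and a valid bit. A standalone sketch of that composition; the field positions below are assumptions for illustration, the real layout comes from the INFINIPATH_RT_* definitions in the chip headers.

#include <stdio.h>
#include <stdint.h>

#define RT_ADDR_MASK     ((1ULL << 40) - 1)     /* chip uses only 40 address bits */
#define RT_BUFSIZE_SHIFT 48                     /* assumed field position */
#define RT_VALID         (1ULL << 63)           /* assumed field position */

static uint64_t make_expected_tid(uint64_t pa, uint64_t page_bytes)
{
        uint64_t lenvalid = page_bytes >> 2;    /* length in 32-bit words */

        pa &= RT_ADDR_MASK;                     /* drop anything above 40 bits */
        return pa | (lenvalid << RT_BUFSIZE_SHIFT) | RT_VALID;
}

int main(void)
{
        uint64_t tid = make_expected_tid(0x123456789abcULL, 4096);

        printf("TID entry = 0x%016llx\n", (unsigned long long)tid);
        return 0;
}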
-
-/**
- * ipath_ht_clear_tids - clear all TID entries for a port, expected and eager
- * @dd: the infinipath device
- * @port: the port
- *
- * Used from ipath_close(), and at chip initialization.
- */
-static void ipath_ht_clear_tids(struct ipath_devdata *dd, unsigned port)
-{
-       u64 __iomem *tidbase;
-       int i;
-
-       if (!dd->ipath_kregbase)
-               return;
-
-       ipath_cdbg(VERBOSE, "Invalidate TIDs for port %u\n", port);
-
-       /*
-        * need to invalidate all of the expected TID entries for this
-        * port, so we don't have valid entries that might somehow get
-        * used (early in next use of this port, or through some bug)
-        */
-       tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-                                  dd->ipath_rcvtidbase +
-                                  port * dd->ipath_rcvtidcnt *
-                                  sizeof(*tidbase));
-       for (i = 0; i < dd->ipath_rcvtidcnt; i++)
-               ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EXPECTED,
-                                dd->ipath_tidinvalid);
-
-       tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
-                                  dd->ipath_rcvegrbase +
-                                  port * dd->ipath_rcvegrcnt *
-                                  sizeof(*tidbase));
-
-       for (i = 0; i < dd->ipath_rcvegrcnt; i++)
-               ipath_ht_put_tid(dd, &tidbase[i], RCVHQ_RCV_TYPE_EAGER,
-                                dd->ipath_tidinvalid);
-}
-
-/**
- * ipath_ht_tidtemplate - setup constants for TID updates
- * @dd: the infinipath device
- *
- * We set up values that we use a lot, to avoid recalculating them each time
- */
-static void ipath_ht_tidtemplate(struct ipath_devdata *dd)
-{
-       dd->ipath_tidtemplate = dd->ipath_ibmaxlen >> 2;
-       dd->ipath_tidtemplate <<= INFINIPATH_RT_BUFSIZE_SHIFT;
-       dd->ipath_tidtemplate |= INFINIPATH_RT_VALID;
-
-       /*
-        * work around chip errata bug 7358, by marking invalid tids
-        * as having max length
-        */
-       dd->ipath_tidinvalid = (-1LL & INFINIPATH_RT_BUFSIZE_MASK) <<
-               INFINIPATH_RT_BUFSIZE_SHIFT;
-}
-
-static int ipath_ht_early_init(struct ipath_devdata *dd)
-{
-       u32 __iomem *piobuf;
-       u32 pioincr, val32;
-       int i;
-
-       /*
-        * one cache line; long IB headers will spill over into received
-        * buffer
-        */
-       dd->ipath_rcvhdrentsize = 16;
-       dd->ipath_rcvhdrsize = IPATH_DFLT_RCVHDRSIZE;
-
-       /*
-        * For HT, we allocate a somewhat overly large eager buffer,
-        * such that we can guarantee that we can receive the largest
-        * packet that we can send out.  To truly support a 4KB MTU,
-        * we need to bump this to a large value.  To date, other than
-        * testing, we have never encountered an HCA that can really
-        * send 4KB MTU packets, so we do not handle that (we'll get
-        * errors interrupts if we ever see one).
-        * error interrupts if we ever see one).
-       dd->ipath_rcvegrbufsize = dd->ipath_piosize2k;
-
-       /*
-        * the min() check here is currently a nop, but it may not
-        * always be, depending on just how we do ipath_rcvegrbufsize
-        */
-       dd->ipath_ibmaxlen = min(dd->ipath_piosize2k,
-                                dd->ipath_rcvegrbufsize);
-       dd->ipath_init_ibmaxlen = dd->ipath_ibmaxlen;
-       ipath_ht_tidtemplate(dd);
-
-       /*
-        * zero all the TID entries at startup.  We do this for sanity,
-        * in case of a previous driver crash of some kind, and also
-        * because the chip powers up with these memories in an unknown
-        * state.  Use portcnt, not cfgports, since this is for the
-        * full chip, not for current (possibly different) configuration
-        * value.
-        * Chip Errata bug 6447
-        */
-       for (val32 = 0; val32 < dd->ipath_portcnt; val32++)
-               ipath_ht_clear_tids(dd, val32);
-
-       /*
-        * write the pbc of each buffer, to be sure it's initialized, then
-        * cancel all the buffers, and also abort any packets that might
-        * have been in flight for some reason (the latter is for driver
-        * unload/reload, but isn't a bad idea at first init).  PIO send
-        * isn't enabled at this point, so there is no danger of sending
-        * these out on the wire.
-        * Chip Errata bug 6610
-        */
-       piobuf = (u32 __iomem *) (((char __iomem *)(dd->ipath_kregbase)) +
-                                 dd->ipath_piobufbase);
-       pioincr = dd->ipath_palign / sizeof(*piobuf);
-       for (i = 0; i < dd->ipath_piobcnt2k; i++) {
-               /*
-                * reasonable word count, just to init pbc
-                */
-               writel(16, piobuf);
-               piobuf += pioincr;
-       }
-
-       ipath_get_eeprom_info(dd);
-       if (dd->ipath_boardrev == 5) {
-               /*
-                * Later production QHT7040 has same changes as QHT7140, so
-                * can use GPIO interrupts.  They have serial #'s starting
-                * with 128, rather than 112.
-                */
-               if (dd->ipath_serial[0] == '1' &&
-                   dd->ipath_serial[1] == '2' &&
-                   dd->ipath_serial[2] == '8')
-                       dd->ipath_flags |= IPATH_GPIO_INTR;
-               else {
-                       ipath_dev_err(dd, "Unsupported InfiniPath board "
-                               "(serial number %.16s)!\n",
-                               dd->ipath_serial);
-                       return 1;
-               }
-       }
-
-       if (dd->ipath_minrev >= 4) {
-               /* Rev4+ reports extra errors via internal GPIO pins */
-               dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
-               dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-                                dd->ipath_gpio_mask);
-       }
-
-       return 0;
-}
-
-
-/**
- * ipath_ht_get_base_info - set chip-specific flags for user code
- * @pd: the infinipath port data
- * @kbase: ipath_base_info pointer
- *
- * We set the HT and PIO-regswapped runtime flags so user code knows
- * it is running over HyperTransport and can adjust its packet handling.
- */
-static int ipath_ht_get_base_info(struct ipath_portdata *pd, void *kbase)
-{
-       struct ipath_base_info *kinfo = kbase;
-
-       kinfo->spi_runtime_flags |= IPATH_RUNTIME_HT |
-               IPATH_RUNTIME_PIO_REGSWAPPED;
-
-       if (pd->port_dd->ipath_minrev < 4)
-               kinfo->spi_runtime_flags |= IPATH_RUNTIME_RCVHDR_COPY;
-
-       return 0;
-}
-
-static void ipath_ht_free_irq(struct ipath_devdata *dd)
-{
-       free_irq(dd->ipath_irq, dd);
-       ht_destroy_irq(dd->ipath_irq);
-       dd->ipath_irq = 0;
-       dd->ipath_intconfig = 0;
-}
-
-static struct ipath_message_header *
-ipath_ht_get_msgheader(struct ipath_devdata *dd, __le32 *rhf_addr)
-{
-       return (struct ipath_message_header *)
-               &rhf_addr[sizeof(u64) / sizeof(u32)];
-}
-
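ipath_ht_get_msgheader() above locates the message header two 32-bit words (one u64) past the given rhf address, expressed as an index of sizeof(u64)/sizeof(u32) == 2. A tiny sketch of that pointer arithmetic with made-up data.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t rhf_addr[4] = { 0x11111111, 0x22222222, 0xdeadbeef, 0 };
        uint32_t *hdr = &rhf_addr[sizeof(uint64_t) / sizeof(uint32_t)];

        printf("header starts at word index %zu, first word 0x%x\n",
               (size_t)(hdr - rhf_addr), *hdr);
        return 0;
}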
-static void ipath_ht_config_ports(struct ipath_devdata *dd, ushort cfgports)
-{
-       dd->ipath_portcnt =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-       dd->ipath_p0_rcvegrcnt =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-}
-
-static void ipath_ht_read_counters(struct ipath_devdata *dd,
-                                  struct infinipath_counters *cntrs)
-{
-       cntrs->LBIntCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBIntCnt));
-       cntrs->LBFlowStallCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(LBFlowStallCnt));
-       cntrs->TxSDmaDescCnt = 0;
-       cntrs->TxUnsupVLErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnsupVLErrCnt));
-       cntrs->TxDataPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDataPktCnt));
-       cntrs->TxFlowPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowPktCnt));
-       cntrs->TxDwordCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDwordCnt));
-       cntrs->TxLenErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxLenErrCnt));
-       cntrs->TxMaxMinLenErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxMaxMinLenErrCnt));
-       cntrs->TxUnderrunCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxUnderrunCnt));
-       cntrs->TxFlowStallCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxFlowStallCnt));
-       cntrs->TxDroppedPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(TxDroppedPktCnt));
-       cntrs->RxDroppedPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDroppedPktCnt));
-       cntrs->RxDataPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDataPktCnt));
-       cntrs->RxFlowPktCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowPktCnt));
-       cntrs->RxDwordCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxDwordCnt));
-       cntrs->RxLenErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLenErrCnt));
-       cntrs->RxMaxMinLenErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxMaxMinLenErrCnt));
-       cntrs->RxICRCErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxICRCErrCnt));
-       cntrs->RxVCRCErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxVCRCErrCnt));
-       cntrs->RxFlowCtrlErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxFlowCtrlErrCnt));
-       cntrs->RxBadFormatCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBadFormatCnt));
-       cntrs->RxLinkProblemCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLinkProblemCnt));
-       cntrs->RxEBPCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxEBPCnt));
-       cntrs->RxLPCRCErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxLPCRCErrCnt));
-       cntrs->RxBufOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxBufOvflCnt));
-       cntrs->RxTIDFullErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDFullErrCnt));
-       cntrs->RxTIDValidErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxTIDValidErrCnt));
-       cntrs->RxPKeyMismatchCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxPKeyMismatchCnt));
-       cntrs->RxP0HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP0HdrEgrOvflCnt));
-       cntrs->RxP1HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP1HdrEgrOvflCnt));
-       cntrs->RxP2HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP2HdrEgrOvflCnt));
-       cntrs->RxP3HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP3HdrEgrOvflCnt));
-       cntrs->RxP4HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP4HdrEgrOvflCnt));
-       cntrs->RxP5HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP5HdrEgrOvflCnt));
-       cntrs->RxP6HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP6HdrEgrOvflCnt));
-       cntrs->RxP7HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP7HdrEgrOvflCnt));
-       cntrs->RxP8HdrEgrOvflCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(RxP8HdrEgrOvflCnt));
-       cntrs->RxP9HdrEgrOvflCnt = 0;
-       cntrs->RxP10HdrEgrOvflCnt = 0;
-       cntrs->RxP11HdrEgrOvflCnt = 0;
-       cntrs->RxP12HdrEgrOvflCnt = 0;
-       cntrs->RxP13HdrEgrOvflCnt = 0;
-       cntrs->RxP14HdrEgrOvflCnt = 0;
-       cntrs->RxP15HdrEgrOvflCnt = 0;
-       cntrs->RxP16HdrEgrOvflCnt = 0;
-       cntrs->IBStatusChangeCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBStatusChangeCnt));
-       cntrs->IBLinkErrRecoveryCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkErrRecoveryCnt));
-       cntrs->IBLinkDownedCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBLinkDownedCnt));
-       cntrs->IBSymbolErrCnt =
-               ipath_snap_cntr(dd, IPATH_CREG_OFFSET(IBSymbolErrCnt));
-       cntrs->RxVL15DroppedPktCnt = 0;
-       cntrs->RxOtherLocalPhyErrCnt = 0;
-       cntrs->PcieRetryBufDiagQwordCnt = 0;
-       cntrs->ExcessBufferOvflCnt = dd->ipath_overrun_thresh_errs;
-       cntrs->LocalLinkIntegrityErrCnt =
-               (dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
-               dd->ipath_lli_errs : dd->ipath_lli_errors;
-       cntrs->RxVlErrCnt = 0;
-       cntrs->RxDlidFltrCnt = 0;
-}
-
-
-/* no interrupt fallback for these chips */
-static int ipath_ht_nointr_fallback(struct ipath_devdata *dd)
-{
-       return 0;
-}
-
-
-/*
- * reset the XGXS (between serdes and IBC).  Slightly less intrusive
- * than resetting the IBC or external link state, and useful in some
- * cases to cause some retraining.  To do this right, we reset IBC
- * as well.
- */
-static void ipath_ht_xgxs_reset(struct ipath_devdata *dd)
-{
-       u64 val, prev_val;
-
-       prev_val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_xgxsconfig);
-       val = prev_val | INFINIPATH_XGXS_RESET;
-       prev_val &= ~INFINIPATH_XGXS_RESET; /* be sure */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control & ~INFINIPATH_C_LINKENABLE);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, val);
-       ipath_read_kreg32(dd, dd->ipath_kregs->kr_scratch);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_xgxsconfig, prev_val);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control);
-}
-
-
-static int ipath_ht_get_ib_cfg(struct ipath_devdata *dd, int which)
-{
-       int ret;
-
-       switch (which) {
-       case IPATH_IB_CFG_LWID:
-               ret = dd->ipath_link_width_active;
-               break;
-       case IPATH_IB_CFG_SPD:
-               ret = dd->ipath_link_speed_active;
-               break;
-       case IPATH_IB_CFG_LWID_ENB:
-               ret = dd->ipath_link_width_enabled;
-               break;
-       case IPATH_IB_CFG_SPD_ENB:
-               ret = dd->ipath_link_speed_enabled;
-               break;
-       default:
-               ret =  -ENOTSUPP;
-               break;
-       }
-       return ret;
-}
-
-
-/* we assume range checking is already done, if needed */
-static int ipath_ht_set_ib_cfg(struct ipath_devdata *dd, int which, u32 val)
-{
-       int ret = 0;
-
-       if (which == IPATH_IB_CFG_LWID_ENB)
-               dd->ipath_link_width_enabled = val;
-       else if (which == IPATH_IB_CFG_SPD_ENB)
-               dd->ipath_link_speed_enabled = val;
-       else
-               ret = -ENOTSUPP;
-       return ret;
-}
-
-
-static void ipath_ht_config_jint(struct ipath_devdata *dd, u16 a, u16 b)
-{
-}
-
-
-static int ipath_ht_ib_updown(struct ipath_devdata *dd, int ibup, u64 ibcs)
-{
-       ipath_setup_ht_setextled(dd, ipath_ib_linkstate(dd, ibcs),
-               ipath_ib_linktrstate(dd, ibcs));
-       return 0;
-}
-
-
-/**
- * ipath_init_iba6110_funcs - set up the chip-specific function pointers
- * @dd: the infinipath device
- *
- * This is global, and is called directly at init to set up the
- * chip-specific function pointers for later use.
- */
-void ipath_init_iba6110_funcs(struct ipath_devdata *dd)
-{
-       dd->ipath_f_intrsetup = ipath_ht_intconfig;
-       dd->ipath_f_bus = ipath_setup_ht_config;
-       dd->ipath_f_reset = ipath_setup_ht_reset;
-       dd->ipath_f_get_boardname = ipath_ht_boardname;
-       dd->ipath_f_init_hwerrors = ipath_ht_init_hwerrors;
-       dd->ipath_f_early_init = ipath_ht_early_init;
-       dd->ipath_f_handle_hwerrors = ipath_ht_handle_hwerrors;
-       dd->ipath_f_quiet_serdes = ipath_ht_quiet_serdes;
-       dd->ipath_f_bringup_serdes = ipath_ht_bringup_serdes;
-       dd->ipath_f_clear_tids = ipath_ht_clear_tids;
-       dd->ipath_f_put_tid = ipath_ht_put_tid;
-       dd->ipath_f_cleanup = ipath_setup_ht_cleanup;
-       dd->ipath_f_setextled = ipath_setup_ht_setextled;
-       dd->ipath_f_get_base_info = ipath_ht_get_base_info;
-       dd->ipath_f_free_irq = ipath_ht_free_irq;
-       dd->ipath_f_tidtemplate = ipath_ht_tidtemplate;
-       dd->ipath_f_intr_fallback = ipath_ht_nointr_fallback;
-       dd->ipath_f_get_msgheader = ipath_ht_get_msgheader;
-       dd->ipath_f_config_ports = ipath_ht_config_ports;
-       dd->ipath_f_read_counters = ipath_ht_read_counters;
-       dd->ipath_f_xgxs_reset = ipath_ht_xgxs_reset;
-       dd->ipath_f_get_ib_cfg = ipath_ht_get_ib_cfg;
-       dd->ipath_f_set_ib_cfg = ipath_ht_set_ib_cfg;
-       dd->ipath_f_config_jint = ipath_ht_config_jint;
-       dd->ipath_f_ib_updown = ipath_ht_ib_updown;
-
-       /*
-        * initialize chip-specific variables
-        */
-       ipath_init_ht_variables(dd);
-}
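ipath_init_iba6110_funcs() above fills in a table of chip-specific function pointers that the generic driver dispatches through later. A minimal sketch of that dispatch pattern with made-up hook names; it illustrates the idea, not the driver's actual interface.

#include <stdio.h>

struct chip_ops {
        int (*bringup)(void *dev);
        void (*quiet)(void *dev);
};

static int ht_bringup(void *dev)
{
        printf("HT bringup (dev=%p)\n", dev);
        return 0;
}

static void ht_quiet(void *dev)
{
        printf("HT quiet (dev=%p)\n", dev);
}

/* chip-specific init only wires up the table */
static void init_ht_funcs(struct chip_ops *ops)
{
        ops->bringup = ht_bringup;
        ops->quiet = ht_quiet;
}

int main(void)
{
        struct chip_ops ops;

        init_ht_funcs(&ops);
        ops.bringup(NULL);      /* generic code calls through the table */
        ops.quiet(NULL);
        return 0;
}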
diff --git a/drivers/staging/rdma/ipath/ipath_init_chip.c b/drivers/staging/rdma/ipath/ipath_init_chip.c
deleted file mode 100644 (file)
index a5eea19..0000000
+++ /dev/null
@@ -1,1062 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/netdevice.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/stat.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_kernel.h"
-#include "ipath_common.h"
-
-/*
- * min buffers we want to have per port, after driver
- */
-#define IPATH_MIN_USER_PORT_BUFCNT 7
-
-/*
- * Number of ports we are configured to use (to allow for more pio
- * buffers per port, etc.)  Zero means use chip value.
- */
-static ushort ipath_cfgports;
-
-module_param_named(cfgports, ipath_cfgports, ushort, S_IRUGO);
-MODULE_PARM_DESC(cfgports, "Set max number of ports to use");
-
-/*
- * Number of buffers reserved for driver (verbs and layered drivers.)
- * Initialized based on number of PIO buffers if not set via module interface.
- * The problem with this is that it's global, but we'll use different
- * numbers for different chip types.
- */
-static ushort ipath_kpiobufs;
-
-static int ipath_set_kpiobufs(const char *val, struct kernel_param *kp);
-
-module_param_call(kpiobufs, ipath_set_kpiobufs, param_get_ushort,
-                 &ipath_kpiobufs, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(kpiobufs, "Set number of PIO buffers for driver");
-
-/**
- * create_port0_egr - allocate the eager TID buffers
- * @dd: the infinipath device
- *
- * This code is now quite different for user and kernel, because
- * the kernel uses skbs for accelerated network performance.
- * This is the kernel (port0) version.
- *
- * Allocate the eager TID buffers and program them into infinipath.
- * We use the network layer alloc_skb() allocator to allocate the
- * memory, and either use the buffers as is for things like verbs
- * packets, or pass the buffers up to the ipath layered driver and
- * thence the network layer, replacing them as we do so (see
- * ipath_rcv_layer()).
- */
-static int create_port0_egr(struct ipath_devdata *dd)
-{
-       unsigned e, egrcnt;
-       struct ipath_skbinfo *skbinfo;
-       int ret;
-
-       egrcnt = dd->ipath_p0_rcvegrcnt;
-
-       skbinfo = vmalloc(sizeof(*dd->ipath_port0_skbinfo) * egrcnt);
-       if (skbinfo == NULL) {
-               ipath_dev_err(dd, "allocation error for eager TID "
-                             "skb array\n");
-               ret = -ENOMEM;
-               goto bail;
-       }
-       for (e = 0; e < egrcnt; e++) {
-               /*
-                * This is a bit tricky in that we allocate extra
-                * space for 2 bytes of the 14 byte ethernet header.
-                * These two bytes are passed in the ipath header so
-                * the rest of the data is word aligned.  We allocate
-                * 4 bytes so that the data buffer stays word aligned.
-                * See ipath_kreceive() for more details.
-                */
-               skbinfo[e].skb = ipath_alloc_skb(dd, GFP_KERNEL);
-               if (!skbinfo[e].skb) {
-                       ipath_dev_err(dd, "SKB allocation error for "
-                                     "eager TID %u\n", e);
-                       while (e != 0)
-                               dev_kfree_skb(skbinfo[--e].skb);
-                       vfree(skbinfo);
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-       }
-       /*
-        * Set only after the loop above succeeds, so we can test for
-        * non-NULL to see if the buffers are ready to use at receive, etc.
-        */
-       dd->ipath_port0_skbinfo = skbinfo;
-
-       for (e = 0; e < egrcnt; e++) {
-               dd->ipath_port0_skbinfo[e].phys =
-                 ipath_map_single(dd->pcidev,
-                                  dd->ipath_port0_skbinfo[e].skb->data,
-                                  dd->ipath_ibmaxlen, PCI_DMA_FROMDEVICE);
-               dd->ipath_f_put_tid(dd, e + (u64 __iomem *)
-                                   ((char __iomem *) dd->ipath_kregbase +
-                                    dd->ipath_rcvegrbase),
-                                   RCVHQ_RCV_TYPE_EAGER,
-                                   dd->ipath_port0_skbinfo[e].phys);
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-static int bringup_link(struct ipath_devdata *dd)
-{
-       u64 val, ibc;
-       int ret = 0;
-
-       /* hold IBC in reset */
-       dd->ipath_control &= ~INFINIPATH_C_LINKENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control);
-
-       /*
-        * set initial max size pkt IBC will send, including ICRC; it's the
-        * PIO buffer size in dwords, less 1; also see ipath_set_mtu()
-        */
-       val = (dd->ipath_ibmaxlen >> 2) + 1;
-       ibc = val << dd->ibcc_mpl_shift;
-
-       /* flowcontrolwatermark is in units of KBytes */
-       ibc |= 0x5ULL << INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT;
-       /*
-        * How often flowctrl is sent.  More or less in usecs; balance against
-        * watermark value, so that in theory senders always get a flow
-        * control update in time to not let the IB link go idle.
-        */
-       ibc |= 0x3ULL << INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT;
-       /* max error tolerance */
-       ibc |= 0xfULL << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
-       /* use "real" buffer space for IB credit flow control */
-       ibc |= 4ULL << INFINIPATH_IBCC_CREDITSCALE_SHIFT;
-       ibc |= 0xfULL << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
-       /* initially come up waiting for TS1, without sending anything. */
-       dd->ipath_ibcctrl = ibc;
-       /*
-        * Want to start out with both LINKCMD and LINKINITCMD in NOP
-        * (0 and 0).  Don't put linkinitcmd in ipath_ibcctrl, want that
-        * to stay a NOP. Flag that we are disabled, for the (unlikely)
-        * case that some recovery path is trying to bring the link up
-        * before we are ready.
-        */
-       ibc |= INFINIPATH_IBCC_LINKINITCMD_DISABLE <<
-               INFINIPATH_IBCC_LINKINITCMD_SHIFT;
-       dd->ipath_flags |= IPATH_IB_LINK_DISABLED;
-       ipath_cdbg(VERBOSE, "Writing 0x%llx to ibcctrl\n",
-                  (unsigned long long) ibc);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl, ibc);
-
-       /* be sure chip saw it */
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-       ret = dd->ipath_f_bringup_serdes(dd);
-
-       if (ret)
-               dev_info(&dd->pcidev->dev, "Could not initialize SerDes, "
-                        "not usable\n");
-       else {
-               /* enable IBC */
-               dd->ipath_control |= INFINIPATH_C_LINKENABLE;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                                dd->ipath_control);
-       }
-
-       return ret;
-}
-
-static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
-{
-       struct ipath_portdata *pd;
-
-       pd = kzalloc(sizeof(*pd), GFP_KERNEL);
-       if (pd) {
-               pd->port_dd = dd;
-               pd->port_cnt = 1;
-               /* The port 0 pkey table is used by the layer interface. */
-               pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
-               pd->port_seq_cnt = 1;
-       }
-       return pd;
-}
-
-static int init_chip_first(struct ipath_devdata *dd)
-{
-       struct ipath_portdata *pd;
-       int ret = 0;
-       u64 val;
-
-       spin_lock_init(&dd->ipath_kernel_tid_lock);
-       spin_lock_init(&dd->ipath_user_tid_lock);
-       spin_lock_init(&dd->ipath_sendctrl_lock);
-       spin_lock_init(&dd->ipath_uctxt_lock);
-       spin_lock_init(&dd->ipath_sdma_lock);
-       spin_lock_init(&dd->ipath_gpio_lock);
-       spin_lock_init(&dd->ipath_eep_st_lock);
-       spin_lock_init(&dd->ipath_sdepb_lock);
-       mutex_init(&dd->ipath_eep_lock);
-
-       /*
-        * skip cfgports stuff because we are not allocating memory,
-        * and we don't want problems if the portcnt changed due to
-        * cfgports.  We do still check and report a difference, if
-        * not the same (which should be impossible).
-        */
-       dd->ipath_f_config_ports(dd, ipath_cfgports);
-       if (!ipath_cfgports)
-               dd->ipath_cfgports = dd->ipath_portcnt;
-       else if (ipath_cfgports <= dd->ipath_portcnt) {
-               dd->ipath_cfgports = ipath_cfgports;
-               ipath_dbg("Configured to use %u ports out of %u in chip\n",
-                         dd->ipath_cfgports, ipath_read_kreg32(dd,
-                         dd->ipath_kregs->kr_portcnt));
-       } else {
-               dd->ipath_cfgports = dd->ipath_portcnt;
-               ipath_dbg("Tried to configure %u ports; chip "
-                         "only supports %u\n", ipath_cfgports,
-                         ipath_read_kreg32(dd,
-                                 dd->ipath_kregs->kr_portcnt));
-       }
-       /*
-        * Allocate full portcnt array, rather than just cfgports, because
-        * cleanup iterates across all possible ports.
-        */
-       dd->ipath_pd = kcalloc(dd->ipath_portcnt, sizeof(*dd->ipath_pd),
-                              GFP_KERNEL);
-
-       if (!dd->ipath_pd) {
-               ipath_dev_err(dd, "Unable to allocate portdata array, "
-                             "failing\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       pd = create_portdata0(dd);
-       if (!pd) {
-               ipath_dev_err(dd, "Unable to allocate portdata for port "
-                             "0, failing\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-       dd->ipath_pd[0] = pd;
-
-       dd->ipath_rcvtidcnt =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
-       dd->ipath_rcvtidbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
-       dd->ipath_rcvegrcnt =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-       dd->ipath_rcvegrbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
-       dd->ipath_palign =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_pagealign);
-       dd->ipath_piobufbase =
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufbase);
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiosize);
-       dd->ipath_piosize2k = val & ~0U;
-       dd->ipath_piosize4k = val >> 32;
-       if (dd->ipath_piosize4k == 0 && ipath_mtu4096)
-               ipath_mtu4096 = 0; /* 4KB not supported by this chip */
-       dd->ipath_ibmtu = ipath_mtu4096 ? 4096 : 2048;
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpiobufcnt);
-       dd->ipath_piobcnt2k = val & ~0U;
-       dd->ipath_piobcnt4k = val >> 32;
-       dd->ipath_pio2kbase =
-               (u32 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
-                                (dd->ipath_piobufbase & 0xffffffff));
-       if (dd->ipath_piobcnt4k) {
-               dd->ipath_pio4kbase = (u32 __iomem *)
-                       (((char __iomem *) dd->ipath_kregbase) +
-                        (dd->ipath_piobufbase >> 32));
-               /*
-                * 4K buffers take 2 pages; we use roundup just to be
-                * paranoid; we calculate it once here, rather than on
-                * every buf allocate
-                */
-               dd->ipath_4kalign = ALIGN(dd->ipath_piosize4k,
-                                         dd->ipath_palign);
-               ipath_dbg("%u 2k(%x) piobufs @ %p, %u 4k(%x) @ %p "
-                         "(%x aligned)\n",
-                         dd->ipath_piobcnt2k, dd->ipath_piosize2k,
-                         dd->ipath_pio2kbase, dd->ipath_piobcnt4k,
-                         dd->ipath_piosize4k, dd->ipath_pio4kbase,
-                         dd->ipath_4kalign);
-       } else {
-               ipath_dbg("%u 2k piobufs @ %p\n",
-                         dd->ipath_piobcnt2k, dd->ipath_pio2kbase);
-       }
-done:
-       return ret;
-}
-
-/**
- * init_chip_reset - re-initialize after a reset, or enable
- * @dd: the infinipath device
- *
- * sanity check at least some of the values after reset, and
- * ensure no receive or transmit (explicitly, in case reset
- * failed)
- */
-static int init_chip_reset(struct ipath_devdata *dd)
-{
-       u32 rtmp;
-       int i;
-       unsigned long flags;
-
-       /*
-        * ensure chip does no sends or receives, tail updates, or
-        * pioavail updates while we re-initialize
-        */
-       dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
-       for (i = 0; i < dd->ipath_portcnt; i++) {
-               clear_bit(dd->ipath_r_portenable_shift + i,
-                         &dd->ipath_rcvctrl);
-               clear_bit(dd->ipath_r_intravail_shift + i,
-                         &dd->ipath_rcvctrl);
-       }
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-               dd->ipath_rcvctrl);
-
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl = 0U; /* no sdma, etc */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-
-       rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
-       if (rtmp != dd->ipath_rcvtidcnt)
-               dev_info(&dd->pcidev->dev, "tidcnt was %u before "
-                        "reset, now %u, using original\n",
-                        dd->ipath_rcvtidcnt, rtmp);
-       rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidbase);
-       if (rtmp != dd->ipath_rcvtidbase)
-               dev_info(&dd->pcidev->dev, "tidbase was %u before "
-                        "reset, now %u, using original\n",
-                        dd->ipath_rcvtidbase, rtmp);
-       rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrcnt);
-       if (rtmp != dd->ipath_rcvegrcnt)
-               dev_info(&dd->pcidev->dev, "egrcnt was %u before "
-                        "reset, now %u, using original\n",
-                        dd->ipath_rcvegrcnt, rtmp);
-       rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvegrbase);
-       if (rtmp != dd->ipath_rcvegrbase)
-               dev_info(&dd->pcidev->dev, "egrbase was %u before "
-                        "reset, now %u, using original\n",
-                        dd->ipath_rcvegrbase, rtmp);
-
-       return 0;
-}
-
-static int init_pioavailregs(struct ipath_devdata *dd)
-{
-       int ret;
-
-       dd->ipath_pioavailregs_dma = dma_alloc_coherent(
-               &dd->pcidev->dev, PAGE_SIZE, &dd->ipath_pioavailregs_phys,
-               GFP_KERNEL);
-       if (!dd->ipath_pioavailregs_dma) {
-               ipath_dev_err(dd, "failed to allocate PIOavail reg area "
-                             "in memory\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       /*
-        * we really want L2 cache aligned, but for current CPUs of
-        * interest, they are the same.
-        */
-       dd->ipath_statusp = (u64 *)
-               ((char *)dd->ipath_pioavailregs_dma +
-                ((2 * L1_CACHE_BYTES +
-                  dd->ipath_pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
-       /* copy the current value now that it's really allocated */
-       *dd->ipath_statusp = dd->_ipath_status;
-       /*
-        * setup buffer to hold freeze msg, accessible to apps,
-        * following statusp
-        */
-       dd->ipath_freezemsg = (char *)&dd->ipath_statusp[1];
-       /* and its length */
-       dd->ipath_freezelen = L1_CACHE_BYTES - sizeof(dd->ipath_statusp[0]);
-
-       ret = 0;
-
-done:
-       return ret;
-}
-
-/**
- * init_shadow_tids - allocate the shadow TID array
- * @dd: the infinipath device
- *
- * allocate the shadow TID array, so we can ipath_munlock previous
- * entries.  It may make more sense to move the pageshadow to the
- * port data structure, so we only allocate memory for ports actually
- * in use, since we are at 8k per port now.
- */
-static void init_shadow_tids(struct ipath_devdata *dd)
-{
-       struct page **pages;
-       dma_addr_t *addrs;
-
-       pages = vzalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-                       sizeof(struct page *));
-       if (!pages) {
-               ipath_dev_err(dd, "failed to allocate shadow page * "
-                             "array, no expected sends!\n");
-               dd->ipath_pageshadow = NULL;
-               return;
-       }
-
-       addrs = vmalloc(dd->ipath_cfgports * dd->ipath_rcvtidcnt *
-                       sizeof(dma_addr_t));
-       if (!addrs) {
-               ipath_dev_err(dd, "failed to allocate shadow dma handle "
-                             "array, no expected sends!\n");
-               vfree(pages);
-               dd->ipath_pageshadow = NULL;
-               return;
-       }
-
-       dd->ipath_pageshadow = pages;
-       dd->ipath_physshadow = addrs;
-}
-
-static void enable_chip(struct ipath_devdata *dd, int reinit)
-{
-       u32 val;
-       u64 rcvmask;
-       unsigned long flags;
-       int i;
-
-       if (!reinit)
-               init_waitqueue_head(&ipath_state_wait);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       /* Enable PIO send, and update of PIOavail regs to memory. */
-       dd->ipath_sendctrl = INFINIPATH_S_PIOENABLE |
-               INFINIPATH_S_PIOBUFAVAILUPD;
-
-       /*
-        * Set the PIO avail update threshold to host memory
-        * on chips that support it.
-        */
-       if (dd->ipath_pioupd_thresh)
-               dd->ipath_sendctrl |= dd->ipath_pioupd_thresh
-                       << INFINIPATH_S_UPDTHRESH_SHIFT;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       /*
-        * Enable kernel ports' receive and receive interrupt.
-        * Other ports done as user opens and inits them.
-        */
-       rcvmask = 1ULL;
-       dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
-               (rcvmask << dd->ipath_r_intravail_shift);
-       if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
-               dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                        dd->ipath_rcvctrl);
-
-       /*
-        * now ready for use.  this should be cleared whenever we
-        * detect a reset, or initiate one.
-        */
-       dd->ipath_flags |= IPATH_INITTED;
-
-       /*
-        * Init our shadow copies of head from tail values,
-        * and write head values to match.
-        */
-       val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
-       ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
-
-       /* Initialize so we interrupt on next packet received */
-       ipath_write_ureg(dd, ur_rcvhdrhead,
-                        dd->ipath_rhdrhead_intr_off |
-                        dd->ipath_pd[0]->port_head, 0);
-
-       /*
-        * by now pioavail updates to memory should have occurred, so
-        * copy them into our working/shadow registers; this is in
-        * case something went wrong with abort, but mostly to get the
-        * initial values of the generation bit correct.
-        */
-       for (i = 0; i < dd->ipath_pioavregs; i++) {
-               __le64 pioavail;
-
-               /*
-                * Chip Errata bug 6641; even and odd qwords>3 are swapped.
-                */
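-               /*
-                * (i ^ 1 swaps each even/odd pair: 4<->5, 6<->7, and so on;
-                * qwords 0-3 are left alone by the i > 3 check below.)
-                */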
-               if (i > 3 && (dd->ipath_flags & IPATH_SWAP_PIOBUFS))
-                       pioavail = dd->ipath_pioavailregs_dma[i ^ 1];
-               else
-                       pioavail = dd->ipath_pioavailregs_dma[i];
-               /*
-                * don't need to worry about ipath_pioavailkernel here
-                * because we will call ipath_chg_pioavailkernel() later
-                * in initialization, to busy out buffers as needed
-                */
-               dd->ipath_pioavailshadow[i] = le64_to_cpu(pioavail);
-       }
-       /* can get counters, stats, etc. */
-       dd->ipath_flags |= IPATH_PRESENT;
-}
-
-static int init_housekeeping(struct ipath_devdata *dd, int reinit)
-{
-       char boardn[40];
-       int ret = 0;
-
-       /*
-        * have to clear shadow copies of registers at init that are
-        * not otherwise set here, or all kinds of bizarre things
-        * happen with driver on chip reset
-        */
-       dd->ipath_rcvhdrsize = 0;
-
-       /*
-        * Don't clear ipath_flags as 8bit mode was set before
-        * entering this func. However, we do set the linkstate to
-        * unknown, so we can watch for a transition.
-        * PRESENT is set because we want register reads to work,
-        * and the kernel infrastructure saw it in config space;
-        * We clear it if we have failures.
-        */
-       dd->ipath_flags |= IPATH_LINKUNK | IPATH_PRESENT;
-       dd->ipath_flags &= ~(IPATH_LINKACTIVE | IPATH_LINKARMED |
-                            IPATH_LINKDOWN | IPATH_LINKINIT);
-
-       ipath_cdbg(VERBOSE, "Try to read spc chip revision\n");
-       dd->ipath_revision =
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_revision);
-
-       /*
-        * set up fundamental info we need to use the chip; we assume
-        * if the revision reg and these regs are OK, we don't need to
-        * special case the rest
-        */
-       dd->ipath_sregbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_sendregbase);
-       dd->ipath_cregbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_counterregbase);
-       dd->ipath_uregbase =
-               ipath_read_kreg32(dd, dd->ipath_kregs->kr_userregbase);
-       ipath_cdbg(VERBOSE, "ipath_kregbase %p, sendbase %x usrbase %x, "
-                  "cntrbase %x\n", dd->ipath_kregbase, dd->ipath_sregbase,
-                  dd->ipath_uregbase, dd->ipath_cregbase);
-       if ((dd->ipath_revision & 0xffffffff) == 0xffffffff
-           || (dd->ipath_sregbase & 0xffffffff) == 0xffffffff
-           || (dd->ipath_cregbase & 0xffffffff) == 0xffffffff
-           || (dd->ipath_uregbase & 0xffffffff) == 0xffffffff) {
-               ipath_dev_err(dd, "Register read failures from chip, "
-                             "giving up initialization\n");
-               dd->ipath_flags &= ~IPATH_PRESENT;
-               ret = -ENODEV;
-               goto done;
-       }
-
-
-       /* clear diagctrl register, in case diags were running and crashed */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwdiagctrl, 0);
-
-       /* clear the initial reset flag, in case first driver load */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-                        INFINIPATH_E_RESET);
-
-       ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
-                  (unsigned long long) dd->ipath_revision,
-                  dd->ipath_pcirev);
-
-       if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
-            INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
-               ipath_dev_err(dd, "Driver only handles version %d, "
-                             "chip swversion is %d (%llx), failing\n",
-                             IPATH_CHIP_SWVERSION,
-                             (int)(dd->ipath_revision >>
-                                   INFINIPATH_R_SOFTWARE_SHIFT) &
-                             INFINIPATH_R_SOFTWARE_MASK,
-                             (unsigned long long) dd->ipath_revision);
-               ret = -ENOSYS;
-               goto done;
-       }
-       dd->ipath_majrev = (u8) ((dd->ipath_revision >>
-                                 INFINIPATH_R_CHIPREVMAJOR_SHIFT) &
-                                INFINIPATH_R_CHIPREVMAJOR_MASK);
-       dd->ipath_minrev = (u8) ((dd->ipath_revision >>
-                                 INFINIPATH_R_CHIPREVMINOR_SHIFT) &
-                                INFINIPATH_R_CHIPREVMINOR_MASK);
-       dd->ipath_boardrev = (u8) ((dd->ipath_revision >>
-                                   INFINIPATH_R_BOARDID_SHIFT) &
-                                  INFINIPATH_R_BOARDID_MASK);
-
-       ret = dd->ipath_f_get_boardname(dd, boardn, sizeof boardn);
-
-       snprintf(dd->ipath_boardversion, sizeof(dd->ipath_boardversion),
-                "ChipABI %u.%u, %s, InfiniPath%u %u.%u, PCI %u, "
-                "SW Compat %u\n",
-                IPATH_CHIP_VERS_MAJ, IPATH_CHIP_VERS_MIN, boardn,
-                (unsigned)(dd->ipath_revision >> INFINIPATH_R_ARCH_SHIFT) &
-                INFINIPATH_R_ARCH_MASK,
-                dd->ipath_majrev, dd->ipath_minrev, dd->ipath_pcirev,
-                (unsigned)(dd->ipath_revision >>
-                           INFINIPATH_R_SOFTWARE_SHIFT) &
-                INFINIPATH_R_SOFTWARE_MASK);
-
-       ipath_dbg("%s", dd->ipath_boardversion);
-
-       if (ret)
-               goto done;
-
-       if (reinit)
-               ret = init_chip_reset(dd);
-       else
-               ret = init_chip_first(dd);
-
-done:
-       return ret;
-}
-
-static void verify_interrupt(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-
-       if (!dd)
-               return; /* being torn down */
-
-       /*
-        * If we don't have any interrupts, let the user know and
-        * don't bother checking again.
-        */
-       if (dd->ipath_int_counter == 0) {
-               if (!dd->ipath_f_intr_fallback(dd))
-                       dev_err(&dd->pcidev->dev, "No interrupts detected, "
-                               "not usable.\n");
-               else /* re-arm the timer to see if fallback works */
-                       mod_timer(&dd->ipath_intrchk_timer, jiffies + HZ/2);
-       } else
-               ipath_cdbg(VERBOSE, "%u interrupts at timer check\n",
-                       dd->ipath_int_counter);
-}
-
-/**
- * ipath_init_chip - do the actual initialization sequence on the chip
- * @dd: the infinipath device
- * @reinit: reinitializing, so don't allocate new memory
- *
- * Do the actual initialization sequence on the chip.  This is done
- * both from the init routine called from the PCI infrastructure, and
- * when we reset the chip, or detect that it was reset internally,
- * or it's administratively re-enabled.
- *
- * Memory allocation here and in called routines is only done in
- * the first case (reinit == 0).  We have to be careful, because even
- * without memory allocation, we need to re-write all the chip registers,
- * TIDs, etc., after the reset or enable has completed.
- */
-int ipath_init_chip(struct ipath_devdata *dd, int reinit)
-{
-       int ret = 0;
-       u32 kpiobufs, defkbufs;
-       u32 piobufs, uports;
-       u64 val;
-       struct ipath_portdata *pd;
-       gfp_t gfp_flags = GFP_USER | __GFP_COMP;
-
-       ret = init_housekeeping(dd, reinit);
-       if (ret)
-               goto done;
-
-       /*
-        * We could bump this to allow for full rcvegrcnt + rcvtidcnt,
-        * but then it no longer nicely fits a power of two, and since
-        * we now use routines that backend onto __get_free_pages, the
-        * rest would be wasted.
-        */
-       dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
-                        dd->ipath_rcvhdrcnt);
-
-       /*
-        * Set up the shadow copies of the piobufavail registers,
-        * which we compare against the chip registers for now, and
-        * the in memory DMA'ed copies of the registers.  This has to
-        * be done early, before we calculate lastport, etc.
-        */
-       piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-       /*
-        * calc number of pioavail registers, and save it; we have 2
-        * bits per buffer.
-        */
-       dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
-               / (sizeof(u64) * BITS_PER_BYTE / 2);
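-       /*
-        * Worked example: sizeof(u64) * BITS_PER_BYTE / 2 is 32, so each
-        * 64-bit pioavail register covers 32 buffers; ALIGN() rounds
-        * piobufs up to a multiple of 32 before dividing, i.e.
-        * pioavregs = ceil(piobufs / 32), e.g. 144 buffers need 5 regs.
-        */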
-       uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
-       if (piobufs > 144)
-               defkbufs = 32 + dd->ipath_pioreserved;
-       else
-               defkbufs = 16 + dd->ipath_pioreserved;
-
-       if (ipath_kpiobufs && (ipath_kpiobufs +
-               (uports * IPATH_MIN_USER_PORT_BUFCNT)) > piobufs) {
-               int i = (int) piobufs -
-                       (int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
-               if (i < 1)
-                       i = 1;
-               dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
-                        "%d for kernel leaves too few for %d user ports "
-                        "(%d each); using %u\n", ipath_kpiobufs,
-                        piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
-               /*
-                * shouldn't change ipath_kpiobufs, because it could be
-                * different for different devices...
-                */
-               kpiobufs = i;
-       } else if (ipath_kpiobufs)
-               kpiobufs = ipath_kpiobufs;
-       else
-               kpiobufs = defkbufs;
-       dd->ipath_lastport_piobuf = piobufs - kpiobufs;
-       dd->ipath_pbufsport =
-               uports ? dd->ipath_lastport_piobuf / uports : 0;
-       /* if not an even divisor, some user ports get extra buffers */
-       dd->ipath_ports_extrabuf = dd->ipath_lastport_piobuf -
-               (dd->ipath_pbufsport * uports);
-       if (dd->ipath_ports_extrabuf)
-               ipath_dbg("%u pbufs/port leaves some unused, add 1 buffer to "
-                       "ports <= %u\n", dd->ipath_pbufsport,
-                       dd->ipath_ports_extrabuf);
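-       /*
-        * E.g. (made-up numbers): 110 buffers left for 4 user ports gives
-        * pbufsport = 27 with ports_extrabuf = 2, so per the message above
-        * the ports numbered <= 2 get one extra buffer (28) and the rest
-        * get 27.
-        */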
-       dd->ipath_lastpioindex = 0;
-       dd->ipath_lastpioindexl = dd->ipath_piobcnt2k;
-       /* ipath_pioavailshadow initialized earlier */
-       ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
-                  "each for %u user ports\n", kpiobufs,
-                  piobufs, dd->ipath_pbufsport, uports);
-       ret = dd->ipath_f_early_init(dd);
-       if (ret) {
-               ipath_dev_err(dd, "Early initialization failure\n");
-               goto done;
-       }
-
-       /*
-        * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
-        * done after early_init.
-        */
-       dd->ipath_hdrqlast =
-               dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
-                        dd->ipath_rcvhdrentsize);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrsize,
-                        dd->ipath_rcvhdrsize);
-
-       if (!reinit) {
-               ret = init_pioavailregs(dd);
-               init_shadow_tids(dd);
-               if (ret)
-                       goto done;
-       }
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendpioavailaddr,
-                        dd->ipath_pioavailregs_phys);
-
-       /*
-        * this is to detect s/w errors, which the h/w works around by
-        * ignoring the low 6 bits of address, if it wasn't aligned.
-        */
-       val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendpioavailaddr);
-       if (val != dd->ipath_pioavailregs_phys) {
-               ipath_dev_err(dd, "Catastrophic software error, "
-                             "SendPIOAvailAddr written as %lx, "
-                             "read back as %llx\n",
-                             (unsigned long) dd->ipath_pioavailregs_phys,
-                             (unsigned long long) val);
-               ret = -EINVAL;
-               goto done;
-       }
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvbthqp, IPATH_KD_QP);
-
-       /*
-        * make sure we are not in freeze, and PIO send enabled, so
-        * writes to pbc happen
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask, 0ULL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                        ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control, 0ULL);
-
-       /*
-        * Bring up the link before the error clears below, since we expect
-        * serdes pll errors during this, the first time after reset.
-        */
-       if (bringup_link(dd)) {
-               dev_info(&dd->pcidev->dev, "Failed to bringup IB link\n");
-               ret = -ENETDOWN;
-               goto done;
-       }
-
-       /*
-        * clear any "expected" hwerrs from reset and/or initialization;
-        * clear any that aren't enabled (at least this once), and then
-        * set the enable mask
-        */
-       dd->ipath_f_init_hwerrors(dd);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear,
-                        ~0ULL&~INFINIPATH_HWE_MEMBISTFAILED);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrmask,
-                        dd->ipath_hwerrmask);
-
-       /* clear all */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, -1LL);
-       /* enable errors that are masked, at least this first time. */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-                        ~dd->ipath_maskederrs);
-       dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
-       dd->ipath_errormask =
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
-       /* clear any interrupts up to this point (ints still not enabled) */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
-
-       dd->ipath_f_tidtemplate(dd);
-
-       /*
-        * Set up the port 0 (kernel) rcvhdr q and egr TIDs.  If doing
-        * re-init, the simplest way to handle this is to free
-        * existing, and re-allocate.
-        * Need to re-create rest of port 0 portdata as well.
-        */
-       pd = dd->ipath_pd[0];
-       if (reinit) {
-               struct ipath_portdata *npd;
-
-               /*
-                * Alloc and init new ipath_portdata for port0,
-                * Then free old pd. Could lead to fragmentation, but also
-                * makes later support for hot-swap easier.
-                */
-               npd = create_portdata0(dd);
-               if (npd) {
-                       ipath_free_pddata(dd, pd);
-                       dd->ipath_pd[0] = npd;
-                       pd = npd;
-               } else {
-                       ipath_dev_err(dd, "Unable to allocate portdata"
-                                     " for port 0, failing\n");
-                       ret = -ENOMEM;
-                       goto done;
-               }
-       }
-       ret = ipath_create_rcvhdrq(dd, pd);
-       if (!ret)
-               ret = create_port0_egr(dd);
-       if (ret) {
-               ipath_dev_err(dd, "failed to allocate kernel port's "
-                             "rcvhdrq and/or egr bufs\n");
-               goto done;
-       } else {
-               enable_chip(dd, reinit);
-       }
-
-       /* after enable_chip, so pioavailshadow setup */
-       ipath_chg_pioavailkernel(dd, 0, piobufs, 1);
-
-       /*
-        * Cancel any possible active sends from early driver load.
-        * Follows early_init because some chips have to initialize
-        * PIO buffers in early_init to avoid false parity errors.
-        * After enable and ipath_chg_pioavailkernel so we can safely
-        * enable pioavail updates and PIOENABLE; packets are now
-        * ready to go out.
-        */
-       ipath_cancel_sends(dd, 1);
-
-       if (!reinit) {
-               /*
-                * Used when we close a port, for DMA already in flight
-                * at close.
-                */
-               dd->ipath_dummy_hdrq = dma_alloc_coherent(
-                       &dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
-                       &dd->ipath_dummy_hdrq_phys,
-                       gfp_flags);
-               if (!dd->ipath_dummy_hdrq) {
-                       dev_info(&dd->pcidev->dev,
-                               "Couldn't allocate 0x%lx bytes for dummy hdrq\n",
-                               dd->ipath_pd[0]->port_rcvhdrq_size);
-                       /* fallback to just 0'ing */
-                       dd->ipath_dummy_hdrq_phys = 0UL;
-               }
-       }
-
-       /*
-        * cause retrigger of pending interrupts ignored during init,
-        * even if we had errors
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-
-       if (!dd->ipath_stats_timer_active) {
-               /*
-                * first init, or after an admin disable/enable
-                * set up stats retrieval timer, even if we had errors
-                * in last portion of setup
-                */
-               setup_timer(&dd->ipath_stats_timer, ipath_get_faststats,
-                               (unsigned long)dd);
-               /* every 5 seconds; */
-               dd->ipath_stats_timer.expires = jiffies + 5 * HZ;
-               /* takes ~16 seconds to overflow at full IB 4x bandwidth */
-               add_timer(&dd->ipath_stats_timer);
-               dd->ipath_stats_timer_active = 1;
-       }
-
-       /* Set up SendDMA if chip supports it */
-       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-               ret = setup_sdma(dd);
-
-       /* Set up HoL state */
-       setup_timer(&dd->ipath_hol_timer, ipath_hol_event, (unsigned long)dd);
-
-       dd->ipath_hol_state = IPATH_HOL_UP;
-
-done:
-       if (!ret) {
-               *dd->ipath_statusp |= IPATH_STATUS_CHIP_PRESENT;
-               if (!dd->ipath_f_intrsetup(dd)) {
-                       /* now we can enable all interrupts from the chip */
-                       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
-                                        -1LL);
-                       /* force re-interrupt of any pending interrupts. */
-                       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear,
-                                        0ULL);
-                       /* chip is usable; mark it as initialized */
-                       *dd->ipath_statusp |= IPATH_STATUS_INITTED;
-
-                       /*
-                        * setup to verify we get an interrupt, and fallback
-                        * to an alternate if necessary and possible
-                        */
-                       if (!reinit) {
-                               setup_timer(&dd->ipath_intrchk_timer,
-                                               verify_interrupt,
-                                               (unsigned long)dd);
-                       }
-                       dd->ipath_intrchk_timer.expires = jiffies + HZ/2;
-                       add_timer(&dd->ipath_intrchk_timer);
-               } else
-                       ipath_dev_err(dd, "No interrupts enabled, couldn't "
-                                     "setup interrupt address\n");
-
-               if (dd->ipath_cfgports > ipath_stats.sps_nports)
-                       /*
-                        * sps_nports is a global, so, we set it to
-                        * the highest number of ports of any of the
-                        * chips we find; we never decrement it, at
-                        * least for now.  Since this might have changed
-                        * over disable/enable or prior to reset, always
-                        * do the check and potentially adjust.
-                        */
-                       ipath_stats.sps_nports = dd->ipath_cfgports;
-       } else
-               ipath_dbg("Failed (%d) to initialize chip\n", ret);
-
-       /*
-        * if ret is non-zero, we probably should do some cleanup here...
-        */
-       return ret;
-}
-
-static int ipath_set_kpiobufs(const char *str, struct kernel_param *kp)
-{
-       struct ipath_devdata *dd;
-       unsigned long flags;
-       unsigned short val;
-       int ret;
-
-       ret = ipath_parse_ushort(str, &val);
-
-       spin_lock_irqsave(&ipath_devs_lock, flags);
-
-       if (ret < 0)
-               goto bail;
-
-       if (val == 0) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       list_for_each_entry(dd, &ipath_dev_list, ipath_list) {
-               if (dd->ipath_kregbase)
-                       continue;
-               if (val > (dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
-                          (dd->ipath_cfgports *
-                           IPATH_MIN_USER_PORT_BUFCNT)))
-               {
-                       ipath_dev_err(
-                               dd,
-                               "Allocating %d PIO bufs for kernel leaves "
-                               "too few for %d user ports (%d each)\n",
-                               val, dd->ipath_cfgports - 1,
-                               IPATH_MIN_USER_PORT_BUFCNT);
-                       ret = -EINVAL;
-                       goto bail;
-               }
-               dd->ipath_lastport_piobuf =
-                       dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - val;
-       }
-
-       ipath_kpiobufs = val;
-       ret = 0;
-bail:
-       spin_unlock_irqrestore(&ipath_devs_lock, flags);
-
-       return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_intr.c b/drivers/staging/rdma/ipath/ipath_intr.c
deleted file mode 100644 (file)
index 0403fa2..0000000
+++ /dev/null
@@ -1,1271 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-
-/*
- * Called when we might have an error that is specific to a particular
- * PIO buffer, and may need to cancel that buffer, so it can be re-used.
- */
-void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
-{
-       u32 piobcnt;
-       unsigned long sbuf[4];
-       /*
-        * it's possible that sendbuffererror could have bits set; might
-        * have already done this as a result of hardware error handling
-        */
-       piobcnt = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-       /* read these before writing errorclear */
-       sbuf[0] = ipath_read_kreg64(
-               dd, dd->ipath_kregs->kr_sendbuffererror);
-       sbuf[1] = ipath_read_kreg64(
-               dd, dd->ipath_kregs->kr_sendbuffererror + 1);
-       if (piobcnt > 128)
-               sbuf[2] = ipath_read_kreg64(
-                       dd, dd->ipath_kregs->kr_sendbuffererror + 2);
-       if (piobcnt > 192)
-               sbuf[3] = ipath_read_kreg64(
-                       dd, dd->ipath_kregs->kr_sendbuffererror + 3);
-       else
-               sbuf[3] = 0;
-
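-       /*
-        * sbuf[] now acts as a bitmap of PIO buffers flagged with send
-        * errors; the loop below scans it with test_bit() and disarms
-        * each flagged buffer.
-        */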
-       if (sbuf[0] || sbuf[1] || (piobcnt > 128 && (sbuf[2] || sbuf[3]))) {
-               int i;
-               if (ipath_debug & (__IPATH_PKTDBG|__IPATH_DBG) &&
-                       time_after(dd->ipath_lastcancel, jiffies)) {
-                       __IPATH_DBG_WHICH(__IPATH_PKTDBG|__IPATH_DBG,
-                                         "SendbufErrs %lx %lx", sbuf[0],
-                                         sbuf[1]);
-                       if (ipath_debug & __IPATH_PKTDBG && piobcnt > 128)
-                               printk(" %lx %lx ", sbuf[2], sbuf[3]);
-                       printk("\n");
-               }
-
-               for (i = 0; i < piobcnt; i++)
-                       if (test_bit(i, sbuf))
-                               ipath_disarm_piobufs(dd, i, 1);
-               /* ignore armlaunch errs for a bit */
-               dd->ipath_lastcancel = jiffies+3;
-       }
-}
-
-
-/* These are all rcv-related errors which we want to count for stats */
-#define E_SUM_PKTERRS \
-       (INFINIPATH_E_RHDRLEN | INFINIPATH_E_RBADTID | \
-        INFINIPATH_E_RBADVERSION | INFINIPATH_E_RHDR | \
-        INFINIPATH_E_RLONGPKTLEN | INFINIPATH_E_RSHORTPKTLEN | \
-        INFINIPATH_E_RMAXPKTLEN | INFINIPATH_E_RMINPKTLEN | \
-        INFINIPATH_E_RFORMATERR | INFINIPATH_E_RUNSUPVL | \
-        INFINIPATH_E_RUNEXPCHAR | INFINIPATH_E_REBP)
-
-/* These are all send-related errors which we want to count for stats */
-#define E_SUM_ERRS \
-       (INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SUNEXPERRPKTNUM | \
-        INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
-        INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SUNSUPVL | \
-        INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
-        INFINIPATH_E_INVALIDADDR)
-
-/*
- * this is similar to E_SUM_ERRS, but we can't ignore armlaunch, and we
- * don't ignore errors not related to freeze and cancelling buffers.
- * We can't ignore armlaunch because we could get more while still
- * cleaning up, and we need to cancel those as they happen.
- */
-#define E_SPKT_ERRS_IGNORE \
-        (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
-        INFINIPATH_E_SMAXPKTLEN | INFINIPATH_E_SMINPKTLEN | \
-        INFINIPATH_E_SPKTLEN)
-
-/*
- * these are errors that can occur when the link changes state while
- * a packet is being sent or received.  This doesn't cover things
- * like EBP or VCRC that can be the result of a send occurring while the
- * link changes state, so we receive a "known bad" packet.
- */
-#define E_SUM_LINK_PKTERRS \
-       (INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_SDROPPEDSMPPKT | \
-        INFINIPATH_E_SMINPKTLEN | INFINIPATH_E_SPKTLEN | \
-        INFINIPATH_E_RSHORTPKTLEN | INFINIPATH_E_RMINPKTLEN | \
-        INFINIPATH_E_RUNEXPCHAR)
-
-static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
-{
-       u64 ignore_this_time = 0;
-
-       ipath_disarm_senderrbufs(dd);
-       if ((errs & E_SUM_LINK_PKTERRS) &&
-           !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-               /*
-                * This can happen when SMA is trying to bring the link
-                * up, but the IB link changes state at the "wrong" time.
-                * The IB logic then complains that the packet isn't
-                * valid.  We don't want to confuse people, so we just
-                * don't print them, except at debug
-                */
-               ipath_dbg("Ignoring packet errors %llx, because link not "
-                         "ACTIVE\n", (unsigned long long) errs);
-               ignore_this_time = errs & E_SUM_LINK_PKTERRS;
-       }
-
-       return ignore_this_time;
-}
-
-/* generic hw error messages... */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_MSG(a) \
-       { \
-               .mask = ( INFINIPATH_HWE_TXEMEMPARITYERR_##a <<    \
-                         INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT ),   \
-               .msg = "TXE " #a " Memory Parity"            \
-       }
-#define INFINIPATH_HWE_RXEMEMPARITYERR_MSG(a) \
-       { \
-               .mask = ( INFINIPATH_HWE_RXEMEMPARITYERR_##a <<    \
-                         INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT ),   \
-               .msg = "RXE " #a " Memory Parity"            \
-       }
-
-static const struct ipath_hwerror_msgs ipath_generic_hwerror_msgs[] = {
-       INFINIPATH_HWE_MSG(IBCBUSFRSPCPARITYERR, "IPATH2IB Parity"),
-       INFINIPATH_HWE_MSG(IBCBUSTOSPCPARITYERR, "IB2IPATH Parity"),
-
-       INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF),
-       INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOPBC),
-       INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOLAUNCHFIFO),
-
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(RCVBUF),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(LOOKUPQ),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EAGERTID),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(EXPTID),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(FLAGBUF),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(DATAINFO),
-       INFINIPATH_HWE_RXEMEMPARITYERR_MSG(HDRINFO),
-};
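-
-/*
- * For example, INFINIPATH_HWE_TXEMEMPARITYERR_MSG(PIOBUF) above expands to
- * { .mask = INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF <<
- *   INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT, .msg = "TXE PIOBUF Memory Parity" }.
- */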
-
-/**
- * ipath_format_hwmsg - format a single hwerror message
- * @msg: message buffer
- * @msgl: length of message buffer
- * @hwmsg: message to add to message buffer
- */
-static void ipath_format_hwmsg(char *msg, size_t msgl, const char *hwmsg)
-{
-       strlcat(msg, "[", msgl);
-       strlcat(msg, hwmsg, msgl);
-       strlcat(msg, "]", msgl);
-}
-
-/**
- * ipath_format_hwerrors - format hardware error messages for display
- * @hwerrs: hardware errors bit vector
- * @hwerrmsgs: hardware error descriptions
- * @nhwerrmsgs: number of hwerrmsgs
- * @msg: message buffer
- * @msgl: message buffer length
- */
-void ipath_format_hwerrors(u64 hwerrs,
-                          const struct ipath_hwerror_msgs *hwerrmsgs,
-                          size_t nhwerrmsgs,
-                          char *msg, size_t msgl)
-{
-       int i;
-       const int glen =
-           ARRAY_SIZE(ipath_generic_hwerror_msgs);
-
-       for (i = 0; i < glen; i++) {
-               if (hwerrs & ipath_generic_hwerror_msgs[i].mask) {
-                       ipath_format_hwmsg(msg, msgl,
-                                          ipath_generic_hwerror_msgs[i].msg);
-               }
-       }
-
-       for (i = 0; i < nhwerrmsgs; i++) {
-               if (hwerrs & hwerrmsgs[i].mask) {
-                       ipath_format_hwmsg(msg, msgl, hwerrmsgs[i].msg);
-               }
-       }
-}
-
-/* return the strings for the most common link states */
-static char *ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
-{
-       char *ret;
-       u32 state;
-
-       state = ipath_ib_state(dd, ibcs);
-       if (state == dd->ib_init)
-               ret = "Init";
-       else if (state == dd->ib_arm)
-               ret = "Arm";
-       else if (state == dd->ib_active)
-               ret = "Active";
-       else
-               ret = "Down";
-       return ret;
-}
-
-void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev)
-{
-       struct ib_event event;
-
-       event.device = &dd->verbs_dev->ibdev;
-       event.element.port_num = 1;
-       event.event = ev;
-       ib_dispatch_event(&event);
-}
-
-static void handle_e_ibstatuschanged(struct ipath_devdata *dd,
-                                    ipath_err_t errs)
-{
-       u32 ltstate, lstate, ibstate, lastlstate;
-       u32 init = dd->ib_init;
-       u32 arm = dd->ib_arm;
-       u32 active = dd->ib_active;
-       const u64 ibcs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_ibcstatus);
-
-       lstate = ipath_ib_linkstate(dd, ibcs); /* linkstate */
-       ibstate = ipath_ib_state(dd, ibcs);
-       /* linkstate at last interrupt */
-       lastlstate = ipath_ib_linkstate(dd, dd->ipath_lastibcstat);
-       ltstate = ipath_ib_linktrstate(dd, ibcs); /* linktrainingstate */
-
-       /*
-        * Since going into a recovery state causes the link state to go
-        * down and since recovery is transitory, it is better if we "miss"
-        * ever seeing the link training state go into recovery (i.e.,
-        * ignore this transition for link state special handling purposes)
-        * without even updating ipath_lastibcstat.
-        */
-       if ((ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN) ||
-           (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT) ||
-           (ltstate == INFINIPATH_IBCS_LT_STATE_RECOVERIDLE))
-               goto done;
-
-       /*
-        * if linkstate transitions into INIT from any of the various down
-        * states, or if it transitions from any of the up (INIT or better)
-        * states into any of the down states (except link recovery), then
-        * call the chip-specific code to take appropriate actions.
-        */
-       if (lstate >= INFINIPATH_IBCS_L_STATE_INIT &&
-               lastlstate == INFINIPATH_IBCS_L_STATE_DOWN) {
-               /* transitioned to UP */
-               if (dd->ipath_f_ib_updown(dd, 1, ibcs)) {
-                       /* link came up, so we must no longer be disabled */
-                       dd->ipath_flags &= ~IPATH_IB_LINK_DISABLED;
-                       ipath_cdbg(LINKVERB, "LinkUp handled, skipped\n");
-                       goto skip_ibchange; /* chip-code handled */
-               }
-       } else if ((lastlstate >= INFINIPATH_IBCS_L_STATE_INIT ||
-               (dd->ipath_flags & IPATH_IB_FORCE_NOTIFY)) &&
-               ltstate <= INFINIPATH_IBCS_LT_STATE_CFGWAITRMT &&
-               ltstate != INFINIPATH_IBCS_LT_STATE_LINKUP) {
-               int handled;
-               handled = dd->ipath_f_ib_updown(dd, 0, ibcs);
-               dd->ipath_flags &= ~IPATH_IB_FORCE_NOTIFY;
-               if (handled) {
-                       ipath_cdbg(LINKVERB, "LinkDown handled, skipped\n");
-                       goto skip_ibchange; /* chip-code handled */
-               }
-       }
-
-       /*
-        * Significant enough to always print and get into logs, if it was
-        * unexpected.  If it was a requested state change, we'll have
-        * unexpected.  If it was a requested state change, we'll have
-        * already cleared the flags, so we won't print this warning.
-       if ((ibstate != arm && ibstate != active) &&
-           (dd->ipath_flags & (IPATH_LINKARMED | IPATH_LINKACTIVE))) {
-               dev_info(&dd->pcidev->dev, "Link state changed from %s "
-                        "to %s\n", (dd->ipath_flags & IPATH_LINKARMED) ?
-                        "ARM" : "ACTIVE", ib_linkstate(dd, ibcs));
-       }
-
-       if (ltstate == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
-           ltstate == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-               u32 lastlts;
-               lastlts = ipath_ib_linktrstate(dd, dd->ipath_lastibcstat);
-               /*
-                * Ignore cycling back and forth from Polling.Active to
-                * Polling.Quiet while waiting for the other end of the link
-                * to come up, except to try and decide if we are connected
-                * to a live IB device or not.  We will cycle back and
-                * forth between them if no cable is plugged in, the other
-                * device is powered off or disabled, etc.
-                */
-               if (lastlts == INFINIPATH_IBCS_LT_STATE_POLLACTIVE ||
-                   lastlts == INFINIPATH_IBCS_LT_STATE_POLLQUIET) {
-                       if (!(dd->ipath_flags & IPATH_IB_AUTONEG_INPROG) &&
-                            (++dd->ipath_ibpollcnt == 40)) {
-                               dd->ipath_flags |= IPATH_NOCABLE;
-                               *dd->ipath_statusp |=
-                                       IPATH_STATUS_IB_NOCABLE;
-                               ipath_cdbg(LINKVERB, "Set NOCABLE\n");
-                       }
-                       ipath_cdbg(LINKVERB, "POLL change to %s (%x)\n",
-                               ipath_ibcstatus_str[ltstate], ibstate);
-                       goto skip_ibchange;
-               }
-       }
-
-       dd->ipath_ibpollcnt = 0; /* no longer in a Poll* state */
-       ipath_stats.sps_iblink++;
-
-       if (ibstate != init && dd->ipath_lastlinkrecov && ipath_linkrecovery) {
-               u64 linkrecov;
-               linkrecov = ipath_snap_cntr(dd,
-                       dd->ipath_cregs->cr_iblinkerrrecovcnt);
-               if (linkrecov != dd->ipath_lastlinkrecov) {
-                       ipath_dbg("IB linkrecov up %Lx (%s %s) recov %Lu\n",
-                               (unsigned long long) ibcs,
-                               ib_linkstate(dd, ibcs),
-                               ipath_ibcstatus_str[ltstate],
-                               (unsigned long long) linkrecov);
-                       /* and no more until active again */
-                       dd->ipath_lastlinkrecov = 0;
-                       ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
-                       goto skip_ibchange;
-               }
-       }
-
-       if (ibstate == init || ibstate == arm || ibstate == active) {
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_NOCABLE;
-               if (ibstate == init || ibstate == arm) {
-                       *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-                       if (dd->ipath_flags & IPATH_LINKACTIVE)
-                               signal_ib_event(dd, IB_EVENT_PORT_ERR);
-               }
-               if (ibstate == arm) {
-                       dd->ipath_flags |= IPATH_LINKARMED;
-                       dd->ipath_flags &= ~(IPATH_LINKUNK |
-                               IPATH_LINKINIT | IPATH_LINKDOWN |
-                               IPATH_LINKACTIVE | IPATH_NOCABLE);
-                       ipath_hol_down(dd);
-               } else  if (ibstate == init) {
-                       /*
-                        * set INIT and DOWN.  Down is checked by
-                        * most of the other code, but INIT is
-                        * useful to know in a few places.
-                        */
-                       dd->ipath_flags |= IPATH_LINKINIT |
-                               IPATH_LINKDOWN;
-                       dd->ipath_flags &= ~(IPATH_LINKUNK |
-                               IPATH_LINKARMED | IPATH_LINKACTIVE |
-                               IPATH_NOCABLE);
-                       ipath_hol_down(dd);
-               } else {  /* active */
-                       dd->ipath_lastlinkrecov = ipath_snap_cntr(dd,
-                               dd->ipath_cregs->cr_iblinkerrrecovcnt);
-                       *dd->ipath_statusp |=
-                               IPATH_STATUS_IB_READY | IPATH_STATUS_IB_CONF;
-                       dd->ipath_flags |= IPATH_LINKACTIVE;
-                       dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
-                               | IPATH_LINKDOWN | IPATH_LINKARMED |
-                               IPATH_NOCABLE);
-                       if (dd->ipath_flags & IPATH_HAS_SEND_DMA)
-                               ipath_restart_sdma(dd);
-                       signal_ib_event(dd, IB_EVENT_PORT_ACTIVE);
-                       /* LED active not handled in chip _f_updown */
-                       dd->ipath_f_setextled(dd, lstate, ltstate);
-                       ipath_hol_up(dd);
-               }
-
-               /*
-                * print after we've already done the work, so as not to
-                * delay the state changes and notifications, for debugging
-                */
-               if (lstate == lastlstate)
-                       ipath_cdbg(LINKVERB, "Unchanged from last: %s "
-                               "(%x)\n", ib_linkstate(dd, ibcs), ibstate);
-               else
-                       ipath_cdbg(VERBOSE, "Unit %u: link up to %s %s (%x)\n",
-                                 dd->ipath_unit, ib_linkstate(dd, ibcs),
-                                 ipath_ibcstatus_str[ltstate],  ibstate);
-       } else { /* down */
-               if (dd->ipath_flags & IPATH_LINKACTIVE)
-                       signal_ib_event(dd, IB_EVENT_PORT_ERR);
-               dd->ipath_flags |= IPATH_LINKDOWN;
-               dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
-                                    | IPATH_LINKACTIVE |
-                                    IPATH_LINKARMED);
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-               dd->ipath_lli_counter = 0;
-
-               if (lastlstate != INFINIPATH_IBCS_L_STATE_DOWN)
-                       ipath_cdbg(VERBOSE, "Unit %u link state down "
-                                  "(state 0x%x), from %s\n",
-                                  dd->ipath_unit, lstate,
-                                  ib_linkstate(dd, dd->ipath_lastibcstat));
-               else
-                       ipath_cdbg(LINKVERB, "Unit %u link state changed "
-                                  "to %s (0x%x) from down (%x)\n",
-                                  dd->ipath_unit,
-                                  ipath_ibcstatus_str[ltstate],
-                                  ibstate, lastlstate);
-       }
-
-skip_ibchange:
-       dd->ipath_lastibcstat = ibcs;
-done:
-       return;
-}
-
-static void handle_supp_msgs(struct ipath_devdata *dd,
-                            unsigned supp_msgs, char *msg, u32 msgsz)
-{
-       /*
-        * Print the message unless it's ibc status change only, which
-        * happens so often we never want to count it.
-        */
-       if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
-               int iserr;
-               ipath_err_t mask;
-               iserr = ipath_decode_err(dd, msg, msgsz,
-                                        dd->ipath_lasterror &
-                                        ~INFINIPATH_E_IBSTATUSCHANGED);
-
-               mask = INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-                       INFINIPATH_E_PKTERRS | INFINIPATH_E_SDMADISABLED;
-
-               /* if we're in debug, then don't mask SDMADISABLED msgs */
-               if (ipath_debug & __IPATH_DBG)
-                       mask &= ~INFINIPATH_E_SDMADISABLED;
-
-               if (dd->ipath_lasterror & ~mask)
-                       ipath_dev_err(dd, "Suppressed %u messages for "
-                                     "fast-repeating errors (%s) (%llx)\n",
-                                     supp_msgs, msg,
-                                     (unsigned long long)
-                                     dd->ipath_lasterror);
-               else {
-                       /*
-                        * rcvegrfull and rcvhdrqfull are "normal", for some
-                        * types of processes (mostly benchmarks) that send
-                        * huge numbers of messages, while not processing
-                        * them. So only complain about these at debug
-                        * level.
-                        */
-                       if (iserr)
-                               ipath_dbg("Suppressed %u messages for %s\n",
-                                         supp_msgs, msg);
-                       else
-                               ipath_cdbg(ERRPKT,
-                                       "Suppressed %u messages for %s\n",
-                                         supp_msgs, msg);
-               }
-       }
-}
-
-static unsigned handle_frequent_errors(struct ipath_devdata *dd,
-                                      ipath_err_t errs, char *msg,
-                                      u32 msgsz, int *noprint)
-{
-       unsigned long nc;
-       static unsigned long nextmsg_time;
-       static unsigned nmsgs, supp_msgs;
-
-       /*
-        * Throttle back "fast" messages to no more than 10 per 5 seconds.
-        * This isn't perfect, but it's a reasonable heuristic. If we get
-        * more than 10, give a 6x longer delay.
-        */
-       nc = jiffies;
-       if (nmsgs > 10) {
-               if (time_before(nc, nextmsg_time)) {
-                       *noprint = 1;
-                       if (!supp_msgs++)
-                               nextmsg_time = nc + HZ * 3;
-               } else if (supp_msgs) {
-                       handle_supp_msgs(dd, supp_msgs, msg, msgsz);
-                       supp_msgs = 0;
-                       nmsgs = 0;
-               }
-       } else if (!nmsgs++ || time_after(nc, nextmsg_time)) {
-               nextmsg_time = nc + HZ / 2;
-       }
-
-       return supp_msgs;
-}
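
handle_frequent_errors() above is a jiffies-based rate limiter: it allows a burst of messages, suppresses further ones until a deadline passes, and then reports how many were dropped via handle_supp_msgs(). A minimal standalone sketch of the same pattern, with hypothetical names (ratelimited_warn and the rl_* statics are not driver code); current kernels would more likely reach for printk_ratelimited():

#include <linux/jiffies.h>
#include <linux/printk.h>

/* Illustrative only: burst-then-suppress throttling on jiffies deadlines. */
static void ratelimited_warn(const char *what)
{
	static unsigned long rl_deadline;
	static unsigned rl_count, rl_suppressed;

	if (rl_count > 10) {
		if (time_before(jiffies, rl_deadline)) {
			/* too chatty: drop it; first drop pushes the deadline out */
			if (!rl_suppressed++)
				rl_deadline = jiffies + 3 * HZ;
			return;
		}
		if (rl_suppressed)
			pr_warn("suppressed %u messages like \"%s\"\n",
				rl_suppressed, what);
		rl_suppressed = 0;
		rl_count = 0;
	} else if (!rl_count++ || time_after(jiffies, rl_deadline)) {
		rl_deadline = jiffies + HZ / 2;
	}

	pr_warn("%s\n", what);
}
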
-
-static void handle_sdma_errors(struct ipath_devdata *dd, ipath_err_t errs)
-{
-       unsigned long flags;
-       int expected;
-
-       if (ipath_debug & __IPATH_DBG) {
-               char msg[128];
-               ipath_decode_err(dd, msg, sizeof msg, errs &
-                       INFINIPATH_E_SDMAERRS);
-               ipath_dbg("errors %lx (%s)\n", (unsigned long)errs, msg);
-       }
-       if (ipath_debug & __IPATH_VERBDBG) {
-               unsigned long tl, hd, status, lengen;
-               tl = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
-               hd = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
-               status = ipath_read_kreg64(dd,
-                       dd->ipath_kregs->kr_senddmastatus);
-               lengen = ipath_read_kreg64(dd,
-                       dd->ipath_kregs->kr_senddmalengen);
-               ipath_cdbg(VERBOSE, "sdma tl 0x%lx hd 0x%lx status 0x%lx "
-                       "lengen 0x%lx\n", tl, hd, status, lengen);
-       }
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-       expected = test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-       if (!expected)
-               ipath_cancel_sends(dd, 1);
-}
-
-static void handle_sdma_intr(struct ipath_devdata *dd, u64 istat)
-{
-       unsigned long flags;
-       int expected;
-
-       if ((istat & INFINIPATH_I_SDMAINT) &&
-           !test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-               ipath_sdma_intr(dd);
-
-       if (istat & INFINIPATH_I_SDMADISABLED) {
-               expected = test_bit(IPATH_SDMA_ABORTING,
-                       &dd->ipath_sdma_status);
-               ipath_dbg("%s SDmaDisabled intr\n",
-                       expected ? "expected" : "unexpected");
-               spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-               __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-               spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-               if (!expected)
-                       ipath_cancel_sends(dd, 1);
-               if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-                       tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
-       }
-}
-
-static int handle_hdrq_full(struct ipath_devdata *dd)
-{
-       int chkerrpkts = 0;
-       u32 hd, tl;
-       u32 i;
-
-       ipath_stats.sps_hdrqfull++;
-       for (i = 0; i < dd->ipath_cfgports; i++) {
-               struct ipath_portdata *pd = dd->ipath_pd[i];
-
-               if (i == 0) {
-                       /*
-                        * For kernel receive queues, we just want to know
-                        * if there are packets in the queue that we can
-                        * process.
-                        */
-                       if (pd->port_head != ipath_get_hdrqtail(pd))
-                               chkerrpkts |= 1 << i;
-                       continue;
-               }
-
-               /* Skip if user context is not open */
-               if (!pd || !pd->port_cnt)
-                       continue;
-
-               /* Don't report the same point multiple times. */
-               if (dd->ipath_flags & IPATH_NODMA_RTAIL)
-                       tl = ipath_read_ureg32(dd, ur_rcvhdrtail, i);
-               else
-                       tl = ipath_get_rcvhdrtail(pd);
-               if (tl == pd->port_lastrcvhdrqtail)
-                       continue;
-
-               hd = ipath_read_ureg32(dd, ur_rcvhdrhead, i);
-               if (hd == (tl + 1) || (!hd && tl == dd->ipath_hdrqlast)) {
-                       pd->port_lastrcvhdrqtail = tl;
-                       pd->port_hdrqfull++;
-                       /* flush hdrqfull so that poll() sees it */
-                       wmb();
-                       wake_up_interruptible(&pd->port_wait);
-               }
-       }
-
-       return chkerrpkts;
-}
-
-static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
-{
-       char msg[128];
-       u64 ignore_this_time = 0;
-       u64 iserr = 0;
-       int chkerrpkts = 0, noprint = 0;
-       unsigned supp_msgs;
-       int log_idx;
-
-       /*
-        * don't report errors that are masked, either at init
-        * (not set in ipath_errormask), or temporarily (set in
-        * ipath_maskederrs)
-        */
-       errs &= dd->ipath_errormask & ~dd->ipath_maskederrs;
-
-       supp_msgs = handle_frequent_errors(dd, errs, msg, (u32)sizeof msg,
-               &noprint);
-
-       /* do these first, they are most important */
-       if (errs & INFINIPATH_E_HARDWARE) {
-               /* reuse same msg buf */
-               dd->ipath_f_handle_hwerrors(dd, msg, sizeof msg);
-       } else {
-               u64 mask;
-               for (log_idx = 0; log_idx < IPATH_EEP_LOG_CNT; ++log_idx) {
-                       mask = dd->ipath_eep_st_masks[log_idx].errs_to_log;
-                       if (errs & mask)
-                               ipath_inc_eeprom_err(dd, log_idx, 1);
-               }
-       }
-
-       if (errs & INFINIPATH_E_SDMAERRS)
-               handle_sdma_errors(dd, errs);
-
-       if (!noprint && (errs & ~dd->ipath_e_bitsextant))
-               ipath_dev_err(dd, "error interrupt with unknown errors "
-                             "%llx set\n", (unsigned long long)
-                             (errs & ~dd->ipath_e_bitsextant));
-
-       if (errs & E_SUM_ERRS)
-               ignore_this_time = handle_e_sum_errs(dd, errs);
-       else if ((errs & E_SUM_LINK_PKTERRS) &&
-           !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-               /*
-                * This can happen when SMA is trying to bring the link
-                * up, but the IB link changes state at the "wrong" time.
-                * The IB logic then complains that the packet isn't
-                * valid.  We don't want to confuse people, so we just
-                * don't print them, except at debug
-                */
-               ipath_dbg("Ignoring packet errors %llx, because link not "
-                         "ACTIVE\n", (unsigned long long) errs);
-               ignore_this_time = errs & E_SUM_LINK_PKTERRS;
-       }
-
-       if (supp_msgs == 250000) {
-               int s_iserr;
-               /*
-                * It's not entirely reasonable to assume that the errors set
-                * in the last clear period are all responsible for the
-                * problem, but the alternative, assuming only the ones on
-                * this particular interrupt are, isn't great either.
-                */
-               dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
-
-               dd->ipath_errormask &= ~dd->ipath_maskederrs;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-                                dd->ipath_errormask);
-               s_iserr = ipath_decode_err(dd, msg, sizeof msg,
-                                          dd->ipath_maskederrs);
-
-               if (dd->ipath_maskederrs &
-                   ~(INFINIPATH_E_RRCVEGRFULL |
-                     INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
-                       ipath_dev_err(dd, "Temporarily disabling "
-                           "error(s) %llx reporting; too frequent (%s)\n",
-                               (unsigned long long) dd->ipath_maskederrs,
-                               msg);
-               else {
-                       /*
-                        * rcvegrfull and rcvhdrqfull are "normal",
-                        * for some types of processes (mostly benchmarks)
-                        * that send huge numbers of messages, while not
-                        * processing them.  So only complain about
-                        * these at debug level.
-                        */
-                       if (s_iserr)
-                               ipath_dbg("Temporarily disabling reporting "
-                                   "too frequent queue full errors (%s)\n",
-                                   msg);
-                       else
-                               ipath_cdbg(ERRPKT,
-                                   "Temporarily disabling reporting too"
-                                   " frequent packet errors (%s)\n",
-                                   msg);
-               }
-
-               /*
-                * Re-enable the masked errors after around 3 minutes, in
-                * ipath_get_faststats().  If we have a series of fast
-                * repeating but different errors, the interval will keep
-                * stretching out, but that's OK, since such a stream of
-                * errors is pretty catastrophic anyway.
-                */
-               dd->ipath_unmasktime = jiffies + HZ * 180;
-       }
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, errs);
-       if (ignore_this_time)
-               errs &= ~ignore_this_time;
-       if (errs & ~dd->ipath_lasterror) {
-               errs &= ~dd->ipath_lasterror;
-               /* never suppress duplicate hwerrors or ibstatuschange */
-               dd->ipath_lasterror |= errs &
-                       ~(INFINIPATH_E_HARDWARE |
-                         INFINIPATH_E_IBSTATUSCHANGED);
-       }
-
-       if (errs & INFINIPATH_E_SENDSPECIALTRIGGER) {
-               dd->ipath_spectriggerhit++;
-               ipath_dbg("%lu special trigger hits\n",
-                       dd->ipath_spectriggerhit);
-       }
-
-       /* likely due to cancel; so suppress message unless verbose */
-       if ((errs & (INFINIPATH_E_SPKTLEN | INFINIPATH_E_SPIOARMLAUNCH)) &&
-               time_after(dd->ipath_lastcancel, jiffies)) {
-               /* armlaunch takes precedence; it often causes both. */
-               ipath_cdbg(VERBOSE,
-                       "Suppressed %s error (%llx) after sendbuf cancel\n",
-                       (errs &  INFINIPATH_E_SPIOARMLAUNCH) ?
-                       "armlaunch" : "sendpktlen", (unsigned long long)errs);
-               errs &= ~(INFINIPATH_E_SPIOARMLAUNCH | INFINIPATH_E_SPKTLEN);
-       }
-
-       if (!errs)
-               return 0;
-
-       if (!noprint) {
-               ipath_err_t mask;
-               /*
-                * The ones we mask off are handled specially below
-                * or above.  Also mask SDMADISABLED by default as it
-                * is too chatty.
-                */
-               mask = INFINIPATH_E_IBSTATUSCHANGED |
-                       INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-                       INFINIPATH_E_HARDWARE | INFINIPATH_E_SDMADISABLED;
-
-               /* if we're in debug, then don't mask SDMADISABLED msgs */
-               if (ipath_debug & __IPATH_DBG)
-                       mask &= ~INFINIPATH_E_SDMADISABLED;
-
-               ipath_decode_err(dd, msg, sizeof msg, errs & ~mask);
-       } else
-               /* so we don't need if (!noprint) at strlcat's below */
-               *msg = 0;
-
-       if (errs & E_SUM_PKTERRS) {
-               ipath_stats.sps_pkterrs++;
-               chkerrpkts = 1;
-       }
-       if (errs & E_SUM_ERRS)
-               ipath_stats.sps_errs++;
-
-       if (errs & (INFINIPATH_E_RICRC | INFINIPATH_E_RVCRC)) {
-               ipath_stats.sps_crcerrs++;
-               chkerrpkts = 1;
-       }
-       iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);
-
-
-       /*
-        * We don't want to print these two as they happen, or we can make
-        * the situation even worse, because it takes so long to print
-        * messages to serial consoles.  Kernel ports get printed from
-        * fast_stats, no more than every 5 seconds, user ports get printed
-        * on close
-        */
-       if (errs & INFINIPATH_E_RRCVHDRFULL)
-               chkerrpkts |= handle_hdrq_full(dd);
-       if (errs & INFINIPATH_E_RRCVEGRFULL) {
-               struct ipath_portdata *pd = dd->ipath_pd[0];
-
-               /*
-                * since this is of less importance and not likely to
-                * happen without also getting hdrfull, only count
-                * occurrences; don't check each port (or even the kernel
-                * vs user)
-                */
-               ipath_stats.sps_etidfull++;
-               if (pd->port_head != ipath_get_hdrqtail(pd))
-                       chkerrpkts |= 1;
-       }
-
-       /*
-        * do this before IBSTATUSCHANGED, in case both bits set in a single
-        * interrupt; we want the STATUSCHANGE to "win", so that our
-        * internal copy of the state machine is updated correctly
-        */
-       if (errs & INFINIPATH_E_RIBLOSTLINK) {
-               /*
-                * force through block below
-                */
-               errs |= INFINIPATH_E_IBSTATUSCHANGED;
-               ipath_stats.sps_iblink++;
-               dd->ipath_flags |= IPATH_LINKDOWN;
-               dd->ipath_flags &= ~(IPATH_LINKUNK | IPATH_LINKINIT
-                                    | IPATH_LINKARMED | IPATH_LINKACTIVE);
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
-
-               ipath_dbg("Lost link, link now down (%s)\n",
-                       ipath_ibcstatus_str[ipath_read_kreg64(dd,
-                       dd->ipath_kregs->kr_ibcstatus) & 0xf]);
-       }
-       if (errs & INFINIPATH_E_IBSTATUSCHANGED)
-               handle_e_ibstatuschanged(dd, errs);
-
-       if (errs & INFINIPATH_E_RESET) {
-               if (!noprint)
-                       ipath_dev_err(dd, "Got reset, requires re-init "
-                                     "(unload and reload driver)\n");
-               dd->ipath_flags &= ~IPATH_INITTED;      /* needs re-init */
-               /* mark as having had error */
-               *dd->ipath_statusp |= IPATH_STATUS_HWERROR;
-               *dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
-       }
-
-       if (!noprint && *msg) {
-               if (iserr)
-                       ipath_dev_err(dd, "%s error\n", msg);
-       }
-       if (dd->ipath_state_wanted & dd->ipath_flags) {
-               ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
-                          "waking\n", dd->ipath_state_wanted,
-                          dd->ipath_flags);
-               wake_up_interruptible(&ipath_state_wait);
-       }
-
-       return chkerrpkts;
-}
-
-/*
- * try to cleanup as much as possible for anything that might have gone
- * wrong while in freeze mode, such as pio buffers being written by user
- * processes (causing armlaunch), send errors due to going into freeze mode,
- * etc., and try to avoid causing extra interrupts while doing so.
- * Forcibly update the in-memory pioavail register copies after cleanup
- * because the chip won't do it while in freeze mode (the register values
- * themselves are kept correct).
- * Make sure that we don't lose any important interrupts by using the chip
- * feature that says that writing 0 to a bit in *clear that is set in
- * *status will cause an interrupt to be generated again (if allowed by
- * the *mask value).
- */
-void ipath_clear_freeze(struct ipath_devdata *dd)
-{
-       /* disable error interrupts, to avoid confusion */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask, 0ULL);
-
-       /* also disable interrupts; errormask is sometimes overwritten */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-
-       ipath_cancel_sends(dd, 1);
-
-       /* clear the freeze, and be sure chip saw it */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
-                        dd->ipath_control);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-
-       /* force in-memory update now we are out of freeze */
-       ipath_force_pio_avail_update(dd);
-
-       /*
-        * force new interrupt if any hwerr, error or interrupt bits are
-        * still set, and clear "safe" send packet errors related to freeze
-        * and cancelling sends.  Re-enable error interrupts before possible
-        * force of re-interrupt on pending interrupts.
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
-               E_SPKT_ERRS_IGNORE);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-               dd->ipath_errormask);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, -1LL);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-}
-
-
-/* this is separate to allow for better optimization of ipath_intr() */
-
-static noinline void ipath_bad_intr(struct ipath_devdata *dd, u32 *unexpectp)
-{
-       /*
-        * These sometimes happen during driver init and unload; we don't
-        * want to process any interrupts at that point.
-        */
-
-       /* this is just a band-aid, not a fix, for cases where something
-        * goes badly wrong */
-       if (++*unexpectp > 100) {
-               if (++*unexpectp > 105) {
-                       /*
-                        * ok, we must be taking somebody else's interrupts,
-                        * due to a messed up mptable and/or PIRQ table, so
-                        * unregister the interrupt.  We've seen this during
-                        * linuxbios development work, and it may happen in
-                        * the future again.
-                        */
-                       if (dd->pcidev && dd->ipath_irq) {
-                               ipath_dev_err(dd, "Now %u unexpected "
-                                             "interrupts, unregistering "
-                                             "interrupt handler\n",
-                                             *unexpectp);
-                               ipath_dbg("free_irq of irq %d\n",
-                                         dd->ipath_irq);
-                               dd->ipath_f_free_irq(dd);
-                       }
-               }
-               if (ipath_read_ireg(dd, dd->ipath_kregs->kr_intmask)) {
-                       ipath_dev_err(dd, "%u unexpected interrupts, "
-                                     "disabling interrupts completely\n",
-                                     *unexpectp);
-                       /*
-                        * disable all interrupts, something is very wrong
-                        */
-                       ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask,
-                                        0ULL);
-               }
-       } else if (*unexpectp > 1)
-               ipath_dbg("Interrupt when not ready, should not happen, "
-                         "ignoring\n");
-}
-
-static noinline void ipath_bad_regread(struct ipath_devdata *dd)
-{
-       static int allbits;
-
-       /* separate routine, for better optimization of ipath_intr() */
-
-       /*
-        * We print the message and disable interrupts, in hope of
-        * having a better chance of debugging the problem.
-        */
-       ipath_dev_err(dd,
-                     "Read of interrupt status failed (all bits set)\n");
-       if (allbits++) {
-               /* disable all interrupts, something is very wrong */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_intmask, 0ULL);
-               if (allbits == 2) {
-                       ipath_dev_err(dd, "Still bad interrupt status, "
-                                     "unregistering interrupt\n");
-                       dd->ipath_f_free_irq(dd);
-               } else if (allbits > 2) {
-                       if ((allbits % 10000) == 0)
-                               printk(".");
-               } else
-                       ipath_dev_err(dd, "Disabling interrupts, "
-                                     "multiple errors\n");
-       }
-}
-
-static void handle_layer_pioavail(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-       int ret;
-
-       ret = ipath_ib_piobufavail(dd->verbs_dev);
-       if (ret > 0)
-               goto set;
-
-       return;
-set:
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                        dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-}
-
-/*
- * Handle receive interrupts for user ports; this means a user
- * process was waiting for a packet to arrive, and didn't want
- * to poll
- */
-static void handle_urcv(struct ipath_devdata *dd, u64 istat)
-{
-       u64 portr;
-       int i;
-       int rcvdint = 0;
-
-       /*
-        * test_and_clear_bit(IPATH_PORT_WAITING_RCV) and
-        * test_and_clear_bit(IPATH_PORT_WAITING_URG) below
-        * would both like timely updates of the bits so that
-        * we don't pass them by unnecessarily.  The rmb()
-        * here ensures that we see them promptly -- the
-        * corresponding wmb()'s are in ipath_poll_urgent()
-        * and ipath_poll_next()...
-        */
-       rmb();
-       portr = ((istat >> dd->ipath_i_rcvavail_shift) &
-                dd->ipath_i_rcvavail_mask) |
-               ((istat >> dd->ipath_i_rcvurg_shift) &
-                dd->ipath_i_rcvurg_mask);
-       for (i = 1; i < dd->ipath_cfgports; i++) {
-               struct ipath_portdata *pd = dd->ipath_pd[i];
-
-               if (portr & (1 << i) && pd && pd->port_cnt) {
-                       if (test_and_clear_bit(IPATH_PORT_WAITING_RCV,
-                                              &pd->port_flag)) {
-                               clear_bit(i + dd->ipath_r_intravail_shift,
-                                         &dd->ipath_rcvctrl);
-                               wake_up_interruptible(&pd->port_wait);
-                               rcvdint = 1;
-                       } else if (test_and_clear_bit(IPATH_PORT_WAITING_URG,
-                                                     &pd->port_flag)) {
-                               pd->port_urgent++;
-                               wake_up_interruptible(&pd->port_wait);
-                       }
-               }
-       }
-       if (rcvdint) {
-               /* only want to take one interrupt, so turn off the rcv
-                * interrupt for all the ports for which we set rcv_waiting
-                * (but never for the kernel port)
-                */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
-                                dd->ipath_rcvctrl);
-       }
-}
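
The comment in handle_urcv() describes a wmb()/rmb() pairing: the poll side publishes its IPATH_PORT_WAITING_* bit with wmb() before sleeping, and the interrupt side issues rmb() before testing the bit. A minimal sketch of that pairing, with made-up names (waiter_arm, irq_check_and_wake) and no ties to the ipath structures:

#include <linux/bitops.h>
#include <linux/wait.h>
#include <asm/barrier.h>

/* Waiter side: set the "waiting" bit, then make it visible before sleeping. */
static void waiter_arm(unsigned long *flag)
{
	set_bit(0, flag);	/* stands in for IPATH_PORT_WAITING_RCV */
	wmb();
}

/* Interrupt side: pair with the waiter's wmb(), then test-and-clear and wake. */
static int irq_check_and_wake(unsigned long *flag, wait_queue_head_t *wq)
{
	rmb();
	if (test_and_clear_bit(0, flag)) {
		wake_up_interruptible(wq);
		return 1;
	}
	return 0;
}
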
-
-irqreturn_t ipath_intr(int irq, void *data)
-{
-       struct ipath_devdata *dd = data;
-       u64 istat, chk0rcv = 0;
-       ipath_err_t estat = 0;
-       irqreturn_t ret;
-       static unsigned unexpected = 0;
-       u64 kportrbits;
-
-       ipath_stats.sps_ints++;
-
-       if (dd->ipath_int_counter != (u32) -1)
-               dd->ipath_int_counter++;
-
-       if (!(dd->ipath_flags & IPATH_PRESENT)) {
-               /*
-                * This return value is not great, but we do not want the
-                * interrupt core code to remove our interrupt handler
-                * because we don't appear to be handling an interrupt
-                * during a chip reset.
-                */
-               return IRQ_HANDLED;
-       }
-
-       /*
-        * this needs to be flags&initted, not statusp, so we keep
-        * taking interrupts even after link goes down, etc.
-        * Also, we *must* clear the interrupt at some point, or we won't
-        * take it again, which can be real bad for errors, etc...
-        */
-
-       if (!(dd->ipath_flags & IPATH_INITTED)) {
-               ipath_bad_intr(dd, &unexpected);
-               ret = IRQ_NONE;
-               goto bail;
-       }
-
-       istat = ipath_read_ireg(dd, dd->ipath_kregs->kr_intstatus);
-
-       if (unlikely(!istat)) {
-               ipath_stats.sps_nullintr++;
-               ret = IRQ_NONE; /* not our interrupt, or already handled */
-               goto bail;
-       }
-       if (unlikely(istat == -1)) {
-               ipath_bad_regread(dd);
-               /* don't know if it was our interrupt or not */
-               ret = IRQ_NONE;
-               goto bail;
-       }
-
-       if (unexpected)
-               unexpected = 0;
-
-       if (unlikely(istat & ~dd->ipath_i_bitsextant))
-               ipath_dev_err(dd,
-                             "interrupt with unknown interrupts %Lx set\n",
-                             (unsigned long long)
-                             istat & ~dd->ipath_i_bitsextant);
-       else if (istat & ~INFINIPATH_I_ERROR) /* errors do own printing */
-               ipath_cdbg(VERBOSE, "intr stat=0x%Lx\n",
-                       (unsigned long long) istat);
-
-       if (istat & INFINIPATH_I_ERROR) {
-               ipath_stats.sps_errints++;
-               estat = ipath_read_kreg64(dd,
-                                         dd->ipath_kregs->kr_errorstatus);
-               if (!estat)
-                       dev_info(&dd->pcidev->dev, "error interrupt (%Lx), "
-                                "but no error bits set!\n",
-                                (unsigned long long) istat);
-               else if (estat == -1LL)
-                       /*
-                        * should we try clearing all, or hope next read
-                        * works?
-                        */
-                       ipath_dev_err(dd, "Read of error status failed "
-                                     "(all bits set); ignoring\n");
-               else
-                       chk0rcv |= handle_errors(dd, estat);
-       }
-
-       if (istat & INFINIPATH_I_GPIO) {
-               /*
-                * GPIO interrupts fall in two broad classes:
-                * GPIO_2 indicates (on some HT4xx boards) that a packet
-                *        has arrived for Port 0. Checking for this
-                *        is controlled by flag IPATH_GPIO_INTR.
-                * GPIO_3..5 on IBA6120 Rev2 and IBA6110 Rev4 chips indicate
-                *        errors that we need to count. Checking for this
-                *        is controlled by flag IPATH_GPIO_ERRINTRS.
-                */
-               u32 gpiostatus;
-               u32 to_clear = 0;
-
-               gpiostatus = ipath_read_kreg32(
-                       dd, dd->ipath_kregs->kr_gpio_status);
-               /* First the error-counter case. */
-               if ((gpiostatus & IPATH_GPIO_ERRINTR_MASK) &&
-                   (dd->ipath_flags & IPATH_GPIO_ERRINTRS)) {
-                       /* want to clear the bits we see asserted. */
-                       to_clear |= (gpiostatus & IPATH_GPIO_ERRINTR_MASK);
-
-                       /*
-                        * Count appropriately, clear bits out of our copy,
-                        * as they have been "handled".
-                        */
-                       if (gpiostatus & (1 << IPATH_GPIO_RXUVL_BIT)) {
-                               ipath_dbg("FlowCtl on UnsupVL\n");
-                               dd->ipath_rxfc_unsupvl_errs++;
-                       }
-                       if (gpiostatus & (1 << IPATH_GPIO_OVRUN_BIT)) {
-                               ipath_dbg("Overrun Threshold exceeded\n");
-                               dd->ipath_overrun_thresh_errs++;
-                       }
-                       if (gpiostatus & (1 << IPATH_GPIO_LLI_BIT)) {
-                               ipath_dbg("Local Link Integrity error\n");
-                               dd->ipath_lli_errs++;
-                       }
-                       gpiostatus &= ~IPATH_GPIO_ERRINTR_MASK;
-               }
-               /* Now the Port0 Receive case */
-               if ((gpiostatus & (1 << IPATH_GPIO_PORT0_BIT)) &&
-                   (dd->ipath_flags & IPATH_GPIO_INTR)) {
-                       /*
-                        * GPIO status bit 2 is set, and we expected it.
-                        * Clear it and note it in to_clear.
-                        * This probably only happens if a Port0 pkt
-                        * arrives at _just_ the wrong time, and we
-                        * handle that by setting chk0rcv.
-                        */
-                       to_clear |= (1 << IPATH_GPIO_PORT0_BIT);
-                       gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
-                       chk0rcv = 1;
-               }
-               if (gpiostatus) {
-                       /*
-                        * Some unexpected bits remain. If they could have
-                        * caused the interrupt, complain and clear.
-                        * To avoid repetition of this condition, also clear
-                        * the mask. It is almost certainly due to error.
-                        */
-                       const u32 mask = (u32) dd->ipath_gpio_mask;
-
-                       if (mask & gpiostatus) {
-                               ipath_dbg("Unexpected GPIO IRQ bits %x\n",
-                                 gpiostatus & mask);
-                               to_clear |= (gpiostatus & mask);
-                               dd->ipath_gpio_mask &= ~(gpiostatus & mask);
-                               ipath_write_kreg(dd,
-                                       dd->ipath_kregs->kr_gpio_mask,
-                                       dd->ipath_gpio_mask);
-                       }
-               }
-               if (to_clear) {
-                       ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_clear,
-                                       (u64) to_clear);
-               }
-       }
-
-       /*
-        * Clear the interrupt bits we found set, unless they are receive
-        * related, in which case we already cleared them above, and don't
-        * want to clear them again, because we might lose an interrupt.
-        * Clear it early, so we "know" the chip will have seen this by
-        * the time we process the queue, and will re-interrupt if necessary.
-        * The processor itself won't take the interrupt again until we return.
-        */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, istat);
-
-       /*
-        * Handle kernel receive queues before checking for pio buffers
-        * available since receives can overflow; piobuf waiters can afford
-        * a few extra cycles, since they were waiting anyway, and user's
-        * a few extra cycles, since they were waiting anyway, and users
-        */
-       kportrbits = (1ULL << dd->ipath_i_rcvavail_shift) |
-               (1ULL << dd->ipath_i_rcvurg_shift);
-       if (chk0rcv || (istat & kportrbits)) {
-               istat &= ~kportrbits;
-               ipath_kreceive(dd->ipath_pd[0]);
-       }
-
-       if (istat & ((dd->ipath_i_rcvavail_mask << dd->ipath_i_rcvavail_shift) |
-                    (dd->ipath_i_rcvurg_mask << dd->ipath_i_rcvurg_shift)))
-               handle_urcv(dd, istat);
-
-       if (istat & (INFINIPATH_I_SDMAINT | INFINIPATH_I_SDMADISABLED))
-               handle_sdma_intr(dd, istat);
-
-       if (istat & INFINIPATH_I_SPIOBUFAVAIL) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl &= ~INFINIPATH_S_PIOINTBUFAVAIL;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                                dd->ipath_sendctrl);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-               /* always process; sdma verbs uses PIO for acks and VL15  */
-               handle_layer_pioavail(dd);
-       }
-
-       ret = IRQ_HANDLED;
-
-bail:
-       return ret;
-}
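
ipath_intr() is a standard irqreturn_t handler: it returns IRQ_NONE for interrupts it does not own and IRQ_HANDLED otherwise. As a hedged sketch only (the real registration lives in the driver's chip-specific interrupt setup, not in this file; the "ipath" name and IRQF_SHARED flag are assumptions), hooking such a handler up looks roughly like:

#include <linux/interrupt.h>

/* Sketch only: register ipath_intr() on the device's IRQ line. */
static int example_intr_setup(struct ipath_devdata *dd)
{
	int ret;

	ret = request_irq(dd->ipath_irq, ipath_intr, IRQF_SHARED,
			  "ipath", dd);
	if (ret)
		ipath_dev_err(dd, "request_irq failed: %d\n", ret);
	return ret;
}
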
diff --git a/drivers/staging/rdma/ipath/ipath_kernel.h b/drivers/staging/rdma/ipath/ipath_kernel.h
deleted file mode 100644 (file)
index 66c934a..0000000
+++ /dev/null
@@ -1,1374 +0,0 @@
-#ifndef _IPATH_KERNEL_H
-#define _IPATH_KERNEL_H
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This header file is the base header file for infinipath kernel code.
- * ipath_user.h serves a similar purpose for user code.
- */
-
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/scatterlist.h>
-#include <linux/sched.h>
-#include <asm/io.h>
-#include <rdma/ib_verbs.h>
-
-#include "ipath_common.h"
-#include "ipath_debug.h"
-#include "ipath_registers.h"
-
-/* only s/w major version of InfiniPath we can handle */
-#define IPATH_CHIP_VERS_MAJ 2U
-
-/* don't care about this except printing */
-#define IPATH_CHIP_VERS_MIN 0U
-
-/* temporary, maybe always */
-extern struct infinipath_stats ipath_stats;
-
-#define IPATH_CHIP_SWVERSION IPATH_CHIP_VERS_MAJ
-/*
- * First-cut criterion for "device is active" is
- * two thousand dwords combined Tx, Rx traffic per
- * 5-second interval. SMA packets are 64 dwords,
- * and occur "a few per second", presumably each way.
- */
-#define IPATH_TRAFFIC_ACTIVE_THRESHOLD (2000)
-/*
- * Struct used to indicate which errors are logged in each of the
- * error-counters that are logged to EEPROM. A counter is incremented
- * _once_ (saturating at 255) for each event with any bits set in
- * the error or hwerror register masks below.
- */
-#define IPATH_EEP_LOG_CNT (4)
-struct ipath_eep_log_mask {
-       u64 errs_to_log;
-       u64 hwerrs_to_log;
-};
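
The masks above implement the rule described in the comment: each of the IPATH_EEP_LOG_CNT counters is bumped once, saturating at 255, whenever any bit in its errs_to_log mask appears in the error register (handle_errors() does this through ipath_inc_eeprom_err()). A hedged sketch of that rule, where example_log_errs() and eep_counter[] are hypothetical:

/* Illustration only: saturating per-mask error counters. */
static void example_log_errs(const struct ipath_eep_log_mask *masks,
			     u8 *eep_counter, u64 errs)
{
	int i;

	for (i = 0; i < IPATH_EEP_LOG_CNT; i++) {
		if (!(errs & masks[i].errs_to_log))
			continue;
		if (eep_counter[i] < 255)
			eep_counter[i]++;	/* saturate at 255 */
	}
}
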
-
-struct ipath_portdata {
-       void **port_rcvegrbuf;
-       dma_addr_t *port_rcvegrbuf_phys;
-       /* rcvhdrq base, needs mmap before useful */
-       void *port_rcvhdrq;
-       /* kernel virtual address where hdrqtail is updated */
-       void *port_rcvhdrtail_kvaddr;
-       /*
-        * temp buffer for expected send setup, allocated at open, instead
-        * of each setup call
-        */
-       void *port_tid_pg_list;
-       /* when waiting for rcv or pioavail */
-       wait_queue_head_t port_wait;
-       /*
-        * rcvegr bufs base, physical, must fit in 44 bits
-        * (so 32 bit programs' mmap64 of 44 bit addresses works)
-        */
-       dma_addr_t port_rcvegr_phys;
-       /* mmap of hdrq, must fit in 44 bits */
-       dma_addr_t port_rcvhdrq_phys;
-       dma_addr_t port_rcvhdrqtailaddr_phys;
-       /*
-        * number of opens (including slave subports) on this instance
-        * (ignoring forks, dup, etc. for now)
-        */
-       int port_cnt;
-       /*
-        * how much space to leave at start of eager TID entries for
-        * protocol use, on each TID
-        */
-       /* this port's number, kept here instead of calculating it */
-       unsigned port_port;
-       /* non-zero if port is being shared. */
-       u16 port_subport_cnt;
-       /* non-zero if port is being shared. */
-       u16 port_subport_id;
-       /* number of pio bufs for this port (all procs, if shared) */
-       u32 port_piocnt;
-       /* first pio buffer for this port */
-       u32 port_pio_base;
-       /* chip offset of PIO buffers for this port */
-       u32 port_piobufs;
-       /* how many alloc_pages() chunks in port_rcvegrbuf_pages */
-       u32 port_rcvegrbuf_chunks;
-       /* how many egrbufs per chunk */
-       u32 port_rcvegrbufs_perchunk;
-       /* order for port_rcvegrbuf_pages */
-       size_t port_rcvegrbuf_size;
-       /* rcvhdrq size (for freeing) */
-       size_t port_rcvhdrq_size;
-       /* next expected TID to check when looking for free */
-       u32 port_tidcursor;
-       /* port state flags (IPATH_PORT_WAITING_RCV, etc.) */
-       unsigned long port_flag;
-       /* what happened */
-       unsigned long int_flag;
-       /* WAIT_RCV that timed out, no interrupt */
-       u32 port_rcvwait_to;
-       /* WAIT_PIO that timed out, no interrupt */
-       u32 port_piowait_to;
-       /* WAIT_RCV already happened, no wait */
-       u32 port_rcvnowait;
-       /* WAIT_PIO already happened, no wait */
-       u32 port_pionowait;
-       /* total number of rcvhdrqfull errors */
-       u32 port_hdrqfull;
-       /*
-        * Used to suppress multiple instances of same
-        * port staying stuck at same point.
-        */
-       u32 port_lastrcvhdrqtail;
-       /* saved total number of rcvhdrqfull errors for poll edge trigger */
-       u32 port_hdrqfull_poll;
-       /* total number of polled urgent packets */
-       u32 port_urgent;
-       /* saved total number of polled urgent packets for poll edge trigger */
-       u32 port_urgent_poll;
-       /* pid of process using this port */
-       struct pid *port_pid;
-       struct pid *port_subpid[INFINIPATH_MAX_SUBPORT];
-       /* same size as task_struct .comm[] */
-       char port_comm[TASK_COMM_LEN];
-       /* pkeys set by this use of this port */
-       u16 port_pkeys[4];
-       /* so file ops can get at unit */
-       struct ipath_devdata *port_dd;
-       /* A page of memory for rcvhdrhead, rcvegrhead, rcvegrtail * N */
-       void *subport_uregbase;
-       /* An array of pages for the eager receive buffers * N */
-       void *subport_rcvegrbuf;
-       /* An array of pages for the eager header queue entries * N */
-       void *subport_rcvhdr_base;
-       /* The version of the library which opened this port */
-       u32 userversion;
-       /* Bitmask of active slaves */
-       u32 active_slaves;
-       /* Type of packets or conditions we want to poll for */
-       u16 poll_type;
-       /* port rcvhdrq head offset */
-       u32 port_head;
-       /* receive packet sequence counter */
-       u32 port_seq_cnt;
-};
-
-struct sk_buff;
-struct ipath_sge_state;
-struct ipath_verbs_txreq;
-
-/*
- * control information for layered drivers
- */
-struct _ipath_layer {
-       void *l_arg;
-};
-
-struct ipath_skbinfo {
-       struct sk_buff *skb;
-       dma_addr_t phys;
-};
-
-struct ipath_sdma_txreq {
-       int                 flags;
-       int                 sg_count;
-       union {
-               struct scatterlist *sg;
-               void *map_addr;
-       };
-       void              (*callback)(void *, int);
-       void               *callback_cookie;
-       int                 callback_status;
-       u16                 start_idx;  /* sdma private */
-       u16                 next_descq_idx;  /* sdma private */
-       struct list_head    list;       /* sdma private */
-};
-
-struct ipath_sdma_desc {
-       __le64 qw[2];
-};
-
-#define IPATH_SDMA_TXREQ_F_USELARGEBUF  0x1
-#define IPATH_SDMA_TXREQ_F_HEADTOHOST   0x2
-#define IPATH_SDMA_TXREQ_F_INTREQ       0x4
-#define IPATH_SDMA_TXREQ_F_FREEBUF      0x8
-#define IPATH_SDMA_TXREQ_F_FREEDESC     0x10
-#define IPATH_SDMA_TXREQ_F_VL15         0x20
-
-#define IPATH_SDMA_TXREQ_S_OK        0
-#define IPATH_SDMA_TXREQ_S_SENDERROR 1
-#define IPATH_SDMA_TXREQ_S_ABORTED   2
-#define IPATH_SDMA_TXREQ_S_SHUTDOWN  3
-
-#define IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG    (1ull << 63)
-#define IPATH_SDMA_STATUS_ABORT_IN_PROG                        (1ull << 62)
-#define IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE         (1ull << 61)
-#define IPATH_SDMA_STATUS_SCB_EMPTY                    (1ull << 30)
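
The flag and status values above are consumed through struct ipath_sdma_txreq. A hedged sketch of filling one in before submission (the example_* names and the final hand-off are placeholders, not driver API; includes are assumed to come from this header's own #includes):

/* Completion callback matching the (void *, int) signature in the struct. */
static void example_sdma_done(void *cookie, int status)
{
	if (status != IPATH_SDMA_TXREQ_S_OK)
		pr_debug("sdma txreq %p finished with status %d\n",
			 cookie, status);
}

static void example_fill_txreq(struct ipath_sdma_txreq *tx,
			       struct scatterlist *sg, int nents,
			       void *cookie)
{
	tx->flags = IPATH_SDMA_TXREQ_F_INTREQ;	/* interrupt on completion */
	tx->sg = sg;
	tx->sg_count = nents;
	tx->callback = example_sdma_done;
	tx->callback_cookie = cookie;
	INIT_LIST_HEAD(&tx->list);
	/* ...hand tx to the send-DMA engine here (elided)... */
}
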
-
-/* max dwords in small buffer packet */
-#define IPATH_SMALLBUF_DWORDS (dd->ipath_piosize2k >> 2)
-
-/*
- * Possible IB config parameters for ipath_f_get/set_ib_cfg()
- */
-#define IPATH_IB_CFG_LIDLMC 0 /* Get/set LID (LS16b) and Mask (MS16b) */
-#define IPATH_IB_CFG_HRTBT 1 /* Get/set Heartbeat off/enable/auto */
-#define IPATH_IB_HRTBT_ON 3 /* Heartbeat enabled, sent every 100msec */
-#define IPATH_IB_HRTBT_OFF 0 /* Heartbeat off */
-#define IPATH_IB_CFG_LWID_ENB 2 /* Get/set allowed Link-width */
-#define IPATH_IB_CFG_LWID 3 /* Get currently active Link-width */
-#define IPATH_IB_CFG_SPD_ENB 4 /* Get/set allowed Link speeds */
-#define IPATH_IB_CFG_SPD 5 /* Get current Link spd */
-#define IPATH_IB_CFG_RXPOL_ENB 6 /* Get/set Auto-RX-polarity enable */
-#define IPATH_IB_CFG_LREV_ENB 7 /* Get/set Auto-Lane-reversal enable */
-#define IPATH_IB_CFG_LINKLATENCY 8 /* Get link latency */
-
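
The IPATH_IB_CFG_* selectors above are meant to be passed to the per-chip ipath_f_get_ib_cfg()/ipath_f_set_ib_cfg() hooks declared further down in struct ipath_devdata. A hedged sketch (example_enable_heartbeat() is illustrative, not a driver function):

/* Sketch only: read the heartbeat setting and enable it if it is off. */
static void example_enable_heartbeat(struct ipath_devdata *dd)
{
	if (dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT) != IPATH_IB_HRTBT_ON)
		dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT,
				       IPATH_IB_HRTBT_ON);
}
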
-
-struct ipath_devdata {
-       struct list_head ipath_list;
-
-       struct ipath_kregs const *ipath_kregs;
-       struct ipath_cregs const *ipath_cregs;
-
-       /* mem-mapped pointer to base of chip regs */
-       u64 __iomem *ipath_kregbase;
-       /* end of mem-mapped chip space; range checking */
-       u64 __iomem *ipath_kregend;
-       /* physical address of chip for io_remap, etc. */
-       unsigned long ipath_physaddr;
-       /* base of memory alloced for ipath_kregbase, for free */
-       u64 *ipath_kregalloc;
-       /* ipath_cfgports pointers */
-       struct ipath_portdata **ipath_pd;
-       /* sk_buffs used by port 0 eager receive queue */
-       struct ipath_skbinfo *ipath_port0_skbinfo;
-       /* kvirt address of 1st 2k pio buffer */
-       void __iomem *ipath_pio2kbase;
-       /* kvirt address of 1st 4k pio buffer */
-       void __iomem *ipath_pio4kbase;
-       /*
-        * points to area where PIOavail registers will be DMA'ed.
-        * Has to be on a page of its own, because the page will be
-        * mapped into user program space.  This copy is *ONLY* ever
-        * written by DMA, not by the driver!  Need a copy per device
-        * when we get to multiple devices
-        */
-       volatile __le64 *ipath_pioavailregs_dma;
-       /* physical address where updates occur */
-       dma_addr_t ipath_pioavailregs_phys;
-       struct _ipath_layer ipath_layer;
-       /* setup intr */
-       int (*ipath_f_intrsetup)(struct ipath_devdata *);
-       /* fallback to alternate interrupt type if possible */
-       int (*ipath_f_intr_fallback)(struct ipath_devdata *);
-       /* setup on-chip bus config */
-       int (*ipath_f_bus)(struct ipath_devdata *, struct pci_dev *);
-       /* hard reset chip */
-       int (*ipath_f_reset)(struct ipath_devdata *);
-       int (*ipath_f_get_boardname)(struct ipath_devdata *, char *,
-                                    size_t);
-       void (*ipath_f_init_hwerrors)(struct ipath_devdata *);
-       void (*ipath_f_handle_hwerrors)(struct ipath_devdata *, char *,
-                                       size_t);
-       void (*ipath_f_quiet_serdes)(struct ipath_devdata *);
-       int (*ipath_f_bringup_serdes)(struct ipath_devdata *);
-       int (*ipath_f_early_init)(struct ipath_devdata *);
-       void (*ipath_f_clear_tids)(struct ipath_devdata *, unsigned);
-       void (*ipath_f_put_tid)(struct ipath_devdata *, u64 __iomem*,
-                               u32, unsigned long);
-       void (*ipath_f_tidtemplate)(struct ipath_devdata *);
-       void (*ipath_f_cleanup)(struct ipath_devdata *);
-       void (*ipath_f_setextled)(struct ipath_devdata *, u64, u64);
-       /* fill out chip-specific fields */
-       int (*ipath_f_get_base_info)(struct ipath_portdata *, void *);
-       /* free irq */
-       void (*ipath_f_free_irq)(struct ipath_devdata *);
-       struct ipath_message_header *(*ipath_f_get_msgheader)
-                                       (struct ipath_devdata *, __le32 *);
-       void (*ipath_f_config_ports)(struct ipath_devdata *, ushort);
-       int (*ipath_f_get_ib_cfg)(struct ipath_devdata *, int);
-       int (*ipath_f_set_ib_cfg)(struct ipath_devdata *, int, u32);
-       void (*ipath_f_config_jint)(struct ipath_devdata *, u16 , u16);
-       void (*ipath_f_read_counters)(struct ipath_devdata *,
-                                       struct infinipath_counters *);
-       void (*ipath_f_xgxs_reset)(struct ipath_devdata *);
-       /* per chip actions needed for IB Link up/down changes */
-       int (*ipath_f_ib_updown)(struct ipath_devdata *, int, u64);
-
-       unsigned ipath_lastegr_idx;
-       struct ipath_ibdev *verbs_dev;
-       struct timer_list verbs_timer;
-       /* total dwords sent (summed from counter) */
-       u64 ipath_sword;
-       /* total dwords rcvd (summed from counter) */
-       u64 ipath_rword;
-       /* total packets sent (summed from counter) */
-       u64 ipath_spkts;
-       /* total packets rcvd (summed from counter) */
-       u64 ipath_rpkts;
-       /* ipath_statusp initially points to this. */
-       u64 _ipath_status;
-       /* GUID for this interface, in network order */
-       __be64 ipath_guid;
-       /*
-        * aggregate of error bits reported since last cleared, for
-        * limiting of error reporting
-        */
-       ipath_err_t ipath_lasterror;
-       /*
-        * aggregate of error bits reported since last cleared, for
-        * limiting of hwerror reporting
-        */
-       ipath_err_t ipath_lasthwerror;
-       /* errors masked because they occur too fast */
-       ipath_err_t ipath_maskederrs;
-       u64 ipath_lastlinkrecov; /* link recoveries at last ACTIVE */
-       /* these 5 fields are used to establish deltas for IB Symbol
-        * errors and linkrecovery errors. They can be reported on
-        * some chips during link negotiation prior to INIT, and with
-        * DDR when faking DDR negotiations with non-IBTA switches.
-        * The chip counters are adjusted at driver unload if there is
-        * a non-zero delta.
-        */
-       u64 ibdeltainprog;
-       u64 ibsymdelta;
-       u64 ibsymsnap;
-       u64 iblnkerrdelta;
-       u64 iblnkerrsnap;
-
-       /* time in jiffies at which to re-enable maskederrs */
-       unsigned long ipath_unmasktime;
-       /* count of egrfull errors, combined for all ports */
-       u64 ipath_last_tidfull;
-       /* for ipath_qcheck() */
-       u64 ipath_lastport0rcv_cnt;
-       /* template for writing TIDs  */
-       u64 ipath_tidtemplate;
-       /* value to write to free TIDs */
-       u64 ipath_tidinvalid;
-       /* IBA6120 rcv interrupt setup */
-       u64 ipath_rhdrhead_intr_off;
-
-       /* size of memory at ipath_kregbase */
-       u32 ipath_kregsize;
-       /* number of registers used for pioavail */
-       u32 ipath_pioavregs;
-       /* IPATH_POLL, etc. */
-       u32 ipath_flags;
-       /* ipath_flags driver is waiting for */
-       u32 ipath_state_wanted;
-       /* last buffer for user use, first buf for kernel use is this
-        * index. */
-       u32 ipath_lastport_piobuf;
-       /* is a stats timer active */
-       u32 ipath_stats_timer_active;
-       /* number of interrupts for this device -- saturates... */
-       u32 ipath_int_counter;
-       /* dwords sent read from counter */
-       u32 ipath_lastsword;
-       /* dwords received read from counter */
-       u32 ipath_lastrword;
-       /* sent packets read from counter */
-       u32 ipath_lastspkts;
-       /* received packets read from counter */
-       u32 ipath_lastrpkts;
-       /* pio bufs allocated per port */
-       u32 ipath_pbufsport;
-       /* if remainder on bufs/port, ports < extrabuf get 1 extra */
-       u32 ipath_ports_extrabuf;
-       u32 ipath_pioupd_thresh; /* update threshold, some chips */
-       /*
-        * maximum number of ports configured; zero means use the number the
-        * chip supports, and fewer ports gives more pio bufs/port, etc.
-        */
-       u32 ipath_cfgports;
-       /* count of port 0 hdrqfull errors */
-       u32 ipath_p0_hdrqfull;
-       /* port 0 number of receive eager buffers */
-       u32 ipath_p0_rcvegrcnt;
-
-       /*
-        * index of last piobuffer we used.  Speeds up searching, by
-        * starting at this point.  Doesn't matter if multiple CPUs use and
-        * update it; the last write is the only one that matters.  Whenever it
-        * wraps, we update shadow copies.  Need a copy per device when we
-        * get to multiple devices
-        */
-       u32 ipath_lastpioindex;
-       u32 ipath_lastpioindexl;
-       /* max length of freezemsg */
-       u32 ipath_freezelen;
-       /*
-        * consecutive times we wanted a PIO buffer but were unable to
-        * get one
-        */
-       u32 ipath_consec_nopiobuf;
-       /*
-        * hint that we should update ipath_pioavailshadow before
-        * looking for a PIO buffer
-        */
-       u32 ipath_upd_pio_shadow;
-       /* so we can rewrite it after a chip reset */
-       u32 ipath_pcibar0;
-       /* so we can rewrite it after a chip reset */
-       u32 ipath_pcibar1;
-       u32 ipath_x1_fix_tries;
-       u32 ipath_autoneg_tries;
-       u32 serdes_first_init_done;
-
-       struct ipath_relock {
-               atomic_t ipath_relock_timer_active;
-               struct timer_list ipath_relock_timer;
-               unsigned int ipath_relock_interval; /* in jiffies */
-       } ipath_relock_singleton;
-
-       /* interrupt number */
-       int ipath_irq;
-       /* HT/PCI Vendor ID (here for NodeInfo) */
-       u16 ipath_vendorid;
-       /* HT/PCI Device ID (here for NodeInfo) */
-       u16 ipath_deviceid;
-       /* offset in HT config space of slave/primary interface block */
-       u8 ipath_ht_slave_off;
-       /* for write combining settings */
-       int wc_cookie;
-       /* ref count for each pkey */
-       atomic_t ipath_pkeyrefs[4];
-       /* shadow copy of struct page *'s for exp tid pages */
-       struct page **ipath_pageshadow;
-       /* shadow copy of dma handles for exp tid pages */
-       dma_addr_t *ipath_physshadow;
-       u64 __iomem *ipath_egrtidbase;
-       /* lock to workaround chip bug 9437 and others */
-       spinlock_t ipath_kernel_tid_lock;
-       spinlock_t ipath_user_tid_lock;
-       spinlock_t ipath_sendctrl_lock;
-       /* around ipath_pd and (user ports) port_cnt use (intr vs free) */
-       spinlock_t ipath_uctxt_lock;
-
-       /*
-        * IPATH_STATUS_*,
-        * this address is mapped readonly into user processes so they can
-        * get status cheaply, whenever they want.
-        */
-       u64 *ipath_statusp;
-       /* freeze msg if hw error put chip in freeze */
-       char *ipath_freezemsg;
-       /* pci access data structure */
-       struct pci_dev *pcidev;
-       struct cdev *user_cdev;
-       struct cdev *diag_cdev;
-       struct device *user_dev;
-       struct device *diag_dev;
-       /* timer used to prevent stats overflow, error throttling, etc. */
-       struct timer_list ipath_stats_timer;
-       /* timer to verify interrupts work, and fallback if possible */
-       struct timer_list ipath_intrchk_timer;
-       void *ipath_dummy_hdrq; /* used after port close */
-       dma_addr_t ipath_dummy_hdrq_phys;
-
-       /* SendDMA related entries */
-       spinlock_t            ipath_sdma_lock;
-       unsigned long         ipath_sdma_status;
-       unsigned long         ipath_sdma_abort_jiffies;
-       unsigned long         ipath_sdma_abort_intr_timeout;
-       unsigned long         ipath_sdma_buf_jiffies;
-       struct ipath_sdma_desc *ipath_sdma_descq;
-       u64                   ipath_sdma_descq_added;
-       u64                   ipath_sdma_descq_removed;
-       int                   ipath_sdma_desc_nreserved;
-       u16                   ipath_sdma_descq_cnt;
-       u16                   ipath_sdma_descq_tail;
-       u16                   ipath_sdma_descq_head;
-       u16                   ipath_sdma_next_intr;
-       u16                   ipath_sdma_reset_wait;
-       u8                    ipath_sdma_generation;
-       struct tasklet_struct ipath_sdma_abort_task;
-       struct tasklet_struct ipath_sdma_notify_task;
-       struct list_head      ipath_sdma_activelist;
-       struct list_head      ipath_sdma_notifylist;
-       atomic_t              ipath_sdma_vl15_count;
-       struct timer_list     ipath_sdma_vl15_timer;
-
-       dma_addr_t       ipath_sdma_descq_phys;
-       volatile __le64 *ipath_sdma_head_dma;
-       dma_addr_t       ipath_sdma_head_phys;
-
-       unsigned long ipath_ureg_align; /* user register alignment */
-
-       struct delayed_work ipath_autoneg_work;
-       wait_queue_head_t ipath_autoneg_wait;
-
-       /* HoL blocking / user app forward-progress state */
-       unsigned          ipath_hol_state;
-       unsigned          ipath_hol_next;
-       struct timer_list ipath_hol_timer;
-
-       /*
-        * Shadow copies of registers; size indicates read access size.
-        * Most of them are readonly, but some are write-only registers,
-        * where we manipulate the bits in the shadow copy, and then write
-        * the shadow copy to infinipath.
-        *
-        * We deliberately make most of these 32 bits, since they have
-        * restricted range.  For any that we read, we want to generate 32
-        * bit accesses, since Opteron will generate 2 separate 32 bit HT
-        * transactions for a 64 bit read, and we want to avoid unnecessary
-        * HT transactions.
-        */
-
-       /* This is the 64 bit group */
-
-       /*
-        * shadow of pioavail, check to be sure it's large enough at
-        * init time.
-        */
-       unsigned long ipath_pioavailshadow[8];
-       /* bitmap of send buffers available for the kernel to use with PIO. */
-       unsigned long ipath_pioavailkernel[8];
-       /* shadow of kr_gpio_out, for rmw ops */
-       u64 ipath_gpio_out;
-       /* shadow the gpio mask register */
-       u64 ipath_gpio_mask;
-       /* shadow the gpio output enable, etc... */
-       u64 ipath_extctrl;
-       /* kr_revision shadow */
-       u64 ipath_revision;
-       /*
-        * shadow of ibcctrl, for interrupt handling of link changes,
-        * etc.
-        */
-       u64 ipath_ibcctrl;
-       /*
-        * last ibcstatus, to suppress "duplicate" status change messages,
-        * mostly from 2 to 3
-        */
-       u64 ipath_lastibcstat;
-       /* hwerrmask shadow */
-       ipath_err_t ipath_hwerrmask;
-       ipath_err_t ipath_errormask; /* errormask shadow */
-       /* interrupt config reg shadow */
-       u64 ipath_intconfig;
-       /* kr_sendpiobufbase value */
-       u64 ipath_piobufbase;
-       /* kr_ibcddrctrl shadow */
-       u64 ipath_ibcddrctrl;
-
-       /* these are the "32 bit" regs */
-
-       /*
-        * number of GUIDs in the flash for this interface; may need some
-        * rethinking for setting on other ifaces
-        */
-       u32 ipath_nguid;
-       /*
-        * the following two are 32-bit bitmasks, but {test,clear,set}_bit
-        * all expect bit fields to be "unsigned long"
-        */
-       /* shadow kr_rcvctrl */
-       unsigned long ipath_rcvctrl;
-       /* shadow kr_sendctrl */
-       unsigned long ipath_sendctrl;
-       /* to not count armlaunch after cancel */
-       unsigned long ipath_lastcancel;
-       /* count cases where special trigger was needed (double write) */
-       unsigned long ipath_spectriggerhit;
-
-       /* value we put in kr_rcvhdrcnt */
-       u32 ipath_rcvhdrcnt;
-       /* value we put in kr_rcvhdrsize */
-       u32 ipath_rcvhdrsize;
-       /* value we put in kr_rcvhdrentsize */
-       u32 ipath_rcvhdrentsize;
-       /* offset of last entry in rcvhdrq */
-       u32 ipath_hdrqlast;
-       /* kr_portcnt value */
-       u32 ipath_portcnt;
-       /* kr_pagealign value */
-       u32 ipath_palign;
-       /* number of "2KB" PIO buffers */
-       u32 ipath_piobcnt2k;
-       /* size in bytes of "2KB" PIO buffers */
-       u32 ipath_piosize2k;
-       /* number of "4KB" PIO buffers */
-       u32 ipath_piobcnt4k;
-       /* size in bytes of "4KB" PIO buffers */
-       u32 ipath_piosize4k;
-       u32 ipath_pioreserved; /* reserved for special in-kernel use */
-       /* kr_rcvegrbase value */
-       u32 ipath_rcvegrbase;
-       /* kr_rcvegrcnt value */
-       u32 ipath_rcvegrcnt;
-       /* kr_rcvtidbase value */
-       u32 ipath_rcvtidbase;
-       /* kr_rcvtidcnt value */
-       u32 ipath_rcvtidcnt;
-       /* kr_sendregbase */
-       u32 ipath_sregbase;
-       /* kr_userregbase */
-       u32 ipath_uregbase;
-       /* kr_counterregbase */
-       u32 ipath_cregbase;
-       /* shadow the control register contents */
-       u32 ipath_control;
-       /* PCI revision register (HTC rev on FPGA) */
-       u32 ipath_pcirev;
-
-       /* chip address space used by 4k pio buffers */
-       u32 ipath_4kalign;
-       /* The MTU programmed for this unit */
-       u32 ipath_ibmtu;
-       /*
-        * The max size IB packet, including IB headers, that we can send.
-        * Starts the same as ipath_piosize, but is affected when ibmtu is
-        * changed, or by the size of the eager buffers.
-        */
-       u32 ipath_ibmaxlen;
-       /*
-        * ibmaxlen at init time, limited by chip and by receive buffer
-        * size.  Not changed after init.
-        */
-       u32 ipath_init_ibmaxlen;
-       /* size of each rcvegrbuffer */
-       u32 ipath_rcvegrbufsize;
-       /* localbus width (1, 2, 4, 8, 16, 32) from config space */
-       u32 ipath_lbus_width;
-       /* localbus speed (HT: 200, 400, 800, 1000; PCIe: 2500) */
-       u32 ipath_lbus_speed;
-       /*
-        * number of sequential ibcstatus changes seen while polling
-        * active/quiet (i.e., link not coming up).
-        */
-       u32 ipath_ibpollcnt;
-       /* low and high portions of MSI capability/vector */
-       u32 ipath_msi_lo;
-       /* saved after PCIe init for restore after reset */
-       u32 ipath_msi_hi;
-       /* MSI data (vector) saved for restore */
-       u16 ipath_msi_data;
-       /* MLID programmed for this instance */
-       u16 ipath_mlid;
-       /* LID programmed for this instance */
-       u16 ipath_lid;
-       /* list of pkeys programmed; 0 if not set */
-       u16 ipath_pkeys[4];
-       /*
-        * ASCII serial number, from flash, large enough for original
-        * all digit strings, and longer QLogic serial number format
-        */
-       u8 ipath_serial[16];
-       /* human readable board version */
-       u8 ipath_boardversion[96];
-       u8 ipath_lbus_info[32]; /* human readable localbus info */
-       /* chip major rev, from ipath_revision */
-       u8 ipath_majrev;
-       /* chip minor rev, from ipath_revision */
-       u8 ipath_minrev;
-       /* board rev, from ipath_revision */
-       u8 ipath_boardrev;
-       /* saved for restore after reset */
-       u8 ipath_pci_cacheline;
-       /* LID mask control */
-       u8 ipath_lmc;
-       /* link width supported */
-       u8 ipath_link_width_supported;
-       /* link speed supported */
-       u8 ipath_link_speed_supported;
-       u8 ipath_link_width_enabled;
-       u8 ipath_link_speed_enabled;
-       u8 ipath_link_width_active;
-       u8 ipath_link_speed_active;
-       /* Rx Polarity inversion (compensate for ~tx on partner) */
-       u8 ipath_rx_pol_inv;
-
-       u8 ipath_r_portenable_shift;
-       u8 ipath_r_intravail_shift;
-       u8 ipath_r_tailupd_shift;
-       u8 ipath_r_portcfg_shift;
-
-       /* unit # of this chip, if present */
-       int ipath_unit;
-
-       /* local link integrity counter */
-       u32 ipath_lli_counter;
-       /* local link integrity errors */
-       u32 ipath_lli_errors;
-       /*
-        * Above counts only cases where _successive_ LocalLinkIntegrity
-        * errors were seen in the receive headers of kern-packets.
-        * Below are the three (monotonically increasing) counters
-        * maintained via GPIO interrupts on iba6120-rev2.
-        */
-       u32 ipath_rxfc_unsupvl_errs;
-       u32 ipath_overrun_thresh_errs;
-       u32 ipath_lli_errs;
-
-       /*
-        * Not all devices managed by a driver instance are the same
-        * type, so these fields must be per-device.
-        */
-       u64 ipath_i_bitsextant;
-       ipath_err_t ipath_e_bitsextant;
-       ipath_err_t ipath_hwe_bitsextant;
-
-       /*
-        * Below should be computable from number of ports,
-        * since they are never modified.
-        */
-       u64 ipath_i_rcvavail_mask;
-       u64 ipath_i_rcvurg_mask;
-       u16 ipath_i_rcvurg_shift;
-       u16 ipath_i_rcvavail_shift;
-
-       /*
-        * Register bits for selecting i2c direction and values, used for
-        * I2C serial flash.
-        */
-       u8 ipath_gpio_sda_num;
-       u8 ipath_gpio_scl_num;
-       u8 ipath_i2c_chain_type;
-       u64 ipath_gpio_sda;
-       u64 ipath_gpio_scl;
-
-       /* lock for doing RMW of shadows/regs for ExtCtrl and GPIO */
-       spinlock_t ipath_gpio_lock;
-
-       /*
-        * IB link and linktraining states and masks that vary per chip in
-        * some way.  Set at init, to avoid recomputing them on each IB
-        * status change interrupt.
-        */
-       u8 ibcs_ls_shift;
-       u8 ibcs_lts_mask;
-       u32 ibcs_mask;
-       u32 ib_init;
-       u32 ib_arm;
-       u32 ib_active;
-
-       u16 ipath_rhf_offset; /* offset of RHF within receive header entry */
-
-       /*
-        * shift/mask for linkcmd, linkinitcmd, maxpktlen in the ibccontrol
-        * register.  Changes for IBA7220.
-        */
-       u8 ibcc_lic_mask; /* LinkInitCmd */
-       u8 ibcc_lc_shift; /* LinkCmd */
-       u8 ibcc_mpl_shift; /* Maxpktlen */
-
-       u8 delay_mult;
-
-       /* used to override LED behavior */
-       u8 ipath_led_override;  /* Substituted for normal value, if non-zero */
-       u16 ipath_led_override_timeoff; /* delta to next timer event */
-       u8 ipath_led_override_vals[2]; /* Alternates per blink-frame */
-       u8 ipath_led_override_phase; /* Just counts, LSB picks from vals[] */
-       atomic_t ipath_led_override_timer_active;
-       /* Used to flash LEDs in override mode */
-       struct timer_list ipath_led_override_timer;
-
-       /* Support (including locks) for EEPROM logging of errors and time */
-       /* control access to actual counters, timer */
-       spinlock_t ipath_eep_st_lock;
-       /* control high-level access to EEPROM */
-       struct mutex ipath_eep_lock;
-       /* Below inc'd by ipath_snap_cntrs(), locked by ipath_eep_st_lock */
-       uint64_t ipath_traffic_wds;
-       /* active time is kept in seconds, but logged in hours */
-       atomic_t ipath_active_time;
-       /* Below are nominal shadow of EEPROM, new since last EEPROM update */
-       uint8_t ipath_eep_st_errs[IPATH_EEP_LOG_CNT];
-       uint8_t ipath_eep_st_new_errs[IPATH_EEP_LOG_CNT];
-       uint16_t ipath_eep_hrs;
-       /*
-        * masks for which bits of errs, hwerrs that cause
-        * each of the counters to increment.
-        */
-       struct ipath_eep_log_mask ipath_eep_st_masks[IPATH_EEP_LOG_CNT];
-
-       /* interrupt mitigation reload register info */
-       u16 ipath_jint_idle_ticks;      /* idle clock ticks */
-       u16 ipath_jint_max_packets;     /* max packets across all ports */
-
-       /*
-        * lock for access to SerDes, and flags to sequence preset
-        * versus steady-state. 7220-only at the moment.
-        */
-       spinlock_t ipath_sdepb_lock;
-       u8 ipath_presets_needed; /* Set if presets to be restored next DOWN */
-};
-
-/* ipath_hol_state values (stopping/starting user proc, send flushing) */
-#define IPATH_HOL_UP       0
-#define IPATH_HOL_DOWN     1
-/* ipath_hol_next toggle values, used when hol_state IPATH_HOL_DOWN */
-#define IPATH_HOL_DOWNSTOP 0
-#define IPATH_HOL_DOWNCONT 1
-
-/* bit positions for sdma_status */
-#define IPATH_SDMA_ABORTING  0
-#define IPATH_SDMA_DISARMED  1
-#define IPATH_SDMA_DISABLED  2
-#define IPATH_SDMA_LAYERBUF  3
-#define IPATH_SDMA_RUNNING  30
-#define IPATH_SDMA_SHUTDOWN 31
-
-/* bit combinations that correspond to abort states */
-#define IPATH_SDMA_ABORT_NONE 0
-#define IPATH_SDMA_ABORT_ABORTING (1UL << IPATH_SDMA_ABORTING)
-#define IPATH_SDMA_ABORT_DISARMED ((1UL << IPATH_SDMA_ABORTING) | \
-       (1UL << IPATH_SDMA_DISARMED))
-#define IPATH_SDMA_ABORT_DISABLED ((1UL << IPATH_SDMA_ABORTING) | \
-       (1UL << IPATH_SDMA_DISABLED))
-#define IPATH_SDMA_ABORT_ABORTED ((1UL << IPATH_SDMA_ABORTING) | \
-       (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
-#define IPATH_SDMA_ABORT_MASK ((1UL<<IPATH_SDMA_ABORTING) | \
-       (1UL << IPATH_SDMA_DISARMED) | (1UL << IPATH_SDMA_DISABLED))
-
-#define IPATH_SDMA_BUF_NONE 0
-#define IPATH_SDMA_BUF_MASK (1UL<<IPATH_SDMA_LAYERBUF)
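The abort-state values above are simply OR-combinations of the individual sdma_status bit positions. As a rough illustration (a stand-alone user-space sketch with renamed local macros, not driver code), testing a status word against those masks looks like this:

#include <stdio.h>

/* illustrative stand-ins for the IPATH_SDMA_* bit positions above */
#define SK_ABORTING  0
#define SK_DISARMED  1
#define SK_DISABLED  2

#define SK_ABORT_MASK    ((1UL << SK_ABORTING) | \
			  (1UL << SK_DISARMED) | (1UL << SK_DISABLED))
#define SK_ABORT_ABORTED ((1UL << SK_ABORTING) | \
			  (1UL << SK_DISARMED) | (1UL << SK_DISABLED))

int main(void)
{
	/* abort requested and engine disarmed, but not yet disabled */
	unsigned long status = (1UL << SK_ABORTING) | (1UL << SK_DISARMED);

	/* the abort is only complete once all three bits are set */
	if ((status & SK_ABORT_MASK) == SK_ABORT_ABORTED)
		printf("sdma abort complete\n");
	else
		printf("sdma abort still in progress\n");
	return 0;
}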
-
-/* Private data for file operations */
-struct ipath_filedata {
-       struct ipath_portdata *pd;
-       unsigned subport;
-       unsigned tidcursor;
-       struct ipath_user_sdma_queue *pq;
-};
-extern struct list_head ipath_dev_list;
-extern spinlock_t ipath_devs_lock;
-extern struct ipath_devdata *ipath_lookup(int unit);
-
-int ipath_init_chip(struct ipath_devdata *, int);
-int ipath_enable_wc(struct ipath_devdata *dd);
-void ipath_disable_wc(struct ipath_devdata *dd);
-int ipath_count_units(int *npresentp, int *nupp, int *maxportsp);
-void ipath_shutdown_device(struct ipath_devdata *);
-void ipath_clear_freeze(struct ipath_devdata *);
-
-struct file_operations;
-int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
-                   struct cdev **cdevp, struct device **devp);
-void ipath_cdev_cleanup(struct cdev **cdevp,
-                       struct device **devp);
-
-int ipath_diag_add(struct ipath_devdata *);
-void ipath_diag_remove(struct ipath_devdata *);
-
-extern wait_queue_head_t ipath_state_wait;
-
-int ipath_user_add(struct ipath_devdata *dd);
-void ipath_user_remove(struct ipath_devdata *dd);
-
-struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
-
-extern int ipath_diag_inuse;
-
-irqreturn_t ipath_intr(int irq, void *devid);
-int ipath_decode_err(struct ipath_devdata *dd, char *buf, size_t blen,
-                    ipath_err_t err);
-#if __IPATH_INFO || __IPATH_DBG
-extern const char *ipath_ibcstatus_str[];
-#endif
-
-/* clean up any per-chip chip-specific stuff */
-void ipath_chip_cleanup(struct ipath_devdata *);
-/* clean up any chip type-specific stuff */
-void ipath_chip_done(void);
-
-void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
-                         unsigned cnt);
-void ipath_cancel_sends(struct ipath_devdata *, int);
-
-int ipath_create_rcvhdrq(struct ipath_devdata *, struct ipath_portdata *);
-void ipath_free_pddata(struct ipath_devdata *, struct ipath_portdata *);
-
-int ipath_parse_ushort(const char *str, unsigned short *valp);
-
-void ipath_kreceive(struct ipath_portdata *);
-int ipath_setrcvhdrsize(struct ipath_devdata *, unsigned);
-int ipath_reset_device(int);
-void ipath_get_faststats(unsigned long);
-int ipath_wait_linkstate(struct ipath_devdata *, u32, int);
-int ipath_set_linkstate(struct ipath_devdata *, u8);
-int ipath_set_mtu(struct ipath_devdata *, u16);
-int ipath_set_lid(struct ipath_devdata *, u32, u8);
-int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
-void ipath_enable_armlaunch(struct ipath_devdata *);
-void ipath_disable_armlaunch(struct ipath_devdata *);
-void ipath_hol_down(struct ipath_devdata *);
-void ipath_hol_up(struct ipath_devdata *);
-void ipath_hol_event(unsigned long);
-void ipath_toggle_rclkrls(struct ipath_devdata *);
-void ipath_sd7220_clr_ibpar(struct ipath_devdata *);
-void ipath_set_relock_poll(struct ipath_devdata *, int);
-void ipath_shutdown_relock_poll(struct ipath_devdata *);
-
-/* for use in system calls, where we want to know device type, etc. */
-#define port_fp(fp) ((struct ipath_filedata *)(fp)->private_data)->pd
-#define subport_fp(fp) \
-       ((struct ipath_filedata *)(fp)->private_data)->subport
-#define tidcursor_fp(fp) \
-       ((struct ipath_filedata *)(fp)->private_data)->tidcursor
-#define user_sdma_queue_fp(fp) \
-       ((struct ipath_filedata *)(fp)->private_data)->pq
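These accessors all reach through file->private_data to the per-open ipath_filedata. A minimal user-space sketch of the same pattern, using hypothetical struct and macro names rather than the kernel types:

#include <stdio.h>

/* hypothetical stand-ins for struct file and struct ipath_filedata */
struct fake_filedata {
	int port;
	unsigned subport;
	unsigned tidcursor;
};

struct fake_file {
	void *private_data;
};

#define fake_port_fp(fp)    (((struct fake_filedata *)(fp)->private_data)->port)
#define fake_subport_fp(fp) (((struct fake_filedata *)(fp)->private_data)->subport)

int main(void)
{
	struct fake_filedata fd = { .port = 3, .subport = 1, .tidcursor = 0 };
	struct fake_file fp = { .private_data = &fd };

	/* the macros hide the private_data cast at every call site */
	printf("port %d subport %u\n", fake_port_fp(&fp), fake_subport_fp(&fp));
	return 0;
}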
-
-/*
- * values for ipath_flags
- */
-               /* chip can report link latency (IB 1.2) */
-#define IPATH_HAS_LINK_LATENCY 0x1
-               /* The chip is up and initted */
-#define IPATH_INITTED       0x2
-               /* set if any user code has set kr_rcvhdrsize */
-#define IPATH_RCVHDRSZ_SET  0x4
-               /* The chip is present and valid for accesses */
-#define IPATH_PRESENT       0x8
-               /* HT link0 is only 8 bits wide, ignore upper byte crc
-                * errors, etc. */
-#define IPATH_8BIT_IN_HT0   0x10
-               /* HT link1 is only 8 bits wide, ignore upper byte crc
-                * errors, etc. */
-#define IPATH_8BIT_IN_HT1   0x20
-               /* The link is down */
-#define IPATH_LINKDOWN      0x40
-               /* The link level is up (0x11) */
-#define IPATH_LINKINIT      0x80
-               /* The link is in the armed (0x21) state */
-#define IPATH_LINKARMED     0x100
-               /* The link is in the active (0x31) state */
-#define IPATH_LINKACTIVE    0x200
-               /* link current state is unknown */
-#define IPATH_LINKUNK       0x400
-               /* Write combining flush needed for PIO */
-#define IPATH_PIO_FLUSH_WC  0x1000
-               /* receive tail pointer is not DMA'd */
-#define IPATH_NODMA_RTAIL   0x2000
-               /* no IB cable, or no device on IB cable */
-#define IPATH_NOCABLE       0x4000
-               /* Supports port zero per packet receive interrupts via
-                * GPIO */
-#define IPATH_GPIO_INTR     0x8000
-               /* uses the coded 4byte TID, not 8 byte */
-#define IPATH_4BYTE_TID     0x10000
-               /* packet/word counters are 32 bit, else those 4 counters
-                * are 64bit */
-#define IPATH_32BITCOUNTERS 0x20000
-               /* Interrupt register is 64 bits */
-#define IPATH_INTREG_64     0x40000
-               /* can miss port0 rx interrupts */
-#define IPATH_DISABLED      0x80000 /* administratively disabled */
-               /* Use GPIO interrupts for new counters */
-#define IPATH_GPIO_ERRINTRS 0x100000
-#define IPATH_SWAP_PIOBUFS  0x200000
-               /* Supports Send DMA */
-#define IPATH_HAS_SEND_DMA  0x400000
-               /* Supports Send Count (not just word count) in PBC */
-#define IPATH_HAS_PBC_CNT   0x800000
-               /* Suppress heartbeat, even if turning off loopback */
-#define IPATH_NO_HRTBT      0x1000000
-#define IPATH_HAS_THRESH_UPDATE 0x4000000
-#define IPATH_HAS_MULT_IB_SPEED 0x8000000
-#define IPATH_IB_AUTONEG_INPROG 0x10000000
-#define IPATH_IB_AUTONEG_FAILED 0x20000000
-               /* link intentionally disabled; do not attempt to bring it up */
-#define IPATH_IB_LINK_DISABLED 0x40000000
-#define IPATH_IB_FORCE_NOTIFY 0x80000000 /* force notify on next ib change */
-
-/* Bits in GPIO for the added interrupts */
-#define IPATH_GPIO_PORT0_BIT 2
-#define IPATH_GPIO_RXUVL_BIT 3
-#define IPATH_GPIO_OVRUN_BIT 4
-#define IPATH_GPIO_LLI_BIT 5
-#define IPATH_GPIO_ERRINTR_MASK 0x38
-
-/* portdata flag bit offsets */
-               /* waiting for a packet to arrive */
-#define IPATH_PORT_WAITING_RCV   2
-               /* master has not finished initializing */
-#define IPATH_PORT_MASTER_UNINIT 4
-               /* waiting for an urgent packet to arrive */
-#define IPATH_PORT_WAITING_URG 5
-
-/* free up any allocated data at closes */
-void ipath_free_data(struct ipath_portdata *dd);
-u32 __iomem *ipath_getpiobuf(struct ipath_devdata *, u32, u32 *);
-void ipath_chg_pioavailkernel(struct ipath_devdata *dd, unsigned start,
-                               unsigned len, int avail);
-void ipath_init_iba6110_funcs(struct ipath_devdata *);
-void ipath_get_eeprom_info(struct ipath_devdata *);
-int ipath_update_eeprom_log(struct ipath_devdata *dd);
-void ipath_inc_eeprom_err(struct ipath_devdata *dd, u32 eidx, u32 incr);
-u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
-void ipath_disarm_senderrbufs(struct ipath_devdata *);
-void ipath_force_pio_avail_update(struct ipath_devdata *);
-void signal_ib_event(struct ipath_devdata *dd, enum ib_event_type ev);
-
-/*
- * Set LED override, only the two LSBs have "public" meaning, but
- * any non-zero value substitutes them for the Link and LinkTrain
- * LED states.
- */
-#define IPATH_LED_PHYS 1 /* Physical (linktraining) GREEN LED */
-#define IPATH_LED_LOG 2  /* Logical (link) YELLOW LED */
-void ipath_set_led_override(struct ipath_devdata *dd, unsigned int val);
-
-/* send dma routines */
-int setup_sdma(struct ipath_devdata *);
-void teardown_sdma(struct ipath_devdata *);
-void ipath_restart_sdma(struct ipath_devdata *);
-void ipath_sdma_intr(struct ipath_devdata *);
-int ipath_sdma_verbs_send(struct ipath_devdata *, struct ipath_sge_state *,
-                         u32, struct ipath_verbs_txreq *);
-/* ipath_sdma_lock should be locked before calling this. */
-int ipath_sdma_make_progress(struct ipath_devdata *dd);
-
-/* must be called under ipath_sdma_lock */
-static inline u16 ipath_sdma_descq_freecnt(const struct ipath_devdata *dd)
-{
-       return dd->ipath_sdma_descq_cnt -
-               (dd->ipath_sdma_descq_added - dd->ipath_sdma_descq_removed) -
-               1 - dd->ipath_sdma_desc_nreserved;
-}
-
-static inline void ipath_sdma_desc_reserve(struct ipath_devdata *dd, u16 cnt)
-{
-       dd->ipath_sdma_desc_nreserved += cnt;
-}
-
-static inline void ipath_sdma_desc_unreserve(struct ipath_devdata *dd, u16 cnt)
-{
-       dd->ipath_sdma_desc_nreserved -= cnt;
-}
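The free count is derived from two monotonically increasing counters (added/removed), with one slot kept empty to distinguish full from empty and any reservations subtracted. A small stand-alone sketch of the same arithmetic, with invented field names and sizes:

#include <stdio.h>
#include <stdint.h>

/* toy descriptor-queue accounting, mirroring the freecnt arithmetic above */
struct toy_sdma {
	uint16_t descq_cnt;       /* ring size */
	uint64_t descq_added;     /* monotonically increasing producer count */
	uint64_t descq_removed;   /* monotonically increasing consumer count */
	int      desc_nreserved;  /* descriptors set aside by callers */
};

static uint16_t toy_freecnt(const struct toy_sdma *q)
{
	/* in-flight = added - removed; keep one slot empty to tell
	 * full from empty, and subtract any reservations */
	return q->descq_cnt - (uint16_t)(q->descq_added - q->descq_removed)
		- 1 - q->desc_nreserved;
}

int main(void)
{
	struct toy_sdma q = { .descq_cnt = 256, .descq_added = 1000,
			      .descq_removed = 900, .desc_nreserved = 4 };

	/* 256 - 100 - 1 - 4 = 151 */
	printf("free descriptors: %u\n", (unsigned)toy_freecnt(&q));
	return 0;
}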
-
-/*
- * number of words used for the protocol header if not set by ipath_userinit()
- */
-#define IPATH_DFLT_RCVHDRSIZE 9
-
-int ipath_get_user_pages(unsigned long, size_t, struct page **);
-void ipath_release_user_pages(struct page **, size_t);
-void ipath_release_user_pages_on_close(struct page **, size_t);
-int ipath_eeprom_read(struct ipath_devdata *, u8, void *, int);
-int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
-int ipath_tempsense_read(struct ipath_devdata *, u8 regnum);
-int ipath_tempsense_write(struct ipath_devdata *, u8 regnum, u8 data);
-
-/* these are used for the registers that vary with port */
-void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
-                          unsigned, u64);
-
-/*
- * We could have a single register get/put routine that takes a group type,
- * but this is somewhat clearer and cleaner.  It also gives us some error
- * checking.  64 bit register reads should always work, but are inefficient
- * on Opteron (the northbridge always generates 2 separate HT 32 bit reads),
- * so we use kreg32 wherever possible.  User register and counter register
- * reads are always 32 bit reads, so there is only one form of those routines.
- */
-
-/*
- * At the moment, none of the s-registers are writable, so no
- * ipath_write_sreg().
- */
-
-/**
- * ipath_read_ureg32 - read 32-bit virtualized per-port register
- * @dd: device
- * @regno: register number
- * @port: port number
- *
- * Return the contents of a register that is virtualized to be per port.
- * Returns 0 on errors (not distinguishable from valid contents at
- * runtime; we may add a separate error variable at some point).
- */
-static inline u32 ipath_read_ureg32(const struct ipath_devdata *dd,
-                                   ipath_ureg regno, int port)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return 0;
-
-       return readl(regno + (u64 __iomem *)
-                    (dd->ipath_uregbase +
-                     (char __iomem *)dd->ipath_kregbase +
-                     dd->ipath_ureg_align * port));
-}
-
-/**
- * ipath_write_ureg - write 64-bit virtualized per-port register
- * @dd: device
- * @regno: register number
- * @value: value
- * @port: port
- *
- * Write the contents of a register that is virtualized to be per port.
- */
-static inline void ipath_write_ureg(const struct ipath_devdata *dd,
-                                   ipath_ureg regno, u64 value, int port)
-{
-       u64 __iomem *ubase = (u64 __iomem *)
-               (dd->ipath_uregbase + (char __iomem *) dd->ipath_kregbase +
-                dd->ipath_ureg_align * port);
-       if (dd->ipath_kregbase)
-               writeq(value, &ubase[regno]);
-}
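Both helpers compute the per-port user register address as kregbase plus the uregbase offset plus port * ureg_align, then index by register number. A user-space sketch of that address arithmetic over plain memory (no MMIO, made-up offsets):

#include <stdio.h>
#include <stdint.h>

/* toy model of the per-port user register layout: a flat "BAR" where the
 * user registers for port p start at uregbase + p * ureg_align */
int main(void)
{
	uint64_t bar[1024] = { 0 };	/* stand-in for the mapped chip space */
	char *kregbase = (char *)bar;
	uint32_t uregbase = 0x1000;	/* byte offset of port 0 user regs */
	uint32_t ureg_align = 0x200;	/* byte stride between ports */
	int port = 2, regno = 3;

	/* same arithmetic as ipath_read_ureg32/ipath_write_ureg above */
	uint64_t *ubase = (uint64_t *)(kregbase + uregbase + ureg_align * port);
	ubase[regno] = 0xdeadbeef;

	printf("port %d reg %d = 0x%llx\n", port, regno,
	       (unsigned long long)ubase[regno]);
	return 0;
}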
-
-static inline u32 ipath_read_kreg32(const struct ipath_devdata *dd,
-                                   ipath_kreg regno)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return -1;
-       return readl((u32 __iomem *) & dd->ipath_kregbase[regno]);
-}
-
-static inline u64 ipath_read_kreg64(const struct ipath_devdata *dd,
-                                   ipath_kreg regno)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return -1;
-
-       return readq(&dd->ipath_kregbase[regno]);
-}
-
-static inline void ipath_write_kreg(const struct ipath_devdata *dd,
-                                   ipath_kreg regno, u64 value)
-{
-       if (dd->ipath_kregbase)
-               writeq(value, &dd->ipath_kregbase[regno]);
-}
-
-static inline u64 ipath_read_creg(const struct ipath_devdata *dd,
-                                 ipath_sreg regno)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return 0;
-
-       return readq(regno + (u64 __iomem *)
-                    (dd->ipath_cregbase +
-                     (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline u32 ipath_read_creg32(const struct ipath_devdata *dd,
-                                        ipath_sreg regno)
-{
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT))
-               return 0;
-       return readl(regno + (u64 __iomem *)
-                    (dd->ipath_cregbase +
-                     (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline void ipath_write_creg(const struct ipath_devdata *dd,
-                                   ipath_creg regno, u64 value)
-{
-       if (dd->ipath_kregbase)
-               writeq(value, regno + (u64 __iomem *)
-                      (dd->ipath_cregbase +
-                       (char __iomem *)dd->ipath_kregbase));
-}
-
-static inline void ipath_clear_rcvhdrtail(const struct ipath_portdata *pd)
-{
-       *((u64 *) pd->port_rcvhdrtail_kvaddr) = 0ULL;
-}
-
-static inline u32 ipath_get_rcvhdrtail(const struct ipath_portdata *pd)
-{
-       return (u32) le64_to_cpu(*((volatile __le64 *)
-                               pd->port_rcvhdrtail_kvaddr));
-}
-
-static inline u32 ipath_get_hdrqtail(const struct ipath_portdata *pd)
-{
-       const struct ipath_devdata *dd = pd->port_dd;
-       u32 hdrqtail;
-
-       if (dd->ipath_flags & IPATH_NODMA_RTAIL) {
-               __le32 *rhf_addr;
-               u32 seq;
-
-               rhf_addr = (__le32 *) pd->port_rcvhdrq +
-                       pd->port_head + dd->ipath_rhf_offset;
-               seq = ipath_hdrget_seq(rhf_addr);
-               hdrqtail = pd->port_head;
-               if (seq == pd->port_seq_cnt)
-                       hdrqtail++;
-       } else
-               hdrqtail = ipath_get_rcvhdrtail(pd);
-
-       return hdrqtail;
-}
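When IPATH_NODMA_RTAIL is set, the tail is inferred by comparing the sequence number hardware wrote into the RHF at the current head against the software's expected sequence count. A simplified stand-alone sketch of just that check, with toy types rather than the driver's:

#include <stdio.h>
#include <stdint.h>

/* toy version of the "no DMA'd tail" logic: a new entry is present at
 * 'head' when the sequence field written by hardware matches the
 * software's expected sequence count */
struct toy_port {
	uint32_t head;     /* next entry software will look at */
	uint32_t seq_cnt;  /* sequence value software expects next */
};

static uint32_t toy_hdrqtail(const struct toy_port *pd, uint32_t rhf_seq)
{
	uint32_t tail = pd->head;

	if (rhf_seq == pd->seq_cnt)	/* hardware has filled this entry */
		tail++;
	return tail;
}

int main(void)
{
	struct toy_port pd = { .head = 7, .seq_cnt = 12 };

	printf("tail=%u (seq match)\n", toy_hdrqtail(&pd, 12));    /* 8 */
	printf("tail=%u (no new entry)\n", toy_hdrqtail(&pd, 11)); /* 7 */
	return 0;
}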
-
-static inline u64 ipath_read_ireg(const struct ipath_devdata *dd, ipath_kreg r)
-{
-       return (dd->ipath_flags & IPATH_INTREG_64) ?
-               ipath_read_kreg64(dd, r) : ipath_read_kreg32(dd, r);
-}
-
-/*
- * From the contents of IBCStatus (or a saved copy), return the linkstate.
- * Report ACTIVE_DEFER as ACTIVE, because we treat them the same
- * everywhere anyway (and should, for almost all purposes).
- */
-static inline u32 ipath_ib_linkstate(struct ipath_devdata *dd, u64 ibcs)
-{
-       u32 state = (u32)(ibcs >> dd->ibcs_ls_shift) &
-               INFINIPATH_IBCS_LINKSTATE_MASK;
-       if (state == INFINIPATH_IBCS_L_STATE_ACT_DEFER)
-               state = INFINIPATH_IBCS_L_STATE_ACTIVE;
-       return state;
-}
-
-/* from contents of IBCStatus (or a saved copy), return linktrainingstate */
-static inline u32 ipath_ib_linktrstate(struct ipath_devdata *dd, u64 ibcs)
-{
-       return (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-               dd->ibcs_lts_mask;
-}
-
-/*
- * From the contents of IBCStatus (or a saved copy), return the logical
- * link state: the combination of link state and linktraining state
- * (down, active, init, arm, etc.).
- */
-static inline u32 ipath_ib_state(struct ipath_devdata *dd, u64 ibcs)
-{
-       u32 ibs;
-       ibs = (u32)(ibcs >> INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) &
-               dd->ibcs_lts_mask;
-       ibs |= (u32)(ibcs &
-               (INFINIPATH_IBCS_LINKSTATE_MASK << dd->ibcs_ls_shift));
-       return ibs;
-}
-
-/*
- * sysfs interface.
- */
-
-struct device_driver;
-
-extern const char ib_ipath_version[];
-
-extern const struct attribute_group *ipath_driver_attr_groups[];
-
-int ipath_device_create_group(struct device *, struct ipath_devdata *);
-void ipath_device_remove_group(struct device *, struct ipath_devdata *);
-int ipath_expose_reset(struct device *);
-
-int ipath_init_ipathfs(void);
-void ipath_exit_ipathfs(void);
-int ipathfs_add_device(struct ipath_devdata *);
-int ipathfs_remove_device(struct ipath_devdata *);
-
-/*
- * dma_addr wrappers - an address of all 0's is invalid for the hw
- */
-dma_addr_t ipath_map_page(struct pci_dev *, struct page *, unsigned long,
-                         size_t, int);
-dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
-const char *ipath_get_unit_name(int unit);
-
-/*
- * Flush write combining store buffers (if present) and perform a write
- * barrier.
- */
-#if defined(CONFIG_X86_64)
-#define ipath_flush_wc() asm volatile("sfence" ::: "memory")
-#else
-#define ipath_flush_wc() wmb()
-#endif
-
-extern unsigned ipath_debug; /* debugging bit mask */
-extern unsigned ipath_linkrecovery;
-extern unsigned ipath_mtu4096;
-extern struct mutex ipath_mutex;
-
-#define IPATH_DRV_NAME         "ib_ipath"
-#define IPATH_MAJOR            233
-#define IPATH_USER_MINOR_BASE  0
-#define IPATH_DIAGPKT_MINOR    127
-#define IPATH_DIAG_MINOR_BASE  129
-#define IPATH_NMINORS          255
-
-#define ipath_dev_err(dd,fmt,...) \
-       do { \
-               const struct ipath_devdata *__dd = (dd); \
-               if (__dd->pcidev) \
-                       dev_err(&__dd->pcidev->dev, "%s: " fmt, \
-                               ipath_get_unit_name(__dd->ipath_unit), \
-                               ##__VA_ARGS__); \
-               else \
-                       printk(KERN_ERR IPATH_DRV_NAME ": %s: " fmt, \
-                              ipath_get_unit_name(__dd->ipath_unit), \
-                              ##__VA_ARGS__); \
-       } while (0)
-
-#if _IPATH_DEBUGGING
-
-# define __IPATH_DBG_WHICH(which,fmt,...) \
-       do { \
-               if (unlikely(ipath_debug & (which))) \
-                       printk(KERN_DEBUG IPATH_DRV_NAME ": %s: " fmt, \
-                              __func__,##__VA_ARGS__); \
-       } while(0)
-
-# define ipath_dbg(fmt,...) \
-       __IPATH_DBG_WHICH(__IPATH_DBG,fmt,##__VA_ARGS__)
-# define ipath_cdbg(which,fmt,...) \
-       __IPATH_DBG_WHICH(__IPATH_##which##DBG,fmt,##__VA_ARGS__)
-
-#else /* ! _IPATH_DEBUGGING */
-
-# define ipath_dbg(fmt,...)
-# define ipath_cdbg(which,fmt,...)
-
-#endif /* _IPATH_DEBUGGING */
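The debug macros combine a compile-time switch (_IPATH_DEBUGGING) with a runtime bitmask (ipath_debug). A self-contained user-space sketch of the same two-level gating, with local names standing in for the driver's:

#include <stdio.h>

/* compile-time switch, analogous to _IPATH_DEBUGGING */
#define TOY_DEBUGGING 1

/* runtime bitmask, analogous to ipath_debug */
static unsigned toy_debug = 0x2;

#define TOY_DBG_PKT  0x1
#define TOY_DBG_INIT 0x2

#if TOY_DEBUGGING
# define toy_cdbg(which, fmt, ...) \
	do { \
		if (toy_debug & (which)) \
			printf("toy: %s: " fmt, __func__, ##__VA_ARGS__); \
	} while (0)
#else
# define toy_cdbg(which, fmt, ...)
#endif

int main(void)
{
	toy_cdbg(TOY_DBG_INIT, "init done, %d ports\n", 2); /* printed */
	toy_cdbg(TOY_DBG_PKT, "packet seen\n");             /* filtered out */
	return 0;
}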
-
-/*
- * this is used for formatting hw error messages...
- */
-struct ipath_hwerror_msgs {
-       u64 mask;
-       const char *msg;
-};
-
-#define INFINIPATH_HWE_MSG(a, b) { .mask = INFINIPATH_HWE_##a, .msg = b }
-
-/* in ipath_intr.c... */
-void ipath_format_hwerrors(u64 hwerrs,
-                          const struct ipath_hwerror_msgs *hwerrmsgs,
-                          size_t nhwerrmsgs,
-                          char *msg, size_t lmsg);
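ipath_format_hwerrors takes a table of {mask, msg} pairs built with INFINIPATH_HWE_MSG and appears to append the message for each set bit. A simplified user-space sketch of that table-driven formatting, with invented error bits and a plain strncat-based append, not the driver's actual implementation:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct toy_hwerror_msg {
	uint64_t mask;
	const char *msg;
};

#define TOY_HWE_MSG(bit, text) { .mask = (1ULL << (bit)), .msg = (text) }

static const struct toy_hwerror_msg toy_msgs[] = {
	TOY_HWE_MSG(0, "memory parity"),
	TOY_HWE_MSG(3, "PCIe poisoned TLP"),
	TOY_HWE_MSG(9, "SerDes PLL failed to lock"),
};

/* append the message for every set bit that has a table entry */
static void toy_format_hwerrors(uint64_t hwerrs, char *buf, size_t len)
{
	size_t i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(toy_msgs) / sizeof(toy_msgs[0]); i++) {
		if (hwerrs & toy_msgs[i].mask) {
			strncat(buf, toy_msgs[i].msg, len - strlen(buf) - 1);
			strncat(buf, " ", len - strlen(buf) - 1);
		}
	}
}

int main(void)
{
	char msg[128];

	toy_format_hwerrors((1ULL << 0) | (1ULL << 9), msg, sizeof(msg));
	printf("hwerrors: %s\n", msg);
	return 0;
}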
-
-#endif                         /* _IPATH_KERNEL_H */
diff --git a/drivers/staging/rdma/ipath/ipath_keys.c b/drivers/staging/rdma/ipath/ipath_keys.c
deleted file mode 100644 (file)
index c0e933f..0000000
+++ /dev/null
@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <asm/io.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/**
- * ipath_alloc_lkey - allocate an lkey
- * @rkt: lkey table in which to allocate the lkey
- * @mr: memory region that this lkey protects
- *
- * Returns 1 if successful, otherwise returns 0.
- */
-
-int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
-{
-       unsigned long flags;
-       u32 r;
-       u32 n;
-       int ret;
-
-       spin_lock_irqsave(&rkt->lock, flags);
-
-       /* Find the next available LKEY */
-       r = n = rkt->next;
-       for (;;) {
-               if (rkt->table[r] == NULL)
-                       break;
-               r = (r + 1) & (rkt->max - 1);
-               if (r == n) {
-                       spin_unlock_irqrestore(&rkt->lock, flags);
-                       ipath_dbg("LKEY table full\n");
-                       ret = 0;
-                       goto bail;
-               }
-       }
-       rkt->next = (r + 1) & (rkt->max - 1);
-       /*
-        * Make sure lkey is never zero, which is reserved to indicate an
-        * unrestricted LKEY.
-        */
-       rkt->gen++;
-       mr->lkey = (r << (32 - ib_ipath_lkey_table_size)) |
-               ((((1 << (24 - ib_ipath_lkey_table_size)) - 1) & rkt->gen)
-                << 8);
-       if (mr->lkey == 0) {
-               mr->lkey |= 1 << 8;
-               rkt->gen++;
-       }
-       rkt->table[r] = mr;
-       spin_unlock_irqrestore(&rkt->lock, flags);
-
-       ret = 1;
-
-bail:
-       return ret;
-}
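The allocated LKEY packs the table index into the top ib_ipath_lkey_table_size bits and a generation counter above bit 8, and is forced non-zero because zero is reserved for the unrestricted LKEY. A stand-alone sketch of just that encoding, assuming a hypothetical 2^10-entry table:

#include <stdio.h>
#include <stdint.h>

/* toy LKEY encoding with a hypothetical table size of 2^10 entries */
#define TOY_LKEY_TABLE_BITS 10

static uint32_t toy_make_lkey(uint32_t index, uint32_t *gen)
{
	uint32_t lkey;

	(*gen)++;
	/* index in the top TOY_LKEY_TABLE_BITS bits, generation above bit 8 */
	lkey = (index << (32 - TOY_LKEY_TABLE_BITS)) |
	       ((((1u << (24 - TOY_LKEY_TABLE_BITS)) - 1) & *gen) << 8);
	if (lkey == 0) {		/* zero is reserved: unrestricted LKEY */
		lkey |= 1u << 8;
		(*gen)++;
	}
	return lkey;
}

int main(void)
{
	uint32_t gen = 0;

	printf("lkey for slot 5: 0x%08x\n", toy_make_lkey(5, &gen));
	printf("lkey for slot 0: 0x%08x\n", toy_make_lkey(0, &gen));
	return 0;
}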
-
-/**
- * ipath_free_lkey - free an lkey
- * @rkt: table from which to free the lkey
- * @lkey: lkey id to free
- */
-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey)
-{
-       unsigned long flags;
-       u32 r;
-
-       if (lkey == 0)
-               return;
-       r = lkey >> (32 - ib_ipath_lkey_table_size);
-       spin_lock_irqsave(&rkt->lock, flags);
-       rkt->table[r] = NULL;
-       spin_unlock_irqrestore(&rkt->lock, flags);
-}
-
-/**
- * ipath_lkey_ok - check IB SGE for validity and initialize
- * @rkt: table containing lkey to check SGE against
- * @isge: outgoing internal SGE
- * @sge: SGE to check
- * @acc: access flags
- *
- * Returns 1 if valid and successful, otherwise returns 0.
- *
- * Check the IB SGE for validity and initialize our internal version
- * of it.
- */
-int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
-                 struct ib_sge *sge, int acc)
-{
-       struct ipath_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table;
-       struct ipath_mregion *mr;
-       unsigned n, m;
-       size_t off;
-       int ret;
-
-       /*
-        * We use LKEY == zero for kernel virtual addresses
-        * (see ipath_get_dma_mr and ipath_dma.c).
-        */
-       if (sge->lkey == 0) {
-               /* always a kernel port, no locking needed */
-               struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
-
-               if (pd->user) {
-                       ret = 0;
-                       goto bail;
-               }
-               isge->mr = NULL;
-               isge->vaddr = (void *) sge->addr;
-               isge->length = sge->length;
-               isge->sge_length = sge->length;
-               ret = 1;
-               goto bail;
-       }
-       mr = rkt->table[(sge->lkey >> (32 - ib_ipath_lkey_table_size))];
-       if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
-                    qp->ibqp.pd != mr->pd)) {
-               ret = 0;
-               goto bail;
-       }
-
-       off = sge->addr - mr->user_base;
-       if (unlikely(sge->addr < mr->user_base ||
-                    off + sge->length > mr->length ||
-                    (mr->access_flags & acc) != acc)) {
-               ret = 0;
-               goto bail;
-       }
-
-       off += mr->offset;
-       m = 0;
-       n = 0;
-       while (off >= mr->map[m]->segs[n].length) {
-               off -= mr->map[m]->segs[n].length;
-               n++;
-               if (n >= IPATH_SEGSZ) {
-                       m++;
-                       n = 0;
-               }
-       }
-       isge->mr = mr;
-       isge->vaddr = mr->map[m]->segs[n].vaddr + off;
-       isge->length = mr->map[m]->segs[n].length - off;
-       isge->sge_length = sge->length;
-       isge->m = m;
-       isge->n = n;
-
-       ret = 1;
-
-bail:
-       return ret;
-}
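After validating the key, the SGE setup walks the region's two-level segment map (map[m]->segs[n]) until the requested offset falls inside a segment. The same walk over a toy in-memory layout, with an invented segment count per chunk:

#include <stdio.h>
#include <stddef.h>

#define TOY_SEGSZ 4	/* hypothetical segments per map chunk */

struct toy_seg {
	size_t length;
};

int main(void)
{
	/* two map chunks of TOY_SEGSZ segments each, lengths in bytes */
	struct toy_seg map[2][TOY_SEGSZ] = {
		{ {4096}, {4096}, {4096}, {4096} },
		{ {4096}, {4096}, {4096}, {4096} },
	};
	size_t off = 4096 * 5 + 100;	/* offset into the region */
	unsigned m = 0, n = 0;

	/* same walk as ipath_lkey_ok/ipath_rkey_ok: skip whole segments
	 * until 'off' falls within segment (m, n) */
	while (off >= map[m][n].length) {
		off -= map[m][n].length;
		n++;
		if (n >= TOY_SEGSZ) {
			m++;
			n = 0;
		}
	}
	printf("offset lands in chunk %u, segment %u, at byte %zu\n",
	       m, n, off);
	return 0;
}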
-
-/**
- * ipath_rkey_ok - check the IB virtual address, length, and RKEY
- * @dev: infiniband device
- * @ss: SGE state
- * @len: length of data
- * @vaddr: virtual address to place data
- * @rkey: rkey to check
- * @acc: access flags
- *
- * Return 1 if successful, otherwise 0.
- */
-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
-                 u32 len, u64 vaddr, u32 rkey, int acc)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_lkey_table *rkt = &dev->lk_table;
-       struct ipath_sge *sge = &ss->sge;
-       struct ipath_mregion *mr;
-       unsigned n, m;
-       size_t off;
-       int ret;
-
-       /*
-        * We use RKEY == zero for kernel virtual addresses
-        * (see ipath_get_dma_mr and ipath_dma.c).
-        */
-       if (rkey == 0) {
-               /* always a kernel port, no locking needed */
-               struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
-
-               if (pd->user) {
-                       ret = 0;
-                       goto bail;
-               }
-               sge->mr = NULL;
-               sge->vaddr = (void *) vaddr;
-               sge->length = len;
-               sge->sge_length = len;
-               ss->sg_list = NULL;
-               ss->num_sge = 1;
-               ret = 1;
-               goto bail;
-       }
-
-       mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
-       if (unlikely(mr == NULL || mr->lkey != rkey ||
-                    qp->ibqp.pd != mr->pd)) {
-               ret = 0;
-               goto bail;
-       }
-
-       off = vaddr - mr->iova;
-       if (unlikely(vaddr < mr->iova || off + len > mr->length ||
-                    (mr->access_flags & acc) == 0)) {
-               ret = 0;
-               goto bail;
-       }
-
-       off += mr->offset;
-       m = 0;
-       n = 0;
-       while (off >= mr->map[m]->segs[n].length) {
-               off -= mr->map[m]->segs[n].length;
-               n++;
-               if (n >= IPATH_SEGSZ) {
-                       m++;
-                       n = 0;
-               }
-       }
-       sge->mr = mr;
-       sge->vaddr = mr->map[m]->segs[n].vaddr + off;
-       sge->length = mr->map[m]->segs[n].length - off;
-       sge->sge_length = len;
-       sge->m = m;
-       sge->n = n;
-       ss->sg_list = NULL;
-       ss->num_sge = 1;
-
-       ret = 1;
-
-bail:
-       return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_mad.c b/drivers/staging/rdma/ipath/ipath_mad.c
deleted file mode 100644 (file)
index ad3a926..0000000
+++ /dev/null
@@ -1,1521 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-#include <rdma/ib_pma.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-#define IB_SMP_UNSUP_VERSION   cpu_to_be16(0x0004)
-#define IB_SMP_UNSUP_METHOD    cpu_to_be16(0x0008)
-#define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
-#define IB_SMP_INVALID_FIELD   cpu_to_be16(0x001C)
-
-static int reply(struct ib_smp *smp)
-{
-       /*
-        * The verbs framework will handle the directed/LID route
-        * packet changes.
-        */
-       smp->method = IB_MGMT_METHOD_GET_RESP;
-       if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
-               smp->status |= IB_SMP_DIRECTION;
-       return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
-}
-
-static int recv_subn_get_nodedescription(struct ib_smp *smp,
-                                        struct ib_device *ibdev)
-{
-       if (smp->attr_mod)
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
-
-       return reply(smp);
-}
-
-struct nodeinfo {
-       u8 base_version;
-       u8 class_version;
-       u8 node_type;
-       u8 num_ports;
-       __be64 sys_guid;
-       __be64 node_guid;
-       __be64 port_guid;
-       __be16 partition_cap;
-       __be16 device_id;
-       __be32 revision;
-       u8 local_port_num;
-       u8 vendor_id[3];
-} __attribute__ ((packed));
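struct nodeinfo is a packed wire structure whose multi-byte fields are stored big-endian via cpu_to_be16/cpu_to_be32 before the reply goes out. A user-space sketch of the same idea, using a hypothetical packed struct and htons/htonl as stand-ins for the kernel byte-order helpers:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htons/htonl, user-space stand-ins for cpu_to_be*() */

/* hypothetical packed wire structure in the spirit of struct nodeinfo */
struct toy_nodeinfo {
	uint8_t  base_version;
	uint8_t  num_ports;
	uint16_t device_id;	/* big-endian on the wire */
	uint32_t revision;	/* big-endian on the wire */
} __attribute__((packed));

int main(void)
{
	struct toy_nodeinfo ni;
	unsigned majrev = 4, minrev = 2;

	ni.base_version = 1;
	ni.num_ports = 1;
	ni.device_id = htons(0x1234);	/* arbitrary example value */
	/* major revision in the upper 16 bits, minor in the lower */
	ni.revision = htonl((majrev << 16) | minrev);

	printf("sizeof(toy_nodeinfo) = %zu (packed, no padding)\n", sizeof(ni));
	return 0;
}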
-
-static int recv_subn_get_nodeinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev, u8 port)
-{
-       struct nodeinfo *nip = (struct nodeinfo *)&smp->data;
-       struct ipath_devdata *dd = to_idev(ibdev)->dd;
-       u32 vendor, majrev, minrev;
-
-       /* GUID 0 is illegal */
-       if (smp->attr_mod || (dd->ipath_guid == 0))
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       nip->base_version = 1;
-       nip->class_version = 1;
-       nip->node_type = 1;     /* channel adapter */
-       /*
-        * XXX The num_ports value will need a layer function to get
-        * the value if we ever have more than one IB port on a chip.
-        * We will also need to get the GUID for the port.
-        */
-       nip->num_ports = ibdev->phys_port_cnt;
-       /* This is already in network order */
-       nip->sys_guid = to_idev(ibdev)->sys_image_guid;
-       nip->node_guid = dd->ipath_guid;
-       nip->port_guid = dd->ipath_guid;
-       nip->partition_cap = cpu_to_be16(ipath_get_npkeys(dd));
-       nip->device_id = cpu_to_be16(dd->ipath_deviceid);
-       majrev = dd->ipath_majrev;
-       minrev = dd->ipath_minrev;
-       nip->revision = cpu_to_be32((majrev << 16) | minrev);
-       nip->local_port_num = port;
-       vendor = dd->ipath_vendorid;
-       nip->vendor_id[0] = IPATH_SRC_OUI_1;
-       nip->vendor_id[1] = IPATH_SRC_OUI_2;
-       nip->vendor_id[2] = IPATH_SRC_OUI_3;
-
-       return reply(smp);
-}
-
-static int recv_subn_get_guidinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev)
-{
-       u32 startgx = 8 * be32_to_cpu(smp->attr_mod);
-       __be64 *p = (__be64 *) smp->data;
-
-       /* 32 blocks of 8 64-bit GUIDs per block */
-
-       memset(smp->data, 0, sizeof(smp->data));
-
-       /*
-        * We only support one GUID for now.  If this changes, the
-        * portinfo.guid_cap field needs to be updated too.
-        */
-       if (startgx == 0) {
-               __be64 g = to_idev(ibdev)->dd->ipath_guid;
-               if (g == 0)
-                       /* GUID 0 is illegal */
-                       smp->status |= IB_SMP_INVALID_FIELD;
-               else
-                       /* The first is a copy of the read-only HW GUID. */
-                       *p = g;
-       } else
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       return reply(smp);
-}
-
-static void set_link_width_enabled(struct ipath_devdata *dd, u32 w)
-{
-       (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, w);
-}
-
-static void set_link_speed_enabled(struct ipath_devdata *dd, u32 s)
-{
-       (void) dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, s);
-}
-
-static int get_overrunthreshold(struct ipath_devdata *dd)
-{
-       return (dd->ipath_ibcctrl >>
-               INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-               INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-}
-
-/**
- * set_overrunthreshold - set the overrun threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_overrunthreshold(struct ipath_devdata *dd, unsigned n)
-{
-       unsigned v;
-
-       v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT) &
-               INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK;
-       if (v != n) {
-               dd->ipath_ibcctrl &=
-                       ~(INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK <<
-                         INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT);
-               dd->ipath_ibcctrl |=
-                       (u64) n << INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-       }
-       return 0;
-}
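set_overrunthreshold follows the driver's usual shadow-register pattern: read the field out of the cached ipath_ibcctrl, update it with mask and shift, and write the whole shadow back to the chip. A small sketch of that read-modify-write on a plain 64-bit shadow, with an invented field layout:

#include <stdio.h>
#include <stdint.h>

/* hypothetical field layout: 4-bit threshold at bit 36 of the shadow */
#define TOY_THRESH_MASK  0xFULL
#define TOY_THRESH_SHIFT 36

/* returns the updated shadow; a real driver would also write it to the chip */
static uint64_t toy_set_threshold(uint64_t shadow, unsigned n)
{
	unsigned cur = (shadow >> TOY_THRESH_SHIFT) & TOY_THRESH_MASK;

	if (cur != n) {
		shadow &= ~(TOY_THRESH_MASK << TOY_THRESH_SHIFT);
		shadow |= (uint64_t)n << TOY_THRESH_SHIFT;
	}
	return shadow;
}

int main(void)
{
	uint64_t ibcctrl = 0x123400000000ULL;

	ibcctrl = toy_set_threshold(ibcctrl, 0x9);
	printf("threshold now %llu\n",
	       (unsigned long long)((ibcctrl >> TOY_THRESH_SHIFT) & TOY_THRESH_MASK));
	return 0;
}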
-
-static int get_phyerrthreshold(struct ipath_devdata *dd)
-{
-       return (dd->ipath_ibcctrl >>
-               INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-               INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-}
-
-/**
- * set_phyerrthreshold - set the physical error threshold
- * @dd: the infinipath device
- * @n: the new threshold
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_phyerrthreshold(struct ipath_devdata *dd, unsigned n)
-{
-       unsigned v;
-
-       v = (dd->ipath_ibcctrl >> INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
-               INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
-       if (v != n) {
-               dd->ipath_ibcctrl &=
-                       ~(INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK <<
-                         INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT);
-               dd->ipath_ibcctrl |=
-                       (u64) n << INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                                dd->ipath_ibcctrl);
-       }
-       return 0;
-}
-
-/**
- * get_linkdowndefaultstate - get the default linkdown state
- * @dd: the infinipath device
- *
- * Returns zero if the default is POLL, 1 if the default is SLEEP.
- */
-static int get_linkdowndefaultstate(struct ipath_devdata *dd)
-{
-       return !!(dd->ipath_ibcctrl & INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE);
-}
-
-static int recv_subn_get_portinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev, u8 port)
-{
-       struct ipath_ibdev *dev;
-       struct ipath_devdata *dd;
-       struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-       u16 lid;
-       u8 ibcstat;
-       u8 mtu;
-       int ret;
-
-       if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt) {
-               smp->status |= IB_SMP_INVALID_FIELD;
-               ret = reply(smp);
-               goto bail;
-       }
-
-       dev = to_idev(ibdev);
-       dd = dev->dd;
-
-       /* Clear all fields.  Only set the non-zero fields. */
-       memset(smp->data, 0, sizeof(smp->data));
-
-       /* Only return the mkey if the protection field allows it. */
-       if (smp->method == IB_MGMT_METHOD_SET || dev->mkey == smp->mkey ||
-           dev->mkeyprot == 0)
-               pip->mkey = dev->mkey;
-       pip->gid_prefix = dev->gid_prefix;
-       lid = dd->ipath_lid;
-       pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
-       pip->sm_lid = cpu_to_be16(dev->sm_lid);
-       pip->cap_mask = cpu_to_be32(dev->port_cap_flags);
-       /* pip->diag_code; */
-       pip->mkey_lease_period = cpu_to_be16(dev->mkey_lease_period);
-       pip->local_port_num = port;
-       pip->link_width_enabled = dd->ipath_link_width_enabled;
-       pip->link_width_supported = dd->ipath_link_width_supported;
-       pip->link_width_active = dd->ipath_link_width_active;
-       pip->linkspeed_portstate = dd->ipath_link_speed_supported << 4;
-       ibcstat = dd->ipath_lastibcstat;
-       /* map LinkState to IB portinfo values.  */
-       pip->linkspeed_portstate |= ipath_ib_linkstate(dd, ibcstat) + 1;
-
-       pip->portphysstate_linkdown =
-               (ipath_cvt_physportstate[ibcstat & dd->ibcs_lts_mask] << 4) |
-               (get_linkdowndefaultstate(dd) ? 1 : 2);
-       pip->mkeyprot_resv_lmc = (dev->mkeyprot << 6) | dd->ipath_lmc;
-       pip->linkspeedactive_enabled = (dd->ipath_link_speed_active << 4) |
-               dd->ipath_link_speed_enabled;
-       switch (dd->ipath_ibmtu) {
-       case 4096:
-               mtu = IB_MTU_4096;
-               break;
-       case 2048:
-               mtu = IB_MTU_2048;
-               break;
-       case 1024:
-               mtu = IB_MTU_1024;
-               break;
-       case 512:
-               mtu = IB_MTU_512;
-               break;
-       case 256:
-               mtu = IB_MTU_256;
-               break;
-       default:                /* oops, something is wrong */
-               mtu = IB_MTU_2048;
-               break;
-       }
-       pip->neighbormtu_mastersmsl = (mtu << 4) | dev->sm_sl;
-       pip->vlcap_inittype = 0x10;     /* VLCap = VL0, InitType = 0 */
-       pip->vl_high_limit = dev->vl_high_limit;
-       /* pip->vl_arb_high_cap; // only one VL */
-       /* pip->vl_arb_low_cap; // only one VL */
-       /* InitTypeReply = 0 */
-       /* our mtu cap depends on whether 4K MTU is enabled or not */
-       pip->inittypereply_mtucap = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
-       /* HCAs ignore VLStallCount and HOQLife */
-       /* pip->vlstallcnt_hoqlife; */
-       pip->operationalvl_pei_peo_fpi_fpo = 0x10;      /* OVLs = 1 */
-       pip->mkey_violations = cpu_to_be16(dev->mkey_violations);
-       /* P_KeyViolations are counted by hardware. */
-       pip->pkey_violations =
-               cpu_to_be16((ipath_get_cr_errpkey(dd) -
-                            dev->z_pkey_violations) & 0xFFFF);
-       pip->qkey_violations = cpu_to_be16(dev->qkey_violations);
-       /* Only the hardware GUID is supported for now */
-       pip->guid_cap = 1;
-       pip->clientrereg_resv_subnetto = dev->subnet_timeout;
-       /* 32.768 usec. response time (guessing) */
-       pip->resv_resptimevalue = 3;
-       pip->localphyerrors_overrunerrors =
-               (get_phyerrthreshold(dd) << 4) |
-               get_overrunthreshold(dd);
-       /* pip->max_credit_hint; */
-       if (dev->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) {
-               u32 v;
-
-               v = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LINKLATENCY);
-               pip->link_roundtrip_latency[0] = v >> 16;
-               pip->link_roundtrip_latency[1] = v >> 8;
-               pip->link_roundtrip_latency[2] = v;
-       }
-
-       ret = reply(smp);
-
-bail:
-       return ret;
-}
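The PortInfo handlers translate between the device MTU in bytes and the IB MTU enumeration in both directions, falling back to 2048 when the value is unexpected. A stand-alone sketch of both mappings, using a local enum rather than the rdma headers:

#include <stdio.h>

/* local stand-ins for the IB MTU enumeration values */
enum toy_ib_mtu { TOY_MTU_256 = 1, TOY_MTU_512, TOY_MTU_1024,
		  TOY_MTU_2048, TOY_MTU_4096 };

static enum toy_ib_mtu toy_mtu_to_enum(unsigned bytes)
{
	switch (bytes) {
	case 256:  return TOY_MTU_256;
	case 512:  return TOY_MTU_512;
	case 1024: return TOY_MTU_1024;
	case 4096: return TOY_MTU_4096;
	case 2048: /* fall through */
	default:   return TOY_MTU_2048;	/* safe default, as above */
	}
}

static unsigned toy_enum_to_mtu(enum toy_ib_mtu e)
{
	switch (e) {
	case TOY_MTU_256:  return 256;
	case TOY_MTU_512:  return 512;
	case TOY_MTU_1024: return 1024;
	case TOY_MTU_4096: return 4096;
	case TOY_MTU_2048:
	default:           return 2048;
	}
}

int main(void)
{
	printf("4096 bytes -> enum %d -> %u bytes\n",
	       toy_mtu_to_enum(4096), toy_enum_to_mtu(TOY_MTU_4096));
	return 0;
}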
-
-/**
- * get_pkeys - return the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the pkey table is placed here
- */
-static int get_pkeys(struct ipath_devdata *dd, u16 * pkeys)
-{
-       /* always a kernel port, no locking needed */
-       struct ipath_portdata *pd = dd->ipath_pd[0];
-
-       memcpy(pkeys, pd->port_pkeys, sizeof(pd->port_pkeys));
-
-       return 0;
-}
-
-static int recv_subn_get_pkeytable(struct ib_smp *smp,
-                                  struct ib_device *ibdev)
-{
-       u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
-       u16 *p = (u16 *) smp->data;
-       __be16 *q = (__be16 *) smp->data;
-
-       /* 64 blocks of 32 16-bit P_Key entries */
-
-       memset(smp->data, 0, sizeof(smp->data));
-       if (startpx == 0) {
-               struct ipath_ibdev *dev = to_idev(ibdev);
-               unsigned i, n = ipath_get_npkeys(dev->dd);
-
-               get_pkeys(dev->dd, p);
-
-               for (i = 0; i < n; i++)
-                       q[i] = cpu_to_be16(p[i]);
-       } else
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       return reply(smp);
-}
-
-static int recv_subn_set_guidinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev)
-{
-       /* The only GUID we support is the first read-only entry. */
-       return recv_subn_get_guidinfo(smp, ibdev);
-}
-
-/**
- * set_linkdowndefaultstate - set the default linkdown state
- * @dd: the infinipath device
- * @sleep: the new state
- *
- * Note that this will only take effect when the link state changes.
- */
-static int set_linkdowndefaultstate(struct ipath_devdata *dd, int sleep)
-{
-       if (sleep)
-               dd->ipath_ibcctrl |= INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-       else
-               dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
-                        dd->ipath_ibcctrl);
-       return 0;
-}
-
-/**
- * recv_subn_set_portinfo - set port information
- * @smp: the incoming SM packet
- * @ibdev: the infiniband device
- * @port: the port on the device
- *
- * Set Portinfo (see ch. 14.2.5.6).
- */
-static int recv_subn_set_portinfo(struct ib_smp *smp,
-                                 struct ib_device *ibdev, u8 port)
-{
-       struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-       struct ib_event event;
-       struct ipath_ibdev *dev;
-       struct ipath_devdata *dd;
-       char clientrereg = 0;
-       u16 lid, smlid;
-       u8 lwe;
-       u8 lse;
-       u8 state;
-       u16 lstate;
-       u32 mtu;
-       int ret, ore;
-
-       if (be32_to_cpu(smp->attr_mod) > ibdev->phys_port_cnt)
-               goto err;
-
-       dev = to_idev(ibdev);
-       dd = dev->dd;
-       event.device = ibdev;
-       event.element.port_num = port;
-
-       dev->mkey = pip->mkey;
-       dev->gid_prefix = pip->gid_prefix;
-       dev->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period);
-
-       lid = be16_to_cpu(pip->lid);
-       if (dd->ipath_lid != lid ||
-           dd->ipath_lmc != (pip->mkeyprot_resv_lmc & 7)) {
-               /* Must be a valid unicast LID address. */
-               if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE)
-                       goto err;
-               ipath_set_lid(dd, lid, pip->mkeyprot_resv_lmc & 7);
-               event.event = IB_EVENT_LID_CHANGE;
-               ib_dispatch_event(&event);
-       }
-
-       smlid = be16_to_cpu(pip->sm_lid);
-       if (smlid != dev->sm_lid) {
-               /* Must be a valid unicast LID address. */
-               if (smlid == 0 || smlid >= IPATH_MULTICAST_LID_BASE)
-                       goto err;
-               dev->sm_lid = smlid;
-               event.event = IB_EVENT_SM_CHANGE;
-               ib_dispatch_event(&event);
-       }
-
-       /* Allow 1x or 4x to be set (see 14.2.6.6). */
-       lwe = pip->link_width_enabled;
-       if (lwe) {
-               if (lwe == 0xFF)
-                       lwe = dd->ipath_link_width_supported;
-               else if (lwe >= 16 || (lwe & ~dd->ipath_link_width_supported))
-                       goto err;
-               set_link_width_enabled(dd, lwe);
-       }
-
-       /* Allow 2.5 or 5.0 Gb/s. */
-       lse = pip->linkspeedactive_enabled & 0xF;
-       if (lse) {
-               if (lse == 15)
-                       lse = dd->ipath_link_speed_supported;
-               else if (lse >= 8 || (lse & ~dd->ipath_link_speed_supported))
-                       goto err;
-               set_link_speed_enabled(dd, lse);
-       }
-
-       /* Set link down default state. */
-       switch (pip->portphysstate_linkdown & 0xF) {
-       case 0: /* NOP */
-               break;
-       case 1: /* SLEEP */
-               if (set_linkdowndefaultstate(dd, 1))
-                       goto err;
-               break;
-       case 2: /* POLL */
-               if (set_linkdowndefaultstate(dd, 0))
-                       goto err;
-               break;
-       default:
-               goto err;
-       }
-
-       dev->mkeyprot = pip->mkeyprot_resv_lmc >> 6;
-       dev->vl_high_limit = pip->vl_high_limit;
-
-       switch ((pip->neighbormtu_mastersmsl >> 4) & 0xF) {
-       case IB_MTU_256:
-               mtu = 256;
-               break;
-       case IB_MTU_512:
-               mtu = 512;
-               break;
-       case IB_MTU_1024:
-               mtu = 1024;
-               break;
-       case IB_MTU_2048:
-               mtu = 2048;
-               break;
-       case IB_MTU_4096:
-               if (!ipath_mtu4096)
-                       goto err;
-               mtu = 4096;
-               break;
-       default:
-               /* XXX We have already partially updated our state! */
-               goto err;
-       }
-       ipath_set_mtu(dd, mtu);
-
-       dev->sm_sl = pip->neighbormtu_mastersmsl & 0xF;
-
-       /* We only support VL0 */
-       if (((pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF) > 1)
-               goto err;
-
-       if (pip->mkey_violations == 0)
-               dev->mkey_violations = 0;
-
-       /*
-        * Hardware counter can't be reset so snapshot and subtract
-        * later.
-        */
-       if (pip->pkey_violations == 0)
-               dev->z_pkey_violations = ipath_get_cr_errpkey(dd);
-
-       if (pip->qkey_violations == 0)
-               dev->qkey_violations = 0;
-
-       ore = pip->localphyerrors_overrunerrors;
-       if (set_phyerrthreshold(dd, (ore >> 4) & 0xF))
-               goto err;
-
-       if (set_overrunthreshold(dd, (ore & 0xF)))
-               goto err;
-
-       dev->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F;
-
-       if (pip->clientrereg_resv_subnetto & 0x80) {
-               clientrereg = 1;
-               event.event = IB_EVENT_CLIENT_REREGISTER;
-               ib_dispatch_event(&event);
-       }
-
-       /*
-        * Do the port state change now that the other link parameters
-        * have been set.
-        * Changing the port physical state only makes sense if the link
-        * is down or is being set to down.
-        */
-       state = pip->linkspeed_portstate & 0xF;
-       lstate = (pip->portphysstate_linkdown >> 4) & 0xF;
-       if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP))
-               goto err;
-
-       /*
-        * Only state changes of DOWN, ARM, and ACTIVE are valid
-        * and must be in the correct state to take effect (see 7.2.6).
-        */
-       switch (state) {
-       case IB_PORT_NOP:
-               if (lstate == 0)
-                       break;
-               /* FALLTHROUGH */
-       case IB_PORT_DOWN:
-               if (lstate == 0)
-                       lstate = IPATH_IB_LINKDOWN_ONLY;
-               else if (lstate == 1)
-                       lstate = IPATH_IB_LINKDOWN_SLEEP;
-               else if (lstate == 2)
-                       lstate = IPATH_IB_LINKDOWN;
-               else if (lstate == 3)
-                       lstate = IPATH_IB_LINKDOWN_DISABLE;
-               else
-                       goto err;
-               ipath_set_linkstate(dd, lstate);
-               if (lstate == IPATH_IB_LINKDOWN_DISABLE) {
-                       ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-                       goto done;
-               }
-               ipath_wait_linkstate(dd, IPATH_LINKINIT | IPATH_LINKARMED |
-                               IPATH_LINKACTIVE, 1000);
-               break;
-       case IB_PORT_ARMED:
-               ipath_set_linkstate(dd, IPATH_IB_LINKARM);
-               break;
-       case IB_PORT_ACTIVE:
-               ipath_set_linkstate(dd, IPATH_IB_LINKACTIVE);
-               break;
-       default:
-               /* XXX We have already partially updated our state! */
-               goto err;
-       }
-
-       ret = recv_subn_get_portinfo(smp, ibdev, port);
-
-       if (clientrereg)
-               pip->clientrereg_resv_subnetto |= 0x80;
-
-       goto done;
-
-err:
-       smp->status |= IB_SMP_INVALID_FIELD;
-       ret = recv_subn_get_portinfo(smp, ibdev, port);
-
-done:
-       return ret;
-}
-
-/**
- * rm_pkey - decrement the reference count for the given PKEY
- * @dd: the infinipath device
- * @key: the PKEY index
- *
- * Return true if this was the last reference and the hardware table entry
- * needs to be changed.
- */
-static int rm_pkey(struct ipath_devdata *dd, u16 key)
-{
-       int i;
-       int ret;
-
-       for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (dd->ipath_pkeys[i] != key)
-                       continue;
-               if (atomic_dec_and_test(&dd->ipath_pkeyrefs[i])) {
-                       dd->ipath_pkeys[i] = 0;
-                       ret = 1;
-                       goto bail;
-               }
-               break;
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * add_pkey - add the given PKEY to the hardware table
- * @dd: the infinipath device
- * @key: the PKEY
- *
- * Return an error code if unable to add the entry, zero if no change,
- * or 1 if the hardware PKEY register needs to be updated.
- */
-static int add_pkey(struct ipath_devdata *dd, u16 key)
-{
-       int i;
-       u16 lkey = key & 0x7FFF;
-       int any = 0;
-       int ret;
-
-       if (lkey == 0x7FFF) {
-               ret = 0;
-               goto bail;
-       }
-
-       /* Look for an empty slot or a matching PKEY. */
-       for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (!dd->ipath_pkeys[i]) {
-                       any++;
-                       continue;
-               }
-               /* If it matches exactly, try to increment the ref count */
-               if (dd->ipath_pkeys[i] == key) {
-                       if (atomic_inc_return(&dd->ipath_pkeyrefs[i]) > 1) {
-                               ret = 0;
-                               goto bail;
-                       }
-                       /* Lost the race. Look for an empty slot below. */
-                       atomic_dec(&dd->ipath_pkeyrefs[i]);
-                       any++;
-               }
-               /*
-                * It makes no sense to have both the limited and unlimited
-                * PKEY set at the same time since the unlimited one will
-                * disable the limited one.
-                */
-               if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
-                       ret = -EEXIST;
-                       goto bail;
-               }
-       }
-       if (!any) {
-               ret = -EBUSY;
-               goto bail;
-       }
-       for (i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
-               if (!dd->ipath_pkeys[i] &&
-                   atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
-                       /* for ipathstats, etc. */
-                       ipath_stats.sps_pkeys[i] = lkey;
-                       dd->ipath_pkeys[i] = key;
-                       ret = 1;
-                       goto bail;
-               }
-       }
-       ret = -EBUSY;
-
-bail:
-       return ret;
-}
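The 0x7FFF masking above follows the standard InfiniBand P_Key layout: bit 15 is the membership bit (full vs. limited member) and the low 15 bits name the partition, which is why a full and a limited key with the same base are treated as the same entry and the full one supersedes the limited one. A minimal userspace sketch of that decomposition (illustrative only, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Low 15 bits identify the partition; bit 15 is the membership bit. */
static uint16_t pkey_base(uint16_t key)        { return key & 0x7FFF; }
static bool     pkey_full_member(uint16_t key) { return (key & 0x8000) != 0; }

int main(void)
{
        uint16_t full = 0x8001, limited = 0x0001;

        /* Same partition (base 0x0001), different membership. */
        printf("base %#x vs %#x, full? %d vs %d\n",
               pkey_base(full), pkey_base(limited),
               pkey_full_member(full), pkey_full_member(limited));
        return 0;
}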
-
-/**
- * set_pkeys - set the PKEY table for port 0
- * @dd: the infinipath device
- * @pkeys: the PKEY table
- * @port: the port number on the device
- */
-static int set_pkeys(struct ipath_devdata *dd, u16 *pkeys, u8 port)
-{
-       struct ipath_portdata *pd;
-       int i;
-       int changed = 0;
-
-       /* always a kernel port, no locking needed */
-       pd = dd->ipath_pd[0];
-
-       for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
-               u16 key = pkeys[i];
-               u16 okey = pd->port_pkeys[i];
-
-               if (key == okey)
-                       continue;
-               /*
-                * The value of this PKEY table entry is changing.
-                * Remove the old entry in the hardware's array of PKEYs.
-                */
-               if (okey & 0x7FFF)
-                       changed |= rm_pkey(dd, okey);
-               if (key & 0x7FFF) {
-                       int ret = add_pkey(dd, key);
-
-                       if (ret < 0)
-                               key = 0;
-                       else
-                               changed |= ret;
-               }
-               pd->port_pkeys[i] = key;
-       }
-       if (changed) {
-               u64 pkey;
-               struct ib_event event;
-
-               pkey = (u64) dd->ipath_pkeys[0] |
-                       ((u64) dd->ipath_pkeys[1] << 16) |
-                       ((u64) dd->ipath_pkeys[2] << 32) |
-                       ((u64) dd->ipath_pkeys[3] << 48);
-               ipath_cdbg(VERBOSE, "p0 new pkey reg %llx\n",
-                          (unsigned long long) pkey);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
-                                pkey);
-
-               event.event = IB_EVENT_PKEY_CHANGE;
-               event.device = &dd->verbs_dev->ibdev;
-               event.element.port_num = port;
-               ib_dispatch_event(&event);
-       }
-       return 0;
-}
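set_pkeys() above folds the four 16-bit table entries into one 64-bit partition-key register write, entry 0 in the least significant word. The same packing as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Pack four 16-bit P_Keys into one 64-bit word, entry 0 in the lowest bits. */
static uint64_t pack_pkeys(const uint16_t k[4])
{
        return (uint64_t)k[0] |
               ((uint64_t)k[1] << 16) |
               ((uint64_t)k[2] << 32) |
               ((uint64_t)k[3] << 48);
}

int main(void)
{
        uint16_t keys[4] = { 0xFFFF, 0x8001, 0, 0 };

        printf("pkey reg %#llx\n", (unsigned long long)pack_pkeys(keys));
        return 0;
}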
-
-static int recv_subn_set_pkeytable(struct ib_smp *smp,
-                                  struct ib_device *ibdev, u8 port)
-{
-       u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff);
-       __be16 *p = (__be16 *) smp->data;
-       u16 *q = (u16 *) smp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       unsigned i, n = ipath_get_npkeys(dev->dd);
-
-       for (i = 0; i < n; i++)
-               q[i] = be16_to_cpu(p[i]);
-
-       if (startpx != 0 || set_pkeys(dev->dd, q, port) != 0)
-               smp->status |= IB_SMP_INVALID_FIELD;
-
-       return recv_subn_get_pkeytable(smp, ibdev);
-}
-
-static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
-{
-       struct ib_class_port_info *p =
-               (struct ib_class_port_info *)pmp->data;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-
-       if (pmp->mad_hdr.attr_mod != 0)
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
-       /* Indicate AllPortSelect is valid (only one port anyway) */
-       p->capability_mask = cpu_to_be16(1 << 8);
-       p->base_version = 1;
-       p->class_version = 1;
-       /*
-        * Expected response time is 4.096 usec. * 2^18 == 1.073741824
-        * sec.
-        */
-       p->resp_time_value = 18;
-
-       return reply((struct ib_smp *) pmp);
-}
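The resp_time_value of 18 uses the usual ClassPortInfo encoding of 4.096 usec scaled by 2^value, which is where the comment's 1.073741824 sec figure comes from. A quick check of that arithmetic:

#include <stdio.h>

int main(void)
{
        /* ClassPortInfo response time: 4.096 usec scaled by 2^value. */
        unsigned int value = 18;
        double usec = 4.096 * (double)(1u << value);

        printf("resp_time_value %u -> %.9f sec\n", value, usec / 1e6);
        return 0;
}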
-
-/*
- * The PortSamplesControl.CounterMasks field is an array of 3-bit fields
- * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
- * We support 5 counters which only count the mandatory quantities.
- */
-#define COUNTER_MASK(q, n) (q << ((9 - n) * 3))
-#define COUNTER_MASK0_9 cpu_to_be32(COUNTER_MASK(1, 0) | \
-                                   COUNTER_MASK(1, 1) | \
-                                   COUNTER_MASK(1, 2) | \
-                                   COUNTER_MASK(1, 3) | \
-                                   COUNTER_MASK(1, 4))
-
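To make the 3-bit CounterMasks packing concrete, the sketch below rebuilds the mask in host byte order (the driver additionally wraps it in cpu_to_be32) and extracts one counter's field; the counter_cap() helper is purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Counter n occupies bits (9 - n) * 3 .. (9 - n) * 3 + 2 of the 32-bit word. */
#define COUNTER_MASK(q, n) ((uint32_t)(q) << ((9 - (n)) * 3))

static unsigned int counter_cap(uint32_t mask, unsigned int n)
{
        return (mask >> ((9 - n) * 3)) & 0x7;
}

int main(void)
{
        uint32_t mask = COUNTER_MASK(1, 0) | COUNTER_MASK(1, 1) |
                        COUNTER_MASK(1, 2) | COUNTER_MASK(1, 3) |
                        COUNTER_MASK(1, 4);
        unsigned int n;

        for (n = 0; n < 10; n++)
                printf("counter %u capability code %u\n", n, counter_cap(mask, n));
        return 0;
}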
-static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
-                                          struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portsamplescontrol *p =
-               (struct ib_pma_portsamplescontrol *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-       unsigned long flags;
-       u8 port_select = p->port_select;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-
-       p->port_select = port_select;
-       if (pmp->mad_hdr.attr_mod != 0 ||
-           (port_select != port && port_select != 0xFF))
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-       /*
-        * Ticks are 10x the link transfer period which for 2.5 Gb/s is 4
-        * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
-        * intervals are counted in ticks.  Since we use Linux timers, which
-        * count in jiffies, we can't sample for less than 1000 ticks if HZ
-        * == 1000 (4000 ticks if HZ is 250).  link_speed_active returns 2 for
-        * DDR, 1 for SDR, set the tick to 1 for DDR, 0 for SDR on chips that
-        * have hardware support for delaying packets.
-        */
-       if (crp->cr_psstat)
-               p->tick = dev->dd->ipath_link_speed_active - 1;
-       else
-               p->tick = 250;          /* 1 usec. */
-       p->counter_width = 4;   /* 32 bit counters */
-       p->counter_mask0_9 = COUNTER_MASK0_9;
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       if (crp->cr_psstat)
-               p->sample_status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-       else
-               p->sample_status = dev->pma_sample_status;
-       p->sample_start = cpu_to_be32(dev->pma_sample_start);
-       p->sample_interval = cpu_to_be32(dev->pma_sample_interval);
-       p->tag = cpu_to_be16(dev->pma_tag);
-       p->counter_select[0] = dev->pma_counter_select[0];
-       p->counter_select[1] = dev->pma_counter_select[1];
-       p->counter_select[2] = dev->pma_counter_select[2];
-       p->counter_select[3] = dev->pma_counter_select[3];
-       p->counter_select[4] = dev->pma_counter_select[4];
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-       return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
-                                          struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portsamplescontrol *p =
-               (struct ib_pma_portsamplescontrol *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-       unsigned long flags;
-       u8 status;
-       int ret;
-
-       if (pmp->mad_hdr.attr_mod != 0 ||
-           (p->port_select != port && p->port_select != 0xFF)) {
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-               ret = reply((struct ib_smp *) pmp);
-               goto bail;
-       }
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       if (crp->cr_psstat)
-               status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-       else
-               status = dev->pma_sample_status;
-       if (status == IB_PMA_SAMPLE_STATUS_DONE) {
-               dev->pma_sample_start = be32_to_cpu(p->sample_start);
-               dev->pma_sample_interval = be32_to_cpu(p->sample_interval);
-               dev->pma_tag = be16_to_cpu(p->tag);
-               dev->pma_counter_select[0] = p->counter_select[0];
-               dev->pma_counter_select[1] = p->counter_select[1];
-               dev->pma_counter_select[2] = p->counter_select[2];
-               dev->pma_counter_select[3] = p->counter_select[3];
-               dev->pma_counter_select[4] = p->counter_select[4];
-               if (crp->cr_psstat) {
-                       ipath_write_creg(dev->dd, crp->cr_psinterval,
-                                        dev->pma_sample_interval);
-                       ipath_write_creg(dev->dd, crp->cr_psstart,
-                                        dev->pma_sample_start);
-               } else
-                       dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_STARTED;
-       }
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-       ret = recv_pma_get_portsamplescontrol(pmp, ibdev, port);
-
-bail:
-       return ret;
-}
-
-static u64 get_counter(struct ipath_ibdev *dev,
-                      struct ipath_cregs const *crp,
-                      __be16 sel)
-{
-       u64 ret;
-
-       switch (sel) {
-       case IB_PMA_PORT_XMIT_DATA:
-               ret = (crp->cr_psxmitdatacount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psxmitdatacount) :
-                       dev->ipath_sword;
-               break;
-       case IB_PMA_PORT_RCV_DATA:
-               ret = (crp->cr_psrcvdatacount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psrcvdatacount) :
-                       dev->ipath_rword;
-               break;
-       case IB_PMA_PORT_XMIT_PKTS:
-               ret = (crp->cr_psxmitpktscount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psxmitpktscount) :
-                       dev->ipath_spkts;
-               break;
-       case IB_PMA_PORT_RCV_PKTS:
-               ret = (crp->cr_psrcvpktscount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psrcvpktscount) :
-                       dev->ipath_rpkts;
-               break;
-       case IB_PMA_PORT_XMIT_WAIT:
-               ret = (crp->cr_psxmitwaitcount) ?
-                       ipath_read_creg32(dev->dd, crp->cr_psxmitwaitcount) :
-                       dev->ipath_xmit_wait;
-               break;
-       default:
-               ret = 0;
-       }
-
-       return ret;
-}
-
-static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
-                                         struct ib_device *ibdev)
-{
-       struct ib_pma_portsamplesresult *p =
-               (struct ib_pma_portsamplesresult *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-       u8 status;
-       int i;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-       p->tag = cpu_to_be16(dev->pma_tag);
-       if (crp->cr_psstat)
-               status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-       else
-               status = dev->pma_sample_status;
-       p->sample_status = cpu_to_be16(status);
-       for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-               p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
-                   cpu_to_be32(
-                       get_counter(dev, crp, dev->pma_counter_select[i]));
-
-       return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
-                                             struct ib_device *ibdev)
-{
-       struct ib_pma_portsamplesresult_ext *p =
-               (struct ib_pma_portsamplesresult_ext *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_cregs const *crp = dev->dd->ipath_cregs;
-       u8 status;
-       int i;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-       p->tag = cpu_to_be16(dev->pma_tag);
-       if (crp->cr_psstat)
-               status = ipath_read_creg32(dev->dd, crp->cr_psstat);
-       else
-               status = dev->pma_sample_status;
-       p->sample_status = cpu_to_be16(status);
-       /* 64 bits */
-       p->extended_width = cpu_to_be32(0x80000000);
-       for (i = 0; i < ARRAY_SIZE(dev->pma_counter_select); i++)
-               p->counter[i] = (status != IB_PMA_SAMPLE_STATUS_DONE) ? 0 :
-                   cpu_to_be64(
-                       get_counter(dev, crp, dev->pma_counter_select[i]));
-
-       return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
-                                    struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
-               pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_verbs_counters cntrs;
-       u8 port_select = p->port_select;
-
-       ipath_get_counters(dev->dd, &cntrs);
-
-       /* Adjust counters for any resets done. */
-       cntrs.symbol_error_counter -= dev->z_symbol_error_counter;
-       cntrs.link_error_recovery_counter -=
-               dev->z_link_error_recovery_counter;
-       cntrs.link_downed_counter -= dev->z_link_downed_counter;
-       cntrs.port_rcv_errors += dev->rcv_errors;
-       cntrs.port_rcv_errors -= dev->z_port_rcv_errors;
-       cntrs.port_rcv_remphys_errors -= dev->z_port_rcv_remphys_errors;
-       cntrs.port_xmit_discards -= dev->z_port_xmit_discards;
-       cntrs.port_xmit_data -= dev->z_port_xmit_data;
-       cntrs.port_rcv_data -= dev->z_port_rcv_data;
-       cntrs.port_xmit_packets -= dev->z_port_xmit_packets;
-       cntrs.port_rcv_packets -= dev->z_port_rcv_packets;
-       cntrs.local_link_integrity_errors -=
-               dev->z_local_link_integrity_errors;
-       cntrs.excessive_buffer_overrun_errors -=
-               dev->z_excessive_buffer_overrun_errors;
-       cntrs.vl15_dropped -= dev->z_vl15_dropped;
-       cntrs.vl15_dropped += dev->n_vl15_dropped;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-
-       p->port_select = port_select;
-       if (pmp->mad_hdr.attr_mod != 0 ||
-           (port_select != port && port_select != 0xFF))
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
-       if (cntrs.symbol_error_counter > 0xFFFFUL)
-               p->symbol_error_counter = cpu_to_be16(0xFFFF);
-       else
-               p->symbol_error_counter =
-                       cpu_to_be16((u16)cntrs.symbol_error_counter);
-       if (cntrs.link_error_recovery_counter > 0xFFUL)
-               p->link_error_recovery_counter = 0xFF;
-       else
-               p->link_error_recovery_counter =
-                       (u8)cntrs.link_error_recovery_counter;
-       if (cntrs.link_downed_counter > 0xFFUL)
-               p->link_downed_counter = 0xFF;
-       else
-               p->link_downed_counter = (u8)cntrs.link_downed_counter;
-       if (cntrs.port_rcv_errors > 0xFFFFUL)
-               p->port_rcv_errors = cpu_to_be16(0xFFFF);
-       else
-               p->port_rcv_errors =
-                       cpu_to_be16((u16) cntrs.port_rcv_errors);
-       if (cntrs.port_rcv_remphys_errors > 0xFFFFUL)
-               p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF);
-       else
-               p->port_rcv_remphys_errors =
-                       cpu_to_be16((u16)cntrs.port_rcv_remphys_errors);
-       if (cntrs.port_xmit_discards > 0xFFFFUL)
-               p->port_xmit_discards = cpu_to_be16(0xFFFF);
-       else
-               p->port_xmit_discards =
-                       cpu_to_be16((u16)cntrs.port_xmit_discards);
-       if (cntrs.local_link_integrity_errors > 0xFUL)
-               cntrs.local_link_integrity_errors = 0xFUL;
-       if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
-               cntrs.excessive_buffer_overrun_errors = 0xFUL;
-       p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
-               cntrs.excessive_buffer_overrun_errors;
-       if (cntrs.vl15_dropped > 0xFFFFUL)
-               p->vl15_dropped = cpu_to_be16(0xFFFF);
-       else
-               p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped);
-       if (cntrs.port_xmit_data > 0xFFFFFFFFUL)
-               p->port_xmit_data = cpu_to_be32(0xFFFFFFFF);
-       else
-               p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data);
-       if (cntrs.port_rcv_data > 0xFFFFFFFFUL)
-               p->port_rcv_data = cpu_to_be32(0xFFFFFFFF);
-       else
-               p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data);
-       if (cntrs.port_xmit_packets > 0xFFFFFFFFUL)
-               p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF);
-       else
-               p->port_xmit_packets =
-                       cpu_to_be32((u32)cntrs.port_xmit_packets);
-       if (cntrs.port_rcv_packets > 0xFFFFFFFFUL)
-               p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF);
-       else
-               p->port_rcv_packets =
-                       cpu_to_be32((u32) cntrs.port_rcv_packets);
-
-       return reply((struct ib_smp *) pmp);
-}
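Most of the assignments above repeat one operation: saturate a 64-bit software counter to the width of its fixed-size PMA field (8, 16 or 32 bits) before byte-swapping it. A hypothetical helper expressing that pattern once:

#include <stdint.h>
#include <stdio.h>

/* Saturate a 64-bit counter to a field that can hold at most 'max'. */
static uint64_t sat_counter(uint64_t val, uint64_t max)
{
        return val > max ? max : val;
}

int main(void)
{
        /* 16-bit PortRcvErrors saturates at 0xFFFF, 8-bit LinkDowned at 0xFF. */
        printf("PortRcvErrors -> %#llx\n",
               (unsigned long long)sat_counter(0x12345, 0xFFFF));
        printf("LinkDowned    -> %#llx\n",
               (unsigned long long)sat_counter(300, 0xFF));
        return 0;
}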
-
-static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
-                                        struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portcounters_ext *p =
-               (struct ib_pma_portcounters_ext *)pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       u64 swords, rwords, spkts, rpkts, xwait;
-       u8 port_select = p->port_select;
-
-       ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
-                               &rpkts, &xwait);
-
-       /* Adjust counters for any resets done. */
-       swords -= dev->z_port_xmit_data;
-       rwords -= dev->z_port_rcv_data;
-       spkts -= dev->z_port_xmit_packets;
-       rpkts -= dev->z_port_rcv_packets;
-
-       memset(pmp->data, 0, sizeof(pmp->data));
-
-       p->port_select = port_select;
-       if (pmp->mad_hdr.attr_mod != 0 ||
-           (port_select != port && port_select != 0xFF))
-               pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
-
-       p->port_xmit_data = cpu_to_be64(swords);
-       p->port_rcv_data = cpu_to_be64(rwords);
-       p->port_xmit_packets = cpu_to_be64(spkts);
-       p->port_rcv_packets = cpu_to_be64(rpkts);
-       p->port_unicast_xmit_packets = cpu_to_be64(dev->n_unicast_xmit);
-       p->port_unicast_rcv_packets = cpu_to_be64(dev->n_unicast_rcv);
-       p->port_multicast_xmit_packets = cpu_to_be64(dev->n_multicast_xmit);
-       p->port_multicast_rcv_packets = cpu_to_be64(dev->n_multicast_rcv);
-
-       return reply((struct ib_smp *) pmp);
-}
-
-static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
-                                    struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
-               pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_verbs_counters cntrs;
-
-       /*
-        * Since the HW doesn't support clearing counters, we save the
-        * current count and subtract it from future responses.
-        */
-       ipath_get_counters(dev->dd, &cntrs);
-
-       if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR)
-               dev->z_symbol_error_counter = cntrs.symbol_error_counter;
-
-       if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY)
-               dev->z_link_error_recovery_counter =
-                       cntrs.link_error_recovery_counter;
-
-       if (p->counter_select & IB_PMA_SEL_LINK_DOWNED)
-               dev->z_link_downed_counter = cntrs.link_downed_counter;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS)
-               dev->z_port_rcv_errors =
-                       cntrs.port_rcv_errors + dev->rcv_errors;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS)
-               dev->z_port_rcv_remphys_errors =
-                       cntrs.port_rcv_remphys_errors;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS)
-               dev->z_port_xmit_discards = cntrs.port_xmit_discards;
-
-       if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS)
-               dev->z_local_link_integrity_errors =
-                       cntrs.local_link_integrity_errors;
-
-       if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS)
-               dev->z_excessive_buffer_overrun_errors =
-                       cntrs.excessive_buffer_overrun_errors;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) {
-               dev->n_vl15_dropped = 0;
-               dev->z_vl15_dropped = cntrs.vl15_dropped;
-       }
-
-       if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA)
-               dev->z_port_xmit_data = cntrs.port_xmit_data;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA)
-               dev->z_port_rcv_data = cntrs.port_rcv_data;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS)
-               dev->z_port_xmit_packets = cntrs.port_xmit_packets;
-
-       if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS)
-               dev->z_port_rcv_packets = cntrs.port_rcv_packets;
-
-       return recv_pma_get_portcounters(pmp, ibdev, port);
-}
-
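The z_* baselines emulate counter clearing on hardware whose counters cannot be reset: a Set snapshots the current value, and every later Get reports current minus baseline. A compact userspace sketch of the idea, where hw_read() merely stands in for the real counter registers:

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_value;               /* stand-in for a free-running HW counter */

static uint64_t hw_read(void) { return hw_value; }

struct sw_counter {
        uint64_t baseline;              /* analogous to the driver's z_* fields */
};

static void counter_clear(struct sw_counter *c) { c->baseline = hw_read(); }

static uint64_t counter_get(const struct sw_counter *c)
{
        return hw_read() - c->baseline;
}

int main(void)
{
        struct sw_counter c = { 0 };

        hw_value = 1000;
        counter_clear(&c);              /* "reset" without touching hardware */
        hw_value = 1042;
        printf("since clear: %llu\n", (unsigned long long)counter_get(&c));
        return 0;
}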
-static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
-                                        struct ib_device *ibdev, u8 port)
-{
-       struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
-               pmp->data;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       u64 swords, rwords, spkts, rpkts, xwait;
-
-       ipath_snapshot_counters(dev->dd, &swords, &rwords, &spkts,
-                               &rpkts, &xwait);
-
-       if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA)
-               dev->z_port_xmit_data = swords;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA)
-               dev->z_port_rcv_data = rwords;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS)
-               dev->z_port_xmit_packets = spkts;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS)
-               dev->z_port_rcv_packets = rpkts;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS)
-               dev->n_unicast_xmit = 0;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS)
-               dev->n_unicast_rcv = 0;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS)
-               dev->n_multicast_xmit = 0;
-
-       if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS)
-               dev->n_multicast_rcv = 0;
-
-       return recv_pma_get_portcounters_ext(pmp, ibdev, port);
-}
-
-static int process_subn(struct ib_device *ibdev, int mad_flags,
-                       u8 port_num, const struct ib_mad *in_mad,
-                       struct ib_mad *out_mad)
-{
-       struct ib_smp *smp = (struct ib_smp *)out_mad;
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       int ret;
-
-       *out_mad = *in_mad;
-       if (smp->class_version != 1) {
-               smp->status |= IB_SMP_UNSUP_VERSION;
-               ret = reply(smp);
-               goto bail;
-       }
-
-       /* Is the mkey in the process of expiring? */
-       if (dev->mkey_lease_timeout &&
-           time_after_eq(jiffies, dev->mkey_lease_timeout)) {
-               /* Clear timeout and mkey protection field. */
-               dev->mkey_lease_timeout = 0;
-               dev->mkeyprot = 0;
-       }
-
-       /*
-        * M_Key checking depends on
-        * Portinfo:M_Key_protect_bits
-        */
-       if ((mad_flags & IB_MAD_IGNORE_MKEY) == 0 && dev->mkey != 0 &&
-           dev->mkey != smp->mkey &&
-           (smp->method == IB_MGMT_METHOD_SET ||
-            (smp->method == IB_MGMT_METHOD_GET &&
-             dev->mkeyprot >= 2))) {
-               if (dev->mkey_violations != 0xFFFF)
-                       ++dev->mkey_violations;
-               if (dev->mkey_lease_timeout ||
-                   dev->mkey_lease_period == 0) {
-                       ret = IB_MAD_RESULT_SUCCESS |
-                               IB_MAD_RESULT_CONSUMED;
-                       goto bail;
-               }
-               dev->mkey_lease_timeout = jiffies +
-                       dev->mkey_lease_period * HZ;
-               /* Future: Generate a trap notice. */
-               ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
-               goto bail;
-       } else if (dev->mkey_lease_timeout)
-               dev->mkey_lease_timeout = 0;
-
-       switch (smp->method) {
-       case IB_MGMT_METHOD_GET:
-               switch (smp->attr_id) {
-               case IB_SMP_ATTR_NODE_DESC:
-                       ret = recv_subn_get_nodedescription(smp, ibdev);
-                       goto bail;
-               case IB_SMP_ATTR_NODE_INFO:
-                       ret = recv_subn_get_nodeinfo(smp, ibdev, port_num);
-                       goto bail;
-               case IB_SMP_ATTR_GUID_INFO:
-                       ret = recv_subn_get_guidinfo(smp, ibdev);
-                       goto bail;
-               case IB_SMP_ATTR_PORT_INFO:
-                       ret = recv_subn_get_portinfo(smp, ibdev, port_num);
-                       goto bail;
-               case IB_SMP_ATTR_PKEY_TABLE:
-                       ret = recv_subn_get_pkeytable(smp, ibdev);
-                       goto bail;
-               case IB_SMP_ATTR_SM_INFO:
-                       if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
-                               ret = IB_MAD_RESULT_SUCCESS |
-                                       IB_MAD_RESULT_CONSUMED;
-                               goto bail;
-                       }
-                       if (dev->port_cap_flags & IB_PORT_SM) {
-                               ret = IB_MAD_RESULT_SUCCESS;
-                               goto bail;
-                       }
-                       /* FALLTHROUGH */
-               default:
-                       smp->status |= IB_SMP_UNSUP_METH_ATTR;
-                       ret = reply(smp);
-                       goto bail;
-               }
-
-       case IB_MGMT_METHOD_SET:
-               switch (smp->attr_id) {
-               case IB_SMP_ATTR_GUID_INFO:
-                       ret = recv_subn_set_guidinfo(smp, ibdev);
-                       goto bail;
-               case IB_SMP_ATTR_PORT_INFO:
-                       ret = recv_subn_set_portinfo(smp, ibdev, port_num);
-                       goto bail;
-               case IB_SMP_ATTR_PKEY_TABLE:
-                       ret = recv_subn_set_pkeytable(smp, ibdev, port_num);
-                       goto bail;
-               case IB_SMP_ATTR_SM_INFO:
-                       if (dev->port_cap_flags & IB_PORT_SM_DISABLED) {
-                               ret = IB_MAD_RESULT_SUCCESS |
-                                       IB_MAD_RESULT_CONSUMED;
-                               goto bail;
-                       }
-                       if (dev->port_cap_flags & IB_PORT_SM) {
-                               ret = IB_MAD_RESULT_SUCCESS;
-                               goto bail;
-                       }
-                       /* FALLTHROUGH */
-               default:
-                       smp->status |= IB_SMP_UNSUP_METH_ATTR;
-                       ret = reply(smp);
-                       goto bail;
-               }
-
-       case IB_MGMT_METHOD_TRAP:
-       case IB_MGMT_METHOD_REPORT:
-       case IB_MGMT_METHOD_REPORT_RESP:
-       case IB_MGMT_METHOD_TRAP_REPRESS:
-       case IB_MGMT_METHOD_GET_RESP:
-               /*
-                * The ib_mad module will call us to process responses
-                * before checking for other consumers.
-                * Just tell the caller to process it normally.
-                */
-               ret = IB_MAD_RESULT_SUCCESS;
-               goto bail;
-       default:
-               smp->status |= IB_SMP_UNSUP_METHOD;
-               ret = reply(smp);
-       }
-
-bail:
-       return ret;
-}
-
-static int process_perf(struct ib_device *ibdev, u8 port_num,
-                       const struct ib_mad *in_mad,
-                       struct ib_mad *out_mad)
-{
-       struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
-       int ret;
-
-       *out_mad = *in_mad;
-       if (pmp->mad_hdr.class_version != 1) {
-               pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
-               ret = reply((struct ib_smp *) pmp);
-               goto bail;
-       }
-
-       switch (pmp->mad_hdr.method) {
-       case IB_MGMT_METHOD_GET:
-               switch (pmp->mad_hdr.attr_id) {
-               case IB_PMA_CLASS_PORT_INFO:
-                       ret = recv_pma_get_classportinfo(pmp);
-                       goto bail;
-               case IB_PMA_PORT_SAMPLES_CONTROL:
-                       ret = recv_pma_get_portsamplescontrol(pmp, ibdev,
-                                                             port_num);
-                       goto bail;
-               case IB_PMA_PORT_SAMPLES_RESULT:
-                       ret = recv_pma_get_portsamplesresult(pmp, ibdev);
-                       goto bail;
-               case IB_PMA_PORT_SAMPLES_RESULT_EXT:
-                       ret = recv_pma_get_portsamplesresult_ext(pmp,
-                                                                ibdev);
-                       goto bail;
-               case IB_PMA_PORT_COUNTERS:
-                       ret = recv_pma_get_portcounters(pmp, ibdev,
-                                                       port_num);
-                       goto bail;
-               case IB_PMA_PORT_COUNTERS_EXT:
-                       ret = recv_pma_get_portcounters_ext(pmp, ibdev,
-                                                           port_num);
-                       goto bail;
-               default:
-                       pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
-                       ret = reply((struct ib_smp *) pmp);
-                       goto bail;
-               }
-
-       case IB_MGMT_METHOD_SET:
-               switch (pmp->mad_hdr.attr_id) {
-               case IB_PMA_PORT_SAMPLES_CONTROL:
-                       ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
-                                                             port_num);
-                       goto bail;
-               case IB_PMA_PORT_COUNTERS:
-                       ret = recv_pma_set_portcounters(pmp, ibdev,
-                                                       port_num);
-                       goto bail;
-               case IB_PMA_PORT_COUNTERS_EXT:
-                       ret = recv_pma_set_portcounters_ext(pmp, ibdev,
-                                                           port_num);
-                       goto bail;
-               default:
-                       pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
-                       ret = reply((struct ib_smp *) pmp);
-                       goto bail;
-               }
-
-       case IB_MGMT_METHOD_GET_RESP:
-               /*
-                * The ib_mad module will call us to process responses
-                * before checking for other consumers.
-                * Just tell the caller to process it normally.
-                */
-               ret = IB_MAD_RESULT_SUCCESS;
-               goto bail;
-       default:
-               pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
-               ret = reply((struct ib_smp *) pmp);
-       }
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_process_mad - process an incoming MAD packet
- * @ibdev: the infiniband device this packet came in on
- * @mad_flags: MAD flags
- * @port_num: the port number this packet came in on
- * @in_wc: the work completion entry for this packet
- * @in_grh: the global route header for this packet
- * @in_mad: the incoming MAD
- * @out_mad: any outgoing MAD reply
- *
- * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not
- * interested in processing.
- *
- * Note that the verbs framework has already done the MAD sanity checks,
- * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE
- * MADs.
- *
- * This is called by the ib_mad module.
- */
-int ipath_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
-                     const struct ib_wc *in_wc, const struct ib_grh *in_grh,
-                     const struct ib_mad_hdr *in, size_t in_mad_size,
-                     struct ib_mad_hdr *out, size_t *out_mad_size,
-                     u16 *out_mad_pkey_index)
-{
-       int ret;
-       const struct ib_mad *in_mad = (const struct ib_mad *)in;
-       struct ib_mad *out_mad = (struct ib_mad *)out;
-
-       if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
-                        *out_mad_size != sizeof(*out_mad)))
-               return IB_MAD_RESULT_FAILURE;
-
-       switch (in_mad->mad_hdr.mgmt_class) {
-       case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
-       case IB_MGMT_CLASS_SUBN_LID_ROUTED:
-               ret = process_subn(ibdev, mad_flags, port_num,
-                                  in_mad, out_mad);
-               goto bail;
-       case IB_MGMT_CLASS_PERF_MGMT:
-               ret = process_perf(ibdev, port_num, in_mad, out_mad);
-               goto bail;
-       default:
-               ret = IB_MAD_RESULT_SUCCESS;
-       }
-
-bail:
-       return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_mmap.c b/drivers/staging/rdma/ipath/ipath_mmap.c
deleted file mode 100644 (file)
index e732742..0000000
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/errno.h>
-#include <asm/pgtable.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_release_mmap_info - free mmap info structure
- * @ref: a pointer to the kref within struct ipath_mmap_info
- */
-void ipath_release_mmap_info(struct kref *ref)
-{
-       struct ipath_mmap_info *ip =
-               container_of(ref, struct ipath_mmap_info, ref);
-       struct ipath_ibdev *dev = to_idev(ip->context->device);
-
-       spin_lock_irq(&dev->pending_lock);
-       list_del(&ip->pending_mmaps);
-       spin_unlock_irq(&dev->pending_lock);
-
-       vfree(ip->obj);
-       kfree(ip);
-}
-
-/*
- * open and close keep track of how many times the CQ is mapped,
- * to avoid releasing it.
- */
-static void ipath_vma_open(struct vm_area_struct *vma)
-{
-       struct ipath_mmap_info *ip = vma->vm_private_data;
-
-       kref_get(&ip->ref);
-}
-
-static void ipath_vma_close(struct vm_area_struct *vma)
-{
-       struct ipath_mmap_info *ip = vma->vm_private_data;
-
-       kref_put(&ip->ref, ipath_release_mmap_info);
-}
-
-static const struct vm_operations_struct ipath_vm_ops = {
-       .open =     ipath_vma_open,
-       .close =    ipath_vma_close,
-};
-
-/**
- * ipath_mmap - create a new mmap region
- * @context: the IB user context of the process making the mmap() call
- * @vma: the VMA to be initialized
- * Return zero if the mmap is OK. Otherwise, return an errno.
- */
-int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
-{
-       struct ipath_ibdev *dev = to_idev(context->device);
-       unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
-       unsigned long size = vma->vm_end - vma->vm_start;
-       struct ipath_mmap_info *ip, *pp;
-       int ret = -EINVAL;
-
-       /*
-        * Search the device's list of objects waiting for a mmap call.
-        * Normally, this list is very short since a call to create a
-        * CQ, QP, or SRQ is soon followed by a call to mmap().
-        */
-       spin_lock_irq(&dev->pending_lock);
-       list_for_each_entry_safe(ip, pp, &dev->pending_mmaps,
-                                pending_mmaps) {
-               /* Only the creator is allowed to mmap the object */
-               if (context != ip->context || (__u64) offset != ip->offset)
-                       continue;
-               /* Don't allow a mmap larger than the object. */
-               if (size > ip->size)
-                       break;
-
-               list_del_init(&ip->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-
-               ret = remap_vmalloc_range(vma, ip->obj, 0);
-               if (ret)
-                       goto done;
-               vma->vm_ops = &ipath_vm_ops;
-               vma->vm_private_data = ip;
-               ipath_vma_open(vma);
-               goto done;
-       }
-       spin_unlock_irq(&dev->pending_lock);
-done:
-       return ret;
-}
-
-/*
- * Allocate information for ipath_mmap
- */
-struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
-                                              u32 size,
-                                              struct ib_ucontext *context,
-                                              void *obj) {
-       struct ipath_mmap_info *ip;
-
-       ip = kmalloc(sizeof *ip, GFP_KERNEL);
-       if (!ip)
-               goto bail;
-
-       size = PAGE_ALIGN(size);
-
-       spin_lock_irq(&dev->mmap_offset_lock);
-       if (dev->mmap_offset == 0)
-               dev->mmap_offset = PAGE_SIZE;
-       ip->offset = dev->mmap_offset;
-       dev->mmap_offset += size;
-       spin_unlock_irq(&dev->mmap_offset_lock);
-
-       INIT_LIST_HEAD(&ip->pending_mmaps);
-       ip->size = size;
-       ip->context = context;
-       ip->obj = obj;
-       kref_init(&ip->ref);
-
-bail:
-       return ip;
-}
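The cookie returned to user space comes from a page-aligned bump allocator that starts at PAGE_SIZE, so offset 0 is never a valid cookie and ipath_mmap() can match requests by offset alone. A standalone sketch of that allocator, assuming 4096-byte pages and omitting the mmap_offset_lock serialization:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long mmap_offset;       /* analogous to dev->mmap_offset */

/* Hand out a unique, page-aligned mmap cookie for an object of 'size' bytes. */
static unsigned long alloc_mmap_offset(unsigned long size)
{
        unsigned long off;

        size = PAGE_ALIGN(size);
        if (mmap_offset == 0)
                mmap_offset = PAGE_SIZE;        /* never hand out offset 0 */
        off = mmap_offset;
        mmap_offset += size;
        return off;
}

int main(void)
{
        printf("cq:  %#lx\n", alloc_mmap_offset(100));   /* 0x1000 */
        printf("qp:  %#lx\n", alloc_mmap_offset(9000));  /* 0x2000 */
        printf("srq: %#lx\n", alloc_mmap_offset(1));     /* 0x5000 */
        return 0;
}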
-
-void ipath_update_mmap_info(struct ipath_ibdev *dev,
-                           struct ipath_mmap_info *ip,
-                           u32 size, void *obj) {
-       size = PAGE_ALIGN(size);
-
-       spin_lock_irq(&dev->mmap_offset_lock);
-       if (dev->mmap_offset == 0)
-               dev->mmap_offset = PAGE_SIZE;
-       ip->offset = dev->mmap_offset;
-       dev->mmap_offset += size;
-       spin_unlock_irq(&dev->mmap_offset_lock);
-
-       ip->size = size;
-       ip->obj = obj;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_mr.c b/drivers/staging/rdma/ipath/ipath_mr.c
deleted file mode 100644 (file)
index b76b0ce..0000000
+++ /dev/null
@@ -1,370 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/slab.h>
-
-#include <rdma/ib_umem.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_smi.h>
-
-#include "ipath_verbs.h"
-
-/* Fast memory region */
-struct ipath_fmr {
-       struct ib_fmr ibfmr;
-       u8 page_shift;
-       struct ipath_mregion mr;        /* must be last */
-};
-
-static inline struct ipath_fmr *to_ifmr(struct ib_fmr *ibfmr)
-{
-       return container_of(ibfmr, struct ipath_fmr, ibfmr);
-}
-
-/**
- * ipath_get_dma_mr - get a DMA memory region
- * @pd: protection domain for this memory region
- * @acc: access flags
- *
- * Returns the memory region on success, otherwise returns an errno.
- * Note that all DMA addresses should be created via the
- * struct ib_dma_mapping_ops functions (see ipath_dma.c).
- */
-struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc)
-{
-       struct ipath_mr *mr;
-       struct ib_mr *ret;
-
-       mr = kzalloc(sizeof *mr, GFP_KERNEL);
-       if (!mr) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       mr->mr.access_flags = acc;
-       ret = &mr->ibmr;
-
-bail:
-       return ret;
-}
-
-static struct ipath_mr *alloc_mr(int count,
-                                struct ipath_lkey_table *lk_table)
-{
-       struct ipath_mr *mr;
-       int m, i = 0;
-
-       /* Allocate struct plus pointers to first level page tables. */
-       m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
-       mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
-       if (!mr)
-               goto done;
-
-       /* Allocate first level page tables. */
-       for (; i < m; i++) {
-               mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL);
-               if (!mr->mr.map[i])
-                       goto bail;
-       }
-       mr->mr.mapsz = m;
-
-       if (!ipath_alloc_lkey(lk_table, &mr->mr))
-               goto bail;
-       mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey;
-
-       goto done;
-
-bail:
-       while (i) {
-               i--;
-               kfree(mr->mr.map[i]);
-       }
-       kfree(mr);
-       mr = NULL;
-
-done:
-       return mr;
-}
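alloc_mr() sizes a two-level table: a ceiling division gives the number of first-level map pages of IPATH_SEGSZ segments each, and the later fill loops advance a (page, slot) pair with the slot wrapping at IPATH_SEGSZ. The arithmetic in isolation, with a stand-in SEGSZ value:

#include <stdio.h>

#define SEGSZ 8         /* stand-in for IPATH_SEGSZ, chosen only for illustration */

int main(void)
{
        int count = 21;                          /* segments to place */
        int m = (count + SEGSZ - 1) / SEGSZ;     /* first-level pages needed: 3 */
        int page = 0, slot = 0, i;

        printf("%d segments need %d map pages\n", count, m);
        for (i = 0; i < count; i++) {
                printf("seg %2d -> map[%d]->segs[%d]\n", i, page, slot);
                if (++slot == SEGSZ) {           /* same wrap as the driver's loops */
                        page++;
                        slot = 0;
                }
        }
        return 0;
}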
-
-/**
- * ipath_reg_user_mr - register a userspace memory region
- * @pd: protection domain for this memory region
- * @start: starting userspace address
- * @length: length of region to register
- * @virt_addr: virtual address to use (from HCA's point of view)
- * @mr_access_flags: access flags for this memory region
- * @udata: unused by the InfiniPath driver
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                               u64 virt_addr, int mr_access_flags,
-                               struct ib_udata *udata)
-{
-       struct ipath_mr *mr;
-       struct ib_umem *umem;
-       int n, m, entry;
-       struct scatterlist *sg;
-       struct ib_mr *ret;
-
-       if (length == 0) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       umem = ib_umem_get(pd->uobject->context, start, length,
-                          mr_access_flags, 0);
-       if (IS_ERR(umem))
-               return (void *) umem;
-
-       n = umem->nmap;
-       mr = alloc_mr(n, &to_idev(pd->device)->lk_table);
-       if (!mr) {
-               ret = ERR_PTR(-ENOMEM);
-               ib_umem_release(umem);
-               goto bail;
-       }
-
-       mr->mr.pd = pd;
-       mr->mr.user_base = start;
-       mr->mr.iova = virt_addr;
-       mr->mr.length = length;
-       mr->mr.offset = ib_umem_offset(umem);
-       mr->mr.access_flags = mr_access_flags;
-       mr->mr.max_segs = n;
-       mr->umem = umem;
-
-       m = 0;
-       n = 0;
-       for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
-               void *vaddr;
-
-               vaddr = page_address(sg_page(sg));
-               if (!vaddr) {
-                       ret = ERR_PTR(-EINVAL);
-                       goto bail;
-               }
-               mr->mr.map[m]->segs[n].vaddr = vaddr;
-               mr->mr.map[m]->segs[n].length = umem->page_size;
-               n++;
-               if (n == IPATH_SEGSZ) {
-                       m++;
-                       n = 0;
-               }
-       }
-       ret = &mr->ibmr;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_dereg_mr - unregister and free a memory region
- * @ibmr: the memory region to free
- *
- * Returns 0 on success.
- *
- * Note that this is called to free MRs created by ipath_get_dma_mr()
- * or ipath_reg_user_mr().
- */
-int ipath_dereg_mr(struct ib_mr *ibmr)
-{
-       struct ipath_mr *mr = to_imr(ibmr);
-       int i;
-
-       ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey);
-       i = mr->mr.mapsz;
-       while (i) {
-               i--;
-               kfree(mr->mr.map[i]);
-       }
-
-       if (mr->umem)
-               ib_umem_release(mr->umem);
-
-       kfree(mr);
-       return 0;
-}
-
-/**
- * ipath_alloc_fmr - allocate a fast memory region
- * @pd: the protection domain for this memory region
- * @mr_access_flags: access flags for this memory region
- * @fmr_attr: fast memory region attributes
- *
- * Returns the memory region on success, otherwise returns an errno.
- */
-struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-                              struct ib_fmr_attr *fmr_attr)
-{
-       struct ipath_fmr *fmr;
-       int m, i = 0;
-       struct ib_fmr *ret;
-
-       /* Allocate struct plus pointers to first level page tables. */
-       m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ;
-       fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
-       if (!fmr)
-               goto bail;
-
-       /* Allocate first level page tables. */
-       for (; i < m; i++) {
-               fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0],
-                                        GFP_KERNEL);
-               if (!fmr->mr.map[i])
-                       goto bail;
-       }
-       fmr->mr.mapsz = m;
-
-       /*
-        * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
-        * rkey.
-        */
-       if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr))
-               goto bail;
-       fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey;
-       /*
-        * Resources are allocated but no valid mapping (RKEY can't be
-        * used).
-        */
-       fmr->mr.pd = pd;
-       fmr->mr.user_base = 0;
-       fmr->mr.iova = 0;
-       fmr->mr.length = 0;
-       fmr->mr.offset = 0;
-       fmr->mr.access_flags = mr_access_flags;
-       fmr->mr.max_segs = fmr_attr->max_pages;
-       fmr->page_shift = fmr_attr->page_shift;
-
-       ret = &fmr->ibfmr;
-       goto done;
-
-bail:
-       while (i)
-               kfree(fmr->mr.map[--i]);
-       kfree(fmr);
-       ret = ERR_PTR(-ENOMEM);
-
-done:
-       return ret;
-}
-
-/**
- * ipath_map_phys_fmr - set up a fast memory region
- * @ibfmr: the fast memory region to set up
- * @page_list: the list of pages to associate with the fast memory region
- * @list_len: the number of pages to associate with the fast memory region
- * @iova: the virtual address of the start of the fast memory region
- *
- * This may be called from interrupt context.
- */
-
-int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
-                      int list_len, u64 iova)
-{
-       struct ipath_fmr *fmr = to_ifmr(ibfmr);
-       struct ipath_lkey_table *rkt;
-       unsigned long flags;
-       int m, n, i;
-       u32 ps;
-       int ret;
-
-       if (list_len > fmr->mr.max_segs) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       rkt = &to_idev(ibfmr->device)->lk_table;
-       spin_lock_irqsave(&rkt->lock, flags);
-       fmr->mr.user_base = iova;
-       fmr->mr.iova = iova;
-       ps = 1 << fmr->page_shift;
-       fmr->mr.length = list_len * ps;
-       m = 0;
-       n = 0;
-       ps = 1 << fmr->page_shift;
-       for (i = 0; i < list_len; i++) {
-               fmr->mr.map[m]->segs[n].vaddr = (void *) page_list[i];
-               fmr->mr.map[m]->segs[n].length = ps;
-               if (++n == IPATH_SEGSZ) {
-                       m++;
-                       n = 0;
-               }
-       }
-       spin_unlock_irqrestore(&rkt->lock, flags);
-       ret = 0;
-
-bail:
-       return ret;
-}
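
For reference, the loop above spreads list_len page addresses across a two-level table: m indexes the first-level map[] array and n indexes the segs[] entries inside one chunk, wrapping every IPATH_SEGSZ entries. A minimal userspace sketch of the same index walk (SEGSZ and the printed layout are illustrative stand-ins, not driver code):

#include <stdio.h>

#define SEGSZ 4	/* stand-in for IPATH_SEGSZ, kept small so the wrap is visible */

int main(void)
{
	int m = 0, n = 0;
	int i;

	/* Walk 10 pages the same way ipath_map_phys_fmr() walks page_list[]. */
	for (i = 0; i < 10; i++) {
		printf("page %2d -> map[%d].segs[%d]\n", i, m, n);
		if (++n == SEGSZ) {	/* chunk full: advance to the next first-level entry */
			m++;
			n = 0;
		}
	}
	return 0;
}
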
-
-/**
- * ipath_unmap_fmr - unmap fast memory regions
- * @fmr_list: the list of fast memory regions to unmap
- *
- * Returns 0 on success.
- */
-int ipath_unmap_fmr(struct list_head *fmr_list)
-{
-       struct ipath_fmr *fmr;
-       struct ipath_lkey_table *rkt;
-       unsigned long flags;
-
-       list_for_each_entry(fmr, fmr_list, ibfmr.list) {
-               rkt = &to_idev(fmr->ibfmr.device)->lk_table;
-               spin_lock_irqsave(&rkt->lock, flags);
-               fmr->mr.user_base = 0;
-               fmr->mr.iova = 0;
-               fmr->mr.length = 0;
-               spin_unlock_irqrestore(&rkt->lock, flags);
-       }
-       return 0;
-}
-
-/**
- * ipath_dealloc_fmr - deallocate a fast memory region
- * @ibfmr: the fast memory region to deallocate
- *
- * Returns 0 on success.
- */
-int ipath_dealloc_fmr(struct ib_fmr *ibfmr)
-{
-       struct ipath_fmr *fmr = to_ifmr(ibfmr);
-       int i;
-
-       ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey);
-       i = fmr->mr.mapsz;
-       while (i)
-               kfree(fmr->mr.map[--i]);
-       kfree(fmr);
-       return 0;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_qp.c b/drivers/staging/rdma/ipath/ipath_qp.c
deleted file mode 100644 (file)
index 280cd2d..0000000
+++ /dev/null
@@ -1,1079 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-#define BITS_PER_PAGE          (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK     (BITS_PER_PAGE-1)
-#define mk_qpn(qpt, map, off)  (((map) - (qpt)->map) * BITS_PER_PAGE + \
-                                (off))
-#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
-                                                     BITS_PER_PAGE, off)
-
-/*
- * Convert the AETH credit code into the number of credits.
- */
-static u32 credit_table[31] = {
-       0,                      /* 0 */
-       1,                      /* 1 */
-       2,                      /* 2 */
-       3,                      /* 3 */
-       4,                      /* 4 */
-       6,                      /* 5 */
-       8,                      /* 6 */
-       12,                     /* 7 */
-       16,                     /* 8 */
-       24,                     /* 9 */
-       32,                     /* A */
-       48,                     /* B */
-       64,                     /* C */
-       96,                     /* D */
-       128,                    /* E */
-       192,                    /* F */
-       256,                    /* 10 */
-       384,                    /* 11 */
-       512,                    /* 12 */
-       768,                    /* 13 */
-       1024,                   /* 14 */
-       1536,                   /* 15 */
-       2048,                   /* 16 */
-       3072,                   /* 17 */
-       4096,                   /* 18 */
-       6144,                   /* 19 */
-       8192,                   /* 1A */
-       12288,                  /* 1B */
-       16384,                  /* 1C */
-       24576,                  /* 1D */
-       32768                   /* 1E */
-};
-
-
-static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
-{
-       unsigned long page = get_zeroed_page(GFP_KERNEL);
-       unsigned long flags;
-
-       /*
-        * Free the page if someone raced with us installing it.
-        */
-
-       spin_lock_irqsave(&qpt->lock, flags);
-       if (map->page)
-               free_page(page);
-       else
-               map->page = (void *)page;
-       spin_unlock_irqrestore(&qpt->lock, flags);
-}
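
get_map_page() above allocates the bitmap page outside the lock and only publishes it under qpt->lock, freeing its own copy if another CPU installed a page first. A generic userspace sketch of the same publish-or-free pattern, with a pthread mutex standing in for the spinlock (all names here are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_page;

static void install_page(void)
{
	void *page = calloc(1, 4096);		/* allocate outside the lock */

	pthread_mutex_lock(&lock);
	if (shared_page)
		free(page);			/* lost the race: drop our copy */
	else
		shared_page = page;		/* won the race: publish it */
	pthread_mutex_unlock(&lock);
}
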
-
-
-static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
-{
-       u32 i, offset, max_scan, qpn;
-       struct qpn_map *map;
-       u32 ret = -1;
-
-       if (type == IB_QPT_SMI)
-               ret = 0;
-       else if (type == IB_QPT_GSI)
-               ret = 1;
-
-       if (ret != -1) {
-               map = &qpt->map[0];
-               if (unlikely(!map->page)) {
-                       get_map_page(qpt, map);
-                       if (unlikely(!map->page)) {
-                               ret = -ENOMEM;
-                               goto bail;
-                       }
-               }
-               if (!test_and_set_bit(ret, map->page))
-                       atomic_dec(&map->n_free);
-               else
-                       ret = -EBUSY;
-               goto bail;
-       }
-
-       qpn = qpt->last + 1;
-       if (qpn >= QPN_MAX)
-               qpn = 2;
-       offset = qpn & BITS_PER_PAGE_MASK;
-       map = &qpt->map[qpn / BITS_PER_PAGE];
-       max_scan = qpt->nmaps - !offset;
-       for (i = 0;;) {
-               if (unlikely(!map->page)) {
-                       get_map_page(qpt, map);
-                       if (unlikely(!map->page))
-                               break;
-               }
-               if (likely(atomic_read(&map->n_free))) {
-                       do {
-                               if (!test_and_set_bit(offset, map->page)) {
-                                       atomic_dec(&map->n_free);
-                                       qpt->last = qpn;
-                                       ret = qpn;
-                                       goto bail;
-                               }
-                               offset = find_next_offset(map, offset);
-                               qpn = mk_qpn(qpt, map, offset);
-                               /*
-                                * This test differs from alloc_pidmap().
-                                * If find_next_offset() does find a zero
-                                * bit, we don't need to check for QPN
-                                * wrapping around past our starting QPN.
-                                * We just need to be sure we don't loop
-                                * forever.
-                                */
-                       } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
-               }
-               /*
-                * In order to keep the number of pages allocated to a
-                * minimum, we scan all the existing pages before increasing
-                * the size of the bitmap table.
-                */
-               if (++i > max_scan) {
-                       if (qpt->nmaps == QPNMAP_ENTRIES)
-                               break;
-                       map = &qpt->map[qpt->nmaps++];
-                       offset = 0;
-               } else if (map < &qpt->map[qpt->nmaps]) {
-                       ++map;
-                       offset = 0;
-               } else {
-                       map = &qpt->map[0];
-                       offset = 2;
-               }
-               qpn = mk_qpn(qpt, map, offset);
-       }
-
-       ret = -ENOMEM;
-
-bail:
-       return ret;
-}
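
alloc_qpn() treats the QPN space as an array of per-page bitmaps; a QPN is rebuilt from the bitmap page index and the bit offset, which is what the mk_qpn() macro near the top of this file expresses. A small standalone sketch of that arithmetic, assuming 4096-byte pages (constants copied here only for illustration):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define BITS_PER_BYTE	8
#define BITS_PER_PAGE	(PAGE_SIZE * BITS_PER_BYTE)	/* 32768 QPNs per bitmap page */

/* Same arithmetic as mk_qpn(): page index * bits-per-page + bit offset. */
static unsigned long mk_qpn_example(unsigned long page_idx, unsigned long off)
{
	return page_idx * BITS_PER_PAGE + off;
}

int main(void)
{
	/* Bit 17 of the third bitmap page corresponds to this QPN. */
	printf("qpn = %lu\n", mk_qpn_example(2, 17));	/* 2 * 32768 + 17 = 65553 */
	return 0;
}
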
-
-static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
-{
-       struct qpn_map *map;
-
-       map = qpt->map + qpn / BITS_PER_PAGE;
-       if (map->page)
-               clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
-       atomic_inc(&map->n_free);
-}
-
-/**
- * ipath_alloc_qpn - allocate a QP number
- * @qpt: the QP table
- * @qp: the QP
- * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
- *
- * Allocate the next available QPN and put the QP into the hash table.
- * The hash table holds a reference to the QP.
- */
-static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
-                          enum ib_qp_type type)
-{
-       unsigned long flags;
-       int ret;
-
-       ret = alloc_qpn(qpt, type);
-       if (ret < 0)
-               goto bail;
-       qp->ibqp.qp_num = ret;
-
-       /* Add the QP to the hash table. */
-       spin_lock_irqsave(&qpt->lock, flags);
-
-       ret %= qpt->max;
-       qp->next = qpt->table[ret];
-       qpt->table[ret] = qp;
-       atomic_inc(&qp->refcount);
-
-       spin_unlock_irqrestore(&qpt->lock, flags);
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_free_qp - remove a QP from the QP table
- * @qpt: the QP table
- * @qp: the QP to remove
- *
- * Remove the QP from the table so it can't be found asynchronously by
- * the receive interrupt routine.
- */
-static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
-{
-       struct ipath_qp *q, **qpp;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qpt->lock, flags);
-
-       /* Remove QP from the hash table. */
-       qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
-       for (; (q = *qpp) != NULL; qpp = &q->next) {
-               if (q == qp) {
-                       *qpp = qp->next;
-                       qp->next = NULL;
-                       atomic_dec(&qp->refcount);
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(&qpt->lock, flags);
-}
-
-/**
- * ipath_free_all_qps - check for QPs still in use
- * @qpt: the QP table to empty
- *
- * There should not be any QPs still in use.
- * Free memory for table.
- */
-unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
-{
-       unsigned long flags;
-       struct ipath_qp *qp;
-       u32 n, qp_inuse = 0;
-
-       spin_lock_irqsave(&qpt->lock, flags);
-       for (n = 0; n < qpt->max; n++) {
-               qp = qpt->table[n];
-               qpt->table[n] = NULL;
-
-               for (; qp; qp = qp->next)
-                       qp_inuse++;
-       }
-       spin_unlock_irqrestore(&qpt->lock, flags);
-
-       for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
-               if (qpt->map[n].page)
-                       free_page((unsigned long) qpt->map[n].page);
-       return qp_inuse;
-}
-
-/**
- * ipath_lookup_qpn - return the QP with the given QPN
- * @qpt: the QP table
- * @qpn: the QP number to look up
- *
- * The caller is responsible for decrementing the QP reference count
- * when done.
- */
-struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
-{
-       unsigned long flags;
-       struct ipath_qp *qp;
-
-       spin_lock_irqsave(&qpt->lock, flags);
-
-       for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
-               if (qp->ibqp.qp_num == qpn) {
-                       atomic_inc(&qp->refcount);
-                       break;
-               }
-       }
-
-       spin_unlock_irqrestore(&qpt->lock, flags);
-       return qp;
-}
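
Since the lookup takes a reference under qpt->lock, every caller has to drop it again once the QP is no longer needed; the destroy path waits for the count to reach zero. A hedged sketch of that calling pattern (a hypothetical caller, not code from this file):

	/*
	 * Hypothetical caller: every successful lookup must be paired with a
	 * reference drop, since ipath_lookup_qpn() incremented qp->refcount
	 * under qpt->lock.
	 */
	struct ipath_qp *qp = ipath_lookup_qpn(qpt, qpn);

	if (qp) {
		/* ... use qp while the reference is held ... */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);	/* the destroy path waits for refcount == 0 */
	}
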
-
-/**
- * ipath_reset_qp - initialize the QP state to the reset state
- * @qp: the QP to reset
- * @type: the QP type
- */
-static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
-{
-       qp->remote_qpn = 0;
-       qp->qkey = 0;
-       qp->qp_access_flags = 0;
-       atomic_set(&qp->s_dma_busy, 0);
-       qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
-       qp->s_hdrwords = 0;
-       qp->s_wqe = NULL;
-       qp->s_pkt_delay = 0;
-       qp->s_draining = 0;
-       qp->s_psn = 0;
-       qp->r_psn = 0;
-       qp->r_msn = 0;
-       if (type == IB_QPT_RC) {
-               qp->s_state = IB_OPCODE_RC_SEND_LAST;
-               qp->r_state = IB_OPCODE_RC_SEND_LAST;
-       } else {
-               qp->s_state = IB_OPCODE_UC_SEND_LAST;
-               qp->r_state = IB_OPCODE_UC_SEND_LAST;
-       }
-       qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-       qp->r_nak_state = 0;
-       qp->r_aflags = 0;
-       qp->r_flags = 0;
-       qp->s_rnr_timeout = 0;
-       qp->s_head = 0;
-       qp->s_tail = 0;
-       qp->s_cur = 0;
-       qp->s_last = 0;
-       qp->s_ssn = 1;
-       qp->s_lsn = 0;
-       memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
-       qp->r_head_ack_queue = 0;
-       qp->s_tail_ack_queue = 0;
-       qp->s_num_rd_atomic = 0;
-       if (qp->r_rq.wq) {
-               qp->r_rq.wq->head = 0;
-               qp->r_rq.wq->tail = 0;
-       }
-}
-
-/**
- * ipath_error_qp - put a QP into the error state
- * @qp: the QP to put into the error state
- * @err: the receive completion error to signal if a RWQE is active
- *
- * Flushes both send and receive work queues.
- * Returns true if last WQE event should be generated.
- * The QP s_lock should be held and interrupts disabled.
- * If we are already in error state, just return.
- */
-
-int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ib_wc wc;
-       int ret = 0;
-
-       if (qp->state == IB_QPS_ERR)
-               goto bail;
-
-       qp->state = IB_QPS_ERR;
-
-       spin_lock(&dev->pending_lock);
-       if (!list_empty(&qp->timerwait))
-               list_del_init(&qp->timerwait);
-       if (!list_empty(&qp->piowait))
-               list_del_init(&qp->piowait);
-       spin_unlock(&dev->pending_lock);
-
-       /* Schedule the sending tasklet to drain the send work queue. */
-       if (qp->s_last != qp->s_head)
-               ipath_schedule_send(qp);
-
-       memset(&wc, 0, sizeof(wc));
-       wc.qp = &qp->ibqp;
-       wc.opcode = IB_WC_RECV;
-
-       if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
-               wc.wr_id = qp->r_wr_id;
-               wc.status = err;
-               ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-       }
-       wc.status = IB_WC_WR_FLUSH_ERR;
-
-       if (qp->r_rq.wq) {
-               struct ipath_rwq *wq;
-               u32 head;
-               u32 tail;
-
-               spin_lock(&qp->r_rq.lock);
-
-               /* sanity check pointers before trusting them */
-               wq = qp->r_rq.wq;
-               head = wq->head;
-               if (head >= qp->r_rq.size)
-                       head = 0;
-               tail = wq->tail;
-               if (tail >= qp->r_rq.size)
-                       tail = 0;
-               while (tail != head) {
-                       wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
-                       if (++tail >= qp->r_rq.size)
-                               tail = 0;
-                       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-               }
-               wq->tail = tail;
-
-               spin_unlock(&qp->r_rq.lock);
-       } else if (qp->ibqp.event_handler)
-               ret = 1;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_modify_qp - modify the attributes of a queue pair
- * @ibqp: the queue pair whose attributes we're modifying
- * @attr: the new attributes
- * @attr_mask: the mask of attributes to modify
- * @udata: user data for ipathverbs.so
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                   int attr_mask, struct ib_udata *udata)
-{
-       struct ipath_ibdev *dev = to_idev(ibqp->device);
-       struct ipath_qp *qp = to_iqp(ibqp);
-       enum ib_qp_state cur_state, new_state;
-       int lastwqe = 0;
-       int ret;
-
-       spin_lock_irq(&qp->s_lock);
-
-       cur_state = attr_mask & IB_QP_CUR_STATE ?
-               attr->cur_qp_state : qp->state;
-       new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
-
-       if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-                               attr_mask, IB_LINK_LAYER_UNSPECIFIED))
-               goto inval;
-
-       if (attr_mask & IB_QP_AV) {
-               if (attr->ah_attr.dlid == 0 ||
-                   attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
-                       goto inval;
-
-               if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
-                   (attr->ah_attr.grh.sgid_index > 1))
-                       goto inval;
-       }
-
-       if (attr_mask & IB_QP_PKEY_INDEX)
-               if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
-                       goto inval;
-
-       if (attr_mask & IB_QP_MIN_RNR_TIMER)
-               if (attr->min_rnr_timer > 31)
-                       goto inval;
-
-       if (attr_mask & IB_QP_PORT)
-               if (attr->port_num == 0 ||
-                   attr->port_num > ibqp->device->phys_port_cnt)
-                       goto inval;
-
-       /*
-        * don't allow invalid Path MTU values, or values greater than
-        * 2048 unless we are configured for a 4KB MTU
-        */
-       if ((attr_mask & IB_QP_PATH_MTU) &&
-               (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
-               (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
-               goto inval;
-
-       if (attr_mask & IB_QP_PATH_MIG_STATE)
-               if (attr->path_mig_state != IB_MIG_MIGRATED &&
-                   attr->path_mig_state != IB_MIG_REARM)
-                       goto inval;
-
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-               if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
-                       goto inval;
-
-       switch (new_state) {
-       case IB_QPS_RESET:
-               if (qp->state != IB_QPS_RESET) {
-                       qp->state = IB_QPS_RESET;
-                       spin_lock(&dev->pending_lock);
-                       if (!list_empty(&qp->timerwait))
-                               list_del_init(&qp->timerwait);
-                       if (!list_empty(&qp->piowait))
-                               list_del_init(&qp->piowait);
-                       spin_unlock(&dev->pending_lock);
-                       qp->s_flags &= ~IPATH_S_ANY_WAIT;
-                       spin_unlock_irq(&qp->s_lock);
-                       /* Stop the sending tasklet */
-                       tasklet_kill(&qp->s_task);
-                       wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-                       spin_lock_irq(&qp->s_lock);
-               }
-               ipath_reset_qp(qp, ibqp->qp_type);
-               break;
-
-       case IB_QPS_SQD:
-               qp->s_draining = qp->s_last != qp->s_cur;
-               qp->state = new_state;
-               break;
-
-       case IB_QPS_SQE:
-               if (qp->ibqp.qp_type == IB_QPT_RC)
-                       goto inval;
-               qp->state = new_state;
-               break;
-
-       case IB_QPS_ERR:
-               lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-               break;
-
-       default:
-               qp->state = new_state;
-               break;
-       }
-
-       if (attr_mask & IB_QP_PKEY_INDEX)
-               qp->s_pkey_index = attr->pkey_index;
-
-       if (attr_mask & IB_QP_DEST_QPN)
-               qp->remote_qpn = attr->dest_qp_num;
-
-       if (attr_mask & IB_QP_SQ_PSN) {
-               qp->s_psn = qp->s_next_psn = attr->sq_psn;
-               qp->s_last_psn = qp->s_next_psn - 1;
-       }
-
-       if (attr_mask & IB_QP_RQ_PSN)
-               qp->r_psn = attr->rq_psn;
-
-       if (attr_mask & IB_QP_ACCESS_FLAGS)
-               qp->qp_access_flags = attr->qp_access_flags;
-
-       if (attr_mask & IB_QP_AV) {
-               qp->remote_ah_attr = attr->ah_attr;
-               qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
-       }
-
-       if (attr_mask & IB_QP_PATH_MTU)
-               qp->path_mtu = attr->path_mtu;
-
-       if (attr_mask & IB_QP_RETRY_CNT)
-               qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;
-
-       if (attr_mask & IB_QP_RNR_RETRY) {
-               qp->s_rnr_retry = attr->rnr_retry;
-               if (qp->s_rnr_retry > 7)
-                       qp->s_rnr_retry = 7;
-               qp->s_rnr_retry_cnt = qp->s_rnr_retry;
-       }
-
-       if (attr_mask & IB_QP_MIN_RNR_TIMER)
-               qp->r_min_rnr_timer = attr->min_rnr_timer;
-
-       if (attr_mask & IB_QP_TIMEOUT)
-               qp->timeout = attr->timeout;
-
-       if (attr_mask & IB_QP_QKEY)
-               qp->qkey = attr->qkey;
-
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-               qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
-
-       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
-               qp->s_max_rd_atomic = attr->max_rd_atomic;
-
-       spin_unlock_irq(&qp->s_lock);
-
-       if (lastwqe) {
-               struct ib_event ev;
-
-               ev.device = qp->ibqp.device;
-               ev.element.qp = &qp->ibqp;
-               ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-               qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-       }
-       ret = 0;
-       goto bail;
-
-inval:
-       spin_unlock_irq(&qp->s_lock);
-       ret = -EINVAL;
-
-bail:
-       return ret;
-}
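
ipath_modify_qp() is reached through the core ib_modify_qp() verb, and only the attributes flagged in attr_mask are validated and applied. An illustrative consumer-side fragment (not code from this driver) requesting a single transition:

	/*
	 * Illustrative only: a verbs consumer moving a QP into the error
	 * state so outstanding work requests get flushed (ipath_error_qp()
	 * above is what ends up doing the flushing for this driver).
	 */
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };

	if (ib_modify_qp(qp, &attr, IB_QP_STATE))
		pr_err("failed to move QP %u to the error state\n", qp->qp_num);
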
-
-int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                  int attr_mask, struct ib_qp_init_attr *init_attr)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-
-       attr->qp_state = qp->state;
-       attr->cur_qp_state = attr->qp_state;
-       attr->path_mtu = qp->path_mtu;
-       attr->path_mig_state = 0;
-       attr->qkey = qp->qkey;
-       attr->rq_psn = qp->r_psn;
-       attr->sq_psn = qp->s_next_psn;
-       attr->dest_qp_num = qp->remote_qpn;
-       attr->qp_access_flags = qp->qp_access_flags;
-       attr->cap.max_send_wr = qp->s_size - 1;
-       attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
-       attr->cap.max_send_sge = qp->s_max_sge;
-       attr->cap.max_recv_sge = qp->r_rq.max_sge;
-       attr->cap.max_inline_data = 0;
-       attr->ah_attr = qp->remote_ah_attr;
-       memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
-       attr->pkey_index = qp->s_pkey_index;
-       attr->alt_pkey_index = 0;
-       attr->en_sqd_async_notify = 0;
-       attr->sq_draining = qp->s_draining;
-       attr->max_rd_atomic = qp->s_max_rd_atomic;
-       attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
-       attr->min_rnr_timer = qp->r_min_rnr_timer;
-       attr->port_num = 1;
-       attr->timeout = qp->timeout;
-       attr->retry_cnt = qp->s_retry_cnt;
-       attr->rnr_retry = qp->s_rnr_retry_cnt;
-       attr->alt_port_num = 0;
-       attr->alt_timeout = 0;
-
-       init_attr->event_handler = qp->ibqp.event_handler;
-       init_attr->qp_context = qp->ibqp.qp_context;
-       init_attr->send_cq = qp->ibqp.send_cq;
-       init_attr->recv_cq = qp->ibqp.recv_cq;
-       init_attr->srq = qp->ibqp.srq;
-       init_attr->cap = attr->cap;
-       if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
-               init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
-       else
-               init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
-       init_attr->qp_type = qp->ibqp.qp_type;
-       init_attr->port_num = 1;
-       return 0;
-}
-
-/**
- * ipath_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
-__be32 ipath_compute_aeth(struct ipath_qp *qp)
-{
-       u32 aeth = qp->r_msn & IPATH_MSN_MASK;
-
-       if (qp->ibqp.srq) {
-               /*
-                * Shared receive queues don't generate credits.
-                * Set the credit field to the invalid value.
-                */
-               aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
-       } else {
-               u32 min, max, x;
-               u32 credits;
-               struct ipath_rwq *wq = qp->r_rq.wq;
-               u32 head;
-               u32 tail;
-
-               /* sanity check pointers before trusting them */
-               head = wq->head;
-               if (head >= qp->r_rq.size)
-                       head = 0;
-               tail = wq->tail;
-               if (tail >= qp->r_rq.size)
-                       tail = 0;
-               /*
-                * Compute the number of credits available (RWQEs).
-                * XXX Not holding the r_rq.lock here so there is a small
-                * chance that the pair of reads is not atomic.
-                */
-               credits = head - tail;
-               if ((int)credits < 0)
-                       credits += qp->r_rq.size;
-               /*
-                * Binary search the credit table to find the code to
-                * use.
-                */
-               min = 0;
-               max = 31;
-               for (;;) {
-                       x = (min + max) / 2;
-                       if (credit_table[x] == credits)
-                               break;
-                       if (credit_table[x] > credits)
-                               max = x;
-                       else if (min == x)
-                               break;
-                       else
-                               min = x;
-               }
-               aeth |= x << IPATH_AETH_CREDIT_SHIFT;
-       }
-       return cpu_to_be32(aeth);
-}
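
The binary search above maps the number of available RWQEs onto a 5-bit credit code whose decode table is the credit_table[] array earlier in this file; ipath_get_credit() later performs the reverse mapping at the sender. A small standalone sketch of the decode direction, with the shift and mask values copied here as assumptions for illustration:

#include <stdio.h>
#include <stdint.h>

#define AETH_CREDIT_SHIFT	24
#define AETH_CREDIT_MASK	0x1F
#define MSN_MASK		0xFFFFFF

/* Abbreviated copy of credit_table[] from ipath_qp.c: code -> RWQE credits. */
static const uint32_t credit_table[] = { 0, 1, 2, 3, 4, 6, 8, 12, 16, 24, 32 };

int main(void)
{
	uint32_t aeth = (9u << AETH_CREDIT_SHIFT) | 0x000123;	/* code 9, MSN 0x123 */
	uint32_t code = (aeth >> AETH_CREDIT_SHIFT) & AETH_CREDIT_MASK;

	printf("MSN = 0x%06x, credits = %u\n",
	       (unsigned)(aeth & MSN_MASK), (unsigned)credit_table[code]);
	return 0;
}

Code 9 decodes to 24 credits, matching the /* 9 */ entry of credit_table[] above.
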
-
-/**
- * ipath_create_qp - create a queue pair for a device
- * @ibpd: the protection domain whose device we create the queue pair for
- * @init_attr: the attributes of the queue pair
- * @udata: unused by InfiniPath
- *
- * Returns the queue pair on success, otherwise returns an errno.
- *
- * Called by the ib_create_qp() core verbs function.
- */
-struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
-                             struct ib_qp_init_attr *init_attr,
-                             struct ib_udata *udata)
-{
-       struct ipath_qp *qp;
-       int err;
-       struct ipath_swqe *swq = NULL;
-       struct ipath_ibdev *dev;
-       size_t sz;
-       size_t sg_list_sz;
-       struct ib_qp *ret;
-
-       if (init_attr->create_flags) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
-           init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       /* Check receive queue parameters if no SRQ is specified. */
-       if (!init_attr->srq) {
-               if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
-                   init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
-                       ret = ERR_PTR(-EINVAL);
-                       goto bail;
-               }
-               if (init_attr->cap.max_send_sge +
-                   init_attr->cap.max_send_wr +
-                   init_attr->cap.max_recv_sge +
-                   init_attr->cap.max_recv_wr == 0) {
-                       ret = ERR_PTR(-EINVAL);
-                       goto bail;
-               }
-       }
-
-       switch (init_attr->qp_type) {
-       case IB_QPT_UC:
-       case IB_QPT_RC:
-       case IB_QPT_UD:
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               sz = sizeof(struct ipath_sge) *
-                       init_attr->cap.max_send_sge +
-                       sizeof(struct ipath_swqe);
-               swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
-               if (swq == NULL) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail;
-               }
-               sz = sizeof(*qp);
-               sg_list_sz = 0;
-               if (init_attr->srq) {
-                       struct ipath_srq *srq = to_isrq(init_attr->srq);
-
-                       if (srq->rq.max_sge > 1)
-                               sg_list_sz = sizeof(*qp->r_sg_list) *
-                                       (srq->rq.max_sge - 1);
-               } else if (init_attr->cap.max_recv_sge > 1)
-                       sg_list_sz = sizeof(*qp->r_sg_list) *
-                               (init_attr->cap.max_recv_sge - 1);
-               qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
-               if (!qp) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail_swq;
-               }
-               if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
-                   init_attr->qp_type == IB_QPT_SMI ||
-                   init_attr->qp_type == IB_QPT_GSI)) {
-                       qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
-                       if (!qp->r_ud_sg_list) {
-                               ret = ERR_PTR(-ENOMEM);
-                               goto bail_qp;
-                       }
-               } else
-                       qp->r_ud_sg_list = NULL;
-               if (init_attr->srq) {
-                       sz = 0;
-                       qp->r_rq.size = 0;
-                       qp->r_rq.max_sge = 0;
-                       qp->r_rq.wq = NULL;
-                       init_attr->cap.max_recv_wr = 0;
-                       init_attr->cap.max_recv_sge = 0;
-               } else {
-                       qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
-                       qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
-                       sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
-                               sizeof(struct ipath_rwqe);
-                       qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
-                                             qp->r_rq.size * sz);
-                       if (!qp->r_rq.wq) {
-                               ret = ERR_PTR(-ENOMEM);
-                               goto bail_sg_list;
-                       }
-               }
-
-               /*
-                * ib_create_qp() will initialize qp->ibqp
-                * except for qp->ibqp.qp_num.
-                */
-               spin_lock_init(&qp->s_lock);
-               spin_lock_init(&qp->r_rq.lock);
-               atomic_set(&qp->refcount, 0);
-               init_waitqueue_head(&qp->wait);
-               init_waitqueue_head(&qp->wait_dma);
-               tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
-               INIT_LIST_HEAD(&qp->piowait);
-               INIT_LIST_HEAD(&qp->timerwait);
-               qp->state = IB_QPS_RESET;
-               qp->s_wq = swq;
-               qp->s_size = init_attr->cap.max_send_wr + 1;
-               qp->s_max_sge = init_attr->cap.max_send_sge;
-               if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
-                       qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
-               else
-                       qp->s_flags = 0;
-               dev = to_idev(ibpd->device);
-               err = ipath_alloc_qpn(&dev->qp_table, qp,
-                                     init_attr->qp_type);
-               if (err) {
-                       ret = ERR_PTR(err);
-                       vfree(qp->r_rq.wq);
-                       goto bail_sg_list;
-               }
-               qp->ip = NULL;
-               qp->s_tx = NULL;
-               ipath_reset_qp(qp, init_attr->qp_type);
-               break;
-
-       default:
-               /* Don't support raw QPs */
-               ret = ERR_PTR(-ENOSYS);
-               goto bail;
-       }
-
-       init_attr->cap.max_inline_data = 0;
-
-       /*
-        * Return the address of the RWQ as the offset to mmap.
-        * See ipath_mmap() for details.
-        */
-       if (udata && udata->outlen >= sizeof(__u64)) {
-               if (!qp->r_rq.wq) {
-                       __u64 offset = 0;
-
-                       err = ib_copy_to_udata(udata, &offset,
-                                              sizeof(offset));
-                       if (err) {
-                               ret = ERR_PTR(err);
-                               goto bail_ip;
-                       }
-               } else {
-                       u32 s = sizeof(struct ipath_rwq) +
-                               qp->r_rq.size * sz;
-
-                       qp->ip =
-                           ipath_create_mmap_info(dev, s,
-                                                  ibpd->uobject->context,
-                                                  qp->r_rq.wq);
-                       if (!qp->ip) {
-                               ret = ERR_PTR(-ENOMEM);
-                               goto bail_ip;
-                       }
-
-                       err = ib_copy_to_udata(udata, &(qp->ip->offset),
-                                              sizeof(qp->ip->offset));
-                       if (err) {
-                               ret = ERR_PTR(err);
-                               goto bail_ip;
-                       }
-               }
-       }
-
-       spin_lock(&dev->n_qps_lock);
-       if (dev->n_qps_allocated == ib_ipath_max_qps) {
-               spin_unlock(&dev->n_qps_lock);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_ip;
-       }
-
-       dev->n_qps_allocated++;
-       spin_unlock(&dev->n_qps_lock);
-
-       if (qp->ip) {
-               spin_lock_irq(&dev->pending_lock);
-               list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-       }
-
-       ret = &qp->ibqp;
-       goto bail;
-
-bail_ip:
-       if (qp->ip)
-               kref_put(&qp->ip->ref, ipath_release_mmap_info);
-       else
-               vfree(qp->r_rq.wq);
-       ipath_free_qp(&dev->qp_table, qp);
-       free_qpn(&dev->qp_table, qp->ibqp.qp_num);
-bail_sg_list:
-       kfree(qp->r_ud_sg_list);
-bail_qp:
-       kfree(qp);
-bail_swq:
-       vfree(swq);
-bail:
-       return ret;
-}
-
-/**
- * ipath_destroy_qp - destroy a queue pair
- * @ibqp: the queue pair to destroy
- *
- * Returns 0 on success.
- *
- * Note that this can be called while the QP is actively sending or
- * receiving!
- */
-int ipath_destroy_qp(struct ib_qp *ibqp)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       struct ipath_ibdev *dev = to_idev(ibqp->device);
-
-       /* Make sure HW and driver activity is stopped. */
-       spin_lock_irq(&qp->s_lock);
-       if (qp->state != IB_QPS_RESET) {
-               qp->state = IB_QPS_RESET;
-               spin_lock(&dev->pending_lock);
-               if (!list_empty(&qp->timerwait))
-                       list_del_init(&qp->timerwait);
-               if (!list_empty(&qp->piowait))
-                       list_del_init(&qp->piowait);
-               spin_unlock(&dev->pending_lock);
-               qp->s_flags &= ~IPATH_S_ANY_WAIT;
-               spin_unlock_irq(&qp->s_lock);
-               /* Stop the sending tasklet */
-               tasklet_kill(&qp->s_task);
-               wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
-       } else
-               spin_unlock_irq(&qp->s_lock);
-
-       ipath_free_qp(&dev->qp_table, qp);
-
-       if (qp->s_tx) {
-               atomic_dec(&qp->refcount);
-               if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
-                       kfree(qp->s_tx->txreq.map_addr);
-               spin_lock_irq(&dev->pending_lock);
-               list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
-               spin_unlock_irq(&dev->pending_lock);
-               qp->s_tx = NULL;
-       }
-
-       wait_event(qp->wait, !atomic_read(&qp->refcount));
-
-       /* all users cleaned up, mark it available */
-       free_qpn(&dev->qp_table, qp->ibqp.qp_num);
-       spin_lock(&dev->n_qps_lock);
-       dev->n_qps_allocated--;
-       spin_unlock(&dev->n_qps_lock);
-
-       if (qp->ip)
-               kref_put(&qp->ip->ref, ipath_release_mmap_info);
-       else
-               vfree(qp->r_rq.wq);
-       kfree(qp->r_ud_sg_list);
-       vfree(qp->s_wq);
-       kfree(qp);
-       return 0;
-}
-
-/**
- * ipath_init_qp_table - initialize the QP table for a device
- * @idev: the device whose QP table we're initializing
- * @size: the size of the QP table
- *
- * Returns 0 on success, otherwise returns an errno.
- */
-int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
-{
-       int i;
-       int ret;
-
-       idev->qp_table.last = 1;        /* QPN 0 and 1 are special. */
-       idev->qp_table.max = size;
-       idev->qp_table.nmaps = 1;
-       idev->qp_table.table = kcalloc(size, sizeof(*idev->qp_table.table),
-                                      GFP_KERNEL);
-       if (idev->qp_table.table == NULL) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
-               atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
-               idev->qp_table.map[i].page = NULL;
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_get_credit - process the credit field of an incoming AETH for a QP
- * @qp: the QP whose credit state to update
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
-void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
-{
-       u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;
-
-       /*
-        * If the credit is invalid, we can send
-        * as many packets as we like.  Otherwise, we have to
-        * honor the credit field.
-        */
-       if (credit == IPATH_AETH_CREDIT_INVAL)
-               qp->s_lsn = (u32) -1;
-       else if (qp->s_lsn != (u32) -1) {
-               /* Compute new LSN (i.e., MSN + credit) */
-               credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
-               if (ipath_cmp24(credit, qp->s_lsn) > 0)
-                       qp->s_lsn = credit;
-       }
-
-       /* Restart sending if it was blocked due to lack of credits. */
-       if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
-           qp->s_cur != qp->s_head &&
-           (qp->s_lsn == (u32) -1 ||
-            ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
-                        qp->s_lsn + 1) <= 0))
-               ipath_schedule_send(qp);
-}
diff --git a/drivers/staging/rdma/ipath/ipath_rc.c b/drivers/staging/rdma/ipath/ipath_rc.c
deleted file mode 100644 (file)
index d4aa535..0000000
+++ /dev/null
@@ -1,1969 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/io.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_RC_##x
-
-static u32 restart_sge(struct ipath_sge_state *ss, struct ipath_swqe *wqe,
-                      u32 psn, u32 pmtu)
-{
-       u32 len;
-
-       len = ((psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
-       ss->sge = wqe->sg_list[0];
-       ss->sg_list = wqe->sg_list + 1;
-       ss->num_sge = wqe->wr.num_sge;
-       ipath_skip_sge(ss, len);
-       return wqe->length - len;
-}
-
-/**
- * ipath_init_restart - initialize the qp->s_sge after a restart
- * @qp: the QP whose SGE we're restarting
- * @wqe: the work queue entry to initialize the QP's SGE from
- *
- * The QP s_lock should be held and interrupts disabled.
- */
-static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
-{
-       struct ipath_ibdev *dev;
-
-       qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn,
-                               ib_mtu_enum_to_int(qp->path_mtu));
-       dev = to_idev(qp->ibqp.device);
-       spin_lock(&dev->pending_lock);
-       if (list_empty(&qp->timerwait))
-               list_add_tail(&qp->timerwait,
-                             &dev->pending[dev->pending_index]);
-       spin_unlock(&dev->pending_lock);
-}
-
-/**
- * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
- * @qp: a pointer to the QP
- * @ohdr: a pointer to the IB header being constructed
- * @pmtu: the path MTU
- *
- * Return 1 if constructed; otherwise, return 0.
- * Note that we are in the responder's side of the QP context.
- * Note the QP s_lock must be held.
- */
-static int ipath_make_rc_ack(struct ipath_ibdev *dev, struct ipath_qp *qp,
-                            struct ipath_other_headers *ohdr, u32 pmtu)
-{
-       struct ipath_ack_entry *e;
-       u32 hwords;
-       u32 len;
-       u32 bth0;
-       u32 bth2;
-
-       /* Don't send an ACK if we aren't supposed to. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-               goto bail;
-
-       /* header size in 32-bit words LRH+BTH = (8+12)/4. */
-       hwords = 5;
-
-       switch (qp->s_ack_state) {
-       case OP(RDMA_READ_RESPONSE_LAST):
-       case OP(RDMA_READ_RESPONSE_ONLY):
-       case OP(ATOMIC_ACKNOWLEDGE):
-               /*
-                * We can increment the tail pointer now that the last
-                * response has been sent instead of only being
-                * constructed.
-                */
-               if (++qp->s_tail_ack_queue > IPATH_MAX_RDMA_ATOMIC)
-                       qp->s_tail_ack_queue = 0;
-               /* FALLTHROUGH */
-       case OP(SEND_ONLY):
-       case OP(ACKNOWLEDGE):
-               /* Check for no next entry in the queue. */
-               if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
-                       if (qp->s_flags & IPATH_S_ACK_PENDING)
-                               goto normal;
-                       qp->s_ack_state = OP(ACKNOWLEDGE);
-                       goto bail;
-               }
-
-               e = &qp->s_ack_queue[qp->s_tail_ack_queue];
-               if (e->opcode == OP(RDMA_READ_REQUEST)) {
-                       /* Copy SGE state in case we need to resend */
-                       qp->s_ack_rdma_sge = e->rdma_sge;
-                       qp->s_cur_sge = &qp->s_ack_rdma_sge;
-                       len = e->rdma_sge.sge.sge_length;
-                       if (len > pmtu) {
-                               len = pmtu;
-                               qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
-                       } else {
-                               qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
-                               e->sent = 1;
-                       }
-                       ohdr->u.aeth = ipath_compute_aeth(qp);
-                       hwords++;
-                       qp->s_ack_rdma_psn = e->psn;
-                       bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
-               } else {
-                       /* COMPARE_SWAP or FETCH_ADD */
-                       qp->s_cur_sge = NULL;
-                       len = 0;
-                       qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
-                       ohdr->u.at.aeth = ipath_compute_aeth(qp);
-                       ohdr->u.at.atomic_ack_eth[0] =
-                               cpu_to_be32(e->atomic_data >> 32);
-                       ohdr->u.at.atomic_ack_eth[1] =
-                               cpu_to_be32(e->atomic_data);
-                       hwords += sizeof(ohdr->u.at) / sizeof(u32);
-                       bth2 = e->psn;
-                       e->sent = 1;
-               }
-               bth0 = qp->s_ack_state << 24;
-               break;
-
-       case OP(RDMA_READ_RESPONSE_FIRST):
-               qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(RDMA_READ_RESPONSE_MIDDLE):
-               len = qp->s_ack_rdma_sge.sge.sge_length;
-               if (len > pmtu)
-                       len = pmtu;
-               else {
-                       ohdr->u.aeth = ipath_compute_aeth(qp);
-                       hwords++;
-                       qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
-                       qp->s_ack_queue[qp->s_tail_ack_queue].sent = 1;
-               }
-               bth0 = qp->s_ack_state << 24;
-               bth2 = qp->s_ack_rdma_psn++ & IPATH_PSN_MASK;
-               break;
-
-       default:
-       normal:
-               /*
-                * Send a regular ACK.
-                * Set the s_ack_state so we wait until after sending
-                * the ACK before setting s_ack_state to ACKNOWLEDGE
-                * (see above).
-                */
-               qp->s_ack_state = OP(SEND_ONLY);
-               qp->s_flags &= ~IPATH_S_ACK_PENDING;
-               qp->s_cur_sge = NULL;
-               if (qp->s_nak_state)
-                       ohdr->u.aeth =
-                               cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
-                                           (qp->s_nak_state <<
-                                            IPATH_AETH_CREDIT_SHIFT));
-               else
-                       ohdr->u.aeth = ipath_compute_aeth(qp);
-               hwords++;
-               len = 0;
-               bth0 = OP(ACKNOWLEDGE) << 24;
-               bth2 = qp->s_ack_psn & IPATH_PSN_MASK;
-       }
-       qp->s_hdrwords = hwords;
-       qp->s_cur_size = len;
-       ipath_make_ruc_header(dev, qp, ohdr, bth0, bth2);
-       return 1;
-
-bail:
-       return 0;
-}
-
-/**
- * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
- * @qp: a pointer to the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_rc_req(struct ipath_qp *qp)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_other_headers *ohdr;
-       struct ipath_sge_state *ss;
-       struct ipath_swqe *wqe;
-       u32 hwords;
-       u32 len;
-       u32 bth0;
-       u32 bth2;
-       u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-       char newreq;
-       unsigned long flags;
-       int ret = 0;
-
-       ohdr = &qp->s_hdr.u.oth;
-       if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-               ohdr = &qp->s_hdr.u.l.oth;
-
-       /*
-        * The lock is needed to synchronize between the sending tasklet,
-        * the receive interrupt handler, and timeout resends.
-        */
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       /* Sending responses has higher priority than sending requests. */
-       if ((qp->r_head_ack_queue != qp->s_tail_ack_queue ||
-            (qp->s_flags & IPATH_S_ACK_PENDING) ||
-            qp->s_ack_state != OP(ACKNOWLEDGE)) &&
-           ipath_make_rc_ack(dev, qp, ohdr, pmtu))
-               goto done;
-
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
-                       goto bail;
-               /* We are in the error state, flush the work request. */
-               if (qp->s_last == qp->s_head)
-                       goto bail;
-               /* If DMAs are in progress, we can't flush immediately. */
-               if (atomic_read(&qp->s_dma_busy)) {
-                       qp->s_flags |= IPATH_S_WAIT_DMA;
-                       goto bail;
-               }
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
-               goto done;
-       }
-
-       /* Leave BUSY set until RNR timeout. */
-       if (qp->s_rnr_timeout) {
-               qp->s_flags |= IPATH_S_WAITING;
-               goto bail;
-       }
-
-       /* header size in 32-bit words LRH+BTH = (8+12)/4. */
-       hwords = 5;
-       bth0 = 1 << 22; /* Set M bit */
-
-       /* Send a request. */
-       wqe = get_swqe_ptr(qp, qp->s_cur);
-       switch (qp->s_state) {
-       default:
-               if (!(ib_ipath_state_ops[qp->state] &
-                   IPATH_PROCESS_NEXT_SEND_OK))
-                       goto bail;
-               /*
-                * Resend an old request or start a new one.
-                *
-                * We keep track of the current SWQE so that
-                * we don't reset the "furthest progress" state
-                * if we need to back up.
-                */
-               newreq = 0;
-               if (qp->s_cur == qp->s_tail) {
-                       /* Check if send work queue is empty. */
-                       if (qp->s_tail == qp->s_head)
-                               goto bail;
-                       /*
-                        * If a fence is requested, wait for previous
-                        * RDMA read and atomic operations to finish.
-                        */
-                       if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
-                           qp->s_num_rd_atomic) {
-                               qp->s_flags |= IPATH_S_FENCE_PENDING;
-                               goto bail;
-                       }
-                       wqe->psn = qp->s_next_psn;
-                       newreq = 1;
-               }
-               /*
-                * Note that we have to be careful not to modify the
-                * original work request since we may need to resend
-                * it.
-                */
-               len = wqe->length;
-               ss = &qp->s_sge;
-               bth2 = 0;
-               switch (wqe->wr.opcode) {
-               case IB_WR_SEND:
-               case IB_WR_SEND_WITH_IMM:
-                       /* If no credit, return. */
-                       if (qp->s_lsn != (u32) -1 &&
-                           ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
-                               qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
-                               goto bail;
-                       }
-                       wqe->lpsn = wqe->psn;
-                       if (len > pmtu) {
-                               wqe->lpsn += (len - 1) / pmtu;
-                               qp->s_state = OP(SEND_FIRST);
-                               len = pmtu;
-                               break;
-                       }
-                       if (wqe->wr.opcode == IB_WR_SEND)
-                               qp->s_state = OP(SEND_ONLY);
-                       else {
-                               qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
-                               /* Immediate data comes after the BTH */
-                               ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                               hwords += 1;
-                       }
-                       if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                               bth0 |= 1 << 23;
-                       bth2 = 1 << 31; /* Request ACK. */
-                       if (++qp->s_cur == qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               case IB_WR_RDMA_WRITE:
-                       if (newreq && qp->s_lsn != (u32) -1)
-                               qp->s_lsn++;
-                       /* FALLTHROUGH */
-               case IB_WR_RDMA_WRITE_WITH_IMM:
-                       /* If no credit, return. */
-                       if (qp->s_lsn != (u32) -1 &&
-                           ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) {
-                               qp->s_flags |= IPATH_S_WAIT_SSN_CREDIT;
-                               goto bail;
-                       }
-                       ohdr->u.rc.reth.vaddr =
-                               cpu_to_be64(wqe->rdma_wr.remote_addr);
-                       ohdr->u.rc.reth.rkey =
-                               cpu_to_be32(wqe->rdma_wr.rkey);
-                       ohdr->u.rc.reth.length = cpu_to_be32(len);
-                       hwords += sizeof(struct ib_reth) / sizeof(u32);
-                       wqe->lpsn = wqe->psn;
-                       if (len > pmtu) {
-                               wqe->lpsn += (len - 1) / pmtu;
-                               qp->s_state = OP(RDMA_WRITE_FIRST);
-                               len = pmtu;
-                               break;
-                       }
-                       if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-                               qp->s_state = OP(RDMA_WRITE_ONLY);
-                       else {
-                               qp->s_state =
-                                       OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-                               /* Immediate data comes after RETH */
-                               ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
-                               hwords += 1;
-                               if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                                       bth0 |= 1 << 23;
-                       }
-                       bth2 = 1 << 31; /* Request ACK. */
-                       if (++qp->s_cur == qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               case IB_WR_RDMA_READ:
-                       /*
-                        * Don't allow more operations to be started
-                        * than the QP limits allow.
-                        */
-                       if (newreq) {
-                               if (qp->s_num_rd_atomic >=
-                                   qp->s_max_rd_atomic) {
-                                       qp->s_flags |= IPATH_S_RDMAR_PENDING;
-                                       goto bail;
-                               }
-                               qp->s_num_rd_atomic++;
-                               if (qp->s_lsn != (u32) -1)
-                                       qp->s_lsn++;
-                               /*
-                                * Adjust s_next_psn to count the
-                                * expected number of responses.
-                                */
-                               if (len > pmtu)
-                                       qp->s_next_psn += (len - 1) / pmtu;
-                               wqe->lpsn = qp->s_next_psn++;
-                       }
-                       ohdr->u.rc.reth.vaddr =
-                               cpu_to_be64(wqe->rdma_wr.remote_addr);
-                       ohdr->u.rc.reth.rkey =
-                               cpu_to_be32(wqe->rdma_wr.rkey);
-                       ohdr->u.rc.reth.length = cpu_to_be32(len);
-                       qp->s_state = OP(RDMA_READ_REQUEST);
-                       hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
-                       ss = NULL;
-                       len = 0;
-                       if (++qp->s_cur == qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               case IB_WR_ATOMIC_CMP_AND_SWP:
-               case IB_WR_ATOMIC_FETCH_AND_ADD:
-                       /*
-                        * Don't allow more operations to be started
-                        * than the QP limits allow.
-                        */
-                       if (newreq) {
-                               if (qp->s_num_rd_atomic >=
-                                   qp->s_max_rd_atomic) {
-                                       qp->s_flags |= IPATH_S_RDMAR_PENDING;
-                                       goto bail;
-                               }
-                               qp->s_num_rd_atomic++;
-                               if (qp->s_lsn != (u32) -1)
-                                       qp->s_lsn++;
-                               wqe->lpsn = wqe->psn;
-                       }
-                       if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
-                               qp->s_state = OP(COMPARE_SWAP);
-                               ohdr->u.atomic_eth.swap_data = cpu_to_be64(
-                                       wqe->atomic_wr.swap);
-                               ohdr->u.atomic_eth.compare_data = cpu_to_be64(
-                                       wqe->atomic_wr.compare_add);
-                       } else {
-                               qp->s_state = OP(FETCH_ADD);
-                               ohdr->u.atomic_eth.swap_data = cpu_to_be64(
-                                       wqe->atomic_wr.compare_add);
-                               ohdr->u.atomic_eth.compare_data = 0;
-                       }
-                       ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32(
-                               wqe->atomic_wr.remote_addr >> 32);
-                       ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32(
-                               wqe->atomic_wr.remote_addr);
-                       ohdr->u.atomic_eth.rkey = cpu_to_be32(
-                               wqe->atomic_wr.rkey);
-                       hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
-                       ss = NULL;
-                       len = 0;
-                       if (++qp->s_cur == qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               default:
-                       goto bail;
-               }
-               qp->s_sge.sge = wqe->sg_list[0];
-               qp->s_sge.sg_list = wqe->sg_list + 1;
-               qp->s_sge.num_sge = wqe->wr.num_sge;
-               qp->s_len = wqe->length;
-               if (newreq) {
-                       qp->s_tail++;
-                       if (qp->s_tail >= qp->s_size)
-                               qp->s_tail = 0;
-               }
-               bth2 |= qp->s_psn & IPATH_PSN_MASK;
-               if (wqe->wr.opcode == IB_WR_RDMA_READ)
-                       qp->s_psn = wqe->lpsn + 1;
-               else {
-                       qp->s_psn++;
-                       if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
-                               qp->s_next_psn = qp->s_psn;
-               }
-               /*
-                * Put the QP on the pending list so lost ACKs will cause
-                * a retry.  More than one request can be pending so the
-                * QP may already be on the dev->pending list.
-                */
-               spin_lock(&dev->pending_lock);
-               if (list_empty(&qp->timerwait))
-                       list_add_tail(&qp->timerwait,
-                                     &dev->pending[dev->pending_index]);
-               spin_unlock(&dev->pending_lock);
-               break;
-
-       case OP(RDMA_READ_RESPONSE_FIRST):
-               /*
-                * This case can only happen if a send is restarted.
-                * See ipath_restart_rc().
-                */
-               ipath_init_restart(qp, wqe);
-               /* FALLTHROUGH */
-       case OP(SEND_FIRST):
-               qp->s_state = OP(SEND_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(SEND_MIDDLE):
-               bth2 = qp->s_psn++ & IPATH_PSN_MASK;
-               if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
-                       qp->s_next_psn = qp->s_psn;
-               ss = &qp->s_sge;
-               len = qp->s_len;
-               if (len > pmtu) {
-                       len = pmtu;
-                       break;
-               }
-               if (wqe->wr.opcode == IB_WR_SEND)
-                       qp->s_state = OP(SEND_LAST);
-               else {
-                       qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
-                       /* Immediate data comes after the BTH */
-                       ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                       hwords += 1;
-               }
-               if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                       bth0 |= 1 << 23;
-               bth2 |= 1 << 31;        /* Request ACK. */
-               qp->s_cur++;
-               if (qp->s_cur >= qp->s_size)
-                       qp->s_cur = 0;
-               break;
-
-       case OP(RDMA_READ_RESPONSE_LAST):
-               /*
-                * This case can only happen if an RDMA write is restarted.
-                * See ipath_restart_rc().
-                */
-               ipath_init_restart(qp, wqe);
-               /* FALLTHROUGH */
-       case OP(RDMA_WRITE_FIRST):
-               qp->s_state = OP(RDMA_WRITE_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(RDMA_WRITE_MIDDLE):
-               bth2 = qp->s_psn++ & IPATH_PSN_MASK;
-               if (ipath_cmp24(qp->s_psn, qp->s_next_psn) > 0)
-                       qp->s_next_psn = qp->s_psn;
-               ss = &qp->s_sge;
-               len = qp->s_len;
-               if (len > pmtu) {
-                       len = pmtu;
-                       break;
-               }
-               if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-                       qp->s_state = OP(RDMA_WRITE_LAST);
-               else {
-                       qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
-                       /* Immediate data comes after the BTH */
-                       ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                       hwords += 1;
-                       if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                               bth0 |= 1 << 23;
-               }
-               bth2 |= 1 << 31;        /* Request ACK. */
-               qp->s_cur++;
-               if (qp->s_cur >= qp->s_size)
-                       qp->s_cur = 0;
-               break;
-
-       case OP(RDMA_READ_RESPONSE_MIDDLE):
-               /*
-                * This case can only happen if an RDMA read is restarted.
-                * See ipath_restart_rc().
-                */
-               ipath_init_restart(qp, wqe);
-               len = ((qp->s_psn - wqe->psn) & IPATH_PSN_MASK) * pmtu;
-               ohdr->u.rc.reth.vaddr =
-                       cpu_to_be64(wqe->rdma_wr.remote_addr + len);
-               ohdr->u.rc.reth.rkey =
-                       cpu_to_be32(wqe->rdma_wr.rkey);
-               ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
-               qp->s_state = OP(RDMA_READ_REQUEST);
-               hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
-               bth2 = qp->s_psn & IPATH_PSN_MASK;
-               qp->s_psn = wqe->lpsn + 1;
-               ss = NULL;
-               len = 0;
-               qp->s_cur++;
-               if (qp->s_cur == qp->s_size)
-                       qp->s_cur = 0;
-               break;
-       }
-       if (ipath_cmp24(qp->s_psn, qp->s_last_psn + IPATH_PSN_CREDIT - 1) >= 0)
-               bth2 |= 1 << 31;        /* Request ACK. */
-       qp->s_len -= len;
-       qp->s_hdrwords = hwords;
-       qp->s_cur_sge = ss;
-       qp->s_cur_size = len;
-       ipath_make_ruc_header(dev, qp, ohdr, bth0 | (qp->s_state << 24), bth2);
-done:
-       ret = 1;
-       goto unlock;
-
-bail:
-       qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       return ret;
-}
-
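The PSN arithmetic above leans on ipath_cmp24() to order 24-bit packet
sequence numbers in the presence of wraparound.  A minimal sketch of that
kind of comparison follows; the driver's real helper lives in its headers,
not in this file, so the name and exact form here are assumptions:

	/*
	 * Sketch only: compare two 24-bit PSNs by sign-extending their
	 * difference as a 24-bit value, so wraparound is handled.  Returns
	 * <0, 0 or >0, which is how the ipath_cmp24() results are used
	 * above and below.
	 */
	static int psn_cmp24_sketch(u32 a, u32 b)
	{
		return (int)((a - b) << 8) >> 8;
	}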
-/**
- * send_rc_ack - Construct an ACK packet and send it
- * @qp: a pointer to the QP
- *
- * This is called from ipath_rc_rcv() and only uses the receive
- * side QP state.
- * Note that RDMA reads and atomics are handled in the
- * send side QP state and tasklet.
- */
-static void send_rc_ack(struct ipath_qp *qp)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_devdata *dd;
-       u16 lrh0;
-       u32 bth0;
-       u32 hwords;
-       u32 __iomem *piobuf;
-       struct ipath_ib_header hdr;
-       struct ipath_other_headers *ohdr;
-       unsigned long flags;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       /* Don't send ACK or NAK if an RDMA read or atomic is pending. */
-       if (qp->r_head_ack_queue != qp->s_tail_ack_queue ||
-           (qp->s_flags & IPATH_S_ACK_PENDING) ||
-           qp->s_ack_state != OP(ACKNOWLEDGE))
-               goto queue_ack;
-
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-
-       /* Don't try to send ACKs if the link isn't ACTIVE */
-       dd = dev->dd;
-       if (!(dd->ipath_flags & IPATH_LINKACTIVE))
-               goto done;
-
-       piobuf = ipath_getpiobuf(dd, 0, NULL);
-       if (!piobuf) {
-               /*
-                * We are out of PIO buffers at the moment.
-                * Pass responsibility for sending the ACK to the
-                * send tasklet so that when a PIO buffer becomes
-                * available, the ACK is sent ahead of other outgoing
-                * packets.
-                */
-               spin_lock_irqsave(&qp->s_lock, flags);
-               goto queue_ack;
-       }
-
-       /* Construct the header. */
-       ohdr = &hdr.u.oth;
-       lrh0 = IPATH_LRH_BTH;
-       /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
-       hwords = 6;
-       if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-               hwords += ipath_make_grh(dev, &hdr.u.l.grh,
-                                        &qp->remote_ah_attr.grh,
-                                        hwords, 0);
-               ohdr = &hdr.u.l.oth;
-               lrh0 = IPATH_LRH_GRH;
-       }
-       /* read pkey_index w/o lock (it's atomic) */
-       bth0 = ipath_get_pkey(dd, qp->s_pkey_index) |
-               (OP(ACKNOWLEDGE) << 24) | (1 << 22);
-       if (qp->r_nak_state)
-               ohdr->u.aeth = cpu_to_be32((qp->r_msn & IPATH_MSN_MASK) |
-                                           (qp->r_nak_state <<
-                                            IPATH_AETH_CREDIT_SHIFT));
-       else
-               ohdr->u.aeth = ipath_compute_aeth(qp);
-       lrh0 |= qp->remote_ah_attr.sl << 4;
-       hdr.lrh[0] = cpu_to_be16(lrh0);
-       hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-       hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
-       hdr.lrh[3] = cpu_to_be16(dd->ipath_lid |
-                                qp->remote_ah_attr.src_path_bits);
-       ohdr->bth[0] = cpu_to_be32(bth0);
-       ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-       ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & IPATH_PSN_MASK);
-
-       writeq(hwords + 1, piobuf);
-
-       if (dd->ipath_flags & IPATH_PIO_FLUSH_WC) {
-               u32 *hdrp = (u32 *) &hdr;
-
-               ipath_flush_wc();
-               __iowrite32_copy(piobuf + 2, hdrp, hwords - 1);
-               ipath_flush_wc();
-               __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1);
-       } else
-               __iowrite32_copy(piobuf + 2, (u32 *) &hdr, hwords);
-
-       ipath_flush_wc();
-
-       dev->n_unicast_xmit++;
-       goto done;
-
-queue_ack:
-       if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK) {
-               dev->n_rc_qacks++;
-               qp->s_flags |= IPATH_S_ACK_PENDING;
-               qp->s_nak_state = qp->r_nak_state;
-               qp->s_ack_psn = qp->r_ack_psn;
-
-               /* Schedule the send tasklet. */
-               ipath_schedule_send(qp);
-       }
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
-       return;
-}
-
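send_rc_ack() above builds the ACK's AETH by packing the expected MSN into
the low bits and placing a credit count or NAK code above it via
IPATH_MSN_MASK and IPATH_AETH_CREDIT_SHIFT.  A hedged sketch of that
packing, with the field widths written as literals that mirror how the
constants are used here rather than the driver's actual definitions:

	/*
	 * Sketch only: an AETH as this code treats it, a 24-bit MSN in the
	 * low bits with an 8-bit syndrome (credit count or NAK code) above
	 * it.  The literal mask and shift are assumptions standing in for
	 * IPATH_MSN_MASK and IPATH_AETH_CREDIT_SHIFT.
	 */
	static u32 build_aeth_sketch(u32 msn, u8 syndrome)
	{
		return (msn & 0xffffff) | ((u32)syndrome << 24);
	}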
-/**
- * reset_psn - reset the QP state to send starting from PSN
- * @qp: the QP
- * @psn: the packet sequence number to restart at
- *
- * This is called from ipath_rc_rcv() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held.
- */
-static void reset_psn(struct ipath_qp *qp, u32 psn)
-{
-       u32 n = qp->s_last;
-       struct ipath_swqe *wqe = get_swqe_ptr(qp, n);
-       u32 opcode;
-
-       qp->s_cur = n;
-
-       /*
-        * If we are starting the request from the beginning,
-        * let the normal send code handle initialization.
-        */
-       if (ipath_cmp24(psn, wqe->psn) <= 0) {
-               qp->s_state = OP(SEND_LAST);
-               goto done;
-       }
-
-       /* Find the work request opcode corresponding to the given PSN. */
-       opcode = wqe->wr.opcode;
-       for (;;) {
-               int diff;
-
-               if (++n == qp->s_size)
-                       n = 0;
-               if (n == qp->s_tail)
-                       break;
-               wqe = get_swqe_ptr(qp, n);
-               diff = ipath_cmp24(psn, wqe->psn);
-               if (diff < 0)
-                       break;
-               qp->s_cur = n;
-               /*
-                * If we are starting the request from the beginning,
-                * let the normal send code handle initialization.
-                */
-               if (diff == 0) {
-                       qp->s_state = OP(SEND_LAST);
-                       goto done;
-               }
-               opcode = wqe->wr.opcode;
-       }
-
-       /*
-        * Set the state to restart in the middle of a request.
-        * Don't change the s_sge, s_cur_sge, or s_cur_size.
-        * See ipath_make_rc_req().
-        */
-       switch (opcode) {
-       case IB_WR_SEND:
-       case IB_WR_SEND_WITH_IMM:
-               qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
-               break;
-
-       case IB_WR_RDMA_WRITE:
-       case IB_WR_RDMA_WRITE_WITH_IMM:
-               qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
-               break;
-
-       case IB_WR_RDMA_READ:
-               qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
-               break;
-
-       default:
-               /*
-                * This case shouldn't happen since there is
-                * only one PSN per request.
-                */
-               qp->s_state = OP(SEND_LAST);
-       }
-done:
-       qp->s_psn = psn;
-}
-
-/**
- * ipath_restart_rc - back up requester to resend the last un-ACKed request
- * @qp: the QP to restart
- * @psn: packet sequence number for the request
- *
- * The QP s_lock should be held and interrupts disabled.
- */
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn)
-{
-       struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-       struct ipath_ibdev *dev;
-
-       if (qp->s_retry == 0) {
-               ipath_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
-               ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-               goto bail;
-       }
-       qp->s_retry--;
-
-       /*
-        * Remove the QP from the timeout queue.
-        * Note: it may already have been removed by ipath_ib_timer().
-        */
-       dev = to_idev(qp->ibqp.device);
-       spin_lock(&dev->pending_lock);
-       if (!list_empty(&qp->timerwait))
-               list_del_init(&qp->timerwait);
-       if (!list_empty(&qp->piowait))
-               list_del_init(&qp->piowait);
-       spin_unlock(&dev->pending_lock);
-
-       if (wqe->wr.opcode == IB_WR_RDMA_READ)
-               dev->n_rc_resends++;
-       else
-               dev->n_rc_resends += (qp->s_psn - psn) & IPATH_PSN_MASK;
-
-       reset_psn(qp, psn);
-       ipath_schedule_send(qp);
-
-bail:
-       return;
-}
-
-static inline void update_last_psn(struct ipath_qp *qp, u32 psn)
-{
-       qp->s_last_psn = psn;
-}
-
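do_rc_ack() below then takes an AETH apart: the top three bits select ACK
(0), RNR NAK (1) or NAK (3), the next five bits carry the credit count or
the RNR/NAK reason, and the low 24 bits are the MSN.  A sketch of that
decoding, again with literal shifts and masks standing in for the driver's
IPATH_AETH_CREDIT_SHIFT and IPATH_AETH_CREDIT_MASK:

	/* Sketch only: split an AETH into the fields do_rc_ack() consumes. */
	struct aeth_fields_sketch {
		u32 type;	/* aeth >> 29: 0 = ACK, 1 = RNR NAK, 3 = NAK */
		u32 code;	/* credit count (ACK) or RNR/NAK reason code */
		u32 msn;	/* 24-bit message sequence number */
	};

	static struct aeth_fields_sketch decode_aeth_sketch(u32 aeth)
	{
		return (struct aeth_fields_sketch) {
			.type = aeth >> 29,
			.code = (aeth >> 24) & 0x1f,
			.msn  = aeth & 0xffffff,
		};
	}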
-/**
- * do_rc_ack - process an incoming RC ACK
- * @qp: the QP the ACK came in on
- * @psn: the packet sequence number of the ACK
- * @opcode: the opcode of the request that resulted in the ACK
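- * @aeth: the AETH value carried in the ACK packet
- * @val: the data returned by an atomic ACK, if any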
- *
- * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
- * for the given QP.
- * Called at interrupt level with the QP s_lock held and interrupts disabled.
- * Returns 1 if OK, 0 if current operation should be aborted (NAK).
- */
-static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode,
-                    u64 val)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ib_wc wc;
-       enum ib_wc_status status;
-       struct ipath_swqe *wqe;
-       int ret = 0;
-       u32 ack_psn;
-       int diff;
-
-       /*
-        * Remove the QP from the timeout queue (or RNR timeout queue).
-        * If ipath_ib_timer() has already removed it,
-        * it's OK since we hold the QP s_lock and ipath_restart_rc()
-        * just won't find anything to restart if we ACK everything.
-        */
-       spin_lock(&dev->pending_lock);
-       if (!list_empty(&qp->timerwait))
-               list_del_init(&qp->timerwait);
-       spin_unlock(&dev->pending_lock);
-
-       /*
-        * Note that NAKs implicitly ACK outstanding SEND and RDMA write
-        * requests and implicitly NAK RDMA read and atomic requests issued
-        * before the NAK'ed request.  The MSN won't include the NAK'ed
-        * request but will include any ACK'ed requests.
-        */
-       ack_psn = psn;
-       if (aeth >> 29)
-               ack_psn--;
-       wqe = get_swqe_ptr(qp, qp->s_last);
-
-       /*
-        * The MSN might be for a later WQE than the PSN indicates so
-        * only complete WQEs that the PSN finishes.
-        */
-       while ((diff = ipath_cmp24(ack_psn, wqe->lpsn)) >= 0) {
-               /*
-                * RDMA_READ_RESPONSE_ONLY is a special case since
-                * we want to generate completion events for everything
-                * before the RDMA read, copy the data, then generate
-                * the completion for the read.
-                */
-               if (wqe->wr.opcode == IB_WR_RDMA_READ &&
-                   opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
-                   diff == 0) {
-                       ret = 1;
-                       goto bail;
-               }
-               /*
-                * If this request is an RDMA read or atomic, and the ACK is
-                * for a later operation, this ACK NAKs the RDMA read or
-                * atomic.  In other words, only an RDMA_READ_LAST or ONLY
-                * can ACK an RDMA read, and likewise for atomic ops.  Note
-                * that the NAK case can only happen if relaxed ordering is
-                * used and requests are sent after an RDMA read or atomic
-                * is sent but before the response is received.
-                */
-               if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
-                    (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
-                   ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-                     wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
-                    (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
-                       /*
-                        * The last valid PSN seen is the previous
-                        * request's.
-                        */
-                       update_last_psn(qp, wqe->psn - 1);
-                       /* Retry this request. */
-                       ipath_restart_rc(qp, wqe->psn);
-                       /*
-                        * No need to process the ACK/NAK since we are
-                        * restarting an earlier request.
-                        */
-                       goto bail;
-               }
-               if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-                   wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-                       *(u64 *) wqe->sg_list[0].vaddr = val;
-               if (qp->s_num_rd_atomic &&
-                   (wqe->wr.opcode == IB_WR_RDMA_READ ||
-                    wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-                    wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
-                       qp->s_num_rd_atomic--;
-                       /* Restart sending task if fence is complete */
-                       if (((qp->s_flags & IPATH_S_FENCE_PENDING) &&
-                            !qp->s_num_rd_atomic) ||
-                           qp->s_flags & IPATH_S_RDMAR_PENDING)
-                               ipath_schedule_send(qp);
-               }
-               /* Post a send completion queue entry if requested. */
-               if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
-                   (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
-                       wc.wr_id = wqe->wr.wr_id;
-                       wc.status = IB_WC_SUCCESS;
-                       wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-                       wc.byte_len = wqe->length;
-                       wc.qp = &qp->ibqp;
-                       wc.src_qp = qp->remote_qpn;
-                       wc.slid = qp->remote_ah_attr.dlid;
-                       wc.sl = qp->remote_ah_attr.sl;
-                       ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
-               }
-               qp->s_retry = qp->s_retry_cnt;
-               /*
-                * If we are completing a request which is in the process of
-                * being resent, we can stop resending it since we know the
-                * responder has already seen it.
-                */
-               if (qp->s_last == qp->s_cur) {
-                       if (++qp->s_cur >= qp->s_size)
-                               qp->s_cur = 0;
-                       qp->s_last = qp->s_cur;
-                       if (qp->s_last == qp->s_tail)
-                               break;
-                       wqe = get_swqe_ptr(qp, qp->s_cur);
-                       qp->s_state = OP(SEND_LAST);
-                       qp->s_psn = wqe->psn;
-               } else {
-                       if (++qp->s_last >= qp->s_size)
-                               qp->s_last = 0;
-                       if (qp->state == IB_QPS_SQD && qp->s_last == qp->s_cur)
-                               qp->s_draining = 0;
-                       if (qp->s_last == qp->s_tail)
-                               break;
-                       wqe = get_swqe_ptr(qp, qp->s_last);
-               }
-       }
-
-       switch (aeth >> 29) {
-       case 0:         /* ACK */
-               dev->n_rc_acks++;
-               /* If this is a partial ACK, reset the retransmit timer. */
-               if (qp->s_last != qp->s_tail) {
-                       spin_lock(&dev->pending_lock);
-                       if (list_empty(&qp->timerwait))
-                               list_add_tail(&qp->timerwait,
-                                       &dev->pending[dev->pending_index]);
-                       spin_unlock(&dev->pending_lock);
-                       /*
-                        * If we get a partial ACK for a resent operation,
-                        * we can stop resending the earlier packets and
-                        * continue with the next packet the receiver wants.
-                        */
-                       if (ipath_cmp24(qp->s_psn, psn) <= 0) {
-                               reset_psn(qp, psn + 1);
-                               ipath_schedule_send(qp);
-                       }
-               } else if (ipath_cmp24(qp->s_psn, psn) <= 0) {
-                       qp->s_state = OP(SEND_LAST);
-                       qp->s_psn = psn + 1;
-               }
-               ipath_get_credit(qp, aeth);
-               qp->s_rnr_retry = qp->s_rnr_retry_cnt;
-               qp->s_retry = qp->s_retry_cnt;
-               update_last_psn(qp, psn);
-               ret = 1;
-               goto bail;
-
-       case 1:         /* RNR NAK */
-               dev->n_rnr_naks++;
-               if (qp->s_last == qp->s_tail)
-                       goto bail;
-               if (qp->s_rnr_retry == 0) {
-                       status = IB_WC_RNR_RETRY_EXC_ERR;
-                       goto class_b;
-               }
-               if (qp->s_rnr_retry_cnt < 7)
-                       qp->s_rnr_retry--;
-
-               /* The last valid PSN is the previous PSN. */
-               update_last_psn(qp, psn - 1);
-
-               if (wqe->wr.opcode == IB_WR_RDMA_READ)
-                       dev->n_rc_resends++;
-               else
-                       dev->n_rc_resends +=
-                               (qp->s_psn - psn) & IPATH_PSN_MASK;
-
-               reset_psn(qp, psn);
-
-               qp->s_rnr_timeout =
-                       ib_ipath_rnr_table[(aeth >> IPATH_AETH_CREDIT_SHIFT) &
-                                          IPATH_AETH_CREDIT_MASK];
-               ipath_insert_rnr_queue(qp);
-               ipath_schedule_send(qp);
-               goto bail;
-
-       case 3:         /* NAK */
-               if (qp->s_last == qp->s_tail)
-                       goto bail;
-               /* The last valid PSN is the previous PSN. */
-               update_last_psn(qp, psn - 1);
-               switch ((aeth >> IPATH_AETH_CREDIT_SHIFT) &
-                       IPATH_AETH_CREDIT_MASK) {
-               case 0: /* PSN sequence error */
-                       dev->n_seq_naks++;
-                       /*
-                        * Back up to the responder's expected PSN.
-                        * Note that we might get a NAK in the middle of an
-                        * RDMA READ response which terminates the RDMA
-                        * READ.
-                        */
-                       ipath_restart_rc(qp, psn);
-                       break;
-
-               case 1: /* Invalid Request */
-                       status = IB_WC_REM_INV_REQ_ERR;
-                       dev->n_other_naks++;
-                       goto class_b;
-
-               case 2: /* Remote Access Error */
-                       status = IB_WC_REM_ACCESS_ERR;
-                       dev->n_other_naks++;
-                       goto class_b;
-
-               case 3: /* Remote Operation Error */
-                       status = IB_WC_REM_OP_ERR;
-                       dev->n_other_naks++;
-               class_b:
-                       ipath_send_complete(qp, wqe, status);
-                       ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-                       break;
-
-               default:
-                       /* Ignore other reserved NAK error codes */
-                       goto reserved;
-               }
-               qp->s_rnr_retry = qp->s_rnr_retry_cnt;
-               goto bail;
-
-       default:                /* 2: reserved */
-       reserved:
-               /* Ignore reserved NAK codes. */
-               goto bail;
-       }
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_rc_rcv_resp - process an incoming RC response packet
- * @dev: the device this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @hdrsize: the header length
- * @pmtu: the path MTU
- * @header_in_data: true if part of the header data is in the data buffer
- *
- * This is called from ipath_rc_rcv() to process an incoming RC response
- * packet for the given QP.
- * Called at interrupt level.
- */
-static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
-                                    struct ipath_other_headers *ohdr,
-                                    void *data, u32 tlen,
-                                    struct ipath_qp *qp,
-                                    u32 opcode,
-                                    u32 psn, u32 hdrsize, u32 pmtu,
-                                    int header_in_data)
-{
-       struct ipath_swqe *wqe;
-       enum ib_wc_status status;
-       unsigned long flags;
-       int diff;
-       u32 pad;
-       u32 aeth;
-       u64 val;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       /* Double check we can process this now that we hold the s_lock. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-               goto ack_done;
-
-       /* Ignore invalid responses. */
-       if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
-               goto ack_done;
-
-       /* Ignore duplicate responses. */
-       diff = ipath_cmp24(psn, qp->s_last_psn);
-       if (unlikely(diff <= 0)) {
-               /* Update credits for "ghost" ACKs */
-               if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
-                       if (!header_in_data)
-                               aeth = be32_to_cpu(ohdr->u.aeth);
-                       else {
-                               aeth = be32_to_cpu(((__be32 *) data)[0]);
-                               data += sizeof(__be32);
-                       }
-                       if ((aeth >> 29) == 0)
-                               ipath_get_credit(qp, aeth);
-               }
-               goto ack_done;
-       }
-
-       if (unlikely(qp->s_last == qp->s_tail))
-               goto ack_done;
-       wqe = get_swqe_ptr(qp, qp->s_last);
-       status = IB_WC_SUCCESS;
-
-       switch (opcode) {
-       case OP(ACKNOWLEDGE):
-       case OP(ATOMIC_ACKNOWLEDGE):
-       case OP(RDMA_READ_RESPONSE_FIRST):
-               if (!header_in_data)
-                       aeth = be32_to_cpu(ohdr->u.aeth);
-               else {
-                       aeth = be32_to_cpu(((__be32 *) data)[0]);
-                       data += sizeof(__be32);
-               }
-               if (opcode == OP(ATOMIC_ACKNOWLEDGE)) {
-                       if (!header_in_data) {
-                               __be32 *p = ohdr->u.at.atomic_ack_eth;
-
-                               val = ((u64) be32_to_cpu(p[0]) << 32) |
-                                       be32_to_cpu(p[1]);
-                       } else
-                               val = be64_to_cpu(((__be64 *) data)[0]);
-               } else
-                       val = 0;
-               if (!do_rc_ack(qp, aeth, psn, opcode, val) ||
-                   opcode != OP(RDMA_READ_RESPONSE_FIRST))
-                       goto ack_done;
-               hdrsize += 4;
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-                       goto ack_op_err;
-               qp->r_flags &= ~IPATH_R_RDMAR_SEQ;
-               /*
-                * If this is a response to a resent RDMA read, we
-                * have to be careful to copy the data to the right
-                * location.
-                */
-               qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
-                                                 wqe, psn, pmtu);
-               goto read_middle;
-
-       case OP(RDMA_READ_RESPONSE_MIDDLE):
-               /* no AETH, no ACK */
-               if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
-                       dev->n_rdma_seq++;
-                       if (qp->r_flags & IPATH_R_RDMAR_SEQ)
-                               goto ack_done;
-                       qp->r_flags |= IPATH_R_RDMAR_SEQ;
-                       ipath_restart_rc(qp, qp->s_last_psn + 1);
-                       goto ack_done;
-               }
-               if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-                       goto ack_op_err;
-       read_middle:
-               if (unlikely(tlen != (hdrsize + pmtu + 4)))
-                       goto ack_len_err;
-               if (unlikely(pmtu >= qp->s_rdma_read_len))
-                       goto ack_len_err;
-
-               /* We got a response so update the timeout. */
-               spin_lock(&dev->pending_lock);
-               if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
-                       list_move_tail(&qp->timerwait,
-                                      &dev->pending[dev->pending_index]);
-               spin_unlock(&dev->pending_lock);
-
-               if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
-                       qp->s_retry = qp->s_retry_cnt;
-
-               /*
-                * Update the RDMA receive state but do the copy w/o
-                * holding the locks and blocking interrupts.
-                */
-               qp->s_rdma_read_len -= pmtu;
-               update_last_psn(qp, psn);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-               ipath_copy_sge(&qp->s_rdma_read_sge, data, pmtu);
-               goto bail;
-
-       case OP(RDMA_READ_RESPONSE_ONLY):
-               if (!header_in_data)
-                       aeth = be32_to_cpu(ohdr->u.aeth);
-               else
-                       aeth = be32_to_cpu(((__be32 *) data)[0]);
-               if (!do_rc_ack(qp, aeth, psn, opcode, 0))
-                       goto ack_done;
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /*
-                * Check that the data size is >= 0 && <= pmtu.
-                * Remember to account for the AETH header (4) and
-                * ICRC (4).
-                */
-               if (unlikely(tlen < (hdrsize + pad + 8)))
-                       goto ack_len_err;
-               /*
-                * If this is a response to a resent RDMA read, we
-                * have to be careful to copy the data to the right
-                * location.
-                */
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
-                                                 wqe, psn, pmtu);
-               goto read_last;
-
-       case OP(RDMA_READ_RESPONSE_LAST):
-               /* ACKs READ req. */
-               if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
-                       dev->n_rdma_seq++;
-                       if (qp->r_flags & IPATH_R_RDMAR_SEQ)
-                               goto ack_done;
-                       qp->r_flags |= IPATH_R_RDMAR_SEQ;
-                       ipath_restart_rc(qp, qp->s_last_psn + 1);
-                       goto ack_done;
-               }
-               if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
-                       goto ack_op_err;
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /*
-                * Check that the data size is >= 1 && <= pmtu.
-                * Remember to account for the AETH header (4) and
-                * ICRC (4).
-                */
-               if (unlikely(tlen <= (hdrsize + pad + 8)))
-                       goto ack_len_err;
-       read_last:
-               tlen -= hdrsize + pad + 8;
-               if (unlikely(tlen != qp->s_rdma_read_len))
-                       goto ack_len_err;
-               if (!header_in_data)
-                       aeth = be32_to_cpu(ohdr->u.aeth);
-               else {
-                       aeth = be32_to_cpu(((__be32 *) data)[0]);
-                       data += sizeof(__be32);
-               }
-               ipath_copy_sge(&qp->s_rdma_read_sge, data, tlen);
-               (void) do_rc_ack(qp, aeth, psn,
-                                OP(RDMA_READ_RESPONSE_LAST), 0);
-               goto ack_done;
-       }
-
-ack_op_err:
-       status = IB_WC_LOC_QP_OP_ERR;
-       goto ack_err;
-
-ack_len_err:
-       status = IB_WC_LOC_LEN_ERR;
-ack_err:
-       ipath_send_complete(qp, wqe, status);
-       ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
-ack_done:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-bail:
-       return;
-}
-
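The length checks in ipath_rc_rcv_resp() above follow one pattern: what is
left of tlen after the headers, the 0-3 pad bytes advertised in bth[0] and
the 4-byte ICRC (plus 4 more bytes when an AETH is present) must match the
expected payload.  A small sketch of that arithmetic, with hypothetical
parameter names standing in for values the receive path already has:

	/*
	 * Sketch only: recover the payload length the way the checks above
	 * do.  hdrsize is the header length so far, bth0 the first BTH
	 * word, has_aeth says whether a 4-byte AETH precedes the payload,
	 * and the trailing 4 bytes are the ICRC.
	 */
	static int payload_len_sketch(u32 tlen, u32 hdrsize, u32 bth0,
				      int has_aeth, u32 *payload)
	{
		u32 pad = (bth0 >> 20) & 3;
		u32 overhead = hdrsize + pad + 4 + (has_aeth ? 4 : 0);

		if (tlen < overhead)
			return -1;	/* shorter than its own headers */
		*payload = tlen - overhead;
		return 0;
	}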
-/**
- * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
- * @dev: the device this packet came in on
- * @ohdr: the other headers for this packet
- * @data: the packet data
- * @qp: the QP for this packet
- * @opcode: the opcode for this packet
- * @psn: the packet sequence number for this packet
- * @diff: the difference between the PSN and the expected PSN
- * @header_in_data: true if part of the header data is in the data buffer
- *
- * This is called from ipath_rc_rcv() to process an unexpected
- * incoming RC packet for the given QP.
- * Called at interrupt level.
- * Return 1 if no more processing is needed; otherwise return 0 to
- * schedule a response to be sent.
- */
-static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
-                                    struct ipath_other_headers *ohdr,
-                                    void *data,
-                                    struct ipath_qp *qp,
-                                    u32 opcode,
-                                    u32 psn,
-                                    int diff,
-                                    int header_in_data)
-{
-       struct ipath_ack_entry *e;
-       u8 i, prev;
-       int old_req;
-       unsigned long flags;
-
-       if (diff > 0) {
-               /*
-                * Packet sequence error.
-                * A NAK will ACK earlier sends and RDMA writes.
-                * Don't queue the NAK if we already sent one.
-                */
-               if (!qp->r_nak_state) {
-                       qp->r_nak_state = IB_NAK_PSN_ERROR;
-                       /* Use the expected PSN. */
-                       qp->r_ack_psn = qp->r_psn;
-                       goto send_ack;
-               }
-               goto done;
-       }
-
-       /*
-        * Handle a duplicate request.  Don't re-execute SEND, RDMA
-        * write or atomic op.  Don't NAK errors, just silently drop
-        * the duplicate request.  Note that r_sge, r_len, and
-        * r_rcv_len may be in use so don't modify them.
-        *
-        * We are supposed to ACK the earliest duplicate PSN but we
-        * can coalesce an outstanding duplicate ACK.  We have to
-        * send the earliest so that RDMA reads can be restarted at
-        * the requester's expected PSN.
-        *
-        * First, find where this duplicate PSN falls within the
-        * ACKs previously sent.
-        */
-       psn &= IPATH_PSN_MASK;
-       e = NULL;
-       old_req = 1;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-       /* Double check we can process this now that we hold the s_lock. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-               goto unlock_done;
-
-       for (i = qp->r_head_ack_queue; ; i = prev) {
-               if (i == qp->s_tail_ack_queue)
-                       old_req = 0;
-               if (i)
-                       prev = i - 1;
-               else
-                       prev = IPATH_MAX_RDMA_ATOMIC;
-               if (prev == qp->r_head_ack_queue) {
-                       e = NULL;
-                       break;
-               }
-               e = &qp->s_ack_queue[prev];
-               if (!e->opcode) {
-                       e = NULL;
-                       break;
-               }
-               if (ipath_cmp24(psn, e->psn) >= 0) {
-                       if (prev == qp->s_tail_ack_queue)
-                               old_req = 0;
-                       break;
-               }
-       }
-       switch (opcode) {
-       case OP(RDMA_READ_REQUEST): {
-               struct ib_reth *reth;
-               u32 offset;
-               u32 len;
-
-               /*
-                * If we didn't find the RDMA read request in the ack queue,
-                * or the send tasklet is already backed up to send an
-                * earlier entry, we can ignore this request.
-                */
-               if (!e || e->opcode != OP(RDMA_READ_REQUEST) || old_req)
-                       goto unlock_done;
-               /* RETH comes after BTH */
-               if (!header_in_data)
-                       reth = &ohdr->u.rc.reth;
-               else {
-                       reth = (struct ib_reth *)data;
-                       data += sizeof(*reth);
-               }
-               /*
-                * Address range must be a subset of the original
-                * request and start on pmtu boundaries.
-                * We reuse the old ack_queue slot since the requester
-                * should not back up and request an earlier PSN for the
-                * same request.
-                */
-               offset = ((psn - e->psn) & IPATH_PSN_MASK) *
-                       ib_mtu_enum_to_int(qp->path_mtu);
-               len = be32_to_cpu(reth->length);
-               if (unlikely(offset + len > e->rdma_sge.sge.sge_length))
-                       goto unlock_done;
-               if (len != 0) {
-                       u32 rkey = be32_to_cpu(reth->rkey);
-                       u64 vaddr = be64_to_cpu(reth->vaddr);
-                       int ok;
-
-                       ok = ipath_rkey_ok(qp, &e->rdma_sge,
-                                          len, vaddr, rkey,
-                                          IB_ACCESS_REMOTE_READ);
-                       if (unlikely(!ok))
-                               goto unlock_done;
-               } else {
-                       e->rdma_sge.sg_list = NULL;
-                       e->rdma_sge.num_sge = 0;
-                       e->rdma_sge.sge.mr = NULL;
-                       e->rdma_sge.sge.vaddr = NULL;
-                       e->rdma_sge.sge.length = 0;
-                       e->rdma_sge.sge.sge_length = 0;
-               }
-               e->psn = psn;
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-               qp->s_tail_ack_queue = prev;
-               break;
-       }
-
-       case OP(COMPARE_SWAP):
-       case OP(FETCH_ADD): {
-               /*
-                * If we didn't find the atomic request in the ack queue
-                * or the send tasklet is already backed up to send an
-                * earlier entry, we can ignore this request.
-                */
-               if (!e || e->opcode != (u8) opcode || old_req)
-                       goto unlock_done;
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-               qp->s_tail_ack_queue = prev;
-               break;
-       }
-
-       default:
-               if (old_req)
-                       goto unlock_done;
-               /*
-                * Resend the most recent ACK if this request is
-                * after all the previous RDMA reads and atomics.
-                */
-               if (i == qp->r_head_ack_queue) {
-                       spin_unlock_irqrestore(&qp->s_lock, flags);
-                       qp->r_nak_state = 0;
-                       qp->r_ack_psn = qp->r_psn - 1;
-                       goto send_ack;
-               }
-               /*
-                * Try to send a simple ACK to work around a Mellanox bug
-                * which doesn't accept an RDMA read response or atomic
-                * response as an ACK for earlier SENDs or RDMA writes.
-                */
-               if (qp->r_head_ack_queue == qp->s_tail_ack_queue &&
-                   !(qp->s_flags & IPATH_S_ACK_PENDING) &&
-                   qp->s_ack_state == OP(ACKNOWLEDGE)) {
-                       spin_unlock_irqrestore(&qp->s_lock, flags);
-                       qp->r_nak_state = 0;
-                       qp->r_ack_psn = qp->s_ack_queue[i].psn - 1;
-                       goto send_ack;
-               }
-               /*
-                * Resend the RDMA read or atomic op which
-                * ACKs this duplicate request.
-                */
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-               qp->s_tail_ack_queue = i;
-               break;
-       }
-       qp->r_nak_state = 0;
-       ipath_schedule_send(qp);
-
-unlock_done:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
-       return 1;
-
-send_ack:
-       return 0;
-}
-
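Both the duplicate handling above and ipath_update_ack_queue() below walk
qp->s_ack_queue[] as a small ring of IPATH_MAX_RDMA_ATOMIC + 1 entries.  A
sketch of the wrap-around steps that matches the open-coded arithmetic
(the helper names are made up; the size constant is the driver's own):

	/* Sketch only: ring stepping as used on qp->s_ack_queue indices. */
	static u8 ack_queue_next_sketch(u8 n)
	{
		return n >= IPATH_MAX_RDMA_ATOMIC ? 0 : n + 1;
	}

	static u8 ack_queue_prev_sketch(u8 n)
	{
		return n ? n - 1 : IPATH_MAX_RDMA_ATOMIC;
	}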
-void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err)
-{
-       unsigned long flags;
-       int lastwqe;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-       lastwqe = ipath_error_qp(qp, err);
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-
-       if (lastwqe) {
-               struct ib_event ev;
-
-               ev.device = qp->ibqp.device;
-               ev.element.qp = &qp->ibqp;
-               ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-               qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
-       }
-}
-
-static inline void ipath_update_ack_queue(struct ipath_qp *qp, unsigned n)
-{
-       unsigned next;
-
-       next = n + 1;
-       if (next > IPATH_MAX_RDMA_ATOMIC)
-               next = 0;
-       if (n == qp->s_tail_ack_queue) {
-               qp->s_tail_ack_queue = next;
-               qp->s_ack_state = OP(ACKNOWLEDGE);
-       }
-}
-
-/**
- * ipath_rc_rcv - process an incoming RC packet
- * @dev: the device this packet came in on
- * @hdr: the header of this packet
- * @has_grh: true if the header has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP for this packet
- *
- * This is called from ipath_qp_rcv() to process an incoming RC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
-       struct ipath_other_headers *ohdr;
-       u32 opcode;
-       u32 hdrsize;
-       u32 psn;
-       u32 pad;
-       struct ib_wc wc;
-       u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-       int diff;
-       struct ib_reth *reth;
-       int header_in_data;
-       unsigned long flags;
-
-       /* Validate the SLID. See Ch. 9.6.1.5 */
-       if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
-               goto done;
-
-       /* Check for GRH */
-       if (!has_grh) {
-               ohdr = &hdr->u.oth;
-               hdrsize = 8 + 12;       /* LRH + BTH */
-               psn = be32_to_cpu(ohdr->bth[2]);
-               header_in_data = 0;
-       } else {
-               ohdr = &hdr->u.l.oth;
-               hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
-               /*
-                * The header with GRH is 60 bytes and the core driver sets
-                * the eager header buffer size to 56 bytes so the last 4
-                * bytes of the BTH header (the PSN) are in the data buffer.
-                */
-               header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
-               if (header_in_data) {
-                       psn = be32_to_cpu(((__be32 *) data)[0]);
-                       data += sizeof(__be32);
-               } else
-                       psn = be32_to_cpu(ohdr->bth[2]);
-       }
-
-       /*
-        * Process responses (ACKs) before anything else.  Note that the
-        * packet sequence number will be for something in the send work
-        * queue rather than the expected receive packet sequence number.
-        * In other words, this QP is the requester.
-        */
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-       if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
-           opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
-               ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
-                                 hdrsize, pmtu, header_in_data);
-               goto done;
-       }
-
-       /* Compute 24 bits worth of difference. */
-       diff = ipath_cmp24(psn, qp->r_psn);
-       if (unlikely(diff)) {
-               if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
-                                      psn, diff, header_in_data))
-                       goto done;
-               goto send_ack;
-       }
-
-       /* Check for opcode sequence errors. */
-       switch (qp->r_state) {
-       case OP(SEND_FIRST):
-       case OP(SEND_MIDDLE):
-               if (opcode == OP(SEND_MIDDLE) ||
-                   opcode == OP(SEND_LAST) ||
-                   opcode == OP(SEND_LAST_WITH_IMMEDIATE))
-                       break;
-               goto nack_inv;
-
-       case OP(RDMA_WRITE_FIRST):
-       case OP(RDMA_WRITE_MIDDLE):
-               if (opcode == OP(RDMA_WRITE_MIDDLE) ||
-                   opcode == OP(RDMA_WRITE_LAST) ||
-                   opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
-                       break;
-               goto nack_inv;
-
-       default:
-               if (opcode == OP(SEND_MIDDLE) ||
-                   opcode == OP(SEND_LAST) ||
-                   opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
-                   opcode == OP(RDMA_WRITE_MIDDLE) ||
-                   opcode == OP(RDMA_WRITE_LAST) ||
-                   opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
-                       goto nack_inv;
-               /*
-                * Note that it is up to the requester to not send a new
-                * RDMA read or atomic operation before receiving an ACK
-                * for the previous operation.
-                */
-               break;
-       }
-
-       memset(&wc, 0, sizeof wc);
-
-       /* OK, process the packet. */
-       switch (opcode) {
-       case OP(SEND_FIRST):
-               if (!ipath_get_rwqe(qp, 0))
-                       goto rnr_nak;
-               qp->r_rcv_len = 0;
-               /* FALLTHROUGH */
-       case OP(SEND_MIDDLE):
-       case OP(RDMA_WRITE_MIDDLE):
-       send_middle:
-               /* Check for invalid length PMTU or posted rwqe len. */
-               if (unlikely(tlen != (hdrsize + pmtu + 4)))
-                       goto nack_inv;
-               qp->r_rcv_len += pmtu;
-               if (unlikely(qp->r_rcv_len > qp->r_len))
-                       goto nack_inv;
-               ipath_copy_sge(&qp->r_sge, data, pmtu);
-               break;
-
-       case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
-               /* consume RWQE */
-               if (!ipath_get_rwqe(qp, 1))
-                       goto rnr_nak;
-               goto send_last_imm;
-
-       case OP(SEND_ONLY):
-       case OP(SEND_ONLY_WITH_IMMEDIATE):
-               if (!ipath_get_rwqe(qp, 0))
-                       goto rnr_nak;
-               qp->r_rcv_len = 0;
-               if (opcode == OP(SEND_ONLY))
-                       goto send_last;
-               /* FALLTHROUGH */
-       case OP(SEND_LAST_WITH_IMMEDIATE):
-       send_last_imm:
-               if (header_in_data) {
-                       wc.ex.imm_data = *(__be32 *) data;
-                       data += sizeof(__be32);
-               } else {
-                       /* Immediate data comes after BTH */
-                       wc.ex.imm_data = ohdr->u.imm_data;
-               }
-               hdrsize += 4;
-               wc.wc_flags = IB_WC_WITH_IMM;
-               /* FALLTHROUGH */
-       case OP(SEND_LAST):
-       case OP(RDMA_WRITE_LAST):
-       send_last:
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /* Check for invalid length. */
-               /* XXX LAST len should be >= 1 */
-               if (unlikely(tlen < (hdrsize + pad + 4)))
-                       goto nack_inv;
-               /* Don't count the CRC. */
-               tlen -= (hdrsize + pad + 4);
-               wc.byte_len = tlen + qp->r_rcv_len;
-               if (unlikely(wc.byte_len > qp->r_len))
-                       goto nack_inv;
-               ipath_copy_sge(&qp->r_sge, data, tlen);
-               qp->r_msn++;
-               if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
-                       break;
-               wc.wr_id = qp->r_wr_id;
-               wc.status = IB_WC_SUCCESS;
-               if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
-                   opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
-                       wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-               else
-                       wc.opcode = IB_WC_RECV;
-               wc.qp = &qp->ibqp;
-               wc.src_qp = qp->remote_qpn;
-               wc.slid = qp->remote_ah_attr.dlid;
-               wc.sl = qp->remote_ah_attr.sl;
-               /* Signal completion event if the solicited bit is set. */
-               ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                              (ohdr->bth[0] &
-                               cpu_to_be32(1 << 23)) != 0);
-               break;
-
-       case OP(RDMA_WRITE_FIRST):
-       case OP(RDMA_WRITE_ONLY):
-       case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
-               if (unlikely(!(qp->qp_access_flags &
-                              IB_ACCESS_REMOTE_WRITE)))
-                       goto nack_inv;
-               /* consume RWQE */
-               /* RETH comes after BTH */
-               if (!header_in_data)
-                       reth = &ohdr->u.rc.reth;
-               else {
-                       reth = (struct ib_reth *)data;
-                       data += sizeof(*reth);
-               }
-               hdrsize += sizeof(*reth);
-               qp->r_len = be32_to_cpu(reth->length);
-               qp->r_rcv_len = 0;
-               if (qp->r_len != 0) {
-                       u32 rkey = be32_to_cpu(reth->rkey);
-                       u64 vaddr = be64_to_cpu(reth->vaddr);
-                       int ok;
-
-                       /* Check rkey & NAK */
-                       ok = ipath_rkey_ok(qp, &qp->r_sge,
-                                          qp->r_len, vaddr, rkey,
-                                          IB_ACCESS_REMOTE_WRITE);
-                       if (unlikely(!ok))
-                               goto nack_acc;
-               } else {
-                       qp->r_sge.sg_list = NULL;
-                       qp->r_sge.sge.mr = NULL;
-                       qp->r_sge.sge.vaddr = NULL;
-                       qp->r_sge.sge.length = 0;
-                       qp->r_sge.sge.sge_length = 0;
-               }
-               if (opcode == OP(RDMA_WRITE_FIRST))
-                       goto send_middle;
-               else if (opcode == OP(RDMA_WRITE_ONLY))
-                       goto send_last;
-               if (!ipath_get_rwqe(qp, 1))
-                       goto rnr_nak;
-               goto send_last_imm;
-
-       case OP(RDMA_READ_REQUEST): {
-               struct ipath_ack_entry *e;
-               u32 len;
-               u8 next;
-
-               if (unlikely(!(qp->qp_access_flags &
-                              IB_ACCESS_REMOTE_READ)))
-                       goto nack_inv;
-               next = qp->r_head_ack_queue + 1;
-               if (next > IPATH_MAX_RDMA_ATOMIC)
-                       next = 0;
-               spin_lock_irqsave(&qp->s_lock, flags);
-               /* Double check we can process this while holding the s_lock. */
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-                       goto unlock;
-               if (unlikely(next == qp->s_tail_ack_queue)) {
-                       if (!qp->s_ack_queue[next].sent)
-                               goto nack_inv_unlck;
-                       ipath_update_ack_queue(qp, next);
-               }
-               e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               /* RETH comes after BTH */
-               if (!header_in_data)
-                       reth = &ohdr->u.rc.reth;
-               else {
-                       reth = (struct ib_reth *)data;
-                       data += sizeof(*reth);
-               }
-               len = be32_to_cpu(reth->length);
-               if (len) {
-                       u32 rkey = be32_to_cpu(reth->rkey);
-                       u64 vaddr = be64_to_cpu(reth->vaddr);
-                       int ok;
-
-                       /* Check rkey & NAK */
-                       ok = ipath_rkey_ok(qp, &e->rdma_sge, len, vaddr,
-                                          rkey, IB_ACCESS_REMOTE_READ);
-                       if (unlikely(!ok))
-                               goto nack_acc_unlck;
-                       /*
-                        * Update the next expected PSN.  We add 1 later
-                        * below, so only add the remainder here.
-                        */
-                       if (len > pmtu)
-                               qp->r_psn += (len - 1) / pmtu;
-               } else {
-                       e->rdma_sge.sg_list = NULL;
-                       e->rdma_sge.num_sge = 0;
-                       e->rdma_sge.sge.mr = NULL;
-                       e->rdma_sge.sge.vaddr = NULL;
-                       e->rdma_sge.sge.length = 0;
-                       e->rdma_sge.sge.sge_length = 0;
-               }
-               e->opcode = opcode;
-               e->sent = 0;
-               e->psn = psn;
-               /*
-                * We need to increment the MSN here instead of when we
-                * finish sending the result since a duplicate request would
-                * increment it more than once.
-                */
-               qp->r_msn++;
-               qp->r_psn++;
-               qp->r_state = opcode;
-               qp->r_nak_state = 0;
-               qp->r_head_ack_queue = next;
-
-               /* Schedule the send tasklet. */
-               ipath_schedule_send(qp);
-
-               goto unlock;
-       }
-
-       case OP(COMPARE_SWAP):
-       case OP(FETCH_ADD): {
-               struct ib_atomic_eth *ateth;
-               struct ipath_ack_entry *e;
-               u64 vaddr;
-               atomic64_t *maddr;
-               u64 sdata;
-               u32 rkey;
-               u8 next;
-
-               if (unlikely(!(qp->qp_access_flags &
-                              IB_ACCESS_REMOTE_ATOMIC)))
-                       goto nack_inv;
-               next = qp->r_head_ack_queue + 1;
-               if (next > IPATH_MAX_RDMA_ATOMIC)
-                       next = 0;
-               spin_lock_irqsave(&qp->s_lock, flags);
-               /* Double check we can process this while holding the s_lock. */
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK))
-                       goto unlock;
-               if (unlikely(next == qp->s_tail_ack_queue)) {
-                       if (!qp->s_ack_queue[next].sent)
-                               goto nack_inv_unlck;
-                       ipath_update_ack_queue(qp, next);
-               }
-               if (!header_in_data)
-                       ateth = &ohdr->u.atomic_eth;
-               else
-                       ateth = (struct ib_atomic_eth *)data;
-               vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) |
-                       be32_to_cpu(ateth->vaddr[1]);
-               if (unlikely(vaddr & (sizeof(u64) - 1)))
-                       goto nack_inv_unlck;
-               rkey = be32_to_cpu(ateth->rkey);
-               /* Check rkey & NAK */
-               if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge,
-                                           sizeof(u64), vaddr, rkey,
-                                           IB_ACCESS_REMOTE_ATOMIC)))
-                       goto nack_acc_unlck;
-               /* Perform atomic OP and save result. */
-               maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
-               sdata = be64_to_cpu(ateth->swap_data);
-               e = &qp->s_ack_queue[qp->r_head_ack_queue];
-               e->atomic_data = (opcode == OP(FETCH_ADD)) ?
-                       (u64) atomic64_add_return(sdata, maddr) - sdata :
-                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
-                                     be64_to_cpu(ateth->compare_data),
-                                     sdata);
-               e->opcode = opcode;
-               e->sent = 0;
-               e->psn = psn & IPATH_PSN_MASK;
-               qp->r_msn++;
-               qp->r_psn++;
-               qp->r_state = opcode;
-               qp->r_nak_state = 0;
-               qp->r_head_ack_queue = next;
-
-               /* Schedule the send tasklet. */
-               ipath_schedule_send(qp);
-
-               goto unlock;
-       }
-
-       default:
-               /* NAK unknown opcodes. */
-               goto nack_inv;
-       }
-       qp->r_psn++;
-       qp->r_state = opcode;
-       qp->r_ack_psn = psn;
-       qp->r_nak_state = 0;
-       /* Send an ACK if requested or required. */
-       if (psn & (1 << 31))
-               goto send_ack;
-       goto done;
-
-rnr_nak:
-       qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer;
-       qp->r_ack_psn = qp->r_psn;
-       goto send_ack;
-
-nack_inv_unlck:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_inv:
-       ipath_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
-       qp->r_nak_state = IB_NAK_INVALID_REQUEST;
-       qp->r_ack_psn = qp->r_psn;
-       goto send_ack;
-
-nack_acc_unlck:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-nack_acc:
-       ipath_rc_error(qp, IB_WC_LOC_PROT_ERR);
-       qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
-       qp->r_ack_psn = qp->r_psn;
-send_ack:
-       send_rc_ack(qp);
-       goto done;
-
-unlock:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-done:
-       return;
-}
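
The RDMA_READ_REQUEST arm above advances qp->r_psn by (len - 1) / pmtu and relies on the shared increment further down to add the final 1, so the next expected PSN lands one past the last read-response packet. A standalone sketch of that arithmetic (function and variable names here are illustrative, not from the driver):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative only: the next expected PSN after an RDMA READ of
     * 'len' bytes split into packets of at most 'pmtu' bytes, mirroring
     * the "add the remainder here, add 1 later" pattern above. */
    static uint32_t next_expected_psn(uint32_t psn, uint32_t len, uint32_t pmtu)
    {
            if (len)
                    psn += (len - 1) / pmtu;   /* all but the last response packet */
            return psn + 1;                    /* the shared final increment */
    }

    int main(void)
    {
            /* An 8192-byte read with a 2048-byte PMTU spans 4 packets,
             * so the next expected PSN is the starting PSN plus 4. */
            printf("%u\n", next_expected_psn(100, 8192, 2048));   /* prints 104 */
            return 0;
    }
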
diff --git a/drivers/staging/rdma/ipath/ipath_registers.h b/drivers/staging/rdma/ipath/ipath_registers.h
deleted file mode 100644 (file)
index 8f44d0c..0000000
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _IPATH_REGISTERS_H
-#define _IPATH_REGISTERS_H
-
-/*
- * This file should only be included by kernel source, and by the diags.  It
- * defines the registers, and their contents, for InfiniPath chips.
- */
-
-/*
- * These are the InfiniPath register and buffer bit definitions
- * that are visible to software and needed only by the kernel
- * and diag code.  A few that are visible to protocol and user
- * code are in ipath_common.h.  Some bits are specific
- * to a given chip implementation and have been moved to the
- * chip-specific source file.
- */
-
-/* kr_revision bits */
-#define INFINIPATH_R_CHIPREVMINOR_MASK 0xFF
-#define INFINIPATH_R_CHIPREVMINOR_SHIFT 0
-#define INFINIPATH_R_CHIPREVMAJOR_MASK 0xFF
-#define INFINIPATH_R_CHIPREVMAJOR_SHIFT 8
-#define INFINIPATH_R_ARCH_MASK 0xFF
-#define INFINIPATH_R_ARCH_SHIFT 16
-#define INFINIPATH_R_SOFTWARE_MASK 0xFF
-#define INFINIPATH_R_SOFTWARE_SHIFT 24
-#define INFINIPATH_R_BOARDID_MASK 0xFF
-#define INFINIPATH_R_BOARDID_SHIFT 32
-
-/* kr_control bits */
-#define INFINIPATH_C_FREEZEMODE 0x00000002
-#define INFINIPATH_C_LINKENABLE 0x00000004
-
-/* kr_sendctrl bits */
-#define INFINIPATH_S_DISARMPIOBUF_SHIFT 16
-#define INFINIPATH_S_UPDTHRESH_SHIFT 24
-#define INFINIPATH_S_UPDTHRESH_MASK 0x1f
-
-#define IPATH_S_ABORT          0
-#define IPATH_S_PIOINTBUFAVAIL 1
-#define IPATH_S_PIOBUFAVAILUPD 2
-#define IPATH_S_PIOENABLE      3
-#define IPATH_S_SDMAINTENABLE  9
-#define IPATH_S_SDMASINGLEDESCRIPTOR   10
-#define IPATH_S_SDMAENABLE     11
-#define IPATH_S_SDMAHALT       12
-#define IPATH_S_DISARM         31
-
-#define INFINIPATH_S_ABORT             (1U << IPATH_S_ABORT)
-#define INFINIPATH_S_PIOINTBUFAVAIL    (1U << IPATH_S_PIOINTBUFAVAIL)
-#define INFINIPATH_S_PIOBUFAVAILUPD    (1U << IPATH_S_PIOBUFAVAILUPD)
-#define INFINIPATH_S_PIOENABLE         (1U << IPATH_S_PIOENABLE)
-#define INFINIPATH_S_SDMAINTENABLE     (1U << IPATH_S_SDMAINTENABLE)
-#define INFINIPATH_S_SDMASINGLEDESCRIPTOR \
-                                       (1U << IPATH_S_SDMASINGLEDESCRIPTOR)
-#define INFINIPATH_S_SDMAENABLE                (1U << IPATH_S_SDMAENABLE)
-#define INFINIPATH_S_SDMAHALT          (1U << IPATH_S_SDMAHALT)
-#define INFINIPATH_S_DISARM            (1U << IPATH_S_DISARM)
-
-/* kr_rcvctrl bits that are the same on multiple chips */
-#define INFINIPATH_R_PORTENABLE_SHIFT 0
-#define INFINIPATH_R_QPMAP_ENABLE (1ULL << 38)
-
-/* kr_intstatus, kr_intclear, kr_intmask bits */
-#define INFINIPATH_I_SDMAINT           0x8000000000000000ULL
-#define INFINIPATH_I_SDMADISABLED      0x4000000000000000ULL
-#define INFINIPATH_I_ERROR             0x0000000080000000ULL
-#define INFINIPATH_I_SPIOSENT          0x0000000040000000ULL
-#define INFINIPATH_I_SPIOBUFAVAIL      0x0000000020000000ULL
-#define INFINIPATH_I_GPIO              0x0000000010000000ULL
-#define INFINIPATH_I_JINT              0x0000000004000000ULL
-
-/* kr_errorstatus, kr_errorclear, kr_errormask bits */
-#define INFINIPATH_E_RFORMATERR                        0x0000000000000001ULL
-#define INFINIPATH_E_RVCRC                     0x0000000000000002ULL
-#define INFINIPATH_E_RICRC                     0x0000000000000004ULL
-#define INFINIPATH_E_RMINPKTLEN                        0x0000000000000008ULL
-#define INFINIPATH_E_RMAXPKTLEN                        0x0000000000000010ULL
-#define INFINIPATH_E_RLONGPKTLEN               0x0000000000000020ULL
-#define INFINIPATH_E_RSHORTPKTLEN              0x0000000000000040ULL
-#define INFINIPATH_E_RUNEXPCHAR                        0x0000000000000080ULL
-#define INFINIPATH_E_RUNSUPVL                  0x0000000000000100ULL
-#define INFINIPATH_E_REBP                      0x0000000000000200ULL
-#define INFINIPATH_E_RIBFLOW                   0x0000000000000400ULL
-#define INFINIPATH_E_RBADVERSION               0x0000000000000800ULL
-#define INFINIPATH_E_RRCVEGRFULL               0x0000000000001000ULL
-#define INFINIPATH_E_RRCVHDRFULL               0x0000000000002000ULL
-#define INFINIPATH_E_RBADTID                   0x0000000000004000ULL
-#define INFINIPATH_E_RHDRLEN                   0x0000000000008000ULL
-#define INFINIPATH_E_RHDR                      0x0000000000010000ULL
-#define INFINIPATH_E_RIBLOSTLINK               0x0000000000020000ULL
-#define INFINIPATH_E_SENDSPECIALTRIGGER                0x0000000008000000ULL
-#define INFINIPATH_E_SDMADISABLED              0x0000000010000000ULL
-#define INFINIPATH_E_SMINPKTLEN                        0x0000000020000000ULL
-#define INFINIPATH_E_SMAXPKTLEN                        0x0000000040000000ULL
-#define INFINIPATH_E_SUNDERRUN                 0x0000000080000000ULL
-#define INFINIPATH_E_SPKTLEN                   0x0000000100000000ULL
-#define INFINIPATH_E_SDROPPEDSMPPKT            0x0000000200000000ULL
-#define INFINIPATH_E_SDROPPEDDATAPKT           0x0000000400000000ULL
-#define INFINIPATH_E_SPIOARMLAUNCH             0x0000000800000000ULL
-#define INFINIPATH_E_SUNEXPERRPKTNUM           0x0000001000000000ULL
-#define INFINIPATH_E_SUNSUPVL                  0x0000002000000000ULL
-#define INFINIPATH_E_SENDBUFMISUSE             0x0000004000000000ULL
-#define INFINIPATH_E_SDMAGENMISMATCH           0x0000008000000000ULL
-#define INFINIPATH_E_SDMAOUTOFBOUND            0x0000010000000000ULL
-#define INFINIPATH_E_SDMATAILOUTOFBOUND                0x0000020000000000ULL
-#define INFINIPATH_E_SDMABASE                  0x0000040000000000ULL
-#define INFINIPATH_E_SDMA1STDESC               0x0000080000000000ULL
-#define INFINIPATH_E_SDMARPYTAG                        0x0000100000000000ULL
-#define INFINIPATH_E_SDMADWEN                  0x0000200000000000ULL
-#define INFINIPATH_E_SDMAMISSINGDW             0x0000400000000000ULL
-#define INFINIPATH_E_SDMAUNEXPDATA             0x0000800000000000ULL
-#define INFINIPATH_E_IBSTATUSCHANGED           0x0001000000000000ULL
-#define INFINIPATH_E_INVALIDADDR               0x0002000000000000ULL
-#define INFINIPATH_E_RESET                     0x0004000000000000ULL
-#define INFINIPATH_E_HARDWARE                  0x0008000000000000ULL
-#define INFINIPATH_E_SDMADESCADDRMISALIGN      0x0010000000000000ULL
-#define INFINIPATH_E_INVALIDEEPCMD             0x0020000000000000ULL
-
-/*
- * this is used to print "common" packet errors only when the
- * __IPATH_ERRPKTDBG bit is set in ipath_debug.
- */
-#define INFINIPATH_E_PKTERRS ( INFINIPATH_E_SPKTLEN \
-               | INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_RVCRC \
-               | INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
-               | INFINIPATH_E_REBP )
-
-/* Convenience for decoding Send DMA errors */
-#define INFINIPATH_E_SDMAERRS ( \
-       INFINIPATH_E_SDMAGENMISMATCH | INFINIPATH_E_SDMAOUTOFBOUND | \
-       INFINIPATH_E_SDMATAILOUTOFBOUND | INFINIPATH_E_SDMABASE | \
-       INFINIPATH_E_SDMA1STDESC | INFINIPATH_E_SDMARPYTAG | \
-       INFINIPATH_E_SDMADWEN | INFINIPATH_E_SDMAMISSINGDW | \
-       INFINIPATH_E_SDMAUNEXPDATA | \
-       INFINIPATH_E_SDMADESCADDRMISALIGN | \
-       INFINIPATH_E_SDMADISABLED | \
-       INFINIPATH_E_SENDBUFMISUSE)
-
-/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
-/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
- * RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2:  expTID, 3: eagerTID
- *             bit 4: flag buffer, 5: datainfo, 6: header info */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40
-#define INFINIPATH_HWE_RXEMEMPARITYERR_MASK 0x7FULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT 44
-#define INFINIPATH_HWE_IBCBUSTOSPCPARITYERR 0x4000000000000000ULL
-#define INFINIPATH_HWE_IBCBUSFRSPCPARITYERR 0x8000000000000000ULL
-/* txe mem parity errors (shift by INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT) */
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF  0x1ULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC  0x2ULL
-#define INFINIPATH_HWE_TXEMEMPARITYERR_PIOLAUNCHFIFO 0x4ULL
-/* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */
-#define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF   0x01ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ  0x02ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID   0x04ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x08ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF  0x10ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL
-#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO  0x40ULL
-/* waldo specific -- find the rest in ipath_6110.c */
-#define INFINIPATH_HWE_RXDSYNCMEMPARITYERR  0x0000000400000000ULL
-/* 6120/7220 specific -- find the rest in ipath_6120.c and ipath_7220.c */
-#define INFINIPATH_HWE_MEMBISTFAILED   0x0040000000000000ULL
-
-/* kr_hwdiagctrl bits */
-#define INFINIPATH_DC_FORCETXEMEMPARITYERR_MASK 0xFULL
-#define INFINIPATH_DC_FORCETXEMEMPARITYERR_SHIFT 40
-#define INFINIPATH_DC_FORCERXEMEMPARITYERR_MASK 0x7FULL
-#define INFINIPATH_DC_FORCERXEMEMPARITYERR_SHIFT 44
-#define INFINIPATH_DC_FORCERXDSYNCMEMPARITYERR  0x0000000400000000ULL
-#define INFINIPATH_DC_COUNTERDISABLE            0x1000000000000000ULL
-#define INFINIPATH_DC_COUNTERWREN               0x2000000000000000ULL
-#define INFINIPATH_DC_FORCEIBCBUSTOSPCPARITYERR 0x4000000000000000ULL
-#define INFINIPATH_DC_FORCEIBCBUSFRSPCPARITYERR 0x8000000000000000ULL
-
-/* kr_ibcctrl bits */
-#define INFINIPATH_IBCC_FLOWCTRLPERIOD_MASK 0xFFULL
-#define INFINIPATH_IBCC_FLOWCTRLPERIOD_SHIFT 0
-#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_MASK 0xFFULL
-#define INFINIPATH_IBCC_FLOWCTRLWATERMARK_SHIFT 8
-#define INFINIPATH_IBCC_LINKINITCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKINITCMD_DISABLE 1
-/* cycle through TS1/TS2 till OK */
-#define INFINIPATH_IBCC_LINKINITCMD_POLL 2
-/* wait for TS1, then go on */
-#define INFINIPATH_IBCC_LINKINITCMD_SLEEP 3
-#define INFINIPATH_IBCC_LINKINITCMD_SHIFT 16
-#define INFINIPATH_IBCC_LINKCMD_MASK 0x3ULL
-#define INFINIPATH_IBCC_LINKCMD_DOWN 1         /* move to 0x11 */
-#define INFINIPATH_IBCC_LINKCMD_ARMED 2                /* move to 0x21 */
-#define INFINIPATH_IBCC_LINKCMD_ACTIVE 3       /* move to 0x31 */
-#define INFINIPATH_IBCC_LINKCMD_SHIFT 18
-#define INFINIPATH_IBCC_MAXPKTLEN_MASK 0x7FFULL
-#define INFINIPATH_IBCC_MAXPKTLEN_SHIFT 20
-#define INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK 0xFULL
-#define INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT 32
-#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_MASK 0xFULL
-#define INFINIPATH_IBCC_OVERRUNTHRESHOLD_SHIFT 36
-#define INFINIPATH_IBCC_CREDITSCALE_MASK 0x7ULL
-#define INFINIPATH_IBCC_CREDITSCALE_SHIFT 40
-#define INFINIPATH_IBCC_LOOPBACK             0x8000000000000000ULL
-#define INFINIPATH_IBCC_LINKDOWNDEFAULTSTATE 0x4000000000000000ULL
-
-/* kr_ibcstatus bits */
-#define INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT 0
-#define INFINIPATH_IBCS_LINKSTATE_MASK 0x7
-
-#define INFINIPATH_IBCS_TXREADY       0x40000000
-#define INFINIPATH_IBCS_TXCREDITOK    0x80000000
-/* link training states (shift by
-   INFINIPATH_IBCS_LINKTRAININGSTATE_SHIFT) */
-#define INFINIPATH_IBCS_LT_STATE_DISABLED      0x00
-#define INFINIPATH_IBCS_LT_STATE_LINKUP                0x01
-#define INFINIPATH_IBCS_LT_STATE_POLLACTIVE    0x02
-#define INFINIPATH_IBCS_LT_STATE_POLLQUIET     0x03
-#define INFINIPATH_IBCS_LT_STATE_SLEEPDELAY    0x04
-#define INFINIPATH_IBCS_LT_STATE_SLEEPQUIET    0x05
-#define INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE   0x08
-#define INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG    0x09
-#define INFINIPATH_IBCS_LT_STATE_CFGWAITRMT    0x0a
-#define INFINIPATH_IBCS_LT_STATE_CFGIDLE       0x0b
-#define INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN        0x0c
-#define INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT        0x0e
-#define INFINIPATH_IBCS_LT_STATE_RECOVERIDLE   0x0f
-/* link state machine states (shift by ibcs_ls_shift) */
-#define INFINIPATH_IBCS_L_STATE_DOWN           0x0
-#define INFINIPATH_IBCS_L_STATE_INIT           0x1
-#define INFINIPATH_IBCS_L_STATE_ARM            0x2
-#define INFINIPATH_IBCS_L_STATE_ACTIVE         0x3
-#define INFINIPATH_IBCS_L_STATE_ACT_DEFER      0x4
-
-
-/* kr_extstatus bits */
-#define INFINIPATH_EXTS_SERDESPLLLOCK 0x1
-#define INFINIPATH_EXTS_GPIOIN_MASK 0xFFFFULL
-#define INFINIPATH_EXTS_GPIOIN_SHIFT 48
-
-/* kr_extctrl bits */
-#define INFINIPATH_EXTC_GPIOINVERT_MASK 0xFFFFULL
-#define INFINIPATH_EXTC_GPIOINVERT_SHIFT 32
-#define INFINIPATH_EXTC_GPIOOE_MASK 0xFFFFULL
-#define INFINIPATH_EXTC_GPIOOE_SHIFT 48
-#define INFINIPATH_EXTC_SERDESENABLE         0x80000000ULL
-#define INFINIPATH_EXTC_SERDESCONNECT        0x40000000ULL
-#define INFINIPATH_EXTC_SERDESENTRUNKING     0x20000000ULL
-#define INFINIPATH_EXTC_SERDESDISRXFIFO      0x10000000ULL
-#define INFINIPATH_EXTC_SERDESENPLPBK1       0x08000000ULL
-#define INFINIPATH_EXTC_SERDESENPLPBK2       0x04000000ULL
-#define INFINIPATH_EXTC_SERDESENENCDEC       0x02000000ULL
-#define INFINIPATH_EXTC_LED1SECPORT_ON       0x00000020ULL
-#define INFINIPATH_EXTC_LED2SECPORT_ON       0x00000010ULL
-#define INFINIPATH_EXTC_LED1PRIPORT_ON       0x00000008ULL
-#define INFINIPATH_EXTC_LED2PRIPORT_ON       0x00000004ULL
-#define INFINIPATH_EXTC_LEDGBLOK_ON          0x00000002ULL
-#define INFINIPATH_EXTC_LEDGBLERR_OFF        0x00000001ULL
-
-/* kr_partitionkey bits */
-#define INFINIPATH_PKEY_SIZE 16
-#define INFINIPATH_PKEY_MASK 0xFFFF
-#define INFINIPATH_PKEY_DEFAULT_PKEY 0xFFFF
-
-/* kr_serdesconfig0 bits */
-#define INFINIPATH_SERDC0_RESET_MASK  0xfULL   /* overall reset bits */
-#define INFINIPATH_SERDC0_RESET_PLL   0x10000000ULL    /* pll reset */
-/* tx idle enables (per lane) */
-#define INFINIPATH_SERDC0_TXIDLE      0xF000ULL
-/* rx detect enables (per lane) */
-#define INFINIPATH_SERDC0_RXDETECT_EN 0xF0000ULL
-/* L1 power down; use with RXDETECT, otherwise not used on IB side */
-#define INFINIPATH_SERDC0_L1PWR_DN      0xF0ULL
-
-/* common kr_xgxsconfig bits (or safe in all, even if not implemented) */
-#define INFINIPATH_XGXS_RX_POL_SHIFT 19
-#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
-
-
-/*
- * IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our
- * PIO send buffers.  This is well beyond anything currently
- * defined in the InfiniBand spec.
- */
-#define IPATH_PIO_MAXIBHDR 128
-
-typedef u64 ipath_err_t;
-
-/* The following change with the type of device, so
- * need to be part of the ipath_devdata struct, or
- * we could have problems plugging in devices of
- * different types (e.g. one HT, one PCIE)
- * in one system, to be managed by one driver.
- * On the other hand, this file may also be included
- * by other code, so leave the declarations here
- * temporarily. Minor footprint issue if common-model
- * linker used, none if C89+ linker used.
- */
-
-/* mask of defined bits for various registers */
-extern u64 infinipath_i_bitsextant;
-extern ipath_err_t infinipath_e_bitsextant, infinipath_hwe_bitsextant;
-
-/* masks that are different in various chips, or only exist in some chips */
-extern u32 infinipath_i_rcvavail_mask, infinipath_i_rcvurg_mask;
-
-/*
- * These are the infinipath general register numbers (not offsets).
- * The kernel registers are used directly, those beyond the kernel
- * registers are calculated from one of the base registers.  The use of
- * an integer type doesn't allow type-checking as thorough as, say,
- * an enum but allows for better hiding of chip differences.
- */
-typedef const u16 ipath_kreg,  /* infinipath general registers */
- ipath_creg,                   /* infinipath counter registers */
- ipath_sreg;                   /* kernel-only, infinipath send registers */
-
-/*
- * These are the chip registers common to all infinipath chips, and
- * used both by the kernel and the diagnostics or other user code.
- * They are all implemented such that 64 bit accesses work.
- * Some implement no more than 32 bits.  Because 64 bit reads
- * require 2 HT cmds on opteron, we access those with 32 bit
- * reads for efficiency (they are written as 64 bits, since
- * the extra 32 bits are nearly free on writes, and it slightly reduces
- * complexity).  The rest are all accessed as 64 bits.
- */
-struct ipath_kregs {
-       /* These are the 32 bit group */
-       ipath_kreg kr_control;
-       ipath_kreg kr_counterregbase;
-       ipath_kreg kr_intmask;
-       ipath_kreg kr_intstatus;
-       ipath_kreg kr_pagealign;
-       ipath_kreg kr_portcnt;
-       ipath_kreg kr_rcvtidbase;
-       ipath_kreg kr_rcvtidcnt;
-       ipath_kreg kr_rcvegrbase;
-       ipath_kreg kr_rcvegrcnt;
-       ipath_kreg kr_scratch;
-       ipath_kreg kr_sendctrl;
-       ipath_kreg kr_sendpiobufbase;
-       ipath_kreg kr_sendpiobufcnt;
-       ipath_kreg kr_sendpiosize;
-       ipath_kreg kr_sendregbase;
-       ipath_kreg kr_userregbase;
-       /* These are the 64 bit group */
-       ipath_kreg kr_debugport;
-       ipath_kreg kr_debugportselect;
-       ipath_kreg kr_errorclear;
-       ipath_kreg kr_errormask;
-       ipath_kreg kr_errorstatus;
-       ipath_kreg kr_extctrl;
-       ipath_kreg kr_extstatus;
-       ipath_kreg kr_gpio_clear;
-       ipath_kreg kr_gpio_mask;
-       ipath_kreg kr_gpio_out;
-       ipath_kreg kr_gpio_status;
-       ipath_kreg kr_hwdiagctrl;
-       ipath_kreg kr_hwerrclear;
-       ipath_kreg kr_hwerrmask;
-       ipath_kreg kr_hwerrstatus;
-       ipath_kreg kr_ibcctrl;
-       ipath_kreg kr_ibcstatus;
-       ipath_kreg kr_intblocked;
-       ipath_kreg kr_intclear;
-       ipath_kreg kr_interruptconfig;
-       ipath_kreg kr_mdio;
-       ipath_kreg kr_partitionkey;
-       ipath_kreg kr_rcvbthqp;
-       ipath_kreg kr_rcvbufbase;
-       ipath_kreg kr_rcvbufsize;
-       ipath_kreg kr_rcvctrl;
-       ipath_kreg kr_rcvhdrcnt;
-       ipath_kreg kr_rcvhdrentsize;
-       ipath_kreg kr_rcvhdrsize;
-       ipath_kreg kr_rcvintmembase;
-       ipath_kreg kr_rcvintmemsize;
-       ipath_kreg kr_revision;
-       ipath_kreg kr_sendbuffererror;
-       ipath_kreg kr_sendpioavailaddr;
-       ipath_kreg kr_serdesconfig0;
-       ipath_kreg kr_serdesconfig1;
-       ipath_kreg kr_serdesstatus;
-       ipath_kreg kr_txintmembase;
-       ipath_kreg kr_txintmemsize;
-       ipath_kreg kr_xgxsconfig;
-       ipath_kreg kr_ibpllcfg;
-       /* use these two (and the following N ports) only with
-        * ipath_k*_kreg64_port(); not *kreg64() */
-       ipath_kreg kr_rcvhdraddr;
-       ipath_kreg kr_rcvhdrtailaddr;
-
-       /* remaining registers are not present on all types of infinipath
-          chips  */
-       ipath_kreg kr_rcvpktledcnt;
-       ipath_kreg kr_pcierbuftestreg0;
-       ipath_kreg kr_pcierbuftestreg1;
-       ipath_kreg kr_pcieq0serdesconfig0;
-       ipath_kreg kr_pcieq0serdesconfig1;
-       ipath_kreg kr_pcieq0serdesstatus;
-       ipath_kreg kr_pcieq1serdesconfig0;
-       ipath_kreg kr_pcieq1serdesconfig1;
-       ipath_kreg kr_pcieq1serdesstatus;
-       ipath_kreg kr_hrtbt_guid;
-       ipath_kreg kr_ibcddrctrl;
-       ipath_kreg kr_ibcddrstatus;
-       ipath_kreg kr_jintreload;
-
-       /* send dma related regs */
-       ipath_kreg kr_senddmabase;
-       ipath_kreg kr_senddmalengen;
-       ipath_kreg kr_senddmatail;
-       ipath_kreg kr_senddmahead;
-       ipath_kreg kr_senddmaheadaddr;
-       ipath_kreg kr_senddmabufmask0;
-       ipath_kreg kr_senddmabufmask1;
-       ipath_kreg kr_senddmabufmask2;
-       ipath_kreg kr_senddmastatus;
-
-       /* SerDes related regs (IBA7220-only) */
-       ipath_kreg kr_ibserdesctrl;
-       ipath_kreg kr_ib_epbacc;
-       ipath_kreg kr_ib_epbtrans;
-       ipath_kreg kr_pcie_epbacc;
-       ipath_kreg kr_pcie_epbtrans;
-       ipath_kreg kr_ib_ddsrxeq;
-};
-
-struct ipath_cregs {
-       ipath_creg cr_badformatcnt;
-       ipath_creg cr_erricrccnt;
-       ipath_creg cr_errlinkcnt;
-       ipath_creg cr_errlpcrccnt;
-       ipath_creg cr_errpkey;
-       ipath_creg cr_errrcvflowctrlcnt;
-       ipath_creg cr_err_rlencnt;
-       ipath_creg cr_errslencnt;
-       ipath_creg cr_errtidfull;
-       ipath_creg cr_errtidvalid;
-       ipath_creg cr_errvcrccnt;
-       ipath_creg cr_ibstatuschange;
-       ipath_creg cr_intcnt;
-       ipath_creg cr_invalidrlencnt;
-       ipath_creg cr_invalidslencnt;
-       ipath_creg cr_lbflowstallcnt;
-       ipath_creg cr_iblinkdowncnt;
-       ipath_creg cr_iblinkerrrecovcnt;
-       ipath_creg cr_ibsymbolerrcnt;
-       ipath_creg cr_pktrcvcnt;
-       ipath_creg cr_pktrcvflowctrlcnt;
-       ipath_creg cr_pktsendcnt;
-       ipath_creg cr_pktsendflowcnt;
-       ipath_creg cr_portovflcnt;
-       ipath_creg cr_rcvebpcnt;
-       ipath_creg cr_rcvovflcnt;
-       ipath_creg cr_rxdroppktcnt;
-       ipath_creg cr_senddropped;
-       ipath_creg cr_sendstallcnt;
-       ipath_creg cr_sendunderruncnt;
-       ipath_creg cr_unsupvlcnt;
-       ipath_creg cr_wordrcvcnt;
-       ipath_creg cr_wordsendcnt;
-       ipath_creg cr_vl15droppedpktcnt;
-       ipath_creg cr_rxotherlocalphyerrcnt;
-       ipath_creg cr_excessbufferovflcnt;
-       ipath_creg cr_locallinkintegrityerrcnt;
-       ipath_creg cr_rxvlerrcnt;
-       ipath_creg cr_rxdlidfltrcnt;
-       ipath_creg cr_psstat;
-       ipath_creg cr_psstart;
-       ipath_creg cr_psinterval;
-       ipath_creg cr_psrcvdatacount;
-       ipath_creg cr_psrcvpktscount;
-       ipath_creg cr_psxmitdatacount;
-       ipath_creg cr_psxmitpktscount;
-       ipath_creg cr_psxmitwaitcount;
-};
-
-#endif                         /* _IPATH_REGISTERS_H */
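
Nearly every register above is described by a MASK/SHIFT pair, so a field is read by shifting the 64-bit register value right and masking. A minimal sketch of that decode, reusing the kr_revision constants defined above (the helper and the sample value are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define INFINIPATH_R_CHIPREVMINOR_MASK  0xFF
    #define INFINIPATH_R_CHIPREVMINOR_SHIFT 0
    #define INFINIPATH_R_CHIPREVMAJOR_MASK  0xFF
    #define INFINIPATH_R_CHIPREVMAJOR_SHIFT 8

    /* Extract one field from a 64-bit register value. */
    static uint64_t reg_field(uint64_t reg, uint64_t mask, unsigned int shift)
    {
            return (reg >> shift) & mask;
    }

    int main(void)
    {
            uint64_t kr_revision = 0x0201;   /* sample value, not real hardware */

            printf("chip rev %llu.%llu\n",
                   (unsigned long long)reg_field(kr_revision,
                                                 INFINIPATH_R_CHIPREVMAJOR_MASK,
                                                 INFINIPATH_R_CHIPREVMAJOR_SHIFT),
                   (unsigned long long)reg_field(kr_revision,
                                                 INFINIPATH_R_CHIPREVMINOR_MASK,
                                                 INFINIPATH_R_CHIPREVMINOR_SHIFT));
            return 0;
    }
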
diff --git a/drivers/staging/rdma/ipath/ipath_ruc.c b/drivers/staging/rdma/ipath/ipath_ruc.c
deleted file mode 100644 (file)
index e541a01..0000000
+++ /dev/null
@@ -1,733 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/*
- * Convert the AETH RNR timeout code into the number of milliseconds.
- */
-const u32 ib_ipath_rnr_table[32] = {
-       656,                    /* 0 */
-       1,                      /* 1 */
-       1,                      /* 2 */
-       1,                      /* 3 */
-       1,                      /* 4 */
-       1,                      /* 5 */
-       1,                      /* 6 */
-       1,                      /* 7 */
-       1,                      /* 8 */
-       1,                      /* 9 */
-       1,                      /* A */
-       1,                      /* B */
-       1,                      /* C */
-       1,                      /* D */
-       2,                      /* E */
-       2,                      /* F */
-       3,                      /* 10 */
-       4,                      /* 11 */
-       6,                      /* 12 */
-       8,                      /* 13 */
-       11,                     /* 14 */
-       16,                     /* 15 */
-       21,                     /* 16 */
-       31,                     /* 17 */
-       41,                     /* 18 */
-       62,                     /* 19 */
-       82,                     /* 1A */
-       123,                    /* 1B */
-       164,                    /* 1C */
-       246,                    /* 1D */
-       328,                    /* 1E */
-       492                     /* 1F */
-};
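
The table maps the 5-bit AETH RNR timeout code (0x00 through 0x1F) to a delay in milliseconds; ipath_ruc_loopback() below indexes it directly with qp->r_min_rnr_timer. A trivial lookup sketch, assuming only the values above (the masking is added here for safety and is not in the driver):

    #include <stdio.h>

    /* Lookup sketch: 'table' stands in for ib_ipath_rnr_table[] above. */
    static unsigned int rnr_code_to_ms(const unsigned int table[32], unsigned int code)
    {
            return table[code & 0x1f];   /* the code is only 5 bits wide */
    }

    int main(void)
    {
            static const unsigned int rnr_ms[32] = {
                    656, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
                    3, 4, 6, 8, 11, 16, 21, 31, 41, 62, 82, 123, 164, 246, 328, 492
            };

            printf("%u ms\n", rnr_code_to_ms(rnr_ms, 0x14));   /* prints 11 ms */
            return 0;
    }
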
-
-/**
- * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
- * @qp: the QP
- *
- * Called with the QP s_lock held and interrupts disabled.
- * XXX Use a simple list for now.  We might need a priority
- * queue if we have lots of QPs waiting for RNR timeouts
- * but that should be rare.
- */
-void ipath_insert_rnr_queue(struct ipath_qp *qp)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-
-       /* We already did a spin_lock_irqsave(), so just use spin_lock */
-       spin_lock(&dev->pending_lock);
-       if (list_empty(&dev->rnrwait))
-               list_add(&qp->timerwait, &dev->rnrwait);
-       else {
-               struct list_head *l = &dev->rnrwait;
-               struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
-                                                 timerwait);
-
-               while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
-                       qp->s_rnr_timeout -= nqp->s_rnr_timeout;
-                       l = l->next;
-                       if (l->next == &dev->rnrwait) {
-                               nqp = NULL;
-                               break;
-                       }
-                       nqp = list_entry(l->next, struct ipath_qp,
-                                        timerwait);
-               }
-               if (nqp)
-                       nqp->s_rnr_timeout -= qp->s_rnr_timeout;
-               list_add(&qp->timerwait, l);
-       }
-       spin_unlock(&dev->pending_lock);
-}
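
ipath_insert_rnr_queue() keeps the RNR wait list as a delta list: each queued QP stores its timeout relative to the QP in front of it, so only the head entry ever needs to be decremented by the timer. A small userspace sketch of the same insertion rule, using an array in place of the kernel list (purely illustrative):

    #include <stdio.h>

    /* Delta-list insertion: timeouts[] holds per-entry deltas.  A new
     * absolute timeout 't' is reduced by every delta it passes, and the
     * entry it lands in front of gives up the remainder, mirroring the
     * bookkeeping in the loop above. */
    static int delta_insert(unsigned int timeouts[], int n, unsigned int t)
    {
            int i = 0;

            while (i < n && t >= timeouts[i])
                    t -= timeouts[i++];
            if (i < n)
                    timeouts[i] -= t;
            for (int j = n; j > i; j--)            /* make room for the new entry */
                    timeouts[j] = timeouts[j - 1];
            timeouts[i] = t;
            return n + 1;
    }

    int main(void)
    {
            unsigned int q[8] = { 4, 3 };          /* absolute timeouts 4 and 7 */
            int n = delta_insert(q, 2, 6);         /* insert absolute timeout 6 */

            for (int i = 0; i < n; i++)
                    printf("%u ", q[i]);           /* prints: 4 2 1 */
            printf("\n");
            return 0;
    }
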
-
-/**
- * ipath_init_sge - Validate a RWQE and fill in the SGE state
- * @qp: the QP
- *
- * Return 1 if OK.
- */
-int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
-                  u32 *lengthp, struct ipath_sge_state *ss)
-{
-       int i, j, ret;
-       struct ib_wc wc;
-
-       *lengthp = 0;
-       for (i = j = 0; i < wqe->num_sge; i++) {
-               if (wqe->sg_list[i].length == 0)
-                       continue;
-               /* Check LKEY */
-               if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
-                                  &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
-                       goto bad_lkey;
-               *lengthp += wqe->sg_list[i].length;
-               j++;
-       }
-       ss->num_sge = j;
-       ret = 1;
-       goto bail;
-
-bad_lkey:
-       memset(&wc, 0, sizeof(wc));
-       wc.wr_id = wqe->wr_id;
-       wc.status = IB_WC_LOC_PROT_ERR;
-       wc.opcode = IB_WC_RECV;
-       wc.qp = &qp->ibqp;
-       /* Signal solicited completion event. */
-       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
-       ret = 0;
-bail:
-       return ret;
-}
-
-/**
- * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
- * @qp: the QP
- * @wr_id_only: update qp->r_wr_id only, not qp->r_sge
- *
- * Return 0 if no RWQE is available, otherwise return 1.
- *
- * Can be called from interrupt level.
- */
-int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
-{
-       unsigned long flags;
-       struct ipath_rq *rq;
-       struct ipath_rwq *wq;
-       struct ipath_srq *srq;
-       struct ipath_rwqe *wqe;
-       void (*handler)(struct ib_event *, void *);
-       u32 tail;
-       int ret;
-
-       if (qp->ibqp.srq) {
-               srq = to_isrq(qp->ibqp.srq);
-               handler = srq->ibsrq.event_handler;
-               rq = &srq->rq;
-       } else {
-               srq = NULL;
-               handler = NULL;
-               rq = &qp->r_rq;
-       }
-
-       spin_lock_irqsave(&rq->lock, flags);
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-               ret = 0;
-               goto unlock;
-       }
-
-       wq = rq->wq;
-       tail = wq->tail;
-       /* Validate tail before using it since it is user writable. */
-       if (tail >= rq->size)
-               tail = 0;
-       do {
-               if (unlikely(tail == wq->head)) {
-                       ret = 0;
-                       goto unlock;
-               }
-               /* Make sure entry is read after head index is read. */
-               smp_rmb();
-               wqe = get_rwqe_ptr(rq, tail);
-               if (++tail >= rq->size)
-                       tail = 0;
-               if (wr_id_only)
-                       break;
-               qp->r_sge.sg_list = qp->r_sg_list;
-       } while (!ipath_init_sge(qp, wqe, &qp->r_len, &qp->r_sge));
-       qp->r_wr_id = wqe->wr_id;
-       wq->tail = tail;
-
-       ret = 1;
-       set_bit(IPATH_R_WRID_VALID, &qp->r_aflags);
-       if (handler) {
-               u32 n;
-
-               /*
-                * validate head pointer value and compute
-                * the number of remaining WQEs.
-                */
-               n = wq->head;
-               if (n >= rq->size)
-                       n = 0;
-               if (n < tail)
-                       n += rq->size - tail;
-               else
-                       n -= tail;
-               if (n < srq->limit) {
-                       struct ib_event ev;
-
-                       srq->limit = 0;
-                       spin_unlock_irqrestore(&rq->lock, flags);
-                       ev.device = qp->ibqp.device;
-                       ev.element.srq = qp->ibqp.srq;
-                       ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
-                       handler(&ev, srq->ibsrq.srq_context);
-                       goto bail;
-               }
-       }
-unlock:
-       spin_unlock_irqrestore(&rq->lock, flags);
-bail:
-       return ret;
-}
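
ipath_get_rwqe() treats wq->tail as untrusted because the work queue is mapped into user space: the index is clamped before use, and smp_rmb() keeps the entry read from being reordered before the head read. A hedged userspace sketch of that consumption pattern, with a C11 acquire load standing in for the kernel barrier:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 8

    struct ring {
            _Atomic uint32_t head;               /* producer-updated index */
            uint32_t tail;                       /* consumer index, assumed untrusted */
            uint64_t entries[RING_SIZE];         /* work-request ids */
    };

    /* Pop one entry: clamp the possibly out-of-range tail before using it,
     * and order the entry read after the head read (the acquire load plays
     * the role of the smp_rmb() above). */
    static bool ring_pop(struct ring *rq, uint64_t *out)
    {
            uint32_t tail = rq->tail;

            if (tail >= RING_SIZE)               /* validate the untrusted index */
                    tail = 0;
            if (tail == atomic_load_explicit(&rq->head, memory_order_acquire))
                    return false;                /* ring is empty */
            *out = rq->entries[tail];
            rq->tail = (tail + 1) % RING_SIZE;
            return true;
    }

    int main(void)
    {
            struct ring rq = { .entries = { 41, 42 } };
            uint64_t wr_id;

            atomic_store(&rq.head, 2);           /* pretend two entries were posted */
            while (ring_pop(&rq, &wr_id))
                    printf("wr_id %llu\n", (unsigned long long)wr_id);
            return 0;
    }
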
-
-/**
- * ipath_ruc_loopback - handle UC and RC loopback requests
- * @sqp: the sending QP
- *
- * This is called from ipath_do_send() to
- * forward a WQE addressed to the same HCA.
- * Note that although we are single threaded due to the tasklet, we still
- * have to protect against post_send().  We don't have to worry about
- * receive interrupts since this is a connected protocol and all packets
- * will pass through here.
- */
-static void ipath_ruc_loopback(struct ipath_qp *sqp)
-{
-       struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
-       struct ipath_qp *qp;
-       struct ipath_swqe *wqe;
-       struct ipath_sge *sge;
-       unsigned long flags;
-       struct ib_wc wc;
-       u64 sdata;
-       atomic64_t *maddr;
-       enum ib_wc_status send_status;
-
-       /*
-        * Note that we check the responder QP state after
-        * checking the requester's state.
-        */
-       qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
-
-       spin_lock_irqsave(&sqp->s_lock, flags);
-
-       /* Return if we are already busy processing a work request. */
-       if ((sqp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
-           !(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
-               goto unlock;
-
-       sqp->s_flags |= IPATH_S_BUSY;
-
-again:
-       if (sqp->s_last == sqp->s_head)
-               goto clr_busy;
-       wqe = get_swqe_ptr(sqp, sqp->s_last);
-
-       /* Return if it is not OK to start a new work request. */
-       if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
-               if (!(ib_ipath_state_ops[sqp->state] & IPATH_FLUSH_SEND))
-                       goto clr_busy;
-               /* We are in the error state, flush the work request. */
-               send_status = IB_WC_WR_FLUSH_ERR;
-               goto flush_send;
-       }
-
-       /*
-        * We can rely on the entry not changing without the s_lock
-        * being held until we update s_last.
-        * We increment s_cur to indicate s_last is in progress.
-        */
-       if (sqp->s_last == sqp->s_cur) {
-               if (++sqp->s_cur >= sqp->s_size)
-                       sqp->s_cur = 0;
-       }
-       spin_unlock_irqrestore(&sqp->s_lock, flags);
-
-       if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-               dev->n_pkt_drops++;
-               /*
-                * For RC, the requester would timeout and retry so
-                * shortcut the timeouts and just signal too many retries.
-                */
-               if (sqp->ibqp.qp_type == IB_QPT_RC)
-                       send_status = IB_WC_RETRY_EXC_ERR;
-               else
-                       send_status = IB_WC_SUCCESS;
-               goto serr;
-       }
-
-       memset(&wc, 0, sizeof wc);
-       send_status = IB_WC_SUCCESS;
-
-       sqp->s_sge.sge = wqe->sg_list[0];
-       sqp->s_sge.sg_list = wqe->sg_list + 1;
-       sqp->s_sge.num_sge = wqe->wr.num_sge;
-       sqp->s_len = wqe->length;
-       switch (wqe->wr.opcode) {
-       case IB_WR_SEND_WITH_IMM:
-               wc.wc_flags = IB_WC_WITH_IMM;
-               wc.ex.imm_data = wqe->wr.ex.imm_data;
-               /* FALLTHROUGH */
-       case IB_WR_SEND:
-               if (!ipath_get_rwqe(qp, 0))
-                       goto rnr_nak;
-               break;
-
-       case IB_WR_RDMA_WRITE_WITH_IMM:
-               if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-                       goto inv_err;
-               wc.wc_flags = IB_WC_WITH_IMM;
-               wc.ex.imm_data = wqe->wr.ex.imm_data;
-               if (!ipath_get_rwqe(qp, 1))
-                       goto rnr_nak;
-               /* FALLTHROUGH */
-       case IB_WR_RDMA_WRITE:
-               if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
-                       goto inv_err;
-               if (wqe->length == 0)
-                       break;
-               if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
-                                           wqe->rdma_wr.remote_addr,
-                                           wqe->rdma_wr.rkey,
-                                           IB_ACCESS_REMOTE_WRITE)))
-                       goto acc_err;
-               break;
-
-       case IB_WR_RDMA_READ:
-               if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
-                       goto inv_err;
-               if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
-                                           wqe->rdma_wr.remote_addr,
-                                           wqe->rdma_wr.rkey,
-                                           IB_ACCESS_REMOTE_READ)))
-                       goto acc_err;
-               qp->r_sge.sge = wqe->sg_list[0];
-               qp->r_sge.sg_list = wqe->sg_list + 1;
-               qp->r_sge.num_sge = wqe->wr.num_sge;
-               break;
-
-       case IB_WR_ATOMIC_CMP_AND_SWP:
-       case IB_WR_ATOMIC_FETCH_AND_ADD:
-               if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
-                       goto inv_err;
-               if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
-                                           wqe->atomic_wr.remote_addr,
-                                           wqe->atomic_wr.rkey,
-                                           IB_ACCESS_REMOTE_ATOMIC)))
-                       goto acc_err;
-               /* Perform atomic OP and save result. */
-               maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
-               sdata = wqe->atomic_wr.compare_add;
-               *(u64 *) sqp->s_sge.sge.vaddr =
-                       (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
-                       (u64) atomic64_add_return(sdata, maddr) - sdata :
-                       (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
-                                     sdata, wqe->atomic_wr.swap);
-               goto send_comp;
-
-       default:
-               send_status = IB_WC_LOC_QP_OP_ERR;
-               goto serr;
-       }
-
-       sge = &sqp->s_sge.sge;
-       while (sqp->s_len) {
-               u32 len = sqp->s_len;
-
-               if (len > sge->length)
-                       len = sge->length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--sqp->s_sge.num_sge)
-                               *sge = *sqp->s_sge.sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               sqp->s_len -= len;
-       }
-
-       if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
-               goto send_comp;
-
-       if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
-               wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-       else
-               wc.opcode = IB_WC_RECV;
-       wc.wr_id = qp->r_wr_id;
-       wc.status = IB_WC_SUCCESS;
-       wc.byte_len = wqe->length;
-       wc.qp = &qp->ibqp;
-       wc.src_qp = qp->remote_qpn;
-       wc.slid = qp->remote_ah_attr.dlid;
-       wc.sl = qp->remote_ah_attr.sl;
-       wc.port_num = 1;
-       /* Signal completion event if the solicited bit is set. */
-       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                      wqe->wr.send_flags & IB_SEND_SOLICITED);
-
-send_comp:
-       spin_lock_irqsave(&sqp->s_lock, flags);
-flush_send:
-       sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
-       ipath_send_complete(sqp, wqe, send_status);
-       goto again;
-
-rnr_nak:
-       /* Handle RNR NAK */
-       if (qp->ibqp.qp_type == IB_QPT_UC)
-               goto send_comp;
-       /*
-        * Note: we don't need the s_lock held since the BUSY flag
-        * makes this single threaded.
-        */
-       if (sqp->s_rnr_retry == 0) {
-               send_status = IB_WC_RNR_RETRY_EXC_ERR;
-               goto serr;
-       }
-       if (sqp->s_rnr_retry_cnt < 7)
-               sqp->s_rnr_retry--;
-       spin_lock_irqsave(&sqp->s_lock, flags);
-       if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
-               goto clr_busy;
-       sqp->s_flags |= IPATH_S_WAITING;
-       dev->n_rnr_naks++;
-       sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
-       ipath_insert_rnr_queue(sqp);
-       goto clr_busy;
-
-inv_err:
-       send_status = IB_WC_REM_INV_REQ_ERR;
-       wc.status = IB_WC_LOC_QP_OP_ERR;
-       goto err;
-
-acc_err:
-       send_status = IB_WC_REM_ACCESS_ERR;
-       wc.status = IB_WC_LOC_PROT_ERR;
-err:
-       /* responder goes to error state */
-       ipath_rc_error(qp, wc.status);
-
-serr:
-       spin_lock_irqsave(&sqp->s_lock, flags);
-       ipath_send_complete(sqp, wqe, send_status);
-       if (sqp->ibqp.qp_type == IB_QPT_RC) {
-               int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);
-
-               sqp->s_flags &= ~IPATH_S_BUSY;
-               spin_unlock_irqrestore(&sqp->s_lock, flags);
-               if (lastwqe) {
-                       struct ib_event ev;
-
-                       ev.device = sqp->ibqp.device;
-                       ev.element.qp = &sqp->ibqp;
-                       ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
-                       sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
-               }
-               goto done;
-       }
-clr_busy:
-       sqp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-       spin_unlock_irqrestore(&sqp->s_lock, flags);
-done:
-       if (qp && atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-}
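
For the two atomic work-request types, the loopback path above hands the original value of the target back to the requester: FETCH_ADD recovers it by subtracting the addend from atomic64_add_return(), and CMP_SWAP gets it straight from cmpxchg(). A short C11 sketch of that contract (standalone helpers, not the kernel primitives):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Both helpers return the value the target held *before* the update,
     * which is what the code above places in the local SGE / atomic ACK. */
    static uint64_t fetch_add_u64(_Atomic uint64_t *target, uint64_t add)
    {
            return atomic_fetch_add(target, add);
    }

    static uint64_t cmp_swap_u64(_Atomic uint64_t *target, uint64_t cmp, uint64_t swap)
    {
            uint64_t old = cmp;

            /* on failure, 'old' is rewritten with the current value */
            atomic_compare_exchange_strong(target, &old, swap);
            return old;
    }

    int main(void)
    {
            _Atomic uint64_t mem = 10;

            printf("%llu\n", (unsigned long long)fetch_add_u64(&mem, 5));     /* 10 */
            printf("%llu\n", (unsigned long long)cmp_swap_u64(&mem, 15, 99)); /* 15 */
            printf("%llu\n", (unsigned long long)atomic_load(&mem));          /* 99 */
            return 0;
    }
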
-
-static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
-{
-       if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
-           qp->ibqp.qp_type == IB_QPT_SMI) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                                dd->ipath_sendctrl);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-       }
-}
-
-/**
- * ipath_no_bufs_available - tell the layer driver we need buffers
- * @qp: the QP that caused the problem
- * @dev: the device we ran out of buffers on
- *
- * Called when we run out of PIO buffers.
- * If we are now in the error state, return zero to flush the
- * send work request.
- */
-static int ipath_no_bufs_available(struct ipath_qp *qp,
-                                   struct ipath_ibdev *dev)
-{
-       unsigned long flags;
-       int ret = 1;
-
-       /*
-        * Note that as soon as want_buffer() is called and
-        * possibly before it returns, ipath_ib_piobufavail()
-        * could be called. Therefore, put QP on the piowait list before
-        * enabling the PIO avail interrupt.
-        */
-       spin_lock_irqsave(&qp->s_lock, flags);
-       if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
-               dev->n_piowait++;
-               qp->s_flags |= IPATH_S_WAITING;
-               qp->s_flags &= ~IPATH_S_BUSY;
-               spin_lock(&dev->pending_lock);
-               if (list_empty(&qp->piowait))
-                       list_add_tail(&qp->piowait, &dev->piowait);
-               spin_unlock(&dev->pending_lock);
-       } else
-               ret = 0;
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       if (ret)
-               want_buffer(dev->dd, qp);
-       return ret;
-}
-
-/**
- * ipath_make_grh - construct a GRH header
- * @dev: a pointer to the ipath device
- * @hdr: a pointer to the GRH header being constructed
- * @grh: the global route address to send to
- * @hwords: the number of 32 bit words of header being sent
- * @nwords: the number of 32 bit words of data being sent
- *
- * Return the size of the header in 32 bit words.
- */
-u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
-                  struct ib_global_route *grh, u32 hwords, u32 nwords)
-{
-       hdr->version_tclass_flow =
-               cpu_to_be32((6 << 28) |
-                           (grh->traffic_class << 20) |
-                           grh->flow_label);
-       hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
-       /* next_hdr is defined by C8-7 in ch. 8.4.1 */
-       hdr->next_hdr = 0x1B;
-       hdr->hop_limit = grh->hop_limit;
-       /* The SGID is 32-bit aligned. */
-       hdr->sgid.global.subnet_prefix = dev->gid_prefix;
-       hdr->sgid.global.interface_id = dev->dd->ipath_guid;
-       hdr->dgid = grh->dgid;
-
-       /* GRH header size in 32-bit words. */
-       return sizeof(struct ib_grh) / sizeof(u32);
-}
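
One line in ipath_make_grh() worth unpacking is the PayLen computation: hwords counts the 32-bit header words built so far and nwords the padded payload words, and the "- 2" is consistent with dropping the two 32-bit LRH words that precede the GRH, leaving the byte count that follows the GRH including the ICRC. A worked sketch of that arithmetic, assuming SIZE_OF_CRC is one 32-bit ICRC word (an assumption; its definition is not in this hunk):

    #include <stdio.h>
    #include <stdint.h>

    #define SIZE_OF_CRC 1   /* assumed: one 32-bit ICRC word */

    /* PayLen arithmetic from ipath_make_grh(): header words minus the two
     * LRH words, plus payload words and the ICRC word, converted to bytes. */
    static uint16_t grh_paylen(uint32_t hwords, uint32_t nwords)
    {
            return (uint16_t)((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
    }

    int main(void)
    {
            /* e.g. 5 header words (2 LRH + 3 BTH) and 256 bytes of payload */
            printf("%u bytes\n", grh_paylen(5, 64));   /* (5-2+64+1)*4 = 272 */
            return 0;
    }
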
-
-void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
-                          struct ipath_other_headers *ohdr,
-                          u32 bth0, u32 bth2)
-{
-       u16 lrh0;
-       u32 nwords;
-       u32 extra_bytes;
-
-       /* Construct the header. */
-       extra_bytes = -qp->s_cur_size & 3;
-       nwords = (qp->s_cur_size + extra_bytes) >> 2;
-       lrh0 = IPATH_LRH_BTH;
-       if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
-               qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
-                                                &qp->remote_ah_attr.grh,
-                                                qp->s_hdrwords, nwords);
-               lrh0 = IPATH_LRH_GRH;
-       }
-       lrh0 |= qp->remote_ah_attr.sl << 4;
-       qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-       qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
-       qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
-       qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid |
-                                      qp->remote_ah_attr.src_path_bits);
-       bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
-       bth0 |= extra_bytes << 20;
-       ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
-       ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
-       ohdr->bth[2] = cpu_to_be32(bth2);
-}
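
The header builder above pads the payload to a 32-bit boundary with the "-size & 3" idiom and folds the pad count into bth0 via the extra_bytes << 20 term. A tiny sketch of just that padding arithmetic:

    #include <stdio.h>
    #include <stdint.h>

    /* "-size & 3" yields the number of pad bytes (0..3) needed to reach
     * the next 32-bit boundary; nwords is then the padded length in words. */
    static uint32_t pad_to_dword(uint32_t size)
    {
            return (uint32_t)-size & 3;
    }

    int main(void)
    {
            for (uint32_t size = 0; size < 6; size++)
                    printf("size %u -> pad %u, nwords %u\n",
                           size, pad_to_dword(size),
                           (size + pad_to_dword(size)) >> 2);
            return 0;
    }
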
-
-/**
- * ipath_do_send - perform a send on a QP
- * @data: contains a pointer to the QP
- *
- * Process entries in the send work queue until credit or queue is
- * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
- * Otherwise, two threads could send packets out of order.
- */
-void ipath_do_send(unsigned long data)
-{
-       struct ipath_qp *qp = (struct ipath_qp *)data;
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       int (*make_req)(struct ipath_qp *qp);
-       unsigned long flags;
-
-       if ((qp->ibqp.qp_type == IB_QPT_RC ||
-            qp->ibqp.qp_type == IB_QPT_UC) &&
-           qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
-               ipath_ruc_loopback(qp);
-               goto bail;
-       }
-
-       if (qp->ibqp.qp_type == IB_QPT_RC)
-              make_req = ipath_make_rc_req;
-       else if (qp->ibqp.qp_type == IB_QPT_UC)
-              make_req = ipath_make_uc_req;
-       else
-              make_req = ipath_make_ud_req;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       /* Return if we are already busy processing a work request. */
-       if ((qp->s_flags & (IPATH_S_BUSY | IPATH_S_ANY_WAIT)) ||
-           !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND)) {
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-               goto bail;
-       }
-
-       qp->s_flags |= IPATH_S_BUSY;
-
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-
-again:
-       /* Check for a constructed packet to be sent. */
-       if (qp->s_hdrwords != 0) {
-               /*
-                * If no PIO bufs are available, return.  An interrupt will
-                * call ipath_ib_piobufavail() when one is available.
-                */
-               if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
-                                    qp->s_cur_sge, qp->s_cur_size)) {
-                       if (ipath_no_bufs_available(qp, dev))
-                               goto bail;
-               }
-               dev->n_unicast_xmit++;
-               /* Record that we sent the packet and s_hdr is empty. */
-               qp->s_hdrwords = 0;
-       }
-
-       if (make_req(qp))
-               goto again;
-
-bail:;
-}
-
-/*
- * This should be called with s_lock held.
- */
-void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
-                        enum ib_wc_status status)
-{
-       u32 old_last, last;
-
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_OR_FLUSH_SEND))
-               return;
-
-       /* See ch. 11.2.4.1 and 10.7.3.1 */
-       if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
-           (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
-           status != IB_WC_SUCCESS) {
-               struct ib_wc wc;
-
-               memset(&wc, 0, sizeof wc);
-               wc.wr_id = wqe->wr.wr_id;
-               wc.status = status;
-               wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
-               wc.qp = &qp->ibqp;
-               if (status == IB_WC_SUCCESS)
-                       wc.byte_len = wqe->length;
-               ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
-                              status != IB_WC_SUCCESS);
-       }
-
-       old_last = last = qp->s_last;
-       if (++last >= qp->s_size)
-               last = 0;
-       qp->s_last = last;
-       if (qp->s_cur == old_last)
-               qp->s_cur = last;
-       if (qp->s_tail == old_last)
-               qp->s_tail = last;
-       if (qp->state == IB_QPS_SQD && last == qp->s_cur)
-               qp->s_draining = 0;
-}
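
ipath_send_complete() retires the oldest send WQE by advancing s_last around the circular queue and pulling s_cur and s_tail along if they still pointed at the retired slot. The same bookkeeping as a standalone sketch (struct and field names are illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* Retire the oldest send-queue slot and drag any cursor that still
     * pointed at it forward, as the function above does. */
    struct send_queue {
            uint32_t s_size, s_last, s_cur, s_tail;
    };

    static void retire_oldest(struct send_queue *sq)
    {
            uint32_t old_last = sq->s_last;
            uint32_t last = old_last + 1;

            if (last >= sq->s_size)
                    last = 0;
            sq->s_last = last;
            if (sq->s_cur == old_last)
                    sq->s_cur = last;
            if (sq->s_tail == old_last)
                    sq->s_tail = last;
    }

    int main(void)
    {
            struct send_queue sq = { .s_size = 4, .s_last = 3, .s_cur = 3, .s_tail = 0 };

            retire_oldest(&sq);
            printf("last %u cur %u tail %u\n",
                   sq.s_last, sq.s_cur, sq.s_tail);   /* prints: last 0 cur 0 tail 0 */
            return 0;
    }
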
diff --git a/drivers/staging/rdma/ipath/ipath_sdma.c b/drivers/staging/rdma/ipath/ipath_sdma.c
deleted file mode 100644 (file)
index 1ffc06a..0000000
+++ /dev/null
@@ -1,818 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/gfp.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-#define SDMA_DESCQ_SZ PAGE_SIZE /* 256 entries per 4KB page */
-
-static void vl15_watchdog_enq(struct ipath_devdata *dd)
-{
-       /* ipath_sdma_lock must already be held */
-       if (atomic_inc_return(&dd->ipath_sdma_vl15_count) == 1) {
-               unsigned long interval = (HZ + 19) / 20;
-               dd->ipath_sdma_vl15_timer.expires = jiffies + interval;
-               add_timer(&dd->ipath_sdma_vl15_timer);
-       }
-}
-
-static void vl15_watchdog_deq(struct ipath_devdata *dd)
-{
-       /* ipath_sdma_lock must already be held */
-       if (atomic_dec_return(&dd->ipath_sdma_vl15_count) != 0) {
-               unsigned long interval = (HZ + 19) / 20;
-               mod_timer(&dd->ipath_sdma_vl15_timer, jiffies + interval);
-       } else {
-               del_timer(&dd->ipath_sdma_vl15_timer);
-       }
-}
-
-static void vl15_watchdog_timeout(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
-       if (atomic_read(&dd->ipath_sdma_vl15_count) != 0) {
-               ipath_dbg("vl15 watchdog timeout - clearing\n");
-               ipath_cancel_sends(dd, 1);
-               ipath_hol_down(dd);
-       } else {
-               ipath_dbg("vl15 watchdog timeout - "
-                         "condition already cleared\n");
-       }
-}
-
-static void unmap_desc(struct ipath_devdata *dd, unsigned head)
-{
-       __le64 *descqp = &dd->ipath_sdma_descq[head].qw[0];
-       u64 desc[2];
-       dma_addr_t addr;
-       size_t len;
-
-       desc[0] = le64_to_cpu(descqp[0]);
-       desc[1] = le64_to_cpu(descqp[1]);
-
-       addr = (desc[1] << 32) | (desc[0] >> 32);
-       len = (desc[0] >> 14) & (0x7ffULL << 2);
-       dma_unmap_single(&dd->pcidev->dev, addr, len, DMA_TO_DEVICE);
-}
-
-/*
- * ipath_sdma_lock should be locked before calling this.
- */
-int ipath_sdma_make_progress(struct ipath_devdata *dd)
-{
-       struct list_head *lp = NULL;
-       struct ipath_sdma_txreq *txp = NULL;
-       u16 dmahead;
-       u16 start_idx = 0;
-       int progress = 0;
-
-       if (!list_empty(&dd->ipath_sdma_activelist)) {
-               lp = dd->ipath_sdma_activelist.next;
-               txp = list_entry(lp, struct ipath_sdma_txreq, list);
-               start_idx = txp->start_idx;
-       }
-
-       /*
-        * Read the SDMA head register in order to know that the
-        * interrupt clear has been written to the chip.
-        * Otherwise, we may not get an interrupt for the last
-        * descriptor in the queue.
-        */
-       dmahead = (u16)ipath_read_kreg32(dd, dd->ipath_kregs->kr_senddmahead);
-       /* sanity check return value for error handling (chip reset, etc.) */
-       if (dmahead >= dd->ipath_sdma_descq_cnt)
-               goto done;
-
-       while (dd->ipath_sdma_descq_head != dmahead) {
-               if (txp && txp->flags & IPATH_SDMA_TXREQ_F_FREEDESC &&
-                   dd->ipath_sdma_descq_head == start_idx) {
-                       unmap_desc(dd, dd->ipath_sdma_descq_head);
-                       start_idx++;
-                       if (start_idx == dd->ipath_sdma_descq_cnt)
-                               start_idx = 0;
-               }
-
-               /* increment free count and head */
-               dd->ipath_sdma_descq_removed++;
-               if (++dd->ipath_sdma_descq_head == dd->ipath_sdma_descq_cnt)
-                       dd->ipath_sdma_descq_head = 0;
-
-               if (txp && txp->next_descq_idx == dd->ipath_sdma_descq_head) {
-                       /* move to notify list */
-                       if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
-                               vl15_watchdog_deq(dd);
-                       list_move_tail(lp, &dd->ipath_sdma_notifylist);
-                       if (!list_empty(&dd->ipath_sdma_activelist)) {
-                               lp = dd->ipath_sdma_activelist.next;
-                               txp = list_entry(lp, struct ipath_sdma_txreq,
-                                                list);
-                               start_idx = txp->start_idx;
-                       } else {
-                               lp = NULL;
-                               txp = NULL;
-                       }
-               }
-               progress = 1;
-       }
-
-       if (progress)
-               tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
-
-done:
-       return progress;
-}
-
-static void ipath_sdma_notify(struct ipath_devdata *dd, struct list_head *list)
-{
-       struct ipath_sdma_txreq *txp, *txp_next;
-
-       list_for_each_entry_safe(txp, txp_next, list, list) {
-               list_del_init(&txp->list);
-
-               if (txp->callback)
-                       (*txp->callback)(txp->callback_cookie,
-                                        txp->callback_status);
-       }
-}
-
-static void sdma_notify_taskbody(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-       struct list_head list;
-
-       INIT_LIST_HEAD(&list);
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       list_splice_init(&dd->ipath_sdma_notifylist, &list);
-
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       ipath_sdma_notify(dd, &list);
-
-       /*
-        * The IB verbs layer needs to see the callback before getting
-        * the call to ipath_ib_piobufavail() because the callback
-        * handles releasing resources the next send will need.
-        * Otherwise, we could do these calls in
-        * ipath_sdma_make_progress().
-        */
-       ipath_ib_piobufavail(dd->verbs_dev);
-}
-
-static void sdma_notify_task(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *)opaque;
-
-       if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-               sdma_notify_taskbody(dd);
-}
-
-static void dump_sdma_state(struct ipath_devdata *dd)
-{
-       unsigned long reg;
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmastatus);
-       ipath_cdbg(VERBOSE, "kr_senddmastatus: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_sendctrl);
-       ipath_cdbg(VERBOSE, "kr_sendctrl: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask0);
-       ipath_cdbg(VERBOSE, "kr_senddmabufmask0: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask1);
-       ipath_cdbg(VERBOSE, "kr_senddmabufmask1: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmabufmask2);
-       ipath_cdbg(VERBOSE, "kr_senddmabufmask2: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmatail);
-       ipath_cdbg(VERBOSE, "kr_senddmatail: 0x%016lx\n", reg);
-
-       reg = ipath_read_kreg64(dd, dd->ipath_kregs->kr_senddmahead);
-       ipath_cdbg(VERBOSE, "kr_senddmahead: 0x%016lx\n", reg);
-}
-
-static void sdma_abort_task(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-       u64 status;
-       unsigned long flags;
-
-       if (test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-               return;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       status = dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK;
-
-       /* nothing to do */
-       if (status == IPATH_SDMA_ABORT_NONE)
-               goto unlock;
-
-       /* ipath_sdma_abort() is done, waiting for interrupt */
-       if (status == IPATH_SDMA_ABORT_DISARMED) {
-               if (time_before(jiffies, dd->ipath_sdma_abort_intr_timeout))
-                       goto resched_noprint;
-               /* give up, intr got lost somewhere */
-               ipath_dbg("give up waiting for SDMADISABLED intr\n");
-               __set_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-               status = IPATH_SDMA_ABORT_ABORTED;
-       }
-
-       /* everything is stopped, time to clean up and restart */
-       if (status == IPATH_SDMA_ABORT_ABORTED) {
-               struct ipath_sdma_txreq *txp, *txpnext;
-               u64 hwstatus;
-               int notify = 0;
-
-               hwstatus = ipath_read_kreg64(dd,
-                               dd->ipath_kregs->kr_senddmastatus);
-
-               if ((hwstatus & (IPATH_SDMA_STATUS_SCORE_BOARD_DRAIN_IN_PROG |
-                                IPATH_SDMA_STATUS_ABORT_IN_PROG             |
-                                IPATH_SDMA_STATUS_INTERNAL_SDMA_ENABLE)) ||
-                   !(hwstatus & IPATH_SDMA_STATUS_SCB_EMPTY)) {
-                       if (dd->ipath_sdma_reset_wait > 0) {
-                               /* not done shutting down sdma */
-                               --dd->ipath_sdma_reset_wait;
-                               goto resched;
-                       }
-                       ipath_cdbg(VERBOSE, "gave up waiting for quiescent "
-                               "status after SDMA reset, continuing\n");
-                       dump_sdma_state(dd);
-               }
-
-               /* dequeue all "sent" requests */
-               list_for_each_entry_safe(txp, txpnext,
-                                        &dd->ipath_sdma_activelist, list) {
-                       txp->callback_status = IPATH_SDMA_TXREQ_S_ABORTED;
-                       if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
-                               vl15_watchdog_deq(dd);
-                       list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
-                       notify = 1;
-               }
-               if (notify)
-                       tasklet_hi_schedule(&dd->ipath_sdma_notify_task);
-
-               /* reset our notion of head and tail */
-               dd->ipath_sdma_descq_tail = 0;
-               dd->ipath_sdma_descq_head = 0;
-               dd->ipath_sdma_head_dma[0] = 0;
-               dd->ipath_sdma_generation = 0;
-               dd->ipath_sdma_descq_removed = dd->ipath_sdma_descq_added;
-
-               /* Reset SendDmaLenGen */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen,
-                       (u64) dd->ipath_sdma_descq_cnt | (1ULL << 18));
-
-               /* done with sdma state for a bit */
-               spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-               /*
-                * Don't restart sdma here (with the exception
-                * below). Wait until link is up to ACTIVE.  VL15 MADs
-                * used to bring the link up use PIO, and multiple link
-                * transitions otherwise cause the sdma engine to be
-                * stopped and started multiple times.
-                * The disable is done here, including the shadow,
-                * so the state is kept consistent.
-                * See ipath_restart_sdma() for the actual starting
-                * of sdma.
-                */
-               spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-               dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-                                dd->ipath_sendctrl);
-               ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-               spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-               /* make sure I see next message */
-               dd->ipath_sdma_abort_jiffies = 0;
-
-               /*
-                * Not everything that takes SDMA offline is a link
-                * status change.  If the link was up, restart SDMA.
-                */
-               if (dd->ipath_flags & IPATH_LINKACTIVE)
-                       ipath_restart_sdma(dd);
-
-               goto done;
-       }
-
-resched:
-       /*
-        * for now, keep spinning
-        * JAG - this is bad to just have default be a loop without
-        * state change
-        */
-       if (time_after(jiffies, dd->ipath_sdma_abort_jiffies)) {
-               ipath_dbg("looping with status 0x%08lx\n",
-                         dd->ipath_sdma_status);
-               dd->ipath_sdma_abort_jiffies = jiffies + 5 * HZ;
-       }
-resched_noprint:
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-       if (!test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-               tasklet_hi_schedule(&dd->ipath_sdma_abort_task);
-       return;
-
-unlock:
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-done:
-       return;
-}
-
-/*
- * This is called from interrupt context.
- */
-void ipath_sdma_intr(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       (void) ipath_sdma_make_progress(dd);
-
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-}
-
-static int alloc_sdma(struct ipath_devdata *dd)
-{
-       int ret = 0;
-
-       /* Allocate memory for SendDMA descriptor FIFO */
-       dd->ipath_sdma_descq = dma_alloc_coherent(&dd->pcidev->dev,
-               SDMA_DESCQ_SZ, &dd->ipath_sdma_descq_phys, GFP_KERNEL);
-
-       if (!dd->ipath_sdma_descq) {
-               ipath_dev_err(dd, "failed to allocate SendDMA descriptor "
-                       "FIFO memory\n");
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       dd->ipath_sdma_descq_cnt =
-               SDMA_DESCQ_SZ / sizeof(struct ipath_sdma_desc);
-
-       /* Allocate memory for DMA of head register to memory */
-       dd->ipath_sdma_head_dma = dma_alloc_coherent(&dd->pcidev->dev,
-               PAGE_SIZE, &dd->ipath_sdma_head_phys, GFP_KERNEL);
-       if (!dd->ipath_sdma_head_dma) {
-               ipath_dev_err(dd, "failed to allocate SendDMA head memory\n");
-               ret = -ENOMEM;
-               goto cleanup_descq;
-       }
-       dd->ipath_sdma_head_dma[0] = 0;
-
-       setup_timer(&dd->ipath_sdma_vl15_timer, vl15_watchdog_timeout,
-                       (unsigned long)dd);
-
-       atomic_set(&dd->ipath_sdma_vl15_count, 0);
-
-       goto done;
-
-cleanup_descq:
-       dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
-               (void *)dd->ipath_sdma_descq, dd->ipath_sdma_descq_phys);
-       dd->ipath_sdma_descq = NULL;
-       dd->ipath_sdma_descq_phys = 0;
-done:
-       return ret;
-}
-
-int setup_sdma(struct ipath_devdata *dd)
-{
-       int ret = 0;
-       unsigned i, n;
-       u64 tmp64;
-       u64 senddmabufmask[3] = { 0 };
-       unsigned long flags;
-
-       ret = alloc_sdma(dd);
-       if (ret)
-               goto done;
-
-       if (!dd->ipath_sdma_descq) {
-               ipath_dev_err(dd, "SendDMA memory not allocated\n");
-               goto done;
-       }
-
-       /*
-        * Set initial status as if we had been up, then gone down.
-        * This lets initial start on transition to ACTIVE be the
-        * same as restart after link flap.
-        */
-       dd->ipath_sdma_status = IPATH_SDMA_ABORT_ABORTED;
-       dd->ipath_sdma_abort_jiffies = 0;
-       dd->ipath_sdma_generation = 0;
-       dd->ipath_sdma_descq_tail = 0;
-       dd->ipath_sdma_descq_head = 0;
-       dd->ipath_sdma_descq_removed = 0;
-       dd->ipath_sdma_descq_added = 0;
-
-       /* Set SendDmaBase */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase,
-                        dd->ipath_sdma_descq_phys);
-       /* Set SendDmaLenGen */
-       tmp64 = dd->ipath_sdma_descq_cnt;
-       tmp64 |= 1<<18; /* enable generation checking */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, tmp64);
-       /* Set SendDmaTail */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail,
-                        dd->ipath_sdma_descq_tail);
-       /* Set SendDmaHeadAddr */
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr,
-                        dd->ipath_sdma_head_phys);
-
-       /*
-        * Reserve all the former "kernel" piobufs, using high number range
-        * so we get as many 4K buffers as possible
-        */
-       n = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
-       i = dd->ipath_lastport_piobuf + dd->ipath_pioreserved;
-       ipath_chg_pioavailkernel(dd, i, n - i , 0);
-       for (; i < n; ++i) {
-               unsigned word = i / 64;
-               unsigned bit = i & 63;
-               BUG_ON(word >= 3);
-               senddmabufmask[word] |= 1ULL << bit;
-       }
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0,
-                        senddmabufmask[0]);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1,
-                        senddmabufmask[1]);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2,
-                        senddmabufmask[2]);
-
-       INIT_LIST_HEAD(&dd->ipath_sdma_activelist);
-       INIT_LIST_HEAD(&dd->ipath_sdma_notifylist);
-
-       tasklet_init(&dd->ipath_sdma_notify_task, sdma_notify_task,
-                    (unsigned long) dd);
-       tasklet_init(&dd->ipath_sdma_abort_task, sdma_abort_task,
-                    (unsigned long) dd);
-
-       /*
-        * No use to turn on SDMA here, as link is probably not ACTIVE
-        * Just mark it RUNNING and enable the interrupt, and let the
-        * ipath_restart_sdma() on link transition to ACTIVE actually
-        * enable it.
-        */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl |= INFINIPATH_S_SDMAINTENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       __set_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-done:
-       return ret;
-}
-
-void teardown_sdma(struct ipath_devdata *dd)
-{
-       struct ipath_sdma_txreq *txp, *txpnext;
-       unsigned long flags;
-       dma_addr_t sdma_head_phys = 0;
-       dma_addr_t sdma_descq_phys = 0;
-       void *sdma_descq = NULL;
-       void *sdma_head_dma = NULL;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       __clear_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status);
-       __set_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-       __set_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status);
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       tasklet_kill(&dd->ipath_sdma_abort_task);
-       tasklet_kill(&dd->ipath_sdma_notify_task);
-
-       /* turn off sdma */
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
-               dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       /* dequeue all "sent" requests */
-       list_for_each_entry_safe(txp, txpnext, &dd->ipath_sdma_activelist,
-                                list) {
-               txp->callback_status = IPATH_SDMA_TXREQ_S_SHUTDOWN;
-               if (txp->flags & IPATH_SDMA_TXREQ_F_VL15)
-                       vl15_watchdog_deq(dd);
-               list_move_tail(&txp->list, &dd->ipath_sdma_notifylist);
-       }
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       sdma_notify_taskbody(dd);
-
-       del_timer_sync(&dd->ipath_sdma_vl15_timer);
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       dd->ipath_sdma_abort_jiffies = 0;
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabase, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmalengen, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmaheadaddr, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask0, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask1, 0);
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmabufmask2, 0);
-
-       if (dd->ipath_sdma_head_dma) {
-               sdma_head_dma = (void *) dd->ipath_sdma_head_dma;
-               sdma_head_phys = dd->ipath_sdma_head_phys;
-               dd->ipath_sdma_head_dma = NULL;
-               dd->ipath_sdma_head_phys = 0;
-       }
-
-       if (dd->ipath_sdma_descq) {
-               sdma_descq = dd->ipath_sdma_descq;
-               sdma_descq_phys = dd->ipath_sdma_descq_phys;
-               dd->ipath_sdma_descq = NULL;
-               dd->ipath_sdma_descq_phys = 0;
-       }
-
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       if (sdma_head_dma)
-               dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
-                                 sdma_head_dma, sdma_head_phys);
-
-       if (sdma_descq)
-               dma_free_coherent(&dd->pcidev->dev, SDMA_DESCQ_SZ,
-                                 sdma_descq, sdma_descq_phys);
-}
-
-/*
- * [Re]start SDMA, if we use it, and it's not already OK.
- * This is called on transition to link ACTIVE, either the first or
- * subsequent times.
- */
-void ipath_restart_sdma(struct ipath_devdata *dd)
-{
-       unsigned long flags;
-       int needed = 1;
-
-       if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA))
-               goto bail;
-
-       /*
-        * First, make sure we should, which is to say,
-        * check that we are "RUNNING" (not in teardown)
-        * and not "SHUTDOWN"
-        */
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       if (!test_bit(IPATH_SDMA_RUNNING, &dd->ipath_sdma_status)
-               || test_bit(IPATH_SDMA_SHUTDOWN, &dd->ipath_sdma_status))
-                       needed = 0;
-       else {
-               __clear_bit(IPATH_SDMA_DISABLED, &dd->ipath_sdma_status);
-               __clear_bit(IPATH_SDMA_DISARMED, &dd->ipath_sdma_status);
-               __clear_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status);
-       }
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-       if (!needed) {
-               ipath_dbg("invalid attempt to restart SDMA, status 0x%08lx\n",
-                       dd->ipath_sdma_status);
-               goto bail;
-       }
-       spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
-       /*
-        * First clear, just to be safe. Enable is only done
-        * in chip on 0->1 transition
-        */
-       dd->ipath_sendctrl &= ~INFINIPATH_S_SDMAENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       dd->ipath_sendctrl |= INFINIPATH_S_SDMAENABLE;
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
-       ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
-       spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
-
-       /* notify upper layers */
-       ipath_ib_piobufavail(dd->verbs_dev);
-
-bail:
-       return;
-}
-
-static inline void make_sdma_desc(struct ipath_devdata *dd,
-       u64 *sdmadesc, u64 addr, u64 dwlen, u64 dwoffset)
-{
-       WARN_ON(addr & 3);
-       /* SDmaPhyAddr[47:32] */
-       sdmadesc[1] = addr >> 32;
-       /* SDmaPhyAddr[31:0] */
-       sdmadesc[0] = (addr & 0xfffffffcULL) << 32;
-       /* SDmaGeneration[1:0] */
-       sdmadesc[0] |= (dd->ipath_sdma_generation & 3ULL) << 30;
-       /* SDmaDwordCount[10:0] */
-       sdmadesc[0] |= (dwlen & 0x7ffULL) << 16;
-       /* SDmaBufOffset[12:2] */
-       sdmadesc[0] |= dwoffset & 0x7ffULL;
-}
-
-/*
- * This function queues one IB packet onto the send DMA queue per call.
- * The caller is responsible for checking:
- * 1) The number of send DMA descriptor entries is less than the size of
- *    the descriptor queue.
- * 2) The IB SGE addresses and lengths are 32-bit aligned
- *    (except possibly the last SGE's length)
- * 3) The SGE addresses are suitable for passing to dma_map_single().
- */
-int ipath_sdma_verbs_send(struct ipath_devdata *dd,
-       struct ipath_sge_state *ss, u32 dwords,
-       struct ipath_verbs_txreq *tx)
-{
-
-       unsigned long flags;
-       struct ipath_sge *sge;
-       int ret = 0;
-       u16 tail;
-       __le64 *descqp;
-       u64 sdmadesc[2];
-       u32 dwoffset;
-       dma_addr_t addr;
-
-       if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) {
-               ipath_dbg("packet size %X > ibmax %X, fail\n",
-                       tx->map_len + (dwords<<2), dd->ipath_ibmaxlen);
-               ret = -EMSGSIZE;
-               goto fail;
-       }
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-retry:
-       if (unlikely(test_bit(IPATH_SDMA_ABORTING, &dd->ipath_sdma_status))) {
-               ret = -EBUSY;
-               goto unlock;
-       }
-
-       if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) {
-               if (ipath_sdma_make_progress(dd))
-                       goto retry;
-               ret = -ENOBUFS;
-               goto unlock;
-       }
-
-       addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
-                             tx->map_len, DMA_TO_DEVICE);
-       if (dma_mapping_error(&dd->pcidev->dev, addr))
-               goto ioerr;
-
-       dwoffset = tx->map_len >> 2;
-       make_sdma_desc(dd, sdmadesc, (u64) addr, dwoffset, 0);
-
-       /* SDmaFirstDesc */
-       sdmadesc[0] |= 1ULL << 12;
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
-               sdmadesc[0] |= 1ULL << 14;      /* SDmaUseLargeBuf */
-
-       /* write to the descq */
-       tail = dd->ipath_sdma_descq_tail;
-       descqp = &dd->ipath_sdma_descq[tail].qw[0];
-       *descqp++ = cpu_to_le64(sdmadesc[0]);
-       *descqp++ = cpu_to_le64(sdmadesc[1]);
-
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC)
-               tx->txreq.start_idx = tail;
-
-       /* increment the tail */
-       if (++tail == dd->ipath_sdma_descq_cnt) {
-               tail = 0;
-               descqp = &dd->ipath_sdma_descq[0].qw[0];
-               ++dd->ipath_sdma_generation;
-       }
-
-       sge = &ss->sge;
-       while (dwords) {
-               u32 dw;
-               u32 len;
-
-               len = dwords << 2;
-               if (len > sge->length)
-                       len = sge->length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               dw = (len + 3) >> 2;
-               addr = dma_map_single(&dd->pcidev->dev, sge->vaddr, dw << 2,
-                                     DMA_TO_DEVICE);
-               if (dma_mapping_error(&dd->pcidev->dev, addr))
-                       goto unmap;
-               make_sdma_desc(dd, sdmadesc, (u64) addr, dw, dwoffset);
-               /* SDmaUseLargeBuf has to be set in every descriptor */
-               if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF)
-                       sdmadesc[0] |= 1ULL << 14;
-               /* write to the descq */
-               *descqp++ = cpu_to_le64(sdmadesc[0]);
-               *descqp++ = cpu_to_le64(sdmadesc[1]);
-
-               /* increment the tail */
-               if (++tail == dd->ipath_sdma_descq_cnt) {
-                       tail = 0;
-                       descqp = &dd->ipath_sdma_descq[0].qw[0];
-                       ++dd->ipath_sdma_generation;
-               }
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-
-               dwoffset += dw;
-               dwords -= dw;
-       }
-
-       if (!tail)
-               descqp = &dd->ipath_sdma_descq[dd->ipath_sdma_descq_cnt].qw[0];
-       descqp -= 2;
-       /* SDmaLastDesc */
-       descqp[0] |= cpu_to_le64(1ULL << 11);
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) {
-               /* SDmaIntReq */
-               descqp[0] |= cpu_to_le64(1ULL << 15);
-       }
-
-       /* Commit writes to memory and advance the tail on the chip */
-       wmb();
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
-
-       tx->txreq.next_descq_idx = tail;
-       tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK;
-       dd->ipath_sdma_descq_tail = tail;
-       dd->ipath_sdma_descq_added += tx->txreq.sg_count;
-       list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist);
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15)
-               vl15_watchdog_enq(dd);
-       goto unlock;
-
-unmap:
-       while (tail != dd->ipath_sdma_descq_tail) {
-               if (!tail)
-                       tail = dd->ipath_sdma_descq_cnt - 1;
-               else
-                       tail--;
-               unmap_desc(dd, tail);
-       }
-ioerr:
-       ret = -EIO;
-unlock:
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-fail:
-       return ret;
-}
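
The removed ipath_sdma.c builds each send-DMA descriptor in make_sdma_desc() by packing the DMA address, generation, dword count and buffer offset into two quadwords. A minimal user-space sketch of that field packing, assuming the bit positions shown in the deleted code above; the struct and the pack_desc() helper are illustrative names, not driver API.

/* Illustrative sketch only: the two-quadword descriptor packing used by
 * the removed make_sdma_desc(); bit positions copied from that function. */
#include <stdint.h>
#include <stdio.h>

struct sdma_desc_fields {
        uint64_t addr;          /* DMA address, assumed 4-byte aligned */
        uint64_t gen;           /* 2-bit generation */
        uint64_t dwlen;         /* 11-bit dword count */
        uint64_t dwoffset;      /* 11-bit buffer offset, in dwords */
};

static void pack_desc(const struct sdma_desc_fields *f, uint64_t qw[2])
{
        qw[1] = f->addr >> 32;                          /* SDmaPhyAddr[47:32] */
        qw[0] = (f->addr & 0xfffffffcULL) << 32;        /* SDmaPhyAddr[31:0] */
        qw[0] |= (f->gen & 3ULL) << 30;                 /* SDmaGeneration[1:0] */
        qw[0] |= (f->dwlen & 0x7ffULL) << 16;           /* SDmaDwordCount[10:0] */
        qw[0] |= f->dwoffset & 0x7ffULL;                /* SDmaBufOffset[12:2] */
}

int main(void)
{
        struct sdma_desc_fields f = { .addr = 0x123456789ab0ULL, .gen = 2,
                                      .dwlen = 64, .dwoffset = 16 };
        uint64_t qw[2];

        pack_desc(&f, qw);
        printf("qw0=0x%016llx qw1=0x%016llx\n",
               (unsigned long long)qw[0], (unsigned long long)qw[1]);
        return 0;
}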
diff --git a/drivers/staging/rdma/ipath/ipath_srq.c b/drivers/staging/rdma/ipath/ipath_srq.c
deleted file mode 100644 (file)
index 2627198..0000000
+++ /dev/null
@@ -1,380 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include "ipath_verbs.h"
-
-/**
- * ipath_post_srq_receive - post a receive on a shared receive queue
- * @ibsrq: the SRQ to post the receive on
- * @wr: the list of work requests to post
- * @bad_wr: the first WR to cause a problem is put here
- *
- * This may be called from interrupt context.
- */
-int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-                          struct ib_recv_wr **bad_wr)
-{
-       struct ipath_srq *srq = to_isrq(ibsrq);
-       struct ipath_rwq *wq;
-       unsigned long flags;
-       int ret;
-
-       for (; wr; wr = wr->next) {
-               struct ipath_rwqe *wqe;
-               u32 next;
-               int i;
-
-               if ((unsigned) wr->num_sge > srq->rq.max_sge) {
-                       *bad_wr = wr;
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               spin_lock_irqsave(&srq->rq.lock, flags);
-               wq = srq->rq.wq;
-               next = wq->head + 1;
-               if (next >= srq->rq.size)
-                       next = 0;
-               if (next == wq->tail) {
-                       spin_unlock_irqrestore(&srq->rq.lock, flags);
-                       *bad_wr = wr;
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-
-               wqe = get_rwqe_ptr(&srq->rq, wq->head);
-               wqe->wr_id = wr->wr_id;
-               wqe->num_sge = wr->num_sge;
-               for (i = 0; i < wr->num_sge; i++)
-                       wqe->sg_list[i] = wr->sg_list[i];
-               /* Make sure queue entry is written before the head index. */
-               smp_wmb();
-               wq->head = next;
-               spin_unlock_irqrestore(&srq->rq.lock, flags);
-       }
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_create_srq - create a shared receive queue
- * @ibpd: the protection domain of the SRQ to create
- * @srq_init_attr: the attributes of the SRQ
- * @udata: data from libipathverbs when creating a user SRQ
- */
-struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
-                               struct ib_srq_init_attr *srq_init_attr,
-                               struct ib_udata *udata)
-{
-       struct ipath_ibdev *dev = to_idev(ibpd->device);
-       struct ipath_srq *srq;
-       u32 sz;
-       struct ib_srq *ret;
-
-       if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
-               ret = ERR_PTR(-ENOSYS);
-               goto done;
-       }
-
-       if (srq_init_attr->attr.max_wr == 0) {
-               ret = ERR_PTR(-EINVAL);
-               goto done;
-       }
-
-       if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
-           (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
-               ret = ERR_PTR(-EINVAL);
-               goto done;
-       }
-
-       srq = kmalloc(sizeof(*srq), GFP_KERNEL);
-       if (!srq) {
-               ret = ERR_PTR(-ENOMEM);
-               goto done;
-       }
-
-       /*
-        * Need to use vmalloc() if we want to support large #s of entries.
-        */
-       srq->rq.size = srq_init_attr->attr.max_wr + 1;
-       srq->rq.max_sge = srq_init_attr->attr.max_sge;
-       sz = sizeof(struct ib_sge) * srq->rq.max_sge +
-               sizeof(struct ipath_rwqe);
-       srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz);
-       if (!srq->rq.wq) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_srq;
-       }
-
-       /*
-        * Return the address of the RWQ as the offset to mmap.
-        * See ipath_mmap() for details.
-        */
-       if (udata && udata->outlen >= sizeof(__u64)) {
-               int err;
-               u32 s = sizeof(struct ipath_rwq) + srq->rq.size * sz;
-
-               srq->ip =
-                   ipath_create_mmap_info(dev, s,
-                                          ibpd->uobject->context,
-                                          srq->rq.wq);
-               if (!srq->ip) {
-                       ret = ERR_PTR(-ENOMEM);
-                       goto bail_wq;
-               }
-
-               err = ib_copy_to_udata(udata, &srq->ip->offset,
-                                      sizeof(srq->ip->offset));
-               if (err) {
-                       ret = ERR_PTR(err);
-                       goto bail_ip;
-               }
-       } else
-               srq->ip = NULL;
-
-       /*
-        * ib_create_srq() will initialize srq->ibsrq.
-        */
-       spin_lock_init(&srq->rq.lock);
-       srq->rq.wq->head = 0;
-       srq->rq.wq->tail = 0;
-       srq->limit = srq_init_attr->attr.srq_limit;
-
-       spin_lock(&dev->n_srqs_lock);
-       if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
-               spin_unlock(&dev->n_srqs_lock);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail_ip;
-       }
-
-       dev->n_srqs_allocated++;
-       spin_unlock(&dev->n_srqs_lock);
-
-       if (srq->ip) {
-               spin_lock_irq(&dev->pending_lock);
-               list_add(&srq->ip->pending_mmaps, &dev->pending_mmaps);
-               spin_unlock_irq(&dev->pending_lock);
-       }
-
-       ret = &srq->ibsrq;
-       goto done;
-
-bail_ip:
-       kfree(srq->ip);
-bail_wq:
-       vfree(srq->rq.wq);
-bail_srq:
-       kfree(srq);
-done:
-       return ret;
-}
-
-/**
- * ipath_modify_srq - modify a shared receive queue
- * @ibsrq: the SRQ to modify
- * @attr: the new attributes of the SRQ
- * @attr_mask: indicates which attributes to modify
- * @udata: user data for ipathverbs.so
- */
-int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-                    enum ib_srq_attr_mask attr_mask,
-                    struct ib_udata *udata)
-{
-       struct ipath_srq *srq = to_isrq(ibsrq);
-       struct ipath_rwq *wq;
-       int ret = 0;
-
-       if (attr_mask & IB_SRQ_MAX_WR) {
-               struct ipath_rwq *owq;
-               struct ipath_rwqe *p;
-               u32 sz, size, n, head, tail;
-
-               /* Check that the requested sizes are below the limits. */
-               if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
-                   ((attr_mask & IB_SRQ_LIMIT) ?
-                    attr->srq_limit : srq->limit) > attr->max_wr) {
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               sz = sizeof(struct ipath_rwqe) +
-                       srq->rq.max_sge * sizeof(struct ib_sge);
-               size = attr->max_wr + 1;
-               wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz);
-               if (!wq) {
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-
-               /* Check that we can write the offset to mmap. */
-               if (udata && udata->inlen >= sizeof(__u64)) {
-                       __u64 offset_addr;
-                       __u64 offset = 0;
-
-                       ret = ib_copy_from_udata(&offset_addr, udata,
-                                                sizeof(offset_addr));
-                       if (ret)
-                               goto bail_free;
-                       udata->outbuf =
-                               (void __user *) (unsigned long) offset_addr;
-                       ret = ib_copy_to_udata(udata, &offset,
-                                              sizeof(offset));
-                       if (ret)
-                               goto bail_free;
-               }
-
-               spin_lock_irq(&srq->rq.lock);
-               /*
-                * validate head pointer value and compute
-                * the number of remaining WQEs.
-                */
-               owq = srq->rq.wq;
-               head = owq->head;
-               if (head >= srq->rq.size)
-                       head = 0;
-               tail = owq->tail;
-               if (tail >= srq->rq.size)
-                       tail = 0;
-               n = head;
-               if (n < tail)
-                       n += srq->rq.size - tail;
-               else
-                       n -= tail;
-               if (size <= n) {
-                       ret = -EINVAL;
-                       goto bail_unlock;
-               }
-               n = 0;
-               p = wq->wq;
-               while (tail != head) {
-                       struct ipath_rwqe *wqe;
-                       int i;
-
-                       wqe = get_rwqe_ptr(&srq->rq, tail);
-                       p->wr_id = wqe->wr_id;
-                       p->num_sge = wqe->num_sge;
-                       for (i = 0; i < wqe->num_sge; i++)
-                               p->sg_list[i] = wqe->sg_list[i];
-                       n++;
-                       p = (struct ipath_rwqe *)((char *) p + sz);
-                       if (++tail >= srq->rq.size)
-                               tail = 0;
-               }
-               srq->rq.wq = wq;
-               srq->rq.size = size;
-               wq->head = n;
-               wq->tail = 0;
-               if (attr_mask & IB_SRQ_LIMIT)
-                       srq->limit = attr->srq_limit;
-               spin_unlock_irq(&srq->rq.lock);
-
-               vfree(owq);
-
-               if (srq->ip) {
-                       struct ipath_mmap_info *ip = srq->ip;
-                       struct ipath_ibdev *dev = to_idev(srq->ibsrq.device);
-                       u32 s = sizeof(struct ipath_rwq) + size * sz;
-
-                       ipath_update_mmap_info(dev, ip, s, wq);
-
-                       /*
-                        * Return the offset to mmap.
-                        * See ipath_mmap() for details.
-                        */
-                       if (udata && udata->inlen >= sizeof(__u64)) {
-                               ret = ib_copy_to_udata(udata, &ip->offset,
-                                                      sizeof(ip->offset));
-                               if (ret)
-                                       goto bail;
-                       }
-
-                       spin_lock_irq(&dev->pending_lock);
-                       if (list_empty(&ip->pending_mmaps))
-                               list_add(&ip->pending_mmaps,
-                                        &dev->pending_mmaps);
-                       spin_unlock_irq(&dev->pending_lock);
-               }
-       } else if (attr_mask & IB_SRQ_LIMIT) {
-               spin_lock_irq(&srq->rq.lock);
-               if (attr->srq_limit >= srq->rq.size)
-                       ret = -EINVAL;
-               else
-                       srq->limit = attr->srq_limit;
-               spin_unlock_irq(&srq->rq.lock);
-       }
-       goto bail;
-
-bail_unlock:
-       spin_unlock_irq(&srq->rq.lock);
-bail_free:
-       vfree(wq);
-bail:
-       return ret;
-}
-
-int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
-{
-       struct ipath_srq *srq = to_isrq(ibsrq);
-
-       attr->max_wr = srq->rq.size - 1;
-       attr->max_sge = srq->rq.max_sge;
-       attr->srq_limit = srq->limit;
-       return 0;
-}
-
-/**
- * ipath_destroy_srq - destroy a shared receive queue
- * @ibsrq: the SRQ to destroy
- */
-int ipath_destroy_srq(struct ib_srq *ibsrq)
-{
-       struct ipath_srq *srq = to_isrq(ibsrq);
-       struct ipath_ibdev *dev = to_idev(ibsrq->device);
-
-       spin_lock(&dev->n_srqs_lock);
-       dev->n_srqs_allocated--;
-       spin_unlock(&dev->n_srqs_lock);
-       if (srq->ip)
-               kref_put(&srq->ip->ref, ipath_release_mmap_info);
-       else
-               vfree(srq->rq.wq);
-       kfree(srq);
-
-       return 0;
-}
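
When the removed ipath_modify_srq() resizes an SRQ it first computes how many WQEs remain between tail and head of the circular receive queue before copying them into the new ring. A standalone sketch of that occupancy arithmetic; ring_count() is an illustrative name, not a driver function.

/* Illustrative sketch only: occupancy of a circular queue where head is the
 * next slot to fill and tail the next slot to drain, as computed when the
 * removed ipath_modify_srq() copied WQEs into a resized ring. */
#include <assert.h>

static unsigned ring_count(unsigned head, unsigned tail, unsigned size)
{
        return (head >= tail) ? head - tail : head + size - tail;
}

int main(void)
{
        assert(ring_count(5, 2, 8) == 3);       /* no wrap */
        assert(ring_count(1, 6, 8) == 3);       /* wrapped around */
        assert(ring_count(4, 4, 8) == 0);       /* empty */
        return 0;
}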
diff --git a/drivers/staging/rdma/ipath/ipath_stats.c b/drivers/staging/rdma/ipath/ipath_stats.c
deleted file mode 100644 (file)
index f63e143..0000000
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ipath_kernel.h"
-
-struct infinipath_stats ipath_stats;
-
-/**
- * ipath_snap_cntr - snapshot a chip counter
- * @dd: the infinipath device
- * @creg: the counter to snapshot
- *
- * called from add_timer and user counter read calls, to deal with
- * counters that wrap in "human time".  The words sent and received, and
- * the packets sent and received are all that we worry about.  For now,
- * at least, we don't worry about error counters, because if they wrap
- * that quickly, we probably don't care.  We may eventually just make this
- * handle all the counters.  word counters can wrap in about 20 seconds
- * of full bandwidth traffic, packet counters in a few hours.
- */
-
-u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
-{
-       u32 val, reg64 = 0;
-       u64 val64;
-       unsigned long t0, t1;
-       u64 ret;
-
-       t0 = jiffies;
-       /* If fast increment counters are only 32 bits, snapshot them,
-        * and maintain them as 64bit values in the driver */
-       if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
-           (creg == dd->ipath_cregs->cr_wordsendcnt ||
-            creg == dd->ipath_cregs->cr_wordrcvcnt ||
-            creg == dd->ipath_cregs->cr_pktsendcnt ||
-            creg == dd->ipath_cregs->cr_pktrcvcnt)) {
-               val64 = ipath_read_creg(dd, creg);
-               val = val64 == ~0ULL ? ~0U : 0;
-               reg64 = 1;
-       } else                  /* val64 just to keep gcc quiet... */
-               val64 = val = ipath_read_creg32(dd, creg);
-       /*
-        * See if a second has passed.  This is just a way to detect things
-        * that are quite broken.  Normally this should take just a few
-        * cycles (the check is for long enough that we don't care if we get
-        * pre-empted.)  An Opteron HT O read timeout is 4 seconds with
-        * normal NB values
-        */
-       t1 = jiffies;
-       if (time_before(t0 + HZ, t1) && val == -1) {
-               ipath_dev_err(dd, "Error!  Read counter 0x%x timed out\n",
-                             creg);
-               ret = 0ULL;
-               goto bail;
-       }
-       if (reg64) {
-               ret = val64;
-               goto bail;
-       }
-
-       if (creg == dd->ipath_cregs->cr_wordsendcnt) {
-               if (val != dd->ipath_lastsword) {
-                       dd->ipath_sword += val - dd->ipath_lastsword;
-                       dd->ipath_lastsword = val;
-               }
-               val64 = dd->ipath_sword;
-       } else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
-               if (val != dd->ipath_lastrword) {
-                       dd->ipath_rword += val - dd->ipath_lastrword;
-                       dd->ipath_lastrword = val;
-               }
-               val64 = dd->ipath_rword;
-       } else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
-               if (val != dd->ipath_lastspkts) {
-                       dd->ipath_spkts += val - dd->ipath_lastspkts;
-                       dd->ipath_lastspkts = val;
-               }
-               val64 = dd->ipath_spkts;
-       } else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
-               if (val != dd->ipath_lastrpkts) {
-                       dd->ipath_rpkts += val - dd->ipath_lastrpkts;
-                       dd->ipath_lastrpkts = val;
-               }
-               val64 = dd->ipath_rpkts;
-       } else if (creg == dd->ipath_cregs->cr_ibsymbolerrcnt) {
-               if (dd->ibdeltainprog)
-                       val64 -= val64 - dd->ibsymsnap;
-               val64 -= dd->ibsymdelta;
-       } else if (creg == dd->ipath_cregs->cr_iblinkerrrecovcnt) {
-               if (dd->ibdeltainprog)
-                       val64 -= val64 - dd->iblnkerrsnap;
-               val64 -= dd->iblnkerrdelta;
-       } else
-               val64 = (u64) val;
-
-       ret = val64;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
- * @dd: the infinipath device
- *
- * print the delta of egrfull/hdrqfull errors for kernel ports no more than
- * every 5 seconds.  User processes are printed at close, but kernel doesn't
- * close, so...  Separate routine so it may be called from other places someday, and
- * so the function name when printed by _IPATH_INFO is meaningful
- */
-static void ipath_qcheck(struct ipath_devdata *dd)
-{
-       static u64 last_tot_hdrqfull;
-       struct ipath_portdata *pd = dd->ipath_pd[0];
-       size_t blen = 0;
-       char buf[128];
-       u32 hdrqtail;
-
-       *buf = 0;
-       if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
-               blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
-                               pd->port_hdrqfull -
-                               dd->ipath_p0_hdrqfull);
-               dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
-       }
-       if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
-               blen += snprintf(buf + blen, sizeof buf - blen,
-                                "%srcvegrfull %llu",
-                                blen ? ", " : "",
-                                (unsigned long long)
-                                (ipath_stats.sps_etidfull -
-                                 dd->ipath_last_tidfull));
-               dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
-       }
-
-       /*
-        * this is actually the number of hdrq full interrupts, not actual
-        * events, but at the moment that's mostly what I'm interested in.
-        * Actual count, etc. is in the counters, if needed.  For production
-        * users this won't ordinarily be printed.
-        */
-
-       if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
-           ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
-               blen += snprintf(buf + blen, sizeof buf - blen,
-                                "%shdrqfull %llu (all ports)",
-                                blen ? ", " : "",
-                                (unsigned long long)
-                                (ipath_stats.sps_hdrqfull -
-                                 last_tot_hdrqfull));
-               last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
-       }
-       if (blen)
-               ipath_dbg("%s\n", buf);
-
-       hdrqtail = ipath_get_hdrqtail(pd);
-       if (pd->port_head != hdrqtail) {
-               if (dd->ipath_lastport0rcv_cnt ==
-                   ipath_stats.sps_port0pkts) {
-                       ipath_cdbg(PKT, "missing rcv interrupts? "
-                                  "port0 hd=%x tl=%x; port0pkts %llx; write"
-                                  " hd (w/intr)\n",
-                                  pd->port_head, hdrqtail,
-                                  (unsigned long long)
-                                  ipath_stats.sps_port0pkts);
-                       ipath_write_ureg(dd, ur_rcvhdrhead, hdrqtail |
-                               dd->ipath_rhdrhead_intr_off, pd->port_port);
-               }
-               dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
-       }
-}
-
-static void ipath_chk_errormask(struct ipath_devdata *dd)
-{
-       static u32 fixed;
-       u32 ctrl;
-       unsigned long errormask;
-       unsigned long hwerrs;
-
-       if (!dd->ipath_errormask || !(dd->ipath_flags & IPATH_INITTED))
-               return;
-
-       errormask = ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
-
-       if (errormask == dd->ipath_errormask)
-               return;
-       fixed++;
-
-       hwerrs = ipath_read_kreg64(dd, dd->ipath_kregs->kr_hwerrstatus);
-       ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
-
-       ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-               dd->ipath_errormask);
-
-       if ((hwerrs & dd->ipath_hwerrmask) ||
-               (ctrl & INFINIPATH_C_FREEZEMODE)) {
-               /* force re-interrupt of pending events, just in case */
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_hwerrclear, 0ULL);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear, 0ULL);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, 0ULL);
-               dev_info(&dd->pcidev->dev,
-                       "errormask fixed(%u) %lx -> %lx, ctrl %x hwerr %lx\n",
-                       fixed, errormask, (unsigned long)dd->ipath_errormask,
-                       ctrl, hwerrs);
-       } else
-               ipath_dbg("errormask fixed(%u) %lx -> %lx, no freeze\n",
-                       fixed, errormask,
-                       (unsigned long)dd->ipath_errormask);
-}
-
-

-/**
- * ipath_get_faststats - get word counters from chip before they overflow
- * @opaque - contains a pointer to the infinipath device ipath_devdata
- *
- * called from add_timer
- */
-void ipath_get_faststats(unsigned long opaque)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
-       int i;
-       static unsigned cnt;
-       unsigned long flags;
-       u64 traffic_wds;
-
-       /*
-        * don't access the chip while running diags, or memory diags can
-        * fail
-        */
-       if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
-           ipath_diag_inuse)
-               /* but re-arm the timer, for diags case; won't hurt other */
-               goto done;
-
-       /*
-        * We now try to maintain an "active timer", based on traffic
-        * exceeding a threshold, so we need to check the word-counts
-        * even if they are 64-bit.
-        */
-       traffic_wds = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt) +
-               ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-       spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
-       traffic_wds -= dd->ipath_traffic_wds;
-       dd->ipath_traffic_wds += traffic_wds;
-       if (traffic_wds  >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->ipath_active_time); /* S/B #define */
-       spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
-
-       if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
-               ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-               ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-       }
-
-       ipath_qcheck(dd);
-
-       /*
-        * deal with repeat error suppression.  Doesn't really matter if
-        * last error was almost a full interval ago, or just a few usecs
-        * ago; still won't get more than 2 per interval.  We may want
-        * longer intervals for this eventually, could do with mod, counter
-        * or separate timer.  Also see code in ipath_handle_errors() and
-        * ipath_handle_hwerrors().
-        */
-
-       if (dd->ipath_lasterror)
-               dd->ipath_lasterror = 0;
-       if (dd->ipath_lasthwerror)
-               dd->ipath_lasthwerror = 0;
-       if (dd->ipath_maskederrs
-           && time_after(jiffies, dd->ipath_unmasktime)) {
-               char ebuf[256];
-               int iserr;
-               iserr = ipath_decode_err(dd, ebuf, sizeof ebuf,
-                                        dd->ipath_maskederrs);
-               if (dd->ipath_maskederrs &
-                   ~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
-                     INFINIPATH_E_PKTERRS))
-                       ipath_dev_err(dd, "Re-enabling masked errors "
-                                     "(%s)\n", ebuf);
-               else {
-                       /*
-                        * rcvegrfull and rcvhdrqfull are "normal", for some
-                        * types of processes (mostly benchmarks) that send
-                        * huge numbers of messages, while not processing
-                        * them.  So only complain about these at debug
-                        * level.
-                        */
-                       if (iserr)
-                               ipath_dbg(
-                                       "Re-enabling queue full errors (%s)\n",
-                                       ebuf);
-                       else
-                               ipath_cdbg(ERRPKT, "Re-enabling packet"
-                                       " problem interrupt (%s)\n", ebuf);
-               }
-
-               /* re-enable masked errors */
-               dd->ipath_errormask |= dd->ipath_maskederrs;
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
-                                dd->ipath_errormask);
-               dd->ipath_maskederrs = 0;
-       }
-
-       /* limit qfull messages to ~one per minute per port */
-       if ((++cnt & 0x10)) {
-               for (i = (int) dd->ipath_cfgports; --i >= 0; ) {
-                       struct ipath_portdata *pd = dd->ipath_pd[i];
-
-                       if (pd && pd->port_lastrcvhdrqtail != -1)
-                               pd->port_lastrcvhdrqtail = -1;
-               }
-       }
-
-       ipath_chk_errormask(dd);
-done:
-       mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
-}
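
The removed ipath_snap_cntr() extends the fast-wrapping 32-bit chip counters to 64 bits by accumulating the delta between successive snapshots. A minimal sketch of that wrap-safe accumulation, assuming unsigned 32-bit wrap semantics; struct soft_cntr and snap() are illustrative names, not driver API.

/* Illustrative sketch only: extending a wrapping 32-bit hardware counter to
 * 64 bits by accumulating snapshot deltas, as in the removed ipath_snap_cntr(). */
#include <assert.h>
#include <stdint.h>

struct soft_cntr {
        uint64_t total;         /* 64-bit software view of the counter */
        uint32_t last;          /* last raw 32-bit snapshot */
};

static uint64_t snap(struct soft_cntr *c, uint32_t raw)
{
        /* Unsigned subtraction yields the right delta even if the 32-bit
         * hardware counter wrapped between snapshots. */
        c->total += (uint32_t)(raw - c->last);
        c->last = raw;
        return c->total;
}

int main(void)
{
        struct soft_cntr c = { 0, 0 };

        snap(&c, 0xfffffff0u);                          /* near the 32-bit limit */
        assert(snap(&c, 0x10u) == 0x100000010ULL);      /* wrapped by 0x20 */
        return 0;
}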
diff --git a/drivers/staging/rdma/ipath/ipath_sysfs.c b/drivers/staging/rdma/ipath/ipath_sysfs.c
deleted file mode 100644 (file)
index b12b1f6..0000000
+++ /dev/null
@@ -1,1237 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/ctype.h>
-#include <linux/stat.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-/**
- * ipath_parse_ushort - parse an unsigned short value in an arbitrary base
- * @str: the string containing the number
- * @valp: where to put the result
- *
- * Returns the number of bytes consumed, or a negative value on error.
- */
-int ipath_parse_ushort(const char *str, unsigned short *valp)
-{
-       unsigned long val;
-       char *end;
-       int ret;
-
-       if (!isdigit(str[0])) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       val = simple_strtoul(str, &end, 0);
-
-       if (val > 0xffff) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       *valp = val;
-
-       ret = end + 1 - str;
-       if (ret == 0)
-               ret = -EINVAL;
-
-bail:
-       return ret;
-}
-
-static ssize_t show_version(struct device_driver *dev, char *buf)
-{
-       /* The string printed here is already newline-terminated. */
-       return scnprintf(buf, PAGE_SIZE, "%s", ib_ipath_version);
-}
-
-static ssize_t show_num_units(struct device_driver *dev, char *buf)
-{
-       return scnprintf(buf, PAGE_SIZE, "%d\n",
-                        ipath_count_units(NULL, NULL, NULL));
-}
-
-static ssize_t show_status(struct device *dev,
-                          struct device_attribute *attr,
-                          char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       ssize_t ret;
-
-       if (!dd->ipath_statusp) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       ret = scnprintf(buf, PAGE_SIZE, "0x%llx\n",
-                       (unsigned long long) *(dd->ipath_statusp));
-
-bail:
-       return ret;
-}
-
-static const char *ipath_status_str[] = {
-       "Initted",
-       "Disabled",
-       "Admin_Disabled",
-       "", /* This used to be the old "OIB_SMA" status. */
-       "", /* This used to be the old "SMA" status. */
-       "Present",
-       "IB_link_up",
-       "IB_configured",
-       "NoIBcable",
-       "Fatal_Hardware_Error",
-       NULL,
-};
-
-static ssize_t show_status_str(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int i, any;
-       u64 s;
-       ssize_t ret;
-
-       if (!dd->ipath_statusp) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       s = *(dd->ipath_statusp);
-       *buf = '\0';
-       for (any = i = 0; s && ipath_status_str[i]; i++) {
-               if (s & 1) {
-                       if (any && strlcat(buf, " ", PAGE_SIZE) >=
-                           PAGE_SIZE)
-                               /* overflow */
-                               break;
-                       if (strlcat(buf, ipath_status_str[i],
-                                   PAGE_SIZE) >= PAGE_SIZE)
-                               break;
-                       any = 1;
-               }
-               s >>= 1;
-       }
-       if (any)
-               strlcat(buf, "\n", PAGE_SIZE);
-
-       ret = strlen(buf);
-
-bail:
-       return ret;
-}
-
-static ssize_t show_boardversion(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       /* The string printed here is already newline-terminated. */
-       return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_boardversion);
-}
-
-static ssize_t show_localbus_info(struct device *dev,
-                              struct device_attribute *attr,
-                              char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       /* The string printed here is already newline-terminated. */
-       return scnprintf(buf, PAGE_SIZE, "%s", dd->ipath_lbus_info);
-}
-
-static ssize_t show_lmc(struct device *dev,
-                       struct device_attribute *attr,
-                       char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_lmc);
-}
-
-static ssize_t store_lmc(struct device *dev,
-                        struct device_attribute *attr,
-                        const char *buf,
-                        size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 lmc = 0;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &lmc);
-       if (ret < 0)
-               goto invalid;
-
-       if (lmc > 7) {
-               ret = -EINVAL;
-               goto invalid;
-       }
-
-       ipath_set_lid(dd, dd->ipath_lid, lmc);
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid LMC %u\n", lmc);
-bail:
-       return ret;
-}
-
-static ssize_t show_lid(struct device *dev,
-                       struct device_attribute *attr,
-                       char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_lid);
-}
-
-static ssize_t store_lid(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 lid = 0;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &lid);
-       if (ret < 0)
-               goto invalid;
-
-       if (lid == 0 || lid >= IPATH_MULTICAST_LID_BASE) {
-               ret = -EINVAL;
-               goto invalid;
-       }
-
-       ipath_set_lid(dd, lid, dd->ipath_lmc);
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid LID 0x%x\n", lid);
-bail:
-       return ret;
-}
-
-static ssize_t show_mlid(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "0x%x\n", dd->ipath_mlid);
-}
-
-static ssize_t store_mlid(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 mlid;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &mlid);
-       if (ret < 0 || mlid < IPATH_MULTICAST_LID_BASE)
-               goto invalid;
-
-       dd->ipath_mlid = mlid;
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid MLID\n");
-bail:
-       return ret;
-}
-
-static ssize_t show_guid(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u8 *guid;
-
-       guid = (u8 *)&dd->ipath_guid;
-
-       return scnprintf(buf, PAGE_SIZE,
-                        "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
-                        guid[0], guid[1], guid[2], guid[3],
-                        guid[4], guid[5], guid[6], guid[7]);
-}
-
-static ssize_t store_guid(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       ssize_t ret;
-       unsigned short guid[8];
-       __be64 new_guid;
-       u8 *ng;
-       int i;
-
-       if (sscanf(buf, "%hx:%hx:%hx:%hx:%hx:%hx:%hx:%hx",
-                  &guid[0], &guid[1], &guid[2], &guid[3],
-                  &guid[4], &guid[5], &guid[6], &guid[7]) != 8)
-               goto invalid;
-
-       ng = (u8 *) &new_guid;
-
-       for (i = 0; i < 8; i++) {
-               if (guid[i] > 0xff)
-                       goto invalid;
-               ng[i] = guid[i];
-       }
-
-       if (new_guid == 0)
-               goto invalid;
-
-       dd->ipath_guid = new_guid;
-       dd->ipath_nguid = 1;
-       if (dd->verbs_dev)
-               dd->verbs_dev->ibdev.node_guid = new_guid;
-
-       ret = strlen(buf);
-       goto bail;
-
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid GUID\n");
-       ret = -EINVAL;
-
-bail:
-       return ret;
-}
-
-static ssize_t show_nguid(struct device *dev,
-                         struct device_attribute *attr,
-                         char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_nguid);
-}
-
-static ssize_t show_nports(struct device *dev,
-                          struct device_attribute *attr,
-                          char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       /* Return the number of user ports available. */
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_cfgports - 1);
-}
-
-static ssize_t show_serial(struct device *dev,
-                          struct device_attribute *attr,
-                          char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       buf[sizeof dd->ipath_serial] = '\0';
-       memcpy(buf, dd->ipath_serial, sizeof dd->ipath_serial);
-       strcat(buf, "\n");
-       return strlen(buf);
-}
-
-static ssize_t show_unit(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_unit);
-}
-
-static ssize_t show_jint_max_packets(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_max_packets);
-}
-
-static ssize_t store_jint_max_packets(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf,
-                                     size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 v = 0;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &v);
-       if (ret < 0)
-               ipath_dev_err(dd, "invalid jint_max_packets.\n");
-       else
-               dd->ipath_f_config_jint(dd, dd->ipath_jint_idle_ticks, v);
-
-       return ret;
-}
-
-static ssize_t show_jint_idle_ticks(struct device *dev,
-                                   struct device_attribute *attr,
-                                   char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-
-       return scnprintf(buf, PAGE_SIZE, "%hu\n", dd->ipath_jint_idle_ticks);
-}
-
-static ssize_t store_jint_idle_ticks(struct device *dev,
-                                    struct device_attribute *attr,
-                                    const char *buf,
-                                    size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       u16 v = 0;
-       int ret;
-
-       ret = ipath_parse_ushort(buf, &v);
-       if (ret < 0)
-               ipath_dev_err(dd, "invalid jint_idle_ticks.\n");
-       else
-               dd->ipath_f_config_jint(dd, v, dd->ipath_jint_max_packets);
-
-       return ret;
-}
-
-#define DEVICE_COUNTER(name, attr) \
-       static ssize_t show_counter_##name(struct device *dev, \
-                                          struct device_attribute *attr, \
-                                          char *buf) \
-       { \
-               struct ipath_devdata *dd = dev_get_drvdata(dev); \
-               return scnprintf(\
-                       buf, PAGE_SIZE, "%llu\n", (unsigned long long) \
-                       ipath_snap_cntr( \
-                               dd, offsetof(struct infinipath_counters, \
-                                            attr) / sizeof(u64)));     \
-       } \
-       static DEVICE_ATTR(name, S_IRUGO, show_counter_##name, NULL);
-
-DEVICE_COUNTER(ib_link_downeds, IBLinkDownedCnt);
-DEVICE_COUNTER(ib_link_err_recoveries, IBLinkErrRecoveryCnt);
-DEVICE_COUNTER(ib_status_changes, IBStatusChangeCnt);
-DEVICE_COUNTER(ib_symbol_errs, IBSymbolErrCnt);
-DEVICE_COUNTER(lb_flow_stalls, LBFlowStallCnt);
-DEVICE_COUNTER(lb_ints, LBIntCnt);
-DEVICE_COUNTER(rx_bad_formats, RxBadFormatCnt);
-DEVICE_COUNTER(rx_buf_ovfls, RxBufOvflCnt);
-DEVICE_COUNTER(rx_data_pkts, RxDataPktCnt);
-DEVICE_COUNTER(rx_dropped_pkts, RxDroppedPktCnt);
-DEVICE_COUNTER(rx_dwords, RxDwordCnt);
-DEVICE_COUNTER(rx_ebps, RxEBPCnt);
-DEVICE_COUNTER(rx_flow_ctrl_errs, RxFlowCtrlErrCnt);
-DEVICE_COUNTER(rx_flow_pkts, RxFlowPktCnt);
-DEVICE_COUNTER(rx_icrc_errs, RxICRCErrCnt);
-DEVICE_COUNTER(rx_len_errs, RxLenErrCnt);
-DEVICE_COUNTER(rx_link_problems, RxLinkProblemCnt);
-DEVICE_COUNTER(rx_lpcrc_errs, RxLPCRCErrCnt);
-DEVICE_COUNTER(rx_max_min_len_errs, RxMaxMinLenErrCnt);
-DEVICE_COUNTER(rx_p0_hdr_egr_ovfls, RxP0HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p1_hdr_egr_ovfls, RxP1HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p2_hdr_egr_ovfls, RxP2HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p3_hdr_egr_ovfls, RxP3HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p4_hdr_egr_ovfls, RxP4HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p5_hdr_egr_ovfls, RxP5HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p6_hdr_egr_ovfls, RxP6HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p7_hdr_egr_ovfls, RxP7HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_p8_hdr_egr_ovfls, RxP8HdrEgrOvflCnt);
-DEVICE_COUNTER(rx_pkey_mismatches, RxPKeyMismatchCnt);
-DEVICE_COUNTER(rx_tid_full_errs, RxTIDFullErrCnt);
-DEVICE_COUNTER(rx_tid_valid_errs, RxTIDValidErrCnt);
-DEVICE_COUNTER(rx_vcrc_errs, RxVCRCErrCnt);
-DEVICE_COUNTER(tx_data_pkts, TxDataPktCnt);
-DEVICE_COUNTER(tx_dropped_pkts, TxDroppedPktCnt);
-DEVICE_COUNTER(tx_dwords, TxDwordCnt);
-DEVICE_COUNTER(tx_flow_pkts, TxFlowPktCnt);
-DEVICE_COUNTER(tx_flow_stalls, TxFlowStallCnt);
-DEVICE_COUNTER(tx_len_errs, TxLenErrCnt);
-DEVICE_COUNTER(tx_max_min_len_errs, TxMaxMinLenErrCnt);
-DEVICE_COUNTER(tx_underruns, TxUnderrunCnt);
-DEVICE_COUNTER(tx_unsup_vl_errs, TxUnsupVLErrCnt);
-
-static struct attribute *dev_counter_attributes[] = {
-       &dev_attr_ib_link_downeds.attr,
-       &dev_attr_ib_link_err_recoveries.attr,
-       &dev_attr_ib_status_changes.attr,
-       &dev_attr_ib_symbol_errs.attr,
-       &dev_attr_lb_flow_stalls.attr,
-       &dev_attr_lb_ints.attr,
-       &dev_attr_rx_bad_formats.attr,
-       &dev_attr_rx_buf_ovfls.attr,
-       &dev_attr_rx_data_pkts.attr,
-       &dev_attr_rx_dropped_pkts.attr,
-       &dev_attr_rx_dwords.attr,
-       &dev_attr_rx_ebps.attr,
-       &dev_attr_rx_flow_ctrl_errs.attr,
-       &dev_attr_rx_flow_pkts.attr,
-       &dev_attr_rx_icrc_errs.attr,
-       &dev_attr_rx_len_errs.attr,
-       &dev_attr_rx_link_problems.attr,
-       &dev_attr_rx_lpcrc_errs.attr,
-       &dev_attr_rx_max_min_len_errs.attr,
-       &dev_attr_rx_p0_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p1_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p2_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p3_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p4_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p5_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p6_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p7_hdr_egr_ovfls.attr,
-       &dev_attr_rx_p8_hdr_egr_ovfls.attr,
-       &dev_attr_rx_pkey_mismatches.attr,
-       &dev_attr_rx_tid_full_errs.attr,
-       &dev_attr_rx_tid_valid_errs.attr,
-       &dev_attr_rx_vcrc_errs.attr,
-       &dev_attr_tx_data_pkts.attr,
-       &dev_attr_tx_dropped_pkts.attr,
-       &dev_attr_tx_dwords.attr,
-       &dev_attr_tx_flow_pkts.attr,
-       &dev_attr_tx_flow_stalls.attr,
-       &dev_attr_tx_len_errs.attr,
-       &dev_attr_tx_max_min_len_errs.attr,
-       &dev_attr_tx_underruns.attr,
-       &dev_attr_tx_unsup_vl_errs.attr,
-       NULL
-};
-
-static struct attribute_group dev_counter_attr_group = {
-       .name = "counters",
-       .attrs = dev_counter_attributes
-};
-
-static ssize_t store_reset(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       if (count < 5 || memcmp(buf, "reset", 5)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       if (dd->ipath_flags & IPATH_DISABLED) {
-               /*
-                * Post-reset init would re-enable interrupts, etc.,
-                * so don't allow a reset on disabled devices.  -EINVAL
-                * is not a perfect error code, but it is about the
-                * best choice available.
-                */
-               dev_info(dev, "Unit %d is disabled, can't reset\n",
-                        dd->ipath_unit);
-               ret = -EINVAL;
-               goto bail;
-       }
-       ret = ipath_reset_device(dd->ipath_unit);
-bail:
-       return ret < 0 ? ret : count;
-}
-
-static ssize_t store_link_state(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 state;
-
-       ret = ipath_parse_ushort(buf, &state);
-       if (ret < 0)
-               goto invalid;
-
-       r = ipath_set_linkstate(dd, state);
-       if (r < 0) {
-               ret = r;
-               goto bail;
-       }
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid link state\n");
-bail:
-       return ret;
-}
-
-static ssize_t show_mtu(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       return scnprintf(buf, PAGE_SIZE, "%u\n", dd->ipath_ibmtu);
-}
-
-static ssize_t store_mtu(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       ssize_t ret;
-       u16 mtu = 0;
-       int r;
-
-       ret = ipath_parse_ushort(buf, &mtu);
-       if (ret < 0)
-               goto invalid;
-
-       r = ipath_set_mtu(dd, mtu);
-       if (r < 0)
-               ret = r;
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid MTU\n");
-bail:
-       return ret;
-}
-
-static ssize_t show_enabled(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       return scnprintf(buf, PAGE_SIZE, "%u\n",
-                        (dd->ipath_flags & IPATH_DISABLED) ? 0 : 1);
-}
-
-static ssize_t store_enabled(struct device *dev,
-                        struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       ssize_t ret;
-       u16 enable = 0;
-
-       ret = ipath_parse_ushort(buf, &enable);
-       if (ret < 0) {
-               ipath_dev_err(dd, "attempt to use non-numeric on enable\n");
-               goto bail;
-       }
-
-       if (enable) {
-               if (!(dd->ipath_flags & IPATH_DISABLED))
-                       goto bail;
-
-               dev_info(dev, "Enabling unit %d\n", dd->ipath_unit);
-               /* same as post-reset */
-               ret = ipath_init_chip(dd, 1);
-               if (ret)
-                       ipath_dev_err(dd, "Failed to enable unit %d\n",
-                                     dd->ipath_unit);
-               else {
-                       dd->ipath_flags &= ~IPATH_DISABLED;
-                       *dd->ipath_statusp &= ~IPATH_STATUS_ADMIN_DISABLED;
-               }
-       } else if (!(dd->ipath_flags & IPATH_DISABLED)) {
-               dev_info(dev, "Disabling unit %d\n", dd->ipath_unit);
-               ipath_shutdown_device(dd);
-               dd->ipath_flags |= IPATH_DISABLED;
-               *dd->ipath_statusp |= IPATH_STATUS_ADMIN_DISABLED;
-       }
-
-bail:
-       return ret;
-}
-
-static ssize_t store_rx_pol_inv(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret < 0)
-               goto invalid;
-
-       r = ipath_set_rx_pol_inv(dd, val);
-       if (r < 0) {
-               ret = r;
-               goto bail;
-       }
-
-       goto bail;
-invalid:
-       ipath_dev_err(dd, "attempt to set invalid Rx Polarity invert\n");
-bail:
-       return ret;
-}
-
-static ssize_t store_led_override(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret > 0)
-               ipath_set_led_override(dd, val);
-       else
-               ipath_dev_err(dd, "attempt to set invalid LED override\n");
-       return ret;
-}
-
-static ssize_t show_logged_errs(struct device *dev,
-                               struct device_attribute *attr,
-                               char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int idx, count;
-
-       /* force consistency with actual EEPROM */
-       if (ipath_update_eeprom_log(dd) != 0)
-               return -ENXIO;
-
-       count = 0;
-       for (idx = 0; idx < IPATH_EEP_LOG_CNT; ++idx) {
-               count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
-                       dd->ipath_eep_st_errs[idx],
-                       idx == (IPATH_EEP_LOG_CNT - 1) ? '\n' : ' ');
-       }
-
-       return count;
-}
-
-/*
- * New sysfs entries to control various aspects of the IB configuration.
- * These all turn into accesses via ipath_f_get/set_ib_cfg.
- *
- * Get/Set heartbeat enable.  Bitwise OR of 1=enabled, 2=auto.
- */
-static ssize_t show_hrtbt_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_HRTBT);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_hrtbt_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && val > 3)
-               ret = -EINVAL;
-       if (ret < 0) {
-               ipath_dev_err(dd, "attempt to set invalid Heartbeat enable\n");
-               goto bail;
-       }
-
-       /*
-        * Set the "intentional" heartbeat enable for either "Enable" or
-        * "Auto", since the two are normally set together.  This flag is
-        * consulted when leaving loopback mode, because entering loopback
-        * mode overrides it and automatically disables heartbeat.
-        */
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_HRTBT, val);
-       if (r < 0)
-               ret = r;
-       else if (val == IPATH_IB_HRTBT_OFF)
-               dd->ipath_flags |= IPATH_NO_HRTBT;
-       else
-               dd->ipath_flags &= ~IPATH_NO_HRTBT;
-
-bail:
-       return ret;
-}
-
-/*
- * Get/Set link widths enabled.  Bitwise OR of 1=1x, 2=4x (this is the
- * human/IB-centric value, _not_ the particular encoding of any given chip).
- */
-static ssize_t show_lwid_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_lwid_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && (val == 0 || val > 3))
-               ret = -EINVAL;
-       if (ret < 0) {
-               ipath_dev_err(dd,
-                       "attempt to set invalid Link Width (enable)\n");
-               goto bail;
-       }
-
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LWID_ENB, val);
-       if (r < 0)
-               ret = r;
-
-bail:
-       return ret;
-}
-
-/* Get current link width */
-static ssize_t show_lwid(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LWID);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-/*
- * Get/Set link speeds enabled.  Bitwise OR of 1=SDR, 2=DDR.
- */
-static ssize_t show_spd_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_spd_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && (val == 0 || val > (IPATH_IB_SDR | IPATH_IB_DDR)))
-               ret = -EINVAL;
-       if (ret < 0) {
-               ipath_dev_err(dd,
-                       "attempt to set invalid Link Speed (enable)\n");
-               goto bail;
-       }
-
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_SPD_ENB, val);
-       if (r < 0)
-               ret = r;
-
-bail:
-       return ret;
-}
-
-/* Get current link speed */
-static ssize_t show_spd(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_SPD);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-/*
- * Get/Set RX polarity-invert enable. 0=no, 1=yes.
- */
-static ssize_t show_rx_polinv_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_rx_polinv_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && val > 1) {
-               ipath_dev_err(dd,
-                       "attempt to set invalid Rx Polarity (enable)\n");
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_RXPOL_ENB, val);
-       if (r < 0)
-               ret = r;
-
-bail:
-       return ret;
-}
-
-/*
- * Get/Set RX lane-reversal enable. 0=no, 1=yes.
- */
-static ssize_t show_lanerev_enb(struct device *dev,
-                        struct device_attribute *attr,
-                        char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-
-       ret = dd->ipath_f_get_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB);
-       if (ret >= 0)
-               ret = scnprintf(buf, PAGE_SIZE, "%d\n", ret);
-       return ret;
-}
-
-static ssize_t store_lanerev_enb(struct device *dev,
-                         struct device_attribute *attr,
-                         const char *buf,
-                         size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, r;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret >= 0 && val > 1) {
-               ret = -EINVAL;
-               ipath_dev_err(dd,
-                       "attempt to set invalid Lane reversal (enable)\n");
-               goto bail;
-       }
-
-       r = dd->ipath_f_set_ib_cfg(dd, IPATH_IB_CFG_LREV_ENB, val);
-       if (r < 0)
-               ret = r;
-
-bail:
-       return ret;
-}
-
-static DRIVER_ATTR(num_units, S_IRUGO, show_num_units, NULL);
-static DRIVER_ATTR(version, S_IRUGO, show_version, NULL);
-
-static struct attribute *driver_attributes[] = {
-       &driver_attr_num_units.attr,
-       &driver_attr_version.attr,
-       NULL
-};
-
-static struct attribute_group driver_attr_group = {
-       .attrs = driver_attributes
-};
-
-static ssize_t store_tempsense(struct device *dev,
-                              struct device_attribute *attr,
-                              const char *buf,
-                              size_t count)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret, stat;
-       u16 val;
-
-       ret = ipath_parse_ushort(buf, &val);
-       if (ret <= 0) {
-               ipath_dev_err(dd, "attempt to set invalid tempsense config\n");
-               goto bail;
-       }
-       /* If anything but the highest limit, enable T_CRIT_A "interrupt" */
-       stat = ipath_tempsense_write(dd, 9, (val == 0x7f7f) ? 0x80 : 0);
-       if (stat) {
-               ipath_dev_err(dd, "Unable to set tempsense config\n");
-               ret = -1;
-               goto bail;
-       }
-       stat = ipath_tempsense_write(dd, 0xB, (u8) (val & 0xFF));
-       if (stat) {
-               ipath_dev_err(dd, "Unable to set local Tcrit\n");
-               ret = -1;
-               goto bail;
-       }
-       stat = ipath_tempsense_write(dd, 0xD, (u8) (val >> 8));
-       if (stat) {
-               ipath_dev_err(dd, "Unable to set remote Tcrit\n");
-               ret = -1;
-               goto bail;
-       }
-
-bail:
-       return ret;
-}
-
-/*
- * Dump the tempsense registers in decimal, to make shell scripting easier.
- */
-static ssize_t show_tempsense(struct device *dev,
-                             struct device_attribute *attr,
-                             char *buf)
-{
-       struct ipath_devdata *dd = dev_get_drvdata(dev);
-       int ret;
-       int idx;
-       u8 regvals[8];
-
-       ret = -ENXIO;
-       for (idx = 0; idx < 8; ++idx) {
-               if (idx == 6)
-                       continue;
-               ret = ipath_tempsense_read(dd, idx);
-               if (ret < 0)
-                       break;
-               regvals[idx] = ret;
-       }
-       if (idx == 8)
-               ret = scnprintf(buf, PAGE_SIZE, "%d %d %02X %02X %d %d\n",
-                       *(signed char *)(regvals),
-                       *(signed char *)(regvals + 1),
-                       regvals[2], regvals[3],
-                       *(signed char *)(regvals + 5),
-                       *(signed char *)(regvals + 7));
-       return ret;
-}
-
-const struct attribute_group *ipath_driver_attr_groups[] = {
-       &driver_attr_group,
-       NULL,
-};
-
-static DEVICE_ATTR(guid, S_IWUSR | S_IRUGO, show_guid, store_guid);
-static DEVICE_ATTR(lmc, S_IWUSR | S_IRUGO, show_lmc, store_lmc);
-static DEVICE_ATTR(lid, S_IWUSR | S_IRUGO, show_lid, store_lid);
-static DEVICE_ATTR(link_state, S_IWUSR, NULL, store_link_state);
-static DEVICE_ATTR(mlid, S_IWUSR | S_IRUGO, show_mlid, store_mlid);
-static DEVICE_ATTR(mtu, S_IWUSR | S_IRUGO, show_mtu, store_mtu);
-static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO, show_enabled, store_enabled);
-static DEVICE_ATTR(nguid, S_IRUGO, show_nguid, NULL);
-static DEVICE_ATTR(nports, S_IRUGO, show_nports, NULL);
-static DEVICE_ATTR(reset, S_IWUSR, NULL, store_reset);
-static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
-static DEVICE_ATTR(status_str, S_IRUGO, show_status_str, NULL);
-static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(unit, S_IRUGO, show_unit, NULL);
-static DEVICE_ATTR(rx_pol_inv, S_IWUSR, NULL, store_rx_pol_inv);
-static DEVICE_ATTR(led_override, S_IWUSR, NULL, store_led_override);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
-static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
-static DEVICE_ATTR(jint_max_packets, S_IWUSR | S_IRUGO,
-                  show_jint_max_packets, store_jint_max_packets);
-static DEVICE_ATTR(jint_idle_ticks, S_IWUSR | S_IRUGO,
-                  show_jint_idle_ticks, store_jint_idle_ticks);
-static DEVICE_ATTR(tempsense, S_IWUSR | S_IRUGO,
-                  show_tempsense, store_tempsense);
-
-static struct attribute *dev_attributes[] = {
-       &dev_attr_guid.attr,
-       &dev_attr_lmc.attr,
-       &dev_attr_lid.attr,
-       &dev_attr_link_state.attr,
-       &dev_attr_mlid.attr,
-       &dev_attr_mtu.attr,
-       &dev_attr_nguid.attr,
-       &dev_attr_nports.attr,
-       &dev_attr_serial.attr,
-       &dev_attr_status.attr,
-       &dev_attr_status_str.attr,
-       &dev_attr_boardversion.attr,
-       &dev_attr_unit.attr,
-       &dev_attr_enabled.attr,
-       &dev_attr_rx_pol_inv.attr,
-       &dev_attr_led_override.attr,
-       &dev_attr_logged_errors.attr,
-       &dev_attr_tempsense.attr,
-       &dev_attr_localbus_info.attr,
-       NULL
-};
-
-static struct attribute_group dev_attr_group = {
-       .attrs = dev_attributes
-};
-
-static DEVICE_ATTR(hrtbt_enable, S_IWUSR | S_IRUGO, show_hrtbt_enb,
-                  store_hrtbt_enb);
-static DEVICE_ATTR(link_width_enable, S_IWUSR | S_IRUGO, show_lwid_enb,
-                  store_lwid_enb);
-static DEVICE_ATTR(link_width, S_IRUGO, show_lwid, NULL);
-static DEVICE_ATTR(link_speed_enable, S_IWUSR | S_IRUGO, show_spd_enb,
-                  store_spd_enb);
-static DEVICE_ATTR(link_speed, S_IRUGO, show_spd, NULL);
-static DEVICE_ATTR(rx_pol_inv_enable, S_IWUSR | S_IRUGO, show_rx_polinv_enb,
-                  store_rx_polinv_enb);
-static DEVICE_ATTR(rx_lane_rev_enable, S_IWUSR | S_IRUGO, show_lanerev_enb,
-                  store_lanerev_enb);
-
-static struct attribute *dev_ibcfg_attributes[] = {
-       &dev_attr_hrtbt_enable.attr,
-       &dev_attr_link_width_enable.attr,
-       &dev_attr_link_width.attr,
-       &dev_attr_link_speed_enable.attr,
-       &dev_attr_link_speed.attr,
-       &dev_attr_rx_pol_inv_enable.attr,
-       &dev_attr_rx_lane_rev_enable.attr,
-       NULL
-};
-
-static struct attribute_group dev_ibcfg_attr_group = {
-       .attrs = dev_ibcfg_attributes
-};
-
-/**
- * ipath_expose_reset - create a device reset file
- * @dev: the device structure
- *
- * Only expose a file that lets us reset the device after someone
- * enters diag mode.  A device reset is quite likely to crash the
- * machine entirely, so we normally don't want to make it
- * available.
- *
- * Called with ipath_mutex held.
- */
-int ipath_expose_reset(struct device *dev)
-{
-       static int exposed;
-       int ret;
-
-       if (!exposed) {
-               ret = device_create_file(dev, &dev_attr_reset);
-               exposed = 1;
-       } else {
-               ret = 0;
-       }
-
-       return ret;
-}
-
-int ipath_device_create_group(struct device *dev, struct ipath_devdata *dd)
-{
-       int ret;
-
-       ret = sysfs_create_group(&dev->kobj, &dev_attr_group);
-       if (ret)
-               goto bail;
-
-       ret = sysfs_create_group(&dev->kobj, &dev_counter_attr_group);
-       if (ret)
-               goto bail_attrs;
-
-       if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
-               ret = device_create_file(dev, &dev_attr_jint_idle_ticks);
-               if (ret)
-                       goto bail_counter;
-               ret = device_create_file(dev, &dev_attr_jint_max_packets);
-               if (ret)
-                       goto bail_idle;
-
-               ret = sysfs_create_group(&dev->kobj, &dev_ibcfg_attr_group);
-               if (ret)
-                       goto bail_max;
-       }
-
-       return 0;
-
-bail_max:
-       device_remove_file(dev, &dev_attr_jint_max_packets);
-bail_idle:
-       device_remove_file(dev, &dev_attr_jint_idle_ticks);
-bail_counter:
-       sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
-bail_attrs:
-       sysfs_remove_group(&dev->kobj, &dev_attr_group);
-bail:
-       return ret;
-}
-
-void ipath_device_remove_group(struct device *dev, struct ipath_devdata *dd)
-{
-       sysfs_remove_group(&dev->kobj, &dev_counter_attr_group);
-
-       if (dd->ipath_flags & IPATH_HAS_MULT_IB_SPEED) {
-               sysfs_remove_group(&dev->kobj, &dev_ibcfg_attr_group);
-               device_remove_file(dev, &dev_attr_jint_idle_ticks);
-               device_remove_file(dev, &dev_attr_jint_max_packets);
-       }
-
-       sysfs_remove_group(&dev->kobj, &dev_attr_group);
-
-       device_remove_file(dev, &dev_attr_reset);
-}
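
Almost everything in the file removed above is one repeating pattern: a show/store
pair wrapped by DEVICE_ATTR(), collected into a struct attribute_group, and
registered with sysfs_create_group() (torn down again with sysfs_remove_group()).
A minimal sketch of that pattern follows; the "example" attribute, struct exdata,
and the example_register()/example_unregister() helpers are hypothetical and only
illustrate the shape of the deleted code, they are not part of the driver.

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

struct exdata {
        u16 value;
};

static ssize_t show_example(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct exdata *xd = dev_get_drvdata(dev);

        return scnprintf(buf, PAGE_SIZE, "%u\n", xd->value);
}

static ssize_t store_example(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        struct exdata *xd = dev_get_drvdata(dev);
        u16 val;
        int ret;

        ret = kstrtou16(buf, 0, &val);  /* stricter than simple_strtoul() */
        if (ret)
                return ret;

        xd->value = val;
        return count;                   /* a store handler returns bytes consumed */
}

static DEVICE_ATTR(example, S_IWUSR | S_IRUGO, show_example, store_example);

static struct attribute *example_attributes[] = {
        &dev_attr_example.attr,
        NULL
};

static struct attribute_group example_attr_group = {
        .attrs = example_attributes
};

/* Registration mirrors ipath_device_create_group()/ipath_device_remove_group(). */
static int example_register(struct device *dev)
{
        return sysfs_create_group(&dev->kobj, &example_attr_group);
}

static void example_unregister(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &example_attr_group);
}

Returning count from a successful store is what several of the deleted handlers
achieve indirectly, since ipath_parse_ushort() returns the number of bytes it
consumed.
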
diff --git a/drivers/staging/rdma/ipath/ipath_uc.c b/drivers/staging/rdma/ipath/ipath_uc.c
deleted file mode 100644 (file)
index 0246b30..0000000
+++ /dev/null
@@ -1,547 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/* cut down ridiculously long IB macro names */
-#define OP(x) IB_OPCODE_UC_##x
-
-/**
- * ipath_make_uc_req - construct a request packet (SEND, RDMA write)
- * @qp: a pointer to the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_uc_req(struct ipath_qp *qp)
-{
-       struct ipath_other_headers *ohdr;
-       struct ipath_swqe *wqe;
-       unsigned long flags;
-       u32 hwords;
-       u32 bth0;
-       u32 len;
-       u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-       int ret = 0;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)) {
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
-                       goto bail;
-               /* We are in the error state, flush the work request. */
-               if (qp->s_last == qp->s_head)
-                       goto bail;
-               /* If DMAs are in progress, we can't flush immediately. */
-               if (atomic_read(&qp->s_dma_busy)) {
-                       qp->s_flags |= IPATH_S_WAIT_DMA;
-                       goto bail;
-               }
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
-               goto done;
-       }
-
-       ohdr = &qp->s_hdr.u.oth;
-       if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
-               ohdr = &qp->s_hdr.u.l.oth;
-
-       /* header size in 32-bit words LRH+BTH = (8+12)/4. */
-       hwords = 5;
-       bth0 = 1 << 22; /* Set M bit */
-
-       /* Get the next send request. */
-       wqe = get_swqe_ptr(qp, qp->s_cur);
-       qp->s_wqe = NULL;
-       switch (qp->s_state) {
-       default:
-               if (!(ib_ipath_state_ops[qp->state] &
-                   IPATH_PROCESS_NEXT_SEND_OK))
-                       goto bail;
-               /* Check if send work queue is empty. */
-               if (qp->s_cur == qp->s_head)
-                       goto bail;
-               /*
-                * Start a new request.
-                */
-               qp->s_psn = wqe->psn = qp->s_next_psn;
-               qp->s_sge.sge = wqe->sg_list[0];
-               qp->s_sge.sg_list = wqe->sg_list + 1;
-               qp->s_sge.num_sge = wqe->wr.num_sge;
-               qp->s_len = len = wqe->length;
-               switch (wqe->wr.opcode) {
-               case IB_WR_SEND:
-               case IB_WR_SEND_WITH_IMM:
-                       if (len > pmtu) {
-                               qp->s_state = OP(SEND_FIRST);
-                               len = pmtu;
-                               break;
-                       }
-                       if (wqe->wr.opcode == IB_WR_SEND)
-                               qp->s_state = OP(SEND_ONLY);
-                       else {
-                               qp->s_state =
-                                       OP(SEND_ONLY_WITH_IMMEDIATE);
-                               /* Immediate data comes after the BTH */
-                               ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                               hwords += 1;
-                       }
-                       if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                               bth0 |= 1 << 23;
-                       qp->s_wqe = wqe;
-                       if (++qp->s_cur >= qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               case IB_WR_RDMA_WRITE:
-               case IB_WR_RDMA_WRITE_WITH_IMM:
-                       ohdr->u.rc.reth.vaddr =
-                               cpu_to_be64(wqe->rdma_wr.remote_addr);
-                       ohdr->u.rc.reth.rkey =
-                               cpu_to_be32(wqe->rdma_wr.rkey);
-                       ohdr->u.rc.reth.length = cpu_to_be32(len);
-                       hwords += sizeof(struct ib_reth) / 4;
-                       if (len > pmtu) {
-                               qp->s_state = OP(RDMA_WRITE_FIRST);
-                               len = pmtu;
-                               break;
-                       }
-                       if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-                               qp->s_state = OP(RDMA_WRITE_ONLY);
-                       else {
-                               qp->s_state =
-                                       OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
-                               /* Immediate data comes after the RETH */
-                               ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
-                               hwords += 1;
-                               if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                                       bth0 |= 1 << 23;
-                       }
-                       qp->s_wqe = wqe;
-                       if (++qp->s_cur >= qp->s_size)
-                               qp->s_cur = 0;
-                       break;
-
-               default:
-                       goto bail;
-               }
-               break;
-
-       case OP(SEND_FIRST):
-               qp->s_state = OP(SEND_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(SEND_MIDDLE):
-               len = qp->s_len;
-               if (len > pmtu) {
-                       len = pmtu;
-                       break;
-               }
-               if (wqe->wr.opcode == IB_WR_SEND)
-                       qp->s_state = OP(SEND_LAST);
-               else {
-                       qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
-                       /* Immediate data comes after the BTH */
-                       ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                       hwords += 1;
-               }
-               if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                       bth0 |= 1 << 23;
-               qp->s_wqe = wqe;
-               if (++qp->s_cur >= qp->s_size)
-                       qp->s_cur = 0;
-               break;
-
-       case OP(RDMA_WRITE_FIRST):
-               qp->s_state = OP(RDMA_WRITE_MIDDLE);
-               /* FALLTHROUGH */
-       case OP(RDMA_WRITE_MIDDLE):
-               len = qp->s_len;
-               if (len > pmtu) {
-                       len = pmtu;
-                       break;
-               }
-               if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
-                       qp->s_state = OP(RDMA_WRITE_LAST);
-               else {
-                       qp->s_state =
-                               OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
-                       /* Immediate data comes after the BTH */
-                       ohdr->u.imm_data = wqe->wr.ex.imm_data;
-                       hwords += 1;
-                       if (wqe->wr.send_flags & IB_SEND_SOLICITED)
-                               bth0 |= 1 << 23;
-               }
-               qp->s_wqe = wqe;
-               if (++qp->s_cur >= qp->s_size)
-                       qp->s_cur = 0;
-               break;
-       }
-       qp->s_len -= len;
-       qp->s_hdrwords = hwords;
-       qp->s_cur_sge = &qp->s_sge;
-       qp->s_cur_size = len;
-       ipath_make_ruc_header(to_idev(qp->ibqp.device),
-                             qp, ohdr, bth0 | (qp->s_state << 24),
-                             qp->s_next_psn++ & IPATH_PSN_MASK);
-done:
-       ret = 1;
-       goto unlock;
-
-bail:
-       qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       return ret;
-}
-
-/**
- * ipath_uc_rcv - handle an incoming UC packet
- * @dev: the device the packet came in on
- * @hdr: the header of the packet
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the length of the packet
- * @qp: the QP for this packet.
- *
- * This is called from ipath_qp_rcv() to process an incoming UC packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
-       struct ipath_other_headers *ohdr;
-       int opcode;
-       u32 hdrsize;
-       u32 psn;
-       u32 pad;
-       struct ib_wc wc;
-       u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
-       struct ib_reth *reth;
-       int header_in_data;
-
-       /* Validate the SLID. See Ch. 9.6.1.5 */
-       if (unlikely(be16_to_cpu(hdr->lrh[3]) != qp->remote_ah_attr.dlid))
-               goto done;
-
-       /* Check for GRH */
-       if (!has_grh) {
-               ohdr = &hdr->u.oth;
-               hdrsize = 8 + 12;       /* LRH + BTH */
-               psn = be32_to_cpu(ohdr->bth[2]);
-               header_in_data = 0;
-       } else {
-               ohdr = &hdr->u.l.oth;
-               hdrsize = 8 + 40 + 12;  /* LRH + GRH + BTH */
-               /*
-                * The header with a GRH is 60 bytes and the core driver
-                * sets the eager header buffer size to 56 bytes, so the
-                * last 4 bytes of the BTH (the PSN) end up in the data
-                * buffer.
-                */
-               header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
-               if (header_in_data) {
-                       psn = be32_to_cpu(((__be32 *) data)[0]);
-                       data += sizeof(__be32);
-               } else
-                       psn = be32_to_cpu(ohdr->bth[2]);
-       }
-       /*
-        * The opcode is in the low byte when it is in network order
-        * (the top byte when in host order).
-        */
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-
-       memset(&wc, 0, sizeof wc);
-
-       /* Compare the PSN verses the expected PSN. */
-       if (unlikely(ipath_cmp24(psn, qp->r_psn) != 0)) {
-               /*
-                * Handle a sequence error.
-                * Silently drop any current message.
-                */
-               qp->r_psn = psn;
-       inv:
-               qp->r_state = OP(SEND_LAST);
-               switch (opcode) {
-               case OP(SEND_FIRST):
-               case OP(SEND_ONLY):
-               case OP(SEND_ONLY_WITH_IMMEDIATE):
-                       goto send_first;
-
-               case OP(RDMA_WRITE_FIRST):
-               case OP(RDMA_WRITE_ONLY):
-               case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
-                       goto rdma_first;
-
-               default:
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-       }
-
-       /* Check for opcode sequence errors. */
-       switch (qp->r_state) {
-       case OP(SEND_FIRST):
-       case OP(SEND_MIDDLE):
-               if (opcode == OP(SEND_MIDDLE) ||
-                   opcode == OP(SEND_LAST) ||
-                   opcode == OP(SEND_LAST_WITH_IMMEDIATE))
-                       break;
-               goto inv;
-
-       case OP(RDMA_WRITE_FIRST):
-       case OP(RDMA_WRITE_MIDDLE):
-               if (opcode == OP(RDMA_WRITE_MIDDLE) ||
-                   opcode == OP(RDMA_WRITE_LAST) ||
-                   opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
-                       break;
-               goto inv;
-
-       default:
-               if (opcode == OP(SEND_FIRST) ||
-                   opcode == OP(SEND_ONLY) ||
-                   opcode == OP(SEND_ONLY_WITH_IMMEDIATE) ||
-                   opcode == OP(RDMA_WRITE_FIRST) ||
-                   opcode == OP(RDMA_WRITE_ONLY) ||
-                   opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
-                       break;
-               goto inv;
-       }
-
-       /* OK, process the packet. */
-       switch (opcode) {
-       case OP(SEND_FIRST):
-       case OP(SEND_ONLY):
-       case OP(SEND_ONLY_WITH_IMMEDIATE):
-       send_first:
-               if (qp->r_flags & IPATH_R_REUSE_SGE) {
-                       qp->r_flags &= ~IPATH_R_REUSE_SGE;
-                       qp->r_sge = qp->s_rdma_read_sge;
-               } else if (!ipath_get_rwqe(qp, 0)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               /* Save the WQE so we can reuse it in case of an error. */
-               qp->s_rdma_read_sge = qp->r_sge;
-               qp->r_rcv_len = 0;
-               if (opcode == OP(SEND_ONLY))
-                       goto send_last;
-               else if (opcode == OP(SEND_ONLY_WITH_IMMEDIATE))
-                       goto send_last_imm;
-               /* FALLTHROUGH */
-       case OP(SEND_MIDDLE):
-               /* Check for invalid length PMTU or posted rwqe len. */
-               if (unlikely(tlen != (hdrsize + pmtu + 4))) {
-                       qp->r_flags |= IPATH_R_REUSE_SGE;
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               qp->r_rcv_len += pmtu;
-               if (unlikely(qp->r_rcv_len > qp->r_len)) {
-                       qp->r_flags |= IPATH_R_REUSE_SGE;
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               ipath_copy_sge(&qp->r_sge, data, pmtu);
-               break;
-
-       case OP(SEND_LAST_WITH_IMMEDIATE):
-       send_last_imm:
-               if (header_in_data) {
-                       wc.ex.imm_data = *(__be32 *) data;
-                       data += sizeof(__be32);
-               } else {
-                       /* Immediate data comes after BTH */
-                       wc.ex.imm_data = ohdr->u.imm_data;
-               }
-               hdrsize += 4;
-               wc.wc_flags = IB_WC_WITH_IMM;
-               /* FALLTHROUGH */
-       case OP(SEND_LAST):
-       send_last:
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /* Check for invalid length. */
-               /* XXX LAST len should be >= 1 */
-               if (unlikely(tlen < (hdrsize + pad + 4))) {
-                       qp->r_flags |= IPATH_R_REUSE_SGE;
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               /* Don't count the CRC. */
-               tlen -= (hdrsize + pad + 4);
-               wc.byte_len = tlen + qp->r_rcv_len;
-               if (unlikely(wc.byte_len > qp->r_len)) {
-                       qp->r_flags |= IPATH_R_REUSE_SGE;
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               wc.opcode = IB_WC_RECV;
-       last_imm:
-               ipath_copy_sge(&qp->r_sge, data, tlen);
-               wc.wr_id = qp->r_wr_id;
-               wc.status = IB_WC_SUCCESS;
-               wc.qp = &qp->ibqp;
-               wc.src_qp = qp->remote_qpn;
-               wc.slid = qp->remote_ah_attr.dlid;
-               wc.sl = qp->remote_ah_attr.sl;
-               /* Signal completion event if the solicited bit is set. */
-               ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                              (ohdr->bth[0] &
-                               cpu_to_be32(1 << 23)) != 0);
-               break;
-
-       case OP(RDMA_WRITE_FIRST):
-       case OP(RDMA_WRITE_ONLY):
-       case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): /* consume RWQE */
-       rdma_first:
-               /* RETH comes after BTH */
-               if (!header_in_data)
-                       reth = &ohdr->u.rc.reth;
-               else {
-                       reth = (struct ib_reth *)data;
-                       data += sizeof(*reth);
-               }
-               hdrsize += sizeof(*reth);
-               qp->r_len = be32_to_cpu(reth->length);
-               qp->r_rcv_len = 0;
-               if (qp->r_len != 0) {
-                       u32 rkey = be32_to_cpu(reth->rkey);
-                       u64 vaddr = be64_to_cpu(reth->vaddr);
-                       int ok;
-
-                       /* Check rkey */
-                       ok = ipath_rkey_ok(qp, &qp->r_sge, qp->r_len,
-                                          vaddr, rkey,
-                                          IB_ACCESS_REMOTE_WRITE);
-                       if (unlikely(!ok)) {
-                               dev->n_pkt_drops++;
-                               goto done;
-                       }
-               } else {
-                       qp->r_sge.sg_list = NULL;
-                       qp->r_sge.sge.mr = NULL;
-                       qp->r_sge.sge.vaddr = NULL;
-                       qp->r_sge.sge.length = 0;
-                       qp->r_sge.sge.sge_length = 0;
-               }
-               if (unlikely(!(qp->qp_access_flags &
-                              IB_ACCESS_REMOTE_WRITE))) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               if (opcode == OP(RDMA_WRITE_ONLY))
-                       goto rdma_last;
-               else if (opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
-                       goto rdma_last_imm;
-               /* FALLTHROUGH */
-       case OP(RDMA_WRITE_MIDDLE):
-               /* Check for invalid length PMTU or posted rwqe len. */
-               if (unlikely(tlen != (hdrsize + pmtu + 4))) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               qp->r_rcv_len += pmtu;
-               if (unlikely(qp->r_rcv_len > qp->r_len)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               ipath_copy_sge(&qp->r_sge, data, pmtu);
-               break;
-
-       case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
-       rdma_last_imm:
-               if (header_in_data) {
-                       wc.ex.imm_data = *(__be32 *) data;
-                       data += sizeof(__be32);
-               } else {
-                       /* Immediate data comes after BTH */
-                       wc.ex.imm_data = ohdr->u.imm_data;
-               }
-               hdrsize += 4;
-               wc.wc_flags = IB_WC_WITH_IMM;
-
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /* Check for invalid length. */
-               /* XXX LAST len should be >= 1 */
-               if (unlikely(tlen < (hdrsize + pad + 4))) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               /* Don't count the CRC. */
-               tlen -= (hdrsize + pad + 4);
-               if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               if (qp->r_flags & IPATH_R_REUSE_SGE)
-                       qp->r_flags &= ~IPATH_R_REUSE_SGE;
-               else if (!ipath_get_rwqe(qp, 1)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               wc.byte_len = qp->r_len;
-               wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
-               goto last_imm;
-
-       case OP(RDMA_WRITE_LAST):
-       rdma_last:
-               /* Get the number of bytes the message was padded by. */
-               pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-               /* Check for invalid length. */
-               /* XXX LAST len should be >= 1 */
-               if (unlikely(tlen < (hdrsize + pad + 4))) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               /* Don't count the CRC. */
-               tlen -= (hdrsize + pad + 4);
-               if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) {
-                       dev->n_pkt_drops++;
-                       goto done;
-               }
-               ipath_copy_sge(&qp->r_sge, data, tlen);
-               break;
-
-       default:
-               /* Drop packet for unknown opcodes. */
-               dev->n_pkt_drops++;
-               goto done;
-       }
-       qp->r_psn++;
-       qp->r_state = opcode;
-done:
-       return;
-}
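
The receive switch above unpacks the same fields from the first BTH word in several places: the 2-bit pad count in bits 21:20 and the solicited-event bit 23. A minimal host-order sketch of that bit handling (the driver first converts the wire word with be32_to_cpu(); the bit positions are taken from the code above, everything else here is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Extract the 2-bit pad count (bits 21:20) from a host-order BTH word 0. */
static uint32_t bth0_pad(uint32_t bth0)
{
        return (bth0 >> 20) & 3;
}

/* Test the solicited-event bit (bit 23) of a host-order BTH word 0. */
static int bth0_solicited(uint32_t bth0)
{
        return (bth0 & (1u << 23)) != 0;
}

int main(void)
{
        uint32_t bth0 = (3u << 20) | (1u << 23);        /* pad = 3, solicited set */

        printf("pad=%u solicited=%d\n", bth0_pad(bth0), bth0_solicited(bth0));
        return 0;
}

The pad count is what lets the LAST/ONLY handlers subtract "hdrsize + pad + 4" (header, padding, ICRC) from tlen to recover the payload length.
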
diff --git a/drivers/staging/rdma/ipath/ipath_ud.c b/drivers/staging/rdma/ipath/ipath_ud.c
deleted file mode 100644 (file)
index 385d941..0000000
+++ /dev/null
@@ -1,579 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_smi.h>
-
-#include "ipath_verbs.h"
-#include "ipath_kernel.h"
-
-/**
- * ipath_ud_loopback - handle send on loopback QPs
- * @sqp: the sending QP
- * @swqe: the send work request
- *
- * This is called from ipath_make_ud_req() to forward a WQE addressed
- * to the same HCA.
- * Note that the receive interrupt handler may be calling ipath_ud_rcv()
- * while this is being called.
- */
-static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
-{
-       struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
-       struct ipath_qp *qp;
-       struct ib_ah_attr *ah_attr;
-       unsigned long flags;
-       struct ipath_rq *rq;
-       struct ipath_srq *srq;
-       struct ipath_sge_state rsge;
-       struct ipath_sge *sge;
-       struct ipath_rwq *wq;
-       struct ipath_rwqe *wqe;
-       void (*handler)(struct ib_event *, void *);
-       struct ib_wc wc;
-       u32 tail;
-       u32 rlen;
-       u32 length;
-
-       qp = ipath_lookup_qpn(&dev->qp_table, swqe->ud_wr.remote_qpn);
-       if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-               dev->n_pkt_drops++;
-               goto done;
-       }
-
-       /*
-        * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
-        * Qkeys with the high order bit set mean use the
-        * qkey from the QP context instead of the WR (see 10.2.5).
-        */
-       if (unlikely(qp->ibqp.qp_num &&
-                    ((int) swqe->ud_wr.remote_qkey < 0 ?
-                     sqp->qkey : swqe->ud_wr.remote_qkey) != qp->qkey)) {
-               /* XXX OK to lose a count once in a while. */
-               dev->qkey_violations++;
-               dev->n_pkt_drops++;
-               goto drop;
-       }
-
-       /*
-        * A GRH is expected to precede the data even if not
-        * present on the wire.
-        */
-       length = swqe->length;
-       memset(&wc, 0, sizeof wc);
-       wc.byte_len = length + sizeof(struct ib_grh);
-
-       if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
-               wc.wc_flags = IB_WC_WITH_IMM;
-               wc.ex.imm_data = swqe->wr.ex.imm_data;
-       }
-
-       /*
-        * This would be a lot simpler if we could call ipath_get_rwqe()
-        * but that uses state that the receive interrupt handler uses
-        * so we would need to lock out receive interrupts while doing
-        * local loopback.
-        */
-       if (qp->ibqp.srq) {
-               srq = to_isrq(qp->ibqp.srq);
-               handler = srq->ibsrq.event_handler;
-               rq = &srq->rq;
-       } else {
-               srq = NULL;
-               handler = NULL;
-               rq = &qp->r_rq;
-       }
-
-       /*
-        * Get the next work request entry to find where to put the data.
-        * Note that it is safe to drop the lock after changing rq->tail
-        * since ipath_post_receive() won't fill the empty slot.
-        */
-       spin_lock_irqsave(&rq->lock, flags);
-       wq = rq->wq;
-       tail = wq->tail;
-       /* Validate tail before using it since it is user writable. */
-       if (tail >= rq->size)
-               tail = 0;
-       if (unlikely(tail == wq->head)) {
-               spin_unlock_irqrestore(&rq->lock, flags);
-               dev->n_pkt_drops++;
-               goto drop;
-       }
-       wqe = get_rwqe_ptr(rq, tail);
-       rsge.sg_list = qp->r_ud_sg_list;
-       if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
-               spin_unlock_irqrestore(&rq->lock, flags);
-               dev->n_pkt_drops++;
-               goto drop;
-       }
-       /* Silently drop packets which are too big. */
-       if (wc.byte_len > rlen) {
-               spin_unlock_irqrestore(&rq->lock, flags);
-               dev->n_pkt_drops++;
-               goto drop;
-       }
-       if (++tail >= rq->size)
-               tail = 0;
-       wq->tail = tail;
-       wc.wr_id = wqe->wr_id;
-       if (handler) {
-               u32 n;
-
-               /*
-                * validate head pointer value and compute
-                * the number of remaining WQEs.
-                */
-               n = wq->head;
-               if (n >= rq->size)
-                       n = 0;
-               if (n < tail)
-                       n += rq->size - tail;
-               else
-                       n -= tail;
-               if (n < srq->limit) {
-                       struct ib_event ev;
-
-                       srq->limit = 0;
-                       spin_unlock_irqrestore(&rq->lock, flags);
-                       ev.device = qp->ibqp.device;
-                       ev.element.srq = qp->ibqp.srq;
-                       ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
-                       handler(&ev, srq->ibsrq.srq_context);
-               } else
-                       spin_unlock_irqrestore(&rq->lock, flags);
-       } else
-               spin_unlock_irqrestore(&rq->lock, flags);
-
-       ah_attr = &to_iah(swqe->ud_wr.ah)->attr;
-       if (ah_attr->ah_flags & IB_AH_GRH) {
-               ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
-               wc.wc_flags |= IB_WC_GRH;
-       } else
-               ipath_skip_sge(&rsge, sizeof(struct ib_grh));
-       sge = swqe->sg_list;
-       while (length) {
-               u32 len = sge->length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               ipath_copy_sge(&rsge, sge->vaddr, len);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--swqe->wr.num_sge)
-                               sge++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               length -= len;
-       }
-       wc.status = IB_WC_SUCCESS;
-       wc.opcode = IB_WC_RECV;
-       wc.qp = &qp->ibqp;
-       wc.src_qp = sqp->ibqp.qp_num;
-       /* XXX do we know which pkey matched? Only needed for GSI. */
-       wc.pkey_index = 0;
-       wc.slid = dev->dd->ipath_lid |
-               (ah_attr->src_path_bits &
-                ((1 << dev->dd->ipath_lmc) - 1));
-       wc.sl = ah_attr->sl;
-       wc.dlid_path_bits =
-               ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
-       wc.port_num = 1;
-       /* Signal completion event if the solicited bit is set. */
-       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                      swqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED);
-drop:
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-done:;
-}
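
The SRQ limit test in ipath_ud_loopback() computes how many receive WQEs are still posted in a circular ring from the user-visible head and the consumer tail. A standalone sketch of that wrap-around arithmetic (ring size and index values here are purely illustrative):

#include <stdio.h>

/*
 * Entries still posted in a circular ring of 'size' slots, given a
 * user-writable head and the consumer tail -- the same wrap-around
 * arithmetic as the SRQ limit check above.
 */
static unsigned int ring_entries_left(unsigned int head, unsigned int tail,
                                      unsigned int size)
{
        unsigned int n = head;

        if (n >= size)          /* head is user writable: clamp it */
                n = 0;
        if (n < tail)
                n += size - tail;
        else
                n -= tail;
        return n;
}

int main(void)
{
        printf("%u\n", ring_entries_left(2, 5, 8));     /* wrapped: 5 entries */
        printf("%u\n", ring_entries_left(7, 3, 8));     /* linear:  4 entries */
        return 0;
}

When this count drops below srq->limit, the driver clears the limit and raises IB_EVENT_SRQ_LIMIT_REACHED, as shown above.
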
-
-/**
- * ipath_make_ud_req - construct a UD request packet
- * @qp: the QP
- *
- * Return 1 if constructed; otherwise, return 0.
- */
-int ipath_make_ud_req(struct ipath_qp *qp)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_other_headers *ohdr;
-       struct ib_ah_attr *ah_attr;
-       struct ipath_swqe *wqe;
-       unsigned long flags;
-       u32 nwords;
-       u32 extra_bytes;
-       u32 bth0;
-       u16 lrh0;
-       u16 lid;
-       int ret = 0;
-       int next_cur;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
-               if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
-                       goto bail;
-               /* We are in the error state, flush the work request. */
-               if (qp->s_last == qp->s_head)
-                       goto bail;
-               /* If DMAs are in progress, we can't flush immediately. */
-               if (atomic_read(&qp->s_dma_busy)) {
-                       qp->s_flags |= IPATH_S_WAIT_DMA;
-                       goto bail;
-               }
-               wqe = get_swqe_ptr(qp, qp->s_last);
-               ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
-               goto done;
-       }
-
-       if (qp->s_cur == qp->s_head)
-               goto bail;
-
-       wqe = get_swqe_ptr(qp, qp->s_cur);
-       next_cur = qp->s_cur + 1;
-       if (next_cur >= qp->s_size)
-               next_cur = 0;
-
-       /* Construct the header. */
-       ah_attr = &to_iah(wqe->ud_wr.ah)->attr;
-       if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
-               if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
-                       dev->n_multicast_xmit++;
-               else
-                       dev->n_unicast_xmit++;
-       } else {
-               dev->n_unicast_xmit++;
-               lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1);
-               if (unlikely(lid == dev->dd->ipath_lid)) {
-                       /*
-                        * If DMAs are in progress, we can't generate
-                        * a completion for the loopback packet since
-                        * it would be out of order.
-                        * XXX Instead of waiting, we could queue a
-                        * zero length descriptor so we get a callback.
-                        */
-                       if (atomic_read(&qp->s_dma_busy)) {
-                               qp->s_flags |= IPATH_S_WAIT_DMA;
-                               goto bail;
-                       }
-                       qp->s_cur = next_cur;
-                       spin_unlock_irqrestore(&qp->s_lock, flags);
-                       ipath_ud_loopback(qp, wqe);
-                       spin_lock_irqsave(&qp->s_lock, flags);
-                       ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
-                       goto done;
-               }
-       }
-
-       qp->s_cur = next_cur;
-       extra_bytes = -wqe->length & 3;
-       nwords = (wqe->length + extra_bytes) >> 2;
-
-       /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
-       qp->s_hdrwords = 7;
-       qp->s_cur_size = wqe->length;
-       qp->s_cur_sge = &qp->s_sge;
-       qp->s_dmult = ah_attr->static_rate;
-       qp->s_wqe = wqe;
-       qp->s_sge.sge = wqe->sg_list[0];
-       qp->s_sge.sg_list = wqe->sg_list + 1;
-       qp->s_sge.num_sge = wqe->ud_wr.wr.num_sge;
-
-       if (ah_attr->ah_flags & IB_AH_GRH) {
-               /* Header size in 32-bit words. */
-               qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
-                                                &ah_attr->grh,
-                                                qp->s_hdrwords, nwords);
-               lrh0 = IPATH_LRH_GRH;
-               ohdr = &qp->s_hdr.u.l.oth;
-               /*
-                * Don't worry about sending to locally attached multicast
-                * QPs.  It is unspecified by the spec what happens.
-                */
-       } else {
-               /* Header size in 32-bit words. */
-               lrh0 = IPATH_LRH_BTH;
-               ohdr = &qp->s_hdr.u.oth;
-       }
-       if (wqe->ud_wr.wr.opcode == IB_WR_SEND_WITH_IMM) {
-               qp->s_hdrwords++;
-               ohdr->u.ud.imm_data = wqe->ud_wr.wr.ex.imm_data;
-               bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
-       } else
-               bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
-       lrh0 |= ah_attr->sl << 4;
-       if (qp->ibqp.qp_type == IB_QPT_SMI)
-               lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
-       qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
-       qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
-       qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
-                                          SIZE_OF_CRC);
-       lid = dev->dd->ipath_lid;
-       if (lid) {
-               lid |= ah_attr->src_path_bits &
-                       ((1 << dev->dd->ipath_lmc) - 1);
-               qp->s_hdr.lrh[3] = cpu_to_be16(lid);
-       } else
-               qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
-       if (wqe->ud_wr.wr.send_flags & IB_SEND_SOLICITED)
-               bth0 |= 1 << 23;
-       bth0 |= extra_bytes << 20;
-       bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
-               ipath_get_pkey(dev->dd, qp->s_pkey_index);
-       ohdr->bth[0] = cpu_to_be32(bth0);
-       /*
-        * Use the multicast QP if the destination LID is a multicast LID.
-        */
-       ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
-               ah_attr->dlid != IPATH_PERMISSIVE_LID ?
-               cpu_to_be32(IPATH_MULTICAST_QPN) :
-               cpu_to_be32(wqe->ud_wr.remote_qpn);
-       ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
-       /*
-        * Qkeys with the high order bit set mean use the
-        * qkey from the QP context instead of the WR (see 10.2.5).
-        */
-       ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->ud_wr.remote_qkey < 0 ?
-                                        qp->qkey : wqe->ud_wr.remote_qkey);
-       ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);
-
-done:
-       ret = 1;
-       goto unlock;
-
-bail:
-       qp->s_flags &= ~IPATH_S_BUSY;
-unlock:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       return ret;
-}
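
ipath_make_ud_req() rounds the payload up to a 32-bit boundary with "-length & 3", converts it to dwords for the LRH packet length, and records the pad count in BTH bits 21:20. A standalone sketch of that padding arithmetic (only the expressions are taken from the code above):

#include <stdint.h>
#include <stdio.h>

/*
 * Round a byte length up to a 4-byte boundary, returning the pad bytes
 * and the payload size in 32-bit words, as in ipath_make_ud_req().
 */
static void pad_to_dwords(uint32_t len, uint32_t *extra_bytes, uint32_t *nwords)
{
        *extra_bytes = -len & 3;                /* 0..3 pad bytes */
        *nwords = (len + *extra_bytes) >> 2;    /* payload in dwords */
}

int main(void)
{
        uint32_t pad, nw;

        pad_to_dwords(13, &pad, &nw);
        /* The pad count is placed in BTH bits 21:20 (pad << 20). */
        printf("pad=%u nwords=%u bth0_pad_field=0x%x\n", pad, nw, pad << 20);
        return 0;
}
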
-
-/**
- * ipath_ud_rcv - receive an incoming UD packet
- * @dev: the device the packet came in on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from ipath_qp_rcv() to process an incoming UD packet
- * for the given QP.
- * Called at interrupt level.
- */
-void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
-{
-       struct ipath_other_headers *ohdr;
-       int opcode;
-       u32 hdrsize;
-       u32 pad;
-       struct ib_wc wc;
-       u32 qkey;
-       u32 src_qp;
-       u16 dlid;
-       int header_in_data;
-
-       /* Check for GRH */
-       if (!has_grh) {
-               ohdr = &hdr->u.oth;
-               hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
-               qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
-               src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
-               header_in_data = 0;
-       } else {
-               ohdr = &hdr->u.l.oth;
-               hdrsize = 8 + 40 + 12 + 8; /* LRH + GRH + BTH + DETH */
-               /*
-                * The header with GRH is 68 bytes and the core driver sets
-                * the eager header buffer size to 56 bytes so the last 12
-                * bytes of the IB header is in the data buffer.
-                */
-               header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
-               if (header_in_data) {
-                       qkey = be32_to_cpu(((__be32 *) data)[1]);
-                       src_qp = be32_to_cpu(((__be32 *) data)[2]);
-                       data += 12;
-               } else {
-                       qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
-                       src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
-               }
-       }
-       src_qp &= IPATH_QPN_MASK;
-
-       /*
-        * Check that the permissive LID is only used on QP0
-        * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
-        */
-       if (qp->ibqp.qp_num) {
-               if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
-                            hdr->lrh[3] == IB_LID_PERMISSIVE)) {
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-               if (unlikely(qkey != qp->qkey)) {
-                       /* XXX OK to lose a count once in a while. */
-                       dev->qkey_violations++;
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-       } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
-                  hdr->lrh[3] == IB_LID_PERMISSIVE) {
-               struct ib_smp *smp = (struct ib_smp *) data;
-
-               if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-       }
-
-       /*
-        * The opcode is in the low byte when it's in network order
-        * (top byte when in host order).
-        */
-       opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
-       if (qp->ibqp.qp_num > 1 &&
-           opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
-               if (header_in_data) {
-                       wc.ex.imm_data = *(__be32 *) data;
-                       data += sizeof(__be32);
-               } else
-                       wc.ex.imm_data = ohdr->u.ud.imm_data;
-               wc.wc_flags = IB_WC_WITH_IMM;
-               hdrsize += sizeof(u32);
-       } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
-               wc.ex.imm_data = 0;
-               wc.wc_flags = 0;
-       } else {
-               dev->n_pkt_drops++;
-               goto bail;
-       }
-
-       /* Get the number of bytes the message was padded by. */
-       pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
-       if (unlikely(tlen < (hdrsize + pad + 4))) {
-               /* Drop incomplete packets. */
-               dev->n_pkt_drops++;
-               goto bail;
-       }
-       tlen -= hdrsize + pad + 4;
-
-       /* Drop invalid MAD packets (see 13.5.3.1). */
-       if (unlikely((qp->ibqp.qp_num == 0 &&
-                     (tlen != 256 ||
-                      (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
-                    (qp->ibqp.qp_num == 1 &&
-                     (tlen != 256 ||
-                      (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
-               dev->n_pkt_drops++;
-               goto bail;
-       }
-
-       /*
-        * A GRH is expected to precede the data even if not
-        * present on the wire.
-        */
-       wc.byte_len = tlen + sizeof(struct ib_grh);
-
-       /*
-        * Get the next work request entry to find where to put the data.
-        */
-       if (qp->r_flags & IPATH_R_REUSE_SGE)
-               qp->r_flags &= ~IPATH_R_REUSE_SGE;
-       else if (!ipath_get_rwqe(qp, 0)) {
-               /*
-                * Count VL15 packets dropped due to no receive buffer.
-                * Otherwise, count them as buffer overruns since usually,
-                * the HW will be able to receive packets even if there are
-                * no QPs with posted receive buffers.
-                */
-               if (qp->ibqp.qp_num == 0)
-                       dev->n_vl15_dropped++;
-               else
-                       dev->rcv_errors++;
-               goto bail;
-       }
-       /* Silently drop packets which are too big. */
-       if (wc.byte_len > qp->r_len) {
-               qp->r_flags |= IPATH_R_REUSE_SGE;
-               dev->n_pkt_drops++;
-               goto bail;
-       }
-       if (has_grh) {
-               ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
-                              sizeof(struct ib_grh));
-               wc.wc_flags |= IB_WC_GRH;
-       } else
-               ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
-       ipath_copy_sge(&qp->r_sge, data,
-                      wc.byte_len - sizeof(struct ib_grh));
-       if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
-               goto bail;
-       wc.wr_id = qp->r_wr_id;
-       wc.status = IB_WC_SUCCESS;
-       wc.opcode = IB_WC_RECV;
-       wc.vendor_err = 0;
-       wc.qp = &qp->ibqp;
-       wc.src_qp = src_qp;
-       /* XXX do we know which pkey matched? Only needed for GSI. */
-       wc.pkey_index = 0;
-       wc.slid = be16_to_cpu(hdr->lrh[3]);
-       wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
-       dlid = be16_to_cpu(hdr->lrh[1]);
-       /*
-        * Save the LMC lower bits if the destination LID is a unicast LID.
-        */
-       wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
-               dlid & ((1 << dev->dd->ipath_lmc) - 1);
-       wc.port_num = 1;
-       /* Signal completion event if the solicited bit is set. */
-       ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
-                      (ohdr->bth[0] &
-                       cpu_to_be32(1 << 23)) != 0);
-
-bail:;
-}
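
Both the loopback path and the header construction above apply the same qkey rule: a work-request qkey with the high-order bit set means "use the qkey from the QP context" (see the 10.2.5 comments). A minimal sketch of that selection; the function and variable names here are illustrative, not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Pick the qkey for the DETH: a WR qkey with bit 31 set selects the QP's own qkey. */
static uint32_t effective_qkey(uint32_t wr_qkey, uint32_t qp_qkey)
{
        return ((int32_t)wr_qkey < 0) ? qp_qkey : wr_qkey;
}

int main(void)
{
        printf("0x%x\n", effective_qkey(0x80000000u, 0x1234));  /* -> QP qkey */
        printf("0x%x\n", effective_qkey(0x00000042u, 0x1234));  /* -> WR qkey */
        return 0;
}

ipath_ud_rcv() then enforces the receive side of the same rule by dropping packets whose qkey does not match qp->qkey (QP0 excepted).
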
diff --git a/drivers/staging/rdma/ipath/ipath_user_pages.c b/drivers/staging/rdma/ipath/ipath_user_pages.c
deleted file mode 100644 (file)
index d29b4da..0000000
+++ /dev/null
@@ -1,228 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/mm.h>
-#include <linux/device.h>
-#include <linux/slab.h>
-
-#include "ipath_kernel.h"
-
-static void __ipath_release_user_pages(struct page **p, size_t num_pages,
-                                  int dirty)
-{
-       size_t i;
-
-       for (i = 0; i < num_pages; i++) {
-               ipath_cdbg(MM, "%lu/%lu put_page %p\n", (unsigned long) i,
-                          (unsigned long) num_pages, p[i]);
-               if (dirty)
-                       set_page_dirty_lock(p[i]);
-               put_page(p[i]);
-       }
-}
-
-/* call with current->mm->mmap_sem held */
-static int __ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-                                 struct page **p)
-{
-       unsigned long lock_limit;
-       size_t got;
-       int ret;
-
-       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-       if (num_pages > lock_limit) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       ipath_cdbg(VERBOSE, "pin %lx pages from vaddr %lx\n",
-                  (unsigned long) num_pages, start_page);
-
-       for (got = 0; got < num_pages; got += ret) {
-               ret = get_user_pages(current, current->mm,
-                                    start_page + got * PAGE_SIZE,
-                                    num_pages - got, 1, 1,
-                                    p + got, NULL);
-               if (ret < 0)
-                       goto bail_release;
-       }
-
-       current->mm->pinned_vm += num_pages;
-
-       ret = 0;
-       goto bail;
-
-bail_release:
-       __ipath_release_user_pages(p, got, 0);
-bail:
-       return ret;
-}
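
__ipath_get_user_pages() refuses to pin more pages than the caller's RLIMIT_MEMLOCK allows, expressed in pages. A userspace sketch of the same check, using getrlimit() and sysconf() in place of the kernel's rlimit() helper and PAGE_SHIFT:

#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

/* Non-zero if pinning num_pages pages would exceed RLIMIT_MEMLOCK. */
static int exceeds_memlock(unsigned long num_pages)
{
        struct rlimit rl;
        unsigned long lock_limit;

        if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0)
                return 1;                       /* be conservative on error */
        lock_limit = rl.rlim_cur / (unsigned long)sysconf(_SC_PAGESIZE);
        return num_pages > lock_limit;
}

int main(void)
{
        printf("one page over the limit? %d\n", exceeds_memlock(1));
        return 0;
}
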
-
-/**
- * ipath_map_page - a safety wrapper around pci_map_page()
- *
- * A dma_addr of all 0's is interpreted by the chip as "disabled".
- * Unfortunately, it can also be a valid dma_addr returned on some
- * architectures.
- *
- * The powerpc iommu assigns dma_addrs in ascending order, so we don't
- * have to bother with retries or mapping a dummy page to ensure we
- * don't just get the same mapping again.
- *
- * I'm sure we won't be so lucky with other iommu's, so FIXME.
- */
-dma_addr_t ipath_map_page(struct pci_dev *hwdev, struct page *page,
-       unsigned long offset, size_t size, int direction)
-{
-       dma_addr_t phys;
-
-       phys = pci_map_page(hwdev, page, offset, size, direction);
-
-       if (phys == 0) {
-               pci_unmap_page(hwdev, phys, size, direction);
-               phys = pci_map_page(hwdev, page, offset, size, direction);
-               /*
-                * FIXME: If we get 0 again, we should keep this page,
-                * map another, then free the 0 page.
-                */
-       }
-
-       return phys;
-}
-
-/**
- * ipath_map_single - a safety wrapper around pci_map_single()
- *
- * Same idea as ipath_map_page().
- */
-dma_addr_t ipath_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
-       int direction)
-{
-       dma_addr_t phys;
-
-       phys = pci_map_single(hwdev, ptr, size, direction);
-
-       if (phys == 0) {
-               pci_unmap_single(hwdev, phys, size, direction);
-               phys = pci_map_single(hwdev, ptr, size, direction);
-               /*
-                * FIXME: If we get 0 again, we should keep this page,
-                * map another, then free the 0 page.
-                */
-       }
-
-       return phys;
-}
-
-/**
- * ipath_get_user_pages - lock user pages into memory
- * @start_page: the start page
- * @num_pages: the number of pages
- * @p: the output page structures
- *
- * This function takes a given start page (page aligned user virtual
- * address) and pins it and the following specified number of pages.  For
- * now, num_pages is always 1, but that will probably change at some point
- * (because caller is doing expected sends on a single virtually contiguous
- * buffer, so we can do all pages at once).
- */
-int ipath_get_user_pages(unsigned long start_page, size_t num_pages,
-                        struct page **p)
-{
-       int ret;
-
-       down_write(&current->mm->mmap_sem);
-
-       ret = __ipath_get_user_pages(start_page, num_pages, p);
-
-       up_write(&current->mm->mmap_sem);
-
-       return ret;
-}
-
-void ipath_release_user_pages(struct page **p, size_t num_pages)
-{
-       down_write(&current->mm->mmap_sem);
-
-       __ipath_release_user_pages(p, num_pages, 1);
-
-       current->mm->pinned_vm -= num_pages;
-
-       up_write(&current->mm->mmap_sem);
-}
-
-struct ipath_user_pages_work {
-       struct work_struct work;
-       struct mm_struct *mm;
-       unsigned long num_pages;
-};
-
-static void user_pages_account(struct work_struct *_work)
-{
-       struct ipath_user_pages_work *work =
-               container_of(_work, struct ipath_user_pages_work, work);
-
-       down_write(&work->mm->mmap_sem);
-       work->mm->pinned_vm -= work->num_pages;
-       up_write(&work->mm->mmap_sem);
-       mmput(work->mm);
-       kfree(work);
-}
-
-void ipath_release_user_pages_on_close(struct page **p, size_t num_pages)
-{
-       struct ipath_user_pages_work *work;
-       struct mm_struct *mm;
-
-       __ipath_release_user_pages(p, num_pages, 1);
-
-       mm = get_task_mm(current);
-       if (!mm)
-               return;
-
-       work = kmalloc(sizeof(*work), GFP_KERNEL);
-       if (!work)
-               goto bail_mm;
-
-       INIT_WORK(&work->work, user_pages_account);
-       work->mm = mm;
-       work->num_pages = num_pages;
-
-       queue_work(ib_wq, &work->work);
-       return;
-
-bail_mm:
-       mmput(mm);
-       return;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.c b/drivers/staging/rdma/ipath/ipath_user_sdma.c
deleted file mode 100644 (file)
index 8c12e3c..0000000
+++ /dev/null
@@ -1,874 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/dmapool.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/highmem.h>
-#include <linux/io.h>
-#include <linux/uio.h>
-#include <linux/rbtree.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-
-#include "ipath_kernel.h"
-#include "ipath_user_sdma.h"
-
-/* minimum size of header */
-#define IPATH_USER_SDMA_MIN_HEADER_LENGTH      64
-/* expected size of headers (for dma_pool) */
-#define IPATH_USER_SDMA_EXP_HEADER_LENGTH      64
-/* length mask in PBC (lower 11 bits) */
-#define IPATH_PBC_LENGTH_MASK                  ((1 << 11) - 1)
-
-struct ipath_user_sdma_pkt {
-       u8 naddr;               /* dimension of addr (1..3) ... */
-       u32 counter;            /* sdma pkts queued counter for this entry */
-       u64 added;              /* global descq number of entries */
-
-       struct {
-               u32 offset;                     /* offset for kvaddr, addr */
-               u32 length;                     /* length in page */
-               u8  put_page;                   /* should we put_page? */
-               u8  dma_mapped;                 /* is page dma_mapped? */
-               struct page *page;              /* may be NULL (coherent mem) */
-               void *kvaddr;                   /* FIXME: only for pio hack */
-               dma_addr_t addr;
-       } addr[4];   /* max pages, any more and we coalesce */
-       struct list_head list;  /* list element */
-};
-
-struct ipath_user_sdma_queue {
-       /*
-        * pkts sent to dma engine are queued on this
-        * list head.  the type of the elements of this
-        * list is struct ipath_user_sdma_pkt...
-        */
-       struct list_head sent;
-
-       /* headers with expected length are allocated from here... */
-       char header_cache_name[64];
-       struct dma_pool *header_cache;
-
-       /* packets are allocated from the slab cache... */
-       char pkt_slab_name[64];
-       struct kmem_cache *pkt_slab;
-
-       /* as packets go on the queued queue, they are counted... */
-       u32 counter;
-       u32 sent_counter;
-
-       /* dma page table */
-       struct rb_root dma_pages_root;
-
-       /* protect everything above... */
-       struct mutex lock;
-};
-
-struct ipath_user_sdma_queue *
-ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport)
-{
-       struct ipath_user_sdma_queue *pq =
-               kmalloc(sizeof(struct ipath_user_sdma_queue), GFP_KERNEL);
-
-       if (!pq)
-               goto done;
-
-       pq->counter = 0;
-       pq->sent_counter = 0;
-       INIT_LIST_HEAD(&pq->sent);
-
-       mutex_init(&pq->lock);
-
-       snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
-                "ipath-user-sdma-pkts-%u-%02u.%02u", unit, port, sport);
-       pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
-                                        sizeof(struct ipath_user_sdma_pkt),
-                                        0, 0, NULL);
-
-       if (!pq->pkt_slab)
-               goto err_kfree;
-
-       snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
-                "ipath-user-sdma-headers-%u-%02u.%02u", unit, port, sport);
-       pq->header_cache = dma_pool_create(pq->header_cache_name,
-                                          dev,
-                                          IPATH_USER_SDMA_EXP_HEADER_LENGTH,
-                                          4, 0);
-       if (!pq->header_cache)
-               goto err_slab;
-
-       pq->dma_pages_root = RB_ROOT;
-
-       goto done;
-
-err_slab:
-       kmem_cache_destroy(pq->pkt_slab);
-err_kfree:
-       kfree(pq);
-       pq = NULL;
-
-done:
-       return pq;
-}
-
-static void ipath_user_sdma_init_frag(struct ipath_user_sdma_pkt *pkt,
-                                     int i, size_t offset, size_t len,
-                                     int put_page, int dma_mapped,
-                                     struct page *page,
-                                     void *kvaddr, dma_addr_t dma_addr)
-{
-       pkt->addr[i].offset = offset;
-       pkt->addr[i].length = len;
-       pkt->addr[i].put_page = put_page;
-       pkt->addr[i].dma_mapped = dma_mapped;
-       pkt->addr[i].page = page;
-       pkt->addr[i].kvaddr = kvaddr;
-       pkt->addr[i].addr = dma_addr;
-}
-
-static void ipath_user_sdma_init_header(struct ipath_user_sdma_pkt *pkt,
-                                       u32 counter, size_t offset,
-                                       size_t len, int dma_mapped,
-                                       struct page *page,
-                                       void *kvaddr, dma_addr_t dma_addr)
-{
-       pkt->naddr = 1;
-       pkt->counter = counter;
-       ipath_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
-                                 kvaddr, dma_addr);
-}
-
-/* we have too many pages in the iovec, so coalesce them into a single page */
-static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
-                                   struct ipath_user_sdma_pkt *pkt,
-                                   const struct iovec *iov,
-                                   unsigned long niov)
-{
-       int ret = 0;
-       struct page *page = alloc_page(GFP_KERNEL);
-       void *mpage_save;
-       char *mpage;
-       int i;
-       int len = 0;
-       dma_addr_t dma_addr;
-
-       if (!page) {
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       mpage = kmap(page);
-       mpage_save = mpage;
-       for (i = 0; i < niov; i++) {
-               int cfur;
-
-               cfur = copy_from_user(mpage,
-                                     iov[i].iov_base, iov[i].iov_len);
-               if (cfur) {
-                       ret = -EFAULT;
-                       goto free_unmap;
-               }
-
-               mpage += iov[i].iov_len;
-               len += iov[i].iov_len;
-       }
-
-       dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
-                               DMA_TO_DEVICE);
-       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-               ret = -ENOMEM;
-               goto free_unmap;
-       }
-
-       ipath_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
-                                 dma_addr);
-       pkt->naddr = 2;
-
-       goto done;
-
-free_unmap:
-       kunmap(page);
-       __free_page(page);
-done:
-       return ret;
-}
-
-/* how many pages in this iovec element? */
-static int ipath_user_sdma_num_pages(const struct iovec *iov)
-{
-       const unsigned long addr  = (unsigned long) iov->iov_base;
-       const unsigned long  len  = iov->iov_len;
-       const unsigned long spage = addr & PAGE_MASK;
-       const unsigned long epage = (addr + len - 1) & PAGE_MASK;
-
-       return 1 + ((epage - spage) >> PAGE_SHIFT);
-}
-
-/* truncate length to page boundary */
-static int ipath_user_sdma_page_length(unsigned long addr, unsigned long len)
-{
-       const unsigned long offset = offset_in_page(addr);
-
-       return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
-}
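
The two helpers above reduce an iovec element to page-sized fragments: one counts how many pages the element spans, the other truncates a fragment at the page boundary. A standalone sketch of that arithmetic with a 4 KiB page (PG_SIZE/PG_MASK/PG_SHIFT are defined locally to stand in for the kernel's PAGE_* macros):

#include <stdio.h>

#define PG_SIZE         4096UL
#define PG_MASK         (~(PG_SIZE - 1))
#define PG_SHIFT        12

/* Pages spanned by [addr, addr + len), as in ipath_user_sdma_num_pages(). */
static unsigned long pages_spanned(unsigned long addr, unsigned long len)
{
        unsigned long spage = addr & PG_MASK;
        unsigned long epage = (addr + len - 1) & PG_MASK;

        return 1 + ((epage - spage) >> PG_SHIFT);
}

/* Bytes of the fragment that fit in the current page, as in
 * ipath_user_sdma_page_length(). */
static unsigned long frag_in_page(unsigned long addr, unsigned long len)
{
        unsigned long offset = addr & (PG_SIZE - 1);

        return (offset + len > PG_SIZE) ? PG_SIZE - offset : len;
}

int main(void)
{
        /* A 64-byte fragment starting 16 bytes before a page boundary. */
        printf("pages=%lu first_frag=%lu\n",
               pages_spanned(0x1ff0, 0x40), frag_in_page(0x1ff0, 0x40));
        return 0;
}
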
-
-static void ipath_user_sdma_free_pkt_frag(struct device *dev,
-                                         struct ipath_user_sdma_queue *pq,
-                                         struct ipath_user_sdma_pkt *pkt,
-                                         int frag)
-{
-       const int i = frag;
-
-       if (pkt->addr[i].page) {
-               if (pkt->addr[i].dma_mapped)
-                       dma_unmap_page(dev,
-                                      pkt->addr[i].addr,
-                                      pkt->addr[i].length,
-                                      DMA_TO_DEVICE);
-
-               if (pkt->addr[i].kvaddr)
-                       kunmap(pkt->addr[i].page);
-
-               if (pkt->addr[i].put_page)
-                       put_page(pkt->addr[i].page);
-               else
-                       __free_page(pkt->addr[i].page);
-       } else if (pkt->addr[i].kvaddr)
-               /* free coherent mem from cache... */
-               dma_pool_free(pq->header_cache,
-                             pkt->addr[i].kvaddr, pkt->addr[i].addr);
-}
-
-/* return number of pages pinned... */
-static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
-                                    struct ipath_user_sdma_pkt *pkt,
-                                    unsigned long addr, int tlen, int npages)
-{
-       struct page *pages[2];
-       int j;
-       int ret;
-
-       ret = get_user_pages_fast(addr, npages, 0, pages);
-       if (ret != npages) {
-               int i;
-
-               for (i = 0; i < ret; i++)
-                       put_page(pages[i]);
-
-               ret = -ENOMEM;
-               goto done;
-       }
-
-       for (j = 0; j < npages; j++) {
-               /* map the pages... */
-               const int flen =
-                       ipath_user_sdma_page_length(addr, tlen);
-               dma_addr_t dma_addr =
-                       dma_map_page(&dd->pcidev->dev,
-                                    pages[j], 0, flen, DMA_TO_DEVICE);
-               unsigned long fofs = offset_in_page(addr);
-
-               if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-                       ret = -ENOMEM;
-                       goto done;
-               }
-
-               ipath_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
-                                         pages[j], kmap(pages[j]),
-                                         dma_addr);
-
-               pkt->naddr++;
-               addr += flen;
-               tlen -= flen;
-       }
-
-done:
-       return ret;
-}
-
-static int ipath_user_sdma_pin_pkt(const struct ipath_devdata *dd,
-                                  struct ipath_user_sdma_queue *pq,
-                                  struct ipath_user_sdma_pkt *pkt,
-                                  const struct iovec *iov,
-                                  unsigned long niov)
-{
-       int ret = 0;
-       unsigned long idx;
-
-       for (idx = 0; idx < niov; idx++) {
-               const int npages = ipath_user_sdma_num_pages(iov + idx);
-               const unsigned long addr = (unsigned long) iov[idx].iov_base;
-
-               ret = ipath_user_sdma_pin_pages(dd, pkt,
-                                               addr, iov[idx].iov_len,
-                                               npages);
-               if (ret < 0)
-                       goto free_pkt;
-       }
-
-       goto done;
-
-free_pkt:
-       for (idx = 0; idx < pkt->naddr; idx++)
-               ipath_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
-
-done:
-       return ret;
-}
-
-static int ipath_user_sdma_init_payload(const struct ipath_devdata *dd,
-                                       struct ipath_user_sdma_queue *pq,
-                                       struct ipath_user_sdma_pkt *pkt,
-                                       const struct iovec *iov,
-                                       unsigned long niov, int npages)
-{
-       int ret = 0;
-
-       if (npages >= ARRAY_SIZE(pkt->addr))
-               ret = ipath_user_sdma_coalesce(dd, pkt, iov, niov);
-       else
-               ret = ipath_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
-
-       return ret;
-}
-
-/* free a packet list, releasing each fragment's mappings and pages */
-static void ipath_user_sdma_free_pkt_list(struct device *dev,
-                                         struct ipath_user_sdma_queue *pq,
-                                         struct list_head *list)
-{
-       struct ipath_user_sdma_pkt *pkt, *pkt_next;
-
-       list_for_each_entry_safe(pkt, pkt_next, list, list) {
-               int i;
-
-               for (i = 0; i < pkt->naddr; i++)
-                       ipath_user_sdma_free_pkt_frag(dev, pq, pkt, i);
-
-               kmem_cache_free(pq->pkt_slab, pkt);
-       }
-}
-
-/*
- * copy headers, coalesce etc -- pq->lock must be held
- *
- * we queue all the packets to list, returning the
- * number of bytes total.  list must be empty initially,
- * as, if there is an error we clean it...
- */
-static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
-                                     struct ipath_user_sdma_queue *pq,
-                                     struct list_head *list,
-                                     const struct iovec *iov,
-                                     unsigned long niov,
-                                     int maxpkts)
-{
-       unsigned long idx = 0;
-       int ret = 0;
-       int npkts = 0;
-       struct page *page = NULL;
-       __le32 *pbc;
-       dma_addr_t dma_addr;
-       struct ipath_user_sdma_pkt *pkt = NULL;
-       size_t len;
-       size_t nw;
-       u32 counter = pq->counter;
-       int dma_mapped = 0;
-
-       while (idx < niov && npkts < maxpkts) {
-               const unsigned long addr = (unsigned long) iov[idx].iov_base;
-               const unsigned long idx_save = idx;
-               unsigned pktnw;
-               unsigned pktnwc;
-               int nfrags = 0;
-               int npages = 0;
-               int cfur;
-
-               dma_mapped = 0;
-               len = iov[idx].iov_len;
-               nw = len >> 2;
-               page = NULL;
-
-               pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
-               if (!pkt) {
-                       ret = -ENOMEM;
-                       goto free_list;
-               }
-
-               if (len < IPATH_USER_SDMA_MIN_HEADER_LENGTH ||
-                   len > PAGE_SIZE || len & 3 || addr & 3) {
-                       ret = -EINVAL;
-                       goto free_pkt;
-               }
-
-               if (len == IPATH_USER_SDMA_EXP_HEADER_LENGTH)
-                       pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
-                                            &dma_addr);
-               else
-                       pbc = NULL;
-
-               if (!pbc) {
-                       page = alloc_page(GFP_KERNEL);
-                       if (!page) {
-                               ret = -ENOMEM;
-                               goto free_pkt;
-                       }
-                       pbc = kmap(page);
-               }
-
-               cfur = copy_from_user(pbc, iov[idx].iov_base, len);
-               if (cfur) {
-                       ret = -EFAULT;
-                       goto free_pbc;
-               }
-
-               /*
-                * this assignment is a bit strange.  it's because
-                * the pbc counts the number of 32 bit words in the full
-                * packet _except_ the first word of the pbc itself...
-                */
-               pktnwc = nw - 1;
-
-               /*
-                * pktnw computation yields the number of 32 bit words
-                * that the caller has indicated in the PBC.  note that
-                * this is one less than the total number of words that
-                * goes to the send DMA engine as the first 32 bit word
-                * of the PBC itself is not counted.  Armed with this count,
-                * we can verify that the packet is consistent with the
-                * iovec lengths.
-                */
-               pktnw = le32_to_cpu(*pbc) & IPATH_PBC_LENGTH_MASK;
-               if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
-                       ret = -EINVAL;
-                       goto free_pbc;
-               }
-
-
-               idx++;
-               while (pktnwc < pktnw && idx < niov) {
-                       const size_t slen = iov[idx].iov_len;
-                       const unsigned long faddr =
-                               (unsigned long) iov[idx].iov_base;
-
-                       if (slen & 3 || faddr & 3 || !slen ||
-                           slen > PAGE_SIZE) {
-                               ret = -EINVAL;
-                               goto free_pbc;
-                       }
-
-                       npages++;
-                       if ((faddr & PAGE_MASK) !=
-                           ((faddr + slen - 1) & PAGE_MASK))
-                               npages++;
-
-                       pktnwc += slen >> 2;
-                       idx++;
-                       nfrags++;
-               }
-
-               if (pktnwc != pktnw) {
-                       ret = -EINVAL;
-                       goto free_pbc;
-               }
-
-               if (page) {
-                       dma_addr = dma_map_page(&dd->pcidev->dev,
-                                               page, 0, len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
-                               ret = -ENOMEM;
-                               goto free_pbc;
-                       }
-
-                       dma_mapped = 1;
-               }
-
-               ipath_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
-                                           page, pbc, dma_addr);
-
-               if (nfrags) {
-                       ret = ipath_user_sdma_init_payload(dd, pq, pkt,
-                                                          iov + idx_save + 1,
-                                                          nfrags, npages);
-                       if (ret < 0)
-                               goto free_pbc_dma;
-               }
-
-               counter++;
-               npkts++;
-
-               list_add_tail(&pkt->list, list);
-       }
-
-       ret = idx;
-       goto done;
-
-free_pbc_dma:
-       if (dma_mapped)
-               dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
-free_pbc:
-       if (page) {
-               kunmap(page);
-               __free_page(page);
-       } else
-               dma_pool_free(pq->header_cache, pbc, dma_addr);
-free_pkt:
-       kmem_cache_free(pq->pkt_slab, pkt);
-free_list:
-       ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
-done:
-       return ret;
-}
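
The PBC validation above cross-checks the 32-bit word count the caller wrote into PBC word 0 against the lengths of the header and payload iovec fragments, remembering that the first PBC word is not itself counted. A sketch of that consistency check, collapsed here into a single comparison (the mask matches IPATH_PBC_LENGTH_MASK defined above; the driver does the same accounting fragment by fragment):

#include <stdint.h>
#include <stdio.h>

#define PBC_LENGTH_MASK ((1u << 11) - 1)        /* lower 11 bits of PBC word 0 */

/*
 * Non-zero if the word count in PBC word 0 matches the header plus payload
 * lengths (in 32-bit words), excluding the first PBC word itself.
 */
static int pbc_length_ok(uint32_t pbc0, uint32_t hdr_bytes, uint32_t payload_bytes)
{
        uint32_t pktnw = pbc0 & PBC_LENGTH_MASK;
        uint32_t pktnwc = (hdr_bytes >> 2) - 1 + (payload_bytes >> 2);

        return pktnw == pktnwc;
}

int main(void)
{
        /* 64-byte header + 256-byte payload -> (16 - 1) + 64 = 79 words */
        printf("%d\n", pbc_length_ok(79, 64, 256));
        return 0;
}
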
-
-static void ipath_user_sdma_set_complete_counter(struct ipath_user_sdma_queue *pq,
-                                                u32 c)
-{
-       pq->sent_counter = c;
-}
-
-/* try to clean out queue -- needs pq->lock */
-static int ipath_user_sdma_queue_clean(const struct ipath_devdata *dd,
-                                      struct ipath_user_sdma_queue *pq)
-{
-       struct list_head free_list;
-       struct ipath_user_sdma_pkt *pkt;
-       struct ipath_user_sdma_pkt *pkt_prev;
-       int ret = 0;
-
-       INIT_LIST_HEAD(&free_list);
-
-       list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
-               s64 descd = dd->ipath_sdma_descq_removed - pkt->added;
-
-               if (descd < 0)
-                       break;
-
-               list_move_tail(&pkt->list, &free_list);
-
-               /* one more packet cleaned */
-               ret++;
-       }
-
-       if (!list_empty(&free_list)) {
-               u32 counter;
-
-               pkt = list_entry(free_list.prev,
-                                struct ipath_user_sdma_pkt, list);
-               counter = pkt->counter;
-
-               ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
-               ipath_user_sdma_set_complete_counter(pq, counter);
-       }
-
-       return ret;
-}
-
-void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq)
-{
-       if (!pq)
-               return;
-
-       kmem_cache_destroy(pq->pkt_slab);
-       dma_pool_destroy(pq->header_cache);
-       kfree(pq);
-}
-
-/* clean descriptor queue, returns > 0 if some elements cleaned */
-static int ipath_user_sdma_hwqueue_clean(struct ipath_devdata *dd)
-{
-       int ret;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-       ret = ipath_sdma_make_progress(dd);
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       return ret;
-}
-
-/* we're in close, drain packets so that we can cleanup successfully... */
-void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
-                                struct ipath_user_sdma_queue *pq)
-{
-       int i;
-
-       if (!pq)
-               return;
-
-       for (i = 0; i < 100; i++) {
-               mutex_lock(&pq->lock);
-               if (list_empty(&pq->sent)) {
-                       mutex_unlock(&pq->lock);
-                       break;
-               }
-               ipath_user_sdma_hwqueue_clean(dd);
-               ipath_user_sdma_queue_clean(dd, pq);
-               mutex_unlock(&pq->lock);
-               msleep(10);
-       }
-
-       if (!list_empty(&pq->sent)) {
-               struct list_head free_list;
-
-               printk(KERN_INFO "drain: lists not empty: forcing!\n");
-               INIT_LIST_HEAD(&free_list);
-               mutex_lock(&pq->lock);
-               list_splice_init(&pq->sent, &free_list);
-               ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
-               mutex_unlock(&pq->lock);
-       }
-}
-
-static inline __le64 ipath_sdma_make_desc0(struct ipath_devdata *dd,
-                                          u64 addr, u64 dwlen, u64 dwoffset)
-{
-       return cpu_to_le64(/* SDmaPhyAddr[31:0] */
-                          ((addr & 0xfffffffcULL) << 32) |
-                          /* SDmaGeneration[1:0] */
-                          ((dd->ipath_sdma_generation & 3ULL) << 30) |
-                          /* SDmaDwordCount[10:0] */
-                          ((dwlen & 0x7ffULL) << 16) |
-                          /* SDmaBufOffset[12:2] */
-                          (dwoffset & 0x7ffULL));
-}
-
-static inline __le64 ipath_sdma_make_first_desc0(__le64 descq)
-{
-       return descq | cpu_to_le64(1ULL << 12);
-}
-
-static inline __le64 ipath_sdma_make_last_desc0(__le64 descq)
-{
-                                             /* last */  /* dma head */
-       return descq | cpu_to_le64(1ULL << 11 | 1ULL << 13);
-}
-
-static inline __le64 ipath_sdma_make_desc1(u64 addr)
-{
-       /* SDmaPhyAddr[47:32] */
-       return cpu_to_le64(addr >> 32);
-}
-
-static void ipath_user_sdma_send_frag(struct ipath_devdata *dd,
-                                     struct ipath_user_sdma_pkt *pkt, int idx,
-                                     unsigned ofs, u16 tail)
-{
-       const u64 addr = (u64) pkt->addr[idx].addr +
-               (u64) pkt->addr[idx].offset;
-       const u64 dwlen = (u64) pkt->addr[idx].length / 4;
-       __le64 *descqp;
-       __le64 descq0;
-
-       descqp = &dd->ipath_sdma_descq[tail].qw[0];
-
-       descq0 = ipath_sdma_make_desc0(dd, addr, dwlen, ofs);
-       if (idx == 0)
-               descq0 = ipath_sdma_make_first_desc0(descq0);
-       if (idx == pkt->naddr - 1)
-               descq0 = ipath_sdma_make_last_desc0(descq0);
-
-       descqp[0] = descq0;
-       descqp[1] = ipath_sdma_make_desc1(addr);
-}
-
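For reference, a stand-alone, host-endian sketch of the descriptor qword the helpers above build (field positions follow the comments in ipath_sdma_make_desc0(); the driver additionally byte-swaps the result with cpu_to_le64(), and the address and length below are made-up illustration values):

#include <assert.h>
#include <stdint.h>

/* Pack one SDMA descriptor word the way ipath_sdma_make_desc0() does,
 * but in plain host-endian uint64_t so it can be checked in user space. */
static uint64_t make_desc0(uint64_t addr, uint64_t dwlen,
                           uint64_t dwoffset, unsigned generation)
{
        return ((addr & 0xfffffffcULL) << 32) | /* SDmaPhyAddr[31:0]    */
               ((generation & 3ULL) << 30) |    /* SDmaGeneration[1:0]  */
               ((dwlen & 0x7ffULL) << 16) |     /* SDmaDwordCount[10:0] */
               (dwoffset & 0x7ffULL);           /* SDmaBufOffset[12:2]  */
}

int main(void)
{
        uint64_t d0 = make_desc0(0x12345678, 64, 0, 1);

        d0 |= 1ULL << 12;               /* first descriptor of the packet */
        d0 |= 1ULL << 11 | 1ULL << 13;  /* last descriptor + DMA head     */

        assert((d0 >> 32) == 0x12345678);       /* low 32 address bits */
        assert(((d0 >> 16) & 0x7ff) == 64);     /* payload dword count */
        assert(((d0 >> 30) & 3) == 1);          /* generation          */
        return 0;
}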
-/* pq->lock must be held, get packets on the wire... */
-static int ipath_user_sdma_push_pkts(struct ipath_devdata *dd,
-                                    struct ipath_user_sdma_queue *pq,
-                                    struct list_head *pktlist)
-{
-       int ret = 0;
-       unsigned long flags;
-       u16 tail;
-
-       if (list_empty(pktlist))
-               return 0;
-
-       if (unlikely(!(dd->ipath_flags & IPATH_LINKACTIVE)))
-               return -ECOMM;
-
-       spin_lock_irqsave(&dd->ipath_sdma_lock, flags);
-
-       if (unlikely(dd->ipath_sdma_status & IPATH_SDMA_ABORT_MASK)) {
-               ret = -ECOMM;
-               goto unlock;
-       }
-
-       tail = dd->ipath_sdma_descq_tail;
-       while (!list_empty(pktlist)) {
-               struct ipath_user_sdma_pkt *pkt =
-                       list_entry(pktlist->next, struct ipath_user_sdma_pkt,
-                                  list);
-               int i;
-               unsigned ofs = 0;
-               u16 dtail = tail;
-
-               if (pkt->naddr > ipath_sdma_descq_freecnt(dd))
-                       goto unlock_check_tail;
-
-               for (i = 0; i < pkt->naddr; i++) {
-                       ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail);
-                       ofs += pkt->addr[i].length >> 2;
-
-                       if (++tail == dd->ipath_sdma_descq_cnt) {
-                               tail = 0;
-                               ++dd->ipath_sdma_generation;
-                       }
-               }
-
-               if ((ofs<<2) > dd->ipath_ibmaxlen) {
-                       ipath_dbg("packet size %X > ibmax %X, fail\n",
-                               ofs<<2, dd->ipath_ibmaxlen);
-                       ret = -EMSGSIZE;
-                       goto unlock;
-               }
-
-               /*
-                * if the packet is >= 2KB mtu equivalent, we have to use
-                * the large buffers, and have to mark each descriptor as
-                * part of a large buffer packet.
-                */
-               if (ofs >= IPATH_SMALLBUF_DWORDS) {
-                       for (i = 0; i < pkt->naddr; i++) {
-                               dd->ipath_sdma_descq[dtail].qw[0] |=
-                                       cpu_to_le64(1ULL << 14);
-                               if (++dtail == dd->ipath_sdma_descq_cnt)
-                                       dtail = 0;
-                       }
-               }
-
-               dd->ipath_sdma_descq_added += pkt->naddr;
-               pkt->added = dd->ipath_sdma_descq_added;
-               list_move_tail(&pkt->list, &pq->sent);
-               ret++;
-       }
-
-unlock_check_tail:
-       /* advance the tail on the chip if necessary */
-       if (dd->ipath_sdma_descq_tail != tail) {
-               wmb();
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail);
-               dd->ipath_sdma_descq_tail = tail;
-       }
-
-unlock:
-       spin_unlock_irqrestore(&dd->ipath_sdma_lock, flags);
-
-       return ret;
-}
-
-int ipath_user_sdma_writev(struct ipath_devdata *dd,
-                          struct ipath_user_sdma_queue *pq,
-                          const struct iovec *iov,
-                          unsigned long dim)
-{
-       int ret = 0;
-       struct list_head list;
-       int npkts = 0;
-
-       INIT_LIST_HEAD(&list);
-
-       mutex_lock(&pq->lock);
-
-       if (dd->ipath_sdma_descq_added != dd->ipath_sdma_descq_removed) {
-               ipath_user_sdma_hwqueue_clean(dd);
-               ipath_user_sdma_queue_clean(dd, pq);
-       }
-
-       while (dim) {
-               const int mxp = 8;
-
-               ret = ipath_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
-               if (ret <= 0)
-                       goto done_unlock;
-               else {
-                       dim -= ret;
-                       iov += ret;
-               }
-
-               /* force packets onto the sdma hw queue... */
-               if (!list_empty(&list)) {
-                       /*
-                        * lazily clean hw queue.  the 4 is a guess of about
-                        * how many sdma descriptors a packet will take (it
-                        * doesn't have to be perfect).
-                        */
-                       if (ipath_sdma_descq_freecnt(dd) < ret * 4) {
-                               ipath_user_sdma_hwqueue_clean(dd);
-                               ipath_user_sdma_queue_clean(dd, pq);
-                       }
-
-                       ret = ipath_user_sdma_push_pkts(dd, pq, &list);
-                       if (ret < 0)
-                               goto done_unlock;
-                       else {
-                               npkts += ret;
-                               pq->counter += ret;
-
-                               if (!list_empty(&list))
-                                       goto done_unlock;
-                       }
-               }
-       }
-
-done_unlock:
-       if (!list_empty(&list))
-               ipath_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
-       mutex_unlock(&pq->lock);
-
-       return (ret < 0) ? ret : npkts;
-}
-
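The "4 descriptors per packet" guess in ipath_user_sdma_writev() above boils down to a one-line test; a stand-alone restatement (the numbers in the asserts are illustrative only):

#include <assert.h>

/* Sweep the hardware queue before pushing only when fewer than
 * (packets just built) * 4 descriptors are still free. */
static int should_clean_hw_queue(int free_descriptors, int pkts_just_built)
{
        return free_descriptors < pkts_just_built * 4;
}

int main(void)
{
        assert(should_clean_hw_queue(30, 8));   /* 30 < 32: clean first */
        assert(!should_clean_hw_queue(64, 8));  /* plenty of room left  */
        return 0;
}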
-int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
-                                 struct ipath_user_sdma_queue *pq)
-{
-       int ret = 0;
-
-       mutex_lock(&pq->lock);
-       ipath_user_sdma_hwqueue_clean(dd);
-       ret = ipath_user_sdma_queue_clean(dd, pq);
-       mutex_unlock(&pq->lock);
-
-       return ret;
-}
-
-u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq)
-{
-       return pq->sent_counter;
-}
-
-u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq)
-{
-       return pq->counter;
-}
-
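One plausible way a consumer of these two counters can derive the number of packets still outstanding (a stand-alone sketch; the real consumers live elsewhere in the driver and in user space and are not shown here):

#include <assert.h>
#include <stdint.h>

/* Packets in flight = submitted (pq->counter) minus completed
 * (pq->sent_counter), computed in u32 so counter wraparound is harmless. */
static uint32_t sdma_outstanding(uint32_t inflight, uint32_t complete)
{
        return inflight - complete;
}

int main(void)
{
        assert(sdma_outstanding(103, 100) == 3);        /* 3 packets pending */
        assert(sdma_outstanding(1, 0xFFFFFFFEu) == 3);  /* across wraparound */
        return 0;
}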
diff --git a/drivers/staging/rdma/ipath/ipath_user_sdma.h b/drivers/staging/rdma/ipath/ipath_user_sdma.h
deleted file mode 100644 (file)
index fc76316..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (c) 2007, 2008 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#include <linux/device.h>
-
-struct ipath_user_sdma_queue;
-
-struct ipath_user_sdma_queue *
-ipath_user_sdma_queue_create(struct device *dev, int unit, int port, int sport);
-void ipath_user_sdma_queue_destroy(struct ipath_user_sdma_queue *pq);
-
-int ipath_user_sdma_writev(struct ipath_devdata *dd,
-                          struct ipath_user_sdma_queue *pq,
-                          const struct iovec *iov,
-                          unsigned long dim);
-
-int ipath_user_sdma_make_progress(struct ipath_devdata *dd,
-                                 struct ipath_user_sdma_queue *pq);
-
-void ipath_user_sdma_queue_drain(struct ipath_devdata *dd,
-                                struct ipath_user_sdma_queue *pq);
-
-u32 ipath_user_sdma_complete_counter(const struct ipath_user_sdma_queue *pq);
-u32 ipath_user_sdma_inflight_counter(struct ipath_user_sdma_queue *pq);
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.c b/drivers/staging/rdma/ipath/ipath_verbs.c
deleted file mode 100644 (file)
index 53f9dca..0000000
+++ /dev/null
@@ -1,2376 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <rdma/ib_mad.h>
-#include <rdma/ib_user_verbs.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/utsname.h>
-#include <linux/rculist.h>
-
-#include "ipath_kernel.h"
-#include "ipath_verbs.h"
-#include "ipath_common.h"
-
-static unsigned int ib_ipath_qp_table_size = 251;
-module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
-MODULE_PARM_DESC(qp_table_size, "QP table size");
-
-unsigned int ib_ipath_lkey_table_size = 12;
-module_param_named(lkey_table_size, ib_ipath_lkey_table_size, uint,
-                  S_IRUGO);
-MODULE_PARM_DESC(lkey_table_size,
-                "LKEY table size in bits (2^n, 1 <= n <= 23)");
-
-static unsigned int ib_ipath_max_pds = 0xFFFF;
-module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_pds,
-                "Maximum number of protection domains to support");
-
-static unsigned int ib_ipath_max_ahs = 0xFFFF;
-module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
-
-unsigned int ib_ipath_max_cqes = 0x2FFFF;
-module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_cqes,
-                "Maximum number of completion queue entries to support");
-
-unsigned int ib_ipath_max_cqs = 0x1FFFF;
-module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
-
-unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
-module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
-                  S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
-
-unsigned int ib_ipath_max_qps = 16384;
-module_param_named(max_qps, ib_ipath_max_qps, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support");
-
-unsigned int ib_ipath_max_sges = 0x60;
-module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
-
-unsigned int ib_ipath_max_mcast_grps = 16384;
-module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
-                  S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_mcast_grps,
-                "Maximum number of multicast groups to support");
-
-unsigned int ib_ipath_max_mcast_qp_attached = 16;
-module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
-                  uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_mcast_qp_attached,
-                "Maximum number of attached QPs to support");
-
-unsigned int ib_ipath_max_srqs = 1024;
-module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
-
-unsigned int ib_ipath_max_srq_sges = 128;
-module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
-                  uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
-
-unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
-module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
-                  uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
-
-static unsigned int ib_ipath_disable_sma;
-module_param_named(disable_sma, ib_ipath_disable_sma, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(disable_sma, "Disable the SMA");
-
-/*
- * Note that it is OK to post send work requests in the SQE and ERR
- * states; ipath_do_send() will process them and generate error
- * completions as per IB 1.2 C10-96.
- */
-const int ib_ipath_state_ops[IB_QPS_ERR + 1] = {
-       [IB_QPS_RESET] = 0,
-       [IB_QPS_INIT] = IPATH_POST_RECV_OK,
-       [IB_QPS_RTR] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK,
-       [IB_QPS_RTS] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-           IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK |
-           IPATH_PROCESS_NEXT_SEND_OK,
-       [IB_QPS_SQD] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-           IPATH_POST_SEND_OK | IPATH_PROCESS_SEND_OK,
-       [IB_QPS_SQE] = IPATH_POST_RECV_OK | IPATH_PROCESS_RECV_OK |
-           IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
-       [IB_QPS_ERR] = IPATH_POST_RECV_OK | IPATH_FLUSH_RECV |
-           IPATH_POST_SEND_OK | IPATH_FLUSH_SEND,
-};
-
-struct ipath_ucontext {
-       struct ib_ucontext ibucontext;
-};
-
-static inline struct ipath_ucontext *to_iucontext(struct ib_ucontext
-                                                 *ibucontext)
-{
-       return container_of(ibucontext, struct ipath_ucontext, ibucontext);
-}
-
-/*
- * Translate ib_wr_opcode into ib_wc_opcode.
- */
-const enum ib_wc_opcode ib_ipath_wc_opcode[] = {
-       [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE,
-       [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
-       [IB_WR_SEND] = IB_WC_SEND,
-       [IB_WR_SEND_WITH_IMM] = IB_WC_SEND,
-       [IB_WR_RDMA_READ] = IB_WC_RDMA_READ,
-       [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP,
-       [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD
-};
-
-/*
- * System image GUID.
- */
-static __be64 sys_image_guid;
-
-/**
- * ipath_copy_sge - copy data to SGE memory
- * @ss: the SGE state
- * @data: the data to copy
- * @length: the length of the data
- */
-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length)
-{
-       struct ipath_sge *sge = &ss->sge;
-
-       while (length) {
-               u32 len = sge->length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               memcpy(sge->vaddr, data, len);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               data += len;
-               length -= len;
-       }
-}
-
-/**
- * ipath_skip_sge - skip over SGE memory - XXX almost dup of prev func
- * @ss: the SGE state
- * @length: the number of bytes to skip
- */
-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
-{
-       struct ipath_sge *sge = &ss->sge;
-
-       while (length) {
-               u32 len = sge->length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               length -= len;
-       }
-}
-
-/*
- * Count the number of DMA descriptors needed to send length bytes of data.
- * Don't modify the ipath_sge_state to get the count.
- * Return zero if any of the segments is not aligned.
- */
-static u32 ipath_count_sge(struct ipath_sge_state *ss, u32 length)
-{
-       struct ipath_sge *sg_list = ss->sg_list;
-       struct ipath_sge sge = ss->sge;
-       u8 num_sge = ss->num_sge;
-       u32 ndesc = 1;  /* count the header */
-
-       while (length) {
-               u32 len = sge.length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge.sge_length)
-                       len = sge.sge_length;
-               BUG_ON(len == 0);
-               if (((long) sge.vaddr & (sizeof(u32) - 1)) ||
-                   (len != length && (len & (sizeof(u32) - 1)))) {
-                       ndesc = 0;
-                       break;
-               }
-               ndesc++;
-               sge.vaddr += len;
-               sge.length -= len;
-               sge.sge_length -= len;
-               if (sge.sge_length == 0) {
-                       if (--num_sge)
-                               sge = *sg_list++;
-               } else if (sge.length == 0 && sge.mr != NULL) {
-                       if (++sge.n >= IPATH_SEGSZ) {
-                               if (++sge.m >= sge.mr->mapsz)
-                                       break;
-                               sge.n = 0;
-                       }
-                       sge.vaddr =
-                               sge.mr->map[sge.m]->segs[sge.n].vaddr;
-                       sge.length =
-                               sge.mr->map[sge.m]->segs[sge.n].length;
-               }
-               length -= len;
-       }
-       return ndesc;
-}
-
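The alignment rule that makes ipath_count_sge() return zero, restated for a single segment (a stand-alone sketch, not driver code; a zero result makes ipath_verbs_send_dma() below fall back to copying the payload into one bounce buffer):

#include <assert.h>
#include <stdint.h>

/* A segment can go straight to SDMA only if its address is 32-bit aligned
 * and, unless it is the final piece of the message, its length is a whole
 * number of 32-bit words. */
static int seg_dma_ok(unsigned long vaddr, uint32_t len, int is_last)
{
        if (vaddr & (sizeof(uint32_t) - 1))
                return 0;
        if (!is_last && (len & (sizeof(uint32_t) - 1)))
                return 0;
        return 1;
}

int main(void)
{
        assert(seg_dma_ok(0x1000, 256, 0));   /* aligned, whole dwords    */
        assert(!seg_dma_ok(0x1002, 256, 0));  /* misaligned start address */
        assert(seg_dma_ok(0x1000, 37, 1));    /* odd length OK if last    */
        return 0;
}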
-/*
- * Copy from the SGEs to the data buffer.
- */
-static void ipath_copy_from_sge(void *data, struct ipath_sge_state *ss,
-                               u32 length)
-{
-       struct ipath_sge *sge = &ss->sge;
-
-       while (length) {
-               u32 len = sge->length;
-
-               if (len > length)
-                       len = length;
-               if (len > sge->sge_length)
-                       len = sge->sge_length;
-               BUG_ON(len == 0);
-               memcpy(data, sge->vaddr, len);
-               sge->vaddr += len;
-               sge->length -= len;
-               sge->sge_length -= len;
-               if (sge->sge_length == 0) {
-                       if (--ss->num_sge)
-                               *sge = *ss->sg_list++;
-               } else if (sge->length == 0 && sge->mr != NULL) {
-                       if (++sge->n >= IPATH_SEGSZ) {
-                               if (++sge->m >= sge->mr->mapsz)
-                                       break;
-                               sge->n = 0;
-                       }
-                       sge->vaddr =
-                               sge->mr->map[sge->m]->segs[sge->n].vaddr;
-                       sge->length =
-                               sge->mr->map[sge->m]->segs[sge->n].length;
-               }
-               data += len;
-               length -= len;
-       }
-}
-
-/**
- * ipath_post_one_send - post one RC, UC, or UD send work request
- * @qp: the QP to post on
- * @wr: the work request to send
- */
-static int ipath_post_one_send(struct ipath_qp *qp, struct ib_send_wr *wr)
-{
-       struct ipath_swqe *wqe;
-       u32 next;
-       int i;
-       int j;
-       int acc;
-       int ret;
-       unsigned long flags;
-       struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-
-       spin_lock_irqsave(&qp->s_lock, flags);
-
-       if (qp->ibqp.qp_type != IB_QPT_SMI &&
-           !(dd->ipath_flags & IPATH_LINKACTIVE)) {
-               ret = -ENETDOWN;
-               goto bail;
-       }
-
-       /* Check that state is OK to post send. */
-       if (unlikely(!(ib_ipath_state_ops[qp->state] & IPATH_POST_SEND_OK)))
-               goto bail_inval;
-
-       /* IB spec says that num_sge == 0 is OK. */
-       if (wr->num_sge > qp->s_max_sge)
-               goto bail_inval;
-
-       /*
-        * Don't allow RDMA reads or atomic operations on UC QPs, or
-        * undefined opcodes.
-        * Make sure the buffer is large enough to hold the result for atomics.
-        */
-       if (qp->ibqp.qp_type == IB_QPT_UC) {
-               if ((unsigned) wr->opcode >= IB_WR_RDMA_READ)
-                       goto bail_inval;
-       } else if (qp->ibqp.qp_type == IB_QPT_UD) {
-               /* Check UD opcode */
-               if (wr->opcode != IB_WR_SEND &&
-                   wr->opcode != IB_WR_SEND_WITH_IMM)
-                       goto bail_inval;
-               /* Check UD destination address PD */
-               if (qp->ibqp.pd != ud_wr(wr)->ah->pd)
-                       goto bail_inval;
-       } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD)
-               goto bail_inval;
-       else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
-                  (wr->num_sge == 0 ||
-                   wr->sg_list[0].length < sizeof(u64) ||
-                   wr->sg_list[0].addr & (sizeof(u64) - 1)))
-               goto bail_inval;
-       else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic)
-               goto bail_inval;
-
-       next = qp->s_head + 1;
-       if (next >= qp->s_size)
-               next = 0;
-       if (next == qp->s_last) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       wqe = get_swqe_ptr(qp, qp->s_head);
-
-       if (qp->ibqp.qp_type != IB_QPT_UC &&
-           qp->ibqp.qp_type != IB_QPT_RC)
-               memcpy(&wqe->ud_wr, ud_wr(wr), sizeof(wqe->ud_wr));
-       else if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
-                wr->opcode == IB_WR_RDMA_WRITE ||
-                wr->opcode == IB_WR_RDMA_READ)
-               memcpy(&wqe->rdma_wr, rdma_wr(wr), sizeof(wqe->rdma_wr));
-       else if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
-                wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
-               memcpy(&wqe->atomic_wr, atomic_wr(wr), sizeof(wqe->atomic_wr));
-       else
-               memcpy(&wqe->wr, wr, sizeof(wqe->wr));
-
-       wqe->length = 0;
-       if (wr->num_sge) {
-               acc = wr->opcode >= IB_WR_RDMA_READ ?
-                       IB_ACCESS_LOCAL_WRITE : 0;
-               for (i = 0, j = 0; i < wr->num_sge; i++) {
-                       u32 length = wr->sg_list[i].length;
-                       int ok;
-
-                       if (length == 0)
-                               continue;
-                       ok = ipath_lkey_ok(qp, &wqe->sg_list[j],
-                                          &wr->sg_list[i], acc);
-                       if (!ok)
-                               goto bail_inval;
-                       wqe->length += length;
-                       j++;
-               }
-               wqe->wr.num_sge = j;
-       }
-       if (qp->ibqp.qp_type == IB_QPT_UC ||
-           qp->ibqp.qp_type == IB_QPT_RC) {
-               if (wqe->length > 0x80000000U)
-                       goto bail_inval;
-       } else if (wqe->length > to_idev(qp->ibqp.device)->dd->ipath_ibmtu)
-               goto bail_inval;
-       wqe->ssn = qp->s_ssn++;
-       qp->s_head = next;
-
-       ret = 0;
-       goto bail;
-
-bail_inval:
-       ret = -EINVAL;
-bail:
-       spin_unlock_irqrestore(&qp->s_lock, flags);
-       return ret;
-}
-
-/**
- * ipath_post_send - post a send on a QP
- * @ibqp: the QP to post the send on
- * @wr: the list of work requests to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int ipath_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
-                          struct ib_send_wr **bad_wr)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       int err = 0;
-
-       for (; wr; wr = wr->next) {
-               err = ipath_post_one_send(qp, wr);
-               if (err) {
-                       *bad_wr = wr;
-                       goto bail;
-               }
-       }
-
-       /* Try to do the send work in the caller's context. */
-       ipath_do_send((unsigned long) qp);
-
-bail:
-       return err;
-}
-
-/**
- * ipath_post_receive - post a receive on a QP
- * @ibqp: the QP to post the receive on
- * @wr: the WR to post
- * @bad_wr: the first bad WR is put here
- *
- * This may be called from interrupt context.
- */
-static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
-                             struct ib_recv_wr **bad_wr)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       struct ipath_rwq *wq = qp->r_rq.wq;
-       unsigned long flags;
-       int ret;
-
-       /* Check that state is OK to post receive. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_POST_RECV_OK) || !wq) {
-               *bad_wr = wr;
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       for (; wr; wr = wr->next) {
-               struct ipath_rwqe *wqe;
-               u32 next;
-               int i;
-
-               if ((unsigned) wr->num_sge > qp->r_rq.max_sge) {
-                       *bad_wr = wr;
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               spin_lock_irqsave(&qp->r_rq.lock, flags);
-               next = wq->head + 1;
-               if (next >= qp->r_rq.size)
-                       next = 0;
-               if (next == wq->tail) {
-                       spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-                       *bad_wr = wr;
-                       ret = -ENOMEM;
-                       goto bail;
-               }
-
-               wqe = get_rwqe_ptr(&qp->r_rq, wq->head);
-               wqe->wr_id = wr->wr_id;
-               wqe->num_sge = wr->num_sge;
-               for (i = 0; i < wr->num_sge; i++)
-                       wqe->sg_list[i] = wr->sg_list[i];
-               /* Make sure queue entry is written before the head index. */
-               smp_wmb();
-               wq->head = next;
-               spin_unlock_irqrestore(&qp->r_rq.lock, flags);
-       }
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_qp_rcv - process an incoming packet on a QP
- * @dev: the device the packet came on
- * @hdr: the packet header
- * @has_grh: true if the packet has a GRH
- * @data: the packet data
- * @tlen: the packet length
- * @qp: the QP the packet came on
- *
- * This is called from ipath_ib_rcv() to process an incoming packet
- * for the given QP.
- * Called at interrupt level.
- */
-static void ipath_qp_rcv(struct ipath_ibdev *dev,
-                        struct ipath_ib_header *hdr, int has_grh,
-                        void *data, u32 tlen, struct ipath_qp *qp)
-{
-       /* Check for valid receive state. */
-       if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
-               dev->n_pkt_drops++;
-               return;
-       }
-
-       switch (qp->ibqp.qp_type) {
-       case IB_QPT_SMI:
-       case IB_QPT_GSI:
-               if (ib_ipath_disable_sma)
-                       break;
-               /* FALLTHROUGH */
-       case IB_QPT_UD:
-               ipath_ud_rcv(dev, hdr, has_grh, data, tlen, qp);
-               break;
-
-       case IB_QPT_RC:
-               ipath_rc_rcv(dev, hdr, has_grh, data, tlen, qp);
-               break;
-
-       case IB_QPT_UC:
-               ipath_uc_rcv(dev, hdr, has_grh, data, tlen, qp);
-               break;
-
-       default:
-               break;
-       }
-}
-
-/**
- * ipath_ib_rcv - process an incoming packet
- * @arg: the device pointer
- * @rhdr: the header of the packet
- * @data: the packet data
- * @tlen: the packet length
- *
- * This is called from ipath_kreceive() to process an incoming packet at
- * interrupt level. Tlen is the length of the header + data + CRC in bytes.
- */
-void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
-                 u32 tlen)
-{
-       struct ipath_ib_header *hdr = rhdr;
-       struct ipath_other_headers *ohdr;
-       struct ipath_qp *qp;
-       u32 qp_num;
-       int lnh;
-       u8 opcode;
-       u16 lid;
-
-       if (unlikely(dev == NULL))
-               goto bail;
-
-       if (unlikely(tlen < 24)) {      /* LRH+BTH+CRC */
-               dev->rcv_errors++;
-               goto bail;
-       }
-
-       /* Check for a valid destination LID (see ch. 7.11.1). */
-       lid = be16_to_cpu(hdr->lrh[1]);
-       if (lid < IPATH_MULTICAST_LID_BASE) {
-               lid &= ~((1 << dev->dd->ipath_lmc) - 1);
-               if (unlikely(lid != dev->dd->ipath_lid)) {
-                       dev->rcv_errors++;
-                       goto bail;
-               }
-       }
-
-       /* Check for GRH */
-       lnh = be16_to_cpu(hdr->lrh[0]) & 3;
-       if (lnh == IPATH_LRH_BTH)
-               ohdr = &hdr->u.oth;
-       else if (lnh == IPATH_LRH_GRH)
-               ohdr = &hdr->u.l.oth;
-       else {
-               dev->rcv_errors++;
-               goto bail;
-       }
-
-       opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0x7f;
-       dev->opstats[opcode].n_bytes += tlen;
-       dev->opstats[opcode].n_packets++;
-
-       /* Get the destination QP number. */
-       qp_num = be32_to_cpu(ohdr->bth[1]) & IPATH_QPN_MASK;
-       if (qp_num == IPATH_MULTICAST_QPN) {
-               struct ipath_mcast *mcast;
-               struct ipath_mcast_qp *p;
-
-               if (lnh != IPATH_LRH_GRH) {
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-               mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
-               if (mcast == NULL) {
-                       dev->n_pkt_drops++;
-                       goto bail;
-               }
-               dev->n_multicast_rcv++;
-               list_for_each_entry_rcu(p, &mcast->qp_list, list)
-                       ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
-               /*
-                * Notify ipath_multicast_detach() if it is waiting for us
-                * to finish.
-                */
-               if (atomic_dec_return(&mcast->refcount) <= 1)
-                       wake_up(&mcast->wait);
-       } else {
-               qp = ipath_lookup_qpn(&dev->qp_table, qp_num);
-               if (qp) {
-                       dev->n_unicast_rcv++;
-                       ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
-                                    tlen, qp);
-                       /*
-                        * Notify ipath_destroy_qp() if it is waiting
-                        * for us to finish.
-                        */
-                       if (atomic_dec_and_test(&qp->refcount))
-                               wake_up(&qp->wait);
-               } else
-                       dev->n_pkt_drops++;
-       }
-
-bail:;
-}
-
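The destination-LID test near the top of ipath_ib_rcv() relies on the LID mask control (LMC): a port answers to 2^LMC consecutive LIDs, so clearing the low LMC bits of the incoming DLID must give back the port's base LID. A stand-alone check with made-up values:

#include <assert.h>
#include <stdint.h>

/* Same masking as "lid &= ~((1 << lmc) - 1)" followed by the compare
 * against the port's own LID. */
static int dlid_matches(uint16_t dlid, uint16_t base_lid, unsigned lmc)
{
        return (uint16_t)(dlid & ~((1u << lmc) - 1)) == base_lid;
}

int main(void)
{
        assert(dlid_matches(0x13, 0x10, 2));   /* 0x10..0x13 all accepted */
        assert(!dlid_matches(0x14, 0x10, 2));  /* outside the LMC range   */
        return 0;
}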
-/**
- * ipath_ib_timer - verbs timer
- * @arg: the device pointer
- *
- * This is called from ipath_do_rcv_timer() at interrupt level to check for
- * QPs which need retransmits and to collect performance numbers.
- */
-static void ipath_ib_timer(struct ipath_ibdev *dev)
-{
-       struct ipath_qp *resend = NULL;
-       struct ipath_qp *rnr = NULL;
-       struct list_head *last;
-       struct ipath_qp *qp;
-       unsigned long flags;
-
-       if (dev == NULL)
-               return;
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       /* Start filling the next pending queue. */
-       if (++dev->pending_index >= ARRAY_SIZE(dev->pending))
-               dev->pending_index = 0;
-       /* Save any requests still in the new queue, they have timed out. */
-       last = &dev->pending[dev->pending_index];
-       while (!list_empty(last)) {
-               qp = list_entry(last->next, struct ipath_qp, timerwait);
-               list_del_init(&qp->timerwait);
-               qp->timer_next = resend;
-               resend = qp;
-               atomic_inc(&qp->refcount);
-       }
-       last = &dev->rnrwait;
-       if (!list_empty(last)) {
-               qp = list_entry(last->next, struct ipath_qp, timerwait);
-               if (--qp->s_rnr_timeout == 0) {
-                       do {
-                               list_del_init(&qp->timerwait);
-                               qp->timer_next = rnr;
-                               rnr = qp;
-                               atomic_inc(&qp->refcount);
-                               if (list_empty(last))
-                                       break;
-                               qp = list_entry(last->next, struct ipath_qp,
-                                               timerwait);
-                       } while (qp->s_rnr_timeout == 0);
-               }
-       }
-       /*
-        * We should only be in the started state if pma_sample_start != 0
-        */
-       if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_STARTED &&
-           --dev->pma_sample_start == 0) {
-               dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_RUNNING;
-               ipath_snapshot_counters(dev->dd, &dev->ipath_sword,
-                                       &dev->ipath_rword,
-                                       &dev->ipath_spkts,
-                                       &dev->ipath_rpkts,
-                                       &dev->ipath_xmit_wait);
-       }
-       if (dev->pma_sample_status == IB_PMA_SAMPLE_STATUS_RUNNING) {
-               if (dev->pma_sample_interval == 0) {
-                       u64 ta, tb, tc, td, te;
-
-                       dev->pma_sample_status = IB_PMA_SAMPLE_STATUS_DONE;
-                       ipath_snapshot_counters(dev->dd, &ta, &tb,
-                                               &tc, &td, &te);
-
-                       dev->ipath_sword = ta - dev->ipath_sword;
-                       dev->ipath_rword = tb - dev->ipath_rword;
-                       dev->ipath_spkts = tc - dev->ipath_spkts;
-                       dev->ipath_rpkts = td - dev->ipath_rpkts;
-                       dev->ipath_xmit_wait = te - dev->ipath_xmit_wait;
-               } else {
-                       dev->pma_sample_interval--;
-               }
-       }
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-       /* XXX What if timer fires again while this is running? */
-       while (resend != NULL) {
-               qp = resend;
-               resend = qp->timer_next;
-
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if (qp->s_last != qp->s_tail &&
-                   ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) {
-                       dev->n_timeouts++;
-                       ipath_restart_rc(qp, qp->s_last_psn + 1);
-               }
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-
-               /* Notify ipath_destroy_qp() if it is waiting. */
-               if (atomic_dec_and_test(&qp->refcount))
-                       wake_up(&qp->wait);
-       }
-       while (rnr != NULL) {
-               qp = rnr;
-               rnr = qp->timer_next;
-
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
-                       ipath_schedule_send(qp);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-
-               /* Notify ipath_destroy_qp() if it is waiting. */
-               if (atomic_dec_and_test(&qp->refcount))
-                       wake_up(&qp->wait);
-       }
-}
-
-static void update_sge(struct ipath_sge_state *ss, u32 length)
-{
-       struct ipath_sge *sge = &ss->sge;
-
-       sge->vaddr += length;
-       sge->length -= length;
-       sge->sge_length -= length;
-       if (sge->sge_length == 0) {
-               if (--ss->num_sge)
-                       *sge = *ss->sg_list++;
-       } else if (sge->length == 0 && sge->mr != NULL) {
-               if (++sge->n >= IPATH_SEGSZ) {
-                       if (++sge->m >= sge->mr->mapsz)
-                               return;
-                       sge->n = 0;
-               }
-               sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
-               sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
-       }
-}
-
-#ifdef __LITTLE_ENDIAN
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-       return data >> shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-       return data << shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-       data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
-       data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-       return data;
-}
-#else
-static inline u32 get_upper_bits(u32 data, u32 shift)
-{
-       return data << shift;
-}
-
-static inline u32 set_upper_bits(u32 data, u32 shift)
-{
-       return data >> shift;
-}
-
-static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
-{
-       data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
-       data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
-       return data;
-}
-#endif
-
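A stand-alone, little-endian exercise of the three helpers above, showing what copy_io() uses them for (merging bytes from an unaligned source into whole 32-bit PIO writes); the data values are made up:

#include <assert.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

static uint32_t get_upper_bits(uint32_t data, uint32_t shift)
{
        return data >> shift;           /* extract the bytes above 'shift' bits */
}

static uint32_t set_upper_bits(uint32_t data, uint32_t shift)
{
        return data << shift;           /* place bytes above 'shift' bits */
}

static uint32_t clear_upper_bytes(uint32_t data, uint32_t n, uint32_t off)
{
        data <<= ((sizeof(uint32_t) - n) * BITS_PER_BYTE);
        data >>= ((sizeof(uint32_t) - n - off) * BITS_PER_BYTE);
        return data;                    /* keep the low n bytes at byte offset off */
}

int main(void)
{
        /* source word 0xDDCCBBAA read from an address one byte past a
         * 32-bit boundary: the three usable bytes are BB CC DD */
        uint32_t v = get_upper_bits(0xDDCCBBAAu, 1 * BITS_PER_BYTE);

        assert(v == 0x00DDCCBB);
        /* merge them into a word that already holds one byte (0x11) */
        assert((0x00000011u | set_upper_bits(v, 1 * BITS_PER_BYTE))
               == 0xDDCCBB11u);
        /* keep only two bytes of a word, shifted to byte offset 1 */
        assert(clear_upper_bytes(0xDDCCBBAAu, 2, 1) == 0x00BBAA00u);
        return 0;
}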
-static void copy_io(u32 __iomem *piobuf, struct ipath_sge_state *ss,
-                   u32 length, unsigned flush_wc)
-{
-       u32 extra = 0;
-       u32 data = 0;
-       u32 last;
-
-       while (1) {
-               u32 len = ss->sge.length;
-               u32 off;
-
-               if (len > length)
-                       len = length;
-               if (len > ss->sge.sge_length)
-                       len = ss->sge.sge_length;
-               BUG_ON(len == 0);
-               /* If the source address is not aligned, try to align it. */
-               off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
-               if (off) {
-                       u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
-                                           ~(sizeof(u32) - 1));
-                       u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
-                       u32 y;
-
-                       y = sizeof(u32) - off;
-                       if (len > y)
-                               len = y;
-                       if (len + extra >= sizeof(u32)) {
-                               data |= set_upper_bits(v, extra *
-                                                      BITS_PER_BYTE);
-                               len = sizeof(u32) - extra;
-                               if (len == length) {
-                                       last = data;
-                                       break;
-                               }
-                               __raw_writel(data, piobuf);
-                               piobuf++;
-                               extra = 0;
-                               data = 0;
-                       } else {
-                               /* Clear unused upper bytes */
-                               data |= clear_upper_bytes(v, len, extra);
-                               if (len == length) {
-                                       last = data;
-                                       break;
-                               }
-                               extra += len;
-                       }
-               } else if (extra) {
-                       /* Source address is aligned. */
-                       u32 *addr = (u32 *) ss->sge.vaddr;
-                       int shift = extra * BITS_PER_BYTE;
-                       int ushift = 32 - shift;
-                       u32 l = len;
-
-                       while (l >= sizeof(u32)) {
-                               u32 v = *addr;
-
-                               data |= set_upper_bits(v, shift);
-                               __raw_writel(data, piobuf);
-                               data = get_upper_bits(v, ushift);
-                               piobuf++;
-                               addr++;
-                               l -= sizeof(u32);
-                       }
-                       /*
-                        * A few source bytes ('l' of them) are still left over.
-                        */
-                       if (l) {
-                               u32 v = *addr;
-
-                               if (l + extra >= sizeof(u32)) {
-                                       data |= set_upper_bits(v, shift);
-                                       len -= l + extra - sizeof(u32);
-                                       if (len == length) {
-                                               last = data;
-                                               break;
-                                       }
-                                       __raw_writel(data, piobuf);
-                                       piobuf++;
-                                       extra = 0;
-                                       data = 0;
-                               } else {
-                                       /* Clear unused upper bytes */
-                                       data |= clear_upper_bytes(v, l,
-                                                                 extra);
-                                       if (len == length) {
-                                               last = data;
-                                               break;
-                                       }
-                                       extra += l;
-                               }
-                       } else if (len == length) {
-                               last = data;
-                               break;
-                       }
-               } else if (len == length) {
-                       u32 w;
-
-                       /*
-                        * Need to round up for the last dword in the
-                        * packet.
-                        */
-                       w = (len + 3) >> 2;
-                       __iowrite32_copy(piobuf, ss->sge.vaddr, w - 1);
-                       piobuf += w - 1;
-                       last = ((u32 *) ss->sge.vaddr)[w - 1];
-                       break;
-               } else {
-                       u32 w = len >> 2;
-
-                       __iowrite32_copy(piobuf, ss->sge.vaddr, w);
-                       piobuf += w;
-
-                       extra = len & (sizeof(u32) - 1);
-                       if (extra) {
-                               u32 v = ((u32 *) ss->sge.vaddr)[w];
-
-                               /* Clear unused upper bytes */
-                               data = clear_upper_bytes(v, extra, 0);
-                       }
-               }
-               update_sge(ss, len);
-               length -= len;
-       }
-       /* Update address before sending packet. */
-       update_sge(ss, length);
-       if (flush_wc) {
-               /* must flush early everything before trigger word */
-               ipath_flush_wc();
-               __raw_writel(last, piobuf);
-               /* be sure trigger word is written */
-               ipath_flush_wc();
-       } else
-               __raw_writel(last, piobuf);
-}
-
-/*
- * Convert IB rate to delay multiplier.
- */
-unsigned ipath_ib_rate_to_mult(enum ib_rate rate)
-{
-       switch (rate) {
-       case IB_RATE_2_5_GBPS: return 8;
-       case IB_RATE_5_GBPS:   return 4;
-       case IB_RATE_10_GBPS:  return 2;
-       case IB_RATE_20_GBPS:  return 1;
-       default:               return 0;
-       }
-}
-
-/*
- * Convert delay multiplier to IB rate
- */
-static enum ib_rate ipath_mult_to_ib_rate(unsigned mult)
-{
-       switch (mult) {
-       case 8:  return IB_RATE_2_5_GBPS;
-       case 4:  return IB_RATE_5_GBPS;
-       case 2:  return IB_RATE_10_GBPS;
-       case 1:  return IB_RATE_20_GBPS;
-       default: return IB_RATE_PORT_CURRENT;
-       }
-}
-
-static inline struct ipath_verbs_txreq *get_txreq(struct ipath_ibdev *dev)
-{
-       struct ipath_verbs_txreq *tx = NULL;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       if (!list_empty(&dev->txreq_free)) {
-               struct list_head *l = dev->txreq_free.next;
-
-               list_del(l);
-               tx = list_entry(l, struct ipath_verbs_txreq, txreq.list);
-       }
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-       return tx;
-}
-
-static inline void put_txreq(struct ipath_ibdev *dev,
-                            struct ipath_verbs_txreq *tx)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       list_add(&tx->txreq.list, &dev->txreq_free);
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-}
-
-static void sdma_complete(void *cookie, int status)
-{
-       struct ipath_verbs_txreq *tx = cookie;
-       struct ipath_qp *qp = tx->qp;
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       unsigned long flags;
-       enum ib_wc_status ibs = status == IPATH_SDMA_TXREQ_S_OK ?
-               IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR;
-
-       if (atomic_dec_and_test(&qp->s_dma_busy)) {
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if (tx->wqe)
-                       ipath_send_complete(qp, tx->wqe, ibs);
-               if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
-                    qp->s_last != qp->s_head) ||
-                   (qp->s_flags & IPATH_S_WAIT_DMA))
-                       ipath_schedule_send(qp);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-               wake_up(&qp->wait_dma);
-       } else if (tx->wqe) {
-               spin_lock_irqsave(&qp->s_lock, flags);
-               ipath_send_complete(qp, tx->wqe, ibs);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-       }
-
-       if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
-               kfree(tx->txreq.map_addr);
-       put_txreq(dev, tx);
-
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-}
-
-static void decrement_dma_busy(struct ipath_qp *qp)
-{
-       unsigned long flags;
-
-       if (atomic_dec_and_test(&qp->s_dma_busy)) {
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if ((ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND &&
-                    qp->s_last != qp->s_head) ||
-                   (qp->s_flags & IPATH_S_WAIT_DMA))
-                       ipath_schedule_send(qp);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-               wake_up(&qp->wait_dma);
-       }
-}
-
-/*
- * Compute the number of clock cycles of delay before sending the next packet.
- * The multipliers reflect the number of clocks for the fastest rate so
- * one tick at 4xDDR is 8 ticks at 1xSDR.
- * If the destination port will take longer to receive a packet than
- * the outgoing link can send it, we need to delay sending the next packet
- * by the difference in time it takes the receiver to receive and the sender
- * to send this packet.
- * Note that this delay is always correct for UC and RC but not always
- * optimal for UD. For UD, the destination HCA can be different for each
- * packet, in which case, we could send packets to a different destination
- * while "waiting" for the delay. The overhead for doing this without
- * HW support is more than just paying the cost of delaying some packets
- * unnecessarily.
- */
-static inline unsigned ipath_pkt_delay(u32 plen, u8 snd_mult, u8 rcv_mult)
-{
-       return (rcv_mult > snd_mult) ?
-               (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
-}
-
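A worked example of ipath_pkt_delay() with made-up numbers, using the rate multipliers from ipath_ib_rate_to_mult() above (4X DDR has multiplier 1, 1X SDR has multiplier 8):

#include <assert.h>
#include <stdint.h>

/* A fast sender talking to a slow receiver must stall roughly
 * (rcv_mult - snd_mult) / 2 ticks per dword of the packet. */
static unsigned pkt_delay(uint32_t plen, uint8_t snd_mult, uint8_t rcv_mult)
{
        return (rcv_mult > snd_mult) ?
                (plen * (rcv_mult - snd_mult) + 1) >> 1 : 0;
}

int main(void)
{
        assert(pkt_delay(512, 1, 8) == 1792); /* 512-dword packet, 7x rate gap */
        assert(pkt_delay(512, 8, 1) == 0);    /* faster receiver: no delay     */
        return 0;
}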
-static int ipath_verbs_send_dma(struct ipath_qp *qp,
-                               struct ipath_ib_header *hdr, u32 hdrwords,
-                               struct ipath_sge_state *ss, u32 len,
-                               u32 plen, u32 dwords)
-{
-       struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
-       struct ipath_devdata *dd = dev->dd;
-       struct ipath_verbs_txreq *tx;
-       u32 *piobuf;
-       u32 control;
-       u32 ndesc;
-       int ret;
-
-       tx = qp->s_tx;
-       if (tx) {
-               qp->s_tx = NULL;
-               /* resend previously constructed packet */
-               atomic_inc(&qp->s_dma_busy);
-               ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx);
-               if (ret) {
-                       qp->s_tx = tx;
-                       decrement_dma_busy(qp);
-               }
-               goto bail;
-       }
-
-       tx = get_txreq(dev);
-       if (!tx) {
-               ret = -EBUSY;
-               goto bail;
-       }
-
-       /*
-        * Get the saved delay count we computed for the previous packet
-        * and save the delay count for this packet to be used next time
-        * we get here.
-        */
-       control = qp->s_pkt_delay;
-       qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
-
-       tx->qp = qp;
-       atomic_inc(&qp->refcount);
-       tx->wqe = qp->s_wqe;
-       tx->txreq.callback = sdma_complete;
-       tx->txreq.callback_cookie = tx;
-       tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST |
-               IPATH_SDMA_TXREQ_F_INTREQ | IPATH_SDMA_TXREQ_F_FREEDESC;
-       if (plen + 1 >= IPATH_SMALLBUF_DWORDS)
-               tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF;
-
-       /* VL15 packets bypass credit check */
-       if ((be16_to_cpu(hdr->lrh[0]) >> 12) == 15) {
-               control |= 1ULL << 31;
-               tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15;
-       }
-
-       if (len) {
-               /*
-                * Don't try to DMA if it takes more descriptors than
-                * the queue holds.
-                */
-               ndesc = ipath_count_sge(ss, len);
-               if (ndesc >= dd->ipath_sdma_descq_cnt)
-                       ndesc = 0;
-       } else
-               ndesc = 1;
-       if (ndesc) {
-               tx->hdr.pbc[0] = cpu_to_le32(plen);
-               tx->hdr.pbc[1] = cpu_to_le32(control);
-               memcpy(&tx->hdr.hdr, hdr, hdrwords << 2);
-               tx->txreq.sg_count = ndesc;
-               tx->map_len = (hdrwords + 2) << 2;
-               tx->txreq.map_addr = &tx->hdr;
-               atomic_inc(&qp->s_dma_busy);
-               ret = ipath_sdma_verbs_send(dd, ss, dwords, tx);
-               if (ret) {
-                       /* save ss and length in dwords */
-                       tx->ss = ss;
-                       tx->len = dwords;
-                       qp->s_tx = tx;
-                       decrement_dma_busy(qp);
-               }
-               goto bail;
-       }
-
-       /* Allocate a buffer and copy the header and payload to it. */
-       tx->map_len = (plen + 1) << 2;
-       piobuf = kmalloc(tx->map_len, GFP_ATOMIC);
-       if (unlikely(piobuf == NULL)) {
-               ret = -EBUSY;
-               goto err_tx;
-       }
-       tx->txreq.map_addr = piobuf;
-       tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF;
-       tx->txreq.sg_count = 1;
-
-       *piobuf++ = (__force u32) cpu_to_le32(plen);
-       *piobuf++ = (__force u32) cpu_to_le32(control);
-       memcpy(piobuf, hdr, hdrwords << 2);
-       ipath_copy_from_sge(piobuf + hdrwords, ss, len);
-
-       atomic_inc(&qp->s_dma_busy);
-       ret = ipath_sdma_verbs_send(dd, NULL, 0, tx);
-       /*
-        * If we couldn't queue the DMA request, save the info
-        * and try again later rather than destroying the
-        * buffer and undoing the side effects of the copy.
-        */
-       if (ret) {
-               tx->ss = NULL;
-               tx->len = 0;
-               qp->s_tx = tx;
-               decrement_dma_busy(qp);
-       }
-       dev->n_unaligned++;
-       goto bail;
-
-err_tx:
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-       put_txreq(dev, tx);
-bail:
-       return ret;
-}
-
-static int ipath_verbs_send_pio(struct ipath_qp *qp,
-                               struct ipath_ib_header *ibhdr, u32 hdrwords,
-                               struct ipath_sge_state *ss, u32 len,
-                               u32 plen, u32 dwords)
-{
-       struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-       u32 *hdr = (u32 *) ibhdr;
-       u32 __iomem *piobuf;
-       unsigned flush_wc;
-       u32 control;
-       int ret;
-       unsigned long flags;
-
-       piobuf = ipath_getpiobuf(dd, plen, NULL);
-       if (unlikely(piobuf == NULL)) {
-               ret = -EBUSY;
-               goto bail;
-       }
-
-       /*
-        * Get the saved delay count we computed for the previous packet
-        * and save the delay count for this packet to be used next time
-        * we get here.
-        */
-       control = qp->s_pkt_delay;
-       qp->s_pkt_delay = ipath_pkt_delay(plen, dd->delay_mult, qp->s_dmult);
-
-       /* VL15 packets bypass credit check */
-       if ((be16_to_cpu(ibhdr->lrh[0]) >> 12) == 15)
-               control |= 1ULL << 31;
-
-       /*
-        * Write the length to the control qword plus any needed flags.
-        * We have to flush after the PBC for correctness on some CPUs,
-        * or the WC buffer can be written out of order.
-        */
-       writeq(((u64) control << 32) | plen, piobuf);
-       piobuf += 2;
-
-       flush_wc = dd->ipath_flags & IPATH_PIO_FLUSH_WC;
-       if (len == 0) {
-               /*
-                * If there is just the header portion, must flush before
-                * writing last word of header for correctness, and after
-                * the last header word (trigger word).
-                */
-               if (flush_wc) {
-                       ipath_flush_wc();
-                       __iowrite32_copy(piobuf, hdr, hdrwords - 1);
-                       ipath_flush_wc();
-                       __raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
-                       ipath_flush_wc();
-               } else
-                       __iowrite32_copy(piobuf, hdr, hdrwords);
-               goto done;
-       }
-
-       if (flush_wc)
-               ipath_flush_wc();
-       __iowrite32_copy(piobuf, hdr, hdrwords);
-       piobuf += hdrwords;
-
-       /* The common case is aligned and contained in one segment. */
-       if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
-                  !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
-               u32 *addr = (u32 *) ss->sge.vaddr;
-
-               /* Update address before sending packet. */
-               update_sge(ss, len);
-               if (flush_wc) {
-                       __iowrite32_copy(piobuf, addr, dwords - 1);
-                       /* must flush early everything before trigger word */
-                       ipath_flush_wc();
-                       __raw_writel(addr[dwords - 1], piobuf + dwords - 1);
-                       /* be sure trigger word is written */
-                       ipath_flush_wc();
-               } else
-                       __iowrite32_copy(piobuf, addr, dwords);
-               goto done;
-       }
-       copy_io(piobuf, ss, len, flush_wc);
-done:
-       if (qp->s_wqe) {
-               spin_lock_irqsave(&qp->s_lock, flags);
-               ipath_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-       }
-       ret = 0;
-bail:
-       return ret;
-}
-
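/*
 * A minimal, self-contained sketch of the write-combining "trigger word"
 * ordering that ipath_verbs_send_pio() above relies on: copy everything
 * except the last 32-bit word, flush, then write the last word so the
 * hardware only ever observes a complete packet.  flush_wc() and the plain
 * uint32_t buffer are stand-ins for ipath_flush_wc() and the chip's PIO
 * buffer; this only illustrates the ordering, not the real MMIO accessors.
 */
#include <stdint.h>

static void flush_wc(void)
{
	/* stand-in for a store barrier that drains write-combining buffers */
	__sync_synchronize();
}

/* assumes dwords >= 1 */
static void copy_with_trigger(volatile uint32_t *piobuf,
			      const uint32_t *src, unsigned int dwords)
{
	unsigned int i;

	for (i = 0; i < dwords - 1; i++)	/* everything before the trigger word */
		piobuf[i] = src[i];
	flush_wc();				/* make the early words visible first */
	piobuf[dwords - 1] = src[dwords - 1];	/* trigger word written last */
	flush_wc();				/* be sure the trigger word is posted */
}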
-/**
- * ipath_verbs_send - send a packet
- * @qp: the QP to send on
- * @hdr: the packet header
- * @hdrwords: the number of 32-bit words in the header
- * @ss: the SGE to send
- * @len: the length of the packet in bytes
- */
-int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
-                    u32 hdrwords, struct ipath_sge_state *ss, u32 len)
-{
-       struct ipath_devdata *dd = to_idev(qp->ibqp.device)->dd;
-       u32 plen;
-       int ret;
-       u32 dwords = (len + 3) >> 2;
-
-       /*
-        * Calculate the send buffer trigger address.
-        * The +1 counts for the pbc control dword following the pbc length.
-        */
-       plen = hdrwords + dwords + 1;
-
-       /*
-        * VL15 packets (IB_QPT_SMI) will always use PIO, so we
-        * can defer SDMA restart until link goes ACTIVE without
-        * worrying about just how we got there.
-        */
-       if (qp->ibqp.qp_type == IB_QPT_SMI ||
-           !(dd->ipath_flags & IPATH_HAS_SEND_DMA))
-               ret = ipath_verbs_send_pio(qp, hdr, hdrwords, ss, len,
-                                          plen, dwords);
-       else
-               ret = ipath_verbs_send_dma(qp, hdr, hdrwords, ss, len,
-                                          plen, dwords);
-
-       return ret;
-}
-
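/*
 * A small worked example of the length arithmetic in ipath_verbs_send()
 * above: the payload is rounded up to 32-bit words and one extra dword is
 * added for the PBC control word that follows the PBC length.  The numbers
 * in main() are purely illustrative.
 */
#include <assert.h>

static unsigned int packet_dwords(unsigned int hdrwords, unsigned int len_bytes)
{
	unsigned int dwords = (len_bytes + 3) >> 2;	/* round payload up to dwords */

	return hdrwords + dwords + 1;			/* +1 for the PBC control dword */
}

int main(void)
{
	/* a 7-dword header with a 19-byte payload: 7 + 5 + 1 = 13 dwords */
	assert(packet_dwords(7, 19) == 13);
	return 0;
}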
-int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-                           u64 *rwords, u64 *spkts, u64 *rpkts,
-                           u64 *xmit_wait)
-{
-       int ret;
-
-       if (!(dd->ipath_flags & IPATH_INITTED)) {
-               /* no hardware, freeze, etc. */
-               ret = -EINVAL;
-               goto bail;
-       }
-       *swords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
-       *rwords = ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
-       *spkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
-       *rpkts = ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
-       *xmit_wait = ipath_snap_cntr(dd, dd->ipath_cregs->cr_sendstallcnt);
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_get_counters - get various chip counters
- * @dd: the infinipath device
- * @cntrs: counters are placed here
- *
- * Return the counters needed by recv_pma_get_portcounters().
- */
-int ipath_get_counters(struct ipath_devdata *dd,
-                      struct ipath_verbs_counters *cntrs)
-{
-       struct ipath_cregs const *crp = dd->ipath_cregs;
-       int ret;
-
-       if (!(dd->ipath_flags & IPATH_INITTED)) {
-               /* no hardware, freeze, etc. */
-               ret = -EINVAL;
-               goto bail;
-       }
-       cntrs->symbol_error_counter =
-               ipath_snap_cntr(dd, crp->cr_ibsymbolerrcnt);
-       cntrs->link_error_recovery_counter =
-               ipath_snap_cntr(dd, crp->cr_iblinkerrrecovcnt);
-       /*
-        * The link downed counter counts when the other side downs the
-        * connection.  We add in the number of times we downed the link
-        * due to local link integrity errors to compensate.
-        */
-       cntrs->link_downed_counter =
-               ipath_snap_cntr(dd, crp->cr_iblinkdowncnt);
-       cntrs->port_rcv_errors =
-               ipath_snap_cntr(dd, crp->cr_rxdroppktcnt) +
-               ipath_snap_cntr(dd, crp->cr_rcvovflcnt) +
-               ipath_snap_cntr(dd, crp->cr_portovflcnt) +
-               ipath_snap_cntr(dd, crp->cr_err_rlencnt) +
-               ipath_snap_cntr(dd, crp->cr_invalidrlencnt) +
-               ipath_snap_cntr(dd, crp->cr_errlinkcnt) +
-               ipath_snap_cntr(dd, crp->cr_erricrccnt) +
-               ipath_snap_cntr(dd, crp->cr_errvcrccnt) +
-               ipath_snap_cntr(dd, crp->cr_errlpcrccnt) +
-               ipath_snap_cntr(dd, crp->cr_badformatcnt) +
-               dd->ipath_rxfc_unsupvl_errs;
-       if (crp->cr_rxotherlocalphyerrcnt)
-               cntrs->port_rcv_errors +=
-                       ipath_snap_cntr(dd, crp->cr_rxotherlocalphyerrcnt);
-       if (crp->cr_rxvlerrcnt)
-               cntrs->port_rcv_errors +=
-                       ipath_snap_cntr(dd, crp->cr_rxvlerrcnt);
-       cntrs->port_rcv_remphys_errors =
-               ipath_snap_cntr(dd, crp->cr_rcvebpcnt);
-       cntrs->port_xmit_discards = ipath_snap_cntr(dd, crp->cr_unsupvlcnt);
-       cntrs->port_xmit_data = ipath_snap_cntr(dd, crp->cr_wordsendcnt);
-       cntrs->port_rcv_data = ipath_snap_cntr(dd, crp->cr_wordrcvcnt);
-       cntrs->port_xmit_packets = ipath_snap_cntr(dd, crp->cr_pktsendcnt);
-       cntrs->port_rcv_packets = ipath_snap_cntr(dd, crp->cr_pktrcvcnt);
-       cntrs->local_link_integrity_errors =
-               crp->cr_locallinkintegrityerrcnt ?
-               ipath_snap_cntr(dd, crp->cr_locallinkintegrityerrcnt) :
-               ((dd->ipath_flags & IPATH_GPIO_ERRINTRS) ?
-                dd->ipath_lli_errs : dd->ipath_lli_errors);
-       cntrs->excessive_buffer_overrun_errors =
-               crp->cr_excessbufferovflcnt ?
-               ipath_snap_cntr(dd, crp->cr_excessbufferovflcnt) :
-               dd->ipath_overrun_thresh_errs;
-       cntrs->vl15_dropped = crp->cr_vl15droppedpktcnt ?
-               ipath_snap_cntr(dd, crp->cr_vl15droppedpktcnt) : 0;
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_ib_piobufavail - callback when a PIO buffer is available
- * @arg: the device pointer
- *
- * This is called from ipath_intr() at interrupt level when a PIO buffer is
- * available after ipath_verbs_send() returned an error that no buffers were
- * available.  Ideally this would return 1 if we consumed all the PIO buffers
- * and still have QPs waiting for buffers; for now it just reschedules sends
- * for the waiting QPs and always returns zero.
- */
-int ipath_ib_piobufavail(struct ipath_ibdev *dev)
-{
-       struct list_head *list;
-       struct ipath_qp *qplist;
-       struct ipath_qp *qp;
-       unsigned long flags;
-
-       if (dev == NULL)
-               goto bail;
-
-       list = &dev->piowait;
-       qplist = NULL;
-
-       spin_lock_irqsave(&dev->pending_lock, flags);
-       while (!list_empty(list)) {
-               qp = list_entry(list->next, struct ipath_qp, piowait);
-               list_del_init(&qp->piowait);
-               qp->pio_next = qplist;
-               qplist = qp;
-               atomic_inc(&qp->refcount);
-       }
-       spin_unlock_irqrestore(&dev->pending_lock, flags);
-
-       while (qplist != NULL) {
-               qp = qplist;
-               qplist = qp->pio_next;
-
-               spin_lock_irqsave(&qp->s_lock, flags);
-               if (ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK)
-                       ipath_schedule_send(qp);
-               spin_unlock_irqrestore(&qp->s_lock, flags);
-
-               /* Notify ipath_destroy_qp() if it is waiting. */
-               if (atomic_dec_and_test(&qp->refcount))
-                       wake_up(&qp->wait);
-       }
-
-bail:
-       return 0;
-}
-
-static int ipath_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
-                             struct ib_udata *uhw)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-
-       if (uhw->inlen || uhw->outlen)
-               return -EINVAL;
-
-       memset(props, 0, sizeof(*props));
-
-       props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
-               IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
-               IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
-               IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
-       props->page_size_cap = PAGE_SIZE;
-       props->vendor_id =
-               IPATH_SRC_OUI_1 << 16 | IPATH_SRC_OUI_2 << 8 | IPATH_SRC_OUI_3;
-       props->vendor_part_id = dev->dd->ipath_deviceid;
-       props->hw_ver = dev->dd->ipath_pcirev;
-
-       props->sys_image_guid = dev->sys_image_guid;
-
-       props->max_mr_size = ~0ull;
-       props->max_qp = ib_ipath_max_qps;
-       props->max_qp_wr = ib_ipath_max_qp_wrs;
-       props->max_sge = ib_ipath_max_sges;
-       props->max_sge_rd = ib_ipath_max_sges;
-       props->max_cq = ib_ipath_max_cqs;
-       props->max_ah = ib_ipath_max_ahs;
-       props->max_cqe = ib_ipath_max_cqes;
-       props->max_mr = dev->lk_table.max;
-       props->max_fmr = dev->lk_table.max;
-       props->max_map_per_fmr = 32767;
-       props->max_pd = ib_ipath_max_pds;
-       props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
-       props->max_qp_init_rd_atom = 255;
-       /* props->max_res_rd_atom */
-       props->max_srq = ib_ipath_max_srqs;
-       props->max_srq_wr = ib_ipath_max_srq_wrs;
-       props->max_srq_sge = ib_ipath_max_srq_sges;
-       /* props->local_ca_ack_delay */
-       props->atomic_cap = IB_ATOMIC_GLOB;
-       props->max_pkeys = ipath_get_npkeys(dev->dd);
-       props->max_mcast_grp = ib_ipath_max_mcast_grps;
-       props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
-       props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
-               props->max_mcast_grp;
-
-       return 0;
-}
-
-const u8 ipath_cvt_physportstate[32] = {
-       [INFINIPATH_IBCS_LT_STATE_DISABLED] = IB_PHYSPORTSTATE_DISABLED,
-       [INFINIPATH_IBCS_LT_STATE_LINKUP] = IB_PHYSPORTSTATE_LINKUP,
-       [INFINIPATH_IBCS_LT_STATE_POLLACTIVE] = IB_PHYSPORTSTATE_POLL,
-       [INFINIPATH_IBCS_LT_STATE_POLLQUIET] = IB_PHYSPORTSTATE_POLL,
-       [INFINIPATH_IBCS_LT_STATE_SLEEPDELAY] = IB_PHYSPORTSTATE_SLEEP,
-       [INFINIPATH_IBCS_LT_STATE_SLEEPQUIET] = IB_PHYSPORTSTATE_SLEEP,
-       [INFINIPATH_IBCS_LT_STATE_CFGDEBOUNCE] =
-               IB_PHYSPORTSTATE_CFG_TRAIN,
-       [INFINIPATH_IBCS_LT_STATE_CFGRCVFCFG] =
-               IB_PHYSPORTSTATE_CFG_TRAIN,
-       [INFINIPATH_IBCS_LT_STATE_CFGWAITRMT] =
-               IB_PHYSPORTSTATE_CFG_TRAIN,
-       [INFINIPATH_IBCS_LT_STATE_CFGIDLE] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [INFINIPATH_IBCS_LT_STATE_RECOVERRETRAIN] =
-               IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-       [INFINIPATH_IBCS_LT_STATE_RECOVERWAITRMT] =
-               IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-       [INFINIPATH_IBCS_LT_STATE_RECOVERIDLE] =
-               IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
-       [0x10] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x11] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x13] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
-       [0x17] = IB_PHYSPORTSTATE_CFG_TRAIN
-};
-
-u32 ipath_get_cr_errpkey(struct ipath_devdata *dd)
-{
-       return ipath_read_creg32(dd, dd->ipath_cregs->cr_errpkey);
-}
-
-static int ipath_query_port(struct ib_device *ibdev,
-                           u8 port, struct ib_port_attr *props)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_devdata *dd = dev->dd;
-       enum ib_mtu mtu;
-       u16 lid = dd->ipath_lid;
-       u64 ibcstat;
-
-       memset(props, 0, sizeof(*props));
-       props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
-       props->lmc = dd->ipath_lmc;
-       props->sm_lid = dev->sm_lid;
-       props->sm_sl = dev->sm_sl;
-       ibcstat = dd->ipath_lastibcstat;
-       /* map LinkState to IB portinfo values.  */
-       props->state = ipath_ib_linkstate(dd, ibcstat) + 1;
-
-       /* See phys_state_show() */
-       props->phys_state = /* MEA: assumes shift == 0 */
-               ipath_cvt_physportstate[dd->ipath_lastibcstat &
-               dd->ibcs_lts_mask];
-       props->port_cap_flags = dev->port_cap_flags;
-       props->gid_tbl_len = 1;
-       props->max_msg_sz = 0x80000000;
-       props->pkey_tbl_len = ipath_get_npkeys(dd);
-       props->bad_pkey_cntr = ipath_get_cr_errpkey(dd) -
-               dev->z_pkey_violations;
-       props->qkey_viol_cntr = dev->qkey_violations;
-       props->active_width = dd->ipath_link_width_active;
-       /* See rate_show() */
-       props->active_speed = dd->ipath_link_speed_active;
-       props->max_vl_num = 1;          /* VLCap = VL0 */
-       props->init_type_reply = 0;
-
-       props->max_mtu = ipath_mtu4096 ? IB_MTU_4096 : IB_MTU_2048;
-       switch (dd->ipath_ibmtu) {
-       case 4096:
-               mtu = IB_MTU_4096;
-               break;
-       case 2048:
-               mtu = IB_MTU_2048;
-               break;
-       case 1024:
-               mtu = IB_MTU_1024;
-               break;
-       case 512:
-               mtu = IB_MTU_512;
-               break;
-       case 256:
-               mtu = IB_MTU_256;
-               break;
-       default:
-               mtu = IB_MTU_2048;
-       }
-       props->active_mtu = mtu;
-       props->subnet_timeout = dev->subnet_timeout;
-
-       return 0;
-}
-
-static int ipath_modify_device(struct ib_device *device,
-                              int device_modify_mask,
-                              struct ib_device_modify *device_modify)
-{
-       int ret;
-
-       if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
-                                  IB_DEVICE_MODIFY_NODE_DESC)) {
-               ret = -EOPNOTSUPP;
-               goto bail;
-       }
-
-       if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC)
-               memcpy(device->node_desc, device_modify->node_desc, 64);
-
-       if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
-               to_idev(device)->sys_image_guid =
-                       cpu_to_be64(device_modify->sys_image_guid);
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-static int ipath_modify_port(struct ib_device *ibdev,
-                            u8 port, int port_modify_mask,
-                            struct ib_port_modify *props)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-
-       dev->port_cap_flags |= props->set_port_cap_mask;
-       dev->port_cap_flags &= ~props->clr_port_cap_mask;
-       if (port_modify_mask & IB_PORT_SHUTDOWN)
-               ipath_set_linkstate(dev->dd, IPATH_IB_LINKDOWN);
-       if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
-               dev->qkey_violations = 0;
-       return 0;
-}
-
-static int ipath_query_gid(struct ib_device *ibdev, u8 port,
-                          int index, union ib_gid *gid)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       int ret;
-
-       if (index >= 1) {
-               ret = -EINVAL;
-               goto bail;
-       }
-       gid->global.subnet_prefix = dev->gid_prefix;
-       gid->global.interface_id = dev->dd->ipath_guid;
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
-                                   struct ib_ucontext *context,
-                                   struct ib_udata *udata)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       struct ipath_pd *pd;
-       struct ib_pd *ret;
-
-       /*
-        * This is actually totally arbitrary.  Some correctness tests
-        * assume there's a maximum number of PDs that can be allocated.
-        * We don't actually have this limit, but we fail the test if
-        * we allow allocations of more than we report for this value.
-        */
-
-       pd = kmalloc(sizeof *pd, GFP_KERNEL);
-       if (!pd) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       spin_lock(&dev->n_pds_lock);
-       if (dev->n_pds_allocated == ib_ipath_max_pds) {
-               spin_unlock(&dev->n_pds_lock);
-               kfree(pd);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       dev->n_pds_allocated++;
-       spin_unlock(&dev->n_pds_lock);
-
-       /* ib_alloc_pd() will initialize pd->ibpd. */
-       pd->user = udata != NULL;
-
-       ret = &pd->ibpd;
-
-bail:
-       return ret;
-}
-
-static int ipath_dealloc_pd(struct ib_pd *ibpd)
-{
-       struct ipath_pd *pd = to_ipd(ibpd);
-       struct ipath_ibdev *dev = to_idev(ibpd->device);
-
-       spin_lock(&dev->n_pds_lock);
-       dev->n_pds_allocated--;
-       spin_unlock(&dev->n_pds_lock);
-
-       kfree(pd);
-
-       return 0;
-}
-
-/**
- * ipath_create_ah - create an address handle
- * @pd: the protection domain
- * @ah_attr: the attributes of the AH
- *
- * This may be called from interrupt context.
- */
-static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
-                                    struct ib_ah_attr *ah_attr)
-{
-       struct ipath_ah *ah;
-       struct ib_ah *ret;
-       struct ipath_ibdev *dev = to_idev(pd->device);
-       unsigned long flags;
-
-       /* A multicast address requires a GRH (see ch. 8.4.1). */
-       if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
-           ah_attr->dlid != IPATH_PERMISSIVE_LID &&
-           !(ah_attr->ah_flags & IB_AH_GRH)) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       if (ah_attr->dlid == 0) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       if (ah_attr->port_num < 1 ||
-           ah_attr->port_num > pd->device->phys_port_cnt) {
-               ret = ERR_PTR(-EINVAL);
-               goto bail;
-       }
-
-       ah = kmalloc(sizeof *ah, GFP_ATOMIC);
-       if (!ah) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       spin_lock_irqsave(&dev->n_ahs_lock, flags);
-       if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
-               spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-               kfree(ah);
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       dev->n_ahs_allocated++;
-       spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
-       /* ib_create_ah() will initialize ah->ibah. */
-       ah->attr = *ah_attr;
-       ah->attr.static_rate = ipath_ib_rate_to_mult(ah_attr->static_rate);
-
-       ret = &ah->ibah;
-
-bail:
-       return ret;
-}
-
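/*
 * A sketch of the destination-LID check in ipath_create_ah() above: LIDs in
 * the multicast range must carry a GRH (IB spec ch. 8.4.1).  The numeric
 * values used here (0xC000 for the start of the multicast LID range, 0xFFFF
 * for the permissive LID) are assumptions taken from the InfiniBand LID
 * ranges rather than from this file.
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_MCAST_LID_BASE	0xC000	/* assumed multicast LID base */
#define DEMO_PERMISSIVE_LID	0xFFFF	/* assumed permissive LID */

static bool ah_needs_grh(uint16_t dlid)
{
	/* multicast destinations (other than the permissive LID) need a GRH */
	return dlid >= DEMO_MCAST_LID_BASE && dlid != DEMO_PERMISSIVE_LID;
}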
-/**
- * ipath_destroy_ah - destroy an address handle
- * @ibah: the AH to destroy
- *
- * This may be called from interrupt context.
- */
-static int ipath_destroy_ah(struct ib_ah *ibah)
-{
-       struct ipath_ibdev *dev = to_idev(ibah->device);
-       struct ipath_ah *ah = to_iah(ibah);
-       unsigned long flags;
-
-       spin_lock_irqsave(&dev->n_ahs_lock, flags);
-       dev->n_ahs_allocated--;
-       spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
-
-       kfree(ah);
-
-       return 0;
-}
-
-static int ipath_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
-{
-       struct ipath_ah *ah = to_iah(ibah);
-
-       *ah_attr = ah->attr;
-       ah_attr->static_rate = ipath_mult_to_ib_rate(ah->attr.static_rate);
-
-       return 0;
-}
-
-/**
- * ipath_get_npkeys - return the size of the PKEY table for port 0
- * @dd: the infinipath device
- */
-unsigned ipath_get_npkeys(struct ipath_devdata *dd)
-{
-       return ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys);
-}
-
-/**
- * ipath_get_pkey - return the indexed PKEY from the port PKEY table
- * @dd: the infinipath device
- * @index: the PKEY index
- */
-unsigned ipath_get_pkey(struct ipath_devdata *dd, unsigned index)
-{
-       unsigned ret;
-
-       /* always a kernel port, no locking needed */
-       if (index >= ARRAY_SIZE(dd->ipath_pd[0]->port_pkeys))
-               ret = 0;
-       else
-               ret = dd->ipath_pd[0]->port_pkeys[index];
-
-       return ret;
-}
-
-static int ipath_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
-                           u16 *pkey)
-{
-       struct ipath_ibdev *dev = to_idev(ibdev);
-       int ret;
-
-       if (index >= ipath_get_npkeys(dev->dd)) {
-               ret = -EINVAL;
-               goto bail;
-       }
-
-       *pkey = ipath_get_pkey(dev->dd, index);
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-/**
- * ipath_alloc_ucontext - allocate a ucontext
- * @ibdev: the infiniband device
- * @udata: not used by the InfiniPath driver
- */
-
-static struct ib_ucontext *ipath_alloc_ucontext(struct ib_device *ibdev,
-                                               struct ib_udata *udata)
-{
-       struct ipath_ucontext *context;
-       struct ib_ucontext *ret;
-
-       context = kmalloc(sizeof *context, GFP_KERNEL);
-       if (!context) {
-               ret = ERR_PTR(-ENOMEM);
-               goto bail;
-       }
-
-       ret = &context->ibucontext;
-
-bail:
-       return ret;
-}
-
-static int ipath_dealloc_ucontext(struct ib_ucontext *context)
-{
-       kfree(to_iucontext(context));
-       return 0;
-}
-
-static int ipath_verbs_register_sysfs(struct ib_device *dev);
-
-static void __verbs_timer(unsigned long arg)
-{
-       struct ipath_devdata *dd = (struct ipath_devdata *) arg;
-
-       /* Handle verbs layer timeouts. */
-       ipath_ib_timer(dd->verbs_dev);
-
-       mod_timer(&dd->verbs_timer, jiffies + 1);
-}
-
-static int enable_timer(struct ipath_devdata *dd)
-{
-       /*
-        * Early chips had a design flaw where the chip and kernel idea
-        * of the tail register don't always agree, and therefore we won't
-        * get an interrupt on the next packet received.
-        * If the board supports per packet receive interrupts, use it.
-        * Otherwise, the timer function periodically checks for packets
-        * to cover this case.
-        * Either way, the timer is needed for verbs layer related
-        * processing.
-        */
-       if (dd->ipath_flags & IPATH_GPIO_INTR) {
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
-                                0x2074076542310ULL);
-               /* Enable GPIO bit 2 interrupt */
-               dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-                                dd->ipath_gpio_mask);
-       }
-
-       setup_timer(&dd->verbs_timer, __verbs_timer, (unsigned long)dd);
-
-       dd->verbs_timer.expires = jiffies + 1;
-       add_timer(&dd->verbs_timer);
-
-       return 0;
-}
-
-static int disable_timer(struct ipath_devdata *dd)
-{
-       if (dd->ipath_flags & IPATH_GPIO_INTR) {
-               /* Disable GPIO bit 2 interrupt */
-               dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
-               ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
-                                dd->ipath_gpio_mask);
-               /*
-                * We might want to undo changes to debugportselect,
-                * but how?
-                */
-       }
-
-       del_timer_sync(&dd->verbs_timer);
-
-       return 0;
-}
-
-static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num,
-                               struct ib_port_immutable *immutable)
-{
-       struct ib_port_attr attr;
-       int err;
-
-       err = ipath_query_port(ibdev, port_num, &attr);
-       if (err)
-               return err;
-
-       immutable->pkey_tbl_len = attr.pkey_tbl_len;
-       immutable->gid_tbl_len = attr.gid_tbl_len;
-       immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
-       immutable->max_mad_size = IB_MGMT_MAD_SIZE;
-
-       return 0;
-}
-
-/**
- * ipath_register_ib_device - register our device with the infiniband core
- * @dd: the device data structure
- * Return 0 on success or a negative errno on failure; the allocated
- * ipath_ibdev is stored in dd->verbs_dev.
- */
-int ipath_register_ib_device(struct ipath_devdata *dd)
-{
-       struct ipath_verbs_counters cntrs;
-       struct ipath_ibdev *idev;
-       struct ib_device *dev;
-       struct ipath_verbs_txreq *tx;
-       unsigned i;
-       int ret;
-
-       idev = (struct ipath_ibdev *)ib_alloc_device(sizeof *idev);
-       if (idev == NULL) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-
-       dev = &idev->ibdev;
-
-       if (dd->ipath_sdma_descq_cnt) {
-               tx = kmalloc_array(dd->ipath_sdma_descq_cnt, sizeof *tx,
-                                  GFP_KERNEL);
-               if (tx == NULL) {
-                       ret = -ENOMEM;
-                       goto err_tx;
-               }
-       } else
-               tx = NULL;
-       idev->txreq_bufs = tx;
-
-       /* Only need to initialize non-zero fields. */
-       spin_lock_init(&idev->n_pds_lock);
-       spin_lock_init(&idev->n_ahs_lock);
-       spin_lock_init(&idev->n_cqs_lock);
-       spin_lock_init(&idev->n_qps_lock);
-       spin_lock_init(&idev->n_srqs_lock);
-       spin_lock_init(&idev->n_mcast_grps_lock);
-
-       spin_lock_init(&idev->qp_table.lock);
-       spin_lock_init(&idev->lk_table.lock);
-       idev->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
-       /* Set the prefix to the default value (see ch. 4.1.1) */
-       idev->gid_prefix = cpu_to_be64(0xfe80000000000000ULL);
-
-       ret = ipath_init_qp_table(idev, ib_ipath_qp_table_size);
-       if (ret)
-               goto err_qp;
-
-       /*
-        * The top ib_ipath_lkey_table_size bits are used to index the
-        * table.  The lower 8 bits can be owned by the user (copied from
-        * the LKEY).  The remaining bits act as a generation number or tag.
-        */
-       idev->lk_table.max = 1 << ib_ipath_lkey_table_size;
-       idev->lk_table.table = kcalloc(idev->lk_table.max,
-                                      sizeof(*idev->lk_table.table),
-                                      GFP_KERNEL);
-       if (idev->lk_table.table == NULL) {
-               ret = -ENOMEM;
-               goto err_lk;
-       }
-       INIT_LIST_HEAD(&idev->pending_mmaps);
-       spin_lock_init(&idev->pending_lock);
-       idev->mmap_offset = PAGE_SIZE;
-       spin_lock_init(&idev->mmap_offset_lock);
-       INIT_LIST_HEAD(&idev->pending[0]);
-       INIT_LIST_HEAD(&idev->pending[1]);
-       INIT_LIST_HEAD(&idev->pending[2]);
-       INIT_LIST_HEAD(&idev->piowait);
-       INIT_LIST_HEAD(&idev->rnrwait);
-       INIT_LIST_HEAD(&idev->txreq_free);
-       idev->pending_index = 0;
-       idev->port_cap_flags =
-               IB_PORT_SYS_IMAGE_GUID_SUP | IB_PORT_CLIENT_REG_SUP;
-       if (dd->ipath_flags & IPATH_HAS_LINK_LATENCY)
-               idev->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
-       idev->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
-       idev->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
-       idev->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
-       idev->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
-       idev->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;
-
-       /* Snapshot current HW counters to "clear" them. */
-       ipath_get_counters(dd, &cntrs);
-       idev->z_symbol_error_counter = cntrs.symbol_error_counter;
-       idev->z_link_error_recovery_counter =
-               cntrs.link_error_recovery_counter;
-       idev->z_link_downed_counter = cntrs.link_downed_counter;
-       idev->z_port_rcv_errors = cntrs.port_rcv_errors;
-       idev->z_port_rcv_remphys_errors =
-               cntrs.port_rcv_remphys_errors;
-       idev->z_port_xmit_discards = cntrs.port_xmit_discards;
-       idev->z_port_xmit_data = cntrs.port_xmit_data;
-       idev->z_port_rcv_data = cntrs.port_rcv_data;
-       idev->z_port_xmit_packets = cntrs.port_xmit_packets;
-       idev->z_port_rcv_packets = cntrs.port_rcv_packets;
-       idev->z_local_link_integrity_errors =
-               cntrs.local_link_integrity_errors;
-       idev->z_excessive_buffer_overrun_errors =
-               cntrs.excessive_buffer_overrun_errors;
-       idev->z_vl15_dropped = cntrs.vl15_dropped;
-
-       for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++)
-               list_add(&tx->txreq.list, &idev->txreq_free);
-
-       /*
-        * The system image GUID is supposed to be the same for all
-        * IB HCAs in a single system but since there can be other
-        * device types in the system, we can't be sure this is unique.
-        */
-       if (!sys_image_guid)
-               sys_image_guid = dd->ipath_guid;
-       idev->sys_image_guid = sys_image_guid;
-       idev->ib_unit = dd->ipath_unit;
-       idev->dd = dd;
-
-       strlcpy(dev->name, "ipath%d", IB_DEVICE_NAME_MAX);
-       dev->owner = THIS_MODULE;
-       dev->node_guid = dd->ipath_guid;
-       dev->uverbs_abi_ver = IPATH_UVERBS_ABI_VERSION;
-       dev->uverbs_cmd_mask =
-               (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
-               (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
-               (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
-               (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
-               (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
-               (1ull << IB_USER_VERBS_CMD_CREATE_AH)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_AH)          |
-               (1ull << IB_USER_VERBS_CMD_QUERY_AH)            |
-               (1ull << IB_USER_VERBS_CMD_REG_MR)              |
-               (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
-               (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
-               (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
-               (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
-               (1ull << IB_USER_VERBS_CMD_POLL_CQ)             |
-               (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)       |
-               (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
-               (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
-               (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
-               (1ull << IB_USER_VERBS_CMD_POST_SEND)           |
-               (1ull << IB_USER_VERBS_CMD_POST_RECV)           |
-               (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
-               (1ull << IB_USER_VERBS_CMD_DETACH_MCAST)        |
-               (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
-               (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
-               (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
-               (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
-               (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);
-       dev->node_type = RDMA_NODE_IB_CA;
-       dev->phys_port_cnt = 1;
-       dev->num_comp_vectors = 1;
-       dev->dma_device = &dd->pcidev->dev;
-       dev->query_device = ipath_query_device;
-       dev->modify_device = ipath_modify_device;
-       dev->query_port = ipath_query_port;
-       dev->modify_port = ipath_modify_port;
-       dev->query_pkey = ipath_query_pkey;
-       dev->query_gid = ipath_query_gid;
-       dev->alloc_ucontext = ipath_alloc_ucontext;
-       dev->dealloc_ucontext = ipath_dealloc_ucontext;
-       dev->alloc_pd = ipath_alloc_pd;
-       dev->dealloc_pd = ipath_dealloc_pd;
-       dev->create_ah = ipath_create_ah;
-       dev->destroy_ah = ipath_destroy_ah;
-       dev->query_ah = ipath_query_ah;
-       dev->create_srq = ipath_create_srq;
-       dev->modify_srq = ipath_modify_srq;
-       dev->query_srq = ipath_query_srq;
-       dev->destroy_srq = ipath_destroy_srq;
-       dev->create_qp = ipath_create_qp;
-       dev->modify_qp = ipath_modify_qp;
-       dev->query_qp = ipath_query_qp;
-       dev->destroy_qp = ipath_destroy_qp;
-       dev->post_send = ipath_post_send;
-       dev->post_recv = ipath_post_receive;
-       dev->post_srq_recv = ipath_post_srq_receive;
-       dev->create_cq = ipath_create_cq;
-       dev->destroy_cq = ipath_destroy_cq;
-       dev->resize_cq = ipath_resize_cq;
-       dev->poll_cq = ipath_poll_cq;
-       dev->req_notify_cq = ipath_req_notify_cq;
-       dev->get_dma_mr = ipath_get_dma_mr;
-       dev->reg_user_mr = ipath_reg_user_mr;
-       dev->dereg_mr = ipath_dereg_mr;
-       dev->alloc_fmr = ipath_alloc_fmr;
-       dev->map_phys_fmr = ipath_map_phys_fmr;
-       dev->unmap_fmr = ipath_unmap_fmr;
-       dev->dealloc_fmr = ipath_dealloc_fmr;
-       dev->attach_mcast = ipath_multicast_attach;
-       dev->detach_mcast = ipath_multicast_detach;
-       dev->process_mad = ipath_process_mad;
-       dev->mmap = ipath_mmap;
-       dev->dma_ops = &ipath_dma_mapping_ops;
-       dev->get_port_immutable = ipath_port_immutable;
-
-       snprintf(dev->node_desc, sizeof(dev->node_desc),
-                IPATH_IDSTR " %s", init_utsname()->nodename);
-
-       ret = ib_register_device(dev, NULL);
-       if (ret)
-               goto err_reg;
-
-       ret = ipath_verbs_register_sysfs(dev);
-       if (ret)
-               goto err_class;
-
-       enable_timer(dd);
-
-       goto bail;
-
-err_class:
-       ib_unregister_device(dev);
-err_reg:
-       kfree(idev->lk_table.table);
-err_lk:
-       kfree(idev->qp_table.table);
-err_qp:
-       kfree(idev->txreq_bufs);
-err_tx:
-       ib_dealloc_device(dev);
-       ipath_dev_err(dd, "cannot register verbs: %d!\n", -ret);
-       idev = NULL;
-
-bail:
-       dd->verbs_dev = idev;
-       return ret;
-}
-
-void ipath_unregister_ib_device(struct ipath_ibdev *dev)
-{
-       struct ib_device *ibdev = &dev->ibdev;
-       u32 qps_inuse;
-
-       ib_unregister_device(ibdev);
-
-       disable_timer(dev->dd);
-
-       if (!list_empty(&dev->pending[0]) ||
-           !list_empty(&dev->pending[1]) ||
-           !list_empty(&dev->pending[2]))
-               ipath_dev_err(dev->dd, "pending list not empty!\n");
-       if (!list_empty(&dev->piowait))
-               ipath_dev_err(dev->dd, "piowait list not empty!\n");
-       if (!list_empty(&dev->rnrwait))
-               ipath_dev_err(dev->dd, "rnrwait list not empty!\n");
-       if (!ipath_mcast_tree_empty())
-               ipath_dev_err(dev->dd, "multicast table memory leak!\n");
-       /*
-        * Note that ipath_unregister_ib_device() can be called before all
-        * the QPs are destroyed!
-        */
-       qps_inuse = ipath_free_all_qps(&dev->qp_table);
-       if (qps_inuse)
-               ipath_dev_err(dev->dd, "QP memory leak! %u still in use\n",
-                       qps_inuse);
-       kfree(dev->qp_table.table);
-       kfree(dev->lk_table.table);
-       kfree(dev->txreq_bufs);
-       ib_dealloc_device(ibdev);
-}
-
-static ssize_t show_rev(struct device *device, struct device_attribute *attr,
-                       char *buf)
-{
-       struct ipath_ibdev *dev =
-               container_of(device, struct ipath_ibdev, ibdev.dev);
-
-       return sprintf(buf, "%x\n", dev->dd->ipath_pcirev);
-}
-
-static ssize_t show_hca(struct device *device, struct device_attribute *attr,
-                       char *buf)
-{
-       struct ipath_ibdev *dev =
-               container_of(device, struct ipath_ibdev, ibdev.dev);
-       int ret;
-
-       ret = dev->dd->ipath_f_get_boardname(dev->dd, buf, 128);
-       if (ret < 0)
-               goto bail;
-       strcat(buf, "\n");
-       ret = strlen(buf);
-
-bail:
-       return ret;
-}
-
-static ssize_t show_stats(struct device *device, struct device_attribute *attr,
-                         char *buf)
-{
-       struct ipath_ibdev *dev =
-               container_of(device, struct ipath_ibdev, ibdev.dev);
-       int i;
-       int len;
-
-       len = sprintf(buf,
-                     "RC resends  %d\n"
-                     "RC no QACK  %d\n"
-                     "RC ACKs     %d\n"
-                     "RC SEQ NAKs %d\n"
-                     "RC RDMA seq %d\n"
-                     "RC RNR NAKs %d\n"
-                     "RC OTH NAKs %d\n"
-                     "RC timeouts %d\n"
-                     "RC RDMA dup %d\n"
-                     "piobuf wait %d\n"
-                     "unaligned   %d\n"
-                     "PKT drops   %d\n"
-                     "WQE errs    %d\n",
-                     dev->n_rc_resends, dev->n_rc_qacks, dev->n_rc_acks,
-                     dev->n_seq_naks, dev->n_rdma_seq, dev->n_rnr_naks,
-                     dev->n_other_naks, dev->n_timeouts,
-                     dev->n_rdma_dup_busy, dev->n_piowait, dev->n_unaligned,
-                     dev->n_pkt_drops, dev->n_wqe_errs);
-       for (i = 0; i < ARRAY_SIZE(dev->opstats); i++) {
-               const struct ipath_opcode_stats *si = &dev->opstats[i];
-
-               if (!si->n_packets && !si->n_bytes)
-                       continue;
-               len += sprintf(buf + len, "%02x %llu/%llu\n", i,
-                              (unsigned long long) si->n_packets,
-                              (unsigned long long) si->n_bytes);
-       }
-       return len;
-}
-
-static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
-static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
-static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
-
-static struct device_attribute *ipath_class_attributes[] = {
-       &dev_attr_hw_rev,
-       &dev_attr_hca_type,
-       &dev_attr_board_id,
-       &dev_attr_stats
-};
-
-static int ipath_verbs_register_sysfs(struct ib_device *dev)
-{
-       int i;
-       int ret;
-
-       for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) {
-               ret = device_create_file(&dev->dev,
-                                      ipath_class_attributes[i]);
-               if (ret)
-                       goto bail;
-       }
-       return 0;
-bail:
-       for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i)
-               device_remove_file(&dev->dev, ipath_class_attributes[i]);
-       return ret;
-}
diff --git a/drivers/staging/rdma/ipath/ipath_verbs.h b/drivers/staging/rdma/ipath/ipath_verbs.h
deleted file mode 100644 (file)
index 6c70a89..0000000
+++ /dev/null
@@ -1,941 +0,0 @@
-/*
- * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef IPATH_VERBS_H
-#define IPATH_VERBS_H
-
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/kref.h>
-#include <rdma/ib_pack.h>
-#include <rdma/ib_user_verbs.h>
-
-#include "ipath_kernel.h"
-
-#define IPATH_MAX_RDMA_ATOMIC  4
-
-#define QPN_MAX                 (1 << 24)
-#define QPNMAP_ENTRIES          (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
-
-/*
- * Increment this value if any changes that break userspace ABI
- * compatibility are made.
- */
-#define IPATH_UVERBS_ABI_VERSION       2
-
-/*
- * Define an ib_cq_notify value that is not valid so we know when CQ
- * notifications are armed.
- */
-#define IB_CQ_NONE     (IB_CQ_NEXT_COMP + 1)
-
-/* AETH NAK opcode values */
-#define IB_RNR_NAK                     0x20
-#define IB_NAK_PSN_ERROR               0x60
-#define IB_NAK_INVALID_REQUEST         0x61
-#define IB_NAK_REMOTE_ACCESS_ERROR     0x62
-#define IB_NAK_REMOTE_OPERATIONAL_ERROR 0x63
-#define IB_NAK_INVALID_RD_REQUEST      0x64
-
-/* Flags for checking QP state (see ib_ipath_state_ops[]) */
-#define IPATH_POST_SEND_OK             0x01
-#define IPATH_POST_RECV_OK             0x02
-#define IPATH_PROCESS_RECV_OK          0x04
-#define IPATH_PROCESS_SEND_OK          0x08
-#define IPATH_PROCESS_NEXT_SEND_OK     0x10
-#define IPATH_FLUSH_SEND               0x20
-#define IPATH_FLUSH_RECV               0x40
-#define IPATH_PROCESS_OR_FLUSH_SEND \
-       (IPATH_PROCESS_SEND_OK | IPATH_FLUSH_SEND)
-
-/* IB Performance Manager status values */
-#define IB_PMA_SAMPLE_STATUS_DONE      0x00
-#define IB_PMA_SAMPLE_STATUS_STARTED   0x01
-#define IB_PMA_SAMPLE_STATUS_RUNNING   0x02
-
-/* Mandatory IB performance counter select values. */
-#define IB_PMA_PORT_XMIT_DATA  cpu_to_be16(0x0001)
-#define IB_PMA_PORT_RCV_DATA   cpu_to_be16(0x0002)
-#define IB_PMA_PORT_XMIT_PKTS  cpu_to_be16(0x0003)
-#define IB_PMA_PORT_RCV_PKTS   cpu_to_be16(0x0004)
-#define IB_PMA_PORT_XMIT_WAIT  cpu_to_be16(0x0005)
-
-struct ib_reth {
-       __be64 vaddr;
-       __be32 rkey;
-       __be32 length;
-} __attribute__ ((packed));
-
-struct ib_atomic_eth {
-       __be32 vaddr[2];        /* unaligned so access as 2 32-bit words */
-       __be32 rkey;
-       __be64 swap_data;
-       __be64 compare_data;
-} __attribute__ ((packed));
-
-struct ipath_other_headers {
-       __be32 bth[3];
-       union {
-               struct {
-                       __be32 deth[2];
-                       __be32 imm_data;
-               } ud;
-               struct {
-                       struct ib_reth reth;
-                       __be32 imm_data;
-               } rc;
-               struct {
-                       __be32 aeth;
-                       __be32 atomic_ack_eth[2];
-               } at;
-               __be32 imm_data;
-               __be32 aeth;
-               struct ib_atomic_eth atomic_eth;
-       } u;
-} __attribute__ ((packed));
-
-/*
- * Note that UD packets with a GRH header are 8+40+12+8 = 68 bytes
- * long (72 w/ imm_data).  Only the first 56 bytes of the IB header
- * will be in the eager header buffer.  The remaining 12 or 16 bytes
- * are in the data buffer.
- */
-struct ipath_ib_header {
-       __be16 lrh[4];
-       union {
-               struct {
-                       struct ib_grh grh;
-                       struct ipath_other_headers oth;
-               } l;
-               struct ipath_other_headers oth;
-       } u;
-} __attribute__ ((packed));
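/*
 * A quick check of the byte counts quoted in the comment above struct
 * ipath_ib_header, using the usual InfiniBand header sizes (LRH 8, GRH 40,
 * BTH 12, DETH 8, immediate data 4).  The individual sizes are assumptions
 * from the IB spec; only the totals appear in the comment.
 */
enum {
	DEMO_LRH_BYTES	 = 8,	/* local route header */
	DEMO_GRH_BYTES	 = 40,	/* global route header */
	DEMO_BTH_BYTES	 = 12,	/* base transport header */
	DEMO_DETH_BYTES	 = 8,	/* datagram extended transport header */
	DEMO_IMM_BYTES	 = 4,	/* immediate data */
	DEMO_EAGER_BYTES = 56,	/* eager header buffer size from the comment */
};

/* 8 + 40 + 12 + 8 = 68 bytes, or 72 with immediate data ... */
static const int demo_ud_grh_bytes =
	DEMO_LRH_BYTES + DEMO_GRH_BYTES + DEMO_BTH_BYTES + DEMO_DETH_BYTES;	/* 68 */
static const int demo_ud_grh_imm_bytes =
	DEMO_LRH_BYTES + DEMO_GRH_BYTES + DEMO_BTH_BYTES + DEMO_DETH_BYTES +
	DEMO_IMM_BYTES;								/* 72 */
/* ... so 68 - 56 = 12 or 72 - 56 = 16 bytes spill into the data buffer. */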
-
-struct ipath_pio_header {
-       __le32 pbc[2];
-       struct ipath_ib_header hdr;
-} __attribute__ ((packed));
-
-/*
- * There is one struct ipath_mcast for each multicast GID.
- * All attached QPs are then stored as a list of
- * struct ipath_mcast_qp.
- */
-struct ipath_mcast_qp {
-       struct list_head list;
-       struct ipath_qp *qp;
-};
-
-struct ipath_mcast {
-       struct rb_node rb_node;
-       union ib_gid mgid;
-       struct list_head qp_list;
-       wait_queue_head_t wait;
-       atomic_t refcount;
-       int n_attached;
-};
-
-/* Protection domain */
-struct ipath_pd {
-       struct ib_pd ibpd;
-       int user;               /* non-zero if created from user space */
-};
-
-/* Address Handle */
-struct ipath_ah {
-       struct ib_ah ibah;
-       struct ib_ah_attr attr;
-};
-
-/*
- * This structure is used by ipath_mmap() to validate an offset
- * when an mmap() request is made.  The vm_area_struct then uses
- * this as its vm_private_data.
- */
-struct ipath_mmap_info {
-       struct list_head pending_mmaps;
-       struct ib_ucontext *context;
-       void *obj;
-       __u64 offset;
-       struct kref ref;
-       unsigned size;
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and completion queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- */
-struct ipath_cq_wc {
-       u32 head;               /* index of next entry to fill */
-       u32 tail;               /* index of next ib_poll_cq() entry */
-       union {
-               /* these are actually size ibcq.cqe + 1 */
-               struct ib_uverbs_wc uqueue[0];
-               struct ib_wc kqueue[0];
-       };
-};
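/*
 * A userspace sketch of how a queue like ipath_cq_wc above is sized: the
 * head/tail indices and the entries live in a single allocation so the whole
 * thing can be mapped into user space, with room for cqe + 1 entries (one
 * spare slot keeps a full ring distinguishable from an empty one).  malloc
 * and the demo types stand in for the driver's allocator and ib_wc.
 */
#include <stdint.h>
#include <stdlib.h>

struct demo_wc {
	uint64_t wr_id;
	uint32_t status;
};

struct demo_cq_wc {
	uint32_t head;			/* index of next entry to fill */
	uint32_t tail;			/* index of next entry to poll */
	struct demo_wc queue[];		/* actually cqe + 1 entries */
};

static struct demo_cq_wc *alloc_cq_queue(unsigned int cqe)
{
	/* one block: header plus cqe + 1 entries; NULL on failure */
	return calloc(1, sizeof(struct demo_cq_wc) +
			 (cqe + 1) * sizeof(struct demo_wc));
}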
-
-/*
- * The completion queue structure.
- */
-struct ipath_cq {
-       struct ib_cq ibcq;
-       struct tasklet_struct comptask;
-       spinlock_t lock;
-       u8 notify;
-       u8 triggered;
-       struct ipath_cq_wc *queue;
-       struct ipath_mmap_info *ip;
-};
-
-/*
- * A segment is a linear region of low physical memory.
- * XXX Maybe we should use phys addr here and kmap()/kunmap().
- * Used by the verbs layer.
- */
-struct ipath_seg {
-       void *vaddr;
-       size_t length;
-};
-
-/* The number of ipath_segs that fit in a page. */
-#define IPATH_SEGSZ     (PAGE_SIZE / sizeof (struct ipath_seg))
-
-struct ipath_segarray {
-       struct ipath_seg segs[IPATH_SEGSZ];
-};
-
-struct ipath_mregion {
-       struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
-       u64 user_base;          /* User's address for this region */
-       u64 iova;               /* IB start address of this region */
-       size_t length;
-       u32 lkey;
-       u32 offset;             /* offset (bytes) to start of region */
-       int access_flags;
-       u32 max_segs;           /* number of ipath_segs in all the arrays */
-       u32 mapsz;              /* size of the map array */
-       struct ipath_segarray *map[0];  /* the segments */
-};
-
-/*
- * These keep track of the copy progress within a memory region.
- * Used by the verbs layer.
- */
-struct ipath_sge {
-       struct ipath_mregion *mr;
-       void *vaddr;            /* kernel virtual address of segment */
-       u32 sge_length;         /* length of the SGE */
-       u32 length;             /* remaining length of the segment */
-       u16 m;                  /* current index: mr->map[m] */
-       u16 n;                  /* current index: mr->map[m]->segs[n] */
-};
-
-/* Memory region */
-struct ipath_mr {
-       struct ib_mr ibmr;
-       struct ib_umem *umem;
-       struct ipath_mregion mr;        /* must be last */
-};
-
-/*
- * Send work request queue entry.
- * The size of the sg_list is determined when the QP is created and stored
- * in qp->s_max_sge.
- */
-struct ipath_swqe {
-       union {
-               struct ib_send_wr wr;   /* don't use wr.sg_list */
-               struct ib_ud_wr ud_wr;
-               struct ib_rdma_wr rdma_wr;
-               struct ib_atomic_wr atomic_wr;
-       };
-
-       u32 psn;                /* first packet sequence number */
-       u32 lpsn;               /* last packet sequence number */
-       u32 ssn;                /* send sequence number */
-       u32 length;             /* total length of data in sg_list */
-       struct ipath_sge sg_list[0];
-};
-
-/*
- * Receive work request queue entry.
- * The size of the sg_list is determined when the QP (or SRQ) is created
- * and stored in qp->r_rq.max_sge (or srq->rq.max_sge).
- */
-struct ipath_rwqe {
-       u64 wr_id;
-       u8 num_sge;
-       struct ib_sge sg_list[0];
-};
-
-/*
- * This structure is used to contain the head pointer, tail pointer,
- * and receive work queue entries as a single memory allocation so
- * it can be mmap'ed into user space.
- * Note that the wq array elements are variable size so you can't
- * just index into the array to get the N'th element;
- * use get_rwqe_ptr() instead.
- */
-struct ipath_rwq {
-       u32 head;               /* new work requests posted to the head */
-       u32 tail;               /* receive work requests are pulled from here */
-       struct ipath_rwqe wq[0];
-};
-
-struct ipath_rq {
-       struct ipath_rwq *wq;
-       spinlock_t lock;
-       u32 size;               /* size of RWQE array */
-       u8 max_sge;
-};
-
-struct ipath_srq {
-       struct ib_srq ibsrq;
-       struct ipath_rq rq;
-       struct ipath_mmap_info *ip;
-       /* send signal when number of RWQEs < limit */
-       u32 limit;
-};
-
-struct ipath_sge_state {
-       struct ipath_sge *sg_list;      /* next SGE to be used if any */
-       struct ipath_sge sge;   /* progress state for the current SGE */
-       u8 num_sge;
-       u8 static_rate;
-};
-
-/*
- * This structure holds the information that the send tasklet needs
- * to send a RDMA read response or atomic operation.
- */
-struct ipath_ack_entry {
-       u8 opcode;
-       u8 sent;
-       u32 psn;
-       union {
-               struct ipath_sge_state rdma_sge;
-               u64 atomic_data;
-       };
-};
-
-/*
- * Variables prefixed with s_ are for the requester (sender).
- * Variables prefixed with r_ are for the responder (receiver).
- * Variables prefixed with ack_ are for responder replies.
- *
- * Common variables are protected by both r_rq.lock and s_lock in that order
- * which only happens in modify_qp() or changing the QP 'state'.
- */
-struct ipath_qp {
-       struct ib_qp ibqp;
-       struct ipath_qp *next;          /* link list for QPN hash table */
-       struct ipath_qp *timer_next;    /* link list for ipath_ib_timer() */
-       struct ipath_qp *pio_next;      /* link for ipath_ib_piobufavail() */
-       struct list_head piowait;       /* link for wait PIO buf */
-       struct list_head timerwait;     /* link for waiting for timeouts */
-       struct ib_ah_attr remote_ah_attr;
-       struct ipath_ib_header s_hdr;   /* next packet header to send */
-       atomic_t refcount;
-       wait_queue_head_t wait;
-       wait_queue_head_t wait_dma;
-       struct tasklet_struct s_task;
-       struct ipath_mmap_info *ip;
-       struct ipath_sge_state *s_cur_sge;
-       struct ipath_verbs_txreq *s_tx;
-       struct ipath_sge_state s_sge;   /* current send request data */
-       struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
-       struct ipath_sge_state s_ack_rdma_sge;
-       struct ipath_sge_state s_rdma_read_sge;
-       struct ipath_sge_state r_sge;   /* current receive data */
-       spinlock_t s_lock;
-       atomic_t s_dma_busy;
-       u16 s_pkt_delay;
-       u16 s_hdrwords;         /* size of s_hdr in 32 bit words */
-       u32 s_cur_size;         /* size of send packet in bytes */
-       u32 s_len;              /* total length of s_sge */
-       u32 s_rdma_read_len;    /* total length of s_rdma_read_sge */
-       u32 s_next_psn;         /* PSN for next request */
-       u32 s_last_psn;         /* last response PSN processed */
-       u32 s_psn;              /* current packet sequence number */
-       u32 s_ack_rdma_psn;     /* PSN for sending RDMA read responses */
-       u32 s_ack_psn;          /* PSN for acking sends and RDMA writes */
-       u32 s_rnr_timeout;      /* number of milliseconds for RNR timeout */
-       u32 r_ack_psn;          /* PSN for next ACK or atomic ACK */
-       u64 r_wr_id;            /* ID for current receive WQE */
-       unsigned long r_aflags;
-       u32 r_len;              /* total length of r_sge */
-       u32 r_rcv_len;          /* receive data len processed */
-       u32 r_psn;              /* expected rcv packet sequence number */
-       u32 r_msn;              /* message sequence number */
-       u8 state;               /* QP state */
-       u8 s_state;             /* opcode of last packet sent */
-       u8 s_ack_state;         /* opcode of packet to ACK */
-       u8 s_nak_state;         /* non-zero if NAK is pending */
-       u8 r_state;             /* opcode of last packet received */
-       u8 r_nak_state;         /* non-zero if NAK is pending */
-       u8 r_min_rnr_timer;     /* retry timeout value for RNR NAKs */
-       u8 r_flags;
-       u8 r_max_rd_atomic;     /* max number of RDMA read/atomic to receive */
-       u8 r_head_ack_queue;    /* index into s_ack_queue[] */
-       u8 qp_access_flags;
-       u8 s_max_sge;           /* size of s_wq->sg_list */
-       u8 s_retry_cnt;         /* number of times to retry */
-       u8 s_rnr_retry_cnt;
-       u8 s_retry;             /* requester retry counter */
-       u8 s_rnr_retry;         /* requester RNR retry counter */
-       u8 s_pkey_index;        /* PKEY index to use */
-       u8 s_max_rd_atomic;     /* max number of RDMA read/atomic to send */
-       u8 s_num_rd_atomic;     /* number of RDMA read/atomic pending */
-       u8 s_tail_ack_queue;    /* index into s_ack_queue[] */
-       u8 s_flags;
-       u8 s_dmult;
-       u8 s_draining;
-       u8 timeout;             /* Timeout for this QP */
-       enum ib_mtu path_mtu;
-       u32 remote_qpn;
-       u32 qkey;               /* QKEY for this QP (for UD or RD) */
-       u32 s_size;             /* send work queue size */
-       u32 s_head;             /* new entries added here */
-       u32 s_tail;             /* next entry to process */
-       u32 s_cur;              /* current work queue entry */
-       u32 s_last;             /* last un-ACK'ed entry */
-       u32 s_ssn;              /* SSN of tail entry */
-       u32 s_lsn;              /* limit sequence number (credit) */
-       struct ipath_swqe *s_wq;        /* send work queue */
-       struct ipath_swqe *s_wqe;
-       struct ipath_sge *r_ud_sg_list;
-       struct ipath_rq r_rq;           /* receive work queue */
-       struct ipath_sge r_sg_list[0];  /* verified SGEs */
-};
-
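/*
 * A generic illustration of the locking rule stated in the comment before
 * struct ipath_qp: when both the receive-queue lock and the send lock are
 * needed, they are always taken in the same order (r_rq.lock, then s_lock),
 * so two code paths can never deadlock against each other.  pthread mutexes
 * stand in for the kernel spinlocks; this is only a sketch of the ordering.
 */
#include <pthread.h>

struct demo_qp {
	pthread_mutex_t r_rq_lock;	/* stands in for qp->r_rq.lock */
	pthread_mutex_t s_lock;		/* stands in for qp->s_lock */
	int state;
};

static void demo_change_state(struct demo_qp *qp, int new_state)
{
	pthread_mutex_lock(&qp->r_rq_lock);	/* always first */
	pthread_mutex_lock(&qp->s_lock);	/* always second */
	qp->state = new_state;			/* "common" state protected by both */
	pthread_mutex_unlock(&qp->s_lock);
	pthread_mutex_unlock(&qp->r_rq_lock);
}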
-/*
- * Atomic bit definitions for r_aflags.
- */
-#define IPATH_R_WRID_VALID     0
-
-/*
- * Bit definitions for r_flags.
- */
-#define IPATH_R_REUSE_SGE      0x01
-#define IPATH_R_RDMAR_SEQ      0x02
-
-/*
- * Bit definitions for s_flags.
- *
- * IPATH_S_FENCE_PENDING - waiting for all prior RDMA read or atomic SWQEs
- *                        before processing the next SWQE
- * IPATH_S_RDMAR_PENDING - waiting for any RDMA read or atomic SWQEs
- *                        before processing the next SWQE
- * IPATH_S_WAITING - waiting for RNR timeout or send buffer available.
- * IPATH_S_WAIT_SSN_CREDIT - waiting for RC credits to process next SWQE
- * IPATH_S_WAIT_DMA - waiting for send DMA queue to drain before generating
- *                   next send completion entry not via send DMA.
- */
-#define IPATH_S_SIGNAL_REQ_WR  0x01
-#define IPATH_S_FENCE_PENDING  0x02
-#define IPATH_S_RDMAR_PENDING  0x04
-#define IPATH_S_ACK_PENDING    0x08
-#define IPATH_S_BUSY           0x10
-#define IPATH_S_WAITING                0x20
-#define IPATH_S_WAIT_SSN_CREDIT        0x40
-#define IPATH_S_WAIT_DMA       0x80
-
-#define IPATH_S_ANY_WAIT (IPATH_S_FENCE_PENDING | IPATH_S_RDMAR_PENDING | \
-       IPATH_S_WAITING | IPATH_S_WAIT_SSN_CREDIT | IPATH_S_WAIT_DMA)
-
-#define IPATH_PSN_CREDIT       512
-
-/*
- * Since struct ipath_swqe is not a fixed size, we can't simply index into
- * struct ipath_qp.s_wq.  This function does the array index computation.
- */
-static inline struct ipath_swqe *get_swqe_ptr(struct ipath_qp *qp,
-                                             unsigned n)
-{
-       return (struct ipath_swqe *)((char *)qp->s_wq +
-                                    (sizeof(struct ipath_swqe) +
-                                     qp->s_max_sge *
-                                     sizeof(struct ipath_sge)) * n);
-}
-
-/*
- * Since struct ipath_rwqe is not a fixed size, we can't simply index into
- * struct ipath_rwq.wq.  This function does the array index computation.
- */
-static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq,
-                                             unsigned n)
-{
-       return (struct ipath_rwqe *)
-               ((char *) rq->wq->wq +
-                (sizeof(struct ipath_rwqe) +
-                 rq->max_sge * sizeof(struct ib_sge)) * n);
-}
-
-/*
- * QPN-map pages start out as NULL, they get allocated upon
- * first use and are never deallocated. This way,
- * large bitmaps are not allocated unless large numbers of QPs are used.
- */
-struct qpn_map {
-       atomic_t n_free;
-       void *page;
-};
-
-struct ipath_qp_table {
-       spinlock_t lock;
-       u32 last;               /* last QP number allocated */
-       u32 max;                /* size of the hash table */
-       u32 nmaps;              /* size of the map table */
-       struct ipath_qp **table;
-       /* bit map of free numbers */
-       struct qpn_map map[QPNMAP_ENTRIES];
-};
-
-struct ipath_lkey_table {
-       spinlock_t lock;
-       u32 next;               /* next unused index (speeds search) */
-       u32 gen;                /* generation count */
-       u32 max;                /* size of the table */
-       struct ipath_mregion **table;
-};
-
-struct ipath_opcode_stats {
-       u64 n_packets;          /* number of packets */
-       u64 n_bytes;            /* total number of bytes */
-};
-
-struct ipath_ibdev {
-       struct ib_device ibdev;
-       struct ipath_devdata *dd;
-       struct list_head pending_mmaps;
-       spinlock_t mmap_offset_lock;
-       u32 mmap_offset;
-       int ib_unit;            /* This is the device number */
-       u16 sm_lid;             /* in host order */
-       u8 sm_sl;
-       u8 mkeyprot;
-       /* non-zero when timer is set */
-       unsigned long mkey_lease_timeout;
-
-       /* The following fields are really per port. */
-       struct ipath_qp_table qp_table;
-       struct ipath_lkey_table lk_table;
-       struct list_head pending[3];    /* FIFO of QPs waiting for ACKs */
-       struct list_head piowait;       /* list for wait PIO buf */
-       struct list_head txreq_free;
-       void *txreq_bufs;
-       /* list of QPs waiting for RNR timer */
-       struct list_head rnrwait;
-       spinlock_t pending_lock;
-       __be64 sys_image_guid;  /* in network order */
-       __be64 gid_prefix;      /* in network order */
-       __be64 mkey;
-
-       u32 n_pds_allocated;    /* number of PDs allocated for device */
-       spinlock_t n_pds_lock;
-       u32 n_ahs_allocated;    /* number of AHs allocated for device */
-       spinlock_t n_ahs_lock;
-       u32 n_cqs_allocated;    /* number of CQs allocated for device */
-       spinlock_t n_cqs_lock;
-       u32 n_qps_allocated;    /* number of QPs allocated for device */
-       spinlock_t n_qps_lock;
-       u32 n_srqs_allocated;   /* number of SRQs allocated for device */
-       spinlock_t n_srqs_lock;
-       u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
-       spinlock_t n_mcast_grps_lock;
-
-       u64 ipath_sword;        /* total dwords sent (sample result) */
-       u64 ipath_rword;        /* total dwords received (sample result) */
-       u64 ipath_spkts;        /* total packets sent (sample result) */
-       u64 ipath_rpkts;        /* total packets received (sample result) */
-       /* # of ticks no data sent (sample result) */
-       u64 ipath_xmit_wait;
-       u64 rcv_errors;         /* # of packets with SW detected rcv errs */
-       u64 n_unicast_xmit;     /* total unicast packets sent */
-       u64 n_unicast_rcv;      /* total unicast packets received */
-       u64 n_multicast_xmit;   /* total multicast packets sent */
-       u64 n_multicast_rcv;    /* total multicast packets received */
-       u64 z_symbol_error_counter;             /* starting count for PMA */
-       u64 z_link_error_recovery_counter;      /* starting count for PMA */
-       u64 z_link_downed_counter;              /* starting count for PMA */
-       u64 z_port_rcv_errors;                  /* starting count for PMA */
-       u64 z_port_rcv_remphys_errors;          /* starting count for PMA */
-       u64 z_port_xmit_discards;               /* starting count for PMA */
-       u64 z_port_xmit_data;                   /* starting count for PMA */
-       u64 z_port_rcv_data;                    /* starting count for PMA */
-       u64 z_port_xmit_packets;                /* starting count for PMA */
-       u64 z_port_rcv_packets;                 /* starting count for PMA */
-       u32 z_pkey_violations;                  /* starting count for PMA */
-       u32 z_local_link_integrity_errors;      /* starting count for PMA */
-       u32 z_excessive_buffer_overrun_errors;  /* starting count for PMA */
-       u32 z_vl15_dropped;                     /* starting count for PMA */
-       u32 n_rc_resends;
-       u32 n_rc_acks;
-       u32 n_rc_qacks;
-       u32 n_seq_naks;
-       u32 n_rdma_seq;
-       u32 n_rnr_naks;
-       u32 n_other_naks;
-       u32 n_timeouts;
-       u32 n_pkt_drops;
-       u32 n_vl15_dropped;
-       u32 n_wqe_errs;
-       u32 n_rdma_dup_busy;
-       u32 n_piowait;
-       u32 n_unaligned;
-       u32 port_cap_flags;
-       u32 pma_sample_start;
-       u32 pma_sample_interval;
-       __be16 pma_counter_select[5];
-       u16 pma_tag;
-       u16 qkey_violations;
-       u16 mkey_violations;
-       u16 mkey_lease_period;
-       u16 pending_index;      /* which pending queue is active */
-       u8 pma_sample_status;
-       u8 subnet_timeout;
-       u8 vl_high_limit;
-       struct ipath_opcode_stats opstats[128];
-};
-
-struct ipath_verbs_counters {
-       u64 symbol_error_counter;
-       u64 link_error_recovery_counter;
-       u64 link_downed_counter;
-       u64 port_rcv_errors;
-       u64 port_rcv_remphys_errors;
-       u64 port_xmit_discards;
-       u64 port_xmit_data;
-       u64 port_rcv_data;
-       u64 port_xmit_packets;
-       u64 port_rcv_packets;
-       u32 local_link_integrity_errors;
-       u32 excessive_buffer_overrun_errors;
-       u32 vl15_dropped;
-};
-
-struct ipath_verbs_txreq {
-       struct ipath_qp         *qp;
-       struct ipath_swqe       *wqe;
-       u32                      map_len;
-       u32                      len;
-       struct ipath_sge_state  *ss;
-       struct ipath_pio_header  hdr;
-       struct ipath_sdma_txreq  txreq;
-};
-
-static inline struct ipath_mr *to_imr(struct ib_mr *ibmr)
-{
-       return container_of(ibmr, struct ipath_mr, ibmr);
-}
-
-static inline struct ipath_pd *to_ipd(struct ib_pd *ibpd)
-{
-       return container_of(ibpd, struct ipath_pd, ibpd);
-}
-
-static inline struct ipath_ah *to_iah(struct ib_ah *ibah)
-{
-       return container_of(ibah, struct ipath_ah, ibah);
-}
-
-static inline struct ipath_cq *to_icq(struct ib_cq *ibcq)
-{
-       return container_of(ibcq, struct ipath_cq, ibcq);
-}
-
-static inline struct ipath_srq *to_isrq(struct ib_srq *ibsrq)
-{
-       return container_of(ibsrq, struct ipath_srq, ibsrq);
-}
-
-static inline struct ipath_qp *to_iqp(struct ib_qp *ibqp)
-{
-       return container_of(ibqp, struct ipath_qp, ibqp);
-}
-
-static inline struct ipath_ibdev *to_idev(struct ib_device *ibdev)
-{
-       return container_of(ibdev, struct ipath_ibdev, ibdev);
-}
-
-/*
- * This must be called with s_lock held.
- */
-static inline void ipath_schedule_send(struct ipath_qp *qp)
-{
-       if (qp->s_flags & IPATH_S_ANY_WAIT)
-               qp->s_flags &= ~IPATH_S_ANY_WAIT;
-       if (!(qp->s_flags & IPATH_S_BUSY))
-               tasklet_hi_schedule(&qp->s_task);
-}
-
-int ipath_process_mad(struct ib_device *ibdev,
-                     int mad_flags,
-                     u8 port_num,
-                     const struct ib_wc *in_wc,
-                     const struct ib_grh *in_grh,
-                     const struct ib_mad_hdr *in, size_t in_mad_size,
-                     struct ib_mad_hdr *out, size_t *out_mad_size,
-                     u16 *out_mad_pkey_index);
-
-/*
- * Compare the lower 24 bits of the two values.
- * Returns an integer <, ==, or > than zero.
- */
-static inline int ipath_cmp24(u32 a, u32 b)
-{
-       return (((int) a) - ((int) b)) << 8;
-}
-
-struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid);
-
-int ipath_snapshot_counters(struct ipath_devdata *dd, u64 *swords,
-                           u64 *rwords, u64 *spkts, u64 *rpkts,
-                           u64 *xmit_wait);
-
-int ipath_get_counters(struct ipath_devdata *dd,
-                      struct ipath_verbs_counters *cntrs);
-
-int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
-
-int ipath_mcast_tree_empty(void);
-
-__be32 ipath_compute_aeth(struct ipath_qp *qp);
-
-struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn);
-
-struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
-                             struct ib_qp_init_attr *init_attr,
-                             struct ib_udata *udata);
-
-int ipath_destroy_qp(struct ib_qp *ibqp);
-
-int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err);
-
-int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                   int attr_mask, struct ib_udata *udata);
-
-int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
-                  int attr_mask, struct ib_qp_init_attr *init_attr);
-
-unsigned ipath_free_all_qps(struct ipath_qp_table *qpt);
-
-int ipath_init_qp_table(struct ipath_ibdev *idev, int size);
-
-void ipath_get_credit(struct ipath_qp *qp, u32 aeth);
-
-unsigned ipath_ib_rate_to_mult(enum ib_rate rate);
-
-int ipath_verbs_send(struct ipath_qp *qp, struct ipath_ib_header *hdr,
-                    u32 hdrwords, struct ipath_sge_state *ss, u32 len);
-
-void ipath_copy_sge(struct ipath_sge_state *ss, void *data, u32 length);
-
-void ipath_skip_sge(struct ipath_sge_state *ss, u32 length);
-
-void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-void ipath_restart_rc(struct ipath_qp *qp, u32 psn);
-
-void ipath_rc_error(struct ipath_qp *qp, enum ib_wc_status err);
-
-int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr);
-
-void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
-                 int has_grh, void *data, u32 tlen, struct ipath_qp *qp);
-
-int ipath_alloc_lkey(struct ipath_lkey_table *rkt,
-                    struct ipath_mregion *mr);
-
-void ipath_free_lkey(struct ipath_lkey_table *rkt, u32 lkey);
-
-int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
-                 struct ib_sge *sge, int acc);
-
-int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
-                 u32 len, u64 vaddr, u32 rkey, int acc);
-
-int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
-                          struct ib_recv_wr **bad_wr);
-
-struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
-                               struct ib_srq_init_attr *srq_init_attr,
-                               struct ib_udata *udata);
-
-int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
-                    enum ib_srq_attr_mask attr_mask,
-                    struct ib_udata *udata);
-
-int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
-
-int ipath_destroy_srq(struct ib_srq *ibsrq);
-
-void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
-
-int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
-
-struct ib_cq *ipath_create_cq(struct ib_device *ibdev,
-                             const struct ib_cq_init_attr *attr,
-                             struct ib_ucontext *context,
-                             struct ib_udata *udata);
-
-int ipath_destroy_cq(struct ib_cq *ibcq);
-
-int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
-
-int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
-
-struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc);
-
-struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
-                               u64 virt_addr, int mr_access_flags,
-                               struct ib_udata *udata);
-
-int ipath_dereg_mr(struct ib_mr *ibmr);
-
-struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-                              struct ib_fmr_attr *fmr_attr);
-
-int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list,
-                      int list_len, u64 iova);
-
-int ipath_unmap_fmr(struct list_head *fmr_list);
-
-int ipath_dealloc_fmr(struct ib_fmr *ibfmr);
-
-void ipath_release_mmap_info(struct kref *ref);
-
-struct ipath_mmap_info *ipath_create_mmap_info(struct ipath_ibdev *dev,
-                                              u32 size,
-                                              struct ib_ucontext *context,
-                                              void *obj);
-
-void ipath_update_mmap_info(struct ipath_ibdev *dev,
-                           struct ipath_mmap_info *ip,
-                           u32 size, void *obj);
-
-int ipath_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
-
-void ipath_insert_rnr_queue(struct ipath_qp *qp);
-
-int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
-                  u32 *lengthp, struct ipath_sge_state *ss);
-
-int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only);
-
-u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
-                  struct ib_global_route *grh, u32 hwords, u32 nwords);
-
-void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
-                          struct ipath_other_headers *ohdr,
-                          u32 bth0, u32 bth2);
-
-void ipath_do_send(unsigned long data);
-
-void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
-                        enum ib_wc_status status);
-
-int ipath_make_rc_req(struct ipath_qp *qp);
-
-int ipath_make_uc_req(struct ipath_qp *qp);
-
-int ipath_make_ud_req(struct ipath_qp *qp);
-
-int ipath_register_ib_device(struct ipath_devdata *);
-
-void ipath_unregister_ib_device(struct ipath_ibdev *);
-
-void ipath_ib_rcv(struct ipath_ibdev *, void *, void *, u32);
-
-int ipath_ib_piobufavail(struct ipath_ibdev *);
-
-unsigned ipath_get_npkeys(struct ipath_devdata *);
-
-u32 ipath_get_cr_errpkey(struct ipath_devdata *);
-
-unsigned ipath_get_pkey(struct ipath_devdata *, unsigned);
-
-extern const enum ib_wc_opcode ib_ipath_wc_opcode[];
-
-/*
- * Below converts HCA-specific LinkTrainingState to IB PhysPortState
- * values.
- */
-extern const u8 ipath_cvt_physportstate[];
-#define IB_PHYSPORTSTATE_SLEEP 1
-#define IB_PHYSPORTSTATE_POLL 2
-#define IB_PHYSPORTSTATE_DISABLED 3
-#define IB_PHYSPORTSTATE_CFG_TRAIN 4
-#define IB_PHYSPORTSTATE_LINKUP 5
-#define IB_PHYSPORTSTATE_LINK_ERR_RECOVER 6
-
-extern const int ib_ipath_state_ops[];
-
-extern unsigned int ib_ipath_lkey_table_size;
-
-extern unsigned int ib_ipath_max_cqes;
-
-extern unsigned int ib_ipath_max_cqs;
-
-extern unsigned int ib_ipath_max_qp_wrs;
-
-extern unsigned int ib_ipath_max_qps;
-
-extern unsigned int ib_ipath_max_sges;
-
-extern unsigned int ib_ipath_max_mcast_grps;
-
-extern unsigned int ib_ipath_max_mcast_qp_attached;
-
-extern unsigned int ib_ipath_max_srqs;
-
-extern unsigned int ib_ipath_max_srq_sges;
-
-extern unsigned int ib_ipath_max_srq_wrs;
-
-extern const u32 ib_ipath_rnr_table[];
-
-extern struct ib_dma_mapping_ops ipath_dma_mapping_ops;
-
-#endif                         /* IPATH_VERBS_H */
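
The ipath_cmp24() helper in the deleted header above relies on a small trick: shifting the 32-bit difference left by 8 moves bit 23 of the 24-bit PSN difference into the sign bit, so an ordinary signed comparison yields a circular (wrap-around) ordering of packet sequence numbers. A minimal, self-contained sketch of the same idea in plain userspace C (hypothetical function name, written with well-defined unsigned arithmetic rather than the driver's signed shift; not the driver code itself):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Circular comparison of two 24-bit sequence numbers.
 * Returns <0, 0 or >0; the unsigned difference is taken modulo 2^32 and
 * shifted left by 8 so that bit 23 of the 24-bit difference becomes the
 * sign bit of the result.
 */
static int32_t cmp24(uint32_t a, uint32_t b)
{
        return (int32_t)((a - b) << 8);
}

int main(void)
{
        assert(cmp24(5, 5) == 0);
        assert(cmp24(6, 5) > 0);
        /* 0x000002 comes "after" 0xfffffe once the 24-bit PSN wraps. */
        assert(cmp24(0x000002, 0xfffffe) > 0);
        assert(cmp24(0xfffffe, 0x000002) < 0);
        printf("24-bit circular compare behaves as expected\n");
        return 0;
}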
diff --git a/drivers/staging/rdma/ipath/ipath_verbs_mcast.c b/drivers/staging/rdma/ipath/ipath_verbs_mcast.c
deleted file mode 100644 (file)
index 72d476f..0000000
+++ /dev/null
@@ -1,363 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/rculist.h>
-#include <linux/slab.h>
-
-#include "ipath_verbs.h"
-
-/*
- * Global table of GID to attached QPs.
- * The table is global to all ipath devices since a send from one QP/device
- * needs to be locally routed to any locally attached QPs on the same
- * or different device.
- */
-static struct rb_root mcast_tree;
-static DEFINE_SPINLOCK(mcast_lock);
-
-/**
- * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
- * @qp: the QP to link
- */
-static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
-{
-       struct ipath_mcast_qp *mqp;
-
-       mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
-       if (!mqp)
-               goto bail;
-
-       mqp->qp = qp;
-       atomic_inc(&qp->refcount);
-
-bail:
-       return mqp;
-}
-
-static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
-{
-       struct ipath_qp *qp = mqp->qp;
-
-       /* Notify ipath_destroy_qp() if it is waiting. */
-       if (atomic_dec_and_test(&qp->refcount))
-               wake_up(&qp->wait);
-
-       kfree(mqp);
-}
-
-/**
- * ipath_mcast_alloc - allocate the multicast GID structure
- * @mgid: the multicast GID
- *
- * A list of QPs will be attached to this structure.
- */
-static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
-{
-       struct ipath_mcast *mcast;
-
-       mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
-       if (!mcast)
-               goto bail;
-
-       mcast->mgid = *mgid;
-       INIT_LIST_HEAD(&mcast->qp_list);
-       init_waitqueue_head(&mcast->wait);
-       atomic_set(&mcast->refcount, 0);
-       mcast->n_attached = 0;
-
-bail:
-       return mcast;
-}
-
-static void ipath_mcast_free(struct ipath_mcast *mcast)
-{
-       struct ipath_mcast_qp *p, *tmp;
-
-       list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
-               ipath_mcast_qp_free(p);
-
-       kfree(mcast);
-}
-
-/**
- * ipath_mcast_find - search the global table for the given multicast GID
- * @mgid: the multicast GID to search for
- *
- * Returns NULL if not found.
- *
- * The caller is responsible for decrementing the reference count if found.
- */
-struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
-{
-       struct rb_node *n;
-       unsigned long flags;
-       struct ipath_mcast *mcast;
-
-       spin_lock_irqsave(&mcast_lock, flags);
-       n = mcast_tree.rb_node;
-       while (n) {
-               int ret;
-
-               mcast = rb_entry(n, struct ipath_mcast, rb_node);
-
-               ret = memcmp(mgid->raw, mcast->mgid.raw,
-                            sizeof(union ib_gid));
-               if (ret < 0)
-                       n = n->rb_left;
-               else if (ret > 0)
-                       n = n->rb_right;
-               else {
-                       atomic_inc(&mcast->refcount);
-                       spin_unlock_irqrestore(&mcast_lock, flags);
-                       goto bail;
-               }
-       }
-       spin_unlock_irqrestore(&mcast_lock, flags);
-
-       mcast = NULL;
-
-bail:
-       return mcast;
-}
-
-/**
- * ipath_mcast_add - insert mcast GID into table and attach QP struct
- * @mcast: the mcast GID table
- * @mqp: the QP to attach
- *
- * Return zero if both were added.  Return EEXIST if the GID was already in
- * the table but the QP was added.  Return ESRCH if the QP was already
- * attached and neither structure was added.
- */
-static int ipath_mcast_add(struct ipath_ibdev *dev,
-                          struct ipath_mcast *mcast,
-                          struct ipath_mcast_qp *mqp)
-{
-       struct rb_node **n = &mcast_tree.rb_node;
-       struct rb_node *pn = NULL;
-       int ret;
-
-       spin_lock_irq(&mcast_lock);
-
-       while (*n) {
-               struct ipath_mcast *tmcast;
-               struct ipath_mcast_qp *p;
-
-               pn = *n;
-               tmcast = rb_entry(pn, struct ipath_mcast, rb_node);
-
-               ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
-                            sizeof(union ib_gid));
-               if (ret < 0) {
-                       n = &pn->rb_left;
-                       continue;
-               }
-               if (ret > 0) {
-                       n = &pn->rb_right;
-                       continue;
-               }
-
-               /* Search the QP list to see if this is already there. */
-               list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
-                       if (p->qp == mqp->qp) {
-                               ret = ESRCH;
-                               goto bail;
-                       }
-               }
-               if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
-                       ret = ENOMEM;
-                       goto bail;
-               }
-
-               tmcast->n_attached++;
-
-               list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
-               ret = EEXIST;
-               goto bail;
-       }
-
-       spin_lock(&dev->n_mcast_grps_lock);
-       if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
-               spin_unlock(&dev->n_mcast_grps_lock);
-               ret = ENOMEM;
-               goto bail;
-       }
-
-       dev->n_mcast_grps_allocated++;
-       spin_unlock(&dev->n_mcast_grps_lock);
-
-       mcast->n_attached++;
-
-       list_add_tail_rcu(&mqp->list, &mcast->qp_list);
-
-       atomic_inc(&mcast->refcount);
-       rb_link_node(&mcast->rb_node, pn, n);
-       rb_insert_color(&mcast->rb_node, &mcast_tree);
-
-       ret = 0;
-
-bail:
-       spin_unlock_irq(&mcast_lock);
-
-       return ret;
-}
-
-int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       struct ipath_ibdev *dev = to_idev(ibqp->device);
-       struct ipath_mcast *mcast;
-       struct ipath_mcast_qp *mqp;
-       int ret;
-
-       /*
-        * Allocate data structures since its better to do this outside of
-        * spin locks and it will most likely be needed.
-        */
-       mcast = ipath_mcast_alloc(gid);
-       if (mcast == NULL) {
-               ret = -ENOMEM;
-               goto bail;
-       }
-       mqp = ipath_mcast_qp_alloc(qp);
-       if (mqp == NULL) {
-               ipath_mcast_free(mcast);
-               ret = -ENOMEM;
-               goto bail;
-       }
-       switch (ipath_mcast_add(dev, mcast, mqp)) {
-       case ESRCH:
-               /* Neither was used: can't attach the same QP twice. */
-               ipath_mcast_qp_free(mqp);
-               ipath_mcast_free(mcast);
-               ret = -EINVAL;
-               goto bail;
-       case EEXIST:            /* The mcast wasn't used */
-               ipath_mcast_free(mcast);
-               break;
-       case ENOMEM:
-               /* Exceeded the maximum number of mcast groups. */
-               ipath_mcast_qp_free(mqp);
-               ipath_mcast_free(mcast);
-               ret = -ENOMEM;
-               goto bail;
-       default:
-               break;
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
-{
-       struct ipath_qp *qp = to_iqp(ibqp);
-       struct ipath_ibdev *dev = to_idev(ibqp->device);
-       struct ipath_mcast *mcast = NULL;
-       struct ipath_mcast_qp *p, *tmp;
-       struct rb_node *n;
-       int last = 0;
-       int ret;
-
-       spin_lock_irq(&mcast_lock);
-
-       /* Find the GID in the mcast table. */
-       n = mcast_tree.rb_node;
-       while (1) {
-               if (n == NULL) {
-                       spin_unlock_irq(&mcast_lock);
-                       ret = -EINVAL;
-                       goto bail;
-               }
-
-               mcast = rb_entry(n, struct ipath_mcast, rb_node);
-               ret = memcmp(gid->raw, mcast->mgid.raw,
-                            sizeof(union ib_gid));
-               if (ret < 0)
-                       n = n->rb_left;
-               else if (ret > 0)
-                       n = n->rb_right;
-               else
-                       break;
-       }
-
-       /* Search the QP list. */
-       list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
-               if (p->qp != qp)
-                       continue;
-               /*
-                * We found it, so remove it, but don't poison the forward
-                * link until we are sure there are no list walkers.
-                */
-               list_del_rcu(&p->list);
-               mcast->n_attached--;
-
-               /* If this was the last attached QP, remove the GID too. */
-               if (list_empty(&mcast->qp_list)) {
-                       rb_erase(&mcast->rb_node, &mcast_tree);
-                       last = 1;
-               }
-               break;
-       }
-
-       spin_unlock_irq(&mcast_lock);
-
-       if (p) {
-               /*
-                * Wait for any list walkers to finish before freeing the
-                * list element.
-                */
-               wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
-               ipath_mcast_qp_free(p);
-       }
-       if (last) {
-               atomic_dec(&mcast->refcount);
-               wait_event(mcast->wait, !atomic_read(&mcast->refcount));
-               ipath_mcast_free(mcast);
-               spin_lock_irq(&dev->n_mcast_grps_lock);
-               dev->n_mcast_grps_allocated--;
-               spin_unlock_irq(&dev->n_mcast_grps_lock);
-       }
-
-       ret = 0;
-
-bail:
-       return ret;
-}
-
-int ipath_mcast_tree_empty(void)
-{
-       return mcast_tree.rb_node == NULL;
-}
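
ipath_multicast_attach() above also illustrates a common locking idiom that its comment hints at: both the mcast group and the per-QP link are allocated with GFP_KERNEL before any spinlock is taken, and whichever structure turns out to be redundant (GID or QP already present) is simply freed again afterwards. A hedged, generic sketch of that idiom with hypothetical item and table names, not the driver's own types:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(table_lock);
static LIST_HEAD(table_list);

struct item {
        struct list_head list;
        u32 key;
};

static int table_insert(u32 key)
{
        struct item *new, *it;

        /* GFP_KERNEL may sleep, so allocate before taking the spinlock. */
        new = kmalloc(sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
        new->key = key;

        spin_lock_irq(&table_lock);
        list_for_each_entry(it, &table_list, list) {
                if (it->key == key) {
                        /* Already present (or lost a race): drop the spare. */
                        spin_unlock_irq(&table_lock);
                        kfree(new);
                        return -EEXIST;
                }
        }
        list_add_tail(&new->list, &table_list);
        spin_unlock_irq(&table_lock);
        return 0;
}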
diff --git a/drivers/staging/rdma/ipath/ipath_wc_ppc64.c b/drivers/staging/rdma/ipath/ipath_wc_ppc64.c
deleted file mode 100644 (file)
index 1a7e20a..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on PowerPC only.  Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include "ipath_kernel.h"
-
-/**
- * ipath_enable_wc - enable write combining for MMIO writes to the device
- * @dd: infinipath device
- *
- * Nothing to do on PowerPC, so just return without error.
- */
-int ipath_enable_wc(struct ipath_devdata *dd)
-{
-       return 0;
-}
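
The comment in the deleted file points at the mechanism behind this per-architecture split: generic code provides weak default definitions, and an architecture-specific file such as this one overrides them with strong definitions at link time. A minimal illustration of that linker behaviour using GCC/Clang __attribute__((weak)); the file and function names below are hypothetical, not the driver sources:

/* wc_generic.c (hypothetical): weak fallback linked on every architecture. */
int __attribute__((weak)) my_enable_wc(void)
{
        return 0;       /* nothing to do where no WC setup is needed */
}

/* wc_arch.c (hypothetical): built only on the relevant architecture.
 * Its strong definition overrides the weak one when both objects are linked.
 */
int my_enable_wc(void)
{
        return arch_setup_write_combining();    /* hypothetical helper */
}

If only wc_generic.c is linked, callers get the no-op default; as soon as the arch-specific object is added to the link, the strong symbol wins without any #ifdef in the callers.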
diff --git a/drivers/staging/rdma/ipath/ipath_wc_x86_64.c b/drivers/staging/rdma/ipath/ipath_wc_x86_64.c
deleted file mode 100644 (file)
index 7b6e4c8..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-/*
- * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
- * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses.  You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- *     Redistribution and use in source and binary forms, with or
- *     without modification, are permitted provided that the following
- *     conditions are met:
- *
- *      - Redistributions of source code must retain the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer.
- *
- *      - Redistributions in binary form must reproduce the above
- *        copyright notice, this list of conditions and the following
- *        disclaimer in the documentation and/or other materials
- *        provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-/*
- * This file is conditionally built on x86_64 only.  Otherwise weak symbol
- * versions of the functions exported from here are used.
- */
-
-#include <linux/pci.h>
-#include <asm/processor.h>
-
-#include "ipath_kernel.h"
-
-/**
- * ipath_enable_wc - enable write combining for MMIO writes to the device
- * @dd: infinipath device
- *
- * This routine is x86_64-specific; it twiddles the CPU's MTRRs to enable
- * write combining.
- */
-int ipath_enable_wc(struct ipath_devdata *dd)
-{
-       int ret = 0;
-       u64 pioaddr, piolen;
-       unsigned bits;
-       const unsigned long addr = pci_resource_start(dd->pcidev, 0);
-       const size_t len = pci_resource_len(dd->pcidev, 0);
-
-       /*
-        * Set the PIO buffers to be WCCOMB, so we get HT bursts to the
-        * chip.  Linux (possibly the hardware) requires it to be on a power
-        * of 2 address matching the length (which has to be a power of 2).
-        * For rev1, that means the base address, for rev2, it will be just
-        * the PIO buffers themselves.
-        * For chips with two sets of buffers, the calculations are
-        * somewhat more complicated; we need to sum, and the piobufbase
-        * register has both offsets, 2K in low 32 bits, 4K in high 32 bits.
-        * The buffers are still packed, so a single range covers both.
-        */
-       if (dd->ipath_piobcnt2k && dd->ipath_piobcnt4k) { /* 2 sizes */
-               unsigned long pio2kbase, pio4kbase;
-               pio2kbase = dd->ipath_piobufbase & 0xffffffffUL;
-               pio4kbase = (dd->ipath_piobufbase >> 32) & 0xffffffffUL;
-               if (pio2kbase < pio4kbase) { /* all, for now */
-                       pioaddr = addr + pio2kbase;
-                       piolen = pio4kbase - pio2kbase +
-                               dd->ipath_piobcnt4k * dd->ipath_4kalign;
-               } else {
-                       pioaddr = addr + pio4kbase;
-                       piolen = pio2kbase - pio4kbase +
-                               dd->ipath_piobcnt2k * dd->ipath_palign;
-               }
-       } else {  /* single buffer size (2K, currently) */
-               pioaddr = addr + dd->ipath_piobufbase;
-               piolen = dd->ipath_piobcnt2k * dd->ipath_palign +
-                       dd->ipath_piobcnt4k * dd->ipath_4kalign;
-       }
-
-       for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-               /* do nothing */ ;
-
-       if (piolen != (1ULL << bits)) {
-               piolen >>= bits;
-               while (piolen >>= 1)
-                       bits++;
-               piolen = 1ULL << (bits + 1);
-       }
-       if (pioaddr & (piolen - 1)) {
-               u64 atmp;
-               ipath_dbg("pioaddr %llx not on right boundary for size "
-                         "%llx, fixing\n",
-                         (unsigned long long) pioaddr,
-                         (unsigned long long) piolen);
-               atmp = pioaddr & ~(piolen - 1);
-               if (atmp < addr || (atmp + piolen) > (addr + len)) {
-                       ipath_dev_err(dd, "No way to align address/size "
-                                     "(%llx/%llx), no WC mtrr\n",
-                                     (unsigned long long) atmp,
-                                     (unsigned long long) piolen << 1);
-                       ret = -ENODEV;
-               } else {
-                       ipath_dbg("changing WC base from %llx to %llx, "
-                                 "len from %llx to %llx\n",
-                                 (unsigned long long) pioaddr,
-                                 (unsigned long long) atmp,
-                                 (unsigned long long) piolen,
-                                 (unsigned long long) piolen << 1);
-                       pioaddr = atmp;
-                       piolen <<= 1;
-               }
-       }
-
-       if (!ret) {
-               dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
-               if (dd->wc_cookie < 0) {
-                       ipath_dev_err(dd, "Seting mtrr failed on PIO buffers\n");
-                       ret = -ENODEV;
-               } else if (dd->wc_cookie == 0)
-                       ipath_cdbg(VERBOSE, "Set mtrr for chip to WC not needed\n");
-               else
-                       ipath_cdbg(VERBOSE, "Set mtrr for chip to WC\n");
-       }
-
-       return ret;
-}
-
-/**
- * ipath_disable_wc - disable write combining for MMIO writes to the device
- * @dd: infinipath device
- */
-void ipath_disable_wc(struct ipath_devdata *dd)
-{
-       arch_phys_wc_del(dd->wc_cookie);
-}
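
Most of ipath_enable_wc() above is spent making the PIO region acceptable to the MTRR code: the length must be a power of two and the base address must be aligned to that length. A small standalone sketch of the same arithmetic, simplified and with hypothetical values, not the driver code:

#include <inttypes.h>
#include <stdio.h>

/* Round len up to the next power of two (len must be non-zero). */
static uint64_t roundup_pow2(uint64_t len)
{
        uint64_t p = 1;

        while (p < len)
                p <<= 1;
        return p;
}

int main(void)
{
        uint64_t base  = 0x90003000;    /* hypothetical MMIO offset */
        uint64_t len   = 0x6000;        /* not a power of two */
        uint64_t plen  = roundup_pow2(len);
        uint64_t pbase = base & ~(plen - 1);    /* align base down to plen */

        printf("len 0x%" PRIx64 " -> 0x%" PRIx64 ", base 0x%" PRIx64 " -> 0x%" PRIx64 "\n",
               len, plen, base, pbase);
        /* As in the driver, a real caller must still check that the aligned,
         * possibly enlarged range stays inside the underlying BAR. */
        return 0;
}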
index 2096d78913bd909b864f5659d7e904a182d14f68..8eac7cdd5f3ebd65f10051568367d695ec0860a4 100644 (file)
@@ -9,6 +9,8 @@
  * more details.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -18,7 +20,6 @@
 #include <linux/if_ether.h>
 #include <linux/if_arp.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>
 #include <linux/etherdevice.h>
@@ -48,10 +49,10 @@ struct rtllib_tkip_data {
        u32 dot11RSNAStatsTKIPLocalMICFailures;
 
        int key_idx;
-       struct crypto_blkcipher *rx_tfm_arc4;
-       struct crypto_hash *rx_tfm_michael;
-       struct crypto_blkcipher *tx_tfm_arc4;
-       struct crypto_hash *tx_tfm_michael;
+       struct crypto_skcipher *rx_tfm_arc4;
+       struct crypto_ahash *rx_tfm_michael;
+       struct crypto_skcipher *tx_tfm_arc4;
+       struct crypto_ahash *tx_tfm_michael;
        /* scratch buffers for virt_to_page() (crypto API) */
        u8 rx_hdr[16];
        u8 tx_hdr[16];
@@ -65,32 +66,32 @@ static void *rtllib_tkip_init(int key_idx)
        if (priv == NULL)
                goto fail;
        priv->key_idx = key_idx;
-       priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-                       CRYPTO_ALG_ASYNC);
+       priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_arc4)) {
                pr_debug("Could not allocate crypto API arc4\n");
                priv->tx_tfm_arc4 = NULL;
                goto fail;
        }
 
-       priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-                       CRYPTO_ALG_ASYNC);
+       priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_michael)) {
                pr_debug("Could not allocate crypto API michael_mic\n");
                priv->tx_tfm_michael = NULL;
                goto fail;
        }
 
-       priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-                       CRYPTO_ALG_ASYNC);
+       priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_arc4)) {
                pr_debug("Could not allocate crypto API arc4\n");
                priv->rx_tfm_arc4 = NULL;
                goto fail;
        }
 
-       priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-                       CRYPTO_ALG_ASYNC);
+       priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_michael)) {
                pr_debug("Could not allocate crypto API michael_mic\n");
                priv->rx_tfm_michael = NULL;
@@ -100,14 +101,10 @@ static void *rtllib_tkip_init(int key_idx)
 
 fail:
        if (priv) {
-               if (priv->tx_tfm_michael)
-                       crypto_free_hash(priv->tx_tfm_michael);
-               if (priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(priv->tx_tfm_arc4);
-               if (priv->rx_tfm_michael)
-                       crypto_free_hash(priv->rx_tfm_michael);
-               if (priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(priv->rx_tfm_arc4);
+               crypto_free_ahash(priv->tx_tfm_michael);
+               crypto_free_skcipher(priv->tx_tfm_arc4);
+               crypto_free_ahash(priv->rx_tfm_michael);
+               crypto_free_skcipher(priv->rx_tfm_arc4);
                kfree(priv);
        }
 
@@ -120,14 +117,10 @@ static void rtllib_tkip_deinit(void *priv)
        struct rtllib_tkip_data *_priv = priv;
 
        if (_priv) {
-               if (_priv->tx_tfm_michael)
-                       crypto_free_hash(_priv->tx_tfm_michael);
-               if (_priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->tx_tfm_arc4);
-               if (_priv->rx_tfm_michael)
-                       crypto_free_hash(_priv->rx_tfm_michael);
-               if (_priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->rx_tfm_arc4);
+               crypto_free_ahash(_priv->tx_tfm_michael);
+               crypto_free_skcipher(_priv->tx_tfm_arc4);
+               crypto_free_ahash(_priv->rx_tfm_michael);
+               crypto_free_skcipher(_priv->rx_tfm_arc4);
        }
        kfree(priv);
 }
@@ -301,7 +294,6 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        struct rtllib_hdr_4addr *hdr;
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
                                    MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
        int ret = 0;
        u8 rc4key[16],  *icv;
        u32 crc;
@@ -347,6 +339,8 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+
                icv = skb_put(skb, 4);
                crc = ~crc32_le(~0, pos, len);
                icv[0] = crc;
@@ -357,8 +351,12 @@ static int rtllib_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
                sg_init_one(&sg, pos, len+4);
 
 
-               crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
-               ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+               crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+               skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+               ret = crypto_skcipher_encrypt(req);
+               skcipher_request_zero(req);
        }
 
        tkey->tx_iv16++;
@@ -384,12 +382,12 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        struct rtllib_hdr_4addr *hdr;
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
                                    MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
        u8 rc4key[16];
        u8 icv[4];
        u32 crc;
        struct scatterlist sg;
        int plen;
+       int err;
 
        if (skb->len < hdr_len + 8 + 4)
                return -1;
@@ -425,6 +423,8 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        pos += 8;
 
        if (!tcb_desc->bHwSec || (skb->cb[0] == 1)) {
+               SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+
                if ((iv32 < tkey->rx_iv32 ||
                    (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) &&
                    tkey->initialized) {
@@ -450,8 +450,13 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 
                sg_init_one(&sg, pos, plen+4);
 
-               crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
-               if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+               crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+               skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+               err = crypto_skcipher_decrypt(req);
+               skcipher_request_zero(req);
+               if (err) {
                        if (net_ratelimit()) {
                                netdev_dbg(skb->dev,
                                           "Failed to decrypt received packet from %pM\n",
@@ -500,11 +505,12 @@ static int rtllib_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 }
 
 
-static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
                       u8 *data, size_t data_len, u8 *mic)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm_michael);
        struct scatterlist sg[2];
+       int err;
 
        if (tfm_michael == NULL) {
                pr_warn("michael_mic: tfm_michael == NULL\n");
@@ -514,12 +520,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
        sg_set_buf(&sg[0], hdr, 16);
        sg_set_buf(&sg[1], data, data_len);
 
-       if (crypto_hash_setkey(tfm_michael, key, 8))
+       if (crypto_ahash_setkey(tfm_michael, key, 8))
                return -1;
 
-       desc.tfm = tfm_michael;
-       desc.flags = 0;
-       return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+       ahash_request_set_tfm(req, tfm_michael);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, mic, data_len + 16);
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
+       return err;
 }
 
 static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -655,10 +664,10 @@ static int rtllib_tkip_set_key(void *key, int len, u8 *seq, void *priv)
 {
        struct rtllib_tkip_data *tkey = priv;
        int keyidx;
-       struct crypto_hash *tfm = tkey->tx_tfm_michael;
-       struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
-       struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
-       struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+       struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+       struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+       struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+       struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
 
        keyidx = tkey->key_idx;
        memset(tkey, 0, sizeof(*tkey));
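
The hunks above all perform the same mechanical conversion: the legacy synchronous crypto_blkcipher/crypto_hash interfaces are replaced by crypto_skcipher/crypto_ahash with an on-stack request. Condensed, the encrypt-side pattern the converted code follows looks roughly like this; tfm, rc4key, data and data_len are stand-ins for the driver's own variables, and this is a fragment, not a complete function:

struct scatterlist sg;
int err;
SKCIPHER_REQUEST_ON_STACK(req, tfm);    /* request lives on the stack */

/* tfm was created earlier with:
 *   tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
 */
crypto_skcipher_setkey(tfm, rc4key, 16);        /* per-packet RC4 key */
sg_init_one(&sg, data, data_len);               /* payload plus 4-byte ICV */

skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);          /* synchronous use */
skcipher_request_set_crypt(req, &sg, &sg, data_len, NULL);  /* in place, no IV */
err = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);     /* wipe the on-stack request (holds key state) */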
index 21d7eee4c9a92303b480a50c8b715b146bc618b4..b3343a5d0fd6c80b11c03cd5feecccad54f9012c 100644 (file)
@@ -9,6 +9,7 @@
  * more details.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -17,8 +18,6 @@
 #include <linux/string.h>
 #include "rtllib.h"
 
-#include <linux/crypto.h>
-
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>
 
@@ -28,8 +27,8 @@ struct prism2_wep_data {
        u8 key[WEP_KEY_LEN + 1];
        u8 key_len;
        u8 key_idx;
-       struct crypto_blkcipher *tx_tfm;
-       struct crypto_blkcipher *rx_tfm;
+       struct crypto_skcipher *tx_tfm;
+       struct crypto_skcipher *rx_tfm;
 };
 
 
@@ -42,13 +41,13 @@ static void *prism2_wep_init(int keyidx)
                goto fail;
        priv->key_idx = keyidx;
 
-       priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm)) {
                pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
                priv->tx_tfm = NULL;
                goto fail;
        }
-       priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm)) {
                pr_debug("rtllib_crypt_wep: could not allocate crypto API arc4\n");
                priv->rx_tfm = NULL;
@@ -62,10 +61,8 @@ static void *prism2_wep_init(int keyidx)
 
 fail:
        if (priv) {
-               if (priv->tx_tfm)
-                       crypto_free_blkcipher(priv->tx_tfm);
-               if (priv->rx_tfm)
-                       crypto_free_blkcipher(priv->rx_tfm);
+               crypto_free_skcipher(priv->tx_tfm);
+               crypto_free_skcipher(priv->rx_tfm);
                kfree(priv);
        }
        return NULL;
@@ -77,10 +74,8 @@ static void prism2_wep_deinit(void *priv)
        struct prism2_wep_data *_priv = priv;
 
        if (_priv) {
-               if (_priv->tx_tfm)
-                       crypto_free_blkcipher(_priv->tx_tfm);
-               if (_priv->rx_tfm)
-                       crypto_free_blkcipher(_priv->rx_tfm);
+               crypto_free_skcipher(_priv->tx_tfm);
+               crypto_free_skcipher(_priv->rx_tfm);
        }
        kfree(priv);
 }
@@ -99,10 +94,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 *pos;
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
                                    MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
        u32 crc;
        u8 *icv;
        struct scatterlist sg;
+       int err;
 
        if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
            skb->len < hdr_len){
@@ -140,6 +135,7 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        memcpy(key + 3, wep->key, wep->key_len);
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
 
                /* Append little-endian CRC32 and encrypt it to produce ICV */
                crc = ~crc32_le(~0, pos, len);
@@ -150,8 +146,13 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
                icv[3] = crc >> 24;
 
                sg_init_one(&sg, pos, len+4);
-               crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
-               return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+               crypto_skcipher_setkey(wep->tx_tfm, key, klen);
+               skcipher_request_set_tfm(req, wep->tx_tfm);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+               err = crypto_skcipher_encrypt(req);
+               skcipher_request_zero(req);
+               return err;
        }
 
        return 0;
@@ -173,10 +174,10 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 keyidx, *pos;
        struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
                                    MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
        u32 crc;
        u8 icv[4];
        struct scatterlist sg;
+       int err;
 
        if (skb->len < hdr_len + 8)
                return -1;
@@ -198,9 +199,16 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        plen = skb->len - hdr_len - 8;
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+
                sg_init_one(&sg, pos, plen+4);
-               crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
-               if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+               crypto_skcipher_setkey(wep->rx_tfm, key, klen);
+               skcipher_request_set_tfm(req, wep->rx_tfm);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+               err = crypto_skcipher_decrypt(req);
+               skcipher_request_zero(req);
+               if (err)
                        return -7;
                crc = ~crc32_le(~0, pos, plen);
                icv[0] = crc;
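
The WEP path above appends a little-endian CRC-32 of the plaintext as the 4-byte ICV before encrypting. Outside the kernel the same value should be reproducible with an ordinary CRC-32 implementation; a small userspace sketch using zlib's crc32(), which (as far as the standard CRC-32 definition goes) already applies the pre- and post-inversion that the kernel expresses as ~crc32_le(~0, ...):

#include <stdint.h>
#include <stdio.h>
#include <zlib.h>       /* link with -lz */

int main(void)
{
        const unsigned char payload[] = "example 802.11 payload";
        size_t len = sizeof(payload) - 1;
        uint32_t crc = crc32(0L, payload, len); /* standard (finalized) CRC-32 */
        unsigned char icv[4];

        /* Store least-significant byte first, as the driver does. */
        icv[0] = crc & 0xff;
        icv[1] = (crc >> 8) & 0xff;
        icv[2] = (crc >> 16) & 0xff;
        icv[3] = (crc >> 24) & 0xff;

        printf("ICV: %02x %02x %02x %02x\n", icv[0], icv[1], icv[2], icv[3]);
        return 0;
}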
index 908bc2eb4d29e9842b482eb5ad9d67ae6f119acb..6fa96d57d31629aebfd49ec81cd6f13e0f46cf81 100644 (file)
@@ -21,7 +21,8 @@
 
 #include "ieee80211.h"
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
        #include <linux/scatterlist.h>
 #include <linux/crc32.h>
 
@@ -52,10 +53,10 @@ struct ieee80211_tkip_data {
 
        int key_idx;
 
-       struct crypto_blkcipher *rx_tfm_arc4;
-       struct crypto_hash *rx_tfm_michael;
-       struct crypto_blkcipher *tx_tfm_arc4;
-       struct crypto_hash *tx_tfm_michael;
+       struct crypto_skcipher *rx_tfm_arc4;
+       struct crypto_ahash *rx_tfm_michael;
+       struct crypto_skcipher *tx_tfm_arc4;
+       struct crypto_ahash *tx_tfm_michael;
 
        /* scratch buffers for virt_to_page() (crypto API) */
        u8 rx_hdr[16], tx_hdr[16];
@@ -70,7 +71,7 @@ static void *ieee80211_tkip_init(int key_idx)
                goto fail;
        priv->key_idx = key_idx;
 
-       priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+       priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_arc4)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -79,7 +80,7 @@ static void *ieee80211_tkip_init(int key_idx)
                goto fail;
        }
 
-       priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+       priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_michael)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -88,7 +89,7 @@ static void *ieee80211_tkip_init(int key_idx)
                goto fail;
        }
 
-       priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
+       priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_arc4)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -97,7 +98,7 @@ static void *ieee80211_tkip_init(int key_idx)
                goto fail;
        }
 
-       priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
+       priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_michael)) {
                printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
@@ -110,14 +111,10 @@ static void *ieee80211_tkip_init(int key_idx)
 
 fail:
        if (priv) {
-               if (priv->tx_tfm_michael)
-                       crypto_free_hash(priv->tx_tfm_michael);
-               if (priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(priv->tx_tfm_arc4);
-               if (priv->rx_tfm_michael)
-                       crypto_free_hash(priv->rx_tfm_michael);
-               if (priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(priv->rx_tfm_arc4);
+               crypto_free_ahash(priv->tx_tfm_michael);
+               crypto_free_skcipher(priv->tx_tfm_arc4);
+               crypto_free_ahash(priv->rx_tfm_michael);
+               crypto_free_skcipher(priv->rx_tfm_arc4);
                kfree(priv);
        }
 
@@ -130,14 +127,10 @@ static void ieee80211_tkip_deinit(void *priv)
        struct ieee80211_tkip_data *_priv = priv;
 
        if (_priv) {
-               if (_priv->tx_tfm_michael)
-                       crypto_free_hash(_priv->tx_tfm_michael);
-               if (_priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->tx_tfm_arc4);
-               if (_priv->rx_tfm_michael)
-                       crypto_free_hash(_priv->rx_tfm_michael);
-               if (_priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->rx_tfm_arc4);
+               crypto_free_ahash(_priv->tx_tfm_michael);
+               crypto_free_skcipher(_priv->tx_tfm_arc4);
+               crypto_free_ahash(_priv->rx_tfm_michael);
+               crypto_free_skcipher(_priv->rx_tfm_arc4);
        }
        kfree(priv);
 }
@@ -312,7 +305,6 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 *pos;
        struct rtl_80211_hdr_4addr *hdr;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4};
        int ret = 0;
        u8 rc4key[16],  *icv;
        u32 crc;
@@ -357,15 +349,21 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
+
                icv = skb_put(skb, 4);
                crc = ~crc32_le(~0, pos, len);
                icv[0] = crc;
                icv[1] = crc >> 8;
                icv[2] = crc >> 16;
                icv[3] = crc >> 24;
-               crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+               crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
                sg_init_one(&sg, pos, len+4);
-               ret = crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+               skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+               ret = crypto_skcipher_encrypt(req);
+               skcipher_request_zero(req);
        }
 
        tkey->tx_iv16++;
@@ -390,12 +388,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u16 iv16;
        struct rtl_80211_hdr_4addr *hdr;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4};
        u8 rc4key[16];
        u8 icv[4];
        u32 crc;
        struct scatterlist sg;
        int plen;
+       int err;
 
        if (skb->len < hdr_len + 8 + 4)
                return -1;
@@ -429,6 +427,8 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        pos += 8;
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
+
                if (iv32 < tkey->rx_iv32 ||
                (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
                        if (net_ratelimit()) {
@@ -449,10 +449,16 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 
                plen = skb->len - hdr_len - 12;
 
-               crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+               crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
                sg_init_one(&sg, pos, plen+4);
 
-               if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+               skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+
+               err = crypto_skcipher_decrypt(req);
+               skcipher_request_zero(req);
+               if (err) {
                        if (net_ratelimit()) {
                                printk(KERN_DEBUG ": TKIP: failed to decrypt "
                                                "received packet from %pM\n",
@@ -501,11 +507,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        return keyidx;
 }
 
-static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 *key, u8 *hdr,
                       u8 *data, size_t data_len, u8 *mic)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm_michael);
        struct scatterlist sg[2];
+       int err;
 
        if (tfm_michael == NULL) {
                printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
@@ -516,12 +523,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *hdr,
        sg_set_buf(&sg[0], hdr, 16);
        sg_set_buf(&sg[1], data, data_len);
 
-       if (crypto_hash_setkey(tfm_michael, key, 8))
+       if (crypto_ahash_setkey(tfm_michael, key, 8))
                return -1;
 
-       desc.tfm = tfm_michael;
-       desc.flags = 0;
-       return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+       ahash_request_set_tfm(req, tfm_michael);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, mic, data_len + 16);
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
+       return err;
 }
 
 static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
@@ -660,10 +670,10 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
 {
        struct ieee80211_tkip_data *tkey = priv;
        int keyidx;
-       struct crypto_hash *tfm = tkey->tx_tfm_michael;
-       struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
-       struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
-       struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+       struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+       struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+       struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+       struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
 
        keyidx = tkey->key_idx;
        memset(tkey, 0, sizeof(*tkey));
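For reference, the michael_mic() and TKIP hunks above all follow the same on-stack ahash pattern. A minimal sketch of that pattern, assuming a tfm obtained with crypto_alloc_ahash("michael_mic", 0, CRYPTO_ALG_ASYNC); michael_mic_sketch() and its buffer layout are illustrative, not part of the patch:

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int michael_mic_sketch(struct crypto_ahash *tfm, u8 *key,
                              u8 *hdr, u8 *data, size_t data_len, u8 *mic)
{
        AHASH_REQUEST_ON_STACK(req, tfm);       /* request lives on the stack */
        struct scatterlist sg[2];
        int err;

        if (crypto_ahash_setkey(tfm, key, 8))
                return -1;

        sg_init_table(sg, 2);
        sg_set_buf(&sg[0], hdr, 16);            /* pseudo-header */
        sg_set_buf(&sg[1], data, data_len);     /* payload */

        ahash_request_set_tfm(req, tfm);
        ahash_request_set_callback(req, 0, NULL, NULL);  /* synchronous use */
        ahash_request_set_crypt(req, sg, mic, data_len + 16);
        err = crypto_ahash_digest(req);
        ahash_request_zero(req);                /* wipe request state off the stack */
        return err;
}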
index 681611dc93d39c9c070af9c63e401365fda48653..ababb6de125bf12c6ae16b63a0ce82be943e6e92 100644 (file)
@@ -18,7 +18,7 @@
 
 #include "ieee80211.h"
 
-#include <linux/crypto.h>
+#include <crypto/skcipher.h>
 #include <linux/scatterlist.h>
 #include <linux/crc32.h>
 
@@ -32,8 +32,8 @@ struct prism2_wep_data {
        u8 key[WEP_KEY_LEN + 1];
        u8 key_len;
        u8 key_idx;
-       struct crypto_blkcipher *tx_tfm;
-       struct crypto_blkcipher *rx_tfm;
+       struct crypto_skcipher *tx_tfm;
+       struct crypto_skcipher *rx_tfm;
 };
 
 
@@ -46,10 +46,10 @@ static void *prism2_wep_init(int keyidx)
                return NULL;
        priv->key_idx = keyidx;
 
-       priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm))
                goto free_priv;
-       priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm))
                goto free_tx;
 
@@ -58,7 +58,7 @@ static void *prism2_wep_init(int keyidx)
 
        return priv;
 free_tx:
-       crypto_free_blkcipher(priv->tx_tfm);
+       crypto_free_skcipher(priv->tx_tfm);
 free_priv:
        kfree(priv);
        return NULL;
@@ -70,10 +70,8 @@ static void prism2_wep_deinit(void *priv)
        struct prism2_wep_data *_priv = priv;
 
        if (_priv) {
-               if (_priv->tx_tfm)
-                       crypto_free_blkcipher(_priv->tx_tfm);
-               if (_priv->rx_tfm)
-                       crypto_free_blkcipher(_priv->rx_tfm);
+               crypto_free_skcipher(_priv->tx_tfm);
+               crypto_free_skcipher(_priv->rx_tfm);
        }
        kfree(priv);
 }
@@ -91,10 +89,10 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 key[WEP_KEY_LEN + 3];
        u8 *pos;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
        u32 crc;
        u8 *icv;
        struct scatterlist sg;
+       int err;
 
        if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
            skb->len < hdr_len)
@@ -129,6 +127,8 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        memcpy(key + 3, wep->key, wep->key_len);
 
        if (!tcb_desc->bHwSec) {
+               SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
+
                /* Append little-endian CRC32 and encrypt it to produce ICV */
                crc = ~crc32_le(~0, pos, len);
                icv = skb_put(skb, 4);
@@ -137,10 +137,16 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
                icv[2] = crc >> 16;
                icv[3] = crc >> 24;
 
-               crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
+               crypto_skcipher_setkey(wep->tx_tfm, key, klen);
                sg_init_one(&sg, pos, len+4);
 
-               return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+               skcipher_request_set_tfm(req, wep->tx_tfm);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+
+               err = crypto_skcipher_encrypt(req);
+               skcipher_request_zero(req);
+               return err;
        }
 
        return 0;
@@ -161,10 +167,10 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u8 key[WEP_KEY_LEN + 3];
        u8 keyidx, *pos;
        cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
-       struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
        u32 crc;
        u8 icv[4];
        struct scatterlist sg;
+       int err;
 
        if (skb->len < hdr_len + 8)
                return -1;
@@ -186,10 +192,18 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        plen = skb->len - hdr_len - 8;
 
        if (!tcb_desc->bHwSec) {
-               crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
+               SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
+
+               crypto_skcipher_setkey(wep->rx_tfm, key, klen);
                sg_init_one(&sg, pos, plen+4);
 
-               if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+               skcipher_request_set_tfm(req, wep->rx_tfm);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
+               skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+
+               err = crypto_skcipher_decrypt(req);
+               skcipher_request_zero(req);
+               if (err)
                        return -7;
 
                crc = ~crc32_le(~0, pos, plen);
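The WEP path above uses the matching on-stack skcipher pattern. A minimal sketch, assuming a tfm from crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); arc4_crypt_sketch() is an illustrative name:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int arc4_crypt_sketch(struct crypto_skcipher *tfm, const u8 *key,
                             unsigned int keylen, u8 *buf, unsigned int len)
{
        SKCIPHER_REQUEST_ON_STACK(req, tfm);
        struct scatterlist sg;
        int err;

        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                return err;

        sg_init_one(&sg, buf, len);
        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);     /* no async completion */
        skcipher_request_set_crypt(req, &sg, &sg, len, NULL);  /* in place, no IV */

        err = crypto_skcipher_encrypt(req);     /* ecb(arc4): encrypt == decrypt */
        skcipher_request_zero(req);             /* wipe the request off the stack */
        return err;
}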
index efd6f4560d3e55ceeab571fdc1d0ebc242a38df2..7e8037e230b8e024ae2bb9d8797bba1d9eab7141 100644 (file)
@@ -1,7 +1,7 @@
 menu "Speakup console speech"
 
 config SPEAKUP
-       depends on VT
+       depends on VT && !MN10300
        tristate "Speakup core"
        ---help---
                This is the Speakup screen reader.  Think of it as a
index 63c59bc89b040fe68fcf01d364d5b636e8ac6707..30cf973f326dc467fba09f1a7c85a05acf0f9101 100644 (file)
@@ -264,8 +264,9 @@ static struct notifier_block vt_notifier_block = {
        .notifier_call = vt_notifier_call,
 };
 
-static unsigned char get_attributes(u16 *pos)
+static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
 {
+       pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
        return (u_char) (scr_readw(pos) >> 8);
 }
 
@@ -275,7 +276,7 @@ static void speakup_date(struct vc_data *vc)
        spk_y = spk_cy = vc->vc_y;
        spk_pos = spk_cp = vc->vc_pos;
        spk_old_attr = spk_attr;
-       spk_attr = get_attributes((u_short *) spk_pos);
+       spk_attr = get_attributes(vc, (u_short *)spk_pos);
 }
 
 static void bleep(u_short val)
@@ -469,8 +470,12 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
        u16 ch = ' ';
 
        if (vc && pos) {
-               u16 w = scr_readw(pos);
-               u16 c = w & 0xff;
+               u16 w;
+               u16 c;
+
+               pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
+               w = scr_readw(pos);
+               c = w & 0xff;
 
                if (w & vc->vc_hi_font_mask)
                        c |= 0x100;
@@ -746,7 +751,7 @@ static int get_line(struct vc_data *vc)
        u_char tmp2;
 
        spk_old_attr = spk_attr;
-       spk_attr = get_attributes((u_short *) spk_pos);
+       spk_attr = get_attributes(vc, (u_short *)spk_pos);
        for (i = 0; i < vc->vc_cols; i++) {
                buf[i] = (u_char) get_char(vc, (u_short *) tmp, &tmp2);
                tmp += 2;
@@ -811,7 +816,7 @@ static int say_from_to(struct vc_data *vc, u_long from, u_long to,
        u_short saved_punc_mask = spk_punc_mask;
 
        spk_old_attr = spk_attr;
-       spk_attr = get_attributes((u_short *) from);
+       spk_attr = get_attributes(vc, (u_short *)from);
        while (from < to) {
                buf[i++] = (char)get_char(vc, (u_short *) from, &tmp);
                from += 2;
@@ -886,7 +891,7 @@ static int get_sentence_buf(struct vc_data *vc, int read_punc)
        sentmarks[bn][0] = &sentbuf[bn][0];
        i = 0;
        spk_old_attr = spk_attr;
-       spk_attr = get_attributes((u_short *) start);
+       spk_attr = get_attributes(vc, (u_short *)start);
 
        while (start < end) {
                sentbuf[bn][i] = (char)get_char(vc, (u_short *) start, &tmp);
@@ -1585,7 +1590,7 @@ static int count_highlight_color(struct vc_data *vc)
                u16 *ptr;
 
                for (ptr = start; ptr < end; ptr++) {
-                       ch = get_attributes(ptr);
+                       ch = get_attributes(vc, ptr);
                        bg = (ch & 0x70) >> 4;
                        speakup_console[vc_num]->ht.bgcount[bg]++;
                }
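The hunks above funnel every screen read through screen_pos() so that the buffer the user actually sees (possibly soft-scrolled) is used. A minimal sketch of that access pattern; read_cell_sketch() is an illustrative name:

#include <linux/console_struct.h>
#include <linux/selection.h>
#include <linux/vt_buffer.h>

static u16 read_cell_sketch(struct vc_data *vc, u16 *pos, u8 *attr)
{
        u16 w;

        /* translate the raw offset into the currently visible buffer */
        pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
        w = scr_readw(pos);

        *attr = w >> 8;         /* high byte: attribute, as in get_attributes() */
        return w & 0xff;        /* low byte: character; get_char() above also ORs
                                 * in 0x100 when vc_hi_font_mask is set */
}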
index aa5ab6c80ed4ffefc12aaca9c1337e403371d478..41ef099b7aa69f345d33998c46e2bce11ad691d9 100644 (file)
@@ -142,7 +142,9 @@ static void __speakup_paste_selection(struct work_struct *work)
        struct tty_ldisc *ld;
        DECLARE_WAITQUEUE(wait, current);
 
-       ld = tty_ldisc_ref_wait(tty);
+       ld = tty_ldisc_ref(tty);
+       if (!ld)
+               goto tty_unref;
        tty_buffer_lock_exclusive(&vc->port);
 
        add_wait_queue(&vc->paste_wait, &wait);
@@ -162,6 +164,7 @@ static void __speakup_paste_selection(struct work_struct *work)
 
        tty_buffer_unlock_exclusive(&vc->port);
        tty_ldisc_deref(ld);
+tty_unref:
        tty_kref_put(tty);
 }
 
index 3b5835b2812849f0c32055f065ef0be6a0c4d0d0..a5bbb338f275495c18bf6051adf60ee8a3bbcc1d 100644 (file)
@@ -6,6 +6,11 @@
 #include "spk_priv.h"
 #include "serialio.h"
 
+#include <linux/serial_core.h>
+/* WARNING:  Do not change this to <linux/serial.h> without testing that
+ * SERIAL_PORT_DFNS does get defined to the appropriate value. */
+#include <asm/serial.h>
+
 #ifndef SERIAL_PORT_DFNS
 #define SERIAL_PORT_DFNS
 #endif
@@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
        int baud = 9600, quot = 0;
        unsigned int cval = 0;
        int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
-       const struct old_serial_port *ser = rs_table + index;
+       const struct old_serial_port *ser;
        int err;
 
+       if (index >= ARRAY_SIZE(rs_table)) {
+               pr_info("no port info for ttyS%d\n", index);
+               return NULL;
+       }
+       ser = rs_table + index;
+
        /*      Divisor, bytesize and parity */
        quot = ser->baud_base / baud;
        cval = cflag & (CSIZE | CSTOPB);
index 576a7a43470ce0b0c0932e563bdb2fa639f7b110..961202f4e9aa4a2004f85d89f7a8a638f3898814 100644 (file)
@@ -16,9 +16,9 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <crypto/hash.h>
 #include <linux/string.h>
 #include <linux/kthread.h>
-#include <linux/crypto.h>
 #include <linux/completion.h>
 #include <linux/module.h>
 #include <linux/vmalloc.h>
@@ -1190,7 +1190,7 @@ iscsit_handle_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 }
 
 static u32 iscsit_do_crypto_hash_sg(
-       struct hash_desc *hash,
+       struct ahash_request *hash,
        struct iscsi_cmd *cmd,
        u32 data_offset,
        u32 data_length,
@@ -1201,7 +1201,7 @@ static u32 iscsit_do_crypto_hash_sg(
        struct scatterlist *sg;
        unsigned int page_off;
 
-       crypto_hash_init(hash);
+       crypto_ahash_init(hash);
 
        sg = cmd->first_data_sg;
        page_off = cmd->first_data_sg_off;
@@ -1209,7 +1209,8 @@ static u32 iscsit_do_crypto_hash_sg(
        while (data_length) {
                u32 cur_len = min_t(u32, data_length, (sg->length - page_off));
 
-               crypto_hash_update(hash, sg, cur_len);
+               ahash_request_set_crypt(hash, sg, NULL, cur_len);
+               crypto_ahash_update(hash);
 
                data_length -= cur_len;
                page_off = 0;
@@ -1221,33 +1222,34 @@ static u32 iscsit_do_crypto_hash_sg(
                struct scatterlist pad_sg;
 
                sg_init_one(&pad_sg, pad_bytes, padding);
-               crypto_hash_update(hash, &pad_sg, padding);
+               ahash_request_set_crypt(hash, &pad_sg, (u8 *)&data_crc,
+                                       padding);
+               crypto_ahash_finup(hash);
+       } else {
+               ahash_request_set_crypt(hash, NULL, (u8 *)&data_crc, 0);
+               crypto_ahash_final(hash);
        }
-       crypto_hash_final(hash, (u8 *) &data_crc);
 
        return data_crc;
 }
 
 static void iscsit_do_crypto_hash_buf(
-       struct hash_desc *hash,
+       struct ahash_request *hash,
        const void *buf,
        u32 payload_length,
        u32 padding,
        u8 *pad_bytes,
        u8 *data_crc)
 {
-       struct scatterlist sg;
+       struct scatterlist sg[2];
 
-       crypto_hash_init(hash);
+       sg_init_table(sg, ARRAY_SIZE(sg));
+       sg_set_buf(sg, buf, payload_length);
+       sg_set_buf(sg + 1, pad_bytes, padding);
 
-       sg_init_one(&sg, buf, payload_length);
-       crypto_hash_update(hash, &sg, payload_length);
+       ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
 
-       if (padding) {
-               sg_init_one(&sg, pad_bytes, padding);
-               crypto_hash_update(hash, &sg, padding);
-       }
-       crypto_hash_final(hash, data_crc);
+       crypto_ahash_digest(hash);
 }
 
 int
@@ -1422,7 +1424,7 @@ iscsit_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        if (conn->conn_ops->DataDigest) {
                u32 data_crc;
 
-               data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+               data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
                                                    be32_to_cpu(hdr->offset),
                                                    payload_length, padding,
                                                    cmd->pad_bytes);
@@ -1682,7 +1684,7 @@ static int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                }
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
                                        ping_data, payload_length,
                                        padding, cmd->pad_bytes,
                                        (u8 *)&data_crc);
@@ -2101,7 +2103,7 @@ iscsit_handle_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                        goto reject;
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
                                        text_in, payload_length,
                                        padding, (u8 *)&pad_bytes,
                                        (u8 *)&data_crc);
@@ -2440,7 +2442,7 @@ static int iscsit_handle_immediate_data(
        if (conn->conn_ops->DataDigest) {
                u32 data_crc;
 
-               data_crc = iscsit_do_crypto_hash_sg(&conn->conn_rx_hash, cmd,
+               data_crc = iscsit_do_crypto_hash_sg(conn->conn_rx_hash, cmd,
                                                    cmd->write_data_done, length, padding,
                                                    cmd->pad_bytes);
 
@@ -2553,7 +2555,7 @@ static int iscsit_send_conn_drop_async_message(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->tx_size += ISCSI_CRC_LEN;
@@ -2683,7 +2685,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2711,7 +2713,7 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
                                cmd->padding);
        }
        if (conn->conn_ops->DataDigest) {
-               cmd->data_crc = iscsit_do_crypto_hash_sg(&conn->conn_tx_hash, cmd,
+               cmd->data_crc = iscsit_do_crypto_hash_sg(conn->conn_tx_hash, cmd,
                         datain.offset, datain.length, cmd->padding, cmd->pad_bytes);
 
                iov[iov_count].iov_base = &cmd->data_crc;
@@ -2857,7 +2859,7 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, &cmd->pdu[0],
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2915,7 +2917,7 @@ static int iscsit_send_unsolicited_nopin(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                tx_size += ISCSI_CRC_LEN;
@@ -2963,7 +2965,7 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -2993,7 +2995,7 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
                                " padding bytes.\n", padding);
                }
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
                                cmd->buf_ptr, cmd->buf_ptr_size,
                                padding, (u8 *)&cmd->pad_bytes,
                                (u8 *)&cmd->data_crc);
@@ -3049,7 +3051,7 @@ static int iscsit_send_r2t(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
@@ -3239,7 +3241,7 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
                }
 
                if (conn->conn_ops->DataDigest) {
-                       iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
                                cmd->sense_buffer,
                                (cmd->se_cmd.scsi_sense_length + padding),
                                0, NULL, (u8 *)&cmd->data_crc);
@@ -3262,7 +3264,7 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3332,7 +3334,7 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
@@ -3601,7 +3603,7 @@ static int iscsit_send_text_rsp(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3611,7 +3613,7 @@ static int iscsit_send_text_rsp(
        }
 
        if (conn->conn_ops->DataDigest) {
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
                                cmd->buf_ptr, text_length,
                                0, NULL, (u8 *)&cmd->data_crc);
 
@@ -3668,7 +3670,7 @@ static int iscsit_send_reject(
        if (conn->conn_ops->HeaderDigest) {
                u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
 
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
 
                iov[0].iov_len += ISCSI_CRC_LEN;
@@ -3678,7 +3680,7 @@ static int iscsit_send_reject(
        }
 
        if (conn->conn_ops->DataDigest) {
-               iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
+               iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->buf_ptr,
                                ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
 
                iov[iov_count].iov_base = &cmd->data_crc;
@@ -4145,7 +4147,7 @@ int iscsi_target_rx_thread(void *arg)
                                goto transport_err;
                        }
 
-                       iscsit_do_crypto_hash_buf(&conn->conn_rx_hash,
+                       iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
                                        buffer, ISCSI_HDR_LEN,
                                        0, NULL, (u8 *)&checksum);
 
@@ -4359,10 +4361,14 @@ int iscsit_close_connection(
         */
        iscsit_check_conn_usage_count(conn);
 
-       if (conn->conn_rx_hash.tfm)
-               crypto_free_hash(conn->conn_rx_hash.tfm);
-       if (conn->conn_tx_hash.tfm)
-               crypto_free_hash(conn->conn_tx_hash.tfm);
+       ahash_request_free(conn->conn_tx_hash);
+       if (conn->conn_rx_hash) {
+               struct crypto_ahash *tfm;
+
+               tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
+               ahash_request_free(conn->conn_rx_hash);
+               crypto_free_ahash(tfm);
+       }
 
        free_cpumask_var(conn->conn_cpumask);
 
index 47e249dccb5fe7d9652bea77bddc00b35dd98429..667406fcf4d3daf03acf84b24be9d6538c95f20e 100644 (file)
@@ -16,9 +16,9 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <crypto/hash.h>
 #include <linux/kernel.h>
 #include <linux/string.h>
-#include <linux/crypto.h>
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 
@@ -185,9 +185,8 @@ static int chap_server_compute_md5(
        unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
        size_t compare_len;
        struct iscsi_chap *chap = conn->auth_protocol;
-       struct crypto_hash *tfm;
-       struct hash_desc desc;
-       struct scatterlist sg;
+       struct crypto_shash *tfm = NULL;
+       struct shash_desc *desc = NULL;
        int auth_ret = -1, ret, challenge_len;
 
        memset(identifier, 0, 10);
@@ -245,52 +244,47 @@ static int chap_server_compute_md5(
        pr_debug("[server] Got CHAP_R=%s\n", chap_r);
        chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
 
-       tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(tfm)) {
-               pr_err("Unable to allocate struct crypto_hash\n");
+               tfm = NULL;
+               pr_err("Unable to allocate struct crypto_shash\n");
                goto out;
        }
-       desc.tfm = tfm;
-       desc.flags = 0;
 
-       ret = crypto_hash_init(&desc);
-       if (ret < 0) {
-               pr_err("crypto_hash_init() failed\n");
-               crypto_free_hash(tfm);
+       desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
+       if (!desc) {
+               pr_err("Unable to allocate struct shash_desc\n");
                goto out;
        }
 
-       sg_init_one(&sg, &chap->id, 1);
-       ret = crypto_hash_update(&desc, &sg, 1);
+       desc->tfm = tfm;
+       desc->flags = 0;
+
+       ret = crypto_shash_init(desc);
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for id\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_init() failed\n");
                goto out;
        }
 
-       sg_init_one(&sg, &auth->password, strlen(auth->password));
-       ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
+       ret = crypto_shash_update(desc, &chap->id, 1);
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for password\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_update() failed for id\n");
                goto out;
        }
 
-       sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
-       ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
+       ret = crypto_shash_update(desc, (char *)&auth->password,
+                                 strlen(auth->password));
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for challenge\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_update() failed for password\n");
                goto out;
        }
 
-       ret = crypto_hash_final(&desc, server_digest);
+       ret = crypto_shash_finup(desc, chap->challenge,
+                                CHAP_CHALLENGE_LENGTH, server_digest);
        if (ret < 0) {
-               pr_err("crypto_hash_final() failed for server digest\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_finup() failed for challenge\n");
                goto out;
        }
-       crypto_free_hash(tfm);
 
        chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
        pr_debug("[server] MD5 Server Digest: %s\n", response);
@@ -306,9 +300,8 @@ static int chap_server_compute_md5(
         * authentication is not enabled.
         */
        if (!auth->authenticate_target) {
-               kfree(challenge);
-               kfree(challenge_binhex);
-               return 0;
+               auth_ret = 0;
+               goto out;
        }
        /*
         * Get CHAP_I.
@@ -372,58 +365,37 @@ static int chap_server_compute_md5(
        /*
         * Generate CHAP_N and CHAP_R for mutual authentication.
         */
-       tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(tfm)) {
-               pr_err("Unable to allocate struct crypto_hash\n");
-               goto out;
-       }
-       desc.tfm = tfm;
-       desc.flags = 0;
-
-       ret = crypto_hash_init(&desc);
+       ret = crypto_shash_init(desc);
        if (ret < 0) {
-               pr_err("crypto_hash_init() failed\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_init() failed\n");
                goto out;
        }
 
        /* To handle both endiannesses */
        id_as_uchar = id;
-       sg_init_one(&sg, &id_as_uchar, 1);
-       ret = crypto_hash_update(&desc, &sg, 1);
+       ret = crypto_shash_update(desc, &id_as_uchar, 1);
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for id\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_update() failed for id\n");
                goto out;
        }
 
-       sg_init_one(&sg, auth->password_mutual,
-                               strlen(auth->password_mutual));
-       ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
+       ret = crypto_shash_update(desc, auth->password_mutual,
+                                 strlen(auth->password_mutual));
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for"
+               pr_err("crypto_shash_update() failed for"
                                " password_mutual\n");
-               crypto_free_hash(tfm);
                goto out;
        }
        /*
         * Convert received challenge to binary hex.
         */
-       sg_init_one(&sg, challenge_binhex, challenge_len);
-       ret = crypto_hash_update(&desc, &sg, challenge_len);
+       ret = crypto_shash_finup(desc, challenge_binhex, challenge_len,
+                                digest);
        if (ret < 0) {
-               pr_err("crypto_hash_update() failed for ma challenge\n");
-               crypto_free_hash(tfm);
+               pr_err("crypto_shash_finup() failed for ma challenge\n");
                goto out;
        }
 
-       ret = crypto_hash_final(&desc, digest);
-       if (ret < 0) {
-               pr_err("crypto_hash_final() failed for ma digest\n");
-               crypto_free_hash(tfm);
-               goto out;
-       }
-       crypto_free_hash(tfm);
        /*
         * Generate CHAP_N and CHAP_R.
         */
@@ -440,6 +412,8 @@ static int chap_server_compute_md5(
        pr_debug("[server] Sending CHAP_R=0x%s\n", response);
        auth_ret = 0;
 out:
+       kzfree(desc);
+       crypto_free_shash(tfm);
        kfree(challenge);
        kfree(challenge_binhex);
        return auth_ret;
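The CHAP rewrite above replaces hash_desc with a synchronous shash descriptor sized for the chosen transform, chaining crypto_shash_init/update/finup over several buffers. For a single buffer the same setup collapses to crypto_shash_digest(), as in this sketch; md5_digest_sketch() is an illustrative name:

#include <crypto/hash.h>
#include <linux/slab.h>

static int md5_digest_sketch(const u8 *in, unsigned int inlen, u8 *out)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int ret;

        tfm = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* descriptor size depends on the chosen transform */
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
                crypto_free_shash(tfm);
                return -ENOMEM;
        }
        desc->tfm = tfm;
        desc->flags = 0;

        ret = crypto_shash_digest(desc, in, inlen, out);  /* init+update+final */

        kzfree(desc);                   /* descriptor may hold hash state */
        crypto_free_shash(tfm);
        return ret;
}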
index 96e78c823d13fa2f78feb6ff024fb468518be75b..8436d56c5f0c377e07caa7e0b1766051c11cf929 100644 (file)
@@ -16,9 +16,9 @@
  * GNU General Public License for more details.
  ******************************************************************************/
 
+#include <crypto/hash.h>
 #include <linux/string.h>
 #include <linux/kthread.h>
-#include <linux/crypto.h>
 #include <linux/idr.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
@@ -115,27 +115,36 @@ out_login:
  */
 int iscsi_login_setup_crypto(struct iscsi_conn *conn)
 {
+       struct crypto_ahash *tfm;
+
        /*
         * Setup slicing by CRC32C algorithm for RX and TX libcrypto contexts
         * which will default to crc32c_intel.ko for cpu_has_xmm4_2, or fallback
         * to software 1x8 byte slicing from crc32c.ko
         */
-       conn->conn_rx_hash.flags = 0;
-       conn->conn_rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-                                               CRYPTO_ALG_ASYNC);
-       if (IS_ERR(conn->conn_rx_hash.tfm)) {
-               pr_err("crypto_alloc_hash() failed for conn_rx_tfm\n");
+       tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm)) {
+               pr_err("crypto_alloc_ahash() failed\n");
                return -ENOMEM;
        }
 
-       conn->conn_tx_hash.flags = 0;
-       conn->conn_tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
-                                               CRYPTO_ALG_ASYNC);
-       if (IS_ERR(conn->conn_tx_hash.tfm)) {
-               pr_err("crypto_alloc_hash() failed for conn_tx_tfm\n");
-               crypto_free_hash(conn->conn_rx_hash.tfm);
+       conn->conn_rx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!conn->conn_rx_hash) {
+               pr_err("ahash_request_alloc() failed for conn_rx_hash\n");
+               crypto_free_ahash(tfm);
+               return -ENOMEM;
+       }
+       ahash_request_set_callback(conn->conn_rx_hash, 0, NULL, NULL);
+
+       conn->conn_tx_hash = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!conn->conn_tx_hash) {
+               pr_err("ahash_request_alloc() failed for conn_tx_hash\n");
+               ahash_request_free(conn->conn_rx_hash);
+               conn->conn_rx_hash = NULL;
+               crypto_free_ahash(tfm);
                return -ENOMEM;
        }
+       ahash_request_set_callback(conn->conn_tx_hash, 0, NULL, NULL);
 
        return 0;
 }
@@ -1174,10 +1183,14 @@ old_sess_out:
                iscsit_dec_session_usage_count(conn->sess);
        }
 
-       if (!IS_ERR(conn->conn_rx_hash.tfm))
-               crypto_free_hash(conn->conn_rx_hash.tfm);
-       if (!IS_ERR(conn->conn_tx_hash.tfm))
-               crypto_free_hash(conn->conn_tx_hash.tfm);
+       ahash_request_free(conn->conn_tx_hash);
+       if (conn->conn_rx_hash) {
+               struct crypto_ahash *tfm;
+
+               tfm = crypto_ahash_reqtfm(conn->conn_rx_hash);
+               ahash_request_free(conn->conn_rx_hash);
+               crypto_free_ahash(tfm);
+       }
 
        free_cpumask_var(conn->conn_cpumask);
 
index 8cc4ac64a91c36347b9307addb88ae99d545d2b7..7c92c09be21386c3d60ff41b76a6694234c93432 100644 (file)
@@ -195,7 +195,7 @@ config IMX_THERMAL
          passive trip is crossed.
 
 config SPEAR_THERMAL
-       bool "SPEAr thermal sensor driver"
+       tristate "SPEAr thermal sensor driver"
        depends on PLAT_SPEAR || COMPILE_TEST
        depends on OF
        help
@@ -237,8 +237,8 @@ config DOVE_THERMAL
          framework.
 
 config DB8500_THERMAL
-       bool "DB8500 thermal management"
-       depends on ARCH_U8500
+       tristate "DB8500 thermal management"
+       depends on MFD_DB8500_PRCMU
        default y
        help
          Adds DB8500 thermal management implementation according to the thermal
index ccc0ad02d06698108ec486d11f7b0dc69c7c734a..36fa724a36c851d463afa53a67d1a8526efb75dc 100644 (file)
 /* Braswell thermal reporting device */
 #define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
 
+/* Broxton thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BXT0_THERMAL  0x0A8C
+#define PCI_DEVICE_ID_PROC_BXT1_THERMAL  0x1A8C
+#define PCI_DEVICE_ID_PROC_BXTX_THERMAL  0x4A8C
+#define PCI_DEVICE_ID_PROC_BXTP_THERMAL  0x5A8C
+
 struct power_config {
        u32     index;
        u32     min_uw;
@@ -404,6 +410,10 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_SKL_THERMAL)},
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT0_THERMAL)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXT1_THERMAL)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTX_THERMAL)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BXTP_THERMAL)},
        { 0, },
 };
 
index 50c7da79be830a43b342999ca80ef264431e0078..00d81af648b8ed93a9d7c2e09aef8979f7d0606d 100644 (file)
@@ -136,7 +136,7 @@ struct pch_dev_ops {
 
 
 /* dev ops for Wildcat Point */
-static struct pch_dev_ops pch_dev_ops_wpt = {
+static const struct pch_dev_ops pch_dev_ops_wpt = {
        .hw_init = pch_wpt_init,
        .get_temp = pch_wpt_get_temp,
 };
index be4eedcb839ac22158fe180c2abc37df712d9511..9043f8f918529bd600eda1bbffe3bdf91f036ef4 100644 (file)
@@ -475,14 +475,10 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
 
        sensor_np = of_node_get(dev->of_node);
 
-       for_each_child_of_node(np, child) {
+       for_each_available_child_of_node(np, child) {
                struct of_phandle_args sensor_specs;
                int ret, id;
 
-               /* Check whether child is enabled or not */
-               if (!of_device_is_available(child))
-                       continue;
-
                /* For now, thermal framework supports only 1 sensor per zone */
                ret = of_parse_phandle_with_args(child, "thermal-sensors",
                                                 "#thermal-sensor-cells",
@@ -881,16 +877,12 @@ int __init of_parse_thermal_zones(void)
                return 0; /* Run successfully on systems without thermal DT */
        }
 
-       for_each_child_of_node(np, child) {
+       for_each_available_child_of_node(np, child) {
                struct thermal_zone_device *zone;
                struct thermal_zone_params *tzp;
                int i, mask = 0;
                u32 prop;
 
-               /* Check whether child is enabled or not */
-               if (!of_device_is_available(child))
-                       continue;
-
                tz = thermal_of_build_thermal_zone(child);
                if (IS_ERR(tz)) {
                        pr_err("failed to build thermal zone %s: %ld\n",
@@ -968,13 +960,9 @@ void of_thermal_destroy_zones(void)
                return;
        }
 
-       for_each_child_of_node(np, child) {
+       for_each_available_child_of_node(np, child) {
                struct thermal_zone_device *zone;
 
-               /* Check whether child is enabled or not */
-               if (!of_device_is_available(child))
-                       continue;
-
                zone = thermal_zone_get_zone_by_name(child->name);
                if (IS_ERR(zone))
                        continue;
index 13d01edc7a043812611719bf7a342260f09ababa..0e735acea33afc7b8e747857b57046647d50e7d9 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/reboot.h>
@@ -75,11 +76,13 @@ struct rcar_thermal_priv {
 #define rcar_has_irq_support(priv)     ((priv)->common->base)
 #define rcar_id_to_shift(priv)         ((priv)->id * 8)
 
-#ifdef DEBUG
-# define rcar_force_update_temp(priv)  1
-#else
-# define rcar_force_update_temp(priv)  0
-#endif
+#define USE_OF_THERMAL 1
+static const struct of_device_id rcar_thermal_dt_ids[] = {
+       { .compatible = "renesas,rcar-thermal", },
+       { .compatible = "renesas,rcar-gen2-thermal", .data = (void *)USE_OF_THERMAL },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
 
 /*
  *             basic functions
@@ -200,20 +203,46 @@ err_out_unlock:
        return ret;
 }
 
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
+static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
+                                        int *temp)
 {
-       struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+       int tmp;
+       int ret;
 
-       if (!rcar_has_irq_support(priv) || rcar_force_update_temp(priv))
-               rcar_thermal_update_temp(priv);
+       ret = rcar_thermal_update_temp(priv);
+       if (ret < 0)
+               return ret;
 
        mutex_lock(&priv->lock);
-       *temp =  MCELSIUS((priv->ctemp * 5) - 65);
+       tmp =  MCELSIUS((priv->ctemp * 5) - 65);
        mutex_unlock(&priv->lock);
 
+       if ((tmp < MCELSIUS(-45)) || (tmp > MCELSIUS(125))) {
+               struct device *dev = rcar_priv_to_dev(priv);
+
+               dev_err(dev, "failed to measure temperature correctly\n");
+               return -EIO;
+       }
+
+       *temp = tmp;
+
        return 0;
 }
 
+static int rcar_thermal_of_get_temp(void *data, int *temp)
+{
+       struct rcar_thermal_priv *priv = data;
+
+       return rcar_thermal_get_current_temp(priv, temp);
+}
+
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
+{
+       struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+
+       return rcar_thermal_get_current_temp(priv, temp);
+}
+
 static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
                                      int trip, enum thermal_trip_type *type)
 {
@@ -270,6 +299,10 @@ static int rcar_thermal_notify(struct thermal_zone_device *zone,
        return 0;
 }
 
+static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = {
+       .get_temp       = rcar_thermal_of_get_temp,
+};
+
 static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
        .get_temp       = rcar_thermal_get_temp,
        .get_trip_type  = rcar_thermal_get_trip_type,
@@ -288,6 +321,9 @@ static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
        unsigned long flags;
        u32 mask = 0x3 << rcar_id_to_shift(priv); /* enable Rising/Falling */
 
+       if (!rcar_has_irq_support(priv))
+               return;
+
        spin_lock_irqsave(&common->lock, flags);
 
        rcar_thermal_common_bset(common, INTMSK, mask, enable ? 0 : mask);
@@ -299,14 +335,24 @@ static void rcar_thermal_work(struct work_struct *work)
 {
        struct rcar_thermal_priv *priv;
        int cctemp, nctemp;
+       int ret;
 
        priv = container_of(work, struct rcar_thermal_priv, work.work);
 
-       rcar_thermal_get_temp(priv->zone, &cctemp);
-       rcar_thermal_update_temp(priv);
+       ret = rcar_thermal_get_current_temp(priv, &cctemp);
+       if (ret < 0)
+               return;
+
+       ret = rcar_thermal_update_temp(priv);
+       if (ret < 0)
+               return;
+
        rcar_thermal_irq_enable(priv);
 
-       rcar_thermal_get_temp(priv->zone, &nctemp);
+       ret = rcar_thermal_get_current_temp(priv, &nctemp);
+       if (ret < 0)
+               return;
+
        if (nctemp != cctemp)
                thermal_zone_device_update(priv->zone);
 }
@@ -368,8 +414,7 @@ static int rcar_thermal_remove(struct platform_device *pdev)
        struct rcar_thermal_priv *priv;
 
        rcar_thermal_for_each_priv(priv, common) {
-               if (rcar_has_irq_support(priv))
-                       rcar_thermal_irq_disable(priv);
+               rcar_thermal_irq_disable(priv);
                thermal_zone_device_unregister(priv->zone);
        }
 
@@ -385,6 +430,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
        struct rcar_thermal_priv *priv;
        struct device *dev = &pdev->dev;
        struct resource *res, *irq;
+       const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev);
+       unsigned long of_data = (unsigned long)of_id->data;
        int mres = 0;
        int i;
        int ret = -ENODEV;
@@ -441,9 +488,17 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                mutex_init(&priv->lock);
                INIT_LIST_HEAD(&priv->list);
                INIT_DELAYED_WORK(&priv->work, rcar_thermal_work);
-               rcar_thermal_update_temp(priv);
+               ret = rcar_thermal_update_temp(priv);
+               if (ret < 0)
+                       goto error_unregister;
 
-               priv->zone = thermal_zone_device_register("rcar_thermal",
+               if (of_data == USE_OF_THERMAL)
+                       priv->zone = thermal_zone_of_sensor_register(
+                                               dev, i, priv,
+                                               &rcar_thermal_zone_of_ops);
+               else
+                       priv->zone = thermal_zone_device_register(
+                                               "rcar_thermal",
                                                1, 0, priv,
                                                &rcar_thermal_zone_ops, NULL, 0,
                                                idle);
@@ -453,8 +508,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
                        goto error_unregister;
                }
 
-               if (rcar_has_irq_support(priv))
-                       rcar_thermal_irq_enable(priv);
+               rcar_thermal_irq_enable(priv);
 
                list_move_tail(&priv->list, &common->head);
 
@@ -484,12 +538,6 @@ error_unregister:
        return ret;
 }
 
-static const struct of_device_id rcar_thermal_dt_ids[] = {
-       { .compatible = "renesas,rcar-thermal", },
-       {},
-};
-MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
-
 static struct platform_driver rcar_thermal_driver = {
        .driver = {
                .name   = "rcar_thermal",
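For the gen2 compatible the probe above registers through the OF-thermal layer instead of creating its own zone. A minimal sketch of that registration path under assumed names (my_sensor, my_get_temp and my_of_ops are illustrative, not driver symbols):

#include <linux/err.h>
#include <linux/thermal.h>

struct my_sensor {
        int last_mcelsius;              /* cached reading in millidegrees C */
};

static int my_get_temp(void *data, int *temp)
{
        struct my_sensor *s = data;

        *temp = s->last_mcelsius;
        return 0;
}

static const struct thermal_zone_of_device_ops my_of_ops = {
        .get_temp = my_get_temp,
};

/* in probe():
 *      zone = thermal_zone_of_sensor_register(dev, 0, s, &my_of_ops);
 *      if (IS_ERR(zone))
 *              return PTR_ERR(zone);
 * trip points and cooling maps then come from the thermal-zones DT node.
 */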
index e845841ab036cc82033d0a6829058a0f15f2543e..b58e3fb9b31186921e51edd0fa70bfe0e8c88059 100644 (file)
@@ -38,7 +38,7 @@ enum tshut_mode {
 };
 
 /**
- * the system Temperature Sensors tshut(tshut) polarity
+ * The system Temperature Sensors tshut (thermal shutdown) polarity
  * the bit 8 is tshut polarity.
  * 0: low active, 1: high active
  */
@@ -57,10 +57,10 @@ enum sensor_id {
 };
 
 /**
-* The conversion table has the adc value and temperature.
-* ADC_DECREMENT is the adc value decremnet.(e.g. v2_code_table)
-* ADC_INCREMNET is the adc value incremnet.(e.g. v3_code_table)
-*/
+ * The conversion table has the adc value and temperature.
+ * ADC_DECREMENT: the adc value is decreasing (e.g. v2_code_table)
+ * ADC_INCREMENT: the adc value is increasing (e.g. v3_code_table)
+ */
 enum adc_sort_mode {
        ADC_DECREMENT = 0,
        ADC_INCREMENT,
@@ -72,16 +72,17 @@ enum adc_sort_mode {
  */
 #define SOC_MAX_SENSORS        2
 
+/**
+ * struct chip_tsadc_table: hold information about chip-specific differences
+ * @id: conversion table
+ * @length: size of conversion table
+ * @data_mask: mask to apply on data inputs
+ * @mode: sort mode of this adc variant (incrementing or decrementing)
+ */
 struct chip_tsadc_table {
        const struct tsadc_table *id;
-
-       /* the array table size*/
        unsigned int length;
-
-       /* that analogic mask data */
        u32 data_mask;
-
-       /* the sort mode is adc value that increment or decrement in table */
        enum adc_sort_mode mode;
 };
 
@@ -153,6 +154,7 @@ struct rockchip_thermal_data {
 #define TSADCV2_SHUT_2GPIO_SRC_EN(chn)         BIT(4 + (chn))
 #define TSADCV2_SHUT_2CRU_SRC_EN(chn)          BIT(8 + (chn))
 
+#define TSADCV1_INT_PD_CLEAR_MASK              ~BIT(16)
 #define TSADCV2_INT_PD_CLEAR_MASK              ~BIT(8)
 
 #define TSADCV2_DATA_MASK                      0xfff
@@ -168,6 +170,51 @@ struct tsadc_table {
        int temp;
 };
 
+/**
+ * Note:
+ * Code to Temperature mapping of the Temperature sensor is a piecewise linear
+ * curve. Any temperature code falling between 2 given temperatures can be
+ * linearly interpolated.
+ * Code to Temperature mapping should be updated based on silicon results.
+ */
+static const struct tsadc_table v1_code_table[] = {
+       {TSADCV3_DATA_MASK, -40000},
+       {436, -40000},
+       {431, -35000},
+       {426, -30000},
+       {421, -25000},
+       {416, -20000},
+       {411, -15000},
+       {406, -10000},
+       {401, -5000},
+       {395, 0},
+       {390, 5000},
+       {385, 10000},
+       {380, 15000},
+       {375, 20000},
+       {370, 25000},
+       {364, 30000},
+       {359, 35000},
+       {354, 40000},
+       {349, 45000},
+       {343, 50000},
+       {338, 55000},
+       {333, 60000},
+       {328, 65000},
+       {322, 70000},
+       {317, 75000},
+       {312, 80000},
+       {307, 85000},
+       {301, 90000},
+       {296, 95000},
+       {291, 100000},
+       {286, 105000},
+       {280, 110000},
+       {275, 115000},
+       {270, 120000},
+       {264, 125000},
+};
+
 static const struct tsadc_table v2_code_table[] = {
        {TSADCV2_DATA_MASK, -40000},
        {3800, -40000},
@@ -245,6 +292,44 @@ static const struct tsadc_table v3_code_table[] = {
        {TSADCV3_DATA_MASK, 125000},
 };
 
+static const struct tsadc_table v4_code_table[] = {
+       {TSADCV3_DATA_MASK, -40000},
+       {431, -40000},
+       {426, -35000},
+       {421, -30000},
+       {415, -25000},
+       {410, -20000},
+       {405, -15000},
+       {399, -10000},
+       {394, -5000},
+       {389, 0},
+       {383, 5000},
+       {378, 10000},
+       {373, 15000},
+       {367, 20000},
+       {362, 25000},
+       {357, 30000},
+       {351, 35000},
+       {346, 40000},
+       {340, 45000},
+       {335, 50000},
+       {330, 55000},
+       {324, 60000},
+       {319, 65000},
+       {313, 70000},
+       {308, 75000},
+       {302, 80000},
+       {297, 85000},
+       {291, 90000},
+       {286, 95000},
+       {281, 100000},
+       {275, 105000},
+       {270, 110000},
+       {264, 115000},
+       {259, 120000},
+       {253, 125000},
+};
+
 static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
                                   int temp)
 {
@@ -368,6 +453,14 @@ static void rk_tsadcv2_initialize(void __iomem *regs,
                       regs + TSADCV2_HIGHT_TSHUT_DEBOUNCE);
 }
 
+static void rk_tsadcv1_irq_ack(void __iomem *regs)
+{
+       u32 val;
+
+       val = readl_relaxed(regs + TSADCV2_INT_PD);
+       writel_relaxed(val & TSADCV1_INT_PD_CLEAR_MASK, regs + TSADCV2_INT_PD);
+}
+
 static void rk_tsadcv2_irq_ack(void __iomem *regs)
 {
        u32 val;
@@ -429,6 +522,29 @@ static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
        writel_relaxed(val, regs + TSADCV2_INT_EN);
 }
 
+static const struct rockchip_tsadc_chip rk3228_tsadc_data = {
+       .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+       .chn_num = 1, /* one channel for tsadc */
+
+       .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+       .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+       .tshut_temp = 95000,
+
+       .initialize = rk_tsadcv2_initialize,
+       .irq_ack = rk_tsadcv1_irq_ack,
+       .control = rk_tsadcv2_control,
+       .get_temp = rk_tsadcv2_get_temp,
+       .set_tshut_temp = rk_tsadcv2_tshut_temp,
+       .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+       .table = {
+               .id = v1_code_table,
+               .length = ARRAY_SIZE(v1_code_table),
+               .data_mask = TSADCV3_DATA_MASK,
+               .mode = ADC_DECREMENT,
+       },
+};
+
 static const struct rockchip_tsadc_chip rk3288_tsadc_data = {
        .chn_id[SENSOR_CPU] = 1, /* cpu sensor is channel 1 */
        .chn_id[SENSOR_GPU] = 2, /* gpu sensor is channel 2 */
@@ -477,7 +593,35 @@ static const struct rockchip_tsadc_chip rk3368_tsadc_data = {
        },
 };
 
+static const struct rockchip_tsadc_chip rk3399_tsadc_data = {
+       .chn_id[SENSOR_CPU] = 0, /* cpu sensor is channel 0 */
+       .chn_id[SENSOR_GPU] = 1, /* gpu sensor is channel 1 */
+       .chn_num = 2, /* two channels for tsadc */
+
+       .tshut_mode = TSHUT_MODE_GPIO, /* default TSHUT via GPIO give PMIC */
+       .tshut_polarity = TSHUT_LOW_ACTIVE, /* default TSHUT LOW ACTIVE */
+       .tshut_temp = 95000,
+
+       .initialize = rk_tsadcv2_initialize,
+       .irq_ack = rk_tsadcv1_irq_ack,
+       .control = rk_tsadcv2_control,
+       .get_temp = rk_tsadcv2_get_temp,
+       .set_tshut_temp = rk_tsadcv2_tshut_temp,
+       .set_tshut_mode = rk_tsadcv2_tshut_mode,
+
+       .table = {
+               .id = v4_code_table,
+               .length = ARRAY_SIZE(v4_code_table),
+               .data_mask = TSADCV3_DATA_MASK,
+               .mode = ADC_DECREMENT,
+       },
+};
+
 static const struct of_device_id of_rockchip_thermal_match[] = {
+       {
+               .compatible = "rockchip,rk3228-tsadc",
+               .data = (void *)&rk3228_tsadc_data,
+       },
        {
                .compatible = "rockchip,rk3288-tsadc",
                .data = (void *)&rk3288_tsadc_data,
@@ -486,6 +630,10 @@ static const struct of_device_id of_rockchip_thermal_match[] = {
                .compatible = "rockchip,rk3368-tsadc",
                .data = (void *)&rk3368_tsadc_data,
        },
+       {
+               .compatible = "rockchip,rk3399-tsadc",
+               .data = (void *)&rk3399_tsadc_data,
+       },
        { /* end */ },
 };
 MODULE_DEVICE_TABLE(of, of_rockchip_thermal_match);
@@ -617,7 +765,7 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
        return 0;
 }
 
-/*
+/**
  * Reset TSADC Controller, reset all tsadc registers.
  */
 static void rockchip_thermal_reset_controller(struct reset_control *reset)
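The note above v1_code_table describes the tables as piecewise linear. A sketch of how a descending (ADC_DECREMENT) table can be interpolated; struct code_temp mirrors the shape of the driver's tsadc_table entries, and code_to_temp_sketch() is an illustrative helper, not the driver's conversion routine:

#include <linux/types.h>

struct code_temp {
        u32 code;       /* raw ADC value */
        int temp;       /* millidegrees Celsius */
};

static int code_to_temp_sketch(const struct code_temp *tbl, int len, u32 code)
{
        int i;

        if (code >= tbl[0].code)
                return tbl[0].temp;             /* colder than the table covers */

        for (i = 1; i < len; i++)
                if (code >= tbl[i].code)        /* codes shrink as temp rises */
                        break;
        if (i == len)
                return tbl[len - 1].temp;       /* hotter than the table covers */

        /* linear interpolation between the two neighbouring entries */
        return tbl[i - 1].temp + (tbl[i].temp - tbl[i - 1].temp) *
               (int)(tbl[i - 1].code - code) /
               (int)(tbl[i - 1].code - tbl[i].code);
}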
index 534dd913666283fa13eecfaeb8823ea3d55ac0c8..81b35aace9de0439c4dd76c4c4c0d91597d3b3f3 100644 (file)
@@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = {
        .get_temp = thermal_get_temp,
 };
 
-#ifdef CONFIG_PM
-static int spear_thermal_suspend(struct device *dev)
+static int __maybe_unused spear_thermal_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev)
        return 0;
 }
 
-static int spear_thermal_resume(struct device *dev)
+static int __maybe_unused spear_thermal_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev)
 
        return 0;
 }
-#endif
 
 static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
                spear_thermal_resume);
index 2f9f7086ac3dd0d7a3a71015593fffacf8ee586e..ea9366ad3e6bb285e52e368691a0d495cbb3429f 100644 (file)
@@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
        next_target = instance->target;
        dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
 
+       if (!instance->initialized) {
+               if (throttle) {
+                       next_target = (cur_state + 1) >= instance->upper ?
+                                       instance->upper :
+                                       ((cur_state + 1) < instance->lower ?
+                                       instance->lower : (cur_state + 1));
+               } else {
+                       next_target = THERMAL_NO_TARGET;
+               }
+
+               return next_target;
+       }
+
        switch (trend) {
        case THERMAL_TREND_RAISING:
                if (throttle) {
@@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
                dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
                                        old_target, (int)instance->target);
 
-               if (old_target == instance->target)
+               if (instance->initialized && old_target == instance->target)
                        continue;
 
                /* Activate a passive thermal instance */
@@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
                        instance->target == THERMAL_NO_TARGET)
                        update_passive_instance(tz, trip_type, -1);
 
-
+               instance->initialized = true;
                instance->cdev->updated = false; /* cdev needs update */
        }
 
index d9e525cc9c1ce24bcd9d55dd59730f375b15d552..a0a8fd1235e2a21cbe5553c440b26eafecf8042e 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/of.h>
 #include <net/netlink.h>
 #include <net/genetlink.h>
+#include <linux/suspend.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/thermal.h>
@@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
 static DEFINE_MUTEX(thermal_list_lock);
 static DEFINE_MUTEX(thermal_governor_lock);
 
+static atomic_t in_suspend;
+
 static struct thermal_governor *def_governor;
 
 static struct thermal_governor *__find_governor(const char *name)
@@ -532,14 +535,31 @@ static void update_temperature(struct thermal_zone_device *tz)
        mutex_unlock(&tz->lock);
 
        trace_thermal_temperature(tz);
-       dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
-                               tz->last_temperature, tz->temperature);
+       if (tz->last_temperature == THERMAL_TEMP_INVALID)
+               dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
+                       tz->temperature);
+       else
+               dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
+                       tz->last_temperature, tz->temperature);
+}
+
+static void thermal_zone_device_reset(struct thermal_zone_device *tz)
+{
+       struct thermal_instance *pos;
+
+       tz->temperature = THERMAL_TEMP_INVALID;
+       tz->passive = 0;
+       list_for_each_entry(pos, &tz->thermal_instances, tz_node)
+               pos->initialized = false;
 }
 
 void thermal_zone_device_update(struct thermal_zone_device *tz)
 {
        int count;
 
+       if (atomic_read(&in_suspend))
+               return;
+
        if (!tz->ops->get_temp)
                return;
 
@@ -676,8 +696,12 @@ trip_point_temp_store(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        ret = tz->ops->set_trip_temp(tz, trip, temperature);
+       if (ret)
+               return ret;
 
-       return ret ? ret : count;
+       thermal_zone_device_update(tz);
+
+       return count;
 }
 
 static ssize_t
@@ -1321,6 +1345,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
        if (!result) {
                list_add_tail(&dev->tz_node, &tz->thermal_instances);
                list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
+               atomic_set(&tz->need_update, 1);
        }
        mutex_unlock(&cdev->lock);
        mutex_unlock(&tz->lock);
@@ -1430,6 +1455,7 @@ __thermal_cooling_device_register(struct device_node *np,
                                  const struct thermal_cooling_device_ops *ops)
 {
        struct thermal_cooling_device *cdev;
+       struct thermal_zone_device *pos = NULL;
        int result;
 
        if (type && strlen(type) >= THERMAL_NAME_LENGTH)
@@ -1474,6 +1500,12 @@ __thermal_cooling_device_register(struct device_node *np,
        /* Update binding information for 'this' new cdev */
        bind_cdev(cdev);
 
+       mutex_lock(&thermal_list_lock);
+       list_for_each_entry(pos, &thermal_tz_list, node)
+               if (atomic_cmpxchg(&pos->need_update, 1, 0))
+                       thermal_zone_device_update(pos);
+       mutex_unlock(&thermal_list_lock);
+
        return cdev;
 }
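
Binding a cooling device now flags the zone with need_update, and cooling-device registration walks the zone list and updates any zone whose flag is still set; atomic_cmpxchg(&need_update, 1, 0) lets exactly one caller consume a pending update even if registration and the zone's own update path race. A user-space model of that consume-once handshake, using C11 atomics in place of the kernel's atomic_t:

    #include <stdatomic.h>
    #include <stdio.h>

    struct zone {
            const char *name;
            atomic_int need_update;
    };

    /* Consume the pending-update flag; returns 1 only for the caller that wins. */
    static int consume_need_update(struct zone *tz)
    {
            int expected = 1;

            /* analogous to atomic_cmpxchg(&tz->need_update, 1, 0) == 1 */
            return atomic_compare_exchange_strong(&tz->need_update, &expected, 0);
    }

    int main(void)
    {
            struct zone zones[] = {
                    { "cpu-thermal", 1 },   /* marked when a cooling device was bound */
                    { "gpu-thermal", 0 },   /* nothing pending */
            };

            for (unsigned int i = 0; i < sizeof(zones) / sizeof(zones[0]); i++)
                    if (consume_need_update(&zones[i]))
                            printf("updating %s\n", zones[i].name);
                    else
                            printf("%s already up to date\n", zones[i].name);
            return 0;
    }
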
 
@@ -1806,6 +1838,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
        tz->trips = trips;
        tz->passive_delay = passive_delay;
        tz->polling_delay = polling_delay;
+       /* A new thermal zone needs to be updated anyway. */
+       atomic_set(&tz->need_update, 1);
 
        dev_set_name(&tz->device, "thermal_zone%d", tz->id);
        result = device_register(&tz->device);
@@ -1900,7 +1934,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
 
        INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
 
-       thermal_zone_device_update(tz);
+       thermal_zone_device_reset(tz);
+       /* Update the new thermal zone and mark it as already updated. */
+       if (atomic_cmpxchg(&tz->need_update, 1, 0))
+               thermal_zone_device_update(tz);
 
        return tz;
 
@@ -2140,6 +2177,36 @@ static void thermal_unregister_governors(void)
        thermal_gov_power_allocator_unregister();
 }
 
+static int thermal_pm_notify(struct notifier_block *nb,
+                               unsigned long mode, void *_unused)
+{
+       struct thermal_zone_device *tz;
+
+       switch (mode) {
+       case PM_HIBERNATION_PREPARE:
+       case PM_RESTORE_PREPARE:
+       case PM_SUSPEND_PREPARE:
+               atomic_set(&in_suspend, 1);
+               break;
+       case PM_POST_HIBERNATION:
+       case PM_POST_RESTORE:
+       case PM_POST_SUSPEND:
+               atomic_set(&in_suspend, 0);
+               list_for_each_entry(tz, &thermal_tz_list, node) {
+                       thermal_zone_device_reset(tz);
+                       thermal_zone_device_update(tz);
+               }
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static struct notifier_block thermal_pm_nb = {
+       .notifier_call = thermal_pm_notify,
+};
+
 static int __init thermal_init(void)
 {
        int result;
@@ -2160,6 +2227,11 @@ static int __init thermal_init(void)
        if (result)
                goto exit_netlink;
 
+       result = register_pm_notifier(&thermal_pm_nb);
+       if (result)
+               pr_warn("Thermal: Can not register suspend notifier, return %d\n",
+                       result);
+
        return 0;
 
 exit_netlink:
@@ -2179,6 +2251,7 @@ error:
 
 static void __exit thermal_exit(void)
 {
+       unregister_pm_notifier(&thermal_pm_nb);
        of_thermal_destroy_zones();
        genetlink_exit();
        class_unregister(&thermal_class);
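
Taken together, the thermal_core.c hunks above make suspend explicit: a PM notifier raises in_suspend on PM_SUSPEND_PREPARE (and the hibernate equivalents) so thermal_zone_device_update() becomes a no-op while devices are being quiesced, and on resume every zone is reset (temperature invalidated, instances de-initialized) and re-evaluated from scratch; new zones get the same reset-then-update treatment at registration. A compact user-space model of that gate, with the zone list reduced to a single struct:

    #include <stdatomic.h>
    #include <stdio.h>

    #define TEMP_INVALID  (-274000)   /* stand-in for THERMAL_TEMP_INVALID */

    static atomic_int in_suspend;

    struct zone { const char *name; int temperature; };

    static void zone_update(struct zone *tz, int new_temp)
    {
            if (atomic_load(&in_suspend))
                    return;            /* polling/interrupt work is ignored in suspend */
            tz->temperature = new_temp;
            printf("%s: now %d millicelsius\n", tz->name, tz->temperature);
    }

    static void zone_reset(struct zone *tz)
    {
            tz->temperature = TEMP_INVALID;  /* forces a fresh evaluation after resume */
    }

    int main(void)
    {
            struct zone cpu = { "cpu-thermal", 45000 };

            atomic_store(&in_suspend, 1);    /* PM_SUSPEND_PREPARE */
            zone_update(&cpu, 90000);        /* dropped: reading may be bogus mid-suspend */

            atomic_store(&in_suspend, 0);    /* PM_POST_SUSPEND */
            zone_reset(&cpu);
            zone_update(&cpu, 52000);        /* first post-resume reading is trusted again */
            return 0;
    }
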
index d7ac1fccd6598a80453dc51da655e37b30b85ac7..749d41abfbabecfc8fc33f54f4bc2e5d680450f8 100644 (file)
@@ -41,6 +41,7 @@ struct thermal_instance {
        struct thermal_zone_device *tz;
        struct thermal_cooling_device *cdev;
        int trip;
+       bool initialized;
        unsigned long upper;    /* Highest cooling state for this trip point */
        unsigned long lower;    /* Lowest cooling state for this trip point */
        unsigned long target;   /* expected cooling state */
index 799634b382c6f0a131fb9eadd31dc89984f16830..3f91132d5b665375645d6bed034c01e3699206e6 100644 (file)
@@ -41,7 +41,7 @@ struct tb_ctl {
 
 
 #define tb_ctl_WARN(ctl, format, arg...) \
-       dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)
+       dev_WARN(&(ctl)->nhi->pdev->dev, true, format, ## arg)
 
 #define tb_ctl_err(ctl, format, arg...) \
        dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)
index 20a41f7de76f687d39329c2f4ea58ae30dc8fc6d..3f8d6cecbceaea95cd1445461a5b5dcb0ca43738 100644 (file)
@@ -51,11 +51,10 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
                 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
                 active ? "enabling" : "disabling", reg, bit, old, new);
 
-       if (new == old)
-               dev_WARN(&ring->nhi->pdev->dev,
-                                        "interrupt for %s %d is already %s\n",
-                                        RING_TYPE(ring), ring->hop,
-                                        active ? "enabled" : "disabled");
+       dev_WARN(&ring->nhi->pdev->dev, new == old,
+                       "interrupt for %s %d is already %s\n",
+                       RING_TYPE(ring), ring->hop,
+                       active ? "enabled" : "disabled");
        iowrite32(new, ring->nhi->iobase + reg);
 }
 
@@ -189,11 +188,10 @@ static void ring_work(struct work_struct *work)
                        frame->eof = ring->descriptors[ring->tail].eof;
                        frame->sof = ring->descriptors[ring->tail].sof;
                        frame->flags = ring->descriptors[ring->tail].flags;
-                       if (frame->sof != 0)
-                               dev_WARN(&ring->nhi->pdev->dev,
-                                        "%s %d got unexpected SOF: %#x\n",
-                                        RING_TYPE(ring), ring->hop,
-                                        frame->sof);
+                       dev_WARN(&ring->nhi->pdev->dev, frame->sof != 0,
+                                       "%s %d got unexpected SOF: %#x\n",
+                                       RING_TYPE(ring), ring->hop,
+                                       frame->sof);
                        /*
                         * known flags:
                         * raw not enabled, interupt not set: 0x2=0010
@@ -201,11 +199,10 @@ static void ring_work(struct work_struct *work)
                         * raw not enabled: 0xb=1011
                         * partial frame (>MAX_FRAME_SIZE): 0xe=1110
                         */
-                       if (frame->flags != 0xa)
-                               dev_WARN(&ring->nhi->pdev->dev,
-                                        "%s %d got unexpected flags: %#x\n",
-                                        RING_TYPE(ring), ring->hop,
-                                        frame->flags);
+                       dev_WARN(&ring->nhi->pdev->dev, frame->flags != 0xa,
+                                       "%s %d got unexpected flags: %#x\n",
+                                       RING_TYPE(ring), ring->hop,
+                                       frame->flags);
                }
                ring->tail = (ring->tail + 1) % ring->size;
        }
@@ -246,17 +243,17 @@ static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
                 transmit ? "TX" : "RX", hop, size);
 
        mutex_lock(&nhi->lock);
-       if (hop >= nhi->hop_count) {
-               dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
+       if (dev_WARN(&nhi->pdev->dev, hop >= nhi->hop_count,
+                                       "invalid hop: %d\n", hop))
                goto err;
-       }
-       if (transmit && nhi->tx_rings[hop]) {
-               dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
+
+       if (dev_WARN(&nhi->pdev->dev, transmit && nhi->tx_rings[hop],
+                                       "TX hop %d already allocated\n", hop))
                goto err;
-       } else if (!transmit && nhi->rx_rings[hop]) {
-               dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
+       else if (dev_WARN(&nhi->pdev->dev, !transmit && nhi->rx_rings[hop],
+                                       "RX hop %d already allocated\n", hop))
                goto err;
-       }
+
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring)
                goto err;
@@ -313,10 +310,10 @@ void ring_start(struct tb_ring *ring)
 {
        mutex_lock(&ring->nhi->lock);
        mutex_lock(&ring->lock);
-       if (ring->running) {
-               dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
+       if (dev_WARN(&ring->nhi->pdev->dev, ring->running,
+                                       "ring already started\n"))
                goto err;
-       }
+
        dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
                 RING_TYPE(ring), ring->hop);
 
@@ -359,11 +356,11 @@ void ring_stop(struct tb_ring *ring)
        mutex_lock(&ring->lock);
        dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
                 RING_TYPE(ring), ring->hop);
-       if (!ring->running) {
-               dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
-                        RING_TYPE(ring), ring->hop);
+       if (dev_WARN(&ring->nhi->pdev->dev, !ring->running,
+                                       "%s %d already stopped\n",
+                                       RING_TYPE(ring), ring->hop))
                goto err;
-       }
+
        ring_interrupt_active(ring, false);
 
        ring_iowrite32options(ring, 0, 0);
@@ -407,11 +404,8 @@ void ring_free(struct tb_ring *ring)
        else
                ring->nhi->rx_rings[ring->hop] = NULL;
 
-       if (ring->running) {
-               dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
-                        RING_TYPE(ring), ring->hop);
-       }
-
+       dev_WARN(&ring->nhi->pdev->dev, ring->running,
+                       "%s %d still running\n", RING_TYPE(ring), ring->hop);
        dma_free_coherent(&ring->nhi->pdev->dev,
                          ring->size * sizeof(*ring->descriptors),
                          ring->descriptors, ring->descriptors_dma);
@@ -515,12 +509,10 @@ static void nhi_shutdown(struct tb_nhi *nhi)
        dev_info(&nhi->pdev->dev, "shutdown\n");
 
        for (i = 0; i < nhi->hop_count; i++) {
-               if (nhi->tx_rings[i])
-                       dev_WARN(&nhi->pdev->dev,
-                                "TX ring %d is still active\n", i);
-               if (nhi->rx_rings[i])
-                       dev_WARN(&nhi->pdev->dev,
-                                "RX ring %d is still active\n", i);
+               dev_WARN(&nhi->pdev->dev, nhi->tx_rings[i],
+                               "TX ring %d is still active\n", i);
+               dev_WARN(&nhi->pdev->dev, nhi->rx_rings[i],
+                               "RX ring %d is still active\n", i);
        }
        nhi_disable_interrupts(nhi);
        /*
index 9562cd026dc01754144edab74f981313f837b507..39eebaac05f2dba05871d74111ce962a6b026f98 100644 (file)
@@ -53,7 +53,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, int num_hops)
 void tb_path_free(struct tb_path *path)
 {
        if (path->activated) {
-               tb_WARN(path->tb, "trying to free an activated path\n")
+               tb_WARN(path->tb, "trying to free an activated path\n");
                return;
        }
        kfree(path->hops);
index 8b0d7cf2b6d6d68ab6dd30398c838c44aaf04781..5735242912f1c0bf9207934b082441bab6babd4f 100644 (file)
@@ -186,7 +186,7 @@ static inline int tb_port_write(struct tb_port *port, void *buffer,
 }
 
 #define tb_err(tb, fmt, arg...) dev_err(&(tb)->nhi->pdev->dev, fmt, ## arg)
-#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, fmt, ## arg)
+#define tb_WARN(tb, fmt, arg...) dev_WARN(&(tb)->nhi->pdev->dev, true, fmt, ## arg)
 #define tb_warn(tb, fmt, arg...) dev_warn(&(tb)->nhi->pdev->dev, fmt, ## arg)
 #define tb_info(tb, fmt, arg...) dev_info(&(tb)->nhi->pdev->dev, fmt, ## arg)
 
index c01f4509587779346e20cace71d192fe0a997c00..82c4d2e45319b2ad27092469cd437d587d327560 100644 (file)
@@ -226,7 +226,7 @@ config CYCLADES
 
 config CYZ_INTR
        bool "Cyclades-Z interrupt mode operation"
-       depends on CYCLADES
+       depends on CYCLADES && PCI
        help
          The Cyclades-Z family of multiport cards allows 2 (two) driver op
          modes: polling and interrupt. In polling mode, the driver will check
index 2caaf5a2516d3c142a1da70f5276c258676a7b59..eacf4c9f3b299b8f7f92a5cc52a5c347fb0bd541 100644 (file)
@@ -639,7 +639,7 @@ static void shutdown(struct tty_struct *tty, struct serial_state *info)
        custom.adkcon = AC_UARTBRK;
        mb();
 
-       if (tty->termios.c_cflag & HUPCL)
+       if (C_HUPCL(tty))
                info->MCR &= ~(SER_DTR|SER_RTS);
        rtsdtr_ctrl(info->MCR);
 
@@ -965,8 +965,7 @@ static void rs_throttle(struct tty_struct * tty)
        struct serial_state *info = tty->driver_data;
        unsigned long flags;
 #ifdef SERIAL_DEBUG_THROTTLE
-       printk("throttle %s: %d....\n", tty_name(tty),
-              tty->ldisc.chars_in_buffer(tty));
+       printk("throttle %s ....\n", tty_name(tty));
 #endif
 
        if (serial_paranoia_check(info, tty->name, "rs_throttle"))
@@ -975,7 +974,7 @@ static void rs_throttle(struct tty_struct * tty)
        if (I_IXOFF(tty))
                rs_send_xchar(tty, STOP_CHAR(tty));
 
-       if (tty->termios.c_cflag & CRTSCTS)
+       if (C_CRTSCTS(tty))
                info->MCR &= ~SER_RTS;
 
        local_irq_save(flags);
@@ -988,8 +987,7 @@ static void rs_unthrottle(struct tty_struct * tty)
        struct serial_state *info = tty->driver_data;
        unsigned long flags;
 #ifdef SERIAL_DEBUG_THROTTLE
-       printk("unthrottle %s: %d....\n", tty_name(tty),
-              tty->ldisc.chars_in_buffer(tty));
+       printk("unthrottle %s ....\n", tty_name(tty));
 #endif
 
        if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
@@ -1001,7 +999,7 @@ static void rs_unthrottle(struct tty_struct * tty)
                else
                        rs_send_xchar(tty, START_CHAR(tty));
        }
-       if (tty->termios.c_cflag & CRTSCTS)
+       if (C_CRTSCTS(tty))
                info->MCR |= SER_RTS;
        local_irq_save(flags);
        rtsdtr_ctrl(info->MCR);
@@ -1334,8 +1332,7 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        change_speed(tty, info, old_termios);
 
        /* Handle transition to B0 status */
-       if ((old_termios->c_cflag & CBAUD) &&
-           !(cflag & CBAUD)) {
+       if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
                info->MCR &= ~(SER_DTR|SER_RTS);
                local_irq_save(flags);
                rtsdtr_ctrl(info->MCR);
@@ -1343,21 +1340,17 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        }
 
        /* Handle transition away from B0 status */
-       if (!(old_termios->c_cflag & CBAUD) &&
-           (cflag & CBAUD)) {
+       if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
                info->MCR |= SER_DTR;
-               if (!(tty->termios.c_cflag & CRTSCTS) || 
-                   !test_bit(TTY_THROTTLED, &tty->flags)) {
+               if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
                        info->MCR |= SER_RTS;
-               }
                local_irq_save(flags);
                rtsdtr_ctrl(info->MCR);
                local_irq_restore(flags);
        }
 
        /* Handle turning off CRTSCTS */
-       if ((old_termios->c_cflag & CRTSCTS) &&
-           !(tty->termios.c_cflag & CRTSCTS)) {
+       if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
                tty->hw_stopped = 0;
                rs_start(tty);
        }
@@ -1369,8 +1362,7 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
         * XXX  It's not clear whether the current behavior is correct
         * or not.  Hence, this may change.....
         */
-       if (!(old_termios->c_cflag & CLOCAL) &&
-           (tty->termios.c_cflag & CLOCAL))
+       if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty))
                wake_up_interruptible(&info->open_wait);
 #endif
 }
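
The amiserial changes (and the cyclades, isicom, mxser, n_gsm and rocket ones that follow) replace open-coded tty->termios.c_cflag tests with the C_HUPCL()/C_CRTSCTS()/C_CLOCAL()-style helpers; the helpers are plain flag tests on the tty's termios. A user-space approximation built on <termios.h>, with a cut-down stand-in for the kernel's tty_struct:

    #include <stdio.h>
    #include <termios.h>

    /* Keeps only the termios member the helpers look at. */
    struct fake_tty {
            struct termios termios;
    };

    /* Same shape as the kernel helpers: test one c_cflag bit of the given tty. */
    #define C_FLAG(tty, flag) ((tty)->termios.c_cflag & (flag))
    #define C_CRTSCTS(tty)    C_FLAG((tty), CRTSCTS)
    #define C_CLOCAL(tty)     C_FLAG((tty), CLOCAL)
    #define C_HUPCL(tty)      C_FLAG((tty), HUPCL)

    int main(void)
    {
            struct fake_tty tty = { .termios = { .c_cflag = CRTSCTS | HUPCL } };

            if (C_CRTSCTS(&tty))
                    printf("hardware flow control enabled\n");
            if (!C_CLOCAL(&tty))
                    printf("modem control lines honoured\n");
            if (C_HUPCL(&tty))
                    printf("hang up (drop DTR/RTS) on last close\n");
            return 0;
    }
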
index abbed201dc744aac81cfd543eaa485a148802a01..d67e542bab1cee0a43f9b83342e99fe03e83f8d0 100644 (file)
@@ -1440,7 +1440,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
                        info->port.xmit_buf = NULL;
                        free_page((unsigned long)temp);
                }
-               if (tty->termios.c_cflag & HUPCL)
+               if (C_HUPCL(tty))
                        cyy_change_rts_dtr(info, 0, TIOCM_RTS | TIOCM_DTR);
 
                cyy_issue_cmd(info, CyCHAN_CTL | CyDIS_RCVR);
@@ -1469,7 +1469,7 @@ static void cy_shutdown(struct cyclades_port *info, struct tty_struct *tty)
                        free_page((unsigned long)temp);
                }
 
-               if (tty->termios.c_cflag & HUPCL)
+               if (C_HUPCL(tty))
                        tty_port_lower_dtr_rts(&info->port);
 
                set_bit(TTY_IO_ERROR, &tty->flags);
@@ -2795,8 +2795,7 @@ static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 
        cy_set_line_char(info, tty);
 
-       if ((old_termios->c_cflag & CRTSCTS) &&
-                       !(tty->termios.c_cflag & CRTSCTS)) {
+       if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
                tty->hw_stopped = 0;
                cy_start(tty);
        }
@@ -2807,8 +2806,7 @@ static void cy_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
         * XXX  It's not clear whether the current behavior is correct
         * or not.  Hence, this may change.....
         */
-       if (!(old_termios->c_cflag & CLOCAL) &&
-           (tty->termios.c_cflag & CLOCAL))
+       if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty))
                wake_up_interruptible(&info->port.open_wait);
 #endif
 }                              /* cy_set_termios */
@@ -2852,8 +2850,8 @@ static void cy_throttle(struct tty_struct *tty)
        unsigned long flags;
 
 #ifdef CY_DEBUG_THROTTLE
-       printk(KERN_DEBUG "cyc:throttle %s: %ld...ttyC%d\n", tty_name(tty),
-                       tty->ldisc.chars_in_buffer(tty), info->line);
+       printk(KERN_DEBUG "cyc:throttle %s ...ttyC%d\n", tty_name(tty),
+                        info->line);
 #endif
 
        if (serial_paranoia_check(info, tty->name, "cy_throttle"))
@@ -2868,7 +2866,7 @@ static void cy_throttle(struct tty_struct *tty)
                        info->throttle = 1;
        }
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                if (!cy_is_Z(card)) {
                        spin_lock_irqsave(&card->card_lock, flags);
                        cyy_change_rts_dtr(info, 0, TIOCM_RTS);
@@ -2891,8 +2889,8 @@ static void cy_unthrottle(struct tty_struct *tty)
        unsigned long flags;
 
 #ifdef CY_DEBUG_THROTTLE
-       printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n",
-               tty_name(tty), tty_chars_in_buffer(tty), info->line);
+       printk(KERN_DEBUG "cyc:unthrottle %s ...ttyC%d\n",
+               tty_name(tty), info->line);
 #endif
 
        if (serial_paranoia_check(info, tty->name, "cy_unthrottle"))
@@ -2905,7 +2903,7 @@ static void cy_unthrottle(struct tty_struct *tty)
                        cy_send_xchar(tty, START_CHAR(tty));
        }
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                card = info->card;
                if (!cy_is_Z(card)) {
                        spin_lock_irqsave(&card->card_lock, flags);
index 342b36b9ad35a1d4c77bbfa757d58c8ceaea0c14..7ac9bcdf1e61a551bb8286a48c0afc492e4a95ee 100644 (file)
@@ -23,7 +23,6 @@
  * byte channel used for the console is designated as the default tty.
  */
 
-#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/err.h>
@@ -719,19 +718,6 @@ error:
        return ret;
 }
 
-static int ehv_bc_tty_remove(struct platform_device *pdev)
-{
-       struct ehv_bc_data *bc = dev_get_drvdata(&pdev->dev);
-
-       tty_unregister_device(ehv_bc_driver, bc - bcs);
-
-       tty_port_destroy(&bc->port);
-       irq_dispose_mapping(bc->tx_irq);
-       irq_dispose_mapping(bc->rx_irq);
-
-       return 0;
-}
-
 static const struct of_device_id ehv_bc_tty_of_ids[] = {
        { .compatible = "epapr,hv-byte-channel" },
        {}
@@ -741,15 +727,15 @@ static struct platform_driver ehv_bc_tty_driver = {
        .driver = {
                .name = "ehv-bc",
                .of_match_table = ehv_bc_tty_of_ids,
+               .suppress_bind_attrs = true,
        },
        .probe          = ehv_bc_tty_probe,
-       .remove         = ehv_bc_tty_remove,
 };
 
 /**
  * ehv_bc_init - ePAPR hypervisor byte channel driver initialization
  *
- * This function is called when this module is loaded.
+ * This function is called when this driver is loaded.
  */
 static int __init ehv_bc_init(void)
 {
@@ -814,24 +800,4 @@ error:
 
        return ret;
 }
-
-
-/**
- * ehv_bc_exit - ePAPR hypervisor byte channel driver termination
- *
- * This function is called when this driver is unloaded.
- */
-static void __exit ehv_bc_exit(void)
-{
-       platform_driver_unregister(&ehv_bc_tty_driver);
-       tty_unregister_driver(ehv_bc_driver);
-       put_tty_driver(ehv_bc_driver);
-       kfree(bcs);
-}
-
-module_init(ehv_bc_init);
-module_exit(ehv_bc_exit);
-
-MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
-MODULE_DESCRIPTION("ePAPR hypervisor byte channel driver");
-MODULE_LICENSE("GPL v2");
+device_initcall(ehv_bc_init);
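
ehv_bytechan is turned from a module into a built-in-only driver: module.h, the remove() path, the exit routine and the MODULE_* boilerplate go away, device_initcall() replaces module_init()/module_exit(), and suppress_bind_attrs keeps the platform device from being unbound through sysfs now that there is no teardown code left to run. A kernel-style skeleton of the same shape, with placeholder names, only meaningful inside a kernel tree:

    #include <linux/init.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            dev_info(&pdev->dev, "probed (built-in only, no remove path)\n");
            return 0;
    }

    static const struct of_device_id example_of_ids[] = {
            { .compatible = "vendor,example-bytechannel" },   /* placeholder */
            {}
    };

    static struct platform_driver example_driver = {
            .driver = {
                    .name = "example-bc",
                    .of_match_table = example_of_ids,
                    .suppress_bind_attrs = true,   /* no sysfs unbind, so no .remove */
            },
            .probe = example_probe,
    };

    static int __init example_init(void)
    {
            return platform_driver_register(&example_driver);
    }
    device_initcall(example_init);   /* built-in: runs once at boot, never unloaded */
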
index 0f82c0b146f6d8eb0bdea4a92e4c8a4d72dd3332..752232c77503aaa05cf0b437aba6342b2a5741a0 100644 (file)
@@ -162,7 +162,7 @@ static int goldfish_tty_console_setup(struct console *co, char *options)
        return 0;
 }
 
-static struct tty_port_operations goldfish_port_ops = {
+static const struct tty_port_operations goldfish_port_ops = {
        .activate = goldfish_tty_activate,
        .shutdown = goldfish_tty_shutdown
 };
index f575a9b5ede7b05bd24c87ecf58425e89ec20fdc..b05dc508662795639d391aee29ed568c2e2fe75e 100644 (file)
@@ -41,7 +41,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/console.h>
-#include <linux/module.h>
 
 #include <asm/hvconsole.h>
 #include <asm/vio.h>
@@ -61,7 +60,6 @@ static struct vio_device_id hvc_driver_table[] = {
 #endif
        { "", "" }
 };
-MODULE_DEVICE_TABLE(vio, hvc_driver_table);
 
 typedef enum hv_protocol {
        HV_PROTOCOL_RAW,
@@ -363,26 +361,13 @@ static int hvc_vio_probe(struct vio_dev *vdev,
        return 0;
 }
 
-static int hvc_vio_remove(struct vio_dev *vdev)
-{
-       struct hvc_struct *hp = dev_get_drvdata(&vdev->dev);
-       int rc, termno;
-
-       termno = hp->vtermno;
-       rc = hvc_remove(hp);
-       if (rc == 0) {
-               if (hvterm_privs[termno] != &hvterm_priv0)
-                       kfree(hvterm_privs[termno]);
-               hvterm_privs[termno] = NULL;
-       }
-       return rc;
-}
-
 static struct vio_driver hvc_vio_driver = {
        .id_table       = hvc_driver_table,
        .probe          = hvc_vio_probe,
-       .remove         = hvc_vio_remove,
        .name           = hvc_driver_name,
+       .driver = {
+               .suppress_bind_attrs    = true,
+       },
 };
 
 static int __init hvc_vio_init(void)
@@ -394,13 +379,7 @@ static int __init hvc_vio_init(void)
 
        return rc;
 }
-module_init(hvc_vio_init); /* after drivers/char/hvc_console.c */
-
-static void __exit hvc_vio_exit(void)
-{
-       vio_unregister_driver(&hvc_vio_driver);
-}
-module_exit(hvc_vio_exit);
+device_initcall(hvc_vio_init); /* after drivers/tty/hvc/hvc_console.c */
 
 void __init hvc_vio_init_early(void)
 {
index fa816b7193b6af9f823a74320588cb5f1c7bafde..f417fa1ee47cb7f2b9ed3c53f7887ee4a4263d15 100644 (file)
@@ -162,7 +162,7 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
        return recv;
 }
 
-static struct hv_ops domU_hvc_ops = {
+static const struct hv_ops domU_hvc_ops = {
        .get_chars = domU_read_console,
        .put_chars = domU_write_console,
        .notifier_add = notifier_add_irq,
@@ -188,7 +188,7 @@ static int dom0_write_console(uint32_t vtermno, const char *str, int len)
        return len;
 }
 
-static struct hv_ops dom0_hvc_ops = {
+static const struct hv_ops dom0_hvc_ops = {
        .get_chars = dom0_read_console,
        .put_chars = dom0_write_console,
        .notifier_add = notifier_add_irq,
@@ -323,6 +323,7 @@ void xen_console_resume(void)
        }
 }
 
+#ifdef CONFIG_HVC_XEN_FRONTEND
 static void xencons_disconnect_backend(struct xencons_info *info)
 {
        if (info->irq > 0)
@@ -363,7 +364,6 @@ static int xen_console_remove(struct xencons_info *info)
        return 0;
 }
 
-#ifdef CONFIG_HVC_XEN_FRONTEND
 static int xencons_remove(struct xenbus_device *dev)
 {
        return xen_console_remove(dev_get_drvdata(&dev->dev));
index 99875949bfb77fc1a2bbf628594e55599dd226a0..8bf67630018b8292e5f265f58f217121bbe64df4 100644 (file)
@@ -1204,8 +1204,7 @@ static void isicom_set_termios(struct tty_struct *tty,
        isicom_config_port(tty);
        spin_unlock_irqrestore(&port->card->card_lock, flags);
 
-       if ((old_termios->c_cflag & CRTSCTS) &&
-                       !(tty->termios.c_cflag & CRTSCTS)) {
+       if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
                tty->hw_stopped = 0;
                isicom_start(tty);
        }
index 4c4a236745692ab1ee2e674f5d7dbf0334002dae..2f12bb9f4336d761d0a5fba2f81cd4fb65a2bc45 100644 (file)
@@ -254,6 +254,7 @@ struct mxser_port {
        int xmit_head;
        int xmit_tail;
        int xmit_cnt;
+       int closing;
 
        struct ktermios normal_termios;
 
@@ -1081,6 +1082,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
                return;
        if (tty_port_close_start(port, tty, filp) == 0)
                return;
+       info->closing = 1;
        mutex_lock(&port->mutex);
        mxser_close_port(port);
        mxser_flush_buffer(tty);
@@ -1091,6 +1093,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
        mxser_shutdown_port(port);
        clear_bit(ASYNCB_INITIALIZED, &port->flags);
        mutex_unlock(&port->mutex);
+       info->closing = 0;
        /* Right now the tty_port set is done outside of the close_end helper
           as we don't yet have everyone using refcounts */     
        tty_port_close_end(port, tty);
@@ -1864,7 +1867,7 @@ static void mxser_stoprx(struct tty_struct *tty)
                }
        }
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                info->MCR &= ~UART_MCR_RTS;
                outb(info->MCR, info->ioaddr + UART_MCR);
        }
@@ -1901,7 +1904,7 @@ static void mxser_unthrottle(struct tty_struct *tty)
                }
        }
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                info->MCR |= UART_MCR_RTS;
                outb(info->MCR, info->ioaddr + UART_MCR);
        }
@@ -1949,15 +1952,13 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
        mxser_change_speed(tty, old_termios);
        spin_unlock_irqrestore(&info->slock, flags);
 
-       if ((old_termios->c_cflag & CRTSCTS) &&
-                       !(tty->termios.c_cflag & CRTSCTS)) {
+       if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
                tty->hw_stopped = 0;
                mxser_start(tty);
        }
 
        /* Handle sw stopped */
-       if ((old_termios->c_iflag & IXON) &&
-                       !(tty->termios.c_iflag & IXON)) {
+       if ((old_termios->c_iflag & IXON) && !I_IXON(tty)) {
                tty->stopped = 0;
 
                if (info->board->chip_flag) {
@@ -2255,10 +2256,8 @@ static irqreturn_t mxser_interrupt(int irq, void *dev_id)
                                        break;
                                iir &= MOXA_MUST_IIR_MASK;
                                tty = tty_port_tty_get(&port->port);
-                               if (!tty ||
-                                               (port->port.flags & ASYNC_CLOSING) ||
-                                               !(port->port.flags &
-                                                       ASYNC_INITIALIZED)) {
+                               if (!tty || port->closing ||
+                                   !(port->port.flags & ASYNC_INITIALIZED)) {
                                        status = inb(port->ioaddr + UART_LSR);
                                        outb(0x27, port->ioaddr + UART_FCR);
                                        inb(port->ioaddr + UART_MSR);
@@ -2337,7 +2336,7 @@ static const struct tty_operations mxser_ops = {
        .get_icount = mxser_get_icount,
 };
 
-static struct tty_port_operations mxser_port_ops = {
+static const struct tty_port_operations mxser_port_ops = {
        .carrier_raised = mxser_carrier_raised,
        .dtr_rts = mxser_dtr_rts,
        .activate = mxser_activate,
index c3fe026d3168dda14e7388472db49abf27544e4f..c01620780f5baf503ee6c46eef9714ef9ac2bae4 100644 (file)
@@ -1066,7 +1066,7 @@ static void gsm_process_modem(struct tty_struct *tty, struct gsm_dlci *dlci,
        /* Carrier drop -> hangup */
        if (tty) {
                if ((mlines & TIOCM_CD) == 0 && (dlci->modem_rx & TIOCM_CD))
-                       if (!(tty->termios.c_cflag & CLOCAL))
+                       if (!C_CLOCAL(tty))
                                tty_hangup(tty);
        }
        if (brk & 0x01)
@@ -2303,21 +2303,6 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
        /* If clogged call tty_throttle(tty); */
 }
 
-/**
- *     gsmld_chars_in_buffer   -       report available bytes
- *     @tty: tty device
- *
- *     Report the number of characters buffered to be delivered to user
- *     at this instant in time.
- *
- *     Locking: gsm lock
- */
-
-static ssize_t gsmld_chars_in_buffer(struct tty_struct *tty)
-{
-       return 0;
-}
-
 /**
  *     gsmld_flush_buffer      -       clean input queue
  *     @tty:   terminal device
@@ -2830,7 +2815,6 @@ static struct tty_ldisc_ops tty_ldisc_packet = {
        .open            = gsmld_open,
        .close           = gsmld_close,
        .flush_buffer    = gsmld_flush_buffer,
-       .chars_in_buffer = gsmld_chars_in_buffer,
        .read            = gsmld_read,
        .write           = gsmld_write,
        .ioctl           = gsmld_ioctl,
@@ -3132,7 +3116,7 @@ static void gsmtty_throttle(struct tty_struct *tty)
        struct gsm_dlci *dlci = tty->driver_data;
        if (dlci->state == DLCI_CLOSED)
                return;
-       if (tty->termios.c_cflag & CRTSCTS)
+       if (C_CRTSCTS(tty))
                dlci->modem_tx &= ~TIOCM_DTR;
        dlci->throttled = 1;
        /* Send an MSC with DTR cleared */
@@ -3144,7 +3128,7 @@ static void gsmtty_unthrottle(struct tty_struct *tty)
        struct gsm_dlci *dlci = tty->driver_data;
        if (dlci->state == DLCI_CLOSED)
                return;
-       if (tty->termios.c_cflag & CRTSCTS)
+       if (C_CRTSCTS(tty))
                dlci->modem_tx |= TIOCM_DTR;
        dlci->throttled = 0;
        /* Send an MSC with DTR set */
index bbc4ce66c2c18dd30fb10b80f955a4f4565225c5..bcaba17688f6280fa13da9f3fa733560dc2e6f36 100644 (file)
@@ -159,7 +159,6 @@ struct n_hdlc {
 /*
  * HDLC buffer list manipulation functions
  */
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list);
 static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
                           struct n_hdlc_buf *buf);
 static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
@@ -853,10 +852,10 @@ static struct n_hdlc *n_hdlc_alloc(void)
        if (!n_hdlc)
                return NULL;
 
-       n_hdlc_buf_list_init(&n_hdlc->rx_free_buf_list);
-       n_hdlc_buf_list_init(&n_hdlc->tx_free_buf_list);
-       n_hdlc_buf_list_init(&n_hdlc->rx_buf_list);
-       n_hdlc_buf_list_init(&n_hdlc->tx_buf_list);
+       spin_lock_init(&n_hdlc->rx_free_buf_list.spinlock);
+       spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
+       spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
+       spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
        
        /* allocate free rx buffer list */
        for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
@@ -884,16 +883,6 @@ static struct n_hdlc *n_hdlc_alloc(void)
        
 }      /* end of n_hdlc_alloc() */
 
-/**
- * n_hdlc_buf_list_init - initialize specified HDLC buffer list
- * @list - pointer to buffer list
- */
-static void n_hdlc_buf_list_init(struct n_hdlc_buf_list *list)
-{
-       memset(list, 0, sizeof(*list));
-       spin_lock_init(&list->spinlock);
-}      /* end of n_hdlc_buf_list_init() */
-
 /**
  * n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
  * @list - pointer to buffer list
index d9a5fc28fef4bbfe56850e258b726ca3464fd82b..fb76a7d80e7e55c892254685281f924d7fe1e5f5 100644 (file)
@@ -113,8 +113,6 @@ struct n_tty_data {
        DECLARE_BITMAP(read_flags, N_TTY_BUF_SIZE);
        unsigned char echo_buf[N_TTY_BUF_SIZE];
 
-       int minimum_to_wake;
-
        /* consumer-published */
        size_t read_tail;
        size_t line_start;
@@ -153,15 +151,6 @@ static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i)
        return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
 }
 
-static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
-                              unsigned char __user *ptr)
-{
-       struct n_tty_data *ldata = tty->disc_data;
-
-       tty_audit_add_data(tty, &x, 1, ldata->icanon);
-       return put_user(x, ptr);
-}
-
 static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
                            size_t tail, size_t n)
 {
@@ -171,7 +160,7 @@ static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
        int uncopied;
 
        if (n > size) {
-               tty_audit_add_data(tty, from, size, ldata->icanon);
+               tty_audit_add_data(tty, from, size);
                uncopied = copy_to_user(to, from, size);
                if (uncopied)
                        return uncopied;
@@ -180,7 +169,7 @@ static int tty_copy_to_user(struct tty_struct *tty, void __user *to,
                from = ldata->read_buf;
        }
 
-       tty_audit_add_data(tty, from, n, ldata->icanon);
+       tty_audit_add_data(tty, from, n);
        return copy_to_user(to, from, n);
 }
 
@@ -239,8 +228,8 @@ static ssize_t chars_in_buffer(struct tty_struct *tty)
 
 static void n_tty_write_wakeup(struct tty_struct *tty)
 {
-       if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags))
-               kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
+       clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+       kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
 }
 
 static void n_tty_check_throttle(struct tty_struct *tty)
@@ -269,16 +258,11 @@ static void n_tty_check_throttle(struct tty_struct *tty)
 
 static void n_tty_check_unthrottle(struct tty_struct *tty)
 {
-       if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
-           tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
+       if (tty->driver->type == TTY_DRIVER_TYPE_PTY) {
                if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
                        return;
-               if (!tty->count)
-                       return;
                n_tty_kick_worker(tty);
-               n_tty_write_wakeup(tty->link);
-               if (waitqueue_active(&tty->link->write_wait))
-                       wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
+               tty_wakeup(tty->link);
                return;
        }
 
@@ -295,8 +279,6 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
                tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
                if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
                        break;
-               if (!tty->count)
-                       break;
                n_tty_kick_worker(tty);
                unthrottled = tty_unthrottle_safe(tty);
                if (!unthrottled)
@@ -383,28 +365,6 @@ static void n_tty_flush_buffer(struct tty_struct *tty)
        up_write(&tty->termios_rwsem);
 }
 
-/**
- *     n_tty_chars_in_buffer   -       report available bytes
- *     @tty: tty device
- *
- *     Report the number of characters buffered to be delivered to user
- *     at this instant in time.
- *
- *     Locking: exclusive termios_rwsem
- */
-
-static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
-{
-       ssize_t n;
-
-       WARN_ONCE(1, "%s is deprecated and scheduled for removal.", __func__);
-
-       down_write(&tty->termios_rwsem);
-       n = chars_in_buffer(tty);
-       up_write(&tty->termios_rwsem);
-       return n;
-}
-
 /**
  *     is_utf8_continuation    -       utf8 multibyte check
  *     @c: byte to check
@@ -1564,8 +1524,6 @@ n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
                        flag = *fp++;
                if (likely(flag == TTY_NORMAL))
                        n_tty_receive_char_closing(tty, *cp++);
-               else
-                       n_tty_receive_char_flagged(tty, *cp++, flag);
        }
 }
 
@@ -1667,7 +1625,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
        /* publish read_head to consumer */
        smp_store_release(&ldata->commit_head, ldata->read_head);
 
-       if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) {
+       if (read_cnt(ldata)) {
                kill_fasync(&tty->fasync, SIGIO, POLL_IN);
                wake_up_interruptible_poll(&tty->read_wait, POLLIN);
        }
@@ -1788,12 +1746,6 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
        return n_tty_receive_buf_common(tty, cp, fp, count, 1);
 }
 
-int is_ignored(int sig)
-{
-       return (sigismember(&current->blocked, sig) ||
-               current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
-}
-
 /**
  *     n_tty_set_termios       -       termios data changed
  *     @tty: terminal
@@ -1940,7 +1892,6 @@ static int n_tty_open(struct tty_struct *tty)
        reset_buffer_flags(tty->disc_data);
        ldata->column = 0;
        ldata->canon_column = 0;
-       ldata->minimum_to_wake = 1;
        ldata->num_overrun = 0;
        ldata->no_room = 0;
        ldata->lnext = 0;
@@ -2018,7 +1969,7 @@ static int copy_from_read_buf(struct tty_struct *tty,
                retval = copy_to_user(*b, from, n);
                n -= retval;
                is_eof = n == 1 && *from == EOF_CHAR(tty);
-               tty_audit_add_data(tty, from, n, ldata->icanon);
+               tty_audit_add_data(tty, from, n);
                smp_store_release(&ldata->read_tail, ldata->read_tail + n);
                /* Turn single EOF into zero-length read */
                if (L_EXTPROC(tty) && ldata->icanon && is_eof &&
@@ -2112,7 +2063,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
                        ldata->line_start = ldata->read_tail;
                else
                        ldata->push = 0;
-               tty_audit_push(tty);
+               tty_audit_push();
        }
        return 0;
 }
@@ -2203,14 +2154,9 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
                minimum = MIN_CHAR(tty);
                if (minimum) {
                        time = (HZ / 10) * TIME_CHAR(tty);
-                       if (time)
-                               ldata->minimum_to_wake = 1;
-                       else if (!waitqueue_active(&tty->read_wait) ||
-                                (ldata->minimum_to_wake > minimum))
-                               ldata->minimum_to_wake = minimum;
                } else {
                        timeout = (HZ / 10) * TIME_CHAR(tty);
-                       ldata->minimum_to_wake = minimum = 1;
+                       minimum = 1;
                }
        }
 
@@ -2228,19 +2174,15 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
                        cs = tty->link->ctrl_status;
                        tty->link->ctrl_status = 0;
                        spin_unlock_irq(&tty->link->ctrl_lock);
-                       if (tty_put_user(tty, cs, b++)) {
+                       if (put_user(cs, b)) {
                                retval = -EFAULT;
-                               b--;
                                break;
                        }
+                       b++;
                        nr--;
                        break;
                }
 
-               if (((minimum - (b - buf)) < ldata->minimum_to_wake) &&
-                   ((minimum - (b - buf)) >= 1))
-                       ldata->minimum_to_wake = (minimum - (b - buf));
-
                done = check_other_done(tty);
 
                if (!input_available_p(tty, 0)) {
@@ -2278,11 +2220,11 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
 
                        /* Deal with packet mode. */
                        if (packet && b == buf) {
-                               if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
+                               if (put_user(TIOCPKT_DATA, b)) {
                                        retval = -EFAULT;
-                                       b--;
                                        break;
                                }
+                               b++;
                                nr--;
                        }
 
@@ -2306,9 +2248,6 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
        up_read(&tty->termios_rwsem);
 
        remove_wait_queue(&tty->read_wait, &wait);
-       if (!waitqueue_active(&tty->read_wait))
-               ldata->minimum_to_wake = minimum;
-
        mutex_unlock(&ldata->atomic_read_lock);
 
        if (b - buf)
@@ -2420,7 +2359,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
        }
 break_out:
        remove_wait_queue(&tty->write_wait, &wait);
-       if (b - buf != nr && tty->fasync)
+       if (nr && tty->fasync)
                set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
        up_read(&tty->termios_rwsem);
        return (b - buf) ? b - buf : retval;
@@ -2443,7 +2382,6 @@ break_out:
 static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
                                                        poll_table *wait)
 {
-       struct n_tty_data *ldata = tty->disc_data;
        unsigned int mask = 0;
 
        poll_wait(file, &tty->read_wait, wait);
@@ -2456,12 +2394,6 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
                mask |= POLLPRI | POLLIN | POLLRDNORM;
        if (tty_hung_up_p(file))
                mask |= POLLHUP;
-       if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
-               if (MIN_CHAR(tty) && !TIME_CHAR(tty))
-                       ldata->minimum_to_wake = MIN_CHAR(tty);
-               else
-                       ldata->minimum_to_wake = 1;
-       }
        if (tty->ops->write && !tty_is_writelocked(tty) &&
                        tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
                        tty_write_room(tty) > 0)
@@ -2510,25 +2442,12 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
        }
 }
 
-static void n_tty_fasync(struct tty_struct *tty, int on)
-{
-       struct n_tty_data *ldata = tty->disc_data;
-
-       if (!waitqueue_active(&tty->read_wait)) {
-               if (on)
-                       ldata->minimum_to_wake = 1;
-               else if (!tty->fasync)
-                       ldata->minimum_to_wake = N_TTY_BUF_SIZE;
-       }
-}
-
-struct tty_ldisc_ops tty_ldisc_N_TTY = {
+static struct tty_ldisc_ops n_tty_ops = {
        .magic           = TTY_LDISC_MAGIC,
        .name            = "n_tty",
        .open            = n_tty_open,
        .close           = n_tty_close,
        .flush_buffer    = n_tty_flush_buffer,
-       .chars_in_buffer = n_tty_chars_in_buffer,
        .read            = n_tty_read,
        .write           = n_tty_write,
        .ioctl           = n_tty_ioctl,
@@ -2536,7 +2455,6 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = {
        .poll            = n_tty_poll,
        .receive_buf     = n_tty_receive_buf,
        .write_wakeup    = n_tty_write_wakeup,
-       .fasync          = n_tty_fasync,
        .receive_buf2    = n_tty_receive_buf2,
 };
 
@@ -2544,14 +2462,18 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = {
  *     n_tty_inherit_ops       -       inherit N_TTY methods
  *     @ops: struct tty_ldisc_ops where to save N_TTY methods
  *
- *     Enables a 'subclass' line discipline to 'inherit' N_TTY
- *     methods.
+ *     Enables a 'subclass' line discipline to 'inherit' N_TTY methods.
  */
 
 void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
 {
-       *ops = tty_ldisc_N_TTY;
+       *ops = n_tty_ops;
        ops->owner = NULL;
        ops->refcount = ops->flags = 0;
 }
 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
+
+void __init n_tty_init(void)
+{
+       tty_register_ldisc(N_TTY, &n_tty_ops);
+}
index 80f9de907563fc1ddebb2031d26553625e14c0a6..5cc80b80c82b6cb0a90d6887d188510d60e228f5 100644 (file)
@@ -823,7 +823,7 @@ static int receive_data(enum port_type index, struct nozomi *dc)
        struct tty_struct *tty = tty_port_tty_get(&port->port);
        int i, ret;
 
-       read_mem32((u32 *) &size, addr, 4);
+       size = __le32_to_cpu(readl(addr));
        /*  DBG1( "%d bytes port: %d", size, index); */
 
        if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
index b3110040164ae64fa29e66fae2d7f5bd4d7d139f..e16a49b507efbd57734b0b8f522d0e2a5dfe03ce 100644 (file)
@@ -263,8 +263,7 @@ static void pty_set_termios(struct tty_struct *tty,
 {
        /* See if packet mode change of state. */
        if (tty->link && tty->link->packet) {
-               int extproc = (old_termios->c_lflag & EXTPROC) |
-                               (tty->termios.c_lflag & EXTPROC);
+               int extproc = (old_termios->c_lflag & EXTPROC) | L_EXTPROC(tty);
                int old_flow = ((old_termios->c_iflag & IXON) &&
                                (old_termios->c_cc[VSTOP] == '\023') &&
                                (old_termios->c_cc[VSTART] == '\021'));
@@ -406,13 +405,8 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
        if (legacy) {
                /* We always use new tty termios data so we can do this
                   the easy way .. */
-               retval = tty_init_termios(tty);
-               if (retval)
-                       goto err_deinit_tty;
-
-               retval = tty_init_termios(o_tty);
-               if (retval)
-                       goto err_free_termios;
+               tty_init_termios(tty);
+               tty_init_termios(o_tty);
 
                driver->other->ttys[idx] = o_tty;
                driver->ttys[idx] = tty;
@@ -444,12 +438,7 @@ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty,
        tty->count++;
        o_tty->count++;
        return 0;
-err_free_termios:
-       if (legacy)
-               tty_free_termios(tty);
-err_deinit_tty:
-       deinitialize_tty_struct(o_tty);
-       free_tty_struct(o_tty);
+
 err_put_module:
        module_put(driver->other->owner);
 err:
@@ -666,22 +655,22 @@ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
        return tty;
 }
 
-/* We have no need to install and remove our tty objects as devpts does all
-   the work for us */
-
 static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
 {
        return pty_common_install(driver, tty, false);
 }
 
+/* this is called once with whichever end is closed last */
 static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
 {
-}
+       struct inode *ptmx_inode;
 
-/* this is called once with whichever end is closed last */
-static void pty_unix98_shutdown(struct tty_struct *tty)
-{
-       devpts_kill_index(tty->driver_data, tty->index);
+       if (tty->driver->subtype == PTY_TYPE_MASTER)
+               ptmx_inode = tty->driver_data;
+       else
+               ptmx_inode = tty->link->driver_data;
+       devpts_kill_index(ptmx_inode, tty->index);
+       devpts_del_ref(ptmx_inode);
 }
 
 static const struct tty_operations ptm_unix98_ops = {
@@ -697,7 +686,6 @@ static const struct tty_operations ptm_unix98_ops = {
        .unthrottle = pty_unthrottle,
        .ioctl = pty_unix98_ioctl,
        .resize = pty_resize,
-       .shutdown = pty_unix98_shutdown,
        .cleanup = pty_cleanup
 };
 
@@ -715,7 +703,6 @@ static const struct tty_operations pty_unix98_ops = {
        .set_termios = pty_set_termios,
        .start = pty_start,
        .stop = pty_stop,
-       .shutdown = pty_unix98_shutdown,
        .cleanup = pty_cleanup,
 };
 
@@ -773,6 +760,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
        tty->driver_data = inode;
 
+       /*
+        * In the case where all references to ptmx inode are dropped and we
+        * still have /dev/tty opened pointing to the master/slave pair (ptmx
+        * is closed/released before /dev/tty), we must make sure that the inode
+        * is still valid when we call the final pty_unix98_shutdown, thus we
+        * hold an additional reference to the ptmx inode. For the same /dev/tty
+        * last close case, we also need to make sure the super_block isn't
+        * destroyed (devpts instance unmounted), before /dev/tty is closed and
+        * on its release devpts_kill_index is called.
+        */
+       devpts_add_ref(inode);
+
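
The comment added above describes the race this hunk closes: /dev/tty can keep the master/slave pair alive after every file reference to the ptmx inode is gone, so the inode (and the devpts superblock behind it) has to be pinned from ptmx_open() until the final remove has called devpts_kill_index(). A user-space sketch of the same pin/release discipline around a shared store; the names and printouts are invented, only the reference-counting shape mirrors the devpts_add_ref()/devpts_del_ref() pairing in the diff:

    #include <stdio.h>

    /* Stand-in for the devpts inode/superblock the pty pair indexes into. */
    struct pts_store {
            int refs;
            int alive;
    };

    static void store_get(struct pts_store *s) { s->refs++; }

    static void store_put(struct pts_store *s)
    {
            if (--s->refs == 0) {
                    s->alive = 0;             /* backing store may now go away */
                    printf("store torn down\n");
            }
    }

    static void pair_open(struct pts_store *s)
    {
            store_get(s);                     /* like devpts_add_ref() in ptmx_open() */
            printf("pty pair created, store refs=%d\n", s->refs);
    }

    static void pair_last_close(struct pts_store *s)
    {
            /* like devpts_kill_index() + devpts_del_ref() in the final remove */
            if (s->alive)
                    printf("index released while store still valid\n");
            store_put(s);
    }

    int main(void)
    {
            struct pts_store store = { .refs = 1, .alive = 1 };  /* mount holds one ref */

            pair_open(&store);
            store_put(&store);                /* ptmx closed / devpts reference dropped */
            pair_last_close(&store);          /* /dev/tty closed last: still safe */
            return 0;
    }
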
        tty_add_file(tty, filp);
 
        slave_inode = devpts_pty_new(inode,
index 802eac7e561b85f1aa43601a7a9eb94b05d3cb62..0b802cdd70d0938722473b166095802ac96d6e78 100644 (file)
@@ -643,7 +643,6 @@ static void init_r_port(int board, int aiop, int chan, struct pci_dev *pci_dev)
        info->chan = chan;
        tty_port_init(&info->port);
        info->port.ops = &rocket_port_ops;
-       init_completion(&info->close_wait);
        info->flags &= ~ROCKET_MODE_MASK;
        switch (pc104[board][line]) {
        case 422:
@@ -960,7 +959,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
                        tty->alt_speed = 460800;
 
                configure_r_port(tty, info, NULL);
-               if (tty->termios.c_cflag & CBAUD) {
+               if (C_BAUD(tty)) {
                        sSetDTR(cp);
                        sSetRTS(cp);
                }
@@ -1043,13 +1042,12 @@ static void rp_close(struct tty_struct *tty, struct file *filp)
                }
        }
        spin_lock_irq(&port->lock);
-       info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_CLOSING | ASYNC_NORMAL_ACTIVE);
+       info->port.flags &= ~(ASYNC_INITIALIZED | ASYNC_NORMAL_ACTIVE);
        tty->closing = 0;
        spin_unlock_irq(&port->lock);
        mutex_unlock(&port->mutex);
        tty_port_tty_set(port, NULL);
 
-       complete_all(&info->close_wait);
        atomic_dec(&rp_num_ports_open);
 
 #ifdef ROCKET_DEBUG_OPEN
@@ -1086,18 +1084,18 @@ static void rp_set_termios(struct tty_struct *tty,
        cp = &info->channel;
 
        /* Handle transition to B0 status */
-       if ((old_termios->c_cflag & CBAUD) && !(tty->termios.c_cflag & CBAUD)) {
+       if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
                sClrDTR(cp);
                sClrRTS(cp);
        }
 
        /* Handle transition away from B0 status */
-       if (!(old_termios->c_cflag & CBAUD) && (tty->termios.c_cflag & CBAUD)) {
+       if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
                sSetRTS(cp);
                sSetDTR(cp);
        }
 
-       if ((old_termios->c_cflag & CRTSCTS) && !(tty->termios.c_cflag & CRTSCTS))
+       if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty))
                rp_start(tty);
 }
 
@@ -1360,8 +1358,7 @@ static void rp_throttle(struct tty_struct *tty)
        struct r_port *info = tty->driver_data;
 
 #ifdef ROCKET_DEBUG_THROTTLE
-       printk(KERN_INFO "throttle %s: %d....\n", tty->name,
-              tty->ldisc.chars_in_buffer(tty));
+       printk(KERN_INFO "throttle %s ....\n", tty->name);
 #endif
 
        if (rocket_paranoia_check(info, "rp_throttle"))
@@ -1377,8 +1374,7 @@ static void rp_unthrottle(struct tty_struct *tty)
 {
        struct r_port *info = tty->driver_data;
 #ifdef ROCKET_DEBUG_THROTTLE
-       printk(KERN_INFO "unthrottle %s: %d....\n", tty->name,
-              tty->ldisc.chars_in_buffer(tty));
+       printk(KERN_INFO "unthrottle %s ....\n", tty->name);
 #endif
 
        if (rocket_paranoia_check(info, "rp_unthrottle"))
index 67e0f1e778a23d4804de496d22759549966b914c..ef1e1be6b26d885a6efebc1fd45afec241e3977c 100644 (file)
@@ -1144,7 +1144,6 @@ struct r_port {
        int read_status_mask;
        int cps;
 
-       struct completion close_wait;   /* Not yet matching the core */
        spinlock_t slock;
        struct mutex write_mtx;
 };
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
deleted file mode 100644 (file)
index 0982c1a..0000000
+++ /dev/null
@@ -1,1322 +0,0 @@
-/* 68328serial.c: Serial port driver for 68328 microcontroller
- *
- * Copyright (C) 1995       David S. Miller    <davem@caip.rutgers.edu>
- * Copyright (C) 1998       Kenneth Albanowski <kjahds@kjahds.com>
- * Copyright (C) 1998, 1999 D. Jeff Dionne     <jeff@uclinux.org>
- * Copyright (C) 1999       Vladimir Gurevich  <vgurevic@cisco.com>
- * Copyright (C) 2002-2003  David McCullough   <davidm@snapgear.com>
- * Copyright (C) 2002       Greg Ungerer       <gerg@snapgear.com>
- *
- * VZ Support/Fixes             Evan Stawnyczy <e@lineo.ca>
- * Multiple UART support        Daniel Potts <danielp@cse.unsw.edu.au>
- * Power management support     Daniel Potts <danielp@cse.unsw.edu.au>
- * VZ Second Serial Port enable Phil Wilshire
- * 2.4/2.5 port                 David McCullough
- */
-
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/serial.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/major.h>
-#include <linux/string.h>
-#include <linux/fcntl.h>
-#include <linux/mm.h>
-#include <linux/kernel.h>
-#include <linux/console.h>
-#include <linux/reboot.h>
-#include <linux/keyboard.h>
-#include <linux/init.h>
-#include <linux/pm.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/gfp.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/delay.h>
-#include <asm/uaccess.h>
-
-/* (es) */
-/* note: perhaps we can merge these files, so that you can just
- *      define one of them, and they can sort that out for themselves
- */
-#if defined(CONFIG_M68EZ328)
-#include <asm/MC68EZ328.h>
-#else
-#if defined(CONFIG_M68VZ328)
-#include <asm/MC68VZ328.h>
-#else
-#include <asm/MC68328.h>
-#endif /* CONFIG_M68VZ328 */
-#endif /* CONFIG_M68EZ328 */
-
-/* Turn off usage of real serial interrupt code, to "support" Copilot */
-#ifdef CONFIG_XCOPILOT_BUGS
-#undef USE_INTS
-#else
-#define USE_INTS
-#endif
-
-/*
- * I believe this is the optimal setting that reduces the number of interrupts.
- * At high speeds the output might become a little "bursted" (use USTCNT_TXHE
- * if that bothers you), but in most cases it will not, since we try to
- * transmit characters every time rs_interrupt is called. Thus, quite often
- * you'll see that a receive interrupt occurs before the transmit one.
- *                                  -- Vladimir Gurevich
- */
-#define USTCNT_TX_INTR_MASK (USTCNT_TXEE)
-
-/*
- * 68328 and 68EZ328 UARTS are a little bit different. EZ328 has special
- * "Old data interrupt" which occures whenever the data stay in the FIFO
- * longer than 30 bits time. This allows us to use FIFO without compromising
- * latency. The '328 does not have this feature, and without a real 328-based
- * board I would assume that RXRE is the safest setting.
- *
- * For EZ328 I use RXHE (Half empty) interrupt to reduce the number of
- * interrupts. RXFE (receive queue full) causes the system to lose data
- * at least at 115200 baud
- *
- * If your board is busy doing other stuff, you might consider using
- * RXRE (data ready interrupt) instead.
- *
- * The other option is to make these INTR masks run-time configurable, so
- * that people can dynamically adapt them according to the current usage.
- *                                  -- Vladimir Gurevich
- */
-
-/* (es) */
-#if defined(CONFIG_M68EZ328) || defined(CONFIG_M68VZ328)
-#define USTCNT_RX_INTR_MASK (USTCNT_RXHE | USTCNT_ODEN)
-#elif defined(CONFIG_M68328)
-#define USTCNT_RX_INTR_MASK (USTCNT_RXRE)
-#else
-#error Please, define the Rx interrupt events for your CPU
-#endif
-/* (/es) */
-
-/*
- * This is our internal structure for each serial port's state.
- */
-struct m68k_serial {
-       struct tty_port         tport;
-       char                    is_cons;        /* Is this our console. */
-       int                     magic;
-       int                     baud_base;
-       int                     port;
-       int                     irq;
-       int                     type;           /* UART type */
-       int                     custom_divisor;
-       int                     x_char;         /* xon/xoff character */
-       int                     line;
-       unsigned char           *xmit_buf;
-       int                     xmit_head;
-       int                     xmit_tail;
-       int                     xmit_cnt;
-};
-
-#define SERIAL_MAGIC 0x5301
-
-/*
- * Define the number of ports supported and their irqs.
- */
-#define NR_PORTS 1
-
-static struct m68k_serial m68k_soft[NR_PORTS];
-
-static unsigned int uart_irqs[NR_PORTS] = { UART_IRQ_NUM };
-
-/* multiple ports are contiguous in memory */
-m68328_uart *uart_addr = (m68328_uart *)USTCNT_ADDR;
-
-struct tty_driver *serial_driver;
-
-static void change_speed(struct m68k_serial *info, struct tty_struct *tty);
-
-/*
- *     Setup for console. Argument comes from the boot command line.
- */
-
-/* note: this is messy, but it works, again, perhaps defined somewhere else?*/
-#ifdef CONFIG_M68VZ328
-#define CONSOLE_BAUD_RATE      19200
-#define DEFAULT_CBAUD          B19200
-#endif
-
-
-#ifndef CONSOLE_BAUD_RATE
-#define        CONSOLE_BAUD_RATE       9600
-#define        DEFAULT_CBAUD           B9600
-#endif
-
-
-static int m68328_console_initted;
-static int m68328_console_baud    = CONSOLE_BAUD_RATE;
-static int m68328_console_cbaud   = DEFAULT_CBAUD;
-
-
-static inline int serial_paranoia_check(struct m68k_serial *info,
-                                       char *name, const char *routine)
-{
-#ifdef SERIAL_PARANOIA_CHECK
-       static const char *badmagic =
-               "Warning: bad magic number for serial struct %s in %s\n";
-       static const char *badinfo =
-               "Warning: null m68k_serial for %s in %s\n";
-
-       if (!info) {
-               printk(badinfo, name, routine);
-               return 1;
-       }
-       if (info->magic != SERIAL_MAGIC) {
-               printk(badmagic, name, routine);
-               return 1;
-       }
-#endif
-       return 0;
-}
-
-/*
- * This is used to figure out the divisor speeds and the timeouts
- */
-static int baud_table[] = {
-       0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
-       9600, 19200, 38400, 57600, 115200, 0 };
-
-/* Utility routines */
-static inline int get_baud(struct m68k_serial *ss)
-{
-       unsigned long result = 115200;
-       unsigned short int baud = uart_addr[ss->line].ubaud;
-       if (GET_FIELD(baud, UBAUD_PRESCALER) == 0x38) result = 38400;
-       result >>= GET_FIELD(baud, UBAUD_DIVIDE);
-
-       return result;
-}
-
-/*
- * ------------------------------------------------------------
- * rs_stop() and rs_start()
- *
- * These routines are called before setting or resetting tty->stopped.
- * They enable or disable transmitter interrupts, as necessary.
- * ------------------------------------------------------------
- */
-static void rs_stop(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-       m68328_uart *uart = &uart_addr[info->line];
-       unsigned long flags;
-
-       if (serial_paranoia_check(info, tty->name, "rs_stop"))
-               return;
-       
-       local_irq_save(flags);
-       uart->ustcnt &= ~USTCNT_TXEN;
-       local_irq_restore(flags);
-}
-
-static int rs_put_char(char ch)
-{
-       unsigned long flags;
-       int loops = 0;
-
-        local_irq_save(flags);
-
-       while (!(UTX & UTX_TX_AVAIL) && (loops < 1000)) {
-               loops++;
-               udelay(5);
-        }
-
-       UTX_TXDATA = ch;
-        udelay(5);
-        local_irq_restore(flags);
-        return 1;
-}
-
-static void rs_start(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-       m68328_uart *uart = &uart_addr[info->line];
-       unsigned long flags;
-       
-       if (serial_paranoia_check(info, tty->name, "rs_start"))
-               return;
-       
-       local_irq_save(flags);
-       if (info->xmit_cnt && info->xmit_buf && !(uart->ustcnt & USTCNT_TXEN)) {
-#ifdef USE_INTS
-               uart->ustcnt |= USTCNT_TXEN | USTCNT_TX_INTR_MASK;
-#else
-               uart->ustcnt |= USTCNT_TXEN;
-#endif
-       }
-       local_irq_restore(flags);
-}
-
-static void receive_chars(struct m68k_serial *info, unsigned short rx)
-{
-       m68328_uart *uart = &uart_addr[info->line];
-       unsigned char ch, flag;
-
-       /*
-        * This do { } while() loop will get ALL chars out of Rx FIFO 
-         */
-#ifndef CONFIG_XCOPILOT_BUGS
-       do {
-#endif 
-               ch = GET_FIELD(rx, URX_RXDATA);
-       
-               if (info->is_cons) {
-                       if (URX_BREAK & rx) { /* whee, break received */
-                               return;
-#ifdef CONFIG_MAGIC_SYSRQ
-                       } else if (ch == 0x10) { /* ^P */
-                               show_state();
-                               show_free_areas(0);
-                               show_buffers();
-/*                             show_net_buffers(); */
-                               return;
-                       } else if (ch == 0x12) { /* ^R */
-                               emergency_restart();
-                               return;
-#endif /* CONFIG_MAGIC_SYSRQ */
-                       }
-               }
-
-               flag = TTY_NORMAL;
-
-               if (rx & URX_PARITY_ERROR)
-                       flag = TTY_PARITY;
-               else if (rx & URX_OVRUN)
-                       flag = TTY_OVERRUN;
-               else if (rx & URX_FRAME_ERROR)
-                       flag = TTY_FRAME;
-
-               tty_insert_flip_char(&info->tport, ch, flag);
-#ifndef CONFIG_XCOPILOT_BUGS
-       } while ((rx = uart->urx.w) & URX_DATA_READY);
-#endif
-
-       tty_schedule_flip(&info->tport);
-}
-
-static void transmit_chars(struct m68k_serial *info, struct tty_struct *tty)
-{
-       m68328_uart *uart = &uart_addr[info->line];
-
-       if (info->x_char) {
-               /* Send next char */
-               uart->utx.b.txdata = info->x_char;
-               info->x_char = 0;
-               goto clear_and_return;
-       }
-
-       if ((info->xmit_cnt <= 0) || !tty || tty->stopped) {
-               /* That's peculiar... TX ints off */
-               uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
-               goto clear_and_return;
-       }
-
-       /* Send char */
-       uart->utx.b.txdata = info->xmit_buf[info->xmit_tail++];
-       info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
-       info->xmit_cnt--;
-
-       if (info->xmit_cnt <= 0) {
-               /* All done for now... TX ints off */
-               uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
-               goto clear_and_return;
-       }
-
-clear_and_return:
-       /* Clear interrupt (should be auto)*/
-       return;
-}
-
-/*
- * This is the serial driver's generic interrupt routine
- */
-irqreturn_t rs_interrupt(int irq, void *dev_id)
-{
-       struct m68k_serial *info = dev_id;
-       struct tty_struct *tty = tty_port_tty_get(&info->tport);
-       m68328_uart *uart;
-       unsigned short rx;
-       unsigned short tx;
-
-       uart = &uart_addr[info->line];
-       rx = uart->urx.w;
-
-#ifdef USE_INTS
-       tx = uart->utx.w;
-
-       if (rx & URX_DATA_READY)
-               receive_chars(info, rx);
-       if (tx & UTX_TX_AVAIL)
-               transmit_chars(info, tty);
-#else
-       receive_chars(info, rx);
-#endif
-       tty_kref_put(tty);
-
-       return IRQ_HANDLED;
-}
-
-static int startup(struct m68k_serial *info, struct tty_struct *tty)
-{
-       m68328_uart *uart = &uart_addr[info->line];
-       unsigned long flags;
-       
-       if (info->tport.flags & ASYNC_INITIALIZED)
-               return 0;
-
-       if (!info->xmit_buf) {
-               info->xmit_buf = (unsigned char *) __get_free_page(GFP_KERNEL);
-               if (!info->xmit_buf)
-                       return -ENOMEM;
-       }
-
-       local_irq_save(flags);
-
-       /*
-        * Clear the FIFO buffers and disable them
-        * (they will be reenabled in change_speed())
-        */
-
-       uart->ustcnt = USTCNT_UEN;
-       uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_TXEN;
-       (void)uart->urx.w;
-
-       /*
-        * Finally, enable sequencing and interrupts
-        */
-#ifdef USE_INTS
-       uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | 
-                 USTCNT_RX_INTR_MASK | USTCNT_TX_INTR_MASK;
-#else
-       uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
-#endif
-
-       if (tty)
-               clear_bit(TTY_IO_ERROR, &tty->flags);
-       info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
-       /*
-        * and set the speed of the serial port
-        */
-
-       change_speed(info, tty);
-
-       info->tport.flags |= ASYNC_INITIALIZED;
-       local_irq_restore(flags);
-       return 0;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts are disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void shutdown(struct m68k_serial *info, struct tty_struct *tty)
-{
-       m68328_uart *uart = &uart_addr[info->line];
-       unsigned long   flags;
-
-       uart->ustcnt = 0; /* All off! */
-       if (!(info->tport.flags & ASYNC_INITIALIZED))
-               return;
-
-       local_irq_save(flags);
-       
-       if (info->xmit_buf) {
-               free_page((unsigned long) info->xmit_buf);
-               info->xmit_buf = 0;
-       }
-
-       if (tty)
-               set_bit(TTY_IO_ERROR, &tty->flags);
-       
-       info->tport.flags &= ~ASYNC_INITIALIZED;
-       local_irq_restore(flags);
-}
-
-struct {
-       int divisor, prescale;
-}
-#ifndef CONFIG_M68VZ328
- hw_baud_table[18] = {
-       {0, 0}, /* 0 */
-       {0, 0}, /* 50 */
-       {0, 0}, /* 75 */
-       {0, 0}, /* 110 */
-       {0, 0}, /* 134 */
-       {0, 0}, /* 150 */
-       {0, 0}, /* 200 */
-       {7, 0x26}, /* 300 */
-       {6, 0x26}, /* 600 */
-       {5, 0x26}, /* 1200 */
-       {0, 0}, /* 1800 */
-       {4, 0x26}, /* 2400 */
-       {3, 0x26}, /* 4800 */
-       {2, 0x26}, /* 9600 */
-       {1, 0x26}, /* 19200 */
-       {0, 0x26}, /* 38400 */
-       {1, 0x38}, /* 57600 */
-       {0, 0x38}, /* 115200 */
-};
-#else
- hw_baud_table[18] = {
-                 {0, 0}, /* 0 */
-                 {0, 0}, /* 50 */
-                 {0, 0}, /* 75 */
-                 {0, 0}, /* 110 */
-                 {0, 0}, /* 134 */
-                 {0, 0}, /* 150 */
-                 {0, 0}, /* 200 */
-                 {0, 0}, /* 300 */
-                 {7, 0x26}, /* 600 */
-                 {6, 0x26}, /* 1200 */
-                 {0, 0}, /* 1800 */
-                 {5, 0x26}, /* 2400 */
-                 {4, 0x26}, /* 4800 */
-                 {3, 0x26}, /* 9600 */
-                 {2, 0x26}, /* 19200 */
-                 {1, 0x26}, /* 38400 */
-                 {0, 0x26}, /* 57600 */
-                 {1, 0x38}, /* 115200 */
-}; 
-#endif
-/* rate = 1036800 / ((65 - prescale) * (1<<divider)) */
-
-/*
- * This routine is called to set the UART divisor registers to match
- * the specified baud rate for a serial port.
- */
-static void change_speed(struct m68k_serial *info, struct tty_struct *tty)
-{
-       m68328_uart *uart = &uart_addr[info->line];
-       unsigned short port;
-       unsigned short ustcnt;
-       unsigned cflag;
-       int     i;
-
-       cflag = tty->termios.c_cflag;
-       port = info->port;
-       if (!port)
-               return;
-
-       ustcnt = uart->ustcnt;
-       uart->ustcnt = ustcnt & ~USTCNT_TXEN;
-
-       i = cflag & CBAUD;
-        if (i & CBAUDEX) {
-                i = (i & ~CBAUDEX) + B38400;
-        }
-
-       uart->ubaud = PUT_FIELD(UBAUD_DIVIDE,    hw_baud_table[i].divisor) | 
-               PUT_FIELD(UBAUD_PRESCALER, hw_baud_table[i].prescale);
-
-       ustcnt &= ~(USTCNT_PARITYEN | USTCNT_ODD_EVEN | USTCNT_STOP | USTCNT_8_7);
-       
-       if ((cflag & CSIZE) == CS8)
-               ustcnt |= USTCNT_8_7;
-               
-       if (cflag & CSTOPB)
-               ustcnt |= USTCNT_STOP;
-
-       if (cflag & PARENB)
-               ustcnt |= USTCNT_PARITYEN;
-       if (cflag & PARODD)
-               ustcnt |= USTCNT_ODD_EVEN;
-       
-#ifdef CONFIG_SERIAL_68328_RTS_CTS
-       if (cflag & CRTSCTS) {
-               uart->utx.w &= ~UTX_NOCTS;
-       } else {
-               uart->utx.w |= UTX_NOCTS;
-       }
-#endif
-
-       ustcnt |= USTCNT_TXEN;
-       
-       uart->ustcnt = ustcnt;
-       return;
-}
-
-/*
- * Fair output driver allows a process to speak.
- */
-static void rs_fair_output(void)
-{
-       int left;               /* Output no more than that */
-       unsigned long flags;
-       struct m68k_serial *info = &m68k_soft[0];
-       char c;
-
-       if (info == NULL) return;
-       if (info->xmit_buf == NULL) return;
-
-       local_irq_save(flags);
-       left = info->xmit_cnt;
-       while (left != 0) {
-               c = info->xmit_buf[info->xmit_tail];
-               info->xmit_tail = (info->xmit_tail+1) & (SERIAL_XMIT_SIZE-1);
-               info->xmit_cnt--;
-               local_irq_restore(flags);
-
-               rs_put_char(c);
-
-               local_irq_save(flags);
-               left = min(info->xmit_cnt, left-1);
-       }
-
-       /* Last character is being transmitted now (hopefully). */
-       udelay(5);
-
-       local_irq_restore(flags);
-       return;
-}
-
-/*
- * m68k_console_print is registered for printk.
- */
-void console_print_68328(const char *p)
-{
-       char c;
-       
-       while ((c = *(p++)) != 0) {
-               if (c == '\n')
-                       rs_put_char('\r');
-               rs_put_char(c);
-       }
-
-       /* Comment this if you want to have a strict interrupt-driven output */
-       rs_fair_output();
-
-       return;
-}
-
-static void rs_set_ldisc(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
-       if (serial_paranoia_check(info, tty->name, "rs_set_ldisc"))
-               return;
-
-       info->is_cons = (tty->termios.c_line == N_TTY);
-       
-       printk("ttyS%d console mode %s\n", info->line, info->is_cons ? "on" : "off");
-}
-
-static void rs_flush_chars(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-       m68328_uart *uart = &uart_addr[info->line];
-       unsigned long flags;
-
-       if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
-               return;
-#ifndef USE_INTS
-       for (;;) {
-#endif
-
-       /* Enable transmitter */
-       local_irq_save(flags);
-
-       if (info->xmit_cnt <= 0 || tty->stopped || !info->xmit_buf) {
-               local_irq_restore(flags);
-               return;
-       }
-
-#ifdef USE_INTS
-       uart->ustcnt |= USTCNT_TXEN | USTCNT_TX_INTR_MASK;
-#else
-       uart->ustcnt |= USTCNT_TXEN;
-#endif
-
-#ifdef USE_INTS
-       if (uart->utx.w & UTX_TX_AVAIL) {
-#else
-       if (1) {
-#endif
-               /* Send char */
-               uart->utx.b.txdata = info->xmit_buf[info->xmit_tail++];
-               info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
-               info->xmit_cnt--;
-       }
-
-#ifndef USE_INTS
-       while (!(uart->utx.w & UTX_TX_AVAIL)) udelay(5);
-       }
-#endif
-       local_irq_restore(flags);
-}
-
-extern void console_printn(const char *b, int count);
-
-static int rs_write(struct tty_struct *tty,
-                   const unsigned char *buf, int count)
-{
-       int     c, total = 0;
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-       m68328_uart *uart = &uart_addr[info->line];
-       unsigned long flags;
-
-       if (serial_paranoia_check(info, tty->name, "rs_write"))
-               return 0;
-
-       if (!tty || !info->xmit_buf)
-               return 0;
-
-       local_save_flags(flags);
-       while (1) {
-               local_irq_disable();            
-               c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
-                                  SERIAL_XMIT_SIZE - info->xmit_head));
-               local_irq_restore(flags);
-
-               if (c <= 0)
-                       break;
-
-               memcpy(info->xmit_buf + info->xmit_head, buf, c);
-
-               local_irq_disable();
-               info->xmit_head = (info->xmit_head + c) & (SERIAL_XMIT_SIZE-1);
-               info->xmit_cnt += c;
-               local_irq_restore(flags);
-               buf += c;
-               count -= c;
-               total += c;
-       }
-
-       if (info->xmit_cnt && !tty->stopped) {
-               /* Enable transmitter */
-               local_irq_disable();            
-#ifndef USE_INTS
-               while (info->xmit_cnt) {
-#endif
-
-               uart->ustcnt |= USTCNT_TXEN;
-#ifdef USE_INTS
-               uart->ustcnt |= USTCNT_TX_INTR_MASK;
-#else
-               while (!(uart->utx.w & UTX_TX_AVAIL)) udelay(5);
-#endif
-               if (uart->utx.w & UTX_TX_AVAIL) {
-                       uart->utx.b.txdata = info->xmit_buf[info->xmit_tail++];
-                       info->xmit_tail = info->xmit_tail & (SERIAL_XMIT_SIZE-1);
-                       info->xmit_cnt--;
-               }
-
-#ifndef USE_INTS
-               }
-#endif
-               local_irq_restore(flags);
-       }
-
-       return total;
-}
-
-static int rs_write_room(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-       int     ret;
-                               
-       if (serial_paranoia_check(info, tty->name, "rs_write_room"))
-               return 0;
-       ret = SERIAL_XMIT_SIZE - info->xmit_cnt - 1;
-       if (ret < 0)
-               ret = 0;
-       return ret;
-}
-
-static int rs_chars_in_buffer(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-                               
-       if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
-               return 0;
-       return info->xmit_cnt;
-}
-
-static void rs_flush_buffer(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-       unsigned long flags;
-                               
-       if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
-               return;
-       local_irq_save(flags);
-       info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-       local_irq_restore(flags);
-       tty_wakeup(tty);
-}
-
-/*
- * ------------------------------------------------------------
- * rs_throttle()
- * 
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- * ------------------------------------------------------------
- */
-static void rs_throttle(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
-       if (serial_paranoia_check(info, tty->name, "rs_throttle"))
-               return;
-       
-       if (I_IXOFF(tty))
-               info->x_char = STOP_CHAR(tty);
-
-       /* Turn off RTS line (do this atomic) */
-}
-
-static void rs_unthrottle(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
-       if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
-               return;
-       
-       if (I_IXOFF(tty)) {
-               if (info->x_char)
-                       info->x_char = 0;
-               else
-                       info->x_char = START_CHAR(tty);
-       }
-
-       /* Assert RTS line (do this atomic) */
-}
-
-/*
- * ------------------------------------------------------------
- * rs_ioctl() and friends
- * ------------------------------------------------------------
- */
-
-static int get_serial_info(struct m68k_serial *info,
-                          struct serial_struct *retinfo)
-{
-       struct serial_struct tmp;
-  
-       if (!retinfo)
-               return -EFAULT;
-       memset(&tmp, 0, sizeof(tmp));
-       tmp.type = info->type;
-       tmp.line = info->line;
-       tmp.port = info->port;
-       tmp.irq = info->irq;
-       tmp.flags = info->tport.flags;
-       tmp.baud_base = info->baud_base;
-       tmp.close_delay = info->tport.close_delay;
-       tmp.closing_wait = info->tport.closing_wait;
-       tmp.custom_divisor = info->custom_divisor;
-       if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int set_serial_info(struct m68k_serial *info, struct tty_struct *tty,
-                          struct serial_struct *new_info)
-{
-       struct tty_port *port = &info->tport;
-       struct serial_struct new_serial;
-       struct m68k_serial old_info;
-       int                     retval = 0;
-
-       if (!new_info)
-               return -EFAULT;
-       if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
-               return -EFAULT;
-       old_info = *info;
-
-       if (!capable(CAP_SYS_ADMIN)) {
-               if ((new_serial.baud_base != info->baud_base) ||
-                   (new_serial.type != info->type) ||
-                   (new_serial.close_delay != port->close_delay) ||
-                   ((new_serial.flags & ~ASYNC_USR_MASK) !=
-                    (port->flags & ~ASYNC_USR_MASK)))
-                       return -EPERM;
-               port->flags = ((port->flags & ~ASYNC_USR_MASK) |
-                              (new_serial.flags & ASYNC_USR_MASK));
-               info->custom_divisor = new_serial.custom_divisor;
-               goto check_and_exit;
-       }
-
-       if (port->count > 1)
-               return -EBUSY;
-
-       /*
-        * OK, past this point, all the error checking has been done.
-        * At this point, we start making changes.....
-        */
-
-       info->baud_base = new_serial.baud_base;
-       port->flags = ((port->flags & ~ASYNC_FLAGS) |
-                       (new_serial.flags & ASYNC_FLAGS));
-       info->type = new_serial.type;
-       port->close_delay = new_serial.close_delay;
-       port->closing_wait = new_serial.closing_wait;
-
-check_and_exit:
-       retval = startup(info, tty);
-       return retval;
-}
-
-/*
- * get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- *         is emptied.  On bus types like RS485, the transmitter must
- *         release the bus after transmitting. This must be done when
- *         the transmit shift register is empty, not be done when the
- *         transmit holding register is empty.  This functionality
- *         allows an RS485 driver to be written in user space. 
- */
-static int get_lsr_info(struct m68k_serial *info, unsigned int *value)
-{
-#ifdef CONFIG_SERIAL_68328_RTS_CTS
-       m68328_uart *uart = &uart_addr[info->line];
-#endif
-       unsigned char status;
-       unsigned long flags;
-
-       local_irq_save(flags);
-#ifdef CONFIG_SERIAL_68328_RTS_CTS
-       status = (uart->utx.w & UTX_CTS_STAT) ? 1 : 0;
-#else
-       status = 0;
-#endif
-       local_irq_restore(flags);
-       return put_user(status, value);
-}
-
-/*
- * This routine sends a break character out the serial port.
- */
-static void send_break(struct m68k_serial *info, unsigned int duration)
-{
-       m68328_uart *uart = &uart_addr[info->line];
-        unsigned long flags;
-        if (!info->port)
-                return;
-        local_irq_save(flags);
-#ifdef USE_INTS        
-       uart->utx.w |= UTX_SEND_BREAK;
-       msleep_interruptible(duration);
-       uart->utx.w &= ~UTX_SEND_BREAK;
-#endif         
-        local_irq_restore(flags);
-}
-
-static int rs_ioctl(struct tty_struct *tty,
-                   unsigned int cmd, unsigned long arg)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-       int retval;
-
-       if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
-               return -ENODEV;
-
-       if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) &&
-           (cmd != TIOCSERCONFIG) && (cmd != TIOCSERGWILD)  &&
-           (cmd != TIOCSERSWILD) && (cmd != TIOCSERGSTRUCT)) {
-               if (tty->flags & (1 << TTY_IO_ERROR))
-                   return -EIO;
-       }
-       
-       switch (cmd) {
-               case TCSBRK:    /* SVID version: non-zero arg --> no break */
-                       retval = tty_check_change(tty);
-                       if (retval)
-                               return retval;
-                       tty_wait_until_sent(tty, 0);
-                       if (!arg)
-                               send_break(info, 250);  /* 1/4 second */
-                       return 0;
-               case TCSBRKP:   /* support for POSIX tcsendbreak() */
-                       retval = tty_check_change(tty);
-                       if (retval)
-                               return retval;
-                       tty_wait_until_sent(tty, 0);
-                       send_break(info, arg ? arg*(100) : 250);
-                       return 0;
-               case TIOCGSERIAL:
-                       return get_serial_info(info,
-                                      (struct serial_struct *) arg);
-               case TIOCSSERIAL:
-                       return set_serial_info(info, tty,
-                                              (struct serial_struct *) arg);
-               case TIOCSERGETLSR: /* Get line status register */
-                       return get_lsr_info(info, (unsigned int *) arg);
-               case TIOCSERGSTRUCT:
-                       if (copy_to_user((struct m68k_serial *) arg,
-                                   info, sizeof(struct m68k_serial)))
-                               return -EFAULT;
-                       return 0;
-               default:
-                       return -ENOIOCTLCMD;
-               }
-       return 0;
-}
-
-static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-
-       change_speed(info, tty);
-
-       if ((old_termios->c_cflag & CRTSCTS) &&
-           !(tty->termios.c_cflag & CRTSCTS))
-               rs_start(tty);
-       
-}
-
-/*
- * ------------------------------------------------------------
- * rs_close()
- * 
- * This routine is called when the serial port gets closed.  First, we
- * wait for the last remaining data to be sent.  Then, we unlink its
- * S structure from the interrupt chain if necessary, and we free
- * that IRQ if nothing is left in the chain.
- * ------------------------------------------------------------
- */
-static void rs_close(struct tty_struct *tty, struct file *filp)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-       struct tty_port *port = &info->tport;
-       m68328_uart *uart = &uart_addr[info->line];
-       unsigned long flags;
-
-       if (serial_paranoia_check(info, tty->name, "rs_close"))
-               return;
-       
-       local_irq_save(flags);
-       
-       if (tty_hung_up_p(filp)) {
-               local_irq_restore(flags);
-               return;
-       }
-       
-       if ((tty->count == 1) && (port->count != 1)) {
-               /*
-                * Uh, oh.  tty->count is 1, which means that the tty
-                * structure will be freed.  Info->count should always
-                * be one in these conditions.  If it's greater than
-                * one, we've got real problems, since it means the
-                * serial port won't be shutdown.
-                */
-               printk("rs_close: bad serial port count; tty->count is 1, "
-                      "port->count is %d\n", port->count);
-               port->count = 1;
-       }
-       if (--port->count < 0) {
-               printk("rs_close: bad serial port count for ttyS%d: %d\n",
-                      info->line, port->count);
-               port->count = 0;
-       }
-       if (port->count) {
-               local_irq_restore(flags);
-               return;
-       }
-       port->flags |= ASYNC_CLOSING;
-       /*
-        * Now we wait for the transmit buffer to clear; and we notify 
-        * the line discipline to only process XON/XOFF characters.
-        */
-       tty->closing = 1;
-       if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
-               tty_wait_until_sent(tty, port->closing_wait);
-       /*
-        * At this point we stop accepting input.  To do this, we
-        * disable the receive line status interrupts, and tell the
-        * interrupt driver to stop checking the data ready bit in the
-        * line status register.
-        */
-
-       uart->ustcnt &= ~USTCNT_RXEN;
-       uart->ustcnt &= ~(USTCNT_RXEN | USTCNT_RX_INTR_MASK);
-
-       shutdown(info, tty);
-       rs_flush_buffer(tty);
-               
-       tty_ldisc_flush(tty);
-       tty->closing = 0;
-       tty_port_tty_set(&info->tport, NULL);
-#warning "This is not and has never been valid so fix it"      
-#if 0
-       if (tty->ldisc.num != ldiscs[N_TTY].num) {
-               if (tty->ldisc.close)
-                       (tty->ldisc.close)(tty);
-               tty->ldisc = ldiscs[N_TTY];
-               tty->termios.c_line = N_TTY;
-               if (tty->ldisc.open)
-                       (tty->ldisc.open)(tty);
-       }
-#endif 
-       if (port->blocked_open) {
-               if (port->close_delay)
-                       msleep_interruptible(jiffies_to_msecs(port->close_delay));
-               wake_up_interruptible(&port->open_wait);
-       }
-       port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
-       local_irq_restore(flags);
-}
-
-/*
- * rs_hangup() --- called by tty_hangup() when a hangup is signaled.
- */
-void rs_hangup(struct tty_struct *tty)
-{
-       struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
-       
-       if (serial_paranoia_check(info, tty->name, "rs_hangup"))
-               return;
-       
-       rs_flush_buffer(tty);
-       shutdown(info, tty);
-       info->tport.count = 0;
-       info->tport.flags &= ~ASYNC_NORMAL_ACTIVE;
-       tty_port_tty_set(&info->tport, NULL);
-       wake_up_interruptible(&info->tport.open_wait);
-}
-
-/*
- * This routine is called whenever a serial port is opened.  It
- * enables interrupts for a serial port, linking in its S structure into
- * the IRQ chain.   It also performs the serial-specific
- * initialization for the tty structure.
- */
-int rs_open(struct tty_struct *tty, struct file *filp)
-{
-       struct m68k_serial      *info;
-       int retval;
-
-       info = &m68k_soft[tty->index];
-
-       if (serial_paranoia_check(info, tty->name, "rs_open"))
-               return -ENODEV;
-
-       info->tport.count++;
-       tty->driver_data = info;
-       tty_port_tty_set(&info->tport, tty);
-
-       /*
-        * Start up serial port
-        */
-       retval = startup(info, tty);
-       if (retval)
-               return retval;
-
-       return tty_port_block_til_ready(&info->tport, tty, filp);
-}
-
-/* Finally, routines used to initialize the serial driver. */
-
-static void show_serial_version(void)
-{
-       printk("MC68328 serial driver version 1.00\n");
-}
-
-static const struct tty_operations rs_ops = {
-       .open = rs_open,
-       .close = rs_close,
-       .write = rs_write,
-       .flush_chars = rs_flush_chars,
-       .write_room = rs_write_room,
-       .chars_in_buffer = rs_chars_in_buffer,
-       .flush_buffer = rs_flush_buffer,
-       .ioctl = rs_ioctl,
-       .throttle = rs_throttle,
-       .unthrottle = rs_unthrottle,
-       .set_termios = rs_set_termios,
-       .stop = rs_stop,
-       .start = rs_start,
-       .hangup = rs_hangup,
-       .set_ldisc = rs_set_ldisc,
-};
-
-static const struct tty_port_operations rs_port_ops = {
-};
-
-/* rs_init inits the driver */
-static int __init
-rs68328_init(void)
-{
-       unsigned long flags;
-       int i;
-       struct m68k_serial *info;
-
-       serial_driver = alloc_tty_driver(NR_PORTS);
-       if (!serial_driver)
-               return -ENOMEM;
-
-       show_serial_version();
-
-       /* Initialize the tty_driver structure */
-       /* SPARC: Not all of this is exactly right for us. */
-       
-       serial_driver->name = "ttyS";
-       serial_driver->major = TTY_MAJOR;
-       serial_driver->minor_start = 64;
-       serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
-       serial_driver->subtype = SERIAL_TYPE_NORMAL;
-       serial_driver->init_termios = tty_std_termios;
-       serial_driver->init_termios.c_cflag = 
-                       m68328_console_cbaud | CS8 | CREAD | HUPCL | CLOCAL;
-       serial_driver->flags = TTY_DRIVER_REAL_RAW;
-       tty_set_operations(serial_driver, &rs_ops);
-
-       local_irq_save(flags);
-
-       for (i = 0; i < NR_PORTS; i++) {
-
-           info = &m68k_soft[i];
-           tty_port_init(&info->tport);
-           info->tport.ops = &rs_port_ops;
-           info->magic = SERIAL_MAGIC;
-           info->port = (int) &uart_addr[i];
-           info->irq = uart_irqs[i];
-           info->custom_divisor = 16;
-           info->x_char = 0;
-           info->line = i;
-           info->is_cons = 1; /* Means shortcuts work */
-           
-           printk("%s%d at 0x%08x (irq = %d)", serial_driver->name, info->line, 
-                  info->port, info->irq);
-           printk(" is a builtin MC68328 UART\n");
-           
-#ifdef CONFIG_M68VZ328
-               if (i > 0)
-                       PJSEL &= 0xCF;  /* PSW enable second port output */
-#endif
-
-           if (request_irq(uart_irqs[i],
-                           rs_interrupt,
-                           0,
-                           "M68328_UART", info))
-                panic("Unable to attach 68328 serial interrupt\n");
-
-           tty_port_link_device(&info->tport, serial_driver, i);
-       }
-       local_irq_restore(flags);
-
-       if (tty_register_driver(serial_driver)) {
-               put_tty_driver(serial_driver);
-               for (i = 0; i < NR_PORTS; i++)
-                       tty_port_destroy(&m68k_soft[i].tport);
-               printk(KERN_ERR "Couldn't register serial driver\n");
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-module_init(rs68328_init);
-
-
-
-static void m68328_set_baud(void)
-{
-       unsigned short ustcnt;
-       int     i;
-
-       ustcnt = USTCNT;
-       USTCNT = ustcnt & ~USTCNT_TXEN;
-
-again:
-       for (i = 0; i < ARRAY_SIZE(baud_table); i++)
-               if (baud_table[i] == m68328_console_baud)
-                       break;
-       if (i >= ARRAY_SIZE(baud_table)) {
-               m68328_console_baud = 9600;
-               goto again;
-       }
-
-       UBAUD = PUT_FIELD(UBAUD_DIVIDE,    hw_baud_table[i].divisor) | 
-               PUT_FIELD(UBAUD_PRESCALER, hw_baud_table[i].prescale);
-       ustcnt &= ~(USTCNT_PARITYEN | USTCNT_ODD_EVEN | USTCNT_STOP | USTCNT_8_7);
-       ustcnt |= USTCNT_8_7;
-       ustcnt |= USTCNT_TXEN;
-       USTCNT = ustcnt;
-       m68328_console_initted = 1;
-       return;
-}
-
-
-int m68328_console_setup(struct console *cp, char *arg)
-{
-       int             i, n = CONSOLE_BAUD_RATE;
-
-       if (!cp)
-               return(-1);
-
-       if (arg)
-               n = simple_strtoul(arg, NULL, 0);
-
-       for (i = 0; i < ARRAY_SIZE(baud_table); i++)
-               if (baud_table[i] == n)
-                       break;
-       if (i < ARRAY_SIZE(baud_table)) {
-               m68328_console_baud = n;
-               m68328_console_cbaud = 0;
-               if (i > 15) {
-                       m68328_console_cbaud |= CBAUDEX;
-                       i -= 15;
-               }
-               m68328_console_cbaud |= i;
-       }
-
-       m68328_set_baud(); /* make sure baud rate changes */
-       return 0;
-}
-
-
-static struct tty_driver *m68328_console_device(struct console *c, int *index)
-{
-       *index = c->index;
-       return serial_driver;
-}
-
-
-void m68328_console_write (struct console *co, const char *str,
-                          unsigned int count)
-{
-       if (!m68328_console_initted)
-               m68328_set_baud();
-    while (count--) {
-        if (*str == '\n')
-           rs_put_char('\r');
-        rs_put_char(*str++);
-    }
-}
-
-
-static struct console m68328_driver = {
-       .name           = "ttyS",
-       .write          = m68328_console_write,
-       .device         = m68328_console_device,
-       .setup          = m68328_console_setup,
-       .flags          = CON_PRINTBUFFER,
-       .index          = -1,
-};
-
-
-static int __init m68328_console_init(void)
-{
-       register_console(&m68328_driver);
-       return 0;
-}
-
-console_initcall(m68328_console_init);
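The hw_baud_table comment in the file removed above gives the 68328 baud generator as rate = 1036800 / ((65 - prescale) * (1 << divider)). The stand-alone check below plugs a few {divider, prescale} pairs copied from the non-VZ table into that formula; it is only a sanity sketch of the arithmetic, not driver code:

#include <stdio.h>

/* {divider, prescale} pairs from the removed hw_baud_table (non-VZ variant). */
static const struct { int divider, prescale, expected; } samples[] = {
	{ 2, 0x26,   9600 },
	{ 1, 0x26,  19200 },
	{ 1, 0x38,  57600 },
	{ 0, 0x38, 115200 },
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		int rate = 1036800 /
			   ((65 - samples[i].prescale) * (1 << samples[i].divider));

		printf("div=%d prescale=0x%02x -> %d (expected %d)\n",
		       samples[i].divider, samples[i].prescale,
		       rate, samples[i].expected);
	}
	return 0;
}

All four entries land exactly on the rates listed in the table comments, which is what the removed change_speed() relied on when programming UBAUD.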
index d54dcd87c67e2948c3e39e772dbef0fc48378827..047a7ba6796ad1f3b96e1f3eb9e3bab3a167a01b 100644 (file)
@@ -92,6 +92,18 @@ struct serial8250_config {
 #define SERIAL8250_SHARE_IRQS 0
 #endif
 
+#define SERIAL8250_PORT_FLAGS(_base, _irq, _flags)             \
+       {                                                       \
+               .iobase         = _base,                        \
+               .irq            = _irq,                         \
+               .uartclk        = 1843200,                      \
+               .iotype         = UPIO_PORT,                    \
+               .flags          = UPF_BOOT_AUTOCONF | (_flags), \
+       }
+
+#define SERIAL8250_PORT(_base, _irq) SERIAL8250_PORT_FLAGS(_base, _irq, 0)
+
+
 static inline int serial_in(struct uart_8250_port *up, int offset)
 {
        return up->port.serial_in(&up->port, offset);
@@ -117,6 +129,8 @@ static inline void serial_dl_write(struct uart_8250_port *up, int value)
 struct uart_8250_port *serial8250_get_port(int line);
 void serial8250_rpm_get(struct uart_8250_port *p);
 void serial8250_rpm_put(struct uart_8250_port *p);
+int serial8250_em485_init(struct uart_8250_port *p);
+void serial8250_em485_destroy(struct uart_8250_port *p);
 
 #if defined(__alpha__) && !defined(CONFIG_PCI)
 /*
index 34b51c651192e8d81a6c9c9453c4a206a3405a20..522aeae05192c11ada5310e0d9628ae3fc3da0c7 100644 (file)
 #include <linux/init.h>
 #include <linux/serial_8250.h>
 
-#define PORT(_base,_irq)                               \
-       {                                               \
-               .iobase         = _base,                \
-               .irq            = _irq,                 \
-               .uartclk        = 1843200,              \
-               .iotype         = UPIO_PORT,            \
-               .flags          = UPF_BOOT_AUTOCONF,    \
-       }
+#include "8250.h"
 
 static struct plat_serial8250_port accent_data[] = {
-       PORT(0x330, 4),
-       PORT(0x338, 4),
+       SERIAL8250_PORT(0x330, 4),
+       SERIAL8250_PORT(0x338, 4),
        { },
 };
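The new SERIAL8250_PORT()/SERIAL8250_PORT_FLAGS() macros in 8250.h fold the identical per-board PORT() definitions (accent.c above, boca.c further below) into one place, with an extra _flags argument OR-ed into UPF_BOOT_AUTOCONF. A minimal sketch of how a board table would use the flags variant; the board, I/O addresses and IRQ here are made up for illustration:

#include <linux/serial_8250.h>

#include "8250.h"

/* Hypothetical two-port ISA board; both ports share IRQ 5. */
static struct plat_serial8250_port example_board_data[] = {
	SERIAL8250_PORT(0x2f8, 5),
	SERIAL8250_PORT_FLAGS(0x3f8, 5, UPF_SHARE_IRQ),
	{ },
};

boca.c's sixteen PORT(...) entries convert to SERIAL8250_PORT(...) one-for-one, as its hunk below shows.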
 
index 549aa07c0d271dd6c706e80eccfc4abb698b7dfc..402dfdd4940e53d172bf47b7f736d5faabaaa804 100644 (file)
@@ -70,7 +70,7 @@ serial_card_probe(struct expansion_card *ec, const struct ecard_id *id)
        uart.port.regshift      = 2;
        uart.port.dev   = &ec->dev;
 
-       for (i = 0; i < info->num_ports; i ++) {
+       for (i = 0; i < info->num_ports; i++) {
                uart.port.membase = info->vaddr + type->offset[i];
                uart.port.mapbase = bus_addr + type->offset[i];
 
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
new file mode 100644 (file)
index 0000000..ecf89f1
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * Serial port driver for BCM2835AUX UART
+ *
+ * Copyright (C) 2016 Martin Sperl <kernel@martin.sperl.org>
+ *
+ * Based on 8250_lpc18xx.c:
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "8250.h"
+
+struct bcm2835aux_data {
+       struct uart_8250_port uart;
+       struct clk *clk;
+       int line;
+};
+
+static int bcm2835aux_serial_probe(struct platform_device *pdev)
+{
+       struct bcm2835aux_data *data;
+       struct resource *res;
+       int ret;
+
+       /* allocate the custom structure */
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       /* initialize data */
+       spin_lock_init(&data->uart.port.lock);
+       data->uart.capabilities = UART_CAP_FIFO;
+       data->uart.port.dev = &pdev->dev;
+       data->uart.port.regshift = 2;
+       data->uart.port.type = PORT_16550;
+       data->uart.port.iotype = UPIO_MEM;
+       data->uart.port.fifosize = 8;
+       data->uart.port.flags = UPF_SHARE_IRQ |
+                               UPF_FIXED_PORT |
+                               UPF_FIXED_TYPE |
+                               UPF_SKIP_TEST;
+
+       /* get the clock - this also enables the HW */
+       data->clk = devm_clk_get(&pdev->dev, NULL);
+       ret = PTR_ERR_OR_ZERO(data->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "could not get clk: %d\n", ret);
+               return ret;
+       }
+
+       /* get the interrupt */
+       data->uart.port.irq = platform_get_irq(pdev, 0);
+       if (data->uart.port.irq < 0) {
+               dev_err(&pdev->dev, "irq not found - %i",
+                       data->uart.port.irq);
+               return data->uart.port.irq;
+       }
+
+       /* map the main registers */
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               dev_err(&pdev->dev, "memory resource not found");
+               return -EINVAL;
+       }
+       data->uart.port.membase = devm_ioremap_resource(&pdev->dev, res);
+       ret = PTR_ERR_OR_ZERO(data->uart.port.membase);
+       if (ret)
+               return ret;
+
+       /* Check for a fixed line number */
+       ret = of_alias_get_id(pdev->dev.of_node, "serial");
+       if (ret >= 0)
+               data->uart.port.line = ret;
+
+       /* enable the clock as a last step */
+       ret = clk_prepare_enable(data->clk);
+       if (ret) {
+               dev_err(&pdev->dev, "unable to enable uart clock - %d\n",
+                       ret);
+               return ret;
+       }
+
+       /* the HW-clock divider for bcm2835aux is 8,
+        * but 8250 expects a divider of 16,
+        * so we have to multiply the actual clock by 2
+        * to get identical baudrates.
+        */
+       data->uart.port.uartclk = clk_get_rate(data->clk) * 2;
+
+       /* register the port */
+       ret = serial8250_register_8250_port(&data->uart);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "unable to register 8250 port - %d\n",
+                       ret);
+               goto dis_clk;
+       }
+       data->line = ret;
+
+       platform_set_drvdata(pdev, data);
+
+       return 0;
+
+dis_clk:
+       clk_disable_unprepare(data->clk);
+       return ret;
+}
+
+static int bcm2835aux_serial_remove(struct platform_device *pdev)
+{
+       struct bcm2835aux_data *data = platform_get_drvdata(pdev);
+
+       serial8250_unregister_port(data->uart.port.line);
+       clk_disable_unprepare(data->clk);
+
+       return 0;
+}
+
+static const struct of_device_id bcm2835aux_serial_match[] = {
+       { .compatible = "brcm,bcm2835-aux-uart" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, bcm2835aux_serial_match);
+
+static struct platform_driver bcm2835aux_serial_driver = {
+       .driver = {
+               .name = "bcm2835-aux-uart",
+               .of_match_table = bcm2835aux_serial_match,
+       },
+       .probe  = bcm2835aux_serial_probe,
+       .remove = bcm2835aux_serial_remove,
+};
+module_platform_driver(bcm2835aux_serial_driver);
+
+MODULE_DESCRIPTION("BCM2835 auxiliar UART driver");
+MODULE_AUTHOR("Martin Sperl <kernel@martin.sperl.org>");
+MODULE_LICENSE("GPL v2");
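The probe comment above explains the uartclk doubling: the bcm2835aux baud generator divides by 8, while the 8250 core assumes baud = uartclk / (16 * divisor). Reporting twice the real clock makes the two agree. A stand-alone arithmetic sketch; the 500 MHz core clock is purely an assumed example value:

#include <stdio.h>

/* Standard 8250 divisor math: baud = uartclk / (16 * divisor), rounded. */
static unsigned int divisor_for(unsigned int uartclk, unsigned int baud)
{
	return (uartclk + 8 * baud) / (16 * baud);
}

int main(void)
{
	unsigned int clk = 500000000;	/* assumed core clock, for illustration */
	unsigned int uartclk = clk * 2;	/* what the probe function reports */
	unsigned int baud = 115200;
	unsigned int div = divisor_for(uartclk, baud);

	/*
	 * Dividing the doubled clock by (16 * divisor) equals dividing the
	 * real clock by (8 * divisor), i.e. what the hardware actually does.
	 */
	printf("divisor=%u -> hardware baud %u\n", div, clk / (8 * div));
	return 0;
}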
index d125dc107985b28f59bb302a636a3e3aa3cc2028..a63b5998e383059cb5261cd7606ceae8e6bfec34 100644 (file)
 #include <linux/init.h>
 #include <linux/serial_8250.h>
 
-#define PORT(_base,_irq)                               \
-       {                                               \
-               .iobase         = _base,                \
-               .irq            = _irq,                 \
-               .uartclk        = 1843200,              \
-               .iotype         = UPIO_PORT,            \
-               .flags          = UPF_BOOT_AUTOCONF,    \
-       }
+#include "8250.h"
 
 static struct plat_serial8250_port boca_data[] = {
-       PORT(0x100, 12),
-       PORT(0x108, 12),
-       PORT(0x110, 12),
-       PORT(0x118, 12),
-       PORT(0x120, 12),
-       PORT(0x128, 12),
-       PORT(0x130, 12),
-       PORT(0x138, 12),
-       PORT(0x140, 12),
-       PORT(0x148, 12),
-       PORT(0x150, 12),
-       PORT(0x158, 12),
-       PORT(0x160, 12),
-       PORT(0x168, 12),
-       PORT(0x170, 12),
-       PORT(0x178, 12),
+       SERIAL8250_PORT(0x100, 12),
+       SERIAL8250_PORT(0x108, 12),
+       SERIAL8250_PORT(0x110, 12),
+       SERIAL8250_PORT(0x118, 12),
+       SERIAL8250_PORT(0x120, 12),
+       SERIAL8250_PORT(0x128, 12),
+       SERIAL8250_PORT(0x130, 12),
+       SERIAL8250_PORT(0x138, 12),
+       SERIAL8250_PORT(0x140, 12),
+       SERIAL8250_PORT(0x148, 12),
+       SERIAL8250_PORT(0x150, 12),
+       SERIAL8250_PORT(0x158, 12),
+       SERIAL8250_PORT(0x160, 12),
+       SERIAL8250_PORT(0x168, 12),
+       SERIAL8250_PORT(0x170, 12),
+       SERIAL8250_PORT(0x178, 12),
        { },
 };
 
index c9720a97a977801f2b82b084c3580f0d1619734c..77752216de0e895b3a6e29019d4e580aef455f36 100644 (file)
@@ -597,6 +597,7 @@ static void univ8250_console_write(struct console *co, const char *s,
 static int univ8250_console_setup(struct console *co, char *options)
 {
        struct uart_port *port;
+       int retval;
 
        /*
         * Check whether an invalid uart number has been specified, and
@@ -609,7 +610,10 @@ static int univ8250_console_setup(struct console *co, char *options)
        /* link port to console */
        port->cons = co;
 
-       return serial8250_console_setup(port, options, false);
+       retval = serial8250_console_setup(port, options, false);
+       if (retval != 0)
+               port->cons = NULL;
+       return retval;
 }
 
 /**
@@ -687,7 +691,7 @@ static int __init univ8250_console_init(void)
 }
 console_initcall(univ8250_console_init);
 
-#define SERIAL8250_CONSOLE     &univ8250_console
+#define SERIAL8250_CONSOLE     (&univ8250_console)
 #else
 #define SERIAL8250_CONSOLE     NULL
 #endif
@@ -764,6 +768,7 @@ void serial8250_suspend_port(int line)
 
        uart_suspend_port(&serial8250_reg, port);
 }
+EXPORT_SYMBOL(serial8250_suspend_port);
 
 /**
  *     serial8250_resume_port - resume one serial port
@@ -789,6 +794,7 @@ void serial8250_resume_port(int line)
        }
        uart_resume_port(&serial8250_reg, port);
 }
+EXPORT_SYMBOL(serial8250_resume_port);
 
 /*
  * Register a set of serial devices attached to a platform device.  The
@@ -1093,9 +1099,8 @@ static int __init serial8250_init(void)
 
        serial8250_isa_init_ports();
 
-       printk(KERN_INFO "Serial: 8250/16550 driver, "
-               "%d ports, IRQ sharing %sabled\n", nr_uarts,
-               share_irqs ? "en" : "dis");
+       pr_info("Serial: 8250/16550 driver, %d ports, IRQ sharing %sabled\n",
+               nr_uarts, share_irqs ? "en" : "dis");
 
 #ifdef CONFIG_SPARC
        ret = sunserial_register_minors(&serial8250_reg, UART_NR);
@@ -1168,15 +1173,11 @@ static void __exit serial8250_exit(void)
 module_init(serial8250_init);
 module_exit(serial8250_exit);
 
-EXPORT_SYMBOL(serial8250_suspend_port);
-EXPORT_SYMBOL(serial8250_resume_port);
-
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Generic 8250/16x50 serial driver");
 
 module_param(share_irqs, uint, 0644);
-MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
-       " (unsafe)");
+MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices (unsafe)");
 
 module_param(nr_uarts, uint, 0644);
 MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
index a5d319e4aae65dad90f64bafd33c2ad02bed7034..30810acf7f962de935e5432f436a7dad4500e446 100644 (file)
@@ -95,25 +95,45 @@ static void dw8250_force_idle(struct uart_port *p)
        (void)p->serial_in(p, UART_RX);
 }
 
-static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+static void dw8250_check_lcr(struct uart_port *p, int value)
 {
-       writeb(value, p->membase + (offset << p->regshift));
+       void __iomem *offset = p->membase + (UART_LCR << p->regshift);
+       int tries = 1000;
 
        /* Make sure LCR write wasn't ignored */
-       if (offset == UART_LCR) {
-               int tries = 1000;
-               while (tries--) {
-                       unsigned int lcr = p->serial_in(p, UART_LCR);
-                       if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
-                               return;
-                       dw8250_force_idle(p);
-                       writeb(value, p->membase + (UART_LCR << p->regshift));
-               }
-               /*
-                * FIXME: this deadlocks if port->lock is already held
-                * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
-                */
+       while (tries--) {
+               unsigned int lcr = p->serial_in(p, UART_LCR);
+
+               if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
+                       return;
+
+               dw8250_force_idle(p);
+
+#ifdef CONFIG_64BIT
+               __raw_writeq(value & 0xff, offset);
+#else
+               if (p->iotype == UPIO_MEM32)
+                       writel(value, offset);
+               else if (p->iotype == UPIO_MEM32BE)
+                       iowrite32be(value, offset);
+               else
+                       writeb(value, offset);
+#endif
        }
+       /*
+        * FIXME: this deadlocks if port->lock is already held
+        * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
+        */
+}
+
+static void dw8250_serial_out(struct uart_port *p, int offset, int value)
+{
+       struct dw8250_data *d = p->private_data;
+
+       writeb(value, p->membase + (offset << p->regshift));
+
+       if (offset == UART_LCR && !d->uart_16550_compatible)
+               dw8250_check_lcr(p, value);
 }
 
 static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
@@ -135,49 +155,26 @@ static unsigned int dw8250_serial_inq(struct uart_port *p, int offset)
 
 static void dw8250_serial_outq(struct uart_port *p, int offset, int value)
 {
+       struct dw8250_data *d = p->private_data;
+
        value &= 0xff;
        __raw_writeq(value, p->membase + (offset << p->regshift));
        /* Read back to ensure register write ordering. */
        __raw_readq(p->membase + (UART_LCR << p->regshift));
 
-       /* Make sure LCR write wasn't ignored */
-       if (offset == UART_LCR) {
-               int tries = 1000;
-               while (tries--) {
-                       unsigned int lcr = p->serial_in(p, UART_LCR);
-                       if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
-                               return;
-                       dw8250_force_idle(p);
-                       __raw_writeq(value & 0xff,
-                                    p->membase + (UART_LCR << p->regshift));
-               }
-               /*
-                * FIXME: this deadlocks if port->lock is already held
-                * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
-                */
-       }
+       if (offset == UART_LCR && !d->uart_16550_compatible)
+               dw8250_check_lcr(p, value);
 }
 #endif /* CONFIG_64BIT */
 
 static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
 {
+       struct dw8250_data *d = p->private_data;
+
        writel(value, p->membase + (offset << p->regshift));
 
-       /* Make sure LCR write wasn't ignored */
-       if (offset == UART_LCR) {
-               int tries = 1000;
-               while (tries--) {
-                       unsigned int lcr = p->serial_in(p, UART_LCR);
-                       if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
-                               return;
-                       dw8250_force_idle(p);
-                       writel(value, p->membase + (UART_LCR << p->regshift));
-               }
-               /*
-                * FIXME: this deadlocks if port->lock is already held
-                * dev_err(p->dev, "Couldn't set LCR to %d\n", value);
-                */
-       }
+       if (offset == UART_LCR && !d->uart_16550_compatible)
+               dw8250_check_lcr(p, value);
 }
 
 static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
@@ -187,6 +184,24 @@ static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
        return dw8250_modify_msr(p, offset, value);
 }
 
+static void dw8250_serial_out32be(struct uart_port *p, int offset, int value)
+{
+       struct dw8250_data *d = p->private_data;
+
+       iowrite32be(value, p->membase + (offset << p->regshift));
+
+       if (offset == UART_LCR && !d->uart_16550_compatible)
+               dw8250_check_lcr(p, value);
+}
+
+static unsigned int dw8250_serial_in32be(struct uart_port *p, int offset)
+{
+       unsigned int value = ioread32be(p->membase + (offset << p->regshift));
+
+       return dw8250_modify_msr(p, offset, value);
+}
+
+
 static int dw8250_handle_irq(struct uart_port *p)
 {
        struct dw8250_data *d = p->private_data;
@@ -281,6 +296,11 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
                        data->skip_autocfg = true;
                }
 #endif
+               if (of_device_is_big_endian(p->dev->of_node)) {
+                       p->iotype = UPIO_MEM32BE;
+                       p->serial_in = dw8250_serial_in32be;
+                       p->serial_out = dw8250_serial_out32be;
+               }
        } else if (has_acpi_companion(p->dev)) {
                p->iotype = UPIO_MEM32;
                p->regshift = 2;
@@ -309,14 +329,20 @@ static void dw8250_setup_port(struct uart_port *p)
         * If the Component Version Register returns zero, we know that
         * ADDITIONAL_FEATURES are not enabled. No need to go any further.
         */
-       reg = readl(p->membase + DW_UART_UCV);
+       if (p->iotype == UPIO_MEM32BE)
+               reg = ioread32be(p->membase + DW_UART_UCV);
+       else
+               reg = readl(p->membase + DW_UART_UCV);
        if (!reg)
                return;
 
        dev_dbg(p->dev, "Designware UART version %c.%c%c\n",
                (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff);
 
-       reg = readl(p->membase + DW_UART_CPR);
+       if (p->iotype == UPIO_MEM32BE)
+               reg = ioread32be(p->membase + DW_UART_CPR);
+       else
+               reg = readl(p->membase + DW_UART_CPR);
        if (!reg)
                return;
 
@@ -463,10 +489,8 @@ static int dw8250_probe(struct platform_device *pdev)
        dw8250_quirks(p, data);
 
        /* If the Busy Functionality is not implemented, don't handle it */
-       if (data->uart_16550_compatible) {
-               p->serial_out = NULL;
+       if (data->uart_16550_compatible)
                p->handle_irq = NULL;
-       }
 
        if (!data->skip_autocfg)
                dw8250_setup_port(p);
index af62131af21eef8f4ab3e4c32da31b6e8e41ce8d..3b3dbdc1b73ed2a442409a6e8ee8857cffbd299b 100644 (file)
 
 static unsigned int __init serial8250_early_in(struct uart_port *port, int offset)
 {
+       offset <<= port->regshift;
+
        switch (port->iotype) {
        case UPIO_MEM:
                return readb(port->membase + offset);
        case UPIO_MEM16:
-               return readw(port->membase + (offset << 1));
+               return readw(port->membase + offset);
        case UPIO_MEM32:
-               return readl(port->membase + (offset << 2));
+               return readl(port->membase + offset);
        case UPIO_MEM32BE:
-               return ioread32be(port->membase + (offset << 2));
+               return ioread32be(port->membase + offset);
        case UPIO_PORT:
                return inb(port->iobase + offset);
        default:
@@ -57,18 +59,20 @@ static unsigned int __init serial8250_early_in(struct uart_port *port, int offse
 
 static void __init serial8250_early_out(struct uart_port *port, int offset, int value)
 {
+       offset <<= port->regshift;
+
        switch (port->iotype) {
        case UPIO_MEM:
                writeb(value, port->membase + offset);
                break;
        case UPIO_MEM16:
-               writew(value, port->membase + (offset << 1));
+               writew(value, port->membase + offset);
                break;
        case UPIO_MEM32:
-               writel(value, port->membase + (offset << 2));
+               writel(value, port->membase + offset);
                break;
        case UPIO_MEM32BE:
-               iowrite32be(value, port->membase + (offset << 2));
+               iowrite32be(value, port->membase + offset);
                break;
        case UPIO_PORT:
                outb(value, port->iobase + offset);
@@ -145,3 +149,24 @@ EARLYCON_DECLARE(uart8250, early_serial8250_setup);
 EARLYCON_DECLARE(uart, early_serial8250_setup);
 OF_EARLYCON_DECLARE(ns16550, "ns16550", early_serial8250_setup);
 OF_EARLYCON_DECLARE(ns16550a, "ns16550a", early_serial8250_setup);
+
+#ifdef CONFIG_SERIAL_8250_OMAP
+
+static int __init early_omap8250_setup(struct earlycon_device *device,
+                                      const char *options)
+{
+       struct uart_port *port = &device->port;
+
+       if (!(device->port.membase || device->port.iobase))
+               return -ENODEV;
+
+       port->regshift = 2;
+       device->con->write = early_serial8250_write;
+       return 0;
+}
+
+OF_EARLYCON_DECLARE(omap8250, "ti,omap2-uart", early_omap8250_setup);
+OF_EARLYCON_DECLARE(omap8250, "ti,omap3-uart", early_omap8250_setup);
+OF_EARLYCON_DECLARE(omap8250, "ti,omap4-uart", early_omap8250_setup);
+
+#endif
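
The early-console accessors above now compute the byte offset once from port->regshift and then dispatch purely on iotype; the OMAP earlycon entries work by setting regshift = 2 so 8250 register indices land on 32-bit-spaced addresses. A stand-alone sketch of that index-to-offset mapping (register indices as in include/uapi/linux/serial_reg.h; printed values are illustrations only):

#include <stdio.h>

#define UART_IER	1	/* interrupt enable register index */
#define UART_LCR	3	/* line control register index */

static unsigned int reg_byte_offset(unsigned int reg, unsigned int regshift)
{
	/* Same arithmetic as "offset <<= port->regshift" above. */
	return reg << regshift;
}

int main(void)
{
	/* Classic byte-spaced PC UART: regshift = 0. */
	printf("LCR at +%u\n", reg_byte_offset(UART_LCR, 0));	/* +3 */
	/* 32-bit-spaced registers, e.g. the OMAP ports above: regshift = 2. */
	printf("IER at +%u\n", reg_byte_offset(UART_IER, 2));	/* +4 */
	return 0;
}
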
index bf53aabf9b5e34919c9967f6d7e94106b04738b1..3a7cb8262bb9fa2701051d19f89a3354dd3e2e25 100644 (file)
 #include <linux/init.h>
 #include <linux/serial_8250.h>
 
-#define PORT(_base,_irq)                               \
-       {                                               \
-               .iobase         = _base,                \
-               .irq            = _irq,                 \
-               .uartclk        = 1843200,              \
-               .iotype         = UPIO_PORT,            \
-               .flags          = UPF_BOOT_AUTOCONF,    \
-       }
+#include "8250.h"
 
 static struct plat_serial8250_port exar_data[] = {
-       PORT(0x100, 5),
-       PORT(0x108, 5),
-       PORT(0x110, 5),
-       PORT(0x118, 5),
+       SERIAL8250_PORT(0x100, 5),
+       SERIAL8250_PORT(0x108, 5),
+       SERIAL8250_PORT(0x110, 5),
+       SERIAL8250_PORT(0x118, 5),
        { },
 };
 
index be1582609626b433739f15879c8697fe97e9b07d..4045180a8cfca0536fb46cfa6f1c37f301fabf1a 100644 (file)
 #include <linux/init.h>
 #include <linux/serial_8250.h>
 
-#define PORT(_base,_irq)                                               \
-       {                                                               \
-               .iobase         = _base,                                \
-               .irq            = _irq,                                 \
-               .uartclk        = 1843200,                              \
-               .iotype         = UPIO_PORT,                            \
-               .flags          = UPF_BOOT_AUTOCONF | UPF_FOURPORT,     \
-       }
+#include "8250.h"
+
+#define SERIAL8250_FOURPORT(_base, _irq) \
+       SERIAL8250_PORT_FLAGS(_base, _irq, UPF_FOURPORT)
 
 static struct plat_serial8250_port fourport_data[] = {
-       PORT(0x1a0, 9),
-       PORT(0x1a8, 9),
-       PORT(0x1b0, 9),
-       PORT(0x1b8, 9),
-       PORT(0x2a0, 5),
-       PORT(0x2a8, 5),
-       PORT(0x2b0, 5),
-       PORT(0x2b8, 5),
+       SERIAL8250_FOURPORT(0x1a0, 9),
+       SERIAL8250_FOURPORT(0x1a8, 9),
+       SERIAL8250_FOURPORT(0x1b0, 9),
+       SERIAL8250_FOURPORT(0x1b8, 9),
+       SERIAL8250_FOURPORT(0x2a0, 5),
+       SERIAL8250_FOURPORT(0x2a8, 5),
+       SERIAL8250_FOURPORT(0x2b0, 5),
+       SERIAL8250_FOURPORT(0x2b8, 5),
        { },
 };
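
Both of these boards now use initializer macros shared through "8250.h" rather than a private PORT() macro per file. Judging from the initializers deleted above, the shared macros presumably expand along the following lines (a sketch, not the header's literal text):

/* Sketch of the shared initializers, inferred from the removed PORT()
 * macros; the real definitions live in drivers/tty/serial/8250/8250.h. */
#define SERIAL8250_PORT_FLAGS(_base, _irq, _flags)		\
	{							\
		.iobase		= _base,			\
		.irq		= _irq,				\
		.uartclk	= 1843200,			\
		.iotype		= UPIO_PORT,			\
		.flags		= UPF_BOOT_AUTOCONF | (_flags),	\
	}

#define SERIAL8250_PORT(_base, _irq) SERIAL8250_PORT_FLAGS(_base, _irq, 0)

With that, an entry such as SERIAL8250_FOURPORT(0x1a0, 9) simply adds UPF_FOURPORT on top of the common defaults.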
 
index 2e3ea1a70d7b90768f33f26a39f098eed7df2c12..b1e6ae9f1ff955cdc88ae1d9798bc90899785335 100644 (file)
@@ -42,7 +42,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
                 * the user what they're missing.
                 */
                if (parisc_parent(dev)->id.hw_type != HPHW_IOA)
-                       printk(KERN_INFO
+                       dev_info(&dev->dev,
                                "Serial: device 0x%llx not configured.\n"
                                "Enable support for Wax, Lasi, Asp or Dino.\n",
                                (unsigned long long)dev->hpa.start);
@@ -66,8 +66,9 @@ static int __init serial_init_chip(struct parisc_device *dev)
 
        err = serial8250_register_8250_port(&uart);
        if (err < 0) {
-               printk(KERN_WARNING
-                       "serial8250_register_8250_port returned error %d\n", err);
+               dev_warn(&dev->dev,
+                       "serial8250_register_8250_port returned error %d\n",
+                       err);
                iounmap(uart.port.membase);
                return err;
        }
index 2891958cd8420303a5e11cac8c72f359b7c2c229..38166db2b8244f8a33df986ce1032fce37940a21 100644 (file)
@@ -24,8 +24,7 @@
 #endif
 
 #ifdef CONFIG_HPAPCI
-struct hp300_port
-{
+struct hp300_port {
        struct hp300_port *next;        /* next port */
        int line;                       /* line (tty) number */
 };
@@ -111,7 +110,7 @@ int __init hp300_setup_serial_console(void)
        /* Check for APCI console */
        if (scode == 256) {
 #ifdef CONFIG_HPAPCI
-               printk(KERN_INFO "Serial console is HP APCI 1\n");
+               pr_info("Serial console is HP APCI 1\n");
 
                port.uartclk = HPAPCI_BAUD_BASE * 16;
                port.mapbase = (FRODO_BASE + FRODO_APCI_OFFSET(1));
@@ -119,7 +118,7 @@ int __init hp300_setup_serial_console(void)
                port.regshift = 2;
                add_preferred_console("ttyS", port.line, "9600n8");
 #else
-               printk(KERN_WARNING "Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n");
+               pr_warn("Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n");
                return 0;
 #endif
        } else {
@@ -128,7 +127,7 @@ int __init hp300_setup_serial_console(void)
                if (!pa)
                        return 0;
 
-               printk(KERN_INFO "Serial console is HP DCA at select code %d\n", scode);
+               pr_info("Serial console is HP DCA at select code %d\n", scode);
 
                port.uartclk = HPDCA_BAUD_BASE * 16;
                port.mapbase = (pa + UART_OFFSET);
@@ -142,13 +141,13 @@ int __init hp300_setup_serial_console(void)
                if (DIO_ID(pa + DIO_VIRADDRBASE) & 0x80)
                        add_preferred_console("ttyS", port.line, "9600n8");
 #else
-               printk(KERN_WARNING "Serial console is DCA but support is disabled (CONFIG_HPDCA)!\n");
+               pr_warn("Serial console is DCA but support is disabled (CONFIG_HPDCA)!\n");
                return 0;
 #endif
        }
 
        if (early_serial_setup(&port) < 0)
-               printk(KERN_WARNING "hp300_setup_serial_console(): early_serial_setup() failed.\n");
+               pr_warn("%s: early_serial_setup() failed.\n", __func__);
        return 0;
 }
 #endif /* CONFIG_SERIAL_8250_CONSOLE */
@@ -180,8 +179,9 @@ static int hpdca_init_one(struct dio_dev *d,
        line = serial8250_register_8250_port(&uart);
 
        if (line < 0) {
-               printk(KERN_NOTICE "8250_hp300: register_serial() DCA scode %d"
-                      " irq %d failed\n", d->scode, uart.port.irq);
+               dev_notice(&d->dev,
+                         "8250_hp300: register_serial() DCA scode %d irq %d failed\n",
+                         d->scode, uart.port.irq);
                return -ENOMEM;
        }
 
@@ -249,8 +249,8 @@ static int __init hp300_8250_init(void)
 
                /* Memory mapped I/O */
                uart.port.iotype = UPIO_MEM;
-               uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ \
-                             | UPF_BOOT_AUTOCONF;
+               uart.port.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ
+                               | UPF_BOOT_AUTOCONF;
                /* XXX - no interrupt support yet */
                uart.port.irq = 0;
                uart.port.uartclk = HPAPCI_BAUD_BASE * 16;
@@ -261,8 +261,9 @@ static int __init hp300_8250_init(void)
                line = serial8250_register_8250_port(&uart);
 
                if (line < 0) {
-                       printk(KERN_NOTICE "8250_hp300: register_serial() APCI"
-                              " %d irq %d failed\n", i, uart.port.irq);
+                       dev_notice(uart.port.dev,
+                                  "8250_hp300: register_serial() APCI %d irq %d failed\n",
+                                  i, uart.port.irq);
                        kfree(port);
                        continue;
                }
index a5c778e83de054d7e35c57c65b0921319e905e75..27124e21eb962010a18365304efd33c54949a648 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/init.h>
 #include <linux/serial_8250.h>
 
-#define HUB6(card,port)                                                        \
+#define HUB6(card, port)                                               \
        {                                                               \
                .iobase         = 0x302,                                \
                .irq            = 3,                                    \
index d6e1ec9b4fdeb7ab8f3efe626d6a2fc0467a4183..b4e1d39805b2182c2768ce9c8c3c6a42d869edf2 100644 (file)
@@ -4,6 +4,8 @@
  *
  * Ingenic SoC UART support
  *
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
  * Free Software Foundation;  either version 2 of the  License, or (at your
@@ -18,7 +20,7 @@
 #include <linux/console.h>
 #include <linux/io.h>
 #include <linux/libfdt.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/of_device.h>
@@ -154,14 +156,18 @@ static void ingenic_uart_serial_out(struct uart_port *p, int offset, int value)
                break;
 
        case UART_IER:
-               /* Enable receive timeout interrupt with the
-                * receive line status interrupt */
+               /*
+                * Enable receive timeout interrupt with the receive line
+                * status interrupt.
+                */
                value |= (value & 0x4) << 2;
                break;
 
        case UART_MCR:
-               /* If we have enabled modem status IRQs we should enable modem
-                * mode. */
+               /*
+                * If we have enabled modem status IRQs we should enable
+                * modem mode.
+                */
                ier = p->serial_in(p, UART_IER);
 
                if (ier & UART_IER_MSI)
@@ -297,16 +303,6 @@ out:
        return err;
 }
 
-static int ingenic_uart_remove(struct platform_device *pdev)
-{
-       struct ingenic_uart_data *data = platform_get_drvdata(pdev);
-
-       serial8250_unregister_port(data->line);
-       clk_disable_unprepare(data->clk_module);
-       clk_disable_unprepare(data->clk_baud);
-       return 0;
-}
-
 static const struct ingenic_uart_config jz4740_uart_config = {
        .tx_loadsz = 8,
        .fifosize = 16,
@@ -329,19 +325,13 @@ static const struct of_device_id of_match[] = {
        { .compatible = "ingenic,jz4780-uart", .data = &jz4780_uart_config },
        { /* sentinel */ }
 };
-MODULE_DEVICE_TABLE(of, of_match);
 
 static struct platform_driver ingenic_uart_platform_driver = {
        .driver = {
-               .name           = "ingenic-uart",
-               .of_match_table = of_match,
+               .name                   = "ingenic-uart",
+               .of_match_table         = of_match,
+               .suppress_bind_attrs    = true,
        },
        .probe                  = ingenic_uart_probe,
-       .remove                 = ingenic_uart_remove,
 };
-
-module_platform_driver(ingenic_uart_platform_driver);
-
-MODULE_AUTHOR("Paul Burton");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Ingenic SoC UART driver");
+builtin_platform_driver(ingenic_uart_platform_driver);
index 33021c1f7d5575cc0aa6f0c1c8e25ce53cbf3e1d..c7ed3d2bc8b22bac300e0b28e2657ff758dd07b2 100644 (file)
@@ -335,6 +335,7 @@ static struct platform_driver of_platform_serial_driver = {
        .driver = {
                .name = "of_serial",
                .of_match_table = of_platform_serial_table,
+               .pm = &of_serial_pm_ops,
        },
        .probe = of_platform_serial_probe,
        .remove = of_platform_serial_remove,
index a2c0734c76e2eb47eab5d7b294b3071a99efcf0e..d710985a2e4ffb9e98314ee090607aa276cb48a7 100644 (file)
@@ -697,6 +697,36 @@ static void omap_8250_throttle(struct uart_port *port)
        pm_runtime_put_autosuspend(port->dev);
 }
 
+static int omap_8250_rs485_config(struct uart_port *port,
+                                 struct serial_rs485 *rs485)
+{
+       struct uart_8250_port *up = up_to_u8250p(port);
+
+       /* Clamp the delays to [0, 100ms] */
+       rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+       rs485->delay_rts_after_send  = min(rs485->delay_rts_after_send, 100U);
+
+       port->rs485 = *rs485;
+
+       /*
+        * Both serial8250_em485_init and serial8250_em485_destroy
+        * are idempotent
+        */
+       if (rs485->flags & SER_RS485_ENABLED) {
+               int ret = serial8250_em485_init(up);
+
+               if (ret) {
+                       rs485->flags &= ~SER_RS485_ENABLED;
+                       port->rs485.flags &= ~SER_RS485_ENABLED;
+               }
+               return ret;
+       }
+
+       serial8250_em485_destroy(up);
+
+       return 0;
+}
+
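
omap_8250_rs485_config() is reached through the serial core's generic TIOCSRS485 ioctl path. A hedged user-space sketch of exercising it, where the device node /dev/ttyS1 and the chosen delays are purely illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);	/* example node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	rs485.delay_rts_before_send = 1;	/* ms; driver clamps to 100 ms */
	rs485.delay_rts_after_send = 1;		/* ms */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0)	/* lands in .rs485_config */
		perror("TIOCSRS485");

	close(fd);
	return 0;
}
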
 static void omap_8250_unthrottle(struct uart_port *port)
 {
        unsigned long flags;
@@ -1146,6 +1176,7 @@ static int omap8250_probe(struct platform_device *pdev)
        up.port.shutdown = omap_8250_shutdown;
        up.port.throttle = omap_8250_throttle;
        up.port.unthrottle = omap_8250_unthrottle;
+       up.port.rs485_config = omap_8250_rs485_config;
 
        if (pdev->dev.of_node) {
                const struct of_device_id *id;
index 4097f3f65b3bb1bfc3e3d4d6a40f05fb46c70618..8f8d5c59c8181e88e6ad1d29e11fd80f579649b0 100644 (file)
@@ -721,7 +721,7 @@ static int pci_ni8430_init(struct pci_dev *dev)
         */
        pcibios_resource_to_bus(dev->bus, &region, &dev->resource[bar]);
        device_window = ((region.start + MITE_IOWBSR1_WIN_OFFSET) & 0xffffff00)
-                       | MITE_IOWBSR1_WENAB | MITE_IOWBSR1_WSIZE;
+                       | MITE_IOWBSR1_WENAB | MITE_IOWBSR1_WSIZE;
        writel(device_window, p + MITE_IOWBSR1);
 
        /* Set window access to go to RAMSEL IO address space */
@@ -803,12 +803,12 @@ static int pci_netmos_9900_numports(struct pci_dev *dev)
        unsigned int pi;
        unsigned short sub_serports;
 
-       pi = (c & 0xff);
+       pi = c & 0xff;
 
-       if (pi == 2) {
+       if (pi == 2)
                return 1;
-       } else if ((pi == 0) &&
-                          (dev->device == PCI_DEVICE_ID_NETMOS_9900)) {
+
+       if ((pi == 0) && (dev->device == PCI_DEVICE_ID_NETMOS_9900)) {
                /* two possibilities: 0x30ps encodes number of parallel and
                 * serial ports, or 0x1000 indicates *something*. This is not
                 * immediately obvious, since the 2s1p+4s configuration seems
@@ -816,12 +816,12 @@ static int pci_netmos_9900_numports(struct pci_dev *dev)
                 * advertising the same function 3 as the 4s+2s1p config.
                 */
                sub_serports = dev->subsystem_device & 0xf;
-               if (sub_serports > 0) {
+               if (sub_serports > 0)
                        return sub_serports;
-               } else {
-                       dev_err(&dev->dev, "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
-                       return 0;
-               }
+
+               dev_err(&dev->dev,
+                       "NetMos/Mostech serial driver ignoring port on ambiguous config.\n");
+               return 0;
        }
 
        moan_device("unknown NetMos/Mostech program interface", dev);
@@ -842,21 +842,21 @@ static int pci_netmos_init(struct pci_dev *dev)
                return 0;
 
        switch (dev->device) { /* FALLTHROUGH on all */
-               case PCI_DEVICE_ID_NETMOS_9904:
-               case PCI_DEVICE_ID_NETMOS_9912:
-               case PCI_DEVICE_ID_NETMOS_9922:
-               case PCI_DEVICE_ID_NETMOS_9900:
-                       num_serial = pci_netmos_9900_numports(dev);
-                       break;
+       case PCI_DEVICE_ID_NETMOS_9904:
+       case PCI_DEVICE_ID_NETMOS_9912:
+       case PCI_DEVICE_ID_NETMOS_9922:
+       case PCI_DEVICE_ID_NETMOS_9900:
+               num_serial = pci_netmos_9900_numports(dev);
+               break;
 
-               default:
-                       if (num_serial == 0 ) {
-                               moan_device("unknown NetMos/Mostech device", dev);
-                       }
+       default:
+               break;
        }
 
-       if (num_serial == 0)
+       if (num_serial == 0) {
+               moan_device("unknown NetMos/Mostech device", dev);
                return -ENODEV;
+       }
 
        return num_serial;
 }
@@ -1198,8 +1198,9 @@ static int pci_quatech_has_qmcr(struct uart_8250_port *port)
 
 static int pci_quatech_test(struct uart_8250_port *port)
 {
-       u8 reg;
-       u8 qopr = pci_quatech_rqopr(port);
+       u8 reg, qopr;
+
+       qopr = pci_quatech_rqopr(port);
        pci_quatech_wqopr(port, qopr & QPCR_TEST_FOR1);
        reg = pci_quatech_rqopr(port) & 0xC0;
        if (reg != QPCR_TEST_GET1)
@@ -1286,6 +1287,7 @@ static int pci_quatech_init(struct pci_dev *dev)
                unsigned long base = pci_resource_start(dev, 0);
                if (base) {
                        u32 tmp;
+
                        outl(inl(base + 0x38) | 0x00002000, base + 0x38);
                        tmp = inl(base + 0x3c);
                        outl(tmp | 0x01000000, base + 0x3c);
@@ -1334,29 +1336,6 @@ static int pci_default_setup(struct serial_private *priv,
        return setup_port(priv, port, bar, offset, board->reg_shift);
 }
 
-static int pci_pericom_setup(struct serial_private *priv,
-                 const struct pciserial_board *board,
-                 struct uart_8250_port *port, int idx)
-{
-       unsigned int bar, offset = board->first_offset, maxnr;
-
-       bar = FL_GET_BASE(board->flags);
-       if (board->flags & FL_BASE_BARS)
-               bar += idx;
-       else
-               offset += idx * board->uart_offset;
-
-       maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
-               (board->reg_shift + 3);
-
-       if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
-               return 1;
-
-       port->port.uartclk = 14745600;
-
-       return setup_port(priv, port, bar, offset, board->reg_shift);
-}
-
 static int
 ce4100_serial_setup(struct serial_private *priv,
                  const struct pciserial_board *board,
@@ -1379,6 +1358,9 @@ ce4100_serial_setup(struct serial_private *priv,
 #define PCI_DEVICE_ID_INTEL_BSW_UART1  0x228a
 #define PCI_DEVICE_ID_INTEL_BSW_UART2  0x228c
 
+#define PCI_DEVICE_ID_INTEL_BDW_UART1  0x9ce3
+#define PCI_DEVICE_ID_INTEL_BDW_UART2  0x9ce4
+
 #define BYT_PRV_CLK                    0x800
 #define BYT_PRV_CLK_EN                 (1 << 0)
 #define BYT_PRV_CLK_M_VAL_SHIFT                1
@@ -1461,11 +1443,13 @@ byt_serial_setup(struct serial_private *priv,
        switch (pdev->device) {
        case PCI_DEVICE_ID_INTEL_BYT_UART1:
        case PCI_DEVICE_ID_INTEL_BSW_UART1:
+       case PCI_DEVICE_ID_INTEL_BDW_UART1:
                rx_param->src_id = 3;
                tx_param->dst_id = 2;
                break;
        case PCI_DEVICE_ID_INTEL_BYT_UART2:
        case PCI_DEVICE_ID_INTEL_BSW_UART2:
+       case PCI_DEVICE_ID_INTEL_BDW_UART2:
                rx_param->src_id = 5;
                tx_param->dst_id = 4;
                break;
@@ -1536,10 +1520,9 @@ pci_brcm_trumanage_setup(struct serial_private *priv,
 static int pci_fintek_rs485_config(struct uart_port *port,
                               struct serial_rs485 *rs485)
 {
+       struct pci_dev *pci_dev = to_pci_dev(port->dev);
        u8 setting;
        u8 *index = (u8 *) port->private_data;
-       struct pci_dev *pci_dev = container_of(port->dev, struct pci_dev,
-                                               dev);
 
        pci_read_config_byte(pci_dev, 0x40 + 8 * *index + 7, &setting);
 
@@ -1761,7 +1744,7 @@ xr17v35x_has_slave(struct serial_private *priv)
        const int dev_id = priv->dev->device;
 
        return ((dev_id == PCI_DEVICE_ID_EXAR_XR17V4358) ||
-               (dev_id == PCI_DEVICE_ID_EXAR_XR17V8358));
+               (dev_id == PCI_DEVICE_ID_EXAR_XR17V8358));
 }
 
 static int
@@ -1861,8 +1844,8 @@ pci_fastcom335_setup(struct serial_private *priv,
 
 static int
 pci_wch_ch353_setup(struct serial_private *priv,
-                    const struct pciserial_board *board,
-                    struct uart_8250_port *port, int idx)
+                   const struct pciserial_board *board,
+                   struct uart_8250_port *port, int idx)
 {
        port->port.flags |= UPF_FIXED_TYPE;
        port->port.type = PORT_16550A;
@@ -1871,8 +1854,8 @@ pci_wch_ch353_setup(struct serial_private *priv,
 
 static int
 pci_wch_ch38x_setup(struct serial_private *priv,
-                    const struct pciserial_board *board,
-                    struct uart_8250_port *port, int idx)
+                   const struct pciserial_board *board,
+                   struct uart_8250_port *port, int idx)
 {
        port->port.flags |= UPF_FIXED_TYPE;
        port->port.type = PORT_16850;
@@ -1936,6 +1919,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
 #define PCIE_VENDOR_ID_WCH             0x1c00
 #define PCIE_DEVICE_ID_WCH_CH382_2S1P  0x3250
 #define PCIE_DEVICE_ID_WCH_CH384_4S    0x3470
+#define PCIE_DEVICE_ID_WCH_CH382_2S    0x3253
 
 #define PCI_VENDOR_ID_PERICOM                  0x12D8
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7951       0x7951
@@ -2062,6 +2046,20 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = byt_serial_setup,
        },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BDW_UART1,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = byt_serial_setup,
+       },
+       {
+               .vendor         = PCI_VENDOR_ID_INTEL,
+               .device         = PCI_DEVICE_ID_INTEL_BDW_UART2,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = byt_serial_setup,
+       },
        /*
         * ITE
         */
@@ -2225,16 +2223,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .setup          = pci_default_setup,
                .exit           = pci_plx9050_exit,
        },
-       /*
-        * Pericom
-        */
-       {
-               .vendor         = PCI_VENDOR_ID_PERICOM,
-               .device         = PCI_ANY_ID,
-               .subvendor      = PCI_ANY_ID,
-               .subdevice      = PCI_ANY_ID,
-               .setup          = pci_pericom_setup,
-       },
        /*
         * PLX
         */
@@ -2618,6 +2606,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
                .subdevice      = PCI_ANY_ID,
                .setup          = pci_wch_ch353_setup,
        },
+       /* WCH CH382 2S card (16850 clone) */
+       {
+               .vendor         = PCIE_VENDOR_ID_WCH,
+               .device         = PCIE_DEVICE_ID_WCH_CH382_2S,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = PCI_ANY_ID,
+               .setup          = pci_wch_ch38x_setup,
+       },
        /* WCH CH382 2S1P card (16850 clone) */
        {
                .vendor         = PCIE_VENDOR_ID_WCH,
@@ -2936,6 +2932,7 @@ enum pci_board_num_t {
        pbn_fintek_4,
        pbn_fintek_8,
        pbn_fintek_12,
+       pbn_wch382_2,
        pbn_wch384_4,
        pbn_pericom_PI7C9X7951,
        pbn_pericom_PI7C9X7952,
@@ -3756,6 +3753,13 @@ static struct pciserial_board pci_boards[] = {
                .base_baud      = 115200,
                .first_offset   = 0x40,
        },
+       [pbn_wch382_2] = {
+               .flags          = FL_BASE0,
+               .num_ports      = 2,
+               .base_baud      = 115200,
+               .uart_offset    = 8,
+               .first_offset   = 0xC0,
+       },
        [pbn_wch384_4] = {
                .flags          = FL_BASE0,
                .num_ports      = 4,
@@ -4502,7 +4506,7 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID, 0, 0,
                pbn_b0_bt_2_921600 },
        {       PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI958,
-               PCI_ANY_ID , PCI_ANY_ID, 0, 0,
+               PCI_ANY_ID, PCI_ANY_ID, 0, 0,
                pbn_b2_8_1152000 },
 
        /*
@@ -5506,6 +5510,16 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
                pbn_byt },
 
+       /* Intel Broadwell */
+       {       PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART1,
+               PCI_ANY_ID,  PCI_ANY_ID,
+               PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+               pbn_byt },
+       {       PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_UART2,
+               PCI_ANY_ID,  PCI_ANY_ID,
+               PCI_CLASS_COMMUNICATION_SERIAL << 8, 0xff0000,
+               pbn_byt },
+
        /*
         * Intel Quark x1000
         */
@@ -5545,6 +5559,10 @@ static struct pci_device_id serial_pci_tbl[] = {
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_b0_bt_2_115200 },
 
+       {       PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
+               PCI_ANY_ID, PCI_ANY_ID,
+               0, 0, pbn_wch382_2 },
+
        {       PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
                PCI_ANY_ID, PCI_ANY_ID,
                0, 0, pbn_wch384_4 },
index 658b392d1170d63dbb61e5c91348f8f2bcd2549f..34f05ed78b68ce387cc75b7395ef7a2de6a12239 100644 (file)
@@ -357,8 +357,8 @@ static const struct pnp_device_id pnp_dev_table[] = {
        /* Fujitsu Wacom 1FGT Tablet PC device */
        {       "FUJ02E9",              0       },
        /*
-        * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in
-        * disguise)
+        * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6
+        * in disguise).
         */
        {       "LTS0001",              0       },
        /* Rockwell's (PORALiNK) 33600 INT PNP */
@@ -367,12 +367,14 @@ static const struct pnp_device_id pnp_dev_table[] = {
        {       "PNPCXXX",              UNKNOWN_DEV     },
        /* More unknown PnP modems */
        {       "PNPDXXX",              UNKNOWN_DEV     },
-       /* Winbond CIR port, should not be probed. We should keep track
-          of it to prevent the legacy serial driver from probing it */
+       /*
+        * Winbond CIR port, should not be probed. We should keep track of
+        * it to prevent the legacy serial driver from probing it.
+        */
        {       "WEC1022",              CIR_PORT        },
        /*
-        * SMSC IrCC SIR/FIR port, should not be probed by serial driver
-        * as well so its own driver can bind to it.
+        * SMSC IrCC SIR/FIR port, should not be probed by serial driver as
+        * well so its own driver can bind to it.
         */
        {       "SMCF010",              CIR_PORT        },
        {       "",                     0       }
@@ -380,35 +382,35 @@ static const struct pnp_device_id pnp_dev_table[] = {
 
 MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
 
-static char *modem_names[] = {
+static const char *modem_names[] = {
        "MODEM", "Modem", "modem", "FAX", "Fax", "fax",
        "56K", "56k", "K56", "33.6", "28.8", "14.4",
        "33,600", "28,800", "14,400", "33.600", "28.800", "14.400",
        "33600", "28800", "14400", "V.90", "V.34", "V.32", NULL
 };
 
-static int check_name(char *name)
+static bool check_name(const char *name)
 {
-       char **tmp;
+       const char **tmp;
 
        for (tmp = modem_names; *tmp; tmp++)
                if (strstr(name, *tmp))
-                       return 1;
+                       return true;
 
-       return 0;
+       return false;
 }
 
-static int check_resources(struct pnp_dev *dev)
+static bool check_resources(struct pnp_dev *dev)
 {
-       resource_size_t base[] = {0x2f8, 0x3f8, 0x2e8, 0x3e8};
-       int i;
+       static const resource_size_t base[] = {0x2f8, 0x3f8, 0x2e8, 0x3e8};
+       unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(base); i++) {
                if (pnp_possible_config(dev, IORESOURCE_IO, base[i], 8))
-                       return 1;
+                       return true;
        }
 
-       return 0;
+       return false;
 }
 
 /*
@@ -425,8 +427,8 @@ static int check_resources(struct pnp_dev *dev)
 static int serial_pnp_guess_board(struct pnp_dev *dev)
 {
        if (!(check_name(pnp_dev_name(dev)) ||
-               (dev->card && check_name(dev->card->name))))
-                       return -ENODEV;
+           (dev->card && check_name(dev->card->name))))
+               return -ENODEV;
 
        if (check_resources(dev))
                return 0;
@@ -462,11 +464,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
        } else
                return -ENODEV;
 
-#ifdef SERIAL_DEBUG_PNP
-       printk(KERN_DEBUG
-               "Setup PNP port: port %x, mem 0x%lx, irq %d, type %d\n",
-                      uart.port.iobase, uart.port.mapbase, uart.port.irq, uart.port.iotype);
-#endif
+       dev_dbg(&dev->dev,
+                "Setup PNP port: port %lx, mem %pa, irq %d, type %d\n",
+                uart.port.iobase, &uart.port.mapbase,
+                uart.port.irq, uart.port.iotype);
+
        if (flags & CIR_PORT) {
                uart.port.flags |= UPF_FIXED_PORT | UPF_FIXED_TYPE;
                uart.port.type = PORT_8250_CIR;
index 8d262bce97e410a58ed7fc3ba703bf8ba15cfb6d..e376cfaf8ab70bf147ed4a080f94a6cdfe1c6e08 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
+#include <linux/timer.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -52,7 +53,7 @@
 #define DEBUG_AUTOCONF(fmt...) do { } while (0)
 #endif
 
-#define BOTH_EMPTY     (UART_LSR_TEMT | UART_LSR_THRE)
+#define BOTH_EMPTY     (UART_LSR_TEMT | UART_LSR_THRE)
 
 /*
  * Here we define the default xmit fifo size used for each type of UART.
@@ -250,9 +251,11 @@ static const struct serial8250_config uart_config[] = {
                .fcr            = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10,
                .flags          = UART_CAP_FIFO | UART_CAP_AFE,
        },
-/* tx_loadsz is set to 63-bytes instead of 64-bytes to implement
-workaround of errata A-008006 which states that tx_loadsz should  be
-configured less than Maximum supported fifo bytes */
+       /*
+        * tx_loadsz is set to 63 bytes instead of 64 to implement the
+        * workaround for erratum A-008006, which states that tx_loadsz
+        * should be configured to less than the maximum supported FIFO size.
+        */
        [PORT_16550A_FSL64] = {
                .name           = "16550A_FSL64",
                .fifo_size      = 64,
@@ -522,6 +525,20 @@ static void serial8250_clear_fifos(struct uart_8250_port *p)
        }
 }
 
+static inline void serial8250_em485_rts_after_send(struct uart_8250_port *p)
+{
+       unsigned char mcr = serial_in(p, UART_MCR);
+
+       if (p->port.rs485.flags & SER_RS485_RTS_AFTER_SEND)
+               mcr |= UART_MCR_RTS;
+       else
+               mcr &= ~UART_MCR_RTS;
+       serial_out(p, UART_MCR, mcr);
+}
+
+static void serial8250_em485_handle_start_tx(unsigned long arg);
+static void serial8250_em485_handle_stop_tx(unsigned long arg);
+
 void serial8250_clear_and_reinit_fifos(struct uart_8250_port *p)
 {
        serial8250_clear_fifos(p);
@@ -546,6 +563,73 @@ void serial8250_rpm_put(struct uart_8250_port *p)
 }
 EXPORT_SYMBOL_GPL(serial8250_rpm_put);
 
+/**
+ *     serial8250_em485_init() - start software emulation of RS-485 on a port
+ *     @p:     uart_8250_port port instance
+ *
+ *     The function starts software RS-485 emulation on the
+ *     &struct uart_8250_port @p: RTS is switched before and after each
+ *     transmission. The function is idempotent, so it is safe to call it
+ *     multiple times.
+ *
+ *     The caller MUST enable the interrupt on an empty shift register
+ *     before calling serial8250_em485_init(). This interrupt is not part
+ *     of the 8250 standard; it is implementation defined.
+ *
+ *     The function is supposed to be called from .rs485_config callback
+ *     or from any other callback protected with p->port.lock spinlock.
+ *
+ *     See also serial8250_em485_destroy()
+ *
+ *     Return 0 - success, -errno - otherwise
+ */
+int serial8250_em485_init(struct uart_8250_port *p)
+{
+       if (p->em485 != NULL)
+               return 0;
+
+       p->em485 = kmalloc(sizeof(struct uart_8250_em485), GFP_KERNEL);
+       if (p->em485 == NULL)
+               return -ENOMEM;
+
+       setup_timer(&p->em485->stop_tx_timer,
+               serial8250_em485_handle_stop_tx, (unsigned long)p);
+       setup_timer(&p->em485->start_tx_timer,
+               serial8250_em485_handle_start_tx, (unsigned long)p);
+       p->em485->active_timer = NULL;
+
+       serial8250_em485_rts_after_send(p);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(serial8250_em485_init);
+
+/**
+ *     serial8250_em485_destroy() - put uart_8250_port into normal state
+ *     @p:     uart_8250_port port instance
+ *
+ *     The function stops software RS-485 emulation on the
+ *     &struct uart_8250_port @p. The function is idempotent, so it is safe to
+ *     call it multiple times.
+ *
+ *     The function is supposed to be called from .rs485_config callback
+ *     or from any other callback protected with p->port.lock spinlock.
+ *
+ *     See also serial8250_em485_init()
+ */
+void serial8250_em485_destroy(struct uart_8250_port *p)
+{
+       if (p->em485 == NULL)
+               return;
+
+       del_timer(&p->em485->start_tx_timer);
+       del_timer(&p->em485->stop_tx_timer);
+
+       kfree(p->em485);
+       p->em485 = NULL;
+}
+EXPORT_SYMBOL_GPL(serial8250_em485_destroy);
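
As the kernel-doc above says, the two helpers are meant to be called from a driver's .rs485_config callback, which the serial core invokes with port->lock held. A minimal sketch of such a callback for a hypothetical "foo8250" driver; only serial8250_em485_init()/serial8250_em485_destroy() and the core types are real symbols here, and the logic simply mirrors the OMAP callback added earlier in this diff:

#include <linux/serial_8250.h>
#include <linux/serial_core.h>

static int foo8250_rs485_config(struct uart_port *port,
				struct serial_rs485 *rs485)
{
	struct uart_8250_port *up = up_to_u8250p(port);

	port->rs485 = *rs485;

	if (rs485->flags & SER_RS485_ENABLED) {
		int ret = serial8250_em485_init(up);	/* idempotent */

		if (ret)
			port->rs485.flags &= ~SER_RS485_ENABLED;
		return ret;
	}

	serial8250_em485_destroy(up);			/* idempotent */
	return 0;
}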
+
 /*
  * These two wrappers ensure that enable_runtime_pm_tx() can be called more than
  * once and disable_runtime_pm_tx() will still disable RPM because the fifo is
@@ -731,22 +815,16 @@ static int size_fifo(struct uart_8250_port *up)
  */
 static unsigned int autoconfig_read_divisor_id(struct uart_8250_port *p)
 {
-       unsigned char old_dll, old_dlm, old_lcr;
-       unsigned int id;
+       unsigned char old_lcr;
+       unsigned int id, old_dl;
 
        old_lcr = serial_in(p, UART_LCR);
        serial_out(p, UART_LCR, UART_LCR_CONF_MODE_A);
+       old_dl = serial_dl_read(p);
+       serial_dl_write(p, 0);
+       id = serial_dl_read(p);
+       serial_dl_write(p, old_dl);
 
-       old_dll = serial_in(p, UART_DLL);
-       old_dlm = serial_in(p, UART_DLM);
-
-       serial_out(p, UART_DLL, 0);
-       serial_out(p, UART_DLM, 0);
-
-       id = serial_in(p, UART_DLL) | serial_in(p, UART_DLM) << 8;
-
-       serial_out(p, UART_DLL, old_dll);
-       serial_out(p, UART_DLM, old_dlm);
        serial_out(p, UART_LCR, old_lcr);
 
        return id;
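
The removed lines show what the divisor-latch access used to look like; the serial_dl_read()/serial_dl_write() helpers presumably behave like the following sketch for a standard 16550-style part (the real helpers also cover chips with non-standard divisor registers):

/* Sketch only: inferred from the open-coded DLL/DLM sequence removed above. */
static unsigned int sketch_dl_read(struct uart_8250_port *up)
{
	return serial_in(up, UART_DLL) | serial_in(up, UART_DLM) << 8;
}

static void sketch_dl_write(struct uart_8250_port *up, unsigned int value)
{
	serial_out(up, UART_DLL, value & 0xff);
	serial_out(up, UART_DLM, (value >> 8) & 0xff);
}
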
@@ -1238,8 +1316,7 @@ static void autoconfig(struct uart_8250_port *up)
 out_lock:
        spin_unlock_irqrestore(&port->lock, flags);
        if (up->capabilities != old_capabilities) {
-               printk(KERN_WARNING
-                      "ttyS%d: detected caps %08x should be %08x\n",
+               pr_warn("ttyS%d: detected caps %08x should be %08x\n",
                       serial_index(port), old_capabilities,
                       up->capabilities);
        }
@@ -1304,7 +1381,69 @@ static void autoconfig_irq(struct uart_8250_port *up)
        port->irq = (irq > 0) ? irq : 0;
 }
 
-static inline void __stop_tx(struct uart_8250_port *p)
+static void serial8250_stop_rx(struct uart_port *port)
+{
+       struct uart_8250_port *up = up_to_u8250p(port);
+
+       serial8250_rpm_get(up);
+
+       up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
+       up->port.read_status_mask &= ~UART_LSR_DR;
+       serial_port_out(port, UART_IER, up->ier);
+
+       serial8250_rpm_put(up);
+}
+
+static void __do_stop_tx_rs485(struct uart_8250_port *p)
+{
+       if (!p->em485)
+               return;
+
+       serial8250_em485_rts_after_send(p);
+       /*
+        * Empty the RX FIFO, we are not interested in anything
+        * received during the half-duplex transmission.
+        */
+       if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX))
+               serial8250_clear_fifos(p);
+}
+
+static void serial8250_em485_handle_stop_tx(unsigned long arg)
+{
+       struct uart_8250_port *p = (struct uart_8250_port *)arg;
+       struct uart_8250_em485 *em485 = p->em485;
+       unsigned long flags;
+
+       spin_lock_irqsave(&p->port.lock, flags);
+       if (em485 &&
+           em485->active_timer == &em485->stop_tx_timer) {
+               __do_stop_tx_rs485(p);
+               em485->active_timer = NULL;
+       }
+       spin_unlock_irqrestore(&p->port.lock, flags);
+}
+
+static void __stop_tx_rs485(struct uart_8250_port *p)
+{
+       struct uart_8250_em485 *em485 = p->em485;
+
+       if (!em485)
+               return;
+
+       /*
+        * __do_stop_tx_rs485 is going to set RTS according to config
+        * AND flush RX FIFO if required.
+        */
+       if (p->port.rs485.delay_rts_after_send > 0) {
+               em485->active_timer = &em485->stop_tx_timer;
+               mod_timer(&em485->stop_tx_timer, jiffies +
+                       p->port.rs485.delay_rts_after_send * HZ / 1000);
+       } else {
+               __do_stop_tx_rs485(p);
+       }
+}
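
The RTS delays carried in struct serial_rs485 are milliseconds, while mod_timer() takes jiffies; the expression above converts with truncating integer arithmetic, so a delay shorter than one tick becomes zero jiffies and the timer fires on the next tick. A stand-alone arithmetic sketch (the HZ values are only examples):

#include <stdio.h>

static unsigned long ms_to_jiffies(unsigned long ms, unsigned long hz)
{
	/* Same arithmetic as "delay_rts_after_send * HZ / 1000" above. */
	return ms * hz / 1000;
}

int main(void)
{
	printf("%lu\n", ms_to_jiffies(100, 250));	/* 25 jiffies */
	printf("%lu\n", ms_to_jiffies(3, 250));		/* 0: below one tick */
	return 0;
}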
+
+static inline void __do_stop_tx(struct uart_8250_port *p)
 {
        if (p->ier & UART_IER_THRI) {
                p->ier &= ~UART_IER_THRI;
@@ -1313,6 +1452,28 @@ static inline void __stop_tx(struct uart_8250_port *p)
        }
 }
 
+static inline void __stop_tx(struct uart_8250_port *p)
+{
+       struct uart_8250_em485 *em485 = p->em485;
+
+       if (em485) {
+               unsigned char lsr = serial_in(p, UART_LSR);
+               /*
+                * To provide the required timing and allow FIFO transfer,
+                * __stop_tx_rs485 must be called only when both the FIFO and
+                * the shift register are empty. It is up to the device driver
+                * to enable the interrupt on TEMT.
+                */
+               if ((lsr & BOTH_EMPTY) != BOTH_EMPTY)
+                       return;
+
+               del_timer(&em485->start_tx_timer);
+               em485->active_timer = NULL;
+       }
+       __do_stop_tx(p);
+       __stop_tx_rs485(p);
+}
+
 static void serial8250_stop_tx(struct uart_port *port)
 {
        struct uart_8250_port *up = up_to_u8250p(port);
@@ -1330,12 +1491,10 @@ static void serial8250_stop_tx(struct uart_port *port)
        serial8250_rpm_put(up);
 }
 
-static void serial8250_start_tx(struct uart_port *port)
+static inline void __start_tx(struct uart_port *port)
 {
        struct uart_8250_port *up = up_to_u8250p(port);
 
-       serial8250_rpm_get_tx(up);
-
        if (up->dma && !up->dma->tx_dma(up))
                return;
 
@@ -1345,6 +1504,7 @@ static void serial8250_start_tx(struct uart_port *port)
 
                if (up->bugs & UART_BUG_TXEN) {
                        unsigned char lsr;
+
                        lsr = serial_in(up, UART_LSR);
                        up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS;
                        if (lsr & UART_LSR_THRE)
@@ -1361,27 +1521,78 @@ static void serial8250_start_tx(struct uart_port *port)
        }
 }
 
-static void serial8250_throttle(struct uart_port *port)
+static inline void start_tx_rs485(struct uart_port *port)
 {
-       port->throttle(port);
+       struct uart_8250_port *up = up_to_u8250p(port);
+       struct uart_8250_em485 *em485 = up->em485;
+       unsigned char mcr;
+
+       if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX))
+               serial8250_stop_rx(&up->port);
+
+       del_timer(&em485->stop_tx_timer);
+       em485->active_timer = NULL;
+
+       mcr = serial_in(up, UART_MCR);
+       if (!!(up->port.rs485.flags & SER_RS485_RTS_ON_SEND) !=
+           !!(mcr & UART_MCR_RTS)) {
+               if (up->port.rs485.flags & SER_RS485_RTS_ON_SEND)
+                       mcr |= UART_MCR_RTS;
+               else
+                       mcr &= ~UART_MCR_RTS;
+               serial_out(up, UART_MCR, mcr);
+
+               if (up->port.rs485.delay_rts_before_send > 0) {
+                       em485->active_timer = &em485->start_tx_timer;
+                       mod_timer(&em485->start_tx_timer, jiffies +
+                               up->port.rs485.delay_rts_before_send * HZ / 1000);
+                       return;
+               }
+       }
+
+       __start_tx(port);
 }
 
-static void serial8250_unthrottle(struct uart_port *port)
+static void serial8250_em485_handle_start_tx(unsigned long arg)
 {
-       port->unthrottle(port);
+       struct uart_8250_port *p = (struct uart_8250_port *)arg;
+       struct uart_8250_em485 *em485 = p->em485;
+       unsigned long flags;
+
+       spin_lock_irqsave(&p->port.lock, flags);
+       if (em485 &&
+           em485->active_timer == &em485->start_tx_timer) {
+               __start_tx(&p->port);
+               em485->active_timer = NULL;
+       }
+       spin_unlock_irqrestore(&p->port.lock, flags);
 }
 
-static void serial8250_stop_rx(struct uart_port *port)
+static void serial8250_start_tx(struct uart_port *port)
 {
        struct uart_8250_port *up = up_to_u8250p(port);
+       struct uart_8250_em485 *em485 = up->em485;
 
-       serial8250_rpm_get(up);
+       serial8250_rpm_get_tx(up);
 
-       up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
-       up->port.read_status_mask &= ~UART_LSR_DR;
-       serial_port_out(port, UART_IER, up->ier);
+       if (em485 &&
+           em485->active_timer == &em485->start_tx_timer)
+               return;
 
-       serial8250_rpm_put(up);
+       if (em485)
+               start_tx_rs485(port);
+       else
+               __start_tx(port);
+}
+
+static void serial8250_throttle(struct uart_port *port)
+{
+       port->throttle(port);
+}
+
+static void serial8250_unthrottle(struct uart_port *port)
+{
+       port->unthrottle(port);
 }
 
 static void serial8250_disable_ms(struct uart_port *port)
@@ -1412,81 +1623,85 @@ static void serial8250_enable_ms(struct uart_port *port)
        serial8250_rpm_put(up);
 }
 
+static void serial8250_read_char(struct uart_8250_port *up, unsigned char lsr)
+{
+       struct uart_port *port = &up->port;
+       unsigned char ch;
+       char flag = TTY_NORMAL;
+
+       if (likely(lsr & UART_LSR_DR))
+               ch = serial_in(up, UART_RX);
+       else
+               /*
+                * Intel 82571 has a Serial over LAN device that will
+                * set UART_LSR_BI without setting UART_LSR_DR when
+                * it receives a break. To avoid reading from the
+                * receive buffer without the UART_LSR_DR bit set, we
+                * just force the read character to be 0.
+                */
+               ch = 0;
+
+       port->icount.rx++;
+
+       lsr |= up->lsr_saved_flags;
+       up->lsr_saved_flags = 0;
+
+       if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
+               if (lsr & UART_LSR_BI) {
+                       lsr &= ~(UART_LSR_FE | UART_LSR_PE);
+                       port->icount.brk++;
+                       /*
+                        * We do the SysRQ and SAK checking
+                        * here because otherwise the break
+                        * may get masked by ignore_status_mask
+                        * or read_status_mask.
+                        */
+                       if (uart_handle_break(port))
+                               return;
+               } else if (lsr & UART_LSR_PE)
+                       port->icount.parity++;
+               else if (lsr & UART_LSR_FE)
+                       port->icount.frame++;
+               if (lsr & UART_LSR_OE)
+                       port->icount.overrun++;
+
+               /*
+                * Mask off conditions which should be ignored.
+                */
+               lsr &= port->read_status_mask;
+
+               if (lsr & UART_LSR_BI) {
+                       DEBUG_INTR("handling break....");
+                       flag = TTY_BREAK;
+               } else if (lsr & UART_LSR_PE)
+                       flag = TTY_PARITY;
+               else if (lsr & UART_LSR_FE)
+                       flag = TTY_FRAME;
+       }
+       if (uart_handle_sysrq_char(port, ch))
+               return;
+
+       uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
+}
+
 /*
  * serial8250_rx_chars: processes according to the passed in LSR
  * value, and returns the remaining LSR bits not handled
  * by this Rx routine.
  */
-unsigned char
-serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
+unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
 {
        struct uart_port *port = &up->port;
-       unsigned char ch;
        int max_count = 256;
-       char flag;
 
        do {
-               if (likely(lsr & UART_LSR_DR))
-                       ch = serial_in(up, UART_RX);
-               else
-                       /*
-                        * Intel 82571 has a Serial Over Lan device that will
-                        * set UART_LSR_BI without setting UART_LSR_DR when
-                        * it receives a break. To avoid reading from the
-                        * receive buffer without UART_LSR_DR bit set, we
-                        * just force the read character to be 0
-                        */
-                       ch = 0;
-
-               flag = TTY_NORMAL;
-               port->icount.rx++;
-
-               lsr |= up->lsr_saved_flags;
-               up->lsr_saved_flags = 0;
-
-               if (unlikely(lsr & UART_LSR_BRK_ERROR_BITS)) {
-                       if (lsr & UART_LSR_BI) {
-                               lsr &= ~(UART_LSR_FE | UART_LSR_PE);
-                               port->icount.brk++;
-                               /*
-                                * We do the SysRQ and SAK checking
-                                * here because otherwise the break
-                                * may get masked by ignore_status_mask
-                                * or read_status_mask.
-                                */
-                               if (uart_handle_break(port))
-                                       goto ignore_char;
-                       } else if (lsr & UART_LSR_PE)
-                               port->icount.parity++;
-                       else if (lsr & UART_LSR_FE)
-                               port->icount.frame++;
-                       if (lsr & UART_LSR_OE)
-                               port->icount.overrun++;
-
-                       /*
-                        * Mask off conditions which should be ignored.
-                        */
-                       lsr &= port->read_status_mask;
-
-                       if (lsr & UART_LSR_BI) {
-                               DEBUG_INTR("handling break....");
-                               flag = TTY_BREAK;
-                       } else if (lsr & UART_LSR_PE)
-                               flag = TTY_PARITY;
-                       else if (lsr & UART_LSR_FE)
-                               flag = TTY_FRAME;
-               }
-               if (uart_handle_sysrq_char(port, ch))
-                       goto ignore_char;
-
-               uart_insert_char(port, lsr, UART_LSR_OE, ch, flag);
-
-ignore_char:
+               serial8250_read_char(up, lsr);
+               if (--max_count == 0)
+                       break;
                lsr = serial_in(up, UART_LSR);
-       } while ((lsr & (UART_LSR_DR | UART_LSR_BI)) && (--max_count > 0));
-       spin_unlock(&port->lock);
+       } while (lsr & (UART_LSR_DR | UART_LSR_BI));
+
        tty_flip_buffer_push(&port->state->port);
-       spin_lock(&port->lock);
        return lsr;
 }
 EXPORT_SYMBOL_GPL(serial8250_rx_chars);
@@ -1519,11 +1734,9 @@ void serial8250_tx_chars(struct uart_8250_port *up)
                port->icount.tx++;
                if (uart_circ_empty(xmit))
                        break;
-               if (up->capabilities & UART_CAP_HFIFO) {
-                       if ((serial_port_in(port, UART_LSR) & BOTH_EMPTY) !=
-                           BOTH_EMPTY)
-                               break;
-               }
+               if ((up->capabilities & UART_CAP_HFIFO) &&
+                   (serial_in(up, UART_LSR) & BOTH_EMPTY) != BOTH_EMPTY)
+                       break;
        } while (--count > 0);
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1752,6 +1965,7 @@ static void wait_for_xmitr(struct uart_8250_port *up, int bits)
        /* Wait up to 1s for flow control if necessary */
        if (up->port.flags & UPF_CONS_FLOW) {
                unsigned int tmout;
+
                for (tmout = 1000000; tmout; tmout--) {
                        unsigned int msr = serial_in(up, UART_MSR);
                        up->msr_saved_flags |= msr & MSR_SAVE_FLAGS;
@@ -1985,23 +2199,23 @@ int serial8250_do_startup(struct uart_port *port)
 
        serial8250_set_mctrl(port, port->mctrl);
 
-       /* Serial over Lan (SoL) hack:
-          Intel 8257x Gigabit ethernet chips have a
-          16550 emulation, to be used for Serial Over Lan.
-          Those chips take a longer time than a normal
-          serial device to signalize that a transmission
-          data was queued. Due to that, the above test generally
-          fails. One solution would be to delay the reading of
-          iir. However, this is not reliable, since the timeout
-          is variable. So, let's just don't test if we receive
-          TX irq. This way, we'll never enable UART_BUG_TXEN.
+       /*
+        * Serial over LAN (SoL) hack:
+        * Intel 8257x Gigabit Ethernet chips have a 16550 emulation, to be
+        * used for Serial over LAN.  Those chips take longer than a normal
+        * serial device to signal that transmit data has been queued. Due
+        * to that, the above test generally fails.  One solution would be
+        * to delay the reading of IIR; however, this is not reliable,
+        * since the timeout is variable.  So let's just not test whether
+        * we receive a TX irq.  This way, we'll never enable
+        * UART_BUG_TXEN.
         */
        if (up->port.flags & UPF_NO_TXEN_TEST)
                goto dont_test_tx_en;
 
        /*
-        * Do a quick test to see if we receive an
-        * interrupt when we enable the TX irq.
+        * Do a quick test to see if we receive an interrupt when we enable
+        * the TX irq.
         */
        serial_port_out(port, UART_IER, UART_IER_THRI);
        lsr = serial_port_in(port, UART_LSR);
@@ -2084,8 +2298,12 @@ void serial8250_do_shutdown(struct uart_port *port)
        /*
         * Disable interrupts from this port
         */
+       spin_lock_irqsave(&port->lock, flags);
        up->ier = 0;
        serial_port_out(port, UART_IER, 0);
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       synchronize_irq(port->irq);
 
        if (up->dma)
                serial8250_release_dma(up);
@@ -2251,9 +2469,9 @@ static void serial8250_set_divisor(struct uart_port *port, unsigned int baud,
                serial_port_out(port, 0x2, quot_frac);
 }
 
-static unsigned int
-serial8250_get_baud_rate(struct uart_port *port, struct ktermios *termios,
-                        struct ktermios *old)
+static unsigned int serial8250_get_baud_rate(struct uart_port *port,
+                                            struct ktermios *termios,
+                                            struct ktermios *old)
 {
        unsigned int tolerance = port->uartclk / 100;
 
@@ -2270,7 +2488,7 @@ serial8250_get_baud_rate(struct uart_port *port, struct ktermios *termios,
 
 void
 serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
-                         struct ktermios *old)
+                         struct ktermios *old)
 {
        struct uart_8250_port *up = up_to_u8250p(port);
        unsigned char cval;
@@ -2745,8 +2963,7 @@ serial8250_verify_port(struct uart_port *port, struct serial_struct *ser)
        return 0;
 }
 
-static const char *
-serial8250_type(struct uart_port *port)
+static const char *serial8250_type(struct uart_port *port)
 {
        int type = port->type;
 
index b03cb51751130708efc9fe9a0b9c2713efc6fd55..67ad6b0d595b094ca3033d9fda091b337bf19f22 100644 (file)
@@ -272,6 +272,30 @@ config SERIAL_8250_ACORN
          system, say Y to this option.  The driver can handle 1, 2, or 3 port
          cards.  If unsure, say N.
 
+config SERIAL_8250_BCM2835AUX
+       tristate "BCM2835 auxiliary mini UART support"
+       depends on ARCH_BCM2835 || COMPILE_TEST
+       depends on SERIAL_8250 && SERIAL_8250_SHARE_IRQ
+       help
+         Support for the BCM2835 auxiliary mini UART.
+
+         Features and limitations of the UART are
+           Registers are similar to 16650 registers,
+             set bits in the control registers that are unsupported
+             are ignored and read back as 0
+           7/8 bit operation with 1 start and 1 stop bit
+           8 symbols deep fifo for rx and tx
+           SW controlled RTS and SW readable CTS
+           Clock rate derived from system clock
+           Uses 8 times oversampling (compared to 16 times for 16650)
+           Missing break detection (but break generation)
+           Missing framing error detection
+           Missing parity bit
+           Missing receive time-out interrupt
+           Missing DCD, DSR, DTR and RI signals
+
+         If unsure, say N.
+
 config SERIAL_8250_FSL
        bool
        depends on SERIAL_8250_CONSOLE
index b9b9bca5b6c3d860294fdd3dd2cab0187b062cf9..5c1869fdfd4cf9405e1beae80a90a0389edbfabe 100644 (file)
@@ -12,6 +12,7 @@ obj-$(CONFIG_SERIAL_8250_PCI)         += 8250_pci.o
 obj-$(CONFIG_SERIAL_8250_HP300)                += 8250_hp300.o
 obj-$(CONFIG_SERIAL_8250_CS)           += serial_cs.o
 obj-$(CONFIG_SERIAL_8250_ACORN)                += 8250_acorn.o
+obj-$(CONFIG_SERIAL_8250_BCM2835AUX)   += 8250_bcm2835aux.o
 obj-$(CONFIG_SERIAL_8250_CONSOLE)      += 8250_early.o
 obj-$(CONFIG_SERIAL_8250_FOURPORT)     += 8250_fourport.o
 obj-$(CONFIG_SERIAL_8250_ACCENT)       += 8250_accent.o
index 4d180c9423effe3994f2226bbfc7325829cf2156..933c2688dd7ea90c4a38d7f55938bfc454e9a67c 100644 (file)
@@ -28,7 +28,7 @@
     and other provisions required by the GPL.  If you do not delete
     the provisions above, a recipient may use your version of this
     file under either the MPL or the GPL.
-    
+
 ======================================================================*/
 
 #include <linux/module.h>
@@ -257,7 +257,7 @@ static const struct serial_quirk quirks[] = {
 };
 
 
-static int serial_config(struct pcmcia_device * link);
+static int serial_config(struct pcmcia_device *link);
 
 
 static void serial_remove(struct pcmcia_device *link)
@@ -309,7 +309,7 @@ static int serial_probe(struct pcmcia_device *link)
        dev_dbg(&link->dev, "serial_attach()\n");
 
        /* Create new serial device */
-       info = kzalloc(sizeof (*info), GFP_KERNEL);
+       info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return -ENOMEM;
        info->p_dev = link;
@@ -339,7 +339,7 @@ static void serial_detach(struct pcmcia_device *link)
 
 /*====================================================================*/
 
-static int setup_serial(struct pcmcia_device *handle, struct serial_info * info,
+static int setup_serial(struct pcmcia_device *handle, struct serial_info *info,
                        unsigned int iobase, int irq)
 {
        struct uart_8250_port uart;
@@ -441,16 +441,20 @@ static int simple_config(struct pcmcia_device *link)
        struct serial_info *info = link->priv;
        int i = -ENODEV, try;
 
-       /* First pass: look for a config entry that looks normal.
-        * Two tries: without IO aliases, then with aliases */
+       /*
+        * First pass: look for a config entry that looks normal.
+        * Two tries: without IO aliases, then with aliases.
+        */
        link->config_flags |= CONF_AUTO_SET_VPP;
        for (try = 0; try < 4; try++)
                if (!pcmcia_loop_config(link, simple_config_check, &try))
                        goto found_port;
 
-       /* Second pass: try to find an entry that isn't picky about
-          its base address, then try to grab any standard serial port
-          address, and finally try to get any free port. */
+       /*
+        * Second pass: try to find an entry that isn't picky about
+        * its base address, then try to grab any standard serial port
+        * address, and finally try to get any free port.
+        */
        if (!pcmcia_loop_config(link, simple_config_check_notpicky, NULL))
                goto found_port;
 
@@ -480,8 +484,10 @@ static int multi_config_check(struct pcmcia_device *p_dev, void *priv_data)
        if (p_dev->resource[1]->end)
                return -EINVAL;
 
-       /* The quad port cards have bad CIS's, so just look for a
-          window larger than 8 ports and assume it will be right */
+       /*
+        * The quad port cards have bad CIS's, so just look for a
+        * window larger than 8 ports and assume it will be right.
+        */
        if (p_dev->resource[0]->end <= 8)
                return -EINVAL;
 
@@ -527,8 +533,8 @@ static int multi_config(struct pcmcia_device *link)
                info->multi = 2;
                if (pcmcia_loop_config(link, multi_config_check_notpicky,
                                       &base2)) {
-                       dev_warn(&link->dev, "no usable port range "
-                              "found, giving up\n");
+                       dev_warn(&link->dev,
+                                "no usable port range found, giving up\n");
                        return -ENODEV;
                }
        }
@@ -600,7 +606,7 @@ static int serial_check_for_multi(struct pcmcia_device *p_dev,  void *priv_data)
 }
 
 
-static int serial_config(struct pcmcia_device * link)
+static int serial_config(struct pcmcia_device *link)
 {
        struct serial_info *info = link->priv;
        int i;
@@ -623,8 +629,10 @@ static int serial_config(struct pcmcia_device * link)
                        break;
                }
 
-       /* Another check for dual-serial cards: look for either serial or
-          multifunction cards that ask for appropriate IO port ranges */
+       /*
+        * Another check for dual-serial cards: look for either serial or
+        * multifunction cards that ask for appropriate IO port ranges.
+        */
        if ((info->multi == 0) &&
            (link->has_func_id) &&
            (link->socket->pcmcia_pfc == 0) &&
@@ -701,7 +709,7 @@ static const struct pcmcia_device_id serial_ids[] = {
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58),
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e),
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555),
-       PCMCIA_PFC_DEVICE_PROD_ID12(1, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
+       PCMCIA_PFC_DEVICE_PROD_ID12(1, "NEC", "PK-UG-J001", 0x18df0ba0, 0x831b1064),
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9),
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed),
        PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
@@ -797,30 +805,30 @@ static const struct pcmcia_device_id serial_ids[] = {
        PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"),
        PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"),
        PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"),
-       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100  1.00.",0x19ca78af,0xf964f42b),
-       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83),
-       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232  1.00.",0x19ca78af,0x69fb7490),
-       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232",0x19ca78af,0xb6bc0235),
-       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232",0x63f2e0bd,0xb9e175d3),
-       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.","SERIAL CARD: CF232-5",0x63f2e0bd,0xfce33442),
-       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232",0x3beb8cf2,0x171e7190),
-       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF232-5",0x3beb8cf2,0x20da4262),
-       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF428",0x3beb8cf2,0xea5dd57d),
-       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: CF500",0x3beb8cf2,0xd77255fa),
-       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: IC232",0x3beb8cf2,0x6a709903),
-       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: SL232",0x3beb8cf2,0x18430676),
-       PCMCIA_DEVICE_PROD_ID12("Elan","Serial Port: XL232",0x3beb8cf2,0x6f933767),
-       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
-       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
-       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
-       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
-       PCMCIA_MFC_DEVICE_PROD_ID12(0,"Elan","Serial+Parallel Port: SP230",0x3beb8cf2,0xdb9e58bc),
-       PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: CF332",0x3beb8cf2,0x16dc1ba7),
-       PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL332",0x3beb8cf2,0x19816c41),
-       PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL385",0x3beb8cf2,0x64112029),
-       PCMCIA_MFC_DEVICE_PROD_ID12(1,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
-       PCMCIA_MFC_DEVICE_PROD_ID12(2,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
-       PCMCIA_MFC_DEVICE_PROD_ID12(3,"Elan","Serial Port: SL432",0x3beb8cf2,0x1cce7ac4),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100  1.00.", 0x19ca78af, 0xf964f42b),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL100", 0x19ca78af, 0x71d98e83),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232  1.00.", 0x19ca78af, 0x69fb7490),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.", "SERIAL CARD: SL232", 0x19ca78af, 0xb6bc0235),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.", "SERIAL CARD: CF232", 0x63f2e0bd, 0xb9e175d3),
+       PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c2000.", "SERIAL CARD: CF232-5", 0x63f2e0bd, 0xfce33442),
+       PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF232", 0x3beb8cf2, 0x171e7190),
+       PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF232-5", 0x3beb8cf2, 0x20da4262),
+       PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF428", 0x3beb8cf2, 0xea5dd57d),
+       PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: CF500", 0x3beb8cf2, 0xd77255fa),
+       PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: IC232", 0x3beb8cf2, 0x6a709903),
+       PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: SL232", 0x3beb8cf2, 0x18430676),
+       PCMCIA_DEVICE_PROD_ID12("Elan", "Serial Port: XL232", 0x3beb8cf2, 0x6f933767),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: CF332", 0x3beb8cf2, 0x16dc1ba7),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: SL332", 0x3beb8cf2, 0x19816c41),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: SL385", 0x3beb8cf2, 0x64112029),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4),
+       PCMCIA_MFC_DEVICE_PROD_ID12(0, "Elan", "Serial+Parallel Port: SP230", 0x3beb8cf2, 0xdb9e58bc),
+       PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: CF332", 0x3beb8cf2, 0x16dc1ba7),
+       PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: SL332", 0x3beb8cf2, 0x19816c41),
+       PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: SL385", 0x3beb8cf2, 0x64112029),
+       PCMCIA_MFC_DEVICE_PROD_ID12(1, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4),
+       PCMCIA_MFC_DEVICE_PROD_ID12(2, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4),
+       PCMCIA_MFC_DEVICE_PROD_ID12(3, "Elan", "Serial Port: SL432", 0x3beb8cf2, 0x1cce7ac4),
        PCMCIA_DEVICE_MANF_CARD(0x0279, 0x950b),
        /* too generic */
        /* PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0160, 0x0002), */
index 39721ec4f415cf4d1cb013f9e2f7ea254f97fde1..bdbe1c533c6a1a3485722d53aa7466ba69f24b41 100644 (file)
@@ -610,6 +610,7 @@ config SERIAL_UARTLITE_CONSOLE
        bool "Support for console on Xilinx uartlite serial port"
        depends on SERIAL_UARTLITE=y
        select SERIAL_CORE_CONSOLE
+       select SERIAL_EARLYCON
        help
          Say Y here if you wish to use a Xilinx uartlite as the system
          console (the system console is the device which receives all kernel
@@ -745,6 +746,12 @@ config SERIAL_SH_SCI_CONSOLE
        depends on SERIAL_SH_SCI=y
        select SERIAL_CORE_CONSOLE
 
+config SERIAL_SH_SCI_EARLYCON
+       bool "Support for early console on SuperH SCI(F)"
+       depends on SERIAL_SH_SCI=y
+       select SERIAL_CORE_CONSOLE
+       select SERIAL_EARLYCON
+
 config SERIAL_SH_SCI_DMA
        bool "DMA support"
        depends on SERIAL_SH_SCI && DMA_ENGINE
@@ -793,17 +800,6 @@ config SERIAL_CORE_CONSOLE
 config CONSOLE_POLL
        bool
 
-config SERIAL_68328
-       bool "68328 serial support"
-       depends on M68328 || M68EZ328 || M68VZ328
-       help
-         This driver supports the built-in serial port of the Motorola 68328
-         (standard, EZ and VZ varieties).
-
-config SERIAL_68328_RTS_CTS
-       bool "Support RTS/CTS on 68328 serial port"
-       depends on SERIAL_68328
-
 config SERIAL_MCF
        bool "Coldfire serial support"
        depends on COLDFIRE
index b391c9b319602f426de38d4dce1f715130be0301..ceba33c4ebb43fb92361acaf7b8d3d4699b32521 100644 (file)
@@ -34,7 +34,6 @@ obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
 obj-$(CONFIG_SERIAL_MAX310X) += max310x.o
 obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
 obj-$(CONFIG_SERIAL_MUX) += mux.o
-obj-$(CONFIG_SERIAL_68328) += 68328serial.o
 obj-$(CONFIG_SERIAL_MCF) += mcf.o
 obj-$(CONFIG_SERIAL_PMACZILOG) += pmac_zilog.o
 obj-$(CONFIG_SERIAL_HS_LPC32XX) += lpc32xx_hs.o
index c0da0ccbbcf5476575c9b5f1c06cbb4827056ce4..500232ad38f32c1a1b4ee358078a3ae4ee247689 100644 (file)
@@ -187,7 +187,7 @@ static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
        [REG_DMACR] = ZX_UART011_DMACR,
 };
 
-static struct vendor_data vendor_zte = {
+static struct vendor_data vendor_zte __maybe_unused = {
        .reg_offset             = pl011_zte_offsets,
        .access_32b             = true,
        .ifls                   = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
@@ -1167,7 +1167,7 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
 
        /* Disable RX and TX DMA */
        while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY)
-               barrier();
+               cpu_relax();
 
        spin_lock_irq(&uap->port.lock);
        uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
@@ -1611,7 +1611,7 @@ static void pl011_put_poll_char(struct uart_port *port,
            container_of(port, struct uart_amba_port, port);
 
        while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
-               barrier();
+               cpu_relax();
 
        pl011_write(ch, uap, REG_DR);
 }
@@ -2150,7 +2150,7 @@ static void pl011_console_putchar(struct uart_port *port, int ch)
            container_of(port, struct uart_amba_port, port);
 
        while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
-               barrier();
+               cpu_relax();
        pl011_write(ch, uap, REG_DR);
 }
 
@@ -2158,7 +2158,7 @@ static void
 pl011_console_write(struct console *co, const char *s, unsigned int count)
 {
        struct uart_amba_port *uap = amba_ports[co->index];
-       unsigned int status, old_cr = 0, new_cr;
+       unsigned int old_cr = 0, new_cr;
        unsigned long flags;
        int locked = 1;
 
@@ -2188,9 +2188,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
         *      Finally, wait for transmitter to become empty
         *      and restore the TCR
         */
-       do {
-               status = pl011_read(uap, REG_FR);
-       } while (status & UART01x_FR_BUSY);
+       while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY)
+               cpu_relax();
        if (!uap->vendor->always_enabled)
                pl011_write(old_cr, uap, REG_CR);
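
The amba-pl011 hunks above replace barrier() calls and empty busy-wait
bodies with cpu_relax() while polling the flag register. A generic sketch
of such a polling loop; the helper name and register layout are invented
for illustration, and the cpu_relax() include may vary by architecture:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/processor.h>      /* cpu_relax() */

/* Spin until 'busy_bit' clears in a memory-mapped status register. */
static void wait_while_busy(void __iomem *status_reg, u32 busy_bit)
{
        while (readl(status_reg) & busy_bit)
                cpu_relax();    /* pipeline/power hint while spinning */
}
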
 
@@ -2302,13 +2301,13 @@ static struct console amba_console = {
 static void pl011_putc(struct uart_port *port, int c)
 {
        while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
-               ;
+               cpu_relax();
        if (port->iotype == UPIO_MEM32)
                writel(c, port->membase + UART01x_DR);
        else
                writeb(c, port->membase + UART01x_DR);
        while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
-               ;
+               cpu_relax();
 }
 
 static void pl011_early_write(struct console *con, const char *s, unsigned n)
@@ -2327,7 +2326,6 @@ static int __init pl011_early_console_setup(struct earlycon_device *device,
        device->con->write = pl011_early_write;
        return 0;
 }
-EARLYCON_DECLARE(pl011, pl011_early_console_setup);
 OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
 
 #else
index 03ebe401fff7483c269778f4f57ce923c1282267..3a1de5c87cb430111bddcb8733884667528e1366 100644 (file)
@@ -576,7 +576,6 @@ static int __init arc_early_console_setup(struct earlycon_device *dev,
        dev->con->write = arc_early_serial_write;
        return 0;
 }
-EARLYCON_DECLARE(arc_uart, arc_early_console_setup);
 OF_EARLYCON_DECLARE(arc_uart, "snps,arc-uart", arc_early_console_setup);
 
 #endif /* CONFIG_SERIAL_ARC_CONSOLE */
index 1c0884d8ef3268f88c0be4fc93bfb98f0bb91ab0..b30c93f80899b602bc8ecdb2b1301873aad259d0 100644 (file)
@@ -159,8 +159,8 @@ struct atmel_uart_port {
        u32                     rts_high;
        u32                     rts_low;
        bool                    ms_irq_enabled;
-       bool                    is_usart;       /* usart or uart */
-       struct timer_list       uart_timer;     /* uart timer */
+       bool                    has_hw_timer;
+       struct timer_list       uart_timer;
 
        bool                    suspended;
        unsigned int            pending;
@@ -1710,19 +1710,20 @@ static void atmel_get_ip_name(struct uart_port *port)
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
        int name = atmel_uart_readl(port, ATMEL_US_NAME);
        u32 version;
-       int usart, uart;
-       /* usart and uart ascii */
-       usart = 0x55534152;
-       uart = 0x44424755;
-
-       atmel_port->is_usart = false;
-
-       if (name == usart) {
-               dev_dbg(port->dev, "This is usart\n");
-               atmel_port->is_usart = true;
-       } else if (name == uart) {
-               dev_dbg(port->dev, "This is uart\n");
-               atmel_port->is_usart = false;
+       u32 usart, dbgu_uart, new_uart;
+       /* ASCII decoding for IP version */
+       usart = 0x55534152;     /* USAR(T) */
+       dbgu_uart = 0x44424755; /* DBGU */
+       new_uart = 0x55415254;  /* UART */
+
+       atmel_port->has_hw_timer = false;
+
+       if (name == usart || name == new_uart) {
+               dev_dbg(port->dev, "Usart or uart with hw timer\n");
+               atmel_port->has_hw_timer = true;
+       } else if (name == dbgu_uart) {
+               dev_dbg(port->dev, "Dbgu or uart without hw timer\n");
+               atmel_port->has_hw_timer = false;
        } else {
                /* fallback for older SoCs: use version field */
                version = atmel_uart_readl(port, ATMEL_US_VERSION);
@@ -1730,12 +1731,12 @@ static void atmel_get_ip_name(struct uart_port *port)
                case 0x302:
                case 0x10213:
                        dev_dbg(port->dev, "This version is usart\n");
-                       atmel_port->is_usart = true;
+                       atmel_port->has_hw_timer = true;
                        break;
                case 0x203:
                case 0x10202:
                        dev_dbg(port->dev, "This version is uart\n");
-                       atmel_port->is_usart = false;
+                       atmel_port->has_hw_timer = false;
                        break;
                default:
                        dev_err(port->dev, "Not supported ip name nor version, set to uart\n");
@@ -1835,7 +1836,7 @@ static int atmel_startup(struct uart_port *port)
 
        if (atmel_use_pdc_rx(port)) {
                /* set UART timeout */
-               if (!atmel_port->is_usart) {
+               if (!atmel_port->has_hw_timer) {
                        mod_timer(&atmel_port->uart_timer,
                                        jiffies + uart_poll_timeout(port));
                /* set USART timeout */
@@ -1850,7 +1851,7 @@ static int atmel_startup(struct uart_port *port)
                atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
        } else if (atmel_use_dma_rx(port)) {
                /* set UART timeout */
-               if (!atmel_port->is_usart) {
+               if (!atmel_port->has_hw_timer) {
                        mod_timer(&atmel_port->uart_timer,
                                        jiffies + uart_poll_timeout(port));
                /* set USART timeout */
@@ -2478,13 +2479,13 @@ static int __init atmel_console_init(void)
                struct atmel_uart_data *pdata =
                        dev_get_platdata(&atmel_default_console_device->dev);
                int id = pdata->num;
-               struct atmel_uart_port *port = &atmel_ports[id];
+               struct atmel_uart_port *atmel_port = &atmel_ports[id];
 
-               port->backup_imr = 0;
-               port->uart.line = id;
+               atmel_port->backup_imr = 0;
+               atmel_port->uart.line = id;
 
                add_preferred_console(ATMEL_DEVICENAME, id, NULL);
-               ret = atmel_init_port(port, atmel_default_console_device);
+               ret = atmel_init_port(atmel_port, atmel_default_console_device);
                if (ret)
                        return ret;
                register_console(&atmel_console);
@@ -2599,23 +2600,23 @@ static int atmel_serial_resume(struct platform_device *pdev)
 #define atmel_serial_resume NULL
 #endif
 
-static void atmel_serial_probe_fifos(struct atmel_uart_port *port,
+static void atmel_serial_probe_fifos(struct atmel_uart_port *atmel_port,
                                     struct platform_device *pdev)
 {
-       port->fifo_size = 0;
-       port->rts_low = 0;
-       port->rts_high = 0;
+       atmel_port->fifo_size = 0;
+       atmel_port->rts_low = 0;
+       atmel_port->rts_high = 0;
 
        if (of_property_read_u32(pdev->dev.of_node,
                                 "atmel,fifo-size",
-                                &port->fifo_size))
+                                &atmel_port->fifo_size))
                return;
 
-       if (!port->fifo_size)
+       if (!atmel_port->fifo_size)
                return;
 
-       if (port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
-               port->fifo_size = 0;
+       if (atmel_port->fifo_size < ATMEL_MIN_FIFO_SIZE) {
+               atmel_port->fifo_size = 0;
                dev_err(&pdev->dev, "Invalid FIFO size\n");
                return;
        }
@@ -2628,22 +2629,22 @@ static void atmel_serial_probe_fifos(struct atmel_uart_port *port,
         * Threshold to a reasonably high value respecting this 16 data
         * empirical rule when possible.
         */
-       port->rts_high = max_t(int, port->fifo_size >> 1,
-                              port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
-       port->rts_low  = max_t(int, port->fifo_size >> 2,
-                              port->fifo_size - ATMEL_RTS_LOW_OFFSET);
+       atmel_port->rts_high = max_t(int, atmel_port->fifo_size >> 1,
+                              atmel_port->fifo_size - ATMEL_RTS_HIGH_OFFSET);
+       atmel_port->rts_low  = max_t(int, atmel_port->fifo_size >> 2,
+                              atmel_port->fifo_size - ATMEL_RTS_LOW_OFFSET);
 
        dev_info(&pdev->dev, "Using FIFO (%u data)\n",
-                port->fifo_size);
+                atmel_port->fifo_size);
        dev_dbg(&pdev->dev, "RTS High Threshold : %2u data\n",
-               port->rts_high);
+               atmel_port->rts_high);
        dev_dbg(&pdev->dev, "RTS Low Threshold  : %2u data\n",
-               port->rts_low);
+               atmel_port->rts_low);
 }
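
As a worked example of the RTS threshold arithmetic in
atmel_serial_probe_fifos() above, here is a small standalone program; the
offsets 16 and 20 are assumptions standing in for ATMEL_RTS_HIGH_OFFSET
and ATMEL_RTS_LOW_OFFSET and may differ from the driver's definitions:

#include <stdio.h>

#define RTS_HIGH_OFFSET 16      /* assumed stand-in for ATMEL_RTS_HIGH_OFFSET */
#define RTS_LOW_OFFSET  20      /* assumed stand-in for ATMEL_RTS_LOW_OFFSET */

static int max_int(int a, int b)
{
        return a > b ? a : b;
}

int main(void)
{
        int fifo_size = 32;     /* hypothetical FIFO depth */
        int rts_high = max_int(fifo_size >> 1, fifo_size - RTS_HIGH_OFFSET);
        int rts_low  = max_int(fifo_size >> 2, fifo_size - RTS_LOW_OFFSET);

        /* prints: rts_high=16 rts_low=12 */
        printf("rts_high=%d rts_low=%d\n", rts_high, rts_low);
        return 0;
}
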
 
 static int atmel_serial_probe(struct platform_device *pdev)
 {
-       struct atmel_uart_port *port;
+       struct atmel_uart_port *atmel_port;
        struct device_node *np = pdev->dev.of_node;
        struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
        void *data;
@@ -2674,87 +2675,88 @@ static int atmel_serial_probe(struct platform_device *pdev)
                goto err;
        }
 
-       port = &atmel_ports[ret];
-       port->backup_imr = 0;
-       port->uart.line = ret;
-       atmel_serial_probe_fifos(port, pdev);
+       atmel_port = &atmel_ports[ret];
+       atmel_port->backup_imr = 0;
+       atmel_port->uart.line = ret;
+       atmel_serial_probe_fifos(atmel_port, pdev);
 
-       spin_lock_init(&port->lock_suspended);
+       spin_lock_init(&atmel_port->lock_suspended);
 
-       ret = atmel_init_port(port, pdev);
+       ret = atmel_init_port(atmel_port, pdev);
        if (ret)
                goto err_clear_bit;
 
-       port->gpios = mctrl_gpio_init(&port->uart, 0);
-       if (IS_ERR(port->gpios)) {
-               ret = PTR_ERR(port->gpios);
+       atmel_port->gpios = mctrl_gpio_init(&atmel_port->uart, 0);
+       if (IS_ERR(atmel_port->gpios)) {
+               ret = PTR_ERR(atmel_port->gpios);
                goto err_clear_bit;
        }
 
-       if (!atmel_use_pdc_rx(&port->uart)) {
+       if (!atmel_use_pdc_rx(&atmel_port->uart)) {
                ret = -ENOMEM;
                data = kmalloc(sizeof(struct atmel_uart_char)
                                * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
                if (!data)
                        goto err_alloc_ring;
-               port->rx_ring.buf = data;
+               atmel_port->rx_ring.buf = data;
        }
 
-       rs485_enabled = port->uart.rs485.flags & SER_RS485_ENABLED;
+       rs485_enabled = atmel_port->uart.rs485.flags & SER_RS485_ENABLED;
 
-       ret = uart_add_one_port(&atmel_uart, &port->uart);
+       ret = uart_add_one_port(&atmel_uart, &atmel_port->uart);
        if (ret)
                goto err_add_port;
 
 #ifdef CONFIG_SERIAL_ATMEL_CONSOLE
-       if (atmel_is_console_port(&port->uart)
+       if (atmel_is_console_port(&atmel_port->uart)
                        && ATMEL_CONSOLE_DEVICE->flags & CON_ENABLED) {
                /*
                 * The serial core enabled the clock for us, so undo
                 * the clk_prepare_enable() in atmel_console_setup()
                 */
-               clk_disable_unprepare(port->clk);
+               clk_disable_unprepare(atmel_port->clk);
        }
 #endif
 
        device_init_wakeup(&pdev->dev, 1);
-       platform_set_drvdata(pdev, port);
+       platform_set_drvdata(pdev, atmel_port);
 
        /*
         * The peripheral clock has been disabled by atmel_init_port():
         * enable it before accessing I/O registers
         */
-       clk_prepare_enable(port->clk);
+       clk_prepare_enable(atmel_port->clk);
 
        if (rs485_enabled) {
-               atmel_uart_writel(&port->uart, ATMEL_US_MR,
+               atmel_uart_writel(&atmel_port->uart, ATMEL_US_MR,
                                  ATMEL_US_USMODE_NORMAL);
-               atmel_uart_writel(&port->uart, ATMEL_US_CR, ATMEL_US_RTSEN);
+               atmel_uart_writel(&atmel_port->uart, ATMEL_US_CR,
+                                 ATMEL_US_RTSEN);
        }
 
        /*
         * Get port name of usart or uart
         */
-       atmel_get_ip_name(&port->uart);
+       atmel_get_ip_name(&atmel_port->uart);
 
        /*
         * The peripheral clock can now safely be disabled till the port
         * is used
         */
-       clk_disable_unprepare(port->clk);
+       clk_disable_unprepare(atmel_port->clk);
 
        return 0;
 
 err_add_port:
-       kfree(port->rx_ring.buf);
-       port->rx_ring.buf = NULL;
+       kfree(atmel_port->rx_ring.buf);
+       atmel_port->rx_ring.buf = NULL;
 err_alloc_ring:
-       if (!atmel_is_console_port(&port->uart)) {
-               clk_put(port->clk);
-               port->clk = NULL;
+       if (!atmel_is_console_port(&atmel_port->uart)) {
+               clk_put(atmel_port->clk);
+               atmel_port->clk = NULL;
        }
 err_clear_bit:
-       clear_bit(port->uart.line, atmel_ports_in_use);
+       clear_bit(atmel_port->uart.line, atmel_ports_in_use);
 err:
        return ret;
 }
index f13f2ebd215bc9afe2ad64ae9070b8c069967d18..c0172bf54a9b3d8052fc6416f6731d1a45a6c9eb 100644 (file)
@@ -1413,9 +1413,8 @@ rs_stop(struct tty_struct *tty)
                xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char,
                                STOP_CHAR(info->port.tty));
                xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, stop);
-               if (tty->termios.c_iflag & IXON ) {
+               if (I_IXON(tty))
                        xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
-               }
 
                *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
                local_irq_restore(flags);
@@ -1436,9 +1435,8 @@ rs_start(struct tty_struct *tty)
                                         info->xmit.tail,SERIAL_XMIT_SIZE)));
                xoff = IO_FIELD(R_SERIAL0_XOFF, xoff_char, STOP_CHAR(tty));
                xoff |= IO_STATE(R_SERIAL0_XOFF, tx_stop, enable);
-               if (tty->termios.c_iflag & IXON ) {
+               if (I_IXON(tty))
                        xoff |= IO_STATE(R_SERIAL0_XOFF, auto_xoff, enable);
-               }
 
                *((unsigned long *)&info->ioport[REG_XOFF]) = xoff;
                if (!info->uses_dma_out &&
@@ -2968,7 +2966,7 @@ static int rs_raw_write(struct tty_struct *tty,
 
        local_save_flags(flags);
        DFLOW(DEBUG_LOG(info->line, "write count %i ", count));
-       DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty)));
+       DFLOW(DEBUG_LOG(info->line, "ldisc\n"));
 
 
        /* The local_irq_disable/restore_flags pairs below are needed
@@ -3161,13 +3159,12 @@ rs_throttle(struct tty_struct * tty)
 {
        struct e100_serial *info = (struct e100_serial *)tty->driver_data;
 #ifdef SERIAL_DEBUG_THROTTLE
-       printk("throttle %s: %lu....\n", tty_name(tty),
-              (unsigned long)tty->ldisc.chars_in_buffer(tty));
+       printk("throttle %s ....\n", tty_name(tty));
 #endif
-       DFLOW(DEBUG_LOG(info->line,"rs_throttle %lu\n", tty->ldisc.chars_in_buffer(tty)));
+       DFLOW(DEBUG_LOG(info->line,"rs_throttle\n"));
 
        /* Do RTS before XOFF since XOFF might take some time */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                /* Turn off RTS line */
                e100_rts(info, 0);
        }
@@ -3181,13 +3178,12 @@ rs_unthrottle(struct tty_struct * tty)
 {
        struct e100_serial *info = (struct e100_serial *)tty->driver_data;
 #ifdef SERIAL_DEBUG_THROTTLE
-       printk("unthrottle %s: %lu....\n", tty_name(tty),
-              (unsigned long)tty->ldisc.chars_in_buffer(tty));
+       printk("unthrottle %s ....\n", tty_name(tty));
 #endif
-       DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc %d\n", tty->ldisc.chars_in_buffer(tty)));
+       DFLOW(DEBUG_LOG(info->line,"rs_unthrottle ldisc\n"));
        DFLOW(DEBUG_LOG(info->line,"rs_unthrottle flip.count: %i\n", tty->flip.count));
        /* Do RTS before XOFF since XOFF might take some time */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                /* Assert RTS line  */
                e100_rts(info, 1);
        }
@@ -3555,8 +3551,7 @@ rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        change_speed(info);
 
        /* Handle turning off CRTSCTS */
-       if ((old_termios->c_cflag & CRTSCTS) &&
-           !(tty->termios.c_cflag & CRTSCTS))
+       if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty))
                rs_start(tty);
 
 }
@@ -3615,7 +3610,6 @@ rs_close(struct tty_struct *tty, struct file * filp)
                local_irq_restore(flags);
                return;
        }
-       info->port.flags |= ASYNC_CLOSING;
        /*
         * Now we wait for the transmit buffer to clear; and we notify
         * the line discipline to only process XON/XOFF characters.
@@ -3654,7 +3648,7 @@ rs_close(struct tty_struct *tty, struct file * filp)
                        schedule_timeout_interruptible(info->port.close_delay);
                wake_up_interruptible(&info->port.open_wait);
        }
-       info->port.flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
+       info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
        local_irq_restore(flags);
 
        /* port closed */
@@ -3767,9 +3761,8 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
                return 0;
        }
 
-       if (tty->termios.c_cflag & CLOCAL) {
-                       do_clocal = 1;
-       }
+       if (C_CLOCAL(tty))
+               do_clocal = 1;
 
        /*
         * Block waiting for the carrier detect and the line to become
index 3f2423690d01c155338d3e273ac0801b273ca5a8..067783f0523c5a8a69a63e1333a7d010c1d203f6 100644 (file)
@@ -19,7 +19,8 @@
 #include <linux/io.h>
 #include <linux/serial_core.h>
 #include <linux/sizes.h>
-#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
 
 #ifdef CONFIG_FIX_EARLYCON_MEM
 #include <asm/fixmap.h>
 #include <asm/serial.h>
 
 static struct console early_con = {
-       .name =         "uart", /* 8250 console switch requires this name */
+       .name =         "uart",         /* fixed up at earlycon registration */
        .flags =        CON_PRINTBUFFER | CON_BOOT,
-       .index =        -1,
+       .index =        0,
 };
 
 static struct earlycon_device early_console_dev = {
        .con = &early_con,
 };
 
-extern struct earlycon_id __earlycon_table[];
-static const struct earlycon_id __earlycon_table_sentinel
-       __used __section(__earlycon_table_end);
-
-static const struct of_device_id __earlycon_of_table_sentinel
-       __used __section(__earlycon_of_table_end);
-
 static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
 {
        void __iomem *base;
@@ -61,6 +55,39 @@ static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
        return base;
 }
 
+static void __init earlycon_init(struct earlycon_device *device,
+                                const char *name)
+{
+       struct console *earlycon = device->con;
+       struct uart_port *port = &device->port;
+       const char *s;
+       size_t len;
+
+       /* scan backwards from end of string for first non-numeral */
+       for (s = name + strlen(name);
+            s > name && s[-1] >= '0' && s[-1] <= '9';
+            s--)
+               ;
+       if (*s)
+               earlycon->index = simple_strtoul(s, NULL, 10);
+       len = s - name;
+       strlcpy(earlycon->name, name, min(len + 1, sizeof(earlycon->name)));
+       earlycon->data = &early_console_dev;
+
+       if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM16 ||
+           port->iotype == UPIO_MEM32 || port->iotype == UPIO_MEM32BE)
+               pr_info("%s%d at MMIO%s %pa (options '%s')\n",
+                       earlycon->name, earlycon->index,
+                       (port->iotype == UPIO_MEM) ? "" :
+                       (port->iotype == UPIO_MEM16) ? "16" :
+                       (port->iotype == UPIO_MEM32) ? "32" : "32be",
+                       &port->mapbase, device->options);
+       else
+               pr_info("%s%d at I/O port 0x%lx (options '%s')\n",
+                       earlycon->name, earlycon->index,
+                       port->iobase, device->options);
+}
+
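
The earlycon_init() helper added above derives the console index from any
trailing digits of the match name and copies the remainder into the
console name. A standalone userspace sketch of that parsing; the helper
and buffer sizes are made up for the example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void split_earlycon_name(const char *name, char *base,
                                size_t base_len, int *index)
{
        const char *s = name + strlen(name);

        /* scan backwards over trailing digits, as earlycon_init() does */
        while (s > name && s[-1] >= '0' && s[-1] <= '9')
                s--;

        *index = *s ? (int)strtoul(s, NULL, 10) : 0;
        snprintf(base, base_len, "%.*s", (int)(s - name), name);
}

int main(void)
{
        char base[16];
        int idx;

        split_earlycon_name("ttyAMA0", base, sizeof(base), &idx);
        printf("%s %d\n", base, idx);   /* prints: ttyAMA 0 */
        return 0;
}
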
 static int __init parse_options(struct earlycon_device *device, char *options)
 {
        struct uart_port *port = &device->port;
@@ -97,19 +124,6 @@ static int __init parse_options(struct earlycon_device *device, char *options)
                strlcpy(device->options, options, length);
        }
 
-       if (port->iotype == UPIO_MEM || port->iotype == UPIO_MEM16 ||
-           port->iotype == UPIO_MEM32 || port->iotype == UPIO_MEM32BE)
-               pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n",
-                       (port->iotype == UPIO_MEM) ? "" :
-                       (port->iotype == UPIO_MEM16) ? "16" :
-                       (port->iotype == UPIO_MEM32) ? "32" : "32be",
-                       (unsigned long long)port->mapbase,
-                       device->options);
-       else
-               pr_info("Early serial console at I/O port 0x%lx (options '%s')\n",
-                       port->iobase,
-                       device->options);
-
        return 0;
 }
 
@@ -127,7 +141,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
        if (port->mapbase)
                port->membase = earlycon_map(port->mapbase, 64);
 
-       early_console_dev.con->data = &early_console_dev;
+       earlycon_init(&early_console_dev, match->name);
        err = match->setup(&early_console_dev, buf);
        if (err < 0)
                return err;
@@ -166,7 +180,7 @@ int __init setup_earlycon(char *buf)
        if (early_con.flags & CON_ENABLED)
                return -EALREADY;
 
-       for (match = __earlycon_table; match->name[0]; match++) {
+       for (match = __earlycon_table; match < __earlycon_table_end; match++) {
                size_t len = strlen(match->name);
 
                if (strncmp(buf, match->name, len))
@@ -204,20 +218,62 @@ static int __init param_setup_earlycon(char *buf)
 }
 early_param("earlycon", param_setup_earlycon);
 
-int __init of_setup_earlycon(unsigned long addr,
-                            int (*setup)(struct earlycon_device *, const char *))
+#ifdef CONFIG_OF_EARLY_FLATTREE
+
+int __init of_setup_earlycon(const struct earlycon_id *match,
+                            unsigned long node,
+                            const char *options)
 {
        int err;
        struct uart_port *port = &early_console_dev.port;
+       const __be32 *val;
+       bool big_endian;
+       u64 addr;
 
        spin_lock_init(&port->lock);
        port->iotype = UPIO_MEM;
+       addr = of_flat_dt_translate_address(node);
+       if (addr == OF_BAD_ADDR) {
+               pr_warn("[%s] bad address\n", match->name);
+               return -ENXIO;
+       }
        port->mapbase = addr;
        port->uartclk = BASE_BAUD * 16;
-       port->membase = earlycon_map(addr, SZ_4K);
+       port->membase = earlycon_map(port->mapbase, SZ_4K);
+
+       val = of_get_flat_dt_prop(node, "reg-offset", NULL);
+       if (val)
+               port->mapbase += be32_to_cpu(*val);
+       val = of_get_flat_dt_prop(node, "reg-shift", NULL);
+       if (val)
+               port->regshift = be32_to_cpu(*val);
+       big_endian = of_get_flat_dt_prop(node, "big-endian", NULL) != NULL ||
+               (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
+                of_get_flat_dt_prop(node, "native-endian", NULL) != NULL);
+       val = of_get_flat_dt_prop(node, "reg-io-width", NULL);
+       if (val) {
+               switch (be32_to_cpu(*val)) {
+               case 1:
+                       port->iotype = UPIO_MEM;
+                       break;
+               case 2:
+                       port->iotype = UPIO_MEM16;
+                       break;
+               case 4:
+                       port->iotype = (big_endian) ? UPIO_MEM32BE : UPIO_MEM32;
+                       break;
+               default:
+                       pr_warn("[%s] unsupported reg-io-width\n", match->name);
+                       return -EINVAL;
+               }
+       }
 
-       early_console_dev.con->data = &early_console_dev;
-       err = setup(&early_console_dev, NULL);
+       if (options) {
+               strlcpy(early_console_dev.options, options,
+                       sizeof(early_console_dev.options));
+       }
+       earlycon_init(&early_console_dev, match->name);
+       err = match->setup(&early_console_dev, options);
        if (err < 0)
                return err;
        if (!early_console_dev.con->write)
@@ -227,3 +283,5 @@ int __init of_setup_earlycon(unsigned long addr,
        register_console(early_console_dev.con);
        return 0;
 }
+
+#endif /* CONFIG_OF_EARLY_FLATTREE */
index 9362f54c816c99c73957209cd49bd781e5a61e09..231e7d5caf6c18459737b4c92d5d2955c7654413 100644 (file)
@@ -2166,7 +2166,8 @@ static int imx_serial_port_suspend(struct device *dev)
 
        uart_suspend_port(&imx_reg, &sport->port);
 
-       return 0;
+       /* Needed to enable clock in suspend_noirq */
+       return clk_prepare(sport->clk_ipg);
 }
 
 static int imx_serial_port_resume(struct device *dev)
@@ -2179,6 +2180,8 @@ static int imx_serial_port_resume(struct device *dev)
 
        uart_resume_port(&imx_reg, &sport->port);
 
+       clk_unprepare(sport->clk_ipg);
+
        return 0;
 }
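
The imx hunk above prepares the ipg clock during suspend so that later,
atomic (noirq) suspend code can call clk_enable() without sleeping, and
balances it with clk_unprepare() on resume. A minimal sketch of that
pairing with a hypothetical device and clock, not the driver's actual
callbacks:

#include <linux/clk.h>
#include <linux/device.h>

static struct clk *demo_clk;            /* hypothetical ipg clock */

static int demo_suspend(struct device *dev)
{
        /* sleepable context: prepare now so noirq code may enable later */
        return clk_prepare(demo_clk);
}

static int demo_suspend_noirq(struct device *dev)
{
        return clk_enable(demo_clk);    /* atomic-safe once prepared */
}

static int demo_resume_noirq(struct device *dev)
{
        clk_disable(demo_clk);
        return 0;
}

static int demo_resume(struct device *dev)
{
        clk_unprepare(demo_clk);        /* balance the prepare from suspend */
        return 0;
}
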
 
index 524e86ab3cae7af5b43811c6679e344c4da78054..c5ddfe542451f55d90f9e0b240c30668b188ef68 100644 (file)
@@ -529,7 +529,6 @@ void jsm_input(struct jsm_channel *ch)
        int data_len;
        unsigned long lock_flags;
        int len = 0;
-       int n = 0;
        int s = 0;
        int i = 0;
 
@@ -569,8 +568,7 @@ void jsm_input(struct jsm_channel *ch)
         *If the device is not open, or CREAD is off, flush
         *input data and return immediately.
         */
-       if (!tp ||
-               !(tp->termios.c_cflag & CREAD) ) {
+       if (!tp || !C_CREAD(tp)) {
 
                jsm_dbg(READ, &ch->ch_bd->pci_dev,
                        "input. dropping %d bytes on port %d...\n",
@@ -598,16 +596,15 @@ void jsm_input(struct jsm_channel *ch)
        jsm_dbg(READ, &ch->ch_bd->pci_dev, "start 2\n");
 
        len = tty_buffer_request_room(port, data_len);
-       n = len;
 
        /*
-        * n now contains the most amount of data we can copy,
+        * len now contains the maximum amount of data we can copy,
         * bounded either by the flip buffer size or the amount
         * of data the card actually has pending...
         */
-       while (n) {
+       while (len) {
                s = ((head >= tail) ? head : RQUEUESIZE) - tail;
-               s = min(s, n);
+               s = min(s, len);
 
                if (s <= 0)
                        break;
@@ -638,7 +635,7 @@ void jsm_input(struct jsm_channel *ch)
                        tty_insert_flip_string(port, ch->ch_rqueue + tail, s);
                }
                tail += s;
-               n -= s;
+               len -= s;
                /* Flip queue if needed */
                tail &= rmask;
        }
index 0eeb64f2499ca102ed456212f07ca6d3a56be74c..68765f7c2645be08ac3f7651214a119a35150d34 100644 (file)
 #define BAUD_RATE      115200
 
 #include <linux/serial_core.h>
-#include "m32r_sio.h"
 #include "m32r_sio_reg.h"
 
-/*
- * Debugging.
- */
-#if 0
-#define DEBUG_AUTOCONF(fmt...) printk(fmt)
-#else
-#define DEBUG_AUTOCONF(fmt...) do { } while (0)
-#endif
-
-#if 0
-#define DEBUG_INTR(fmt...)     printk(fmt)
-#else
-#define DEBUG_INTR(fmt...)     do { } while (0)
-#endif
-
 #define PASS_LIMIT     256
 
-#define BASE_BAUD      115200
-
 /* Standard COM flags */
 #define STD_COM_FLAGS (UPF_BOOT_AUTOCONF | UPF_SKIP_TEST)
 
-/*
- * SERIAL_PORT_DFNS tells us about built-in ports that have no
- * standard enumeration mechanism.   Platforms that can find all
- * serial ports via mechanisms like ACPI or PCI need not supply it.
- */
+static const struct {
+       unsigned int port;
+       unsigned int irq;
+} old_serial_port[] = {
 #if defined(CONFIG_PLAT_USRV)
-
-#define SERIAL_PORT_DFNS                                               \
-       /* UART  CLK     PORT   IRQ            FLAGS */                 \
-       { 0, BASE_BAUD, 0x3F8, PLD_IRQ_UART0, STD_COM_FLAGS }, /* ttyS0 */ \
-       { 0, BASE_BAUD, 0x2F8, PLD_IRQ_UART1, STD_COM_FLAGS }, /* ttyS1 */
-
-#else /* !CONFIG_PLAT_USRV */
-
-#if defined(CONFIG_SERIAL_M32R_PLDSIO)
-#define SERIAL_PORT_DFNS                                               \
-       { 0, BASE_BAUD, ((unsigned long)PLD_ESIO0CR), PLD_IRQ_SIO0_RCV, \
-         STD_COM_FLAGS }, /* ttyS0 */
+       /* PORT  IRQ            FLAGS */
+       { 0x3F8, PLD_IRQ_UART0 }, /* ttyS0 */
+       { 0x2F8, PLD_IRQ_UART1 }, /* ttyS1 */
+#elif defined(CONFIG_SERIAL_M32R_PLDSIO)
+       { ((unsigned long)PLD_ESIO0CR), PLD_IRQ_SIO0_RCV }, /* ttyS0 */
 #else
-#define SERIAL_PORT_DFNS                                               \
-       { 0, BASE_BAUD, M32R_SIO_OFFSET, M32R_IRQ_SIO0_R,               \
-         STD_COM_FLAGS }, /* ttyS0 */
+       { M32R_SIO_OFFSET, M32R_IRQ_SIO0_R }, /* ttyS0 */
 #endif
-
-#endif /* !CONFIG_PLAT_USRV */
-
-static struct old_serial_port old_serial_port[] = {
-       SERIAL_PORT_DFNS
 };
 
 #define UART_NR        ARRAY_SIZE(old_serial_port)
@@ -108,19 +75,7 @@ struct uart_sio_port {
        struct uart_port        port;
        struct timer_list       timer;          /* "no irq" timer */
        struct list_head        list;           /* ports on this IRQ */
-       unsigned short          rev;
-       unsigned char           acr;
        unsigned char           ier;
-       unsigned char           lcr;
-       unsigned char           mcr_mask;       /* mask of user bits */
-       unsigned char           mcr_force;      /* mask of forced bits */
-       unsigned char           lsr_break_flag;
-
-       /*
-        * We provide a per-port pm hook.
-        */
-       void                    (*pm)(struct uart_port *port,
-                                     unsigned int state, unsigned int old);
 };
 
 struct irq_info {
@@ -345,14 +300,8 @@ static void receive_chars(struct uart_sio_port *up, int *status)
                         */
                        *status &= up->port.read_status_mask;
 
-                       if (up->port.line == up->port.cons->index) {
-                               /* Recover the break flag from console xmit */
-                               *status |= up->lsr_break_flag;
-                               up->lsr_break_flag = 0;
-                       }
-
                        if (*status & UART_LSR_BI) {
-                               DEBUG_INTR("handling break....");
+                               pr_debug("handling break....\n");
                                flag = TTY_BREAK;
                        } else if (*status & UART_LSR_PE)
                                flag = TTY_PARITY;
@@ -413,7 +362,7 @@ static void transmit_chars(struct uart_sio_port *up)
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(&up->port);
 
-       DEBUG_INTR("THRE...");
+       pr_debug("THRE...\n");
 
        if (uart_circ_empty(xmit))
                m32r_sio_stop_tx(&up->port);
@@ -425,7 +374,7 @@ static void transmit_chars(struct uart_sio_port *up)
 static inline void m32r_sio_handle_port(struct uart_sio_port *up,
        unsigned int status)
 {
-       DEBUG_INTR("status = %x...", status);
+       pr_debug("status = %x...\n", status);
 
        if (status & 0x04)
                receive_chars(up, &status);
@@ -453,7 +402,7 @@ static irqreturn_t m32r_sio_interrupt(int irq, void *dev_id)
        struct list_head *l, *end = NULL;
        int pass_counter = 0;
 
-       DEBUG_INTR("m32r_sio_interrupt(%d)...", irq);
+       pr_debug("m32r_sio_interrupt(%d)...\n", irq);
 
 #ifdef CONFIG_SERIAL_M32R_PLDSIO
 //     if (irq == PLD_IRQ_SIO0_SND)
@@ -493,7 +442,7 @@ static irqreturn_t m32r_sio_interrupt(int irq, void *dev_id)
 
        spin_unlock(&i->lock);
 
-       DEBUG_INTR("end.\n");
+       pr_debug("end.\n");
 
        return IRQ_HANDLED;
 }
@@ -782,20 +731,9 @@ static void m32r_sio_set_termios(struct uart_port *port,
 
        serial_out(up, UART_IER, up->ier);
 
-       up->lcr = cval;                                 /* Save LCR */
        spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
-static void m32r_sio_pm(struct uart_port *port, unsigned int state,
-       unsigned int oldstate)
-{
-       struct uart_sio_port *up =
-               container_of(port, struct uart_sio_port, port);
-
-       if (up->pm)
-               up->pm(port, state, oldstate);
-}
-
 /*
  * Resource handling.  This is complicated by the fact that resources
  * depend on the port type.  Maybe we should be claiming the standard
@@ -932,7 +870,6 @@ static struct uart_ops m32r_sio_pops = {
        .startup        = m32r_sio_startup,
        .shutdown       = m32r_sio_shutdown,
        .set_termios    = m32r_sio_set_termios,
-       .pm             = m32r_sio_pm,
        .release_port   = m32r_sio_release_port,
        .request_port   = m32r_sio_request_port,
        .config_port    = m32r_sio_config_port,
@@ -951,15 +888,14 @@ static void __init m32r_sio_init_ports(void)
                return;
        first = 0;
 
-       for (i = 0, up = m32r_sio_ports; i < ARRAY_SIZE(old_serial_port);
-            i++, up++) {
+       for (i = 0, up = m32r_sio_ports; i < UART_NR; i++, up++) {
                up->port.iobase   = old_serial_port[i].port;
                up->port.irq      = irq_canonicalize(old_serial_port[i].irq);
-               up->port.uartclk  = old_serial_port[i].baud_base * 16;
-               up->port.flags    = old_serial_port[i].flags;
-               up->port.membase  = old_serial_port[i].iomem_base;
-               up->port.iotype   = old_serial_port[i].io_type;
-               up->port.regshift = old_serial_port[i].iomem_reg_shift;
+               up->port.uartclk  = BAUD_RATE * 16;
+               up->port.flags    = STD_COM_FLAGS;
+               up->port.membase  = 0;
+               up->port.iotype   = 0;
+               up->port.regshift = 0;
                up->port.ops      = &m32r_sio_pops;
        }
 }
@@ -978,9 +914,6 @@ static void __init m32r_sio_register_ports(struct uart_driver *drv)
                init_timer(&up->timer);
                up->timer.function = m32r_sio_timeout;
 
-               up->mcr_mask = ~0;
-               up->mcr_force = 0;
-
                uart_add_one_port(drv, &up->port);
        }
 }
@@ -1112,28 +1045,6 @@ static struct uart_driver m32r_sio_reg = {
        .cons                   = M32R_SIO_CONSOLE,
 };
 
-/**
- *     m32r_sio_suspend_port - suspend one serial port
- *     @line: serial line number
- *
- *     Suspend one serial port.
- */
-void m32r_sio_suspend_port(int line)
-{
-       uart_suspend_port(&m32r_sio_reg, &m32r_sio_ports[line].port);
-}
-
-/**
- *     m32r_sio_resume_port - resume one serial port
- *     @line: serial line number
- *
- *     Resume one serial port.
- */
-void m32r_sio_resume_port(int line)
-{
-       uart_resume_port(&m32r_sio_reg, &m32r_sio_ports[line].port);
-}
-
 static int __init m32r_sio_init(void)
 {
        int ret, i;
@@ -1163,8 +1074,5 @@ static void __exit m32r_sio_exit(void)
 module_init(m32r_sio_init);
 module_exit(m32r_sio_exit);
 
-EXPORT_SYMBOL(m32r_sio_suspend_port);
-EXPORT_SYMBOL(m32r_sio_resume_port);
-
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Generic M32R SIO serial driver");
diff --git a/drivers/tty/serial/m32r_sio.h b/drivers/tty/serial/m32r_sio.h
deleted file mode 100644 (file)
index 8129824..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- *  m32r_sio.h
- *
- *  Driver for M32R serial ports
- *
- *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
- *  Based on drivers/serial/8250.h.
- *
- *  Copyright (C) 2001  Russell King.
- *  Copyright (C) 2004  Hirokazu Takata <takata at linux-m32r.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/pci.h>
-
-struct m32r_sio_probe {
-       struct module   *owner;
-       int             (*pci_init_one)(struct pci_dev *dev);
-       void            (*pci_remove_one)(struct pci_dev *dev);
-       void            (*pnp_init)(void);
-};
-
-int m32r_sio_register_probe(struct m32r_sio_probe *probe);
-void m32r_sio_unregister_probe(struct m32r_sio_probe *probe);
-void m32r_sio_get_irq_map(unsigned int *map);
-void m32r_sio_suspend_port(int line);
-void m32r_sio_resume_port(int line);
-
-struct old_serial_port {
-       unsigned int uart;
-       unsigned int baud_base;
-       unsigned int port;
-       unsigned int irq;
-       unsigned int flags;
-       unsigned char io_type;
-       unsigned char __iomem *iomem_base;
-       unsigned short iomem_reg_shift;
-};
-
-#define _INLINE_ inline
-
-#define PROBE_RSA      (1 << 0)
-#define PROBE_ANY      (~0)
-
-#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
index 8c3e5131447027104f4497f177faec3e4df68ade..3970d6a9aaca42e4dd459bd483b06599f77884fd 100644 (file)
@@ -346,7 +346,7 @@ static irqreturn_t mpc52xx_psc_handle_irq(struct uart_port *port)
        return mpc5xxx_uart_process_int(port);
 }
 
-static struct psc_ops mpc52xx_psc_ops = {
+static const struct psc_ops mpc52xx_psc_ops = {
        .fifo_init = mpc52xx_psc_fifo_init,
        .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
        .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
@@ -376,7 +376,7 @@ static struct psc_ops mpc52xx_psc_ops = {
        .get_mr1 = mpc52xx_psc_get_mr1,
 };
 
-static struct psc_ops mpc5200b_psc_ops = {
+static const struct psc_ops mpc5200b_psc_ops = {
        .fifo_init = mpc52xx_psc_fifo_init,
        .raw_rx_rdy = mpc52xx_psc_raw_rx_rdy,
        .raw_tx_rdy = mpc52xx_psc_raw_tx_rdy,
@@ -969,7 +969,7 @@ static u8 mpc5125_psc_get_mr1(struct uart_port *port)
        return in_8(&PSC_5125(port)->mr1);
 }
 
-static struct psc_ops mpc5125_psc_ops = {
+static const struct psc_ops mpc5125_psc_ops = {
        .fifo_init = mpc5125_psc_fifo_init,
        .raw_rx_rdy = mpc5125_psc_raw_rx_rdy,
        .raw_tx_rdy = mpc5125_psc_raw_tx_rdy,
@@ -1004,7 +1004,7 @@ static struct psc_ops mpc5125_psc_ops = {
        .get_mr1 = mpc5125_psc_get_mr1,
 };
 
-static struct psc_ops mpc512x_psc_ops = {
+static const struct psc_ops mpc512x_psc_ops = {
        .fifo_init = mpc512x_psc_fifo_init,
        .raw_rx_rdy = mpc512x_psc_raw_rx_rdy,
        .raw_tx_rdy = mpc512x_psc_raw_tx_rdy,
index cadfd1cfae2b9dde93f3d0d08e0d49f5edc4b708..4a3021bcc859566b02e69a7425dafbb2c905560b 100644 (file)
@@ -137,8 +137,6 @@ struct mpsc_port_info {
        /* Internal driver state for this ctlr */
        u8 ready;
        u8 rcv_data;
-       tcflag_t c_iflag;       /* save termios->c_iflag */
-       tcflag_t c_cflag;       /* save termios->c_cflag */
 
        /* Info passed in from platform */
        u8 mirror_regs;         /* Need to mirror regs? */
@@ -1407,9 +1405,6 @@ static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
        ulong flags;
        u32 chr_bits, stop_bits, par;
 
-       pi->c_iflag = termios->c_iflag;
-       pi->c_cflag = termios->c_cflag;
-
        switch (termios->c_cflag & CSIZE) {
        case CS5:
                chr_bits = MPSC_MPCR_CL_5;
@@ -1870,12 +1865,12 @@ static int mpsc_shared_map_regs(struct platform_device *pd)
 
 static void mpsc_shared_unmap_regs(void)
 {
-       if (!mpsc_shared_regs.mpsc_routing_base) {
+       if (mpsc_shared_regs.mpsc_routing_base) {
                iounmap(mpsc_shared_regs.mpsc_routing_base);
                release_mem_region(mpsc_shared_regs.mpsc_routing_base_p,
                                MPSC_ROUTING_REG_BLOCK_SIZE);
        }
-       if (!mpsc_shared_regs.sdma_intr_base) {
+       if (mpsc_shared_regs.sdma_intr_base) {
                iounmap(mpsc_shared_regs.sdma_intr_base);
                release_mem_region(mpsc_shared_regs.sdma_intr_base_p,
                                MPSC_SDMA_INTR_REG_BLOCK_SIZE);
@@ -1891,44 +1886,39 @@ static void mpsc_shared_unmap_regs(void)
 static int mpsc_shared_drv_probe(struct platform_device *dev)
 {
        struct mpsc_shared_pdata        *pdata;
-       int                              rc = -ENODEV;
-
-       if (dev->id == 0) {
-               rc = mpsc_shared_map_regs(dev);
-               if (!rc) {
-                       pdata = (struct mpsc_shared_pdata *)
-                               dev_get_platdata(&dev->dev);
-
-                       mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
-                       mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
-                       mpsc_shared_regs.MPSC_TCRR_m= pdata->tcrr_val;
-                       mpsc_shared_regs.SDMA_INTR_CAUSE_m =
-                               pdata->intr_cause_val;
-                       mpsc_shared_regs.SDMA_INTR_MASK_m =
-                               pdata->intr_mask_val;
-
-                       rc = 0;
-               }
-       }
+       int rc;
 
-       return rc;
+       if (dev->id != 0)
+               return -ENODEV;
+
+       rc = mpsc_shared_map_regs(dev);
+       if (rc)
+               return rc;
+
+       pdata = dev_get_platdata(&dev->dev);
+
+       mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
+       mpsc_shared_regs.MPSC_RCRR_m = pdata->rcrr_val;
+       mpsc_shared_regs.MPSC_TCRR_m = pdata->tcrr_val;
+       mpsc_shared_regs.SDMA_INTR_CAUSE_m = pdata->intr_cause_val;
+       mpsc_shared_regs.SDMA_INTR_MASK_m = pdata->intr_mask_val;
+
+       return 0;
 }
 
 static int mpsc_shared_drv_remove(struct platform_device *dev)
 {
-       int     rc = -ENODEV;
+       if (dev->id != 0)
+               return -ENODEV;
 
-       if (dev->id == 0) {
-               mpsc_shared_unmap_regs();
-               mpsc_shared_regs.MPSC_MRR_m = 0;
-               mpsc_shared_regs.MPSC_RCRR_m = 0;
-               mpsc_shared_regs.MPSC_TCRR_m = 0;
-               mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
-               mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
-               rc = 0;
-       }
+       mpsc_shared_unmap_regs();
+       mpsc_shared_regs.MPSC_MRR_m = 0;
+       mpsc_shared_regs.MPSC_RCRR_m = 0;
+       mpsc_shared_regs.MPSC_TCRR_m = 0;
+       mpsc_shared_regs.SDMA_INTR_CAUSE_m = 0;
+       mpsc_shared_regs.SDMA_INTR_MASK_m = 0;
 
-       return rc;
+       return 0;
 }
 
 static struct platform_driver mpsc_shared_driver = {
@@ -1979,10 +1969,6 @@ static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
                pi->sdma_base_p = r->start;
        } else {
                mpsc_resource_err("SDMA base");
-               if (pi->mpsc_base) {
-                       iounmap(pi->mpsc_base);
-                       pi->mpsc_base = NULL;
-               }
                goto err;
        }
 
@@ -1993,33 +1979,33 @@ static int mpsc_drv_map_regs(struct mpsc_port_info *pi,
                pi->brg_base_p = r->start;
        } else {
                mpsc_resource_err("BRG base");
-               if (pi->mpsc_base) {
-                       iounmap(pi->mpsc_base);
-                       pi->mpsc_base = NULL;
-               }
-               if (pi->sdma_base) {
-                       iounmap(pi->sdma_base);
-                       pi->sdma_base = NULL;
-               }
                goto err;
        }
        return 0;
 
 err:
+       if (pi->sdma_base) {
+               iounmap(pi->sdma_base);
+               pi->sdma_base = NULL;
+       }
+       if (pi->mpsc_base) {
+               iounmap(pi->mpsc_base);
+               pi->mpsc_base = NULL;
+       }
        return -ENOMEM;
 }
 
 static void mpsc_drv_unmap_regs(struct mpsc_port_info *pi)
 {
-       if (!pi->mpsc_base) {
+       if (pi->mpsc_base) {
                iounmap(pi->mpsc_base);
                release_mem_region(pi->mpsc_base_p, MPSC_REG_BLOCK_SIZE);
        }
-       if (!pi->sdma_base) {
+       if (pi->sdma_base) {
                iounmap(pi->sdma_base);
                release_mem_region(pi->sdma_base_p, MPSC_SDMA_REG_BLOCK_SIZE);
        }
-       if (!pi->brg_base) {
+       if (pi->brg_base) {
                iounmap(pi->brg_base);
                release_mem_region(pi->brg_base_p, MPSC_BRG_REG_BLOCK_SIZE);
        }
@@ -2073,36 +2059,37 @@ static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
 
 static int mpsc_drv_probe(struct platform_device *dev)
 {
-       struct mpsc_port_info   *pi;
-       int                     rc = -ENODEV;
-
-       pr_debug("mpsc_drv_probe: Adding MPSC %d\n", dev->id);
-
-       if (dev->id < MPSC_NUM_CTLRS) {
-               pi = &mpsc_ports[dev->id];
-
-               rc = mpsc_drv_map_regs(pi, dev);
-               if (!rc) {
-                       mpsc_drv_get_platform_data(pi, dev, dev->id);
-                       pi->port.dev = &dev->dev;
-
-                       rc = mpsc_make_ready(pi);
-                       if (!rc) {
-                               spin_lock_init(&pi->tx_lock);
-                               rc = uart_add_one_port(&mpsc_reg, &pi->port);
-                               if (!rc) {
-                                       rc = 0;
-                               } else {
-                                       mpsc_release_port((struct uart_port *)
-                                                       pi);
-                                       mpsc_drv_unmap_regs(pi);
-                               }
-                       } else {
-                               mpsc_drv_unmap_regs(pi);
-                       }
-               }
-       }
+       struct mpsc_port_info *pi;
+       int rc;
+
+       dev_dbg(&dev->dev, "mpsc_drv_probe: Adding MPSC %d\n", dev->id);
+
+       if (dev->id >= MPSC_NUM_CTLRS)
+               return -ENODEV;
+
+       pi = &mpsc_ports[dev->id];
+
+       rc = mpsc_drv_map_regs(pi, dev);
+       if (rc)
+               return rc;
 
+       mpsc_drv_get_platform_data(pi, dev, dev->id);
+       pi->port.dev = &dev->dev;
+
+       rc = mpsc_make_ready(pi);
+       if (rc)
+               goto err_unmap;
+
+       spin_lock_init(&pi->tx_lock);
+       rc = uart_add_one_port(&mpsc_reg, &pi->port);
+       if (rc)
+               goto err_relport;
+
+       return 0;
+err_relport:
+       mpsc_release_port(&pi->port);
+err_unmap:
+       mpsc_drv_unmap_regs(pi);
        return rc;
 }
 
@@ -2124,19 +2111,22 @@ static int __init mpsc_drv_init(void)
        memset(&mpsc_shared_regs, 0, sizeof(mpsc_shared_regs));
 
        rc = uart_register_driver(&mpsc_reg);
-       if (!rc) {
-               rc = platform_driver_register(&mpsc_shared_driver);
-               if (!rc) {
-                       rc = platform_driver_register(&mpsc_driver);
-                       if (rc) {
-                               platform_driver_unregister(&mpsc_shared_driver);
-                               uart_unregister_driver(&mpsc_reg);
-                       }
-               } else {
-                       uart_unregister_driver(&mpsc_reg);
-               }
-       }
+       if (rc)
+               return rc;
+
+       rc = platform_driver_register(&mpsc_shared_driver);
+       if (rc)
+               goto err_unreg_uart;
 
+       rc = platform_driver_register(&mpsc_driver);
+       if (rc)
+               goto err_unreg_plat;
+
+       return 0;
+err_unreg_plat:
+       platform_driver_unregister(&mpsc_shared_driver);
+err_unreg_uart:
+       uart_unregister_driver(&mpsc_reg);
        return rc;
 }
 device_initcall(mpsc_drv_init);
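
The mpsc_drv_probe() and mpsc_drv_init() hunks above replace nested "if (!rc)" success ladders with early returns plus goto labels that undo already-completed steps in reverse order. A standalone sketch of that unwind pattern, using made-up step names:

#include <stdio.h>

/* Each step either succeeds (returns 0) or fails; a failure must undo
 * every step that already succeeded, in reverse order. */
static int  step_a(void) { puts("a: registered");   return 0; }
static void undo_a(void) { puts("a: unregistered"); }
static int  step_b(void) { puts("b: registered");   return 0; }
static void undo_b(void) { puts("b: unregistered"); }
static int  step_c(void) { puts("c: failed");       return -1; }

static int init_all(void)
{
	int rc;

	rc = step_a();
	if (rc)
		return rc;

	rc = step_b();
	if (rc)
		goto err_undo_a;

	rc = step_c();
	if (rc)
		goto err_undo_b;

	return 0;

err_undo_b:
	undo_b();
err_undo_a:
	undo_a();
	return rc;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
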
index dcde955475dc9b11029c614d0d137d582d04e77c..96d3ce8dc2dc4c79b5d8dd57e21c638e7a8219d5 100644 (file)
@@ -1478,7 +1478,6 @@ msm_serial_early_console_setup(struct earlycon_device *device, const char *opt)
        device->con->write = msm_serial_early_write;
        return 0;
 }
-EARLYCON_DECLARE(msm_serial, msm_serial_early_console_setup);
 OF_EARLYCON_DECLARE(msm_serial, "qcom,msm-uart",
                    msm_serial_early_console_setup);
 
@@ -1500,7 +1499,6 @@ msm_serial_early_console_setup_dm(struct earlycon_device *device,
        device->con->write = msm_serial_early_write_dm;
        return 0;
 }
-EARLYCON_DECLARE(msm_serial_dm, msm_serial_early_console_setup_dm);
 OF_EARLYCON_DECLARE(msm_serial_dm, "qcom,msm-uartdm",
                    msm_serial_early_console_setup_dm);
 
index b645f9228ed77b90ac6d4791e1b0ab27056a066b..a2a529994ba5837551e08a84dd9aefbe554a497b 100644 (file)
@@ -1165,7 +1165,7 @@ serial_omap_type(struct uart_port *port)
 
 #define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
 
-static void wait_for_xmitr(struct uart_omap_port *up)
+static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
 {
        unsigned int status, tmout = 10000;
 
@@ -1343,7 +1343,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
 
 /* Enable or disable the rs485 support */
 static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
 {
        struct uart_omap_port *up = to_uart_omap_port(port);
        unsigned int mode;
@@ -1356,8 +1356,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
        up->ier = 0;
        serial_out(up, UART_IER, 0);
 
+       /* Clamp the delays to [0, 100ms] */
+       rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+       rs485->delay_rts_after_send  = min(rs485->delay_rts_after_send, 100U);
+
        /* store new config */
-       port->rs485 = *rs485conf;
+       port->rs485 = *rs485;
 
        /*
         * Just as a precaution, only allow rs485
@@ -1866,7 +1870,7 @@ static struct platform_driver serial_omap_driver = {
        .probe          = serial_omap_probe,
        .remove         = serial_omap_remove,
        .driver         = {
-               .name   = DRIVER_NAME,
+               .name   = OMAP_SERIAL_DRIVER_NAME,
                .pm     = &serial_omap_dev_pm_ops,
                .of_match_table = of_match_ptr(omap_serial_of_match),
        },
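
The rs485 hunk above clamps the requested RTS delays to at most 100 ms before the configuration is stored. A standalone sketch of the same clamp; the structure and field names are illustrative, and plain C supplies its own min helper in place of the kernel macro:

#include <stdio.h>

struct rs485_delays {
	unsigned int delay_rts_before_send; /* milliseconds */
	unsigned int delay_rts_after_send;  /* milliseconds */
};

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Clamp user-supplied delays to a sane upper bound (100 ms, matching the
 * hunk above) before accepting the configuration. */
static void clamp_delays(struct rs485_delays *d)
{
	d->delay_rts_before_send = min_u(d->delay_rts_before_send, 100U);
	d->delay_rts_after_send  = min_u(d->delay_rts_after_send, 100U);
}

int main(void)
{
	struct rs485_delays d = { .delay_rts_before_send = 5000,
				  .delay_rts_after_send  = 10 };

	clamp_delays(&d);
	printf("before=%u after=%u\n",
	       d.delay_rts_before_send, d.delay_rts_after_send);
	return 0;
}
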
index d72cd736bdc6ec9d28277616ca6231885089dc56..fd9c47f2f29fdfae6ccddb2be36a0fec66b5d9d0 100644 (file)
@@ -2451,7 +2451,6 @@ static int __init s3c2410_early_console_setup(struct earlycon_device *device,
 }
 OF_EARLYCON_DECLARE(s3c2410, "samsung,s3c2410-uart",
                        s3c2410_early_console_setup);
-EARLYCON_DECLARE(s3c2410, s3c2410_early_console_setup);
 
 /* S3C2412, S3C2440, S3C64xx */
 static struct samsung_early_console_data s3c2440_early_console_data = {
@@ -2470,9 +2469,6 @@ OF_EARLYCON_DECLARE(s3c2440, "samsung,s3c2440-uart",
                        s3c2440_early_console_setup);
 OF_EARLYCON_DECLARE(s3c6400, "samsung,s3c6400-uart",
                        s3c2440_early_console_setup);
-EARLYCON_DECLARE(s3c2412, s3c2440_early_console_setup);
-EARLYCON_DECLARE(s3c2440, s3c2440_early_console_setup);
-EARLYCON_DECLARE(s3c6400, s3c2440_early_console_setup);
 
 /* S5PV210, EXYNOS */
 static struct samsung_early_console_data s5pv210_early_console_data = {
@@ -2489,8 +2485,6 @@ OF_EARLYCON_DECLARE(s5pv210, "samsung,s5pv210-uart",
                        s5pv210_early_console_setup);
 OF_EARLYCON_DECLARE(exynos4210, "samsung,exynos4210-uart",
                        s5pv210_early_console_setup);
-EARLYCON_DECLARE(s5pv210, s5pv210_early_console_setup);
-EARLYCON_DECLARE(exynos4210, s5pv210_early_console_setup);
 #endif
 
 MODULE_ALIAS("platform:samsung-uart");
index 13f8d5f7027283ffa5716073d8c8408abf70f5b6..311e7bc07a2448242b072ec99d3730aa7cf878c8 100644 (file)
                                                  * or (IO6)
                                                  * - only on 75x/76x
                                                  */
-#define SC16IS7XX_MSR_CTS_BIT          (1 << 0) /* CTS */
-#define SC16IS7XX_MSR_DSR_BIT          (1 << 1) /* DSR (IO4)
+#define SC16IS7XX_MSR_CTS_BIT          (1 << 4) /* CTS */
+#define SC16IS7XX_MSR_DSR_BIT          (1 << 5) /* DSR (IO4)
                                                  * - only on 75x/76x
                                                  */
-#define SC16IS7XX_MSR_RI_BIT           (1 << 2) /* RI (IO7)
+#define SC16IS7XX_MSR_RI_BIT           (1 << 6) /* RI (IO7)
                                                  * - only on 75x/76x
                                                  */
-#define SC16IS7XX_MSR_CD_BIT           (1 << 3) /* CD (IO6)
+#define SC16IS7XX_MSR_CD_BIT           (1 << 7) /* CD (IO6)
                                                  * - only on 75x/76x
                                                  */
 #define SC16IS7XX_MSR_DELTA_MASK       0x0F     /* Any of the delta bits! */
 
 /* IOControl register bits (Only 750/760) */
 #define SC16IS7XX_IOCONTROL_LATCH_BIT  (1 << 0) /* Enable input latching */
-#define SC16IS7XX_IOCONTROL_GPIO_BIT   (1 << 1) /* Enable GPIO[7:4] */
+#define SC16IS7XX_IOCONTROL_MODEM_BIT  (1 << 1) /* Enable GPIO[7:4] as modem pins */
 #define SC16IS7XX_IOCONTROL_SRESET_BIT (1 << 3) /* Software Reset */
 
 /* EFCR register bits */
@@ -687,7 +687,7 @@ static void sc16is7xx_port_irq(struct sc16is7xx_port *s, int portno)
                case SC16IS7XX_IIR_CTSRTS_SRC:
                        msr = sc16is7xx_port_read(port, SC16IS7XX_MSR_REG);
                        uart_handle_cts_change(port,
-                                              !!(msr & SC16IS7XX_MSR_CTS_BIT));
+                                              !!(msr & SC16IS7XX_MSR_DCTS_BIT));
                        break;
                case SC16IS7XX_IIR_THRI_SRC:
                        sc16is7xx_handle_tx(port);
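
The sc16is7xx hunk above moves the modem-status level bits (CTS/DSR/RI/CD) into the upper nibble, leaving the lower nibble to the "delta" bits covered by SC16IS7XX_MSR_DELTA_MASK, and the interrupt path then tests a delta bit instead of the level bit. A standalone sketch of that 16550-style level/delta split; the names and the DCTS bit position below are assumptions for illustration, not taken from the header:

#include <stdio.h>

/* 16550-style modem status register layout:
 *   bits 0..3 - "delta" bits, set when the matching line changed state
 *   bits 4..7 - current level of CTS/DSR/RI/CD
 */
#define MSR_DCTS_BIT   (1u << 0)  /* CTS changed since last read (assumed) */
#define MSR_CTS_BIT    (1u << 4)  /* current CTS level */
#define MSR_DELTA_MASK 0x0fu      /* any of the delta bits */

static void handle_msr(unsigned int msr)
{
	if (msr & MSR_DCTS_BIT)   /* react to the change event */
		printf("CTS changed, now %s\n",
		       (msr & MSR_CTS_BIT) ? "asserted" : "deasserted");
}

int main(void)
{
	handle_msr(MSR_DCTS_BIT | MSR_CTS_BIT); /* change, line asserted */
	handle_msr(MSR_DCTS_BIT);               /* change, line deasserted */
	return 0;
}
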
index b1f54ab1818c4ee0982bab49559940cd61b80435..a126a603b0837916ad1046da7640c0170300bab4 100644 (file)
@@ -171,14 +171,12 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
                 */
                uart_change_speed(tty, state, NULL);
 
-               if (init_hw) {
-                       /*
-                        * Setup the RTS and DTR signals once the
-                        * port is open and ready to respond.
-                        */
-                       if (tty->termios.c_cflag & CBAUD)
-                               uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
-               }
+               /*
+                * Setup the RTS and DTR signals once the
+                * port is open and ready to respond.
+                */
+               if (init_hw && C_BAUD(tty))
+                       uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
        }
 
        /*
@@ -240,7 +238,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
                if (uart_console(uport) && tty)
                        uport->cons->cflag = tty->termios.c_cflag;
 
-               if (!tty || (tty->termios.c_cflag & HUPCL))
+               if (!tty || C_HUPCL(tty))
                        uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
 
                uart_port_shutdown(port);
@@ -485,12 +483,15 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
        spin_unlock_irq(&uport->lock);
 }
 
-static inline int __uart_put_char(struct uart_port *port,
-                               struct circ_buf *circ, unsigned char c)
+static int uart_put_char(struct tty_struct *tty, unsigned char c)
 {
+       struct uart_state *state = tty->driver_data;
+       struct uart_port *port = state->uart_port;
+       struct circ_buf *circ;
        unsigned long flags;
        int ret = 0;
 
+       circ = &state->xmit;
        if (!circ->buf)
                return 0;
 
@@ -504,13 +505,6 @@ static inline int __uart_put_char(struct uart_port *port,
        return ret;
 }
 
-static int uart_put_char(struct tty_struct *tty, unsigned char ch)
-{
-       struct uart_state *state = tty->driver_data;
-
-       return __uart_put_char(state->uart_port, &state->xmit, ch);
-}
-
 static void uart_flush_chars(struct tty_struct *tty)
 {
        uart_start(tty);
@@ -639,7 +633,7 @@ static void uart_throttle(struct tty_struct *tty)
 
        if (I_IXOFF(tty))
                mask |= UPSTAT_AUTOXOFF;
-       if (tty->termios.c_cflag & CRTSCTS)
+       if (C_CRTSCTS(tty))
                mask |= UPSTAT_AUTORTS;
 
        if (port->status & mask) {
@@ -647,11 +641,11 @@ static void uart_throttle(struct tty_struct *tty)
                mask &= ~port->status;
        }
 
-       if (mask & UPSTAT_AUTOXOFF)
-               uart_send_xchar(tty, STOP_CHAR(tty));
-
        if (mask & UPSTAT_AUTORTS)
                uart_clear_mctrl(port, TIOCM_RTS);
+
+       if (mask & UPSTAT_AUTOXOFF)
+               uart_send_xchar(tty, STOP_CHAR(tty));
 }
 
 static void uart_unthrottle(struct tty_struct *tty)
@@ -662,7 +656,7 @@ static void uart_unthrottle(struct tty_struct *tty)
 
        if (I_IXOFF(tty))
                mask |= UPSTAT_AUTOXOFF;
-       if (tty->termios.c_cflag & CRTSCTS)
+       if (C_CRTSCTS(tty))
                mask |= UPSTAT_AUTORTS;
 
        if (port->status & mask) {
@@ -670,21 +664,25 @@ static void uart_unthrottle(struct tty_struct *tty)
                mask &= ~port->status;
        }
 
-       if (mask & UPSTAT_AUTOXOFF)
-               uart_send_xchar(tty, START_CHAR(tty));
-
        if (mask & UPSTAT_AUTORTS)
                uart_set_mctrl(port, TIOCM_RTS);
+
+       if (mask & UPSTAT_AUTOXOFF)
+               uart_send_xchar(tty, START_CHAR(tty));
 }
 
-static void do_uart_get_info(struct tty_port *port,
-                       struct serial_struct *retinfo)
+static void uart_get_info(struct tty_port *port, struct serial_struct *retinfo)
 {
        struct uart_state *state = container_of(port, struct uart_state, port);
        struct uart_port *uport = state->uart_port;
 
        memset(retinfo, 0, sizeof(*retinfo));
 
+       /*
+        * Ensure the state we copy is consistent and no hardware changes
+        * occur as we go
+        */
+       mutex_lock(&port->mutex);
        retinfo->type       = uport->type;
        retinfo->line       = uport->line;
        retinfo->port       = uport->iobase;
@@ -703,15 +701,6 @@ static void do_uart_get_info(struct tty_port *port,
        retinfo->io_type         = uport->iotype;
        retinfo->iomem_reg_shift = uport->regshift;
        retinfo->iomem_base      = (void *)(unsigned long)uport->mapbase;
-}
-
-static void uart_get_info(struct tty_port *port,
-                       struct serial_struct *retinfo)
-{
-       /* Ensure the state we copy is consistent and no hardware changes
-          occur as we go */
-       mutex_lock(&port->mutex);
-       do_uart_get_info(port, retinfo);
        mutex_unlock(&port->mutex);
 }
 
@@ -719,6 +708,7 @@ static int uart_get_info_user(struct tty_port *port,
                         struct serial_struct __user *retinfo)
 {
        struct serial_struct tmp;
+
        uart_get_info(port, &tmp);
 
        if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
@@ -1391,8 +1381,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
 
        uport = state->uart_port;
        port = &state->port;
-
-       pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
+       pr_debug("uart_close(%d) called\n", tty->index);
 
        if (!port->count || tty_port_close_start(port, tty, filp) == 0)
                return;
@@ -1434,7 +1423,6 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
         * Wake up anyone trying to open this port.
         */
        clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
-       clear_bit(ASYNCB_CLOSING, &port->flags);
        spin_unlock_irq(&port->lock);
        wake_up_interruptible(&port->open_wait);
 
@@ -1510,7 +1498,7 @@ static void uart_hangup(struct tty_struct *tty)
        struct tty_port *port = &state->port;
        unsigned long flags;
 
-       pr_debug("uart_hangup(%d)\n", state->uart_port->line);
+       pr_debug("uart_hangup(%d)\n", tty->index);
 
        mutex_lock(&port->mutex);
        if (port->flags & ASYNC_NORMAL_ACTIVE) {
@@ -1591,7 +1579,7 @@ static void uart_dtr_rts(struct tty_port *port, int onoff)
  */
 static int uart_open(struct tty_struct *tty, struct file *filp)
 {
-       struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
+       struct uart_driver *drv = tty->driver->driver_state;
        int retval, line = tty->index;
        struct uart_state *state = drv->state + line;
        struct tty_port *port = &state->port;
@@ -1633,15 +1621,12 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
        /*
         * If we succeeded, wait until the port is ready.
         */
+err_unlock:
        mutex_unlock(&port->mutex);
        if (retval == 0)
                retval = tty_port_block_til_ready(port, tty, filp);
-
 end:
        return retval;
-err_unlock:
-       mutex_unlock(&port->mutex);
-       goto end;
 }
 
 static const char *uart_type(struct uart_port *port)
@@ -1700,17 +1685,13 @@ static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
                seq_printf(m, " tx:%d rx:%d",
                                uport->icount.tx, uport->icount.rx);
                if (uport->icount.frame)
-                       seq_printf(m, " fe:%d",
-                               uport->icount.frame);
+                       seq_printf(m, " fe:%d", uport->icount.frame);
                if (uport->icount.parity)
-                       seq_printf(m, " pe:%d",
-                               uport->icount.parity);
+                       seq_printf(m, " pe:%d", uport->icount.parity);
                if (uport->icount.brk)
-                       seq_printf(m, " brk:%d",
-                               uport->icount.brk);
+                       seq_printf(m, " brk:%d", uport->icount.brk);
                if (uport->icount.overrun)
-                       seq_printf(m, " oe:%d",
-                               uport->icount.overrun);
+                       seq_printf(m, " oe:%d", uport->icount.overrun);
 
 #define INFOBIT(bit, str) \
        if (uport->mctrl & (bit)) \
@@ -1745,8 +1726,7 @@ static int uart_proc_show(struct seq_file *m, void *v)
        struct uart_driver *drv = ttydrv->driver_state;
        int i;
 
-       seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n",
-                       "", "", "");
+       seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n", "", "", "");
        for (i = 0; i < drv->nr; i++)
                uart_line_info(m, drv, i);
        return 0;
@@ -1895,26 +1875,6 @@ uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow)
 }
 EXPORT_SYMBOL_GPL(uart_parse_options);
 
-struct baud_rates {
-       unsigned int rate;
-       unsigned int cflag;
-};
-
-static const struct baud_rates baud_rates[] = {
-       { 921600, B921600 },
-       { 460800, B460800 },
-       { 230400, B230400 },
-       { 115200, B115200 },
-       {  57600, B57600  },
-       {  38400, B38400  },
-       {  19200, B19200  },
-       {   9600, B9600   },
-       {   4800, B4800   },
-       {   2400, B2400   },
-       {   1200, B1200   },
-       {      0, B38400  }
-};
-
 /**
  *     uart_set_options - setup the serial console parameters
  *     @port: pointer to the serial ports uart_port structure
@@ -1930,7 +1890,6 @@ uart_set_options(struct uart_port *port, struct console *co,
 {
        struct ktermios termios;
        static struct ktermios dummy;
-       int i;
 
        /*
         * Ensure that the serial console lock is initialised
@@ -1945,16 +1904,8 @@ uart_set_options(struct uart_port *port, struct console *co,
 
        memset(&termios, 0, sizeof(struct ktermios));
 
-       termios.c_cflag = CREAD | HUPCL | CLOCAL;
-
-       /*
-        * Construct a cflag setting.
-        */
-       for (i = 0; baud_rates[i].rate; i++)
-               if (baud_rates[i].rate <= baud)
-                       break;
-
-       termios.c_cflag |= baud_rates[i].cflag;
+       termios.c_cflag |= CREAD | HUPCL | CLOCAL;
+       tty_termios_encode_baud_rate(&termios, baud, baud);
 
        if (bits == 7)
                termios.c_cflag |= CS7;
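
The serial_core hunks above swap open-coded "tty->termios.c_cflag & CRTSCTS" and "& HUPCL" tests for the C_CRTSCTS()/C_HUPCL()/C_BAUD() helpers, and reorder throttle/unthrottle so RTS is dropped (or raised) before the XOFF/XON character is sent. A standalone sketch of the flag-helper idea only, with stand-in structures and flag values; the real helpers live in the kernel tty headers:

#include <stdio.h>

/* Stand-ins for the kernel's termios flag helpers: each macro hides the
 * "reach into tty->termios.c_cflag and mask" boilerplate behind a name. */
#define DEMO_CRTSCTS 0x80000000u
#define DEMO_HUPCL   0x00000400u

struct demo_termios { unsigned int c_cflag; };
struct demo_tty     { struct demo_termios termios; };

#define C_FLAG(tty, f)  ((tty)->termios.c_cflag & (f))
#define C_CRTSCTS(tty)  C_FLAG(tty, DEMO_CRTSCTS)
#define C_HUPCL(tty)    C_FLAG(tty, DEMO_HUPCL)

int main(void)
{
	struct demo_tty tty = { .termios = { .c_cflag = DEMO_CRTSCTS } };

	if (C_CRTSCTS(&tty))
		puts("hardware flow control requested");
	if (!C_HUPCL(&tty))
		puts("do not drop DTR/RTS on last close");
	return 0;
}
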
index b4decf8787de006b59a843c1de5a0040653235b8..57f152394af5ac913d702c55dc61a4d66d0d98a5 100644 (file)
@@ -554,7 +554,7 @@ static struct uart_port ks8695uart_ports[SERIAL_KS8695_NR] = {
                .uartclk        = KS8695_CLOCK_RATE * 16,
                .fifosize       = 16,
                .ops            = &ks8695uart_pops,
-               .flags          = ASYNC_BOOT_AUTOCONF,
+               .flags          = UPF_BOOT_AUTOCONF,
                .line           = 0,
        }
 };
index 4646a9f531ad86d654039feda089cdbaf9c5372b..4678d8f2dd7d1da56d2e2455ee5bcf607ed7b952 100644 (file)
@@ -84,6 +84,22 @@ enum SCI_CLKS {
        SCI_NUM_CLKS
 };
 
+/* Bit x set means sampling rate x + 1 is supported */
+#define SCI_SR(x)              BIT((x) - 1)
+#define SCI_SR_RANGE(x, y)     GENMASK((y) - 1, (x) - 1)
+
+#define SCI_SR_SCIFAB          SCI_SR(5) | SCI_SR(7) | SCI_SR(11) | \
+                               SCI_SR(13) | SCI_SR(16) | SCI_SR(17) | \
+                               SCI_SR(19) | SCI_SR(27)
+
+#define min_sr(_port)          ffs((_port)->sampling_rate_mask)
+#define max_sr(_port)          fls((_port)->sampling_rate_mask)
+
+/* Iterate over all supported sampling rates, from high to low */
+#define for_each_sr(_sr, _port)                                                \
+       for ((_sr) = max_sr(_port); (_sr) >= min_sr(_port); (_sr)--)    \
+               if ((_port)->sampling_rate_mask & SCI_SR((_sr)))
+
 struct sci_port {
        struct uart_port        port;
 
@@ -93,7 +109,7 @@ struct sci_port {
        unsigned int            overrun_mask;
        unsigned int            error_mask;
        unsigned int            error_clear;
-       unsigned int            sampling_rate;
+       unsigned int            sampling_rate_mask;
        resource_size_t         reg_size;
 
        /* Break timer */
@@ -637,7 +653,8 @@ static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
        }
 }
 
-#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE)
+#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
+    defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
 
 #ifdef CONFIG_CONSOLE_POLL
 static int sci_poll_get_char(struct uart_port *port)
@@ -678,7 +695,8 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
        serial_port_out(port, SCxTDR, c);
        sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
 }
-#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE */
+#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE ||
+         CONFIG_SERIAL_SH_SCI_EARLYCON */
 
 static void sci_init_pins(struct uart_port *port, unsigned int cflag)
 {
@@ -1902,19 +1920,13 @@ static int sci_sck_calc(struct sci_port *s, unsigned int bps,
                        unsigned int *srr)
 {
        unsigned long freq = s->clk_rates[SCI_SCK];
-       unsigned int min_sr, max_sr, sr;
        int err, min_err = INT_MAX;
+       unsigned int sr;
 
-       if (s->sampling_rate) {
-               /* SCI(F) has a fixed sampling rate */
-               min_sr = max_sr = s->sampling_rate / 2;
-       } else {
-               /* HSCIF has a variable 1/(8..32) sampling rate */
-               min_sr = 8;
-               max_sr = 32;
-       }
+       if (s->port.type != PORT_HSCIF)
+               freq *= 2;
 
-       for (sr = max_sr; sr >= min_sr; sr--) {
+       for_each_sr(sr, s) {
                err = DIV_ROUND_CLOSEST(freq, sr) - bps;
                if (abs(err) >= abs(min_err))
                        continue;
@@ -1935,19 +1947,13 @@ static int sci_brg_calc(struct sci_port *s, unsigned int bps,
                        unsigned long freq, unsigned int *dlr,
                        unsigned int *srr)
 {
-       unsigned int min_sr, max_sr, sr, dl;
        int err, min_err = INT_MAX;
+       unsigned int sr, dl;
 
-       if (s->sampling_rate) {
-               /* SCIF has a fixed sampling rate */
-               min_sr = max_sr = s->sampling_rate / 2;
-       } else {
-               /* HSCIF has a variable 1/(8..32) sampling rate */
-               min_sr = 8;
-               max_sr = 32;
-       }
+       if (s->port.type != PORT_HSCIF)
+               freq *= 2;
 
-       for (sr = max_sr; sr >= min_sr; sr--) {
+       for_each_sr(sr, s) {
                dl = DIV_ROUND_CLOSEST(freq, sr * bps);
                dl = clamp(dl, 1U, 65535U);
 
@@ -1973,19 +1979,12 @@ static int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
                          unsigned int *brr, unsigned int *srr,
                          unsigned int *cks)
 {
-       unsigned int min_sr, max_sr, shift, sr, br, prediv, scrate, c;
        unsigned long freq = s->clk_rates[SCI_FCK];
+       unsigned int sr, br, prediv, scrate, c;
        int err, min_err = INT_MAX;
 
-       if (s->sampling_rate) {
-               min_sr = max_sr = s->sampling_rate;
-               shift = 0;
-       } else {
-               /* HSCIF has a variable sample rate */
-               min_sr = 8;
-               max_sr = 32;
-               shift = 1;
-       }
+       if (s->port.type != PORT_HSCIF)
+               freq *= 2;
 
        /*
         * Find the combination of sample rate and clock select with the
@@ -2002,10 +2001,10 @@ static int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
         *      (|D - 0.5| / N * (1 + F))|
         *  NOTE: Usually, treat D for 0.5, F is 0 by this calculation.
         */
-       for (sr = max_sr; sr >= min_sr; sr--) {
+       for_each_sr(sr, s) {
                for (c = 0; c <= 3; c++) {
                        /* integerized formulas from HSCIF documentation */
-                       prediv = sr * (1 << (2 * c + shift));
+                       prediv = sr * (1 << (2 * c + 1));
 
                        /*
                         * We need to calculate:
@@ -2062,7 +2061,7 @@ static void sci_reset(struct uart_port *port)
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
                            struct ktermios *old)
 {
-       unsigned int baud, smr_val = 0, scr_val = 0, i;
+       unsigned int baud, smr_val = SCSMR_ASYNC, scr_val = 0, i;
        unsigned int brr = 255, cks = 0, srr = 15, dl = 0, sccks = 0;
        unsigned int brr1 = 255, cks1 = 0, srr1 = 15, dl1 = 0;
        struct sci_port *s = to_sci_port(port);
@@ -2096,8 +2095,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
        for (i = 0; i < SCI_NUM_CLKS; i++)
                max_freq = max(max_freq, s->clk_rates[i]);
 
-       baud = uart_get_baud_rate(port, termios, old, 0,
-                                 max_freq / max(s->sampling_rate, 8U));
+       baud = uart_get_baud_rate(port, termios, old, 0, max_freq / min_sr(s));
        if (!baud)
                goto done;
 
@@ -2185,6 +2183,17 @@ done:
        uart_update_timeout(port, termios->c_cflag, baud);
 
        if (best_clk >= 0) {
+               if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
+                       switch (srr + 1) {
+                       case 5:  smr_val |= SCSMR_SRC_5;  break;
+                       case 7:  smr_val |= SCSMR_SRC_7;  break;
+                       case 11: smr_val |= SCSMR_SRC_11; break;
+                       case 13: smr_val |= SCSMR_SRC_13; break;
+                       case 16: smr_val |= SCSMR_SRC_16; break;
+                       case 17: smr_val |= SCSMR_SRC_17; break;
+                       case 19: smr_val |= SCSMR_SRC_19; break;
+                       case 27: smr_val |= SCSMR_SRC_27; break;
+                       }
                smr_val |= cks;
                dev_dbg(port->dev,
                         "SCR 0x%x SMR 0x%x BRR %u CKS 0x%x DL %u SRR %u\n",
@@ -2200,7 +2209,8 @@ done:
        } else {
                /* Don't touch the bit rate configuration */
                scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
-               smr_val |= serial_port_in(port, SCSMR) & SCSMR_CKS;
+               smr_val |= serial_port_in(port, SCSMR) &
+                          (SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
                dev_dbg(port->dev, "SCR 0x%x SMR 0x%x\n", scr_val, smr_val);
                serial_port_out(port, SCSCR, scr_val);
                serial_port_out(port, SCSMR, smr_val);
@@ -2232,6 +2242,16 @@ done:
        scr_val |= s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0);
        dev_dbg(port->dev, "SCSCR 0x%x\n", scr_val);
        serial_port_out(port, SCSCR, scr_val);
+       if ((srr + 1 == 5) &&
+           (port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
+               /*
+                * In asynchronous mode with a 1/5 sampling rate, the first
+                * received data may become invalid on some SCIFA and SCIFB.
+                * To avoid this, wait more than one serial data time (1 bit
+                * time x the number of data bits) after setting SCSCR.RE = 1.
+                */
+               udelay(DIV_ROUND_UP(10 * 1000000, baud));
+       }
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
        /*
@@ -2528,37 +2548,37 @@ static int sci_init_single(struct platform_device *dev,
                port->fifosize = 256;
                sci_port->overrun_reg = SCxSR;
                sci_port->overrun_mask = SCIFA_ORER;
-               sci_port->sampling_rate = 16;
+               sci_port->sampling_rate_mask = SCI_SR_SCIFAB;
                break;
        case PORT_HSCIF:
                port->fifosize = 128;
                sci_port->overrun_reg = SCLSR;
                sci_port->overrun_mask = SCLSR_ORER;
-               sci_port->sampling_rate = 0;
+               sci_port->sampling_rate_mask = SCI_SR_RANGE(8, 32);
                break;
        case PORT_SCIFA:
                port->fifosize = 64;
                sci_port->overrun_reg = SCxSR;
                sci_port->overrun_mask = SCIFA_ORER;
-               sci_port->sampling_rate = 16;
+               sci_port->sampling_rate_mask = SCI_SR_SCIFAB;
                break;
        case PORT_SCIF:
                port->fifosize = 16;
                if (p->regtype == SCIx_SH7705_SCIF_REGTYPE) {
                        sci_port->overrun_reg = SCxSR;
                        sci_port->overrun_mask = SCIFA_ORER;
-                       sci_port->sampling_rate = 16;
+                       sci_port->sampling_rate_mask = SCI_SR(16);
                } else {
                        sci_port->overrun_reg = SCLSR;
                        sci_port->overrun_mask = SCLSR_ORER;
-                       sci_port->sampling_rate = 32;
+                       sci_port->sampling_rate_mask = SCI_SR(32);
                }
                break;
        default:
                port->fifosize = 1;
                sci_port->overrun_reg = SCxSR;
                sci_port->overrun_mask = SCI_ORER;
-               sci_port->sampling_rate = 32;
+               sci_port->sampling_rate_mask = SCI_SR(32);
                break;
        }
 
@@ -2567,7 +2587,7 @@ static int sci_init_single(struct platform_device *dev,
         * data override the sampling rate for now.
         */
        if (p->sampling_rate)
-               sci_port->sampling_rate = p->sampling_rate;
+               sci_port->sampling_rate_mask = SCI_SR(p->sampling_rate);
 
        if (!early) {
                ret = sci_init_clocks(sci_port, &dev->dev);
@@ -2632,7 +2652,8 @@ static void sci_cleanup_single(struct sci_port *port)
        pm_runtime_disable(port->port.dev);
 }
 
-#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
+#if defined(CONFIG_SERIAL_SH_SCI_CONSOLE) || \
+    defined(CONFIG_SERIAL_SH_SCI_EARLYCON)
 static void serial_console_putchar(struct uart_port *port, int ch)
 {
        sci_poll_put_char(port, ch);
@@ -2652,9 +2673,12 @@ static void serial_console_write(struct console *co, const char *s,
        int locked = 1;
 
        local_irq_save(flags);
+#if defined(SUPPORT_SYSRQ)
        if (port->sysrq)
                locked = 0;
-       else if (oops_in_progress)
+       else
+#endif
+       if (oops_in_progress)
                locked = spin_trylock(&port->lock);
        else
                spin_lock(&port->lock);
@@ -2764,7 +2788,7 @@ static inline int sci_probe_earlyprintk(struct platform_device *pdev)
 
 #define SCI_CONSOLE    NULL
 
-#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE */
+#endif /* CONFIG_SERIAL_SH_SCI_CONSOLE || CONFIG_SERIAL_SH_SCI_EARLYCON */
 
 static const char banner[] __initconst = "SuperH (H)SCI(F) driver initialized";
 
@@ -2998,6 +3022,67 @@ static void __exit sci_exit(void)
 early_platform_init_buffer("earlyprintk", &sci_driver,
                           early_serial_buf, ARRAY_SIZE(early_serial_buf));
 #endif
+#ifdef CONFIG_SERIAL_SH_SCI_EARLYCON
+static struct __init plat_sci_port port_cfg;
+
+static int __init early_console_setup(struct earlycon_device *device,
+                                     int type)
+{
+       if (!device->port.membase)
+               return -ENODEV;
+
+       device->port.serial_in = sci_serial_in;
+       device->port.serial_out = sci_serial_out;
+       device->port.type = type;
+       memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
+       sci_ports[0].cfg = &port_cfg;
+       sci_ports[0].cfg->type = type;
+       sci_probe_regmap(sci_ports[0].cfg);
+       port_cfg.scscr = sci_serial_in(&sci_ports[0].port, SCSCR) |
+                        SCSCR_RE | SCSCR_TE;
+       sci_serial_out(&sci_ports[0].port, SCSCR, port_cfg.scscr);
+
+       device->con->write = serial_console_write;
+       return 0;
+}
+static int __init sci_early_console_setup(struct earlycon_device *device,
+                                         const char *opt)
+{
+       return early_console_setup(device, PORT_SCI);
+}
+static int __init scif_early_console_setup(struct earlycon_device *device,
+                                         const char *opt)
+{
+       return early_console_setup(device, PORT_SCIF);
+}
+static int __init scifa_early_console_setup(struct earlycon_device *device,
+                                         const char *opt)
+{
+       return early_console_setup(device, PORT_SCIFA);
+}
+static int __init scifb_early_console_setup(struct earlycon_device *device,
+                                         const char *opt)
+{
+       return early_console_setup(device, PORT_SCIFB);
+}
+static int __init hscif_early_console_setup(struct earlycon_device *device,
+                                         const char *opt)
+{
+       return early_console_setup(device, PORT_HSCIF);
+}
+
+EARLYCON_DECLARE(sci, sci_early_console_setup);
+OF_EARLYCON_DECLARE(sci, "renesas,sci", sci_early_console_setup);
+EARLYCON_DECLARE(scif, scif_early_console_setup);
+OF_EARLYCON_DECLARE(scif, "renesas,scif", scif_early_console_setup);
+EARLYCON_DECLARE(scifa, scifa_early_console_setup);
+OF_EARLYCON_DECLARE(scifa, "renesas,scifa", scifa_early_console_setup);
+EARLYCON_DECLARE(scifb, scifb_early_console_setup);
+OF_EARLYCON_DECLARE(scifb, "renesas,scifb", scifb_early_console_setup);
+EARLYCON_DECLARE(hscif, hscif_early_console_setup);
+OF_EARLYCON_DECLARE(hscif, "renesas,hscif", hscif_early_console_setup);
+#endif /* CONFIG_SERIAL_SH_SCI_EARLYCON */
+
 module_init(sci_init);
 module_exit(sci_exit);
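
The sh-sci hunks above replace the single sampling_rate value with a sampling_rate_mask, where bit x set means rate x + 1 is supported, and walk the candidates from highest to lowest via for_each_sr() built on ffs()/fls(). A standalone sketch of that iteration using compiler builtins; the mask mirrors the SCIFA/SCIFB set from the hunk but should be treated as illustrative:

#include <stdio.h>

/* Bit x set means sampling rate x + 1 is supported. */
#define SR(x)      (1u << ((x) - 1))
#define SR_SCIFAB  (SR(5) | SR(7) | SR(11) | SR(13) | SR(16) | SR(17) | \
                    SR(19) | SR(27))

static unsigned int min_sr(unsigned int mask) { return __builtin_ctz(mask) + 1; }
static unsigned int max_sr(unsigned int mask) { return 32 - __builtin_clz(mask); }

int main(void)
{
	unsigned int mask = SR_SCIFAB;
	unsigned int sr;

	/* Walk supported rates from high to low, skipping unset bits. */
	for (sr = max_sr(mask); sr >= min_sr(mask); sr--)
		if (mask & SR(sr))
			printf("candidate sampling rate 1/%u\n", sr);
	return 0;
}
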
 
index fb176025042185c44c7ae0ec2eeaa3134393219e..7a4fa185b93ef30792d58a24d7f26dbb2ec0156e 100644 (file)
@@ -35,12 +35,27 @@ enum {
 
 
 /* SCSMR (Serial Mode Register) */
+#define SCSMR_C_A      BIT(7)  /* Communication Mode */
+#define SCSMR_CSYNC    BIT(7)  /*   - Clocked synchronous mode */
+#define SCSMR_ASYNC    0       /*   - Asynchronous mode */
 #define SCSMR_CHR      BIT(6)  /* 7-bit Character Length */
 #define SCSMR_PE       BIT(5)  /* Parity Enable */
 #define SCSMR_ODD      BIT(4)  /* Odd Parity */
 #define SCSMR_STOP     BIT(3)  /* Stop Bit Length */
 #define SCSMR_CKS      0x0003  /* Clock Select */
 
+/* Serial Mode Register, SCIFA/SCIFB only bits */
+#define SCSMR_CKEDG    BIT(12) /* Transmit/Receive Clock Edge Select */
+#define SCSMR_SRC_MASK 0x0700  /* Sampling Control */
+#define SCSMR_SRC_16   0x0000  /* Sampling rate 1/16 */
+#define SCSMR_SRC_5    0x0100  /* Sampling rate 1/5 */
+#define SCSMR_SRC_7    0x0200  /* Sampling rate 1/7 */
+#define SCSMR_SRC_11   0x0300  /* Sampling rate 1/11 */
+#define SCSMR_SRC_13   0x0400  /* Sampling rate 1/13 */
+#define SCSMR_SRC_17   0x0500  /* Sampling rate 1/17 */
+#define SCSMR_SRC_19   0x0600  /* Sampling rate 1/19 */
+#define SCSMR_SRC_27   0x0700  /* Sampling rate 1/27 */
+
 /* Serial Control Register, SCIFA/SCIFB only bits */
 #define SCSCR_TDRQE    BIT(15) /* Tx Data Transfer Request Enable */
 #define SCSCR_RDRQE    BIT(14) /* Rx Data Transfer Request Enable */
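
The SCSMR hunk above adds a 3-bit sampling-control field whose codes map to the supported 1/5..1/27 rates. A standalone sketch of selecting and installing such a code; the shift, mask and register name are stand-ins, while the rate-to-code mapping mirrors the SCSMR_SRC_* values above:

#include <stdio.h>

/* Illustrative 3-bit "sampling control" field in bits 10:8 of a mode word. */
#define SRC_SHIFT 8
#define SRC_MASK  (0x7u << SRC_SHIFT)

static unsigned int src_code(unsigned int rate)
{
	switch (rate) {
	case 16: return 0x0;
	case 5:  return 0x1;
	case 7:  return 0x2;
	case 11: return 0x3;
	case 13: return 0x4;
	case 17: return 0x5;
	case 19: return 0x6;
	case 27: return 0x7;
	default: return 0x0;   /* fall back to 1/16 */
	}
}

int main(void)
{
	unsigned int smr = 0;

	smr = (smr & ~SRC_MASK) | (src_code(27) << SRC_SHIFT);
	printf("SMR = 0x%04x\n", smr);   /* expect 0x0700 */
	return 0;
}
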
index ef26c4a60be421e02d364944270ce938ec9fa20b..18971063f95fa17f4929e4cbc7014fff65172148 100644 (file)
@@ -624,8 +624,6 @@ static int __init sprd_early_console_setup(
        device->con->write = sprd_early_write;
        return 0;
 }
-
-EARLYCON_DECLARE(sprd_serial, sprd_early_console_setup);
 OF_EARLYCON_DECLARE(sprd_serial, "sprd,sc9836-uart",
                    sprd_early_console_setup);
 
index b1c6bd3d483fa87a0372928ebd59cbdf9173d17d..ee2e8efdea4ab3b4cac4c4d92b9f357a60d5ac6d 100644 (file)
@@ -194,7 +194,9 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
 {
        struct uart_port *port = dev_id;
        int busy, n = 0;
+       unsigned long flags;
 
+       spin_lock_irqsave(&port->lock, flags);
        do {
                int stat = uart_in32(ULITE_STATUS, port);
                busy  = ulite_receive(port, stat);
@@ -202,6 +204,8 @@ static irqreturn_t ulite_isr(int irq, void *dev_id)
                n++;
        } while (busy);
 
+       spin_unlock_irqrestore(&port->lock, flags);
+
        /* work done? */
        if (n > 1) {
                tty_flip_buffer_push(&port->state->port);
@@ -519,6 +523,47 @@ static int __init ulite_console_init(void)
 
 console_initcall(ulite_console_init);
 
+static void early_uartlite_putc(struct uart_port *port, int c)
+{
+       /*
+        * Limit how many times we'll spin waiting for TX FIFO status.
+        * This prevents lockups if the base address is incorrectly set
+        * or there is some other problem with the UARTLITE.
+        * The limit is fairly arbitrary: unless we are running at about
+        * 10 baud we will never time out on a working UART.
+        */
+
+       unsigned retries = 1000000;
+       /* read status bit - 0x8 offset */
+       while (--retries && (readl(port->membase + 8) & (1 << 3)))
+               ;
+
+       /* Only attempt the iowrite if we didn't timeout */
+       /* write to TX_FIFO - 0x4 offset */
+       if (retries)
+               writel(c & 0xff, port->membase + 4);
+}
+
+static void early_uartlite_write(struct console *console,
+                                const char *s, unsigned n)
+{
+       struct earlycon_device *device = console->data;
+       uart_console_write(&device->port, s, n, early_uartlite_putc);
+}
+
+static int __init early_uartlite_setup(struct earlycon_device *device,
+                                      const char *options)
+{
+       if (!device->port.membase)
+               return -ENODEV;
+
+       device->con->write = early_uartlite_write;
+       return 0;
+}
+EARLYCON_DECLARE(uartlite, early_uartlite_setup);
+OF_EARLYCON_DECLARE(uartlite_b, "xlnx,opb-uartlite-1.00.b", early_uartlite_setup);
+OF_EARLYCON_DECLARE(uartlite_a, "xlnx,xps-uartlite-1.00.a", early_uartlite_setup);
+
 #endif /* CONFIG_SERIAL_UARTLITE_CONSOLE */
 
 static struct uart_driver ulite_uart_driver = {
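
The uartlite earlycon hunk above polls the TX-full status bit with a retry cap so a wrong base address cannot hang the early console. A standalone sketch of that bounded-poll pattern against a fake status source; the "ready" predicate and the retry budget are stand-ins:

#include <stdio.h>

/* Pretend status source: reports "FIFO full" for the first few reads. */
static int fake_status_reads;
static int tx_full(void)
{
	return ++fake_status_reads < 4;
}

/* Poll until ready or until the retry budget runs out; never spin forever. */
static int wait_tx_ready(unsigned int retries)
{
	while (--retries && tx_full())
		;                         /* busy-wait */
	return retries != 0;              /* non-zero: ready before timeout */
}

int main(void)
{
	if (wait_tx_ready(1000000))
		puts("TX ready, safe to write the character");
	else
		puts("timed out, drop the character instead of hanging");
	return 0;
}
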
index 009e0dbc12d2ce7b8941804f9a5bdf5062920e19..131a3117fbbbdab18b116047946e7d0be30b493a 100644 (file)
@@ -50,24 +50,24 @@ module_param(rx_timeout, uint, S_IRUGO);
 MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
 
 /* Register offsets for the UART. */
-#define CDNS_UART_CR_OFFSET            0x00  /* Control Register */
-#define CDNS_UART_MR_OFFSET            0x04  /* Mode Register */
-#define CDNS_UART_IER_OFFSET           0x08  /* Interrupt Enable */
-#define CDNS_UART_IDR_OFFSET           0x0C  /* Interrupt Disable */
-#define CDNS_UART_IMR_OFFSET           0x10  /* Interrupt Mask */
-#define CDNS_UART_ISR_OFFSET           0x14  /* Interrupt Status */
-#define CDNS_UART_BAUDGEN_OFFSET       0x18  /* Baud Rate Generator */
-#define CDNS_UART_RXTOUT_OFFSET                0x1C  /* RX Timeout */
-#define CDNS_UART_RXWM_OFFSET          0x20  /* RX FIFO Trigger Level */
-#define CDNS_UART_MODEMCR_OFFSET       0x24  /* Modem Control */
-#define CDNS_UART_MODEMSR_OFFSET       0x28  /* Modem Status */
-#define CDNS_UART_SR_OFFSET            0x2C  /* Channel Status */
-#define CDNS_UART_FIFO_OFFSET          0x30  /* FIFO */
-#define CDNS_UART_BAUDDIV_OFFSET       0x34  /* Baud Rate Divider */
-#define CDNS_UART_FLOWDEL_OFFSET       0x38  /* Flow Delay */
-#define CDNS_UART_IRRX_PWIDTH_OFFSET   0x3C  /* IR Min Received Pulse Width */
-#define CDNS_UART_IRTX_PWIDTH_OFFSET   0x40  /* IR Transmitted pulse Width */
-#define CDNS_UART_TXWM_OFFSET          0x44  /* TX FIFO Trigger Level */
+#define CDNS_UART_CR           0x00  /* Control Register */
+#define CDNS_UART_MR           0x04  /* Mode Register */
+#define CDNS_UART_IER          0x08  /* Interrupt Enable */
+#define CDNS_UART_IDR          0x0C  /* Interrupt Disable */
+#define CDNS_UART_IMR          0x10  /* Interrupt Mask */
+#define CDNS_UART_ISR          0x14  /* Interrupt Status */
+#define CDNS_UART_BAUDGEN      0x18  /* Baud Rate Generator */
+#define CDNS_UART_RXTOUT       0x1C  /* RX Timeout */
+#define CDNS_UART_RXWM         0x20  /* RX FIFO Trigger Level */
+#define CDNS_UART_MODEMCR      0x24  /* Modem Control */
+#define CDNS_UART_MODEMSR      0x28  /* Modem Status */
+#define CDNS_UART_SR           0x2C  /* Channel Status */
+#define CDNS_UART_FIFO         0x30  /* FIFO */
+#define CDNS_UART_BAUDDIV      0x34  /* Baud Rate Divider */
+#define CDNS_UART_FLOWDEL      0x38  /* Flow Delay */
+#define CDNS_UART_IRRX_PWIDTH  0x3C  /* IR Min Received Pulse Width */
+#define CDNS_UART_IRTX_PWIDTH  0x40  /* IR Transmitted pulse Width */
+#define CDNS_UART_TXWM         0x44  /* TX FIFO Trigger Level */
 
 /* Control Register Bit Definitions */
 #define CDNS_UART_CR_STOPBRK   0x00000100  /* Stop TX break */
@@ -126,6 +126,10 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
 #define CDNS_UART_IXR_RXEMPTY  0x00000002 /* RX FIFO empty interrupt. */
 #define CDNS_UART_IXR_MASK     0x00001FFF /* Valid bit mask */
 
+#define CDNS_UART_RX_IRQS      (CDNS_UART_IXR_PARITY | CDNS_UART_IXR_FRAMING | \
+                                CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_RXTRIG | \
+                                CDNS_UART_IXR_TOUT)
+
 /* Goes in read_status_mask for break detection as the HW doesn't do it*/
 #define CDNS_UART_IXR_BRK      0x80000000
 
@@ -172,43 +176,22 @@ struct cdns_uart {
 #define to_cdns_uart(_nb) container_of(_nb, struct cdns_uart, \
                clk_rate_change_nb);
 
-/**
- * cdns_uart_isr - Interrupt handler
- * @irq: Irq number
- * @dev_id: Id of the port
- *
- * Return: IRQHANDLED
- */
-static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
+static void cdns_uart_handle_rx(struct uart_port *port, unsigned int isrstatus)
 {
-       struct uart_port *port = (struct uart_port *)dev_id;
-       unsigned long flags;
-       unsigned int isrstatus, numbytes;
-       unsigned int data;
-       char status = TTY_NORMAL;
-
-       spin_lock_irqsave(&port->lock, flags);
-
-       /* Read the interrupt status register to determine which
-        * interrupt(s) is/are active.
-        */
-       isrstatus = readl(port->membase + CDNS_UART_ISR_OFFSET);
-
        /*
         * There is no hardware break detection, so we interpret framing
         * error with all-zeros data as a break sequence. Most of the time,
         * there's another non-zero byte at the end of the sequence.
         */
        if (isrstatus & CDNS_UART_IXR_FRAMING) {
-               while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+               while (!(readl(port->membase + CDNS_UART_SR) &
                                        CDNS_UART_SR_RXEMPTY)) {
-                       if (!readl(port->membase + CDNS_UART_FIFO_OFFSET)) {
+                       if (!readl(port->membase + CDNS_UART_FIFO)) {
                                port->read_status_mask |= CDNS_UART_IXR_BRK;
                                isrstatus &= ~CDNS_UART_IXR_FRAMING;
                        }
                }
-               writel(CDNS_UART_IXR_FRAMING,
-                               port->membase + CDNS_UART_ISR_OFFSET);
+               writel(CDNS_UART_IXR_FRAMING, port->membase + CDNS_UART_ISR);
        }
 
        /* drop byte with parity error if IGNPAR specified */
@@ -218,94 +201,106 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
        isrstatus &= port->read_status_mask;
        isrstatus &= ~port->ignore_status_mask;
 
-       if ((isrstatus & CDNS_UART_IXR_TOUT) ||
-               (isrstatus & CDNS_UART_IXR_RXTRIG)) {
-               /* Receive Timeout Interrupt */
-               while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
-                                       CDNS_UART_SR_RXEMPTY)) {
-                       data = readl(port->membase + CDNS_UART_FIFO_OFFSET);
-
-                       /* Non-NULL byte after BREAK is garbage (99%) */
-                       if (data && (port->read_status_mask &
-                                               CDNS_UART_IXR_BRK)) {
-                               port->read_status_mask &= ~CDNS_UART_IXR_BRK;
-                               port->icount.brk++;
-                               if (uart_handle_break(port))
-                                       continue;
-                       }
+       if (!(isrstatus & (CDNS_UART_IXR_TOUT | CDNS_UART_IXR_RXTRIG)))
+               return;
 
-#ifdef SUPPORT_SYSRQ
-                       /*
-                        * uart_handle_sysrq_char() doesn't work if
-                        * spinlocked, for some reason
-                        */
-                        if (port->sysrq) {
-                               spin_unlock(&port->lock);
-                               if (uart_handle_sysrq_char(port,
-                                                       (unsigned char)data)) {
-                                       spin_lock(&port->lock);
-                                       continue;
-                               }
-                               spin_lock(&port->lock);
-                       }
-#endif
+       while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)) {
+               u32 data;
+               char status = TTY_NORMAL;
 
-                       port->icount.rx++;
+               data = readl(port->membase + CDNS_UART_FIFO);
 
-                       if (isrstatus & CDNS_UART_IXR_PARITY) {
-                               port->icount.parity++;
-                               status = TTY_PARITY;
-                       } else if (isrstatus & CDNS_UART_IXR_FRAMING) {
-                               port->icount.frame++;
-                               status = TTY_FRAME;
-                       } else if (isrstatus & CDNS_UART_IXR_OVERRUN) {
-                               port->icount.overrun++;
-                       }
+               /* Non-NULL byte after BREAK is garbage (99%) */
+               if (data && (port->read_status_mask & CDNS_UART_IXR_BRK)) {
+                       port->read_status_mask &= ~CDNS_UART_IXR_BRK;
+                       port->icount.brk++;
+                       if (uart_handle_break(port))
+                               continue;
+               }
+
+               if (uart_handle_sysrq_char(port, data))
+                       continue;
+
+               port->icount.rx++;
 
-                       uart_insert_char(port, isrstatus, CDNS_UART_IXR_OVERRUN,
-                                       data, status);
+               if (isrstatus & CDNS_UART_IXR_PARITY) {
+                       port->icount.parity++;
+                       status = TTY_PARITY;
+               } else if (isrstatus & CDNS_UART_IXR_FRAMING) {
+                       port->icount.frame++;
+                       status = TTY_FRAME;
+               } else if (isrstatus & CDNS_UART_IXR_OVERRUN) {
+                       port->icount.overrun++;
                }
-               spin_unlock(&port->lock);
-               tty_flip_buffer_push(&port->state->port);
-               spin_lock(&port->lock);
+
+               uart_insert_char(port, isrstatus, CDNS_UART_IXR_OVERRUN,
+                                data, status);
        }
+       tty_flip_buffer_push(&port->state->port);
+}
 
-       /* Dispatch an appropriate handler */
-       if ((isrstatus & CDNS_UART_IXR_TXEMPTY) == CDNS_UART_IXR_TXEMPTY) {
-               if (uart_circ_empty(&port->state->xmit)) {
-                       writel(CDNS_UART_IXR_TXEMPTY,
-                                       port->membase + CDNS_UART_IDR_OFFSET);
-               } else {
-                       numbytes = port->fifosize;
-                       /* Break if no more data available in the UART buffer */
-                       while (numbytes--) {
-                               if (uart_circ_empty(&port->state->xmit))
-                                       break;
-                               /* Get the data from the UART circular buffer
-                                * and write it to the cdns_uart's TX_FIFO
-                                * register.
-                                */
-                               writel(port->state->xmit.buf[
-                                               port->state->xmit.tail],
-                                       port->membase + CDNS_UART_FIFO_OFFSET);
-
-                               port->icount.tx++;
-
-                               /* Adjust the tail of the UART buffer and wrap
-                                * the buffer if it reaches limit.
-                                */
-                               port->state->xmit.tail =
-                                       (port->state->xmit.tail + 1) &
-                                               (UART_XMIT_SIZE - 1);
-                       }
+static void cdns_uart_handle_tx(struct uart_port *port)
+{
+       unsigned int numbytes;
 
-                       if (uart_circ_chars_pending(
-                                       &port->state->xmit) < WAKEUP_CHARS)
-                               uart_write_wakeup(port);
-               }
+       if (uart_circ_empty(&port->state->xmit)) {
+               writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
+               return;
+       }
+
+       numbytes = port->fifosize;
+       while (numbytes && !uart_circ_empty(&port->state->xmit) &&
+              !(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXFULL)) {
+               /*
+                * Get the data from the UART circular buffer
+                * and write it to the cdns_uart's TX_FIFO
+                * register.
+                */
+               writel(port->state->xmit.buf[port->state->xmit.tail],
+                       port->membase + CDNS_UART_FIFO);
+               port->icount.tx++;
+
+               /*
+                * Adjust the tail of the UART buffer and wrap
+                * the buffer if it reaches limit.
+                */
+               port->state->xmit.tail =
+                       (port->state->xmit.tail + 1) & (UART_XMIT_SIZE - 1);
+
+               numbytes--;
        }
 
-       writel(isrstatus, port->membase + CDNS_UART_ISR_OFFSET);
+       if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(port);
+}
+
+/**
+ * cdns_uart_isr - Interrupt handler
+ * @irq: Irq number
+ * @dev_id: Id of the port
+ *
+ * Return: IRQHANDLED
+ */
+static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
+{
+       struct uart_port *port = (struct uart_port *)dev_id;
+       unsigned long flags;
+       unsigned int isrstatus;
+
+       spin_lock_irqsave(&port->lock, flags);
+
+       /* Read the interrupt status register to determine which
+        * interrupt(s) is/are active.
+        */
+       isrstatus = readl(port->membase + CDNS_UART_ISR);
+
+       if (isrstatus & CDNS_UART_RX_IRQS)
+               cdns_uart_handle_rx(port, isrstatus);
+
+       if ((isrstatus & CDNS_UART_IXR_TXEMPTY) == CDNS_UART_IXR_TXEMPTY)
+               cdns_uart_handle_tx(port);
+
+       writel(isrstatus, port->membase + CDNS_UART_ISR);
 
        /* be sure to release the lock and tty before leaving */
        spin_unlock_irqrestore(&port->lock, flags);
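
The cdns_uart_handle_tx() helper split out above drains the transmit circular buffer into the FIFO, advancing the tail with a power-of-two mask, and wakes writers once the pending count drops below a watermark. A standalone sketch of that ring-buffer drain; the buffer size, watermark and the "FIFO" write are all stand-ins:

#include <stdio.h>
#include <string.h>

#define XMIT_SIZE    16          /* must be a power of two */
#define WAKEUP_CHARS 4

struct circ { char buf[XMIT_SIZE]; unsigned int head, tail; };

static unsigned int circ_pending(const struct circ *c)
{
	return (c->head - c->tail) & (XMIT_SIZE - 1);
}

/* Push up to 'fifo_room' characters out, wrapping the tail with a mask
 * instead of a modulo, like the UART_XMIT_SIZE - 1 update above. */
static void drain(struct circ *c, unsigned int fifo_room)
{
	while (fifo_room-- && circ_pending(c)) {
		putchar(c->buf[c->tail]);                 /* "write to FIFO" */
		c->tail = (c->tail + 1) & (XMIT_SIZE - 1);
	}
	if (circ_pending(c) < WAKEUP_CHARS)
		puts("\n<wake up writers>");
}

int main(void)
{
	struct circ c = { .head = 0, .tail = 0 };
	const char *msg = "hello uart";

	memcpy(c.buf, msg, strlen(msg));
	c.head = (unsigned int)strlen(msg);

	drain(&c, 8);   /* first FIFO fill */
	drain(&c, 8);   /* second fill empties the buffer */
	return 0;
}
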
@@ -395,14 +390,14 @@ static unsigned int cdns_uart_set_baud_rate(struct uart_port *port,
                        &div8);
 
        /* Write new divisors to hardware */
-       mreg = readl(port->membase + CDNS_UART_MR_OFFSET);
+       mreg = readl(port->membase + CDNS_UART_MR);
        if (div8)
                mreg |= CDNS_UART_MR_CLKSEL;
        else
                mreg &= ~CDNS_UART_MR_CLKSEL;
-       writel(mreg, port->membase + CDNS_UART_MR_OFFSET);
-       writel(cd, port->membase + CDNS_UART_BAUDGEN_OFFSET);
-       writel(bdiv, port->membase + CDNS_UART_BAUDDIV_OFFSET);
+       writel(mreg, port->membase + CDNS_UART_MR);
+       writel(cd, port->membase + CDNS_UART_BAUDGEN);
+       writel(bdiv, port->membase + CDNS_UART_BAUDDIV);
        cdns_uart->baud = baud;
 
        return calc_baud;
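
The hunk above only writes the computed divisors back to the hardware. As an assumption drawn from the register names rather than from this patch, the line rate for this UART family relates to them roughly as baud = clk / (cd * (bdiv + 1)), with MR_CLKSEL selecting a divide-by-8 input prescaler. A standalone sketch of a brute-force divisor search under that model (ranges, constants and the helper name are invented for illustration, not taken from the driver's own divisor-search helper):

#include <stdio.h>

/*
 * Pick cd/bdiv so that clk / (cd * (bdiv + 1)) is as close as possible to
 * the requested baud rate.  Assumed ranges: bdiv in [4, 255], cd in [1, 65535].
 */
static unsigned int calc_divs(unsigned int clk, unsigned int baud,
                              unsigned int *cd_out, unsigned int *bdiv_out)
{
        unsigned int bdiv, cd, actual, best = 0, best_err = ~0u;

        for (bdiv = 4; bdiv <= 255; bdiv++) {
                cd = clk / (baud * (bdiv + 1));
                if (cd < 1 || cd > 65535)
                        continue;
                actual = clk / (cd * (bdiv + 1));
                unsigned int err = actual > baud ? actual - baud : baud - actual;
                if (err < best_err) {
                        best_err = err;
                        best = actual;
                        *cd_out = cd;
                        *bdiv_out = bdiv;
                }
        }
        return best;   /* the baud rate actually achieved */
}

int main(void)
{
        unsigned int cd = 0, bdiv = 0;
        unsigned int got = calc_divs(100000000, 115200, &cd, &bdiv);

        printf("cd=%u bdiv=%u actual=%u\n", cd, bdiv, got);
        return 0;
}
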
@@ -449,9 +444,9 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
                spin_lock_irqsave(&cdns_uart->port->lock, flags);
 
                /* Disable the TX and RX to set baud rate */
-               ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+               ctrl_reg = readl(port->membase + CDNS_UART_CR);
                ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
-               writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+               writel(ctrl_reg, port->membase + CDNS_UART_CR);
 
                spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
 
@@ -476,11 +471,11 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
                        spin_lock_irqsave(&cdns_uart->port->lock, flags);
 
                /* Set TX/RX Reset */
-               ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+               ctrl_reg = readl(port->membase + CDNS_UART_CR);
                ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
-               writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+               writel(ctrl_reg, port->membase + CDNS_UART_CR);
 
-               while (readl(port->membase + CDNS_UART_CR_OFFSET) &
+               while (readl(port->membase + CDNS_UART_CR) &
                                (CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
                        cpu_relax();
 
@@ -489,11 +484,11 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
                 * enable bit and RX enable bit to enable the transmitter and
                 * receiver.
                 */
-               writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
-               ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+               writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
+               ctrl_reg = readl(port->membase + CDNS_UART_CR);
                ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
                ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
-               writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+               writel(ctrl_reg, port->membase + CDNS_UART_CR);
 
                spin_unlock_irqrestore(&cdns_uart->port->lock, flags);
 
@@ -510,43 +505,28 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
  */
 static void cdns_uart_start_tx(struct uart_port *port)
 {
-       unsigned int status, numbytes = port->fifosize;
+       unsigned int status;
 
-       if (uart_circ_empty(&port->state->xmit) || uart_tx_stopped(port))
+       if (uart_tx_stopped(port))
                return;
 
-       status = readl(port->membase + CDNS_UART_CR_OFFSET);
-       /* Set the TX enable bit and clear the TX disable bit to enable the
+       /*
+        * Set the TX enable bit and clear the TX disable bit to enable the
         * transmitter.
         */
-       writel((status & ~CDNS_UART_CR_TX_DIS) | CDNS_UART_CR_TX_EN,
-                       port->membase + CDNS_UART_CR_OFFSET);
+       status = readl(port->membase + CDNS_UART_CR);
+       status &= ~CDNS_UART_CR_TX_DIS;
+       status |= CDNS_UART_CR_TX_EN;
+       writel(status, port->membase + CDNS_UART_CR);
 
-       while (numbytes-- && ((readl(port->membase + CDNS_UART_SR_OFFSET) &
-                               CDNS_UART_SR_TXFULL)) != CDNS_UART_SR_TXFULL) {
-               /* Break if no more data available in the UART buffer */
-               if (uart_circ_empty(&port->state->xmit))
-                       break;
+       if (uart_circ_empty(&port->state->xmit))
+               return;
 
-               /* Get the data from the UART circular buffer and
-                * write it to the cdns_uart's TX_FIFO register.
-                */
-               writel(port->state->xmit.buf[port->state->xmit.tail],
-                               port->membase + CDNS_UART_FIFO_OFFSET);
-               port->icount.tx++;
+       cdns_uart_handle_tx(port);
 
-               /* Adjust the tail of the UART buffer and wrap
-                * the buffer if it reaches limit.
-                */
-               port->state->xmit.tail = (port->state->xmit.tail + 1) &
-                                       (UART_XMIT_SIZE - 1);
-       }
-       writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_ISR_OFFSET);
+       writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_ISR);
        /* Enable the TX Empty interrupt */
-       writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IER_OFFSET);
-
-       if (uart_circ_chars_pending(&port->state->xmit) < WAKEUP_CHARS)
-               uart_write_wakeup(port);
+       writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IER);
 }
 
 /**
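
start_tx above acknowledges CDNS_UART_IXR_TXEMPTY in the status register before unmasking it in IER. A plausible reading, assuming the status bits are write-one-to-clear (a hardware detail not stated in the patch), is that this keeps a stale latched TXEMPTY from firing the moment the interrupt is enabled. A tiny standalone model of that acknowledge-then-unmask ordering (all names below are made up):

#include <stdio.h>

#define IXR_TXEMPTY (1u << 3)

struct fake_uart {
        unsigned int isr;   /* latched status, write-one-to-clear */
        unsigned int ier;   /* interrupt enable mask              */
};

/* Model of writing a 1 to the status register: clears that latched bit. */
static void ack_status(struct fake_uart *u, unsigned int bits)
{
        u->isr &= ~bits;
}

static void unmask(struct fake_uart *u, unsigned int bits)
{
        u->ier |= bits;
}

static int irq_pending(const struct fake_uart *u)
{
        return (u->isr & u->ier) != 0;
}

int main(void)
{
        struct fake_uart u = { .isr = IXR_TXEMPTY, .ier = 0 };  /* stale bit */

        /* Same order as the driver: acknowledge first, then unmask. */
        ack_status(&u, IXR_TXEMPTY);
        unmask(&u, IXR_TXEMPTY);
        printf("spurious irq pending: %d\n", irq_pending(&u));  /* prints 0 */
        return 0;
}
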
@@ -557,10 +537,10 @@ static void cdns_uart_stop_tx(struct uart_port *port)
 {
        unsigned int regval;
 
-       regval = readl(port->membase + CDNS_UART_CR_OFFSET);
+       regval = readl(port->membase + CDNS_UART_CR);
        regval |= CDNS_UART_CR_TX_DIS;
        /* Disable the transmitter */
-       writel(regval, port->membase + CDNS_UART_CR_OFFSET);
+       writel(regval, port->membase + CDNS_UART_CR);
 }
 
 /**
@@ -571,10 +551,13 @@ static void cdns_uart_stop_rx(struct uart_port *port)
 {
        unsigned int regval;
 
-       regval = readl(port->membase + CDNS_UART_CR_OFFSET);
-       regval |= CDNS_UART_CR_RX_DIS;
+       /* Disable RX IRQs */
+       writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IDR);
+
        /* Disable the receiver */
-       writel(regval, port->membase + CDNS_UART_CR_OFFSET);
+       regval = readl(port->membase + CDNS_UART_CR);
+       regval |= CDNS_UART_CR_RX_DIS;
+       writel(regval, port->membase + CDNS_UART_CR);
 }
 
 /**
@@ -587,7 +570,7 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port)
 {
        unsigned int status;
 
-       status = readl(port->membase + CDNS_UART_SR_OFFSET) &
+       status = readl(port->membase + CDNS_UART_SR) &
                                CDNS_UART_SR_TXEMPTY;
        return status ? TIOCSER_TEMT : 0;
 }
@@ -605,15 +588,15 @@ static void cdns_uart_break_ctl(struct uart_port *port, int ctl)
 
        spin_lock_irqsave(&port->lock, flags);
 
-       status = readl(port->membase + CDNS_UART_CR_OFFSET);
+       status = readl(port->membase + CDNS_UART_CR);
 
        if (ctl == -1)
                writel(CDNS_UART_CR_STARTBRK | status,
-                               port->membase + CDNS_UART_CR_OFFSET);
+                               port->membase + CDNS_UART_CR);
        else {
                if ((status & CDNS_UART_CR_STOPBRK) == 0)
                        writel(CDNS_UART_CR_STOPBRK | status,
-                                       port->membase + CDNS_UART_CR_OFFSET);
+                                       port->membase + CDNS_UART_CR);
        }
        spin_unlock_irqrestore(&port->lock, flags);
 }
@@ -636,18 +619,18 @@ static void cdns_uart_set_termios(struct uart_port *port,
        spin_lock_irqsave(&port->lock, flags);
 
        /* Wait for the transmit FIFO to empty before making changes */
-       if (!(readl(port->membase + CDNS_UART_CR_OFFSET) &
+       if (!(readl(port->membase + CDNS_UART_CR) &
                                CDNS_UART_CR_TX_DIS)) {
-               while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+               while (!(readl(port->membase + CDNS_UART_SR) &
                                CDNS_UART_SR_TXEMPTY)) {
                        cpu_relax();
                }
        }
 
        /* Disable the TX and RX to set baud rate */
-       ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+       ctrl_reg = readl(port->membase + CDNS_UART_CR);
        ctrl_reg |= CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS;
-       writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+       writel(ctrl_reg, port->membase + CDNS_UART_CR);
 
        /*
         * Min baud rate = 6bps and Max Baud Rate is 10Mbps for 100Mhz clk
@@ -666,20 +649,20 @@ static void cdns_uart_set_termios(struct uart_port *port,
        uart_update_timeout(port, termios->c_cflag, baud);
 
        /* Set TX/RX Reset */
-       ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+       ctrl_reg = readl(port->membase + CDNS_UART_CR);
        ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
-       writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+       writel(ctrl_reg, port->membase + CDNS_UART_CR);
 
        /*
         * Clear the RX disable and TX disable bits and then set the TX enable
         * bit and RX enable bit to enable the transmitter and receiver.
         */
-       ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+       ctrl_reg = readl(port->membase + CDNS_UART_CR);
        ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
        ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
-       writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+       writel(ctrl_reg, port->membase + CDNS_UART_CR);
 
-       writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
+       writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
 
        port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
                        CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
@@ -699,7 +682,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
                        CDNS_UART_IXR_TOUT | CDNS_UART_IXR_PARITY |
                        CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN;
 
-       mode_reg = readl(port->membase + CDNS_UART_MR_OFFSET);
+       mode_reg = readl(port->membase + CDNS_UART_MR);
 
        /* Handling Data Size */
        switch (termios->c_cflag & CSIZE) {
@@ -740,7 +723,7 @@ static void cdns_uart_set_termios(struct uart_port *port,
                cval |= CDNS_UART_MR_PARITY_NONE;
        }
        cval |= mode_reg & 1;
-       writel(cval, port->membase + CDNS_UART_MR_OFFSET);
+       writel(cval, port->membase + CDNS_UART_MR);
 
        spin_unlock_irqrestore(&port->lock, flags);
 }
@@ -753,63 +736,67 @@ static void cdns_uart_set_termios(struct uart_port *port,
  */
 static int cdns_uart_startup(struct uart_port *port)
 {
-       unsigned int retval = 0, status = 0;
+       int ret;
+       unsigned long flags;
+       unsigned int status = 0;
 
-       retval = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME,
-                                                               (void *)port);
-       if (retval)
-               return retval;
+       spin_lock_irqsave(&port->lock, flags);
 
        /* Disable the TX and RX */
        writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
-                       port->membase + CDNS_UART_CR_OFFSET);
+                       port->membase + CDNS_UART_CR);
 
        /* Set the Control Register with TX/RX Enable, TX/RX Reset,
         * no break chars.
         */
        writel(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST,
-                       port->membase + CDNS_UART_CR_OFFSET);
-
-       status = readl(port->membase + CDNS_UART_CR_OFFSET);
+                       port->membase + CDNS_UART_CR);
 
-       /* Clear the RX disable and TX disable bits and then set the TX enable
-        * bit and RX enable bit to enable the transmitter and receiver.
+       /*
+        * Clear the RX disable bit and then set the RX enable bit to enable
+        * the receiver.
         */
-       writel((status & ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS))
-                       | (CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN |
-                       CDNS_UART_CR_STOPBRK),
-                       port->membase + CDNS_UART_CR_OFFSET);
+       status = readl(port->membase + CDNS_UART_CR);
+       status &= ~CDNS_UART_CR_RX_DIS;
+       status |= CDNS_UART_CR_RX_EN;
+       writel(status, port->membase + CDNS_UART_CR);
 
        /* Set the Mode Register with normal mode, 8 data bits, 1 stop bit,
         * no parity.
         */
        writel(CDNS_UART_MR_CHMODE_NORM | CDNS_UART_MR_STOPMODE_1_BIT
                | CDNS_UART_MR_PARITY_NONE | CDNS_UART_MR_CHARLEN_8_BIT,
-               port->membase + CDNS_UART_MR_OFFSET);
+               port->membase + CDNS_UART_MR);
 
        /*
         * Set the RX FIFO Trigger level to use most of the FIFO, but it
         * can be tuned with a module parameter
         */
-       writel(rx_trigger_level, port->membase + CDNS_UART_RXWM_OFFSET);
+       writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
 
        /*
         * Receive Timeout register is enabled but it
         * can be tuned with a module parameter
         */
-       writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
+       writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
 
        /* Clear out any pending interrupts before enabling them */
-       writel(readl(port->membase + CDNS_UART_ISR_OFFSET),
-                       port->membase + CDNS_UART_ISR_OFFSET);
+       writel(readl(port->membase + CDNS_UART_ISR),
+                       port->membase + CDNS_UART_ISR);
+
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       ret = request_irq(port->irq, cdns_uart_isr, 0, CDNS_UART_NAME, port);
+       if (ret) {
+               dev_err(port->dev, "request_irq '%d' failed with %d\n",
+                       port->irq, ret);
+               return ret;
+       }
 
        /* Set the Interrupt Registers with desired interrupts */
-       writel(CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_PARITY |
-               CDNS_UART_IXR_FRAMING | CDNS_UART_IXR_OVERRUN |
-               CDNS_UART_IXR_RXTRIG | CDNS_UART_IXR_TOUT,
-               port->membase + CDNS_UART_IER_OFFSET);
+       writel(CDNS_UART_RX_IRQS, port->membase + CDNS_UART_IER);
 
-       return retval;
+       return 0;
 }
 
 /**
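
The reworked startup path programs the hardware and clears stale status under the port lock, attaches the handler with request_irq() only afterwards, and unmasks the RX sources last, so the ISR never runs against a half-initialized port. A compact standalone sketch of that init / attach / unmask ordering and its failure path (types and names are invented for illustration):

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

#define RX_IRQS 0x3fu     /* illustrative mask, not the driver's CDNS_UART_RX_IRQS */

struct dev {
        bool hw_ready;           /* registers programmed, stale status cleared */
        bool irq_attached;       /* request_irq() equivalent                    */
        unsigned int ier;        /* interrupt enable mask                       */
};

static int startup(struct dev *d, bool irq_fails)
{
        /* 1. program the hardware and clear stale status (under the port lock) */
        d->hw_ready = true;

        /* 2. attach the handler; bail out before anything is unmasked */
        if (irq_fails) {
                fprintf(stderr, "request_irq failed with %d\n", -EBUSY);
                return -EBUSY;
        }
        d->irq_attached = true;

        /* 3. only now unmask the receive interrupt sources */
        d->ier = RX_IRQS;
        return 0;
}

int main(void)
{
        struct dev ok = { 0 }, bad = { 0 };
        int rc1 = startup(&ok, false);
        int rc2 = startup(&bad, true);

        printf("startup (irq ok)   = %d, ier=%#x\n", rc1, ok.ier);
        printf("startup (irq fail) = %d, ier=%#x\n", rc2, bad.ier);
        return 0;
}
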
@@ -819,14 +806,21 @@ static int cdns_uart_startup(struct uart_port *port)
 static void cdns_uart_shutdown(struct uart_port *port)
 {
        int status;
+       unsigned long flags;
+
+       spin_lock_irqsave(&port->lock, flags);
 
        /* Disable interrupts */
-       status = readl(port->membase + CDNS_UART_IMR_OFFSET);
-       writel(status, port->membase + CDNS_UART_IDR_OFFSET);
+       status = readl(port->membase + CDNS_UART_IMR);
+       writel(status, port->membase + CDNS_UART_IDR);
+       writel(0xffffffff, port->membase + CDNS_UART_ISR);
 
        /* Disable the TX and RX */
        writel(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS,
-                       port->membase + CDNS_UART_CR_OFFSET);
+                       port->membase + CDNS_UART_CR);
+
+       spin_unlock_irqrestore(&port->lock, flags);
+
        free_irq(port->irq, port);
 }
 
@@ -928,7 +922,7 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
        u32 val;
 
-       val = readl(port->membase + CDNS_UART_MODEMCR_OFFSET);
+       val = readl(port->membase + CDNS_UART_MODEMCR);
 
        val &= ~(CDNS_UART_MODEMCR_RTS | CDNS_UART_MODEMCR_DTR);
 
@@ -937,55 +931,46 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
        if (mctrl & TIOCM_DTR)
                val |= CDNS_UART_MODEMCR_DTR;
 
-       writel(val, port->membase + CDNS_UART_MODEMCR_OFFSET);
+       writel(val, port->membase + CDNS_UART_MODEMCR);
 }
 
 #ifdef CONFIG_CONSOLE_POLL
 static int cdns_uart_poll_get_char(struct uart_port *port)
 {
-       u32 imr;
        int c;
+       unsigned long flags;
 
-       /* Disable all interrupts */
-       imr = readl(port->membase + CDNS_UART_IMR_OFFSET);
-       writel(imr, port->membase + CDNS_UART_IDR_OFFSET);
+       spin_lock_irqsave(&port->lock, flags);
 
        /* Check if FIFO is empty */
-       if (readl(port->membase + CDNS_UART_SR_OFFSET) & CDNS_UART_SR_RXEMPTY)
+       if (readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_RXEMPTY)
                c = NO_POLL_CHAR;
        else /* Read a character */
-               c = (unsigned char) readl(
-                                       port->membase + CDNS_UART_FIFO_OFFSET);
+               c = (unsigned char) readl(port->membase + CDNS_UART_FIFO);
 
-       /* Enable interrupts */
-       writel(imr, port->membase + CDNS_UART_IER_OFFSET);
+       spin_unlock_irqrestore(&port->lock, flags);
 
        return c;
 }
 
 static void cdns_uart_poll_put_char(struct uart_port *port, unsigned char c)
 {
-       u32 imr;
+       unsigned long flags;
 
-       /* Disable all interrupts */
-       imr = readl(port->membase + CDNS_UART_IMR_OFFSET);
-       writel(imr, port->membase + CDNS_UART_IDR_OFFSET);
+       spin_lock_irqsave(&port->lock, flags);
 
        /* Wait until FIFO is empty */
-       while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
-                               CDNS_UART_SR_TXEMPTY))
+       while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
                cpu_relax();
 
        /* Write a character */
-       writel(c, port->membase + CDNS_UART_FIFO_OFFSET);
+       writel(c, port->membase + CDNS_UART_FIFO);
 
        /* Wait until FIFO is empty */
-       while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
-                               CDNS_UART_SR_TXEMPTY))
+       while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
                cpu_relax();
 
-       /* Enable interrupts */
-       writel(imr, port->membase + CDNS_UART_IER_OFFSET);
+       spin_unlock_irqrestore(&port->lock, flags);
 
        return;
 }
@@ -1059,8 +1044,7 @@ static struct uart_port *cdns_uart_get_port(int id)
  */
 static void cdns_uart_console_wait_tx(struct uart_port *port)
 {
-       while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
-                               CDNS_UART_SR_TXEMPTY))
+       while (!(readl(port->membase + CDNS_UART_SR) & CDNS_UART_SR_TXEMPTY))
                barrier();
 }
 
@@ -1072,7 +1056,7 @@ static void cdns_uart_console_wait_tx(struct uart_port *port)
 static void cdns_uart_console_putchar(struct uart_port *port, int ch)
 {
        cdns_uart_console_wait_tx(port);
-       writel(ch, port->membase + CDNS_UART_FIFO_OFFSET);
+       writel(ch, port->membase + CDNS_UART_FIFO);
 }
 
 static void __init cdns_early_write(struct console *con, const char *s,
@@ -1109,30 +1093,33 @@ static void cdns_uart_console_write(struct console *co, const char *s,
        unsigned int imr, ctrl;
        int locked = 1;
 
-       if (oops_in_progress)
+       if (port->sysrq)
+               locked = 0;
+       else if (oops_in_progress)
                locked = spin_trylock_irqsave(&port->lock, flags);
        else
                spin_lock_irqsave(&port->lock, flags);
 
        /* save and disable interrupt */
-       imr = readl(port->membase + CDNS_UART_IMR_OFFSET);
-       writel(imr, port->membase + CDNS_UART_IDR_OFFSET);
+       imr = readl(port->membase + CDNS_UART_IMR);
+       writel(imr, port->membase + CDNS_UART_IDR);
 
        /*
         * Make sure that the tx part is enabled. Set the TX enable bit and
         * clear the TX disable bit to enable the transmitter.
         */
-       ctrl = readl(port->membase + CDNS_UART_CR_OFFSET);
-       writel((ctrl & ~CDNS_UART_CR_TX_DIS) | CDNS_UART_CR_TX_EN,
-                       port->membase + CDNS_UART_CR_OFFSET);
+       ctrl = readl(port->membase + CDNS_UART_CR);
+       ctrl &= ~CDNS_UART_CR_TX_DIS;
+       ctrl |= CDNS_UART_CR_TX_EN;
+       writel(ctrl, port->membase + CDNS_UART_CR);
 
        uart_console_write(port, s, count, cdns_uart_console_putchar);
        cdns_uart_console_wait_tx(port);
 
-       writel(ctrl, port->membase + CDNS_UART_CR_OFFSET);
+       writel(ctrl, port->membase + CDNS_UART_CR);
 
        /* restore interrupt state */
-       writel(imr, port->membase + CDNS_UART_IER_OFFSET);
+       writel(imr, port->membase + CDNS_UART_IER);
 
        if (locked)
                spin_unlock_irqrestore(&port->lock, flags);
@@ -1244,14 +1231,13 @@ static int cdns_uart_suspend(struct device *device)
 
                spin_lock_irqsave(&port->lock, flags);
                /* Empty the receive FIFO 1st before making changes */
-               while (!(readl(port->membase + CDNS_UART_SR_OFFSET) &
+               while (!(readl(port->membase + CDNS_UART_SR) &
                                        CDNS_UART_SR_RXEMPTY))
-                       readl(port->membase + CDNS_UART_FIFO_OFFSET);
+                       readl(port->membase + CDNS_UART_FIFO);
                /* set RX trigger level to 1 */
-               writel(1, port->membase + CDNS_UART_RXWM_OFFSET);
+               writel(1, port->membase + CDNS_UART_RXWM);
                /* disable RX timeout interrupts */
-               writel(CDNS_UART_IXR_TOUT,
-                               port->membase + CDNS_UART_IDR_OFFSET);
+               writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IDR);
                spin_unlock_irqrestore(&port->lock, flags);
        }
 
@@ -1290,30 +1276,28 @@ static int cdns_uart_resume(struct device *device)
                spin_lock_irqsave(&port->lock, flags);
 
                /* Set TX/RX Reset */
-               ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+               ctrl_reg = readl(port->membase + CDNS_UART_CR);
                ctrl_reg |= CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST;
-               writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
-               while (readl(port->membase + CDNS_UART_CR_OFFSET) &
+               writel(ctrl_reg, port->membase + CDNS_UART_CR);
+               while (readl(port->membase + CDNS_UART_CR) &
                                (CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
                        cpu_relax();
 
                /* restore rx timeout value */
-               writel(rx_timeout, port->membase + CDNS_UART_RXTOUT_OFFSET);
+               writel(rx_timeout, port->membase + CDNS_UART_RXTOUT);
                /* Enable Tx/Rx */
-               ctrl_reg = readl(port->membase + CDNS_UART_CR_OFFSET);
+               ctrl_reg = readl(port->membase + CDNS_UART_CR);
                ctrl_reg &= ~(CDNS_UART_CR_TX_DIS | CDNS_UART_CR_RX_DIS);
                ctrl_reg |= CDNS_UART_CR_TX_EN | CDNS_UART_CR_RX_EN;
-               writel(ctrl_reg, port->membase + CDNS_UART_CR_OFFSET);
+               writel(ctrl_reg, port->membase + CDNS_UART_CR);
 
                spin_unlock_irqrestore(&port->lock, flags);
        } else {
                spin_lock_irqsave(&port->lock, flags);
                /* restore original rx trigger level */
-               writel(rx_trigger_level,
-                               port->membase + CDNS_UART_RXWM_OFFSET);
+               writel(rx_trigger_level, port->membase + CDNS_UART_RXWM);
                /* enable RX timeout interrupt */
-               writel(CDNS_UART_IXR_TOUT,
-                               port->membase + CDNS_UART_IER_OFFSET);
+               writel(CDNS_UART_IXR_TOUT, port->membase + CDNS_UART_IER);
                spin_unlock_irqrestore(&port->lock, flags);
        }
 
@@ -1406,27 +1390,30 @@ static int cdns_uart_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "Cannot get uart_port structure\n");
                rc = -ENODEV;
                goto err_out_notif_unreg;
-       } else {
-               /* Register the port.
-                * This function also registers this device with the tty layer
-                * and triggers invocation of the config_port() entry point.
-                */
-               port->mapbase = res->start;
-               port->irq = irq;
-               port->dev = &pdev->dev;
-               port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
-               port->private_data = cdns_uart_data;
-               cdns_uart_data->port = port;
-               platform_set_drvdata(pdev, port);
-               rc = uart_add_one_port(&cdns_uart_uart_driver, port);
-               if (rc) {
-                       dev_err(&pdev->dev,
-                               "uart_add_one_port() failed; err=%i\n", rc);
-                       goto err_out_notif_unreg;
-               }
-               return 0;
        }
 
+       /*
+        * Register the port.
+        * This function also registers this device with the tty layer
+        * and triggers invocation of the config_port() entry point.
+        */
+       port->mapbase = res->start;
+       port->irq = irq;
+       port->dev = &pdev->dev;
+       port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
+       port->private_data = cdns_uart_data;
+       cdns_uart_data->port = port;
+       platform_set_drvdata(pdev, port);
+
+       rc = uart_add_one_port(&cdns_uart_uart_driver, port);
+       if (rc) {
+               dev_err(&pdev->dev,
+                       "uart_add_one_port() failed; err=%i\n", rc);
+               goto err_out_notif_unreg;
+       }
+
+       return 0;
+
 err_out_notif_unreg:
 #ifdef CONFIG_COMMON_CLK
        clk_notifier_unregister(cdns_uart_data->uartclk,
index 2b65bb7ffb8a47ba642864a4c9870a14b0fb5a89..eeefd76a30da9c0f42700267fdec035ac7fad494 100644 (file)
@@ -1181,6 +1181,10 @@ static void zs_console_write(struct console *co, const char *s,
        if (txint & TxINT_ENAB) {
                zport->regs[1] |= TxINT_ENAB;
                write_zsreg(zport, R1, zport->regs[1]);
+
+               /* Resume any transmission as the TxIP bit won't be set.  */
+               if (!zport->tx_stopped)
+                       zs_raw_transmit_chars(zport);
        }
        spin_unlock_irqrestore(&scc->zlock, flags);
 }
index 6188059fd523991c4a5072c0b8350f4d31b00dfd..f5476e270734b24a030a2b20db04628209a25994 100644 (file)
@@ -2363,7 +2363,7 @@ static void mgsl_throttle(struct tty_struct * tty)
        if (I_IXOFF(tty))
                mgsl_send_xchar(tty, STOP_CHAR(tty));
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->irq_spinlock,flags);
                info->serial_signals &= ~SerialSignal_RTS;
                usc_set_serial_signals(info);
@@ -2397,7 +2397,7 @@ static void mgsl_unthrottle(struct tty_struct * tty)
                        mgsl_send_xchar(tty, START_CHAR(tty));
        }
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->irq_spinlock,flags);
                info->serial_signals |= SerialSignal_RTS;
                usc_set_serial_signals(info);
@@ -3039,30 +3039,25 @@ static void mgsl_set_termios(struct tty_struct *tty, struct ktermios *old_termio
        mgsl_change_params(info);
 
        /* Handle transition to B0 status */
-       if (old_termios->c_cflag & CBAUD &&
-           !(tty->termios.c_cflag & CBAUD)) {
+       if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
                info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
                spin_lock_irqsave(&info->irq_spinlock,flags);
                usc_set_serial_signals(info);
                spin_unlock_irqrestore(&info->irq_spinlock,flags);
        }
-       
+
        /* Handle transition away from B0 status */
-       if (!(old_termios->c_cflag & CBAUD) &&
-           tty->termios.c_cflag & CBAUD) {
+       if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
                info->serial_signals |= SerialSignal_DTR;
-               if (!(tty->termios.c_cflag & CRTSCTS) || 
-                   !test_bit(TTY_THROTTLED, &tty->flags)) {
+               if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
                        info->serial_signals |= SerialSignal_RTS;
-               }
                spin_lock_irqsave(&info->irq_spinlock,flags);
                usc_set_serial_signals(info);
                spin_unlock_irqrestore(&info->irq_spinlock,flags);
        }
-       
+
        /* Handle turning off CRTSCTS */
-       if (old_termios->c_cflag & CRTSCTS &&
-           !(tty->termios.c_cflag & CRTSCTS)) {
+       if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
                tty->hw_stopped = 0;
                mgsl_start(tty);
        }
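
The conversions in this driver and in the two synclink drivers that follow replace open-coded tty->termios.c_cflag tests with the C_CRTSCTS()/C_CLOCAL()/C_BAUD() helpers. A userspace approximation of what those helpers expand to, to make the equivalence explicit (the struct below is a stand-in, not the kernel's tty_struct):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <termios.h>      /* CRTSCTS, CLOCAL, CBAUD, B115200 */

struct fake_tty {
        struct termios termios;
};

/* Same shape as the kernel helpers: test a c_cflag bit on the tty. */
#define C_FLAG(tty, flag)  ((tty)->termios.c_cflag & (flag))
#define C_CRTSCTS(tty)     C_FLAG((tty), CRTSCTS)
#define C_CLOCAL(tty)      C_FLAG((tty), CLOCAL)
#define C_BAUD(tty)        C_FLAG((tty), CBAUD)

int main(void)
{
        struct fake_tty tty = { 0 };

        tty.termios.c_cflag = B115200 | CRTSCTS;
        printf("hw flow control: %s\n", C_CRTSCTS(&tty) ? "on" : "off");
        printf("modem control lines ignored: %s\n",
               C_CLOCAL(&tty) ? "yes" : "no");
        printf("baud bits zero (B0): %s\n", C_BAUD(&tty) ? "no" : "yes");
        return 0;
}
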
@@ -3281,7 +3276,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
                return 0;
        }
 
-       if (tty->termios.c_cflag & CLOCAL)
+       if (C_CLOCAL(tty))
                do_clocal = true;
 
        /* Wait for carrier detect and the line to become
index 5505ea84217934fff36e56f720844ba72c175fdc..c0a2f5a1b1c2d08b0534df5138b12d813f420602 100644 (file)
@@ -774,8 +774,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        change_params(info);
 
        /* Handle transition to B0 status */
-       if (old_termios->c_cflag & CBAUD &&
-           !(tty->termios.c_cflag & CBAUD)) {
+       if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
                info->signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
                spin_lock_irqsave(&info->lock,flags);
                set_signals(info);
@@ -783,21 +782,17 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        }
 
        /* Handle transition away from B0 status */
-       if (!(old_termios->c_cflag & CBAUD) &&
-           tty->termios.c_cflag & CBAUD) {
+       if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
                info->signals |= SerialSignal_DTR;
-               if (!(tty->termios.c_cflag & CRTSCTS) ||
-                   !test_bit(TTY_THROTTLED, &tty->flags)) {
+               if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
                        info->signals |= SerialSignal_RTS;
-               }
                spin_lock_irqsave(&info->lock,flags);
                set_signals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 
        /* Handle turning off CRTSCTS */
-       if (old_termios->c_cflag & CRTSCTS &&
-           !(tty->termios.c_cflag & CRTSCTS)) {
+       if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty)) {
                tty->hw_stopped = 0;
                tx_release(tty);
        }
@@ -1362,7 +1357,7 @@ static void throttle(struct tty_struct * tty)
        DBGINFO(("%s throttle\n", info->device_name));
        if (I_IXOFF(tty))
                send_xchar(tty, STOP_CHAR(tty));
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock,flags);
                info->signals &= ~SerialSignal_RTS;
                set_signals(info);
@@ -1387,7 +1382,7 @@ static void unthrottle(struct tty_struct * tty)
                else
                        send_xchar(tty, START_CHAR(tty));
        }
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock,flags);
                info->signals |= SerialSignal_RTS;
                set_signals(info);
@@ -3280,7 +3275,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
                return 0;
        }
 
-       if (tty->termios.c_cflag & CLOCAL)
+       if (C_CLOCAL(tty))
                do_clocal = true;
 
        /* Wait for carrier detect and the line to become
index fb00a06dfa4bba12e41f4dd42bd38e2c95d16ee4..90da0c712262d81b92891aec8103c95a9df44634 100644 (file)
@@ -549,8 +549,8 @@ static int  tiocmset(struct tty_struct *tty,
                        unsigned int set, unsigned int clear);
 static int  set_break(struct tty_struct *tty, int break_state);
 
-static void add_device(SLMP_INFO *info);
-static void device_init(int adapter_num, struct pci_dev *pdev);
+static int  add_device(SLMP_INFO *info);
+static int  device_init(int adapter_num, struct pci_dev *pdev);
 static int  claim_resources(SLMP_INFO *info);
 static void release_resources(SLMP_INFO *info);
 
@@ -871,8 +871,7 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        change_params(info);
 
        /* Handle transition to B0 status */
-       if (old_termios->c_cflag & CBAUD &&
-           !(tty->termios.c_cflag & CBAUD)) {
+       if ((old_termios->c_cflag & CBAUD) && !C_BAUD(tty)) {
                info->serial_signals &= ~(SerialSignal_RTS | SerialSignal_DTR);
                spin_lock_irqsave(&info->lock,flags);
                set_signals(info);
@@ -880,21 +879,17 @@ static void set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        }
 
        /* Handle transition away from B0 status */
-       if (!(old_termios->c_cflag & CBAUD) &&
-           tty->termios.c_cflag & CBAUD) {
+       if (!(old_termios->c_cflag & CBAUD) && C_BAUD(tty)) {
                info->serial_signals |= SerialSignal_DTR;
-               if (!(tty->termios.c_cflag & CRTSCTS) ||
-                   !test_bit(TTY_THROTTLED, &tty->flags)) {
+               if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
                        info->serial_signals |= SerialSignal_RTS;
-               }
                spin_lock_irqsave(&info->lock,flags);
                set_signals(info);
                spin_unlock_irqrestore(&info->lock,flags);
        }
 
        /* Handle turning off CRTSCTS */
-       if (old_termios->c_cflag & CRTSCTS &&
-           !(tty->termios.c_cflag & CRTSCTS)) {
+       if (old_termios->c_cflag & CRTSCTS && !C_CRTSCTS(tty)) {
                tty->hw_stopped = 0;
                tx_release(tty);
        }
@@ -1472,7 +1467,7 @@ static void throttle(struct tty_struct * tty)
        if (I_IXOFF(tty))
                send_xchar(tty, STOP_CHAR(tty));
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock,flags);
                info->serial_signals &= ~SerialSignal_RTS;
                set_signals(info);
@@ -1501,7 +1496,7 @@ static void unthrottle(struct tty_struct * tty)
                        send_xchar(tty, START_CHAR(tty));
        }
 
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                spin_lock_irqsave(&info->lock,flags);
                info->serial_signals |= SerialSignal_RTS;
                set_signals(info);
@@ -3297,7 +3292,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
                return 0;
        }
 
-       if (tty->termios.c_cflag & CLOCAL)
+       if (C_CLOCAL(tty))
                do_clocal = true;
 
        /* Wait for carrier detect and the line to become
@@ -3693,7 +3688,7 @@ static void release_resources(SLMP_INFO *info)
 /* Add the specified device instance data structure to the
  * global linked list of devices and increment the device count.
  */
-static void add_device(SLMP_INFO *info)
+static int add_device(SLMP_INFO *info)
 {
        info->next_device = NULL;
        info->line = synclinkmp_device_count;
@@ -3731,7 +3726,9 @@ static void add_device(SLMP_INFO *info)
                info->max_frame_size );
 
 #if SYNCLINK_GENERIC_HDLC
-       hdlcdev_init(info);
+       return hdlcdev_init(info);
+#else
+       return 0;
 #endif
 }
 
@@ -3820,10 +3817,10 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
        return info;
 }
 
-static void device_init(int adapter_num, struct pci_dev *pdev)
+static int device_init(int adapter_num, struct pci_dev *pdev)
 {
        SLMP_INFO *port_array[SCA_MAX_PORTS];
-       int port;
+       int port, rc;
 
        /* allocate device instances for up to SCA_MAX_PORTS devices */
        for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
@@ -3833,14 +3830,16 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
                                tty_port_destroy(&port_array[port]->port);
                                kfree(port_array[port]);
                        }
-                       return;
+                       return -ENOMEM;
                }
        }
 
        /* give copy of port_array to all ports and add to device list  */
        for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
                memcpy(port_array[port]->port_array,port_array,sizeof(port_array));
-               add_device( port_array[port] );
+               rc = add_device( port_array[port] );
+               if (rc)
+                       goto err_add;
                spin_lock_init(&port_array[port]->lock);
        }
 
@@ -3860,21 +3859,30 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
                        alloc_dma_bufs(port_array[port]);
                }
 
-               if ( request_irq(port_array[0]->irq_level,
+               rc = request_irq(port_array[0]->irq_level,
                                        synclinkmp_interrupt,
                                        port_array[0]->irq_flags,
                                        port_array[0]->device_name,
-                                       port_array[0]) < 0 ) {
+                                       port_array[0]);
+               if ( rc ) {
                        printk( "%s(%d):%s Can't request interrupt, IRQ=%d\n",
                                __FILE__,__LINE__,
                                port_array[0]->device_name,
                                port_array[0]->irq_level );
+                       goto err_irq;
                }
-               else {
-                       port_array[0]->irq_requested = true;
-                       adapter_test(port_array[0]);
-               }
+               port_array[0]->irq_requested = true;
+               adapter_test(port_array[0]);
        }
+       return 0;
+err_irq:
+       release_resources( port_array[0] );
+err_add:
+       for ( port = 0; port < SCA_MAX_PORTS; ++port ) {
+               tty_port_destroy(&port_array[port]->port);
+               kfree(port_array[port]);
+       }
+       return rc;
 }
 
 static const struct tty_operations ops = {
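
device_init() above gains the usual error-unwind shape: each failure jumps to a label that releases exactly what was acquired before it, in reverse order, and returns the saved rc. A standalone miniature of the same goto-based unwinding (the resources here are plain allocations standing in for the ports and the IRQ):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct adapter {
        void *ports;
        void *irq_cookie;
};

static int setup(struct adapter *a, int fail_at)
{
        int rc;

        a->ports = malloc(64);                 /* step 1: allocate ports */
        if (!a->ports) {
                rc = -ENOMEM;
                goto err_out;
        }

        if (fail_at == 2) {                    /* step 2: pretend request_irq fails */
                rc = -EBUSY;
                goto err_ports;
        }
        a->irq_cookie = malloc(16);
        if (!a->irq_cookie) {
                rc = -ENOMEM;
                goto err_ports;
        }

        return 0;                              /* success: keep everything */

err_ports:                                     /* unwind in reverse order */
        free(a->ports);
        a->ports = NULL;
err_out:
        return rc;
}

int main(void)
{
        struct adapter a = { 0 };

        printf("setup(fail_at=2) = %d\n", setup(&a, 2));   /* -16 (-EBUSY) */
        printf("setup(fail_at=0) = %d\n", setup(&a, 0));   /* 0 */
        free(a.irq_cookie);
        free(a.ports);
        return 0;
}
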
@@ -5589,8 +5597,7 @@ static int synclinkmp_init_one (struct pci_dev *dev,
                printk("error enabling pci device %p\n", dev);
                return -EIO;
        }
-       device_init( ++synclinkmp_adapter_count, dev );
-       return 0;
+       return device_init( ++synclinkmp_adapter_count, dev );
 }
 
 static void synclinkmp_remove_one (struct pci_dev *dev)
index 3d245cd3d8e62082301b7061144ec2329cf16609..66d53fcf4da0d8616265e39e96239f47d2398e90 100644 (file)
 #include <linux/tty.h>
 
 struct tty_audit_buf {
-       atomic_t count;
        struct mutex mutex;     /* Protects all data below */
-       int major, minor;       /* The TTY which the data is from */
+       dev_t dev;              /* The TTY which the data is from */
        unsigned icanon:1;
        size_t valid;
        unsigned char *data;    /* Allocated size N_TTY_BUF_SIZE */
 };
 
-static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor,
-                                                unsigned icanon)
+static struct tty_audit_buf *tty_audit_buf_ref(void)
+{
+       struct tty_audit_buf *buf;
+
+       buf = current->signal->tty_audit_buf;
+       WARN_ON(buf == ERR_PTR(-ESRCH));
+       return buf;
+}
+
+static struct tty_audit_buf *tty_audit_buf_alloc(void)
 {
        struct tty_audit_buf *buf;
 
@@ -33,11 +40,9 @@ static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor,
        buf->data = kmalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
        if (!buf->data)
                goto err_buf;
-       atomic_set(&buf->count, 1);
        mutex_init(&buf->mutex);
-       buf->major = major;
-       buf->minor = minor;
-       buf->icanon = icanon;
+       buf->dev = MKDEV(0, 0);
+       buf->icanon = 0;
        buf->valid = 0;
        return buf;
 
@@ -54,13 +59,7 @@ static void tty_audit_buf_free(struct tty_audit_buf *buf)
        kfree(buf);
 }
 
-static void tty_audit_buf_put(struct tty_audit_buf *buf)
-{
-       if (atomic_dec_and_test(&buf->count))
-               tty_audit_buf_free(buf);
-}
-
-static void tty_audit_log(const char *description, int major, int minor,
+static void tty_audit_log(const char *description, dev_t dev,
                          unsigned char *data, size_t size)
 {
        struct audit_buffer *ab;
@@ -76,7 +75,7 @@ static void tty_audit_log(const char *description, int major, int minor,
 
                audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d"
                                 " minor=%d comm=", description, pid, uid,
-                                loginuid, sessionid, major, minor);
+                                loginuid, sessionid, MAJOR(dev), MINOR(dev));
                get_task_comm(name, tsk);
                audit_log_untrustedstring(ab, name);
                audit_log_format(ab, " data=");
@@ -99,7 +98,7 @@ static void tty_audit_buf_push(struct tty_audit_buf *buf)
                buf->valid = 0;
                return;
        }
-       tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid);
+       tty_audit_log("tty", buf->dev, buf->data, buf->valid);
        buf->valid = 0;
 }
 
@@ -108,21 +107,20 @@ static void tty_audit_buf_push(struct tty_audit_buf *buf)
  *
  *     Make sure all buffered data is written out and deallocate the buffer.
  *     Only needs to be called if current->signal->tty_audit_buf != %NULL.
+ *
+ *     The process is single-threaded at this point; no other threads share
+ *     current->signal.
  */
 void tty_audit_exit(void)
 {
        struct tty_audit_buf *buf;
 
-       buf = current->signal->tty_audit_buf;
-       current->signal->tty_audit_buf = NULL;
+       buf = xchg(&current->signal->tty_audit_buf, ERR_PTR(-ESRCH));
        if (!buf)
                return;
 
-       mutex_lock(&buf->mutex);
        tty_audit_buf_push(buf);
-       mutex_unlock(&buf->mutex);
-
-       tty_audit_buf_put(buf);
+       tty_audit_buf_free(buf);
 }
 
 /**
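
tty_audit_exit() now swaps ERR_PTR(-ESRCH) into signal->tty_audit_buf instead of NULL, so later lookups can tell "never allocated" apart from "already torn down". A standalone sketch of that sentinel idea, using C11 atomics and crude stand-ins for the kernel's xchg()/ERR_PTR() helpers (names invented):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdatomic.h>

#define ERR_PTR(e)  ((void *)(long)(e))          /* crude stand-in for the kernel macro */
#define IS_ERR(p)   ((unsigned long)(p) >= (unsigned long)-4095L)

static _Atomic(void *) audit_buf;                 /* NULL, a buffer, or ERR_PTR(-ESRCH) */

static void audit_exit(void)
{
        /* Atomically take ownership and leave a "gone for good" marker behind. */
        void *buf = atomic_exchange(&audit_buf, ERR_PTR(-ESRCH));

        if (buf && !IS_ERR(buf)) {
                puts("flushing and freeing the buffer");
                free(buf);
        }
}

static void *audit_buf_ref(void)
{
        void *buf = atomic_load(&audit_buf);

        if (IS_ERR(buf))
                puts("WARN: lookup after exit");  /* mirrors the WARN_ON() above */
        return buf;
}

int main(void)
{
        atomic_store(&audit_buf, malloc(32));
        audit_exit();            /* flushes and installs the sentinel */
        audit_buf_ref();         /* now trips the warning             */
        return 0;
}
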
@@ -133,7 +131,6 @@ void tty_audit_exit(void)
 void tty_audit_fork(struct signal_struct *sig)
 {
        sig->audit_tty = current->signal->audit_tty;
-       sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd;
 }
 
 /**
@@ -141,123 +138,68 @@ void tty_audit_fork(struct signal_struct *sig)
  */
 void tty_audit_tiocsti(struct tty_struct *tty, char ch)
 {
-       struct tty_audit_buf *buf;
-       int major, minor, should_audit;
-       unsigned long flags;
+       dev_t dev;
 
-       spin_lock_irqsave(&current->sighand->siglock, flags);
-       should_audit = current->signal->audit_tty;
-       buf = current->signal->tty_audit_buf;
-       if (buf)
-               atomic_inc(&buf->count);
-       spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
-       major = tty->driver->major;
-       minor = tty->driver->minor_start + tty->index;
-       if (buf) {
-               mutex_lock(&buf->mutex);
-               if (buf->major == major && buf->minor == minor)
-                       tty_audit_buf_push(buf);
-               mutex_unlock(&buf->mutex);
-               tty_audit_buf_put(buf);
-       }
+       dev = MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index;
+       if (tty_audit_push())
+               return;
 
-       if (should_audit && audit_enabled) {
+       if (audit_enabled) {
                kuid_t auid;
                unsigned int sessionid;
 
                auid = audit_get_loginuid(current);
                sessionid = audit_get_sessionid(current);
-               tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1);
+               tty_audit_log("ioctl=TIOCSTI", dev, &ch, 1);
        }
 }
 
 /**
- * tty_audit_push_current -    Flush current's pending audit data
+ *     tty_audit_push  -       Flush current's pending audit data
  *
- * Try to lock sighand and get a reference to the tty audit buffer if available.
- * Flush the buffer or return an appropriate error code.
+ *     Returns 0 on success, -EPERM if tty audit is disabled
  */
-int tty_audit_push_current(void)
+int tty_audit_push(void)
 {
-       struct tty_audit_buf *buf = ERR_PTR(-EPERM);
-       struct task_struct *tsk = current;
-       unsigned long flags;
+       struct tty_audit_buf *buf;
 
-       if (!lock_task_sighand(tsk, &flags))
-               return -ESRCH;
+       if (~current->signal->audit_tty & AUDIT_TTY_ENABLE)
+               return -EPERM;
 
-       if (tsk->signal->audit_tty) {
-               buf = tsk->signal->tty_audit_buf;
-               if (buf)
-                       atomic_inc(&buf->count);
+       buf = tty_audit_buf_ref();
+       if (!IS_ERR_OR_NULL(buf)) {
+               mutex_lock(&buf->mutex);
+               tty_audit_buf_push(buf);
+               mutex_unlock(&buf->mutex);
        }
-       unlock_task_sighand(tsk, &flags);
-
-       /*
-        * Return 0 when signal->audit_tty set
-        * but tsk->signal->tty_audit_buf == NULL.
-        */
-       if (!buf || IS_ERR(buf))
-               return PTR_ERR(buf);
-
-       mutex_lock(&buf->mutex);
-       tty_audit_buf_push(buf);
-       mutex_unlock(&buf->mutex);
-
-       tty_audit_buf_put(buf);
        return 0;
 }
 
 /**
  *     tty_audit_buf_get       -       Get an audit buffer.
  *
- *     Get an audit buffer for @tty, allocate it if necessary.  Return %NULL
- *     if TTY auditing is disabled or out of memory.  Otherwise, return a new
- *     reference to the buffer.
+ *     Get an audit buffer, allocate it if necessary.  Return %NULL
+ *     if out of memory or ERR_PTR(-ESRCH) if tty_audit_exit() has already
+ *     occurred.  Otherwise, return a new reference to the buffer.
  */
-static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
-               unsigned icanon)
+static struct tty_audit_buf *tty_audit_buf_get(void)
 {
-       struct tty_audit_buf *buf, *buf2;
-       unsigned long flags;
-
-       buf = NULL;
-       buf2 = NULL;
-       spin_lock_irqsave(&current->sighand->siglock, flags);
-       if (likely(!current->signal->audit_tty))
-               goto out;
-       buf = current->signal->tty_audit_buf;
-       if (buf) {
-               atomic_inc(&buf->count);
-               goto out;
-       }
-       spin_unlock_irqrestore(&current->sighand->siglock, flags);
+       struct tty_audit_buf *buf;
 
-       buf2 = tty_audit_buf_alloc(tty->driver->major,
-                                  tty->driver->minor_start + tty->index,
-                                  icanon);
-       if (buf2 == NULL) {
+       buf = tty_audit_buf_ref();
+       if (buf)
+               return buf;
+
+       buf = tty_audit_buf_alloc();
+       if (buf == NULL) {
                audit_log_lost("out of memory in TTY auditing");
                return NULL;
        }
 
-       spin_lock_irqsave(&current->sighand->siglock, flags);
-       if (!current->signal->audit_tty)
-               goto out;
-       buf = current->signal->tty_audit_buf;
-       if (!buf) {
-               current->signal->tty_audit_buf = buf2;
-               buf = buf2;
-               buf2 = NULL;
-       }
-       atomic_inc(&buf->count);
-       /* Fall through */
- out:
-       spin_unlock_irqrestore(&current->sighand->siglock, flags);
-       if (buf2)
-               tty_audit_buf_free(buf2);
-       return buf;
+       /* Race to use this buffer, free it if another wins */
+       if (cmpxchg(&current->signal->tty_audit_buf, NULL, buf) != NULL)
+               tty_audit_buf_free(buf);
+       return tty_audit_buf_ref();
 }
 
 /**
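
tty_audit_buf_get() now allocates without holding siglock and publishes the buffer with cmpxchg(); if another task won the race, the local allocation is freed and the winner's buffer is used instead. A standalone C11 sketch of the same allocate / try-to-publish / back-off pattern (names invented):

#include <stdio.h>
#include <stdlib.h>
#include <stdatomic.h>

static _Atomic(void *) shared_buf;    /* NULL until somebody publishes a buffer */

static void *get_buf(void)
{
        void *cur = atomic_load(&shared_buf);

        if (cur)
                return cur;           /* fast path: already installed */

        void *mine = malloc(128);
        if (!mine)
                return NULL;

        /* Try to publish; expected == NULL means "nobody beat us to it". */
        void *expected = NULL;
        if (!atomic_compare_exchange_strong(&shared_buf, &expected, mine)) {
                free(mine);           /* lost the race: use the winner's buffer */
                return expected;      /* the CAS wrote the current value here */
        }
        return mine;
}

int main(void)
{
        void *a = get_buf();
        void *b = get_buf();          /* second call sees the installed buffer */

        printf("same buffer: %s\n", a == b ? "yes" : "no");
        free(a);
        return 0;
}
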
@@ -265,39 +207,36 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty,
  *
  *     Audit @data of @size from @tty, if necessary.
  */
-void tty_audit_add_data(struct tty_struct *tty, const void *data,
-                       size_t size, unsigned icanon)
+void tty_audit_add_data(struct tty_struct *tty, const void *data, size_t size)
 {
        struct tty_audit_buf *buf;
-       int major, minor;
-       int audit_log_tty_passwd;
-       unsigned long flags;
+       unsigned int icanon = !!L_ICANON(tty);
+       unsigned int audit_tty;
+       dev_t dev;
 
-       if (unlikely(size == 0))
+       audit_tty = READ_ONCE(current->signal->audit_tty);
+       if (~audit_tty & AUDIT_TTY_ENABLE)
                return;
 
-       spin_lock_irqsave(&current->sighand->siglock, flags);
-       audit_log_tty_passwd = current->signal->audit_tty_log_passwd;
-       spin_unlock_irqrestore(&current->sighand->siglock, flags);
-       if (!audit_log_tty_passwd && icanon && !L_ECHO(tty))
+       if (unlikely(size == 0))
                return;
 
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY
            && tty->driver->subtype == PTY_TYPE_MASTER)
                return;
 
-       buf = tty_audit_buf_get(tty, icanon);
-       if (!buf)
+       if ((~audit_tty & AUDIT_TTY_LOG_PASSWD) && icanon && !L_ECHO(tty))
+               return;
+
+       buf = tty_audit_buf_get();
+       if (IS_ERR_OR_NULL(buf))
                return;
 
        mutex_lock(&buf->mutex);
-       major = tty->driver->major;
-       minor = tty->driver->minor_start + tty->index;
-       if (buf->major != major || buf->minor != minor
-           || buf->icanon != icanon) {
+       dev = MKDEV(tty->driver->major, tty->driver->minor_start) + tty->index;
+       if (buf->dev != dev || buf->icanon != icanon) {
                tty_audit_buf_push(buf);
-               buf->major = major;
-               buf->minor = minor;
+               buf->dev = dev;
                buf->icanon = icanon;
        }
        do {
@@ -314,38 +253,4 @@ void tty_audit_add_data(struct tty_struct *tty, const void *data,
                        tty_audit_buf_push(buf);
        } while (size != 0);
        mutex_unlock(&buf->mutex);
-       tty_audit_buf_put(buf);
-}
-
-/**
- *     tty_audit_push  -       Push buffered data out
- *
- *     Make sure no audit data is pending for @tty on the current process.
- */
-void tty_audit_push(struct tty_struct *tty)
-{
-       struct tty_audit_buf *buf;
-       unsigned long flags;
-
-       spin_lock_irqsave(&current->sighand->siglock, flags);
-       if (likely(!current->signal->audit_tty)) {
-               spin_unlock_irqrestore(&current->sighand->siglock, flags);
-               return;
-       }
-       buf = current->signal->tty_audit_buf;
-       if (buf)
-               atomic_inc(&buf->count);
-       spin_unlock_irqrestore(&current->sighand->siglock, flags);
-
-       if (buf) {
-               int major, minor;
-
-               major = tty->driver->major;
-               minor = tty->driver->minor_start + tty->index;
-               mutex_lock(&buf->mutex);
-               if (buf->major == major && buf->minor == minor)
-                       tty_audit_buf_push(buf);
-               mutex_unlock(&buf->mutex);
-               tty_audit_buf_put(buf);
-       }
 }
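
Throughout this file the (major, minor) pair is collapsed into a single dev_t built as MKDEV(driver->major, driver->minor_start) + tty->index, turning the "same tty?" test into one comparison. A small userspace illustration of that encoding using glibc's makedev()/major()/minor(), the userspace counterparts of the kernel macros (the numbers are arbitrary):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysmacros.h>   /* makedev(), major(), minor() */

int main(void)
{
        unsigned int drv_major = 4, minor_start = 64, index = 2;  /* e.g. ttyS2 */

        dev_t dev = makedev(drv_major, minor_start) + index;

        /* One value to compare, but both halves are still recoverable. */
        printf("dev=%#llx major=%u minor=%u\n",
               (unsigned long long)dev, major(dev), minor(dev));
        return 0;
}
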
index 3cd31e0d4bd9545b5357cda0ffa14373d181e670..a946e49a2626f0f414c07ea633c4c46a138bb7f9 100644 (file)
@@ -435,25 +435,42 @@ int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
 }
 EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
 
+/**
+ *     tty_ldisc_receive_buf           -       forward data to line discipline
+ *     @ld:    line discipline to process input
+ *     @p:     char buffer
+ *     @f:     TTY_* flags buffer
+ *     @count: number of bytes to process
+ *
+ *     Callers other than flush_to_ldisc() need to exclude the kworker
+ *     from concurrent use of the line discipline, see paste_selection().
+ *
+ *     Returns the number of bytes processed
+ */
+int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+                         char *f, int count)
+{
+       if (ld->ops->receive_buf2)
+               count = ld->ops->receive_buf2(ld->tty, p, f, count);
+       else {
+               count = min_t(int, count, ld->tty->receive_room);
+               if (count && ld->ops->receive_buf)
+                       ld->ops->receive_buf(ld->tty, p, f, count);
+       }
+       return count;
+}
+EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);
 
 static int
-receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
+receive_buf(struct tty_ldisc *ld, struct tty_buffer *head, int count)
 {
-       struct tty_ldisc *disc = tty->ldisc;
        unsigned char *p = char_buf_ptr(head, head->read);
        char          *f = NULL;
 
        if (~head->flags & TTYB_NORMAL)
                f = flag_buf_ptr(head, head->read);
 
-       if (disc->ops->receive_buf2)
-               count = disc->ops->receive_buf2(tty, p, f, count);
-       else {
-               count = min_t(int, count, tty->receive_room);
-               if (count && disc->ops->receive_buf)
-                       disc->ops->receive_buf(tty, p, f, count);
-       }
-       return count;
+       return tty_ldisc_receive_buf(ld, p, f, count);
 }
 
 /**
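
tty_ldisc_receive_buf() returns how many bytes the discipline actually accepted, so a caller outside flush_to_ldisc(), such as the paste_selection() case the comment mentions, has to loop and resubmit the unconsumed remainder. A standalone model of that caller-side loop with a fake, room-limited receiver standing in for the line discipline (none of this is the kernel API):

#include <stdio.h>
#include <string.h>

/* Stand-in for a line discipline with limited receive_room. */
static char sink[64];
static size_t sink_used;

/* Accepts at most 5 bytes per call, returns how many were taken. */
static int fake_receive_buf(const char *p, int count)
{
        int room = 5;
        int n = count < room ? count : room;

        memcpy(sink + sink_used, p, n);
        sink_used += n;
        return n;
}

/* Caller-side loop: resubmit whatever the previous call did not consume. */
static void push_all(const char *data, int len)
{
        while (len > 0) {
                int taken = fake_receive_buf(data, len);

                if (!taken)
                        break;        /* no room at all; a real caller would wait */
                data += taken;
                len -= taken;
        }
}

int main(void)
{
        push_all("hello, line discipline", 22);
        printf("%.*s\n", (int)sink_used, sink);
        return 0;
}
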
@@ -514,7 +531,7 @@ static void flush_to_ldisc(struct work_struct *work)
                        continue;
                }
 
-               count = receive_buf(tty, head, count);
+               count = receive_buf(disc, head, count);
                if (!count)
                        break;
                head->read += count;
index 892c923547450518cb5d7a4714f570a6803253bd..c14c45fefa70d8fa5f522c5715e9ae6931235cad 100644 (file)
@@ -123,7 +123,8 @@ struct ktermios tty_std_termios = { /* for the benefit of tty drivers  */
                   ECHOCTL | ECHOKE | IEXTEN,
        .c_cc = INIT_C_CC,
        .c_ispeed = 38400,
-       .c_ospeed = 38400
+       .c_ospeed = 38400,
+       /* .c_line = N_TTY, */
 };
 
 EXPORT_SYMBOL(tty_std_termios);
@@ -134,13 +135,8 @@ EXPORT_SYMBOL(tty_std_termios);
 
 LIST_HEAD(tty_drivers);                        /* linked list of tty drivers */
 
-/* Mutex to protect creating and releasing a tty. This is shared with
-   vt.c for deeply disgusting hack reasons */
+/* Mutex to protect creating and releasing a tty */
 DEFINE_MUTEX(tty_mutex);
-EXPORT_SYMBOL(tty_mutex);
-
-/* Spinlock to protect the tty->tty_files list */
-DEFINE_SPINLOCK(tty_files_lock);
 
 static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
 static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
@@ -168,10 +164,9 @@ static void release_tty(struct tty_struct *tty, int idx);
  *     Locking: none. Must be called after tty is definitely unused
  */
 
-void free_tty_struct(struct tty_struct *tty)
+static void free_tty_struct(struct tty_struct *tty)
 {
-       if (!tty)
-               return;
+       tty_ldisc_deinit(tty);
        put_device(tty->dev);
        kfree(tty->write_buf);
        tty->magic = 0xDEADDEAD;
@@ -204,9 +199,9 @@ void tty_add_file(struct tty_struct *tty, struct file *file)
        priv->tty = tty;
        priv->file = file;
 
-       spin_lock(&tty_files_lock);
+       spin_lock(&tty->files_lock);
        list_add(&priv->list, &tty->tty_files);
-       spin_unlock(&tty_files_lock);
+       spin_unlock(&tty->files_lock);
 }
 
 /**
@@ -227,10 +222,11 @@ void tty_free_file(struct file *file)
 static void tty_del_file(struct file *file)
 {
        struct tty_file_private *priv = file->private_data;
+       struct tty_struct *tty = priv->tty;
 
-       spin_lock(&tty_files_lock);
+       spin_lock(&tty->files_lock);
        list_del(&priv->list);
-       spin_unlock(&tty_files_lock);
+       spin_unlock(&tty->files_lock);
        tty_free_file(file);
 }
 
@@ -288,11 +284,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
        struct list_head *p;
        int count = 0;
 
-       spin_lock(&tty_files_lock);
+       spin_lock(&tty->files_lock);
        list_for_each(p, &tty->tty_files) {
                count++;
        }
-       spin_unlock(&tty_files_lock);
+       spin_unlock(&tty->files_lock);
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
            tty->driver->subtype == PTY_TYPE_SLAVE &&
            tty->link && tty->link->count)
@@ -383,6 +379,12 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
 EXPORT_SYMBOL_GPL(tty_find_polling_driver);
 #endif
 
+static int is_ignored(int sig)
+{
+       return (sigismember(&current->blocked, sig) ||
+               current->sighand->action[sig-1].sa.sa_handler == SIG_IGN);
+}
+
 /**
  *     tty_check_change        -       check for POSIX terminal changes
  *     @tty: tty to check
@@ -466,6 +468,11 @@ static long hung_up_tty_compat_ioctl(struct file *file,
        return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
 }
 
+static int hung_up_tty_fasync(int fd, struct file *file, int on)
+{
+       return -ENOTTY;
+}
+
 static const struct file_operations tty_fops = {
        .llseek         = no_llseek,
        .read           = tty_read,
@@ -498,6 +505,7 @@ static const struct file_operations hung_up_tty_fops = {
        .unlocked_ioctl = hung_up_tty_ioctl,
        .compat_ioctl   = hung_up_tty_compat_ioctl,
        .release        = tty_release,
+       .fasync         = hung_up_tty_fasync,
 };
 
 static DEFINE_SPINLOCK(redirect_lock);
@@ -709,7 +717,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
           workqueue with the lock held */
        check_tty_count(tty, "tty_hangup");
 
-       spin_lock(&tty_files_lock);
+       spin_lock(&tty->files_lock);
        /* This breaks for file handles being sent over AF_UNIX sockets ? */
        list_for_each_entry(priv, &tty->tty_files, list) {
                filp = priv->file;
@@ -721,14 +729,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
                __tty_fasync(-1, filp, 0);      /* can't block */
                filp->f_op = &hung_up_tty_fops;
        }
-       spin_unlock(&tty_files_lock);
+       spin_unlock(&tty->files_lock);
 
        refs = tty_signal_session_leader(tty, exit_session);
        /* Account for the p->signal references we killed */
        while (refs--)
                tty_kref_put(tty);
 
-       tty_ldisc_hangup(tty);
+       tty_ldisc_hangup(tty, cons_filp != NULL);
 
        spin_lock_irq(&tty->ctrl_lock);
        clear_bit(TTY_THROTTLED, &tty->flags);
@@ -753,10 +761,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
        } else if (tty->ops->hangup)
                tty->ops->hangup(tty);
        /*
-        * We don't want to have driver/ldisc interactions beyond
-        * the ones we did here. The driver layer expects no
-        * calls after ->hangup() from the ldisc side. However we
-        * can't yet guarantee all that.
+        * We don't want to have driver/ldisc interactions beyond the ones
+        * we did here. The driver layer expects no calls after ->hangup()
+        * from the ldisc side, which is now guaranteed.
         */
        set_bit(TTY_HUPPED, &tty->flags);
        tty_unlock(tty);
@@ -1069,6 +1076,8 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
        /* We want to wait for the line discipline to sort out in this
           situation */
        ld = tty_ldisc_ref_wait(tty);
+       if (!ld)
+               return hung_up_tty_read(file, buf, count, ppos);
        if (ld->ops->read)
                i = ld->ops->read(tty, file, buf, count);
        else
@@ -1243,6 +1252,8 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
        if (tty->ops->write_room == NULL)
                tty_err(tty, "missing write_room method\n");
        ld = tty_ldisc_ref_wait(tty);
+       if (!ld)
+               return hung_up_tty_write(file, buf, count, ppos);
        if (!ld->ops->write)
                ret = -EIO;
        else
@@ -1378,7 +1389,7 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
  *     the tty_mutex currently so we can be relaxed about ordering.
  */
 
-int tty_init_termios(struct tty_struct *tty)
+void tty_init_termios(struct tty_struct *tty)
 {
        struct ktermios *tp;
        int idx = tty->index;
@@ -1388,24 +1399,21 @@ int tty_init_termios(struct tty_struct *tty)
        else {
                /* Check for lazy saved data */
                tp = tty->driver->termios[idx];
-               if (tp != NULL)
+               if (tp != NULL) {
                        tty->termios = *tp;
-               else
+                       tty->termios.c_line  = tty->driver->init_termios.c_line;
+               } else
                        tty->termios = tty->driver->init_termios;
        }
        /* Compatibility until drivers always set this */
        tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios);
        tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios);
-       return 0;
 }
 EXPORT_SYMBOL_GPL(tty_init_termios);
 
 int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
 {
-       int ret = tty_init_termios(tty);
-       if (ret)
-               return ret;
-
+       tty_init_termios(tty);
        tty_driver_kref_get(driver);
        tty->count++;
        driver->ttys[tty->index] = tty;
@@ -1442,7 +1450,7 @@ static int tty_driver_install_tty(struct tty_driver *driver,
  *
  *     Locking: tty_mutex for now
  */
-void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
+static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
 {
        if (driver->ops->remove)
                driver->ops->remove(driver, tty);
@@ -1463,19 +1471,20 @@ static int tty_reopen(struct tty_struct *tty)
 {
        struct tty_driver *driver = tty->driver;
 
-       if (!tty->count)
-               return -EIO;
-
        if (driver->type == TTY_DRIVER_TYPE_PTY &&
            driver->subtype == PTY_TYPE_MASTER)
                return -EIO;
 
+       if (!tty->count)
+               return -EAGAIN;
+
        if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
                return -EBUSY;
 
        tty->count++;
 
-       WARN_ON(!tty->ldisc);
+       if (!tty->ldisc)
+               return tty_ldisc_reinit(tty, tty->termios.c_line);
 
        return 0;
 }
@@ -1529,7 +1538,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
        tty_lock(tty);
        retval = tty_driver_install_tty(driver, tty);
        if (retval < 0)
-               goto err_deinit_tty;
+               goto err_free_tty;
 
        if (!tty->port)
                tty->port = driver->ports[idx];
@@ -1551,9 +1560,8 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
        /* Return the tty locked so that it cannot vanish under the caller */
        return tty;
 
-err_deinit_tty:
+err_free_tty:
        tty_unlock(tty);
-       deinitialize_tty_struct(tty);
        free_tty_struct(tty);
 err_module_put:
        module_put(driver->owner);
@@ -1568,7 +1576,7 @@ err_release_tty:
        return ERR_PTR(retval);
 }
 
-void tty_free_termios(struct tty_struct *tty)
+static void tty_free_termios(struct tty_struct *tty)
 {
        struct ktermios *tp;
        int idx = tty->index;
@@ -1587,7 +1595,6 @@ void tty_free_termios(struct tty_struct *tty)
        }
        *tp = tty->termios;
 }
-EXPORT_SYMBOL(tty_free_termios);
 
 /**
  *     tty_flush_works         -       flush all works of a tty/pty pair
@@ -1634,9 +1641,9 @@ static void release_one_tty(struct work_struct *work)
        tty_driver_kref_put(driver);
        module_put(owner);
 
-       spin_lock(&tty_files_lock);
+       spin_lock(&tty->files_lock);
        list_del_init(&tty->tty_files);
-       spin_unlock(&tty_files_lock);
+       spin_unlock(&tty->files_lock);
 
        put_pid(tty->pgrp);
        put_pid(tty->session);
@@ -1967,7 +1974,7 @@ static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
  *     Locking: tty_mutex protects get_tty_driver
  */
 static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
-               int *noctty, int *index)
+               int *index)
 {
        struct tty_driver *driver;
 
@@ -1977,7 +1984,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
                extern struct tty_driver *console_driver;
                driver = tty_driver_kref_get(console_driver);
                *index = fg_console;
-               *noctty = 1;
                break;
        }
 #endif
@@ -1988,7 +1994,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
                        if (driver) {
                                /* Don't let /dev/console block */
                                filp->f_flags |= O_NONBLOCK;
-                               *noctty = 1;
                                break;
                        }
                }
@@ -2003,6 +2008,68 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
        return driver;
 }
 
+/**
+ *     tty_open_by_driver      -       open a tty device
+ *     @device: dev_t of device to open
+ *     @inode: inode of device file
+ *     @filp: file pointer to tty
+ *
+ *     Performs the driver lookup, checks for a reopen, or otherwise
+ *     performs the first-time tty initialization.
+ *
+ *     Returns the locked initialized or re-opened &tty_struct
+ *
+ *     Claims the global tty_mutex to serialize:
+ *       - concurrent first-time tty initialization
+ *       - concurrent tty driver removal w/ lookup
+ *       - concurrent tty removal from driver table
+ */
+static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
+                                            struct file *filp)
+{
+       struct tty_struct *tty;
+       struct tty_driver *driver = NULL;
+       int index = -1;
+       int retval;
+
+       mutex_lock(&tty_mutex);
+       driver = tty_lookup_driver(device, filp, &index);
+       if (IS_ERR(driver)) {
+               mutex_unlock(&tty_mutex);
+               return ERR_CAST(driver);
+       }
+
+       /* check whether we're reopening an existing tty */
+       tty = tty_driver_lookup_tty(driver, inode, index);
+       if (IS_ERR(tty)) {
+               mutex_unlock(&tty_mutex);
+               goto out;
+       }
+
+       if (tty) {
+               mutex_unlock(&tty_mutex);
+               retval = tty_lock_interruptible(tty);
+               tty_kref_put(tty);  /* drop kref from tty_driver_lookup_tty() */
+               if (retval) {
+                       if (retval == -EINTR)
+                               retval = -ERESTARTSYS;
+                       tty = ERR_PTR(retval);
+                       goto out;
+               }
+               retval = tty_reopen(tty);
+               if (retval < 0) {
+                       tty_unlock(tty);
+                       tty = ERR_PTR(retval);
+               }
+       } else { /* Returns with the tty_lock held for now */
+               tty = tty_init_dev(driver, index);
+               mutex_unlock(&tty_mutex);
+       }
+out:
+       tty_driver_kref_put(driver);
+       return tty;
+}
+
 /**
  *     tty_open                -       open a tty device
  *     @inode: inode of device file
@@ -2031,8 +2098,6 @@ static int tty_open(struct inode *inode, struct file *filp)
 {
        struct tty_struct *tty;
        int noctty, retval;
-       struct tty_driver *driver = NULL;
-       int index;
        dev_t device = inode->i_rdev;
        unsigned saved_flags = filp->f_flags;
 
@@ -2043,56 +2108,22 @@ retry_open:
        if (retval)
                return -ENOMEM;
 
-       noctty = filp->f_flags & O_NOCTTY;
-       index  = -1;
-       retval = 0;
-
        tty = tty_open_current_tty(device, filp);
-       if (!tty) {
-               mutex_lock(&tty_mutex);
-               driver = tty_lookup_driver(device, filp, &noctty, &index);
-               if (IS_ERR(driver)) {
-                       retval = PTR_ERR(driver);
-                       goto err_unlock;
-               }
-
-               /* check whether we're reopening an existing tty */
-               tty = tty_driver_lookup_tty(driver, inode, index);
-               if (IS_ERR(tty)) {
-                       retval = PTR_ERR(tty);
-                       goto err_unlock;
-               }
-
-               if (tty) {
-                       mutex_unlock(&tty_mutex);
-                       tty_lock(tty);
-                       /* safe to drop the kref from tty_driver_lookup_tty() */
-                       tty_kref_put(tty);
-                       retval = tty_reopen(tty);
-                       if (retval < 0) {
-                               tty_unlock(tty);
-                               tty = ERR_PTR(retval);
-                       }
-               } else { /* Returns with the tty_lock held for now */
-                       tty = tty_init_dev(driver, index);
-                       mutex_unlock(&tty_mutex);
-               }
-
-               tty_driver_kref_put(driver);
-       }
+       if (!tty)
+               tty = tty_open_by_driver(device, inode, filp);
 
        if (IS_ERR(tty)) {
+               tty_free_file(filp);
                retval = PTR_ERR(tty);
-               goto err_file;
+               if (retval != -EAGAIN || signal_pending(current))
+                       return retval;
+               schedule();
+               goto retry_open;
        }
 
        tty_add_file(tty, filp);
 
        check_tty_count(tty, __func__);
-       if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
-           tty->driver->subtype == PTY_TYPE_MASTER)
-               noctty = 1;
-
        tty_debug_hangup(tty, "opening (count=%d)\n", tty->count);
 
        if (tty->ops->open)
@@ -2125,6 +2156,12 @@ retry_open:
 
        read_lock(&tasklist_lock);
        spin_lock_irq(&current->sighand->siglock);
+       noctty = (filp->f_flags & O_NOCTTY) ||
+                       device == MKDEV(TTY_MAJOR, 0) ||
+                       device == MKDEV(TTYAUX_MAJOR, 1) ||
+                       (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+                        tty->driver->subtype == PTY_TYPE_MASTER);
+
        if (!noctty &&
            current->signal->leader &&
            !current->signal->tty &&
@@ -2150,14 +2187,6 @@ retry_open:
        read_unlock(&tasklist_lock);
        tty_unlock(tty);
        return 0;
-err_unlock:
-       mutex_unlock(&tty_mutex);
-       /* after locks to avoid deadlock */
-       if (!IS_ERR_OR_NULL(driver))
-               tty_driver_kref_put(driver);
-err_file:
-       tty_free_file(filp);
-       return retval;
 }
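
The reworked tty_open() now computes noctty only once the device is known: O_NOCTTY, /dev/tty0 (TTY_MAJOR, 0), /dev/console (TTYAUX_MAJOR, 1) and pty masters all opt out of becoming the controlling terminal, and an -EAGAIN from tty_open_by_driver() simply retries the open. The userspace-visible knob is still the O_NOCTTY flag; a small illustration (the device path is only an example):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Ask the kernel not to make this tty our controlling terminal. */
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	/* ... use the port ... */
	close(fd);
	return 0;
}
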
 
 
@@ -2184,6 +2213,8 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
                return 0;
 
        ld = tty_ldisc_ref_wait(tty);
+       if (!ld)
+               return hung_up_tty_poll(filp, wait);
        if (ld->ops->poll)
                ret = ld->ops->poll(tty, filp, wait);
        tty_ldisc_deref(ld);
@@ -2193,7 +2224,6 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
 static int __tty_fasync(int fd, struct file *filp, int on)
 {
        struct tty_struct *tty = file_tty(filp);
-       struct tty_ldisc *ldisc;
        unsigned long flags;
        int retval = 0;
 
@@ -2204,13 +2234,6 @@ static int __tty_fasync(int fd, struct file *filp, int on)
        if (retval <= 0)
                goto out;
 
-       ldisc = tty_ldisc_ref(tty);
-       if (ldisc) {
-               if (ldisc->ops->fasync)
-                       ldisc->ops->fasync(tty, on);
-               tty_ldisc_deref(ldisc);
-       }
-
        if (on) {
                enum pid_type type;
                struct pid *pid;
@@ -2236,10 +2259,11 @@ out:
 static int tty_fasync(int fd, struct file *filp, int on)
 {
        struct tty_struct *tty = file_tty(filp);
-       int retval;
+       int retval = -ENOTTY;
 
        tty_lock(tty);
-       retval = __tty_fasync(fd, filp, on);
+       if (!tty_hung_up_p(filp))
+               retval = __tty_fasync(fd, filp, on);
        tty_unlock(tty);
 
        return retval;
@@ -2273,6 +2297,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
                return -EFAULT;
        tty_audit_tiocsti(tty, ch);
        ld = tty_ldisc_ref_wait(tty);
+       if (!ld)
+               return -EIO;
        ld->ops->receive_buf(tty, &ch, &mbz, 1);
        tty_ldisc_deref(ld);
        return 0;
@@ -2637,17 +2663,41 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _
 
 static int tiocsetd(struct tty_struct *tty, int __user *p)
 {
-       int ldisc;
+       int disc;
        int ret;
 
-       if (get_user(ldisc, p))
+       if (get_user(disc, p))
                return -EFAULT;
 
-       ret = tty_set_ldisc(tty, ldisc);
+       ret = tty_set_ldisc(tty, disc);
 
        return ret;
 }
 
+/**
+ *     tiocgetd        -       get line discipline
+ *     @tty: tty device
+ *     @p: pointer to user data
+ *
+ *     Retrieves the line discipline id directly from the ldisc.
+ *
+ *     Locking: waits for ldisc reference (in case the line discipline
+ *             is changing or the tty is being hung up)
+ */
+
+static int tiocgetd(struct tty_struct *tty, int __user *p)
+{
+       struct tty_ldisc *ld;
+       int ret;
+
+       ld = tty_ldisc_ref_wait(tty);
+       if (!ld)
+               return -EIO;
+       ret = put_user(ld->ops->num, p);
+       tty_ldisc_deref(ld);
+       return ret;
+}
+
 /**
  *     send_break      -       performed time break
  *     @tty: device to break on
@@ -2874,7 +2924,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case TIOCGSID:
                return tiocgsid(tty, real_tty, p);
        case TIOCGETD:
-               return put_user(tty->ldisc->ops->num, (int __user *)p);
+               return tiocgetd(tty, p);
        case TIOCSETD:
                return tiocsetd(tty, p);
        case TIOCVHANGUP:
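
Routing TIOCGETD through tiocgetd() means the ioctl now takes a proper ldisc reference instead of dereferencing tty->ldisc directly, returning -EIO if the tty was hung up in the meantime. The userspace side is unchanged; a minimal sketch (the device path is illustrative):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	int disc;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, TIOCGETD, &disc) == 0)	/* query the line discipline id */
		printf("line discipline: %d\n", disc);	/* 0 == N_TTY */
	close(fd);
	return 0;
}
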
@@ -2940,6 +2990,8 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        return retval;
        }
        ld = tty_ldisc_ref_wait(tty);
+       if (!ld)
+               return hung_up_tty_ioctl(file, cmd, arg);
        retval = -EINVAL;
        if (ld->ops->ioctl) {
                retval = ld->ops->ioctl(tty, file, cmd, arg);
@@ -2968,6 +3020,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
        }
 
        ld = tty_ldisc_ref_wait(tty);
+       if (!ld)
+               return hung_up_tty_compat_ioctl(file, cmd, arg);
        if (ld->ops->compat_ioctl)
                retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
        else
@@ -3118,6 +3172,7 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
        mutex_init(&tty->atomic_write_lock);
        spin_lock_init(&tty->ctrl_lock);
        spin_lock_init(&tty->flow_lock);
+       spin_lock_init(&tty->files_lock);
        INIT_LIST_HEAD(&tty->tty_files);
        INIT_WORK(&tty->SAK_work, do_SAK_work);
 
@@ -3130,20 +3185,6 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
        return tty;
 }
 
-/**
- *     deinitialize_tty_struct
- *     @tty: tty to deinitialize
- *
- *     This subroutine deinitializes a tty structure that has been newly
- *     allocated but tty_release cannot be called on that yet.
- *
- *     Locking: none - tty in question must not be exposed at this point
- */
-void deinitialize_tty_struct(struct tty_struct *tty)
-{
-       tty_ldisc_deinit(tty);
-}
-
 /**
  *     tty_put_char    -       write one character to a tty
  *     @tty: tty
@@ -3538,7 +3579,7 @@ void __init console_init(void)
        initcall_t *call;
 
        /* Setup the default TTY line discipline. */
-       tty_ldisc_begin();
+       n_tty_init();
 
        /*
         * set up the console device so that later boot sequences can
index 0ea351388724aeae75b70d22433b3d56736d1828..23bf5bb1d8bf74d01ea0a29b5ddb1e095e1855db 100644 (file)
@@ -719,16 +719,16 @@ static int get_sgflags(struct tty_struct *tty)
 {
        int flags = 0;
 
-       if (!(tty->termios.c_lflag & ICANON)) {
-               if (tty->termios.c_lflag & ISIG)
+       if (!L_ICANON(tty)) {
+               if (L_ISIG(tty))
                        flags |= 0x02;          /* cbreak */
                else
                        flags |= 0x20;          /* raw */
        }
-       if (tty->termios.c_lflag & ECHO)
+       if (L_ECHO(tty))
                flags |= 0x08;                  /* echo */
-       if (tty->termios.c_oflag & OPOST)
-               if (tty->termios.c_oflag & ONLCR)
+       if (O_OPOST(tty))
+               if (O_ONLCR(tty))
                        flags |= 0x10;          /* crmod */
        return flags;
 }
@@ -908,7 +908,7 @@ static int tty_change_softcar(struct tty_struct *tty, int arg)
        tty->termios.c_cflag |= bit;
        if (tty->ops->set_termios)
                tty->ops->set_termios(tty, &old);
-       if ((tty->termios.c_cflag & CLOCAL) != bit)
+       if (C_CLOCAL(tty) != bit)
                ret = -EINVAL;
        up_write(&tty->termios_rwsem);
        return ret;
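
These tty_ioctl.c hunks swap open-coded termios flag tests for the L_*/O_*/C_* helpers from the tty headers. They are plain flag tests; a sketch of the expected shape of such helpers (not copied from the header):

/* Sketch of the helper shape; the real definitions live in linux/tty.h. */
#define _C_FLAG(tty, f)	((tty)->termios.c_cflag & (f))
#define _L_FLAG(tty, f)	((tty)->termios.c_lflag & (f))

#define C_CLOCAL(tty)	_C_FLAG((tty), CLOCAL)
#define L_ICANON(tty)	_L_FLAG((tty), ICANON)
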
index a054d03c22e7b7376824927ce577d46644b01f16..68947f6de5ad6339adea804182597229c3eb1d38 100644 (file)
@@ -140,9 +140,16 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
  *     @disc: ldisc number
  *
  *     Takes a reference to a line discipline. Deals with refcounts and
- *     module locking counts. Returns NULL if the discipline is not available.
- *     Returns a pointer to the discipline and bumps the ref count if it is
- *     available
+ *     module locking counts.
+ *
+ *     Returns: -EINVAL if the discipline index is not [N_TTY..NR_LDISCS] or
+ *                      if the discipline is not registered
+ *              -EAGAIN if request_module() failed to load or register
+ *                      the discipline
+ *              -ENOMEM on allocation failure
+ *
+ *              Otherwise, returns a pointer to the discipline and bumps the
+ *              ref count
  *
  *     Locking:
  *             takes tty_ldiscs_lock to guard against ldisc races
@@ -250,19 +257,23 @@ const struct file_operations tty_ldiscs_proc_fops = {
  *     reference to it. If the line discipline is in flux then
  *     wait patiently until it changes.
  *
+ *     Returns: NULL if the tty has been hung up and not re-opened with
+ *              a new file descriptor, otherwise valid ldisc reference
+ *
  *     Note: Must not be called from an IRQ/timer context. The caller
  *     must also be careful not to hold other locks that will deadlock
  *     against a discipline change, such as an existing ldisc reference
  *     (which we check for)
  *
- *     Note: only callable from a file_operations routine (which
- *     guarantees tty->ldisc != NULL when the lock is acquired).
+ *     Note: a file_operations routine (read/poll/write) should use this
+ *     function to wait for any ldisc lifetime events to finish.
  */
 
 struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
 {
        ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
-       WARN_ON(!tty->ldisc);
+       if (!tty->ldisc)
+               ldsem_up_read(&tty->ldisc_sem);
        return tty->ldisc;
 }
 EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
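
With tty_ldisc_ref_wait() now returning NULL once a hung-up tty has lost its ldisc, every file_operations caller in this series grows the same guard and falls back to the hung-up handlers. Condensed from the tty_read() hunk earlier in this diff (hung_up_tty_read() is the file-local fallback there), not a standalone driver:

static ssize_t example_ldisc_read(struct tty_struct *tty, struct file *file,
				  char __user *buf, size_t count, loff_t *ppos)
{
	struct tty_ldisc *ld = tty_ldisc_ref_wait(tty);
	ssize_t i;

	if (!ld)				/* hung up: ldisc already gone */
		return hung_up_tty_read(file, buf, count, ppos);
	if (ld->ops->read)
		i = ld->ops->read(tty, file, buf, count);
	else
		i = -EIO;
	tty_ldisc_deref(ld);			/* release the ldisc_sem read lock */
	return i;
}
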
@@ -304,13 +315,13 @@ void tty_ldisc_deref(struct tty_ldisc *ld)
 EXPORT_SYMBOL_GPL(tty_ldisc_deref);
 
 
-static inline int __lockfunc
+static inline int
 __tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
 {
        return ldsem_down_write(&tty->ldisc_sem, timeout);
 }
 
-static inline int __lockfunc
+static inline int
 __tty_ldisc_lock_nested(struct tty_struct *tty, unsigned long timeout)
 {
        return ldsem_down_write_nested(&tty->ldisc_sem,
@@ -322,8 +333,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty)
        ldsem_up_write(&tty->ldisc_sem);
 }
 
-static int __lockfunc
-tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
 {
        int ret;
 
@@ -340,7 +350,7 @@ static void tty_ldisc_unlock(struct tty_struct *tty)
        __tty_ldisc_unlock(tty);
 }
 
-static int __lockfunc
+static int
 tty_ldisc_lock_pair_timeout(struct tty_struct *tty, struct tty_struct *tty2,
                            unsigned long timeout)
 {
@@ -376,14 +386,13 @@ tty_ldisc_lock_pair_timeout(struct tty_struct *tty, struct tty_struct *tty2,
        return 0;
 }
 
-static void __lockfunc
-tty_ldisc_lock_pair(struct tty_struct *tty, struct tty_struct *tty2)
+static void tty_ldisc_lock_pair(struct tty_struct *tty, struct tty_struct *tty2)
 {
        tty_ldisc_lock_pair_timeout(tty, tty2, MAX_SCHEDULE_TIMEOUT);
 }
 
-static void __lockfunc tty_ldisc_unlock_pair(struct tty_struct *tty,
-                                            struct tty_struct *tty2)
+static void tty_ldisc_unlock_pair(struct tty_struct *tty,
+                                 struct tty_struct *tty2)
 {
        __tty_ldisc_unlock(tty);
        if (tty2)
@@ -411,7 +420,7 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
 /**
  *     tty_set_termios_ldisc           -       set ldisc field
  *     @tty: tty structure
- *     @num: line discipline number
+ *     @disc: line discipline number
  *
  *     This is probably overkill for real world processors but
  *     they are not on hot paths so a little discipline won't do
@@ -424,10 +433,10 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
  *     Locking: takes termios_rwsem
  */
 
-static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
+static void tty_set_termios_ldisc(struct tty_struct *tty, int disc)
 {
        down_write(&tty->termios_rwsem);
-       tty->termios.c_line = num;
+       tty->termios.c_line = disc;
        up_write(&tty->termios_rwsem);
 
        tty->disc_data = NULL;
@@ -455,7 +464,7 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
                if (ret)
                        clear_bit(TTY_LDISC_OPEN, &tty->flags);
 
-               tty_ldisc_debug(tty, "%p: opened\n", tty->ldisc);
+               tty_ldisc_debug(tty, "%p: opened\n", ld);
                return ret;
        }
        return 0;
@@ -476,7 +485,7 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
        clear_bit(TTY_LDISC_OPEN, &tty->flags);
        if (ld->ops->close)
                ld->ops->close(tty);
-       tty_ldisc_debug(tty, "%p: closed\n", tty->ldisc);
+       tty_ldisc_debug(tty, "%p: closed\n", ld);
 }
 
 /**
@@ -525,12 +534,12 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
  *     the close of one side of a tty/pty pair, and eventually hangup.
  */
 
-int tty_set_ldisc(struct tty_struct *tty, int ldisc)
+int tty_set_ldisc(struct tty_struct *tty, int disc)
 {
        int retval;
        struct tty_ldisc *old_ldisc, *new_ldisc;
 
-       new_ldisc = tty_ldisc_get(tty, ldisc);
+       new_ldisc = tty_ldisc_get(tty, disc);
        if (IS_ERR(new_ldisc))
                return PTR_ERR(new_ldisc);
 
@@ -539,8 +548,13 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
        if (retval)
                goto err;
 
+       if (!tty->ldisc) {
+               retval = -EIO;
+               goto out;
+       }
+
        /* Check the no-op case */
-       if (tty->ldisc->ops->num == ldisc)
+       if (tty->ldisc->ops->num == disc)
                goto out;
 
        if (test_bit(TTY_HUPPED, &tty->flags)) {
@@ -556,7 +570,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
 
        /* Now set up the new line discipline. */
        tty->ldisc = new_ldisc;
-       tty_set_termios_ldisc(tty, ldisc);
+       tty_set_termios_ldisc(tty, disc);
 
        retval = tty_ldisc_open(tty, new_ldisc);
        if (retval < 0) {
@@ -589,6 +603,25 @@ err:
        return retval;
 }
 
+/**
+ *     tty_ldisc_kill  -       teardown ldisc
+ *     @tty: tty being released
+ *
+ *     Perform final close of the ldisc and reset tty->ldisc
+ */
+static void tty_ldisc_kill(struct tty_struct *tty)
+{
+       if (!tty->ldisc)
+               return;
+       /*
+        * Now kill off the ldisc
+        */
+       tty_ldisc_close(tty, tty->ldisc);
+       tty_ldisc_put(tty->ldisc);
+       /* Force an oops if we mess this up */
+       tty->ldisc = NULL;
+}
+
 /**
  *     tty_reset_termios       -       reset terminal state
  *     @tty: tty to reset
@@ -609,28 +642,44 @@ static void tty_reset_termios(struct tty_struct *tty)
 /**
  *     tty_ldisc_reinit        -       reinitialise the tty ldisc
  *     @tty: tty to reinit
- *     @ldisc: line discipline to reinitialize
+ *     @disc: line discipline to reinitialize
+ *
+ *     Completely reinitialize the line discipline state, by closing the
+ *     current instance, if there is one, and opening a new instance. If
+ *     an error occurs opening the new non-N_TTY instance, the instance
+ *     is dropped and tty->ldisc reset to NULL. The caller can then retry
+ *     with N_TTY instead.
  *
- *     Switch the tty to a line discipline and leave the ldisc
- *     state closed
+ *     Returns 0 if successful, otherwise error code < 0
  */
 
-static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
+int tty_ldisc_reinit(struct tty_struct *tty, int disc)
 {
-       struct tty_ldisc *ld = tty_ldisc_get(tty, ldisc);
+       struct tty_ldisc *ld;
+       int retval;
 
-       if (IS_ERR(ld))
-               return -1;
+       ld = tty_ldisc_get(tty, disc);
+       if (IS_ERR(ld)) {
+               BUG_ON(disc == N_TTY);
+               return PTR_ERR(ld);
+       }
 
-       tty_ldisc_close(tty, tty->ldisc);
-       tty_ldisc_put(tty->ldisc);
-       /*
-        *      Switch the line discipline back
-        */
-       tty->ldisc = ld;
-       tty_set_termios_ldisc(tty, ldisc);
+       if (tty->ldisc) {
+               tty_ldisc_close(tty, tty->ldisc);
+               tty_ldisc_put(tty->ldisc);
+       }
 
-       return 0;
+       /* switch the line discipline */
+       tty->ldisc = ld;
+       tty_set_termios_ldisc(tty, disc);
+       retval = tty_ldisc_open(tty, tty->ldisc);
+       if (retval) {
+               if (!WARN_ON(disc == N_TTY)) {
+                       tty_ldisc_put(tty->ldisc);
+                       tty->ldisc = NULL;
+               }
+       }
+       return retval;
 }
 
 /**
@@ -648,13 +697,11 @@ static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
  *     tty itself so we must be careful about locking rules.
  */
 
-void tty_ldisc_hangup(struct tty_struct *tty)
+void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
 {
        struct tty_ldisc *ld;
-       int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
-       int err = 0;
 
-       tty_ldisc_debug(tty, "%p: closing\n", tty->ldisc);
+       tty_ldisc_debug(tty, "%p: hangup\n", tty->ldisc);
 
        ld = tty_ldisc_ref(tty);
        if (ld != NULL) {
@@ -680,31 +727,17 @@ void tty_ldisc_hangup(struct tty_struct *tty)
         */
        tty_ldisc_lock(tty, MAX_SCHEDULE_TIMEOUT);
 
-       if (tty->ldisc) {
-
-               /* At this point we have a halted ldisc; we want to close it and
-                  reopen a new ldisc. We could defer the reopen to the next
-                  open but it means auditing a lot of other paths so this is
-                  a FIXME */
-               if (reset == 0) {
+       if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS)
+               tty_reset_termios(tty);
 
-                       if (!tty_ldisc_reinit(tty, tty->termios.c_line))
-                               err = tty_ldisc_open(tty, tty->ldisc);
-                       else
-                               err = 1;
-               }
-               /* If the re-open fails or we reset then go to N_TTY. The
-                  N_TTY open cannot fail */
-               if (reset || err) {
-                       BUG_ON(tty_ldisc_reinit(tty, N_TTY));
-                       WARN_ON(tty_ldisc_open(tty, tty->ldisc));
-               }
+       if (tty->ldisc) {
+               if (reinit) {
+                       if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0)
+                               tty_ldisc_reinit(tty, N_TTY);
+               } else
+                       tty_ldisc_kill(tty);
        }
        tty_ldisc_unlock(tty);
-       if (reset)
-               tty_reset_termios(tty);
-
-       tty_ldisc_debug(tty, "%p: re-opened\n", tty->ldisc);
 }
 
 /**
@@ -719,44 +752,26 @@ void tty_ldisc_hangup(struct tty_struct *tty)
 
 int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
 {
-       struct tty_ldisc *ld = tty->ldisc;
-       int retval;
-
-       retval = tty_ldisc_open(tty, ld);
+       int retval = tty_ldisc_open(tty, tty->ldisc);
        if (retval)
                return retval;
 
        if (o_tty) {
                retval = tty_ldisc_open(o_tty, o_tty->ldisc);
                if (retval) {
-                       tty_ldisc_close(tty, ld);
+                       tty_ldisc_close(tty, tty->ldisc);
                        return retval;
                }
        }
        return 0;
 }
 
-static void tty_ldisc_kill(struct tty_struct *tty)
-{
-       /*
-        * Now kill off the ldisc
-        */
-       tty_ldisc_close(tty, tty->ldisc);
-       tty_ldisc_put(tty->ldisc);
-       /* Force an oops if we mess this up */
-       tty->ldisc = NULL;
-
-       /* Ensure the next open requests the N_TTY ldisc */
-       tty_set_termios_ldisc(tty, N_TTY);
-}
-
 /**
  *     tty_ldisc_release               -       release line discipline
  *     @tty: tty being shut down (or one end of pty pair)
  *
  *     Called during the final close of a tty or a pty pair in order to shut
- *     down the line discpline layer. On exit, each ldisc assigned is N_TTY and
- *     each ldisc has not been opened.
+ *     down the line discipline layer. On exit, each tty's ldisc is NULL.
  */
 
 void tty_ldisc_release(struct tty_struct *tty)
@@ -797,7 +812,7 @@ void tty_ldisc_init(struct tty_struct *tty)
 }
 
 /**
- *     tty_ldisc_init          -       ldisc cleanup for new tty
+ *     tty_ldisc_deinit        -       ldisc cleanup for new tty
  *     @tty: tty that was allocated recently
  *
 *     The tty structure must not be completely set up (tty_ldisc_setup) when
@@ -805,12 +820,7 @@ void tty_ldisc_init(struct tty_struct *tty)
  */
 void tty_ldisc_deinit(struct tty_struct *tty)
 {
-       tty_ldisc_put(tty->ldisc);
+       if (tty->ldisc)
+               tty_ldisc_put(tty->ldisc);
        tty->ldisc = NULL;
 }
-
-void tty_ldisc_begin(void)
-{
-       /* Setup the default TTY line discipline. */
-       (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY);
-}
index 77703a3912075a7b740eb4626a5159d55ea6f57a..d8bae67a6174b65e676a887e08dd9e681c2f5693 100644 (file)
@@ -10,7 +10,7 @@
  * Getting the big tty mutex.
  */
 
-void __lockfunc tty_lock(struct tty_struct *tty)
+void tty_lock(struct tty_struct *tty)
 {
        if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
                return;
@@ -19,7 +19,20 @@ void __lockfunc tty_lock(struct tty_struct *tty)
 }
 EXPORT_SYMBOL(tty_lock);
 
-void __lockfunc tty_unlock(struct tty_struct *tty)
+int tty_lock_interruptible(struct tty_struct *tty)
+{
+       int ret;
+
+       if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
+               return -EIO;
+       tty_kref_get(tty);
+       ret = mutex_lock_interruptible(&tty->legacy_mutex);
+       if (ret)
+               tty_kref_put(tty);
+       return ret;
+}
+
+void tty_unlock(struct tty_struct *tty)
 {
        if (WARN(tty->magic != TTY_MAGIC, "U Bad %p\n", tty))
                return;
@@ -28,13 +41,13 @@ void __lockfunc tty_unlock(struct tty_struct *tty)
 }
 EXPORT_SYMBOL(tty_unlock);
 
-void __lockfunc tty_lock_slave(struct tty_struct *tty)
+void tty_lock_slave(struct tty_struct *tty)
 {
        if (tty && tty != tty->link)
                tty_lock(tty);
 }
 
-void __lockfunc tty_unlock_slave(struct tty_struct *tty)
+void tty_unlock_slave(struct tty_struct *tty)
 {
        if (tty && tty != tty->link)
                tty_unlock(tty);
index 846ed481c24fecec26bb4871589a9288792135be..dbcca30a54b1ad56db0b49f37a86d4d1a8f0710b 100644 (file)
@@ -370,7 +370,7 @@ int tty_port_block_til_ready(struct tty_port *port,
        }
        if (filp->f_flags & O_NONBLOCK) {
                /* Indicate we are open */
-               if (tty->termios.c_cflag & CBAUD)
+               if (C_BAUD(tty))
                        tty_port_raise_dtr_rts(port);
                port->flags |= ASYNC_NORMAL_ACTIVE;
                return 0;
@@ -476,7 +476,6 @@ int tty_port_close_start(struct tty_port *port,
                spin_unlock_irqrestore(&port->lock, flags);
                return 0;
        }
-       set_bit(ASYNCB_CLOSING, &port->flags);
        spin_unlock_irqrestore(&port->lock, flags);
 
        tty->closing = 1;
@@ -510,14 +509,12 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
 
        if (port->blocked_open) {
                spin_unlock_irqrestore(&port->lock, flags);
-               if (port->close_delay) {
-                       msleep_interruptible(
-                               jiffies_to_msecs(port->close_delay));
-               }
+               if (port->close_delay)
+                       msleep_interruptible(jiffies_to_msecs(port->close_delay));
                spin_lock_irqsave(&port->lock, flags);
                wake_up_interruptible(&port->open_wait);
        }
-       port->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
+       port->flags &= ~ASYNC_NORMAL_ACTIVE;
        spin_unlock_irqrestore(&port->lock, flags);
 }
 EXPORT_SYMBOL(tty_port_close_end);
index 6f0336fff5011046cbb2d5d0642228b00b34307c..f973bfce5d089256086b945fb37148fffebf902d 100644 (file)
@@ -1706,16 +1706,12 @@ int vt_do_diacrit(unsigned int cmd, void __user *udp, int perm)
                        return -EINVAL;
 
                if (ct) {
-                       dia = kmalloc(sizeof(struct kbdiacr) * ct,
-                                                               GFP_KERNEL);
-                       if (!dia)
-                               return -ENOMEM;
 
-                       if (copy_from_user(dia, a->kbdiacr,
-                                       sizeof(struct kbdiacr) * ct)) {
-                               kfree(dia);
-                               return -EFAULT;
-                       }
+                       dia = memdup_user(a->kbdiacr,
+                                       sizeof(struct kbdiacr) * ct);
+                       if (IS_ERR(dia))
+                               return PTR_ERR(dia);
+
                }
 
                spin_lock_irqsave(&kbd_event_lock, flags);
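
The vt_do_diacrit() hunk replaces an explicit kmalloc()/copy_from_user()/kfree() error path with memdup_user(), which allocates and copies in one step and reports failure as an ERR_PTR. The idiom in isolation, using a hypothetical record type and helper name:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Hypothetical record type, just to show the shape of the idiom. */
struct diacr_rec { unsigned char diacr, base, result; };

static int apply_user_table(const struct diacr_rec __user *utab, unsigned int ct)
{
	struct diacr_rec *tab;

	/* One call replaces kmalloc() + copy_from_user() + error unwinding. */
	tab = memdup_user(utab, sizeof(*tab) * ct);
	if (IS_ERR(tab))
		return PTR_ERR(tab);	/* -ENOMEM or -EFAULT */

	/* ... consume tab under the appropriate lock ... */

	kfree(tab);
	return 0;
}
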
index 381a2b13682c1a587a81e9bab781bcccdc669332..4dd9dd2270a01d31ded104bd789883b3bb356c3d 100644 (file)
@@ -347,6 +347,8 @@ int paste_selection(struct tty_struct *tty)
        console_unlock();
 
        ld = tty_ldisc_ref_wait(tty);
+       if (!ld)
+               return -EIO;    /* ldisc was hung up */
        tty_buffer_lock_exclusive(&vc->port);
 
        add_wait_queue(&vc->paste_wait, &wait);
index e7cbc44eef57529a23c479413538f1c2f924b55c..3e3c7575e92d0eaf3363bb5c3869cc139163073e 100644 (file)
@@ -568,7 +568,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr)
                        vc->vc_cols - vc->vc_x);
 }
 
-static int softcursor_original;
+static int softcursor_original = -1;
 
 static void add_softcursor(struct vc_data *vc)
 {
@@ -4250,6 +4250,7 @@ unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
 {
        return screenpos(vc, 2 * w_offset, viewed);
 }
+EXPORT_SYMBOL_GPL(screen_pos);
 
 void getconsxy(struct vc_data *vc, unsigned char *p)
 {
index 1173f9cbc137ef1b175a84de7d2c8c81219323f5..0a866e90b49c80ee04061ae1af3ac5020dd3a237 100644 (file)
@@ -476,6 +476,8 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
                        return -EINVAL;
                if (index < 0 || index > 0x7f)
                        return -EINVAL;
+               if (tmp < 0 || tmp > len - pos)
+                       return -EINVAL;
                pos += tmp;
 
                /* skip trailing newline */
index 26ca4f910cb020aae539f29b260a98fe1eee92b0..fa4e23930614a47ac14ece43e863286fadb91402 100644 (file)
@@ -428,7 +428,8 @@ static void acm_read_bulk_callback(struct urb *urb)
                set_bit(rb->index, &acm->read_urbs_free);
                dev_dbg(&acm->data->dev, "%s - non-zero urb status: %d\n",
                                                        __func__, status);
-               return;
+               if ((status != -ENOENT) || (urb->actual_length == 0))
+                       return;
        }
 
        usb_mark_last_busy(acm->dev);
@@ -1404,6 +1405,8 @@ made_compressed_probe:
                                usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress),
                                NULL, acm->writesize, acm_write_bulk, snd);
                snd->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+               if (quirks & SEND_ZERO_PACKET)
+                       snd->urb->transfer_flags |= URB_ZERO_PACKET;
                snd->instance = acm;
        }
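
The new SEND_ZERO_PACKET quirk makes cdc-acm set URB_ZERO_PACKET on its write URBs, so a bulk OUT transfer whose length is an exact multiple of the endpoint's wMaxPacketSize is terminated with a zero-length packet that the device (here the Intel 7260 modem added below) can recognise as end-of-transfer. A sketch of the flag's use; the helper name and the needs_zlp parameter are hypothetical:

#include <linux/usb.h>

/* Sketch only: needs_zlp stands in for a quirk such as SEND_ZERO_PACKET. */
static void fill_write_urb(struct urb *urb, struct usb_device *udev,
			   unsigned int ep, void *buf, int len,
			   usb_complete_t done, void *ctx, bool needs_zlp)
{
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, ep),
			  buf, len, done, ctx);
	if (needs_zlp)
		urb->transfer_flags |= URB_ZERO_PACKET;	/* terminate with a ZLP */
}
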
 
@@ -1838,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
        },
 #endif
 
+       /* Samsung phone in firmware update mode */
+       { USB_DEVICE(0x04e8, 0x685d),
+       .driver_info = IGNORE_DEVICE,
+       },
+
        /* Exclude Infineon Flash Loader utility */
        { USB_DEVICE(0x058b, 0x0041),
        .driver_info = IGNORE_DEVICE,
@@ -1861,6 +1869,10 @@ static const struct usb_device_id acm_ids[] = {
        { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
                USB_CDC_ACM_PROTO_AT_CDMA) },
 
+       { USB_DEVICE(0x1519, 0x0452), /* Intel 7260 modem */
+       .driver_info = SEND_ZERO_PACKET,
+       },
+
        { }
 };
 
index dd9af38e7cda612106e09c89a298c4397ccf7494..ccfaba9ab4e49cd4a009132397d9467bc949bfd0 100644 (file)
@@ -134,3 +134,4 @@ struct acm {
 #define IGNORE_DEVICE                  BIT(5)
 #define QUIRK_CONTROL_LINE_STATE       BIT(6)
 #define CLEAR_HALT_CONDITIONS          BIT(7)
+#define SEND_ZERO_PACKET               BIT(8)
index 7a11a8263171caad8ccba867bfa4e2e9734c2c3f..419c72e1046469d8767df8debe663f029fd7b9af 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/kref.h>
 #include <linux/slab.h>
+#include <linux/poll.h>
 #include <linux/mutex.h>
 #include <linux/usb.h>
 #include <linux/usb/tmc.h>
@@ -87,6 +88,23 @@ struct usbtmc_device_data {
        u8 bTag_last_write;     /* needed for abort */
        u8 bTag_last_read;      /* needed for abort */
 
+       /* data for interrupt in endpoint handling */
+       u8             bNotify1;
+       u8             bNotify2;
+       u16            ifnum;
+       u8             iin_bTag;
+       u8            *iin_buffer;
+       atomic_t       iin_data_valid;
+       unsigned int   iin_ep;
+       int            iin_ep_present;
+       int            iin_interval;
+       struct urb    *iin_urb;
+       u16            iin_wMaxPacketSize;
+       atomic_t       srq_asserted;
+
+       /* coalesced usb488_caps from usbtmc_dev_capabilities */
+       __u8 usb488_caps;
+
        u8 rigol_quirk;
 
        /* attributes from the USB TMC spec for this device */
@@ -99,6 +117,8 @@ struct usbtmc_device_data {
        struct usbtmc_dev_capabilities  capabilities;
        struct kref kref;
        struct mutex io_mutex;  /* only one i/o function running at a time */
+       wait_queue_head_t waitq;
+       struct fasync_struct *fasync;
 };
 #define to_usbtmc_data(d) container_of(d, struct usbtmc_device_data, kref)
 
@@ -373,6 +393,142 @@ exit:
        return rv;
 }
 
+static int usbtmc488_ioctl_read_stb(struct usbtmc_device_data *data,
+                               void __user *arg)
+{
+       struct device *dev = &data->intf->dev;
+       u8 *buffer;
+       u8 tag;
+       __u8 stb;
+       int rv;
+
+       dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
+               data->iin_ep_present);
+
+       buffer = kmalloc(8, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       atomic_set(&data->iin_data_valid, 0);
+
+       /* must issue read_stb before using poll or select */
+       atomic_set(&data->srq_asserted, 0);
+
+       rv = usb_control_msg(data->usb_dev,
+                       usb_rcvctrlpipe(data->usb_dev, 0),
+                       USBTMC488_REQUEST_READ_STATUS_BYTE,
+                       USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                       data->iin_bTag,
+                       data->ifnum,
+                       buffer, 0x03, USBTMC_TIMEOUT);
+       if (rv < 0) {
+               dev_err(dev, "stb usb_control_msg returned %d\n", rv);
+               goto exit;
+       }
+
+       if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+               dev_err(dev, "control status returned %x\n", buffer[0]);
+               rv = -EIO;
+               goto exit;
+       }
+
+       if (data->iin_ep_present) {
+               rv = wait_event_interruptible_timeout(
+                       data->waitq,
+                       atomic_read(&data->iin_data_valid) != 0,
+                       USBTMC_TIMEOUT);
+               if (rv < 0) {
+                       dev_dbg(dev, "wait interrupted %d\n", rv);
+                       goto exit;
+               }
+
+               if (rv == 0) {
+                       dev_dbg(dev, "wait timed out\n");
+                       rv = -ETIME;
+                       goto exit;
+               }
+
+               tag = data->bNotify1 & 0x7f;
+               if (tag != data->iin_bTag) {
+                       dev_err(dev, "expected bTag %x got %x\n",
+                               data->iin_bTag, tag);
+               }
+
+               stb = data->bNotify2;
+       } else {
+               stb = buffer[2];
+       }
+
+       rv = copy_to_user(arg, &stb, sizeof(stb));
+       if (rv)
+               rv = -EFAULT;
+
+ exit:
+       /* bump interrupt bTag */
+       data->iin_bTag += 1;
+       if (data->iin_bTag > 127)
+               /* 1 is for SRQ see USBTMC-USB488 subclass spec section 4.3.1 */
+               data->iin_bTag = 2;
+
+       kfree(buffer);
+       return rv;
+}
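
usbtmc488_ioctl_read_stb() issues the USB488 READ_STATUS_BYTE class request and, when an interrupt-IN endpoint exists, waits for the matching notification before copying the status byte to userspace. A hedged sketch of the caller side, assuming the USBTMC488_IOCTL_READ_STB definition this series adds to the uapi tmc.h and an instrument node at /dev/usbtmc0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/tmc.h>

int main(void)
{
	unsigned char stb;
	int fd = open("/dev/usbtmc0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, USBTMC488_IOCTL_READ_STB, &stb) == 0)
		printf("status byte: 0x%02x\n", stb);	/* IEEE 488 STB */
	close(fd);
	return 0;
}
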
+
+static int usbtmc488_ioctl_simple(struct usbtmc_device_data *data,
+                               void __user *arg, unsigned int cmd)
+{
+       struct device *dev = &data->intf->dev;
+       __u8 val;
+       u8 *buffer;
+       u16 wValue;
+       int rv;
+
+       if (!(data->usb488_caps & USBTMC488_CAPABILITY_SIMPLE))
+               return -EINVAL;
+
+       buffer = kmalloc(8, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       if (cmd == USBTMC488_REQUEST_REN_CONTROL) {
+               rv = copy_from_user(&val, arg, sizeof(val));
+               if (rv) {
+                       rv = -EFAULT;
+                       goto exit;
+               }
+               wValue = val ? 1 : 0;
+       } else {
+               wValue = 0;
+       }
+
+       rv = usb_control_msg(data->usb_dev,
+                       usb_rcvctrlpipe(data->usb_dev, 0),
+                       cmd,
+                       USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                       wValue,
+                       data->ifnum,
+                       buffer, 0x01, USBTMC_TIMEOUT);
+       if (rv < 0) {
+               dev_err(dev, "simple usb_control_msg failed %d\n", rv);
+               goto exit;
+       } else if (rv != 1) {
+               dev_warn(dev, "simple usb_control_msg returned %d\n", rv);
+               rv = -EIO;
+               goto exit;
+       }
+
+       if (buffer[0] != USBTMC_STATUS_SUCCESS) {
+               dev_err(dev, "simple control status returned %x\n", buffer[0]);
+               rv = -EIO;
+               goto exit;
+       }
+       rv = 0;
+
+ exit:
+       kfree(buffer);
+       return rv;
+}
+
 /*
  * Sends a REQUEST_DEV_DEP_MSG_IN message on the Bulk-IN endpoint.
  * @transfer_size: number of bytes to request from the device.
@@ -895,6 +1051,7 @@ static int get_capabilities(struct usbtmc_device_data *data)
        data->capabilities.device_capabilities = buffer[5];
        data->capabilities.usb488_interface_capabilities = buffer[14];
        data->capabilities.usb488_device_capabilities = buffer[15];
+       data->usb488_caps = (buffer[14] & 0x07) | ((buffer[15] & 0x0f) << 4);
        rv = 0;
 
 err_out:
@@ -1069,6 +1226,33 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        case USBTMC_IOCTL_ABORT_BULK_IN:
                retval = usbtmc_ioctl_abort_bulk_in(data);
                break;
+
+       case USBTMC488_IOCTL_GET_CAPS:
+               retval = copy_to_user((void __user *)arg,
+                               &data->usb488_caps,
+                               sizeof(data->usb488_caps));
+               if (retval)
+                       retval = -EFAULT;
+               break;
+
+       case USBTMC488_IOCTL_READ_STB:
+               retval = usbtmc488_ioctl_read_stb(data, (void __user *)arg);
+               break;
+
+       case USBTMC488_IOCTL_REN_CONTROL:
+               retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
+                                               USBTMC488_REQUEST_REN_CONTROL);
+               break;
+
+       case USBTMC488_IOCTL_GOTO_LOCAL:
+               retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
+                                               USBTMC488_REQUEST_GOTO_LOCAL);
+               break;
+
+       case USBTMC488_IOCTL_LOCAL_LOCKOUT:
+               retval = usbtmc488_ioctl_simple(data, (void __user *)arg,
+                                               USBTMC488_REQUEST_LOCAL_LOCKOUT);
+               break;
        }
 
 skip_io_on_zombie:
@@ -1076,6 +1260,34 @@ skip_io_on_zombie:
        return retval;
 }
 
+static int usbtmc_fasync(int fd, struct file *file, int on)
+{
+       struct usbtmc_device_data *data = file->private_data;
+
+       return fasync_helper(fd, file, on, &data->fasync);
+}
+
+static unsigned int usbtmc_poll(struct file *file, poll_table *wait)
+{
+       struct usbtmc_device_data *data = file->private_data;
+       unsigned int mask;
+
+       mutex_lock(&data->io_mutex);
+
+       if (data->zombie) {
+               mask = POLLHUP | POLLERR;
+               goto no_poll;
+       }
+
+       poll_wait(file, &data->waitq, wait);
+
+       mask = (atomic_read(&data->srq_asserted)) ? POLLIN | POLLRDNORM : 0;
+
+no_poll:
+       mutex_unlock(&data->io_mutex);
+       return mask;
+}
+
 static const struct file_operations fops = {
        .owner          = THIS_MODULE,
        .read           = usbtmc_read,
@@ -1083,6 +1295,8 @@ static const struct file_operations fops = {
        .open           = usbtmc_open,
        .release        = usbtmc_release,
        .unlocked_ioctl = usbtmc_ioctl,
+       .fasync         = usbtmc_fasync,
+       .poll           = usbtmc_poll,
        .llseek         = default_llseek,
 };
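
With usbtmc_poll() and usbtmc_fasync() wired into the fops just above, an application can sleep until the instrument asserts SRQ (the interrupt handler sets srq_asserted and wakes waitq); note the driver's comment that a READ_STB must be issued first, since that is what re-arms srq_asserted. A sketch of the poll side, under the same assumptions as the previous example:

#include <poll.h>

static int wait_for_srq(int fd, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	int rc = poll(&pfd, 1, timeout_ms);

	if (rc > 0 && (pfd.revents & POLLIN))
		return 0;	/* SRQ asserted; follow up with a READ_STB */
	return rc == 0 ? -1 /* timeout */ : -2 /* error or hang-up */;
}
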
 
@@ -1092,6 +1306,67 @@ static struct usb_class_driver usbtmc_class = {
        .minor_base =   USBTMC_MINOR_BASE,
 };
 
+static void usbtmc_interrupt(struct urb *urb)
+{
+       struct usbtmc_device_data *data = urb->context;
+       struct device *dev = &data->intf->dev;
+       int status = urb->status;
+       int rv;
+
+       dev_dbg(&data->intf->dev, "int status: %d len %d\n",
+               status, urb->actual_length);
+
+       switch (status) {
+       case 0: /* SUCCESS */
+               /* check for valid STB notification */
+               if (data->iin_buffer[0] > 0x81) {
+                       data->bNotify1 = data->iin_buffer[0];
+                       data->bNotify2 = data->iin_buffer[1];
+                       atomic_set(&data->iin_data_valid, 1);
+                       wake_up_interruptible(&data->waitq);
+                       goto exit;
+               }
+               /* check for SRQ notification */
+               if (data->iin_buffer[0] == 0x81) {
+                       if (data->fasync)
+                               kill_fasync(&data->fasync,
+                                       SIGIO, POLL_IN);
+
+                       atomic_set(&data->srq_asserted, 1);
+                       wake_up_interruptible(&data->waitq);
+                       goto exit;
+               }
+               dev_warn(dev, "invalid notification: %x\n", data->iin_buffer[0]);
+               break;
+       case -EOVERFLOW:
+               dev_err(dev, "overflow with length %d, actual length is %d\n",
+                       data->iin_wMaxPacketSize, urb->actual_length);
+       case -ECONNRESET:
+       case -ENOENT:
+       case -ESHUTDOWN:
+       case -EILSEQ:
+       case -ETIME:
+               /* urb terminated, clean up */
+               dev_dbg(dev, "urb terminated, status: %d\n", status);
+               return;
+       default:
+               dev_err(dev, "unknown status received: %d\n", status);
+       }
+exit:
+       rv = usb_submit_urb(urb, GFP_ATOMIC);
+       if (rv)
+               dev_err(dev, "usb_submit_urb failed: %d\n", rv);
+}
+
+static void usbtmc_free_int(struct usbtmc_device_data *data)
+{
+       if (!data->iin_ep_present || !data->iin_urb)
+               return;
+       usb_kill_urb(data->iin_urb);
+       kfree(data->iin_buffer);
+       usb_free_urb(data->iin_urb);
+       kref_put(&data->kref, usbtmc_delete);
+}
 
 static int usbtmc_probe(struct usb_interface *intf,
                        const struct usb_device_id *id)
@@ -1114,6 +1389,9 @@ static int usbtmc_probe(struct usb_interface *intf,
        usb_set_intfdata(intf, data);
        kref_init(&data->kref);
        mutex_init(&data->io_mutex);
+       init_waitqueue_head(&data->waitq);
+       atomic_set(&data->iin_data_valid, 0);
+       atomic_set(&data->srq_asserted, 0);
        data->zombie = 0;
 
        /* Determine if it is a Rigol or not */
@@ -1134,9 +1412,12 @@ static int usbtmc_probe(struct usb_interface *intf,
        data->bTag      = 1;
        data->TermCharEnabled = 0;
        data->TermChar = '\n';
+       /*  2 <= bTag <= 127   USBTMC-USB488 subclass specification 4.3.1 */
+       data->iin_bTag = 2;
 
        /* USBTMC devices have only one setting, so use that */
        iface_desc = data->intf->cur_altsetting;
+       data->ifnum = iface_desc->desc.bInterfaceNumber;
 
        /* Find bulk in endpoint */
        for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
@@ -1161,6 +1442,20 @@ static int usbtmc_probe(struct usb_interface *intf,
                        break;
                }
        }
+       /* Find int endpoint */
+       for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
+               endpoint = &iface_desc->endpoint[n].desc;
+
+               if (usb_endpoint_is_int_in(endpoint)) {
+                       data->iin_ep_present = 1;
+                       data->iin_ep = endpoint->bEndpointAddress;
+                       data->iin_wMaxPacketSize = usb_endpoint_maxp(endpoint);
+                       data->iin_interval = endpoint->bInterval;
+                       dev_dbg(&intf->dev, "Found Int in endpoint at %u\n",
+                               data->iin_ep);
+                       break;
+               }
+       }
 
        retcode = get_capabilities(data);
        if (retcode)
@@ -1169,6 +1464,39 @@ static int usbtmc_probe(struct usb_interface *intf,
                retcode = sysfs_create_group(&intf->dev.kobj,
                                             &capability_attr_grp);
 
+       if (data->iin_ep_present) {
+               /* allocate int urb */
+               data->iin_urb = usb_alloc_urb(0, GFP_KERNEL);
+               if (!data->iin_urb) {
+                       dev_err(&intf->dev, "Failed to allocate int urb\n");
+                       goto error_register;
+               }
+
+               /* will reference data in int urb */
+               kref_get(&data->kref);
+
+               /* allocate buffer for interrupt in */
+               data->iin_buffer = kmalloc(data->iin_wMaxPacketSize,
+                                       GFP_KERNEL);
+               if (!data->iin_buffer) {
+                       dev_err(&intf->dev, "Failed to allocate int buf\n");
+                       goto error_register;
+               }
+
+               /* fill interrupt urb */
+               usb_fill_int_urb(data->iin_urb, data->usb_dev,
+                               usb_rcvintpipe(data->usb_dev, data->iin_ep),
+                               data->iin_buffer, data->iin_wMaxPacketSize,
+                               usbtmc_interrupt,
+                               data, data->iin_interval);
+
+               retcode = usb_submit_urb(data->iin_urb, GFP_KERNEL);
+               if (retcode) {
+                       dev_err(&intf->dev, "Failed to submit iin_urb\n");
+                       goto error_register;
+               }
+       }
+
        retcode = sysfs_create_group(&intf->dev.kobj, &data_attr_grp);
 
        retcode = usb_register_dev(intf, &usbtmc_class);
@@ -1185,6 +1513,7 @@ static int usbtmc_probe(struct usb_interface *intf,
 error_register:
        sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
        sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
+       usbtmc_free_int(data);
        kref_put(&data->kref, usbtmc_delete);
        return retcode;
 }
@@ -1196,6 +1525,7 @@ static void usbtmc_disconnect(struct usb_interface *intf)
        dev_dbg(&intf->dev, "usbtmc_disconnect called\n");
 
        data = usb_get_intfdata(intf);
+       usbtmc_free_int(data);
        usb_deregister_dev(intf, &usbtmc_class);
        sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
        sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
index e6ec125e4485096276b685fd12932aae44dccc87..49fbfe8b0f24280ee3b596831346598d9e4c5a24 100644 (file)
@@ -51,6 +51,7 @@ static const char *const speed_names[] = {
        [USB_SPEED_HIGH] = "high-speed",
        [USB_SPEED_WIRELESS] = "wireless",
        [USB_SPEED_SUPER] = "super-speed",
+       [USB_SPEED_SUPER_PLUS] = "super-speed-plus",
 };
 
 const char *usb_speed_string(enum usb_device_speed speed)
index 5050760f5e17cf3872952ae5fa6a40d01aab394b..bbcf4009f99eaa5c0d6119e3c7016ca640cf7da2 100644 (file)
@@ -191,6 +191,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
        if (usb_endpoint_xfer_int(d)) {
                i = 1;
                switch (to_usb_device(ddev)->speed) {
+               case USB_SPEED_SUPER_PLUS:
                case USB_SPEED_SUPER:
                case USB_SPEED_HIGH:
                        /* Many device manufacturers are using full-speed
@@ -274,7 +275,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
        }
 
        /* Parse a possible SuperSpeed endpoint companion descriptor */
-       if (to_usb_device(ddev)->speed == USB_SPEED_SUPER)
+       if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER)
                usb_parse_ss_endpoint_companion(ddev, cfgno,
                                inum, asnum, endpoint, buffer, size);
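Many hunks in this series relax speed checks from an exact match against USB_SPEED_SUPER to a greater-or-equal comparison, relying on USB_SPEED_SUPER_PLUS being ordered after USB_SPEED_SUPER in enum usb_device_speed so that SuperSpeedPlus devices take the existing SuperSpeed paths (endpoint companion parsing, 8 mA power units, LPM, streams). A tiny illustration of the idiom; the helper name is hypothetical, not something the patch adds.

#include <linux/types.h>
#include <linux/usb/ch9.h>      /* enum usb_device_speed */

/* True for SuperSpeed and SuperSpeedPlus alike, false for slower speeds. */
static inline bool my_is_superspeed_or_faster(enum usb_device_speed speed)
{
        return speed >= USB_SPEED_SUPER;
}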
 
index cffa0a0d7de282de402f3576dd3669df9aa33eb2..ef04b50e6bbb9ff2d10f44b53b3ae91a1b3f7eca 100644 (file)
@@ -110,13 +110,6 @@ static const char format_endpt[] =
 /* E:  Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */
   "E:  Ad=%02x(%c) Atr=%02x(%-4s) MxPS=%4d Ivl=%d%cs\n";
 
-
-/*
- * Need access to the driver and USB bus lists.
- * extern struct list_head usb_bus_list;
- * However, these will come from functions that return ptrs to each of them.
- */
-
 /*
  * Wait for an connect/disconnect event to happen. We initialize
  * the event counter with an odd number, and each event will increment
@@ -221,7 +214,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
                break;
        case USB_ENDPOINT_XFER_INT:
                type = "Int.";
-               if (speed == USB_SPEED_HIGH || speed == USB_SPEED_SUPER)
+               if (speed == USB_SPEED_HIGH || speed >= USB_SPEED_SUPER)
                        interval = 1 << (desc->bInterval - 1);
                else
                        interval = desc->bInterval;
@@ -230,7 +223,7 @@ static char *usb_dump_endpoint_descriptor(int speed, char *start, char *end,
                return start;
        }
        interval *= (speed == USB_SPEED_HIGH ||
-                    speed == USB_SPEED_SUPER) ? 125 : 1000;
+                    speed >= USB_SPEED_SUPER) ? 125 : 1000;
        if (interval % 1000)
                unit = 'u';
        else {
@@ -322,7 +315,7 @@ static char *usb_dump_config_descriptor(char *start, char *end,
 
        if (start > end)
                return start;
-       if (speed == USB_SPEED_SUPER)
+       if (speed >= USB_SPEED_SUPER)
                mul = 8;
        else
                mul = 2;
@@ -534,6 +527,8 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
                speed = "480"; break;
        case USB_SPEED_SUPER:
                speed = "5000"; break;
+       case USB_SPEED_SUPER_PLUS:
+               speed = "10000"; break;
        default:
                speed = "??";
        }
@@ -553,7 +548,7 @@ static ssize_t usb_device_dump(char __user **buffer, size_t *nbytes,
 
                /* super/high speed reserves 80%, full/low reserves 90% */
                if (usbdev->speed == USB_SPEED_HIGH ||
-                   usbdev->speed == USB_SPEED_SUPER)
+                   usbdev->speed >= USB_SPEED_SUPER)
                        max = 800;
                else
                        max = FRAME_TIME_MAX_USECS_ALLOC;
@@ -616,6 +611,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
        struct usb_bus *bus;
        ssize_t ret, total_written = 0;
        loff_t skip_bytes = *ppos;
+       int id;
 
        if (*ppos < 0)
                return -EINVAL;
@@ -624,9 +620,9 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
        if (!access_ok(VERIFY_WRITE, buf, nbytes))
                return -EFAULT;
 
-       mutex_lock(&usb_bus_list_lock);
+       mutex_lock(&usb_bus_idr_lock);
        /* print devices for all busses */
-       list_for_each_entry(bus, &usb_bus_list, bus_list) {
+       idr_for_each_entry(&usb_bus_idr, bus, id) {
                /* recurse through all children of the root hub */
                if (!bus_to_hcd(bus)->rh_registered)
                        continue;
@@ -635,12 +631,12 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
                                      bus->root_hub, bus, 0, 0, 0);
                usb_unlock_device(bus->root_hub);
                if (ret < 0) {
-                       mutex_unlock(&usb_bus_list_lock);
+                       mutex_unlock(&usb_bus_idr_lock);
                        return ret;
                }
                total_written += ret;
        }
-       mutex_unlock(&usb_bus_list_lock);
+       mutex_unlock(&usb_bus_idr_lock);
        return total_written;
 }
 
index 59e7a3369084ab0b2d444991ce605a2259d47619..13a4b9f4873941379a41b94c0295a5af1b8a8857 100644 (file)
@@ -848,7 +848,7 @@ static struct usb_device *usbdev_lookup_by_devt(dev_t devt)
                              (void *) (unsigned long) devt, match_devt);
        if (!dev)
                return NULL;
-       return container_of(dev, struct usb_device, dev);
+       return to_usb_device(dev);
 }
 
 /*
@@ -1378,11 +1378,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                number_of_packets = uurb->number_of_packets;
                isofrmlen = sizeof(struct usbdevfs_iso_packet_desc) *
                                   number_of_packets;
-               isopkt = kmalloc(isofrmlen, GFP_KERNEL);
-               if (!isopkt)
-                       return -ENOMEM;
-               if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
-                       ret = -EFAULT;
+               isopkt = memdup_user(iso_frame_desc, isofrmlen);
+               if (IS_ERR(isopkt)) {
+                       ret = PTR_ERR(isopkt);
+                       isopkt = NULL;
                        goto error;
                }
                for (totlen = u = 0; u < number_of_packets; u++) {
@@ -1903,7 +1902,7 @@ static int proc_releaseinterface(struct usb_dev_state *ps, void __user *arg)
        ret = releaseintf(ps, ifnum);
        if (ret < 0)
                return ret;
-       destroy_async_on_interface (ps, ifnum);
+       destroy_async_on_interface(ps, ifnum);
        return 0;
 }
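The proc_do_submiturb() hunk above replaces an open-coded kmalloc() plus copy_from_user() pair with memdup_user(), which either returns a kernel copy of the user buffer or an ERR_PTR() encoding -EFAULT or -ENOMEM. A short, generic sketch of the consuming pattern follows; the function and variable names are illustrative only.

#include <linux/string.h>       /* memdup_user() */
#include <linux/err.h>
#include <linux/slab.h>

static int my_copy_blob_from_user(const void __user *ubuf, size_t len,
                                  void **out)
{
        void *kbuf;

        kbuf = memdup_user(ubuf, len);
        if (IS_ERR(kbuf))
                return PTR_ERR(kbuf);   /* -EFAULT or -ENOMEM */

        *out = kbuf;                    /* caller kfree()s when done */
        return 0;
}

Resetting the pointer to NULL after PTR_ERR(), as the hunk does with isopkt, presumably keeps the shared error path free to kfree() it unconditionally.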
 
index ea337a718cc1ca38945d3c42e6d0f75d157083b6..822ced9639aaf67463b359055ee825ff863761ea 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/errno.h>
 #include <linux/rwsem.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/usb.h>
 
 #include "usb.h"
@@ -155,7 +156,6 @@ int usb_register_dev(struct usb_interface *intf,
        int minor_base = class_driver->minor_base;
        int minor;
        char name[20];
-       char *temp;
 
 #ifdef CONFIG_USB_DYNAMIC_MINORS
        /*
@@ -192,14 +192,9 @@ int usb_register_dev(struct usb_interface *intf,
 
        /* create a usb class device for this usb interface */
        snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
-       temp = strrchr(name, '/');
-       if (temp && (temp[1] != '\0'))
-               ++temp;
-       else
-               temp = name;
        intf->usb_dev = device_create(usb_class->class, &intf->dev,
                                      MKDEV(USB_MAJOR, minor), class_driver,
-                                     "%s", temp);
+                                     "%s", kbasename(name));
        if (IS_ERR(intf->usb_dev)) {
                down_write(&minor_rwsem);
                usb_minors[minor] = NULL;
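usb_register_dev() above drops its hand-rolled strrchr() scan in favour of kbasename() from <linux/string.h>, which returns a pointer to the component after the last '/' (or the whole string when there is none). A quick illustrative use; the function here is hypothetical.

#include <linux/string.h>       /* kbasename() */
#include <linux/printk.h>

static void my_print_node_name(const char *template_name)
{
        /* "usb/lp%d" -> "lp%d", "ttyUSB%d" -> "ttyUSB%d" */
        pr_info("device node name: %s\n", kbasename(template_name));
}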
index 9eb1cff28bd4b2499e4dfd9ed8b91e53901df184..f9d42cf23e55f80104066f6c38b6deda9943f458 100644 (file)
@@ -28,7 +28,6 @@
 #ifdef CONFIG_PPC_PMAC
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
-#include <asm/pci-bridge.h>
 #include <asm/prom.h>
 #endif
 
@@ -197,7 +196,7 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
         * The xHCI driver has its own irq management
         * make sure irq setup is not touched for xhci in generic hcd code
         */
-       if ((driver->flags & HCD_MASK) != HCD_USB3) {
+       if ((driver->flags & HCD_MASK) < HCD_USB3) {
                if (!dev->irq) {
                        dev_err(&dev->dev,
                        "Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
index df0e3b92533a745fa8ea848d86cdbfb50a86f7f1..0b8a91004737ad56b42fd0e3e6f38e09ab3995d5 100644 (file)
@@ -90,16 +90,15 @@ unsigned long usb_hcds_loaded;
 EXPORT_SYMBOL_GPL(usb_hcds_loaded);
 
 /* host controllers we manage */
-LIST_HEAD (usb_bus_list);
-EXPORT_SYMBOL_GPL (usb_bus_list);
+DEFINE_IDR (usb_bus_idr);
+EXPORT_SYMBOL_GPL (usb_bus_idr);
 
 /* used when allocating bus numbers */
 #define USB_MAXBUS             64
-static DECLARE_BITMAP(busmap, USB_MAXBUS);
 
 /* used when updating list of hcds */
-DEFINE_MUTEX(usb_bus_list_lock);       /* exported only for usbfs */
-EXPORT_SYMBOL_GPL (usb_bus_list_lock);
+DEFINE_MUTEX(usb_bus_idr_lock);        /* exported only for usbfs */
+EXPORT_SYMBOL_GPL (usb_bus_idr_lock);
 
 /* used for controlling access to virtual root hubs */
 static DEFINE_SPINLOCK(hcd_root_hub_lock);
@@ -128,6 +127,27 @@ static inline int is_root_hub(struct usb_device *udev)
 #define KERNEL_REL     bin2bcd(((LINUX_VERSION_CODE >> 16) & 0x0ff))
 #define KERNEL_VER     bin2bcd(((LINUX_VERSION_CODE >> 8) & 0x0ff))
 
+/* usb 3.1 root hub device descriptor */
+static const u8 usb31_rh_dev_descriptor[18] = {
+       0x12,       /*  __u8  bLength; */
+       USB_DT_DEVICE, /* __u8 bDescriptorType; Device */
+       0x10, 0x03, /*  __le16 bcdUSB; v3.1 */
+
+       0x09,       /*  __u8  bDeviceClass; HUB_CLASSCODE */
+       0x00,       /*  __u8  bDeviceSubClass; */
+       0x03,       /*  __u8  bDeviceProtocol; USB 3 hub */
+       0x09,       /*  __u8  bMaxPacketSize0; 2^9 = 512 Bytes */
+
+       0x6b, 0x1d, /*  __le16 idVendor; Linux Foundation 0x1d6b */
+       0x03, 0x00, /*  __le16 idProduct; device 0x0003 */
+       KERNEL_VER, KERNEL_REL, /*  __le16 bcdDevice */
+
+       0x03,       /*  __u8  iManufacturer; */
+       0x02,       /*  __u8  iProduct; */
+       0x01,       /*  __u8  iSerialNumber; */
+       0x01        /*  __u8  bNumConfigurations; */
+};
+
 /* usb 3.0 root hub device descriptor */
 static const u8 usb3_rh_dev_descriptor[18] = {
        0x12,       /*  __u8  bLength; */
@@ -557,6 +577,8 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
                case USB_DT_DEVICE << 8:
                        switch (hcd->speed) {
                        case HCD_USB31:
+                               bufp = usb31_rh_dev_descriptor;
+                               break;
                        case HCD_USB3:
                                bufp = usb3_rh_dev_descriptor;
                                break;
@@ -645,9 +667,15 @@ nongeneric:
                /* non-generic request */
                switch (typeReq) {
                case GetHubStatus:
-               case GetPortStatus:
                        len = 4;
                        break;
+               case GetPortStatus:
+                       if (wValue == HUB_PORT_STATUS)
+                               len = 4;
+                       else
+                               /* other port status types return 8 bytes */
+                               len = 8;
+                       break;
                case GetHubDescriptor:
                        len = sizeof (struct usb_hub_descriptor);
                        break;
@@ -967,8 +995,6 @@ static void usb_bus_init (struct usb_bus *bus)
        bus->bandwidth_int_reqs  = 0;
        bus->bandwidth_isoc_reqs = 0;
        mutex_init(&bus->usb_address0_mutex);
-
-       INIT_LIST_HEAD (&bus->bus_list);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -988,18 +1014,14 @@ static int usb_register_bus(struct usb_bus *bus)
        int result = -E2BIG;
        int busnum;
 
-       mutex_lock(&usb_bus_list_lock);
-       busnum = find_next_zero_bit(busmap, USB_MAXBUS, 1);
-       if (busnum >= USB_MAXBUS) {
-               printk (KERN_ERR "%s: too many buses\n", usbcore_name);
+       mutex_lock(&usb_bus_idr_lock);
+       busnum = idr_alloc(&usb_bus_idr, bus, 1, USB_MAXBUS, GFP_KERNEL);
+       if (busnum < 0) {
+               pr_err("%s: failed to get bus number\n", usbcore_name);
                goto error_find_busnum;
        }
-       set_bit(busnum, busmap);
        bus->busnum = busnum;
-
-       /* Add it to the local list of buses */
-       list_add (&bus->bus_list, &usb_bus_list);
-       mutex_unlock(&usb_bus_list_lock);
+       mutex_unlock(&usb_bus_idr_lock);
 
        usb_notify_add_bus(bus);
 
@@ -1008,7 +1030,7 @@ static int usb_register_bus(struct usb_bus *bus)
        return 0;
 
 error_find_busnum:
-       mutex_unlock(&usb_bus_list_lock);
+       mutex_unlock(&usb_bus_idr_lock);
        return result;
 }
 
@@ -1029,13 +1051,11 @@ static void usb_deregister_bus (struct usb_bus *bus)
         * controller code, as well as having it call this when cleaning
         * itself up
         */
-       mutex_lock(&usb_bus_list_lock);
-       list_del (&bus->bus_list);
-       mutex_unlock(&usb_bus_list_lock);
+       mutex_lock(&usb_bus_idr_lock);
+       idr_remove(&usb_bus_idr, bus->busnum);
+       mutex_unlock(&usb_bus_idr_lock);
 
        usb_notify_remove_bus(bus);
-
-       clear_bit(bus->busnum, busmap);
 }
 
 /**
@@ -1063,12 +1083,12 @@ static int register_root_hub(struct usb_hcd *hcd)
        set_bit (devnum, usb_dev->bus->devmap.devicemap);
        usb_set_device_state(usb_dev, USB_STATE_ADDRESS);
 
-       mutex_lock(&usb_bus_list_lock);
+       mutex_lock(&usb_bus_idr_lock);
 
        usb_dev->ep0.desc.wMaxPacketSize = cpu_to_le16(64);
        retval = usb_get_device_descriptor(usb_dev, USB_DT_DEVICE_SIZE);
        if (retval != sizeof usb_dev->descriptor) {
-               mutex_unlock(&usb_bus_list_lock);
+               mutex_unlock(&usb_bus_idr_lock);
                dev_dbg (parent_dev, "can't read %s device descriptor %d\n",
                                dev_name(&usb_dev->dev), retval);
                return (retval < 0) ? retval : -EMSGSIZE;
@@ -1078,8 +1098,8 @@ static int register_root_hub(struct usb_hcd *hcd)
                retval = usb_get_bos_descriptor(usb_dev);
                if (!retval) {
                        usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
-               } else if (usb_dev->speed == USB_SPEED_SUPER) {
-                       mutex_unlock(&usb_bus_list_lock);
+               } else if (usb_dev->speed >= USB_SPEED_SUPER) {
+                       mutex_unlock(&usb_bus_idr_lock);
                        dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
                                        dev_name(&usb_dev->dev), retval);
                        return retval;
@@ -1099,7 +1119,7 @@ static int register_root_hub(struct usb_hcd *hcd)
                if (HCD_DEAD(hcd))
                        usb_hc_died (hcd);      /* This time clean up */
        }
-       mutex_unlock(&usb_bus_list_lock);
+       mutex_unlock(&usb_bus_idr_lock);
 
        return retval;
 }
@@ -2112,7 +2132,7 @@ int usb_alloc_streams(struct usb_interface *interface,
        hcd = bus_to_hcd(dev->bus);
        if (!hcd->driver->alloc_streams || !hcd->driver->free_streams)
                return -EINVAL;
-       if (dev->speed != USB_SPEED_SUPER)
+       if (dev->speed < USB_SPEED_SUPER)
                return -EINVAL;
        if (dev->state < USB_STATE_CONFIGURED)
                return -ENODEV;
@@ -2160,7 +2180,7 @@ int usb_free_streams(struct usb_interface *interface,
 
        dev = interface_to_usbdev(interface);
        hcd = bus_to_hcd(dev->bus);
-       if (dev->speed != USB_SPEED_SUPER)
+       if (dev->speed < USB_SPEED_SUPER)
                return -EINVAL;
 
        /* Double-free is not allowed */
@@ -2208,7 +2228,7 @@ int usb_hcd_get_frame_number (struct usb_device *udev)
 
 int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
 {
-       struct usb_hcd  *hcd = container_of(rhdev->bus, struct usb_hcd, self);
+       struct usb_hcd  *hcd = bus_to_hcd(rhdev->bus);
        int             status;
        int             old_state = hcd->state;
 
@@ -2257,7 +2277,7 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
 
 int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
 {
-       struct usb_hcd  *hcd = container_of(rhdev->bus, struct usb_hcd, self);
+       struct usb_hcd  *hcd = bus_to_hcd(rhdev->bus);
        int             status;
        int             old_state = hcd->state;
 
@@ -2371,7 +2391,7 @@ int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num)
         * boards with root hubs hooked up to internal devices (instead of
         * just the OTG port) may need more attention to resetting...
         */
-       hcd = container_of (bus, struct usb_hcd, self);
+       hcd = bus_to_hcd(bus);
        if (port_num && hcd->driver->start_port_reset)
                status = hcd->driver->start_port_reset(hcd, port_num);
 
@@ -2778,9 +2798,11 @@ int usb_add_hcd(struct usb_hcd *hcd,
                rhdev->speed = USB_SPEED_WIRELESS;
                break;
        case HCD_USB3:
-       case HCD_USB31:
                rhdev->speed = USB_SPEED_SUPER;
                break;
+       case HCD_USB31:
+               rhdev->speed = USB_SPEED_SUPER_PLUS;
+               break;
        default:
                retval = -EINVAL;
                goto err_set_rh_speed;
@@ -2863,9 +2885,9 @@ error_create_attr_group:
 #ifdef CONFIG_PM
        cancel_work_sync(&hcd->wakeup_work);
 #endif
-       mutex_lock(&usb_bus_list_lock);
+       mutex_lock(&usb_bus_idr_lock);
        usb_disconnect(&rhdev);         /* Sets rhdev to NULL */
-       mutex_unlock(&usb_bus_list_lock);
+       mutex_unlock(&usb_bus_idr_lock);
 err_register_root_hub:
        hcd->rh_pollable = 0;
        clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2932,9 +2954,9 @@ void usb_remove_hcd(struct usb_hcd *hcd)
        cancel_work_sync(&hcd->wakeup_work);
 #endif
 
-       mutex_lock(&usb_bus_list_lock);
+       mutex_lock(&usb_bus_idr_lock);
        usb_disconnect(&rhdev);         /* Sets rhdev to NULL */
-       mutex_unlock(&usb_bus_list_lock);
+       mutex_unlock(&usb_bus_idr_lock);
 
        /*
         * tasklet_kill() isn't needed here because:
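The hcd.c hunks above (together with the devices.c change earlier in this diff) retire the global usb_bus_list, its list lock and the busmap bitmap in favour of an IDR: idr_alloc() hands out the bus number and records the bus pointer in one step, idr_for_each_entry() walks the registered buses, and idr_remove() releases the number again. A compact sketch of that lifecycle under a mutex, with generic names rather than the USB core's:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

static DEFINE_IDR(my_bus_idr);
static DEFINE_MUTEX(my_bus_idr_lock);

struct my_bus { int busnum; };

static int my_register_bus(struct my_bus *bus)
{
        int id;

        mutex_lock(&my_bus_idr_lock);
        /* allocate an id in [1, 64) and map it to 'bus' */
        id = idr_alloc(&my_bus_idr, bus, 1, 64, GFP_KERNEL);
        mutex_unlock(&my_bus_idr_lock);
        if (id < 0)
                return id;
        bus->busnum = id;
        return 0;
}

static void my_for_each_bus(void (*fn)(struct my_bus *bus))
{
        struct my_bus *bus;
        int id;

        mutex_lock(&my_bus_idr_lock);
        idr_for_each_entry(&my_bus_idr, bus, id)
                fn(bus);
        mutex_unlock(&my_bus_idr_lock);
}

static void my_unregister_bus(struct my_bus *bus)
{
        mutex_lock(&my_bus_idr_lock);
        idr_remove(&my_bus_idr, bus->busnum);
        mutex_unlock(&my_bus_idr_lock);
}

The idr_destroy() added to usb_exit() in the usb.c hunk further down releases the IDR's internal allocations once no buses remain.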
index 51b436918f7892b1249c7ec1b19dad515a9a5755..681036d005289e86c044d0f19fe003aa12174a40 100644 (file)
@@ -49,7 +49,7 @@ static void hub_event(struct work_struct *work);
 DEFINE_MUTEX(usb_port_peer_mutex);
 
 /* cycle leds on hubs that aren't blinking for attention */
-static bool blinkenlights = 0;
+static bool blinkenlights;
 module_param(blinkenlights, bool, S_IRUGO);
 MODULE_PARM_DESC(blinkenlights, "true to cycle leds on hubs");
 
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(initial_descriptor_timeout,
  * otherwise the new scheme is used.  If that fails and "use_both_schemes"
  * is set, then the driver will make another attempt, using the other scheme.
  */
-static bool old_scheme_first = 0;
+static bool old_scheme_first;
 module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(old_scheme_first,
                 "start with the old device initialization scheme");
@@ -298,7 +298,7 @@ static void usb_set_lpm_parameters(struct usb_device *udev)
        unsigned int hub_u1_del;
        unsigned int hub_u2_del;
 
-       if (!udev->lpm_capable || udev->speed != USB_SPEED_SUPER)
+       if (!udev->lpm_capable || udev->speed < USB_SPEED_SUPER)
                return;
 
        hub = usb_hub_to_struct_hub(udev->parent);
@@ -537,29 +537,34 @@ static int get_hub_status(struct usb_device *hdev,
 
 /*
  * USB 2.0 spec Section 11.24.2.7
+ * USB 3.1 makes use of the wValue and wLength fields, see spec Section 10.16.2.6
  */
 static int get_port_status(struct usb_device *hdev, int port1,
-               struct usb_port_status *data)
+                          void *data, u16 value, u16 length)
 {
        int i, status = -ETIMEDOUT;
 
        for (i = 0; i < USB_STS_RETRIES &&
                        (status == -ETIMEDOUT || status == -EPIPE); i++) {
                status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
-                       USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
-                       data, sizeof(*data), USB_STS_TIMEOUT);
+                       USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, value,
+                       port1, data, length, USB_STS_TIMEOUT);
        }
        return status;
 }
 
-static int hub_port_status(struct usb_hub *hub, int port1,
-               u16 *status, u16 *change)
+static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
+                              u16 *status, u16 *change, u32 *ext_status)
 {
        int ret;
+       int len = 4;
+
+       if (type != HUB_PORT_STATUS)
+               len = 8;
 
        mutex_lock(&hub->status_mutex);
-       ret = get_port_status(hub->hdev, port1, &hub->status->port);
-       if (ret < 4) {
+       ret = get_port_status(hub->hdev, port1, &hub->status->port, type, len);
+       if (ret < len) {
                if (ret != -ENODEV)
                        dev_err(hub->intfdev,
                                "%s failed (err = %d)\n", __func__, ret);
@@ -568,13 +573,22 @@ static int hub_port_status(struct usb_hub *hub, int port1,
        } else {
                *status = le16_to_cpu(hub->status->port.wPortStatus);
                *change = le16_to_cpu(hub->status->port.wPortChange);
-
+               if (type != HUB_PORT_STATUS && ext_status)
+                       *ext_status = le32_to_cpu(
+                               hub->status->port.dwExtPortStatus);
                ret = 0;
        }
        mutex_unlock(&hub->status_mutex);
        return ret;
 }
 
+static int hub_port_status(struct usb_hub *hub, int port1,
+               u16 *status, u16 *change)
+{
+       return hub_ext_port_status(hub, port1, HUB_PORT_STATUS,
+                                  status, change, NULL);
+}
+
 static void kick_hub_wq(struct usb_hub *hub)
 {
        struct usb_interface *intf;
@@ -2131,7 +2145,7 @@ static void hub_disconnect_children(struct usb_device *udev)
  * Something got disconnected. Get rid of it and all of its children.
  *
  * If *pdev is a normal device then the parent hub must already be locked.
- * If *pdev is a root hub then the caller must hold the usb_bus_list_lock,
+ * If *pdev is a root hub then the caller must hold the usb_bus_idr_lock,
  * which protects the set of root hubs as well as the list of buses.
  *
  * Only hub drivers (including virtual root hub drivers for host
@@ -2429,7 +2443,7 @@ static void set_usb_port_removable(struct usb_device *udev)
  * enumerated.  The device descriptor is available, but not descriptors
  * for any device configuration.  The caller must have locked either
  * the parent hub (if udev is a normal device) or else the
- * usb_bus_list_lock (if udev is a root hub).  The parent's pointer to
+ * usb_bus_idr_lock (if udev is a root hub).  The parent's pointer to
  * udev has already been installed, but udev is not yet visible through
  * sysfs or other filesystem code.
  *
@@ -2612,6 +2626,32 @@ out_authorized:
        return result;
 }
 
+/*
+ * Return 1 if the port speed is SuperSpeedPlus, 0 otherwise.
+ * This is determined from the link protocol field of the sublink speed
+ * attribute that matches the current speed ID. The current speed ID comes
+ * from the extended port status request; the sublink speed attribute table
+ * is returned with the hub's BOS SSP device capability descriptor.
+ */
+static int port_speed_is_ssp(struct usb_device *hdev, int speed_id)
+{
+       int ssa_count;
+       u32 ss_attr;
+       int i;
+       struct usb_ssp_cap_descriptor *ssp_cap = hdev->bos->ssp_cap;
+
+       if (!ssp_cap)
+               return 0;
+
+       ssa_count = le32_to_cpu(ssp_cap->bmAttributes) &
+               USB_SSP_SUBLINK_SPEED_ATTRIBS;
+
+       for (i = 0; i <= ssa_count; i++) {
+               ss_attr = le32_to_cpu(ssp_cap->bmSublinkSpeedAttr[i]);
+               if (speed_id == (ss_attr & USB_SSP_SUBLINK_SPEED_SSID))
+                       return !!(ss_attr & USB_SSP_SUBLINK_SPEED_LP);
+       }
+       return 0;
+}
 
 /* Returns 1 if @hub is a WUSB root hub, 0 otherwise */
 static unsigned hub_is_wusb(struct usb_hub *hub)
@@ -2619,7 +2659,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
        struct usb_hcd *hcd;
        if (hub->hdev->parent != NULL)  /* not a root hub? */
                return 0;
-       hcd = container_of(hub->hdev->bus, struct usb_hcd, self);
+       hcd = bus_to_hcd(hub->hdev->bus);
        return hcd->wireless;
 }
 
@@ -2645,7 +2685,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
  */
 static bool use_new_scheme(struct usb_device *udev, int retry)
 {
-       if (udev->speed == USB_SPEED_SUPER)
+       if (udev->speed >= USB_SPEED_SUPER)
                return false;
 
        return USE_NEW_SCHEME(retry);
@@ -2676,6 +2716,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
        int delay_time, ret;
        u16 portstatus;
        u16 portchange;
+       u32 ext_portstatus = 0;
 
        for (delay_time = 0;
                        delay_time < HUB_RESET_TIMEOUT;
@@ -2684,7 +2725,14 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
                msleep(delay);
 
                /* read and decode port status */
-               ret = hub_port_status(hub, port1, &portstatus, &portchange);
+               if (hub_is_superspeedplus(hub->hdev))
+                       ret = hub_ext_port_status(hub, port1,
+                                                 HUB_EXT_PORT_STATUS,
+                                                 &portstatus, &portchange,
+                                                 &ext_portstatus);
+               else
+                       ret = hub_port_status(hub, port1, &portstatus,
+                                             &portchange);
                if (ret < 0)
                        return ret;
 
@@ -2727,6 +2775,10 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
 
        if (hub_is_wusb(hub))
                udev->speed = USB_SPEED_WIRELESS;
+       else if (hub_is_superspeedplus(hub->hdev) &&
+                port_speed_is_ssp(hub->hdev, ext_portstatus &
+                                  USB_EXT_PORT_STAT_RX_SPEED_ID))
+               udev->speed = USB_SPEED_SUPER_PLUS;
        else if (hub_is_superspeed(hub->hdev))
                udev->speed = USB_SPEED_SUPER;
        else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
@@ -3989,7 +4041,7 @@ int usb_disable_lpm(struct usb_device *udev)
        struct usb_hcd *hcd;
 
        if (!udev || !udev->parent ||
-                       udev->speed != USB_SPEED_SUPER ||
+                       udev->speed < USB_SPEED_SUPER ||
                        !udev->lpm_capable ||
                        udev->state < USB_STATE_DEFAULT)
                return 0;
@@ -4048,7 +4100,7 @@ void usb_enable_lpm(struct usb_device *udev)
        struct usb_port *port_dev;
 
        if (!udev || !udev->parent ||
-                       udev->speed != USB_SPEED_SUPER ||
+                       udev->speed < USB_SPEED_SUPER ||
                        !udev->lpm_capable ||
                        udev->state < USB_STATE_DEFAULT)
                return;
@@ -4323,7 +4375,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
 
        retval = -ENODEV;
 
-       if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
+       /* Don't allow speed changes at reset, except USB 3.0 to a faster speed */
+       if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed &&
+           !(oldspeed == USB_SPEED_SUPER && udev->speed > oldspeed)) {
                dev_dbg(&udev->dev, "device reset changed speed!\n");
                goto fail;
        }
@@ -4335,6 +4389,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
         * reported as 0xff in the device descriptor). WUSB1.0[4.8.1].
         */
        switch (udev->speed) {
+       case USB_SPEED_SUPER_PLUS:
        case USB_SPEED_SUPER:
        case USB_SPEED_WIRELESS:        /* fixed at 512 */
                udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
@@ -4361,7 +4416,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
        else
                speed = usb_speed_string(udev->speed);
 
-       if (udev->speed != USB_SPEED_SUPER)
+       if (udev->speed < USB_SPEED_SUPER)
                dev_info(&udev->dev,
                                "%s %s USB device number %d using %s\n",
                                (udev->config) ? "reset" : "new", speed,
@@ -4485,11 +4540,12 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
                                                        devnum, retval);
                                goto fail;
                        }
-                       if (udev->speed == USB_SPEED_SUPER) {
+                       if (udev->speed >= USB_SPEED_SUPER) {
                                devnum = udev->devnum;
                                dev_info(&udev->dev,
-                                               "%s SuperSpeed USB device number %d using %s\n",
+                                               "%s SuperSpeed%s USB device number %d using %s\n",
                                                (udev->config) ? "reset" : "new",
+                                        (udev->speed == USB_SPEED_SUPER_PLUS) ? "Plus" : "",
                                                devnum, udev->bus->controller->driver->name);
                        }
 
@@ -4528,7 +4584,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
         * got from those devices show they aren't superspeed devices. Warm
         * reset the port attached by the devices can fix them.
         */
-       if ((udev->speed == USB_SPEED_SUPER) &&
+       if ((udev->speed >= USB_SPEED_SUPER) &&
                        (le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
                dev_err(&udev->dev, "got a wrong device descriptor, "
                                "warm reset device\n");
@@ -4539,7 +4595,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
        }
 
        if (udev->descriptor.bMaxPacketSize0 == 0xff ||
-                       udev->speed == USB_SPEED_SUPER)
+                       udev->speed >= USB_SPEED_SUPER)
                i = 512;
        else
                i = udev->descriptor.bMaxPacketSize0;
@@ -4749,7 +4805,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
                udev->level = hdev->level + 1;
                udev->wusb = hub_is_wusb(hub);
 
-               /* Only USB 3.0 devices are connected to SuperSpeed hubs. */
+               /* Devices connected to SuperSpeed hubs are USB 3.0 or later */
                if (hub_is_superspeed(hub->hdev))
                        udev->speed = USB_SPEED_SUPER;
                else
@@ -5401,7 +5457,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
        }
 
        bos = udev->bos;
-       udev->bos = NULL;
 
        for (i = 0; i < SET_CONFIG_TRIES; ++i) {
 
@@ -5494,8 +5549,11 @@ done:
        usb_set_usb2_hardware_lpm(udev, 1);
        usb_unlocked_enable_lpm(udev);
        usb_enable_ltm(udev);
-       usb_release_bos_descriptor(udev);
-       udev->bos = bos;
+       /* release the new BOS descriptor allocated by hub_port_init() */
+       if (udev->bos != bos) {
+               usb_release_bos_descriptor(udev);
+               udev->bos = bos;
+       }
        return 0;
 
 re_enumerate:
index 45d070dd1d0315cfca7b049e3e1d04e6e701645c..34c1a7e22aae020a2f48bce219a342a89d835650 100644 (file)
@@ -140,6 +140,13 @@ static inline int hub_is_superspeed(struct usb_device *hdev)
        return hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS;
 }
 
+static inline int hub_is_superspeedplus(struct usb_device *hdev)
+{
+       return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS &&
+               le16_to_cpu(hdev->descriptor.bcdUSB) >= 0x0310 &&
+               hdev->bos->ssp_cap);
+}
+
 static inline unsigned hub_power_on_good_delay(struct usb_hub *hub)
 {
        unsigned delay = hub->descriptor->bPwrOn2PwrGood * 2;
index 65b6e6b840431a7b91184fccd110390678e3bb8e..c953a0f1c69513a066ceab7bb7372f946b19e8ed 100644 (file)
@@ -23,10 +23,12 @@ static ssize_t field##_show(struct device *dev,                             \
 {                                                                      \
        struct usb_device *udev;                                        \
        struct usb_host_config *actconfig;                              \
-       ssize_t rc = 0;                                                 \
+       ssize_t rc;                                                     \
                                                                        \
        udev = to_usb_device(dev);                                      \
-       usb_lock_device(udev);                                          \
+       rc = usb_lock_device_interruptible(udev);                       \
+       if (rc < 0)                                                     \
+               return -EINTR;                                          \
        actconfig = udev->actconfig;                                    \
        if (actconfig)                                                  \
                rc = sprintf(buf, format_string,                        \
@@ -47,10 +49,12 @@ static ssize_t bMaxPower_show(struct device *dev,
 {
        struct usb_device *udev;
        struct usb_host_config *actconfig;
-       ssize_t rc = 0;
+       ssize_t rc;
 
        udev = to_usb_device(dev);
-       usb_lock_device(udev);
+       rc = usb_lock_device_interruptible(udev);
+       if (rc < 0)
+               return -EINTR;
        actconfig = udev->actconfig;
        if (actconfig)
                rc = sprintf(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
@@ -64,10 +68,12 @@ static ssize_t configuration_show(struct device *dev,
 {
        struct usb_device *udev;
        struct usb_host_config *actconfig;
-       ssize_t rc = 0;
+       ssize_t rc;
 
        udev = to_usb_device(dev);
-       usb_lock_device(udev);
+       rc = usb_lock_device_interruptible(udev);
+       if (rc < 0)
+               return -EINTR;
        actconfig = udev->actconfig;
        if (actconfig && actconfig->string)
                rc = sprintf(buf, "%s\n", actconfig->string);
@@ -84,11 +90,13 @@ static ssize_t bConfigurationValue_store(struct device *dev,
                                         const char *buf, size_t count)
 {
        struct usb_device       *udev = to_usb_device(dev);
-       int                     config, value;
+       int                     config, value, rc;
 
        if (sscanf(buf, "%d", &config) != 1 || config < -1 || config > 255)
                return -EINVAL;
-       usb_lock_device(udev);
+       rc = usb_lock_device_interruptible(udev);
+       if (rc < 0)
+               return -EINTR;
        value = usb_set_configuration(udev, config);
        usb_unlock_device(udev);
        return (value < 0) ? value : count;
@@ -105,7 +113,9 @@ static ssize_t  name##_show(struct device *dev,                             \
        int retval;                                                     \
                                                                        \
        udev = to_usb_device(dev);                                      \
-       usb_lock_device(udev);                                          \
+       retval = usb_lock_device_interruptible(udev);                   \
+       if (retval < 0)                                                 \
+               return -EINTR;                                          \
        retval = sprintf(buf, "%s\n", udev->name);                      \
        usb_unlock_device(udev);                                        \
        return retval;                                                  \
@@ -141,6 +151,9 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
        case USB_SPEED_SUPER:
                speed = "5000";
                break;
+       case USB_SPEED_SUPER_PLUS:
+               speed = "10000";
+               break;
        default:
                speed = "unknown";
        }
@@ -224,11 +237,13 @@ static ssize_t avoid_reset_quirk_store(struct device *dev,
                                      const char *buf, size_t count)
 {
        struct usb_device       *udev = to_usb_device(dev);
-       int                     val;
+       int                     val, rc;
 
        if (sscanf(buf, "%d", &val) != 1 || val < 0 || val > 1)
                return -EINVAL;
-       usb_lock_device(udev);
+       rc = usb_lock_device_interruptible(udev);
+       if (rc < 0)
+               return -EINTR;
        if (val)
                udev->quirks |= USB_QUIRK_RESET;
        else
@@ -294,7 +309,7 @@ static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
 {
        struct usb_device *udev = to_usb_device(dev);
-       int value;
+       int value, rc;
 
        /* Hubs are always enabled for USB_PERSIST */
        if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
@@ -303,7 +318,9 @@ static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
        if (sscanf(buf, "%d", &value) != 1)
                return -EINVAL;
 
-       usb_lock_device(udev);
+       rc = usb_lock_device_interruptible(udev);
+       if (rc < 0)
+               return -EINTR;
        udev->persist_enabled = !!value;
        usb_unlock_device(udev);
        return count;
@@ -420,13 +437,16 @@ static ssize_t level_store(struct device *dev, struct device_attribute *attr,
        int len = count;
        char *cp;
        int rc = count;
+       int rv;
 
        warn_level();
        cp = memchr(buf, '\n', count);
        if (cp)
                len = cp - buf;
 
-       usb_lock_device(udev);
+       rv = usb_lock_device_interruptible(udev);
+       if (rv < 0)
+               return -EINTR;
 
        if (len == sizeof on_string - 1 &&
                        strncmp(buf, on_string, len) == 0)
@@ -466,7 +486,9 @@ static ssize_t usb2_hardware_lpm_store(struct device *dev,
        bool value;
        int ret;
 
-       usb_lock_device(udev);
+       ret = usb_lock_device_interruptible(udev);
+       if (ret < 0)
+               return -EINTR;
 
        ret = strtobool(buf, &value);
 
@@ -536,8 +558,11 @@ static ssize_t usb3_hardware_lpm_u1_show(struct device *dev,
 {
        struct usb_device *udev = to_usb_device(dev);
        const char *p;
+       int rc;
 
-       usb_lock_device(udev);
+       rc = usb_lock_device_interruptible(udev);
+       if (rc < 0)
+               return -EINTR;
 
        if (udev->usb3_lpm_u1_enabled)
                p = "enabled";
@@ -555,8 +580,11 @@ static ssize_t usb3_hardware_lpm_u2_show(struct device *dev,
 {
        struct usb_device *udev = to_usb_device(dev);
        const char *p;
+       int rc;
 
-       usb_lock_device(udev);
+       rc = usb_lock_device_interruptible(udev);
+       if (rc < 0)
+               return -EINTR;
 
        if (udev->usb3_lpm_u2_enabled)
                p = "enabled";
@@ -822,7 +850,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
         * Following that are the raw descriptor entries for all the
         * configurations (config plus subsidiary descriptors).
         */
-       usb_lock_device(udev);
        for (cfgno = -1; cfgno < udev->descriptor.bNumConfigurations &&
                        nleft > 0; ++cfgno) {
                if (cfgno < 0) {
@@ -843,7 +870,6 @@ read_descriptors(struct file *filp, struct kobject *kobj,
                        off -= srclen;
                }
        }
-       usb_unlock_device(udev);
        return count - nleft;
 }
 
@@ -969,7 +995,9 @@ static ssize_t supports_autosuspend_show(struct device *dev,
 {
        int s;
 
-       device_lock(dev);
+       s = device_lock_interruptible(dev);
+       if (s < 0)
+               return -EINTR;
        /* Devices will be autosuspended even when an interface isn't claimed */
        s = (!dev->driver || to_usb_driver(dev->driver)->supports_autosuspend);
        device_unlock(dev);
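The sysfs hunks above switch unconditional usb_lock_device() and device_lock() calls over to their _interruptible variants, so a reader or writer blocked behind a long device operation can be interrupted by a signal instead of sleeping uninterruptibly; the attribute then bails out with -EINTR. A hedged sketch of the resulting show-method shape, with an invented attribute:

#include <linux/device.h>
#include <linux/usb.h>

static ssize_t my_attr_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct usb_device *udev = to_usb_device(dev);
        ssize_t rc;

        rc = usb_lock_device_interruptible(udev);
        if (rc < 0)
                return -EINTR;          /* interrupted by a signal */

        rc = sprintf(buf, "%d\n", udev->devnum);

        usb_unlock_device(udev);
        return rc;
}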
index 3d274778caaf8f9f473aacc89b1be6c20de19ab2..2509fc1237cf1db67c9c27ff8e3f24440f51469a 100644 (file)
@@ -401,7 +401,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
                /* SuperSpeed isoc endpoints have up to 16 bursts of up to
                 * 3 packets each
                 */
-               if (dev->speed == USB_SPEED_SUPER) {
+               if (dev->speed >= USB_SPEED_SUPER) {
                        int     burst = 1 + ep->ss_ep_comp.bMaxBurst;
                        int     mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
                        max *= burst;
@@ -444,8 +444,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
         */
 
        /* Check that the pipe's type matches the endpoint's type */
-       if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
-               dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
+       dev_WARN(&dev->dev, usb_pipetype(urb->pipe) != pipetypes[xfertype],
+                       "BOGUS urb xfer, pipe %x != type %x\n",
                        usb_pipetype(urb->pipe), pipetypes[xfertype]);
 
        /* Check against a simple/standard policy */
@@ -471,8 +471,8 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
        allowed &= urb->transfer_flags;
 
        /* warn if submitter gave bogus flags */
-       if (allowed != urb->transfer_flags)
-               dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
+       dev_WARN(&dev->dev, allowed != urb->transfer_flags,
+                       "BOGUS urb flags, %x --> %x\n",
                        urb->transfer_flags, allowed);
 
        /*
@@ -499,6 +499,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
                }
                /* too big? */
                switch (dev->speed) {
+               case USB_SPEED_SUPER_PLUS:
                case USB_SPEED_SUPER:   /* units are 125us */
                        /* Handle up to 2^(16-1) microframes */
                        if (urb->interval > (1 << 15))
index ebb29caa3fe40d716833715e48ac102173753028..524c9822d2bb10cb68c91cb412dd6986593d24a4 100644 (file)
@@ -241,7 +241,7 @@ static int __each_dev(struct device *dev, void *data)
        if (!is_usb_device(dev))
                return 0;
 
-       return arg->fn(container_of(dev, struct usb_device, dev), arg->data);
+       return arg->fn(to_usb_device(dev), arg->data);
 }
 
 /**
@@ -397,7 +397,7 @@ struct device_type usb_device_type = {
 /* Returns 1 if @usb_bus is WUSB, 0 otherwise */
 static unsigned usb_bus_is_wusb(struct usb_bus *bus)
 {
-       struct usb_hcd *hcd = container_of(bus, struct usb_hcd, self);
+       struct usb_hcd *hcd = bus_to_hcd(bus);
        return hcd->wireless;
 }
 
@@ -1115,6 +1115,7 @@ static void __exit usb_exit(void)
        bus_unregister(&usb_bus_type);
        usb_acpi_unregister();
        usb_debugfs_cleanup();
+       idr_destroy(&usb_bus_idr);
 }
 
 subsys_initcall(usb_init);
index 05b5e17abf92fc158530d13f3e2ae8edf9923c5a..53318126ed91b24603934030702a0dae178cc5d1 100644 (file)
@@ -45,7 +45,7 @@ static inline unsigned usb_get_max_power(struct usb_device *udev,
                struct usb_host_config *c)
 {
        /* SuperSpeed power is in 8 mA units; others are in 2 mA units */
-       unsigned mul = (udev->speed == USB_SPEED_SUPER ? 8 : 2);
+       unsigned mul = (udev->speed >= USB_SPEED_SUPER ? 8 : 2);
 
        return c->desc.bMaxPower * mul;
 }
index 39a0fa8a4c0aea17b9ecf330c127b110b4b54ba4..e991d55914db649804a6357821f42e78355e6e5b 100644 (file)
@@ -572,12 +572,6 @@ static bool dwc2_force_mode(struct dwc2_hsotg *hsotg, bool host)
        set = host ? GUSBCFG_FORCEHOSTMODE : GUSBCFG_FORCEDEVMODE;
        clear = host ? GUSBCFG_FORCEDEVMODE : GUSBCFG_FORCEHOSTMODE;
 
-       /*
-        * If the force mode bit is already set, don't set it.
-        */
-       if ((gusbcfg & set) && !(gusbcfg & clear))
-               return false;
-
        gusbcfg &= ~clear;
        gusbcfg |= set;
        dwc2_writel(gusbcfg, hsotg->regs + GUSBCFG);
@@ -3278,9 +3272,6 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg)
 /**
  * During device initialization, read various hardware configuration
  * registers and interpret the contents.
- *
- * This should be called during driver probe. It will perform a core
- * soft reset in order to get the reset values of the parameters.
  */
 int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
 {
@@ -3288,7 +3279,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
        unsigned width;
        u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
        u32 grxfsiz;
-       int retval;
 
        /*
         * Attempt to ensure this device is really a DWC_otg Controller.
@@ -3308,10 +3298,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
                hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
                hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
 
-       retval = dwc2_core_reset(hsotg);
-       if (retval)
-               return retval;
-
        hwcfg1 = dwc2_readl(hsotg->regs + GHWCFG1);
        hwcfg2 = dwc2_readl(hsotg->regs + GHWCFG2);
        hwcfg3 = dwc2_readl(hsotg->regs + GHWCFG3);
index 510f787434b3d574e40cfe4a5a41511c92f1ad50..690b9fd98b55165a8b1d6a7b8bce30f730ac6805 100644 (file)
@@ -530,7 +530,13 @@ static int dwc2_driver_probe(struct platform_device *dev)
        if (retval)
                return retval;
 
-       /* Reset the controller and detect hardware config values */
+       /*
+        * Reset before dwc2_get_hwparams() so that it reads the power-on
+        * reset values from the registers.
+        */
+       dwc2_core_reset_and_force_dr_mode(hsotg);
+
+       /* Detect config values from hardware */
        retval = dwc2_get_hwparams(hsotg);
        if (retval)
                goto error;
index 22e9606d8e081c3ece06c3cf52f0e735ab44207c..cf56941374809bae89cde4fbb9bb34b0ff24e20b 100644 (file)
@@ -266,7 +266,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
                break;
 
        default:
-               dev_WARN(omap->dev, "invalid state\n");
+               dev_WARN(omap->dev, true, "invalid state\n");
        }
 }
 
@@ -398,7 +398,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
                reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
                break;
        default:
-               dev_WARN(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
+               dev_WARN(omap->dev, true, "UNKNOWN utmi mode %d\n", utmi_mode);
        }
 
        dwc3_omap_write_utmi_ctrl(omap, reg);
index 3a9354abcb68bfc6e9d27cc64c7155d54dec04e0..252b1e9e7f06c20dcaa5dcf0322e159c3c88d318 100644 (file)
@@ -140,10 +140,9 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
 
                direction = !!(dep->flags & DWC3_EP0_DIR_IN);
 
-               if (dwc->ep0state != EP0_DATA_PHASE) {
-                       dev_WARN(dwc->dev, "Unexpected pending request\n");
+               if (dev_WARN(dwc->dev, dwc->ep0state != EP0_DATA_PHASE,
+                                               "Unexpected pending request\n"))
                        return 0;
-               }
 
                __dwc3_ep0_do_control_data(dwc, dwc->eps[direction], req);
 
index af023a81a0b02160f85e4800594b49aa318190c2..7ee58f59bc53dd6927f84b77136bf1c193687253 100644 (file)
@@ -2608,7 +2608,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc,
                dwc3_trace(trace_dwc3_gadget, "Overflow");
                break;
        default:
-               dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
+               dev_WARN(dwc->dev, true, "UNKNOWN IRQ %d\n", event->type);
        }
 }
 
@@ -2789,6 +2789,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
        dwc->gadget.speed               = USB_SPEED_UNKNOWN;
        dwc->gadget.sg_supported        = true;
        dwc->gadget.name                = "dwc3-gadget";
+       dwc->gadget.is_otg              = dwc->dr_mode == USB_DR_MODE_OTG;
 
        /*
         * FIXME We might be setting max_speed to <SUPER, however versions
index 70d3917cc00364e71ca0900557c09fd02ce56bc3..943c21aafd3b573affe0e61d608b20ec74dc79ee 100644 (file)
@@ -914,7 +914,7 @@ struct rndis_params *rndis_register(void (*resp_avail)(void *v), void *v)
        params->media_state = RNDIS_MEDIA_STATE_DISCONNECTED;
        params->resp_avail = resp_avail;
        params->v = v;
-       INIT_LIST_HEAD(&(params->resp_queue));
+       INIT_LIST_HEAD(&params->resp_queue);
        pr_debug("%s: configNr = %d\n", __func__, i);
 
        return params;
@@ -1006,13 +1006,10 @@ EXPORT_SYMBOL_GPL(rndis_add_hdr);
 
 void rndis_free_response(struct rndis_params *params, u8 *buf)
 {
-       rndis_resp_t *r;
-       struct list_head *act, *tmp;
+       rndis_resp_t *r, *n;
 
-       list_for_each_safe(act, tmp, &(params->resp_queue))
-       {
-               r = list_entry(act, rndis_resp_t, list);
-               if (r && r->buf == buf) {
+       list_for_each_entry_safe(r, n, &params->resp_queue, list) {
+               if (r->buf == buf) {
                        list_del(&r->list);
                        kfree(r);
                }
@@ -1022,14 +1019,11 @@ EXPORT_SYMBOL_GPL(rndis_free_response);
 
 u8 *rndis_get_next_response(struct rndis_params *params, u32 *length)
 {
-       rndis_resp_t *r;
-       struct list_head *act, *tmp;
+       rndis_resp_t *r, *n;
 
        if (!length) return NULL;
 
-       list_for_each_safe(act, tmp, &(params->resp_queue))
-       {
-               r = list_entry(act, rndis_resp_t, list);
+       list_for_each_entry_safe(r, n, &params->resp_queue, list) {
                if (!r->send) {
                        r->send = 1;
                        *length = r->length;
@@ -1053,7 +1047,7 @@ static rndis_resp_t *rndis_add_response(struct rndis_params *params, u32 length)
        r->length = length;
        r->send = 0;
 
-       list_add_tail(&r->list, &(params->resp_queue));
+       list_add_tail(&r->list, &params->resp_queue);
        return r;
 }
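The RNDIS hunks replace the list_for_each_safe() plus list_entry() combination with list_for_each_entry_safe(), which walks the containing structures directly while still allowing the current entry to be unlinked and freed. A generic illustration with hypothetical types:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_resp {
        struct list_head list;
        u8 *buf;
};

/* Free every queued response whose buffer matches 'buf'. */
static void my_free_matching(struct list_head *queue, u8 *buf)
{
        struct my_resp *r, *n;

        list_for_each_entry_safe(r, n, queue, list) {
                if (r->buf == buf) {
                        list_del(&r->list);
                        kfree(r);
                }
        }
}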
 
index a23d1b90454c4f106436510c05b67ef10438e069..0b36878eb5fdd30231284b8ba99a7f508f6af12c 100644 (file)
@@ -103,8 +103,7 @@ config USB_ETH
           - CDC Ethernet Emulation Model (EEM) is a newer standard that has
             a simpler interface that can be used by more USB hardware.
 
-         RNDIS support is an additional option, more demanding than than
-         subset.
+         RNDIS support is an additional option, more demanding than subset.
 
          Within the USB device, this gadget driver exposes a network device
          "usbX", where X depends on what other networking devices you have.
index 291aaa2baed831701ae190fdde96944cc4963f44..963e2d0e8f92d440396ff17bc5b718f81298b682 100644 (file)
@@ -35,6 +35,7 @@ MODULE_DESCRIPTION("Common USB driver for BCMA Bus");
 MODULE_LICENSE("GPL");
 
 struct bcma_hcd_device {
+       struct bcma_device *core;
        struct platform_device *ehci_dev;
        struct platform_device *ohci_dev;
        struct gpio_desc *gpio_desc;
@@ -244,7 +245,10 @@ static const struct usb_ehci_pdata ehci_pdata = {
 static const struct usb_ohci_pdata ohci_pdata = {
 };
 
-static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, bool ohci, u32 addr)
+static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev,
+                                                   const char *name, u32 addr,
+                                                   const void *data,
+                                                   size_t size)
 {
        struct platform_device *hci_dev;
        struct resource hci_res[2];
@@ -259,8 +263,7 @@ static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, boo
        hci_res[1].start = dev->irq;
        hci_res[1].flags = IORESOURCE_IRQ;
 
-       hci_dev = platform_device_alloc(ohci ? "ohci-platform" :
-                                       "ehci-platform" , 0);
+       hci_dev = platform_device_alloc(name, 0);
        if (!hci_dev)
                return ERR_PTR(-ENOMEM);
 
@@ -271,12 +274,8 @@ static struct platform_device *bcma_hcd_create_pdev(struct bcma_device *dev, boo
                                            ARRAY_SIZE(hci_res));
        if (ret)
                goto err_alloc;
-       if (ohci)
-               ret = platform_device_add_data(hci_dev, &ohci_pdata,
-                                              sizeof(ohci_pdata));
-       else
-               ret = platform_device_add_data(hci_dev, &ehci_pdata,
-                                              sizeof(ehci_pdata));
+       if (data)
+               ret = platform_device_add_data(hci_dev, data, size);
        if (ret)
                goto err_alloc;
        ret = platform_device_add(hci_dev);
@@ -290,31 +289,16 @@ err_alloc:
        return ERR_PTR(ret);
 }
 
-static int bcma_hcd_probe(struct bcma_device *dev)
+static int bcma_hcd_usb20_init(struct bcma_hcd_device *usb_dev)
 {
-       int err;
+       struct bcma_device *dev = usb_dev->core;
+       struct bcma_chipinfo *chipinfo = &dev->bus->chipinfo;
        u32 ohci_addr;
-       struct bcma_hcd_device *usb_dev;
-       struct bcma_chipinfo *chipinfo;
-
-       chipinfo = &dev->bus->chipinfo;
-
-       /* TODO: Probably need checks here; is the core connected? */
+       int err;
 
        if (dma_set_mask_and_coherent(dev->dma_dev, DMA_BIT_MASK(32)))
                return -EOPNOTSUPP;
 
-       usb_dev = devm_kzalloc(&dev->dev, sizeof(struct bcma_hcd_device),
-                              GFP_KERNEL);
-       if (!usb_dev)
-               return -ENOMEM;
-
-       if (dev->dev.of_node)
-               usb_dev->gpio_desc = devm_get_gpiod_from_child(&dev->dev, "vcc",
-                                                              &dev->dev.of_node->fwnode);
-       if (!IS_ERR_OR_NULL(usb_dev->gpio_desc))
-               gpiod_direction_output(usb_dev->gpio_desc, 1);
-
        switch (dev->id.id) {
        case BCMA_CORE_NS_USB20:
                bcma_hcd_init_chip_arm(dev);
@@ -333,17 +317,20 @@ static int bcma_hcd_probe(struct bcma_device *dev)
            && chipinfo->rev == 0)
                ohci_addr = 0x18009000;
 
-       usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, true, ohci_addr);
+       usb_dev->ohci_dev = bcma_hcd_create_pdev(dev, "ohci-platform",
+                                                ohci_addr, &ohci_pdata,
+                                                sizeof(ohci_pdata));
        if (IS_ERR(usb_dev->ohci_dev))
                return PTR_ERR(usb_dev->ohci_dev);
 
-       usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, false, dev->addr);
+       usb_dev->ehci_dev = bcma_hcd_create_pdev(dev, "ehci-platform",
+                                                dev->addr, &ehci_pdata,
+                                                sizeof(ehci_pdata));
        if (IS_ERR(usb_dev->ehci_dev)) {
                err = PTR_ERR(usb_dev->ehci_dev);
                goto err_unregister_ohci_dev;
        }
 
-       bcma_set_drvdata(dev, usb_dev);
        return 0;
 
 err_unregister_ohci_dev:
@@ -351,6 +338,40 @@ err_unregister_ohci_dev:
        return err;
 }
 
+static int bcma_hcd_probe(struct bcma_device *core)
+{
+       int err;
+       struct bcma_hcd_device *usb_dev;
+
+       /* TODO: Probably need checks here; is the core connected? */
+
+       usb_dev = devm_kzalloc(&core->dev, sizeof(struct bcma_hcd_device),
+                              GFP_KERNEL);
+       if (!usb_dev)
+               return -ENOMEM;
+       usb_dev->core = core;
+
+       if (core->dev.of_node)
+               usb_dev->gpio_desc = devm_get_gpiod_from_child(&core->dev, "vcc",
+                                                              &core->dev.of_node->fwnode);
+       if (!IS_ERR_OR_NULL(usb_dev->gpio_desc))
+               gpiod_direction_output(usb_dev->gpio_desc, 1);
+
+       switch (core->id.id) {
+       case BCMA_CORE_USB20_HOST:
+       case BCMA_CORE_NS_USB20:
+               err = bcma_hcd_usb20_init(usb_dev);
+               if (err)
+                       return err;
+               break;
+       default:
+               return -ENODEV;
+       }
+
+       bcma_set_drvdata(core, usb_dev);
+       return 0;
+}
+
 static void bcma_hcd_remove(struct bcma_device *dev)
 {
        struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev);
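bcma_hcd_create_pdev() now receives the platform-driver name and platform data explicitly, so a single helper can spawn both the ohci-platform and ehci-platform children, and bcma_hcd_probe() is split so the USB 2.0 specific setup lives in bcma_hcd_usb20_init(). The allocate / add-resources / add-data / add sequence the helper follows is the standard one sketched below, with invented names and an illustrative resource size rather than the driver's values:

#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/err.h>

static struct platform_device *my_create_child(const char *name, u32 addr,
                                               int irq, const void *pdata,
                                               size_t size)
{
        struct resource res[2] = {
                DEFINE_RES_MEM(addr, 0x1000),   /* size is illustrative */
                DEFINE_RES_IRQ(irq),
        };
        struct platform_device *pdev;
        int ret;

        pdev = platform_device_alloc(name, 0);
        if (!pdev)
                return ERR_PTR(-ENOMEM);

        ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
        if (!ret && pdata)
                ret = platform_device_add_data(pdev, pdata, size);
        if (!ret)
                ret = platform_device_add(pdev);
        if (ret) {
                platform_device_put(pdev);
                return ERR_PTR(ret);
        }
        return pdev;
}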
index b7d623f1523c5cdb21728e465aaba523f52ea0e1..79d12b2ba3c48693e59f540004cf32260031ab18 100644 (file)
  * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  * for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 /* this file is part of ehci-hcd.c */
 
 #ifdef CONFIG_DYNAMIC_DEBUG
 
-/* check the values in the HCSPARAMS register
+/*
+ * check the values in the HCSPARAMS register
  * (host controller _Structural_ parameters)
  * see EHCI spec, Table 2-4 for each value
  */
-static void dbg_hcs_params (struct ehci_hcd *ehci, char *label)
+static void dbg_hcs_params(struct ehci_hcd *ehci, char *label)
 {
        u32     params = ehci_readl(ehci, &ehci->caps->hcs_params);
 
-       ehci_dbg (ehci,
+       ehci_dbg(ehci,
                "%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n",
                label, params,
-               HCS_DEBUG_PORT (params),
-               HCS_INDICATOR (params) ? " ind" : "",
-               HCS_N_CC (params),
-               HCS_N_PCC (params),
-               HCS_PORTROUTED (params) ? "" : " ordered",
-               HCS_PPC (params) ? "" : " !ppc",
-               HCS_N_PORTS (params)
-               );
+               HCS_DEBUG_PORT(params),
+               HCS_INDICATOR(params) ? " ind" : "",
+               HCS_N_CC(params),
+               HCS_N_PCC(params),
+               HCS_PORTROUTED(params) ? "" : " ordered",
+               HCS_PPC(params) ? "" : " !ppc",
+               HCS_N_PORTS(params));
        /* Port routing, per EHCI 0.95 Spec, Section 2.2.5 */
-       if (HCS_PORTROUTED (params)) {
+       if (HCS_PORTROUTED(params)) {
                int i;
-               char buf [46], tmp [7], byte;
+               char buf[46], tmp[7], byte;
 
                buf[0] = 0;
-               for (i = 0; i < HCS_N_PORTS (params); i++) {
-                       // FIXME MIPS won't readb() ...
-                       byte = readb (&ehci->caps->portroute[(i>>1)]);
+               for (i = 0; i < HCS_N_PORTS(params); i++) {
+                       /* FIXME MIPS won't readb() ... */
+                       byte = readb(&ehci->caps->portroute[(i >> 1)]);
                        sprintf(tmp, "%d ",
-                               ((i & 0x1) ? ((byte)&0xf) : ((byte>>4)&0xf)));
+                               (i & 0x1) ? byte & 0xf : (byte >> 4) & 0xf);
                        strcat(buf, tmp);
                }
-               ehci_dbg (ehci, "%s portroute %s\n",
-                               label, buf);
+               ehci_dbg(ehci, "%s portroute %s\n", label, buf);
        }
 }
 #else
 
-static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {}
+static inline void dbg_hcs_params(struct ehci_hcd *ehci, char *label) {}
 
 #endif
 
 #ifdef CONFIG_DYNAMIC_DEBUG
 
-/* check the values in the HCCPARAMS register
+/*
+ * check the values in the HCCPARAMS register
  * (host controller _Capability_ parameters)
  * see EHCI Spec, Table 2-5 for each value
- * */
-static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
+ */
+static void dbg_hcc_params(struct ehci_hcd *ehci, char *label)
 {
        u32     params = ehci_readl(ehci, &ehci->caps->hcc_params);
 
-       if (HCC_ISOC_CACHE (params)) {
-               ehci_dbg (ehci,
+       if (HCC_ISOC_CACHE(params)) {
+               ehci_dbg(ehci,
                        "%s hcc_params %04x caching frame %s%s%s\n",
                        label, params,
                        HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
                        HCC_CANPARK(params) ? " park" : "",
                        HCC_64BIT_ADDR(params) ? " 64 bit addr" : "");
        } else {
-               ehci_dbg (ehci,
+               ehci_dbg(ehci,
                        "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n",
                        label,
                        params,
@@ -97,21 +94,21 @@ static void dbg_hcc_params (struct ehci_hcd *ehci, char *label)
 }
 #else
 
-static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
+static inline void dbg_hcc_params(struct ehci_hcd *ehci, char *label) {}
 
 #endif
 
 #ifdef CONFIG_DYNAMIC_DEBUG
 
 static void __maybe_unused
-dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
+dbg_qtd(const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
 {
        ehci_dbg(ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
                hc32_to_cpup(ehci, &qtd->hw_next),
                hc32_to_cpup(ehci, &qtd->hw_alt_next),
                hc32_to_cpup(ehci, &qtd->hw_token),
-               hc32_to_cpup(ehci, &qtd->hw_buf [0]));
-       if (qtd->hw_buf [1])
+               hc32_to_cpup(ehci, &qtd->hw_buf[0]));
+       if (qtd->hw_buf[1])
                ehci_dbg(ehci, "  p1=%08x p2=%08x p3=%08x p4=%08x\n",
                        hc32_to_cpup(ehci, &qtd->hw_buf[1]),
                        hc32_to_cpup(ehci, &qtd->hw_buf[2]),
@@ -120,22 +117,22 @@ dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
 }
 
 static void __maybe_unused
-dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
+dbg_qh(const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
 {
        struct ehci_qh_hw *hw = qh->hw;
 
-       ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
+       ehci_dbg(ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
                qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
        dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next);
 }
 
 static void __maybe_unused
-dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
+dbg_itd(const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
 {
-       ehci_dbg (ehci, "%s [%d] itd %p, next %08x, urb %p\n",
+       ehci_dbg(ehci, "%s [%d] itd %p, next %08x, urb %p\n",
                label, itd->frame, itd, hc32_to_cpu(ehci, itd->hw_next),
                itd->urb);
-       ehci_dbg (ehci,
+       ehci_dbg(ehci,
                "  trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
                hc32_to_cpu(ehci, itd->hw_transaction[0]),
                hc32_to_cpu(ehci, itd->hw_transaction[1]),
@@ -145,7 +142,7 @@ dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
                hc32_to_cpu(ehci, itd->hw_transaction[5]),
                hc32_to_cpu(ehci, itd->hw_transaction[6]),
                hc32_to_cpu(ehci, itd->hw_transaction[7]));
-       ehci_dbg (ehci,
+       ehci_dbg(ehci,
                "  buf:   %08x %08x %08x %08x %08x %08x %08x\n",
                hc32_to_cpu(ehci, itd->hw_bufp[0]),
                hc32_to_cpu(ehci, itd->hw_bufp[1]),
@@ -154,19 +151,19 @@ dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd)
                hc32_to_cpu(ehci, itd->hw_bufp[4]),
                hc32_to_cpu(ehci, itd->hw_bufp[5]),
                hc32_to_cpu(ehci, itd->hw_bufp[6]));
-       ehci_dbg (ehci, "  index: %d %d %d %d %d %d %d %d\n",
+       ehci_dbg(ehci, "  index: %d %d %d %d %d %d %d %d\n",
                itd->index[0], itd->index[1], itd->index[2],
                itd->index[3], itd->index[4], itd->index[5],
                itd->index[6], itd->index[7]);
 }
 
 static void __maybe_unused
-dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
+dbg_sitd(const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
 {
-       ehci_dbg (ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
+       ehci_dbg(ehci, "%s [%d] sitd %p, next %08x, urb %p\n",
                label, sitd->frame, sitd, hc32_to_cpu(ehci, sitd->hw_next),
                sitd->urb);
-       ehci_dbg (ehci,
+       ehci_dbg(ehci,
                "  addr %08x sched %04x result %08x buf %08x %08x\n",
                hc32_to_cpu(ehci, sitd->hw_fullspeed_ep),
                hc32_to_cpu(ehci, sitd->hw_uframe),
@@ -176,11 +173,11 @@ dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd)
 }
 
 static int __maybe_unused
-dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
 {
-       return scnprintf (buf, len,
+       return scnprintf(buf, len,
                "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s",
-               label, label [0] ? " " : "", status,
+               label, label[0] ? " " : "", status,
                (status & STS_PPCE_MASK) ? " PPCE" : "",
                (status & STS_ASS) ? " Async" : "",
                (status & STS_PSS) ? " Periodic" : "",
@@ -191,79 +188,83 @@ dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
                (status & STS_FLR) ? " FLR" : "",
                (status & STS_PCD) ? " PCD" : "",
                (status & STS_ERR) ? " ERR" : "",
-               (status & STS_INT) ? " INT" : ""
-               );
+               (status & STS_INT) ? " INT" : "");
 }
 
 static int __maybe_unused
-dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
 {
-       return scnprintf (buf, len,
+       return scnprintf(buf, len,
                "%s%sintrenable %02x%s%s%s%s%s%s%s",
-               label, label [0] ? " " : "", enable,
+               label, label[0] ? " " : "", enable,
                (enable & STS_PPCE_MASK) ? " PPCE" : "",
                (enable & STS_IAA) ? " IAA" : "",
                (enable & STS_FATAL) ? " FATAL" : "",
                (enable & STS_FLR) ? " FLR" : "",
                (enable & STS_PCD) ? " PCD" : "",
                (enable & STS_ERR) ? " ERR" : "",
-               (enable & STS_INT) ? " INT" : ""
-               );
+               (enable & STS_INT) ? " INT" : "");
 }
 
-static const char *const fls_strings [] =
-    { "1024", "512", "256", "??" };
+static const char *const fls_strings[] = { "1024", "512", "256", "??" };
 
 static int
-dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
 {
-       return scnprintf (buf, len,
+       return scnprintf(buf, len,
                "%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s "
                "period=%s%s %s",
-               label, label [0] ? " " : "", command,
+               label, label[0] ? " " : "", command,
                (command & CMD_HIRD) ? " HIRD" : "",
                (command & CMD_PPCEE) ? " PPCEE" : "",
                (command & CMD_FSP) ? " FSP" : "",
                (command & CMD_ASPE) ? " ASPE" : "",
                (command & CMD_PSPE) ? " PSPE" : "",
                (command & CMD_PARK) ? " park" : "(park)",
-               CMD_PARK_CNT (command),
+               CMD_PARK_CNT(command),
                (command >> 16) & 0x3f,
                (command & CMD_LRESET) ? " LReset" : "",
                (command & CMD_IAAD) ? " IAAD" : "",
                (command & CMD_ASE) ? " Async" : "",
                (command & CMD_PSE) ? " Periodic" : "",
-               fls_strings [(command >> 2) & 0x3],
+               fls_strings[(command >> 2) & 0x3],
                (command & CMD_RESET) ? " Reset" : "",
-               (command & CMD_RUN) ? "RUN" : "HALT"
-               );
+               (command & CMD_RUN) ? "RUN" : "HALT");
 }
 
 static int
-dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
 {
        char    *sig;
 
        /* signaling state */
        switch (status & (3 << 10)) {
-       case 0 << 10: sig = "se0"; break;
-       case 1 << 10: sig = "k"; break;         /* low speed */
-       case 2 << 10: sig = "j"; break;
-       default: sig = "?"; break;
+       case 0 << 10:
+               sig = "se0";
+               break;
+       case 1 << 10: /* low speed */
+               sig = "k";
+               break;
+       case 2 << 10:
+               sig = "j";
+               break;
+       default:
+               sig = "?";
+               break;
        }
 
-       return scnprintf (buf, len,
+       return scnprintf(buf, len,
                "%s%sport:%d status %06x %d %s%s%s%s%s%s "
                "sig=%s%s%s%s%s%s%s%s%s%s%s",
-               label, label [0] ? " " : "", port, status,
-               status>>25,/*device address */
-               (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ?
+               label, label[0] ? " " : "", port, status,
+               status >> 25, /*device address */
+               (status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_ACK ?
                                                " ACK" : "",
-               (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ?
+               (status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_NYET ?
                                                " NYET" : "",
-               (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ?
+               (status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_STALL ?
                                                " STALL" : "",
-               (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ?
+               (status & PORT_SSTS) >> 23 == PORTSC_SUSPEND_STS_ERR ?
                                                " ERR" : "",
                (status & PORT_POWER) ? " POWER" : "",
                (status & PORT_OWNER) ? " OWNER" : "",
@@ -282,52 +283,68 @@ dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
 
 #else
 static inline void __maybe_unused
-dbg_qh (char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
+dbg_qh(char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
 {}
 
 static inline int __maybe_unused
-dbg_status_buf (char *buf, unsigned len, const char *label, u32 status)
-{ return 0; }
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+       return 0;
+}
 
 static inline int __maybe_unused
-dbg_command_buf (char *buf, unsigned len, const char *label, u32 command)
-{ return 0; }
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{
+       return 0;
+}
 
 static inline int __maybe_unused
-dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable)
-{ return 0; }
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+       return 0;
+}
 
 static inline int __maybe_unused
-dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
-{ return 0; }
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{
+       return 0;
+}
 
 #endif /* CONFIG_DYNAMIC_DEBUG */
 
-/* functions have the "wrong" filename when they're output... */
-#define dbg_status(ehci, label, status) { \
-       char _buf [80]; \
-       dbg_status_buf (_buf, sizeof _buf, label, status); \
-       ehci_dbg (ehci, "%s\n", _buf); \
+static inline void
+dbg_status(struct ehci_hcd *ehci, const char *label, u32 status)
+{
+       char buf[80];
+
+       dbg_status_buf(buf, sizeof(buf), label, status);
+       ehci_dbg(ehci, "%s\n", buf);
 }
 
-#define dbg_cmd(ehci, label, command) { \
-       char _buf [80]; \
-       dbg_command_buf (_buf, sizeof _buf, label, command); \
-       ehci_dbg (ehci, "%s\n", _buf); \
+static inline void
+dbg_cmd(struct ehci_hcd *ehci, const char *label, u32 command)
+{
+       char buf[80];
+
+       dbg_command_buf(buf, sizeof(buf), label, command);
+       ehci_dbg(ehci, "%s\n", buf);
 }
 
-#define dbg_port(ehci, label, port, status) { \
-       char _buf [80]; \
-       dbg_port_buf (_buf, sizeof _buf, label, port, status); \
-       ehci_dbg (ehci, "%s\n", _buf); \
+static inline void
+dbg_port(struct ehci_hcd *ehci, const char *label, int port, u32 status)
+{
+       char buf[80];
+
+       dbg_port_buf(buf, sizeof(buf), label, port, status);
+       ehci_dbg(ehci, "%s\n", buf);
 }
 
 /*-------------------------------------------------------------------------*/
 
-#ifdef STUB_DEBUG_FILES
+#ifndef CONFIG_DYNAMIC_DEBUG
 
-static inline void create_debug_files (struct ehci_hcd *bus) { }
-static inline void remove_debug_files (struct ehci_hcd *bus) { }
+static inline void create_debug_files(struct ehci_hcd *bus) { }
+static inline void remove_debug_files(struct ehci_hcd *bus) { }
 
 #else
 
@@ -348,6 +365,7 @@ static const struct file_operations debug_async_fops = {
        .release        = debug_close,
        .llseek         = default_llseek,
 };
+
 static const struct file_operations debug_bandwidth_fops = {
        .owner          = THIS_MODULE,
        .open           = debug_bandwidth_open,
@@ -355,6 +373,7 @@ static const struct file_operations debug_bandwidth_fops = {
        .release        = debug_close,
        .llseek         = default_llseek,
 };
+
 static const struct file_operations debug_periodic_fops = {
        .owner          = THIS_MODULE,
        .open           = debug_periodic_open,
@@ -362,6 +381,7 @@ static const struct file_operations debug_periodic_fops = {
        .release        = debug_close,
        .llseek         = default_llseek,
 };
+
 static const struct file_operations debug_registers_fops = {
        .owner          = THIS_MODULE,
        .open           = debug_registers_open,
@@ -381,13 +401,19 @@ struct debug_buffer {
        size_t alloc_size;
 };
 
-#define speed_char(info1) ({ char tmp; \
-               switch (info1 & (3 << 12)) { \
-               case QH_FULL_SPEED: tmp = 'f'; break; \
-               case QH_LOW_SPEED:  tmp = 'l'; break; \
-               case QH_HIGH_SPEED: tmp = 'h'; break; \
-               default: tmp = '?'; break; \
-               } tmp; })
+static inline char speed_char(u32 info1)
+{
+       switch (info1 & (3 << 12)) {
+       case QH_FULL_SPEED:
+               return 'f';
+       case QH_LOW_SPEED:
+               return 'l';
+       case QH_HIGH_SPEED:
+               return 'h';
+       default:
+               return '?';
+       }
+}
 
 static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
 {
@@ -397,18 +423,14 @@ static inline char token_mark(struct ehci_hcd *ehci, __hc32 token)
                return '*';
        if (v & QTD_STS_HALT)
                return '-';
-       if (!IS_SHORT_READ (v))
+       if (!IS_SHORT_READ(v))
                return ' ';
        /* tries to advance through hw_alt_next */
        return '/';
 }
 
-static void qh_lines (
-       struct ehci_hcd *ehci,
-       struct ehci_qh *qh,
-       char **nextp,
-       unsigned *sizep
-)
+static void qh_lines(struct ehci_hcd *ehci, struct ehci_qh *qh,
+               char **nextp, unsigned *sizep)
 {
        u32                     scratch;
        u32                     hw_curr;
@@ -435,7 +457,7 @@ static void qh_lines (
        }
        scratch = hc32_to_cpup(ehci, &hw->hw_info1);
        hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0;
-       temp = scnprintf (next, size,
+       temp = scnprintf(next, size,
                        "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)"
                        " [cur %08x next %08x buf[0] %08x]",
                        qh, scratch & 0x007f,
@@ -453,46 +475,52 @@ static void qh_lines (
        next += temp;
 
        /* hc may be modifying the list as we read it ... */
-       list_for_each (entry, &qh->qtd_list) {
-               td = list_entry (entry, struct ehci_qtd, qtd_list);
+       list_for_each(entry, &qh->qtd_list) {
+               char *type;
+
+               td = list_entry(entry, struct ehci_qtd, qtd_list);
                scratch = hc32_to_cpup(ehci, &td->hw_token);
                mark = ' ';
-               if (hw_curr == td->qtd_dma)
+               if (hw_curr == td->qtd_dma) {
                        mark = '*';
-               else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
+               } else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) {
                        mark = '+';
-               else if (QTD_LENGTH (scratch)) {
+               } else if (QTD_LENGTH(scratch)) {
                        if (td->hw_alt_next == ehci->async->hw->hw_alt_next)
                                mark = '#';
                        else if (td->hw_alt_next != list_end)
                                mark = '/';
                }
-               temp = snprintf (next, size,
+               switch ((scratch >> 8) & 0x03) {
+               case 0:
+                       type = "out";
+                       break;
+               case 1:
+                       type = "in";
+                       break;
+               case 2:
+                       type = "setup";
+                       break;
+               default:
+                       type = "?";
+                       break;
+               }
+               temp = scnprintf(next, size,
                                "\n\t%p%c%s len=%d %08x urb %p"
                                " [td %08x buf[0] %08x]",
-                               td, mark, ({ char *tmp;
-                                switch ((scratch>>8)&0x03) {
-                                case 0: tmp = "out"; break;
-                                case 1: tmp = "in"; break;
-                                case 2: tmp = "setup"; break;
-                                default: tmp = "?"; break;
-                                } tmp;}),
+                               td, mark, type,
                                (scratch >> 16) & 0x7fff,
                                scratch,
                                td->urb,
                                (u32) td->qtd_dma,
                                hc32_to_cpup(ehci, &td->hw_buf[0]));
-               if (size < temp)
-                       temp = size;
                size -= temp;
                next += temp;
                if (temp == size)
                        goto done;
        }
 
-       temp = snprintf (next, size, "\n");
-       if (size < temp)
-               temp = size;
+       temp = scnprintf(next, size, "\n");
        size -= temp;
        next += temp;
 
@@ -511,19 +539,20 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
        struct ehci_qh          *qh;
 
        hcd = bus_to_hcd(buf->bus);
-       ehci = hcd_to_ehci (hcd);
+       ehci = hcd_to_ehci(hcd);
        next = buf->output_buf;
        size = buf->alloc_size;
 
        *next = 0;
 
-       /* dumps a snapshot of the async schedule.
+       /*
+        * dumps a snapshot of the async schedule.
         * usually empty except for long-term bulk reads, or head.
         * one QH per line, and TDs we know about
         */
-       spin_lock_irqsave (&ehci->lock, flags);
+       spin_lock_irqsave(&ehci->lock, flags);
        for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
-               qh_lines (ehci, qh, &next, &size);
+               qh_lines(ehci, qh, &next, &size);
        if (!list_empty(&ehci->async_unlink) && size > 0) {
                temp = scnprintf(next, size, "\nunlink =\n");
                size -= temp;
@@ -535,7 +564,7 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
                        qh_lines(ehci, qh, &next, &size);
                }
        }
-       spin_unlock_irqrestore (&ehci->lock, flags);
+       spin_unlock_irqrestore(&ehci->lock, flags);
 
        return strlen(buf->output_buf);
 }
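The snprintf()-to-scnprintf() conversions in qh_lines() above are more than style: the running "size -= temp; next += temp;" accounting only works when the return value is the number of bytes actually stored. snprintf() returns the length the output *would* have had, which can exceed the remaining space and used to require the explicit "if (size < temp) temp = size;" clamp that the hunk deletes. A small user-space approximation of the difference; my_scnprintf() is a stand-in for the kernel helper, not its real implementation:

        #include <stdarg.h>
        #include <stdio.h>

        /* Rough user-space stand-in for the kernel's scnprintf(). */
        static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
        {
                va_list args;
                int ret;

                if (size == 0)
                        return 0;
                va_start(args, fmt);
                ret = vsnprintf(buf, size, fmt, args);
                va_end(args);
                /* Clamp to what actually fit, excluding the trailing NUL. */
                return ret >= (int)size ? (int)size - 1 : ret;
        }

        int main(void)
        {
                char buf[8];
                char *next = buf;
                size_t size = sizeof(buf);
                int temp;

                temp = my_scnprintf(next, size, "0123456789"); /* 7, not 10 */
                size -= temp;   /* stays non-negative without extra clamping */
                next += temp;
                printf("wrote %d, remaining %zu\n", temp, size);
                return 0;
        }
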
@@ -623,6 +652,33 @@ static ssize_t fill_bandwidth_buffer(struct debug_buffer *buf)
        return next - buf->output_buf;
 }
 
+static unsigned output_buf_tds_dir(char *buf, struct ehci_hcd *ehci,
+               struct ehci_qh_hw *hw, struct ehci_qh *qh, unsigned size)
+{
+       u32                     scratch = hc32_to_cpup(ehci, &hw->hw_info1);
+       struct ehci_qtd         *qtd;
+       char                    *type = "";
+       unsigned                temp = 0;
+
+       /* count tds, get ep direction */
+       list_for_each_entry(qtd, &qh->qtd_list, qtd_list) {
+               temp++;
+               switch ((hc32_to_cpu(ehci, qtd->hw_token) >> 8) & 0x03) {
+               case 0:
+                       type = "out";
+                       continue;
+               case 1:
+                       type = "in";
+                       continue;
+               }
+       }
+
+       return scnprintf(buf, size, " (%c%d ep%d%s [%d/%d] q%d p%d)",
+                       speed_char(scratch), scratch & 0x007f,
+                       (scratch >> 8) & 0x000f, type, qh->ps.usecs,
+                       qh->ps.c_usecs, temp, 0x7ff & (scratch >> 16));
+}
+
 #define DBG_SCHED_LIMIT 64
 static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
 {
@@ -635,31 +691,32 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
        unsigned                i;
        __hc32                  tag;
 
-       seen = kmalloc(DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC);
+       seen = kmalloc_array(DBG_SCHED_LIMIT, sizeof(*seen), GFP_ATOMIC);
        if (!seen)
                return 0;
        seen_count = 0;
 
        hcd = bus_to_hcd(buf->bus);
-       ehci = hcd_to_ehci (hcd);
+       ehci = hcd_to_ehci(hcd);
        next = buf->output_buf;
        size = buf->alloc_size;
 
-       temp = scnprintf (next, size, "size = %d\n", ehci->periodic_size);
+       temp = scnprintf(next, size, "size = %d\n", ehci->periodic_size);
        size -= temp;
        next += temp;
 
-       /* dump a snapshot of the periodic schedule.
+       /*
+        * dump a snapshot of the periodic schedule.
         * iso changes, interrupt usually doesn't.
         */
-       spin_lock_irqsave (&ehci->lock, flags);
+       spin_lock_irqsave(&ehci->lock, flags);
        for (i = 0; i < ehci->periodic_size; i++) {
-               p = ehci->pshadow [i];
-               if (likely (!p.ptr))
+               p = ehci->pshadow[i];
+               if (likely(!p.ptr))
                        continue;
-               tag = Q_NEXT_TYPE(ehci, ehci->periodic [i]);
+               tag = Q_NEXT_TYPE(ehci, ehci->periodic[i]);
 
-               temp = scnprintf (next, size, "%4d: ", i);
+               temp = scnprintf(next, size, "%4d: ", i);
                size -= temp;
                next += temp;
 
@@ -669,7 +726,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
                        switch (hc32_to_cpu(ehci, tag)) {
                        case Q_TYPE_QH:
                                hw = p.qh->hw;
-                               temp = scnprintf (next, size, " qh%d-%04x/%p",
+                               temp = scnprintf(next, size, " qh%d-%04x/%p",
                                                p.qh->ps.period,
                                                hc32_to_cpup(ehci,
                                                        &hw->hw_info2)
@@ -680,10 +737,10 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
                                next += temp;
                                /* don't repeat what follows this qh */
                                for (temp = 0; temp < seen_count; temp++) {
-                                       if (seen [temp].ptr != p.ptr)
+                                       if (seen[temp].ptr != p.ptr)
                                                continue;
                                        if (p.qh->qh_next.ptr) {
-                                               temp = scnprintf (next, size,
+                                               temp = scnprintf(next, size,
                                                        " ...");
                                                size -= temp;
                                                next += temp;
@@ -692,58 +749,32 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
                                }
                                /* show more info the first time around */
                                if (temp == seen_count) {
-                                       u32     scratch = hc32_to_cpup(ehci,
-                                                       &hw->hw_info1);
-                                       struct ehci_qtd *qtd;
-                                       char            *type = "";
-
-                                       /* count tds, get ep direction */
-                                       temp = 0;
-                                       list_for_each_entry (qtd,
-                                                       &p.qh->qtd_list,
-                                                       qtd_list) {
-                                               temp++;
-                                               switch (0x03 & (hc32_to_cpu(
-                                                       ehci,
-                                                       qtd->hw_token) >> 8)) {
-                                               case 0: type = "out"; continue;
-                                               case 1: type = "in"; continue;
-                                               }
-                                       }
-
-                                       temp = scnprintf (next, size,
-                                               " (%c%d ep%d%s "
-                                               "[%d/%d] q%d p%d)",
-                                               speed_char (scratch),
-                                               scratch & 0x007f,
-                                               (scratch >> 8) & 0x000f, type,
-                                               p.qh->ps.usecs,
-                                               p.qh->ps.c_usecs,
-                                               temp,
-                                               0x7ff & (scratch >> 16));
+                                       temp = output_buf_tds_dir(next, ehci,
+                                               hw, p.qh, size);
 
                                        if (seen_count < DBG_SCHED_LIMIT)
-                                               seen [seen_count++].qh = p.qh;
-                               } else
+                                               seen[seen_count++].qh = p.qh;
+                               } else {
                                        temp = 0;
+                               }
                                tag = Q_NEXT_TYPE(ehci, hw->hw_next);
                                p = p.qh->qh_next;
                                break;
                        case Q_TYPE_FSTN:
-                               temp = scnprintf (next, size,
+                               temp = scnprintf(next, size,
                                        " fstn-%8x/%p", p.fstn->hw_prev,
                                        p.fstn);
                                tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next);
                                p = p.fstn->fstn_next;
                                break;
                        case Q_TYPE_ITD:
-                               temp = scnprintf (next, size,
+                               temp = scnprintf(next, size,
                                        " itd/%p", p.itd);
                                tag = Q_NEXT_TYPE(ehci, p.itd->hw_next);
                                p = p.itd->itd_next;
                                break;
                        case Q_TYPE_SITD:
-                               temp = scnprintf (next, size,
+                               temp = scnprintf(next, size,
                                        " sitd%d-%04x/%p",
                                        p.sitd->stream->ps.period,
                                        hc32_to_cpup(ehci, &p.sitd->hw_uframe)
@@ -757,12 +788,12 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
                        next += temp;
                } while (p.ptr);
 
-               temp = scnprintf (next, size, "\n");
+               temp = scnprintf(next, size, "\n");
                size -= temp;
                next += temp;
        }
-       spin_unlock_irqrestore (&ehci->lock, flags);
-       kfree (seen);
+       spin_unlock_irqrestore(&ehci->lock, flags);
+       kfree(seen);
 
        return buf->alloc_size - size;
 }
@@ -789,19 +820,19 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
        struct ehci_hcd         *ehci;
        unsigned long           flags;
        unsigned                temp, size, i;
-       char                    *next, scratch [80];
-       static char             fmt [] = "%*s\n";
-       static char             label [] = "";
+       char                    *next, scratch[80];
+       static char             fmt[] = "%*s\n";
+       static char             label[] = "";
 
        hcd = bus_to_hcd(buf->bus);
-       ehci = hcd_to_ehci (hcd);
+       ehci = hcd_to_ehci(hcd);
        next = buf->output_buf;
        size = buf->alloc_size;
 
-       spin_lock_irqsave (&ehci->lock, flags);
+       spin_lock_irqsave(&ehci->lock, flags);
 
        if (!HCD_HW_ACCESSIBLE(hcd)) {
-               size = scnprintf (next, size,
+               size = scnprintf(next, size,
                        "bus %s, device %s\n"
                        "%s\n"
                        "SUSPENDED (no register access)\n",
@@ -813,7 +844,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
 
        /* Capability Registers */
        i = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
-       temp = scnprintf (next, size,
+       temp = scnprintf(next, size,
                "bus %s, device %s\n"
                "%s\n"
                "EHCI %x.%02x, rh state %s\n",
@@ -829,16 +860,16 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
        if (dev_is_pci(hcd->self.controller)) {
                struct pci_dev  *pdev;
                u32             offset, cap, cap2;
-               unsigned        count = 256/4;
+               unsigned        count = 256 / 4;
 
                pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller);
                offset = HCC_EXT_CAPS(ehci_readl(ehci,
                                &ehci->caps->hcc_params));
                while (offset && count--) {
-                       pci_read_config_dword (pdev, offset, &cap);
+                       pci_read_config_dword(pdev, offset, &cap);
                        switch (cap & 0xff) {
                        case 1:
-                               temp = scnprintf (next, size,
+                               temp = scnprintf(next, size,
                                        "ownership %08x%s%s\n", cap,
                                        (cap & (1 << 24)) ? " linux" : "",
                                        (cap & (1 << 16)) ? " firmware" : "");
@@ -846,8 +877,8 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
                                next += temp;
 
                                offset += 4;
-                               pci_read_config_dword (pdev, offset, &cap2);
-                               temp = scnprintf (next, size,
+                               pci_read_config_dword(pdev, offset, &cap2);
+                               temp = scnprintf(next, size,
                                        "SMI sts/enable 0x%08x\n", cap2);
                                size -= temp;
                                next += temp;
@@ -863,50 +894,50 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
        }
 #endif
 
-       // FIXME interpret both types of params
+       /* FIXME interpret both types of params */
        i = ehci_readl(ehci, &ehci->caps->hcs_params);
-       temp = scnprintf (next, size, "structural params 0x%08x\n", i);
+       temp = scnprintf(next, size, "structural params 0x%08x\n", i);
        size -= temp;
        next += temp;
 
        i = ehci_readl(ehci, &ehci->caps->hcc_params);
-       temp = scnprintf (next, size, "capability params 0x%08x\n", i);
+       temp = scnprintf(next, size, "capability params 0x%08x\n", i);
        size -= temp;
        next += temp;
 
        /* Operational Registers */
-       temp = dbg_status_buf (scratch, sizeof scratch, label,
+       temp = dbg_status_buf(scratch, sizeof(scratch), label,
                        ehci_readl(ehci, &ehci->regs->status));
-       temp = scnprintf (next, size, fmt, temp, scratch);
+       temp = scnprintf(next, size, fmt, temp, scratch);
        size -= temp;
        next += temp;
 
-       temp = dbg_command_buf (scratch, sizeof scratch, label,
+       temp = dbg_command_buf(scratch, sizeof(scratch), label,
                        ehci_readl(ehci, &ehci->regs->command));
-       temp = scnprintf (next, size, fmt, temp, scratch);
+       temp = scnprintf(next, size, fmt, temp, scratch);
        size -= temp;
        next += temp;
 
-       temp = dbg_intr_buf (scratch, sizeof scratch, label,
+       temp = dbg_intr_buf(scratch, sizeof(scratch), label,
                        ehci_readl(ehci, &ehci->regs->intr_enable));
-       temp = scnprintf (next, size, fmt, temp, scratch);
+       temp = scnprintf(next, size, fmt, temp, scratch);
        size -= temp;
        next += temp;
 
-       temp = scnprintf (next, size, "uframe %04x\n",
+       temp = scnprintf(next, size, "uframe %04x\n",
                        ehci_read_frame_index(ehci));
        size -= temp;
        next += temp;
 
-       for (i = 1; i <= HCS_N_PORTS (ehci->hcs_params); i++) {
-               temp = dbg_port_buf (scratch, sizeof scratch, label, i,
+       for (i = 1; i <= HCS_N_PORTS(ehci->hcs_params); i++) {
+               temp = dbg_port_buf(scratch, sizeof(scratch), label, i,
                                ehci_readl(ehci,
                                        &ehci->regs->port_status[i - 1]));
-               temp = scnprintf (next, size, fmt, temp, scratch);
+               temp = scnprintf(next, size, fmt, temp, scratch);
                size -= temp;
                next += temp;
                if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) {
-                       temp = scnprintf (next, size,
+                       temp = scnprintf(next, size,
                                        "    debug control %08x\n",
                                        ehci_readl(ehci,
                                                &ehci->debug->control));
@@ -924,31 +955,31 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
        }
 
 #ifdef EHCI_STATS
-       temp = scnprintf (next, size,
+       temp = scnprintf(next, size,
                "irq normal %ld err %ld iaa %ld (lost %ld)\n",
                ehci->stats.normal, ehci->stats.error, ehci->stats.iaa,
                ehci->stats.lost_iaa);
        size -= temp;
        next += temp;
 
-       temp = scnprintf (next, size, "complete %ld unlink %ld\n",
+       temp = scnprintf(next, size, "complete %ld unlink %ld\n",
                ehci->stats.complete, ehci->stats.unlink);
        size -= temp;
        next += temp;
 #endif
 
 done:
-       spin_unlock_irqrestore (&ehci->lock, flags);
+       spin_unlock_irqrestore(&ehci->lock, flags);
 
        return buf->alloc_size - size;
 }
 
 static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
-                               ssize_t (*fill_func)(struct debug_buffer *))
+               ssize_t (*fill_func)(struct debug_buffer *))
 {
        struct debug_buffer *buf;
 
-       buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+       buf = kzalloc(sizeof(*buf), GFP_KERNEL);
 
        if (buf) {
                buf->bus = bus;
@@ -984,7 +1015,7 @@ out:
 }
 
 static ssize_t debug_output(struct file *file, char __user *user_buf,
-                           size_t len, loff_t *offset)
+               size_t len, loff_t *offset)
 {
        struct debug_buffer *buf = file->private_data;
        int ret = 0;
@@ -1004,7 +1035,6 @@ static ssize_t debug_output(struct file *file, char __user *user_buf,
 
 out:
        return ret;
-
 }
 
 static int debug_close(struct inode *inode, struct file *file)
@@ -1037,11 +1067,12 @@ static int debug_bandwidth_open(struct inode *inode, struct file *file)
 static int debug_periodic_open(struct inode *inode, struct file *file)
 {
        struct debug_buffer *buf;
+
        buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
        if (!buf)
                return -ENOMEM;
 
-       buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
+       buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8) * PAGE_SIZE;
        file->private_data = buf;
        return 0;
 }
@@ -1054,7 +1085,7 @@ static int debug_registers_open(struct inode *inode, struct file *file)
        return file->private_data ? 0 : -ENOMEM;
 }
 
-static inline void create_debug_files (struct ehci_hcd *ehci)
+static inline void create_debug_files(struct ehci_hcd *ehci)
 {
        struct usb_bus *bus = &ehci_to_hcd(ehci)->self;
 
@@ -1084,9 +1115,9 @@ file_error:
        debugfs_remove_recursive(ehci->debug_dir);
 }
 
-static inline void remove_debug_files (struct ehci_hcd *ehci)
+static inline void remove_debug_files(struct ehci_hcd *ehci)
 {
        debugfs_remove_recursive(ehci->debug_dir);
 }
 
-#endif /* STUB_DEBUG_FILES */
+#endif /* CONFIG_DYNAMIC_DEBUG */
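The speed_char() and dbg_status()/dbg_cmd()/dbg_port() conversions in this file trade function-like macros for static inline functions: arguments become type-checked and single-evaluation, and the bodies no longer rely on GCC statement expressions or block-local _buf temporaries. A self-contained before/after illustration of the same transformation (build with GCC or Clang, since the old form needs the statement-expression extension; the constants are illustrative):

        #include <stdio.h>
        #include <stdint.h>

        #define QH_FULL_SPEED   (0 << 12)
        #define QH_LOW_SPEED    (1 << 12)
        #define QH_HIGH_SPEED   (2 << 12)

        /* Old style: a GCC statement-expression macro. */
        #define speed_char_macro(info1) ({ char tmp;            \
                switch ((info1) & (3 << 12)) {                  \
                case QH_FULL_SPEED: tmp = 'f'; break;           \
                case QH_LOW_SPEED:  tmp = 'l'; break;           \
                case QH_HIGH_SPEED: tmp = 'h'; break;           \
                default:            tmp = '?'; break;           \
                } tmp; })

        /* New style: typed, single-evaluation, plain C. */
        static inline char speed_char(uint32_t info1)
        {
                switch (info1 & (3 << 12)) {
                case QH_FULL_SPEED:
                        return 'f';
                case QH_LOW_SPEED:
                        return 'l';
                case QH_HIGH_SPEED:
                        return 'h';
                default:
                        return '?';
                }
        }

        int main(void)
        {
                printf("%c %c\n", speed_char_macro(QH_LOW_SPEED),
                       speed_char(QH_HIGH_SPEED));
                return 0;
        }
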
index 3b6eb219de1a714d31fc843718472efc6ab615f4..9f5ffb62997303dedd1d6c7d929191992a039c81 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/usb/otg.h>
 #include <linux/platform_device.h>
 #include <linux/fsl_devices.h>
+#include <linux/of_platform.h>
 
 #include "ehci.h"
 #include "ehci-fsl.h"
@@ -241,7 +242,8 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
         * to portsc
         */
        if (pdata->check_phy_clk_valid) {
-               if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & PHY_CLK_VALID)) {
+               if (!(ioread32be(non_ehci + FSL_SOC_USB_CTRL) &
+                   PHY_CLK_VALID)) {
                        dev_warn(hcd->self.controller,
                                 "USB PHY clock invalid\n");
                        return -EINVAL;
@@ -273,9 +275,11 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
 
                /* Setup Snooping for all the 4GB space */
                /* SNOOP1 starts from 0x0, size 2G */
-               out_be32(non_ehci + FSL_SOC_USB_SNOOP1, 0x0 | SNOOP_SIZE_2GB);
+               iowrite32be(0x0 | SNOOP_SIZE_2GB,
+                           non_ehci + FSL_SOC_USB_SNOOP1);
                /* SNOOP2 starts from 0x80000000, size 2G */
-               out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
+               iowrite32be(0x80000000 | SNOOP_SIZE_2GB,
+                           non_ehci + FSL_SOC_USB_SNOOP2);
        }
 
        /* Deal with USB erratum A-005275 */
@@ -309,13 +313,13 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
 
        if (pdata->have_sysif_regs) {
 #ifdef CONFIG_FSL_SOC_BOOKE
-               out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x00000008);
-               out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000080);
+               iowrite32be(0x00000008, non_ehci + FSL_SOC_USB_PRICTRL);
+               iowrite32be(0x00000080, non_ehci + FSL_SOC_USB_AGECNTTHRSH);
 #else
-               out_be32(non_ehci + FSL_SOC_USB_PRICTRL, 0x0000000c);
-               out_be32(non_ehci + FSL_SOC_USB_AGECNTTHRSH, 0x00000040);
+               iowrite32be(0x0000000c, non_ehci + FSL_SOC_USB_PRICTRL);
+               iowrite32be(0x00000040, non_ehci + FSL_SOC_USB_AGECNTTHRSH);
 #endif
-               out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
+               iowrite32be(0x00000001, non_ehci + FSL_SOC_USB_SICTRL);
        }
 
        return 0;
@@ -554,7 +558,7 @@ static int ehci_fsl_drv_suspend(struct device *dev)
        if (!fsl_deep_sleep())
                return 0;
 
-       ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
+       ehci_fsl->usb_ctrl = ioread32be(non_ehci + FSL_SOC_USB_CTRL);
        return 0;
 }
 
@@ -577,7 +581,7 @@ static int ehci_fsl_drv_resume(struct device *dev)
        usb_root_hub_lost_power(hcd->self.root_hub);
 
        /* Restore USB PHY settings and enable the controller. */
-       out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
+       iowrite32be(ehci_fsl->usb_ctrl, non_ehci + FSL_SOC_USB_CTRL);
 
        ehci_reset(ehci);
        ehci_fsl_reinit(ehci);
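The ehci-fsl.c hunks replace the PowerPC-only in_be32()/out_be32() accessors with the generic ioread32be()/iowrite32be(), which are also available on non-PowerPC builds. The one thing to watch is the argument order: out_be32(addr, val) becomes iowrite32be(val, addr). A minimal kernel-style sketch of the pattern, not tied to this driver (the helper name and register offset are made up, and it only compiles inside a kernel tree):

        #include <linux/io.h>
        #include <linux/types.h>

        #define EXAMPLE_CTRL_REG        0x500   /* hypothetical offset */

        static u32 example_update_ctrl(void __iomem *base, u32 set_bits)
        {
                u32 val;

                /* ioread32be(addr): read a big-endian 32-bit register. */
                val = ioread32be(base + EXAMPLE_CTRL_REG);
                val |= set_bits;
                /* iowrite32be(val, addr): value first, address second. */
                iowrite32be(val, base + EXAMPLE_CTRL_REG);
                return val;
        }
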
index 14178bbf069496aba61a03ab3589945d9bbf721a..ae1b6e69eb965b88dd13a3934d026af25d27c808 100644 (file)
@@ -306,9 +306,9 @@ static void ehci_quiesce (struct ehci_hcd *ehci)
 
 /*-------------------------------------------------------------------------*/
 
+static void end_iaa_cycle(struct ehci_hcd *ehci);
 static void end_unlink_async(struct ehci_hcd *ehci);
 static void unlink_empty_async(struct ehci_hcd *ehci);
-static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
 static void ehci_work(struct ehci_hcd *ehci);
 static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
 static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh);
@@ -565,6 +565,9 @@ static int ehci_init(struct usb_hcd *hcd)
        /* Accept arbitrarily long scatter-gather lists */
        if (!(hcd->driver->flags & HCD_LOCAL_MEM))
                hcd->self.sg_tablesize = ~0;
+
+       /* Prepare for unlinking active QHs */
+       ehci->old_current = ~0;
        return 0;
 }
 
@@ -675,8 +678,10 @@ int ehci_setup(struct usb_hcd *hcd)
                return retval;
 
        retval = ehci_halt(ehci);
-       if (retval)
+       if (retval) {
+               ehci_mem_cleanup(ehci);
                return retval;
+       }
 
        ehci_reset(ehci);
 
@@ -756,7 +761,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
                        ehci_dbg(ehci, "IAA with IAAD still set?\n");
                if (ehci->iaa_in_progress)
                        COUNT(ehci->stats.iaa);
-               end_unlink_async(ehci);
+               end_iaa_cycle(ehci);
        }
 
        /* remote wakeup [4.3.1] */
@@ -909,7 +914,7 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                 */
        } else {
                qh = (struct ehci_qh *) urb->hcpriv;
-               qh->exception = 1;
+               qh->unlink_reason |= QH_UNLINK_REQUESTED;
                switch (qh->qh_state) {
                case QH_STATE_LINKED:
                        if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT)
@@ -970,10 +975,13 @@ rescan:
                goto done;
        }
 
-       qh->exception = 1;
+       qh->unlink_reason |= QH_UNLINK_REQUESTED;
        switch (qh->qh_state) {
        case QH_STATE_LINKED:
-               WARN_ON(!list_empty(&qh->qtd_list));
+               if (list_empty(&qh->qtd_list))
+                       qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
+               else
+                       WARN_ON(1);
                if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
                        start_unlink_async(ehci, qh);
                else
@@ -1040,7 +1048,7 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
                         * re-linking will call qh_refresh().
                         */
                        usb_settoggle(qh->ps.udev, epnum, is_out, 0);
-                       qh->exception = 1;
+                       qh->unlink_reason |= QH_UNLINK_REQUESTED;
                        if (eptype == USB_ENDPOINT_XFER_BULK)
                                start_unlink_async(ehci, qh);
                        else
index 086a7115d263c673ec4a4e10abe1a386546b8338..ffc90295a95f284e7e234f121429b5f957dedc07 100644 (file)
@@ -33,6 +33,8 @@
 
 #ifdef CONFIG_PM
 
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci);
+
 static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
 {
        return !udev->maxchild && udev->persist_enabled &&
@@ -347,8 +349,10 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
                goto done;
        ehci->rh_state = EHCI_RH_SUSPENDED;
 
-       end_unlink_async(ehci);
        unlink_empty_async_suspended(ehci);
+
+       /* Any IAA cycle that started before the suspend is now invalid */
+       end_iaa_cycle(ehci);
        ehci_handle_start_intr_unlinks(ehci);
        ehci_handle_intr_unlinks(ehci);
        end_free_itds(ehci);
index c23e2858c815bbf15dc6de9bc98714349fa97177..3e226ef6ca624351c1c53c6709a22f02b2714e88 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/usb/msm_hsusb_hw.h>
 #include <linux/usb.h>
 #include <linux/usb/hcd.h>
+#include <linux/acpi.h>
 
 #include "ehci.h"
 
@@ -55,12 +56,16 @@ static int ehci_msm_reset(struct usb_hcd *hcd)
        if (retval)
                return retval;
 
+       /* select ULPI phy and clear other status/control bits in PORTSC */
+       writel(PORTSC_PTS_ULPI, USB_PORTSC);
        /* bursts of unspecified length. */
        writel(0, USB_AHBBURST);
        /* Use the AHB transactor, allow posted data writes */
        writel(0x8, USB_AHBMODE);
        /* Disable streaming mode and select host mode */
        writel(0x13, USB_USBMODE);
+       /* Disable ULPI_TX_PKT_EN_CLR_FIX which is valid only for HSIC */
+       writel(readl(USB_GENCONFIG_2) & ~ULPI_TX_PKT_EN_CLR_FIX, USB_GENCONFIG_2);
 
        return 0;
 }
@@ -104,9 +109,9 @@ static int ehci_msm_probe(struct platform_device *pdev)
        }
 
        /*
-        * OTG driver takes care of PHY initialization, clock management,
-        * powering up VBUS, mapping of registers address space and power
-        * management.
+        * If there is an OTG driver, let it take care of PHY initialization,
+        * clock management, powering up VBUS, mapping of registers address
+        * space and power management.
         */
        if (pdev->dev.of_node)
                phy = devm_usb_get_phy_by_phandle(&pdev->dev, "usb-phy", 0);
@@ -114,27 +119,35 @@ static int ehci_msm_probe(struct platform_device *pdev)
                phy = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
 
        if (IS_ERR(phy)) {
-               dev_err(&pdev->dev, "unable to find transceiver\n");
-               ret = -EPROBE_DEFER;
-               goto put_hcd;
-       }
-
-       ret = otg_set_host(phy->otg, &hcd->self);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "unable to register with transceiver\n");
-               goto put_hcd;
+               if (PTR_ERR(phy) == -EPROBE_DEFER) {
+                       dev_err(&pdev->dev, "unable to find transceiver\n");
+                       ret = -EPROBE_DEFER;
+                       goto put_hcd;
+               }
+               phy = NULL;
        }
 
        hcd->usb_phy = phy;
        device_init_wakeup(&pdev->dev, 1);
-       /*
-        * OTG device parent of HCD takes care of putting
-        * hardware into low power mode.
-        */
-       pm_runtime_no_callbacks(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
 
-       /* FIXME: need to call usb_add_hcd() here? */
+       if (phy && phy->otg) {
+               /*
+                * MSM OTG driver takes care of adding the HCD and
+                * placing hardware into low power mode via runtime PM.
+                */
+               ret = otg_set_host(phy->otg, &hcd->self);
+               if (ret < 0) {
+                       dev_err(&pdev->dev, "unable to register with transceiver\n");
+                       goto put_hcd;
+               }
+
+               pm_runtime_no_callbacks(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       } else {
+               ret = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+               if (ret)
+                       goto put_hcd;
+       }
 
        return 0;
 
@@ -152,9 +165,10 @@ static int ehci_msm_remove(struct platform_device *pdev)
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
 
-       otg_set_host(hcd->usb_phy->otg, NULL);
-
-       /* FIXME: need to call usb_remove_hcd() here? */
+       if (hcd->usb_phy && hcd->usb_phy->otg)
+               otg_set_host(hcd->usb_phy->otg, NULL);
+       else
+               usb_remove_hcd(hcd);
 
        usb_put_hcd(hcd);
 
@@ -191,6 +205,12 @@ static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
        .resume          = ehci_msm_pm_resume,
 };
 
+static const struct acpi_device_id msm_ehci_acpi_ids[] = {
+       { "QCOM8040", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, msm_ehci_acpi_ids);
+
 static const struct of_device_id msm_ehci_dt_match[] = {
        { .compatible = "qcom,ehci-host", },
        {}
@@ -200,10 +220,12 @@ MODULE_DEVICE_TABLE(of, msm_ehci_dt_match);
 static struct platform_driver ehci_msm_driver = {
        .probe  = ehci_msm_probe,
        .remove = ehci_msm_remove,
+       .shutdown = usb_hcd_platform_shutdown,
        .driver = {
                   .name = "msm_hsusb_host",
                   .pm = &ehci_msm_dev_pm_ops,
                   .of_match_table = msm_ehci_dt_match,
+                  .acpi_match_table = ACPI_PTR(msm_ehci_acpi_ids),
        },
 };
 
index 2a5d2fd76040cd6580a350d7ee8f1c4bb0fafffb..3b3649d88c5f00281123975a931f3ecd52a14289 100644 (file)
@@ -377,6 +377,12 @@ static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return usb_hcd_pci_probe(pdev, id);
 }
 
+static void ehci_pci_remove(struct pci_dev *pdev)
+{
+       pci_clear_mwi(pdev);
+	usb_hcd_pci_remove(pdev);
+}
+
 /* PCI driver selection metadata; PCI hotplugging uses this */
 static const struct pci_device_id pci_ids [] = { {
        /* handle any USB 2.0 EHCI controller */
@@ -396,7 +402,7 @@ static struct pci_driver ehci_pci_driver = {
        .id_table =     pci_ids,
 
        .probe =        ehci_pci_probe,
-       .remove =       usb_hcd_pci_remove,
+       .remove =       ehci_pci_remove,
        .shutdown =     usb_hcd_pci_shutdown,
 
 #ifdef CONFIG_PM
index bd7082f297bbe76b7db8c10ad091402c79f45786..1757ebb471b6d992d87d549fc6c61a7f375fcb71 100644 (file)
@@ -345,8 +345,7 @@ static int ehci_platform_suspend(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
        struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
-       struct platform_device *pdev =
-               container_of(dev, struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        bool do_wakeup = device_may_wakeup(dev);
        int ret;
 
@@ -364,8 +363,7 @@ static int ehci_platform_resume(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
        struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
-       struct platform_device *pdev =
-               container_of(dev, struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        struct ehci_platform_priv *priv = hcd_to_ehci_priv(hcd);
 
        if (pdata->power_on) {
index aad0777240d3b233f79b89ecd161e139647ad5d1..eca3710d8fc44833506f80f1578eb1578b6838f6 100644 (file)
@@ -394,6 +394,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
                                        goto retry_xacterr;
                                }
                                stopped = 1;
+                               qh->unlink_reason |= QH_UNLINK_HALTED;
 
                        /* magic dummy for some short reads; qh won't advance.
                         * that silicon quirk can kick in with this dummy too.
@@ -408,6 +409,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
                                        && !(qtd->hw_alt_next
                                                & EHCI_LIST_END(ehci))) {
                                stopped = 1;
+                               qh->unlink_reason |= QH_UNLINK_SHORT_READ;
                        }
 
                /* stop scanning when we reach qtds the hc is using */
@@ -420,8 +422,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
                        stopped = 1;
 
                        /* cancel everything if we halt, suspend, etc */
-                       if (ehci->rh_state < EHCI_RH_RUNNING)
+                       if (ehci->rh_state < EHCI_RH_RUNNING) {
                                last_status = -ESHUTDOWN;
+                               qh->unlink_reason |= QH_UNLINK_SHUTDOWN;
+                       }
 
                        /* this qtd is active; skip it unless a previous qtd
                         * for its urb faulted, or its urb was canceled.
@@ -538,10 +542,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
         * except maybe high bandwidth ...
         */
        if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci))
-               qh->exception = 1;
+               qh->unlink_reason |= QH_UNLINK_DUMMY_OVERLAY;
 
        /* Let the caller know if the QH needs to be unlinked. */
-       return qh->exception;
+       return qh->unlink_reason;
 }
 
 /*-------------------------------------------------------------------------*/
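Across ehci-hcd.c and ehci-q.c the boolean qh->exception becomes the qh->unlink_reason bitmask, so each path records why it wants the QH unlinked (QH_UNLINK_REQUESTED, QH_UNLINK_QUEUE_EMPTY, QH_UNLINK_HALTED, QH_UNLINK_SHORT_READ, QH_UNLINK_SHUTDOWN, QH_UNLINK_DUMMY_OVERLAY), and the unlink machinery can later tell which reasons guarantee the hardware has already stopped using the QH. A stand-alone sketch of that accumulate-and-test flag pattern (the names mirror the patch, the numeric values are invented):

        #include <stdio.h>

        /* Reasons a queue head may need unlinking; values are illustrative. */
        #define QH_UNLINK_REQUESTED     0x01
        #define QH_UNLINK_QUEUE_EMPTY   0x02
        #define QH_UNLINK_HALTED        0x04
        #define QH_UNLINK_SHORT_READ    0x08
        #define QH_UNLINK_DUMMY_OVERLAY 0x10
        #define QH_UNLINK_SHUTDOWN      0x20

        struct example_qh {
                unsigned int unlink_reason;     /* 0 means "no unlink needed" */
        };

        int main(void)
        {
                struct example_qh qh = { 0 };

                /* Different code paths OR in their own reason bits. */
                qh.unlink_reason |= QH_UNLINK_REQUESTED;
                qh.unlink_reason |= QH_UNLINK_HALTED;

                /* Some reasons imply the hardware is no longer using the QH. */
                if (qh.unlink_reason & (QH_UNLINK_HALTED |
                                        QH_UNLINK_SHORT_READ |
                                        QH_UNLINK_DUMMY_OVERLAY))
                        printf("QH certainly idle: no settle delay needed\n");

                qh.unlink_reason = 0;   /* cleared when the QH is relinked */
                return 0;
        }
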
@@ -1003,7 +1007,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 
        qh->qh_state = QH_STATE_LINKED;
        qh->xacterrs = 0;
-       qh->exception = 0;
+       qh->unlink_reason = 0;
        /* qtd completions reported later by interrupt */
 
        enable_async(ehci);
@@ -1279,17 +1283,13 @@ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 static void start_iaa_cycle(struct ehci_hcd *ehci)
 {
-       /* Do nothing if an IAA cycle is already running */
-       if (ehci->iaa_in_progress)
-               return;
-       ehci->iaa_in_progress = true;
-
        /* If the controller isn't running, we don't have to wait for it */
        if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
                end_unlink_async(ehci);
 
-       /* Otherwise start a new IAA cycle */
-       } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) {
+       /* Otherwise start a new IAA cycle if one isn't already running */
+       } else if (ehci->rh_state == EHCI_RH_RUNNING &&
+                       !ehci->iaa_in_progress) {
 
                /* Make sure the unlinks are all visible to the hardware */
                wmb();
@@ -1297,17 +1297,13 @@ static void start_iaa_cycle(struct ehci_hcd *ehci)
                ehci_writel(ehci, ehci->command | CMD_IAAD,
                                &ehci->regs->command);
                ehci_readl(ehci, &ehci->regs->command);
+               ehci->iaa_in_progress = true;
                ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true);
        }
 }
 
-/* the async qh for the qtds being unlinked are now gone from the HC */
-
-static void end_unlink_async(struct ehci_hcd *ehci)
+static void end_iaa_cycle(struct ehci_hcd *ehci)
 {
-       struct ehci_qh          *qh;
-       bool                    early_exit;
-
        if (ehci->has_synopsys_hc_bug)
                ehci_writel(ehci, (u32) ehci->async->qh_dma,
                            &ehci->regs->async_next);
@@ -1315,6 +1311,16 @@ static void end_unlink_async(struct ehci_hcd *ehci)
        /* The current IAA cycle has ended */
        ehci->iaa_in_progress = false;
 
+       end_unlink_async(ehci);
+}
+
+/* See if the async qh for the qtds being unlinked is now gone from the HC */
+
+static void end_unlink_async(struct ehci_hcd *ehci)
+{
+       struct ehci_qh          *qh;
+       bool                    early_exit;
+
        if (list_empty(&ehci->async_unlink))
                return;
        qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
@@ -1335,14 +1341,60 @@ static void end_unlink_async(struct ehci_hcd *ehci)
         * after the IAA interrupt occurs.  In self-defense, always go
         * through two IAA cycles for each QH.
         */
-       else if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
+       else if (qh->qh_state == QH_STATE_UNLINK) {
+               /*
+                * Second IAA cycle has finished.  Process only the first
+                * waiting QH (NVIDIA (?) bug).
+                */
+               list_move_tail(&qh->unlink_node, &ehci->async_idle);
+       }
+
+       /*
+        * AMD/ATI (?) bug: The HC can continue to use an active QH long
+        * after the IAA interrupt occurs.  To prevent problems, QHs that
+        * may still be active will wait until 2 ms have passed with no
+        * change to the hw_current and hw_token fields (this delay occurs
+        * between the two IAA cycles).
+        *
+        * The EHCI spec (4.8.2) says that active QHs must not be removed
+        * from the async schedule and recommends waiting until the QH
+        * goes inactive.  This is ridiculous because the QH will _never_
+        * become inactive if the endpoint NAKs indefinitely.
+        */
+
+       /* Some reasons for unlinking guarantee the QH can't be active */
+       else if (qh->unlink_reason & (QH_UNLINK_HALTED |
+                       QH_UNLINK_SHORT_READ | QH_UNLINK_DUMMY_OVERLAY))
+               goto DelayDone;
+
+       /* The QH can't be active if the queue was and still is empty... */
+       else if ((qh->unlink_reason & QH_UNLINK_QUEUE_EMPTY) &&
+                       list_empty(&qh->qtd_list))
+               goto DelayDone;
+
+       /* ... or if the QH has halted */
+       else if (qh->hw->hw_token & cpu_to_hc32(ehci, QTD_STS_HALT))
+               goto DelayDone;
+
+       /* Otherwise we have to wait until the QH stops changing */
+       else {
+               __hc32          qh_current, qh_token;
+
+               qh_current = qh->hw->hw_current;
+               qh_token = qh->hw->hw_token;
+               if (qh_current != ehci->old_current ||
+                               qh_token != ehci->old_token) {
+                       ehci->old_current = qh_current;
+                       ehci->old_token = qh_token;
+                       ehci_enable_event(ehci,
+                                       EHCI_HRTIMER_ACTIVE_UNLINK, true);
+                       return;
+               }
+ DelayDone:
                qh->qh_state = QH_STATE_UNLINK;
                early_exit = true;
        }
-
-       /* Otherwise process only the first waiting QH (NVIDIA bug?) */
-       else
-               list_move_tail(&qh->unlink_node, &ehci->async_idle);
+       ehci->old_current = ~0;         /* Prepare for next QH */
 
        /* Start a new IAA cycle if any QHs are waiting for it */
        if (!list_empty(&ehci->async_unlink))
@@ -1395,6 +1447,7 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
 
        /* If nothing else is being unlinked, unlink the last empty QH */
        if (list_empty(&ehci->async_unlink) && qh_to_unlink) {
+               qh_to_unlink->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
                start_unlink_async(ehci, qh_to_unlink);
                --count;
        }
@@ -1406,8 +1459,10 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
        }
 }
 
+#ifdef CONFIG_PM
+
 /* The root hub is suspended; unlink all the async QHs */
-static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci)
+static void unlink_empty_async_suspended(struct ehci_hcd *ehci)
 {
        struct ehci_qh          *qh;
 
@@ -1416,9 +1471,10 @@ static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci)
                WARN_ON(!list_empty(&qh->qtd_list));
                single_unlink_async(ehci, qh);
        }
-       start_iaa_cycle(ehci);
 }
 
+#endif
+
 /* makes sure the async qh will become idle */
 /* caller must own ehci->lock */
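The ehci-q.c changes above replace the single qh->exception flag with a qh->unlink_reason bitmask and move the IAA bookkeeping out of end_unlink_async() into the new end_iaa_cycle(). The QH_UNLINK_* bit values live in ehci.h and are not part of the hunks shown here, so the self-contained sketch below uses invented values; it only illustrates how recording why a QH was unlinked lets end_unlink_async() decide whether the 2 ms "still active?" wait between the two IAA cycles can be skipped:

#include <stdio.h>

/*
 * Illustrative bit assignments only: the real QH_UNLINK_* values are
 * defined in drivers/usb/host/ehci.h, outside the hunks shown above.
 */
#define QH_UNLINK_HALTED	0x01	/* queue stopped because the QH halted */
#define QH_UNLINK_SHORT_READ	0x02	/* queue stopped by a short read */
#define QH_UNLINK_DUMMY_OVERLAY	0x04	/* QH overlaid the dummy qtd */
#define QH_UNLINK_SHUTDOWN	0x08	/* root hub no longer running */
#define QH_UNLINK_QUEUE_EMPTY	0x10	/* idle QH reclaimed by the driver */

/*
 * Mirrors the test in end_unlink_async(): these reasons guarantee the HC
 * is no longer using the QH, so the ACTIVE_UNLINK delay is unnecessary.
 */
static int can_skip_active_wait(unsigned int unlink_reason)
{
	return unlink_reason & (QH_UNLINK_HALTED | QH_UNLINK_SHORT_READ |
				QH_UNLINK_DUMMY_OVERLAY);
}

int main(void)
{
	unsigned int reason = QH_UNLINK_QUEUE_EMPTY;	/* e.g. set by unlink_empty_async() */

	reason |= QH_UNLINK_SHORT_READ;			/* e.g. set by qh_completions() */
	printf("skip 2 ms wait: %s\n", can_skip_active_wait(reason) ? "yes" : "no");
	return 0;
}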
 
index f9a332775c4781e57faf7bd584d5d58cbf5397e3..1dfe54f147370711da9884066164cc8913a4c5ee 100644 (file)
@@ -34,7 +34,7 @@
  * pre-calculated schedule data to make appending to the queue be quick.
  */
 
-static int ehci_get_frame (struct usb_hcd *hcd);
+static int ehci_get_frame(struct usb_hcd *hcd);
 
 /*
  * periodic_next_shadow - return "next" pointer on shadow list
@@ -52,7 +52,7 @@ periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
                return &periodic->fstn->fstn_next;
        case Q_TYPE_ITD:
                return &periodic->itd->itd_next;
-       // case Q_TYPE_SITD:
+       /* case Q_TYPE_SITD: */
        default:
                return &periodic->sitd->sitd_next;
        }
@@ -73,7 +73,7 @@ shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
 }
 
 /* caller must hold ehci->lock */
-static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
+static void periodic_unlink(struct ehci_hcd *ehci, unsigned frame, void *ptr)
 {
        union ehci_shadow       *prev_p = &ehci->pshadow[frame];
        __hc32                  *hw_p = &ehci->periodic[frame];
@@ -296,10 +296,9 @@ static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
                                if (x <= 125) {
                                        budget_line[uf] = x;
                                        break;
-                               } else {
-                                       budget_line[uf] = 125;
-                                       x -= 125;
                                }
+                               budget_line[uf] = 125;
+                               x -= 125;
                        }
                }
        }
@@ -330,7 +329,8 @@ static int __maybe_unused same_tt(struct usb_device *dev1,
  */
 static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask)
 {
-       unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask);
+       unsigned char smask = hc32_to_cpu(ehci, mask) & QH_SMASK;
+
        if (!smask) {
                ehci_err(ehci, "invalid empty smask!\n");
                /* uframe 7 can't have bw so this will indicate failure */
@@ -346,7 +346,8 @@ max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 };
 static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
 {
        int i;
-       for (i=0; i<7; i++) {
+
+       for (i = 0; i < 7; i++) {
                if (max_tt_usecs[i] < tt_usecs[i]) {
                        tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i];
                        tt_usecs[i] = max_tt_usecs[i];
@@ -375,7 +376,7 @@ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8])
  * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4,
  * since proper scheduling limits ssplits to less than 16 per uframe.
  */
-static int tt_available (
+static int tt_available(
        struct ehci_hcd         *ehci,
        struct ehci_per_sched   *ps,
        struct ehci_tt          *tt,
@@ -409,11 +410,11 @@ static int tt_available (
                 * must be empty, so as to not illegally delay
                 * already scheduled transactions
                 */
-               if (125 < usecs) {
+               if (usecs > 125) {
                        int ufs = (usecs / 125);
 
                        for (i = uframe; i < (uframe + ufs) && i < 8; i++)
-                               if (0 < tt_usecs[i])
+                               if (tt_usecs[i] > 0)
                                        return 0;
                }
 
@@ -435,7 +436,7 @@ static int tt_available (
  * for a periodic transfer starting at the specified frame, using
  * all the uframes in the mask.
  */
-static int tt_no_collision (
+static int tt_no_collision(
        struct ehci_hcd         *ehci,
        unsigned                period,
        struct usb_device       *dev,
@@ -455,8 +456,8 @@ static int tt_no_collision (
                __hc32                  type;
                struct ehci_qh_hw       *hw;
 
-               here = ehci->pshadow [frame];
-               type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
+               here = ehci->pshadow[frame];
+               type = Q_NEXT_TYPE(ehci, ehci->periodic[frame]);
                while (here.ptr) {
                        switch (hc32_to_cpu(ehci, type)) {
                        case Q_TYPE_ITD:
@@ -479,7 +480,7 @@ static int tt_no_collision (
                                here = here.qh->qh_next;
                                continue;
                        case Q_TYPE_SITD:
-                               if (same_tt (dev, here.sitd->urb->dev)) {
+                               if (same_tt(dev, here.sitd->urb->dev)) {
                                        u16             mask;
 
                                        mask = hc32_to_cpu(ehci, here.sitd
@@ -492,9 +493,9 @@ static int tt_no_collision (
                                type = Q_NEXT_TYPE(ehci, here.sitd->hw_next);
                                here = here.sitd->sitd_next;
                                continue;
-                       // case Q_TYPE_FSTN:
+                       /* case Q_TYPE_FSTN: */
                        default:
-                               ehci_dbg (ehci,
+                               ehci_dbg(ehci,
                                        "periodic frame %d bogus type %d\n",
                                        frame, type);
                        }
@@ -588,14 +589,14 @@ static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
                        qh->qh_next = here;
                        if (here.qh)
                                qh->hw->hw_next = *hw_p;
-                       wmb ();
+                       wmb();
                        prev->qh = qh;
-                       *hw_p = QH_NEXT (ehci, qh->qh_dma);
+                       *hw_p = QH_NEXT(ehci, qh->qh_dma);
                }
        }
        qh->qh_state = QH_STATE_LINKED;
        qh->xacterrs = 0;
-       qh->exception = 0;
+       qh->unlink_reason = 0;
 
        /* update per-qh bandwidth for debugfs */
        ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period
@@ -633,7 +634,7 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
        period = qh->ps.period ? : 1;
 
        for (i = qh->ps.phase; i < ehci->periodic_size; i += period)
-               periodic_unlink (ehci, i, qh);
+               periodic_unlink(ehci, i, qh);
 
        /* update per-qh bandwidth for debugfs */
        ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period
@@ -679,7 +680,7 @@ static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
        /* if the qh is waiting for unlink, cancel it now */
        cancel_unlink_wait_intr(ehci, qh);
 
-       qh_unlink_periodic (ehci, qh);
+       qh_unlink_periodic(ehci, qh);
 
        /* Make sure the unlinks are visible before starting the timer */
        wmb();
@@ -763,7 +764,7 @@ static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 /*-------------------------------------------------------------------------*/
 
-static int check_period (
+static int check_period(
        struct ehci_hcd *ehci,
        unsigned        frame,
        unsigned        uframe,
@@ -785,11 +786,11 @@ static int check_period (
                        return 0;
        }
 
-       // success!
+       /* success! */
        return 1;
 }
 
-static int check_intr_schedule (
+static int check_intr_schedule(
        struct ehci_hcd         *ehci,
        unsigned                frame,
        unsigned                uframe,
@@ -925,7 +926,7 @@ done:
        return status;
 }
 
-static int intr_submit (
+static int intr_submit(
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct list_head        *qtd_list,
@@ -940,7 +941,7 @@ static int intr_submit (
        /* get endpoint and transfer/schedule data */
        epnum = urb->ep->desc.bEndpointAddress;
 
-       spin_lock_irqsave (&ehci->lock, flags);
+       spin_lock_irqsave(&ehci->lock, flags);
 
        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
                status = -ESHUTDOWN;
@@ -951,20 +952,21 @@ static int intr_submit (
                goto done_not_linked;
 
        /* get qh and force any scheduling errors */
-       INIT_LIST_HEAD (&empty);
+       INIT_LIST_HEAD(&empty);
        qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv);
        if (qh == NULL) {
                status = -ENOMEM;
                goto done;
        }
        if (qh->qh_state == QH_STATE_IDLE) {
-               if ((status = qh_schedule (ehci, qh)) != 0)
+               status = qh_schedule(ehci, qh);
+               if (status)
                        goto done;
        }
 
        /* then queue the urb's tds to the qh */
        qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
-       BUG_ON (qh == NULL);
+       BUG_ON(qh == NULL);
 
        /* stuff into the periodic schedule */
        if (qh->qh_state == QH_STATE_IDLE) {
@@ -982,9 +984,9 @@ done:
        if (unlikely(status))
                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
 done_not_linked:
-       spin_unlock_irqrestore (&ehci->lock, flags);
+       spin_unlock_irqrestore(&ehci->lock, flags);
        if (status)
-               qtd_list_free (ehci, urb, qtd_list);
+               qtd_list_free(ehci, urb, qtd_list);
 
        return status;
 }
@@ -1022,12 +1024,12 @@ static void scan_intr(struct ehci_hcd *ehci)
 /* ehci_iso_stream ops work with both ITD and SITD */
 
 static struct ehci_iso_stream *
-iso_stream_alloc (gfp_t mem_flags)
+iso_stream_alloc(gfp_t mem_flags)
 {
        struct ehci_iso_stream *stream;
 
-       stream = kzalloc(sizeof *stream, mem_flags);
-       if (likely (stream != NULL)) {
+       stream = kzalloc(sizeof(*stream), mem_flags);
+       if (likely(stream != NULL)) {
                INIT_LIST_HEAD(&stream->td_list);
                INIT_LIST_HEAD(&stream->free_list);
                stream->next_uframe = NO_FRAME;
@@ -1037,13 +1039,13 @@ iso_stream_alloc (gfp_t mem_flags)
 }
 
 static void
-iso_stream_init (
+iso_stream_init(
        struct ehci_hcd         *ehci,
        struct ehci_iso_stream  *stream,
        struct urb              *urb
 )
 {
-       static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
+       static const u8 smask_out[] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
 
        struct usb_device       *dev = urb->dev;
        u32                     buf1;
@@ -1058,11 +1060,7 @@ iso_stream_init (
        epnum = usb_pipeendpoint(urb->pipe);
        is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0;
        maxp = usb_endpoint_maxp(&urb->ep->desc);
-       if (is_input) {
-               buf1 = (1 << 11);
-       } else {
-               buf1 = 0;
-       }
+       buf1 = is_input ? 1 << 11 : 0;
 
        /* knows about ITD vs SITD */
        if (dev->speed == USB_SPEED_HIGH) {
@@ -1111,7 +1109,7 @@ iso_stream_init (
                think_time = dev->tt ? dev->tt->think_time : 0;
                stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time(
                                dev->speed, is_input, 1, maxp));
-               hs_transfers = max (1u, (maxp + 187) / 188);
+               hs_transfers = max(1u, (maxp + 187) / 188);
                if (is_input) {
                        u32     tmp;
 
@@ -1151,7 +1149,7 @@ iso_stream_init (
 }
 
 static struct ehci_iso_stream *
-iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
+iso_stream_find(struct ehci_hcd *ehci, struct urb *urb)
 {
        unsigned                epnum;
        struct ehci_iso_stream  *stream;
@@ -1164,25 +1162,25 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
        else
                ep = urb->dev->ep_out[epnum];
 
-       spin_lock_irqsave (&ehci->lock, flags);
+       spin_lock_irqsave(&ehci->lock, flags);
        stream = ep->hcpriv;
 
-       if (unlikely (stream == NULL)) {
+       if (unlikely(stream == NULL)) {
                stream = iso_stream_alloc(GFP_ATOMIC);
-               if (likely (stream != NULL)) {
+               if (likely(stream != NULL)) {
                        ep->hcpriv = stream;
                        iso_stream_init(ehci, stream, urb);
                }
 
        /* if dev->ep [epnum] is a QH, hw is set */
-       } else if (unlikely (stream->hw != NULL)) {
-               ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
+       } else if (unlikely(stream->hw != NULL)) {
+               ehci_dbg(ehci, "dev %s ep%d%s, not iso??\n",
                        urb->dev->devpath, epnum,
                        usb_pipein(urb->pipe) ? "in" : "out");
                stream = NULL;
        }
 
-       spin_unlock_irqrestore (&ehci->lock, flags);
+       spin_unlock_irqrestore(&ehci->lock, flags);
        return stream;
 }
 
@@ -1191,16 +1189,16 @@ iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
 /* ehci_iso_sched ops can be ITD-only or SITD-only */
 
 static struct ehci_iso_sched *
-iso_sched_alloc (unsigned packets, gfp_t mem_flags)
+iso_sched_alloc(unsigned packets, gfp_t mem_flags)
 {
        struct ehci_iso_sched   *iso_sched;
-       int                     size = sizeof *iso_sched;
+       int                     size = sizeof(*iso_sched);
 
-       size += packets * sizeof (struct ehci_iso_packet);
+       size += packets * sizeof(struct ehci_iso_packet);
        iso_sched = kzalloc(size, mem_flags);
-       if (likely (iso_sched != NULL)) {
-               INIT_LIST_HEAD (&iso_sched->td_list);
-       }
+       if (likely(iso_sched != NULL))
+               INIT_LIST_HEAD(&iso_sched->td_list);
+
        return iso_sched;
 }
 
@@ -1222,17 +1220,17 @@ itd_sched_init(
         * when we fit new itds into the schedule.
         */
        for (i = 0; i < urb->number_of_packets; i++) {
-               struct ehci_iso_packet  *uframe = &iso_sched->packet [i];
+               struct ehci_iso_packet  *uframe = &iso_sched->packet[i];
                unsigned                length;
                dma_addr_t              buf;
                u32                     trans;
 
-               length = urb->iso_frame_desc [i].length;
-               buf = dma + urb->iso_frame_desc [i].offset;
+               length = urb->iso_frame_desc[i].length;
+               buf = dma + urb->iso_frame_desc[i].offset;
 
                trans = EHCI_ISOC_ACTIVE;
                trans |= buf & 0x0fff;
-               if (unlikely (((i + 1) == urb->number_of_packets))
+               if (unlikely(((i + 1) == urb->number_of_packets))
                                && !(urb->transfer_flags & URB_NO_INTERRUPT))
                        trans |= EHCI_ITD_IOC;
                trans |= length << 16;
@@ -1241,26 +1239,26 @@ itd_sched_init(
                /* might need to cross a buffer page within a uframe */
                uframe->bufp = (buf & ~(u64)0x0fff);
                buf += length;
-               if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
+               if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
                        uframe->cross = 1;
        }
 }
 
 static void
-iso_sched_free (
+iso_sched_free(
        struct ehci_iso_stream  *stream,
        struct ehci_iso_sched   *iso_sched
 )
 {
        if (!iso_sched)
                return;
-       // caller must hold ehci->lock!
-       list_splice (&iso_sched->td_list, &stream->free_list);
-       kfree (iso_sched);
+       /* caller must hold ehci->lock! */
+       list_splice(&iso_sched->td_list, &stream->free_list);
+       kfree(iso_sched);
 }
 
 static int
-itd_urb_transaction (
+itd_urb_transaction(
        struct ehci_iso_stream  *stream,
        struct ehci_hcd         *ehci,
        struct urb              *urb,
@@ -1274,8 +1272,8 @@ itd_urb_transaction (
        struct ehci_iso_sched   *sched;
        unsigned long           flags;
 
-       sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
-       if (unlikely (sched == NULL))
+       sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
+       if (unlikely(sched == NULL))
                return -ENOMEM;
 
        itd_sched_init(ehci, sched, stream, urb);
@@ -1286,7 +1284,7 @@ itd_urb_transaction (
                num_itds = urb->number_of_packets;
 
        /* allocate/init ITDs */
-       spin_lock_irqsave (&ehci->lock, flags);
+       spin_lock_irqsave(&ehci->lock, flags);
        for (i = 0; i < num_itds; i++) {
 
                /*
@@ -1298,14 +1296,14 @@ itd_urb_transaction (
                                        struct ehci_itd, itd_list);
                        if (itd->frame == ehci->now_frame)
                                goto alloc_itd;
-                       list_del (&itd->itd_list);
+                       list_del(&itd->itd_list);
                        itd_dma = itd->itd_dma;
                } else {
  alloc_itd:
-                       spin_unlock_irqrestore (&ehci->lock, flags);
-                       itd = dma_pool_alloc (ehci->itd_pool, mem_flags,
+                       spin_unlock_irqrestore(&ehci->lock, flags);
+                       itd = dma_pool_alloc(ehci->itd_pool, mem_flags,
                                        &itd_dma);
-                       spin_lock_irqsave (&ehci->lock, flags);
+                       spin_lock_irqsave(&ehci->lock, flags);
                        if (!itd) {
                                iso_sched_free(stream, sched);
                                spin_unlock_irqrestore(&ehci->lock, flags);
@@ -1313,12 +1311,12 @@ itd_urb_transaction (
                        }
                }
 
-               memset (itd, 0, sizeof *itd);
+               memset(itd, 0, sizeof(*itd));
                itd->itd_dma = itd_dma;
                itd->frame = NO_FRAME;
-               list_add (&itd->itd_list, &sched->td_list);
+               list_add(&itd->itd_list, &sched->td_list);
        }
-       spin_unlock_irqrestore (&ehci->lock, flags);
+       spin_unlock_irqrestore(&ehci->lock, flags);
 
        /* temporarily store schedule info in hcpriv */
        urb->hcpriv = sched;
@@ -1385,7 +1383,7 @@ static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci,
 }
 
 static inline int
-itd_slot_ok (
+itd_slot_ok(
        struct ehci_hcd         *ehci,
        struct ehci_iso_stream  *stream,
        unsigned                uframe
@@ -1405,7 +1403,7 @@ itd_slot_ok (
 }
 
 static inline int
-sitd_slot_ok (
+sitd_slot_ok(
        struct ehci_hcd         *ehci,
        struct ehci_iso_stream  *stream,
        unsigned                uframe,
@@ -1492,7 +1490,7 @@ sitd_slot_ok (
  */
 
 static int
-iso_stream_schedule (
+iso_stream_schedule(
        struct ehci_hcd         *ehci,
        struct urb              *urb,
        struct ehci_iso_stream  *stream
@@ -1693,9 +1691,9 @@ itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream,
 
        /* it's been recently zeroed */
        itd->hw_next = EHCI_LIST_END(ehci);
-       itd->hw_bufp [0] = stream->buf0;
-       itd->hw_bufp [1] = stream->buf1;
-       itd->hw_bufp [2] = stream->buf2;
+       itd->hw_bufp[0] = stream->buf0;
+       itd->hw_bufp[1] = stream->buf1;
+       itd->hw_bufp[2] = stream->buf2;
 
        for (i = 0; i < 8; i++)
                itd->index[i] = -1;
@@ -1712,13 +1710,13 @@ itd_patch(
        u16                     uframe
 )
 {
-       struct ehci_iso_packet  *uf = &iso_sched->packet [index];
+       struct ehci_iso_packet  *uf = &iso_sched->packet[index];
        unsigned                pg = itd->pg;
 
-       // BUG_ON (pg == 6 && uf->cross);
+       /* BUG_ON(pg == 6 && uf->cross); */
 
        uframe &= 0x07;
-       itd->index [uframe] = index;
+       itd->index[uframe] = index;
 
        itd->hw_transaction[uframe] = uf->transaction;
        itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12);
@@ -1726,7 +1724,7 @@ itd_patch(
        itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32));
 
        /* iso_frame_desc[].offset must be strictly increasing */
-       if (unlikely (uf->cross)) {
+       if (unlikely(uf->cross)) {
                u64     bufp = uf->bufp + 4096;
 
                itd->pg = ++pg;
@@ -1736,7 +1734,7 @@ itd_patch(
 }
 
 static inline void
-itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
+itd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
 {
        union ehci_shadow       *prev = &ehci->pshadow[frame];
        __hc32                  *hw_p = &ehci->periodic[frame];
@@ -1757,7 +1755,7 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
        itd->hw_next = *hw_p;
        prev->itd = itd;
        itd->frame = frame;
-       wmb ();
+       wmb();
        *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
 }
 
@@ -1776,7 +1774,7 @@ static void itd_link_urb(
 
        next_uframe = stream->next_uframe & (mod - 1);
 
-       if (unlikely (list_empty(&stream->td_list)))
+       if (unlikely(list_empty(&stream->td_list)))
                ehci_to_hcd(ehci)->self.bandwidth_allocated
                                += stream->bandwidth;
 
@@ -1792,16 +1790,16 @@ static void itd_link_urb(
                        packet < urb->number_of_packets;) {
                if (itd == NULL) {
                        /* ASSERT:  we have all necessary itds */
-                       // BUG_ON (list_empty (&iso_sched->td_list));
+                       /* BUG_ON(list_empty(&iso_sched->td_list)); */
 
                        /* ASSERT:  no itds for this endpoint in this uframe */
 
-                       itd = list_entry (iso_sched->td_list.next,
+                       itd = list_entry(iso_sched->td_list.next,
                                        struct ehci_itd, itd_list);
-                       list_move_tail (&itd->itd_list, &stream->td_list);
+                       list_move_tail(&itd->itd_list, &stream->td_list);
                        itd->stream = stream;
                        itd->urb = urb;
-                       itd_init (ehci, stream, itd);
+                       itd_init(ehci, stream, itd);
                }
 
                uframe = next_uframe & 0x07;
@@ -1823,7 +1821,7 @@ static void itd_link_urb(
        stream->next_uframe = next_uframe;
 
        /* don't need that schedule data any more */
-       iso_sched_free (stream, iso_sched);
+       iso_sched_free(stream, iso_sched);
        urb->hcpriv = stream;
 
        ++ehci->isoc_count;
@@ -1855,19 +1853,19 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
 
        /* for each uframe with a packet */
        for (uframe = 0; uframe < 8; uframe++) {
-               if (likely (itd->index[uframe] == -1))
+               if (likely(itd->index[uframe] == -1))
                        continue;
                urb_index = itd->index[uframe];
-               desc = &urb->iso_frame_desc [urb_index];
+               desc = &urb->iso_frame_desc[urb_index];
 
-               t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]);
-               itd->hw_transaction [uframe] = 0;
+               t = hc32_to_cpup(ehci, &itd->hw_transaction[uframe]);
+               itd->hw_transaction[uframe] = 0;
 
                /* report transfer status */
-               if (unlikely (t & ISO_ERRS)) {
+               if (unlikely(t & ISO_ERRS)) {
                        urb->error_count++;
                        if (t & EHCI_ISOC_BUF_ERR)
-                               desc->status = usb_pipein (urb->pipe)
+                               desc->status = usb_pipein(urb->pipe)
                                        ? -ENOSR  /* hc couldn't read */
                                        : -ECOMM; /* hc couldn't write */
                        else if (t & EHCI_ISOC_BABBLE)
@@ -1880,7 +1878,7 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
                                desc->actual_length = EHCI_ITD_LENGTH(t);
                                urb->actual_length += desc->actual_length;
                        }
-               } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
+               } else if (likely((t & EHCI_ISOC_ACTIVE) == 0)) {
                        desc->status = 0;
                        desc->actual_length = EHCI_ITD_LENGTH(t);
                        urb->actual_length += desc->actual_length;
@@ -1891,12 +1889,13 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
        }
 
        /* handle completion now? */
-       if (likely ((urb_index + 1) != urb->number_of_packets))
+       if (likely((urb_index + 1) != urb->number_of_packets))
                goto done;
 
-       /* ASSERT: it's really the last itd for this urb
-       list_for_each_entry (itd, &stream->td_list, itd_list)
-               BUG_ON (itd->urb == urb);
+       /*
+        * ASSERT: it's really the last itd for this urb
+        * list_for_each_entry (itd, &stream->td_list, itd_list)
+        *       BUG_ON(itd->urb == urb);
         */
 
        /* give urb back to the driver; completion often (re)submits */
@@ -1936,7 +1935,7 @@ done:
 
 /*-------------------------------------------------------------------------*/
 
-static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
+static int itd_submit(struct ehci_hcd *ehci, struct urb *urb,
        gfp_t mem_flags)
 {
        int                     status = -EINVAL;
@@ -1944,37 +1943,37 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
        struct ehci_iso_stream  *stream;
 
        /* Get iso_stream head */
-       stream = iso_stream_find (ehci, urb);
-       if (unlikely (stream == NULL)) {
-               ehci_dbg (ehci, "can't get iso stream\n");
+       stream = iso_stream_find(ehci, urb);
+       if (unlikely(stream == NULL)) {
+               ehci_dbg(ehci, "can't get iso stream\n");
                return -ENOMEM;
        }
        if (unlikely(urb->interval != stream->uperiod)) {
-               ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
+               ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
                        stream->uperiod, urb->interval);
                goto done;
        }
 
 #ifdef EHCI_URB_TRACE
-       ehci_dbg (ehci,
+       ehci_dbg(ehci,
                "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
                __func__, urb->dev->devpath, urb,
-               usb_pipeendpoint (urb->pipe),
-               usb_pipein (urb->pipe) ? "in" : "out",
+               usb_pipeendpoint(urb->pipe),
+               usb_pipein(urb->pipe) ? "in" : "out",
                urb->transfer_buffer_length,
                urb->number_of_packets, urb->interval,
                stream);
 #endif
 
        /* allocate ITDs w/o locking anything */
-       status = itd_urb_transaction (stream, ehci, urb, mem_flags);
-       if (unlikely (status < 0)) {
-               ehci_dbg (ehci, "can't init itds\n");
+       status = itd_urb_transaction(stream, ehci, urb, mem_flags);
+       if (unlikely(status < 0)) {
+               ehci_dbg(ehci, "can't init itds\n");
                goto done;
        }
 
        /* schedule ... need to lock */
-       spin_lock_irqsave (&ehci->lock, flags);
+       spin_lock_irqsave(&ehci->lock, flags);
        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
                status = -ESHUTDOWN;
                goto done_not_linked;
@@ -1984,7 +1983,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
                goto done_not_linked;
        status = iso_stream_schedule(ehci, urb, stream);
        if (likely(status == 0)) {
-               itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
+               itd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
        } else if (status > 0) {
                status = 0;
                ehci_urb_done(ehci, urb, 0);
@@ -1992,7 +1991,7 @@ static int itd_submit (struct ehci_hcd *ehci, struct urb *urb,
                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
        }
  done_not_linked:
-       spin_unlock_irqrestore (&ehci->lock, flags);
+       spin_unlock_irqrestore(&ehci->lock, flags);
  done:
        return status;
 }
@@ -2022,13 +2021,13 @@ sitd_sched_init(
         * when we fit new sitds into the schedule.
         */
        for (i = 0; i < urb->number_of_packets; i++) {
-               struct ehci_iso_packet  *packet = &iso_sched->packet [i];
+               struct ehci_iso_packet  *packet = &iso_sched->packet[i];
                unsigned                length;
                dma_addr_t              buf;
                u32                     trans;
 
-               length = urb->iso_frame_desc [i].length & 0x03ff;
-               buf = dma + urb->iso_frame_desc [i].offset;
+               length = urb->iso_frame_desc[i].length & 0x03ff;
+               buf = dma + urb->iso_frame_desc[i].offset;
 
                trans = SITD_STS_ACTIVE;
                if (((i + 1) == urb->number_of_packets)
@@ -2054,7 +2053,7 @@ sitd_sched_init(
 }
 
 static int
-sitd_urb_transaction (
+sitd_urb_transaction(
        struct ehci_iso_stream  *stream,
        struct ehci_hcd         *ehci,
        struct urb              *urb,
@@ -2067,14 +2066,14 @@ sitd_urb_transaction (
        struct ehci_iso_sched   *iso_sched;
        unsigned long           flags;
 
-       iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags);
+       iso_sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
        if (iso_sched == NULL)
                return -ENOMEM;
 
        sitd_sched_init(ehci, iso_sched, stream, urb);
 
        /* allocate/init sITDs */
-       spin_lock_irqsave (&ehci->lock, flags);
+       spin_lock_irqsave(&ehci->lock, flags);
        for (i = 0; i < urb->number_of_packets; i++) {
 
                /* NOTE:  for now, we don't try to handle wraparound cases
@@ -2091,14 +2090,14 @@ sitd_urb_transaction (
                                         struct ehci_sitd, sitd_list);
                        if (sitd->frame == ehci->now_frame)
                                goto alloc_sitd;
-                       list_del (&sitd->sitd_list);
+                       list_del(&sitd->sitd_list);
                        sitd_dma = sitd->sitd_dma;
                } else {
  alloc_sitd:
-                       spin_unlock_irqrestore (&ehci->lock, flags);
-                       sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags,
+                       spin_unlock_irqrestore(&ehci->lock, flags);
+                       sitd = dma_pool_alloc(ehci->sitd_pool, mem_flags,
                                        &sitd_dma);
-                       spin_lock_irqsave (&ehci->lock, flags);
+                       spin_lock_irqsave(&ehci->lock, flags);
                        if (!sitd) {
                                iso_sched_free(stream, iso_sched);
                                spin_unlock_irqrestore(&ehci->lock, flags);
@@ -2106,17 +2105,17 @@ sitd_urb_transaction (
                        }
                }
 
-               memset (sitd, 0, sizeof *sitd);
+               memset(sitd, 0, sizeof(*sitd));
                sitd->sitd_dma = sitd_dma;
                sitd->frame = NO_FRAME;
-               list_add (&sitd->sitd_list, &iso_sched->td_list);
+               list_add(&sitd->sitd_list, &iso_sched->td_list);
        }
 
        /* temporarily store schedule info in hcpriv */
        urb->hcpriv = iso_sched;
        urb->error_count = 0;
 
-       spin_unlock_irqrestore (&ehci->lock, flags);
+       spin_unlock_irqrestore(&ehci->lock, flags);
        return 0;
 }
 
@@ -2131,8 +2130,8 @@ sitd_patch(
        unsigned                index
 )
 {
-       struct ehci_iso_packet  *uf = &iso_sched->packet [index];
-       u64                     bufp = uf->bufp;
+       struct ehci_iso_packet  *uf = &iso_sched->packet[index];
+       u64                     bufp;
 
        sitd->hw_next = EHCI_LIST_END(ehci);
        sitd->hw_fullspeed_ep = stream->address;
@@ -2152,14 +2151,14 @@ sitd_patch(
 }
 
 static inline void
-sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
+sitd_link(struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd)
 {
        /* note: sitd ordering could matter (CSPLIT then SSPLIT) */
-       sitd->sitd_next = ehci->pshadow [frame];
-       sitd->hw_next = ehci->periodic [frame];
-       ehci->pshadow [frame].sitd = sitd;
+       sitd->sitd_next = ehci->pshadow[frame];
+       sitd->hw_next = ehci->periodic[frame];
+       ehci->pshadow[frame].sitd = sitd;
        sitd->frame = frame;
-       wmb ();
+       wmb();
        ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD);
 }
 
@@ -2196,13 +2195,13 @@ static void sitd_link_urb(
                        packet++) {
 
                /* ASSERT:  we have all necessary sitds */
-               BUG_ON (list_empty (&sched->td_list));
+               BUG_ON(list_empty(&sched->td_list));
 
                /* ASSERT:  no itds for this endpoint in this frame */
 
-               sitd = list_entry (sched->td_list.next,
+               sitd = list_entry(sched->td_list.next,
                                struct ehci_sitd, sitd_list);
-               list_move_tail (&sitd->sitd_list, &stream->td_list);
+               list_move_tail(&sitd->sitd_list, &stream->td_list);
                sitd->stream = stream;
                sitd->urb = urb;
 
@@ -2215,7 +2214,7 @@ static void sitd_link_urb(
        stream->next_uframe = next_uframe & (mod - 1);
 
        /* don't need that schedule data any more */
-       iso_sched_free (stream, sched);
+       iso_sched_free(stream, sched);
        urb->hcpriv = stream;
 
        ++ehci->isoc_count;
@@ -2242,20 +2241,20 @@ static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
        struct urb                              *urb = sitd->urb;
        struct usb_iso_packet_descriptor        *desc;
        u32                                     t;
-       int                                     urb_index = -1;
+       int                                     urb_index;
        struct ehci_iso_stream                  *stream = sitd->stream;
        struct usb_device                       *dev;
        bool                                    retval = false;
 
        urb_index = sitd->index;
-       desc = &urb->iso_frame_desc [urb_index];
+       desc = &urb->iso_frame_desc[urb_index];
        t = hc32_to_cpup(ehci, &sitd->hw_results);
 
        /* report transfer status */
        if (unlikely(t & SITD_ERRS)) {
                urb->error_count++;
                if (t & SITD_STS_DBE)
-                       desc->status = usb_pipein (urb->pipe)
+                       desc->status = usb_pipein(urb->pipe)
                                ? -ENOSR  /* hc couldn't read */
                                : -ECOMM; /* hc couldn't write */
                else if (t & SITD_STS_BABBLE)
@@ -2275,9 +2274,10 @@ static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
        if ((urb_index + 1) != urb->number_of_packets)
                goto done;
 
-       /* ASSERT: it's really the last sitd for this urb
-       list_for_each_entry (sitd, &stream->td_list, sitd_list)
-               BUG_ON (sitd->urb == urb);
+       /*
+        * ASSERT: it's really the last sitd for this urb
+        * list_for_each_entry (sitd, &stream->td_list, sitd_list)
+        *       BUG_ON(sitd->urb == urb);
         */
 
        /* give urb back to the driver; completion often (re)submits */
@@ -2316,7 +2316,7 @@ done:
 }
 
 
-static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
+static int sitd_submit(struct ehci_hcd *ehci, struct urb *urb,
        gfp_t mem_flags)
 {
        int                     status = -EINVAL;
@@ -2324,35 +2324,35 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
        struct ehci_iso_stream  *stream;
 
        /* Get iso_stream head */
-       stream = iso_stream_find (ehci, urb);
+       stream = iso_stream_find(ehci, urb);
        if (stream == NULL) {
-               ehci_dbg (ehci, "can't get iso stream\n");
+               ehci_dbg(ehci, "can't get iso stream\n");
                return -ENOMEM;
        }
        if (urb->interval != stream->ps.period) {
-               ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
+               ehci_dbg(ehci, "can't change iso interval %d --> %d\n",
                        stream->ps.period, urb->interval);
                goto done;
        }
 
 #ifdef EHCI_URB_TRACE
-       ehci_dbg (ehci,
+       ehci_dbg(ehci,
                "submit %p dev%s ep%d%s-iso len %d\n",
                urb, urb->dev->devpath,
-               usb_pipeendpoint (urb->pipe),
-               usb_pipein (urb->pipe) ? "in" : "out",
+               usb_pipeendpoint(urb->pipe),
+               usb_pipein(urb->pipe) ? "in" : "out",
                urb->transfer_buffer_length);
 #endif
 
        /* allocate SITDs */
-       status = sitd_urb_transaction (stream, ehci, urb, mem_flags);
+       status = sitd_urb_transaction(stream, ehci, urb, mem_flags);
        if (status < 0) {
-               ehci_dbg (ehci, "can't init sitds\n");
+               ehci_dbg(ehci, "can't init sitds\n");
                goto done;
        }
 
        /* schedule ... need to lock */
-       spin_lock_irqsave (&ehci->lock, flags);
+       spin_lock_irqsave(&ehci->lock, flags);
        if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) {
                status = -ESHUTDOWN;
                goto done_not_linked;
@@ -2362,7 +2362,7 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
                goto done_not_linked;
        status = iso_stream_schedule(ehci, urb, stream);
        if (likely(status == 0)) {
-               sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
+               sitd_link_urb(ehci, urb, ehci->periodic_size << 3, stream);
        } else if (status > 0) {
                status = 0;
                ehci_urb_done(ehci, urb, 0);
@@ -2370,7 +2370,7 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
                usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
        }
  done_not_linked:
-       spin_unlock_irqrestore (&ehci->lock, flags);
+       spin_unlock_irqrestore(&ehci->lock, flags);
  done:
        return status;
 }
@@ -2379,9 +2379,11 @@ static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb,
 
 static void scan_isoc(struct ehci_hcd *ehci)
 {
-       unsigned        uf, now_frame, frame;
-       unsigned        fmask = ehci->periodic_size - 1;
-       bool            modified, live;
+       unsigned                uf, now_frame, frame;
+       unsigned                fmask = ehci->periodic_size - 1;
+       bool                    modified, live;
+       union ehci_shadow       q, *q_p;
+       __hc32                  type, *hw_p;
 
        /*
         * When running, scan from last scan point up to "now"
@@ -2399,119 +2401,117 @@ static void scan_isoc(struct ehci_hcd *ehci)
        ehci->now_frame = now_frame;
 
        frame = ehci->last_iso_frame;
-       for (;;) {
-               union ehci_shadow       q, *q_p;
-               __hc32                  type, *hw_p;
 
 restart:
-               /* scan each element in frame's queue for completions */
-               q_p = &ehci->pshadow [frame];
-               hw_p = &ehci->periodic [frame];
-               q.ptr = q_p->ptr;
-               type = Q_NEXT_TYPE(ehci, *hw_p);
-               modified = false;
-
-               while (q.ptr != NULL) {
-                       switch (hc32_to_cpu(ehci, type)) {
-                       case Q_TYPE_ITD:
-                               /* If this ITD is still active, leave it for
-                                * later processing ... check the next entry.
-                                * No need to check for activity unless the
-                                * frame is current.
-                                */
-                               if (frame == now_frame && live) {
-                                       rmb();
-                                       for (uf = 0; uf < 8; uf++) {
-                                               if (q.itd->hw_transaction[uf] &
-                                                           ITD_ACTIVE(ehci))
-                                                       break;
-                                       }
-                                       if (uf < 8) {
-                                               q_p = &q.itd->itd_next;
-                                               hw_p = &q.itd->hw_next;
-                                               type = Q_NEXT_TYPE(ehci,
-                                                       q.itd->hw_next);
-                                               q = *q_p;
+       /* Scan each element in frame's queue for completions */
+       q_p = &ehci->pshadow[frame];
+       hw_p = &ehci->periodic[frame];
+       q.ptr = q_p->ptr;
+       type = Q_NEXT_TYPE(ehci, *hw_p);
+       modified = false;
+
+       while (q.ptr != NULL) {
+               switch (hc32_to_cpu(ehci, type)) {
+               case Q_TYPE_ITD:
+                       /*
+                        * If this ITD is still active, leave it for
+                        * later processing ... check the next entry.
+                        * No need to check for activity unless the
+                        * frame is current.
+                        */
+                       if (frame == now_frame && live) {
+                               rmb();
+                               for (uf = 0; uf < 8; uf++) {
+                                       if (q.itd->hw_transaction[uf] &
+                                                       ITD_ACTIVE(ehci))
                                                break;
-                                       }
                                }
-
-                               /* Take finished ITDs out of the schedule
-                                * and process them:  recycle, maybe report
-                                * URB completion.  HC won't cache the
-                                * pointer for much longer, if at all.
-                                */
-                               *q_p = q.itd->itd_next;
-                               if (!ehci->use_dummy_qh ||
-                                   q.itd->hw_next != EHCI_LIST_END(ehci))
-                                       *hw_p = q.itd->hw_next;
-                               else
-                                       *hw_p = cpu_to_hc32(ehci,
-                                                       ehci->dummy->qh_dma);
-                               type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
-                               wmb();
-                               modified = itd_complete (ehci, q.itd);
-                               q = *q_p;
-                               break;
-                       case Q_TYPE_SITD:
-                               /* If this SITD is still active, leave it for
-                                * later processing ... check the next entry.
-                                * No need to check for activity unless the
-                                * frame is current.
-                                */
-                               if (((frame == now_frame) ||
-                                    (((frame + 1) & fmask) == now_frame))
-                                   && live
-                                   && (q.sitd->hw_results &
-                                       SITD_ACTIVE(ehci))) {
-
-                                       q_p = &q.sitd->sitd_next;
-                                       hw_p = &q.sitd->hw_next;
+                               if (uf < 8) {
+                                       q_p = &q.itd->itd_next;
+                                       hw_p = &q.itd->hw_next;
                                        type = Q_NEXT_TYPE(ehci,
-                                                       q.sitd->hw_next);
+                                                       q.itd->hw_next);
                                        q = *q_p;
                                        break;
                                }
+                       }
+
+                       /*
+                        * Take finished ITDs out of the schedule
+                        * and process them:  recycle, maybe report
+                        * URB completion.  HC won't cache the
+                        * pointer for much longer, if at all.
+                        */
+                       *q_p = q.itd->itd_next;
+                       if (!ehci->use_dummy_qh ||
+                                       q.itd->hw_next != EHCI_LIST_END(ehci))
+                               *hw_p = q.itd->hw_next;
+                       else
+                               *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
+                       type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
+                       wmb();
+                       modified = itd_complete(ehci, q.itd);
+                       q = *q_p;
+                       break;
+               case Q_TYPE_SITD:
+                       /*
+                        * If this SITD is still active, leave it for
+                        * later processing ... check the next entry.
+                        * No need to check for activity unless the
+                        * frame is current.
+                        */
+                       if (((frame == now_frame) ||
+                                       (((frame + 1) & fmask) == now_frame))
+                               && live
+                               && (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
 
-                               /* Take finished SITDs out of the schedule
-                                * and process them:  recycle, maybe report
-                                * URB completion.
-                                */
-                               *q_p = q.sitd->sitd_next;
-                               if (!ehci->use_dummy_qh ||
-                                   q.sitd->hw_next != EHCI_LIST_END(ehci))
-                                       *hw_p = q.sitd->hw_next;
-                               else
-                                       *hw_p = cpu_to_hc32(ehci,
-                                                       ehci->dummy->qh_dma);
+                               q_p = &q.sitd->sitd_next;
+                               hw_p = &q.sitd->hw_next;
                                type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
-                               wmb();
-                               modified = sitd_complete (ehci, q.sitd);
                                q = *q_p;
                                break;
-                       default:
-                               ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
-                                       type, frame, q.ptr);
-                               // BUG ();
-                               /* FALL THROUGH */
-                       case Q_TYPE_QH:
-                       case Q_TYPE_FSTN:
-                               /* End of the iTDs and siTDs */
-                               q.ptr = NULL;
-                               break;
                        }
 
-                       /* assume completion callbacks modify the queue */
-                       if (unlikely(modified && ehci->isoc_count > 0))
-                               goto restart;
-               }
-
-               /* Stop when we have reached the current frame */
-               if (frame == now_frame)
+                       /*
+                        * Take finished SITDs out of the schedule
+                        * and process them:  recycle, maybe report
+                        * URB completion.
+                        */
+                       *q_p = q.sitd->sitd_next;
+                       if (!ehci->use_dummy_qh ||
+                                       q.sitd->hw_next != EHCI_LIST_END(ehci))
+                               *hw_p = q.sitd->hw_next;
+                       else
+                               *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma);
+                       type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
+                       wmb();
+                       modified = sitd_complete(ehci, q.sitd);
+                       q = *q_p;
+                       break;
+               default:
+                       ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n",
+                                       type, frame, q.ptr);
+                       /* BUG(); */
+                       /* FALL THROUGH */
+               case Q_TYPE_QH:
+               case Q_TYPE_FSTN:
+                       /* End of the iTDs and siTDs */
+                       q.ptr = NULL;
                        break;
+               }
 
-               /* The last frame may still have active siTDs */
-               ehci->last_iso_frame = frame;
-               frame = (frame + 1) & fmask;
+               /* Assume completion callbacks modify the queue */
+               if (unlikely(modified && ehci->isoc_count > 0))
+                       goto restart;
        }
+
+       /* Stop when we have reached the current frame */
+       if (frame == now_frame)
+               return;
+
+       /* The last frame may still have active siTDs */
+       ehci->last_iso_frame = frame;
+       frame = (frame + 1) & fmask;
+
+       goto restart;
 }
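Most of the ehci-sched.c hunks above are style cleanups (no space before '(', C89 comments instead of '//', constants moved to the right-hand side of comparisons); the one structural change is the final hunk, which flattens the for (;;) loop in scan_isoc() into a goto-based scan to drop one indentation level. A stand-alone control-flow sketch of the rewritten shape, with scan_one_frame() and advance() as hypothetical stand-ins for the real ITD/SITD processing:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in: pretend the first frame needs exactly one rescan. */
static bool scan_one_frame(unsigned int frame)
{
	static int rescan_budget = 1;

	(void)frame;
	return rescan_budget-- > 0;
}

static unsigned int advance(unsigned int frame, unsigned int fmask)
{
	return (frame + 1) & fmask;
}

/* Same shape as the rewritten scan_isoc(): no enclosing for (;;) loop. */
static void scan_flat(unsigned int frame, unsigned int now_frame,
		      unsigned int fmask)
{
restart:
	if (scan_one_frame(frame))	/* a completion modified the queue */
		goto restart;
	if (frame == now_frame)		/* caught up with the controller */
		return;
	frame = advance(frame, fmask);	/* last frame may still hold live siTDs */
	goto restart;
}

int main(void)
{
	scan_flat(0, 3, 7);
	puts("scan finished");
	return 0;
}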
index b7c5cfa37a83501f8430837cab16fdf19b370c13..a94ed677d93747c143d01879a175bad23f8f795f 100644 (file)
@@ -287,8 +287,7 @@ static int st_ehci_suspend(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
        struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
-       struct platform_device *pdev =
-               container_of(dev, struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        bool do_wakeup = device_may_wakeup(dev);
        int ret;
 
@@ -308,8 +307,7 @@ static int st_ehci_resume(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
        struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
-       struct platform_device *pdev =
-               container_of(dev, struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        int err;
 
        pinctrl_pm_select_default_state(dev);
index 424ac5d8371479fe54501139fd68be9e58a62167..69f50e6533a6ec68fa3cefeeb43842964dc227f7 100644 (file)
@@ -72,6 +72,7 @@ static unsigned event_delays_ns[] = {
        1 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_POLL_DEAD */
        1125 * NSEC_PER_USEC,   /* EHCI_HRTIMER_UNLINK_INTR */
        2 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_FREE_ITDS */
+       2 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_ACTIVE_UNLINK */
        5 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_START_UNLINK_INTR */
        6 * NSEC_PER_MSEC,      /* EHCI_HRTIMER_ASYNC_UNLINKS */
        10 * NSEC_PER_MSEC,     /* EHCI_HRTIMER_IAA_WATCHDOG */
@@ -237,6 +238,7 @@ static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
                                ehci->intr_unlink_wait_cycle))
                        break;
                list_del_init(&qh->unlink_node);
+               qh->unlink_reason |= QH_UNLINK_QUEUE_EMPTY;
                start_unlink_intr(ehci, qh);
        }
 
@@ -360,7 +362,7 @@ static void ehci_iaa_watchdog(struct ehci_hcd *ehci)
        }
 
        ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd);
-       end_unlink_async(ehci);
+       end_iaa_cycle(ehci);
 }
 
 
@@ -394,6 +396,7 @@ static void (*event_handlers[])(struct ehci_hcd *) = {
        ehci_handle_controller_death,   /* EHCI_HRTIMER_POLL_DEAD */
        ehci_handle_intr_unlinks,       /* EHCI_HRTIMER_UNLINK_INTR */
        end_free_itds,                  /* EHCI_HRTIMER_FREE_ITDS */
+       end_unlink_async,               /* EHCI_HRTIMER_ACTIVE_UNLINK */
        ehci_handle_start_intr_unlinks, /* EHCI_HRTIMER_START_UNLINK_INTR */
        unlink_empty_async,             /* EHCI_HRTIMER_ASYNC_UNLINKS */
        ehci_iaa_watchdog,              /* EHCI_HRTIMER_IAA_WATCHDOG */
index ec61aedb00673db16163da24b51a442ee478aa00..2ddf35203c05c31290a821cf3f7fd6274f0f63ce 100644 (file)
@@ -110,6 +110,7 @@ enum ehci_hrtimer_event {
        EHCI_HRTIMER_POLL_DEAD,         /* Wait for dead controller to stop */
        EHCI_HRTIMER_UNLINK_INTR,       /* Wait for interrupt QH unlink */
        EHCI_HRTIMER_FREE_ITDS,         /* Wait for unused iTDs and siTDs */
+       EHCI_HRTIMER_ACTIVE_UNLINK,     /* Wait while unlinking an active QH */
        EHCI_HRTIMER_START_UNLINK_INTR, /* Unlink empty interrupt QHs */
        EHCI_HRTIMER_ASYNC_UNLINKS,     /* Unlink empty async QHs */
        EHCI_HRTIMER_IAA_WATCHDOG,      /* Handle lost IAA interrupts */
@@ -156,6 +157,8 @@ struct ehci_hcd {                   /* one per controller */
        struct list_head        async_idle;
        unsigned                async_unlink_cycle;
        unsigned                async_count;    /* async activity count */
+       __hc32                  old_current;    /* Test for QH becoming */
+       __hc32                  old_token;      /*  inactive during unlink */
 
        /* periodic schedule support */
 #define        DEFAULT_I_TDPS          1024            /* some HCs can do less */
@@ -185,7 +188,7 @@ struct ehci_hcd {                   /* one per controller */
        struct ehci_sitd        *last_sitd_to_free;
 
        /* per root hub port */
-       unsigned long           reset_done [EHCI_MAX_ROOT_PORTS];
+       unsigned long           reset_done[EHCI_MAX_ROOT_PORTS];
 
        /* bit vectors (one bit per port) */
        unsigned long           bus_suspended;          /* which ports were
@@ -244,9 +247,9 @@ struct ehci_hcd {                   /* one per controller */
        /* irq statistics */
 #ifdef EHCI_STATS
        struct ehci_stats       stats;
-#      define COUNT(x) do { (x)++; } while (0)
+#      define COUNT(x) ((x)++)
 #else
-#      define COUNT(x) do {} while (0)
+#      define COUNT(x)
 #endif
 
        /* debug files */
@@ -268,13 +271,13 @@ struct ehci_hcd {                 /* one per controller */
 };
 
 /* convert between an HCD pointer and the corresponding EHCI_HCD */
-static inline struct ehci_hcd *hcd_to_ehci (struct usb_hcd *hcd)
+static inline struct ehci_hcd *hcd_to_ehci(struct usb_hcd *hcd)
 {
        return (struct ehci_hcd *) (hcd->hcd_priv);
 }
-static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci)
+static inline struct usb_hcd *ehci_to_hcd(struct ehci_hcd *ehci)
 {
-       return container_of ((void *) ehci, struct usb_hcd, hcd_priv);
+       return container_of((void *) ehci, struct usb_hcd, hcd_priv);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -316,25 +319,25 @@ struct ehci_qtd {
 #define HALT_BIT(ehci)         cpu_to_hc32(ehci, QTD_STS_HALT)
 #define STATUS_BIT(ehci)       cpu_to_hc32(ehci, QTD_STS_STS)
 
-       __hc32                  hw_buf [5];        /* see EHCI 3.5.4 */
-       __hc32                  hw_buf_hi [5];        /* Appendix B */
+       __hc32                  hw_buf[5];        /* see EHCI 3.5.4 */
+       __hc32                  hw_buf_hi[5];        /* Appendix B */
 
        /* the rest is HCD-private */
        dma_addr_t              qtd_dma;                /* qtd address */
        struct list_head        qtd_list;               /* sw qtd list */
        struct urb              *urb;                   /* qtd's urb */
        size_t                  length;                 /* length of buffer */
-} __attribute__ ((aligned (32)));
+} __aligned(32);
 
 /* mask NakCnt+T in qh->hw_alt_next */
-#define QTD_MASK(ehci) cpu_to_hc32 (ehci, ~0x1f)
+#define QTD_MASK(ehci) cpu_to_hc32(ehci, ~0x1f)
 
-#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1)
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
 
 /*-------------------------------------------------------------------------*/
 
 /* type tag from {qh,itd,sitd,fstn}->hw_next */
-#define Q_NEXT_TYPE(ehci,dma)  ((dma) & cpu_to_hc32(ehci, 3 << 1))
+#define Q_NEXT_TYPE(ehci, dma) ((dma) & cpu_to_hc32(ehci, 3 << 1))
 
 /*
  * Now the following defines are not converted using the
@@ -350,7 +353,8 @@ struct ehci_qtd {
 #define Q_TYPE_FSTN    (3 << 1)
 
 /* next async queue entry, or pointer to interrupt/periodic QH */
-#define QH_NEXT(ehci,dma)      (cpu_to_hc32(ehci, (((u32)dma)&~0x01f)|Q_TYPE_QH))
+#define QH_NEXT(ehci, dma) \
+               (cpu_to_hc32(ehci, (((u32) dma) & ~0x01f) | Q_TYPE_QH))
 
 /* for periodic/async schedules and qtd lists, mark end of list */
 #define EHCI_LIST_END(ehci)    cpu_to_hc32(ehci, 1) /* "null pointer" to hw */
@@ -405,9 +409,9 @@ struct ehci_qh_hw {
        __hc32                  hw_qtd_next;
        __hc32                  hw_alt_next;
        __hc32                  hw_token;
-       __hc32                  hw_buf [5];
-       __hc32                  hw_buf_hi [5];
-} __attribute__ ((aligned(32)));
+       __hc32                  hw_buf[5];
+       __hc32                  hw_buf_hi[5];
+} __aligned(32);
 
 struct ehci_qh {
        struct ehci_qh_hw       *hw;            /* Must come first */
@@ -432,13 +436,19 @@ struct ehci_qh {
        u8                      xacterrs;       /* XactErr retry counter */
 #define        QH_XACTERR_MAX          32              /* XactErr retry limit */
 
+       u8                      unlink_reason;
+#define QH_UNLINK_HALTED       0x01            /* Halt flag is set */
+#define QH_UNLINK_SHORT_READ   0x02            /* Recover from a short read */
+#define QH_UNLINK_DUMMY_OVERLAY        0x04            /* QH overlayed the dummy TD */
+#define QH_UNLINK_SHUTDOWN     0x08            /* The HC isn't running */
+#define QH_UNLINK_QUEUE_EMPTY  0x10            /* Reached end of the queue */
+#define QH_UNLINK_REQUESTED    0x20            /* Disable, reset, or dequeue */
+
        u8                      gap_uf;         /* uframes split/csplit gap */
 
        unsigned                is_out:1;       /* bulk or intr OUT */
        unsigned                clearing_tt:1;  /* Clear-TT-Buf in progress */
        unsigned                dequeue_during_giveback:1;
-       unsigned                exception:1;    /* got a fault, or an unlink
-                                                  was requested */
        unsigned                should_be_inactive:1;
 };
 
@@ -462,7 +472,7 @@ struct ehci_iso_sched {
        struct list_head        td_list;
        unsigned                span;
        unsigned                first_packet;
-       struct ehci_iso_packet  packet [0];
+       struct ehci_iso_packet  packet[0];
 };
 
 /*
@@ -510,7 +520,7 @@ struct ehci_iso_stream {
 struct ehci_itd {
        /* first part defined by EHCI spec */
        __hc32                  hw_next;           /* see EHCI 3.3.1 */
-       __hc32                  hw_transaction [8]; /* see EHCI 3.3.2 */
+       __hc32                  hw_transaction[8]; /* see EHCI 3.3.2 */
 #define EHCI_ISOC_ACTIVE        (1<<31)        /* activate transfer this slot */
 #define EHCI_ISOC_BUF_ERR       (1<<30)        /* Data buffer error */
 #define EHCI_ISOC_BABBLE        (1<<29)        /* babble detected */
@@ -520,8 +530,8 @@ struct ehci_itd {
 
 #define ITD_ACTIVE(ehci)       cpu_to_hc32(ehci, EHCI_ISOC_ACTIVE)
 
-       __hc32                  hw_bufp [7];    /* see EHCI 3.3.3 */
-       __hc32                  hw_bufp_hi [7]; /* Appendix B */
+       __hc32                  hw_bufp[7];     /* see EHCI 3.3.3 */
+       __hc32                  hw_bufp_hi[7];  /* Appendix B */
 
        /* the rest is HCD-private */
        dma_addr_t              itd_dma;        /* for this itd */
@@ -535,7 +545,7 @@ struct ehci_itd {
        unsigned                frame;          /* where scheduled */
        unsigned                pg;
        unsigned                index[8];       /* in urb->iso_frame_desc */
-} __attribute__ ((aligned (32)));
+} __aligned(32);
 
 /*-------------------------------------------------------------------------*/
 
@@ -554,7 +564,7 @@ struct ehci_sitd {
        __hc32                  hw_results;             /* EHCI table 3-11 */
 #define        SITD_IOC        (1 << 31)       /* interrupt on completion */
 #define        SITD_PAGE       (1 << 30)       /* buffer 0/1 */
-#define        SITD_LENGTH(x)  (0x3ff & ((x)>>16))
+#define        SITD_LENGTH(x)  (((x) >> 16) & 0x3ff)
 #define        SITD_STS_ACTIVE (1 << 7)        /* HC may execute this */
 #define        SITD_STS_ERR    (1 << 6)        /* error from TT */
 #define        SITD_STS_DBE    (1 << 5)        /* data buffer error (in HC) */
@@ -565,9 +575,9 @@ struct ehci_sitd {
 
 #define SITD_ACTIVE(ehci)      cpu_to_hc32(ehci, SITD_STS_ACTIVE)
 
-       __hc32                  hw_buf [2];             /* EHCI table 3-12 */
+       __hc32                  hw_buf[2];              /* EHCI table 3-12 */
        __hc32                  hw_backpointer;         /* EHCI table 3-13 */
-       __hc32                  hw_buf_hi [2];          /* Appendix B */
+       __hc32                  hw_buf_hi[2];           /* Appendix B */
 
        /* the rest is HCD-private */
        dma_addr_t              sitd_dma;
@@ -578,7 +588,7 @@ struct ehci_sitd {
        struct list_head        sitd_list;      /* list of stream's sitds */
        unsigned                frame;
        unsigned                index;
-} __attribute__ ((aligned (32)));
+} __aligned(32);
 
 /*-------------------------------------------------------------------------*/
 
@@ -598,7 +608,7 @@ struct ehci_fstn {
        /* the rest is HCD-private */
        dma_addr_t              fstn_dma;
        union ehci_shadow       fstn_next;      /* ptr to periodic q entry */
-} __attribute__ ((aligned (32)));
+} __aligned(32);
 
 /*-------------------------------------------------------------------------*/
 
@@ -634,10 +644,10 @@ struct ehci_tt {
 /* Prepare the PORTSC wakeup flags during controller suspend/resume */
 
 #define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup)     \
-               ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup);
+               ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup)
 
 #define ehci_prepare_ports_for_controller_resume(ehci)                 \
-               ehci_adjust_port_wakeup_flags(ehci, false, false);
+               ehci_adjust_port_wakeup_flags(ehci, false, false)
 
 /*-------------------------------------------------------------------------*/
 
@@ -731,7 +741,7 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
 #endif
 
 static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
-               __u32 __iomem * regs)
+               __u32 __iomem *regs)
 {
 #ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
        return ehci_big_endian_mmio(ehci) ?
@@ -806,7 +816,7 @@ static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational)
 #define ehci_big_endian_desc(e)                ((e)->big_endian_desc)
 
 /* cpu to ehci */
-static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
+static inline __hc32 cpu_to_hc32(const struct ehci_hcd *ehci, const u32 x)
 {
        return ehci_big_endian_desc(ehci)
                ? (__force __hc32)cpu_to_be32(x)
@@ -814,14 +824,14 @@ static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
 }
 
 /* ehci to cpu */
-static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
+static inline u32 hc32_to_cpu(const struct ehci_hcd *ehci, const __hc32 x)
 {
        return ehci_big_endian_desc(ehci)
                ? be32_to_cpu((__force __be32)x)
                : le32_to_cpu((__force __le32)x);
 }
 
-static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
+static inline u32 hc32_to_cpup(const struct ehci_hcd *ehci, const __hc32 *x)
 {
        return ehci_big_endian_desc(ehci)
                ? be32_to_cpup((__force __be32 *)x)
@@ -831,18 +841,18 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
 #else
 
 /* cpu to ehci */
-static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x)
+static inline __hc32 cpu_to_hc32(const struct ehci_hcd *ehci, const u32 x)
 {
        return cpu_to_le32(x);
 }
 
 /* ehci to cpu */
-static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x)
+static inline u32 hc32_to_cpu(const struct ehci_hcd *ehci, const __hc32 x)
 {
        return le32_to_cpu(x);
 }
 
-static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
+static inline u32 hc32_to_cpup(const struct ehci_hcd *ehci, const __hc32 *x)
 {
        return le32_to_cpup(x);
 }
@@ -852,18 +862,13 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
 /*-------------------------------------------------------------------------*/
 
 #define ehci_dbg(ehci, fmt, args...) \
-       dev_dbg(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+       dev_dbg(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
 #define ehci_err(ehci, fmt, args...) \
-       dev_err(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+       dev_err(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
 #define ehci_info(ehci, fmt, args...) \
-       dev_info(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
+       dev_info(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
 #define ehci_warn(ehci, fmt, args...) \
-       dev_warn(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
-
-
-#ifndef CONFIG_DYNAMIC_DEBUG
-#define STUB_DEBUG_FILES
-#endif
+       dev_warn(ehci_to_hcd(ehci)->self.controller, fmt, ## args)
 
 /*-------------------------------------------------------------------------*/
 
index 2341af4f34909bd81ce53e4364a34eeaf54dfa03..360a5e95abca8525861568f10a1a76a48019b71d 100644 (file)
@@ -2267,7 +2267,7 @@ static unsigned qh_completions(struct fotg210_hcd *fotg210,
                struct fotg210_qh *qh)
 {
        struct fotg210_qtd *last, *end = qh->dummy;
-       struct list_head *entry, *tmp;
+       struct fotg210_qtd *qtd, *tmp;
        int last_status;
        int stopped;
        unsigned count = 0;
@@ -2301,12 +2301,10 @@ rescan:
         * then let the queue advance.
         * if queue is stopped, handles unlinks.
         */
-       list_for_each_safe(entry, tmp, &qh->qtd_list) {
-               struct fotg210_qtd *qtd;
+       list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
                struct urb *urb;
                u32 token = 0;
 
-               qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
                urb = qtd->urb;
 
                /* clean up any state from previous QTD ...*/
@@ -2544,14 +2542,11 @@ retry_xacterr:
  * used for cleanup after errors, before HC sees an URB's TDs.
  */
 static void qtd_list_free(struct fotg210_hcd *fotg210, struct urb *urb,
-               struct list_head *qtd_list)
+               struct list_head *head)
 {
-       struct list_head *entry, *temp;
-
-       list_for_each_safe(entry, temp, qtd_list) {
-               struct fotg210_qtd *qtd;
+       struct fotg210_qtd *qtd, *temp;
 
-               qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
+       list_for_each_entry_safe(qtd, temp, head, qtd_list) {
                list_del(&qtd->qtd_list);
                fotg210_qtd_free(fotg210, qtd);
        }
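
The fotg210 hunks above (and the matching oxu210hp ones later in this diff) are a mechanical conversion from list_for_each_safe() plus list_entry() to list_for_each_entry_safe(), which hides the container_of step and keeps a lookahead cursor so the current entry can be deleted mid-walk. A self-contained sketch of the idiom with a hypothetical struct foo:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct foo {
            struct list_head node;
            int payload;
    };

    static void foo_list_free(struct list_head *head)
    {
            struct foo *f, *tmp;

            /* 'tmp' already points past 'f', so deleting/freeing 'f' is safe */
            list_for_each_entry_safe(f, tmp, head, node) {
                    list_del(&f->node);
                    kfree(f);
            }
    }
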
index 0c382652a3991fdd26cd5316b2eab9720d72b010..1044b0f9d656f122f4bf3a994465147683f9b05f 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/of_platform.h>
 #include <linux/clk.h>
 #include <linux/module.h>
+#include <linux/dma-mapping.h>
 
 struct fsl_usb2_dev_data {
        char *dr_mode;          /* controller mode */
@@ -96,7 +97,11 @@ static struct platform_device *fsl_usb2_device_register(
        pdev->dev.parent = &ofdev->dev;
 
        pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask;
-       *pdev->dev.dma_mask = *ofdev->dev.dma_mask;
+
+       if (!pdev->dev.dma_mask)
+               pdev->dev.dma_mask = &ofdev->dev.coherent_dma_mask;
+       else
+               dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 
        retval = platform_device_add_data(pdev, pdata, sizeof(*pdata));
        if (retval)
index bd98706d1ce9def735fe83442302c366a0fda74c..c369c29e496d735e77f3be32226c479e6871e846 100644 (file)
@@ -797,19 +797,16 @@ max3421_check_unlink(struct usb_hcd *hcd)
 {
        struct spi_device *spi = to_spi_device(hcd->self.controller);
        struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
-       struct list_head *pos, *upos, *next_upos;
        struct max3421_ep *max3421_ep;
        struct usb_host_endpoint *ep;
-       struct urb *urb;
+       struct urb *urb, *next;
        unsigned long flags;
        int retval = 0;
 
        spin_lock_irqsave(&max3421_hcd->lock, flags);
-       list_for_each(pos, &max3421_hcd->ep_list) {
-               max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+       list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
                ep = max3421_ep->ep;
-               list_for_each_safe(upos, next_upos, &ep->urb_list) {
-                       urb = container_of(upos, struct urb, urb_list);
+               list_for_each_entry_safe(urb, next, &ep->urb_list, urb_list) {
                        if (urb->unlinked) {
                                retval = 1;
                                dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
@@ -1184,22 +1181,19 @@ dump_eps(struct usb_hcd *hcd)
        struct max3421_hcd *max3421_hcd = hcd_to_max3421(hcd);
        struct max3421_ep *max3421_ep;
        struct usb_host_endpoint *ep;
-       struct list_head *pos, *upos;
        char ubuf[512], *dp, *end;
        unsigned long flags;
        struct urb *urb;
        int epnum, ret;
 
        spin_lock_irqsave(&max3421_hcd->lock, flags);
-       list_for_each(pos, &max3421_hcd->ep_list) {
-               max3421_ep = container_of(pos, struct max3421_ep, ep_list);
+       list_for_each_entry(max3421_ep, &max3421_hcd->ep_list, ep_list) {
                ep = max3421_ep->ep;
 
                dp = ubuf;
                end = dp + sizeof(ubuf);
                *dp = '\0';
-               list_for_each(upos, &ep->urb_list) {
-                       urb = container_of(upos, struct urb, urb_list);
+               list_for_each_entry(urb, &ep->urb_list, urb_list) {
                        ret = snprintf(dp, end - dp, " %p(%d.%s %d/%d)", urb,
                                       usb_pipetype(urb->pipe),
                                       usb_urb_dir_in(urb) ? "IN" : "OUT",
index cfa94275c52c24d668cde3833e3694162ce52f9f..ebacf97fc406653783da1e7888674a0b2383e4f9 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/io.h>
 #include <linux/i2c.h>
-#include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 
 #include "ohci.h"
 
-
 #include <mach/hardware.h>
-#include <asm/mach-types.h>
-#include <asm/io.h>
-
-#include <mach/platform.h>
-#include <mach/irqs.h>
 
 #define USB_CONFIG_BASE                0x31020000
 #define PWRMAN_BASE            0x40004000
index c2669f185f658c76f74e9f5c8ec651cbb92ec397..ae1c988da146e556f4b7fe386ad26264d0b20d90 100644 (file)
@@ -310,8 +310,7 @@ static int ohci_platform_suspend(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
        struct usb_ohci_pdata *pdata = dev->platform_data;
-       struct platform_device *pdev =
-               container_of(dev, struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        bool do_wakeup = device_may_wakeup(dev);
        int ret;
 
@@ -329,8 +328,7 @@ static int ohci_platform_resume(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
        struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
-       struct platform_device *pdev =
-               container_of(dev, struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
 
        if (pdata->power_on) {
                int err = pdata->power_on(pdev);
index df9028e0d9b4776a0e5c77e7c61911e1bae567c0..acf2eb2a56766358c2bbdef931b070c2c81ad43d 100644 (file)
@@ -270,8 +270,7 @@ static int st_ohci_suspend(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
        struct usb_ohci_pdata *pdata = dev->platform_data;
-       struct platform_device *pdev =
-               container_of(dev, struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        bool do_wakeup = device_may_wakeup(dev);
        int ret;
 
@@ -289,8 +288,7 @@ static int st_ohci_resume(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
        struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
-       struct platform_device *pdev =
-               container_of(dev, struct platform_device, dev);
+       struct platform_device *pdev = to_platform_device(dev);
        int err;
 
        if (pdata->power_on) {
index bc74aca8a54c291a425a1e20d6e954c57a1296fb..4e4d601af35c19da7bb3f826b54af8239a36e853 100644 (file)
@@ -981,7 +981,7 @@ static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
 static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
 {
        struct ehci_qtd *last = NULL, *end = qh->dummy;
-       struct list_head *entry, *tmp;
+       struct ehci_qtd *qtd, *tmp;
        int stopped;
        unsigned count = 0;
        int do_status = 0;
@@ -1006,12 +1006,10 @@ static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
         * then let the queue advance.
         * if queue is stopped, handles unlinks.
         */
-       list_for_each_safe(entry, tmp, &qh->qtd_list) {
-               struct ehci_qtd *qtd;
+       list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) {
                struct urb *urb;
                u32 token = 0;
 
-               qtd = list_entry(entry, struct ehci_qtd, qtd_list);
                urb = qtd->urb;
 
                /* Clean up any state from previous QTD ...*/
@@ -1174,14 +1172,11 @@ halt:
  * used for cleanup after errors, before HC sees an URB's TDs.
  */
 static void qtd_list_free(struct oxu_hcd *oxu,
-                               struct urb *urb, struct list_head *qtd_list)
+                               struct urb *urb, struct list_head *head)
 {
-       struct list_head *entry, *temp;
-
-       list_for_each_safe(entry, temp, qtd_list) {
-               struct ehci_qtd *qtd;
+       struct ehci_qtd *qtd, *temp;
 
-               qtd = list_entry(entry, struct ehci_qtd, qtd_list);
+       list_for_each_entry_safe(qtd, temp, head, qtd_list) {
                list_del(&qtd->qtd_list);
                oxu_qtd_free(oxu, qtd);
        }
index 4cbd0633c5c2dbbc1779cc2ae3aa0a4c818bf72e..bfa7fa3d2eea0143b89298b3c722f9627af98fce 100644 (file)
@@ -2099,16 +2099,13 @@ static void r8a66597_check_detect_child(struct r8a66597 *r8a66597,
 
        memset(now_map, 0, sizeof(now_map));
 
-       list_for_each_entry(bus, &usb_bus_list, bus_list) {
-               if (!bus->root_hub)
-                       continue;
-
-               if (bus->busnum != hcd->self.busnum)
-                       continue;
-
+       mutex_lock(&usb_bus_idr_lock);
+       bus = idr_find(&usb_bus_idr, hcd->self.busnum);
+       if (bus && bus->root_hub) {
                collect_usb_address_map(bus->root_hub, now_map);
                update_usb_address_map(r8a66597, bus->root_hub, now_map);
        }
+       mutex_unlock(&usb_bus_idr_lock);
 }
 
 static int r8a66597_hub_status_data(struct usb_hcd *hcd, char *buf)
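
Rather than walking usb_bus_list and comparing bus numbers, the hunk above looks the bus up directly in usb_bus_idr (keyed by bus number) under usb_bus_idr_lock. A generic sketch of the IDR register/lookup pattern, with a hypothetical object type and an IDR of its own:

    #include <linux/idr.h>
    #include <linux/gfp.h>

    struct foo;                             /* hypothetical object type */
    static DEFINE_IDR(foo_idr);

    static int foo_register(struct foo *f)
    {
            /* map the lowest free id >= 1 to 'f'; returns the id or an errno */
            return idr_alloc(&foo_idr, f, 1, 0, GFP_KERNEL);
    }

    static struct foo *foo_lookup(int id)
    {
            return idr_find(&foo_idr, id);  /* caller supplies locking, as above */
    }
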
index 05c85c7baf84e3e772127beecd27f664c69f5d32..43d52931b5bf3509c7ca6255ddb82aa7b7c5cb98 100644 (file)
@@ -1309,13 +1309,9 @@ static void u132_hcd_ring_work_scheduler(struct work_struct *work)
                u132_ring_put_kref(u132, ring);
                return;
        } else if (ring->curr_endp) {
-               struct u132_endp *last_endp = ring->curr_endp;
-               struct list_head *scan;
-               struct list_head *head = &last_endp->endp_ring;
+               struct u132_endp *endp, *last_endp = ring->curr_endp;
                unsigned long wakeup = 0;
-               list_for_each(scan, head) {
-                       struct u132_endp *endp = list_entry(scan,
-                               struct u132_endp, endp_ring);
+               list_for_each_entry(endp, &last_endp->endp_ring, endp_ring) {
                        if (endp->queue_next == endp->queue_last) {
                        } else if ((endp->delayed == 0)
                                || time_after_eq(jiffies, endp->jiffies)) {
@@ -2393,14 +2389,12 @@ static int u132_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 static int dequeue_from_overflow_chain(struct u132 *u132,
        struct u132_endp *endp, struct urb *urb)
 {
-       struct list_head *scan;
-       struct list_head *head = &endp->urb_more;
-       list_for_each(scan, head) {
-               struct u132_urbq *urbq = list_entry(scan, struct u132_urbq,
-                       urb_more);
+       struct u132_urbq *urbq;
+
+       list_for_each_entry(urbq, &endp->urb_more, urb_more) {
                if (urbq->urb == urb) {
                        struct usb_hcd *hcd = u132_to_hcd(u132);
-                       list_del(scan);
+                       list_del(&urbq->urb_more);
                        endp->queue_size -= 1;
                        urb->error_count = 0;
                        usb_hcd_giveback_urb(hcd, urb, 0);
index c17ea1589b8335b94abb1e176d13eb3cfee2b034..f097a27dc0b49775864ed36e1596a6c1c39c1739 100644 (file)
@@ -123,10 +123,11 @@ static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci)
 
 static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td)
 {
-       if (!list_empty(&td->list))
-               dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td);
-       if (!list_empty(&td->fl_list))
-               dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td);
+       dev_WARN(uhci_dev(uhci), !list_empty(&td->list),
+                       "td %p still in list!\n", td);
+
+       dev_WARN(uhci_dev(uhci), !list_empty(&td->fl_list),
+                       "td %p still in fl_list!\n", td);
 
        dma_pool_free(uhci->td_pool, td, td->dma_handle);
 }
@@ -292,8 +293,9 @@ static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci,
 static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh)
 {
        WARN_ON(qh->state != QH_STATE_IDLE && qh->udev);
-       if (!list_empty(&qh->queue))
-               dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh);
+
+       dev_WARN(uhci_dev(uhci), !list_empty(&qh->queue),
+                       "qh %p list not empty!\n", qh);
 
        list_del(&qh->node);
        if (qh->udev) {
@@ -742,9 +744,9 @@ static void uhci_free_urb_priv(struct uhci_hcd *uhci,
 {
        struct uhci_td *td, *tmp;
 
-       if (!list_empty(&urbp->node))
-               dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n",
-                               urbp->urb);
+       dev_WARN(uhci_dev(uhci), !list_empty(&urbp->node),
+                       "urb %p still on QH's list!\n",
+                       urbp->urb);
 
        list_for_each_entry_safe(td, tmp, &urbp->td_list, list) {
                uhci_remove_td_from_urbp(td);
index 04ce6b156b350e5dd0f9f9a321c79eb4d173b656..e0244fb3903dc85287f509bbac97600734c9b5af 100644 (file)
@@ -112,12 +112,16 @@ static inline int xhci_find_next_ext_cap(void __iomem *base, u32 start, int id)
        offset = start;
        if (!start || start == XHCI_HCC_PARAMS_OFFSET) {
                val = readl(base + XHCI_HCC_PARAMS_OFFSET);
+               if (val == ~0)
+                       return 0;
                offset = XHCI_HCC_EXT_CAPS(val) << 2;
                if (!offset)
                        return 0;
        };
        do {
                val = readl(base + offset);
+               if (val == ~0)
+                       return 0;
                if (XHCI_EXT_CAPS_ID(val) == id && offset != start)
                        return offset;
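
The two `val == ~0` checks added above guard against a controller that has gone away (surprise removal or power loss): MMIO reads from such a PCI device complete as all ones, so a register value of 0xFFFFFFFF means the capability walk should be abandoned instead of parsed. A tiny sketch of the same liveness test, with hypothetical names:

    #include <linux/io.h>
    #include <linux/types.h>

    static bool controller_vanished(void __iomem *reg)
    {
            u32 val = readl(reg);   /* any register that is never all ones */

            return val == ~0U;      /* all ones: device gone, stop parsing */
    }
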
 
index b30b4ce294d36bb9d8291053f7adb89347e8a64d..d61fcc48099ed68f0a6084eef673ae33371cab95 100644 (file)
@@ -50,14 +50,18 @@ static u8 usb_bos_descriptor [] = {
        0x00,                           /* bU1DevExitLat, set later. */
        0x00, 0x00,                     /* __le16 bU2DevExitLat, set later. */
        /* Second device capability, SuperSpeedPlus */
-       0x0c,                           /* bLength 12, will be adjusted later */
+       0x1c,                           /* bLength 28, will be adjusted later */
        USB_DT_DEVICE_CAPABILITY,       /* Device Capability */
        USB_SSP_CAP_TYPE,               /* bDevCapabilityType SUPERSPEED_PLUS */
        0x00,                           /* bReserved 0 */
-       0x00, 0x00, 0x00, 0x00,         /* bmAttributes, get from xhci psic */
-       0x00, 0x00,                     /* wFunctionalitySupport */
+       0x23, 0x00, 0x00, 0x00,         /* bmAttributes, SSAC=3 SSIC=1 */
+       0x01, 0x00,                     /* wFunctionalitySupport */
        0x00, 0x00,                     /* wReserved 0 */
-       /* Sublink Speed Attributes are added in xhci_create_usb3_bos_desc() */
+       /* Default Sublink Speed Attributes, overwrite if custom PSI exists */
+       0x34, 0x00, 0x05, 0x00,         /* 5Gbps, symmetric, rx, ID = 4 */
+       0xb4, 0x00, 0x05, 0x00,         /* 5Gbps, symmetric, tx, ID = 4 */
+       0x35, 0x40, 0x0a, 0x00,         /* 10Gbps, SSP, symmetric, rx, ID = 5 */
+       0xb5, 0x40, 0x0a, 0x00,         /* 10Gbps, SSP, symmetric, tx, ID = 5 */
 };
 
 static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
@@ -72,10 +76,14 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
        ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
 
        /* does xhci support USB 3.1 Enhanced SuperSpeed */
-       if (xhci->usb3_rhub.min_rev >= 0x01 && xhci->usb3_rhub.psi_uid_count) {
-               /* two SSA entries for each unique PSI ID, one RX and one TX */
-               ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
-               ssa_size = ssa_count * sizeof(u32);
+       if (xhci->usb3_rhub.min_rev >= 0x01) {
+               /* does xhci provide a PSI table for SSA speed attributes? */
+               if (xhci->usb3_rhub.psi_count) {
+                       /* two SSA entries for each unique PSI ID, RX and TX */
+                       ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
+                       ssa_size = ssa_count * sizeof(u32);
+                       ssp_cap_size -= 16; /* skip copying the default SSA */
+               }
                desc_size += ssp_cap_size;
                usb3_1 = true;
        }
@@ -102,7 +110,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
                put_unaligned_le16(HCS_U2_LATENCY(temp), &buf[13]);
        }
 
-       if (usb3_1) {
+       /* If PSI table exists, add the custom speed attributes from it */
+       if (usb3_1 && xhci->usb3_rhub.psi_count) {
                u32 ssp_cap_base, bm_attrib, psi;
                int offset;
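
The four default dwords added to usb_bos_descriptor above are USB 3.1 Sublink Speed Attributes; assuming the spec's bit layout (SSID in bits 3:0, lane-speed exponent in 5:4, direction in bit 7, link protocol in 15:14, mantissa in 31:16, which matches the inline comments), a hedged decoder sketch looks like this:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void decode_ssa(u32 ssa)
    {
            static const char * const unit[] = { "b/s", "Kb/s", "Mb/s", "Gb/s" };
            unsigned int ssid = ssa & 0xf;          /* speed attribute ID  */
            unsigned int lse  = (ssa >> 4) & 0x3;   /* lane speed exponent */
            unsigned int tx   = (ssa >> 7) & 0x1;   /* sublink direction   */
            unsigned int lp   = (ssa >> 14) & 0x3;  /* link protocol       */
            unsigned int lsm  = ssa >> 16;          /* lane speed mantissa */

            pr_info("SSA id %u: %s, %u %s, %s\n", ssid, tx ? "tx" : "rx",
                    lsm, unit[lse], lp ? "SuperSpeedPlus" : "SuperSpeed");
    }

    /* e.g. decode_ssa(0x00050034) -> "SSA id 4: rx, 5 Gb/s, SuperSpeed" */
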
 
index 5cd080e0a685ebdbafbe3b6e64657f5be1607fda..42f2bec759aae95dc31d5448e2e5ee0e750b6f25 100644 (file)
@@ -1070,7 +1070,7 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
        struct usb_device *top_dev;
        struct usb_hcd *hcd;
 
-       if (udev->speed == USB_SPEED_SUPER)
+       if (udev->speed >= USB_SPEED_SUPER)
                hcd = xhci->shared_hcd;
        else
                hcd = xhci->main_hcd;
@@ -1105,6 +1105,10 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
        /* 3) Only the control endpoint is valid - one endpoint context */
        slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
        switch (udev->speed) {
+       case USB_SPEED_SUPER_PLUS:
+               slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
+               max_packets = MAX_PACKET(512);
+               break;
        case USB_SPEED_SUPER:
                slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
                max_packets = MAX_PACKET(512);
@@ -1292,6 +1296,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
                }
                /* Fall through - SS and HS isoc/int have same decoding */
 
+       case USB_SPEED_SUPER_PLUS:
        case USB_SPEED_SUPER:
                if (usb_endpoint_xfer_int(&ep->desc) ||
                    usb_endpoint_xfer_isoc(&ep->desc)) {
@@ -1332,7 +1337,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
 static u32 xhci_get_endpoint_mult(struct usb_device *udev,
                struct usb_host_endpoint *ep)
 {
-       if (udev->speed != USB_SPEED_SUPER ||
+       if (udev->speed < USB_SPEED_SUPER ||
                        !usb_endpoint_xfer_isoc(&ep->desc))
                return 0;
        return ep->ss_ep_comp.bmAttributes;
@@ -1382,7 +1387,7 @@ static u32 xhci_get_max_esit_payload(struct usb_device *udev,
                        usb_endpoint_xfer_bulk(&ep->desc))
                return 0;
 
-       if (udev->speed == USB_SPEED_SUPER)
+       if (udev->speed >= USB_SPEED_SUPER)
                return le16_to_cpu(ep->ss_ep_comp.wBytesPerInterval);
 
        max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
@@ -1453,6 +1458,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
        max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
        max_burst = 0;
        switch (udev->speed) {
+       case USB_SPEED_SUPER_PLUS:
        case USB_SPEED_SUPER:
                /* dig out max burst from ep companion desc */
                max_burst = ep->ss_ep_comp.bMaxBurst;
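
Several hunks in this file (and in xhci-ring.c and xhci.c further down) relax `== USB_SPEED_SUPER` / `!= USB_SPEED_SUPER` comparisons to `>=` / `<` so that SuperSpeedPlus devices take the same paths. This relies on the ordering of enum usb_device_speed, where USB_SPEED_SUPER_PLUS is the entry directly after USB_SPEED_SUPER; a tiny sketch with a hypothetical helper:

    #include <linux/usb/ch9.h>

    /*
     * enum usb_device_speed: UNKNOWN = 0, LOW, FULL, HIGH, WIRELESS,
     * SUPER, SUPER_PLUS -- so a single comparison covers both SS and SSP.
     */
    static inline bool is_superspeed_or_faster(enum usb_device_speed speed)
    {
            return speed >= USB_SPEED_SUPER;
    }
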
index c30de7c39f44088e302b257a480a6c7b159ff167..73f763c4f5f591a2c19053083dedf7b6aa252f31 100644 (file)
@@ -275,8 +275,9 @@ static bool need_bw_sch(struct usb_host_endpoint *ep,
                return false;
 
        /*
-        * for LS & FS periodic endpoints which its device don't attach
-        * to TT are also ignored, root-hub will schedule them directly
+        * LS & FS periodic endpoints whose device is not behind a TT are
+        * also ignored; the root hub schedules them directly, but the
+        * @bpkts field of the endpoint context must be set to 1.
         */
        if (is_fs_or_ls(speed) && !has_tt)
                return false;
@@ -339,8 +340,17 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
                GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)),
                usb_endpoint_dir_in(&ep->desc), ep);
 
-       if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT))
+       if (!need_bw_sch(ep, udev->speed, slot_ctx->tt_info & TT_SLOT)) {
+               /*
+                * set @bpkts to 1 if it is an LS or FS periodic endpoint whose
+                * device is not connected through an external HS hub
+                */
+               if (usb_endpoint_xfer_int(&ep->desc)
+                       || usb_endpoint_xfer_isoc(&ep->desc))
+                       ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(1));
+
                return 0;
+       }
 
        bw_index = get_bw_index(xhci, udev, ep);
        sch_bw = &sch_array[bw_index];
index c9ab6a44c34aef05968c0b7e053e63cafd392d8f..9532f5aef71bfe310b499db66a266e9a2c7b32f4 100644 (file)
@@ -696,9 +696,24 @@ static int xhci_mtk_remove(struct platform_device *dev)
 }
 
 #ifdef CONFIG_PM_SLEEP
+/*
+ * If IP sleep fails while all clocks are disabled, register access will hang
+ * the AHB bus, so stop polling the root hubs to avoid register access during
+ * bus suspend.  There is no need to check whether IP sleep failed: if it did,
+ * SPM wakes the system up immediately after system suspend completes, which
+ * is exactly what we want.
+ */
 static int xhci_mtk_suspend(struct device *dev)
 {
        struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+       struct usb_hcd *hcd = mtk->hcd;
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+       xhci_dbg(xhci, "%s: stop port polling\n", __func__);
+       clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       del_timer_sync(&hcd->rh_timer);
+       clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+       del_timer_sync(&xhci->shared_hcd->rh_timer);
 
        xhci_mtk_host_disable(mtk);
        xhci_mtk_phy_power_off(mtk);
@@ -710,11 +725,19 @@ static int xhci_mtk_suspend(struct device *dev)
 static int xhci_mtk_resume(struct device *dev)
 {
        struct xhci_hcd_mtk *mtk = dev_get_drvdata(dev);
+       struct usb_hcd *hcd = mtk->hcd;
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
        usb_wakeup_disable(mtk);
        xhci_mtk_clks_enable(mtk);
        xhci_mtk_phy_power_on(mtk);
        xhci_mtk_host_enable(mtk);
+
+       xhci_dbg(xhci, "%s: restart port polling\n", __func__);
+       set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+       usb_hcd_poll_rh_status(hcd);
+       set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
+       usb_hcd_poll_rh_status(xhci->shared_hcd);
        return 0;
 }
 
index 58c43ed7ff3b6c5291bab17bb646c2bbefcf67b6..f0640b7a1c42e2b82c4b8c6071fa0c43561ab7e7 100644 (file)
@@ -28,7 +28,9 @@
 #include "xhci.h"
 #include "xhci-trace.h"
 
-#define PORT2_SSIC_CONFIG_REG2 0x883c
+#define SSIC_PORT_NUM          2
+#define SSIC_PORT_CFG2         0x880c
+#define SSIC_PORT_CFG2_OFFSET  0x30
 #define PROG_DONE              (1 << 30)
 #define SSIC_PORT_UNUSED       (1 << 31)
 
@@ -45,6 +47,7 @@
 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI            0x22b5
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI                0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI       0x9d2f
+#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI             0x0aa8
 
 static const char hcd_name[] = "xhci_hcd";
 
@@ -151,9 +154,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
                (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
                 pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
-                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) {
+                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+                pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
                xhci->quirks |= XHCI_PME_STUCK_QUIRK;
        }
+       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+               xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
+       }
        if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
                        pdev->device == PCI_DEVICE_ID_EJ168) {
                xhci->quirks |= XHCI_RESET_ON_RESUME;
@@ -312,22 +320,20 @@ static void xhci_pci_remove(struct pci_dev *dev)
  * SSIC PORT need to be marked as "unused" before putting xHCI
  * into D3. After D3 exit, the SSIC port need to be marked as "used".
  * Without this change, xHCI might not enter D3 state.
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
  */
-static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
+static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-       struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
        u32 val;
        void __iomem *reg;
+       int i;
 
-       if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-                pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
-
-               reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+       for (i = 0; i < SSIC_PORT_NUM; i++) {
+               reg = (void __iomem *) xhci->cap_regs +
+                               SSIC_PORT_CFG2 +
+                               i * SSIC_PORT_CFG2_OFFSET;
 
-               /* Notify SSIC that SSIC profile programming is not done */
+               /* Notify SSIC that SSIC profile programming is not done. */
                val = readl(reg) & ~PROG_DONE;
                writel(val, reg);
 
@@ -344,6 +350,17 @@ static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
                writel(val, reg);
                readl(reg);
        }
+}
+
+/*
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       void __iomem *reg;
+       u32 val;
 
        reg = (void __iomem *) xhci->cap_regs + 0x80a4;
        val = readl(reg);
@@ -355,6 +372,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct pci_dev          *pdev = to_pci_dev(hcd->self.controller);
+       int                     ret;
 
        /*
         * Systems with the TI redriver that loses port status change events
@@ -364,9 +382,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
                pdev->no_d3cold = true;
 
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-               xhci_pme_quirk(hcd, true);
+               xhci_pme_quirk(hcd);
+
+       if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+               xhci_ssic_port_unused_quirk(hcd, true);
 
-       return xhci_suspend(xhci, do_wakeup);
+       ret = xhci_suspend(xhci, do_wakeup);
+       if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED))
+               xhci_ssic_port_unused_quirk(hcd, false);
+
+       return ret;
 }
 
 static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
@@ -396,8 +421,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
        if (pdev->vendor == PCI_VENDOR_ID_INTEL)
                usb_enable_intel_xhci_ports(pdev);
 
+       if (xhci->quirks & XHCI_SSIC_PORT_UNUSED)
+               xhci_ssic_port_unused_quirk(hcd, false);
+
        if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
-               xhci_pme_quirk(hcd, false);
+               xhci_pme_quirk(hcd);
 
        retval = xhci_resume(xhci, hibernated);
        return retval;
index 770b6b08879790ec2754e26fbb424fcd03c1205c..d39d6bf1d090dad06687892ed723ea3a5e993083 100644 (file)
@@ -184,7 +184,8 @@ static int xhci_plat_probe(struct platform_device *pdev)
                struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
 
                /* Just copy data for now */
-               *priv = *priv_match;
+               if (priv_match)
+                       *priv = *priv_match;
        }
 
        if (xhci_plat_type_is(hcd, XHCI_PLAT_TYPE_MARVELL_ARMADA)) {
index f1c21c40b4a674e6a8fad98ac34c0b9385df76be..6773e508d9bb67619a5c576ee6a9794a8b702dd7 100644 (file)
@@ -2193,10 +2193,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                }
        /* Fast path - was this the last TRB in the TD for this URB? */
        } else if (event_trb == td->last_trb) {
-               if (td->urb_length_set && trb_comp_code == COMP_SHORT_TX)
-                       return finish_td(xhci, td, event_trb, event, ep,
-                                        status, false);
-
                if (EVENT_TRB_LEN(le32_to_cpu(event->transfer_len)) != 0) {
                        td->urb->actual_length =
                                td->urb->transfer_buffer_length -
@@ -2248,12 +2244,6 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
                        td->urb->actual_length +=
                                TRB_LEN(le32_to_cpu(cur_trb->generic.field[2])) -
                                EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
-
-               if (trb_comp_code == COMP_SHORT_TX) {
-                       xhci_dbg(xhci, "mid bulk/intr SP, wait for last TRB event\n");
-                       td->urb_length_set = true;
-                       return 0;
-               }
        }
 
        return finish_td(xhci, td, event_trb, event, ep, status, false);
@@ -3573,7 +3563,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
 {
        unsigned int max_burst;
 
-       if (xhci->hci_version < 0x100 || udev->speed != USB_SPEED_SUPER)
+       if (xhci->hci_version < 0x100 || udev->speed < USB_SPEED_SUPER)
                return 0;
 
        max_burst = urb->ep->ss_ep_comp.bMaxBurst;
@@ -3599,6 +3589,7 @@ static unsigned int xhci_get_last_burst_packet_count(struct xhci_hcd *xhci,
                return 0;
 
        switch (udev->speed) {
+       case USB_SPEED_SUPER_PLUS:
        case USB_SPEED_SUPER:
                /* bMaxBurst is zero based: 0 means 1 packet per burst */
                max_burst = urb->ep->ss_ep_comp.bMaxBurst;
index 26a44c0e969e621be5d2a6e2d947cf9292505716..d51ee0c3cf9f009d3f9098bfa77434a6ed4695b6 100644 (file)
@@ -1554,7 +1554,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
                                "HW died, freeing TD.");
                urb_priv = urb->hcpriv;
-               for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+               for (i = urb_priv->td_cnt;
+                    i < urb_priv->length && xhci->devs[urb->dev->slot_id];
+                    i++) {
                        td = urb_priv->td[i];
                        if (!list_empty(&td->td_list))
                                list_del_init(&td->td_list);
@@ -2084,6 +2086,7 @@ static unsigned int xhci_get_block_size(struct usb_device *udev)
        case USB_SPEED_HIGH:
                return HS_BLOCK;
        case USB_SPEED_SUPER:
+       case USB_SPEED_SUPER_PLUS:
                return SS_BLOCK;
        case USB_SPEED_UNKNOWN:
        case USB_SPEED_WIRELESS:
@@ -2209,7 +2212,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
        unsigned int packets_remaining = 0;
        unsigned int i;
 
-       if (virt_dev->udev->speed == USB_SPEED_SUPER)
+       if (virt_dev->udev->speed >= USB_SPEED_SUPER)
                return xhci_check_ss_bw(xhci, virt_dev);
 
        if (virt_dev->udev->speed == USB_SPEED_HIGH) {
@@ -2410,7 +2413,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
        if (xhci_is_async_ep(ep_bw->type))
                return;
 
-       if (udev->speed == USB_SPEED_SUPER) {
+       if (udev->speed >= USB_SPEED_SUPER) {
                if (xhci_is_sync_in_ep(ep_bw->type))
                        xhci->devs[udev->slot_id]->bw_table->ss_bw_in -=
                                xhci_get_ss_bw_consumed(ep_bw);
@@ -2448,6 +2451,7 @@ void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
                interval_bw->overhead[HS_OVERHEAD_TYPE] -= 1;
                break;
        case USB_SPEED_SUPER:
+       case USB_SPEED_SUPER_PLUS:
        case USB_SPEED_UNKNOWN:
        case USB_SPEED_WIRELESS:
                /* Should never happen because only LS/FS/HS endpoints will get
@@ -2507,6 +2511,7 @@ static void xhci_add_ep_to_interval_table(struct xhci_hcd *xhci,
                interval_bw->overhead[HS_OVERHEAD_TYPE] += 1;
                break;
        case USB_SPEED_SUPER:
+       case USB_SPEED_SUPER_PLUS:
        case USB_SPEED_UNKNOWN:
        case USB_SPEED_WIRELESS:
                /* Should never happen because only LS/FS/HS endpoints will get
@@ -4895,6 +4900,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                if (xhci->sbrn == 0x31) {
                        xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n");
                        hcd->speed = HCD_USB31;
+                       hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS;
                }
                /* xHCI private pointer was set in xhci_pci_probe for the second
                 * registered roothub.
index 9be7348872baab804691c7fd10f31115a0c7e3fe..e1bee3fe1a394ae7d39edd919aca493da91cb86b 100644 (file)
@@ -343,6 +343,7 @@ struct xhci_op_regs {
 #define        SLOT_SPEED_LS           (XDEV_LS << 10)
 #define        SLOT_SPEED_HS           (XDEV_HS << 10)
 #define        SLOT_SPEED_SS           (XDEV_SS << 10)
+#define        SLOT_SPEED_SSP          (XDEV_SSP << 10)
 /* Port Indicator Control */
 #define PORT_LED_OFF   (0 << 14)
 #define PORT_LED_AMBER (1 << 14)
@@ -1631,6 +1632,7 @@ struct xhci_hcd {
 #define XHCI_BROKEN_STREAMS    (1 << 19)
 #define XHCI_PME_STUCK_QUIRK   (1 << 20)
 #define XHCI_MTK_HOST          (1 << 21)
+#define XHCI_SSIC_PORT_UNUSED  (1 << 22)
        unsigned int            num_active_eps;
        unsigned int            limit_active_eps;
        /* There are two roothubs to keep track of bus suspend info for */
index 8efbabacc84e6b3179f43165b1f90920e99b2a5d..a22de52cb083da11402e9393bfadfe65eb68f2b8 100644 (file)
@@ -61,8 +61,8 @@
 /* Forward declarations / clean-up routines */
 
 #ifdef INCL_SISUSB_CON
-static int sisusb_first_vc = 0;
-static int sisusb_last_vc = 0;
+static int sisusb_first_vc;
+static int sisusb_last_vc;
 module_param_named(first, sisusb_first_vc, int, 0);
 module_param_named(last, sisusb_last_vc, int, 0);
 MODULE_PARM_DESC(first, "Number of first console to take over (1 - MAX_NR_CONSOLES)");
@@ -71,25 +71,19 @@ MODULE_PARM_DESC(last, "Number of last console to take over (1 - MAX_NR_CONSOLES
 
 static struct usb_driver sisusb_driver;
 
-static void
-sisusb_free_buffers(struct sisusb_usb_data *sisusb)
+static void sisusb_free_buffers(struct sisusb_usb_data *sisusb)
 {
        int i;
 
        for (i = 0; i < NUMOBUFS; i++) {
-               if (sisusb->obuf[i]) {
-                       kfree(sisusb->obuf[i]);
-                       sisusb->obuf[i] = NULL;
-               }
-       }
-       if (sisusb->ibuf) {
-               kfree(sisusb->ibuf);
-               sisusb->ibuf = NULL;
+               kfree(sisusb->obuf[i]);
+               sisusb->obuf[i] = NULL;
        }
+       kfree(sisusb->ibuf);
+       sisusb->ibuf = NULL;
 }
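
The hunk above drops the `if (sisusb->obuf[i])` / `if (sisusb->ibuf)` guards because kfree(NULL) is defined to be a no-op, so freeing unconditionally and clearing the pointer is equivalent and shorter. A minimal sketch of the pattern with a hypothetical helper:

    #include <linux/slab.h>

    static void release_buffer(void **bufp)
    {
            kfree(*bufp);           /* safe even when *bufp is already NULL */
            *bufp = NULL;           /* keep the pointer from dangling       */
    }
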
 
-static void
-sisusb_free_urbs(struct sisusb_usb_data *sisusb)
+static void sisusb_free_urbs(struct sisusb_usb_data *sisusb)
 {
        int i;
 
@@ -108,8 +102,7 @@ sisusb_free_urbs(struct sisusb_usb_data *sisusb)
 /* out-urb management */
 
 /* Return 1 if all free, 0 otherwise */
-static int
-sisusb_all_free(struct sisusb_usb_data *sisusb)
+static int sisusb_all_free(struct sisusb_usb_data *sisusb)
 {
        int i;
 
@@ -124,8 +117,7 @@ sisusb_all_free(struct sisusb_usb_data *sisusb)
 }
 
 /* Kill all busy URBs */
-static void
-sisusb_kill_all_busy(struct sisusb_usb_data *sisusb)
+static void sisusb_kill_all_busy(struct sisusb_usb_data *sisusb)
 {
        int i;
 
@@ -141,20 +133,17 @@ sisusb_kill_all_busy(struct sisusb_usb_data *sisusb)
 }
 
 /* Return 1 if ok, 0 if error (not all complete within timeout) */
-static int
-sisusb_wait_all_out_complete(struct sisusb_usb_data *sisusb)
+static int sisusb_wait_all_out_complete(struct sisusb_usb_data *sisusb)
 {
        int timeout = 5 * HZ, i = 1;
 
-       wait_event_timeout(sisusb->wait_q,
-                               (i = sisusb_all_free(sisusb)),
-                                timeout);
+       wait_event_timeout(sisusb->wait_q, (i = sisusb_all_free(sisusb)),
+                       timeout);
 
        return i;
 }
 
-static int
-sisusb_outurb_available(struct sisusb_usb_data *sisusb)
+static int sisusb_outurb_available(struct sisusb_usb_data *sisusb)
 {
        int i;
 
@@ -168,20 +157,17 @@ sisusb_outurb_available(struct sisusb_usb_data *sisusb)
        return -1;
 }
 
-static int
-sisusb_get_free_outbuf(struct sisusb_usb_data *sisusb)
+static int sisusb_get_free_outbuf(struct sisusb_usb_data *sisusb)
 {
        int i, timeout = 5 * HZ;
 
        wait_event_timeout(sisusb->wait_q,
-                               ((i = sisusb_outurb_available(sisusb)) >= 0),
-                               timeout);
+                       ((i = sisusb_outurb_available(sisusb)) >= 0), timeout);
 
        return i;
 }
 
-static int
-sisusb_alloc_outbuf(struct sisusb_usb_data *sisusb)
+static int sisusb_alloc_outbuf(struct sisusb_usb_data *sisusb)
 {
        int i;
 
@@ -193,8 +179,7 @@ sisusb_alloc_outbuf(struct sisusb_usb_data *sisusb)
        return i;
 }
 
-static void
-sisusb_free_outbuf(struct sisusb_usb_data *sisusb, int index)
+static void sisusb_free_outbuf(struct sisusb_usb_data *sisusb, int index)
 {
        if ((index >= 0) && (index < sisusb->numobufs))
                sisusb->urbstatus[index] &= ~SU_URB_ALLOC;
@@ -202,8 +187,7 @@ sisusb_free_outbuf(struct sisusb_usb_data *sisusb, int index)
 
 /* completion callback */
 
-static void
-sisusb_bulk_completeout(struct urb *urb)
+static void sisusb_bulk_completeout(struct urb *urb)
 {
        struct sisusb_urb_context *context = urb->context;
        struct sisusb_usb_data *sisusb;
@@ -225,9 +209,9 @@ sisusb_bulk_completeout(struct urb *urb)
        wake_up(&sisusb->wait_q);
 }
 
-static int
-sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe, void *data,
-               int len, int *actual_length, int timeout, unsigned int tflags)
+static int sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index,
+               unsigned int pipe, void *data, int len, int *actual_length,
+               int timeout, unsigned int tflags)
 {
        struct urb *urb = sisusb->sisurbout[index];
        int retval, byteswritten = 0;
@@ -236,14 +220,15 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
        urb->transfer_flags = 0;
 
        usb_fill_bulk_urb(urb, sisusb->sisusb_dev, pipe, data, len,
-               sisusb_bulk_completeout, &sisusb->urbout_context[index]);
+                       sisusb_bulk_completeout,
+                       &sisusb->urbout_context[index]);
 
        urb->transfer_flags |= tflags;
        urb->actual_length = 0;
 
        /* Set up context */
        sisusb->urbout_context[index].actual_length = (timeout) ?
-                                               NULL : actual_length;
+                       NULL : actual_length;
 
        /* Declare this urb/buffer in use */
        sisusb->urbstatus[index] |= SU_URB_BUSY;
@@ -254,8 +239,8 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
        /* If OK, and if timeout > 0, wait for completion */
        if ((retval == 0) && timeout) {
                wait_event_timeout(sisusb->wait_q,
-                                  (!(sisusb->urbstatus[index] & SU_URB_BUSY)),
-                                  timeout);
+                               (!(sisusb->urbstatus[index] & SU_URB_BUSY)),
+                               timeout);
                if (sisusb->urbstatus[index] & SU_URB_BUSY) {
                        /* URB timed out... kill it and report error */
                        usb_kill_urb(urb);
@@ -277,8 +262,7 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
 
 /* completion callback */
 
-static void
-sisusb_bulk_completein(struct urb *urb)
+static void sisusb_bulk_completein(struct urb *urb)
 {
        struct sisusb_usb_data *sisusb = urb->context;
 
@@ -289,9 +273,9 @@ sisusb_bulk_completein(struct urb *urb)
        wake_up(&sisusb->wait_q);
 }
 
-static int
-sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
-       int len, int *actual_length, int timeout, unsigned int tflags)
+static int sisusb_bulkin_msg(struct sisusb_usb_data *sisusb,
+               unsigned int pipe, void *data, int len,
+               int *actual_length, int timeout, unsigned int tflags)
 {
        struct urb *urb = sisusb->sisurbin;
        int retval, readbytes = 0;
@@ -375,7 +359,7 @@ static int sisusb_send_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
 
        do {
                passsize = thispass = (sisusb->obufsize < count) ?
-                                               sisusb->obufsize : count;
+                               sisusb->obufsize : count;
 
                if (index < 0)
                        index = sisusb_get_free_outbuf(sisusb);
@@ -405,14 +389,9 @@ static int sisusb_send_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
                        if (!sisusb->sisusb_dev)
                                return -ENODEV;
 
-                       result = sisusb_bulkout_msg(sisusb,
-                                               index,
-                                               pipe,
-                                               buffer,
-                                               thispass,
-                                               &transferred_len,
-                                               async ? 0 : 5 * HZ,
-                                               tflags);
+                       result = sisusb_bulkout_msg(sisusb, index, pipe,
+                                       buffer, thispass, &transferred_len,
+                                       async ? 0 : 5 * HZ, tflags);
 
                        if (result == -ETIMEDOUT) {
 
@@ -500,13 +479,8 @@ static int sisusb_recv_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
 
                thispass = (bufsize < count) ? bufsize : count;
 
-               result = sisusb_bulkin_msg(sisusb,
-                                          pipe,
-                                          buffer,
-                                          thispass,
-                                          &transferred_len,
-                                          5 * HZ,
-                                          tflags);
+               result = sisusb_bulkin_msg(sisusb, pipe, buffer, thispass,
+                               &transferred_len, 5 * HZ, tflags);
 
                if (transferred_len)
                        thispass = transferred_len;
@@ -549,7 +523,7 @@ static int sisusb_recv_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
 }
 
 static int sisusb_send_packet(struct sisusb_usb_data *sisusb, int len,
-                                               struct sisusb_packet *packet)
+               struct sisusb_packet *packet)
 {
        int ret;
        ssize_t bytes_transferred = 0;
@@ -585,8 +559,7 @@ static int sisusb_send_packet(struct sisusb_usb_data *sisusb, int len,
 }
 
 static int sisusb_send_bridge_packet(struct sisusb_usb_data *sisusb, int len,
-                                       struct sisusb_packet *packet,
-                                       unsigned int tflags)
+               struct sisusb_packet *packet, unsigned int tflags)
 {
        int ret;
        ssize_t bytes_transferred = 0;
@@ -634,7 +607,7 @@ static int sisusb_send_bridge_packet(struct sisusb_usb_data *sisusb, int len,
  */
 
 static int sisusb_write_memio_byte(struct sisusb_usb_data *sisusb, int type,
-                                                       u32 addr, u8 data)
+               u32 addr, u8 data)
 {
        struct sisusb_packet packet;
        int ret;
@@ -647,7 +620,7 @@ static int sisusb_write_memio_byte(struct sisusb_usb_data *sisusb, int type,
 }
 
 static int sisusb_write_memio_word(struct sisusb_usb_data *sisusb, int type,
-                                                       u32 addr, u16 data)
+               u32 addr, u16 data)
 {
        struct sisusb_packet packet;
        int ret = 0;
@@ -655,36 +628,36 @@ static int sisusb_write_memio_word(struct sisusb_usb_data *sisusb, int type,
        packet.address = addr & ~3;
 
        switch (addr & 3) {
-               case 0:
-                       packet.header = (type << 6) | 0x0003;
-                       packet.data   = (u32)data;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       break;
-               case 1:
-                       packet.header = (type << 6) | 0x0006;
-                       packet.data   = (u32)data << 8;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       break;
-               case 2:
-                       packet.header = (type << 6) | 0x000c;
-                       packet.data   = (u32)data << 16;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       break;
-               case 3:
-                       packet.header = (type << 6) | 0x0008;
-                       packet.data   = (u32)data << 24;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       packet.header = (type << 6) | 0x0001;
-                       packet.address = (addr & ~3) + 4;
-                       packet.data   = (u32)data >> 8;
-                       ret |= sisusb_send_packet(sisusb, 10, &packet);
+       case 0:
+               packet.header = (type << 6) | 0x0003;
+               packet.data   = (u32)data;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               break;
+       case 1:
+               packet.header = (type << 6) | 0x0006;
+               packet.data   = (u32)data << 8;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               break;
+       case 2:
+               packet.header = (type << 6) | 0x000c;
+               packet.data   = (u32)data << 16;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               break;
+       case 3:
+               packet.header = (type << 6) | 0x0008;
+               packet.data   = (u32)data << 24;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               packet.header = (type << 6) | 0x0001;
+               packet.address = (addr & ~3) + 4;
+               packet.data   = (u32)data >> 8;
+               ret |= sisusb_send_packet(sisusb, 10, &packet);
        }
 
        return ret;
 }
 
 static int sisusb_write_memio_24bit(struct sisusb_usb_data *sisusb, int type,
-                                                       u32 addr, u32 data)
+               u32 addr, u32 data)
 {
        struct sisusb_packet packet;
        int ret = 0;
@@ -692,40 +665,40 @@ static int sisusb_write_memio_24bit(struct sisusb_usb_data *sisusb, int type,
        packet.address = addr & ~3;
 
        switch (addr & 3) {
-               case 0:
-                       packet.header  = (type << 6) | 0x0007;
-                       packet.data    = data & 0x00ffffff;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       break;
-               case 1:
-                       packet.header  = (type << 6) | 0x000e;
-                       packet.data    = data << 8;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       break;
-               case 2:
-                       packet.header  = (type << 6) | 0x000c;
-                       packet.data    = data << 16;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       packet.header  = (type << 6) | 0x0001;
-                       packet.address = (addr & ~3) + 4;
-                       packet.data    = (data >> 16) & 0x00ff;
-                       ret |= sisusb_send_packet(sisusb, 10, &packet);
-                       break;
-               case 3:
-                       packet.header  = (type << 6) | 0x0008;
-                       packet.data    = data << 24;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       packet.header  = (type << 6) | 0x0003;
-                       packet.address = (addr & ~3) + 4;
-                       packet.data    = (data >> 8) & 0xffff;
-                       ret |= sisusb_send_packet(sisusb, 10, &packet);
+       case 0:
+               packet.header  = (type << 6) | 0x0007;
+               packet.data    = data & 0x00ffffff;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               break;
+       case 1:
+               packet.header  = (type << 6) | 0x000e;
+               packet.data    = data << 8;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               break;
+       case 2:
+               packet.header  = (type << 6) | 0x000c;
+               packet.data    = data << 16;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               packet.header  = (type << 6) | 0x0001;
+               packet.address = (addr & ~3) + 4;
+               packet.data    = (data >> 16) & 0x00ff;
+               ret |= sisusb_send_packet(sisusb, 10, &packet);
+               break;
+       case 3:
+               packet.header  = (type << 6) | 0x0008;
+               packet.data    = data << 24;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               packet.header  = (type << 6) | 0x0003;
+               packet.address = (addr & ~3) + 4;
+               packet.data    = (data >> 8) & 0xffff;
+               ret |= sisusb_send_packet(sisusb, 10, &packet);
        }
 
        return ret;
 }
 
 static int sisusb_write_memio_long(struct sisusb_usb_data *sisusb, int type,
-                                                       u32 addr, u32 data)
+               u32 addr, u32 data)
 {
        struct sisusb_packet packet;
        int ret = 0;
@@ -733,37 +706,37 @@ static int sisusb_write_memio_long(struct sisusb_usb_data *sisusb, int type,
        packet.address = addr & ~3;
 
        switch (addr & 3) {
-               case 0:
-                       packet.header  = (type << 6) | 0x000f;
-                       packet.data    = data;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       break;
-               case 1:
-                       packet.header  = (type << 6) | 0x000e;
-                       packet.data    = data << 8;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       packet.header  = (type << 6) | 0x0001;
-                       packet.address = (addr & ~3) + 4;
-                       packet.data    = data >> 24;
-                       ret |= sisusb_send_packet(sisusb, 10, &packet);
-                       break;
-               case 2:
-                       packet.header  = (type << 6) | 0x000c;
-                       packet.data    = data << 16;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       packet.header  = (type << 6) | 0x0003;
-                       packet.address = (addr & ~3) + 4;
-                       packet.data    = data >> 16;
-                       ret |= sisusb_send_packet(sisusb, 10, &packet);
-                       break;
-               case 3:
-                       packet.header  = (type << 6) | 0x0008;
-                       packet.data    = data << 24;
-                       ret = sisusb_send_packet(sisusb, 10, &packet);
-                       packet.header  = (type << 6) | 0x0007;
-                       packet.address = (addr & ~3) + 4;
-                       packet.data    = data >> 8;
-                       ret |= sisusb_send_packet(sisusb, 10, &packet);
+       case 0:
+               packet.header  = (type << 6) | 0x000f;
+               packet.data    = data;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               break;
+       case 1:
+               packet.header  = (type << 6) | 0x000e;
+               packet.data    = data << 8;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               packet.header  = (type << 6) | 0x0001;
+               packet.address = (addr & ~3) + 4;
+               packet.data    = data >> 24;
+               ret |= sisusb_send_packet(sisusb, 10, &packet);
+               break;
+       case 2:
+               packet.header  = (type << 6) | 0x000c;
+               packet.data    = data << 16;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               packet.header  = (type << 6) | 0x0003;
+               packet.address = (addr & ~3) + 4;
+               packet.data    = data >> 16;
+               ret |= sisusb_send_packet(sisusb, 10, &packet);
+               break;
+       case 3:
+               packet.header  = (type << 6) | 0x0008;
+               packet.data    = data << 24;
+               ret = sisusb_send_packet(sisusb, 10, &packet);
+               packet.header  = (type << 6) | 0x0007;
+               packet.address = (addr & ~3) + 4;
+               packet.data    = data >> 8;
+               ret |= sisusb_send_packet(sisusb, 10, &packet);
        }
 
        return ret;
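
The switch statements reformatted in sisusb_write_memio_word/_24bit/_long all encode the same rule: a write is sent as a packet on the dword-aligned address, the low nibble of packet.header selects bytes within that dword (0x3 for bytes 0-1, 0x6 for bytes 1-2, 0xc for bytes 2-3 in the word cases above), and an access that straddles a dword boundary is split into a second packet on (addr & ~3) + 4. A small stand-alone sketch of the 16-bit case, using only constants visible above; the struct and function names here are illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

/* Illustrative mirror of the sisusb_write_memio_word() switch above. */
struct demo_packet {
        uint16_t header;        /* type in bits 7:6, byte enables in 3:0 */
        uint32_t address;       /* always dword aligned */
        uint32_t data;
};

/* Fill out[]; returns how many packets (1 or 2) a 16-bit write needs. */
static int demo_split_word_write(int type, uint32_t addr, uint16_t data,
                struct demo_packet out[2])
{
        out[0].address = addr & ~3u;

        switch (addr & 3) {
        case 0:
                out[0].header = (type << 6) | 0x0003;
                out[0].data   = data;
                return 1;
        case 1:
                out[0].header = (type << 6) | 0x0006;
                out[0].data   = (uint32_t)data << 8;
                return 1;
        case 2:
                out[0].header = (type << 6) | 0x000c;
                out[0].data   = (uint32_t)data << 16;
                return 1;
        default:        /* addr & 3 == 3: high byte spills into next dword */
                out[0].header  = (type << 6) | 0x0008;
                out[0].data    = (uint32_t)data << 24;
                out[1].header  = (type << 6) | 0x0001;
                out[1].address = (addr & ~3u) + 4;
                out[1].data    = (uint32_t)data >> 8;
                return 2;
        }
}

int main(void)
{
        struct demo_packet p[2];
        int n = demo_split_word_write(1, 0x1003, 0xbeef, p);

        printf("%d packet(s); first header 0x%04x, data 0x%08x\n",
                        n, p[0].header, (unsigned int)p[0].data);
        return 0;
}
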
@@ -780,13 +753,12 @@ static int sisusb_write_memio_long(struct sisusb_usb_data *sisusb, int type,
  */
 
 static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
-                               char *kernbuffer, int length,
-                               const char __user *userbuffer, int index,
-                               ssize_t *bytes_written)
+               char *kernbuffer, int length, const char __user *userbuffer,
+               int index, ssize_t *bytes_written)
 {
        struct sisusb_packet packet;
        int  ret = 0;
-       static int msgcount = 0;
+       static int msgcount;
        u8   swap8, fromkern = kernbuffer ? 1 : 0;
        u16  swap16;
        u32  swap32, flag = (length >> 28) & 1;
@@ -803,9 +775,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
        length &= 0x00ffffff;
 
        while (length) {
-
-           switch (length) {
-
+               switch (length) {
                case 1:
                        if (userbuffer) {
                                if (get_user(swap8, (u8 __user *)userbuffer))
@@ -813,9 +783,8 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
                        } else
                                swap8 = kernbuffer[0];
 
-                       ret = sisusb_write_memio_byte(sisusb,
-                                                       SISUSB_TYPE_MEM,
-                                                       addr, swap8);
+                       ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM,
+                                       addr, swap8);
 
                        if (!ret)
                                (*bytes_written)++;
@@ -829,10 +798,8 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
                        } else
                                swap16 = *((u16 *)kernbuffer);
 
-                       ret = sisusb_write_memio_word(sisusb,
-                                                       SISUSB_TYPE_MEM,
-                                                       addr,
-                                                       swap16);
+                       ret = sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
+                                       addr, swap16);
 
                        if (!ret)
                                (*bytes_written) += 2;
@@ -863,10 +830,8 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
                                         kernbuffer[0];
 #endif
 
-                       ret = sisusb_write_memio_24bit(sisusb,
-                                                       SISUSB_TYPE_MEM,
-                                                       addr,
-                                                       swap32);
+                       ret = sisusb_write_memio_24bit(sisusb, SISUSB_TYPE_MEM,
+                                       addr, swap32);
 
                        if (!ret)
                                (*bytes_written) += 3;
@@ -880,10 +845,8 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
                        } else
                                swap32 = *((u32 *)kernbuffer);
 
-                       ret = sisusb_write_memio_long(sisusb,
-                                                       SISUSB_TYPE_MEM,
-                                                       addr,
-                                                       swap32);
+                       ret = sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM,
+                                       addr, swap32);
                        if (!ret)
                                (*bytes_written) += 4;
 
@@ -892,103 +855,106 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
                default:
                        if ((length & ~3) > 0x10000) {
 
-                          packet.header  = 0x001f;
-                          packet.address = 0x000001d4;
-                          packet.data    = addr;
-                          ret = sisusb_send_bridge_packet(sisusb, 10,
-                                                               &packet, 0);
-                          packet.header  = 0x001f;
-                          packet.address = 0x000001d0;
-                          packet.data    = (length & ~3);
-                          ret |= sisusb_send_bridge_packet(sisusb, 10,
-                                                               &packet, 0);
-                          packet.header  = 0x001f;
-                          packet.address = 0x000001c0;
-                          packet.data    = flag | 0x16;
-                          ret |= sisusb_send_bridge_packet(sisusb, 10,
-                                                               &packet, 0);
-                          if (userbuffer) {
-                               ret |= sisusb_send_bulk_msg(sisusb,
+                               packet.header  = 0x001f;
+                               packet.address = 0x000001d4;
+                               packet.data    = addr;
+                               ret = sisusb_send_bridge_packet(sisusb, 10,
+                                               &packet, 0);
+                               packet.header  = 0x001f;
+                               packet.address = 0x000001d0;
+                               packet.data    = (length & ~3);
+                               ret |= sisusb_send_bridge_packet(sisusb, 10,
+                                               &packet, 0);
+                               packet.header  = 0x001f;
+                               packet.address = 0x000001c0;
+                               packet.data    = flag | 0x16;
+                               ret |= sisusb_send_bridge_packet(sisusb, 10,
+                                               &packet, 0);
+                               if (userbuffer) {
+                                       ret |= sisusb_send_bulk_msg(sisusb,
                                                        SISUSB_EP_GFX_LBULK_OUT,
                                                        (length & ~3),
                                                        NULL, userbuffer, 0,
                                                        bytes_written, 0, 1);
-                               userbuffer += (*bytes_written);
-                          } else if (fromkern) {
-                               ret |= sisusb_send_bulk_msg(sisusb,
+                                       userbuffer += (*bytes_written);
+                               } else if (fromkern) {
+                                       ret |= sisusb_send_bulk_msg(sisusb,
                                                        SISUSB_EP_GFX_LBULK_OUT,
                                                        (length & ~3),
                                                        kernbuffer, NULL, 0,
                                                        bytes_written, 0, 1);
-                               kernbuffer += (*bytes_written);
-                          } else {
-                       ret |= sisusb_send_bulk_msg(sisusb,
+                                       kernbuffer += (*bytes_written);
+                               } else {
+                                       ret |= sisusb_send_bulk_msg(sisusb,
                                                        SISUSB_EP_GFX_LBULK_OUT,
                                                        (length & ~3),
                                                        NULL, NULL, index,
                                                        bytes_written, 0, 1);
-                               kernbuffer += ((*bytes_written) &
-                                               (sisusb->obufsize-1));
-                          }
+                                       kernbuffer += ((*bytes_written) &
+                                                       (sisusb->obufsize-1));
+                               }
 
                        } else {
 
-                          packet.header  = 0x001f;
-                          packet.address = 0x00000194;
-                          packet.data    = addr;
-                          ret = sisusb_send_bridge_packet(sisusb, 10,
-                                                               &packet, 0);
-                          packet.header  = 0x001f;
-                          packet.address = 0x00000190;
-                          packet.data    = (length & ~3);
-                          ret |= sisusb_send_bridge_packet(sisusb, 10,
-                                                               &packet, 0);
-                          if (sisusb->flagb0 != 0x16) {
                                packet.header  = 0x001f;
-                               packet.address = 0x00000180;
-                               packet.data    = flag | 0x16;
+                               packet.address = 0x00000194;
+                               packet.data    = addr;
+                               ret = sisusb_send_bridge_packet(sisusb, 10,
+                                               &packet, 0);
+                               packet.header  = 0x001f;
+                               packet.address = 0x00000190;
+                               packet.data    = (length & ~3);
                                ret |= sisusb_send_bridge_packet(sisusb, 10,
-                                                               &packet, 0);
-                               sisusb->flagb0 = 0x16;
-                          }
-                          if (userbuffer) {
-                               ret |= sisusb_send_bulk_msg(sisusb,
+                                               &packet, 0);
+                               if (sisusb->flagb0 != 0x16) {
+                                       packet.header  = 0x001f;
+                                       packet.address = 0x00000180;
+                                       packet.data    = flag | 0x16;
+                                       ret |= sisusb_send_bridge_packet(sisusb,
+                                                       10, &packet, 0);
+                                       sisusb->flagb0 = 0x16;
+                               }
+                               if (userbuffer) {
+                                       ret |= sisusb_send_bulk_msg(sisusb,
                                                        SISUSB_EP_GFX_BULK_OUT,
                                                        (length & ~3),
                                                        NULL, userbuffer, 0,
                                                        bytes_written, 0, 1);
-                               userbuffer += (*bytes_written);
-                          } else if (fromkern) {
-                               ret |= sisusb_send_bulk_msg(sisusb,
+                                       userbuffer += (*bytes_written);
+                               } else if (fromkern) {
+                                       ret |= sisusb_send_bulk_msg(sisusb,
                                                        SISUSB_EP_GFX_BULK_OUT,
                                                        (length & ~3),
                                                        kernbuffer, NULL, 0,
                                                        bytes_written, 0, 1);
-                               kernbuffer += (*bytes_written);
-                          } else {
-                               ret |= sisusb_send_bulk_msg(sisusb,
+                                       kernbuffer += (*bytes_written);
+                               } else {
+                                       ret |= sisusb_send_bulk_msg(sisusb,
                                                        SISUSB_EP_GFX_BULK_OUT,
                                                        (length & ~3),
                                                        NULL, NULL, index,
                                                        bytes_written, 0, 1);
-                               kernbuffer += ((*bytes_written) &
-                                               (sisusb->obufsize-1));
-                          }
+                                       kernbuffer += ((*bytes_written) &
+                                                       (sisusb->obufsize-1));
+                               }
                        }
                        if (ret) {
                                msgcount++;
                                if (msgcount < 500)
-                                       dev_err(&sisusb->sisusb_dev->dev, "Wrote %zd of %d bytes, error %d\n",
-                                               *bytes_written, length, ret);
+                                       dev_err(&sisusb->sisusb_dev->dev,
+                                                       "Wrote %zd of %d bytes, error %d\n",
+                                                       *bytes_written, length,
+                                                       ret);
                                else if (msgcount == 500)
-                                       dev_err(&sisusb->sisusb_dev->dev, "Too many errors, logging stopped\n");
+                                       dev_err(&sisusb->sisusb_dev->dev,
+                                                       "Too many errors, logging stopped\n");
                        }
                        addr += (*bytes_written);
                        length -= (*bytes_written);
-           }
+               }
 
-           if (ret)
-               break;
+               if (ret)
+                       break;
 
        }
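
In the loop above, lengths of 1 to 4 bytes go out as single register-style packets, while anything longer first programs the bridge with the target address, the dword-aligned length and a mode flag via sisusb_send_bridge_packet(), then streams (length & ~3) bytes over a bulk endpoint: transfers above 0x10000 bytes use SISUSB_EP_GFX_LBULK_OUT, smaller ones SISUSB_EP_GFX_BULK_OUT. Any 1 to 3 byte tail left after the bulk pass is handled by the small cases on the next iteration, since addr and length are advanced by *bytes_written each time round. A tiny sketch of just that routing decision (illustrative only, not driver code):

#include <stdio.h>

/* Which path the length switch above takes for a given chunk size. */
static const char *demo_route(int length)
{
        switch (length) {
        case 1:  return "one byte packet";
        case 2:  return "one word packet";
        case 3:  return "one 24-bit packet";
        case 4:  return "one dword packet";
        default: return (length & ~3) > 0x10000 ?
                "bridge setup, then SISUSB_EP_GFX_LBULK_OUT" :
                "bridge setup, then SISUSB_EP_GFX_BULK_OUT";
        }
}

int main(void)
{
        const int sizes[] = { 3, 4, 4096, 0x20000 };
        int i;

        for (i = 0; i < 4; i++)
                printf("%#7x bytes -> %s\n", sizes[i], demo_route(sizes[i]));
        return 0;
}
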
 
@@ -1000,7 +966,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
  */
 
 static int sisusb_read_memio_byte(struct sisusb_usb_data *sisusb, int type,
-                                                       u32 addr, u8 *data)
+               u32 addr, u8 *data)
 {
        struct sisusb_packet packet;
        int ret;
@@ -1014,7 +980,7 @@ static int sisusb_read_memio_byte(struct sisusb_usb_data *sisusb, int type,
 }
 
 static int sisusb_read_memio_word(struct sisusb_usb_data *sisusb, int type,
-                                                       u32 addr, u16 *data)
+               u32 addr, u16 *data)
 {
        struct sisusb_packet packet;
        int ret = 0;
@@ -1024,36 +990,36 @@ static int sisusb_read_memio_word(struct sisusb_usb_data *sisusb, int type,
        packet.address = addr & ~3;
 
        switch (addr & 3) {
-               case 0:
-                       packet.header = (type << 6) | 0x0003;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = (u16)(packet.data);
-                       break;
-               case 1:
-                       packet.header = (type << 6) | 0x0006;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = (u16)(packet.data >> 8);
-                       break;
-               case 2:
-                       packet.header = (type << 6) | 0x000c;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = (u16)(packet.data >> 16);
-                       break;
-               case 3:
-                       packet.header = (type << 6) | 0x0008;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = (u16)(packet.data >> 24);
-                       packet.header = (type << 6) | 0x0001;
-                       packet.address = (addr & ~3) + 4;
-                       ret |= sisusb_send_packet(sisusb, 6, &packet);
-                       *data |= (u16)(packet.data << 8);
+       case 0:
+               packet.header = (type << 6) | 0x0003;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = (u16)(packet.data);
+               break;
+       case 1:
+               packet.header = (type << 6) | 0x0006;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = (u16)(packet.data >> 8);
+               break;
+       case 2:
+               packet.header = (type << 6) | 0x000c;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = (u16)(packet.data >> 16);
+               break;
+       case 3:
+               packet.header = (type << 6) | 0x0008;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = (u16)(packet.data >> 24);
+               packet.header = (type << 6) | 0x0001;
+               packet.address = (addr & ~3) + 4;
+               ret |= sisusb_send_packet(sisusb, 6, &packet);
+               *data |= (u16)(packet.data << 8);
        }
 
        return ret;
 }
 
 static int sisusb_read_memio_24bit(struct sisusb_usb_data *sisusb, int type,
-                                                       u32 addr, u32 *data)
+               u32 addr, u32 *data)
 {
        struct sisusb_packet packet;
        int ret = 0;
@@ -1061,40 +1027,40 @@ static int sisusb_read_memio_24bit(struct sisusb_usb_data *sisusb, int type,
        packet.address = addr & ~3;
 
        switch (addr & 3) {
-               case 0:
-                       packet.header  = (type << 6) | 0x0007;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = packet.data & 0x00ffffff;
-                       break;
-               case 1:
-                       packet.header  = (type << 6) | 0x000e;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = packet.data >> 8;
-                       break;
-               case 2:
-                       packet.header  = (type << 6) | 0x000c;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = packet.data >> 16;
-                       packet.header  = (type << 6) | 0x0001;
-                       packet.address = (addr & ~3) + 4;
-                       ret |= sisusb_send_packet(sisusb, 6, &packet);
-                       *data |= ((packet.data & 0xff) << 16);
-                       break;
-               case 3:
-                       packet.header  = (type << 6) | 0x0008;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = packet.data >> 24;
-                       packet.header  = (type << 6) | 0x0003;
-                       packet.address = (addr & ~3) + 4;
-                       ret |= sisusb_send_packet(sisusb, 6, &packet);
-                       *data |= ((packet.data & 0xffff) << 8);
+       case 0:
+               packet.header  = (type << 6) | 0x0007;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = packet.data & 0x00ffffff;
+               break;
+       case 1:
+               packet.header  = (type << 6) | 0x000e;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = packet.data >> 8;
+               break;
+       case 2:
+               packet.header  = (type << 6) | 0x000c;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = packet.data >> 16;
+               packet.header  = (type << 6) | 0x0001;
+               packet.address = (addr & ~3) + 4;
+               ret |= sisusb_send_packet(sisusb, 6, &packet);
+               *data |= ((packet.data & 0xff) << 16);
+               break;
+       case 3:
+               packet.header  = (type << 6) | 0x0008;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = packet.data >> 24;
+               packet.header  = (type << 6) | 0x0003;
+               packet.address = (addr & ~3) + 4;
+               ret |= sisusb_send_packet(sisusb, 6, &packet);
+               *data |= ((packet.data & 0xffff) << 8);
        }
 
        return ret;
 }
 
 static int sisusb_read_memio_long(struct sisusb_usb_data *sisusb, int type,
-                                                       u32 addr, u32 *data)
+               u32 addr, u32 *data)
 {
        struct sisusb_packet packet;
        int ret = 0;
@@ -1102,45 +1068,45 @@ static int sisusb_read_memio_long(struct sisusb_usb_data *sisusb, int type,
        packet.address = addr & ~3;
 
        switch (addr & 3) {
-               case 0:
-                       packet.header  = (type << 6) | 0x000f;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = packet.data;
-                       break;
-               case 1:
-                       packet.header  = (type << 6) | 0x000e;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = packet.data >> 8;
-                       packet.header  = (type << 6) | 0x0001;
-                       packet.address = (addr & ~3) + 4;
-                       ret |= sisusb_send_packet(sisusb, 6, &packet);
-                       *data |= (packet.data << 24);
-                       break;
-               case 2:
-                       packet.header  = (type << 6) | 0x000c;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = packet.data >> 16;
-                       packet.header  = (type << 6) | 0x0003;
-                       packet.address = (addr & ~3) + 4;
-                       ret |= sisusb_send_packet(sisusb, 6, &packet);
-                       *data |= (packet.data << 16);
-                       break;
-               case 3:
-                       packet.header  = (type << 6) | 0x0008;
-                       ret = sisusb_send_packet(sisusb, 6, &packet);
-                       *data = packet.data >> 24;
-                       packet.header  = (type << 6) | 0x0007;
-                       packet.address = (addr & ~3) + 4;
-                       ret |= sisusb_send_packet(sisusb, 6, &packet);
-                       *data |= (packet.data << 8);
+       case 0:
+               packet.header  = (type << 6) | 0x000f;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = packet.data;
+               break;
+       case 1:
+               packet.header  = (type << 6) | 0x000e;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = packet.data >> 8;
+               packet.header  = (type << 6) | 0x0001;
+               packet.address = (addr & ~3) + 4;
+               ret |= sisusb_send_packet(sisusb, 6, &packet);
+               *data |= (packet.data << 24);
+               break;
+       case 2:
+               packet.header  = (type << 6) | 0x000c;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = packet.data >> 16;
+               packet.header  = (type << 6) | 0x0003;
+               packet.address = (addr & ~3) + 4;
+               ret |= sisusb_send_packet(sisusb, 6, &packet);
+               *data |= (packet.data << 16);
+               break;
+       case 3:
+               packet.header  = (type << 6) | 0x0008;
+               ret = sisusb_send_packet(sisusb, 6, &packet);
+               *data = packet.data >> 24;
+               packet.header  = (type << 6) | 0x0007;
+               packet.address = (addr & ~3) + 4;
+               ret |= sisusb_send_packet(sisusb, 6, &packet);
+               *data |= (packet.data << 8);
        }
 
        return ret;
 }
 
 static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
-                               char *kernbuffer, int length,
-                               char __user *userbuffer, ssize_t *bytes_read)
+               char *kernbuffer, int length, char __user *userbuffer,
+               ssize_t *bytes_read)
 {
        int ret = 0;
        char buf[4];
@@ -1152,34 +1118,27 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
        length &= 0x00ffffff;
 
        while (length) {
-
-           switch (length) {
-
+               switch (length) {
                case 1:
-
                        ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM,
-                                                               addr, &buf[0]);
+                                       addr, &buf[0]);
                        if (!ret) {
                                (*bytes_read)++;
                                if (userbuffer) {
-                                       if (put_user(buf[0],
-                                               (u8 __user *)userbuffer)) {
+                                       if (put_user(buf[0], (u8 __user *)userbuffer))
                                                return -EFAULT;
-                                       }
-                               } else {
+                               } else
                                        kernbuffer[0] = buf[0];
-                               }
                        }
                        return ret;
 
                case 2:
                        ret |= sisusb_read_memio_word(sisusb, SISUSB_TYPE_MEM,
-                                                               addr, &swap16);
+                                       addr, &swap16);
                        if (!ret) {
                                (*bytes_read) += 2;
                                if (userbuffer) {
-                                       if (put_user(swap16,
-                                               (u16 __user *)userbuffer))
+                                       if (put_user(swap16, (u16 __user *)userbuffer))
                                                return -EFAULT;
                                } else {
                                        *((u16 *)kernbuffer) = swap16;
@@ -1189,7 +1148,7 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
 
                case 3:
                        ret |= sisusb_read_memio_24bit(sisusb, SISUSB_TYPE_MEM,
-                                                               addr, &swap32);
+                                       addr, &swap32);
                        if (!ret) {
                                (*bytes_read) += 3;
 #ifdef __BIG_ENDIAN
@@ -1202,7 +1161,8 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
                                buf[0] = swap32 & 0xff;
 #endif
                                if (userbuffer) {
-                                       if (copy_to_user(userbuffer, &buf[0], 3))
+                                       if (copy_to_user(userbuffer,
+                                                       &buf[0], 3))
                                                return -EFAULT;
                                } else {
                                        kernbuffer[0] = buf[0];
@@ -1214,12 +1174,11 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
 
                default:
                        ret |= sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM,
-                                                               addr, &swap32);
+                                       addr, &swap32);
                        if (!ret) {
                                (*bytes_read) += 4;
                                if (userbuffer) {
-                                       if (put_user(swap32,
-                                               (u32 __user *)userbuffer))
+                                       if (put_user(swap32, (u32 __user *)userbuffer))
                                                return -EFAULT;
 
                                        userbuffer += 4;
@@ -1230,10 +1189,9 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
                                addr += 4;
                                length -= 4;
                        }
-           }
-
-           if (ret)
-               break;
+               }
+               if (ret)
+                       break;
        }
 
        return ret;
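
The register helpers reformatted in the hunks below (sisusb_setidxreg() and friends) wrap the usual index/data convention: the register index is written to the base port, and the value is then written to or read from port + 1; the andor/mask variants do a read-modify-write of that data port, as sisusb_setidxregmask() below shows. A user-space sketch of the same convention over a toy register file; everything here is illustrative, not driver code:

#include <stdint.h>
#include <stdio.h>

/*
 * Toy model of an index/data register pair: the even "index" port
 * selects a register, the odd "data" port at port + 1 accesses it.
 */
static uint8_t demo_index;
static uint8_t demo_regs[256];

static int demo_outb(uint32_t port, uint8_t val)
{
        if (port & 1)
                demo_regs[demo_index] = val;    /* data port */
        else
                demo_index = val;               /* index port */
        return 0;
}

static int demo_inb(uint32_t port, uint8_t *val)
{
        *val = (port & 1) ? demo_regs[demo_index] : demo_index;
        return 0;
}

/* Same shape as sisusb_setidxreg()/sisusb_getidxreg() below. */
static int demo_setidxreg(uint32_t port, uint8_t index, uint8_t data)
{
        int ret = demo_outb(port, index);

        return ret | demo_outb(port + 1, data);
}

static int demo_getidxreg(uint32_t port, uint8_t index, uint8_t *data)
{
        int ret = demo_outb(port, index);

        return ret | demo_inb(port + 1, data);
}

int main(void)
{
        uint8_t v;

        demo_setidxreg(0x0244, 0x14, 0x5a);
        demo_getidxreg(0x0244, 0x14, &v);
        printf("register 0x14 reads back 0x%02x\n", v);
        return 0;
}

The or/and convenience wrappers below are simply sisusb_setidxregandor() called with 0xff or 0x00 as the untouched half of the mask.
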
@@ -1242,40 +1200,39 @@ static int sisusb_read_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
 /* High level: Gfx (indexed) register access */
 
 #ifdef INCL_SISUSB_CON
-int
-sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
+int sisusb_setreg(struct sisusb_usb_data *sisusb, int port, u8 data)
 {
        return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
 }
 
-int
-sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data)
+int sisusb_getreg(struct sisusb_usb_data *sisusb, int port, u8 *data)
 {
        return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port, data);
 }
 #endif
 
-int
-sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port, u8 index, u8 data)
+int sisusb_setidxreg(struct sisusb_usb_data *sisusb, int port,
+               u8 index, u8 data)
 {
        int ret;
+
        ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, index);
        ret |= sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, data);
        return ret;
 }
 
-int
-sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port, u8 index, u8 *data)
+int sisusb_getidxreg(struct sisusb_usb_data *sisusb, int port,
+               u8 index, u8 *data)
 {
        int ret;
+
        ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, index);
        ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, data);
        return ret;
 }
 
-int
-sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
-                                                       u8 myand, u8 myor)
+int sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
+               u8 myand, u8 myor)
 {
        int ret;
        u8 tmp;
@@ -1288,12 +1245,12 @@ sisusb_setidxregandor(struct sisusb_usb_data *sisusb, int port, u8 idx,
        return ret;
 }
 
-static int
-sisusb_setidxregmask(struct sisusb_usb_data *sisusb, int port, u8 idx,
-                                                       u8 data, u8 mask)
+static int sisusb_setidxregmask(struct sisusb_usb_data *sisusb,
+               int port, u8 idx, u8 data, u8 mask)
 {
        int ret;
        u8 tmp;
+
        ret = sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, port, idx);
        ret |= sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, port + 1, &tmp);
        tmp &= ~(mask);
@@ -1302,75 +1259,76 @@ sisusb_setidxregmask(struct sisusb_usb_data *sisusb, int port, u8 idx,
        return ret;
 }
 
-int
-sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port, u8 index, u8 myor)
+int sisusb_setidxregor(struct sisusb_usb_data *sisusb, int port,
+               u8 index, u8 myor)
 {
-       return(sisusb_setidxregandor(sisusb, port, index, 0xff, myor));
+       return sisusb_setidxregandor(sisusb, port, index, 0xff, myor);
 }
 
-int
-sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port, u8 idx, u8 myand)
+int sisusb_setidxregand(struct sisusb_usb_data *sisusb, int port,
+               u8 idx, u8 myand)
 {
-       return(sisusb_setidxregandor(sisusb, port, idx, myand, 0x00));
+       return sisusb_setidxregandor(sisusb, port, idx, myand, 0x00);
 }
 
 /* Write/read video ram */
 
 #ifdef INCL_SISUSB_CON
-int
-sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data)
+int sisusb_writeb(struct sisusb_usb_data *sisusb, u32 adr, u8 data)
 {
-       return(sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data));
+       return sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data);
 }
 
-int
-sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data)
+int sisusb_readb(struct sisusb_usb_data *sisusb, u32 adr, u8 *data)
 {
-       return(sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data));
+       return sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, adr, data);
 }
 
-int
-sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
-                       u32 dest, int length, size_t *bytes_written)
+int sisusb_copy_memory(struct sisusb_usb_data *sisusb, char *src,
+               u32 dest, int length, size_t *bytes_written)
 {
-       return(sisusb_write_mem_bulk(sisusb, dest, src, length, NULL, 0, bytes_written));
+       return sisusb_write_mem_bulk(sisusb, dest, src, length,
+                       NULL, 0, bytes_written);
 }
 
 #ifdef SISUSBENDIANTEST
-int
-sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
-                       u32 src, int length, size_t *bytes_written)
+int sisusb_read_memory(struct sisusb_usb_data *sisusb, char *dest,
+               u32 src, int length, size_t *bytes_written)
 {
-       return(sisusb_read_mem_bulk(sisusb, src, dest, length, NULL, bytes_written));
+       return sisusb_read_mem_bulk(sisusb, src, dest, length,
+                       NULL, bytes_written);
 }
 #endif
 #endif
 
 #ifdef SISUSBENDIANTEST
-static void
-sisusb_testreadwrite(struct sisusb_usb_data *sisusb)
-{
-    static char srcbuffer[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
-    char destbuffer[10];
-    size_t dummy;
-    int i,j;
-
-    sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7, &dummy);
-
-    for(i = 1; i <= 7; i++) {
-        dev_dbg(&sisusb->sisusb_dev->dev, "sisusb: rwtest %d bytes\n", i);
-       sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase, i, &dummy);
-       for(j = 0; j < i; j++) {
-            dev_dbg(&sisusb->sisusb_dev->dev, "rwtest read[%d] = %x\n", j, destbuffer[j]);
+static void sisusb_testreadwrite(struct sisusb_usb_data *sisusb)
+{
+       static char srcbuffer[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
+       char destbuffer[10];
+       size_t dummy;
+       int i, j;
+
+       sisusb_copy_memory(sisusb, srcbuffer, sisusb->vrambase, 7, &dummy);
+
+       for (i = 1; i <= 7; i++) {
+               dev_dbg(&sisusb->sisusb_dev->dev,
+                               "sisusb: rwtest %d bytes\n", i);
+               sisusb_read_memory(sisusb, destbuffer, sisusb->vrambase,
+                               i, &dummy);
+               for (j = 0; j < i; j++) {
+                       dev_dbg(&sisusb->sisusb_dev->dev,
+                                       "rwtest read[%d] = %x\n",
+                                       j, destbuffer[j]);
+               }
        }
-    }
 }
 #endif
 
 /* access pci config registers (reg numbers 0, 4, 8, etc) */
 
-static int
-sisusb_write_pci_config(struct sisusb_usb_data *sisusb, int regnum, u32 data)
+static int sisusb_write_pci_config(struct sisusb_usb_data *sisusb,
+               int regnum, u32 data)
 {
        struct sisusb_packet packet;
        int ret;
@@ -1382,8 +1340,8 @@ sisusb_write_pci_config(struct sisusb_usb_data *sisusb, int regnum, u32 data)
        return ret;
 }
 
-static int
-sisusb_read_pci_config(struct sisusb_usb_data *sisusb, int regnum, u32 *data)
+static int sisusb_read_pci_config(struct sisusb_usb_data *sisusb,
+               int regnum, u32 *data)
 {
        struct sisusb_packet packet;
        int ret;
@@ -1397,8 +1355,8 @@ sisusb_read_pci_config(struct sisusb_usb_data *sisusb, int regnum, u32 *data)
 
 /* Clear video RAM */
 
-static int
-sisusb_clear_vram(struct sisusb_usb_data *sisusb, u32 address, int length)
+static int sisusb_clear_vram(struct sisusb_usb_data *sisusb,
+               u32 address, int length)
 {
        int ret, i;
        ssize_t j;
@@ -1416,7 +1374,8 @@ sisusb_clear_vram(struct sisusb_usb_data *sisusb, u32 address, int length)
                return 0;
 
        /* allocate free buffer/urb and clear the buffer */
-       if ((i = sisusb_alloc_outbuf(sisusb)) < 0)
+       i = sisusb_alloc_outbuf(sisusb);
+       if (i < 0)
                return -EBUSY;
 
        memset(sisusb->obuf[i], 0, sisusb->obufsize);
@@ -1437,20 +1396,19 @@ sisusb_clear_vram(struct sisusb_usb_data *sisusb, u32 address, int length)
  * a defined mode (640x480@60Hz)
  */
 
-#define GETREG(r,d)     sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
-#define SETREG(r,d)    sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
-#define SETIREG(r,i,d) sisusb_setidxreg(sisusb, r, i, d)
-#define GETIREG(r,i,d)  sisusb_getidxreg(sisusb, r, i, d)
-#define SETIREGOR(r,i,o)       sisusb_setidxregor(sisusb, r, i, o)
-#define SETIREGAND(r,i,a)      sisusb_setidxregand(sisusb, r, i, a)
-#define SETIREGANDOR(r,i,a,o)  sisusb_setidxregandor(sisusb, r, i, a, o)
-#define READL(a,d)     sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
-#define WRITEL(a,d)    sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
-#define READB(a,d)     sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
-#define WRITEB(a,d)    sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
-
-static int
-sisusb_triggersr16(struct sisusb_usb_data *sisusb, u8 ramtype)
+#define GETREG(r, d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
+#define SETREG(r, d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_IO, r, d)
+#define SETIREG(r, i, d) sisusb_setidxreg(sisusb, r, i, d)
+#define GETIREG(r, i, d) sisusb_getidxreg(sisusb, r, i, d)
+#define SETIREGOR(r, i, o) sisusb_setidxregor(sisusb, r, i, o)
+#define SETIREGAND(r, i, a) sisusb_setidxregand(sisusb, r, i, a)
+#define SETIREGANDOR(r, i, a, o) sisusb_setidxregandor(sisusb, r, i, a, o)
+#define READL(a, d) sisusb_read_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
+#define WRITEL(a, d) sisusb_write_memio_long(sisusb, SISUSB_TYPE_MEM, a, d)
+#define READB(a, d) sisusb_read_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
+#define WRITEB(a, d) sisusb_write_memio_byte(sisusb, SISUSB_TYPE_MEM, a, d)
+
+static int sisusb_triggersr16(struct sisusb_usb_data *sisusb, u8 ramtype)
 {
        int ret;
        u8 tmp8;
@@ -1480,8 +1438,8 @@ sisusb_triggersr16(struct sisusb_usb_data *sisusb, u8 ramtype)
        return ret;
 }
 
-static int
-sisusb_getbuswidth(struct sisusb_usb_data *sisusb, int *bw, int *chab)
+static int sisusb_getbuswidth(struct sisusb_usb_data *sisusb,
+               int *bw, int *chab)
 {
        int ret;
        u8  ramtype, done = 0;
@@ -1526,7 +1484,7 @@ sisusb_getbuswidth(struct sisusb_usb_data *sisusb, int *bw, int *chab)
                }
                if ((t1 != 0x456789ab) || (t0 != 0x01234567)) {
                        *chab = 1; *bw = 64;
-                       ret |= SETIREGANDOR(SISSR, 0x14, 0xfc,0x01);
+                       ret |= SETIREGANDOR(SISSR, 0x14, 0xfc, 0x01);
 
                        ret |= sisusb_triggersr16(sisusb, ramtype);
                        ret |= WRITEL(ramptr +  0, 0x89abcdef);
@@ -1593,8 +1551,7 @@ sisusb_getbuswidth(struct sisusb_usb_data *sisusb, int *bw, int *chab)
        return ret;
 }
 
-static int
-sisusb_verify_mclk(struct sisusb_usb_data *sisusb)
+static int sisusb_verify_mclk(struct sisusb_usb_data *sisusb)
 {
        int ret = 0;
        u32 ramptr = SISUSB_PCI_MEMBASE;
@@ -1622,10 +1579,8 @@ sisusb_verify_mclk(struct sisusb_usb_data *sisusb)
        return ret;
 }
 
-static int
-sisusb_set_rank(struct sisusb_usb_data *sisusb, int *iret, int index,
-                       u8 rankno, u8 chab, const u8 dramtype[][5],
-                       int bw)
+static int sisusb_set_rank(struct sisusb_usb_data *sisusb, int *iret,
+               int index, u8 rankno, u8 chab, const u8 dramtype[][5], int bw)
 {
        int ret = 0, ranksize;
        u8 tmp;
@@ -1641,7 +1596,9 @@ sisusb_set_rank(struct sisusb_usb_data *sisusb, int *iret, int index,
                return ret;
 
        tmp = 0;
-       while ((ranksize >>= 1) > 0) tmp += 0x10;
+       while ((ranksize >>= 1) > 0)
+               tmp += 0x10;
+
        tmp |= ((rankno - 1) << 2);
        tmp |= ((bw / 64) & 0x02);
        tmp |= (chab & 0x01);
@@ -1654,8 +1611,8 @@ sisusb_set_rank(struct sisusb_usb_data *sisusb, int *iret, int index,
        return ret;
 }
 
-static int
-sisusb_check_rbc(struct sisusb_usb_data *sisusb, int *iret, u32 inc, int testn)
+static int sisusb_check_rbc(struct sisusb_usb_data *sisusb, int *iret,
+               u32 inc, int testn)
 {
        int ret = 0, i;
        u32 j, tmp;
@@ -1669,7 +1626,9 @@ sisusb_check_rbc(struct sisusb_usb_data *sisusb, int *iret, u32 inc, int testn)
 
        for (i = 0, j = 0; i < testn; i++) {
                ret |= READL(sisusb->vrambase + j, &tmp);
-               if (tmp != j) return ret;
+               if (tmp != j)
+                       return ret;
+
                j += inc;
        }
 
@@ -1677,9 +1636,8 @@ sisusb_check_rbc(struct sisusb_usb_data *sisusb, int *iret, u32 inc, int testn)
        return ret;
 }
 
-static int
-sisusb_check_ranks(struct sisusb_usb_data *sisusb, int *iret, int rankno,
-                                       int idx, int bw, const u8 rtype[][5])
+static int sisusb_check_ranks(struct sisusb_usb_data *sisusb,
+               int *iret, int rankno, int idx, int bw, const u8 rtype[][5])
 {
        int ret = 0, i, i2ret;
        u32 inc;
@@ -1687,10 +1645,8 @@ sisusb_check_ranks(struct sisusb_usb_data *sisusb, int *iret, int rankno,
        *iret = 0;
 
        for (i = rankno; i >= 1; i--) {
-               inc = 1 << (rtype[idx][2] +
-                           rtype[idx][1] +
-                           rtype[idx][0] +
-                           bw / 64 + i);
+               inc = 1 << (rtype[idx][2] + rtype[idx][1] + rtype[idx][0] +
+                               bw / 64 + i);
                ret |= sisusb_check_rbc(sisusb, &i2ret, inc, 2);
                if (!i2ret)
                        return ret;
@@ -1710,9 +1666,8 @@ sisusb_check_ranks(struct sisusb_usb_data *sisusb, int *iret, int rankno,
        return ret;
 }
 
-static int
-sisusb_get_sdram_size(struct sisusb_usb_data *sisusb, int *iret, int bw,
-                                                               int chab)
+static int sisusb_get_sdram_size(struct sisusb_usb_data *sisusb, int *iret,
+               int bw, int chab)
 {
        int ret = 0, i2ret = 0, i, j;
        static const u8 sdramtype[13][5] = {
@@ -1736,13 +1691,13 @@ sisusb_get_sdram_size(struct sisusb_usb_data *sisusb, int *iret, int bw,
        for (i = 0; i < 13; i++) {
                ret |= SETIREGANDOR(SISSR, 0x13, 0x80, sdramtype[i][4]);
                for (j = 2; j > 0; j--) {
-                       ret |= sisusb_set_rank(sisusb, &i2ret, i, j,
-                                               chab, sdramtype, bw);
+                       ret |= sisusb_set_rank(sisusb, &i2ret, i, j, chab,
+                                       sdramtype, bw);
                        if (!i2ret)
                                continue;
 
-                       ret |= sisusb_check_ranks(sisusb, &i2ret, j, i,
-                                               bw, sdramtype);
+                       ret |= sisusb_check_ranks(sisusb, &i2ret, j, i, bw,
+                                       sdramtype);
                        if (i2ret) {
                                *iret = 0;      /* ram size found */
                                return ret;
@@ -1753,8 +1708,8 @@ sisusb_get_sdram_size(struct sisusb_usb_data *sisusb, int *iret, int bw,
        return ret;
 }
 
-static int
-sisusb_setup_screen(struct sisusb_usb_data *sisusb, int clrall, int drwfr)
+static int sisusb_setup_screen(struct sisusb_usb_data *sisusb,
+               int clrall, int drwfr)
 {
        int ret = 0;
        u32 address;
@@ -1775,47 +1730,47 @@ sisusb_setup_screen(struct sisusb_usb_data *sisusb, int clrall, int drwfr)
                for (i = 0; i < modex; i++) {
                        address = sisusb->vrambase + (i * bpp);
                        ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
-                                                       address, 0xf100);
+                                       address, 0xf100);
                        address += (modex * (modey-1) * bpp);
                        ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
-                                                       address, 0xf100);
+                                       address, 0xf100);
                }
                for (i = 0; i < modey; i++) {
                        address = sisusb->vrambase + ((i * modex) * bpp);
                        ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
-                                                       address, 0xf100);
+                                       address, 0xf100);
                        address += ((modex - 1) * bpp);
                        ret |= sisusb_write_memio_word(sisusb, SISUSB_TYPE_MEM,
-                                                       address, 0xf100);
+                                       address, 0xf100);
                }
        }
 
        return ret;
 }
 
-static int
-sisusb_set_default_mode(struct sisusb_usb_data *sisusb, int touchengines)
+static int sisusb_set_default_mode(struct sisusb_usb_data *sisusb,
+               int touchengines)
 {
        int ret = 0, i, j, modex, modey, bpp, du;
        u8 sr31, cr63, tmp8;
        static const char attrdata[] = {
-               0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,
-               0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
-               0x01,0x00,0x00,0x00
+               0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+               0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+               0x01, 0x00, 0x00, 0x00
        };
        static const char crtcrdata[] = {
-               0x5f,0x4f,0x50,0x82,0x54,0x80,0x0b,0x3e,
-               0x00,0x40,0x00,0x00,0x00,0x00,0x00,0x00,
-               0xea,0x8c,0xdf,0x28,0x40,0xe7,0x04,0xa3,
+               0x5f, 0x4f, 0x50, 0x82, 0x54, 0x80, 0x0b, 0x3e,
+               0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               0xea, 0x8c, 0xdf, 0x28, 0x40, 0xe7, 0x04, 0xa3,
                0xff
        };
        static const char grcdata[] = {
-               0x00,0x00,0x00,0x00,0x00,0x40,0x05,0x0f,
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x05, 0x0f,
                0xff
        };
        static const char crtcdata[] = {
-               0x5f,0x4f,0x4f,0x83,0x55,0x81,0x0b,0x3e,
-               0xe9,0x8b,0xdf,0xe8,0x0c,0x00,0x00,0x05,
+               0x5f, 0x4f, 0x4f, 0x83, 0x55, 0x81, 0x0b, 0x3e,
+               0xe9, 0x8b, 0xdf, 0xe8, 0x0c, 0x00, 0x00, 0x05,
                0x00
        };
 
@@ -1858,28 +1813,32 @@ sisusb_set_default_mode(struct sisusb_usb_data *sisusb, int touchengines)
        SETIREGAND(SISSR, 0x37, 0xfe);
        SETREG(SISMISCW, 0xef);         /* sync */
        SETIREG(SISCR, 0x11, 0x00);     /* crtc */
-       for (j = 0x00, i = 0; i <= 7; i++, j++) {
+       for (j = 0x00, i = 0; i <= 7; i++, j++)
                SETIREG(SISCR, j, crtcdata[i]);
-       }
-       for (j = 0x10; i <= 10; i++, j++) {
+
+       for (j = 0x10; i <= 10; i++, j++)
                SETIREG(SISCR, j, crtcdata[i]);
-       }
-       for (j = 0x15; i <= 12; i++, j++) {
+
+       for (j = 0x15; i <= 12; i++, j++)
                SETIREG(SISCR, j, crtcdata[i]);
-       }
-       for (j = 0x0A; i <= 15; i++, j++) {
+
+       for (j = 0x0A; i <= 15; i++, j++)
                SETIREG(SISSR, j, crtcdata[i]);
-       }
+
        SETIREG(SISSR, 0x0E, (crtcdata[16] & 0xE0));
        SETIREGANDOR(SISCR, 0x09, 0x5f, ((crtcdata[16] & 0x01) << 5));
        SETIREG(SISCR, 0x14, 0x4f);
        du = (modex / 16) * (bpp * 2);  /* offset/pitch */
-       if (modex % 16) du += bpp;
+       if (modex % 16)
+               du += bpp;
+
        SETIREGANDOR(SISSR, 0x0e, 0xf0, ((du >> 8) & 0x0f));
        SETIREG(SISCR, 0x13, (du & 0xff));
        du <<= 5;
        tmp8 = du >> 8;
-       if (du & 0xff) tmp8++;
+       if (du & 0xff)
+               tmp8++;
+
        SETIREG(SISSR, 0x10, tmp8);
        SETIREG(SISSR, 0x31, 0x00);     /* VCLK */
        SETIREG(SISSR, 0x2b, 0x1b);
@@ -1925,8 +1884,7 @@ sisusb_set_default_mode(struct sisusb_usb_data *sisusb, int touchengines)
        return ret;
 }
 
-static int
-sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
+static int sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
 {
        int ret = 0, i, j, bw, chab, iret, retry = 3;
        u8 tmp8, ramtype;
@@ -1970,7 +1928,8 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
                ret |= GETREG(SISMISCR, &tmp8);
                ret |= SETREG(SISMISCW, (tmp8 | 0x01));
 
-               if (ret) continue;
+               if (ret)
+                       continue;
 
                /* Reset registers */
                ret |= SETIREGAND(SISCR, 0x5b, 0xdf);
@@ -1979,23 +1938,23 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
 
                ret |= SETREG(SISMISCW, 0x67);
 
-               for (i = 0x06; i <= 0x1f; i++) {
+               for (i = 0x06; i <= 0x1f; i++)
                        ret |= SETIREG(SISSR, i, 0x00);
-               }
-               for (i = 0x21; i <= 0x27; i++) {
+
+               for (i = 0x21; i <= 0x27; i++)
                        ret |= SETIREG(SISSR, i, 0x00);
-               }
-               for (i = 0x31; i <= 0x3d; i++) {
+
+               for (i = 0x31; i <= 0x3d; i++)
                        ret |= SETIREG(SISSR, i, 0x00);
-               }
-               for (i = 0x12; i <= 0x1b; i++) {
+
+               for (i = 0x12; i <= 0x1b; i++)
                        ret |= SETIREG(SISSR, i, 0x00);
-               }
-               for (i = 0x79; i <= 0x7c; i++) {
+
+               for (i = 0x79; i <= 0x7c; i++)
                        ret |= SETIREG(SISCR, i, 0x00);
-               }
 
-               if (ret) continue;
+               if (ret)
+                       continue;
 
                ret |= SETIREG(SISCR, 0x63, 0x80);
 
@@ -2013,13 +1972,16 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
                ret |= SETIREG(SISSR, 0x07, 0x18);
                ret |= SETIREG(SISSR, 0x11, 0x0f);
 
-               if (ret) continue;
+               if (ret)
+                       continue;
 
                for (i = 0x15, j = 0; i <= 0x1b; i++, j++) {
-                       ret |= SETIREG(SISSR, i, ramtypetable1[(j*4) + ramtype]);
+                       ret |= SETIREG(SISSR, i,
+                                       ramtypetable1[(j*4) + ramtype]);
                }
                for (i = 0x40, j = 0; i <= 0x44; i++, j++) {
-                       ret |= SETIREG(SISCR, i, ramtypetable2[(j*4) + ramtype]);
+                       ret |= SETIREG(SISCR, i,
+                                       ramtypetable2[(j*4) + ramtype]);
                }
 
                ret |= SETIREG(SISCR, 0x49, 0xaa);
@@ -2036,7 +1998,8 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
 
                ret |= SETIREGAND(SISCAP, 0x3f, 0xef);
 
-               if (ret) continue;
+               if (ret)
+                       continue;
 
                ret |= SETIREG(SISPART1, 0x00, 0x00);
 
@@ -2058,7 +2021,8 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
                ret |= SETIREG(SISSR, 0x32, 0x11);
                ret |= SETIREG(SISSR, 0x33, 0x00);
 
-               if (ret) continue;
+               if (ret)
+                       continue;
 
                ret |= SETIREG(SISCR, 0x83, 0x00);
 
@@ -2080,13 +2044,15 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
                if (ramtype <= 1) {
                        ret |= sisusb_get_sdram_size(sisusb, &iret, bw, chab);
                        if (iret) {
-                               dev_err(&sisusb->sisusb_dev->dev,"RAM size detection failed, assuming 8MB video RAM\n");
-                               ret |= SETIREG(SISSR,0x14,0x31);
+                               dev_err(&sisusb->sisusb_dev->dev,
+                                               "RAM size detection failed, assuming 8MB video RAM\n");
+                               ret |= SETIREG(SISSR, 0x14, 0x31);
                                /* TODO */
                        }
                } else {
-                       dev_err(&sisusb->sisusb_dev->dev, "DDR RAM device found, assuming 8MB video RAM\n");
-                       ret |= SETIREG(SISSR,0x14,0x31);
+                       dev_err(&sisusb->sisusb_dev->dev,
+                                       "DDR RAM device found, assuming 8MB video RAM\n");
+                       ret |= SETIREG(SISSR, 0x14, 0x31);
                        /* *** TODO *** */
                }
 
@@ -2117,8 +2083,7 @@ sisusb_init_gfxcore(struct sisusb_usb_data *sisusb)
 #undef READL
 #undef WRITEL
 
-static void
-sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
+static void sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
 {
        u8 tmp8, tmp82, ramtype;
        int bw = 0;
@@ -2127,7 +2092,7 @@ sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
        static const char ram_dynamictype[4] = {'D', 'G', 'D', 'G'};
        static const int busSDR[4]  = {64, 64, 128, 128};
        static const int busDDR[4]  = {32, 32,  64,  64};
-       static const int busDDRA[4] = {64+32, 64+32 , (64+32)*2, (64+32)*2};
+       static const int busDDRA[4] = {64+32, 64+32, (64+32)*2, (64+32)*2};
 
        sisusb_getidxreg(sisusb, SISSR, 0x14, &tmp8);
        sisusb_getidxreg(sisusb, SISSR, 0x15, &tmp82);
@@ -2135,35 +2100,38 @@ sisusb_get_ramconfig(struct sisusb_usb_data *sisusb)
        sisusb->vramsize = (1 << ((tmp8 & 0xf0) >> 4)) * 1024 * 1024;
        ramtype &= 0x03;
        switch ((tmp8 >> 2) & 0x03) {
-       case 0: ramtypetext1 = "1 ch/1 r";
-               if (tmp82 & 0x10) {
+       case 0:
+               ramtypetext1 = "1 ch/1 r";
+               if (tmp82 & 0x10)
                        bw = 32;
-               } else {
+               else
                        bw = busSDR[(tmp8 & 0x03)];
-               }
+
                break;
-       case 1: ramtypetext1 = "1 ch/2 r";
+       case 1:
+               ramtypetext1 = "1 ch/2 r";
                sisusb->vramsize <<= 1;
                bw = busSDR[(tmp8 & 0x03)];
                break;
-       case 2: ramtypetext1 = "asymmeric";
+       case 2:
+               ramtypetext1 = "asymmeric";
                sisusb->vramsize += sisusb->vramsize/2;
                bw = busDDRA[(tmp8 & 0x03)];
                break;
-       case 3: ramtypetext1 = "2 channel";
+       case 3:
+               ramtypetext1 = "2 channel";
                sisusb->vramsize <<= 1;
                bw = busDDR[(tmp8 & 0x03)];
                break;
        }
 
-
-       dev_info(&sisusb->sisusb_dev->dev, "%dMB %s %cDR S%cRAM, bus width %d\n",
-                sisusb->vramsize >> 20, ramtypetext1,
-                ram_datarate[ramtype], ram_dynamictype[ramtype], bw);
+       dev_info(&sisusb->sisusb_dev->dev,
+                       "%dMB %s %cDR S%cRAM, bus width %d\n",
+                       sisusb->vramsize >> 20, ramtypetext1,
+                       ram_datarate[ramtype], ram_dynamictype[ramtype], bw);
 }
 
-static int
-sisusb_do_init_gfxdevice(struct sisusb_usb_data *sisusb)
+static int sisusb_do_init_gfxdevice(struct sisusb_usb_data *sisusb)
 {
        struct sisusb_packet packet;
        int ret;
@@ -2241,8 +2209,7 @@ sisusb_do_init_gfxdevice(struct sisusb_usb_data *sisusb)
  * of the graphics board.
  */
 
-static int
-sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
+static int sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
 {
        int ret = 0, test = 0;
        u32 tmp32;
@@ -2250,16 +2217,25 @@ sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
        if (sisusb->devinit == 1) {
                /* Read PCI BARs and see if they have been set up */
                ret |= sisusb_read_pci_config(sisusb, 0x10, &tmp32);
-               if (ret) return ret;
-               if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MEMBASE) test++;
+               if (ret)
+                       return ret;
+
+               if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MEMBASE)
+                       test++;
 
                ret |= sisusb_read_pci_config(sisusb, 0x14, &tmp32);
-               if (ret) return ret;
-               if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MMIOBASE) test++;
+               if (ret)
+                       return ret;
+
+               if ((tmp32 & 0xfffffff0) == SISUSB_PCI_MMIOBASE)
+                       test++;
 
                ret |= sisusb_read_pci_config(sisusb, 0x18, &tmp32);
-               if (ret) return ret;
-               if ((tmp32 & 0xfffffff0) == SISUSB_PCI_IOPORTBASE) test++;
+               if (ret)
+                       return ret;
+
+               if ((tmp32 & 0xfffffff0) == SISUSB_PCI_IOPORTBASE)
+                       test++;
        }
 
        /* No? So reset the device */
@@ -2289,20 +2265,20 @@ sisusb_init_gfxdevice(struct sisusb_usb_data *sisusb, int initscreen)
 #ifdef INCL_SISUSB_CON
 
 /* Set up default text mode:
-   - Set text mode (0x03)
-   - Upload default font
-   - Upload user font (if available)
-*/
+ * - Set text mode (0x03)
+ * - Upload default font
+ * - Upload user font (if available)
+ */
 
-int
-sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
+int sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
 {
        int ret = 0, slot = sisusb->font_slot, i;
        const struct font_desc *myfont;
        u8 *tempbuf;
        u16 *tempbufb;
        size_t written;
-       static const char bootstring[] = "SiSUSB VGA text console, (C) 2005 Thomas Winischhofer.";
+       static const char bootstring[] =
+               "SiSUSB VGA text console, (C) 2005 Thomas Winischhofer.";
        static const char bootlogo[] = "(o_ //\\ V_/_";
 
        /* sisusb->lock is down */
@@ -2328,7 +2304,8 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
                memcpy(tempbuf + (i * 32), myfont->data + (i * 16), 16);
 
        /* Upload default font */
-       ret = sisusbcon_do_font_op(sisusb, 1, 0, tempbuf, 8192, 0, 1, NULL, 16, 0);
+       ret = sisusbcon_do_font_op(sisusb, 1, 0, tempbuf, 8192,
+                       0, 1, NULL, 16, 0);
 
        vfree(tempbuf);
 
@@ -2366,7 +2343,7 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
                                *(tempbufb++) = 0x0700 | bootstring[i++];
 
                        ret |= sisusb_copy_memory(sisusb, tempbuf,
-                               sisusb->vrambase, 8192, &written);
+                                       sisusb->vrambase, 8192, &written);
 
                        vfree(tempbuf);
 
@@ -2375,12 +2352,13 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
        } else if (sisusb->scrbuf) {
 
                ret |= sisusb_copy_memory(sisusb, (char *)sisusb->scrbuf,
-                               sisusb->vrambase, sisusb->scrbuf_size, &written);
+                               sisusb->vrambase, sisusb->scrbuf_size,
+                               &written);
 
        }
 
        if (sisusb->sisusb_cursor_size_from >= 0 &&
-           sisusb->sisusb_cursor_size_to >= 0) {
+                       sisusb->sisusb_cursor_size_to >= 0) {
                sisusb_setidxreg(sisusb, SISCR, 0x0a,
                                sisusb->sisusb_cursor_size_from);
                sisusb_setidxregandor(sisusb, SISCR, 0x0b, 0xe0,
@@ -2392,7 +2370,8 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
        }
 
        slot = sisusb->sisusb_cursor_loc;
-       if(slot < 0) slot = 0;
+       if (slot < 0)
+               slot = 0;
 
        sisusb->sisusb_cursor_loc = -1;
        sisusb->bad_cursor_pos = 1;
@@ -2413,22 +2392,19 @@ sisusb_reset_text_mode(struct sisusb_usb_data *sisusb, int init)
 
 /* fops */
 
-static int
-sisusb_open(struct inode *inode, struct file *file)
+static int sisusb_open(struct inode *inode, struct file *file)
 {
        struct sisusb_usb_data *sisusb;
        struct usb_interface *interface;
        int subminor = iminor(inode);
 
        interface = usb_find_interface(&sisusb_driver, subminor);
-       if (!interface) {
+       if (!interface)
                return -ENODEV;
-       }
 
        sisusb = usb_get_intfdata(interface);
-       if (!sisusb) {
+       if (!sisusb)
                return -ENODEV;
-       }
 
        mutex_lock(&sisusb->lock);
 
@@ -2444,15 +2420,17 @@ sisusb_open(struct inode *inode, struct file *file)
 
        if (!sisusb->devinit) {
                if (sisusb->sisusb_dev->speed == USB_SPEED_HIGH ||
-                   sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
+                               sisusb->sisusb_dev->speed == USB_SPEED_SUPER) {
                        if (sisusb_init_gfxdevice(sisusb, 0)) {
                                mutex_unlock(&sisusb->lock);
-                               dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
+                               dev_err(&sisusb->sisusb_dev->dev,
+                                               "Failed to initialize device\n");
                                return -EIO;
                        }
                } else {
                        mutex_unlock(&sisusb->lock);
-                       dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
+                       dev_err(&sisusb->sisusb_dev->dev,
+                                       "Device not attached to USB 2.0 hub\n");
                        return -EIO;
                }
        }
@@ -2469,8 +2447,7 @@ sisusb_open(struct inode *inode, struct file *file)
        return 0;
 }
 
-void
-sisusb_delete(struct kref *kref)
+void sisusb_delete(struct kref *kref)
 {
        struct sisusb_usb_data *sisusb = to_sisusb_dev(kref);
 
@@ -2488,8 +2465,7 @@ sisusb_delete(struct kref *kref)
        kfree(sisusb);
 }
 
-static int
-sisusb_release(struct inode *inode, struct file *file)
+static int sisusb_release(struct inode *inode, struct file *file)
 {
        struct sisusb_usb_data *sisusb;
 
@@ -2516,8 +2492,8 @@ sisusb_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-static ssize_t
-sisusb_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
+static ssize_t sisusb_read(struct file *file, char __user *buffer,
+               size_t count, loff_t *ppos)
 {
        struct sisusb_usb_data *sisusb;
        ssize_t bytes_read = 0;
@@ -2539,11 +2515,10 @@ sisusb_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
        }
 
        if ((*ppos) >= SISUSB_PCI_PSEUDO_IOPORTBASE &&
-           (*ppos) <  SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
+                       (*ppos) <  SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
 
-               address = (*ppos) -
-                       SISUSB_PCI_PSEUDO_IOPORTBASE +
-                       SISUSB_PCI_IOPORTBASE;
+               address = (*ppos) - SISUSB_PCI_PSEUDO_IOPORTBASE +
+                               SISUSB_PCI_IOPORTBASE;
 
                /* Read i/o ports
                 * Byte, word and long(32) can be read. As this
@@ -2551,82 +2526,77 @@ sisusb_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
                 * in machine-endianness.
                 */
                switch (count) {
+               case 1:
+                       if (sisusb_read_memio_byte(sisusb, SISUSB_TYPE_IO,
+                                       address, &buf8))
+                               errno = -EIO;
+                       else if (put_user(buf8, (u8 __user *)buffer))
+                               errno = -EFAULT;
+                       else
+                               bytes_read = 1;
 
-                       case 1:
-                               if (sisusb_read_memio_byte(sisusb,
-                                                       SISUSB_TYPE_IO,
-                                                       address, &buf8))
-                                       errno = -EIO;
-                               else if (put_user(buf8, (u8 __user *)buffer))
-                                       errno = -EFAULT;
-                               else
-                                       bytes_read = 1;
-
-                               break;
+                       break;
 
-                       case 2:
-                               if (sisusb_read_memio_word(sisusb,
-                                                       SISUSB_TYPE_IO,
-                                                       address, &buf16))
-                                       errno = -EIO;
-                               else if (put_user(buf16, (u16 __user *)buffer))
-                                       errno = -EFAULT;
-                               else
-                                       bytes_read = 2;
+               case 2:
+                       if (sisusb_read_memio_word(sisusb, SISUSB_TYPE_IO,
+                                       address, &buf16))
+                               errno = -EIO;
+                       else if (put_user(buf16, (u16 __user *)buffer))
+                               errno = -EFAULT;
+                       else
+                               bytes_read = 2;
 
-                               break;
+                       break;
 
-                       case 4:
-                               if (sisusb_read_memio_long(sisusb,
-                                                       SISUSB_TYPE_IO,
-                                                       address, &buf32))
-                                       errno = -EIO;
-                               else if (put_user(buf32, (u32 __user *)buffer))
-                                       errno = -EFAULT;
-                               else
-                                       bytes_read = 4;
+               case 4:
+                       if (sisusb_read_memio_long(sisusb, SISUSB_TYPE_IO,
+                                       address, &buf32))
+                               errno = -EIO;
+                       else if (put_user(buf32, (u32 __user *)buffer))
+                               errno = -EFAULT;
+                       else
+                               bytes_read = 4;
 
-                               break;
+                       break;
 
-                       default:
-                               errno = -EIO;
+               default:
+                       errno = -EIO;
 
                }
 
-       } else if ((*ppos) >= SISUSB_PCI_PSEUDO_MEMBASE &&
-                  (*ppos) <  SISUSB_PCI_PSEUDO_MEMBASE + sisusb->vramsize) {
+       } else if ((*ppos) >= SISUSB_PCI_PSEUDO_MEMBASE && (*ppos) <
+                       SISUSB_PCI_PSEUDO_MEMBASE + sisusb->vramsize) {
 
-               address = (*ppos) -
-                       SISUSB_PCI_PSEUDO_MEMBASE +
-                       SISUSB_PCI_MEMBASE;
+               address = (*ppos) - SISUSB_PCI_PSEUDO_MEMBASE +
+                               SISUSB_PCI_MEMBASE;
 
                /* Read video ram
                 * Remember: Data delivered is never endian-corrected
                 */
                errno = sisusb_read_mem_bulk(sisusb, address,
-                                       NULL, count, buffer, &bytes_read);
+                               NULL, count, buffer, &bytes_read);
 
                if (bytes_read)
                        errno = bytes_read;
 
        } else  if ((*ppos) >= SISUSB_PCI_PSEUDO_MMIOBASE &&
-                   (*ppos) <  SISUSB_PCI_PSEUDO_MMIOBASE + SISUSB_PCI_MMIOSIZE) {
+                               (*ppos) <  SISUSB_PCI_PSEUDO_MMIOBASE +
+                               SISUSB_PCI_MMIOSIZE) {
 
-               address = (*ppos) -
-                       SISUSB_PCI_PSEUDO_MMIOBASE +
-                       SISUSB_PCI_MMIOBASE;
+               address = (*ppos) - SISUSB_PCI_PSEUDO_MMIOBASE +
+                               SISUSB_PCI_MMIOBASE;
 
                /* Read MMIO
                 * Remember: Data delivered is never endian-corrected
                 */
                errno = sisusb_read_mem_bulk(sisusb, address,
-                                       NULL, count, buffer, &bytes_read);
+                               NULL, count, buffer, &bytes_read);
 
                if (bytes_read)
                        errno = bytes_read;
 
        } else  if ((*ppos) >= SISUSB_PCI_PSEUDO_PCIBASE &&
-                   (*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE + 0x5c) {
+                       (*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE + 0x5c) {
 
                if (count != 4) {
                        mutex_unlock(&sisusb->lock);
@@ -2658,9 +2628,8 @@ sisusb_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
        return errno ? errno : bytes_read;
 }
 
-static ssize_t
-sisusb_write(struct file *file, const char __user *buffer, size_t count,
-                                                               loff_t *ppos)
+static ssize_t sisusb_write(struct file *file, const char __user *buffer,
+               size_t count, loff_t *ppos)
 {
        struct sisusb_usb_data *sisusb;
        int errno = 0;
@@ -2682,11 +2651,10 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
        }
 
        if ((*ppos) >= SISUSB_PCI_PSEUDO_IOPORTBASE &&
-           (*ppos) <  SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
+                       (*ppos) <  SISUSB_PCI_PSEUDO_IOPORTBASE + 128) {
 
-               address = (*ppos) -
-                       SISUSB_PCI_PSEUDO_IOPORTBASE +
-                       SISUSB_PCI_IOPORTBASE;
+               address = (*ppos) - SISUSB_PCI_PSEUDO_IOPORTBASE +
+                               SISUSB_PCI_IOPORTBASE;
 
                /* Write i/o ports
                 * Byte, word and long(32) can be written. As this
@@ -2694,53 +2662,49 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
                 * in machine-endianness.
                 */
                switch (count) {
+               case 1:
+                       if (get_user(buf8, (u8 __user *)buffer))
+                               errno = -EFAULT;
+                       else if (sisusb_write_memio_byte(sisusb,
+                                       SISUSB_TYPE_IO, address, buf8))
+                               errno = -EIO;
+                       else
+                               bytes_written = 1;
 
-                       case 1:
-                               if (get_user(buf8, (u8 __user *)buffer))
-                                       errno = -EFAULT;
-                               else if (sisusb_write_memio_byte(sisusb,
-                                                       SISUSB_TYPE_IO,
-                                                       address, buf8))
-                                       errno = -EIO;
-                               else
-                                       bytes_written = 1;
-
-                               break;
+                       break;
 
-                       case 2:
-                               if (get_user(buf16, (u16 __user *)buffer))
-                                       errno = -EFAULT;
-                               else if (sisusb_write_memio_word(sisusb,
-                                                       SISUSB_TYPE_IO,
-                                                       address, buf16))
-                                       errno = -EIO;
-                               else
-                                       bytes_written = 2;
+               case 2:
+                       if (get_user(buf16, (u16 __user *)buffer))
+                               errno = -EFAULT;
+                       else if (sisusb_write_memio_word(sisusb,
+                                       SISUSB_TYPE_IO, address, buf16))
+                               errno = -EIO;
+                       else
+                               bytes_written = 2;
 
-                               break;
+                       break;
 
-                       case 4:
-                               if (get_user(buf32, (u32 __user *)buffer))
-                                       errno = -EFAULT;
-                               else if (sisusb_write_memio_long(sisusb,
-                                                       SISUSB_TYPE_IO,
-                                                       address, buf32))
-                                       errno = -EIO;
-                               else
-                                       bytes_written = 4;
+               case 4:
+                       if (get_user(buf32, (u32 __user *)buffer))
+                               errno = -EFAULT;
+                       else if (sisusb_write_memio_long(sisusb,
+                                       SISUSB_TYPE_IO, address, buf32))
+                               errno = -EIO;
+                       else
+                               bytes_written = 4;
 
-                               break;
+                       break;
 
-                       default:
-                               errno = -EIO;
+               default:
+                       errno = -EIO;
                }
 
        } else if ((*ppos) >= SISUSB_PCI_PSEUDO_MEMBASE &&
-                  (*ppos) <  SISUSB_PCI_PSEUDO_MEMBASE + sisusb->vramsize) {
+                       (*ppos) <  SISUSB_PCI_PSEUDO_MEMBASE +
+                       sisusb->vramsize) {
 
-               address = (*ppos) -
-                       SISUSB_PCI_PSEUDO_MEMBASE +
-                       SISUSB_PCI_MEMBASE;
+               address = (*ppos) - SISUSB_PCI_PSEUDO_MEMBASE +
+                               SISUSB_PCI_MEMBASE;
 
                /* Write video ram.
                 * Buffer is copied 1:1, therefore, on big-endian
@@ -2749,17 +2713,17 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
                 * mode or if YUV data is being transferred).
                 */
                errno = sisusb_write_mem_bulk(sisusb, address, NULL,
-                                       count, buffer, 0, &bytes_written);
+                               count, buffer, 0, &bytes_written);
 
                if (bytes_written)
                        errno = bytes_written;
 
        } else  if ((*ppos) >= SISUSB_PCI_PSEUDO_MMIOBASE &&
-                   (*ppos) <  SISUSB_PCI_PSEUDO_MMIOBASE + SISUSB_PCI_MMIOSIZE) {
+                       (*ppos) <  SISUSB_PCI_PSEUDO_MMIOBASE +
+                       SISUSB_PCI_MMIOSIZE) {
 
-               address = (*ppos) -
-                       SISUSB_PCI_PSEUDO_MMIOBASE +
-                       SISUSB_PCI_MMIOBASE;
+               address = (*ppos) - SISUSB_PCI_PSEUDO_MMIOBASE +
+                               SISUSB_PCI_MMIOBASE;
 
                /* Write MMIO.
                 * Buffer is copied 1:1, therefore, on big-endian
@@ -2767,13 +2731,14 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
                 * in advance.
                 */
                errno = sisusb_write_mem_bulk(sisusb, address, NULL,
-                                       count, buffer, 0, &bytes_written);
+                               count, buffer, 0, &bytes_written);
 
                if (bytes_written)
                        errno = bytes_written;
 
        } else  if ((*ppos) >= SISUSB_PCI_PSEUDO_PCIBASE &&
-                   (*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE + SISUSB_PCI_PCONFSIZE) {
+                               (*ppos) <= SISUSB_PCI_PSEUDO_PCIBASE +
+                               SISUSB_PCI_PCONFSIZE) {
 
                if (count != 4) {
                        mutex_unlock(&sisusb->lock);
@@ -2807,8 +2772,7 @@ sisusb_write(struct file *file, const char __user *buffer, size_t count,
        return errno ? errno : bytes_written;
 }
 
-static loff_t
-sisusb_lseek(struct file *file, loff_t offset, int orig)
+static loff_t sisusb_lseek(struct file *file, loff_t offset, int orig)
 {
        struct sisusb_usb_data *sisusb;
        loff_t ret;
@@ -2831,9 +2795,8 @@ sisusb_lseek(struct file *file, loff_t offset, int orig)
        return ret;
 }
 
-static int
-sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y,
-                                                       unsigned long arg)
+static int sisusb_handle_command(struct sisusb_usb_data *sisusb,
+               struct sisusb_command *y, unsigned long arg)
 {
        int     retval, port, length;
        u32     address;
@@ -2849,105 +2812,99 @@ sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y,
                SISUSB_PCI_IOPORTBASE;
 
        switch (y->operation) {
-               case SUCMD_GET:
-                       retval = sisusb_getidxreg(sisusb, port,
-                                                        y->data0, &y->data1);
-                       if (!retval) {
-                               if (copy_to_user((void __user *)arg, y,
-                                                       sizeof(*y)))
-                                       retval = -EFAULT;
-                       }
-                       break;
+       case SUCMD_GET:
+               retval = sisusb_getidxreg(sisusb, port, y->data0, &y->data1);
+               if (!retval) {
+                       if (copy_to_user((void __user *)arg, y, sizeof(*y)))
+                               retval = -EFAULT;
+               }
+               break;
 
-               case SUCMD_SET:
-                       retval = sisusb_setidxreg(sisusb, port,
-                                               y->data0, y->data1);
-                       break;
+       case SUCMD_SET:
+               retval = sisusb_setidxreg(sisusb, port, y->data0, y->data1);
+               break;
 
-               case SUCMD_SETOR:
-                       retval = sisusb_setidxregor(sisusb, port,
-                                               y->data0, y->data1);
-                       break;
+       case SUCMD_SETOR:
+               retval = sisusb_setidxregor(sisusb, port, y->data0, y->data1);
+               break;
 
-               case SUCMD_SETAND:
-                       retval = sisusb_setidxregand(sisusb, port,
-                                               y->data0, y->data1);
-                       break;
+       case SUCMD_SETAND:
+               retval = sisusb_setidxregand(sisusb, port, y->data0, y->data1);
+               break;
 
-               case SUCMD_SETANDOR:
-                       retval = sisusb_setidxregandor(sisusb, port,
-                                               y->data0, y->data1, y->data2);
-                       break;
+       case SUCMD_SETANDOR:
+               retval = sisusb_setidxregandor(sisusb, port, y->data0,
+                               y->data1, y->data2);
+               break;
 
-               case SUCMD_SETMASK:
-                       retval = sisusb_setidxregmask(sisusb, port,
-                                               y->data0, y->data1, y->data2);
-                       break;
+       case SUCMD_SETMASK:
+               retval = sisusb_setidxregmask(sisusb, port, y->data0,
+                               y->data1, y->data2);
+               break;
 
-               case SUCMD_CLRSCR:
-                       /* Gfx core must be initialized */
-                       if (!sisusb->gfxinit)
-                               return -ENODEV;
+       case SUCMD_CLRSCR:
+               /* Gfx core must be initialized */
+               if (!sisusb->gfxinit)
+                       return -ENODEV;
 
-                       length = (y->data0 << 16) | (y->data1 << 8) | y->data2;
-                       address = y->data3 -
-                               SISUSB_PCI_PSEUDO_MEMBASE +
+               length = (y->data0 << 16) | (y->data1 << 8) | y->data2;
+               address = y->data3 - SISUSB_PCI_PSEUDO_MEMBASE +
                                SISUSB_PCI_MEMBASE;
-                       retval = sisusb_clear_vram(sisusb, address, length);
-                       break;
+               retval = sisusb_clear_vram(sisusb, address, length);
+               break;
 
-               case SUCMD_HANDLETEXTMODE:
-                       retval = 0;
+       case SUCMD_HANDLETEXTMODE:
+               retval = 0;
 #ifdef INCL_SISUSB_CON
-                       /* Gfx core must be initialized, SiS_Pr must exist */
-                       if (!sisusb->gfxinit || !sisusb->SiS_Pr)
-                               return -ENODEV;
+               /* Gfx core must be initialized, SiS_Pr must exist */
+               if (!sisusb->gfxinit || !sisusb->SiS_Pr)
+                       return -ENODEV;
 
-                       switch (y->data0) {
-                       case 0:
-                               retval = sisusb_reset_text_mode(sisusb, 0);
-                               break;
-                       case 1:
-                               sisusb->textmodedestroyed = 1;
-                               break;
-                       }
-#endif
+               switch (y->data0) {
+               case 0:
+                       retval = sisusb_reset_text_mode(sisusb, 0);
+                       break;
+               case 1:
+                       sisusb->textmodedestroyed = 1;
                        break;
+               }
+#endif
+               break;
 
 #ifdef INCL_SISUSB_CON
-               case SUCMD_SETMODE:
-                       /* Gfx core must be initialized, SiS_Pr must exist */
-                       if (!sisusb->gfxinit || !sisusb->SiS_Pr)
-                               return -ENODEV;
+       case SUCMD_SETMODE:
+               /* Gfx core must be initialized, SiS_Pr must exist */
+               if (!sisusb->gfxinit || !sisusb->SiS_Pr)
+                       return -ENODEV;
 
-                       retval = 0;
+               retval = 0;
 
-                       sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30;
-                       sisusb->SiS_Pr->sisusb = (void *)sisusb;
+               sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30;
+               sisusb->SiS_Pr->sisusb = (void *)sisusb;
 
-                       if (SiSUSBSetMode(sisusb->SiS_Pr, y->data3))
-                               retval = -EINVAL;
+               if (SiSUSBSetMode(sisusb->SiS_Pr, y->data3))
+                       retval = -EINVAL;
 
-                       break;
+               break;
 
-               case SUCMD_SETVESAMODE:
-                       /* Gfx core must be initialized, SiS_Pr must exist */
-                       if (!sisusb->gfxinit || !sisusb->SiS_Pr)
-                               return -ENODEV;
+       case SUCMD_SETVESAMODE:
+               /* Gfx core must be initialized, SiS_Pr must exist */
+               if (!sisusb->gfxinit || !sisusb->SiS_Pr)
+                       return -ENODEV;
 
-                       retval = 0;
+               retval = 0;
 
-                       sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30;
-                       sisusb->SiS_Pr->sisusb = (void *)sisusb;
+               sisusb->SiS_Pr->IOAddress = SISUSB_PCI_IOPORTBASE + 0x30;
+               sisusb->SiS_Pr->sisusb = (void *)sisusb;
 
-                       if (SiSUSBSetVESAMode(sisusb->SiS_Pr, y->data3))
-                               retval = -EINVAL;
+               if (SiSUSBSetVESAMode(sisusb->SiS_Pr, y->data3))
+                       retval = -EINVAL;
 
-                       break;
+               break;
 #endif
 
-               default:
-                       retval = -EINVAL;
+       default:
+               retval = -EINVAL;
        }
 
        if (retval > 0)
@@ -2956,8 +2913,7 @@ sisusb_handle_command(struct sisusb_usb_data *sisusb, struct sisusb_command *y,
        return retval;
 }
 
-static long
-sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct sisusb_usb_data *sisusb;
        struct sisusb_info x;
@@ -2978,52 +2934,51 @@ sisusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        }
 
        switch (cmd) {
+       case SISUSB_GET_CONFIG_SIZE:
 
-               case SISUSB_GET_CONFIG_SIZE:
-
-                       if (put_user(sizeof(x), argp))
-                               retval = -EFAULT;
+               if (put_user(sizeof(x), argp))
+                       retval = -EFAULT;
 
-                       break;
+               break;
 
-               case SISUSB_GET_CONFIG:
-
-                       x.sisusb_id         = SISUSB_ID;
-                       x.sisusb_version    = SISUSB_VERSION;
-                       x.sisusb_revision   = SISUSB_REVISION;
-                       x.sisusb_patchlevel = SISUSB_PATCHLEVEL;
-                       x.sisusb_gfxinit    = sisusb->gfxinit;
-                       x.sisusb_vrambase   = SISUSB_PCI_PSEUDO_MEMBASE;
-                       x.sisusb_mmiobase   = SISUSB_PCI_PSEUDO_MMIOBASE;
-                       x.sisusb_iobase     = SISUSB_PCI_PSEUDO_IOPORTBASE;
-                       x.sisusb_pcibase    = SISUSB_PCI_PSEUDO_PCIBASE;
-                       x.sisusb_vramsize   = sisusb->vramsize;
-                       x.sisusb_minor      = sisusb->minor;
-                       x.sisusb_fbdevactive= 0;
+       case SISUSB_GET_CONFIG:
+
+               x.sisusb_id = SISUSB_ID;
+               x.sisusb_version = SISUSB_VERSION;
+               x.sisusb_revision = SISUSB_REVISION;
+               x.sisusb_patchlevel = SISUSB_PATCHLEVEL;
+               x.sisusb_gfxinit = sisusb->gfxinit;
+               x.sisusb_vrambase = SISUSB_PCI_PSEUDO_MEMBASE;
+               x.sisusb_mmiobase = SISUSB_PCI_PSEUDO_MMIOBASE;
+               x.sisusb_iobase = SISUSB_PCI_PSEUDO_IOPORTBASE;
+               x.sisusb_pcibase = SISUSB_PCI_PSEUDO_PCIBASE;
+               x.sisusb_vramsize = sisusb->vramsize;
+               x.sisusb_minor = sisusb->minor;
+               x.sisusb_fbdevactive = 0;
 #ifdef INCL_SISUSB_CON
-                       x.sisusb_conactive  = sisusb->haveconsole ? 1 : 0;
+               x.sisusb_conactive  = sisusb->haveconsole ? 1 : 0;
 #else
-                       x.sisusb_conactive  = 0;
+               x.sisusb_conactive  = 0;
 #endif
-                       memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
+               memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
 
-                       if (copy_to_user((void __user *)arg, &x, sizeof(x)))
-                               retval = -EFAULT;
+               if (copy_to_user((void __user *)arg, &x, sizeof(x)))
+                       retval = -EFAULT;
 
-                       break;
+               break;
 
-               case SISUSB_COMMAND:
+       case SISUSB_COMMAND:
 
-                       if (copy_from_user(&y, (void __user *)arg, sizeof(y)))
-                               retval = -EFAULT;
-                       else
-                               retval = sisusb_handle_command(sisusb, &y, arg);
+               if (copy_from_user(&y, (void __user *)arg, sizeof(y)))
+                       retval = -EFAULT;
+               else
+                       retval = sisusb_handle_command(sisusb, &y, arg);
 
-                       break;
+               break;
 
-               default:
-                       retval = -ENOTTY;
-                       break;
+       default:
+               retval = -ENOTTY;
+               break;
        }
 
 err_out:
@@ -3032,20 +2987,20 @@ err_out:
 }
 
 #ifdef SISUSB_NEW_CONFIG_COMPAT
-static long
-sisusb_compat_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+static long sisusb_compat_ioctl(struct file *f, unsigned int cmd,
+               unsigned long arg)
 {
        long retval;
 
        switch (cmd) {
-               case SISUSB_GET_CONFIG_SIZE:
-               case SISUSB_GET_CONFIG:
-               case SISUSB_COMMAND:
-                       retval = sisusb_ioctl(f, cmd, arg);
-                       return retval;
+       case SISUSB_GET_CONFIG_SIZE:
+       case SISUSB_GET_CONFIG:
+       case SISUSB_COMMAND:
+               retval = sisusb_ioctl(f, cmd, arg);
+               return retval;
 
-               default:
-                       return -ENOIOCTLCMD;
+       default:
+               return -ENOIOCTLCMD;
        }
 }
 #endif
@@ -3070,21 +3025,20 @@ static struct usb_class_driver usb_sisusb_class = {
 };
 
 static int sisusb_probe(struct usb_interface *intf,
-                       const struct usb_device_id *id)
+               const struct usb_device_id *id)
 {
        struct usb_device *dev = interface_to_usbdev(intf);
        struct sisusb_usb_data *sisusb;
        int retval = 0, i;
 
        dev_info(&dev->dev, "USB2VGA dongle found at address %d\n",
-               dev->devnum);
+                       dev->devnum);
 
        /* Allocate memory for our private */
        sisusb = kzalloc(sizeof(*sisusb), GFP_KERNEL);
-       if (!sisusb) {
-               dev_err(&dev->dev, "Failed to allocate memory for private data\n");
+       if (!sisusb)
                return -ENOMEM;
-       }
+
        kref_init(&sisusb->kref);
 
        mutex_init(&(sisusb->lock));
@@ -3092,8 +3046,9 @@ static int sisusb_probe(struct usb_interface *intf,
        /* Register device */
        retval = usb_register_dev(intf, &usb_sisusb_class);
        if (retval) {
-               dev_err(&sisusb->sisusb_dev->dev, "Failed to get a minor for device %d\n",
-                       dev->devnum);
+               dev_err(&sisusb->sisusb_dev->dev,
+                               "Failed to get a minor for device %d\n",
+                               dev->devnum);
                retval = -ENODEV;
                goto error_1;
        }
@@ -3108,8 +3063,8 @@ static int sisusb_probe(struct usb_interface *intf,
 
        /* Allocate buffers */
        sisusb->ibufsize = SISUSB_IBUF_SIZE;
-       if (!(sisusb->ibuf = kmalloc(SISUSB_IBUF_SIZE, GFP_KERNEL))) {
-               dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for input buffer");
+       sisusb->ibuf = kmalloc(SISUSB_IBUF_SIZE, GFP_KERNEL);
+       if (!sisusb->ibuf) {
                retval = -ENOMEM;
                goto error_2;
        }
@@ -3117,20 +3072,20 @@ static int sisusb_probe(struct usb_interface *intf,
        sisusb->numobufs = 0;
        sisusb->obufsize = SISUSB_OBUF_SIZE;
        for (i = 0; i < NUMOBUFS; i++) {
-               if (!(sisusb->obuf[i] = kmalloc(SISUSB_OBUF_SIZE, GFP_KERNEL))) {
+               sisusb->obuf[i] = kmalloc(SISUSB_OBUF_SIZE, GFP_KERNEL);
+               if (!sisusb->obuf[i]) {
                        if (i == 0) {
-                               dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for output buffer\n");
                                retval = -ENOMEM;
                                goto error_3;
                        }
                        break;
-               } else
-                       sisusb->numobufs++;
-
+               }
+               sisusb->numobufs++;
        }
 
        /* Allocate URBs */
-       if (!(sisusb->sisurbin = usb_alloc_urb(0, GFP_KERNEL))) {
+       sisusb->sisurbin = usb_alloc_urb(0, GFP_KERNEL);
+       if (!sisusb->sisurbin) {
                dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate URBs\n");
                retval = -ENOMEM;
                goto error_3;
@@ -3138,8 +3093,10 @@ static int sisusb_probe(struct usb_interface *intf,
        sisusb->completein = 1;
 
        for (i = 0; i < sisusb->numobufs; i++) {
-               if (!(sisusb->sisurbout[i] = usb_alloc_urb(0, GFP_KERNEL))) {
-                       dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate URBs\n");
+               sisusb->sisurbout[i] = usb_alloc_urb(0, GFP_KERNEL);
+               if (!sisusb->sisurbout[i]) {
+                       dev_err(&sisusb->sisusb_dev->dev,
+                                       "Failed to allocate URBs\n");
                        retval = -ENOMEM;
                        goto error_4;
                }
@@ -3148,12 +3105,15 @@ static int sisusb_probe(struct usb_interface *intf,
                sisusb->urbstatus[i] = 0;
        }
 
-       dev_info(&sisusb->sisusb_dev->dev, "Allocated %d output buffers\n", sisusb->numobufs);
+       dev_info(&sisusb->sisusb_dev->dev, "Allocated %d output buffers\n",
+                       sisusb->numobufs);
 
 #ifdef INCL_SISUSB_CON
        /* Allocate our SiS_Pr */
-       if (!(sisusb->SiS_Pr = kmalloc(sizeof(struct SiS_Private), GFP_KERNEL))) {
-               dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate SiS_Pr\n");
+       sisusb->SiS_Pr = kmalloc(sizeof(struct SiS_Private), GFP_KERNEL);
+       if (!sisusb->SiS_Pr) {
+               retval = -ENOMEM;
+               goto error_4;
        }
 #endif
 
@@ -3170,17 +3130,18 @@ static int sisusb_probe(struct usb_interface *intf,
        if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
                int initscreen = 1;
 #ifdef INCL_SISUSB_CON
-               if (sisusb_first_vc > 0 &&
-                   sisusb_last_vc > 0 &&
-                   sisusb_first_vc <= sisusb_last_vc &&
-                   sisusb_last_vc <= MAX_NR_CONSOLES)
+               if (sisusb_first_vc > 0 && sisusb_last_vc > 0 &&
+                               sisusb_first_vc <= sisusb_last_vc &&
+                               sisusb_last_vc <= MAX_NR_CONSOLES)
                        initscreen = 0;
 #endif
                if (sisusb_init_gfxdevice(sisusb, initscreen))
-                       dev_err(&sisusb->sisusb_dev->dev, "Failed to early initialize device\n");
+                       dev_err(&sisusb->sisusb_dev->dev,
+                                       "Failed to early initialize device\n");
 
        } else
-               dev_info(&sisusb->sisusb_dev->dev, "Not attached to USB 2.0 hub, deferring init\n");
+               dev_info(&sisusb->sisusb_dev->dev,
+                               "Not attached to USB 2.0 hub, deferring init\n");
 
        sisusb->ready = 1;
 
@@ -3254,7 +3215,7 @@ static const struct usb_device_id sisusb_table[] = {
        { }
 };
 
-MODULE_DEVICE_TABLE (usb, sisusb_table);
+MODULE_DEVICE_TABLE(usb, sisusb_table);
 
 static struct usb_driver sisusb_driver = {
        .name =         "sisusb",
index fec3f1128fdcab5101db4f929d946f0dd13f1046..33ff49c4cea428b2a257d63ff52a241a2031e2be 100644 (file)
@@ -349,7 +349,7 @@ struct mon_bus *mon_bus_lookup(unsigned int num)
 static int __init mon_init(void)
 {
        struct usb_bus *ubus;
-       int rc;
+       int rc, id;
 
        if ((rc = mon_text_init()) != 0)
                goto err_text;
@@ -365,12 +365,11 @@ static int __init mon_init(void)
        }
        // MOD_INC_USE_COUNT(which_module?);
 
-       mutex_lock(&usb_bus_list_lock);
-       list_for_each_entry (ubus, &usb_bus_list, bus_list) {
+       mutex_lock(&usb_bus_idr_lock);
+       idr_for_each_entry(&usb_bus_idr, ubus, id)
                mon_bus_init(ubus);
-       }
        usb_register_notify(&mon_nb);
-       mutex_unlock(&usb_bus_list_lock);
+       mutex_unlock(&usb_bus_idr_lock);
        return 0;
 
 err_reg:
index b2685e75a6835fe2b4615b3f86f7aef378fedcae..3eaa4ba6867d408a516abec7bb4180d5c9f05d40 100644 (file)
@@ -348,7 +348,9 @@ static int ux500_suspend(struct device *dev)
        struct ux500_glue       *glue = dev_get_drvdata(dev);
        struct musb             *musb = glue_to_musb(glue);
 
-       usb_phy_set_suspend(musb->xceiv, 1);
+       if (musb)
+               usb_phy_set_suspend(musb->xceiv, 1);
+
        clk_disable_unprepare(glue->clk);
 
        return 0;
@@ -366,7 +368,8 @@ static int ux500_resume(struct device *dev)
                return ret;
        }
 
-       usb_phy_set_suspend(musb->xceiv, 0);
+       if (musb)
+               usb_phy_set_suspend(musb->xceiv, 0);
 
        return 0;
 }
index 0d19a6d61a71f7ccb7a55e1b90a7d5d5f27db535..970a30e155cb51bfd1d15126d8cdc65f9adf579c 100644 (file)
@@ -1599,6 +1599,8 @@ static int msm_otg_read_dt(struct platform_device *pdev, struct msm_otg *motg)
                                                &motg->id.nb);
                if (ret < 0) {
                        dev_err(&pdev->dev, "register ID notifier failed\n");
+                       extcon_unregister_notifier(motg->vbus.extcon,
+                                                  EXTCON_USB, &motg->vbus.nb);
                        return ret;
                }
 
@@ -1660,15 +1662,6 @@ static int msm_otg_probe(struct platform_device *pdev)
        if (!motg)
                return -ENOMEM;
 
-       pdata = dev_get_platdata(&pdev->dev);
-       if (!pdata) {
-               if (!np)
-                       return -ENXIO;
-               ret = msm_otg_read_dt(pdev, motg);
-               if (ret)
-                       return ret;
-       }
-
        motg->phy.otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
                                     GFP_KERNEL);
        if (!motg->phy.otg)
@@ -1710,6 +1703,15 @@ static int msm_otg_probe(struct platform_device *pdev)
        if (!motg->regs)
                return -ENOMEM;
 
+       pdata = dev_get_platdata(&pdev->dev);
+       if (!pdata) {
+               if (!np)
+                       return -ENXIO;
+               ret = msm_otg_read_dt(pdev, motg);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * NOTE: The PHYs can be multiplexed between the chipidea controller
         * and the dwc3 controller, using a single bit. It is important that
@@ -1717,8 +1719,10 @@ static int msm_otg_probe(struct platform_device *pdev)
         */
        if (motg->phy_number) {
                phy_select = devm_ioremap_nocache(&pdev->dev, USB2_PHY_SEL, 4);
-               if (!phy_select)
-                       return -ENOMEM;
+               if (!phy_select) {
+                       ret = -ENOMEM;
+                       goto unregister_extcon;
+               }
                /* Enable second PHY with the OTG port */
                writel(0x1, phy_select);
        }
@@ -1728,7 +1732,8 @@ static int msm_otg_probe(struct platform_device *pdev)
        motg->irq = platform_get_irq(pdev, 0);
        if (motg->irq < 0) {
                dev_err(&pdev->dev, "platform_get_irq failed\n");
-               return motg->irq;
+               ret = motg->irq;
+               goto unregister_extcon;
        }
 
        regs[0].supply = "vddcx";
@@ -1737,7 +1742,7 @@ static int msm_otg_probe(struct platform_device *pdev)
 
        ret = devm_regulator_bulk_get(motg->phy.dev, ARRAY_SIZE(regs), regs);
        if (ret)
-               return ret;
+               goto unregister_extcon;
 
        motg->vddcx = regs[0].consumer;
        motg->v3p3  = regs[1].consumer;
@@ -1834,6 +1839,12 @@ disable_clks:
        clk_disable_unprepare(motg->clk);
        if (!IS_ERR(motg->core_clk))
                clk_disable_unprepare(motg->core_clk);
+unregister_extcon:
+       extcon_unregister_notifier(motg->id.extcon,
+                                  EXTCON_USB_HOST, &motg->id.nb);
+       extcon_unregister_notifier(motg->vbus.extcon,
+                                  EXTCON_USB, &motg->vbus.nb);
+
        return ret;
 }
 
index c2936dc48ca7b45b0e5c186859d2879569719323..00bfea01be6501d9b53ccc7975b069c24bc57d7b 100644 (file)
@@ -220,7 +220,7 @@ static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
 /* Return true if the vbus is there */
 static bool mxs_phy_get_vbus_status(struct mxs_phy *mxs_phy)
 {
-       unsigned int vbus_value;
+       unsigned int vbus_value = 0;
 
        if (!mxs_phy->regmap_anatop)
                return false;
index c0f5c652d272c8959f5b3d59461e1af139d6f7fd..b4de70ee16d3cfb56f4f3d5eb2047c5feb729a3a 100644 (file)
@@ -46,7 +46,7 @@ static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
        return -EINVAL;
 }
 
-static struct usbhs_pkt_handle usbhsf_null_handler = {
+static const struct usbhs_pkt_handle usbhsf_null_handler = {
        .prepare = usbhsf_null_handle,
        .try_run = usbhsf_null_handle,
 };
@@ -422,12 +422,12 @@ static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
        return 0;
 }
 
-struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
+const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
        .prepare = usbhs_dcp_dir_switch_to_write,
        .try_run = usbhs_dcp_dir_switch_done,
 };
 
-struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
+const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
        .prepare = usbhs_dcp_dir_switch_to_read,
        .try_run = usbhs_dcp_dir_switch_done,
 };
@@ -449,7 +449,7 @@ static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
        return pkt->handler->prepare(pkt, is_done);
 }
 
-struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
+const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
        .prepare = usbhsf_dcp_data_stage_try_push,
 };
 
@@ -488,7 +488,7 @@ static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
        return pkt->handler->prepare(pkt, is_done);
 }
 
-struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
+const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
        .prepare = usbhsf_dcp_data_stage_prepare_pop,
 };
 
@@ -600,7 +600,7 @@ static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
        return usbhsf_pio_try_push(pkt, is_done);
 }
 
-struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
+const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
        .prepare = usbhsf_pio_prepare_push,
        .try_run = usbhsf_pio_try_push,
 };
@@ -730,7 +730,7 @@ usbhs_fifo_read_busy:
        return ret;
 }
 
-struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
+const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
        .prepare = usbhsf_prepare_pop,
        .try_run = usbhsf_pio_try_pop,
 };
@@ -747,7 +747,7 @@ static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
        return 0;
 }
 
-struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
+const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
        .prepare = usbhsf_ctrl_stage_end,
        .try_run = usbhsf_ctrl_stage_end,
 };
@@ -934,7 +934,7 @@ static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
        return 0;
 }
 
-struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
+const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
        .prepare        = usbhsf_dma_prepare_push,
        .dma_done       = usbhsf_dma_push_done,
 };
@@ -1182,7 +1182,7 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
                return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
 }
 
-struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
+const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
        .prepare        = usbhsf_dma_prepare_pop,
        .try_run        = usbhsf_dma_try_pop,
        .dma_done       = usbhsf_dma_pop_done
index c7d9b86d51bf84eb30215724ade608649b24fb31..8b98507d7abc0ea43e9c4931e68971689fcae20a 100644 (file)
@@ -54,7 +54,7 @@ struct usbhs_pkt_handle;
 struct usbhs_pkt {
        struct list_head node;
        struct usbhs_pipe *pipe;
-       struct usbhs_pkt_handle *handler;
+       const struct usbhs_pkt_handle *handler;
        void (*done)(struct usbhs_priv *priv,
                     struct usbhs_pkt *pkt);
        struct work_struct work;
@@ -86,18 +86,18 @@ void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe);
 /*
  * packet info
  */
-extern struct usbhs_pkt_handle usbhs_fifo_pio_push_handler;
-extern struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler;
-extern struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler;
+extern const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler;
+extern const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler;
+extern const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler;
 
-extern struct usbhs_pkt_handle usbhs_fifo_dma_push_handler;
-extern struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler;
+extern const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler;
+extern const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler;
 
-extern struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler;
-extern struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler;
+extern const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler;
+extern const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler;
 
-extern struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler;
-extern struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler;
+extern const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler;
+extern const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler;
 
 void usbhs_pkt_init(struct usbhs_pkt *pkt);
 void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
index 3212ab51e844b71190469a367bede9ba7a4406b7..7835747f980398a1e8a398e52c02e567bd96b1c7 100644 (file)
@@ -38,7 +38,7 @@ struct usbhs_pipe {
 #define USBHS_PIPE_FLAGS_IS_DIR_HOST           (1 << 2)
 #define USBHS_PIPE_FLAGS_IS_RUNNING            (1 << 3)
 
-       struct usbhs_pkt_handle *handler;
+       const struct usbhs_pkt_handle *handler;
 
        void *mod_private;
 };
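
Note: the renesas_usbhs hunks above constify the usbhs_pkt_handle operation tables and every pointer that refers to them; the tables are only ever read, so const lets them live in read-only data and turns accidental writes into build errors. A minimal sketch of the idiom with a hypothetical ops structure (not the driver's own types):

struct pkt_ops {
	int (*prepare)(void *pkt);
	int (*try_run)(void *pkt);
};

static int noop(void *pkt) { return 0; }

/* read-only operation table: can be placed in .rodata */
static const struct pkt_ops null_ops = {
	.prepare = noop,
	.try_run = noop,
};

/* users hold pointer-to-const, so they cannot modify the table */
static const struct pkt_ops *handler = &null_ops;
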
index 3806e7014199d13c892f32e8a03c1b37e286bddb..a66b01bb1fa102c27f70588ced24a1120a6b5f7b 100644 (file)
@@ -147,10 +147,7 @@ static int usb_console_setup(struct console *co, char *options)
                        kref_get(&tty->driver->kref);
                        __module_get(tty->driver->owner);
                        tty->ops = &usb_console_fake_tty_ops;
-                       if (tty_init_termios(tty)) {
-                               retval = -ENOMEM;
-                               goto put_tty;
-                       }
+                       tty_init_termios(tty);
                        tty_port_tty_set(&port->port, tty);
                }
 
@@ -185,7 +182,6 @@ static int usb_console_setup(struct console *co, char *options)
 
  fail:
        tty_port_tty_set(&port->port, NULL);
- put_tty:
        tty_kref_put(tty);
  reset_open_count:
        port->port.count = 0;
index 9b90ad747d8766797bd1737ea8f0be9347a5560d..987813b8a7f91e27b6a582a44ca88254dbd49c13 100644 (file)
@@ -99,6 +99,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
        { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
        { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
+       { USB_DEVICE(0x10C4, 0x81D7) }, /* IAI Corp. RCB-CV-USB USB to RS485 Adaptor */
        { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
        { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
        { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
index 01bf533928192d6f2f4b77f44995bd63fd6d1be2..b283eb8b86d68f5b53c336ee70bc8a6cd609adce 100644 (file)
@@ -1165,8 +1165,7 @@ static void cypress_read_int_callback(struct urb *urb)
 
        /* hangup, as defined in acm.c... this might be a bad place for it
         * though */
-       if (tty && !(tty->termios.c_cflag & CLOCAL) &&
-                       !(priv->current_status & UART_CD)) {
+       if (tty && !C_CLOCAL(tty) && !(priv->current_status & UART_CD)) {
                dev_dbg(dev, "%s - calling hangup\n", __func__);
                tty_hangup(tty);
                goto continue_read;
index 12b0e67473ba2509193bf06048b4c0ff627430d5..010a42a92688954d7b65a82472d28fad346c80c3 100644 (file)
@@ -695,11 +695,11 @@ static void digi_set_termios(struct tty_struct *tty,
                arg = -1;
 
                /* reassert DTR and (maybe) RTS on transition from B0 */
-               if ((old_cflag&CBAUD) == B0) {
+               if ((old_cflag & CBAUD) == B0) {
                        /* don't set RTS if using hardware flow control */
                        /* and throttling input */
                        modem_signals = TIOCM_DTR;
-                       if (!(tty->termios.c_cflag & CRTSCTS) ||
+                       if (!C_CRTSCTS(tty) ||
                            !test_bit(TTY_THROTTLED, &tty->flags))
                                modem_signals |= TIOCM_RTS;
                        digi_set_modem_signals(port, modem_signals, 1);
@@ -1491,8 +1491,8 @@ static int digi_read_oob_callback(struct urb *urb)
 
                rts = 0;
                if (tty)
-                       rts = tty->termios.c_cflag & CRTSCTS;
-               
+                       rts = C_CRTSCTS(tty);
+
                if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
                        spin_lock(&priv->dp_port_lock);
                        /* convert from digi flags to termiox flags */
index a5a0376bbd48c2596b908aa0409befb1136bf128..8c660ae401d8267f951516e33fe8188f0f23e060 100644 (file)
@@ -824,6 +824,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_TURTELIZER_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_USB60F) },
+       { USB_DEVICE(RATOC_VENDOR_ID, RATOC_PRODUCT_ID_SCU18) },
        { USB_DEVICE(FTDI_VID, FTDI_REU_TINY_PID) },
 
        /* Papouch devices based on FTDI chip */
index 67c6d446973092ddf98dd703152b7cd0d8dec24c..a84df2513994a57377cc41105f3078e36d2351a2 100644 (file)
  */
 #define RATOC_VENDOR_ID                0x0584
 #define RATOC_PRODUCT_ID_USB60F        0xb020
+#define RATOC_PRODUCT_ID_SCU18 0xb03a
 
 /*
  * Infineon Technologies
index f49327d20ee820a55a338da89146887b4ec4ac0e..f3007ecdd1b47b1d438ecde4fdcd15be47dc714a 100644 (file)
@@ -1398,7 +1398,7 @@ static void edge_throttle(struct tty_struct *tty)
        }
 
        /* if we are implementing RTS/CTS, toggle that line */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                edge_port->shadowMCR &= ~MCR_RTS;
                status = send_cmd_write_uart_register(edge_port, MCR,
                                                        edge_port->shadowMCR);
@@ -1435,7 +1435,7 @@ static void edge_unthrottle(struct tty_struct *tty)
                        return;
        }
        /* if we are implementing RTS/CTS, toggle that line */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                edge_port->shadowMCR |= MCR_RTS;
                send_cmd_write_uart_register(edge_port, MCR,
                                                edge_port->shadowMCR);
index fd707d6a10e26359f4f96502e0bcbb1f1c79bbe1..4446b8d70ac203cc8b763c38b6b93903cd1a0cb8 100644 (file)
@@ -428,7 +428,7 @@ static int  mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
         * either.
         */
        spin_lock_irqsave(&priv->lock, flags);
-       if (tty && (tty->termios.c_cflag & CBAUD))
+       if (tty && C_BAUD(tty))
                priv->control_state = TIOCM_DTR | TIOCM_RTS;
        else
                priv->control_state = 0;
index 78b4f64c6b00e457cef0bc65ed636a3e9fa8db42..2eddbe538cda14d073a15bdaa5f5ad4a13eeb560 100644 (file)
@@ -1308,7 +1308,7 @@ static void mos7720_throttle(struct tty_struct *tty)
        }
 
        /* if we are implementing RTS/CTS, toggle that line */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                mos7720_port->shadowMCR &= ~UART_MCR_RTS;
                write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
                              mos7720_port->shadowMCR);
@@ -1338,7 +1338,7 @@ static void mos7720_unthrottle(struct tty_struct *tty)
        }
 
        /* if we are implementing RTS/CTS, toggle that line */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                mos7720_port->shadowMCR |= UART_MCR_RTS;
                write_mos_reg(port->serial, port->port_number, MOS7720_MCR,
                              mos7720_port->shadowMCR);
index 2c69bfcdacc64e005ce294ccedb2d3ebf7d02a26..02ea975754f52da75f4f570c497cb952bb9d473a 100644 (file)
@@ -1425,7 +1425,7 @@ static void mos7840_throttle(struct tty_struct *tty)
                        return;
        }
        /* if we are implementing RTS/CTS, toggle that line */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                mos7840_port->shadowMCR &= ~MCR_RTS;
                status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
                                         mos7840_port->shadowMCR);
@@ -1466,7 +1466,7 @@ static void mos7840_unthrottle(struct tty_struct *tty)
        }
 
        /* if we are implementing RTS/CTS, toggle that line */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                mos7840_port->shadowMCR |= MCR_RTS;
                status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER,
                                         mos7840_port->shadowMCR);
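
Note: the serial-driver hunks above (cypress_m8, digi_acceleport, io_edgeport, mct_u232, mos7720, mos7840) replace open-coded tests of tty->termios.c_cflag with the C_CLOCAL()/C_CRTSCTS()/C_BAUD() helpers. To the best of my knowledge these are thin wrappers from <linux/tty.h> over the same c_cflag test, roughly:

/* approximate expansions, for illustration only */
#define C_CLOCAL(tty)	((tty)->termios.c_cflag & CLOCAL)
#define C_CRTSCTS(tty)	((tty)->termios.c_cflag & CRTSCTS)
#define C_BAUD(tty)	((tty)->termios.c_cflag & CBAUD)

/* so a throttle path reads */
if (C_CRTSCTS(tty)) {
	/* hardware flow control in use: drop RTS */
}
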
index e3c3f57c2d828d42ecb35e12d8a1a048974d9ee6..619607323bfdfb0a678e470ed31757ba6028676d 100644 (file)
@@ -368,6 +368,16 @@ static int mxu1_port_probe(struct usb_serial_port *port)
        return 0;
 }
 
+static int mxu1_port_remove(struct usb_serial_port *port)
+{
+       struct mxu1_port *mxport;
+
+       mxport = usb_get_serial_port_data(port);
+       kfree(mxport);
+
+       return 0;
+}
+
 static int mxu1_startup(struct usb_serial *serial)
 {
        struct mxu1_device *mxdev;
@@ -427,6 +437,14 @@ err_free_mxdev:
        return err;
 }
 
+static void mxu1_release(struct usb_serial *serial)
+{
+       struct mxu1_device *mxdev;
+
+       mxdev = usb_get_serial_data(serial);
+       kfree(mxdev);
+}
+
 static int mxu1_write_byte(struct usb_serial_port *port, u32 addr,
                           u8 mask, u8 byte)
 {
@@ -957,7 +975,9 @@ static struct usb_serial_driver mxu11x0_device = {
        .id_table               = mxu1_idtable,
        .num_ports              = 1,
        .port_probe             = mxu1_port_probe,
+       .port_remove            = mxu1_port_remove,
        .attach                 = mxu1_startup,
+       .release                = mxu1_release,
        .open                   = mxu1_open,
        .close                  = mxu1_close,
        .ioctl                  = mxu1_ioctl,
index f2280606b73c0c897d83d7738747b3d6f97a2c71..db86e512e0fcb3af0548eb9907b04ce68db3a8dc 100644 (file)
@@ -268,6 +268,8 @@ static void option_instat_callback(struct urb *urb);
 #define TELIT_PRODUCT_CC864_SINGLE             0x1006
 #define TELIT_PRODUCT_DE910_DUAL               0x1010
 #define TELIT_PRODUCT_UE910_V2                 0x1012
+#define TELIT_PRODUCT_LE922_USBCFG0            0x1042
+#define TELIT_PRODUCT_LE922_USBCFG3            0x1043
 #define TELIT_PRODUCT_LE920                    0x1200
 #define TELIT_PRODUCT_LE910                    0x1201
 
@@ -615,6 +617,16 @@ static const struct option_blacklist_info telit_le920_blacklist = {
        .reserved = BIT(1) | BIT(5),
 };
 
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
+       .sendsetup = BIT(2),
+       .reserved = BIT(0) | BIT(1) | BIT(3),
+};
+
+static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
+       .sendsetup = BIT(0),
+       .reserved = BIT(1) | BIT(2) | BIT(3),
+};
+
 static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
        { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1160,6 +1172,10 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
+               .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+       { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
+               .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
                .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1679,7 +1695,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
                .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
-       { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
+       { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
                .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 
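
Note: the Cinterion AHXX entry above switches from USB_DEVICE() to USB_DEVICE_INTERFACE_CLASS() so the serial driver only binds to the vendor-specific (0xff) interfaces of that device, leaving any other interfaces to their own drivers. The two macros differ only in which usb_device_id match fields they fill in; a sketch with clearly hypothetical IDs:

/* binds to every interface of device abcd:1234 */
{ USB_DEVICE(0xabcd, 0x1234) },

/* binds only to interfaces of abcd:1234 whose bInterfaceClass == 0xff */
{ USB_DEVICE_INTERFACE_CLASS(0xabcd, 0x1234, 0xff) },
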
index 60afb39eb73c0b95d261ec367890a306621fd2de..337a0be89fcf0767f67731a9b0a50700e1df9318 100644 (file)
@@ -544,6 +544,11 @@ static int treo_attach(struct usb_serial *serial)
                (serial->num_interrupt_in == 0))
                return 0;
 
+       if (serial->num_bulk_in < 2 || serial->num_interrupt_in < 2) {
+               dev_err(&serial->interface->dev, "missing endpoints\n");
+               return -ENODEV;
+       }
+
        /*
        * It appears that Treos and Kyoceras want to use the
        * 1st bulk in endpoint to communicate with the 2nd bulk out endpoint,
@@ -597,8 +602,10 @@ static int clie_5_attach(struct usb_serial *serial)
         */
 
        /* some sanity check */
-       if (serial->num_ports < 2)
-               return -1;
+       if (serial->num_bulk_out < 2) {
+               dev_err(&serial->interface->dev, "missing bulk out endpoints\n");
+               return -ENODEV;
+       }
 
        /* port 0 now uses the modified endpoint Address */
        port = serial->port[0];
index f3cf4cecd2b7c8d6b570b672c491638a5ed0cb95..d3a17c65a702de5ee08f6f122efc141b33072dd1 100644 (file)
@@ -1067,12 +1067,12 @@ static void ms_lib_free_writebuf(struct us_data *us)
        ms_lib_clear_pagemap(info); /* (pdx)->MS_Lib.pagemap memset 0 in ms.h */
 
        if (info->MS_Lib.blkpag) {
-               kfree((u8 *)(info->MS_Lib.blkpag));  /* Arnold test ... */
+               kfree(info->MS_Lib.blkpag);  /* Arnold test ... */
                info->MS_Lib.blkpag = NULL;
        }
 
        if (info->MS_Lib.blkext) {
-               kfree((u8 *)(info->MS_Lib.blkext));  /* Arnold test ... */
+               kfree(info->MS_Lib.blkext);  /* Arnold test ... */
                info->MS_Lib.blkext = NULL;
        }
 }
index 9ff9404f99d7b10e3d88b6888212b62ab982a96e..44b096c1737b6548f5117144aff800c63d20b6d8 100644 (file)
@@ -246,6 +246,29 @@ static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
        }
 }
 
+static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *cmnd)
+{
+       u8 response_code = riu->response_code;
+
+       switch (response_code) {
+       case RC_INCORRECT_LUN:
+               cmnd->result = DID_BAD_TARGET << 16;
+               break;
+       case RC_TMF_SUCCEEDED:
+               cmnd->result = DID_OK << 16;
+               break;
+       case RC_TMF_NOT_SUPPORTED:
+               cmnd->result = DID_TARGET_FAILURE << 16;
+               break;
+       default:
+               uas_log_cmd_state(cmnd, "response iu", response_code);
+               cmnd->result = DID_ERROR << 16;
+               break;
+       }
+
+       return response_code == RC_TMF_SUCCEEDED;
+}
+
 static void uas_stat_cmplt(struct urb *urb)
 {
        struct iu *iu = urb->transfer_buffer;
@@ -258,6 +281,7 @@ static void uas_stat_cmplt(struct urb *urb)
        unsigned long flags;
        unsigned int idx;
        int status = urb->status;
+       bool success;
 
        spin_lock_irqsave(&devinfo->lock, flags);
 
@@ -313,13 +337,13 @@ static void uas_stat_cmplt(struct urb *urb)
                uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
                break;
        case IU_ID_RESPONSE:
-               uas_log_cmd_state(cmnd, "unexpected response iu",
-                                 ((struct response_iu *)iu)->response_code);
-               /* Error, cancel data transfers */
-               data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
-               data_out_urb = usb_get_urb(cmdinfo->data_out_urb);
                cmdinfo->state &= ~COMMAND_INFLIGHT;
-               cmnd->result = DID_ERROR << 16;
+               success = uas_evaluate_response_iu((struct response_iu *)iu, cmnd);
+               if (!success) {
+                       /* Error, cancel data transfers */
+                       data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
+                       data_out_urb = usb_get_urb(cmdinfo->data_out_urb);
+               }
                uas_try_complete(cmnd, __func__);
                break;
        default:
index 64933b993d7a5f0a8ff64a82979ed20bb9890534..2580a32bcdfff088d4e5f86af1e09f93b13c20ce 100644 (file)
@@ -117,11 +117,12 @@ EXPORT_SYMBOL_GPL(usbip_event_add);
 int usbip_event_happened(struct usbip_device *ud)
 {
        int happened = 0;
+       unsigned long flags;
 
-       spin_lock(&ud->lock);
+       spin_lock_irqsave(&ud->lock, flags);
        if (ud->event != 0)
                happened = 1;
-       spin_unlock(&ud->lock);
+       spin_unlock_irqrestore(&ud->lock, flags);
 
        return happened;
 }
index 7fbe19d5279e5c11159f141fdd20b6a3ac7ff568..fca51105974eabdd09094b7cd1e80bb945275129 100644 (file)
@@ -121,9 +121,11 @@ static void dump_port_status_diff(u32 prev_status, u32 new_status)
 
 void rh_port_connect(int rhport, enum usb_device_speed speed)
 {
+       unsigned long   flags;
+
        usbip_dbg_vhci_rh("rh_port_connect %d\n", rhport);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
 
        the_controller->port_status[rhport] |= USB_PORT_STAT_CONNECTION
                | (1 << USB_PORT_FEAT_C_CONNECTION);
@@ -139,22 +141,24 @@ void rh_port_connect(int rhport, enum usb_device_speed speed)
                break;
        }
 
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
 }
 
 static void rh_port_disconnect(int rhport)
 {
+       unsigned long   flags;
+
        usbip_dbg_vhci_rh("rh_port_disconnect %d\n", rhport);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
 
        the_controller->port_status[rhport] &= ~USB_PORT_STAT_CONNECTION;
        the_controller->port_status[rhport] |=
                                        (1 << USB_PORT_FEAT_C_CONNECTION);
 
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
        usb_hcd_poll_rh_status(vhci_to_hcd(the_controller));
 }
 
@@ -182,13 +186,14 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
        int             retval;
        int             rhport;
        int             changed = 0;
+       unsigned long   flags;
 
        retval = DIV_ROUND_UP(VHCI_NPORTS + 1, 8);
        memset(buf, 0, retval);
 
        vhci = hcd_to_vhci(hcd);
 
-       spin_lock(&vhci->lock);
+       spin_lock_irqsave(&vhci->lock, flags);
        if (!HCD_HW_ACCESSIBLE(hcd)) {
                usbip_dbg_vhci_rh("hw accessible flag not on?\n");
                goto done;
@@ -209,7 +214,7 @@ static int vhci_hub_status(struct usb_hcd *hcd, char *buf)
                usb_hcd_resume_root_hub(hcd);
 
 done:
-       spin_unlock(&vhci->lock);
+       spin_unlock_irqrestore(&vhci->lock, flags);
        return changed ? retval : 0;
 }
 
@@ -231,6 +236,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
        struct vhci_hcd *dum;
        int             retval = 0;
        int             rhport;
+       unsigned long   flags;
 
        u32 prev_port_status[VHCI_NPORTS];
 
@@ -249,7 +255,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
 
        dum = hcd_to_vhci(hcd);
 
-       spin_lock(&dum->lock);
+       spin_lock_irqsave(&dum->lock, flags);
 
        /* store old status and compare now and old later */
        if (usbip_dbg_flag_vhci_rh) {
@@ -403,7 +409,7 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
        }
        usbip_dbg_vhci_rh(" bye\n");
 
-       spin_unlock(&dum->lock);
+       spin_unlock_irqrestore(&dum->lock, flags);
 
        return retval;
 }
@@ -426,6 +432,7 @@ static void vhci_tx_urb(struct urb *urb)
 {
        struct vhci_device *vdev = get_vdev(urb->dev);
        struct vhci_priv *priv;
+       unsigned long flags;
 
        if (!vdev) {
                pr_err("could not get virtual device");
@@ -438,7 +445,7 @@ static void vhci_tx_urb(struct urb *urb)
                return;
        }
 
-       spin_lock(&vdev->priv_lock);
+       spin_lock_irqsave(&vdev->priv_lock, flags);
 
        priv->seqnum = atomic_inc_return(&the_controller->seqnum);
        if (priv->seqnum == 0xffff)
@@ -452,7 +459,7 @@ static void vhci_tx_urb(struct urb *urb)
        list_add_tail(&priv->list, &vdev->priv_tx);
 
        wake_up(&vdev->waitq_tx);
-       spin_unlock(&vdev->priv_lock);
+       spin_unlock_irqrestore(&vdev->priv_lock, flags);
 }
 
 static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
@@ -461,6 +468,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
        struct device *dev = &urb->dev->dev;
        int ret = 0;
        struct vhci_device *vdev;
+       unsigned long flags;
 
        usbip_dbg_vhci_hc("enter, usb_hcd %p urb %p mem_flags %d\n",
                          hcd, urb, mem_flags);
@@ -468,11 +476,11 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
        /* patch to usb_sg_init() is in 2.5.60 */
        BUG_ON(!urb->transfer_buffer && urb->transfer_buffer_length);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
 
        if (urb->status != -EINPROGRESS) {
                dev_err(dev, "URB already unlinked!, status %d\n", urb->status);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
                return urb->status;
        }
 
@@ -484,7 +492,7 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
            vdev->ud.status == VDEV_ST_ERROR) {
                dev_err(dev, "enqueue for inactive port %d\n", vdev->rhport);
                spin_unlock(&vdev->ud.lock);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
                return -ENODEV;
        }
        spin_unlock(&vdev->ud.lock);
@@ -557,14 +565,14 @@ static int vhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
 
 out:
        vhci_tx_urb(urb);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        return 0;
 
 no_need_xmit:
        usb_hcd_unlink_urb_from_ep(hcd, urb);
 no_need_unlink:
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
        if (!ret)
                usb_hcd_giveback_urb(vhci_to_hcd(the_controller),
                                     urb, urb->status);
@@ -621,16 +629,17 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 {
        struct vhci_priv *priv;
        struct vhci_device *vdev;
+       unsigned long flags;
 
        pr_info("dequeue a urb %p\n", urb);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
 
        priv = urb->hcpriv;
        if (!priv) {
                /* URB was never linked! or will be soon given back by
                 * vhci_rx. */
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
                return -EIDRM;
        }
 
@@ -639,7 +648,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
                ret = usb_hcd_check_unlink_urb(hcd, urb, status);
                if (ret) {
-                       spin_unlock(&the_controller->lock);
+                       spin_unlock_irqrestore(&the_controller->lock, flags);
                        return ret;
                }
        }
@@ -667,10 +676,10 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 
                usb_hcd_unlink_urb_from_ep(hcd, urb);
 
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
                usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
                                     urb->status);
-               spin_lock(&the_controller->lock);
+               spin_lock_irqsave(&the_controller->lock, flags);
 
        } else {
                /* tcp connection is alive */
@@ -682,7 +691,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                unlink = kzalloc(sizeof(struct vhci_unlink), GFP_ATOMIC);
                if (!unlink) {
                        spin_unlock(&vdev->priv_lock);
-                       spin_unlock(&the_controller->lock);
+                       spin_unlock_irqrestore(&the_controller->lock, flags);
                        usbip_event_add(&vdev->ud, VDEV_EVENT_ERROR_MALLOC);
                        return -ENOMEM;
                }
@@ -703,7 +712,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
                spin_unlock(&vdev->priv_lock);
        }
 
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usbip_dbg_vhci_hc("leave\n");
        return 0;
@@ -712,8 +721,9 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
 static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
 {
        struct vhci_unlink *unlink, *tmp;
+       unsigned long flags;
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
        spin_lock(&vdev->priv_lock);
 
        list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
@@ -747,19 +757,19 @@ static void vhci_device_unlink_cleanup(struct vhci_device *vdev)
                list_del(&unlink->list);
 
                spin_unlock(&vdev->priv_lock);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
 
                usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
                                     urb->status);
 
-               spin_lock(&the_controller->lock);
+               spin_lock_irqsave(&the_controller->lock, flags);
                spin_lock(&vdev->priv_lock);
 
                kfree(unlink);
        }
 
        spin_unlock(&vdev->priv_lock);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 }
 
 /*
@@ -826,8 +836,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
 static void vhci_device_reset(struct usbip_device *ud)
 {
        struct vhci_device *vdev = container_of(ud, struct vhci_device, ud);
+       unsigned long flags;
 
-       spin_lock(&ud->lock);
+       spin_lock_irqsave(&ud->lock, flags);
 
        vdev->speed  = 0;
        vdev->devid  = 0;
@@ -841,14 +852,16 @@ static void vhci_device_reset(struct usbip_device *ud)
        }
        ud->status = VDEV_ST_NULL;
 
-       spin_unlock(&ud->lock);
+       spin_unlock_irqrestore(&ud->lock, flags);
 }
 
 static void vhci_device_unusable(struct usbip_device *ud)
 {
-       spin_lock(&ud->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&ud->lock, flags);
        ud->status = VDEV_ST_ERROR;
-       spin_unlock(&ud->lock);
+       spin_unlock_irqrestore(&ud->lock, flags);
 }
 
 static void vhci_device_init(struct vhci_device *vdev)
@@ -938,12 +951,13 @@ static int vhci_get_frame_number(struct usb_hcd *hcd)
 static int vhci_bus_suspend(struct usb_hcd *hcd)
 {
        struct vhci_hcd *vhci = hcd_to_vhci(hcd);
+       unsigned long flags;
 
        dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
 
-       spin_lock(&vhci->lock);
+       spin_lock_irqsave(&vhci->lock, flags);
        hcd->state = HC_STATE_SUSPENDED;
-       spin_unlock(&vhci->lock);
+       spin_unlock_irqrestore(&vhci->lock, flags);
 
        return 0;
 }
@@ -952,15 +966,16 @@ static int vhci_bus_resume(struct usb_hcd *hcd)
 {
        struct vhci_hcd *vhci = hcd_to_vhci(hcd);
        int rc = 0;
+       unsigned long flags;
 
        dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);
 
-       spin_lock(&vhci->lock);
+       spin_lock_irqsave(&vhci->lock, flags);
        if (!HCD_HW_ACCESSIBLE(hcd))
                rc = -ESHUTDOWN;
        else
                hcd->state = HC_STATE_RUNNING;
-       spin_unlock(&vhci->lock);
+       spin_unlock_irqrestore(&vhci->lock, flags);
 
        return rc;
 }
@@ -1058,17 +1073,18 @@ static int vhci_hcd_suspend(struct platform_device *pdev, pm_message_t state)
        int rhport = 0;
        int connected = 0;
        int ret = 0;
+       unsigned long flags;
 
        hcd = platform_get_drvdata(pdev);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
 
        for (rhport = 0; rhport < VHCI_NPORTS; rhport++)
                if (the_controller->port_status[rhport] &
                    USB_PORT_STAT_CONNECTION)
                        connected += 1;
 
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        if (connected > 0) {
                dev_info(&pdev->dev,
index 00e4a54308e430f283f053865853089566c823d7..d656e0edc3d50104617785da0d559943532ac1a8 100644 (file)
@@ -72,10 +72,11 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 {
        struct usbip_device *ud = &vdev->ud;
        struct urb *urb;
+       unsigned long flags;
 
-       spin_lock(&vdev->priv_lock);
+       spin_lock_irqsave(&vdev->priv_lock, flags);
        urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
-       spin_unlock(&vdev->priv_lock);
+       spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
        if (!urb) {
                pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
@@ -104,9 +105,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
 
        usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
        usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
 
@@ -117,8 +118,9 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
                                                  struct usbip_header *pdu)
 {
        struct vhci_unlink *unlink, *tmp;
+       unsigned long flags;
 
-       spin_lock(&vdev->priv_lock);
+       spin_lock_irqsave(&vdev->priv_lock, flags);
 
        list_for_each_entry_safe(unlink, tmp, &vdev->unlink_rx, list) {
                pr_info("unlink->seqnum %lu\n", unlink->seqnum);
@@ -127,12 +129,12 @@ static struct vhci_unlink *dequeue_pending_unlink(struct vhci_device *vdev,
                                          unlink->seqnum);
                        list_del(&unlink->list);
 
-                       spin_unlock(&vdev->priv_lock);
+                       spin_unlock_irqrestore(&vdev->priv_lock, flags);
                        return unlink;
                }
        }
 
-       spin_unlock(&vdev->priv_lock);
+       spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
        return NULL;
 }
@@ -142,6 +144,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 {
        struct vhci_unlink *unlink;
        struct urb *urb;
+       unsigned long flags;
 
        usbip_dump_header(pdu);
 
@@ -152,9 +155,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
                return;
        }
 
-       spin_lock(&vdev->priv_lock);
+       spin_lock_irqsave(&vdev->priv_lock, flags);
        urb = pickup_urb_and_free_priv(vdev, unlink->unlink_seqnum);
-       spin_unlock(&vdev->priv_lock);
+       spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
        if (!urb) {
                /*
@@ -171,9 +174,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
                urb->status = pdu->u.ret_unlink.status;
                pr_info("urb->status %d\n", urb->status);
 
-               spin_lock(&the_controller->lock);
+               spin_lock_irqsave(&the_controller->lock, flags);
                usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
 
                usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
                                     urb->status);
@@ -185,10 +188,11 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
 static int vhci_priv_tx_empty(struct vhci_device *vdev)
 {
        int empty = 0;
+       unsigned long flags;
 
-       spin_lock(&vdev->priv_lock);
+       spin_lock_irqsave(&vdev->priv_lock, flags);
        empty = list_empty(&vdev->priv_rx);
-       spin_unlock(&vdev->priv_lock);
+       spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
        return empty;
 }
index 211f43f67ea25f28ca46146bbadc099b2532924b..5b5462eb16657b03d30b293caac2e6facfbb2226 100644 (file)
@@ -32,10 +32,11 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
 {
        char *s = out;
        int i = 0;
+       unsigned long flags;
 
        BUG_ON(!the_controller || !out);
 
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
 
        /*
         * output example:
@@ -70,7 +71,7 @@ static ssize_t status_show(struct device *dev, struct device_attribute *attr,
                spin_unlock(&vdev->ud.lock);
        }
 
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        return out - s;
 }
@@ -80,11 +81,12 @@ static DEVICE_ATTR_RO(status);
 static int vhci_port_disconnect(__u32 rhport)
 {
        struct vhci_device *vdev;
+       unsigned long flags;
 
        usbip_dbg_vhci_sysfs("enter\n");
 
        /* lock */
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
 
        vdev = port_to_vdev(rhport);
 
@@ -94,14 +96,14 @@ static int vhci_port_disconnect(__u32 rhport)
 
                /* unlock */
                spin_unlock(&vdev->ud.lock);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
 
                return -EINVAL;
        }
 
        /* unlock */
        spin_unlock(&vdev->ud.lock);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
 
        usbip_event_add(&vdev->ud, VDEV_EVENT_DOWN);
 
@@ -177,6 +179,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
        int sockfd = 0;
        __u32 rhport = 0, devid = 0, speed = 0;
        int err;
+       unsigned long flags;
 
        /*
         * @rhport: port number of vhci_hcd
@@ -202,14 +205,14 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
        /* now need lock until setting vdev status as used */
 
        /* begin a lock */
-       spin_lock(&the_controller->lock);
+       spin_lock_irqsave(&the_controller->lock, flags);
        vdev = port_to_vdev(rhport);
        spin_lock(&vdev->ud.lock);
 
        if (vdev->ud.status != VDEV_ST_NULL) {
                /* end of the lock */
                spin_unlock(&vdev->ud.lock);
-               spin_unlock(&the_controller->lock);
+               spin_unlock_irqrestore(&the_controller->lock, flags);
 
                sockfd_put(socket);
 
@@ -227,7 +230,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
        vdev->ud.status     = VDEV_ST_NOTASSIGNED;
 
        spin_unlock(&vdev->ud.lock);
-       spin_unlock(&the_controller->lock);
+       spin_unlock_irqrestore(&the_controller->lock, flags);
        /* end the lock */
 
        vdev->ud.tcp_rx = kthread_get_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
index 409fd99f3257f94bb7489b0d7aa7d971bf5e303b..3e7878fe2fd463f5051b228b364a8ca4be6f4761 100644 (file)
@@ -47,16 +47,17 @@ static void setup_cmd_submit_pdu(struct usbip_header *pdup,  struct urb *urb)
 static struct vhci_priv *dequeue_from_priv_tx(struct vhci_device *vdev)
 {
        struct vhci_priv *priv, *tmp;
+       unsigned long flags;
 
-       spin_lock(&vdev->priv_lock);
+       spin_lock_irqsave(&vdev->priv_lock, flags);
 
        list_for_each_entry_safe(priv, tmp, &vdev->priv_tx, list) {
                list_move_tail(&priv->list, &vdev->priv_rx);
-               spin_unlock(&vdev->priv_lock);
+               spin_unlock_irqrestore(&vdev->priv_lock, flags);
                return priv;
        }
 
-       spin_unlock(&vdev->priv_lock);
+       spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
        return NULL;
 }
@@ -136,16 +137,17 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
 static struct vhci_unlink *dequeue_from_unlink_tx(struct vhci_device *vdev)
 {
        struct vhci_unlink *unlink, *tmp;
+       unsigned long flags;
 
-       spin_lock(&vdev->priv_lock);
+       spin_lock_irqsave(&vdev->priv_lock, flags);
 
        list_for_each_entry_safe(unlink, tmp, &vdev->unlink_tx, list) {
                list_move_tail(&unlink->list, &vdev->unlink_rx);
-               spin_unlock(&vdev->priv_lock);
+               spin_unlock_irqrestore(&vdev->priv_lock, flags);
                return unlink;
        }
 
-       spin_unlock(&vdev->priv_lock);
+       spin_unlock_irqrestore(&vdev->priv_lock, flags);
 
        return NULL;
 }
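
Note: the usbip/vhci hunks above convert plain spin_lock()/spin_unlock() to the irqsave/irqrestore variants, presumably because these locks can also be taken from interrupt or interrupt-disabled contexts (URB giveback and HCD callback paths), where a plain spin_lock would risk deadlock. The general pattern, as a minimal sketch around a hypothetical lock:

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */

static void touch_shared_state(void)
{
	unsigned long flags;

	/* save the current IRQ state, disable IRQs, then take the lock */
	spin_lock_irqsave(&example_lock, flags);
	/* ... modify state that interrupt/completion paths also touch ... */
	spin_unlock_irqrestore(&example_lock, flags);
	/* the caller's previous IRQ state is restored, whatever it was */
}
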
index 50ce80d604f3af1ab6992bc9e588fc97aa051858..8ed8e34c3492837debffc0552af9e603cc3952e3 100644 (file)
@@ -45,6 +45,7 @@
  *             funneled through AES are...16 bytes in size!
  */
 
+#include <crypto/skcipher.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
 #include <linux/err.h>
@@ -195,21 +196,22 @@ static void bytewise_xor(void *_bo, const void *_bi1, const void *_bi2,
  * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
  *       what sg[4] is for. Maybe there is a smarter way to do this.
  */
-static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
+static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
                        struct crypto_cipher *tfm_aes, void *mic,
                        const struct aes_ccm_nonce *n,
                        const struct aes_ccm_label *a, const void *b,
                        size_t blen)
 {
        int result = 0;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm_cbc);
        struct aes_ccm_b0 b0;
        struct aes_ccm_b1 b1;
        struct aes_ccm_a ax;
        struct scatterlist sg[4], sg_dst;
-       void *iv, *dst_buf;
-       size_t ivsize, dst_size;
+       void *dst_buf;
+       size_t dst_size;
        const u8 bzero[16] = { 0 };
+       u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
        size_t zero_padding;
 
        /*
@@ -232,9 +234,7 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
                goto error_dst_buf;
        }
 
-       iv = crypto_blkcipher_crt(tfm_cbc)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm_cbc);
-       memset(iv, 0, ivsize);
+       memset(iv, 0, sizeof(iv));
 
        /* Setup B0 */
        b0.flags = 0x59;        /* Format B0 */
@@ -259,9 +259,11 @@ static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
        sg_set_buf(&sg[3], bzero, zero_padding);
        sg_init_one(&sg_dst, dst_buf, dst_size);
 
-       desc.tfm = tfm_cbc;
-       desc.flags = 0;
-       result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
+       skcipher_request_set_tfm(req, tfm_cbc);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, &sg_dst, dst_size, iv);
+       result = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
        if (result < 0) {
                printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
                       result);
@@ -301,18 +303,18 @@ ssize_t wusb_prf(void *out, size_t out_size,
 {
        ssize_t result, bytes = 0, bitr;
        struct aes_ccm_nonce n = *_n;
-       struct crypto_blkcipher *tfm_cbc;
+       struct crypto_skcipher *tfm_cbc;
        struct crypto_cipher *tfm_aes;
        u64 sfn = 0;
        __le64 sfn_le;
 
-       tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_cbc = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_cbc)) {
                result = PTR_ERR(tfm_cbc);
                printk(KERN_ERR "E: can't load CBC(AES): %d\n", (int)result);
                goto error_alloc_cbc;
        }
-       result = crypto_blkcipher_setkey(tfm_cbc, key, 16);
+       result = crypto_skcipher_setkey(tfm_cbc, key, 16);
        if (result < 0) {
                printk(KERN_ERR "E: can't set CBC key: %d\n", (int)result);
                goto error_setkey_cbc;
@@ -345,7 +347,7 @@ error_setkey_aes:
        crypto_free_cipher(tfm_aes);
 error_alloc_aes:
 error_setkey_cbc:
-       crypto_free_blkcipher(tfm_cbc);
+       crypto_free_skcipher(tfm_cbc);
 error_alloc_cbc:
        return result;
 }
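
Note: the wusbcore hunks above port the CCM MAC helper from the old synchronous blkcipher interface to the skcipher API, keeping the IV on the stack instead of touching the transform's internal IV. A condensed sketch of the synchronous skcipher call sequence used there (needs <crypto/skcipher.h>; error handling trimmed, helper name hypothetical):

/* one synchronous CBC(AES) encryption of len bytes from src into dst */
static int cbc_encrypt_sync(struct crypto_skcipher *tfm,
			    struct scatterlist *src, struct scatterlist *dst,
			    unsigned int len)
{
	SKCIPHER_REQUEST_ON_STACK(req, tfm);
	u8 iv[16] = { 0 };			/* a real caller supplies its IV */
	int ret;

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, 0, NULL, NULL);	/* no async callback */
	skcipher_request_set_crypt(req, src, dst, len, iv);
	ret = crypto_skcipher_encrypt(req);
	skcipher_request_zero(req);		/* wipe the on-stack request */
	return ret;
}

The transform itself is set up and torn down around this, as in the patch: crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC), crypto_skcipher_setkey(), and finally crypto_free_skcipher().
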
index 41838db7f85c6bb508ff7c570d73fb529a50282a..8c5bd000739ba8c518afecac6232551063c87c83 100644 (file)
@@ -336,7 +336,7 @@ static inline
 struct usb_hcd *usb_hcd_get_by_usb_dev(struct usb_device *usb_dev)
 {
        struct usb_hcd *usb_hcd;
-       usb_hcd = container_of(usb_dev->bus, struct usb_hcd, self);
+       usb_hcd = bus_to_hcd(usb_dev->bus);
        return usb_get_hcd(usb_hcd);
 }
 
index 82f25cc1c460dee73c37b2b19895fb7d89bfb0d3..ecca316386f57fa64d04746d4b8326fa4c241ebd 100644 (file)
@@ -123,8 +123,8 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
        /*
         * With noiommu enabled, an IOMMU group will be created for a device
         * that doesn't already have one and doesn't have an iommu_ops on their
-        * bus.  We use iommu_present() again in the main code to detect these
-        * fake groups.
+        * bus.  We set iommudata simply to be able to identify these groups
+        * as special use and for reclamation later.
         */
        if (group || !noiommu || iommu_present(dev->bus))
                return group;
@@ -134,6 +134,7 @@ struct iommu_group *vfio_iommu_group_get(struct device *dev)
                return NULL;
 
        iommu_group_set_name(group, "vfio-noiommu");
+       iommu_group_set_iommudata(group, &noiommu, NULL);
        ret = iommu_group_add_device(group, dev);
        iommu_group_put(group);
        if (ret)
@@ -158,7 +159,7 @@ EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
 void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
 {
 #ifdef CONFIG_VFIO_NOIOMMU
-       if (!iommu_present(dev->bus))
+       if (iommu_group_get_iommudata(group) == &noiommu)
                iommu_group_remove_device(dev);
 #endif
 
@@ -190,16 +191,10 @@ static long vfio_noiommu_ioctl(void *iommu_data,
        return -ENOTTY;
 }
 
-static int vfio_iommu_present(struct device *dev, void *unused)
-{
-       return iommu_present(dev->bus) ? 1 : 0;
-}
-
 static int vfio_noiommu_attach_group(void *iommu_data,
                                     struct iommu_group *iommu_group)
 {
-       return iommu_group_for_each_dev(iommu_group, NULL,
-                                       vfio_iommu_present) ? -EINVAL : 0;
+       return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
 }
 
 static void vfio_noiommu_detach_group(void *iommu_data,
@@ -323,8 +318,7 @@ static void vfio_group_unlock_and_free(struct vfio_group *group)
 /**
  * Group objects - create, release, get, put, search
  */
-static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
-                                           bool iommu_present)
+static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 {
        struct vfio_group *group, *tmp;
        struct device *dev;
@@ -342,7 +336,9 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
        atomic_set(&group->container_users, 0);
        atomic_set(&group->opened, 0);
        group->iommu_group = iommu_group;
-       group->noiommu = !iommu_present;
+#ifdef CONFIG_VFIO_NOIOMMU
+       group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
+#endif
 
        group->nb.notifier_call = vfio_iommu_group_notifier;
 
@@ -767,7 +763,7 @@ int vfio_add_group_dev(struct device *dev,
 
        group = vfio_group_get_from_iommu(iommu_group);
        if (!group) {
-               group = vfio_create_group(iommu_group, iommu_present(dev->bus));
+               group = vfio_create_group(iommu_group);
                if (IS_ERR(group)) {
                        iommu_group_put(iommu_group);
                        return PTR_ERR(group);
index c42ce2fdfd441de6a88f7e3ecbc46fa2514028d7..0a4626886b00c1402bc2bbc234fe691bb549ff8a 100644 (file)
@@ -68,7 +68,6 @@
 #include <asm/machdep.h>
 #include <asm/pmac_feature.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #include "../macmodes.h"
 #endif
 
index ce0b1d05a388cefc309e177de53b6b936226ae74..218339a4edaac46e5b223d07f12f335b8966c66f 100644 (file)
@@ -76,7 +76,6 @@
 
 #ifdef CONFIG_PPC
 
-#include <asm/pci-bridge.h>
 #include "../macmodes.h"
 
 #ifdef CONFIG_BOOTX_TEXT
index 9b167f7ef6c698854a8fe4d05fdf4c3d2e9fffc7..4363c64d74e8c1481842735aa2fc0d1831344b05 100644 (file)
@@ -33,7 +33,6 @@
 #if defined(CONFIG_PPC)
 #include <linux/nvram.h>
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #include "macmodes.h"
 #endif
 
index 09b02cd1eb0e270f7dc654116167101202b00a27..7a90ea2c4613c772d4b14584a483fd2bbaed92bc 100644 (file)
@@ -47,7 +47,6 @@
 
 #if defined(CONFIG_PPC_PMAC)
 #include <asm/prom.h>
-#include <asm/pci-bridge.h>
 #include "../macmodes.h"
 #endif
 
index 43a0a52fc52703c18244dc2b25e54d6dd22c1f61..fb60a8f0cc94c8103d8dc141b25c5eecec5db39c 100644 (file)
 #include <linux/pci.h>
 #include <asm/io.h>
 
-#ifdef CONFIG_PPC64
-#include <asm/pci-bridge.h>
-#endif
-
 #ifdef CONFIG_PPC32
 #include <asm/bootx.h>
 #endif
index 36205c27c4d0f19ab61a4aa4175de3dd8785de66..f6bed86c17f96f16cfa75f35a15b790287e7ab1f 100644 (file)
@@ -545,6 +545,7 @@ err_enable_device:
 static void virtio_pci_remove(struct pci_dev *pci_dev)
 {
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+       struct device *dev = get_device(&vp_dev->vdev.dev);
 
        unregister_virtio_device(&vp_dev->vdev);
 
@@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
                virtio_pci_modern_remove(vp_dev);
 
        pci_disable_device(pci_dev);
+       put_device(dev);
 }
 
 static struct pci_driver virtio_pci_driver = {
index 4f0e7be0da346c90d8559d9653bd41927f722b22..80825a7e8e48e1ebd06af14a1bcf208acb733daf 100644 (file)
@@ -145,7 +145,8 @@ config MENF21BMC_WATCHDOG
 config TANGOX_WATCHDOG
        tristate "Sigma Designs SMP86xx/SMP87xx watchdog"
        select WATCHDOG_CORE
-       depends on ARCH_TANGOX || COMPILE_TEST
+       depends on ARCH_TANGO || COMPILE_TEST
+       depends on HAS_IOMEM
        help
          Support for the watchdog in Sigma Designs SMP86xx (tango3)
          and SMP87xx (tango4) family chips.
@@ -618,6 +619,7 @@ config DIGICOLOR_WATCHDOG
 config LPC18XX_WATCHDOG
        tristate "LPC18xx/43xx Watchdog"
        depends on ARCH_LPC18XX || COMPILE_TEST
+       depends on HAS_IOMEM
        select WATCHDOG_CORE
        help
          Say Y here if to include support for the watchdog timer
@@ -1374,6 +1376,7 @@ config BCM_KONA_WDT_DEBUG
 config BCM7038_WDT
        tristate "BCM7038 Watchdog"
        select WATCHDOG_CORE
+       depends on HAS_IOMEM
        help
         Watchdog driver for the built-in hardware in Broadcom 7038 SoCs.
 
@@ -1383,6 +1386,7 @@ config IMGPDC_WDT
        tristate "Imagination Technologies PDC Watchdog Timer"
        depends on HAS_IOMEM
        depends on METAG || MIPS || COMPILE_TEST
+       select WATCHDOG_CORE
        help
          Driver for Imagination Technologies PowerDown Controller
          Watchdog Timer.
@@ -1565,6 +1569,17 @@ config WATCHDOG_RIO
          machines.  The watchdog timeout period is normally one minute but
          can be changed with a boot-time parameter.
 
+config WATCHDOG_SUN4V
+       tristate "Sun4v Watchdog support"
+       select WATCHDOG_CORE
+       depends on SPARC64
+       help
+         Say Y here to support the hypervisor watchdog capability embedded
+         in the SPARC sun4v architecture.
+
+         To compile this driver as a module, choose M here. The module will
+         be called sun4v_wdt.
+
 # XTENSA Architecture
 
 # Xen Architecture
index f566753256abbbb74535337d3f9a9cc0442fd78e..f6a6a387c6c71f7a5e9e2cda4cc91ae3f39edf2c 100644 (file)
@@ -179,6 +179,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
 
 obj-$(CONFIG_WATCHDOG_RIO)             += riowd.o
 obj-$(CONFIG_WATCHDOG_CP1XXX)          += cpwd.o
+obj-$(CONFIG_WATCHDOG_SUN4V)           += sun4v_wdt.o
 
 # XTENSA Architecture
 
index f36ca4be07207a9fd200d92577e9242a10e774df..ac5840d9689aed0f79fafa82e6a56e4515bb1b27 100644 (file)
@@ -292,4 +292,4 @@ MODULE_PARM_DESC(nodelay,
                 "Force selection of a timeout setting without initial delay "
                 "(max6373/74 only, default=0)");
 
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
index 1a11aedc4fe850738bfe17d469232da64959b84d..68952d9ccf8394412654d76fc2c14edef3f74124 100644 (file)
@@ -608,7 +608,7 @@ static int usb_pcwd_probe(struct usb_interface *interface,
        struct usb_host_interface *iface_desc;
        struct usb_endpoint_descriptor *endpoint;
        struct usb_pcwd_private *usb_pcwd = NULL;
-       int pipe, maxp;
+       int pipe;
        int retval = -ENOMEM;
        int got_fw_rev;
        unsigned char fw_rev_major, fw_rev_minor;
@@ -641,7 +641,6 @@ static int usb_pcwd_probe(struct usb_interface *interface,
 
        /* get a handle to the interrupt data pipe */
        pipe = usb_rcvintpipe(udev, endpoint->bEndpointAddress);
-       maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
 
        /* allocate memory for our device and initialize it */
        usb_pcwd = kzalloc(sizeof(struct usb_pcwd_private), GFP_KERNEL);
index 01d816251302c2491c24a70e8f7c542b61c2f15a..e7a715e820217eb82a90cc8e5eed54d6d826e22c 100644 (file)
@@ -139,12 +139,11 @@ static int wdt_config(struct watchdog_device *wdd, bool ping)
 
        writel_relaxed(UNLOCK, wdt->base + WDTLOCK);
        writel_relaxed(wdt->load_val, wdt->base + WDTLOAD);
+       writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
 
-       if (!ping) {
-               writel_relaxed(INT_MASK, wdt->base + WDTINTCLR);
+       if (!ping)
                writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base +
                                WDTCONTROL);
-       }
 
        writel_relaxed(LOCK, wdt->base + WDTLOCK);
 
diff --git a/drivers/watchdog/sun4v_wdt.c b/drivers/watchdog/sun4v_wdt.c
new file mode 100644 (file)
index 0000000..1467fe5
--- /dev/null
@@ -0,0 +1,191 @@
+/*
+ *     sun4v watchdog timer
+ *     (c) Copyright 2016 Oracle Corporation
+ *
+ *     Implement a simple watchdog driver using the built-in sun4v hypervisor
+ *     watchdog support. If time expires, the hypervisor stops or bounces
+ *     the guest domain.
+ *
+ *     This program is free software; you can redistribute it and/or
+ *     modify it under the terms of the GNU General Public License
+ *     as published by the Free Software Foundation; either version
+ *     2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/watchdog.h>
+#include <asm/hypervisor.h>
+#include <asm/mdesc.h>
+
+#define WDT_TIMEOUT                    60
+#define WDT_MAX_TIMEOUT                        31536000
+#define WDT_MIN_TIMEOUT                        1
+#define WDT_DEFAULT_RESOLUTION_MS      1000    /* 1 second */
+
+static unsigned int timeout;
+module_param(timeout, uint, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+       __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, S_IRUGO);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+       __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static int sun4v_wdt_stop(struct watchdog_device *wdd)
+{
+       sun4v_mach_set_watchdog(0, NULL);
+
+       return 0;
+}
+
+static int sun4v_wdt_ping(struct watchdog_device *wdd)
+{
+       int hverr;
+
+       /*
+        * HV watchdog timer will round up the timeout
+        * passed in to the nearest multiple of the
+        * watchdog resolution in milliseconds.
+        */
+       hverr = sun4v_mach_set_watchdog(wdd->timeout * 1000, NULL);
+       if (hverr == HV_EINVAL)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int sun4v_wdt_set_timeout(struct watchdog_device *wdd,
+                                unsigned int timeout)
+{
+       wdd->timeout = timeout;
+
+       return 0;
+}
+
+static const struct watchdog_info sun4v_wdt_ident = {
+       .options =      WDIOF_SETTIMEOUT |
+                       WDIOF_MAGICCLOSE |
+                       WDIOF_KEEPALIVEPING,
+       .identity =     "sun4v hypervisor watchdog",
+       .firmware_version = 0,
+};
+
+static struct watchdog_ops sun4v_wdt_ops = {
+       .owner =        THIS_MODULE,
+       .start =        sun4v_wdt_ping,
+       .stop =         sun4v_wdt_stop,
+       .ping =         sun4v_wdt_ping,
+       .set_timeout =  sun4v_wdt_set_timeout,
+};
+
+static struct watchdog_device wdd = {
+       .info = &sun4v_wdt_ident,
+       .ops = &sun4v_wdt_ops,
+       .min_timeout = WDT_MIN_TIMEOUT,
+       .max_timeout = WDT_MAX_TIMEOUT,
+       .timeout = WDT_TIMEOUT,
+};
+
+static int __init sun4v_wdt_init(void)
+{
+       struct mdesc_handle *handle;
+       u64 node;
+       const u64 *value;
+       int err = 0;
+       unsigned long major = 1, minor = 1;
+
+       /*
+        * There are 2 properties that can be set from the control
+        * domain for the watchdog.
+        * watchdog-resolution
+        * watchdog-max-timeout
+        *
+        * We expect a handle to be returned; otherwise something is
+        * seriously wrong, so returning -ENODEV here is correct.
+        */
+
+       handle = mdesc_grab();
+       if (!handle)
+               return -ENODEV;
+
+       node = mdesc_node_by_name(handle, MDESC_NODE_NULL, "platform");
+       err = -ENODEV;
+       if (node == MDESC_NODE_NULL)
+               goto out_release;
+
+       /*
+        * This is a safe way to validate if we are on the right
+        * platform.
+        */
+       if (sun4v_hvapi_register(HV_GRP_CORE, major, &minor))
+               goto out_hv_unreg;
+
+       /* Allow value of watchdog-resolution up to 1s (default) */
+       value = mdesc_get_property(handle, node, "watchdog-resolution", NULL);
+       err = -EINVAL;
+       if (value) {
+               if (*value == 0 ||
+                   *value > WDT_DEFAULT_RESOLUTION_MS)
+                       goto out_hv_unreg;
+       }
+
+       value = mdesc_get_property(handle, node, "watchdog-max-timeout", NULL);
+       if (value) {
+               /*
+                * If the property value (in ms) is smaller than
+                * min_timeout, return -EINVAL.
+                */
+               if (*value < wdd.min_timeout * 1000)
+                       goto out_hv_unreg;
+
+               /*
+                * If the property value is smaller than the default
+                * max_timeout, set the watchdog max_timeout to the
+                * value of the property in seconds.
+                */
+               if (*value < wdd.max_timeout * 1000)
+                       wdd.max_timeout = *value / 1000;
+       }
+
+       watchdog_init_timeout(&wdd, timeout, NULL);
+
+       watchdog_set_nowayout(&wdd, nowayout);
+
+       err = watchdog_register_device(&wdd);
+       if (err)
+               goto out_hv_unreg;
+
+       pr_info("initialized (timeout=%ds, nowayout=%d)\n",
+                wdd.timeout, nowayout);
+
+       mdesc_release(handle);
+
+       return 0;
+
+out_hv_unreg:
+       sun4v_hvapi_unregister(HV_GRP_CORE);
+
+out_release:
+       mdesc_release(handle);
+       return err;
+}
+
+static void __exit sun4v_wdt_exit(void)
+{
+       sun4v_hvapi_unregister(HV_GRP_CORE);
+       watchdog_unregister_device(&wdd);
+}
+
+module_init(sun4v_wdt_init);
+module_exit(sun4v_wdt_exit);
+
+MODULE_AUTHOR("Wim Coekaerts <wim.coekaerts@oracle.com>");
+MODULE_DESCRIPTION("sun4v watchdog driver");
+MODULE_LICENSE("GPL");
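
A quick stand-alone illustration of the rounding behaviour described in the
sun4v_wdt_ping() comment above: the hypervisor rounds the programmed value up
to a multiple of its resolution, so the effective timeout can be slightly
longer than requested. This is a sketch, not part of the patch, and the sample
values are illustrative only.

    #include <stdio.h>

    /* Round a requested timeout (ms) up to the next multiple of the
     * hypervisor's watchdog resolution (ms). */
    static unsigned long effective_timeout_ms(unsigned long requested_ms,
                                              unsigned long resolution_ms)
    {
            return ((requested_ms + resolution_ms - 1) / resolution_ms) *
                    resolution_ms;
    }

    int main(void)
    {
            /* Default 1000 ms resolution: 60 s stays 60 s. */
            printf("%lu\n", effective_timeout_ms(60 * 1000, 1000));
            /* A 500 ms resolution would round 1250 ms up to 1500 ms. */
            printf("%lu\n", effective_timeout_ms(1250, 500));
            return 0;
    }
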
index 12eab503efd1bb8793fff55eca83649e1f564426..dc4305b407bf6756791ec9b31e7929c1fe5c3ca8 100644 (file)
@@ -257,7 +257,7 @@ static struct resource *additional_memory_resource(phys_addr_t size)
                return NULL;
 
        res->name = "System RAM";
-       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 
        ret = allocate_resource(&iomem_resource, res,
                                size, 0, -1,
index 945fc43272017cfe05bab171eb8b97e62057244c..4ac2ca8a76561952798f7c49bdd292719d0439a8 100644 (file)
@@ -242,7 +242,7 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
        return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static struct cleancache_ops tmem_cleancache_ops = {
+static const struct cleancache_ops tmem_cleancache_ops = {
        .put_page = tmem_cleancache_put_page,
        .get_page = tmem_cleancache_get_page,
        .invalidate_page = tmem_cleancache_flush_page,
index 36b210f9b6b29621fa9e410db063d3df53d7fb75..9282dbf5abdba6b01e059a45e7fc9895002dbd2d 100644 (file)
@@ -65,8 +65,7 @@ static ssize_t zorro_read_config(struct file *filp, struct kobject *kobj,
                                 struct bin_attribute *bin_attr,
                                 char *buf, loff_t off, size_t count)
 {
-       struct zorro_dev *z = to_zorro_dev(container_of(kobj, struct device,
-                                          kobj));
+       struct zorro_dev *z = to_zorro_dev(kobj_to_dev(kobj));
        struct ConfigDev cd;
 
        /* Construct a ConfigDev */
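
The open-coded container_of() above is replaced by the kobj_to_dev() helper,
which wraps exactly that pattern (see include/linux/device.h); the change is
purely for readability. A simplified, stand-alone version of the pattern with
illustrative names, not kernel code:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified container_of(): recover the enclosing structure from a
     * pointer to one of its members. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobject { int refcount; };
    struct device  { const char *name; struct kobject kobj; };

    static struct device *kobj_to_dev(struct kobject *kobj)
    {
            return container_of(kobj, struct device, kobj);
    }

    int main(void)
    {
            struct device dev = { .name = "zorro0" };

            printf("%s\n", kobj_to_dev(&dev.kobj)->name); /* prints "zorro0" */
            return 0;
    }
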
index 9adee0d7536e11343c9050b84174be6d2b90bdfc..922893f8ab4a5f6f2f9241e713c6fd4a6de103a0 100644 (file)
@@ -207,6 +207,7 @@ menuconfig MISC_FILESYSTEMS
 
 if MISC_FILESYSTEMS
 
+source "fs/orangefs/Kconfig"
 source "fs/adfs/Kconfig"
 source "fs/affs/Kconfig"
 source "fs/ecryptfs/Kconfig"
index 79f522575cba3e79e6909ca4c1c055d2cb54ce9a..f0f2f00951f505db759590419b1b04a091ee8874 100644 (file)
@@ -105,6 +105,7 @@ obj-$(CONFIG_AUTOFS4_FS)    += autofs4/
 obj-$(CONFIG_ADFS_FS)          += adfs/
 obj-$(CONFIG_FUSE_FS)          += fuse/
 obj-$(CONFIG_OVERLAY_FS)       += overlayfs/
+obj-$(CONFIG_ORANGEFS_FS)       += orangefs/
 obj-$(CONFIG_UDF_FS)           += udf/
 obj-$(CONFIG_SUN_OPENPROMFS)   += openpromfs/
 obj-$(CONFIG_OMFS_FS)          += omfs/
index 3a93755e880fee23fa6d8370916caefb21f2c9eb..051ea4809c14037fd0b1efffccb12c3909b3e46b 100644 (file)
@@ -491,6 +491,7 @@ static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
  * arch_check_elf() - check an ELF executable
  * @ehdr:      The main ELF header
  * @has_interp:        True if the ELF has an interpreter, else false.
+ * @interp_ehdr: The interpreter's ELF header
  * @state:     Architecture-specific state preserved throughout the process
  *             of loading the ELF.
  *
@@ -502,6 +503,7 @@ static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
  *         with that return code.
  */
 static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
+                                struct elfhdr *interp_ehdr,
                                 struct arch_elf_state *state)
 {
        /* Dummy implementation, always proceed */
@@ -829,7 +831,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
         * still possible to return an error to the code that invoked
         * the exec syscall.
         */
-       retval = arch_check_elf(&loc->elf_ex, !!interpreter, &arch_state);
+       retval = arch_check_elf(&loc->elf_ex,
+                               !!interpreter, &loc->interp_elf_ex,
+                               &arch_state);
        if (retval)
                goto out_free_dentry;
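
The arch_check_elf() kernel-doc above gains an @interp_ehdr parameter so an
architecture can inspect the interpreter's ELF header as well as the
executable's. As a purely hypothetical illustration (the mask and the policy
below are invented for this sketch, not taken from any real port), such a hook
could refuse to mix incompatible ABI variants; the same comparison in
stand-alone form:

    #include <elf.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical ABI-variant bits in e_flags; illustration only. */
    #define EF_EXAMPLE_ABI_MASK 0x0000000f

    static bool abi_compatible(const Elf64_Ehdr *exe, const Elf64_Ehdr *interp)
    {
            return (exe->e_flags & EF_EXAMPLE_ABI_MASK) ==
                   (interp->e_flags & EF_EXAMPLE_ABI_MASK);
    }

    int main(void)
    {
            Elf64_Ehdr exe = { .e_flags = 0x1 };
            Elf64_Ehdr interp = { .e_flags = 0x2 };

            printf("compatible: %d\n", abi_compatible(&exe, &interp)); /* 0 */
            return 0;
    }
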
 
index 7b9cd49622b132f5f71e6557f05d0a2591a37c16..39b3a174a4253974b4635bee951e2283cf71b9cf 100644 (file)
@@ -1730,43 +1730,25 @@ static int blkdev_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        return __dax_fault(vma, vmf, blkdev_get_block, NULL);
 }
 
-static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
-               pmd_t *pmd, unsigned int flags)
-{
-       return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
-}
-
-static void blkdev_vm_open(struct vm_area_struct *vma)
+static int blkdev_dax_pfn_mkwrite(struct vm_area_struct *vma,
+               struct vm_fault *vmf)
 {
-       struct inode *bd_inode = bdev_file_inode(vma->vm_file);
-       struct block_device *bdev = I_BDEV(bd_inode);
-
-       inode_lock(bd_inode);
-       bdev->bd_map_count++;
-       inode_unlock(bd_inode);
+       return dax_pfn_mkwrite(vma, vmf);
 }
 
-static void blkdev_vm_close(struct vm_area_struct *vma)
+static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
+               pmd_t *pmd, unsigned int flags)
 {
-       struct inode *bd_inode = bdev_file_inode(vma->vm_file);
-       struct block_device *bdev = I_BDEV(bd_inode);
-
-       inode_lock(bd_inode);
-       bdev->bd_map_count--;
-       inode_unlock(bd_inode);
+       return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
 }
 
 static const struct vm_operations_struct blkdev_dax_vm_ops = {
-       .open           = blkdev_vm_open,
-       .close          = blkdev_vm_close,
        .fault          = blkdev_dax_fault,
        .pmd_fault      = blkdev_dax_pmd_fault,
-       .pfn_mkwrite    = blkdev_dax_fault,
+       .pfn_mkwrite    = blkdev_dax_pfn_mkwrite,
 };
 
 static const struct vm_operations_struct blkdev_default_vm_ops = {
-       .open           = blkdev_vm_open,
-       .close          = blkdev_vm_close,
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
 };
@@ -1774,18 +1756,14 @@ static const struct vm_operations_struct blkdev_default_vm_ops = {
 static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct inode *bd_inode = bdev_file_inode(file);
-       struct block_device *bdev = I_BDEV(bd_inode);
 
        file_accessed(file);
-       inode_lock(bd_inode);
-       bdev->bd_map_count++;
        if (IS_DAX(bd_inode)) {
                vma->vm_ops = &blkdev_dax_vm_ops;
                vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
        } else {
                vma->vm_ops = &blkdev_default_vm_ops;
        }
-       inode_unlock(bd_inode);
 
        return 0;
 }
index 88d9af3d4581f7c45aa0065fa467d097625be5c1..5fb60ea7eee2b2c28e067d11b54c2fad7127c7d8 100644 (file)
@@ -328,8 +328,8 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
                list_add_tail(&work->ordered_list, &wq->ordered_list);
                spin_unlock_irqrestore(&wq->list_lock, flags);
        }
-       queue_work(wq->normal_wq, &work->normal_work);
        trace_btrfs_work_queued(work);
+       queue_work(wq->normal_wq, &work->normal_work);
 }
 
 void btrfs_queue_work(struct btrfs_workqueue *wq,
index 769e0ff1b4cebe998cc29f567d6fae73be4b8fb0..f5ef7d171bb888f48017080e91244a638f515255 100644 (file)
@@ -311,7 +311,7 @@ struct tree_mod_root {
 
 struct tree_mod_elem {
        struct rb_node node;
-       u64 index;              /* shifted logical */
+       u64 logical;
        u64 seq;
        enum mod_log_op op;
 
@@ -435,11 +435,11 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
 
 /*
  * key order of the log:
- *       index -> sequence
+ *       node/leaf start address -> sequence
  *
- * the index is the shifted logical of the *new* root node for root replace
- * operations, or the shifted logical of the affected block for all other
- * operations.
+ * The 'start address' is the logical address of the *new* root node
+ * for root replace operations, or the logical address of the affected
+ * block for all other operations.
  *
  * Note: must be called with write lock (tree_mod_log_write_lock).
  */
@@ -460,9 +460,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
        while (*new) {
                cur = container_of(*new, struct tree_mod_elem, node);
                parent = *new;
-               if (cur->index < tm->index)
+               if (cur->logical < tm->logical)
                        new = &((*new)->rb_left);
-               else if (cur->index > tm->index)
+               else if (cur->logical > tm->logical)
                        new = &((*new)->rb_right);
                else if (cur->seq < tm->seq)
                        new = &((*new)->rb_left);
@@ -523,7 +523,7 @@ alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
        if (!tm)
                return NULL;
 
-       tm->index = eb->start >> PAGE_CACHE_SHIFT;
+       tm->logical = eb->start;
        if (op != MOD_LOG_KEY_ADD) {
                btrfs_node_key(eb, &tm->key, slot);
                tm->blockptr = btrfs_node_blockptr(eb, slot);
@@ -588,7 +588,7 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
                goto free_tms;
        }
 
-       tm->index = eb->start >> PAGE_CACHE_SHIFT;
+       tm->logical = eb->start;
        tm->slot = src_slot;
        tm->move.dst_slot = dst_slot;
        tm->move.nr_items = nr_items;
@@ -699,7 +699,7 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
                goto free_tms;
        }
 
-       tm->index = new_root->start >> PAGE_CACHE_SHIFT;
+       tm->logical = new_root->start;
        tm->old_root.logical = old_root->start;
        tm->old_root.level = btrfs_header_level(old_root);
        tm->generation = btrfs_header_generation(old_root);
@@ -739,16 +739,15 @@ __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
        struct rb_node *node;
        struct tree_mod_elem *cur = NULL;
        struct tree_mod_elem *found = NULL;
-       u64 index = start >> PAGE_CACHE_SHIFT;
 
        tree_mod_log_read_lock(fs_info);
        tm_root = &fs_info->tree_mod_log;
        node = tm_root->rb_node;
        while (node) {
                cur = container_of(node, struct tree_mod_elem, node);
-               if (cur->index < index) {
+               if (cur->logical < start) {
                        node = node->rb_left;
-               } else if (cur->index > index) {
+               } else if (cur->logical > start) {
                        node = node->rb_right;
                } else if (cur->seq < min_seq) {
                        node = node->rb_left;
@@ -1230,9 +1229,10 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
                return NULL;
 
        /*
-        * the very last operation that's logged for a root is the replacement
-        * operation (if it is replaced at all). this has the index of the *new*
-        * root, making it the very first operation that's logged for this root.
+        * the very last operation that's logged for a root is the
+        * replacement operation (if it is replaced at all). this has
+        * the logical address of the *new* root, making it the very
+        * first operation that's logged for this root.
         */
        while (1) {
                tm = tree_mod_log_search_oldest(fs_info, root_logical,
@@ -1336,7 +1336,7 @@ __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
                if (!next)
                        break;
                tm = container_of(next, struct tree_mod_elem, node);
-               if (tm->index != first_tm->index)
+               if (tm->logical != first_tm->logical)
                        break;
        }
        tree_mod_log_read_unlock(fs_info);
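
The hunks above drop the page-shifted "index" key and order the tree mod log
rb-tree directly by the affected block's logical address, then by sequence
number. A stand-alone sketch of that two-level ordering (an illustration of
the comparison only, not the kernel's rb-tree code):

    #include <stdint.h>
    #include <stdio.h>

    struct mod_elem {
            uint64_t logical;  /* start address of the affected block */
            uint64_t seq;      /* modification sequence number */
    };

    /* Order by logical address first, then by sequence number. */
    static int mod_elem_cmp(const struct mod_elem *a, const struct mod_elem *b)
    {
            if (a->logical != b->logical)
                    return a->logical < b->logical ? -1 : 1;
            if (a->seq != b->seq)
                    return a->seq < b->seq ? -1 : 1;
            return 0;
    }

    int main(void)
    {
            struct mod_elem x = { .logical = 4096, .seq = 7 };
            struct mod_elem y = { .logical = 4096, .seq = 9 };

            printf("%d\n", mod_elem_cmp(&x, &y)); /* -1: same block, older seq */
            return 0;
    }
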
index bfe4a337fb4d13a058446265b7baf4a1437aa602..5f5c4fbd7a3c9880d56c86a5ddd16df03cb35f5a 100644 (file)
@@ -2353,6 +2353,9 @@ struct btrfs_map_token {
        unsigned long offset;
 };
 
+#define BTRFS_BYTES_TO_BLKS(fs_info, bytes) \
+                               ((bytes) >> (fs_info)->sb->s_blocksize_bits)
+
 static inline void btrfs_init_map_token (struct btrfs_map_token *token)
 {
        token->kaddr = NULL;
@@ -4027,7 +4030,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
                        struct inode *dir, u64 objectid,
                        const char *name, int name_len);
-int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
                        int front);
 int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
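
The new BTRFS_BYTES_TO_BLKS() macro above converts a byte count into a number
of filesystem blocks by shifting right by the superblock's s_blocksize_bits.
A stand-alone worked example of why that shift equals division by a
power-of-two block size (the 4K block size is only an example). Note the
conversion truncates, which is why callers that need a rounded-up count add
sectorsize - 1 before converting, as the checksum hunks further below do.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t bytes = 12288;           /* three 4K blocks */
            unsigned int blocksize_bits = 12; /* log2(4096) */

            printf("%llu\n", (unsigned long long)(bytes >> blocksize_bits)); /* 3 */
            printf("%llu\n", (unsigned long long)(bytes / 4096));            /* 3 */
            return 0;
    }
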
index dd08e29f51177d27859c12f0d173224a24a1f4a9..5699bbc23febaa25614c52f2b2546abe145fd69d 100644 (file)
@@ -182,6 +182,7 @@ static struct btrfs_lockdep_keyset {
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc"   },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc"   },
        { .id = BTRFS_UUID_TREE_OBJECTID,       .name_stem = "uuid"     },
+       { .id = BTRFS_FREE_SPACE_TREE_OBJECTID, .name_stem = "free-space" },
        { .id = 0,                              .name_stem = "tree"     },
 };
 
@@ -930,7 +931,7 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
        if (bio_flags & EXTENT_BIO_TREE_LOG)
                return 0;
 #ifdef CONFIG_X86
-       if (static_cpu_has_safe(X86_FEATURE_XMM4_2))
+       if (static_cpu_has(X86_FEATURE_XMM4_2))
                return 0;
 #endif
        return 1;
@@ -1787,7 +1788,6 @@ static int cleaner_kthread(void *arg)
        int again;
        struct btrfs_trans_handle *trans;
 
-       set_freezable();
        do {
                again = 0;
 
index 2e7c97a3f3444aec33a688a4d1f705b0486026cf..1b2073389dc2cf71ef5051071acc98588f393a8d 100644 (file)
@@ -3186,7 +3186,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
        while (1) {
                lock_extent(tree, start, end);
-               ordered = btrfs_lookup_ordered_extent(inode, start);
+               ordered = btrfs_lookup_ordered_range(inode, start,
+                                               PAGE_CACHE_SIZE);
                if (!ordered)
                        break;
                unlock_extent(tree, start, end);
index a67e1c828d0f735c3974e9ff0dca098cd759e9d3..1c50a7b09b4e4bd87e88eef65b9b3b32401083e8 100644 (file)
@@ -172,6 +172,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
        u64 item_start_offset = 0;
        u64 item_last_offset = 0;
        u64 disk_bytenr;
+       u64 page_bytes_left;
        u32 diff;
        int nblocks;
        int bio_index = 0;
@@ -220,6 +221,8 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
        disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
        if (dio)
                offset = logical_offset;
+
+       page_bytes_left = bvec->bv_len;
        while (bio_index < bio->bi_vcnt) {
                if (!dio)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;
@@ -243,7 +246,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                                if (BTRFS_I(inode)->root->root_key.objectid ==
                                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                                        set_extent_bits(io_tree, offset,
-                                               offset + bvec->bv_len - 1,
+                                               offset + root->sectorsize - 1,
                                                EXTENT_NODATASUM, GFP_NOFS);
                                } else {
                                        btrfs_info(BTRFS_I(inode)->root->fs_info,
@@ -281,11 +284,17 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
 found:
                csum += count * csum_size;
                nblocks -= count;
-               bio_index += count;
+
                while (count--) {
-                       disk_bytenr += bvec->bv_len;
-                       offset += bvec->bv_len;
-                       bvec++;
+                       disk_bytenr += root->sectorsize;
+                       offset += root->sectorsize;
+                       page_bytes_left -= root->sectorsize;
+                       if (!page_bytes_left) {
+                               bio_index++;
+                               bvec++;
+                               page_bytes_left = bvec->bv_len;
+                       }
+
                }
        }
        btrfs_free_path(path);
@@ -432,6 +441,8 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        int index;
+       int nr_sectors;
+       int i;
        unsigned long total_bytes = 0;
        unsigned long this_sum_bytes = 0;
        u64 offset;
@@ -459,41 +470,56 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                if (!contig)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;
 
-               if (offset >= ordered->file_offset + ordered->len ||
-                   offset < ordered->file_offset) {
-                       unsigned long bytes_left;
-                       sums->len = this_sum_bytes;
-                       this_sum_bytes = 0;
-                       btrfs_add_ordered_sum(inode, ordered, sums);
-                       btrfs_put_ordered_extent(ordered);
+               data = kmap_atomic(bvec->bv_page);
 
-                       bytes_left = bio->bi_iter.bi_size - total_bytes;
+               nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+                                               bvec->bv_len + root->sectorsize
+                                               - 1);
+
+               for (i = 0; i < nr_sectors; i++) {
+                       if (offset >= ordered->file_offset + ordered->len ||
+                               offset < ordered->file_offset) {
+                               unsigned long bytes_left;
+
+                               kunmap_atomic(data);
+                               sums->len = this_sum_bytes;
+                               this_sum_bytes = 0;
+                               btrfs_add_ordered_sum(inode, ordered, sums);
+                               btrfs_put_ordered_extent(ordered);
+
+                               bytes_left = bio->bi_iter.bi_size - total_bytes;
+
+                               sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
+                                       GFP_NOFS);
+                               BUG_ON(!sums); /* -ENOMEM */
+                               sums->len = bytes_left;
+                               ordered = btrfs_lookup_ordered_extent(inode,
+                                                               offset);
+                               ASSERT(ordered); /* Logic error */
+                               sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
+                                       + total_bytes;
+                               index = 0;
+
+                               data = kmap_atomic(bvec->bv_page);
+                       }
 
-                       sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
-                                      GFP_NOFS);
-                       BUG_ON(!sums); /* -ENOMEM */
-                       sums->len = bytes_left;
-                       ordered = btrfs_lookup_ordered_extent(inode, offset);
-                       BUG_ON(!ordered); /* Logic error */
-                       sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
-                                      total_bytes;
-                       index = 0;
+                       sums->sums[index] = ~(u32)0;
+                       sums->sums[index]
+                               = btrfs_csum_data(data + bvec->bv_offset
+                                               + (i * root->sectorsize),
+                                               sums->sums[index],
+                                               root->sectorsize);
+                       btrfs_csum_final(sums->sums[index],
+                                       (char *)(sums->sums + index));
+                       index++;
+                       offset += root->sectorsize;
+                       this_sum_bytes += root->sectorsize;
+                       total_bytes += root->sectorsize;
                }
 
-               data = kmap_atomic(bvec->bv_page);
-               sums->sums[index] = ~(u32)0;
-               sums->sums[index] = btrfs_csum_data(data + bvec->bv_offset,
-                                                   sums->sums[index],
-                                                   bvec->bv_len);
                kunmap_atomic(data);
-               btrfs_csum_final(sums->sums[index],
-                                (char *)(sums->sums + index));
 
                bio_index++;
-               index++;
-               total_bytes += bvec->bv_len;
-               this_sum_bytes += bvec->bv_len;
-               offset += bvec->bv_len;
                bvec++;
        }
        this_sum_bytes = 0;
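
The __btrfs_lookup_bio_sums() and btrfs_csum_one_bio() changes above stop
assuming one checksum per bio_vec and instead walk every vec in
sectorsize-sized steps, so each block gets its own checksum even when a page
holds several sectors. A stand-alone sketch of that per-sector loop; the
checksum routine is a trivial placeholder (not btrfs_csum_data() or CRC32C)
and the buffer length is assumed to be sector-aligned:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t toy_csum(const uint8_t *data, size_t len)
    {
            uint32_t sum = ~0u;

            while (len--)
                    sum = (sum << 1) ^ *data++; /* placeholder, not CRC32C */
            return sum;
    }

    /* One checksum per sectorsize-sized block of the buffer. */
    static void csum_buffer(const uint8_t *buf, size_t len, size_t sectorsize,
                            uint32_t *sums)
    {
            size_t nr_sectors = len / sectorsize;

            for (size_t i = 0; i < nr_sectors; i++)
                    sums[i] = toy_csum(buf + i * sectorsize, sectorsize);
    }

    int main(void)
    {
            uint8_t page[8192] = { 0 };                  /* an 8K buffer ... */
            uint32_t sums[2];

            csum_buffer(page, sizeof(page), 4096, sums); /* ... 4K sectors */
            printf("%08x %08x\n", (unsigned)sums[0], (unsigned)sums[1]);
            return 0;
    }
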
index 098bb8f690c992e1ebd01270f49ff2d37e6658bd..5a58e292bdadc7d586086102f42c540e5f52fb98 100644 (file)
@@ -498,7 +498,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
        loff_t isize = i_size_read(inode);
 
        start_pos = pos & ~((u64)root->sectorsize - 1);
-       num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
+       num_bytes = round_up(write_bytes + pos - start_pos, root->sectorsize);
 
        end_of_last_block = start_pos + num_bytes - 1;
        err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -1379,16 +1379,19 @@ fail:
 static noinline int
 lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
                                size_t num_pages, loff_t pos,
+                               size_t write_bytes,
                                u64 *lockstart, u64 *lockend,
                                struct extent_state **cached_state)
 {
+       struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 start_pos;
        u64 last_pos;
        int i;
        int ret = 0;
 
-       start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
-       last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;
+       start_pos = round_down(pos, root->sectorsize);
+       last_pos = start_pos
+               + round_up(pos + write_bytes - start_pos, root->sectorsize) - 1;
 
        if (start_pos < inode->i_size) {
                struct btrfs_ordered_extent *ordered;
@@ -1503,6 +1506,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 
        while (iov_iter_count(i) > 0) {
                size_t offset = pos & (PAGE_CACHE_SIZE - 1);
+               size_t sector_offset;
                size_t write_bytes = min(iov_iter_count(i),
                                         nrptrs * (size_t)PAGE_CACHE_SIZE -
                                         offset);
@@ -1511,6 +1515,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                size_t reserve_bytes;
                size_t dirty_pages;
                size_t copied;
+               size_t dirty_sectors;
+               size_t num_sectors;
 
                WARN_ON(num_pages > nrptrs);
 
@@ -1523,7 +1529,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                        break;
                }
 
-               reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+               sector_offset = pos & (root->sectorsize - 1);
+               reserve_bytes = round_up(write_bytes + sector_offset,
+                               root->sectorsize);
 
                if (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
                                             BTRFS_INODE_PREALLOC)) {
@@ -1542,7 +1550,9 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                 */
                                num_pages = DIV_ROUND_UP(write_bytes + offset,
                                                         PAGE_CACHE_SIZE);
-                               reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
+                               reserve_bytes = round_up(write_bytes
+                                                       + sector_offset,
+                                                       root->sectorsize);
                                goto reserve_metadata;
                        }
                }
@@ -1576,8 +1586,8 @@ again:
                        break;
 
                ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
-                                                     pos, &lockstart, &lockend,
-                                                     &cached_state);
+                                               pos, write_bytes, &lockstart,
+                                               &lockend, &cached_state);
                if (ret < 0) {
                        if (ret == -EAGAIN)
                                goto again;
@@ -1612,9 +1622,16 @@ again:
                 * we still have an outstanding extent for the chunk we actually
                 * managed to copy.
                 */
-               if (num_pages > dirty_pages) {
-                       release_bytes = (num_pages - dirty_pages) <<
-                               PAGE_CACHE_SHIFT;
+               num_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+                                               reserve_bytes);
+               dirty_sectors = round_up(copied + sector_offset,
+                                       root->sectorsize);
+               dirty_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info,
+                                               dirty_sectors);
+
+               if (num_sectors > dirty_sectors) {
+                       release_bytes = (write_bytes - copied)
+                               & ~((u64)root->sectorsize - 1);
                        if (copied > 0) {
                                spin_lock(&BTRFS_I(inode)->lock);
                                BTRFS_I(inode)->outstanding_extents++;
@@ -1633,7 +1650,8 @@ again:
                        }
                }
 
-               release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
+               release_bytes = round_up(copied + sector_offset,
+                                       root->sectorsize);
 
                if (copied > 0)
                        ret = btrfs_dirty_pages(root, inode, pages,
@@ -1654,8 +1672,7 @@ again:
 
                if (only_release_metadata && copied > 0) {
                        lockstart = round_down(pos, root->sectorsize);
-                       lockend = lockstart +
-                               (dirty_pages << PAGE_CACHE_SHIFT) - 1;
+                       lockend = round_up(pos + copied, root->sectorsize) - 1;
 
                        set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
                                       lockend, EXTENT_NORESERVE, NULL,
@@ -1761,6 +1778,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        ssize_t err;
        loff_t pos;
        size_t count;
+       loff_t oldsize;
+       int clean_page = 0;
 
        inode_lock(inode);
        err = generic_write_checks(iocb, from);
@@ -1799,14 +1818,17 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
        pos = iocb->ki_pos;
        count = iov_iter_count(from);
        start_pos = round_down(pos, root->sectorsize);
-       if (start_pos > i_size_read(inode)) {
+       oldsize = i_size_read(inode);
+       if (start_pos > oldsize) {
                /* Expand hole size to cover write data, preventing empty gap */
                end_pos = round_up(pos + count, root->sectorsize);
-               err = btrfs_cont_expand(inode, i_size_read(inode), end_pos);
+               err = btrfs_cont_expand(inode, oldsize, end_pos);
                if (err) {
                        inode_unlock(inode);
                        goto out;
                }
+               if (start_pos > round_up(oldsize, root->sectorsize))
+                       clean_page = 1;
        }
 
        if (sync)
@@ -1818,6 +1840,9 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
                num_written = __btrfs_buffered_write(file, from, pos);
                if (num_written > 0)
                        iocb->ki_pos = pos + num_written;
+               if (clean_page)
+                       pagecache_isize_extended(inode, oldsize,
+                                               i_size_read(inode));
        }
 
        inode_unlock(inode);
@@ -2293,10 +2318,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        int ret = 0;
        int err = 0;
        unsigned int rsv_count;
-       bool same_page;
+       bool same_block;
        bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
        u64 ino_size;
-       bool truncated_page = false;
+       bool truncated_block = false;
        bool updated_inode = false;
 
        ret = btrfs_wait_ordered_range(inode, offset, len);
@@ -2304,7 +2329,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                return ret;
 
        inode_lock(inode);
-       ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
+       ino_size = round_up(inode->i_size, root->sectorsize);
        ret = find_first_non_hole(inode, &offset, &len);
        if (ret < 0)
                goto out_only_mutex;
@@ -2317,31 +2342,30 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
        lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
        lockend = round_down(offset + len,
                             BTRFS_I(inode)->root->sectorsize) - 1;
-       same_page = ((offset >> PAGE_CACHE_SHIFT) ==
-                   ((offset + len - 1) >> PAGE_CACHE_SHIFT));
-
+       same_block = (BTRFS_BYTES_TO_BLKS(root->fs_info, offset))
+               == (BTRFS_BYTES_TO_BLKS(root->fs_info, offset + len - 1));
        /*
-        * We needn't truncate any page which is beyond the end of the file
+        * We needn't truncate any block which is beyond the end of the file
         * because we are sure there is no data there.
         */
        /*
-        * Only do this if we are in the same page and we aren't doing the
-        * entire page.
+        * Only do this if we are in the same block and we aren't doing the
+        * entire block.
         */
-       if (same_page && len < PAGE_CACHE_SIZE) {
+       if (same_block && len < root->sectorsize) {
                if (offset < ino_size) {
-                       truncated_page = true;
-                       ret = btrfs_truncate_page(inode, offset, len, 0);
+                       truncated_block = true;
+                       ret = btrfs_truncate_block(inode, offset, len, 0);
                } else {
                        ret = 0;
                }
                goto out_only_mutex;
        }
 
-       /* zero back part of the first page */
+       /* zero back part of the first block */
        if (offset < ino_size) {
-               truncated_page = true;
-               ret = btrfs_truncate_page(inode, offset, 0, 0);
+               truncated_block = true;
+               ret = btrfs_truncate_block(inode, offset, 0, 0);
                if (ret) {
                        inode_unlock(inode);
                        return ret;
@@ -2376,9 +2400,10 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
                if (!ret) {
                        /* zero the front end of the last page */
                        if (tail_start + tail_len < ino_size) {
-                               truncated_page = true;
-                               ret = btrfs_truncate_page(inode,
-                                               tail_start + tail_len, 0, 1);
+                               truncated_block = true;
+                               ret = btrfs_truncate_block(inode,
+                                                       tail_start + tail_len,
+                                                       0, 1);
                                if (ret)
                                        goto out_only_mutex;
                        }
@@ -2558,7 +2583,7 @@ out:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
                             &cached_state, GFP_NOFS);
 out_only_mutex:
-       if (!updated_inode && truncated_page && !ret && !err) {
+       if (!updated_inode && truncated_block && !ret && !err) {
                /*
                 * If we only end up zeroing part of a page, we still need to
                 * update the inode item, so that all the time fields are
@@ -2678,10 +2703,10 @@ static long btrfs_fallocate(struct file *file, int mode,
        } else if (offset + len > inode->i_size) {
                /*
                 * If we are fallocating from the end of the file onward we
-                * need to zero out the end of the page if i_size lands in the
-                * middle of a page.
+                * need to zero out the end of the block if i_size lands in the
+                * middle of a block.
                 */
-               ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
+               ret = btrfs_truncate_block(inode, inode->i_size, 0, 0);
                if (ret)
                        goto out;
        }
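
The buffered-write hunks above replace page-granular maths with
sector-granular maths: the locked range now starts at
round_down(pos, sectorsize) and covers round_up(pos + write_bytes - start_pos,
sectorsize) bytes, and the reservation becomes round_up(write_bytes +
sector_offset, sectorsize). A stand-alone worked example with illustrative
numbers (4K sectors, pos = 6000 and write_bytes = 3000 are not taken from the
patch):

    #include <stdint.h>
    #include <stdio.h>

    #define ROUND_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
    #define ROUND_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

    int main(void)
    {
            uint64_t sectorsize = 4096;
            uint64_t pos = 6000, write_bytes = 3000;

            uint64_t start_pos = ROUND_DOWN(pos, sectorsize);           /* 4096 */
            uint64_t last_pos = start_pos +
                    ROUND_UP(pos + write_bytes - start_pos, sectorsize) - 1;
                                                                        /* 12287 */
            uint64_t reserve = ROUND_UP(write_bytes + (pos & (sectorsize - 1)),
                                        sectorsize);                    /* 8192 */

            printf("lock [%llu, %llu], reserve %llu bytes\n",
                   (unsigned long long)start_pos,
                   (unsigned long long)last_pos,
                   (unsigned long long)reserve);
            return 0;
    }
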
index 393e36bd5845d996d216e14ef2c6ac6866fce746..53dbeaf6ce941cc7a6a8bf06c6a81df354e9e364 100644 (file)
@@ -153,6 +153,20 @@ static inline u32 free_space_bitmap_size(u64 size, u32 sectorsize)
 
 static unsigned long *alloc_bitmap(u32 bitmap_size)
 {
+       void *mem;
+
+       /*
+        * The allocation size varies; observed sizes range from under 4K
+        * up to 16K. Using vmalloc unconditionally would be too heavy, so
+        * try a contiguous allocation first.
+        */
+       if (bitmap_size <= PAGE_SIZE)
+               return kzalloc(bitmap_size, GFP_NOFS);
+
+       mem = kzalloc(bitmap_size, GFP_NOFS | __GFP_NOWARN);
+       if (mem)
+               return mem;
+
        return __vmalloc(bitmap_size, GFP_NOFS | __GFP_HIGHMEM | __GFP_ZERO,
                         PAGE_KERNEL);
 }
@@ -289,7 +303,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
 
        ret = 0;
 out:
-       vfree(bitmap);
+       kvfree(bitmap);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        return ret;
@@ -438,7 +452,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
 
        ret = 0;
 out:
-       vfree(bitmap);
+       kvfree(bitmap);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
        return ret;
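
alloc_bitmap() above now prefers a physically contiguous kzalloc() and only
falls back to __vmalloc() for large bitmaps, so the free side must cope with
either kind of pointer; that is why vfree() becomes kvfree(). Conceptually,
the generic helper dispatches on the pointer type roughly like this (a sketch
of its behaviour; see mm/util.c for the real implementation):

    static void kvfree_sketch(const void *addr)
    {
            if (is_vmalloc_addr(addr))
                    vfree(addr);
            else
                    kfree(addr);
    }

With kvfree(), callers no longer need to remember which allocator actually
satisfied the request.
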
index e28f3d4691afee8bf6dcd68d0617a27e07439e82..3e0d4151151723446ef4d4cee6dfd7959ddd53d7 100644 (file)
@@ -263,7 +263,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
                data_len = compressed_size;
 
        if (start > 0 ||
-           actual_end > PAGE_CACHE_SIZE ||
+           actual_end > root->sectorsize ||
            data_len > BTRFS_MAX_INLINE_DATA_SIZE(root) ||
            (!compressed_size &&
            (actual_end & (root->sectorsize - 1)) == 0) ||
@@ -2002,7 +2002,8 @@ again:
        if (PagePrivate2(page))
                goto out;
 
-       ordered = btrfs_lookup_ordered_extent(inode, page_start);
+       ordered = btrfs_lookup_ordered_range(inode, page_start,
+                                       PAGE_CACHE_SIZE);
        if (ordered) {
                unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
                                     page_end, &cached_state, GFP_NOFS);
@@ -4248,7 +4249,8 @@ static int truncate_inline_extent(struct inode *inode,
                 * read the extent item from disk (data not in the page cache).
                 */
                btrfs_release_path(path);
-               return btrfs_truncate_page(inode, offset, page_end - offset, 0);
+               return btrfs_truncate_block(inode, offset, page_end - offset,
+                                       0);
        }
 
        btrfs_set_file_extent_ram_bytes(leaf, fi, size);
@@ -4601,17 +4603,17 @@ error:
 }
 
 /*
- * btrfs_truncate_page - read, zero a chunk and write a page
+ * btrfs_truncate_block - read, zero a chunk and write a block
  * @inode - inode that we're zeroing
  * @from - the offset to start zeroing
  * @len - the length to zero, 0 to zero the entire range respective to the
  *     offset
  * @front - zero up to the offset instead of from the offset on
  *
- * This will find the page for the "from" offset and cow the page and zero the
+ * This will find the block for the "from" offset, cow it and zero the
  * part we want to zero.  This is used with truncate and hole punching.
  */
-int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
+int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
                        int front)
 {
        struct address_space *mapping = inode->i_mapping;
@@ -4622,18 +4624,19 @@ int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
        char *kaddr;
        u32 blocksize = root->sectorsize;
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
-       unsigned offset = from & (PAGE_CACHE_SIZE-1);
+       unsigned offset = from & (blocksize - 1);
        struct page *page;
        gfp_t mask = btrfs_alloc_write_mask(mapping);
        int ret = 0;
-       u64 page_start;
-       u64 page_end;
+       u64 block_start;
+       u64 block_end;
 
        if ((offset & (blocksize - 1)) == 0 &&
            (!len || ((len & (blocksize - 1)) == 0)))
                goto out;
+
        ret = btrfs_delalloc_reserve_space(inode,
-                       round_down(from, PAGE_CACHE_SIZE), PAGE_CACHE_SIZE);
+                       round_down(from, blocksize), blocksize);
        if (ret)
                goto out;
 
@@ -4641,14 +4644,14 @@ again:
        page = find_or_create_page(mapping, index, mask);
        if (!page) {
                btrfs_delalloc_release_space(inode,
-                               round_down(from, PAGE_CACHE_SIZE),
-                               PAGE_CACHE_SIZE);
+                               round_down(from, blocksize),
+                               blocksize);
                ret = -ENOMEM;
                goto out;
        }
 
-       page_start = page_offset(page);
-       page_end = page_start + PAGE_CACHE_SIZE - 1;
+       block_start = round_down(from, blocksize);
+       block_end = block_start + blocksize - 1;
 
        if (!PageUptodate(page)) {
                ret = btrfs_readpage(NULL, page);
@@ -4665,12 +4668,12 @@ again:
        }
        wait_on_page_writeback(page);
 
-       lock_extent_bits(io_tree, page_start, page_end, &cached_state);
+       lock_extent_bits(io_tree, block_start, block_end, &cached_state);
        set_page_extent_mapped(page);
 
-       ordered = btrfs_lookup_ordered_extent(inode, page_start);
+       ordered = btrfs_lookup_ordered_extent(inode, block_start);
        if (ordered) {
-               unlock_extent_cached(io_tree, page_start, page_end,
+               unlock_extent_cached(io_tree, block_start, block_end,
                                     &cached_state, GFP_NOFS);
                unlock_page(page);
                page_cache_release(page);
@@ -4679,39 +4682,41 @@ again:
                goto again;
        }
 
-       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
                          EXTENT_DIRTY | EXTENT_DELALLOC |
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+       ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
                                        &cached_state);
        if (ret) {
-               unlock_extent_cached(io_tree, page_start, page_end,
+               unlock_extent_cached(io_tree, block_start, block_end,
                                     &cached_state, GFP_NOFS);
                goto out_unlock;
        }
 
-       if (offset != PAGE_CACHE_SIZE) {
+       if (offset != blocksize) {
                if (!len)
-                       len = PAGE_CACHE_SIZE - offset;
+                       len = blocksize - offset;
                kaddr = kmap(page);
                if (front)
-                       memset(kaddr, 0, offset);
+                       memset(kaddr + (block_start - page_offset(page)),
+                               0, offset);
                else
-                       memset(kaddr + offset, 0, len);
+                       memset(kaddr + (block_start - page_offset(page)) + offset,
+                               0, len);
                flush_dcache_page(page);
                kunmap(page);
        }
        ClearPageChecked(page);
        set_page_dirty(page);
-       unlock_extent_cached(io_tree, page_start, page_end, &cached_state,
+       unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
                             GFP_NOFS);
 
 out_unlock:
        if (ret)
-               btrfs_delalloc_release_space(inode, page_start,
-                                            PAGE_CACHE_SIZE);
+               btrfs_delalloc_release_space(inode, block_start,
+                                            blocksize);
        unlock_page(page);
        page_cache_release(page);
 out:
@@ -4782,11 +4787,11 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
        int err = 0;
 
        /*
-        * If our size started in the middle of a page we need to zero out the
-        * rest of the page before we expand the i_size, otherwise we could
+        * If our size started in the middle of a block we need to zero out the
+        * rest of the block before we expand the i_size, otherwise we could
         * expose stale data.
         */
-       err = btrfs_truncate_page(inode, oldsize, 0, 0);
+       err = btrfs_truncate_block(inode, oldsize, 0, 0);
        if (err)
                return err;
 
@@ -4895,7 +4900,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
        }
 
        if (newsize > oldsize) {
-               truncate_pagecache(inode, newsize);
                /*
                 * Don't do an expanding truncate while snapshoting is ongoing.
                 * This is to ensure the snapshot captures a fully consistent
@@ -4918,6 +4922,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 
                i_size_write(inode, newsize);
                btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+               pagecache_isize_extended(inode, oldsize, newsize);
                ret = btrfs_update_inode(trans, root, inode);
                btrfs_end_write_no_snapshoting(root);
                btrfs_end_transaction(trans, root);
@@ -7116,21 +7121,41 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
        if (ret)
                return ERR_PTR(ret);
 
-       em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
-                             ins.offset, ins.offset, ins.offset, 0);
-       if (IS_ERR(em)) {
-               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
-               return em;
-       }
-
+       /*
+        * Create the ordered extent before the extent map. This is to avoid
+        * races with the fast fsync path that would lead to it logging file
+        * extent items that point to disk extents that were not yet written to.
+        * The fast fsync path collects ordered extents into a local list and
+        * then collects all the new extent maps, so we must create the ordered
+        * extent first and make sure the fast fsync path collects any new
+        * ordered extents after collecting new extent maps as well.
+        * The fsync path simply cannot rely on inode_dio_wait() because it
+        * causes a deadlock with AIO.
+        */
        ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
                                           ins.offset, ins.offset, 0);
        if (ret) {
                btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
-               free_extent_map(em);
                return ERR_PTR(ret);
        }
 
+       em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
+                             ins.offset, ins.offset, ins.offset, 0);
+       if (IS_ERR(em)) {
+               struct btrfs_ordered_extent *oe;
+
+               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
+               oe = btrfs_lookup_ordered_extent(inode, start);
+               ASSERT(oe);
+               if (WARN_ON(!oe))
+                       return em;
+               set_bit(BTRFS_ORDERED_IOERR, &oe->flags);
+               set_bit(BTRFS_ORDERED_IO_DONE, &oe->flags);
+               btrfs_remove_ordered_extent(inode, oe);
+               /* Once for our lookup and once for the ordered extents tree. */
+               btrfs_put_ordered_extent(oe);
+               btrfs_put_ordered_extent(oe);
+       }
        return em;
 }
 
@@ -7732,9 +7757,9 @@ static int btrfs_check_dio_repairable(struct inode *inode,
 }
 
 static int dio_read_error(struct inode *inode, struct bio *failed_bio,
-                         struct page *page, u64 start, u64 end,
-                         int failed_mirror, bio_end_io_t *repair_endio,
-                         void *repair_arg)
+                       struct page *page, unsigned int pgoff,
+                       u64 start, u64 end, int failed_mirror,
+                       bio_end_io_t *repair_endio, void *repair_arg)
 {
        struct io_failure_record *failrec;
        struct bio *bio;
@@ -7755,7 +7780,9 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
                return -EIO;
        }
 
-       if (failed_bio->bi_vcnt > 1)
+       if ((failed_bio->bi_vcnt > 1)
+               || (failed_bio->bi_io_vec->bv_len
+                       > BTRFS_I(inode)->root->sectorsize))
                read_mode = READ_SYNC | REQ_FAILFAST_DEV;
        else
                read_mode = READ_SYNC;
@@ -7763,7 +7790,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
        isector = start - btrfs_io_bio(failed_bio)->logical;
        isector >>= inode->i_sb->s_blocksize_bits;
        bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
-                                     0, isector, repair_endio, repair_arg);
+                               pgoff, isector, repair_endio, repair_arg);
        if (!bio) {
                free_io_failure(inode, failrec);
                return -EIO;
@@ -7793,12 +7820,17 @@ struct btrfs_retry_complete {
 static void btrfs_retry_endio_nocsum(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
+       struct inode *inode;
        struct bio_vec *bvec;
        int i;
 
        if (bio->bi_error)
                goto end;
 
+       ASSERT(bio->bi_vcnt == 1);
+       inode = bio->bi_io_vec->bv_page->mapping->host;
+       ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+
        done->uptodate = 1;
        bio_for_each_segment_all(bvec, bio, i)
                clean_io_failure(done->inode, done->start, bvec->bv_page, 0);
@@ -7810,25 +7842,35 @@ end:
 static int __btrfs_correct_data_nocsum(struct inode *inode,
                                       struct btrfs_io_bio *io_bio)
 {
+       struct btrfs_fs_info *fs_info;
        struct bio_vec *bvec;
        struct btrfs_retry_complete done;
        u64 start;
+       unsigned int pgoff;
+       u32 sectorsize;
+       int nr_sectors;
        int i;
        int ret;
 
+       fs_info = BTRFS_I(inode)->root->fs_info;
+       sectorsize = BTRFS_I(inode)->root->sectorsize;
+
        start = io_bio->logical;
        done.inode = inode;
 
        bio_for_each_segment_all(bvec, &io_bio->bio, i) {
-try_again:
+               nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+               pgoff = bvec->bv_offset;
+
+next_block_or_try_again:
                done.uptodate = 0;
                done.start = start;
                init_completion(&done.done);
 
-               ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
-                                    start + bvec->bv_len - 1,
-                                    io_bio->mirror_num,
-                                    btrfs_retry_endio_nocsum, &done);
+               ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
+                               pgoff, start, start + sectorsize - 1,
+                               io_bio->mirror_num,
+                               btrfs_retry_endio_nocsum, &done);
                if (ret)
                        return ret;
 
@@ -7836,10 +7878,15 @@ try_again:
 
                if (!done.uptodate) {
                        /* We might have another mirror, so try again */
-                       goto try_again;
+                       goto next_block_or_try_again;
                }
 
-               start += bvec->bv_len;
+               start += sectorsize;
+
+               if (nr_sectors--) {
+                       pgoff += sectorsize;
+                       goto next_block_or_try_again;
+               }
        }
 
        return 0;
@@ -7849,7 +7896,9 @@ static void btrfs_retry_endio(struct bio *bio)
 {
        struct btrfs_retry_complete *done = bio->bi_private;
        struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
+       struct inode *inode;
        struct bio_vec *bvec;
+       u64 start;
        int uptodate;
        int ret;
        int i;
@@ -7858,13 +7907,20 @@ static void btrfs_retry_endio(struct bio *bio)
                goto end;
 
        uptodate = 1;
+
+       start = done->start;
+
+       ASSERT(bio->bi_vcnt == 1);
+       inode = bio->bi_io_vec->bv_page->mapping->host;
+       ASSERT(bio->bi_io_vec->bv_len == BTRFS_I(inode)->root->sectorsize);
+
        bio_for_each_segment_all(bvec, bio, i) {
                ret = __readpage_endio_check(done->inode, io_bio, i,
-                                            bvec->bv_page, 0,
-                                            done->start, bvec->bv_len);
+                                       bvec->bv_page, bvec->bv_offset,
+                                       done->start, bvec->bv_len);
                if (!ret)
                        clean_io_failure(done->inode, done->start,
-                                        bvec->bv_page, 0);
+                                       bvec->bv_page, bvec->bv_offset);
                else
                        uptodate = 0;
        }
@@ -7878,20 +7934,34 @@ end:
 static int __btrfs_subio_endio_read(struct inode *inode,
                                    struct btrfs_io_bio *io_bio, int err)
 {
+       struct btrfs_fs_info *fs_info;
        struct bio_vec *bvec;
        struct btrfs_retry_complete done;
        u64 start;
        u64 offset = 0;
+       u32 sectorsize;
+       int nr_sectors;
+       unsigned int pgoff;
+       int csum_pos;
        int i;
        int ret;
 
+       fs_info = BTRFS_I(inode)->root->fs_info;
+       sectorsize = BTRFS_I(inode)->root->sectorsize;
+
        err = 0;
        start = io_bio->logical;
        done.inode = inode;
 
        bio_for_each_segment_all(bvec, &io_bio->bio, i) {
-               ret = __readpage_endio_check(inode, io_bio, i, bvec->bv_page,
-                                            0, start, bvec->bv_len);
+               nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
+
+               pgoff = bvec->bv_offset;
+next_block:
+               csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
+               ret = __readpage_endio_check(inode, io_bio, csum_pos,
+                                       bvec->bv_page, pgoff, start,
+                                       sectorsize);
                if (likely(!ret))
                        goto next;
 try_again:
@@ -7899,10 +7969,10 @@ try_again:
                done.start = start;
                init_completion(&done.done);
 
-               ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page, start,
-                                    start + bvec->bv_len - 1,
-                                    io_bio->mirror_num,
-                                    btrfs_retry_endio, &done);
+               ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
+                               pgoff, start, start + sectorsize - 1,
+                               io_bio->mirror_num,
+                               btrfs_retry_endio, &done);
                if (ret) {
                        err = ret;
                        goto next;
@@ -7915,8 +7985,15 @@ try_again:
                        goto try_again;
                }
 next:
-               offset += bvec->bv_len;
-               start += bvec->bv_len;
+               offset += sectorsize;
+               start += sectorsize;
+
+               ASSERT(nr_sectors);
+
+               if (--nr_sectors) {
+                       pgoff += sectorsize;
+                       goto next_block;
+               }
        }
 
        return err;
@@ -8168,9 +8245,11 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        u64 file_offset = dip->logical_offset;
        u64 submit_len = 0;
        u64 map_length;
-       int nr_pages = 0;
-       int ret;
+       u32 blocksize = root->sectorsize;
        int async_submit = 0;
+       int nr_sectors;
+       int ret;
+       int i;
 
        map_length = orig_bio->bi_iter.bi_size;
        ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
@@ -8200,9 +8279,12 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        atomic_inc(&dip->pending_bios);
 
        while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
-               if (map_length < submit_len + bvec->bv_len ||
-                   bio_add_page(bio, bvec->bv_page, bvec->bv_len,
-                                bvec->bv_offset) < bvec->bv_len) {
+               nr_sectors = BTRFS_BYTES_TO_BLKS(root->fs_info, bvec->bv_len);
+               i = 0;
+next_block:
+               if (unlikely(map_length < submit_len + blocksize ||
+                   bio_add_page(bio, bvec->bv_page, blocksize,
+                           bvec->bv_offset + (i * blocksize)) < blocksize)) {
                        /*
                         * inc the count before we submit the bio so
                         * we know the end IO handler won't happen before
@@ -8223,7 +8305,6 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                        file_offset += submit_len;
 
                        submit_len = 0;
-                       nr_pages = 0;
 
                        bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
                                                  start_sector, GFP_NOFS);
@@ -8241,9 +8322,14 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
                                bio_put(bio);
                                goto out_err;
                        }
+
+                       goto next_block;
                } else {
-                       submit_len += bvec->bv_len;
-                       nr_pages++;
+                       submit_len += blocksize;
+                       if (--nr_sectors) {
+                               i++;
+                               goto next_block;
+                       }
                        bvec++;
                }
        }
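
In the submit path above, data is now added to the in-flight bio one blocksize at a time, and a new bio is started whenever the length mapped by btrfs_map_block() would be exceeded, instead of counting whole pages. A stand-alone sketch of that packing logic (illustrative constant capacity; the real code re-maps the length for every new bio):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t blocksize = 2048;       /* root->sectorsize in the patch */
	uint64_t map_length = 8192;      /* capacity of the current bio */
	uint64_t total = 20480;          /* bytes to submit */
	uint64_t submit_len = 0;
	int bios = 1;

	for (uint64_t done = 0; done < total; done += blocksize) {
		if (map_length < submit_len + blocksize) {
			bios++;          /* submit current bio, start a new one */
			submit_len = 0;
		}
		submit_len += blocksize;
	}
	printf("%llu bytes submitted in %d bios\n",
	       (unsigned long long)total, bios);
	return 0;
}
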
@@ -8608,6 +8694,8 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
        struct extent_state *cached_state = NULL;
        u64 page_start = page_offset(page);
        u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
+       u64 start;
+       u64 end;
        int inode_evicting = inode->i_state & I_FREEING;
 
        /*
@@ -8627,14 +8715,18 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 
        if (!inode_evicting)
                lock_extent_bits(tree, page_start, page_end, &cached_state);
-       ordered = btrfs_lookup_ordered_extent(inode, page_start);
+again:
+       start = page_start;
+       ordered = btrfs_lookup_ordered_range(inode, start,
+                                       page_end - start + 1);
        if (ordered) {
+               end = min(page_end, ordered->file_offset + ordered->len - 1);
                /*
                 * IO on this page will never be started, so we need
                 * to account for any ordered extents now
                 */
                if (!inode_evicting)
-                       clear_extent_bit(tree, page_start, page_end,
+                       clear_extent_bit(tree, start, end,
                                         EXTENT_DIRTY | EXTENT_DELALLOC |
                                         EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
                                         EXTENT_DEFRAG, 1, 0, &cached_state,
@@ -8651,22 +8743,26 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset,
 
                        spin_lock_irq(&tree->lock);
                        set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
-                       new_len = page_start - ordered->file_offset;
+                       new_len = start - ordered->file_offset;
                        if (new_len < ordered->truncated_len)
                                ordered->truncated_len = new_len;
                        spin_unlock_irq(&tree->lock);
 
                        if (btrfs_dec_test_ordered_pending(inode, &ordered,
-                                                          page_start,
-                                                          PAGE_CACHE_SIZE, 1))
+                                                          start,
+                                                          end - start + 1, 1))
                                btrfs_finish_ordered_io(ordered);
                }
                btrfs_put_ordered_extent(ordered);
                if (!inode_evicting) {
                        cached_state = NULL;
-                       lock_extent_bits(tree, page_start, page_end,
+                       lock_extent_bits(tree, start, end,
                                         &cached_state);
                }
+
+               start = end + 1;
+               if (start < page_end)
+                       goto again;
        }
 
        /*
@@ -8727,15 +8823,28 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        loff_t size;
        int ret;
        int reserved = 0;
+       u64 reserved_space;
        u64 page_start;
        u64 page_end;
+       u64 end;
+
+       reserved_space = PAGE_CACHE_SIZE;
 
        sb_start_pagefault(inode->i_sb);
        page_start = page_offset(page);
        page_end = page_start + PAGE_CACHE_SIZE - 1;
+       end = page_end;
 
+       /*
+        * Reserving delalloc space after obtaining the page lock can lead to
+        * deadlock. For example, if a dirty page is locked by this function
+        * and the call to btrfs_delalloc_reserve_space() ends up triggering
+        * dirty page write out, then the btrfs_writepage() function could
+        * end up waiting indefinitely to get a lock on the page currently
+        * being processed by the btrfs_page_mkwrite() function.
+        */
        ret = btrfs_delalloc_reserve_space(inode, page_start,
-                                          PAGE_CACHE_SIZE);
+                                          reserved_space);
        if (!ret) {
                ret = file_update_time(vma->vm_file);
                reserved = 1;
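
The comment added above records the reason for the ordering: reserving delalloc space can itself kick off dirty-page writeback, and writeback needs the page lock, so the reservation has to happen before the page is locked. A tiny userspace analogue of that lock ordering (all names hypothetical; this is not btrfs code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

static void flush_dirty_page(void)
{
	pthread_mutex_lock(&page_lock);   /* writeback needs the page lock */
	/* ... write the page out ... */
	pthread_mutex_unlock(&page_lock);
}

static int reserve_delalloc_space(void)
{
	/* under pressure the reservation may trigger writeback */
	flush_dirty_page();
	return 0;
}

int main(void)
{
	/* safe order: reserve first, lock the page afterwards */
	if (reserve_delalloc_space())
		return 1;
	pthread_mutex_lock(&page_lock);
	printf("page locked after reservation, no deadlock\n");
	pthread_mutex_unlock(&page_lock);
	return 0;
}

Reversing the order, i.e. locking the page and then reserving, would let the writeback path block forever on the lock the faulting task already holds.
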
@@ -8769,7 +8878,7 @@ again:
         * we can't set the delalloc bits if there are pending ordered
         * extents.  Drop our locks and wait for them to finish
         */
-       ordered = btrfs_lookup_ordered_extent(inode, page_start);
+       ordered = btrfs_lookup_ordered_range(inode, page_start, page_end);
        if (ordered) {
                unlock_extent_cached(io_tree, page_start, page_end,
                                     &cached_state, GFP_NOFS);
@@ -8779,6 +8888,18 @@ again:
                goto again;
        }
 
+       if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) {
+               reserved_space = round_up(size - page_start, root->sectorsize);
+               if (reserved_space < PAGE_CACHE_SIZE) {
+                       end = page_start + reserved_space - 1;
+                       spin_lock(&BTRFS_I(inode)->lock);
+                       BTRFS_I(inode)->outstanding_extents++;
+                       spin_unlock(&BTRFS_I(inode)->lock);
+                       btrfs_delalloc_release_space(inode, page_start,
+                                               PAGE_CACHE_SIZE - reserved_space);
+               }
+       }
+
        /*
         * XXX - page_mkwrite gets called every time the page is dirtied, even
         * if it was already dirty, so for space accounting reasons we need to
@@ -8786,12 +8907,12 @@ again:
         * is probably a better way to do this, but for now keep consistent with
         * prepare_pages in the normal write path.
         */
-       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
+       clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
                          EXTENT_DIRTY | EXTENT_DELALLOC |
                          EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
                          0, 0, &cached_state, GFP_NOFS);
 
-       ret = btrfs_set_extent_delalloc(inode, page_start, page_end,
+       ret = btrfs_set_extent_delalloc(inode, page_start, end,
                                        &cached_state);
        if (ret) {
                unlock_extent_cached(io_tree, page_start, page_end,
@@ -8830,7 +8951,7 @@ out_unlock:
        }
        unlock_page(page);
 out:
-       btrfs_delalloc_release_space(inode, page_start, PAGE_CACHE_SIZE);
+       btrfs_delalloc_release_space(inode, page_start, reserved_space);
 out_noreserve:
        sb_end_pagefault(inode->i_sb);
        return ret;
@@ -9216,7 +9337,6 @@ static int btrfs_getattr(struct vfsmount *mnt,
 
        generic_fillattr(inode, stat);
        stat->dev = BTRFS_I(inode)->root->anon_dev;
-       stat->blksize = PAGE_CACHE_SIZE;
 
        spin_lock(&BTRFS_I(inode)->lock);
        delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
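
The getattr hunk above stops forcing stat->blksize to PAGE_CACHE_SIZE, leaving whatever generic_fillattr() derived from the inode. A trivial userspace probe of the field this affects (not btrfs code, just a way to observe st_blksize):

#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path>\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("%s: st_blksize = %ld\n", argv[1], (long)st.st_blksize);
	return 0;
}
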
index 952172ca7e455633c28a79292d18ebbfd68c4d18..93e7832d1d1b22cc93b484ef34ca030c919ab74b 100644 (file)
@@ -3814,8 +3814,9 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
         * Truncate page cache pages so that future reads will see the cloned
         * data immediately and not the previous data.
         */
-       truncate_inode_pages_range(&inode->i_data, destoff,
-                                  PAGE_CACHE_ALIGN(destoff + len) - 1);
+       truncate_inode_pages_range(&inode->i_data,
+                               round_down(destoff, PAGE_CACHE_SIZE),
+                               round_up(destoff + len, PAGE_CACHE_SIZE) - 1);
 out_unlock:
        if (!same_inode)
                btrfs_double_inode_unlock(src, inode);
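
The clone hunk above widens the page-cache invalidation to whole pages: the start of the range is rounded down and the end rounded up, so an unaligned destoff or length no longer leaves a partially stale first or last page. The effect of the rounding, as a stand-alone sketch (the macro definitions and values are illustrative, not the kernel's):

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE 4096ULL
#define round_down(x, y) ((x) & ~((y) - 1))
#define round_up(x, y)   (((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	uint64_t destoff = 6000, len = 3000;   /* neither is page aligned */

	/* old: truncate started at destoff itself; new: whole pages */
	printf("truncate pages %llu .. %llu\n",
	       (unsigned long long)round_down(destoff, PAGE_CACHE_SIZE),
	       (unsigned long long)(round_up(destoff + len, PAGE_CACHE_SIZE) - 1));
	return 0;
}

For destoff = 6000 and len = 3000 this prints 4096 .. 12287, i.e. the second and third pages in full.
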
index 619f92963e27102fb47a6f119b65451bdf959bef..c5f1773c4794995eacc909976f62def80a9c64bc 100644 (file)
@@ -280,7 +280,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
        end = start + cache->key.offset - 1;
        btrfs_put_block_group(cache);
 
-       zone = kzalloc(sizeof(*zone), GFP_NOFS);
+       zone = kzalloc(sizeof(*zone), GFP_KERNEL);
        if (!zone)
                return NULL;
 
@@ -343,7 +343,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
        if (re)
                return re;
 
-       re = kzalloc(sizeof(*re), GFP_NOFS);
+       re = kzalloc(sizeof(*re), GFP_KERNEL);
        if (!re)
                return NULL;
 
@@ -566,7 +566,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
        if (!re)
                return -1;
 
-       rec = kzalloc(sizeof(*rec), GFP_NOFS);
+       rec = kzalloc(sizeof(*rec), GFP_KERNEL);
        if (!rec) {
                reada_extent_put(root->fs_info, re);
                return -ENOMEM;
@@ -791,7 +791,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
 {
        struct reada_machine_work *rmw;
 
-       rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
+       rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
        if (!rmw) {
                /* FIXME we cannot handle this properly right now */
                BUG();
@@ -926,7 +926,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
                .offset = (u64)-1
        };
 
-       rc = kzalloc(sizeof(*rc), GFP_NOFS);
+       rc = kzalloc(sizeof(*rc), GFP_KERNEL);
        if (!rc)
                return ERR_PTR(-ENOMEM);
 
index fd1c4d982463fd92768537f86f8b9ae8545befba..2bd0011450df2715ca926a6ccd2d99cb7d40f8b4 100644 (file)
@@ -575,7 +575,8 @@ static int is_cowonly_root(u64 root_objectid)
            root_objectid == BTRFS_TREE_LOG_OBJECTID ||
            root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
            root_objectid == BTRFS_UUID_TREE_OBJECTID ||
-           root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
+           root_objectid == BTRFS_QUOTA_TREE_OBJECTID ||
+           root_objectid == BTRFS_FREE_SPACE_TREE_OBJECTID)
                return 1;
        return 0;
 }
index 92bf5ee732fb0e14f7e330dddaadccb4b70b35cc..2de7817d0e1be7176a3e3dcf7ad44793fe446e2c 100644 (file)
@@ -461,7 +461,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
        int ret;
 
-       sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
+       sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
        if (!sctx)
                goto nomem;
        atomic_set(&sctx->refs, 1);
@@ -472,7 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
                struct scrub_bio *sbio;
 
-               sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
+               sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
                if (!sbio)
                        goto nomem;
                sctx->bios[i] = sbio;
@@ -1654,7 +1654,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 again:
        if (!wr_ctx->wr_curr_bio) {
                wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
-                                             GFP_NOFS);
+                                             GFP_KERNEL);
                if (!wr_ctx->wr_curr_bio) {
                        mutex_unlock(&wr_ctx->wr_lock);
                        return -ENOMEM;
@@ -1671,7 +1671,8 @@ again:
                sbio->dev = wr_ctx->tgtdev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+                       bio = btrfs_io_bio_alloc(GFP_KERNEL,
+                                       wr_ctx->pages_per_wr_bio);
                        if (!bio) {
                                mutex_unlock(&wr_ctx->wr_lock);
                                return -ENOMEM;
@@ -2076,7 +2077,8 @@ again:
                sbio->dev = spage->dev;
                bio = sbio->bio;
                if (!bio) {
-                       bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
+                       bio = btrfs_io_bio_alloc(GFP_KERNEL,
+                                       sctx->pages_per_rd_bio);
                        if (!bio)
                                return -ENOMEM;
                        sbio->bio = bio;
@@ -2241,7 +2243,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
        struct scrub_block *sblock;
        int index;
 
-       sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+       sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
        if (!sblock) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
@@ -2259,7 +2261,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
                struct scrub_page *spage;
                u64 l = min_t(u64, len, PAGE_SIZE);
 
-               spage = kzalloc(sizeof(*spage), GFP_NOFS);
+               spage = kzalloc(sizeof(*spage), GFP_KERNEL);
                if (!spage) {
 leave_nomem:
                        spin_lock(&sctx->stat_lock);
@@ -2286,7 +2288,7 @@ leave_nomem:
                        spage->have_csum = 0;
                }
                sblock->page_count++;
-               spage->page = alloc_page(GFP_NOFS);
+               spage->page = alloc_page(GFP_KERNEL);
                if (!spage->page)
                        goto leave_nomem;
                len -= l;
@@ -2541,7 +2543,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
        struct scrub_block *sblock;
        int index;
 
-       sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+       sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
        if (!sblock) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.malloc_errors++;
@@ -2561,7 +2563,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
                struct scrub_page *spage;
                u64 l = min_t(u64, len, PAGE_SIZE);
 
-               spage = kzalloc(sizeof(*spage), GFP_NOFS);
+               spage = kzalloc(sizeof(*spage), GFP_KERNEL);
                if (!spage) {
 leave_nomem:
                        spin_lock(&sctx->stat_lock);
@@ -2591,7 +2593,7 @@ leave_nomem:
                        spage->have_csum = 0;
                }
                sblock->page_count++;
-               spage->page = alloc_page(GFP_NOFS);
+               spage->page = alloc_page(GFP_KERNEL);
                if (!spage->page)
                        goto leave_nomem;
                len -= l;
index 63a6152be04b4f4265b25e253eadb1d4d91b8969..d2e29925f1da3bc6f3d4ce3846733cd058a301bf 100644 (file)
@@ -304,7 +304,7 @@ static struct fs_path *fs_path_alloc(void)
 {
        struct fs_path *p;
 
-       p = kmalloc(sizeof(*p), GFP_NOFS);
+       p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return NULL;
        p->reversed = 0;
@@ -363,11 +363,11 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
         * First time the inline_buf does not suffice
         */
        if (p->buf == p->inline_buf) {
-               tmp_buf = kmalloc(len, GFP_NOFS);
+               tmp_buf = kmalloc(len, GFP_KERNEL);
                if (tmp_buf)
                        memcpy(tmp_buf, p->buf, old_buf_len);
        } else {
-               tmp_buf = krealloc(p->buf, len, GFP_NOFS);
+               tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
        }
        if (!tmp_buf)
                return -ENOMEM;
@@ -995,7 +995,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
         * values are small.
         */
        buf_len = PATH_MAX;
-       buf = kmalloc(buf_len, GFP_NOFS);
+       buf = kmalloc(buf_len, GFP_KERNEL);
        if (!buf) {
                ret = -ENOMEM;
                goto out;
@@ -1042,7 +1042,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
                                buf = NULL;
                        } else {
                                char *tmp = krealloc(buf, buf_len,
-                                                    GFP_NOFS | __GFP_NOWARN);
+                                               GFP_KERNEL | __GFP_NOWARN);
 
                                if (!tmp)
                                        kfree(buf);
@@ -1303,7 +1303,7 @@ static int find_extent_clone(struct send_ctx *sctx,
        /* We only use this path under the commit sem */
        tmp_path->need_commit_sem = 0;
 
-       backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
+       backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
        if (!backref_ctx) {
                ret = -ENOMEM;
                goto out;
@@ -1984,7 +1984,7 @@ static int name_cache_insert(struct send_ctx *sctx,
        nce_head = radix_tree_lookup(&sctx->name_cache,
                        (unsigned long)nce->ino);
        if (!nce_head) {
-               nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
+               nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
                if (!nce_head) {
                        kfree(nce);
                        return -ENOMEM;
@@ -2179,7 +2179,7 @@ out_cache:
        /*
         * Store the result of the lookup in the name cache.
         */
-       nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
+       nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
        if (!nce) {
                ret = -ENOMEM;
                goto out;
@@ -2315,7 +2315,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
        if (!path)
                return -ENOMEM;
 
-       name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
+       name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
        if (!name) {
                btrfs_free_path(path);
                return -ENOMEM;
@@ -2730,7 +2730,7 @@ static int __record_ref(struct list_head *head, u64 dir,
 {
        struct recorded_ref *ref;
 
-       ref = kmalloc(sizeof(*ref), GFP_NOFS);
+       ref = kmalloc(sizeof(*ref), GFP_KERNEL);
        if (!ref)
                return -ENOMEM;
 
@@ -2755,7 +2755,7 @@ static int dup_ref(struct recorded_ref *ref, struct list_head *list)
 {
        struct recorded_ref *new;
 
-       new = kmalloc(sizeof(*ref), GFP_NOFS);
+       new = kmalloc(sizeof(*ref), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
@@ -2818,7 +2818,7 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
        struct rb_node *parent = NULL;
        struct orphan_dir_info *entry, *odi;
 
-       odi = kmalloc(sizeof(*odi), GFP_NOFS);
+       odi = kmalloc(sizeof(*odi), GFP_KERNEL);
        if (!odi)
                return ERR_PTR(-ENOMEM);
        odi->ino = dir_ino;
@@ -2973,7 +2973,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
        struct rb_node *parent = NULL;
        struct waiting_dir_move *entry, *dm;
 
-       dm = kmalloc(sizeof(*dm), GFP_NOFS);
+       dm = kmalloc(sizeof(*dm), GFP_KERNEL);
        if (!dm)
                return -ENOMEM;
        dm->ino = ino;
@@ -3040,7 +3040,7 @@ static int add_pending_dir_move(struct send_ctx *sctx,
        int exists = 0;
        int ret;
 
-       pm = kmalloc(sizeof(*pm), GFP_NOFS);
+       pm = kmalloc(sizeof(*pm), GFP_KERNEL);
        if (!pm)
                return -ENOMEM;
        pm->parent_ino = parent_ino;
@@ -4280,7 +4280,7 @@ static int __find_xattr(int num, struct btrfs_key *di_key,
            strncmp(name, ctx->name, name_len) == 0) {
                ctx->found_idx = num;
                ctx->found_data_len = data_len;
-               ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
+               ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
                if (!ctx->found_data)
                        return -ENOMEM;
                return 1;
@@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
        while (index <= last_index) {
                unsigned cur_len = min_t(unsigned, len,
                                         PAGE_CACHE_SIZE - pg_offset);
-               page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+               page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
                if (!page) {
                        ret = -ENOMEM;
                        break;
@@ -5989,7 +5989,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
                goto out;
        }
 
-       sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
+       sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
        if (!sctx) {
                ret = -ENOMEM;
                goto out;
@@ -5997,7 +5997,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 
        INIT_LIST_HEAD(&sctx->new_refs);
        INIT_LIST_HEAD(&sctx->deleted_refs);
-       INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
+       INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
        INIT_LIST_HEAD(&sctx->name_cache_list);
 
        sctx->flags = arg->flags;
index e0ac85949067c30191ce3635e2a65edbe8a56361..539e7b5e3f86a83059f070409fdb36749a958ce4 100644 (file)
@@ -202,6 +202,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF);
 BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56);
 BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
 BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
+BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
 
 static struct attribute *btrfs_supported_feature_attrs[] = {
        BTRFS_FEAT_ATTR_PTR(mixed_backref),
@@ -213,6 +214,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
        BTRFS_FEAT_ATTR_PTR(raid56),
        BTRFS_FEAT_ATTR_PTR(skinny_metadata),
        BTRFS_FEAT_ATTR_PTR(no_holes),
+       BTRFS_FEAT_ATTR_PTR(free_space_tree),
        NULL
 };
 
@@ -780,6 +782,39 @@ failure:
        return error;
 }
 
+
+/*
+ * Change per-fs features in /sys/fs/btrfs/UUID/features to match current
+ * values in the superblock. Call after any changes to the incompat/compat_ro flags.
+ */
+void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
+               u64 bit, enum btrfs_feature_set set)
+{
+       struct btrfs_fs_devices *fs_devs;
+       struct kobject *fsid_kobj;
+       u64 features;
+       int ret;
+
+       if (!fs_info)
+               return;
+
+       features = get_features(fs_info, set);
+       ASSERT(bit & supported_feature_masks[set]);
+
+       fs_devs = fs_info->fs_devices;
+       fsid_kobj = &fs_devs->fsid_kobj;
+
+       if (!fsid_kobj->state_initialized)
+               return;
+
+       /*
+        * FIXME: this is too heavy to update just one value, ideally we'd like
+        * to use sysfs_update_group but some refactoring is needed first.
+        */
+       sysfs_remove_group(fsid_kobj, &btrfs_feature_attr_group);
+       ret = sysfs_create_group(fsid_kobj, &btrfs_feature_attr_group);
+}
+
 static int btrfs_init_debugfs(void)
 {
 #ifdef CONFIG_DEBUG_FS
index 9c09522125a6b26d0577e139c38ebad7ca5c8331..d7da1a4c2f6c12d04a8f29e4d8f503943058d46d 100644 (file)
@@ -56,7 +56,7 @@ static struct btrfs_feature_attr btrfs_attr_##_name = {                            \
 #define BTRFS_FEAT_ATTR_COMPAT(name, feature) \
        BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature)
 #define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \
-       BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT, feature)
+       BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT_RO, feature)
 #define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \
        BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature)
 
@@ -90,4 +90,7 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
                                struct kobject *parent);
 int btrfs_sysfs_add_device(struct btrfs_fs_devices *fs_devs);
 void btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs);
+void btrfs_sysfs_feature_update(struct btrfs_fs_info *fs_info,
+               u64 bit, enum btrfs_feature_set set);
+
 #endif /* _BTRFS_SYSFS_H_ */
index b1d920b3007017c2014d28217cb5efa1d7ed4d96..0e1e61a7ec23265d292e165fd39f1f8d2080f604 100644 (file)
@@ -82,18 +82,18 @@ void btrfs_destroy_test_fs(void)
 struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
 {
        struct btrfs_fs_info *fs_info = kzalloc(sizeof(struct btrfs_fs_info),
-                                               GFP_NOFS);
+                                               GFP_KERNEL);
 
        if (!fs_info)
                return fs_info;
        fs_info->fs_devices = kzalloc(sizeof(struct btrfs_fs_devices),
-                                     GFP_NOFS);
+                                     GFP_KERNEL);
        if (!fs_info->fs_devices) {
                kfree(fs_info);
                return NULL;
        }
        fs_info->super_copy = kzalloc(sizeof(struct btrfs_super_block),
-                                     GFP_NOFS);
+                                     GFP_KERNEL);
        if (!fs_info->super_copy) {
                kfree(fs_info->fs_devices);
                kfree(fs_info);
@@ -180,11 +180,11 @@ btrfs_alloc_dummy_block_group(unsigned long length)
 {
        struct btrfs_block_group_cache *cache;
 
-       cache = kzalloc(sizeof(*cache), GFP_NOFS);
+       cache = kzalloc(sizeof(*cache), GFP_KERNEL);
        if (!cache)
                return NULL;
        cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
-                                       GFP_NOFS);
+                                       GFP_KERNEL);
        if (!cache->free_space_ctl) {
                kfree(cache);
                return NULL;
index e29fa297e053951a2d2d178a8793c9d49e021023..669b58201e36881fb1434d76fd787a45329f042e 100644 (file)
@@ -94,7 +94,7 @@ static int test_find_delalloc(void)
         * test.
         */
        for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) {
-               page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+               page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
                if (!page) {
                        test_msg("Failed to allocate test page\n");
                        ret = -ENOMEM;
@@ -113,7 +113,7 @@ static int test_find_delalloc(void)
         * |--- delalloc ---|
         * |---  search  ---|
         */
-       set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_NOFS);
+       set_extent_delalloc(&tmp, 0, 4095, NULL, GFP_KERNEL);
        start = 0;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -144,7 +144,7 @@ static int test_find_delalloc(void)
                test_msg("Couldn't find the locked page\n");
                goto out_bits;
        }
-       set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_NOFS);
+       set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL, GFP_KERNEL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -199,7 +199,7 @@ static int test_find_delalloc(void)
         *
         * We are re-using our test_start from above since it works out well.
         */
-       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_NOFS);
+       set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL, GFP_KERNEL);
        start = test_start;
        end = 0;
        found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
@@ -262,7 +262,7 @@ static int test_find_delalloc(void)
        }
        ret = 0;
 out_bits:
-       clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_NOFS);
+       clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL);
 out:
        if (locked_page)
                page_cache_release(locked_page);
@@ -360,7 +360,7 @@ static int test_eb_bitmaps(void)
 
        test_msg("Running extent buffer bitmap tests\n");
 
-       bitmap = kmalloc(len, GFP_NOFS);
+       bitmap = kmalloc(len, GFP_KERNEL);
        if (!bitmap) {
                test_msg("Couldn't allocate test bitmap\n");
                return -ENOMEM;
index 5de55fdd28bcda6aca3d0a2becf5bed8e96f1b63..e2d3da02deee4b73b9f9c96ed1e6cc9f4c1b5700 100644 (file)
@@ -974,7 +974,7 @@ static int test_extent_accounting(void)
                               (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095,
                               EXTENT_DELALLOC | EXTENT_DIRTY |
                               EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0,
-                              NULL, GFP_NOFS);
+                              NULL, GFP_KERNEL);
        if (ret) {
                test_msg("clear_extent_bit returned %d\n", ret);
                goto out;
@@ -1045,7 +1045,7 @@ static int test_extent_accounting(void)
                               BTRFS_MAX_EXTENT_SIZE+8191,
                               EXTENT_DIRTY | EXTENT_DELALLOC |
                               EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
-                              NULL, GFP_NOFS);
+                              NULL, GFP_KERNEL);
        if (ret) {
                test_msg("clear_extent_bit returned %d\n", ret);
                goto out;
@@ -1079,7 +1079,7 @@ static int test_extent_accounting(void)
        ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
                               EXTENT_DIRTY | EXTENT_DELALLOC |
                               EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
-                              NULL, GFP_NOFS);
+                              NULL, GFP_KERNEL);
        if (ret) {
                test_msg("clear_extent_bit returned %d\n", ret);
                goto out;
@@ -1096,7 +1096,7 @@ out:
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC |
                                 EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0,
-                                NULL, GFP_NOFS);
+                                NULL, GFP_KERNEL);
        iput(inode);
        btrfs_free_dummy_root(root);
        return ret;
index 323e12cc9d2f522388fe929ed66d4dd946b5881d..978c3a8108936381309de4681e90400aeb86874f 100644 (file)
@@ -4127,7 +4127,9 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
                                     struct inode *inode,
                                     struct btrfs_path *path,
                                     struct list_head *logged_list,
-                                    struct btrfs_log_ctx *ctx)
+                                    struct btrfs_log_ctx *ctx,
+                                    const u64 start,
+                                    const u64 end)
 {
        struct extent_map *em, *n;
        struct list_head extents;
@@ -4166,7 +4168,13 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
        }
 
        list_sort(NULL, &extents, extent_cmp);
-
+       /*
+        * Collect any new ordered extents within the range. This is to
+        * prevent logging file extent items without waiting for the disk
+        * location they point to being written. We do this only to deal
+        * with races against concurrent lockless direct IO writes.
+        */
+       btrfs_get_logged_extents(inode, logged_list, start, end);
 process:
        while (!list_empty(&extents)) {
                em = list_entry(extents.next, struct extent_map, list);
@@ -4701,7 +4709,7 @@ log_extents:
                        goto out_unlock;
                }
                ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
-                                               &logged_list, ctx);
+                                               &logged_list, ctx, start, end);
                if (ret) {
                        err = ret;
                        goto out_unlock;
index b7d218a168fb81c2c028ea38ffccf2a17a1d68d5..c22213789090f975b3680b10e022a1e5f2f9fdb4 100644 (file)
@@ -1108,7 +1108,7 @@ retry_locked:
                return 0;
 
        /* past end of file? */
-       i_size = inode->i_size;   /* caller holds i_mutex */
+       i_size = i_size_read(inode);
 
        if (page_off >= i_size ||
            (pos_in_page == 0 && (pos+len) >= i_size &&
@@ -1149,7 +1149,6 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
                page = grab_cache_page_write_begin(mapping, index, 0);
                if (!page)
                        return -ENOMEM;
-               *pagep = page;
 
                dout("write_begin file %p inode %p page %p %d~%d\n", file,
                     inode, page, (int)pos, (int)len);
@@ -1184,8 +1183,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
                zero_user_segment(page, from+copied, len);
 
        /* did file size increase? */
-       /* (no need for i_size_read(); we caller holds i_mutex */
-       if (pos+copied > inode->i_size)
+       if (pos+copied > i_size_read(inode))
                check_cap = ceph_inode_set_size(inode, pos+copied);
 
        if (!PageUptodate(page))
@@ -1378,11 +1376,13 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        ret = VM_FAULT_NOPAGE;
        if ((off > size) ||
-           (page->mapping != inode->i_mapping))
+           (page->mapping != inode->i_mapping)) {
+               unlock_page(page);
                goto out;
+       }
 
        ret = ceph_update_writeable_page(vma->vm_file, off, len, page);
-       if (ret == 0) {
+       if (ret >= 0) {
                /* success.  we'll keep the page locked. */
                set_page_dirty(page);
                ret = VM_FAULT_LOCKED;
@@ -1393,8 +1393,6 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
                        ret = VM_FAULT_SIGBUS;
        }
 out:
-       if (ret != VM_FAULT_LOCKED)
-               unlock_page(page);
        if (ret == VM_FAULT_LOCKED ||
            ci->i_inline_version != CEPH_INLINE_NONE) {
                int dirty;
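
The ceph_page_mkwrite hunks above move page-lock ownership onto each path: the early size/mapping check now unlocks the page itself, the success path keeps it locked and returns VM_FAULT_LOCKED, and the blanket unlock at out: is gone. The general shape of that pattern, as a small userspace sketch (hypothetical names; this is not ceph code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns 0 with page_lock held; on failure the lock is already dropped */
static int prepare_page(int ok)
{
	pthread_mutex_lock(&page_lock);
	if (!ok) {
		pthread_mutex_unlock(&page_lock);  /* failure path unlocks */
		return -1;
	}
	return 0;                                  /* success: stays locked */
}

int main(void)
{
	if (prepare_page(1) == 0) {
		printf("page dirtied while still locked\n");
		pthread_mutex_unlock(&page_lock); /* released once we are done */
	}
	return 0;
}
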
index 7680e2626815d133d3b5e9998e529535bd9231f9..a351480dbabc95891e4b61f83fb92485f8ea7b18 100644 (file)
@@ -106,7 +106,7 @@ static uint16_t ceph_fscache_inode_get_aux(const void *cookie_netfs_data,
 
        memset(&aux, 0, sizeof(aux));
        aux.mtime = inode->i_mtime;
-       aux.size = inode->i_size;
+       aux.size = i_size_read(inode);
 
        memcpy(buffer, &aux, sizeof(aux));
 
@@ -117,9 +117,7 @@ static void ceph_fscache_inode_get_attr(const void *cookie_netfs_data,
                                        uint64_t *size)
 {
        const struct ceph_inode_info* ci = cookie_netfs_data;
-       const struct inode* inode = &ci->vfs_inode;
-
-       *size = inode->i_size;
+       *size = i_size_read(&ci->vfs_inode);
 }
 
 static enum fscache_checkaux ceph_fscache_inode_check_aux(
@@ -134,7 +132,7 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux(
 
        memset(&aux, 0, sizeof(aux));
        aux.mtime = inode->i_mtime;
-       aux.size = inode->i_size;
+       aux.size = i_size_read(inode);
 
        if (memcmp(data, &aux, sizeof(aux)) != 0)
                return FSCACHE_CHECKAUX_OBSOLETE;
index 10c5ae79696ee860a1cebb6f99dec2d2bde4717a..eb9028e8cfc5197ef1ea99524fcbc67d67b4df91 100644 (file)
@@ -397,8 +397,9 @@ int ceph_release(struct inode *inode, struct file *file)
 }
 
 enum {
-       CHECK_EOF = 1,
-       READ_INLINE = 2,
+       HAVE_RETRIED = 1,
+       CHECK_EOF =    2,
+       READ_INLINE =  3,
 };
 
 /*
@@ -411,17 +412,15 @@ enum {
 static int striped_read(struct inode *inode,
                        u64 off, u64 len,
                        struct page **pages, int num_pages,
-                       int *checkeof, bool o_direct,
-                       unsigned long buf_align)
+                       int *checkeof)
 {
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_inode_info *ci = ceph_inode(inode);
        u64 pos, this_len, left;
-       int io_align, page_align;
-       int pages_left;
-       int read;
+       loff_t i_size;
+       int page_align, pages_left;
+       int read, ret;
        struct page **page_pos;
-       int ret;
        bool hit_stripe, was_short;
 
        /*
@@ -432,13 +431,9 @@ static int striped_read(struct inode *inode,
        page_pos = pages;
        pages_left = num_pages;
        read = 0;
-       io_align = off & ~PAGE_MASK;
 
 more:
-       if (o_direct)
-               page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
-       else
-               page_align = pos & ~PAGE_MASK;
+       page_align = pos & ~PAGE_MASK;
        this_len = left;
        ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
                                  &ci->i_layout, pos, &this_len,
@@ -452,13 +447,12 @@ more:
        dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
             ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
 
+       i_size = i_size_read(inode);
        if (ret >= 0) {
                int didpages;
-               if (was_short && (pos + ret < inode->i_size)) {
-                       int zlen = min(this_len - ret,
-                                      inode->i_size - pos - ret);
-                       int zoff = (o_direct ? buf_align : io_align) +
-                                   read + ret;
+               if (was_short && (pos + ret < i_size)) {
+                       int zlen = min(this_len - ret, i_size - pos - ret);
+                       int zoff = (off & ~PAGE_MASK) + read + ret;
                        dout(" zero gap %llu to %llu\n",
                                pos + ret, pos + ret + zlen);
                        ceph_zero_page_vector_range(zoff, zlen, pages);
@@ -473,14 +467,14 @@ more:
                pages_left -= didpages;
 
                /* hit stripe and need continue*/
-               if (left && hit_stripe && pos < inode->i_size)
+               if (left && hit_stripe && pos < i_size)
                        goto more;
        }
 
        if (read > 0) {
                ret = read;
                /* did we bounce off eof? */
-               if (pos + left > inode->i_size)
+               if (pos + left > i_size)
                        *checkeof = CHECK_EOF;
        }
 
@@ -521,54 +515,28 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
        if (ret < 0)
                return ret;
 
-       if (iocb->ki_flags & IOCB_DIRECT) {
-               while (iov_iter_count(i)) {
-                       size_t start;
-                       ssize_t n;
-
-                       n = dio_get_pagev_size(i);
-                       pages = dio_get_pages_alloc(i, n, &start, &num_pages);
-                       if (IS_ERR(pages))
-                               return PTR_ERR(pages);
-
-                       ret = striped_read(inode, off, n,
-                                          pages, num_pages, checkeof,
-                                          1, start);
-
-                       ceph_put_page_vector(pages, num_pages, true);
-
-                       if (ret <= 0)
-                               break;
-                       off += ret;
-                       iov_iter_advance(i, ret);
-                       if (ret < n)
+       num_pages = calc_pages_for(off, len);
+       pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
+       if (IS_ERR(pages))
+               return PTR_ERR(pages);
+       ret = striped_read(inode, off, len, pages,
+                               num_pages, checkeof);
+       if (ret > 0) {
+               int l, k = 0;
+               size_t left = ret;
+
+               while (left) {
+                       size_t page_off = off & ~PAGE_MASK;
+                       size_t copy = min_t(size_t, left,
+                                           PAGE_SIZE - page_off);
+                       l = copy_page_to_iter(pages[k++], page_off, copy, i);
+                       off += l;
+                       left -= l;
+                       if (l < copy)
                                break;
                }
-       } else {
-               num_pages = calc_pages_for(off, len);
-               pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
-               if (IS_ERR(pages))
-                       return PTR_ERR(pages);
-               ret = striped_read(inode, off, len, pages,
-                                       num_pages, checkeof, 0, 0);
-               if (ret > 0) {
-                       int l, k = 0;
-                       size_t left = ret;
-
-                       while (left) {
-                               size_t page_off = off & ~PAGE_MASK;
-                               size_t copy = min_t(size_t,
-                                                   PAGE_SIZE - page_off, left);
-                               l = copy_page_to_iter(pages[k++], page_off,
-                                                     copy, i);
-                               off += l;
-                               left -= l;
-                               if (l < copy)
-                                       break;
-                       }
-               }
-               ceph_release_page_vector(pages, num_pages);
        }
+       ceph_release_page_vector(pages, num_pages);
 
        if (off > iocb->ki_pos) {
                ret = off - iocb->ki_pos;
@@ -579,6 +547,193 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
        return ret;
 }
 
+struct ceph_aio_request {
+       struct kiocb *iocb;
+       size_t total_len;
+       int write;
+       int error;
+       struct list_head osd_reqs;
+       unsigned num_reqs;
+       atomic_t pending_reqs;
+       struct timespec mtime;
+       struct ceph_cap_flush *prealloc_cf;
+};
+
+struct ceph_aio_work {
+       struct work_struct work;
+       struct ceph_osd_request *req;
+};
+
+static void ceph_aio_retry_work(struct work_struct *work);
+
+static void ceph_aio_complete(struct inode *inode,
+                             struct ceph_aio_request *aio_req)
+{
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       int ret;
+
+       if (!atomic_dec_and_test(&aio_req->pending_reqs))
+               return;
+
+       ret = aio_req->error;
+       if (!ret)
+               ret = aio_req->total_len;
+
+       dout("ceph_aio_complete %p rc %d\n", inode, ret);
+
+       if (ret >= 0 && aio_req->write) {
+               int dirty;
+
+               loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
+               if (endoff > i_size_read(inode)) {
+                       if (ceph_inode_set_size(inode, endoff))
+                               ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
+               }
+
+               spin_lock(&ci->i_ceph_lock);
+               ci->i_inline_version = CEPH_INLINE_NONE;
+               dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
+                                              &aio_req->prealloc_cf);
+               spin_unlock(&ci->i_ceph_lock);
+               if (dirty)
+                       __mark_inode_dirty(inode, dirty);
+
+       }
+
+       ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
+                                               CEPH_CAP_FILE_RD));
+
+       aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
+
+       ceph_free_cap_flush(aio_req->prealloc_cf);
+       kfree(aio_req);
+}
+
+static void ceph_aio_complete_req(struct ceph_osd_request *req,
+                                 struct ceph_msg *msg)
+{
+       int rc = req->r_result;
+       struct inode *inode = req->r_inode;
+       struct ceph_aio_request *aio_req = req->r_priv;
+       struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
+       int num_pages = calc_pages_for((u64)osd_data->alignment,
+                                      osd_data->length);
+
+       dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
+            inode, rc, osd_data->length);
+
+       if (rc == -EOLDSNAPC) {
+               struct ceph_aio_work *aio_work;
+               BUG_ON(!aio_req->write);
+
+               aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
+               if (aio_work) {
+                       INIT_WORK(&aio_work->work, ceph_aio_retry_work);
+                       aio_work->req = req;
+                       queue_work(ceph_inode_to_client(inode)->wb_wq,
+                                  &aio_work->work);
+                       return;
+               }
+               rc = -ENOMEM;
+       } else if (!aio_req->write) {
+               if (rc == -ENOENT)
+                       rc = 0;
+               if (rc >= 0 && osd_data->length > rc) {
+                       int zoff = osd_data->alignment + rc;
+                       int zlen = osd_data->length - rc;
+                       /*
+                        * If read is satisfied by single OSD request,
+                        * it can pass EOF. Otherwise read is within
+                        * i_size.
+                        */
+                       if (aio_req->num_reqs == 1) {
+                               loff_t i_size = i_size_read(inode);
+                               loff_t endoff = aio_req->iocb->ki_pos + rc;
+                               if (endoff < i_size)
+                                       zlen = min_t(size_t, zlen,
+                                                    i_size - endoff);
+                               aio_req->total_len = rc + zlen;
+                       }
+
+                       if (zlen > 0)
+                               ceph_zero_page_vector_range(zoff, zlen,
+                                                           osd_data->pages);
+               }
+       }
+
+       ceph_put_page_vector(osd_data->pages, num_pages, false);
+       ceph_osdc_put_request(req);
+
+       if (rc < 0)
+               cmpxchg(&aio_req->error, 0, rc);
+
+       ceph_aio_complete(inode, aio_req);
+       return;
+}
+
+static void ceph_aio_retry_work(struct work_struct *work)
+{
+       struct ceph_aio_work *aio_work =
+               container_of(work, struct ceph_aio_work, work);
+       struct ceph_osd_request *orig_req = aio_work->req;
+       struct ceph_aio_request *aio_req = orig_req->r_priv;
+       struct inode *inode = orig_req->r_inode;
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       struct ceph_snap_context *snapc;
+       struct ceph_osd_request *req;
+       int ret;
+
+       spin_lock(&ci->i_ceph_lock);
+       if (__ceph_have_pending_cap_snap(ci)) {
+               struct ceph_cap_snap *capsnap =
+                       list_last_entry(&ci->i_cap_snaps,
+                                       struct ceph_cap_snap,
+                                       ci_item);
+               snapc = ceph_get_snap_context(capsnap->context);
+       } else {
+               BUG_ON(!ci->i_head_snapc);
+               snapc = ceph_get_snap_context(ci->i_head_snapc);
+       }
+       spin_unlock(&ci->i_ceph_lock);
+
+       req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
+                       false, GFP_NOFS);
+       if (!req) {
+               ret = -ENOMEM;
+               req = orig_req;
+               goto out;
+       }
+
+       req->r_flags =  CEPH_OSD_FLAG_ORDERSNAP |
+                       CEPH_OSD_FLAG_ONDISK |
+                       CEPH_OSD_FLAG_WRITE;
+       req->r_base_oloc = orig_req->r_base_oloc;
+       req->r_base_oid = orig_req->r_base_oid;
+
+       req->r_ops[0] = orig_req->r_ops[0];
+       osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
+
+       ceph_osdc_build_request(req, req->r_ops[0].extent.offset,
+                               snapc, CEPH_NOSNAP, &aio_req->mtime);
+
+       ceph_osdc_put_request(orig_req);
+
+       req->r_callback = ceph_aio_complete_req;
+       req->r_inode = inode;
+       req->r_priv = aio_req;
+
+       ret = ceph_osdc_start_request(req->r_osdc, req, false);
+out:
+       if (ret < 0) {
+               BUG_ON(ret == -EOLDSNAPC);
+               req->r_result = ret;
+               ceph_aio_complete_req(req, NULL);
+       }
+
+       ceph_put_snap_context(snapc);
+       kfree(aio_work);
+}
+
 /*
  * Write commit request unsafe callback, called to tell us when a
  * request is unsafe (that is, in flight--has been handed to the
@@ -612,16 +767,10 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
 }
 
 
-/*
- * Synchronous write, straight from __user pointer or user pages.
- *
- * If write spans object boundary, just do multiple writes.  (For a
- * correct atomic write, we should e.g. take write locks on all
- * objects, rollback on failure, etc.)
- */
 static ssize_t
-ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
-                      struct ceph_snap_context *snapc)
+ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
+                      struct ceph_snap_context *snapc,
+                      struct ceph_cap_flush **pcf)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file_inode(file);
@@ -630,44 +779,52 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
        struct ceph_vino vino;
        struct ceph_osd_request *req;
        struct page **pages;
-       int num_pages;
-       int written = 0;
+       struct ceph_aio_request *aio_req = NULL;
+       int num_pages = 0;
        int flags;
-       int check_caps = 0;
        int ret;
        struct timespec mtime = CURRENT_TIME;
-       size_t count = iov_iter_count(from);
+       size_t count = iov_iter_count(iter);
+       loff_t pos = iocb->ki_pos;
+       bool write = iov_iter_rw(iter) == WRITE;
 
-       if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
+       if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
                return -EROFS;
 
-       dout("sync_direct_write on file %p %lld~%u\n", file, pos,
-            (unsigned)count);
+       dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
+            (write ? "write" : "read"), file, pos, (unsigned)count);
 
        ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
        if (ret < 0)
                return ret;
 
-       ret = invalidate_inode_pages2_range(inode->i_mapping,
-                                           pos >> PAGE_CACHE_SHIFT,
-                                           (pos + count) >> PAGE_CACHE_SHIFT);
-       if (ret < 0)
-               dout("invalidate_inode_pages2_range returned %d\n", ret);
+       if (write) {
+               ret = invalidate_inode_pages2_range(inode->i_mapping,
+                                       pos >> PAGE_CACHE_SHIFT,
+                                       (pos + count) >> PAGE_CACHE_SHIFT);
+               if (ret < 0)
+                       dout("invalidate_inode_pages2_range returned %d\n", ret);
 
-       flags = CEPH_OSD_FLAG_ORDERSNAP |
-               CEPH_OSD_FLAG_ONDISK |
-               CEPH_OSD_FLAG_WRITE;
+               flags = CEPH_OSD_FLAG_ORDERSNAP |
+                       CEPH_OSD_FLAG_ONDISK |
+                       CEPH_OSD_FLAG_WRITE;
+       } else {
+               flags = CEPH_OSD_FLAG_READ;
+       }
 
-       while (iov_iter_count(from) > 0) {
-               u64 len = dio_get_pagev_size(from);
-               size_t start;
-               ssize_t n;
+       while (iov_iter_count(iter) > 0) {
+               u64 size = dio_get_pagev_size(iter);
+               size_t start = 0;
+               ssize_t len;
 
                vino = ceph_vino(inode);
                req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
-                                           vino, pos, &len, 0,
-                                           2,/*include a 'startsync' command*/
-                                           CEPH_OSD_OP_WRITE, flags, snapc,
+                                           vino, pos, &size, 0,
+                                           /*include a 'startsync' command*/
+                                           write ? 2 : 1,
+                                           write ? CEPH_OSD_OP_WRITE :
+                                                   CEPH_OSD_OP_READ,
+                                           flags, snapc,
                                            ci->i_truncate_seq,
                                            ci->i_truncate_size,
                                            false);
@@ -676,10 +833,8 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                        break;
                }
 
-               osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
-
-               n = len;
-               pages = dio_get_pages_alloc(from, len, &start, &num_pages);
+               len = size;
+               pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
                if (IS_ERR(pages)) {
                        ceph_osdc_put_request(req);
                        ret = PTR_ERR(pages);
@@ -687,47 +842,128 @@ ceph_sync_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
                }
 
                /*
-                * throw out any page cache pages in this range. this
-                * may block.
+                * To simplify error handling, allow AIO when IO within i_size
+                * or IO can be satisfied by single OSD request.
                 */
-               truncate_inode_pages_range(inode->i_mapping, pos,
-                                  (pos+n) | (PAGE_CACHE_SIZE-1));
-               osd_req_op_extent_osd_data_pages(req, 0, pages, n, start,
-                                               false, false);
+               if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
+                   (len == count || pos + count <= i_size_read(inode))) {
+                       aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
+                       if (aio_req) {
+                               aio_req->iocb = iocb;
+                               aio_req->write = write;
+                               INIT_LIST_HEAD(&aio_req->osd_reqs);
+                               if (write) {
+                                       aio_req->mtime = mtime;
+                                       swap(aio_req->prealloc_cf, *pcf);
+                               }
+                       }
+                       /* ignore error */
+               }
+
+               if (write) {
+                       /*
+                        * throw out any page cache pages in this range. this
+                        * may block.
+                        */
+                       truncate_inode_pages_range(inode->i_mapping, pos,
+                                       (pos+len) | (PAGE_CACHE_SIZE - 1));
+
+                       osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
+               }
+
+
+               osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
+                                                false, false);
 
-               /* BUG_ON(vino.snap != CEPH_NOSNAP); */
                ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);
 
-               ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
+               if (aio_req) {
+                       aio_req->total_len += len;
+                       aio_req->num_reqs++;
+                       atomic_inc(&aio_req->pending_reqs);
+
+                       req->r_callback = ceph_aio_complete_req;
+                       req->r_inode = inode;
+                       req->r_priv = aio_req;
+                       list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);
+
+                       pos += len;
+                       iov_iter_advance(iter, len);
+                       continue;
+               }
+
+               ret = ceph_osdc_start_request(req->r_osdc, req, false);
                if (!ret)
                        ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
 
+               size = i_size_read(inode);
+               if (!write) {
+                       if (ret == -ENOENT)
+                               ret = 0;
+                       if (ret >= 0 && ret < len && pos + ret < size) {
+                               int zlen = min_t(size_t, len - ret,
+                                                size - pos - ret);
+                               ceph_zero_page_vector_range(start + ret, zlen,
+                                                           pages);
+                               ret += zlen;
+                       }
+                       if (ret >= 0)
+                               len = ret;
+               }
+
                ceph_put_page_vector(pages, num_pages, false);
 
                ceph_osdc_put_request(req);
-               if (ret)
+               if (ret < 0)
+                       break;
+
+               pos += len;
+               iov_iter_advance(iter, len);
+
+               if (!write && pos >= size)
                        break;
-               pos += n;
-               written += n;
-               iov_iter_advance(from, n);
 
-               if (pos > i_size_read(inode)) {
-                       check_caps = ceph_inode_set_size(inode, pos);
-                       if (check_caps)
+               if (write && pos > size) {
+                       if (ceph_inode_set_size(inode, pos))
                                ceph_check_caps(ceph_inode(inode),
                                                CHECK_CAPS_AUTHONLY,
                                                NULL);
                }
        }
 
-       if (ret != -EOLDSNAPC && written > 0) {
+       if (aio_req) {
+               if (aio_req->num_reqs == 0) {
+                       kfree(aio_req);
+                       return ret;
+               }
+
+               ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
+                                             CEPH_CAP_FILE_RD);
+
+               while (!list_empty(&aio_req->osd_reqs)) {
+                       req = list_first_entry(&aio_req->osd_reqs,
+                                              struct ceph_osd_request,
+                                              r_unsafe_item);
+                       list_del_init(&req->r_unsafe_item);
+                       if (ret >= 0)
+                               ret = ceph_osdc_start_request(req->r_osdc,
+                                                             req, false);
+                       if (ret < 0) {
+                               BUG_ON(ret == -EOLDSNAPC);
+                               req->r_result = ret;
+                               ceph_aio_complete_req(req, NULL);
+                       }
+               }
+               return -EIOCBQUEUED;
+       }
+
+       if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
+               ret = pos - iocb->ki_pos;
                iocb->ki_pos = pos;
-               ret = written;
        }
        return ret;
 }
 
-
 /*
  * Synchronous write, straight from __user pointer or user pages.
  *
@@ -897,8 +1133,14 @@ again:
                     ceph_cap_string(got));
 
                if (ci->i_inline_version == CEPH_INLINE_NONE) {
-                       /* hmm, this isn't really async... */
-                       ret = ceph_sync_read(iocb, to, &retry_op);
+                       if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
+                               ret = ceph_direct_read_write(iocb, to,
+                                                            NULL, NULL);
+                               if (ret >= 0 && ret < len)
+                                       retry_op = CHECK_EOF;
+                       } else {
+                               ret = ceph_sync_read(iocb, to, &retry_op);
+                       }
                } else {
                        retry_op = READ_INLINE;
                }
@@ -916,7 +1158,7 @@ again:
                pinned_page = NULL;
        }
        ceph_put_cap_refs(ci, got);
-       if (retry_op && ret >= 0) {
+       if (retry_op > HAVE_RETRIED && ret >= 0) {
                int statret;
                struct page *page = NULL;
                loff_t i_size;
@@ -968,12 +1210,11 @@ again:
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
                    ret < len) {
                        dout("sync_read hit hole, ppos %lld < size %lld"
-                            ", reading more\n", iocb->ki_pos,
-                            inode->i_size);
+                            ", reading more\n", iocb->ki_pos, i_size);
 
                        read += ret;
                        len -= ret;
-                       retry_op = 0;
+                       retry_op = HAVE_RETRIED;
                        goto again;
                }
        }
@@ -1052,7 +1293,7 @@ retry_snap:
        }
 
        dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
-            inode, ceph_vinop(inode), pos, count, inode->i_size);
+            inode, ceph_vinop(inode), pos, count, i_size_read(inode));
        if (fi->fmode & CEPH_FILE_MODE_LAZY)
                want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
        else
@@ -1088,8 +1329,8 @@ retry_snap:
                /* we might need to revert back to that point */
                data = *from;
                if (iocb->ki_flags & IOCB_DIRECT)
-                       written = ceph_sync_direct_write(iocb, &data, pos,
-                                                        snapc);
+                       written = ceph_direct_read_write(iocb, &data, snapc,
+                                                        &prealloc_cf);
                else
                        written = ceph_sync_write(iocb, &data, pos, snapc);
                if (written == -EOLDSNAPC) {
@@ -1104,7 +1345,7 @@ retry_snap:
                        iov_iter_advance(from, written);
                ceph_put_snap_context(snapc);
        } else {
-               loff_t old_size = inode->i_size;
+               loff_t old_size = i_size_read(inode);
                /*
                 * No need to acquire the i_truncate_mutex. Because
                 * the MDS revokes Fwb caps before sending truncate
@@ -1115,7 +1356,7 @@ retry_snap:
                written = generic_perform_write(file, from, pos);
                if (likely(written >= 0))
                        iocb->ki_pos = pos + written;
-               if (inode->i_size > old_size)
+               if (i_size_read(inode) > old_size)
                        ceph_fscache_update_objectsize(inode);
                inode_unlock(inode);
        }
@@ -1160,6 +1401,7 @@ out_unlocked:
 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
 {
        struct inode *inode = file->f_mapping->host;
+       loff_t i_size;
        int ret;
 
        inode_lock(inode);
@@ -1172,9 +1414,10 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
                }
        }
 
+       i_size = i_size_read(inode);
        switch (whence) {
        case SEEK_END:
-               offset += inode->i_size;
+               offset += i_size;
                break;
        case SEEK_CUR:
                /*
@@ -1190,17 +1433,17 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
                offset += file->f_pos;
                break;
        case SEEK_DATA:
-               if (offset >= inode->i_size) {
+               if (offset >= i_size) {
                        ret = -ENXIO;
                        goto out;
                }
                break;
        case SEEK_HOLE:
-               if (offset >= inode->i_size) {
+               if (offset >= i_size) {
                        ret = -ENXIO;
                        goto out;
                }
-               offset = inode->i_size;
+               offset = i_size;
                break;
        }
 
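Note on the ceph_direct_read_write() changes above: for asynchronous O_DIRECT I/O the code attaches each submitted OSD request to a ceph_aio_request, bumps pending_reqs per request, returns -EIOCBQUEUED from the submit path, and only completes the user iocb once the last OSD reply arrives. Below is a minimal sketch of that countdown pattern; the names my_aio and my_sub_complete are hypothetical, and the error/total-length bookkeeping done by the real code is omitted.

/* Illustrative sketch of the "complete parent when last child finishes"
 * pattern; not the ceph implementation. */
#include <linux/atomic.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct my_aio {
	struct kiocb *iocb;	/* the user's async request */
	atomic_t pending;	/* sub-requests still in flight */
	ssize_t result;		/* total bytes, or first negative error */
};

/* Each sub-request's completion callback ends with this. */
static void my_sub_complete(struct my_aio *aio)
{
	/* The last completion finishes the iocb and frees the tracker. */
	if (atomic_dec_and_test(&aio->pending)) {
		aio->iocb->ki_complete(aio->iocb, aio->result, 0);
		kfree(aio);
	}
}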
index da55eb8bcffab89755baf5229b92ededf49dd484..fb4ba2e4e2a5fa5c5d62afa3b94f758c7906687b 100644 (file)
@@ -548,7 +548,7 @@ int ceph_fill_file_size(struct inode *inode, int issued,
        if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
            (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
                dout("size %lld -> %llu\n", inode->i_size, size);
-               inode->i_size = size;
+               i_size_write(inode, size);
                inode->i_blocks = (size + (1<<9) - 1) >> 9;
                ci->i_reported_size = size;
                if (truncate_seq != ci->i_truncate_seq) {
@@ -808,7 +808,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
                        spin_unlock(&ci->i_ceph_lock);
 
                        err = -EINVAL;
-                       if (WARN_ON(symlen != inode->i_size))
+                       if (WARN_ON(symlen != i_size_read(inode)))
                                goto out;
 
                        err = -ENOMEM;
@@ -1549,7 +1549,7 @@ int ceph_inode_set_size(struct inode *inode, loff_t size)
 
        spin_lock(&ci->i_ceph_lock);
        dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
-       inode->i_size = size;
+       i_size_write(inode, size);
        inode->i_blocks = (size + (1 << 9) - 1) >> 9;
 
        /* tell the MDS if we are approaching max_size */
@@ -1911,7 +1911,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
                     inode->i_size, attr->ia_size);
                if ((issued & CEPH_CAP_FILE_EXCL) &&
                    attr->ia_size > inode->i_size) {
-                       inode->i_size = attr->ia_size;
+                       i_size_write(inode, attr->ia_size);
                        inode->i_blocks =
                                (attr->ia_size + (1 << 9) - 1) >> 9;
                        inode->i_ctime = attr->ia_ctime;
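The fs/ceph/inode.c hunks above switch direct inode->i_size accesses to i_size_read()/i_size_write(). On 32-bit SMP kernels i_size is protected by a seqcount, so i_size_write() callers must be serialized by a lock (ceph holds ci->i_ceph_lock), while readers need no locking; on 64-bit kernels both helpers reduce to plain loads and stores. A generic sketch of the expected usage, not specific to ceph:

#include <linux/fs.h>
#include <linux/spinlock.h>

/* Writer: update the cached size under a lock that serializes writers. */
static void example_set_size(struct inode *inode, spinlock_t *lock,
			     loff_t new_size)
{
	spin_lock(lock);
	i_size_write(inode, new_size);	/* seqcount-protected on 32-bit SMP */
	spin_unlock(lock);
}

/* Reader: lockless; i_size_read() retries across a racing update. */
static loff_t example_get_size(struct inode *inode)
{
	return i_size_read(inode);
}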
index 7febcf2475c5ab675c04dfd2fddaa3ed574522a0..50b268483302922ee63d205a2fa560ec27676eed 100644 (file)
@@ -50,7 +50,7 @@ void cifs_vfs_err(const char *fmt, ...)
        vaf.fmt = fmt;
        vaf.va = &args;
 
-       pr_err("CIFS VFS: %pV", &vaf);
+       pr_err_ratelimited("CIFS VFS: %pV", &vaf);
 
        va_end(args);
 }
index f40fbaca1b2a2c1d7457d71bd1a5e7dd4b477859..66cf0f9fff8984cb12eed1c89d91bc9c6ccc6a9d 100644 (file)
@@ -51,14 +51,13 @@ __printf(1, 2) void cifs_vfs_err(const char *fmt, ...);
 /* information message: e.g., configuration, major event */
 #define cifs_dbg(type, fmt, ...)                                       \
 do {                                                                   \
-       if (type == FYI) {                                              \
-               if (cifsFYI & CIFS_INFO) {                              \
-                       pr_debug("%s: " fmt, __FILE__, ##__VA_ARGS__);  \
-               }                                                       \
+       if (type == FYI && cifsFYI & CIFS_INFO) {                       \
+               pr_debug_ratelimited("%s: "                             \
+                           fmt, __FILE__, ##__VA_ARGS__);              \
        } else if (type == VFS) {                                       \
                cifs_vfs_err(fmt, ##__VA_ARGS__);                       \
        } else if (type == NOISY && type != 0) {                        \
-               pr_debug(fmt, ##__VA_ARGS__);                           \
+               pr_debug_ratelimited(fmt, ##__VA_ARGS__);               \
        }                                                               \
 } while (0)
 
index afa09fce81515e4caf7500b04c16dfb96a71cfd1..d41165433260673d66fef4cc074695665330124b 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/ctype.h>
 #include <linux/random.h>
 #include <linux/highmem.h>
+#include <crypto/skcipher.h>
 
 static int
 cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
@@ -789,38 +790,46 @@ int
 calc_seckey(struct cifs_ses *ses)
 {
        int rc;
-       struct crypto_blkcipher *tfm_arc4;
+       struct crypto_skcipher *tfm_arc4;
        struct scatterlist sgin, sgout;
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
        unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */
 
        get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE);
 
-       tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_arc4)) {
                rc = PTR_ERR(tfm_arc4);
                cifs_dbg(VFS, "could not allocate crypto API arc4\n");
                return rc;
        }
 
-       desc.tfm = tfm_arc4;
-
-       rc = crypto_blkcipher_setkey(tfm_arc4, ses->auth_key.response,
+       rc = crypto_skcipher_setkey(tfm_arc4, ses->auth_key.response,
                                        CIFS_SESS_KEY_SIZE);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not set response as a key\n",
                         __func__);
-               return rc;
+               goto out_free_cipher;
+       }
+
+       req = skcipher_request_alloc(tfm_arc4, GFP_KERNEL);
+       if (!req) {
+               rc = -ENOMEM;
+               cifs_dbg(VFS, "could not allocate crypto API arc4 request\n");
+               goto out_free_cipher;
        }
 
        sg_init_one(&sgin, sec_key, CIFS_SESS_KEY_SIZE);
        sg_init_one(&sgout, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
 
-       rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sgin, &sgout, CIFS_CPHTXT_SIZE, NULL);
+
+       rc = crypto_skcipher_encrypt(req);
+       skcipher_request_free(req);
        if (rc) {
                cifs_dbg(VFS, "could not encrypt session key rc: %d\n", rc);
-               crypto_free_blkcipher(tfm_arc4);
-               return rc;
+               goto out_free_cipher;
        }
 
        /* make secondary_key/nonce as session key */
@@ -828,7 +837,8 @@ calc_seckey(struct cifs_ses *ses)
        /* and make len as that of session key only */
        ses->auth_key.len = CIFS_SESS_KEY_SIZE;
 
-       crypto_free_blkcipher(tfm_arc4);
+out_free_cipher:
+       crypto_free_skcipher(tfm_arc4);
 
        return rc;
 }
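The calc_seckey() rewrite above is the standard conversion from the legacy blkcipher interface to the skcipher request API: allocate the tfm, set the key, allocate a request, attach source/destination scatterlists, run the operation, then free the request and the tfm. A condensed, self-contained sketch of that sequence for a synchronous cipher (CRYPTO_ALG_ASYNC is masked off, so no completion callback is needed); the helper name is illustrative only:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int ecb_arc4_encrypt_once(const u8 *key, unsigned int keylen,
				 struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int rc;

	tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_skcipher_setkey(tfm, key, keylen);
	if (rc)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}

	/* No completion callback: the request runs synchronously. */
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, NULL);
	rc = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return rc;
}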
index e24ca79b140c9e5d62990c0aef2ea01f20223efe..c48ca13673e37f2eba7c4045f140e6e8b740e499 100644 (file)
@@ -507,6 +507,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
 
        seq_printf(s, ",rsize=%u", cifs_sb->rsize);
        seq_printf(s, ",wsize=%u", cifs_sb->wsize);
+       seq_printf(s, ",echo_interval=%lu",
+                       tcon->ses->server->echo_interval / HZ);
        /* convert actimeo and display it in seconds */
        seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
 
@@ -752,6 +754,9 @@ cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
        ssize_t rc;
        struct inode *inode = file_inode(iocb->ki_filp);
 
+       if (iocb->ki_filp->f_flags & O_DIRECT)
+               return cifs_user_readv(iocb, iter);
+
        rc = cifs_revalidate_mapping(inode);
        if (rc)
                return rc;
@@ -766,6 +771,18 @@ static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
        ssize_t written;
        int rc;
 
+       if (iocb->ki_filp->f_flags & O_DIRECT) {
+               written = cifs_user_writev(iocb, from);
+               if (written > 0 && CIFS_CACHE_READ(cinode)) {
+                       cifs_zap_mapping(inode);
+                       cifs_dbg(FYI,
+                                "Set no oplock for inode=%p after a write operation\n",
+                                inode);
+                       cinode->oplock = 0;
+               }
+               return written;
+       }
+
        written = cifs_get_writer(cinode);
        if (written)
                return written;
index 2b510c537a0d588c532fe9200cdd4a9b03a4c9f4..a25b2513f1460608596f9c9fa7a9cb8e8383e5f2 100644 (file)
 #define SERVER_NAME_LENGTH 40
 #define SERVER_NAME_LEN_WITH_NULL     (SERVER_NAME_LENGTH + 1)
 
-/* SMB echo "timeout" -- FIXME: tunable? */
-#define SMB_ECHO_INTERVAL (60 * HZ)
+/* echo interval in seconds */
+#define SMB_ECHO_INTERVAL_MIN 1
+#define SMB_ECHO_INTERVAL_MAX 600
+#define SMB_ECHO_INTERVAL_DEFAULT 60
 
 #include "cifspdu.h"
 
@@ -225,7 +227,7 @@ struct smb_version_operations {
        void (*print_stats)(struct seq_file *m, struct cifs_tcon *);
        void (*dump_share_caps)(struct seq_file *, struct cifs_tcon *);
        /* verify the message */
-       int (*check_message)(char *, unsigned int);
+       int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
        bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
        void (*downgrade_oplock)(struct TCP_Server_Info *,
                                        struct cifsInodeInfo *, bool);
@@ -507,6 +509,7 @@ struct smb_vol {
        struct sockaddr_storage dstaddr; /* destination address */
        struct sockaddr_storage srcaddr; /* allow binding to a local IP */
        struct nls_table *local_nls;
+       unsigned int echo_interval; /* echo interval in secs */
 };
 
 #define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \
@@ -627,7 +630,9 @@ struct TCP_Server_Info {
 #ifdef CONFIG_CIFS_SMB2
        unsigned int    max_read;
        unsigned int    max_write;
+       __u8            preauth_hash[512];
 #endif /* CONFIG_CIFS_SMB2 */
+       unsigned long echo_interval;
 };
 
 static inline unsigned int
@@ -809,7 +814,10 @@ struct cifs_ses {
        bool need_reconnect:1; /* connection reset, uid now invalid */
 #ifdef CONFIG_CIFS_SMB2
        __u16 session_flags;
-       char smb3signingkey[SMB3_SIGN_KEY_SIZE]; /* for signing smb3 packets */
+       __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
+       __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
+       __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
+       __u8 preauth_hash[512];
 #endif /* CONFIG_CIFS_SMB2 */
 };
 
index c63fd1dde25b861b011f604522572c5619f177f1..eed7ff50faf016e6714867542953334b199e7627 100644 (file)
@@ -102,7 +102,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
                        struct smb_hdr *out_buf,
                        int *bytes_returned);
 extern int cifs_reconnect(struct TCP_Server_Info *server);
-extern int checkSMB(char *buf, unsigned int length);
+extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr);
 extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
 extern bool backup_cred(struct cifs_sb_info *);
 extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
@@ -439,7 +439,8 @@ extern int setup_ntlm_response(struct cifs_ses *, const struct nls_table *);
 extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
 extern void cifs_crypto_shash_release(struct TCP_Server_Info *);
 extern int calc_seckey(struct cifs_ses *);
-extern int generate_smb3signingkey(struct cifs_ses *);
+extern int generate_smb30signingkey(struct cifs_ses *);
+extern int generate_smb311signingkey(struct cifs_ses *);
 
 #ifdef CONFIG_CIFS_WEAK_PW_HASH
 extern int calc_lanman_hash(const char *password, const char *cryptkey,
index ecb0803bdb0e50479f50b0e874a4e87cb064e2d0..4fbd92d2e113e69d5e3105235a215a52b2e756e7 100644 (file)
@@ -95,6 +95,7 @@ enum {
        Opt_cruid, Opt_gid, Opt_file_mode,
        Opt_dirmode, Opt_port,
        Opt_rsize, Opt_wsize, Opt_actimeo,
+       Opt_echo_interval,
 
        /* Mount options which take string value */
        Opt_user, Opt_pass, Opt_ip,
@@ -188,6 +189,7 @@ static const match_table_t cifs_mount_option_tokens = {
        { Opt_rsize, "rsize=%s" },
        { Opt_wsize, "wsize=%s" },
        { Opt_actimeo, "actimeo=%s" },
+       { Opt_echo_interval, "echo_interval=%s" },
 
        { Opt_blank_user, "user=" },
        { Opt_blank_user, "username=" },
@@ -368,7 +370,6 @@ cifs_reconnect(struct TCP_Server_Info *server)
        server->session_key.response = NULL;
        server->session_key.len = 0;
        server->lstrp = jiffies;
-       mutex_unlock(&server->srv_mutex);
 
        /* mark submitted MIDs for retry and issue callback */
        INIT_LIST_HEAD(&retry_list);
@@ -381,6 +382,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                list_move(&mid_entry->qhead, &retry_list);
        }
        spin_unlock(&GlobalMid_Lock);
+       mutex_unlock(&server->srv_mutex);
 
        cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
        list_for_each_safe(tmp, tmp2, &retry_list) {
@@ -418,6 +420,7 @@ cifs_echo_request(struct work_struct *work)
        int rc;
        struct TCP_Server_Info *server = container_of(work,
                                        struct TCP_Server_Info, echo.work);
+       unsigned long echo_interval = server->echo_interval;
 
        /*
         * We cannot send an echo if it is disabled or until the
@@ -427,7 +430,7 @@ cifs_echo_request(struct work_struct *work)
         */
        if (!server->ops->need_neg || server->ops->need_neg(server) ||
            (server->ops->can_echo && !server->ops->can_echo(server)) ||
-           time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+           time_before(jiffies, server->lstrp + echo_interval - HZ))
                goto requeue_echo;
 
        rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
@@ -436,7 +439,7 @@ cifs_echo_request(struct work_struct *work)
                         server->hostname);
 
 requeue_echo:
-       queue_delayed_work(cifsiod_wq, &server->echo, SMB_ECHO_INTERVAL);
+       queue_delayed_work(cifsiod_wq, &server->echo, echo_interval);
 }
 
 static bool
@@ -487,9 +490,9 @@ server_unresponsive(struct TCP_Server_Info *server)
         *     a response in >60s.
         */
        if (server->tcpStatus == CifsGood &&
-           time_after(jiffies, server->lstrp + 2 * SMB_ECHO_INTERVAL)) {
-               cifs_dbg(VFS, "Server %s has not responded in %d seconds. Reconnecting...\n",
-                        server->hostname, (2 * SMB_ECHO_INTERVAL) / HZ);
+           time_after(jiffies, server->lstrp + 2 * server->echo_interval)) {
+               cifs_dbg(VFS, "Server %s has not responded in %lu seconds. Reconnecting...\n",
+                        server->hostname, (2 * server->echo_interval) / HZ);
                cifs_reconnect(server);
                wake_up(&server->response_q);
                return true;
@@ -828,7 +831,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
         * 48 bytes is enough to display the header and a little bit
         * into the payload for debugging purposes.
         */
-       length = server->ops->check_message(buf, server->total_read);
+       length = server->ops->check_message(buf, server->total_read, server);
        if (length != 0)
                cifs_dump_mem("Bad SMB: ", buf,
                        min_t(unsigned int, server->total_read, 48));
@@ -1624,6 +1627,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                                goto cifs_parse_mount_err;
                        }
                        break;
+               case Opt_echo_interval:
+                       if (get_option_ul(args, &option)) {
+                               cifs_dbg(VFS, "%s: Invalid echo interval value\n",
+                                        __func__);
+                               goto cifs_parse_mount_err;
+                       }
+                       vol->echo_interval = option;
+                       break;
 
                /* String Arguments */
 
@@ -2089,6 +2100,9 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
        if (!match_security(server, vol))
                return 0;
 
+       if (server->echo_interval != vol->echo_interval)
+               return 0;
+
        return 1;
 }
 
@@ -2208,6 +2222,12 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
        tcp_ses->tcpStatus = CifsNew;
        ++tcp_ses->srv_count;
 
+       if (volume_info->echo_interval >= SMB_ECHO_INTERVAL_MIN &&
+               volume_info->echo_interval <= SMB_ECHO_INTERVAL_MAX)
+               tcp_ses->echo_interval = volume_info->echo_interval * HZ;
+       else
+               tcp_ses->echo_interval = SMB_ECHO_INTERVAL_DEFAULT * HZ;
+
        rc = ip_connect(tcp_ses);
        if (rc < 0) {
                cifs_dbg(VFS, "Error connecting to socket. Aborting operation.\n");
@@ -2237,7 +2257,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
        cifs_fscache_get_client_cookie(tcp_ses);
 
        /* queue echo request delayed work */
-       queue_delayed_work(cifsiod_wq, &tcp_ses->echo, SMB_ECHO_INTERVAL);
+       queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
 
        return tcp_ses;
 
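The connect.c changes above introduce the echo_interval=n mount option (in seconds). The value is range-checked against SMB_ECHO_INTERVAL_MIN/MAX when the TCP session is created, converted to jiffies once, and then drives both the periodic echo work and the 2x-interval unresponsiveness check; match_server() also refuses to share a socket between mounts that request different intervals. A minimal sketch of just the clamp-and-convert step:

#include <linux/jiffies.h>

#define SMB_ECHO_INTERVAL_MIN		1	/* seconds */
#define SMB_ECHO_INTERVAL_MAX		600
#define SMB_ECHO_INTERVAL_DEFAULT	60

/* Returns the echo interval in jiffies for a user-supplied value in
 * seconds; out-of-range (or zero/absent) values fall back to the default. */
static unsigned long echo_interval_to_jiffies(unsigned int secs)
{
	if (secs >= SMB_ECHO_INTERVAL_MIN && secs <= SMB_ECHO_INTERVAL_MAX)
		return secs * HZ;
	return SMB_ECHO_INTERVAL_DEFAULT * HZ;
}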
index a329f5ba35aad8649ce3f644b9b11913c57cc5f8..aeb26dbfa1bf2dbfb2d111316064d240ef807336 100644 (file)
@@ -814,8 +814,21 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                        }
                } else
                        fattr.cf_uniqueid = iunique(sb, ROOT_I);
-       } else
-               fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
+       } else {
+               if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) &&
+                   validinum == false && server->ops->get_srv_inum) {
+                       /*
+                        * Pass a NULL tcon to ensure we don't make a round
+                        * trip to the server. This only works for SMB2+.
+                        */
+                       tmprc = server->ops->get_srv_inum(xid,
+                               NULL, cifs_sb, full_path,
+                               &fattr.cf_uniqueid, data);
+                       if (tmprc)
+                               fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
+               } else
+                       fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
+       }
 
        /* query for SFU type info if supported and needed */
        if (fattr.cf_cifsattrs & ATTR_SYSTEM &&
@@ -856,6 +869,13 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
        } else {
                /* we already have inode, update it */
 
+               /* if uniqueid is different, return error */
+               if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+                   CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+                       rc = -ESTALE;
+                       goto cgii_exit;
+               }
+
                /* if filetype is different, return error */
                if (unlikely(((*inode)->i_mode & S_IFMT) !=
                    (fattr.cf_mode & S_IFMT))) {
index 8442b8b8e0be145e7f68055e0025f1909442d156..813fe13c2ae175869cdc6db06a19392d09d7f05f 100644 (file)
@@ -310,7 +310,7 @@ check_smb_hdr(struct smb_hdr *smb)
 }
 
 int
-checkSMB(char *buf, unsigned int total_read)
+checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
 {
        struct smb_hdr *smb = (struct smb_hdr *)buf;
        __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
index 0557c45e9c3308a368f7ff3e73f9423c81eedea4..b30a4a6d98a0f002c235bf3128012aff06ba2bc3 100644 (file)
@@ -847,6 +847,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
                 * if buggy server returns . and .. late do we want to
                 * check for that here?
                 */
+               *tmp_buf = 0;
                rc = cifs_filldir(current_entry, file, ctx,
                                  tmp_buf, max_len);
                if (rc) {
index 1c5907019045517ac302daf4c04e8caea21f1fda..389fb9f8c84e22308ac5fcb44c27f005bee6e27c 100644 (file)
@@ -38,7 +38,7 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
         * Make sure that this really is an SMB, that it is a response,
         * and that the message ids match.
         */
-       if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) &&
+       if ((hdr->ProtocolId == SMB2_PROTO_NUMBER) &&
            (mid == wire_mid)) {
                if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
                        return 0;
@@ -50,9 +50,9 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
                                cifs_dbg(VFS, "Received Request not response\n");
                }
        } else { /* bad signature or mid */
-               if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER)
+               if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
                        cifs_dbg(VFS, "Bad protocol string signature header %x\n",
-                                *(unsigned int *) hdr->ProtocolId);
+                                le32_to_cpu(hdr->ProtocolId));
                if (mid != wire_mid)
                        cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
                                 mid, wire_mid);
@@ -93,11 +93,11 @@ static const __le16 smb2_rsp_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
 };
 
 int
-smb2_check_message(char *buf, unsigned int length)
+smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr)
 {
        struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
        struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
-       __u64 mid = le64_to_cpu(hdr->MessageId);
+       __u64 mid;
        __u32 len = get_rfc1002_length(buf);
        __u32 clc_len;  /* calculated length */
        int command;
@@ -111,6 +111,30 @@ smb2_check_message(char *buf, unsigned int length)
         * ie Validate the wct via smb2_struct_sizes table above
         */
 
+       if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
+               struct smb2_transform_hdr *thdr =
+                       (struct smb2_transform_hdr *)buf;
+               struct cifs_ses *ses = NULL;
+               struct list_head *tmp;
+
+               /* decrypt frame now that it is completely read in */
+               spin_lock(&cifs_tcp_ses_lock);
+               list_for_each(tmp, &srvr->smb_ses_list) {
+                       ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+                       if (ses->Suid == thdr->SessionId)
+                               break;
+
+                       ses = NULL;
+               }
+               spin_unlock(&cifs_tcp_ses_lock);
+               if (ses == NULL) {
+                       cifs_dbg(VFS, "no decryption - session id not found\n");
+                       return 1;
+               }
+       }
+
+
+       mid = le64_to_cpu(hdr->MessageId);
        if (length < sizeof(struct smb2_pdu)) {
                if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) {
                        pdu->StructureSize2 = 0;
@@ -322,7 +346,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr)
 
        /* return pointer to beginning of data area, ie offset from SMB start */
        if ((*off != 0) && (*len != 0))
-               return (char *)(&hdr->ProtocolId[0]) + *off;
+               return (char *)(&hdr->ProtocolId) + *off;
        else
                return NULL;
 }
index 53ccdde6ff187c16084e10dd07372d6516d25139..3525ed756173d40009017ca11c9ae04cbd97f34a 100644 (file)
@@ -182,6 +182,11 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
        struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
        __u64 wire_mid = le64_to_cpu(hdr->MessageId);
 
+       if (hdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM) {
+               cifs_dbg(VFS, "encrypted frame parsing not supported yet\n");
+               return NULL;
+       }
+
        spin_lock(&GlobalMid_Lock);
        list_for_each_entry(mid, &server->pending_mid_q, qhead) {
                if ((mid->mid == wire_mid) &&
@@ -1692,7 +1697,7 @@ struct smb_version_operations smb30_operations = {
        .get_lease_key = smb2_get_lease_key,
        .set_lease_key = smb2_set_lease_key,
        .new_lease_key = smb2_new_lease_key,
-       .generate_signingkey = generate_smb3signingkey,
+       .generate_signingkey = generate_smb30signingkey,
        .calc_signature = smb3_calc_signature,
        .set_integrity  = smb3_set_integrity,
        .is_read_op = smb21_is_read_op,
@@ -1779,7 +1784,7 @@ struct smb_version_operations smb311_operations = {
        .get_lease_key = smb2_get_lease_key,
        .set_lease_key = smb2_set_lease_key,
        .new_lease_key = smb2_new_lease_key,
-       .generate_signingkey = generate_smb3signingkey,
+       .generate_signingkey = generate_smb311signingkey,
        .calc_signature = smb3_calc_signature,
        .set_integrity  = smb3_set_integrity,
        .is_read_op = smb21_is_read_op,
@@ -1838,7 +1843,7 @@ struct smb_version_values smb21_values = {
 struct smb_version_values smb30_values = {
        .version_string = SMB30_VERSION_STRING,
        .protocol_id = SMB30_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
@@ -1858,7 +1863,7 @@ struct smb_version_values smb30_values = {
 struct smb_version_values smb302_values = {
        .version_string = SMB302_VERSION_STRING,
        .protocol_id = SMB302_PROT_ID,
-       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES,
+       .req_capabilities = SMB2_GLOBAL_CAP_DFS | SMB2_GLOBAL_CAP_LEASING | SMB2_GLOBAL_CAP_LARGE_MTU | SMB2_GLOBAL_CAP_PERSISTENT_HANDLES | SMB2_GLOBAL_CAP_ENCRYPTION,
        .large_lock_type = 0,
        .exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE_LOCK,
        .shared_lock_type = SMB2_LOCKFLAG_SHARED_LOCK,
index 767555518d40ccd357b6f5540f2758bb8be6e654..10f8d5cf5681e26b843c0ac15821eab69e6eeb22 100644 (file)
@@ -97,10 +97,7 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
        hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
                        - 4 /*  RFC 1001 length field itself not counted */);
 
-       hdr->ProtocolId[0] = 0xFE;
-       hdr->ProtocolId[1] = 'S';
-       hdr->ProtocolId[2] = 'M';
-       hdr->ProtocolId[3] = 'B';
+       hdr->ProtocolId = SMB2_PROTO_NUMBER;
        hdr->StructureSize = cpu_to_le16(64);
        hdr->Command = smb2_cmd;
        hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
@@ -1573,7 +1570,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
                goto ioctl_exit;
        }
 
-       memcpy(*out_data, rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset),
+       memcpy(*out_data,
+              (char *)&rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset),
               *plen);
 ioctl_exit:
        free_rsp_buf(resp_buftype, rsp);
@@ -2093,7 +2091,7 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
        }
 
        if (*buf) {
-               memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
+               memcpy(*buf, (char *)&rsp->hdr.ProtocolId + rsp->DataOffset,
                       *nbytes);
                free_rsp_buf(resp_buftype, iov[0].iov_base);
        } else if (resp_buftype != CIFS_NO_BUFFER) {
index 4af52780ec350323cd41e993389e7214fb50f97d..ff88d9feb01e7475f75a230c004d5f40a80f14f9 100644 (file)
@@ -86,6 +86,7 @@
 #define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
 
 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
+#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
 
 /*
  * SMB2 Header Definition
@@ -102,7 +103,7 @@ struct smb2_hdr {
        __be32 smb2_buf_length; /* big endian on wire */
                                /* length is only two or three bytes - with
                                 one or two byte type preceding it that MBZ */
-       __u8   ProtocolId[4];   /* 0xFE 'S' 'M' 'B' */
+       __le32 ProtocolId;      /* 0xFE 'S' 'M' 'B' */
        __le16 StructureSize;   /* 64 */
        __le16 CreditCharge;    /* MBZ */
        __le32 Status;          /* Error from server */
@@ -128,11 +129,10 @@ struct smb2_transform_hdr {
                                 one or two byte type preceding it that MBZ */
        __u8   ProtocolId[4];   /* 0xFD 'S' 'M' 'B' */
        __u8   Signature[16];
-       __u8   Nonce[11];
-       __u8   Reserved[5];
+       __u8   Nonce[16];
        __le32 OriginalMessageSize;
        __u16  Reserved1;
-       __le16 EncryptionAlgorithm;
+       __le16 Flags; /* EncryptionAlgorithm */
        __u64  SessionId;
 } __packed;
 
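The smb2pdu.h change above replaces the four-byte ProtocolId array with a __le32 so headers can be compared directly against SMB2_PROTO_NUMBER and the new SMB2_TRANSFORM_PROTO_NUM. The wire bytes are unchanged: the little-endian constant 0x424d53fe still serializes as 0xFE 'S' 'M' 'B' (and 0x424d53fd as 0xFD 'S' 'M' 'B' for encrypted transform headers). A small userspace demonstration of that byte order, included only as a worked example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* cpu_to_le32(0x424d53fe) stores the low byte first, so the wire
	 * bytes are fe 53 4d 42, i.e. 0xFE 'S' 'M' 'B'. */
	uint32_t proto = 0x424d53fe;
	uint8_t le[4] = {
		proto & 0xff, (proto >> 8) & 0xff,
		(proto >> 16) & 0xff, (proto >> 24) & 0xff,
	};

	printf("%02x %c %c %c\n", le[0], le[1], le[2], le[3]);
	/* prints: fe S M B */
	return 0;
}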
index 79dc650c18b26baf2e9a4ca8ac2c67df5ebdc63e..4f07dc93608db3fc8723f53d86219087b7cc76c8 100644 (file)
@@ -34,7 +34,8 @@ struct smb_rqst;
  *****************************************************************
  */
 extern int map_smb2_to_linux_error(char *buf, bool log_err);
-extern int smb2_check_message(char *buf, unsigned int length);
+extern int smb2_check_message(char *buf, unsigned int length,
+                             struct TCP_Server_Info *server);
 extern unsigned int smb2_calc_size(void *buf);
 extern char *smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *hdr);
 extern __le16 *cifs_convert_path_to_utf16(const char *from,
index d4c5b6f109a7feaa6f2c99f21ca332ff41a2673f..8732a43b10084bf787a8410498e3ec51c69ea93c 100644 (file)
@@ -222,8 +222,8 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
        return rc;
 }
 
-int
-generate_smb3signingkey(struct cifs_ses *ses)
+static int generate_key(struct cifs_ses *ses, struct kvec label,
+                       struct kvec context, __u8 *key, unsigned int key_size)
 {
        unsigned char zero = 0x0;
        __u8 i[4] = {0, 0, 0, 1};
@@ -233,7 +233,7 @@ generate_smb3signingkey(struct cifs_ses *ses)
        unsigned char *hashptr = prfhash;
 
        memset(prfhash, 0x0, SMB2_HMACSHA256_SIZE);
-       memset(ses->smb3signingkey, 0x0, SMB3_SIGNKEY_SIZE);
+       memset(key, 0x0, key_size);
 
        rc = smb3_crypto_shash_allocate(ses->server);
        if (rc) {
@@ -262,7 +262,7 @@ generate_smb3signingkey(struct cifs_ses *ses)
        }
 
        rc = crypto_shash_update(&ses->server->secmech.sdeschmacsha256->shash,
-                               "SMB2AESCMAC", 12);
+                               label.iov_base, label.iov_len);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not update with label\n", __func__);
                goto smb3signkey_ret;
@@ -276,7 +276,7 @@ generate_smb3signingkey(struct cifs_ses *ses)
        }
 
        rc = crypto_shash_update(&ses->server->secmech.sdeschmacsha256->shash,
-                               "SmbSign", 8);
+                               context.iov_base, context.iov_len);
        if (rc) {
                cifs_dbg(VFS, "%s: Could not update with context\n", __func__);
                goto smb3signkey_ret;
@@ -296,12 +296,102 @@ generate_smb3signingkey(struct cifs_ses *ses)
                goto smb3signkey_ret;
        }
 
-       memcpy(ses->smb3signingkey, hashptr, SMB3_SIGNKEY_SIZE);
+       memcpy(key, hashptr, key_size);
 
 smb3signkey_ret:
        return rc;
 }
 
+struct derivation {
+       struct kvec label;
+       struct kvec context;
+};
+
+struct derivation_triplet {
+       struct derivation signing;
+       struct derivation encryption;
+       struct derivation decryption;
+};
+
+static int
+generate_smb3signingkey(struct cifs_ses *ses,
+                       const struct derivation_triplet *ptriplet)
+{
+       int rc;
+
+       rc = generate_key(ses, ptriplet->signing.label,
+                         ptriplet->signing.context, ses->smb3signingkey,
+                         SMB3_SIGN_KEY_SIZE);
+       if (rc)
+               return rc;
+
+       rc = generate_key(ses, ptriplet->encryption.label,
+                         ptriplet->encryption.context, ses->smb3encryptionkey,
+                         SMB3_SIGN_KEY_SIZE);
+       if (rc)
+               return rc;
+
+       return generate_key(ses, ptriplet->decryption.label,
+                           ptriplet->decryption.context,
+                           ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
+}
+
+int
+generate_smb30signingkey(struct cifs_ses *ses)
+
+{
+       struct derivation_triplet triplet;
+       struct derivation *d;
+
+       d = &triplet.signing;
+       d->label.iov_base = "SMB2AESCMAC";
+       d->label.iov_len = 12;
+       d->context.iov_base = "SmbSign";
+       d->context.iov_len = 8;
+
+       d = &triplet.encryption;
+       d->label.iov_base = "SMB2AESCCM";
+       d->label.iov_len = 11;
+       d->context.iov_base = "ServerIn ";
+       d->context.iov_len = 10;
+
+       d = &triplet.decryption;
+       d->label.iov_base = "SMB2AESCCM";
+       d->label.iov_len = 11;
+       d->context.iov_base = "ServerOut";
+       d->context.iov_len = 10;
+
+       return generate_smb3signingkey(ses, &triplet);
+}
+
+int
+generate_smb311signingkey(struct cifs_ses *ses)
+
+{
+       struct derivation_triplet triplet;
+       struct derivation *d;
+
+       d = &triplet.signing;
+       d->label.iov_base = "SMB2AESCMAC";
+       d->label.iov_len = 12;
+       d->context.iov_base = "SmbSign";
+       d->context.iov_len = 8;
+
+       d = &triplet.encryption;
+       d->label.iov_base = "SMB2AESCCM";
+       d->label.iov_len = 11;
+       d->context.iov_base = "ServerIn ";
+       d->context.iov_len = 10;
+
+       d = &triplet.decryption;
+       d->label.iov_base = "SMB2AESCCM";
+       d->label.iov_len = 11;
+       d->context.iov_base = "ServerOut";
+       d->context.iov_len = 10;
+
+       return generate_smb3signingkey(ses, &triplet);
+}
+
 int
 smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
 {
index a4232ec4f2ba45386b4f25db484f7f30135b01c2..699b7868108f658a3262943c7763b13d59767a89 100644 (file)
@@ -23,6 +23,7 @@
    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
 
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/fs.h>
@@ -70,31 +71,42 @@ smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
 {
        int rc;
        unsigned char key2[8];
-       struct crypto_blkcipher *tfm_des;
+       struct crypto_skcipher *tfm_des;
        struct scatterlist sgin, sgout;
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
 
        str_to_key(key, key2);
 
-       tfm_des = crypto_alloc_blkcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);
+       tfm_des = crypto_alloc_skcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_des)) {
                rc = PTR_ERR(tfm_des);
                cifs_dbg(VFS, "could not allocate des crypto API\n");
                goto smbhash_err;
        }
 
-       desc.tfm = tfm_des;
+       req = skcipher_request_alloc(tfm_des, GFP_KERNEL);
+       if (!req) {
+               rc = -ENOMEM;
+               cifs_dbg(VFS, "could not allocate des crypto API\n");
+               goto smbhash_free_skcipher;
+       }
 
-       crypto_blkcipher_setkey(tfm_des, key2, 8);
+       crypto_skcipher_setkey(tfm_des, key2, 8);
 
        sg_init_one(&sgin, in, 8);
        sg_init_one(&sgout, out, 8);
 
-       rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sgin, &sgout, 8, NULL);
+
+       rc = crypto_skcipher_encrypt(req);
        if (rc)
                cifs_dbg(VFS, "could not encrypt crypt key rc: %d\n", rc);
 
-       crypto_free_blkcipher(tfm_des);
+       skcipher_request_free(req);
+
+smbhash_free_skcipher:
+       crypto_free_skcipher(tfm_des);
 smbhash_err:
        return rc;
 }
index 2a24c524fb9a90cedd4187460ecb90c0e5dfcf0c..87abe8ed074c31962a969b178d55757e44bdc0d5 100644 (file)
@@ -576,14 +576,16 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
        cifs_in_send_dec(server);
        cifs_save_when_sent(mid);
 
-       if (rc < 0)
+       if (rc < 0) {
                server->sequence_number -= 2;
+               cifs_delete_mid(mid);
+       }
+
        mutex_unlock(&server->srv_mutex);
 
        if (rc == 0)
                return 0;
 
-       cifs_delete_mid(mid);
        add_credits_and_wake_if(server, credits, optype);
        return rc;
 }
index a5b8eb69a8f42526d4fb4d46d1cc26d33e78e6b2..6402eaf8ab958f27cc3371d0773ad45046dc0b8e 100644 (file)
@@ -1261,6 +1261,9 @@ COMPATIBLE_IOCTL(HCIUNBLOCKADDR)
 COMPATIBLE_IOCTL(HCIINQUIRY)
 COMPATIBLE_IOCTL(HCIUARTSETPROTO)
 COMPATIBLE_IOCTL(HCIUARTGETPROTO)
+COMPATIBLE_IOCTL(HCIUARTGETDEVICE)
+COMPATIBLE_IOCTL(HCIUARTSETFLAGS)
+COMPATIBLE_IOCTL(HCIUARTGETFLAGS)
 COMPATIBLE_IOCTL(RFCOMMCREATEDEV)
 COMPATIBLE_IOCTL(RFCOMMRELEASEDEV)
 COMPATIBLE_IOCTL(RFCOMMGETDEVLIST)
index 4fd6b0c5c6b50d29097e016bcd205ab72b758225..d8d3cc9323a1658637341d280547cda96c670c9e 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -58,6 +58,26 @@ static void dax_unmap_atomic(struct block_device *bdev,
        blk_queue_exit(bdev->bd_queue);
 }
 
+struct page *read_dax_sector(struct block_device *bdev, sector_t n)
+{
+       struct page *page = alloc_pages(GFP_KERNEL, 0);
+       struct blk_dax_ctl dax = {
+               .size = PAGE_SIZE,
+               .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
+       };
+       long rc;
+
+       if (!page)
+               return ERR_PTR(-ENOMEM);
+
+       rc = dax_map_atomic(bdev, &dax);
+       if (rc < 0)
+               return ERR_PTR(rc);
+       memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
+       dax_unmap_atomic(bdev, &dax);
+       return page;
+}
+
 /*
  * dax_clear_blocks() is called from within transaction context from XFS,
  * and hence this means the stack from this point must follow GFP_NOFS
@@ -267,8 +287,13 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
        if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
                inode_unlock(inode);
 
-       if ((retval > 0) && end_io)
-               end_io(iocb, pos, retval, bh.b_private);
+       if (end_io) {
+               int err;
+
+               err = end_io(iocb, pos, retval, bh.b_private);
+               if (err)
+                       retval = err;
+       }
 
        if (!(flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(inode);
@@ -338,7 +363,8 @@ static int dax_radix_entry(struct address_space *mapping, pgoff_t index,
        void *entry;
 
        WARN_ON_ONCE(pmd_entry && !dirty);
-       __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+       if (dirty)
+               __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 
        spin_lock_irq(&mapping->tree_lock);
 
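In read_dax_sector() above, the expression n & ~((PAGE_SIZE / 512) - 1) rounds the requested 512-byte sector down to the first sector of its page before mapping and copying a full page out of persistent memory. Assuming 4 KiB pages, there are 8 sectors per page, so sector 13, for instance, rounds down to sector 8. A tiny standalone example of that rounding:

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size for this example */

int main(void)
{
	unsigned long sectors_per_page = PAGE_SIZE / 512;	/* 8 */
	unsigned long n = 13;
	unsigned long aligned = n & ~(sectors_per_page - 1);

	printf("sector %lu -> page-aligned sector %lu\n", n, aligned);
	/* prints: sector 13 -> page-aligned sector 8 */
	return 0;
}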
index 1f107fd513286f0f3e8be601cd715ea0ee5443e8..655f21f991606b5bf2bef600a54b814994dbed98 100644 (file)
@@ -575,6 +575,26 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx)
        mutex_unlock(&allocated_ptys_lock);
 }
 
+/*
+ * pty code needs to hold extra references in case of last /dev/tty close
+ */
+
+void devpts_add_ref(struct inode *ptmx_inode)
+{
+       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+       atomic_inc(&sb->s_active);
+       ihold(ptmx_inode);
+}
+
+void devpts_del_ref(struct inode *ptmx_inode)
+{
+       struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+       iput(ptmx_inode);
+       deactivate_super(sb);
+}
+
 /**
  * devpts_pty_new -- create a new inode in /dev/pts/
  * @ptmx_inode: inode of the master
index 1b2f7ffc8b841fd16cf312874fe8c7d4c0fa0e8e..9c6f885cc5186cc2b5a7f30187e750d7b6a979db 100644 (file)
@@ -253,8 +253,13 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
        if (ret == 0)
                ret = transferred;
 
-       if (dio->end_io && dio->result)
-               dio->end_io(dio->iocb, offset, transferred, dio->private);
+       if (dio->end_io) {
+               int err;
+
+               err = dio->end_io(dio->iocb, offset, ret, dio->private);
+               if (err)
+                       ret = err;
+       }
 
        if (!(dio->flags & DIO_SKIP_DIO_COUNT))
                inode_dio_end(dio->inode);
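The dax.c and direct-io.c hunks above change the ->end_io completion hook to return int and let a non-zero return value replace the byte count reported to the caller, instead of being silently dropped. A hedged sketch of a callback written against the new contract; the function names and the conversion helper are purely illustrative, not any real filesystem's implementation:

#include <linux/fs.h>

/* Placeholder for a filesystem's real post-write work (e.g. unwritten
 * extent conversion); assumed to return 0 or a negative errno. */
static int example_convert_unwritten(struct inode *inode, loff_t offset,
				     ssize_t size)
{
	return 0;
}

/* ->end_io now returns int; a non-zero return overrides the byte count
 * that dax_do_io()/dio_complete() hand back to the caller. */
static int example_dio_end_io(struct kiocb *iocb, loff_t offset,
			      ssize_t size, void *private)
{
	if (size <= 0)
		return 0;	/* nothing completed, nothing to convert */

	return example_convert_unwritten(file_inode(iocb->ki_filp),
					 offset, size);
}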
index 80d6901493cf5e0867cd2572885cc16f66ff59c5..11255cbcb2db34e8e7f1e52fad1cf79437c14b05 100644 (file)
@@ -23,6 +23,8 @@
  * 02111-1307, USA.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
 #include <linux/pagemap.h>
@@ -30,7 +32,6 @@
 #include <linux/compiler.h>
 #include <linux/key.h>
 #include <linux/namei.h>
-#include <linux/crypto.h>
 #include <linux/file.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
@@ -74,6 +75,19 @@ void ecryptfs_from_hex(char *dst, char *src, int dst_size)
        }
 }
 
+static int ecryptfs_hash_digest(struct crypto_shash *tfm,
+                               char *src, int len, char *dst)
+{
+       SHASH_DESC_ON_STACK(desc, tfm);
+       int err;
+
+       desc->tfm = tfm;
+       desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       err = crypto_shash_digest(desc, src, len, dst);
+       shash_desc_zero(desc);
+       return err;
+}
+
 /**
  * ecryptfs_calculate_md5 - calculates the md5 of @src
  * @dst: Pointer to 16 bytes of allocated memory
@@ -88,45 +102,26 @@ static int ecryptfs_calculate_md5(char *dst,
                                  struct ecryptfs_crypt_stat *crypt_stat,
                                  char *src, int len)
 {
-       struct scatterlist sg;
-       struct hash_desc desc = {
-               .tfm = crypt_stat->hash_tfm,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct crypto_shash *tfm;
        int rc = 0;
 
        mutex_lock(&crypt_stat->cs_hash_tfm_mutex);
-       sg_init_one(&sg, (u8 *)src, len);
-       if (!desc.tfm) {
-               desc.tfm = crypto_alloc_hash(ECRYPTFS_DEFAULT_HASH, 0,
-                                            CRYPTO_ALG_ASYNC);
-               if (IS_ERR(desc.tfm)) {
-                       rc = PTR_ERR(desc.tfm);
+       tfm = crypt_stat->hash_tfm;
+       if (!tfm) {
+               tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0);
+               if (IS_ERR(tfm)) {
+                       rc = PTR_ERR(tfm);
                        ecryptfs_printk(KERN_ERR, "Error attempting to "
                                        "allocate crypto context; rc = [%d]\n",
                                        rc);
                        goto out;
                }
-               crypt_stat->hash_tfm = desc.tfm;
-       }
-       rc = crypto_hash_init(&desc);
-       if (rc) {
-               printk(KERN_ERR
-                      "%s: Error initializing crypto hash; rc = [%d]\n",
-                      __func__, rc);
-               goto out;
+               crypt_stat->hash_tfm = tfm;
        }
-       rc = crypto_hash_update(&desc, &sg, len);
+       rc = ecryptfs_hash_digest(tfm, src, len, dst);
        if (rc) {
                printk(KERN_ERR
-                      "%s: Error updating crypto hash; rc = [%d]\n",
-                      __func__, rc);
-               goto out;
-       }
-       rc = crypto_hash_final(&desc, dst);
-       if (rc) {
-               printk(KERN_ERR
-                      "%s: Error finalizing crypto hash; rc = [%d]\n",
+                      "%s: Error computing crypto hash; rc = [%d]\n",
                       __func__, rc);
                goto out;
        }
@@ -234,10 +229,8 @@ void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat)
 {
        struct ecryptfs_key_sig *key_sig, *key_sig_tmp;
 
-       if (crypt_stat->tfm)
-               crypto_free_ablkcipher(crypt_stat->tfm);
-       if (crypt_stat->hash_tfm)
-               crypto_free_hash(crypt_stat->hash_tfm);
+       crypto_free_skcipher(crypt_stat->tfm);
+       crypto_free_shash(crypt_stat->hash_tfm);
        list_for_each_entry_safe(key_sig, key_sig_tmp,
                                 &crypt_stat->keysig_list, crypt_stat_list) {
                list_del(&key_sig->crypt_stat_list);
@@ -342,7 +335,7 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                             struct scatterlist *src_sg, int size,
                             unsigned char *iv, int op)
 {
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        struct extent_crypt_result ecr;
        int rc = 0;
 
@@ -358,20 +351,20 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
        init_completion(&ecr.completion);
 
        mutex_lock(&crypt_stat->cs_tfm_mutex);
-       req = ablkcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
+       req = skcipher_request_alloc(crypt_stat->tfm, GFP_NOFS);
        if (!req) {
                mutex_unlock(&crypt_stat->cs_tfm_mutex);
                rc = -ENOMEM;
                goto out;
        }
 
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        extent_crypt_complete, &ecr);
        /* Consider doing this once, when the file is opened */
        if (!(crypt_stat->flags & ECRYPTFS_KEY_SET)) {
-               rc = crypto_ablkcipher_setkey(crypt_stat->tfm, crypt_stat->key,
-                                             crypt_stat->key_size);
+               rc = crypto_skcipher_setkey(crypt_stat->tfm, crypt_stat->key,
+                                           crypt_stat->key_size);
                if (rc) {
                        ecryptfs_printk(KERN_ERR,
                                        "Error setting key; rc = [%d]\n",
@@ -383,9 +376,9 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                crypt_stat->flags |= ECRYPTFS_KEY_SET;
        }
        mutex_unlock(&crypt_stat->cs_tfm_mutex);
-       ablkcipher_request_set_crypt(req, src_sg, dst_sg, size, iv);
-       rc = op == ENCRYPT ? crypto_ablkcipher_encrypt(req) :
-                            crypto_ablkcipher_decrypt(req);
+       skcipher_request_set_crypt(req, src_sg, dst_sg, size, iv);
+       rc = op == ENCRYPT ? crypto_skcipher_encrypt(req) :
+                            crypto_skcipher_decrypt(req);
        if (rc == -EINPROGRESS || rc == -EBUSY) {
                struct extent_crypt_result *ecr = req->base.data;
 
@@ -394,7 +387,7 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
                reinit_completion(&ecr->completion);
        }
 out:
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        return rc;
 }
 
@@ -622,7 +615,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                                    crypt_stat->cipher, "cbc");
        if (rc)
                goto out_unlock;
-       crypt_stat->tfm = crypto_alloc_ablkcipher(full_alg_name, 0, 0);
+       crypt_stat->tfm = crypto_alloc_skcipher(full_alg_name, 0, 0);
        if (IS_ERR(crypt_stat->tfm)) {
                rc = PTR_ERR(crypt_stat->tfm);
                crypt_stat->tfm = NULL;
@@ -631,7 +624,7 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat)
                                full_alg_name);
                goto out_free;
        }
-       crypto_ablkcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       crypto_skcipher_set_flags(crypt_stat->tfm, CRYPTO_TFM_REQ_WEAK_KEY);
        rc = 0;
 out_free:
        kfree(full_alg_name);
@@ -1591,7 +1584,7 @@ out:
  * event, regardless of whether this function succeeds for fails.
  */
 static int
-ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
+ecryptfs_process_key_cipher(struct crypto_skcipher **key_tfm,
                            char *cipher_name, size_t *key_size)
 {
        char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
@@ -1609,21 +1602,18 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
                                                    "ecb");
        if (rc)
                goto out;
-       *key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
+       *key_tfm = crypto_alloc_skcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(*key_tfm)) {
                rc = PTR_ERR(*key_tfm);
                printk(KERN_ERR "Unable to allocate crypto cipher with name "
                       "[%s]; rc = [%d]\n", full_alg_name, rc);
                goto out;
        }
-       crypto_blkcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-       if (*key_size == 0) {
-               struct blkcipher_alg *alg = crypto_blkcipher_alg(*key_tfm);
-
-               *key_size = alg->max_keysize;
-       }
+       crypto_skcipher_set_flags(*key_tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       if (*key_size == 0)
+               *key_size = crypto_skcipher_default_keysize(*key_tfm);
        get_random_bytes(dummy_key, *key_size);
-       rc = crypto_blkcipher_setkey(*key_tfm, dummy_key, *key_size);
+       rc = crypto_skcipher_setkey(*key_tfm, dummy_key, *key_size);
        if (rc) {
                printk(KERN_ERR "Error attempting to set key of size [%zd] for "
                       "cipher [%s]; rc = [%d]\n", *key_size, full_alg_name,
@@ -1660,8 +1650,7 @@ int ecryptfs_destroy_crypto(void)
        list_for_each_entry_safe(key_tfm, key_tfm_tmp, &key_tfm_list,
                                 key_tfm_list) {
                list_del(&key_tfm->key_tfm_list);
-               if (key_tfm->key_tfm)
-                       crypto_free_blkcipher(key_tfm->key_tfm);
+               crypto_free_skcipher(key_tfm->key_tfm);
                kmem_cache_free(ecryptfs_key_tfm_cache, key_tfm);
        }
        mutex_unlock(&key_tfm_list_mutex);
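
ecryptfs_process_key_cipher() above now asks the skcipher API for the fallback key size instead of dereferencing struct blkcipher_alg. A small hedged helper showing just that probe; the function name is invented for illustration, and the caller is expected to pass a cipher name such as "ecb(aes)".

#include <crypto/skcipher.h>
#include <linux/err.h>

/* Sketch: report the default (maximum) key size of a named cipher. */
static int probe_default_keysize(const char *name, unsigned int *key_size)
{
        struct crypto_skcipher *tfm;

        tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        *key_size = crypto_skcipher_default_keysize(tfm);
        crypto_free_skcipher(tfm);
        return 0;
}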
@@ -1747,7 +1736,7 @@ int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm)
  * Searches for cached item first, and creates new if not found.
  * Returns 0 on success, non-zero if adding new cipher failed
  */
-int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
+int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_skcipher **tfm,
                                               struct mutex **tfm_mutex,
                                               char *cipher_name)
 {
@@ -2120,7 +2109,7 @@ out:
 int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
                           struct ecryptfs_mount_crypt_stat *mount_crypt_stat)
 {
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *tfm;
        struct mutex *tfm_mutex;
        size_t cipher_blocksize;
        int rc;
@@ -2130,7 +2119,7 @@ int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
                return 0;
        }
 
-       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
+       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&tfm, &tfm_mutex,
                        mount_crypt_stat->global_default_fn_cipher_name);
        if (unlikely(rc)) {
                (*namelen) = 0;
@@ -2138,7 +2127,7 @@ int ecryptfs_set_f_namelen(long *namelen, long lower_namelen,
        }
 
        mutex_lock(tfm_mutex);
-       cipher_blocksize = crypto_blkcipher_blocksize(desc.tfm);
+       cipher_blocksize = crypto_skcipher_blocksize(tfm);
        mutex_unlock(tfm_mutex);
 
        /* Return an exact amount for the common cases */
index 7b39260c7bbaa18fda583426a101a95124639dc5..b7f81287c6880ebdd2ba6a17918fa25c72f365c9 100644 (file)
@@ -28,6 +28,7 @@
 #ifndef ECRYPTFS_KERNEL_H
 #define ECRYPTFS_KERNEL_H
 
+#include <crypto/skcipher.h>
 #include <keys/user-type.h>
 #include <keys/encrypted-type.h>
 #include <linux/fs.h>
@@ -38,7 +39,6 @@
 #include <linux/nsproxy.h>
 #include <linux/backing-dev.h>
 #include <linux/ecryptfs.h>
-#include <linux/crypto.h>
 
 #define ECRYPTFS_DEFAULT_IV_BYTES 16
 #define ECRYPTFS_DEFAULT_EXTENT_SIZE 4096
@@ -233,9 +233,9 @@ struct ecryptfs_crypt_stat {
        size_t extent_shift;
        unsigned int extent_mask;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
-       struct crypto_ablkcipher *tfm;
-       struct crypto_hash *hash_tfm; /* Crypto context for generating
-                                      * the initialization vectors */
+       struct crypto_skcipher *tfm;
+       struct crypto_shash *hash_tfm; /* Crypto context for generating
+                                       * the initialization vectors */
        unsigned char cipher[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
        unsigned char key[ECRYPTFS_MAX_KEY_BYTES];
        unsigned char root_iv[ECRYPTFS_MAX_IV_BYTES];
@@ -309,7 +309,7 @@ struct ecryptfs_global_auth_tok {
  * keeps a list of crypto API contexts around to use when needed.
  */
 struct ecryptfs_key_tfm {
-       struct crypto_blkcipher *key_tfm;
+       struct crypto_skcipher *key_tfm;
        size_t key_size;
        struct mutex key_tfm_mutex;
        struct list_head key_tfm_list;
@@ -659,7 +659,7 @@ ecryptfs_add_new_key_tfm(struct ecryptfs_key_tfm **key_tfm, char *cipher_name,
 int ecryptfs_init_crypto(void);
 int ecryptfs_destroy_crypto(void);
 int ecryptfs_tfm_exists(char *cipher_name, struct ecryptfs_key_tfm **key_tfm);
-int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
+int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_skcipher **tfm,
                                               struct mutex **tfm_mutex,
                                               char *cipher_name);
 int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
index 4e685ac1024dc313e56ed8c8245fa5add9bb7c33..0a8f1b469a633dace58587b9044053f82b6fdc92 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/dcache.h>
 #include <linux/namei.h>
 #include <linux/mount.h>
-#include <linux/crypto.h>
 #include <linux/fs_stack.h>
 #include <linux/slab.h>
 #include <linux/xattr.h>
index 6bd67e2011f083e4e184e4d2d336cb15176d40ee..c5c84dfb5b3e3e0cf488a38846d4897b19fd53d0 100644 (file)
  * 02111-1307, USA.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/string.h>
 #include <linux/pagemap.h>
 #include <linux/key.h>
 #include <linux/random.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include "ecryptfs_kernel.h"
@@ -601,12 +602,13 @@ struct ecryptfs_write_tag_70_packet_silly_stack {
        struct ecryptfs_auth_tok *auth_tok;
        struct scatterlist src_sg[2];
        struct scatterlist dst_sg[2];
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *skcipher_tfm;
+       struct skcipher_request *skcipher_req;
        char iv[ECRYPTFS_MAX_IV_BYTES];
        char hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
        char tmp_hash[ECRYPTFS_TAG_70_DIGEST_SIZE];
-       struct hash_desc hash_desc;
-       struct scatterlist hash_sg;
+       struct crypto_shash *hash_tfm;
+       struct shash_desc *hash_desc;
 };
 
 /**
@@ -629,14 +631,13 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
        struct key *auth_tok_key = NULL;
        int rc = 0;
 
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
                       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
                rc = -ENOMEM;
                goto out;
        }
-       s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        (*packet_size) = 0;
        rc = ecryptfs_find_auth_tok_for_sig(
                &auth_tok_key,
@@ -649,7 +650,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                goto out;
        }
        rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(
-               &s->desc.tfm,
+               &s->skcipher_tfm,
                &s->tfm_mutex, mount_crypt_stat->global_default_fn_cipher_name);
        if (unlikely(rc)) {
                printk(KERN_ERR "Internal error whilst attempting to get "
@@ -658,7 +659,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                goto out;
        }
        mutex_lock(s->tfm_mutex);
-       s->block_size = crypto_blkcipher_blocksize(s->desc.tfm);
+       s->block_size = crypto_skcipher_blocksize(s->skcipher_tfm);
        /* Plus one for the \0 separator between the random prefix
         * and the plaintext filename */
        s->num_rand_bytes = (ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES + 1);
@@ -691,6 +692,19 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                rc = -EINVAL;
                goto out_unlock;
        }
+
+       s->skcipher_req = skcipher_request_alloc(s->skcipher_tfm, GFP_KERNEL);
+       if (!s->skcipher_req) {
+               printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
+                      "skcipher_request_alloc for %s\n", __func__,
+                      crypto_skcipher_driver_name(s->skcipher_tfm));
+               rc = -ENOMEM;
+               goto out_unlock;
+       }
+
+       skcipher_request_set_callback(s->skcipher_req,
+                                     CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
        s->block_aligned_filename = kzalloc(s->block_aligned_filename_size,
                                            GFP_KERNEL);
        if (!s->block_aligned_filename) {
@@ -700,7 +714,6 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                rc = -ENOMEM;
                goto out_unlock;
        }
-       s->i = 0;
        dest[s->i++] = ECRYPTFS_TAG_70_PACKET_TYPE;
        rc = ecryptfs_write_packet_length(&dest[s->i],
                                          (ECRYPTFS_SIG_SIZE
@@ -738,40 +751,36 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                       "password tokens\n", __func__);
                goto out_free_unlock;
        }
-       sg_init_one(
-               &s->hash_sg,
-               (u8 *)s->auth_tok->token.password.session_key_encryption_key,
-               s->auth_tok->token.password.session_key_encryption_key_bytes);
-       s->hash_desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-       s->hash_desc.tfm = crypto_alloc_hash(ECRYPTFS_TAG_70_DIGEST, 0,
-                                            CRYPTO_ALG_ASYNC);
-       if (IS_ERR(s->hash_desc.tfm)) {
-                       rc = PTR_ERR(s->hash_desc.tfm);
+       s->hash_tfm = crypto_alloc_shash(ECRYPTFS_TAG_70_DIGEST, 0, 0);
+       if (IS_ERR(s->hash_tfm)) {
+                       rc = PTR_ERR(s->hash_tfm);
                        printk(KERN_ERR "%s: Error attempting to "
                               "allocate hash crypto context; rc = [%d]\n",
                               __func__, rc);
                        goto out_free_unlock;
        }
-       rc = crypto_hash_init(&s->hash_desc);
-       if (rc) {
-               printk(KERN_ERR
-                      "%s: Error initializing crypto hash; rc = [%d]\n",
-                      __func__, rc);
-               goto out_release_free_unlock;
-       }
-       rc = crypto_hash_update(
-               &s->hash_desc, &s->hash_sg,
-               s->auth_tok->token.password.session_key_encryption_key_bytes);
-       if (rc) {
-               printk(KERN_ERR
-                      "%s: Error updating crypto hash; rc = [%d]\n",
-                      __func__, rc);
+
+       s->hash_desc = kmalloc(sizeof(*s->hash_desc) +
+                              crypto_shash_descsize(s->hash_tfm), GFP_KERNEL);
+       if (!s->hash_desc) {
+               printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
+                      "kmalloc [%zd] bytes\n", __func__,
+                      sizeof(*s->hash_desc) +
+                      crypto_shash_descsize(s->hash_tfm));
+               rc = -ENOMEM;
                goto out_release_free_unlock;
        }
-       rc = crypto_hash_final(&s->hash_desc, s->hash);
+
+       s->hash_desc->tfm = s->hash_tfm;
+       s->hash_desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+       rc = crypto_shash_digest(s->hash_desc,
+                                (u8 *)s->auth_tok->token.password.session_key_encryption_key,
+                                s->auth_tok->token.password.session_key_encryption_key_bytes,
+                                s->hash);
        if (rc) {
                printk(KERN_ERR
-                      "%s: Error finalizing crypto hash; rc = [%d]\n",
+                      "%s: Error computing crypto hash; rc = [%d]\n",
                       __func__, rc);
                goto out_release_free_unlock;
        }
@@ -780,27 +789,12 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                        s->hash[(s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)];
                if ((s->j % ECRYPTFS_TAG_70_DIGEST_SIZE)
                    == (ECRYPTFS_TAG_70_DIGEST_SIZE - 1)) {
-                       sg_init_one(&s->hash_sg, (u8 *)s->hash,
-                                   ECRYPTFS_TAG_70_DIGEST_SIZE);
-                       rc = crypto_hash_init(&s->hash_desc);
-                       if (rc) {
-                               printk(KERN_ERR
-                                      "%s: Error initializing crypto hash; "
-                                      "rc = [%d]\n", __func__, rc);
-                               goto out_release_free_unlock;
-                       }
-                       rc = crypto_hash_update(&s->hash_desc, &s->hash_sg,
-                                               ECRYPTFS_TAG_70_DIGEST_SIZE);
+                       rc = crypto_shash_digest(s->hash_desc, (u8 *)s->hash,
+                                               ECRYPTFS_TAG_70_DIGEST_SIZE,
+                                               s->tmp_hash);
                        if (rc) {
                                printk(KERN_ERR
-                                      "%s: Error updating crypto hash; "
-                                      "rc = [%d]\n", __func__, rc);
-                               goto out_release_free_unlock;
-                       }
-                       rc = crypto_hash_final(&s->hash_desc, s->tmp_hash);
-                       if (rc) {
-                               printk(KERN_ERR
-                                      "%s: Error finalizing crypto hash; "
+                                      "%s: Error computing crypto hash; "
                                       "rc = [%d]\n", __func__, rc);
                                goto out_release_free_unlock;
                        }
@@ -834,10 +828,8 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
         * of the IV here, so we just use 0's for the IV. Note the
         * constraint that ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES
         * >= ECRYPTFS_MAX_IV_BYTES. */
-       memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES);
-       s->desc.info = s->iv;
-       rc = crypto_blkcipher_setkey(
-               s->desc.tfm,
+       rc = crypto_skcipher_setkey(
+               s->skcipher_tfm,
                s->auth_tok->token.password.session_key_encryption_key,
                mount_crypt_stat->global_default_fn_cipher_key_bytes);
        if (rc < 0) {
@@ -850,8 +842,9 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
                       mount_crypt_stat->global_default_fn_cipher_key_bytes);
                goto out_release_free_unlock;
        }
-       rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg,
-                                        s->block_aligned_filename_size);
+       skcipher_request_set_crypt(s->skcipher_req, s->src_sg, s->dst_sg,
+                                  s->block_aligned_filename_size, s->iv);
+       rc = crypto_skcipher_encrypt(s->skcipher_req);
        if (rc) {
                printk(KERN_ERR "%s: Error attempting to encrypt filename; "
                       "rc = [%d]\n", __func__, rc);
@@ -861,7 +854,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes,
        (*packet_size) = s->i;
        (*remaining_bytes) -= (*packet_size);
 out_release_free_unlock:
-       crypto_free_hash(s->hash_desc.tfm);
+       crypto_free_shash(s->hash_tfm);
 out_free_unlock:
        kzfree(s->block_aligned_filename);
 out_unlock:
@@ -871,6 +864,8 @@ out:
                up_write(&(auth_tok_key->sem));
                key_put(auth_tok_key);
        }
+       skcipher_request_free(s->skcipher_req);
+       kzfree(s->hash_desc);
        kfree(s);
        return rc;
 }
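
The tag-70 packet writer above collapses the old crypto_hash init/update/final triple into a single crypto_shash_digest() call on a heap-allocated descriptor. A minimal sketch of that pattern, assuming a synchronous hash; "sha256" is only an example (the code above uses ECRYPTFS_TAG_70_DIGEST), and out must hold at least crypto_shash_digestsize(tfm) bytes.

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int shash_digest_sketch(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int rc;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* The descriptor carries the algorithm's state after the header */
        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
                rc = -ENOMEM;
                goto out_free_tfm;
        }
        desc->tfm = tfm;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        rc = crypto_shash_digest(desc, data, len, out);

        kfree(desc);
out_free_tfm:
        crypto_free_shash(tfm);
        return rc;
}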
@@ -888,7 +883,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack {
        struct ecryptfs_auth_tok *auth_tok;
        struct scatterlist src_sg[2];
        struct scatterlist dst_sg[2];
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *skcipher_tfm;
+       struct skcipher_request *skcipher_req;
        char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1];
        char iv[ECRYPTFS_MAX_IV_BYTES];
        char cipher_string[ECRYPTFS_MAX_CIPHER_NAME_SIZE + 1];
@@ -922,14 +918,13 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
        (*packet_size) = 0;
        (*filename_size) = 0;
        (*filename) = NULL;
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
+       s = kzalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                printk(KERN_ERR "%s: Out of memory whilst trying to kmalloc "
                       "[%zd] bytes of kernel memory\n", __func__, sizeof(*s));
                rc = -ENOMEM;
                goto out;
        }
-       s->desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
        if (max_packet_size < ECRYPTFS_TAG_70_MIN_METADATA_SIZE) {
                printk(KERN_WARNING "%s: max_packet_size is [%zd]; it must be "
                       "at least [%d]\n", __func__, max_packet_size,
@@ -992,7 +987,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                       rc);
                goto out;
        }
-       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->desc.tfm,
+       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&s->skcipher_tfm,
                                                        &s->tfm_mutex,
                                                        s->cipher_string);
        if (unlikely(rc)) {
@@ -1030,12 +1025,23 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                       __func__, rc, s->block_aligned_filename_size);
                goto out_free_unlock;
        }
+
+       s->skcipher_req = skcipher_request_alloc(s->skcipher_tfm, GFP_KERNEL);
+       if (!s->skcipher_req) {
+               printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
+                      "skcipher_request_alloc for %s\n", __func__,
+                      crypto_skcipher_driver_name(s->skcipher_tfm));
+               rc = -ENOMEM;
+               goto out_free_unlock;
+       }
+
+       skcipher_request_set_callback(s->skcipher_req,
+                                     CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
        /* The characters in the first block effectively do the job of
         * the IV here, so we just use 0's for the IV. Note the
         * constraint that ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES
         * >= ECRYPTFS_MAX_IV_BYTES. */
-       memset(s->iv, 0, ECRYPTFS_MAX_IV_BYTES);
-       s->desc.info = s->iv;
        /* TODO: Support other key modules than passphrase for
         * filename encryption */
        if (s->auth_tok->token_type != ECRYPTFS_PASSWORD) {
@@ -1044,8 +1050,8 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                       "password tokens\n", __func__);
                goto out_free_unlock;
        }
-       rc = crypto_blkcipher_setkey(
-               s->desc.tfm,
+       rc = crypto_skcipher_setkey(
+               s->skcipher_tfm,
                s->auth_tok->token.password.session_key_encryption_key,
                mount_crypt_stat->global_default_fn_cipher_key_bytes);
        if (rc < 0) {
@@ -1058,14 +1064,14 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size,
                       mount_crypt_stat->global_default_fn_cipher_key_bytes);
                goto out_free_unlock;
        }
-       rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg,
-                                        s->block_aligned_filename_size);
+       skcipher_request_set_crypt(s->skcipher_req, s->src_sg, s->dst_sg,
+                                  s->block_aligned_filename_size, s->iv);
+       rc = crypto_skcipher_decrypt(s->skcipher_req);
        if (rc) {
                printk(KERN_ERR "%s: Error attempting to decrypt filename; "
                       "rc = [%d]\n", __func__, rc);
                goto out_free_unlock;
        }
-       s->i = 0;
        while (s->decrypted_filename[s->i] != '\0'
               && s->i < s->block_aligned_filename_size)
                s->i++;
@@ -1108,6 +1114,7 @@ out:
                up_write(&(auth_tok_key->sem));
                key_put(auth_tok_key);
        }
+       skcipher_request_free(s->skcipher_req);
        kfree(s);
        return rc;
 }
@@ -1667,9 +1674,8 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
        struct scatterlist dst_sg[2];
        struct scatterlist src_sg[2];
        struct mutex *tfm_mutex;
-       struct blkcipher_desc desc = {
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct crypto_skcipher *tfm;
+       struct skcipher_request *req = NULL;
        int rc = 0;
 
        if (unlikely(ecryptfs_verbosity > 0)) {
@@ -1680,7 +1686,7 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
                        auth_tok->token.password.session_key_encryption_key,
                        auth_tok->token.password.session_key_encryption_key_bytes);
        }
-       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
+       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&tfm, &tfm_mutex,
                                                        crypt_stat->cipher);
        if (unlikely(rc)) {
                printk(KERN_ERR "Internal error whilst attempting to get "
@@ -1711,8 +1717,20 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
                goto out;
        }
        mutex_lock(tfm_mutex);
-       rc = crypto_blkcipher_setkey(
-               desc.tfm, auth_tok->token.password.session_key_encryption_key,
+       req = skcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               mutex_unlock(tfm_mutex);
+               printk(KERN_ERR "%s: Out of kernel memory whilst attempting to "
+                      "skcipher_request_alloc for %s\n", __func__,
+                      crypto_skcipher_driver_name(tfm));
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     NULL, NULL);
+       rc = crypto_skcipher_setkey(
+               tfm, auth_tok->token.password.session_key_encryption_key,
                crypt_stat->key_size);
        if (unlikely(rc < 0)) {
                mutex_unlock(tfm_mutex);
@@ -1720,8 +1738,10 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
                rc = -EINVAL;
                goto out;
        }
-       rc = crypto_blkcipher_decrypt(&desc, dst_sg, src_sg,
-                                     auth_tok->session_key.encrypted_key_size);
+       skcipher_request_set_crypt(req, src_sg, dst_sg,
+                                  auth_tok->session_key.encrypted_key_size,
+                                  NULL);
+       rc = crypto_skcipher_decrypt(req);
        mutex_unlock(tfm_mutex);
        if (unlikely(rc)) {
                printk(KERN_ERR "Error decrypting; rc = [%d]\n", rc);
@@ -1738,6 +1758,7 @@ decrypt_passphrase_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
                                  crypt_stat->key_size);
        }
 out:
+       skcipher_request_free(req);
        return rc;
 }
 
@@ -2191,16 +2212,14 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
        size_t max_packet_size;
        struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
                crypt_stat->mount_crypt_stat;
-       struct blkcipher_desc desc = {
-               .tfm = NULL,
-               .flags = CRYPTO_TFM_REQ_MAY_SLEEP
-       };
+       struct crypto_skcipher *tfm;
+       struct skcipher_request *req;
        int rc = 0;
 
        (*packet_size) = 0;
        ecryptfs_from_hex(key_rec->sig, auth_tok->token.password.signature,
                          ECRYPTFS_SIG_SIZE);
-       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&desc.tfm, &tfm_mutex,
+       rc = ecryptfs_get_tfm_and_mutex_for_cipher_name(&tfm, &tfm_mutex,
                                                        crypt_stat->cipher);
        if (unlikely(rc)) {
                printk(KERN_ERR "Internal error whilst attempting to get "
@@ -2209,12 +2228,11 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
                goto out;
        }
        if (mount_crypt_stat->global_default_cipher_key_size == 0) {
-               struct blkcipher_alg *alg = crypto_blkcipher_alg(desc.tfm);
-
                printk(KERN_WARNING "No key size specified at mount; "
-                      "defaulting to [%d]\n", alg->max_keysize);
+                      "defaulting to [%d]\n",
+                      crypto_skcipher_default_keysize(tfm));
                mount_crypt_stat->global_default_cipher_key_size =
-                       alg->max_keysize;
+                       crypto_skcipher_default_keysize(tfm);
        }
        if (crypt_stat->key_size == 0)
                crypt_stat->key_size =
@@ -2284,20 +2302,36 @@ write_tag_3_packet(char *dest, size_t *remaining_bytes,
                goto out;
        }
        mutex_lock(tfm_mutex);
-       rc = crypto_blkcipher_setkey(desc.tfm, session_key_encryption_key,
-                                    crypt_stat->key_size);
+       rc = crypto_skcipher_setkey(tfm, session_key_encryption_key,
+                                   crypt_stat->key_size);
        if (rc < 0) {
                mutex_unlock(tfm_mutex);
                ecryptfs_printk(KERN_ERR, "Error setting key for crypto "
                                "context; rc = [%d]\n", rc);
                goto out;
        }
+
+       req = skcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               mutex_unlock(tfm_mutex);
+               ecryptfs_printk(KERN_ERR, "Out of kernel memory whilst "
+                               "attempting to skcipher_request_alloc for "
+                               "%s\n", crypto_skcipher_driver_name(tfm));
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+                                     NULL, NULL);
+
        rc = 0;
        ecryptfs_printk(KERN_DEBUG, "Encrypting [%zd] bytes of the key\n",
                        crypt_stat->key_size);
-       rc = crypto_blkcipher_encrypt(&desc, dst_sg, src_sg,
-                                     (*key_rec).enc_key_size);
+       skcipher_request_set_crypt(req, src_sg, dst_sg,
+                                  (*key_rec).enc_key_size, NULL);
+       rc = crypto_skcipher_encrypt(req);
        mutex_unlock(tfm_mutex);
+       skcipher_request_free(req);
        if (rc) {
                printk(KERN_ERR "Error encrypting; rc = [%d]\n", rc);
                goto out;
index e25b6b06bacf2cf8719d76d06e6efb8a60bd78b8..5fe2cdb4898806e6e00ec95e88a5d19821a293ae 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/module.h>
 #include <linux/namei.h>
 #include <linux/skbuff.h>
-#include <linux/crypto.h>
 #include <linux/mount.h>
 #include <linux/pagemap.h>
 #include <linux/key.h>
@@ -739,8 +738,7 @@ static void ecryptfs_free_kmem_caches(void)
                struct ecryptfs_cache_info *info;
 
                info = &ecryptfs_cache_infos[i];
-               if (*(info->cache))
-                       kmem_cache_destroy(*(info->cache));
+               kmem_cache_destroy(*(info->cache));
        }
 }
 
index c6ced4cbf0cff7d3b3f754ddbf0727d44bca8809..1f5865263b3eff32fed493480ce9ff7b72759af2 100644 (file)
@@ -30,7 +30,6 @@
 #include <linux/page-flags.h>
 #include <linux/mount.h>
 #include <linux/file.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <asm/unaligned.h>
index afa1b81c3418bbfa1e18a07e9358c46807e9af30..77a486d3a51b600265a0fc1a1a0134ae74ff5ab9 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/file.h>
-#include <linux/crypto.h>
 #include <linux/statfs.h>
 #include <linux/magic.h>
 #include "ecryptfs_kernel.h"
index ae1dbcf47e979d48b67ee83e1ec413e4d119a48b..cde60741cad2c4cc429f04e80f29e788b0e0d673 100644 (file)
 /* Epoll private bits inside the event mask */
 #define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)
 
+#define EPOLLINOUT_BITS (POLLIN | POLLOUT)
+
+#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | POLLERR | POLLHUP | \
+                               EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)
+
 /* Maximum number of nesting allowed inside epoll sets */
 #define EP_MAX_NESTS 4
 
@@ -1068,7 +1073,22 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
         * wait list.
         */
        if (waitqueue_active(&ep->wq)) {
-               ewake = 1;
+               if ((epi->event.events & EPOLLEXCLUSIVE) &&
+                                       !((unsigned long)key & POLLFREE)) {
+                       switch ((unsigned long)key & EPOLLINOUT_BITS) {
+                       case POLLIN:
+                               if (epi->event.events & POLLIN)
+                                       ewake = 1;
+                               break;
+                       case POLLOUT:
+                               if (epi->event.events & POLLOUT)
+                                       ewake = 1;
+                               break;
+                       case 0:
+                               ewake = 1;
+                               break;
+                       }
+               }
                wake_up_locked(&ep->wq);
        }
        if (waitqueue_active(&ep->poll_wait))
@@ -1875,9 +1895,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
         * so EPOLLEXCLUSIVE is not allowed for an EPOLL_CTL_MOD operation.
         * Also, we do not currently support nested exclusive wakeups.
         */
-       if ((epds.events & EPOLLEXCLUSIVE) && (op == EPOLL_CTL_MOD ||
-               (op == EPOLL_CTL_ADD && is_file_epoll(tf.file))))
-               goto error_tgt_fput;
+       if (epds.events & EPOLLEXCLUSIVE) {
+               if (op == EPOLL_CTL_MOD)
+                       goto error_tgt_fput;
+               if (op == EPOLL_CTL_ADD && (is_file_epoll(tf.file) ||
+                               (epds.events & ~EPOLLEXCLUSIVE_OK_BITS)))
+                       goto error_tgt_fput;
+       }
 
        /*
         * At this point it is safe to assume that the "private_data" contains
@@ -1950,8 +1974,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
                break;
        case EPOLL_CTL_MOD:
                if (epi) {
-                       epds.events |= POLLERR | POLLHUP;
-                       error = ep_modify(ep, epi, &epds);
+                       if (!(epi->event.events & EPOLLEXCLUSIVE)) {
+                               epds.events |= POLLERR | POLLHUP;
+                               error = ep_modify(ep, epi, &epds);
+                       }
                } else
                        error = -ENOENT;
                break;
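
The eventpoll hunks above limit EPOLLEXCLUSIVE to EPOLL_CTL_ADD, reject it together with event bits outside EPOLLEXCLUSIVE_OK_BITS, and skip EPOLL_CTL_MOD on entries that carry it. A hedged userspace sketch of the intended use, where each worker process adds the same listening socket with EPOLLEXCLUSIVE so that a wakeup goes to one waiter rather than all of them; the fallback define matches the kernel value at the time, since older libc headers may not provide it.

#include <netinet/in.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/epoll.h>
#include <sys/socket.h>

#ifndef EPOLLEXCLUSIVE
#define EPOLLEXCLUSIVE (1u << 28)       /* kernel value as of this merge */
#endif

int main(void)
{
        struct sockaddr_in addr = { .sin_family = AF_INET };    /* any port */
        struct epoll_event ev = { .events = EPOLLIN | EPOLLEXCLUSIVE };
        int lfd, epfd;

        lfd = socket(AF_INET, SOCK_STREAM, 0);
        if (lfd < 0 || bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) ||
            listen(lfd, 16)) {
                perror("listen socket");
                return EXIT_FAILURE;
        }

        epfd = epoll_create1(0);
        ev.data.fd = lfd;

        /* EPOLLEXCLUSIVE is accepted only at ADD time, never via MOD, and
         * only together with IN/OUT/ERR/HUP/WAKEUP/ET. */
        if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, lfd, &ev)) {
                perror("epoll_ctl(EPOLL_CTL_ADD)");
                return EXIT_FAILURE;
        }

        /* Workers that fork() here and block in epoll_wait(epfd, ...) are
         * woken one at a time for each incoming connection. */
        return EXIT_SUCCESS;
}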
index c8021208a7eb1a98ba32e16a8536146f570d5c4a..04efc304c9c147b08089dfa66948a4d2f809584b 100644 (file)
  * Special Publication 800-38E and IEEE P1619/D16.
  */
 
-#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/skcipher.h>
 #include <keys/user-type.h>
 #include <keys/encrypted-type.h>
-#include <linux/crypto.h>
 #include <linux/ecryptfs.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
@@ -261,21 +259,21 @@ static int ext4_page_crypto(struct inode *inode,
 
 {
        u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct scatterlist dst, src;
        struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
 
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                   "%s: crypto_request_alloc() failed\n",
                                   __func__);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(
+       skcipher_request_set_callback(
                req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                ext4_crypt_complete, &ecr);
 
@@ -288,21 +286,21 @@ static int ext4_page_crypto(struct inode *inode,
        sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
        sg_init_table(&src, 1);
        sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-       ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
-                                    xts_tweak);
+       skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+                                  xts_tweak);
        if (rw == EXT4_DECRYPT)
-               res = crypto_ablkcipher_decrypt(req);
+               res = crypto_skcipher_decrypt(req);
        else
-               res = crypto_ablkcipher_encrypt(req);
+               res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res) {
                printk_ratelimited(
                        KERN_ERR
-                       "%s: crypto_ablkcipher_encrypt() returned %d\n",
+                       "%s: crypto_skcipher_encrypt() returned %d\n",
                        __func__, res);
                return res;
        }
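
ext4_page_crypto() above keeps the asynchronous completion handshake while switching to skcipher: a return of -EINPROGRESS or -EBUSY means the request will finish later through the callback, so the caller sleeps on a completion. A generic sketch of that wait pattern; the names are invented here rather than taken from ext4, which wraps the same idea in DECLARE_EXT4_COMPLETION_RESULT and ext4_crypt_complete, and the request is assumed to have been allocated by the caller for an already-keyed tfm.

#include <crypto/skcipher.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>

struct crypt_result {
        struct completion completion;
        int res;
};

/* Final-completion callback: ignore backlog notifications, record the
 * result and wake the sleeper. */
static void crypt_complete(struct crypto_async_request *req, int res)
{
        struct crypt_result *cr = req->data;

        if (res == -EINPROGRESS)
                return;
        cr->res = res;
        complete(&cr->completion);
}

static int encrypt_and_wait(struct skcipher_request *req,
                            struct scatterlist *src, struct scatterlist *dst,
                            unsigned int len, u8 *iv)
{
        struct crypt_result cr = { .res = 0 };
        int rc;

        init_completion(&cr.completion);
        skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        crypt_complete, &cr);
        skcipher_request_set_crypt(req, src, dst, len, iv);

        rc = crypto_skcipher_encrypt(req);
        if (rc == -EINPROGRESS || rc == -EBUSY) {
                wait_for_completion(&cr.completion);
                rc = cr.res;
        }
        return rc;
}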
@@ -467,3 +465,47 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
                return size;
        return 0;
 }
+
+/*
+ * Validate dentries for encrypted directories to make sure we aren't
+ * potentially caching stale data after a key has been added or
+ * removed.
+ */
+static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       struct inode *dir = d_inode(dentry->d_parent);
+       struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
+       int inode_has_key, encrypted_filename;
+
+       if (!ext4_encrypted_inode(dir))
+               return 0;
+
+       if (ci && ci->ci_keyring_key &&
+           (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+                                         (1 << KEY_FLAG_REVOKED) |
+                                         (1 << KEY_FLAG_DEAD))))
+               ci = NULL;
+
+       /* this should eventually be a flag in d_flags */
+       encrypted_filename = dentry->d_fsdata != NULL;
+       inode_has_key = (ci != NULL);
+
+       /*
+        * If this is an encrypted file name, and it is a negative
+        * dentry, it might be a valid name.  We can't check if the
+        * key has since been made available due to locking reasons,
+        * so we fail the validation so ext4_lookup() can do this
+        * check.
+        *
+        * We also fail the validation if the dentry is for a
+        * decrypted filename, but we no longer have the key.
+        */
+       if ((encrypted_filename && d_is_negative(dentry)) ||
+           (!encrypted_filename && !inode_has_key))
+               return 0;
+       return 1;
+}
+
+const struct dentry_operations ext4_encrypted_d_ops = {
+       .d_revalidate = ext4_d_revalidate,
+};
index 2fbef8a14760f4095300c9fdc0edcdc2909f4a65..1a2f360405dbdd1b4bdd84de735589f34010068a 100644 (file)
  *
  */
 
-#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/skcipher.h>
 #include <keys/encrypted-type.h>
 #include <keys/user-type.h>
-#include <linux/crypto.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
 #include <linux/key.h>
@@ -65,10 +63,10 @@ static int ext4_fname_encrypt(struct inode *inode,
                              struct ext4_str *oname)
 {
        u32 ciphertext_len;
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
        char iv[EXT4_CRYPTO_BLOCK_SIZE];
        struct scatterlist src_sg, dst_sg;
@@ -95,14 +93,14 @@ static int ext4_fname_encrypt(struct inode *inode,
        }
 
        /* Allocate request */
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(
                    KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
                kfree(alloc_buf);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                ext4_dir_crypt_complete, &ecr);
 
@@ -117,14 +115,14 @@ static int ext4_fname_encrypt(struct inode *inode,
        /* Create encryption request */
        sg_init_one(&src_sg, workbuf, ciphertext_len);
        sg_init_one(&dst_sg, oname->name, ciphertext_len);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
-       res = crypto_ablkcipher_encrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
+       res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
        kfree(alloc_buf);
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res < 0) {
                printk_ratelimited(
                    KERN_ERR "%s: Error (error code %d)\n", __func__, res);
@@ -145,11 +143,11 @@ static int ext4_fname_decrypt(struct inode *inode,
                              struct ext4_str *oname)
 {
        struct ext4_str tmp_in[2], tmp_out[1];
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct scatterlist src_sg, dst_sg;
        struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
        char iv[EXT4_CRYPTO_BLOCK_SIZE];
        unsigned lim = max_name_len(inode);
@@ -162,13 +160,13 @@ static int ext4_fname_decrypt(struct inode *inode,
        tmp_out[0].name = oname->name;
 
        /* Allocate request */
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(
                    KERN_ERR "%s: crypto_request_alloc() failed\n",  __func__);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                ext4_dir_crypt_complete, &ecr);
 
@@ -178,13 +176,13 @@ static int ext4_fname_decrypt(struct inode *inode,
        /* Create encryption request */
        sg_init_one(&src_sg, iname->name, iname->len);
        sg_init_one(&dst_sg, oname->name, oname->len);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
-       res = crypto_ablkcipher_decrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+       res = crypto_skcipher_decrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res < 0) {
                printk_ratelimited(
                    KERN_ERR "%s: Error in ext4_fname_encrypt (error code %d)\n",
index 9a16d1e75a493f9e4663bb4ea05b9da9980ba65d..0129d688d1f7187701df12b64d23f522d0c3f31b 100644 (file)
@@ -8,6 +8,7 @@
  * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
  */
 
+#include <crypto/skcipher.h>
 #include <keys/encrypted-type.h>
 #include <keys/user-type.h>
 #include <linux/random.h>
@@ -41,45 +42,42 @@ static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE],
                               char derived_key[EXT4_AES_256_XTS_KEY_SIZE])
 {
        int res = 0;
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_EXT4_COMPLETION_RESULT(ecr);
        struct scatterlist src_sg, dst_sg;
-       struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
-                                                               0);
+       struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
 
        if (IS_ERR(tfm)) {
                res = PTR_ERR(tfm);
                tfm = NULL;
                goto out;
        }
-       crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                res = -ENOMEM;
                goto out;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        derive_crypt_complete, &ecr);
-       res = crypto_ablkcipher_setkey(tfm, deriving_key,
-                                      EXT4_AES_128_ECB_KEY_SIZE);
+       res = crypto_skcipher_setkey(tfm, deriving_key,
+                                    EXT4_AES_128_ECB_KEY_SIZE);
        if (res < 0)
                goto out;
        sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE);
        sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
-                                    EXT4_AES_256_XTS_KEY_SIZE, NULL);
-       res = crypto_ablkcipher_encrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg,
+                                  EXT4_AES_256_XTS_KEY_SIZE, NULL);
+       res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
 
 out:
-       if (req)
-               ablkcipher_request_free(req);
-       if (tfm)
-               crypto_free_ablkcipher(tfm);
+       skcipher_request_free(req);
+       crypto_free_skcipher(tfm);
        return res;
 }
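
ext4_derive_key_aes() above performs a single ECB pass to unwrap the per-file key, which is why skcipher_request_set_crypt() receives a NULL IV. A compact sketch of such a key unwrap, assuming caller-provided buffers and the same 16-byte deriving key / 64-byte payload sizes as EXT4_AES_128_ECB_KEY_SIZE and EXT4_AES_256_XTS_KEY_SIZE above.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

static int derive_key_sketch(const u8 deriving_key[16],
                             const u8 source[64], u8 derived[64])
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist src, dst;
        int rc;

        tfm = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        rc = crypto_skcipher_setkey(tfm, deriving_key, 16);
        if (rc)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                rc = -ENOMEM;
                goto out_free_tfm;
        }
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        sg_init_one(&src, source, 64);
        sg_init_one(&dst, derived, 64);
        skcipher_request_set_crypt(req, &src, &dst, 64, NULL); /* ECB: no IV */
        rc = crypto_skcipher_encrypt(req);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return rc;
}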
 
@@ -90,7 +88,7 @@ void ext4_free_crypt_info(struct ext4_crypt_info *ci)
 
        if (ci->ci_keyring_key)
                key_put(ci->ci_keyring_key);
-       crypto_free_ablkcipher(ci->ci_ctfm);
+       crypto_free_skcipher(ci->ci_ctfm);
        kmem_cache_free(ext4_crypt_info_cachep, ci);
 }
 
@@ -122,7 +120,7 @@ int _ext4_get_encryption_info(struct inode *inode)
        struct ext4_encryption_context ctx;
        const struct user_key_payload *ukp;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       struct crypto_ablkcipher *ctfm;
+       struct crypto_skcipher *ctfm;
        const char *cipher_str;
        char raw_key[EXT4_MAX_KEY_SIZE];
        char mode;
@@ -237,7 +235,7 @@ retry:
        if (res)
                goto out;
 got_key:
-       ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
+       ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
        if (!ctfm || IS_ERR(ctfm)) {
                res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
                printk(KERN_DEBUG
@@ -246,11 +244,11 @@ got_key:
                goto out;
        }
        crypt_info->ci_ctfm = ctfm;
-       crypto_ablkcipher_clear_flags(ctfm, ~0);
-       crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm),
+       crypto_skcipher_clear_flags(ctfm, ~0);
+       crypto_tfm_set_flags(crypto_skcipher_tfm(ctfm),
                             CRYPTO_TFM_REQ_WEAK_KEY);
-       res = crypto_ablkcipher_setkey(ctfm, raw_key,
-                                      ext4_encryption_key_size(mode));
+       res = crypto_skcipher_setkey(ctfm, raw_key,
+                                    ext4_encryption_key_size(mode));
        if (res)
                goto out;
        memzero_explicit(raw_key, sizeof(raw_key));
index 1d1bca74f84437172d96c26e648e6ed45e129725..6d17f31a31d7479cae271f6d043ec9cb475e348e 100644 (file)
@@ -111,6 +111,12 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
        int dir_has_error = 0;
        struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
 
+       if (ext4_encrypted_inode(inode)) {
+               err = ext4_get_encryption_info(inode);
+               if (err && err != -ENOKEY)
+                       return err;
+       }
+
        if (is_dx_dir(inode)) {
                err = ext4_dx_readdir(file, ctx);
                if (err != ERR_BAD_DX_DIR) {
index 0662b285dc8a71982a54e5895d58d797c7bcf6a4..157b458a69d4b7c334f28b80c37fd5d11c9f0c25 100644 (file)
@@ -2302,6 +2302,7 @@ struct page *ext4_encrypt(struct inode *inode,
 int ext4_decrypt(struct page *page);
 int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
                           ext4_fsblk_t pblk, ext4_lblk_t len);
+extern const struct dentry_operations ext4_encrypted_d_ops;
 
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
 int ext4_init_crypto(void);
index ac7d4e81379630d2f720ea8d389b8322a784f4cf..1f73c29717e19f656db7b9745c8e61c67baa0ff1 100644 (file)
@@ -77,7 +77,7 @@ struct ext4_crypt_info {
        char            ci_data_mode;
        char            ci_filename_mode;
        char            ci_flags;
-       struct crypto_ablkcipher *ci_ctfm;
+       struct crypto_skcipher *ci_ctfm;
        struct key      *ci_keyring_key;
        char            ci_master_key[EXT4_KEY_DESCRIPTOR_SIZE];
 };
index 1126436dada19519b97240bcdb42995acae46724..474f1a4d2ca8f45be1c0c97ce8c13a90cf75d34d 100644 (file)
@@ -350,6 +350,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
        struct super_block *sb = inode->i_sb;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct vfsmount *mnt = filp->f_path.mnt;
+       struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
        struct path path;
        char buf[64], *cp;
        int ret;
@@ -393,6 +394,14 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
                if (ext4_encryption_info(inode) == NULL)
                        return -ENOKEY;
        }
+       if (ext4_encrypted_inode(dir) &&
+           !ext4_is_child_context_consistent_with_parent(dir, inode)) {
+               ext4_warning(inode->i_sb,
+                            "Inconsistent encryption contexts: %lu/%lu\n",
+                            (unsigned long) dir->i_ino,
+                            (unsigned long) inode->i_ino);
+               return -EPERM;
+       }
        /*
         * Set up the jbd2_inode if we are opening the inode for
         * writing and the journal is present
index 83bc8bfb3bea8eeefed38ca46ae8260779222405..9db04dd9b88a6e45782485ccf28844804294b103 100644 (file)
@@ -3161,14 +3161,17 @@ out:
 }
 #endif
 
-static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
                            ssize_t size, void *private)
 {
         ext4_io_end_t *io_end = iocb->private;
 
+       if (size <= 0)
+               return 0;
+
        /* if not async direct IO just return */
        if (!io_end)
-               return;
+               return 0;
 
        ext_debug("ext4_end_io_dio(): io_end 0x%p "
                  "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3179,6 +3182,8 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
        io_end->offset = offset;
        io_end->size = size;
        ext4_put_io_end(io_end);
+
+       return 0;
 }
 
 /*
index 06574dd77614a3b1c4396d5bb3c017c681209d00..48e4b8907826eca52a1e94e14bdfdb0fd2240c56 100644 (file)
@@ -1558,6 +1558,24 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
        struct ext4_dir_entry_2 *de;
        struct buffer_head *bh;
 
+       if (ext4_encrypted_inode(dir)) {
+               int res = ext4_get_encryption_info(dir);
+
+               /*
+                * This should be a properly defined flag for
+                * dentry->d_flags when we uplift this to the VFS.
+                * d_fsdata is set to (void *) 1 if the dentry is
+                * created while the directory was encrypted and we
+                * don't have access to the key.
+                */
+              dentry->d_fsdata = NULL;
+              if (ext4_encryption_info(dir))
+                      dentry->d_fsdata = (void *) 1;
+              d_set_d_op(dentry, &ext4_encrypted_d_ops);
+              if (res && res != -ENOKEY)
+                      return ERR_PTR(res);
+       }
+
        if (dentry->d_name.len > EXT4_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
 
@@ -1585,11 +1603,15 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
                        return ERR_PTR(-EFSCORRUPTED);
                }
                if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
-                   (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-                    S_ISLNK(inode->i_mode)) &&
+                   (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
                    !ext4_is_child_context_consistent_with_parent(dir,
                                                                  inode)) {
+                       int nokey = ext4_encrypted_inode(inode) &&
+                               !ext4_encryption_info(inode);
+
                        iput(inode);
+                       if (nokey)
+                               return ERR_PTR(-ENOKEY);
                        ext4_warning(inode->i_sb,
                                     "Inconsistent encryption contexts: %lu/%lu\n",
                                     (unsigned long) dir->i_ino,
index 3ed01ec011d75067579a4b894b4b0623af265a2d..b5bcbddceb91c981a43cb5ae66802d1e35eca5a1 100644 (file)
@@ -1132,6 +1132,7 @@ static const struct dquot_operations ext4_quota_operations = {
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
        .get_projid     = ext4_get_projid,
+       .get_next_id    = dquot_get_next_id,
 };
 
 static const struct quotactl_ops ext4_qctl_operations = {
index 3842af954cd5bc127f2f0aad13d0ff52b743779d..536bec99bd64d39570edd0f08ac5545e79ade5a2 100644 (file)
@@ -39,7 +39,7 @@ repeat:
                cond_resched();
                goto repeat;
        }
-       f2fs_wait_on_page_writeback(page, META);
+       f2fs_wait_on_page_writeback(page, META, true);
        SetPageUptodate(page);
        return page;
 }
@@ -232,13 +232,17 @@ static int f2fs_write_meta_page(struct page *page,
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;
 
-       f2fs_wait_on_page_writeback(page, META);
        write_meta_page(sbi, page);
        dec_page_count(sbi, F2FS_DIRTY_META);
+
+       if (wbc->for_reclaim)
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, META, WRITE);
+
        unlock_page(page);
 
-       if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi)))
+       if (unlikely(f2fs_cp_error(sbi)))
                f2fs_submit_merged_bio(sbi, META, WRITE);
+
        return 0;
 
 redirty_out:
@@ -252,13 +256,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        long diff, written;
 
-       trace_f2fs_writepages(mapping->host, wbc, META);
-
        /* collect a number of dirty meta pages and write together */
        if (wbc->for_kupdate ||
                get_pages(sbi, F2FS_DIRTY_META) < nr_pages_to_skip(sbi, META))
                goto skip_write;
 
+       trace_f2fs_writepages(mapping->host, wbc, META);
+
        /* if mounting is failed, skip writing node pages */
        mutex_lock(&sbi->cp_mutex);
        diff = nr_pages_to_write(sbi, META, wbc);
@@ -269,6 +273,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
 
 skip_write:
        wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
+       trace_f2fs_writepages(mapping->host, wbc, META);
        return 0;
 }
 
@@ -315,6 +320,9 @@ continue_unlock:
                                goto continue_unlock;
                        }
 
+                       f2fs_wait_on_page_writeback(page, META, true);
+
+                       BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;
 
@@ -921,6 +929,9 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
        int cp_payload_blks = __cp_payload(sbi);
        block_t discard_blk = NEXT_FREE_BLKADDR(sbi, curseg);
        bool invalidate = false;
+       struct super_block *sb = sbi->sb;
+       struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+       u64 kbytes_written;
 
        /*
         * This avoids to conduct wrong roll-forward operations and uses
@@ -1034,6 +1045,14 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 
        write_data_summaries(sbi, start_blk);
        start_blk += data_sum_blocks;
+
+       /* Record write statistics in the hot node summary */
+       kbytes_written = sbi->kbytes_written;
+       if (sb->s_bdev->bd_part)
+               kbytes_written += BD_PART_WRITTEN(sbi);
+
+       seg_i->sum_blk->info.kbytes_written = cpu_to_le64(kbytes_written);
+
        if (__remain_node_summaries(cpc->reason)) {
                write_node_summaries(sbi, start_blk);
                start_blk += NR_CURSEG_NODE_TYPE;
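
The checkpoint hunk above starts recording the filesystem's lifetime write volume in the hot node summary, adding the sectors the block layer has counted for the device to f2fs's own running total. A hedged sketch of the block-device side of that statistic, assuming part_stat_read() on the partition's write-sector counter (512-byte sectors, hence the shift to KiB); this illustrates the idea and is not f2fs's BD_PART_WRITTEN() macro itself.

#include <linux/fs.h>
#include <linux/genhd.h>

/* Sketch: cumulative kilobytes ever written to the device backing sb. */
static u64 bdev_kbytes_written(struct super_block *sb)
{
        struct hd_struct *part = sb->s_bdev->bd_part;

        if (!part)
                return 0;
        return (u64)part_stat_read(part, sectors[WRITE]) >> 1;
}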
index 4a62ef14e93275a881f967ceabd66c6bd306345c..95c5cf039711c6c994ef8b4a04d0ef37f777591d 100644 (file)
  * The usage of AES-XTS should conform to recommendations in NIST
  * Special Publication 800-38E and IEEE P1619/D16.
  */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/skcipher.h>
 #include <keys/user-type.h>
 #include <keys/encrypted-type.h>
-#include <linux/crypto.h>
 #include <linux/ecryptfs.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
@@ -328,21 +326,21 @@ static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
                                struct page *dest_page)
 {
        u8 xts_tweak[F2FS_XTS_TWEAK_SIZE];
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_F2FS_COMPLETION_RESULT(ecr);
        struct scatterlist dst, src;
        struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
 
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                                "%s: crypto_request_alloc() failed\n",
                                __func__);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(
+       skcipher_request_set_callback(
                req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                f2fs_crypt_complete, &ecr);
 
@@ -355,21 +353,21 @@ static int f2fs_page_crypto(struct f2fs_crypto_ctx *ctx,
        sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
        sg_init_table(&src, 1);
        sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
-       ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
-                                       xts_tweak);
+       skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
+                                  xts_tweak);
        if (rw == F2FS_DECRYPT)
-               res = crypto_ablkcipher_decrypt(req);
+               res = crypto_skcipher_decrypt(req);
        else
-               res = crypto_ablkcipher_encrypt(req);
+               res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res) {
                printk_ratelimited(KERN_ERR
-                       "%s: crypto_ablkcipher_encrypt() returned %d\n",
+                       "%s: crypto_skcipher_encrypt() returned %d\n",
                        __func__, res);
                return res;
        }
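
f2fs_page_crypto() above, like its ext4 counterpart, encrypts whole pages and passes the per-block XTS tweak as the IV argument. A hedged sketch of that page-granular setup, assuming a caller-provided tfm already keyed for an XTS cipher, a 16-byte tweak, PAGE_SIZE payloads, and a synchronous implementation for brevity.

#include <crypto/skcipher.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static int page_crypto_sketch(struct crypto_skcipher *tfm, u64 block,
                              struct page *src_page, struct page *dst_page)
{
        u8 tweak[16] = { 0 };
        struct skcipher_request *req;
        struct scatterlist src, dst;
        int rc;

        memcpy(tweak, &block, sizeof(block));   /* low 8 bytes: block index */

        req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req)
                return -ENOMEM;
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);

        sg_init_table(&src, 1);
        sg_set_page(&src, src_page, PAGE_SIZE, 0);
        sg_init_table(&dst, 1);
        sg_set_page(&dst, dst_page, PAGE_SIZE, 0);

        skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, tweak);
        rc = crypto_skcipher_encrypt(req);      /* assumes a sync tfm */

        skcipher_request_free(req);
        return rc;
}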
index ab377d496a39ad47924a3f79b48e1aad7538f2d2..16aec6653291a852fbceb82502c24d288b70db1f 100644 (file)
  *
  * This has not yet undergone a rigorous security audit.
  */
-#include <crypto/hash.h>
-#include <crypto/sha.h>
+#include <crypto/skcipher.h>
 #include <keys/encrypted-type.h>
 #include <keys/user-type.h>
-#include <linux/crypto.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
 #include <linux/key.h>
@@ -70,10 +68,10 @@ static int f2fs_fname_encrypt(struct inode *inode,
                        const struct qstr *iname, struct f2fs_str *oname)
 {
        u32 ciphertext_len;
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_F2FS_COMPLETION_RESULT(ecr);
        struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
        char iv[F2FS_CRYPTO_BLOCK_SIZE];
        struct scatterlist src_sg, dst_sg;
@@ -99,14 +97,14 @@ static int f2fs_fname_encrypt(struct inode *inode,
        }
 
        /* Allocate request */
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                        "%s: crypto_request_alloc() failed\n", __func__);
                kfree(alloc_buf);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        f2fs_dir_crypt_complete, &ecr);
 
@@ -121,15 +119,15 @@ static int f2fs_fname_encrypt(struct inode *inode,
        /* Create encryption request */
        sg_init_one(&src_sg, workbuf, ciphertext_len);
        sg_init_one(&dst_sg, oname->name, ciphertext_len);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
-       res = crypto_ablkcipher_encrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
+       res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
        kfree(alloc_buf);
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res < 0) {
                printk_ratelimited(KERN_ERR
                                "%s: Error (error code %d)\n", __func__, res);
@@ -148,11 +146,11 @@ static int f2fs_fname_encrypt(struct inode *inode,
 static int f2fs_fname_decrypt(struct inode *inode,
                        const struct f2fs_str *iname, struct f2fs_str *oname)
 {
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_F2FS_COMPLETION_RESULT(ecr);
        struct scatterlist src_sg, dst_sg;
        struct f2fs_crypt_info *ci = F2FS_I(inode)->i_crypt_info;
-       struct crypto_ablkcipher *tfm = ci->ci_ctfm;
+       struct crypto_skcipher *tfm = ci->ci_ctfm;
        int res = 0;
        char iv[F2FS_CRYPTO_BLOCK_SIZE];
        unsigned lim = max_name_len(inode);
@@ -161,13 +159,13 @@ static int f2fs_fname_decrypt(struct inode *inode,
                return -EIO;
 
        /* Allocate request */
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                printk_ratelimited(KERN_ERR
                        "%s: crypto_request_alloc() failed\n",  __func__);
                return -ENOMEM;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                f2fs_dir_crypt_complete, &ecr);
 
@@ -177,14 +175,14 @@ static int f2fs_fname_decrypt(struct inode *inode,
        /* Create decryption request */
        sg_init_one(&src_sg, iname->name, iname->len);
        sg_init_one(&dst_sg, oname->name, oname->len);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
-       res = crypto_ablkcipher_decrypt(req);
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
+       res = crypto_skcipher_decrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
-       ablkcipher_request_free(req);
+       skcipher_request_free(req);
        if (res < 0) {
                printk_ratelimited(KERN_ERR
                        "%s: Error in f2fs_fname_decrypt (error code %d)\n",
index 5de2d866a25c282fecb2bdf4592bc53fafa0ca85..2aeb6273bd8f21fc217a6dfc64d1ef52ade44baa 100644
@@ -14,7 +14,7 @@
 #include <linux/random.h>
 #include <linux/scatterlist.h>
 #include <uapi/linux/keyctl.h>
-#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/f2fs_fs.h>
 
 #include "f2fs.h"
@@ -44,46 +44,43 @@ static int f2fs_derive_key_aes(char deriving_key[F2FS_AES_128_ECB_KEY_SIZE],
                                char derived_key[F2FS_AES_256_XTS_KEY_SIZE])
 {
        int res = 0;
-       struct ablkcipher_request *req = NULL;
+       struct skcipher_request *req = NULL;
        DECLARE_F2FS_COMPLETION_RESULT(ecr);
        struct scatterlist src_sg, dst_sg;
-       struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
-                                                               0);
+       struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
 
        if (IS_ERR(tfm)) {
                res = PTR_ERR(tfm);
                tfm = NULL;
                goto out;
        }
-       crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-       req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+       crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       req = skcipher_request_alloc(tfm, GFP_NOFS);
        if (!req) {
                res = -ENOMEM;
                goto out;
        }
-       ablkcipher_request_set_callback(req,
+       skcipher_request_set_callback(req,
                        CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
                        derive_crypt_complete, &ecr);
-       res = crypto_ablkcipher_setkey(tfm, deriving_key,
+       res = crypto_skcipher_setkey(tfm, deriving_key,
                                F2FS_AES_128_ECB_KEY_SIZE);
        if (res < 0)
                goto out;
 
        sg_init_one(&src_sg, source_key, F2FS_AES_256_XTS_KEY_SIZE);
        sg_init_one(&dst_sg, derived_key, F2FS_AES_256_XTS_KEY_SIZE);
-       ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
+       skcipher_request_set_crypt(req, &src_sg, &dst_sg,
                                        F2FS_AES_256_XTS_KEY_SIZE, NULL);
-       res = crypto_ablkcipher_encrypt(req);
+       res = crypto_skcipher_encrypt(req);
        if (res == -EINPROGRESS || res == -EBUSY) {
                BUG_ON(req->base.data != &ecr);
                wait_for_completion(&ecr.completion);
                res = ecr.res;
        }
 out:
-       if (req)
-               ablkcipher_request_free(req);
-       if (tfm)
-               crypto_free_ablkcipher(tfm);
+       skcipher_request_free(req);
+       crypto_free_skcipher(tfm);
        return res;
 }
 
@@ -93,7 +90,7 @@ static void f2fs_free_crypt_info(struct f2fs_crypt_info *ci)
                return;
 
        key_put(ci->ci_keyring_key);
-       crypto_free_ablkcipher(ci->ci_ctfm);
+       crypto_free_skcipher(ci->ci_ctfm);
        kmem_cache_free(f2fs_crypt_info_cachep, ci);
 }
 
@@ -123,7 +120,7 @@ int _f2fs_get_encryption_info(struct inode *inode)
        struct f2fs_encryption_key *master_key;
        struct f2fs_encryption_context ctx;
        const struct user_key_payload *ukp;
-       struct crypto_ablkcipher *ctfm;
+       struct crypto_skcipher *ctfm;
        const char *cipher_str;
        char raw_key[F2FS_MAX_KEY_SIZE];
        char mode;
@@ -213,7 +210,7 @@ retry:
        if (res)
                goto out;
 
-       ctfm = crypto_alloc_ablkcipher(cipher_str, 0, 0);
+       ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
        if (!ctfm || IS_ERR(ctfm)) {
                res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
                printk(KERN_DEBUG
@@ -222,11 +219,10 @@ retry:
                goto out;
        }
        crypt_info->ci_ctfm = ctfm;
-       crypto_ablkcipher_clear_flags(ctfm, ~0);
-       crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctfm),
-                            CRYPTO_TFM_REQ_WEAK_KEY);
-       res = crypto_ablkcipher_setkey(ctfm, raw_key,
-                                       f2fs_encryption_key_size(mode));
+       crypto_skcipher_clear_flags(ctfm, ~0);
+       crypto_skcipher_set_flags(ctfm, CRYPTO_TFM_REQ_WEAK_KEY);
+       res = crypto_skcipher_setkey(ctfm, raw_key,
+                                    f2fs_encryption_key_size(mode));
        if (res)
                goto out;
 
index 5c06db17e41fa267f5b270061d2959b2a36803e4..03f948e84115df85ea68faadb856c7c3eef3383c 100644
@@ -67,7 +67,6 @@ static void f2fs_write_end_io(struct bio *bio)
                f2fs_restore_and_release_control_page(&page);
 
                if (unlikely(bio->bi_error)) {
-                       set_page_dirty(page);
                        set_bit(AS_EIO, &page->mapping->flags);
                        f2fs_stop_checkpoint(sbi);
                }
@@ -75,8 +74,7 @@ static void f2fs_write_end_io(struct bio *bio)
                dec_page_count(sbi, F2FS_WRITEBACK);
        }
 
-       if (!get_pages(sbi, F2FS_WRITEBACK) &&
-                       !list_empty(&sbi->cp_wait.task_list))
+       if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
                wake_up(&sbi->cp_wait);
 
        bio_put(bio);
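
With this change the write end-io path pays for wake_up() only when somebody is actually sleeping on cp_wait, and it asks wq_has_sleeper() instead of poking at the wait queue's task_list directly. A rough sketch of the waiter/waker pairing; demo_wait, demo_inflight and the two functions are made-up names, and the point is that wq_has_sleeper() supplies the memory barrier a bare waitqueue_active() check would lack:

#include <linux/atomic.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait);
static atomic_t demo_inflight = ATOMIC_INIT(0);

/* waiter: e.g. the checkpoint path waiting for writeback to drain */
static void demo_wait_for_writeback(void)
{
	wait_event(demo_wait, atomic_read(&demo_inflight) == 0);
}

/* waker: per-bio completion; wake_up() only if someone is waiting */
static void demo_end_io(void)
{
	if (atomic_dec_and_test(&demo_inflight) && wq_has_sleeper(&demo_wait))
		wake_up(&demo_wait);
}
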
@@ -116,8 +114,60 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
        io->bio = NULL;
 }
 
-void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
-                               enum page_type type, int rw)
+static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
+                                               struct page *page, nid_t ino)
+{
+       struct bio_vec *bvec;
+       struct page *target;
+       int i;
+
+       if (!io->bio)
+               return false;
+
+       if (!inode && !page && !ino)
+               return true;
+
+       bio_for_each_segment_all(bvec, io->bio, i) {
+
+               if (bvec->bv_page->mapping) {
+                       target = bvec->bv_page;
+               } else {
+                       struct f2fs_crypto_ctx *ctx;
+
+                       /* encrypted page */
+                       ctx = (struct f2fs_crypto_ctx *)page_private(
+                                                               bvec->bv_page);
+                       target = ctx->w.control_page;
+               }
+
+               if (inode && inode == target->mapping->host)
+                       return true;
+               if (page && page == target)
+                       return true;
+               if (ino && ino == ino_of_node(target))
+                       return true;
+       }
+
+       return false;
+}
+
+static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
+                                               struct page *page, nid_t ino,
+                                               enum page_type type)
+{
+       enum page_type btype = PAGE_TYPE_OF_BIO(type);
+       struct f2fs_bio_info *io = &sbi->write_io[btype];
+       bool ret;
+
+       down_read(&io->io_rwsem);
+       ret = __has_merged_page(io, inode, page, ino);
+       up_read(&io->io_rwsem);
+       return ret;
+}
+
+static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
+                               struct inode *inode, struct page *page,
+                               nid_t ino, enum page_type type, int rw)
 {
        enum page_type btype = PAGE_TYPE_OF_BIO(type);
        struct f2fs_bio_info *io;
@@ -126,6 +176,9 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
 
        down_write(&io->io_rwsem);
 
+       if (!__has_merged_page(io, inode, page, ino))
+               goto out;
+
        /* change META to META_FLUSH in the checkpoint procedure */
        if (type >= META_FLUSH) {
                io->fio.type = META_FLUSH;
@@ -135,9 +188,24 @@ void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
                        io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
        }
        __submit_merged_bio(io);
+out:
        up_write(&io->io_rwsem);
 }
 
+void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
+                                                                       int rw)
+{
+       __f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
+}
+
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
+                               struct inode *inode, struct page *page,
+                               nid_t ino, enum page_type type, int rw)
+{
+       if (has_merged_page(sbi, inode, page, ino, type))
+               __f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
+}
+
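
f2fs_submit_merged_bio_cond() first asks has_merged_page() under the read lock whether the pending bio really touches the given inode, page or ino, and __f2fs_submit_merged_bio() repeats the test under the write lock because another writer may have flushed the bio in between. A generic sketch of that check-then-recheck shape; struct demo_io and demo_flush_cond are illustrative, not the f2fs code:

#include <linux/rwsem.h>
#include <linux/types.h>

struct demo_io {
	struct rw_semaphore lock;
	bool has_pending;		/* stands in for __has_merged_page() */
};

static void demo_flush_cond(struct demo_io *io)
{
	bool worth_it;

	down_read(&io->lock);
	worth_it = io->has_pending;
	up_read(&io->lock);
	if (!worth_it)
		return;

	down_write(&io->lock);
	if (io->has_pending) {		/* re-check: state may have changed */
		/* ... build and submit the merged bio here ... */
		io->has_pending = false;
	}
	up_write(&io->lock);
}
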
 /*
  * Fill the locked page with data located in the block address.
  * Return unlocked page.
@@ -218,7 +286,7 @@ void set_data_blkaddr(struct dnode_of_data *dn)
        struct page *node_page = dn->node_page;
        unsigned int ofs_in_node = dn->ofs_in_node;
 
-       f2fs_wait_on_page_writeback(node_page, NODE);
+       f2fs_wait_on_page_writeback(node_page, NODE, true);
 
        rn = F2FS_NODE(node_page);
 
@@ -461,7 +529,6 @@ got_it:
 static int __allocate_data_block(struct dnode_of_data *dn)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
-       struct f2fs_inode_info *fi = F2FS_I(dn->inode);
        struct f2fs_summary sum;
        struct node_info ni;
        int seg = CURSEG_WARM_DATA;
@@ -489,7 +556,7 @@ alloc:
        set_data_blkaddr(dn);
 
        /* update i_size */
-       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
                                                        dn->ofs_in_node;
        if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
                i_size_write(dn->inode,
@@ -497,67 +564,33 @@ alloc:
        return 0;
 }
 
-static int __allocate_data_blocks(struct inode *inode, loff_t offset,
-                                                       size_t count)
+ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
 {
-       struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-       struct dnode_of_data dn;
-       u64 start = F2FS_BYTES_TO_BLK(offset);
-       u64 len = F2FS_BYTES_TO_BLK(count);
-       bool allocated;
-       u64 end_offset;
-       int err = 0;
-
-       while (len) {
-               f2fs_lock_op(sbi);
-
-               /* When reading holes, we need its node page */
-               set_new_dnode(&dn, inode, NULL, NULL, 0);
-               err = get_dnode_of_data(&dn, start, ALLOC_NODE);
-               if (err)
-                       goto out;
-
-               allocated = false;
-               end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
-
-               while (dn.ofs_in_node < end_offset && len) {
-                       block_t blkaddr;
-
-                       if (unlikely(f2fs_cp_error(sbi))) {
-                               err = -EIO;
-                               goto sync_out;
-                       }
-
-                       blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
-                       if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
-                               err = __allocate_data_block(&dn);
-                               if (err)
-                                       goto sync_out;
-                               allocated = true;
-                       }
-                       len--;
-                       start++;
-                       dn.ofs_in_node++;
-               }
+       struct inode *inode = file_inode(iocb->ki_filp);
+       struct f2fs_map_blocks map;
+       ssize_t ret = 0;
 
-               if (allocated)
-                       sync_inode_page(&dn);
+       map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
+       map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
+       map.m_next_pgofs = NULL;
 
-               f2fs_put_dnode(&dn);
-               f2fs_unlock_op(sbi);
+       if (f2fs_encrypted_inode(inode))
+               return 0;
 
-               f2fs_balance_fs(sbi, dn.node_changed);
+       if (iocb->ki_flags & IOCB_DIRECT) {
+               ret = f2fs_convert_inline_inode(inode);
+               if (ret)
+                       return ret;
+               return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
        }
-       return err;
-
-sync_out:
-       if (allocated)
-               sync_inode_page(&dn);
-       f2fs_put_dnode(&dn);
-out:
-       f2fs_unlock_op(sbi);
-       f2fs_balance_fs(sbi, dn.node_changed);
-       return err;
+       if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
+               ret = f2fs_convert_inline_inode(inode);
+               if (ret)
+                       return ret;
+       }
+       if (!f2fs_has_inline_data(inode))
+               return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
+       return ret;
 }
 
 /*
@@ -588,13 +621,14 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
        /* it only supports block size == page size */
        pgofs = (pgoff_t)map->m_lblk;
 
-       if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
+       if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
                map->m_pblk = ei.blk + pgofs - ei.fofs;
                map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
                map->m_flags = F2FS_MAP_MAPPED;
                goto out;
        }
 
+next_dnode:
        if (create)
                f2fs_lock_op(sbi);
 
@@ -602,120 +636,98 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, pgofs, mode);
        if (err) {
-               if (err == -ENOENT)
+               if (err == -ENOENT) {
                        err = 0;
+                       if (map->m_next_pgofs)
+                               *map->m_next_pgofs =
+                                       get_next_page_offset(&dn, pgofs);
+               }
                goto unlock_out;
        }
 
-       if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) {
+       end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
+
+next_block:
+       blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+
+       if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
                if (create) {
                        if (unlikely(f2fs_cp_error(sbi))) {
                                err = -EIO;
-                               goto put_out;
+                               goto sync_out;
+                       }
+                       if (flag == F2FS_GET_BLOCK_PRE_AIO) {
+                               if (blkaddr == NULL_ADDR)
+                                       err = reserve_new_block(&dn);
+                       } else {
+                               err = __allocate_data_block(&dn);
                        }
-                       err = __allocate_data_block(&dn);
                        if (err)
-                               goto put_out;
+                               goto sync_out;
                        allocated = true;
                        map->m_flags = F2FS_MAP_NEW;
+                       blkaddr = dn.data_blkaddr;
                } else {
+                       if (flag == F2FS_GET_BLOCK_FIEMAP &&
+                                               blkaddr == NULL_ADDR) {
+                               if (map->m_next_pgofs)
+                                       *map->m_next_pgofs = pgofs + 1;
+                       }
                        if (flag != F2FS_GET_BLOCK_FIEMAP ||
-                                               dn.data_blkaddr != NEW_ADDR) {
+                                               blkaddr != NEW_ADDR) {
                                if (flag == F2FS_GET_BLOCK_BMAP)
                                        err = -ENOENT;
-                               goto put_out;
+                               goto sync_out;
                        }
-
-                       /*
-                        * preallocated unwritten block should be mapped
-                        * for fiemap.
-                        */
-                       if (dn.data_blkaddr == NEW_ADDR)
-                               map->m_flags = F2FS_MAP_UNWRITTEN;
                }
        }
 
-       map->m_flags |= F2FS_MAP_MAPPED;
-       map->m_pblk = dn.data_blkaddr;
-       map->m_len = 1;
+       if (map->m_len == 0) {
+               /* preallocated unwritten block should be mapped for fiemap. */
+               if (blkaddr == NEW_ADDR)
+                       map->m_flags |= F2FS_MAP_UNWRITTEN;
+               map->m_flags |= F2FS_MAP_MAPPED;
+
+               map->m_pblk = blkaddr;
+               map->m_len = 1;
+       } else if ((map->m_pblk != NEW_ADDR &&
+                       blkaddr == (map->m_pblk + ofs)) ||
+                       (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
+                       flag == F2FS_GET_BLOCK_PRE_DIO ||
+                       flag == F2FS_GET_BLOCK_PRE_AIO) {
+               ofs++;
+               map->m_len++;
+       } else {
+               goto sync_out;
+       }
 
-       end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
        dn.ofs_in_node++;
        pgofs++;
 
-get_next:
-       if (map->m_len >= maxblocks)
-               goto sync_out;
+       if (map->m_len < maxblocks) {
+               if (dn.ofs_in_node < end_offset)
+                       goto next_block;
 
-       if (dn.ofs_in_node >= end_offset) {
                if (allocated)
                        sync_inode_page(&dn);
-               allocated = false;
                f2fs_put_dnode(&dn);
 
                if (create) {
                        f2fs_unlock_op(sbi);
-                       f2fs_balance_fs(sbi, dn.node_changed);
-                       f2fs_lock_op(sbi);
-               }
-
-               set_new_dnode(&dn, inode, NULL, NULL, 0);
-               err = get_dnode_of_data(&dn, pgofs, mode);
-               if (err) {
-                       if (err == -ENOENT)
-                               err = 0;
-                       goto unlock_out;
+                       f2fs_balance_fs(sbi, allocated);
                }
-
-               end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
-       }
-
-       blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
-
-       if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
-               if (create) {
-                       if (unlikely(f2fs_cp_error(sbi))) {
-                               err = -EIO;
-                               goto sync_out;
-                       }
-                       err = __allocate_data_block(&dn);
-                       if (err)
-                               goto sync_out;
-                       allocated = true;
-                       map->m_flags |= F2FS_MAP_NEW;
-                       blkaddr = dn.data_blkaddr;
-               } else {
-                       /*
-                        * we only merge preallocated unwritten blocks
-                        * for fiemap.
-                        */
-                       if (flag != F2FS_GET_BLOCK_FIEMAP ||
-                                       blkaddr != NEW_ADDR)
-                               goto sync_out;
-               }
-       }
-
-       /* Give more consecutive addresses for the readahead */
-       if ((map->m_pblk != NEW_ADDR &&
-                       blkaddr == (map->m_pblk + ofs)) ||
-                       (map->m_pblk == NEW_ADDR &&
-                       blkaddr == NEW_ADDR)) {
-               ofs++;
-               dn.ofs_in_node++;
-               pgofs++;
-               map->m_len++;
-               goto get_next;
+               allocated = false;
+               goto next_dnode;
        }
 
 sync_out:
        if (allocated)
                sync_inode_page(&dn);
-put_out:
        f2fs_put_dnode(&dn);
 unlock_out:
        if (create) {
                f2fs_unlock_op(sbi);
-               f2fs_balance_fs(sbi, dn.node_changed);
+               f2fs_balance_fs(sbi, allocated);
        }
 out:
        trace_f2fs_map_blocks(inode, map, err);
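
The rewritten f2fs_map_blocks() grows one mapping in place: a block extends the current extent when it is physically consecutive (m_pblk + ofs), when both the old and new addresses are NEW_ADDR (preallocated but unwritten), or during the PRE_DIO/PRE_AIO preallocation passes. A userspace toy of just that merging rule, as a sketch; demo_map_blocks, demo_map and the NULL_ADDR/NEW_ADDR values below are stand-ins rather than kernel definitions:

#include <stdio.h>

#define NULL_ADDR 0u		/* hole */
#define NEW_ADDR  0xffffffffu	/* preallocated, not yet written */

struct demo_map {
	unsigned int pblk;	/* first mapped block */
	unsigned int len;	/* number of consecutive blocks */
};

static void demo_map_blocks(const unsigned int *blkaddr, unsigned int maxblocks,
			    struct demo_map *map)
{
	unsigned int i, ofs = 1;

	map->len = 0;
	for (i = 0; i < maxblocks; i++) {
		unsigned int blk = blkaddr[i];

		if (blk == NULL_ADDR)
			break;				/* hole: stop here */
		if (map->len == 0) {
			map->pblk = blk;
			map->len = 1;
		} else if ((map->pblk != NEW_ADDR && blk == map->pblk + ofs) ||
			   (map->pblk == NEW_ADDR && blk == NEW_ADDR)) {
			ofs++;
			map->len++;			/* mergeable: extend */
		} else {
			break;				/* discontiguous: stop */
		}
	}
}

int main(void)
{
	unsigned int addrs[] = { 100, 101, 102, 200, 201 };
	struct demo_map map;

	demo_map_blocks(addrs, 5, &map);
	printf("pblk=%u len=%u\n", map.pblk, map.len);	/* pblk=100 len=3 */
	return 0;
}
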
@@ -723,13 +735,15 @@ out:
 }
 
 static int __get_data_block(struct inode *inode, sector_t iblock,
-                       struct buffer_head *bh, int create, int flag)
+                       struct buffer_head *bh, int create, int flag,
+                       pgoff_t *next_pgofs)
 {
        struct f2fs_map_blocks map;
        int ret;
 
        map.m_lblk = iblock;
        map.m_len = bh->b_size >> inode->i_blkbits;
+       map.m_next_pgofs = next_pgofs;
 
        ret = f2fs_map_blocks(inode, &map, create, flag);
        if (!ret) {
@@ -741,16 +755,18 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
 }
 
 static int get_data_block(struct inode *inode, sector_t iblock,
-                       struct buffer_head *bh_result, int create, int flag)
+                       struct buffer_head *bh_result, int create, int flag,
+                       pgoff_t *next_pgofs)
 {
-       return __get_data_block(inode, iblock, bh_result, create, flag);
+       return __get_data_block(inode, iblock, bh_result, create,
+                                                       flag, next_pgofs);
 }
 
 static int get_data_block_dio(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
 {
        return __get_data_block(inode, iblock, bh_result, create,
-                                               F2FS_GET_BLOCK_DIO);
+                                               F2FS_GET_BLOCK_DIO, NULL);
 }
 
 static int get_data_block_bmap(struct inode *inode, sector_t iblock,
@@ -761,7 +777,7 @@ static int get_data_block_bmap(struct inode *inode, sector_t iblock,
                return -EFBIG;
 
        return __get_data_block(inode, iblock, bh_result, create,
-                                               F2FS_GET_BLOCK_BMAP);
+                                               F2FS_GET_BLOCK_BMAP, NULL);
 }
 
 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
@@ -779,6 +795,7 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 {
        struct buffer_head map_bh;
        sector_t start_blk, last_blk;
+       pgoff_t next_pgofs;
        loff_t isize;
        u64 logical = 0, phys = 0, size = 0;
        u32 flags = 0;
@@ -814,14 +831,15 @@ next:
        map_bh.b_size = len;
 
        ret = get_data_block(inode, start_blk, &map_bh, 0,
-                                       F2FS_GET_BLOCK_FIEMAP);
+                                       F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
        if (ret)
                goto out;
 
        /* HOLE */
        if (!buffer_mapped(&map_bh)) {
+               start_blk = next_pgofs;
                /* Go through holes until we pass the EOF */
-               if (blk_to_logical(inode, start_blk++) < isize)
+               if (blk_to_logical(inode, start_blk) < isize)
                        goto prep_next;
                /* A hole beyond isize means no more extents.
                 * Note that the premise is that filesystems don't
@@ -889,6 +907,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;
+       map.m_next_pgofs = NULL;
 
        for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
 
@@ -927,7 +946,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
                        map.m_len = last_block - block_in_file;
 
                        if (f2fs_map_blocks(inode, &map, 0,
-                                                       F2FS_GET_BLOCK_READ))
+                                               F2FS_GET_BLOCK_READ))
                                goto set_error_page;
                }
 got_it:
@@ -1177,12 +1196,18 @@ out:
        inode_dec_dirty_pages(inode);
        if (err)
                ClearPageUptodate(page);
+
+       if (wbc->for_reclaim) {
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
+               remove_dirty_inode(inode);
+       }
+
        unlock_page(page);
        f2fs_balance_fs(sbi, need_balance_fs);
-       if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi))) {
+
+       if (unlikely(f2fs_cp_error(sbi)))
                f2fs_submit_merged_bio(sbi, DATA, WRITE);
-               remove_dirty_inode(inode);
-       }
+
        return 0;
 
 redirty_out:
@@ -1282,7 +1307,8 @@ continue_unlock:
 
                        if (PageWriteback(page)) {
                                if (wbc->sync_mode != WB_SYNC_NONE)
-                                       f2fs_wait_on_page_writeback(page, DATA);
+                                       f2fs_wait_on_page_writeback(page,
+                                                               DATA, true);
                                else
                                        goto continue_unlock;
                        }
@@ -1339,8 +1365,6 @@ static int f2fs_write_data_pages(struct address_space *mapping,
        int ret;
        long diff;
 
-       trace_f2fs_writepages(mapping->host, wbc, DATA);
-
        /* deal with chardevs and other special file */
        if (!mapping->a_ops->writepage)
                return 0;
@@ -1362,14 +1386,16 @@ static int f2fs_write_data_pages(struct address_space *mapping,
        if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
                goto skip_write;
 
+       trace_f2fs_writepages(mapping->host, wbc, DATA);
+
        diff = nr_pages_to_write(sbi, DATA, wbc);
 
-       if (!S_ISDIR(inode->i_mode)) {
+       if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
                mutex_lock(&sbi->writepages);
                locked = true;
        }
        ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
-       f2fs_submit_merged_bio(sbi, DATA, WRITE);
+       f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
        if (locked)
                mutex_unlock(&sbi->writepages);
 
@@ -1380,6 +1406,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 
 skip_write:
        wbc->pages_skipped += get_dirty_pages(inode);
+       trace_f2fs_writepages(mapping->host, wbc, DATA);
        return 0;
 }
 
@@ -1406,6 +1433,14 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
        struct extent_info ei;
        int err = 0;
 
+       /*
+        * we already allocated all the blocks, so we don't need to get
+        * the block addresses when there is no need to fill the page.
+        */
+       if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
+                                       len == PAGE_CACHE_SIZE)
+               return 0;
+
        if (f2fs_has_inline_data(inode) ||
                        (pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
                f2fs_lock_op(sbi);
@@ -1425,7 +1460,7 @@ restart:
                if (pos + len <= MAX_INLINE_DATA) {
                        read_inline_data(page, ipage);
                        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
-                       sync_inode_page(&dn);
+                       set_inline_node(ipage);
                } else {
                        err = f2fs_convert_inline_page(&dn, page);
                        if (err)
@@ -1439,13 +1474,9 @@ restart:
                if (f2fs_lookup_extent_cache(inode, index, &ei)) {
                        dn.data_blkaddr = ei.blk + index - ei.fofs;
                } else {
-                       bool restart = false;
-
                        /* hole case */
                        err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
-                       if (err || (!err && dn.data_blkaddr == NULL_ADDR))
-                               restart = true;
-                       if (restart) {
+                       if (err || (!err && dn.data_blkaddr == NULL_ADDR)) {
                                f2fs_put_dnode(&dn);
                                f2fs_lock_op(sbi);
                                locked = true;
@@ -1514,7 +1545,7 @@ repeat:
                }
        }
 
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, false);
 
        /* wait for GCed encrypted page writeback */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
@@ -1592,7 +1623,6 @@ static int f2fs_write_end(struct file *file,
        if (pos + copied > i_size_read(inode)) {
                i_size_write(inode, pos + copied);
                mark_inode_dirty(inode);
-               update_inode_page(inode);
        }
 
        f2fs_put_page(page, 1);
@@ -1617,34 +1647,21 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
 static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
                              loff_t offset)
 {
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
+       struct address_space *mapping = iocb->ki_filp->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        int err;
 
-       /* we don't need to use inline_data strictly */
-       err = f2fs_convert_inline_inode(inode);
+       err = check_direct_IO(inode, iter, offset);
        if (err)
                return err;
 
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                return 0;
 
-       err = check_direct_IO(inode, iter, offset);
-       if (err)
-               return err;
-
        trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 
-       if (iov_iter_rw(iter) == WRITE) {
-               err = __allocate_data_blocks(inode, offset, count);
-               if (err)
-                       goto out;
-       }
-
        err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
-out:
        if (err < 0 && iov_iter_rw(iter) == WRITE)
                f2fs_write_failed(mapping, offset + count);
 
index faa7495e2d7e62effef70b681c52ba2c75ca36d9..8950fc3cc2f7578520bfb340e80c7269b3dd8064 100644
@@ -296,7 +296,7 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
 {
        enum page_type type = f2fs_has_inline_dentry(dir) ? NODE : DATA;
        lock_page(page);
-       f2fs_wait_on_page_writeback(page, type);
+       f2fs_wait_on_page_writeback(page, type, true);
        de->ino = cpu_to_le32(inode->i_ino);
        set_de_type(de, inode->i_mode);
        f2fs_dentry_kunmap(dir, page);
@@ -311,7 +311,7 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
 {
        struct f2fs_inode *ri;
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
        /* copy name info. to this inode page */
        ri = F2FS_INODE(ipage);
@@ -598,7 +598,7 @@ start:
        ++level;
        goto start;
 add_dentry:
-       f2fs_wait_on_page_writeback(dentry_page, DATA);
+       f2fs_wait_on_page_writeback(dentry_page, DATA, true);
 
        if (inode) {
                down_write(&F2FS_I(inode)->i_sem);
@@ -709,7 +709,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
                return f2fs_delete_inline_entry(dentry, page, dir, inode);
 
        lock_page(page);
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
 
        dentry_blk = page_address(page);
        bit_pos = dentry - dentry_blk->dentry;
index ccd5c636d3fe026d0c5379d4062ee204fc7faa17..071a1b19e5afb793e8eeea924af9ce32eca30a76 100644
@@ -33,6 +33,7 @@ static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
 
        en->ei = *ei;
        INIT_LIST_HEAD(&en->list);
+       en->et = et;
 
        rb_link_node(&en->rb_node, parent, p);
        rb_insert_color(&en->rb_node, &et->root);
@@ -50,6 +51,24 @@ static void __detach_extent_node(struct f2fs_sb_info *sbi,
 
        if (et->cached_en == en)
                et->cached_en = NULL;
+       kmem_cache_free(extent_node_slab, en);
+}
+
+/*
+ * Flow to release an extent_node:
+ * 1. list_del_init
+ * 2. __detach_extent_node
+ * 3. kmem_cache_free.
+ */
+static void __release_extent_node(struct f2fs_sb_info *sbi,
+                       struct extent_tree *et, struct extent_node *en)
+{
+       spin_lock(&sbi->extent_lock);
+       f2fs_bug_on(sbi, list_empty(&en->list));
+       list_del_init(&en->list);
+       spin_unlock(&sbi->extent_lock);
+
+       __detach_extent_node(sbi, et, en);
 }
 
 static struct extent_tree *__grab_extent_tree(struct inode *inode)
@@ -129,7 +148,7 @@ static struct extent_node *__init_extent_tree(struct f2fs_sb_info *sbi,
 }
 
 static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
-                                       struct extent_tree *et, bool free_all)
+                                       struct extent_tree *et)
 {
        struct rb_node *node, *next;
        struct extent_node *en;
@@ -139,18 +158,7 @@ static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
        while (node) {
                next = rb_next(node);
                en = rb_entry(node, struct extent_node, rb_node);
-
-               if (free_all) {
-                       spin_lock(&sbi->extent_lock);
-                       if (!list_empty(&en->list))
-                               list_del_init(&en->list);
-                       spin_unlock(&sbi->extent_lock);
-               }
-
-               if (free_all || list_empty(&en->list)) {
-                       __detach_extent_node(sbi, et, en);
-                       kmem_cache_free(extent_node_slab, en);
-               }
+               __release_extent_node(sbi, et, en);
                node = next;
        }
 
@@ -232,9 +240,10 @@ static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
        if (en) {
                *ei = en->ei;
                spin_lock(&sbi->extent_lock);
-               if (!list_empty(&en->list))
+               if (!list_empty(&en->list)) {
                        list_move_tail(&en->list, &sbi->extent_list);
-               et->cached_en = en;
+                       et->cached_en = en;
+               }
                spin_unlock(&sbi->extent_lock);
                ret = true;
        }
@@ -329,7 +338,6 @@ lookup_neighbors:
 
 static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
                                struct extent_tree *et, struct extent_info *ei,
-                               struct extent_node **den,
                                struct extent_node *prev_ex,
                                struct extent_node *next_ex)
 {
@@ -342,20 +350,25 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
        }
 
        if (next_ex && __is_front_mergeable(ei, &next_ex->ei)) {
-               if (en) {
-                       __detach_extent_node(sbi, et, prev_ex);
-                       *den = prev_ex;
-               }
+               if (en)
+                       __release_extent_node(sbi, et, prev_ex);
                next_ex->ei.fofs = ei->fofs;
                next_ex->ei.blk = ei->blk;
                next_ex->ei.len += ei->len;
                en = next_ex;
        }
 
-       if (en) {
-               __try_update_largest_extent(et, en);
+       if (!en)
+               return NULL;
+
+       __try_update_largest_extent(et, en);
+
+       spin_lock(&sbi->extent_lock);
+       if (!list_empty(&en->list)) {
+               list_move_tail(&en->list, &sbi->extent_list);
                et->cached_en = en;
        }
+       spin_unlock(&sbi->extent_lock);
        return en;
 }
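
__try_merge_extent_node() absorbs the new range into a neighbour when file offsets and block addresses both line up end-to-end, releases the previous node if the front neighbour takes over the combined range, and then refreshes the survivor's position in the global LRU list. A userspace toy of the back-merge test; the field layout and back_mergeable() follow the apparent semantics of __is_back_mergeable(), which is an assumption here:

#include <stdbool.h>
#include <stdio.h>

struct demo_extent {
	unsigned int fofs;	/* first file offset covered */
	unsigned int blk;	/* first block address */
	unsigned int len;	/* number of blocks */
};

static bool back_mergeable(const struct demo_extent *prev,
			   const struct demo_extent *cur)
{
	return prev->fofs + prev->len == cur->fofs &&
	       prev->blk + prev->len == cur->blk;
}

int main(void)
{
	struct demo_extent prev = { .fofs = 10, .blk = 100, .len = 4 };
	struct demo_extent cur  = { .fofs = 14, .blk = 104, .len = 2 };

	if (back_mergeable(&prev, &cur)) {
		prev.len += cur.len;	/* absorb cur into prev */
		printf("merged: fofs=%u blk=%u len=%u\n",
		       prev.fofs, prev.blk, prev.len);	/* 10 100 6 */
	}
	return 0;
}
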
 
@@ -391,7 +404,12 @@ do_insert:
                return NULL;
 
        __try_update_largest_extent(et, en);
+
+       /* update in global extent list */
+       spin_lock(&sbi->extent_lock);
+       list_add_tail(&en->list, &sbi->extent_list);
        et->cached_en = en;
+       spin_unlock(&sbi->extent_lock);
        return en;
 }
 
@@ -479,7 +497,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
                if (parts)
                        __try_update_largest_extent(et, en);
                else
-                       __detach_extent_node(sbi, et, en);
+                       __release_extent_node(sbi, et, en);
 
                /*
                 * if original extent is split into zero or two parts, extent
@@ -490,31 +508,15 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
                        insert_p = NULL;
                        insert_parent = NULL;
                }
-
-               /* update in global extent list */
-               spin_lock(&sbi->extent_lock);
-               if (!parts && !list_empty(&en->list))
-                       list_del(&en->list);
-               if (en1)
-                       list_add_tail(&en1->list, &sbi->extent_list);
-               spin_unlock(&sbi->extent_lock);
-
-               /* release extent node */
-               if (!parts)
-                       kmem_cache_free(extent_node_slab, en);
-
                en = next_en;
        }
 
        /* 3. update extent in extent cache */
        if (blkaddr) {
-               struct extent_node *den = NULL;
 
                set_extent_info(&ei, fofs, blkaddr, len);
-               en1 = __try_merge_extent_node(sbi, et, &ei, &den,
-                                                       prev_en, next_en);
-               if (!en1)
-                       en1 = __insert_extent_tree(sbi, et, &ei,
+               if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
+                       __insert_extent_tree(sbi, et, &ei,
                                                insert_p, insert_parent);
 
                /* give up extent_cache, if split and small updates happen */
@@ -524,24 +526,10 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
                        et->largest.len = 0;
                        set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
                }
-
-               spin_lock(&sbi->extent_lock);
-               if (en1) {
-                       if (list_empty(&en1->list))
-                               list_add_tail(&en1->list, &sbi->extent_list);
-                       else
-                               list_move_tail(&en1->list, &sbi->extent_list);
-               }
-               if (den && !list_empty(&den->list))
-                       list_del(&den->list);
-               spin_unlock(&sbi->extent_lock);
-
-               if (den)
-                       kmem_cache_free(extent_node_slab, den);
        }
 
        if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
-               __free_extent_tree(sbi, et, true);
+               __free_extent_tree(sbi, et);
 
        write_unlock(&et->lock);
 
@@ -550,14 +538,10 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
 
 unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 {
-       struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
        struct extent_tree *et, *next;
-       struct extent_node *en, *tmp;
-       unsigned long ino = F2FS_ROOT_INO(sbi);
-       unsigned int found;
+       struct extent_node *en;
        unsigned int node_cnt = 0, tree_cnt = 0;
        int remained;
-       bool do_free = false;
 
        if (!test_opt(sbi, EXTENT_CACHE))
                return 0;
@@ -572,10 +556,10 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
        list_for_each_entry_safe(et, next, &sbi->zombie_list, list) {
                if (atomic_read(&et->node_cnt)) {
                        write_lock(&et->lock);
-                       node_cnt += __free_extent_tree(sbi, et, true);
+                       node_cnt += __free_extent_tree(sbi, et);
                        write_unlock(&et->lock);
                }
-
+               f2fs_bug_on(sbi, atomic_read(&et->node_cnt));
                list_del_init(&et->list);
                radix_tree_delete(&sbi->extent_tree_root, et->ino);
                kmem_cache_free(extent_tree_slab, et);
@@ -585,6 +569,7 @@ unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
 
                if (node_cnt + tree_cnt >= nr_shrink)
                        goto unlock_out;
+               cond_resched();
        }
        up_write(&sbi->extent_tree_lock);
 
@@ -596,42 +581,29 @@ free_node:
        remained = nr_shrink - (node_cnt + tree_cnt);
 
        spin_lock(&sbi->extent_lock);
-       list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
-               if (!remained--)
+       for (; remained > 0; remained--) {
+               if (list_empty(&sbi->extent_list))
                        break;
-               list_del_init(&en->list);
-               do_free = true;
-       }
-       spin_unlock(&sbi->extent_lock);
-
-       if (do_free == false)
-               goto unlock_out;
-
-       /*
-        * reset ino for searching victims from beginning of global extent tree.
-        */
-       ino = F2FS_ROOT_INO(sbi);
-
-       while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
-                               (void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
-               unsigned i;
-
-               ino = treevec[found - 1]->ino + 1;
-               for (i = 0; i < found; i++) {
-                       struct extent_tree *et = treevec[i];
+               en = list_first_entry(&sbi->extent_list,
+                                       struct extent_node, list);
+               et = en->et;
+               if (!write_trylock(&et->lock)) {
+                       /* refresh this extent node's position in extent list */
+                       list_move_tail(&en->list, &sbi->extent_list);
+                       continue;
+               }
 
-                       if (!atomic_read(&et->node_cnt))
-                               continue;
+               list_del_init(&en->list);
+               spin_unlock(&sbi->extent_lock);
 
-                       if (write_trylock(&et->lock)) {
-                               node_cnt += __free_extent_tree(sbi, et, false);
-                               write_unlock(&et->lock);
-                       }
+               __detach_extent_node(sbi, et, en);
 
-                       if (node_cnt + tree_cnt >= nr_shrink)
-                               goto unlock_out;
-               }
+               write_unlock(&et->lock);
+               node_cnt++;
+               spin_lock(&sbi->extent_lock);
        }
+       spin_unlock(&sbi->extent_lock);
+
 unlock_out:
        up_write(&sbi->extent_tree_lock);
 out:
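
The reworked shrink loop walks the global extent_list head-first and keeps a strict lock order: the list spinlock is taken before a tree's rwlock, and only via write_trylock(), so a busy tree never blocks the shrinker; its node is simply rotated to the tail and retried on a later pass. A stripped-down locking skeleton of that pattern; the demo_* names are illustrative and the rbtree detach is elided to a comment, so this is not the f2fs code:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_tree {
	rwlock_t lock;
};

struct demo_node {
	struct list_head lru;		/* global LRU list linkage */
	struct demo_tree *tree;		/* owning per-inode tree */
};

static LIST_HEAD(demo_lru);
static DEFINE_SPINLOCK(demo_lru_lock);

static unsigned int demo_shrink(unsigned int nr)
{
	unsigned int freed = 0, tries;

	spin_lock(&demo_lru_lock);
	for (tries = nr; tries > 0; tries--) {
		struct demo_node *n;

		if (list_empty(&demo_lru))
			break;
		n = list_first_entry(&demo_lru, struct demo_node, lru);
		if (!write_trylock(&n->tree->lock)) {
			/* tree busy: rotate the node, try another victim */
			list_move_tail(&n->lru, &demo_lru);
			continue;
		}
		list_del_init(&n->lru);
		spin_unlock(&demo_lru_lock);

		/* detach n from its tree here, still under tree->lock */
		write_unlock(&n->tree->lock);
		kfree(n);
		freed++;

		spin_lock(&demo_lru_lock);
	}
	spin_unlock(&demo_lru_lock);
	return freed;
}
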
@@ -650,7 +622,7 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
                return 0;
 
        write_lock(&et->lock);
-       node_cnt = __free_extent_tree(sbi, et, true);
+       node_cnt = __free_extent_tree(sbi, et);
        write_unlock(&et->lock);
 
        return node_cnt;
@@ -701,7 +673,6 @@ bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
 
 void f2fs_update_extent_cache(struct dnode_of_data *dn)
 {
-       struct f2fs_inode_info *fi = F2FS_I(dn->inode);
        pgoff_t fofs;
 
        if (!f2fs_may_extent_tree(dn->inode))
@@ -710,8 +681,8 @@ void f2fs_update_extent_cache(struct dnode_of_data *dn)
        f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);
 
 
-       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
-                                                       dn->ofs_in_node;
+       fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
+                                                               dn->ofs_in_node;
 
        if (f2fs_update_extent_tree_range(dn->inode, fofs, dn->data_blkaddr, 1))
                sync_inode_page(dn);
index ff79054c6cf6a5bfe254abaf203ae94acd44739b..f6a841b85d40a45bb720afa3a04b8e3a42373d90 100644
@@ -354,6 +354,7 @@ struct extent_node {
        struct rb_node rb_node;         /* rb node located in rb-tree */
        struct list_head list;          /* node in global extent list of sbi */
        struct extent_info ei;          /* extent info */
+       struct extent_tree *et;         /* extent tree pointer */
 };
 
 struct extent_tree {
@@ -382,6 +383,7 @@ struct f2fs_map_blocks {
        block_t m_lblk;
        unsigned int m_len;
        unsigned int m_flags;
+       pgoff_t *m_next_pgofs;          /* points to the next possible non-hole pgofs */
 };
 
 /* for flag in get_data_block */
@@ -389,6 +391,8 @@ struct f2fs_map_blocks {
 #define F2FS_GET_BLOCK_DIO             1
 #define F2FS_GET_BLOCK_FIEMAP          2
 #define F2FS_GET_BLOCK_BMAP            3
+#define F2FS_GET_BLOCK_PRE_DIO         4
+#define F2FS_GET_BLOCK_PRE_AIO         5
 
 /*
  * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
@@ -515,6 +519,7 @@ struct f2fs_nm_info {
        nid_t next_scan_nid;            /* the next nid to be scanned */
        unsigned int ram_thresh;        /* control the memory footprint */
        unsigned int ra_nid_pages;      /* # of nid pages to be readaheaded */
+       unsigned int dirty_nats_ratio;  /* control dirty nats ratio threshold */
 
        /* NAT cache management */
        struct radix_tree_root nat_root;/* root of the nat entry cache */
@@ -549,6 +554,8 @@ struct dnode_of_data {
        unsigned int ofs_in_node;       /* data offset in the node page */
        bool inode_page_locked;         /* inode page is locked or not */
        bool node_changed;              /* is node block changed */
+       char cur_level;                 /* level of hole node page */
+       char max_level;                 /* level of current page located */
        block_t data_blkaddr;           /* block address of the node block */
 };
 
@@ -844,8 +851,19 @@ struct f2fs_sb_info {
        struct list_head s_list;
        struct mutex umount_mutex;
        unsigned int shrinker_run_no;
+
+       /* For write statistics */
+       u64 sectors_written_start;
+       u64 kbytes_written;
 };
 
+/* For write statistics: assuming a sector size of 512 bytes, the
+ * return value is in kbytes. s is a struct f2fs_sb_info pointer.
+ */
+#define BD_PART_WRITTEN(s)                                              \
+(((u64)part_stat_read(s->sb->s_bdev->bd_part, sectors[1]) -             \
+               s->sectors_written_start) >> 1)
+
 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
 {
        sbi->last_time[type] = jiffies;
@@ -1525,9 +1543,9 @@ static inline int f2fs_has_inline_xattr(struct inode *inode)
        return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
 }
 
-static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
+static inline unsigned int addrs_per_inode(struct inode *inode)
 {
-       if (f2fs_has_inline_xattr(&fi->vfs_inode))
+       if (f2fs_has_inline_xattr(inode))
                return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
        return DEF_ADDRS_PER_INODE;
 }
@@ -1681,10 +1699,10 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
         (F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
 
 /* get offset of first page in next direct node */
-#define PGOFS_OF_NEXT_DNODE(pgofs, fi)                         \
-       ((pgofs < ADDRS_PER_INODE(fi)) ? ADDRS_PER_INODE(fi) :  \
-       (pgofs - ADDRS_PER_INODE(fi) + ADDRS_PER_BLOCK) /       \
-       ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi))
+#define PGOFS_OF_NEXT_DNODE(pgofs, inode)                              \
+       ((pgofs < ADDRS_PER_INODE(inode)) ? ADDRS_PER_INODE(inode) :    \
+       (pgofs - ADDRS_PER_INODE(inode) + ADDRS_PER_BLOCK) /    \
+       ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode))
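
PGOFS_OF_NEXT_DNODE() (now taking the inode rather than the f2fs_inode_info) rounds a page offset up to the first offset served by the next direct node: offsets still inside the inode's own pointer area jump to ADDRS_PER_INODE, anything beyond rounds up in ADDRS_PER_BLOCK-sized steps past that base. A userspace check of the formula; 923 and 1018 are the usual f2fs defaults and are assumed here, and next_dnode_pgofs is just a demo name:

#include <stdio.h>

#define ADDRS_PER_INODE 923	/* assumed DEF_ADDRS_PER_INODE */
#define ADDRS_PER_BLOCK 1018	/* assumed default */

static unsigned long next_dnode_pgofs(unsigned long pgofs)
{
	if (pgofs < ADDRS_PER_INODE)
		return ADDRS_PER_INODE;
	return (pgofs - ADDRS_PER_INODE + ADDRS_PER_BLOCK) /
	       ADDRS_PER_BLOCK * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       next_dnode_pgofs(0),	/* 923: start of the 1st direct node */
	       next_dnode_pgofs(923),	/* 1941: start of the 2nd direct node */
	       next_dnode_pgofs(2000));	/* 2959 */
	return 0;
}
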
 
 /*
  * file.c
@@ -1780,6 +1798,7 @@ int need_dentry_mark(struct f2fs_sb_info *, nid_t);
 bool is_checkpointed_node(struct f2fs_sb_info *, nid_t);
 bool need_inode_block_update(struct f2fs_sb_info *, nid_t);
 void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
+pgoff_t get_next_page_offset(struct dnode_of_data *, pgoff_t);
 int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
 int truncate_inode_blocks(struct inode *, pgoff_t);
 int truncate_xattr_node(struct inode *, struct page *);
@@ -1836,7 +1855,7 @@ void f2fs_replace_block(struct f2fs_sb_info *, struct dnode_of_data *,
                                block_t, block_t, unsigned char, bool);
 void allocate_data_block(struct f2fs_sb_info *, struct page *,
                block_t, block_t *, struct f2fs_summary *, int);
-void f2fs_wait_on_page_writeback(struct page *, enum page_type);
+void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
 void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *, block_t);
 void write_data_summaries(struct f2fs_sb_info *, block_t);
 void write_node_summaries(struct f2fs_sb_info *, block_t);
@@ -1881,11 +1900,14 @@ void destroy_checkpoint_caches(void);
  * data.c
  */
 void f2fs_submit_merged_bio(struct f2fs_sb_info *, enum page_type, int);
+void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *, struct inode *,
+                               struct page *, nid_t, enum page_type, int);
 int f2fs_submit_page_bio(struct f2fs_io_info *);
 void f2fs_submit_page_mbio(struct f2fs_io_info *);
 void set_data_blkaddr(struct dnode_of_data *);
 int reserve_new_block(struct dnode_of_data *);
 int f2fs_get_block(struct dnode_of_data *, pgoff_t);
+ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *);
 int f2fs_reserve_block(struct dnode_of_data *, pgoff_t);
 struct page *get_read_data_page(struct inode *, pgoff_t, int, bool);
 struct page *find_data_page(struct inode *, pgoff_t);
@@ -1902,7 +1924,7 @@ int f2fs_release_page(struct page *, gfp_t);
  */
 int start_gc_thread(struct f2fs_sb_info *);
 void stop_gc_thread(struct f2fs_sb_info *);
-block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
+block_t start_bidx_of_node(unsigned int, struct inode *);
 int f2fs_gc(struct f2fs_sb_info *, bool);
 void build_gc_manager(struct f2fs_sb_info *);
 
index c2c1c2b63b25529cfd9cda4bcfb82de77f99f156..ea3d1d7c97f3c3bd3f510b19a8f7b4828c964e32 100644
@@ -78,7 +78,7 @@ struct f2fs_crypt_info {
        char            ci_data_mode;
        char            ci_filename_mode;
        char            ci_flags;
-       struct crypto_ablkcipher *ci_ctfm;
+       struct crypto_skcipher *ci_ctfm;
        struct key      *ci_keyring_key;
        char            ci_master_key[F2FS_KEY_DESCRIPTOR_SIZE];
 };
index ea272be62677004d29c8018691ea442c2c992bb1..a4362d4d714b09f838bf46cad48d6f7aa62de2fd 100644
@@ -86,7 +86,7 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
        trace_f2fs_vm_page_mkwrite(page, DATA);
 mapped:
        /* fill the page */
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, false);
 
        /* wait for GCed encrypted page writeback */
        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
@@ -358,15 +358,14 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
                } else if (err == -ENOENT) {
                        /* direct node does not exists */
                        if (whence == SEEK_DATA) {
-                               pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
-                                                       F2FS_I(inode));
+                               pgofs = get_next_page_offset(&dn, pgofs);
                                continue;
                        } else {
                                goto found;
                        }
                }
 
-               end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+               end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
 
                /* find data/hole in dnode block */
                for (; dn.ofs_in_node < end_offset;
@@ -480,7 +479,7 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
                 * we will invalidate all blkaddr in the whole range.
                 */
                fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
-                                               F2FS_I(dn->inode)) + ofs;
+                                                       dn->inode) + ofs;
                f2fs_update_extent_cache_range(dn, fofs, 0, len);
                dec_valid_block_count(sbi, dn->inode, nr_free);
                sync_inode_page(dn);
@@ -521,7 +520,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from,
        if (IS_ERR(page))
                return 0;
 truncate_out:
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        zero_user(page, offset, PAGE_CACHE_SIZE - offset);
        if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
                set_page_dirty(page);
@@ -568,7 +567,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock)
                goto out;
        }
 
-       count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+       count = ADDRS_PER_PAGE(dn.node_page, inode);
 
        count -= dn.ofs_in_node;
        f2fs_bug_on(sbi, count < 0);
@@ -743,7 +742,7 @@ static int fill_zero(struct inode *inode, pgoff_t index,
        if (IS_ERR(page))
                return PTR_ERR(page);
 
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        zero_user(page, start, len);
        set_page_dirty(page);
        f2fs_put_page(page, 1);
@@ -768,7 +767,7 @@ int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
                        return err;
                }
 
-               end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
+               end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
                count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
 
                f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
@@ -892,7 +891,7 @@ static int __exchange_data_block(struct inode *inode, pgoff_t src,
                psrc = get_lock_data_page(inode, src, true);
                if (IS_ERR(psrc))
                        return PTR_ERR(psrc);
-               pdst = get_new_data_page(inode, NULL, dst, false);
+               pdst = get_new_data_page(inode, NULL, dst, true);
                if (IS_ERR(pdst)) {
                        f2fs_put_page(psrc, 1);
                        return PTR_ERR(pdst);
@@ -1648,7 +1647,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
                                        struct f2fs_defragment *range)
 {
        struct inode *inode = file_inode(filp);
-       struct f2fs_map_blocks map;
+       struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
        struct extent_info ei;
        pgoff_t pg_start, pg_end;
        unsigned int blk_per_seg = sbi->blocks_per_seg;
@@ -1874,14 +1873,32 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
-       struct inode *inode = file_inode(iocb->ki_filp);
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file_inode(file);
+       ssize_t ret;
 
        if (f2fs_encrypted_inode(inode) &&
                                !f2fs_has_encryption_key(inode) &&
                                f2fs_get_encryption_info(inode))
                return -EACCES;
 
-       return generic_file_write_iter(iocb, from);
+       inode_lock(inode);
+       ret = generic_write_checks(iocb, from);
+       if (ret > 0) {
+               ret = f2fs_preallocate_blocks(iocb, from);
+               if (!ret)
+                       ret = __generic_file_write_iter(iocb, from);
+       }
+       inode_unlock(inode);
+
+       if (ret > 0) {
+               ssize_t err;
+
+               err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+               if (err < 0)
+                       ret = err;
+       }
+       return ret;
 }
 
 #ifdef CONFIG_COMPAT
index f610c2a9bdde9561d3be71ab4d674376db8ec401..47ade3542fbdd679471dfc9927f6d2782034c45f 100644 (file)
@@ -245,6 +245,18 @@ static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                return get_cb_cost(sbi, segno);
 }
 
+static unsigned int count_bits(const unsigned long *addr,
+                               unsigned int offset, unsigned int len)
+{
+       unsigned int end = offset + len, sum = 0;
+
+       while (offset < end) {
+               if (test_bit(offset++, addr))
+                       ++sum;
+       }
+       return sum;
+}
+
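
The count_bits() helper above is a plain bitmap population count over a segment range; the next hunk uses it to charge every dirty segment of a candidate section against the search budget (nsearched) instead of counting the whole section as one probe. A minimal userspace sketch of the same counting, with the kernel bitmap test open-coded over an unsigned long array (the segment numbers below are illustrative only):

        #include <stdio.h>

        #define BITS_PER_LONG (8 * sizeof(unsigned long))

        /* open-coded equivalent of test_bit() on an unsigned long array */
        static int test_bit_ul(unsigned int nr, const unsigned long *addr)
        {
                return (addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
        }

        static unsigned int count_bits(const unsigned long *addr,
                                       unsigned int offset, unsigned int len)
        {
                unsigned int end = offset + len, sum = 0;

                while (offset < end)
                        if (test_bit_ul(offset++, addr))
                                ++sum;
                return sum;
        }

        int main(void)
        {
                unsigned long dirty_segmap[1] = { 0 };

                /* mark segments 2, 3 and 5 dirty */
                dirty_segmap[0] |= 1UL << 2 | 1UL << 3 | 1UL << 5;

                /* segments 2..5 contain three dirty segments, so three
                 * probes would be charged for this 4-segment section */
                printf("%u\n", count_bits(dirty_segmap, 2, 4));
                return 0;
        }
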
 /*
  * This function is called from two paths.
  * One is garbage collection and the other is SSR segment selection.
@@ -260,7 +272,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
        struct victim_sel_policy p;
        unsigned int secno, max_cost;
        unsigned int last_segment = MAIN_SEGS(sbi);
-       int nsearched = 0;
+       unsigned int nsearched = 0;
 
        mutex_lock(&dirty_i->seglist_lock);
 
@@ -295,26 +307,31 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
                }
 
                p.offset = segno + p.ofs_unit;
-               if (p.ofs_unit > 1)
+               if (p.ofs_unit > 1) {
                        p.offset -= segno % p.ofs_unit;
+                       nsearched += count_bits(p.dirty_segmap,
+                                               p.offset - p.ofs_unit,
+                                               p.ofs_unit);
+               } else {
+                       nsearched++;
+               }
+
 
                secno = GET_SECNO(sbi, segno);
 
                if (sec_usage_check(sbi, secno))
-                       continue;
+                       goto next;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
-                       continue;
+                       goto next;
 
                cost = get_gc_cost(sbi, segno, &p);
 
                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
-               } else if (unlikely(cost == max_cost)) {
-                       continue;
                }
-
-               if (nsearched++ >= p.max_search) {
+next:
+               if (nsearched >= p.max_search) {
                        sbi->last_victim[p.gc_mode] = segno;
                        break;
                }
@@ -399,7 +416,7 @@ static int check_valid_map(struct f2fs_sb_info *sbi,
  * If the node is valid, copy it with cold status; otherwise (an invalid
  * node) ignore it.
  */
-static int gc_node_segment(struct f2fs_sb_info *sbi,
+static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
 {
        bool initial = true;
@@ -419,7 +436,7 @@ next_step:
 
                /* stop BG_GC if there is not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-                       return 0;
+                       return;
 
                if (check_valid_map(sbi, segno, off) == 0)
                        continue;
@@ -446,7 +463,7 @@ next_step:
 
                /* set page dirty and write it */
                if (gc_type == FG_GC) {
-                       f2fs_wait_on_page_writeback(node_page, NODE);
+                       f2fs_wait_on_page_writeback(node_page, NODE, true);
                        set_page_dirty(node_page);
                } else {
                        if (!PageWriteback(node_page))
@@ -460,20 +477,6 @@ next_step:
                initial = false;
                goto next_step;
        }
-
-       if (gc_type == FG_GC) {
-               struct writeback_control wbc = {
-                       .sync_mode = WB_SYNC_ALL,
-                       .nr_to_write = LONG_MAX,
-                       .for_reclaim = 0,
-               };
-               sync_node_pages(sbi, 0, &wbc);
-
-               /* return 1 only if FG_GC succefully reclaimed one */
-               if (get_valid_blocks(sbi, segno, 1) == 0)
-                       return 1;
-       }
-       return 0;
 }
 
 /*
@@ -483,7 +486,7 @@ next_step:
  * as indirect or double indirect node blocks, are given, it must be a caller's
  * bug.
  */
-block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
+block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
 {
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;
@@ -500,7 +503,7 @@ block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
-       return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
+       return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
 }
 
 static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -567,7 +570,7 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
         * don't cache encrypted data into the meta inode until the previous
         * dirty data has been written back, to avoid racing between GC and flush.
         */
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
 
        get_node_info(fio.sbi, dn.nid, &ni);
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
@@ -596,14 +599,14 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
                goto put_page_out;
 
        set_page_dirty(fio.encrypted_page);
-       f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
+       f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);
 
        set_page_writeback(fio.encrypted_page);
 
        /* allocate block address */
-       f2fs_wait_on_page_writeback(dn.node_page, NODE);
+       f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
        allocate_data_block(fio.sbi, NULL, fio.blk_addr,
                                        &fio.blk_addr, &sum, CURSEG_COLD_DATA);
        fio.rw = WRITE_SYNC;
@@ -645,7 +648,7 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
                        .encrypted_page = NULL,
                };
                set_page_dirty(page);
-               f2fs_wait_on_page_writeback(page, DATA);
+               f2fs_wait_on_page_writeback(page, DATA, true);
                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
@@ -663,7 +666,7 @@ out:
  * If the parent node is not valid or the data block address is different,
  * the victim data block is ignored.
  */
-static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
+static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
 {
        struct super_block *sb = sbi->sb;
@@ -686,7 +689,7 @@ next_step:
 
                /* stop BG_GC if there is not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
-                       return 0;
+                       return;
 
                if (check_valid_map(sbi, segno, off) == 0)
                        continue;
@@ -719,7 +722,7 @@ next_step:
                                continue;
                        }
 
-                       start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
+                       start_bidx = start_bidx_of_node(nofs, inode);
                        data_page = get_read_data_page(inode,
                                        start_bidx + ofs_in_node, READA, true);
                        if (IS_ERR(data_page)) {
@@ -735,7 +738,7 @@ next_step:
                /* phase 3 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
-                       start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
+                       start_bidx = start_bidx_of_node(nofs, inode)
                                                                + ofs_in_node;
                        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                                move_encrypted_block(inode, start_bidx);
@@ -747,15 +750,6 @@ next_step:
 
        if (++phase < 4)
                goto next_step;
-
-       if (gc_type == FG_GC) {
-               f2fs_submit_merged_bio(sbi, DATA, WRITE);
-
-               /* return 1 only if FG_GC succefully reclaimed one */
-               if (get_valid_blocks(sbi, segno, 1) == 0)
-                       return 1;
-       }
-       return 0;
 }
 
 static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
@@ -771,53 +765,90 @@ static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
        return ret;
 }
 
-static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
+static int do_garbage_collect(struct f2fs_sb_info *sbi,
+                               unsigned int start_segno,
                                struct gc_inode_list *gc_list, int gc_type)
 {
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;
-       int nfree = 0;
+       unsigned int segno = start_segno;
+       unsigned int end_segno = start_segno + sbi->segs_per_sec;
+       int seg_freed = 0;
+       unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
+                                               SUM_TYPE_DATA : SUM_TYPE_NODE;
 
-       /* read segment summary of victim */
-       sum_page = get_sum_page(sbi, segno);
+       /* readahead multiple SSA blocks that have contiguous addresses */
+       if (sbi->segs_per_sec > 1)
+               ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
+                                       sbi->segs_per_sec, META_SSA, true);
+
+       /* reference all summary pages */
+       while (segno < end_segno) {
+               sum_page = get_sum_page(sbi, segno++);
+               unlock_page(sum_page);
+       }
 
        blk_start_plug(&plug);
 
-       sum = page_address(sum_page);
+       for (segno = start_segno; segno < end_segno; segno++) {
+               /* find segment summary of victim */
+               sum_page = find_get_page(META_MAPPING(sbi),
+                                       GET_SUM_BLOCK(sbi, segno));
+               f2fs_bug_on(sbi, !PageUptodate(sum_page));
+               f2fs_put_page(sum_page, 0);
 
-       /*
-        * this is to avoid deadlock:
-        * - lock_page(sum_page)         - f2fs_replace_block
-        *  - check_valid_map()            - mutex_lock(sentry_lock)
-        *   - mutex_lock(sentry_lock)     - change_curseg()
-        *                                  - lock_page(sum_page)
-        */
-       unlock_page(sum_page);
-
-       switch (GET_SUM_TYPE((&sum->footer))) {
-       case SUM_TYPE_NODE:
-               nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
-               break;
-       case SUM_TYPE_DATA:
-               nfree = gc_data_segment(sbi, sum->entries, gc_list,
-                                                       segno, gc_type);
-               break;
+               sum = page_address(sum_page);
+               f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));
+
+               /*
+                * this is to avoid deadlock:
+                * - lock_page(sum_page)         - f2fs_replace_block
+                *  - check_valid_map()            - mutex_lock(sentry_lock)
+                *   - mutex_lock(sentry_lock)     - change_curseg()
+                *                                  - lock_page(sum_page)
+                */
+
+               if (type == SUM_TYPE_NODE)
+                       gc_node_segment(sbi, sum->entries, segno, gc_type);
+               else
+                       gc_data_segment(sbi, sum->entries, gc_list, segno,
+                                                               gc_type);
+
+               stat_inc_seg_count(sbi, type, gc_type);
+               stat_inc_call_count(sbi->stat_info);
+
+               f2fs_put_page(sum_page, 0);
        }
-       blk_finish_plug(&plug);
 
-       stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
-       stat_inc_call_count(sbi->stat_info);
+       if (gc_type == FG_GC) {
+               if (type == SUM_TYPE_NODE) {
+                       struct writeback_control wbc = {
+                               .sync_mode = WB_SYNC_ALL,
+                               .nr_to_write = LONG_MAX,
+                               .for_reclaim = 0,
+                       };
+                       sync_node_pages(sbi, 0, &wbc);
+               } else {
+                       f2fs_submit_merged_bio(sbi, DATA, WRITE);
+               }
+       }
 
-       f2fs_put_page(sum_page, 0);
-       return nfree;
+       blk_finish_plug(&plug);
+
+       if (gc_type == FG_GC) {
+               while (start_segno < end_segno)
+                       if (get_valid_blocks(sbi, start_segno++, 1) == 0)
+                               seg_freed++;
+       }
+       return seg_freed;
 }
 
 int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
 {
-       unsigned int segno, i;
+       unsigned int segno;
        int gc_type = sync ? FG_GC : BG_GC;
-       int sec_freed = 0;
+       int sec_freed = 0, seg_freed;
        int ret = -EINVAL;
        struct cp_control cpc;
        struct gc_inode_list gc_list = {
@@ -838,30 +869,24 @@ gc_more:
 
        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
                gc_type = FG_GC;
+               /*
+                * If there is no victim and no prefree segment but still not
+                * enough free sections, we should flush dent/node blocks and do
+                * garbage collection.
+                */
                if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
                        write_checkpoint(sbi, &cpc);
+               else if (has_not_enough_free_secs(sbi, 0))
+                       write_checkpoint(sbi, &cpc);
        }
 
        if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;
 
-       /* readahead multi ssa blocks those have contiguous address */
-       if (sbi->segs_per_sec > 1)
-               ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
-                                                       META_SSA, true);
-
-       for (i = 0; i < sbi->segs_per_sec; i++) {
-               /*
-                * for FG_GC case, halt gcing left segments once failed one
-                * of segments in selected section to avoid long latency.
-                */
-               if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
-                               gc_type == FG_GC)
-                       break;
-       }
+       seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
 
-       if (i == sbi->segs_per_sec && gc_type == FG_GC)
+       if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
                sec_freed++;
 
        if (gc_type == FG_GC)
index c3f0b7d4cfca174bba6e6b4baa796640b767d2ec..0be4a9b400c63db0fecd285b3ad21fe665cb5f7f 100644 (file)
@@ -71,7 +71,7 @@ bool truncate_inline_inode(struct page *ipage, u64 from)
 
        addr = inline_data_addr(ipage);
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
        memset(addr + from, 0, MAX_INLINE_DATA - from);
 
        return true;
@@ -124,8 +124,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
        if (err)
                return err;
 
-       f2fs_wait_on_page_writeback(page, DATA);
-
+       f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
        if (PageUptodate(page))
                goto no_update;
 
@@ -150,7 +149,7 @@ no_update:
        write_data_page(dn, &fio);
        set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        if (dirty)
                inode_dec_dirty_pages(dn->inode);
 
@@ -159,6 +158,7 @@ no_update:
 
        /* clear inline data and flag after data writeback */
        truncate_inline_inode(dn->inode_page, 0);
+       clear_inline_node(dn->inode_page);
 clear_out:
        stat_dec_inline_inode(dn->inode);
        f2fs_clear_inline_inode(dn->inode);
@@ -223,7 +223,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
 
        f2fs_bug_on(F2FS_I_SB(inode), page->index);
 
-       f2fs_wait_on_page_writeback(dn.inode_page, NODE);
+       f2fs_wait_on_page_writeback(dn.inode_page, NODE, true);
        src_addr = kmap_atomic(page);
        dst_addr = inline_data_addr(dn.inode_page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
@@ -233,6 +233,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
 
        sync_inode_page(&dn);
+       clear_inline_node(dn.inode_page);
        f2fs_put_dnode(&dn);
        return 0;
 }
@@ -261,7 +262,7 @@ process_inline:
                ipage = get_node_page(sbi, inode->i_ino);
                f2fs_bug_on(sbi, IS_ERR(ipage));
 
-               f2fs_wait_on_page_writeback(ipage, NODE);
+               f2fs_wait_on_page_writeback(ipage, NODE, true);
 
                src_addr = inline_data_addr(npage);
                dst_addr = inline_data_addr(ipage);
@@ -389,7 +390,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
        if (err)
                goto out;
 
-       f2fs_wait_on_page_writeback(page, DATA);
+       f2fs_wait_on_page_writeback(page, DATA, true);
        zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);
 
        dentry_blk = kmap_atomic(page);
@@ -469,7 +470,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
                }
        }
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
        name_hash = f2fs_dentry_hash(name);
        make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
@@ -507,7 +508,7 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
        int i;
 
        lock_page(page);
-       f2fs_wait_on_page_writeback(page, NODE);
+       f2fs_wait_on_page_writeback(page, NODE, true);
 
        inline_dentry = inline_data_addr(page);
        bit_pos = dentry - inline_dentry->dentry;
index 2adeff26be11b9689dde24a5d22b0bd3351c21b7..60e3b3078b81469b300fca0d4bf44b2117f709bf 100644 (file)
@@ -83,7 +83,7 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
 
        while (start < end) {
                if (*start++) {
-                       f2fs_wait_on_page_writeback(ipage, NODE);
+                       f2fs_wait_on_page_writeback(ipage, NODE, true);
 
                        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
                        set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
@@ -227,7 +227,7 @@ int update_inode(struct inode *inode, struct page *node_page)
 {
        struct f2fs_inode *ri;
 
-       f2fs_wait_on_page_writeback(node_page, NODE);
+       f2fs_wait_on_page_writeback(node_page, NODE, true);
 
        ri = F2FS_INODE(node_page);
 
@@ -263,6 +263,10 @@ int update_inode(struct inode *inode, struct page *node_page)
        set_cold_node(inode, node_page);
        clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
 
+       /* deleted inode */
+       if (inode->i_nlink == 0)
+               clear_inline_node(node_page);
+
        return set_page_dirty(node_page);
 }
 
index 342597a5897f059a2d31823923d8664861b7b969..150907ffa7aaf871772488c880d98ce3d86620c5 100644 (file)
@@ -403,14 +403,45 @@ cache:
        up_write(&nm_i->nat_tree_lock);
 }
 
+pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
+{
+       const long direct_index = ADDRS_PER_INODE(dn->inode);
+       const long direct_blks = ADDRS_PER_BLOCK;
+       const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
+       unsigned int skipped_unit = ADDRS_PER_BLOCK;
+       int cur_level = dn->cur_level;
+       int max_level = dn->max_level;
+       pgoff_t base = 0;
+
+       if (!dn->max_level)
+               return pgofs + 1;
+
+       while (max_level-- > cur_level)
+               skipped_unit *= NIDS_PER_BLOCK;
+
+       switch (dn->max_level) {
+       case 3:
+               base += 2 * indirect_blks;
+       case 2:
+               base += 2 * direct_blks;
+       case 1:
+               base += direct_index;
+               break;
+       default:
+               f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
+       }
+
+       return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
+}
+
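
get_next_page_offset() is what lets f2fs_seek_block() skip a whole missing node block at once: get_dnode_of_data() (further down in this diff) records dn->cur_level and dn->max_level when a lookup fails with -ENOENT, and the next candidate offset is rounded up to the next unit of the level that failed. A standalone sketch of that arithmetic; the geometry constants (923 block addresses in the inode, 1018 addresses and 1018 node IDs per block) are the usual f2fs values but are hard-coded assumptions here rather than taken from f2fs.h:

        #include <stdio.h>

        /* assumed f2fs geometry; the real values come from f2fs.h / the inode */
        #define ADDRS_PER_INODE 923
        #define ADDRS_PER_BLOCK 1018
        #define NIDS_PER_BLOCK  1018

        /* mirrors the rounding done by get_next_page_offset() above */
        static unsigned long next_page_offset(unsigned long pgofs,
                                              int cur_level, int max_level)
        {
                const long direct_blks = ADDRS_PER_BLOCK;
                const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
                unsigned long skipped_unit = ADDRS_PER_BLOCK;
                unsigned long base = 0;
                int level = max_level;

                if (!max_level)
                        return pgofs + 1;

                while (level-- > cur_level)
                        skipped_unit *= NIDS_PER_BLOCK;

                switch (max_level) {
                case 3: base += 2 * indirect_blks;      /* fall through */
                case 2: base += 2 * direct_blks;        /* fall through */
                case 1: base += ADDRS_PER_INODE;
                        break;
                }

                return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
        }

        int main(void)
        {
                /* offset 1000 falls inside the first direct node block
                 * (923..1940); if that node is missing, the next candidate
                 * offset is 1941, the start of the next direct node */
                printf("%lu\n", next_page_offset(1000, 1, 1));
                return 0;
        }
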
 /*
  * The maximum depth is four.
  * Offset[0] will have raw inode offset.
  */
-static int get_node_path(struct f2fs_inode_info *fi, long block,
+static int get_node_path(struct inode *inode, long block,
                                int offset[4], unsigned int noffset[4])
 {
-       const long direct_index = ADDRS_PER_INODE(fi);
+       const long direct_index = ADDRS_PER_INODE(inode);
        const long direct_blks = ADDRS_PER_BLOCK;
        const long dptrs_per_blk = NIDS_PER_BLOCK;
        const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
@@ -495,10 +526,10 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
        int offset[4];
        unsigned int noffset[4];
        nid_t nids[4];
-       int level, i;
+       int level, i = 0;
        int err = 0;
 
-       level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
+       level = get_node_path(dn->inode, index, offset, noffset);
 
        nids[0] = dn->inode->i_ino;
        npage[0] = dn->inode_page;
@@ -585,6 +616,10 @@ release_pages:
 release_out:
        dn->inode_page = NULL;
        dn->node_page = NULL;
+       if (err == -ENOENT) {
+               dn->cur_level = i;
+               dn->max_level = level;
+       }
        return err;
 }
 
@@ -792,7 +827,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
 
        trace_f2fs_truncate_inode_blocks_enter(inode, from);
 
-       level = get_node_path(F2FS_I(inode), from, offset, noffset);
+       level = get_node_path(inode, from, offset, noffset);
 restart:
        page = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(page)) {
@@ -861,7 +896,7 @@ skip_partial:
                                f2fs_put_page(page, 1);
                                goto restart;
                        }
-                       f2fs_wait_on_page_writeback(page, NODE);
+                       f2fs_wait_on_page_writeback(page, NODE, true);
                        ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
                        set_page_dirty(page);
                        unlock_page(page);
@@ -976,7 +1011,7 @@ struct page *new_node_page(struct dnode_of_data *dn,
        new_ni.ino = dn->inode->i_ino;
        set_node_addr(sbi, &new_ni, NEW_ADDR, false);
 
-       f2fs_wait_on_page_writeback(page, NODE);
+       f2fs_wait_on_page_writeback(page, NODE, true);
        fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
        set_cold_node(dn->inode, page);
        SetPageUptodate(page);
@@ -1154,6 +1189,39 @@ void sync_inode_page(struct dnode_of_data *dn)
        dn->node_changed = ret ? true: false;
 }
 
+static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
+{
+       struct inode *inode;
+       struct page *page;
+
+       /* should flush inline_data before evict_inode */
+       inode = ilookup(sbi->sb, ino);
+       if (!inode)
+               return;
+
+       page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
+       if (!page)
+               goto iput_out;
+
+       if (!PageUptodate(page))
+               goto page_out;
+
+       if (!PageDirty(page))
+               goto page_out;
+
+       if (!clear_page_dirty_for_io(page))
+               goto page_out;
+
+       if (!f2fs_write_inline_data(inode, page))
+               inode_dec_dirty_pages(inode);
+       else
+               set_page_dirty(page);
+page_out:
+       f2fs_put_page(page, 1);
+iput_out:
+       iput(inode);
+}
+
 int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
                                        struct writeback_control *wbc)
 {
@@ -1221,6 +1289,17 @@ continue_unlock:
                                goto continue_unlock;
                        }
 
+                       /* flush inline_data */
+                       if (!ino && is_inline_node(page)) {
+                               clear_inline_node(page);
+                               unlock_page(page);
+                               flush_inline_data(sbi, ino_of_node(page));
+                               continue;
+                       }
+
+                       f2fs_wait_on_page_writeback(page, NODE, true);
+
+                       BUG_ON(PageWriteback(page));
                        if (!clear_page_dirty_for_io(page))
                                goto continue_unlock;
 
@@ -1258,8 +1337,13 @@ continue_unlock:
                goto next_step;
        }
 
-       if (wrote)
-               f2fs_submit_merged_bio(sbi, NODE, WRITE);
+       if (wrote) {
+               if (ino)
+                       f2fs_submit_merged_bio_cond(sbi, NULL, NULL,
+                                                       ino, NODE, WRITE);
+               else
+                       f2fs_submit_merged_bio(sbi, NODE, WRITE);
+       }
        return nwritten;
 }
 
@@ -1287,7 +1371,7 @@ int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
                                continue;
 
                        if (ino && ino_of_node(page) == ino) {
-                               f2fs_wait_on_page_writeback(page, NODE);
+                               f2fs_wait_on_page_writeback(page, NODE, true);
                                if (TestClearPageError(page))
                                        ret = -EIO;
                        }
@@ -1326,8 +1410,6 @@ static int f2fs_write_node_page(struct page *page,
        if (unlikely(f2fs_cp_error(sbi)))
                goto redirty_out;
 
-       f2fs_wait_on_page_writeback(page, NODE);
-
        /* get old block addr of this node page */
        nid = nid_of_node(page);
        f2fs_bug_on(sbi, page->index != nid);
@@ -1356,9 +1438,13 @@ static int f2fs_write_node_page(struct page *page,
        set_node_addr(sbi, &ni, fio.blk_addr, is_fsync_dnode(page));
        dec_page_count(sbi, F2FS_DIRTY_NODES);
        up_read(&sbi->node_write);
+
+       if (wbc->for_reclaim)
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, NODE, WRITE);
+
        unlock_page(page);
 
-       if (wbc->for_reclaim || unlikely(f2fs_cp_error(sbi)))
+       if (unlikely(f2fs_cp_error(sbi)))
                f2fs_submit_merged_bio(sbi, NODE, WRITE);
 
        return 0;
@@ -1374,8 +1460,6 @@ static int f2fs_write_node_pages(struct address_space *mapping,
        struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
        long diff;
 
-       trace_f2fs_writepages(mapping->host, wbc, NODE);
-
        /* balancing f2fs's metadata in background */
        f2fs_balance_fs_bg(sbi);
 
@@ -1383,6 +1467,8 @@ static int f2fs_write_node_pages(struct address_space *mapping,
        if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
                goto skip_write;
 
+       trace_f2fs_writepages(mapping->host, wbc, NODE);
+
        diff = nr_pages_to_write(sbi, NODE, wbc);
        wbc->sync_mode = WB_SYNC_NONE;
        sync_node_pages(sbi, 0, wbc);
@@ -1391,6 +1477,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
 
 skip_write:
        wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
+       trace_f2fs_writepages(mapping->host, wbc, NODE);
        return 0;
 }
 
@@ -1703,7 +1790,7 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
        src_addr = inline_xattr_addr(page);
        inline_size = inline_xattr_size(inode);
 
-       f2fs_wait_on_page_writeback(ipage, NODE);
+       f2fs_wait_on_page_writeback(ipage, NODE, true);
        memcpy(dst_addr, src_addr, inline_size);
 update_inode:
        update_inode(inode, ipage);
@@ -2000,6 +2087,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
        nm_i->nat_cnt = 0;
        nm_i->ram_thresh = DEF_RAM_THRESHOLD;
        nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
+       nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
 
        INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
        INIT_LIST_HEAD(&nm_i->free_nid_list);
index d4d1f636fe1c36c923ebcd68da46249161ccafc8..1f4f9d4569d9cb4f8a6ed93ff112fa300cd49444 100644 (file)
@@ -25,6 +25,9 @@
 /* control the memory footprint threshold (10MB per 1GB ram) */
 #define DEF_RAM_THRESHOLD      10
 
+/* control dirty nats ratio threshold (default: 10% over max nid count) */
+#define DEF_DIRTY_NAT_RATIO_THRESHOLD          10
+
 /* vector size for gang look-up from nat cache that consists of radix tree */
 #define NATVEC_SIZE    64
 #define SETVEC_SIZE    32
@@ -117,6 +120,12 @@ static inline void raw_nat_from_node_info(struct f2fs_nat_entry *raw_ne,
        raw_ne->version = ni->version;
 }
 
+static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
+{
+       return NM_I(sbi)->dirty_nat_cnt >= NM_I(sbi)->max_nid *
+                                       NM_I(sbi)->dirty_nats_ratio / 100;
+}
+
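
excess_dirty_nats() turns the new dirty_nats_ratio knob into an absolute count: f2fs_balance_fs_bg() (later in this diff) urges a checkpoint once dirty_nat_cnt reaches max_nid * dirty_nats_ratio / 100. A tiny sketch of that arithmetic; the max_nid value is purely illustrative, since it depends on the image geometry:

        #include <stdio.h>

        int main(void)
        {
                unsigned long long max_nid = 4194304;   /* assumed NAT capacity */
                unsigned int dirty_nats_ratio = 10;     /* DEF_DIRTY_NAT_RATIO_THRESHOLD */

                printf("checkpoint urged once %llu NAT entries are dirty\n",
                       max_nid * dirty_nats_ratio / 100);
                return 0;
        }
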
 enum mem_type {
        FREE_NIDS,      /* indicates the free nid list */
        NAT_ENTRIES,    /* indicates the cached nat entry */
@@ -321,7 +330,7 @@ static inline int set_nid(struct page *p, int off, nid_t nid, bool i)
 {
        struct f2fs_node *rn = F2FS_NODE(p);
 
-       f2fs_wait_on_page_writeback(p, NODE);
+       f2fs_wait_on_page_writeback(p, NODE, true);
 
        if (i)
                rn->i.i_nid[off - NODE_DIR1_BLOCK] = cpu_to_le32(nid);
@@ -370,6 +379,21 @@ static inline int is_node(struct page *page, int type)
 #define is_fsync_dnode(page)   is_node(page, FSYNC_BIT_SHIFT)
 #define is_dent_dnode(page)    is_node(page, DENT_BIT_SHIFT)
 
+static inline int is_inline_node(struct page *page)
+{
+       return PageChecked(page);
+}
+
+static inline void set_inline_node(struct page *page)
+{
+       SetPageChecked(page);
+}
+
+static inline void clear_inline_node(struct page *page)
+{
+       ClearPageChecked(page);
+}
+
 static inline void set_cold_node(struct inode *inode, struct page *page)
 {
        struct f2fs_node *rn = F2FS_NODE(page);
index 589b20b8677b8cc1dcf3d16a6f7e0943ce0283eb..5045dd6a27e96c9a890e3e885ac01947c4914c53 100644 (file)
@@ -350,8 +350,7 @@ got_it:
                inode = dn->inode;
        }
 
-       bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
-                       le16_to_cpu(sum.ofs_in_node);
+       bidx = start_bidx_of_node(offset, inode) + le16_to_cpu(sum.ofs_in_node);
 
        /*
         * if inode page is locked, unlock temporarily, but its reference
@@ -386,10 +385,9 @@ truncate_out:
 static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                                        struct page *page, block_t blkaddr)
 {
-       struct f2fs_inode_info *fi = F2FS_I(inode);
-       unsigned int start, end;
        struct dnode_of_data dn;
        struct node_info ni;
+       unsigned int start, end;
        int err = 0, recovered = 0;
 
        /* step 1: recover xattr */
@@ -409,8 +407,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
                goto out;
 
        /* step 3: recover data indices */
-       start = start_bidx_of_node(ofs_of_node(page), fi);
-       end = start + ADDRS_PER_PAGE(page, fi);
+       start = start_bidx_of_node(ofs_of_node(page), inode);
+       end = start + ADDRS_PER_PAGE(page, inode);
 
        set_new_dnode(&dn, inode, NULL, NULL, 0);
 
@@ -418,7 +416,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
        if (err)
                goto out;
 
-       f2fs_wait_on_page_writeback(dn.node_page, NODE);
+       f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
 
        get_node_info(sbi, dn.nid, &ni);
        f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
index 5904a411c86fe8d4feb70f695809273b3a46bd6a..57a5f7bb275ae21b0935e1d8f89e83624bf06117 100644 (file)
@@ -223,7 +223,8 @@ int commit_inmem_pages(struct inode *inode, bool abort)
                if (!abort) {
                        if (cur->page->mapping == inode->i_mapping) {
                                set_page_dirty(cur->page);
-                               f2fs_wait_on_page_writeback(cur->page, DATA);
+                               f2fs_wait_on_page_writeback(cur->page, DATA,
+                                                                       true);
                                if (clear_page_dirty_for_io(cur->page))
                                        inode_dec_dirty_pages(inode);
                                trace_f2fs_commit_inmem_page(cur->page, INMEM);
@@ -253,7 +254,8 @@ int commit_inmem_pages(struct inode *inode, bool abort)
        if (!abort) {
                f2fs_unlock_op(sbi);
                if (submit_bio)
-                       f2fs_submit_merged_bio(sbi, DATA, WRITE);
+                       f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0,
+                                                               DATA, WRITE);
        }
        return err;
 }
@@ -291,8 +293,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
 
        /* checkpoint is the only way to shrink partial cached entries */
        if (!available_free_memory(sbi, NAT_ENTRIES) ||
-                       excess_prefree_segs(sbi) ||
                        !available_free_memory(sbi, INO_ENTRIES) ||
+                       excess_prefree_segs(sbi) ||
+                       excess_dirty_nats(sbi) ||
                        (is_idle(sbi) && f2fs_time_over(sbi, CP_TIME))) {
                if (test_opt(sbi, DATA_FLUSH))
                        sync_dirty_inodes(sbi, FILE_INODE);
@@ -873,9 +876,8 @@ static void get_new_segment(struct f2fs_sb_info *sbi,
 
        if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
                segno = find_next_zero_bit(free_i->free_segmap,
-                                       MAIN_SEGS(sbi), *newseg + 1);
-               if (segno - *newseg < sbi->segs_per_sec -
-                                       (*newseg % sbi->segs_per_sec))
+                               (hint + 1) * sbi->segs_per_sec, *newseg + 1);
+               if (segno < (hint + 1) * sbi->segs_per_sec)
                        goto got_it;
        }
 find_other_zone:
@@ -1415,53 +1417,17 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
        f2fs_update_extent_cache(dn);
 }
 
-static inline bool is_merged_page(struct f2fs_sb_info *sbi,
-                                       struct page *page, enum page_type type)
-{
-       enum page_type btype = PAGE_TYPE_OF_BIO(type);
-       struct f2fs_bio_info *io = &sbi->write_io[btype];
-       struct bio_vec *bvec;
-       struct page *target;
-       int i;
-
-       down_read(&io->io_rwsem);
-       if (!io->bio) {
-               up_read(&io->io_rwsem);
-               return false;
-       }
-
-       bio_for_each_segment_all(bvec, io->bio, i) {
-
-               if (bvec->bv_page->mapping) {
-                       target = bvec->bv_page;
-               } else {
-                       struct f2fs_crypto_ctx *ctx;
-
-                       /* encrypted page */
-                       ctx = (struct f2fs_crypto_ctx *)page_private(
-                                                               bvec->bv_page);
-                       target = ctx->w.control_page;
-               }
-
-               if (page == target) {
-                       up_read(&io->io_rwsem);
-                       return true;
-               }
-       }
-
-       up_read(&io->io_rwsem);
-       return false;
-}
-
 void f2fs_wait_on_page_writeback(struct page *page,
-                               enum page_type type)
+                               enum page_type type, bool ordered)
 {
        if (PageWriteback(page)) {
                struct f2fs_sb_info *sbi = F2FS_P_SB(page);
 
-               if (is_merged_page(sbi, page, type))
-                       f2fs_submit_merged_bio(sbi, type, WRITE);
-               wait_on_page_writeback(page);
+               f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, type, WRITE);
+               if (ordered)
+                       wait_on_page_writeback(page);
+               else
+                       wait_for_stable_page(page);
        }
 }
 
@@ -1477,7 +1443,7 @@ void f2fs_wait_on_encrypted_page_writeback(struct f2fs_sb_info *sbi,
 
        cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
        if (cpage) {
-               f2fs_wait_on_page_writeback(cpage, DATA);
+               f2fs_wait_on_page_writeback(cpage, DATA, true);
                f2fs_put_page(cpage, 1);
        }
 }
index ee44d346ea44143e5610c59c3d0ed66c88526c43..cd7111b9a664f542865e1272b25554443ac90c5a 100644 (file)
@@ -183,7 +183,7 @@ struct segment_allocation {
  * this value is set in the page as private data to indicate that
  * the page was written atomically and is on the inmem_pages list.
  */
-#define ATOMIC_WRITTEN_PAGE            0x0000ffff
+#define ATOMIC_WRITTEN_PAGE            ((unsigned long)-1)
 
 #define IS_ATOMIC_WRITTEN_PAGE(page)                   \
                (page_private(page) == (unsigned long)ATOMIC_WRITTEN_PAGE)
index 6134832baaaf0b25a86ed9cec1e8121b3ba3e4e2..9445a34b8d48c184ecf0762b65dc2096896d2885 100644 (file)
@@ -126,6 +126,19 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
        return NULL;
 }
 
+static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
+               struct f2fs_sb_info *sbi, char *buf)
+{
+       struct super_block *sb = sbi->sb;
+
+       if (!sb->s_bdev->bd_part)
+               return snprintf(buf, PAGE_SIZE, "0\n");
+
+       return snprintf(buf, PAGE_SIZE, "%llu\n",
+               (unsigned long long)(sbi->kbytes_written +
+                       BD_PART_WRITTEN(sbi)));
+}
+
 static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
                        struct f2fs_sb_info *sbi, char *buf)
 {
@@ -204,6 +217,9 @@ static struct f2fs_attr f2fs_attr_##_name = {                       \
                f2fs_sbi_show, f2fs_sbi_store,                  \
                offsetof(struct struct_name, elname))
 
+#define F2FS_GENERAL_RO_ATTR(name) \
+static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
+
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
 F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
@@ -216,10 +232,12 @@ F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
 F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
 F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
+F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
 F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
+F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
 
 #define ATTR_LIST(name) (&f2fs_attr_##name.attr)
 static struct attribute *f2fs_attrs[] = {
@@ -237,8 +255,10 @@ static struct attribute *f2fs_attrs[] = {
        ATTR_LIST(dir_level),
        ATTR_LIST(ram_thresh),
        ATTR_LIST(ra_nid_pages),
+       ATTR_LIST(dirty_nats_ratio),
        ATTR_LIST(cp_interval),
        ATTR_LIST(idle_interval),
+       ATTR_LIST(lifetime_write_kbytes),
        NULL,
 };
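
lifetime_write_kbytes is registered above as a read-only sysfs attribute and reports kbytes_written plus the sectors written to the partition since mount (BD_PART_WRITTEN). A minimal sketch of reading it from userspace; the path follows the existing /sys/fs/f2fs/<device>/ layout and the device name is only an example:

        #include <stdio.h>

        int main(int argc, char **argv)
        {
                /* example path; the device component depends on the mounted volume */
                const char *path = argc > 1 ? argv[1]
                                            : "/sys/fs/f2fs/sda1/lifetime_write_kbytes";
                unsigned long long kb;
                FILE *f = fopen(path, "r");

                if (!f) {
                        perror(path);
                        return 1;
                }
                if (fscanf(f, "%llu", &kb) == 1)
                        printf("lifetime writes: %llu KB\n", kb);
                fclose(f);
                return 0;
        }
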
 
@@ -562,6 +582,13 @@ static void f2fs_put_super(struct super_block *sb)
        f2fs_leave_shrinker(sbi);
        mutex_unlock(&sbi->umount_mutex);
 
+       /* our cp_error case, we can wait for any writeback page */
+       if (get_pages(sbi, F2FS_WRITEBACK)) {
+               f2fs_submit_merged_bio(sbi, DATA, WRITE);
+               f2fs_submit_merged_bio(sbi, NODE, WRITE);
+               f2fs_submit_merged_bio(sbi, META, WRITE);
+       }
+
        iput(sbi->node_inode);
        iput(sbi->meta_inode);
 
@@ -766,8 +793,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
        bool need_stop_gc = false;
        bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
 
-       sync_filesystem(sb);
-
        /*
         * Save the old mount options in case we
         * need to restore them.
@@ -775,6 +800,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
        org_mount_opt = sbi->mount_opt;
        active_logs = sbi->active_logs;
 
+       if (*flags & MS_RDONLY) {
+               set_opt(sbi, FASTBOOT);
+               set_sbi_flag(sbi, SBI_IS_DIRTY);
+       }
+
+       sync_filesystem(sb);
+
        sbi->mount_opt.opt = 0;
        default_options(sbi);
 
@@ -1242,6 +1274,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
        bool retry = true, need_fsck = false;
        char *options = NULL;
        int recovery, i, valid_super_block;
+       struct curseg_info *seg_i;
 
 try_onemore:
        err = -EINVAL;
@@ -1372,6 +1405,17 @@ try_onemore:
                goto free_nm;
        }
 
+       /* For write statistics */
+       if (sb->s_bdev->bd_part)
+               sbi->sectors_written_start =
+                       (u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);
+
+       /* Read accumulated write IO statistics if they exist */
+       seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
+       if (__exist_node_summaries(sbi))
+               sbi->kbytes_written =
+                       le64_to_cpu(seg_i->sum_blk->info.kbytes_written);
+
        build_gc_manager(sbi);
 
        /* get an inode for node space */
index 10f1e784fa2390148aaa37f527526162be2d724d..06a72dc0191a022049bbf48ad26e127bb6211c02 100644 (file)
@@ -300,7 +300,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
 
                if (ipage) {
                        inline_addr = inline_xattr_addr(ipage);
-                       f2fs_wait_on_page_writeback(ipage, NODE);
+                       f2fs_wait_on_page_writeback(ipage, NODE, true);
                } else {
                        page = get_node_page(sbi, inode->i_ino);
                        if (IS_ERR(page)) {
@@ -308,7 +308,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
                                return PTR_ERR(page);
                        }
                        inline_addr = inline_xattr_addr(page);
-                       f2fs_wait_on_page_writeback(page, NODE);
+                       f2fs_wait_on_page_writeback(page, NODE, true);
                }
                memcpy(inline_addr, txattr_addr, inline_size);
                f2fs_put_page(page, 1);
@@ -329,7 +329,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
                        return PTR_ERR(xpage);
                }
                f2fs_bug_on(sbi, new_nid);
-               f2fs_wait_on_page_writeback(xpage, NODE);
+               f2fs_wait_on_page_writeback(xpage, NODE, true);
        } else {
                struct dnode_of_data dn;
                set_new_dnode(&dn, inode, NULL, NULL, new_nid);
index 93f07465e5a682618844fa53bba03c859526141e..aa016e4b8bec976d57b8a36cdbb3c12c7e8e39ff 100644 (file)
@@ -1082,7 +1082,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
         * the first place, mapping->nr_pages will always be zero.
         */
        if (mapping->nrpages) {
-               loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
+               loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1);
                loff_t len = iov_iter_count(iter);
                loff_t end = PAGE_ALIGN(offset + len) - 1;
 
index 6a92592304fb54a73b39abf1bf41c99551ca80d1..d4014af4f064f60d05345e72cdfb6953c3abb9d9 100644 (file)
@@ -798,7 +798,7 @@ static int get_first_leaf(struct gfs2_inode *dip, u32 index,
        int error;
 
        error = get_leaf_nr(dip, index, &leaf_no);
-       if (!error)
+       if (!IS_ERR_VALUE(error))
                error = get_leaf(dip, leaf_no, bh_out);
 
        return error;
@@ -1014,7 +1014,7 @@ static int dir_split_leaf(struct inode *inode, const struct qstr *name)
 
        index = name->hash >> (32 - dip->i_depth);
        error = get_leaf_nr(dip, index, &leaf_no);
-       if (error)
+       if (IS_ERR_VALUE(error))
                return error;
 
        /*  Get the old leaf block  */
index a4ff7b56f5cdbabc8a1e12830bf0e36221a4c53e..7f0257309b3e8a5ef3f180b400fe5e39f0143e65 100644 (file)
@@ -572,6 +572,12 @@ static void delete_work_func(struct work_struct *work)
        struct inode *inode;
        u64 no_addr = gl->gl_name.ln_number;
 
+       /* If someone's using this glock to create a new dinode, the block must
+          have been freed by another node, then re-used, in which case our
+          iopen callback is too late after the fact. Ignore it. */
+       if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
+               goto out;
+
        ip = gl->gl_object;
        /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
 
@@ -583,6 +589,7 @@ static void delete_work_func(struct work_struct *work)
                d_prune_aliases(inode);
                iput(inode);
        }
+out:
        gfs2_glock_put(gl);
 }
 
@@ -1015,6 +1022,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                handle_callback(gl, LM_ST_UNLOCKED, 0, false);
 
        list_del_init(&gh->gh_list);
+       clear_bit(HIF_HOLDER, &gh->gh_iflags);
        if (find_first_holder(gl) == NULL) {
                if (glops->go_unlock) {
                        GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
index 845fb09cc60668037b67edf243c2ab05c6f9e6e4..a6a3389a07fc67187288e5bb82a3b19d4996bf37 100644 (file)
@@ -328,6 +328,7 @@ enum {
        GLF_LRU                         = 13,
        GLF_OBJECT                      = 14, /* Used only for tracing */
        GLF_BLOCKING                    = 15,
+       GLF_INODE_CREATING              = 16, /* Inode creation occurring */
 };
 
 struct gfs2_glock {
index 352f958769e15b150575c54b09d82b51d34f258e..5fdb319fc2426925edb0ebdb35b7d19adacf705c 100644 (file)
@@ -592,7 +592,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        struct inode *inode = NULL;
        struct gfs2_inode *dip = GFS2_I(dir), *ip;
        struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
-       struct gfs2_glock *io_gl;
+       struct gfs2_glock *io_gl = NULL;
        int error, free_vfs_inode = 1;
        u32 aflags = 0;
        unsigned blocks = 1;
@@ -729,6 +729,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        if (error)
                goto fail_gunlock2;
 
+       BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
+
        error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
        if (error)
                goto fail_gunlock2;
@@ -771,12 +773,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
        }
        gfs2_glock_dq_uninit(ghs);
        gfs2_glock_dq_uninit(ghs + 1);
+       clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
        return error;
 
 fail_gunlock3:
        gfs2_glock_dq_uninit(&ip->i_iopen_gh);
        gfs2_glock_put(io_gl);
 fail_gunlock2:
+       if (io_gl)
+               clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
        gfs2_glock_dq_uninit(ghs + 1);
 fail_free_inode:
        if (ip->i_gl)
index 8f960a51a9a0d72b238e244eca9a336eacbd77f9..f8a0cd821290976bcf45846c11123bb31997bb61 100644 (file)
@@ -1551,12 +1551,16 @@ static void gfs2_evict_inode(struct inode *inode)
                        goto out_truncate;
        }
 
-       ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-       gfs2_glock_dq_wait(&ip->i_iopen_gh);
-       gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh);
-       error = gfs2_glock_nq(&ip->i_iopen_gh);
-       if (error)
-               goto out_truncate;
+       if (ip->i_iopen_gh.gh_gl &&
+           test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+               gfs2_glock_dq_wait(&ip->i_iopen_gh);
+               gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
+                                  &ip->i_iopen_gh);
+               error = gfs2_glock_nq(&ip->i_iopen_gh);
+               if (error)
+                       goto out_truncate;
+       }
 
        /* Case 1 starts here */
 
@@ -1606,11 +1610,13 @@ out_unlock:
        if (gfs2_rs_active(&ip->i_res))
                gfs2_rs_deltree(&ip->i_res);
 
-       if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
-               ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-               gfs2_glock_dq_wait(&ip->i_iopen_gh);
+       if (ip->i_iopen_gh.gh_gl) {
+               if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
+                       ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
+                       gfs2_glock_dq_wait(&ip->i_iopen_gh);
+               }
+               gfs2_holder_uninit(&ip->i_iopen_gh);
        }
-       gfs2_holder_uninit(&ip->i_iopen_gh);
        gfs2_glock_dq_uninit(&gh);
        if (error && error != GLR_TRYFAILED && error != -EROFS)
                fs_warn(sdp, "gfs2_evict_inode: %d\n", error);
index 996b7742c90b2eaf1e8b154ae3cc580293d6f94b..118d033bcbbc26a169e6e0395af4b6f9fe5e335e 100644 (file)
@@ -691,15 +691,22 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
                                          const unsigned char *path,
                                          const void *ns)
 {
-       static char path_buf[PATH_MAX]; /* protected by kernfs_mutex */
-       size_t len = strlcpy(path_buf, path, PATH_MAX);
-       char *p = path_buf;
-       char *name;
+       size_t len;
+       char *p, *name;
 
        lockdep_assert_held(&kernfs_mutex);
 
-       if (len >= PATH_MAX)
+       /* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */
+       spin_lock_irq(&kernfs_rename_lock);
+
+       len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));
+
+       if (len >= sizeof(kernfs_pr_cont_buf)) {
+               spin_unlock_irq(&kernfs_rename_lock);
                return NULL;
+       }
+
+       p = kernfs_pr_cont_buf;
 
        while ((name = strsep(&p, "/")) && parent) {
                if (*name == '\0')
@@ -707,6 +714,8 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
                parent = kernfs_find_ns(parent, name, ns);
        }
 
+       spin_unlock_irq(&kernfs_rename_lock);
+
        return parent;
 }
 
index 5bcd92d50e820ab6ccfa5e7789ee01152176b857..0cb1abd535e38469201478ee597e74045e593bc5 100644 (file)
@@ -1215,7 +1215,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
                                        hdr->pgio_mirror_idx + 1,
                                        &hdr->pgio_mirror_idx))
                        goto out_eagain;
-               set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+               set_bit(NFS_LAYOUT_RETURN_REQUESTED,
                        &hdr->lseg->pls_layout->plh_flags);
                pnfs_read_resend_pnfs(hdr);
                return task->tk_status;
index 29898a9550fa6f72541cbedc7ac1f0270c5a7c5c..eb370460ce203c11c8461e88115cc0fab9c417fa 100644 (file)
@@ -412,7 +412,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx,
                                         OP_ILLEGAL, GFP_NOIO);
                if (!fail_return) {
                        if (ff_layout_has_available_ds(lseg))
-                               set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+                               set_bit(NFS_LAYOUT_RETURN_REQUESTED,
                                        &lseg->pls_layout->plh_flags);
                        else
                                pnfs_error_mark_layout_for_return(ino, lseg);
index 4bfc33ad05637f58f9f0b675f058fd90085d4271..e810ab57a5115f4e30d6f0e4aa2f9668ae616740 100644 (file)
@@ -4707,20 +4707,17 @@ out:
 /*
  * The getxattr API returns the required buffer length when called with a
  * NULL buf. The NFSv4 acl tool then calls getxattr again after allocating
- * the required buf.  On a NULL buf, we send a page of data to the server
- * guessing that the ACL request can be serviced by a page. If so, we cache
- * up to the page of ACL data, and the 2nd call to getxattr is serviced by
- * the cache. If not so, we throw away the page, and cache the required
- * length. The next getxattr call will then produce another round trip to
- * the server, this time with the input buf of the required size.
+ * the required buf. Cache the result from the first getxattr call to avoid
+ * sending another RPC.
  */
 static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen)
 {
-       struct page *pages[NFS4ACL_MAXPAGES] = {NULL, };
+       /* enough pages to hold ACL data plus the bitmap and acl length */
+       struct page *pages[NFS4ACL_MAXPAGES + 1] = {NULL, };
        struct nfs_getaclargs args = {
                .fh = NFS_FH(inode),
+               /* The xdr layer may allocate pages here. */
                .acl_pages = pages,
-               .acl_len = buflen,
        };
        struct nfs_getaclres res = {
                .acl_len = buflen,
@@ -4730,31 +4727,24 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
-       unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
        int ret = -ENOMEM, i;
 
-       /* As long as we're doing a round trip to the server anyway,
-        * let's be prepared for a page of acl data. */
-       if (npages == 0)
-               npages = 1;
-       if (npages > ARRAY_SIZE(pages))
-               return -ERANGE;
-
-       for (i = 0; i < npages; i++) {
-               pages[i] = alloc_page(GFP_KERNEL);
-               if (!pages[i])
-                       goto out_free;
-       }
+       /*
+        * There will be some data returned by the server, how much is not
+        * known yet. Allocate one page and let the XDR layer allocate
+        * more if needed.
+        */
+       pages[0] = alloc_page(GFP_KERNEL);
 
        /* for decoding across pages */
        res.acl_scratch = alloc_page(GFP_KERNEL);
        if (!res.acl_scratch)
                goto out_free;
 
-       args.acl_len = npages * PAGE_SIZE;
+       args.acl_len = ARRAY_SIZE(pages) << PAGE_SHIFT;
 
-       dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
-               __func__, buf, buflen, npages, args.acl_len);
+       dprintk("%s  buf %p buflen %zu args.acl_len %zu\n",
+               __func__, buf, buflen, args.acl_len);
        ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
                             &msg, &args.seq_args, &res.seq_res, 0);
        if (ret)
@@ -4779,7 +4769,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
 out_ok:
        ret = res.acl_len;
 out_free:
-       for (i = 0; i < npages; i++)
+       for (i = 0; i < ARRAY_SIZE(pages) && pages[i]; i++)
                if (pages[i])
                        __free_page(pages[i]);
        if (res.acl_scratch)
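
For context, the two-call pattern the rewritten comment refers to looks roughly like this from userspace; "system.nfs4_acl" is the attribute name the NFSv4 ACL tools conventionally use, shown here only as an example, and error handling is trimmed.

#include <stdlib.h>
#include <sys/types.h>
#include <sys/xattr.h>

/* First call with a zero size asks only for the required length;
 * the second call fetches the data into a buffer of that size. */
static ssize_t read_nfs4_acl(const char *path, void **out)
{
        ssize_t len = getxattr(path, "system.nfs4_acl", NULL, 0);
        void *buf;

        if (len <= 0)
                return len;
        buf = malloc(len);
        if (!buf)
                return -1;
        len = getxattr(path, "system.nfs4_acl", buf, len);
        if (len < 0) {
                free(buf);
                return -1;
        }
        *out = buf;
        return len;
}

With the patch above, the second call is normally served from the cache populated by the first, so only one GETACL round trip goes to the server.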
index a3592cc34a20b76341c9c9b9af3378bdddebd477..482b6e94bb37cd1f3abaca235e94d961d9c15cb4 100644 (file)
@@ -52,9 +52,7 @@ static DEFINE_SPINLOCK(pnfs_spinlock);
  */
 static LIST_HEAD(pnfs_modules_tbl);
 
-static int
-pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
-                      enum pnfs_iomode iomode, bool sync);
+static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
 
 /* Return the registered pnfs layout driver module matching given id */
 static struct pnfs_layoutdriver_type *
@@ -243,6 +241,8 @@ pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
 {
        struct inode *inode = lo->plh_inode;
 
+       pnfs_layoutreturn_before_put_layout_hdr(lo);
+
        if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                if (!list_empty(&lo->plh_segs))
                        WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
@@ -345,58 +345,6 @@ pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
 }
 
-/* Return true if layoutreturn is needed */
-static bool
-pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
-                       struct pnfs_layout_segment *lseg)
-{
-       struct pnfs_layout_segment *s;
-
-       if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
-               return false;
-
-       list_for_each_entry(s, &lo->plh_segs, pls_list)
-               if (s != lseg && test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
-                       return false;
-
-       return true;
-}
-
-static bool
-pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
-{
-       if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
-               return false;
-       lo->plh_return_iomode = 0;
-       pnfs_get_layout_hdr(lo);
-       clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
-       return true;
-}
-
-static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
-               struct pnfs_layout_hdr *lo, struct inode *inode)
-{
-       lo = lseg->pls_layout;
-       inode = lo->plh_inode;
-
-       spin_lock(&inode->i_lock);
-       if (pnfs_layout_need_return(lo, lseg)) {
-               nfs4_stateid stateid;
-               enum pnfs_iomode iomode;
-               bool send;
-
-               nfs4_stateid_copy(&stateid, &lo->plh_stateid);
-               iomode = lo->plh_return_iomode;
-               send = pnfs_prepare_layoutreturn(lo);
-               spin_unlock(&inode->i_lock);
-               if (send) {
-                       /* Send an async layoutreturn so we dont deadlock */
-                       pnfs_send_layoutreturn(lo, &stateid, iomode, false);
-               }
-       } else
-               spin_unlock(&inode->i_lock);
-}
-
 void
 pnfs_put_lseg(struct pnfs_layout_segment *lseg)
 {
@@ -410,15 +358,8 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
                atomic_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
 
-       /* Handle the case where refcount != 1 */
-       if (atomic_add_unless(&lseg->pls_refcount, -1, 1))
-               return;
-
        lo = lseg->pls_layout;
        inode = lo->plh_inode;
-       /* Do we need a layoutreturn? */
-       if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
-               pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
 
        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
@@ -937,6 +878,17 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
        rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
 }
 
+static bool
+pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
+{
+       if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
+               return false;
+       lo->plh_return_iomode = 0;
+       pnfs_get_layout_hdr(lo);
+       clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
+       return true;
+}
+
 static int
 pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
                       enum pnfs_iomode iomode, bool sync)
@@ -971,6 +923,48 @@ out:
        return status;
 }
 
+/* Return true if layoutreturn is needed */
+static bool
+pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
+{
+       struct pnfs_layout_segment *s;
+
+       if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+               return false;
+
+       /* Defer layoutreturn until all lsegs are done */
+       list_for_each_entry(s, &lo->plh_segs, pls_list) {
+               if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
+                       return false;
+       }
+
+       return true;
+}
+
+static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
+{
+       struct inode *inode = lo->plh_inode;
+
+       if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
+               return;
+       spin_lock(&inode->i_lock);
+       if (pnfs_layout_need_return(lo)) {
+               nfs4_stateid stateid;
+               enum pnfs_iomode iomode;
+               bool send;
+
+               nfs4_stateid_copy(&stateid, &lo->plh_stateid);
+               iomode = lo->plh_return_iomode;
+               send = pnfs_prepare_layoutreturn(lo);
+               spin_unlock(&inode->i_lock);
+               if (send) {
+                       /* Send an async layoutreturn so we don't deadlock */
+                       pnfs_send_layoutreturn(lo, &stateid, iomode, false);
+               }
+       } else
+               spin_unlock(&inode->i_lock);
+}
+
 /*
  * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
  * when the layout segment list is empty.
@@ -1091,7 +1085,7 @@ bool pnfs_roc(struct inode *ino)
 
        nfs4_stateid_copy(&stateid, &lo->plh_stateid);
        /* always send layoutreturn if being marked so */
-       if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+       if (test_and_clear_bit(NFS_LAYOUT_RETURN_REQUESTED,
                                   &lo->plh_flags))
                layoutreturn = pnfs_prepare_layoutreturn(lo);
 
@@ -1772,7 +1766,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
                        pnfs_set_plh_return_iomode(lo, return_range->iomode);
                        if (!mark_lseg_invalid(lseg, tmp_list))
                                remaining++;
-                       set_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
+                       set_bit(NFS_LAYOUT_RETURN_REQUESTED,
                                        &lo->plh_flags);
                }
        return remaining;
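
The refactoring above defers the LAYOUTRETURN to pnfs_put_layout_hdr() and gates the actual send on pnfs_prepare_layoutreturn(), whose test_and_set_bit() ensures only one caller issues it. A tiny userspace sketch of that "first caller wins" gate, with made-up names, is:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_flag return_in_flight = ATOMIC_FLAG_INIT;

/* Returns true only for the caller that gets to send the (hypothetical)
 * layoutreturn; later callers see that one is already in flight. */
static bool prepare_return(void)
{
        return !atomic_flag_test_and_set(&return_in_flight);
}

int main(void)
{
        printf("first caller sends:  %d\n", prepare_return());  /* prints 1 */
        printf("second caller sends: %d\n", prepare_return());  /* prints 0 */
        return 0;
}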
index 9f4e2a47f4aa4ff41a590178dde2ddf160ae244f..1ac1db5f6dadb6508cf8658a53385add279eb9fa 100644 (file)
@@ -94,8 +94,8 @@ enum {
        NFS_LAYOUT_RO_FAILED = 0,       /* get ro layout failed stop trying */
        NFS_LAYOUT_RW_FAILED,           /* get rw layout failed stop trying */
        NFS_LAYOUT_BULK_RECALL,         /* bulk recall affecting layout */
-       NFS_LAYOUT_RETURN,              /* Return this layout ASAP */
-       NFS_LAYOUT_RETURN_BEFORE_CLOSE, /* Return this layout before close */
+       NFS_LAYOUT_RETURN,              /* layoutreturn in progress */
+       NFS_LAYOUT_RETURN_REQUESTED,    /* Return this layout ASAP */
        NFS_LAYOUT_INVALID_STID,        /* layout stateid id is invalid */
        NFS_LAYOUT_FIRST_LAYOUTGET,     /* Serialize first layoutget */
 };
index dc8ebecf561866a2d6fcee729813528852be6313..195fe2668207a2bdaf689c8e93296503facec806 100644 (file)
 *
 */
 
+#include <crypto/hash.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/namei.h>
-#include <linux/crypto.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
 #include <linux/module.h>
@@ -104,29 +104,35 @@ static int
 nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
 {
        struct xdr_netobj cksum;
-       struct hash_desc desc;
-       struct scatterlist sg;
+       struct crypto_shash *tfm;
        int status;
 
        dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
                        clname->len, clname->data);
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-       desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(desc.tfm)) {
-               status = PTR_ERR(desc.tfm);
+       tfm = crypto_alloc_shash("md5", 0, 0);
+       if (IS_ERR(tfm)) {
+               status = PTR_ERR(tfm);
                goto out_no_tfm;
        }
 
-       cksum.len = crypto_hash_digestsize(desc.tfm);
+       cksum.len = crypto_shash_digestsize(tfm);
        cksum.data = kmalloc(cksum.len, GFP_KERNEL);
        if (cksum.data == NULL) {
                status = -ENOMEM;
                goto out;
        }
 
-       sg_init_one(&sg, clname->data, clname->len);
+       {
+               SHASH_DESC_ON_STACK(desc, tfm);
+
+               desc->tfm = tfm;
+               desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+               status = crypto_shash_digest(desc, clname->data, clname->len,
+                                            cksum.data);
+               shash_desc_zero(desc);
+       }
 
-       status = crypto_hash_digest(&desc, &sg, sg.length, cksum.data);
        if (status)
                goto out;
 
@@ -135,7 +141,7 @@ nfs4_make_rec_clidname(char *dname, const struct xdr_netobj *clname)
        status = 0;
 out:
        kfree(cksum.data);
-       crypto_free_hash(desc.tfm);
+       crypto_free_shash(tfm);
 out_no_tfm:
        return status;
 }
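
The nfsd hunk above replaces the legacy crypto_hash/hash_desc interface with a one-shot crypto_shash digest computed via SHASH_DESC_ON_STACK(). Purely as an illustration of the "hash the client name in one call" shape (nfsd itself uses the in-kernel API, not OpenSSL), a userspace analogue could be:

#include <openssl/evp.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *clname = "client.example.org";      /* example client identifier */
        unsigned char md[EVP_MAX_MD_SIZE];
        unsigned int len, i;

        /* One-shot digest: no separate init/update/final sequence needed. */
        if (!EVP_Digest(clname, strlen(clname), md, &len, EVP_md5(), NULL))
                return 1;

        for (i = 0; i < len; i++)
                printf("%02x", md[i]);
        printf("\n");
        return 0;
}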
index 794fd1587f34a3bc210d60770525a02f3cd59aba..5dcc5f5a842ea5eaf4a0ea471d87c54326c0fe16 100644 (file)
@@ -620,7 +620,7 @@ bail:
  * particularly interested in the aio/dio case.  We use the rw_lock DLM lock
  * to protect io on one node from truncation on another.
  */
-static void ocfs2_dio_end_io(struct kiocb *iocb,
+static int ocfs2_dio_end_io(struct kiocb *iocb,
                             loff_t offset,
                             ssize_t bytes,
                             void *private)
@@ -628,6 +628,9 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
        struct inode *inode = file_inode(iocb->ki_filp);
        int level;
 
+       if (bytes <= 0)
+               return 0;
+
        /* this io's submitter should not have unlocked this before we could */
        BUG_ON(!ocfs2_iocb_is_rw_locked(iocb));
 
@@ -644,6 +647,8 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
                level = ocfs2_iocb_rw_locked_level(iocb);
                ocfs2_rw_unlock(inode, level);
        }
+
+       return 0;
 }
 
 static int ocfs2_releasepage(struct page *page, gfp_t wait)
index a3cc6d2fc896cea05a8605dcc84e09b7840a5e1a..a76b9ea7722e6d7d00d8c0c58217a4a0387f0ff0 100644 (file)
@@ -1254,15 +1254,15 @@ static const struct file_operations o2hb_debug_fops = {
 
 void o2hb_exit(void)
 {
-       kfree(o2hb_db_livenodes);
-       kfree(o2hb_db_liveregions);
-       kfree(o2hb_db_quorumregions);
-       kfree(o2hb_db_failedregions);
        debugfs_remove(o2hb_debug_failedregions);
        debugfs_remove(o2hb_debug_quorumregions);
        debugfs_remove(o2hb_debug_liveregions);
        debugfs_remove(o2hb_debug_livenodes);
        debugfs_remove(o2hb_debug_dir);
+       kfree(o2hb_db_livenodes);
+       kfree(o2hb_db_liveregions);
+       kfree(o2hb_db_quorumregions);
+       kfree(o2hb_db_failedregions);
 }
 
 static struct dentry *o2hb_debug_create(const char *name, struct dentry *dir,
@@ -1438,13 +1438,15 @@ static void o2hb_region_release(struct config_item *item)
 
        kfree(reg->hr_slots);
 
-       kfree(reg->hr_db_regnum);
-       kfree(reg->hr_db_livenodes);
        debugfs_remove(reg->hr_debug_livenodes);
        debugfs_remove(reg->hr_debug_regnum);
        debugfs_remove(reg->hr_debug_elapsed_time);
        debugfs_remove(reg->hr_debug_pinned);
        debugfs_remove(reg->hr_debug_dir);
+       kfree(reg->hr_db_livenodes);
+       kfree(reg->hr_db_regnum);
+       kfree(reg->hr_debug_elapsed_time);
+       kfree(reg->hr_debug_pinned);
 
        spin_lock(&o2hb_live_lock);
        list_del(&reg->hr_all_item);
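
The o2hb changes above only reorder teardown: the debugfs entries are removed first, and the buffers they expose are freed afterwards, closing the window in which a concurrent reader could dereference freed memory (debugfs_remove() also waits for open users, which a plain pointer clear does not). The same ordering rule in miniature, with purely illustrative names:

#include <stdlib.h>

struct debug_blob {
        char *buf;              /* what an open debug file would read from */
};

static struct debug_blob *published;    /* readers reach the buffer through this */

/* 1. unpublish the entry so no new reader can find the buffer,
 * 2. only then free the buffer itself.
 * Reversing the two steps is the use-after-free the patch avoids. */
static void teardown(void)
{
        struct debug_blob *b = published;

        published = NULL;       /* the "debugfs_remove()" step */
        if (b) {
                free(b->buf);   /* the "kfree()" step, safe only after unpublishing */
                free(b);
        }
}

int main(void)
{
        published = calloc(1, sizeof(*published));
        if (published)
                published->buf = calloc(1, 64);
        teardown();
        return 0;
}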
index c5bdf02c213bd7ad5c086331a1ea8194bde40578..b94a425f0175fa84818d78742659c42293d96dba 100644 (file)
@@ -2367,6 +2367,8 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
                                                break;
                                        }
                                }
+                               dlm_lockres_clear_refmap_bit(dlm, res,
+                                               dead_node);
                                spin_unlock(&res->spinlock);
                                continue;
                        }
index 6cb019b7c6a83c4ec449baff12652e84b5fca06a..a52a2dbc064e2d87919f934504fb1f156c73d176 100644 (file)
@@ -2035,6 +2035,8 @@ DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_release_dquot);
 
 DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_acquire_dquot);
 
+DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_get_next_id);
+
 DEFINE_OCFS2_UINT_INT_EVENT(ocfs2_mark_dquot_dirty);
 
 /* End of trace events for fs/ocfs2/quota_global.c. */
index 9c9dd30bc94541fa89baed2b0ac8c2a3c8a72e31..91bc674203ed6c8d138b00f1edede8f11f2816c1 100644 (file)
@@ -860,6 +860,30 @@ out:
        return status;
 }
 
+static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+       int type = qid->type;
+       struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
+       int status = 0;
+
+       trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+       status = ocfs2_lock_global_qf(info, 0);
+       if (status < 0)
+               goto out;
+       status = ocfs2_qinfo_lock(info, 0);
+       if (status < 0)
+               goto out_global;
+       status = qtree_get_next_id(&info->dqi_gi, qid);
+       ocfs2_qinfo_unlock(info, 0);
+out_global:
+       ocfs2_unlock_global_qf(info, 0);
+out:
+       /* Avoid logging ENOENT since it just means there isn't a next ID */
+       if (status && status != -ENOENT)
+               mlog_errno(status);
+       return status;
+}
+
 static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
 {
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
@@ -968,4 +992,5 @@ const struct dquot_operations ocfs2_quota_operations = {
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
+       .get_next_id    = ocfs2_get_next_id,
 };
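
ocfs2_get_next_id() above wires ocfs2 into the quota layer's get_next_id operation, which backs the Q_GETNEXTQUOTA quotactl command for iterating over allocated quota IDs. A rough userspace sketch of that iteration follows; Q_GETNEXTQUOTA and struct if_nextdqblk are quoted from memory of linux/quota.h in this era and should be checked against the installed headers, and the device path is only an example.

#include <stdio.h>
#include <sys/quota.h>          /* on recent glibc this pulls in linux/quota.h */

static void dump_user_quotas(const char *blockdev)
{
        struct if_nextdqblk ndq;
        unsigned int id = 0;

        /* Each successful call returns the next allocated ID at or above 'id'. */
        while (quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), blockdev, id,
                        (caddr_t)&ndq) == 0) {
                printf("uid %u uses %llu bytes\n", ndq.dqb_id,
                       (unsigned long long)ndq.dqb_curspace);
                id = ndq.dqb_id + 1;
        }
}

int main(void)
{
        dump_user_quotas("/dev/sda1");
        return 0;
}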
diff --git a/fs/orangefs/Kconfig b/fs/orangefs/Kconfig
new file mode 100644 (file)
index 0000000..1554c02
--- /dev/null
@@ -0,0 +1,6 @@
+config ORANGEFS_FS
+       tristate "ORANGEFS (Powered by PVFS) support"
+       select FS_POSIX_ACL
+       help
+          Orange is a parallel file system designed for use on high end
+          computing (HEC) systems.
diff --git a/fs/orangefs/Makefile b/fs/orangefs/Makefile
new file mode 100644 (file)
index 0000000..a9d6a96
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for the ORANGEFS filesystem.
+#
+
+obj-$(CONFIG_ORANGEFS_FS) += orangefs.o
+
+orangefs-objs := acl.o file.o orangefs-cache.o orangefs-utils.o xattr.o \
+                dcache.o inode.o orangefs-sysfs.o orangefs-mod.o super.o \
+                devorangefs-req.o namei.o symlink.o dir.o orangefs-bufmap.o \
+                orangefs-debugfs.o waitqueue.o
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
new file mode 100644 (file)
index 0000000..03f89db
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/fs_struct.h>
+
+struct posix_acl *orangefs_get_acl(struct inode *inode, int type)
+{
+       struct posix_acl *acl;
+       int ret;
+       char *key = NULL, *value = NULL;
+
+       switch (type) {
+       case ACL_TYPE_ACCESS:
+               key = ORANGEFS_XATTR_NAME_ACL_ACCESS;
+               break;
+       case ACL_TYPE_DEFAULT:
+               key = ORANGEFS_XATTR_NAME_ACL_DEFAULT;
+               break;
+       default:
+               gossip_err("orangefs_get_acl: bogus value of type %d\n", type);
+               return ERR_PTR(-EINVAL);
+       }
+       /*
+        * Rather than incurring a network call just to determine the exact
+        * length of the attribute, I just allocate a max length to save on
+        * the network call. Conceivably, we could pass NULL to
+        * orangefs_inode_getxattr() to probe the length of the value, but
+        * I don't do that for now.
+        */
+       value = kmalloc(ORANGEFS_MAX_XATTR_VALUELEN, GFP_KERNEL);
+       if (value == NULL)
+               return ERR_PTR(-ENOMEM);
+
+       gossip_debug(GOSSIP_ACL_DEBUG,
+                    "inode %pU, key %s, type %d\n",
+                    get_khandle_from_ino(inode),
+                    key,
+                    type);
+       ret = orangefs_inode_getxattr(inode,
+                                  "",
+                                  key,
+                                  value,
+                                  ORANGEFS_MAX_XATTR_VALUELEN);
+       /* if the key exists, convert it to an in-memory rep */
+       if (ret > 0) {
+               acl = posix_acl_from_xattr(&init_user_ns, value, ret);
+       } else if (ret == -ENODATA || ret == -ENOSYS) {
+               acl = NULL;
+       } else {
+               gossip_err("inode %pU retrieving acl's failed with error %d\n",
+                          get_khandle_from_ino(inode),
+                          ret);
+               acl = ERR_PTR(ret);
+       }
+       /* kfree(NULL) is safe, so don't worry if value ever got used */
+       kfree(value);
+       return acl;
+}
+
+int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       int error = 0;
+       void *value = NULL;
+       size_t size = 0;
+       const char *name = NULL;
+
+       switch (type) {
+       case ACL_TYPE_ACCESS:
+               name = ORANGEFS_XATTR_NAME_ACL_ACCESS;
+               if (acl) {
+                       umode_t mode = inode->i_mode;
+                       /*
+                        * can we represent this with the traditional file
+                        * mode permission bits?
+                        */
+                       error = posix_acl_equiv_mode(acl, &mode);
+                       if (error < 0) {
+                               gossip_err("%s: posix_acl_equiv_mode err: %d\n",
+                                          __func__,
+                                          error);
+                               return error;
+                       }
+
+                       if (inode->i_mode != mode)
+                               SetModeFlag(orangefs_inode);
+                       inode->i_mode = mode;
+                       mark_inode_dirty_sync(inode);
+                       if (error == 0)
+                               acl = NULL;
+               }
+               break;
+       case ACL_TYPE_DEFAULT:
+               name = ORANGEFS_XATTR_NAME_ACL_DEFAULT;
+               break;
+       default:
+               gossip_err("%s: invalid type %d!\n", __func__, type);
+               return -EINVAL;
+       }
+
+       gossip_debug(GOSSIP_ACL_DEBUG,
+                    "%s: inode %pU, key %s type %d\n",
+                    __func__, get_khandle_from_ino(inode),
+                    name,
+                    type);
+
+       if (acl) {
+               size = posix_acl_xattr_size(acl->a_count);
+               value = kmalloc(size, GFP_KERNEL);
+               if (!value)
+                       return -ENOMEM;
+
+               error = posix_acl_to_xattr(&init_user_ns, acl, value, size);
+               if (error < 0)
+                       goto out;
+       }
+
+       gossip_debug(GOSSIP_ACL_DEBUG,
+                    "%s: name %s, value %p, size %zd, acl %p\n",
+                    __func__, name, value, size, acl);
+       /*
+        * Go ahead and set the extended attribute now. NOTE: if acl is
+        * NULL, then value will be NULL and size will be 0, which
+        * translates to a removexattr. However, we don't want removexattr
+        * to complain if the attribute does not exist.
+        */
+       error = orangefs_inode_setxattr(inode, "", name, value, size, 0);
+
+out:
+       kfree(value);
+       if (!error)
+               set_cached_acl(inode, type, acl);
+       return error;
+}
+
+int orangefs_init_acl(struct inode *inode, struct inode *dir)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct posix_acl *default_acl, *acl;
+       umode_t mode = inode->i_mode;
+       int error = 0;
+
+       ClearModeFlag(orangefs_inode);
+
+       error = posix_acl_create(dir, &mode, &default_acl, &acl);
+       if (error)
+               return error;
+
+       if (default_acl) {
+               error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
+               posix_acl_release(default_acl);
+       }
+
+       if (acl) {
+               if (!error)
+                       error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+               posix_acl_release(acl);
+       }
+
+       /* If mode of the inode was changed, then do a forcible ->setattr */
+       if (mode != inode->i_mode) {
+               SetModeFlag(orangefs_inode);
+               inode->i_mode = mode;
+               orangefs_flush_inode(inode);
+       }
+
+       return error;
+}
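
orangefs_set_acl() above leans on orangefs_inode_setxattr() treating a NULL value of size 0 as a remove. From userspace the same choice is explicit; a small sketch with example names:

#include <stddef.h>
#include <sys/xattr.h>

/* Set an attribute when a value is supplied, drop it when value is NULL,
 * mirroring the "NULL value means removexattr" convention noted above. */
static int set_or_clear_xattr(const char *path, const char *name,
                              const void *value, size_t len)
{
        if (value)
                return setxattr(path, name, value, len, 0);
        return removexattr(path, name);
}

int main(void)
{
        /* example: drop a user xattr from a test file (errors ignored) */
        set_or_clear_xattr("/tmp/example", "user.example", NULL, 0);
        return 0;
}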
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
new file mode 100644 (file)
index 0000000..a6911db
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Implementation of dentry (directory cache) functions.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+
+/* Returns 1 if dentry can still be trusted, else 0. */
+static int orangefs_revalidate_lookup(struct dentry *dentry)
+{
+       struct dentry *parent_dentry = dget_parent(dentry);
+       struct inode *parent_inode = parent_dentry->d_inode;
+       struct orangefs_inode_s *parent = ORANGEFS_I(parent_inode);
+       struct inode *inode = dentry->d_inode;
+       struct orangefs_kernel_op_s *new_op;
+       int ret = 0;
+       int err = 0;
+
+       gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: attempting lookup.\n", __func__);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP);
+       if (!new_op)
+               goto out_put_parent;
+
+       new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW;
+       new_op->upcall.req.lookup.parent_refn = parent->refn;
+       strncpy(new_op->upcall.req.lookup.d_name,
+               dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+
+       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                    "%s:%s:%d interrupt flag [%d]\n",
+                    __FILE__,
+                    __func__,
+                    __LINE__,
+                    get_interruptible_flag(parent_inode));
+
+       err = service_operation(new_op, "orangefs_lookup",
+                       get_interruptible_flag(parent_inode));
+
+       /* Positive dentry: reject if error or not the same inode. */
+       if (inode) {
+               if (err) {
+                       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                           "%s:%s:%d lookup failure.\n",
+                           __FILE__, __func__, __LINE__);
+                       goto out_drop;
+               }
+               if (!match_handle(new_op->downcall.resp.lookup.refn.khandle,
+                   inode)) {
+                       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                           "%s:%s:%d no match.\n",
+                           __FILE__, __func__, __LINE__);
+                       goto out_drop;
+               }
+
+       /* Negative dentry: reject if success or error other than ENOENT. */
+       } else {
+               gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: negative dentry.\n",
+                   __func__);
+               if (!err || err != -ENOENT) {
+                       if (new_op->downcall.status != 0)
+                               gossip_debug(GOSSIP_DCACHE_DEBUG,
+                                   "%s:%s:%d lookup failure.\n",
+                                   __FILE__, __func__, __LINE__);
+                       goto out_drop;
+               }
+       }
+
+       ret = 1;
+out_release_op:
+       op_release(new_op);
+out_put_parent:
+       dput(parent_dentry);
+       return ret;
+out_drop:
+       gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d revalidate failed\n",
+           __FILE__, __func__, __LINE__);
+       d_drop(dentry);
+       goto out_release_op;
+}
+
+/*
+ * Verify that dentry is valid.
+ *
+ * Should return 1 if dentry can still be trusted, else 0.
+ */
+static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+       int ret;
+
+       if (flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       gossip_debug(GOSSIP_DCACHE_DEBUG, "%s: called on dentry %p.\n",
+                    __func__, dentry);
+
+       /* skip root handle lookups. */
+       if (dentry->d_inode && is_root_handle(dentry->d_inode))
+               return 1;
+
+       /*
+        * If this passes, the positive dentry still exists or the negative
+        * dentry still does not exist.
+        */
+       if (!orangefs_revalidate_lookup(dentry)) {
+               d_drop(dentry);
+               return 0;
+       }
+
+       /* We do not need to continue with negative dentries. */
+       if (!dentry->d_inode)
+               goto out;
+
+       /* Now we must perform a getattr to validate the inode contents. */
+
+       ret = orangefs_inode_getattr(dentry->d_inode,
+           ORANGEFS_ATTR_SYS_TYPE|ORANGEFS_ATTR_SYS_LNK_TARGET, 1);
+       if (ret < 0) {
+               gossip_debug(GOSSIP_DCACHE_DEBUG, "%s:%s:%d getattr failure.\n",
+                   __FILE__, __func__, __LINE__);
+               d_drop(dentry);
+               return 0;
+       }
+       if (ret == 0) {
+               d_drop(dentry);
+               return 0;
+       }
+
+out:
+       gossip_debug(GOSSIP_DCACHE_DEBUG,
+           "%s: negative dentry or positive dentry and inode valid.\n",
+           __func__);
+       return 1;
+}
+
+const struct dentry_operations orangefs_dentry_operations = {
+       .d_revalidate = orangefs_d_revalidate,
+};
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
new file mode 100644 (file)
index 0000000..37278f5
--- /dev/null
@@ -0,0 +1,933 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * Changes by Acxiom Corporation to add protocol version to kernel
+ * communication, Copyright Acxiom Corporation, 2005.
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-dev-proto.h"
+#include "orangefs-bufmap.h"
+
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+/* this file implements the /dev/pvfs2-req device node */
+
+static int open_access_count;
+
+#define DUMP_DEVICE_ERROR()                                                   \
+do {                                                                          \
+       gossip_err("*****************************************************\n");\
+       gossip_err("ORANGEFS Device Error:  You cannot open the device file ");  \
+       gossip_err("\n/dev/%s more than once.  Please make sure that\nthere " \
+                  "are no ", ORANGEFS_REQDEVICE_NAME);                          \
+       gossip_err("instances of a program using this device\ncurrently "     \
+                  "running. (You must verify this!)\n");                     \
+       gossip_err("For example, you can use the lsof program as follows:\n");\
+       gossip_err("'lsof | grep %s' (run this as root)\n",                   \
+                  ORANGEFS_REQDEVICE_NAME);                                     \
+       gossip_err("  open_access_count = %d\n", open_access_count);          \
+       gossip_err("*****************************************************\n");\
+} while (0)
+
+static int hash_func(__u64 tag, int table_size)
+{
+       return do_div(tag, (unsigned int)table_size);
+}
+
+static void orangefs_devreq_add_op(struct orangefs_kernel_op_s *op)
+{
+       int index = hash_func(op->tag, hash_table_size);
+
+       list_add_tail(&op->list, &htable_ops_in_progress[index]);
+}
+
+static struct orangefs_kernel_op_s *orangefs_devreq_remove_op(__u64 tag)
+{
+       struct orangefs_kernel_op_s *op, *next;
+       int index;
+
+       index = hash_func(tag, hash_table_size);
+
+       spin_lock(&htable_ops_in_progress_lock);
+       list_for_each_entry_safe(op,
+                                next,
+                                &htable_ops_in_progress[index],
+                                list) {
+               if (op->tag == tag && !op_state_purged(op)) {
+                       list_del_init(&op->list);
+                       get_op(op); /* increase ref count. */
+                       spin_unlock(&htable_ops_in_progress_lock);
+                       return op;
+               }
+       }
+
+       spin_unlock(&htable_ops_in_progress_lock);
+       return NULL;
+}
+
+static int orangefs_devreq_open(struct inode *inode, struct file *file)
+{
+       int ret = -EINVAL;
+
+       if (!(file->f_flags & O_NONBLOCK)) {
+               gossip_err("%s: device cannot be opened in blocking mode\n",
+                          __func__);
+               goto out;
+       }
+       ret = -EACCES;
+       gossip_debug(GOSSIP_DEV_DEBUG, "client-core: opening device\n");
+       mutex_lock(&devreq_mutex);
+
+       if (open_access_count == 0) {
+               open_access_count = 1;
+               ret = 0;
+       } else {
+               DUMP_DEVICE_ERROR();
+       }
+       mutex_unlock(&devreq_mutex);
+
+out:
+
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "pvfs2-client-core: open device complete (ret = %d)\n",
+                    ret);
+       return ret;
+}
+
+/* Function for read() callers into the device */
+static ssize_t orangefs_devreq_read(struct file *file,
+                                char __user *buf,
+                                size_t count, loff_t *offset)
+{
+       struct orangefs_kernel_op_s *op, *temp;
+       __s32 proto_ver = ORANGEFS_KERNEL_PROTO_VERSION;
+       static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
+       struct orangefs_kernel_op_s *cur_op = NULL;
+       unsigned long ret;
+
+       /* We do not support blocking IO. */
+       if (!(file->f_flags & O_NONBLOCK)) {
+               gossip_err("%s: blocking read from client-core.\n",
+                          __func__);
+               return -EINVAL;
+       }
+
+       /*
+        * The client will do an ioctl to find MAX_DEV_REQ_UPSIZE, then
+        * always read with that size buffer.
+        */
+       if (count != MAX_DEV_REQ_UPSIZE) {
+               gossip_err("orangefs: client-core tried to read wrong size\n");
+               return -EINVAL;
+       }
+
+restart:
+       /* Get next op (if any) from top of list. */
+       spin_lock(&orangefs_request_list_lock);
+       list_for_each_entry_safe(op, temp, &orangefs_request_list, list) {
+               __s32 fsid;
+               /* This lock is held past the end of the loop when we break. */
+               spin_lock(&op->lock);
+               if (unlikely(op_state_purged(op))) {
+                       spin_unlock(&op->lock);
+                       continue;
+               }
+
+               fsid = fsid_of_op(op);
+               if (fsid != ORANGEFS_FS_ID_NULL) {
+                       int ret;
+                       /* Skip ops whose filesystem needs to be mounted. */
+                       ret = fs_mount_pending(fsid);
+                       if (ret == 1) {
+                               gossip_debug(GOSSIP_DEV_DEBUG,
+                                   "%s: mount pending, skipping op tag "
+                                   "%llu %s\n",
+                                   __func__,
+                                   llu(op->tag),
+                                   get_opname_string(op));
+                               spin_unlock(&op->lock);
+                               continue;
+                       /*
+                        * Skip ops whose filesystem we don't know about unless
+                        * it is being mounted.
+                        */
+                       /* XXX: is there a better way to detect this? */
+                       } else if (ret == -1 &&
+                                  !(op->upcall.type ==
+                                       ORANGEFS_VFS_OP_FS_MOUNT ||
+                                    op->upcall.type ==
+                                       ORANGEFS_VFS_OP_GETATTR)) {
+                               gossip_debug(GOSSIP_DEV_DEBUG,
+                                   "orangefs: skipping op tag %llu %s\n",
+                                   llu(op->tag), get_opname_string(op));
+                               gossip_err(
+                                   "orangefs: ERROR: fs_mount_pending %d\n",
+                                   fsid);
+                               spin_unlock(&op->lock);
+                               continue;
+                       }
+               }
+               /*
+                * Either this op does not pertain to a filesystem, is mounting
+                * a filesystem, or pertains to a mounted filesystem. Let it
+                * through.
+                */
+               cur_op = op;
+               break;
+       }
+
+       /*
+        * At this point we either have a valid op and can continue or have not
+        * found an op and must ask the client to try again later.
+        */
+       if (!cur_op) {
+               spin_unlock(&orangefs_request_list_lock);
+               return -EAGAIN;
+       }
+
+       gossip_debug(GOSSIP_DEV_DEBUG, "orangefs: reading op tag %llu %s\n",
+                    llu(cur_op->tag), get_opname_string(cur_op));
+
+       /*
+        * Such an op should never be on the list in the first place. If so, we
+        * will abort.
+        */
+       if (op_state_in_progress(cur_op) || op_state_serviced(cur_op)) {
+               gossip_err("orangefs: ERROR: Current op already queued.\n");
+               list_del(&cur_op->list);
+               spin_unlock(&cur_op->lock);
+               spin_unlock(&orangefs_request_list_lock);
+               return -EAGAIN;
+       }
+       list_del_init(&cur_op->list);
+       get_op(op);
+       spin_unlock(&orangefs_request_list_lock);
+
+       spin_unlock(&cur_op->lock);
+
+       /* Push the upcall out. */
+       ret = copy_to_user(buf, &proto_ver, sizeof(__s32));
+       if (ret != 0)
+               goto error;
+       ret = copy_to_user(buf+sizeof(__s32), &magic, sizeof(__s32));
+       if (ret != 0)
+               goto error;
+       ret = copy_to_user(buf+2 * sizeof(__s32), &cur_op->tag, sizeof(__u64));
+       if (ret != 0)
+               goto error;
+       ret = copy_to_user(buf+2*sizeof(__s32)+sizeof(__u64), &cur_op->upcall,
+                          sizeof(struct orangefs_upcall_s));
+       if (ret != 0)
+               goto error;
+
+       spin_lock(&htable_ops_in_progress_lock);
+       spin_lock(&cur_op->lock);
+       if (unlikely(op_state_given_up(cur_op))) {
+               spin_unlock(&cur_op->lock);
+               spin_unlock(&htable_ops_in_progress_lock);
+               op_release(cur_op);
+               goto restart;
+       }
+
+       /*
+        * Set the operation to be in progress and move it between lists since
+        * it has been sent to the client.
+        */
+       set_op_state_inprogress(cur_op);
+       orangefs_devreq_add_op(cur_op);
+       spin_unlock(&cur_op->lock);
+       spin_unlock(&htable_ops_in_progress_lock);
+       op_release(cur_op);
+
+       /* The client only asks to read one size buffer. */
+       return MAX_DEV_REQ_UPSIZE;
+error:
+       /*
+        * We were unable to copy the op data to the client. Put the op back
+        * in the list. If the client has crashed, the op will be purged
+        * later when the device is released.
+        */
+       gossip_err("orangefs: Failed to copy data to user space\n");
+       spin_lock(&orangefs_request_list_lock);
+       spin_lock(&cur_op->lock);
+       if (likely(!op_state_given_up(cur_op))) {
+               set_op_state_waiting(cur_op);
+               list_add(&cur_op->list, &orangefs_request_list);
+       }
+       spin_unlock(&cur_op->lock);
+       spin_unlock(&orangefs_request_list_lock);
+       op_release(cur_op);
+       return -EFAULT;
+}
+
+/*
+ * Function for writev() callers into the device.
+ *
+ * Userspace should have written:
+ *  - __u32 version
+ *  - __u32 magic
+ *  - __u64 tag
+ *  - struct orangefs_downcall_s
+ *  - trailer buffer (in the case of READDIR operations)
+ */
+static ssize_t orangefs_devreq_write_iter(struct kiocb *iocb,
+                                     struct iov_iter *iter)
+{
+       ssize_t ret;
+       struct orangefs_kernel_op_s *op = NULL;
+       struct {
+               __u32 version;
+               __u32 magic;
+               __u64 tag;
+       } head;
+       int total = ret = iov_iter_count(iter);
+       int n;
+       int downcall_size = sizeof(struct orangefs_downcall_s);
+       int head_size = sizeof(head);
+
+       gossip_debug(GOSSIP_DEV_DEBUG, "%s: total:%d: ret:%zd:\n",
+                    __func__,
+                    total,
+                    ret);
+
+       if (total < MAX_DEV_REQ_DOWNSIZE) {
+               gossip_err("%s: total:%d: must be at least:%u:\n",
+                          __func__,
+                          total,
+                          (unsigned int) MAX_DEV_REQ_DOWNSIZE);
+               return -EFAULT;
+       }
+
+       n = copy_from_iter(&head, head_size, iter);
+       if (n < head_size) {
+               gossip_err("%s: failed to copy head.\n", __func__);
+               return -EFAULT;
+       }
+
+       if (head.version < ORANGEFS_MINIMUM_USERSPACE_VERSION) {
+               gossip_err("%s: userspace claims version"
+                          "%d, minimum version required: %d.\n",
+                          __func__,
+                          head.version,
+                          ORANGEFS_MINIMUM_USERSPACE_VERSION);
+               return -EPROTO;
+       }
+
+       if (head.magic != ORANGEFS_DEVREQ_MAGIC) {
+               gossip_err("Error: Device magic number does not match.\n");
+               return -EPROTO;
+       }
+
+       op = orangefs_devreq_remove_op(head.tag);
+       if (!op) {
+               gossip_err("WARNING: No one's waiting for tag %llu\n",
+                          llu(head.tag));
+               return ret;
+       }
+
+       n = copy_from_iter(&op->downcall, downcall_size, iter);
+       if (n != downcall_size) {
+               gossip_err("%s: failed to copy downcall.\n", __func__);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+       if (op->downcall.status)
+               goto wakeup;
+
+       /*
+        * We've successfully peeled off the head and the downcall. 
+        * Something has gone awry if total doesn't equal the
+        * sum of head_size, downcall_size and trailer_size.
+        */
+       if ((head_size + downcall_size + op->downcall.trailer_size) != total) {
+               gossip_err("%s: funky write, head_size:%d"
+                          ": downcall_size:%d: trailer_size:%lld"
+                          ": total size:%d:\n",
+                          __func__,
+                          head_size,
+                          downcall_size,
+                          op->downcall.trailer_size,
+                          total);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+       /* Only READDIR operations should have trailers. */
+       if ((op->downcall.type != ORANGEFS_VFS_OP_READDIR) &&
+           (op->downcall.trailer_size != 0)) {
+               gossip_err("%s: %x operation with trailer.",
+                          __func__,
+                          op->downcall.type);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+       /* READDIR operations should always have trailers. */
+       if ((op->downcall.type == ORANGEFS_VFS_OP_READDIR) &&
+           (op->downcall.trailer_size == 0)) {
+               gossip_err("%s: %x operation with no trailer.",
+                          __func__,
+                          op->downcall.type);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+       if (op->downcall.type != ORANGEFS_VFS_OP_READDIR)
+               goto wakeup;
+
+       op->downcall.trailer_buf =
+               vmalloc(op->downcall.trailer_size);
+       if (op->downcall.trailer_buf == NULL) {
+               gossip_err("%s: failed trailer vmalloc.\n",
+                          __func__);
+               ret = -ENOMEM;
+               goto Broken;
+       }
+       memset(op->downcall.trailer_buf, 0, op->downcall.trailer_size);
+       n = copy_from_iter(op->downcall.trailer_buf,
+                          op->downcall.trailer_size,
+                          iter);
+       if (n != op->downcall.trailer_size) {
+               gossip_err("%s: failed to copy trailer.\n", __func__);
+               vfree(op->downcall.trailer_buf);
+               ret = -EFAULT;
+               goto Broken;
+       }
+
+wakeup:
+       /*
+        * tell the vfs op waiting on a waitqueue
+        * that this op is done
+        */
+       spin_lock(&op->lock);
+       if (unlikely(op_state_given_up(op))) {
+               spin_unlock(&op->lock);
+               goto out;
+       }
+       set_op_state_serviced(op);
+       spin_unlock(&op->lock);
+
+       /*
+        * If this operation is an I/O operation we need to wait
+        * for all data to be copied before we can return to avoid
+        * buffer corruption and races that can pull the buffers
+        * out from under us.
+        *
+        * Essentially we're synchronizing with other parts of the
+        * vfs implicitly by not allowing the user space
+        * application reading/writing this device to return until
+        * the buffers are done being used.
+        */
+       if (op->downcall.type == ORANGEFS_VFS_OP_FILE_IO) {
+               long n = wait_for_completion_interruptible_timeout(&op->done,
+                                                       op_timeout_secs * HZ);
+               if (unlikely(n < 0)) {
+                       gossip_debug(GOSSIP_DEV_DEBUG,
+                               "%s: signal on I/O wait, aborting\n",
+                               __func__);
+               } else if (unlikely(n == 0)) {
+                       gossip_debug(GOSSIP_DEV_DEBUG,
+                               "%s: timed out.\n",
+                               __func__);
+               }
+       }
+out:
+       op_release(op);
+       return ret;
+
+Broken:
+       spin_lock(&op->lock);
+       if (!op_state_given_up(op)) {
+               op->downcall.status = ret;
+               set_op_state_serviced(op);
+       }
+       spin_unlock(&op->lock);
+       goto out;
+}
+
+/* Returns whether any FS are still pending remounted */
+static int mark_all_pending_mounts(void)
+{
+       int unmounted = 1;
+       struct orangefs_sb_info_s *orangefs_sb = NULL;
+
+       spin_lock(&orangefs_superblocks_lock);
+       list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
+               /* All of these file systems require a remount */
+               orangefs_sb->mount_pending = 1;
+               unmounted = 0;
+       }
+       spin_unlock(&orangefs_superblocks_lock);
+       return unmounted;
+}
+
+/*
+ * Determine if a given file system needs to be remounted or not
+ *  Returns -1 on error
+ *           0 if already mounted
+ *           1 if needs remount
+ */
+int fs_mount_pending(__s32 fsid)
+{
+       int mount_pending = -1;
+       struct orangefs_sb_info_s *orangefs_sb = NULL;
+
+       spin_lock(&orangefs_superblocks_lock);
+       list_for_each_entry(orangefs_sb, &orangefs_superblocks, list) {
+               if (orangefs_sb->fs_id == fsid) {
+                       mount_pending = orangefs_sb->mount_pending;
+                       break;
+               }
+       }
+       spin_unlock(&orangefs_superblocks_lock);
+       return mount_pending;
+}
+
+/*
+ * NOTE: gets called when the last reference to this device is dropped.
+ * Using the open_access_count variable, we enforce a reference count
+ * on this file so that it can be opened by only one process at a time.
+ * The devreq_mutex is used to make sure all I/O has completed
+ * before we call orangefs_bufmap_finalize, and to guard similar
+ * tricky situations.
+ */
+static int orangefs_devreq_release(struct inode *inode, struct file *file)
+{
+       int unmounted = 0;
+
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "%s:pvfs2-client-core: exiting, closing device\n",
+                    __func__);
+
+       mutex_lock(&devreq_mutex);
+       if (orangefs_get_bufmap_init())
+               orangefs_bufmap_finalize();
+
+       open_access_count = -1;
+
+       unmounted = mark_all_pending_mounts();
+       gossip_debug(GOSSIP_DEV_DEBUG, "ORANGEFS Device Close: Filesystem(s) %s\n",
+                    (unmounted ? "UNMOUNTED" : "MOUNTED"));
+
+       /*
+        * Walk through the list of ops in the request list, mark them
+        * as purged and wake them up.
+        */
+       purge_waiting_ops();
+       /*
+        * Walk through the hash table of in progress operations; mark
+        * them as purged and wake them up
+        */
+       purge_inprogress_ops();
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "pvfs2-client-core: device close complete\n");
+       open_access_count = 0;
+       mutex_unlock(&devreq_mutex);
+       return 0;
+}
+
+int is_daemon_in_service(void)
+{
+       int in_service;
+
+       /*
+        * Check whether the client-core is alive, based on the access
+        * count we maintain on the device.
+        */
+       mutex_lock(&devreq_mutex);
+       in_service = open_access_count == 1 ? 0 : -EIO;
+       mutex_unlock(&devreq_mutex);
+       return in_service;
+}
+
+static inline long check_ioctl_command(unsigned int command)
+{
+       /* Check for valid ioctl codes */
+       if (_IOC_TYPE(command) != ORANGEFS_DEV_MAGIC) {
+               gossip_err("device ioctl magic numbers don't match! Did you rebuild pvfs2-client-core/libpvfs2? [cmd %x, magic %x != %x]\n",
+                       command,
+                       _IOC_TYPE(command),
+                       ORANGEFS_DEV_MAGIC);
+               return -EINVAL;
+       }
+       /* and valid ioctl commands */
+       if (_IOC_NR(command) >= ORANGEFS_DEV_MAXNR || _IOC_NR(command) <= 0) {
+               gossip_err("Invalid ioctl command number [%d >= %d]\n",
+                          _IOC_NR(command), ORANGEFS_DEV_MAXNR);
+               return -ENOIOCTLCMD;
+       }
+       return 0;
+}
+
+static long dispatch_ioctl_command(unsigned int command, unsigned long arg)
+{
+       static __s32 magic = ORANGEFS_DEVREQ_MAGIC;
+       static __s32 max_up_size = MAX_DEV_REQ_UPSIZE;
+       static __s32 max_down_size = MAX_DEV_REQ_DOWNSIZE;
+       struct ORANGEFS_dev_map_desc user_desc;
+       int ret = 0;
+       struct dev_mask_info_s mask_info = { 0 };
+       struct dev_mask2_info_s mask2_info = { 0, 0 };
+       int upstream_kmod = 1;
+       struct list_head *tmp = NULL;
+       struct orangefs_sb_info_s *orangefs_sb = NULL;
+
+       /* mtmoore: add locking here */
+
+       switch (command) {
+       case ORANGEFS_DEV_GET_MAGIC:
+               return ((put_user(magic, (__s32 __user *) arg) == -EFAULT) ?
+                       -EIO :
+                       0);
+       case ORANGEFS_DEV_GET_MAX_UPSIZE:
+               return ((put_user(max_up_size,
+                                 (__s32 __user *) arg) == -EFAULT) ?
+                                       -EIO :
+                                       0);
+       case ORANGEFS_DEV_GET_MAX_DOWNSIZE:
+               return ((put_user(max_down_size,
+                                 (__s32 __user *) arg) == -EFAULT) ?
+                                       -EIO :
+                                       0);
+       case ORANGEFS_DEV_MAP:
+               ret = copy_from_user(&user_desc,
+                                    (struct ORANGEFS_dev_map_desc __user *)
+                                    arg,
+                                    sizeof(struct ORANGEFS_dev_map_desc));
+               if (orangefs_get_bufmap_init()) {
+                       return -EINVAL;
+               } else {
+                       return ret ?
+                              -EIO :
+                              orangefs_bufmap_initialize(&user_desc);
+               }
+       case ORANGEFS_DEV_REMOUNT_ALL:
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                            "%s: got ORANGEFS_DEV_REMOUNT_ALL\n",
+                            __func__);
+
+               /*
+                * remount all mounted orangefs volumes to regain the lost
+                * dynamic mount tables (if any) -- NOTE: this is done
+                * without keeping the superblock list locked due to the
+                * upcall/downcall waiting.  Also, the request semaphore is
+                * used to ensure that no operations will be serviced until
+                * all of the remounts are serviced (so that ops issued
+                * between remounts do not fail).
+                */
+               ret = mutex_lock_interruptible(&request_mutex);
+               if (ret < 0)
+                       return ret;
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                            "%s: priority remount in progress\n",
+                            __func__);
+               list_for_each(tmp, &orangefs_superblocks) {
+                       orangefs_sb =
+                               list_entry(tmp,
+                                          struct orangefs_sb_info_s,
+                                          list);
+                       if (orangefs_sb && (orangefs_sb->sb)) {
+                               gossip_debug(GOSSIP_DEV_DEBUG,
+                                            "%s: Remounting SB %p\n",
+                                            __func__,
+                                            orangefs_sb);
+
+                               ret = orangefs_remount(orangefs_sb->sb);
+                               if (ret) {
+                                       gossip_debug(GOSSIP_DEV_DEBUG,
+                                                    "SB %p remount failed\n",
+                                                    orangefs_sb);
+                                       break;
+                               }
+                       }
+               }
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                            "%s: priority remount complete\n",
+                            __func__);
+               mutex_unlock(&request_mutex);
+               return ret;
+
+       case ORANGEFS_DEV_UPSTREAM:
+               ret = copy_to_user((void __user *)arg,
+                                   &upstream_kmod,
+                                   sizeof(upstream_kmod));
+
+               if (ret != 0)
+                       return -EIO;
+               else
+                       return ret;
+
+       case ORANGEFS_DEV_CLIENT_MASK:
+               ret = copy_from_user(&mask2_info,
+                                    (void __user *)arg,
+                                    sizeof(struct dev_mask2_info_s));
+
+               if (ret != 0)
+                       return -EIO;
+
+               client_debug_mask.mask1 = mask2_info.mask1_value;
+               client_debug_mask.mask2 = mask2_info.mask2_value;
+
+               pr_info("%s: client debug mask has been been received "
+                       ":%llx: :%llx:\n",
+                       __func__,
+                       (unsigned long long)client_debug_mask.mask1,
+                       (unsigned long long)client_debug_mask.mask2);
+
+               return ret;
+
+       case ORANGEFS_DEV_CLIENT_STRING:
+               ret = copy_from_user(&client_debug_array_string,
+                                    (void __user *)arg,
+                                    ORANGEFS_MAX_DEBUG_STRING_LEN);
+               if (ret != 0) {
+                       pr_info("%s: CLIENT_STRING: copy_from_user failed\n",
+                               __func__);
+                       return -EIO;
+               }
+
+               pr_info("%s: client debug array string has been received.\n",
+                       __func__);
+
+               if (!help_string_initialized) {
+
+                       /* Free the "we don't know yet" default string... */
+                       kfree(debug_help_string);
+
+                       /* build a proper debug help string */
+                       if (orangefs_prepare_debugfs_help_string(0)) {
+                               gossip_err("%s: no debug help string \n",
+                                          __func__);
+                               return -EIO;
+                       }
+
+                       /* Replace the boilerplate boot-time debug-help file. */
+                       debugfs_remove(help_file_dentry);
+
+                       help_file_dentry =
+                               debugfs_create_file(
+                                       ORANGEFS_KMOD_DEBUG_HELP_FILE,
+                                       0444,
+                                       debug_dir,
+                                       debug_help_string,
+                                       &debug_help_fops);
+
+                       if (!help_file_dentry) {
+                               gossip_err("%s: debugfs_create_file failed for"
+                                          " :%s:!\n",
+                                          __func__,
+                                          ORANGEFS_KMOD_DEBUG_HELP_FILE);
+                               return -EIO;
+                       }
+               }
+
+               debug_mask_to_string(&client_debug_mask, 1);
+
+               debugfs_remove(client_debug_dentry);
+
+               orangefs_client_debug_init();
+
+               help_string_initialized++;
+
+               return ret;
+
+       case ORANGEFS_DEV_DEBUG:
+               ret = copy_from_user(&mask_info,
+                                    (void __user *)arg,
+                                    sizeof(mask_info));
+
+               if (ret != 0)
+                       return -EIO;
+
+               if (mask_info.mask_type == KERNEL_MASK) {
+                       if ((mask_info.mask_value == 0)
+                           && (kernel_mask_set_mod_init)) {
+                               /*
+                                * the kernel debug mask was set when the
+                                * kernel module was loaded; don't override
+                                * it if the client-core was started without
+                                * a value for ORANGEFS_KMODMASK.
+                                */
+                               return 0;
+                       }
+                       debug_mask_to_string(&mask_info.mask_value,
+                                            mask_info.mask_type);
+                       gossip_debug_mask = mask_info.mask_value;
+                       pr_info("%s: kernel debug mask has been modified to "
+                               ":%s: :%llx:\n",
+                               __func__,
+                               kernel_debug_string,
+                               (unsigned long long)gossip_debug_mask);
+               } else if (mask_info.mask_type == CLIENT_MASK) {
+                       debug_mask_to_string(&mask_info.mask_value,
+                                            mask_info.mask_type);
+                       pr_info("%s: client debug mask has been modified to"
+                               ":%s: :%llx:\n",
+                               __func__,
+                               client_debug_string,
+                               llu(mask_info.mask_value));
+               } else {
+                       gossip_lerr("Invalid mask type....\n");
+                       return -EINVAL;
+               }
+
+               return ret;
+
+       default:
+               return -ENOIOCTLCMD;
+       }
+       return -ENOIOCTLCMD;
+}
+
+static long orangefs_devreq_ioctl(struct file *file,
+                              unsigned int command, unsigned long arg)
+{
+       long ret;
+
+       /* Check for properly constructed commands */
+       ret = check_ioctl_command(command);
+       if (ret < 0)
+               return (int)ret;
+
+       return (int)dispatch_ioctl_command(command, arg);
+}
+
+#ifdef CONFIG_COMPAT           /* CONFIG_COMPAT is in .config */
+
+/*  Compat structure for the ORANGEFS_DEV_MAP ioctl */
+struct ORANGEFS_dev_map_desc32 {
+       compat_uptr_t ptr;
+       __s32 total_size;
+       __s32 size;
+       __s32 count;
+};
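+
+/*
+ * A rough sketch of why the field-by-field translation below is needed
+ * (assuming the native struct ORANGEFS_dev_map_desc carries a real
+ * void __user *ptr followed by the same three __s32 fields):
+ *
+ *   32-bit layout: 4-byte ptr + 3 * 4 bytes          = 16 bytes
+ *   64-bit layout: 8-byte ptr + 3 * 4 bytes (+ pad) >= 20 bytes
+ *
+ * so the 32-bit descriptor cannot simply be cast to the native one;
+ * each field has to be copied across individually.
+ */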
+
+static unsigned long translate_dev_map26(unsigned long args, long *error)
+{
+       struct ORANGEFS_dev_map_desc32 __user *p32 = (void __user *)args;
+       /*
+        * Depending on the architecture, allocate some space on the
+        * user-call-stack based on our expected layout.
+        */
+       struct ORANGEFS_dev_map_desc __user *p =
+           compat_alloc_user_space(sizeof(*p));
+       compat_uptr_t addr;
+
+       *error = 0;
+       /* get the ptr from the 32 bit user-space */
+       if (get_user(addr, &p32->ptr))
+               goto err;
+       /* try to put that into a 64-bit layout */
+       if (put_user(compat_ptr(addr), &p->ptr))
+               goto err;
+       /* copy the remaining fields */
+       if (copy_in_user(&p->total_size, &p32->total_size, sizeof(__s32)))
+               goto err;
+       if (copy_in_user(&p->size, &p32->size, sizeof(__s32)))
+               goto err;
+       if (copy_in_user(&p->count, &p32->count, sizeof(__s32)))
+               goto err;
+       return (unsigned long)p;
+err:
+       *error = -EFAULT;
+       return 0;
+}
+
+/*
+ * ioctl handler for 32-bit user-space apps when the kernel module
+ * is compiled as a 64-bit one
+ */
+static long orangefs_devreq_compat_ioctl(struct file *filp, unsigned int cmd,
+                                     unsigned long args)
+{
+       long ret;
+       unsigned long arg = args;
+
+       /* Check for properly constructed commands */
+       ret = check_ioctl_command(cmd);
+       if (ret < 0)
+               return ret;
+       if (cmd == ORANGEFS_DEV_MAP) {
+               /*
+                * convert the arguments to what we expect internally
+                * in kernel space
+                */
+               arg = translate_dev_map26(args, &ret);
+               if (ret < 0) {
+                       gossip_err("Could not translate dev map\n");
+                       return ret;
+               }
+       }
+       /* no other ioctl requires translation */
+       return dispatch_ioctl_command(cmd, arg);
+}
+
+#endif /* CONFIG_COMPAT is in .config */
+
+/* the assigned character device major number */
+static int orangefs_dev_major;
+
+/*
+ * Initialize orangefs device specific state:
+ * Must be called at module load time only
+ */
+int orangefs_dev_init(void)
+{
+       /* register orangefs-req device  */
+       orangefs_dev_major = register_chrdev(0,
+                                         ORANGEFS_REQDEVICE_NAME,
+                                         &orangefs_devreq_file_operations);
+       if (orangefs_dev_major < 0) {
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                            "Failed to register /dev/%s (error %d)\n",
+                            ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
+               return orangefs_dev_major;
+       }
+
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "*** /dev/%s character device registered ***\n",
+                    ORANGEFS_REQDEVICE_NAME);
+       gossip_debug(GOSSIP_DEV_DEBUG, "'mknod /dev/%s c %d 0'.\n",
+                    ORANGEFS_REQDEVICE_NAME, orangefs_dev_major);
+       return 0;
+}
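+
+/*
+ * Example (hypothetical values): if register_chrdev() above handed back
+ * major number 245 and ORANGEFS_REQDEVICE_NAME expands to "pvfs2-req"
+ * (assumed here), the gossip hint above corresponds to:
+ *
+ *   mknod /dev/pvfs2-req c 245 0
+ */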
+
+void orangefs_dev_cleanup(void)
+{
+       unregister_chrdev(orangefs_dev_major, ORANGEFS_REQDEVICE_NAME);
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                    "*** /dev/%s character device unregistered ***\n",
+                    ORANGEFS_REQDEVICE_NAME);
+}
+
+static unsigned int orangefs_devreq_poll(struct file *file,
+                                     struct poll_table_struct *poll_table)
+{
+       int poll_revent_mask = 0;
+
+       poll_wait(file, &orangefs_request_list_waitq, poll_table);
+
+       if (!list_empty(&orangefs_request_list))
+               poll_revent_mask |= POLLIN;
+       return poll_revent_mask;
+}
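+
+/*
+ * Usage sketch (assumes a userspace daemon such as the client-core
+ * holding the request device open at devfd):
+ *
+ *   struct pollfd pfd = { .fd = devfd, .events = POLLIN };
+ *   poll(&pfd, 1, -1);
+ *
+ * which returns once orangefs_request_list is non-empty.
+ */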
+
+const struct file_operations orangefs_devreq_file_operations = {
+       .owner = THIS_MODULE,
+       .read = orangefs_devreq_read,
+       .write_iter = orangefs_devreq_write_iter,
+       .open = orangefs_devreq_open,
+       .release = orangefs_devreq_release,
+       .unlocked_ioctl = orangefs_devreq_ioctl,
+
+#ifdef CONFIG_COMPAT           /* CONFIG_COMPAT is in .config */
+       .compat_ioctl = orangefs_devreq_compat_ioctl,
+#endif
+       .poll = orangefs_devreq_poll
+};
diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c
new file mode 100644 (file)
index 0000000..6f5836d
--- /dev/null
@@ -0,0 +1,446 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+struct readdir_handle_s {
+       int buffer_index;
+       struct orangefs_readdir_response_s readdir_response;
+       void *dents_buf;
+};
+
+/*
+ * decode routine used by kmod to deal with the blob sent from
+ * userspace for readdirs. The blob contains zero or more of these
+ * sub-blobs:
+ *   __u32 - represents length of the character string that follows.
+ *   string - between 1 and ORANGEFS_NAME_MAX bytes long.
+ *   padding - (if needed) to cause the __u32 plus the string to be
+ *             eight byte aligned.
+ *   khandle - sizeof(khandle) bytes.
+ */
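+/*
+ * Worked example (hypothetical 5-byte name): a sub-blob for the name
+ * "hello" occupies sizeof(__u32) + 5 + 1 (trailing NUL assumed) = 10
+ * bytes, padded to 16 so the __u32 plus the string stays eight byte
+ * aligned, followed by sizeof(struct orangefs_khandle) bytes.  This
+ * matches the aligned_len computation below:
+ *   ((4 + 5 + 1) + 7) & ~7 == 16.
+ */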
+static long decode_dirents(char *ptr, size_t size,
+                           struct orangefs_readdir_response_s *readdir)
+{
+       int i;
+       struct orangefs_readdir_response_s *rd =
+               (struct orangefs_readdir_response_s *) ptr;
+       char *buf = ptr;
+       int khandle_size = sizeof(struct orangefs_khandle);
+       size_t offset = offsetof(struct orangefs_readdir_response_s,
+                               dirent_array);
+       /* 8 reflects eight byte alignment */
+       int smallest_blob = khandle_size + 8;
+       __u32 len;
+       int aligned_len;
+       int sizeof_u32 = sizeof(__u32);
+       long ret;
+
+       gossip_debug(GOSSIP_DIR_DEBUG, "%s: size:%zu:\n", __func__, size);
+
+       /* size == offset on empty dirs, size > offset on non-empty dirs... */
+       if (size < offset) {
+               gossip_err("%s: size:%zu: offset:%zu:\n",
+                          __func__,
+                          size,
+                          offset);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if ((size == offset) && (readdir->orangefs_dirent_outcount != 0)) {
+               gossip_err("%s: size:%zu: dirent_outcount:%d:\n",
+                          __func__,
+                          size,
+                          readdir->orangefs_dirent_outcount);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       readdir->token = rd->token;
+       readdir->orangefs_dirent_outcount = rd->orangefs_dirent_outcount;
+       readdir->dirent_array = kcalloc(readdir->orangefs_dirent_outcount,
+                                       sizeof(*readdir->dirent_array),
+                                       GFP_KERNEL);
+       if (readdir->dirent_array == NULL) {
+               gossip_err("%s: kcalloc failed.\n", __func__);
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       buf += offset;
+       size -= offset;
+
+       for (i = 0; i < readdir->orangefs_dirent_outcount; i++) {
+               if (size < smallest_blob) {
+                       gossip_err("%s: size:%zu: smallest_blob:%d:\n",
+                                  __func__,
+                                  size,
+                                  smallest_blob);
+                       ret = -EINVAL;
+                       goto free;
+               }
+
+               len = *(__u32 *)buf;
+               if ((len < 1) || (len > ORANGEFS_NAME_MAX)) {
+                       gossip_err("%s: len:%d:\n", __func__, len);
+                       ret = -EINVAL;
+                       goto free;
+               }
+
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "%s: size:%zu: len:%d:\n",
+                            __func__,
+                            size,
+                            len);
+
+               readdir->dirent_array[i].d_name = buf + sizeof_u32;
+               readdir->dirent_array[i].d_length = len;
+
+               /*
+                * Calculate "aligned" length of this string and its
+                * associated __u32 descriptor.
+                */
+               aligned_len = ((sizeof_u32 + len + 1) + 7) & ~7;
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "%s: aligned_len:%d:\n",
+                            __func__,
+                            aligned_len);
+
+               /*
+                * The end of the blob should coincide with the end
+                * of the last sub-blob.
+                */
+               if (size < aligned_len + khandle_size) {
+                       gossip_err("%s: ran off the end of the blob.\n",
+                                  __func__);
+                       ret = -EINVAL;
+                       goto free;
+               }
+               size -= aligned_len + khandle_size;
+
+               buf += aligned_len;
+
+               readdir->dirent_array[i].khandle =
+                       *(struct orangefs_khandle *) buf;
+               buf += khandle_size;
+       }
+       ret = buf - ptr;
+       gossip_debug(GOSSIP_DIR_DEBUG, "%s: returning:%ld:\n", __func__, ret);
+       goto out;
+
+free:
+       kfree(readdir->dirent_array);
+       readdir->dirent_array = NULL;
+
+out:
+       return ret;
+}
+
+static long readdir_handle_ctor(struct readdir_handle_s *rhandle, void *buf,
+                               size_t size, int buffer_index)
+{
+       long ret;
+
+       if (buf == NULL) {
+               gossip_err
+                   ("Invalid NULL buffer specified in readdir_handle_ctor\n");
+               return -ENOMEM;
+       }
+       if (buffer_index < 0) {
+               gossip_err
+                   ("Invalid buffer index specified in readdir_handle_ctor\n");
+               return -EINVAL;
+       }
+       rhandle->buffer_index = buffer_index;
+       rhandle->dents_buf = buf;
+       ret = decode_dirents(buf, size, &rhandle->readdir_response);
+       if (ret < 0) {
+               gossip_err("Could not decode readdir from buffer %ld\n", ret);
+               rhandle->buffer_index = -1;
+               gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n", buf);
+               vfree(buf);
+               rhandle->dents_buf = NULL;
+       }
+       return ret;
+}
+
+static void readdir_handle_dtor(struct orangefs_bufmap *bufmap,
+               struct readdir_handle_s *rhandle)
+{
+       if (rhandle == NULL)
+               return;
+
+       /* kfree(NULL) is safe */
+       kfree(rhandle->readdir_response.dirent_array);
+       rhandle->readdir_response.dirent_array = NULL;
+
+       if (rhandle->buffer_index >= 0) {
+               orangefs_readdir_index_put(bufmap, rhandle->buffer_index);
+               rhandle->buffer_index = -1;
+       }
+       if (rhandle->dents_buf) {
+               gossip_debug(GOSSIP_DIR_DEBUG, "vfree %p\n",
+                            rhandle->dents_buf);
+               vfree(rhandle->dents_buf);
+               rhandle->dents_buf = NULL;
+       }
+}
+
+/*
+ * Read directory entries from an instance of an open directory.
+ */
+static int orangefs_readdir(struct file *file, struct dir_context *ctx)
+{
+       struct orangefs_bufmap *bufmap = NULL;
+       int ret = 0;
+       int buffer_index;
+       /*
+        * ptoken supports Orangefs' distributed directory logic, added
+        * in 2.9.2.
+        */
+       __u64 *ptoken = file->private_data;
+       __u64 pos = 0;
+       ino_t ino = 0;
+       struct dentry *dentry = file->f_path.dentry;
+       struct orangefs_kernel_op_s *new_op = NULL;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode);
+       int buffer_full = 0;
+       struct readdir_handle_s rhandle;
+       int i = 0;
+       int len = 0;
+       ino_t current_ino = 0;
+       char *current_entry = NULL;
+       long bytes_decoded;
+
+       gossip_debug(GOSSIP_DIR_DEBUG,
+                    "%s: ctx->pos:%lld, ptoken = %llu\n",
+                    __func__,
+                    lld(ctx->pos),
+                    llu(*ptoken));
+
+       pos = (__u64) ctx->pos;
+
+       /* are we done? */
+       if (pos == ORANGEFS_READDIR_END) {
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "Skipping to termination path\n");
+               return 0;
+       }
+
+       gossip_debug(GOSSIP_DIR_DEBUG,
+                    "orangefs_readdir called on %s (pos=%llu)\n",
+                    dentry->d_name.name, llu(pos));
+
+       rhandle.buffer_index = -1;
+       rhandle.dents_buf = NULL;
+       memset(&rhandle.readdir_response, 0, sizeof(rhandle.readdir_response));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_READDIR);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->uses_shared_memory = 1;
+       new_op->upcall.req.readdir.refn = orangefs_inode->refn;
+       new_op->upcall.req.readdir.max_dirent_count =
+           ORANGEFS_MAX_DIRENT_COUNT_READDIR;
+
+       gossip_debug(GOSSIP_DIR_DEBUG,
+                    "%s: upcall.req.readdir.refn.khandle: %pU\n",
+                    __func__,
+                    &new_op->upcall.req.readdir.refn.khandle);
+
+       new_op->upcall.req.readdir.token = *ptoken;
+
+get_new_buffer_index:
+       ret = orangefs_readdir_index_get(&bufmap, &buffer_index);
+       if (ret < 0) {
+               gossip_lerr("orangefs_readdir: orangefs_readdir_index_get() failure (%d)\n",
+                           ret);
+               goto out_free_op;
+       }
+       new_op->upcall.req.readdir.buf_index = buffer_index;
+
+       ret = service_operation(new_op,
+                               "orangefs_readdir",
+                               get_interruptible_flag(dentry->d_inode));
+
+       gossip_debug(GOSSIP_DIR_DEBUG,
+                    "Readdir downcall status is %d.  ret:%d\n",
+                    new_op->downcall.status,
+                    ret);
+
+       if (ret == -EAGAIN && op_state_purged(new_op)) {
+               /*
+                * readdir shared memory area has been wiped due to
+                * pvfs2-client-core restarting, so we must get a new
+                * index into the shared memory.
+                */
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                       "%s: Getting new buffer_index for retry of readdir..\n",
+                        __func__);
+               orangefs_readdir_index_put(bufmap, buffer_index);
+               goto get_new_buffer_index;
+       }
+
+       if (ret == -EIO && op_state_purged(new_op)) {
+               gossip_err("%s: Client is down. Aborting readdir call.\n",
+                       __func__);
+               orangefs_readdir_index_put(bufmap, buffer_index);
+               goto out_free_op;
+       }
+
+       if (ret < 0 || new_op->downcall.status != 0) {
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "Readdir request failed.  Status:%d\n",
+                            new_op->downcall.status);
+               orangefs_readdir_index_put(bufmap, buffer_index);
+               if (ret >= 0)
+                       ret = new_op->downcall.status;
+               goto out_free_op;
+       }
+
+       bytes_decoded =
+               readdir_handle_ctor(&rhandle,
+                                   new_op->downcall.trailer_buf,
+                                   new_op->downcall.trailer_size,
+                                   buffer_index);
+       if (bytes_decoded < 0) {
+               gossip_err("orangefs_readdir: Could not decode trailer buffer into a readdir response %ld\n",
+                       bytes_decoded);
+               ret = bytes_decoded;
+               orangefs_readdir_index_put(bufmap, buffer_index);
+               goto out_free_op;
+       }
+
+       if (bytes_decoded != new_op->downcall.trailer_size) {
+               gossip_err("orangefs_readdir: # bytes decoded (%ld) "
+                          "!= trailer size (%ld)\n",
+                          bytes_decoded,
+                          (long)new_op->downcall.trailer_size);
+               ret = -EINVAL;
+               goto out_destroy_handle;
+       }
+
+       /*
+        *  orangefs doesn't actually store dot and dot-dot, but
+        *  we need to have them represented.
+        */
+       if (pos == 0) {
+               ino = get_ino_from_khandle(dentry->d_inode);
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "%s: calling dir_emit of \".\" with pos = %llu\n",
+                            __func__,
+                            llu(pos));
+               ret = dir_emit(ctx, ".", 1, ino, DT_DIR);
+               pos += 1;
+       }
+
+       if (pos == 1) {
+               ino = get_parent_ino_from_dentry(dentry);
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "%s: calling dir_emit of \"..\" with pos = %llu\n",
+                            __func__,
+                            llu(pos));
+               ret = dir_emit(ctx, "..", 2, ino, DT_DIR);
+               pos += 1;
+       }
+
+       /*
+        * we stored ORANGEFS_ITERATE_NEXT in ctx->pos last time around
+        * to prevent "finding" dot and dot-dot on any iteration
+        * other than the first.
+        */
+       if (ctx->pos == ORANGEFS_ITERATE_NEXT)
+               ctx->pos = 0;
+
+       for (i = ctx->pos;
+            i < rhandle.readdir_response.orangefs_dirent_outcount;
+            i++) {
+               len = rhandle.readdir_response.dirent_array[i].d_length;
+               current_entry = rhandle.readdir_response.dirent_array[i].d_name;
+               current_ino = orangefs_khandle_to_ino(
+                       &(rhandle.readdir_response.dirent_array[i].khandle));
+
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                            "calling dir_emit for %s with len %d"
+                            ", ctx->pos %ld\n",
+                            current_entry,
+                            len,
+                            (unsigned long)ctx->pos);
+               /*
+                * type is unknown. We don't return object type
+                * in the dirent_array. This leaves getdents
+                * clueless about type.
+                */
+               ret =
+                   dir_emit(ctx, current_entry, len, current_ino, DT_UNKNOWN);
+               if (!ret)
+                       break;
+               ctx->pos++;
+               gossip_debug(GOSSIP_DIR_DEBUG,
+                             "%s: ctx->pos:%lld\n",
+                             __func__,
+                             lld(ctx->pos));
+
+       }
+
+       /*
+        * we ran all the way through the last batch, set up for
+        * getting another batch...
+        */
+       if (ret) {
+               *ptoken = rhandle.readdir_response.token;
+               ctx->pos = ORANGEFS_ITERATE_NEXT;
+       }
+
+       /*
+        * Did we hit the end of the directory?
+        */
+       if (rhandle.readdir_response.token == ORANGEFS_READDIR_END &&
+           !buffer_full) {
+               gossip_debug(GOSSIP_DIR_DEBUG,
+               "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n");
+               ctx->pos = ORANGEFS_READDIR_END;
+       }
+
+out_destroy_handle:
+       readdir_handle_dtor(bufmap, &rhandle);
+out_free_op:
+       op_release(new_op);
+       gossip_debug(GOSSIP_DIR_DEBUG, "orangefs_readdir returning %d\n", ret);
+       return ret;
+}
+
+static int orangefs_dir_open(struct inode *inode, struct file *file)
+{
+       __u64 *ptoken;
+
+       file->private_data = kmalloc(sizeof(__u64), GFP_KERNEL);
+       if (!file->private_data)
+               return -ENOMEM;
+
+       ptoken = file->private_data;
+       *ptoken = ORANGEFS_READDIR_START;
+       return 0;
+}
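+
+/*
+ * Note: *ptoken starts at ORANGEFS_READDIR_START here and, after each
+ * successfully emitted batch in orangefs_readdir() above, is replaced
+ * with the token the server returned, so the next iterate call resumes
+ * where the previous batch left off.
+ */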
+
+static int orangefs_dir_release(struct inode *inode, struct file *file)
+{
+       orangefs_flush_inode(inode);
+       kfree(file->private_data);
+       return 0;
+}
+
+/** ORANGEFS implementation of VFS directory operations */
+const struct file_operations orangefs_dir_operations = {
+       .read = generic_read_dir,
+       .iterate = orangefs_readdir,
+       .open = orangefs_dir_open,
+       .release = orangefs_dir_release,
+};
diff --git a/fs/orangefs/downcall.h b/fs/orangefs/downcall.h
new file mode 100644 (file)
index 0000000..72d4cac
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Definitions of downcalls used in Linux kernel module.
+ */
+
+#ifndef __DOWNCALL_H
+#define __DOWNCALL_H
+
+/*
+ * Sanitized the device-client core interaction
+ * for clean 32-64 bit usage
+ */
+struct orangefs_io_response {
+       __s64 amt_complete;
+};
+
+struct orangefs_lookup_response {
+       struct orangefs_object_kref refn;
+};
+
+struct orangefs_create_response {
+       struct orangefs_object_kref refn;
+};
+
+struct orangefs_symlink_response {
+       struct orangefs_object_kref refn;
+};
+
+struct orangefs_getattr_response {
+       struct ORANGEFS_sys_attr_s attributes;
+       char link_target[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_mkdir_response {
+       struct orangefs_object_kref refn;
+};
+
+/*
+ * duplication of some system interface structures so that I don't have
+ * to allocate extra memory
+ */
+struct orangefs_dirent {
+       char *d_name;
+       int d_length;
+       struct orangefs_khandle khandle;
+};
+
+struct orangefs_statfs_response {
+       __s64 block_size;
+       __s64 blocks_total;
+       __s64 blocks_avail;
+       __s64 files_total;
+       __s64 files_avail;
+};
+
+struct orangefs_fs_mount_response {
+       __s32 fs_id;
+       __s32 id;
+       struct orangefs_khandle root_khandle;
+};
+
+/* the getxattr response is the attribute value */
+struct orangefs_getxattr_response {
+       __s32 val_sz;
+       __s32 __pad1;
+       char val[ORANGEFS_MAX_XATTR_VALUELEN];
+};
+
+/* the listxattr response is an array of attribute names */
+struct orangefs_listxattr_response {
+       __s32 returned_count;
+       __s32 __pad1;
+       __u64 token;
+       char key[ORANGEFS_MAX_XATTR_LISTLEN * ORANGEFS_MAX_XATTR_NAMELEN];
+       __s32 keylen;
+       __s32 __pad2;
+       __s32 lengths[ORANGEFS_MAX_XATTR_LISTLEN];
+};
+
+struct orangefs_param_response {
+       __s64 value;
+};
+
+#define PERF_COUNT_BUF_SIZE 4096
+struct orangefs_perf_count_response {
+       char buffer[PERF_COUNT_BUF_SIZE];
+};
+
+#define FS_KEY_BUF_SIZE 4096
+struct orangefs_fs_key_response {
+       __s32 fs_keylen;
+       __s32 __pad1;
+       char fs_key[FS_KEY_BUF_SIZE];
+};
+
+struct orangefs_downcall_s {
+       __s32 type;
+       __s32 status;
+       /* currently trailer is used only by readdir */
+       __s64 trailer_size;
+       char *trailer_buf;
+
+       union {
+               struct orangefs_io_response io;
+               struct orangefs_lookup_response lookup;
+               struct orangefs_create_response create;
+               struct orangefs_symlink_response sym;
+               struct orangefs_getattr_response getattr;
+               struct orangefs_mkdir_response mkdir;
+               struct orangefs_statfs_response statfs;
+               struct orangefs_fs_mount_response fs_mount;
+               struct orangefs_getxattr_response getxattr;
+               struct orangefs_listxattr_response listxattr;
+               struct orangefs_param_response param;
+               struct orangefs_perf_count_response perf_count;
+               struct orangefs_fs_key_response fs_key;
+       } resp;
+};
+
+struct orangefs_readdir_response_s {
+       __u64 token;
+       __u64 directory_version;
+       __u32 __pad2;
+       __u32 orangefs_dirent_outcount;
+       struct orangefs_dirent *dirent_array;
+};
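+
+/*
+ * Note: for readdir, the packed blob referenced by trailer_buf in
+ * struct orangefs_downcall_s above is what decode_dirents() in dir.c
+ * unpacks into this structure.
+ */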
+
+#endif /* __DOWNCALL_H */
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
new file mode 100644 (file)
index 0000000..d865b58
--- /dev/null
@@ -0,0 +1,738 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Linux VFS file operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+
+/*
+ * Copy to client-core's address space from the buffers specified
+ * by the iovec, up to total_size bytes.
+ * NOTE: the iovec can contain either addresses (which may in turn be
+ *       kernel-space or user-space addresses) or pointers to
+ *       struct pages.
+ */
+static int precopy_buffers(struct orangefs_bufmap *bufmap,
+                          int buffer_index,
+                          struct iov_iter *iter,
+                          size_t total_size)
+{
+       int ret = 0;
+       /*
+        * copy data from application/kernel by pulling it out
+        * of the iovec.
+        */
+
+
+       if (total_size) {
+               ret = orangefs_bufmap_copy_from_iovec(bufmap,
+                                                     iter,
+                                                     buffer_index,
+                                                     total_size);
+               if (ret < 0)
+                       gossip_err("%s: Failed to copy-in buffers. Please make sure that the pvfs2-client is running. %ld\n",
+                                  __func__,
+                                  (long)ret);
+       }
+
+       return ret;
+}
+
+/*
+ * Copy from client-core's address space to the buffers specified
+ * by the iovec, up to total_size bytes.
+ * NOTE: the iovec can contain either addresses (which may in turn be
+ *       kernel-space or user-space addresses) or pointers to
+ *       struct pages.
+ */
+static int postcopy_buffers(struct orangefs_bufmap *bufmap,
+                           int buffer_index,
+                           struct iov_iter *iter,
+                           size_t total_size)
+{
+       int ret = 0;
+       /*
+        * copy data to application/kernel by pushing it out to
+        * the iovec. NOTE: target buffers can be addresses or
+        * struct page pointers.
+        */
+       if (total_size) {
+               ret = orangefs_bufmap_copy_to_iovec(bufmap,
+                                                   iter,
+                                                   buffer_index,
+                                                   total_size);
+               if (ret < 0)
+                       gossip_err("%s: Failed to copy-out buffers. Please make sure that the pvfs2-client is running (%ld)\n",
+                               __func__,
+                               (long)ret);
+       }
+       return ret;
+}
+
+/*
+ * handles two possible error cases, depending on context.
+ *
+ * by design, our vfs i/o errors need to be handled in one of two ways,
+ * depending on where the error occurred.
+ *
+ * if the error happens in the waitqueue code because we either timed
+ * out or a signal was raised while waiting, we need to cancel the
+ * userspace i/o operation and free the op manually.  this is done to
+ * avoid having the device start writing application data to our shared
+ * bufmap pages without us expecting it.
+ *
+ * FIXME: POSSIBLE OPTIMIZATION:
+ * However, if we timed out or if we got a signal AND our upcall was never
+ * picked off the queue (i.e. we were in OP_VFS_STATE_WAITING), then we don't
+ * need to send a cancellation upcall. The way we can handle this is to
+ * set error_exit to 2 in such cases, and to 1 whenever a cancellation has
+ * to be sent, and have handle_io_error() take care of this situation
+ * as well.
+ *
+ * if an orangefs sysint level error occurred and i/o has been completed,
+ * there is no need to cancel the operation, as the user has finished
+ * using the bufmap page and so there is no danger in this case.  in
+ * this case, we wake up the device normally so that it may free the
+ * op, as normal.
+ *
+ * note the only reason this is a macro is because both read and write
+ * cases need the exact same handling code.
+ */
+#define handle_io_error()                                      \
+do {                                                           \
+       if (!op_state_serviced(new_op)) {                       \
+               orangefs_cancel_op_in_progress(new_op->tag);    \
+       } else {                                                \
+               complete(&new_op->done);                        \
+       }                                                       \
+       orangefs_bufmap_put(bufmap, buffer_index);              \
+       buffer_index = -1;                                      \
+} while (0)
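+
+/*
+ * Usage sketch: handle_io_error() expands in place, so new_op, bufmap and
+ * buffer_index must already be in scope at the call site, as in
+ * wait_for_direct_io() below:
+ *
+ *   ret = service_operation(new_op, ...);
+ *   if (ret < 0) {
+ *           handle_io_error();   (cancel or complete, then put the buffer)
+ *           ...
+ *           goto out;
+ *   }
+ */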
+
+/*
+ * Post and wait for the I/O upcall to finish
+ */
+static ssize_t wait_for_direct_io(enum ORANGEFS_io_type type, struct inode *inode,
+               loff_t *offset, struct iov_iter *iter,
+               size_t total_size, loff_t readahead_size)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
+       struct orangefs_bufmap *bufmap = NULL;
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int buffer_index = -1;
+       ssize_t ret;
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FILE_IO);
+       if (!new_op)
+               return -ENOMEM;
+
+       /* synchronous I/O */
+       new_op->upcall.req.io.async_vfs_io = ORANGEFS_VFS_SYNC_IO;
+       new_op->upcall.req.io.readahead_size = readahead_size;
+       new_op->upcall.req.io.io_type = type;
+       new_op->upcall.req.io.refn = orangefs_inode->refn;
+
+populate_shared_memory:
+       /* get a shared buffer index */
+       ret = orangefs_bufmap_get(&bufmap, &buffer_index);
+       if (ret < 0) {
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s: orangefs_bufmap_get failure (%ld)\n",
+                            __func__, (long)ret);
+               goto out;
+       }
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): GET op %p -> buffer_index %d\n",
+                    __func__,
+                    handle,
+                    new_op,
+                    buffer_index);
+
+       new_op->uses_shared_memory = 1;
+       new_op->upcall.req.io.buf_index = buffer_index;
+       new_op->upcall.req.io.count = total_size;
+       new_op->upcall.req.io.offset = *offset;
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): offset: %llu total_size: %zd\n",
+                    __func__,
+                    handle,
+                    llu(*offset),
+                    total_size);
+       /*
+        * Stage 1: copy the buffers into client-core's address space
+        * precopy_buffers only pertains to writes.
+        */
+       if (type == ORANGEFS_IO_WRITE) {
+               ret = precopy_buffers(bufmap,
+                                     buffer_index,
+                                     iter,
+                                     total_size);
+               if (ret < 0)
+                       goto out;
+       }
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): Calling post_io_request with tag (%llu)\n",
+                    __func__,
+                    handle,
+                    llu(new_op->tag));
+
+       /* Stage 2: Service the I/O operation */
+       ret = service_operation(new_op,
+                               type == ORANGEFS_IO_WRITE ?
+                                       "file_write" :
+                                       "file_read",
+                               get_interruptible_flag(inode));
+
+       /*
+        * If service_operation() returns -EAGAIN #and# the operation was
+        * purged from orangefs_request_list or htable_ops_in_progress, then
+        * we know that the client was restarted, causing the shared memory
+        * area to be wiped clean.  To restart a  write operation in this
+        * case, we must re-copy the data from the user's iovec to a NEW
+        * shared memory location. To restart a read operation, we must get
+        * a new shared memory location.
+        */
+       if (ret == -EAGAIN && op_state_purged(new_op)) {
+               orangefs_bufmap_put(bufmap, buffer_index);
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s:going to repopulate_shared_memory.\n",
+                            __func__);
+               goto populate_shared_memory;
+       }
+
+       if (ret < 0) {
+               handle_io_error();
+               /*
+                * don't write an error to syslog on signaled operation
+                * termination unless we've got debugging turned on, as
+                * this can happen regularly (i.e. ctrl-c)
+                */
+               if (ret == -EINTR)
+                       gossip_debug(GOSSIP_FILE_DEBUG,
+                                    "%s: returning error %ld\n", __func__,
+                                    (long)ret);
+               else
+                       gossip_err("%s: error in %s handle %pU, returning %zd\n",
+                               __func__,
+                               type == ORANGEFS_IO_READ ?
+                                       "read from" : "write to",
+                               handle, ret);
+               goto out;
+       }
+
+       /*
+        * Stage 3: Post copy buffers from client-core's address space
+        * postcopy_buffers only pertains to reads.
+        */
+       if (type == ORANGEFS_IO_READ) {
+               ret = postcopy_buffers(bufmap,
+                                      buffer_index,
+                                      iter,
+                                      new_op->downcall.resp.io.amt_complete);
+               if (ret < 0) {
+                       /*
+                        * put error codes in downcall so that handle_io_error()
+                        * preserves it properly
+                        */
+                       WARN_ON(!op_state_serviced(new_op));
+                       new_op->downcall.status = ret;
+                       handle_io_error();
+                       goto out;
+               }
+       }
+       gossip_debug(GOSSIP_FILE_DEBUG,
+           "%s(%pU): Amount written as returned by the sys-io call:%d\n",
+           __func__,
+           handle,
+           (int)new_op->downcall.resp.io.amt_complete);
+
+       ret = new_op->downcall.resp.io.amt_complete;
+
+       /*
+        * tell the device file owner waiting on I/O that this read has
+        * completed and it can return now.
+        */
+       complete(&new_op->done);
+
+out:
+       if (buffer_index >= 0) {
+               orangefs_bufmap_put(bufmap, buffer_index);
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): PUT buffer_index %d\n",
+                            __func__, handle, buffer_index);
+               buffer_index = -1;
+       }
+       op_release(new_op);
+       return ret;
+}
+
+/*
+ * Common entry point for read/write/readv/writev
+ * This function will dispatch it to either the direct I/O
+ * or buffered I/O path depending on the mount options and/or
+ * augmented/extended metadata attached to the file.
+ * Note: File extended attributes override any mount options.
+ */
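+/*
+ * Example (hypothetical sizes): the loop below never transfers more than
+ * orangefs_bufmap_size_query() bytes per upcall, so a 10 MB write against
+ * a 4 MB shared buffer would be serviced as three wait_for_direct_io()
+ * calls of 4 MB, 4 MB and 2 MB, with *offset advanced after each one.
+ */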
+static ssize_t do_readv_writev(enum ORANGEFS_io_type type, struct file *file,
+               loff_t *offset, struct iov_iter *iter)
+{
+       struct inode *inode = file->f_mapping->host;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_khandle *handle = &orangefs_inode->refn.khandle;
+       size_t count = iov_iter_count(iter);
+       ssize_t total_count = 0;
+       ssize_t ret = -EINVAL;
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+               "%s-BEGIN(%pU): count(%d) after estimate_max_iovecs.\n",
+               __func__,
+               handle,
+               (int)count);
+
+       if (type == ORANGEFS_IO_WRITE) {
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): proceeding with offset : %llu, "
+                            "size %d\n",
+                            __func__,
+                            handle,
+                            llu(*offset),
+                            (int)count);
+       }
+
+       if (count == 0) {
+               ret = 0;
+               goto out;
+       }
+
+       while (iov_iter_count(iter)) {
+               size_t each_count = iov_iter_count(iter);
+               size_t amt_complete;
+
+               /* how much to transfer in this loop iteration */
+               if (each_count > orangefs_bufmap_size_query())
+                       each_count = orangefs_bufmap_size_query();
+
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): size of each_count(%d)\n",
+                            __func__,
+                            handle,
+                            (int)each_count);
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): BEFORE wait_for_io: offset is %d\n",
+                            __func__,
+                            handle,
+                            (int)*offset);
+
+               ret = wait_for_direct_io(type, inode, offset, iter,
+                               each_count, 0);
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): return from wait_for_io:%d\n",
+                            __func__,
+                            handle,
+                            (int)ret);
+
+               if (ret < 0)
+                       goto out;
+
+               *offset += ret;
+               total_count += ret;
+               amt_complete = ret;
+
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s(%pU): AFTER wait_for_io: offset is %d\n",
+                            __func__,
+                            handle,
+                            (int)*offset);
+
+               /*
+                * if we got a short I/O operation,
+                * fall out and return what we got so far
+                */
+               if (amt_complete < each_count)
+                       break;
+       } /*end while */
+
+       if (total_count > 0)
+               ret = total_count;
+out:
+       if (ret > 0) {
+               if (type == ORANGEFS_IO_READ) {
+                       file_accessed(file);
+               } else {
+                       SetMtimeFlag(orangefs_inode);
+                       inode->i_mtime = CURRENT_TIME;
+                       mark_inode_dirty_sync(inode);
+               }
+       }
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): Value(%d) returned.\n",
+                    __func__,
+                    handle,
+                    (int)ret);
+
+       return ret;
+}
+
+/*
+ * Read data from a specified offset in a file (referenced by inode).
+ * Data may be placed either in a user or kernel buffer.
+ */
+ssize_t orangefs_inode_read(struct inode *inode,
+                           struct iov_iter *iter,
+                           loff_t *offset,
+                           loff_t readahead_size)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       size_t count = iov_iter_count(iter);
+       size_t bufmap_size;
+       ssize_t ret = -EINVAL;
+
+       g_orangefs_stats.reads++;
+
+       bufmap_size = orangefs_bufmap_size_query();
+       if (count > bufmap_size) {
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "%s: count is too large (%zd/%zd)!\n",
+                            __func__, count, bufmap_size);
+               return -EINVAL;
+       }
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU) %zd@%llu\n",
+                    __func__,
+                    &orangefs_inode->refn.khandle,
+                    count,
+                    llu(*offset));
+
+       ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, offset, iter,
+                       count, readahead_size);
+       if (ret > 0)
+               *offset += ret;
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "%s(%pU): Value(%zd) returned.\n",
+                    __func__,
+                    &orangefs_inode->refn.khandle,
+                    ret);
+
+       return ret;
+}
+
+static ssize_t orangefs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       loff_t pos = *(&iocb->ki_pos);
+       ssize_t rc = 0;
+
+       BUG_ON(iocb->private);
+
+       gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_read_iter\n");
+
+       g_orangefs_stats.reads++;
+
+       rc = do_readv_writev(ORANGEFS_IO_READ, file, &pos, iter);
+       iocb->ki_pos = pos;
+
+       return rc;
+}
+
+static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       struct file *file = iocb->ki_filp;
+       loff_t pos;
+       ssize_t rc;
+
+       BUG_ON(iocb->private);
+
+       gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n");
+
+       mutex_lock(&file->f_mapping->host->i_mutex);
+
+       /* Make sure generic_write_checks sees an up to date inode size. */
+       if (file->f_flags & O_APPEND) {
+               rc = orangefs_inode_getattr(file->f_mapping->host,
+                                        ORANGEFS_ATTR_SYS_SIZE, 0);
+               if (rc) {
+                       gossip_err("%s: orangefs_inode_getattr failed, rc:%zd:.\n",
+                                  __func__, rc);
+                       goto out;
+               }
+       }
+
+       if (file->f_pos > i_size_read(file->f_mapping->host))
+               orangefs_i_size_write(file->f_mapping->host, file->f_pos);
+
+       rc = generic_write_checks(iocb, iter);
+
+       if (rc <= 0) {
+               gossip_err("%s: generic_write_checks failed, rc:%zd:.\n",
+                          __func__, rc);
+               goto out;
+       }
+
+       /*
+        * if we are appending, generic_write_checks would have updated
+        * pos to the end of the file, so we will wait till now to set
+        * pos...
+        */
+       pos = *(&iocb->ki_pos);
+
+       rc = do_readv_writev(ORANGEFS_IO_WRITE,
+                            file,
+                            &pos,
+                            iter);
+       if (rc < 0) {
+               gossip_err("%s: do_readv_writev failed, rc:%zd:.\n",
+                          __func__, rc);
+               goto out;
+       }
+
+       iocb->ki_pos = pos;
+       g_orangefs_stats.writes++;
+
+out:
+
+       mutex_unlock(&file->f_mapping->host->i_mutex);
+       return rc;
+}
+
+/*
+ * Perform a miscellaneous operation on a file.
+ */
+static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       int ret = -ENOTTY;
+       __u64 val = 0;
+       unsigned long uval;
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_ioctl: called with cmd %d\n",
+                    cmd);
+
+       /*
+        * we understand some general ioctls on files, such as the immutable
+        * and append flags
+        */
+       if (cmd == FS_IOC_GETFLAGS) {
+               val = 0;
+               ret = orangefs_inode_getxattr(file_inode(file),
+                                             ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+                                             "user.pvfs2.meta_hint",
+                                             &val, sizeof(val));
+               if (ret < 0 && ret != -ENODATA)
+                       return ret;
+               else if (ret == -ENODATA)
+                       val = 0;
+               uval = val;
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "orangefs_ioctl: FS_IOC_GETFLAGS: %llu\n",
+                            (unsigned long long)uval);
+               return put_user(uval, (int __user *)arg);
+       } else if (cmd == FS_IOC_SETFLAGS) {
+               ret = 0;
+               if (get_user(uval, (int __user *)arg))
+                       return -EFAULT;
+               /*
+                * ORANGEFS_MIRROR_FL is set internally when the mirroring mode
+                * is turned on for a file. The user is not allowed to turn
+                * on this bit, but the bit is present if the user first gets
+                * the flags and then updates the flags with some new
+                * settings. So, we ignore it in the following edit. bligon.
+                */
+               if ((uval & ~ORANGEFS_MIRROR_FL) &
+                   (~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NOATIME_FL))) {
+                       gossip_err("orangefs_ioctl: the FS_IOC_SETFLAGS only supports setting one of FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NOATIME_FL\n");
+                       return -EINVAL;
+               }
+               val = uval;
+               gossip_debug(GOSSIP_FILE_DEBUG,
+                            "orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n",
+                            (unsigned long long)val);
+               ret = orangefs_inode_setxattr(file_inode(file),
+                                             ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+                                             "user.pvfs2.meta_hint",
+                                             &val, sizeof(val), 0);
+       }
+
+       return ret;
+}
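+
+/*
+ * Userspace sketch (assumes fd refers to an open orangefs file and that
+ * <sys/ioctl.h> and <linux/fs.h> are included):
+ *
+ *   int flags = 0;
+ *   ioctl(fd, FS_IOC_GETFLAGS, &flags);
+ *   flags |= FS_NOATIME_FL;
+ *   ioctl(fd, FS_IOC_SETFLAGS, &flags);
+ *
+ * Only FS_IMMUTABLE_FL, FS_APPEND_FL and FS_NOATIME_FL are accepted by
+ * the SETFLAGS path above.
+ */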
+
+/*
+ * Memory map a region of a file.
+ */
+static int orangefs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_file_mmap: called on %s\n",
+                    (file ?
+                       (char *)file->f_path.dentry->d_name.name :
+                       (char *)"Unknown"));
+
+       /* set the sequential readahead hint */
+       vma->vm_flags |= VM_SEQ_READ;
+       vma->vm_flags &= ~VM_RAND_READ;
+
+       /* Use readonly mmap since we cannot support writable maps. */
+       return generic_file_readonly_mmap(file, vma);
+}
+
+#define mapping_nrpages(idata) ((idata)->nrpages)
+
+/*
+ * Called to notify the module that there are no more references to
+ * this file (i.e. no processes have it open).
+ *
+ * \note Not called when each file is closed.
+ */
+static int orangefs_file_release(struct inode *inode, struct file *file)
+{
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_file_release: called on %s\n",
+                    file->f_path.dentry->d_name.name);
+
+       orangefs_flush_inode(inode);
+
+       /*
+        * remove all associated inode pages from the page cache and mmap
+        * readahead cache (if any); this forces an expensive refresh of
+        * data for the next caller of mmap (or 'get_block' accesses)
+        */
+       if (file->f_path.dentry->d_inode &&
+           file->f_path.dentry->d_inode->i_mapping &&
+           mapping_nrpages(&file->f_path.dentry->d_inode->i_data))
+               truncate_inode_pages(file->f_path.dentry->d_inode->i_mapping,
+                                    0);
+       return 0;
+}
+
+/*
+ * Push all data for a specific file onto permanent storage.
+ */
+static int orangefs_fsync(struct file *file,
+                      loff_t start,
+                      loff_t end,
+                      int datasync)
+{
+       int ret = -EINVAL;
+       struct orangefs_inode_s *orangefs_inode =
+               ORANGEFS_I(file->f_path.dentry->d_inode);
+       struct orangefs_kernel_op_s *new_op = NULL;
+
+       /* required call */
+       filemap_write_and_wait_range(file->f_mapping, start, end);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FSYNC);
+       if (!new_op)
+               return -ENOMEM;
+       new_op->upcall.req.fsync.refn = orangefs_inode->refn;
+
+       ret = service_operation(new_op,
+                       "orangefs_fsync",
+                       get_interruptible_flag(file->f_path.dentry->d_inode));
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_fsync got return value of %d\n",
+                    ret);
+
+       op_release(new_op);
+
+       orangefs_flush_inode(file->f_path.dentry->d_inode);
+       return ret;
+}
+
+/*
+ * Change the file pointer position for an instance of an open file.
+ *
+ * \note If .llseek is overridden, we must acquire lock as described in
+ *       Documentation/filesystems/Locking.
+ *
+ * A future upgrade could support SEEK_DATA and SEEK_HOLE but would
+ * require substantial changes to the FS.
+ */
+static loff_t orangefs_file_llseek(struct file *file, loff_t offset, int origin)
+{
+       int ret = -EINVAL;
+       struct inode *inode = file->f_path.dentry->d_inode;
+
+       if (!inode) {
+               gossip_err("orangefs_file_llseek: invalid inode (NULL)\n");
+               return ret;
+       }
+
+       if (origin == ORANGEFS_SEEK_END) {
+               /*
+                * revalidate the inode's file size.
+                * NOTE: We are only interested in file size here,
+                * so we set mask accordingly.
+                */
+               ret = orangefs_inode_getattr(inode, ORANGEFS_ATTR_SYS_SIZE, 0);
+               if (ret) {
+                       gossip_debug(GOSSIP_FILE_DEBUG,
+                                    "%s:%s:%d calling make bad inode\n",
+                                    __FILE__,
+                                    __func__,
+                                    __LINE__);
+                       orangefs_make_bad_inode(inode);
+                       return ret;
+               }
+       }
+
+       gossip_debug(GOSSIP_FILE_DEBUG,
+                    "orangefs_file_llseek: offset is %ld | origin is %d"
+                    " | inode size is %lu\n",
+                    (long)offset,
+                    origin,
+                    (unsigned long)file->f_path.dentry->d_inode->i_size);
+
+       return generic_file_llseek(file, offset, origin);
+}
+
+/*
+ * Support local locks (locks that only this kernel knows about)
+ * if Orangefs was mounted -o local_lock.
+ */
+static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+       int rc = -EINVAL;
+
+       if (ORANGEFS_SB(filp->f_inode->i_sb)->flags & ORANGEFS_OPT_LOCAL_LOCK) {
+               if (cmd == F_GETLK) {
+                       rc = 0;
+                       posix_test_lock(filp, fl);
+               } else {
+                       rc = posix_lock_file(filp, fl, NULL);
+               }
+       }
+
+       return rc;
+}
+
+/** ORANGEFS implementation of VFS file operations */
+const struct file_operations orangefs_file_operations = {
+       .llseek         = orangefs_file_llseek,
+       .read_iter      = orangefs_file_read_iter,
+       .write_iter     = orangefs_file_write_iter,
+       .lock           = orangefs_lock,
+       .unlocked_ioctl = orangefs_ioctl,
+       .mmap           = orangefs_file_mmap,
+       .open           = generic_file_open,
+       .release        = orangefs_file_release,
+       .fsync          = orangefs_fsync,
+};
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
new file mode 100644 (file)
index 0000000..d2923dc
--- /dev/null
@@ -0,0 +1,487 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Linux VFS inode operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+static int read_one_page(struct page *page)
+{
+       int ret;
+       int max_block;
+       ssize_t bytes_read = 0;
+       struct inode *inode = page->mapping->host;
+       const __u32 blocksize = PAGE_CACHE_SIZE;        /* inode->i_blksize */
+       const __u32 blockbits = PAGE_CACHE_SHIFT;       /* inode->i_blkbits */
+       struct iov_iter to;
+       struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};
+
+       iov_iter_bvec(&to, ITER_BVEC | READ, &bv, 1, PAGE_SIZE);
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                   "orangefs_readpage called with page %p\n",
+                    page);
+
+       max_block = ((inode->i_size / blocksize) + 1);
+
+       if (page->index < max_block) {
+               loff_t blockptr_offset = (((loff_t) page->index) << blockbits);
+
+               bytes_read = orangefs_inode_read(inode,
+                                                &to,
+                                                &blockptr_offset,
+                                                inode->i_size);
+       }
+       /* this will only zero remaining unread portions of the page data */
+       iov_iter_zero(~0U, &to);
+       /* takes care of potential aliasing */
+       flush_dcache_page(page);
+       if (bytes_read < 0) {
+               ret = bytes_read;
+               SetPageError(page);
+       } else {
+               SetPageUptodate(page);
+               if (PageError(page))
+                       ClearPageError(page);
+               ret = 0;
+       }
+       /* unlock the page after the ->readpage() routine completes */
+       unlock_page(page);
+       return ret;
+}
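+
+/*
+ * Example (assuming 4 KB pages): for a 10000-byte file, max_block is
+ * 10000 / 4096 + 1 = 3, so pages 0-2 are read from offsets 0, 4096 and
+ * 8192; the iov_iter_zero() call clears whatever part of the last page
+ * lies beyond EOF before the page is marked up to date.
+ */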
+
+static int orangefs_readpage(struct file *file, struct page *page)
+{
+       return read_one_page(page);
+}
+
+static int orangefs_readpages(struct file *file,
+                          struct address_space *mapping,
+                          struct list_head *pages,
+                          unsigned nr_pages)
+{
+       int page_idx;
+       int ret;
+
+       gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_readpages called\n");
+
+       for (page_idx = 0; page_idx < nr_pages; page_idx++) {
+               struct page *page;
+
+               page = list_entry(pages->prev, struct page, lru);
+               list_del(&page->lru);
+               if (!add_to_page_cache(page,
+                                      mapping,
+                                      page->index,
+                                      GFP_KERNEL)) {
+                       ret = read_one_page(page);
+                       gossip_debug(GOSSIP_INODE_DEBUG,
+                               "added page to cache, read_one_page returned: %d\n",
+                               ret);
+               } else {
+                       page_cache_release(page);
+               }
+       }
+       BUG_ON(!list_empty(pages));
+       return 0;
+}
+
+static void orangefs_invalidatepage(struct page *page,
+                                unsigned int offset,
+                                unsigned int length)
+{
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_invalidatepage called on page %p "
+                    "(offset is %u)\n",
+                    page,
+                    offset);
+
+       ClearPageUptodate(page);
+       ClearPageMappedToDisk(page);
+}
+
+static int orangefs_releasepage(struct page *page, gfp_t foo)
+{
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_releasepage called on page %p\n",
+                    page);
+       return 0;
+}
+
+/*
+ * Having a direct_IO entry point in the address_space_operations
+ * struct allows the kernel to let us use O_DIRECT on open. Nothing
+ * will ever call this thing, but in the future we will need to be
+ * able to use O_DIRECT on open in order to support AIO. This is
+ * modeled after NFS, which does the same.
+ */
+/*
+ * static ssize_t orangefs_direct_IO(int rw,
+ *                     struct kiocb *iocb,
+ *                     struct iov_iter *iter,
+ *                     loff_t offset)
+ *{
+ *     gossip_debug(GOSSIP_INODE_DEBUG,
+ *                  "orangefs_direct_IO: %s\n",
+ *                  iocb->ki_filp->f_path.dentry->d_name.name);
+ *
+ *     return -EINVAL;
+ *}
+ */
+
+struct backing_dev_info orangefs_backing_dev_info = {
+       .name = "orangefs",
+       .ra_pages = 0,
+       .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+};
+
+/** ORANGEFS2 implementation of address space operations */
+const struct address_space_operations orangefs_address_operations = {
+       .readpage = orangefs_readpage,
+       .readpages = orangefs_readpages,
+       .invalidatepage = orangefs_invalidatepage,
+       .releasepage = orangefs_releasepage,
+/*     .direct_IO = orangefs_direct_IO */
+};
+
+static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       loff_t orig_size = i_size_read(inode);
+       int ret = -EINVAL;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "%s: %pU: Handle is %pU | fs_id %d | size is %llu\n",
+                    __func__,
+                    get_khandle_from_ino(inode),
+                    &orangefs_inode->refn.khandle,
+                    orangefs_inode->refn.fs_id,
+                    iattr->ia_size);
+
+       truncate_setsize(inode, iattr->ia_size);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_TRUNCATE);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.truncate.refn = orangefs_inode->refn;
+       new_op->upcall.req.truncate.size = (__s64) iattr->ia_size;
+
+       ret = service_operation(new_op, __func__,
+                               get_interruptible_flag(inode));
+
+       /*
+        * the truncate has no downcall members to retrieve, but
+        * the status value tells us if it went through ok or not
+        */
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs: orangefs_setattr_size got return value of %d\n",
+                    ret);
+
+       op_release(new_op);
+
+       if (ret != 0)
+               return ret;
+
+       /*
+        * Only change the c/mtime if we are changing the size or we are
+        * explicitly asked to change it.  This handles the semantic difference
+        * between truncate() and ftruncate() as implemented in the VFS.
+        *
+        * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
+        * special case where we need to update the times despite not having
+        * these flags set.  For all other operations the VFS sets these flags
+        * explicitly if it wants a timestamp update.
+        */
+       if (orig_size != i_size_read(inode) &&
+           !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
+               iattr->ia_ctime = iattr->ia_mtime =
+                       current_fs_time(inode->i_sb);
+               iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
+       }
+
+       return ret;
+}
+
+/*
+ * Change attributes of an object referenced by dentry.
+ */
+int orangefs_setattr(struct dentry *dentry, struct iattr *iattr)
+{
+       int ret = -EINVAL;
+       struct inode *inode = dentry->d_inode;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_setattr: called on %s\n",
+                    dentry->d_name.name);
+
+       ret = inode_change_ok(inode, iattr);
+       if (ret)
+               goto out;
+
+       if ((iattr->ia_valid & ATTR_SIZE) &&
+           iattr->ia_size != i_size_read(inode)) {
+               ret = orangefs_setattr_size(inode, iattr);
+               if (ret)
+                       goto out;
+       }
+
+       setattr_copy(inode, iattr);
+       mark_inode_dirty(inode);
+
+       ret = orangefs_inode_setattr(inode, iattr);
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_setattr: inode_setattr returned %d\n",
+                    ret);
+
+       if (!ret && (iattr->ia_valid & ATTR_MODE))
+               /* change mode on a file that has ACLs */
+               ret = posix_acl_chmod(inode, inode->i_mode);
+
+out:
+       gossip_debug(GOSSIP_INODE_DEBUG, "orangefs_setattr: returning %d\n", ret);
+       return ret;
+}
+
+/*
+ * Obtain attributes of an object given a dentry
+ */
+int orangefs_getattr(struct vfsmount *mnt,
+                 struct dentry *dentry,
+                 struct kstat *kstat)
+{
+       int ret = -ENOENT;
+       struct inode *inode = dentry->d_inode;
+       struct orangefs_inode_s *orangefs_inode = NULL;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_getattr: called on %s\n",
+                    dentry->d_name.name);
+
+       /*
+        * A getattr expects that all fields/attributes of the inode are
+        * refreshed, so we don't have much choice but to refresh all of
+        * the attributes.
+        */
+       ret = orangefs_inode_getattr(inode, ORANGEFS_ATTR_SYS_ALL_NOHINT, 0);
+       if (ret == 0) {
+               generic_fillattr(inode, kstat);
+               /* override block size reported to stat */
+               orangefs_inode = ORANGEFS_I(inode);
+               kstat->blksize = orangefs_inode->blksize;
+       } else {
+               /* assume an I/O error and flag inode as bad */
+               gossip_debug(GOSSIP_INODE_DEBUG,
+                            "%s:%s:%d calling make bad inode\n",
+                            __FILE__,
+                            __func__,
+                            __LINE__);
+               orangefs_make_bad_inode(inode);
+       }
+       return ret;
+}
+
+int orangefs_permission(struct inode *inode, int mask)
+{
+       int ret;
+
+       if (mask & MAY_NOT_BLOCK)
+               return -ECHILD;
+
+       gossip_debug(GOSSIP_INODE_DEBUG, "%s: refreshing\n", __func__);
+
+       /* Make sure the permission (and other common attrs) are up to date. */
+       ret = orangefs_inode_getattr(inode,
+           ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE, 0);
+       if (ret < 0)
+               return ret;
+
+       return generic_permission(inode, mask);
+}
+
+/* ORANGEFS implementation of VFS inode operations for files */
+struct inode_operations orangefs_file_inode_operations = {
+       .get_acl = orangefs_get_acl,
+       .set_acl = orangefs_set_acl,
+       .setattr = orangefs_setattr,
+       .getattr = orangefs_getattr,
+       .setxattr = generic_setxattr,
+       .getxattr = generic_getxattr,
+       .listxattr = orangefs_listxattr,
+       .removexattr = generic_removexattr,
+       .permission = orangefs_permission,
+};
+
+static int orangefs_init_iops(struct inode *inode)
+{
+       inode->i_mapping->a_ops = &orangefs_address_operations;
+
+       switch (inode->i_mode & S_IFMT) {
+       case S_IFREG:
+               inode->i_op = &orangefs_file_inode_operations;
+               inode->i_fop = &orangefs_file_operations;
+               inode->i_blkbits = PAGE_CACHE_SHIFT;
+               break;
+       case S_IFLNK:
+               inode->i_op = &orangefs_symlink_inode_operations;
+               break;
+       case S_IFDIR:
+               inode->i_op = &orangefs_dir_inode_operations;
+               inode->i_fop = &orangefs_dir_operations;
+               break;
+       default:
+               gossip_debug(GOSSIP_INODE_DEBUG,
+                            "%s: unsupported mode\n",
+                            __func__);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/*
+ * Given an ORANGEFS object identifier (fsid, handle), convert it into
+ * an ino_t type that is used as the hash index when the handle is
+ * looked up in the VFS inode hash table.
+ */
+static inline ino_t orangefs_handle_hash(struct orangefs_object_kref *ref)
+{
+       if (!ref)
+               return 0;
+       return orangefs_khandle_to_ino(&(ref->khandle));
+}
+
+/*
+ * Called to set up an inode from iget5_locked.
+ */
+static int orangefs_set_inode(struct inode *inode, void *data)
+{
+       struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
+       struct orangefs_inode_s *orangefs_inode = NULL;
+
+       /* Make sure that we have sane parameters */
+       if (!data || !inode)
+               return 0;
+       orangefs_inode = ORANGEFS_I(inode);
+       if (!orangefs_inode)
+               return 0;
+       orangefs_inode->refn.fs_id = ref->fs_id;
+       orangefs_inode->refn.khandle = ref->khandle;
+       return 0;
+}
+
+/*
+ * Called to determine if handles match.
+ */
+static int orangefs_test_inode(struct inode *inode, void *data)
+{
+       struct orangefs_object_kref *ref = (struct orangefs_object_kref *) data;
+       struct orangefs_inode_s *orangefs_inode = NULL;
+
+       orangefs_inode = ORANGEFS_I(inode);
+       return (!ORANGEFS_khandle_cmp(&(orangefs_inode->refn.khandle), &(ref->khandle))
+               && orangefs_inode->refn.fs_id == ref->fs_id);
+}
+
+/*
+ * Front-end to lookup the inode-cache maintained by the VFS using the ORANGEFS
+ * file handle.
+ *
+ * @sb: the file system super block instance.
+ * @ref: The ORANGEFS object for which we are trying to locate an inode structure.
+ */
+struct inode *orangefs_iget(struct super_block *sb, struct orangefs_object_kref *ref)
+{
+       struct inode *inode = NULL;
+       unsigned long hash;
+       int error;
+
+       hash = orangefs_handle_hash(ref);
+       inode = iget5_locked(sb, hash, orangefs_test_inode, orangefs_set_inode, ref);
+       if (!inode || !(inode->i_state & I_NEW))
+               return inode;
+
+       error = orangefs_inode_getattr(inode,
+           ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE, 0);
+       if (error) {
+               iget_failed(inode);
+               return ERR_PTR(error);
+       }
+
+       inode->i_ino = hash;    /* needed for stat etc */
+       orangefs_init_iops(inode);
+       unlock_new_inode(inode);
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "iget handle %pU, fsid %d hash %ld i_ino %lu\n",
+                    &ref->khandle,
+                    ref->fs_id,
+                    hash,
+                    inode->i_ino);
+
+       return inode;
+}
+
+/*
+ * Allocate an inode for a newly created file and insert it into the inode hash.
+ */
+struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
+               int mode, dev_t dev, struct orangefs_object_kref *ref)
+{
+       unsigned long hash = orangefs_handle_hash(ref);
+       struct inode *inode;
+       int error;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "orangefs_new_inode: called\n"
+                    "(sb is %p | MAJOR(dev)=%u | MINOR(dev)=%u mode=%o)\n",
+                    sb,
+                    MAJOR(dev),
+                    MINOR(dev),
+                    mode);
+
+       inode = new_inode(sb);
+       if (!inode)
+               return NULL;
+
+       orangefs_set_inode(inode, ref);
+       inode->i_ino = hash;    /* needed for stat etc */
+
+       error = orangefs_inode_getattr(inode,
+           ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE, 0);
+       if (error)
+               goto out_iput;
+
+       orangefs_init_iops(inode);
+
+       inode->i_mode = mode;
+       inode->i_uid = current_fsuid();
+       inode->i_gid = current_fsgid();
+       inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+       inode->i_size = PAGE_CACHE_SIZE;
+       inode->i_rdev = dev;
+
+       error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref);
+       if (error < 0)
+               goto out_iput;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "Initializing ACLs for inode %pU\n",
+                    get_khandle_from_ino(inode));
+       orangefs_init_acl(inode, dir);
+       return inode;
+
+out_iput:
+       iput(inode);
+       return ERR_PTR(error);
+}
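
Editor's note: orangefs_iget() above follows the standard iget5_locked() contract: hash the (fs_id, khandle) pair with orangefs_handle_hash(), let the VFS probe its inode cache using orangefs_test_inode() as the comparator, and initialize any freshly allocated inode through orangefs_set_inode() while it is still I_NEW. Below is a hedged userspace sketch of that lookup-or-create pattern; the fixed-size table and the plain 64-bit handle are simplifications assumed for illustration only.

/* Hedged sketch of the iget5_locked()-style lookup-or-create contract,
 * reduced to user space.  The two-field key mirrors orangefs_object_kref
 * (fs_id + handle); the fixed-size table and the u64 handle are
 * assumptions made for illustration only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ref { uint64_t handle; int fs_id; };
struct node { struct ref key; struct node *next; };

static struct node *table[64];

static unsigned long hash_ref(const struct ref *r)
{
	return (unsigned long)(r->handle ^ (uint64_t)r->fs_id) % 64;
}

/* Comparator: same role as orangefs_test_inode(). */
static int test_node(const struct node *n, const struct ref *r)
{
	return n->key.handle == r->handle && n->key.fs_id == r->fs_id;
}

/* Initializer: same role as orangefs_set_inode(). */
static void set_node(struct node *n, const struct ref *r)
{
	n->key = *r;
}

static struct node *get_node(const struct ref *r)
{
	unsigned long h = hash_ref(r);
	struct node *n;

	for (n = table[h]; n; n = n->next)
		if (test_node(n, r))
			return n;	/* cache hit, like a non-I_NEW inode */

	n = calloc(1, sizeof(*n));	/* "new inode" path */
	if (!n)
		return NULL;
	set_node(n, r);
	n->next = table[h];
	table[h] = n;
	return n;
}

int main(void)
{
	struct ref r = { .handle = 0x1234, .fs_id = 7 };
	struct node *a = get_node(&r);
	struct node *b = get_node(&r);

	printf("same object returned twice: %s\n", a == b ? "yes" : "no");
	return 0;
}
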
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
new file mode 100644 (file)
index 0000000..8fc55c6
--- /dev/null
@@ -0,0 +1,447 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Linux VFS namei operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+
+/*
+ * Get a newly allocated inode to go with a negative dentry.
+ */
+static int orangefs_create(struct inode *dir,
+                       struct dentry *dentry,
+                       umode_t mode,
+                       bool exclusive)
+{
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       struct inode *inode;
+       int ret;
+
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s: called\n", __func__);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_CREATE);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.create.parent_refn = parent->refn;
+
+       fill_default_sys_attrs(new_op->upcall.req.create.attributes,
+                              ORANGEFS_TYPE_METAFILE, mode);
+
+       strncpy(new_op->upcall.req.create.d_name,
+               dentry->d_name.name, ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Create Got ORANGEFS handle %pU on fsid %d (ret=%d)\n",
+                    &new_op->downcall.resp.create.refn.khandle,
+                    new_op->downcall.resp.create.refn.fs_id, ret);
+
+       if (ret < 0) {
+               gossip_debug(GOSSIP_NAME_DEBUG,
+                            "%s: failed with error code %d\n",
+                            __func__, ret);
+               goto out;
+       }
+
+       inode = orangefs_new_inode(dir->i_sb, dir, S_IFREG | mode, 0,
+                               &new_op->downcall.resp.create.refn);
+       if (IS_ERR(inode)) {
+               gossip_err("*** Failed to allocate orangefs file inode\n");
+               ret = PTR_ERR(inode);
+               goto out;
+       }
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Assigned file inode new number of %pU\n",
+                    get_khandle_from_ino(inode));
+
+       d_instantiate(dentry, inode);
+       unlock_new_inode(inode);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Inode (Regular File) %pU -> %s\n",
+                    get_khandle_from_ino(inode),
+                    dentry->d_name.name);
+
+       SetMtimeFlag(parent);
+       dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+       mark_inode_dirty_sync(dir);
+       ret = 0;
+out:
+       op_release(new_op);
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s: returning %d\n", __func__, ret);
+       return ret;
+}
+
+/*
+ * Attempt to resolve an object name (dentry->d_name), parent handle, and
+ * fsid into a handle for the object.
+ */
+static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
+                                  unsigned int flags)
+{
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       struct inode *inode;
+       struct dentry *res;
+       int ret = -EINVAL;
+
+       /*
+        * in theory we could skip a lookup here (if the intent is to
+        * create) in order to avoid a potentially failed lookup, but
+        * skipping it would also skip valid lookups and could try to
+        * create a file that already exists (e.g. the vfs already handles
+        * checking for -EEXIST on O_EXCL opens, which is broken if we
+        * skip this lookup in the create path)
+        */
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s called on %s\n",
+                    __func__, dentry->d_name.name);
+
+       if (dentry->d_name.len > (ORANGEFS_NAME_LEN - 1))
+               return ERR_PTR(-ENAMETOOLONG);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_LOOKUP);
+       if (!new_op)
+               return ERR_PTR(-ENOMEM);
+
+       new_op->upcall.req.lookup.sym_follow = ORANGEFS_LOOKUP_LINK_NO_FOLLOW;
+
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s:%s:%d using parent %pU\n",
+                    __FILE__,
+                    __func__,
+                    __LINE__,
+                    &parent->refn.khandle);
+       new_op->upcall.req.lookup.parent_refn = parent->refn;
+
+       strncpy(new_op->upcall.req.lookup.d_name, dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "%s: doing lookup on %s under %pU,%d (follow=%s)\n",
+                    __func__,
+                    new_op->upcall.req.lookup.d_name,
+                    &new_op->upcall.req.lookup.parent_refn.khandle,
+                    new_op->upcall.req.lookup.parent_refn.fs_id,
+                    ((new_op->upcall.req.lookup.sym_follow ==
+                      ORANGEFS_LOOKUP_LINK_FOLLOW) ? "yes" : "no"));
+
+       ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Lookup Got %pU, fsid %d (ret=%d)\n",
+                    &new_op->downcall.resp.lookup.refn.khandle,
+                    new_op->downcall.resp.lookup.refn.fs_id,
+                    ret);
+
+       if (ret < 0) {
+               if (ret == -ENOENT) {
+                       /*
+                        * if no inode was found, add a negative dentry to
+                        * dcache anyway; if we don't, we don't hold expected
+                        * lookup semantics and we most noticeably break
+                        * during directory renames.
+                        *
+                        * however, if the operation failed or exited, do not
+                        * add the dentry (e.g. in the case that a touch is
+                        * issued on a file that already exists that was
+                        * interrupted during this lookup -- no need to add
+                        * another negative dentry for an existing file)
+                        */
+
+                       gossip_debug(GOSSIP_NAME_DEBUG,
+                                    "orangefs_lookup: Adding *negative* dentry "
+                                    "%p for %s\n",
+                                    dentry,
+                                    dentry->d_name.name);
+
+                       d_add(dentry, NULL);
+                       res = NULL;
+                       goto out;
+               }
+
+               /* must be a non-recoverable error */
+               res = ERR_PTR(ret);
+               goto out;
+       }
+
+       inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
+       if (IS_ERR(inode)) {
+               gossip_debug(GOSSIP_NAME_DEBUG,
+                       "error %ld from iget\n", PTR_ERR(inode));
+               res = ERR_CAST(inode);
+               goto out;
+       }
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "%s:%s:%d "
+                    "Found good inode [%lu] with count [%d]\n",
+                    __FILE__,
+                    __func__,
+                    __LINE__,
+                    inode->i_ino,
+                    (int)atomic_read(&inode->i_count));
+
+       /* update dentry/inode pair into dcache */
+       res = d_splice_alias(inode, dentry);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Lookup success (inode ct = %d)\n",
+                    (int)atomic_read(&inode->i_count));
+out:
+       op_release(new_op);
+       return res;
+}
+
+/* return 0 on success; non-zero otherwise */
+static int orangefs_unlink(struct inode *dir, struct dentry *dentry)
+{
+       struct inode *inode = dentry->d_inode;
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       int ret;
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "%s: called on %s\n"
+                    "  (inode %pU): Parent is %pU | fs_id %d\n",
+                    __func__,
+                    dentry->d_name.name,
+                    get_khandle_from_ino(inode),
+                    &parent->refn.khandle,
+                    parent->refn.fs_id);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_REMOVE);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.remove.parent_refn = parent->refn;
+       strncpy(new_op->upcall.req.remove.d_name, dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op, "orangefs_unlink",
+                               get_interruptible_flag(inode));
+
+       /* when request is serviced properly, free req op struct */
+       op_release(new_op);
+
+       if (!ret) {
+               drop_nlink(inode);
+
+               SetMtimeFlag(parent);
+               dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+               mark_inode_dirty_sync(dir);
+       }
+       return ret;
+}
+
+static int orangefs_symlink(struct inode *dir,
+                        struct dentry *dentry,
+                        const char *symname)
+{
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       struct inode *inode;
+       int mode = 0755;        /* octal: rwxr-xr-x */
+       int ret;
+
+       gossip_debug(GOSSIP_NAME_DEBUG, "%s: called\n", __func__);
+
+       if (!symname)
+               return -EINVAL;
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_SYMLINK);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.sym.parent_refn = parent->refn;
+
+       fill_default_sys_attrs(new_op->upcall.req.sym.attributes,
+                              ORANGEFS_TYPE_SYMLINK,
+                              mode);
+
+       strncpy(new_op->upcall.req.sym.entry_name,
+               dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+       strncpy(new_op->upcall.req.sym.target, symname, ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Symlink Got ORANGEFS handle %pU on fsid %d (ret=%d)\n",
+                    &new_op->downcall.resp.sym.refn.khandle,
+                    new_op->downcall.resp.sym.refn.fs_id, ret);
+
+       if (ret < 0) {
+               gossip_debug(GOSSIP_NAME_DEBUG,
+                           "%s: failed with error code %d\n",
+                           __func__, ret);
+               goto out;
+       }
+
+       inode = orangefs_new_inode(dir->i_sb, dir, S_IFLNK | mode, 0,
+                               &new_op->downcall.resp.sym.refn);
+       if (IS_ERR(inode)) {
+               gossip_err("*** Failed to allocate orangefs symlink inode\n");
+               ret = PTR_ERR(inode);
+               goto out;
+       }
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Assigned symlink inode new number of %pU\n",
+                    get_khandle_from_ino(inode));
+
+       d_instantiate(dentry, inode);
+       unlock_new_inode(inode);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Inode (Symlink) %pU -> %s\n",
+                    get_khandle_from_ino(inode),
+                    dentry->d_name.name);
+
+       SetMtimeFlag(parent);
+       dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+       mark_inode_dirty_sync(dir);
+       ret = 0;
+out:
+       op_release(new_op);
+       return ret;
+}
+
+static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+       struct orangefs_inode_s *parent = ORANGEFS_I(dir);
+       struct orangefs_kernel_op_s *new_op;
+       struct inode *inode;
+       int ret;
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_MKDIR);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.mkdir.parent_refn = parent->refn;
+
+       fill_default_sys_attrs(new_op->upcall.req.mkdir.attributes,
+                             ORANGEFS_TYPE_DIRECTORY, mode);
+
+       strncpy(new_op->upcall.req.mkdir.d_name,
+               dentry->d_name.name, ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op, __func__, get_interruptible_flag(dir));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Mkdir Got ORANGEFS handle %pU on fsid %d\n",
+                    &new_op->downcall.resp.mkdir.refn.khandle,
+                    new_op->downcall.resp.mkdir.refn.fs_id);
+
+       if (ret < 0) {
+               gossip_debug(GOSSIP_NAME_DEBUG,
+                            "%s: failed with error code %d\n",
+                            __func__, ret);
+               goto out;
+       }
+
+       inode = orangefs_new_inode(dir->i_sb, dir, S_IFDIR | mode, 0,
+                               &new_op->downcall.resp.mkdir.refn);
+       if (IS_ERR(inode)) {
+               gossip_err("*** Failed to allocate orangefs dir inode\n");
+               ret = PTR_ERR(inode);
+               goto out;
+       }
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Assigned dir inode new number of %pU\n",
+                    get_khandle_from_ino(inode));
+
+       d_instantiate(dentry, inode);
+       unlock_new_inode(inode);
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "Inode (Directory) %pU -> %s\n",
+                    get_khandle_from_ino(inode),
+                    dentry->d_name.name);
+
+       /*
+        * NOTE: we have no good way to keep nlink consistent for directories
+        * across clients; keep constant at 1.
+        */
+       SetMtimeFlag(parent);
+       dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
+       mark_inode_dirty_sync(dir);
+out:
+       op_release(new_op);
+       return ret;
+}
+
+static int orangefs_rename(struct inode *old_dir,
+                       struct dentry *old_dentry,
+                       struct inode *new_dir,
+                       struct dentry *new_dentry)
+{
+       struct orangefs_kernel_op_s *new_op;
+       int ret;
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "orangefs_rename: called (%s/%s => %s/%s) ct=%d\n",
+                    old_dentry->d_parent->d_name.name,
+                    old_dentry->d_name.name,
+                    new_dentry->d_parent->d_name.name,
+                    new_dentry->d_name.name,
+                    d_count(new_dentry));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_RENAME);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.rename.old_parent_refn = ORANGEFS_I(old_dir)->refn;
+       new_op->upcall.req.rename.new_parent_refn = ORANGEFS_I(new_dir)->refn;
+
+       strncpy(new_op->upcall.req.rename.d_old_name,
+               old_dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+       strncpy(new_op->upcall.req.rename.d_new_name,
+               new_dentry->d_name.name,
+               ORANGEFS_NAME_LEN);
+
+       ret = service_operation(new_op,
+                               "orangefs_rename",
+                               get_interruptible_flag(old_dentry->d_inode));
+
+       gossip_debug(GOSSIP_NAME_DEBUG,
+                    "orangefs_rename: got downcall status %d\n",
+                    ret);
+
+       if (new_dentry->d_inode)
+               new_dentry->d_inode->i_ctime = CURRENT_TIME;
+
+       op_release(new_op);
+       return ret;
+}
+
+/* ORANGEFS implementation of VFS inode operations for directories */
+struct inode_operations orangefs_dir_inode_operations = {
+       .lookup = orangefs_lookup,
+       .get_acl = orangefs_get_acl,
+       .set_acl = orangefs_set_acl,
+       .create = orangefs_create,
+       .unlink = orangefs_unlink,
+       .symlink = orangefs_symlink,
+       .mkdir = orangefs_mkdir,
+       .rmdir = orangefs_unlink,
+       .rename = orangefs_rename,
+       .setattr = orangefs_setattr,
+       .getattr = orangefs_getattr,
+       .setxattr = generic_setxattr,
+       .getxattr = generic_getxattr,
+       .removexattr = generic_removexattr,
+       .listxattr = orangefs_listxattr,
+       .permission = orangefs_permission,
+};
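
Editor's note: orangefs_create(), orangefs_symlink() and orangefs_mkdir() above all follow the same upcall/downcall shape: allocate an op, fill in the request (parent refn, attributes, name), hand it to the client-core via service_operation(), inspect the status and the downcall refn, then release the op. The sketch below reduces that sequence to user space; the tiny struct and the always-succeeding service() stub are assumptions for illustration and are not the kernel module's real types.

/* Hedged sketch of the upcall/downcall sequence shared by the namei
 * operations above.  The structures and the service() stub are
 * illustrative assumptions only.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_LEN 256

struct demo_op {
	int type;
	char d_name[NAME_LEN];		/* upcall request  */
	int status;			/* downcall result */
};

static int service(struct demo_op *op)
{
	op->status = 0;			/* pretend the daemon answered */
	return op->status;
}

static int demo_create(const char *name)
{
	struct demo_op *op;
	int ret;

	if (strlen(name) > NAME_LEN - 1)
		return -ENAMETOOLONG;	/* same guard as orangefs_lookup() */

	op = calloc(1, sizeof(*op));	/* op_alloc() analogue */
	if (!op)
		return -ENOMEM;

	op->type = 1;			/* "OP_CREATE" */
	strncpy(op->d_name, name, NAME_LEN - 1);

	ret = service(op);		/* service_operation() analogue */

	free(op);			/* op_release() analogue */
	return ret;
}

int main(void)
{
	printf("demo_create: %d\n", demo_create("newfile"));
	return 0;
}
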
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
new file mode 100644 (file)
index 0000000..c60019d
--- /dev/null
@@ -0,0 +1,577 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+DECLARE_WAIT_QUEUE_HEAD(orangefs_bufmap_init_waitq);
+
+/* used to describe mapped buffers */
+struct orangefs_bufmap_desc {
+       void *uaddr;                    /* user space address pointer */
+       struct page **page_array;       /* array of mapped pages */
+       int array_count;                /* size of above arrays */
+       struct list_head list_link;
+};
+
+static struct orangefs_bufmap {
+       atomic_t refcnt;
+
+       int desc_size;
+       int desc_shift;
+       int desc_count;
+       int total_size;
+       int page_count;
+
+       struct page **page_array;
+       struct orangefs_bufmap_desc *desc_array;
+
+       /* array to track usage of buffer descriptors */
+       int *buffer_index_array;
+       spinlock_t buffer_index_lock;
+
+       /* array to track usage of buffer descriptors for readdir */
+       int readdir_index_array[ORANGEFS_READDIR_DEFAULT_DESC_COUNT];
+       spinlock_t readdir_index_lock;
+} *__orangefs_bufmap;
+
+static DEFINE_SPINLOCK(orangefs_bufmap_lock);
+
+static void
+orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap)
+{
+       int i;
+
+       for (i = 0; i < bufmap->page_count; i++)
+               page_cache_release(bufmap->page_array[i]);
+}
+
+static void
+orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
+{
+       kfree(bufmap->page_array);
+       kfree(bufmap->desc_array);
+       kfree(bufmap->buffer_index_array);
+       kfree(bufmap);
+}
+
+static struct orangefs_bufmap *orangefs_bufmap_ref(void)
+{
+       struct orangefs_bufmap *bufmap = NULL;
+
+       spin_lock(&orangefs_bufmap_lock);
+       if (__orangefs_bufmap) {
+               bufmap = __orangefs_bufmap;
+               atomic_inc(&bufmap->refcnt);
+       }
+       spin_unlock(&orangefs_bufmap_lock);
+       return bufmap;
+}
+
+static void orangefs_bufmap_unref(struct orangefs_bufmap *bufmap)
+{
+       if (atomic_dec_and_lock(&bufmap->refcnt, &orangefs_bufmap_lock)) {
+               __orangefs_bufmap = NULL;
+               spin_unlock(&orangefs_bufmap_lock);
+
+               orangefs_bufmap_unmap(bufmap);
+               orangefs_bufmap_free(bufmap);
+       }
+}
+
+/*
+ * XXX: Can the size and shift change while the caller gives up the 
+ * XXX: lock between calling this and doing something useful?
+ */
+
+int orangefs_bufmap_size_query(void)
+{
+       struct orangefs_bufmap *bufmap;
+       int size = 0;
+       bufmap = orangefs_bufmap_ref();
+       if (bufmap) {
+               size = bufmap->desc_size;
+               orangefs_bufmap_unref(bufmap);
+       }
+       return size;
+}
+
+int orangefs_bufmap_shift_query(void)
+{
+       struct orangefs_bufmap *bufmap;
+       int shift = 0;
+       bufmap = orangefs_bufmap_ref();
+       if (bufmap) {
+               shift = bufmap->desc_shift;
+               orangefs_bufmap_unref(bufmap);
+       }
+       return shift;
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(bufmap_waitq);
+static DECLARE_WAIT_QUEUE_HEAD(readdir_waitq);
+
+/*
+ * orangefs_get_bufmap_init
+ *
+ * If bufmap_init is 1, then the shared memory system, including the
+ * buffer_index_array, is available.  Otherwise, it is not.
+ *
+ * returns the value of bufmap_init
+ */
+int orangefs_get_bufmap_init(void)
+{
+       return __orangefs_bufmap ? 1 : 0;
+}
+
+
+static struct orangefs_bufmap *
+orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
+{
+       struct orangefs_bufmap *bufmap;
+
+       bufmap = kzalloc(sizeof(*bufmap), GFP_KERNEL);
+       if (!bufmap)
+               goto out;
+
+       atomic_set(&bufmap->refcnt, 1);
+       bufmap->total_size = user_desc->total_size;
+       bufmap->desc_count = user_desc->count;
+       bufmap->desc_size = user_desc->size;
+       bufmap->desc_shift = ilog2(bufmap->desc_size);
+
+       spin_lock_init(&bufmap->buffer_index_lock);
+       bufmap->buffer_index_array =
+               kcalloc(bufmap->desc_count, sizeof(int), GFP_KERNEL);
+       if (!bufmap->buffer_index_array) {
+               gossip_err("orangefs: could not allocate %d buffer indices\n",
+                               bufmap->desc_count);
+               goto out_free_bufmap;
+       }
+       spin_lock_init(&bufmap->readdir_index_lock);
+
+       bufmap->desc_array =
+               kcalloc(bufmap->desc_count, sizeof(struct orangefs_bufmap_desc),
+                       GFP_KERNEL);
+       if (!bufmap->desc_array) {
+               gossip_err("orangefs: could not allocate %d descriptors\n",
+                               bufmap->desc_count);
+               goto out_free_index_array;
+       }
+
+       bufmap->page_count = bufmap->total_size / PAGE_SIZE;
+
+       /* allocate storage to track our page mappings */
+       bufmap->page_array =
+               kcalloc(bufmap->page_count, sizeof(struct page *), GFP_KERNEL);
+       if (!bufmap->page_array)
+               goto out_free_desc_array;
+
+       return bufmap;
+
+out_free_desc_array:
+       kfree(bufmap->desc_array);
+out_free_index_array:
+       kfree(bufmap->buffer_index_array);
+out_free_bufmap:
+       kfree(bufmap);
+out:
+       return NULL;
+}
+
+static int
+orangefs_bufmap_map(struct orangefs_bufmap *bufmap,
+               struct ORANGEFS_dev_map_desc *user_desc)
+{
+       int pages_per_desc = bufmap->desc_size / PAGE_SIZE;
+       int offset = 0, ret, i;
+
+       /* map the pages */
+       ret = get_user_pages_fast((unsigned long)user_desc->ptr,
+                            bufmap->page_count, 1, bufmap->page_array);
+
+       if (ret < 0)
+               return ret;
+
+       if (ret != bufmap->page_count) {
+               gossip_err("orangefs error: asked for %d pages, only got %d.\n",
+                               bufmap->page_count, ret);
+
+               for (i = 0; i < ret; i++) {
+                       SetPageError(bufmap->page_array[i]);
+                       page_cache_release(bufmap->page_array[i]);
+               }
+               return -ENOMEM;
+       }
+
+       /*
+        * ideally we want to get kernel space pointers for each page, but
+        * we can't kmap that many pages at once if highmem is being used.
+        * so instead, we just kmap/kunmap the page address each time the
+        * kaddr is needed.
+        */
+       for (i = 0; i < bufmap->page_count; i++)
+               flush_dcache_page(bufmap->page_array[i]);
+
+       /* build a list of available descriptors */
+       for (offset = 0, i = 0; i < bufmap->desc_count; i++) {
+               bufmap->desc_array[i].page_array = &bufmap->page_array[offset];
+               bufmap->desc_array[i].array_count = pages_per_desc;
+               bufmap->desc_array[i].uaddr =
+                   (user_desc->ptr + (i * pages_per_desc * PAGE_SIZE));
+               offset += pages_per_desc;
+       }
+
+       return 0;
+}
+
+/*
+ * orangefs_bufmap_initialize()
+ *
+ * initializes the mapped buffer interface
+ *
+ * returns 0 on success, -errno on failure
+ */
+int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc)
+{
+       struct orangefs_bufmap *bufmap;
+       int ret = -EINVAL;
+
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "orangefs_bufmap_initialize: called (ptr (%p) sz (%d) cnt(%d).\n",
+                    user_desc->ptr,
+                    user_desc->size,
+                    user_desc->count);
+
+       /*
+        * sanity check alignment and size of buffer that caller wants to
+        * work with
+        */
+       if (PAGE_ALIGN((unsigned long)user_desc->ptr) !=
+           (unsigned long)user_desc->ptr) {
+               gossip_err("orangefs error: memory alignment (front). %p\n",
+                          user_desc->ptr);
+               goto out;
+       }
+
+       if (PAGE_ALIGN(((unsigned long)user_desc->ptr + user_desc->total_size))
+           != (unsigned long)(user_desc->ptr + user_desc->total_size)) {
+               gossip_err("orangefs error: memory alignment (back).(%p + %d)\n",
+                          user_desc->ptr,
+                          user_desc->total_size);
+               goto out;
+       }
+
+       if (user_desc->total_size != (user_desc->size * user_desc->count)) {
+               gossip_err("orangefs error: user provided an oddly sized buffer: (%d, %d, %d)\n",
+                          user_desc->total_size,
+                          user_desc->size,
+                          user_desc->count);
+               goto out;
+       }
+
+       if ((user_desc->size % PAGE_SIZE) != 0) {
+               gossip_err("orangefs error: bufmap size not page size divisible (%d).\n",
+                          user_desc->size);
+               goto out;
+       }
+
+       ret = -ENOMEM;
+       bufmap = orangefs_bufmap_alloc(user_desc);
+       if (!bufmap)
+               goto out;
+
+       ret = orangefs_bufmap_map(bufmap, user_desc);
+       if (ret)
+               goto out_free_bufmap;
+
+       spin_lock(&orangefs_bufmap_lock);
+       if (__orangefs_bufmap) {
+               spin_unlock(&orangefs_bufmap_lock);
+               gossip_err("orangefs: error: bufmap already initialized.\n");
+               ret = -EALREADY;
+               goto out_unmap_bufmap;
+       }
+       __orangefs_bufmap = bufmap;
+       spin_unlock(&orangefs_bufmap_lock);
+
+       /*
+        * If there are operations in orangefs_bufmap_init_waitq, wake them up.
+        * This scenario occurs when the client-core is restarted and I/O
+        * requests in the in-progress or waiting tables are restarted.  I/O
+        * requests cannot be restarted until the shared memory system is
+        * completely re-initialized, so we put the I/O requests in this
+        * waitq until initialization has completed.  NOTE:  the I/O requests
+        * are also on a timer, so they don't wait forever just in case the
+        * client-core doesn't come back up.
+        */
+       wake_up_interruptible(&orangefs_bufmap_init_waitq);
+
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "orangefs_bufmap_initialize: exiting normally\n");
+       return 0;
+
+out_unmap_bufmap:
+       orangefs_bufmap_unmap(bufmap);
+out_free_bufmap:
+       orangefs_bufmap_free(bufmap);
+out:
+       return ret;
+}
+
+/*
+ * orangefs_bufmap_finalize()
+ *
+ * shuts down the mapped buffer interface and releases any resources
+ * associated with it
+ *
+ * no return value
+ */
+void orangefs_bufmap_finalize(void)
+{
+       gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs_bufmap_finalize: called\n");
+       BUG_ON(!__orangefs_bufmap);
+       orangefs_bufmap_unref(__orangefs_bufmap);
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "orangefs_bufmap_finalize: exiting normally\n");
+}
+
+struct slot_args {
+       int slot_count;
+       int *slot_array;
+       spinlock_t *slot_lock;
+       wait_queue_head_t *slot_wq;
+};
+
+static int wait_for_a_slot(struct slot_args *slargs, int *buffer_index)
+{
+       int ret = -1;
+       int i = 0;
+       DEFINE_WAIT(wait_entry);
+
+       while (1) {
+               /*
+                * check for available desc, slot_lock is the appropriate
+                * index_lock
+                */
+               spin_lock(slargs->slot_lock);
+               prepare_to_wait_exclusive(slargs->slot_wq,
+                                         &wait_entry,
+                                         TASK_INTERRUPTIBLE);
+               for (i = 0; i < slargs->slot_count; i++)
+                       if (slargs->slot_array[i] == 0) {
+                               slargs->slot_array[i] = 1;
+                               *buffer_index = i;
+                               ret = 0;
+                               break;
+                       }
+               spin_unlock(slargs->slot_lock);
+
+               /* if we acquired a buffer, then break out of while */
+               if (ret == 0)
+                       break;
+
+               if (!signal_pending(current)) {
+                       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                                    "[BUFMAP]: waiting %d "
+                                    "seconds for a slot\n",
+                                    slot_timeout_secs);
+                       if (!schedule_timeout(slot_timeout_secs * HZ)) {
+                               gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                                            "*** wait_for_a_slot timed out\n");
+                               ret = -ETIMEDOUT;
+                               break;
+                       }
+                       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                         "[BUFMAP]: woken up by a slot becoming available.\n");
+                       continue;
+               }
+
+               gossip_debug(GOSSIP_BUFMAP_DEBUG, "orangefs: %s interrupted.\n",
+                            __func__);
+               ret = -EINTR;
+               break;
+       }
+
+       spin_lock(slargs->slot_lock);
+       finish_wait(slargs->slot_wq, &wait_entry);
+       spin_unlock(slargs->slot_lock);
+       return ret;
+}
+
+static void put_back_slot(struct slot_args *slargs, int buffer_index)
+{
+       /* slot_lock is the appropriate index_lock */
+       spin_lock(slargs->slot_lock);
+       if (buffer_index < 0 || buffer_index >= slargs->slot_count) {
+               spin_unlock(slargs->slot_lock);
+               return;
+       }
+
+       /* put the desc back on the queue */
+       slargs->slot_array[buffer_index] = 0;
+       spin_unlock(slargs->slot_lock);
+
+       /* wake up anyone who may be sleeping on the queue */
+       wake_up_interruptible(slargs->slot_wq);
+}
+
+/*
+ * orangefs_bufmap_get()
+ *
+ * gets a free mapped buffer descriptor, will sleep until one becomes
+ * available if necessary
+ *
+ * returns 0 on success, -errno on failure
+ */
+int orangefs_bufmap_get(struct orangefs_bufmap **mapp, int *buffer_index)
+{
+       struct orangefs_bufmap *bufmap = orangefs_bufmap_ref();
+       struct slot_args slargs;
+       int ret;
+
+       if (!bufmap) {
+               gossip_err("orangefs: please confirm that pvfs2-client daemon is running.\n");
+               return -EIO;
+       }
+
+       slargs.slot_count = bufmap->desc_count;
+       slargs.slot_array = bufmap->buffer_index_array;
+       slargs.slot_lock = &bufmap->buffer_index_lock;
+       slargs.slot_wq = &bufmap_waitq;
+       ret = wait_for_a_slot(&slargs, buffer_index);
+       if (ret)
+               orangefs_bufmap_unref(bufmap);
+       *mapp = bufmap;
+       return ret;
+}
+
+/*
+ * orangefs_bufmap_put()
+ *
+ * returns a mapped buffer descriptor to the collection
+ *
+ * no return value
+ */
+void orangefs_bufmap_put(struct orangefs_bufmap *bufmap, int buffer_index)
+{
+       struct slot_args slargs;
+
+       slargs.slot_count = bufmap->desc_count;
+       slargs.slot_array = bufmap->buffer_index_array;
+       slargs.slot_lock = &bufmap->buffer_index_lock;
+       slargs.slot_wq = &bufmap_waitq;
+       put_back_slot(&slargs, buffer_index);
+       orangefs_bufmap_unref(bufmap);
+}
+
+/*
+ * orangefs_readdir_index_get()
+ *
+ * gets a free descriptor, sleeping until one becomes available
+ * if necessary.  Although the readdir buffers are not mapped into
+ * kernel space, we could do that at a later point in time.
+ * Regardless, these indices are used by the client-core.
+ *
+ * returns 0 on success, -errno on failure
+ */
+int orangefs_readdir_index_get(struct orangefs_bufmap **mapp, int *buffer_index)
+{
+       struct orangefs_bufmap *bufmap = orangefs_bufmap_ref();
+       struct slot_args slargs;
+       int ret;
+
+       if (!bufmap) {
+               gossip_err("orangefs: please confirm that pvfs2-client daemon is running.\n");
+               return -EIO;
+       }
+
+       slargs.slot_count = ORANGEFS_READDIR_DEFAULT_DESC_COUNT;
+       slargs.slot_array = bufmap->readdir_index_array;
+       slargs.slot_lock = &bufmap->readdir_index_lock;
+       slargs.slot_wq = &readdir_waitq;
+       ret = wait_for_a_slot(&slargs, buffer_index);
+       if (ret)
+               orangefs_bufmap_unref(bufmap);
+       *mapp = bufmap;
+       return ret;
+}
+
+void orangefs_readdir_index_put(struct orangefs_bufmap *bufmap, int buffer_index)
+{
+       struct slot_args slargs;
+
+       slargs.slot_count = ORANGEFS_READDIR_DEFAULT_DESC_COUNT;
+       slargs.slot_array = bufmap->readdir_index_array;
+       slargs.slot_lock = &bufmap->readdir_index_lock;
+       slargs.slot_wq = &readdir_waitq;
+       put_back_slot(&slargs, buffer_index);
+       orangefs_bufmap_unref(bufmap);
+}
+
+/*
+ * we've been handed an iovec; we need to copy it to the shared
+ * memory descriptor at "buffer_index".
+ */
+int orangefs_bufmap_copy_from_iovec(struct orangefs_bufmap *bufmap,
+                               struct iov_iter *iter,
+                               int buffer_index,
+                               size_t size)
+{
+       struct orangefs_bufmap_desc *to = &bufmap->desc_array[buffer_index];
+       int i;
+
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "%s: buffer_index:%d: size:%zu:\n",
+                    __func__, buffer_index, size);
+
+       for (i = 0; size; i++) {
+               struct page *page = to->page_array[i];
+               size_t n = size;
+               if (n > PAGE_SIZE)
+                       n = PAGE_SIZE;
+               n = copy_page_from_iter(page, 0, n, iter);
+               if (!n)
+                       return -EFAULT;
+               size -= n;
+       }
+       return 0;
+}
+
+/*
+ * we've been handed an iovec; we need to fill it from the shared
+ * memory descriptor at "buffer_index".
+ */
+int orangefs_bufmap_copy_to_iovec(struct orangefs_bufmap *bufmap,
+                                   struct iov_iter *iter,
+                                   int buffer_index,
+                                   size_t size)
+{
+       struct orangefs_bufmap_desc *from = &bufmap->desc_array[buffer_index];
+       int i;
+
+       gossip_debug(GOSSIP_BUFMAP_DEBUG,
+                    "%s: buffer_index:%d: size:%zu:\n",
+                    __func__, buffer_index, size);
+
+       for (i = 0; size; i++) {
+               struct page *page = from->page_array[i];
+               size_t n = size;
+               if (n > PAGE_SIZE)
+                       n = PAGE_SIZE;
+               n = copy_page_to_iter(page, 0, n, iter);
+               if (!n)
+                       return -EFAULT;
+               size -= n;
+       }
+       return 0;
+}
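
Editor's note: wait_for_a_slot() and put_back_slot() above implement a simple slot protocol: scan a 0/1 usage array under a lock, claim the first free entry, and otherwise sleep on a wait queue until someone returns a slot. The hedged sketch below (build with -pthread) mirrors that protocol in user space, with a mutex and condition variable standing in for the kernel wait queue; the slot count of 4 is an arbitrary assumption.

/* Hedged userspace sketch of the slot protocol used by the bufmap
 * descriptor arrays above.  A condition variable replaces the kernel
 * wait queue; SLOT_COUNT is an arbitrary illustrative value.
 */
#include <pthread.h>
#include <stdio.h>

#define SLOT_COUNT 4

static int slot_array[SLOT_COUNT];
static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t slot_cond = PTHREAD_COND_INITIALIZER;

static int get_slot(void)
{
	int i;

	pthread_mutex_lock(&slot_lock);
	for (;;) {
		for (i = 0; i < SLOT_COUNT; i++) {
			if (slot_array[i] == 0) {
				slot_array[i] = 1;	/* claim the slot */
				pthread_mutex_unlock(&slot_lock);
				return i;
			}
		}
		/* no free descriptor: sleep until put_slot() signals */
		pthread_cond_wait(&slot_cond, &slot_lock);
	}
}

static void put_slot(int index)
{
	pthread_mutex_lock(&slot_lock);
	slot_array[index] = 0;
	pthread_mutex_unlock(&slot_lock);
	pthread_cond_signal(&slot_cond);	/* wake one sleeper */
}

int main(void)
{
	int a = get_slot();
	int b = get_slot();

	printf("got slots %d and %d\n", a, b);
	put_slot(a);
	put_slot(b);
	return 0;
}
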
diff --git a/fs/orangefs/orangefs-bufmap.h b/fs/orangefs/orangefs-bufmap.h
new file mode 100644 (file)
index 0000000..dff55e2
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#ifndef __ORANGEFS_BUFMAP_H
+#define __ORANGEFS_BUFMAP_H
+
+struct orangefs_bufmap;
+
+int orangefs_bufmap_size_query(void);
+
+int orangefs_bufmap_shift_query(void);
+
+int orangefs_bufmap_initialize(struct ORANGEFS_dev_map_desc *user_desc);
+
+int orangefs_get_bufmap_init(void);
+
+void orangefs_bufmap_finalize(void);
+
+int orangefs_bufmap_get(struct orangefs_bufmap **mapp, int *buffer_index);
+
+void orangefs_bufmap_put(struct orangefs_bufmap *bufmap, int buffer_index);
+
+int orangefs_readdir_index_get(struct orangefs_bufmap **mapp, int *buffer_index);
+
+void orangefs_readdir_index_put(struct orangefs_bufmap *bufmap, int buffer_index);
+
+int orangefs_bufmap_copy_from_iovec(struct orangefs_bufmap *bufmap,
+                               struct iov_iter *iter,
+                               int buffer_index,
+                               size_t size);
+
+int orangefs_bufmap_copy_to_iovec(struct orangefs_bufmap *bufmap,
+                             struct iov_iter *iter,
+                             int buffer_index,
+                             size_t size);
+
+size_t orangefs_bufmap_copy_to_user_task_iovec(struct task_struct *tsk,
+                                          struct iovec *iovec,
+                                          unsigned long nr_segs,
+                                          struct orangefs_bufmap *bufmap,
+                                          int buffer_index,
+                                          size_t bytes_to_be_copied);
+
+#endif /* __ORANGEFS_BUFMAP_H */
diff --git a/fs/orangefs/orangefs-cache.c b/fs/orangefs/orangefs-cache.c
new file mode 100644 (file)
index 0000000..3b3de91
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+
+/* tags assigned to kernel upcall operations */
+static __u64 next_tag_value;
+static DEFINE_SPINLOCK(next_tag_value_lock);
+
+/* the orangefs memory caches */
+
+/* a cache for orangefs upcall/downcall operations */
+static struct kmem_cache *op_cache;
+
+int op_cache_initialize(void)
+{
+       op_cache = kmem_cache_create("orangefs_op_cache",
+                                    sizeof(struct orangefs_kernel_op_s),
+                                    0,
+                                    ORANGEFS_CACHE_CREATE_FLAGS,
+                                    NULL);
+
+       if (!op_cache) {
+               gossip_err("Cannot create orangefs_op_cache\n");
+               return -ENOMEM;
+       }
+
+       /* initialize our atomic tag counter */
+       spin_lock(&next_tag_value_lock);
+       next_tag_value = 100;
+       spin_unlock(&next_tag_value_lock);
+       return 0;
+}
+
+int op_cache_finalize(void)
+{
+       kmem_cache_destroy(op_cache);
+       return 0;
+}
+
+char *get_opname_string(struct orangefs_kernel_op_s *new_op)
+{
+       if (new_op) {
+               __s32 type = new_op->upcall.type;
+
+               if (type == ORANGEFS_VFS_OP_FILE_IO)
+                       return "OP_FILE_IO";
+               else if (type == ORANGEFS_VFS_OP_LOOKUP)
+                       return "OP_LOOKUP";
+               else if (type == ORANGEFS_VFS_OP_CREATE)
+                       return "OP_CREATE";
+               else if (type == ORANGEFS_VFS_OP_GETATTR)
+                       return "OP_GETATTR";
+               else if (type == ORANGEFS_VFS_OP_REMOVE)
+                       return "OP_REMOVE";
+               else if (type == ORANGEFS_VFS_OP_MKDIR)
+                       return "OP_MKDIR";
+               else if (type == ORANGEFS_VFS_OP_READDIR)
+                       return "OP_READDIR";
+               else if (type == ORANGEFS_VFS_OP_READDIRPLUS)
+                       return "OP_READDIRPLUS";
+               else if (type == ORANGEFS_VFS_OP_SETATTR)
+                       return "OP_SETATTR";
+               else if (type == ORANGEFS_VFS_OP_SYMLINK)
+                       return "OP_SYMLINK";
+               else if (type == ORANGEFS_VFS_OP_RENAME)
+                       return "OP_RENAME";
+               else if (type == ORANGEFS_VFS_OP_STATFS)
+                       return "OP_STATFS";
+               else if (type == ORANGEFS_VFS_OP_TRUNCATE)
+                       return "OP_TRUNCATE";
+               else if (type == ORANGEFS_VFS_OP_MMAP_RA_FLUSH)
+                       return "OP_MMAP_RA_FLUSH";
+               else if (type == ORANGEFS_VFS_OP_FS_MOUNT)
+                       return "OP_FS_MOUNT";
+               else if (type == ORANGEFS_VFS_OP_FS_UMOUNT)
+                       return "OP_FS_UMOUNT";
+               else if (type == ORANGEFS_VFS_OP_GETXATTR)
+                       return "OP_GETXATTR";
+               else if (type == ORANGEFS_VFS_OP_SETXATTR)
+                       return "OP_SETXATTR";
+               else if (type == ORANGEFS_VFS_OP_LISTXATTR)
+                       return "OP_LISTXATTR";
+               else if (type == ORANGEFS_VFS_OP_REMOVEXATTR)
+                       return "OP_REMOVEXATTR";
+               else if (type == ORANGEFS_VFS_OP_PARAM)
+                       return "OP_PARAM";
+               else if (type == ORANGEFS_VFS_OP_PERF_COUNT)
+                       return "OP_PERF_COUNT";
+               else if (type == ORANGEFS_VFS_OP_CANCEL)
+                       return "OP_CANCEL";
+               else if (type == ORANGEFS_VFS_OP_FSYNC)
+                       return "OP_FSYNC";
+               else if (type == ORANGEFS_VFS_OP_FSKEY)
+                       return "OP_FSKEY";
+       }
+       return "OP_UNKNOWN?";
+}
+
+struct orangefs_kernel_op_s *op_alloc(__s32 type)
+{
+       struct orangefs_kernel_op_s *new_op = NULL;
+
+       new_op = kmem_cache_zalloc(op_cache, GFP_KERNEL);
+       if (new_op) {
+               INIT_LIST_HEAD(&new_op->list);
+               spin_lock_init(&new_op->lock);
+               init_waitqueue_head(&new_op->waitq);
+
+               atomic_set(&new_op->ref_count, 1);
+
+               init_completion(&new_op->done);
+
+               new_op->upcall.type = ORANGEFS_VFS_OP_INVALID;
+               new_op->downcall.type = ORANGEFS_VFS_OP_INVALID;
+               new_op->downcall.status = -1;
+
+               new_op->op_state = OP_VFS_STATE_UNKNOWN;
+               new_op->tag = 0;
+
+               /* initialize the op specific tag and upcall credentials */
+               spin_lock(&next_tag_value_lock);
+               new_op->tag = next_tag_value++;
+               if (next_tag_value == 0)
+                       next_tag_value = 100;
+               spin_unlock(&next_tag_value_lock);
+               new_op->upcall.type = type;
+               new_op->attempts = 0;
+               gossip_debug(GOSSIP_CACHE_DEBUG,
+                            "Alloced OP (%p: %llu %s)\n",
+                            new_op,
+                            llu(new_op->tag),
+                            get_opname_string(new_op));
+
+               new_op->upcall.uid = from_kuid(current_user_ns(),
+                                              current_fsuid());
+
+               new_op->upcall.gid = from_kgid(current_user_ns(),
+                                              current_fsgid());
+       } else {
+               gossip_err("op_alloc: kmem_cache_zalloc failed!\n");
+       }
+       return new_op;
+}
+
+void __op_release(struct orangefs_kernel_op_s *orangefs_op)
+{
+       if (orangefs_op) {
+               gossip_debug(GOSSIP_CACHE_DEBUG,
+                            "Releasing OP (%p: %llu)\n",
+                            orangefs_op,
+                            llu(orangefs_op->tag));
+               kmem_cache_free(op_cache, orangefs_op);
+       } else {
+               gossip_err("NULL pointer in op_release\n");
+       }
+}
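
Editor's note: op_alloc() above assigns each operation a tag from a counter that starts at 100 and is reset back to 100 when it wraps to 0, so no tag below 100 is ever handed out. The sketch below demonstrates just that wrap rule; a uint8_t counter is used here only so the wrap can be observed quickly, whereas the kernel counter is a __u64.

/* Hedged sketch of the tag-assignment rule in op_alloc().  The narrow
 * uint8_t counter is an illustrative assumption; the kernel uses __u64.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t next_tag = 100;

static uint8_t next_op_tag(void)
{
	uint8_t tag = next_tag++;

	if (next_tag == 0)	/* wrapped: skip the reserved low values */
		next_tag = 100;
	return tag;
}

int main(void)
{
	unsigned int min_seen = 256;
	int i;

	for (i = 0; i < 1000; i++) {
		uint8_t tag = next_op_tag();

		if (tag < min_seen)
			min_seen = tag;
	}
	printf("smallest tag handed out: %u\n", min_seen);	/* prints 100 */
	return 0;
}
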
diff --git a/fs/orangefs/orangefs-debug.h b/fs/orangefs/orangefs-debug.h
new file mode 100644 (file)
index 0000000..387db17
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/* This file just defines debugging masks to be used with the gossip
+ * logging utility.  All debugging masks for ORANGEFS are kept here to make
+ * sure we don't have collisions.
+ */
+
+#ifndef __ORANGEFS_DEBUG_H
+#define __ORANGEFS_DEBUG_H
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+#define        GOSSIP_NO_DEBUG                 (__u64)0
+
+#define GOSSIP_SUPER_DEBUG             ((__u64)1 << 0)
+#define GOSSIP_INODE_DEBUG             ((__u64)1 << 1)
+#define GOSSIP_FILE_DEBUG              ((__u64)1 << 2)
+#define GOSSIP_DIR_DEBUG               ((__u64)1 << 3)
+#define GOSSIP_UTILS_DEBUG             ((__u64)1 << 4)
+#define GOSSIP_WAIT_DEBUG              ((__u64)1 << 5)
+#define GOSSIP_ACL_DEBUG               ((__u64)1 << 6)
+#define GOSSIP_DCACHE_DEBUG            ((__u64)1 << 7)
+#define GOSSIP_DEV_DEBUG               ((__u64)1 << 8)
+#define GOSSIP_NAME_DEBUG              ((__u64)1 << 9)
+#define GOSSIP_BUFMAP_DEBUG            ((__u64)1 << 10)
+#define GOSSIP_CACHE_DEBUG             ((__u64)1 << 11)
+#define GOSSIP_DEBUGFS_DEBUG           ((__u64)1 << 12)
+#define GOSSIP_XATTR_DEBUG             ((__u64)1 << 13)
+#define GOSSIP_INIT_DEBUG              ((__u64)1 << 14)
+#define GOSSIP_SYSFS_DEBUG             ((__u64)1 << 15)
+
+#define GOSSIP_MAX_NR                 16
+#define GOSSIP_MAX_DEBUG              (((__u64)1 << GOSSIP_MAX_NR) - 1)
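[editor's note] The masks above exist to gate gossip_debug() output. The real gossip_debug() macro lives in protocol.h, outside this hunk; the following is only a minimal sketch, assuming it tests the caller's mask against the global gossip_debug_mask that orangefs-mod.c sets from the module parameter. The sketch name is hypothetical.

/* Sketch only -- an assumption about the shape of the real macro. */
extern __u64 gossip_debug_mask;

#define gossip_debug_sketch(mask, fmt, ...)                             \
do {                                                                    \
        if (gossip_debug_mask & (mask))                                 \
                printk(KERN_DEBUG fmt, ##__VA_ARGS__);                  \
} while (0)

/* Only emitted when "inode" (or "all") is set in the kernel debug mask: */
/* gossip_debug_sketch(GOSSIP_INODE_DEBUG, "orangefs: iget %p\n", inode); */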
+
+/* function prototypes */
+__u64 ORANGEFS_kmod_eventlog_to_mask(const char *event_logging);
+__u64 ORANGEFS_debug_eventlog_to_mask(const char *event_logging);
+char *ORANGEFS_debug_mask_to_eventlog(__u64 mask);
+char *ORANGEFS_kmod_mask_to_eventlog(__u64 mask);
+
+/* a private internal type */
+struct __keyword_mask_s {
+       const char *keyword;
+       __u64 mask_val;
+};
+
+/*
+ * Map all kmod keywords to kmod debug masks here. Keep this
+ * structure "packed":
+ *
+ *   "all" is always last...
+ *
+ *   keyword     mask_val     index
+ *     foo          1           0
+ *     bar          2           1
+ *     baz          4           2
+ *     qux          8           3
+ *      .           .           .
+ */
+static struct __keyword_mask_s s_kmod_keyword_mask_map[] = {
+       {"super", GOSSIP_SUPER_DEBUG},
+       {"inode", GOSSIP_INODE_DEBUG},
+       {"file", GOSSIP_FILE_DEBUG},
+       {"dir", GOSSIP_DIR_DEBUG},
+       {"utils", GOSSIP_UTILS_DEBUG},
+       {"wait", GOSSIP_WAIT_DEBUG},
+       {"acl", GOSSIP_ACL_DEBUG},
+       {"dcache", GOSSIP_DCACHE_DEBUG},
+       {"dev", GOSSIP_DEV_DEBUG},
+       {"name", GOSSIP_NAME_DEBUG},
+       {"bufmap", GOSSIP_BUFMAP_DEBUG},
+       {"cache", GOSSIP_CACHE_DEBUG},
+       {"debugfs", GOSSIP_DEBUGFS_DEBUG},
+       {"xattr", GOSSIP_XATTR_DEBUG},
+       {"init", GOSSIP_INIT_DEBUG},
+       {"sysfs", GOSSIP_SYSFS_DEBUG},
+       {"none", GOSSIP_NO_DEBUG},
+       {"all", GOSSIP_MAX_DEBUG}
+};
+
+static const int num_kmod_keyword_mask_map = (int)
+       (sizeof(s_kmod_keyword_mask_map) / sizeof(struct __keyword_mask_s));
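[editor's note] The actual conversion is done by debug_string_to_mask(), declared above but defined elsewhere in the series. As a rough illustration of what the table is for (an assumption, not code from the patch), a comma-separated keyword string could be folded into a mask like this, using strsep()/strcmp() from <linux/string.h>:

static inline __u64 keywords_to_mask_sketch(char *s)
{
        __u64 mask = GOSSIP_NO_DEBUG;
        char *tok;
        int i;

        /* e.g. "inode,dir" -> GOSSIP_INODE_DEBUG | GOSSIP_DIR_DEBUG */
        while ((tok = strsep(&s, ",")) != NULL)
                for (i = 0; i < num_kmod_keyword_mask_map; i++)
                        if (!strcmp(tok, s_kmod_keyword_mask_map[i].keyword))
                                mask |= s_kmod_keyword_mask_map[i].mask_val;
        return mask;
}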
+
+#endif /* __ORANGEFS_DEBUG_H */
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c
new file mode 100644 (file)
index 0000000..9eb7972
--- /dev/null
@@ -0,0 +1,457 @@
+/*
+ * What:               /sys/kernel/debug/orangefs/debug-help
+ * Date:               June 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     List of client and kernel debug keywords.
+ *
+ *
+ * What:               /sys/kernel/debug/orangefs/client-debug
+ * Date:               June 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Debug setting for "the client", the userspace
+ *                     helper for the kernel module.
+ *
+ *
+ * What:               /sys/kernel/debug/orangefs/kernel-debug
+ * Date:               June 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Debug setting for the orangefs kernel module.
+ *
+ *                     Any of the keywords, or comma-separated lists
+ *                     of keywords, from debug-help can be catted to
+ *                     client-debug or kernel-debug.
+ *
+ *                     "none", "all" and "verbose" are special keywords
+ *                     for client-debug. Setting client-debug to "all"
+ *                     is kind of like trying to drink water from a
+ *                     fire hose; "verbose" triggers most of the same
+ *                     output except for the constant flow of output
+ *                     from the main wait loop.
+ *
+ *                     "none" and "all" are similar settings for
+ *                     kernel-debug; there is no need for a "verbose".
+ */
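[editor's note] A hypothetical user-space snippet (not part of the patch) exercising the interface documented above: write a keyword list to kernel-debug and dump debug-help. The paths are the ones in the comment; the keyword choice is illustrative.

#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *f;

        f = fopen("/sys/kernel/debug/orangefs/kernel-debug", "w");
        if (f) {
                fputs("inode,dir\n", f);        /* or "all" / "none" */
                fclose(f);
        }

        f = fopen("/sys/kernel/debug/orangefs/debug-help", "r");
        if (f) {
                while (fgets(line, sizeof(line), f))
                        fputs(line, stdout);
                fclose(f);
        }
        return 0;
}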
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <linux/uaccess.h>
+
+#include "orangefs-debugfs.h"
+#include "protocol.h"
+#include "orangefs-kernel.h"
+
+static int orangefs_debug_disabled = 1;
+
+static int orangefs_debug_help_open(struct inode *, struct file *);
+
+const struct file_operations debug_help_fops = {
+       .open           = orangefs_debug_help_open,
+       .read           = seq_read,
+       .release        = seq_release,
+       .llseek         = seq_lseek,
+};
+
+static void *help_start(struct seq_file *, loff_t *);
+static void *help_next(struct seq_file *, void *, loff_t *);
+static void help_stop(struct seq_file *, void *);
+static int help_show(struct seq_file *, void *);
+
+static const struct seq_operations help_debug_ops = {
+       .start  = help_start,
+       .next   = help_next,
+       .stop   = help_stop,
+       .show   = help_show,
+};
+
+/*
+ * Used to protect data in ORANGEFS_KMOD_DEBUG_FILE and
+ * ORANGEFS_CLIENT_DEBUG_FILE.
+ */
+static DEFINE_MUTEX(orangefs_debug_lock);
+
+int orangefs_debug_open(struct inode *, struct file *);
+
+static ssize_t orangefs_debug_read(struct file *,
+                                char __user *,
+                                size_t,
+                                loff_t *);
+
+static ssize_t orangefs_debug_write(struct file *,
+                                 const char __user *,
+                                 size_t,
+                                 loff_t *);
+
+static const struct file_operations kernel_debug_fops = {
+       .open           = orangefs_debug_open,
+       .read           = orangefs_debug_read,
+       .write          = orangefs_debug_write,
+       .llseek         = generic_file_llseek,
+};
+
+/*
+ * initialize kmod debug operations, create orangefs debugfs dir and
+ * ORANGEFS_KMOD_DEBUG_HELP_FILE.
+ */
+int orangefs_debugfs_init(void)
+{
+       int rc = -ENOMEM;
+
+       debug_dir = debugfs_create_dir("orangefs", NULL);
+       if (!debug_dir)
+               goto out;
+
+       help_file_dentry = debugfs_create_file(ORANGEFS_KMOD_DEBUG_HELP_FILE,
+                                 0444,
+                                 debug_dir,
+                                 debug_help_string,
+                                 &debug_help_fops);
+       if (!help_file_dentry)
+               goto out;
+
+       orangefs_debug_disabled = 0;
+       rc = 0;
+
+out:
+       if (rc)
+               orangefs_debugfs_cleanup();
+
+       return rc;
+}
+
+void orangefs_debugfs_cleanup(void)
+{
+       debugfs_remove_recursive(debug_dir);
+}
+
+/* open ORANGEFS_KMOD_DEBUG_HELP_FILE */
+static int orangefs_debug_help_open(struct inode *inode, struct file *file)
+{
+       int rc = -ENODEV;
+       int ret;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_help_open: start\n");
+
+       if (orangefs_debug_disabled)
+               goto out;
+
+       ret = seq_open(file, &help_debug_ops);
+       if (ret)
+               goto out;
+
+       ((struct seq_file *)(file->private_data))->private = inode->i_private;
+
+       rc = 0;
+
+out:
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_help_open: rc:%d:\n",
+                    rc);
+       return rc;
+}
+
+/*
+ * The seq_file core calls start again after stop; start needs to
+ * return NULL when it is done. The whole "payload" in this case is a
+ * single (long) string, so by the second time we get to start
+ * (pos = 1), we're done.
+ */
+static void *help_start(struct seq_file *m, loff_t *pos)
+{
+       void *payload = NULL;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_start: start\n");
+
+       if (*pos == 0)
+               payload = m->private;
+
+       return payload;
+}
+
+static void *help_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_next: start\n");
+
+       return NULL;
+}
+
+static void help_stop(struct seq_file *m, void *p)
+{
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_stop: start\n");
+}
+
+static int help_show(struct seq_file *m, void *v)
+{
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "help_show: start\n");
+
+       seq_puts(m, v);
+
+       return 0;
+}
+
+/*
+ * initialize the kernel-debug file.
+ */
+int orangefs_kernel_debug_init(void)
+{
+       int rc = -ENOMEM;
+       struct dentry *ret;
+       char *k_buffer = NULL;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+
+       k_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+       if (!k_buffer)
+               goto out;
+
+       if (strlen(kernel_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+               strcpy(k_buffer, kernel_debug_string);
+               strcat(k_buffer, "\n");
+       } else {
+               strcpy(k_buffer, "none\n");
+               pr_info("%s: overflow 1!\n", __func__);
+       }
+
+       ret = debugfs_create_file(ORANGEFS_KMOD_DEBUG_FILE,
+                                 0444,
+                                 debug_dir,
+                                 k_buffer,
+                                 &kernel_debug_fops);
+       if (!ret) {
+               pr_info("%s: failed to create %s.\n",
+                       __func__,
+                       ORANGEFS_KMOD_DEBUG_FILE);
+               goto out;
+       }
+
+       rc = 0;
+
+out:
+       if (rc)
+               orangefs_debugfs_cleanup();
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+       return rc;
+}
+
+/*
+ * initialize the client-debug file.
+ */
+int orangefs_client_debug_init(void)
+{
+       int rc = -ENOMEM;
+       char *c_buffer = NULL;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: start\n", __func__);
+
+       c_buffer = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+       if (!c_buffer)
+               goto out;
+
+       if (strlen(client_debug_string) + 1 < ORANGEFS_MAX_DEBUG_STRING_LEN) {
+               strcpy(c_buffer, client_debug_string);
+               strcat(c_buffer, "\n");
+       } else {
+               strcpy(c_buffer, "none\n");
+               pr_info("%s: overflow! 2\n", __func__);
+       }
+
+       client_debug_dentry = debugfs_create_file(ORANGEFS_CLIENT_DEBUG_FILE,
+                                                 0444,
+                                                 debug_dir,
+                                                 c_buffer,
+                                                 &kernel_debug_fops);
+       if (!client_debug_dentry) {
+               pr_info("%s: failed to create %s.\n",
+                       __func__,
+                       ORANGEFS_CLIENT_DEBUG_FILE);
+               goto out;
+       }
+
+       rc = 0;
+
+out:
+       if (rc)
+               orangefs_debugfs_cleanup();
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+       return rc;
+}
+
+/* open ORANGEFS_KMOD_DEBUG_FILE or ORANGEFS_CLIENT_DEBUG_FILE.*/
+int orangefs_debug_open(struct inode *inode, struct file *file)
+{
+       int rc = -ENODEV;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "%s: orangefs_debug_disabled: %d\n",
+                    __func__,
+                    orangefs_debug_disabled);
+
+       if (orangefs_debug_disabled)
+               goto out;
+
+       rc = 0;
+       mutex_lock(&orangefs_debug_lock);
+       file->private_data = inode->i_private;
+       mutex_unlock(&orangefs_debug_lock);
+
+out:
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_open: rc: %d\n",
+                    rc);
+       return rc;
+}
+
+static ssize_t orangefs_debug_read(struct file *file,
+                                char __user *ubuf,
+                                size_t count,
+                                loff_t *ppos)
+{
+       char *buf;
+       int sprintf_ret;
+       ssize_t read_ret = -ENOMEM;
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG, "orangefs_debug_read: start\n");
+
+       buf = kmalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+       if (!buf)
+               goto out;
+
+       mutex_lock(&orangefs_debug_lock);
+       sprintf_ret = sprintf(buf, "%s", (char *)file->private_data);
+       mutex_unlock(&orangefs_debug_lock);
+
+       read_ret = simple_read_from_buffer(ubuf, count, ppos, buf, sprintf_ret);
+
+       kfree(buf);
+
+out:
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_read: ret: %zd\n",
+                    read_ret);
+
+       return read_ret;
+}
+
+static ssize_t orangefs_debug_write(struct file *file,
+                                 const char __user *ubuf,
+                                 size_t count,
+                                 loff_t *ppos)
+{
+       char *buf;
+       int rc = -EFAULT;
+       size_t silly = 0;
+       char *debug_string;
+       struct orangefs_kernel_op_s *new_op = NULL;
+       struct client_debug_mask c_mask = { NULL, 0, 0 };
+
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+               "orangefs_debug_write: %s\n",
+               file->f_path.dentry->d_name.name);
+
+       /*
+        * Thwart users who try to jam a ridiculous number
+        * of bytes into the debug file...
+        */
+       if (count > ORANGEFS_MAX_DEBUG_STRING_LEN + 1) {
+               silly = count;
+               count = ORANGEFS_MAX_DEBUG_STRING_LEN + 1;
+       }
+
+       buf = kzalloc(ORANGEFS_MAX_DEBUG_STRING_LEN, GFP_KERNEL);
+       if (!buf) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       if (copy_from_user(buf, ubuf, count - 1)) {
+               gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                            "%s: copy_from_user failed!\n",
+                            __func__);
+               goto out;
+       }
+
+       /*
+        * Map the keyword string from userspace into a valid debug mask.
+        * The mapping process involves mapping the human-inputted string
+        * into a valid mask, and then rebuilding the string from the
+        * verified valid mask.
+        *
+        * A service operation is required to set a new client-side
+        * debug mask.
+        */
+       if (!strcmp(file->f_path.dentry->d_name.name,
+                   ORANGEFS_KMOD_DEBUG_FILE)) {
+               debug_string_to_mask(buf, &gossip_debug_mask, 0);
+               debug_mask_to_string(&gossip_debug_mask, 0);
+               debug_string = kernel_debug_string;
+               gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                            "New kernel debug string is %s\n",
+                            kernel_debug_string);
+       } else {
+               /* Can't reset client debug mask if client is not running. */
+               if (is_daemon_in_service()) {
+                       pr_info("%s: Client not running :%d:\n",
+                               __func__,
+                               is_daemon_in_service());
+                       goto out;
+               }
+
+               debug_string_to_mask(buf, &c_mask, 1);
+               debug_mask_to_string(&c_mask, 1);
+               debug_string = client_debug_string;
+
+               new_op = op_alloc(ORANGEFS_VFS_OP_PARAM);
+               if (!new_op) {
+                       pr_info("%s: op_alloc failed!\n", __func__);
+                       goto out;
+               }
+
+               new_op->upcall.req.param.op =
+                       ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES;
+               new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET;
+               memset(new_op->upcall.req.param.s_value,
+                      0,
+                      ORANGEFS_MAX_DEBUG_STRING_LEN);
+               sprintf(new_op->upcall.req.param.s_value,
+                       "%llx %llx\n",
+                       c_mask.mask1,
+                       c_mask.mask2);
+
+               /* service_operation returns 0 on success... */
+               rc = service_operation(new_op,
+                                      "orangefs_param",
+                                       ORANGEFS_OP_INTERRUPTIBLE);
+
+               if (rc)
+                       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                                    "%s: service_operation failed! rc:%d:\n",
+                                    __func__,
+                                    rc);
+
+               op_release(new_op);
+       }
+
+       mutex_lock(&orangefs_debug_lock);
+       memset(file->f_inode->i_private, 0, ORANGEFS_MAX_DEBUG_STRING_LEN);
+       sprintf((char *)file->f_inode->i_private, "%s\n", debug_string);
+       mutex_unlock(&orangefs_debug_lock);
+
+       *ppos += count;
+       if (silly)
+               rc = silly;
+       else
+               rc = count;
+
+out:
+       gossip_debug(GOSSIP_DEBUGFS_DEBUG,
+                    "orangefs_debug_write: rc: %d\n",
+                    rc);
+       kfree(buf);
+       return rc;
+}
diff --git a/fs/orangefs/orangefs-debugfs.h b/fs/orangefs/orangefs-debugfs.h
new file mode 100644 (file)
index 0000000..e4828c0
--- /dev/null
@@ -0,0 +1,3 @@
+int orangefs_debugfs_init(void);
+int orangefs_kernel_debug_init(void);
+void orangefs_debugfs_cleanup(void);
diff --git a/fs/orangefs/orangefs-dev-proto.h b/fs/orangefs/orangefs-dev-proto.h
new file mode 100644 (file)
index 0000000..5a8725a
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#ifndef _ORANGEFS_DEV_PROTO_H
+#define _ORANGEFS_DEV_PROTO_H
+
+/*
+ * types and constants shared between user space and kernel space for
+ * device interaction using a common protocol
+ */
+
+/*
+ * valid orangefs kernel operation types
+ */
+#define ORANGEFS_VFS_OP_INVALID           0xFF000000
+#define ORANGEFS_VFS_OP_FILE_IO        0xFF000001
+#define ORANGEFS_VFS_OP_LOOKUP         0xFF000002
+#define ORANGEFS_VFS_OP_CREATE         0xFF000003
+#define ORANGEFS_VFS_OP_GETATTR        0xFF000004
+#define ORANGEFS_VFS_OP_REMOVE         0xFF000005
+#define ORANGEFS_VFS_OP_MKDIR          0xFF000006
+#define ORANGEFS_VFS_OP_READDIR        0xFF000007
+#define ORANGEFS_VFS_OP_SETATTR        0xFF000008
+#define ORANGEFS_VFS_OP_SYMLINK        0xFF000009
+#define ORANGEFS_VFS_OP_RENAME         0xFF00000A
+#define ORANGEFS_VFS_OP_STATFS         0xFF00000B
+#define ORANGEFS_VFS_OP_TRUNCATE       0xFF00000C
+#define ORANGEFS_VFS_OP_MMAP_RA_FLUSH  0xFF00000D
+#define ORANGEFS_VFS_OP_FS_MOUNT       0xFF00000E
+#define ORANGEFS_VFS_OP_FS_UMOUNT      0xFF00000F
+#define ORANGEFS_VFS_OP_GETXATTR       0xFF000010
+#define ORANGEFS_VFS_OP_SETXATTR          0xFF000011
+#define ORANGEFS_VFS_OP_LISTXATTR         0xFF000012
+#define ORANGEFS_VFS_OP_REMOVEXATTR       0xFF000013
+#define ORANGEFS_VFS_OP_PARAM          0xFF000014
+#define ORANGEFS_VFS_OP_PERF_COUNT     0xFF000015
+#define ORANGEFS_VFS_OP_CANCEL            0xFF00EE00
+#define ORANGEFS_VFS_OP_FSYNC          0xFF00EE01
+#define ORANGEFS_VFS_OP_FSKEY             0xFF00EE02
+#define ORANGEFS_VFS_OP_READDIRPLUS       0xFF00EE03
+
+/*
+ * Misc constants. Please retain them as multiples of 8!
+ * Otherwise 32-64 bit interactions will be messed up :)
+ */
+#define ORANGEFS_NAME_LEN              0x00000100
+#define ORANGEFS_MAX_DEBUG_STRING_LEN  0x00000400
+#define ORANGEFS_MAX_DEBUG_ARRAY_LEN   0x00000800
+
+/*
+ * The maximum number of directory entries in a single request is 96.
+ * XXX: Why can this not be higher? The client-side code can handle up to 512.
+ * XXX: What happens if we expect more than the client can return?
+ */
+#define ORANGEFS_MAX_DIRENT_COUNT_READDIR 96
+
+#include "upcall.h"
+#include "downcall.h"
+
+#endif
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
new file mode 100644 (file)
index 0000000..a8cde90
--- /dev/null
@@ -0,0 +1,689 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  The ORANGEFS Linux kernel support allows ORANGEFS volumes to be mounted and
+ *  accessed through the Linux VFS (i.e. using standard I/O system calls).
+ *  This support is only needed on clients that wish to mount the file system.
+ *
+ */
+
+/*
+ *  Declarations and macros for the ORANGEFS Linux kernel support.
+ */
+
+#ifndef __ORANGEFSKERNEL_H
+#define __ORANGEFSKERNEL_H
+
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/statfs.h>
+#include <linux/backing-dev.h>
+#include <linux/device.h>
+#include <linux/mpage.h>
+#include <linux/namei.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/vmalloc.h>
+
+#include <linux/aio.h>
+#include <linux/posix_acl.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/compat.h>
+#include <linux/mount.h>
+#include <linux/uaccess.h>
+#include <linux/atomic.h>
+#include <linux/uio.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/wait.h>
+#include <linux/dcache.h>
+#include <linux/pagemap.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/xattr.h>
+#include <linux/exportfs.h>
+
+#include <asm/unaligned.h>
+
+#include "orangefs-dev-proto.h"
+
+#ifdef ORANGEFS_KERNEL_DEBUG
+#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS       10
+#else
+#define ORANGEFS_DEFAULT_OP_TIMEOUT_SECS       20
+#endif
+
+#define ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS   30
+
+#define ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS     900     /* 15 minutes */
+
+#define ORANGEFS_REQDEVICE_NAME          "pvfs2-req"
+
+#define ORANGEFS_DEVREQ_MAGIC             0x20030529
+#define ORANGEFS_LINK_MAX                 0x000000FF
+#define ORANGEFS_PURGE_RETRY_COUNT     0x00000005
+#define ORANGEFS_SEEK_END              0x00000002
+#define ORANGEFS_MAX_NUM_OPTIONS          0x00000004
+#define ORANGEFS_MAX_MOUNT_OPT_LEN        0x00000080
+#define ORANGEFS_MAX_FSKEY_LEN            64
+
+#define MAX_DEV_REQ_UPSIZE (2 * sizeof(__s32) +   \
+sizeof(__u64) + sizeof(struct orangefs_upcall_s))
+#define MAX_DEV_REQ_DOWNSIZE (2 * sizeof(__s32) + \
+sizeof(__u64) + sizeof(struct orangefs_downcall_s))
+
+/*
+ * valid orangefs kernel operation states
+ *
+ * unknown  - op was just initialized
+ * waiting  - op is on request_list (upward bound)
+ * inprogr  - op is in progress (waiting for downcall)
+ * serviced - op has matching downcall; ok
+ * purged   - op has to start a timer since client-core
+ *            exited uncleanly before servicing op
+ * given up - submitter has given up waiting for it
+ */
+enum orangefs_vfs_op_states {
+       OP_VFS_STATE_UNKNOWN = 0,
+       OP_VFS_STATE_WAITING = 1,
+       OP_VFS_STATE_INPROGR = 2,
+       OP_VFS_STATE_SERVICED = 4,
+       OP_VFS_STATE_PURGED = 8,
+       OP_VFS_STATE_GIVEN_UP = 16,
+};
+
+/*
+ * Defines for controlling whether I/O upcalls are for async or sync operations
+ */
+enum ORANGEFS_async_io_type {
+       ORANGEFS_VFS_SYNC_IO = 0,
+       ORANGEFS_VFS_ASYNC_IO = 1,
+};
+
+/*
+ * An array of client_debug_mask will be built to hold debug keyword/mask
+ * values fetched from userspace.
+ */
+struct client_debug_mask {
+       char *keyword;
+       __u64 mask1;
+       __u64 mask2;
+};
+
+/*
+ * orangefs kernel memory related flags
+ */
+
+#if ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB))
+#define ORANGEFS_CACHE_CREATE_FLAGS SLAB_RED_ZONE
+#else
+#define ORANGEFS_CACHE_CREATE_FLAGS 0
+#endif /* ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB)) */
+
+#define ORANGEFS_GFP_FLAGS (GFP_KERNEL)
+#define ORANGEFS_BUFMAP_GFP_FLAGS (GFP_KERNEL)
+
+/* orangefs xattr and acl related defines */
+#define ORANGEFS_XATTR_INDEX_POSIX_ACL_ACCESS  1
+#define ORANGEFS_XATTR_INDEX_POSIX_ACL_DEFAULT 2
+#define ORANGEFS_XATTR_INDEX_TRUSTED           3
+#define ORANGEFS_XATTR_INDEX_DEFAULT           4
+
+#define ORANGEFS_XATTR_NAME_ACL_ACCESS XATTR_NAME_POSIX_ACL_ACCESS
+#define ORANGEFS_XATTR_NAME_ACL_DEFAULT XATTR_NAME_POSIX_ACL_DEFAULT
+#define ORANGEFS_XATTR_NAME_TRUSTED_PREFIX "trusted."
+#define ORANGEFS_XATTR_NAME_DEFAULT_PREFIX ""
+
+/* these functions are defined in orangefs-utils.c */
+int orangefs_prepare_cdm_array(char *debug_array_string);
+int orangefs_prepare_debugfs_help_string(int);
+
+/* defined in orangefs-debugfs.c */
+int orangefs_client_debug_init(void);
+
+void debug_string_to_mask(char *, void *, int);
+void do_c_mask(int, char *, struct client_debug_mask **);
+void do_k_mask(int, char *, __u64 **);
+
+void debug_mask_to_string(void *, int);
+void do_k_string(void *, int);
+void do_c_string(void *, int);
+int check_amalgam_keyword(void *, int);
+int keyword_is_amalgam(char *);
+
+/* these variables are defined in orangefs-mod.c */
+extern char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+extern char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+extern char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+extern unsigned int kernel_mask_set_mod_init;
+
+extern int orangefs_init_acl(struct inode *inode, struct inode *dir);
+extern const struct xattr_handler *orangefs_xattr_handlers[];
+
+extern struct posix_acl *orangefs_get_acl(struct inode *inode, int type);
+extern int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+
+/*
+ * Redefine xtvec structure so that we could move helper functions out of
+ * the define
+ */
+struct xtvec {
+       __kernel_off_t xtv_off;         /* must be off_t */
+       __kernel_size_t xtv_len;        /* must be size_t */
+};
+
+/*
+ * orangefs data structures
+ */
+struct orangefs_kernel_op_s {
+       enum orangefs_vfs_op_states op_state;
+       __u64 tag;
+
+       /*
+        * Set uses_shared_memory to 1 if this operation uses shared memory.
+        * If true, then a retry on the op must also get a new shared memory
+        * buffer and re-populate it.
+        */
+       int uses_shared_memory;
+
+       struct orangefs_upcall_s upcall;
+       struct orangefs_downcall_s downcall;
+
+       wait_queue_head_t waitq;
+       spinlock_t lock;
+
+       struct completion done;
+
+       atomic_t ref_count;
+
+       /* VFS aio fields */
+
+       int attempts;
+
+       struct list_head list;
+};
+
+#define set_op_state_waiting(op)     ((op)->op_state = OP_VFS_STATE_WAITING)
+#define set_op_state_inprogress(op)  ((op)->op_state = OP_VFS_STATE_INPROGR)
+#define set_op_state_given_up(op)  ((op)->op_state = OP_VFS_STATE_GIVEN_UP)
+static inline void set_op_state_serviced(struct orangefs_kernel_op_s *op)
+{
+       op->op_state = OP_VFS_STATE_SERVICED;
+       wake_up_interruptible(&op->waitq);
+}
+static inline void set_op_state_purged(struct orangefs_kernel_op_s *op)
+{
+       op->op_state |= OP_VFS_STATE_PURGED;
+       wake_up_interruptible(&op->waitq);
+}
+
+#define op_state_waiting(op)     ((op)->op_state & OP_VFS_STATE_WAITING)
+#define op_state_in_progress(op) ((op)->op_state & OP_VFS_STATE_INPROGR)
+#define op_state_serviced(op)    ((op)->op_state & OP_VFS_STATE_SERVICED)
+#define op_state_purged(op)      ((op)->op_state & OP_VFS_STATE_PURGED)
+#define op_state_given_up(op)    ((op)->op_state & OP_VFS_STATE_GIVEN_UP)
+
+static inline void get_op(struct orangefs_kernel_op_s *op)
+{
+       atomic_inc(&op->ref_count);
+       gossip_debug(GOSSIP_DEV_DEBUG,
+                       "(get) Alloced OP (%p:%llu)\n", op, llu(op->tag));
+}
+
+void __op_release(struct orangefs_kernel_op_s *op);
+
+static inline void op_release(struct orangefs_kernel_op_s *op)
+{
+       if (atomic_dec_and_test(&op->ref_count)) {
+               gossip_debug(GOSSIP_DEV_DEBUG,
+                       "(put) Releasing OP (%p:%llu)\n", op, llu((op)->tag));
+               __op_release(op);
+       }
+}
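[editor's note] An illustrative lifecycle sketch, an assumption about typical usage rather than code from the patch: op_alloc() (declared further down in this header, defined in orangefs-cache.c) hands back an op holding one reference, the state macros above track its progress, and op_release() drops that reference when the caller is finished. The sketch function name is hypothetical.

static inline void op_lifecycle_sketch(struct orangefs_kernel_op_s *op)
{
        /* op comes from op_alloc(), which set ref_count to 1 */
        set_op_state_waiting(op);       /* queued on orangefs_request_list */
        set_op_state_inprogress(op);    /* picked up through the device file */
        set_op_state_serviced(op);      /* matching downcall arrived; wakes waiters */

        if (op_state_serviced(op))
                op_release(op);         /* drops the reference from op_alloc() */
}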
+
+/* per inode private orangefs info */
+struct orangefs_inode_s {
+       struct orangefs_object_kref refn;
+       char link_target[ORANGEFS_NAME_MAX];
+       __s64 blksize;
+       /*
+        * Reading/Writing Extended attributes need to acquire the appropriate
+        * reader/writer semaphore on the orangefs_inode_s structure.
+        */
+       struct rw_semaphore xattr_sem;
+
+       struct inode vfs_inode;
+       sector_t last_failed_block_index_read;
+
+       /*
+        * State of in-memory attributes not yet flushed to disk associated
+        * with this object
+        */
+       unsigned long pinode_flags;
+};
+
+#define P_ATIME_FLAG 0
+#define P_MTIME_FLAG 1
+#define P_CTIME_FLAG 2
+#define P_MODE_FLAG  3
+
+#define ClearAtimeFlag(pinode) clear_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
+#define SetAtimeFlag(pinode)   set_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
+#define AtimeFlag(pinode)      test_bit(P_ATIME_FLAG, &(pinode)->pinode_flags)
+
+#define ClearMtimeFlag(pinode) clear_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
+#define SetMtimeFlag(pinode)   set_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
+#define MtimeFlag(pinode)      test_bit(P_MTIME_FLAG, &(pinode)->pinode_flags)
+
+#define ClearCtimeFlag(pinode) clear_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
+#define SetCtimeFlag(pinode)   set_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
+#define CtimeFlag(pinode)      test_bit(P_CTIME_FLAG, &(pinode)->pinode_flags)
+
+#define ClearModeFlag(pinode) clear_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
+#define SetModeFlag(pinode)   set_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
+#define ModeFlag(pinode)      test_bit(P_MODE_FLAG, &(pinode)->pinode_flags)
+
+/* per superblock private orangefs info */
+struct orangefs_sb_info_s {
+       struct orangefs_khandle root_khandle;
+       __s32 fs_id;
+       int id;
+       int flags;
+#define ORANGEFS_OPT_INTR      0x01
+#define ORANGEFS_OPT_LOCAL_LOCK        0x02
+       char devname[ORANGEFS_MAX_SERVER_ADDR_LEN];
+       struct super_block *sb;
+       int mount_pending;
+       struct list_head list;
+};
+
+/*
+ * structure that holds the state of any async I/O operation issued
+ * through the VFS. Needed especially to handle cancellation requests
+ * or even completion notification so that the VFS client-side daemon
+ * can free up its vfs_request slots.
+ */
+struct orangefs_kiocb_s {
+       /* the pointer to the task that initiated the AIO */
+       struct task_struct *tsk;
+
+       /* pointer to the kiocb that kicked this operation */
+       struct kiocb *kiocb;
+
+       /* buffer index that was used for the I/O */
+       struct orangefs_bufmap *bufmap;
+       int buffer_index;
+
+       /* orangefs kernel operation type */
+       struct orangefs_kernel_op_s *op;
+
+       /* The user space buffers from/to which I/O is being staged */
+       struct iovec *iov;
+
+       /* number of elements in the iovector */
+       unsigned long nr_segs;
+
+       /* set to indicate the type of the operation */
+       int rw;
+
+       /* file offset */
+       loff_t offset;
+
+       /* and the count in bytes */
+       size_t bytes_to_be_copied;
+
+       ssize_t bytes_copied;
+       int needs_cleanup;
+};
+
+struct orangefs_stats {
+       unsigned long cache_hits;
+       unsigned long cache_misses;
+       unsigned long reads;
+       unsigned long writes;
+};
+
+extern struct orangefs_stats g_orangefs_stats;
+
+/*
+ * NOTE: See Documentation/filesystems/porting for information
+ * on implementing FOO_I and properly accessing fs private data
+ */
+static inline struct orangefs_inode_s *ORANGEFS_I(struct inode *inode)
+{
+       return container_of(inode, struct orangefs_inode_s, vfs_inode);
+}
+
+static inline struct orangefs_sb_info_s *ORANGEFS_SB(struct super_block *sb)
+{
+       return (struct orangefs_sb_info_s *) sb->s_fs_info;
+}
+
+/*
+ * ino_t descends from "unsigned long" (64 bits on 64-bit kernels);
+ * the 16-byte khandle is folded down into a __u64 to produce one.
+ */
+static inline ino_t orangefs_khandle_to_ino(struct orangefs_khandle *khandle)
+{
+       union {
+               unsigned char u[8];
+               __u64 ino;
+       } ihandle;
+
+       ihandle.u[0] = khandle->u[0] ^ khandle->u[4];
+       ihandle.u[1] = khandle->u[1] ^ khandle->u[5];
+       ihandle.u[2] = khandle->u[2] ^ khandle->u[6];
+       ihandle.u[3] = khandle->u[3] ^ khandle->u[7];
+       ihandle.u[4] = khandle->u[12] ^ khandle->u[8];
+       ihandle.u[5] = khandle->u[13] ^ khandle->u[9];
+       ihandle.u[6] = khandle->u[14] ^ khandle->u[10];
+       ihandle.u[7] = khandle->u[15] ^ khandle->u[11];
+
+       return ihandle.ino;
+}
+
+static inline struct orangefs_khandle *get_khandle_from_ino(struct inode *inode)
+{
+       return &(ORANGEFS_I(inode)->refn.khandle);
+}
+
+static inline __s32 get_fsid_from_ino(struct inode *inode)
+{
+       return ORANGEFS_I(inode)->refn.fs_id;
+}
+
+static inline ino_t get_ino_from_khandle(struct inode *inode)
+{
+       struct orangefs_khandle *khandle;
+       ino_t ino;
+
+       khandle = get_khandle_from_ino(inode);
+       ino = orangefs_khandle_to_ino(khandle);
+       return ino;
+}
+
+static inline ino_t get_parent_ino_from_dentry(struct dentry *dentry)
+{
+       return get_ino_from_khandle(dentry->d_parent->d_inode);
+}
+
+static inline int is_root_handle(struct inode *inode)
+{
+       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                    "%s: root handle: %pU, this handle: %pU:\n",
+                    __func__,
+                    &ORANGEFS_SB(inode->i_sb)->root_khandle,
+                    get_khandle_from_ino(inode));
+
+       if (ORANGEFS_khandle_cmp(&(ORANGEFS_SB(inode->i_sb)->root_khandle),
+                            get_khandle_from_ino(inode)))
+               return 0;
+       else
+               return 1;
+}
+
+static inline int match_handle(struct orangefs_khandle resp_handle,
+                              struct inode *inode)
+{
+       gossip_debug(GOSSIP_DCACHE_DEBUG,
+                    "%s: one handle: %pU, another handle:%pU:\n",
+                    __func__,
+                    &resp_handle,
+                    get_khandle_from_ino(inode));
+
+       if (ORANGEFS_khandle_cmp(&resp_handle, get_khandle_from_ino(inode)))
+               return 0;
+       else
+               return 1;
+}
+
+/*
+ * defined in orangefs-cache.c
+ */
+int op_cache_initialize(void);
+int op_cache_finalize(void);
+struct orangefs_kernel_op_s *op_alloc(__s32 type);
+char *get_opname_string(struct orangefs_kernel_op_s *new_op);
+
+int orangefs_inode_cache_initialize(void);
+int orangefs_inode_cache_finalize(void);
+
+/*
+ * defined in orangefs-mod.c
+ */
+void purge_inprogress_ops(void);
+
+/*
+ * defined in waitqueue.c
+ */
+void purge_waiting_ops(void);
+
+/*
+ * defined in super.c
+ */
+struct dentry *orangefs_mount(struct file_system_type *fst,
+                          int flags,
+                          const char *devname,
+                          void *data);
+
+void orangefs_kill_sb(struct super_block *sb);
+int orangefs_remount(struct super_block *sb);
+
+int fsid_key_table_initialize(void);
+void fsid_key_table_finalize(void);
+
+/*
+ * defined in inode.c
+ */
+__u32 convert_to_orangefs_mask(unsigned long lite_mask);
+struct inode *orangefs_new_inode(struct super_block *sb,
+                             struct inode *dir,
+                             int mode,
+                             dev_t dev,
+                             struct orangefs_object_kref *ref);
+
+int orangefs_setattr(struct dentry *dentry, struct iattr *iattr);
+
+int orangefs_getattr(struct vfsmount *mnt,
+                 struct dentry *dentry,
+                 struct kstat *kstat);
+
+int orangefs_permission(struct inode *inode, int mask);
+
+/*
+ * defined in xattr.c
+ */
+int orangefs_setxattr(struct dentry *dentry,
+                  const char *name,
+                  const void *value,
+                  size_t size,
+                  int flags);
+
+ssize_t orangefs_getxattr(struct dentry *dentry,
+                      const char *name,
+                      void *buffer,
+                      size_t size);
+
+ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size);
+
+/*
+ * defined in namei.c
+ */
+struct inode *orangefs_iget(struct super_block *sb,
+                        struct orangefs_object_kref *ref);
+
+ssize_t orangefs_inode_read(struct inode *inode,
+                           struct iov_iter *iter,
+                           loff_t *offset,
+                           loff_t readahead_size);
+
+/*
+ * defined in devorangefs-req.c
+ */
+int orangefs_dev_init(void);
+void orangefs_dev_cleanup(void);
+int is_daemon_in_service(void);
+int fs_mount_pending(__s32 fsid);
+
+/*
+ * defined in orangefs-utils.c
+ */
+__s32 fsid_of_op(struct orangefs_kernel_op_s *op);
+
+int orangefs_flush_inode(struct inode *inode);
+
+ssize_t orangefs_inode_getxattr(struct inode *inode,
+                            const char *prefix,
+                            const char *name,
+                            void *buffer,
+                            size_t size);
+
+int orangefs_inode_setxattr(struct inode *inode,
+                        const char *prefix,
+                        const char *name,
+                        const void *value,
+                        size_t size,
+                        int flags);
+
+int orangefs_inode_getattr(struct inode *inode, __u32 mask, int check);
+
+int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr);
+
+void orangefs_make_bad_inode(struct inode *inode);
+
+void orangefs_block_signals(sigset_t *);
+
+void orangefs_set_signals(sigset_t *);
+
+int orangefs_unmount_sb(struct super_block *sb);
+
+int orangefs_cancel_op_in_progress(__u64 tag);
+
+static inline __u64 orangefs_convert_time_field(const struct timespec *ts)
+{
+       return (__u64)ts->tv_sec;
+}
+
+int orangefs_normalize_to_errno(__s32 error_code);
+
+extern struct mutex devreq_mutex;
+extern struct mutex request_mutex;
+extern int debug;
+extern int op_timeout_secs;
+extern int slot_timeout_secs;
+extern struct list_head orangefs_superblocks;
+extern spinlock_t orangefs_superblocks_lock;
+extern struct list_head orangefs_request_list;
+extern spinlock_t orangefs_request_list_lock;
+extern wait_queue_head_t orangefs_request_list_waitq;
+extern struct list_head *htable_ops_in_progress;
+extern spinlock_t htable_ops_in_progress_lock;
+extern int hash_table_size;
+
+extern const struct address_space_operations orangefs_address_operations;
+extern struct backing_dev_info orangefs_backing_dev_info;
+extern struct inode_operations orangefs_file_inode_operations;
+extern const struct file_operations orangefs_file_operations;
+extern struct inode_operations orangefs_symlink_inode_operations;
+extern struct inode_operations orangefs_dir_inode_operations;
+extern const struct file_operations orangefs_dir_operations;
+extern const struct dentry_operations orangefs_dentry_operations;
+extern const struct file_operations orangefs_devreq_file_operations;
+
+extern wait_queue_head_t orangefs_bufmap_init_waitq;
+
+/*
+ * misc convenience macros
+ */
+
+#define ORANGEFS_OP_INTERRUPTIBLE 1   /* service_operation() is interruptible */
+#define ORANGEFS_OP_PRIORITY      2   /* service_operation() is high priority */
+#define ORANGEFS_OP_CANCELLATION  4   /* this is a cancellation */
+#define ORANGEFS_OP_NO_SEMAPHORE  8   /* don't acquire semaphore */
+#define ORANGEFS_OP_ASYNC         16  /* Queue it, but don't wait */
+
+int service_operation(struct orangefs_kernel_op_s *op,
+                     const char *op_name,
+                     int flags);
+
+#define get_interruptible_flag(inode) \
+       ((ORANGEFS_SB(inode->i_sb)->flags & ORANGEFS_OPT_INTR) ? \
+               ORANGEFS_OP_INTERRUPTIBLE : 0)
+
+#define add_orangefs_sb(sb)                                            \
+do {                                                                   \
+       gossip_debug(GOSSIP_SUPER_DEBUG,                                \
+                    "Adding SB %p to orangefs superblocks\n",          \
+                    ORANGEFS_SB(sb));                                  \
+       spin_lock(&orangefs_superblocks_lock);                          \
+       list_add_tail(&ORANGEFS_SB(sb)->list, &orangefs_superblocks);           \
+       spin_unlock(&orangefs_superblocks_lock); \
+} while (0)
+
+#define remove_orangefs_sb(sb)                                         \
+do {                                                                   \
+       struct list_head *tmp = NULL;                                   \
+       struct list_head *tmp_safe = NULL;                              \
+       struct orangefs_sb_info_s *orangefs_sb = NULL;                  \
+                                                                       \
+       spin_lock(&orangefs_superblocks_lock);                          \
+       list_for_each_safe(tmp, tmp_safe, &orangefs_superblocks) {              \
+               orangefs_sb = list_entry(tmp,                           \
+                                     struct orangefs_sb_info_s,                \
+                                     list);                            \
+               if (orangefs_sb && (orangefs_sb->sb == sb)) {                   \
+                       gossip_debug(GOSSIP_SUPER_DEBUG,                \
+                           "Removing SB %p from orangefs superblocks\n",       \
+                       orangefs_sb);                                   \
+                       list_del(&orangefs_sb->list);                   \
+                       break;                                          \
+               }                                                       \
+       }                                                               \
+       spin_unlock(&orangefs_superblocks_lock);                                \
+} while (0)
+
+#define orangefs_lock_inode(inode) spin_lock(&inode->i_lock)
+#define orangefs_unlock_inode(inode) spin_unlock(&inode->i_lock)
+
+#define fill_default_sys_attrs(sys_attr, type, mode)                   \
+do {                                                                   \
+       sys_attr.owner = from_kuid(current_user_ns(), current_fsuid()); \
+       sys_attr.group = from_kgid(current_user_ns(), current_fsgid()); \
+       sys_attr.size = 0;                                              \
+       sys_attr.perms = ORANGEFS_util_translate_mode(mode);            \
+       sys_attr.objtype = type;                                        \
+       sys_attr.mask = ORANGEFS_ATTR_SYS_ALL_SETABLE;                  \
+} while (0)
+
+#define orangefs_inode_lock(__i)  mutex_lock(&(__i)->i_mutex)
+
+#define orangefs_inode_unlock(__i) mutex_unlock(&(__i)->i_mutex)
+
+static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size)
+{
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+       orangefs_inode_lock(inode);
+#endif
+       i_size_write(inode, i_size);
+#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
+       orangefs_inode_unlock(inode);
+#endif
+}
+
+static inline unsigned int diff(struct timeval *end, struct timeval *begin)
+{
+       if (end->tv_usec < begin->tv_usec) {
+               end->tv_usec += 1000000;
+               end->tv_sec--;
+       }
+       end->tv_sec -= begin->tv_sec;
+       end->tv_usec -= begin->tv_usec;
+       return (end->tv_sec * 1000000) + end->tv_usec;
+}
+
+#endif /* __ORANGEFSKERNEL_H */
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
new file mode 100644 (file)
index 0000000..7639ab2
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * Changes by Acxiom Corporation to add proc file handler for pvfs2 client
+ * parameters, Copyright Acxiom Corporation, 2005.
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-debugfs.h"
+#include "orangefs-sysfs.h"
+
+/* ORANGEFS_VERSION is a ./configure define */
+#ifndef ORANGEFS_VERSION
+#define ORANGEFS_VERSION "upstream"
+#endif
+
+/*
+ * global variables declared here
+ */
+
+/* array of client debug keyword/mask values */
+struct client_debug_mask *cdm_array;
+int cdm_element_count;
+
+char kernel_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN] = "none";
+char client_debug_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+char client_debug_array_string[ORANGEFS_MAX_DEBUG_STRING_LEN];
+
+char *debug_help_string;
+int help_string_initialized;
+struct dentry *help_file_dentry;
+struct dentry *client_debug_dentry;
+struct dentry *debug_dir;
+int client_verbose_index;
+int client_all_index;
+struct orangefs_stats g_orangefs_stats;
+
+/* the size of the hash tables for ops in progress */
+int hash_table_size = 509;
+
+static ulong module_parm_debug_mask;
+__u64 gossip_debug_mask;
+struct client_debug_mask client_debug_mask = { NULL, 0, 0 };
+unsigned int kernel_mask_set_mod_init; /* implicitly false */
+int op_timeout_secs = ORANGEFS_DEFAULT_OP_TIMEOUT_SECS;
+int slot_timeout_secs = ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ORANGEFS Development Team");
+MODULE_DESCRIPTION("The Linux Kernel VFS interface to ORANGEFS");
+MODULE_PARM_DESC(module_parm_debug_mask, "debugging level (see orangefs-debug.h for values)");
+MODULE_PARM_DESC(op_timeout_secs, "Operation timeout in seconds");
+MODULE_PARM_DESC(slot_timeout_secs, "Slot timeout in seconds");
+MODULE_PARM_DESC(hash_table_size,
+                "size of hash table for operations in progress");
+
+static struct file_system_type orangefs_fs_type = {
+       .name = "pvfs2",
+       .mount = orangefs_mount,
+       .kill_sb = orangefs_kill_sb,
+       .owner = THIS_MODULE,
+};
+
+module_param(hash_table_size, int, 0);
+module_param(module_parm_debug_mask, ulong, 0644);
+module_param(op_timeout_secs, int, 0);
+module_param(slot_timeout_secs, int, 0);
+
+/* synchronizes the request device file */
+DEFINE_MUTEX(devreq_mutex);
+
+/*
+ * Blocks non-priority requests from being queued for servicing.  This
+ * could be used for protecting the request list data structure, but
+ * for now it's only being used to stall the op addition to the request
+ * list.
+ */
+DEFINE_MUTEX(request_mutex);
+
+/* hash table for storing operations waiting for matching downcall */
+struct list_head *htable_ops_in_progress;
+DEFINE_SPINLOCK(htable_ops_in_progress_lock);
+
+/* list for queueing upcall operations */
+LIST_HEAD(orangefs_request_list);
+
+/* used to protect the above orangefs_request_list */
+DEFINE_SPINLOCK(orangefs_request_list_lock);
+
+/* used for incoming request notification */
+DECLARE_WAIT_QUEUE_HEAD(orangefs_request_list_waitq);
+
+static int __init orangefs_init(void)
+{
+       int ret = -1;
+       __u32 i = 0;
+
+       /* convert input debug mask to a 64-bit unsigned integer */
+       gossip_debug_mask = (unsigned long long) module_parm_debug_mask;
+
+       /*
+        * set the kernel's gossip debug string; invalid mask values will
+        * be ignored.
+        */
+       debug_mask_to_string(&gossip_debug_mask, 0);
+
+       /* remove any invalid values from the mask */
+       debug_string_to_mask(kernel_debug_string, &gossip_debug_mask, 0);
+
+       /*
+        * if the mask has a non-zero value, then indicate that the mask
+        * was set when the kernel module was loaded.  The orangefs dev ioctl
+        * command will look at this boolean to determine if the kernel's
+        * debug mask should be overwritten when the client-core is started.
+        */
+       if (gossip_debug_mask != 0)
+               kernel_mask_set_mod_init = true;
+
+       /* print information message to the system log */
+       pr_info("orangefs: orangefs_init called with debug mask: :%s: :%llx:\n",
+              kernel_debug_string,
+              (unsigned long long)gossip_debug_mask);
+
+       ret = bdi_init(&orangefs_backing_dev_info);
+
+       if (ret)
+               return ret;
+
+       if (op_timeout_secs < 0)
+               op_timeout_secs = 0;
+
+       if (slot_timeout_secs < 0)
+               slot_timeout_secs = 0;
+
+       /* initialize global bookkeeping data structures */
+       ret = op_cache_initialize();
+       if (ret < 0)
+               goto err;
+
+       ret = orangefs_inode_cache_initialize();
+       if (ret < 0)
+               goto cleanup_op;
+
+       /* Initialize the orangefsdev subsystem. */
+       ret = orangefs_dev_init();
+       if (ret < 0) {
+               gossip_err("orangefs: could not initialize device subsystem %d!\n",
+                          ret);
+               goto cleanup_inode;
+       }
+
+       htable_ops_in_progress =
+           kcalloc(hash_table_size, sizeof(struct list_head), GFP_KERNEL);
+       if (!htable_ops_in_progress) {
+               gossip_err("Failed to initialize op hashtable");
+               ret = -ENOMEM;
+               goto cleanup_device;
+       }
+
+       /* initialize a doubly linked list at each hash table index */
+       for (i = 0; i < hash_table_size; i++)
+               INIT_LIST_HEAD(&htable_ops_in_progress[i]);
+
+       ret = fsid_key_table_initialize();
+       if (ret < 0)
+               goto cleanup_progress_table;
+
+       /*
+        * Build the contents of /sys/kernel/debug/orangefs/debug-help
+        * from the keywords in the kernel keyword/mask array.
+        *
+        * The keywords in the client keyword/mask array are
+        * unknown at boot time.
+        *
+        * orangefs_prepare_debugfs_help_string will be used again
+        * later to rebuild the debug-help file after the client starts
+        * and passes along the needed info. The argument signifies
+        * which time orangefs_prepare_debugfs_help_string is being
+        * called.
+        *
+        */
+       ret = orangefs_prepare_debugfs_help_string(1);
+       if (ret) {
+               fsid_key_table_finalize();
+               goto cleanup_progress_table;
+       }
+
+       orangefs_debugfs_init();
+       orangefs_kernel_debug_init();
+       orangefs_sysfs_init();
+
+       ret = register_filesystem(&orangefs_fs_type);
+       if (ret == 0) {
+               pr_info("orangefs: module version %s loaded\n", ORANGEFS_VERSION);
+               return 0;
+       }
+
+       orangefs_debugfs_cleanup();
+       orangefs_sysfs_exit();
+       fsid_key_table_finalize();
+
+cleanup_progress_table:
+       kfree(htable_ops_in_progress);
+
+cleanup_device:
+       orangefs_dev_cleanup();
+
+cleanup_inode:
+       orangefs_inode_cache_finalize();
+
+cleanup_op:
+       op_cache_finalize();
+
+err:
+       bdi_destroy(&orangefs_backing_dev_info);
+
+out:
+       return ret;
+}
+
+static void __exit orangefs_exit(void)
+{
+       int i = 0;
+
+       gossip_debug(GOSSIP_INIT_DEBUG, "orangefs: orangefs_exit called\n");
+
+       unregister_filesystem(&orangefs_fs_type);
+       orangefs_debugfs_cleanup();
+       orangefs_sysfs_exit();
+       fsid_key_table_finalize();
+       orangefs_dev_cleanup();
+       BUG_ON(!list_empty(&orangefs_request_list));
+       for (i = 0; i < hash_table_size; i++)
+               BUG_ON(!list_empty(&htable_ops_in_progress[i]));
+
+       orangefs_inode_cache_finalize();
+       op_cache_finalize();
+
+       kfree(htable_ops_in_progress);
+
+       bdi_destroy(&orangefs_backing_dev_info);
+
+       pr_info("orangefs: module version %s unloaded\n", ORANGEFS_VERSION);
+}
+
+/*
+ * Walk the hash table of in-progress operations and mark each of them
+ * purged as well (purge_waiting_ops() does the same for ops still on
+ * the request list).
+ */
+void purge_inprogress_ops(void)
+{
+       int i;
+
+       for (i = 0; i < hash_table_size; i++) {
+               struct orangefs_kernel_op_s *op;
+               struct orangefs_kernel_op_s *next;
+
+               spin_lock(&htable_ops_in_progress_lock);
+               list_for_each_entry_safe(op,
+                                        next,
+                                        &htable_ops_in_progress[i],
+                                        list) {
+                       spin_lock(&op->lock);
+                       gossip_debug(GOSSIP_INIT_DEBUG,
+                               "pvfs2-client-core: purging in-progress op tag "
+                               "%llu %s\n",
+                               llu(op->tag),
+                               get_opname_string(op));
+                       set_op_state_purged(op);
+                       spin_unlock(&op->lock);
+               }
+               spin_unlock(&htable_ops_in_progress_lock);
+       }
+}
+
+module_init(orangefs_init);
+module_exit(orangefs_exit);
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
new file mode 100644 (file)
index 0000000..83f4053
--- /dev/null
@@ -0,0 +1,1773 @@
+/*
+ * Documentation/ABI/stable/orangefs-sysfs:
+ *
+ * What:               /sys/fs/orangefs/perf_counter_reset
+ * Date:               June 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     echo a 0 or a 1 into perf_counter_reset to
+ *                     reset all the counters in
+ *                     /sys/fs/orangefs/perf_counters
+ *                     except ones with PINT_PERF_PRESERVE set.
+ *
+ *
+ * What:               /sys/fs/orangefs/perf_counters/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Counters and settings for various caches.
+ *                     Read only.
+ *
+ *
+ * What:               /sys/fs/orangefs/perf_time_interval_secs
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Length of perf counter intervals in
+ *                     seconds.
+ *
+ *
+ * What:               /sys/fs/orangefs/perf_history_size
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     The perf_counters cache statistics have N, or
+ *                     perf_history_size, samples. The default is
+ *                     one.
+ *
+ *                     Every perf_time_interval_secs the (first)
+ *                     samples are reset.
+ *
+ *                     If N is greater than one, the "current" set
+ *                     of samples is reset, and the samples from the
+ *                     other N-1 intervals remain available.
+ *
+ *
+ * What:               /sys/fs/orangefs/op_timeout_secs
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Service operation timeout in seconds.
+ *
+ *
+ * What:               /sys/fs/orangefs/slot_timeout_secs
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     "Slot" timeout in seconds. A "slot"
+ *                     is an indexed buffer in the shared
+ *                     memory segment used for communication
+ *                     between the kernel module and userspace.
+ *                     Slots are requested and waited for,
+ *                     the wait times out after slot_timeout_secs.
+ *
+ *
+ * What:               /sys/fs/orangefs/acache/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Attribute cache configurable settings.
+ *
+ *
+ * What:               /sys/fs/orangefs/ncache/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Name cache configurable settings.
+ *
+ *
+ * What:               /sys/fs/orangefs/capcache/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Capability cache configurable settings.
+ *
+ *
+ * What:               /sys/fs/orangefs/ccache/...
+ * Date:               Jun 2015
+ * Contact:            Mike Marshall <hubcap@omnibond.com>
+ * Description:
+ *                     Credential cache configurable settings.
+ *
+ */
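[editor's note] A hypothetical user-space snippet (not from the patch) touching two of the attributes documented above: read op_timeout_secs and trigger a perf counter reset. The file names are the documented ones; everything else is illustrative.

#include <stdio.h>

int main(void)
{
        char buf[64];
        FILE *f;

        f = fopen("/sys/fs/orangefs/op_timeout_secs", "r");
        if (f) {
                if (fgets(buf, sizeof(buf), f))
                        printf("op_timeout_secs: %s", buf);
                fclose(f);
        }

        f = fopen("/sys/fs/orangefs/perf_counter_reset", "w");
        if (f) {
                fputs("1\n", f);        /* per the doc, a 0 or a 1 requests a reset */
                fclose(f);
        }
        return 0;
}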
+
+#include <linux/fs.h>
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-sysfs.h"
+
+#define ORANGEFS_KOBJ_ID "orangefs"
+#define ACACHE_KOBJ_ID "acache"
+#define CAPCACHE_KOBJ_ID "capcache"
+#define CCACHE_KOBJ_ID "ccache"
+#define NCACHE_KOBJ_ID "ncache"
+#define PC_KOBJ_ID "pc"
+#define STATS_KOBJ_ID "stats"
+
+struct orangefs_obj {
+       struct kobject kobj;
+       int op_timeout_secs;
+       int perf_counter_reset;
+       int perf_history_size;
+       int perf_time_interval_secs;
+       int slot_timeout_secs;
+};
+
+struct acache_orangefs_obj {
+       struct kobject kobj;
+       int hard_limit;
+       int reclaim_percentage;
+       int soft_limit;
+       int timeout_msecs;
+};
+
+struct capcache_orangefs_obj {
+       struct kobject kobj;
+       int hard_limit;
+       int reclaim_percentage;
+       int soft_limit;
+       int timeout_secs;
+};
+
+struct ccache_orangefs_obj {
+       struct kobject kobj;
+       int hard_limit;
+       int reclaim_percentage;
+       int soft_limit;
+       int timeout_secs;
+};
+
+struct ncache_orangefs_obj {
+       struct kobject kobj;
+       int hard_limit;
+       int reclaim_percentage;
+       int soft_limit;
+       int timeout_msecs;
+};
+
+struct pc_orangefs_obj {
+       struct kobject kobj;
+       char *acache;
+       char *capcache;
+       char *ncache;
+};
+
+struct stats_orangefs_obj {
+       struct kobject kobj;
+       int reads;
+       int writes;
+};
+
+struct orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct orangefs_obj *orangefs_obj,
+                       struct orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct orangefs_obj *orangefs_obj,
+                        struct orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct acache_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct acache_orangefs_obj *acache_orangefs_obj,
+                       struct acache_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct acache_orangefs_obj *acache_orangefs_obj,
+                        struct acache_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct capcache_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct capcache_orangefs_obj *capcache_orangefs_obj,
+                       struct capcache_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct capcache_orangefs_obj *capcache_orangefs_obj,
+                        struct capcache_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct ccache_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct ccache_orangefs_obj *ccache_orangefs_obj,
+                       struct ccache_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct ccache_orangefs_obj *ccache_orangefs_obj,
+                        struct ccache_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct ncache_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct ncache_orangefs_obj *ncache_orangefs_obj,
+                       struct ncache_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct ncache_orangefs_obj *ncache_orangefs_obj,
+                        struct ncache_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct pc_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct pc_orangefs_obj *pc_orangefs_obj,
+                       struct pc_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct pc_orangefs_obj *pc_orangefs_obj,
+                        struct pc_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+struct stats_orangefs_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct stats_orangefs_obj *stats_orangefs_obj,
+                       struct stats_orangefs_attribute *attr,
+                       char *buf);
+       ssize_t (*store)(struct stats_orangefs_obj *stats_orangefs_obj,
+                        struct stats_orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count);
+};
+
+static ssize_t orangefs_attr_show(struct kobject *kobj,
+                                 struct attribute *attr,
+                                 char *buf)
+{
+       struct orangefs_attribute *attribute;
+       struct orangefs_obj *orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct orangefs_attribute, attr);
+       orangefs_obj = container_of(kobj, struct orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t orangefs_attr_store(struct kobject *kobj,
+                                  struct attribute *attr,
+                                  const char *buf,
+                                  size_t len)
+{
+       struct orangefs_attribute *attribute;
+       struct orangefs_obj *orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "orangefs_attr_store: start\n");
+
+       attribute = container_of(attr, struct orangefs_attribute, attr);
+       orangefs_obj = container_of(kobj, struct orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops orangefs_sysfs_ops = {
+       .show = orangefs_attr_show,
+       .store = orangefs_attr_store,
+};
+
+static ssize_t acache_orangefs_attr_show(struct kobject *kobj,
+                                        struct attribute *attr,
+                                        char *buf)
+{
+       struct acache_orangefs_attribute *attribute;
+       struct acache_orangefs_obj *acache_orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct acache_orangefs_attribute, attr);
+       acache_orangefs_obj =
+               container_of(kobj, struct acache_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(acache_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t acache_orangefs_attr_store(struct kobject *kobj,
+                                         struct attribute *attr,
+                                         const char *buf,
+                                         size_t len)
+{
+       struct acache_orangefs_attribute *attribute;
+       struct acache_orangefs_obj *acache_orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "acache_orangefs_attr_store: start\n");
+
+       attribute = container_of(attr, struct acache_orangefs_attribute, attr);
+       acache_orangefs_obj =
+               container_of(kobj, struct acache_orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(acache_orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops acache_orangefs_sysfs_ops = {
+       .show = acache_orangefs_attr_show,
+       .store = acache_orangefs_attr_store,
+};
+
+static ssize_t capcache_orangefs_attr_show(struct kobject *kobj,
+                                          struct attribute *attr,
+                                          char *buf)
+{
+       struct capcache_orangefs_attribute *attribute;
+       struct capcache_orangefs_obj *capcache_orangefs_obj;
+       int rc;
+
+       attribute =
+               container_of(attr, struct capcache_orangefs_attribute, attr);
+       capcache_orangefs_obj =
+               container_of(kobj, struct capcache_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(capcache_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t capcache_orangefs_attr_store(struct kobject *kobj,
+                                           struct attribute *attr,
+                                           const char *buf,
+                                           size_t len)
+{
+       struct capcache_orangefs_attribute *attribute;
+       struct capcache_orangefs_obj *capcache_orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "capcache_orangefs_attr_store: start\n");
+
+       attribute =
+               container_of(attr, struct capcache_orangefs_attribute, attr);
+       capcache_orangefs_obj =
+               container_of(kobj, struct capcache_orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(capcache_orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops capcache_orangefs_sysfs_ops = {
+       .show = capcache_orangefs_attr_show,
+       .store = capcache_orangefs_attr_store,
+};
+
+static ssize_t ccache_orangefs_attr_show(struct kobject *kobj,
+                                        struct attribute *attr,
+                                        char *buf)
+{
+       struct ccache_orangefs_attribute *attribute;
+       struct ccache_orangefs_obj *ccache_orangefs_obj;
+       int rc;
+
+       attribute =
+               container_of(attr, struct ccache_orangefs_attribute, attr);
+       ccache_orangefs_obj =
+               container_of(kobj, struct ccache_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(ccache_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t ccache_orangefs_attr_store(struct kobject *kobj,
+                                         struct attribute *attr,
+                                         const char *buf,
+                                         size_t len)
+{
+       struct ccache_orangefs_attribute *attribute;
+       struct ccache_orangefs_obj *ccache_orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "ccache_orangefs_attr_store: start\n");
+
+       attribute =
+               container_of(attr, struct ccache_orangefs_attribute, attr);
+       ccache_orangefs_obj =
+               container_of(kobj, struct ccache_orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(ccache_orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops ccache_orangefs_sysfs_ops = {
+       .show = ccache_orangefs_attr_show,
+       .store = ccache_orangefs_attr_store,
+};
+
+static ssize_t ncache_orangefs_attr_show(struct kobject *kobj,
+                                        struct attribute *attr,
+                                        char *buf)
+{
+       struct ncache_orangefs_attribute *attribute;
+       struct ncache_orangefs_obj *ncache_orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct ncache_orangefs_attribute, attr);
+       ncache_orangefs_obj =
+               container_of(kobj, struct ncache_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(ncache_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static ssize_t ncache_orangefs_attr_store(struct kobject *kobj,
+                                         struct attribute *attr,
+                                         const char *buf,
+                                         size_t len)
+{
+       struct ncache_orangefs_attribute *attribute;
+       struct ncache_orangefs_obj *ncache_orangefs_obj;
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "ncache_orangefs_attr_store: start\n");
+
+       attribute = container_of(attr, struct ncache_orangefs_attribute, attr);
+       ncache_orangefs_obj =
+               container_of(kobj, struct ncache_orangefs_obj, kobj);
+
+       if (!attribute->store) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->store(ncache_orangefs_obj, attribute, buf, len);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops ncache_orangefs_sysfs_ops = {
+       .show = ncache_orangefs_attr_show,
+       .store = ncache_orangefs_attr_store,
+};
+
+static ssize_t pc_orangefs_attr_show(struct kobject *kobj,
+                                    struct attribute *attr,
+                                    char *buf)
+{
+       struct pc_orangefs_attribute *attribute;
+       struct pc_orangefs_obj *pc_orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct pc_orangefs_attribute, attr);
+       pc_orangefs_obj =
+               container_of(kobj, struct pc_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(pc_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops pc_orangefs_sysfs_ops = {
+       .show = pc_orangefs_attr_show,
+};
+
+static ssize_t stats_orangefs_attr_show(struct kobject *kobj,
+                                       struct attribute *attr,
+                                       char *buf)
+{
+       struct stats_orangefs_attribute *attribute;
+       struct stats_orangefs_obj *stats_orangefs_obj;
+       int rc;
+
+       attribute = container_of(attr, struct stats_orangefs_attribute, attr);
+       stats_orangefs_obj =
+               container_of(kobj, struct stats_orangefs_obj, kobj);
+
+       if (!attribute->show) {
+               rc = -EIO;
+               goto out;
+       }
+
+       rc = attribute->show(stats_orangefs_obj, attribute, buf);
+
+out:
+       return rc;
+}
+
+static const struct sysfs_ops stats_orangefs_sysfs_ops = {
+       .show = stats_orangefs_attr_show,
+};
+
+static void orangefs_release(struct kobject *kobj)
+{
+       struct orangefs_obj *orangefs_obj;
+
+       orangefs_obj = container_of(kobj, struct orangefs_obj, kobj);
+       kfree(orangefs_obj);
+}
+
+static void acache_orangefs_release(struct kobject *kobj)
+{
+       struct acache_orangefs_obj *acache_orangefs_obj;
+
+       acache_orangefs_obj =
+               container_of(kobj, struct acache_orangefs_obj, kobj);
+       kfree(acache_orangefs_obj);
+}
+
+static void capcache_orangefs_release(struct kobject *kobj)
+{
+       struct capcache_orangefs_obj *capcache_orangefs_obj;
+
+       capcache_orangefs_obj =
+               container_of(kobj, struct capcache_orangefs_obj, kobj);
+       kfree(capcache_orangefs_obj);
+}
+
+static void ccache_orangefs_release(struct kobject *kobj)
+{
+       struct ccache_orangefs_obj *ccache_orangefs_obj;
+
+       ccache_orangefs_obj =
+               container_of(kobj, struct ccache_orangefs_obj, kobj);
+       kfree(ccache_orangefs_obj);
+}
+
+static void ncache_orangefs_release(struct kobject *kobj)
+{
+       struct ncache_orangefs_obj *ncache_orangefs_obj;
+
+       ncache_orangefs_obj =
+               container_of(kobj, struct ncache_orangefs_obj, kobj);
+       kfree(ncache_orangefs_obj);
+}
+
+static void pc_orangefs_release(struct kobject *kobj)
+{
+       struct pc_orangefs_obj *pc_orangefs_obj;
+
+       pc_orangefs_obj =
+               container_of(kobj, struct pc_orangefs_obj, kobj);
+       kfree(pc_orangefs_obj);
+}
+
+static void stats_orangefs_release(struct kobject *kobj)
+{
+       struct stats_orangefs_obj *stats_orangefs_obj;
+
+       stats_orangefs_obj =
+               container_of(kobj, struct stats_orangefs_obj, kobj);
+       kfree(stats_orangefs_obj);
+}
+
+static ssize_t sysfs_int_show(char *kobj_id, char *buf, void *attr)
+{
+       int rc = -EIO;
+       struct orangefs_attribute *orangefs_attr;
+       struct stats_orangefs_attribute *stats_orangefs_attr;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG, "sysfs_int_show: id:%s:\n", kobj_id);
+
+       if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) {
+               orangefs_attr = (struct orangefs_attribute *)attr;
+
+               if (!strcmp(orangefs_attr->attr.name, "op_timeout_secs")) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%d\n",
+                                      op_timeout_secs);
+                       goto out;
+               } else if (!strcmp(orangefs_attr->attr.name,
+                                  "slot_timeout_secs")) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%d\n",
+                                      slot_timeout_secs);
+                       goto out;
+               } else {
+                       goto out;
+               }
+
+       } else if (!strcmp(kobj_id, STATS_KOBJ_ID)) {
+               stats_orangefs_attr = (struct stats_orangefs_attribute *)attr;
+
+               if (!strcmp(stats_orangefs_attr->attr.name, "reads")) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%lu\n",
+                                      g_orangefs_stats.reads);
+                       goto out;
+               } else if (!strcmp(stats_orangefs_attr->attr.name, "writes")) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%lu\n",
+                                      g_orangefs_stats.writes);
+                       goto out;
+               } else {
+                       goto out;
+               }
+       }
+
+out:
+
+       return rc;
+}
+
+static ssize_t int_orangefs_show(struct orangefs_obj *orangefs_obj,
+                                struct orangefs_attribute *attr,
+                                char *buf)
+{
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "int_orangefs_show:start attr->attr.name:%s:\n",
+                    attr->attr.name);
+
+       rc = sysfs_int_show(ORANGEFS_KOBJ_ID, buf, (void *) attr);
+
+       return rc;
+}
+
+static ssize_t int_stats_show(struct stats_orangefs_obj *stats_orangefs_obj,
+                       struct stats_orangefs_attribute *attr,
+                       char *buf)
+{
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "int_stats_show:start attr->attr.name:%s:\n",
+                    attr->attr.name);
+
+       rc = sysfs_int_show(STATS_KOBJ_ID, buf, (void *) attr);
+
+       return rc;
+}
+
+static ssize_t int_store(struct orangefs_obj *orangefs_obj,
+                        struct orangefs_attribute *attr,
+                        const char *buf,
+                        size_t count)
+{
+       int rc = 0;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "int_store: start attr->attr.name:%s: buf:%s:\n",
+                    attr->attr.name, buf);
+
+       if (!strcmp(attr->attr.name, "op_timeout_secs")) {
+               rc = kstrtoint(buf, 0, &op_timeout_secs);
+               goto out;
+       } else if (!strcmp(attr->attr.name, "slot_timeout_secs")) {
+               rc = kstrtoint(buf, 0, &slot_timeout_secs);
+               goto out;
+       } else {
+               goto out;
+       }
+
+out:
+       if (rc)
+               rc = -EINVAL;
+       else
+               rc = count;
+
+       return rc;
+}
+
+/*
+ * obtain attribute values from userspace with a service operation.
+ */
+static int sysfs_service_op_show(char *kobj_id, char *buf, void *attr)
+{
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int rc = 0;
+       char *ser_op_type = NULL;
+       struct orangefs_attribute *orangefs_attr;
+       struct acache_orangefs_attribute *acache_attr;
+       struct capcache_orangefs_attribute *capcache_attr;
+       struct ccache_orangefs_attribute *ccache_attr;
+       struct ncache_orangefs_attribute *ncache_attr;
+       struct pc_orangefs_attribute *pc_attr;
+       __u32 op_alloc_type;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "sysfs_service_op_show: id:%s:\n",
+                    kobj_id);
+
+       if (strcmp(kobj_id, PC_KOBJ_ID))
+               op_alloc_type = ORANGEFS_VFS_OP_PARAM;
+       else
+               op_alloc_type = ORANGEFS_VFS_OP_PERF_COUNT;
+
+       new_op = op_alloc(op_alloc_type);
+       if (!new_op)
+               return -ENOMEM;
+
+       /* Can't do a service_operation if the client is not running... */
+       rc = is_daemon_in_service();
+       if (rc) {
+               pr_info("%s: Client not running :%d:\n",
+                       __func__,
+                       is_daemon_in_service());
+               goto out;
+       }
+
+       if (strcmp(kobj_id, PC_KOBJ_ID))
+               new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_GET;
+
+       if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) {
+               orangefs_attr = (struct orangefs_attribute *)attr;
+
+               if (!strcmp(orangefs_attr->attr.name, "perf_history_size"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE;
+               else if (!strcmp(orangefs_attr->attr.name,
+                                "perf_time_interval_secs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS;
+               else if (!strcmp(orangefs_attr->attr.name,
+                                "perf_counter_reset"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_PERF_RESET;
+
+       } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) {
+               acache_attr = (struct acache_orangefs_attribute *)attr;
+
+               if (!strcmp(acache_attr->attr.name, "timeout_msecs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS;
+
+               if (!strcmp(acache_attr->attr.name, "hard_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT;
+
+               if (!strcmp(acache_attr->attr.name, "soft_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT;
+
+               if (!strcmp(acache_attr->attr.name, "reclaim_percentage"))
+                       new_op->upcall.req.param.op =
+                         ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE;
+
+       } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) {
+               capcache_attr = (struct capcache_orangefs_attribute *)attr;
+
+               if (!strcmp(capcache_attr->attr.name, "timeout_secs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS;
+
+               if (!strcmp(capcache_attr->attr.name, "hard_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT;
+
+               if (!strcmp(capcache_attr->attr.name, "soft_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT;
+
+               if (!strcmp(capcache_attr->attr.name, "reclaim_percentage"))
+                       new_op->upcall.req.param.op =
+                         ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE;
+
+       } else if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) {
+               ccache_attr = (struct ccache_orangefs_attribute *)attr;
+
+               if (!strcmp(ccache_attr->attr.name, "timeout_secs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS;
+
+               if (!strcmp(ccache_attr->attr.name, "hard_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT;
+
+               if (!strcmp(ccache_attr->attr.name, "soft_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT;
+
+               if (!strcmp(ccache_attr->attr.name, "reclaim_percentage"))
+                       new_op->upcall.req.param.op =
+                         ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE;
+
+       } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) {
+               ncache_attr = (struct ncache_orangefs_attribute *)attr;
+
+               if (!strcmp(ncache_attr->attr.name, "timeout_msecs"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS;
+
+               if (!strcmp(ncache_attr->attr.name, "hard_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT;
+
+               if (!strcmp(ncache_attr->attr.name, "soft_limit"))
+                       new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT;
+
+               if (!strcmp(ncache_attr->attr.name, "reclaim_percentage"))
+                       new_op->upcall.req.param.op =
+                         ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE;
+
+       } else if (!strcmp(kobj_id, PC_KOBJ_ID)) {
+               pc_attr = (struct pc_orangefs_attribute *)attr;
+
+               if (!strcmp(pc_attr->attr.name, ACACHE_KOBJ_ID))
+                       new_op->upcall.req.perf_count.type =
+                               ORANGEFS_PERF_COUNT_REQUEST_ACACHE;
+
+               if (!strcmp(pc_attr->attr.name, CAPCACHE_KOBJ_ID))
+                       new_op->upcall.req.perf_count.type =
+                               ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE;
+
+               if (!strcmp(pc_attr->attr.name, NCACHE_KOBJ_ID))
+                       new_op->upcall.req.perf_count.type =
+                               ORANGEFS_PERF_COUNT_REQUEST_NCACHE;
+
+       } else {
+               gossip_err("sysfs_service_op_show: unknown kobj_id:%s:\n",
+                          kobj_id);
+               rc = -EINVAL;
+               goto out;
+       }
+
+
+       if (strcmp(kobj_id, PC_KOBJ_ID))
+               ser_op_type = "orangefs_param";
+       else
+               ser_op_type = "orangefs_perf_count";
+
+       /*
+        * The service_operation will return an errno return code on
+        * error, and zero on success.
+        */
+       rc = service_operation(new_op, ser_op_type, ORANGEFS_OP_INTERRUPTIBLE);
+
+out:
+       if (!rc) {
+               if (strcmp(kobj_id, PC_KOBJ_ID)) {
+                       rc = scnprintf(buf,
+                                      PAGE_SIZE,
+                                      "%d\n",
+                                      (int)new_op->downcall.resp.param.value);
+               } else {
+                       rc = scnprintf(
+                               buf,
+                               PAGE_SIZE,
+                               "%s",
+                               new_op->downcall.resp.perf_count.buffer);
+               }
+       }
+
+       op_release(new_op);
+
+       return rc;
+
+}
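
Distilled, the show path above is: allocate an op, select the parameter, run the service operation, format the downcall value, and release the op. Below is a condensed sketch for one known attribute (the acache timeout) that uses only helpers and constants already visible in this patch; the function name is illustrative, and the is_daemon_in_service() check performed by the real code is omitted.

        /* Sketch only, not part of the patch: read one client-side parameter. */
        static ssize_t acache_timeout_msecs_show_sketch(char *buf)
        {
                struct orangefs_kernel_op_s *new_op;
                int rc;

                new_op = op_alloc(ORANGEFS_VFS_OP_PARAM);
                if (!new_op)
                        return -ENOMEM;

                new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_GET;
                new_op->upcall.req.param.op =
                        ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS;

                /* Returns an errno on error, zero on success. */
                rc = service_operation(new_op, "orangefs_param",
                                       ORANGEFS_OP_INTERRUPTIBLE);
                if (!rc)
                        rc = scnprintf(buf, PAGE_SIZE, "%d\n",
                                       (int)new_op->downcall.resp.param.value);

                op_release(new_op);
                return rc;
        }
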
+
+static ssize_t service_orangefs_show(struct orangefs_obj *orangefs_obj,
+                                    struct orangefs_attribute *attr,
+                                    char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(ORANGEFS_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t
+       service_acache_show(struct acache_orangefs_obj *acache_orangefs_obj,
+                           struct acache_orangefs_attribute *attr,
+                           char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(ACACHE_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t service_capcache_show(struct capcache_orangefs_obj
+                                       *capcache_orangefs_obj,
+                                    struct capcache_orangefs_attribute *attr,
+                                    char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(CAPCACHE_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t service_ccache_show(struct ccache_orangefs_obj
+                                       *ccache_orangefs_obj,
+                                  struct ccache_orangefs_attribute *attr,
+                                  char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(CCACHE_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t
+       service_ncache_show(struct ncache_orangefs_obj *ncache_orangefs_obj,
+                           struct ncache_orangefs_attribute *attr,
+                           char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(NCACHE_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+static ssize_t
+       service_pc_show(struct pc_orangefs_obj *pc_orangefs_obj,
+                           struct pc_orangefs_attribute *attr,
+                           char *buf)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_show(PC_KOBJ_ID, buf, (void *)attr);
+
+       return rc;
+}
+
+/*
+ * pass attribute values back to userspace with a service operation.
+ *
+ * We have to do a memory allocation, a kstrtoint and a service operation.
+ * And we have to evaluate what the user entered, to make sure the
+ * value is within the range supported by the attribute. So, there's
+ * a lot of return code checking and mapping going on here.
+ *
+ * We want to return 1 if we think everything went OK, and
+ * -EINVAL if not.
+ */
+static int sysfs_service_op_store(char *kobj_id, const char *buf, void *attr)
+{
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int val = 0;
+       int rc = 0;
+       struct orangefs_attribute *orangefs_attr;
+       struct acache_orangefs_attribute *acache_attr;
+       struct capcache_orangefs_attribute *capcache_attr;
+       struct ccache_orangefs_attribute *ccache_attr;
+       struct ncache_orangefs_attribute *ncache_attr;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG,
+                    "sysfs_service_op_store: id:%s:\n",
+                    kobj_id);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_PARAM);
+       if (!new_op)
+               return -EINVAL; /* sic */
+
+       /* Can't do a service_operation if the client is not running... */
+       rc = is_daemon_in_service();
+       if (rc) {
+               pr_info("%s: Client not running :%d:\n",
+                       __func__,
+                       is_daemon_in_service());
+               goto out;
+       }
+
+       /*
+        * The value we want to send back to userspace is in buf.
+        */
+       rc = kstrtoint(buf, 0, &val);
+       if (rc)
+               goto out;
+
+       if (!strcmp(kobj_id, ORANGEFS_KOBJ_ID)) {
+               orangefs_attr = (struct orangefs_attribute *)attr;
+
+               if (!strcmp(orangefs_attr->attr.name, "perf_history_size")) {
+                       if (val > 0) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(orangefs_attr->attr.name,
+                                  "perf_time_interval_secs")) {
+                       if (val > 0) {
+                               new_op->upcall.req.param.op =
+                               ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(orangefs_attr->attr.name,
+                                  "perf_counter_reset")) {
+                       if ((val == 0) || (val == 1)) {
+                               new_op->upcall.req.param.op =
+                                       ORANGEFS_PARAM_REQUEST_OP_PERF_RESET;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else if (!strcmp(kobj_id, ACACHE_KOBJ_ID)) {
+               acache_attr = (struct acache_orangefs_attribute *)attr;
+
+               if (!strcmp(acache_attr->attr.name, "hard_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(acache_attr->attr.name, "soft_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(acache_attr->attr.name,
+                                  "reclaim_percentage")) {
+                       if ((val > -1) && (val < 101)) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(acache_attr->attr.name, "timeout_msecs")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else if (!strcmp(kobj_id, CAPCACHE_KOBJ_ID)) {
+               capcache_attr = (struct capcache_orangefs_attribute *)attr;
+
+               if (!strcmp(capcache_attr->attr.name, "hard_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(capcache_attr->attr.name, "soft_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(capcache_attr->attr.name,
+                                  "reclaim_percentage")) {
+                       if ((val > -1) && (val < 101)) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(capcache_attr->attr.name, "timeout_secs")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else if (!strcmp(kobj_id, CCACHE_KOBJ_ID)) {
+               ccache_attr = (struct ccache_orangefs_attribute *)attr;
+
+               if (!strcmp(ccache_attr->attr.name, "hard_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ccache_attr->attr.name, "soft_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ccache_attr->attr.name,
+                                  "reclaim_percentage")) {
+                       if ((val > -1) && (val < 101)) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ccache_attr->attr.name, "timeout_secs")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else if (!strcmp(kobj_id, NCACHE_KOBJ_ID)) {
+               ncache_attr = (struct ncache_orangefs_attribute *)attr;
+
+               if (!strcmp(ncache_attr->attr.name, "hard_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ncache_attr->attr.name, "soft_limit")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ncache_attr->attr.name,
+                                  "reclaim_percentage")) {
+                       if ((val > -1) && (val < 101)) {
+                               new_op->upcall.req.param.op =
+                                       ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               } else if (!strcmp(ncache_attr->attr.name, "timeout_msecs")) {
+                       if (val > -1) {
+                               new_op->upcall.req.param.op =
+                                 ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS;
+                       } else {
+                               rc = 0;
+                               goto out;
+                       }
+               }
+
+       } else {
+               gossip_err("sysfs_service_op_store: unknown kobj_id:%s:\n",
+                          kobj_id);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       new_op->upcall.req.param.type = ORANGEFS_PARAM_REQUEST_SET;
+
+       new_op->upcall.req.param.value = val;
+
+       /*
+        * The service_operation will return an errno return code on
+        * error, and zero on success.
+        */
+       rc = service_operation(new_op, "orangefs_param", ORANGEFS_OP_INTERRUPTIBLE);
+
+       if (rc < 0) {
+               gossip_err("sysfs_service_op_store: service op returned:%d:\n",
+                       rc);
+               rc = 0;
+       } else {
+               rc = 1;
+       }
+
+out:
+       op_release(new_op);
+
+       if (rc == -ENOMEM || rc == 0)
+               rc = -EINVAL;
+
+       return rc;
+}
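
The comment above sysfs_service_op_store() describes the writer-visible contract: an in-range value is forwarded to the client and the write returns the byte count, while out-of-range values and failed upcalls come back as EINVAL. A hypothetical userspace probe of that contract against acache/reclaim_percentage (valid range 0-100) might look like this; it is not part of the patch.

        /* Hypothetical example program, not part of this patch. */
        #include <errno.h>
        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/sys/fs/orangefs/acache/reclaim_percentage", O_WRONLY);

                if (fd < 0)
                        return 1;

                if (write(fd, "50", 2) < 0)     /* in range: expected to succeed */
                        perror("write 50");

                if (write(fd, "150", 3) < 0)    /* out of range: expect EINVAL */
                        printf("150 rejected: %s\n", strerror(errno));

                close(fd);
                return 0;
        }
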
+
+static ssize_t
+       service_orangefs_store(struct orangefs_obj *orangefs_obj,
+                              struct orangefs_attribute *attr,
+                              const char *buf,
+                              size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(ORANGEFS_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static ssize_t
+       service_acache_store(struct acache_orangefs_obj *acache_orangefs_obj,
+                            struct acache_orangefs_attribute *attr,
+                            const char *buf,
+                            size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(ACACHE_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static ssize_t
+       service_capcache_store(struct capcache_orangefs_obj
+                               *capcache_orangefs_obj,
+                              struct capcache_orangefs_attribute *attr,
+                              const char *buf,
+                              size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(CAPCACHE_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static ssize_t service_ccache_store(struct ccache_orangefs_obj
+                                       *ccache_orangefs_obj,
+                                   struct ccache_orangefs_attribute *attr,
+                                   const char *buf,
+                                   size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(CCACHE_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static ssize_t
+       service_ncache_store(struct ncache_orangefs_obj *ncache_orangefs_obj,
+                            struct ncache_orangefs_attribute *attr,
+                            const char *buf,
+                            size_t count)
+{
+       int rc = 0;
+
+       rc = sysfs_service_op_store(NCACHE_KOBJ_ID, buf, (void *) attr);
+
+       /* rc should have an errno value if the service_op went bad. */
+       if (rc == 1)
+               rc = count;
+
+       return rc;
+}
+
+static struct orangefs_attribute op_timeout_secs_attribute =
+       __ATTR(op_timeout_secs, 0664, int_orangefs_show, int_store);
+
+static struct orangefs_attribute slot_timeout_secs_attribute =
+       __ATTR(slot_timeout_secs, 0664, int_orangefs_show, int_store);
+
+static struct orangefs_attribute perf_counter_reset_attribute =
+       __ATTR(perf_counter_reset,
+              0664,
+              service_orangefs_show,
+              service_orangefs_store);
+
+static struct orangefs_attribute perf_history_size_attribute =
+       __ATTR(perf_history_size,
+              0664,
+              service_orangefs_show,
+              service_orangefs_store);
+
+static struct orangefs_attribute perf_time_interval_secs_attribute =
+       __ATTR(perf_time_interval_secs,
+              0664,
+              service_orangefs_show,
+              service_orangefs_store);
+
+static struct attribute *orangefs_default_attrs[] = {
+       &op_timeout_secs_attribute.attr,
+       &slot_timeout_secs_attribute.attr,
+       &perf_counter_reset_attribute.attr,
+       &perf_history_size_attribute.attr,
+       &perf_time_interval_secs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type orangefs_ktype = {
+       .sysfs_ops = &orangefs_sysfs_ops,
+       .release = orangefs_release,
+       .default_attrs = orangefs_default_attrs,
+};
+
+static struct acache_orangefs_attribute acache_hard_limit_attribute =
+       __ATTR(hard_limit,
+              0664,
+              service_acache_show,
+              service_acache_store);
+
+static struct acache_orangefs_attribute acache_reclaim_percent_attribute =
+       __ATTR(reclaim_percentage,
+              0664,
+              service_acache_show,
+              service_acache_store);
+
+static struct acache_orangefs_attribute acache_soft_limit_attribute =
+       __ATTR(soft_limit,
+              0664,
+              service_acache_show,
+              service_acache_store);
+
+static struct acache_orangefs_attribute acache_timeout_msecs_attribute =
+       __ATTR(timeout_msecs,
+              0664,
+              service_acache_show,
+              service_acache_store);
+
+static struct attribute *acache_orangefs_default_attrs[] = {
+       &acache_hard_limit_attribute.attr,
+       &acache_reclaim_percent_attribute.attr,
+       &acache_soft_limit_attribute.attr,
+       &acache_timeout_msecs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type acache_orangefs_ktype = {
+       .sysfs_ops = &acache_orangefs_sysfs_ops,
+       .release = acache_orangefs_release,
+       .default_attrs = acache_orangefs_default_attrs,
+};
+
+static struct capcache_orangefs_attribute capcache_hard_limit_attribute =
+       __ATTR(hard_limit,
+              0664,
+              service_capcache_show,
+              service_capcache_store);
+
+static struct capcache_orangefs_attribute capcache_reclaim_percent_attribute =
+       __ATTR(reclaim_percentage,
+              0664,
+              service_capcache_show,
+              service_capcache_store);
+
+static struct capcache_orangefs_attribute capcache_soft_limit_attribute =
+       __ATTR(soft_limit,
+              0664,
+              service_capcache_show,
+              service_capcache_store);
+
+static struct capcache_orangefs_attribute capcache_timeout_secs_attribute =
+       __ATTR(timeout_secs,
+              0664,
+              service_capcache_show,
+              service_capcache_store);
+
+static struct attribute *capcache_orangefs_default_attrs[] = {
+       &capcache_hard_limit_attribute.attr,
+       &capcache_reclaim_percent_attribute.attr,
+       &capcache_soft_limit_attribute.attr,
+       &capcache_timeout_secs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type capcache_orangefs_ktype = {
+       .sysfs_ops = &capcache_orangefs_sysfs_ops,
+       .release = capcache_orangefs_release,
+       .default_attrs = capcache_orangefs_default_attrs,
+};
+
+static struct ccache_orangefs_attribute ccache_hard_limit_attribute =
+       __ATTR(hard_limit,
+              0664,
+              service_ccache_show,
+              service_ccache_store);
+
+static struct ccache_orangefs_attribute ccache_reclaim_percent_attribute =
+       __ATTR(reclaim_percentage,
+              0664,
+              service_ccache_show,
+              service_ccache_store);
+
+static struct ccache_orangefs_attribute ccache_soft_limit_attribute =
+       __ATTR(soft_limit,
+              0664,
+              service_ccache_show,
+              service_ccache_store);
+
+static struct ccache_orangefs_attribute ccache_timeout_secs_attribute =
+       __ATTR(timeout_secs,
+              0664,
+              service_ccache_show,
+              service_ccache_store);
+
+static struct attribute *ccache_orangefs_default_attrs[] = {
+       &ccache_hard_limit_attribute.attr,
+       &ccache_reclaim_percent_attribute.attr,
+       &ccache_soft_limit_attribute.attr,
+       &ccache_timeout_secs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type ccache_orangefs_ktype = {
+       .sysfs_ops = &ccache_orangefs_sysfs_ops,
+       .release = ccache_orangefs_release,
+       .default_attrs = ccache_orangefs_default_attrs,
+};
+
+static struct ncache_orangefs_attribute ncache_hard_limit_attribute =
+       __ATTR(hard_limit,
+              0664,
+              service_ncache_show,
+              service_ncache_store);
+
+static struct ncache_orangefs_attribute ncache_reclaim_percent_attribute =
+       __ATTR(reclaim_percentage,
+              0664,
+              service_ncache_show,
+              service_ncache_store);
+
+static struct ncache_orangefs_attribute ncache_soft_limit_attribute =
+       __ATTR(soft_limit,
+              0664,
+              service_ncache_show,
+              service_ncache_store);
+
+static struct ncache_orangefs_attribute ncache_timeout_msecs_attribute =
+       __ATTR(timeout_msecs,
+              0664,
+              service_ncache_show,
+              service_ncache_store);
+
+static struct attribute *ncache_orangefs_default_attrs[] = {
+       &ncache_hard_limit_attribute.attr,
+       &ncache_reclaim_percent_attribute.attr,
+       &ncache_soft_limit_attribute.attr,
+       &ncache_timeout_msecs_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type ncache_orangefs_ktype = {
+       .sysfs_ops = &ncache_orangefs_sysfs_ops,
+       .release = ncache_orangefs_release,
+       .default_attrs = ncache_orangefs_default_attrs,
+};
+
+static struct pc_orangefs_attribute pc_acache_attribute =
+       __ATTR(acache,
+              0664,
+              service_pc_show,
+              NULL);
+
+static struct pc_orangefs_attribute pc_capcache_attribute =
+       __ATTR(capcache,
+              0664,
+              service_pc_show,
+              NULL);
+
+static struct pc_orangefs_attribute pc_ncache_attribute =
+       __ATTR(ncache,
+              0664,
+              service_pc_show,
+              NULL);
+
+static struct attribute *pc_orangefs_default_attrs[] = {
+       &pc_acache_attribute.attr,
+       &pc_capcache_attribute.attr,
+       &pc_ncache_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type pc_orangefs_ktype = {
+       .sysfs_ops = &pc_orangefs_sysfs_ops,
+       .release = pc_orangefs_release,
+       .default_attrs = pc_orangefs_default_attrs,
+};
+
+static struct stats_orangefs_attribute stats_reads_attribute =
+       __ATTR(reads,
+              0664,
+              int_stats_show,
+              NULL);
+
+static struct stats_orangefs_attribute stats_writes_attribute =
+       __ATTR(writes,
+              0664,
+              int_stats_show,
+              NULL);
+
+static struct attribute *stats_orangefs_default_attrs[] = {
+       &stats_reads_attribute.attr,
+       &stats_writes_attribute.attr,
+       NULL,
+};
+
+static struct kobj_type stats_orangefs_ktype = {
+       .sysfs_ops = &stats_orangefs_sysfs_ops,
+       .release = stats_orangefs_release,
+       .default_attrs = stats_orangefs_default_attrs,
+};
+
+static struct orangefs_obj *orangefs_obj;
+static struct acache_orangefs_obj *acache_orangefs_obj;
+static struct capcache_orangefs_obj *capcache_orangefs_obj;
+static struct ccache_orangefs_obj *ccache_orangefs_obj;
+static struct ncache_orangefs_obj *ncache_orangefs_obj;
+static struct pc_orangefs_obj *pc_orangefs_obj;
+static struct stats_orangefs_obj *stats_orangefs_obj;
+
+int orangefs_sysfs_init(void)
+{
+       int rc;
+
+       gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_init: start\n");
+
+       /* create /sys/fs/orangefs. */
+       orangefs_obj = kzalloc(sizeof(*orangefs_obj), GFP_KERNEL);
+       if (!orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&orangefs_obj->kobj,
+                                 &orangefs_ktype,
+                                 fs_kobj,
+                                 ORANGEFS_KOBJ_ID);
+
+       if (rc) {
+               kobject_put(&orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/acache. */
+       acache_orangefs_obj = kzalloc(sizeof(*acache_orangefs_obj), GFP_KERNEL);
+       if (!acache_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&acache_orangefs_obj->kobj,
+                                 &acache_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 ACACHE_KOBJ_ID);
+
+       if (rc) {
+               kobject_put(&acache_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&acache_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/capcache. */
+       capcache_orangefs_obj =
+               kzalloc(sizeof(*capcache_orangefs_obj), GFP_KERNEL);
+       if (!capcache_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&capcache_orangefs_obj->kobj,
+                                 &capcache_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 CAPCACHE_KOBJ_ID);
+       if (rc) {
+               kobject_put(&capcache_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&capcache_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/ccache. */
+       ccache_orangefs_obj =
+               kzalloc(sizeof(*ccache_orangefs_obj), GFP_KERNEL);
+       if (!ccache_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&ccache_orangefs_obj->kobj,
+                                 &ccache_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 CCACHE_KOBJ_ID);
+       if (rc) {
+               kobject_put(&ccache_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&ccache_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/ncache. */
+       ncache_orangefs_obj = kzalloc(sizeof(*ncache_orangefs_obj), GFP_KERNEL);
+       if (!ncache_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&ncache_orangefs_obj->kobj,
+                                 &ncache_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 NCACHE_KOBJ_ID);
+
+       if (rc) {
+               kobject_put(&ncache_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&ncache_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/perf_counters. */
+       pc_orangefs_obj = kzalloc(sizeof(*pc_orangefs_obj), GFP_KERNEL);
+       if (!pc_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&pc_orangefs_obj->kobj,
+                                 &pc_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 "perf_counters");
+
+       if (rc) {
+               kobject_put(&pc_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&pc_orangefs_obj->kobj, KOBJ_ADD);
+
+       /* create /sys/fs/orangefs/stats. */
+       stats_orangefs_obj = kzalloc(sizeof(*stats_orangefs_obj), GFP_KERNEL);
+       if (!stats_orangefs_obj) {
+               rc = -EINVAL;
+               goto out;
+       }
+
+       rc = kobject_init_and_add(&stats_orangefs_obj->kobj,
+                                 &stats_orangefs_ktype,
+                                 &orangefs_obj->kobj,
+                                 STATS_KOBJ_ID);
+
+       if (rc) {
+               kobject_put(&stats_orangefs_obj->kobj);
+               rc = -EINVAL;
+               goto out;
+       }
+
+       kobject_uevent(&stats_orangefs_obj->kobj, KOBJ_ADD);
+out:
+       return rc;
+}
+
+void orangefs_sysfs_exit(void)
+{
+       gossip_debug(GOSSIP_SYSFS_DEBUG, "orangefs_sysfs_exit: start\n");
+
+       kobject_put(&acache_orangefs_obj->kobj);
+       kobject_put(&capcache_orangefs_obj->kobj);
+       kobject_put(&ccache_orangefs_obj->kobj);
+       kobject_put(&ncache_orangefs_obj->kobj);
+       kobject_put(&pc_orangefs_obj->kobj);
+       kobject_put(&stats_orangefs_obj->kobj);
+
+       kobject_put(&orangefs_obj->kobj);
+}
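
Putting the registrations above together, a mounted module should end up with a sysfs layout along these lines (directories only; the attribute files under each depend on the ktypes defined earlier in this file):

    /sys/fs/orangefs/
            acache/
            capcache/
            ccache/
            ncache/
            perf_counters/
            stats/
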
diff --git a/fs/orangefs/orangefs-sysfs.h b/fs/orangefs/orangefs-sysfs.h
new file mode 100644 (file)
index 0000000..f0b7638
--- /dev/null
@@ -0,0 +1,2 @@
+extern int orangefs_sysfs_init(void);
+extern void orangefs_sysfs_exit(void);
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
new file mode 100644 (file)
index 0000000..fa3ed8a
--- /dev/null
@@ -0,0 +1,1263 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-dev-proto.h"
+#include "orangefs-bufmap.h"
+
+__s32 fsid_of_op(struct orangefs_kernel_op_s *op)
+{
+       __s32 fsid = ORANGEFS_FS_ID_NULL;
+
+       if (op) {
+               switch (op->upcall.type) {
+               case ORANGEFS_VFS_OP_FILE_IO:
+                       fsid = op->upcall.req.io.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_LOOKUP:
+                       fsid = op->upcall.req.lookup.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_CREATE:
+                       fsid = op->upcall.req.create.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_GETATTR:
+                       fsid = op->upcall.req.getattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_REMOVE:
+                       fsid = op->upcall.req.remove.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_MKDIR:
+                       fsid = op->upcall.req.mkdir.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_READDIR:
+                       fsid = op->upcall.req.readdir.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_SETATTR:
+                       fsid = op->upcall.req.setattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_SYMLINK:
+                       fsid = op->upcall.req.sym.parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_RENAME:
+                       fsid = op->upcall.req.rename.old_parent_refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_STATFS:
+                       fsid = op->upcall.req.statfs.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_TRUNCATE:
+                       fsid = op->upcall.req.truncate.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_MMAP_RA_FLUSH:
+                       fsid = op->upcall.req.ra_cache_flush.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_FS_UMOUNT:
+                       fsid = op->upcall.req.fs_umount.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_GETXATTR:
+                       fsid = op->upcall.req.getxattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_SETXATTR:
+                       fsid = op->upcall.req.setxattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_LISTXATTR:
+                       fsid = op->upcall.req.listxattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_REMOVEXATTR:
+                       fsid = op->upcall.req.removexattr.refn.fs_id;
+                       break;
+               case ORANGEFS_VFS_OP_FSYNC:
+                       fsid = op->upcall.req.fsync.refn.fs_id;
+                       break;
+               default:
+                       break;
+               }
+       }
+       return fsid;
+}
+
+static int orangefs_inode_flags(struct ORANGEFS_sys_attr_s *attrs)
+{
+       int flags = 0;
+       if (attrs->flags & ORANGEFS_IMMUTABLE_FL)
+               flags |= S_IMMUTABLE;
+       else
+               flags &= ~S_IMMUTABLE;
+       if (attrs->flags & ORANGEFS_APPEND_FL)
+               flags |= S_APPEND;
+       else
+               flags &= ~S_APPEND;
+       if (attrs->flags & ORANGEFS_NOATIME_FL)
+               flags |= S_NOATIME;
+       else
+               flags &= ~S_NOATIME;
+       return flags;
+}
+
+static int orangefs_inode_perms(struct ORANGEFS_sys_attr_s *attrs)
+{
+       int perm_mode = 0;
+
+       if (attrs->perms & ORANGEFS_O_EXECUTE)
+               perm_mode |= S_IXOTH;
+       if (attrs->perms & ORANGEFS_O_WRITE)
+               perm_mode |= S_IWOTH;
+       if (attrs->perms & ORANGEFS_O_READ)
+               perm_mode |= S_IROTH;
+
+       if (attrs->perms & ORANGEFS_G_EXECUTE)
+               perm_mode |= S_IXGRP;
+       if (attrs->perms & ORANGEFS_G_WRITE)
+               perm_mode |= S_IWGRP;
+       if (attrs->perms & ORANGEFS_G_READ)
+               perm_mode |= S_IRGRP;
+
+       if (attrs->perms & ORANGEFS_U_EXECUTE)
+               perm_mode |= S_IXUSR;
+       if (attrs->perms & ORANGEFS_U_WRITE)
+               perm_mode |= S_IWUSR;
+       if (attrs->perms & ORANGEFS_U_READ)
+               perm_mode |= S_IRUSR;
+
+       if (attrs->perms & ORANGEFS_G_SGID)
+               perm_mode |= S_ISGID;
+       if (attrs->perms & ORANGEFS_U_SUID)
+               perm_mode |= S_ISUID;
+
+       return perm_mode;
+}
+
+/* NOTE: symname is ignored unless the inode is a sym link */
+static int copy_attributes_to_inode(struct inode *inode,
+                                   struct ORANGEFS_sys_attr_s *attrs,
+                                   char *symname)
+{
+       int ret = -1;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       loff_t inode_size = 0;
+       loff_t rounded_up_size = 0;
+
+
+       /*
+        * arbitrarily set the inode block size; FIXME: we need to
+        * resolve the difference between the reported inode blocksize
+        * and the PAGE_CACHE_SIZE, since our block count will always
+        * be wrong.
+        *
+        * For now, we're setting the block count to be the proper
+        * number assuming the block size is 512 bytes, and the size is
+        * rounded up to the nearest 4K.  This is apparently required
+        * to get proper size reports from the 'du' shell utility.
+        *
+        * changing the inode->i_blkbits to something other than
+        * PAGE_CACHE_SHIFT breaks mmap/execution as we depend on that.
+        */
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "attrs->mask = %x (objtype = %s)\n",
+                    attrs->mask,
+                    attrs->objtype == ORANGEFS_TYPE_METAFILE ? "file" :
+                    attrs->objtype == ORANGEFS_TYPE_DIRECTORY ? "directory" :
+                    attrs->objtype == ORANGEFS_TYPE_SYMLINK ? "symlink" :
+                       "invalid/unknown");
+
+       switch (attrs->objtype) {
+       case ORANGEFS_TYPE_METAFILE:
+               inode->i_flags = orangefs_inode_flags(attrs);
+               if (attrs->mask & ORANGEFS_ATTR_SYS_SIZE) {
+                       inode_size = (loff_t) attrs->size;
+                       rounded_up_size =
+                           (inode_size + (4096 - (inode_size % 4096)));
+
+                       orangefs_lock_inode(inode);
+                       inode->i_bytes = inode_size;
+                       inode->i_blocks =
+                           (unsigned long)(rounded_up_size / 512);
+                       orangefs_unlock_inode(inode);
+
+                       /*
+                        * NOTE: make sure all the places we're called
+                        * from have the inode->i_sem lock. We're fine
+                        * in 99% of the cases since we're mostly
+                        * called from a lookup.
+                        */
+                       inode->i_size = inode_size;
+               }
+               break;
+       case ORANGEFS_TYPE_SYMLINK:
+               if (symname != NULL) {
+                       inode->i_size = (loff_t) strlen(symname);
+                       break;
+               }
+               /*FALLTHRU*/
+       default:
+               inode->i_size = PAGE_CACHE_SIZE;
+
+               orangefs_lock_inode(inode);
+               inode_set_bytes(inode, inode->i_size);
+               orangefs_unlock_inode(inode);
+               break;
+       }
+
+       inode->i_uid = make_kuid(&init_user_ns, attrs->owner);
+       inode->i_gid = make_kgid(&init_user_ns, attrs->group);
+       inode->i_atime.tv_sec = (time_t) attrs->atime;
+       inode->i_mtime.tv_sec = (time_t) attrs->mtime;
+       inode->i_ctime.tv_sec = (time_t) attrs->ctime;
+       inode->i_atime.tv_nsec = 0;
+       inode->i_mtime.tv_nsec = 0;
+       inode->i_ctime.tv_nsec = 0;
+
+       inode->i_mode = orangefs_inode_perms(attrs);
+
+       if (is_root_handle(inode)) {
+               /* special case: mark the root inode as sticky */
+               inode->i_mode |= S_ISVTX;
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "Marking inode %pU as sticky\n",
+                            get_khandle_from_ino(inode));
+       }
+
+       switch (attrs->objtype) {
+       case ORANGEFS_TYPE_METAFILE:
+               inode->i_mode |= S_IFREG;
+               ret = 0;
+               break;
+       case ORANGEFS_TYPE_DIRECTORY:
+               inode->i_mode |= S_IFDIR;
+               /* NOTE: we have no good way to keep nlink consistent
+                * for directories across clients; keep constant at 1.
+                * Why 1?  If we go with 2, then find(1) gets confused
+                * and won't work properly without the -noleaf option.
+                */
+               set_nlink(inode, 1);
+               ret = 0;
+               break;
+       case ORANGEFS_TYPE_SYMLINK:
+               inode->i_mode |= S_IFLNK;
+
+               /* copy link target to inode private data */
+               if (orangefs_inode && symname) {
+                       strncpy(orangefs_inode->link_target,
+                               symname,
+                               ORANGEFS_NAME_MAX);
+                       gossip_debug(GOSSIP_UTILS_DEBUG,
+                                    "Copied attr link target %s\n",
+                                    orangefs_inode->link_target);
+               }
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "symlink mode %o\n",
+                            inode->i_mode);
+               ret = 0;
+               break;
+       default:
+               gossip_err("orangefs: copy_attributes_to_inode: got invalid attribute type %x\n",
+                       attrs->objtype);
+       }
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs: copy_attributes_to_inode: setting i_mode to %o, i_size to %lu\n",
+                    inode->i_mode,
+                    (unsigned long)i_size_read(inode));
+
+       return ret;
+}
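
A worked example of the block accounting above, with illustrative numbers:

    /* inode_size = 5000
     * rounded_up_size = 5000 + (4096 - (5000 % 4096)) = 8192
     * i_blocks = 8192 / 512 = 16
     * (a size that is already a multiple of 4096 gets a full extra 4 KiB added)
     */
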
+
+/*
+ * NOTE: in kernel land, we never use the sys_attr->link_target for
+ * anything, so don't bother copying it into the sys_attr object here.
+ */
+static inline int copy_attributes_from_inode(struct inode *inode,
+                                            struct ORANGEFS_sys_attr_s *attrs,
+                                            struct iattr *iattr)
+{
+       umode_t tmp_mode;
+
+       if (!iattr || !inode || !attrs) {
+               gossip_err("NULL iattr (%p), inode (%p), attrs (%p) "
+                          "in copy_attributes_from_inode!\n",
+                          iattr,
+                          inode,
+                          attrs);
+               return -EINVAL;
+       }
+       /*
+        * We need to be careful to only copy the attributes out of the
+        * iattr object that we know are valid.
+        */
+       attrs->mask = 0;
+       if (iattr->ia_valid & ATTR_UID) {
+               attrs->owner = from_kuid(current_user_ns(), iattr->ia_uid);
+               attrs->mask |= ORANGEFS_ATTR_SYS_UID;
+               gossip_debug(GOSSIP_UTILS_DEBUG, "(UID) %d\n", attrs->owner);
+       }
+       if (iattr->ia_valid & ATTR_GID) {
+               attrs->group = from_kgid(current_user_ns(), iattr->ia_gid);
+               attrs->mask |= ORANGEFS_ATTR_SYS_GID;
+               gossip_debug(GOSSIP_UTILS_DEBUG, "(GID) %d\n", attrs->group);
+       }
+
+       if (iattr->ia_valid & ATTR_ATIME) {
+               attrs->mask |= ORANGEFS_ATTR_SYS_ATIME;
+               if (iattr->ia_valid & ATTR_ATIME_SET) {
+                       attrs->atime =
+                           orangefs_convert_time_field(&iattr->ia_atime);
+                       attrs->mask |= ORANGEFS_ATTR_SYS_ATIME_SET;
+               }
+       }
+       if (iattr->ia_valid & ATTR_MTIME) {
+               attrs->mask |= ORANGEFS_ATTR_SYS_MTIME;
+               if (iattr->ia_valid & ATTR_MTIME_SET) {
+                       attrs->mtime =
+                           orangefs_convert_time_field(&iattr->ia_mtime);
+                       attrs->mask |= ORANGEFS_ATTR_SYS_MTIME_SET;
+               }
+       }
+       if (iattr->ia_valid & ATTR_CTIME)
+               attrs->mask |= ORANGEFS_ATTR_SYS_CTIME;
+
+       /*
+        * ORANGEFS cannot set size with a setattr operation. It is unlikely
+        * to be requested through the VFS, but just in case, don't worry
+        * about ATTR_SIZE.
+        */
+
+       if (iattr->ia_valid & ATTR_MODE) {
+               tmp_mode = iattr->ia_mode;
+               if (tmp_mode & (S_ISVTX)) {
+                       if (is_root_handle(inode)) {
+                               /*
+                                * allow sticky bit to be set on root (since
+                                * it shows up that way by default anyhow),
+                                * but don't show it to the server
+                                */
+                               tmp_mode -= S_ISVTX;
+                       } else {
+                               gossip_debug(GOSSIP_UTILS_DEBUG,
+                                            "User attempted to set sticky bit on non-root directory; returning EINVAL.\n");
+                               return -EINVAL;
+                       }
+               }
+
+               if (tmp_mode & (S_ISUID)) {
+                       gossip_debug(GOSSIP_UTILS_DEBUG,
+                                    "Attempting to set setuid bit (not supported); returning EINVAL.\n");
+                       return -EINVAL;
+               }
+
+               attrs->perms = ORANGEFS_util_translate_mode(tmp_mode);
+               attrs->mask |= ORANGEFS_ATTR_SYS_PERM;
+       }
+
+       return 0;
+}
+
+static int compare_attributes_to_inode(struct inode *inode,
+                                      struct ORANGEFS_sys_attr_s *attrs,
+                                      char *symname,
+                                      int mask)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       loff_t inode_size, rounded_up_size;
+
+       /* Much of what happens below relies on the type being around. */
+       if (!(mask & ORANGEFS_ATTR_SYS_TYPE))
+               return 0;
+
+       if (attrs->objtype == ORANGEFS_TYPE_METAFILE &&
+           inode->i_flags != orangefs_inode_flags(attrs))
+               return 0;
+
+       /* Compare file size. */
+
+       switch (attrs->objtype) {
+       case ORANGEFS_TYPE_METAFILE:
+               if (mask & ORANGEFS_ATTR_SYS_SIZE) {
+                       inode_size = attrs->size;
+                       rounded_up_size = inode_size +
+                           (4096 - (inode_size % 4096));
+                       if (inode->i_bytes != inode_size ||
+                           inode->i_blocks != rounded_up_size/512)
+                               return 0;
+               }
+               break;
+       case ORANGEFS_TYPE_SYMLINK:
+               if (mask & ORANGEFS_ATTR_SYS_SIZE)
+                       if (symname && strlen(symname) != inode->i_size)
+                               return 0;
+               break;
+       default:
+               if (inode->i_size != PAGE_CACHE_SIZE &&
+                   inode_get_bytes(inode) != PAGE_CACHE_SIZE)
+                       return 0;
+       }
+
+       /* Compare general attributes. */
+
+       if (mask & ORANGEFS_ATTR_SYS_UID &&
+           !uid_eq(inode->i_uid, make_kuid(&init_user_ns, attrs->owner)))
+               return 0;
+       if (mask & ORANGEFS_ATTR_SYS_GID &&
+           !gid_eq(inode->i_gid, make_kgid(&init_user_ns, attrs->group)))
+               return 0;
+       if (mask & ORANGEFS_ATTR_SYS_ATIME &&
+           inode->i_atime.tv_sec != attrs->atime)
+               return 0;
+       if (mask & ORANGEFS_ATTR_SYS_MTIME &&
+           inode->i_mtime.tv_sec != attrs->mtime)
+               return 0;
+       if (mask & ORANGEFS_ATTR_SYS_CTIME &&
+           inode->i_ctime.tv_sec != attrs->ctime)
+               return 0;
+       if (inode->i_atime.tv_nsec != 0 ||
+           inode->i_mtime.tv_nsec != 0 ||
+           inode->i_ctime.tv_nsec != 0)
+               return 0;
+
+       if (mask & ORANGEFS_ATTR_SYS_PERM &&
+           (inode->i_mode & ~(S_ISVTX|S_IFREG|S_IFDIR|S_IFLNK)) !=
+           orangefs_inode_perms(attrs))
+               return 0;
+
+       if (is_root_handle(inode))
+               if (!(inode->i_mode & S_ISVTX))
+                       return 0;
+
+       /* Compare file type. */
+
+       switch (attrs->objtype) {
+       case ORANGEFS_TYPE_METAFILE:
+               if (!(inode->i_mode & S_IFREG))
+                       return 0;
+               break;
+       case ORANGEFS_TYPE_DIRECTORY:
+               if (!(inode->i_mode & S_IFDIR))
+                       return 0;
+               if (inode->i_nlink != 1)
+                       return 0;
+               break;
+       case ORANGEFS_TYPE_SYMLINK:
+               if (!(inode->i_mode & S_IFLNK))
+                       return 0;
+               if (orangefs_inode && symname &&
+                   mask & ORANGEFS_ATTR_SYS_LNK_TARGET)
+                       if (strcmp(orangefs_inode->link_target, symname))
+                               return 0;
+               break;
+       default:
+               gossip_err("orangefs: compare_attributes_to_inode: got invalid attribute type %x\n",
+                   attrs->objtype);
+
+       }
+
+       return 1;
+}
+
+/*
+ * Issues an orangefs getattr request and fills in the appropriate inode
+ * attributes if successful. When check is 0, returns 0 on success and -errno
+ * otherwise. When check is 1, returns 1 on success if the inode is still
+ * valid, 0 on success if the inode is stale, and -errno otherwise.
+ */
+int orangefs_inode_getattr(struct inode *inode, __u32 getattr_mask, int check)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       int ret = -EINVAL;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "%s: called on inode %pU\n",
+                    __func__,
+                    get_khandle_from_ino(inode));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
+       if (!new_op)
+               return -ENOMEM;
+       new_op->upcall.req.getattr.refn = orangefs_inode->refn;
+       new_op->upcall.req.getattr.mask = getattr_mask;
+
+       ret = service_operation(new_op, __func__,
+                               get_interruptible_flag(inode));
+       if (ret != 0)
+               goto out;
+
+       if (check) {
+               ret = compare_attributes_to_inode(inode,
+                   &new_op->downcall.resp.getattr.attributes,
+                   new_op->downcall.resp.getattr.link_target,
+                   getattr_mask);
+
+               if (new_op->downcall.resp.getattr.attributes.objtype ==
+                   ORANGEFS_TYPE_METAFILE) {
+                       if (orangefs_inode->blksize !=
+                           new_op->downcall.resp.getattr.attributes.blksize)
+                               ret = 0;
+               } else {
+                       if (orangefs_inode->blksize != 1 << inode->i_blkbits)
+                               ret = 0;
+               }
+       } else {
+               if (copy_attributes_to_inode(inode,
+                               &new_op->downcall.resp.getattr.attributes,
+                               new_op->downcall.resp.getattr.link_target)) {
+                       gossip_err("%s: failed to copy attributes\n", __func__);
+                       ret = -ENOENT;
+                       goto out;
+               }
+
+               /*
+                * Store blksize in orangefs specific part of inode structure;
+                * we are only going to use this when reporting to stat, so
+                * that it doesn't perturb any inode-related code paths.
+                */
+               if (new_op->downcall.resp.getattr.attributes.objtype ==
+                               ORANGEFS_TYPE_METAFILE) {
+                       orangefs_inode->blksize = new_op->downcall.resp.
+                           getattr.attributes.blksize;
+               } else {
+                       /*
+                        * mimic behavior of generic_fillattr() for other file
+                        * types.
+                        */
+                       orangefs_inode->blksize = (1 << inode->i_blkbits);
+
+               }
+       }
+
+out:
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "Getattr on handle %pU, "
+                    "fsid %d\n  (inode ct = %d) returned %d\n",
+                    &orangefs_inode->refn.khandle,
+                    orangefs_inode->refn.fs_id,
+                    (int)atomic_read(&inode->i_count),
+                    ret);
+
+       op_release(new_op);
+       return ret;
+}
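
A minimal caller sketch for the two modes described above; the mask is just one plausible choice taken from protocol.h, and inode/ret are assumed to already be in scope:

    /* refresh: copy the server's attributes into the inode; 0 or -errno */
    ret = orangefs_inode_getattr(inode, ORANGEFS_ATTR_SYS_ALL_NOHINT, 0);

    /* revalidate: 1 = inode still matches, 0 = stale, < 0 = -errno */
    ret = orangefs_inode_getattr(inode, ORANGEFS_ATTR_SYS_ALL_NOHINT, 1);
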
+
+/*
+ * issues an orangefs setattr request to make sure the new attribute values
+ * take effect if successful.  returns 0 on success; -errno otherwise
+ */
+int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       int ret;
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_SETATTR);
+       if (!new_op)
+               return -ENOMEM;
+
+       new_op->upcall.req.setattr.refn = orangefs_inode->refn;
+       ret = copy_attributes_from_inode(inode,
+                      &new_op->upcall.req.setattr.attributes,
+                      iattr);
+       if (ret >= 0) {
+               ret = service_operation(new_op, __func__,
+                               get_interruptible_flag(inode));
+
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "orangefs_inode_setattr: returning %d\n",
+                            ret);
+       }
+
+       op_release(new_op);
+
+       /*
+        * successful setattr should clear the atime, mtime and
+        * ctime flags.
+        */
+       if (ret == 0) {
+               ClearAtimeFlag(orangefs_inode);
+               ClearMtimeFlag(orangefs_inode);
+               ClearCtimeFlag(orangefs_inode);
+               ClearModeFlag(orangefs_inode);
+       }
+
+       return ret;
+}
+
+int orangefs_flush_inode(struct inode *inode)
+{
+       /*
+        * If it is a dirty inode, this function gets called.
+        * Gather all the information that needs to be setattr'ed
+        * Right now, this will only be used for mode, atime, mtime
+        * and/or ctime.
+        */
+       struct iattr wbattr;
+       int ret;
+       int mtime_flag;
+       int ctime_flag;
+       int atime_flag;
+       int mode_flag;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+
+       memset(&wbattr, 0, sizeof(wbattr));
+
+       /*
+        * check inode flags up front, and clear them if they are set.  This
+        * will prevent multiple processes from all trying to flush the same
+        * inode if they call close() simultaneously
+        */
+       mtime_flag = MtimeFlag(orangefs_inode);
+       ClearMtimeFlag(orangefs_inode);
+       ctime_flag = CtimeFlag(orangefs_inode);
+       ClearCtimeFlag(orangefs_inode);
+       atime_flag = AtimeFlag(orangefs_inode);
+       ClearAtimeFlag(orangefs_inode);
+       mode_flag = ModeFlag(orangefs_inode);
+       ClearModeFlag(orangefs_inode);
+
+       /*  -- Lazy atime, mtime and ctime update --
+        * Note: all times are dictated by the server in the new scheme
+        * and not by the clients.
+        *
+        * Mode updates are handled here as well.
+        */
+
+       if (mtime_flag)
+               wbattr.ia_valid |= ATTR_MTIME;
+       if (ctime_flag)
+               wbattr.ia_valid |= ATTR_CTIME;
+       if (atime_flag)
+               wbattr.ia_valid |= ATTR_ATIME;
+
+       if (mode_flag) {
+               wbattr.ia_mode = inode->i_mode;
+               wbattr.ia_valid |= ATTR_MODE;
+       }
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "*********** orangefs_flush_inode: %pU "
+                    "(ia_valid %d)\n",
+                    get_khandle_from_ino(inode),
+                    wbattr.ia_valid);
+       if (wbattr.ia_valid == 0) {
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "orangefs_flush_inode skipping setattr()\n");
+               return 0;
+       }
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_flush_inode (%pU) writing mode %o\n",
+                    get_khandle_from_ino(inode),
+                    inode->i_mode);
+
+       ret = orangefs_inode_setattr(inode, &wbattr);
+
+       return ret;
+}
+
+int orangefs_unmount_sb(struct super_block *sb)
+{
+       int ret = -EINVAL;
+       struct orangefs_kernel_op_s *new_op = NULL;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_unmount_sb called on sb %p\n",
+                    sb);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FS_UMOUNT);
+       if (!new_op)
+               return -ENOMEM;
+       new_op->upcall.req.fs_umount.id = ORANGEFS_SB(sb)->id;
+       new_op->upcall.req.fs_umount.fs_id = ORANGEFS_SB(sb)->fs_id;
+       strncpy(new_op->upcall.req.fs_umount.orangefs_config_server,
+               ORANGEFS_SB(sb)->devname,
+               ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "Attempting ORANGEFS Unmount via host %s\n",
+                    new_op->upcall.req.fs_umount.orangefs_config_server);
+
+       ret = service_operation(new_op, "orangefs_fs_umount", 0);
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_unmount: got return value of %d\n", ret);
+       if (ret)
+               sb = ERR_PTR(ret);
+       else
+               ORANGEFS_SB(sb)->mount_pending = 1;
+
+       op_release(new_op);
+       return ret;
+}
+
+/*
+ * NOTE: on successful cancellation, be sure to return -EINTR, as
+ * that's the return value the caller expects
+ */
+int orangefs_cancel_op_in_progress(__u64 tag)
+{
+       int ret = -EINVAL;
+       struct orangefs_kernel_op_s *new_op = NULL;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_cancel_op_in_progress called on tag %llu\n",
+                    llu(tag));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_CANCEL);
+       if (!new_op)
+               return -ENOMEM;
+       new_op->upcall.req.cancel.op_tag = tag;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "Attempting ORANGEFS operation cancellation of tag %llu\n",
+                    llu(new_op->upcall.req.cancel.op_tag));
+
+       ret = service_operation(new_op, "orangefs_cancel", ORANGEFS_OP_CANCELLATION);
+
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "orangefs_cancel_op_in_progress: got return value of %d\n",
+                    ret);
+
+       op_release(new_op);
+       return ret;
+}
+
+void orangefs_make_bad_inode(struct inode *inode)
+{
+       if (is_root_handle(inode)) {
+               /*
+                * if this occurs, the pvfs2-client-core was killed but we
+                * can't afford to lose the inode operations and such
+                * associated with the root handle in any case.
+                */
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "*** NOT making bad root inode %pU\n",
+                            get_khandle_from_ino(inode));
+       } else {
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "*** making bad inode %pU\n",
+                            get_khandle_from_ino(inode));
+               make_bad_inode(inode);
+       }
+}
+
+/* Block all blockable signals... */
+void orangefs_block_signals(sigset_t *orig_sigset)
+{
+       sigset_t mask;
+
+       /*
+        * Initialize all entries in the signal set to the
+        * inverse of the given mask.
+        */
+       siginitsetinv(&mask, sigmask(SIGKILL));
+
+       /* Block 'em Danno... */
+       sigprocmask(SIG_BLOCK, &mask, orig_sigset);
+}
+
+/* set the signal mask to the given template... */
+void orangefs_set_signals(sigset_t *sigset)
+{
+       sigprocmask(SIG_SETMASK, sigset, NULL);
+}
+
+/*
+ * The following is a very dirty hack that is now a permanent part of the
+ * ORANGEFS protocol. See protocol.h for more error definitions.
+ */
+
+/* The order matches include/orangefs-types.h in the OrangeFS source. */
+static int PINT_errno_mapping[] = {
+       0, EPERM, ENOENT, EINTR, EIO, ENXIO, EBADF, EAGAIN, ENOMEM,
+       EFAULT, EBUSY, EEXIST, ENODEV, ENOTDIR, EISDIR, EINVAL, EMFILE,
+       EFBIG, ENOSPC, EROFS, EMLINK, EPIPE, EDEADLK, ENAMETOOLONG,
+       ENOLCK, ENOSYS, ENOTEMPTY, ELOOP, EWOULDBLOCK, ENOMSG, EUNATCH,
+       EBADR, EDEADLOCK, ENODATA, ETIME, ENONET, EREMOTE, ECOMM,
+       EPROTO, EBADMSG, EOVERFLOW, ERESTART, EMSGSIZE, EPROTOTYPE,
+       ENOPROTOOPT, EPROTONOSUPPORT, EOPNOTSUPP, EADDRINUSE,
+       EADDRNOTAVAIL, ENETDOWN, ENETUNREACH, ENETRESET, ENOBUFS,
+       ETIMEDOUT, ECONNREFUSED, EHOSTDOWN, EHOSTUNREACH, EALREADY,
+       EACCES, ECONNRESET, ERANGE
+};
+
+int orangefs_normalize_to_errno(__s32 error_code)
+{
+       __u32 i;
+
+       /* Success */
+       if (error_code == 0) {
+               return 0;
+       /*
+        * This shouldn't ever happen. If it does it should be fixed on the
+        * server.
+        */
+       } else if (error_code > 0) {
+               gossip_err("orangefs: error status received.\n");
+               gossip_err("orangefs: assuming error code is inverted.\n");
+               error_code = -error_code;
+       }
+
+       /*
+        * XXX: This is very bad since error codes from ORANGEFS may not be
+        * suitable for return into userspace.
+        */
+
+       /*
+        * Convert ORANGEFS error values into errno values suitable for return
+        * from the kernel.
+        */
+       if ((-error_code) & ORANGEFS_NON_ERRNO_ERROR_BIT) {
+               if (((-error_code) &
+                   (ORANGEFS_ERROR_NUMBER_BITS|ORANGEFS_NON_ERRNO_ERROR_BIT|
+                   ORANGEFS_ERROR_BIT)) == ORANGEFS_ECANCEL) {
+                       /*
+                        * cancellation error codes generally correspond to
+                        * a timeout from the client's perspective
+                        */
+                       error_code = -ETIMEDOUT;
+               } else {
+                       /* assume a default error code */
+                       gossip_err("orangefs: warning: got error code without errno equivalent: %d.\n", error_code);
+                       error_code = -EINVAL;
+               }
+
+       /* Convert ORANGEFS encoded errno values into regular errno values. */
+       } else if ((-error_code) & ORANGEFS_ERROR_BIT) {
+               i = (-error_code) & ~(ORANGEFS_ERROR_BIT|ORANGEFS_ERROR_CLASS_BITS);
+               if (i < sizeof(PINT_errno_mapping)/sizeof(*PINT_errno_mapping))
+                       error_code = -PINT_errno_mapping[i];
+               else
+                       error_code = -EINVAL;
+
+       /*
+        * Only ORANGEFS protocol error codes should ever come here. Otherwise
+        * there is a bug somewhere.
+        */
+       } else {
+               gossip_err("orangefs: orangefs_normalize_to_errno: got error code which is not from ORANGEFS.\n");
+       }
+       return error_code;
+}
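
Two worked examples of the decoding above (class bits 0x100 chosen only to show the masking; all other values come from protocol.h):

    /* encoded errno:  error_code = -(ORANGEFS_ERROR_BIT | 0x100 | 2) = -0x40000102
     *                 i = 0x40000102 & ~(ORANGEFS_ERROR_BIT | ORANGEFS_ERROR_CLASS_BITS) = 2
     *                 returns -PINT_errno_mapping[2], i.e. -ENOENT
     *
     * protocol error: error_code = -ORANGEFS_ECANCEL = -0x60000001
     *                 ORANGEFS_NON_ERRNO_ERROR_BIT is set and the masked value
     *                 matches ORANGEFS_ECANCEL, so the function returns -ETIMEDOUT
     */
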
+
+#define NUM_MODES 11
+__s32 ORANGEFS_util_translate_mode(int mode)
+{
+       int ret = 0;
+       int i = 0;
+       static int modes[NUM_MODES] = {
+               S_IXOTH, S_IWOTH, S_IROTH,
+               S_IXGRP, S_IWGRP, S_IRGRP,
+               S_IXUSR, S_IWUSR, S_IRUSR,
+               S_ISGID, S_ISUID
+       };
+       static int orangefs_modes[NUM_MODES] = {
+               ORANGEFS_O_EXECUTE, ORANGEFS_O_WRITE, ORANGEFS_O_READ,
+               ORANGEFS_G_EXECUTE, ORANGEFS_G_WRITE, ORANGEFS_G_READ,
+               ORANGEFS_U_EXECUTE, ORANGEFS_U_WRITE, ORANGEFS_U_READ,
+               ORANGEFS_G_SGID, ORANGEFS_U_SUID
+       };
+
+       for (i = 0; i < NUM_MODES; i++)
+               if (mode & modes[i])
+                       ret |= orangefs_modes[i];
+
+       return ret;
+}
+#undef NUM_MODES
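
For illustration, translating a familiar mode with the tables above:

    /* ORANGEFS_util_translate_mode(0644)
     *   = ORANGEFS_U_READ | ORANGEFS_U_WRITE | ORANGEFS_G_READ | ORANGEFS_O_READ
     *   = (1 << 8) | (1 << 7) | (1 << 5) | (1 << 2)
     *   = 0x1a4
     * (with these bit definitions the result coincides numerically with the
     *  rwx/suid/sgid bits of the POSIX mode itself)
     */
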
+
+/*
+ * After obtaining a string representation of the client's debug
+ * keywords and their associated masks, this function is called to build an
+ * array of these values.
+ */
+int orangefs_prepare_cdm_array(char *debug_array_string)
+{
+       int i;
+       int rc = -EINVAL;
+       char *cds_head = NULL;
+       char *cds_delimiter = NULL;
+       int keyword_len = 0;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+       /*
+        * figure out how many elements the cdm_array needs.
+        */
+       for (i = 0; i < strlen(debug_array_string); i++)
+               if (debug_array_string[i] == '\n')
+                       cdm_element_count++;
+
+       if (!cdm_element_count) {
+               pr_info("No elements in client debug array string!\n");
+               goto out;
+       }
+
+       cdm_array =
+               kzalloc(cdm_element_count * sizeof(struct client_debug_mask),
+                       GFP_KERNEL);
+       if (!cdm_array) {
+               pr_info("malloc failed for cdm_array!\n");
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       cds_head = debug_array_string;
+
+       for (i = 0; i < cdm_element_count; i++) {
+               cds_delimiter = strchr(cds_head, '\n');
+               *cds_delimiter = '\0';
+
+               keyword_len = strcspn(cds_head, " ");
+
+               cdm_array[i].keyword = kzalloc(keyword_len + 1, GFP_KERNEL);
+               if (!cdm_array[i].keyword) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+
+               sscanf(cds_head,
+                      "%s %llx %llx",
+                      cdm_array[i].keyword,
+                      (unsigned long long *)&(cdm_array[i].mask1),
+                      (unsigned long long *)&(cdm_array[i].mask2));
+
+               if (!strcmp(cdm_array[i].keyword, ORANGEFS_VERBOSE))
+                       client_verbose_index = i;
+
+               if (!strcmp(cdm_array[i].keyword, ORANGEFS_ALL))
+                       client_all_index = i;
+
+               cds_head = cds_delimiter + 1;
+       }
+
+       rc = cdm_element_count;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: rc:%d:\n", __func__, rc);
+
+out:
+
+       return rc;
+
+}
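
The loop above expects client_debug_array_string to hold one "keyword mask1 mask2" line per keyword; a hypothetical example follows (keyword names other than "verbose" and "all", and all mask values, are invented):

    /*
     *   file 0000000000000400 0000000000000000
     *   verbose 00000000ffffffff 0000000000000000
     *   all ffffffffffffffff ffffffffffffffff
     */
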
+
+/*
+ * /sys/kernel/debug/orangefs/debug-help can be catted to
+ * see all the available kernel and client debug keywords.
+ *
+ * When the kernel boots, we have no idea what keywords the
+ * client supports, nor their associated masks.
+ *
+ * We pass through this function once at boot and stamp a
+ * boilerplate "we don't know" message for the client in the
+ * debug-help file. We pass through here again when the client
+ * starts and then we can fill out the debug-help file fully.
+ *
+ * The client might be restarted any number of times between
+ * reboots; we only build the debug-help file the first time.
+ */
+int orangefs_prepare_debugfs_help_string(int at_boot)
+{
+       int rc = -EINVAL;
+       int i;
+       int byte_count = 0;
+       char *client_title = "Client Debug Keywords:\n";
+       char *kernel_title = "Kernel Debug Keywords:\n";
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+       if (at_boot) {
+               byte_count += strlen(HELP_STRING_UNINITIALIZED);
+               client_title = HELP_STRING_UNINITIALIZED;
+       } else {
+               /*
+                * fill the client keyword/mask array and remember
+                * how many elements there were.
+                */
+               cdm_element_count =
+                       orangefs_prepare_cdm_array(client_debug_array_string);
+               if (cdm_element_count <= 0)
+                       goto out;
+
+               /* Count the bytes destined for debug_help_string. */
+               byte_count += strlen(client_title);
+
+               for (i = 0; i < cdm_element_count; i++) {
+                       byte_count += strlen(cdm_array[i].keyword) + 2;
+                       if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+                               pr_info("%s: overflow 1!\n", __func__);
+                               goto out;
+                       }
+               }
+
+               gossip_debug(GOSSIP_UTILS_DEBUG,
+                            "%s: cdm_element_count:%d:\n",
+                            __func__,
+                            cdm_element_count);
+       }
+
+       byte_count += strlen(kernel_title);
+       for (i = 0; i < num_kmod_keyword_mask_map; i++) {
+               byte_count +=
+                       strlen(s_kmod_keyword_mask_map[i].keyword) + 2;
+               if (byte_count >= DEBUG_HELP_STRING_SIZE) {
+                       pr_info("%s: overflow 2!\n", __func__);
+                       goto out;
+               }
+       }
+
+       /* build debug_help_string. */
+       debug_help_string = kzalloc(DEBUG_HELP_STRING_SIZE, GFP_KERNEL);
+       if (!debug_help_string) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       strcat(debug_help_string, client_title);
+
+       if (!at_boot) {
+               for (i = 0; i < cdm_element_count; i++) {
+                       strcat(debug_help_string, "\t");
+                       strcat(debug_help_string, cdm_array[i].keyword);
+                       strcat(debug_help_string, "\n");
+               }
+       }
+
+       strcat(debug_help_string, "\n");
+       strcat(debug_help_string, kernel_title);
+
+       for (i = 0; i < num_kmod_keyword_mask_map; i++) {
+               strcat(debug_help_string, "\t");
+               strcat(debug_help_string, s_kmod_keyword_mask_map[i].keyword);
+               strcat(debug_help_string, "\n");
+       }
+
+       rc = 0;
+
+out:
+
+       return rc;
+
+}
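
With the concatenation above, the debug-help contents end up in roughly this shape once the client has started (keyword names are placeholders):

    /*
     *   Client Debug Keywords:
     *           file
     *           verbose
     *           all
     *
     *   Kernel Debug Keywords:
     *           super
     *           inode
     */
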
+
+/*
+ * kernel = type 0
+ * client = type 1
+ */
+void debug_mask_to_string(void *mask, int type)
+{
+       int i;
+       int len = 0;
+       char *debug_string;
+       int element_count = 0;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+       if (type) {
+               debug_string = client_debug_string;
+               element_count = cdm_element_count;
+       } else {
+               debug_string = kernel_debug_string;
+               element_count = num_kmod_keyword_mask_map;
+       }
+
+       memset(debug_string, 0, ORANGEFS_MAX_DEBUG_STRING_LEN);
+
+       /*
+        * Some keywords, like "all" or "verbose", are amalgams of
+        * numerous other keywords. Make a special check for those
+        * before grinding through the whole mask only to find out
+        * later...
+        */
+       if (check_amalgam_keyword(mask, type))
+               goto out;
+
+       /* Build the debug string. */
+       for (i = 0; i < element_count; i++)
+               if (type)
+                       do_c_string(mask, i);
+               else
+                       do_k_string(mask, i);
+
+       len = strlen(debug_string);
+
+       if ((len) && (type))
+               client_debug_string[len - 1] = '\0';
+       else if (len)
+               kernel_debug_string[len - 1] = '\0';
+       else if (type)
+               strcpy(client_debug_string, "none");
+       else
+               strcpy(kernel_debug_string, "none");
+
+out:
+       gossip_debug(GOSSIP_UTILS_DEBUG,
+                    "%s: string:%s:\n",
+                    __func__,
+                    debug_string);
+
+       return;
+
+}
+
+void do_k_string(void *k_mask, int index)
+{
+       __u64 *mask = (__u64 *) k_mask;
+
+       if (keyword_is_amalgam((char *) s_kmod_keyword_mask_map[index].keyword))
+               goto out;
+
+       if (*mask & s_kmod_keyword_mask_map[index].mask_val) {
+               if ((strlen(kernel_debug_string) +
+                    strlen(s_kmod_keyword_mask_map[index].keyword))
+                       < ORANGEFS_MAX_DEBUG_STRING_LEN - 1) {
+                               strcat(kernel_debug_string,
+                                      s_kmod_keyword_mask_map[index].keyword);
+                               strcat(kernel_debug_string, ",");
+                       } else {
+                               gossip_err("%s: overflow!\n", __func__);
+                               strcpy(kernel_debug_string, ORANGEFS_ALL);
+                               goto out;
+                       }
+       }
+
+out:
+
+       return;
+}
+
+void do_c_string(void *c_mask, int index)
+{
+       struct client_debug_mask *mask = (struct client_debug_mask *) c_mask;
+
+       if (keyword_is_amalgam(cdm_array[index].keyword))
+               goto out;
+
+       if ((mask->mask1 & cdm_array[index].mask1) ||
+           (mask->mask2 & cdm_array[index].mask2)) {
+               if ((strlen(client_debug_string) +
+                    strlen(cdm_array[index].keyword) + 1)
+                       < ORANGEFS_MAX_DEBUG_STRING_LEN - 2) {
+                               strcat(client_debug_string,
+                                      cdm_array[index].keyword);
+                               strcat(client_debug_string, ",");
+                       } else {
+                               gossip_err("%s: overflow!\n", __func__);
+                               strcpy(client_debug_string, ORANGEFS_ALL);
+                               goto out;
+                       }
+       }
+out:
+       return;
+}
+
+int keyword_is_amalgam(char *keyword)
+{
+       int rc = 0;
+
+       if ((!strcmp(keyword, ORANGEFS_ALL)) || (!strcmp(keyword, ORANGEFS_VERBOSE)))
+               rc = 1;
+
+       return rc;
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
+ *
+ * return 1 if we found an amalgam.
+ */
+int check_amalgam_keyword(void *mask, int type)
+{
+       __u64 *k_mask;
+       struct client_debug_mask *c_mask;
+       int k_all_index = num_kmod_keyword_mask_map - 1;
+       int rc = 0;
+
+       if (type) {
+               c_mask = (struct client_debug_mask *) mask;
+
+               if ((c_mask->mask1 == cdm_array[client_all_index].mask1) &&
+                   (c_mask->mask2 == cdm_array[client_all_index].mask2)) {
+                       strcpy(client_debug_string, ORANGEFS_ALL);
+                       rc = 1;
+                       goto out;
+               }
+
+               if ((c_mask->mask1 == cdm_array[client_verbose_index].mask1) &&
+                   (c_mask->mask2 == cdm_array[client_verbose_index].mask2)) {
+                       strcpy(client_debug_string, ORANGEFS_VERBOSE);
+                       rc = 1;
+                       goto out;
+               }
+
+       } else {
+               k_mask = (__u64 *) mask;
+
+               if (*k_mask >= s_kmod_keyword_mask_map[k_all_index].mask_val) {
+                       strcpy(kernel_debug_string, ORANGEFS_ALL);
+                       rc = 1;
+                       goto out;
+               }
+       }
+
+out:
+
+       return rc;
+}
+
+/*
+ * kernel = type 0
+ * client = type 1
+ */
+void debug_string_to_mask(char *debug_string, void *mask, int type)
+{
+       char *unchecked_keyword;
+       int i;
+       char *strsep_fodder = kstrdup(debug_string, GFP_KERNEL);
+       char *original_pointer;
+       int element_count = 0;
+       struct client_debug_mask *c_mask;
+       __u64 *k_mask;
+
+       gossip_debug(GOSSIP_UTILS_DEBUG, "%s: start\n", __func__);
+
+       if (type) {
+               c_mask = (struct client_debug_mask *)mask;
+               element_count = cdm_element_count;
+       } else {
+               k_mask = (__u64 *)mask;
+               *k_mask = 0;
+               element_count = num_kmod_keyword_mask_map;
+       }
+
+       original_pointer = strsep_fodder;
+       while ((unchecked_keyword = strsep(&strsep_fodder, ",")))
+               if (strlen(unchecked_keyword)) {
+                       for (i = 0; i < element_count; i++)
+                               if (type)
+                                       do_c_mask(i,
+                                                 unchecked_keyword,
+                                                 &c_mask);
+                               else
+                                       do_k_mask(i,
+                                                 unchecked_keyword,
+                                                 &k_mask);
+               }
+
+       kfree(original_pointer);
+}
+
+void do_c_mask(int i,
+              char *unchecked_keyword,
+              struct client_debug_mask **sane_mask)
+{
+
+       if (!strcmp(cdm_array[i].keyword, unchecked_keyword)) {
+               (**sane_mask).mask1 = (**sane_mask).mask1 | cdm_array[i].mask1;
+               (**sane_mask).mask2 = (**sane_mask).mask2 | cdm_array[i].mask2;
+       }
+}
+
+void do_k_mask(int i, char *unchecked_keyword, __u64 **sane_mask)
+{
+
+       if (!strcmp(s_kmod_keyword_mask_map[i].keyword, unchecked_keyword))
+               **sane_mask = (**sane_mask) |
+                               s_kmod_keyword_mask_map[i].mask_val;
+}
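
A minimal round-trip sketch for the kernel-side (type 0) helpers above; the keyword string is hypothetical:

    __u64 kmask = 0;

    debug_string_to_mask("super,inode", &kmask, 0);  /* keywords -> mask bits */
    debug_mask_to_string(&kmask, 0);                 /* mask bits -> kernel_debug_string */
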
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
new file mode 100644 (file)
index 0000000..6ac0c60
--- /dev/null
@@ -0,0 +1,453 @@
+#include <linux/types.h>
+#include <linux/spinlock_types.h>
+#include <linux/slab.h>
+#include <linux/ioctl.h>
+
+extern struct client_debug_mask *cdm_array;
+extern char *debug_help_string;
+extern int help_string_initialized;
+extern struct dentry *debug_dir;
+extern struct dentry *help_file_dentry;
+extern struct dentry *client_debug_dentry;
+extern const struct file_operations debug_help_fops;
+extern int client_all_index;
+extern int client_verbose_index;
+extern int cdm_element_count;
+#define DEBUG_HELP_STRING_SIZE 4096
+#define HELP_STRING_UNINITIALIZED \
+       "Client Debug Keywords are unknown until the first time\n" \
+       "the client is started after boot.\n"
+#define ORANGEFS_KMOD_DEBUG_HELP_FILE "debug-help"
+#define ORANGEFS_KMOD_DEBUG_FILE "kernel-debug"
+#define ORANGEFS_CLIENT_DEBUG_FILE "client-debug"
+#define ORANGEFS_VERBOSE "verbose"
+#define ORANGEFS_ALL "all"
+
+/* pvfs2-config.h ***********************************************************/
+#define ORANGEFS_VERSION_MAJOR 2
+#define ORANGEFS_VERSION_MINOR 9
+#define ORANGEFS_VERSION_SUB 0
+
+/* khandle stuff  ***********************************************************/
+
+/*
+ * The 2.9 core will put 64 bit handles in here like this:
+ *    1234 0000 0000 5678
+ * The 3.0 and beyond cores will put 128 bit handles in here like this:
+ *    1234 5678 90AB CDEF
+ * The kernel module will always use the first four bytes and
+ * the last four bytes as an inum.
+ */
+struct orangefs_khandle {
+       unsigned char u[16];
+}  __aligned(8);
+
+/*
+ * kernel version of an object ref.
+ */
+struct orangefs_object_kref {
+       struct orangefs_khandle khandle;
+       __s32 fs_id;
+       __s32 __pad1;
+};
+
+/*
+ * Compare two khandles. Assumes little endian, hence the comparison runs
+ * from the highest (most significant) byte down to the lowest.
+ */
+static inline int ORANGEFS_khandle_cmp(const struct orangefs_khandle *kh1,
+                                  const struct orangefs_khandle *kh2)
+{
+       int i;
+
+       for (i = 15; i >= 0; i--) {
+               if (kh1->u[i] > kh2->u[i])
+                       return 1;
+               if (kh1->u[i] < kh2->u[i])
+                       return -1;
+       }
+
+       return 0;
+}
+
+static inline void ORANGEFS_khandle_to(const struct orangefs_khandle *kh,
+                                  void *p, int size)
+{
+
+       memset(p, 0, size);
+       memcpy(p, kh->u, 16);
+
+}
+
+static inline void ORANGEFS_khandle_from(struct orangefs_khandle *kh,
+                                    void *p, int size)
+{
+       memset(kh, 0, 16);
+       memcpy(kh->u, p, 16);
+
+}
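
A hypothetical helper (not the module's actual conversion) showing how the first and last four bytes mentioned above could be folded into an inode number:

    static inline __u64 orangefs_khandle_inum_sketch(const struct orangefs_khandle *kh)
    {
            __u64 inum = 0;
            int i;

            /* pack u[0..3] and u[12..15] into 64 bits, most significant first */
            for (i = 0; i < 4; i++)
                    inum = (inum << 8) | kh->u[i];
            for (i = 12; i < 16; i++)
                    inum = (inum << 8) | kh->u[i];
            return inum;
    }
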
+
+/* pvfs2-types.h ************************************************************/
+typedef __u32 ORANGEFS_uid;
+typedef __u32 ORANGEFS_gid;
+typedef __s32 ORANGEFS_fs_id;
+typedef __u32 ORANGEFS_permissions;
+typedef __u64 ORANGEFS_time;
+typedef __s64 ORANGEFS_size;
+typedef __u64 ORANGEFS_flags;
+typedef __u64 ORANGEFS_ds_position;
+typedef __s32 ORANGEFS_error;
+typedef __s64 ORANGEFS_offset;
+
+#define ORANGEFS_SUPER_MAGIC 0x20030528
+
+/*
+ * ORANGEFS error codes are signed 32-bit integers. Error codes are negative,
+ * but the sign is stripped before decoding.
+ */
+
+/* Bit 31 is not used since it is the sign. */
+
+/*
+ * Bit 30 specifies that this is an ORANGEFS error. An ORANGEFS error is either
+ * an encoded errno value or an ORANGEFS protocol error.
+ */
+#define ORANGEFS_ERROR_BIT (1 << 30)
+
+/*
+ * Bit 29 specifies that this is an ORANGEFS protocol error and not an encoded
+ * errno value.
+ */
+#define ORANGEFS_NON_ERRNO_ERROR_BIT (1 << 29)
+
+/*
+ * Bits 9, 8, and 7 specify the error class, which encodes the section of
+ * server code the error originated in for logging purposes. It is not used
+ * in the kernel except to be masked out.
+ */
+#define ORANGEFS_ERROR_CLASS_BITS 0x380
+
+/* Bits 6 - 0 are reserved for the actual error code. */
+#define ORANGEFS_ERROR_NUMBER_BITS 0x7f
+
+/* Encoded errno values decoded by PINT_errno_mapping in orangefs-utils.c. */
+
+/* Our own ORANGEFS protocol error codes. */
+#define ORANGEFS_ECANCEL    (1|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_EDEVINIT   (2|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_EDETAIL    (3|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_EHOSTNTFD  (4|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_EADDRNTFD  (5|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_ENORECVR   (6|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_ETRYAGAIN  (7|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_ENOTPVFS   (8|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
+#define ORANGEFS_ESECURITY  (9|ORANGEFS_NON_ERRNO_ERROR_BIT|ORANGEFS_ERROR_BIT)
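
As a concrete value, the first protocol error above works out as follows:

    /* ORANGEFS_ECANCEL = 1 | (1 << 29) | (1 << 30) = 0x60000001 */
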
+
+/* permission bits */
+#define ORANGEFS_O_EXECUTE (1 << 0)
+#define ORANGEFS_O_WRITE   (1 << 1)
+#define ORANGEFS_O_READ    (1 << 2)
+#define ORANGEFS_G_EXECUTE (1 << 3)
+#define ORANGEFS_G_WRITE   (1 << 4)
+#define ORANGEFS_G_READ    (1 << 5)
+#define ORANGEFS_U_EXECUTE (1 << 6)
+#define ORANGEFS_U_WRITE   (1 << 7)
+#define ORANGEFS_U_READ    (1 << 8)
+/* no ORANGEFS_U_VTX (sticky bit) */
+#define ORANGEFS_G_SGID    (1 << 10)
+#define ORANGEFS_U_SUID    (1 << 11)
+
+/* definition taken from stdint.h */
+#define INT32_MAX (2147483647)
+#define ORANGEFS_ITERATE_START    (INT32_MAX - 1)
+#define ORANGEFS_ITERATE_END      (INT32_MAX - 2)
+#define ORANGEFS_ITERATE_NEXT     (INT32_MAX - 3)
+#define ORANGEFS_READDIR_START ORANGEFS_ITERATE_START
+#define ORANGEFS_READDIR_END   ORANGEFS_ITERATE_END
+#define ORANGEFS_IMMUTABLE_FL FS_IMMUTABLE_FL
+#define ORANGEFS_APPEND_FL    FS_APPEND_FL
+#define ORANGEFS_NOATIME_FL   FS_NOATIME_FL
+#define ORANGEFS_MIRROR_FL    0x01000000ULL
+#define ORANGEFS_O_EXECUTE (1 << 0)
+#define ORANGEFS_FS_ID_NULL       ((__s32)0)
+#define ORANGEFS_ATTR_SYS_UID                   (1 << 0)
+#define ORANGEFS_ATTR_SYS_GID                   (1 << 1)
+#define ORANGEFS_ATTR_SYS_PERM                  (1 << 2)
+#define ORANGEFS_ATTR_SYS_ATIME                 (1 << 3)
+#define ORANGEFS_ATTR_SYS_CTIME                 (1 << 4)
+#define ORANGEFS_ATTR_SYS_MTIME                 (1 << 5)
+#define ORANGEFS_ATTR_SYS_TYPE                  (1 << 6)
+#define ORANGEFS_ATTR_SYS_ATIME_SET             (1 << 7)
+#define ORANGEFS_ATTR_SYS_MTIME_SET             (1 << 8)
+#define ORANGEFS_ATTR_SYS_SIZE                  (1 << 20)
+#define ORANGEFS_ATTR_SYS_LNK_TARGET            (1 << 24)
+#define ORANGEFS_ATTR_SYS_DFILE_COUNT           (1 << 25)
+#define ORANGEFS_ATTR_SYS_DIRENT_COUNT          (1 << 26)
+#define ORANGEFS_ATTR_SYS_BLKSIZE               (1 << 28)
+#define ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT   (1 << 29)
+#define ORANGEFS_ATTR_SYS_COMMON_ALL   \
+       (ORANGEFS_ATTR_SYS_UID  |       \
+        ORANGEFS_ATTR_SYS_GID  |       \
+        ORANGEFS_ATTR_SYS_PERM |       \
+        ORANGEFS_ATTR_SYS_ATIME        |       \
+        ORANGEFS_ATTR_SYS_CTIME        |       \
+        ORANGEFS_ATTR_SYS_MTIME        |       \
+        ORANGEFS_ATTR_SYS_TYPE)
+
+#define ORANGEFS_ATTR_SYS_ALL_SETABLE          \
+(ORANGEFS_ATTR_SYS_COMMON_ALL-ORANGEFS_ATTR_SYS_TYPE)
+
+#define ORANGEFS_ATTR_SYS_ALL_NOHINT                   \
+       (ORANGEFS_ATTR_SYS_COMMON_ALL           |       \
+        ORANGEFS_ATTR_SYS_SIZE                 |       \
+        ORANGEFS_ATTR_SYS_LNK_TARGET           |       \
+        ORANGEFS_ATTR_SYS_DFILE_COUNT          |       \
+        ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT  |       \
+        ORANGEFS_ATTR_SYS_DIRENT_COUNT         |       \
+        ORANGEFS_ATTR_SYS_BLKSIZE)
+
+#define ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE            \
+       (ORANGEFS_ATTR_SYS_COMMON_ALL           |       \
+        ORANGEFS_ATTR_SYS_LNK_TARGET           |       \
+        ORANGEFS_ATTR_SYS_DFILE_COUNT          |       \
+        ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT  |       \
+        ORANGEFS_ATTR_SYS_DIRENT_COUNT         |       \
+        ORANGEFS_ATTR_SYS_BLKSIZE)
+
+#define ORANGEFS_XATTR_REPLACE 0x2
+#define ORANGEFS_XATTR_CREATE  0x1
+#define ORANGEFS_MAX_SERVER_ADDR_LEN 256
+#define ORANGEFS_NAME_MAX                256
+/*
+ * max extended attribute name len as imposed by the VFS and exploited for the
+ * upcall request types.
+ * NOTE: Please retain them as multiples of 8 even if you wish to change them.
+ * This is *NECESSARY* for supporting 32-bit user-space binaries on a 64-bit
+ * kernel. Due to the implementation within DBPF, this really needs to be
+ * ORANGEFS_NAME_MAX, which happens to have the same value, but there is no
+ * reason to let it break if that changes in the future.
+ */
+#define ORANGEFS_MAX_XATTR_NAMELEN   ORANGEFS_NAME_MAX /* Not the same as
+                                                * XATTR_NAME_MAX defined
+                                                * by <linux/xattr.h>
+                                                */
+#define ORANGEFS_MAX_XATTR_VALUELEN  8192      /* Not the same as XATTR_SIZE_MAX
+                                        * defined by <linux/xattr.h>
+                                        */
+#define ORANGEFS_MAX_XATTR_LISTLEN   16        /* Not the same as XATTR_LIST_MAX
+                                        * defined by <linux/xattr.h>
+                                        */
+/*
+ * ORANGEFS I/O operation types, used in both system and server interfaces.
+ */
+enum ORANGEFS_io_type {
+       ORANGEFS_IO_READ = 1,
+       ORANGEFS_IO_WRITE = 2
+};
+
+/*
+ * If this enum is modified the server parameters related to the precreate pool
+ * batch and low threshold sizes may need to be modified to reflect this
+ * change.
+ */
+enum orangefs_ds_type {
+       ORANGEFS_TYPE_NONE = 0,
+       ORANGEFS_TYPE_METAFILE = (1 << 0),
+       ORANGEFS_TYPE_DATAFILE = (1 << 1),
+       ORANGEFS_TYPE_DIRECTORY = (1 << 2),
+       ORANGEFS_TYPE_SYMLINK = (1 << 3),
+       ORANGEFS_TYPE_DIRDATA = (1 << 4),
+       ORANGEFS_TYPE_INTERNAL = (1 << 5)       /* for the server's private use */
+};
+
+/*
+ * ORANGEFS_certificate simply stores a buffer with the buffer size.
+ * The buffer can be converted to an OpenSSL X509 struct for use.
+ */
+struct ORANGEFS_certificate {
+       __u32 buf_size;
+       unsigned char *buf;
+};
+
+/*
+ * A credential identifies a user and is signed by the client/user
+ * private key.
+ */
+struct ORANGEFS_credential {
+       __u32 userid;   /* user id */
+       __u32 num_groups;       /* length of group_array */
+       __u32 *group_array;     /* groups for which the user is a member */
+       char *issuer;           /* alias of the issuing server */
+       __u64 timeout;  /* seconds after epoch to time out */
+       __u32 sig_size; /* length of the signature in bytes */
+       unsigned char *signature;       /* digital signature */
+       struct ORANGEFS_certificate certificate;        /* user certificate buffer */
+};
+#define extra_size_ORANGEFS_credential (ORANGEFS_REQ_LIMIT_GROUPS      *       \
+                                   sizeof(__u32)               +       \
+                                   ORANGEFS_REQ_LIMIT_ISSUER   +       \
+                                   ORANGEFS_REQ_LIMIT_SIGNATURE        +       \
+                                   extra_size_ORANGEFS_certificate)
+
+/* This structure is used only for VFS-client interaction */
+struct ORANGEFS_keyval_pair {
+       char key[ORANGEFS_MAX_XATTR_NAMELEN];
+       __s32 key_sz;   /* __s32 for portable, fixed-size structures */
+       __s32 val_sz;
+       char val[ORANGEFS_MAX_XATTR_VALUELEN];
+};
+
+/* pvfs2-sysint.h ***********************************************************/
+/* Describes attributes for a file, directory, or symlink. */
+struct ORANGEFS_sys_attr_s {
+       __u32 owner;
+       __u32 group;
+       __u32 perms;
+       __u64 atime;
+       __u64 mtime;
+       __u64 ctime;
+       __s64 size;
+
+       /* NOTE: caller must free if valid */
+       char *link_target;
+
+       /* Changed to __s32 so that size of structure does not change */
+       __s32 dfile_count;
+
+       /* Changed to __s32 so that size of structure does not change */
+       __s32 distr_dir_servers_initial;
+
+       /* Changed to __s32 so that size of structure does not change */
+       __s32 distr_dir_servers_max;
+
+       /* Changed to __s32 so that size of structure does not change */
+       __s32 distr_dir_split_size;
+
+       __u32 mirror_copies_count;
+
+       /* NOTE: caller must free if valid */
+       char *dist_name;
+
+       /* NOTE: caller must free if valid */
+       char *dist_params;
+
+       __s64 dirent_count;
+       enum orangefs_ds_type objtype;
+       __u64 flags;
+       __u32 mask;
+       __s64 blksize;
+};
+
+#define ORANGEFS_LOOKUP_LINK_NO_FOLLOW 0
+#define ORANGEFS_LOOKUP_LINK_FOLLOW    1
+
+/* pint-dev.h ***************************************************************/
+
+/* parameter structure used in ORANGEFS_DEV_DEBUG ioctl command */
+struct dev_mask_info_s {
+       enum {
+               KERNEL_MASK,
+               CLIENT_MASK,
+       } mask_type;
+       __u64 mask_value;
+};
+
+struct dev_mask2_info_s {
+       __u64 mask1_value;
+       __u64 mask2_value;
+};
+
+/* pvfs2-util.h *************************************************************/
+__s32 ORANGEFS_util_translate_mode(int mode);
+
+/* pvfs2-debug.h ************************************************************/
+#include "orangefs-debug.h"
+
+/* pvfs2-internal.h *********************************************************/
+#define llu(x) (unsigned long long)(x)
+#define lld(x) (long long)(x)
+
+/* pint-dev-shared.h ********************************************************/
+#define ORANGEFS_DEV_MAGIC 'k'
+
+#define ORANGEFS_READDIR_DEFAULT_DESC_COUNT  5
+
+#define DEV_GET_MAGIC           0x1
+#define DEV_GET_MAX_UPSIZE      0x2
+#define DEV_GET_MAX_DOWNSIZE    0x3
+#define DEV_MAP                 0x4
+#define DEV_REMOUNT_ALL         0x5
+#define DEV_DEBUG               0x6
+#define DEV_UPSTREAM            0x7
+#define DEV_CLIENT_MASK         0x8
+#define DEV_CLIENT_STRING       0x9
+#define DEV_MAX_NR              0xa
+
+/* supported ioctls, codes are with respect to user-space */
+enum {
+       ORANGEFS_DEV_GET_MAGIC = _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAGIC, __s32),
+       ORANGEFS_DEV_GET_MAX_UPSIZE =
+           _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAX_UPSIZE, __s32),
+       ORANGEFS_DEV_GET_MAX_DOWNSIZE =
+           _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAX_DOWNSIZE, __s32),
+       ORANGEFS_DEV_MAP = _IO(ORANGEFS_DEV_MAGIC, DEV_MAP),
+       ORANGEFS_DEV_REMOUNT_ALL = _IO(ORANGEFS_DEV_MAGIC, DEV_REMOUNT_ALL),
+       ORANGEFS_DEV_DEBUG = _IOR(ORANGEFS_DEV_MAGIC, DEV_DEBUG, __s32),
+       ORANGEFS_DEV_UPSTREAM = _IOW(ORANGEFS_DEV_MAGIC, DEV_UPSTREAM, int),
+       ORANGEFS_DEV_CLIENT_MASK = _IOW(ORANGEFS_DEV_MAGIC,
+                                   DEV_CLIENT_MASK,
+                                   struct dev_mask2_info_s),
+       ORANGEFS_DEV_CLIENT_STRING = _IOW(ORANGEFS_DEV_MAGIC,
+                                     DEV_CLIENT_STRING,
+                                     char *),
+       ORANGEFS_DEV_MAXNR = DEV_MAX_NR,
+};
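
The request codes above are built with the standard Linux _IO*() macros from the 'k' magic byte and the DEV_* ordinals, so user space can reconstruct the same numbers. A minimal sketch, assuming a Linux toolchain where <sys/ioctl.h> exposes the _IO*() macros and using int32_t in place of the kernel's __s32:

/* Sketch: reproducing two of the ORANGEFS device ioctl numbers in user space. */
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>

#define ORANGEFS_DEV_MAGIC 'k'
#define DEV_GET_MAGIC      0x1
#define DEV_MAP            0x4

int main(void)
{
        /* _IOW(): user space passes an __s32 (int32_t here) to the driver. */
        unsigned long get_magic = _IOW(ORANGEFS_DEV_MAGIC, DEV_GET_MAGIC, int32_t);
        /* _IO(): command with no argument payload. */
        unsigned long dev_map = _IO(ORANGEFS_DEV_MAGIC, DEV_MAP);

        printf("ORANGEFS_DEV_GET_MAGIC = 0x%lx\n", get_magic);
        printf("ORANGEFS_DEV_MAP       = 0x%lx\n", dev_map);
        return 0;
}

With the usual asm-generic encoding on x86-64 this typically prints 0x40046b01 and 0x6b04.
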
+
+/*
+ * version number for use in communicating between kernel space and user
+ * space. Zero signifies the upstream version of the kernel module.
+ */
+#define ORANGEFS_KERNEL_PROTO_VERSION 0
+#define ORANGEFS_MINIMUM_USERSPACE_VERSION 20904
+
+/*
+ * describes memory regions to map in the ORANGEFS_DEV_MAP ioctl.
+ * NOTE: See devorangefs-req.c for 32 bit compat structure.
+ * Since this structure has a variable-sized layout that is different
+ * on 32 and 64 bit platforms, we need to normalize to a 64 bit layout
+ * on such systems before servicing ioctl calls from user-space binaries
+ * that may be 32 bit!
+ */
+struct ORANGEFS_dev_map_desc {
+       void *ptr;
+       __s32 total_size;
+       __s32 size;
+       __s32 count;
+};
+
+/* gossip.h *****************************************************************/
+
+#ifdef GOSSIP_DISABLE_DEBUG
+#define gossip_debug(mask, format, f...) do {} while (0)
+#else
+extern __u64 gossip_debug_mask;
+extern struct client_debug_mask client_debug_mask;
+
+/* try to avoid function call overhead by checking masks in macro */
+#define gossip_debug(mask, format, f...)                       \
+do {                                                           \
+       if (gossip_debug_mask & mask)                           \
+               printk(format, ##f);                            \
+} while (0)
+#endif /* GOSSIP_DISABLE_DEBUG */
+
+/* do file and line number printouts w/ the GNU preprocessor */
+#define gossip_ldebug(mask, format, f...)                              \
+               gossip_debug(mask, "%s: " format, __func__, ##f)
+
+#define gossip_err printk
+#define gossip_lerr(format, f...)                                      \
+               gossip_err("%s line %d: " format,                       \
+                          __FILE__,                                    \
+                          __LINE__,                                    \
+                          ##f)
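
gossip_debug() above gates each print on a runtime mask, so disabled categories cost only a test-and-branch, and gossip_ldebug() prepends the calling function's name via the GNU ##-variadic extension. A minimal user-space sketch of the same pattern; the mask bits and macro names below are invented for illustration:

/* Sketch: a mask-gated debug macro in the style of gossip_debug()/gossip_ldebug(). */
#include <stdio.h>
#include <stdint.h>

#define MY_SUPER_DEBUG 0x2
#define MY_WAIT_DEBUG  0x4

static uint64_t debug_mask = MY_SUPER_DEBUG;    /* only "super" messages enabled */

#define my_debug(mask, fmt, ...)                                \
do {                                                            \
        if (debug_mask & (mask))                                \
                printf(fmt, ##__VA_ARGS__);                     \
} while (0)

/* prepend the calling function's name, as gossip_ldebug() does */
#define my_ldebug(mask, fmt, ...)                               \
        my_debug(mask, "%s: " fmt, __func__, ##__VA_ARGS__)

int main(void)
{
        my_ldebug(MY_SUPER_DEBUG, "printed because the mask bit is set\n");
        my_ldebug(MY_WAIT_DEBUG, "suppressed because the mask bit is clear\n");
        return 0;
}
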
diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
new file mode 100644 (file)
index 0000000..93cc352
--- /dev/null
@@ -0,0 +1,544 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+#include <linux/parser.h>
+
+/* a cache for orangefs-inode objects (i.e. orangefs inode private data) */
+static struct kmem_cache *orangefs_inode_cache;
+
+/* list for storing orangefs specific superblocks in use */
+LIST_HEAD(orangefs_superblocks);
+
+DEFINE_SPINLOCK(orangefs_superblocks_lock);
+
+enum {
+       Opt_intr,
+       Opt_acl,
+       Opt_local_lock,
+
+       Opt_err
+};
+
+static const match_table_t tokens = {
+       { Opt_acl,              "acl" },
+       { Opt_intr,             "intr" },
+       { Opt_local_lock,       "local_lock" },
+       { Opt_err,      NULL }
+};
+
+
+static int parse_mount_options(struct super_block *sb, char *options,
+               int silent)
+{
+       struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(sb);
+       substring_t args[MAX_OPT_ARGS];
+       char *p;
+
+       /*
+        * Force any potential flags that might be set from the mount
+        * to zero, i.e., initialize to unset.
+        */
+       sb->s_flags &= ~MS_POSIXACL;
+       orangefs_sb->flags &= ~ORANGEFS_OPT_INTR;
+       orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK;
+
+       while ((p = strsep(&options, ",")) != NULL) {
+               int token;
+
+               if (!*p)
+                       continue;
+
+               token = match_token(p, tokens, args);
+               switch (token) {
+               case Opt_acl:
+                       sb->s_flags |= MS_POSIXACL;
+                       break;
+               case Opt_intr:
+                       orangefs_sb->flags |= ORANGEFS_OPT_INTR;
+                       break;
+               case Opt_local_lock:
+                       orangefs_sb->flags |= ORANGEFS_OPT_LOCAL_LOCK;
+                       break;
+               default:
+                       goto fail;
+               }
+       }
+
+       return 0;
+fail:
+       if (!silent)
+               gossip_err("Error: mount option [%s] is not supported.\n", p);
+       return -EINVAL;
+}
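
parse_mount_options() accepts exactly three tokens, acl, intr and local_lock, and any other option fails the mount with -EINVAL. Those tokens arrive as the comma-separated data argument of mount(2); a hedged sketch follows, in which the filesystem type string, device syntax and mount point are assumptions rather than values taken from this patch:

/* Sketch: mounting with the options that parse_mount_options() understands. */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* Device string, mount point and fs type name are placeholders. */
        if (mount("tcp://server:3334/orangefs", "/mnt/orangefs", "pvfs2",
                  0, "intr,acl") != 0) {
                perror("mount");
                return 1;
        }
        return 0;
}
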
+
+static void orangefs_inode_cache_ctor(void *req)
+{
+       struct orangefs_inode_s *orangefs_inode = req;
+
+       inode_init_once(&orangefs_inode->vfs_inode);
+       init_rwsem(&orangefs_inode->xattr_sem);
+
+       orangefs_inode->vfs_inode.i_version = 1;
+}
+
+static struct inode *orangefs_alloc_inode(struct super_block *sb)
+{
+       struct orangefs_inode_s *orangefs_inode;
+
+       orangefs_inode = kmem_cache_alloc(orangefs_inode_cache, GFP_KERNEL);
+       if (orangefs_inode == NULL) {
+               gossip_err("Failed to allocate orangefs_inode\n");
+               return NULL;
+       }
+
+       /*
+        * We want to clear everything except for rw_semaphore and the
+        * vfs_inode.
+        */
+       memset(&orangefs_inode->refn.khandle, 0, 16);
+       orangefs_inode->refn.fs_id = ORANGEFS_FS_ID_NULL;
+       orangefs_inode->last_failed_block_index_read = 0;
+       memset(orangefs_inode->link_target, 0, sizeof(orangefs_inode->link_target));
+       orangefs_inode->pinode_flags = 0;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_alloc_inode: allocated %p\n",
+                    &orangefs_inode->vfs_inode);
+       return &orangefs_inode->vfs_inode;
+}
+
+static void orangefs_destroy_inode(struct inode *inode)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                       "%s: deallocated %p destroying inode %pU\n",
+                       __func__, orangefs_inode, get_khandle_from_ino(inode));
+
+       kmem_cache_free(orangefs_inode_cache, orangefs_inode);
+}
+
+/*
+ * NOTE: information filled in here is typically reflected in the
+ * output of the system command 'df'.
+ */
+static int orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+       int ret = -ENOMEM;
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int flags = 0;
+       struct super_block *sb = NULL;
+
+       sb = dentry->d_sb;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_statfs: called on sb %p (fs_id is %d)\n",
+                    sb,
+                    (int)(ORANGEFS_SB(sb)->fs_id));
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_STATFS);
+       if (!new_op)
+               return ret;
+       new_op->upcall.req.statfs.fs_id = ORANGEFS_SB(sb)->fs_id;
+
+       if (ORANGEFS_SB(sb)->flags & ORANGEFS_OPT_INTR)
+               flags = ORANGEFS_OP_INTERRUPTIBLE;
+
+       ret = service_operation(new_op, "orangefs_statfs", flags);
+
+       if (new_op->downcall.status < 0)
+               goto out_op_release;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "%s: got %ld blocks available | "
+                    "%ld blocks total | %ld block size | "
+                    "%ld files total | %ld files avail\n",
+                    __func__,
+                    (long)new_op->downcall.resp.statfs.blocks_avail,
+                    (long)new_op->downcall.resp.statfs.blocks_total,
+                    (long)new_op->downcall.resp.statfs.block_size,
+                    (long)new_op->downcall.resp.statfs.files_total,
+                    (long)new_op->downcall.resp.statfs.files_avail);
+
+       buf->f_type = sb->s_magic;
+       memcpy(&buf->f_fsid, &ORANGEFS_SB(sb)->fs_id, sizeof(buf->f_fsid));
+       buf->f_bsize = new_op->downcall.resp.statfs.block_size;
+       buf->f_namelen = ORANGEFS_NAME_LEN;
+
+       buf->f_blocks = (sector_t) new_op->downcall.resp.statfs.blocks_total;
+       buf->f_bfree = (sector_t) new_op->downcall.resp.statfs.blocks_avail;
+       buf->f_bavail = (sector_t) new_op->downcall.resp.statfs.blocks_avail;
+       buf->f_files = (sector_t) new_op->downcall.resp.statfs.files_total;
+       buf->f_ffree = (sector_t) new_op->downcall.resp.statfs.files_avail;
+       buf->f_frsize = sb->s_blocksize;
+
+out_op_release:
+       op_release(new_op);
+       gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_statfs: returning %d\n", ret);
+       return ret;
+}
+
+/*
+ * Remount as initiated by VFS layer.  We just need to reparse the mount
+ * options, no need to signal pvfs2-client-core about it.
+ */
+static int orangefs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+       gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount_fs: called\n");
+       return parse_mount_options(sb, data, 1);
+}
+
+/*
+ * Remount as initiated by pvfs2-client-core on restart.  This is used to
+ * repopulate mount information left from previous pvfs2-client-core.
+ *
+ * The idea here is that, given a valid superblock, we're re-initializing
+ * the user-space client with the initial mount information specified when
+ * the superblock was first created.  This is very different from the first
+ * initialization/creation of a superblock.  We service the mount as a
+ * priority operation to make sure that it gets ahead of any other pending
+ * operation that is waiting for servicing; this way the pvfs2-client does
+ * not fail to start several times while other pending operations drain
+ * before the client regains all of the mount information from us.
+ * NOTE: this function assumes that the request_mutex is already acquired!
+ */
+int orangefs_remount(struct super_block *sb)
+{
+       struct orangefs_kernel_op_s *new_op;
+       int ret = -EINVAL;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount: called\n");
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT);
+       if (!new_op)
+               return -ENOMEM;
+       strncpy(new_op->upcall.req.fs_mount.orangefs_config_server,
+               ORANGEFS_SB(sb)->devname,
+               ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "Attempting ORANGEFS Remount via host %s\n",
+                    new_op->upcall.req.fs_mount.orangefs_config_server);
+
+       /*
+        * We assume that the calling function has already acquired the
+        * request_mutex to prevent other operations from bypassing
+        * this one.
+        */
+       ret = service_operation(new_op, "orangefs_remount",
+               ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_SEMAPHORE);
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_remount: mount got return value of %d\n",
+                    ret);
+       if (ret == 0) {
+               /*
+                * store the id assigned to this sb -- it's just a
+                * short-lived mapping that the system interface uses
+                * to map this superblock to a particular mount entry
+                */
+               ORANGEFS_SB(sb)->id = new_op->downcall.resp.fs_mount.id;
+               ORANGEFS_SB(sb)->mount_pending = 0;
+       }
+
+       op_release(new_op);
+       return ret;
+}
+
+int fsid_key_table_initialize(void)
+{
+       return 0;
+}
+
+void fsid_key_table_finalize(void)
+{
+}
+
+/* Called whenever the VFS dirties the inode in response to atime updates */
+static void orangefs_dirty_inode(struct inode *inode, int flags)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_dirty_inode: %pU\n",
+                    get_khandle_from_ino(inode));
+       SetAtimeFlag(orangefs_inode);
+}
+
+static const struct super_operations orangefs_s_ops = {
+       .alloc_inode = orangefs_alloc_inode,
+       .destroy_inode = orangefs_destroy_inode,
+       .dirty_inode = orangefs_dirty_inode,
+       .drop_inode = generic_delete_inode,
+       .statfs = orangefs_statfs,
+       .remount_fs = orangefs_remount_fs,
+       .show_options = generic_show_options,
+};
+
+static struct dentry *orangefs_fh_to_dentry(struct super_block *sb,
+                                 struct fid *fid,
+                                 int fh_len,
+                                 int fh_type)
+{
+       struct orangefs_object_kref refn;
+
+       if (fh_len < 5 || fh_type > 2)
+               return NULL;
+
+       ORANGEFS_khandle_from(&(refn.khandle), fid->raw, 16);
+       refn.fs_id = (u32) fid->raw[4];
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "fh_to_dentry: handle %pU, fs_id %d\n",
+                    &refn.khandle,
+                    refn.fs_id);
+
+       return d_obtain_alias(orangefs_iget(sb, &refn));
+}
+
+static int orangefs_encode_fh(struct inode *inode,
+                   __u32 *fh,
+                   int *max_len,
+                   struct inode *parent)
+{
+       int len = parent ? 10 : 5;
+       int type = 1;
+       struct orangefs_object_kref refn;
+
+       if (*max_len < len) {
+               gossip_lerr("fh buffer is too small for encoding\n");
+               *max_len = len;
+               type = 255;
+               goto out;
+       }
+
+       refn = ORANGEFS_I(inode)->refn;
+       ORANGEFS_khandle_to(&refn.khandle, fh, 16);
+       fh[4] = refn.fs_id;
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "Encoding fh: handle %pU, fsid %u\n",
+                    &refn.khandle,
+                    refn.fs_id);
+
+
+       if (parent) {
+               refn = ORANGEFS_I(parent)->refn;
+               ORANGEFS_khandle_to(&refn.khandle, (char *) fh + 20, 16);
+               fh[9] = refn.fs_id;
+
+               type = 2;
+               gossip_debug(GOSSIP_SUPER_DEBUG,
+                            "Encoding parent: handle %pU, fsid %u\n",
+                            &refn.khandle,
+                            refn.fs_id);
+       }
+       *max_len = len;
+
+out:
+       return type;
+}
+
+static const struct export_operations orangefs_export_ops = {
+       .encode_fh = orangefs_encode_fh,
+       .fh_to_dentry = orangefs_fh_to_dentry,
+};
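
orangefs_encode_fh() stores the 16-byte khandle in fh[0]..fh[3] and the fs_id in fh[4], and repeats that five-word layout for the parent in fh[5]..fh[9], which is why it advertises lengths of 5 and 10 32-bit words. A small self-contained check of that arithmetic; the struct name is invented:

/* Sketch: the five-word file handle layout produced by orangefs_encode_fh(). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct fh_words {
        uint32_t khandle[4];    /* 16-byte opaque handle, fh[0]..fh[3] */
        uint32_t fs_id;         /* fh[4] */
};

int main(void)
{
        assert(sizeof(struct fh_words) == 5 * sizeof(uint32_t));
        printf("single handle: %zu bytes, handle plus parent: %zu bytes\n",
               sizeof(struct fh_words), 2 * sizeof(struct fh_words));
        return 0;
}
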
+
+static int orangefs_fill_sb(struct super_block *sb,
+               struct orangefs_fs_mount_response *fs_mount,
+               void *data, int silent)
+{
+       int ret = -EINVAL;
+       struct inode *root = NULL;
+       struct dentry *root_dentry = NULL;
+       struct orangefs_object_kref root_object;
+
+       /* alloc and init our private orangefs sb info */
+       sb->s_fs_info =
+               kzalloc(sizeof(struct orangefs_sb_info_s), ORANGEFS_GFP_FLAGS);
+       if (!ORANGEFS_SB(sb))
+               return -ENOMEM;
+       ORANGEFS_SB(sb)->sb = sb;
+
+       ORANGEFS_SB(sb)->root_khandle = fs_mount->root_khandle;
+       ORANGEFS_SB(sb)->fs_id = fs_mount->fs_id;
+       ORANGEFS_SB(sb)->id = fs_mount->id;
+
+       if (data) {
+               ret = parse_mount_options(sb, data, silent);
+               if (ret)
+                       return ret;
+       }
+
+       /* Hang the xattr handlers off the superblock */
+       sb->s_xattr = orangefs_xattr_handlers;
+       sb->s_magic = ORANGEFS_SUPER_MAGIC;
+       sb->s_op = &orangefs_s_ops;
+       sb->s_d_op = &orangefs_dentry_operations;
+
+       sb->s_blocksize = orangefs_bufmap_size_query();
+       sb->s_blocksize_bits = orangefs_bufmap_shift_query();
+       sb->s_maxbytes = MAX_LFS_FILESIZE;
+
+       root_object.khandle = ORANGEFS_SB(sb)->root_khandle;
+       root_object.fs_id = ORANGEFS_SB(sb)->fs_id;
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "get inode %pU, fsid %d\n",
+                    &root_object.khandle,
+                    root_object.fs_id);
+
+       root = orangefs_iget(sb, &root_object);
+       if (IS_ERR(root))
+               return PTR_ERR(root);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "Allocated root inode [%p] with mode %x\n",
+                    root,
+                    root->i_mode);
+
+       /* allocates and places root dentry in dcache */
+       root_dentry = d_make_root(root);
+       if (!root_dentry)
+               return -ENOMEM;
+
+       sb->s_export_op = &orangefs_export_ops;
+       sb->s_root = root_dentry;
+       return 0;
+}
+
+struct dentry *orangefs_mount(struct file_system_type *fst,
+                          int flags,
+                          const char *devname,
+                          void *data)
+{
+       int ret = -EINVAL;
+       struct super_block *sb = ERR_PTR(-EINVAL);
+       struct orangefs_kernel_op_s *new_op;
+       struct dentry *d = ERR_PTR(-EINVAL);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_mount: called with devname %s\n",
+                    devname);
+
+       if (!devname) {
+               gossip_err("ERROR: device name not specified.\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT);
+       if (!new_op)
+               return ERR_PTR(-ENOMEM);
+
+       strncpy(new_op->upcall.req.fs_mount.orangefs_config_server,
+               devname,
+               ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "Attempting ORANGEFS Mount via host %s\n",
+                    new_op->upcall.req.fs_mount.orangefs_config_server);
+
+       ret = service_operation(new_op, "orangefs_mount", 0);
+       gossip_debug(GOSSIP_SUPER_DEBUG,
+                    "orangefs_mount: mount got return value of %d\n", ret);
+       if (ret)
+               goto free_op;
+
+       if (new_op->downcall.resp.fs_mount.fs_id == ORANGEFS_FS_ID_NULL) {
+               gossip_err("ERROR: Retrieved null fs_id\n");
+               ret = -EINVAL;
+               goto free_op;
+       }
+
+       sb = sget(fst, NULL, set_anon_super, flags, NULL);
+
+       if (IS_ERR(sb)) {
+               d = ERR_CAST(sb);
+               goto free_op;
+       }
+
+       ret = orangefs_fill_sb(sb,
+             &new_op->downcall.resp.fs_mount, data,
+             flags & MS_SILENT ? 1 : 0);
+
+       if (ret) {
+               d = ERR_PTR(ret);
+               goto free_op;
+       }
+
+       /*
+        * on successful mount, store the devname and data
+        * used
+        */
+       strncpy(ORANGEFS_SB(sb)->devname,
+               devname,
+               ORANGEFS_MAX_SERVER_ADDR_LEN);
+
+       /* mount_pending must be cleared */
+       ORANGEFS_SB(sb)->mount_pending = 0;
+
+       /*
+        * finally, add this sb to our list of known orangefs
+        * sb's
+        */
+       add_orangefs_sb(sb);
+       op_release(new_op);
+       return dget(sb->s_root);
+
+free_op:
+       gossip_err("orangefs_mount: mount request failed with %d\n", ret);
+       if (ret == -EINVAL) {
+               gossip_err("Ensure that all orangefs-servers have the same FS configuration files\n");
+               gossip_err("Look at pvfs2-client-core log file (typically /tmp/pvfs2-client.log) for more details\n");
+       }
+
+       op_release(new_op);
+
+       return d;
+}
+
+void orangefs_kill_sb(struct super_block *sb)
+{
+       gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_kill_sb: called\n");
+
+       /*
+        * issue the unmount to userspace to tell it to remove the
+        * dynamic mount info it has for this superblock
+        */
+       orangefs_unmount_sb(sb);
+
+       /* remove the sb from our list of orangefs specific sb's */
+       remove_orangefs_sb(sb);
+
+       /* provided sb cleanup */
+       kill_anon_super(sb);
+
+       /* free the orangefs superblock private data */
+       kfree(ORANGEFS_SB(sb));
+}
+
+int orangefs_inode_cache_initialize(void)
+{
+       orangefs_inode_cache = kmem_cache_create("orangefs_inode_cache",
+                                             sizeof(struct orangefs_inode_s),
+                                             0,
+                                             ORANGEFS_CACHE_CREATE_FLAGS,
+                                             orangefs_inode_cache_ctor);
+
+       if (!orangefs_inode_cache) {
+               gossip_err("Cannot create orangefs_inode_cache\n");
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+int orangefs_inode_cache_finalize(void)
+{
+       kmem_cache_destroy(orangefs_inode_cache);
+       return 0;
+}
diff --git a/fs/orangefs/symlink.c b/fs/orangefs/symlink.c
new file mode 100644 (file)
index 0000000..943884b
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+static const char *orangefs_get_link(struct dentry *dentry, struct inode *inode,
+                                    struct delayed_call *done)
+{
+       char *target;
+
+       if (!dentry)
+               return ERR_PTR(-ECHILD);
+
+       target =  ORANGEFS_I(dentry->d_inode)->link_target;
+
+       gossip_debug(GOSSIP_INODE_DEBUG,
+                    "%s: called on %s (target is %p)\n",
+                    __func__, (char *)dentry->d_name.name, target);
+
+       return target;
+}
+
+struct inode_operations orangefs_symlink_inode_operations = {
+       .readlink = generic_readlink,
+       .get_link = orangefs_get_link,
+       .setattr = orangefs_setattr,
+       .getattr = orangefs_getattr,
+       .listxattr = orangefs_listxattr,
+       .setxattr = generic_setxattr,
+       .permission = orangefs_permission,
+};
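
orangefs_get_link() simply returns the link_target string cached in the OrangeFS inode, so a readlink(2) from user space is served from that cached value. For reference, a user-space sketch in which the path is only a placeholder:

/* Sketch: reading a symlink target served by orangefs_get_link(). */
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char target[PATH_MAX];
        ssize_t n = readlink("/mnt/orangefs/some-link", target, sizeof(target) - 1);

        if (n < 0) {
                perror("readlink");
                return 1;
        }
        target[n] = '\0';
        printf("target: %s\n", target);
        return 0;
}
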
diff --git a/fs/orangefs/upcall.h b/fs/orangefs/upcall.h
new file mode 100644 (file)
index 0000000..781cbc3
--- /dev/null
@@ -0,0 +1,246 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+#ifndef __UPCALL_H
+#define __UPCALL_H
+
+/*
+ * This header file has been sanitized to fix 32-/64-bit
+ * interaction issues between client-core and the device.
+ */
+struct orangefs_io_request_s {
+       __s32 async_vfs_io;
+       __s32 buf_index;
+       __s32 count;
+       __s32 __pad1;
+       __s64 offset;
+       struct orangefs_object_kref refn;
+       enum ORANGEFS_io_type io_type;
+       __s32 readahead_size;
+};
+
+struct orangefs_lookup_request_s {
+       __s32 sym_follow;
+       __s32 __pad1;
+       struct orangefs_object_kref parent_refn;
+       char d_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_create_request_s {
+       struct orangefs_object_kref parent_refn;
+       struct ORANGEFS_sys_attr_s attributes;
+       char d_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_symlink_request_s {
+       struct orangefs_object_kref parent_refn;
+       struct ORANGEFS_sys_attr_s attributes;
+       char entry_name[ORANGEFS_NAME_LEN];
+       char target[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_getattr_request_s {
+       struct orangefs_object_kref refn;
+       __u32 mask;
+       __u32 __pad1;
+};
+
+struct orangefs_setattr_request_s {
+       struct orangefs_object_kref refn;
+       struct ORANGEFS_sys_attr_s attributes;
+};
+
+struct orangefs_remove_request_s {
+       struct orangefs_object_kref parent_refn;
+       char d_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_mkdir_request_s {
+       struct orangefs_object_kref parent_refn;
+       struct ORANGEFS_sys_attr_s attributes;
+       char d_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_readdir_request_s {
+       struct orangefs_object_kref refn;
+       __u64 token;
+       __s32 max_dirent_count;
+       __s32 buf_index;
+};
+
+struct orangefs_readdirplus_request_s {
+       struct orangefs_object_kref refn;
+       __u64 token;
+       __s32 max_dirent_count;
+       __u32 mask;
+       __s32 buf_index;
+       __s32 __pad1;
+};
+
+struct orangefs_rename_request_s {
+       struct orangefs_object_kref old_parent_refn;
+       struct orangefs_object_kref new_parent_refn;
+       char d_old_name[ORANGEFS_NAME_LEN];
+       char d_new_name[ORANGEFS_NAME_LEN];
+};
+
+struct orangefs_statfs_request_s {
+       __s32 fs_id;
+       __s32 __pad1;
+};
+
+struct orangefs_truncate_request_s {
+       struct orangefs_object_kref refn;
+       __s64 size;
+};
+
+struct orangefs_mmap_ra_cache_flush_request_s {
+       struct orangefs_object_kref refn;
+};
+
+struct orangefs_fs_mount_request_s {
+       char orangefs_config_server[ORANGEFS_MAX_SERVER_ADDR_LEN];
+};
+
+struct orangefs_fs_umount_request_s {
+       __s32 id;
+       __s32 fs_id;
+       char orangefs_config_server[ORANGEFS_MAX_SERVER_ADDR_LEN];
+};
+
+struct orangefs_getxattr_request_s {
+       struct orangefs_object_kref refn;
+       __s32 key_sz;
+       __s32 __pad1;
+       char key[ORANGEFS_MAX_XATTR_NAMELEN];
+};
+
+struct orangefs_setxattr_request_s {
+       struct orangefs_object_kref refn;
+       struct ORANGEFS_keyval_pair keyval;
+       __s32 flags;
+       __s32 __pad1;
+};
+
+struct orangefs_listxattr_request_s {
+       struct orangefs_object_kref refn;
+       __s32 requested_count;
+       __s32 __pad1;
+       __u64 token;
+};
+
+struct orangefs_removexattr_request_s {
+       struct orangefs_object_kref refn;
+       __s32 key_sz;
+       __s32 __pad1;
+       char key[ORANGEFS_MAX_XATTR_NAMELEN];
+};
+
+struct orangefs_op_cancel_s {
+       __u64 op_tag;
+};
+
+struct orangefs_fsync_request_s {
+       struct orangefs_object_kref refn;
+};
+
+enum orangefs_param_request_type {
+       ORANGEFS_PARAM_REQUEST_SET = 1,
+       ORANGEFS_PARAM_REQUEST_GET = 2
+};
+
+enum orangefs_param_request_op {
+       ORANGEFS_PARAM_REQUEST_OP_ACACHE_TIMEOUT_MSECS = 1,
+       ORANGEFS_PARAM_REQUEST_OP_ACACHE_HARD_LIMIT = 2,
+       ORANGEFS_PARAM_REQUEST_OP_ACACHE_SOFT_LIMIT = 3,
+       ORANGEFS_PARAM_REQUEST_OP_ACACHE_RECLAIM_PERCENTAGE = 4,
+       ORANGEFS_PARAM_REQUEST_OP_PERF_TIME_INTERVAL_SECS = 5,
+       ORANGEFS_PARAM_REQUEST_OP_PERF_HISTORY_SIZE = 6,
+       ORANGEFS_PARAM_REQUEST_OP_PERF_RESET = 7,
+       ORANGEFS_PARAM_REQUEST_OP_NCACHE_TIMEOUT_MSECS = 8,
+       ORANGEFS_PARAM_REQUEST_OP_NCACHE_HARD_LIMIT = 9,
+       ORANGEFS_PARAM_REQUEST_OP_NCACHE_SOFT_LIMIT = 10,
+       ORANGEFS_PARAM_REQUEST_OP_NCACHE_RECLAIM_PERCENTAGE = 11,
+       ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_TIMEOUT_MSECS = 12,
+       ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_HARD_LIMIT = 13,
+       ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_SOFT_LIMIT = 14,
+       ORANGEFS_PARAM_REQUEST_OP_STATIC_ACACHE_RECLAIM_PERCENTAGE = 15,
+       ORANGEFS_PARAM_REQUEST_OP_CLIENT_DEBUG = 16,
+       ORANGEFS_PARAM_REQUEST_OP_CCACHE_TIMEOUT_SECS = 17,
+       ORANGEFS_PARAM_REQUEST_OP_CCACHE_HARD_LIMIT = 18,
+       ORANGEFS_PARAM_REQUEST_OP_CCACHE_SOFT_LIMIT = 19,
+       ORANGEFS_PARAM_REQUEST_OP_CCACHE_RECLAIM_PERCENTAGE = 20,
+       ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_TIMEOUT_SECS = 21,
+       ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_HARD_LIMIT = 22,
+       ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_SOFT_LIMIT = 23,
+       ORANGEFS_PARAM_REQUEST_OP_CAPCACHE_RECLAIM_PERCENTAGE = 24,
+       ORANGEFS_PARAM_REQUEST_OP_TWO_MASK_VALUES = 25,
+};
+
+struct orangefs_param_request_s {
+       enum orangefs_param_request_type type;
+       enum orangefs_param_request_op op;
+       __s64 value;
+       char s_value[ORANGEFS_MAX_DEBUG_STRING_LEN];
+};
+
+enum orangefs_perf_count_request_type {
+       ORANGEFS_PERF_COUNT_REQUEST_ACACHE = 1,
+       ORANGEFS_PERF_COUNT_REQUEST_NCACHE = 2,
+       ORANGEFS_PERF_COUNT_REQUEST_CAPCACHE = 3,
+};
+
+struct orangefs_perf_count_request_s {
+       enum orangefs_perf_count_request_type type;
+       __s32 __pad1;
+};
+
+struct orangefs_fs_key_request_s {
+       __s32 fsid;
+       __s32 __pad1;
+};
+
+struct orangefs_upcall_s {
+       __s32 type;
+       __u32 uid;
+       __u32 gid;
+       int pid;
+       int tgid;
+       /* Trailers unused but must be retained for protocol compatibility. */
+       __s64 trailer_size;
+       char *trailer_buf;
+
+       union {
+               struct orangefs_io_request_s io;
+               struct orangefs_lookup_request_s lookup;
+               struct orangefs_create_request_s create;
+               struct orangefs_symlink_request_s sym;
+               struct orangefs_getattr_request_s getattr;
+               struct orangefs_setattr_request_s setattr;
+               struct orangefs_remove_request_s remove;
+               struct orangefs_mkdir_request_s mkdir;
+               struct orangefs_readdir_request_s readdir;
+               struct orangefs_readdirplus_request_s readdirplus;
+               struct orangefs_rename_request_s rename;
+               struct orangefs_statfs_request_s statfs;
+               struct orangefs_truncate_request_s truncate;
+               struct orangefs_mmap_ra_cache_flush_request_s ra_cache_flush;
+               struct orangefs_fs_mount_request_s fs_mount;
+               struct orangefs_fs_umount_request_s fs_umount;
+               struct orangefs_getxattr_request_s getxattr;
+               struct orangefs_setxattr_request_s setxattr;
+               struct orangefs_listxattr_request_s listxattr;
+               struct orangefs_removexattr_request_s removexattr;
+               struct orangefs_op_cancel_s cancel;
+               struct orangefs_fsync_request_s fsync;
+               struct orangefs_param_request_s param;
+               struct orangefs_perf_count_request_s perf_count;
+               struct orangefs_fs_key_request_s fs_key;
+       } req;
+};
+
+#endif /* __UPCALL_H */
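
Each upcall is a fixed header (type, uid/gid, pid/tgid, trailer bookkeeping) followed by a union of per-operation request bodies, so a single flat record can be handed to pvfs2-client-core regardless of the operation. A stripped-down, self-contained sketch of that header-plus-union pattern; the names and sizes below are invented and are not the structures above:

/* Sketch: the header-plus-union request layout, reduced to two operations. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_NAME_LEN    64
#define DEMO_OP_LOOKUP    1
#define DEMO_OP_TRUNCATE  2

struct demo_lookup_req   { char d_name[DEMO_NAME_LEN]; };
struct demo_truncate_req { int64_t size; };

struct demo_upcall {
        int32_t type;                   /* selects the union member below */
        uint32_t uid, gid;              /* caller identity */
        union {
                struct demo_lookup_req   lookup;
                struct demo_truncate_req truncate;
        } req;
};

int main(void)
{
        struct demo_upcall up = { .type = DEMO_OP_LOOKUP, .uid = 1000, .gid = 1000 };

        strncpy(up.req.lookup.d_name, "somefile", DEMO_NAME_LEN - 1);
        printf("upcall type %d, flat record size %zu bytes\n",
               (int)up.type, sizeof(up));
        return 0;
}
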
diff --git a/fs/orangefs/waitqueue.c b/fs/orangefs/waitqueue.c
new file mode 100644 (file)
index 0000000..191d886
--- /dev/null
@@ -0,0 +1,506 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ * (C) 2011 Omnibond Systems
+ *
+ * Changes by Acxiom Corporation to implement generic service_operation()
+ * function, Copyright Acxiom Corporation, 2005.
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  In-kernel waitqueue operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+
+static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *);
+static int wait_for_matching_downcall(struct orangefs_kernel_op_s *);
+
+/*
+ * Walk the list of operations that are present in the request queue and
+ * mark them as purged.
+ * NOTE: This is called from the device close path after client-core has
+ * guaranteed that no new operations can appear on the list, since the
+ * client-core is going to exit anyway.
+ */
+void purge_waiting_ops(void)
+{
+       struct orangefs_kernel_op_s *op;
+
+       spin_lock(&orangefs_request_list_lock);
+       list_for_each_entry(op, &orangefs_request_list, list) {
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "pvfs2-client-core: purging op tag %llu %s\n",
+                            llu(op->tag),
+                            get_opname_string(op));
+               spin_lock(&op->lock);
+               set_op_state_purged(op);
+               spin_unlock(&op->lock);
+       }
+       spin_unlock(&orangefs_request_list_lock);
+}
+
+static inline void
+add_op_to_request_list(struct orangefs_kernel_op_s *op)
+{
+       spin_lock(&orangefs_request_list_lock);
+       spin_lock(&op->lock);
+       set_op_state_waiting(op);
+       list_add_tail(&op->list, &orangefs_request_list);
+       spin_unlock(&orangefs_request_list_lock);
+       spin_unlock(&op->lock);
+       wake_up_interruptible(&orangefs_request_list_waitq);
+}
+
+static inline
+void add_priority_op_to_request_list(struct orangefs_kernel_op_s *op)
+{
+       spin_lock(&orangefs_request_list_lock);
+       spin_lock(&op->lock);
+       set_op_state_waiting(op);
+
+       list_add(&op->list, &orangefs_request_list);
+       spin_unlock(&orangefs_request_list_lock);
+       spin_unlock(&op->lock);
+       wake_up_interruptible(&orangefs_request_list_waitq);
+}
+
+/*
+ * Submits an ORANGEFS operation and waits for it to complete.
+ *
+ * Note op->downcall.status will contain the status of the operation (in
+ * errno format), whether provided by pvfs2-client or a result of failure to
+ * service the operation.  If the caller wishes to distinguish, then
+ * op->state can be checked to see if it was serviced or not.
+ *
+ * Returns contents of op->downcall.status for convenience
+ */
+int service_operation(struct orangefs_kernel_op_s *op,
+                     const char *op_name,
+                     int flags)
+{
+       /* flags to modify behavior */
+       sigset_t orig_sigset;
+       int ret = 0;
+
+       DEFINE_WAIT(wait_entry);
+
+       op->upcall.tgid = current->tgid;
+       op->upcall.pid = current->pid;
+
+retry_servicing:
+       op->downcall.status = 0;
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "orangefs: service_operation: %s %p\n",
+                    op_name,
+                    op);
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "orangefs: operation posted by process: %s, pid: %i\n",
+                    current->comm,
+                    current->pid);
+
+       /* mask out signals if this operation is not to be interrupted */
+       if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
+               orangefs_block_signals(&orig_sigset);
+
+       if (!(flags & ORANGEFS_OP_NO_SEMAPHORE)) {
+               ret = mutex_lock_interruptible(&request_mutex);
+               /*
+                * check to see if we were interrupted while waiting for
+                * the semaphore
+                */
+               if (ret < 0) {
+                       if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
+                               orangefs_set_signals(&orig_sigset);
+                       op->downcall.status = ret;
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "orangefs: service_operation interrupted.\n");
+                       return ret;
+               }
+       }
+
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "%s:About to call is_daemon_in_service().\n",
+                    __func__);
+
+       if (is_daemon_in_service() < 0) {
+               /*
+                * By incrementing the per-operation attempt counter, we
+                * directly go into the timeout logic while waiting for
+                * the matching downcall to be read
+                */
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:client core is NOT in service(%d).\n",
+                            __func__,
+                            is_daemon_in_service());
+               op->attempts++;
+       }
+
+       /* queue up the operation */
+       if (flags & ORANGEFS_OP_PRIORITY) {
+               add_priority_op_to_request_list(op);
+       } else {
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:About to call add_op_to_request_list().\n",
+                            __func__);
+               add_op_to_request_list(op);
+       }
+
+       if (!(flags & ORANGEFS_OP_NO_SEMAPHORE))
+               mutex_unlock(&request_mutex);
+
+       /*
+        * If we are asked to service an operation that is asynchronous
+        * from the VFS perspective, we are done.
+        */
+       if (flags & ORANGEFS_OP_ASYNC)
+               return 0;
+
+       if (flags & ORANGEFS_OP_CANCELLATION) {
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:"
+                            "About to call wait_for_cancellation_downcall.\n",
+                            __func__);
+               ret = wait_for_cancellation_downcall(op);
+       } else {
+               ret = wait_for_matching_downcall(op);
+       }
+
+       if (ret < 0) {
+               /* failed to get matching downcall */
+               if (ret == -ETIMEDOUT) {
+                       gossip_err("orangefs: %s -- wait timed out; aborting attempt.\n",
+                                  op_name);
+               }
+               op->downcall.status = ret;
+       } else {
+               /* got matching downcall; make sure status is in errno format */
+               op->downcall.status =
+                   orangefs_normalize_to_errno(op->downcall.status);
+               ret = op->downcall.status;
+       }
+
+       if (!(flags & ORANGEFS_OP_INTERRUPTIBLE))
+               orangefs_set_signals(&orig_sigset);
+
+       BUG_ON(ret != op->downcall.status);
+       /* retry if operation has not been serviced and if requested */
+       if (!op_state_serviced(op) && op->downcall.status == -EAGAIN) {
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "orangefs: tag %llu (%s)"
+                            " -- operation to be retried (%d attempt)\n",
+                            llu(op->tag),
+                            op_name,
+                            op->attempts + 1);
+
+               if (!op->uses_shared_memory)
+                       /*
+                        * this operation doesn't use the shared memory
+                        * system
+                        */
+                       goto retry_servicing;
+
+               /* op uses shared memory */
+               if (orangefs_get_bufmap_init() == 0) {
+                       WARN_ON(1);
+                       /*
+                        * This operation uses the shared memory system AND
+                        * the system is not yet ready. This situation occurs
+                        * when the client-core is restarted AND there were
+                        * operations waiting to be processed or were already
+                        * in process.
+                        */
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "uses_shared_memory is true.\n");
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "Client core in-service status(%d).\n",
+                                    is_daemon_in_service());
+                       gossip_debug(GOSSIP_WAIT_DEBUG, "bufmap_init:%d.\n",
+                                    orangefs_get_bufmap_init());
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "operation's status is 0x%0x.\n",
+                                    op->op_state);
+
+                       /*
+                        * let process sleep for a few seconds so shared
+                        * memory system can be initialized.
+                        */
+                       prepare_to_wait(&orangefs_bufmap_init_waitq,
+                                       &wait_entry,
+                                       TASK_INTERRUPTIBLE);
+
+                       /*
+                        * Wait for orangefs_bufmap_initialize() to wake me up
+                        * within the allotted time.
+                        */
+                       ret = schedule_timeout(
+                               ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS * HZ);
+
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "Value returned from schedule_timeout:"
+                                    "%d.\n",
+                                    ret);
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "Is shared memory available? (%d).\n",
+                                    orangefs_get_bufmap_init());
+
+                       finish_wait(&orangefs_bufmap_init_waitq, &wait_entry);
+
+                       if (orangefs_get_bufmap_init() == 0) {
+                               gossip_err("%s:The shared memory system has not started in %d seconds after the client core restarted.  Aborting user's request(%s).\n",
+                                          __func__,
+                                          ORANGEFS_BUFMAP_WAIT_TIMEOUT_SECS,
+                                          get_opname_string(op));
+                               return -EIO;
+                       }
+
+                       /*
+                        * Return to the calling function and re-populate a
+                        * shared memory buffer.
+                        */
+                       return -EAGAIN;
+               }
+       }
+
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "orangefs: service_operation %s returning: %d for %p.\n",
+                    op_name,
+                    ret,
+                    op);
+       return ret;
+}
+
+static void orangefs_clean_up_interrupted_operation(struct orangefs_kernel_op_s *op)
+{
+       /*
+        * handle interrupted cases depending on what state we were in when
+        * the interruption is detected.  there is a coarse grained lock
+        * across the operation.
+        *
+        * Called with op->lock held.
+        */
+       op->op_state |= OP_VFS_STATE_GIVEN_UP;
+
+       if (op_state_waiting(op)) {
+               /*
+                * upcall hasn't been read; remove op from upcall request
+                * list.
+                */
+               spin_unlock(&op->lock);
+               spin_lock(&orangefs_request_list_lock);
+               list_del(&op->list);
+               spin_unlock(&orangefs_request_list_lock);
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "Interrupted: Removed op %p from request_list\n",
+                            op);
+       } else if (op_state_in_progress(op)) {
+               /* op must be removed from the in progress htable */
+               spin_unlock(&op->lock);
+               spin_lock(&htable_ops_in_progress_lock);
+               list_del(&op->list);
+               spin_unlock(&htable_ops_in_progress_lock);
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "Interrupted: Removed op %p"
+                            " from htable_ops_in_progress\n",
+                            op);
+       } else if (!op_state_serviced(op)) {
+               spin_unlock(&op->lock);
+               gossip_err("interrupted operation is in a weird state 0x%x\n",
+                          op->op_state);
+       } else {
+               /*
+                * It is not intended for execution to flow here,
+                * but having this unlock here makes sparse happy.
+                */
+               gossip_err("%s: can't get here.\n", __func__);
+               spin_unlock(&op->lock);
+       }
+}
+
+/*
+ * Sleeps on the waitqueue waiting for a matching downcall.
+ * If client-core finishes servicing, then we are good to go.
+ * Else, if client-core exits, we get woken up here and retry with a timeout.
+ *
+ * Postcondition: when this call returns to the caller, the specified op will
+ * no longer be on any list or htable.
+ *
+ * Returns 0 on success and -errno on failure.
+ * Errors are:
+ * EAGAIN in case we want the caller to requeue and try again.
+ * EINTR/EIO/ETIMEDOUT indicating we are done trying to service this
+ * operation, either because client-core seems to be exiting too often
+ * or because we were interrupted.
+ */
+static int wait_for_matching_downcall(struct orangefs_kernel_op_s *op)
+{
+       int ret = -EINVAL;
+       DEFINE_WAIT(wait_entry);
+
+       while (1) {
+               spin_lock(&op->lock);
+               prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
+               if (op_state_serviced(op)) {
+                       spin_unlock(&op->lock);
+                       ret = 0;
+                       break;
+               }
+
+               if (unlikely(signal_pending(current))) {
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "*** %s:"
+                                    " operation interrupted by a signal (tag "
+                                    "%llu, op %p)\n",
+                                    __func__,
+                                    llu(op->tag),
+                                    op);
+                       orangefs_clean_up_interrupted_operation(op);
+                       ret = -EINTR;
+                       break;
+               }
+
+               /*
+                * if this was our first attempt and client-core
+                * has not purged our operation, we are happy to
+                * simply wait
+                */
+               if (op->attempts == 0 && !op_state_purged(op)) {
+                       spin_unlock(&op->lock);
+                       schedule();
+               } else {
+                       spin_unlock(&op->lock);
+                       /*
+                        * subsequent attempts, we retry exactly once
+                        * with timeouts
+                        */
+                       if (!schedule_timeout(op_timeout_secs * HZ)) {
+                               gossip_debug(GOSSIP_WAIT_DEBUG,
+                                            "*** %s:"
+                                            " operation timed out (tag"
+                                            " %llu, %p, att %d)\n",
+                                            __func__,
+                                            llu(op->tag),
+                                            op,
+                                            op->attempts);
+                               ret = -ETIMEDOUT;
+                               spin_lock(&op->lock);
+                               orangefs_clean_up_interrupted_operation(op);
+                               break;
+                       }
+               }
+               spin_lock(&op->lock);
+               op->attempts++;
+               /*
+                * if the operation was purged in the meantime, it
+                * is better to requeue it afresh but ensure that
+                * we have not been purged repeatedly. This could
+                * happen if client-core crashes when an op
+                * is being serviced, so we requeue the op, client
+                * core crashes again so we requeue the op, client
+                * core starts, and so on...
+                */
+               if (op_state_purged(op)) {
+                       ret = (op->attempts < ORANGEFS_PURGE_RETRY_COUNT) ?
+                                -EAGAIN :
+                                -EIO;
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "*** %s:"
+                                    " operation purged (tag "
+                                    "%llu, %p, att %d)\n",
+                                    __func__,
+                                    llu(op->tag),
+                                    op,
+                                    op->attempts);
+                       orangefs_clean_up_interrupted_operation(op);
+                       break;
+               }
+               spin_unlock(&op->lock);
+       }
+
+       spin_lock(&op->lock);
+       finish_wait(&op->waitq, &wait_entry);
+       spin_unlock(&op->lock);
+
+       return ret;
+}
+
+/*
+ * Similar to wait_for_matching_downcall(), but used in the special case
+ * of I/O cancellations.
+ *
+ * Note we need a special wait function because if this is called we already
+ *      know that a signal is pending in current and need to service the
+ *      cancellation upcall anyway.  The only way to exit this is to either
+ *      time out or have the cancellation serviced properly.
+ */
+static int wait_for_cancellation_downcall(struct orangefs_kernel_op_s *op)
+{
+       int ret = -EINVAL;
+       DEFINE_WAIT(wait_entry);
+
+       while (1) {
+               spin_lock(&op->lock);
+               prepare_to_wait(&op->waitq, &wait_entry, TASK_INTERRUPTIBLE);
+               if (op_state_serviced(op)) {
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "%s:op-state is SERVICED.\n",
+                                    __func__);
+                       spin_unlock(&op->lock);
+                       ret = 0;
+                       break;
+               }
+
+               if (signal_pending(current)) {
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "%s:operation interrupted by a signal (tag"
+                                    " %llu, op %p)\n",
+                                    __func__,
+                                    llu(op->tag),
+                                    op);
+                       orangefs_clean_up_interrupted_operation(op);
+                       ret = -EINTR;
+                       break;
+               }
+
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:About to call schedule_timeout.\n",
+                            __func__);
+               spin_unlock(&op->lock);
+               ret = schedule_timeout(op_timeout_secs * HZ);
+
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:Value returned from schedule_timeout(%d).\n",
+                            __func__,
+                            ret);
+               if (!ret) {
+                       gossip_debug(GOSSIP_WAIT_DEBUG,
+                                    "%s:*** operation timed out: %p\n",
+                                    __func__,
+                                    op);
+                       spin_lock(&op->lock);
+                       orangefs_clean_up_interrupted_operation(op);
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               gossip_debug(GOSSIP_WAIT_DEBUG,
+                            "%s:Breaking out of loop, regardless of value returned by schedule_timeout.\n",
+                            __func__);
+               ret = -ETIMEDOUT;
+               break;
+       }
+
+       spin_lock(&op->lock);
+       finish_wait(&op->waitq, &wait_entry);
+       spin_unlock(&op->lock);
+
+       gossip_debug(GOSSIP_WAIT_DEBUG,
+                    "%s:returning ret(%d)\n",
+                    __func__,
+                    ret);
+
+       return ret;
+}
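
Both wait helpers above share one shape: sleep until the op is marked serviced, bail out early if a signal is pending (or, for cancellations, only on timeout), and otherwise give up with -ETIMEDOUT after op_timeout_secs. As a loose user-space analogy only, not kernel code and with invented names, the same wait-until-flag-or-deadline structure can be written with POSIX threads:

/* Sketch: wait until a flag is set or a deadline passes, then report -ETIMEDOUT. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool serviced;                   /* stands in for op_state_serviced() */

static int wait_for_service(int timeout_secs)
{
        struct timespec deadline;
        int err = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_secs;

        pthread_mutex_lock(&lock);
        while (!serviced && err == 0)
                err = pthread_cond_timedwait(&cond, &lock, &deadline);
        pthread_mutex_unlock(&lock);

        return err ? -err : 0;          /* -ETIMEDOUT when the deadline passes */
}

int main(void)
{
        /* Nothing ever signals 'cond' here, so this times out after one second. */
        printf("wait_for_service returned %d\n", wait_for_service(1));
        return 0;
}
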
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
new file mode 100644 (file)
index 0000000..8e9ccf9
--- /dev/null
@@ -0,0 +1,540 @@
+/*
+ * (C) 2001 Clemson University and The University of Chicago
+ *
+ * See COPYING in top-level directory.
+ */
+
+/*
+ *  Linux VFS extended attribute operations.
+ */
+
+#include "protocol.h"
+#include "orangefs-kernel.h"
+#include "orangefs-bufmap.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+
+#define SYSTEM_ORANGEFS_KEY "system.pvfs2."
+#define SYSTEM_ORANGEFS_KEY_LEN 13
+
+/*
+ * This function returns
+ *   0 if the key corresponding to name is not meant to be printed as part
+ *     of a listxattr.
+ *   1 if the key corresponding to name is meant to be returned as part of
+ *     a listxattr.
+ * The keys that start with SYSTEM_ORANGEFS_KEY are the ones to avoid printing.
+ */
+static int is_reserved_key(const char *key, size_t size)
+{
+
+       if (size < SYSTEM_ORANGEFS_KEY_LEN)
+               return 1;
+
+       return strncmp(key, SYSTEM_ORANGEFS_KEY, SYSTEM_ORANGEFS_KEY_LEN) ?  1 : 0;
+}
+
+static inline int convert_to_internal_xattr_flags(int setxattr_flags)
+{
+       int internal_flag = 0;
+
+       if (setxattr_flags & XATTR_REPLACE) {
+               /* Attribute must exist! */
+               internal_flag = ORANGEFS_XATTR_REPLACE;
+       } else if (setxattr_flags & XATTR_CREATE) {
+               /* Attribute must not exist */
+               internal_flag = ORANGEFS_XATTR_CREATE;
+       }
+       return internal_flag;
+}
+
+
+/*
+ * Tries to fetch the value of a specified extended attribute key on a given
+ * file into a user-specified buffer. Note that the getxattr
+ * interface allows users to probe the size of an
+ * extended attribute by passing in a size of 0.
+ * Thus our return value is always the size of the attribute
+ * unless the key does not exist for the file and/or there
+ * were errors in fetching the attribute value.
+ */
+ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
+               const char *name, void *buffer, size_t size)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op = NULL;
+       ssize_t ret = -ENOMEM;
+       ssize_t length = 0;
+       int fsuid;
+       int fsgid;
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "%s: prefix %s name %s, buffer_size %zd\n",
+                    __func__, prefix, name, size);
+
+       if (name == NULL || (size > 0 && buffer == NULL)) {
+               gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n");
+               return -EINVAL;
+       }
+       if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+               gossip_err("Invalid key length (%d)\n",
+                          (int)(strlen(name) + strlen(prefix)));
+               return -EINVAL;
+       }
+
+       fsuid = from_kuid(current_user_ns(), current_fsuid());
+       fsgid = from_kgid(current_user_ns(), current_fsgid());
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "getxattr on inode %pU, name %s "
+                    "(uid %o, gid %o)\n",
+                    get_khandle_from_ino(inode),
+                    name,
+                    fsuid,
+                    fsgid);
+
+       down_read(&orangefs_inode->xattr_sem);
+
+       new_op = op_alloc(ORANGEFS_VFS_OP_GETXATTR);
+       if (!new_op)
+               goto out_unlock;
+
+       new_op->upcall.req.getxattr.refn = orangefs_inode->refn;
+       ret = snprintf((char *)new_op->upcall.req.getxattr.key,
+                      ORANGEFS_MAX_XATTR_NAMELEN, "%s%s", prefix, name);
+
+       /*
+        * NOTE: Although keys are meant to be NULL terminated textual
+        * strings, I am going to explicitly pass the length just in case
+        * we change this later on...
+        */
+       new_op->upcall.req.getxattr.key_sz = ret + 1;
+
+       ret = service_operation(new_op, "orangefs_inode_getxattr",
+                               get_interruptible_flag(inode));
+       if (ret != 0) {
+               if (ret == -ENOENT) {
+                       ret = -ENODATA;
+                       gossip_debug(GOSSIP_XATTR_DEBUG,
+                                    "orangefs_inode_getxattr: inode %pU key %s"
+                                    " does not exist!\n",
+                                    get_khandle_from_ino(inode),
+                                    (char *)new_op->upcall.req.getxattr.key);
+               }
+               goto out_release_op;
+       }
+
+       /*
+        * Length returned includes null terminator.
+        */
+       length = new_op->downcall.resp.getxattr.val_sz;
+
+       /*
+        * Just return the length of the queried attribute.
+        */
+       if (size == 0) {
+               ret = length;
+               goto out_release_op;
+       }
+
+       /*
+        * Check to see if the value length is > the provided buffer size.
+        */
+       if (length > size) {
+               ret = -ERANGE;
+               goto out_release_op;
+       }
+
+       memset(buffer, 0, size);
+       memcpy(buffer, new_op->downcall.resp.getxattr.val, length);
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "orangefs_inode_getxattr: inode %pU "
+                    "key %s key_sz %d, val_len %d\n",
+                    get_khandle_from_ino(inode),
+                    (char *)new_op->
+                               upcall.req.getxattr.key,
+                    (int)new_op->
+                               upcall.req.getxattr.key_sz,
+                    (int)length);
+
+       ret = length;
+
+out_release_op:
+       op_release(new_op);
+out_unlock:
+       up_read(&orangefs_inode->xattr_sem);
+       return ret;
+}
+
+static int orangefs_inode_removexattr(struct inode *inode,
+                           const char *prefix,
+                           const char *name,
+                           int flags)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op = NULL;
+       int ret = -ENOMEM;
+
+       down_write(&orangefs_inode->xattr_sem);
+       new_op = op_alloc(ORANGEFS_VFS_OP_REMOVEXATTR);
+       if (!new_op)
+               goto out_unlock;
+
+       new_op->upcall.req.removexattr.refn = orangefs_inode->refn;
+       /*
+        * NOTE: Although keys are meant to be NULL terminated
+        * textual strings, I am going to explicitly pass the
+        * length just in case we change this later on...
+        */
+       ret = snprintf((char *)new_op->upcall.req.removexattr.key,
+                      ORANGEFS_MAX_XATTR_NAMELEN,
+                      "%s%s",
+                      (prefix ? prefix : ""),
+                      name);
+       new_op->upcall.req.removexattr.key_sz = ret + 1;
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "orangefs_inode_removexattr: key %s, key_sz %d\n",
+                    (char *)new_op->upcall.req.removexattr.key,
+                    (int)new_op->upcall.req.removexattr.key_sz);
+
+       ret = service_operation(new_op,
+                               "orangefs_inode_removexattr",
+                               get_interruptible_flag(inode));
+       if (ret == -ENOENT) {
+               /*
+                * Request to replace a non-existent attribute is an error.
+                */
+               if (flags & XATTR_REPLACE)
+                       ret = -ENODATA;
+               else
+                       ret = 0;
+       }
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "orangefs_inode_removexattr: returning %d\n", ret);
+
+       op_release(new_op);
+out_unlock:
+       up_write(&orangefs_inode->xattr_sem);
+       return ret;
+}
+
+/*
+ * Tries to set an attribute for a given key on a file.
+ *
+ * Returns a negative number on error and 0 on success.  The key is text,
+ * but the value can be binary!
+ */
+int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
+               const char *name, const void *value, size_t size, int flags)
+{
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       int internal_flag = 0;
+       int ret = -ENOMEM;
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "%s: prefix %s, name %s, buffer_size %zd\n",
+                    __func__, prefix, name, size);
+
+       if (size < 0 ||
+           size >= ORANGEFS_MAX_XATTR_VALUELEN ||
+           flags < 0) {
+               gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n",
+                          (int)size,
+                          flags);
+               return -EINVAL;
+       }
+
+       if (name == NULL ||
+           (size > 0 && value == NULL)) {
+               gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n");
+               return -EINVAL;
+       }
+
+       internal_flag = convert_to_internal_xattr_flags(flags);
+
+       if (prefix) {
+               if (strlen(name) + strlen(prefix) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+                       gossip_err
+                           ("orangefs_inode_setxattr: bogus key size (%d)\n",
+                            (int)(strlen(name) + strlen(prefix)));
+                       return -EINVAL;
+               }
+       } else {
+               if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+                       gossip_err
+                           ("orangefs_inode_setxattr: bogus key size (%d)\n",
+                            (int)(strlen(name)));
+                       return -EINVAL;
+               }
+       }
+
+       /* This is equivalent to a removexattr */
+       if (size == 0 && value == NULL) {
+               gossip_debug(GOSSIP_XATTR_DEBUG,
+                            "removing xattr (%s%s)\n",
+                            prefix,
+                            name);
+               return orangefs_inode_removexattr(inode, prefix, name, flags);
+       }
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "setxattr on inode %pU, name %s\n",
+                    get_khandle_from_ino(inode),
+                    name);
+
+       down_write(&orangefs_inode->xattr_sem);
+       new_op = op_alloc(ORANGEFS_VFS_OP_SETXATTR);
+       if (!new_op)
+               goto out_unlock;
+
+
+       new_op->upcall.req.setxattr.refn = orangefs_inode->refn;
+       new_op->upcall.req.setxattr.flags = internal_flag;
+       /*
+        * NOTE: Although keys are meant to be NULL terminated textual
+        * strings, I am going to explicitly pass the length just in
+        * case we change this later on...
+        */
+       ret = snprintf((char *)new_op->upcall.req.setxattr.keyval.key,
+                      ORANGEFS_MAX_XATTR_NAMELEN,
+                      "%s%s",
+                      prefix, name);
+       new_op->upcall.req.setxattr.keyval.key_sz = ret + 1;
+       memcpy(new_op->upcall.req.setxattr.keyval.val, value, size);
+       new_op->upcall.req.setxattr.keyval.val_sz = size;
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "orangefs_inode_setxattr: key %s, key_sz %d "
+                    " value size %zd\n",
+                    (char *)new_op->upcall.req.setxattr.keyval.key,
+                    (int)new_op->upcall.req.setxattr.keyval.key_sz,
+                    size);
+
+       ret = service_operation(new_op,
+                               "orangefs_inode_setxattr",
+                               get_interruptible_flag(inode));
+
+       gossip_debug(GOSSIP_XATTR_DEBUG,
+                    "orangefs_inode_setxattr: returning %d\n",
+                    ret);
+
+       /* when request is serviced properly, free req op struct */
+       op_release(new_op);
+out_unlock:
+       up_write(&orangefs_inode->xattr_sem);
+       return ret;
+}
+
+/*
+ * Tries to get a specified object's keys into a user-specified buffer of a
+ * given size.  Note that, like the other xattr routines, this also allows
+ * you to pass in a NULL pointer and a size of 0 to probe the size needed
+ * for subsequent memory allocations.  Thus our return value is always the
+ * size of all the keys unless there were errors in fetching the keys!
+ */
+ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size)
+{
+       struct inode *inode = dentry->d_inode;
+       struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+       struct orangefs_kernel_op_s *new_op;
+       __u64 token = ORANGEFS_ITERATE_START;
+       ssize_t ret = -ENOMEM;
+       ssize_t total = 0;
+       ssize_t length = 0;
+       int count_keys = 0;
+       int key_size;
+       int i = 0;
+       int returned_count = 0;
+
+       if (size > 0 && buffer == NULL) {
+               gossip_err("%s: bogus NULL pointers\n", __func__);
+               return -EINVAL;
+       }
+       if (size < 0) {
+               gossip_err("Invalid size (%d)\n", (int)size);
+               return -EINVAL;
+       }
+
+       down_read(&orangefs_inode->xattr_sem);
+       new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR);
+       if (!new_op)
+               goto out_unlock;
+
+       if (buffer && size > 0)
+               memset(buffer, 0, size);
+
+try_again:
+       key_size = 0;
+       new_op->upcall.req.listxattr.refn = orangefs_inode->refn;
+       new_op->upcall.req.listxattr.token = token;
+       new_op->upcall.req.listxattr.requested_count =
+           (size == 0) ? 0 : ORANGEFS_MAX_XATTR_LISTLEN;
+       ret = service_operation(new_op, __func__,
+                               get_interruptible_flag(inode));
+       if (ret != 0)
+               goto done;
+
+       if (size == 0) {
+               /*
+                * This is a bit of a big upper limit, but I did not want to
+                * spend too much time getting this correct, since users end
+                * up allocating memory rather than us...
+                */
+               total = new_op->downcall.resp.listxattr.returned_count *
+                       ORANGEFS_MAX_XATTR_NAMELEN;
+               goto done;
+       }
+
+       length = new_op->downcall.resp.listxattr.keylen;
+       if (length == 0)
+               goto done;
+
+       returned_count = new_op->downcall.resp.listxattr.returned_count;
+       if (returned_count < 0 ||
+           returned_count >= ORANGEFS_MAX_XATTR_LISTLEN) {
+               gossip_err("%s: impossible value for returned_count:%d:\n",
+                          __func__,
+                          returned_count);
+               goto done;
+       }
+
+       /*
+        * Check to see how much can be fit in the buffer. Fit only whole keys.
+        */
+       for (i = 0; i < returned_count; i++) {
+               if (total + new_op->downcall.resp.listxattr.lengths[i] > size)
+                       goto done;
+
+               /*
+                * Since many dumb programs try to setxattr() on our reserved
+                * xattrs this is a feeble attempt at defeating those by not
+                * listing them in the output of listxattr.. sigh
+                */
+               if (is_reserved_key(new_op->downcall.resp.listxattr.key +
+                                   key_size,
+                                   new_op->downcall.resp.
+                                       listxattr.lengths[i])) {
+                       gossip_debug(GOSSIP_XATTR_DEBUG, "Copying key %d -> %s\n",
+                                       i, new_op->downcall.resp.listxattr.key +
+                                               key_size);
+                       memcpy(buffer + total,
+                               new_op->downcall.resp.listxattr.key + key_size,
+                               new_op->downcall.resp.listxattr.lengths[i]);
+                       total += new_op->downcall.resp.listxattr.lengths[i];
+                       count_keys++;
+               } else {
+                       gossip_debug(GOSSIP_XATTR_DEBUG, "[RESERVED] key %d -> %s\n",
+                                       i, new_op->downcall.resp.listxattr.key +
+                                               key_size);
+               }
+               key_size += new_op->downcall.resp.listxattr.lengths[i];
+       }
+
+       /*
+        * Since the buffer was large enough, we might have to continue
+        * fetching more keys!
+        */
+       token = new_op->downcall.resp.listxattr.token;
+       if (token != ORANGEFS_ITERATE_END)
+               goto try_again;
+
+done:
+       gossip_debug(GOSSIP_XATTR_DEBUG, "%s: returning %d"
+                    " [size of buffer %ld] (filled in %d keys)\n",
+                    __func__,
+                    ret ? (int)ret : (int)total,
+                    (long)size,
+                    count_keys);
+       op_release(new_op);
+       if (ret == 0)
+               ret = total;
+out_unlock:
+       up_read(&orangefs_inode->xattr_sem);
+       return ret;
+}
+
+static int orangefs_xattr_set_default(const struct xattr_handler *handler,
+                                     struct dentry *dentry,
+                                     const char *name,
+                                     const void *buffer,
+                                     size_t size,
+                                     int flags)
+{
+       return orangefs_inode_setxattr(dentry->d_inode,
+                                   ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+                                   name,
+                                   buffer,
+                                   size,
+                                   flags);
+}
+
+static int orangefs_xattr_get_default(const struct xattr_handler *handler,
+                                     struct dentry *dentry,
+                                     const char *name,
+                                     void *buffer,
+                                     size_t size)
+{
+       return orangefs_inode_getxattr(dentry->d_inode,
+                                   ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+                                   name,
+                                   buffer,
+                                   size);
+
+}
+
+static int orangefs_xattr_set_trusted(const struct xattr_handler *handler,
+                                    struct dentry *dentry,
+                                    const char *name,
+                                    const void *buffer,
+                                    size_t size,
+                                    int flags)
+{
+       return orangefs_inode_setxattr(dentry->d_inode,
+                                   ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
+                                   name,
+                                   buffer,
+                                   size,
+                                   flags);
+}
+
+static int orangefs_xattr_get_trusted(const struct xattr_handler *handler,
+                                     struct dentry *dentry,
+                                     const char *name,
+                                     void *buffer,
+                                     size_t size)
+{
+       return orangefs_inode_getxattr(dentry->d_inode,
+                                   ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
+                                   name,
+                                   buffer,
+                                   size);
+}
+
+static struct xattr_handler orangefs_xattr_trusted_handler = {
+       .prefix = ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
+       .get = orangefs_xattr_get_trusted,
+       .set = orangefs_xattr_set_trusted,
+};
+
+static struct xattr_handler orangefs_xattr_default_handler = {
+       /*
+        * NOTE: this is set to the empty string
+        * so that all un-prefixed xattr keys get
+        * caught here!
+        */
+       .prefix = ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+       .get = orangefs_xattr_get_default,
+       .set = orangefs_xattr_set_default,
+};
+
+const struct xattr_handler *orangefs_xattr_handlers[] = {
+       &posix_acl_access_xattr_handler,
+       &posix_acl_default_xattr_handler,
+       &orangefs_xattr_trusted_handler,
+       &orangefs_xattr_default_handler,
+       NULL
+};
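
The size-probing convention spelled out in the comments above maps directly onto the generic xattr system calls. Below is a minimal userspace sketch of the probe-then-fetch pattern; it is illustrative only and not part of the patch, and the path is a made-up example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/mnt/orangefs/somefile";	/* hypothetical path */
	ssize_t len;
	char *buf, *key;

	/* A size of 0 only asks how large the key list is. */
	len = listxattr(path, NULL, 0);
	if (len <= 0)
		return 1;

	buf = malloc(len);
	if (!buf)
		return 1;

	/* The second call fills the buffer with NUL-separated key names. */
	len = listxattr(path, buf, len);
	if (len < 0) {
		free(buf);
		return 1;
	}

	for (key = buf; key < buf + len; key += strlen(key) + 1)
		printf("%s\n", key);

	free(buf);
	return 0;
}
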
index 85d16c67c33eaa8b7a5ac65e38ec465814916619..fa95ab2d36740803e06da898a4851db317b86a9e 100644 (file)
@@ -259,23 +259,29 @@ static int do_maps_open(struct inode *inode, struct file *file,
                                sizeof(struct proc_maps_private));
 }
 
-static pid_t pid_of_stack(struct proc_maps_private *priv,
-                               struct vm_area_struct *vma, bool is_pid)
+/*
+ * Indicate if the VMA is a stack for the given task; for
+ * /proc/PID/maps that is the stack of the main task.
+ */
+static int is_stack(struct proc_maps_private *priv,
+                   struct vm_area_struct *vma, int is_pid)
 {
-       struct inode *inode = priv->inode;
-       struct task_struct *task;
-       pid_t ret = 0;
+       int stack = 0;
+
+       if (is_pid) {
+               stack = vma->vm_start <= vma->vm_mm->start_stack &&
+                       vma->vm_end >= vma->vm_mm->start_stack;
+       } else {
+               struct inode *inode = priv->inode;
+               struct task_struct *task;
 
-       rcu_read_lock();
-       task = pid_task(proc_pid(inode), PIDTYPE_PID);
-       if (task) {
-               task = task_of_stack(task, vma, is_pid);
+               rcu_read_lock();
+               task = pid_task(proc_pid(inode), PIDTYPE_PID);
                if (task)
-                       ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+                       stack = vma_is_stack_for_task(vma, task);
+               rcu_read_unlock();
        }
-       rcu_read_unlock();
-
-       return ret;
+       return stack;
 }
 
 static void
@@ -335,8 +341,6 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
 
        name = arch_vma_name(vma);
        if (!name) {
-               pid_t tid;
-
                if (!mm) {
                        name = "[vdso]";
                        goto done;
@@ -348,21 +352,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
                        goto done;
                }
 
-               tid = pid_of_stack(priv, vma, is_pid);
-               if (tid != 0) {
-                       /*
-                        * Thread stack in /proc/PID/task/TID/maps or
-                        * the main process stack.
-                        */
-                       if (!is_pid || (vma->vm_start <= mm->start_stack &&
-                           vma->vm_end >= mm->start_stack)) {
-                               name = "[stack]";
-                       } else {
-                               /* Thread stack in /proc/PID/maps */
-                               seq_pad(m, ' ');
-                               seq_printf(m, "[stack:%d]", tid);
-                       }
-               }
+               if (is_stack(priv, vma, is_pid))
+                       name = "[stack]";
        }
 
 done:
@@ -1552,18 +1543,19 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 static int gather_hugetlb_stats(pte_t *pte, unsigned long hmask,
                unsigned long addr, unsigned long end, struct mm_walk *walk)
 {
+       pte_t huge_pte = huge_ptep_get(pte);
        struct numa_maps *md;
        struct page *page;
 
-       if (!pte_present(*pte))
+       if (!pte_present(huge_pte))
                return 0;
 
-       page = pte_page(*pte);
+       page = pte_page(huge_pte);
        if (!page)
                return 0;
 
        md = walk->private;
-       gather_stats(page, md, pte_dirty(*pte), 1);
+       gather_stats(page, md, pte_dirty(huge_pte), 1);
        return 0;
 }
 
@@ -1617,19 +1609,8 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
                seq_file_path(m, file, "\n\t= ");
        } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
                seq_puts(m, " heap");
-       } else {
-               pid_t tid = pid_of_stack(proc_priv, vma, is_pid);
-               if (tid != 0) {
-                       /*
-                        * Thread stack in /proc/PID/task/TID/maps or
-                        * the main process stack.
-                        */
-                       if (!is_pid || (vma->vm_start <= mm->start_stack &&
-                           vma->vm_end >= mm->start_stack))
-                               seq_puts(m, " stack");
-                       else
-                               seq_printf(m, " stack:%d", tid);
-               }
+       } else if (is_stack(proc_priv, vma, is_pid)) {
+               seq_puts(m, " stack");
        }
 
        if (is_vm_hugetlb_page(vma))
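
The user-visible effect of the is_stack() conversion is that /proc/PID/maps keeps a single "[stack]" annotation for the main thread's stack while the per-thread "[stack:TID]" markers are dropped. A small standalone sketch (illustrative only) that prints whichever lines of a process's own maps still carry the annotation:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512];

	if (!f)
		return 1;

	/* Print only the mappings annotated as "[stack]". */
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[stack]"))
			fputs(line, stdout);

	fclose(f);
	return 0;
}
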
index e0d64c92e4f6576c38a8a4a7cc9b8d13a2b3362e..faacb0c0d857602111bfc04f2e374c451059c358 100644 (file)
@@ -123,23 +123,26 @@ unsigned long task_statm(struct mm_struct *mm,
        return size;
 }
 
-static pid_t pid_of_stack(struct proc_maps_private *priv,
-                               struct vm_area_struct *vma, bool is_pid)
+static int is_stack(struct proc_maps_private *priv,
+                   struct vm_area_struct *vma, int is_pid)
 {
-       struct inode *inode = priv->inode;
-       struct task_struct *task;
-       pid_t ret = 0;
-
-       rcu_read_lock();
-       task = pid_task(proc_pid(inode), PIDTYPE_PID);
-       if (task) {
-               task = task_of_stack(task, vma, is_pid);
+       struct mm_struct *mm = vma->vm_mm;
+       int stack = 0;
+
+       if (is_pid) {
+               stack = vma->vm_start <= mm->start_stack &&
+                       vma->vm_end >= mm->start_stack;
+       } else {
+               struct inode *inode = priv->inode;
+               struct task_struct *task;
+
+               rcu_read_lock();
+               task = pid_task(proc_pid(inode), PIDTYPE_PID);
                if (task)
-                       ret = task_pid_nr_ns(task, inode->i_sb->s_fs_info);
+                       stack = vma_is_stack_for_task(vma, task);
+               rcu_read_unlock();
        }
-       rcu_read_unlock();
-
-       return ret;
+       return stack;
 }
 
 /*
@@ -181,21 +184,9 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
        if (file) {
                seq_pad(m, ' ');
                seq_file_path(m, file, "");
-       } else if (mm) {
-               pid_t tid = pid_of_stack(priv, vma, is_pid);
-
-               if (tid != 0) {
-                       seq_pad(m, ' ');
-                       /*
-                        * Thread stack in /proc/PID/task/TID/maps or
-                        * the main process stack.
-                        */
-                       if (!is_pid || (vma->vm_start <= mm->start_stack &&
-                           vma->vm_end >= mm->start_stack))
-                               seq_printf(m, "[stack]");
-                       else
-                               seq_printf(m, "[stack:%d]", tid);
-               }
+       } else if (mm && is_stack(priv, vma, is_pid)) {
+               seq_pad(m, ' ');
+               seq_printf(m, "[stack]");
        }
 
        seq_putc(m, '\n');
index 3c3b81bb6dfebbd3d6c3f1f8f1252e46308bf9c9..7e0137bde6d6f23ed44d3ab82c9d32b6aecc8cc3 100644 (file)
@@ -2031,6 +2031,21 @@ int dquot_commit_info(struct super_block *sb, int type)
 }
 EXPORT_SYMBOL(dquot_commit_info);
 
+int dquot_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+       struct quota_info *dqopt = sb_dqopt(sb);
+       int err;
+
+       if (!dqopt->ops[qid->type]->get_next_id)
+               return -ENOSYS;
+       mutex_lock(&dqopt->dqio_mutex);
+       err = dqopt->ops[qid->type]->get_next_id(sb, qid);
+       mutex_unlock(&dqopt->dqio_mutex);
+
+       return err;
+}
+EXPORT_SYMBOL(dquot_get_next_id);
+
 /*
  * Definitions of diskquota operations.
  */
@@ -2042,6 +2057,7 @@ const struct dquot_operations dquot_operations = {
        .write_info     = dquot_commit_info,
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
+       .get_next_id    = dquot_get_next_id,
 };
 EXPORT_SYMBOL(dquot_operations);
 
@@ -2565,6 +2581,27 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
 }
 EXPORT_SYMBOL(dquot_get_dqblk);
 
+int dquot_get_next_dqblk(struct super_block *sb, struct kqid *qid,
+                        struct qc_dqblk *di)
+{
+       struct dquot *dquot;
+       int err;
+
+       if (!sb->dq_op->get_next_id)
+               return -ENOSYS;
+       err = sb->dq_op->get_next_id(sb, qid);
+       if (err < 0)
+               return err;
+       dquot = dqget(sb, *qid);
+       if (IS_ERR(dquot))
+               return PTR_ERR(dquot);
+       do_get_dqblk(dquot, di);
+       dqput(dquot);
+
+       return 0;
+}
+EXPORT_SYMBOL(dquot_get_next_dqblk);
+
 #define VFS_QC_MASK \
        (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
         QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
@@ -2765,6 +2802,7 @@ const struct quotactl_ops dquot_quotactl_ops = {
        .get_state      = dquot_get_state,
        .set_info       = dquot_set_dqinfo,
        .get_dqblk      = dquot_get_dqblk,
+       .get_nextdqblk  = dquot_get_next_dqblk,
        .set_dqblk      = dquot_set_dqblk
 };
 EXPORT_SYMBOL(dquot_quotactl_ops);
@@ -2776,6 +2814,7 @@ const struct quotactl_ops dquot_quotactl_sysfile_ops = {
        .get_state      = dquot_get_state,
        .set_info       = dquot_set_dqinfo,
        .get_dqblk      = dquot_get_dqblk,
+       .get_nextdqblk  = dquot_get_next_dqblk,
        .set_dqblk      = dquot_set_dqblk
 };
 EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
index 3746367098fda369120ccf0eab4ca0249ed64cb4..8e297c92f7d4c18588c0e76ab0831d663f9268ad 100644 (file)
@@ -79,7 +79,7 @@ unsigned int qtype_enforce_flag(int type)
        return 0;
 }
 
-static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
+static int quota_quotaon(struct super_block *sb, int type, qid_t id,
                         struct path *path)
 {
        if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_enable)
@@ -222,6 +222,34 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
        return 0;
 }
 
+/*
+ * Return quota information for the next active quota with id >= the given
+ * id, if any exists; otherwise -ENOENT is returned via ->get_nextdqblk.
+ */
+static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
+                         void __user *addr)
+{
+       struct kqid qid;
+       struct qc_dqblk fdq;
+       struct if_nextdqblk idq;
+       int ret;
+
+       if (!sb->s_qcop->get_nextdqblk)
+               return -ENOSYS;
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
+       ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
+       if (ret)
+               return ret;
+       /* struct if_nextdqblk is a superset of struct if_dqblk */
+       copy_to_if_dqblk((struct if_dqblk *)&idq, &fdq);
+       idq.dqb_id = from_kqid(current_user_ns(), qid);
+       if (copy_to_user(addr, &idq, sizeof(idq)))
+               return -EFAULT;
+       return 0;
+}
+
 static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
 {
        dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
@@ -625,6 +653,34 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
        return ret;
 }
 
+/*
+ * Return quota information for the next active quota with id >= the given
+ * id, if any exists; otherwise -ENOENT is returned via ->get_nextdqblk.
+ */
+static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
+                           void __user *addr)
+{
+       struct fs_disk_quota fdq;
+       struct qc_dqblk qdq;
+       struct kqid qid;
+       qid_t id_out;
+       int ret;
+
+       if (!sb->s_qcop->get_nextdqblk)
+               return -ENOSYS;
+       qid = make_kqid(current_user_ns(), type, id);
+       if (!qid_valid(qid))
+               return -EINVAL;
+       ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
+       if (ret)
+               return ret;
+       id_out = from_kqid(current_user_ns(), qid);
+       copy_to_xfs_dqblk(&fdq, &qdq, type, id_out);
+       if (copy_to_user(addr, &fdq, sizeof(fdq)))
+               return -EFAULT;
+       return ret;
+}
+
 static int quota_rmxquota(struct super_block *sb, void __user *addr)
 {
        __u32 flags;
@@ -659,7 +715,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 
        switch (cmd) {
        case Q_QUOTAON:
-               return quota_quotaon(sb, type, cmd, id, path);
+               return quota_quotaon(sb, type, id, path);
        case Q_QUOTAOFF:
                return quota_quotaoff(sb, type);
        case Q_GETFMT:
@@ -670,6 +726,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
                return quota_setinfo(sb, type, addr);
        case Q_GETQUOTA:
                return quota_getquota(sb, type, id, addr);
+       case Q_GETNEXTQUOTA:
+               return quota_getnextquota(sb, type, id, addr);
        case Q_SETQUOTA:
                return quota_setquota(sb, type, id, addr);
        case Q_SYNC:
@@ -690,6 +748,8 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
                return quota_setxquota(sb, type, id, addr);
        case Q_XGETQUOTA:
                return quota_getxquota(sb, type, id, addr);
+       case Q_XGETNEXTQUOTA:
+               return quota_getnextxquota(sb, type, id, addr);
        case Q_XQUOTASYNC:
                if (sb->s_flags & MS_RDONLY)
                        return -EROFS;
@@ -708,10 +768,13 @@ static int quotactl_cmd_write(int cmd)
        switch (cmd) {
        case Q_GETFMT:
        case Q_GETINFO:
+       case Q_GETQUOTA:
+       case Q_GETNEXTQUOTA:
        case Q_SYNC:
        case Q_XGETQSTAT:
        case Q_XGETQSTATV:
        case Q_XGETQUOTA:
+       case Q_XGETNEXTQUOTA:
        case Q_XQUOTASYNC:
                return 0;
        }
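
With the Q_GETNEXTQUOTA plumbing above, userspace can walk all active quotas without probing every possible id. A minimal sketch, assuming headers new enough to expose Q_GETNEXTQUOTA and struct if_nextdqblk; the device path is a placeholder and the sketch is not part of the patch:

#include <stdio.h>
#include <errno.h>
#include <sys/quota.h>

int main(void)
{
	const char *dev = "/dev/sda1";		/* placeholder block device */
	struct if_nextdqblk q;
	unsigned int id = 0;

	for (;;) {
		if (quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), dev, id,
			     (void *)&q) != 0) {
			if (errno != ENOENT)	/* ENOENT: no quota >= id */
				perror("quotactl");
			break;
		}
		printf("uid %u: %llu bytes in use\n",
		       (unsigned int)q.dqb_id,
		       (unsigned long long)q.dqb_curspace);
		id = q.dqb_id + 1;		/* continue after this id */
	}
	return 0;
}
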
index 58efb83dec1c870c672a5330fcb4571323c73914..0738972e8d3f01c6469ea2f5c0917e89ba26e81a 100644 (file)
@@ -22,10 +22,9 @@ MODULE_LICENSE("GPL");
 
 #define __QUOTA_QT_PARANOIA
 
-static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
+static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
 {
        unsigned int epb = info->dqi_usable_bs >> 2;
-       qid_t id = from_kqid(&init_user_ns, qid);
 
        depth = info->dqi_qtree_depth - depth - 1;
        while (depth--)
@@ -33,6 +32,13 @@ static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
        return id % epb;
 }
 
+static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
+{
+       qid_t id = from_kqid(&init_user_ns, qid);
+
+       return __get_index(info, id, depth);
+}
+
 /* Number of entries in one blocks */
 static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
 {
@@ -668,3 +674,60 @@ int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
        return 0;
 }
 EXPORT_SYMBOL(qtree_release_dquot);
+
+static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
+                       unsigned int blk, int depth)
+{
+       char *buf = getdqbuf(info->dqi_usable_bs);
+       __le32 *ref = (__le32 *)buf;
+       ssize_t ret;
+       unsigned int epb = info->dqi_usable_bs >> 2;
+       unsigned int level_inc = 1;
+       int i;
+
+       if (!buf)
+               return -ENOMEM;
+
+       for (i = depth; i < info->dqi_qtree_depth - 1; i++)
+               level_inc *= epb;
+
+       ret = read_blk(info, blk, buf);
+       if (ret < 0) {
+               quota_error(info->dqi_sb,
+                           "Can't read quota tree block %u", blk);
+               goto out_buf;
+       }
+       for (i = __get_index(info, *id, depth); i < epb; i++) {
+               if (ref[i] == cpu_to_le32(0)) {
+                       *id += level_inc;
+                       continue;
+               }
+               if (depth == info->dqi_qtree_depth - 1) {
+                       ret = 0;
+                       goto out_buf;
+               }
+               ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
+               if (ret != -ENOENT)
+                       break;
+       }
+       if (i == epb) {
+               ret = -ENOENT;
+               goto out_buf;
+       }
+out_buf:
+       kfree(buf);
+       return ret;
+}
+
+int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
+{
+       qid_t id = from_kqid(&init_user_ns, *qid);
+       int ret;
+
+       ret = find_next_id(info, &id, QT_TREEOFF, 0);
+       if (ret < 0)
+               return ret;
+       *qid = make_kqid(&init_user_ns, qid->type, id);
+       return 0;
+}
+EXPORT_SYMBOL(qtree_get_next_id);
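
find_next_id() skips an empty reference by advancing *id by the number of ids that reference spans at its depth. A standalone sketch of the resulting strides, assuming the common v2 geometry of 1 KiB tree blocks (epb = 256) and a four-level tree; not part of the patch:

#include <stdio.h>

int main(void)
{
	unsigned int epb = 1024 >> 2;	/* dqi_usable_bs >> 2 for v2 */
	int tree_depth = 4;		/* assumed dqi_qtree_depth */
	int depth, i;

	for (depth = 0; depth < tree_depth; depth++) {
		unsigned long long level_inc = 1;

		/* Same computation as the loop in find_next_id(). */
		for (i = depth; i < tree_depth - 1; i++)
			level_inc *= epb;
		printf("depth %d: id advances by %llu per empty slot\n",
		       depth, level_inc);
	}
	return 0;
}
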
index ed85d4f35c04b40aab3754ad42e483c4de3a1f1e..ca71bf881ad1e33d11c1fdd10b71070c5079358d 100644 (file)
@@ -304,6 +304,11 @@ static int v2_free_file_info(struct super_block *sb, int type)
        return 0;
 }
 
+static int v2_get_next_id(struct super_block *sb, struct kqid *qid)
+{
+       return qtree_get_next_id(sb_dqinfo(sb, qid->type)->dqi_priv, qid);
+}
+
 static const struct quota_format_ops v2_format_ops = {
        .check_quota_file       = v2_check_quota_file,
        .read_file_info         = v2_read_file_info,
@@ -312,6 +317,7 @@ static const struct quota_format_ops v2_format_ops = {
        .read_dqblk             = v2_read_dquot,
        .commit_dqblk           = v2_write_dquot,
        .release_dqblk          = v2_release_dquot,
+       .get_next_id            = v2_get_next_id,
 };
 
 static struct quota_format_type v2r0_quota_format = {
index c0306ec8ed7b8f5d4eef97fd1cf4c7653c2b1f20..b8f2d1e8c6453c373655be98c03c1118acd71f2c 100644 (file)
@@ -802,6 +802,7 @@ static const struct dquot_operations reiserfs_quota_operations = {
        .write_info = reiserfs_write_info,
        .alloc_dquot    = dquot_alloc,
        .destroy_dquot  = dquot_destroy,
+       .get_next_id    = dquot_get_next_id,
 };
 
 static const struct quotactl_ops reiserfs_qctl_operations = {
index b94fa6c3c6ebe8c7308aea0052f3afacad9745c4..053818dd6c18be8f228e1c40483e50d78aa78007 100644 (file)
@@ -153,7 +153,7 @@ static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx)
        if (isalarm(ctx))
                remaining = alarm_expires_remaining(&ctx->t.alarm);
        else
-               remaining = hrtimer_expires_remaining(&ctx->t.tmr);
+               remaining = hrtimer_expires_remaining_adjusted(&ctx->t.tmr);
 
        return remaining.tv64 < 0 ? ktime_set(0, 0): remaining;
 }
index 541d9c65014dce91e2207759fe5e1e02db60cae5..b51b371b874a0bcace50e83aeba7d3a06f55c67a 100644 (file)
@@ -45,7 +45,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        int block, iblock;
        loff_t nf_pos;
        int flen;
-       unsigned char *fname = NULL;
+       unsigned char *fname = NULL, *copy_name = NULL;
        unsigned char *nameptr;
        uint16_t liu;
        uint8_t lfi;
@@ -143,7 +143,15 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                        if (poffset >= lfi) {
                                nameptr = (char *)(fibh.ebh->b_data + poffset - lfi);
                        } else {
-                               nameptr = fname;
+                               if (!copy_name) {
+                                       copy_name = kmalloc(UDF_NAME_LEN,
+                                                           GFP_NOFS);
+                                       if (!copy_name) {
+                                               ret = -ENOMEM;
+                                               goto out;
+                                       }
+                               }
+                               nameptr = copy_name;
                                memcpy(nameptr, fi->fileIdent + liu,
                                       lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
@@ -185,6 +193,7 @@ out:
        brelse(fibh.sbh);
        brelse(epos.bh);
        kfree(fname);
+       kfree(copy_name);
 
        return ret;
 }
index 42eafb91f7ff3c092fd38f6fb6136862ca6508c8..a2ba11eca9955ea873b25e93075273d2e7837ed8 100644 (file)
@@ -165,7 +165,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
        struct fileIdentDesc *fi = NULL;
        loff_t f_pos;
        int block, flen;
-       unsigned char *fname = NULL;
+       unsigned char *fname = NULL, *copy_name = NULL;
        unsigned char *nameptr;
        uint8_t lfi;
        uint16_t liu;
@@ -236,7 +236,15 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                                nameptr = (uint8_t *)(fibh->ebh->b_data +
                                                      poffset - lfi);
                        else {
-                               nameptr = fname;
+                               if (!copy_name) {
+                                       copy_name = kmalloc(UDF_NAME_LEN,
+                                                           GFP_NOFS);
+                                       if (!copy_name) {
+                                               fi = ERR_PTR(-ENOMEM);
+                                               goto out_err;
+                                       }
+                               }
+                               nameptr = copy_name;
                                memcpy(nameptr, fi->fileIdent + liu,
                                        lfi - poffset);
                                memcpy(nameptr + lfi - poffset,
@@ -279,6 +287,7 @@ out_err:
 out_ok:
        brelse(epos.bh);
        kfree(fname);
+       kfree(copy_name);
 
        return fi;
 }
@@ -291,7 +300,7 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
        struct udf_fileident_bh fibh;
        struct fileIdentDesc *fi;
 
-       if (dentry->d_name.len > UDF_NAME_LEN - 2)
+       if (dentry->d_name.len > UDF_NAME_LEN)
                return ERR_PTR(-ENAMETOOLONG);
 
 #ifdef UDF_RECOVERY
@@ -351,7 +360,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
        struct udf_inode_info *dinfo;
 
        fibh->sbh = fibh->ebh = NULL;
-       name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+       name = kmalloc(UDF_NAME_LEN_CS0, GFP_NOFS);
        if (!name) {
                *err = -ENOMEM;
                goto out_err;
@@ -362,8 +371,9 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
                        *err = -EINVAL;
                        goto out_err;
                }
-               namelen = udf_put_filename(sb, dentry->d_name.name, name,
-                                                dentry->d_name.len);
+               namelen = udf_put_filename(sb, dentry->d_name.name,
+                                          dentry->d_name.len,
+                                          name, UDF_NAME_LEN_CS0);
                if (!namelen) {
                        *err = -ENAMETOOLONG;
                        goto out_err;
@@ -914,7 +924,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 
        iinfo = UDF_I(inode);
        down_write(&iinfo->i_data_sem);
-       name = kmalloc(UDF_NAME_LEN, GFP_NOFS);
+       name = kmalloc(UDF_NAME_LEN_CS0, GFP_NOFS);
        if (!name) {
                err = -ENOMEM;
                goto out_no_entry;
@@ -997,8 +1007,9 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
                }
 
                if (pc->componentType == 5) {
-                       namelen = udf_put_filename(sb, compstart, name,
-                                                  symname - compstart);
+                       namelen = udf_put_filename(sb, compstart,
+                                                  symname - compstart,
+                                                  name, UDF_NAME_LEN_CS0);
                        if (!namelen)
                                goto out_no_entry;
 
index a522c15a0bfd7e5e9e0d758bfbc59185a8cb2609..fa92fe839fda2f989e3d52c368cc7520b80679e5 100644 (file)
@@ -887,18 +887,14 @@ static int udf_find_fileset(struct super_block *sb,
 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 {
        struct primaryVolDesc *pvoldesc;
-       struct ustr *instr, *outstr;
+       uint8_t *outstr;
        struct buffer_head *bh;
        uint16_t ident;
        int ret = -ENOMEM;
 
-       instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
-       if (!instr)
-               return -ENOMEM;
-
-       outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
+       outstr = kmalloc(128, GFP_NOFS);
        if (!outstr)
-               goto out1;
+               return -ENOMEM;
 
        bh = udf_read_tagged(sb, block, block, &ident);
        if (!bh) {
@@ -923,31 +919,25 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 #endif
        }
 
-       if (!udf_build_ustr(instr, pvoldesc->volIdent, 32)) {
-               ret = udf_CS0toUTF8(outstr, instr);
-               if (ret < 0)
-                       goto out_bh;
+       ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
+       if (ret < 0)
+               goto out_bh;
 
-               strncpy(UDF_SB(sb)->s_volume_ident, outstr->u_name,
-                       outstr->u_len > 31 ? 31 : outstr->u_len);
-               udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
-       }
+       strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
+       udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
 
-       if (!udf_build_ustr(instr, pvoldesc->volSetIdent, 128)) {
-               ret = udf_CS0toUTF8(outstr, instr);
-               if (ret < 0)
-                       goto out_bh;
+       ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
+       if (ret < 0)
+               goto out_bh;
 
-               udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
-       }
+       outstr[ret] = 0;
+       udf_debug("volSetIdent[] = '%s'\n", outstr);
 
        ret = 0;
 out_bh:
        brelse(bh);
 out2:
        kfree(outstr);
-out1:
-       kfree(instr);
        return ret;
 }
 
@@ -2358,7 +2348,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
                                          le32_to_cpu(lvidiu->numDirs)) : 0)
                        + buf->f_bfree;
        buf->f_ffree = buf->f_bfree;
-       buf->f_namelen = UDF_NAME_LEN - 2;
+       buf->f_namelen = UDF_NAME_LEN;
        buf->f_fsid.val[0] = (u32)id;
        buf->f_fsid.val[1] = (u32)(id >> 32);
 
index fa0044b6b81d226223aa7ea61dd56c9f83e146e9..972b70625614f837310e39d41f6d8e836d205144 100644 (file)
@@ -49,8 +49,8 @@ extern __printf(3, 4) void _udf_warn(struct super_block *sb,
 #define UDF_EXTENT_FLAG_MASK   0xC0000000
 
 #define UDF_NAME_PAD           4
-#define UDF_NAME_LEN           256
-#define UDF_PATH_LEN           1023
+#define UDF_NAME_LEN           254
+#define UDF_NAME_LEN_CS0       255
 
 static inline size_t udf_file_entry_alloc_offset(struct inode *inode)
 {
@@ -106,12 +106,6 @@ struct generic_desc {
        __le32          volDescSeqNum;
 };
 
-struct ustr {
-       uint8_t u_cmpID;
-       uint8_t u_name[UDF_NAME_LEN - 2];
-       uint8_t u_len;
-};
-
 
 /* super.c */
 
@@ -214,12 +208,11 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
 }
 
 /* unicode.c */
-extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
-                           int);
-extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
-                           int);
-extern int udf_build_ustr(struct ustr *, dstring *, int);
-extern int udf_CS0toUTF8(struct ustr *, const struct ustr *);
+extern int udf_get_filename(struct super_block *, const uint8_t *, int,
+                           uint8_t *, int);
+extern int udf_put_filename(struct super_block *, const uint8_t *, int,
+                           uint8_t *, int);
+extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
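
Worked numbers for the renamed constants: the old statfs code reported f_namelen as UDF_NAME_LEN - 2 = 256 - 2 = 254, and the new code reports UDF_NAME_LEN = 254 directly, so the user-visible name-length limit is unchanged. UDF_NAME_LEN_CS0 = 255 is the size of the scratch buffer now used for the CS0-encoded form in udf_add_entry() and udf_symlink().
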
index e788a05aab83670de6be03dc48eb519e2d2ae5d9..3ff42f4437f3eb3374fea6b1cd0e839b71b65577 100644 (file)
 
 #include "udf_sb.h"
 
-static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
-                                 int);
-
-static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
-{
-       if ((!dest) || (!src) || (!strlen) || (strlen > UDF_NAME_LEN - 2))
-               return 0;
-
-       memset(dest, 0, sizeof(struct ustr));
-       memcpy(dest->u_name, src, strlen);
-       dest->u_cmpID = 0x08;
-       dest->u_len = strlen;
-
-       return strlen;
-}
-
-/*
- * udf_build_ustr
- */
-int udf_build_ustr(struct ustr *dest, dstring *ptr, int size)
-{
-       int usesize;
-
-       if (!dest || !ptr || !size)
-               return -1;
-       BUG_ON(size < 2);
-
-       usesize = min_t(size_t, ptr[size - 1], sizeof(dest->u_name));
-       usesize = min(usesize, size - 2);
-       dest->u_cmpID = ptr[0];
-       dest->u_len = usesize;
-       memcpy(dest->u_name, ptr + 1, usesize);
-       memset(dest->u_name + usesize, 0, sizeof(dest->u_name) - usesize);
-
-       return 0;
-}
-
-/*
- * udf_build_ustr_exact
- */
-static void udf_build_ustr_exact(struct ustr *dest, dstring *ptr, int exactsize)
-{
-       memset(dest, 0, sizeof(struct ustr));
-       dest->u_cmpID = ptr[0];
-       dest->u_len = exactsize - 1;
-       memcpy(dest->u_name, ptr + 1, exactsize - 1);
-}
-
-/*
- * udf_CS0toUTF8
- *
- * PURPOSE
- *     Convert OSTA Compressed Unicode to the UTF-8 equivalent.
- *
- * PRE-CONDITIONS
- *     utf                     Pointer to UTF-8 output buffer.
- *     ocu                     Pointer to OSTA Compressed Unicode input buffer
- *                             of size UDF_NAME_LEN bytes.
- *                             both of type "struct ustr *"
- *
- * POST-CONDITIONS
- *     <return>                >= 0 on success.
- *
- * HISTORY
- *     November 12, 1997 - Andrew E. Mileski
- *     Written, tested, and released.
- */
-int udf_CS0toUTF8(struct ustr *utf_o, const struct ustr *ocu_i)
+static int udf_uni2char_utf8(wchar_t uni,
+                            unsigned char *out,
+                            int boundlen)
 {
-       const uint8_t *ocu;
-       uint8_t cmp_id, ocu_len;
-       int i;
-
-       ocu_len = ocu_i->u_len;
-       if (ocu_len == 0) {
-               memset(utf_o, 0, sizeof(struct ustr));
-               return 0;
-       }
-
-       cmp_id = ocu_i->u_cmpID;
-       if (cmp_id != 8 && cmp_id != 16) {
-               memset(utf_o, 0, sizeof(struct ustr));
-               pr_err("unknown compression code (%d) stri=%s\n",
-                      cmp_id, ocu_i->u_name);
-               return -EINVAL;
-       }
-
-       ocu = ocu_i->u_name;
-       utf_o->u_len = 0;
-       for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
-
-               /* Expand OSTA compressed Unicode to Unicode */
-               uint32_t c = ocu[i++];
-               if (cmp_id == 16)
-                       c = (c << 8) | ocu[i++];
-
-               /* Compress Unicode to UTF-8 */
-               if (c < 0x80U)
-                       utf_o->u_name[utf_o->u_len++] = (uint8_t)c;
-               else if (c < 0x800U) {
-                       if (utf_o->u_len > (UDF_NAME_LEN - 4))
-                               break;
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0xc0 | (c >> 6));
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0x80 | (c & 0x3f));
-               } else {
-                       if (utf_o->u_len > (UDF_NAME_LEN - 5))
-                               break;
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0xe0 | (c >> 12));
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0x80 |
-                                                         ((c >> 6) & 0x3f));
-                       utf_o->u_name[utf_o->u_len++] =
-                                               (uint8_t)(0x80 | (c & 0x3f));
-               }
+       int u_len = 0;
+
+       if (boundlen <= 0)
+               return -ENAMETOOLONG;
+
+       if (uni < 0x80) {
+               out[u_len++] = (unsigned char)uni;
+       } else if (uni < 0x800) {
+               if (boundlen < 2)
+                       return -ENAMETOOLONG;
+               out[u_len++] = (unsigned char)(0xc0 | (uni >> 6));
+               out[u_len++] = (unsigned char)(0x80 | (uni & 0x3f));
+       } else {
+               if (boundlen < 3)
+                       return -ENAMETOOLONG;
+               out[u_len++] = (unsigned char)(0xe0 | (uni >> 12));
+               out[u_len++] = (unsigned char)(0x80 | ((uni >> 6) & 0x3f));
+               out[u_len++] = (unsigned char)(0x80 | (uni & 0x3f));
        }
-       utf_o->u_cmpID = 8;
-
-       return utf_o->u_len;
+       return u_len;
 }
 
-/*
- *
- * udf_UTF8toCS0
- *
- * PURPOSE
- *     Convert UTF-8 to the OSTA Compressed Unicode equivalent.
- *
- * DESCRIPTION
- *     This routine is only called by udf_lookup().
- *
- * PRE-CONDITIONS
- *     ocu                     Pointer to OSTA Compressed Unicode output
- *                             buffer of size UDF_NAME_LEN bytes.
- *     utf                     Pointer to UTF-8 input buffer.
- *     utf_len                 Length of UTF-8 input buffer in bytes.
- *
- * POST-CONDITIONS
- *     <return>                Zero on success.
- *
- * HISTORY
- *     November 12, 1997 - Andrew E. Mileski
- *     Written, tested, and released.
- */
-static int udf_UTF8toCS0(dstring *ocu, struct ustr *utf, int length)
+static int udf_char2uni_utf8(const unsigned char *in,
+                            int boundlen,
+                            wchar_t *uni)
 {
-       unsigned c, i, max_val, utf_char;
-       int utf_cnt, u_len, u_ch;
-
-       memset(ocu, 0, sizeof(dstring) * length);
-       ocu[0] = 8;
-       max_val = 0xffU;
-       u_ch = 1;
+       unsigned int utf_char;
+       unsigned char c;
+       int utf_cnt, u_len;
 
-try_again:
-       u_len = 0U;
-       utf_char = 0U;
-       utf_cnt = 0U;
-       for (i = 0U; i < utf->u_len; i++) {
-               /* Name didn't fit? */
-               if (u_len + 1 + u_ch >= length)
-                       return 0;
-
-               c = (uint8_t)utf->u_name[i];
+       utf_char = 0;
+       utf_cnt = 0;
+       for (u_len = 0; u_len < boundlen;) {
+               c = in[u_len++];
 
                /* Complete a multi-byte UTF-8 character */
                if (utf_cnt) {
-                       utf_char = (utf_char << 6) | (c & 0x3fU);
+                       utf_char = (utf_char << 6) | (c & 0x3f);
                        if (--utf_cnt)
                                continue;
                } else {
                        /* Check for a multi-byte UTF-8 character */
-                       if (c & 0x80U) {
+                       if (c & 0x80) {
                                /* Start a multi-byte UTF-8 character */
-                               if ((c & 0xe0U) == 0xc0U) {
-                                       utf_char = c & 0x1fU;
+                               if ((c & 0xe0) == 0xc0) {
+                                       utf_char = c & 0x1f;
                                        utf_cnt = 1;
-                               } else if ((c & 0xf0U) == 0xe0U) {
-                                       utf_char = c & 0x0fU;
+                               } else if ((c & 0xf0) == 0xe0) {
+                                       utf_char = c & 0x0f;
                                        utf_cnt = 2;
-                               } else if ((c & 0xf8U) == 0xf0U) {
-                                       utf_char = c & 0x07U;
+                               } else if ((c & 0xf8) == 0xf0) {
+                                       utf_char = c & 0x07;
                                        utf_cnt = 3;
-                               } else if ((c & 0xfcU) == 0xf8U) {
-                                       utf_char = c & 0x03U;
+                               } else if ((c & 0xfc) == 0xf8) {
+                                       utf_char = c & 0x03;
                                        utf_cnt = 4;
-                               } else if ((c & 0xfeU) == 0xfcU) {
-                                       utf_char = c & 0x01U;
+                               } else if ((c & 0xfe) == 0xfc) {
+                                       utf_char = c & 0x01;
                                        utf_cnt = 5;
                                } else {
-                                       goto error_out;
+                                       utf_cnt = -1;
+                                       break;
                                }
                                continue;
                        } else {
@@ -228,97 +101,216 @@ try_again:
                                utf_char = c;
                        }
                }
-
-               /* Choose no compression if necessary */
-               if (utf_char > max_val) {
-                       if (max_val == 0xffU) {
-                               max_val = 0xffffU;
-                               ocu[0] = (uint8_t)0x10U;
-                               u_ch = 2;
-                               goto try_again;
-                       }
-                       goto error_out;
-               }
-
-               if (max_val == 0xffffU)
-                       ocu[++u_len] = (uint8_t)(utf_char >> 8);
-               ocu[++u_len] = (uint8_t)(utf_char & 0xffU);
+               *uni = utf_char;
+               break;
        }
-
        if (utf_cnt) {
-error_out:
-               ocu[++u_len] = '?';
-               printk(KERN_DEBUG pr_fmt("bad UTF-8 character\n"));
+               *uni = '?';
+               return -EINVAL;
        }
+       return u_len;
+}
 
-       ocu[length - 1] = (uint8_t)u_len + 1;
+#define ILLEGAL_CHAR_MARK      '_'
+#define EXT_MARK               '.'
+#define CRC_MARK               '#'
+#define EXT_SIZE               5
+/* Number of chars we need to store generated CRC to make filename unique */
+#define CRC_LEN                        5
+
+static int udf_name_conv_char(uint8_t *str_o, int str_o_max_len,
+                             int *str_o_idx,
+                             const uint8_t *str_i, int str_i_max_len,
+                             int *str_i_idx,
+                             int u_ch, int *needsCRC,
+                             int (*conv_f)(wchar_t, unsigned char *, int),
+                             int translate)
+{
+       uint32_t c;
+       int illChar = 0;
+       int len, gotch = 0;
+
+       for (; (!gotch) && (*str_i_idx < str_i_max_len); *str_i_idx += u_ch) {
+               if (*str_o_idx >= str_o_max_len) {
+                       *needsCRC = 1;
+                       return gotch;
+               }
 
-       return u_len + 1;
+               /* Expand OSTA compressed Unicode to Unicode */
+               c = str_i[*str_i_idx];
+               if (u_ch > 1)
+                       c = (c << 8) | str_i[*str_i_idx + 1];
+
+               if (translate && (c == '/' || c == 0))
+                       illChar = 1;
+               else if (illChar)
+                       break;
+               else
+                       gotch = 1;
+       }
+       if (illChar) {
+               *needsCRC = 1;
+               c = ILLEGAL_CHAR_MARK;
+               gotch = 1;
+       }
+       if (gotch) {
+               len = conv_f(c, &str_o[*str_o_idx], str_o_max_len - *str_o_idx);
+               /* Valid character? */
+               if (len >= 0)
+                       *str_o_idx += len;
+               else if (len == -ENAMETOOLONG) {
+                       *needsCRC = 1;
+                       gotch = 0;
+               } else {
+                       str_o[(*str_o_idx)++] = '?';
+                       *needsCRC = 1;
+               }
+       }
+       return gotch;
 }
 
-static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
-                       const struct ustr *ocu_i)
+static int udf_name_from_CS0(uint8_t *str_o, int str_max_len,
+                            const uint8_t *ocu, int ocu_len,
+                            int (*conv_f)(wchar_t, unsigned char *, int),
+                            int translate)
 {
-       const uint8_t *ocu;
-       uint8_t cmp_id, ocu_len;
-       int i, len;
+       uint32_t c;
+       uint8_t cmp_id;
+       int idx, len;
+       int u_ch;
+       int needsCRC = 0;
+       int ext_i_len, ext_max_len;
+       int str_o_len = 0;      /* Length of resulting output */
+       int ext_o_len = 0;      /* Extension output length */
+       int ext_crc_len = 0;    /* Extension output length if used with CRC */
+       int i_ext = -1;         /* Extension position in input buffer */
+       int o_crc = 0;          /* Rightmost possible output pos for CRC+ext */
+       unsigned short valueCRC;
+       uint8_t ext[EXT_SIZE * NLS_MAX_CHARSET_SIZE + 1];
+       uint8_t crc[CRC_LEN];
 
+       if (str_max_len <= 0)
+               return 0;
 
-       ocu_len = ocu_i->u_len;
        if (ocu_len == 0) {
-               memset(utf_o, 0, sizeof(struct ustr));
+               memset(str_o, 0, str_max_len);
                return 0;
        }
 
-       cmp_id = ocu_i->u_cmpID;
+       cmp_id = ocu[0];
        if (cmp_id != 8 && cmp_id != 16) {
-               memset(utf_o, 0, sizeof(struct ustr));
-               pr_err("unknown compression code (%d) stri=%s\n",
-                      cmp_id, ocu_i->u_name);
+               memset(str_o, 0, str_max_len);
+               pr_err("unknown compression code (%d)\n", cmp_id);
                return -EINVAL;
        }
+       u_ch = cmp_id >> 3;
 
-       ocu = ocu_i->u_name;
-       utf_o->u_len = 0;
-       for (i = 0; (i < ocu_len) && (utf_o->u_len <= (UDF_NAME_LEN - 3));) {
-               /* Expand OSTA compressed Unicode to Unicode */
-               uint32_t c = ocu[i++];
-               if (cmp_id == 16)
-                       c = (c << 8) | ocu[i++];
+       ocu++;
+       ocu_len--;
 
-               len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
-                                   UDF_NAME_LEN - 2 - utf_o->u_len);
-               /* Valid character? */
-               if (len >= 0)
-                       utf_o->u_len += len;
-               else
-                       utf_o->u_name[utf_o->u_len++] = '?';
+       if (ocu_len % u_ch) {
+               pr_err("incorrect filename length (%d)\n", ocu_len + 1);
+               return -EINVAL;
+       }
+
+       if (translate) {
+               /* Look for extension */
+               for (idx = ocu_len - u_ch, ext_i_len = 0;
+                    (idx >= 0) && (ext_i_len < EXT_SIZE);
+                    idx -= u_ch, ext_i_len++) {
+                       c = ocu[idx];
+                       if (u_ch > 1)
+                               c = (c << 8) | ocu[idx + 1];
+
+                       if (c == EXT_MARK) {
+                               if (ext_i_len)
+                                       i_ext = idx;
+                               break;
+                       }
+               }
+               if (i_ext >= 0) {
+                       /* Convert extension */
+                       ext_max_len = min_t(int, sizeof(ext), str_max_len);
+                       ext[ext_o_len++] = EXT_MARK;
+                       idx = i_ext + u_ch;
+                       while (udf_name_conv_char(ext, ext_max_len, &ext_o_len,
+                                                 ocu, ocu_len, &idx,
+                                                 u_ch, &needsCRC,
+                                                 conv_f, translate)) {
+                               if ((ext_o_len + CRC_LEN) < str_max_len)
+                                       ext_crc_len = ext_o_len;
+                       }
+               }
        }
-       utf_o->u_cmpID = 8;
 
-       return utf_o->u_len;
+       idx = 0;
+       while (1) {
+               if (translate && (idx == i_ext)) {
+                       if (str_o_len > (str_max_len - ext_o_len))
+                               needsCRC = 1;
+                       break;
+               }
+
+               if (!udf_name_conv_char(str_o, str_max_len, &str_o_len,
+                                       ocu, ocu_len, &idx,
+                                       u_ch, &needsCRC, conv_f, translate))
+                       break;
+
+               if (translate &&
+                   (str_o_len <= (str_max_len - ext_o_len - CRC_LEN)))
+                       o_crc = str_o_len;
+       }
+
+       if (translate) {
+               if (str_o_len <= 2 && str_o[0] == '.' &&
+                   (str_o_len == 1 || str_o[1] == '.'))
+                       needsCRC = 1;
+               if (needsCRC) {
+                       str_o_len = o_crc;
+                       valueCRC = crc_itu_t(0, ocu, ocu_len);
+                       crc[0] = CRC_MARK;
+                       crc[1] = hex_asc_upper_hi(valueCRC >> 8);
+                       crc[2] = hex_asc_upper_lo(valueCRC >> 8);
+                       crc[3] = hex_asc_upper_hi(valueCRC);
+                       crc[4] = hex_asc_upper_lo(valueCRC);
+                       len = min_t(int, CRC_LEN, str_max_len - str_o_len);
+                       memcpy(&str_o[str_o_len], crc, len);
+                       str_o_len += len;
+                       ext_o_len = ext_crc_len;
+               }
+               if (ext_o_len > 0) {
+                       memcpy(&str_o[str_o_len], ext, ext_o_len);
+                       str_o_len += ext_o_len;
+               }
+       }
+
+       return str_o_len;
 }
 
-static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
-                       int length)
+static int udf_name_to_CS0(uint8_t *ocu, int ocu_max_len,
+                          const uint8_t *str_i, int str_len,
+                          int (*conv_f)(const unsigned char *, int, wchar_t *))
 {
-       int len;
-       unsigned i, max_val;
-       uint16_t uni_char;
+       int i, len;
+       unsigned int max_val;
+       wchar_t uni_char;
        int u_len, u_ch;
 
-       memset(ocu, 0, sizeof(dstring) * length);
+       if (ocu_max_len <= 0)
+               return 0;
+
+       memset(ocu, 0, ocu_max_len);
        ocu[0] = 8;
-       max_val = 0xffU;
+       max_val = 0xff;
        u_ch = 1;
 
 try_again:
-       u_len = 0U;
-       for (i = 0U; i < uni->u_len; i++) {
+       u_len = 1;
+       for (i = 0; i < str_len; i++) {
                /* Name didn't fit? */
-               if (u_len + 1 + u_ch >= length)
+               if (u_len + u_ch > ocu_max_len)
                        return 0;
-               len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
+               len = conv_f(&str_i[i], str_len - i, &uni_char);
                if (!len)
                        continue;
                /* Invalid character, deal with it */
@@ -328,187 +320,65 @@ try_again:
                }
 
                if (uni_char > max_val) {
-                       max_val = 0xffffU;
-                       ocu[0] = (uint8_t)0x10U;
+                       max_val = 0xffff;
+                       ocu[0] = 0x10;
                        u_ch = 2;
                        goto try_again;
                }
 
-               if (max_val == 0xffffU)
-                       ocu[++u_len] = (uint8_t)(uni_char >> 8);
-               ocu[++u_len] = (uint8_t)(uni_char & 0xffU);
+               if (max_val == 0xffff)
+                       ocu[u_len++] = (uint8_t)(uni_char >> 8);
+               ocu[u_len++] = (uint8_t)(uni_char & 0xff);
                i += len - 1;
        }
 
-       ocu[length - 1] = (uint8_t)u_len + 1;
-       return u_len + 1;
+       return u_len;
 }
 
-int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
+int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len)
+{
+       return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len,
+                                udf_uni2char_utf8, 0);
+}
+
+int udf_get_filename(struct super_block *sb, const uint8_t *sname, int slen,
                     uint8_t *dname, int dlen)
 {
-       struct ustr *filename, *unifilename;
+       int (*conv_f)(wchar_t, unsigned char *, int);
        int ret;
 
        if (!slen)
                return -EIO;
 
-       filename = kmalloc(sizeof(struct ustr), GFP_NOFS);
-       if (!filename)
-               return -ENOMEM;
-
-       unifilename = kmalloc(sizeof(struct ustr), GFP_NOFS);
-       if (!unifilename) {
-               ret = -ENOMEM;
-               goto out1;
-       }
+       if (dlen <= 0)
+               return 0;
 
-       udf_build_ustr_exact(unifilename, sname, slen);
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
-               ret = udf_CS0toUTF8(filename, unifilename);
-               if (ret < 0) {
-                       udf_debug("Failed in udf_get_filename: sname = %s\n",
-                                 sname);
-                       goto out2;
-               }
+               conv_f = udf_uni2char_utf8;
        } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
-               ret = udf_CS0toNLS(UDF_SB(sb)->s_nls_map, filename,
-                                  unifilename);
-               if (ret < 0) {
-                       udf_debug("Failed in udf_get_filename: sname = %s\n",
-                                 sname);
-                       goto out2;
-               }
+               conv_f = UDF_SB(sb)->s_nls_map->uni2char;
        } else
                BUG();
 
-       ret = udf_translate_to_linux(dname, dlen,
-                                    filename->u_name, filename->u_len,
-                                    unifilename->u_name, unifilename->u_len);
+       ret = udf_name_from_CS0(dname, dlen, sname, slen, conv_f, 1);
        /* Zero length filename isn't valid... */
        if (ret == 0)
                ret = -EINVAL;
-out2:
-       kfree(unifilename);
-out1:
-       kfree(filename);
        return ret;
 }
 
-int udf_put_filename(struct super_block *sb, const uint8_t *sname,
-                    uint8_t *dname, int flen)
+int udf_put_filename(struct super_block *sb, const uint8_t *sname, int slen,
+                    uint8_t *dname, int dlen)
 {
-       struct ustr unifilename;
-       int namelen;
-
-       if (!udf_char_to_ustr(&unifilename, sname, flen))
-               return 0;
+       int (*conv_f)(const unsigned char *, int, wchar_t *);
 
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
-               namelen = udf_UTF8toCS0(dname, &unifilename, UDF_NAME_LEN);
-               if (!namelen)
-                       return 0;
+               conv_f = udf_char2uni_utf8;
        } else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
-               namelen = udf_NLStoCS0(UDF_SB(sb)->s_nls_map, dname,
-                                       &unifilename, UDF_NAME_LEN);
-               if (!namelen)
-                       return 0;
+               conv_f = UDF_SB(sb)->s_nls_map->char2uni;
        } else
-               return 0;
+               BUG();
 
-       return namelen;
+       return udf_name_to_CS0(dname, dlen, sname, slen, conv_f);
 }
 
-#define ILLEGAL_CHAR_MARK      '_'
-#define EXT_MARK               '.'
-#define CRC_MARK               '#'
-#define EXT_SIZE               5
-/* Number of chars we need to store generated CRC to make filename unique */
-#define CRC_LEN                        5
-
-static int udf_translate_to_linux(uint8_t *newName, int newLen,
-                                 uint8_t *udfName, int udfLen,
-                                 uint8_t *fidName, int fidNameLen)
-{
-       int index, newIndex = 0, needsCRC = 0;
-       int extIndex = 0, newExtIndex = 0, hasExt = 0;
-       unsigned short valueCRC;
-       uint8_t curr;
-
-       if (udfName[0] == '.' &&
-           (udfLen == 1 || (udfLen == 2 && udfName[1] == '.'))) {
-               needsCRC = 1;
-               newIndex = udfLen;
-               memcpy(newName, udfName, udfLen);
-       } else {
-               for (index = 0; index < udfLen; index++) {
-                       curr = udfName[index];
-                       if (curr == '/' || curr == 0) {
-                               needsCRC = 1;
-                               curr = ILLEGAL_CHAR_MARK;
-                               while (index + 1 < udfLen &&
-                                               (udfName[index + 1] == '/' ||
-                                                udfName[index + 1] == 0))
-                                       index++;
-                       }
-                       if (curr == EXT_MARK &&
-                                       (udfLen - index - 1) <= EXT_SIZE) {
-                               if (udfLen == index + 1)
-                                       hasExt = 0;
-                               else {
-                                       hasExt = 1;
-                                       extIndex = index;
-                                       newExtIndex = newIndex;
-                               }
-                       }
-                       if (newIndex < newLen)
-                               newName[newIndex++] = curr;
-                       else
-                               needsCRC = 1;
-               }
-       }
-       if (needsCRC) {
-               uint8_t ext[EXT_SIZE];
-               int localExtIndex = 0;
-
-               if (hasExt) {
-                       int maxFilenameLen;
-                       for (index = 0;
-                            index < EXT_SIZE && extIndex + index + 1 < udfLen;
-                            index++) {
-                               curr = udfName[extIndex + index + 1];
-
-                               if (curr == '/' || curr == 0) {
-                                       needsCRC = 1;
-                                       curr = ILLEGAL_CHAR_MARK;
-                                       while (extIndex + index + 2 < udfLen &&
-                                             (index + 1 < EXT_SIZE &&
-                                               (udfName[extIndex + index + 2] == '/' ||
-                                                udfName[extIndex + index + 2] == 0)))
-                                               index++;
-                               }
-                               ext[localExtIndex++] = curr;
-                       }
-                       maxFilenameLen = newLen - CRC_LEN - localExtIndex;
-                       if (newIndex > maxFilenameLen)
-                               newIndex = maxFilenameLen;
-                       else
-                               newIndex = newExtIndex;
-               } else if (newIndex > newLen - CRC_LEN)
-                       newIndex = newLen - CRC_LEN;
-               newName[newIndex++] = CRC_MARK;
-               valueCRC = crc_itu_t(0, fidName, fidNameLen);
-               newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
-               newName[newIndex++] = hex_asc_upper_lo(valueCRC >> 8);
-               newName[newIndex++] = hex_asc_upper_hi(valueCRC);
-               newName[newIndex++] = hex_asc_upper_lo(valueCRC);
-
-               if (hasExt) {
-                       newName[newIndex++] = EXT_MARK;
-                       for (index = 0; index < localExtIndex; index++)
-                               newName[newIndex++] = ext[index];
-               }
-       }
-
-       return newIndex;
-}
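
The udf_name_from_CS0() hunks above fold the old CS0-to-NLS/UTF-8 helpers and udf_translate_to_linux() into one pass: the compression ID in the first byte (8 or 16) selects one or two bytes per code point, two-byte values are big-endian, and whenever the name has to be shortened or sanitized the output gets a '#' plus the CRC of the compressed bytes as four uppercase hex digits so the result stays unique. A minimal userspace sketch of that expansion and suffix follows; crc16_ccitt() is a stand-in written here for the kernel's crc_itu_t() (assumed to be the MSB-first CRC-16 with polynomial 0x1021 and a caller-supplied seed), and the sample name is purely illustrative.

/*
 * Userspace sketch (not kernel code): expand an OSTA CS0 dstring the way
 * udf_name_from_CS0() does and print the '#XXXX' suffix it would append
 * when the name has to be shortened or sanitized.
 */
#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's crc_itu_t(): CRC-16, poly 0x1021, MSB first. */
static uint16_t crc16_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	/* CS0 dstring: compression ID 8, then one byte per code point. */
	const uint8_t ocu[] = { 8, 'R', 'E', 'A', 'D', 'M', 'E' };
	int u_ch = ocu[0] >> 3;		/* 1 byte for ID 8, 2 bytes for ID 16 */
	char out[64];
	int n = 0;

	for (size_t i = 1; i < sizeof(ocu); i += u_ch) {
		uint32_t c = ocu[i];
		if (u_ch > 1)		/* ID 16: big-endian 16-bit code units */
			c = (c << 8) | ocu[i + 1];
		out[n++] = (c < 0x80) ? (char)c : '?';
	}
	out[n] = '\0';

	/* CRC is taken over the compressed bytes, excluding the ID byte. */
	uint16_t crc = crc16_ccitt(0, ocu + 1, sizeof(ocu) - 1);
	printf("expanded name: %s, CRC suffix: #%04X\n", out, crc);
	return 0;
}
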
index 444626ddbd1b9ba2baca676b48a62d90299d515e..d9b42425291e37c6a4845c21dd0e1f61d8a76e86 100644 (file)
@@ -118,8 +118,6 @@ xfs_allocbt_free_block(
        xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
                              XFS_EXTENT_BUSY_SKIP_DISCARD);
        xfs_trans_agbtree_delta(cur->bc_tp, -1);
-
-       xfs_trans_binval(cur->bc_tp, bp);
        return 0;
 }
 
index 919756e3ba53591a9132849a6dc7c57771f1aebb..90928bbe693c03bcb5a74aecaac421ba3132bebe 100644 (file)
  * Small attribute lists are packed as tightly as possible so as
  * to fit into the literal area of the inode.
  */
-
-/*
- * Entries are packed toward the top as tight as possible.
- */
-typedef struct xfs_attr_shortform {
-       struct xfs_attr_sf_hdr {        /* constant-structure header block */
-               __be16  totsize;        /* total bytes in shortform list */
-               __u8    count;  /* count of active entries */
-       } hdr;
-       struct xfs_attr_sf_entry {
-               __uint8_t namelen;      /* actual length of name (no NULL) */
-               __uint8_t valuelen;     /* actual length of value (no NULL) */
-               __uint8_t flags;        /* flags bits (see xfs_attr_leaf.h) */
-               __uint8_t nameval[1];   /* name & value bytes concatenated */
-       } list[1];                      /* variable sized array */
-} xfs_attr_shortform_t;
 typedef struct xfs_attr_sf_hdr xfs_attr_sf_hdr_t;
 typedef struct xfs_attr_sf_entry xfs_attr_sf_entry_t;
 
index ef00156f4f9616178006df4ef6248b1abb2297ab..6a051662d8f9b2d0dfb184689ddc9130cd9bec74 100644 (file)
@@ -912,7 +912,7 @@ xfs_bmap_local_to_extents(
         * We don't want to deal with the case of keeping inode data inline yet.
         * So sending the data fork of a regular inode is invalid.
         */
-       ASSERT(!(S_ISREG(ip->i_d.di_mode) && whichfork == XFS_DATA_FORK));
+       ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
 
@@ -1079,7 +1079,7 @@ xfs_bmap_add_attrfork_local(
        if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
                return 0;
 
-       if (S_ISDIR(ip->i_d.di_mode)) {
+       if (S_ISDIR(VFS_I(ip)->i_mode)) {
                memset(&dargs, 0, sizeof(dargs));
                dargs.geo = ip->i_mount->m_dir_geo;
                dargs.dp = ip;
@@ -1091,7 +1091,7 @@ xfs_bmap_add_attrfork_local(
                return xfs_dir2_sf_to_block(&dargs);
        }
 
-       if (S_ISLNK(ip->i_d.di_mode))
+       if (S_ISLNK(VFS_I(ip)->i_mode))
                return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
                                                 flags, XFS_DATA_FORK,
                                                 xfs_symlink_local_to_remote);
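
These S_ISREG()/S_ISDIR()/S_ISLNK() conversions are part of a wider cleanup in this merge that stops duplicating mode, link count, generation and timestamps in the XFS-private icdinode and reads them through the VFS inode embedded in struct xfs_inode instead; VFS_I() simply returns that embedded inode. A compile-and-run sketch of the pattern, with deliberately simplified stand-in structures (the real definitions are much larger and live in fs/xfs/xfs_inode.h), follows.

/*
 * Sketch of the embed-and-accessor pattern behind VFS_I(), with simplified
 * stand-in structures; nothing here is the real kernel layout.
 */
#include <stdio.h>

struct inode {			/* stand-in for the VFS inode */
	unsigned short	i_mode;
	unsigned int	i_nlink;
};

struct xfs_inode {		/* stand-in for the XFS in-core inode */
	unsigned long	i_ino;
	struct inode	i_vnode;	/* embedded VFS inode */
};

static struct inode *VFS_I(struct xfs_inode *ip)
{
	return &ip->i_vnode;	/* no lookup, just the embedded member */
}

int main(void)
{
	struct xfs_inode ip = {
		.i_ino = 42,
		.i_vnode = { .i_mode = 0040755, .i_nlink = 2 },	/* a directory */
	};

	/* Callers now test the mode via the VFS inode, not ip->i_d.di_mode. */
	if ((VFS_I(&ip)->i_mode & 0170000) == 0040000)
		printf("inode %lu is a directory\n", ip.i_ino);
	return 0;
}
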
index 1637c37bfbaa1cb61ef69e48c52eb95716ecd649..e37508ae589b11ef4009129c82b3a3cce98f3ca6 100644 (file)
@@ -531,7 +531,6 @@ xfs_bmbt_free_block(
 
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
-       xfs_trans_binval(tp, bp);
        return 0;
 }
 
index a0eb18ce3ad38f205f5f3487ea4684937b7b287c..1f88e1ce770f35442f0161466632c68fe0e46153 100644 (file)
@@ -294,6 +294,21 @@ xfs_btree_sblock_verify_crc(
        return true;
 }
 
+static int
+xfs_btree_free_block(
+       struct xfs_btree_cur    *cur,
+       struct xfs_buf          *bp)
+{
+       int                     error;
+
+       error = cur->bc_ops->free_block(cur, bp);
+       if (!error) {
+               xfs_trans_binval(cur->bc_tp, bp);
+               XFS_BTREE_STATS_INC(cur, free);
+       }
+       return error;
+}
+
 /*
  * Delete the btree cursor.
  */
@@ -3209,6 +3224,7 @@ xfs_btree_kill_iroot(
        int                     level;
        int                     index;
        int                     numrecs;
+       int                     error;
 #ifdef DEBUG
        union xfs_btree_ptr     ptr;
        int                     i;
@@ -3272,8 +3288,6 @@ xfs_btree_kill_iroot(
        cpp = xfs_btree_ptr_addr(cur, 1, cblock);
 #ifdef DEBUG
        for (i = 0; i < numrecs; i++) {
-               int             error;
-
                error = xfs_btree_check_ptr(cur, cpp, i, level - 1);
                if (error) {
                        XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
@@ -3283,8 +3297,11 @@ xfs_btree_kill_iroot(
 #endif
        xfs_btree_copy_ptrs(cur, pp, cpp, numrecs);
 
-       cur->bc_ops->free_block(cur, cbp);
-       XFS_BTREE_STATS_INC(cur, free);
+       error = xfs_btree_free_block(cur, cbp);
+       if (error) {
+               XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+               return error;
+       }
 
        cur->bc_bufs[level - 1] = NULL;
        be16_add_cpu(&block->bb_level, -1);
@@ -3317,14 +3334,12 @@ xfs_btree_kill_root(
         */
        cur->bc_ops->set_root(cur, newroot, -1);
 
-       error = cur->bc_ops->free_block(cur, bp);
+       error = xfs_btree_free_block(cur, bp);
        if (error) {
                XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
                return error;
        }
 
-       XFS_BTREE_STATS_INC(cur, free);
-
        cur->bc_bufs[level] = NULL;
        cur->bc_ra[level] = 0;
        cur->bc_nlevels--;
@@ -3830,10 +3845,9 @@ xfs_btree_delrec(
        }
 
        /* Free the deleted block. */
-       error = cur->bc_ops->free_block(cur, rbp);
+       error = xfs_btree_free_block(cur, rbp);
        if (error)
                goto error0;
-       XFS_BTREE_STATS_INC(cur, free);
 
        /*
         * If we joined with the left neighbor, set the buffer in the
index b14bbd6bb05fad090571bcada4e4867e35040b87..8d4d8bce41bf7873fec0fc8211801207a0a46494 100644 (file)
@@ -641,6 +641,22 @@ xfs_dir2_block_leaf_p(struct xfs_dir2_block_tail *btp)
  */
 #define XFS_ATTR_LEAF_MAPSIZE  3       /* how many freespace slots */
 
+/*
+ * Entries are packed toward the top as tight as possible.
+ */
+typedef struct xfs_attr_shortform {
+       struct xfs_attr_sf_hdr {        /* constant-structure header block */
+               __be16  totsize;        /* total bytes in shortform list */
+               __u8    count;  /* count of active entries */
+       } hdr;
+       struct xfs_attr_sf_entry {
+               __uint8_t namelen;      /* actual length of name (no NULL) */
+               __uint8_t valuelen;     /* actual length of value (no NULL) */
+               __uint8_t flags;        /* flags bits (see xfs_attr_leaf.h) */
+               __uint8_t nameval[1];   /* name & value bytes concatenated */
+       } list[1];                      /* variable sized array */
+} xfs_attr_shortform_t;
+
 typedef struct xfs_attr_leaf_map {     /* RLE map of free bytes */
        __be16  base;                     /* base of free region */
        __be16  size;                     /* length of free region */
index 2fb53a5c0a745259d2e18164e8505cb21d704724..af0f9d171f8a012758d778a0bd105e51448e5cf3 100644 (file)
@@ -176,7 +176,7 @@ xfs_dir_isempty(
 {
        xfs_dir2_sf_hdr_t       *sfp;
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        if (dp->i_d.di_size == 0)       /* might happen during shutdown. */
                return 1;
        if (dp->i_d.di_size > XFS_IFORK_DSIZE(dp))
@@ -231,7 +231,7 @@ xfs_dir_init(
        struct xfs_da_args *args;
        int             error;
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        error = xfs_dir_ino_validate(tp->t_mountp, pdp->i_ino);
        if (error)
                return error;
@@ -266,7 +266,7 @@ xfs_dir_createname(
        int                     rval;
        int                     v;              /* type-checking value */
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        if (inum) {
                rval = xfs_dir_ino_validate(tp->t_mountp, inum);
                if (rval)
@@ -364,7 +364,7 @@ xfs_dir_lookup(
        int             v;              /* type-checking value */
        int             lock_mode;
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        XFS_STATS_INC(dp->i_mount, xs_dir_lookup);
 
        /*
@@ -443,7 +443,7 @@ xfs_dir_removename(
        int             rval;
        int             v;              /* type-checking value */
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        XFS_STATS_INC(dp->i_mount, xs_dir_remove);
 
        args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS);
@@ -505,7 +505,7 @@ xfs_dir_replace(
        int             rval;
        int             v;              /* type-checking value */
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
 
        rval = xfs_dir_ino_validate(tp->t_mountp, inum);
        if (rval)
index c679f3c05b63cb535de34e7de5543a75ac57faac..89c21d771e35edbc026eb7fe7cb373280774b162 100644 (file)
@@ -125,16 +125,8 @@ xfs_inobt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
 {
-       xfs_fsblock_t           fsbno;
-       int                     error;
-
-       fsbno = XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp));
-       error = xfs_free_extent(cur->bc_tp, fsbno, 1);
-       if (error)
-               return error;
-
-       xfs_trans_binval(cur->bc_tp, bp);
-       return error;
+       return xfs_free_extent(cur->bc_tp,
+                       XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1);
 }
 
 STATIC int
index 1aabfda669b0bb7bb85dbc8d9889ffd452d17c79..9d9559eb2835a33621e568392fab2c1074022da3 100644 (file)
@@ -195,28 +195,50 @@ xfs_imap_to_bp(
 }
 
 void
-xfs_dinode_from_disk(
-       xfs_icdinode_t          *to,
-       xfs_dinode_t            *from)
+xfs_inode_from_disk(
+       struct xfs_inode        *ip,
+       struct xfs_dinode       *from)
 {
-       to->di_magic = be16_to_cpu(from->di_magic);
-       to->di_mode = be16_to_cpu(from->di_mode);
-       to->di_version = from ->di_version;
+       struct xfs_icdinode     *to = &ip->i_d;
+       struct inode            *inode = VFS_I(ip);
+
+
+       /*
+        * Convert v1 inodes immediately to v2 inode format as this is the
+        * minimum inode version format we support in the rest of the code.
+        */
+       to->di_version = from->di_version;
+       if (to->di_version == 1) {
+               set_nlink(inode, be16_to_cpu(from->di_onlink));
+               to->di_projid_lo = 0;
+               to->di_projid_hi = 0;
+               to->di_version = 2;
+       } else {
+               set_nlink(inode, be32_to_cpu(from->di_nlink));
+               to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
+               to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
+       }
+
        to->di_format = from->di_format;
-       to->di_onlink = be16_to_cpu(from->di_onlink);
        to->di_uid = be32_to_cpu(from->di_uid);
        to->di_gid = be32_to_cpu(from->di_gid);
-       to->di_nlink = be32_to_cpu(from->di_nlink);
-       to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
-       to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
-       memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
        to->di_flushiter = be16_to_cpu(from->di_flushiter);
-       to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
-       to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
-       to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
-       to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
-       to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
-       to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
+
+       /*
+        * Time is signed, so need to convert to signed 32 bit before
+        * Time is signed, so we need to convert to signed 32 bit before
+        * storing it in the inode timestamp, which may be 64 bit. Otherwise
+        * a time before the epoch is converted to a time long after the
+        * epoch on 64 bit systems.
+       inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
+       inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
+       inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
+       inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
+       inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
+       inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
+       inode->i_generation = be32_to_cpu(from->di_gen);
+       inode->i_mode = be16_to_cpu(from->di_mode);
+
        to->di_size = be64_to_cpu(from->di_size);
        to->di_nblocks = be64_to_cpu(from->di_nblocks);
        to->di_extsize = be32_to_cpu(from->di_extsize);
@@ -227,42 +249,96 @@ xfs_dinode_from_disk(
        to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
        to->di_dmstate  = be16_to_cpu(from->di_dmstate);
        to->di_flags    = be16_to_cpu(from->di_flags);
-       to->di_gen      = be32_to_cpu(from->di_gen);
 
        if (to->di_version == 3) {
-               to->di_changecount = be64_to_cpu(from->di_changecount);
+               inode->i_version = be64_to_cpu(from->di_changecount);
                to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
                to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
                to->di_flags2 = be64_to_cpu(from->di_flags2);
-               to->di_ino = be64_to_cpu(from->di_ino);
-               to->di_lsn = be64_to_cpu(from->di_lsn);
-               memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
-               uuid_copy(&to->di_uuid, &from->di_uuid);
        }
 }
 
 void
-xfs_dinode_to_disk(
-       xfs_dinode_t            *to,
-       xfs_icdinode_t          *from)
+xfs_inode_to_disk(
+       struct xfs_inode        *ip,
+       struct xfs_dinode       *to,
+       xfs_lsn_t               lsn)
+{
+       struct xfs_icdinode     *from = &ip->i_d;
+       struct inode            *inode = VFS_I(ip);
+
+       to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
+       to->di_onlink = 0;
+
+       to->di_version = from->di_version;
+       to->di_format = from->di_format;
+       to->di_uid = cpu_to_be32(from->di_uid);
+       to->di_gid = cpu_to_be32(from->di_gid);
+       to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
+       to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
+
+       memset(to->di_pad, 0, sizeof(to->di_pad));
+       to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
+       to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
+       to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
+       to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
+       to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
+       to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
+       to->di_nlink = cpu_to_be32(inode->i_nlink);
+       to->di_gen = cpu_to_be32(inode->i_generation);
+       to->di_mode = cpu_to_be16(inode->i_mode);
+
+       to->di_size = cpu_to_be64(from->di_size);
+       to->di_nblocks = cpu_to_be64(from->di_nblocks);
+       to->di_extsize = cpu_to_be32(from->di_extsize);
+       to->di_nextents = cpu_to_be32(from->di_nextents);
+       to->di_anextents = cpu_to_be16(from->di_anextents);
+       to->di_forkoff = from->di_forkoff;
+       to->di_aformat = from->di_aformat;
+       to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
+       to->di_dmstate = cpu_to_be16(from->di_dmstate);
+       to->di_flags = cpu_to_be16(from->di_flags);
+
+       if (from->di_version == 3) {
+               to->di_changecount = cpu_to_be64(inode->i_version);
+               to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
+               to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
+               to->di_flags2 = cpu_to_be64(from->di_flags2);
+
+               to->di_ino = cpu_to_be64(ip->i_ino);
+               to->di_lsn = cpu_to_be64(lsn);
+               memset(to->di_pad2, 0, sizeof(to->di_pad2));
+               uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
+               to->di_flushiter = 0;
+       } else {
+               to->di_flushiter = cpu_to_be16(from->di_flushiter);
+       }
+}
+
+void
+xfs_log_dinode_to_disk(
+       struct xfs_log_dinode   *from,
+       struct xfs_dinode       *to)
 {
        to->di_magic = cpu_to_be16(from->di_magic);
        to->di_mode = cpu_to_be16(from->di_mode);
-       to->di_version = from ->di_version;
+       to->di_version = from->di_version;
        to->di_format = from->di_format;
-       to->di_onlink = cpu_to_be16(from->di_onlink);
+       to->di_onlink = 0;
        to->di_uid = cpu_to_be32(from->di_uid);
        to->di_gid = cpu_to_be32(from->di_gid);
        to->di_nlink = cpu_to_be32(from->di_nlink);
        to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
        to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
        memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
+
        to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
        to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
        to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
        to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
        to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
        to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
+
        to->di_size = cpu_to_be64(from->di_size);
        to->di_nblocks = cpu_to_be64(from->di_nblocks);
        to->di_extsize = cpu_to_be32(from->di_extsize);
@@ -367,13 +443,10 @@ xfs_iread(
            !(mp->m_flags & XFS_MOUNT_IKEEP)) {
                /* initialise the on-disk inode core */
                memset(&ip->i_d, 0, sizeof(ip->i_d));
-               ip->i_d.di_magic = XFS_DINODE_MAGIC;
-               ip->i_d.di_gen = prandom_u32();
-               if (xfs_sb_version_hascrc(&mp->m_sb)) {
+               VFS_I(ip)->i_generation = prandom_u32();
+               if (xfs_sb_version_hascrc(&mp->m_sb))
                        ip->i_d.di_version = 3;
-                       ip->i_d.di_ino = ip->i_ino;
-                       uuid_copy(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid);
-               } else
+               else
                        ip->i_d.di_version = 2;
                return 0;
        }
@@ -403,7 +476,7 @@ xfs_iread(
         * Otherwise, just get the truly permanent information.
         */
        if (dip->di_mode) {
-               xfs_dinode_from_disk(&ip->i_d, dip);
+               xfs_inode_from_disk(ip, dip);
                error = xfs_iformat_fork(ip, dip);
                if (error)  {
 #ifdef DEBUG
@@ -417,16 +490,10 @@ xfs_iread(
                 * Partial initialisation of the in-core inode. Just the bits
                 * that xfs_ialloc won't overwrite or relies on being correct.
                 */
-               ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
                ip->i_d.di_version = dip->di_version;
-               ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
+               VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
                ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
 
-               if (dip->di_version == 3) {
-                       ip->i_d.di_ino = be64_to_cpu(dip->di_ino);
-                       uuid_copy(&ip->i_d.di_uuid, &dip->di_uuid);
-               }
-
                /*
                 * Make sure to pull in the mode here as well in
                 * case the inode is released without being used.
@@ -434,25 +501,10 @@ xfs_iread(
                 * the inode is already free and not try to mess
                 * with the uninitialized part of it.
                 */
-               ip->i_d.di_mode = 0;
-       }
-
-       /*
-        * Automatically convert version 1 inode formats in memory to version 2
-        * inode format. If the inode is modified, it will get logged and
-        * rewritten as a version 2 inode. We can do this because we set the
-        * superblock feature bit for v2 inodes unconditionally during mount
-        * and it means the reast of the code can assume the inode version is 2
-        * or higher.
-        */
-       if (ip->i_d.di_version == 1) {
-               ip->i_d.di_version = 2;
-               memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
-               ip->i_d.di_nlink = ip->i_d.di_onlink;
-               ip->i_d.di_onlink = 0;
-               xfs_set_projid(ip, 0);
+               VFS_I(ip)->i_mode = 0;
        }
 
+       ASSERT(ip->i_d.di_version >= 2);
        ip->i_delayed_blks = 0;
 
        /*
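
The casts to (int) in xfs_inode_from_disk() above matter because be32_to_cpu() returns an unsigned 32-bit value: assigning that straight to a 64-bit tv_sec zero-extends it, so a pre-1970 timestamp turns into a date far in the future, while casting through a signed 32-bit type sign-extends it correctly. A small standalone demonstration, using 1960 only as an example value:

/* Standalone demo of why the (int) cast matters for pre-epoch timestamps. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	/* What be32_to_cpu() hands back for 1960-01-01T00:00:00Z: unsigned. */
	uint32_t disk_sec = (uint32_t)-315619200;

	int64_t wrong = (int64_t)disk_sec;		/* zero-extended: lands around 2096 */
	int64_t right = (int64_t)(int32_t)disk_sec;	/* sign-extended: back to 1960 */

	printf("zero-extended : %" PRId64 "\n", wrong);
	printf("sign-extended : %" PRId64 "\n", right);
	return 0;
}
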
index 9308c47f2a527dc08b75b66de5d064e0b13e0cfe..7c4dd321b2152915c2d9075222b4d757a08539cf 100644 (file)
 
 struct xfs_inode;
 struct xfs_dinode;
-struct xfs_icdinode;
+
+/*
+ * In-memory representation of the XFS inode. This is held in the in-core
+ * struct xfs_inode and represents the current on-disk values, but the
+ * structure is not in on-disk format. That is, it is always translated to
+ * the on-disk, format-specific structures at the appropriate time.
+ */
+struct xfs_icdinode {
+       __int8_t        di_version;     /* inode version */
+       __int8_t        di_format;      /* format of di_c data */
+       __uint16_t      di_flushiter;   /* incremented on flush */
+       __uint32_t      di_uid;         /* owner's user id */
+       __uint32_t      di_gid;         /* owner's group id */
+       __uint16_t      di_projid_lo;   /* lower part of owner's project id */
+       __uint16_t      di_projid_hi;   /* higher part of owner's project id */
+       xfs_fsize_t     di_size;        /* number of bytes in file */
+       xfs_rfsblock_t  di_nblocks;     /* # of direct & btree blocks used */
+       xfs_extlen_t    di_extsize;     /* basic/minimum extent size for file */
+       xfs_extnum_t    di_nextents;    /* number of extents in data fork */
+       xfs_aextnum_t   di_anextents;   /* number of extents in attribute fork*/
+       __uint8_t       di_forkoff;     /* attr fork offs, <<3 for 64b align */
+       __int8_t        di_aformat;     /* format of attr fork's data */
+       __uint32_t      di_dmevmask;    /* DMIG event mask */
+       __uint16_t      di_dmstate;     /* DMIG state info */
+       __uint16_t      di_flags;       /* random flags, XFS_DIFLAG_... */
+
+       __uint64_t      di_flags2;      /* more random flags */
+
+       xfs_ictimestamp_t di_crtime;    /* time created */
+};
 
 /*
  * Inode location information.  Stored in the inode and passed to
@@ -38,8 +67,11 @@ int  xfs_imap_to_bp(struct xfs_mount *, struct xfs_trans *,
 int    xfs_iread(struct xfs_mount *, struct xfs_trans *,
                  struct xfs_inode *, uint);
 void   xfs_dinode_calc_crc(struct xfs_mount *, struct xfs_dinode *);
-void   xfs_dinode_to_disk(struct xfs_dinode *to, struct xfs_icdinode *from);
-void   xfs_dinode_from_disk(struct xfs_icdinode *to, struct xfs_dinode *from);
+void   xfs_inode_to_disk(struct xfs_inode *ip, struct xfs_dinode *to,
+                         xfs_lsn_t lsn);
+void   xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from);
+void   xfs_log_dinode_to_disk(struct xfs_log_dinode *from,
+                              struct xfs_dinode *to);
 
 #if defined(DEBUG)
 void   xfs_inobp_check(struct xfs_mount *, struct xfs_buf *);
index 0defbd02f62d58bb36e62f2cc4738cf862de4e1c..11faf7df14c8099e49759f51f0315dd5caec6632 100644 (file)
@@ -31,6 +31,7 @@
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_attr_sf.h"
+#include "xfs_da_format.h"
 
 kmem_zone_t *xfs_ifork_zone;
 
@@ -120,7 +121,7 @@ xfs_iformat_fork(
                return -EFSCORRUPTED;
        }
 
-       switch (ip->i_d.di_mode & S_IFMT) {
+       switch (VFS_I(ip)->i_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
index 2653146904153178d474172bcacfec55e7742907..d54a8018b079dd3f0c078e5fdf56cf48a151a545 100644 (file)
@@ -290,6 +290,7 @@ typedef struct xfs_inode_log_format_64 {
        __int32_t               ilf_boffset;    /* off of inode in buffer */
 } xfs_inode_log_format_64_t;
 
+
 /*
  * Flags for xfs_trans_log_inode flags field.
  */
@@ -360,15 +361,15 @@ typedef struct xfs_ictimestamp {
 } xfs_ictimestamp_t;
 
 /*
- * NOTE:  This structure must be kept identical to struct xfs_dinode
- *       except for the endianness annotations.
+ * Define the format of the inode core that is logged. This structure must be
+ * kept identical to struct xfs_dinode except for the endianness annotations.
  */
-typedef struct xfs_icdinode {
+struct xfs_log_dinode {
        __uint16_t      di_magic;       /* inode magic # = XFS_DINODE_MAGIC */
        __uint16_t      di_mode;        /* mode and type of file */
        __int8_t        di_version;     /* inode version */
        __int8_t        di_format;      /* format of di_c data */
-       __uint16_t      di_onlink;      /* old number of links to file */
+       __uint8_t       di_pad3[2];     /* unused in v2/3 inodes */
        __uint32_t      di_uid;         /* owner's user id */
        __uint32_t      di_gid;         /* owner's group id */
        __uint32_t      di_nlink;       /* number of links to file */
@@ -407,13 +408,13 @@ typedef struct xfs_icdinode {
        uuid_t          di_uuid;        /* UUID of the filesystem */
 
        /* structure must be padded to 64 bit alignment */
-} xfs_icdinode_t;
+};
 
-static inline uint xfs_icdinode_size(int version)
+static inline uint xfs_log_dinode_size(int version)
 {
        if (version == 3)
-               return sizeof(struct xfs_icdinode);
-       return offsetof(struct xfs_icdinode, di_next_unlinked);
+               return sizeof(struct xfs_log_dinode);
+       return offsetof(struct xfs_log_dinode, di_next_unlinked);
 }
 
 /*
@@ -495,6 +496,8 @@ enum xfs_blft {
        XFS_BLFT_ATTR_LEAF_BUF,
        XFS_BLFT_ATTR_RMT_BUF,
        XFS_BLFT_SB_BUF,
+       XFS_BLFT_RTBITMAP_BUF,
+       XFS_BLFT_RTSUMMARY_BUF,
        XFS_BLFT_MAX_BUF = (1 << XFS_BLFT_BITS),
 };
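
The renamed xfs_log_dinode_size() in the hunks above sizes the logged inode core with offsetof(): version 2 inodes log only the fields up to di_next_unlinked, while version 3 inodes log the whole structure. A toy illustration of that sizing pattern follows; the field names are made up and do not match the real xfs_log_dinode layout.

/*
 * Toy illustration of the offsetof() sizing used by xfs_log_dinode_size();
 * demo_log_dinode is a hypothetical structure, not the real one.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_log_dinode {
	uint16_t	di_magic;
	uint16_t	di_mode;
	int8_t		di_version;
	uint8_t		di_core_pad[27];	/* rest of the logged v2 core */
	/* fields from here on are not logged for version 2 inodes */
	uint32_t	di_next_unlinked;
	uint32_t	di_crc;
	uint64_t	di_changecount;
};

static size_t demo_log_dinode_size(int version)
{
	if (version == 3)
		return sizeof(struct demo_log_dinode);
	return offsetof(struct demo_log_dinode, di_next_unlinked);
}

int main(void)
{
	printf("v2 logs %zu bytes, v3 logs %zu bytes\n",
	       demo_log_dinode_size(2), demo_log_dinode_size(3));
	return 0;
}
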
 
index f51078f1e92ad4e29b6dbe4c9e4fe8f46f9b2be6..8eed51275bb39b4466f8457b3dc7ba0aa592a583 100644 (file)
@@ -37,7 +37,7 @@ typedef __uint16_t    xfs_qwarncnt_t;
 #define XFS_DQ_PROJ            0x0002          /* project quota */
 #define XFS_DQ_GROUP           0x0004          /* a group quota */
 #define XFS_DQ_DIRTY           0x0008          /* dquot is dirty */
-#define XFS_DQ_FREEING         0x0010          /* dquot is beeing torn down */
+#define XFS_DQ_FREEING         0x0010          /* dquot is being torn down */
 
 #define XFS_DQ_ALLTYPES                (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP)
 
@@ -116,6 +116,7 @@ typedef __uint16_t  xfs_qwarncnt_t;
 #define XFS_QMOPT_DQREPAIR     0x0001000 /* repair dquot if damaged */
 #define XFS_QMOPT_GQUOTA       0x0002000 /* group dquot requested */
 #define XFS_QMOPT_ENOSPC       0x0004000 /* enospc instead of edquot (prj) */
+#define XFS_QMOPT_DQNEXT       0x0008000 /* return next dquot >= this ID */
 
 /*
  * flags to xfs_trans_mod_dquot to indicate which field needs to be
index 9b59ffa1fc198d4934a575af40716837bc54c11b..951c044e24e40d024e5abf48057c0f0942111acc 100644 (file)
  * Realtime allocator bitmap functions shared with userspace.
  */
 
+/*
+ * Real time buffers need verifiers to avoid runtime warnings during IO.
+ * We don't have anything to verify, however, so these are just dummy
+ * operations.
+ */
+static void
+xfs_rtbuf_verify_read(
+       struct xfs_buf  *bp)
+{
+       return;
+}
+
+static void
+xfs_rtbuf_verify_write(
+       struct xfs_buf  *bp)
+{
+       return;
+}
+
+const struct xfs_buf_ops xfs_rtbuf_ops = {
+       .name = "rtbuf",
+       .verify_read = xfs_rtbuf_verify_read,
+       .verify_write = xfs_rtbuf_verify_write,
+};
+
 /*
  * Get a buffer for the bitmap or summary file block specified.
  * The buffer is returned read and locked.
@@ -68,9 +93,12 @@ xfs_rtbuf_get(
        ASSERT(map.br_startblock != NULLFSBLOCK);
        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
                                   XFS_FSB_TO_DADDR(mp, map.br_startblock),
-                                  mp->m_bsize, 0, &bp, NULL);
+                                  mp->m_bsize, 0, &bp, &xfs_rtbuf_ops);
        if (error)
                return error;
+
+       xfs_trans_buf_set_type(tp, bp, issum ? XFS_BLFT_RTSUMMARY_BUF
+                                            : XFS_BLFT_RTBITMAP_BUF);
        *bpp = bp;
        return 0;
 }
@@ -983,7 +1011,7 @@ xfs_rtfree_extent(
            mp->m_sb.sb_rextents) {
                if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM))
                        mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
-               *(__uint64_t *)&mp->m_rbmip->i_d.di_atime = 0;
+               *(__uint64_t *)&VFS_I(mp->m_rbmip)->i_atime = 0;
                xfs_trans_log_inode(tp, mp->m_rbmip, XFS_ILOG_CORE);
        }
        return 0;
index b25bb9a343f33f99ca2bf4392d696c59f80178b4..961e6475a3099bb9acf2c5df67f355f35ffbb3c7 100644 (file)
@@ -27,7 +27,6 @@ extern struct xfs_perag *xfs_perag_get_tag(struct xfs_mount *, xfs_agnumber_t,
 extern void    xfs_perag_put(struct xfs_perag *pag);
 extern int     xfs_initialize_perag_data(struct xfs_mount *, xfs_agnumber_t);
 
-extern void    xfs_sb_calc_crc(struct xfs_buf *bp);
 extern void    xfs_log_sb(struct xfs_trans *tp);
 extern int     xfs_sync_sb(struct xfs_mount *mp, bool wait);
 extern void    xfs_sb_mount_common(struct xfs_mount *mp, struct xfs_sb *sbp);
index 15c3ceb845b91a31353a21123450c820fef26c49..81ac870834da9e63515553e3fa291318acd1e73a 100644 (file)
@@ -53,6 +53,7 @@ extern const struct xfs_buf_ops xfs_dquot_buf_ra_ops;
 extern const struct xfs_buf_ops xfs_sb_buf_ops;
 extern const struct xfs_buf_ops xfs_sb_quiet_buf_ops;
 extern const struct xfs_buf_ops xfs_symlink_buf_ops;
+extern const struct xfs_buf_ops xfs_rtbuf_ops;
 
 /*
  * Transaction types.  Used to distinguish types of buffers. These never reach
index 379c089fb0514a5a34934b51b790181567a3384f..14ac9822b3036284692228ad57f1956029bc958b 100644 (file)
 #include <linux/pagevec.h>
 #include <linux/writeback.h>
 
+/* flags for direct write completions */
+#define XFS_DIO_FLAG_UNWRITTEN (1 << 0)
+#define XFS_DIO_FLAG_APPEND    (1 << 1)
+
 void
 xfs_count_page_state(
        struct page             *page,
@@ -214,10 +218,12 @@ xfs_end_io(
        struct xfs_inode *ip = XFS_I(ioend->io_inode);
        int             error = 0;
 
-       if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+       /*
+        * Set an error if the mount has shut down and proceed with end I/O
+        * processing so it can perform whatever cleanups are necessary.
+        */
+       if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                ioend->io_error = -EIO;
-               goto done;
-       }
 
        /*
         * For unwritten extents we need to issue transactions to convert a
@@ -1238,27 +1244,8 @@ xfs_vm_releasepage(
 }
 
 /*
- * When we map a DIO buffer, we may need to attach an ioend that describes the
- * type of write IO we are doing. This passes to the completion function the
- * operations it needs to perform. If the mapping is for an overwrite wholly
- * within the EOF then we don't need an ioend and so we don't allocate one.
- * This avoids the unnecessary overhead of allocating and freeing ioends for
- * workloads that don't require transactions on IO completion.
- *
- * If we get multiple mappings in a single IO, we might be mapping different
- * types. But because the direct IO can only have a single private pointer, we
- * need to ensure that:
- *
- * a) i) the ioend spans the entire region of unwritten mappings; or
- *    ii) the ioend spans all the mappings that cross or are beyond EOF; and
- * b) if it contains unwritten extents, it is *permanently* marked as such
- *
- * We could do this by chaining ioends like buffered IO does, but we only
- * actually get one IO completion callback from the direct IO, and that spans
- * the entire IO regardless of how many mappings and IOs are needed to complete
- * the DIO. There is only going to be one reference to the ioend and its life
- * cycle is constrained by the DIO completion code. hence we don't need
- * reference counting here.
+ * When we map a DIO buffer, we may need to pass flags to
+ * xfs_end_io_direct_write to tell it what kind of write IO we are doing.
  *
  * Note that for DIO, an IO to the highest supported file block offset (i.e.
  * 2^63 - 1FSB bytes) will result in the offset + count overflowing a signed 64
@@ -1266,68 +1253,26 @@ xfs_vm_releasepage(
  * extending the file size. We won't know for sure until IO completion is run
  * and the actual max write offset is communicated to the IO completion
  * routine.
- *
- * For DAX page faults, we are preparing to never see unwritten extents here,
- * nor should we ever extend the inode size. Hence we will soon have nothing to
- * do here for this case, ensuring we don't have to provide an IO completion
- * callback to free an ioend that we don't actually need for a fault into the
- * page at offset (2^63 - 1FSB) bytes.
  */
-
 static void
 xfs_map_direct(
        struct inode            *inode,
        struct buffer_head      *bh_result,
        struct xfs_bmbt_irec    *imap,
-       xfs_off_t               offset,
-       bool                    dax_fault)
+       xfs_off_t               offset)
 {
-       struct xfs_ioend        *ioend;
+       uintptr_t               *flags = (uintptr_t *)&bh_result->b_private;
        xfs_off_t               size = bh_result->b_size;
-       int                     type;
 
-       if (ISUNWRITTEN(imap))
-               type = XFS_IO_UNWRITTEN;
-       else
-               type = XFS_IO_OVERWRITE;
-
-       trace_xfs_gbmap_direct(XFS_I(inode), offset, size, type, imap);
-
-       if (dax_fault) {
-               ASSERT(type == XFS_IO_OVERWRITE);
-               trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
-                                           imap);
-               return;
-       }
-
-       if (bh_result->b_private) {
-               ioend = bh_result->b_private;
-               ASSERT(ioend->io_size > 0);
-               ASSERT(offset >= ioend->io_offset);
-               if (offset + size > ioend->io_offset + ioend->io_size)
-                       ioend->io_size = offset - ioend->io_offset + size;
-
-               if (type == XFS_IO_UNWRITTEN && type != ioend->io_type)
-                       ioend->io_type = XFS_IO_UNWRITTEN;
-
-               trace_xfs_gbmap_direct_update(XFS_I(inode), ioend->io_offset,
-                                             ioend->io_size, ioend->io_type,
-                                             imap);
-       } else if (type == XFS_IO_UNWRITTEN ||
-                  offset + size > i_size_read(inode) ||
-                  offset + size < 0) {
-               ioend = xfs_alloc_ioend(inode, type);
-               ioend->io_offset = offset;
-               ioend->io_size = size;
+       trace_xfs_get_blocks_map_direct(XFS_I(inode), offset, size,
+               ISUNWRITTEN(imap) ? XFS_IO_UNWRITTEN : XFS_IO_OVERWRITE, imap);
 
-               bh_result->b_private = ioend;
+       if (ISUNWRITTEN(imap)) {
+               *flags |= XFS_DIO_FLAG_UNWRITTEN;
+               set_buffer_defer_completion(bh_result);
+       } else if (offset + size > i_size_read(inode) || offset + size < 0) {
+               *flags |= XFS_DIO_FLAG_APPEND;
                set_buffer_defer_completion(bh_result);
-
-               trace_xfs_gbmap_direct_new(XFS_I(inode), offset, size, type,
-                                          imap);
-       } else {
-               trace_xfs_gbmap_direct_none(XFS_I(inode), offset, size, type,
-                                           imap);
        }
 }
 
@@ -1498,9 +1443,12 @@ __xfs_get_blocks(
                if (ISUNWRITTEN(&imap))
                        set_buffer_unwritten(bh_result);
                /* direct IO needs special help */
-               if (create && direct)
-                       xfs_map_direct(inode, bh_result, &imap, offset,
-                                      dax_fault);
+               if (create && direct) {
+                       if (dax_fault)
+                               ASSERT(!ISUNWRITTEN(&imap));
+                       else
+                               xfs_map_direct(inode, bh_result, &imap, offset);
+               }
        }
 
        /*
@@ -1570,42 +1518,50 @@ xfs_get_blocks_dax_fault(
        return __xfs_get_blocks(inode, iblock, bh_result, create, true, true);
 }
 
-static void
-__xfs_end_io_direct_write(
-       struct inode            *inode,
-       struct xfs_ioend        *ioend,
+/*
+ * Complete a direct I/O write request.
+ *
+ * xfs_map_direct passes us some flags in the private data to tell us what to
+ * do.  If no flags are set, then the write IO is an overwrite wholly within
+ * the existing allocated file size and so there is nothing for us to do.
+ *
+ * Note that in this case the completion can be called in interrupt context,
+ * whereas if we have flags set we will always be called in task context
+ * (i.e. from a workqueue).
+ */
+STATIC int
+xfs_end_io_direct_write(
+       struct kiocb            *iocb,
        loff_t                  offset,
-       ssize_t                 size)
+       ssize_t                 size,
+       void                    *private)
 {
-       struct xfs_mount        *mp = XFS_I(inode)->i_mount;
+       struct inode            *inode = file_inode(iocb->ki_filp);
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       uintptr_t               flags = (uintptr_t)private;
+       int                     error = 0;
 
-       if (XFS_FORCED_SHUTDOWN(mp) || ioend->io_error)
-               goto out_end_io;
+       trace_xfs_end_io_direct_write(ip, offset, size);
 
-       /*
-        * dio completion end_io functions are only called on writes if more
-        * than 0 bytes was written.
-        */
-       ASSERT(size > 0);
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
 
-       /*
-        * The ioend only maps whole blocks, while the IO may be sector aligned.
-        * Hence the ioend offset/size may not match the IO offset/size exactly.
-        * Because we don't map overwrites within EOF into the ioend, the offset
-        * may not match, but only if the endio spans EOF.  Either way, write
-        * the IO sizes into the ioend so that completion processing does the
-        * right thing.
-        */
-       ASSERT(offset + size <= ioend->io_offset + ioend->io_size);
-       ioend->io_size = size;
-       ioend->io_offset = offset;
+       if (size <= 0)
+               return size;
 
        /*
-        * The ioend tells us whether we are doing unwritten extent conversion
+        * The flags tell us whether we are doing unwritten extent conversions
         * or an append transaction that updates the on-disk file size. These
         * cases are the only cases where we should *potentially* be needing
         * to update the VFS inode size.
-        *
+        */
+       if (flags == 0) {
+               ASSERT(offset + size <= i_size_read(inode));
+               return 0;
+       }
+
+       /*
         * We need to update the in-core inode size here so that we don't end up
         * with the on-disk inode size being outside the in-core inode size. We
         * have no other method of updating EOF for AIO, so always do it here
@@ -1616,91 +1572,56 @@ __xfs_end_io_direct_write(
         * here can result in EOF moving backwards and Bad Things Happen when
         * that occurs.
         */
-       spin_lock(&XFS_I(inode)->i_flags_lock);
+       spin_lock(&ip->i_flags_lock);
        if (offset + size > i_size_read(inode))
                i_size_write(inode, offset + size);
-       spin_unlock(&XFS_I(inode)->i_flags_lock);
+       spin_unlock(&ip->i_flags_lock);
 
-       /*
-        * If we are doing an append IO that needs to update the EOF on disk,
-        * do the transaction reserve now so we can use common end io
-        * processing. Stashing the error (if there is one) in the ioend will
-        * result in the ioend processing passing on the error if it is
-        * possible as we can't return it from here.
-        */
-       if (ioend->io_type == XFS_IO_OVERWRITE)
-               ioend->io_error = xfs_setfilesize_trans_alloc(ioend);
+       if (flags & XFS_DIO_FLAG_UNWRITTEN) {
+               trace_xfs_end_io_direct_write_unwritten(ip, offset, size);
 
-out_end_io:
-       xfs_end_io(&ioend->io_work);
-       return;
-}
+               error = xfs_iomap_write_unwritten(ip, offset, size);
+       } else if (flags & XFS_DIO_FLAG_APPEND) {
+               struct xfs_trans *tp;
 
-/*
- * Complete a direct I/O write request.
- *
- * The ioend structure is passed from __xfs_get_blocks() to tell us what to do.
- * If no ioend exists (i.e. @private == NULL) then the write IO is an overwrite
- * wholly within the EOF and so there is nothing for us to do. Note that in this
- * case the completion can be called in interrupt context, whereas if we have an
- * ioend we will always be called in task context (i.e. from a workqueue).
- */
-STATIC void
-xfs_end_io_direct_write(
-       struct kiocb            *iocb,
-       loff_t                  offset,
-       ssize_t                 size,
-       void                    *private)
-{
-       struct inode            *inode = file_inode(iocb->ki_filp);
-       struct xfs_ioend        *ioend = private;
+               trace_xfs_end_io_direct_write_append(ip, offset, size);
 
-       trace_xfs_gbmap_direct_endio(XFS_I(inode), offset, size,
-                                    ioend ? ioend->io_type : 0, NULL);
-
-       if (!ioend) {
-               ASSERT(offset + size <= i_size_read(inode));
-               return;
+               tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+               error = xfs_trans_reserve(tp, &M_RES(mp)->tr_fsyncts, 0, 0);
+               if (error) {
+                       xfs_trans_cancel(tp);
+                       return error;
+               }
+               error = xfs_setfilesize(ip, tp, offset, size);
        }
 
-       __xfs_end_io_direct_write(inode, ioend, offset, size);
+       return error;
 }
 
-static inline ssize_t
-xfs_vm_do_dio(
-       struct inode            *inode,
+STATIC ssize_t
+xfs_vm_direct_IO(
        struct kiocb            *iocb,
        struct iov_iter         *iter,
-       loff_t                  offset,
-       void                    (*endio)(struct kiocb   *iocb,
-                                        loff_t         offset,
-                                        ssize_t        size,
-                                        void           *private),
-       int                     flags)
+       loff_t                  offset)
 {
+       struct inode            *inode = iocb->ki_filp->f_mapping->host;
+       dio_iodone_t            *endio = NULL;
+       int                     flags = 0;
        struct block_device     *bdev;
 
-       if (IS_DAX(inode))
+       if (iov_iter_rw(iter) == WRITE) {
+               endio = xfs_end_io_direct_write;
+               flags = DIO_ASYNC_EXTEND;
+       }
+
+       if (IS_DAX(inode)) {
                return dax_do_io(iocb, inode, iter, offset,
                                 xfs_get_blocks_direct, endio, 0);
+       }
 
        bdev = xfs_find_bdev_for_inode(inode);
        return  __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
-                                    xfs_get_blocks_direct, endio, NULL, flags);
-}
-
-STATIC ssize_t
-xfs_vm_direct_IO(
-       struct kiocb            *iocb,
-       struct iov_iter         *iter,
-       loff_t                  offset)
-{
-       struct inode            *inode = iocb->ki_filp->f_mapping->host;
-
-       if (iov_iter_rw(iter) == WRITE)
-               return xfs_vm_do_dio(inode, iocb, iter, offset,
-                                    xfs_end_io_direct_write, DIO_ASYNC_EXTEND);
-       return xfs_vm_do_dio(inode, iocb, iter, offset, NULL, 0);
+                       xfs_get_blocks_direct, endio, NULL, flags);
 }
 
 /*
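The rewritten completion path above no longer passes an ioend through the dio ->end_io hook; instead xfs_end_io_direct_write() recovers a couple of flag bits from the opaque private pointer with a uintptr_t cast. A minimal userspace sketch of that encode/decode idea follows; the DIO_FLAG_* values here are made up for illustration and are not the real XFS_DIO_FLAG_* definitions, whose setup lives in the get_blocks path outside this hunk.

/* Illustrative only: packing completion flags into the dio "private" pointer. */
#include <stdint.h>
#include <stdio.h>

#define DIO_FLAG_UNWRITTEN	(1u << 0)	/* assumed values, for illustration */
#define DIO_FLAG_APPEND		(1u << 1)

/* submission side: stash the flags in the opaque cookie */
static void *pack_dio_flags(unsigned int flags)
{
	return (void *)(uintptr_t)flags;
}

/* completion side: recover them, as xfs_end_io_direct_write() does above */
static unsigned int unpack_dio_flags(void *private)
{
	return (unsigned int)(uintptr_t)private;
}

int main(void)
{
	void *private = pack_dio_flags(DIO_FLAG_UNWRITTEN);
	unsigned int flags = unpack_dio_flags(private);

	if (flags & DIO_FLAG_UNWRITTEN)
		printf("completion must convert unwritten extents\n");
	else if (flags & DIO_FLAG_APPEND)
		printf("completion must update the on-disk file size\n");
	else
		printf("overwrite wholly within EOF: nothing to do\n");
	return 0;
}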
@@ -1783,14 +1704,22 @@ xfs_vm_write_failed(
                if (block_start >= to)
                        break;
 
-               if (!buffer_delay(bh))
+               /*
+                * Process delalloc and unwritten buffers beyond EOF. We can
+                * encounter unwritten buffers in the event that a file has
+                * post-EOF unwritten extents and an extending write happens to
+                * fail (e.g., an unaligned write that also involves a delalloc
+                * to the same page).
+                */
+               if (!buffer_delay(bh) && !buffer_unwritten(bh))
                        continue;
 
                if (!buffer_new(bh) && block_offset < i_size_read(inode))
                        continue;
 
-               xfs_vm_kill_delalloc_range(inode, block_offset,
-                                          block_offset + bh->b_size);
+               if (buffer_delay(bh))
+                       xfs_vm_kill_delalloc_range(inode, block_offset,
+                                                  block_offset + bh->b_size);
 
                /*
                 * This buffer does not contain data anymore. make sure anyone
@@ -1801,6 +1730,7 @@ xfs_vm_write_failed(
                clear_buffer_mapped(bh);
                clear_buffer_new(bh);
                clear_buffer_dirty(bh);
+               clear_buffer_unwritten(bh);
        }
 
 }
index 45ec9e40150c3dc44bc2268c274472f108ad188b..fd7f51c39b3fe5782c2383feb30f6117d02cfc25 100644 (file)
@@ -202,10 +202,12 @@ xfs_bmap_rtalloc(
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
 
        /*
-        * Lock out other modifications to the RT bitmap inode.
+        * Lock out modifications to both the RT bitmap and summary inodes.
         */
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+       xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
 
        /*
         * If it's an allocation to an empty file at offset 0,
@@ -821,7 +823,7 @@ bool
 xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
 {
        /* prealloc/delalloc exists only on regular files */
-       if (!S_ISREG(ip->i_d.di_mode))
+       if (!S_ISREG(VFS_I(ip)->i_mode))
                return false;
 
        /*
@@ -1726,7 +1728,7 @@ xfs_swap_extents(
        xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);
 
        /* Verify that both files have the same format */
-       if ((ip->i_d.di_mode & S_IFMT) != (tip->i_d.di_mode & S_IFMT)) {
+       if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
                error = -EINVAL;
                goto out_unlock;
        }
index 435c7de42e5f322a82845382ad7e1fa54dfe3d0b..9a2191b911377f94e38d81d57d5d037a7e19ae8b 100644 (file)
@@ -650,7 +650,7 @@ xfs_buf_read_map(
        if (bp) {
                trace_xfs_buf_read(bp, flags, _RET_IP_);
 
-               if (!XFS_BUF_ISDONE(bp)) {
+               if (!(bp->b_flags & XBF_DONE)) {
                        XFS_STATS_INC(target->bt_mount, xb_get_read);
                        bp->b_ops = ops;
                        _xfs_buf_read(bp, flags);
index c75721acd8679687ae403d9eb5ee463a3cb0dc17..4eb89bd4ee73b6f4265eb63b8238e9571150bf26 100644 (file)
@@ -302,6 +302,7 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
 
 /* Buffer Utility Routines */
 extern void *xfs_buf_offset(struct xfs_buf *, size_t);
+extern void xfs_buf_stale(struct xfs_buf *bp);
 
 /* Delayed Write Buffer Routines */
 extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
@@ -312,31 +313,6 @@ extern int xfs_buf_delwri_submit_nowait(struct list_head *);
 extern int xfs_buf_init(void);
 extern void xfs_buf_terminate(void);
 
-#define XFS_BUF_ZEROFLAGS(bp) \
-       ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
-                           XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
-                           XBF_WRITE_FAIL))
-
-void xfs_buf_stale(struct xfs_buf *bp);
-#define XFS_BUF_UNSTALE(bp)    ((bp)->b_flags &= ~XBF_STALE)
-#define XFS_BUF_ISSTALE(bp)    ((bp)->b_flags & XBF_STALE)
-
-#define XFS_BUF_DONE(bp)       ((bp)->b_flags |= XBF_DONE)
-#define XFS_BUF_UNDONE(bp)     ((bp)->b_flags &= ~XBF_DONE)
-#define XFS_BUF_ISDONE(bp)     ((bp)->b_flags & XBF_DONE)
-
-#define XFS_BUF_ASYNC(bp)      ((bp)->b_flags |= XBF_ASYNC)
-#define XFS_BUF_UNASYNC(bp)    ((bp)->b_flags &= ~XBF_ASYNC)
-#define XFS_BUF_ISASYNC(bp)    ((bp)->b_flags & XBF_ASYNC)
-
-#define XFS_BUF_READ(bp)       ((bp)->b_flags |= XBF_READ)
-#define XFS_BUF_UNREAD(bp)     ((bp)->b_flags &= ~XBF_READ)
-#define XFS_BUF_ISREAD(bp)     ((bp)->b_flags & XBF_READ)
-
-#define XFS_BUF_WRITE(bp)      ((bp)->b_flags |= XBF_WRITE)
-#define XFS_BUF_UNWRITE(bp)    ((bp)->b_flags &= ~XBF_WRITE)
-#define XFS_BUF_ISWRITE(bp)    ((bp)->b_flags & XBF_WRITE)
-
 /*
  * These macros use the IO block map rather than b_bn. b_bn is now really
  * just for the buffer cache index for cached buffers. As IO does not use b_bn
index 7e986da34f6cb40ad3aca9e9845f81a070dd2d4d..99e91a0e554ea6512ce5eb43cb8a338804f550ae 100644 (file)
@@ -431,7 +431,7 @@ xfs_buf_item_unpin(
        if (freed && stale) {
                ASSERT(bip->bli_flags & XFS_BLI_STALE);
                ASSERT(xfs_buf_islocked(bp));
-               ASSERT(XFS_BUF_ISSTALE(bp));
+               ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
 
                trace_xfs_buf_item_unpin_stale(bip);
@@ -493,7 +493,7 @@ xfs_buf_item_unpin(
                xfs_buf_hold(bp);
                bp->b_flags |= XBF_ASYNC;
                xfs_buf_ioerror(bp, -EIO);
-               XFS_BUF_UNDONE(bp);
+               bp->b_flags &= ~XBF_DONE;
                xfs_buf_stale(bp);
                xfs_buf_ioend(bp);
        }
@@ -1067,7 +1067,7 @@ xfs_buf_iodone_callbacks(
         */
        if (XFS_FORCED_SHUTDOWN(mp)) {
                xfs_buf_stale(bp);
-               XFS_BUF_DONE(bp);
+               bp->b_flags |= XBF_DONE;
                trace_xfs_buf_item_iodone(bp, _RET_IP_);
                goto do_callbacks;
        }
@@ -1090,7 +1090,7 @@ xfs_buf_iodone_callbacks(
         * errors tend to affect the whole device and a failing log write
         * will make us give up.  But we really ought to do better here.
         */
-       if (XFS_BUF_ISASYNC(bp)) {
+       if (bp->b_flags & XBF_ASYNC) {
                ASSERT(bp->b_iodone != NULL);
 
                trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
@@ -1113,7 +1113,7 @@ xfs_buf_iodone_callbacks(
         * sure to return the error to the caller of xfs_bwrite().
         */
        xfs_buf_stale(bp);
-       XFS_BUF_DONE(bp);
+       bp->b_flags |= XBF_DONE;
 
        trace_xfs_buf_error_relse(bp, _RET_IP_);
 
index 642d55d100758b10fb3b9deec90de526707c3d98..93b3ab0c54350fbdd6e977e787d6b7c3911b792d 100644 (file)
@@ -665,7 +665,7 @@ xfs_readdir(
        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return -EIO;
 
-       ASSERT(S_ISDIR(dp->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
        XFS_STATS_INC(dp->i_mount, xs_dir_getdents);
 
        args.dp = dp;
index 9c44d38dcd1f8ac9a11525e1a4e651a8e298b404..316b2a1bdba5f6da82f1bad0dcbc0708151a59d2 100644 (file)
@@ -92,26 +92,28 @@ xfs_qm_adjust_dqlimits(
 {
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_disk_dquot   *d = &dq->q_core;
+       struct xfs_def_quota    *defq;
        int                     prealloc = 0;
 
        ASSERT(d->d_id);
+       defq = xfs_get_defquota(dq, q);
 
-       if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
-               d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
+       if (defq->bsoftlimit && !d->d_blk_softlimit) {
+               d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
                prealloc = 1;
        }
-       if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
-               d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
+       if (defq->bhardlimit && !d->d_blk_hardlimit) {
+               d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
                prealloc = 1;
        }
-       if (q->qi_isoftlimit && !d->d_ino_softlimit)
-               d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
-       if (q->qi_ihardlimit && !d->d_ino_hardlimit)
-               d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
-       if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
-               d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
-       if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
-               d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
+       if (defq->isoftlimit && !d->d_ino_softlimit)
+               d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
+       if (defq->ihardlimit && !d->d_ino_hardlimit)
+               d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
+       if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
+               d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
+       if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
+               d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
 
        if (prealloc)
                xfs_dquot_set_prealloc_limits(dq);
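The hunk above stops reading default limits straight out of struct xfs_quotainfo and instead asks xfs_get_defquota() for the defaults matching the dquot's type. That helper is not shown here; the sketch below is only a guess at the shape of such a per-type selector, with assumed struct and field names.

/* Illustrative only: pick the default limits that match a quota type. */
#include <stdio.h>

struct def_quota {			/* assumed layout, not the real xfs_def_quota */
	unsigned long long bhardlimit;
	unsigned long long bsoftlimit;
	unsigned long long ihardlimit;
	unsigned long long isoftlimit;
};

enum quota_type { QTYPE_USER, QTYPE_GROUP, QTYPE_PROJECT };

struct quota_info {
	struct def_quota usr_default;	/* assumed field names */
	struct def_quota grp_default;
	struct def_quota prj_default;
};

static struct def_quota *get_defquota(struct quota_info *q, enum quota_type type)
{
	switch (type) {
	case QTYPE_USER:	return &q->usr_default;
	case QTYPE_GROUP:	return &q->grp_default;
	default:		return &q->prj_default;
	}
}

int main(void)
{
	struct quota_info qi = { .grp_default = { .bsoftlimit = 1024 } };

	/* defaults are now chosen per type rather than read from global fields */
	printf("group block soft limit default: %llu\n",
	       (unsigned long long)get_defquota(&qi, QTYPE_GROUP)->bsoftlimit);
	return 0;
}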
@@ -232,7 +234,8 @@ xfs_qm_init_dquot_blk(
 {
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        xfs_dqblk_t     *d;
-       int             curid, i;
+       xfs_dqid_t      curid;
+       int             i;
 
        ASSERT(tp);
        ASSERT(xfs_buf_islocked(bp));
@@ -243,7 +246,6 @@ xfs_qm_init_dquot_blk(
         * ID of the first dquot in the block - id's are zero based.
         */
        curid = id - (id % q->qi_dqperchunk);
-       ASSERT(curid >= 0);
        memset(d, 0, BBTOB(q->qi_dqchunklen));
        for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
                d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
@@ -464,12 +466,13 @@ xfs_qm_dqtobp(
        struct xfs_bmbt_irec    map;
        int                     nmaps = 1, error;
        struct xfs_buf          *bp;
-       struct xfs_inode        *quotip = xfs_dq_to_quota_inode(dqp);
+       struct xfs_inode        *quotip;
        struct xfs_mount        *mp = dqp->q_mount;
        xfs_dqid_t              id = be32_to_cpu(dqp->q_core.d_id);
        struct xfs_trans        *tp = (tpp ? *tpp : NULL);
        uint                    lock_mode;
 
+       quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
        dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 
        lock_mode = xfs_ilock_data_map_shared(quotip);
@@ -684,6 +687,56 @@ error0:
        return error;
 }
 
+/*
+ * Advance to the next id in the current chunk, or if at the
+ * end of the chunk, skip ahead to first id in next allocated chunk
+ * using the SEEK_DATA interface.
+ */
+int
+xfs_dq_get_next_id(
+       xfs_mount_t             *mp,
+       uint                    type,
+       xfs_dqid_t              *id,
+       loff_t                  eof)
+{
+       struct xfs_inode        *quotip;
+       xfs_fsblock_t           start;
+       loff_t                  offset;
+       uint                    lock;
+       xfs_dqid_t              next_id;
+       int                     error = 0;
+
+       /* Simple advance */
+       next_id = *id + 1;
+
+       /* If new ID is within the current chunk, advancing it sufficed */
+       if (next_id % mp->m_quotainfo->qi_dqperchunk) {
+               *id = next_id;
+               return 0;
+       }
+
+       /* Nope, next_id is now past the current chunk, so find the next one */
+       start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
+
+       quotip = xfs_quota_inode(mp, type);
+       lock = xfs_ilock_data_map_shared(quotip);
+
+       offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
+                                     eof, SEEK_DATA);
+       if (offset < 0)
+               error = offset;
+
+       xfs_iunlock(quotip, lock);
+
+       /* -ENXIO is essentially "no more data" */
+       if (error)
+               return (error == -ENXIO ? -ENOENT : error);
+
+       /* Convert next data offset back to a quota id */
+       *id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
+       return 0;
+}
+
 /*
  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
  * a locked dquot, doing an allocation (if requested) as needed.
@@ -704,6 +757,7 @@ xfs_qm_dqget(
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
        struct xfs_dquot        *dqp;
+       loff_t                  eof = 0;
        int                     error;
 
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -731,6 +785,21 @@ xfs_qm_dqget(
        }
 #endif
 
+       /* Get the end of the quota file if we need it */
+       if (flags & XFS_QMOPT_DQNEXT) {
+               struct xfs_inode        *quotip;
+               xfs_fileoff_t           last;
+               uint                    lock_mode;
+
+               quotip = xfs_quota_inode(mp, type);
+               lock_mode = xfs_ilock_data_map_shared(quotip);
+               error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
+               xfs_iunlock(quotip, lock_mode);
+               if (error)
+                       return error;
+               eof = XFS_FSB_TO_B(mp, last);
+       }
+
 restart:
        mutex_lock(&qi->qi_tree_lock);
        dqp = radix_tree_lookup(tree, id);
@@ -744,6 +813,18 @@ restart:
                        goto restart;
                }
 
+               /* uninit / unused quota found in radix tree, keep looking */
+               if (flags & XFS_QMOPT_DQNEXT) {
+                       if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+                               xfs_dqunlock(dqp);
+                               mutex_unlock(&qi->qi_tree_lock);
+                               error = xfs_dq_get_next_id(mp, type, &id, eof);
+                               if (error)
+                                       return error;
+                               goto restart;
+                       }
+               }
+
                dqp->q_nrefs++;
                mutex_unlock(&qi->qi_tree_lock);
 
@@ -770,6 +851,13 @@ restart:
        if (ip)
                xfs_ilock(ip, XFS_ILOCK_EXCL);
 
+       /* If we are asked to find next active id, keep looking */
+       if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
+               error = xfs_dq_get_next_id(mp, type, &id, eof);
+               if (!error)
+                       goto restart;
+       }
+
        if (error)
                return error;
 
@@ -820,6 +908,17 @@ restart:
        qi->qi_dquots++;
        mutex_unlock(&qi->qi_tree_lock);
 
+       /* If we are asked to find next active id, keep looking */
+       if (flags & XFS_QMOPT_DQNEXT) {
+               if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+                       xfs_qm_dqput(dqp);
+                       error = xfs_dq_get_next_id(mp, type, &id, eof);
+                       if (error)
+                               return error;
+                       goto restart;
+               }
+       }
+
  dqret:
        ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
        trace_xfs_dqget_miss(dqp);
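The DQNEXT lookup above relies on SEEK_DATA semantics: once the candidate id falls outside the current dquot chunk, the quota inode is treated as a sparse file and the next allocated chunk is found by seeking for data, with -ENXIO meaning there is nothing further. The same behaviour can be observed from userspace on any sparse file; a small Linux-specific sketch (assumes a file path argument, defaulting to a hypothetical sparse.img):

/* Illustrative only: walk the allocated regions of a sparse file with SEEK_DATA. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "sparse.img";	/* hypothetical file */
	int fd = open(path, O_RDONLY);
	off_t pos = 0;

	if (fd < 0)
		return 1;

	for (;;) {
		off_t data = lseek(fd, pos, SEEK_DATA);
		if (data < 0)			/* ENXIO: no more data past pos */
			break;
		off_t hole = lseek(fd, data, SEEK_HOLE);
		printf("data from %lld to %lld\n", (long long)data, (long long)hole);
		pos = hole;
	}
	close(fd);
	return 0;
}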
index 652cd3c5b58c1cac1562239c9c644a19dbe588b7..2816d42507bc8ab7cf00fecb775b11e19dfb6088 100644 (file)
@@ -152,7 +152,7 @@ xfs_nfs_get_inode(
                return ERR_PTR(error);
        }
 
-       if (ip->i_d.di_gen != generation) {
+       if (VFS_I(ip)->i_generation != generation) {
                IRELE(ip);
                return ERR_PTR(-ESTALE);
        }
index 52883ac3cf84c06761afcf0792bbafcf781d509b..ac0fd32de31e4e5455e43da208cdef4861710a21 100644 (file)
@@ -156,9 +156,9 @@ xfs_update_prealloc_flags(
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
        if (!(flags & XFS_PREALLOC_INVISIBLE)) {
-               ip->i_d.di_mode &= ~S_ISUID;
-               if (ip->i_d.di_mode & S_IXGRP)
-                       ip->i_d.di_mode &= ~S_ISGID;
+               VFS_I(ip)->i_mode &= ~S_ISUID;
+               if (VFS_I(ip)->i_mode & S_IXGRP)
+                       VFS_I(ip)->i_mode &= ~S_ISGID;
                xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        }
 
@@ -1337,31 +1337,31 @@ out:
        return found;
 }
 
-STATIC loff_t
-xfs_seek_hole_data(
-       struct file             *file,
+/*
+ * The caller must hold the inode locked with xfs_ilock_data_map_shared();
+ * can we craft an appropriate ASSERT for that?
+ *
+ * The explicit @end parameter exists because the VFS-level lseek interface
+ * is defined such that any offset past i_size shall return -ENXIO, but the
+ * quota code does not maintain i_size and wants to SEEK_DATA past it.
+ */
+loff_t
+__xfs_seek_hole_data(
+       struct inode            *inode,
        loff_t                  start,
+       loff_t                  end,
        int                     whence)
 {
-       struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        struct xfs_mount        *mp = ip->i_mount;
        loff_t                  uninitialized_var(offset);
-       xfs_fsize_t             isize;
        xfs_fileoff_t           fsbno;
-       xfs_filblks_t           end;
-       uint                    lock;
+       xfs_filblks_t           lastbno;
        int                     error;
 
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return -EIO;
-
-       lock = xfs_ilock_data_map_shared(ip);
-
-       isize = i_size_read(inode);
-       if (start >= isize) {
+       if (start >= end) {
                error = -ENXIO;
-               goto out_unlock;
+               goto out_error;
        }
 
        /*
@@ -1369,22 +1369,22 @@ xfs_seek_hole_data(
         * by fsbno to the end block of the file.
         */
        fsbno = XFS_B_TO_FSBT(mp, start);
-       end = XFS_B_TO_FSB(mp, isize);
+       lastbno = XFS_B_TO_FSB(mp, end);
 
        for (;;) {
                struct xfs_bmbt_irec    map[2];
                int                     nmap = 2;
                unsigned int            i;
 
-               error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
+               error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
                                       XFS_BMAPI_ENTIRE);
                if (error)
-                       goto out_unlock;
+                       goto out_error;
 
                /* No extents at given offset, must be beyond EOF */
                if (nmap == 0) {
                        error = -ENXIO;
-                       goto out_unlock;
+                       goto out_error;
                }
 
                for (i = 0; i < nmap; i++) {
@@ -1426,7 +1426,7 @@ xfs_seek_hole_data(
                         * hole at the end of any file).
                         */
                        if (whence == SEEK_HOLE) {
-                               offset = isize;
+                               offset = end;
                                break;
                        }
                        /*
@@ -1434,7 +1434,7 @@ xfs_seek_hole_data(
                         */
                        ASSERT(whence == SEEK_DATA);
                        error = -ENXIO;
-                       goto out_unlock;
+                       goto out_error;
                }
 
                ASSERT(i > 1);
@@ -1445,14 +1445,14 @@ xfs_seek_hole_data(
                 */
                fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
                start = XFS_FSB_TO_B(mp, fsbno);
-               if (start >= isize) {
+               if (start >= end) {
                        if (whence == SEEK_HOLE) {
-                               offset = isize;
+                               offset = end;
                                break;
                        }
                        ASSERT(whence == SEEK_DATA);
                        error = -ENXIO;
-                       goto out_unlock;
+                       goto out_error;
                }
        }
 
@@ -1464,7 +1464,39 @@ out:
         * situation in particular.
         */
        if (whence == SEEK_HOLE)
-               offset = min_t(loff_t, offset, isize);
+               offset = min_t(loff_t, offset, end);
+
+       return offset;
+
+out_error:
+       return error;
+}
+
+STATIC loff_t
+xfs_seek_hole_data(
+       struct file             *file,
+       loff_t                  start,
+       int                     whence)
+{
+       struct inode            *inode = file->f_mapping->host;
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       uint                    lock;
+       loff_t                  offset, end;
+       int                     error = 0;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+
+       lock = xfs_ilock_data_map_shared(ip);
+
+       end = i_size_read(inode);
+       offset = __xfs_seek_hole_data(inode, start, end, whence);
+       if (offset < 0) {
+               error = offset;
+               goto out_unlock;
+       }
+
        offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 
 out_unlock:
index c4c130f9bfb64fec1d7d5dccb27963a236477ced..a51353a1f87f1a5e78064c0598f42397ece8f767 100644 (file)
@@ -151,7 +151,7 @@ xfs_filestream_pick_ag(
        xfs_agnumber_t          ag, max_ag = NULLAGNUMBER;
        int                     err, trylock, nscan;
 
-       ASSERT(S_ISDIR(ip->i_d.di_mode));
+       ASSERT(S_ISDIR(VFS_I(ip)->i_mode));
 
        /* 2% of an AG's blocks must be free for it to be chosen. */
        minfree = mp->m_sb.sb_agblocks / 50;
@@ -319,7 +319,7 @@ xfs_filestream_lookup_ag(
        xfs_agnumber_t          startag, ag = NULLAGNUMBER;
        struct xfs_mru_cache_elem *mru;
 
-       ASSERT(S_ISREG(ip->i_d.di_mode));
+       ASSERT(S_ISREG(VFS_I(ip)->i_mode));
 
        pip = xfs_filestream_get_parent(ip);
        if (!pip)
index 1b6a98b66886c76d1fab032673ec88f4cb11afa0..f32713f14f9a21c1b752e2e8eb889dea72411f8e 100644 (file)
@@ -25,6 +25,5 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
                                xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
-extern int xfs_fs_log_dummy(struct xfs_mount *mp);
 
 #endif /* __XFS_FSOPS_H__ */
index d7a490f24ead08e3abf5019654ee5ee6e2e1eb7b..bf2d60749278602b5b4afcda09ede7d3dd89fd1e 100644 (file)
@@ -63,6 +63,9 @@ xfs_inode_alloc(
                return NULL;
        }
 
+       /* VFS doesn't initialise i_mode! */
+       VFS_I(ip)->i_mode = 0;
+
        XFS_STATS_INC(mp, vn_active);
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
@@ -79,7 +82,7 @@ xfs_inode_alloc(
        memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
        ip->i_flags = 0;
        ip->i_delayed_blks = 0;
-       memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
+       memset(&ip->i_d, 0, sizeof(ip->i_d));
 
        return ip;
 }
@@ -98,7 +101,7 @@ void
 xfs_inode_free(
        struct xfs_inode        *ip)
 {
-       switch (ip->i_d.di_mode & S_IFMT) {
+       switch (VFS_I(ip)->i_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
@@ -134,6 +137,34 @@ xfs_inode_free(
        call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
 }
 
+/*
+ * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
+ * part of the structure. This is made more complex by the fact that we store
+ * information about the on-disk values in the VFS inode, so we can't just
+ * overwrite those values unconditionally. Hence we save the parameters we
+ * need to retain across reinitialisation and rewrite them into the VFS inode
+ * afterwards, even if the reinitialisation fails.
+ */
+static int
+xfs_reinit_inode(
+       struct xfs_mount        *mp,
+       struct inode            *inode)
+{
+       int             error;
+       uint32_t        nlink = inode->i_nlink;
+       uint32_t        generation = inode->i_generation;
+       uint64_t        version = inode->i_version;
+       umode_t         mode = inode->i_mode;
+
+       error = inode_init_always(mp->m_super, inode);
+
+       set_nlink(inode, nlink);
+       inode->i_generation = generation;
+       inode->i_version = version;
+       inode->i_mode = mode;
+       return error;
+}
+
 /*
  * Check the validity of the inode we just found it the cache
  */
@@ -185,7 +216,7 @@ xfs_iget_cache_hit(
        /*
         * If lookup is racing with unlink return an error immediately.
         */
-       if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
+       if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
                error = -ENOENT;
                goto out_error;
        }
@@ -208,7 +239,7 @@ xfs_iget_cache_hit(
                spin_unlock(&ip->i_flags_lock);
                rcu_read_unlock();
 
-               error = inode_init_always(mp->m_super, inode);
+               error = xfs_reinit_inode(mp, inode);
                if (error) {
                        /*
                         * Re-initializing the inode failed, and we are in deep
@@ -295,7 +326,7 @@ xfs_iget_cache_miss(
 
        trace_xfs_iget_miss(ip);
 
-       if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
+       if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
                error = -ENOENT;
                goto out_destroy;
        }
@@ -444,7 +475,7 @@ again:
         * If we have a real type for an on-disk inode, we can setup the inode
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
-       if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
+       if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
                xfs_setup_existing_inode(ip);
        return 0;
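xfs_reinit_inode() above is an instance of a general pattern: the reinitialiser (inode_init_always() in this case) wipes the whole object, so fields that must survive are saved first and written back afterwards, even when the reinitialiser fails. A generic, self-contained sketch of that pattern using made-up types:

/* Illustrative only: preserve selected fields across a full re-initialisation. */
#include <string.h>

struct widget {				/* made-up type, not a kernel structure */
	unsigned int generation;	/* must survive a reinit */
	unsigned int refcount;		/* must survive a reinit */
	char scratch[64];		/* may be wiped */
};

static int widget_init_always(struct widget *w)
{
	memset(w, 0, sizeof(*w));	/* resets everything, like inode_init_always() */
	return 0;
}

static int widget_reinit(struct widget *w)
{
	unsigned int generation = w->generation;
	unsigned int refcount = w->refcount;
	int error = widget_init_always(w);

	/* write the saved values back even on failure, as xfs_reinit_inode() does */
	w->generation = generation;
	w->refcount = refcount;
	return error;
}

int main(void)
{
	struct widget w = { .generation = 7, .refcount = 2 };

	widget_reinit(&w);
	return w.generation == 7 ? 0 : 1;
}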
 
index ceba1a83cacccd649caf473ebcf2dfae984bb243..96f606deee313aed506b7e7ee229fc801ba5de80 100644 (file)
@@ -57,9 +57,9 @@ kmem_zone_t *xfs_inode_zone;
  */
 #define        XFS_ITRUNC_MAX_EXTENTS  2
 
-STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
-
-STATIC int xfs_iunlink_remove(xfs_trans_t *, xfs_inode_t *);
+STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
+STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
+STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
 
 /*
  * helper function to extract extent size hint from inode
@@ -766,6 +766,7 @@ xfs_ialloc(
        uint            flags;
        int             error;
        struct timespec tv;
+       struct inode    *inode;
 
        /*
         * Call the space management code to pick
@@ -791,6 +792,7 @@ xfs_ialloc(
        if (error)
                return error;
        ASSERT(ip != NULL);
+       inode = VFS_I(ip);
 
        /*
         * We always convert v1 inodes to v2 now - we only support filesystems
@@ -800,20 +802,16 @@ xfs_ialloc(
        if (ip->i_d.di_version == 1)
                ip->i_d.di_version = 2;
 
-       ip->i_d.di_mode = mode;
-       ip->i_d.di_onlink = 0;
-       ip->i_d.di_nlink = nlink;
-       ASSERT(ip->i_d.di_nlink == nlink);
+       inode->i_mode = mode;
+       set_nlink(inode, nlink);
        ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
        ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
        xfs_set_projid(ip, prid);
-       memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
 
        if (pip && XFS_INHERIT_GID(pip)) {
                ip->i_d.di_gid = pip->i_d.di_gid;
-               if ((pip->i_d.di_mode & S_ISGID) && S_ISDIR(mode)) {
-                       ip->i_d.di_mode |= S_ISGID;
-               }
+               if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
+                       inode->i_mode |= S_ISGID;
        }
 
        /*
@@ -822,38 +820,29 @@ xfs_ialloc(
         * (and only if the irix_sgid_inherit compatibility variable is set).
         */
        if ((irix_sgid_inherit) &&
-           (ip->i_d.di_mode & S_ISGID) &&
-           (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid)))) {
-               ip->i_d.di_mode &= ~S_ISGID;
-       }
+           (inode->i_mode & S_ISGID) &&
+           (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
+               inode->i_mode &= ~S_ISGID;
 
        ip->i_d.di_size = 0;
        ip->i_d.di_nextents = 0;
        ASSERT(ip->i_d.di_nblocks == 0);
 
        tv = current_fs_time(mp->m_super);
-       ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
-       ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
-       ip->i_d.di_atime = ip->i_d.di_mtime;
-       ip->i_d.di_ctime = ip->i_d.di_mtime;
+       inode->i_mtime = tv;
+       inode->i_atime = tv;
+       inode->i_ctime = tv;
 
-       /*
-        * di_gen will have been taken care of in xfs_iread.
-        */
        ip->i_d.di_extsize = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_dmstate = 0;
        ip->i_d.di_flags = 0;
 
        if (ip->i_d.di_version == 3) {
-               ASSERT(ip->i_d.di_ino == ino);
-               ASSERT(uuid_equal(&ip->i_d.di_uuid, &mp->m_sb.sb_meta_uuid));
-               ip->i_d.di_crc = 0;
-               ip->i_d.di_changecount = 1;
-               ip->i_d.di_lsn = 0;
+               inode->i_version = 1;
                ip->i_d.di_flags2 = 0;
-               memset(&(ip->i_d.di_pad2[0]), 0, sizeof(ip->i_d.di_pad2));
-               ip->i_d.di_crtime = ip->i_d.di_mtime;
+               ip->i_d.di_crtime.t_sec = (__int32_t)tv.tv_sec;
+               ip->i_d.di_crtime.t_nsec = (__int32_t)tv.tv_nsec;
        }
 
 
@@ -1092,35 +1081,24 @@ xfs_dir_ialloc(
 }
 
 /*
- * Decrement the link count on an inode & log the change.
- * If this causes the link count to go to zero, initiate the
- * logging activity required to truncate a file.
+ * Decrement the link count on an inode & log the change.  If this causes the
+ * link count to go to zero, move the inode to AGI unlinked list so that it can
+ * be freed when the last active reference goes away via xfs_inactive().
  */
 int                            /* error */
 xfs_droplink(
        xfs_trans_t *tp,
        xfs_inode_t *ip)
 {
-       int     error;
-
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 
-       ASSERT (ip->i_d.di_nlink > 0);
-       ip->i_d.di_nlink--;
        drop_nlink(VFS_I(ip));
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
-       error = 0;
-       if (ip->i_d.di_nlink == 0) {
-               /*
-                * We're dropping the last link to this file.
-                * Move the on-disk inode to the AGI unlinked list.
-                * From xfs_inactive() we will pull the inode from
-                * the list and free it.
-                */
-               error = xfs_iunlink(tp, ip);
-       }
-       return error;
+       if (VFS_I(ip)->i_nlink)
+               return 0;
+
+       return xfs_iunlink(tp, ip);
 }
 
 /*
@@ -1134,8 +1112,6 @@ xfs_bumplink(
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
 
        ASSERT(ip->i_d.di_version > 1);
-       ASSERT(ip->i_d.di_nlink > 0 || (VFS_I(ip)->i_state & I_LINKABLE));
-       ip->i_d.di_nlink++;
        inc_nlink(VFS_I(ip));
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        return 0;
@@ -1393,7 +1369,6 @@ xfs_create_tmpfile(
         */
        xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 
-       ip->i_d.di_nlink--;
        error = xfs_iunlink(tp, ip);
        if (error)
                goto out_trans_cancel;
@@ -1444,7 +1419,7 @@ xfs_link(
 
        trace_xfs_link(tdp, target_name);
 
-       ASSERT(!S_ISDIR(sip->i_d.di_mode));
+       ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
 
        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;
@@ -1492,7 +1467,10 @@ xfs_link(
 
        xfs_bmap_init(&free_list, &first_block);
 
-       if (sip->i_d.di_nlink == 0) {
+       /*
+        * Handle initial link state of O_TMPFILE inode
+        */
+       if (VFS_I(sip)->i_nlink == 0) {
                error = xfs_iunlink_remove(tp, sip);
                if (error)
                        goto error_return;
@@ -1648,7 +1626,7 @@ xfs_release(
        xfs_mount_t     *mp = ip->i_mount;
        int             error;
 
-       if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
+       if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
                return 0;
 
        /* If this is a read-only mount, don't do this (would generate I/O) */
@@ -1679,7 +1657,7 @@ xfs_release(
                }
        }
 
-       if (ip->i_d.di_nlink == 0)
+       if (VFS_I(ip)->i_nlink == 0)
                return 0;
 
        if (xfs_can_free_eofblocks(ip, false)) {
@@ -1883,7 +1861,7 @@ xfs_inactive(
         * If the inode is already free, then there can be nothing
         * to clean up here.
         */
-       if (ip->i_d.di_mode == 0) {
+       if (VFS_I(ip)->i_mode == 0) {
                ASSERT(ip->i_df.if_real_bytes == 0);
                ASSERT(ip->i_df.if_broot_bytes == 0);
                return;
@@ -1895,7 +1873,7 @@ xfs_inactive(
        if (mp->m_flags & XFS_MOUNT_RDONLY)
                return;
 
-       if (ip->i_d.di_nlink != 0) {
+       if (VFS_I(ip)->i_nlink != 0) {
                /*
                 * force is true because we are evicting an inode from the
                 * cache. Post-eof blocks must be freed, lest we end up with
@@ -1907,7 +1885,7 @@ xfs_inactive(
                return;
        }
 
-       if (S_ISREG(ip->i_d.di_mode) &&
+       if (S_ISREG(VFS_I(ip)->i_mode) &&
            (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
             ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
                truncate = 1;
@@ -1916,7 +1894,7 @@ xfs_inactive(
        if (error)
                return;
 
-       if (S_ISLNK(ip->i_d.di_mode))
+       if (S_ISLNK(VFS_I(ip)->i_mode))
                error = xfs_inactive_symlink(ip);
        else if (truncate)
                error = xfs_inactive_truncate(ip);
@@ -1952,16 +1930,21 @@ xfs_inactive(
 }
 
 /*
- * This is called when the inode's link count goes to 0.
- * We place the on-disk inode on a list in the AGI.  It
- * will be pulled from this list when the inode is freed.
+ * This is called when the inode's link count goes to 0 or we are creating a
+ * tmpfile via O_TMPFILE. In the tmpfile case the link count is only dropped
+ * to zero by the VFS after the file has been created successfully, so we
+ * have to add the inode to the unlinked list while its link count is still
+ * non-zero.
+ *
+ * We place the on-disk inode on a list in the AGI.  It will be pulled from this
+ * list when the inode is freed.
  */
-int
+STATIC int
 xfs_iunlink(
-       xfs_trans_t     *tp,
-       xfs_inode_t     *ip)
+       struct xfs_trans *tp,
+       struct xfs_inode *ip)
 {
-       xfs_mount_t     *mp;
+       xfs_mount_t     *mp = tp->t_mountp;
        xfs_agi_t       *agi;
        xfs_dinode_t    *dip;
        xfs_buf_t       *agibp;
@@ -1971,10 +1954,7 @@ xfs_iunlink(
        int             offset;
        int             error;
 
-       ASSERT(ip->i_d.di_nlink == 0);
-       ASSERT(ip->i_d.di_mode != 0);
-
-       mp = tp->t_mountp;
+       ASSERT(VFS_I(ip)->i_mode != 0);
 
        /*
         * Get the agi buffer first.  It ensures lock ordering
@@ -2412,10 +2392,10 @@ xfs_ifree(
        struct xfs_icluster     xic = { 0 };
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
-       ASSERT(ip->i_d.di_nlink == 0);
+       ASSERT(VFS_I(ip)->i_nlink == 0);
        ASSERT(ip->i_d.di_nextents == 0);
        ASSERT(ip->i_d.di_anextents == 0);
-       ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
+       ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
        ASSERT(ip->i_d.di_nblocks == 0);
 
        /*
@@ -2429,7 +2409,7 @@ xfs_ifree(
        if (error)
                return error;
 
-       ip->i_d.di_mode = 0;            /* mark incore inode as free */
+       VFS_I(ip)->i_mode = 0;          /* mark incore inode as free */
        ip->i_d.di_flags = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_forkoff = 0;         /* mark the attr fork not in use */
@@ -2439,7 +2419,7 @@ xfs_ifree(
         * Bump the generation count so no one will be confused
         * by reincarnations of this inode.
         */
-       ip->i_d.di_gen++;
+       VFS_I(ip)->i_generation++;
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
        if (xic.deleted)
@@ -2526,7 +2506,7 @@ xfs_remove(
 {
        xfs_mount_t             *mp = dp->i_mount;
        xfs_trans_t             *tp = NULL;
-       int                     is_dir = S_ISDIR(ip->i_d.di_mode);
+       int                     is_dir = S_ISDIR(VFS_I(ip)->i_mode);
        int                     error = 0;
        xfs_bmap_free_t         free_list;
        xfs_fsblock_t           first_block;
@@ -2580,8 +2560,8 @@ xfs_remove(
         * If we're removing a directory perform some additional validation.
         */
        if (is_dir) {
-               ASSERT(ip->i_d.di_nlink >= 2);
-               if (ip->i_d.di_nlink != 2) {
+               ASSERT(VFS_I(ip)->i_nlink >= 2);
+               if (VFS_I(ip)->i_nlink != 2) {
                        error = -ENOTEMPTY;
                        goto out_trans_cancel;
                }
@@ -2771,7 +2751,7 @@ xfs_cross_rename(
        if (dp1 != dp2) {
                dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
 
-               if (S_ISDIR(ip2->i_d.di_mode)) {
+               if (S_ISDIR(VFS_I(ip2)->i_mode)) {
                        error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
                                                dp1->i_ino, first_block,
                                                free_list, spaceres);
@@ -2779,7 +2759,7 @@ xfs_cross_rename(
                                goto out_trans_abort;
 
                        /* transfer ip2 ".." reference to dp1 */
-                       if (!S_ISDIR(ip1->i_d.di_mode)) {
+                       if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
                                error = xfs_droplink(tp, dp2);
                                if (error)
                                        goto out_trans_abort;
@@ -2798,7 +2778,7 @@ xfs_cross_rename(
                        ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
                }
 
-               if (S_ISDIR(ip1->i_d.di_mode)) {
+               if (S_ISDIR(VFS_I(ip1)->i_mode)) {
                        error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
                                                dp2->i_ino, first_block,
                                                free_list, spaceres);
@@ -2806,7 +2786,7 @@ xfs_cross_rename(
                                goto out_trans_abort;
 
                        /* transfer ip1 ".." reference to dp2 */
-                       if (!S_ISDIR(ip2->i_d.di_mode)) {
+                       if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
                                error = xfs_droplink(tp, dp1);
                                if (error)
                                        goto out_trans_abort;
@@ -2903,7 +2883,7 @@ xfs_rename(
        struct xfs_inode        *inodes[__XFS_SORT_INODES];
        int                     num_inodes = __XFS_SORT_INODES;
        bool                    new_parent = (src_dp != target_dp);
-       bool                    src_is_directory = S_ISDIR(src_ip->i_d.di_mode);
+       bool                    src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
        int                     spaceres;
        int                     error;
 
@@ -3032,12 +3012,12 @@ xfs_rename(
                 * target and source are directories and that target can be
                 * destroyed, or that neither is a directory.
                 */
-               if (S_ISDIR(target_ip->i_d.di_mode)) {
+               if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
                        /*
                         * Make sure target dir is empty.
                         */
                        if (!(xfs_dir_isempty(target_ip)) ||
-                           (target_ip->i_d.di_nlink > 2)) {
+                           (VFS_I(target_ip)->i_nlink > 2)) {
                                error = -EEXIST;
                                goto out_trans_cancel;
                        }
@@ -3144,7 +3124,7 @@ xfs_rename(
         * intermediate state on disk.
         */
        if (wip) {
-               ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0);
+               ASSERT(VFS_I(wip)->i_nlink == 0);
                error = xfs_bumplink(tp, wip);
                if (error)
                        goto out_bmap_cancel;
@@ -3313,7 +3293,7 @@ cluster_corrupt_out:
                 * mark it as stale and brelse.
                 */
                if (bp->b_iodone) {
-                       XFS_BUF_UNDONE(bp);
+                       bp->b_flags &= ~XBF_DONE;
                        xfs_buf_stale(bp);
                        xfs_buf_ioerror(bp, -EIO);
                        xfs_buf_ioend(bp);
@@ -3462,14 +3442,7 @@ xfs_iflush_int(
                        __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
                goto corrupt_out;
        }
-       if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
-                               mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
-               xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
-                       "%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
-                       __func__, ip->i_ino, ip, ip->i_d.di_magic);
-               goto corrupt_out;
-       }
-       if (S_ISREG(ip->i_d.di_mode)) {
+       if (S_ISREG(VFS_I(ip)->i_mode)) {
                if (XFS_TEST_ERROR(
                    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
@@ -3479,7 +3452,7 @@ xfs_iflush_int(
                                __func__, ip->i_ino, ip);
                        goto corrupt_out;
                }
-       } else if (S_ISDIR(ip->i_d.di_mode)) {
+       } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
                if (XFS_TEST_ERROR(
                    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
@@ -3523,12 +3496,11 @@ xfs_iflush_int(
                ip->i_d.di_flushiter++;
 
        /*
-        * Copy the dirty parts of the inode into the on-disk
-        * inode.  We always copy out the core of the inode,
-        * because if the inode is dirty at all the core must
-        * be.
+        * Copy the dirty parts of the inode into the on-disk inode.  We always
+        * copy out the core of the inode, because if the inode is dirty at all
+        * the core must be.
         */
-       xfs_dinode_to_disk(dip, &ip->i_d);
+       xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
 
        /* Wrap, we never let the log put out DI_MAX_FLUSH */
        if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
@@ -3580,10 +3552,6 @@ xfs_iflush_int(
         */
        xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
 
-       /* update the lsn in the on disk inode if required */
-       if (ip->i_d.di_version == 3)
-               dip->di_lsn = cpu_to_be64(iip->ili_item.li_lsn);
-
        /* generate the checksum. */
        xfs_dinode_calc_crc(mp, dip);
 
index ca9e11989cbd4f330c6cb0d1a1bede113fd9c8b2..43e1d51b15eb84ca34e978166025b74d30e5b573 100644 (file)
@@ -63,7 +63,7 @@ typedef struct xfs_inode {
        unsigned long           i_flags;        /* see defined flags below */
        unsigned int            i_delayed_blks; /* count of delay alloc blks */
 
-       xfs_icdinode_t          i_d;            /* most of ondisk inode */
+       struct xfs_icdinode     i_d;            /* most of ondisk inode */
 
        /* VFS inode */
        struct inode            i_vnode;        /* embedded VFS inode */
@@ -88,7 +88,7 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
  */
 static inline xfs_fsize_t XFS_ISIZE(struct xfs_inode *ip)
 {
-       if (S_ISREG(ip->i_d.di_mode))
+       if (S_ISREG(VFS_I(ip)->i_mode))
                return i_size_read(VFS_I(ip));
        return ip->i_d.di_size;
 }
@@ -369,7 +369,7 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
  */
 #define XFS_INHERIT_GID(pip)   \
        (((pip)->i_mount->m_flags & XFS_MOUNT_GRPID) || \
-        ((pip)->i_d.di_mode & S_ISGID))
+        (VFS_I(pip)->i_mode & S_ISGID))
 
 int            xfs_release(struct xfs_inode *ip);
 void           xfs_inactive(struct xfs_inode *ip);
@@ -405,8 +405,6 @@ int         xfs_ifree(struct xfs_trans *, xfs_inode_t *,
                           struct xfs_bmap_free *);
 int            xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
                                      int, xfs_fsize_t);
-int            xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
-
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
 
 void           xfs_iunpin_wait(xfs_inode_t *);
@@ -437,6 +435,8 @@ int xfs_update_prealloc_flags(struct xfs_inode *ip,
 int    xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
                     xfs_fsize_t isize, bool *did_zeroing);
 int    xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
+loff_t __xfs_seek_hole_data(struct inode *inode, loff_t start,
+                            loff_t eof, int whence);
 
 
 /* from xfs_iops.c */
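A recurring theme in this series is dropping duplicated xfs_icdinode fields (di_mode, di_nlink, di_gen, the timestamps) in favour of the equivalents held in the VFS inode embedded in struct xfs_inode and reached via VFS_I(). The mechanism is the usual embedded-structure/container_of pairing; the sketch below uses illustrative userspace names rather than the real kernel definitions.

/* Illustrative only: an fs-private inode embedding the generic VFS inode. */
#include <stddef.h>
#include <stdio.h>

struct inode {				/* stand-in for the kernel's struct inode */
	unsigned int i_mode;
	unsigned int i_nlink;
};

struct myfs_inode {			/* stand-in for struct xfs_inode */
	unsigned long long private_blocks;
	struct inode i_vnode;		/* embedded VFS inode */
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct inode *VFS_I(struct myfs_inode *ip)	/* fs inode -> VFS inode */
{
	return &ip->i_vnode;
}

static struct myfs_inode *MYFS_I(struct inode *inode)	/* VFS inode -> fs inode */
{
	return container_of(inode, struct myfs_inode, i_vnode);
}

int main(void)
{
	struct myfs_inode ip = {
		.private_blocks = 8,
		.i_vnode = { .i_mode = 0100644, .i_nlink = 1 },
	};

	/* mode and nlink now have a single source of truth: the embedded inode */
	printf("mode %o, nlink %u, blocks %llu\n",
	       VFS_I(&ip)->i_mode, VFS_I(&ip)->i_nlink,
	       MYFS_I(VFS_I(&ip))->private_blocks);
	return 0;
}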
index d14b12b8cfefb90f8fe4c92a0033a41cbde2e552..c48b5b18d771fab685e23c03613a1c6e762efcb4 100644 (file)
@@ -135,7 +135,7 @@ xfs_inode_item_size(
 
        *nvecs += 2;
        *nbytes += sizeof(struct xfs_inode_log_format) +
-                  xfs_icdinode_size(ip->i_d.di_version);
+                  xfs_log_dinode_size(ip->i_d.di_version);
 
        xfs_inode_item_data_fork_size(iip, nvecs, nbytes);
        if (XFS_IFORK_Q(ip))
@@ -322,6 +322,81 @@ xfs_inode_item_format_attr_fork(
        }
 }
 
+static void
+xfs_inode_to_log_dinode(
+       struct xfs_inode        *ip,
+       struct xfs_log_dinode   *to,
+       xfs_lsn_t               lsn)
+{
+       struct xfs_icdinode     *from = &ip->i_d;
+       struct inode            *inode = VFS_I(ip);
+
+       to->di_magic = XFS_DINODE_MAGIC;
+
+       to->di_version = from->di_version;
+       to->di_format = from->di_format;
+       to->di_uid = from->di_uid;
+       to->di_gid = from->di_gid;
+       to->di_projid_lo = from->di_projid_lo;
+       to->di_projid_hi = from->di_projid_hi;
+
+       memset(to->di_pad, 0, sizeof(to->di_pad));
+       memset(to->di_pad3, 0, sizeof(to->di_pad3));
+       to->di_atime.t_sec = inode->i_atime.tv_sec;
+       to->di_atime.t_nsec = inode->i_atime.tv_nsec;
+       to->di_mtime.t_sec = inode->i_mtime.tv_sec;
+       to->di_mtime.t_nsec = inode->i_mtime.tv_nsec;
+       to->di_ctime.t_sec = inode->i_ctime.tv_sec;
+       to->di_ctime.t_nsec = inode->i_ctime.tv_nsec;
+       to->di_nlink = inode->i_nlink;
+       to->di_gen = inode->i_generation;
+       to->di_mode = inode->i_mode;
+
+       to->di_size = from->di_size;
+       to->di_nblocks = from->di_nblocks;
+       to->di_extsize = from->di_extsize;
+       to->di_nextents = from->di_nextents;
+       to->di_anextents = from->di_anextents;
+       to->di_forkoff = from->di_forkoff;
+       to->di_aformat = from->di_aformat;
+       to->di_dmevmask = from->di_dmevmask;
+       to->di_dmstate = from->di_dmstate;
+       to->di_flags = from->di_flags;
+
+       if (from->di_version == 3) {
+               to->di_changecount = inode->i_version;
+               to->di_crtime.t_sec = from->di_crtime.t_sec;
+               to->di_crtime.t_nsec = from->di_crtime.t_nsec;
+               to->di_flags2 = from->di_flags2;
+
+               to->di_ino = ip->i_ino;
+               to->di_lsn = lsn;
+               memset(to->di_pad2, 0, sizeof(to->di_pad2));
+               uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
+               to->di_flushiter = 0;
+       } else {
+               to->di_flushiter = from->di_flushiter;
+       }
+}
+
+/*
+ * Format the inode core. Current timestamp data is only in the VFS inode
+ * fields, so we need to grab them from there. Hence rather than just copying
+ * the XFS inode core structure, format the fields directly into the iovec.
+ */
+static void
+xfs_inode_item_format_core(
+       struct xfs_inode        *ip,
+       struct xfs_log_vec      *lv,
+       struct xfs_log_iovec    **vecp)
+{
+       struct xfs_log_dinode   *dic;
+
+       dic = xlog_prepare_iovec(lv, vecp, XLOG_REG_TYPE_ICORE);
+       xfs_inode_to_log_dinode(ip, dic, ip->i_itemp->ili_item.li_lsn);
+       xlog_finish_iovec(lv, *vecp, xfs_log_dinode_size(ip->i_d.di_version));
+}
+
 /*
  * This is called to fill in the vector of log iovecs for the given inode
  * log item.  It fills the first item with an inode log format structure,
@@ -351,10 +426,7 @@ xfs_inode_item_format(
        ilf->ilf_size = 2; /* format + core */
        xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format));
 
-       xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_ICORE,
-                       &ip->i_d,
-                       xfs_icdinode_size(ip->i_d.di_version));
-
+       xfs_inode_item_format_core(ip, lv, &vecp);
        xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp);
        if (XFS_IFORK_Q(ip)) {
                xfs_inode_item_format_attr_fork(iip, ilf, lv, &vecp);
index 478d04e07f9500d6ceed20231deb6b474161c708..81d6d62188037ba6f5f6ccc3dcc68fdeab2ec8c3 100644 (file)
@@ -114,7 +114,7 @@ xfs_find_handle(
                handle.ha_fid.fid_len = sizeof(xfs_fid_t) -
                                        sizeof(handle.ha_fid.fid_len);
                handle.ha_fid.fid_pad = 0;
-               handle.ha_fid.fid_gen = ip->i_d.di_gen;
+               handle.ha_fid.fid_gen = inode->i_generation;
                handle.ha_fid.fid_ino = ip->i_ino;
 
                hsize = XFS_HSIZE(handle);
@@ -963,7 +963,7 @@ xfs_set_diflags(
                di_flags |= XFS_DIFLAG_NODEFRAG;
        if (xflags & FS_XFLAG_FILESTREAM)
                di_flags |= XFS_DIFLAG_FILESTREAM;
-       if (S_ISDIR(ip->i_d.di_mode)) {
+       if (S_ISDIR(VFS_I(ip)->i_mode)) {
                if (xflags & FS_XFLAG_RTINHERIT)
                        di_flags |= XFS_DIFLAG_RTINHERIT;
                if (xflags & FS_XFLAG_NOSYMLINKS)
@@ -972,7 +972,7 @@ xfs_set_diflags(
                        di_flags |= XFS_DIFLAG_EXTSZINHERIT;
                if (xflags & FS_XFLAG_PROJINHERIT)
                        di_flags |= XFS_DIFLAG_PROJINHERIT;
-       } else if (S_ISREG(ip->i_d.di_mode)) {
+       } else if (S_ISREG(VFS_I(ip)->i_mode)) {
                if (xflags & FS_XFLAG_REALTIME)
                        di_flags |= XFS_DIFLAG_REALTIME;
                if (xflags & FS_XFLAG_EXTSIZE)
@@ -1128,14 +1128,14 @@ xfs_ioctl_setattr_check_extsize(
 {
        struct xfs_mount        *mp = ip->i_mount;
 
-       if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(ip->i_d.di_mode))
+       if ((fa->fsx_xflags & FS_XFLAG_EXTSIZE) && !S_ISREG(VFS_I(ip)->i_mode))
                return -EINVAL;
 
        if ((fa->fsx_xflags & FS_XFLAG_EXTSZINHERIT) &&
-           !S_ISDIR(ip->i_d.di_mode))
+           !S_ISDIR(VFS_I(ip)->i_mode))
                return -EINVAL;
 
-       if (S_ISREG(ip->i_d.di_mode) && ip->i_d.di_nextents &&
+       if (S_ISREG(VFS_I(ip)->i_mode) && ip->i_d.di_nextents &&
            ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) != fa->fsx_extsize))
                return -EINVAL;
 
@@ -1256,9 +1256,9 @@ xfs_ioctl_setattr(
         * successful return from chown()
         */
 
-       if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+       if ((VFS_I(ip)->i_mode & (S_ISUID|S_ISGID)) &&
            !capable_wrt_inode_uidgid(VFS_I(ip), CAP_FSETID))
-               ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
+               VFS_I(ip)->i_mode &= ~(S_ISUID|S_ISGID);
 
        /* Change the ownerships and register project quota modifications */
        if (xfs_get_projid(ip) != fa->fsx_projid) {
index 76b71a1c6c323e2043aeab1e93fb5a66db9f39d0..0d38b1d2c420fcc75ffa0cbc28fe3f0db8de17b4 100644 (file)
@@ -459,8 +459,8 @@ xfs_vn_getattr(
 
        stat->size = XFS_ISIZE(ip);
        stat->dev = inode->i_sb->s_dev;
-       stat->mode = ip->i_d.di_mode;
-       stat->nlink = ip->i_d.di_nlink;
+       stat->mode = inode->i_mode;
+       stat->nlink = inode->i_nlink;
        stat->uid = inode->i_uid;
        stat->gid = inode->i_gid;
        stat->ino = ip->i_ino;
@@ -506,9 +506,6 @@ xfs_setattr_mode(
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-       ip->i_d.di_mode &= S_IFMT;
-       ip->i_d.di_mode |= mode & ~S_IFMT;
-
        inode->i_mode &= S_IFMT;
        inode->i_mode |= mode & ~S_IFMT;
 }
@@ -522,21 +519,12 @@ xfs_setattr_time(
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
-       if (iattr->ia_valid & ATTR_ATIME) {
+       if (iattr->ia_valid & ATTR_ATIME)
                inode->i_atime = iattr->ia_atime;
-               ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
-               ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
-       }
-       if (iattr->ia_valid & ATTR_CTIME) {
+       if (iattr->ia_valid & ATTR_CTIME)
                inode->i_ctime = iattr->ia_ctime;
-               ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
-               ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
-       }
-       if (iattr->ia_valid & ATTR_MTIME) {
+       if (iattr->ia_valid & ATTR_MTIME)
                inode->i_mtime = iattr->ia_mtime;
-               ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
-               ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
-       }
 }
 
 int
@@ -661,9 +649,9 @@ xfs_setattr_nonsize(
                 * The set-user-ID and set-group-ID bits of a file will be
                 * cleared upon successful return from chown()
                 */
-               if ((ip->i_d.di_mode & (S_ISUID|S_ISGID)) &&
+               if ((inode->i_mode & (S_ISUID|S_ISGID)) &&
                    !capable(CAP_FSETID))
-                       ip->i_d.di_mode &= ~(S_ISUID|S_ISGID);
+                       inode->i_mode &= ~(S_ISUID|S_ISGID);
 
                /*
                 * Change the ownerships and register quota modifications
@@ -773,7 +761,7 @@ xfs_setattr_size(
 
        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
-       ASSERT(S_ISREG(ip->i_d.di_mode));
+       ASSERT(S_ISREG(inode->i_mode));
        ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
                ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
 
@@ -991,21 +979,13 @@ xfs_vn_update_time(
        }
 
        xfs_ilock(ip, XFS_ILOCK_EXCL);
-       if (flags & S_CTIME) {
+       if (flags & S_CTIME)
                inode->i_ctime = *now;
-               ip->i_d.di_ctime.t_sec = (__int32_t)now->tv_sec;
-               ip->i_d.di_ctime.t_nsec = (__int32_t)now->tv_nsec;
-       }
-       if (flags & S_MTIME) {
+       if (flags & S_MTIME)
                inode->i_mtime = *now;
-               ip->i_d.di_mtime.t_sec = (__int32_t)now->tv_sec;
-               ip->i_d.di_mtime.t_nsec = (__int32_t)now->tv_nsec;
-       }
-       if (flags & S_ATIME) {
+       if (flags & S_ATIME)
                inode->i_atime = *now;
-               ip->i_d.di_atime.t_sec = (__int32_t)now->tv_sec;
-               ip->i_d.di_atime.t_nsec = (__int32_t)now->tv_nsec;
-       }
+
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_TIMESTAMP);
        return xfs_trans_commit(tp);
@@ -1232,8 +1212,6 @@ xfs_setup_inode(
        /* make the inode look hashed for the writeback code */
        hlist_add_fake(&inode->i_hash);
 
-       inode->i_mode   = ip->i_d.di_mode;
-       set_nlink(inode, ip->i_d.di_nlink);
        inode->i_uid    = xfs_uid_to_kuid(ip->i_d.di_uid);
        inode->i_gid    = xfs_gid_to_kgid(ip->i_d.di_gid);
 
@@ -1249,14 +1227,7 @@ xfs_setup_inode(
                break;
        }
 
-       inode->i_generation = ip->i_d.di_gen;
        i_size_write(inode, ip->i_d.di_size);
-       inode->i_atime.tv_sec   = ip->i_d.di_atime.t_sec;
-       inode->i_atime.tv_nsec  = ip->i_d.di_atime.t_nsec;
-       inode->i_mtime.tv_sec   = ip->i_d.di_mtime.t_sec;
-       inode->i_mtime.tv_nsec  = ip->i_d.di_mtime.t_nsec;
-       inode->i_ctime.tv_sec   = ip->i_d.di_ctime.t_sec;
-       inode->i_ctime.tv_nsec  = ip->i_d.di_ctime.t_nsec;
        xfs_diflags_to_iflags(inode, ip);
 
        ip->d_ops = ip->i_mount->m_nondir_inode_ops;
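
The hunks in this file, and in the XFS files that follow, all apply the same transformation: inode metadata that the generic VFS inode already tracks (i_mode, i_nlink, the timestamps, i_generation) is no longer mirrored in the XFS in-core icdinode and is instead read through the existing VFS_I() accessor. A minimal sketch of the pattern; the helper name is hypothetical and only illustrates the old form next to the new one:

        /* Hypothetical helper, illustrative only. */
        static inline bool xfs_example_is_reg(struct xfs_inode *ip)
        {
                return S_ISREG(VFS_I(ip)->i_mode);      /* was: S_ISREG(ip->i_d.di_mode) */
        }
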
index 930ebd86bebac3a300faf44fabe77aa28258cf60..ce73eb34620dbbf06570650a582163a98a0d8f92 100644 (file)
@@ -57,6 +57,7 @@ xfs_bulkstat_one_int(
 {
        struct xfs_icdinode     *dic;           /* dinode core info pointer */
        struct xfs_inode        *ip;            /* incore inode pointer */
+       struct inode            *inode;
        struct xfs_bstat        *buf;           /* return buffer */
        int                     error = 0;      /* error value */
 
@@ -77,30 +78,33 @@ xfs_bulkstat_one_int(
 
        ASSERT(ip != NULL);
        ASSERT(ip->i_imap.im_blkno != 0);
+       inode = VFS_I(ip);
 
        dic = &ip->i_d;
 
        /* xfs_iget returns the following without needing
         * further change.
         */
-       buf->bs_nlink = dic->di_nlink;
        buf->bs_projid_lo = dic->di_projid_lo;
        buf->bs_projid_hi = dic->di_projid_hi;
        buf->bs_ino = ino;
-       buf->bs_mode = dic->di_mode;
        buf->bs_uid = dic->di_uid;
        buf->bs_gid = dic->di_gid;
        buf->bs_size = dic->di_size;
-       buf->bs_atime.tv_sec = dic->di_atime.t_sec;
-       buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
-       buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
-       buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
-       buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
-       buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
+
+       buf->bs_nlink = inode->i_nlink;
+       buf->bs_atime.tv_sec = inode->i_atime.tv_sec;
+       buf->bs_atime.tv_nsec = inode->i_atime.tv_nsec;
+       buf->bs_mtime.tv_sec = inode->i_mtime.tv_sec;
+       buf->bs_mtime.tv_nsec = inode->i_mtime.tv_nsec;
+       buf->bs_ctime.tv_sec = inode->i_ctime.tv_sec;
+       buf->bs_ctime.tv_nsec = inode->i_ctime.tv_nsec;
+       buf->bs_gen = inode->i_generation;
+       buf->bs_mode = inode->i_mode;
+
        buf->bs_xflags = xfs_ip2xflags(ip);
        buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
        buf->bs_extents = dic->di_nextents;
-       buf->bs_gen = dic->di_gen;
        memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
        buf->bs_dmevmask = dic->di_dmevmask;
        buf->bs_dmstate = dic->di_dmstate;
index 9c9a1c9bcc7f0bf0090fa4ffdffdfa35d6068122..40b700d3f42638537cf90f7020dbb3d5affd2347 100644 (file)
@@ -1212,7 +1212,7 @@ xlog_iodone(xfs_buf_t *bp)
        }
 
        /* log I/O is always issued ASYNC */
-       ASSERT(XFS_BUF_ISASYNC(bp));
+       ASSERT(bp->b_flags & XBF_ASYNC);
        xlog_state_done_syncing(iclog, aborted);
 
        /*
@@ -1864,9 +1864,8 @@ xlog_sync(
 
        bp->b_io_length = BTOBB(count);
        bp->b_fspriv = iclog;
-       XFS_BUF_ZEROFLAGS(bp);
-       XFS_BUF_ASYNC(bp);
-       bp->b_flags |= XBF_SYNCIO;
+       bp->b_flags &= ~(XBF_FUA | XBF_FLUSH);
+       bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE);
 
        if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
                bp->b_flags |= XBF_FUA;
@@ -1893,12 +1892,11 @@ xlog_sync(
 
        /* account for log which doesn't start at block #0 */
        XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
+
        /*
         * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
         * is shutting down.
         */
-       XFS_BUF_WRITE(bp);
-
        error = xlog_bdstrat(bp);
        if (error) {
                xfs_buf_ioerror_alert(bp, "xlog_sync");
@@ -1910,9 +1908,8 @@ xlog_sync(
                xfs_buf_associate_memory(bp,
                                (char *)&iclog->ic_header + count, split);
                bp->b_fspriv = iclog;
-               XFS_BUF_ZEROFLAGS(bp);
-               XFS_BUF_ASYNC(bp);
-               bp->b_flags |= XBF_SYNCIO;
+               bp->b_flags &= ~(XBF_FUA | XBF_FLUSH);
+               bp->b_flags |= (XBF_ASYNC | XBF_SYNCIO | XBF_WRITE);
                if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
                        bp->b_flags |= XBF_FUA;
 
@@ -1921,7 +1918,6 @@ xlog_sync(
 
                /* account for internal log which doesn't start at block #0 */
                XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
-               XFS_BUF_WRITE(bp);
                error = xlog_bdstrat(bp);
                if (error) {
                        xfs_buf_ioerror_alert(bp, "xlog_sync (split)");
@@ -3979,7 +3975,7 @@ xfs_log_force_umount(
            log->l_flags & XLOG_ACTIVE_RECOVERY) {
                mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
                if (mp->m_sb_bp)
-                       XFS_BUF_DONE(mp->m_sb_bp);
+                       mp->m_sb_bp->b_flags |= XBF_DONE;
                return 0;
        }
 
@@ -4009,7 +4005,7 @@ xfs_log_force_umount(
        spin_lock(&log->l_icloglock);
        mp->m_flags |= XFS_MOUNT_FS_SHUTDOWN;
        if (mp->m_sb_bp)
-               XFS_BUF_DONE(mp->m_sb_bp);
+               mp->m_sb_bp->b_flags |= XBF_DONE;
 
        /*
         * Mark the log and the iclogs with IO error flags to prevent any
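
The buffer-flag changes in this file, and the matching ones in xfs_log_recover.c, xfs_mount.c and xfs_trans_buf.c below, replace the old XFS_BUF_*() wrapper macros with direct manipulation of bp->b_flags. A minimal illustration of the three forms, assuming a struct xfs_buf *bp obtained elsewhere:

        bp->b_flags |= XBF_DONE;                        /* set: was XFS_BUF_DONE(bp) */
        bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);         /* clear: was XFS_BUF_UNDONE(bp)/UNASYNC(bp) */
        ASSERT(bp->b_flags & XBF_STALE);                /* test: was XFS_BUF_ISSTALE(bp) */
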
index da37beb76f6e67faf90a658d55f49a06c6fc4152..1dc0e1488d4c272aa0b59d4ac65a1ecc68845113 100644 (file)
@@ -190,7 +190,7 @@ xlog_bread_noalign(
        ASSERT(nbblks <= bp->b_length);
 
        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
-       XFS_BUF_READ(bp);
+       bp->b_flags |= XBF_READ;
        bp->b_io_length = nbblks;
        bp->b_error = 0;
 
@@ -275,7 +275,6 @@ xlog_bwrite(
        ASSERT(nbblks <= bp->b_length);
 
        XFS_BUF_SET_ADDR(bp, log->l_logBBstart + blk_no);
-       XFS_BUF_ZEROFLAGS(bp);
        xfs_buf_hold(bp);
        xfs_buf_lock(bp);
        bp->b_io_length = nbblks;
@@ -2473,6 +2472,13 @@ xlog_recover_validate_buf_type(
                }
                bp->b_ops = &xfs_sb_buf_ops;
                break;
+#ifdef CONFIG_XFS_RT
+       case XFS_BLFT_RTBITMAP_BUF:
+       case XFS_BLFT_RTSUMMARY_BUF:
+               /* no magic numbers for verification of RT buffers */
+               bp->b_ops = &xfs_rtbuf_ops;
+               break;
+#endif /* CONFIG_XFS_RT */
        default:
                xfs_warn(mp, "Unknown buffer type %d!",
                         xfs_blft_from_flags(buf_f));
@@ -2793,7 +2799,7 @@ xfs_recover_inode_owner_change(
                return -ENOMEM;
 
        /* instantiate the inode */
-       xfs_dinode_from_disk(&ip->i_d, dip);
+       xfs_inode_from_disk(ip, dip);
        ASSERT(ip->i_d.di_version >= 3);
 
        error = xfs_iformat_fork(ip, dip);
@@ -2839,7 +2845,7 @@ xlog_recover_inode_pass2(
        int                     error;
        int                     attr_index;
        uint                    fields;
-       xfs_icdinode_t          *dicp;
+       struct xfs_log_dinode   *ldip;
        uint                    isize;
        int                     need_free = 0;
 
@@ -2892,8 +2898,8 @@ xlog_recover_inode_pass2(
                error = -EFSCORRUPTED;
                goto out_release;
        }
-       dicp = item->ri_buf[1].i_addr;
-       if (unlikely(dicp->di_magic != XFS_DINODE_MAGIC)) {
+       ldip = item->ri_buf[1].i_addr;
+       if (unlikely(ldip->di_magic != XFS_DINODE_MAGIC)) {
                xfs_alert(mp,
                        "%s: Bad inode log record, rec ptr 0x%p, ino %Ld",
                        __func__, item, in_f->ilf_ino);
@@ -2929,13 +2935,13 @@ xlog_recover_inode_pass2(
         * to skip replay when the on disk inode is newer than the log one
         */
        if (!xfs_sb_version_hascrc(&mp->m_sb) &&
-           dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+           ldip->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
                /*
                 * Deal with the wrap case, DI_MAX_FLUSH is less
                 * than smaller numbers
                 */
                if (be16_to_cpu(dip->di_flushiter) == DI_MAX_FLUSH &&
-                   dicp->di_flushiter < (DI_MAX_FLUSH >> 1)) {
+                   ldip->di_flushiter < (DI_MAX_FLUSH >> 1)) {
                        /* do nothing */
                } else {
                        trace_xfs_log_recover_inode_skip(log, in_f);
@@ -2945,13 +2951,13 @@ xlog_recover_inode_pass2(
        }
 
        /* Take the opportunity to reset the flush iteration count */
-       dicp->di_flushiter = 0;
+       ldip->di_flushiter = 0;
 
-       if (unlikely(S_ISREG(dicp->di_mode))) {
-               if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
-                   (dicp->di_format != XFS_DINODE_FMT_BTREE)) {
+       if (unlikely(S_ISREG(ldip->di_mode))) {
+               if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+                   (ldip->di_format != XFS_DINODE_FMT_BTREE)) {
                        XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(3)",
-                                        XFS_ERRLEVEL_LOW, mp, dicp);
+                                        XFS_ERRLEVEL_LOW, mp, ldip);
                        xfs_alert(mp,
                "%s: Bad regular inode log record, rec ptr 0x%p, "
                "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
@@ -2959,12 +2965,12 @@ xlog_recover_inode_pass2(
                        error = -EFSCORRUPTED;
                        goto out_release;
                }
-       } else if (unlikely(S_ISDIR(dicp->di_mode))) {
-               if ((dicp->di_format != XFS_DINODE_FMT_EXTENTS) &&
-                   (dicp->di_format != XFS_DINODE_FMT_BTREE) &&
-                   (dicp->di_format != XFS_DINODE_FMT_LOCAL)) {
+       } else if (unlikely(S_ISDIR(ldip->di_mode))) {
+               if ((ldip->di_format != XFS_DINODE_FMT_EXTENTS) &&
+                   (ldip->di_format != XFS_DINODE_FMT_BTREE) &&
+                   (ldip->di_format != XFS_DINODE_FMT_LOCAL)) {
                        XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(4)",
-                                            XFS_ERRLEVEL_LOW, mp, dicp);
+                                            XFS_ERRLEVEL_LOW, mp, ldip);
                        xfs_alert(mp,
                "%s: Bad dir inode log record, rec ptr 0x%p, "
                "ino ptr = 0x%p, ino bp = 0x%p, ino %Ld",
@@ -2973,32 +2979,32 @@ xlog_recover_inode_pass2(
                        goto out_release;
                }
        }
-       if (unlikely(dicp->di_nextents + dicp->di_anextents > dicp->di_nblocks)){
+       if (unlikely(ldip->di_nextents + ldip->di_anextents > ldip->di_nblocks)){
                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(5)",
-                                    XFS_ERRLEVEL_LOW, mp, dicp);
+                                    XFS_ERRLEVEL_LOW, mp, ldip);
                xfs_alert(mp,
        "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
        "dino bp 0x%p, ino %Ld, total extents = %d, nblocks = %Ld",
                        __func__, item, dip, bp, in_f->ilf_ino,
-                       dicp->di_nextents + dicp->di_anextents,
-                       dicp->di_nblocks);
+                       ldip->di_nextents + ldip->di_anextents,
+                       ldip->di_nblocks);
                error = -EFSCORRUPTED;
                goto out_release;
        }
-       if (unlikely(dicp->di_forkoff > mp->m_sb.sb_inodesize)) {
+       if (unlikely(ldip->di_forkoff > mp->m_sb.sb_inodesize)) {
                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(6)",
-                                    XFS_ERRLEVEL_LOW, mp, dicp);
+                                    XFS_ERRLEVEL_LOW, mp, ldip);
                xfs_alert(mp,
        "%s: Bad inode log record, rec ptr 0x%p, dino ptr 0x%p, "
        "dino bp 0x%p, ino %Ld, forkoff 0x%x", __func__,
-                       item, dip, bp, in_f->ilf_ino, dicp->di_forkoff);
+                       item, dip, bp, in_f->ilf_ino, ldip->di_forkoff);
                error = -EFSCORRUPTED;
                goto out_release;
        }
-       isize = xfs_icdinode_size(dicp->di_version);
+       isize = xfs_log_dinode_size(ldip->di_version);
        if (unlikely(item->ri_buf[1].i_len > isize)) {
                XFS_CORRUPTION_ERROR("xlog_recover_inode_pass2(7)",
-                                    XFS_ERRLEVEL_LOW, mp, dicp);
+                                    XFS_ERRLEVEL_LOW, mp, ldip);
                xfs_alert(mp,
                        "%s: Bad inode log record length %d, rec ptr 0x%p",
                        __func__, item->ri_buf[1].i_len, item);
@@ -3006,8 +3012,8 @@ xlog_recover_inode_pass2(
                goto out_release;
        }
 
-       /* The core is in in-core format */
-       xfs_dinode_to_disk(dip, dicp);
+       /* recover the log dinode into the on-disk inode */
+       xfs_log_dinode_to_disk(ldip, dip);
 
        /* the rest is in on-disk format */
        if (item->ri_buf[1].i_len > isize) {
@@ -4337,8 +4343,8 @@ xlog_recover_process_one_iunlink(
        if (error)
                goto fail_iput;
 
-       ASSERT(ip->i_d.di_nlink == 0);
-       ASSERT(ip->i_d.di_mode != 0);
+       ASSERT(VFS_I(ip)->i_nlink == 0);
+       ASSERT(VFS_I(ip)->i_mode != 0);
 
        /* setup for the next pass */
        agino = be32_to_cpu(dip->di_next_unlinked);
@@ -4491,7 +4497,7 @@ xlog_recover_process(
         * know precisely what failed.
         */
        if (pass == XLOG_RECOVER_CRCPASS) {
-               if (rhead->h_crc && crc != le32_to_cpu(rhead->h_crc))
+               if (rhead->h_crc && crc != rhead->h_crc)
                        return -EFSBADCRC;
                return 0;
        }
@@ -4502,7 +4508,7 @@ xlog_recover_process(
         * zero CRC check prevents warnings from being emitted when upgrading
         * the kernel from one that does not add CRCs by default.
         */
-       if (crc != le32_to_cpu(rhead->h_crc)) {
+       if (crc != rhead->h_crc) {
                if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
                        xfs_alert(log->l_mp,
                "log record CRC mismatch: found 0x%x, expected 0x%x.",
@@ -4926,10 +4932,9 @@ xlog_do_recover(
         * updates, re-read in the superblock and reverify it.
         */
        bp = xfs_getsb(log->l_mp, 0);
-       XFS_BUF_UNDONE(bp);
-       ASSERT(!(XFS_BUF_ISWRITE(bp)));
-       XFS_BUF_READ(bp);
-       XFS_BUF_UNASYNC(bp);
+       bp->b_flags &= ~(XBF_DONE | XBF_ASYNC);
+       ASSERT(!(bp->b_flags & XBF_WRITE));
+       bp->b_flags |= XBF_READ;
        bp->b_ops = &xfs_sb_buf_ops;
 
        error = xfs_buf_submit_wait(bp);
index bb753b359bee188b13023caf4597582b402cd31d..986290c4b7ab9fc753772ffe2d56015ef94e4643 100644 (file)
@@ -865,7 +865,7 @@ xfs_mountfs(
 
        ASSERT(rip != NULL);
 
-       if (unlikely(!S_ISDIR(rip->i_d.di_mode))) {
+       if (unlikely(!S_ISDIR(VFS_I(rip)->i_mode))) {
                xfs_warn(mp, "corrupted root inode %llu: not a directory",
                        (unsigned long long)rip->i_ino);
                xfs_iunlock(rip, XFS_ILOCK_EXCL);
@@ -1284,7 +1284,7 @@ xfs_getsb(
        }
 
        xfs_buf_hold(bp);
-       ASSERT(XFS_BUF_ISDONE(bp));
+       ASSERT(bp->b_flags & XBF_DONE);
        return bp;
 }
 
index b57098481c10a2a55a05bf6e75e6e43f7e224401..a4e03ab50342532ef394cd6ca0f34402a52c3153 100644 (file)
@@ -327,7 +327,6 @@ extern int  xfs_mod_fdblocks(struct xfs_mount *mp, int64_t delta,
                                 bool reserved);
 extern int     xfs_mod_frextents(struct xfs_mount *mp, int64_t delta);
 
-extern int     xfs_mount_log_sb(xfs_mount_t *);
 extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
 extern int     xfs_readsb(xfs_mount_t *, int);
 extern void    xfs_freesb(xfs_mount_t *);
index 532ab79d38fe376c14a5463a97195b59a61d8f84..be125e1758c1a5e4df36cfb8ec6e3e3643adc534 100644 (file)
@@ -560,6 +560,37 @@ xfs_qm_shrink_count(
        return list_lru_shrink_count(&qi->qi_lru, sc);
 }
 
+STATIC void
+xfs_qm_set_defquota(
+       xfs_mount_t     *mp,
+       uint            type,
+       xfs_quotainfo_t *qinf)
+{
+       xfs_dquot_t             *dqp;
+       struct xfs_def_quota    *defq;
+       int                     error;
+
+       error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);
+
+       if (!error) {
+               xfs_disk_dquot_t        *ddqp = &dqp->q_core;
+
+               defq = xfs_get_defquota(dqp, qinf);
+
+               /*
+                * Timers and warnings have already been set, so just set the
+                * default limits for this quota type
+                */
+               defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
+               defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
+               defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
+               defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
+               defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
+               defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
+               xfs_qm_dqdestroy(dqp);
+       }
+}
+
 /*
  * This initializes all the quota information that's kept in the
  * mount structure
@@ -606,19 +637,19 @@ xfs_qm_init_quotainfo(
         * We try to get the limits from the superuser's limits fields.
         * This is quite hacky, but it is standard quota practice.
         *
-        * We look at the USR dquot with id == 0 first, but if user quotas
-        * are not enabled we goto the GRP dquot with id == 0.
-        * We don't really care to keep separate default limits for user
-        * and group quotas, at least not at this point.
-        *
         * Since we may not have done a quotacheck by this point, just read
         * the dquot without attaching it to any hashtables or lists.
+        *
+        * Timers and warnings are globally set by the first timer found in
+        * user/group/proj quota types, otherwise a default value is used.
+        * This should be split into different fields per quota type.
         */
        error = xfs_qm_dqread(mp, 0,
                        XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
                         (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
                          XFS_DQ_PROJ),
                        XFS_QMOPT_DOWARN, &dqp);
+
        if (!error) {
                xfs_disk_dquot_t        *ddqp = &dqp->q_core;
 
@@ -639,13 +670,6 @@ xfs_qm_init_quotainfo(
                        be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
                        be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
-               qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
-               qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
-               qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
-               qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
-               qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
-               qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
-
                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
@@ -656,6 +680,13 @@ xfs_qm_init_quotainfo(
                qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
        }
 
+       if (XFS_IS_UQUOTA_RUNNING(mp))
+               xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
+       if (XFS_IS_GQUOTA_RUNNING(mp))
+               xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
+       if (XFS_IS_PQUOTA_RUNNING(mp))
+               xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
+
        qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
        qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
        qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
index 996a04064894cf4c07ffa28d058150243ffd86a4..2975a822e9f044cbb1ff66ec217d29ab29bf9f22 100644 (file)
@@ -53,6 +53,15 @@ extern struct kmem_zone      *xfs_qm_dqtrxzone;
  */
 #define XFS_DQUOT_CLUSTER_SIZE_FSB     (xfs_filblks_t)1
 
+struct xfs_def_quota {
+       xfs_qcnt_t       bhardlimit;     /* default data blk hard limit */
+       xfs_qcnt_t       bsoftlimit;     /* default data blk soft limit */
+       xfs_qcnt_t       ihardlimit;     /* default inode count hard limit */
+       xfs_qcnt_t       isoftlimit;     /* default inode count soft limit */
+       xfs_qcnt_t       rtbhardlimit;   /* default realtime blk hard limit */
+       xfs_qcnt_t       rtbsoftlimit;   /* default realtime blk soft limit */
+};
+
 /*
  * Various quota information for individual filesystems.
  * The mount structure keeps a pointer to this.
@@ -76,12 +85,9 @@ typedef struct xfs_quotainfo {
        struct mutex     qi_quotaofflock;/* to serialize quotaoff */
        xfs_filblks_t    qi_dqchunklen;  /* # BBs in a chunk of dqs */
        uint             qi_dqperchunk;  /* # ondisk dqs in above chunk */
-       xfs_qcnt_t       qi_bhardlimit;  /* default data blk hard limit */
-       xfs_qcnt_t       qi_bsoftlimit;  /* default data blk soft limit */
-       xfs_qcnt_t       qi_ihardlimit;  /* default inode count hard limit */
-       xfs_qcnt_t       qi_isoftlimit;  /* default inode count soft limit */
-       xfs_qcnt_t       qi_rtbhardlimit;/* default realtime blk hard limit */
-       xfs_qcnt_t       qi_rtbsoftlimit;/* default realtime blk soft limit */
+       struct xfs_def_quota    qi_usr_default;
+       struct xfs_def_quota    qi_grp_default;
+       struct xfs_def_quota    qi_prj_default;
        struct shrinker  qi_shrinker;
 } xfs_quotainfo_t;
 
@@ -104,15 +110,15 @@ xfs_dquot_tree(
 }
 
 static inline struct xfs_inode *
-xfs_dq_to_quota_inode(struct xfs_dquot *dqp)
+xfs_quota_inode(xfs_mount_t *mp, uint dq_flags)
 {
-       switch (dqp->dq_flags & XFS_DQ_ALLTYPES) {
+       switch (dq_flags & XFS_DQ_ALLTYPES) {
        case XFS_DQ_USER:
-               return dqp->q_mount->m_quotainfo->qi_uquotaip;
+               return mp->m_quotainfo->qi_uquotaip;
        case XFS_DQ_GROUP:
-               return dqp->q_mount->m_quotainfo->qi_gquotaip;
+               return mp->m_quotainfo->qi_gquotaip;
        case XFS_DQ_PROJ:
-               return dqp->q_mount->m_quotainfo->qi_pquotaip;
+               return mp->m_quotainfo->qi_pquotaip;
        default:
                ASSERT(0);
        }
@@ -164,11 +170,27 @@ extern void               xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
 
 /* quota ops */
 extern int             xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
-extern int             xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
-                                       uint, struct qc_dqblk *);
+extern int             xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t *,
+                                       uint, struct qc_dqblk *, uint);
 extern int             xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
                                        struct qc_dqblk *);
 extern int             xfs_qm_scall_quotaon(struct xfs_mount *, uint);
 extern int             xfs_qm_scall_quotaoff(struct xfs_mount *, uint);
 
+static inline struct xfs_def_quota *
+xfs_get_defquota(struct xfs_dquot *dqp, struct xfs_quotainfo *qi)
+{
+       struct xfs_def_quota *defq;
+
+       if (XFS_QM_ISUDQ(dqp))
+               defq = &qi->qi_usr_default;
+       else if (XFS_QM_ISGDQ(dqp))
+               defq = &qi->qi_grp_default;
+       else {
+               ASSERT(XFS_QM_ISPDQ(dqp));
+               defq = &qi->qi_prj_default;
+       }
+       return defq;
+}
+
 #endif /* __XFS_QM_H__ */
index 3640c6e896af70eb2e910a31786cb7ac2298f847..f4d0e0a8f517c65913b8d45f383450384576b39e 100644 (file)
@@ -404,6 +404,7 @@ xfs_qm_scall_setqlim(
        struct xfs_disk_dquot   *ddq;
        struct xfs_dquot        *dqp;
        struct xfs_trans        *tp;
+       struct xfs_def_quota    *defq;
        int                     error;
        xfs_qcnt_t              hard, soft;
 
@@ -431,6 +432,8 @@ xfs_qm_scall_setqlim(
                ASSERT(error != -ENOENT);
                goto out_unlock;
        }
+
+       defq = xfs_get_defquota(dqp, q);
        xfs_dqunlock(dqp);
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
@@ -458,8 +461,8 @@ xfs_qm_scall_setqlim(
                ddq->d_blk_softlimit = cpu_to_be64(soft);
                xfs_dquot_set_prealloc_limits(dqp);
                if (id == 0) {
-                       q->qi_bhardlimit = hard;
-                       q->qi_bsoftlimit = soft;
+                       defq->bhardlimit = hard;
+                       defq->bsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
@@ -474,8 +477,8 @@ xfs_qm_scall_setqlim(
                ddq->d_rtb_hardlimit = cpu_to_be64(hard);
                ddq->d_rtb_softlimit = cpu_to_be64(soft);
                if (id == 0) {
-                       q->qi_rtbhardlimit = hard;
-                       q->qi_rtbsoftlimit = soft;
+                       defq->rtbhardlimit = hard;
+                       defq->rtbsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
@@ -491,8 +494,8 @@ xfs_qm_scall_setqlim(
                ddq->d_ino_hardlimit = cpu_to_be64(hard);
                ddq->d_ino_softlimit = cpu_to_be64(soft);
                if (id == 0) {
-                       q->qi_ihardlimit = hard;
-                       q->qi_isoftlimit = soft;
+                       defq->ihardlimit = hard;
+                       defq->isoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft);
@@ -635,9 +638,10 @@ out:
 int
 xfs_qm_scall_getquota(
        struct xfs_mount        *mp,
-       xfs_dqid_t              id,
+       xfs_dqid_t              *id,
        uint                    type,
-       struct qc_dqblk         *dst)
+       struct qc_dqblk         *dst,
+       uint                    dqget_flags)
 {
        struct xfs_dquot        *dqp;
        int                     error;
@@ -647,7 +651,7 @@ xfs_qm_scall_getquota(
         * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
         * exist, we'll get ENOENT back.
         */
-       error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
+       error = xfs_qm_dqget(mp, NULL, *id, type, dqget_flags, &dqp);
        if (error)
                return error;
 
@@ -660,6 +664,9 @@ xfs_qm_scall_getquota(
                goto out_put;
        }
 
+       /* Fill in the ID we actually read from disk */
+       *id = be32_to_cpu(dqp->q_core.d_id);
+
        memset(dst, 0, sizeof(*dst));
        dst->d_spc_hardlimit =
                XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
@@ -701,7 +708,7 @@ xfs_qm_scall_getquota(
        if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
             (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
             (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
-           id != 0) {
+           *id != 0) {
                if ((dst->d_space > dst->d_spc_softlimit) &&
                    (dst->d_spc_softlimit > 0)) {
                        ASSERT(dst->d_spc_timer != 0);
index 7795e0d01382a60798b4e83f35ba6db725fe8779..f82d79a8c694a8f32427b8d0e2923dbde1670d88 100644 (file)
@@ -231,14 +231,45 @@ xfs_fs_get_dqblk(
        struct qc_dqblk         *qdq)
 {
        struct xfs_mount        *mp = XFS_M(sb);
+       xfs_dqid_t              id;
 
        if (!XFS_IS_QUOTA_RUNNING(mp))
                return -ENOSYS;
        if (!XFS_IS_QUOTA_ON(mp))
                return -ESRCH;
 
-       return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
-                                     xfs_quota_type(qid.type), qdq);
+       id = from_kqid(&init_user_ns, qid);
+       return xfs_qm_scall_getquota(mp, &id,
+                                     xfs_quota_type(qid.type), qdq, 0);
+}
+
+/* Return quota info for active quota >= this qid */
+STATIC int
+xfs_fs_get_nextdqblk(
+       struct super_block      *sb,
+       struct kqid             *qid,
+       struct qc_dqblk         *qdq)
+{
+       int                     ret;
+       struct xfs_mount        *mp = XFS_M(sb);
+       xfs_dqid_t              id;
+
+       if (!XFS_IS_QUOTA_RUNNING(mp))
+               return -ENOSYS;
+       if (!XFS_IS_QUOTA_ON(mp))
+               return -ESRCH;
+
+       id = from_kqid(&init_user_ns, *qid);
+       ret = xfs_qm_scall_getquota(mp, &id,
+                                   xfs_quota_type(qid->type), qdq,
+                                   XFS_QMOPT_DQNEXT);
+       if (ret)
+               return ret;
+
+       /* ID may be different, so convert back what we got */
+       *qid = make_kqid(current_user_ns(), qid->type, id);
+       return 0;
+
 }
 
 STATIC int
@@ -267,5 +298,6 @@ const struct quotactl_ops xfs_quotactl_operations = {
        .quota_disable          = xfs_quota_disable,
        .rm_xquota              = xfs_fs_rm_xquota,
        .get_dqblk              = xfs_fs_get_dqblk,
+       .get_nextdqblk          = xfs_fs_get_nextdqblk,
        .set_dqblk              = xfs_fs_set_dqblk,
 };
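
A rough sketch of how a caller could walk all allocated dquots through the new ->get_nextdqblk hook; the loop shape and the assumption that the scan ends with -ENOENT are taken from the XFS implementation above, and error handling is trimmed:

        /* Hypothetical helper: walk every user quota on @sb. */
        static void example_walk_user_quotas(struct super_block *sb)
        {
                struct kqid qid = make_kqid(&init_user_ns, USRQUOTA, 0);
                struct qc_dqblk dq;

                while (sb->s_qcop->get_nextdqblk(sb, &qid, &dq) == 0) {
                        /* dq describes the quota for the id written back into qid */
                        qid = make_kqid(&init_user_ns, USRQUOTA,
                                        from_kqid(&init_user_ns, qid) + 1);
                }
        }
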
index be02a68b2fe292e077c84862f93271dd049c3359..abf44435d04a3f4b898e21a00e45ee8ae607738a 100644 (file)
@@ -1272,7 +1272,7 @@ xfs_rtpick_extent(
 
        ASSERT(xfs_isilocked(mp->m_rbmip, XFS_ILOCK_EXCL));
 
-       seqp = (__uint64_t *)&mp->m_rbmip->i_d.di_atime;
+       seqp = (__uint64_t *)&VFS_I(mp->m_rbmip)->i_atime;
        if (!(mp->m_rbmip->i_d.di_flags & XFS_DIFLAG_NEWRTBM)) {
                mp->m_rbmip->i_d.di_flags |= XFS_DIFLAG_NEWRTBM;
                *seqp = 0;
index 391d797cb53fee0a9196d3e392dfd8fa95d5d33a..c8d58426008ed7ef49096097904ed13653a8cfe9 100644 (file)
@@ -1296,11 +1296,7 @@ DEFINE_IOMAP_EVENT(xfs_map_blocks_found);
 DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
 DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
 DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct_new);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct_update);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct_none);
-DEFINE_IOMAP_EVENT(xfs_gbmap_direct_endio);
+DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct);
 
 DECLARE_EVENT_CLASS(xfs_simple_io_class,
        TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -1340,6 +1336,9 @@ DEFINE_SIMPLE_IO_EVENT(xfs_unwritten_convert);
 DEFINE_SIMPLE_IO_EVENT(xfs_get_blocks_notfound);
 DEFINE_SIMPLE_IO_EVENT(xfs_setfilesize);
 DEFINE_SIMPLE_IO_EVENT(xfs_zero_eof);
+DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write);
+DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_unwritten);
+DEFINE_SIMPLE_IO_EVENT(xfs_end_io_direct_write_append);
 
 DECLARE_EVENT_CLASS(xfs_itrunc_class,
        TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
index 4f18fd92ca13b21d8fd68e955e082d9db9a61195..d6c9c3e9e02b2c45f2cd57074cbbcdebde1e804a 100644 (file)
@@ -497,6 +497,7 @@ xfsaild(
        long            tout = 0;       /* milliseconds */
 
        current->flags |= PF_MEMALLOC;
+       set_freezable();
 
        while (!kthread_should_stop()) {
                if (tout && tout <= 20)
@@ -519,14 +520,14 @@ xfsaild(
                if (!xfs_ail_min(ailp) &&
                    ailp->xa_target == ailp->xa_target_prev) {
                        spin_unlock(&ailp->xa_lock);
-                       schedule();
+                       freezable_schedule();
                        tout = 0;
                        continue;
                }
                spin_unlock(&ailp->xa_lock);
 
                if (tout)
-                       schedule_timeout(msecs_to_jiffies(tout));
+                       freezable_schedule_timeout(msecs_to_jiffies(tout));
 
                __set_current_state(TASK_RUNNING);
 
index 75798412859a7ba2f47b01945c54b7ee82ff4e7e..8ee29ca132dc13c0f302fa470cfaffc788cb9938 100644 (file)
@@ -155,7 +155,7 @@ xfs_trans_get_buf_map(
                ASSERT(xfs_buf_islocked(bp));
                if (XFS_FORCED_SHUTDOWN(tp->t_mountp)) {
                        xfs_buf_stale(bp);
-                       XFS_BUF_DONE(bp);
+                       bp->b_flags |= XBF_DONE;
                }
 
                ASSERT(bp->b_transp == tp);
@@ -518,7 +518,7 @@ xfs_trans_log_buf(xfs_trans_t       *tp,
         * inside the b_bdstrat callback so that this won't get written to
         * disk.
         */
-       XFS_BUF_DONE(bp);
+       bp->b_flags |= XBF_DONE;
 
        ASSERT(atomic_read(&bip->bli_refcount) > 0);
        bp->b_iodone = xfs_buf_iodone_callbacks;
@@ -534,8 +534,8 @@ xfs_trans_log_buf(xfs_trans_t       *tp,
         */
        if (bip->bli_flags & XFS_BLI_STALE) {
                bip->bli_flags &= ~XFS_BLI_STALE;
-               ASSERT(XFS_BUF_ISSTALE(bp));
-               XFS_BUF_UNSTALE(bp);
+               ASSERT(bp->b_flags & XBF_STALE);
+               bp->b_flags &= ~XBF_STALE;
                bip->__bli_format.blf_flags &= ~XFS_BLF_CANCEL;
        }
 
@@ -600,7 +600,7 @@ xfs_trans_binval(
                 * If the buffer is already invalidated, then
                 * just return.
                 */
-               ASSERT(XFS_BUF_ISSTALE(bp));
+               ASSERT(bp->b_flags & XBF_STALE);
                ASSERT(!(bip->bli_flags & (XFS_BLI_LOGGED | XFS_BLI_DIRTY)));
                ASSERT(!(bip->__bli_format.blf_flags & XFS_BLF_INODE_BUF));
                ASSERT(!(bip->__bli_format.blf_flags & XFS_BLFT_MASK));
index 995170194df040b5b3e02cab80b5942ee92cb2ad..c3d547211d16001ad686c543fd960996b2321c75 100644 (file)
@@ -609,17 +609,20 @@ xfs_trans_dqresv(
        xfs_qcnt_t      total_count;
        xfs_qcnt_t      *resbcountp;
        xfs_quotainfo_t *q = mp->m_quotainfo;
+       struct xfs_def_quota    *defq;
 
 
        xfs_dqlock(dqp);
 
+       defq = xfs_get_defquota(dqp, q);
+
        if (flags & XFS_TRANS_DQ_RES_BLKS) {
                hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
                if (!hardlimit)
-                       hardlimit = q->qi_bhardlimit;
+                       hardlimit = defq->bhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit);
                if (!softlimit)
-                       softlimit = q->qi_bsoftlimit;
+                       softlimit = defq->bsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_btimer);
                warns = be16_to_cpu(dqp->q_core.d_bwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit;
@@ -628,10 +631,10 @@ xfs_trans_dqresv(
                ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS);
                hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit);
                if (!hardlimit)
-                       hardlimit = q->qi_rtbhardlimit;
+                       hardlimit = defq->rtbhardlimit;
                softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit);
                if (!softlimit)
-                       softlimit = q->qi_rtbsoftlimit;
+                       softlimit = defq->rtbsoftlimit;
                timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
                warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
                warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit;
@@ -672,10 +675,10 @@ xfs_trans_dqresv(
                        warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
                        hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
                        if (!hardlimit)
-                               hardlimit = q->qi_ihardlimit;
+                               hardlimit = defq->ihardlimit;
                        softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
                        if (!softlimit)
-                               softlimit = q->qi_isoftlimit;
+                               softlimit = defq->isoftlimit;
 
                        if (hardlimit && total_count > hardlimit) {
                                xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
index b97f1df910abb0bd60ac5850fbe549237806d078..11a3af08b5c7ea1e40dcd348606414e147e6a0cd 100644 (file)
@@ -75,18 +75,10 @@ xfs_trans_ichgtime(
 
        tv = current_fs_time(inode->i_sb);
 
-       if ((flags & XFS_ICHGTIME_MOD) &&
-           !timespec_equal(&inode->i_mtime, &tv)) {
+       if (flags & XFS_ICHGTIME_MOD)
                inode->i_mtime = tv;
-               ip->i_d.di_mtime.t_sec = tv.tv_sec;
-               ip->i_d.di_mtime.t_nsec = tv.tv_nsec;
-       }
-       if ((flags & XFS_ICHGTIME_CHG) &&
-           !timespec_equal(&inode->i_ctime, &tv)) {
+       if (flags & XFS_ICHGTIME_CHG)
                inode->i_ctime = tv;
-               ip->i_d.di_ctime.t_sec = tv.tv_sec;
-               ip->i_d.di_ctime.t_nsec = tv.tv_nsec;
-       }
 }
 
 /*
@@ -125,7 +117,7 @@ xfs_trans_log_inode(
         */
        if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) &&
            IS_I_VERSION(VFS_I(ip))) {
-               ip->i_d.di_changecount = ++VFS_I(ip)->i_version;
+               VFS_I(ip)->i_version++;
                flags |= XFS_ILOG_CORE;
        }
 
index 717a29810473894aaf2f22e6e487f84087628df5..dad8af3ebeb5405c144db72cea488d7f5ea5fd39 100644 (file)
@@ -133,6 +133,5 @@ extern int acpi_get_psd_map(struct cpudata **);
 /* Methods to interact with the PCC mailbox controller. */
 extern struct mbox_chan *
        pcc_mbox_request_channel(struct mbox_client *, unsigned int);
-extern int mbox_send_message(struct mbox_chan *chan, void *mssg);
 
 #endif /* _CPPC_ACPI_H*/
index 0419485891f2a8eb6125f154f52d7b16c5fb05b4..0f1c6f315cdc5294ca63bac49efbf69a5dfee805 100644 (file)
@@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
  */
 static inline cputime_t timespec_to_cputime(const struct timespec *val)
 {
-       u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+       u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
        return (__force cputime_t) ret;
 }
 static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
@@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
  */
 static inline cputime_t timeval_to_cputime(const struct timeval *val)
 {
-       u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+       u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
+                       val->tv_usec * NSEC_PER_USEC;
        return (__force cputime_t) ret;
 }
 static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
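
The added (u64) casts matter on 32-bit builds, where tv_sec and NSEC_PER_SEC are both 32-bit longs and the multiplication used to overflow before being widened to u64; for example:

        long sec = 3;
        u64 wrong = sec * NSEC_PER_SEC;         /* 3 * 1000000000 overflows a 32-bit long first */
        u64 right = (u64)sec * NSEC_PER_SEC;    /* widened before multiplying: 3000000000 */
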
index 1cbb8338edf391bd83c4d1b0bc0dff2cbbe56e75..827e4d3bbc7a46ef59222651a8020234addc82cb 100644 (file)
@@ -70,12 +70,12 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 #endif
 
 /* Return a pointer with offset calculated */
-#define __set_fixmap_offset(idx, phys, flags)                \
-({                                                           \
-       unsigned long addr;                                   \
-       __set_fixmap(idx, phys, flags);                       \
-       addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
-       addr;                                                 \
+#define __set_fixmap_offset(idx, phys, flags)                          \
+({                                                                     \
+       unsigned long ________addr;                                     \
+       __set_fixmap(idx, phys, flags);                                 \
+       ________addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1));   \
+       ________addr;                                                   \
 })
 
 #define set_fixmap_offset(idx, phys) \
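
The obfuscated local name guards against an argument collision: with the old macro, a caller whose phys expression mentioned a variable called addr ended up referring to the macro's own uninitialised local, because the inner declaration shadowed it. A small illustration (FIX_EXAMPLE and the flags value are placeholders):

        unsigned long addr = 0x1000;    /* caller's variable */

        /* Old macro: inside the statement expression, 'phys' expands to 'addr',
         * which then names the macro's shadowing local, not the caller's 0x1000. */
        void *p = (void *)__set_fixmap_offset(FIX_EXAMPLE, addr, PAGE_KERNEL);
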
diff --git a/include/asm-generic/pci-bridge.h b/include/asm-generic/pci-bridge.h
deleted file mode 100644 (file)
index 20db2e5..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef _ASM_GENERIC_PCI_BRIDGE_H
-#define _ASM_GENERIC_PCI_BRIDGE_H
-
-#ifdef __KERNEL__
-
-enum {
-       /* Force re-assigning all resources (ignore firmware
-        * setup completely)
-        */
-       PCI_REASSIGN_ALL_RSRC   = 0x00000001,
-
-       /* Re-assign all bus numbers */
-       PCI_REASSIGN_ALL_BUS    = 0x00000002,
-
-       /* Do not try to assign, just use existing setup */
-       PCI_PROBE_ONLY          = 0x00000004,
-
-       /* Don't bother with ISA alignment unless the bridge has
-        * ISA forwarding enabled
-        */
-       PCI_CAN_SKIP_ISA_ALIGN  = 0x00000008,
-
-       /* Enable domain numbers in /proc */
-       PCI_ENABLE_PROC_DOMAINS = 0x00000010,
-       /* ... except for domain 0 */
-       PCI_COMPAT_DOMAIN_0     = 0x00000020,
-
-       /* PCIe downstream ports are bridges that normally lead to only a
-        * device 0, but if this is set, we scan all possible devices, not
-        * just device 0.
-        */
-       PCI_SCAN_ALL_PCIE_DEVS  = 0x00000040,
-};
-
-#ifdef CONFIG_PCI
-extern unsigned int pci_flags;
-
-static inline void pci_set_flags(int flags)
-{
-       pci_flags = flags;
-}
-
-static inline void pci_add_flags(int flags)
-{
-       pci_flags |= flags;
-}
-
-static inline void pci_clear_flags(int flags)
-{
-       pci_flags &= ~flags;
-}
-
-static inline int pci_has_flag(int flag)
-{
-       return pci_flags & flag;
-}
-#else
-static inline void pci_set_flags(int flags) { }
-static inline void pci_add_flags(int flags) { }
-static inline void pci_clear_flags(int flags) { }
-static inline int pci_has_flag(int flag)
-{
-       return 0;
-}
-#endif /* CONFIG_PCI */
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_GENERIC_PCI_BRIDGE_H */
index 0b3c0d39ef753053bb26c1b9fb4979e706240a58..c370b261c72004dcafa3dd036920b7b5fe3d01a2 100644 (file)
@@ -239,6 +239,14 @@ extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp);
 #endif
 
+#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
+static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+                                          unsigned long address, pmd_t *pmdp)
+{
+
+}
+#endif
+
 #ifndef __HAVE_ARCH_PTE_SAME
 static inline int pte_same(pte_t pte_a, pte_t pte_b)
 {
index c4bd0e2c173c011e37f6eef7acfb9bc3920fcca6..e9a81a6a109f488a4e656c63801b4322988d60af 100644 (file)
 #define EARLYCON_TABLE() STRUCT_ALIGN();                       \
                         VMLINUX_SYMBOL(__earlycon_table) = .;  \
                         *(__earlycon_table)                    \
-                        *(__earlycon_table_end)
+                        VMLINUX_SYMBOL(__earlycon_table_end) = .;
 #else
 #define EARLYCON_TABLE()
 #endif
 #define RESERVEDMEM_OF_TABLES()        OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
 #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
 #define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
-#define EARLYCON_OF_TABLES()   OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon)
 
 #ifdef CONFIG_ACPI
 #define ACPI_PROBE_TABLE(name)                                         \
        IRQCHIP_OF_MATCH_TABLE()                                        \
        ACPI_PROBE_TABLE(irqchip)                                       \
        ACPI_PROBE_TABLE(clksrc)                                        \
-       EARLYCON_TABLE()                                                \
-       EARLYCON_OF_TABLES()
+       EARLYCON_TABLE()
 
 #define INIT_TEXT                                                      \
        *(.init.text)                                                   \
index c9fe145f7dd3bad3af8cd7902accefbc8c7b5366..eeafd21afb446ddf9e6607d86e5faea394ced07f 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/crypto.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
+#include <linux/kthread.h>
 #include <linux/skbuff.h>
 
 struct crypto_aead;
@@ -128,6 +129,75 @@ struct ablkcipher_walk {
        unsigned int            blocksize;
 };
 
+#define ENGINE_NAME_LEN        30
+/*
+ * struct crypto_engine - crypto hardware engine
+ * @name: the engine name
+ * @idling: the engine is entering idle state
+ * @busy: request pump is busy
+ * @running: the engine is running
+ * @cur_req_prepared: current request is prepared
+ * @list: link with the global crypto engine list
+ * @queue_lock: spinlock to synchronise access to the request queue
+ * @queue: the crypto queue of the engine
+ * @rt: whether this queue is set to run as a realtime task
+ * @prepare_crypt_hardware: a request will soon arrive from the queue
+ * so the subsystem requests the driver to prepare the hardware
+ * by issuing this call
+ * @unprepare_crypt_hardware: there are currently no more requests on the
+ * queue so the subsystem notifies the driver that it may relax the
+ * hardware by issuing this call
+ * @prepare_request: do any preparation needed before handling the current request
+ * @unprepare_request: undo any work done by prepare_request()
+ * @crypt_one_request: perform the cipher operation for the current request
+ * @kworker: thread struct for request pump
+ * @kworker_task: pointer to task for request pump kworker thread
+ * @pump_requests: work struct for scheduling work to the request pump
+ * @priv_data: the engine private data
+ * @cur_req: the request currently being processed
+ */
+struct crypto_engine {
+       char                    name[ENGINE_NAME_LEN];
+       bool                    idling;
+       bool                    busy;
+       bool                    running;
+       bool                    cur_req_prepared;
+
+       struct list_head        list;
+       spinlock_t              queue_lock;
+       struct crypto_queue     queue;
+
+       bool                    rt;
+
+       int (*prepare_crypt_hardware)(struct crypto_engine *engine);
+       int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
+
+       int (*prepare_request)(struct crypto_engine *engine,
+                              struct ablkcipher_request *req);
+       int (*unprepare_request)(struct crypto_engine *engine,
+                                struct ablkcipher_request *req);
+       int (*crypt_one_request)(struct crypto_engine *engine,
+                                struct ablkcipher_request *req);
+
+       struct kthread_worker           kworker;
+       struct task_struct              *kworker_task;
+       struct kthread_work             pump_requests;
+
+       void                            *priv_data;
+       struct ablkcipher_request       *cur_req;
+};
+
+int crypto_transfer_request(struct crypto_engine *engine,
+                           struct ablkcipher_request *req, bool need_pump);
+int crypto_transfer_request_to_engine(struct crypto_engine *engine,
+                                     struct ablkcipher_request *req);
+void crypto_finalize_request(struct crypto_engine *engine,
+                            struct ablkcipher_request *req, int err);
+int crypto_engine_start(struct crypto_engine *engine);
+int crypto_engine_stop(struct crypto_engine *engine);
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+int crypto_engine_exit(struct crypto_engine *engine);
+
 extern const struct crypto_type crypto_ablkcipher_type;
 extern const struct crypto_type crypto_blkcipher_type;
 
@@ -184,6 +254,10 @@ int crypto_enqueue_request(struct crypto_queue *queue,
                           struct crypto_async_request *request);
 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
+static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
+{
+       return queue->qlen;
+}
 
 /* These functions require the input/output to be aligned as u32. */
 void crypto_inc(u8 *a, unsigned int size);
@@ -275,24 +349,6 @@ static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
        return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
 }
 
-static inline struct crypto_hash *crypto_spawn_hash(struct crypto_spawn *spawn)
-{
-       u32 type = CRYPTO_ALG_TYPE_HASH;
-       u32 mask = CRYPTO_ALG_TYPE_HASH_MASK;
-
-       return __crypto_hash_cast(crypto_spawn_tfm(spawn, type, mask));
-}
-
-static inline void *crypto_hash_ctx(struct crypto_hash *tfm)
-{
-       return crypto_tfm_ctx(&tfm->base);
-}
-
-static inline void *crypto_hash_ctx_aligned(struct crypto_hash *tfm)
-{
-       return crypto_tfm_ctx_aligned(&tfm->base);
-}
-
 static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
                                       struct scatterlist *dst,
                                       struct scatterlist *src,
diff --git a/include/crypto/compress.h b/include/crypto/compress.h
deleted file mode 100644 (file)
index 5b67af8..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Compress: Compression algorithms under the cryptographic API.
- *
- * Copyright 2008 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- * If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _CRYPTO_COMPRESS_H
-#define _CRYPTO_COMPRESS_H
-
-#include <linux/crypto.h>
-
-
-struct comp_request {
-       const void *next_in;            /* next input byte */
-       void *next_out;                 /* next output byte */
-       unsigned int avail_in;          /* bytes available at next_in */
-       unsigned int avail_out;         /* bytes available at next_out */
-};
-
-enum zlib_comp_params {
-       ZLIB_COMP_LEVEL = 1,            /* e.g. Z_DEFAULT_COMPRESSION */
-       ZLIB_COMP_METHOD,               /* e.g. Z_DEFLATED */
-       ZLIB_COMP_WINDOWBITS,           /* e.g. MAX_WBITS */
-       ZLIB_COMP_MEMLEVEL,             /* e.g. DEF_MEM_LEVEL */
-       ZLIB_COMP_STRATEGY,             /* e.g. Z_DEFAULT_STRATEGY */
-       __ZLIB_COMP_MAX,
-};
-
-#define ZLIB_COMP_MAX  (__ZLIB_COMP_MAX - 1)
-
-
-enum zlib_decomp_params {
-       ZLIB_DECOMP_WINDOWBITS = 1,     /* e.g. DEF_WBITS */
-       __ZLIB_DECOMP_MAX,
-};
-
-#define ZLIB_DECOMP_MAX        (__ZLIB_DECOMP_MAX - 1)
-
-
-struct crypto_pcomp {
-       struct crypto_tfm base;
-};
-
-struct pcomp_alg {
-       int (*compress_setup)(struct crypto_pcomp *tfm, const void *params,
-                             unsigned int len);
-       int (*compress_init)(struct crypto_pcomp *tfm);
-       int (*compress_update)(struct crypto_pcomp *tfm,
-                              struct comp_request *req);
-       int (*compress_final)(struct crypto_pcomp *tfm,
-                             struct comp_request *req);
-       int (*decompress_setup)(struct crypto_pcomp *tfm, const void *params,
-                               unsigned int len);
-       int (*decompress_init)(struct crypto_pcomp *tfm);
-       int (*decompress_update)(struct crypto_pcomp *tfm,
-                                struct comp_request *req);
-       int (*decompress_final)(struct crypto_pcomp *tfm,
-                               struct comp_request *req);
-
-       struct crypto_alg base;
-};
-
-extern struct crypto_pcomp *crypto_alloc_pcomp(const char *alg_name, u32 type,
-                                              u32 mask);
-
-static inline struct crypto_tfm *crypto_pcomp_tfm(struct crypto_pcomp *tfm)
-{
-       return &tfm->base;
-}
-
-static inline void crypto_free_pcomp(struct crypto_pcomp *tfm)
-{
-       crypto_destroy_tfm(tfm, crypto_pcomp_tfm(tfm));
-}
-
-static inline struct pcomp_alg *__crypto_pcomp_alg(struct crypto_alg *alg)
-{
-       return container_of(alg, struct pcomp_alg, base);
-}
-
-static inline struct pcomp_alg *crypto_pcomp_alg(struct crypto_pcomp *tfm)
-{
-       return __crypto_pcomp_alg(crypto_pcomp_tfm(tfm)->__crt_alg);
-}
-
-static inline int crypto_compress_setup(struct crypto_pcomp *tfm,
-                                       const void *params, unsigned int len)
-{
-       return crypto_pcomp_alg(tfm)->compress_setup(tfm, params, len);
-}
-
-static inline int crypto_compress_init(struct crypto_pcomp *tfm)
-{
-       return crypto_pcomp_alg(tfm)->compress_init(tfm);
-}
-
-static inline int crypto_compress_update(struct crypto_pcomp *tfm,
-                                        struct comp_request *req)
-{
-       return crypto_pcomp_alg(tfm)->compress_update(tfm, req);
-}
-
-static inline int crypto_compress_final(struct crypto_pcomp *tfm,
-                                       struct comp_request *req)
-{
-       return crypto_pcomp_alg(tfm)->compress_final(tfm, req);
-}
-
-static inline int crypto_decompress_setup(struct crypto_pcomp *tfm,
-                                         const void *params, unsigned int len)
-{
-       return crypto_pcomp_alg(tfm)->decompress_setup(tfm, params, len);
-}
-
-static inline int crypto_decompress_init(struct crypto_pcomp *tfm)
-{
-       return crypto_pcomp_alg(tfm)->decompress_init(tfm);
-}
-
-static inline int crypto_decompress_update(struct crypto_pcomp *tfm,
-                                          struct comp_request *req)
-{
-       return crypto_pcomp_alg(tfm)->decompress_update(tfm, req);
-}
-
-static inline int crypto_decompress_final(struct crypto_pcomp *tfm,
-                                         struct comp_request *req)
-{
-       return crypto_pcomp_alg(tfm)->decompress_final(tfm, req);
-}
-
-#endif /* _CRYPTO_COMPRESS_H */
index 9756c70899d8195eb54f7349efb911e8604d6222..d961b2b16f5593e695c353aa055595145fba8413 100644 (file)
@@ -117,10 +117,6 @@ struct drbg_state {
        void *priv_data;        /* Cipher handle */
        bool seeded;            /* DRBG fully seeded? */
        bool pr;                /* Prediction resistance enabled? */
-#ifdef CONFIG_CRYPTO_FIPS
-       bool fips_primed;       /* Continuous test primed? */
-       unsigned char *prev;    /* FIPS 140-2 continuous test value */
-#endif
        struct work_struct seed_work;   /* asynchronous seeding support */
        struct crypto_rng *jent;
        const struct drbg_state_ops *d_ops;
index 6361892ea737137899c5d9489634446cb3bdedac..1969f1416658babc28dd6d2a75363b93da36ac63 100644 (file)
@@ -14,6 +14,7 @@
 #define _CRYPTO_HASH_H
 
 #include <linux/crypto.h>
+#include <linux/string.h>
 
 struct crypto_ahash;
 
@@ -259,6 +260,28 @@ static inline void crypto_free_ahash(struct crypto_ahash *tfm)
        crypto_destroy_tfm(tfm, crypto_ahash_tfm(tfm));
 }
 
+/**
+ * crypto_has_ahash() - Search for the availability of an ahash.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ *           ahash
+ * @type: specifies the type of the ahash
+ * @mask: specifies the mask for the ahash
+ *
+ * Return: true when the ahash is known to the kernel crypto API; false
+ *        otherwise
+ */
+int crypto_has_ahash(const char *alg_name, u32 type, u32 mask);
+
+static inline const char *crypto_ahash_alg_name(struct crypto_ahash *tfm)
+{
+       return crypto_tfm_alg_name(crypto_ahash_tfm(tfm));
+}
+
+static inline const char *crypto_ahash_driver_name(struct crypto_ahash *tfm)
+{
+       return crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
+}
+
 static inline unsigned int crypto_ahash_alignmask(
        struct crypto_ahash *tfm)
 {
@@ -550,6 +573,12 @@ static inline void ahash_request_free(struct ahash_request *req)
        kzfree(req);
 }
 
+static inline void ahash_request_zero(struct ahash_request *req)
+{
+       memzero_explicit(req, sizeof(*req) +
+                             crypto_ahash_reqsize(crypto_ahash_reqtfm(req)));
+}
+
 static inline struct ahash_request *ahash_request_cast(
        struct crypto_async_request *req)
 {
@@ -657,6 +686,16 @@ static inline void crypto_free_shash(struct crypto_shash *tfm)
        crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
 }
 
+static inline const char *crypto_shash_alg_name(struct crypto_shash *tfm)
+{
+       return crypto_tfm_alg_name(crypto_shash_tfm(tfm));
+}
+
+static inline const char *crypto_shash_driver_name(struct crypto_shash *tfm)
+{
+       return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm));
+}
+
 static inline unsigned int crypto_shash_alignmask(
        struct crypto_shash *tfm)
 {
@@ -872,4 +911,10 @@ int crypto_shash_final(struct shash_desc *desc, u8 *out);
 int crypto_shash_finup(struct shash_desc *desc, const u8 *data,
                       unsigned int len, u8 *out);
 
+static inline void shash_desc_zero(struct shash_desc *desc)
+{
+       memzero_explicit(desc,
+                        sizeof(*desc) + crypto_shash_descsize(desc->tfm));
+}
+
 #endif /* _CRYPTO_HASH_H */
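
Not part of the diff above, but a minimal usage sketch of the new shash helpers (assumes the generic sha256 driver is available, needs <crypto/hash.h>, error handling abbreviated; "out" must hold crypto_shash_digestsize() bytes):

static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;

		/* new helper: report which implementation got selected */
		pr_debug("hashing via %s\n", crypto_shash_driver_name(tfm));

		err = crypto_shash_digest(desc, data, len, out);

		/* new helper: scrub the descriptor and any hash state in it */
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}
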
index 5554cdd8d6c17344f049f138e57027defc050dc1..da3864991d4c50b33a4145c57444d8a70a0db14a 100644 (file)
@@ -80,6 +80,12 @@ static inline u32 aead_request_flags(struct aead_request *req)
        return req->base.flags;
 }
 
+static inline struct aead_request *aead_request_cast(
+       struct crypto_async_request *req)
+{
+       return container_of(req, struct aead_request, base);
+}
+
 static inline void crypto_set_aead_spawn(
        struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
 {
diff --git a/include/crypto/internal/compress.h b/include/crypto/internal/compress.h
deleted file mode 100644 (file)
index 178a888..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Compress: Compression algorithms under the cryptographic API.
- *
- * Copyright 2008 Sony Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.
- * If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _CRYPTO_INTERNAL_COMPRESS_H
-#define _CRYPTO_INTERNAL_COMPRESS_H
-
-#include <crypto/compress.h>
-
-extern int crypto_register_pcomp(struct pcomp_alg *alg);
-extern int crypto_unregister_pcomp(struct pcomp_alg *alg);
-
-#endif /* _CRYPTO_INTERNAL_COMPRESS_H */
index 3b4af1d7c7e91ce7482814955ed449b4e6954253..49dae16f8929deec7e34adb3f66ef6b04767863d 100644 (file)
@@ -57,9 +57,6 @@ int crypto_hash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk);
 int crypto_ahash_walk_first(struct ahash_request *req,
                           struct crypto_hash_walk *walk);
-int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
-                                 struct crypto_hash_walk *walk,
-                                 struct scatterlist *sg, unsigned int len);
 
 static inline int crypto_ahash_walk_done(struct crypto_hash_walk *walk,
                                         int err)
index fd8742a40ff3e93476ec3b8cdb1370200275b6d3..905490c1da89b30ab1cf05b7a4aeaf1962e4e6ce 100644 (file)
@@ -60,8 +60,7 @@ struct crypto_skcipher {
 
        unsigned int ivsize;
        unsigned int reqsize;
-
-       bool has_setkey;
+       unsigned int keysize;
 
        struct crypto_tfm base;
 };
@@ -232,6 +231,12 @@ static inline int crypto_has_skcipher(const char *alg_name, u32 type,
                              crypto_skcipher_mask(mask));
 }
 
+static inline const char *crypto_skcipher_driver_name(
+       struct crypto_skcipher *tfm)
+{
+       return crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
+}
+
 /**
  * crypto_skcipher_ivsize() - obtain IV size
  * @tfm: cipher handle
@@ -309,7 +314,13 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
 
 static inline bool crypto_skcipher_has_setkey(struct crypto_skcipher *tfm)
 {
-       return tfm->has_setkey;
+       return tfm->keysize;
+}
+
+static inline unsigned int crypto_skcipher_default_keysize(
+       struct crypto_skcipher *tfm)
+{
+       return tfm->keysize;
 }
 
 /**
@@ -440,6 +451,13 @@ static inline void skcipher_request_free(struct skcipher_request *req)
        kzfree(req);
 }
 
+static inline void skcipher_request_zero(struct skcipher_request *req)
+{
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+       memzero_explicit(req, sizeof(*req) + crypto_skcipher_reqsize(tfm));
+}
+
 /**
  * skcipher_request_set_callback() - set asynchronous callback function
  * @req: request handle
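
Not part of the diff above, but a rough sketch of how the reworked skcipher helpers fit together: crypto_skcipher_has_setkey() is now simply "max key size != 0", crypto_skcipher_default_keysize() exposes that size, and skcipher_request_zero() wipes the request together with its per-transform context. Assumes cbc(aes) is available; error handling abbreviated:

static int example_skcipher_key_setup(const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err = -EINVAL;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* non-zero keysize means this cipher actually takes a key */
	if (!crypto_skcipher_has_setkey(tfm))
		goto out_free_tfm;

	pr_debug("default key size: %u\n",
		 crypto_skcipher_default_keysize(tfm));

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* ... skcipher_request_set_crypt() + crypto_skcipher_encrypt() ... */

	skcipher_request_zero(req);	/* scrub request + request context */
	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
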
index d7162cf1c3e10d4947d355b594c70ab30b8d99f6..3c8422c69572557a43e56921e475f6a17693453c 100644 (file)
@@ -283,6 +283,7 @@ struct drm_ioctl_desc {
 struct drm_pending_event {
        struct drm_event *event;
        struct list_head link;
+       struct list_head pending_link;
        struct drm_file *file_priv;
        pid_t pid; /* pid of requester, no guarantee it's valid by the time
                      we deliver the event, for tracing only */
@@ -346,6 +347,7 @@ struct drm_file {
        struct list_head blobs;
 
        wait_queue_head_t event_wait;
+       struct list_head pending_event_list;
        struct list_head event_list;
        int event_space;
 
@@ -919,15 +921,25 @@ extern long drm_compat_ioctl(struct file *filp,
                             unsigned int cmd, unsigned long arg);
 extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
 
-                               /* Device support (drm_fops.h) */
-extern int drm_open(struct inode *inode, struct file *filp);
-extern ssize_t drm_read(struct file *filp, char __user *buffer,
-                       size_t count, loff_t *offset);
-extern int drm_release(struct inode *inode, struct file *filp);
-extern int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
-
-                               /* Mapping support (drm_vm.h) */
-extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+/* File Operations (drm_fops.c) */
+int drm_open(struct inode *inode, struct file *filp);
+ssize_t drm_read(struct file *filp, char __user *buffer,
+                size_t count, loff_t *offset);
+int drm_release(struct inode *inode, struct file *filp);
+int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv);
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+int drm_event_reserve_init_locked(struct drm_device *dev,
+                                 struct drm_file *file_priv,
+                                 struct drm_pending_event *p,
+                                 struct drm_event *e);
+int drm_event_reserve_init(struct drm_device *dev,
+                          struct drm_file *file_priv,
+                          struct drm_pending_event *p,
+                          struct drm_event *e);
+void drm_event_cancel_free(struct drm_device *dev,
+                          struct drm_pending_event *p);
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
 
 /* Misc. IOCTL support (drm_ioctl.c) */
 int drm_noop(struct drm_device *dev, void *data,
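
Not part of the diff above, but a sketch of the intended use of the new event helpers: reserve a pending event in process context, then complete it later (for example from the vblank interrupt) with drm_send_event_locked(). It reuses the existing struct drm_pending_vblank_event; function names are illustrative and error handling is abbreviated:

static int example_queue_vblank_event(struct drm_device *dev,
				      struct drm_file *file_priv,
				      u64 user_data,
				      struct drm_pending_vblank_event **out)
{
	struct drm_pending_vblank_event *e;
	int ret;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->event.base.type = DRM_EVENT_VBLANK;
	e->event.base.length = sizeof(e->event);
	e->event.user_data = user_data;

	/* accounts against file_priv->event_space and links the event */
	ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
	if (ret) {
		kfree(e);
		return ret;
	}

	*out = e;
	return 0;
}

/* later, typically from the vblank interrupt with dev->event_lock held: */
static void example_complete_vblank_event(struct drm_device *dev,
					  struct drm_pending_vblank_event *e)
{
	drm_send_event_locked(dev, &e->base);
}
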
index 89d008dc08e2e2c6caa1d1d5eeb1ac3bdedd2474..fe5efada9d68237c3863ad8f9bb972da6f610819 100644 (file)
@@ -42,6 +42,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
                             struct drm_atomic_state *state,
                             bool async);
 
+bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev,
+                                          struct drm_atomic_state *old_state,
+                                          struct drm_crtc *crtc);
+
 void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
                                        struct drm_atomic_state *old_state);
 
index 7bfb063029d83fe1374e78a174f4a638bc39bc49..461a0558bca4d8d16e81b88e0286e3fa6251ab79 100644 (file)
 
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
 
+static inline bool drm_arch_can_wc_memory(void)
+{
+#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
+       return false;
+#else
+       return true;
+#endif
+}
+
 #endif
index c65a212db77e7a99b26735c262cb31a9b2504195..8c7fb3d0f9d0f621bea427be0462cbb27ba51d1a 100644 (file)
@@ -307,6 +307,7 @@ struct drm_plane_helper_funcs;
  * @connectors_changed: connectors to this crtc have been updated
  * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
  * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
+ * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
  * @last_vblank_count: for helpers and drivers to capture the vblank of the
  *     update to ensure framebuffer cleanup isn't done too early
  * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
@@ -341,6 +342,7 @@ struct drm_crtc_state {
        u32 plane_mask;
 
        u32 connector_mask;
+       u32 encoder_mask;
 
        /* last_vblank_count: for vblank waits before cleanup */
        u32 last_vblank_count;
@@ -2153,6 +2155,17 @@ struct drm_mode_config {
        list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
                for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
 
+/**
+ * drm_for_each_encoder_mask - iterate over encoders specified by bitmask
+ * @encoder: the loop cursor
+ * @dev: the DRM device
+ * @encoder_mask: bitmask of encoder indices
+ *
+ * Iterate over all encoders specified by bitmask.
+ */
+#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
+       list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
+               for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
 
 #define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
 #define obj_to_connector(x) container_of(x, struct drm_connector, base)
@@ -2225,6 +2238,7 @@ int drm_encoder_init(struct drm_device *dev,
                     struct drm_encoder *encoder,
                     const struct drm_encoder_funcs *funcs,
                     int encoder_type, const char *name, ...);
+extern unsigned int drm_encoder_index(struct drm_encoder *encoder);
 
 /**
  * drm_encoder_crtc_ok - can a given crtc drive a given encoder?
@@ -2282,6 +2296,8 @@ extern void drm_property_destroy_user_blobs(struct drm_device *dev,
 extern bool drm_probe_ddc(struct i2c_adapter *adapter);
 extern struct edid *drm_get_edid(struct drm_connector *connector,
                                 struct i2c_adapter *adapter);
+extern struct edid *drm_get_edid_switcheroo(struct drm_connector *connector,
+                                           struct i2c_adapter *adapter);
 extern struct edid *drm_edid_duplicate(const struct edid *edid);
 extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
 extern void drm_mode_config_init(struct drm_device *dev);
@@ -2482,6 +2498,8 @@ extern int drm_format_num_planes(uint32_t format);
 extern int drm_format_plane_cpp(uint32_t format, int plane);
 extern int drm_format_horz_chroma_subsampling(uint32_t format);
 extern int drm_format_vert_chroma_subsampling(uint32_t format);
+extern int drm_format_plane_width(int width, uint32_t format, int plane);
+extern int drm_format_plane_height(int height, uint32_t format, int plane);
 extern const char *drm_get_format_name(uint32_t format);
 extern struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
                                                              unsigned int supported_rotations);
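
Not part of the diff above, but a small sketch of how the new encoder_mask and drm_for_each_encoder_mask() mirror the existing plane_mask/connector_mask iterators (function name is illustrative):

static void example_dump_crtc_encoders(struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask(encoder, dev, crtc_state->encoder_mask)
		DRM_DEBUG_KMS("encoder %u attached\n",
			      drm_encoder_index(encoder));
}
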
index 24ab1787b771936fdf1894aabd6271f65a771993..fdb47051d5492a4db126cc525667bd181343d2cd 100644 (file)
@@ -44,8 +44,6 @@ struct drm_dp_vcpi {
 /**
  * struct drm_dp_mst_port - MST port
  * @kref: reference count for this port.
- * @guid_valid: for DP 1.2 devices if we have validated the GUID.
- * @guid: guid for DP 1.2 device on this port.
  * @port_num: port number
  * @input: if this port is an input port.
  * @mcs: message capability status - DP 1.2 spec.
@@ -70,10 +68,6 @@ struct drm_dp_vcpi {
 struct drm_dp_mst_port {
        struct kref kref;
 
-       /* if dpcd 1.2 device is on this port - its GUID info */
-       bool guid_valid;
-       u8 guid[16];
-
        u8 port_num;
        bool input;
        bool mcs;
@@ -110,10 +104,12 @@ struct drm_dp_mst_port {
  * @tx_slots: transmission slots for this device.
  * @last_seqno: last sequence number used to talk to this.
  * @link_address_sent: if a link address message has been sent to this device yet.
+ * @guid: guid for DP 1.2 branch device. Ports under this branch can be
+ * identified by port number.
  *
  * This structure represents an MST branch device, there is one
- * primary branch device at the root, along with any others connected
- * to downstream ports
+ * primary branch device at the root, along with any other branches connected
+ * to downstream ports of parent branches.
  */
 struct drm_dp_mst_branch {
        struct kref kref;
@@ -132,6 +128,9 @@ struct drm_dp_mst_branch {
        struct drm_dp_sideband_msg_tx *tx_slots[2];
        int last_seqno;
        bool link_address_sent;
+
+       /* global unique identifier to identify branch devices */
+       u8 guid[16];
 };
 
 
@@ -406,11 +405,9 @@ struct drm_dp_payload {
  * @conn_base_id: DRM connector ID this mgr is connected to.
  * @down_rep_recv: msg receiver state for down replies.
  * @up_req_recv: msg receiver state for up requests.
- * @lock: protects mst state, primary, guid, dpcd.
+ * @lock: protects mst state, primary, dpcd.
  * @mst_state: if this manager is enabled for an MST capable port.
  * @mst_primary: pointer to the primary branch device.
- * @guid_valid: GUID valid for the primary branch device.
- * @guid: GUID for primary port.
  * @dpcd: cache of DPCD for primary port.
  * @pbn_div: PBN to slots divisor.
  *
@@ -432,13 +429,11 @@ struct drm_dp_mst_topology_mgr {
        struct drm_dp_sideband_msg_rx up_req_recv;
 
        /* pointer to info about the initial MST device */
-       struct mutex lock; /* protects mst_state + primary + guid + dpcd */
+       struct mutex lock; /* protects mst_state + primary + dpcd */
 
        bool mst_state;
        struct drm_dp_mst_branch *mst_primary;
-       /* primary MST device GUID */
-       bool guid_valid;
-       u8 guid[16];
+
        u8 dpcd[DP_RECEIVER_CAP_SIZE];
        u8 sink_count;
        int pbn_div;
index d639049a613dfdb264650f8ee938836e6ed28e10..553210c02ee0f655fd28b8ae64d370e7cee2fc70 100644 (file)
@@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
 #define DRM_FIXED_ONE          (1ULL << DRM_FIXED_POINT)
 #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
 #define DRM_FIXED_DIGITS_MASK  (~DRM_FIXED_DECIMAL_MASK)
+#define DRM_FIXED_EPSILON      1LL
+#define DRM_FIXED_ALMOST_ONE   (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
 
 static inline s64 drm_int2fixp(int a)
 {
        return ((s64)a) << DRM_FIXED_POINT;
 }
 
-static inline int drm_fixp2int(int64_t a)
+static inline int drm_fixp2int(s64 a)
 {
        return ((s64)a) >> DRM_FIXED_POINT;
 }
 
-static inline unsigned drm_fixp_msbset(int64_t a)
+static inline int drm_fixp2int_ceil(s64 a)
+{
+       if (a > 0)
+               return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
+       else
+               return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
+}
+
+static inline unsigned drm_fixp_msbset(s64 a)
 {
        unsigned shift, sign = (a >> 63) & 1;
 
@@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
        return result;
 }
 
+static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
+{
+       s64 res;
+       bool a_neg = a < 0;
+       bool b_neg = b < 0;
+       u64 a_abs = a_neg ? -a : a;
+       u64 b_abs = b_neg ? -b : b;
+       u64 rem;
+
+       /* determine integer part */
+       u64 res_abs  = div64_u64_rem(a_abs, b_abs, &rem);
+
+       /* determine fractional part */
+       {
+               u32 i = DRM_FIXED_POINT;
+
+               do {
+                       rem <<= 1;
+                       res_abs <<= 1;
+                       if (rem >= b_abs) {
+                               res_abs |= 1;
+                               rem -= b_abs;
+                       }
+               } while (--i != 0);
+       }
+
+       /* round up LSB */
+       {
+               u64 summand = (rem << 1) >= b_abs;
+
+               res_abs += summand;
+       }
+
+       res = (s64) res_abs;
+       if (a_neg ^ b_neg)
+               res = -res;
+       return res;
+}
+
 static inline s64 drm_fixp_exp(s64 x)
 {
        s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
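
Not part of the diff above, but a sketch of how the new fixed-point helpers compose: drm_fixp_from_fraction() long-divides straight into the fractional bits (so it avoids the precision loss of shifting before dividing), and drm_fixp2int_ceil() rounds away from zero on the way back to an integer. The function name and use case are illustrative:

static int example_scaled_height(int src_h, int dst_w, int src_w)
{
	/* src_h * (dst_w / src_w), computed in s64 fixed point */
	s64 ratio = drm_fixp_from_fraction(dst_w, src_w);
	s64 scaled = drm_fixp_mul(drm_int2fixp(src_h), ratio);

	return drm_fixp2int_ceil(scaled);
}
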
index f97020904717f7e5ef26a1043577d2d270ce19ba..9094599a11509c190bfc5ae4df853531f5a442da 100644 (file)
        INTEL_VGA_DEVICE(0x191D, info)  /* WKS GT2 */
 
 #define INTEL_SKL_GT3_IDS(info) \
+       INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
+       INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
        INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
        INTEL_VGA_DEVICE(0x192A, info)  /* SRV GT3 */
 
 #define INTEL_BXT_IDS(info) \
        INTEL_VGA_DEVICE(0x0A84, info), \
        INTEL_VGA_DEVICE(0x1A84, info), \
-       INTEL_VGA_DEVICE(0x5A84, info)
+       INTEL_VGA_DEVICE(0x1A85, info), \
+       INTEL_VGA_DEVICE(0x5A84, info), /* APL HD Graphics 505 */ \
+       INTEL_VGA_DEVICE(0x5A85, info)  /* APL HD Graphics 500 */
 
 #define INTEL_KBL_GT1_IDS(info)        \
        INTEL_VGA_DEVICE(0x5913, info), /* ULT GT1.5 */ \
index 1579e07f96a312db9e456fb5b2c8064f1e1f26af..efcbc594fe82e6837a73d575c215b826dcddca6d 100644 (file)
 #define R8A7793_CLK_SCU_ALL            17
 #define R8A7793_CLK_SCU_DVC1           18
 #define R8A7793_CLK_SCU_DVC0           19
+#define R8A7793_CLK_SCU_CTU1_MIX1      20
+#define R8A7793_CLK_SCU_CTU0_MIX0      21
 #define R8A7793_CLK_SCU_SRC9           22
 #define R8A7793_CLK_SCU_SRC8           23
 #define R8A7793_CLK_SCU_SRC7           24
index 8df77a7c030b0647abbf81e3a592c488f273638c..4f53e70f68ee5b32ee69cd825e370ba3f65d9ce8 100644 (file)
@@ -55,6 +55,7 @@
 #define SCLK_TIMER6            90
 #define SCLK_JTAG              91
 #define SCLK_SMC               92
+#define SCLK_TSADC             93
 
 #define DCLK_LCDC0             190
 #define DCLK_LCDC1             191
index 6f45aea49e4ff049a51394f6782a803e32c389eb..0a05b0d36ae74d6c4e4a7e810e442ab2ff2b09bf 100644 (file)
 /* 104 */
 /* 105 */
 #define TEGRA210_CLK_D_AUDIO 106
-/* 107 ( affects abp -> ape) */
+#define TEGRA210_CLK_APB2APE 107
 /* 108 */
 /* 109 */
 /* 110 */
diff --git a/include/dt-bindings/power/rk3368-power.h b/include/dt-bindings/power/rk3368-power.h
new file mode 100644 (file)
index 0000000..93633d5
--- /dev/null
@@ -0,0 +1,28 @@
+#ifndef __DT_BINDINGS_POWER_RK3368_POWER_H__
+#define __DT_BINDINGS_POWER_RK3368_POWER_H__
+
+/* VD_CORE */
+#define RK3368_PD_A53_L0       0
+#define RK3368_PD_A53_L1       1
+#define RK3368_PD_A53_L2       2
+#define RK3368_PD_A53_L3       3
+#define RK3368_PD_SCU_L                4
+#define RK3368_PD_A53_B0       5
+#define RK3368_PD_A53_B1       6
+#define RK3368_PD_A53_B2       7
+#define RK3368_PD_A53_B3       8
+#define RK3368_PD_SCU_B                9
+
+/* VD_LOGIC */
+#define RK3368_PD_BUS          10
+#define RK3368_PD_PERI         11
+#define RK3368_PD_VIO          12
+#define RK3368_PD_ALIVE                13
+#define RK3368_PD_VIDEO                14
+#define RK3368_PD_GPU_0                15
+#define RK3368_PD_GPU_1                16
+
+/* VD_PMU */
+#define RK3368_PD_PMU          17
+
+#endif
diff --git a/include/linux/apple-gmux.h b/include/linux/apple-gmux.h
new file mode 100644 (file)
index 0000000..b2d32e0
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * apple-gmux.h - microcontroller built into dual GPU MacBook Pro & Mac Pro
+ * Copyright (C) 2015 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef LINUX_APPLE_GMUX_H
+#define LINUX_APPLE_GMUX_H
+
+#include <linux/acpi.h>
+
+#define GMUX_ACPI_HID "APP000B"
+
+#if IS_ENABLED(CONFIG_APPLE_GMUX)
+
+/**
+ * apple_gmux_present() - detect if gmux is built into the machine
+ *
+ * Drivers may use this to activate quirks specific to dual GPU MacBook Pros
+ * and Mac Pros, e.g. for deferred probing, runtime pm and backlight.
+ *
+ * Return: %true if gmux is present and the kernel was configured
+ * with CONFIG_APPLE_GMUX, %false otherwise.
+ */
+static inline bool apple_gmux_present(void)
+{
+       return acpi_dev_present(GMUX_ACPI_HID);
+}
+
+#else  /* !CONFIG_APPLE_GMUX */
+
+static inline bool apple_gmux_present(void)
+{
+       return false;
+}
+
+#endif /* !CONFIG_APPLE_GMUX */
+
+#endif /* LINUX_APPLE_GMUX_H */
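
Not part of the diff above, but a sketch of the kind of quirk the kerneldoc describes: a hypothetical platform backlight driver backing off when the gmux owns the backlight on dual-GPU Macs:

static int example_backlight_probe(struct platform_device *pdev)
{
	if (apple_gmux_present()) {
		dev_info(&pdev->dev, "gmux owns the backlight, backing off\n");
		return -ENODEV;
	}

	/* ... register the backlight device ... */
	return 0;
}
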
index b40ed5df5542f5049a28106353e909304831816f..e38e3fc13ea8764a66d4c84a6dea1bee3f4e630b 100644 (file)
@@ -109,6 +109,10 @@ extern int audit_classify_compat_syscall(int abi, unsigned syscall);
 /* maximized args number that audit_socketcall can process */
 #define AUDITSC_ARGS           6
 
+/* bit values for ->signal->audit_tty */
+#define AUDIT_TTY_ENABLE       BIT(0)
+#define AUDIT_TTY_LOG_PASSWD   BIT(1)
+
 struct filename;
 
 extern void audit_log_session_info(struct audit_buffer *ab);
diff --git a/include/linux/bcm963xx_nvram.h b/include/linux/bcm963xx_nvram.h
new file mode 100644 (file)
index 0000000..290c231
--- /dev/null
@@ -0,0 +1,112 @@
+#ifndef __LINUX_BCM963XX_NVRAM_H__
+#define __LINUX_BCM963XX_NVRAM_H__
+
+#include <linux/crc32.h>
+#include <linux/if_ether.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/*
+ * Broadcom BCM963xx SoC board nvram data structure.
+ *
+ * The nvram structure varies in size depending on the SoC board version. Use
+ * the appropriate minimum BCM963XX_NVRAM_*_SIZE define for the information
+ * you need instead of sizeof(struct bcm963xx_nvram) as this may change.
+ */
+
+#define BCM963XX_NVRAM_V4_SIZE         300
+#define BCM963XX_NVRAM_V5_SIZE         (1 * SZ_1K)
+
+#define BCM963XX_DEFAULT_PSI_SIZE      64
+
+enum bcm963xx_nvram_nand_part {
+       BCM963XX_NVRAM_NAND_PART_BOOT = 0,
+       BCM963XX_NVRAM_NAND_PART_ROOTFS_1,
+       BCM963XX_NVRAM_NAND_PART_ROOTFS_2,
+       BCM963XX_NVRAM_NAND_PART_DATA,
+       BCM963XX_NVRAM_NAND_PART_BBT,
+
+       __BCM963XX_NVRAM_NAND_NR_PARTS
+};
+
+struct bcm963xx_nvram {
+       u32     version;
+       char    bootline[256];
+       char    name[16];
+       u32     main_tp_number;
+       u32     psi_size;
+       u32     mac_addr_count;
+       u8      mac_addr_base[ETH_ALEN];
+       u8      __reserved1[2];
+       u32     checksum_v4;
+
+       u8      __reserved2[292];
+       u32     nand_part_offset[__BCM963XX_NVRAM_NAND_NR_PARTS];
+       u32     nand_part_size[__BCM963XX_NVRAM_NAND_NR_PARTS];
+       u8      __reserved3[388];
+       u32     checksum_v5;
+};
+
+#define BCM963XX_NVRAM_NAND_PART_OFFSET(nvram, part) \
+       bcm963xx_nvram_nand_part_offset(nvram, BCM963XX_NVRAM_NAND_PART_ ##part)
+
+static inline u64 __pure bcm963xx_nvram_nand_part_offset(
+       const struct bcm963xx_nvram *nvram,
+       enum bcm963xx_nvram_nand_part part)
+{
+       return nvram->nand_part_offset[part] * SZ_1K;
+}
+
+#define BCM963XX_NVRAM_NAND_PART_SIZE(nvram, part) \
+       bcm963xx_nvram_nand_part_size(nvram, BCM963XX_NVRAM_NAND_PART_ ##part)
+
+static inline u64 __pure bcm963xx_nvram_nand_part_size(
+       const struct bcm963xx_nvram *nvram,
+       enum bcm963xx_nvram_nand_part part)
+{
+       return nvram->nand_part_size[part] * SZ_1K;
+}
+
+/*
+ * bcm963xx_nvram_checksum - Verify nvram checksum
+ *
+ * @nvram: pointer to full size nvram data structure
+ * @expected_out: optional pointer to store expected checksum value
+ * @actual_out: optional pointer to store actual checksum value
+ *
+ * Return: 0 if the checksum is valid, otherwise -EINVAL
+ */
+static int __maybe_unused bcm963xx_nvram_checksum(
+       const struct bcm963xx_nvram *nvram,
+       u32 *expected_out, u32 *actual_out)
+{
+       u32 expected, actual;
+       size_t len;
+
+       if (nvram->version <= 4) {
+               expected = nvram->checksum_v4;
+               len = BCM963XX_NVRAM_V4_SIZE - sizeof(u32);
+       } else {
+               expected = nvram->checksum_v5;
+               len = BCM963XX_NVRAM_V5_SIZE - sizeof(u32);
+       }
+
+       /*
+        * Calculate the CRC32 value for the nvram with a checksum value
+        * of 0 without modifying or copying the nvram by combining:
+        * - The CRC32 of the nvram without the checksum value
+        * - The CRC32 of a zero checksum value (which is also 0)
+        */
+       actual = crc32_le_combine(
+               crc32_le(~0, (u8 *)nvram, len), 0, sizeof(u32));
+
+       if (expected_out)
+               *expected_out = expected;
+
+       if (actual_out)
+               *actual_out = actual;
+
+       return expected == actual ? 0 : -EINVAL;
+};
+
+#endif /* __LINUX_BCM963XX_NVRAM_H__ */
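
Not part of the diff above, but a sketch of validating an nvram blob read from flash with the helper; the surrounding read path and function name are illustrative:

static int example_validate_nvram(const struct bcm963xx_nvram *nvram)
{
	u32 expected, actual;

	if (bcm963xx_nvram_checksum(nvram, &expected, &actual)) {
		pr_warn("nvram checksum mismatch: expected %08x, got %08x\n",
			expected, actual);
		return -EINVAL;
	}

	return 0;
}
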
similarity index 87%
rename from arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h
rename to include/linux/bcm963xx_tag.h
index 1e6b587f62c940a513d357ab0064d103d3d974de..161c7b37a77b4cef3697132e27e1948468185d3b 100644 (file)
@@ -1,5 +1,7 @@
-#ifndef __BCM963XX_TAG_H
-#define __BCM963XX_TAG_H
+#ifndef __LINUX_BCM963XX_TAG_H__
+#define __LINUX_BCM963XX_TAG_H__
+
+#include <linux/types.h>
 
 #define TAGVER_LEN             4       /* Length of Tag Version */
 #define TAGLAYOUT_LEN          4       /* Length of FlashLayoutVer */
@@ -10,8 +12,7 @@
 #define CHIPID_LEN             6       /* Chip Id Length */
 #define IMAGE_LEN              10      /* Length of Length Field */
 #define ADDRESS_LEN            12      /* Length of Address field */
-#define DUALFLAG_LEN           2       /* Dual Image flag Length */
-#define INACTIVEFLAG_LEN       2       /* Inactie Flag Length */
+#define IMAGE_SEQUENCE_LEN     4       /* Image sequence Length */
 #define RSASIG_LEN             20      /* Length of RSA Signature in tag */
 #define TAGINFO1_LEN           30      /* Length of vendor information field1 in tag */
 #define FLASHLAYOUTVER_LEN     4       /* Length of Flash Layout Version String tag */
        "DWV-S0", \
 }
 
+/* Extended flash address, needs to be subtracted
+ * from bcm_tag flash image offsets.
+ */
+#define BCM963XX_EXTENDED_SIZE 0xBFC00000
+
 /*
  * The broadcom firmware assumes the rootfs starts the image,
  * therefore uses the rootfs start (flash_image_address)
@@ -65,10 +71,10 @@ struct bcm_tag {
        char kernel_address[ADDRESS_LEN];
        /* 128-137: Size of kernel */
        char kernel_length[IMAGE_LEN];
-       /* 138-139: Unused at the moment */
-       char dual_image[DUALFLAG_LEN];
-       /* 140-141: Unused at the moment */
-       char inactive_flag[INACTIVEFLAG_LEN];
+       /* 138-141: Image sequence number
+        * (to be incremented when flashed with a new image)
+        */
+       char image_sequence[IMAGE_SEQUENCE_LEN];
        /* 142-161: RSA Signature (not used; some vendors may use this) */
        char rsa_signature[RSASIG_LEN];
        /* 162-191: Compilation and related information (not used in OpenWrt) */
@@ -93,4 +99,4 @@ struct bcm_tag {
        char reserved2[16];
 };
 
-#endif /* __BCM63XX_TAG_H */
+#endif /* __LINUX_BCM63XX_TAG_H__ */
index 3feb1b2d75d87b83febb605a86720b06a8132d3f..0367c63f596019b5b94d66f315d426e6ab8a4cc4 100644 (file)
@@ -151,6 +151,8 @@ struct bcma_host_ops {
 #define BCMA_CORE_PCIE2                        0x83C   /* PCI Express Gen2 */
 #define BCMA_CORE_USB30_DEV            0x83D
 #define BCMA_CORE_ARM_CR4              0x83E
+#define BCMA_CORE_GCI                  0x840
+#define BCMA_CORE_CMEM                 0x846   /* CNDS DDR2/3 memory controller */
 #define BCMA_CORE_ARM_CA7              0x847
 #define BCMA_CORE_SYS_MEM              0x849
 #define BCMA_CORE_DEFAULT              0xFFF
@@ -199,6 +201,7 @@ struct bcma_host_ops {
 #define  BCMA_PKG_ID_BCM4707   1
 #define  BCMA_PKG_ID_BCM4708   2
 #define  BCMA_PKG_ID_BCM4709   0
+#define BCMA_CHIP_ID_BCM47094  53030
 #define BCMA_CHIP_ID_BCM53018  53018
 
 /* Board types (on PCI usually equals to the subsystem dev id) */
index db51a6ffb7d6839ba1ffd08ffd072faed79031dc..700d0c6f7480e46f972a7f30756cbb3320f3dd6c 100644 (file)
 #define         BCMA_CC_CLKDIV_JTAG_SHIFT      8
 #define         BCMA_CC_CLKDIV_UART            0x000000FF
 #define BCMA_CC_CAP_EXT                        0x00AC          /* Capabilities */
+#define  BCMA_CC_CAP_EXT_SECI_PRESENT  0x00000001
+#define  BCMA_CC_CAP_EXT_GSIO_PRESENT  0x00000002
+#define  BCMA_CC_CAP_EXT_GCI_PRESENT   0x00000004
+#define  BCMA_CC_CAP_EXT_SECI_PUART_PRESENT            0x00000008    /* UART present */
+#define  BCMA_CC_CAP_EXT_AOB_PRESENT   0x00000040
 #define BCMA_CC_PLLONDELAY             0x00B0          /* Rev >= 4 only */
 #define BCMA_CC_FREFSELDELAY           0x00B4          /* Rev >= 4 only */
 #define BCMA_CC_SLOWCLKCTL             0x00B8          /* 6 <= Rev <= 9 only */
 #define BCMA_CC_PMU_RES_REQTS          0x0640 /* PMU res req timer sel */
 #define BCMA_CC_PMU_RES_REQT           0x0644 /* PMU res req timer */
 #define BCMA_CC_PMU_RES_REQM           0x0648 /* PMU res req mask */
-#define BCMA_CC_CHIPCTL_ADDR           0x0650
-#define BCMA_CC_CHIPCTL_DATA           0x0654
-#define BCMA_CC_REGCTL_ADDR            0x0658
-#define BCMA_CC_REGCTL_DATA            0x065C
-#define BCMA_CC_PLLCTL_ADDR            0x0660
-#define BCMA_CC_PLLCTL_DATA            0x0664
+#define BCMA_CC_PMU_CHIPCTL_ADDR       0x0650
+#define BCMA_CC_PMU_CHIPCTL_DATA       0x0654
+#define BCMA_CC_PMU_REGCTL_ADDR                0x0658
+#define BCMA_CC_PMU_REGCTL_DATA                0x065C
+#define BCMA_CC_PMU_PLLCTL_ADDR                0x0660
+#define BCMA_CC_PMU_PLLCTL_DATA                0x0664
 #define BCMA_CC_PMU_STRAPOPT           0x0668 /* (corerev >= 28) */
 #define BCMA_CC_PMU_XTAL_FREQ          0x066C /* (pmurev >= 10) */
 #define  BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK     0x00001FFF
  * Check availability with ((struct bcma_chipcommon)->capabilities & BCMA_CC_CAP_PMU)
  */
 struct bcma_chipcommon_pmu {
+       struct bcma_device *core;       /* Can be separated core or just ChipCommon one */
        u8 rev;                 /* PMU revision */
        u32 crystalfreq;        /* The active crystal frequency (in kHz) */
 };
@@ -660,6 +666,19 @@ struct bcma_drv_cc_b {
 #define bcma_cc_maskset32(cc, offset, mask, set) \
        bcma_cc_write32(cc, offset, (bcma_cc_read32(cc, offset) & (mask)) | (set))
 
+/* PMU registers access */
+#define bcma_pmu_read32(cc, offset) \
+       bcma_read32((cc)->pmu.core, offset)
+#define bcma_pmu_write32(cc, offset, val) \
+       bcma_write32((cc)->pmu.core, offset, val)
+
+#define bcma_pmu_mask32(cc, offset, mask) \
+       bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) & (mask))
+#define bcma_pmu_set32(cc, offset, set) \
+       bcma_pmu_write32(cc, offset, bcma_pmu_read32(cc, offset) | (set))
+#define bcma_pmu_maskset32(cc, offset, mask, set) \
+       bcma_pmu_write32(cc, offset, (bcma_pmu_read32(cc, offset) & (mask)) | (set))
+
 extern u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks);
 
 extern u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc);
index 7fc9296b574290e768dbb38585e725525802940b..15a73d49fd1d54a4b401bf3a042b466de6f311c8 100644 (file)
@@ -244,6 +244,8 @@ void blk_mq_freeze_queue(struct request_queue *q);
 void blk_mq_unfreeze_queue(struct request_queue *q);
 void blk_mq_freeze_queue_start(struct request_queue *q);
 
+void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
+
 /*
  * Driver command data is immediately after the request. So subtract request
  * size to get back to the original request, add request size to get the PDU.
index 83d1926c61e4567b881bfbc26b75b802c428cbc3..90ee6ab24bc53badebf5ab0f6362de1d4ad2062d 100644 (file)
@@ -151,6 +151,7 @@ struct bpf_array {
        union {
                char value[0] __aligned(8);
                void *ptrs[0] __aligned(8);
+               void __percpu *pptrs[0] __aligned(8);
        };
 };
 #define MAX_TAIL_CALL_CNT 32
@@ -182,6 +183,29 @@ int bpf_prog_new_fd(struct bpf_prog *prog);
 int bpf_obj_pin_user(u32 ufd, const char __user *pathname);
 int bpf_obj_get_user(const char __user *pathname);
 
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
+int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
+                          u64 flags);
+int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
+                           u64 flags);
+
+/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
+ * forced to use 'long' read/writes to try to atomically copy long counters.
+ * Best-effort only.  No barriers here, since it _will_ race with concurrent
+ * updates from BPF programs. Called from bpf syscall and mostly used with
+ * size 8 or 16 bytes, so ask compiler to inline it.
+ */
+static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+{
+       const long *lsrc = src;
+       long *ldst = dst;
+
+       size /= sizeof(long);
+       while (size--)
+               *ldst++ = *lsrc++;
+}
+
 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
 #else
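
Not part of the diff above, but a sketch of the copy pattern bpf_long_memcpy() is built for: gathering each possible CPU's slot of a per-cpu map value into one flat buffer, in long-sized chunks so concurrently updated counters are read whole (best effort). value_size is assumed to be a multiple of 8; the names here are illustrative:

static void example_collect_percpu(void __percpu *pptr, void *out,
				   u32 value_size)
{
	int cpu;
	u32 off = 0;

	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(out + off, per_cpu_ptr(pptr, cpu), value_size);
		off += value_size;
	}
}
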
index f89b31d45cc894814d409a19e161a29c2c41e832..c1ef6f14e7be7f1317e34a0deac61fcb80511eef 100644 (file)
 #define CEPH_FEATURE_OSD_MIN_SIZE_RECOVERY (1ULL<<49)
 // duplicated since it was introduced at the same time as MIN_SIZE_RECOVERY
 #define CEPH_FEATURE_OSD_PROXY_FEATURES (1ULL<<49)  /* overlap w/ above */
+#define CEPH_FEATURE_MON_METADATA (1ULL<<50)
+#define CEPH_FEATURE_OSD_BITWISE_HOBJ_SORT (1ULL<<51) /* can sort objs bitwise */
+#define CEPH_FEATURE_OSD_PROXY_WRITE_FEATURES (1ULL<<52)
+#define CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 (1ULL<<53)
+#define CEPH_FEATURE_OSD_HITSET_GMT (1ULL<<54)
+#define CEPH_FEATURE_HAMMER_0_94_4 (1ULL<<55)
+#define CEPH_FEATURE_NEW_OSDOP_ENCODING   (1ULL<<56) /* New, v7 encoding */
+#define CEPH_FEATURE_MON_STATEFUL_SUB (1ULL<<57) /* stateful mon subscription */
+#define CEPH_FEATURE_MON_ROUTE_OSDMAP (1ULL<<57) /* peon sends osdmaps */
+#define CEPH_FEATURE_CRUSH_TUNABLES5   (1ULL<<58) /* chooseleaf stable mode */
+// duplicated since it was introduced at the same time as CEPH_FEATURE_CRUSH_TUNABLES5
+#define CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING   (1ULL<<58) /* New, v7 encoding */
 
 /*
  * The introduction of CEPH_FEATURE_OSD_SNAPMAPPER caused the feature
@@ -108,7 +120,9 @@ static inline u64 ceph_sanitize_features(u64 features)
         CEPH_FEATURE_CRUSH_TUNABLES3 |         \
         CEPH_FEATURE_OSD_PRIMARY_AFFINITY |    \
         CEPH_FEATURE_MSGR_KEEPALIVE2 |         \
-        CEPH_FEATURE_CRUSH_V4)
+        CEPH_FEATURE_CRUSH_V4 |                \
+        CEPH_FEATURE_CRUSH_TUNABLES5 |         \
+        CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
 
 #define CEPH_FEATURES_REQUIRED_DEFAULT   \
        (CEPH_FEATURE_NOSRCADDR |        \
index 5babb8e95352a6a7468feca70a343fafb56bddad..b827e066e55a198ec13f41d52b52964bd72edf4d 100644 (file)
@@ -40,46 +40,11 @@ static inline __u32 ceph_frag_mask_shift(__u32 f)
        return 24 - ceph_frag_bits(f);
 }
 
-static inline int ceph_frag_contains_value(__u32 f, __u32 v)
+static inline bool ceph_frag_contains_value(__u32 f, __u32 v)
 {
        return (v & ceph_frag_mask(f)) == ceph_frag_value(f);
 }
-static inline int ceph_frag_contains_frag(__u32 f, __u32 sub)
-{
-       /* is sub as specific as us, and contained by us? */
-       return ceph_frag_bits(sub) >= ceph_frag_bits(f) &&
-              (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f);
-}
 
-static inline __u32 ceph_frag_parent(__u32 f)
-{
-       return ceph_frag_make(ceph_frag_bits(f) - 1,
-                        ceph_frag_value(f) & (ceph_frag_mask(f) << 1));
-}
-static inline int ceph_frag_is_left_child(__u32 f)
-{
-       return ceph_frag_bits(f) > 0 &&
-               (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0;
-}
-static inline int ceph_frag_is_right_child(__u32 f)
-{
-       return ceph_frag_bits(f) > 0 &&
-               (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1;
-}
-static inline __u32 ceph_frag_sibling(__u32 f)
-{
-       return ceph_frag_make(ceph_frag_bits(f),
-                     ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f)));
-}
-static inline __u32 ceph_frag_left_child(__u32 f)
-{
-       return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f));
-}
-static inline __u32 ceph_frag_right_child(__u32 f)
-{
-       return ceph_frag_make(ceph_frag_bits(f)+1,
-             ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f))));
-}
 static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
 {
        int newbits = ceph_frag_bits(f) + by;
index 71b1d6cdcb5d1fc53dd45fa3950e138cfac19dfe..8dbd7879fdc6b74719d9cb65b85d8c60f589bde6 100644 (file)
@@ -220,6 +220,7 @@ struct ceph_connection {
        struct ceph_entity_addr actual_peer_addr;
 
        /* message out temps */
+       struct ceph_msg_header out_hdr;
        struct ceph_msg *out_msg;        /* sending message (== tail of
                                            out_sent) */
        bool out_msg_done;
@@ -229,7 +230,6 @@ struct ceph_connection {
        int out_kvec_left;   /* kvec's left in out_kvec */
        int out_skip;        /* skip this many bytes */
        int out_kvec_bytes;  /* total bytes left */
-       bool out_kvec_is_msg; /* kvec refers to out_msg */
        int out_more;        /* there is more data after the kvecs */
        __le64 out_temp_ack; /* for writing an ack */
        struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
index 7f540f7f588d8c8461af975a5ebd21a08e6cf14b..789471dba6fb30f15c752fbca46ddbefe5bfe850 100644 (file)
@@ -127,6 +127,12 @@ struct cgroup_subsys_state {
         */
        u64 serial_nr;
 
+       /*
+        * Incremented by online self and children.  Used to guarantee that
+        * parents are not offlined before their children.
+        */
+       atomic_t online_cnt;
+
        /* percpu_ref killing and RCU release */
        struct rcu_head rcu_head;
        struct work_struct destroy_work;
index bda5ec0b4b4dc26a8431756e468241aad6bdd030..fccf7f44139dd67c2bae6db1799e2517bcd9cd00 100644 (file)
@@ -37,7 +37,7 @@ struct cleancache_ops {
        void (*invalidate_fs)(int);
 };
 
-extern int cleancache_register_ops(struct cleancache_ops *ops);
+extern int cleancache_register_ops(const struct cleancache_ops *ops);
 extern void __cleancache_init_fs(struct super_block *);
 extern void __cleancache_init_shared_fs(struct super_block *);
 extern int  __cleancache_get_page(struct page *);
@@ -48,14 +48,14 @@ extern void __cleancache_invalidate_fs(struct super_block *);
 
 #ifdef CONFIG_CLEANCACHE
 #define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled(struct page *page)
-{
-       return page->mapping->host->i_sb->cleancache_poolid >= 0;
-}
 static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
 {
        return mapping->host->i_sb->cleancache_poolid >= 0;
 }
+static inline bool cleancache_fs_enabled(struct page *page)
+{
+       return cleancache_fs_enabled_mapping(page->mapping);
+}
 #else
 #define cleancache_enabled (0)
 #define cleancache_fs_enabled(_page) (0)
@@ -89,11 +89,9 @@ static inline void cleancache_init_shared_fs(struct super_block *sb)
 
 static inline int cleancache_get_page(struct page *page)
 {
-       int ret = -1;
-
        if (cleancache_enabled && cleancache_fs_enabled(page))
-               ret = __cleancache_get_page(page);
-       return ret;
+               return __cleancache_get_page(page);
+       return -1;
 }
 
 static inline void cleancache_put_page(struct page *page)
index bdcf358dfce2a9d0f5420d9fbde6066b1e2dc73b..0d442e34c34979a503030214007223742b3bc822 100644 (file)
@@ -190,9 +190,9 @@ extern void clockevents_config_and_register(struct clock_event_device *dev,
 extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
 
 static inline void
-clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec)
+clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec)
 {
-       return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec);
+       return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec);
 }
 
 extern void clockevents_suspend(void);
index d1e49d52b6407979e19e9c1fc788a53bdb48e1ae..de179993e039d41d7e9034b5744be40954f81c09 100644 (file)
@@ -10,3 +10,8 @@
 #undef uninitialized_var
 #define uninitialized_var(x) x = *(&(x))
 #endif
+
+/* same as gcc, this was present in clang-2.6 so we can assume it works
+ * with any version that can compile the kernel
+ */
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
index 00b042c49ccdac7af3262a399d33dacc88c83e25..b5acbb4048546b5ff344f42ba440a96820b36b7c 100644 (file)
 # define __pmem                __attribute__((noderef, address_space(5)))
 #ifdef CONFIG_SPARSE_RCU_POINTER
 # define __rcu         __attribute__((noderef, address_space(4)))
-#else
+#else /* CONFIG_SPARSE_RCU_POINTER */
 # define __rcu
-#endif
+#endif /* CONFIG_SPARSE_RCU_POINTER */
+# define __private     __attribute__((noderef))
 extern void __chk_user_ptr(const volatile void __user *);
 extern void __chk_io_ptr(const volatile void __iomem *);
-#else
+# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
+#else /* __CHECKER__ */
 # define __user
 # define __kernel
 # define __safe
@@ -44,7 +46,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
 # define __percpu
 # define __rcu
 # define __pmem
-#endif
+# define __private
+# define ACCESS_PRIVATE(p, member) ((p)->member)
+#endif /* __CHECKER__ */
 
 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
 #define ___PASTE(a,b) a##b
@@ -263,8 +267,9 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * In contrast to ACCESS_ONCE these two macros will also work on aggregate
  * data types like structs or unions. If the size of the accessed data
  * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE()  will fall back to memcpy and print a
- * compile-time warning.
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at
+ * least two memcpy()s: one for the __builtin_memcpy() and then one for
+ * the macro doing the copy of variable - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
index 88a4215125bce6be0ac47ef1fda2948eead30c27..d0bf555b6bbfb2344d199eeadf338c5183f72852 100644 (file)
@@ -464,29 +464,8 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
 
-/* CPUFREQ DEFAULT GOVERNOR */
-/*
- * Performance governor is fallback governor if any other gov failed to auto
- * load due latency restrictions
- */
-#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
-extern struct cpufreq_governor cpufreq_gov_performance;
-#endif
-#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_performance)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE)
-extern struct cpufreq_governor cpufreq_gov_powersave;
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_powersave)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
-extern struct cpufreq_governor cpufreq_gov_userspace;
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_userspace)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
-extern struct cpufreq_governor cpufreq_gov_ondemand;
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_ondemand)
-#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
-extern struct cpufreq_governor cpufreq_gov_conservative;
-#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_conservative)
-#endif
+struct cpufreq_governor *cpufreq_default_governor(void);
+struct cpufreq_governor *cpufreq_fallback_governor(void);
 
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
index 85a868ccb4931d374a1ee9fb4e4036bb84399561..fea160ee5803fd121d0493f622e240b4c35da480 100644 (file)
@@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
        task_unlock(current);
 }
 
+extern void cpuset_post_attach_flush(void);
+
 #else /* !CONFIG_CPUSETS */
 
 static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
        return false;
 }
 
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
 #endif /* !CONFIG_CPUSETS */
 
 #endif /* _LINUX_CPUSET_H */
index 48b49305716bd728e69477db6b430c34e7781746..be8f12b8f1950499380c10de27ab6928df25fd8e 100644 (file)
@@ -59,7 +59,8 @@ enum {
        CRUSH_RULE_SET_CHOOSELEAF_TRIES = 9, /* override chooseleaf_descend_once */
        CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES = 10,
        CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES = 11,
-       CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12
+       CRUSH_RULE_SET_CHOOSELEAF_VARY_R = 12,
+       CRUSH_RULE_SET_CHOOSELEAF_STABLE = 13
 };
 
 /*
@@ -205,6 +206,11 @@ struct crush_map {
         * mappings line up a bit better with previous mappings. */
        __u8 chooseleaf_vary_r;
 
+       /* if true, chooseleaf firstn returns stable results (if no local
+        * retry) so that data migration is optimal when a device fails. */
+       __u8 chooseleaf_stable;
+
 #ifndef __KERNEL__
        /*
         * version 0 (original) of straw_calc has various flaws.  version 1
index e71cb70a1ac2f6eb1faf9023671f469574786c60..99c94899ad0fef73d8c7854c247e5cd109ebe241 100644 (file)
@@ -54,7 +54,6 @@
 #define CRYPTO_ALG_TYPE_AHASH          0x0000000a
 #define CRYPTO_ALG_TYPE_RNG            0x0000000c
 #define CRYPTO_ALG_TYPE_AKCIPHER       0x0000000d
-#define CRYPTO_ALG_TYPE_PCOMPRESS      0x0000000f
 
 #define CRYPTO_ALG_TYPE_HASH_MASK      0x0000000e
 #define CRYPTO_ALG_TYPE_AHASH_MASK     0x0000000c
@@ -137,7 +136,6 @@ struct scatterlist;
 struct crypto_ablkcipher;
 struct crypto_async_request;
 struct crypto_blkcipher;
-struct crypto_hash;
 struct crypto_tfm;
 struct crypto_type;
 struct skcipher_givcrypt_request;
@@ -187,11 +185,6 @@ struct cipher_desc {
        void *info;
 };
 
-struct hash_desc {
-       struct crypto_hash *tfm;
-       u32 flags;
-};
-
 /**
  * DOC: Block Cipher Algorithm Definitions
  *
@@ -519,18 +512,6 @@ struct cipher_tfm {
        void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
 };
 
-struct hash_tfm {
-       int (*init)(struct hash_desc *desc);
-       int (*update)(struct hash_desc *desc,
-                     struct scatterlist *sg, unsigned int nsg);
-       int (*final)(struct hash_desc *desc, u8 *out);
-       int (*digest)(struct hash_desc *desc, struct scatterlist *sg,
-                     unsigned int nsg, u8 *out);
-       int (*setkey)(struct crypto_hash *tfm, const u8 *key,
-                     unsigned int keylen);
-       unsigned int digestsize;
-};
-
 struct compress_tfm {
        int (*cot_compress)(struct crypto_tfm *tfm,
                            const u8 *src, unsigned int slen,
@@ -543,7 +524,6 @@ struct compress_tfm {
 #define crt_ablkcipher crt_u.ablkcipher
 #define crt_blkcipher  crt_u.blkcipher
 #define crt_cipher     crt_u.cipher
-#define crt_hash       crt_u.hash
 #define crt_compress   crt_u.compress
 
 struct crypto_tfm {
@@ -554,7 +534,6 @@ struct crypto_tfm {
                struct ablkcipher_tfm ablkcipher;
                struct blkcipher_tfm blkcipher;
                struct cipher_tfm cipher;
-               struct hash_tfm hash;
                struct compress_tfm compress;
        } crt_u;
 
@@ -581,10 +560,6 @@ struct crypto_comp {
        struct crypto_tfm base;
 };
 
-struct crypto_hash {
-       struct crypto_tfm base;
-};
-
 enum {
        CRYPTOA_UNSPEC,
        CRYPTOA_ALG,
@@ -1577,233 +1552,6 @@ static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
                                                dst, src);
 }
 
-/**
- * DOC: Synchronous Message Digest API
- *
- * The synchronous message digest API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_HASH (listed as type "hash" in /proc/crypto)
- */
-
-static inline struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm)
-{
-       return (struct crypto_hash *)tfm;
-}
-
-static inline struct crypto_hash *crypto_hash_cast(struct crypto_tfm *tfm)
-{
-       BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_HASH) &
-              CRYPTO_ALG_TYPE_HASH_MASK);
-       return __crypto_hash_cast(tfm);
-}
-
-/**
- * crypto_alloc_hash() - allocate synchronous message digest handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- *           message digest cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a message digest. The returned struct
- * crypto_hash is the cipher handle that is required for any subsequent
- * API invocation for that message digest.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_hash *crypto_alloc_hash(const char *alg_name,
-                                                   u32 type, u32 mask)
-{
-       type &= ~CRYPTO_ALG_TYPE_MASK;
-       mask &= ~CRYPTO_ALG_TYPE_MASK;
-       type |= CRYPTO_ALG_TYPE_HASH;
-       mask |= CRYPTO_ALG_TYPE_HASH_MASK;
-
-       return __crypto_hash_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm)
-{
-       return &tfm->base;
-}
-
-/**
- * crypto_free_hash() - zeroize and free message digest handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_hash(struct crypto_hash *tfm)
-{
-       crypto_free_tfm(crypto_hash_tfm(tfm));
-}
-
-/**
- * crypto_has_hash() - Search for the availability of a message digest
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- *           message digest cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the message digest cipher is known to the kernel crypto
- *        API; false otherwise
- */
-static inline int crypto_has_hash(const char *alg_name, u32 type, u32 mask)
-{
-       type &= ~CRYPTO_ALG_TYPE_MASK;
-       mask &= ~CRYPTO_ALG_TYPE_MASK;
-       type |= CRYPTO_ALG_TYPE_HASH;
-       mask |= CRYPTO_ALG_TYPE_HASH_MASK;
-
-       return crypto_has_alg(alg_name, type, mask);
-}
-
-static inline struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm)
-{
-       return &crypto_hash_tfm(tfm)->crt_hash;
-}
-
-/**
- * crypto_hash_blocksize() - obtain block size for message digest
- * @tfm: cipher handle
- *
- * The block size for the message digest cipher referenced with the cipher
- * handle is returned.
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_hash_blocksize(struct crypto_hash *tfm)
-{
-       return crypto_tfm_alg_blocksize(crypto_hash_tfm(tfm));
-}
-
-static inline unsigned int crypto_hash_alignmask(struct crypto_hash *tfm)
-{
-       return crypto_tfm_alg_alignmask(crypto_hash_tfm(tfm));
-}
-
-/**
- * crypto_hash_digestsize() - obtain message digest size
- * @tfm: cipher handle
- *
- * The size for the message digest created by the message digest cipher
- * referenced with the cipher handle is returned.
- *
- * Return: message digest size
- */
-static inline unsigned int crypto_hash_digestsize(struct crypto_hash *tfm)
-{
-       return crypto_hash_crt(tfm)->digestsize;
-}
-
-static inline u32 crypto_hash_get_flags(struct crypto_hash *tfm)
-{
-       return crypto_tfm_get_flags(crypto_hash_tfm(tfm));
-}
-
-static inline void crypto_hash_set_flags(struct crypto_hash *tfm, u32 flags)
-{
-       crypto_tfm_set_flags(crypto_hash_tfm(tfm), flags);
-}
-
-static inline void crypto_hash_clear_flags(struct crypto_hash *tfm, u32 flags)
-{
-       crypto_tfm_clear_flags(crypto_hash_tfm(tfm), flags);
-}
-
-/**
- * crypto_hash_init() - (re)initialize message digest handle
- * @desc: cipher request handle that to be filled by caller --
- *       desc.tfm is filled with the hash cipher handle;
- *       desc.flags is filled with either CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * The call (re-)initializes the message digest referenced by the hash cipher
- * request handle. Any potentially existing state created by previous
- * operations is discarded.
- *
- * Return: 0 if the message digest initialization was successful; < 0 if an
- *        error occurred
- */
-static inline int crypto_hash_init(struct hash_desc *desc)
-{
-       return crypto_hash_crt(desc->tfm)->init(desc);
-}
-
-/**
- * crypto_hash_update() - add data to message digest for processing
- * @desc: cipher request handle
- * @sg: scatter / gather list pointing to the data to be added to the message
- *      digest
- * @nbytes: number of bytes to be processed from @sg
- *
- * Updates the message digest state of the cipher handle pointed to by the
- * hash cipher request handle with the input data pointed to by the
- * scatter/gather list.
- *
- * Return: 0 if the message digest update was successful; < 0 if an error
- *        occurred
- */
-static inline int crypto_hash_update(struct hash_desc *desc,
-                                    struct scatterlist *sg,
-                                    unsigned int nbytes)
-{
-       return crypto_hash_crt(desc->tfm)->update(desc, sg, nbytes);
-}
-
-/**
- * crypto_hash_final() - calculate message digest
- * @desc: cipher request handle
- * @out: message digest output buffer -- The caller must ensure that the out
- *      buffer has a sufficient size (e.g. by using the crypto_hash_digestsize
- *      function).
- *
- * Finalize the message digest operation and create the message digest
- * based on all data added to the cipher handle. The message digest is placed
- * into the output buffer.
- *
- * Return: 0 if the message digest creation was successful; < 0 if an error
- *        occurred
- */
-static inline int crypto_hash_final(struct hash_desc *desc, u8 *out)
-{
-       return crypto_hash_crt(desc->tfm)->final(desc, out);
-}
-
-/**
- * crypto_hash_digest() - calculate message digest for a buffer
- * @desc: see crypto_hash_final()
- * @sg: see crypto_hash_update()
- * @nbytes:  see crypto_hash_update()
- * @out: see crypto_hash_final()
- *
- * This function is a "short-hand" for the function calls of crypto_hash_init,
- * crypto_hash_update and crypto_hash_final. The parameters have the same
- * meaning as discussed for those separate three functions.
- *
- * Return: 0 if the message digest creation was successful; < 0 if an error
- *        occurred
- */
-static inline int crypto_hash_digest(struct hash_desc *desc,
-                                    struct scatterlist *sg,
-                                    unsigned int nbytes, u8 *out)
-{
-       return crypto_hash_crt(desc->tfm)->digest(desc, sg, nbytes, out);
-}
-
-/**
- * crypto_hash_setkey() - set key for message digest
- * @hash: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the message digest cipher. The cipher
- * handle must point to a keyed hash in order for this function to succeed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_hash_setkey(struct crypto_hash *hash,
-                                    const u8 *key, unsigned int keylen)
-{
-       return crypto_hash_crt(hash)->setkey(hash, key, keylen);
-}
-
 static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
 {
        return (struct crypto_comp *)tfm;
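
The hunk above removes the legacy crypto_hash wrapper API from include/linux/crypto.h; in-kernel users are expected to use the ahash/shash interfaces from <crypto/hash.h> instead. Below is a minimal sketch of a one-shot digest with the synchronous shash API; the algorithm name "sha256" and the helper name are illustrative assumptions, not part of this commit.

#include <crypto/hash.h>
#include <linux/err.h>

/* Sketch only: compute SHA-256 of a linear buffer with the shash API.
 * @out must have room for crypto_shash_digestsize(tfm) bytes. */
static int sketch_sha256(const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        int ret;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;
                /* init + update + final in one call */
                ret = crypto_shash_digest(desc, data, len, out);
        }

        crypto_free_shash(tfm);
        return ret;
}
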
index 8204c3dc3800f37839513d7f2c0567d6a41f87a7..818e45078929b94af5780a8c91f0e84d82da8e0c 100644 (file)
@@ -14,6 +14,17 @@ int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
                dax_iodone_t);
 int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
                dax_iodone_t);
+
+#ifdef CONFIG_FS_DAX
+struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+#else
+static inline struct page *read_dax_sector(struct block_device *bdev,
+               sector_t n)
+{
+       return ERR_PTR(-ENXIO);
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
                                unsigned int flags, get_block_t, dax_iodone_t);
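
The new read_dax_sector() declaration comes with an ERR_PTR stub for !CONFIG_FS_DAX, so callers need no #ifdef of their own. A hedged sketch of the calling pattern follows; the helper and its use are illustrative only.

#include <linux/dax.h>
#include <linux/err.h>

/* Sketch: look at one DAX-backed sector; tolerates DAX being compiled out. */
static int sketch_peek_sector(struct block_device *bdev, sector_t sector)
{
        struct page *page = read_dax_sector(bdev, sector);

        if (IS_ERR(page))
                return PTR_ERR(page);   /* -ENXIO when CONFIG_FS_DAX is off */

        /* ... inspect the page contents here ... */
        return 0;
}
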
index 19c066dce1dab2e0a4dfab9f2851d6998d0f8716..981e53ab84e8e9d48948967d6a98bc90977df9cc 100644 (file)
@@ -162,6 +162,14 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
        return ERR_PTR(-ENODEV);
 }
 
+static inline struct dentry *debugfs_create_automount(const char *name,
+                                       struct dentry *parent,
+                                       struct vfsmount *(*f)(void *),
+                                       void *data)
+{
+       return ERR_PTR(-ENODEV);
+}
+
 static inline void debugfs_remove(struct dentry *dentry)
 { }
 
index ec1c61c87d8974897af2be54af23588f14746777..82ae3b5e2c457bd647dfa8894f6be7f778b07615 100644 (file)
@@ -189,6 +189,13 @@ struct target_type {
 #define DM_TARGET_IMMUTABLE            0x00000004
 #define dm_target_is_immutable(type)   ((type)->features & DM_TARGET_IMMUTABLE)
 
+/*
+ * Indicates that a target may replace any target; even immutable targets.
+ * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
+ */
+#define DM_TARGET_WILDCARD             0x00000008
+#define dm_target_is_wildcard(type)    ((type)->features & DM_TARGET_WILDCARD)
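
A hedged sketch of a target type advertising the new wildcard feature is shown below; per the comment above, a real wildcard target must also provide the request-based map hooks, which are omitted here along with ctr/dtr for brevity, and all names are invented.

#include <linux/device-mapper.h>
#include <linux/module.h>

/* Sketch only: feature-flag usage; not a complete, registrable target. */
static struct target_type sketch_wildcard_target = {
        .name     = "sketch-error",
        .version  = {1, 0, 0},
        .features = DM_TARGET_WILDCARD,
        .module   = THIS_MODULE,
};
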
+
 /*
  * Some targets need to be sent the same WRITE bio several times so
  * that they can send copies of it to different devices.  This function
@@ -231,10 +238,10 @@ struct dm_target {
        unsigned num_write_same_bios;
 
        /*
-        * The minimum number of extra bytes allocated in each bio for the
-        * target to use.  dm_per_bio_data returns the data location.
+        * The minimum number of extra bytes allocated in each io for the
+        * target to use.
         */
-       unsigned per_bio_data_size;
+       unsigned per_io_data_size;
 
        /*
         * If defined, this function is called to find out how many
index 6d6f1fec092fe5df78671f5b352931a2edae4aca..6c1a8ce77e3b391396939a7e7663c7bc21745608 100644 (file)
@@ -958,6 +958,11 @@ static inline void device_lock(struct device *dev)
        mutex_lock(&dev->mutex);
 }
 
+static inline int device_lock_interruptible(struct device *dev)
+{
+       return mutex_lock_interruptible(&dev->mutex);
+}
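
device_lock_interruptible() is a thin wrapper around mutex_lock_interruptible() on dev->mutex, so a caller can bail out on a signal instead of blocking. A small usage sketch, with an invented helper name:

#include <linux/device.h>

/* Sketch: take the device lock, but let a pending signal abort the wait. */
static int sketch_touch_device(struct device *dev)
{
        int ret = device_lock_interruptible(dev);

        if (ret)
                return ret;     /* typically -EINTR */

        /* ... operate on the device while holding dev->mutex ... */

        device_unlock(dev);
        return 0;
}
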
+
 static inline int device_trylock(struct device *dev)
 {
        return mutex_trylock(&dev->mutex);
@@ -1291,8 +1296,9 @@ do {                                                                      \
  * dev_WARN*() acts like dev_printk(), but with the key difference of
  * using WARN/WARN_ONCE to include file/line information and a backtrace.
  */
-#define dev_WARN(dev, format, arg...) \
-       WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
+#define dev_WARN(dev, condition, format, arg...)               \
+       WARN(condition, "%s %s: " format,                       \
+                       dev_driver_string(dev), dev_name(dev), ## arg)
 
 #define dev_WARN_ONCE(dev, condition, format, arg...) \
        WARN_ONCE(condition, "%s %s: " format, \
index 251a2090a55444cec55ce4f04510b6ef83a69cfb..e0ee0b3000b2da107c975137165fc989777d8a58 100644 (file)
@@ -19,6 +19,8 @@
 
 int devpts_new_index(struct inode *ptmx_inode);
 void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
 /* mknod in devpts */
 struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
                void *priv);
@@ -32,6 +34,8 @@ void devpts_pty_kill(struct inode *inode);
 /* Dummy stubs in the no-pty case */
 static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
 static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
 static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
                dev_t device, int index, void *priv)
 {
index f98bd7068d55a3f4ddfdb50dc01d95909ab30f45..532108ea0c1c046dbc264af046137efc3a0d7b81 100644 (file)
@@ -54,7 +54,7 @@ struct dma_buf_attachment;
  * @release: release this buffer; to be called after the last dma_buf_put.
  * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
  *                   caches and allocate backing storage (if not yet done)
- *                   respectively pin the objet into memory.
+ *                   respectively pin the object into memory.
  * @end_cpu_access: [optional] called after cpu access to flush caches.
  * @kmap_atomic: maps a page from the buffer into kernel address
  *              space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@ struct dma_buf_ops {
        /* after final dma_buf_put() */
        void (*release)(struct dma_buf *);
 
-       int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
-                               enum dma_data_direction);
-       void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
-                              enum dma_data_direction);
+       int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+       void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
        void *(*kmap_atomic)(struct dma_buf *, unsigned long);
        void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
        void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,9 +222,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
                                        enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
                                enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                             enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
                            enum dma_data_direction dir);
 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
index 16a1cad30c337369e62efab4a808397853d7b26a..0a9a0ba1998b01b97304322151cd24a42e2f8e57 100644 (file)
@@ -401,6 +401,7 @@ enum dma_residue_granularity {
  *     since the enum dma_transfer_direction is not defined as bits for each
  *     type of direction, the dma controller should fill (1 << <TYPE>) and same
  *     should be checked by controller as well
+ * @max_burst: max burst capability per-transfer
  * @cmd_pause: true, if pause and thereby resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
  * @residue_granularity: granularity of the reported transfer residue
@@ -411,6 +412,7 @@ struct dma_slave_caps {
        u32 src_addr_widths;
        u32 dst_addr_widths;
        u32 directions;
+       u32 max_burst;
        bool cmd_pause;
        bool cmd_terminate;
        enum dma_residue_granularity residue_granularity;
@@ -654,6 +656,7 @@ struct dma_filter {
  *     the enum dma_transfer_direction is not defined as bits for
  *     each type of direction, the dma controller should fill (1 <<
  *     <TYPE>) and same should be checked by controller as well
+ * @max_burst: max burst capability per-transfer
  * @residue_granularity: granularity of the transfer residue reported
  *     by tx_status
  * @device_alloc_chan_resources: allocate resources and return the
@@ -712,6 +715,7 @@ struct dma_device {
        u32 src_addr_widths;
        u32 dst_addr_widths;
        u32 directions;
+       u32 max_burst;
        bool descriptor_reuse;
        enum dma_residue_granularity residue_granularity;
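
The new max_burst fields let a controller advertise its per-transfer burst limit through the existing capability query. Below is a sketch of a client clamping its requested burst against it, assuming dma_get_slave_caps() as the query path; the helper is invented.

#include <linux/dmaengine.h>
#include <linux/kernel.h>

/* Sketch: honour the controller's advertised burst limit, if any. */
static u32 sketch_pick_burst(struct dma_chan *chan, u32 wanted)
{
        struct dma_slave_caps caps;

        if (!dma_get_slave_caps(chan, &caps) && caps.max_burst)
                return min(wanted, caps.max_burst);

        return wanted;  /* no limit reported; use the caller's value */
}
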
 
index ff8b55359648c9cbb91d91cf2c123a5911b806b9..0de21e9359760d5b19640b5a532398f8a800b7fa 100644 (file)
@@ -15,6 +15,7 @@
 #define QTREE_DEL_REWRITE 6
 
 struct dquot;
+struct kqid;
 
 /* Operations */
 struct qtree_fmt_operations {
@@ -52,5 +53,6 @@ static inline int qtree_depth(struct qtree_mem_dqinfo *info)
                entries *= epb;
        return i;
 }
+int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid);
 
 #endif /* _LINUX_DQBLK_QTREE_H */
index 569b5a866bb1e6308bbc4c0a28a2da92d106d6b5..3c6cbbdae4aaf698e781277496de56999df74952 100644 (file)
@@ -97,6 +97,7 @@ typedef       struct {
 #define EFI_MEMORY_WP          ((u64)0x0000000000001000ULL)    /* write-protect */
 #define EFI_MEMORY_RP          ((u64)0x0000000000002000ULL)    /* read-protect */
 #define EFI_MEMORY_XP          ((u64)0x0000000000004000ULL)    /* execute-protect */
+#define EFI_MEMORY_NV          ((u64)0x0000000000008000ULL)    /* non-volatile */
 #define EFI_MEMORY_MORE_RELIABLE \
                                ((u64)0x0000000000010000ULL)    /* higher reliability */
 #define EFI_MEMORY_RO          ((u64)0x0000000000020000ULL)    /* read-only */
@@ -507,10 +508,6 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char
 typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, 
                                         u32 attr, unsigned long data_size,
                                         void *data);
-typedef efi_status_t
-efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
-                              u32 attr, unsigned long data_size, void *data);
-
 typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
 typedef void efi_reset_system_t (int reset_type, efi_status_t status,
                                 unsigned long data_size, efi_char16_t *data);
@@ -529,7 +526,9 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules,
                                              unsigned long count,
                                              u64 *max_size,
                                              int *reset_type);
-typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size);
+typedef efi_status_t efi_query_variable_store_t(u32 attributes,
+                                               unsigned long size,
+                                               bool nonblocking);
 
 void efi_native_runtime_setup(void);
 
@@ -851,8 +850,9 @@ extern struct efi {
        efi_get_variable_t *get_variable;
        efi_get_next_variable_t *get_next_variable;
        efi_set_variable_t *set_variable;
-       efi_set_variable_nonblocking_t *set_variable_nonblocking;
+       efi_set_variable_t *set_variable_nonblocking;
        efi_query_variable_info_t *query_variable_info;
+       efi_query_variable_info_t *query_variable_info_nonblocking;
        efi_update_capsule_t *update_capsule;
        efi_query_capsule_caps_t *query_capsule_caps;
        efi_get_next_high_mono_count_t *get_next_high_mono_count;
@@ -884,13 +884,17 @@ extern void efi_enter_virtual_mode (void);        /* switch EFI to virtual mode, if pos
 #ifdef CONFIG_X86
 extern void efi_late_init(void);
 extern void efi_free_boot_services(void);
-extern efi_status_t efi_query_variable_store(u32 attributes, unsigned long size);
+extern efi_status_t efi_query_variable_store(u32 attributes,
+                                            unsigned long size,
+                                            bool nonblocking);
 extern void efi_find_mirror(void);
 #else
 static inline void efi_late_init(void) {}
 static inline void efi_free_boot_services(void) {}
 
-static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
+static inline efi_status_t efi_query_variable_store(u32 attributes,
+                                                   unsigned long size,
+                                                   bool nonblocking)
 {
        return EFI_SUCCESS;
 }
@@ -1091,7 +1095,7 @@ struct efivar_operations {
        efi_get_variable_t *get_variable;
        efi_get_next_variable_t *get_next_variable;
        efi_set_variable_t *set_variable;
-       efi_set_variable_nonblocking_t *set_variable_nonblocking;
+       efi_set_variable_t *set_variable_nonblocking;
        efi_query_variable_store_t *query_variable_store;
 };
 
index e59c3be921069635c90da255087b12978bf70c9a..f43e6a01a0236ed30674f778d48c6c78a8c2e938 100644 (file)
@@ -21,7 +21,7 @@
 #define F2FS_BLKSIZE                   4096    /* support only 4KB block */
 #define F2FS_BLKSIZE_BITS              12      /* bits for F2FS_BLKSIZE */
 #define F2FS_MAX_EXTENSION             64      /* # of extension entries */
-#define F2FS_BLK_ALIGN(x)      (((x) + F2FS_BLKSIZE - 1) / F2FS_BLKSIZE)
+#define F2FS_BLK_ALIGN(x)      (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
 
 #define NULL_ADDR              ((block_t)0)    /* used as block_t addresses */
 #define NEW_ADDR               ((block_t)-1)   /* used as block_t addresses */
@@ -170,12 +170,12 @@ struct f2fs_extent {
 #define F2FS_INLINE_XATTR_ADDRS        50      /* 200 bytes for inline xattrs */
 #define DEF_ADDRS_PER_INODE    923     /* Address Pointers in an Inode */
 #define DEF_NIDS_PER_INODE     5       /* Node IDs in an Inode */
-#define ADDRS_PER_INODE(fi)    addrs_per_inode(fi)
+#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
 #define ADDRS_PER_BLOCK                1018    /* Address Pointers in a Direct Block */
 #define NIDS_PER_BLOCK         1018    /* Node IDs in an Indirect Block */
 
-#define ADDRS_PER_PAGE(page, fi)       \
-       (IS_INODE(page) ? ADDRS_PER_INODE(fi) : ADDRS_PER_BLOCK)
+#define ADDRS_PER_PAGE(page, inode)    \
+       (IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK)
 
 #define        NODE_DIR1_BLOCK         (DEF_ADDRS_PER_INODE + 1)
 #define        NODE_DIR2_BLOCK         (DEF_ADDRS_PER_INODE + 2)
@@ -345,7 +345,7 @@ struct f2fs_summary {
 
 struct summary_footer {
        unsigned char entry_type;       /* SUM_TYPE_XXX */
-       __u32 check_sum;                /* summary checksum */
+       __le32 check_sum;               /* summary checksum */
 } __packed;
 
 #define SUM_JOURNAL_SIZE       (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
@@ -358,6 +358,12 @@ struct summary_footer {
                                sizeof(struct sit_journal_entry))
 #define SIT_JOURNAL_RESERVED   ((SUM_JOURNAL_SIZE - 2) %\
                                sizeof(struct sit_journal_entry))
+
+/* Reserved area should make the size of f2fs_extra_info equal to
+ * that of nat_journal and sit_journal.
+ */
+#define EXTRA_INFO_RESERVED    (SUM_JOURNAL_SIZE - 2 - 8)
+
 /*
  * frequently updated NAT/SIT entries can be stored in the spare area in
  * summary blocks
@@ -387,6 +393,11 @@ struct sit_journal {
        __u8 reserved[SIT_JOURNAL_RESERVED];
 } __packed;
 
+struct f2fs_extra_info {
+       __le64 kbytes_written;
+       __u8 reserved[EXTRA_INFO_RESERVED];
+} __packed;
+
 /* 4KB-sized summary block structure */
 struct f2fs_summary_block {
        struct f2fs_summary entries[ENTRIES_IN_SUM];
@@ -394,10 +405,11 @@ struct f2fs_summary_block {
                __le16 n_nats;
                __le16 n_sits;
        };
-       /* spare area is used by NAT or SIT journals */
+       /* spare area is used by NAT or SIT journals or extra info */
        union {
                struct nat_journal nat_j;
                struct sit_journal sit_j;
+               struct f2fs_extra_info info;
        };
        struct summary_footer footer;
 } __packed;
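
The reserved field above is sized so that f2fs_extra_info occupies exactly the same space as the two journal layouts it shares a union with. A build-time check would make that invariant explicit; this is a sketch only, not part of the commit.

#include <linux/bug.h>
#include <linux/f2fs_fs.h>

/* Sketch: all three spare-area union members must be the same size. */
static inline void sketch_check_spare_area_sizes(void)
{
        BUILD_BUG_ON(sizeof(struct f2fs_extra_info) !=
                     sizeof(struct nat_journal));
        BUILD_BUG_ON(sizeof(struct f2fs_extra_info) !=
                     sizeof(struct sit_journal));
}
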
index 1a2046275cdf0353e68a9cf38c5fe8762a62fdc2..a401dc8ad85d3174c1c0ca992f55b7b30d6e3358 100644 (file)
@@ -70,7 +70,7 @@ extern int sysctl_protected_hardlinks;
 struct buffer_head;
 typedef int (get_block_t)(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create);
-typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
+typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
                        ssize_t bytes, void *private);
 typedef void (dax_iodone_t)(struct buffer_head *bh_map, int uptodate);
 
@@ -484,9 +484,6 @@ struct block_device {
        int                     bd_fsfreeze_count;
        /* Mutex for freeze */
        struct mutex            bd_fsfreeze_mutex;
-#ifdef CONFIG_FS_DAX
-       int                     bd_map_count;
-#endif
 };
 
 /*
@@ -2907,7 +2904,7 @@ extern void replace_mount_options(struct super_block *sb, char *options);
 
 static inline bool io_is_direct(struct file *filp)
 {
-       return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+       return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
 }
 
 static inline int iocb_flags(struct file *file)
index 0639dcc9819523a002de1149da5629d24bce039b..81de7123959d96d97dd57d41d76289b3a8185920 100644 (file)
@@ -165,7 +165,6 @@ struct ftrace_ops {
        ftrace_func_t                   saved_func;
        int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
-       int                             nr_trampolines;
        struct ftrace_ops_hash          local_hash;
        struct ftrace_ops_hash          *func_hash;
        struct ftrace_ops_hash          old_hash;
index 28ad5f6494b018a2c49493920059de62fcacdb8e..af1f2b24bbe4172f6055407229f0670ee90dc534 100644 (file)
@@ -547,16 +547,16 @@ static inline bool pm_suspended_storage(void)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_CMA
-
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 /* The below functions must be run on a range from a single zone. */
 extern int alloc_contig_range(unsigned long start, unsigned long end,
                              unsigned migratetype);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+#endif
 
+#ifdef CONFIG_CMA
 /* CMA stuff */
 extern void init_cma_reserved_pageblock(struct page *page);
-
 #endif
 
 #endif /* __LINUX_GFP_H */
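
alloc_contig_range()/free_contig_range() are now declared whenever memory isolation and compaction are enabled, not only under CONFIG_CMA. A sketch of the allocate/use/release pattern follows; per the comment above, the range must lie within a single zone, and the helper name is invented.

#include <linux/gfp.h>

/* Sketch: claim a physically contiguous PFN range, then give it back. */
static int sketch_grab_range(unsigned long start_pfn, unsigned long nr_pages)
{
        int ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
                                     MIGRATE_MOVABLE);
        if (ret)
                return ret;

        /* ... pages [start_pfn, start_pfn + nr_pages) now belong to us ... */

        free_contig_range(start_pfn, nr_pages);
        return 0;
}
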
index 76dd4f0da5ca9907572bea19f4b0d0dbe8ad06eb..2f7bf185aeb21168f23ece47c7e39a2cf3fcbb64 100644 (file)
@@ -87,7 +87,8 @@ enum hrtimer_restart {
  * @function:  timer expiry callback function
  * @base:      pointer to the timer base (per cpu and per clock)
  * @state:     state information (See bit values above)
- * @start_pid: timer statistics field to store the pid of the task which
+ * @is_rel:    Set if the timer was armed relative
+ * @start_pid:  timer statistics field to store the pid of the task which
  *             started the timer
  * @start_site:        timer statistics field to store the site where the timer
  *             was started
@@ -101,7 +102,8 @@ struct hrtimer {
        ktime_t                         _softexpires;
        enum hrtimer_restart            (*function)(struct hrtimer *);
        struct hrtimer_clock_base       *base;
-       unsigned long                   state;
+       u8                              state;
+       u8                              is_rel;
 #ifdef CONFIG_TIMER_STATS
        int                             start_pid;
        void                            *start_site;
@@ -151,6 +153,7 @@ enum  hrtimer_base_type {
        HRTIMER_BASE_REALTIME,
        HRTIMER_BASE_BOOTTIME,
        HRTIMER_BASE_TAI,
+       HRTIMER_BASE_MONOTONIC_RAW,
        HRTIMER_MAX_CLOCK_BASES,
 };
 
@@ -321,6 +324,27 @@ static inline void clock_was_set_delayed(void) { }
 
 #endif
 
+static inline ktime_t
+__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
+{
+       ktime_t rem = ktime_sub(timer->node.expires, now);
+
+       /*
+        * Adjust relative timers for the extra we added in
+        * hrtimer_start_range_ns() to prevent short timeouts.
+        */
+       if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
+               rem.tv64 -= hrtimer_resolution;
+       return rem;
+}
+
+static inline ktime_t
+hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
+{
+       return __hrtimer_expires_remaining_adjusted(timer,
+                                                   timer->base->get_time());
+}
+
 extern void clock_was_set(void);
 #ifdef CONFIG_TIMERFD
 extern void timerfd_clock_was_set(void);
@@ -390,7 +414,12 @@ static inline void hrtimer_restart(struct hrtimer *timer)
 }
 
 /* Query timers: */
-extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+
+static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+{
+       return __hrtimer_get_remaining(timer, false);
+}
 
 extern u64 hrtimer_get_next_event(void);
 
index 452c0b0d2f3219dc0f11bec80878e5ea0b8e8803..3b1f6cef95136fa381d06eb5ccb409dde3e030a0 100644 (file)
@@ -163,6 +163,14 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
 /* 30 byte 4 addr hdr, 2 byte QoS, 2304 byte MSDU, 12 byte crypt, 4 byte FCS */
 #define IEEE80211_MAX_FRAME_LEN                2352
 
+/* Maximal size of an A-MSDU */
+#define IEEE80211_MAX_MPDU_LEN_HT_3839         3839
+#define IEEE80211_MAX_MPDU_LEN_HT_7935         7935
+
+#define IEEE80211_MAX_MPDU_LEN_VHT_3895                3895
+#define IEEE80211_MAX_MPDU_LEN_VHT_7991                7991
+#define IEEE80211_MAX_MPDU_LEN_VHT_11454       11454
+
 #define IEEE80211_MAX_SSID_LEN         32
 
 #define IEEE80211_MAX_MESH_ID_LEN      32
@@ -843,6 +851,8 @@ enum ieee80211_vht_opmode_bits {
 };
 
 #define WLAN_SA_QUERY_TR_ID_LEN 2
+#define WLAN_MEMBERSHIP_LEN 8
+#define WLAN_USER_POSITION_LEN 16
 
 /**
  * struct ieee80211_tpc_report_ie
@@ -989,6 +999,11 @@ struct ieee80211_mgmt {
                                        u8 action_code;
                                        u8 operating_mode;
                                } __packed vht_opmode_notif;
+                               struct {
+                                       u8 action_code;
+                                       u8 membership[WLAN_MEMBERSHIP_LEN];
+                                       u8 position[WLAN_USER_POSITION_LEN];
+                               } __packed vht_group_notif;
                                struct {
                                        u8 action_code;
                                        u8 dialog_token;
@@ -1498,6 +1513,7 @@ struct ieee80211_vht_operation {
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895                 0x00000000
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991                 0x00000001
 #define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454                        0x00000002
+#define IEEE80211_VHT_CAP_MAX_MPDU_MASK                                0x00000003
 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ               0x00000004
 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ      0x00000008
 #define IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK                 0x0000000C
@@ -2079,6 +2095,16 @@ enum ieee80211_tdls_actioncode {
 #define WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED    BIT(5)
 #define WLAN_EXT_CAPA8_OPMODE_NOTIF    BIT(6)
 
+/* Defines the maximal number of MSDUs in an A-MSDU. */
+#define WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB   BIT(7)
+#define WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB   BIT(0)
+
+/*
+ * Fine Timing Measurement Initiator - bit 71 of @WLAN_EID_EXT_CAPABILITY
+ * information element
+ */
+#define WLAN_EXT_CAPA9_FTM_INITIATOR   BIT(7)
+
 /* TDLS specific payload type in the LLC/SNAP header */
 #define WLAN_TDLS_SNAP_RFTYPE  0x2
 
index b84e49c3a738fc4c2e726c31d78b85296e68a5b5..174f43f43affc1dcfd7a4a67a0efcf06547607fb 100644 (file)
@@ -24,6 +24,7 @@ struct team_pcpu_stats {
        struct u64_stats_sync   syncp;
        u32                     rx_dropped;
        u32                     tx_dropped;
+       u32                     rx_nohandler;
 };
 
 struct team;
index 5af7c66f1fcaccfde3c7bfcb80220ec865de6f2b..586c8c95dcb096d93959b1368d0846383045188c 100644 (file)
 /* Active distance in pixels for a gesture to be reported */
 #define CY_ACT_DIST_DFLT 0xF8 /* pixels */
 
-struct cyttsp_platform_data {
-       u32 maxx;
-       u32 maxy;
-       bool use_hndshk;
-       u8 act_dist;    /* Active distance */
-       u8 act_intrvl;  /* Active refresh interval; ms */
-       u8 tch_tmout;   /* Active touch timeout; ms */
-       u8 lp_intrvl;   /* Low power refresh interval; ms */
-       int (*init)(void);
-       void (*exit)(void);
-       char *name;
-       s16 irq_gpio;
-       u8 *bl_keys;
-};
-
 #endif /* _CYTTSP_H_ */
index f28dff313b07c3bb072dffc2235b27d5d0eaa09a..a5c539fa5d2bc03ba233f4d11de1b64d839990ab 100644 (file)
@@ -133,8 +133,9 @@ struct iommu_dm_region {
 
 /**
  * struct iommu_ops - iommu ops and capabilities
- * @domain_init: init iommu domain
- * @domain_destroy: destroy iommu domain
+ * @capable: check capability
+ * @domain_alloc: allocate iommu domain
+ * @domain_free: free iommu domain
  * @attach_dev: attach device to an iommu domain
  * @detach_dev: detach device from an iommu domain
  * @map: map a physically contiguous memory region to an iommu domain
@@ -144,8 +145,15 @@ struct iommu_dm_region {
  * @iova_to_phys: translate iova to physical address
  * @add_device: add device to iommu grouping
  * @remove_device: remove device from iommu grouping
+ * @device_group: find iommu group for a particular device
  * @domain_get_attr: Query domain attributes
  * @domain_set_attr: Change domain attributes
+ * @get_dm_regions: Request list of direct mapping requirements for a device
+ * @put_dm_regions: Free list of direct mapping requirements for a device
+ * @domain_window_enable: Configure and enable a particular window for a domain
+ * @domain_window_disable: Disable a particular window for a domain
+ * @domain_set_windows: Set the number of windows for a domain
+ * @domain_get_windows: Return the number of windows for a domain
  * @of_xlate: add OF master IDs to iommu grouping
  * @pgsize_bitmap: bitmap of supported page sizes
  * @priv: per-instance data private to the iommu driver
@@ -182,9 +190,9 @@ struct iommu_ops {
        int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
                                    phys_addr_t paddr, u64 size, int prot);
        void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
-       /* Set the numer of window per domain */
+       /* Set the number of windows per domain */
        int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
-       /* Get the numer of window per domain */
+       /* Get the number of windows per domain */
        u32 (*domain_get_windows)(struct iommu_domain *domain);
 
 #ifdef CONFIG_OF_IOMMU
index 24bea087e7af8083b3c81596a289d70530c64285..afb45597fb5f0d3aa19f6bb9fd0818995ae6623e 100644 (file)
@@ -20,6 +20,7 @@ struct resource {
        resource_size_t end;
        const char *name;
        unsigned long flags;
+       unsigned long desc;
        struct resource *parent, *sibling, *child;
 };
 
@@ -49,12 +50,19 @@ struct resource {
 #define IORESOURCE_WINDOW      0x00200000      /* forwarded by bridge */
 #define IORESOURCE_MUXED       0x00400000      /* Resource is software muxed */
 
+#define IORESOURCE_EXT_TYPE_BITS 0x01000000    /* Resource extended types */
+#define IORESOURCE_SYSRAM      0x01000000      /* System RAM (modifier) */
+
 #define IORESOURCE_EXCLUSIVE   0x08000000      /* Userland may not map this resource */
+
 #define IORESOURCE_DISABLED    0x10000000
 #define IORESOURCE_UNSET       0x20000000      /* No address assigned yet */
 #define IORESOURCE_AUTO                0x40000000
 #define IORESOURCE_BUSY                0x80000000      /* Driver has marked this resource busy */
 
+/* I/O resource extended types */
+#define IORESOURCE_SYSTEM_RAM          (IORESOURCE_MEM|IORESOURCE_SYSRAM)
+
 /* PnP IRQ specific bits (IORESOURCE_BITS) */
 #define IORESOURCE_IRQ_HIGHEDGE                (1<<0)
 #define IORESOURCE_IRQ_LOWEDGE         (1<<1)
@@ -105,6 +113,22 @@ struct resource {
 /* PCI control bits.  Shares IORESOURCE_BITS with above PCI ROM.  */
 #define IORESOURCE_PCI_FIXED           (1<<4)  /* Do not move resource */
 
+/*
+ * I/O Resource Descriptors
+ *
+ * Descriptors are used by walk_iomem_res_desc() and region_intersects()
+ * for searching a specific resource range in the iomem table.  Assign
+ * a new descriptor when a resource range supports the search interfaces.
+ * Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
+ */
+enum {
+       IORES_DESC_NONE                         = 0,
+       IORES_DESC_CRASH_KERNEL                 = 1,
+       IORES_DESC_ACPI_TABLES                  = 2,
+       IORES_DESC_ACPI_NV_STORAGE              = 3,
+       IORES_DESC_PERSISTENT_MEMORY            = 4,
+       IORES_DESC_PERSISTENT_MEMORY_LEGACY     = 5,
+};
 
 /* helpers to define resources */
 #define DEFINE_RES_NAMED(_start, _size, _name, _flags)                 \
@@ -113,6 +137,7 @@ struct resource {
                .end = (_start) + (_size) - 1,                          \
                .name = (_name),                                        \
                .flags = (_flags),                                      \
+               .desc = IORES_DESC_NONE,                                \
        }
 
 #define DEFINE_RES_IO_NAMED(_start, _size, _name)                      \
@@ -170,6 +195,10 @@ static inline unsigned long resource_type(const struct resource *res)
 {
        return res->flags & IORESOURCE_TYPE_BITS;
 }
+static inline unsigned long resource_ext_type(const struct resource *res)
+{
+       return res->flags & IORESOURCE_EXT_TYPE_BITS;
+}
 /* True iff r1 completely contains r2 */
 static inline bool resource_contains(struct resource *r1, struct resource *r2)
 {
@@ -239,8 +268,8 @@ extern int
 walk_system_ram_res(u64 start, u64 end, void *arg,
                    int (*func)(u64, u64, void *));
 extern int
-walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end, void *arg,
-              int (*func)(u64, u64, void *));
+walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
+                   void *arg, int (*func)(u64, u64, void *));
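
walk_iomem_res_desc() replaces the name-based walk_iomem_res(): callers now match on the new resource descriptor plus flags. Below is a sketch that totals ACPI-table bytes inside a window; the descriptor and flag choices are illustrative.

#include <linux/ioport.h>

static int sketch_count(u64 start, u64 end, void *arg)
{
        u64 *total = arg;

        *total += end - start + 1;
        return 0;               /* keep walking */
}

/* Sketch: sum the ACPI table ranges that intersect [start, end]. */
static u64 sketch_acpi_table_bytes(u64 start, u64 end)
{
        u64 total = 0;

        walk_iomem_res_desc(IORES_DESC_ACPI_TABLES,
                            IORESOURCE_MEM | IORESOURCE_BUSY,
                            start, end, &total, sketch_count);
        return total;
}
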
 
 /* True if any part of r1 overlaps r2 */
 static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
index 3c1c96786248cb02d2d9792cce455892323ef275..cd14cd4a22b4379a37b408ca6a51b8b402838045 100644 (file)
@@ -137,7 +137,7 @@ struct irq_domain;
  * @msi_desc:          MSI descriptor
  */
 struct irq_common_data {
-       unsigned int            state_use_accessors;
+       unsigned int            __private state_use_accessors;
 #ifdef CONFIG_NUMA
        unsigned int            node;
 #endif
@@ -208,7 +208,7 @@ enum {
        IRQD_FORWARDED_TO_VCPU          = (1 << 20),
 };
 
-#define __irqd_to_state(d)             ((d)->common->state_use_accessors)
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
 
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
 {
@@ -299,6 +299,8 @@ static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d)
        __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU;
 }
 
+#undef __irqd_to_state
+
 static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 {
        return d->hwirq;
index f64622ad02c196185be2eb79269e782d5772fcd0..04579d9fbce4f3b5c0a8484116500ee3034b93f7 100644 (file)
@@ -70,6 +70,7 @@ struct irq_fwspec {
  */
 enum irq_domain_bus_token {
        DOMAIN_BUS_ANY          = 0,
+       DOMAIN_BUS_WIRED,
        DOMAIN_BUS_PCI_MSI,
        DOMAIN_BUS_PLATFORM_MSI,
        DOMAIN_BUS_NEXUS,
index 1e9a0f2a86260b31619fbdcd7fe9aea66a1e1435..df97c8444f5d71d13999f28b2a26731ef8224822 100644 (file)
@@ -319,6 +319,7 @@ typedef struct modem_info {
   int                   online;          /* 1 = B-Channel is up, drop data */
                                         /* 2 = B-Channel is up, deliver d.*/
   int                   dialing;         /* Dial in progress or ATA        */
+  int                   closing;
   int                   rcvsched;        /* Receive needs schedule         */
   int                   isdn_driver;    /* Index to isdn-driver           */
   int                   isdn_channel;    /* Index to isdn-channel          */
index e23121f9d82a042a9b10907fe40e468e5711f99f..59ccab297ae061ba03494ccede8eae2d7a5236a1 100644 (file)
@@ -37,6 +37,9 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
 
 void clear_all_latency_tracing(struct task_struct *p);
 
+extern int sysctl_latencytop(struct ctl_table *table, int write,
+                       void __user *buffer, size_t *lenp, loff_t *ppos);
+
 #else
 
 static inline void
index bc1476fda96eb3206995b270e32cb54a49305eea..f203a8f89d30d0f3e214b43d2c72efea1bdad9f9 100644 (file)
@@ -39,6 +39,7 @@ struct led_classdev {
 
        /* Lower 16 bits reflect status */
 #define LED_SUSPENDED          (1 << 0)
+#define LED_UNREGISTERING      (1 << 1)
        /* Upper 16 bits reflect control information */
 #define LED_CORE_SUSPENDRESUME (1 << 16)
 #define LED_BLINK_ONESHOT      (1 << 17)
@@ -48,9 +49,12 @@ struct led_classdev {
 #define LED_BLINK_DISABLE      (1 << 21)
 #define LED_SYSFS_DISABLE      (1 << 22)
 #define LED_DEV_CAP_FLASH      (1 << 23)
+#define LED_HW_PLUGGABLE       (1 << 24)
 
-       /* Set LED brightness level */
-       /* Must not sleep, use a workqueue if needed */
+       /* Set LED brightness level
+        * Must not sleep. Use brightness_set_blocking for drivers
+        * that can sleep while setting brightness.
+        */
        void            (*brightness_set)(struct led_classdev *led_cdev,
                                          enum led_brightness brightness);
        /*
index 851821bfd55321dce527f4b32e03d1534994678a..bec2abbd7ab28485cbf32bfefa7430b6a47c81e4 100644 (file)
@@ -526,6 +526,7 @@ enum ata_lpm_policy {
 enum ata_lpm_hints {
        ATA_LPM_EMPTY           = (1 << 0), /* port empty/probing */
        ATA_LPM_HIPM            = (1 << 1), /* may use HIPM */
+       ATA_LPM_WAKE_ONLY       = (1 << 2), /* only wake up link */
 };
 
 /* forward declarations */
index c57e424d914b70fc5032f9b6d1ec918e8e195c64..d026b190c53066d25753ce98f0d7c66d864a6c0a 100644 (file)
@@ -66,7 +66,7 @@ struct lock_class {
        /*
         * class-hash:
         */
-       struct list_head                hash_entry;
+       struct hlist_node               hash_entry;
 
        /*
         * global list of all lock-classes:
@@ -199,7 +199,7 @@ struct lock_chain {
        u8                              irq_context;
        u8                              depth;
        u16                             base;
-       struct list_head                entry;
+       struct hlist_node               entry;
        u64                             chain_key;
 };
 
@@ -261,7 +261,6 @@ struct held_lock {
 /*
  * Initialization, self-test and debugging-output methods:
  */
-extern void lockdep_init(void);
 extern void lockdep_info(void);
 extern void lockdep_reset(void);
 extern void lockdep_reset_lock(struct lockdep_map *lock);
@@ -392,7 +391,6 @@ static inline void lockdep_on(void)
 # define lockdep_set_current_reclaim_state(g)  do { } while (0)
 # define lockdep_clear_current_reclaim_state() do { } while (0)
 # define lockdep_trace_alloc(g)                        do { } while (0)
-# define lockdep_init()                                do { } while (0)
 # define lockdep_info()                                do { } while (0)
 # define lockdep_init_map(lock, name, key, sub) \
                do { (void)(name); (void)(key); } while (0)
index 9ae48d4aeb5ec7d6fafa9528c79f65f62a091ed3..792c8981e63365b06a03e7feda7e647eafb533c0 100644 (file)
@@ -51,7 +51,7 @@ enum mem_cgroup_stat_index {
        MEM_CGROUP_STAT_SWAP,           /* # of pages, swapped out */
        MEM_CGROUP_STAT_NSTATS,
        /* default hierarchy stats */
-       MEMCG_SOCK,
+       MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
        MEMCG_NR_STAT,
 };
 
index bc6f7e00fb3de33438759c6c88184b7385dc86b7..1a5a87f3cd38e5733f8c9a518d7389b889fc56bd 100644 (file)
 
 #include <linux/platform_device.h>
 
+#define MFD_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define MFD_CELL_ALL(_name, _res, _pdata, _id, _compat, _match)                \
+       {                                                               \
+               .name = (_name),                                        \
+               .resources = (_res),                                    \
+               .num_resources = MFD_ARRAY_SIZE((_res)),                \
+               .platform_data = (_pdata),                              \
+               .pdata_size = MFD_ARRAY_SIZE((_pdata)),                 \
+               .of_compatible = (_compat),                             \
+               .acpi_match = (_match),                                 \
+               .id = _id,                                              \
+       }
+
+#define OF_MFD_CELL(_name, _res, _pdata, _id, _compat)                 \
+               MFD_CELL_ALL(_name, _res, _pdata, _id, _compat, NULL)   \
+
+#define ACPI_MFD_CELL(_name, _res, _pdata, _id, _match)                        \
+               MFD_CELL_ALL(_name, _res, _pdata, _id, NULL, _match)    \
+
+#define MFD_CELL_BASIC(_name, _res, _pdata, _id)                       \
+               MFD_CELL_ALL(_name, _res, _pdata, _id, NULL, NULL)      \
+
+#define MFD_CELL_NAME(_name)                                           \
+               MFD_CELL_ALL(_name, NULL, NULL, 0, NULL, NULL)          \
+
 struct irq_domain;
 struct property_set;
 
index 24b86d538e8852eef94cc020a7f92230dcfacedd..05d58ee5e6a78fb4d8366b2391d84a2f4838aa82 100644 (file)
  * Some controllers can support SDIO IRQ signalling.
  */
 #define TMIO_MMC_SDIO_IRQ              (1 << 2)
+
+/* Some controllers don't need to wait 10ms for clock changes */
+#define TMIO_MMC_FAST_CLK_CHG          (1 << 3)
+
 /*
  * Some controllers require waiting for the SD bus to become
  * idle before writing to some registers.
diff --git a/include/linux/mfd/tps65086.h b/include/linux/mfd/tps65086.h
new file mode 100644 (file)
index 0000000..a228ae4
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ *     Andrew F. Davis <afd@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether expressed or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License version 2 for more details.
+ *
+ * Based on the TPS65912 driver
+ */
+
+#ifndef __LINUX_MFD_TPS65086_H
+#define __LINUX_MFD_TPS65086_H
+
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+/* List of registers for TPS65086 */
+#define TPS65086_DEVICEID              0x01
+#define TPS65086_IRQ                   0x02
+#define TPS65086_IRQ_MASK              0x03
+#define TPS65086_PMICSTAT              0x04
+#define TPS65086_SHUTDNSRC             0x05
+#define TPS65086_BUCK1CTRL             0x20
+#define TPS65086_BUCK2CTRL             0x21
+#define TPS65086_BUCK3DECAY            0x22
+#define TPS65086_BUCK3VID              0x23
+#define TPS65086_BUCK3SLPCTRL          0x24
+#define TPS65086_BUCK4CTRL             0x25
+#define TPS65086_BUCK5CTRL             0x26
+#define TPS65086_BUCK6CTRL             0x27
+#define TPS65086_LDOA2CTRL             0x28
+#define TPS65086_LDOA3CTRL             0x29
+#define TPS65086_DISCHCTRL1            0x40
+#define TPS65086_DISCHCTRL2            0x41
+#define TPS65086_DISCHCTRL3            0x42
+#define TPS65086_PG_DELAY1             0x43
+#define TPS65086_FORCESHUTDN           0x91
+#define TPS65086_BUCK1SLPCTRL          0x92
+#define TPS65086_BUCK2SLPCTRL          0x93
+#define TPS65086_BUCK4VID              0x94
+#define TPS65086_BUCK4SLPVID           0x95
+#define TPS65086_BUCK5VID              0x96
+#define TPS65086_BUCK5SLPVID           0x97
+#define TPS65086_BUCK6VID              0x98
+#define TPS65086_BUCK6SLPVID           0x99
+#define TPS65086_LDOA2VID              0x9A
+#define TPS65086_LDOA3VID              0x9B
+#define TPS65086_BUCK123CTRL           0x9C
+#define TPS65086_PG_DELAY2             0x9D
+#define TPS65086_PIN_EN_MASK1          0x9E
+#define TPS65086_PIN_EN_MASK2          0x9F
+#define TPS65086_SWVTT_EN              0x9F
+#define TPS65086_PIN_EN_OVR1           0xA0
+#define TPS65086_PIN_EN_OVR2           0xA1
+#define TPS65086_GPOCTRL               0xA1
+#define TPS65086_PWR_FAULT_MASK1       0xA2
+#define TPS65086_PWR_FAULT_MASK2       0xA3
+#define TPS65086_GPO1PG_CTRL1          0xA4
+#define TPS65086_GPO1PG_CTRL2          0xA5
+#define TPS65086_GPO4PG_CTRL1          0xA6
+#define TPS65086_GPO4PG_CTRL2          0xA7
+#define TPS65086_GPO2PG_CTRL1          0xA8
+#define TPS65086_GPO2PG_CTRL2          0xA9
+#define TPS65086_GPO3PG_CTRL1          0xAA
+#define TPS65086_GPO3PG_CTRL2          0xAB
+#define TPS65086_LDOA1CTRL             0xAE
+#define TPS65086_PG_STATUS1            0xB0
+#define TPS65086_PG_STATUS2            0xB1
+#define TPS65086_PWR_FAULT_STATUS1     0xB2
+#define TPS65086_PWR_FAULT_STATUS2     0xB3
+#define TPS65086_TEMPCRIT              0xB4
+#define TPS65086_TEMPHOT               0xB5
+#define TPS65086_OC_STATUS             0xB6
+
+/* IRQ Register field definitions */
+#define TPS65086_IRQ_DIETEMP_MASK      BIT(0)
+#define TPS65086_IRQ_SHUTDN_MASK       BIT(3)
+#define TPS65086_IRQ_FAULT_MASK                BIT(7)
+
+/* DEVICEID Register field definitions */
+#define TPS65086_DEVICEID_PART_MASK    GENMASK(3, 0)
+#define TPS65086_DEVICEID_OTP_MASK     GENMASK(5, 4)
+#define TPS65086_DEVICEID_REV_MASK     GENMASK(7, 6)
+
+/* VID Masks */
+#define BUCK_VID_MASK                  GENMASK(7, 1)
+#define VDOA1_VID_MASK                 GENMASK(4, 1)
+#define VDOA23_VID_MASK                        GENMASK(3, 0)
+
+/* Define the TPS65086 IRQ numbers */
+enum tps65086_irqs {
+       TPS65086_IRQ_DIETEMP,
+       TPS65086_IRQ_SHUTDN,
+       TPS65086_IRQ_FAULT,
+};
+
+/**
+ * struct tps65086 - state holder for the tps65086 driver
+ *
+ * Device data may be used to access the TPS65086 chip
+ */
+struct tps65086 {
+       struct device *dev;
+       struct regmap *regmap;
+
+       /* IRQ Data */
+       int irq;
+       struct regmap_irq_chip_data *irq_data;
+};
+
+#endif /* __LINUX_MFD_TPS65086_H */
index f1cd22f2df1ac50438e7d70bb0df85c44580b8d3..3579d1e2fe3acd30bcf7733c356a8b018758391d 100644 (file)
@@ -201,11 +201,13 @@ extern unsigned int kobjsize(const void *objp);
 #endif
 
 #ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK       VM_GROWSUP
 #else
-#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK       VM_GROWSDOWN
 #endif
 
+#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+
 /*
  * Special vmas that are non-mergable, non-mlock()able.
  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
@@ -385,7 +387,8 @@ enum {
        REGION_MIXED,
 };
 
-int region_intersects(resource_size_t offset, size_t size, const char *type);
+int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
+                     unsigned long desc);
 
 /* Support for virtually mapped pages */
 struct page *vmalloc_to_page(const void *addr);
@@ -1341,8 +1344,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
                !vma_growsup(vma->vm_next, addr);
 }
 
-extern struct task_struct *task_of_stack(struct task_struct *task,
-                               struct vm_area_struct *vma, bool in_group);
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
@@ -2137,6 +2139,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn, pgprot_t pgprot);
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                        pfn_t pfn);
 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
index d3ebb9d21a5334d26e85bc865d318535f5864569..944b2b37313b49bffd581963a5f13380a7c6e520 100644 (file)
@@ -424,9 +424,9 @@ struct mm_struct {
        unsigned long total_vm;         /* Total pages mapped */
        unsigned long locked_vm;        /* Pages that have PG_mlocked set */
        unsigned long pinned_vm;        /* Refcount permanently increased */
-       unsigned long data_vm;          /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
-       unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE */
-       unsigned long stack_vm;         /* VM_GROWSUP/DOWN */
+       unsigned long data_vm;          /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+       unsigned long exec_vm;          /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+       unsigned long stack_vm;         /* VM_STACK */
        unsigned long def_flags;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
@@ -566,10 +566,26 @@ static inline void clear_tlb_flush_pending(struct mm_struct *mm)
 }
 #endif
 
-struct vm_special_mapping
-{
-       const char *name;
+struct vm_fault;
+
+struct vm_special_mapping {
+       const char *name;       /* The name, e.g. "[vdso]". */
+
+       /*
+        * If .fault is not provided, this points to a
+        * NULL-terminated array of pages that back the special mapping.
+        *
+        * This must not be NULL unless .fault is provided.
+        */
        struct page **pages;
+
+       /*
+        * If non-NULL, then this is called to resolve page faults
+        * on the special mapping.  If used, .pages is not checked.
+        */
+       int (*fault)(const struct vm_special_mapping *sm,
+                    struct vm_area_struct *vma,
+                    struct vm_fault *vmf);
 };
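
With the optional .fault hook above, a special mapping can resolve its pages on demand instead of supplying a fixed .pages array. A hedged sketch of the callback form follows; the backing page and names are invented, and a real user would install this with _install_special_mapping().

#include <linux/mm.h>
#include <linux/mm_types.h>

static struct page *sketch_page;        /* assumed allocated elsewhere */

/* Sketch: hand out the same page for every fault on the mapping. */
static int sketch_special_fault(const struct vm_special_mapping *sm,
                                struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        get_page(sketch_page);
        vmf->page = sketch_page;
        return 0;
}

static struct vm_special_mapping sketch_mapping = {
        .name  = "[sketch]",
        .fault = sketch_special_fault,
};
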
 
 enum tlb_flush_reason {
index 37967b6da03cf542d7a5762342c2089d93054b7a..b01e77de1a74de9350fd96a66440566592a69bf5 100644 (file)
@@ -113,7 +113,6 @@ struct mmc_data {
 
 #define MMC_DATA_WRITE (1 << 8)
 #define MMC_DATA_READ  (1 << 9)
-#define MMC_DATA_STREAM        (1 << 10)
 
        unsigned int            bytes_xfered;
 
index 89df7abedd67298d00df558af49de438cccf8439..7b41c6db1bb6e704cba300f137b7de15e494b966 100644 (file)
@@ -235,21 +235,11 @@ struct dw_mci_dma_ops {
 };
 
 /* IP Quirks/flags. */
-/* Unreliable card detection */
-#define DW_MCI_QUIRK_BROKEN_CARD_DETECTION     BIT(0)
 /* Timer for broken data transfer over scheme */
-#define DW_MCI_QUIRK_BROKEN_DTO                        BIT(1)
+#define DW_MCI_QUIRK_BROKEN_DTO                        BIT(0)
 
 struct dma_pdata;
 
-struct block_settings {
-       unsigned short  max_segs;       /* see blk_queue_max_segments */
-       unsigned int    max_blk_size;   /* maximum size of one mmc block */
-       unsigned int    max_blk_count;  /* maximum number of blocks in one req*/
-       unsigned int    max_req_size;   /* maximum number of bytes in one req*/
-       unsigned int    max_seg_size;   /* see blk_queue_max_segment_size */
-};
-
 /* Board platform data */
 struct dw_mci_board {
        u32 num_slots;
index 84d9053b5dca3ef453ac611fd6b223c02e1903c6..5f5cd80e97650047a8d4752d0389c5598f2a56a7 100644 (file)
@@ -1,6 +1,8 @@
 /*
  * include/linux/mmc/tmio.h
  *
+ * Copyright (C) 2016 Sang Engineering, Wolfram Sang
+ * Copyright (C) 2015-16 Renesas Electronics Corporation
  * Copyright (C) 2007 Ian Molton
  * Copyright (C) 2004 Ian Molton
  *
@@ -61,6 +63,9 @@
 #define TMIO_STAT_CMD_BUSY      0x40000000
 #define TMIO_STAT_ILL_ACCESS    0x80000000
 
+#define        CLK_CTL_DIV_MASK        0xff
+#define        CLK_CTL_SCLKEN          BIT(8)
+
 #define TMIO_BBS               512             /* Boot block size */
 
 #endif /* LINUX_MMC_TMIO_H */
index 33bb1b19273e3ad165382f4c7f0f3cd8f8c0b407..7b6c2cfee390e939e13b0f14c92224bcb3aafcf4 100644 (file)
@@ -682,6 +682,12 @@ typedef struct pglist_data {
         */
        unsigned long first_deferred_pfn;
 #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       spinlock_t split_queue_lock;
+       struct list_head split_queue;
+       unsigned long split_queue_len;
+#endif
 } pg_data_t;
 
 #define node_present_pages(nid)        (NODE_DATA(nid)->node_present_pages)
index 4560d8f1545d2cec7e6da19da460fff26151c105..2bb0c308570672e7105b14f85a4215b0f9500207 100644 (file)
@@ -324,6 +324,12 @@ struct module_layout {
 #define __module_layout_align
 #endif
 
+struct mod_kallsyms {
+       Elf_Sym *symtab;
+       unsigned int num_symtab;
+       char *strtab;
+};
+
 struct module {
        enum module_state state;
 
@@ -405,15 +411,10 @@ struct module {
 #endif
 
 #ifdef CONFIG_KALLSYMS
-       /*
-        * We keep the symbol and string tables for kallsyms.
-        * The core_* fields below are temporary, loader-only (they
-        * could really be discarded after module init).
-        */
-       Elf_Sym *symtab, *core_symtab;
-       unsigned int num_symtab, core_num_syms;
-       char *strtab, *core_strtab;
-
+       /* Protected by RCU and/or module_mutex: use rcu_dereference() */
+       struct mod_kallsyms *kallsyms;
+       struct mod_kallsyms core_kallsyms;
+       
        /* Section attributes */
        struct module_sect_attrs *sect_attrs;
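
Module symbol tables now sit behind an RCU-protected mod_kallsyms pointer, as the comment in the hunk notes. A hedged sketch of a reader, assuming CONFIG_KALLSYMS and an invented helper:

#include <linux/module.h>
#include <linux/rcupdate.h>

/* Sketch: read the symbol count under rcu_read_lock(), per the comment above. */
static unsigned int sketch_num_symbols(struct module *mod)
{
        struct mod_kallsyms *kallsyms;
        unsigned int n;

        rcu_read_lock();
        kallsyms = rcu_dereference(mod->kallsyms);
        n = kallsyms->num_symtab;
        rcu_read_unlock();

        return n;
}
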
 
index 36bb6a503f196ea04e4184c1445aa142db906607..3bf8f954b642581c271a7236e8e1b46f6820b015 100644 (file)
@@ -166,7 +166,6 @@ struct bbm_info {
 };
 
 /* OneNAND BBT interface */
-extern int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd);
 extern int onenand_default_bbt(struct mtd_info *mtd);
 
 #endif /* __LINUX_MTD_BBM_H */
index 02cd5f9b79b875ed03e5b9042df1cf98eff60709..8255118be0f0a508fdee0b6a83ba148e0d837bb9 100644 (file)
@@ -44,7 +44,6 @@ struct INFTLrecord {
        unsigned int nb_blocks;         /* number of physical blocks */
        unsigned int nb_boot_blocks;    /* number of blocks used by the bios */
        struct erase_info instr;
-       struct nand_ecclayout oobinfo;
 };
 
 int INFTL_mount(struct INFTLrecord *s);
index bdd68e22b5a59d891765d2b5f852a7fabb57d141..7604f4be33865deeee79c50251a2a71e4aebfe38 100644 (file)
@@ -168,6 +168,12 @@ typedef enum {
 /* Device supports subpage reads */
 #define NAND_SUBPAGE_READ      0x00001000
 
+/*
+ * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
+ * patterns.
+ */
+#define NAND_NEED_SCRAMBLING   0x00002000
+
 /* Options valid for Samsung large page devices */
 #define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
 
@@ -896,7 +902,6 @@ extern int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
  * @chip_delay:                R/B delay value in us
  * @options:           Option flags, e.g. 16bit buswidth
  * @bbt_options:       BBT option flags, e.g. NAND_BBT_USE_FLASH
- * @ecclayout:         ECC layout info structure
  * @part_probe_types:  NULL-terminated array of probe types
  */
 struct platform_nand_chip {
@@ -904,7 +909,6 @@ struct platform_nand_chip {
        int chip_offset;
        int nr_partitions;
        struct mtd_partition *partitions;
-       struct nand_ecclayout *ecclayout;
        int chip_delay;
        unsigned int options;
        unsigned int bbt_options;
index b059629e22bc6901f9f17007672b0ee2f24191ec..044daa02b8ff94fde7531d4e9c4ebe548d5d49ef 100644 (file)
@@ -50,7 +50,6 @@ struct NFTLrecord {
         unsigned int nb_blocks;                /* number of physical blocks */
         unsigned int nb_boot_blocks;   /* number of blocks used by the bios */
         struct erase_info instr;
-       struct nand_ecclayout oobinfo;
 };
 
 int NFTL_mount(struct NFTLrecord *s);
index 5ac140dcb789499e51b951fa45fb34378d636407..219f53c30cb3cd4a9a2fdd1c2a2d3ca343f28bca 100644 (file)
@@ -512,7 +512,6 @@ static inline void napi_enable(struct napi_struct *n)
        clear_bit(NAPI_STATE_NPSVC, &n->state);
 }
 
-#ifdef CONFIG_SMP
 /**
  *     napi_synchronize - wait until NAPI is not running
  *     @n: napi context
@@ -523,12 +522,12 @@ static inline void napi_enable(struct napi_struct *n)
  */
 static inline void napi_synchronize(const struct napi_struct *n)
 {
-       while (test_bit(NAPI_STATE_SCHED, &n->state))
-               msleep(1);
+       if (IS_ENABLED(CONFIG_SMP))
+               while (test_bit(NAPI_STATE_SCHED, &n->state))
+                       msleep(1);
+       else
+               barrier();
 }
-#else
-# define napi_synchronize(n)   barrier()
-#endif
 
 enum netdev_queue_state_t {
        __QUEUE_STATE_DRV_XOFF,
@@ -1398,6 +1397,8 @@ enum netdev_priv_flags {
  *                     do not use this in drivers
  *     @tx_dropped:    Dropped packets by core network,
  *                     do not use this in drivers
+ *     @rx_nohandler:  nohandler dropped packets by core network on
+ *                     inactive devices, do not use this in drivers
  *
  *     @wireless_handlers:     List of functions to handle Wireless Extensions,
  *                             instead of ioctl,
@@ -1612,6 +1613,7 @@ struct net_device {
 
        atomic_long_t           rx_dropped;
        atomic_long_t           tx_dropped;
+       atomic_long_t           rx_nohandler;
 
 #ifdef CONFIG_WIRELESS_EXT
        const struct iw_handler_def *   wireless_handlers;
@@ -3742,7 +3744,7 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
 
 /* RSS keys are 40 or 52 bytes long */
 #define NETDEV_RSS_KEY_LEN 52
-extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 void netdev_rss_key_fill(void *buffer, size_t len);
 
 int dev_get_nest_level(struct net_device *dev,
index 48e0320cd6432463b86cc3d7684d5ecd4ab48651..67300f8e5f2f0248e8651cbe17e5c14e1110ca2c 100644 (file)
@@ -550,9 +550,7 @@ extern int  nfs_readpage_async(struct nfs_open_context *, struct inode *,
 
 static inline loff_t nfs_size_to_loff_t(__u64 size)
 {
-       if (size > (__u64) OFFSET_MAX - 1)
-               return OFFSET_MAX - 1;
-       return (loff_t) size;
+       return min_t(u64, size, OFFSET_MAX);
 }
 
 static inline ino_t
index dd10626a615fb160587ecf09f860de36ba583c19..dc6e39696b64c8e925601f598b9cdab5684f1970 100644 (file)
@@ -929,7 +929,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
        return num;
 }
 
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && !defined(MODULE)
 #define _OF_DECLARE(table, name, compat, fn, fn_type)                  \
        static const struct of_device_id __of_table_##name              \
                __used __section(__##table##_of_table)                  \
index df9ef380181285a5b196b18a925942346c836d63..2fbe8682a66f491975ca13481572f59ef7f8ee5b 100644 (file)
@@ -88,7 +88,7 @@ extern void unflatten_device_tree(void);
 extern void unflatten_and_copy_device_tree(void);
 extern void early_init_devtree(void *);
 extern void early_get_first_memblock_info(void *, phys_addr_t *);
-extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern u64 of_flat_dt_translate_address(unsigned long node);
 extern void of_fdt_limit_memory(int limit);
 #else /* CONFIG_OF_FLATTREE */
 static inline void early_init_fdt_scan_reserved_mem(void) {}
index 27df4a6585daedcc6a74865bf048cfd06a0593ba..3d371c15075316670f37ee2ac284d9befc9f9de8 100644 (file)
@@ -746,9 +746,26 @@ struct pci_driver {
        .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
        .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
 
+enum {
+       PCI_REASSIGN_ALL_RSRC   = 0x00000001,   /* ignore firmware setup */
+       PCI_REASSIGN_ALL_BUS    = 0x00000002,   /* reassign all bus numbers */
+       PCI_PROBE_ONLY          = 0x00000004,   /* use existing setup */
+       PCI_CAN_SKIP_ISA_ALIGN  = 0x00000008,   /* don't do ISA alignment */
+       PCI_ENABLE_PROC_DOMAINS = 0x00000010,   /* enable domains in /proc */
+       PCI_COMPAT_DOMAIN_0     = 0x00000020,   /* ... except domain 0 */
+       PCI_SCAN_ALL_PCIE_DEVS  = 0x00000040,   /* scan all, not just dev 0 */
+};
+
 /* these external functions are only available when PCI support is enabled */
 #ifdef CONFIG_PCI
 
+extern unsigned int pci_flags;
+
+static inline void pci_set_flags(int flags) { pci_flags = flags; }
+static inline void pci_add_flags(int flags) { pci_flags |= flags; }
+static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
+static inline int pci_has_flag(int flag) { return pci_flags & flag; }
+
 void pcie_bus_configure_settings(struct pci_bus *bus);
 
 enum pcie_bus_config_types {
@@ -1405,6 +1422,11 @@ void pci_register_set_vga_state(arch_set_vga_state_t func);
 
 #else /* CONFIG_PCI is not enabled */
 
+static inline void pci_set_flags(int flags) { }
+static inline void pci_add_flags(int flags) { }
+static inline void pci_clear_flags(int flags) { }
+static inline int pci_has_flag(int flag) { return 0; }
+
 /*
  *  If the system does not have PCI, clearly these return errors.  Define
  *  these as simple inline functions to avoid hair in drivers.
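
To make the now-shared flags concrete, a hedged sketch of the intended usage
split: architecture setup code sets the policy once, later code queries it.
The foo_* functions are invented; pci_add_flags(), pci_has_flag() and the
PCI_* values are the ones declared above, and with !CONFIG_PCI the stub
version of pci_has_flag() simply returns 0.

#include <linux/init.h>
#include <linux/pci.h>

/* Arch setup: decide once how aggressively to touch the bus. */
static void __init foo_arch_pci_setup(bool trust_firmware)
{
        if (trust_firmware)
                pci_add_flags(PCI_PROBE_ONLY);
        else
                pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
}

/* Anywhere else: query the policy instead of open-coding arch #ifdefs. */
static bool foo_may_reassign_resources(void)
{
        return !pci_has_flag(PCI_PROBE_ONLY);
}
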
index f9828a48f16addab4737683f3c8345ab87e52a0a..b35a61a481fa0460b3b32591e6dd52f2fa9077e7 100644 (file)
@@ -634,9 +634,6 @@ struct perf_event_context {
        int                             nr_cgroups;      /* cgroup evts */
        void                            *task_ctx_data; /* pmu specific data */
        struct rcu_head                 rcu_head;
-
-       struct delayed_work             orphans_remove;
-       bool                            orphans_remove_sched;
 };
 
 /*
@@ -729,7 +726,7 @@ extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
 extern void perf_event_delayed_put(struct task_struct *task);
-extern struct perf_event *perf_event_get(unsigned int fd);
+extern struct file *perf_event_get(unsigned int fd);
 extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
@@ -1044,7 +1041,7 @@ extern void perf_swevent_put_recursion_context(int rctx);
 extern u64 perf_swevent_set_period(struct perf_event *event);
 extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
-extern int __perf_event_disable(void *info);
+extern void perf_event_disable_local(struct perf_event *event);
 extern void perf_event_task_tick(void);
 #else /* !CONFIG_PERF_EVENTS: */
 static inline void *
@@ -1070,7 +1067,7 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child)     { }
 static inline void perf_event_free_task(struct task_struct *task)      { }
 static inline void perf_event_delayed_put(struct task_struct *task)    { }
-static inline struct perf_event *perf_event_get(unsigned int fd)       { return ERR_PTR(-EINVAL); }
+static inline struct file *perf_event_get(unsigned int fd)     { return ERR_PTR(-EINVAL); }
 static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
 {
        return ERR_PTR(-EINVAL);
index 0703b5360d31b5fa599a541bce9881941a170281..37448ab5fb5c2eddc80b97125ee80ecff4bc1b58 100644 (file)
@@ -29,7 +29,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
        return __pfn_to_pfn_t(pfn, 0);
 }
 
-extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags);
+extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
 
 static inline bool pfn_t_has_page(pfn_t pfn)
 {
@@ -48,7 +48,7 @@ static inline struct page *pfn_t_to_page(pfn_t pfn)
        return NULL;
 }
 
-static inline dma_addr_t pfn_t_to_phys(pfn_t pfn)
+static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
 {
        return PFN_PHYS(pfn_t_to_pfn(pfn));
 }
index 36bb92172f4795ff806275f8777d5ecf0bd93b1a..c55e42ee57fa0c9ba3083d6bf03b04fae275d13c 100644 (file)
@@ -40,7 +40,6 @@ struct s3c2410_nand_set {
        char                    *name;
        int                     *nr_map;
        struct mtd_partition    *partitions;
-       struct nand_ecclayout   *ecc_layout;
 };
 
 struct s3c2410_platform_nand {
diff --git a/include/linux/platform_data/sdhci-pic32.h b/include/linux/platform_data/sdhci-pic32.h
new file mode 100644 (file)
index 0000000..7e0efe6
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Purna Chandra Mandal, purna.mandal@microchip.com
+ * Copyright (C) 2015 Microchip Technology Inc.  All rights reserved.
+ *
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ */
+#ifndef __PIC32_SDHCI_PDATA_H__
+#define __PIC32_SDHCI_PDATA_H__
+
+struct pic32_sdhci_platform_data {
+       /* read & write fifo threshold */
+       int (*setup_dma)(u32 rfifo, u32 wfifo);
+};
+
+#endif
index d09275f3cde3d852bfec79c25359e4d76f37eb01..2ba2c34ca3d33cd5f679c8328145fd5944af3ae2 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/device.h>
 #include <linux/pm_qos.h>
 
-#define DRIVER_NAME    "omap_uart"
+#define OMAP_SERIAL_DRIVER_NAME        "omap_uart"
 
 /*
  * Use tty device name as ttyO, [O -> OMAP]
index 95403d2ccaf56b70207fa09e65c592d882fb30fd..cccaf4a29e9f02c9a60b65f73a523a69efa5af3a 100644 (file)
@@ -34,6 +34,8 @@ bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp);
 
 int dev_pm_opp_get_opp_count(struct device *dev);
 unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev);
+unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev);
 struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev);
 
 struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
@@ -60,6 +62,9 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
 void dev_pm_opp_put_supported_hw(struct device *dev);
 int dev_pm_opp_set_prop_name(struct device *dev, const char *name);
 void dev_pm_opp_put_prop_name(struct device *dev);
+int dev_pm_opp_set_regulator(struct device *dev, const char *name);
+void dev_pm_opp_put_regulator(struct device *dev);
+int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
 #else
 static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 {
@@ -86,6 +91,16 @@ static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
        return 0;
 }
 
+static inline unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
+{
+       return 0;
+}
+
+static inline unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
+{
+       return 0;
+}
+
 static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 {
        return NULL;
@@ -151,6 +166,18 @@ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
 
 static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
 
+static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
+{
+       return -EINVAL;
+}
+
+static inline void dev_pm_opp_put_regulator(struct device *dev) {}
+
+static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+{
+       return -EINVAL;
+}
+
 #endif         /* CONFIG_PM_OPP */
 
 #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
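
A rough sketch of how a cpufreq-style consumer could combine the new helpers;
the "cpu" supply name, foo_set_target() and the call ordering are assumptions
for illustration, only the dev_pm_opp_* prototypes come from the header (a
real driver would normally register the regulator once at probe time rather
than per transition).

#include <linux/device.h>
#include <linux/pm_opp.h>

static int foo_set_target(struct device *cpu_dev, unsigned long target_hz)
{
        int ret;

        /* Tell the OPP core which regulator supplies this device ... */
        ret = dev_pm_opp_set_regulator(cpu_dev, "cpu");
        if (ret)
                return ret;

        /* ... then let it sequence the clock and voltage changes. */
        ret = dev_pm_opp_set_rate(cpu_dev, target_hz);

        dev_pm_opp_put_regulator(cpu_dev);
        return ret;
}
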
index 12c4865457adc3d0412c573b710feec7d3d6fd81..393efe2edf9afb9d8a38d1a16201e78c64881026 100644 (file)
@@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu);
 bool psci_power_state_loses_context(u32 state);
 bool psci_power_state_is_valid(u32 state);
 
+int psci_cpu_init_idle(unsigned int cpu);
+int psci_cpu_suspend_enter(unsigned long index);
+
 struct psci_operations {
        int (*cpu_suspend)(u32 state, unsigned long entry_point);
        int (*cpu_off)(u32 state);
index c2f2574ff61ceebbe4729df6d577722b391d0600..2a097d176ba964faae345a245f516c447701553c 100644 (file)
@@ -197,6 +197,7 @@ enum pxa_ssp_type {
        QUARK_X1000_SSP,
        LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
        LPSS_BYT_SSP,
+       LPSS_BSW_SSP,
        LPSS_SPT_SSP,
        LPSS_BXT_SSP,
 };
index 9e12000914b3451107abdde161e1ea64b7cfeb5e..1e36898edbda783215113a857c60003d736abd1c 100644 (file)
@@ -29,6 +29,12 @@ extern bool qcom_scm_hdcp_available(void);
 extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
                u32 *resp);
 
+extern bool qcom_scm_pas_supported(u32 peripheral);
+extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size);
+extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
+extern int qcom_scm_pas_shutdown(u32 peripheral);
+
 #define QCOM_SCM_CPU_PWR_DOWN_L2_ON    0x0
 #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF   0x1
 
index b2505acfd3c078c70e733f7d9e826cfc4b6c9524..9dfb6bce8c9eb08f0c45a8d1b76f86b0b489b30e 100644 (file)
@@ -306,6 +306,7 @@ struct quota_format_ops {
        int (*read_dqblk)(struct dquot *dquot);         /* Read structure for one user */
        int (*commit_dqblk)(struct dquot *dquot);       /* Write structure for one user */
        int (*release_dqblk)(struct dquot *dquot);      /* Called when last reference to dquot is being dropped */
+       int (*get_next_id)(struct super_block *sb, struct kqid *qid);   /* Get next ID with existing structure in the quota file */
 };
 
 /* Operations working with dquots */
@@ -321,6 +322,8 @@ struct dquot_operations {
         * quota code only */
        qsize_t *(*get_reserved_space) (struct inode *);
        int (*get_projid) (struct inode *, kprojid_t *);/* Get project ID */
+       /* Get next ID with active quota structure */
+       int (*get_next_id) (struct super_block *sb, struct kqid *qid);
 };
 
 struct path;
@@ -425,6 +428,8 @@ struct quotactl_ops {
        int (*quota_sync)(struct super_block *, int);
        int (*set_info)(struct super_block *, int, struct qc_info *);
        int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+       int (*get_nextdqblk)(struct super_block *, struct kqid *,
+                            struct qc_dqblk *);
        int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
        int (*get_state)(struct super_block *, struct qc_state *);
        int (*rm_xquota)(struct super_block *, unsigned int);
index 7a57c28eb5e708d0346760818533d7b6f8665096..f00fa86ac9660ad79e243f9c9e590bf854dcb48d 100644 (file)
@@ -82,6 +82,7 @@ int dquot_commit(struct dquot *dquot);
 int dquot_acquire(struct dquot *dquot);
 int dquot_release(struct dquot *dquot);
 int dquot_commit_info(struct super_block *sb, int type);
+int dquot_get_next_id(struct super_block *sb, struct kqid *qid);
 int dquot_mark_dquot_dirty(struct dquot *dquot);
 
 int dquot_file_open(struct inode *inode, struct file *file);
@@ -99,6 +100,8 @@ int dquot_get_state(struct super_block *sb, struct qc_state *state);
 int dquot_set_dqinfo(struct super_block *sb, int type, struct qc_info *ii);
 int dquot_get_dqblk(struct super_block *sb, struct kqid id,
                struct qc_dqblk *di);
+int dquot_get_next_dqblk(struct super_block *sb, struct kqid *id,
+               struct qc_dqblk *di);
 int dquot_set_dqblk(struct super_block *sb, struct kqid id,
                struct qc_dqblk *di);
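
A sketch of how a filesystem built on the generic dquot code is expected to
wire up the new "next ID" hooks; the foo_ prefix and the trimmed-down ops
tables are illustrative, the dquot_* helpers are the ones exported above.

#include <linux/quota.h>
#include <linux/quotaops.h>

static const struct dquot_operations foo_quota_operations = {
        .write_dquot    = dquot_commit,
        .acquire_dquot  = dquot_acquire,
        .release_dquot  = dquot_release,
        .write_info     = dquot_commit_info,
        .get_next_id    = dquot_get_next_id,    /* new: walk allocated IDs */
};

static const struct quotactl_ops foo_quotactl_operations = {
        .get_dqblk      = dquot_get_dqblk,
        .get_nextdqblk  = dquot_get_next_dqblk, /* new: usage of the next ID */
        .set_dqblk      = dquot_set_dqblk,
};
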
 
index 7c88ad156a293c0bedcea771b58f3113909b6532..f54be708220760f9a367f4a29ca1cbe7cfb7fba2 100644 (file)
@@ -378,13 +378,29 @@ radix_tree_iter_init(struct radix_tree_iter *iter, unsigned long start)
 void **radix_tree_next_chunk(struct radix_tree_root *root,
                             struct radix_tree_iter *iter, unsigned flags);
 
+/**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter:      iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true.  If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+       iter->next_index = iter->index;
+       return NULL;
+}
+
 /**
  * radix_tree_chunk_size - get current chunk size
  *
  * @iter:      pointer to radix tree iterator
  * Returns:    current chunk size
  */
-static __always_inline unsigned
+static __always_inline long
 radix_tree_chunk_size(struct radix_tree_iter *iter)
 {
        return iter->next_index - iter->index;
@@ -418,9 +434,9 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
                        return slot + offset + 1;
                }
        } else {
-               unsigned size = radix_tree_chunk_size(iter) - 1;
+               long size = radix_tree_chunk_size(iter);
 
-               while (size--) {
+               while (--size > 0) {
                        slot++;
                        iter->index++;
                        if (likely(*slot))
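
The retry pattern that the new helper's kerneldoc describes, spelled out as a
minimal RCU-protected walk; the tree being walked and the pr_info() are
placeholders, the deref/retry calls are the documented API.

#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

static void foo_dump(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        void **slot;

        rcu_read_lock();
        radix_tree_for_each_slot(slot, root, &iter, 0) {
                void *entry = radix_tree_deref_slot(slot);

                if (radix_tree_deref_retry(entry)) {
                        /* Raced with an update: restart this chunk. */
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }
                pr_info("index %lu -> %p\n", iter.index, entry);
        }
        rcu_read_unlock();
}
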
index a7a06d1dcf9cb17ee65d1befe1ef6ac62d882d16..a0118d5929a9c9ed17df3b75a8b599472ff1f205 100644 (file)
@@ -152,6 +152,8 @@ void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
 
 # define jiffies       raid6_jiffies()
 # define printk        printf
+# define pr_err(format, ...) fprintf(stderr, format, ## __VA_ARGS__)
+# define pr_info(format, ...) fprintf(stdout, format, ## __VA_ARGS__)
 # define GFP_KERNEL    0
 # define __get_free_pages(x, y)        ((unsigned long)mmap(NULL, PAGE_SIZE << (y), \
                                                     PROT_READ|PROT_WRITE,   \
index 64809aea661cee43646c4803247604cc40f16d6c..93aea75029fbd7795e99ffcf77d781ed122f1cba 100644 (file)
@@ -149,6 +149,22 @@ static inline unsigned long rcu_batches_completed_sched(void)
        return 0;
 }
 
+/*
+ * Return the number of expedited grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of expedited sched grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed_sched(void)
+{
+       return 0;
+}
+
 static inline void rcu_force_quiescent_state(void)
 {
 }
index ad1eda9fa4daea077998d253dc604c18c3d444c6..5043cb823fb273b48a24c5366e800ee704000355 100644 (file)
@@ -87,6 +87,8 @@ unsigned long rcu_batches_started_sched(void);
 unsigned long rcu_batches_completed(void);
 unsigned long rcu_batches_completed_bh(void);
 unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_exp_batches_completed(void);
+unsigned long rcu_exp_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 
 void rcu_force_quiescent_state(void);
index 18394343f4891a42dc986e4a17e9c9ebb0b747ee..27aaac9027c4545d0e69a22b49128b380218cbe3 100644 (file)
@@ -162,7 +162,7 @@ typedef void (*regmap_unlock)(void *);
  *               This field is a duplicate of a similar file in
  *               'struct regmap_bus' and serves exact same purpose.
  *                Use it only for "no-bus" cases.
- * @max_register: Optional, specifies the maximum valid register index.
+ * @max_register: Optional, specifies the maximum valid register address.
  * @wr_table:     Optional, points to a struct regmap_access_table specifying
  *                valid ranges for write access.
  * @rd_table:     As above, for read access.
index 132e05c466617b9336d7e0db49ef8455f48f171b..6029279f4eed990a74ce8d8bdc8718dcf35ca606 100644 (file)
@@ -18,6 +18,9 @@
 
 #define LP872X_MAX_REGULATORS          9
 
+#define LP8720_ENABLE_DELAY            200
+#define LP8725_ENABLE_DELAY            30000
+
 enum lp872x_regulator_id {
        LP8720_ID_BASE,
        LP8720_ID_LDO1 = LP8720_ID_BASE,
@@ -79,12 +82,14 @@ struct lp872x_regulator_data {
  * @update_config     : if LP872X_GENERAL_CFG register is updated, set true
  * @regulator_data    : platform regulator id and init data
  * @dvs               : dvs data for buck voltage control
+ * @enable_gpio       : gpio pin number for enable control
  */
 struct lp872x_platform_data {
        u8 general_config;
        bool update_config;
        struct lp872x_regulator_data regulator_data[LP872X_MAX_REGULATORS];
        struct lp872x_dvs *dvs;
+       int enable_gpio;
 };
 
 #endif
index d9010789b4e8215cc74a2ffc12e83f3cfd9b73cd..7af625f6d226a4d16266107282668fdb9bfc3bdc 100644 (file)
@@ -104,7 +104,8 @@ int __must_check rfkill_register(struct rfkill *rfkill);
  *
  * Pause polling -- say transmitter is off for other reasons.
  * NOTE: not necessary for suspend/resume -- in that case the
- * core stops polling anyway
+ * core stops polling anyway (but will also correctly handle
+ * the case of polling having been paused before suspend.)
  */
 void rfkill_pause_polling(struct rfkill *rfkill);
 
index bdf597c4f0be82965911266ed1668d06510492cb..a07f42bedda32687c94ed574f5a606b19912b042 100644 (file)
@@ -109,20 +109,6 @@ static inline void put_anon_vma(struct anon_vma *anon_vma)
                __put_anon_vma(anon_vma);
 }
 
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-       struct anon_vma *anon_vma = vma->anon_vma;
-       if (anon_vma)
-               down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-       struct anon_vma *anon_vma = vma->anon_vma;
-       if (anon_vma)
-               up_write(&anon_vma->root->rwsem);
-}
-
 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
        down_write(&anon_vma->root->rwsem);
index a10494a94cc30f24d65ab866771833883c6f886d..98db9e2e04fe836ef2248f14a6a81b56a828302c 100644 (file)
@@ -775,7 +775,6 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_AUDIT
        unsigned audit_tty;
-       unsigned audit_tty_log_passwd;
        struct tty_audit_buf *tty_audit_buf;
 #endif
 
@@ -920,6 +919,10 @@ static inline int sched_info_on(void)
 #endif
 }
 
+#ifdef CONFIG_SCHEDSTATS
+void force_schedstat_enabled(void);
+#endif
+
 enum cpu_idle_type {
        CPU_IDLE,
        CPU_NOT_IDLE,
index c9e4731cf10b8e97956b160c503e447490991931..4f080ab4f2cd1199f3f5aee15e7b06fdebb61333 100644 (file)
@@ -95,4 +95,8 @@ extern int sysctl_numa_balancing(struct ctl_table *table, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *ppos);
 
+extern int sysctl_schedstats(struct ctl_table *table, int write,
+                                void __user *buffer, size_t *lenp,
+                                loff_t *ppos);
+
 #endif /* _SCHED_SYSCTL_H */
index faa0e0370ce73c59305ed2a14928ad8bb0c4cbe2..4348797597256f06ffbd091209366335ea340664 100644 (file)
@@ -76,6 +76,12 @@ struct uart_8250_ops {
        void            (*release_irq)(struct uart_8250_port *);
 };
 
+struct uart_8250_em485 {
+       struct timer_list       start_tx_timer; /* "rs485 start tx" timer */
+       struct timer_list       stop_tx_timer;  /* "rs485 stop tx" timer */
+       struct timer_list       *active_timer;  /* pointer to active timer */
+};
+
 /*
  * This should be used by drivers which want to register
  * their own 8250 ports without registering their own
@@ -122,6 +128,8 @@ struct uart_8250_port {
        /* 8250 specific callbacks */
        int                     (*dl_read)(struct uart_8250_port *);
        void                    (*dl_write)(struct uart_8250_port *, int);
+
+       struct uart_8250_em485 *em485;
 };
 
 static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up)
index e03d6ba5e5b428849b6a269fc6db72a7563b4dc9..cbfcf38e220def6070e3de57c56ce34f3536bd6c 100644 (file)
@@ -342,21 +342,26 @@ struct earlycon_device {
 
 struct earlycon_id {
        char    name[16];
+       char    compatible[128];
        int     (*setup)(struct earlycon_device *, const char *options);
 } __aligned(32);
 
-extern int setup_earlycon(char *buf);
-extern int of_setup_earlycon(unsigned long addr,
-                            int (*setup)(struct earlycon_device *, const char *));
+extern const struct earlycon_id __earlycon_table[];
+extern const struct earlycon_id __earlycon_table_end[];
+
+#define OF_EARLYCON_DECLARE(_name, compat, fn)                         \
+       static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
+            __used __section(__earlycon_table)                         \
+               = { .name = __stringify(_name),                         \
+                   .compatible = compat,                               \
+                   .setup = fn  }
 
-#define EARLYCON_DECLARE(_name, func)                                  \
-       static const struct earlycon_id __earlycon_##_name              \
-               __used __section(__earlycon_table)                      \
-                = { .name  = __stringify(_name),                       \
-                    .setup = func  }
+#define EARLYCON_DECLARE(_name, fn)    OF_EARLYCON_DECLARE(_name, "", fn)
 
-#define OF_EARLYCON_DECLARE(name, compat, fn)                          \
-       _OF_DECLARE(earlycon, name, compat, fn, void *)
+extern int setup_earlycon(char *buf);
+extern int of_setup_earlycon(const struct earlycon_id *match,
+                            unsigned long node,
+                            const char *options);
 
 struct uart_port *uart_get_console(struct uart_port *ports, int nr,
                                   struct console *c);
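
For reference, a minimal sketch of how a console driver registers an early
console against the reworked macro; the "acme,foo-uart" compatible string and
the empty setup routine are made up, the macro and struct earlycon_device are
the ones declared above.

#include <linux/init.h>
#include <linux/serial_core.h>

static int __init foo_early_console_setup(struct earlycon_device *dev,
                                          const char *options)
{
        /* A real driver would program dev->port and set dev->con->write. */
        return 0;
}
OF_EARLYCON_DECLARE(foo_uart, "acme,foo-uart", foo_early_console_setup);
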
index 11f935c1a090419d6cda938aa925bfa79de3616b..4ce9ff7086f4897a67f6037d88df21012cf28413 100644 (file)
@@ -299,6 +299,7 @@ struct sk_buff;
 #else
 #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
 #endif
+extern int sysctl_max_skb_frags;
 
 typedef struct skb_frag_struct skb_frag_t;
 
index d0cb6d189a0a02bd28459c7b143229563f23c4e4..bd51c8a9d80733aaf393adc4a57c7c5f73ade535 100644 (file)
@@ -26,6 +26,8 @@ struct qcom_smd_device {
        struct qcom_smd_channel *channel;
 };
 
+typedef int (*qcom_smd_cb_t)(struct qcom_smd_device *, const void *, size_t);
+
 /**
  * struct qcom_smd_driver - smd driver struct
  * @driver:    underlying device driver
@@ -42,7 +44,7 @@ struct qcom_smd_driver {
 
        int (*probe)(struct qcom_smd_device *dev);
        void (*remove)(struct qcom_smd_device *dev);
-       int (*callback)(struct qcom_smd_device *, const void *, size_t);
+       qcom_smd_cb_t callback;
 };
 
 int qcom_smd_driver_register(struct qcom_smd_driver *drv);
@@ -54,4 +56,8 @@ void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
 
 int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
 
+struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_device *sdev,
+                                              const char *name,
+                                              qcom_smd_cb_t cb);
+
 #endif
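
A sketch of a client using the new callback typedef together with the
secondary-channel helper; the "foo-data" channel name is invented, and the
assumption that qcom_smd_open_channel() returns an ERR_PTR on failure is
mine, only the prototypes come from the header.

#include <linux/err.h>
#include <linux/soc/qcom/smd.h>

static int foo_data_cb(struct qcom_smd_device *sdev, const void *data,
                       size_t count)
{
        /* Consume one inbound SMD packet here. */
        return 0;
}

static int foo_probe(struct qcom_smd_device *sdev)
{
        struct qcom_smd_channel *channel;

        channel = qcom_smd_open_channel(sdev, "foo-data", foo_data_cb);
        if (IS_ERR(channel))
                return PTR_ERR(channel);

        return 0;
}
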
index f35e1512fcaa1bb723da9f5825a2bc8406071fca..7b88697929e9ef6f729ce9fd1d9e512d352f8b94 100644 (file)
@@ -1,12 +1,17 @@
 #ifndef __QCOM_SMEM_STATE__
 #define __QCOM_SMEM_STATE__
 
+#include <linux/errno.h>
+
+struct device_node;
 struct qcom_smem_state;
 
 struct qcom_smem_state_ops {
        int (*update_bits)(void *, u32, u32);
 };
 
+#ifdef CONFIG_QCOM_SMEM_STATE
+
 struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
 void qcom_smem_state_put(struct qcom_smem_state *);
 
@@ -15,4 +20,34 @@ int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 val
 struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data);
 void qcom_smem_state_unregister(struct qcom_smem_state *state);
 
+#else
+
+static inline struct qcom_smem_state *qcom_smem_state_get(struct device *dev,
+       const char *con_id, unsigned *bit)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_put(struct qcom_smem_state *state)
+{
+}
+
+static inline int qcom_smem_state_update_bits(struct qcom_smem_state *state,
+       u32 mask, u32 value)
+{
+       return -EINVAL;
+}
+
+static inline struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node,
+       const struct qcom_smem_state_ops *ops, void *data)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline void qcom_smem_state_unregister(struct qcom_smem_state *state)
+{
+}
+
+#endif
+
 #endif
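
With the new !CONFIG_QCOM_SMEM_STATE stubs, a consumer can reference the API
unconditionally and only handle the error value; a minimal sketch (the "stop"
state name is illustrative).

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/smem_state.h>

static int foo_signal_stop(struct device *dev)
{
        struct qcom_smem_state *state;
        unsigned int bit;

        state = qcom_smem_state_get(dev, "stop", &bit);
        if (IS_ERR(state))
                return PTR_ERR(state);  /* -EINVAL when compiled out */

        qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
        qcom_smem_state_put(state);
        return 0;
}
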
similarity index 81%
rename from arch/arm/mach-exynos/exynos-pmu.h
rename to include/linux/soc/samsung/exynos-pmu.h
index a2ab0d52b2304868636ae1badd30936fab6c5ec4..e2e9de1acc5b789534892853f7f4b63d6d3cc0b6 100644 (file)
@@ -9,8 +9,8 @@
  * published by the Free Software Foundation.
  */
 
-#ifndef __EXYNOS_PMU_H
-#define __EXYNOS_PMU_H
+#ifndef __LINUX_SOC_EXYNOS_PMU_H
+#define __LINUX_SOC_EXYNOS_PMU_H
 
 enum sys_powerdown {
        SYS_AFTR,
@@ -21,4 +21,4 @@ enum sys_powerdown {
 
 extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
 
-#endif /* __EXYNOS_PMU_H */
+#endif /* __LINUX_SOC_EXYNOS_PMU_H */
similarity index 99%
rename from arch/arm/mach-exynos/regs-pmu.h
rename to include/linux/soc/samsung/exynos-regs-pmu.h
index 5e4f4c23b06a11d19c377d2a50af373d49d3b5bc..d30186e2b60946a41ced4fdaad421f0ad4db90ac 100644 (file)
@@ -9,8 +9,8 @@
  * published by the Free Software Foundation.
 */
 
-#ifndef __ASM_ARCH_REGS_PMU_H
-#define __ASM_ARCH_REGS_PMU_H __FILE__
+#ifndef __LINUX_SOC_EXYNOS_REGS_PMU_H
+#define __LINUX_SOC_EXYNOS_REGS_PMU_H __FILE__
 
 #define S5P_CENTRAL_SEQ_CONFIGURATION          0x0200
 
                                         | EXYNOS5420_KFC_USE_STANDBY_WFI2  \
                                         | EXYNOS5420_KFC_USE_STANDBY_WFI3)
 
-#endif /* __ASM_ARCH_REGS_PMU_H */
+#endif /* __LINUX_SOC_EXYNOS_REGS_PMU_H */
index 53be3a4c60cbe4d53388a18ef7ca96d3204b26cc..ceac6067b66958a6ebdca94551138126c1311087 100644 (file)
@@ -25,6 +25,7 @@
 struct dma_chan;
 struct spi_master;
 struct spi_transfer;
+struct spi_flash_read_message;
 
 /*
  * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -53,6 +54,10 @@ extern struct bus_type spi_bus_type;
  *
  * @transfer_bytes_histo:
  *                 transfer bytes histogramm
+ *
+ * @transfers_split_maxsize:
+ *                 number of transfers that have been split because of
+ *                 maxsize limit
  */
 struct spi_statistics {
        spinlock_t              lock; /* lock for the whole structure */
@@ -72,6 +77,8 @@ struct spi_statistics {
 
 #define SPI_STATISTICS_HISTO_SIZE 17
        unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
+
+       unsigned long transfers_split_maxsize;
 };
 
 void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
@@ -303,6 +310,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @min_speed_hz: Lowest supported transfer speed
  * @max_speed_hz: Highest supported transfer speed
  * @flags: other constraints relevant to this driver
+ * @max_transfer_size: function that returns the max transfer size for
+ *     a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
  * @bus_lock_spinlock: spinlock for SPI bus locking
  * @bus_lock_mutex: mutex for SPI bus locking
  * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -361,6 +370,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @handle_err: the subsystem calls the driver to handle an error that occurs
  *             in the generic implementation of transfer_one_message().
  * @unprepare_message: undo any work done by prepare_message().
+ * @spi_flash_read: to support spi-controller hardware that provides an
+ *                  accelerated interface to read from flash devices.
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
  *     number. Any individual value may be -ENOENT for CS lines that
  *     are not GPIOs (driven by the SPI controller itself).
@@ -369,6 +380,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @dma_rx: DMA receive channel
  * @dummy_rx: dummy receive buffer for full-duplex devices
  * @dummy_tx: dummy transmit buffer for full-duplex devices
+ * @fw_translate_cs: If the boot firmware uses a different numbering scheme
+ *     than Linux expects, this optional hook can be used to translate
+ *     between the two.
  *
  * Each SPI master controller can communicate with one or more @spi_device
  * children.  These make a small bus, sharing MOSI, MISO and SCK signals
@@ -513,6 +527,8 @@ struct spi_master {
                               struct spi_message *message);
        int (*unprepare_message)(struct spi_master *master,
                                 struct spi_message *message);
+       int (*spi_flash_read)(struct  spi_device *spi,
+                             struct spi_flash_read_message *msg);
 
        /*
         * These hooks are for drivers that use a generic implementation
@@ -537,6 +553,8 @@ struct spi_master {
        /* dummy data for full duplex devices */
        void                    *dummy_rx;
        void                    *dummy_tx;
+
+       int (*fw_translate_cs)(struct spi_master *master, unsigned cs);
 };
 
 static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -582,6 +600,37 @@ extern void spi_unregister_master(struct spi_master *master);
 
 extern struct spi_master *spi_busnum_to_master(u16 busnum);
 
+/*
+ * SPI resource management while processing a SPI message
+ */
+
+/**
+ * struct spi_res - spi resource management structure
+ * @entry:   list entry
+ * @release: release code called prior to freeing this resource
+ * @data:    extra data allocated for the specific use-case
+ *
+ * this is based on ideas from devres, but focused on life-cycle
+ * management during spi_message processing
+ */
+typedef void (*spi_res_release_t)(struct spi_master *master,
+                                 struct spi_message *msg,
+                                 void *res);
+struct spi_res {
+       struct list_head        entry;
+       spi_res_release_t       release;
+       unsigned long long      data[]; /* guarantee ull alignment */
+};
+
+extern void *spi_res_alloc(struct spi_device *spi,
+                          spi_res_release_t release,
+                          size_t size, gfp_t gfp);
+extern void spi_res_add(struct spi_message *message, void *res);
+extern void spi_res_free(void *res);
+
+extern void spi_res_release(struct spi_master *master,
+                           struct spi_message *message);
+
 /*---------------------------------------------------------------------------*/
 
 /*
@@ -720,6 +769,7 @@ struct spi_transfer {
  * @status: zero for success, else negative errno
  * @queue: for use by whichever driver currently owns the message
  * @state: for use by whichever driver currently owns the message
+ * @resources: for resource management when the spi message is processed
  *
  * A @spi_message is used to execute an atomic sequence of data transfers,
  * each represented by a struct spi_transfer.  The sequence is "atomic"
@@ -766,11 +816,15 @@ struct spi_message {
         */
        struct list_head        queue;
        void                    *state;
+
+       /* list of spi_res resources when the spi message is processed */
+       struct list_head        resources;
 };
 
 static inline void spi_message_init_no_memset(struct spi_message *m)
 {
        INIT_LIST_HEAD(&m->transfers);
+       INIT_LIST_HEAD(&m->resources);
 }
 
 static inline void spi_message_init(struct spi_message *m)
@@ -854,6 +908,60 @@ spi_max_transfer_size(struct spi_device *spi)
 
 /*---------------------------------------------------------------------------*/
 
+/* SPI transfer replacement methods which make use of spi_res */
+
+/**
+ * struct spi_replaced_transfers - structure describing the spi_transfer
+ *                                 replacements that have occurred
+ *                                 so that they can get reverted
+ * @release:            some extra release code to get executed prior to
+ *                      releasing this structure
+ * @extradata:          pointer to some extra data if requested or NULL
+ * @replaced_transfers: transfers that have been replaced and which need
+ *                      to get restored
+ * @replaced_after:     the transfer after which the @replaced_transfers
+ *                      are to get re-inserted
+ * @inserted:           number of transfers inserted
+ * @inserted_transfers: array of spi_transfers, of size @inserted,
+ *                      that have replaced the @replaced_transfers
+ *
+ * Note that @extradata will point to @inserted_transfers[@inserted]
+ * if some extra allocation is requested, so alignment will be the same
+ * as for spi_transfers
+ */
+struct spi_replaced_transfers;
+typedef void (*spi_replaced_release_t)(struct spi_master *master,
+                                      struct spi_message *msg,
+                                      struct spi_replaced_transfers *res);
+struct spi_replaced_transfers {
+       spi_replaced_release_t release;
+       void *extradata;
+       struct list_head replaced_transfers;
+       struct list_head *replaced_after;
+       size_t inserted;
+       struct spi_transfer inserted_transfers[];
+};
+
+extern struct spi_replaced_transfers *spi_replace_transfers(
+       struct spi_message *msg,
+       struct spi_transfer *xfer_first,
+       size_t remove,
+       size_t insert,
+       spi_replaced_release_t release,
+       size_t extradatasize,
+       gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
+/* SPI transfer transformation methods */
+
+extern int spi_split_transfers_maxsize(struct spi_master *master,
+                                      struct spi_message *msg,
+                                      size_t maxsize,
+                                      gfp_t gfp);
+
+/*---------------------------------------------------------------------------*/
+
 /* All these synchronous SPI transfer routines are utilities layered
  * over the core async transfer primitive.  Here, "synchronous" means
  * they will sleep uninterruptibly until the async transfer completes.
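
A hedged example of where the split helper is meant to be called from: a
controller driver whose DMA engine can only move a limited amount per
transfer could do this in its prepare_message() hook.  The 64 KiB limit and
the foo_ name are invented; spi_split_transfers_maxsize() is the declaration
above, and the replaced transfers are tracked on the message (via spi_res)
so the core can revert them when the message finishes.

#include <linux/gfp.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>

static int foo_prepare_message(struct spi_master *master,
                               struct spi_message *msg)
{
        /* Break any transfer larger than the hardware limit into pieces. */
        return spi_split_transfers_maxsize(master, msg, SZ_64K, GFP_KERNEL);
}
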
@@ -1019,6 +1127,42 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
        return be16_to_cpu(result);
 }
 
+/**
+ * struct spi_flash_read_message - flash specific information for
+ * spi-masters that provide accelerated flash read interfaces
+ * @buf: buffer to read data
+ * @from: offset within the flash from where data is to be read
+ * @len: length of data to be read
+ * @retlen: actual length of data read
+ * @read_opcode: read_opcode to be used to communicate with flash
+ * @addr_width: number of address bytes
+ * @dummy_bytes: number of dummy bytes
+ * @opcode_nbits: number of lines to send opcode
+ * @addr_nbits: number of lines to send address
+ * @data_nbits: number of lines for data
+ */
+struct spi_flash_read_message {
+       void *buf;
+       loff_t from;
+       size_t len;
+       size_t retlen;
+       u8 read_opcode;
+       u8 addr_width;
+       u8 dummy_bytes;
+       u8 opcode_nbits;
+       u8 addr_nbits;
+       u8 data_nbits;
+};
+
+/* SPI core interface for flash read support */
+static inline bool spi_flash_read_supported(struct spi_device *spi)
+{
+       return spi->master->spi_flash_read ? true : false;
+}
+
+int spi_flash_read(struct spi_device *spi,
+                  struct spi_flash_read_message *msg);
+
 /*---------------------------------------------------------------------------*/
 
 /*
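
A sketch of how a flash driver might opt into the accelerated read path when
the controller provides one; the 0x03 READ opcode, 3-byte addressing and the
-EOPNOTSUPP fallback are assumptions for illustration, the struct and the two
calls are the ones added above.

#include <linux/errno.h>
#include <linux/spi/spi.h>

static int foo_flash_read(struct spi_device *spi, loff_t from, size_t len,
                          size_t *retlen, void *buf)
{
        struct spi_flash_read_message msg = {
                .buf            = buf,
                .from           = from,
                .len            = len,
                .read_opcode    = 0x03, /* classic slow READ */
                .addr_width     = 3,
        };
        int ret;

        if (!spi_flash_read_supported(spi))
                return -EOPNOTSUPP;     /* fall back to normal transfers */

        ret = spi_flash_read(spi, &msg);
        if (!ret)
                *retlen = msg.retlen;
        return ret;
}
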
index f5f80c5643ac5669abbb1e3e736ded2f49b5f3a0..dc8eb63c6568a8b0d2b9a2d793f80f255f3fc570 100644 (file)
@@ -99,8 +99,23 @@ void process_srcu(struct work_struct *work);
        }
 
 /*
- * define and init a srcu struct at build time.
- * dont't call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique.  These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function.  If these rules are too
+ * restrictive, declare the srcu_struct manually.  For example, in
+ * each file:
+ *
+ *     static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ *     init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
  */
 #define __DEFINE_SRCU(name, is_static)                                 \
        static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
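
A compact illustration of the build-time option the expanded comment
contrasts with manual initialization; the reader function is a stub.

#include <linux/srcu.h>

/* Build-time variant: file scope only, and the name must be globally
 * unique because of the underlying per-CPU variable. */
DEFINE_STATIC_SRCU(foo_srcu);

static void foo_reader(void)
{
        int idx = srcu_read_lock(&foo_srcu);
        /* ... dereference foo_srcu-protected pointers here ... */
        srcu_read_unlock(&foo_srcu, idx);
}
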
index df02a41884874f68dfb2aded42a2d38658eaff06..7df625d41e35892b5c785faaaa744aa593f0af33 100644 (file)
@@ -36,7 +36,7 @@
  *
  */
 
-#include <linux/crypto.h>
+#include <crypto/skcipher.h>
 #include <linux/sunrpc/auth_gss.h>
 #include <linux/sunrpc/gss_err.h>
 #include <linux/sunrpc/gss_asn1.h>
@@ -71,10 +71,10 @@ struct gss_krb5_enctype {
        const u32               keyed_cksum;    /* is it a keyed cksum? */
        const u32               keybytes;       /* raw key len, in bytes */
        const u32               keylength;      /* final key len, in bytes */
-       u32 (*encrypt) (struct crypto_blkcipher *tfm,
+       u32 (*encrypt) (struct crypto_skcipher *tfm,
                        void *iv, void *in, void *out,
                        int length);            /* encryption function */
-       u32 (*decrypt) (struct crypto_blkcipher *tfm,
+       u32 (*decrypt) (struct crypto_skcipher *tfm,
                        void *iv, void *in, void *out,
                        int length);            /* decryption function */
        u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
@@ -98,12 +98,12 @@ struct krb5_ctx {
        u32                     enctype;
        u32                     flags;
        const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
-       struct crypto_blkcipher *enc;
-       struct crypto_blkcipher *seq;
-       struct crypto_blkcipher *acceptor_enc;
-       struct crypto_blkcipher *initiator_enc;
-       struct crypto_blkcipher *acceptor_enc_aux;
-       struct crypto_blkcipher *initiator_enc_aux;
+       struct crypto_skcipher  *enc;
+       struct crypto_skcipher  *seq;
+       struct crypto_skcipher *acceptor_enc;
+       struct crypto_skcipher *initiator_enc;
+       struct crypto_skcipher *acceptor_enc_aux;
+       struct crypto_skcipher *initiator_enc_aux;
        u8                      Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
        u8                      cksum[GSS_KRB5_MAX_KEYLEN];
        s32                     endtime;
@@ -262,24 +262,24 @@ gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset,
 
 
 u32
-krb5_encrypt(struct crypto_blkcipher *key,
+krb5_encrypt(struct crypto_skcipher *key,
             void *iv, void *in, void *out, int length);
 
 u32
-krb5_decrypt(struct crypto_blkcipher *key,
+krb5_decrypt(struct crypto_skcipher *key,
             void *iv, void *in, void *out, int length); 
 
 int
-gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *outbuf,
+gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *outbuf,
                    int offset, struct page **pages);
 
 int
-gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *inbuf,
+gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *inbuf,
                    int offset);
 
 s32
 krb5_make_seq_num(struct krb5_ctx *kctx,
-               struct crypto_blkcipher *key,
+               struct crypto_skcipher *key,
                int direction,
                u32 seqnum, unsigned char *cksum, unsigned char *buf);
 
@@ -320,12 +320,12 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset,
 
 int
 krb5_rc4_setup_seq_key(struct krb5_ctx *kctx,
-                      struct crypto_blkcipher *cipher,
+                      struct crypto_skcipher *cipher,
                       unsigned char *cksum);
 
 int
 krb5_rc4_setup_enc_key(struct krb5_ctx *kctx,
-                      struct crypto_blkcipher *cipher,
+                      struct crypto_skcipher *cipher,
                       s32 seqnum);
 void
 gss_krb5_make_confounder(char *p, u32 conflen);
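
The blkcipher-to-skcipher conversion above follows the request-based pattern
sketched below.  This is illustrative only: the tfm is assumed to have been
allocated by the caller with crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC)
(i.e. synchronous-only, mirroring the old blkcipher behaviour) and keyed with
crypto_skcipher_setkey().

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int foo_encrypt_in_place(struct crypto_skcipher *tfm, void *iv,
                                void *buf, unsigned int len)
{
        struct scatterlist sg;
        int ret;
        SKCIPHER_REQUEST_ON_STACK(req, tfm);

        sg_init_one(&sg, buf, len);
        skcipher_request_set_tfm(req, tfm);
        skcipher_request_set_callback(req, 0, NULL, NULL);
        skcipher_request_set_crypt(req, &sg, &sg, len, iv);

        ret = crypto_skcipher_encrypt(req);

        skcipher_request_zero(req);
        return ret;
}
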
index e7a018eaf3a255db2198049f543ea3111004852a..017fced60242ecdf5eaf9daeb0b035e18333d6e3 100644 (file)
@@ -1,10 +1,13 @@
 #ifndef __LINUX_SWIOTLB_H
 #define __LINUX_SWIOTLB_H
 
+#include <linux/dma-direction.h>
+#include <linux/init.h>
 #include <linux/types.h>
 
 struct device;
 struct dma_attrs;
+struct page;
 struct scatterlist;
 
 extern int swiotlb_force;
index b386361ba3e87226c329924bc1992252fcf0b9d6..d909feeeaea25f437505734f24129c54d86719de 100644 (file)
@@ -256,6 +256,7 @@ struct tcp_sock {
        u32     prr_delivered;  /* Number of newly delivered packets to
                                 * receiver in Recovery. */
        u32     prr_out;        /* Total number of pkts sent during Recovery. */
+       u32     delivered;      /* Total data packets delivered incl. rexmits */
 
        u32     rcv_wnd;        /* Current receiver window              */
        u32     write_seq;      /* Tail(+1) of data held in tcp send buffer */
index 613c29bd6baf4763e57530b794df843d9cea61ee..e13a1ace50e902ad0ae38a81f5563d67eaa4a00f 100644 (file)
@@ -43,6 +43,9 @@
 /* Default weight of a bound cooling device */
 #define THERMAL_WEIGHT_DEFAULT 0
 
+/* Use a value below 0 K to indicate an invalid/uninitialized temperature */
+#define THERMAL_TEMP_INVALID   -274000
+
 /* Unit conversion macros */
 #define DECI_KELVIN_TO_CELSIUS(t)      ({                      \
        long _t = (t);                                          \
@@ -167,6 +170,7 @@ struct thermal_attr {
  * @forced_passive:    If > 0, temperature at which to switch on all ACPI
  *                     processor cooling devices.  Currently only used by the
  *                     step-wise governor.
+ * @need_update:       if set to 1, thermal_zone_device_update() needs to be invoked.
  * @ops:       operations this &thermal_zone_device supports
  * @tzp:       thermal zone parameters
  * @governor:  pointer to the governor for this thermal zone
@@ -194,6 +198,7 @@ struct thermal_zone_device {
        int emul_temperature;
        int passive;
        unsigned int forced_passive;
+       atomic_t need_update;
        struct thermal_zone_device_ops *ops;
        struct thermal_zone_params *tzp;
        struct thermal_governor *governor;
index 2fd8708ea88893cf57e03f0ae23688ac42c9d24c..3b09f235db6668b932792f32890d0bbee942e8cb 100644 (file)
@@ -302,6 +302,7 @@ struct tty_struct {
        struct work_struct hangup_work;
        void *disc_data;
        void *driver_data;
+       spinlock_t files_lock;          /* protects tty_files list */
        struct list_head tty_files;
 
 #define N_TTY_BUF_SIZE 4096
@@ -336,7 +337,6 @@ struct tty_file_private {
 #define TTY_IO_ERROR           1       /* Cause an I/O error (may be no ldisc too) */
 #define TTY_OTHER_CLOSED       2       /* Other side (if any) has closed */
 #define TTY_EXCLUSIVE          3       /* Exclusive open mode */
-#define TTY_DEBUG              4       /* Debugging */
 #define TTY_DO_WRITE_WAKEUP    5       /* Call write_wakeup after queuing new */
 #define TTY_OTHER_DONE         6       /* Closed pty has completed input processing */
 #define TTY_LDISC_OPEN         11      /* Line discipline is open */
@@ -433,8 +433,6 @@ extern struct device *tty_register_device_attr(struct tty_driver *driver,
                                void *drvdata,
                                const struct attribute_group **attr_grp);
 extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
-extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
-                            int buflen);
 extern void tty_write_message(struct tty_struct *tty, char *msg);
 extern int tty_send_xchar(struct tty_struct *tty, char ch);
 extern int tty_put_char(struct tty_struct *tty, unsigned char c);
@@ -446,12 +444,7 @@ extern void tty_unthrottle(struct tty_struct *tty);
 extern int tty_throttle_safe(struct tty_struct *tty);
 extern int tty_unthrottle_safe(struct tty_struct *tty);
 extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
-extern void tty_driver_remove_tty(struct tty_driver *driver,
-                                 struct tty_struct *tty);
-extern void tty_free_termios(struct tty_struct *tty);
 extern int is_current_pgrp_orphaned(void);
-extern int is_ignored(int sig);
-extern int tty_signal(int sig, struct tty_struct *tty);
 extern void tty_hangup(struct tty_struct *tty);
 extern void tty_vhangup(struct tty_struct *tty);
 extern int tty_hung_up_p(struct file *filp);
@@ -493,7 +486,8 @@ extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
 extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
 extern void tty_ldisc_deref(struct tty_ldisc *);
 extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
-extern void tty_ldisc_hangup(struct tty_struct *tty);
+extern void tty_ldisc_hangup(struct tty_struct *tty, bool reset);
+extern int tty_ldisc_reinit(struct tty_struct *tty, int disc);
 extern const struct file_operations tty_ldiscs_proc_fops;
 
 extern void tty_wakeup(struct tty_struct *tty);
@@ -508,16 +502,13 @@ extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx);
 extern int tty_alloc_file(struct file *file);
 extern void tty_add_file(struct tty_struct *tty, struct file *file);
 extern void tty_free_file(struct file *file);
-extern void free_tty_struct(struct tty_struct *tty);
-extern void deinitialize_tty_struct(struct tty_struct *tty);
 extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx);
 extern int tty_release(struct inode *inode, struct file *filp);
-extern int tty_init_termios(struct tty_struct *tty);
+extern void tty_init_termios(struct tty_struct *tty);
 extern int tty_standard_install(struct tty_driver *driver,
                struct tty_struct *tty);
 
 extern struct mutex tty_mutex;
-extern spinlock_t tty_files_lock;
 
 #define tty_is_writelocked(tty)  (mutex_is_locked(&tty->atomic_write_lock))
 
@@ -575,43 +566,29 @@ static inline int tty_port_users(struct tty_port *port)
 
 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
 extern int tty_unregister_ldisc(int disc);
-extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
+extern int tty_set_ldisc(struct tty_struct *tty, int disc);
 extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
 extern void tty_ldisc_release(struct tty_struct *tty);
 extern void tty_ldisc_init(struct tty_struct *tty);
 extern void tty_ldisc_deinit(struct tty_struct *tty);
-extern void tty_ldisc_begin(void);
-
-static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
-                                       char *f, int count)
-{
-       if (ld->ops->receive_buf2)
-               count = ld->ops->receive_buf2(ld->tty, p, f, count);
-       else {
-               count = min_t(int, count, ld->tty->receive_room);
-               if (count)
-                       ld->ops->receive_buf(ld->tty, p, f, count);
-       }
-       return count;
-}
-
+extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+                                char *f, int count);
 
 /* n_tty.c */
-extern struct tty_ldisc_ops tty_ldisc_N_TTY;
 extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+extern void __init n_tty_init(void);
 
 /* tty_audit.c */
 #ifdef CONFIG_AUDIT
 extern void tty_audit_add_data(struct tty_struct *tty, const void *data,
-                              size_t size, unsigned icanon);
+                              size_t size);
 extern void tty_audit_exit(void);
 extern void tty_audit_fork(struct signal_struct *sig);
 extern void tty_audit_tiocsti(struct tty_struct *tty, char ch);
-extern void tty_audit_push(struct tty_struct *tty);
-extern int tty_audit_push_current(void);
+extern int tty_audit_push(void);
 #else
 static inline void tty_audit_add_data(struct tty_struct *tty, const void *data,
-                                     size_t size, unsigned icanon)
+                                     size_t size)
 {
 }
 static inline void tty_audit_tiocsti(struct tty_struct *tty, char ch)
@@ -623,10 +600,7 @@ static inline void tty_audit_exit(void)
 static inline void tty_audit_fork(struct signal_struct *sig)
 {
 }
-static inline void tty_audit_push(struct tty_struct *tty)
-{
-}
-static inline int tty_audit_push_current(void)
+static inline int tty_audit_push(void)
 {
        return 0;
 }
@@ -648,10 +622,11 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
 
 /* tty_mutex.c */
 /* functions for preparation of BKL removal */
-extern void __lockfunc tty_lock(struct tty_struct *tty);
-extern void __lockfunc tty_unlock(struct tty_struct *tty);
-extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
-extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
+extern void tty_lock(struct tty_struct *tty);
+extern int  tty_lock_interruptible(struct tty_struct *tty);
+extern void tty_unlock(struct tty_struct *tty);
+extern void tty_lock_slave(struct tty_struct *tty);
+extern void tty_unlock_slave(struct tty_struct *tty);
 extern void tty_set_lock_subclass(struct tty_struct *tty);
 
 #ifdef CONFIG_PROC_FS
index 00c9d688d7b7189cc4e15c266f29296cb3204145..3971cf0eb467c0e1985011cc76db2e9f9484a274 100644 (file)
  *     buffers of any input characters it may have queued to be
  *     delivered to the user mode process.
  *
- * ssize_t (*chars_in_buffer)(struct tty_struct *tty);
- *
- *     This function returns the number of input characters the line
- *     discipline may have queued up to be delivered to the user mode
- *     process.
- *
  * ssize_t (*read)(struct tty_struct * tty, struct file * file,
  *                unsigned char * buf, size_t nr);
  *
  *     seek to perform this action quickly but should wait until
  *     any pending driver I/O is completed.
  *
- * void (*fasync)(struct tty_struct *, int on)
- *
- *     Notify line discipline when signal-driven I/O is enabled or
- *     disabled.
- *
  * void (*dcd_change)(struct tty_struct *tty, unsigned int status)
  *
  *     Tells the discipline that the DCD pin has changed its status.
@@ -188,7 +177,6 @@ struct tty_ldisc_ops {
        int     (*open)(struct tty_struct *);
        void    (*close)(struct tty_struct *);
        void    (*flush_buffer)(struct tty_struct *tty);
-       ssize_t (*chars_in_buffer)(struct tty_struct *tty);
        ssize_t (*read)(struct tty_struct *tty, struct file *file,
                        unsigned char __user *buf, size_t nr);
        ssize_t (*write)(struct tty_struct *tty, struct file *file,
@@ -209,7 +197,6 @@ struct tty_ldisc_ops {
                               char *fp, int count);
        void    (*write_wakeup)(struct tty_struct *);
        void    (*dcd_change)(struct tty_struct *, unsigned int);
-       void    (*fasync)(struct tty_struct *tty, int on);
        int     (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
                                char *fp, int count);
 
index 89533ba38691a75ae70523d5f56bcf0b57a22c80..dc0ea0de8a81d0a301dce453d16f78a66dff1c51 100644 (file)
@@ -375,7 +375,6 @@ struct usb_bus {
        struct usb_devmap devmap;       /* device address allocation map */
        struct usb_device *root_hub;    /* Root hub */
        struct usb_bus *hs_companion;   /* Companion EHCI bus, if any */
-       struct list_head bus_list;      /* list of busses */
 
        struct mutex usb_address0_mutex; /* unaddressed device mutex */
 
@@ -642,9 +641,10 @@ extern struct usb_device *usb_hub_find_child(struct usb_device *hdev,
                if (!child) continue; else
 
 /* USB device locking */
-#define usb_lock_device(udev)          device_lock(&(udev)->dev)
-#define usb_unlock_device(udev)                device_unlock(&(udev)->dev)
-#define usb_trylock_device(udev)       device_trylock(&(udev)->dev)
+#define usb_lock_device(udev)                  device_lock(&(udev)->dev)
+#define usb_unlock_device(udev)                        device_unlock(&(udev)->dev)
+#define usb_lock_device_interruptible(udev)    device_lock_interruptible(&(udev)->dev)
+#define usb_trylock_device(udev)               device_trylock(&(udev)->dev)
 extern int usb_lock_device_for_reset(struct usb_device *udev,
                                     const struct usb_interface *iface);
 
index 4dcf8446dbcdac6203f7f825719aac83967a19a7..b98f831dcda32f7eb27b5df41242613f44e08474 100644 (file)
@@ -23,6 +23,7 @@
 
 #include <linux/rwsem.h>
 #include <linux/interrupt.h>
+#include <linux/idr.h>
 
 #define MAX_TOPO_LEVEL         6
 
@@ -630,8 +631,8 @@ extern void usb_set_device_state(struct usb_device *udev,
 
 /* exported only within usbcore */
 
-extern struct list_head usb_bus_list;
-extern struct mutex usb_bus_list_lock;
+extern struct idr usb_bus_idr;
+extern struct mutex usb_bus_idr_lock;
 extern wait_queue_head_t usb_kill_urb_queue;
 
 
index e159b39f67a2f4645d953e25264bff4aae5eb3bd..974c3796a23fc70b23e5873e1c5151cca6a7acd4 100644 (file)
@@ -22,6 +22,7 @@
 #define USB_AHBBURST         (MSM_USB_BASE + 0x0090)
 #define USB_AHBMODE          (MSM_USB_BASE + 0x0098)
 #define USB_GENCONFIG_2      (MSM_USB_BASE + 0x00a0)
+#define ULPI_TX_PKT_EN_CLR_FIX BIT(19)
 
 #define USB_CAPLENGTH        (MSM_USB_BASE + 0x0100) /* 8 bit */
 
index cb33fff2ba0be33f3aa6b764e78b402f948e9c4d..305ee8db7faf38617b1c9d71045732a230bc14b1 100644 (file)
@@ -45,9 +45,9 @@
 
 #define USB_PR_DEVICE  0xff            /* Use device's value */
 
- /*
- * Bulk only data structures
- */
+/*
+ * Bulk only data structures
+ */
 
 /* command block wrapper */
 struct bulk_cb_wrap {
@@ -56,18 +56,18 @@ struct bulk_cb_wrap {
        __le32  DataTransferLength;     /* size of data */
        __u8    Flags;                  /* direction in bit 0 */
        __u8    Lun;                    /* LUN normally 0 */
-       __u8    Length;                 /* of of the CDB */
+       __u8    Length;                 /* length of the CDB */
        __u8    CDB[16];                /* max command */
 };
 
 #define US_BULK_CB_WRAP_LEN    31
-#define US_BULK_CB_SIGN                0x43425355      /*spells out USBC */
+#define US_BULK_CB_SIGN                0x43425355      /* spells out 'USBC' */
 #define US_BULK_FLAG_IN                (1 << 7)
 #define US_BULK_FLAG_OUT       0
 
 /* command status wrapper */
 struct bulk_cs_wrap {
-       __le32  Signature;      /* should = 'USBS' */
+       __le32  Signature;      /* contains 'USBS' */
        __u32   Tag;            /* same as original command */
        __le32  Residue;        /* amount not transferred */
        __u8    Status;         /* see below */
index 69e1d4a1f1b3d122c53ec22eb13543219502c881..b39a5f3153bd3f1754d81e1914673a281a14814c 100644 (file)
 
 struct pci_dev;
 
+/**
+ * enum vga_switcheroo_handler_flags_t - handler flags bitmask
+ * @VGA_SWITCHEROO_CAN_SWITCH_DDC: whether the handler is able to switch the
+ *     DDC lines separately. This signals to clients that they should call
+ *     drm_get_edid_switcheroo() to probe the EDID
+ * @VGA_SWITCHEROO_NEEDS_EDP_CONFIG: whether the handler is unable to switch
+ *     the AUX channel separately. This signals to clients that the active
+ *     GPU needs to train the link and communicate the link parameters to the
+ *     inactive GPU (mediated by vga_switcheroo). The inactive GPU may then
+ *     skip the AUX handshake and set up its output with these pre-calibrated
+ *     values (DisplayPort specification v1.1a, section 2.5.3.3)
+ *
+ * Handler flags bitmask. Used by handlers to declare their capabilities upon
+ * registering with vga_switcheroo.
+ */
+enum vga_switcheroo_handler_flags_t {
+       VGA_SWITCHEROO_CAN_SWITCH_DDC   = (1 << 0),
+       VGA_SWITCHEROO_NEEDS_EDP_CONFIG = (1 << 1),
+};
+
 /**
  * enum vga_switcheroo_state - client power state
  * @VGA_SWITCHEROO_OFF: off
@@ -82,6 +102,9 @@ enum vga_switcheroo_client_id {
  *     Mandatory. For muxless machines this should be a no-op. Returning 0
  *     denotes success, anything else failure (in which case the switch is
  *     aborted)
+ * @switch_ddc: switch DDC lines to given client.
+ *     Optional. Should return the previous DDC owner on success or a
+ *     negative int on failure
  * @power_state: cut or reinstate power of given client.
  *     Optional. The return value is ignored
  * @get_client_id: determine if given pci device is integrated or discrete GPU.
@@ -93,6 +116,7 @@ enum vga_switcheroo_client_id {
 struct vga_switcheroo_handler {
        int (*init)(void);
        int (*switchto)(enum vga_switcheroo_client_id id);
+       int (*switch_ddc)(enum vga_switcheroo_client_id id);
        int (*power_state)(enum vga_switcheroo_client_id id,
                           enum vga_switcheroo_state state);
        enum vga_switcheroo_client_id (*get_client_id)(struct pci_dev *pdev);
@@ -132,8 +156,12 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
 void vga_switcheroo_client_fb_set(struct pci_dev *dev,
                                  struct fb_info *info);
 
-int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler);
+int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+                                   enum vga_switcheroo_handler_flags_t handler_flags);
 void vga_switcheroo_unregister_handler(void);
+enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void);
+int vga_switcheroo_lock_ddc(struct pci_dev *pdev);
+int vga_switcheroo_unlock_ddc(struct pci_dev *pdev);
 
 int vga_switcheroo_process_delayed_switch(void);
 
@@ -150,11 +178,15 @@ static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
 static inline int vga_switcheroo_register_client(struct pci_dev *dev,
                const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
 static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
-static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler) { return 0; }
+static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_handler *handler,
+               enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
 static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
        const struct vga_switcheroo_client_ops *ops,
        enum vga_switcheroo_client_id id) { return 0; }
 static inline void vga_switcheroo_unregister_handler(void) {}
+static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
+static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
+static inline int vga_switcheroo_unlock_ddc(struct pci_dev *pdev) { return -ENODEV; }
 static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
 static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
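A sketch of how a client driver might use the new DDC lock entry points when the handler reports VGA_SWITCHEROO_CAN_SWITCH_DDC (connector and adapter are assumed; in practice drivers are expected to go through drm_get_edid_switcheroo()):

	if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) {
		vga_switcheroo_lock_ddc(pdev);		/* mux DDC to this GPU */
		edid = drm_get_edid(connector, adapter);
		vga_switcheroo_unlock_ddc(pdev);	/* hand DDC back */
	}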
 
index 0e32bc71245ef46b90aa21112e4d2bef42cc5950..ca73c503b92a758ad5ce9b6d022c53c0758951c5 100644 (file)
@@ -311,6 +311,7 @@ enum {
 
        __WQ_DRAINING           = 1 << 16, /* internal: workqueue is draining */
        __WQ_ORDERED            = 1 << 17, /* internal: workqueue is ordered */
+       __WQ_LEGACY             = 1 << 18, /* internal: create*_workqueue() */
 
        WQ_MAX_ACTIVE           = 512,    /* I like 512, better ideas? */
        WQ_MAX_UNBOUND_PER_CPU  = 4,      /* 4 * #cpus for unbound wq */
@@ -411,12 +412,12 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
        alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
 
 #define create_workqueue(name)                                         \
-       alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
+       alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
 #define create_freezable_workqueue(name)                               \
-       alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
-                       1, (name))
+       alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
+                       WQ_MEM_RECLAIM, 1, (name))
 #define create_singlethread_workqueue(name)                            \
-       alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+       alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
 
 extern void destroy_workqueue(struct workqueue_struct *wq);
 
index e5321fda548935e1bab99fbce565dc7a018d52e9..b3edc14e763f020cf4500d0965d118cc572b3569 100644 (file)
 #ifdef __KERNEL__
 
 #include <linux/videodev2.h>
-
-/* Tuner PADs */
-/* FIXME: is this the right place for it? */
-enum tuner_pad_index {
-       TUNER_PAD_RF_INPUT,
-       TUNER_PAD_IF_OUTPUT,
-       TUNER_NUM_PADS
-};
+#include <media/v4l2-mc.h>
 
 #define ADDR_UNSET (255)
 
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
new file mode 100644 (file)
index 0000000..df11519
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * v4l2-mc.h - Media Controller V4L2 types and prototypes
+ *
+ * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/**
+ * enum tuner_pad_index - tuner pad index for MEDIA_ENT_F_TUNER
+ *
+ * @TUNER_PAD_RF_INPUT:        Radiofrequency (RF) sink pad, usually linked to an
+ *                     RF connector entity.
+ * @TUNER_PAD_OUTPUT:  Tuner video output source pad. Contains the video
+ *                     chrominance and luminance or the whole bandwidth
+ *                     of the signal converted to an Intermediate Frequency
+ *                     (IF) or to baseband (on zero-IF tuners).
+ * @TUNER_PAD_AUD_OUT: Tuner audio output source pad. Tuners used to decode
+ *                     analog TV signals have an extra pad for audio output.
+ *                     Old tuners use an analog stage with a SAW filter for
+ *                     the audio IF frequency. The output of the pad is, in
+ *                     this case, the audio IF, which should be decoded either
+ *                     by the bridge chipset (as is the case with cx2388x
+ *                     chipsets) or by an external IF sound processor, like
+ *                     the msp34xx. On modern silicon tuners, the audio IF
+ *                     decoder is usually integrated into the tuner. In that
+ *                     case, the output of this pad is sampled audio data.
+ * @TUNER_NUM_PADS:    Number of pads of the tuner.
+ */
+enum tuner_pad_index {
+       TUNER_PAD_RF_INPUT,
+       TUNER_PAD_OUTPUT,
+       TUNER_PAD_AUD_OUT,
+       TUNER_NUM_PADS
+};
+
+/**
+ * enum if_vid_dec_index - video IF-PLL pad index for
+ *                        MEDIA_ENT_F_IF_VID_DECODER
+ *
+ * @IF_VID_DEC_PAD_IF_INPUT:   video Intermediate Frequency (IF) sink pad
+ * @IF_VID_DEC_PAD_OUT:                IF-PLL video output source pad. Contains the
+ *                             video chrominance and luminance IF signals.
+ * @IF_VID_DEC_PAD_NUM_PADS:   Number of pads of the video IF-PLL.
+ */
+enum if_vid_dec_pad_index {
+       IF_VID_DEC_PAD_IF_INPUT,
+       IF_VID_DEC_PAD_OUT,
+       IF_VID_DEC_PAD_NUM_PADS
+};
+
+/**
+ * enum if_aud_dec_index - audio/sound IF-PLL pad index for
+ *                        MEDIA_ENT_F_IF_AUD_DECODER
+ *
+ * @IF_AUD_DEC_PAD_IF_INPUT:   audio Intermediate Frequency (IF) sink pad
+ * @IF_AUD_DEC_PAD_OUT:                IF-PLL audio output source pad. Contains the
+ *                             audio sampled stream data, usually connected
+ *                             to the bridge bus via an Inter-IC Sound (I2S)
+ *                             bus.
+ * @IF_AUD_DEC_PAD_NUM_PADS:   Number of pads of the audio IF-PLL.
+ */
+enum if_aud_dec_pad_index {
+       IF_AUD_DEC_PAD_IF_INPUT,
+       IF_AUD_DEC_PAD_OUT,
+       IF_AUD_DEC_PAD_NUM_PADS
+};
+
+/**
+ * enum demod_pad_index - analog TV pad index for MEDIA_ENT_F_ATV_DECODER
+ *
+ * @DEMOD_PAD_IF_INPUT:        IF input sink pad.
+ * @DEMOD_PAD_VID_OUT: Video output source pad.
+ * @DEMOD_PAD_VBI_OUT: Vertical Blank Interface (VBI) output source pad.
+ * @DEMOD_NUM_PADS:    Maximum number of output pads.
+ */
+enum demod_pad_index {
+       DEMOD_PAD_IF_INPUT,
+       DEMOD_PAD_VID_OUT,
+       DEMOD_PAD_VBI_OUT,
+       DEMOD_NUM_PADS
+};
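A sketch of how a tuner sub-device driver could wire these pad indexes into the media controller (the sd and ret variables are assumed, error handling omitted):

	struct media_pad pads[TUNER_NUM_PADS];

	pads[TUNER_PAD_RF_INPUT].flags = MEDIA_PAD_FL_SINK;
	pads[TUNER_PAD_OUTPUT].flags   = MEDIA_PAD_FL_SOURCE;
	pads[TUNER_PAD_AUD_OUT].flags  = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_pads_init(&sd->entity, TUNER_NUM_PADS, pads);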
index ef03ae56b1c1cc18d9541697b353a01842b50210..8a0f55b6c2ba80e25c67caf89e2050fa58ae322f 100644 (file)
@@ -533,7 +533,8 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
                const unsigned int requested_sizes[]);
 int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb);
 int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb);
-int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking);
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+                  bool nonblocking);
 
 int vb2_core_streamon(struct vb2_queue *q, unsigned int type);
 int vb2_core_streamoff(struct vb2_queue *q, unsigned int type);
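Callers of vb2_core_dqbuf() gain an optional output for the dequeued buffer index; callers that do not need it can pass NULL (sketch):

	ret = vb2_core_dqbuf(q, NULL, pb, nonblocking);	/* index not needed */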
index 2a91a0561a478393ca9e9d2f1993467cc4c5c9cb..9b4c418bebd84ae0a7debfbcc697ee92b136ec99 100644 (file)
@@ -6,8 +6,8 @@
 #include <linux/mutex.h>
 #include <net/sock.h>
 
-void unix_inflight(struct file *fp);
-void unix_notinflight(struct file *fp);
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
 void unix_gc(void);
 void wait_for_unix_gc(void);
 struct sock *unix_get_socket(struct file *filp);
index d4f82edb5cffe0528719dc416e8b28b71460e485..dc71473462ac9f60680dad7c7cce8e9878b1a8a5 100644 (file)
@@ -25,6 +25,7 @@
 #ifndef __HCI_CORE_H
 #define __HCI_CORE_H
 
+#include <linux/leds.h>
 #include <net/bluetooth/hci.h>
 #include <net/bluetooth/hci_sock.h>
 
@@ -396,6 +397,8 @@ struct hci_dev {
        struct delayed_work     rpa_expired;
        bdaddr_t                rpa;
 
+       struct led_trigger      *power_led;
+
        int (*open)(struct hci_dev *hdev);
        int (*close)(struct hci_dev *hdev);
        int (*flush)(struct hci_dev *hdev);
index 52899291f40144c0dab97e135b0c9762ade5c8d1..5ee3c689c86370b4b325309d337611c9b51af5d1 100644 (file)
@@ -252,6 +252,12 @@ struct l2cap_conn_rsp {
 #define L2CAP_PSM_3DSP         0x0021
 #define L2CAP_PSM_IPSP         0x0023 /* 6LoWPAN */
 
+#define L2CAP_PSM_DYN_START    0x1001
+#define L2CAP_PSM_DYN_END      0xffff
+#define L2CAP_PSM_AUTO_END     0x10ff
+#define L2CAP_PSM_LE_DYN_START  0x0080
+#define L2CAP_PSM_LE_DYN_END   0x00ff
+
 /* channel identifier */
 #define L2CAP_CID_SIGNALING    0x0001
 #define L2CAP_CID_CONN_LESS    0x0002
index f1fbc3b119623de61becd9c45e44c05211b97ec3..f358ad5e421457b0312f7ffff2f766004ebca663 100644 (file)
@@ -306,5 +306,6 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
                         struct slave *slave);
 int bond_3ad_set_carrier(struct bonding *bond);
 void bond_3ad_update_lacp_rate(struct bonding *bond);
+void bond_3ad_update_ad_actor_settings(struct bonding *bond);
 #endif /* _NET_BOND_3AD_H */
 
index 9bcaaf7cd15ab053a5de9cbf92fd02f0fb0ffaf7..9e1b24c29f0c65ce997d71a566e63125f7a6e248 100644 (file)
@@ -712,6 +712,8 @@ struct cfg80211_acl_data {
  * @p2p_opp_ps: P2P opportunistic PS
  * @acl: ACL configuration used by the drivers which has support for
  *     MAC address based access control
+ * @pbss: If set, start as a PCP instead of AP. Relevant for DMG
+ *     networks.
  */
 struct cfg80211_ap_settings {
        struct cfg80211_chan_def chandef;
@@ -730,6 +732,7 @@ struct cfg80211_ap_settings {
        u8 p2p_ctwindow;
        bool p2p_opp_ps;
        const struct cfg80211_acl_data *acl;
+       bool pbss;
 };
 
 /**
@@ -1888,6 +1891,8 @@ struct cfg80211_ibss_params {
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
  * @vht_capa:  VHT Capability overrides
  * @vht_capa_mask: The bits of vht_capa which are to be used.
+ * @pbss: if set, connect to a PCP instead of AP. Valid for DMG
+ *     networks.
  */
 struct cfg80211_connect_params {
        struct ieee80211_channel *channel;
@@ -1910,6 +1915,7 @@ struct cfg80211_connect_params {
        struct ieee80211_ht_cap ht_capa_mask;
        struct ieee80211_vht_cap vht_capa;
        struct ieee80211_vht_cap vht_capa_mask;
+       bool pbss;
 };
 
 /**
@@ -3489,6 +3495,7 @@ struct cfg80211_cached_keys;
  *     registered for unexpected class 3 frames (AP mode)
  * @conn: (private) cfg80211 software SME connection state machine data
  * @connect_keys: (private) keys to set after connection is established
+ * @conn_bss_type: connecting/connected BSS type
  * @ibss_fixed: (private) IBSS is using fixed BSSID
  * @ibss_dfs_possible: (private) IBSS may change to a DFS channel
  * @event_list: (private) list for internal event processing
@@ -3519,6 +3526,7 @@ struct wireless_dev {
        u8 ssid_len, mesh_id_len, mesh_id_up_len;
        struct cfg80211_conn *conn;
        struct cfg80211_cached_keys *connect_keys;
+       enum ieee80211_bss_type conn_bss_type;
 
        struct list_head event_list;
        spinlock_t event_lock;
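A sketch of a connect request that targets a PCP on a DMG (60 GHz) network using the new flag (the channel and BSSID values are assumed):

	struct cfg80211_connect_params params = {
		.channel = chan,
		.bssid   = peer_addr,
		.pbss    = true,	/* peer is a PCP, not an AP */
	};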
index 6816f0fa5693dc275e1e4997375f29b5d99f7b64..30a56ab2ccfb05d7bc9a527ebdc09da19b3d5f7b 100644 (file)
@@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
        return dst && !(dst->flags & DST_METADATA);
 }
 
+static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
+                                      const struct sk_buff *skb_b)
+{
+       const struct metadata_dst *a, *b;
+
+       if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
+               return 0;
+
+       a = (const struct metadata_dst *) skb_dst(skb_a);
+       b = (const struct metadata_dst *) skb_dst(skb_b);
+
+       if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
+               return 1;
+
+       return memcmp(&a->u.tun_info, &b->u.tun_info,
+                     sizeof(a->u.tun_info) + a->u.tun_info.options_len);
+}
+
 struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
 struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
 
index 877f682989b892fb1d70256bda6b33f03e34a0f5..295d291269e2c88ed4930041597a28f7c9a7a2f7 100644 (file)
@@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
 
 void ip6_route_input(struct sk_buff *skb);
 
-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
-                                  struct flowi6 *fl6);
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+                                        struct flowi6 *fl6, int flags);
+
+static inline struct dst_entry *ip6_route_output(struct net *net,
+                                                const struct sock *sk,
+                                                struct flowi6 *fl6)
+{
+       return ip6_route_output_flags(net, sk, fl6, 0);
+}
+
 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
                                   int flags);
 
index 6db96ea0144f0445c4e52c51cddcc3d25d60ccdd..dda9abf6b89c14afddf71562c763bdd6bb7d85c3 100644 (file)
@@ -230,6 +230,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
 int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
                    u8 *protocol, struct flowi4 *fl4);
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
 int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
 
 struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
index 8f81bbbc38fc939070a5761e3af90da62faf8d68..e0f4109e64c6fca9ba87d768c2c7b1220a6557f4 100644 (file)
@@ -439,6 +439,12 @@ int dev_get_wireless_info(char *buffer, char **start, off_t offset, int length);
 /* Send a single event to user space */
 void wireless_send_event(struct net_device *dev, unsigned int cmd,
                         union iwreq_data *wrqu, const char *extra);
+#ifdef CONFIG_WEXT_CORE
+/* flush all previous wext events - if work is done from netdev notifiers */
+void wireless_nlevent_flush(void);
+#else
+static inline void wireless_nlevent_flush(void) {}
+#endif
 
 /* We may need a function to send a stream of events to user space.
  * More on that later... */
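The intended call site for wireless_nlevent_flush() is a netdev notifier; a sketch (the notifier function itself is hypothetical):

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		wireless_nlevent_flush();	/* no-op when CONFIG_WEXT_CORE=n */
		return NOTIFY_DONE;
	}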
index 7c30faff245f262a59831132f84f5cb37c2cc8a5..57147749ae423eee59083c61ea60b159078e2ac5 100644 (file)
@@ -5,7 +5,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2015 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -298,6 +298,7 @@ struct ieee80211_vif_chanctx_switch {
  *     note that this is only called when it changes after the channel
  *     context had been assigned.
  * @BSS_CHANGED_OCB: OCB join status changed
+ * @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed
  */
 enum ieee80211_bss_change {
        BSS_CHANGED_ASSOC               = 1<<0,
@@ -323,6 +324,7 @@ enum ieee80211_bss_change {
        BSS_CHANGED_BEACON_INFO         = 1<<20,
        BSS_CHANGED_BANDWIDTH           = 1<<21,
        BSS_CHANGED_OCB                 = 1<<22,
+       BSS_CHANGED_MU_GROUPS           = 1<<23,
 
        /* when adding here, make sure to change ieee80211_reconfig */
 };
@@ -435,6 +437,19 @@ struct ieee80211_event {
        } u;
 };
 
+/**
+ * struct ieee80211_mu_group_data - STA's VHT MU-MIMO group data
+ *
+ * This structure describes the group id data of VHT MU-MIMO
+ *
+ * @membership: 64-bit bitmap - a bit is set if the station is a member of the group
+ * @position: 2 bits per group id indicating the position in the group
+ */
+struct ieee80211_mu_group_data {
+       u8 membership[WLAN_MEMBERSHIP_LEN];
+       u8 position[WLAN_USER_POSITION_LEN];
+};
+
 /**
  * struct ieee80211_bss_conf - holds the BSS's changing parameters
  *
@@ -477,6 +492,7 @@ struct ieee80211_event {
  * @enable_beacon: whether beaconing should be enabled or not
  * @chandef: Channel definition for this BSS -- the hardware might be
  *     configured a higher bandwidth than this BSS uses, for example.
+ * @mu_group: VHT MU-MIMO group membership data
  * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
  *     This field is only valid when the channel is a wide HT/VHT channel.
  *     Note that with TDLS this can be the case (channel is HT, protection must
@@ -535,6 +551,7 @@ struct ieee80211_bss_conf {
        s32 cqm_rssi_thold;
        u32 cqm_rssi_hyst;
        struct cfg80211_chan_def chandef;
+       struct ieee80211_mu_group_data mu_group;
        __be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
        int arp_addr_cnt;
        bool qos;
@@ -691,12 +708,14 @@ enum mac80211_tx_info_flags {
  *     protocol frame (e.g. EAP)
  * @IEEE80211_TX_CTRL_PS_RESPONSE: This frame is a response to a poll
  *     frame (PS-Poll or uAPSD).
+ * @IEEE80211_TX_CTRL_RATE_INJECT: This frame is injected with rate information
  *
  * These flags are used in tx_info->control.flags.
  */
 enum mac80211_tx_control_flags {
        IEEE80211_TX_CTRL_PORT_CTRL_PROTO       = BIT(0),
        IEEE80211_TX_CTRL_PS_RESPONSE           = BIT(1),
+       IEEE80211_TX_CTRL_RATE_INJECT           = BIT(2),
 };
 
 /*
@@ -993,6 +1012,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_MACTIME_END: The timestamp passed in the RX status (@mactime
  *     field) is valid and contains the time the last symbol of the MPDU
  *     (including FCS) was received.
+ * @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
+ *     field) is valid and contains the time the SYNC preamble was received.
  * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
  * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
  * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
@@ -1014,6 +1035,14 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
  * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
  *     is stored in the @ampdu_delimiter_crc field)
  * @RX_FLAG_LDPC: LDPC was used
+ * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
+ *     processing it in any regular way.
+ *     This is useful if drivers offload some frames but still want to report
+ *     them for sniffing purposes.
+ * @RX_FLAG_SKIP_MONITOR: Process and report frame to all interfaces except
+ *     monitor interfaces.
+ *     This is useful if drivers offload some frames but still want to report
+ *     them for sniffing purposes.
  * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
  * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
  * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
@@ -1033,6 +1062,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
 enum mac80211_rx_flags {
        RX_FLAG_MMIC_ERROR              = BIT(0),
        RX_FLAG_DECRYPTED               = BIT(1),
+       RX_FLAG_MACTIME_PLCP_START      = BIT(2),
        RX_FLAG_MMIC_STRIPPED           = BIT(3),
        RX_FLAG_IV_STRIPPED             = BIT(4),
        RX_FLAG_FAILED_FCS_CRC          = BIT(5),
@@ -1046,7 +1076,7 @@ enum mac80211_rx_flags {
        RX_FLAG_HT_GF                   = BIT(13),
        RX_FLAG_AMPDU_DETAILS           = BIT(14),
        RX_FLAG_PN_VALIDATED            = BIT(15),
-       /* bit 16 free */
+       RX_FLAG_DUP_VALIDATED           = BIT(16),
        RX_FLAG_AMPDU_LAST_KNOWN        = BIT(17),
        RX_FLAG_AMPDU_IS_LAST           = BIT(18),
        RX_FLAG_AMPDU_DELIM_CRC_ERROR   = BIT(19),
@@ -1054,6 +1084,8 @@ enum mac80211_rx_flags {
        RX_FLAG_MACTIME_END             = BIT(21),
        RX_FLAG_VHT                     = BIT(22),
        RX_FLAG_LDPC                    = BIT(23),
+       RX_FLAG_ONLY_MONITOR            = BIT(24),
+       RX_FLAG_SKIP_MONITOR            = BIT(25),
        RX_FLAG_STBC_MASK               = BIT(26) | BIT(27),
        RX_FLAG_10MHZ                   = BIT(28),
        RX_FLAG_5MHZ                    = BIT(29),
@@ -1072,6 +1104,7 @@ enum mac80211_rx_flags {
  * @RX_VHT_FLAG_160MHZ: 160 MHz was used
  * @RX_VHT_FLAG_BF: packet was beamformed
  */
+
 enum mac80211_rx_vht_flags {
        RX_VHT_FLAG_80MHZ               = BIT(0),
        RX_VHT_FLAG_160MHZ              = BIT(1),
@@ -1091,6 +1124,8 @@ enum mac80211_rx_vht_flags {
  *     it but can store it and pass it back to the driver for synchronisation
  * @band: the active band when this frame was received
  * @freq: frequency the radio was tuned to when receiving this frame, in MHz
+ *     This field must be set for management frames, but isn't strictly needed
+ *     for data (other) frames - for those it only affects radiotap reporting.
  * @signal: signal strength when receiving this frame, either in dBm, in dB or
  *     unspecified depending on the hardware capabilities flags
  *     @IEEE80211_HW_SIGNAL_*
@@ -1684,6 +1719,18 @@ struct ieee80211_sta_rates {
  * @tdls_initiator: indicates the STA is an initiator of the TDLS link. Only
  *     valid if the STA is a TDLS peer in the first place.
  * @mfp: indicates whether the STA uses management frame protection or not.
+ * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
+ *     A-MSDU. Taken from the Extended Capabilities element. 0 means
+ *     unlimited.
+ * @max_amsdu_len: indicates the maximal length of an A-MSDU in bytes. This
+ *     field is always valid for packets with a VHT preamble. For packets
+ *     with a HT preamble, additional limits apply:
+ *             + If the skb is transmitted as part of a BA agreement, the
+ *               A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
+ *             + If the skb is not part of a BA agreement, the A-MSDU maximal
+ *               size is min(max_amsdu_len, 7935) bytes.
+ *     Both additional HT limits must be enforced by the low level driver.
+ *     This is defined by the spec (IEEE 802.11-2012 section 8.3.2.2 NOTE 2).
  * @txq: per-TID data TX queues (if driver uses the TXQ abstraction)
  */
 struct ieee80211_sta {
@@ -1702,6 +1749,8 @@ struct ieee80211_sta {
        bool tdls;
        bool tdls_initiator;
        bool mfp;
+       u8 max_amsdu_subframes;
+       u16 max_amsdu_len;
 
        struct ieee80211_txq *txq[IEEE80211_NUM_TIDS];
 
@@ -1910,6 +1959,11 @@ struct ieee80211_txq {
  *     by just its MAC address; this prevents, for example, the same station
  *     from connecting to two virtual AP interfaces at the same time.
  *
+ * @IEEE80211_HW_SUPPORTS_REORDERING_BUFFER: Hardware (or driver) manages the
+ *     reordering buffer internally, guaranteeing mac80211 receives frames in
+ *     order and does not need to manage its own reorder buffer or BA session
+ *     timeout.
+ *
  * @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
  */
 enum ieee80211_hw_flags {
@@ -1946,6 +2000,7 @@ enum ieee80211_hw_flags {
        IEEE80211_HW_SUPPORTS_AMSDU_IN_AMPDU,
        IEEE80211_HW_BEACON_TX_STATUS,
        IEEE80211_HW_NEEDS_UNIQUE_STA_ADDR,
+       IEEE80211_HW_SUPPORTS_REORDERING_BUFFER,
 
        /* keep last, obviously */
        NUM_IEEE80211_HW_FLAGS
@@ -2167,7 +2222,7 @@ static inline void SET_IEEE80211_DEV(struct ieee80211_hw *hw, struct device *dev
  * @hw: the &struct ieee80211_hw to set the MAC address for
  * @addr: the address to set
  */
-static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, u8 *addr)
+static inline void SET_IEEE80211_PERM_ADDR(struct ieee80211_hw *hw, const u8 *addr)
 {
        memcpy(hw->wiphy->perm_addr, addr, ETH_ALEN);
 }
@@ -2683,6 +2738,33 @@ enum ieee80211_ampdu_mlme_action {
        IEEE80211_AMPDU_TX_OPERATIONAL,
 };
 
+/**
+ * struct ieee80211_ampdu_params - AMPDU action parameters
+ *
+ * @action: the ampdu action, value from %ieee80211_ampdu_mlme_action.
+ * @sta: peer of this AMPDU session
+ * @tid: tid of the BA session
+ * @ssn: start sequence number of the session. TX/RX_STOP can pass 0. When
+ *     action is set to %IEEE80211_AMPDU_RX_START the driver passes back the
+ *     actual ssn value used to start the session and writes the value here.
+ * @buf_size: reorder buffer size  (number of subframes). Valid only when the
+ *     action is set to %IEEE80211_AMPDU_RX_START or
+ *     %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @amsdu: indicates the peer's ability to receive A-MSDU within A-MPDU.
+ *     valid when the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL
+ * @timeout: BA session timeout. Valid only when the action is set to
+ *     %IEEE80211_AMPDU_RX_START
+ */
+struct ieee80211_ampdu_params {
+       enum ieee80211_ampdu_mlme_action action;
+       struct ieee80211_sta *sta;
+       u16 tid;
+       u16 ssn;
+       u8 buf_size;
+       bool amsdu;
+       u16 timeout;
+};
+
 /**
  * enum ieee80211_frame_release_type - frame release reason
  * @IEEE80211_FRAME_RELEASE_PSPOLL: frame released for PS-Poll
@@ -3027,13 +3109,9 @@ enum ieee80211_reconfig_type {
  * @ampdu_action: Perform a certain A-MPDU action
  *     The RA/TID combination determines the destination and TID we want
  *     the ampdu action to be performed for. The action is defined through
- *     ieee80211_ampdu_mlme_action. Starting sequence number (@ssn)
- *     is the first frame we expect to perform the action on. Notice
- *     that TX/RX_STOP can pass NULL for this parameter.
- *     The @buf_size parameter is only valid when the action is set to
- *     %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's reorder
- *     buffer size (number of subframes) for this session -- the driver
- *     may neither send aggregates containing more subframes than this
+ *     ieee80211_ampdu_mlme_action.
+ *     When the action is set to %IEEE80211_AMPDU_TX_OPERATIONAL the driver
+ *     may neither send aggregates containing more subframes than @buf_size
  *     nor send aggregates in a way that lost frames would exceed the
  *     buffer size. If just limiting the aggregate size, this would be
  *     possible with a buf_size of 8:
@@ -3044,9 +3122,6 @@ enum ieee80211_reconfig_type {
  *     buffer size of 8. Correct ways to retransmit #1 would be:
  *      - TX:       1 or 18 or 81
  *     Even "189" would be wrong since 1 could be lost again.
- *     The @amsdu parameter is valid when the action is set to
- *     %IEEE80211_AMPDU_TX_OPERATIONAL and indicates the peer's ability
- *     to receive A-MSDU within A-MPDU.
  *
  *     Returns a negative error code on failure.
  *     The callback can sleep.
@@ -3388,9 +3463,7 @@ struct ieee80211_ops {
        int (*tx_last_beacon)(struct ieee80211_hw *hw);
        int (*ampdu_action)(struct ieee80211_hw *hw,
                            struct ieee80211_vif *vif,
-                           enum ieee80211_ampdu_mlme_action action,
-                           struct ieee80211_sta *sta, u16 tid, u16 *ssn,
-                           u8 buf_size, bool amsdu);
+                           struct ieee80211_ampdu_params *params);
        int (*get_survey)(struct ieee80211_hw *hw, int idx,
                struct survey_info *survey);
        void (*rfkill_poll)(struct ieee80211_hw *hw);
@@ -5120,6 +5193,24 @@ void ieee80211_remain_on_channel_expired(struct ieee80211_hw *hw);
 void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap,
                                  const u8 *addr);
 
+/**
+ * ieee80211_mark_rx_ba_filtered_frames - move RX BA window and mark filtered
+ * @pubsta: station struct
+ * @tid: the session's TID
+ * @ssn: starting sequence number of the bitmap, all frames before this are
+ *     assumed to be out of the window after the call
+ * @filtered: bitmap of filtered frames, BIT(0) is the @ssn entry etc.
+ * @received_mpdus: number of received mpdus in firmware
+ *
+ * This function moves the BA window and releases all frames before @ssn, and
+ * marks frames marked in the bitmap as having been filtered. Afterwards, it
+ * checks if any frames in the window starting from @ssn can now be released
+ * (in case they were only waiting for frames that were filtered.)
+ */
+void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+                                         u16 ssn, u64 filtered,
+                                         u16 received_mpdus);
+
 /**
  * ieee80211_send_bar - send a BlockAckReq frame
  *
@@ -5523,4 +5614,19 @@ void ieee80211_unreserve_tid(struct ieee80211_sta *sta, u8 tid);
  */
 struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
                                     struct ieee80211_txq *txq);
+
+/**
+ * ieee80211_txq_get_depth - get pending frame/byte count of given txq
+ *
+ * The values are not guaranteed to be coherent with regard to each other, i.e.
+ * the txq state can change halfway through this function and the caller may
+ * end up with a "new" frame_cnt and an "old" byte_cnt or vice versa.
+ *
+ * @txq: pointer obtained from station or virtual interface
+ * @frame_cnt: pointer to store frame count
+ * @byte_cnt: pointer to store byte count
+ */
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+                            unsigned long *frame_cnt,
+                            unsigned long *byte_cnt);
 #endif /* MAC80211_H */
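A sketch of a driver's ampdu_action callback after the API change, unpacking the consolidated parameter structure (the example_* helpers are assumed):

	static int example_ampdu_action(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_ampdu_params *params)
	{
		switch (params->action) {
		case IEEE80211_AMPDU_RX_START:
			return example_rx_agg_start(params->sta, params->tid,
						    params->ssn, params->buf_size,
						    params->timeout);
		case IEEE80211_AMPDU_TX_OPERATIONAL:
			return example_tx_agg_oper(params->sta, params->tid,
						   params->buf_size, params->amsdu);
		default:
			return -EOPNOTSUPP;
		}
	}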
index 788ef58a66b9b78807d164bce85060cc760ee336..62e17d1319ff7423dbcf176815f04cecfcdf3371 100644 (file)
@@ -79,12 +79,10 @@ print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
             const struct nf_conntrack_l3proto *l3proto,
             const struct nf_conntrack_l4proto *proto);
 
-#ifdef CONFIG_LOCKDEP
-# define CONNTRACK_LOCKS 8
-#else
-# define CONNTRACK_LOCKS 1024
-#endif
+#define CONNTRACK_LOCKS 1024
+
 extern spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
+void nf_conntrack_lock(spinlock_t *lock);
 
 extern spinlock_t nf_conntrack_expect_lock;
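Hash-bucket users now take the per-bucket spinlock through the new helper; a sketch (the hash value is assumed):

	spinlock_t *lock = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];

	nf_conntrack_lock(lock);
	/* ... walk or modify the bucket ... */
	spin_unlock(lock);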
 
index 2b7907a3556877ec5311b7ee541e25f064d06644..4d6ec3f6fafe0d9b44b0383808d06f25e8e599db 100644 (file)
@@ -98,6 +98,16 @@ struct netns_ipv4 {
        int sysctl_tcp_keepalive_probes;
        int sysctl_tcp_keepalive_intvl;
 
+       int sysctl_tcp_syn_retries;
+       int sysctl_tcp_synack_retries;
+       int sysctl_tcp_syncookies;
+       int sysctl_tcp_reordering;
+       int sysctl_tcp_retries1;
+       int sysctl_tcp_retries2;
+       int sysctl_tcp_orphan_retries;
+       int sysctl_tcp_fin_timeout;
+       unsigned int sysctl_tcp_notsent_lowat;
+
        struct ping_group_range ping_group_range;
 
        atomic_t dev_addr_genid;
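These TCP knobs become per network namespace; code reads them through the owning socket, as the tcp.h hunks later in this diff do (sketch, sk assumed):

	int syn_retries = sock_net(sk)->ipv4.sysctl_tcp_syn_retries;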
index 262532d111f51e3a91a06af785b4721e8fab9d56..59fa93c01d2a16a129298498f1d56556b895acd9 100644 (file)
@@ -21,6 +21,7 @@ struct scm_creds {
 struct scm_fp_list {
        short                   count;
        short                   max;
+       struct user_struct      *user;
        struct file             *fp[SCM_MAX_FD];
 };
 
index f2d58aa37a6fef8438d0ea0ea1ebce870a2a31bb..9b9fb122b31f6b78884a0392081a3afceb49337d 100644 (file)
 #define __sctp_auth_h__
 
 #include <linux/list.h>
-#include <linux/crypto.h>
 
 struct sctp_endpoint;
 struct sctp_association;
 struct sctp_authkey;
 struct sctp_hmacalgo;
+struct crypto_shash;
 
 /*
  * Define a generic struct that will hold all the info
@@ -90,7 +90,7 @@ int sctp_auth_asoc_copy_shkeys(const struct sctp_endpoint *ep,
                                struct sctp_association *asoc,
                                gfp_t gfp);
 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
-void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[]);
+void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[]);
 struct sctp_hmac *sctp_auth_get_hmac(__u16 hmac_id);
 struct sctp_hmac *sctp_auth_asoc_get_hmac(const struct sctp_association *asoc);
 void sctp_auth_asoc_set_default_hmac(struct sctp_association *asoc,
index 20e72129be1ce0063eeafcbaadcee1f37e0c614c..5a57409da37bba7aaa652ec860044db4061ba4a4 100644 (file)
@@ -82,7 +82,7 @@ struct sctp_bind_addr;
 struct sctp_ulpq;
 struct sctp_ep_common;
 struct sctp_ssnmap;
-struct crypto_hash;
+struct crypto_shash;
 
 
 #include <net/sctp/tsnmap.h>
@@ -166,7 +166,7 @@ struct sctp_sock {
        struct sctp_pf *pf;
 
        /* Access to HMAC transform. */
-       struct crypto_hash *hmac;
+       struct crypto_shash *hmac;
        char *sctp_hmac_alg;
 
        /* What is our base endpointer? */
@@ -756,7 +756,6 @@ struct sctp_transport {
 
        /* Reference counting. */
        atomic_t refcnt;
-       __u32    dead:1,
                /* RTO-Pending : A flag used to track if one of the DATA
                 *              chunks sent to this address is currently being
                 *              used to compute a RTT. If this flag is 0,
@@ -766,7 +765,7 @@ struct sctp_transport {
                 *              calculation completes (i.e. the DATA chunk
                 *              is SACK'd) clear this flag.
                 */
-                rto_pending:1,
+       __u32   rto_pending:1,
 
                /*
                 * hb_sent : a flag that signals that we have a pending
@@ -955,7 +954,7 @@ void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
 void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
 void sctp_transport_free(struct sctp_transport *);
 void sctp_transport_reset_timers(struct sctp_transport *);
-void sctp_transport_hold(struct sctp_transport *);
+int sctp_transport_hold(struct sctp_transport *);
 void sctp_transport_put(struct sctp_transport *);
 void sctp_transport_update_rto(struct sctp_transport *, __u32);
 void sctp_transport_raise_cwnd(struct sctp_transport *, __u32, __u32);
@@ -1235,7 +1234,7 @@ struct sctp_endpoint {
        /* SCTP AUTH: array of the HMACs that will be allocated
         * we need this per association so that we don't serialize
         */
-       struct crypto_hash **auth_hmacs;
+       struct crypto_shash **auth_hmacs;
 
        /* SCTP-AUTH: hmacs for the endpoint encoded into parameter */
         struct sctp_hmac_algo_param *auth_hmacs_list;
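With crypto_hash replaced by crypto_shash, digest computation in sctp_auth would follow the synchronous shash pattern; a sketch with assumed variable names (ep, hmac_id, key, data, len, digest):

	struct crypto_shash *tfm = ep->auth_hmacs[hmac_id];
	SHASH_DESC_ON_STACK(desc, tfm);

	desc->tfm = tfm;
	desc->flags = 0;
	crypto_shash_setkey(tfm, key->data, key->len);
	crypto_shash_digest(desc, data, len, digest);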
index b9e7b3d863a09db498e00004da4da8a781e90b9f..f5ea148853e2f76257aca80a4aef65a30d587538 100644 (file)
@@ -1035,18 +1035,6 @@ struct proto {
        struct list_head        node;
 #ifdef SOCK_REFCNT_DEBUG
        atomic_t                socks;
-#endif
-#ifdef CONFIG_MEMCG_KMEM
-       /*
-        * cgroup specific init/deinit functions. Called once for all
-        * protocols that implement it, from cgroups populate function.
-        * This function has to setup any files the protocol want to
-        * appear in the kmem cgroup filesystem.
-        */
-       int                     (*init_cgroup)(struct mem_cgroup *memcg,
-                                              struct cgroup_subsys *ss);
-       void                    (*destroy_cgroup)(struct mem_cgroup *memcg);
-       struct cg_proto         *(*proto_cgroup)(struct mem_cgroup *memcg);
 #endif
        int                     (*diag_destroy)(struct sock *sk, int err);
 };
index 7dda3d7adba8e905b3d14db7f3361d9a96493b46..aecd30308d500b53a1b46902f2b3d254bcbc39b6 100644 (file)
@@ -16,7 +16,7 @@ struct sock_reuseport {
 };
 
 extern int reuseport_alloc(struct sock *sk);
-extern int reuseport_add_sock(struct sock *sk, const struct sock *sk2);
+extern int reuseport_add_sock(struct sock *sk, struct sock *sk2);
 extern void reuseport_detach_sock(struct sock *sk);
 extern struct sock *reuseport_select_sock(struct sock *sk,
                                          u32 hash,
index 8ea19977ea530bffb57bebed734b977c95b603c5..cb8d30c20ef3efa694540d0bf92fd3e04369b34f 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/cache.h>
 #include <linux/percpu.h>
 #include <linux/skbuff.h>
-#include <linux/crypto.h>
 #include <linux/cryptohash.h>
 #include <linux/kref.h>
 #include <linux/ktime.h>
@@ -216,7 +215,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
 /* TCP thin-stream limits */
 #define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */
 
-/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
+/* TCP initial congestion window as per rfc6928 */
 #define TCP_INIT_CWND          10
 
 /* Bit Flags for sysctl_tcp_fastopen */
@@ -239,13 +238,6 @@ extern struct inet_timewait_death_row tcp_death_row;
 extern int sysctl_tcp_timestamps;
 extern int sysctl_tcp_window_scaling;
 extern int sysctl_tcp_sack;
-extern int sysctl_tcp_fin_timeout;
-extern int sysctl_tcp_syn_retries;
-extern int sysctl_tcp_synack_retries;
-extern int sysctl_tcp_retries1;
-extern int sysctl_tcp_retries2;
-extern int sysctl_tcp_orphan_retries;
-extern int sysctl_tcp_syncookies;
 extern int sysctl_tcp_fastopen;
 extern int sysctl_tcp_retrans_collapse;
 extern int sysctl_tcp_stdurg;
@@ -274,7 +266,6 @@ extern int sysctl_tcp_thin_dupack;
 extern int sysctl_tcp_early_retrans;
 extern int sysctl_tcp_limit_output_bytes;
 extern int sysctl_tcp_challenge_ack_limit;
-extern unsigned int sysctl_tcp_notsent_lowat;
 extern int sysctl_tcp_min_tso_segs;
 extern int sysctl_tcp_min_rtt_wlen;
 extern int sysctl_tcp_autocorking;
@@ -447,7 +438,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
 
 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
 void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
@@ -568,6 +559,7 @@ void tcp_rearm_rto(struct sock *sk);
 void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
 void tcp_reset(struct sock *sk);
 void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
+void tcp_fin(struct sock *sk);
 
 /* tcp_timer.c */
 void tcp_init_xmit_timers(struct sock *);
@@ -963,9 +955,11 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
  */
 static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
 {
+       struct net *net = sock_net((struct sock *)tp);
+
        tp->do_early_retrans = sysctl_tcp_early_retrans &&
                sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
-               sysctl_tcp_reordering == 3;
+               net->ipv4.sysctl_tcp_reordering == 3;
 }
 
 static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
@@ -1252,7 +1246,7 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
 
 static inline int tcp_fin_time(const struct sock *sk)
 {
-       int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
+       int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
        const int rto = inet_csk(sk)->icsk_rto;
 
        if (fin_timeout < (rto << 2) - (rto >> 1))
@@ -1325,9 +1319,6 @@ static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
        tp->retransmit_skb_hint = NULL;
 }
 
-/* MD5 Signature */
-struct crypto_hash;
-
 union tcp_md5_addr {
        struct in_addr  a4;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -1376,7 +1367,7 @@ union tcp_md5sum_block {
 
 /* - pool: digest algorithm, hash description and scratch buffer */
 struct tcp_md5sig_pool {
-       struct hash_desc        md5_desc;
+       struct ahash_request    *md5_req;
        union tcp_md5sum_block  md5_blk;
 };
 
@@ -1437,6 +1428,7 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
 
 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
+void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
@@ -1685,7 +1677,8 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
 
 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 {
-       return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
+       struct net *net = sock_net((struct sock *)tp);
+       return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
 }
 
 static inline bool tcp_stream_memory_free(const struct sock *sk)
index 0fb86442544b26627fb5871f26d1ede748f9d6d9..25bd919c9ef0c9fcd91e62884781113d6b7c2d30 100644 (file)
@@ -9,17 +9,71 @@
 #include <linux/udp.h>
 #include <net/dst_metadata.h>
 
+/* VXLAN protocol (RFC 7348) header:
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|R|R|R|               Reserved                        |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |                VXLAN Network Identifier (VNI) |   Reserved    |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * I = VXLAN Network Identifier (VNI) present.
+ */
+struct vxlanhdr {
+       __be32 vx_flags;
+       __be32 vx_vni;
+};
+
+/* VXLAN header flags. */
+#define VXLAN_HF_VNI BIT(27)
+
+#define VXLAN_N_VID     (1u << 24)
+#define VXLAN_VID_MASK  (VXLAN_N_VID - 1)
+#define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+
 #define VNI_HASH_BITS  10
 #define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
+#define FDB_HASH_BITS  8
+#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
+
+/* Remote checksum offload for VXLAN (VXLAN_F_REMCSUM_[RT]X):
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |R|R|R|R|I|R|R|R|R|R|C|              Reserved                   |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |           VXLAN Network Identifier (VNI)      |O| Csum start  |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * C = Remote checksum offload bit. When set indicates that the
+ *     remote checksum offload data is present.
+ *
+ * O = Offset bit. Indicates the checksum offset relative to
+ *     checksum start.
+ *
+ * Csum start = Checksum start divided by two.
+ *
+ * http://tools.ietf.org/html/draft-herbert-vxlan-rco
+ */
+
+/* VXLAN-RCO header flags. */
+#define VXLAN_HF_RCO BIT(21)
+
+/* Remote checksum offload header option */
+#define VXLAN_RCO_MASK  0x7f    /* Last byte of vni field */
+#define VXLAN_RCO_UDP   0x80    /* Indicate UDP RCO (TCP when not set) */
+#define VXLAN_RCO_SHIFT 1       /* Left shift of start */
+#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
+#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT)
 
 /*
- * VXLAN Group Based Policy Extension:
+ * VXLAN Group Based Policy Extension (VXLAN_F_GBP):
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |1|-|-|-|1|-|-|-|R|D|R|R|A|R|R|R|        Group Policy ID        |
+ * |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R|        Group Policy ID        |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  * |                VXLAN Network Identifier (VNI) |   Reserved    |
  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
  *
+ * G = Group Policy ID present.
+ *
  * D = Don't Learn bit. When set, this bit indicates that the egress
  *     VTEP MUST NOT learn the source address of the encapsulated frame.
  *
  *     this packet. Policies MUST NOT be applied by devices when the
  *     A bit is set.
  *
- * [0] https://tools.ietf.org/html/draft-smith-vxlan-group-policy
+ * https://tools.ietf.org/html/draft-smith-vxlan-group-policy
  */
 struct vxlanhdr_gbp {
-       __u8    vx_flags;
+       u8      vx_flags;
 #ifdef __LITTLE_ENDIAN_BITFIELD
-       __u8    reserved_flags1:3,
+       u8      reserved_flags1:3,
                policy_applied:1,
                reserved_flags2:2,
                dont_learn:1,
                reserved_flags3:1;
 #elif defined(__BIG_ENDIAN_BITFIELD)
-       __u8    reserved_flags1:1,
+       u8      reserved_flags1:1,
                dont_learn:1,
                reserved_flags2:2,
                policy_applied:1,
@@ -50,6 +104,9 @@ struct vxlanhdr_gbp {
        __be32  vx_vni;
 };
 
+/* VXLAN-GBP header flags. */
+#define VXLAN_HF_GBP BIT(31)
+
 #define VXLAN_GBP_USED_BITS (VXLAN_HF_GBP | 0xFFFFFF)
 
 /* skb->mark mapping
@@ -62,44 +119,6 @@ struct vxlanhdr_gbp {
 #define VXLAN_GBP_POLICY_APPLIED       (BIT(3) << 16)
 #define VXLAN_GBP_ID_MASK              (0xFFFF)
 
-/* VXLAN protocol header:
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |G|R|R|R|I|R|R|C|               Reserved                        |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * |                VXLAN Network Identifier (VNI) |   Reserved    |
- * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- *
- * G = 1       Group Policy (VXLAN-GBP)
- * I = 1       VXLAN Network Identifier (VNI) present
- * C = 1       Remote checksum offload (RCO)
- */
-struct vxlanhdr {
-       __be32 vx_flags;
-       __be32 vx_vni;
-};
-
-/* VXLAN header flags. */
-#define VXLAN_HF_RCO BIT(21)
-#define VXLAN_HF_VNI BIT(27)
-#define VXLAN_HF_GBP BIT(31)
-
-/* Remote checksum offload header option */
-#define VXLAN_RCO_MASK  0x7f    /* Last byte of vni field */
-#define VXLAN_RCO_UDP   0x80    /* Indicate UDP RCO (TCP when not set *) */
-#define VXLAN_RCO_SHIFT 1       /* Left shift of start */
-#define VXLAN_RCO_SHIFT_MASK ((1 << VXLAN_RCO_SHIFT) - 1)
-#define VXLAN_MAX_REMCSUM_START (VXLAN_RCO_MASK << VXLAN_RCO_SHIFT)
-
-#define VXLAN_N_VID     (1u << 24)
-#define VXLAN_VID_MASK  (VXLAN_N_VID - 1)
-#define VXLAN_VNI_MASK  (VXLAN_VID_MASK << 8)
-#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
-
-#define VNI_HASH_BITS  10
-#define VNI_HASH_SIZE  (1<<VNI_HASH_BITS)
-#define FDB_HASH_BITS  8
-#define FDB_HASH_SIZE  (1<<FDB_HASH_BITS)
-
 struct vxlan_metadata {
        u32             gbp;
 };
@@ -138,10 +157,10 @@ struct vxlan_config {
        int                     remote_ifindex;
        int                     mtu;
        __be16                  dst_port;
-       __u16                   port_min;
-       __u16                   port_max;
-       __u8                    tos;
-       __u8                    ttl;
+       u16                     port_min;
+       u16                     port_max;
+       u8                      tos;
+       u8                      ttl;
        u32                     flags;
        unsigned long           age_interval;
        unsigned int            addrmax;
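Given the header layout documented above, a receive-path sketch for checking the I bit and extracting the VNI (hdr is assumed to point at a struct vxlanhdr in network byte order):

	if (!(hdr->vx_flags & htonl(VXLAN_HF_VNI)))
		return -EINVAL;			/* VNI bit must be set */
	vni = ntohl(hdr->vx_vni) >> 8;		/* VNI occupies the top 24 bits */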
index 2a7aa75dd00926f6438197c5605553abcda8d4f3..30520d5ee3d14a576eb39ab852b1421d49a4c5b1 100644 (file)
@@ -26,7 +26,7 @@
 struct iscsi_tcp_conn;
 struct iscsi_segment;
 struct sk_buff;
-struct hash_desc;
+struct ahash_request;
 
 typedef int iscsi_segment_done_fn_t(struct iscsi_tcp_conn *,
                                    struct iscsi_segment *);
@@ -38,7 +38,7 @@ struct iscsi_segment {
        unsigned int            total_size;
        unsigned int            total_copied;
 
-       struct hash_desc        *hash;
+       struct ahash_request    *hash;
        unsigned char           padbuf[ISCSI_PAD_LEN];
        unsigned char           recv_digest[ISCSI_DIGEST_SIZE];
        unsigned char           digest[ISCSI_DIGEST_SIZE];
@@ -73,7 +73,7 @@ struct iscsi_tcp_conn {
        /* control data */
        struct iscsi_tcp_recv   in;             /* TCP receive context */
        /* CRC32C (Rx) LLD should set this is they do not offload */
-       struct hash_desc        *rx_hash;
+       struct ahash_request    *rx_hash;
 };
 
 struct iscsi_tcp_task {
@@ -111,15 +111,16 @@ extern void iscsi_tcp_segment_unmap(struct iscsi_segment *segment);
 extern void iscsi_segment_init_linear(struct iscsi_segment *segment,
                                      void *data, size_t size,
                                      iscsi_segment_done_fn_t *done,
-                                     struct hash_desc *hash);
+                                     struct ahash_request *hash);
 extern int
 iscsi_segment_seek_sg(struct iscsi_segment *segment,
                      struct scatterlist *sg_list, unsigned int sg_count,
                      unsigned int offset, size_t size,
-                     iscsi_segment_done_fn_t *done, struct hash_desc *hash);
+                     iscsi_segment_done_fn_t *done,
+                     struct ahash_request *hash);
 
 /* digest helpers */
-extern void iscsi_tcp_dgst_header(struct hash_desc *hash, const void *hdr,
+extern void iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
                                  size_t hdrlen,
                                  unsigned char digest[ISCSI_DIGEST_SIZE]);
 extern struct iscsi_cls_conn *
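LLDs now hand libiscsi an ahash_request for the crc32c digests; allocation would follow the usual ahash pattern (sketch, error handling omitted):

	struct crypto_ahash *tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);

	ahash_request_set_callback(req, 0, NULL, NULL);	/* synchronous use */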
index b0be09279943fc1a911a6071507ebdd79eed2912..af1fb37c6b265cf20de58dee1e0d7490669a9bf1 100644 (file)
@@ -1093,6 +1093,8 @@ unsigned int snd_pcm_rate_to_rate_bit(unsigned int rate);
 unsigned int snd_pcm_rate_bit_to_rate(unsigned int rate_bit);
 unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
                                         unsigned int rates_b);
+unsigned int snd_pcm_rate_range_to_bits(unsigned int rate_min,
+                                       unsigned int rate_max);
 
 /**
  * snd_pcm_set_runtime_buffer - Set the PCM runtime buffer
index fdabbb4ddba92f801e2952fafaeb997c7fa03a33..f730b91e472f19f97b15ec8982443fe099dd5db4 100644 (file)
@@ -167,6 +167,10 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
 int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count);
 int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
                         unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+                             unsigned char *buffer, int count);
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream,
+                              int count);
 
 /* main midi functions */
 
index 373d3342002bfefdc9911f7b3f5ac57af1826f9b..c3371fa548cb9b3b30c563e89de65e9fce7fb585 100644 (file)
@@ -570,8 +570,8 @@ struct iscsi_conn {
        spinlock_t              response_queue_lock;
        spinlock_t              state_lock;
        /* libcrypto RX and TX contexts for crc32c */
-       struct hash_desc        conn_rx_hash;
-       struct hash_desc        conn_tx_hash;
+       struct ahash_request    *conn_rx_hash;
+       struct ahash_request    *conn_tx_hash;
        /* Used for scheduling TX and RX connection kthreads */
        cpumask_var_t           conn_cpumask;
        unsigned int            conn_rx_reset_cpumask:1;
index 98feb1b82896504bfc0a40157c84a22ca137e259..d6dfa05ba322c9465d3027c0c992a97eff6a504e 100644 (file)
@@ -17,7 +17,7 @@ TRACE_EVENT(fence_annotate_wait_on,
 
        TP_STRUCT__entry(
                __string(driver, fence->ops->get_driver_name(fence))
-               __string(timeline, fence->ops->get_driver_name(fence))
+               __string(timeline, fence->ops->get_timeline_name(fence))
                __field(unsigned int, context)
                __field(unsigned int, seqno)
 
index 284244ebfe8d2c3ef7036c3a7e9b55ea15b11078..19e50300ce7d63da516ef9ea365c156a4e77ad3c 100644 (file)
@@ -38,6 +38,28 @@ DEFINE_EVENT(cpu, cpu_idle,
        TP_ARGS(state, cpu_id)
 );
 
+TRACE_EVENT(powernv_throttle,
+
+       TP_PROTO(int chip_id, const char *reason, int pmax),
+
+       TP_ARGS(chip_id, reason, pmax),
+
+       TP_STRUCT__entry(
+               __field(int, chip_id)
+               __string(reason, reason)
+               __field(int, pmax)
+       ),
+
+       TP_fast_assign(
+               __entry->chip_id = chip_id;
+               __assign_str(reason, reason);
+               __entry->pmax = pmax;
+       ),
+
+       TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
+                 __entry->pmax, __get_str(reason))
+);
+
 TRACE_EVENT(pstate_sample,
 
        TP_PROTO(u32 core_busy,
index ef72c4aada566bb853a09c6c4df95661f6233153..aacc172eba7ee78c68d11906569ee090bddbf89c 100644 (file)
@@ -171,6 +171,76 @@ TRACE_EVENT(rcu_grace_period_init,
                  __entry->grplo, __entry->grphi, __entry->qsmask)
 );
 
+/*
+ * Tracepoint for expedited grace-period events.  Takes a string identifying
+ * the RCU flavor, the expedited grace-period sequence number, and a string
+ * identifying the grace-period-related event as follows:
+ *
+ *     "snap": Captured snapshot of expedited grace period sequence number.
+ *     "start": Started a real expedited grace period.
+ *     "end": Ended a real expedited grace period.
+ *     "done": Someone else did the expedited grace period for us.
+ */
+TRACE_EVENT(rcu_exp_grace_period,
+
+       TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
+
+       TP_ARGS(rcuname, gpseq, gpevent),
+
+       TP_STRUCT__entry(
+               __field(const char *, rcuname)
+               __field(unsigned long, gpseq)
+               __field(const char *, gpevent)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->gpseq = gpseq;
+               __entry->gpevent = gpevent;
+       ),
+
+       TP_printk("%s %lu %s",
+                 __entry->rcuname, __entry->gpseq, __entry->gpevent)
+);
+
+/*
+ * Tracepoint for expedited grace-period funnel-locking events.  Takes a
+ * string identifying the RCU flavor, an integer identifying the rcu_node
+ * combining-tree level, another pair of integers identifying the lowest-
+ * and highest-numbered CPU associated with the current rcu_node structure,
+ * and a string identifying the grace-period-related event as follows:
+ *
+ *     "acq": Acquired a level of funnel lock
+ *     "rel": Released a level of funnel lock
+ */
+TRACE_EVENT(rcu_exp_funnel_lock,
+
+       TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
+                const char *gpevent),
+
+       TP_ARGS(rcuname, level, grplo, grphi, gpevent),
+
+       TP_STRUCT__entry(
+               __field(const char *, rcuname)
+               __field(u8, level)
+               __field(int, grplo)
+               __field(int, grphi)
+               __field(const char *, gpevent)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->level = level;
+               __entry->grplo = grplo;
+               __entry->grphi = grphi;
+               __entry->gpevent = gpevent;
+       ),
+
+       TP_printk("%s %d %d %d %s",
+                 __entry->rcuname, __entry->level, __entry->grplo,
+                 __entry->grphi, __entry->gpevent)
+);
+
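A hedged sketch of how the RCU core would emit the two new events (the rsp/rnp/s names follow kernel/rcu conventions but are assumptions here, and the real call sites wrap the event strings with tracepoint_string()):

	trace_rcu_exp_grace_period(rsp->name, s, "start");
	trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo, rnp->grphi, "acq");
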
 /*
  * Tracepoint for RCU no-CBs CPU callback handoffs.  This event is intended
  * to assist debugging of these handoffs.
@@ -704,11 +774,15 @@ TRACE_EVENT(rcu_barrier,
 #else /* #ifdef CONFIG_RCU_TRACE */
 
 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
-                                   qsmask) do { } while (0)
 #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
                                      level, grplo, grphi, event) \
                                      do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+                                   qsmask) do { } while (0)
+#define trace_rcu_exp_grace_period(rcuname, gpseq, gpevent) \
+       do { } while (0)
+#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
+       do { } while (0)
 #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
diff --git a/include/trace/events/sunvnet.h b/include/trace/events/sunvnet.h
new file mode 100644 (file)
index 0000000..eb080b2
--- /dev/null
@@ -0,0 +1,139 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sunvnet
+
+#if !defined(_TRACE_SUNVNET_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SUNVNET_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vnet_rx_one,
+
+       TP_PROTO(int lsid, int rsid, int index, int needs_ack),
+
+       TP_ARGS(lsid, rsid, index, needs_ack),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, index)
+               __field(int, needs_ack)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->index = index;
+               __entry->needs_ack = needs_ack;
+       ),
+
+       TP_printk("(%x:%x) walk_rx_one index %d; needs_ack %d",
+               __entry->lsid, __entry->rsid,
+               __entry->index, __entry->needs_ack)
+);
+
+DECLARE_EVENT_CLASS(vnet_tx_stopped_ack_template,
+
+       TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+
+       TP_ARGS(lsid, rsid, ack_end, npkts),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, ack_end)
+               __field(int, npkts)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->ack_end = ack_end;
+               __entry->npkts = npkts;
+       ),
+
+       TP_printk("(%x:%x) stopped ack for %d; npkts %d",
+               __entry->lsid, __entry->rsid,
+               __entry->ack_end, __entry->npkts)
+);
+DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_send_stopped_ack,
+            TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+            TP_ARGS(lsid, rsid, ack_end, npkts));
+DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_defer_stopped_ack,
+            TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+            TP_ARGS(lsid, rsid, ack_end, npkts));
+DEFINE_EVENT(vnet_tx_stopped_ack_template, vnet_tx_pending_stopped_ack,
+            TP_PROTO(int lsid, int rsid, int ack_end, int npkts),
+            TP_ARGS(lsid, rsid, ack_end, npkts));
+
+TRACE_EVENT(vnet_rx_stopped_ack,
+
+       TP_PROTO(int lsid, int rsid, int end),
+
+       TP_ARGS(lsid, rsid, end),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, end)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->end = end;
+       ),
+
+       TP_printk("(%x:%x) stopped ack for index %d",
+               __entry->lsid, __entry->rsid, __entry->end)
+);
+
+TRACE_EVENT(vnet_tx_trigger,
+
+       TP_PROTO(int lsid, int rsid, int start, int err),
+
+       TP_ARGS(lsid, rsid, start, err),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, start)
+               __field(int, err)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->start = start;
+               __entry->err = err;
+       ),
+
+       TP_printk("(%x:%x) Tx trigger for %d sent with err %d %s",
+               __entry->lsid, __entry->rsid, __entry->start,
+               __entry->err, __entry->err > 0 ? "(ok)" : " ")
+);
+
+TRACE_EVENT(vnet_skip_tx_trigger,
+
+       TP_PROTO(int lsid, int rsid, int last),
+
+       TP_ARGS(lsid, rsid, last),
+
+       TP_STRUCT__entry(
+               __field(int, lsid)
+               __field(int, rsid)
+               __field(int, last)
+       ),
+
+       TP_fast_assign(
+               __entry->lsid = lsid;
+               __entry->rsid = rsid;
+               __entry->last = last;
+       ),
+
+       TP_printk("(%x:%x) Skip Tx trigger. Last trigger sent was %d",
+               __entry->lsid, __entry->rsid, __entry->last)
+);
+#endif /* _TRACE_SUNVNET_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
index b4e92eb120444e36c46a06b6c5e3abdc563985ad..a0ebfe7c9a28c5d57a71a6a857291ee5b41fd5d6 100644 (file)
@@ -669,6 +669,7 @@ struct drm_set_client_cap {
        __u64 value;
 };
 
+#define DRM_RDWR O_RDWR
 #define DRM_CLOEXEC O_CLOEXEC
 struct drm_prime_handle {
        __u32 handle;
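A hedged userspace sketch of what the new flag enables: exporting a GEM handle as a dma-buf fd that can later be mapped read/write (drm_fd and handle are assumed to be an open DRM device fd and a valid GEM handle):

	struct drm_prime_handle args = {
		.handle = handle,
		.flags  = DRM_CLOEXEC | DRM_RDWR,
	};
	int dmabuf_fd = -1;

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args) == 0)
		dmabuf_fd = args.fd;	/* mmap()able read/write dma-buf */
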
index 4cc989ad685164689072f90f1ef2c40f633c4088..f95e1c43c3fbc06fc2de769cd5360a5559d549b3 100644 (file)
@@ -48,6 +48,8 @@ struct drm_etnaviv_timespec {
 #define ETNAVIV_PARAM_GPU_FEATURES_2                0x05
 #define ETNAVIV_PARAM_GPU_FEATURES_3                0x06
 #define ETNAVIV_PARAM_GPU_FEATURES_4                0x07
+#define ETNAVIV_PARAM_GPU_FEATURES_5                0x08
+#define ETNAVIV_PARAM_GPU_FEATURES_6                0x09
 
 #define ETNAVIV_PARAM_GPU_STREAM_COUNT              0x10
 #define ETNAVIV_PARAM_GPU_REGISTER_MAX              0x11
@@ -59,6 +61,7 @@ struct drm_etnaviv_timespec {
 #define ETNAVIV_PARAM_GPU_BUFFER_SIZE               0x17
 #define ETNAVIV_PARAM_GPU_INSTRUCTION_COUNT         0x18
 #define ETNAVIV_PARAM_GPU_NUM_CONSTANTS             0x19
+#define ETNAVIV_PARAM_GPU_NUM_VARYINGS              0x1a
 
 #define ETNA_MAX_PIPES 4
 
index acf21026c78acb625b67f78c9fdf21914dc8e4a2..a5524cc95ff8b6acd1c7a79bb554b0f24cf7bc42 100644 (file)
@@ -772,10 +772,12 @@ struct drm_i915_gem_execbuffer2 {
 #define I915_EXEC_HANDLE_LUT           (1<<12)
 
 /** Used for switching BSD rings on the platforms with two BSD rings */
-#define I915_EXEC_BSD_MASK             (3<<13)
-#define I915_EXEC_BSD_DEFAULT          (0<<13) /* default ping-pong mode */
-#define I915_EXEC_BSD_RING1            (1<<13)
-#define I915_EXEC_BSD_RING2            (2<<13)
+#define I915_EXEC_BSD_SHIFT     (13)
+#define I915_EXEC_BSD_MASK      (3 << I915_EXEC_BSD_SHIFT)
+/* default ping-pong mode */
+#define I915_EXEC_BSD_DEFAULT   (0 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING1     (1 << I915_EXEC_BSD_SHIFT)
+#define I915_EXEC_BSD_RING2     (2 << I915_EXEC_BSD_SHIFT)
 
 /** Tell the kernel that the batchbuffer is processed by
  *  the resource streamer.
@@ -812,10 +814,35 @@ struct drm_i915_gem_busy {
        /** Handle of the buffer to check for busy */
        __u32 handle;
 
-       /** Return busy status (1 if busy, 0 if idle).
-        * The high word is used to indicate on which rings the object
-        * currently resides:
-        *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
+       /** Return busy status
+        *
+        * A return of 0 implies that the object is idle (after
+        * having flushed any pending activity), and a non-zero return that
+        * the object is still in-flight on the GPU. (The GPU has not yet
+        * signaled completion for all pending requests that reference the
+        * object.)
+        *
+        * The returned dword is split into two fields to indicate both
+        * the engines on which the object is being read, and the
+        * engine on which it is currently being written (if any).
+        *
+        * The low word (bits 0:15) indicates if the object is being written
+        * to by any engine (there can only be one, as the GEM implicit
+        * synchronisation rules force writes to be serialised). Only the
+        * engine for the last write is reported.
+        *
+        * The high word (bits 16:31) is a bitmask of which engines are
+        * currently reading from the object. Multiple engines may be
+        * reading from the object simultaneously.
+        *
+        * The value of each engine is the same as specified in the
+        * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc.
+        * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to
+        * the I915_EXEC_RENDER engine for execution, and so it is never
+        * reported as active itself. Some hardware may have parallel
+        * execution engines, e.g. multiple media engines, which are
+        * mapped to the same identifier in the EXECBUFFER2 ioctl and
+        * so are not separately reported for busyness.
         */
        __u32 busy;
 };
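A hedged sketch of how userspace might interpret the busy word documented above (the helper below is illustrative only, not part of the ABI):

	static void decode_busy(__u32 busy)
	{
		if (!busy) {
			printf("object is idle\n");
			return;
		}
		/* low word: the single engine still writing, high word: readers;
		 * engine values follow the I915_EXEC_* identifiers above */
		printf("writer %#x, readers %#x\n", busy & 0xffff, busy >> 16);
	}
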
index 81e6e0d1d3600c8a90d83ce8f444a8106d28e546..0e3c4c48b326ff6a694e2d2fb197f1b4b22494f2 100644 (file)
@@ -50,6 +50,7 @@ struct drm_msm_timespec {
 #define MSM_PARAM_GPU_ID     0x01
 #define MSM_PARAM_GMEM_SIZE  0x02
 #define MSM_PARAM_CHIP_ID    0x03
+#define MSM_PARAM_MAX_FREQ   0x04
 
 struct drm_msm_param {
        __u32 pipe;           /* in, MSM_PIPE_x */
index 843540c398eb078d318a1a1c15d11b303a092110..d820aa979620b5f75b071dffbd73429bfc360282 100644 (file)
 #define AUDIT_SECCOMP          1326    /* Secure Computing event */
 #define AUDIT_PROCTITLE                1327    /* Proctitle emit event */
 #define AUDIT_FEATURE_CHANGE   1328    /* audit log listing feature changes */
+#define AUDIT_REPLACE          1329    /* Replace auditd if this packet unanswered */
 
 #define AUDIT_AVC              1400    /* SE Linux avc denial or grant */
 #define AUDIT_SELINUX_ERR      1401    /* Internal SE Linux Errors */
index aa6f8571de136b74fba93996883bd69b3e28d412..2ee0fde1bf9649739e8663dc480742fdf0ede72f 100644 (file)
@@ -81,6 +81,8 @@ enum bpf_map_type {
        BPF_MAP_TYPE_ARRAY,
        BPF_MAP_TYPE_PROG_ARRAY,
        BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+       BPF_MAP_TYPE_PERCPU_HASH,
+       BPF_MAP_TYPE_PERCPU_ARRAY,
 };
 
 enum bpf_prog_type {
index dcd75cc261962f65c909a6efa0defe3f1dcdc281..11b3b31faf1483a46122a67e93b823182b768cb9 100644 (file)
@@ -39,6 +39,7 @@
 #define Q_XQUOTARM     XQM_CMD(6)      /* free disk space used by dquots */
 #define Q_XQUOTASYNC   XQM_CMD(7)      /* delalloc flush, updates dquots */
 #define Q_XGETQSTATV   XQM_CMD(8)      /* newer version of get quota */
+#define Q_XGETNEXTQUOTA        XQM_CMD(9)      /* get disk limits and usage >= ID */
 
 /*
  * fs_disk_quota structure:
index 57fa39005e794c65931f8e4cfd425d470693addd..b2e18018162987288a7b9ea3715e6e5f8cd3ce1b 100644 (file)
@@ -1319,11 +1319,45 @@ enum ethtool_sfeatures_retval_bits {
 
 #define SPEED_UNKNOWN          -1
 
+static inline int ethtool_validate_speed(__u32 speed)
+{
+       switch (speed) {
+       case SPEED_10:
+       case SPEED_100:
+       case SPEED_1000:
+       case SPEED_2500:
+       case SPEED_5000:
+       case SPEED_10000:
+       case SPEED_20000:
+       case SPEED_25000:
+       case SPEED_40000:
+       case SPEED_50000:
+       case SPEED_56000:
+       case SPEED_100000:
+       case SPEED_UNKNOWN:
+               return 1;
+       }
+
+       return 0;
+}
+
 /* Duplex, half or full. */
 #define DUPLEX_HALF            0x00
 #define DUPLEX_FULL            0x01
 #define DUPLEX_UNKNOWN         0xff
 
+static inline int ethtool_validate_duplex(__u8 duplex)
+{
+       switch (duplex) {
+       case DUPLEX_HALF:
+       case DUPLEX_FULL:
+       case DUPLEX_UNKNOWN:
+               return 1;
+       }
+
+       return 0;
+}
+
 /* Which connector port. */
 #define PORT_TP                        0x00
 #define PORT_AUI               0x01
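A hedged sketch of the intended use: a driver with a purely virtual link (e.g. a virtio-style NIC) can sanity-check user-supplied settings in its ethtool .set_settings handler with the new helpers before storing them (cmd is the struct ethtool_cmd argument):

	if (!ethtool_validate_speed(ethtool_cmd_speed(cmd)) ||
	    !ethtool_validate_duplex(cmd->duplex))
		return -EINVAL;
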
index 41e0433b4a8398b3dc3da6d2d6aacb8e46287150..149bec83a907515dccea4425acf3b71d8d18410f 100644 (file)
@@ -222,7 +222,6 @@ struct fsxattr {
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
-#define BLKDAXSET _IO(0x12,128)
 #define BLKDAXGET _IO(0x12,129)
 
 #define BMAP_IOCTL 1           /* obsolete - kept for compatibility */
index 18db14477bdda952fa946ccfba979950233f3130..ec35472349988fa556f39028d5ea8ecd689b85b0 100644 (file)
@@ -183,6 +183,8 @@ struct br_mdb_entry {
 #define MDB_TEMPORARY 0
 #define MDB_PERMANENT 1
        __u8 state;
+#define MDB_FLAGS_OFFLOAD      (1 << 0)
+       __u8 flags;
        __u16 vid;
        struct {
                union {
index a30b78090594d500df10aa91f9f1b6628ae88593..d452cea5902039e2abd15cbd344a75fd879a6228 100644 (file)
@@ -35,6 +35,8 @@ struct rtnl_link_stats {
        /* for cslip etc */
        __u32   rx_compressed;
        __u32   tx_compressed;
+
+       __u32   rx_nohandler;           /* dropped, no handler found    */
 };
 
 /* The main device statistics structure */
@@ -68,6 +70,8 @@ struct rtnl_link_stats64 {
        /* for cslip etc */
        __u64   rx_compressed;
        __u64   tx_compressed;
+
+       __u64   rx_nohandler;           /* dropped, no handler found    */
 };
 
 /* The struct should be in sync with struct ifmap */
@@ -401,6 +405,14 @@ enum {
 
 #define IFLA_VRF_MAX (__IFLA_VRF_MAX - 1)
 
+enum {
+       IFLA_VRF_PORT_UNSPEC,
+       IFLA_VRF_PORT_TABLE,
+       __IFLA_VRF_PORT_MAX
+};
+
+#define IFLA_VRF_PORT_MAX (__IFLA_VRF_PORT_MAX - 1)
+
 /* IPVLAN section */
 enum {
        IFLA_IPVLAN_UNSPEC,
index 9da905157ceeebcb5afbd9e73b8ac56195954929..a2fe0ac1d61ac89dcbd950db16f1224b9fd1625e 100644 (file)
@@ -541,7 +541,13 @@ struct kvm_s390_pgm_info {
        __u8 exc_access_id;
        __u8 per_access_id;
        __u8 op_access_id;
-       __u8 pad[3];
+#define KVM_S390_PGM_FLAGS_ILC_VALID   0x01
+#define KVM_S390_PGM_FLAGS_ILC_0       0x02
+#define KVM_S390_PGM_FLAGS_ILC_1       0x04
+#define KVM_S390_PGM_FLAGS_ILC_MASK    0x06
+#define KVM_S390_PGM_FLAGS_NO_REWIND   0x08
+       __u8 flags;
+       __u8 pad[2];
 };
 
 struct kvm_s390_prefix_info {
index 1e3c8cb43bd7ac61650da4b6e31ab854af4a7b89..c9eb42a6c0210fa1f075cc294c70516d18dc17f8 100644 (file)
@@ -88,6 +88,15 @@ struct media_device_info {
 #define MEDIA_ENT_F_IO_VBI             (MEDIA_ENT_F_BASE + 32)
 #define MEDIA_ENT_F_IO_SWRADIO         (MEDIA_ENT_F_BASE + 33)
 
+/*
+ * Analog TV IF-PLL decoders
+ *
+ * It is the responsibility of the master/bridge drivers to create links
+ * for MEDIA_ENT_F_IF_VID_DECODER and MEDIA_ENT_F_IF_AUD_DECODER.
+ */
+#define MEDIA_ENT_F_IF_VID_DECODER     (MEDIA_ENT_F_BASE + 41)
+#define MEDIA_ENT_F_IF_AUD_DECODER     (MEDIA_ENT_F_BASE + 42)
+
 /*
  * Don't touch on those. The ranges MEDIA_ENT_F_OLD_BASE and
  * MEDIA_ENT_F_OLD_SUBDEV_BASE are kept to keep backward compatibility
@@ -107,8 +116,12 @@ struct media_device_info {
 #define MEDIA_ENT_F_LENS               (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
 #define MEDIA_ENT_F_ATV_DECODER                (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
 /*
- * It is a responsibility of the entity drivers to add connectors and links
- *     for the tuner entities.
+ * It is the responsibility of the master/bridge drivers to add connectors
+ * and links for MEDIA_ENT_F_TUNER. Note that some older tuners require
+ * separate I2C chips to decode the analog TV signal when the master/bridge
+ * chipset doesn't have its own TV standard decoder. In such cases, the
+ * IF-PLL stage is mapped via one or two entities:
+ * MEDIA_ENT_F_IF_VID_DECODER and/or MEDIA_ENT_F_IF_AUD_DECODER.
  */
 #define MEDIA_ENT_F_TUNER              (MEDIA_ENT_F_OLD_SUBDEV_BASE + 5)
 
@@ -286,7 +299,7 @@ struct media_links_enum {
  *       later, before the adding this API upstream.
  */
 
-#if 0 /* Let's postpone it to Kernel 4.6 */
+
 struct media_v2_entity {
        __u32 id;
        char name[64];          /* FIXME: move to a property? (RFC says so) */
@@ -351,7 +364,6 @@ static inline void __user *media_get_uptr(__u64 arg)
 {
        return (void __user *)(uintptr_t)arg;
 }
-#endif
 
 /* ioctls */
 
@@ -359,9 +371,6 @@ static inline void __user *media_get_uptr(__u64 arg)
 #define MEDIA_IOC_ENUM_ENTITIES                _IOWR('|', 0x01, struct media_entity_desc)
 #define MEDIA_IOC_ENUM_LINKS           _IOWR('|', 0x02, struct media_links_enum)
 #define MEDIA_IOC_SETUP_LINK           _IOWR('|', 0x03, struct media_link_desc)
-
-#if 0 /* Let's postpone it to Kernel 4.6 */
 #define MEDIA_IOC_G_TOPOLOGY           _IOWR('|', 0x04, struct media_v2_topology)
-#endif
 
 #endif /* __LINUX_MEDIA_H */
index 5b7b5ebe7ca8731868e38bfab6218c166cb0317d..7758969a2a8e83f53e1bfa8f810b3826b9c7d44a 100644 (file)
@@ -1789,6 +1789,10 @@ enum nl80211_commands {
  *     thus it must not specify the number of iterations, only the interval
  *     between scans. The scan plans are executed sequentially.
  *     Each scan plan is a nested attribute of &enum nl80211_sched_scan_plan.
+ * @NL80211_ATTR_PBSS: flag attribute. If set it means operate
+ *     in a PBSS. Specified in %NL80211_CMD_CONNECT to request
+ *     connecting to a PCP, and in %NL80211_CMD_START_AP to start
+ *     a PCP instead of AP. Relevant for DMG networks only.
  *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
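A hedged sketch of how a libnl-based userspace tool might request PCP operation with the new flag attribute (msg is assumed to be an nl_msg already prepared for NL80211_CMD_CONNECT or NL80211_CMD_START_AP):

	if (want_pbss)			/* operate as/against a PCP on a DMG band */
		nla_put_flag(msg, NL80211_ATTR_PBSS);
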
@@ -2164,6 +2168,8 @@ enum nl80211_attrs {
        NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
        NL80211_ATTR_SCHED_SCAN_PLANS,
 
+       NL80211_ATTR_PBSS,
+
        /* add attributes here, update the policy in nl80211.c */
 
        __NL80211_ATTR_AFTER_LAST,
index 9c95b2c1c88a6ef0a6bb4207cd5122011d3007f9..38baddb807f503f5f526d93377df677ab80c5ad1 100644 (file)
@@ -71,6 +71,7 @@
 #define Q_SETINFO  0x800006    /* set information about quota files */
 #define Q_GETQUOTA 0x800007    /* get user quota structure */
 #define Q_SETQUOTA 0x800008    /* set user quota structure */
+#define Q_GETNEXTQUOTA 0x800009        /* get disk limits and usage >= ID */
 
 /* Quota format type IDs */
 #define        QFMT_VFS_OLD 1
@@ -119,6 +120,19 @@ struct if_dqblk {
        __u32 dqb_valid;
 };
 
+struct if_nextdqblk {
+       __u64 dqb_bhardlimit;
+       __u64 dqb_bsoftlimit;
+       __u64 dqb_curspace;
+       __u64 dqb_ihardlimit;
+       __u64 dqb_isoftlimit;
+       __u64 dqb_curinodes;
+       __u64 dqb_btime;
+       __u64 dqb_itime;
+       __u32 dqb_valid;
+       __u32 dqb_id;
+};
+
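A hedged userspace sketch of iterating over all user quotas with the new command and structure (the device path is an example; quotactl() and QCMD() come from <sys/quota.h>, and error handling is omitted):

	struct if_nextdqblk ndq;
	qid_t id = 0;

	while (quotactl(QCMD(Q_GETNEXTQUOTA, USRQUOTA), "/dev/sda1", id,
			(caddr_t)&ndq) == 0) {
		printf("uid %u: %llu bytes in use\n", ndq.dqb_id,
		       (unsigned long long)ndq.dqb_curspace);
		id = ndq.dqb_id + 1;	/* continue from the next allocated id */
	}
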
 /*
  * Structure used for setting quota information about file via quotactl
  * Following flags are used to specify which fields are valid
index 058757f7a7339b104f9f44ebd77569cf20f35ef6..2e00dcebebd0700ac835d0d98221ef1cb4717974 100644 (file)
@@ -59,6 +59,8 @@ enum rfkill_type {
  * @RFKILL_OP_DEL: a device was removed
  * @RFKILL_OP_CHANGE: a device's state changed -- userspace changes one device
  * @RFKILL_OP_CHANGE_ALL: userspace changes all devices (of a type, or all)
+ *     into a state, also updating the default state used for devices that
+ *     are hot-plugged later.
  */
 enum rfkill_operation {
        RFKILL_OP_ADD = 0,
index 331499d597fa86cd941d99d5ad8ab9ec2120cb86..361297e96f5826360bb24f80de9089b83a9881ba 100644 (file)
 #define USB_RT_HUB     (USB_TYPE_CLASS | USB_RECIP_DEVICE)
 #define USB_RT_PORT    (USB_TYPE_CLASS | USB_RECIP_OTHER)
 
+/*
+ * Port status type for GetPortStatus requests added in USB 3.1
+ * See USB 3.1 spec Table 10-12
+ */
+#define HUB_PORT_STATUS                0
+#define HUB_PORT_PD_STATUS     1
+#define HUB_EXT_PORT_STATUS    2
+
 /*
  * Hub class requests
  * See USB 2.0 spec Table 11-16
 /*
  * Hub Status and Hub Change results
  * See USB 2.0 spec Table 11-19 and Table 11-20
+ * USB 3.1 extends the port status request and may return 4 additional bytes.
+ * See USB 3.1 spec section 10.16.2.6, Tables 10-12 and 10-15
  */
 struct usb_port_status {
        __le16 wPortStatus;
        __le16 wPortChange;
+       __le32 dwExtPortStatus;
 } __attribute__ ((packed));
 
 /*
@@ -172,6 +183,16 @@ struct usb_port_status {
 #define USB_PORT_STAT_C_LINK_STATE     0x0040
 #define USB_PORT_STAT_C_CONFIG_ERROR   0x0080
 
+/*
+ * USB 3.1 dwExtPortStatus field masks
+ * See USB 3.1 spec 10.16.2.6.3 Table 10-15
+ */
+
+#define USB_EXT_PORT_STAT_RX_SPEED_ID  0x0000000f
+#define USB_EXT_PORT_STAT_TX_SPEED_ID  0x000000f0
+#define USB_EXT_PORT_STAT_RX_LANES     0x00000f00
+#define USB_EXT_PORT_STAT_TX_LANES     0x0000f000
+
 /*
  * wHubCharacteristics (masks)
  * See USB 2.0 spec Table 11-13, offset 3
index 4338eb7b09b3ad705f766fa1c3e1f8a100ae049e..779a62aafafe6a2d19b3c25bd9ddbef7617b0f7f 100644 (file)
@@ -954,6 +954,7 @@ enum usb_device_speed {
        USB_SPEED_HIGH,                         /* usb 2.0 */
        USB_SPEED_WIRELESS,                     /* wireless (usb 2.5) */
        USB_SPEED_SUPER,                        /* usb 3.0 */
+       USB_SPEED_SUPER_PLUS,                   /* usb 3.1 */
 };
 
 
index c045ae12556cddee60e06bae44001c4a21c020ef..2e59d9c50b8d739855f87989f09e2ed32ce1a4e6 100644 (file)
@@ -2,12 +2,14 @@
  * Copyright (C) 2007 Stefan Kopp, Gechingen, Germany
  * Copyright (C) 2008 Novell, Inc.
  * Copyright (C) 2008 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (C) 2015 Dave Penkler <dpenkler@gmail.com>
  *
  * This file holds USB constants defined by the USB Device Class
- * Definition for Test and Measurement devices published by the USB-IF.
+ * and USB488 Subclass Definitions for Test and Measurement devices
+ * published by the USB-IF.
  *
- * It also has the ioctl definitions for the usbtmc kernel driver that
- * userspace needs to know about.
+ * It also has the ioctl and capability definitions for the
+ * usbtmc kernel driver that userspace needs to know about.
  */
 
 #ifndef __LINUX_USB_TMC_H
 #define USBTMC_REQUEST_CHECK_CLEAR_STATUS              6
 #define USBTMC_REQUEST_GET_CAPABILITIES                        7
 #define USBTMC_REQUEST_INDICATOR_PULSE                 64
+#define USBTMC488_REQUEST_READ_STATUS_BYTE             128
+#define USBTMC488_REQUEST_REN_CONTROL                  160
+#define USBTMC488_REQUEST_GOTO_LOCAL                   161
+#define USBTMC488_REQUEST_LOCAL_LOCKOUT                        162
 
 /* Request values for USBTMC driver's ioctl entry point */
 #define USBTMC_IOC_NR                  91
 #define USBTMC_IOCTL_ABORT_BULK_IN     _IO(USBTMC_IOC_NR, 4)
 #define USBTMC_IOCTL_CLEAR_OUT_HALT    _IO(USBTMC_IOC_NR, 6)
 #define USBTMC_IOCTL_CLEAR_IN_HALT     _IO(USBTMC_IOC_NR, 7)
+#define USBTMC488_IOCTL_GET_CAPS       _IOR(USBTMC_IOC_NR, 17, unsigned char)
+#define USBTMC488_IOCTL_READ_STB       _IOR(USBTMC_IOC_NR, 18, unsigned char)
+#define USBTMC488_IOCTL_REN_CONTROL    _IOW(USBTMC_IOC_NR, 19, unsigned char)
+#define USBTMC488_IOCTL_GOTO_LOCAL     _IO(USBTMC_IOC_NR, 20)
+#define USBTMC488_IOCTL_LOCAL_LOCKOUT  _IO(USBTMC_IOC_NR, 21)
+
+/* Driver encoded usb488 capabilities */
+#define USBTMC488_CAPABILITY_TRIGGER         1
+#define USBTMC488_CAPABILITY_SIMPLE          2
+#define USBTMC488_CAPABILITY_REN_CONTROL     2
+#define USBTMC488_CAPABILITY_GOTO_LOCAL      2
+#define USBTMC488_CAPABILITY_LOCAL_LOCKOUT   2
+#define USBTMC488_CAPABILITY_488_DOT_2       4
+#define USBTMC488_CAPABILITY_DT1             16
+#define USBTMC488_CAPABILITY_RL1             32
+#define USBTMC488_CAPABILITY_SR1             64
+#define USBTMC488_CAPABILITY_FULL_SCPI       128
 
 #endif
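A hedged userspace sketch using the new USB488 ioctls (fd is an open /dev/usbtmcN node; which operations a given instrument supports is reported by the capability bits above):

	unsigned char caps = 0, stb = 0;

	if (ioctl(fd, USBTMC488_IOCTL_GET_CAPS, &caps) < 0)
		return -1;
	if (ioctl(fd, USBTMC488_IOCTL_READ_STB, &stb) == 0)
		printf("usb488 caps %#x, status byte %#x\n", caps, stb);
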
index 15273987093e41e7823f98a953f088332db066aa..5b3f685a2d50ca2146e579f81580aecb61f19922 100644 (file)
  * Copyright (C) 2012 Nokia Corporation
  * Contact: Sakari Ailus <sakari.ailus@iki.fi>
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
  *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
+ *  Alternatively you can redistribute this file under the terms of the
+ *  BSD license as stated below:
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *  1. Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *  2. Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *  3. The names of its contributors may not be used to endorse or promote
+ *     products derived from this software without specific prior written
+ *     permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ *  TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ *  PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  *
  */
 
index 58c9e374704bb20cfff41fa92afcf7cafe498c32..b3008bcfb1dc42d72b5faed1776c2e8c3618919d 100644 (file)
@@ -499,11 +499,6 @@ asmlinkage __visible void __init start_kernel(void)
        char *command_line;
        char *after_dashes;
 
-       /*
-        * Need to run as early as possible, to initialize the
-        * lockdep hash:
-        */
-       lockdep_init();
        set_task_stack_end_magic(&init_task);
        smp_setup_processor_id();
        debug_objects_early_init();
index 3a3e5deeda8d3e5f16977c7fac2ceac4480f792b..678c3f000191be66aed944e764209ef5c2924b98 100644 (file)
@@ -809,6 +809,16 @@ static int audit_set_feature(struct sk_buff *skb)
        return 0;
 }
 
+static int audit_replace(pid_t pid)
+{
+       struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0,
+                                              &pid, sizeof(pid));
+
+       if (!skb)
+               return -ENOMEM;
+       return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+}
+
 static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        u32                     seq;
@@ -870,9 +880,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                }
                if (s.mask & AUDIT_STATUS_PID) {
                        int new_pid = s.pid;
+                       pid_t requesting_pid = task_tgid_vnr(current);
 
-                       if ((!new_pid) && (task_tgid_vnr(current) != audit_pid))
+                       if ((!new_pid) && (requesting_pid != audit_pid)) {
+                               audit_log_config_change("audit_pid", new_pid, audit_pid, 0);
                                return -EACCES;
+                       }
+                       if (audit_pid && new_pid &&
+                           audit_replace(requesting_pid) != -ECONNREFUSED) {
+                               audit_log_config_change("audit_pid", new_pid, audit_pid, 0);
+                               return -EEXIST;
+                       }
                        if (audit_enabled != AUDIT_OFF)
                                audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
                        audit_pid = new_pid;
@@ -920,7 +938,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (err == 1) { /* match or error */
                        err = 0;
                        if (msg_type == AUDIT_USER_TTY) {
-                               err = tty_audit_push_current();
+                               err = tty_audit_push();
                                if (err)
                                        break;
                        }
@@ -1030,20 +1048,19 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                break;
        case AUDIT_TTY_GET: {
                struct audit_tty_status s;
-               struct task_struct *tsk = current;
+               unsigned int t;
 
-               spin_lock(&tsk->sighand->siglock);
-               s.enabled = tsk->signal->audit_tty;
-               s.log_passwd = tsk->signal->audit_tty_log_passwd;
-               spin_unlock(&tsk->sighand->siglock);
+               t = READ_ONCE(current->signal->audit_tty);
+               s.enabled = t & AUDIT_TTY_ENABLE;
+               s.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD);
 
                audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
                break;
        }
        case AUDIT_TTY_SET: {
                struct audit_tty_status s, old;
-               struct task_struct *tsk = current;
                struct audit_buffer     *ab;
+               unsigned int t;
 
                memset(&s, 0, sizeof(s));
                /* guard against past and future API changes */
@@ -1053,14 +1070,14 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                    (s.log_passwd != 0 && s.log_passwd != 1))
                        err = -EINVAL;
 
-               spin_lock(&tsk->sighand->siglock);
-               old.enabled = tsk->signal->audit_tty;
-               old.log_passwd = tsk->signal->audit_tty_log_passwd;
-               if (!err) {
-                       tsk->signal->audit_tty = s.enabled;
-                       tsk->signal->audit_tty_log_passwd = s.log_passwd;
+               if (err)
+                       t = READ_ONCE(current->signal->audit_tty);
+               else {
+                       t = s.enabled | (-s.log_passwd & AUDIT_TTY_LOG_PASSWD);
+                       t = xchg(&current->signal->audit_tty, t);
                }
-               spin_unlock(&tsk->sighand->siglock);
+               old.enabled = t & AUDIT_TTY_ENABLE;
+               old.log_passwd = !!(t & AUDIT_TTY_LOG_PASSWD);
 
                audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE);
                audit_log_format(ab, " op=tty_set old-enabled=%d new-enabled=%d"
index 9f194aad0adc0600ce257581315824f540eb9c0b..3cf1c5978d39e4fbc7f44826017bbdd755f6c9df 100644 (file)
@@ -185,7 +185,7 @@ static struct audit_watch *audit_init_watch(char *path)
        return watch;
 }
 
-/* Translate a watch string to kernel respresentation. */
+/* Translate a watch string to kernel representation. */
 int audit_to_watch(struct audit_krule *krule, char *path, int len, u32 op)
 {
        struct audit_watch *watch;
index b8ff9e193753614072d55b98b0bb46e2a8534880..94ca7b1e5e7eadd1053f1e01185c1a36320f52df 100644 (file)
@@ -158,7 +158,7 @@ char *audit_unpack_string(void **bufp, size_t *remain, size_t len)
        return str;
 }
 
-/* Translate an inode field to kernel respresentation. */
+/* Translate an inode field to kernel representation. */
 static inline int audit_to_inode(struct audit_krule *krule,
                                 struct audit_field *f)
 {
@@ -415,7 +415,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f)
        return 0;
 }
 
-/* Translate struct audit_rule_data to kernel's rule respresentation. */
+/* Translate struct audit_rule_data to kernel's rule representation. */
 static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                                               size_t datasz)
 {
@@ -593,7 +593,7 @@ static inline size_t audit_pack_string(void **bufp, const char *str)
        return len;
 }
 
-/* Translate kernel rule respresentation to struct audit_rule_data. */
+/* Translate kernel rule representation to struct audit_rule_data. */
 static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
 {
        struct audit_rule_data *data;
index b0799bced518691bd847a16973be74184bcdcbaf..bd3bdf2486a7b1aa4744f2bd5045ff21e47812cd 100644 (file)
 #include <linux/filter.h>
 #include <linux/perf_event.h>
 
+static void bpf_array_free_percpu(struct bpf_array *array)
+{
+       int i;
+
+       for (i = 0; i < array->map.max_entries; i++)
+               free_percpu(array->pptrs[i]);
+}
+
+static int bpf_array_alloc_percpu(struct bpf_array *array)
+{
+       void __percpu *ptr;
+       int i;
+
+       for (i = 0; i < array->map.max_entries; i++) {
+               ptr = __alloc_percpu_gfp(array->elem_size, 8,
+                                        GFP_USER | __GFP_NOWARN);
+               if (!ptr) {
+                       bpf_array_free_percpu(array);
+                       return -ENOMEM;
+               }
+               array->pptrs[i] = ptr;
+       }
+
+       return 0;
+}
+
 /* Called from syscall */
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
+       bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
        struct bpf_array *array;
-       u32 elem_size, array_size;
+       u64 array_size;
+       u32 elem_size;
 
        /* check sanity of attributes */
        if (attr->max_entries == 0 || attr->key_size != 4 ||
@@ -36,12 +64,16 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
        elem_size = round_up(attr->value_size, 8);
 
-       /* check round_up into zero and u32 overflow */
-       if (elem_size == 0 ||
-           attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size)
+       array_size = sizeof(*array);
+       if (percpu)
+               array_size += (u64) attr->max_entries * sizeof(void *);
+       else
+               array_size += (u64) attr->max_entries * elem_size;
+
+       /* make sure there is no u32 overflow later in round_up() */
+       if (array_size >= U32_MAX - PAGE_SIZE)
                return ERR_PTR(-ENOMEM);
 
-       array_size = sizeof(*array) + attr->max_entries * elem_size;
 
        /* allocate all map elements and zero-initialize them */
        array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
@@ -52,12 +84,25 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
        }
 
        /* copy mandatory map attributes */
+       array->map.map_type = attr->map_type;
        array->map.key_size = attr->key_size;
        array->map.value_size = attr->value_size;
        array->map.max_entries = attr->max_entries;
-       array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
        array->elem_size = elem_size;
 
+       if (!percpu)
+               goto out;
+
+       array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
+
+       if (array_size >= U32_MAX - PAGE_SIZE ||
+           elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
+               kvfree(array);
+               return ERR_PTR(-ENOMEM);
+       }
+out:
+       array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
+
        return &array->map;
 }
 
@@ -67,12 +112,50 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
 
-       if (index >= array->map.max_entries)
+       if (unlikely(index >= array->map.max_entries))
                return NULL;
 
        return array->value + array->elem_size * index;
 }
 
+/* Called from eBPF program */
+static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       u32 index = *(u32 *)key;
+
+       if (unlikely(index >= array->map.max_entries))
+               return NULL;
+
+       return this_cpu_ptr(array->pptrs[index]);
+}
+
+int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       u32 index = *(u32 *)key;
+       void __percpu *pptr;
+       int cpu, off = 0;
+       u32 size;
+
+       if (unlikely(index >= array->map.max_entries))
+               return -ENOENT;
+
+       /* per_cpu areas are zero-filled and bpf programs can only
+        * access 'value_size' of them, so copying rounded areas
+        * will not leak any kernel data
+        */
+       size = round_up(map->value_size, 8);
+       rcu_read_lock();
+       pptr = array->pptrs[index];
+       for_each_possible_cpu(cpu) {
+               bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
+               off += size;
+       }
+       rcu_read_unlock();
+       return 0;
+}
+
 /* Called from syscall */
 static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 {
@@ -99,19 +182,62 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        u32 index = *(u32 *)key;
 
-       if (map_flags > BPF_EXIST)
+       if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;
 
-       if (index >= array->map.max_entries)
+       if (unlikely(index >= array->map.max_entries))
+               /* all elements were pre-allocated, cannot insert a new one */
+               return -E2BIG;
+
+       if (unlikely(map_flags == BPF_NOEXIST))
+               /* all elements already exist */
+               return -EEXIST;
+
+       if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+               memcpy(this_cpu_ptr(array->pptrs[index]),
+                      value, map->value_size);
+       else
+               memcpy(array->value + array->elem_size * index,
+                      value, map->value_size);
+       return 0;
+}
+
+int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
+                           u64 map_flags)
+{
+       struct bpf_array *array = container_of(map, struct bpf_array, map);
+       u32 index = *(u32 *)key;
+       void __percpu *pptr;
+       int cpu, off = 0;
+       u32 size;
+
+       if (unlikely(map_flags > BPF_EXIST))
+               /* unknown flags */
+               return -EINVAL;
+
+       if (unlikely(index >= array->map.max_entries))
                /* all elements were pre-allocated, cannot insert a new one */
                return -E2BIG;
 
-       if (map_flags == BPF_NOEXIST)
+       if (unlikely(map_flags == BPF_NOEXIST))
                /* all elements already exist */
                return -EEXIST;
 
-       memcpy(array->value + array->elem_size * index, value, map->value_size);
+       /* user space provides round_up(value_size, 8) bytes that are copied
+        * into the per-cpu area; BPF programs can only access value_size of
+        * them. During lookup the same extra bytes are returned, either as
+        * written or as the zeros left by percpu_alloc, so no kernel data
+        * can leak.
+        */
+       size = round_up(map->value_size, 8);
+       rcu_read_lock();
+       pptr = array->pptrs[index];
+       for_each_possible_cpu(cpu) {
+               bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
+               off += size;
+       }
+       rcu_read_unlock();
        return 0;
 }
 
@@ -133,6 +259,9 @@ static void array_map_free(struct bpf_map *map)
         */
        synchronize_rcu();
 
+       if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+               bpf_array_free_percpu(array);
+
        kvfree(array);
 }
 
@@ -150,9 +279,24 @@ static struct bpf_map_type_list array_type __read_mostly = {
        .type = BPF_MAP_TYPE_ARRAY,
 };
 
+static const struct bpf_map_ops percpu_array_ops = {
+       .map_alloc = array_map_alloc,
+       .map_free = array_map_free,
+       .map_get_next_key = array_map_get_next_key,
+       .map_lookup_elem = percpu_array_map_lookup_elem,
+       .map_update_elem = array_map_update_elem,
+       .map_delete_elem = array_map_delete_elem,
+};
+
+static struct bpf_map_type_list percpu_array_type __read_mostly = {
+       .ops = &percpu_array_ops,
+       .type = BPF_MAP_TYPE_PERCPU_ARRAY,
+};
+
 static int __init register_array_map(void)
 {
        bpf_register_map_type(&array_type);
+       bpf_register_map_type(&percpu_array_type);
        return 0;
 }
 late_initcall(register_array_map);
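The syscall-side copy implemented above hands userspace one round_up(value_size, 8) slot per possible CPU; a hedged sketch of a reader, assuming the bpf_create_map()/bpf_lookup_elem() wrappers from samples/bpf and using the configured CPU count as an approximation of the possible-CPU count:

	unsigned int ncpus = get_nprocs_conf();
	__u64 values[ncpus];
	__u32 key = 0;
	__u64 total = 0;
	int map_fd;

	map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
				sizeof(__u64), 32);
	if (map_fd >= 0 && bpf_lookup_elem(map_fd, &key, values) == 0)
		for (unsigned int i = 0; i < ncpus; i++)
			total += values[i];	/* aggregate the per-CPU copies */
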
@@ -291,10 +435,13 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
 {
        struct perf_event *event;
        const struct perf_event_attr *attr;
+       struct file *file;
 
-       event = perf_event_get(fd);
-       if (IS_ERR(event))
-               return event;
+       file = perf_event_get(fd);
+       if (IS_ERR(file))
+               return file;
+
+       event = file->private_data;
 
        attr = perf_event_attrs(event);
        if (IS_ERR(attr))
@@ -304,24 +451,22 @@ static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd)
                goto err;
 
        if (attr->type == PERF_TYPE_RAW)
-               return event;
+               return file;
 
        if (attr->type == PERF_TYPE_HARDWARE)
-               return event;
+               return file;
 
        if (attr->type == PERF_TYPE_SOFTWARE &&
            attr->config == PERF_COUNT_SW_BPF_OUTPUT)
-               return event;
+               return file;
 err:
-       perf_event_release_kernel(event);
+       fput(file);
        return ERR_PTR(-EINVAL);
 }
 
 static void perf_event_fd_array_put_ptr(void *ptr)
 {
-       struct perf_event *event = ptr;
-
-       perf_event_release_kernel(event);
+       fput((struct file *)ptr);
 }
 
 static const struct bpf_map_ops perf_event_array_ops = {
index c5b30fd8a3151f99b8cf175ad2a7ce563a0e13c8..fd5db8fe9360db2134339d3e3f4ec47032d42772 100644 (file)
@@ -31,21 +31,27 @@ struct bpf_htab {
 struct htab_elem {
        struct hlist_node hash_node;
        struct rcu_head rcu;
-       u32 hash;
+       union {
+               u32 hash;
+               u32 key_size;
+       };
        char key[0] __aligned(8);
 };
 
 /* Called from syscall */
 static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 {
+       bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_HASH;
        struct bpf_htab *htab;
        int err, i;
+       u64 cost;
 
        htab = kzalloc(sizeof(*htab), GFP_USER);
        if (!htab)
                return ERR_PTR(-ENOMEM);
 
        /* mandatory map attributes */
+       htab->map.map_type = attr->map_type;
        htab->map.key_size = attr->key_size;
        htab->map.value_size = attr->value_size;
        htab->map.max_entries = attr->max_entries;
@@ -77,24 +83,34 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
                 */
                goto free_htab;
 
+       if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE)
+               /* make sure the size for pcpu_alloc() is reasonable */
+               goto free_htab;
+
        htab->elem_size = sizeof(struct htab_elem) +
-                         round_up(htab->map.key_size, 8) +
-                         htab->map.value_size;
+                         round_up(htab->map.key_size, 8);
+       if (percpu)
+               htab->elem_size += sizeof(void *);
+       else
+               htab->elem_size += htab->map.value_size;
 
        /* prevent zero size kmalloc and check for u32 overflow */
        if (htab->n_buckets == 0 ||
            htab->n_buckets > U32_MAX / sizeof(struct bucket))
                goto free_htab;
 
-       if ((u64) htab->n_buckets * sizeof(struct bucket) +
-           (u64) htab->elem_size * htab->map.max_entries >=
-           U32_MAX - PAGE_SIZE)
+       cost = (u64) htab->n_buckets * sizeof(struct bucket) +
+              (u64) htab->elem_size * htab->map.max_entries;
+
+       if (percpu)
+               cost += (u64) round_up(htab->map.value_size, 8) *
+                       num_possible_cpus() * htab->map.max_entries;
+
+       if (cost >= U32_MAX - PAGE_SIZE)
                /* make sure page count doesn't overflow */
                goto free_htab;
 
-       htab->map.pages = round_up(htab->n_buckets * sizeof(struct bucket) +
-                                  htab->elem_size * htab->map.max_entries,
-                                  PAGE_SIZE) >> PAGE_SHIFT;
+       htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
        err = -ENOMEM;
        htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
@@ -148,7 +164,7 @@ static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash,
 }
 
 /* Called from syscall or from eBPF program */
-static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
+static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_head *head;
@@ -166,6 +182,13 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 
        l = lookup_elem_raw(head, hash, key, key_size);
 
+       return l;
+}
+
+static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       struct htab_elem *l = __htab_map_lookup_elem(map, key);
+
        if (l)
                return l->key + round_up(map->key_size, 8);
 
@@ -230,65 +253,149 @@ find_first_elem:
        return -ENOENT;
 }
 
+
+static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
+                                    void __percpu *pptr)
+{
+       *(void __percpu **)(l->key + key_size) = pptr;
+}
+
+static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
+{
+       return *(void __percpu **)(l->key + key_size);
+}
+
+static void htab_percpu_elem_free(struct htab_elem *l)
+{
+       free_percpu(htab_elem_get_ptr(l, l->key_size));
+       kfree(l);
+}
+
+static void htab_percpu_elem_free_rcu(struct rcu_head *head)
+{
+       struct htab_elem *l = container_of(head, struct htab_elem, rcu);
+
+       htab_percpu_elem_free(l);
+}
+
+static void free_htab_elem(struct htab_elem *l, bool percpu, u32 key_size)
+{
+       if (percpu) {
+               l->key_size = key_size;
+               call_rcu(&l->rcu, htab_percpu_elem_free_rcu);
+       } else {
+               kfree_rcu(l, rcu);
+       }
+}
+
+static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+                                        void *value, u32 key_size, u32 hash,
+                                        bool percpu, bool onallcpus)
+{
+       u32 size = htab->map.value_size;
+       struct htab_elem *l_new;
+       void __percpu *pptr;
+
+       l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+       if (!l_new)
+               return NULL;
+
+       memcpy(l_new->key, key, key_size);
+       if (percpu) {
+               /* round up value_size to 8 bytes */
+               size = round_up(size, 8);
+
+               /* alloc_percpu zero-fills */
+               pptr = __alloc_percpu_gfp(size, 8, GFP_ATOMIC | __GFP_NOWARN);
+               if (!pptr) {
+                       kfree(l_new);
+                       return NULL;
+               }
+
+               if (!onallcpus) {
+                       /* copy true value_size bytes */
+                       memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
+               } else {
+                       int off = 0, cpu;
+
+                       for_each_possible_cpu(cpu) {
+                               bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
+                                               value + off, size);
+                               off += size;
+                       }
+               }
+               htab_elem_set_ptr(l_new, key_size, pptr);
+       } else {
+               memcpy(l_new->key + round_up(key_size, 8), value, size);
+       }
+
+       l_new->hash = hash;
+       return l_new;
+}
+
+static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
+                      u64 map_flags)
+{
+       if (!l_old && unlikely(atomic_read(&htab->count) >= htab->map.max_entries))
+               /* if elem with this 'key' doesn't exist and we've reached
+                * max_entries limit, fail insertion of new elem
+                */
+               return -E2BIG;
+
+       if (l_old && map_flags == BPF_NOEXIST)
+               /* elem already exists */
+               return -EEXIST;
+
+       if (!l_old && map_flags == BPF_EXIST)
+               /* elem doesn't exist, cannot update it */
+               return -ENOENT;
+
+       return 0;
+}
+
 /* Called from syscall or from eBPF program */
 static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                                u64 map_flags)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-       struct htab_elem *l_new, *l_old;
+       struct htab_elem *l_new = NULL, *l_old;
        struct hlist_head *head;
-       struct bucket *b;
        unsigned long flags;
-       u32 key_size;
+       struct bucket *b;
+       u32 key_size, hash;
        int ret;
 
-       if (map_flags > BPF_EXIST)
+       if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;
 
        WARN_ON_ONCE(!rcu_read_lock_held());
 
-       /* allocate new element outside of lock */
-       l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
-       if (!l_new)
-               return -ENOMEM;
-
        key_size = map->key_size;
 
-       memcpy(l_new->key, key, key_size);
-       memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);
+       hash = htab_map_hash(key, key_size);
+
+       /* allocate new element outside of the lock, since
+        * we're most likely going to insert it
+        */
+       l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false);
+       if (!l_new)
+               return -ENOMEM;
 
-       l_new->hash = htab_map_hash(l_new->key, key_size);
-       b = __select_bucket(htab, l_new->hash);
+       b = __select_bucket(htab, hash);
        head = &b->head;
 
        /* bpf_map_update_elem() can be called in_irq() */
        raw_spin_lock_irqsave(&b->lock, flags);
 
-       l_old = lookup_elem_raw(head, l_new->hash, key, key_size);
+       l_old = lookup_elem_raw(head, hash, key, key_size);
 
-       if (!l_old && unlikely(atomic_read(&htab->count) >= map->max_entries)) {
-               /* if elem with this 'key' doesn't exist and we've reached
-                * max_entries limit, fail insertion of new elem
-                */
-               ret = -E2BIG;
+       ret = check_flags(htab, l_old, map_flags);
+       if (ret)
                goto err;
-       }
 
-       if (l_old && map_flags == BPF_NOEXIST) {
-               /* elem already exists */
-               ret = -EEXIST;
-               goto err;
-       }
-
-       if (!l_old && map_flags == BPF_EXIST) {
-               /* elem doesn't exist, cannot update it */
-               ret = -ENOENT;
-               goto err;
-       }
-
-       /* add new element to the head of the list, so that concurrent
-        * search will find it before old elem
+       /* add new element to the head of the list, so that
+        * concurrent search will find it before old elem
         */
        hlist_add_head_rcu(&l_new->hash_node, head);
        if (l_old) {
@@ -298,7 +405,6 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                atomic_inc(&htab->count);
        }
        raw_spin_unlock_irqrestore(&b->lock, flags);
-
        return 0;
 err:
        raw_spin_unlock_irqrestore(&b->lock, flags);
@@ -306,10 +412,84 @@ err:
        return ret;
 }
 
+static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
+                                        void *value, u64 map_flags,
+                                        bool onallcpus)
+{
+       struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       struct htab_elem *l_new = NULL, *l_old;
+       struct hlist_head *head;
+       unsigned long flags;
+       struct bucket *b;
+       u32 key_size, hash;
+       int ret;
+
+       if (unlikely(map_flags > BPF_EXIST))
+               /* unknown flags */
+               return -EINVAL;
+
+       WARN_ON_ONCE(!rcu_read_lock_held());
+
+       key_size = map->key_size;
+
+       hash = htab_map_hash(key, key_size);
+
+       b = __select_bucket(htab, hash);
+       head = &b->head;
+
+       /* bpf_map_update_elem() can be called in_irq() */
+       raw_spin_lock_irqsave(&b->lock, flags);
+
+       l_old = lookup_elem_raw(head, hash, key, key_size);
+
+       ret = check_flags(htab, l_old, map_flags);
+       if (ret)
+               goto err;
+
+       if (l_old) {
+               void __percpu *pptr = htab_elem_get_ptr(l_old, key_size);
+               u32 size = htab->map.value_size;
+
+               /* per-cpu hash map can update value in-place */
+               if (!onallcpus) {
+                       memcpy(this_cpu_ptr(pptr), value, size);
+               } else {
+                       int off = 0, cpu;
+
+                       size = round_up(size, 8);
+                       for_each_possible_cpu(cpu) {
+                               bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
+                                               value + off, size);
+                               off += size;
+                       }
+               }
+       } else {
+               l_new = alloc_htab_elem(htab, key, value, key_size,
+                                       hash, true, onallcpus);
+               if (!l_new) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
+               hlist_add_head_rcu(&l_new->hash_node, head);
+               atomic_inc(&htab->count);
+       }
+       ret = 0;
+err:
+       raw_spin_unlock_irqrestore(&b->lock, flags);
+       return ret;
+}
+
+static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
+                                      void *value, u64 map_flags)
+{
+       return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
+}
+
 /* Called from syscall or from eBPF program */
 static int htab_map_delete_elem(struct bpf_map *map, void *key)
 {
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
+       bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_HASH;
        struct hlist_head *head;
        struct bucket *b;
        struct htab_elem *l;
@@ -332,7 +512,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
        if (l) {
                hlist_del_rcu(&l->hash_node);
                atomic_dec(&htab->count);
-               kfree_rcu(l, rcu);
+               free_htab_elem(l, percpu, key_size);
                ret = 0;
        }
 
@@ -352,7 +532,12 @@ static void delete_all_elements(struct bpf_htab *htab)
                hlist_for_each_entry_safe(l, n, head, hash_node) {
                        hlist_del_rcu(&l->hash_node);
                        atomic_dec(&htab->count);
-                       kfree(l);
+                       if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) {
+                               l->key_size = htab->map.key_size;
+                               htab_percpu_elem_free(l);
+                       } else {
+                               kfree(l);
+                       }
                }
        }
 }
@@ -391,9 +576,70 @@ static struct bpf_map_type_list htab_type __read_mostly = {
        .type = BPF_MAP_TYPE_HASH,
 };
 
+/* Called from eBPF program */
+static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
+{
+       struct htab_elem *l = __htab_map_lookup_elem(map, key);
+
+       if (l)
+               return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
+       else
+               return NULL;
+}
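
Because htab_percpu_map_lookup_elem() returns this_cpu_ptr() of the element's per-CPU area, a BPF program only ever sees and modifies the copy belonging to the CPU it is running on, so plain loads and stores suffice. A hedged sketch of that usage from program context (the 'counters' map, its declaration and get_some_key() are illustrative assumptions, not part of the patch):

        /* Per-CPU in-place update from a BPF program (sketch).  Assumes a map
         * 'counters' of type BPF_MAP_TYPE_PERCPU_HASH with __u32 keys and
         * __u64 values, declared elsewhere, and the standard map helpers.
         */
        __u32 key = get_some_key();                     /* hypothetical helper */
        __u64 *val = bpf_map_lookup_elem(&counters, &key);

        if (val) {
                *val += 1;      /* this CPU's copy only; no lock, no atomics */
        } else {
                __u64 init = 1;
                /* from program context this seeds the current CPU's copy;
                 * the other CPUs' copies start out zero-filled
                 */
                bpf_map_update_elem(&counters, &key, &init, BPF_ANY);
        }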
+
+int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
+{
+       struct htab_elem *l;
+       void __percpu *pptr;
+       int ret = -ENOENT;
+       int cpu, off = 0;
+       u32 size;
+
+       /* per_cpu areas are zero-filled and bpf programs can only
+        * access 'value_size' of them, so copying rounded areas
+        * will not leak any kernel data
+        */
+       size = round_up(map->value_size, 8);
+       rcu_read_lock();
+       l = __htab_map_lookup_elem(map, key);
+       if (!l)
+               goto out;
+       pptr = htab_elem_get_ptr(l, map->key_size);
+       for_each_possible_cpu(cpu) {
+               bpf_long_memcpy(value + off,
+                               per_cpu_ptr(pptr, cpu), size);
+               off += size;
+       }
+       ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
+}
+
+int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
+                          u64 map_flags)
+{
+       return __htab_percpu_map_update_elem(map, key, value, map_flags, true);
+}
+
+static const struct bpf_map_ops htab_percpu_ops = {
+       .map_alloc = htab_map_alloc,
+       .map_free = htab_map_free,
+       .map_get_next_key = htab_map_get_next_key,
+       .map_lookup_elem = htab_percpu_map_lookup_elem,
+       .map_update_elem = htab_percpu_map_update_elem,
+       .map_delete_elem = htab_map_delete_elem,
+};
+
+static struct bpf_map_type_list htab_percpu_type __read_mostly = {
+       .ops = &htab_percpu_ops,
+       .type = BPF_MAP_TYPE_PERCPU_HASH,
+};
+
 static int __init register_htab_map(void)
 {
        bpf_register_map_type(&htab_type);
+       bpf_register_map_type(&htab_percpu_type);
        return 0;
 }
 late_initcall(register_htab_map);
index 637397059f763564b535cfda2e4eaa9bf1d34fad..c95a753c2007966a752c2c2a5eda00e6a0a39072 100644 (file)
@@ -239,6 +239,7 @@ static int map_lookup_elem(union bpf_attr *attr)
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value, *ptr;
+       u32 value_size;
        struct fd f;
        int err;
 
@@ -259,23 +260,35 @@ static int map_lookup_elem(union bpf_attr *attr)
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
 
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+               value_size = round_up(map->value_size, 8) * num_possible_cpus();
+       else
+               value_size = map->value_size;
+
        err = -ENOMEM;
-       value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
+       value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;
 
-       rcu_read_lock();
-       ptr = map->ops->map_lookup_elem(map, key);
-       if (ptr)
-               memcpy(value, ptr, map->value_size);
-       rcu_read_unlock();
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
+               err = bpf_percpu_hash_copy(map, key, value);
+       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
+               err = bpf_percpu_array_copy(map, key, value);
+       } else {
+               rcu_read_lock();
+               ptr = map->ops->map_lookup_elem(map, key);
+               if (ptr)
+                       memcpy(value, ptr, value_size);
+               rcu_read_unlock();
+               err = ptr ? 0 : -ENOENT;
+       }
 
-       err = -ENOENT;
-       if (!ptr)
+       if (err)
                goto free_value;
 
        err = -EFAULT;
-       if (copy_to_user(uvalue, value, map->value_size) != 0)
+       if (copy_to_user(uvalue, value, value_size) != 0)
                goto free_value;
 
        err = 0;
@@ -298,6 +311,7 @@ static int map_update_elem(union bpf_attr *attr)
        int ufd = attr->map_fd;
        struct bpf_map *map;
        void *key, *value;
+       u32 value_size;
        struct fd f;
        int err;
 
@@ -318,21 +332,30 @@ static int map_update_elem(union bpf_attr *attr)
        if (copy_from_user(key, ukey, map->key_size) != 0)
                goto free_key;
 
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+           map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+               value_size = round_up(map->value_size, 8) * num_possible_cpus();
+       else
+               value_size = map->value_size;
+
        err = -ENOMEM;
-       value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN);
+       value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
        if (!value)
                goto free_key;
 
        err = -EFAULT;
-       if (copy_from_user(value, uvalue, map->value_size) != 0)
+       if (copy_from_user(value, uvalue, value_size) != 0)
                goto free_value;
 
-       /* eBPF program that use maps are running under rcu_read_lock(),
-        * therefore all map accessors rely on this fact, so do the same here
-        */
-       rcu_read_lock();
-       err = map->ops->map_update_elem(map, key, value, attr->flags);
-       rcu_read_unlock();
+       if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
+               err = bpf_percpu_hash_update(map, key, value, attr->flags);
+       } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
+               err = bpf_percpu_array_update(map, key, value, attr->flags);
+       } else {
+               rcu_read_lock();
+               err = map->ops->map_update_elem(map, key, value, attr->flags);
+               rcu_read_unlock();
+       }
 
 free_value:
        kfree(value);
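
Both syscall paths above size the user buffer as round_up(value_size, 8) * num_possible_cpus(), one aligned slot per possible CPU. A minimal user-space sketch of the lookup side under that layout (illustrative, not from the patch; sysconf() only approximates the kernel's possible-CPU count, so real code should over-allocate or read /sys/devices/system/cpu/possible):

        #include <stdlib.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/bpf.h>

        /* Read every CPU's copy of one element from a BPF_MAP_TYPE_PERCPU_HASH
         * (or _ARRAY) map.  Error handling trimmed for brevity.
         */
        static int percpu_lookup(int fd, const void *key, unsigned int value_size)
        {
                unsigned int stride = (value_size + 7) & ~7U;  /* round_up(value_size, 8) */
                long ncpus = sysconf(_SC_NPROCESSORS_CONF);    /* approximation only */
                char *buf = calloc(ncpus, stride);
                union bpf_attr attr;
                int err;

                memset(&attr, 0, sizeof(attr));
                attr.map_fd = fd;
                attr.key    = (__u64)(unsigned long)key;
                attr.value  = (__u64)(unsigned long)buf;

                err = syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
                if (!err) {
                        for (long cpu = 0; cpu < ncpus; cpu++) {
                                char *v = buf + cpu * stride;  /* CPU 'cpu's copy */
                                (void)v;                       /* ... consume ... */
                        }
                }
                free(buf);
                return err;
        }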
index d1d3e8f57de907764fe3080632062485f5639443..2e7f7ab739e41c46072a69e787bf4e44f560ae55 100644 (file)
@@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
                /* adjust offset of jmps if necessary */
                if (i < pos && i + insn->off + 1 > pos)
                        insn->off += delta;
-               else if (i > pos && i + insn->off + 1 < pos)
+               else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
                        insn->off -= delta;
        }
 }
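
A worked example of the corrected test, under the assumption that 'delta' extra instructions now occupy the slots immediately after position 'pos', so everything originally past 'pos' has already been shifted down by 'delta' when this loop runs over the new program:

        /* Illustration only.  Take pos = 2, delta = 1.  An instruction that
         * was at index 4 now sits at i = 5; suppose it jumps back to index 1,
         * which did not move.  Its stored offset is still the old one:
         *
         *   old:  4 + off + 1 = 1    =>  off = -4
         *   new:  5 + off + 1 = 1    =>  off must become -5
         *
         * New test:  i > pos + delta            =>  5 > 3                true
         *            i + off + 1 <= pos + delta =>  5 - 4 + 1 = 2 <= 3   true
         *            so off -= delta yields -5, as required.
         *
         * Old test:  i + off + 1 < pos          =>  2 < 2                false,
         * so backjumps that cross the patched range were left unadjusted.
         */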
index c03a640ef6da265db01b93c2970ab6b2da7abd67..d27904c193daa1d8a8680522093254cfde376177 100644 (file)
@@ -58,6 +58,7 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/atomic.h>
+#include <linux/cpuset.h>
 #include <net/sock.h>
 
 /*
@@ -2739,6 +2740,7 @@ out_unlock_rcu:
 out_unlock_threadgroup:
        percpu_up_write(&cgroup_threadgroup_rwsem);
        cgroup_kn_unlock(of->kn);
+       cpuset_post_attach_flush();
        return ret ?: nbytes;
 }
 
@@ -4655,14 +4657,15 @@ static void css_free_work_fn(struct work_struct *work)
 
        if (ss) {
                /* css free path */
+               struct cgroup_subsys_state *parent = css->parent;
                int id = css->id;
 
-               if (css->parent)
-                       css_put(css->parent);
-
                ss->css_free(css);
                cgroup_idr_remove(&ss->css_idr, id);
                cgroup_put(cgrp);
+
+               if (parent)
+                       css_put(parent);
        } else {
                /* cgroup free path */
                atomic_dec(&cgrp->root->nr_cgrps);
@@ -4758,6 +4761,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
        INIT_LIST_HEAD(&css->sibling);
        INIT_LIST_HEAD(&css->children);
        css->serial_nr = css_serial_nr_next++;
+       atomic_set(&css->online_cnt, 0);
 
        if (cgroup_parent(cgrp)) {
                css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -4780,6 +4784,10 @@ static int online_css(struct cgroup_subsys_state *css)
        if (!ret) {
                css->flags |= CSS_ONLINE;
                rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
+
+               atomic_inc(&css->online_cnt);
+               if (css->parent)
+                       atomic_inc(&css->parent->online_cnt);
        }
        return ret;
 }
@@ -5017,10 +5025,15 @@ static void css_killed_work_fn(struct work_struct *work)
                container_of(work, struct cgroup_subsys_state, destroy_work);
 
        mutex_lock(&cgroup_mutex);
-       offline_css(css);
-       mutex_unlock(&cgroup_mutex);
 
-       css_put(css);
+       do {
+               offline_css(css);
+               css_put(css);
+               /* @css can't go away while we're holding cgroup_mutex */
+               css = css->parent;
+       } while (css && atomic_dec_and_test(&css->online_cnt));
+
+       mutex_unlock(&cgroup_mutex);
 }
 
 /* css kill confirmation processing requires process context, bounce */
@@ -5029,8 +5042,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
        struct cgroup_subsys_state *css =
                container_of(ref, struct cgroup_subsys_state, refcnt);
 
-       INIT_WORK(&css->destroy_work, css_killed_work_fn);
-       queue_work(cgroup_destroy_wq, &css->destroy_work);
+       if (atomic_dec_and_test(&css->online_cnt)) {
+               INIT_WORK(&css->destroy_work, css_killed_work_fn);
+               queue_work(cgroup_destroy_wq, &css->destroy_work);
+       }
 }
 
 /**
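
The new css->online_cnt above counts the css itself plus each online child, so a parent is offlined only after the last of its children (and its own kill) has dropped its contribution. A simplified model of that cascade, using kernel atomics but not the cgroup API itself:

        #include <linux/atomic.h>

        /* Simplified model of the online_cnt cascade (not cgroup code). */
        struct node {
                struct node *parent;
                atomic_t online_cnt;    /* 1 for self + 1 per online child */
        };

        static void node_offline(struct node *n)
        {
                /* actual teardown of 'n' would go here */
        }

        static void node_online(struct node *n)
        {
                atomic_inc(&n->online_cnt);                     /* self */
                if (n->parent)
                        atomic_inc(&n->parent->online_cnt);     /* pin parent */
        }

        static void node_kill(struct node *n)
        {
                /* walk up for as long as we were the last contributor */
                while (n && atomic_dec_and_test(&n->online_cnt)) {
                        node_offline(n);
                        n = n->parent;
                }
        }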
index 3e945fcd81796f954a7e1c81ae95e78bd1a91dba..41989ab4db571cbf93d1a12738bc9afc3411e019 100644 (file)
@@ -287,6 +287,8 @@ static struct cpuset top_cpuset = {
 static DEFINE_MUTEX(cpuset_mutex);
 static DEFINE_SPINLOCK(callback_lock);
 
+static struct workqueue_struct *cpuset_migrate_mm_wq;
+
 /*
  * CPU / memory hotplug is handled asynchronously.
  */
@@ -972,31 +974,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 }
 
 /*
- * cpuset_migrate_mm
- *
- *    Migrate memory region from one set of nodes to another.
- *
- *    Temporarilly set tasks mems_allowed to target nodes of migration,
- *    so that the migration code can allocate pages on these nodes.
- *
- *    While the mm_struct we are migrating is typically from some
- *    other task, the task_struct mems_allowed that we are hacking
- *    is for our current task, which must allocate new pages for that
- *    migrating memory region.
+ * Migrate memory region from one set of nodes to another.  This is
+ * performed asynchronously as it can be called from process migration path
+ * holding locks involved in process management.  All mm migrations are
+ * performed in the queued order and can be waited for by flushing
+ * cpuset_migrate_mm_wq.
  */
 
+struct cpuset_migrate_mm_work {
+       struct work_struct      work;
+       struct mm_struct        *mm;
+       nodemask_t              from;
+       nodemask_t              to;
+};
+
+static void cpuset_migrate_mm_workfn(struct work_struct *work)
+{
+       struct cpuset_migrate_mm_work *mwork =
+               container_of(work, struct cpuset_migrate_mm_work, work);
+
+       /* on a wq worker, no need to worry about %current's mems_allowed */
+       do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
+       mmput(mwork->mm);
+       kfree(mwork);
+}
+
 static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
                                                        const nodemask_t *to)
 {
-       struct task_struct *tsk = current;
-
-       tsk->mems_allowed = *to;
+       struct cpuset_migrate_mm_work *mwork;
 
-       do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+       mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
+       if (mwork) {
+               mwork->mm = mm;
+               mwork->from = *from;
+               mwork->to = *to;
+               INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
+               queue_work(cpuset_migrate_mm_wq, &mwork->work);
+       } else {
+               mmput(mm);
+       }
+}
 
-       rcu_read_lock();
-       guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
-       rcu_read_unlock();
+void cpuset_post_attach_flush(void)
+{
+       flush_workqueue(cpuset_migrate_mm_wq);
 }
 
 /*
@@ -1097,7 +1119,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
                mpol_rebind_mm(mm, &cs->mems_allowed);
                if (migrate)
                        cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
-               mmput(mm);
+               else
+                       mmput(mm);
        }
        css_task_iter_end(&it);
 
@@ -1545,11 +1568,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
                         * @old_mems_allowed is the right nodesets that we
                         * migrate mm from.
                         */
-                       if (is_memory_migrate(cs)) {
+                       if (is_memory_migrate(cs))
                                cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
                                                  &cpuset_attach_nodemask_to);
-                       }
-                       mmput(mm);
+                       else
+                               mmput(mm);
                }
        }
 
@@ -1714,6 +1737,7 @@ out_unlock:
        mutex_unlock(&cpuset_mutex);
        kernfs_unbreak_active_protection(of->kn);
        css_put(&cs->css);
+       flush_workqueue(cpuset_migrate_mm_wq);
        return retval ?: nbytes;
 }
 
@@ -2359,6 +2383,9 @@ void __init cpuset_init_smp(void)
        top_cpuset.effective_mems = node_states[N_MEMORY];
 
        register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
+
+       cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
+       BUG_ON(!cpuset_migrate_mm_wq);
 }
 
 /**
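
The cpuset rework above leans on two workqueue properties: an ordered workqueue executes items one at a time in queueing order, and flush_workqueue() returns only once everything queued so far has run, which is what lets the attach path wait for the migrations it kicked off. A stripped-down sketch of that pattern (illustrative names, not the cpuset code):

        #include <linux/errno.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *migrate_wq;

        static int migrate_wq_setup(void)
        {
                /* one item at a time, in queueing order */
                migrate_wq = alloc_ordered_workqueue("example_migrate", 0);
                return migrate_wq ? 0 : -ENOMEM;
        }

        static void migrate_queue(struct work_struct *work)
        {
                queue_work(migrate_wq, work);   /* runs after earlier items */
        }

        static void migrate_wait_all(void)
        {
                flush_workqueue(migrate_wq);    /* returns once queued items ran */
        }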
index fc1ef736253c79954686d018a2deca4c86300fa6..77777d91867696484e09fef5f4302e1987f98139 100644 (file)
@@ -349,7 +349,7 @@ poll_again:
                        }
                        kdb_printf("\n");
                        for (i = 0; i < count; i++) {
-                               if (kallsyms_symbol_next(p_tmp, i) < 0)
+                               if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
                                        break;
                                kdb_printf("%s ", p_tmp);
                                *(p_tmp + len) = '\0';
index 06ae52e99ac255fcf4ec7925205742b85426e487..5946460b24250a264f75c442099bb52ca06b0269 100644 (file)
@@ -49,8 +49,6 @@
 
 #include <asm/irq_regs.h>
 
-static struct workqueue_struct *perf_wq;
-
 typedef int (*remote_function_f)(void *);
 
 struct remote_function_call {
@@ -126,44 +124,181 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
        return data.ret;
 }
 
-static void event_function_call(struct perf_event *event,
-                               int (*active)(void *),
-                               void (*inactive)(void *),
-                               void *data)
+static inline struct perf_cpu_context *
+__get_cpu_context(struct perf_event_context *ctx)
+{
+       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
+}
+
+static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
+                         struct perf_event_context *ctx)
 {
+       raw_spin_lock(&cpuctx->ctx.lock);
+       if (ctx)
+               raw_spin_lock(&ctx->lock);
+}
+
+static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
+                           struct perf_event_context *ctx)
+{
+       if (ctx)
+               raw_spin_unlock(&ctx->lock);
+       raw_spin_unlock(&cpuctx->ctx.lock);
+}
+
+#define TASK_TOMBSTONE ((void *)-1L)
+
+static bool is_kernel_event(struct perf_event *event)
+{
+       return READ_ONCE(event->owner) == TASK_TOMBSTONE;
+}
+
+/*
+ * On task ctx scheduling...
+ *
+ * When !ctx->nr_events a task context will not be scheduled. This means
+ * we can disable the scheduler hooks (for performance) without leaving
+ * pending task ctx state.
+ *
+ * This however results in two special cases:
+ *
+ *  - removing the last event from a task ctx; this is relatively
+ *    straightforward and is done in __perf_remove_from_context.
+ *
+ *  - adding the first event to a task ctx; this is tricky because we cannot
+ *    rely on ctx->is_active and therefore cannot use event_function_call().
+ *    See perf_install_in_context().
+ *
+ * This is because we need a ctx->lock serialized variable (ctx->is_active)
+ * to reliably determine if a particular task/context is scheduled in. The
+ * task_curr() use in task_function_call() is racy in that a remote context
+ * switch is not a single atomic operation.
+ *
+ * As is, the situation is 'safe' because we set rq->curr before we do the
+ * actual context switch. This means that task_curr() will fail early, but
+ * we'll continue spinning on ctx->is_active until we've passed
+ * perf_event_task_sched_out().
+ *
+ * Without this ctx->lock serialized variable we could have a race where we find
+ * the task (and hence the context) would not be active while in fact they are.
+ *
+ * If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
+ */
+
+typedef void (*event_f)(struct perf_event *, struct perf_cpu_context *,
+                       struct perf_event_context *, void *);
+
+struct event_function_struct {
+       struct perf_event *event;
+       event_f func;
+       void *data;
+};
+
+static int event_function(void *info)
+{
+       struct event_function_struct *efs = info;
+       struct perf_event *event = efs->event;
        struct perf_event_context *ctx = event->ctx;
-       struct task_struct *task = ctx->task;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+       struct perf_event_context *task_ctx = cpuctx->task_ctx;
+       int ret = 0;
+
+       WARN_ON_ONCE(!irqs_disabled());
+
+       perf_ctx_lock(cpuctx, task_ctx);
+       /*
+        * Since we do the IPI call without holding ctx->lock things can have
+        * changed, double check we hit the task we set out to hit.
+        */
+       if (ctx->task) {
+               if (ctx->task != current) {
+                       ret = -EAGAIN;
+                       goto unlock;
+               }
+
+               /*
+                * We only use event_function_call() on established contexts,
+                * and event_function() is only ever called when active (or
+                * rather, we'll have bailed in task_function_call() or the
+                * above ctx->task != current test), therefore we must have
+                * ctx->is_active here.
+                */
+               WARN_ON_ONCE(!ctx->is_active);
+               /*
+                * And since we have ctx->is_active, cpuctx->task_ctx must
+                * match.
+                */
+               WARN_ON_ONCE(task_ctx != ctx);
+       } else {
+               WARN_ON_ONCE(&cpuctx->ctx != ctx);
+       }
+
+       efs->func(event, cpuctx, ctx, efs->data);
+unlock:
+       perf_ctx_unlock(cpuctx, task_ctx);
+
+       return ret;
+}
+
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+       struct event_function_struct efs = {
+               .event = event,
+               .func = func,
+               .data = data,
+       };
+
+       int ret = event_function(&efs);
+       WARN_ON_ONCE(ret);
+}
+
+static void event_function_call(struct perf_event *event, event_f func, void *data)
+{
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task = READ_ONCE(ctx->task); /* verified in event_function */
+       struct event_function_struct efs = {
+               .event = event,
+               .func = func,
+               .data = data,
+       };
+
+       if (!event->parent) {
+               /*
+                * If this is a !child event, we must hold ctx::mutex to
+                * stabilize the event->ctx relation. See
+                * perf_event_ctx_lock().
+                */
+               lockdep_assert_held(&ctx->mutex);
+       }
 
        if (!task) {
-               cpu_function_call(event->cpu, active, data);
+               cpu_function_call(event->cpu, event_function, &efs);
                return;
        }
 
 again:
-       if (!task_function_call(task, active, data))
+       if (task == TASK_TOMBSTONE)
+               return;
+
+       if (!task_function_call(task, event_function, &efs))
                return;
 
        raw_spin_lock_irq(&ctx->lock);
-       if (ctx->is_active) {
-               /*
-                * Reload the task pointer, it might have been changed by
-                * a concurrent perf_event_context_sched_out().
-                */
-               task = ctx->task;
-               raw_spin_unlock_irq(&ctx->lock);
-               goto again;
+       /*
+        * Reload the task pointer, it might have been changed by
+        * a concurrent perf_event_context_sched_out().
+        */
+       task = ctx->task;
+       if (task != TASK_TOMBSTONE) {
+               if (ctx->is_active) {
+                       raw_spin_unlock_irq(&ctx->lock);
+                       goto again;
+               }
+               func(event, NULL, ctx, data);
        }
-       inactive(data);
        raw_spin_unlock_irq(&ctx->lock);
 }
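
The retry loop above is an instance of a more general shape: try to run the update via IPI on the CPU where the task is current, and if that misses, fall back under ctx->lock, where the context cannot be scheduled in behind our back. A distilled sketch of that shape (helper and field names mirror the code above but this is illustrative, not a public API, and it omits the TASK_TOMBSTONE recheck the real code performs):

        /* Distilled remote-or-locked fallback (sketch only). */
        static void run_on_task_or_locked(struct task_struct *task,
                                          struct perf_event_context *ctx,
                                          int (*remote_fn)(void *), void *data)
        {
        again:
                if (!task_function_call(task, remote_fn, data))
                        return;                 /* ran via IPI on the task's CPU */

                raw_spin_lock_irq(&ctx->lock);
                if (ctx->is_active) {
                        /* raced with a context switch; the IPI should hit now */
                        raw_spin_unlock_irq(&ctx->lock);
                        goto again;
                }
                /* context is inactive and ctx->lock keeps it that way */
                /* ... perform the update locally ... */
                raw_spin_unlock_irq(&ctx->lock);
        }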
 
-#define EVENT_OWNER_KERNEL ((void *) -1)
-
-static bool is_kernel_event(struct perf_event *event)
-{
-       return event->owner == EVENT_OWNER_KERNEL;
-}
-
 #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
                       PERF_FLAG_FD_OUTPUT  |\
                       PERF_FLAG_PID_CGROUP |\
@@ -368,28 +503,6 @@ static inline u64 perf_event_clock(struct perf_event *event)
        return event->clock();
 }
 
-static inline struct perf_cpu_context *
-__get_cpu_context(struct perf_event_context *ctx)
-{
-       return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
-}
-
-static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
-                         struct perf_event_context *ctx)
-{
-       raw_spin_lock(&cpuctx->ctx.lock);
-       if (ctx)
-               raw_spin_lock(&ctx->lock);
-}
-
-static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
-                           struct perf_event_context *ctx)
-{
-       if (ctx)
-               raw_spin_unlock(&ctx->lock);
-       raw_spin_unlock(&cpuctx->ctx.lock);
-}
-
 #ifdef CONFIG_CGROUP_PERF
 
 static inline bool
@@ -579,13 +692,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
         * we are holding the rcu lock
         */
        cgrp1 = perf_cgroup_from_task(task, NULL);
-
-       /*
-        * next is NULL when called from perf_event_enable_on_exec()
-        * that will systematically cause a cgroup_switch()
-        */
-       if (next)
-               cgrp2 = perf_cgroup_from_task(next, NULL);
+       cgrp2 = perf_cgroup_from_task(next, NULL);
 
        /*
         * only schedule out current cgroup events if we know
@@ -611,8 +718,6 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
         * we are holding the rcu lock
         */
        cgrp1 = perf_cgroup_from_task(task, NULL);
-
-       /* prev can never be NULL */
        cgrp2 = perf_cgroup_from_task(prev, NULL);
 
        /*
@@ -917,7 +1022,7 @@ static void put_ctx(struct perf_event_context *ctx)
        if (atomic_dec_and_test(&ctx->refcount)) {
                if (ctx->parent_ctx)
                        put_ctx(ctx->parent_ctx);
-               if (ctx->task)
+               if (ctx->task && ctx->task != TASK_TOMBSTONE)
                        put_task_struct(ctx->task);
                call_rcu(&ctx->rcu_head, free_ctx);
        }
@@ -934,9 +1039,8 @@ static void put_ctx(struct perf_event_context *ctx)
  * perf_event_context::mutex nests and those are:
  *
  *  - perf_event_exit_task_context()   [ child , 0 ]
- *      __perf_event_exit_task()
- *        sync_child_event()
- *          put_event()                        [ parent, 1 ]
+ *      perf_event_exit_event()
+ *        put_event()                  [ parent, 1 ]
  *
  *  - perf_event_init_context()                [ parent, 0 ]
  *      inherit_task_group()
@@ -979,8 +1083,8 @@ static void put_ctx(struct perf_event_context *ctx)
  * Lock order:
  *     task_struct::perf_event_mutex
  *       perf_event_context::mutex
- *         perf_event_context::lock
  *         perf_event::child_mutex;
+ *           perf_event_context::lock
  *         perf_event::mmap_mutex
  *         mmap_sem
  */
@@ -1078,6 +1182,7 @@ static u64 primary_event_id(struct perf_event *event)
 
 /*
  * Get the perf_event_context for a task and lock it.
+ *
 * This has to cope with the fact that until it is locked,
  * the context could get moved to another task.
  */
@@ -1118,9 +1223,12 @@ retry:
                        goto retry;
                }
 
-               if (!atomic_inc_not_zero(&ctx->refcount)) {
+               if (ctx->task == TASK_TOMBSTONE ||
+                   !atomic_inc_not_zero(&ctx->refcount)) {
                        raw_spin_unlock(&ctx->lock);
                        ctx = NULL;
+               } else {
+                       WARN_ON_ONCE(ctx->task != task);
                }
        }
        rcu_read_unlock();
@@ -1246,6 +1354,8 @@ ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 static void
 list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 {
+       lockdep_assert_held(&ctx->lock);
+
        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
        event->attach_state |= PERF_ATTACH_CONTEXT;
 
@@ -1448,11 +1558,14 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
        if (is_cgroup_event(event)) {
                ctx->nr_cgroups--;
+               /*
+                * Because cgroup events are always per-cpu events, this will
+                * always be called from the right CPU.
+                */
                cpuctx = __get_cpu_context(ctx);
                /*
-                * if there are no more cgroup events
-                * then cler cgrp to avoid stale pointer
-                * in update_cgrp_time_from_cpuctx()
+                * If there are no more cgroup events then clear cgrp to avoid
+                * stale pointer in update_cgrp_time_from_cpuctx().
                 */
                if (!ctx->nr_cgroups)
                        cpuctx->cgrp = NULL;
@@ -1530,45 +1643,11 @@ out:
                perf_event__header_size(tmp);
 }
 
-/*
- * User event without the task.
- */
 static bool is_orphaned_event(struct perf_event *event)
 {
-       return event && !is_kernel_event(event) && !event->owner;
+       return event->state == PERF_EVENT_STATE_EXIT;
 }
 
-/*
- * Event has a parent but parent's task finished and it's
- * alive only because of children holding refference.
- */
-static bool is_orphaned_child(struct perf_event *event)
-{
-       return is_orphaned_event(event->parent);
-}
-
-static void orphans_remove_work(struct work_struct *work);
-
-static void schedule_orphans_remove(struct perf_event_context *ctx)
-{
-       if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
-               return;
-
-       if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
-               get_ctx(ctx);
-               ctx->orphans_remove_sched = true;
-       }
-}
-
-static int __init perf_workqueue_init(void)
-{
-       perf_wq = create_singlethread_workqueue("perf");
-       WARN(!perf_wq, "failed to create perf workqueue\n");
-       return perf_wq ? 0 : -1;
-}
-
-core_initcall(perf_workqueue_init);
-
 static inline int pmu_filter_match(struct perf_event *event)
 {
        struct pmu *pmu = event->pmu;
@@ -1629,9 +1708,6 @@ event_sched_out(struct perf_event *event,
        if (event->attr.exclusive || !cpuctx->active_oncpu)
                cpuctx->exclusive = 0;
 
-       if (is_orphaned_child(event))
-               schedule_orphans_remove(ctx);
-
        perf_pmu_enable(event->pmu);
 }
 
@@ -1655,21 +1731,8 @@ group_sched_out(struct perf_event *group_event,
                cpuctx->exclusive = 0;
 }
 
-struct remove_event {
-       struct perf_event *event;
-       bool detach_group;
-};
-
-static void ___perf_remove_from_context(void *info)
-{
-       struct remove_event *re = info;
-       struct perf_event *event = re->event;
-       struct perf_event_context *ctx = event->ctx;
-
-       if (re->detach_group)
-               perf_group_detach(event);
-       list_del_event(event, ctx);
-}
+#define DETACH_GROUP   0x01UL
+#define DETACH_STATE   0x02UL
 
 /*
  * Cross CPU call to remove a performance event
@@ -1677,33 +1740,33 @@ static void ___perf_remove_from_context(void *info)
  * We disable the event on the hardware level first. After that we
  * remove it from the context list.
  */
-static int __perf_remove_from_context(void *info)
+static void
+__perf_remove_from_context(struct perf_event *event,
+                          struct perf_cpu_context *cpuctx,
+                          struct perf_event_context *ctx,
+                          void *info)
 {
-       struct remove_event *re = info;
-       struct perf_event *event = re->event;
-       struct perf_event_context *ctx = event->ctx;
-       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+       unsigned long flags = (unsigned long)info;
 
-       raw_spin_lock(&ctx->lock);
        event_sched_out(event, cpuctx, ctx);
-       if (re->detach_group)
+       if (flags & DETACH_GROUP)
                perf_group_detach(event);
        list_del_event(event, ctx);
-       if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
+       if (flags & DETACH_STATE)
+               event->state = PERF_EVENT_STATE_EXIT;
+
+       if (!ctx->nr_events && ctx->is_active) {
                ctx->is_active = 0;
-               cpuctx->task_ctx = NULL;
+               if (ctx->task) {
+                       WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+                       cpuctx->task_ctx = NULL;
+               }
        }
-       raw_spin_unlock(&ctx->lock);
-
-       return 0;
 }
 
 /*
  * Remove the event from a task's (or a CPU's) list of events.
  *
- * CPU events are removed with a smp call. For task events we only
- * call when the task is on a CPU.
- *
  * If event->ctx is a cloned context, callers must make sure that
  * every task struct that event->ctx->task could possibly point to
  * remains valid.  This is OK when called from perf_release since
@@ -1711,73 +1774,32 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event, bool detach_group)
+static void perf_remove_from_context(struct perf_event *event, unsigned long flags)
 {
-       struct perf_event_context *ctx = event->ctx;
-       struct remove_event re = {
-               .event = event,
-               .detach_group = detach_group,
-       };
+       lockdep_assert_held(&event->ctx->mutex);
 
-       lockdep_assert_held(&ctx->mutex);
-
-       event_function_call(event, __perf_remove_from_context,
-                           ___perf_remove_from_context, &re);
+       event_function_call(event, __perf_remove_from_context, (void *)flags);
 }
 
 /*
  * Cross CPU call to disable a performance event
  */
-int __perf_event_disable(void *info)
-{
-       struct perf_event *event = info;
-       struct perf_event_context *ctx = event->ctx;
-       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-
-       /*
-        * If this is a per-task event, need to check whether this
-        * event's task is the current task on this cpu.
-        *
-        * Can trigger due to concurrent perf_event_context_sched_out()
-        * flipping contexts around.
-        */
-       if (ctx->task && cpuctx->task_ctx != ctx)
-               return -EINVAL;
-
-       raw_spin_lock(&ctx->lock);
-
-       /*
-        * If the event is on, turn it off.
-        * If it is in error state, leave it in error state.
-        */
-       if (event->state >= PERF_EVENT_STATE_INACTIVE) {
-               update_context_time(ctx);
-               update_cgrp_time_from_event(event);
-               update_group_times(event);
-               if (event == event->group_leader)
-                       group_sched_out(event, cpuctx, ctx);
-               else
-                       event_sched_out(event, cpuctx, ctx);
-               event->state = PERF_EVENT_STATE_OFF;
-       }
-
-       raw_spin_unlock(&ctx->lock);
-
-       return 0;
-}
-
-void ___perf_event_disable(void *info)
+static void __perf_event_disable(struct perf_event *event,
+                                struct perf_cpu_context *cpuctx,
+                                struct perf_event_context *ctx,
+                                void *info)
 {
-       struct perf_event *event = info;
+       if (event->state < PERF_EVENT_STATE_INACTIVE)
+               return;
 
-       /*
-        * Since we have the lock this context can't be scheduled
-        * in, so we can change the state safely.
-        */
-       if (event->state == PERF_EVENT_STATE_INACTIVE) {
-               update_group_times(event);
-               event->state = PERF_EVENT_STATE_OFF;
-       }
+       update_context_time(ctx);
+       update_cgrp_time_from_event(event);
+       update_group_times(event);
+       if (event == event->group_leader)
+               group_sched_out(event, cpuctx, ctx);
+       else
+               event_sched_out(event, cpuctx, ctx);
+       event->state = PERF_EVENT_STATE_OFF;
 }
 
 /*
@@ -1788,7 +1810,8 @@ void ___perf_event_disable(void *info)
 * remains valid.  This condition is satisfied when called through
  * perf_event_for_each_child or perf_event_for_each because they
  * hold the top-level event's child_mutex, so any descendant that
- * goes to exit will block in sync_child_event.
+ * goes to exit will block in perf_event_exit_event().
+ *
  * When called from perf_pending_event it's OK because event->ctx
  * is the current context on this CPU and preemption is disabled,
  * hence we can't get into perf_event_task_sched_out for this context.
@@ -1804,8 +1827,12 @@ static void _perf_event_disable(struct perf_event *event)
        }
        raw_spin_unlock_irq(&ctx->lock);
 
-       event_function_call(event, __perf_event_disable,
-                           ___perf_event_disable, event);
+       event_function_call(event, __perf_event_disable, NULL);
+}
+
+void perf_event_disable_local(struct perf_event *event)
+{
+       event_function_local(event, __perf_event_disable, NULL);
 }
 
 /*
@@ -1918,9 +1945,6 @@ event_sched_in(struct perf_event *event,
        if (event->attr.exclusive)
                cpuctx->exclusive = 1;
 
-       if (is_orphaned_child(event))
-               schedule_orphans_remove(ctx);
-
 out:
        perf_pmu_enable(event->pmu);
 
@@ -2039,7 +2063,8 @@ static void add_event_to_ctx(struct perf_event *event,
        event->tstamp_stopped = tstamp;
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx);
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                              struct perf_event_context *ctx);
 static void
 ctx_sched_in(struct perf_event_context *ctx,
             struct perf_cpu_context *cpuctx,
@@ -2058,16 +2083,15 @@ static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
                ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
 }
 
-static void ___perf_install_in_context(void *info)
+static void ctx_resched(struct perf_cpu_context *cpuctx,
+                       struct perf_event_context *task_ctx)
 {
-       struct perf_event *event = info;
-       struct perf_event_context *ctx = event->ctx;
-
-       /*
-        * Since the task isn't running, its safe to add the event, us holding
-        * the ctx->lock ensures the task won't get scheduled in.
-        */
-       add_event_to_ctx(event, ctx);
+       perf_pmu_disable(cpuctx->ctx.pmu);
+       if (task_ctx)
+               task_ctx_sched_out(cpuctx, task_ctx);
+       cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+       perf_event_sched_in(cpuctx, task_ctx, current);
+       perf_pmu_enable(cpuctx->ctx.pmu);
 }
 
 /*
@@ -2077,55 +2101,31 @@ static void ___perf_install_in_context(void *info)
  */
 static int  __perf_install_in_context(void *info)
 {
-       struct perf_event *event = info;
-       struct perf_event_context *ctx = event->ctx;
+       struct perf_event_context *ctx = info;
        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
        struct perf_event_context *task_ctx = cpuctx->task_ctx;
-       struct task_struct *task = current;
-
-       perf_ctx_lock(cpuctx, task_ctx);
-       perf_pmu_disable(cpuctx->ctx.pmu);
 
-       /*
-        * If there was an active task_ctx schedule it out.
-        */
-       if (task_ctx)
-               task_ctx_sched_out(task_ctx);
-
-       /*
-        * If the context we're installing events in is not the
-        * active task_ctx, flip them.
-        */
-       if (ctx->task && task_ctx != ctx) {
-               if (task_ctx)
-                       raw_spin_unlock(&task_ctx->lock);
+       raw_spin_lock(&cpuctx->ctx.lock);
+       if (ctx->task) {
                raw_spin_lock(&ctx->lock);
+               /*
+                * If we hit the 'wrong' task, we've since scheduled and
+                * everything should be sorted, nothing to do!
+                */
                task_ctx = ctx;
-       }
+               if (ctx->task != current)
+                       goto unlock;
 
-       if (task_ctx) {
-               cpuctx->task_ctx = task_ctx;
-               task = task_ctx->task;
+               /*
+                * If task_ctx is set, it had better be to us.
+                */
+               WARN_ON_ONCE(cpuctx->task_ctx != ctx && cpuctx->task_ctx);
+       } else if (task_ctx) {
+               raw_spin_lock(&task_ctx->lock);
        }
 
-       cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-
-       update_context_time(ctx);
-       /*
-        * update cgrp time only if current cgrp
-        * matches event->cgrp. Must be done before
-        * calling add_event_to_ctx()
-        */
-       update_cgrp_time_from_event(event);
-
-       add_event_to_ctx(event, ctx);
-
-       /*
-        * Schedule everything back in
-        */
-       perf_event_sched_in(cpuctx, task_ctx, task);
-
-       perf_pmu_enable(cpuctx->ctx.pmu);
+       ctx_resched(cpuctx, task_ctx);
+unlock:
        perf_ctx_unlock(cpuctx, task_ctx);
 
        return 0;
@@ -2133,27 +2133,54 @@ static int  __perf_install_in_context(void *info)
 
 /*
  * Attach a performance event to a context
- *
- * First we add the event to the list with the hardware enable bit
- * in event->hw_config cleared.
- *
- * If the event is attached to a task which is on a CPU we use a smp
- * call to enable it in the task context. The task might have been
- * scheduled away, but we check this in the smp call again.
  */
 static void
 perf_install_in_context(struct perf_event_context *ctx,
                        struct perf_event *event,
                        int cpu)
 {
+       struct task_struct *task = NULL;
+
        lockdep_assert_held(&ctx->mutex);
 
        event->ctx = ctx;
        if (event->cpu != -1)
                event->cpu = cpu;
 
-       event_function_call(event, __perf_install_in_context,
-                           ___perf_install_in_context, event);
+       /*
+        * Installing events is tricky because we cannot rely on ctx->is_active
+        * to be set in case this is the nr_events 0 -> 1 transition.
+        *
+        * So what we do is we add the event to the list here, which will allow
+        * a future context switch to DTRT and then send a racy IPI. If the IPI
+        * fails to hit the right task, this means a context switch must have
+        * happened and that will have taken care of business.
+        */
+       raw_spin_lock_irq(&ctx->lock);
+       task = ctx->task;
+       /*
+        * Worse, we cannot even rely on the ctx actually existing anymore. If
+        * between find_get_context() and perf_install_in_context() the task
+        * went through perf_event_exit_task(), it's dead and we should not be
+        * adding new events.
+        */
+       if (task == TASK_TOMBSTONE) {
+               raw_spin_unlock_irq(&ctx->lock);
+               return;
+       }
+       update_context_time(ctx);
+       /*
+        * Update cgrp time only if current cgrp matches event->cgrp.
+        * Must be done before calling add_event_to_ctx().
+        */
+       update_cgrp_time_from_event(event);
+       add_event_to_ctx(event, ctx);
+       raw_spin_unlock_irq(&ctx->lock);
+
+       if (task)
+               task_function_call(task, __perf_install_in_context, ctx);
+       else
+               cpu_function_call(cpu, __perf_install_in_context, ctx);
 }
 
 /*
@@ -2180,43 +2207,30 @@ static void __perf_event_mark_enabled(struct perf_event *event)
 /*
  * Cross CPU call to enable a performance event
  */
-static int __perf_event_enable(void *info)
+static void __perf_event_enable(struct perf_event *event,
+                               struct perf_cpu_context *cpuctx,
+                               struct perf_event_context *ctx,
+                               void *info)
 {
-       struct perf_event *event = info;
-       struct perf_event_context *ctx = event->ctx;
        struct perf_event *leader = event->group_leader;
-       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-       int err;
+       struct perf_event_context *task_ctx;
 
-       /*
-        * There's a time window between 'ctx->is_active' check
-        * in perf_event_enable function and this place having:
-        *   - IRQs on
-        *   - ctx->lock unlocked
-        *
-        * where the task could be killed and 'ctx' deactivated
-        * by perf_event_exit_task.
-        */
-       if (!ctx->is_active)
-               return -EINVAL;
+       if (event->state >= PERF_EVENT_STATE_INACTIVE ||
+           event->state <= PERF_EVENT_STATE_ERROR)
+               return;
 
-       raw_spin_lock(&ctx->lock);
        update_context_time(ctx);
-
-       if (event->state >= PERF_EVENT_STATE_INACTIVE)
-               goto unlock;
-
-       /*
-        * set current task's cgroup time reference point
-        */
-       perf_cgroup_set_timestamp(current, ctx);
-
        __perf_event_mark_enabled(event);
 
+       if (!ctx->is_active)
+               return;
+
        if (!event_filter_match(event)) {
-               if (is_cgroup_event(event))
+               if (is_cgroup_event(event)) {
+                       perf_cgroup_set_timestamp(current, ctx); // XXX ?
                        perf_cgroup_defer_enabled(event);
-               goto unlock;
+               }
+               return;
        }
 
        /*
@@ -2224,41 +2238,13 @@ static int __perf_event_enable(void *info)
         * then don't put it on unless the group is on.
         */
        if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
-               goto unlock;
-
-       if (!group_can_go_on(event, cpuctx, 1)) {
-               err = -EEXIST;
-       } else {
-               if (event == leader)
-                       err = group_sched_in(event, cpuctx, ctx);
-               else
-                       err = event_sched_in(event, cpuctx, ctx);
-       }
-
-       if (err) {
-               /*
-                * If this event can't go on and it's part of a
-                * group, then the whole group has to come off.
-                */
-               if (leader != event) {
-                       group_sched_out(leader, cpuctx, ctx);
-                       perf_mux_hrtimer_restart(cpuctx);
-               }
-               if (leader->attr.pinned) {
-                       update_group_times(leader);
-                       leader->state = PERF_EVENT_STATE_ERROR;
-               }
-       }
-
-unlock:
-       raw_spin_unlock(&ctx->lock);
+               return;
 
-       return 0;
-}
+       task_ctx = cpuctx->task_ctx;
+       if (ctx->task)
+               WARN_ON_ONCE(task_ctx != ctx);
 
-void ___perf_event_enable(void *info)
-{
-       __perf_event_mark_enabled((struct perf_event *)info);
+       ctx_resched(cpuctx, task_ctx);
 }
 
 /*
@@ -2275,7 +2261,8 @@ static void _perf_event_enable(struct perf_event *event)
        struct perf_event_context *ctx = event->ctx;
 
        raw_spin_lock_irq(&ctx->lock);
-       if (event->state >= PERF_EVENT_STATE_INACTIVE) {
+       if (event->state >= PERF_EVENT_STATE_INACTIVE ||
+           event->state <  PERF_EVENT_STATE_ERROR) {
                raw_spin_unlock_irq(&ctx->lock);
                return;
        }
@@ -2291,8 +2278,7 @@ static void _perf_event_enable(struct perf_event *event)
                event->state = PERF_EVENT_STATE_OFF;
        raw_spin_unlock_irq(&ctx->lock);
 
-       event_function_call(event, __perf_event_enable,
-                           ___perf_event_enable, event);
+       event_function_call(event, __perf_event_enable, NULL);
 }
 
 /*
@@ -2342,12 +2328,27 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                          struct perf_cpu_context *cpuctx,
                          enum event_type_t event_type)
 {
-       struct perf_event *event;
        int is_active = ctx->is_active;
+       struct perf_event *event;
 
-       ctx->is_active &= ~event_type;
-       if (likely(!ctx->nr_events))
+       lockdep_assert_held(&ctx->lock);
+
+       if (likely(!ctx->nr_events)) {
+               /*
+                * See __perf_remove_from_context().
+                */
+               WARN_ON_ONCE(ctx->is_active);
+               if (ctx->task)
+                       WARN_ON_ONCE(cpuctx->task_ctx);
                return;
+       }
+
+       ctx->is_active &= ~event_type;
+       if (ctx->task) {
+               WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+               if (!ctx->is_active)
+                       cpuctx->task_ctx = NULL;
+       }
 
        update_context_time(ctx);
        update_cgrp_time_from_cpuctx(cpuctx);
@@ -2518,17 +2519,21 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
                raw_spin_lock(&ctx->lock);
                raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
                if (context_equiv(ctx, next_ctx)) {
-                       /*
-                        * XXX do we need a memory barrier of sorts
-                        * wrt to rcu_dereference() of perf_event_ctxp
-                        */
-                       task->perf_event_ctxp[ctxn] = next_ctx;
-                       next->perf_event_ctxp[ctxn] = ctx;
-                       ctx->task = next;
-                       next_ctx->task = task;
+                       WRITE_ONCE(ctx->task, next);
+                       WRITE_ONCE(next_ctx->task, task);
 
                        swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
 
+                       /*
+                        * RCU_INIT_POINTER here is safe because we've not
+                        * modified the ctx and the above modification of
+                        * ctx->task and ctx->task_ctx_data are immaterial
+                        * since those values are always verified under
+                        * ctx->lock which we're now holding.
+                        */
+                       RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], next_ctx);
+                       RCU_INIT_POINTER(next->perf_event_ctxp[ctxn], ctx);
+
                        do_switch = 0;
 
                        perf_event_sync_stat(ctx, next_ctx);
@@ -2541,8 +2546,7 @@ unlock:
 
        if (do_switch) {
                raw_spin_lock(&ctx->lock);
-               ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-               cpuctx->task_ctx = NULL;
+               task_ctx_sched_out(cpuctx, ctx);
                raw_spin_unlock(&ctx->lock);
        }
 }
@@ -2637,10 +2641,9 @@ void __perf_event_task_sched_out(struct task_struct *task,
                perf_cgroup_sched_out(task, next);
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx)
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+                              struct perf_event_context *ctx)
 {
-       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
-
        if (!cpuctx->task_ctx)
                return;
 
@@ -2648,7 +2651,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx)
                return;
 
        ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-       cpuctx->task_ctx = NULL;
 }
 
 /*
@@ -2725,13 +2727,22 @@ ctx_sched_in(struct perf_event_context *ctx,
             enum event_type_t event_type,
             struct task_struct *task)
 {
-       u64 now;
        int is_active = ctx->is_active;
+       u64 now;
+
+       lockdep_assert_held(&ctx->lock);
 
-       ctx->is_active |= event_type;
        if (likely(!ctx->nr_events))
                return;
 
+       ctx->is_active |= event_type;
+       if (ctx->task) {
+               if (!is_active)
+                       cpuctx->task_ctx = ctx;
+               else
+                       WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+       }
+
        now = perf_clock();
        ctx->timestamp = now;
        perf_cgroup_set_timestamp(task, ctx);
@@ -2773,12 +2784,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
         * cpu flexible, task flexible.
         */
        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-
-       if (ctx->nr_events)
-               cpuctx->task_ctx = ctx;
-
-       perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
-
+       perf_event_sched_in(cpuctx, ctx, task);
        perf_pmu_enable(ctx->pmu);
        perf_ctx_unlock(cpuctx, ctx);
 }
@@ -2800,6 +2806,16 @@ void __perf_event_task_sched_in(struct task_struct *prev,
        struct perf_event_context *ctx;
        int ctxn;
 
+       /*
+        * If cgroup events exist on this CPU, then we need to check if we have
+        * to switch in PMU state; cgroup event are system-wide mode only.
+        *
+        * Since cgroup events are CPU events, we must schedule these in before
+        * we schedule in the task events.
+        */
+       if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
+               perf_cgroup_sched_in(prev, task);
+
        for_each_task_context_nr(ctxn) {
                ctx = task->perf_event_ctxp[ctxn];
                if (likely(!ctx))
@@ -2807,13 +2823,6 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 
                perf_event_context_sched_in(ctx, task);
        }
-       /*
-        * if cgroup events exist on this CPU, then we need
-        * to check if we have to switch in PMU state.
-        * cgroup event are system-wide mode only
-        */
-       if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-               perf_cgroup_sched_in(prev, task);
 
        if (atomic_read(&nr_switch_events))
                perf_event_switch(task, prev, true);
@@ -3099,46 +3108,30 @@ static int event_enable_on_exec(struct perf_event *event,
 static void perf_event_enable_on_exec(int ctxn)
 {
        struct perf_event_context *ctx, *clone_ctx = NULL;
+       struct perf_cpu_context *cpuctx;
        struct perf_event *event;
        unsigned long flags;
        int enabled = 0;
-       int ret;
 
        local_irq_save(flags);
        ctx = current->perf_event_ctxp[ctxn];
        if (!ctx || !ctx->nr_events)
                goto out;
 
-       /*
-        * We must ctxsw out cgroup events to avoid conflict
-        * when invoking perf_task_event_sched_in() later on
-        * in this function. Otherwise we end up trying to
-        * ctxswin cgroup events which are already scheduled
-        * in.
-        */
-       perf_cgroup_sched_out(current, NULL);
-
-       raw_spin_lock(&ctx->lock);
-       task_ctx_sched_out(ctx);
-
-       list_for_each_entry(event, &ctx->event_list, event_entry) {
-               ret = event_enable_on_exec(event, ctx);
-               if (ret)
-                       enabled = 1;
-       }
+       cpuctx = __get_cpu_context(ctx);
+       perf_ctx_lock(cpuctx, ctx);
+       list_for_each_entry(event, &ctx->event_list, event_entry)
+               enabled |= event_enable_on_exec(event, ctx);
 
        /*
-        * Unclone this context if we enabled any event.
+        * Unclone and reschedule this context if we enabled any event.
         */
-       if (enabled)
+       if (enabled) {
                clone_ctx = unclone_ctx(ctx);
+               ctx_resched(cpuctx, ctx);
+       }
+       perf_ctx_unlock(cpuctx, ctx);
 
-       raw_spin_unlock(&ctx->lock);
-
-       /*
-        * Also calls ctxswin for cgroup events, if any:
-        */
-       perf_event_context_sched_in(ctx, ctx->task);
 out:
        local_irq_restore(flags);
 
@@ -3334,7 +3327,6 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
        INIT_LIST_HEAD(&ctx->flexible_groups);
        INIT_LIST_HEAD(&ctx->event_list);
        atomic_set(&ctx->refcount, 1);
-       INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
 }
 
 static struct perf_event_context *
@@ -3521,11 +3513,13 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
 
 static void unaccount_event(struct perf_event *event)
 {
+       bool dec = false;
+
        if (event->parent)
                return;
 
        if (event->attach_state & PERF_ATTACH_TASK)
-               static_key_slow_dec_deferred(&perf_sched_events);
+               dec = true;
        if (event->attr.mmap || event->attr.mmap_data)
                atomic_dec(&nr_mmap_events);
        if (event->attr.comm)
@@ -3535,12 +3529,15 @@ static void unaccount_event(struct perf_event *event)
        if (event->attr.freq)
                atomic_dec(&nr_freq_events);
        if (event->attr.context_switch) {
-               static_key_slow_dec_deferred(&perf_sched_events);
+               dec = true;
                atomic_dec(&nr_switch_events);
        }
        if (is_cgroup_event(event))
-               static_key_slow_dec_deferred(&perf_sched_events);
+               dec = true;
        if (has_branch_stack(event))
+               dec = true;
+
+       if (dec)
                static_key_slow_dec_deferred(&perf_sched_events);
 
        unaccount_event_cpu(event, event->cpu);
@@ -3556,7 +3553,7 @@ static void unaccount_event(struct perf_event *event)
  *  3) two matching events on the same context.
  *
  * The former two cases are handled in the allocation path (perf_event_alloc(),
- * __free_event()), the latter -- before the first perf_install_in_context().
+ * _free_event()), the latter -- before the first perf_install_in_context().
  */
 static int exclusive_event_init(struct perf_event *event)
 {
@@ -3631,29 +3628,6 @@ static bool exclusive_event_installable(struct perf_event *event,
        return true;
 }
 
-static void __free_event(struct perf_event *event)
-{
-       if (!event->parent) {
-               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-                       put_callchain_buffers();
-       }
-
-       perf_event_free_bpf_prog(event);
-
-       if (event->destroy)
-               event->destroy(event);
-
-       if (event->ctx)
-               put_ctx(event->ctx);
-
-       if (event->pmu) {
-               exclusive_event_destroy(event);
-               module_put(event->pmu->module);
-       }
-
-       call_rcu(&event->rcu_head, free_event_rcu);
-}
-
 static void _free_event(struct perf_event *event)
 {
        irq_work_sync(&event->pending);
@@ -3675,7 +3649,25 @@ static void _free_event(struct perf_event *event)
        if (is_cgroup_event(event))
                perf_detach_cgroup(event);
 
-       __free_event(event);
+       if (!event->parent) {
+               if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+                       put_callchain_buffers();
+       }
+
+       perf_event_free_bpf_prog(event);
+
+       if (event->destroy)
+               event->destroy(event);
+
+       if (event->ctx)
+               put_ctx(event->ctx);
+
+       if (event->pmu) {
+               exclusive_event_destroy(event);
+               module_put(event->pmu->module);
+       }
+
+       call_rcu(&event->rcu_head, free_event_rcu);
 }
 
 /*
@@ -3702,14 +3694,13 @@ static void perf_remove_from_owner(struct perf_event *event)
        struct task_struct *owner;
 
        rcu_read_lock();
-       owner = ACCESS_ONCE(event->owner);
        /*
-        * Matches the smp_wmb() in perf_event_exit_task(). If we observe
-        * !owner it means the list deletion is complete and we can indeed
-        * free this event, otherwise we need to serialize on
+        * Matches the smp_store_release() in perf_event_exit_task(). If we
+        * observe !owner it means the list deletion is complete and we can
+        * indeed free this event, otherwise we need to serialize on
         * owner->perf_event_mutex.
         */
-       smp_read_barrier_depends();
+       owner = lockless_dereference(event->owner);
        if (owner) {
                /*
                 * Since delayed_put_task_struct() also drops the last
@@ -3737,8 +3728,10 @@ static void perf_remove_from_owner(struct perf_event *event)
                 * ensured they're done, and we can proceed with freeing the
                 * event.
                 */
-               if (event->owner)
+               if (event->owner) {
                        list_del_init(&event->owner_entry);
+                       smp_store_release(&event->owner, NULL);
+               }
                mutex_unlock(&owner->perf_event_mutex);
                put_task_struct(owner);
        }
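
The perf_remove_from_owner() hunk pairs a lockless_dereference() of event->owner with an smp_store_release() that publishes NULL once the owner entry is off the list, replacing the older ACCESS_ONCE() plus smp_read_barrier_depends() combination. A rough userspace analogue of that publish/consume handshake using C11 atomics (acquire is stronger than the dependency ordering the kernel relies on, but it shows the pairing; all names below are invented):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct owner { const char *name; };

/* the published pointer; NULL means "list deletion is complete" */
static _Atomic(struct owner *) owner_ptr;

/* exit path: tear down, then publish NULL with release semantics */
static void exit_side(void)
{
        /* ... remove from lists, etc. ... */
        atomic_store_explicit(&owner_ptr, NULL, memory_order_release);
}

/* release path: the acquire load pairs with the release store above */
static void release_side(void)
{
        struct owner *o = atomic_load_explicit(&owner_ptr, memory_order_acquire);

        if (o)
                printf("still owned by %s, must serialize on its mutex\n", o->name);
        else
                printf("owner gone, safe to free\n");
}

int main(void)
{
        static struct owner me = { "task-1234" };

        atomic_store_explicit(&owner_ptr, &me, memory_order_relaxed);
        release_side();
        exit_side();
        release_side();
        return 0;
}
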
@@ -3746,36 +3739,98 @@ static void perf_remove_from_owner(struct perf_event *event)
 
 static void put_event(struct perf_event *event)
 {
-       struct perf_event_context *ctx;
-
        if (!atomic_long_dec_and_test(&event->refcount))
                return;
 
+       _free_event(event);
+}
+
+/*
+ * Kill an event dead; while event::refcount will preserve the event
+ * object, it will not preserve its functionality. Once the last 'user'
+ * gives up the object, we'll destroy the thing.
+ */
+int perf_event_release_kernel(struct perf_event *event)
+{
+       struct perf_event_context *ctx;
+       struct perf_event *child, *tmp;
+
        if (!is_kernel_event(event))
                perf_remove_from_owner(event);
 
+       ctx = perf_event_ctx_lock(event);
+       WARN_ON_ONCE(ctx->parent_ctx);
+       perf_remove_from_context(event, DETACH_GROUP | DETACH_STATE);
+       perf_event_ctx_unlock(event, ctx);
+
        /*
-        * There are two ways this annotation is useful:
+        * At this point we must have event->state == PERF_EVENT_STATE_EXIT,
+        * either from the above perf_remove_from_context() or through
+        * perf_event_exit_event().
         *
-        *  1) there is a lock recursion from perf_event_exit_task
-        *     see the comment there.
+        * Therefore, anybody acquiring event->child_mutex after the below
+        * loop _must_ also see this, most importantly inherit_event() which
+        * will avoid placing more children on the list.
         *
-        *  2) there is a lock-inversion with mmap_sem through
-        *     perf_read_group(), which takes faults while
-        *     holding ctx->mutex, however this is called after
-        *     the last filedesc died, so there is no possibility
-        *     to trigger the AB-BA case.
+        * Thus this guarantees that we will in fact observe and kill _ALL_
+        * child events.
         */
-       ctx = perf_event_ctx_lock_nested(event, SINGLE_DEPTH_NESTING);
-       WARN_ON_ONCE(ctx->parent_ctx);
-       perf_remove_from_context(event, true);
-       perf_event_ctx_unlock(event, ctx);
+       WARN_ON_ONCE(event->state != PERF_EVENT_STATE_EXIT);
 
-       _free_event(event);
-}
+again:
+       mutex_lock(&event->child_mutex);
+       list_for_each_entry(child, &event->child_list, child_list) {
 
-int perf_event_release_kernel(struct perf_event *event)
-{
+               /*
+                * Cannot change, child events are not migrated, see the
+                * comment with perf_event_ctx_lock_nested().
+                */
+               ctx = lockless_dereference(child->ctx);
+               /*
+                * Since child_mutex nests inside ctx::mutex, we must jump
+                * through hoops. We start by grabbing a reference on the ctx.
+                *
+                * Since the event cannot get freed while we hold the
+                * child_mutex, the context must also exist and have a !0
+                * reference count.
+                */
+               get_ctx(ctx);
+
+               /*
+                * Now that we have a ctx ref, we can drop child_mutex, and
+                * acquire ctx::mutex without fear of it going away. Then we
+                * can re-acquire child_mutex.
+                */
+               mutex_unlock(&event->child_mutex);
+               mutex_lock(&ctx->mutex);
+               mutex_lock(&event->child_mutex);
+
+               /*
+                * Now that we hold ctx::mutex and child_mutex, revalidate our
+                * state: if child is still the first entry, it didn't get freed
+                * and we can go ahead and free it.
+                */
+               tmp = list_first_entry_or_null(&event->child_list,
+                                              struct perf_event, child_list);
+               if (tmp == child) {
+                       perf_remove_from_context(child, DETACH_GROUP);
+                       list_del(&child->child_list);
+                       free_event(child);
+                       /*
+                        * This matches the refcount bump in inherit_event();
+                        * this can't be the last reference.
+                        */
+                       put_event(event);
+               }
+
+               mutex_unlock(&event->child_mutex);
+               mutex_unlock(&ctx->mutex);
+               put_ctx(ctx);
+               goto again;
+       }
+       mutex_unlock(&event->child_mutex);
+
+       /* Must be the last reference */
        put_event(event);
        return 0;
 }
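
The child-reaping loop in the new perf_event_release_kernel() has to respect the lock order (ctx::mutex outside event->child_mutex), so it drops child_mutex, takes the ctx mutex, retakes child_mutex, and only frees the child after revalidating that it is still at the head of the list. A stripped-down pthread sketch of that drop/reacquire/revalidate shape, without the reference counting (names hypothetical):

#include <pthread.h>
#include <stdio.h>

/* lock order: ctx_mutex, then child_mutex */
static pthread_mutex_t ctx_mutex   = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t child_mutex = PTHREAD_MUTEX_INITIALIZER;

struct child { struct child *next; int id; };
static struct child *child_list;        /* protected by child_mutex */

static void release_children(void)
{
again:
        pthread_mutex_lock(&child_mutex);
        if (child_list) {
                struct child *c = child_list;

                /*
                 * child_mutex nests inside ctx_mutex, so drop it, take the
                 * outer lock, retake it, and then revalidate what we saw.
                 */
                pthread_mutex_unlock(&child_mutex);
                pthread_mutex_lock(&ctx_mutex);
                pthread_mutex_lock(&child_mutex);

                if (child_list == c) {          /* still the same head entry? */
                        child_list = c->next;   /* ... then it is ours to release */
                        printf("released child %d\n", c->id);
                }

                pthread_mutex_unlock(&child_mutex);
                pthread_mutex_unlock(&ctx_mutex);
                goto again;                     /* restart from a clean slate */
        }
        pthread_mutex_unlock(&child_mutex);
}

int main(void)
{
        static struct child c2 = { NULL, 2 }, c1 = { &c2, 1 };

        child_list = &c1;
        release_children();
        return 0;
}
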
@@ -3786,46 +3841,10 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
  */
 static int perf_release(struct inode *inode, struct file *file)
 {
-       put_event(file->private_data);
+       perf_event_release_kernel(file->private_data);
        return 0;
 }
 
-/*
- * Remove all orphanes events from the context.
- */
-static void orphans_remove_work(struct work_struct *work)
-{
-       struct perf_event_context *ctx;
-       struct perf_event *event, *tmp;
-
-       ctx = container_of(work, struct perf_event_context,
-                          orphans_remove.work);
-
-       mutex_lock(&ctx->mutex);
-       list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
-               struct perf_event *parent_event = event->parent;
-
-               if (!is_orphaned_child(event))
-                       continue;
-
-               perf_remove_from_context(event, true);
-
-               mutex_lock(&parent_event->child_mutex);
-               list_del_init(&event->child_list);
-               mutex_unlock(&parent_event->child_mutex);
-
-               free_event(event);
-               put_event(parent_event);
-       }
-
-       raw_spin_lock_irq(&ctx->lock);
-       ctx->orphans_remove_sched = false;
-       raw_spin_unlock_irq(&ctx->lock);
-       mutex_unlock(&ctx->mutex);
-
-       put_ctx(ctx);
-}
-
 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
 {
        struct perf_event *child;
@@ -4054,7 +4073,7 @@ static void _perf_event_reset(struct perf_event *event)
 /*
  * Holding the top-level event's child_mutex means that any
  * descendant process that has inherited this event will block
- * in sync_child_event if it goes to exit, thus satisfying the
+ * in perf_event_exit_event() if it goes to exit, thus satisfying the
  * task existence requirements of perf_event_enable/disable.
  */
 static void perf_event_for_each_child(struct perf_event *event,
@@ -4086,36 +4105,14 @@ static void perf_event_for_each(struct perf_event *event,
                perf_event_for_each_child(sibling, func);
 }
 
-struct period_event {
-       struct perf_event *event;
-       u64 value;
-};
-
-static void ___perf_event_period(void *info)
-{
-       struct period_event *pe = info;
-       struct perf_event *event = pe->event;
-       u64 value = pe->value;
-
-       if (event->attr.freq) {
-               event->attr.sample_freq = value;
-       } else {
-               event->attr.sample_period = value;
-               event->hw.sample_period = value;
-       }
-
-       local64_set(&event->hw.period_left, 0);
-}
-
-static int __perf_event_period(void *info)
+static void __perf_event_period(struct perf_event *event,
+                               struct perf_cpu_context *cpuctx,
+                               struct perf_event_context *ctx,
+                               void *info)
 {
-       struct period_event *pe = info;
-       struct perf_event *event = pe->event;
-       struct perf_event_context *ctx = event->ctx;
-       u64 value = pe->value;
+       u64 value = *((u64 *)info);
        bool active;
 
-       raw_spin_lock(&ctx->lock);
        if (event->attr.freq) {
                event->attr.sample_freq = value;
        } else {
@@ -4135,14 +4132,10 @@ static int __perf_event_period(void *info)
                event->pmu->start(event, PERF_EF_RELOAD);
                perf_pmu_enable(ctx->pmu);
        }
-       raw_spin_unlock(&ctx->lock);
-
-       return 0;
 }
 
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
-       struct period_event pe = { .event = event, };
        u64 value;
 
        if (!is_sampling_event(event))
@@ -4157,10 +4150,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
        if (event->attr.freq && value > sysctl_perf_event_sample_rate)
                return -EINVAL;
 
-       pe.value = value;
-
-       event_function_call(event, __perf_event_period,
-                           ___perf_event_period, &pe);
+       event_function_call(event, __perf_event_period, &value);
 
        return 0;
 }
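
With event_function_call() now taking a plain void *info, the period update no longer needs the period_event wrapper struct; the callback simply casts info back to a u64. The shape of that calling convention, sketched in ordinary C with invented names:

#include <stdint.h>
#include <stdio.h>

/* hypothetical event-function signature: just an opaque info pointer */
typedef void (*event_func_t)(void *info);

static void set_period(void *info)
{
        uint64_t value = *(uint64_t *)info;     /* unwrap the plain value */

        printf("new sample period: %llu\n", (unsigned long long)value);
}

static void event_function_call(event_func_t func, void *info)
{
        func(info);     /* in the kernel this runs in the right CPU/context */
}

int main(void)
{
        uint64_t value = 4096;

        event_function_call(set_period, &value);        /* no wrapper struct */
        return 0;
}
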
@@ -4932,7 +4922,7 @@ static void perf_pending_event(struct irq_work *entry)
 
        if (event->pending_disable) {
                event->pending_disable = 0;
-               __perf_event_disable(event);
+               perf_event_disable_local(event);
        }
 
        if (event->pending_wakeup) {
@@ -7753,11 +7743,13 @@ static void account_event_cpu(struct perf_event *event, int cpu)
 
 static void account_event(struct perf_event *event)
 {
+       bool inc = false;
+
        if (event->parent)
                return;
 
        if (event->attach_state & PERF_ATTACH_TASK)
-               static_key_slow_inc(&perf_sched_events.key);
+               inc = true;
        if (event->attr.mmap || event->attr.mmap_data)
                atomic_inc(&nr_mmap_events);
        if (event->attr.comm)
@@ -7770,11 +7762,14 @@ static void account_event(struct perf_event *event)
        }
        if (event->attr.context_switch) {
                atomic_inc(&nr_switch_events);
-               static_key_slow_inc(&perf_sched_events.key);
+               inc = true;
        }
        if (has_branch_stack(event))
-               static_key_slow_inc(&perf_sched_events.key);
+               inc = true;
        if (is_cgroup_event(event))
+               inc = true;
+
+       if (inc)
                static_key_slow_inc(&perf_sched_events.key);
 
        account_event_cpu(event, event->cpu);
@@ -8422,11 +8417,11 @@ SYSCALL_DEFINE5(perf_event_open,
                 * See perf_event_ctx_lock() for comments on the details
                 * of swizzling perf_event::ctx.
                 */
-               perf_remove_from_context(group_leader, false);
+               perf_remove_from_context(group_leader, 0);
 
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_remove_from_context(sibling, false);
+                       perf_remove_from_context(sibling, 0);
                        put_ctx(gctx);
                }
 
@@ -8479,6 +8474,8 @@ SYSCALL_DEFINE5(perf_event_open,
        perf_event__header_size(event);
        perf_event__id_header_size(event);
 
+       event->owner = current;
+
        perf_install_in_context(ctx, event, event->cpu);
        perf_unpin_context(ctx);
 
@@ -8488,8 +8485,6 @@ SYSCALL_DEFINE5(perf_event_open,
 
        put_online_cpus();
 
-       event->owner = current;
-
        mutex_lock(&current->perf_event_mutex);
        list_add_tail(&event->owner_entry, &current->perf_event_list);
        mutex_unlock(&current->perf_event_mutex);
@@ -8556,7 +8551,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
        }
 
        /* Mark owner so we could distinguish it from user events. */
-       event->owner = EVENT_OWNER_KERNEL;
+       event->owner = TASK_TOMBSTONE;
 
        account_event(event);
 
@@ -8606,7 +8601,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
        mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
                                 event_entry) {
-               perf_remove_from_context(event, false);
+               perf_remove_from_context(event, 0);
                unaccount_event_cpu(event, src_cpu);
                put_ctx(src_ctx);
                list_add(&event->migrate_entry, &events);
@@ -8673,33 +8668,15 @@ static void sync_child_event(struct perf_event *child_event,
                     &parent_event->child_total_time_enabled);
        atomic64_add(child_event->total_time_running,
                     &parent_event->child_total_time_running);
-
-       /*
-        * Remove this event from the parent's list
-        */
-       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-       mutex_lock(&parent_event->child_mutex);
-       list_del_init(&child_event->child_list);
-       mutex_unlock(&parent_event->child_mutex);
-
-       /*
-        * Make sure user/parent get notified, that we just
-        * lost one event.
-        */
-       perf_event_wakeup(parent_event);
-
-       /*
-        * Release the parent event, if this was the last
-        * reference to it.
-        */
-       put_event(parent_event);
 }
 
 static void
-__perf_event_exit_task(struct perf_event *child_event,
-                        struct perf_event_context *child_ctx,
-                        struct task_struct *child)
+perf_event_exit_event(struct perf_event *child_event,
+                     struct perf_event_context *child_ctx,
+                     struct task_struct *child)
 {
+       struct perf_event *parent_event = child_event->parent;
+
        /*
         * Do not destroy the 'original' grouping; because of the context
         * switch optimization the original events could've ended up in a
@@ -8712,57 +8689,86 @@ __perf_event_exit_task(struct perf_event *child_event,
         * Do destroy all inherited groups, we don't care about those
         * and being thorough is better.
         */
-       perf_remove_from_context(child_event, !!child_event->parent);
+       raw_spin_lock_irq(&child_ctx->lock);
+       WARN_ON_ONCE(child_ctx->is_active);
+
+       if (parent_event)
+               perf_group_detach(child_event);
+       list_del_event(child_event, child_ctx);
+       child_event->state = PERF_EVENT_STATE_EXIT; /* see perf_event_release_kernel() */
+       raw_spin_unlock_irq(&child_ctx->lock);
 
        /*
-        * It can happen that the parent exits first, and has events
-        * that are still around due to the child reference. These
-        * events need to be zapped.
+        * Parent events are governed by their filedesc, retain them.
         */
-       if (child_event->parent) {
-               sync_child_event(child_event, child);
-               free_event(child_event);
-       } else {
-               child_event->state = PERF_EVENT_STATE_EXIT;
+       if (!parent_event) {
                perf_event_wakeup(child_event);
+               return;
        }
+       /*
+        * Child events can be cleaned up.
+        */
+
+       sync_child_event(child_event, child);
+
+       /*
+        * Remove this event from the parent's list
+        */
+       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
+       mutex_lock(&parent_event->child_mutex);
+       list_del_init(&child_event->child_list);
+       mutex_unlock(&parent_event->child_mutex);
+
+       /*
+        * Kick perf_poll() for is_event_hup().
+        */
+       perf_event_wakeup(parent_event);
+       free_event(child_event);
+       put_event(parent_event);
 }
 
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
-       struct perf_event *child_event, *next;
        struct perf_event_context *child_ctx, *clone_ctx = NULL;
-       unsigned long flags;
+       struct perf_event *child_event, *next;
+
+       WARN_ON_ONCE(child != current);
 
-       if (likely(!child->perf_event_ctxp[ctxn]))
+       child_ctx = perf_pin_task_context(child, ctxn);
+       if (!child_ctx)
                return;
 
-       local_irq_save(flags);
        /*
-        * We can't reschedule here because interrupts are disabled,
-        * and either child is current or it is a task that can't be
-        * scheduled, so we are now safe from rescheduling changing
-        * our context.
+        * In order to reduce the amount of trickiness in ctx tear-down, we hold
+        * ctx::mutex over the entire thing. This serializes against almost
+        * everything that wants to access the ctx.
+        *
+        * The exception is sys_perf_event_open() /
+        * perf_event_create_kernel_counter() which does find_get_context()
+        * without ctx::mutex (it cannot because of the move_group double mutex
+        * lock thing). See the comments in perf_install_in_context().
         */
-       child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
+       mutex_lock(&child_ctx->mutex);
 
        /*
-        * Take the context lock here so that if find_get_context is
-        * reading child->perf_event_ctxp, we wait until it has
-        * incremented the context's refcount before we do put_ctx below.
+        * In a single ctx::lock section, de-schedule the events and detach the
+        * context from the task such that we cannot ever get it scheduled back
+        * in.
         */
-       raw_spin_lock(&child_ctx->lock);
-       task_ctx_sched_out(child_ctx);
-       child->perf_event_ctxp[ctxn] = NULL;
+       raw_spin_lock_irq(&child_ctx->lock);
+       task_ctx_sched_out(__get_cpu_context(child_ctx), child_ctx);
 
        /*
-        * If this context is a clone; unclone it so it can't get
-        * swapped to another process while we're removing all
-        * the events from it.
+        * Now that the context is inactive, destroy the task <-> ctx relation
+        * and mark the context dead.
         */
+       RCU_INIT_POINTER(child->perf_event_ctxp[ctxn], NULL);
+       put_ctx(child_ctx); /* cannot be last */
+       WRITE_ONCE(child_ctx->task, TASK_TOMBSTONE);
+       put_task_struct(current); /* cannot be last */
+
        clone_ctx = unclone_ctx(child_ctx);
-       update_context_time(child_ctx);
-       raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
+       raw_spin_unlock_irq(&child_ctx->lock);
 
        if (clone_ctx)
                put_ctx(clone_ctx);
@@ -8774,20 +8780,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
         */
        perf_event_task(child, child_ctx, 0);
 
-       /*
-        * We can recurse on the same lock type through:
-        *
-        *   __perf_event_exit_task()
-        *     sync_child_event()
-        *       put_event()
-        *         mutex_lock(&ctx->mutex)
-        *
-        * But since its the parent context it won't be the same instance.
-        */
-       mutex_lock(&child_ctx->mutex);
-
        list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
-               __perf_event_exit_task(child_event, child_ctx, child);
+               perf_event_exit_event(child_event, child_ctx, child);
 
        mutex_unlock(&child_ctx->mutex);
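
Storing TASK_TOMBSTONE in child_ctx->task (and, later in this diff, in event->owner for kernel counters) gives lookups a third state besides NULL and a live pointer: "was attached, now dead". A tiny sketch of that sentinel-pointer idea; only the TASK_TOMBSTONE name comes from the patch, the rest is illustrative:

#include <stdio.h>

struct task { const char *comm; };

/* an address that can never be a real task pointer */
#define TASK_TOMBSTONE ((struct task *)-1L)

static void describe(struct task *t)
{
        if (t == NULL)
                printf("no task attached\n");
        else if (t == TASK_TOMBSTONE)
                printf("context is dead, refuse to schedule it\n");
        else
                printf("attached to %s\n", t->comm);
}

int main(void)
{
        static struct task me = { "worker" };
        struct task *ctx_task = &me;

        describe(ctx_task);
        ctx_task = TASK_TOMBSTONE;      /* mark the context dead on exit */
        describe(ctx_task);
        return 0;
}
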
 
@@ -8812,8 +8806,7 @@ void perf_event_exit_task(struct task_struct *child)
                 * the owner, closes a race against perf_release() where
                 * we need to serialize on the owner->perf_event_mutex.
                 */
-               smp_wmb();
-               event->owner = NULL;
+               smp_store_release(&event->owner, NULL);
        }
        mutex_unlock(&child->perf_event_mutex);
 
@@ -8896,21 +8889,20 @@ void perf_event_delayed_put(struct task_struct *task)
                WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
 }
 
-struct perf_event *perf_event_get(unsigned int fd)
+struct file *perf_event_get(unsigned int fd)
 {
-       int err;
-       struct fd f;
-       struct perf_event *event;
+       struct file *file;
 
-       err = perf_fget_light(fd, &f);
-       if (err)
-               return ERR_PTR(err);
+       file = fget_raw(fd);
+       if (!file)
+               return ERR_PTR(-EBADF);
 
-       event = f.file->private_data;
-       atomic_long_inc(&event->refcount);
-       fdput(f);
+       if (file->f_op != &perf_fops) {
+               fput(file);
+               return ERR_PTR(-EBADF);
+       }
 
-       return event;
+       return file;
 }
 
 const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
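
perf_event_get() now hands back the struct file itself and rejects any descriptor whose f_op is not perf_fops, instead of blindly trusting private_data. The same "check the ops pointer before casting the payload" idea in a self-contained userspace sketch (all names hypothetical):

#include <stdio.h>
#include <errno.h>

struct ops { const char *name; };

/* hypothetical handle: an ops pointer plus private data */
struct handle { const struct ops *ops; void *private_data; };

static const struct ops perf_like_ops = { "perf" };
static const struct ops other_ops     = { "socket" };

/* only hand out the payload if the handle really is "ours" */
static void *get_private(struct handle *h)
{
        if (h->ops != &perf_like_ops) {
                errno = EBADF;          /* mirrors the -EBADF in the patch */
                return NULL;
        }
        return h->private_data;
}

int main(void)
{
        int payload = 42;
        struct handle good = { &perf_like_ops, &payload };
        struct handle bad  = { &other_ops, &payload };

        printf("good: %p\n", get_private(&good));
        printf("bad:  %p (EBADF)\n", get_private(&bad));
        return 0;
}
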
@@ -8953,8 +8945,16 @@ inherit_event(struct perf_event *parent_event,
        if (IS_ERR(child_event))
                return child_event;
 
+       /*
+        * is_orphaned_event() and list_add_tail(&parent_event->child_list)
+        * must be under the same lock in order to serialize against
+        * perf_event_release_kernel(), such that either we must observe
+        * is_orphaned_event() or they will observe us on the child_list.
+        */
+       mutex_lock(&parent_event->child_mutex);
        if (is_orphaned_event(parent_event) ||
            !atomic_long_inc_not_zero(&parent_event->refcount)) {
+               mutex_unlock(&parent_event->child_mutex);
                free_event(child_event);
                return NULL;
        }
@@ -9002,8 +9002,6 @@ inherit_event(struct perf_event *parent_event,
        /*
         * Link this into the parent event's child list
         */
-       WARN_ON_ONCE(parent_event->ctx->parent_ctx);
-       mutex_lock(&parent_event->child_mutex);
        list_add_tail(&child_event->child_list, &parent_event->child_list);
        mutex_unlock(&parent_event->child_mutex);
 
@@ -9221,13 +9219,14 @@ static void perf_event_init_cpu(int cpu)
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
 static void __perf_event_exit_context(void *__info)
 {
-       struct remove_event re = { .detach_group = true };
        struct perf_event_context *ctx = __info;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+       struct perf_event *event;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
-               __perf_remove_from_context(&re);
-       rcu_read_unlock();
+       raw_spin_lock(&ctx->lock);
+       list_for_each_entry(event, &ctx->event_list, event_entry)
+               __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP);
+       raw_spin_unlock(&ctx->lock);
 }
 
 static void perf_event_exit_cpu_context(int cpu)
index 92ce5f4ccc264e01a5551965d173e39e41392143..3f8cb1e14588fe3258a1c3aa49874e008f5a266a 100644 (file)
@@ -444,7 +444,7 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att
         * current task.
         */
        if (irqs_disabled() && bp->ctx && bp->ctx->task == current)
-               __perf_event_disable(bp);
+               perf_event_disable_local(bp);
        else
                perf_event_disable(bp);
 
index adfdc0536117c1f10bf8f0dd8014798011e1f855..1faad2cfdb9e4572c91431e4b11941266e23a231 100644 (file)
@@ -459,6 +459,25 @@ static void rb_free_aux_page(struct ring_buffer *rb, int idx)
        __free_page(page);
 }
 
+static void __rb_free_aux(struct ring_buffer *rb)
+{
+       int pg;
+
+       if (rb->aux_priv) {
+               rb->free_aux(rb->aux_priv);
+               rb->free_aux = NULL;
+               rb->aux_priv = NULL;
+       }
+
+       if (rb->aux_nr_pages) {
+               for (pg = 0; pg < rb->aux_nr_pages; pg++)
+                       rb_free_aux_page(rb, pg);
+
+               kfree(rb->aux_pages);
+               rb->aux_nr_pages = 0;
+       }
+}
+
 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                 pgoff_t pgoff, int nr_pages, long watermark, int flags)
 {
@@ -547,30 +566,11 @@ out:
        if (!ret)
                rb->aux_pgoff = pgoff;
        else
-               rb_free_aux(rb);
+               __rb_free_aux(rb);
 
        return ret;
 }
 
-static void __rb_free_aux(struct ring_buffer *rb)
-{
-       int pg;
-
-       if (rb->aux_priv) {
-               rb->free_aux(rb->aux_priv);
-               rb->free_aux = NULL;
-               rb->aux_priv = NULL;
-       }
-
-       if (rb->aux_nr_pages) {
-               for (pg = 0; pg < rb->aux_nr_pages; pg++)
-                       rb_free_aux_page(rb, pg);
-
-               kfree(rb->aux_pages);
-               rb->aux_nr_pages = 0;
-       }
-}
-
 void rb_free_aux(struct ring_buffer *rb)
 {
        if (atomic_dec_and_test(&rb->aux_refcount))
index 0773f2b23b107dcc3be9e5caabdbaa0a25de01d0..5d6ce6413ef1d227b32c99a4330bee1289f9f571 100644 (file)
@@ -1191,7 +1191,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
        if (pi_state->owner != current)
                return -EINVAL;
 
-       raw_spin_lock(&pi_state->pi_mutex.wait_lock);
+       raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
        new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
        /*
@@ -1217,22 +1217,22 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
        else if (curval != uval)
                ret = -EINVAL;
        if (ret) {
-               raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+               raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
                return ret;
        }
 
-       raw_spin_lock_irq(&pi_state->owner->pi_lock);
+       raw_spin_lock(&pi_state->owner->pi_lock);
        WARN_ON(list_empty(&pi_state->list));
        list_del_init(&pi_state->list);
-       raw_spin_unlock_irq(&pi_state->owner->pi_lock);
+       raw_spin_unlock(&pi_state->owner->pi_lock);
 
-       raw_spin_lock_irq(&new_owner->pi_lock);
+       raw_spin_lock(&new_owner->pi_lock);
        WARN_ON(!list_empty(&pi_state->list));
        list_add(&pi_state->list, &new_owner->pi_state_list);
        pi_state->owner = new_owner;
-       raw_spin_unlock_irq(&new_owner->pi_lock);
+       raw_spin_unlock(&new_owner->pi_lock);
 
-       raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+       raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
 
        deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
 
@@ -2127,11 +2127,11 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
                 * we returned due to timeout or signal without taking the
                 * rt_mutex. Too late.
                 */
-               raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
+               raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
                owner = rt_mutex_owner(&q->pi_state->pi_mutex);
                if (!owner)
                        owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
-               raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
+               raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
                ret = fixup_pi_state_owner(uaddr, q, owner);
                goto out;
        }
index a302cf9a2126c8a911ff4da1a944c88c6d3b3f8a..57bff7857e879bf2301a92723ade0a968870e637 100644 (file)
@@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
        unsigned int flags = 0, irq = desc->irq_data.irq;
        struct irqaction *action = desc->action;
 
-       do {
+       /* action might have become NULL since we dropped the lock */
+       while (action) {
                irqreturn_t res;
 
                trace_irq_handler_entry(irq, action);
@@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
 
                retval |= res;
                action = action->next;
-       } while (action);
+       }
 
        add_interrupt_randomness(irq, flags);
 
index fcab63c669059308d3cc27054742587b0627b23f..3d182932d2d1e951eb06dc019eb3338d06273d3d 100644 (file)
@@ -160,6 +160,8 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
        __irq_put_desc_unlock(desc, flags, false);
 }
 
+#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
+
 /*
  * Manipulation functions for irq_data.state
  */
@@ -188,6 +190,8 @@ static inline bool irqd_has_set(struct irq_data *d, unsigned int mask)
        return __irqd_to_state(d) & mask;
 }
 
+#undef __irqd_to_state
+
 static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
 {
        __this_cpu_inc(*desc->kstat_irqs);
index 0409da0bcc3358b6b49ba6a79a339cf2176897ff..0ccd028817d7136ce7bd1faf10c6630690e2264e 100644 (file)
 static struct lock_class_key irq_desc_lock_class;
 
 #if defined(CONFIG_SMP)
+static int __init irq_affinity_setup(char *str)
+{
+       zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+       cpulist_parse(str, irq_default_affinity);
+       /*
+        * Set at least the boot CPU. We don't want to end up with
+        * bug reports caused by random command-line masks
+        */
+       cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
+       return 1;
+}
+__setup("irqaffinity=", irq_affinity_setup);
+
 static void __init init_irq_default_affinity(void)
 {
-       alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
-       cpumask_setall(irq_default_affinity);
+#ifdef CONFIG_CPUMASK_OFFSTACK
+       if (!irq_default_affinity)
+               zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+#endif
+       if (cpumask_empty(irq_default_affinity))
+               cpumask_setall(irq_default_affinity);
 }
 #else
 static void __init init_irq_default_affinity(void)
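
The new irqaffinity= early parameter parses a CPU list into irq_default_affinity and then unconditionally sets the boot CPU, so a garbled command line can never leave the mask empty. A userspace sketch of that parse-then-guarantee-a-fallback pattern using a plain 64-bit mask and a much simpler parser than cpulist_parse() (names invented):

#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_cpulist(const char *str)
{
        unsigned long mask = 0;
        char *end;

        while (*str) {
                long cpu = strtol(str, &end, 10);

                if (end == str)
                        break;                  /* not a number, give up */
                if (cpu >= 0 && cpu < 64)
                        mask |= 1UL << cpu;
                str = (*end == ',') ? end + 1 : end;
        }
        return mask;
}

int main(void)
{
        unsigned long affinity = parse_cpulist("junk");  /* bogus command line */
        int boot_cpu = 0;

        /* never end up with an empty mask: always include the boot CPU */
        affinity |= 1UL << boot_cpu;

        printf("affinity mask: %#lx\n", affinity);
        return 0;
}
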
index 6e655f7acd3bf79b68f62fc4fbdf6806cfd11884..3e56d2f03e24e6ee147e837727e6aac7a479f9aa 100644 (file)
@@ -575,10 +575,15 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
        unsigned int type = IRQ_TYPE_NONE;
        int virq;
 
-       if (fwspec->fwnode)
-               domain = irq_find_matching_fwnode(fwspec->fwnode, DOMAIN_BUS_ANY);
-       else
+       if (fwspec->fwnode) {
+               domain = irq_find_matching_fwnode(fwspec->fwnode,
+                                                 DOMAIN_BUS_WIRED);
+               if (!domain)
+                       domain = irq_find_matching_fwnode(fwspec->fwnode,
+                                                         DOMAIN_BUS_ANY);
+       } else {
                domain = irq_default_domain;
+       }
 
        if (!domain) {
                pr_warn("no irq domain found for %s !\n",
index 8dc65914486999f43caa9276dde914663919fbc6..8d34308ea449ad31bae472f0403c5f1b25324683 100644 (file)
@@ -66,13 +66,15 @@ struct resource crashk_res = {
        .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
-       .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
+       .desc  = IORES_DESC_CRASH_KERNEL
 };
 struct resource crashk_low_res = {
        .name  = "Crash kernel",
        .start = 0,
        .end   = 0,
-       .flags = IORESOURCE_BUSY | IORESOURCE_MEM
+       .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
+       .desc  = IORES_DESC_CRASH_KERNEL
 };
 
 int kexec_should_crash(struct task_struct *p)
@@ -959,7 +961,7 @@ int crash_shrink_memory(unsigned long new_size)
 
        ram_res->start = end;
        ram_res->end = crashk_res.end;
-       ram_res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
+       ram_res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
        ram_res->name = "System RAM";
 
        crashk_res.end = end - 1;
index 007b791f676d5605fb674b6db333d9dd1d0a23b4..56b18eb1f0013c9bc7e041a3dd2a8ecaeb580b95 100644 (file)
@@ -524,10 +524,10 @@ int kexec_add_buffer(struct kimage *image, char *buffer, unsigned long bufsz,
 
        /* Walk the RAM ranges and allocate a suitable range for the buffer */
        if (image->type == KEXEC_TYPE_CRASH)
-               ret = walk_iomem_res("Crash kernel",
-                                    IORESOURCE_MEM | IORESOURCE_BUSY,
-                                    crashk_res.start, crashk_res.end, kbuf,
-                                    locate_mem_hole_callback);
+               ret = walk_iomem_res_desc(crashk_res.desc,
+                               IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
+                               crashk_res.start, crashk_res.end, kbuf,
+                               locate_mem_hole_callback);
        else
                ret = walk_system_ram_res(0, -1, kbuf,
                                          locate_mem_hole_callback);
index a02812743a7e63378a79cc768255f807a7fd469b..b5c30d9f46c5084acddbd34e533f91e9c98e8300 100644 (file)
  * of times)
  */
 
-#include <linux/latencytop.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/spinlock.h>
 #include <linux/proc_fs.h>
+#include <linux/latencytop.h>
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/list.h>
@@ -289,4 +289,16 @@ static int __init init_lstats_procfs(void)
        proc_create("latency_stats", 0644, NULL, &lstats_fops);
        return 0;
 }
+
+int sysctl_latencytop(struct ctl_table *table, int write,
+                       void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       int err;
+
+       err = proc_dointvec(table, write, buffer, lenp, ppos);
+       if (latencytop_enabled)
+               force_schedstat_enabled();
+
+       return err;
+}
 device_initcall(init_lstats_procfs);
index 60ace56618f6c222de90e569bf1702b6dba05e26..3261214323fa1104287c0ab2fdac76cab9c88848 100644 (file)
@@ -123,8 +123,6 @@ static inline int debug_locks_off_graph_unlock(void)
        return ret;
 }
 
-static int lockdep_initialized;
-
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
@@ -292,7 +290,7 @@ LIST_HEAD(all_lock_classes);
 #define __classhashfn(key)     hash_long((unsigned long)key, CLASSHASH_BITS)
 #define classhashentry(key)    (classhash_table + __classhashfn((key)))
 
-static struct list_head classhash_table[CLASSHASH_SIZE];
+static struct hlist_head classhash_table[CLASSHASH_SIZE];
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +301,7 @@ static struct list_head classhash_table[CLASSHASH_SIZE];
 #define __chainhashfn(chain)   hash_long(chain, CHAINHASH_BITS)
 #define chainhashentry(chain)  (chainhash_table + __chainhashfn((chain)))
 
-static struct list_head chainhash_table[CHAINHASH_SIZE];
+static struct hlist_head chainhash_table[CHAINHASH_SIZE];
 
 /*
  * The hash key of the lock dependency chains is a hash itself too:
@@ -433,19 +431,6 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 
 #ifdef CONFIG_DEBUG_LOCKDEP
-/*
- * We cannot printk in early bootup code. Not even early_printk()
- * might work. So we mark any initialization errors and printk
- * about it later on, in lockdep_info().
- */
-static int lockdep_init_error;
-static const char *lock_init_error;
-static unsigned long lockdep_init_trace_data[20];
-static struct stack_trace lockdep_init_trace = {
-       .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
-       .entries = lockdep_init_trace_data,
-};
-
 /*
  * Various lockdep statistics:
  */
@@ -666,23 +651,9 @@ static inline struct lock_class *
 look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
        struct lockdep_subclass_key *key;
-       struct list_head *hash_head;
+       struct hlist_head *hash_head;
        struct lock_class *class;
 
-#ifdef CONFIG_DEBUG_LOCKDEP
-       /*
-        * If the architecture calls into lockdep before initializing
-        * the hashes then we'll warn about it later. (we cannot printk
-        * right now)
-        */
-       if (unlikely(!lockdep_initialized)) {
-               lockdep_init();
-               lockdep_init_error = 1;
-               lock_init_error = lock->name;
-               save_stack_trace(&lockdep_init_trace);
-       }
-#endif
-
        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
                debug_locks_off();
                printk(KERN_ERR
@@ -719,7 +690,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return NULL;
 
-       list_for_each_entry_rcu(class, hash_head, hash_entry) {
+       hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key) {
                        /*
                         * Huh! same key, different name? Did someone trample
@@ -742,7 +713,7 @@ static inline struct lock_class *
 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
        struct lockdep_subclass_key *key;
-       struct list_head *hash_head;
+       struct hlist_head *hash_head;
        struct lock_class *class;
 
        DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +745,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
         * We have to do the hash-walk again, to avoid races
         * with another CPU:
         */
-       list_for_each_entry_rcu(class, hash_head, hash_entry) {
+       hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
                if (class->key == key)
                        goto out_unlock_set;
        }
@@ -805,7 +776,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
         * We use RCU's safe list-add method to make
         * parallel walking of the hash-list safe:
         */
-       list_add_tail_rcu(&class->hash_entry, hash_head);
+       hlist_add_head_rcu(&class->hash_entry, hash_head);
        /*
         * Add it to the global list of classes:
         */
@@ -1822,7 +1793,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-              struct held_lock *next, int distance, int trylock_loop)
+              struct held_lock *next, int distance, int *stack_saved)
 {
        struct lock_list *entry;
        int ret;
@@ -1883,8 +1854,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
                }
        }
 
-       if (!trylock_loop && !save_trace(&trace))
-               return 0;
+       if (!*stack_saved) {
+               if (!save_trace(&trace))
+                       return 0;
+               *stack_saved = 1;
+       }
 
        /*
         * Ok, all validations passed, add the new lock
@@ -1907,6 +1881,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
         * Debugging printouts:
         */
        if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
+               /* We drop graph lock, so another thread can overwrite trace. */
+               *stack_saved = 0;
                graph_unlock();
                printk("\n new dependency: ");
                print_lock_name(hlock_class(prev));
@@ -1929,7 +1905,7 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
        int depth = curr->lockdep_depth;
-       int trylock_loop = 0;
+       int stack_saved = 0;
        struct held_lock *hlock;
 
        /*
@@ -1956,7 +1932,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                 */
                if (hlock->read != 2 && hlock->check) {
                        if (!check_prev_add(curr, hlock, next,
-                                               distance, trylock_loop))
+                                               distance, &stack_saved))
                                return 0;
                        /*
                         * Stop after the first non-trylock entry,
@@ -1979,7 +1955,6 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                if (curr->held_locks[depth].irq_context !=
                                curr->held_locks[depth-1].irq_context)
                        break;
-               trylock_loop = 1;
        }
        return 1;
 out_bug:
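
check_prev_add() now takes an int *stack_saved out-parameter so the expensive save_trace() runs at most once per check_prevs_add() call (and is re-armed when the graph lock is dropped for the verbose printout). The "snapshot once per outer operation" shape, reduced to a userspace sketch with invented names:

#include <stdio.h>

/* stand-in for save_trace(): pretend it is expensive */
static int save_snapshot(void)
{
        printf("taking expensive snapshot\n");
        return 1;       /* success */
}

/* inner step: snapshot only if nobody did it yet for this outer call */
static int add_dependency(int from, int to, int *stack_saved)
{
        if (!*stack_saved) {
                if (!save_snapshot())
                        return 0;
                *stack_saved = 1;
        }
        printf("recording edge %d -> %d\n", from, to);
        return 1;
}

int main(void)
{
        int stack_saved = 0;
        int prev;

        /* the outer loop adds several edges but snapshots only once */
        for (prev = 3; prev >= 1; prev--)
                if (!add_dependency(prev, 4, &stack_saved))
                        return 1;
        return 0;
}
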
@@ -2017,7 +1992,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
                                     u64 chain_key)
 {
        struct lock_class *class = hlock_class(hlock);
-       struct list_head *hash_head = chainhashentry(chain_key);
+       struct hlist_head *hash_head = chainhashentry(chain_key);
        struct lock_chain *chain;
        struct held_lock *hlock_curr;
        int i, j;
@@ -2033,7 +2008,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
         * We can walk it lock-free, because entries only get added
         * to the hash:
         */
-       list_for_each_entry_rcu(chain, hash_head, entry) {
+       hlist_for_each_entry_rcu(chain, hash_head, entry) {
                if (chain->chain_key == chain_key) {
 cache_hit:
                        debug_atomic_inc(chain_lookup_hits);
@@ -2057,7 +2032,7 @@ cache_hit:
        /*
         * We have to walk the chain again locked - to avoid duplicates:
         */
-       list_for_each_entry(chain, hash_head, entry) {
+       hlist_for_each_entry(chain, hash_head, entry) {
                if (chain->chain_key == chain_key) {
                        graph_unlock();
                        goto cache_hit;
@@ -2091,7 +2066,7 @@ cache_hit:
                }
                chain_hlocks[chain->base + j] = class - lock_classes;
        }
-       list_add_tail_rcu(&chain->entry, hash_head);
+       hlist_add_head_rcu(&chain->entry, hash_head);
        debug_atomic_inc(chain_lookup_misses);
        inc_chains();
 
@@ -3875,7 +3850,7 @@ void lockdep_reset(void)
        nr_process_chains = 0;
        debug_locks = 1;
        for (i = 0; i < CHAINHASH_SIZE; i++)
-               INIT_LIST_HEAD(chainhash_table + i);
+               INIT_HLIST_HEAD(chainhash_table + i);
        raw_local_irq_restore(flags);
 }
 
@@ -3894,7 +3869,7 @@ static void zap_class(struct lock_class *class)
        /*
         * Unhash the class and remove it from the all_lock_classes list:
         */
-       list_del_rcu(&class->hash_entry);
+       hlist_del_rcu(&class->hash_entry);
        list_del_rcu(&class->lock_entry);
 
        RCU_INIT_POINTER(class->key, NULL);
@@ -3917,7 +3892,7 @@ static inline int within(const void *addr, void *start, unsigned long size)
 void lockdep_free_key_range(void *start, unsigned long size)
 {
        struct lock_class *class;
-       struct list_head *head;
+       struct hlist_head *head;
        unsigned long flags;
        int i;
        int locked;
@@ -3930,9 +3905,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
         */
        for (i = 0; i < CLASSHASH_SIZE; i++) {
                head = classhash_table + i;
-               if (list_empty(head))
-                       continue;
-               list_for_each_entry_rcu(class, head, hash_entry) {
+               hlist_for_each_entry_rcu(class, head, hash_entry) {
                        if (within(class->key, start, size))
                                zap_class(class);
                        else if (within(class->name, start, size))
@@ -3962,7 +3935,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
        struct lock_class *class;
-       struct list_head *head;
+       struct hlist_head *head;
        unsigned long flags;
        int i, j;
        int locked;
@@ -3987,9 +3960,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
        locked = graph_lock();
        for (i = 0; i < CLASSHASH_SIZE; i++) {
                head = classhash_table + i;
-               if (list_empty(head))
-                       continue;
-               list_for_each_entry_rcu(class, head, hash_entry) {
+               hlist_for_each_entry_rcu(class, head, hash_entry) {
                        int match = 0;
 
                        for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4013,28 +3984,6 @@ out_restore:
        raw_local_irq_restore(flags);
 }
 
-void lockdep_init(void)
-{
-       int i;
-
-       /*
-        * Some architectures have their own start_kernel()
-        * code which calls lockdep_init(), while we also
-        * call lockdep_init() from the start_kernel() itself,
-        * and we want to initialize the hashes only once:
-        */
-       if (lockdep_initialized)
-               return;
-
-       for (i = 0; i < CLASSHASH_SIZE; i++)
-               INIT_LIST_HEAD(classhash_table + i);
-
-       for (i = 0; i < CHAINHASH_SIZE; i++)
-               INIT_LIST_HEAD(chainhash_table + i);
-
-       lockdep_initialized = 1;
-}
-
 void __init lockdep_info(void)
 {
        printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
@@ -4061,14 +4010,6 @@ void __init lockdep_info(void)
 
        printk(" per task-struct memory footprint: %lu bytes\n",
                sizeof(struct held_lock) * MAX_LOCK_DEPTH);
-
-#ifdef CONFIG_DEBUG_LOCKDEP
-       if (lockdep_init_error) {
-               printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error);
-               printk("Call stack leading to lockdep invocation was:\n");
-               print_stack_trace(&lockdep_init_trace, 0);
-       }
-#endif
 }
 
 static void
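
The conversion of classhash_table and chainhash_table from list_head to hlist_head halves the static bucket arrays (one pointer per bucket instead of two) and, since an all-NULL hlist_head array is already a valid empty table, lets the explicit lockdep_init() go away. A self-contained sketch of single-pointer hash buckets; the kernel's hlist nodes additionally keep a pprev back-pointer for O(1) unlinking, omitted here:

#include <stdio.h>
#include <stddef.h>

#define NBUCKETS 8

struct node { struct node *next; int key; };

/* one pointer per bucket; an all-zero table is already "initialized" */
static struct node *buckets[NBUCKETS];

static void add(struct node *n)
{
        unsigned int b = (unsigned int)n->key % NBUCKETS;

        n->next = buckets[b];           /* push at the head, like hlist_add_head() */
        buckets[b] = n;
}

static struct node *lookup(int key)
{
        struct node *n;

        for (n = buckets[key % NBUCKETS]; n; n = n->next)
                if (n->key == key)
                        return n;
        return NULL;
}

int main(void)
{
        static struct node a = { NULL, 17 }, b = { NULL, 25 };

        add(&a);
        add(&b);        /* 17 and 25 collide in bucket 1 */
        printf("found %d\n", lookup(25)->key);
        printf("bucket head is %zu bytes, a list_head would be %zu\n",
               sizeof(buckets[0]), 2 * sizeof(void *));
        return 0;
}
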
index 0551c219c40e5bb8f8f87bfb0445100fe1b31cc9..596b3416b97120a93d233ad84edab731d43b2155 100644 (file)
@@ -256,7 +256,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
        struct task_struct *owner;
        int retval = 1;
 
-       if (need_resched())
+       if (need_resched() || atomic_read(&lock->count) == -1)
                return 0;
 
        rcu_read_lock();
@@ -283,10 +283,11 @@ static inline bool mutex_try_to_acquire(struct mutex *lock)
 /*
  * Optimistic spinning.
  *
- * We try to spin for acquisition when we find that the lock owner
- * is currently running on a (different) CPU and while we don't
- * need to reschedule. The rationale is that if the lock owner is
- * running, it is likely to release the lock soon.
+ * We try to spin for acquisition when we find that there are no
+ * pending waiters and the lock owner is currently running on a
+ * (different) CPU and while we don't need to reschedule. The
+ * rationale is that if the lock owner is running, it is likely
+ * to release the lock soon.
  *
  * Since this needs the lock owner, and this mutex implementation
  * doesn't track the owner atomically in the lock field, we need to
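
mutex_can_spin_on_owner() now also refuses to spin when lock->count reads -1, i.e. when there are already sleeping waiters, rather than only when the spinner needs to reschedule. Just that gate, sketched with C11 atomics and stub helpers (not the kernel's mutex):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* 1 = unlocked, 0 = locked with no waiters, -1 = locked with waiters */
struct fake_mutex { atomic_int count; };

static bool need_resched_stub(void) { return false; }   /* hypothetical */

/* worth spinning only if we need not reschedule and nobody is queued */
static bool can_spin_on_owner(struct fake_mutex *lock)
{
        if (need_resched_stub() || atomic_load(&lock->count) == -1)
                return false;
        return true;    /* (the kernel additionally checks owner->on_cpu) */
}

int main(void)
{
        struct fake_mutex m;

        atomic_init(&m.count, 0);
        printf("no waiters:   spin = %d\n", can_spin_on_owner(&m));
        atomic_store(&m.count, -1);
        printf("with waiters: spin = %d\n", can_spin_on_owner(&m));
        return 0;
}
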
index 8251e75dd9c0bd67337754baf6f03fb2cf1f956f..3e746607abe5230bb9c650957582669c085d9a90 100644 (file)
@@ -99,13 +99,14 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
  * 2) Drop lock->wait_lock
  * 3) Try to unlock the lock with cmpxchg
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+                                       unsigned long flags)
        __releases(lock->wait_lock)
 {
        struct task_struct *owner = rt_mutex_owner(lock);
 
        clear_rt_mutex_waiters(lock);
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        /*
         * If a new waiter comes in between the unlock and the cmpxchg
         * we have two situations:
@@ -147,11 +148,12 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 /*
  * Simple slow path only version: lock->owner is protected by lock->wait_lock.
  */
-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
+                                       unsigned long flags)
        __releases(lock->wait_lock)
 {
        lock->owner = NULL;
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
        return true;
 }
 #endif
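
unlock_rt_mutex_safe() now receives the caller's saved flags and performs the unlock and the irq restore in one place, because interrupt state is managed at the outermost wait_lock acquisition. There is no userspace equivalent of disabling interrupts, but a rough analogue using a saved signal mask shows the "pass the saved state into the unlock helper" shape (a pthread mutex and signals standing in for the raw spinlock and irqs):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;

/* release the lock and restore the caller's saved signal mask in one place */
static void unlock_restore(pthread_mutex_t *lock, const sigset_t *saved)
{
        pthread_mutex_unlock(lock);
        pthread_sigmask(SIG_SETMASK, saved, NULL);
}

int main(void)
{
        sigset_t all, saved;

        sigfillset(&all);
        pthread_sigmask(SIG_BLOCK, &all, &saved);       /* "disable interrupts" */
        pthread_mutex_lock(&wait_lock);

        puts("critical section with signals blocked");

        unlock_restore(&wait_lock, &saved);     /* like unlock_rt_mutex_safe(lock, flags) */
        return 0;
}
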
@@ -433,7 +435,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        int ret = 0, depth = 0;
        struct rt_mutex *lock;
        bool detect_deadlock;
-       unsigned long flags;
        bool requeue = true;
 
        detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
@@ -476,7 +477,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        /*
         * [1] Task cannot go away as we did a get_task() before !
         */
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock_irq(&task->pi_lock);
 
        /*
         * [2] Get the waiter on which @task is blocked on.
@@ -560,7 +561,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
         * operations.
         */
        if (!raw_spin_trylock(&lock->wait_lock)) {
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               raw_spin_unlock_irq(&task->pi_lock);
                cpu_relax();
                goto retry;
        }
@@ -591,7 +592,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                /*
                 * No requeue[7] here. Just release @task [8]
                 */
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               raw_spin_unlock(&task->pi_lock);
                put_task_struct(task);
 
                /*
@@ -599,14 +600,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                 * If there is no owner of the lock, end of chain.
                 */
                if (!rt_mutex_owner(lock)) {
-                       raw_spin_unlock(&lock->wait_lock);
+                       raw_spin_unlock_irq(&lock->wait_lock);
                        return 0;
                }
 
                /* [10] Grab the next task, i.e. owner of @lock */
                task = rt_mutex_owner(lock);
                get_task_struct(task);
-               raw_spin_lock_irqsave(&task->pi_lock, flags);
+               raw_spin_lock(&task->pi_lock);
 
                /*
                 * No requeue [11] here. We just do deadlock detection.
@@ -621,8 +622,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                top_waiter = rt_mutex_top_waiter(lock);
 
                /* [13] Drop locks */
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock(&task->pi_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
 
                /* If owner is not blocked, end of chain. */
                if (!next_lock)
@@ -643,7 +644,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        rt_mutex_enqueue(lock, waiter);
 
        /* [8] Release the task */
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock(&task->pi_lock);
        put_task_struct(task);
 
        /*
@@ -661,14 +662,14 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                 */
                if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
                return 0;
        }
 
        /* [10] Grab the next task, i.e. the owner of @lock */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock(&task->pi_lock);
 
        /* [11] requeue the pi waiters if necessary */
        if (waiter == rt_mutex_top_waiter(lock)) {
@@ -722,8 +723,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        top_waiter = rt_mutex_top_waiter(lock);
 
        /* [13] Drop the locks */
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock(&task->pi_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        /*
         * Make the actual exit decisions [12], based on the stored
@@ -746,7 +747,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        goto again;
 
  out_unlock_pi:
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock_irq(&task->pi_lock);
  out_put_task:
        put_task_struct(task);
 
@@ -756,7 +757,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 /*
  * Try to take an rt-mutex
  *
- * Must be called with lock->wait_lock held.
+ * Must be called with lock->wait_lock held and interrupts disabled
  *
  * @lock:   The lock to be acquired.
  * @task:   The task which wants to acquire the lock
@@ -766,8 +767,6 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                                struct rt_mutex_waiter *waiter)
 {
-       unsigned long flags;
-
        /*
         * Before testing whether we can acquire @lock, we set the
         * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
@@ -852,7 +851,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
         * case, but conditionals are more expensive than a redundant
         * store.
         */
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock(&task->pi_lock);
        task->pi_blocked_on = NULL;
        /*
         * Finish the lock acquisition. @task is the new owner. If
@@ -861,7 +860,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
         */
        if (rt_mutex_has_waiters(lock))
                rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock(&task->pi_lock);
 
 takeit:
        /* We got the lock. */
@@ -883,7 +882,7 @@ takeit:
  *
  * Prepare waiter and propagate pi chain
  *
- * This must be called with lock->wait_lock held.
+ * This must be called with lock->wait_lock held and interrupts disabled
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
@@ -894,7 +893,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        struct rt_mutex_waiter *top_waiter = waiter;
        struct rt_mutex *next_lock;
        int chain_walk = 0, res;
-       unsigned long flags;
 
        /*
         * Early deadlock detection. We really don't want the task to
@@ -908,7 +906,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        if (owner == task)
                return -EDEADLK;
 
-       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       raw_spin_lock(&task->pi_lock);
        __rt_mutex_adjust_prio(task);
        waiter->task = task;
        waiter->lock = lock;
@@ -921,12 +919,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        task->pi_blocked_on = waiter;
 
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       raw_spin_unlock(&task->pi_lock);
 
        if (!owner)
                return 0;
 
-       raw_spin_lock_irqsave(&owner->pi_lock, flags);
+       raw_spin_lock(&owner->pi_lock);
        if (waiter == rt_mutex_top_waiter(lock)) {
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);
@@ -941,7 +939,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);
 
-       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       raw_spin_unlock(&owner->pi_lock);
        /*
         * Even if full deadlock detection is on, if the owner is not
         * blocked itself, we can avoid finding this out in the chain
@@ -957,12 +955,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         */
        get_task_struct(owner);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
                                         next_lock, waiter, task);
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 
        return res;
 }
@@ -971,15 +969,14 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
  * Remove the top waiter from the current tasks pi waiter tree and
  * queue it up.
  *
- * Called with lock->wait_lock held.
+ * Called with lock->wait_lock held and interrupts disabled.
  */
 static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
                                    struct rt_mutex *lock)
 {
        struct rt_mutex_waiter *waiter;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&current->pi_lock, flags);
+       raw_spin_lock(&current->pi_lock);
 
        waiter = rt_mutex_top_waiter(lock);
 
@@ -1001,7 +998,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
         */
        lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
-       raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+       raw_spin_unlock(&current->pi_lock);
 
        wake_q_add(wake_q, waiter->task);
 }
@@ -1009,7 +1006,7 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
 /*
  * Remove a waiter from a lock and give up
  *
- * Must be called with lock->wait_lock held and
+ * Must be called with lock->wait_lock held and interrupts disabled. I must
  * have just failed to try_to_take_rt_mutex().
  */
 static void remove_waiter(struct rt_mutex *lock,
@@ -1018,12 +1015,11 @@ static void remove_waiter(struct rt_mutex *lock,
        bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex *next_lock;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&current->pi_lock, flags);
+       raw_spin_lock(&current->pi_lock);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
-       raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+       raw_spin_unlock(&current->pi_lock);
 
        /*
         * Only update priority if the waiter was the highest priority
@@ -1032,7 +1028,7 @@ static void remove_waiter(struct rt_mutex *lock,
        if (!owner || !is_top_waiter)
                return;
 
-       raw_spin_lock_irqsave(&owner->pi_lock, flags);
+       raw_spin_lock(&owner->pi_lock);
 
        rt_mutex_dequeue_pi(owner, waiter);
 
@@ -1044,7 +1040,7 @@ static void remove_waiter(struct rt_mutex *lock,
        /* Store the lock on which owner is blocked or NULL */
        next_lock = task_blocked_on_lock(owner);
 
-       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       raw_spin_unlock(&owner->pi_lock);
 
        /*
         * Don't walk the chain, if the owner task is not blocked
@@ -1056,12 +1052,12 @@ static void remove_waiter(struct rt_mutex *lock,
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(owner);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
                                   next_lock, NULL, current);
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 }
 
 /*
@@ -1097,11 +1093,11 @@ void rt_mutex_adjust_pi(struct task_struct *task)
  * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
  * @lock:               the rt_mutex to take
  * @state:              the state the task should block in (TASK_INTERRUPTIBLE
- *                      or TASK_UNINTERRUPTIBLE)
+ *                      or TASK_UNINTERRUPTIBLE)
  * @timeout:            the pre-initialized and started timer, or NULL for none
  * @waiter:             the pre-initialized rt_mutex_waiter
  *
- * lock->wait_lock must be held by the caller.
+ * Must be called with lock->wait_lock held and interrupts disabled
  */
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
@@ -1129,13 +1125,13 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
                                break;
                }
 
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
 
                debug_rt_mutex_print_deadlock(waiter);
 
                schedule();
 
-               raw_spin_lock(&lock->wait_lock);
+               raw_spin_lock_irq(&lock->wait_lock);
                set_current_state(state);
        }
 
@@ -1172,17 +1168,26 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  enum rtmutex_chainwalk chwalk)
 {
        struct rt_mutex_waiter waiter;
+       unsigned long flags;
        int ret = 0;
 
        debug_rt_mutex_init_waiter(&waiter);
        RB_CLEAR_NODE(&waiter.pi_tree_entry);
        RB_CLEAR_NODE(&waiter.tree_entry);
 
-       raw_spin_lock(&lock->wait_lock);
+       /*
+        * Technically we could use raw_spin_[un]lock_irq() here, but this can
+        * be called in early boot if the cmpxchg() fast path is disabled
+        * (debug, no architecture support). In this case we will acquire the
+        * rtmutex with lock->wait_lock held. But we cannot unconditionally
+        * enable interrupts in that early boot case. So we need to use the
+        * irqsave/restore variants.
+        */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        /* Try to acquire the lock again: */
        if (try_to_take_rt_mutex(lock, current, NULL)) {
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
                return 0;
        }
 
@@ -1211,7 +1216,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
         */
        fixup_rt_mutex_waiters(lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        /* Remove pending timer: */
        if (unlikely(timeout))
@@ -1227,6 +1232,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
  */
 static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
+       unsigned long flags;
        int ret;
 
        /*
@@ -1238,10 +1244,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
                return 0;
 
        /*
-        * The mutex has currently no owner. Lock the wait lock and
-        * try to acquire the lock.
+        * The mutex has currently no owner. Lock the wait lock and try to
+        * acquire the lock. We use irqsave here to support early boot calls.
         */
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        ret = try_to_take_rt_mutex(lock, current, NULL);
 
@@ -1251,7 +1257,7 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
         */
        fixup_rt_mutex_waiters(lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        return ret;
 }
@@ -1263,7 +1269,10 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
                                        struct wake_q_head *wake_q)
 {
-       raw_spin_lock(&lock->wait_lock);
+       unsigned long flags;
+
+       /* irqsave required to support early boot calls */
+       raw_spin_lock_irqsave(&lock->wait_lock, flags);
 
        debug_rt_mutex_unlock(lock);
 
@@ -1302,10 +1311,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
         */
        while (!rt_mutex_has_waiters(lock)) {
                /* Drops lock->wait_lock ! */
-               if (unlock_rt_mutex_safe(lock) == true)
+               if (unlock_rt_mutex_safe(lock, flags) == true)
                        return false;
                /* Relock the rtmutex and try again */
-               raw_spin_lock(&lock->wait_lock);
+               raw_spin_lock_irqsave(&lock->wait_lock, flags);
        }
 
        /*
@@ -1316,7 +1325,7 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
         */
        mark_wakeup_next_waiter(wake_q, lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 
        /* check PI boosting */
        return true;
@@ -1596,10 +1605,10 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 {
        int ret;
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 
        if (try_to_take_rt_mutex(lock, task, NULL)) {
-               raw_spin_unlock(&lock->wait_lock);
+               raw_spin_unlock_irq(&lock->wait_lock);
                return 1;
        }
 
@@ -1620,7 +1629,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
        if (unlikely(ret))
                remove_waiter(lock, waiter);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        debug_rt_mutex_print_deadlock(waiter);
 
@@ -1668,7 +1677,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
 {
        int ret;
 
-       raw_spin_lock(&lock->wait_lock);
+       raw_spin_lock_irq(&lock->wait_lock);
 
        set_current_state(TASK_INTERRUPTIBLE);
 
@@ -1684,7 +1693,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
         */
        fixup_rt_mutex_waiters(lock);
 
-       raw_spin_unlock(&lock->wait_lock);
+       raw_spin_unlock_irq(&lock->wait_lock);
 
        return ret;
 }
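The hunks above (the path header is not shown in this listing; from the content it is apparently kernel/locking/rtmutex.c) switch lock->wait_lock from the irqsave/irqrestore spinlock forms to the plain _irq forms, keeping the irqsave forms only in the lock, trylock and unlock slow paths, where the caller may already have interrupts disabled (the early-boot case noted in the added comment). A minimal sketch of the difference between the two forms, assuming nothing beyond the standard raw spinlock API; the demo_* names are invented for illustration and are not part of the patch:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

/*
 * Safe even when the caller already runs with interrupts disabled:
 * the prior IRQ state is saved in 'flags' and restored on unlock.
 */
static void demo_irqsave_path(void)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&demo_lock, flags);
        /* ... critical section ... */
        raw_spin_unlock_irqrestore(&demo_lock, flags);
}

/*
 * Slightly cheaper, but raw_spin_unlock_irq() unconditionally re-enables
 * interrupts, so it is only valid where interrupts are known to be
 * enabled on entry; hence the slow paths above keep the irqsave variant.
 */
static void demo_irq_path(void)
{
        raw_spin_lock_irq(&demo_lock);
        /* ... critical section ... */
        raw_spin_unlock_irq(&demo_lock);
}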
index e517a16cb426cc30b0251a43d22c5d2cb01bead9..f80167d6a66d00e1151462df65886965ec4cb357 100644
@@ -47,7 +47,7 @@ static void *try_ram_remap(resource_size_t offset, size_t size)
  * being mapped does not have i/o side effects and the __iomem
  * annotation is not applicable.
  *
- * MEMREMAP_WB - matches the default mapping for "System RAM" on
+ * MEMREMAP_WB - matches the default mapping for System RAM on
  * the architecture.  This is usually a read-allocate write-back cache.
  * Moreover, if MEMREMAP_WB is specified and the requested remap region is RAM
  * memremap() will bypass establishing a new mapping and instead return
@@ -56,11 +56,12 @@ static void *try_ram_remap(resource_size_t offset, size_t size)
  * MEMREMAP_WT - establish a mapping whereby writes either bypass the
  * cache or are written through to memory and never exist in a
  * cache-dirty state with respect to program visibility.  Attempts to
- * map "System RAM" with this mapping type will fail.
+ * map System RAM with this mapping type will fail.
  */
 void *memremap(resource_size_t offset, size_t size, unsigned long flags)
 {
-       int is_ram = region_intersects(offset, size, "System RAM");
+       int is_ram = region_intersects(offset, size,
+                                      IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
        void *addr = NULL;
 
        if (is_ram == REGION_MIXED) {
@@ -76,7 +77,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
                 * MEMREMAP_WB is special in that it can be satisfied
                 * from the direct map.  Some archs depend on the
                 * capability of memremap() to autodetect cases where
-                * the requested range is potentially in "System RAM"
+                * the requested range is potentially in System RAM.
                 */
                if (is_ram == REGION_INTERSECTS)
                        addr = try_ram_remap(offset, size);
@@ -88,7 +89,7 @@ void *memremap(resource_size_t offset, size_t size, unsigned long flags)
         * If we don't have a mapping yet and more request flags are
         * pending then we will be attempting to establish a new virtual
         * address mapping.  Enforce that this mapping is not aliasing
-        * "System RAM"
+        * System RAM.
         */
        if (!addr && is_ram == REGION_INTERSECTS && flags) {
                WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
@@ -150,7 +151,7 @@ void devm_memunmap(struct device *dev, void *addr)
 }
 EXPORT_SYMBOL(devm_memunmap);
 
-pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
+pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
 {
        return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
 }
@@ -183,7 +184,11 @@ EXPORT_SYMBOL(put_zone_device_page);
 
 static void pgmap_radix_release(struct resource *res)
 {
-       resource_size_t key;
+       resource_size_t key, align_start, align_size, align_end;
+
+       align_start = res->start & ~(SECTION_SIZE - 1);
+       align_size = ALIGN(resource_size(res), SECTION_SIZE);
+       align_end = align_start + align_size - 1;
 
        mutex_lock(&pgmap_lock);
        for (key = res->start; key <= res->end; key += SECTION_SIZE)
@@ -222,16 +227,16 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
        struct dev_pagemap *pgmap = &page_map->pgmap;
 
        if (percpu_ref_tryget_live(pgmap->ref)) {
-               dev_WARN(dev, "%s: page mapping is still live!\n", __func__);
+               dev_WARN(dev, true, "%s: page mapping is still live!\n",
+                        __func__);
                percpu_ref_put(pgmap->ref);
        }
 
-       pgmap_radix_release(res);
-
        /* pages are dead and unused, undo the arch mapping */
        align_start = res->start & ~(SECTION_SIZE - 1);
        align_size = ALIGN(resource_size(res), SECTION_SIZE);
        arch_remove_memory(align_start, align_size);
+       pgmap_radix_release(res);
        dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
                        "%s: failed to free all reserved pages\n", __func__);
 }
@@ -266,8 +271,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
                struct percpu_ref *ref, struct vmem_altmap *altmap)
 {
        int is_ram = region_intersects(res->start, resource_size(res),
-                       "System RAM");
-       resource_size_t key, align_start, align_size;
+                                      IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
+       resource_size_t key, align_start, align_size, align_end;
        struct dev_pagemap *pgmap;
        struct page_map *page_map;
        unsigned long pfn;
@@ -309,7 +314,10 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 
        mutex_lock(&pgmap_lock);
        error = 0;
-       for (key = res->start; key <= res->end; key += SECTION_SIZE) {
+       align_start = res->start & ~(SECTION_SIZE - 1);
+       align_size = ALIGN(resource_size(res), SECTION_SIZE);
+       align_end = align_start + align_size - 1;
+       for (key = align_start; key <= align_end; key += SECTION_SIZE) {
                struct dev_pagemap *dup;
 
                rcu_read_lock();
@@ -336,8 +344,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
        if (nid < 0)
                nid = numa_mem_id();
 
-       align_start = res->start & ~(SECTION_SIZE - 1);
-       align_size = ALIGN(resource_size(res), SECTION_SIZE);
        error = arch_add_memory(nid, align_start, align_size, true);
        if (error)
                goto err_add_memory;
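The memremap hunks above (again the path is omitted by the listing; apparently kernel/memremap.c) replace the "System RAM" string matching with IORESOURCE_SYSTEM_RAM/IORES_DESC_NONE arguments to region_intersects(), widen phys_to_pfn_t() to phys_addr_t, and round the radix-tree key range in pgmap_radix_release()/devm_memremap_pages() to section boundaries. A small stand-alone sketch of that rounding; the SECTION_SIZE value and the sample resource are illustrative assumptions, not taken from the patch:

#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE   (128ULL << 20)   /* example value only */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t start = 0x100200000ULL;   /* stand-in for res->start */
        uint64_t size  = 0x08000000ULL;    /* stand-in for resource_size(res) */

        uint64_t align_start = start & ~(SECTION_SIZE - 1);
        uint64_t align_size  = ALIGN_UP(size, SECTION_SIZE);
        uint64_t align_end   = align_start + align_size - 1;

        /* the patched loops walk keys align_start..align_end in
         * SECTION_SIZE steps instead of res->start..res->end */
        printf("keys: %#llx .. %#llx step %#llx\n",
               (unsigned long long)align_start,
               (unsigned long long)align_end,
               (unsigned long long)SECTION_SIZE);
        return 0;
}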
index 8358f4697c0c3aea77aba802833660d4082b7c13..9537da37ce87fe28cf11ee97d78bfa17ddc6beb3 100644
@@ -303,6 +303,9 @@ struct load_info {
        struct _ddebug *debug;
        unsigned int num_debug;
        bool sig_ok;
+#ifdef CONFIG_KALLSYMS
+       unsigned long mod_kallsyms_init_off;
+#endif
        struct {
                unsigned int sym, str, mod, vers, info, pcpu;
        } index;
@@ -2480,10 +2483,21 @@ static void layout_symtab(struct module *mod, struct load_info *info)
        strsect->sh_flags |= SHF_ALLOC;
        strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
                                         info->index.str) | INIT_OFFSET_MASK;
-       mod->init_layout.size = debug_align(mod->init_layout.size);
        pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+
+       /* We'll tack temporary mod_kallsyms on the end. */
+       mod->init_layout.size = ALIGN(mod->init_layout.size,
+                                     __alignof__(struct mod_kallsyms));
+       info->mod_kallsyms_init_off = mod->init_layout.size;
+       mod->init_layout.size += sizeof(struct mod_kallsyms);
+       mod->init_layout.size = debug_align(mod->init_layout.size);
 }
 
+/*
+ * We use the full symtab and strtab which layout_symtab arranged to
+ * be appended to the init section.  Later we switch to the cut-down
+ * core-only ones.
+ */
 static void add_kallsyms(struct module *mod, const struct load_info *info)
 {
        unsigned int i, ndst;
@@ -2492,29 +2506,34 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
        char *s;
        Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
 
-       mod->symtab = (void *)symsec->sh_addr;
-       mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+       /* Set up to point into init section. */
+       mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
+
+       mod->kallsyms->symtab = (void *)symsec->sh_addr;
+       mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
        /* Make sure we get permanent strtab: don't use info->strtab. */
-       mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+       mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
 
        /* Set types up while we still have access to sections. */
-       for (i = 0; i < mod->num_symtab; i++)
-               mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
-
-       mod->core_symtab = dst = mod->core_layout.base + info->symoffs;
-       mod->core_strtab = s = mod->core_layout.base + info->stroffs;
-       src = mod->symtab;
-       for (ndst = i = 0; i < mod->num_symtab; i++) {
+       for (i = 0; i < mod->kallsyms->num_symtab; i++)
+               mod->kallsyms->symtab[i].st_info
+                       = elf_type(&mod->kallsyms->symtab[i], info);
+
+       /* Now populate the cut down core kallsyms for after init. */
+       mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
+       mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
+       src = mod->kallsyms->symtab;
+       for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
                if (i == 0 ||
                    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
                                   info->index.pcpu)) {
                        dst[ndst] = src[i];
-                       dst[ndst++].st_name = s - mod->core_strtab;
-                       s += strlcpy(s, &mod->strtab[src[i].st_name],
+                       dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
+                       s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
                                     KSYM_NAME_LEN) + 1;
                }
        }
-       mod->core_num_syms = ndst;
+       mod->core_kallsyms.num_symtab = ndst;
 }
 #else
 static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -3263,9 +3282,8 @@ static noinline int do_init_module(struct module *mod)
        module_put(mod);
        trim_init_extable(mod);
 #ifdef CONFIG_KALLSYMS
-       mod->num_symtab = mod->core_num_syms;
-       mod->symtab = mod->core_symtab;
-       mod->strtab = mod->core_strtab;
+       /* Switch to core kallsyms now init is done: kallsyms may be walking! */
+       rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
 #endif
        mod_tree_remove_init(mod);
        disable_ro_nx(&mod->init_layout);
@@ -3496,7 +3514,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
        /* Module is ready to execute: parsing args may do that. */
        after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
-                                 -32768, 32767, NULL,
+                                 -32768, 32767, mod,
                                  unknown_module_param_cb);
        if (IS_ERR(after_dashes)) {
                err = PTR_ERR(after_dashes);
@@ -3627,6 +3645,11 @@ static inline int is_arm_mapping_symbol(const char *str)
               && (str[2] == '\0' || str[2] == '.');
 }
 
+static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
+{
+       return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
+}
+
 static const char *get_ksymbol(struct module *mod,
                               unsigned long addr,
                               unsigned long *size,
@@ -3634,6 +3657,7 @@ static const char *get_ksymbol(struct module *mod,
 {
        unsigned int i, best = 0;
        unsigned long nextval;
+       struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
        /* At worst, next value is at end of module */
        if (within_module_init(addr, mod))
@@ -3643,32 +3667,32 @@ static const char *get_ksymbol(struct module *mod,
 
        /* Scan for closest preceding symbol, and next symbol. (ELF
           starts real symbols at 1). */
-       for (i = 1; i < mod->num_symtab; i++) {
-               if (mod->symtab[i].st_shndx == SHN_UNDEF)
+       for (i = 1; i < kallsyms->num_symtab; i++) {
+               if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
                        continue;
 
                /* We ignore unnamed symbols: they're uninformative
                 * and inserted at a whim. */
-               if (mod->symtab[i].st_value <= addr
-                   && mod->symtab[i].st_value > mod->symtab[best].st_value
-                   && *(mod->strtab + mod->symtab[i].st_name) != '\0'
-                   && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
+               if (*symname(kallsyms, i) == '\0'
+                   || is_arm_mapping_symbol(symname(kallsyms, i)))
+                       continue;
+
+               if (kallsyms->symtab[i].st_value <= addr
+                   && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
                        best = i;
-               if (mod->symtab[i].st_value > addr
-                   && mod->symtab[i].st_value < nextval
-                   && *(mod->strtab + mod->symtab[i].st_name) != '\0'
-                   && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
-                       nextval = mod->symtab[i].st_value;
+               if (kallsyms->symtab[i].st_value > addr
+                   && kallsyms->symtab[i].st_value < nextval)
+                       nextval = kallsyms->symtab[i].st_value;
        }
 
        if (!best)
                return NULL;
 
        if (size)
-               *size = nextval - mod->symtab[best].st_value;
+               *size = nextval - kallsyms->symtab[best].st_value;
        if (offset)
-               *offset = addr - mod->symtab[best].st_value;
-       return mod->strtab + mod->symtab[best].st_name;
+               *offset = addr - kallsyms->symtab[best].st_value;
+       return symname(kallsyms, best);
 }
 
 /* For kallsyms to ask for address resolution.  NULL means not found.  Careful
@@ -3758,19 +3782,21 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 
        preempt_disable();
        list_for_each_entry_rcu(mod, &modules, list) {
+               struct mod_kallsyms *kallsyms;
+
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
-               if (symnum < mod->num_symtab) {
-                       *value = mod->symtab[symnum].st_value;
-                       *type = mod->symtab[symnum].st_info;
-                       strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
-                               KSYM_NAME_LEN);
+               kallsyms = rcu_dereference_sched(mod->kallsyms);
+               if (symnum < kallsyms->num_symtab) {
+                       *value = kallsyms->symtab[symnum].st_value;
+                       *type = kallsyms->symtab[symnum].st_info;
+                       strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
                        strlcpy(module_name, mod->name, MODULE_NAME_LEN);
                        *exported = is_exported(name, *value, mod);
                        preempt_enable();
                        return 0;
                }
-               symnum -= mod->num_symtab;
+               symnum -= kallsyms->num_symtab;
        }
        preempt_enable();
        return -ERANGE;
@@ -3779,11 +3805,12 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
 static unsigned long mod_find_symname(struct module *mod, const char *name)
 {
        unsigned int i;
+       struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
 
-       for (i = 0; i < mod->num_symtab; i++)
-               if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
-                   mod->symtab[i].st_info != 'U')
-                       return mod->symtab[i].st_value;
+       for (i = 0; i < kallsyms->num_symtab; i++)
+               if (strcmp(name, symname(kallsyms, i)) == 0 &&
+                   kallsyms->symtab[i].st_info != 'U')
+                       return kallsyms->symtab[i].st_value;
        return 0;
 }
 
@@ -3822,11 +3849,14 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
        module_assert_mutex();
 
        list_for_each_entry(mod, &modules, list) {
+               /* We hold module_mutex: no need for rcu_dereference_sched */
+               struct mod_kallsyms *kallsyms = mod->kallsyms;
+
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
-               for (i = 0; i < mod->num_symtab; i++) {
-                       ret = fn(data, mod->strtab + mod->symtab[i].st_name,
-                                mod, mod->symtab[i].st_value);
+               for (i = 0; i < kallsyms->num_symtab; i++) {
+                       ret = fn(data, symname(kallsyms, i),
+                                mod, kallsyms->symtab[i].st_value);
                        if (ret != 0)
                                return ret;
                }
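The module-loader hunks above (apparently kernel/module.c) collect symtab, strtab and num_symtab into one mod_kallsyms snapshot so that switching from the full init-section copy to the cut-down core copy after init is a single RCU pointer assignment, with readers taking one rcu_dereference_sched(). A minimal, hypothetical sketch of that publish/read pattern; the demo_* names are invented and stand in for the real structures:

#include <linux/rcupdate.h>
#include <linux/preempt.h>

struct demo_syms {
        unsigned int num_symtab;
        char *strtab;
};

static struct demo_syms __rcu *demo_kallsyms;

/*
 * Reader: one rcu_dereference_sched() yields a consistent snapshot,
 * either the init-time copy or the core copy, never a mix of fields.
 */
static unsigned int demo_num_syms(void)
{
        unsigned int n;

        preempt_disable();
        n = rcu_dereference_sched(demo_kallsyms)->num_symtab;
        preempt_enable();
        return n;
}

/*
 * Writer: publish the new snapshot; readers already inside their
 * preempt-disabled section keep seeing the old one until they finish.
 */
static void demo_switch_to_core(struct demo_syms *core)
{
        rcu_assign_pointer(demo_kallsyms, core);
}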
index f4ad91b746f1e4afd96b3615a4bad844a0875528..4d73a834c7e6590e29eb0d1a71645caf6c274016 100644
@@ -588,7 +588,7 @@ void __init pidhash_init(void)
 
 void __init pidmap_init(void)
 {
-       /* Veryify no one has done anything silly */
+       /* Verify no one has done anything silly: */
        BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
 
        /* bump default and minimum pid_max based on number of cpus */
index 02e8dfaa1ce2d2085bd85e165ad6f25b9697a11a..68d3ebc12601b6bdea4f9d2c6cfca997a0b6a0dc 100644
@@ -235,7 +235,7 @@ config PM_TRACE_RTC
 
 config APM_EMULATION
        tristate "Advanced Power Management Emulation"
-       depends on PM && SYS_SUPPORTS_APM_EMULATION
+       depends on SYS_SUPPORTS_APM_EMULATION
        help
          APM is a BIOS specification for saving power using several different
          techniques. This is mostly useful for battery powered laptops with
index 99513e1160e518d322f6d0ce0f346e6da9fcbbf0..51369697466e36ba52af710863376d4847d43e36 100644
@@ -59,6 +59,7 @@ int profile_setup(char *str)
 
        if (!strncmp(str, sleepstr, strlen(sleepstr))) {
 #ifdef CONFIG_SCHEDSTATS
+               force_schedstat_enabled();
                prof_on = SLEEP_PROFILING;
                if (str[strlen(sleepstr)] == ',')
                        str += strlen(sleepstr) + 1;
index 61a16569ffbf53c2b19b9b67d908edcd225dc201..ea25ceb6220ca6833f9fe27253b2813e7ddefae4 100644
@@ -1,6 +1,7 @@
 obj-y += update.o sync.o
 obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
+obj-$(CONFIG_RCU_PERF_TEST) += rcuperf.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
new file mode 100644
index 0000000..09ffa36
--- /dev/null
@@ -0,0 +1,642 @@
+/*
+ * Read-Copy Update module-based performance-test facility
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2015
+ *
+ * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/srcu.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/torture.h>
+#include <linux/vmalloc.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
+
+#define PERF_FLAG "-perf:"
+#define PERFOUT_STRING(s) \
+       pr_alert("%s" PERF_FLAG s "\n", perf_type)
+#define VERBOSE_PERFOUT_STRING(s) \
+       do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
+#define VERBOSE_PERFOUT_ERRSTRING(s) \
+       do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
+
+torture_param(bool, gp_exp, true, "Use expedited GP wait primitives");
+torture_param(int, nreaders, -1, "Number of RCU reader threads");
+torture_param(int, nwriters, -1, "Number of RCU updater threads");
+torture_param(bool, shutdown, false, "Shutdown at end of performance tests.");
+torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+
+static char *perf_type = "rcu";
+module_param(perf_type, charp, 0444);
+MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");
+
+static int nrealreaders;
+static int nrealwriters;
+static struct task_struct **writer_tasks;
+static struct task_struct **reader_tasks;
+static struct task_struct *shutdown_task;
+
+static u64 **writer_durations;
+static int *writer_n_durations;
+static atomic_t n_rcu_perf_reader_started;
+static atomic_t n_rcu_perf_writer_started;
+static atomic_t n_rcu_perf_writer_finished;
+static wait_queue_head_t shutdown_wq;
+static u64 t_rcu_perf_writer_started;
+static u64 t_rcu_perf_writer_finished;
+static unsigned long b_rcu_perf_writer_started;
+static unsigned long b_rcu_perf_writer_finished;
+
+static int rcu_perf_writer_state;
+#define RTWS_INIT              0
+#define RTWS_EXP_SYNC          1
+#define RTWS_SYNC              2
+#define RTWS_IDLE              2
+#define RTWS_STOPPING          3
+
+#define MAX_MEAS 10000
+#define MIN_MEAS 100
+
+#if defined(MODULE) || defined(CONFIG_RCU_PERF_TEST_RUNNABLE)
+#define RCUPERF_RUNNABLE_INIT 1
+#else
+#define RCUPERF_RUNNABLE_INIT 0
+#endif
+static int perf_runnable = RCUPERF_RUNNABLE_INIT;
+module_param(perf_runnable, int, 0444);
+MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot");
+
+/*
+ * Operations vector for selecting different types of tests.
+ */
+
+struct rcu_perf_ops {
+       int ptype;
+       void (*init)(void);
+       void (*cleanup)(void);
+       int (*readlock)(void);
+       void (*readunlock)(int idx);
+       unsigned long (*started)(void);
+       unsigned long (*completed)(void);
+       unsigned long (*exp_completed)(void);
+       void (*sync)(void);
+       void (*exp_sync)(void);
+       const char *name;
+};
+
+static struct rcu_perf_ops *cur_ops;
+
+/*
+ * Definitions for rcu perf testing.
+ */
+
+static int rcu_perf_read_lock(void) __acquires(RCU)
+{
+       rcu_read_lock();
+       return 0;
+}
+
+static void rcu_perf_read_unlock(int idx) __releases(RCU)
+{
+       rcu_read_unlock();
+}
+
+static unsigned long rcu_no_completed(void)
+{
+       return 0;
+}
+
+static void rcu_sync_perf_init(void)
+{
+}
+
+static struct rcu_perf_ops rcu_ops = {
+       .ptype          = RCU_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = rcu_perf_read_lock,
+       .readunlock     = rcu_perf_read_unlock,
+       .started        = rcu_batches_started,
+       .completed      = rcu_batches_completed,
+       .exp_completed  = rcu_exp_batches_completed,
+       .sync           = synchronize_rcu,
+       .exp_sync       = synchronize_rcu_expedited,
+       .name           = "rcu"
+};
+
+/*
+ * Definitions for rcu_bh perf testing.
+ */
+
+static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
+{
+       rcu_read_lock_bh();
+       return 0;
+}
+
+static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
+{
+       rcu_read_unlock_bh();
+}
+
+static struct rcu_perf_ops rcu_bh_ops = {
+       .ptype          = RCU_BH_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = rcu_bh_perf_read_lock,
+       .readunlock     = rcu_bh_perf_read_unlock,
+       .started        = rcu_batches_started_bh,
+       .completed      = rcu_batches_completed_bh,
+       .exp_completed  = rcu_exp_batches_completed_sched,
+       .sync           = synchronize_rcu_bh,
+       .exp_sync       = synchronize_rcu_bh_expedited,
+       .name           = "rcu_bh"
+};
+
+/*
+ * Definitions for srcu perf testing.
+ */
+
+DEFINE_STATIC_SRCU(srcu_ctl_perf);
+static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;
+
+static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
+{
+       return srcu_read_lock(srcu_ctlp);
+}
+
+static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
+{
+       srcu_read_unlock(srcu_ctlp, idx);
+}
+
+static unsigned long srcu_perf_completed(void)
+{
+       return srcu_batches_completed(srcu_ctlp);
+}
+
+static void srcu_perf_synchronize(void)
+{
+       synchronize_srcu(srcu_ctlp);
+}
+
+static void srcu_perf_synchronize_expedited(void)
+{
+       synchronize_srcu_expedited(srcu_ctlp);
+}
+
+static struct rcu_perf_ops srcu_ops = {
+       .ptype          = SRCU_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = srcu_perf_read_lock,
+       .readunlock     = srcu_perf_read_unlock,
+       .started        = NULL,
+       .completed      = srcu_perf_completed,
+       .exp_completed  = srcu_perf_completed,
+       .sync           = srcu_perf_synchronize,
+       .exp_sync       = srcu_perf_synchronize_expedited,
+       .name           = "srcu"
+};
+
+/*
+ * Definitions for sched perf testing.
+ */
+
+static int sched_perf_read_lock(void)
+{
+       preempt_disable();
+       return 0;
+}
+
+static void sched_perf_read_unlock(int idx)
+{
+       preempt_enable();
+}
+
+static struct rcu_perf_ops sched_ops = {
+       .ptype          = RCU_SCHED_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = sched_perf_read_lock,
+       .readunlock     = sched_perf_read_unlock,
+       .started        = rcu_batches_started_sched,
+       .completed      = rcu_batches_completed_sched,
+       .exp_completed  = rcu_exp_batches_completed_sched,
+       .sync           = synchronize_sched,
+       .exp_sync       = synchronize_sched_expedited,
+       .name           = "sched"
+};
+
+#ifdef CONFIG_TASKS_RCU
+
+/*
+ * Definitions for RCU-tasks perf testing.
+ */
+
+static int tasks_perf_read_lock(void)
+{
+       return 0;
+}
+
+static void tasks_perf_read_unlock(int idx)
+{
+}
+
+static struct rcu_perf_ops tasks_ops = {
+       .ptype          = RCU_TASKS_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = tasks_perf_read_lock,
+       .readunlock     = tasks_perf_read_unlock,
+       .started        = rcu_no_completed,
+       .completed      = rcu_no_completed,
+       .sync           = synchronize_rcu_tasks,
+       .exp_sync       = synchronize_rcu_tasks,
+       .name           = "tasks"
+};
+
+#define RCUPERF_TASKS_OPS &tasks_ops,
+
+static bool __maybe_unused torturing_tasks(void)
+{
+       return cur_ops == &tasks_ops;
+}
+
+#else /* #ifdef CONFIG_TASKS_RCU */
+
+#define RCUPERF_TASKS_OPS
+
+static bool __maybe_unused torturing_tasks(void)
+{
+       return false;
+}
+
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
+/*
+ * If performance tests complete, wait for shutdown to commence.
+ */
+static void rcu_perf_wait_shutdown(void)
+{
+       cond_resched_rcu_qs();
+       if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
+               return;
+       while (!torture_must_stop())
+               schedule_timeout_uninterruptible(1);
+}
+
+/*
+ * RCU perf reader kthread.  Repeatedly does empty RCU read-side
+ * critical section, minimizing update-side interference.
+ */
+static int
+rcu_perf_reader(void *arg)
+{
+       unsigned long flags;
+       int idx;
+       long me = (long)arg;
+
+       VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
+       set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+       set_user_nice(current, MAX_NICE);
+       atomic_inc(&n_rcu_perf_reader_started);
+
+       do {
+               local_irq_save(flags);
+               idx = cur_ops->readlock();
+               cur_ops->readunlock(idx);
+               local_irq_restore(flags);
+               rcu_perf_wait_shutdown();
+       } while (!torture_must_stop());
+       torture_kthread_stopping("rcu_perf_reader");
+       return 0;
+}
+
+/*
+ * RCU perf writer kthread.  Repeatedly does a grace period.
+ */
+static int
+rcu_perf_writer(void *arg)
+{
+       int i = 0;
+       int i_max;
+       long me = (long)arg;
+       struct sched_param sp;
+       bool started = false, done = false, alldone = false;
+       u64 t;
+       u64 *wdp;
+       u64 *wdpp = writer_durations[me];
+
+       VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
+       WARN_ON(rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp);
+       WARN_ON(rcu_gp_is_normal() && gp_exp);
+       WARN_ON(!wdpp);
+       set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+       sp.sched_priority = 1;
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+       t = ktime_get_mono_fast_ns();
+       if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
+               t_rcu_perf_writer_started = t;
+               if (gp_exp) {
+                       b_rcu_perf_writer_started =
+                               cur_ops->exp_completed() / 2;
+               } else {
+                       b_rcu_perf_writer_started =
+                               cur_ops->completed();
+               }
+       }
+
+       do {
+               wdp = &wdpp[i];
+               *wdp = ktime_get_mono_fast_ns();
+               if (gp_exp) {
+                       rcu_perf_writer_state = RTWS_EXP_SYNC;
+                       cur_ops->exp_sync();
+               } else {
+                       rcu_perf_writer_state = RTWS_SYNC;
+                       cur_ops->sync();
+               }
+               rcu_perf_writer_state = RTWS_IDLE;
+               t = ktime_get_mono_fast_ns();
+               *wdp = t - *wdp;
+               i_max = i;
+               if (!started &&
+                   atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
+                       started = true;
+               if (!done && i >= MIN_MEAS) {
+                       done = true;
+                       pr_alert("%s" PERF_FLAG
+                                "rcu_perf_writer %ld has %d measurements\n",
+                                perf_type, me, MIN_MEAS);
+                       if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
+                           nrealwriters) {
+                               ftrace_dump(DUMP_ALL);
+                               PERFOUT_STRING("Test complete");
+                               t_rcu_perf_writer_finished = t;
+                               if (gp_exp) {
+                                       b_rcu_perf_writer_finished =
+                                               cur_ops->exp_completed() / 2;
+                               } else {
+                                       b_rcu_perf_writer_finished =
+                                               cur_ops->completed();
+                               }
+                               smp_mb(); /* Assign before wake. */
+                               wake_up(&shutdown_wq);
+                       }
+               }
+               if (done && !alldone &&
+                   atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
+                       alldone = true;
+               if (started && !alldone && i < MAX_MEAS - 1)
+                       i++;
+               rcu_perf_wait_shutdown();
+       } while (!torture_must_stop());
+       rcu_perf_writer_state = RTWS_STOPPING;
+       writer_n_durations[me] = i_max;
+       torture_kthread_stopping("rcu_perf_writer");
+       return 0;
+}
+
+static inline void
+rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
+{
+       pr_alert("%s" PERF_FLAG
+                "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
+                perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
+}
+
+static void
+rcu_perf_cleanup(void)
+{
+       int i;
+       int j;
+       int ngps = 0;
+       u64 *wdp;
+       u64 *wdpp;
+
+       if (torture_cleanup_begin())
+               return;
+
+       if (reader_tasks) {
+               for (i = 0; i < nrealreaders; i++)
+                       torture_stop_kthread(rcu_perf_reader,
+                                            reader_tasks[i]);
+               kfree(reader_tasks);
+       }
+
+       if (writer_tasks) {
+               for (i = 0; i < nrealwriters; i++) {
+                       torture_stop_kthread(rcu_perf_writer,
+                                            writer_tasks[i]);
+                       if (!writer_n_durations)
+                               continue;
+                       j = writer_n_durations[i];
+                       pr_alert("%s%s writer %d gps: %d\n",
+                                perf_type, PERF_FLAG, i, j);
+                       ngps += j;
+               }
+               pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
+                        perf_type, PERF_FLAG,
+                        t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
+                        t_rcu_perf_writer_finished -
+                        t_rcu_perf_writer_started,
+                        ngps,
+                        b_rcu_perf_writer_finished -
+                        b_rcu_perf_writer_started);
+               for (i = 0; i < nrealwriters; i++) {
+                       if (!writer_durations)
+                               break;
+                       wdpp = writer_durations[i];
+                       if (!wdpp)
+                               continue;
+                       for (j = 0; j <= writer_n_durations[i]; j++) {
+                               wdp = &wdpp[j];
+                               pr_alert("%s%s %4d writer-duration: %5d %llu\n",
+                                       perf_type, PERF_FLAG,
+                                       i, j, *wdp);
+                               if (j % 100 == 0)
+                                       schedule_timeout_uninterruptible(1);
+                       }
+                       kfree(writer_durations[i]);
+               }
+               kfree(writer_tasks);
+               kfree(writer_durations);
+               kfree(writer_n_durations);
+       }
+
+       /* Do flavor-specific cleanup operations.  */
+       if (cur_ops->cleanup != NULL)
+               cur_ops->cleanup();
+
+       torture_cleanup_end();
+}
+
+/*
+ * Return the number if non-negative.  If -1, the number of CPUs.
+ * If less than -1, that much less than the number of CPUs, but
+ * at least one.
+ */
+static int compute_real(int n)
+{
+       int nr;
+
+       if (n >= 0) {
+               nr = n;
+       } else {
+               nr = num_online_cpus() + 1 + n;
+               if (nr <= 0)
+                       nr = 1;
+       }
+       return nr;
+}
+
+/*
+ * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
+ * down system.
+ */
+static int
+rcu_perf_shutdown(void *arg)
+{
+       do {
+               wait_event(shutdown_wq,
+                          atomic_read(&n_rcu_perf_writer_finished) >=
+                          nrealwriters);
+       } while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
+       smp_mb(); /* Wake before output. */
+       rcu_perf_cleanup();
+       kernel_power_off();
+       return -EINVAL;
+}
+
+static int __init
+rcu_perf_init(void)
+{
+       long i;
+       int firsterr = 0;
+       static struct rcu_perf_ops *perf_ops[] = {
+               &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
+               RCUPERF_TASKS_OPS
+       };
+
+       if (!torture_init_begin(perf_type, verbose, &perf_runnable))
+               return -EBUSY;
+
+       /* Process args and tell the world that the perf'er is on the job. */
+       for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
+               cur_ops = perf_ops[i];
+               if (strcmp(perf_type, cur_ops->name) == 0)
+                       break;
+       }
+       if (i == ARRAY_SIZE(perf_ops)) {
+               pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
+                        perf_type);
+               pr_alert("rcu-perf types:");
+               for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
+                       pr_alert(" %s", perf_ops[i]->name);
+               pr_alert("\n");
+               firsterr = -EINVAL;
+               goto unwind;
+       }
+       if (cur_ops->init)
+               cur_ops->init();
+
+       nrealwriters = compute_real(nwriters);
+       nrealreaders = compute_real(nreaders);
+       atomic_set(&n_rcu_perf_reader_started, 0);
+       atomic_set(&n_rcu_perf_writer_started, 0);
+       atomic_set(&n_rcu_perf_writer_finished, 0);
+       rcu_perf_print_module_parms(cur_ops, "Start of test");
+
+       /* Start up the kthreads. */
+
+       if (shutdown) {
+               init_waitqueue_head(&shutdown_wq);
+               firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
+                                                 shutdown_task);
+               if (firsterr)
+                       goto unwind;
+               schedule_timeout_uninterruptible(1);
+       }
+       reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
+                              GFP_KERNEL);
+       if (reader_tasks == NULL) {
+               VERBOSE_PERFOUT_ERRSTRING("out of memory");
+               firsterr = -ENOMEM;
+               goto unwind;
+       }
+       for (i = 0; i < nrealreaders; i++) {
+               firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
+                                                 reader_tasks[i]);
+               if (firsterr)
+                       goto unwind;
+       }
+       while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
+               schedule_timeout_uninterruptible(1);
+       writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
+                              GFP_KERNEL);
+       writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
+                                  GFP_KERNEL);
+       writer_n_durations =
+               kcalloc(nrealwriters, sizeof(*writer_n_durations),
+                       GFP_KERNEL);
+       if (!writer_tasks || !writer_durations || !writer_n_durations) {
+               VERBOSE_PERFOUT_ERRSTRING("out of memory");
+               firsterr = -ENOMEM;
+               goto unwind;
+       }
+       for (i = 0; i < nrealwriters; i++) {
+               writer_durations[i] =
+                       kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
+                               GFP_KERNEL);
+               if (!writer_durations[i])
+                       goto unwind;
+               firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
+                                                 writer_tasks[i]);
+               if (firsterr)
+                       goto unwind;
+       }
+       torture_init_end();
+       return 0;
+
+unwind:
+       torture_init_end();
+       rcu_perf_cleanup();
+       return firsterr;
+}
+
+module_init(rcu_perf_init);
+module_exit(rcu_perf_cleanup);
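The new file above (kernel/rcu/rcuperf.c, per the diff header) measures grace-period latency: each writer kthread reads a monotonic clock before and after every cur_ops->sync() or exp_sync() call and stores the delta, and the cleanup path prints the per-writer durations. A userspace analogue of that measurement loop, offered only as a sketch with the sleep standing in for the blocking grace-period wait:

#include <stdio.h>
#include <time.h>

static unsigned long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

int main(void)
{
        unsigned long long durations[10];
        struct timespec delay = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };
        int i;

        for (i = 0; i < 10; i++) {
                unsigned long long t0 = now_ns();

                nanosleep(&delay, NULL);   /* stand-in for cur_ops->sync() */
                durations[i] = now_ns() - t0;
        }
        for (i = 0; i < 10; i++)
                printf("writer-duration: %d %llu\n", i, durations[i]);
        return 0;
}

A run of the real module would be shaped by the parameters declared near the top of the file (perf_type, nreaders, nwriters, gp_exp, shutdown, verbose); as with any module parameter, these would presumably take the usual rcuperf.<name>= form on the kernel command line.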
index d2988d047d668d1d280c894beef56befdab65e65..c57d75a43564308d6e63f3b77c3ab76fbe4db2aa 100644
@@ -918,7 +918,7 @@ rcu_torture_fqs(void *arg)
 static int
 rcu_torture_writer(void *arg)
 {
-       bool can_expedite = !rcu_gp_is_expedited();
+       bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
        int expediting = 0;
        unsigned long gp_snap;
        bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
@@ -932,12 +932,14 @@ rcu_torture_writer(void *arg)
        int nsynctypes = 0;
 
        VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
-       pr_alert("%s" TORTURE_FLAG
-                " Grace periods expedited from boot/sysfs for %s,\n",
-                torture_type, cur_ops->name);
-       pr_alert("%s" TORTURE_FLAG
-                " Testing of dynamic grace-period expediting diabled.\n",
-                torture_type);
+       if (!can_expedite) {
+               pr_alert("%s" TORTURE_FLAG
+                        " GP expediting controlled from boot/sysfs for %s,\n",
+                        torture_type, cur_ops->name);
+               pr_alert("%s" TORTURE_FLAG
+                        " Disabled dynamic grace-period expediting.\n",
+                        torture_type);
+       }
 
        /* Initialize synctype[] array.  If none set, take default. */
        if (!gp_cond1 && !gp_exp1 && !gp_normal1 && !gp_sync1)
index e492a5253e0f10c94da7056efd8f42ba2c1a394c..196f0302e2f4320ebd25dedc31d0ea8a4ab026c1 100644
@@ -23,7 +23,7 @@
  */
 
 #include <linux/kthread.h>
-#include <linux/module.h>
+#include <linux/init.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 
@@ -122,18 +122,7 @@ free_out:
        debugfs_remove_recursive(rcudir);
        return 1;
 }
-
-static void __exit rcutiny_trace_cleanup(void)
-{
-       debugfs_remove_recursive(rcudir);
-}
-
-module_init(rcutiny_trace_init);
-module_exit(rcutiny_trace_cleanup);
-
-MODULE_AUTHOR("Paul E. McKenney");
-MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
-MODULE_LICENSE("GPL");
+device_initcall(rcutiny_trace_init);
 
 static void check_cpu_stall(struct rcu_ctrlblk *rcp)
 {
index e41dd4131f7a141976e771653e3169f2955f6f33..251a7d5afcd7db8ffa8c0b7d177d3a984ae81a61 100644
@@ -108,7 +108,6 @@ RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 
 static struct rcu_state *const rcu_state_p;
-static struct rcu_data __percpu *const rcu_data_p;
 LIST_HEAD(rcu_struct_flavors);
 
 /* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -371,6 +370,21 @@ void rcu_all_qs(void)
                rcu_momentary_dyntick_idle();
                local_irq_restore(flags);
        }
+       if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))) {
+               /*
+                * Yes, we just checked a per-CPU variable with preemption
+                * enabled, so we might be migrated to some other CPU at
+                * this point.  That is OK because in that case, the
+                * migration will supply the needed quiescent state.
+                * We might end up needlessly disabling preemption and
+                * invoking rcu_sched_qs() on the destination CPU, but
+                * the probability and cost are both quite low, so this
+                * should not be a problem in practice.
+                */
+               preempt_disable();
+               rcu_sched_qs();
+               preempt_enable();
+       }
        this_cpu_inc(rcu_qs_ctr);
        barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
@@ -386,9 +400,11 @@ module_param(qlowmark, long, 0444);
 
 static ulong jiffies_till_first_fqs = ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
+static bool rcu_kick_kthreads;
 
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
+module_param(rcu_kick_kthreads, bool, 0644);
 
 /*
  * How long the grace period must be before we start recruiting
@@ -460,6 +476,28 @@ unsigned long rcu_batches_completed_bh(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 
+/*
+ * Return the number of RCU expedited batches completed thus far for
+ * debug & stats.  Odd numbers mean that a batch is in progress, even
+ * numbers mean idle.  The value returned will thus be roughly double
+ * the cumulative batches since boot.
+ */
+unsigned long rcu_exp_batches_completed(void)
+{
+       return rcu_state_p->expedited_sequence;
+}
+EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
+
+/*
+ * Return the number of RCU-sched expedited batches completed thus far
+ * for debug & stats.  Similar to rcu_exp_batches_completed().
+ */
+unsigned long rcu_exp_batches_completed_sched(void)
+{
+       return rcu_sched_state.expedited_sequence;
+}
+EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
+
 /*
  * Force a quiescent state.
  */
@@ -1083,13 +1121,12 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
        rcu_sysidle_check_cpu(rdp, isidle, maxj);
        if ((rdp->dynticks_snap & 0x1) == 0) {
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
-               return 1;
-       } else {
                if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
                                 rdp->mynode->gpnum))
                        WRITE_ONCE(rdp->gpwrap, true);
-               return 0;
+               return 1;
        }
+       return 0;
 }
 
 /*
@@ -1173,15 +1210,16 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
                        smp_mb(); /* ->cond_resched_completed before *rcrmp. */
                        WRITE_ONCE(*rcrmp,
                                   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
-                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-                       rdp->rsp->jiffies_resched += 5; /* Enable beating. */
-               } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-                       /* Time to beat on that CPU again! */
-                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
-                       rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
                }
+               rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
        }
 
+       /* And if it has been a really long time, kick the CPU as well. */
+       if (ULONG_CMP_GE(jiffies,
+                        rdp->rsp->gp_start + 2 * jiffies_till_sched_qs) ||
+           ULONG_CMP_GE(jiffies, rdp->rsp->gp_start + jiffies_till_sched_qs))
+               resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+
        return 0;
 }
 
@@ -1225,8 +1263,10 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
                       rsp->gp_flags,
                       gp_state_getname(rsp->gp_state), rsp->gp_state,
                       rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
-               if (rsp->gp_kthread)
+               if (rsp->gp_kthread) {
                        sched_show_task(rsp->gp_kthread);
+                       wake_up_process(rsp->gp_kthread);
+               }
        }
 }
 
@@ -1246,7 +1286,25 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
                                if (rnp->qsmask & (1UL << cpu))
                                        dump_cpu_task(rnp->grplo + cpu);
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+       }
+}
+
+/*
+ * If too much time has passed in the current grace period, and if
+ * so configured, go kick the relevant kthreads.
+ */
+static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
+{
+       unsigned long j;
+
+       if (!rcu_kick_kthreads)
+               return;
+       j = READ_ONCE(rsp->jiffies_kick_kthreads);
+       if (time_after(jiffies, j) && rsp->gp_kthread) {
+               WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
+               wake_up_process(rsp->gp_kthread);
+               WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
        }
 }
 
@@ -1261,17 +1319,22 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        struct rcu_node *rnp = rcu_get_root(rsp);
        long totqlen = 0;
 
+       /* Kick and suppress, if so configured. */
+       rcu_stall_kick_kthreads(rsp);
+       if (rcu_cpu_stall_suppress)
+               return;
+
        /* Only let one CPU complain about others per time interval. */
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        delta = jiffies - READ_ONCE(rsp->jiffies_stall);
        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        WRITE_ONCE(rsp->jiffies_stall,
                   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
        /*
         * OK, time to rat on our buddy...
@@ -1292,7 +1355,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
                                        ndetected++;
                                }
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 
        print_cpu_stall_info_end();
@@ -1334,6 +1397,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
        struct rcu_node *rnp = rcu_get_root(rsp);
        long totqlen = 0;
 
+       /* Kick and suppress, if so configured. */
+       rcu_stall_kick_kthreads(rsp);
+       if (rcu_cpu_stall_suppress)
+               return;
+
        /*
         * OK, time to rat on ourselves...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
@@ -1357,7 +1425,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
        if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
                WRITE_ONCE(rsp->jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
        /*
         * Attempt to revive the RCU machinery by forcing a context switch.
@@ -1378,7 +1446,8 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
        unsigned long js;
        struct rcu_node *rnp;
 
-       if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
+       if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
+           !rcu_gp_in_progress(rsp))
                return;
        j = jiffies;
 
@@ -1595,7 +1664,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
        }
 unlock_out:
        if (rnp != rnp_root)
-               raw_spin_unlock(&rnp_root->lock);
+               raw_spin_unlock_rcu_node(rnp_root);
 out:
        if (c_out != NULL)
                *c_out = c;
@@ -1815,7 +1884,7 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
                return;
        }
        needwake = __note_gp_changes(rsp, rnp, rdp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        if (needwake)
                rcu_gp_kthread_wake(rsp);
 }
@@ -1840,7 +1909,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
        raw_spin_lock_irq_rcu_node(rnp);
        if (!READ_ONCE(rsp->gp_flags)) {
                /* Spurious wakeup, tell caller to go back to sleep.  */
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                return false;
        }
        WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
@@ -1850,7 +1919,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                 * Grace period already in progress, don't start another.
                 * Not supposed to be able to happen.
                 */
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                return false;
        }
 
@@ -1859,7 +1928,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
        /* Record GP times before starting GP, hence smp_store_release(). */
        smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
        trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
-       raw_spin_unlock_irq(&rnp->lock);
+       raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
         * Apply per-leaf buffered online and offline operations to the
@@ -1873,7 +1942,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
                    !rnp->wait_blkd_tasks) {
                        /* Nothing to do on this leaf rcu_node structure. */
-                       raw_spin_unlock_irq(&rnp->lock);
+                       raw_spin_unlock_irq_rcu_node(rnp);
                        continue;
                }
 
@@ -1907,7 +1976,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                        rcu_cleanup_dead_rnp(rnp);
                }
 
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
        }
 
        /*
@@ -1938,7 +2007,7 @@ static bool rcu_gp_init(struct rcu_state *rsp)
                trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
        }
@@ -1996,7 +2065,7 @@ static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
                raw_spin_lock_irq_rcu_node(rnp);
                WRITE_ONCE(rsp->gp_flags,
                           READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
        }
 }
 
@@ -2025,7 +2094,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
         * safe for us to drop the lock in order to mark the grace
         * period as completed in all of the rcu_node structures.
         */
-       raw_spin_unlock_irq(&rnp->lock);
+       raw_spin_unlock_irq_rcu_node(rnp);
 
        /*
         * Propagate new ->completed value to rcu_node structures so
@@ -2046,7 +2115,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                        needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
                /* smp_mb() provided by prior unlock-lock pair. */
                nocb += rcu_future_gp_cleanup(rsp, rnp);
-               raw_spin_unlock_irq(&rnp->lock);
+               raw_spin_unlock_irq_rcu_node(rnp);
                cond_resched_rcu_qs();
                WRITE_ONCE(rsp->gp_activity, jiffies);
                rcu_gp_slow(rsp, gp_cleanup_delay);
@@ -2068,7 +2137,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                                       READ_ONCE(rsp->gpnum),
                                       TPS("newreq"));
        }
-       raw_spin_unlock_irq(&rnp->lock);
+       raw_spin_unlock_irq_rcu_node(rnp);
 }
 
 /*
@@ -2116,8 +2185,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
                }
                ret = 0;
                for (;;) {
-                       if (!ret)
+                       if (!ret) {
                                rsp->jiffies_force_qs = jiffies + j;
+                               WRITE_ONCE(rsp->jiffies_kick_kthreads,
+                                          jiffies + 3 * j);
+                       }
                        trace_rcu_grace_period(rsp->name,
                                               READ_ONCE(rsp->gpnum),
                                               TPS("fqswait"));
@@ -2143,6 +2215,15 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                                       TPS("fqsend"));
                                cond_resched_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
+                               ret = 0; /* Force full wait till next FQS. */
+                               j = jiffies_till_next_fqs;
+                               if (j > HZ) {
+                                       j = HZ;
+                                       jiffies_till_next_fqs = HZ;
+                               } else if (j < 1) {
+                                       j = 1;
+                                       jiffies_till_next_fqs = 1;
+                               }
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_rcu_qs();
@@ -2151,14 +2232,12 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                trace_rcu_grace_period(rsp->name,
                                                       READ_ONCE(rsp->gpnum),
                                                       TPS("fqswaitsig"));
-                       }
-                       j = jiffies_till_next_fqs;
-                       if (j > HZ) {
-                               j = HZ;
-                               jiffies_till_next_fqs = HZ;
-                       } else if (j < 1) {
-                               j = 1;
-                               jiffies_till_next_fqs = 1;
+                               ret = 1; /* Keep old FQS timing. */
+                               j = jiffies;
+                               if (time_after(jiffies, rsp->jiffies_force_qs))
+                                       j = 1;
+                               else
+                                       j = rsp->jiffies_force_qs - j;
                        }
                }
 
@@ -2234,18 +2313,20 @@ static bool rcu_start_gp(struct rcu_state *rsp)
 }
 
 /*
- * Report a full set of quiescent states to the specified rcu_state
- * data structure.  This involves cleaning up after the prior grace
- * period and letting rcu_start_gp() start up the next grace period
- * if one is needed.  Note that the caller must hold rnp->lock, which
- * is released before return.
+ * Report a full set of quiescent states to the specified rcu_state data
+ * structure.  Invoke rcu_gp_kthread_wake() to awaken the grace-period
+ * kthread if another grace period is required.  Whether we wake
+ * the grace-period kthread or it awakens itself for the next round
+ * of quiescent-state forcing, that kthread will clean up after the
+ * just-completed grace period.  Note that the caller must hold rnp->lock,
+ * which is released before return.
  */
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
 {
        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-       raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
        rcu_gp_kthread_wake(rsp);
 }
 
@@ -2275,7 +2356,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                         * Our bit has already been cleared, or the
                         * relevant grace period is already over, so done.
                         */
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
                WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
@@ -2287,7 +2368,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
                if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
 
                        /* Other bits still set at this level, so done. */
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        return;
                }
                mask = rnp->grpmask;
@@ -2297,7 +2378,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 
                        break;
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                rnp_c = rnp;
                rnp = rnp->parent;
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -2329,7 +2410,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
 
        if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
            rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;  /* Still need more quiescent states! */
        }
 
@@ -2346,19 +2427,14 @@ static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
        /* Report up the rest of the hierarchy, tracking current ->gpnum. */
        gps = rnp->gpnum;
        mask = rnp->grpmask;
-       raw_spin_unlock(&rnp->lock);    /* irqs remain disabled. */
+       raw_spin_unlock_rcu_node(rnp);  /* irqs remain disabled. */
        raw_spin_lock_rcu_node(rnp_p);  /* irqs already disabled. */
        rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
 }
 
 /*
  * Record a quiescent state for the specified CPU to that CPU's rcu_data
- * structure.  This must be either called from the specified CPU, or
- * called when the specified CPU is known to be offline (and when it is
- * also known that no other CPU is concurrently trying to help the offline
- * CPU).  The lastcomp argument is used to make sure we are still in the
- * grace period of interest.  We don't want to end the current grace period
- * based on quiescent states detected in an earlier grace period!
+ * structure.  This must be called from the specified CPU.
  */
 static void
 rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
@@ -2383,14 +2459,14 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
                 */
                rdp->cpu_no_qs.b.norm = true;   /* need qs for new gp. */
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        mask = rdp->grpmask;
        if ((rnp->qsmask & mask) == 0) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        } else {
-               rdp->core_needs_qs = 0;
+               rdp->core_needs_qs = false;
 
                /*
                 * This GP can't end until cpu checks in, so all of our
@@ -2599,10 +2675,11 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
                rnp->qsmaskinit &= ~mask;
                rnp->qsmask &= ~mask;
                if (rnp->qsmaskinit) {
-                       raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+                       raw_spin_unlock_rcu_node(rnp);
+                       /* irqs remain disabled. */
                        return;
                }
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
        }
 }
 
@@ -2625,7 +2702,7 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
        mask = rdp->grpmask;
        raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
        rnp->qsmaskinitnext &= ~mask;
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -2859,7 +2936,7 @@ static void force_qs_rnp(struct rcu_state *rsp,
                        rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
                } else {
                        /* Nothing to do here, so just drop the lock. */
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
        }
 }
@@ -2895,11 +2972,11 @@ static void force_quiescent_state(struct rcu_state *rsp)
        raw_spin_unlock(&rnp_old->fqslock);
        if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                rsp->n_force_qs_lh++;
-               raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
                return;  /* Someone beat us to it. */
        }
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
-       raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
        rcu_gp_kthread_wake(rsp);
 }
 
@@ -2925,7 +3002,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
        if (cpu_needs_another_gp(rsp, rdp)) {
                raw_spin_lock_rcu_node(rcu_get_root(rsp)); /* irqs disabled. */
                needwake = rcu_start_gp(rsp);
-               raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
        } else {
@@ -3016,7 +3093,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
                        raw_spin_lock_rcu_node(rnp_root);
                        needwake = rcu_start_gp(rsp);
-                       raw_spin_unlock(&rnp_root->lock);
+                       raw_spin_unlock_rcu_node(rnp_root);
                        if (needwake)
                                rcu_gp_kthread_wake(rsp);
                } else {
@@ -3436,14 +3513,14 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
        rcu_for_each_leaf_node(rsp, rnp) {
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                if (rnp->expmaskinit == rnp->expmaskinitnext) {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        continue;  /* No new CPUs, nothing to do. */
                }
 
                /* Update this node's mask, track old value for propagation. */
                oldmask = rnp->expmaskinit;
                rnp->expmaskinit = rnp->expmaskinitnext;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
                /* If was already nonzero, nothing to propagate. */
                if (oldmask)
@@ -3458,7 +3535,7 @@ static void sync_exp_reset_tree_hotplug(struct rcu_state *rsp)
                        if (rnp_up->expmaskinit)
                                done = true;
                        rnp_up->expmaskinit |= mask;
-                       raw_spin_unlock_irqrestore(&rnp_up->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp_up, flags);
                        if (done)
                                break;
                        mask = rnp_up->grpmask;
@@ -3481,7 +3558,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
                raw_spin_lock_irqsave_rcu_node(rnp, flags);
                WARN_ON_ONCE(rnp->expmask);
                rnp->expmask = rnp->expmaskinit;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 }
 
@@ -3522,11 +3599,11 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                        if (!rnp->expmask)
                                rcu_initiate_boost(rnp, flags);
                        else
-                               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        break;
                }
                if (rnp->parent == NULL) {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
                                wake_up(&rsp->expedited_wq);
@@ -3534,7 +3611,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                        break;
                }
                mask = rnp->grpmask;
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled */
                rnp = rnp->parent;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled */
                WARN_ON_ONCE(!(rnp->expmask & mask));
@@ -3569,7 +3646,7 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!(rnp->expmask & mask)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        rnp->expmask &= ~mask;
@@ -3592,10 +3669,19 @@ static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
                               atomic_long_t *stat, unsigned long s)
 {
        if (rcu_exp_gp_seq_done(rsp, s)) {
-               if (rnp)
+               trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
+               if (rnp) {
+                       trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+                                                 rnp->grplo, rnp->grphi,
+                                                 TPS("rel"));
                        mutex_unlock(&rnp->exp_funnel_mutex);
-               else if (rdp)
+               } else if (rdp) {
+                       trace_rcu_exp_funnel_lock(rsp->name,
+                                                 rdp->mynode->level + 1,
+                                                 rdp->cpu, rdp->cpu,
+                                                 TPS("rel"));
                        mutex_unlock(&rdp->exp_funnel_mutex);
+               }
                /* Ensure test happens before caller kfree(). */
                smp_mb__before_atomic(); /* ^^^ */
                atomic_long_inc(stat);
@@ -3615,22 +3701,6 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
        struct rcu_node *rnp0;
        struct rcu_node *rnp1 = NULL;
 
-       /*
-        * First try directly acquiring the root lock in order to reduce
-        * latency in the common case where expedited grace periods are
-        * rare.  We check mutex_is_locked() to avoid pathological levels of
-        * memory contention on ->exp_funnel_mutex in the heavy-load case.
-        */
-       rnp0 = rcu_get_root(rsp);
-       if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
-               if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
-                       if (sync_exp_work_done(rsp, rnp0, NULL,
-                                              &rdp->expedited_workdone0, s))
-                               return NULL;
-                       return rnp0;
-               }
-       }
-
        /*
         * Each pass through the following loop works its way
         * up the rcu_node tree, returning if others have done the
@@ -3642,16 +3712,28 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
        if (sync_exp_work_done(rsp, NULL, NULL, &rdp->expedited_workdone1, s))
                return NULL;
        mutex_lock(&rdp->exp_funnel_mutex);
+       trace_rcu_exp_funnel_lock(rsp->name, rdp->mynode->level + 1,
+                                 rdp->cpu, rdp->cpu, TPS("acq"));
        rnp0 = rdp->mynode;
        for (; rnp0 != NULL; rnp0 = rnp0->parent) {
                if (sync_exp_work_done(rsp, rnp1, rdp,
                                       &rdp->expedited_workdone2, s))
                        return NULL;
                mutex_lock(&rnp0->exp_funnel_mutex);
-               if (rnp1)
+               trace_rcu_exp_funnel_lock(rsp->name, rnp0->level,
+                                         rnp0->grplo, rnp0->grphi, TPS("acq"));
+               if (rnp1) {
+                       trace_rcu_exp_funnel_lock(rsp->name, rnp1->level,
+                                                 rnp1->grplo, rnp1->grphi,
+                                                 TPS("rel"));
                        mutex_unlock(&rnp1->exp_funnel_mutex);
-               else
+               } else {
+                       trace_rcu_exp_funnel_lock(rsp->name,
+                                                 rdp->mynode->level + 1,
+                                                 rdp->cpu, rdp->cpu,
+                                                 TPS("rel"));
                        mutex_unlock(&rdp->exp_funnel_mutex);
+               }
                rnp1 = rnp0;
        }
        if (sync_exp_work_done(rsp, rnp1, rdp,
@@ -3672,6 +3754,11 @@ static void sync_sched_exp_handler(void *data)
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
                return;
+       if (rcu_is_cpu_rrupt_from_idle()) {
+               rcu_report_exp_rdp(&rcu_sched_state,
+                                  this_cpu_ptr(&rcu_sched_data), true);
+               return;
+       }
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
        resched_cpu(smp_processor_id());
 }
@@ -3730,7 +3817,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
                 */
                if (rcu_preempt_has_tasks(rnp))
                        rnp->exp_tasks = rnp->blkd_tasks.next;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 
                /* IPI the remaining CPUs for expedited quiescent state. */
                mask = 1;
@@ -3747,7 +3834,7 @@ retry_ipi:
                        raw_spin_lock_irqsave_rcu_node(rnp, flags);
                        if (cpu_online(cpu) &&
                            (rnp->expmask & mask)) {
-                               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                                schedule_timeout_uninterruptible(1);
                                if (cpu_online(cpu) &&
                                    (rnp->expmask & mask))
@@ -3756,7 +3843,7 @@ retry_ipi:
                        }
                        if (!(rnp->expmask & mask))
                                mask_ofl_ipi &= ~mask;
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
                /* Report quiescent states for those that went offline. */
                mask_ofl_test |= mask_ofl_ipi;
@@ -3796,7 +3883,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
                       rsp->name);
                ndetected = 0;
                rcu_for_each_leaf_node(rsp, rnp) {
-                       ndetected = rcu_print_task_exp_stall(rnp);
+                       ndetected += rcu_print_task_exp_stall(rnp);
                        mask = 1;
                        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
                                struct rcu_data *rdp;
@@ -3806,7 +3893,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
                                ndetected++;
                                rdp = per_cpu_ptr(rsp->rda, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
-                                       "O."[cpu_online(cpu)],
+                                       "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
@@ -3815,7 +3902,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rsp->expedited_sequence,
                        rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
-               if (!ndetected) {
+               if (ndetected) {
                        pr_err("blocking rcu_node structures:");
                        rcu_for_each_node_breadth_first(rsp, rnp) {
                                if (rnp == rnp_root)
@@ -3875,16 +3962,21 @@ void synchronize_sched_expedited(void)
 
        /* Take a snapshot of the sequence number.  */
        s = rcu_exp_gp_seq_snap(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
 
        rnp = exp_funnel_lock(rsp, s);
        if (rnp == NULL)
                return;  /* Someone else did our work for us. */
 
        rcu_exp_gp_seq_start(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
        sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
        synchronize_sched_expedited_wait(rsp);
 
        rcu_exp_gp_seq_end(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+       trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+                                 rnp->grplo, rnp->grphi, TPS("rel"));
        mutex_unlock(&rnp->exp_funnel_mutex);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
@@ -4163,7 +4255,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
                        return;
                raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
                rnp->qsmaskinit |= mask;
-               raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+               raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
        }
 }
 
@@ -4187,7 +4279,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->rsp = rsp;
        mutex_init(&rdp->exp_funnel_mutex);
        rcu_boot_init_nocb_percpu_data(rdp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -4215,7 +4307,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rcu_sysidle_init_percpu_data(rdp->dynticks);
        atomic_set(&rdp->dynticks->dynticks,
                   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
-       raw_spin_unlock(&rnp->lock);            /* irqs remain disabled. */
+       raw_spin_unlock_rcu_node(rnp);          /* irqs remain disabled. */
 
        /*
         * Add CPU to leaf rcu_node pending-online bitmask.  Any needed
@@ -4236,7 +4328,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
        rdp->core_needs_qs = false;
        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void rcu_prepare_cpu(int cpu)
@@ -4358,7 +4450,7 @@ static int __init rcu_spawn_gp_kthread(void)
                        sp.sched_priority = kthread_prio;
                        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
                }
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                wake_up_process(t);
        }
        rcu_spawn_nocb_kthreads();
@@ -4449,8 +4541,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                cpustride *= levelspread[i];
                rnp = rsp->level[i];
                for (j = 0; j < levelcnt[i]; j++, rnp++) {
-                       raw_spin_lock_init(&rnp->lock);
-                       lockdep_set_class_and_name(&rnp->lock,
+                       raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
+                       lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
                                                   &rcu_node_class[i], buf[i]);
                        raw_spin_lock_init(&rnp->fqslock);
                        lockdep_set_class_and_name(&rnp->fqslock,
index 83360b4f4352786b0129d5ac64046102d73933db..4c2dba5987fffaeaddf547d43a0e1855f3ef796d 100644 (file)
@@ -149,8 +149,9 @@ struct rcu_dynticks {
  * Definition for node within the RCU grace-period-detection hierarchy.
  */
 struct rcu_node {
-       raw_spinlock_t lock;    /* Root rcu_node's lock protects some */
-                               /*  rcu_state fields as well as following. */
+       raw_spinlock_t __private lock;  /* Root rcu_node's lock protects */
+                                       /*  some rcu_state fields as well as */
+                                       /*  following. */
        unsigned long gpnum;    /* Current grace period for this node. */
                                /*  This will either be equal to or one */
                                /*  behind the root rcu_node's gpnum. */
@@ -386,7 +387,6 @@ struct rcu_data {
        struct rcu_head oom_head;
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
        struct mutex exp_funnel_mutex;
-       atomic_long_t expedited_workdone0;      /* # done by others #0. */
        atomic_long_t expedited_workdone1;      /* # done by others #1. */
        atomic_long_t expedited_workdone2;      /* # done by others #2. */
        atomic_long_t expedited_workdone3;      /* # done by others #3. */
@@ -511,6 +511,8 @@ struct rcu_state {
 
        unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                /*  force_quiescent_state(). */
+       unsigned long jiffies_kick_kthreads;    /* Time at which to kick */
+                                               /*  kthreads, if configured. */
        unsigned long n_force_qs;               /* Number of calls to */
                                                /*  force_quiescent_state(). */
        unsigned long n_force_qs_lh;            /* ~Number of calls leaving */
@@ -680,7 +682,7 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 #endif /* #else #ifdef CONFIG_PPC */
 
 /*
- * Wrappers for the rcu_node::lock acquire.
+ * Wrappers for the rcu_node::lock acquire and release.
  *
  * Because the rcu_nodes form a tree, the tree traversal locking will observe
  * different lock values, this in turn means that an UNLOCK of one level
@@ -689,29 +691,48 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
  *
  * In order to restore full ordering between tree levels, augment the regular
  * lock acquire functions with smp_mb__after_unlock_lock().
+ *
+ * Because ->lock of struct rcu_node is a __private field, one should use
+ * these wrappers rather than directly calling raw_spin_{lock,unlock}* on ->lock.
  */
 static inline void raw_spin_lock_rcu_node(struct rcu_node *rnp)
 {
-       raw_spin_lock(&rnp->lock);
+       raw_spin_lock(&ACCESS_PRIVATE(rnp, lock));
        smp_mb__after_unlock_lock();
 }
 
+static inline void raw_spin_unlock_rcu_node(struct rcu_node *rnp)
+{
+       raw_spin_unlock(&ACCESS_PRIVATE(rnp, lock));
+}
+
 static inline void raw_spin_lock_irq_rcu_node(struct rcu_node *rnp)
 {
-       raw_spin_lock_irq(&rnp->lock);
+       raw_spin_lock_irq(&ACCESS_PRIVATE(rnp, lock));
        smp_mb__after_unlock_lock();
 }
 
-#define raw_spin_lock_irqsave_rcu_node(rnp, flags)     \
-do {                                                   \
-       typecheck(unsigned long, flags);                \
-       raw_spin_lock_irqsave(&(rnp)->lock, flags);     \
-       smp_mb__after_unlock_lock();                    \
+static inline void raw_spin_unlock_irq_rcu_node(struct rcu_node *rnp)
+{
+       raw_spin_unlock_irq(&ACCESS_PRIVATE(rnp, lock));
+}
+
+#define raw_spin_lock_irqsave_rcu_node(rnp, flags)                     \
+do {                                                                   \
+       typecheck(unsigned long, flags);                                \
+       raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags);       \
+       smp_mb__after_unlock_lock();                                    \
+} while (0)
+
+#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags)                        \
+do {                                                                   \
+       typecheck(unsigned long, flags);                                \
+       raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags);  \
 } while (0)
 
 static inline bool raw_spin_trylock_rcu_node(struct rcu_node *rnp)
 {
-       bool locked = raw_spin_trylock(&rnp->lock);
+       bool locked = raw_spin_trylock(&ACCESS_PRIVATE(rnp, lock));
 
        if (locked)
                smp_mb__after_unlock_lock();
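The wrappers above encode one usage pattern: every acquisition of an rcu_node lock goes through a *_rcu_node() helper so that it picks up smp_mb__after_unlock_lock(), and the __private ->lock field is only ever touched via ACCESS_PRIVATE().  A minimal sketch of the intended caller pattern, assuming kernel/rcu/tree.h context; clear_qs_bit_example() is hypothetical and only reuses fields visible in this diff:

/* Hypothetical caller, for illustration only: update an rcu_node field
 * under its lock using the wrappers rather than raw_spin_lock(). */
static void clear_qs_bit_example(struct rcu_node *rnp, unsigned long mask)
{
        unsigned long flags;

        /* Acquire: includes smp_mb__after_unlock_lock() for tree-level
         * ordering and uses ACCESS_PRIVATE() on the __private ->lock. */
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rnp->qsmask &= ~mask;           /* update under the node lock */
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        /* A direct raw_spin_lock(&rnp->lock) would both bypass the barrier
         * and trip the __private annotation under sparse. */
}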
index 9467a8b7e756173bc9a94cc5b9828066a9f50e16..ae9ac254ab27dc3dd21e5fb7224ef83130e81a4e 100644 (file)
@@ -235,7 +235,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
                rnp->gp_tasks = &t->rcu_node_entry;
        if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
                rnp->exp_tasks = &t->rcu_node_entry;
-       raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
+       raw_spin_unlock_rcu_node(rnp); /* interrupts remain disabled. */
 
        /*
         * Report the quiescent state for the expedited GP.  This expedited
@@ -489,7 +489,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                                                         !!rnp->gp_tasks);
                        rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
                } else {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                }
 
                /* Unboost if we were boosted. */
@@ -518,14 +518,14 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        if (!rcu_preempt_blocked_readers_cgp(rnp)) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
                sched_show_task(t);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 /*
@@ -722,13 +722,19 @@ static void sync_rcu_exp_handler(void *info)
  * synchronize_rcu_expedited - Brute-force RCU grace period
  *
  * Wait for an RCU-preempt grace period, but expedite it.  The basic
- * idea is to invoke synchronize_sched_expedited() to push all the tasks to
- * the ->blkd_tasks lists and wait for this list to drain.  This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code.
- * In fact, if you are using synchronize_rcu_expedited() in a loop,
- * please restructure your code to batch your updates, and then Use a
- * single synchronize_rcu() instead.
+ * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
+ * checks whether the CPU is in an RCU-preempt critical section, and
+ * if so, it sets a flag that causes the outermost rcu_read_unlock()
+ * to report the quiescent state.  On the other hand, if the CPU is
+ * not in an RCU read-side critical section, the IPI handler reports
+ * the quiescent state immediately.
+ *
+ * Although this is a great improvement over previous expedited
+ * implementations, it is still unfriendly to real-time workloads, and
+ * is thus not recommended for any sort of common-case code.  In fact,
+ * if you are using synchronize_rcu_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_rcu() instead.
  */
 void synchronize_rcu_expedited(void)
 {
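To make the batching advice in the rewritten comment concrete, here is a minimal sketch of the recommended structure; struct item and the retired list are hypothetical stand-ins for whatever objects the caller is retiring:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {                           /* hypothetical RCU-protected object */
        struct list_head node;
};

/* Instead of calling synchronize_rcu_expedited() once per item, unlink
 * the whole batch first, wait for a single grace period, then free. */
static void free_retired_items(struct list_head *retired)
{
        struct item *p, *n;

        synchronize_rcu();              /* one grace period covers the batch */

        list_for_each_entry_safe(p, n, retired, node) {
                list_del(&p->node);
                kfree(p);
        }
}

The items must already have been removed from every RCU-readable structure before the grace period starts; the single synchronize_rcu() then covers every element in the batch.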
@@ -744,12 +750,14 @@ void synchronize_rcu_expedited(void)
        }
 
        s = rcu_exp_gp_seq_snap(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
 
        rnp_unlock = exp_funnel_lock(rsp, s);
        if (rnp_unlock == NULL)
                return;  /* Someone else did our work for us. */
 
        rcu_exp_gp_seq_start(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
 
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
@@ -760,7 +768,11 @@ void synchronize_rcu_expedited(void)
 
        /* Clean up and exit. */
        rcu_exp_gp_seq_end(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
        mutex_unlock(&rnp_unlock->exp_funnel_mutex);
+       trace_rcu_exp_funnel_lock(rsp->name, rnp_unlock->level,
+                                 rnp_unlock->grplo, rnp_unlock->grphi,
+                                 TPS("rel"));
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
@@ -807,7 +819,6 @@ void exit_rcu(void)
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *const rcu_state_p = &rcu_sched_state;
-static struct rcu_data __percpu *const rcu_data_p = &rcu_sched_data;
 
 /*
  * Tell them what RCU they are running.
@@ -991,7 +1002,7 @@ static int rcu_boost(struct rcu_node *rnp)
         * might exit their RCU read-side critical sections on their own.
         */
        if (rnp->exp_tasks == NULL && rnp->boost_tasks == NULL) {
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return 0;
        }
 
@@ -1028,7 +1039,7 @@ static int rcu_boost(struct rcu_node *rnp)
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        /* Lock only for side effect: boosts task t's priority. */
        rt_mutex_lock(&rnp->boost_mtx);
        rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
@@ -1088,7 +1099,7 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 
        if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
                rnp->n_balk_exp_gp_tasks++;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                return;
        }
        if (rnp->exp_tasks != NULL ||
@@ -1098,13 +1109,13 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
             ULONG_CMP_GE(jiffies, rnp->boost_time))) {
                if (rnp->exp_tasks == NULL)
                        rnp->boost_tasks = rnp->gp_tasks;
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                t = rnp->boost_kthread_task;
                if (t)
                        rcu_wake_cond(t, rnp->boost_kthread_status);
        } else {
                rcu_initiate_boost_trace(rnp);
-               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        }
 }
 
@@ -1172,7 +1183,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
                return PTR_ERR(t);
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        rnp->boost_kthread_task = t;
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        sp.sched_priority = kthread_prio;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
@@ -1308,7 +1319,7 @@ static void rcu_prepare_kthreads(int cpu)
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
        __releases(rnp->lock)
 {
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
 static void invoke_rcu_callbacks_kthread(void)
@@ -1559,7 +1570,7 @@ static void rcu_prepare_for_idle(void)
                rnp = rdp->mynode;
                raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
                needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-               raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+               raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
        }
@@ -2059,7 +2070,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
        needwake = rcu_start_future_gp(rnp, rdp, &c);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
        if (needwake)
                rcu_gp_kthread_wake(rdp->rsp);
 
index 1088e64f01ad84f98143b95c549bf77ad9c655ab..d149c412a4e5170871261bd6c92c38a4ee1ec31d 100644 (file)
@@ -185,17 +185,16 @@ static int show_rcuexp(struct seq_file *m, void *v)
        int cpu;
        struct rcu_state *rsp = (struct rcu_state *)m->private;
        struct rcu_data *rdp;
-       unsigned long s0 = 0, s1 = 0, s2 = 0, s3 = 0;
+       unsigned long s1 = 0, s2 = 0, s3 = 0;
 
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(rsp->rda, cpu);
-               s0 += atomic_long_read(&rdp->expedited_workdone0);
                s1 += atomic_long_read(&rdp->expedited_workdone1);
                s2 += atomic_long_read(&rdp->expedited_workdone2);
                s3 += atomic_long_read(&rdp->expedited_workdone3);
        }
-       seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
-                  rsp->expedited_sequence, s0, s1, s2, s3,
+       seq_printf(m, "s=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
+                  rsp->expedited_sequence, s1, s2, s3,
                   atomic_long_read(&rsp->expedited_normal),
                   atomic_read(&rsp->expedited_need_qs),
                   rsp->expedited_sequence / 2);
index 76b94e19430b21b8f4615b70dbb89282445ca149..ca828b41c938b24e5b11b33e421ea4b4e2366f1f 100644 (file)
@@ -128,6 +128,7 @@ bool rcu_gp_is_normal(void)
 {
        return READ_ONCE(rcu_normal);
 }
+EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
 
 static atomic_t rcu_expedited_nesting =
        ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
index 09c0597840b02dc260c65baafdd1fde0715bcbb4..49834309043c4d639becb95f6ee5c4f6b05d6666 100644 (file)
@@ -333,13 +333,13 @@ int release_resource(struct resource *old)
 EXPORT_SYMBOL(release_resource);
 
 /*
- * Finds the lowest iomem reosurce exists with-in [res->start.res->end)
- * the caller must specify res->start, res->end, res->flags and "name".
- * If found, returns 0, res is overwritten, if not found, returns -1.
- * This walks through whole tree and not just first level children
- * until and unless first_level_children_only is true.
+ * Finds the lowest iomem resource existing within [res->start, res->end).
+ * The caller must specify res->start, res->end, res->flags, and optionally
+ * desc.  If found, returns 0 and overwrites res; if not found, returns -1.
+ * This function walks the whole tree, not just the first-level children,
+ * unless first_level_children_only is true.
  */
-static int find_next_iomem_res(struct resource *res, char *name,
+static int find_next_iomem_res(struct resource *res, unsigned long desc,
                               bool first_level_children_only)
 {
        resource_size_t start, end;
@@ -358,9 +358,9 @@ static int find_next_iomem_res(struct resource *res, char *name,
        read_lock(&resource_lock);
 
        for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
-               if (p->flags != res->flags)
+               if ((p->flags & res->flags) != res->flags)
                        continue;
-               if (name && strcmp(p->name, name))
+               if ((desc != IORES_DESC_NONE) && (desc != p->desc))
                        continue;
                if (p->start > end) {
                        p = NULL;
@@ -385,15 +385,18 @@ static int find_next_iomem_res(struct resource *res, char *name,
  * Walks through iomem resources and calls func() with matching resource
  * ranges. This walks through whole tree and not just first level children.
  * All the memory ranges which overlap start,end and also match flags and
- * name are valid candidates.
+ * desc are valid candidates.
  *
- * @name: name of resource
- * @flags: resource flags
+ * @desc: I/O resource descriptor. Use IORES_DESC_NONE to skip @desc check.
+ * @flags: I/O resource flags
  * @start: start addr
  * @end: end addr
+ *
+ * NOTE: For a new descriptor search, define a new IORES_DESC in
+ * <linux/ioport.h> and set it in 'desc' of a target resource entry.
  */
-int walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end,
-               void *arg, int (*func)(u64, u64, void *))
+int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start,
+               u64 end, void *arg, int (*func)(u64, u64, void *))
 {
        struct resource res;
        u64 orig_end;
@@ -403,23 +406,27 @@ int walk_iomem_res(char *name, unsigned long flags, u64 start, u64 end,
        res.end = end;
        res.flags = flags;
        orig_end = res.end;
+
        while ((res.start < res.end) &&
-               (!find_next_iomem_res(&res, name, false))) {
+               (!find_next_iomem_res(&res, desc, false))) {
+
                ret = (*func)(res.start, res.end, arg);
                if (ret)
                        break;
+
                res.start = res.end + 1;
                res.end = orig_end;
        }
+
        return ret;
 }
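As a usage illustration for the new descriptor-based interface, a short sketch of a caller assuming the declarations from <linux/ioport.h> as updated by this series; the accumulator struct and both helpers are hypothetical:

#include <linux/ioport.h>

struct ram_acc {                        /* hypothetical accumulator */
        u64 bytes;
};

static int count_ram_range(u64 start, u64 end, void *arg)
{
        struct ram_acc *acc = arg;

        acc->bytes += end - start + 1;
        return 0;                       /* nonzero would stop the walk */
}

static u64 busy_system_ram_bytes(u64 start, u64 end)
{
        struct ram_acc acc = { .bytes = 0 };

        /* IORES_DESC_NONE skips the descriptor check, so this matches on
         * flags alone, much like the old name-based walk_iomem_res(). */
        walk_iomem_res_desc(IORES_DESC_NONE,
                            IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
                            start, end, &acc, count_ram_range);
        return acc.bytes;
}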
 
 /*
- * This function calls callback against all memory range of "System RAM"
- * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY.
- * Now, this function is only for "System RAM". This function deals with
- * full ranges and not pfn. If resources are not pfn aligned, dealing
- * with pfn can truncate ranges.
+ * This function calls the @func callback against all memory ranges of type
+ * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
+ * It handles only System RAM, and works with full ranges rather than PFNs:
+ * if resources are not PFN-aligned, dealing with PFNs can truncate the
+ * ranges.
  */
 int walk_system_ram_res(u64 start, u64 end, void *arg,
                                int (*func)(u64, u64, void *))
@@ -430,10 +437,10 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
 
        res.start = start;
        res.end = end;
-       res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        orig_end = res.end;
        while ((res.start < res.end) &&
-               (!find_next_iomem_res(&res, "System RAM", true))) {
+               (!find_next_iomem_res(&res, IORES_DESC_NONE, true))) {
                ret = (*func)(res.start, res.end, arg);
                if (ret)
                        break;
@@ -446,9 +453,9 @@ int walk_system_ram_res(u64 start, u64 end, void *arg,
 #if !defined(CONFIG_ARCH_HAS_WALK_MEMORY)
 
 /*
- * This function calls callback against all memory range of "System RAM"
- * which are marked as IORESOURCE_MEM and IORESOUCE_BUSY.
- * Now, this function is only for "System RAM".
+ * This function calls the @func callback against all memory ranges of type
+ * System RAM which are marked as IORESOURCE_SYSTEM_RAM and IORESOURCE_BUSY.
+ * It is to be used only for System RAM.
  */
 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                void *arg, int (*func)(unsigned long, unsigned long, void *))
@@ -460,10 +467,10 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 
        res.start = (u64) start_pfn << PAGE_SHIFT;
        res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
-       res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        orig_end = res.end;
        while ((res.start < res.end) &&
-               (find_next_iomem_res(&res, "System RAM", true) >= 0)) {
+               (find_next_iomem_res(&res, IORES_DESC_NONE, true) >= 0)) {
                pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT;
                end_pfn = (res.end + 1) >> PAGE_SHIFT;
                if (end_pfn > pfn)
@@ -484,7 +491,7 @@ static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)
 }
 /*
  * This generic page_is_ram() returns true if specified address is
- * registered as "System RAM" in iomem_resource list.
+ * registered as System RAM in iomem_resource list.
  */
 int __weak page_is_ram(unsigned long pfn)
 {
@@ -496,30 +503,34 @@ EXPORT_SYMBOL_GPL(page_is_ram);
  * region_intersects() - determine intersection of region with known resources
  * @start: region start address
  * @size: size of region
- * @name: name of resource (in iomem_resource)
+ * @flags: flags of resource (in iomem_resource)
+ * @desc: descriptor of resource (in iomem_resource) or IORES_DESC_NONE
  *
  * Check if the specified region partially overlaps or fully eclipses a
- * resource identified by @name.  Return REGION_DISJOINT if the region
- * does not overlap @name, return REGION_MIXED if the region overlaps
- * @type and another resource, and return REGION_INTERSECTS if the
- * region overlaps @type and no other defined resource. Note, that
- * REGION_INTERSECTS is also returned in the case when the specified
- * region overlaps RAM and undefined memory holes.
+ * resource identified by @flags and @desc (optional with IORES_DESC_NONE).
+ * Return REGION_DISJOINT if the region does not overlap @flags/@desc,
+ * return REGION_MIXED if the region overlaps @flags/@desc and another
+ * resource, and return REGION_INTERSECTS if the region overlaps @flags/@desc
+ * and no other defined resource. Note that REGION_INTERSECTS is also
+ * returned in the case when the specified region overlaps RAM and undefined
+ * memory holes.
  *
  * region_intersect() is used by memory remapping functions to ensure
  * the user is not remapping RAM and is a vast speed up over walking
  * through the resource table page by page.
  */
-int region_intersects(resource_size_t start, size_t size, const char *name)
+int region_intersects(resource_size_t start, size_t size, unsigned long flags,
+                     unsigned long desc)
 {
-       unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        resource_size_t end = start + size - 1;
        int type = 0; int other = 0;
        struct resource *p;
 
        read_lock(&resource_lock);
        for (p = iomem_resource.child; p ; p = p->sibling) {
-               bool is_type = strcmp(p->name, name) == 0 && p->flags == flags;
+               bool is_type = (((p->flags & flags) == flags) &&
+                               ((desc == IORES_DESC_NONE) ||
+                                (desc == p->desc)));
 
                if (start >= p->start && start <= p->end)
                        is_type ? type++ : other++;
@@ -538,6 +549,7 @@ int region_intersects(resource_size_t start, size_t size, const char *name)
 
        return REGION_DISJOINT;
 }
+EXPORT_SYMBOL_GPL(region_intersects);
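A short sketch of how a remapping path might consume the reworked flags/desc interface; the wrapper below is illustrative only and assumes the updated <linux/ioport.h> declarations:

#include <linux/ioport.h>

/* Hypothetical helper: refuse to remap a window that touches busy
 * System RAM, using the new region_intersects() signature. */
static bool range_safe_to_remap(resource_size_t start, size_t size)
{
        int ret = region_intersects(start, size,
                                    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
                                    IORES_DESC_NONE);

        /* REGION_DISJOINT: no overlap with busy System RAM.
         * REGION_INTERSECTS or REGION_MIXED: part or all of the window
         * is RAM (or straddles RAM and something else), so refuse. */
        return ret == REGION_DISJOINT;
}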
 
 void __weak arch_remove_reservations(struct resource *avail)
 {
@@ -948,6 +960,7 @@ static void __init __reserve_region_with_split(struct resource *root,
        res->start = start;
        res->end = end;
        res->flags = IORESOURCE_BUSY;
+       res->desc = IORES_DESC_NONE;
 
        while (1) {
 
@@ -982,6 +995,7 @@ static void __init __reserve_region_with_split(struct resource *root,
                                next_res->start = conflict->end + 1;
                                next_res->end = end;
                                next_res->flags = IORESOURCE_BUSY;
+                               next_res->desc = IORES_DESC_NONE;
                        }
                } else {
                        res->start = conflict->end + 1;
@@ -1071,8 +1085,9 @@ struct resource * __request_region(struct resource *parent,
        res->name = name;
        res->start = start;
        res->end = start + n - 1;
-       res->flags = resource_type(parent);
+       res->flags = resource_type(parent) | resource_ext_type(parent);
        res->flags |= IORESOURCE_BUSY | flags;
+       res->desc = IORES_DESC_NONE;
 
        write_lock(&resource_lock);
 
@@ -1237,6 +1252,7 @@ int release_mem_region_adjustable(struct resource *parent,
                        new_res->start = end + 1;
                        new_res->end = res->end;
                        new_res->flags = res->flags;
+                       new_res->desc = res->desc;
                        new_res->parent = res->parent;
                        new_res->sibling = res->sibling;
                        new_res->child = NULL;
@@ -1412,6 +1428,7 @@ static int __init reserve_setup(char *str)
                        res->start = io_start;
                        res->end = io_start + io_num - 1;
                        res->flags = IORESOURCE_BUSY;
+                       res->desc = IORES_DESC_NONE;
                        res->child = NULL;
                        if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0)
                                reserved = x+1;
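
A hedged usage sketch for the reworked region_intersects() above: callers now match on resource flags (plus an optional ->desc, with IORES_DESC_NONE meaning "any descriptor") instead of comparing resource names. The helper name below is invented, the flag combination is the one the removed local variable used, and the include choices are assumptions rather than part of the patch:

    #include <linux/ioport.h>
    #include <linux/mm.h>

    /* Does [addr, addr + size) overlap busy System RAM? (illustrative helper) */
    static bool range_is_busy_ram(resource_size_t addr, size_t size)
    {
            return region_intersects(addr, size,
                                     IORESOURCE_MEM | IORESOURCE_BUSY,
                                     IORES_DESC_NONE) == REGION_INTERSECTS;
    }
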
index 63d3a24e081ad311b59152d884209c7d1aa81c70..7e548bde67ee255e0d1bfdb018f1b7af912f4b29 100644 (file)
@@ -2093,7 +2093,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
        ttwu_queue(p, cpu);
 stat:
-       ttwu_stat(p, cpu, wake_flags);
+       if (schedstat_enabled())
+               ttwu_stat(p, cpu, wake_flags);
 out:
        raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
@@ -2141,7 +2142,8 @@ static void try_to_wake_up_local(struct task_struct *p)
                ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 
        ttwu_do_wakeup(rq, p, 0);
-       ttwu_stat(p, smp_processor_id(), 0);
+       if (schedstat_enabled())
+               ttwu_stat(p, smp_processor_id(), 0);
 out:
        raw_spin_unlock(&p->pi_lock);
 }
@@ -2210,6 +2212,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 
 #ifdef CONFIG_SCHEDSTATS
+       /* Even if schedstat is disabled, there should not be garbage */
        memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
 
@@ -2281,6 +2284,69 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
 #endif
 #endif
 
+DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+
+#ifdef CONFIG_SCHEDSTATS
+static void set_schedstats(bool enabled)
+{
+       if (enabled)
+               static_branch_enable(&sched_schedstats);
+       else
+               static_branch_disable(&sched_schedstats);
+}
+
+void force_schedstat_enabled(void)
+{
+       if (!schedstat_enabled()) {
+               pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
+               static_branch_enable(&sched_schedstats);
+       }
+}
+
+static int __init setup_schedstats(char *str)
+{
+       int ret = 0;
+       if (!str)
+               goto out;
+
+       if (!strcmp(str, "enable")) {
+               set_schedstats(true);
+               ret = 1;
+       } else if (!strcmp(str, "disable")) {
+               set_schedstats(false);
+               ret = 1;
+       }
+out:
+       if (!ret)
+               pr_warn("Unable to parse schedstats=\n");
+
+       return ret;
+}
+__setup("schedstats=", setup_schedstats);
+
+#ifdef CONFIG_PROC_SYSCTL
+int sysctl_schedstats(struct ctl_table *table, int write,
+                        void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct ctl_table t;
+       int err;
+       int state = static_branch_likely(&sched_schedstats);
+
+       if (write && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       t = *table;
+       t.data = &state;
+       err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+       if (err < 0)
+               return err;
+       if (write)
+               set_schedstats(state);
+       return err;
+}
+#endif
+#endif
+
 /*
  * fork()/clone()-time setup:
  */
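
The block above relies on the static-key (jump label) machinery: schedstat_enabled() compiles down to a patchable no-op, so disabled statistics cost essentially nothing at the call sites guarded elsewhere in this series. A minimal, hedged sketch of that pattern follows; my_stats_key, maybe_account() and account_something() are made-up names, not part of the patch:

    #include <linux/jump_label.h>
    #include <linux/types.h>

    DEFINE_STATIC_KEY_FALSE(my_stats_key);          /* starts off, like sched_schedstats */

    static void account_something(void) { }         /* hypothetical slow path */

    static void maybe_account(void)
    {
            /* Compiled as a no-op branch until the key is flipped at runtime. */
            if (static_branch_unlikely(&my_stats_key))
                    account_something();
    }

    static void my_stats_set(bool enabled)
    {
            if (enabled)
                    static_branch_enable(&my_stats_key);
            else
                    static_branch_disable(&my_stats_key);
    }
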
@@ -6173,11 +6239,16 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
 /* Setup the mask of cpus configured for isolated domains */
 static int __init isolated_cpu_setup(char *str)
 {
+       int ret;
+
        alloc_bootmem_cpumask_var(&cpu_isolated_map);
-       cpulist_parse(str, cpu_isolated_map);
+       ret = cpulist_parse(str, cpu_isolated_map);
+       if (ret) {
+               pr_err("sched: Error, all isolcpus= values must be between 0 and %d\n", nr_cpu_ids);
+               return 0;
+       }
        return 1;
 }
-
 __setup("isolcpus=", isolated_cpu_setup);
 
 struct s_data {
@@ -6840,7 +6911,7 @@ static void sched_init_numa(void)
 
                        sched_domains_numa_masks[i][j] = mask;
 
-                       for (k = 0; k < nr_node_ids; k++) {
+                       for_each_node(k) {
                                if (node_distance(j, k) > sched_domains_numa_distance[i])
                                        continue;
 
index 641511771ae6a696271f77532ac9e40e28175749..7cfa87bd8b89681ce6862e9934c020bf076c69ae 100644 (file)
@@ -75,16 +75,18 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
        PN(se->vruntime);
        PN(se->sum_exec_runtime);
 #ifdef CONFIG_SCHEDSTATS
-       PN(se->statistics.wait_start);
-       PN(se->statistics.sleep_start);
-       PN(se->statistics.block_start);
-       PN(se->statistics.sleep_max);
-       PN(se->statistics.block_max);
-       PN(se->statistics.exec_max);
-       PN(se->statistics.slice_max);
-       PN(se->statistics.wait_max);
-       PN(se->statistics.wait_sum);
-       P(se->statistics.wait_count);
+       if (schedstat_enabled()) {
+               PN(se->statistics.wait_start);
+               PN(se->statistics.sleep_start);
+               PN(se->statistics.block_start);
+               PN(se->statistics.sleep_max);
+               PN(se->statistics.block_max);
+               PN(se->statistics.exec_max);
+               PN(se->statistics.slice_max);
+               PN(se->statistics.wait_max);
+               PN(se->statistics.wait_sum);
+               P(se->statistics.wait_count);
+       }
 #endif
        P(se->load.weight);
 #ifdef CONFIG_SMP
@@ -122,10 +124,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
 #ifdef CONFIG_SCHEDSTATS
-       SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-               SPLIT_NS(p->se.statistics.wait_sum),
-               SPLIT_NS(p->se.sum_exec_runtime),
-               SPLIT_NS(p->se.statistics.sum_sleep_runtime));
+       if (schedstat_enabled()) {
+               SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
+                       SPLIT_NS(p->se.statistics.wait_sum),
+                       SPLIT_NS(p->se.sum_exec_runtime),
+                       SPLIT_NS(p->se.statistics.sum_sleep_runtime));
+       }
 #else
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
                0LL, 0L,
@@ -313,17 +317,18 @@ do {                                                                      \
 #define P(n) SEQ_printf(m, "  .%-30s: %d\n", #n, rq->n);
 #define P64(n) SEQ_printf(m, "  .%-30s: %Ld\n", #n, rq->n);
 
-       P(yld_count);
-
-       P(sched_count);
-       P(sched_goidle);
 #ifdef CONFIG_SMP
        P64(avg_idle);
        P64(max_idle_balance_cost);
 #endif
 
-       P(ttwu_count);
-       P(ttwu_local);
+       if (schedstat_enabled()) {
+               P(yld_count);
+               P(sched_count);
+               P(sched_goidle);
+               P(ttwu_count);
+               P(ttwu_local);
+       }
 
 #undef P
 #undef P64
@@ -569,38 +574,39 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        nr_switches = p->nvcsw + p->nivcsw;
 
 #ifdef CONFIG_SCHEDSTATS
-       PN(se.statistics.sum_sleep_runtime);
-       PN(se.statistics.wait_start);
-       PN(se.statistics.sleep_start);
-       PN(se.statistics.block_start);
-       PN(se.statistics.sleep_max);
-       PN(se.statistics.block_max);
-       PN(se.statistics.exec_max);
-       PN(se.statistics.slice_max);
-       PN(se.statistics.wait_max);
-       PN(se.statistics.wait_sum);
-       P(se.statistics.wait_count);
-       PN(se.statistics.iowait_sum);
-       P(se.statistics.iowait_count);
        P(se.nr_migrations);
-       P(se.statistics.nr_migrations_cold);
-       P(se.statistics.nr_failed_migrations_affine);
-       P(se.statistics.nr_failed_migrations_running);
-       P(se.statistics.nr_failed_migrations_hot);
-       P(se.statistics.nr_forced_migrations);
-       P(se.statistics.nr_wakeups);
-       P(se.statistics.nr_wakeups_sync);
-       P(se.statistics.nr_wakeups_migrate);
-       P(se.statistics.nr_wakeups_local);
-       P(se.statistics.nr_wakeups_remote);
-       P(se.statistics.nr_wakeups_affine);
-       P(se.statistics.nr_wakeups_affine_attempts);
-       P(se.statistics.nr_wakeups_passive);
-       P(se.statistics.nr_wakeups_idle);
 
-       {
+       if (schedstat_enabled()) {
                u64 avg_atom, avg_per_cpu;
 
+               PN(se.statistics.sum_sleep_runtime);
+               PN(se.statistics.wait_start);
+               PN(se.statistics.sleep_start);
+               PN(se.statistics.block_start);
+               PN(se.statistics.sleep_max);
+               PN(se.statistics.block_max);
+               PN(se.statistics.exec_max);
+               PN(se.statistics.slice_max);
+               PN(se.statistics.wait_max);
+               PN(se.statistics.wait_sum);
+               P(se.statistics.wait_count);
+               PN(se.statistics.iowait_sum);
+               P(se.statistics.iowait_count);
+               P(se.statistics.nr_migrations_cold);
+               P(se.statistics.nr_failed_migrations_affine);
+               P(se.statistics.nr_failed_migrations_running);
+               P(se.statistics.nr_failed_migrations_hot);
+               P(se.statistics.nr_forced_migrations);
+               P(se.statistics.nr_wakeups);
+               P(se.statistics.nr_wakeups_sync);
+               P(se.statistics.nr_wakeups_migrate);
+               P(se.statistics.nr_wakeups_local);
+               P(se.statistics.nr_wakeups_remote);
+               P(se.statistics.nr_wakeups_affine);
+               P(se.statistics.nr_wakeups_affine_attempts);
+               P(se.statistics.nr_wakeups_passive);
+               P(se.statistics.nr_wakeups_idle);
+
                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
                        avg_atom = div64_ul(avg_atom, nr_switches);
index 1926606ece807361ab02ef51f4864543640e8d8d..7ce24a4563221e547712351ecb7b4996047fdbef 100644 (file)
@@ -20,8 +20,8 @@
  *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  */
 
-#include <linux/latencytop.h>
 #include <linux/sched.h>
+#include <linux/latencytop.h>
 #include <linux/cpumask.h>
 #include <linux/cpuidle.h>
 #include <linux/slab.h>
@@ -755,7 +755,9 @@ static void
 update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        struct task_struct *p;
-       u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+       u64 delta;
+
+       delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
 
        if (entity_is_task(se)) {
                p = task_of(se);
@@ -776,22 +778,12 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
        se->statistics.wait_sum += delta;
        se->statistics.wait_start = 0;
 }
-#else
-static inline void
-update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-
-static inline void
-update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
-{
-}
-#endif
 
 /*
  * Task is being enqueued - update stats:
  */
-static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static inline void
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        /*
         * Are we enqueueing a waiting task? (for current tasks
@@ -802,7 +794,7 @@ static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 }
 
 static inline void
-update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
        /*
         * Mark the end of the wait period if dequeueing a
@@ -810,8 +802,41 @@ update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
         */
        if (se != cfs_rq->curr)
                update_stats_wait_end(cfs_rq, se);
+
+       if (flags & DEQUEUE_SLEEP) {
+               if (entity_is_task(se)) {
+                       struct task_struct *tsk = task_of(se);
+
+                       if (tsk->state & TASK_INTERRUPTIBLE)
+                               se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
+                       if (tsk->state & TASK_UNINTERRUPTIBLE)
+                               se->statistics.block_start = rq_clock(rq_of(cfs_rq));
+               }
+       }
+
+}
+#else
+static inline void
+update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
 }
 
+static inline void
+update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+}
+
+static inline void
+update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+{
+}
+#endif
+
 /*
  * We are picking a new current task - update its stats:
  */
@@ -907,10 +932,11 @@ struct numa_group {
        spinlock_t lock; /* nr_tasks, tasks */
        int nr_tasks;
        pid_t gid;
+       int active_nodes;
 
        struct rcu_head rcu;
-       nodemask_t active_nodes;
        unsigned long total_faults;
+       unsigned long max_faults_cpu;
        /*
         * Faults_cpu is used to decide whether memory should move
         * towards the CPU. As a consequence, these stats are weighted
@@ -969,6 +995,18 @@ static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
                group->faults_cpu[task_faults_idx(NUMA_MEM, nid, 1)];
 }
 
+/*
+ * A node triggering more than 1/3 as many NUMA faults as the maximum is
+ * considered part of a numa group's pseudo-interleaving set. Migrations
+ * between these nodes are slowed down, to allow things to settle down.
+ */
+#define ACTIVE_NODE_FRACTION 3
+
+static bool numa_is_active_node(int nid, struct numa_group *ng)
+{
+       return group_faults_cpu(ng, nid) * ACTIVE_NODE_FRACTION > ng->max_faults_cpu;
+}
+
 /* Handle placement on systems where not all nodes are directly connected. */
 static unsigned long score_nearby_nodes(struct task_struct *p, int nid,
                                        int maxdist, bool task)
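
The ACTIVE_NODE_FRACTION rule introduced above can be made concrete with a tiny standalone program (illustration only, not kernel code, made-up fault counts): a node is "active" when three times its CPU fault count exceeds the group maximum.

    #include <stdio.h>

    #define ACTIVE_NODE_FRACTION 3

    int main(void)
    {
            unsigned long faults[] = { 900, 350, 290, 10 };  /* made-up per-node fault counts */
            unsigned long max_faults = 0;
            int nid, active = 0;

            for (nid = 0; nid < 4; nid++)
                    if (faults[nid] > max_faults)
                            max_faults = faults[nid];

            for (nid = 0; nid < 4; nid++)
                    if (faults[nid] * ACTIVE_NODE_FRACTION > max_faults)
                            active++;

            /* 900 and 350 pass the "more than 1/3 of the maximum" test; 290 and 10 do not. */
            printf("active nodes: %d (max faults %lu)\n", active, max_faults);
            return 0;
    }
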
@@ -1118,27 +1156,23 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
                return true;
 
        /*
-        * Do not migrate if the destination is not a node that
-        * is actively used by this numa group.
-        */
-       if (!node_isset(dst_nid, ng->active_nodes))
-               return false;
-
-       /*
-        * Source is a node that is not actively used by this
-        * numa group, while the destination is. Migrate.
+        * Destination node is much more heavily used than the source
+        * node? Allow migration.
         */
-       if (!node_isset(src_nid, ng->active_nodes))
+       if (group_faults_cpu(ng, dst_nid) > group_faults_cpu(ng, src_nid) *
+                                       ACTIVE_NODE_FRACTION)
                return true;
 
        /*
-        * Both source and destination are nodes in active
-        * use by this numa group. Maximize memory bandwidth
-        * by migrating from more heavily used groups, to less
-        * heavily used ones, spreading the load around.
-        * Use a 1/4 hysteresis to avoid spurious page movement.
+        * Distribute memory according to CPU & memory use on each node,
+        * with 3/4 hysteresis to avoid unnecessary memory migrations:
+        *
+        * faults_cpu(dst)   3   faults_cpu(src)
+        * --------------- * - > ---------------
+        * faults_mem(dst)   4   faults_mem(src)
         */
-       return group_faults(p, dst_nid) < (group_faults(p, src_nid) * 3 / 4);
+       return group_faults_cpu(ng, dst_nid) * group_faults(p, src_nid) * 3 >
+              group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
 }
 
 static unsigned long weighted_cpuload(const int cpu);
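
The 3/4 hysteresis formula in the comment above is evaluated in cross-multiplied form so the kernel stays in integer arithmetic. A standalone sketch of the same test (plain C, function name and sample numbers invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    /* True when dst's CPU/memory fault ratio beats src's by more than 4/3. */
    static bool prefer_dst(unsigned long cpu_dst, unsigned long mem_dst,
                           unsigned long cpu_src, unsigned long mem_src)
    {
            /* Same as (cpu_dst / mem_dst) * 3 / 4 > cpu_src / mem_src, without divisions. */
            return cpu_dst * mem_src * 3 > cpu_src * mem_dst * 4;
    }

    int main(void)
    {
            /* 0.6 * 3/4 = 0.45 is not above 0.5: stay put. */
            printf("%d\n", prefer_dst(60, 100, 50, 100));   /* 0 */
            /* 0.8 * 3/4 = 0.6 is above 0.5: migrate. */
            printf("%d\n", prefer_dst(80, 100, 50, 100));   /* 1 */
            return 0;
    }
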
@@ -1220,8 +1254,6 @@ static void task_numa_assign(struct task_numa_env *env,
 {
        if (env->best_task)
                put_task_struct(env->best_task);
-       if (p)
-               get_task_struct(p);
 
        env->best_task = p;
        env->best_imp = imp;
@@ -1289,20 +1321,30 @@ static void task_numa_compare(struct task_numa_env *env,
        long imp = env->p->numa_group ? groupimp : taskimp;
        long moveimp = imp;
        int dist = env->dist;
+       bool assigned = false;
 
        rcu_read_lock();
 
        raw_spin_lock_irq(&dst_rq->lock);
        cur = dst_rq->curr;
        /*
-        * No need to move the exiting task, and this ensures that ->curr
-        * wasn't reaped and thus get_task_struct() in task_numa_assign()
-        * is safe under RCU read lock.
-        * Note that rcu_read_lock() itself can't protect from the final
-        * put_task_struct() after the last schedule().
+        * No need to move the exiting task or idle task.
         */
        if ((cur->flags & PF_EXITING) || is_idle_task(cur))
                cur = NULL;
+       else {
+               /*
+                * The task_struct must be protected here to protect the
+                * p->numa_faults access in the task_weight since the
+                * numa_faults could already be freed in the following path:
+                * finish_task_switch()
+                *     --> put_task_struct()
+                *         --> __put_task_struct()
+                *             --> task_numa_free()
+                */
+               get_task_struct(cur);
+       }
+
        raw_spin_unlock_irq(&dst_rq->lock);
 
        /*
@@ -1386,6 +1428,7 @@ balance:
                 */
                if (!load_too_imbalanced(src_load, dst_load, env)) {
                        imp = moveimp - 1;
+                       put_task_struct(cur);
                        cur = NULL;
                        goto assign;
                }
@@ -1411,9 +1454,16 @@ balance:
                env->dst_cpu = select_idle_sibling(env->p, env->dst_cpu);
 
 assign:
+       assigned = true;
        task_numa_assign(env, cur, imp);
 unlock:
        rcu_read_unlock();
+       /*
+        * cur was not handed off to task_numa_assign(), so the extra
+        * task_struct reference taken above must be dropped here.
+        */
+       if (cur && !assigned)
+               put_task_struct(cur);
 }
 
 static void task_numa_find_cpu(struct task_numa_env *env,
@@ -1468,7 +1518,7 @@ static int task_numa_migrate(struct task_struct *p)
 
                .best_task = NULL,
                .best_imp = 0,
-               .best_cpu = -1
+               .best_cpu = -1,
        };
        struct sched_domain *sd;
        unsigned long taskweight, groupweight;
@@ -1520,8 +1570,7 @@ static int task_numa_migrate(struct task_struct *p)
         *   multiple NUMA nodes; in order to better consolidate the group,
         *   we need to check other locations.
         */
-       if (env.best_cpu == -1 || (p->numa_group &&
-                       nodes_weight(p->numa_group->active_nodes) > 1)) {
+       if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
                for_each_online_node(nid) {
                        if (nid == env.src_nid || nid == p->numa_preferred_nid)
                                continue;
@@ -1556,12 +1605,14 @@ static int task_numa_migrate(struct task_struct *p)
         * trying for a better one later. Do not set the preferred node here.
         */
        if (p->numa_group) {
+               struct numa_group *ng = p->numa_group;
+
                if (env.best_cpu == -1)
                        nid = env.src_nid;
                else
                        nid = env.dst_nid;
 
-               if (node_isset(nid, p->numa_group->active_nodes))
+               if (ng->active_nodes > 1 && numa_is_active_node(env.dst_nid, ng))
                        sched_setnuma(p, env.dst_nid);
        }
 
@@ -1611,20 +1662,15 @@ static void numa_migrate_preferred(struct task_struct *p)
 }
 
 /*
- * Find the nodes on which the workload is actively running. We do this by
+ * Find out how many nodes the workload is actively running on. Do this by
  * tracking the nodes from which NUMA hinting faults are triggered. This can
  * be different from the set of nodes where the workload's memory is currently
  * located.
- *
- * The bitmask is used to make smarter decisions on when to do NUMA page
- * migrations, To prevent flip-flopping, and excessive page migrations, nodes
- * are added when they cause over 6/16 of the maximum number of faults, but
- * only removed when they drop below 3/16.
  */
-static void update_numa_active_node_mask(struct numa_group *numa_group)
+static void numa_group_count_active_nodes(struct numa_group *numa_group)
 {
        unsigned long faults, max_faults = 0;
-       int nid;
+       int nid, active_nodes = 0;
 
        for_each_online_node(nid) {
                faults = group_faults_cpu(numa_group, nid);
@@ -1634,12 +1680,12 @@ static void update_numa_active_node_mask(struct numa_group *numa_group)
 
        for_each_online_node(nid) {
                faults = group_faults_cpu(numa_group, nid);
-               if (!node_isset(nid, numa_group->active_nodes)) {
-                       if (faults > max_faults * 6 / 16)
-                               node_set(nid, numa_group->active_nodes);
-               } else if (faults < max_faults * 3 / 16)
-                       node_clear(nid, numa_group->active_nodes);
+               if (faults * ACTIVE_NODE_FRACTION > max_faults)
+                       active_nodes++;
        }
+
+       numa_group->max_faults_cpu = max_faults;
+       numa_group->active_nodes = active_nodes;
 }
 
 /*
@@ -1930,7 +1976,7 @@ static void task_numa_placement(struct task_struct *p)
        update_task_scan_period(p, fault_types[0], fault_types[1]);
 
        if (p->numa_group) {
-               update_numa_active_node_mask(p->numa_group);
+               numa_group_count_active_nodes(p->numa_group);
                spin_unlock_irq(group_lock);
                max_nid = preferred_group_nid(p, max_group_nid);
        }
@@ -1974,14 +2020,14 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
                        return;
 
                atomic_set(&grp->refcount, 1);
+               grp->active_nodes = 1;
+               grp->max_faults_cpu = 0;
                spin_lock_init(&grp->lock);
                grp->gid = p->pid;
                /* Second half of the array tracks nids where faults happen */
                grp->faults_cpu = grp->faults + NR_NUMA_HINT_FAULT_TYPES *
                                                nr_node_ids;
 
-               node_set(task_node(current), grp->active_nodes);
-
                for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
                        grp->faults[i] = p->numa_faults[i];
 
@@ -2095,6 +2141,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
        bool migrated = flags & TNF_MIGRATED;
        int cpu_node = task_node(current);
        int local = !!(flags & TNF_FAULT_LOCAL);
+       struct numa_group *ng;
        int priv;
 
        if (!static_branch_likely(&sched_numa_balancing))
@@ -2135,9 +2182,10 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
         * actively using should be counted as local. This allows the
         * scan rate to slow down when a workload has settled down.
         */
-       if (!priv && !local && p->numa_group &&
-                       node_isset(cpu_node, p->numa_group->active_nodes) &&
-                       node_isset(mem_node, p->numa_group->active_nodes))
+       ng = p->numa_group;
+       if (!priv && !local && ng && ng->active_nodes > 1 &&
+                               numa_is_active_node(cpu_node, ng) &&
+                               numa_is_active_node(mem_node, ng))
                local = 1;
 
        task_numa_placement(p);
@@ -3086,6 +3134,26 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 
 static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
 
+static inline void check_schedstat_required(void)
+{
+#ifdef CONFIG_SCHEDSTATS
+       if (schedstat_enabled())
+               return;
+
+       /* Force schedstat enabled if a dependent tracepoint is active */
+       if (trace_sched_stat_wait_enabled()    ||
+                       trace_sched_stat_sleep_enabled()   ||
+                       trace_sched_stat_iowait_enabled()  ||
+                       trace_sched_stat_blocked_enabled() ||
+                       trace_sched_stat_runtime_enabled())  {
+               pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
+                            "stat_blocked and stat_runtime require the "
+                            "kernel parameter schedstats=enabled or "
+                            "kernel.sched_schedstats=1\n");
+       }
+#endif
+}
+
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
@@ -3106,11 +3174,15 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 
        if (flags & ENQUEUE_WAKEUP) {
                place_entity(cfs_rq, se, 0);
-               enqueue_sleeper(cfs_rq, se);
+               if (schedstat_enabled())
+                       enqueue_sleeper(cfs_rq, se);
        }
 
-       update_stats_enqueue(cfs_rq, se);
-       check_spread(cfs_rq, se);
+       check_schedstat_required();
+       if (schedstat_enabled()) {
+               update_stats_enqueue(cfs_rq, se);
+               check_spread(cfs_rq, se);
+       }
        if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
        se->on_rq = 1;
@@ -3177,19 +3249,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
        update_curr(cfs_rq);
        dequeue_entity_load_avg(cfs_rq, se);
 
-       update_stats_dequeue(cfs_rq, se);
-       if (flags & DEQUEUE_SLEEP) {
-#ifdef CONFIG_SCHEDSTATS
-               if (entity_is_task(se)) {
-                       struct task_struct *tsk = task_of(se);
-
-                       if (tsk->state & TASK_INTERRUPTIBLE)
-                               se->statistics.sleep_start = rq_clock(rq_of(cfs_rq));
-                       if (tsk->state & TASK_UNINTERRUPTIBLE)
-                               se->statistics.block_start = rq_clock(rq_of(cfs_rq));
-               }
-#endif
-       }
+       if (schedstat_enabled())
+               update_stats_dequeue(cfs_rq, se, flags);
 
        clear_buddies(cfs_rq, se);
 
@@ -3263,7 +3324,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
                 * a CPU. So account for the time it spent waiting on the
                 * runqueue.
                 */
-               update_stats_wait_end(cfs_rq, se);
+               if (schedstat_enabled())
+                       update_stats_wait_end(cfs_rq, se);
                __dequeue_entity(cfs_rq, se);
                update_load_avg(se, 1);
        }
@@ -3276,7 +3338,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
         * least twice that of our own weight (i.e. dont track it
         * when there are only lesser-weight tasks around):
         */
-       if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
+       if (schedstat_enabled() && rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
                se->statistics.slice_max = max(se->statistics.slice_max,
                        se->sum_exec_runtime - se->prev_sum_exec_runtime);
        }
@@ -3359,9 +3421,13 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
        /* throttle cfs_rqs exceeding runtime */
        check_cfs_rq_runtime(cfs_rq);
 
-       check_spread(cfs_rq, prev);
+       if (schedstat_enabled()) {
+               check_spread(cfs_rq, prev);
+               if (prev->on_rq)
+                       update_stats_wait_start(cfs_rq, prev);
+       }
+
        if (prev->on_rq) {
-               update_stats_wait_start(cfs_rq, prev);
                /* Put 'current' back into the tree. */
                __enqueue_entity(cfs_rq, prev);
                /* in !on_rq case, update occurred at dequeue */
index de0e786b26671b633ccaa56d9ebba6ce7b9d77cf..544a7133cbd1df2a6ee4369231148fc0309eae11 100644 (file)
@@ -162,7 +162,7 @@ static void cpuidle_idle_call(void)
         */
        if (idle_should_freeze()) {
                entered_state = cpuidle_enter_freeze(drv, dev);
-               if (entered_state >= 0) {
+               if (entered_state > 0) {
                        local_irq_enable();
                        goto exit_idle;
                }
index 10f16374df7f3a3f0f3dc0eb281559cacd223311..1d583870e1a61ac21e48a4d034400041dca16758 100644 (file)
@@ -1022,6 +1022,7 @@ extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #endif /* SCHED_DEBUG && HAVE_JUMP_LABEL */
 
 extern struct static_key_false sched_numa_balancing;
+extern struct static_key_false sched_schedstats;
 
 static inline u64 global_rt_period(void)
 {
index b0fbc7632de5f9b13d8ccd2c42d73560c347669a..70b3b6a20fb0e362f4c816fe0f069c7c8576c7f4 100644 (file)
@@ -29,9 +29,10 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
        if (rq)
                rq->rq_sched_info.run_delay += delta;
 }
-# define schedstat_inc(rq, field)      do { (rq)->field++; } while (0)
-# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
-# define schedstat_set(var, val)       do { var = (val); } while (0)
+# define schedstat_enabled()           static_branch_unlikely(&sched_schedstats)
+# define schedstat_inc(rq, field)      do { if (schedstat_enabled()) { (rq)->field++; } } while (0)
+# define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0)
+# define schedstat_set(var, val)       do { if (schedstat_enabled()) { var = (val); } } while (0)
 #else /* !CONFIG_SCHEDSTATS */
 static inline void
 rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
@@ -42,6 +43,7 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 static inline void
 rq_sched_info_depart(struct rq *rq, unsigned long long delta)
 {}
+# define schedstat_enabled()           0
 # define schedstat_inc(rq, field)      do { } while (0)
 # define schedstat_add(rq, field, amt) do { } while (0)
 # define schedstat_set(var, val)       do { } while (0)
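
A compilable userspace stand-in for the reworked macros above (a plain bool replaces the static key, all names are invented): the do { ... } while (0) wrapper keeps the guarded update safe to use like a single statement, and nothing is touched while the knob is off.

    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace stand-in for the static key; the kernel uses a jump label. */
    static bool stats_enabled;

    #define stat_inc(s, field) \
            do { if (stats_enabled) { (s)->field++; } } while (0)

    struct stats { unsigned long yld_count; };

    int main(void)
    {
            struct stats s = { 0 };

            stat_inc(&s, yld_count);        /* ignored: stats disabled */
            stats_enabled = true;
            stat_inc(&s, yld_count);        /* counted */
            printf("%lu\n", s.yld_count);   /* prints 1 */
            return 0;
    }
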
index 580ac2d4024ffbdb29960ccdd86424fa730b1e78..15a1795bbba17d1140e3220c41d48dd26ac43c95 100644 (file)
@@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
                put_seccomp_filter(thread);
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);
+
+               /*
+                * Don't let an unprivileged task work around
+                * the no_new_privs restriction by creating
+                * a thread that sets it up, enters seccomp,
+                * then dies.
+                */
+               if (task_no_new_privs(caller))
+                       task_set_no_new_privs(thread);
+
                /*
                 * Opt the other thread into seccomp if needed.
                 * As threads are considered to be trust-realm
                 * equivalent (see ptrace_may_access), it is safe to
                 * allow one thread to transition the other.
                 */
-               if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
-                       /*
-                        * Don't let an unprivileged task work around
-                        * the no_new_privs restriction by creating
-                        * a thread that sets it up, enters seccomp,
-                        * then dies.
-                        */
-                       if (task_no_new_privs(caller))
-                               task_set_no_new_privs(thread);
-
+               if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
                        seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
-               }
        }
 }
 
index f3f1f7a972fd40f3d437d6bf3b5db2cacaffe6ca..0508544c8ced0d96913905dc53af68f38b6ee618 100644 (file)
@@ -3508,8 +3508,10 @@ static int sigsuspend(sigset_t *set)
        current->saved_sigmask = current->blocked;
        set_current_blocked(set);
 
-       __set_current_state(TASK_INTERRUPTIBLE);
-       schedule();
+       while (!signal_pending(current)) {
+               __set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
+       }
        set_restore_sigmask();
        return -ERESTARTNOHAND;
 }
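
The hunk above turns a single sleep into the canonical wait loop: after every wakeup the condition (a pending signal) is re-checked before returning. The same discipline applies to any sleeper; a userspace analogue using a condition variable (illustrative only, not related to the kernel code):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    static bool event_pending;

    static void wait_for_event(void)
    {
            pthread_mutex_lock(&lock);
            while (!event_pending)                  /* loop: never trust a single wakeup */
                    pthread_cond_wait(&cond, &lock);
            event_pending = false;
            pthread_mutex_unlock(&lock);
    }
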
index 97715fd9e790ade5d7cd7731107de2dafb8272e3..f5102fabef7f525f6c79d66756336c262e465caa 100644 (file)
@@ -350,6 +350,17 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+#ifdef CONFIG_SCHEDSTATS
+       {
+               .procname       = "sched_schedstats",
+               .data           = NULL,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = sysctl_schedstats,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
+#endif /* CONFIG_SCHEDSTATS */
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_NUMA_BALANCING
        {
@@ -505,7 +516,7 @@ static struct ctl_table kern_table[] = {
                .data           = &latencytop_enabled,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = sysctl_latencytop,
        },
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
index 664de539299b6ec3c8eb9a1565f02c14ae839379..56ece145a814a87ce172d96f32c17bf9d119b2f8 100644 (file)
@@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
                /* cs is a watchdog. */
                if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                        cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
+       }
+       spin_unlock_irqrestore(&watchdog_lock, flags);
+}
+
+static void clocksource_select_watchdog(bool fallback)
+{
+       struct clocksource *cs, *old_wd;
+       unsigned long flags;
+
+       spin_lock_irqsave(&watchdog_lock, flags);
+       /* save current watchdog */
+       old_wd = watchdog;
+       if (fallback)
+               watchdog = NULL;
+
+       list_for_each_entry(cs, &clocksource_list, list) {
+               /* cs is a clocksource to be watched. */
+               if (cs->flags & CLOCK_SOURCE_MUST_VERIFY)
+                       continue;
+
+               /* Skip current if we were requested for a fallback. */
+               if (fallback && cs == old_wd)
+                       continue;
+
                /* Pick the best watchdog. */
-               if (!watchdog || cs->rating > watchdog->rating) {
+               if (!watchdog || cs->rating > watchdog->rating)
                        watchdog = cs;
-                       /* Reset watchdog cycles */
-                       clocksource_reset_watchdog();
-               }
        }
+       /* If we failed to find a fallback restore the old one. */
+       if (!watchdog)
+               watchdog = old_wd;
+
+       /* If we changed the watchdog we need to reset cycles. */
+       if (watchdog != old_wd)
+               clocksource_reset_watchdog();
+
        /* Check if the watchdog timer needs to be started. */
        clocksource_start_watchdog();
        spin_unlock_irqrestore(&watchdog_lock, flags);
@@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
                cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
 }
 
+static void clocksource_select_watchdog(bool fallback) { }
 static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
 static inline int __clocksource_watchdog_kthread(void) { return 0; }
@@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
        clocksource_enqueue(cs);
        clocksource_enqueue_watchdog(cs);
        clocksource_select();
+       clocksource_select_watchdog(false);
        mutex_unlock(&clocksource_mutex);
        return 0;
 }
@@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating)
        mutex_lock(&clocksource_mutex);
        __clocksource_change_rating(cs, rating);
        clocksource_select();
+       clocksource_select_watchdog(false);
        mutex_unlock(&clocksource_mutex);
 }
 EXPORT_SYMBOL(clocksource_change_rating);
@@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
-       /*
-        * I really can't convince myself to support this on hardware
-        * designed by lobotomized monkeys.
-        */
-       if (clocksource_is_watchdog(cs))
-               return -EBUSY;
+       if (clocksource_is_watchdog(cs)) {
+               /* Select and try to install a replacement watchdog. */
+               clocksource_select_watchdog(true);
+               if (clocksource_is_watchdog(cs))
+                       return -EBUSY;
+       }
 
        if (cs == curr_clocksource) {
                /* Select and try to install a replacement clock source */
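
clocksource_select_watchdog() above walks the registered clocksources, skips anything that itself needs verification, optionally excludes the outgoing watchdog, and keeps the highest-rated candidate, falling back to the old watchdog if nothing qualifies. A standalone sketch of that selection (plain C; the struct, field names and ratings are stand-ins, not kernel definitions):

    #include <stdbool.h>

    struct cs {
            int rating;
            bool must_verify;       /* stands in for CLOCK_SOURCE_MUST_VERIFY */
    };

    static struct cs *select_watchdog(struct cs *list, int n,
                                      struct cs *old_wd, bool fallback)
    {
            struct cs *wd = fallback ? NULL : old_wd;
            int i;

            for (i = 0; i < n; i++) {
                    struct cs *c = &list[i];

                    if (c->must_verify)
                            continue;               /* cannot act as a watchdog itself */
                    if (fallback && c == old_wd)
                            continue;               /* exclude the clocksource being unbound */
                    if (!wd || c->rating > wd->rating)
                            wd = c;
            }
            return wd ? wd : old_wd;                /* no candidate found: keep the old one */
    }

    int main(void)
    {
            struct cs list[] = {
                    { .rating = 300, .must_verify = true  },
                    { .rating = 250, .must_verify = false },
                    { .rating = 100, .must_verify = false },
            };
            struct cs *wd = select_watchdog(list, 3, &list[1], true);

            return wd == &list[2] ? 0 : 1;  /* fallback skips list[1], picks list[2] */
    }
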
index 435b8850dd80a300c11ad128bc0cf51e3c23e15a..9eb2eef7a4ef1a92eb6d3d4bc4c4ff49180e477c 100644 (file)
@@ -90,19 +90,30 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
                        .clockid = CLOCK_TAI,
                        .get_time = &ktime_get_clocktai,
                },
+               {
+                       .index = HRTIMER_BASE_MONOTONIC_RAW,
+                       .clockid = CLOCK_MONOTONIC_RAW,
+                       .get_time = &ktime_get_raw,
+               },
        }
 };
 
 static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
+       /* Make sure we catch unsupported clockids */
+       [0 ... MAX_CLOCKS - 1]  = HRTIMER_MAX_CLOCK_BASES,
+
        [CLOCK_REALTIME]        = HRTIMER_BASE_REALTIME,
        [CLOCK_MONOTONIC]       = HRTIMER_BASE_MONOTONIC,
+       [CLOCK_MONOTONIC_RAW]   = HRTIMER_BASE_MONOTONIC_RAW,
        [CLOCK_BOOTTIME]        = HRTIMER_BASE_BOOTTIME,
        [CLOCK_TAI]             = HRTIMER_BASE_TAI,
 };
 
 static inline int hrtimer_clockid_to_base(clockid_t clock_id)
 {
-       return hrtimer_clock_to_base_table[clock_id];
+       int base = hrtimer_clock_to_base_table[clock_id];
+       BUG_ON(base == HRTIMER_MAX_CLOCK_BASES);
+       return base;
 }
 
 /*
@@ -897,10 +908,10 @@ static int enqueue_hrtimer(struct hrtimer *timer,
  */
 static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
-                            unsigned long newstate, int reprogram)
+                            u8 newstate, int reprogram)
 {
        struct hrtimer_cpu_base *cpu_base = base->cpu_base;
-       unsigned int state = timer->state;
+       u8 state = timer->state;
 
        timer->state = newstate;
        if (!(state & HRTIMER_STATE_ENQUEUED))
@@ -930,7 +941,7 @@ static inline int
 remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
 {
        if (hrtimer_is_queued(timer)) {
-               unsigned long state = timer->state;
+               u8 state = timer->state;
                int reprogram;
 
                /*
@@ -954,6 +965,22 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool rest
        return 0;
 }
 
+static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
+                                           const enum hrtimer_mode mode)
+{
+#ifdef CONFIG_TIME_LOW_RES
+       /*
+        * CONFIG_TIME_LOW_RES indicates that the system has no way to return
+        * granular time values. For relative timers we add hrtimer_resolution
+        * (i.e. one jiffie) to prevent short timeouts.
+        */
+       timer->is_rel = mode & HRTIMER_MODE_REL;
+       if (timer->is_rel)
+               tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
+#endif
+       return tim;
+}
+
 /**
  * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
  * @timer:     the timer to be added
@@ -974,19 +1001,10 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        /* Remove an active timer from the queue: */
        remove_hrtimer(timer, base, true);
 
-       if (mode & HRTIMER_MODE_REL) {
+       if (mode & HRTIMER_MODE_REL)
                tim = ktime_add_safe(tim, base->get_time());
-               /*
-                * CONFIG_TIME_LOW_RES is a temporary way for architectures
-                * to signal that they simply return xtime in
-                * do_gettimeoffset(). In this case we want to round up by
-                * resolution when starting a relative timer, to avoid short
-                * timeouts. This will go away with the GTOD framework.
-                */
-#ifdef CONFIG_TIME_LOW_RES
-               tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
-#endif
-       }
+
+       tim = hrtimer_update_lowres(timer, tim, mode);
 
        hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
@@ -1074,19 +1092,23 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
 /**
  * hrtimer_get_remaining - get remaining time for the timer
  * @timer:     the timer to read
+ * @adjust:    adjust relative timers when CONFIG_TIME_LOW_RES=y
  */
-ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust)
 {
        unsigned long flags;
        ktime_t rem;
 
        lock_hrtimer_base(timer, &flags);
-       rem = hrtimer_expires_remaining(timer);
+       if (IS_ENABLED(CONFIG_TIME_LOW_RES) && adjust)
+               rem = hrtimer_expires_remaining_adjusted(timer);
+       else
+               rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);
 
        return rem;
 }
-EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
+EXPORT_SYMBOL_GPL(__hrtimer_get_remaining);
 
 #ifdef CONFIG_NO_HZ_COMMON
 /**
@@ -1219,6 +1241,14 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
        timer_stats_account_hrtimer(timer);
        fn = timer->function;
 
+       /*
+        * Clear the 'is relative' flag for the TIME_LOW_RES case. If the
+        * timer is restarted with a period then it becomes an absolute
+        * timer. If it's not restarted it does not matter.
+        */
+       if (IS_ENABLED(CONFIG_TIME_LOW_RES))
+               timer->is_rel = false;
+
        /*
         * Because we run timers from hardirq context, there is no chance
         * they get migrated to another cpu, therefore its safe to unlock
@@ -1268,7 +1298,10 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
                if (!(active & 0x01))
                        continue;
 
-               basenow = ktime_add(now, base->offset);
+               if (unlikely(base->index == HRTIMER_BASE_MONOTONIC_RAW))
+                       basenow = ktime_get_raw();
+               else
+                       basenow = ktime_add(now, base->offset);
 
                while ((node = timerqueue_getnext(&base->active))) {
                        struct hrtimer *timer;
index 8d262b4675738d21bea752d65a47117c3d8ae514..1d5c7204ddc97bce881431b399e58741961c6391 100644 (file)
@@ -26,7 +26,7 @@
  */
 static struct timeval itimer_get_remtime(struct hrtimer *timer)
 {
-       ktime_t rem = hrtimer_get_remaining(timer);
+       ktime_t rem = __hrtimer_get_remaining(timer, true);
 
        /*
         * Racy but safe: if the itimer expires after the above
index 36f2ca09aa5e458bf1f31e3ec018e180dcc5b345..6df8927c58a5bbedcab9efceac7c21985bec0bf0 100644 (file)
@@ -685,8 +685,18 @@ int ntp_validate_timex(struct timex *txc)
                if (!capable(CAP_SYS_TIME))
                        return -EPERM;
 
-               if (!timeval_inject_offset_valid(&txc->time))
-                       return -EINVAL;
+               if (txc->modes & ADJ_NANO) {
+                       struct timespec ts;
+
+                       ts.tv_sec = txc->time.tv_sec;
+                       ts.tv_nsec = txc->time.tv_usec;
+                       if (!timespec_inject_offset_valid(&ts))
+                               return -EINVAL;
+
+               } else {
+                       if (!timeval_inject_offset_valid(&txc->time))
+                               return -EINVAL;
+               }
        }
 
        /*
index 31d11ac9fa4739789728c44470b82115ec307d11..f2826c35e9185f60586f06dff53f51ca3324c659 100644 (file)
@@ -760,7 +760,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
            (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
                timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
 
-       remaining = ktime_sub(hrtimer_get_expires(timer), now);
+       remaining = __hrtimer_expires_remaining_adjusted(timer, now);
        /* Return 0 only, when the timer is expired and not pending */
        if (remaining.tv64 <= 0) {
                /*
index 9d7a053545f5aca7a324f3530ee52ca9dac413f8..0b17424349eb4dafd76a2cf588748988ce51a295 100644 (file)
  */
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
-/*
- * The time, when the last jiffy update happened. Protected by jiffies_lock.
- */
-static ktime_t last_jiffies_update;
-
 struct tick_sched *tick_get_tick_sched(int cpu)
 {
        return &per_cpu(tick_cpu_sched, cpu);
 }
 
+#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+/*
+ * The time, when the last jiffy update happened. Protected by jiffies_lock.
+ */
+static ktime_t last_jiffies_update;
+
 /*
  * Must be called with interrupts disabled !
  */
@@ -151,6 +152,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
        update_process_times(user_mode(regs));
        profile_tick(CPU_PROFILING);
 }
+#endif
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
@@ -993,9 +995,9 @@ static void tick_nohz_switch_to_nohz(void)
        /* Get the next period */
        next = tick_init_jiffy_update();
 
-       hrtimer_forward_now(&ts->sched_timer, tick_period);
        hrtimer_set_expires(&ts->sched_timer, next);
-       tick_program_event(next, 1);
+       hrtimer_forward_now(&ts->sched_timer, tick_period);
+       tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
        tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
 
index 34b4cedfa80da0de969c12d1551620513f93d02c..1f1d3aae4b50815693b90b511c3d5e1531f35e17 100644 (file)
@@ -131,7 +131,7 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
                printk_deferred("         timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
        } else {
                if (offset > (max_cycles >> 1)) {
-                       printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the the '%s' clock's 50%% safety margin (%lld)\n",
+                       printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
                                        offset, name, max_cycles >> 1);
                        printk_deferred("      timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
                }
index f75e35b6014900da71ffa438a507c34f19e36e3a..ba7d8b288bb37250ce9e9c0e5f562b45832c4ca9 100644 (file)
@@ -69,7 +69,7 @@ print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
        print_name_offset(m, taddr);
        SEQ_printf(m, ", ");
        print_name_offset(m, timer->function);
-       SEQ_printf(m, ", S:%02lx", timer->state);
+       SEQ_printf(m, ", S:%02x", timer->state);
 #ifdef CONFIG_TIMER_STATS
        SEQ_printf(m, ", ");
        print_name_offset(m, timer->start_site);
index 45dd798bcd37e7bd529e61932e51a30366590626..326a75e884dbf3f46513538861a028c7be459f09 100644 (file)
@@ -191,14 +191,17 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
        struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
        struct bpf_array *array = container_of(map, struct bpf_array, map);
        struct perf_event *event;
+       struct file *file;
 
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       event = (struct perf_event *)array->ptrs[index];
-       if (!event)
+       file = (struct file *)array->ptrs[index];
+       if (unlikely(!file))
                return -ENOENT;
 
+       event = file->private_data;
+
        /* make sure event is local and doesn't have pmu::count */
        if (event->oncpu != smp_processor_id() ||
            event->pmu->count)
@@ -228,6 +231,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
        void *data = (void *) (long) r4;
        struct perf_sample_data sample_data;
        struct perf_event *event;
+       struct file *file;
        struct perf_raw_record raw = {
                .size = size,
                .data = data,
@@ -236,10 +240,12 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
        if (unlikely(index >= array->map.max_entries))
                return -E2BIG;
 
-       event = (struct perf_event *)array->ptrs[index];
-       if (unlikely(!event))
+       file = (struct file *)array->ptrs[index];
+       if (unlikely(!file))
                return -ENOENT;
 
+       event = file->private_data;
+
        if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
                     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
                return -EINVAL;
index eb4220a132ecd1be9002cf35edf82870969d6c82..81b87451c0ea145f93ba41e47dacc6a2acec8189 100644 (file)
@@ -15,4 +15,5 @@
 
 EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
+EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle);
 
index 87fb9801bd9eead0d6e45bef49bc1eefc324c6a7..d9293402ee685ae59007fe1ade71866e121a0882 100644 (file)
@@ -1751,7 +1751,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 {
        __buffer_unlock_commit(buffer, event);
 
-       ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
+       ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
        ftrace_trace_userstack(buffer, flags, pc);
 }
 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
index c9956440d0e609c51e2032f6f625c4061ba764a4..21b81a41dae572a3263c7b731efa97a2d25872e9 100644 (file)
@@ -30,7 +30,7 @@
 struct trace_kprobe {
        struct list_head        list;
        struct kretprobe        rp;     /* Use rp.kp for kprobe use */
-       unsigned long           nhit;
+       unsigned long __percpu *nhit;
        const char              *symbol;        /* symbol name */
        struct trace_probe      tp;
 };
@@ -274,6 +274,10 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
        if (!tk)
                return ERR_PTR(ret);
 
+       tk->nhit = alloc_percpu(unsigned long);
+       if (!tk->nhit)
+               goto error;
+
        if (symbol) {
                tk->symbol = kstrdup(symbol, GFP_KERNEL);
                if (!tk->symbol)
@@ -313,6 +317,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
 error:
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
+       free_percpu(tk->nhit);
        kfree(tk);
        return ERR_PTR(ret);
 }
@@ -327,6 +332,7 @@ static void free_trace_kprobe(struct trace_kprobe *tk)
        kfree(tk->tp.call.class->system);
        kfree(tk->tp.call.name);
        kfree(tk->symbol);
+       free_percpu(tk->nhit);
        kfree(tk);
 }
 
@@ -874,9 +880,14 @@ static const struct file_operations kprobe_events_ops = {
 static int probes_profile_seq_show(struct seq_file *m, void *v)
 {
        struct trace_kprobe *tk = v;
+       unsigned long nhit = 0;
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               nhit += *per_cpu_ptr(tk->nhit, cpu);
 
        seq_printf(m, "  %-44s %15lu %15lu\n",
-                  trace_event_name(&tk->tp.call), tk->nhit,
+                  trace_event_name(&tk->tp.call), nhit,
                   tk->rp.kp.nmissed);
 
        return 0;
@@ -1225,7 +1236,7 @@ static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
        struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
 
-       tk->nhit++;
+       raw_cpu_inc(*tk->nhit);
 
        if (tk->tp.flags & TP_FLAG_TRACE)
                kprobe_trace_func(tk, regs);
@@ -1242,7 +1253,7 @@ kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
        struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
 
-       tk->nhit++;
+       raw_cpu_inc(*tk->nhit);
 
        if (tk->tp.flags & TP_FLAG_TRACE)
                kretprobe_trace_func(tk, ri, regs);
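
The kprobe hit counter above becomes a per-CPU variable: each probe fires raw_cpu_inc() on its own CPU's slot, and the profile file sums the slots when read, trading a small read cost for contention-free updates. An illustrative userspace analogue (a plain array stands in for alloc_percpu(); not kernel code):

    #include <stdio.h>

    #define NR_CPUS 4

    static unsigned long nhit[NR_CPUS];

    static void record_hit(int cpu)
    {
            nhit[cpu]++;                   /* analogous to raw_cpu_inc(*tk->nhit) */
    }

    static unsigned long total_hits(void)
    {
            unsigned long sum = 0;
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    sum += nhit[cpu];      /* analogous to the for_each_possible_cpu() sum */
            return sum;
    }

    int main(void)
    {
            record_hit(0);
            record_hit(0);
            record_hit(2);
            printf("%lu hits\n", total_hits());    /* prints 3 */
            return 0;
    }
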
index dda9e6742950305f36fbe920f9fe0c6f68d83fbf..202df6cffccaab333c02facc56b28082b09e5e42 100644 (file)
@@ -125,6 +125,13 @@ check_stack(unsigned long ip, unsigned long *stack)
                        break;
        }
 
+       /*
+        * Some archs may not have the passed in ip in the dump.
+        * If that happens, we need to show everything.
+        */
+       if (i == stack_trace_max.nr_entries)
+               i = 0;
+
        /*
         * Now find where in the stack these are.
         */
index 61a0264e28f9b5917c0e8a60bb9668b21e59c4b2..7ff5dc7d2ac5f47395bc3be8484c642c5d577b6b 100644 (file)
@@ -301,7 +301,23 @@ static DEFINE_SPINLOCK(wq_mayday_lock);    /* protects wq->maydays list */
 static LIST_HEAD(workqueues);          /* PR: list of all workqueues */
 static bool workqueue_freezing;                /* PL: have wqs started freezing? */
 
-static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */
+/* PL: allowable cpus for unbound wqs and work items */
+static cpumask_var_t wq_unbound_cpumask;
+
+/* CPU where unbound work was last round robin scheduled from this CPU */
+static DEFINE_PER_CPU(int, wq_rr_cpu_last);
+
+/*
+ * Local execution of unbound work items is no longer guaranteed.  The
+ * following always forces round-robin CPU selection on unbound work items
+ * to uncover usages which depend on it.
+ */
+#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
+static bool wq_debug_force_rr_cpu = true;
+#else
+static bool wq_debug_force_rr_cpu = false;
+#endif
+module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
@@ -570,6 +586,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
                                                  int node)
 {
        assert_rcu_or_wq_mutex_or_pool_mutex(wq);
+
+       /*
+        * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
+        * delayed item is pending.  The plan is to keep CPU -> NODE
+        * mapping valid and stable across CPU on/offlines.  Once that
+        * happens, this workaround can be removed.
+        */
+       if (unlikely(node == NUMA_NO_NODE))
+               return wq->dfl_pwq;
+
        return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 }
 
@@ -1298,6 +1324,39 @@ static bool is_chained_work(struct workqueue_struct *wq)
        return worker && worker->current_pwq->wq == wq;
 }
 
+/*
+ * When queueing an unbound work item to a wq, prefer local CPU if allowed
+ * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
+ * avoid perturbing sensitive tasks.
+ */
+static int wq_select_unbound_cpu(int cpu)
+{
+       static bool printed_dbg_warning;
+       int new_cpu;
+
+       if (likely(!wq_debug_force_rr_cpu)) {
+               if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+                       return cpu;
+       } else if (!printed_dbg_warning) {
+               pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
+               printed_dbg_warning = true;
+       }
+
+       if (cpumask_empty(wq_unbound_cpumask))
+               return cpu;
+
+       new_cpu = __this_cpu_read(wq_rr_cpu_last);
+       new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
+       if (unlikely(new_cpu >= nr_cpu_ids)) {
+               new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
+               if (unlikely(new_cpu >= nr_cpu_ids))
+                       return cpu;
+       }
+       __this_cpu_write(wq_rr_cpu_last, new_cpu);
+
+       return new_cpu;
+}
+
 static void __queue_work(int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
@@ -1323,7 +1382,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
                return;
 retry:
        if (req_cpu == WORK_CPU_UNBOUND)
-               cpu = raw_smp_processor_id();
+               cpu = wq_select_unbound_cpu(raw_smp_processor_id());
 
        /* pwq which will be used unless @work is executing elsewhere */
        if (!(wq->flags & WQ_UNBOUND))
@@ -1464,13 +1523,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
        timer_stats_timer_set_start_info(&dwork->timer);
 
        dwork->wq = wq;
-       /* timer isn't guaranteed to run in this cpu, record earlier */
-       if (cpu == WORK_CPU_UNBOUND)
-               cpu = raw_smp_processor_id();
        dwork->cpu = cpu;
        timer->expires = jiffies + delay;
 
-       add_timer_on(timer, cpu);
+       if (unlikely(cpu != WORK_CPU_UNBOUND))
+               add_timer_on(timer, cpu);
+       else
+               add_timer(timer);
 }
 
 /**
@@ -2355,7 +2414,8 @@ static void check_flush_dependency(struct workqueue_struct *target_wq,
        WARN_ONCE(current->flags & PF_MEMALLOC,
                  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
                  current->pid, current->comm, target_wq->name, target_func);
-       WARN_ONCE(worker && (worker->current_pwq->wq->flags & WQ_MEM_RECLAIM),
+       WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
+                             (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
                  "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
                  worker->current_pwq->wq->name, worker->current_func,
                  target_wq->name, target_func);
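Editor's note on the workqueue hunks above: queue_work() on an unbound workqueue now picks any CPU allowed by wq_unbound_cpumask (and always round-robins when workqueue.debug_force_rr_cpu is set), so local execution is only a preference. Below is a minimal sketch of how a caller that silently relied on the old local-CPU behaviour could make its requirement explicit; the work item and function names are hypothetical and not taken from this merge.

#include <linux/workqueue.h>
#include <linux/smp.h>

static void my_work_fn(struct work_struct *work)
{
	/* work that used to assume it ran on the queueing CPU */
}
static DECLARE_WORK(my_work, my_work_fn);

static void queue_my_work(void)
{
	/*
	 * Plain queue_work(system_unbound_wq, ...) may now land on any
	 * allowed CPU.  Passing the queueing CPU keeps the item on the
	 * local node's pool; a bound workqueue such as system_wq is
	 * needed if strict per-CPU execution is required.
	 */
	queue_work_on(raw_smp_processor_id(), system_unbound_wq, &my_work);
}

The new CONFIG_DEBUG_WQ_FORCE_RR_CPU option and its workqueue.debug_force_rr_cpu parameter exist precisely to flush out callers with such hidden dependencies.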
index a7f278d2ed8ff21dbf64c6f0193e60c0ea29d5ac..11fc39b4032b5aa0d4e1958ecb1cabdc43cdcb07 100644 (file)
@@ -254,7 +254,7 @@ static int do_op(struct sw842_param *p, u8 o)
                case OP_ACTION_NOOP:
                        break;
                default:
-                       pr_err("Interal error, invalid op %x\n", op);
+                       pr_err("Internal error, invalid op %x\n", op);
                        return -EINVAL;
                }
 
index ecb9e75614bf87f1e368074d6ef84bfe3f1003b8..fe802ae067c6d2ac2e7b036c968265791bbfe427 100644 (file)
@@ -1256,6 +1256,39 @@ config TORTURE_TEST
        tristate
        default n
 
+config RCU_PERF_TEST
+       tristate "performance tests for RCU"
+       depends on DEBUG_KERNEL
+       select TORTURE_TEST
+       select SRCU
+       select TASKS_RCU
+       default n
+       help
+         This option provides a kernel module that runs performance
+         tests on the RCU infrastructure.  The kernel module may be built
+         after the fact on the running kernel to be tested, if desired.
+
+         Say Y here if you want RCU performance tests to be built into
+         the kernel.
+         Say M if you want the RCU performance tests to build as a module.
+         Say N if you are unsure.
+
+config RCU_PERF_TEST_RUNNABLE
+       bool "performance tests for RCU runnable by default"
+       depends on RCU_PERF_TEST = y
+       default n
+       help
+         This option provides a way to build the RCU performance tests
+         directly into the kernel without them starting up at boot time.
+         You can use /sys/module to manually override this setting.
+         That setting is available only when the RCU performance
+         tests have been built into the kernel.
+
+         Say Y here if you want the RCU performance tests to start during
+         boot (you probably don't).
+         Say N here if you want the RCU performance tests to start only
+         after being manually enabled via /sys/module.
+
 config RCU_TORTURE_TEST
        tristate "torture tests for RCU"
        depends on DEBUG_KERNEL
@@ -1400,6 +1433,21 @@ config RCU_EQS_DEBUG
 
 endmenu # "RCU Debugging"
 
+config DEBUG_WQ_FORCE_RR_CPU
+       bool "Force round-robin CPU selection for unbound work items"
+       depends on DEBUG_KERNEL
+       default n
+       help
+         Workqueue used to implicitly guarantee that work items queued
+         without explicit CPU specified are put on the local CPU.  This
+         guarantee is no longer true and while local CPU is still
+         preferred work items may be put on foreign CPUs.  Kernel
+         parameter "workqueue.debug_force_rr_cpu" is added to force
+         round-robin CPU selection to flush out usages which depend on the
+         now broken guarantee.  This config option enables the debug
+         feature by default.  When enabled, memory and cache locality will
+         be impacted.
+
 config DEBUG_BLOCK_EXT_DEVT
         bool "Force extended block device numbers and spread them"
        depends on DEBUG_KERNEL
index d62de8bf022d2534182fa2bcbdf31b4eb142a8a3..1234818143204a754ec6df5ae8073107c5171dc5 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/atomic.h>
 
 #ifdef CONFIG_X86
-#include <asm/processor.h>     /* for boot_cpu_has below */
+#include <asm/cpufeature.h>    /* for boot_cpu_has below */
 #endif
 
 #define TEST(bit, op, c_op, val)                               \
index 547f7f923dbcbd24f8bb99e13ef7c13db947b8dd..519b5a10fd704dd412c7cc9c7327fcb2da3c6f0e 100644 (file)
@@ -21,7 +21,7 @@
 #define ODEBUG_HASH_BITS       14
 #define ODEBUG_HASH_SIZE       (1 << ODEBUG_HASH_BITS)
 
-#define ODEBUG_POOL_SIZE       512
+#define ODEBUG_POOL_SIZE       1024
 #define ODEBUG_POOL_MIN_LEVEL  256
 
 #define ODEBUG_CHUNK_SHIFT     PAGE_SHIFT
index 6745c6230db3403629048256968443f51b777655..c30d07e99dba4cc32be6aeb4d353d79058509b4b 100644 (file)
@@ -25,6 +25,7 @@ static atomic_t dump_lock = ATOMIC_INIT(-1);
 
 asmlinkage __visible void dump_stack(void)
 {
+       unsigned long flags;
        int was_locked;
        int old;
        int cpu;
@@ -33,9 +34,8 @@ asmlinkage __visible void dump_stack(void)
         * Permit this cpu to perform nested stack dumps while serialising
         * against other CPUs
         */
-       preempt_disable();
-
 retry:
+       local_irq_save(flags);
        cpu = smp_processor_id();
        old = atomic_cmpxchg(&dump_lock, -1, cpu);
        if (old == -1) {
@@ -43,6 +43,7 @@ retry:
        } else if (old == cpu) {
                was_locked = 1;
        } else {
+               local_irq_restore(flags);
                cpu_relax();
                goto retry;
        }
@@ -52,7 +53,7 @@ retry:
        if (!was_locked)
                atomic_set(&dump_lock, -1);
 
-       preempt_enable();
+       local_irq_restore(flags);
 }
 #else
 asmlinkage __visible void dump_stack(void)
index 8f25652f40d4f3aafc770b87b2692b54629a6273..a71cf1bdd4c94a92c4c888be21cad8eebf813c99 100644 (file)
@@ -17,7 +17,7 @@
  *
  *   \Sum_{j} p_{j} = 1,
  *
- * This formula can be straightforwardly computed by maintaing denominator
+ * This formula can be straightforwardly computed by maintaining denominator
  * (let's call it 'd') and for each event type its numerator (let's call it
  * 'n_j'). When an event of type 'j' happens, we simply need to do:
  *   n_j++; d++;
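A small worked example of the numerator/denominator bookkeeping described in the comment above (an editor's illustration written as ordinary userspace C, not code from this merge): with counts n_0 = 3, n_1 = 7 and d = 10 the proportions are 0.3 and 0.7; after one more type-0 event they become 4/11 and 7/11, still summing to 1.

#include <stdio.h>

int main(void)
{
	unsigned long n[2] = { 3, 7 };	/* per-type numerators n_j */
	unsigned long d = 10;		/* shared denominator      */

	n[0]++; d++;			/* one event of type 0     */

	printf("p_0 = %.3f  p_1 = %.3f  sum = %.3f\n",
	       (double)n[0] / d, (double)n[1] / d,
	       (double)(n[0] + n[1]) / d);
	return 0;
}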
index d74cf7a29afdb043112fee5c9ad7b37a631a9985..0507fa5d84c534917d0842a453bdbcaba386422a 100644 (file)
@@ -282,9 +282,9 @@ void klist_iter_init_node(struct klist *k, struct klist_iter *i,
                          struct klist_node *n)
 {
        i->i_klist = k;
-       i->i_cur = n;
-       if (n)
-               kref_get(&n->n_ref);
+       i->i_cur = NULL;
+       if (n && kref_get_unless_zero(&n->n_ref))
+               i->i_cur = n;
 }
 EXPORT_SYMBOL_GPL(klist_iter_init_node);
 
index 7cbccd2b4c72042595484e32c2e11906948414b7..445dcaeb0f56de7882a1b1278a90ef7931f4099c 100644 (file)
@@ -861,6 +861,7 @@ struct kobject *kset_find_obj(struct kset *kset, const char *name)
        spin_unlock(&kset->list_lock);
        return ret;
 }
+EXPORT_SYMBOL_GPL(kset_find_obj);
 
 static void kset_release(struct kobject *kobj)
 {
index 6111bcb28376bfe412830431ca32f3e4607cfc29..27fe74948882e9558ad8cc72dfa6da1b7051dce7 100644 (file)
@@ -12,7 +12,7 @@
  * particular cpu can (and will) wrap - this is fine, when we go to shutdown the
  * percpu counters will all sum to the correct value
  *
- * (More precisely: because moduler arithmatic is commutative the sum of all the
+ * (More precisely: because modular arithmetic is commutative the sum of all the
  * percpu_count vars will be equal to what it would have been if all the gets
  * and puts were done to a single integer, even if some of the percpu integers
  * overflow or underflow).
index fcf5d98574ce46871dca087d2c803dbfb67c0b81..6b79e9026e24894000a2bdf4180db80f8190b357 100644 (file)
@@ -1019,9 +1019,13 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                return 0;
 
        radix_tree_for_each_slot(slot, root, &iter, first_index) {
-               results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+               results[ret] = rcu_dereference_raw(*slot);
                if (!results[ret])
                        continue;
+               if (radix_tree_is_indirect_ptr(results[ret])) {
+                       slot = radix_tree_iter_retry(&iter);
+                       continue;
+               }
                if (++ret == max_items)
                        break;
        }
@@ -1098,9 +1102,13 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
                return 0;
 
        radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
-               results[ret] = indirect_to_ptr(rcu_dereference_raw(*slot));
+               results[ret] = rcu_dereference_raw(*slot);
                if (!results[ret])
                        continue;
+               if (radix_tree_is_indirect_ptr(results[ret])) {
+                       slot = radix_tree_iter_retry(&iter);
+                       continue;
+               }
                if (++ret == max_items)
                        break;
        }
index bafa9933fa768d6f76e4ade1296c873a3c38c139..004fc70fc56a3d06947f9e89c60e40e272ce551c 100644 (file)
@@ -598,9 +598,9 @@ EXPORT_SYMBOL(sg_miter_next);
  *
  * Description:
  *   Stops mapping iterator @miter.  @miter should have been started
- *   started using sg_miter_start().  A stopped iteration can be
- *   resumed by calling sg_miter_next() on it.  This is useful when
- *   resources (kmap) need to be released during iteration.
+ *   using sg_miter_start().  A stopped iteration can be resumed by
+ *   calling sg_miter_next() on it.  This is useful when resources (kmap)
+ *   need to be released during iteration.
  *
  * Context:
  *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
index 98866a770770c8bba0acf7bdbacea4699d9f14bf..25b5cbfb7615bd63da19677c877b9e49c3f519e3 100644 (file)
@@ -327,36 +327,67 @@ out:
 }
 
 #define string_get_size_maxbuf 16
-#define test_string_get_size_one(size, blk_size, units, exp_result)            \
+#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2)    \
        do {                                                                   \
-               BUILD_BUG_ON(sizeof(exp_result) >= string_get_size_maxbuf);    \
-               __test_string_get_size((size), (blk_size), (units),            \
-                                      (exp_result));                          \
+               BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf);  \
+               BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf);   \
+               __test_string_get_size((size), (blk_size), (exp_result10),     \
+                                      (exp_result2));                         \
        } while (0)
 
 
-static __init void __test_string_get_size(const u64 size, const u64 blk_size,
-                                         const enum string_size_units units,
-                                         const char *exp_result)
+static __init void test_string_get_size_check(const char *units,
+                                             const char *exp,
+                                             char *res,
+                                             const u64 size,
+                                             const u64 blk_size)
 {
-       char buf[string_get_size_maxbuf];
-
-       string_get_size(size, blk_size, units, buf, sizeof(buf));
-       if (!memcmp(buf, exp_result, strlen(exp_result) + 1))
+       if (!memcmp(res, exp, strlen(exp) + 1))
                return;
 
-       buf[sizeof(buf) - 1] = '\0';
-       pr_warn("Test 'test_string_get_size_one' failed!\n");
-       pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %d\n",
+       res[string_get_size_maxbuf - 1] = '\0';
+
+       pr_warn("Test 'test_string_get_size' failed!\n");
+       pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n",
                size, blk_size, units);
-       pr_warn("expected: '%s', got '%s'\n", exp_result, buf);
+       pr_warn("expected: '%s', got '%s'\n", exp, res);
+}
+
+static __init void __test_string_get_size(const u64 size, const u64 blk_size,
+                                         const char *exp_result10,
+                                         const char *exp_result2)
+{
+       char buf10[string_get_size_maxbuf];
+       char buf2[string_get_size_maxbuf];
+
+       string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
+       string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
+
+       test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
+                                  size, blk_size);
+
+       test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
+                                  size, blk_size);
 }
 
 static __init void test_string_get_size(void)
 {
-       test_string_get_size_one(16384, 512, STRING_UNITS_2, "8.00 MiB");
-       test_string_get_size_one(8192, 4096, STRING_UNITS_10, "32.7 MB");
-       test_string_get_size_one(1, 512, STRING_UNITS_10, "512 B");
+       /* small values */
+       test_string_get_size_one(0, 512, "0 B", "0 B");
+       test_string_get_size_one(1, 512, "512 B", "512 B");
+       test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB");
+
+       /* normal values */
+       test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB");
+       test_string_get_size_one(500118192, 512, "256 GB", "238 GiB");
+       test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB");
+
+       /* weird block sizes */
+       test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB");
+
+       /* huge values */
+       test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB");
+       test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
 }
 
 static int __init test_string_helpers_init(void)
index c61b299e367ffac86450ac59c123d0925285b96c..915d75df20864d07428376750c1bd148c216a585 100644 (file)
@@ -46,8 +46,11 @@ struct test_key {
        bool                    (*test_key)(void);
 };
 
-#define test_key_func(key, branch) \
-       ({bool func(void) { return branch(key); } func; })
+#define test_key_func(key, branch)     \
+static bool key ## _ ## branch(void)   \
+{                                      \
+       return branch(&key);            \
+}
 
 static void invert_key(struct static_key *key)
 {
@@ -92,6 +95,25 @@ static int verify_keys(struct test_key *keys, int size, bool invert)
        return 0;
 }
 
+test_key_func(old_true_key, static_key_true)
+test_key_func(old_false_key, static_key_false)
+test_key_func(true_key, static_branch_likely)
+test_key_func(true_key, static_branch_unlikely)
+test_key_func(false_key, static_branch_likely)
+test_key_func(false_key, static_branch_unlikely)
+test_key_func(base_old_true_key, static_key_true)
+test_key_func(base_inv_old_true_key, static_key_true)
+test_key_func(base_old_false_key, static_key_false)
+test_key_func(base_inv_old_false_key, static_key_false)
+test_key_func(base_true_key, static_branch_likely)
+test_key_func(base_true_key, static_branch_unlikely)
+test_key_func(base_inv_true_key, static_branch_likely)
+test_key_func(base_inv_true_key, static_branch_unlikely)
+test_key_func(base_false_key, static_branch_likely)
+test_key_func(base_false_key, static_branch_unlikely)
+test_key_func(base_inv_false_key, static_branch_likely)
+test_key_func(base_inv_false_key, static_branch_unlikely)
+
 static int __init test_static_key_init(void)
 {
        int ret;
@@ -102,95 +124,95 @@ static int __init test_static_key_init(void)
                {
                        .init_state     = true,
                        .key            = &old_true_key,
-                       .test_key       = test_key_func(&old_true_key, static_key_true),
+                       .test_key       = &old_true_key_static_key_true,
                },
                {
                        .init_state     = false,
                        .key            = &old_false_key,
-                       .test_key       = test_key_func(&old_false_key, static_key_false),
+                       .test_key       = &old_false_key_static_key_false,
                },
                /* internal keys - new keys */
                {
                        .init_state     = true,
                        .key            = &true_key.key,
-                       .test_key       = test_key_func(&true_key, static_branch_likely),
+                       .test_key       = &true_key_static_branch_likely,
                },
                {
                        .init_state     = true,
                        .key            = &true_key.key,
-                       .test_key       = test_key_func(&true_key, static_branch_unlikely),
+                       .test_key       = &true_key_static_branch_unlikely,
                },
                {
                        .init_state     = false,
                        .key            = &false_key.key,
-                       .test_key       = test_key_func(&false_key, static_branch_likely),
+                       .test_key       = &false_key_static_branch_likely,
                },
                {
                        .init_state     = false,
                        .key            = &false_key.key,
-                       .test_key       = test_key_func(&false_key, static_branch_unlikely),
+                       .test_key       = &false_key_static_branch_unlikely,
                },
                /* external keys - old keys */
                {
                        .init_state     = true,
                        .key            = &base_old_true_key,
-                       .test_key       = test_key_func(&base_old_true_key, static_key_true),
+                       .test_key       = &base_old_true_key_static_key_true,
                },
                {
                        .init_state     = false,
                        .key            = &base_inv_old_true_key,
-                       .test_key       = test_key_func(&base_inv_old_true_key, static_key_true),
+                       .test_key       = &base_inv_old_true_key_static_key_true,
                },
                {
                        .init_state     = false,
                        .key            = &base_old_false_key,
-                       .test_key       = test_key_func(&base_old_false_key, static_key_false),
+                       .test_key       = &base_old_false_key_static_key_false,
                },
                {
                        .init_state     = true,
                        .key            = &base_inv_old_false_key,
-                       .test_key       = test_key_func(&base_inv_old_false_key, static_key_false),
+                       .test_key       = &base_inv_old_false_key_static_key_false,
                },
                /* external keys - new keys */
                {
                        .init_state     = true,
                        .key            = &base_true_key.key,
-                       .test_key       = test_key_func(&base_true_key, static_branch_likely),
+                       .test_key       = &base_true_key_static_branch_likely,
                },
                {
                        .init_state     = true,
                        .key            = &base_true_key.key,
-                       .test_key       = test_key_func(&base_true_key, static_branch_unlikely),
+                       .test_key       = &base_true_key_static_branch_unlikely,
                },
                {
                        .init_state     = false,
                        .key            = &base_inv_true_key.key,
-                       .test_key       = test_key_func(&base_inv_true_key, static_branch_likely),
+                       .test_key       = &base_inv_true_key_static_branch_likely,
                },
                {
                        .init_state     = false,
                        .key            = &base_inv_true_key.key,
-                       .test_key       = test_key_func(&base_inv_true_key, static_branch_unlikely),
+                       .test_key       = &base_inv_true_key_static_branch_unlikely,
                },
                {
                        .init_state     = false,
                        .key            = &base_false_key.key,
-                       .test_key       = test_key_func(&base_false_key, static_branch_likely),
+                       .test_key       = &base_false_key_static_branch_likely,
                },
                {
                        .init_state     = false,
                        .key            = &base_false_key.key,
-                       .test_key       = test_key_func(&base_false_key, static_branch_unlikely),
+                       .test_key       = &base_false_key_static_branch_unlikely,
                },
                {
                        .init_state     = true,
                        .key            = &base_inv_false_key.key,
-                       .test_key       = test_key_func(&base_inv_false_key, static_branch_likely),
+                       .test_key       = &base_inv_false_key_static_branch_likely,
                },
                {
                        .init_state     = true,
                        .key            = &base_inv_false_key.key,
-                       .test_key       = test_key_func(&base_inv_false_key, static_branch_unlikely),
+                       .test_key       = &base_inv_false_key_static_branch_unlikely,
                },
        };
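For readers following the lib/test_static_keys.c change above: the rewritten test_key_func() macro pastes the key and branch names together to generate a plain function, avoiding the old GCC nested-function expression. For example, the first instantiation expands roughly to:

static bool old_true_key_static_key_true(void)
{
	return static_key_true(&old_true_key);
}

which is what the table entries now reference as &old_true_key_static_key_true.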
 
index 97a4e06b15c00dfd7f8b8bc0f1e1e4610b769938..03cbfa072f42a7378ed57b8b1060641b31e7d97e 100644 (file)
@@ -624,7 +624,7 @@ config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
        bool
 
 config DEFERRED_STRUCT_PAGE_INIT
-       bool "Defer initialisation of struct pages to kswapd"
+       bool "Defer initialisation of struct pages to kthreads"
        default n
        depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
        depends on MEMORY_HOTPLUG
@@ -633,9 +633,10 @@ config DEFERRED_STRUCT_PAGE_INIT
          single thread. On very large machines this can take a considerable
          amount of time. If this option is set, large machines will bring up
          a subset of memmap at boot and then initialise the rest in parallel
-         when kswapd starts. This has a potential performance impact on
-         processes running early in the lifetime of the systemm until kswapd
-         finishes the initialisation.
+         by starting one-off "pgdatinitX" kernel thread for each node X. This
+         has a potential performance impact on processes running early in the
+         lifetime of the system until these kthreads finish the
+         initialisation.
 
 config IDLE_PAGE_TRACKING
        bool "Enable idle page tracking"
index cc5d29d2da9b4b36be3fde383267ff10c59e1422..926c76d56388d12ea723b68add1e5b7ab6613842 100644 (file)
@@ -989,7 +989,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout)
                 * here rather than calling cond_resched().
                 */
                if (current->flags & PF_WQ_WORKER)
-                       schedule_timeout(1);
+                       schedule_timeout_uninterruptible(1);
                else
                        cond_resched();
 
index 300117f1a08f35c27c531d55ed0a08a1a9b81e50..57b3e9bd6bc52727b62d563d0dd482eba41badf0 100644 (file)
 /*
  * balloon_page_enqueue - allocates a new page and inserts it into the balloon
  *                       page list.
- * @b_dev_info: balloon device decriptor where we will insert a new page to
+ * @b_dev_info: balloon device descriptor where we will insert a new page to
  *
  * Driver must call it to properly allocate a new enlisted balloon page
- * before definetively removing it from the guest system.
+ * before definitively removing it from the guest system.
  * This function returns the page address for the recently enqueued page or
  * NULL in the case we fail to allocate a new page this turn.
  */
index 8fc50811119b28b54f15ac875ddc956217acc570..ba5d8f3e6d68a3769589c372adc3183b4addfb40 100644 (file)
@@ -22,7 +22,7 @@
  * cleancache_ops is set by cleancache_register_ops to contain the pointers
  * to the cleancache "backend" implementation functions.
  */
-static struct cleancache_ops *cleancache_ops __read_mostly;
+static const struct cleancache_ops *cleancache_ops __read_mostly;
 
 /*
  * Counters available via /sys/kernel/debug/cleancache (if debugfs is
@@ -49,7 +49,7 @@ static void cleancache_register_ops_sb(struct super_block *sb, void *unused)
 /*
  * Register operations for cleancache. Returns 0 on success.
  */
-int cleancache_register_ops(struct cleancache_ops *ops)
+int cleancache_register_ops(const struct cleancache_ops *ops)
 {
        if (cmpxchg(&cleancache_ops, NULL, ops))
                return -EBUSY;
index b64a36175884e07604b0e216bc2d545a2892dcb7..7bf19ffa21999c13fa1f24dc01a6bda77217688c 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -430,10 +430,8 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
                         * Anon pages in shared mappings are surprising: now
                         * just reject it.
                         */
-                       if (!is_cow_mapping(vm_flags)) {
-                               WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
+                       if (!is_cow_mapping(vm_flags))
                                return -EFAULT;
-                       }
                }
        } else if (!(vm_flags & VM_READ)) {
                if (!(gup_flags & FOLL_FORCE))
index fd3a07b3e6f4e086b2111c312a78512bed927f9b..aea8f7a42df97d7185f626d5bbc445c64f376eb1 100644 (file)
@@ -138,9 +138,6 @@ static struct khugepaged_scan khugepaged_scan = {
        .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
-static DEFINE_SPINLOCK(split_queue_lock);
-static LIST_HEAD(split_queue);
-static unsigned long split_queue_len;
 static struct shrinker deferred_split_shrinker;
 
 static void set_recommended_min_free_kbytes(void)
@@ -861,7 +858,8 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
                return false;
        entry = mk_pmd(zero_page, vma->vm_page_prot);
        entry = pmd_mkhuge(entry);
-       pgtable_trans_huge_deposit(mm, pmd, pgtable);
+       if (pgtable)
+               pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, haddr, pmd, entry);
        atomic_long_inc(&mm->nr_ptes);
        return true;
@@ -1039,13 +1037,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        spinlock_t *dst_ptl, *src_ptl;
        struct page *src_page;
        pmd_t pmd;
-       pgtable_t pgtable;
+       pgtable_t pgtable = NULL;
        int ret;
 
-       ret = -ENOMEM;
-       pgtable = pte_alloc_one(dst_mm, addr);
-       if (unlikely(!pgtable))
-               goto out;
+       if (!vma_is_dax(vma)) {
+               ret = -ENOMEM;
+               pgtable = pte_alloc_one(dst_mm, addr);
+               if (unlikely(!pgtable))
+                       goto out;
+       }
 
        dst_ptl = pmd_lock(dst_mm, dst_pmd);
        src_ptl = pmd_lockptr(src_mm, src_pmd);
@@ -1076,7 +1076,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                goto out_unlock;
        }
 
-       if (pmd_trans_huge(pmd)) {
+       if (!vma_is_dax(vma)) {
                /* thp accounting separate from pmd_devmap accounting */
                src_page = pmd_page(pmd);
                VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
@@ -2860,6 +2860,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
        young = pmd_young(*pmd);
        dirty = pmd_dirty(*pmd);
 
+       pmdp_huge_split_prepare(vma, haddr, pmd);
        pgtable = pgtable_trans_huge_withdraw(mm, pmd);
        pmd_populate(mm, &_pmd, pgtable);
 
@@ -3358,6 +3359,7 @@ int total_mapcount(struct page *page)
 int split_huge_page_to_list(struct page *page, struct list_head *list)
 {
        struct page *head = compound_head(page);
+       struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
        struct anon_vma *anon_vma;
        int count, mapcount, ret;
        bool mlocked;
@@ -3401,19 +3403,19 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                lru_add_drain();
 
        /* Prevent deferred_split_scan() touching ->_count */
-       spin_lock_irqsave(&split_queue_lock, flags);
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
        count = page_count(head);
        mapcount = total_mapcount(head);
        if (!mapcount && count == 1) {
                if (!list_empty(page_deferred_list(head))) {
-                       split_queue_len--;
+                       pgdata->split_queue_len--;
                        list_del(page_deferred_list(head));
                }
-               spin_unlock_irqrestore(&split_queue_lock, flags);
+               spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
                __split_huge_page(page, list);
                ret = 0;
        } else if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
-               spin_unlock_irqrestore(&split_queue_lock, flags);
+               spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
                pr_alert("total_mapcount: %u, page_count(): %u\n",
                                mapcount, count);
                if (PageTail(page))
@@ -3421,7 +3423,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                dump_page(page, "total_mapcount(head) > 0");
                BUG();
        } else {
-               spin_unlock_irqrestore(&split_queue_lock, flags);
+               spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
                unfreeze_page(anon_vma, head);
                ret = -EBUSY;
        }
@@ -3436,64 +3438,65 @@ out:
 
 void free_transhuge_page(struct page *page)
 {
+       struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
        unsigned long flags;
 
-       spin_lock_irqsave(&split_queue_lock, flags);
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
        if (!list_empty(page_deferred_list(page))) {
-               split_queue_len--;
+               pgdata->split_queue_len--;
                list_del(page_deferred_list(page));
        }
-       spin_unlock_irqrestore(&split_queue_lock, flags);
+       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
        free_compound_page(page);
 }
 
 void deferred_split_huge_page(struct page *page)
 {
+       struct pglist_data *pgdata = NODE_DATA(page_to_nid(page));
        unsigned long flags;
 
        VM_BUG_ON_PAGE(!PageTransHuge(page), page);
 
-       spin_lock_irqsave(&split_queue_lock, flags);
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
        if (list_empty(page_deferred_list(page))) {
-               list_add_tail(page_deferred_list(page), &split_queue);
-               split_queue_len++;
+               list_add_tail(page_deferred_list(page), &pgdata->split_queue);
+               pgdata->split_queue_len++;
        }
-       spin_unlock_irqrestore(&split_queue_lock, flags);
+       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 }
 
 static unsigned long deferred_split_count(struct shrinker *shrink,
                struct shrink_control *sc)
 {
-       /*
-        * Split a page from split_queue will free up at least one page,
-        * at most HPAGE_PMD_NR - 1. We don't track exact number.
-        * Let's use HPAGE_PMD_NR / 2 as ballpark.
-        */
-       return ACCESS_ONCE(split_queue_len) * HPAGE_PMD_NR / 2;
+       struct pglist_data *pgdata = NODE_DATA(sc->nid);
+       return ACCESS_ONCE(pgdata->split_queue_len);
 }
 
 static unsigned long deferred_split_scan(struct shrinker *shrink,
                struct shrink_control *sc)
 {
+       struct pglist_data *pgdata = NODE_DATA(sc->nid);
        unsigned long flags;
        LIST_HEAD(list), *pos, *next;
        struct page *page;
        int split = 0;
 
-       spin_lock_irqsave(&split_queue_lock, flags);
-       list_splice_init(&split_queue, &list);
-
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
        /* Take pin on all head pages to avoid freeing them under us */
-       list_for_each_safe(pos, next, &list) {
+       list_for_each_safe(pos, next, &pgdata->split_queue) {
                page = list_entry((void *)pos, struct page, mapping);
                page = compound_head(page);
-               /* race with put_compound_page() */
-               if (!get_page_unless_zero(page)) {
+               if (get_page_unless_zero(page)) {
+                       list_move(page_deferred_list(page), &list);
+               } else {
+                       /* We lost race with put_compound_page() */
                        list_del_init(page_deferred_list(page));
-                       split_queue_len--;
+                       pgdata->split_queue_len--;
                }
+               if (!--sc->nr_to_scan)
+                       break;
        }
-       spin_unlock_irqrestore(&split_queue_lock, flags);
+       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
        list_for_each_safe(pos, next, &list) {
                page = list_entry((void *)pos, struct page, mapping);
@@ -3505,17 +3508,24 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
                put_page(page);
        }
 
-       spin_lock_irqsave(&split_queue_lock, flags);
-       list_splice_tail(&list, &split_queue);
-       spin_unlock_irqrestore(&split_queue_lock, flags);
+       spin_lock_irqsave(&pgdata->split_queue_lock, flags);
+       list_splice_tail(&list, &pgdata->split_queue);
+       spin_unlock_irqrestore(&pgdata->split_queue_lock, flags);
 
-       return split * HPAGE_PMD_NR / 2;
+       /*
+        * Stop shrinker if we didn't split any page, but the queue is empty.
+        * This can happen if pages were freed under us.
+        */
+       if (!split && list_empty(&pgdata->split_queue))
+               return SHRINK_STOP;
+       return split;
 }
 
 static struct shrinker deferred_split_shrinker = {
        .count_objects = deferred_split_count,
        .scan_objects = deferred_split_scan,
        .seeks = DEFAULT_SEEKS,
+       .flags = SHRINKER_NUMA_AWARE,
 };
 
 #ifdef CONFIG_DEBUG_FS
index 12908dcf58316afd3f4b14d379a8e139249cd396..06ae13e869d0f088cdddc0862aa315ce8d488443 100644 (file)
@@ -1001,7 +1001,7 @@ static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
                nr_nodes--)
 
-#if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
+#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
 static void destroy_compound_gigantic_page(struct page *page,
                                        unsigned int order)
 {
@@ -1214,8 +1214,8 @@ void free_huge_page(struct page *page)
 
        set_page_private(page, 0);
        page->mapping = NULL;
-       BUG_ON(page_count(page));
-       BUG_ON(page_mapcount(page));
+       VM_BUG_ON_PAGE(page_count(page), page);
+       VM_BUG_ON_PAGE(page_mapcount(page), page);
        restore_reserve = PagePrivate(page);
        ClearPagePrivate(page);
 
@@ -1286,6 +1286,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
                set_page_count(p, 0);
                set_compound_head(p, page);
        }
+       atomic_set(compound_mapcount_ptr(page), -1);
 }
 
 /*
index ed8b5ffcf9b16fbfcf3ccba0d182957980ad45ab..a38a21ebddb454540fc1dcae83be516ffc7cdf26 100644 (file)
@@ -216,6 +216,37 @@ static inline bool is_cow_mapping(vm_flags_t flags)
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
 }
 
+/*
+ * These three helpers classify VMAs for virtual memory accounting.
+ */
+
+/*
+ * Executable code area - executable, not writable, not stack
+ */
+static inline bool is_exec_mapping(vm_flags_t flags)
+{
+       return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
+}
+
+/*
+ * Stack area - automatically grows in one direction
+ *
+ * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
+ * do_mmap() forbids all other combinations.
+ */
+static inline bool is_stack_mapping(vm_flags_t flags)
+{
+       return (flags & VM_STACK) == VM_STACK;
+}
+
+/*
+ * Data area - private, writable, not stack
+ */
+static inline bool is_data_mapping(vm_flags_t flags)
+{
+       return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
+}
+
 /* mm/util.c */
 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent);
index d2ed81e59a94c3f81674ead9e84d51ca8ebd9950..dd7989929f13ae16e265bff30fd958f1f13993cb 100644 (file)
@@ -1448,7 +1448,7 @@ void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
  * Remaining API functions
  */
 
-phys_addr_t __init memblock_phys_mem_size(void)
+phys_addr_t __init_memblock memblock_phys_mem_size(void)
 {
        return memblock.memory.total_size;
 }
index 30991f83d0bf54dc537f1927a10883aa5787dd97..38090ca37a08b256ae83633e8709953f842fcbfe 100644 (file)
@@ -1550,9 +1550,30 @@ out:
  */
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
+{
+       return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
+/**
+ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vm_insert_pfn, except that it allows drivers to
+ * override pgprot on a per-page basis.
+ *
+ * This only makes sense for IO mappings, and it makes no sense for
+ * cow mappings.  In general, using multiple vmas is preferable;
+ * vm_insert_pfn_prot should only be used if using multiple VMAs is
+ * impractical.
+ */
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn, pgprot_t pgprot)
 {
        int ret;
-       pgprot_t pgprot = vma->vm_page_prot;
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range).  However we would like
@@ -1574,7 +1595,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        return ret;
 }
-EXPORT_SYMBOL(vm_insert_pfn);
+EXPORT_SYMBOL(vm_insert_pfn_prot);
 
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
                        pfn_t pfn)
@@ -1591,10 +1612,15 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
         * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
         * without pte special, it would there be refcounted as a normal page.
         */
-       if (!HAVE_PTE_SPECIAL && pfn_t_valid(pfn)) {
+       if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
                struct page *page;
 
-               page = pfn_t_to_page(pfn);
+               /*
+                * At this point we are committed to insert_page()
+                * regardless of whether the caller specified flags that
+                * result in pfn_t_has_page() == false.
+                */
+               page = pfn_to_page(pfn_t_to_pfn(pfn));
                return insert_page(vma, addr, page, vma->vm_page_prot);
        }
        return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
@@ -2232,11 +2258,6 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 
        page_cache_get(old_page);
 
-       /*
-        * Only catch write-faults on shared writable pages,
-        * read-only shared pages can get COWed by
-        * get_user_pages(.write=1, .force=1).
-        */
        if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
                int tmp;
 
index 4af58a3a8ffa345c16fe190be76ba9f9e83113d4..979b18cbd343e3422d5b4ccb86d1fab2bb1090e2 100644 (file)
@@ -138,7 +138,7 @@ static struct resource *register_memory_resource(u64 start, u64 size)
        res->name = "System RAM";
        res->start = start;
        res->end = start + size - 1;
-       res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+       res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res) < 0) {
                pr_debug("System RAM resource %pR cannot be added\n", res);
                kfree(res);
index 27d135408a22057a5b166e7eb2bf2e080bbd33f2..4c4187c0e1deeb25bdbf5eb383132d27feb9be90 100644 (file)
@@ -548,8 +548,7 @@ retry:
                        goto retry;
                }
 
-               if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
-                       migrate_page_add(page, qp->pagelist, flags);
+               migrate_page_add(page, qp->pagelist, flags);
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
@@ -625,7 +624,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
        unsigned long endvma = vma->vm_end;
        unsigned long flags = qp->flags;
 
-       if (vma->vm_flags & VM_PFNMAP)
+       if (!vma_migratable(vma))
                return 1;
 
        if (endvma > end)
@@ -644,16 +643,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 
        if (flags & MPOL_MF_LAZY) {
                /* Similar to task_numa_work, skip inaccessible VMAs */
-               if (vma_migratable(vma) &&
-                       vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
+               if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
                        change_prot_numa(vma, start, endvma);
                return 1;
        }
 
-       if ((flags & MPOL_MF_STRICT) ||
-           ((flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) &&
-            vma_migratable(vma)))
-               /* queue pages from current vma */
+       /* queue pages from current vma */
+       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                return 0;
        return 1;
 }
index 84b12624ceb01d83762172634179825b086961fd..e2e9f48b06c2ad7a4fb4a11c61221a84caa5f593 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -42,6 +42,7 @@
 #include <linux/memory.h>
 #include <linux/printk.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/moduleparam.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -69,6 +70,8 @@ const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
 #endif
 
+static bool ignore_rlimit_data = true;
+core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
 
 static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
@@ -387,8 +390,9 @@ static long vma_compute_subtree_gap(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_DEBUG_VM_RB
-static int browse_rb(struct rb_root *root)
+static int browse_rb(struct mm_struct *mm)
 {
+       struct rb_root *root = &mm->mm_rb;
        int i = 0, j, bug = 0;
        struct rb_node *nd, *pn = NULL;
        unsigned long prev = 0, pend = 0;
@@ -411,12 +415,14 @@ static int browse_rb(struct rb_root *root)
                                  vma->vm_start, vma->vm_end);
                        bug = 1;
                }
+               spin_lock(&mm->page_table_lock);
                if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
                        pr_emerg("free gap %lx, correct %lx\n",
                               vma->rb_subtree_gap,
                               vma_compute_subtree_gap(vma));
                        bug = 1;
                }
+               spin_unlock(&mm->page_table_lock);
                i++;
                pn = nd;
                prev = vma->vm_start;
@@ -453,12 +459,16 @@ static void validate_mm(struct mm_struct *mm)
        struct vm_area_struct *vma = mm->mmap;
 
        while (vma) {
+               struct anon_vma *anon_vma = vma->anon_vma;
                struct anon_vma_chain *avc;
 
-               vma_lock_anon_vma(vma);
-               list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-                       anon_vma_interval_tree_verify(avc);
-               vma_unlock_anon_vma(vma);
+               if (anon_vma) {
+                       anon_vma_lock_read(anon_vma);
+                       list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+                               anon_vma_interval_tree_verify(avc);
+                       anon_vma_unlock_read(anon_vma);
+               }
+
                highest_address = vma->vm_end;
                vma = vma->vm_next;
                i++;
@@ -472,7 +482,7 @@ static void validate_mm(struct mm_struct *mm)
                          mm->highest_vm_end, highest_address);
                bug = 1;
        }
-       i = browse_rb(&mm->mm_rb);
+       i = browse_rb(mm);
        if (i != mm->map_count) {
                if (i != -1)
                        pr_emerg("map_count %d rb %d\n", mm->map_count, i);
@@ -2139,32 +2149,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
        struct mm_struct *mm = vma->vm_mm;
-       int error;
+       int error = 0;
 
        if (!(vma->vm_flags & VM_GROWSUP))
                return -EFAULT;
 
-       /*
-        * We must make sure the anon_vma is allocated
-        * so that the anon_vma locking is not a noop.
-        */
+       /* Guard against wrapping around to address 0. */
+       if (address < PAGE_ALIGN(address+4))
+               address = PAGE_ALIGN(address+4);
+       else
+               return -ENOMEM;
+
+       /* We must make sure the anon_vma is allocated. */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
-       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
-        * Also guard against wrapping around to address 0.
         */
-       if (address < PAGE_ALIGN(address+4))
-               address = PAGE_ALIGN(address+4);
-       else {
-               vma_unlock_anon_vma(vma);
-               return -ENOMEM;
-       }
-       error = 0;
+       anon_vma_lock_write(vma->anon_vma);
 
        /* Somebody else might have raced and expanded it already */
        if (address > vma->vm_end) {
@@ -2182,7 +2187,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                                 * updates, but we only hold a shared mmap_sem
                                 * lock here, so we need to protect against
                                 * concurrent vma expansions.
-                                * vma_lock_anon_vma() doesn't help here, as
+                                * anon_vma_lock_write() doesn't help here, as
                                 * we don't guarantee that all growable vmas
                                 * in a mm share the same root anon vma.
                                 * So, we reuse mm->page_table_lock to guard
@@ -2205,7 +2210,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                        }
                }
        }
-       vma_unlock_anon_vma(vma);
+       anon_vma_unlock_write(vma->anon_vma);
        khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
@@ -2221,25 +2226,21 @@ int expand_downwards(struct vm_area_struct *vma,
        struct mm_struct *mm = vma->vm_mm;
        int error;
 
-       /*
-        * We must make sure the anon_vma is allocated
-        * so that the anon_vma locking is not a noop.
-        */
-       if (unlikely(anon_vma_prepare(vma)))
-               return -ENOMEM;
-
        address &= PAGE_MASK;
        error = security_mmap_addr(address);
        if (error)
                return error;
 
-       vma_lock_anon_vma(vma);
+       /* We must make sure the anon_vma is allocated. */
+       if (unlikely(anon_vma_prepare(vma)))
+               return -ENOMEM;
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
         * is required to hold the mmap_sem in read mode.  We need the
         * anon_vma lock to serialize against concurrent expand_stacks.
         */
+       anon_vma_lock_write(vma->anon_vma);
 
        /* Somebody else might have raced and expanded it already */
        if (address < vma->vm_start) {
@@ -2257,7 +2258,7 @@ int expand_downwards(struct vm_area_struct *vma,
                                 * updates, but we only hold a shared mmap_sem
                                 * lock here, so we need to protect against
                                 * concurrent vma expansions.
-                                * vma_lock_anon_vma() doesn't help here, as
+                                * anon_vma_lock_write() doesn't help here, as
                                 * we don't guarantee that all growable vmas
                                 * in a mm share the same root anon vma.
                                 * So, we reuse mm->page_table_lock to guard
@@ -2278,7 +2279,7 @@ int expand_downwards(struct vm_area_struct *vma,
                        }
                }
        }
-       vma_unlock_anon_vma(vma);
+       anon_vma_unlock_write(vma->anon_vma);
        khugepaged_enter_vma_merge(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
@@ -2982,9 +2983,17 @@ bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
        if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
                return false;
 
-       if ((flags & (VM_WRITE | VM_SHARED | (VM_STACK_FLAGS &
-                               (VM_GROWSUP | VM_GROWSDOWN)))) == VM_WRITE)
-               return mm->data_vm + npages <= rlimit(RLIMIT_DATA);
+       if (is_data_mapping(flags) &&
+           mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
+               if (ignore_rlimit_data)
+                       pr_warn_once("%s (%d): VmData %lu exceed data ulimit "
+                                    "%lu. Will be forbidden soon.\n",
+                                    current->comm, current->pid,
+                                    (mm->data_vm + npages) << PAGE_SHIFT,
+                                    rlimit(RLIMIT_DATA));
+               else
+                       return false;
+       }
 
        return true;
 }
@@ -2993,11 +3002,11 @@ void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
 {
        mm->total_vm += npages;
 
-       if ((flags & (VM_EXEC | VM_WRITE)) == VM_EXEC)
+       if (is_exec_mapping(flags))
                mm->exec_vm += npages;
-       else if (flags & (VM_STACK_FLAGS & (VM_GROWSUP | VM_GROWSDOWN)))
+       else if (is_stack_mapping(flags))
                mm->stack_vm += npages;
-       else if ((flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
+       else if (is_data_mapping(flags))
                mm->data_vm += npages;
 }
 
@@ -3033,11 +3042,16 @@ static int special_mapping_fault(struct vm_area_struct *vma,
        pgoff_t pgoff;
        struct page **pages;
 
-       if (vma->vm_ops == &legacy_special_mapping_vmops)
+       if (vma->vm_ops == &legacy_special_mapping_vmops) {
                pages = vma->vm_private_data;
-       else
-               pages = ((struct vm_special_mapping *)vma->vm_private_data)->
-                       pages;
+       } else {
+               struct vm_special_mapping *sm = vma->vm_private_data;
+
+               if (sm->fault)
+                       return sm->fault(sm, vma, vmf);
+
+               pages = sm->pages;
+       }
 
        for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
                pgoff--;
index 63358d9f9aa98eff0848879b0503b5d80b9ea8d0..838ca8bb64f7376062fc6670c759a27d7f59c7e3 100644 (file)
@@ -5209,6 +5209,11 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
        spin_lock_init(&pgdat->numabalancing_migrate_lock);
        pgdat->numabalancing_migrate_nr_pages = 0;
        pgdat->numabalancing_migrate_next_window = jiffies;
+#endif
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+       spin_lock_init(&pgdat->split_queue_lock);
+       INIT_LIST_HEAD(&pgdat->split_queue);
+       pgdat->split_queue_len = 0;
 #endif
        init_waitqueue_head(&pgdat->kswapd_wait);
        init_waitqueue_head(&pgdat->pfmemalloc_wait);
@@ -6615,7 +6620,7 @@ bool is_pageblock_removable_nolock(struct page *page)
        return !has_unmovable_pages(zone, page, 0, true);
 }
 
-#ifdef CONFIG_CMA
+#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
 
 static unsigned long pfn_max_align_down(unsigned long pfn)
 {
index d2c37365e2d6e429cf0f7cdbb62308f5e76aea93..38884383ebac5d8d55dc14e49f10d190a4dee37a 100644 (file)
@@ -48,6 +48,12 @@ static sector_t map_swap_entry(swp_entry_t, struct block_device**);
 DEFINE_SPINLOCK(swap_lock);
 static unsigned int nr_swapfiles;
 atomic_long_t nr_swap_pages;
+/*
+ * Some modules use swappable objects and may try to swap them out under
+ * memory pressure (via the shrinker). Before doing so, they may wish to
+ * check to see if any swap space is available.
+ */
+EXPORT_SYMBOL_GPL(nr_swap_pages);
 /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
 long total_swap_pages;
 static int least_priority;
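The nr_swap_pages export above is aimed at module shrinkers that want to skip swap-backed reclaim when no swap is left. A hypothetical count callback using the existing get_nr_swap_pages() helper (the mydrv_* names are illustrative and not part of this merge):

#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/atomic.h>

static atomic_long_t mydrv_nr_objects;	/* hypothetical swappable-object count */

static unsigned long mydrv_shrink_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* nothing useful to do if there is no swap space available */
	if (get_nr_swap_pages() <= 0)
		return 0;

	return atomic_long_read(&mydrv_nr_objects);
}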
index c108a6542d05d3e1c880c1f6ab540ced62156e54..4fb14ca5a41967696a6f769189eb5a59c6a91c0f 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -230,36 +230,11 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 }
 
 /* Check if the vma is being used as a stack by this task */
-static int vm_is_stack_for_task(struct task_struct *t,
-                               struct vm_area_struct *vma)
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t)
 {
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
-/*
- * Check if the vma is being used as a stack.
- * If is_group is non-zero, check in the entire thread group or else
- * just check in the current task. Returns the task_struct of the task
- * that the vma is stack for. Must be called under rcu_read_lock().
- */
-struct task_struct *task_of_stack(struct task_struct *task,
-                               struct vm_area_struct *vma, bool in_group)
-{
-       if (vm_is_stack_for_task(task, vma))
-               return task;
-
-       if (in_group) {
-               struct task_struct *t;
-
-               for_each_thread(task, t) {
-                       if (vm_is_stack_for_task(t, vma))
-                               return t;
-               }
-       }
-
-       return NULL;
-}
-
 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
index 9a6c0704211c856c300f67057fc202e25c9657ab..149fdf6c5c56f927f3613538c61b3c5831c80af9 100644 (file)
@@ -248,9 +248,8 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
 
        if (tree) {
                spin_lock(&vmpr->sr_lock);
-               vmpr->tree_scanned += scanned;
+               scanned = vmpr->tree_scanned += scanned;
                vmpr->tree_reclaimed += reclaimed;
-               scanned = vmpr->scanned;
                spin_unlock(&vmpr->sr_lock);
 
                if (scanned < vmpressure_win)
index eb3dd37ccd7c727dcc0b6030f62c183097b956bb..71b1c29948dba30aab0a894ddc7c84eb62acde2b 100644 (file)
@@ -1443,7 +1443,7 @@ int isolate_lru_page(struct page *page)
        int ret = -EBUSY;
 
        VM_BUG_ON_PAGE(!page_count(page), page);
-       VM_BUG_ON_PAGE(PageTail(page), page);
+       WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");
 
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
index 40b2c74ddf16d69abc52574c469d9f22dd49cf03..084c6725b3734430483e7ea4fcf74e2ab67f7bfa 100644 (file)
@@ -1396,10 +1396,15 @@ static void vmstat_update(struct work_struct *w)
                 * Counters were updated so we expect more updates
                 * to occur in the future. Keep on running the
                 * update worker thread.
+                * If we were marked on cpu_stat_off clear the flag
+                * so that vmstat_shepherd doesn't schedule us again.
                 */
-               queue_delayed_work_on(smp_processor_id(), vmstat_wq,
-                       this_cpu_ptr(&vmstat_work),
-                       round_jiffies_relative(sysctl_stat_interval));
+               if (!cpumask_test_and_clear_cpu(smp_processor_id(),
+                                               cpu_stat_off)) {
+                       queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+                               this_cpu_ptr(&vmstat_work),
+                               round_jiffies_relative(sysctl_stat_interval));
+               }
        } else {
                /*
                 * We did not update any counters so the app may be in
@@ -1417,18 +1422,6 @@ static void vmstat_update(struct work_struct *w)
  * until the diffs stay at zero. The function is used by NOHZ and can only be
  * invoked when tick processing is not active.
  */
-void quiet_vmstat(void)
-{
-       if (system_state != SYSTEM_RUNNING)
-               return;
-
-       do {
-               if (!cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
-                       cancel_delayed_work(this_cpu_ptr(&vmstat_work));
-
-       } while (refresh_cpu_vm_stats(false));
-}
-
 /*
  * Check if the diffs for a certain cpu indicate that
  * an update is needed.
@@ -1452,6 +1445,30 @@ static bool need_update(int cpu)
        return false;
 }
 
+void quiet_vmstat(void)
+{
+       if (system_state != SYSTEM_RUNNING)
+               return;
+
+       /*
+        * If we are already in the hands of the shepherd then there
+        * is nothing for us to do here.
+        */
+       if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
+               return;
+
+       if (!need_update(smp_processor_id()))
+               return;
+
+       /*
+        * Just refresh counters and do not care about the pending delayed
+        * vmstat_update. It doesn't fire often enough to matter and canceling
+        * it would be too expensive from this path.
+        * vmstat_shepherd will take care of that for us.
+        */
+       refresh_cpu_vm_stats(false);
+}
+
 
 /*
  * Shepherd worker thread that checks the
@@ -1469,18 +1486,25 @@ static void vmstat_shepherd(struct work_struct *w)
 
        get_online_cpus();
        /* Check processors whose vmstat worker threads have been disabled */
-       for_each_cpu(cpu, cpu_stat_off)
-               if (need_update(cpu) &&
-                       cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
-
-                       queue_delayed_work_on(cpu, vmstat_wq,
-                               &per_cpu(vmstat_work, cpu), 0);
+       for_each_cpu(cpu, cpu_stat_off) {
+               struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
 
+               if (need_update(cpu)) {
+                       if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
+                               queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+               } else {
+                       /*
+                        * Cancel the work if quiet_vmstat has put this
+                        * cpu on cpu_stat_off because the work item might
+                        * be still scheduled
+                        */
+                       cancel_delayed_work(dw);
+               }
+       }
        put_online_cpus();
 
        schedule_delayed_work(&shepherd,
                round_jiffies_relative(sysctl_stat_interval));
-
 }
 
 static void __init start_shepherd_timer(void)
@@ -1488,7 +1512,7 @@ static void __init start_shepherd_timer(void)
        int cpu;
 
        for_each_possible_cpu(cpu)
-               INIT_DELAYED_WORK(per_cpu_ptr(&vmstat_work, cpu),
+               INIT_DEFERRABLE_WORK(per_cpu_ptr(&vmstat_work, cpu),
                        vmstat_update);
 
        if (!alloc_cpumask_var(&cpu_stat_off, GFP_KERNEL))
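
Taken together, the vmstat hunks above make the per-cpu worker deferrable (so its timer no longer wakes an idle CPU), let quiet_vmstat() park a CPU on cpu_stat_off instead of cancelling work from the hot path, and have the shepherd either re-arm a parked CPU's worker or cancel one that is still queued. A much-simplified sketch of that shepherd pattern; stats_off, stats_work, shepherd_work and cpu_has_pending_diffs() are illustrative stand-ins (the last for need_update()), and the real code additionally wraps the scan in get_online_cpus()/put_online_cpus():

	#include <linux/cpumask.h>
	#include <linux/percpu.h>
	#include <linux/workqueue.h>

	static cpumask_var_t stats_off;		/* CPUs whose worker is parked */
	static DEFINE_PER_CPU(struct delayed_work, stats_work);

	static bool cpu_has_pending_diffs(int cpu);	/* stand-in for need_update() */
	static void stats_shepherd(struct work_struct *w);
	static DECLARE_DELAYED_WORK(shepherd_work, stats_shepherd);

	static void stats_shepherd(struct work_struct *w)
	{
		int cpu;

		for_each_cpu(cpu, stats_off) {
			struct delayed_work *dw = &per_cpu(stats_work, cpu);

			if (cpu_has_pending_diffs(cpu)) {
				/* hand the CPU back to its own worker */
				if (cpumask_test_and_clear_cpu(cpu, stats_off))
					queue_delayed_work_on(cpu, system_wq,
							      dw, 0);
			} else {
				/* a worker queued before the CPU parked
				 * itself may still be pending; drop it
				 */
				cancel_delayed_work(dw);
			}
		}

		schedule_delayed_work(&shepherd_work, HZ);
	}
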
index bced8c074c1280c6ff175576cb035bf048f18ddd..7bc2208b6cc4c445286c20e01d9911ac9e75b9b2 100644 (file)
@@ -108,9 +108,7 @@ struct p9_poll_wait {
  * @unsent_req_list: accounting for requests that haven't been sent
  * @req: current request being processed (if any)
  * @tmp_buf: temporary buffer to read in header
- * @rsize: amount to read for current frame
- * @rpos: read position in current frame
- * @rbuf: current read buffer
+ * @rc: temporary fcall for reading current frame
  * @wpos: write position for current frame
  * @wsize: amount of data to write for current frame
  * @wbuf: current write buffer
@@ -131,9 +129,7 @@ struct p9_conn {
        struct list_head unsent_req_list;
        struct p9_req_t *req;
        char tmp_buf[7];
-       int rsize;
-       int rpos;
-       char *rbuf;
+       struct p9_fcall rc;
        int wpos;
        int wsize;
        char *wbuf;
@@ -305,69 +301,77 @@ static void p9_read_work(struct work_struct *work)
        if (m->err < 0)
                return;
 
-       p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
+       p9_debug(P9_DEBUG_TRANS, "start mux %p pos %zd\n", m, m->rc.offset);
 
-       if (!m->rbuf) {
-               m->rbuf = m->tmp_buf;
-               m->rpos = 0;
-               m->rsize = 7; /* start by reading header */
+       if (!m->rc.sdata) {
+               m->rc.sdata = m->tmp_buf;
+               m->rc.offset = 0;
+               m->rc.capacity = 7; /* start by reading header */
        }
 
        clear_bit(Rpending, &m->wsched);
-       p9_debug(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n",
-                m, m->rpos, m->rsize, m->rsize-m->rpos);
-       err = p9_fd_read(m->client, m->rbuf + m->rpos,
-                                               m->rsize - m->rpos);
+       p9_debug(P9_DEBUG_TRANS, "read mux %p pos %zd size: %zd = %zd\n",
+                m, m->rc.offset, m->rc.capacity,
+                m->rc.capacity - m->rc.offset);
+       err = p9_fd_read(m->client, m->rc.sdata + m->rc.offset,
+                        m->rc.capacity - m->rc.offset);
        p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
-       if (err == -EAGAIN) {
+       if (err == -EAGAIN)
                goto end_clear;
-       }
 
        if (err <= 0)
                goto error;
 
-       m->rpos += err;
+       m->rc.offset += err;
 
-       if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
-               u16 tag;
+       /* header read in */
+       if ((!m->req) && (m->rc.offset == m->rc.capacity)) {
                p9_debug(P9_DEBUG_TRANS, "got new header\n");
 
-               n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
-               if (n >= m->client->msize) {
+               err = p9_parse_header(&m->rc, NULL, NULL, NULL, 0);
+               if (err) {
+                       p9_debug(P9_DEBUG_ERROR,
+                                "error parsing header: %d\n", err);
+                       goto error;
+               }
+
+               if (m->rc.size >= m->client->msize) {
                        p9_debug(P9_DEBUG_ERROR,
-                                "requested packet size too big: %d\n", n);
+                                "requested packet size too big: %d\n",
+                                m->rc.size);
                        err = -EIO;
                        goto error;
                }
 
-               tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
                p9_debug(P9_DEBUG_TRANS,
-                        "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
+                        "mux %p pkt: size: %d bytes tag: %d\n",
+                        m, m->rc.size, m->rc.tag);
 
-               m->req = p9_tag_lookup(m->client, tag);
+               m->req = p9_tag_lookup(m->client, m->rc.tag);
                if (!m->req || (m->req->status != REQ_STATUS_SENT)) {
                        p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
-                                tag);
+                                m->rc.tag);
                        err = -EIO;
                        goto error;
                }
 
                if (m->req->rc == NULL) {
-                       m->req->rc = kmalloc(sizeof(struct p9_fcall) +
-                                               m->client->msize, GFP_NOFS);
-                       if (!m->req->rc) {
-                               m->req = NULL;
-                               err = -ENOMEM;
-                               goto error;
-                       }
+                       p9_debug(P9_DEBUG_ERROR,
+                                "No recv fcall for tag %d (req %p), disconnecting!\n",
+                                m->rc.tag, m->req);
+                       m->req = NULL;
+                       err = -EIO;
+                       goto error;
                }
-               m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
-               memcpy(m->rbuf, m->tmp_buf, m->rsize);
-               m->rsize = n;
+               m->rc.sdata = (char *)m->req->rc + sizeof(struct p9_fcall);
+               memcpy(m->rc.sdata, m->tmp_buf, m->rc.capacity);
+               m->rc.capacity = m->rc.size;
        }
 
-       /* not an else because some packets (like clunk) have no payload */
-       if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
+       /* packet is read in
+        * not an else because some packets (like clunk) have no payload
+        */
+       if ((m->req) && (m->rc.offset == m->rc.capacity)) {
                p9_debug(P9_DEBUG_TRANS, "got new packet\n");
                spin_lock(&m->client->lock);
                if (m->req->status != REQ_STATUS_ERROR)
@@ -375,9 +379,9 @@ static void p9_read_work(struct work_struct *work)
                list_del(&m->req->req_list);
                spin_unlock(&m->client->lock);
                p9_client_cb(m->client, m->req, status);
-               m->rbuf = NULL;
-               m->rpos = 0;
-               m->rsize = 0;
+               m->rc.sdata = NULL;
+               m->rc.offset = 0;
+               m->rc.capacity = 0;
                m->req = NULL;
        }
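
The rework above replaces the open-coded le32/le16 loads with a p9_fcall plus p9_parse_header(). For reference, the fixed 7-byte header read into tmp_buf has the 9P wire layout size[4] type[1] tag[2] in little-endian byte order. A standalone sketch of that decoding; struct p9_hdr and p9_hdr_decode() are illustrative names, not kernel API:

	#include <stdint.h>

	struct p9_hdr {
		uint32_t size;	/* total message length, header included */
		uint8_t  type;	/* message type (Tversion, Rread, ...)   */
		uint16_t tag;	/* matches a reply to its request        */
	};

	static void p9_hdr_decode(const unsigned char buf[7], struct p9_hdr *h)
	{
		h->size = (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
			  ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
		h->type = buf[4];
		h->tag  = (uint16_t)(buf[5] | (buf[6] << 8));
	}
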
 
index 199bc76202d2552d23fe964f377bbf69eaf518b9..4acb1d5417aaf980bc7797c817eb9a9a350ecbf5 100644 (file)
@@ -658,7 +658,7 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args)
        mutex_unlock(&virtio_9p_lock);
 
        if (!found) {
-               pr_err("no channels available\n");
+               pr_err("no channels available for device %s\n", devname);
                return ret;
        }
 
index c6fc8f756c9aa500d1efa61de95b5757d2dccee8..2dd40e5ea030a1bbebdfbcad89ccdda78dfd36e9 100644 (file)
@@ -12,7 +12,7 @@ config BATMAN_ADV
           B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
           a routing protocol for multi-hop ad-hoc mesh networks. The
           networks may be wired or wireless. See
-          http://www.open-mesh.org/ for more information and user space
+          https://www.open-mesh.org/ for more information and user space
           tools.
 
 config BATMAN_ADV_BLA
index 21434ab79d2ce7ad4a959be69aebcaac7771233e..207e2af316c7bcfc64955a711bc4dc756ab46b9c 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
 #
 # Marek Lindner, Simon Wunderlich
 #
index 4e59cf3eb079ec0c1d1836a0be39e95901146b91..a7485d676088c367eb56b98a7030aca21c29374d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index df625de55ef2226fcc79a9dca11dc978ebe6d7df..3266bcb5bb06a550fedfeaca6b5962821e549193 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -88,7 +88,7 @@ static void batadv_ring_buffer_set(u8 lq_recv[], u8 *lq_index, u8 value)
  * in the given ring buffer
  * @lq_recv: pointer to the ring buffer
  *
- * Returns computed average value.
+ * Return: computed average value.
  */
 static u8 batadv_ring_buffer_avg(const u8 lq_recv[])
 {
@@ -132,7 +132,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
  * @orig_node: the orig_node that has to be changed
  * @max_if_num: the current amount of interfaces
  *
- * Returns 0 on success, a negative error code otherwise.
+ * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
                                     int max_if_num)
@@ -180,7 +180,7 @@ unlock:
  * @max_if_num: the current amount of interfaces
  * @del_if_num: the index of the interface being removed
  *
- * Returns 0 on success, a negative error code otherwise.
+ * Return: 0 on success, a negative error code otherwise.
  */
 static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
                                     int max_if_num, int del_if_num)
@@ -246,7 +246,7 @@ unlock:
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: mac address of the originator
  *
- * Returns the originator object corresponding to the passed mac address or NULL
+ * Return: the originator object corresponding to the passed mac address or NULL
  * on failure.
  * If the object does not exist it is created and initialised.
  */
@@ -396,7 +396,14 @@ static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv)
        return new_tq;
 }
 
-/* is there another aggregated packet here? */
+/**
+ * batadv_iv_ogm_aggr_packet - checks if there is another OGM attached
+ * @buff_pos: current position in the skb
+ * @packet_len: total length of the skb
+ * @tvlv_len: tvlv length of the previously considered OGM
+ *
+ * Return: true if there is enough space for another OGM, false otherwise.
+ */
 static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len,
                                      __be16 tvlv_len)
 {
@@ -522,7 +529,7 @@ out:
  * @if_outgoing: interface for which the retransmission should be considered
  * @forw_packet: the forwarded packet which should be checked
  *
- * Returns true if new_packet can be aggregated with forw_packet
+ * Return: true if new_packet can be aggregated with forw_packet
  */
 static bool
 batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
@@ -1125,7 +1132,7 @@ out:
  * @if_incoming: interface where the packet was received
  * @if_outgoing: interface for which the retransmission should be considered
  *
- * Returns 1 if the link can be considered bidirectional, 0 otherwise
+ * Return: 1 if the link can be considered bidirectional, 0 otherwise
  */
 static int batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
                                 struct batadv_orig_node *orig_neigh_node,
@@ -1269,7 +1276,7 @@ out:
  * @if_incoming: interface on which the OGM packet was received
  * @if_outgoing: interface for which the retransmission should be considered
  *
- * Returns duplicate status as enum batadv_dup_status
+ * Return: duplicate status as enum batadv_dup_status
  */
 static enum batadv_dup_status
 batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
@@ -1929,7 +1936,7 @@ static void batadv_iv_neigh_print(struct batadv_priv *bat_priv,
  * @neigh2: the second neighbor object of the comparison
  * @if_outgoing2: outgoing interface for the second neighbor
  *
- * Returns a value less, equal to or greater than 0 if the metric via neigh1 is
+ * Return: a value less, equal to or greater than 0 if the metric via neigh1 is
  * lower, the same as or higher than the metric via neigh2
  */
 static int batadv_iv_ogm_neigh_cmp(struct batadv_neigh_node *neigh1,
@@ -1970,7 +1977,7 @@ out:
  * @neigh2: the second neighbor object of the comparison
  * @if_outgoing2: outgoing interface for the second neighbor
  *
- * Returns true if the metric via neigh1 is equally good or better than
+ * Return: true if the metric via neigh1 is equally good or better than
  * the metric via neigh2, false otherwise.
  */
 static bool
index 25cbc36e997adab14dca5e3be3ee832ce57acccb..b56bb000a0abcbb2fe547612d12009a18e69d530 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
@@ -29,10 +29,16 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, s32 n)
        bitmap_shift_left(seq_bits, seq_bits, n, BATADV_TQ_LOCAL_WINDOW_SIZE);
 }
 
-/* receive and process one packet within the sequence number window.
+/**
+ * batadv_bit_get_packet - receive and process one packet within the sequence
+ *  number window
+ * @priv: the bat priv with all the soft interface information
+ * @seq_bits: pointer to the sequence number receive packet
+ * @seq_num_diff: difference between the current/received sequence number and
+ *  the last sequence number
+ * @set_mark: whether this packet should be marked in seq_bits
  *
- * returns:
- *  1 if the window was moved (either new or very old)
+ * Return: 1 if the window was moved (either new or very old),
  *  0 if the window was not moved/shifted.
  */
 int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
index 0226b220fe5b0bda455eb81ccc6a7e495bf296e7..3e41bb80eb81ac34dce4c3c34fa33a3643b19bdc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
 #include <linux/compiler.h>
 #include <linux/types.h>
 
-/* Returns 1 if the corresponding bit in the given seq_bits indicates true
+/**
+ * batadv_test_bit - check if bit is set in the current window
+ *
+ * @seq_bits: pointer to the sequence number receive packet
+ * @last_seqno: latest sequence number in seq_bits
+ * @curr_seqno: sequence number to test for
+ *
+ * Return: 1 if the corresponding bit in the given seq_bits indicates true
  * and curr_seqno is within range of last_seqno. Otherwise returns 0.
  */
 static inline int batadv_test_bit(const unsigned long *seq_bits,
@@ -48,9 +55,6 @@ static inline void batadv_set_bit(unsigned long *seq_bits, s32 n)
        set_bit(n, seq_bits); /* turn the position on */
 }
 
-/* receive and process one packet, returns 1 if received seq_num is considered
- * new, 0 if old
- */
 int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, s32 seq_num_diff,
                          int set_mark);
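
The batadv_test_bit() kerneldoc added above describes a sliding window over sequence numbers: bit n of seq_bits stands for last_seqno - n. A hedged sketch of that test with the window size as a parameter (window_test_bit() is an illustrative name; the real helper hard-codes BATADV_TQ_LOCAL_WINDOW_SIZE):

	#include <linux/bitops.h>
	#include <linux/types.h>

	static inline int window_test_bit(const unsigned long *seq_bits,
					  u32 last_seqno, u32 curr_seqno,
					  u32 window)
	{
		s32 diff = last_seqno - curr_seqno;

		/* curr_seqno must lie in [last_seqno - window + 1, last_seqno] */
		if (diff < 0 || diff >= (s32)window)
			return 0;

		return test_bit(diff, seq_bits) ? 1 : 0;
	}
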
 
index c24c481b666f776c864e31eefe92fdc2ca5fd779..77916093484464d20111f7b6b3cbf8e15926220c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
@@ -58,7 +58,13 @@ static void
 batadv_bla_send_announce(struct batadv_priv *bat_priv,
                         struct batadv_bla_backbone_gw *backbone_gw);
 
-/* return the index of the claim */
+/**
+ * batadv_choose_claim - choose the right bucket for a claim.
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Return: the hash index of the claim
+ */
 static inline u32 batadv_choose_claim(const void *data, u32 size)
 {
        struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
@@ -70,7 +76,13 @@ static inline u32 batadv_choose_claim(const void *data, u32 size)
        return hash % size;
 }
 
-/* return the index of the backbone gateway */
+/**
+ * batadv_choose_backbone_gw - choose the right bucket for a backbone gateway.
+ * @data: data to hash
+ * @size: size of the hash table
+ *
+ * Return: the hash index of the backbone gateway
+ */
 static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
 {
        const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
@@ -82,7 +94,13 @@ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
        return hash % size;
 }
 
-/* compares address and vid of two backbone gws */
+/**
+ * batadv_compare_backbone_gw - compare address and vid of two backbone gws
+ * @node: list node of the first entry to compare
+ * @data2: pointer to the second backbone gateway
+ *
+ * Return: 1 if the backbones have the same data, 0 otherwise
+ */
 static int batadv_compare_backbone_gw(const struct hlist_node *node,
                                      const void *data2)
 {
@@ -100,7 +118,13 @@ static int batadv_compare_backbone_gw(const struct hlist_node *node,
        return 1;
 }
 
-/* compares address and vid of two claims */
+/**
+ * batadv_compare_claim - compare address and vid of two claims
+ * @node: list node of the first entry to compare
+ * @data2: pointer to the second claim
+ *
+ * Return: 1 if the claims have the same data, 0 otherwise
+ */
 static int batadv_compare_claim(const struct hlist_node *node,
                                const void *data2)
 {
@@ -118,7 +142,10 @@ static int batadv_compare_claim(const struct hlist_node *node,
        return 1;
 }
 
-/* free a backbone gw */
+/**
+ * batadv_backbone_gw_free_ref - free backbone gw
+ * @backbone_gw: backbone gateway to be free'd
+ */
 static void
 batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
 {
@@ -126,14 +153,22 @@ batadv_backbone_gw_free_ref(struct batadv_bla_backbone_gw *backbone_gw)
                kfree_rcu(backbone_gw, rcu);
 }
 
-/* finally deinitialize the claim */
+/**
+ * batadv_claim_release - release claim from lists and queue for free after rcu
+ *  grace period
+ * @claim: claim to be released
+ */
 static void batadv_claim_release(struct batadv_bla_claim *claim)
 {
        batadv_backbone_gw_free_ref(claim->backbone_gw);
        kfree_rcu(claim, rcu);
 }
 
-/* free a claim, call claim_free_rcu if its the last reference */
+/**
+ * batadv_claim_free_ref - decrement the claim refcounter and possibly
+ *  release it
+ * @claim: claim to be free'd
+ */
 static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
 {
        if (atomic_dec_and_test(&claim->refcount))
@@ -141,12 +176,11 @@ static void batadv_claim_free_ref(struct batadv_bla_claim *claim)
 }
 
 /**
- * batadv_claim_hash_find
+ * batadv_claim_hash_find - looks for a claim in the claim hash
  * @bat_priv: the bat priv with all the soft interface information
  * @data: search data (may be local/static data)
  *
- * looks for a claim in the hash, and returns it if found
- * or NULL otherwise.
+ * Return: claim if found or NULL otherwise.
  */
 static struct batadv_bla_claim
 *batadv_claim_hash_find(struct batadv_priv *bat_priv,
@@ -181,12 +215,12 @@ static struct batadv_bla_claim
 }
 
 /**
- * batadv_backbone_hash_find - looks for a claim in the hash
+ * batadv_backbone_hash_find - looks for a backbone gateway in the hash
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address of the originator
  * @vid: the VLAN ID
  *
- * Returns claim if found or NULL otherwise.
+ * Return: backbone gateway if found or NULL otherwise
  */
 static struct batadv_bla_backbone_gw *
 batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
@@ -224,7 +258,10 @@ batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
        return backbone_gw_tmp;
 }
 
-/* delete all claims for a backbone */
+/**
+ * batadv_bla_del_backbone_claims - delete all claims for a backbone
+ * @backbone_gw: backbone gateway where the claims should be removed
+ */
 static void
 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
 {
@@ -372,14 +409,13 @@ out:
 }
 
 /**
- * batadv_bla_get_backbone_gw
+ * batadv_bla_get_backbone_gw - finds or creates a backbone gateway
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: the mac address of the originator
  * @vid: the VLAN ID
  * @own_backbone: set if the requested backbone is local
  *
- * searches for the backbone gw or creates a new one if it could not
- * be found.
+ * Return: the (possibly created) backbone gateway or NULL on error
  */
 static struct batadv_bla_backbone_gw *
 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
@@ -445,7 +481,13 @@ batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
        return entry;
 }
 
-/* update or add the own backbone gw to make sure we announce
+/**
+ * batadv_bla_update_own_backbone_gw - updates the own backbone gw for a VLAN
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the selected primary interface
+ * @vid: VLAN identifier
+ *
+ * update or add the own backbone gw to make sure we announce
  * where we receive other backbone gws
  */
 static void
@@ -542,12 +584,9 @@ static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
 }
 
 /**
- * batadv_bla_send_announce
+ * batadv_bla_send_announce - Send an announcement frame
  * @bat_priv: the bat priv with all the soft interface information
  * @backbone_gw: our backbone gateway which should be announced
- *
- * This function sends an announcement. It is called from multiple
- * places.
  */
 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
                                     struct batadv_bla_backbone_gw *backbone_gw)
@@ -637,8 +676,11 @@ claim_free_ref:
        batadv_claim_free_ref(claim);
 }
 
-/* Delete a claim from the claim hash which has the
- * given mac address and vid.
+/**
+ * batadv_bla_del_claim - delete a claim from the claim hash
+ * @bat_priv: the bat priv with all the soft interface information
+ * @mac: mac address of the claim to be removed
+ * @vid: VLAN id for the claim to be removed
  */
 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
                                 const u8 *mac, const unsigned short vid)
@@ -666,7 +708,15 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
        batadv_claim_free_ref(claim);
 }
 
-/* check for ANNOUNCE frame, return 1 if handled */
+/**
+ * batadv_handle_announce - check for ANNOUNCE frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @an_addr: announcement mac address (ARP Sender HW address)
+ * @backbone_addr: originator address of the sender (Ethernet source MAC)
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: 1 if handled
+ */
 static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
                                  u8 *backbone_addr, unsigned short vid)
 {
@@ -716,7 +766,16 @@ static int batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
        return 1;
 }
 
-/* check for REQUEST frame, return 1 if handled */
+/**
+ * batadv_handle_request - check for REQUEST frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
+ * @ethhdr: ethernet header of a packet
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: 1 if handled
+ */
 static int batadv_handle_request(struct batadv_priv *bat_priv,
                                 struct batadv_hard_iface *primary_if,
                                 u8 *backbone_addr, struct ethhdr *ethhdr,
@@ -740,7 +799,16 @@ static int batadv_handle_request(struct batadv_priv *bat_priv,
        return 1;
 }
 
-/* check for UNCLAIM frame, return 1 if handled */
+/**
+ * batadv_handle_unclaim - check for UNCLAIM frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @backbone_addr: originator address of the backbone (Ethernet source)
+ * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: 1 if handled
+ */
 static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
                                 struct batadv_hard_iface *primary_if,
                                 u8 *backbone_addr, u8 *claim_addr,
@@ -769,7 +837,16 @@ static int batadv_handle_unclaim(struct batadv_priv *bat_priv,
        return 1;
 }
 
-/* check for CLAIM frame, return 1 if handled */
+/**
+ * batadv_handle_claim - check for CLAIM frame
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary_if: the primary hard interface of this batman soft interface
+ * @backbone_addr: originator address of the backbone (Ethernet Source)
+ * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
+ * @vid: the VLAN ID of the frame
+ *
+ * Return: 1 if handled
+ */
 static int batadv_handle_claim(struct batadv_priv *bat_priv,
                               struct batadv_hard_iface *primary_if,
                               u8 *backbone_addr, u8 *claim_addr,
@@ -798,7 +875,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_check_claim_group
+ * batadv_check_claim_group - check for claim group membership
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary interface of this batman interface
  * @hw_src: the Hardware source in the ARP Header
@@ -809,7 +886,7 @@ static int batadv_handle_claim(struct batadv_priv *bat_priv,
  * This function also applies the group ID of the sender
  * if it is in the same mesh.
  *
- * returns:
+ * Return:
  *     2  - if it is a claim packet and on the same group
  *     1  - if is a claim packet from another group
  *     0  - if it is not a claim packet
@@ -873,14 +950,12 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_bla_process_claim
+ * batadv_bla_process_claim - Check if this is a claim frame, and process it
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the primary hard interface of this batman soft interface
  * @skb: the frame to be checked
  *
- * Check if this is a claim frame, and process it accordingly.
- *
- * returns 1 if it was a claim frame, otherwise return 0 to
+ * Return: 1 if it was a claim frame, otherwise return 0 to
  * tell the callee that it can use the frame on its own.
  */
 static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
@@ -1011,7 +1086,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        return 1;
 }
 
-/* Check when we last heard from other nodes, and remove them in case of
+/**
+ * batadv_bla_purge_backbone_gw - Remove backbone gateways after a timeout or
+ *  immediately
+ * @bat_priv: the bat priv with all the soft interface information
+ * @now: whether the whole hash shall be wiped now
+ *
+ * Check when we last heard from other nodes, and remove them in case of
  * a time out, or clean all backbone gws if now is set.
  */
 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
@@ -1059,7 +1140,7 @@ purge_now:
 }
 
 /**
- * batadv_bla_purge_claims
+ * batadv_bla_purge_claims - Remove claims after a timeout or immediately
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the selected primary interface, may be NULL if now is set
  * @now: whether the whole hash shall be wiped now
@@ -1108,12 +1189,11 @@ purge_now:
 }
 
 /**
- * batadv_bla_update_orig_address
+ * batadv_bla_update_orig_address - Update the backbone gateways when the own
+ *  originator address changes
  * @bat_priv: the bat priv with all the soft interface information
  * @primary_if: the new selected primary_if
  * @oldif: the old primary interface, may be NULL
- *
- * Update the backbone gateways when the own orig address changes.
  */
 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
@@ -1184,7 +1264,11 @@ void batadv_bla_status_update(struct net_device *net_dev)
        batadv_hardif_free_ref(primary_if);
 }
 
-/* periodic work to do:
+/**
+ * batadv_bla_periodic_work - performs periodic bla work
+ * @work: kernel work struct
+ *
+ * periodic work to do:
  *  * purge structures when they are too old
  *  * send announcements
  */
@@ -1265,7 +1349,12 @@ out:
 static struct lock_class_key batadv_claim_hash_lock_class_key;
 static struct lock_class_key batadv_backbone_hash_lock_class_key;
 
-/* initialize all bla structures */
+/**
+ * batadv_bla_init - initialize all bla structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success, < 0 on error.
+ */
 int batadv_bla_init(struct batadv_priv *bat_priv)
 {
        int i;
@@ -1320,7 +1409,7 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_check_bcast_duplist
+ * batadv_bla_check_bcast_duplist - Check if a frame is in the broadcast dup.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: contains the bcast_packet to be checked
  *
@@ -1332,6 +1421,8 @@ int batadv_bla_init(struct batadv_priv *bat_priv)
  * with a good chance that it is the same packet. If it is furthermore
  * sent by another host, drop it. We allow equal packets from
  * the same host however as this might be intended.
+ *
+ * Return: 1 if a packet is in the duplicate list, 0 otherwise.
  */
 int batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb)
@@ -1390,14 +1481,13 @@ out:
 }
 
 /**
- * batadv_bla_is_backbone_gw_orig
+ * batadv_bla_is_backbone_gw_orig - Check if the originator is a gateway for
+ *  the VLAN identified by vid.
  * @bat_priv: the bat priv with all the soft interface information
  * @orig: originator mac address
  * @vid: VLAN identifier
  *
- * Check if the originator is a gateway for the VLAN identified by vid.
- *
- * Returns true if orig is a backbone for this vid, false otherwise.
+ * Return: true if orig is a backbone for this vid, false otherwise.
  */
 bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
                                    unsigned short vid)
@@ -1431,14 +1521,13 @@ bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
 }
 
 /**
- * batadv_bla_is_backbone_gw
+ * batadv_bla_is_backbone_gw - check if originator is a backbone gw for a VLAN.
  * @skb: the frame to be checked
  * @orig_node: the orig_node of the frame
  * @hdr_size: maximum length of the frame
  *
- * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
- * if the orig_node is also a gateway on the soft interface, otherwise it
- * returns 0.
+ * Return: 1 if the orig_node is also a gateway on the soft interface, otherwise
+ * it returns 0.
  */
 int batadv_bla_is_backbone_gw(struct sk_buff *skb,
                              struct batadv_orig_node *orig_node, int hdr_size)
@@ -1465,7 +1554,12 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb,
        return 1;
 }
 
-/* free all bla structures (for softinterface free or module unload) */
+/**
+ * batadv_bla_free - free all bla structures
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * for softinterface free or module unload
+ */
 void batadv_bla_free(struct batadv_priv *bat_priv)
 {
        struct batadv_hard_iface *primary_if;
@@ -1488,18 +1582,19 @@ void batadv_bla_free(struct batadv_priv *bat_priv)
 }
 
 /**
- * batadv_bla_rx
+ * batadv_bla_rx - check packets coming from the mesh.
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
  * @is_bcast: the packet came in a broadcast packet type.
  *
- * bla_rx avoidance checks if:
+ * batadv_bla_rx avoidance checks if:
  *  * we have to race for a claim
  *  * if the frame is allowed on the LAN
  *
- * in these cases, the skb is further handled by this function and
- * returns 1, otherwise it returns 0 and the caller shall further
+ * in these cases, the skb is further handled by this function
+ *
+ * Return: 1 if handled, otherwise it returns 0 and the caller shall further
  * process the skb.
  */
 int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
@@ -1583,20 +1678,21 @@ out:
 }
 
 /**
- * batadv_bla_tx
+ * batadv_bla_tx - check packets going into the mesh
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the frame to be checked
  * @vid: the VLAN ID of the frame
  *
- * bla_tx checks if:
+ * batadv_bla_tx checks if:
  *  * a claim was received which has to be processed
  *  * the frame is allowed on the mesh
  *
- * in these cases, the skb is further handled by this function and
- * returns 1, otherwise it returns 0 and the caller shall further
- * process the skb.
+ * in these cases, the skb is further handled by this function.
  *
  * This call might reallocate skb data.
+ *
+ * Return: 1 if handled, otherwise it returns 0 and the caller shall further
+ * process the skb.
  */
 int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
                  unsigned short vid)
@@ -1670,6 +1766,13 @@ out:
        return ret;
 }
 
+/**
+ * batadv_bla_claim_table_seq_print_text - print the claim table in a seq file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
@@ -1719,6 +1822,14 @@ out:
        return 0;
 }
 
+/**
+ * batadv_bla_backbone_table_seq_print_text - print the backbone table in a seq
+ *  file
+ * @seq: seq file to print on
+ * @offset: not used
+ *
+ * Return: always 0
+ */
 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
 {
        struct net_device *net_dev = (struct net_device *)seq->private;
index 7ea199b8b5ab2ab64dba9e56c4ccfba272e80c37..579f0fa6fe6a47c7fd1c7bba73dd4496cec24ca7 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich
  *
index 037ad0a5f485e133a571aeea8d1d9534730612e9..48253cf8341bd82d70d0cd218571741b9257fa37 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -281,6 +281,8 @@ static int batadv_originators_open(struct inode *inode, struct file *file)
  *  originator table of an hard interface
  * @inode: inode pointer to debugfs file
  * @file: pointer to the seq_file
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 static int batadv_originators_hardif_open(struct inode *inode,
                                          struct file *file)
@@ -329,6 +331,8 @@ static int batadv_bla_backbone_table_open(struct inode *inode,
  * batadv_dat_cache_open - Prepare file handler for reads from dat_chache
  * @inode: inode which was opened
  * @file: file handle to be initialized
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 static int batadv_dat_cache_open(struct inode *inode, struct file *file)
 {
@@ -483,6 +487,8 @@ void batadv_debugfs_destroy(void)
  * batadv_debugfs_add_hardif - creates the base directory for a hard interface
  *  in debugfs.
  * @hard_iface: hard interface which should be added.
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
 {
index 80ab8d6f0ab3c0e70cb468bc9dc2c8d7e8e17815..1ab4e2e63afc885d124a16d50c10707e9ab9615a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index a49c705fb86b861f5595c8c0cfb7b8b1e1010589..017fffe9a5b8d125d3f1f2ec9ffa1e964767891c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
@@ -76,7 +76,7 @@ static void batadv_dat_entry_free_ref(struct batadv_dat_entry *dat_entry)
  * batadv_dat_to_purge - check whether a dat_entry has to be purged or not
  * @dat_entry: the entry to check
  *
- * Returns true if the entry has to be purged now, false otherwise.
+ * Return: true if the entry has to be purged now, false otherwise.
  */
 static bool batadv_dat_to_purge(struct batadv_dat_entry *dat_entry)
 {
@@ -151,7 +151,7 @@ static void batadv_dat_purge(struct work_struct *work)
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Returns 1 if the two entries are the same, 0 otherwise.
+ * Return: 1 if the two entries are the same, 0 otherwise.
  */
 static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
 {
@@ -166,7 +166,7 @@ static int batadv_compare_dat(const struct hlist_node *node, const void *data2)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the hw_src field in the ARP packet.
+ * Return: the value of the hw_src field in the ARP packet.
  */
 static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
 {
@@ -183,7 +183,7 @@ static u8 *batadv_arp_hw_src(struct sk_buff *skb, int hdr_size)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the ip_src field in the ARP packet.
+ * Return: the value of the ip_src field in the ARP packet.
  */
 static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
 {
@@ -195,7 +195,7 @@ static __be32 batadv_arp_ip_src(struct sk_buff *skb, int hdr_size)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the hw_dst field in the ARP packet.
+ * Return: the value of the hw_dst field in the ARP packet.
  */
 static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
 {
@@ -207,7 +207,7 @@ static u8 *batadv_arp_hw_dst(struct sk_buff *skb, int hdr_size)
  * @skb: ARP packet
  * @hdr_size: size of the possible header before the ARP packet
  *
- * Returns the value of the ip_dst field in the ARP packet.
+ * Return: the value of the ip_dst field in the ARP packet.
  */
 static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
 {
@@ -219,7 +219,7 @@ static __be32 batadv_arp_ip_dst(struct sk_buff *skb, int hdr_size)
  * @data: data to hash
  * @size: size of the hash table
  *
- * Returns the selected index in the hash table for the given data.
+ * Return: the selected index in the hash table for the given data.
  */
 static u32 batadv_hash_dat(const void *data, u32 size)
 {
@@ -256,7 +256,7 @@ static u32 batadv_hash_dat(const void *data, u32 size)
  * @ip: search key
  * @vid: VLAN identifier
  *
- * Returns the dat_entry if found, NULL otherwise.
+ * Return: the dat_entry if found, NULL otherwise.
  */
 static struct batadv_dat_entry *
 batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
@@ -440,7 +440,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
  * @candidate: orig_node under evaluation
  * @max_orig_node: last selected candidate
  *
- * Returns true if the node has been elected as next candidate or false
+ * Return: true if the node has been elected as next candidate or false
  * otherwise.
  */
 static bool batadv_is_orig_node_eligible(struct batadv_dat_candidate *res,
@@ -558,7 +558,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * closest values (from the LEFT, with wrap around if needed) then the hash
  * value of the key. ip_dst is the key.
  *
- * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
+ * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
  */
 static struct batadv_dat_candidate *
 batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
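
The comment above describes a small DHT: every originator owns a position on a hash ring, and the candidates for an IP are the nodes sitting closest to hash(ip_dst) when walking to the left (towards smaller values, wrapping around); the hunk's Return line says BATADV_DAT_CANDIDATE_NUM such nodes are collected. A userspace illustration of that distance rule on a 16-bit ring; ring_dist_left() and pick_candidate() are illustrative names, not batman-adv API:

	#include <stdint.h>

	/* how far you must walk backwards (with wrap-around) from key to node */
	static uint16_t ring_dist_left(uint16_t key, uint16_t node)
	{
		return (uint16_t)(key - node);
	}

	/* index of the node closest to key from the left, -1 if none */
	static int pick_candidate(const uint16_t *nodes, int n, uint16_t key)
	{
		uint16_t best_dist = UINT16_MAX;
		int best = -1;
		int i;

		for (i = 0; i < n; i++) {
			uint16_t d = ring_dist_left(key, nodes[i]);

			if (best < 0 || d < best_dist) {
				best = i;
				best_dist = d;
			}
		}

		return best;
	}
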
@@ -602,7 +602,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  * This function copies the skb with pskb_copy() and is sent as unicast packet
  * to each of the selected candidates.
  *
- * Returns true if the packet is sent to at least one candidate, false
+ * Return: true if the packet is sent to at least one candidate, false
  * otherwise.
  */
 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
@@ -741,6 +741,8 @@ static void batadv_dat_hash_free(struct batadv_priv *bat_priv)
 /**
  * batadv_dat_init - initialise the DAT internals
  * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 in case of success, a negative error code otherwise
  */
 int batadv_dat_init(struct batadv_priv *bat_priv)
 {
@@ -779,6 +781,8 @@ void batadv_dat_free(struct batadv_priv *bat_priv)
  * batadv_dat_cache_seq_print_text - print the local DAT hash table
  * @seq: seq file to print on
  * @offset: not used
+ *
+ * Return: always 0
  */
 int batadv_dat_cache_seq_print_text(struct seq_file *seq, void *offset)
 {
@@ -831,7 +835,7 @@ out:
  * @skb: packet to analyse
  * @hdr_size: size of the possible header before the ARP packet in the skb
  *
- * Returns the ARP type if the skb contains a valid ARP packet, 0 otherwise.
+ * Return: the ARP type if the skb contains a valid ARP packet, 0 otherwise.
  */
 static u16 batadv_arp_get_type(struct batadv_priv *bat_priv,
                               struct sk_buff *skb, int hdr_size)
@@ -904,8 +908,9 @@ out:
  * @skb: the buffer containing the packet to extract the VID from
  * @hdr_size: the size of the batman-adv header encapsulating the packet
  *
- * If the packet embedded in the skb is vlan tagged this function returns the
- * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ * Return: If the packet embedded in the skb is vlan tagged this function
+ * returns the VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS
+ * is returned.
  */
 static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
 {
@@ -930,7 +935,7 @@ static unsigned short batadv_dat_get_vid(struct sk_buff *skb, int *hdr_size)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: packet to check
  *
- * Returns true if the message has been sent to the dht candidates, false
+ * Return: true if the message has been sent to the dht candidates, false
  * otherwise. In case of a positive return value the message has to be enqueued
  * to permit the fallback.
  */
@@ -1020,7 +1025,7 @@ out:
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
  *
- * Returns true if the request has been answered, false otherwise.
+ * Return: true if the request has been answered, false otherwise.
  */
 bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb, int hdr_size)
@@ -1143,7 +1148,7 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
  * @skb: packet to check
  * @hdr_size: size of the encapsulation header
  *
- * Returns true if the packet was snooped and consumed by DAT. False if the
+ * Return: true if the packet was snooped and consumed by DAT. False if the
  * packet has to be delivered to the interface
  */
 bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
@@ -1200,7 +1205,7 @@ out:
  * @bat_priv: the bat priv with all the soft interface information
  * @forw_packet: the broadcast packet
  *
- * Returns true if the node can drop the packet, false otherwise.
+ * Return: true if the node can drop the packet, false otherwise.
  */
 bool batadv_dat_drop_broadcast_packet(struct batadv_priv *bat_priv,
                                      struct batadv_forw_packet *forw_packet)
index 26d4a525a798ec37b644e789b5176475e0cf0d92..813ecea96cf9334700fa219a41fe1cf5a20f791d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
  *
  * Antonio Quartulli
  *
index 20d9282f895b2d3115f056f09b81090b5d0956a0..55656e84bc7e1a143615ad097ea5a492e2451474 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
@@ -85,7 +85,7 @@ void batadv_frag_purge_orig(struct batadv_orig_node *orig_node,
 /**
  * batadv_frag_size_limit - maximum possible size of packet to be fragmented
  *
- * Returns the maximum size of payload that can be fragmented.
+ * Return: the maximum size of payload that can be fragmented.
  */
 static int batadv_frag_size_limit(void)
 {
@@ -107,7 +107,7 @@ static int batadv_frag_size_limit(void)
  *
  * Caller must hold chain->lock.
  *
- * Returns true if chain is empty and caller can just insert the new fragment
+ * Return: true if chain is empty and caller can just insert the new fragment
  * without searching for the right position.
  */
 static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
@@ -136,7 +136,7 @@ static bool batadv_frag_init_chain(struct batadv_frag_table_entry *chain,
  * Insert a new fragment into the reverse ordered chain in the right table
  * entry. The hash table entry is cleared if "old" fragments exist in it.
  *
- * Returns true if skb is buffered, false on error. If the chain has all the
+ * Return: true if skb is buffered, false on error. If the chain has all the
  * fragments needed to merge the packet, the chain is moved to the passed head
  * to avoid locking the chain in the table.
  */
@@ -242,12 +242,11 @@ err:
 /**
  * batadv_frag_merge_packets - merge a chain of fragments
  * @chain: head of chain with fragments
- * @skb: packet with total size of skb after merging
  *
  * Expand the first skb in the chain and copy the content of the remaining
  * skb's into the expanded one. After doing so, clear the chain.
  *
- * Returns the merged skb or NULL on error.
+ * Return: the merged skb or NULL on error.
  */
 static struct sk_buff *
 batadv_frag_merge_packets(struct hlist_head *chain)
@@ -307,6 +306,9 @@ free:
  * There are three possible outcomes: 1) Packet is merged: Return true and
  * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
  * to NULL; 3) Error: Return false and leave skb as is.
+ *
+ * Return: true when packet is merged or buffered, false when skb is not
+ * used.
  */
 bool batadv_frag_skb_buffer(struct sk_buff **skb,
                            struct batadv_orig_node *orig_node_src)
@@ -344,7 +346,7 @@ out_err:
  * will exceed the MTU towards the next-hop. If so, the fragment is forwarded
  * without merging it.
  *
- * Returns true if the fragment is consumed/forwarded, false otherwise.
+ * Return: true if the fragment is consumed/forwarded, false otherwise.
  */
 bool batadv_frag_skb_fwd(struct sk_buff *skb,
                         struct batadv_hard_iface *recv_if,
@@ -399,7 +401,7 @@ out:
  * passed mtu and the old one with the rest. The new skb contains data from the
  * tail of the old skb.
  *
- * Returns the new fragment, NULL on error.
+ * Return: the new fragment, NULL on error.
  */
 static struct sk_buff *batadv_frag_create(struct sk_buff *skb,
                                          struct batadv_frag_packet *frag_head,
@@ -433,7 +435,7 @@ err:
  * @orig_node: final destination of the created fragments
  * @neigh_node: next-hop of the created fragments
  *
- * Returns true on success, false otherwise.
+ * Return: true on success, false otherwise.
  */
 bool batadv_frag_send_packet(struct sk_buff *skb,
                             struct batadv_orig_node *orig_node,
index 8b9877e70b95eaa9307e66749446ab12441dfbb2..9ff77c7ef7c7719aab1376675cf449b9e250b4ab 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll <martin@hundeboll.net>
  *
@@ -42,7 +42,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
  * batadv_frag_check_entry - check if a list of fragments has timed out
  * @frags_entry: table entry to check
  *
- * Returns true if the frags entry has timed out, false otherwise.
+ * Return: true if the frags entry has timed out, false otherwise.
  */
 static inline bool
 batadv_frag_check_entry(struct batadv_frag_table_entry *frags_entry)
index e6c8382c79ba86dfea5078a37f692f74ebffb01c..5950974de7b1fa6a20d350bfb2a6a7f1a94141a6 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -456,7 +456,7 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: originator announcing gateway capabilities
  *
- * Returns gateway node if found or NULL otherwise.
+ * Return: gateway node if found or NULL otherwise.
  */
 static struct batadv_gw_node *
 batadv_gw_node_get(struct batadv_priv *bat_priv,
@@ -655,13 +655,13 @@ out:
  * @chaddr: buffer where the client address will be stored. Valid
  *  only if the function returns BATADV_DHCP_TO_CLIENT
  *
- * Returns:
+ * This function may re-allocate the data buffer of the skb passed as argument.
+ *
+ * Return:
  * - BATADV_DHCP_NO if the packet is not a dhcp message or if there was an error
  *   while parsing it
  * - BATADV_DHCP_TO_SERVER if this is a message going to the DHCP server
  * - BATADV_DHCP_TO_CLIENT if this is a message going to a DHCP client
- *
- * This function may re-allocate the data buffer of the skb passed as argument.
  */
 enum batadv_dhcp_recipient
 batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
@@ -776,11 +776,11 @@ batadv_gw_dhcp_recipient_get(struct sk_buff *skb, unsigned int *header_len,
  * server. Due to topology changes it may be the case that the GW server
  * previously selected is not the best one anymore.
  *
- * Returns true if the packet destination is unicast and it is not the best gw,
- * false otherwise.
- *
  * This call might reallocate skb data.
  * Must be invoked only when the DHCP packet is going TO a DHCP SERVER.
+ *
+ * Return: true if the packet destination is unicast and it is not the best gw,
+ * false otherwise.
  */
 bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
                            struct sk_buff *skb)
index fa9527785ed3c62aaf3fcd37c7e2439e01498d0a..582dd8c413c838a4958f726cec8c1de69c164d37 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index b51bface8bdd72d2ad7e9f397684cd45d8b26316..5ee04f7140af7bf9d459488b8c886d8df8873cd3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -38,7 +38,7 @@
  * @description: text shown when throughput string cannot be parsed
  * @throughput: pointer holding the returned throughput information
  *
- * Returns false on parse error and true otherwise.
+ * Return: false on parse error and true otherwise.
  */
 static bool batadv_parse_throughput(struct net_device *net_dev, char *buff,
                                    const char *description, u32 *throughput)
index ab893e3182292b5b67ec9fb821aaae9157c79913..b58346350024dbe3eab5f365d3a59073953b59a3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 01acccc4d2185806ae6dd37b2f0f091d9a0b92a0..db90022c00a4d0858923065896fbbdda40e61c55 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -85,7 +85,7 @@ out:
  * This function recursively checks all the fathers of the device passed as
  * argument looking for a batman-adv soft interface.
  *
- * Returns true if the device is descendant of a batman-adv mesh interface (or
+ * Return: true if the device is a descendant of a batman-adv mesh interface (or
  * if it is a batman-adv interface itself), false otherwise
  */
 static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
@@ -136,7 +136,7 @@ static int batadv_is_valid_iface(const struct net_device *net_dev)
  *  interface
  * @net_device: the device to check
  *
- * Returns true if the net device is a 802.11 wireless device, false otherwise.
+ * Return: true if the net device is an 802.11 wireless device, false otherwise.
  */
 bool batadv_is_wifi_netdev(struct net_device *net_device)
 {
@@ -401,7 +401,8 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
  *
  * Invoke ndo_del_slave on master passing slave as argument. In this way slave
  * is free'd and master can correctly change its internal state.
- * Return 0 on success, a negative value representing the error otherwise
+ *
+ * Return: 0 on success, a negative value representing the error otherwise
  */
 static int batadv_master_del_slave(struct batadv_hard_iface *slave,
                                   struct net_device *master)
index 7b12ea8ea29d1a398f649b40c97106705bcd2289..4d6b5e12331f25d5d603a44783db434142257cd1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 2ea6a18d793fe9f184af18322885e20d115815c6..a0a0fdb8580513215a59f971dd199aa00dc10d0f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
index 377626250ac7129e1cf65d1768e0c2ee4643b59e..9bb57b87447cc0ca43c8c9bf31625c3a1f00303c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
  *
  * Simon Wunderlich, Marek Lindner
  *
 struct lock_class_key;
 
 /* callback to a compare function.  should compare 2 element datas for their
- * keys, return 0 if same and not 0 if not same
+ * keys
+ *
+ * Return: 0 if same and not 0 if not same
  */
 typedef int (*batadv_hashdata_compare_cb)(const struct hlist_node *,
                                          const void *);
 
-/* the hashfunction, should return an index
- * based on the key in the data of the first
- * argument and the size the second
+/* the hashfunction
+ *
+ * Return: an index based on the key in the data of the first argument and the
+ * size given as the second argument
  */
 typedef u32 (*batadv_hashdata_choose_cb)(const void *, u32);
 typedef void (*batadv_hashdata_free_cb)(struct hlist_node *, void *);
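
The typedefs above describe the callbacks every batadv_hashtable user supplies. As a hedged sketch of the index callback, a table keyed by MAC addresses could hash the six key bytes and fold the result into the bucket count; the name example_choose is invented, jhash() comes from <linux/jhash.h> and ETH_ALEN from <linux/if_ether.h>:

#include <linux/jhash.h>
#include <linux/if_ether.h>
#include <linux/types.h>

/* pick a bucket index from the key (a MAC address) and the table size */
static u32 example_choose(const void *data, u32 size)
{
	u32 hash = jhash(data, ETH_ALEN, 0);

	return hash % size;
}
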
@@ -96,7 +99,7 @@ static inline void batadv_hash_delete(struct batadv_hashtable *hash,
  *     @data: data passed to the aforementioned callbacks as argument
  *     @data_node: to be added element
  *
- *     Returns 0 on success, 1 if the element already is in the hash
+ *     Return: 0 on success, 1 if the element already is in the hash
  *     and -1 on error.
  */
 static inline int batadv_hash_add(struct batadv_hashtable *hash,
@@ -139,10 +142,11 @@ out:
        return ret;
 }
 
-/* removes data from hash, if found. returns pointer do data on success, so you
- * can remove the used structure yourself, or NULL on error .  data could be the
- * structure you use with just the key filled, we just need the key for
- * comparing.
+/* removes data from hash, if found. data could be the structure you use with
+ * just the key filled, we just need the key for comparing.
+ *
+ * Return: pointer to data on success, so you can remove the used
+ * structure yourself, or NULL on error
  */
 static inline void *batadv_hash_remove(struct batadv_hashtable *hash,
                                       batadv_hashdata_compare_cb compare,
index bcabb5e3f4d3a2c3478999b3deb80b1a7595500b..a69da37bbad57dbcfc01d75406d0011ea83ec805 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index e937143f0b102d36682f30b778c06a24d1c876a5..618d5de06f202b8ea630c90513d88541f1503014 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index 4b5d61fbadb1fb77b2f8484f74abd231dd789346..5f319fd6ecd76da333c7aceaa9c6af1b0c7ebbd3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -233,7 +233,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
  * @bat_priv: the bat priv with all the soft interface information
  * @addr: the address to check
  *
- * Returns 'true' if the mac address was found, false otherwise.
+ * Return: 'true' if the mac address was found, false otherwise.
  */
 bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
 {
@@ -262,7 +262,7 @@ bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
  *  function that requires the primary interface
  * @seq: debugfs table seq_file struct
  *
- * Returns primary interface if found or NULL otherwise.
+ * Return: primary interface if found or NULL otherwise.
  */
 struct batadv_hard_iface *
 batadv_seq_print_text_primary_if_get(struct seq_file *seq)
@@ -297,7 +297,7 @@ out:
  * batadv_max_header_len - calculate maximum encapsulation overhead for a
  *  payload packet
  *
- * Return the maximum encapsulation overhead in bytes.
+ * Return: the maximum encapsulation overhead in bytes.
  */
 int batadv_max_header_len(void)
 {
@@ -599,6 +599,8 @@ int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
  *
  * payload_ptr must always point to an address in the skb head buffer and not to
  * a fragment.
+ *
+ * Return: big endian crc32c of the checksummed data
  */
 __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
 {
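
For the common case where the checksummed payload lives entirely in the linear part of the skb, the crc32c described above reduces to a single library call. A deliberately simplified sketch under that assumption (the in-tree version must also walk paged fragments, which is why payload_ptr has to point into the head buffer):

#include <linux/crc32c.h>
#include <linux/skbuff.h>

/* crc32c over [payload_ptr, skb tail), linear-data-only simplification */
static __be32 example_skb_crc32_linear(struct sk_buff *skb, u8 *payload_ptr)
{
	unsigned int len = skb_tail_pointer(skb) - payload_ptr;

	return cpu_to_be32(crc32c(0, payload_ptr, len));
}
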
@@ -640,7 +642,7 @@ batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
  * @type: tvlv handler type to look for
  * @version: tvlv handler version to look for
  *
- * Returns tvlv handler if found or NULL otherwise.
+ * Return: tvlv handler if found or NULL otherwise.
  */
 static struct batadv_tvlv_handler
 *batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
@@ -688,7 +690,7 @@ static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
  * Has to be called with the appropriate locks being acquired
  * (tvlv.container_list_lock).
  *
- * Returns tvlv container if found or NULL otherwise.
+ * Return: tvlv container if found or NULL otherwise.
  */
 static struct batadv_tvlv_container
 *batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
@@ -720,7 +722,7 @@ static struct batadv_tvlv_container
  * Has to be called with the appropriate locks being acquired
  * (tvlv.container_list_lock).
  *
- * Returns size of all currently registered tvlv containers in bytes.
+ * Return: size of all currently registered tvlv containers in bytes.
  */
 static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
 {
@@ -826,7 +828,7 @@ void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
  * @additional_packet_len: requested additional packet size on top of minimum
  *  size
  *
- * Returns true of the packet buffer could be changed to the requested size,
+ * Return: true if the packet buffer could be changed to the requested size,
  * false otherwise.
  */
 static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
@@ -862,7 +864,7 @@ static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
  * The ogm packet might be enlarged or shrunk depending on the current size
  * and the size of the to-be-appended tvlv containers.
  *
- * Returns size of all appended tvlv containers in bytes.
+ * Return: size of all appended tvlv containers in bytes.
  */
 u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
                                     unsigned char **packet_buff,
@@ -915,7 +917,7 @@ end:
  * @tvlv_value: tvlv content
  * @tvlv_value_len: tvlv content length
  *
- * Returns success if handler was not found or the return value of the handler
+ * Return: success if handler was not found or the return value of the handler
  * callback.
  */
 static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
@@ -968,7 +970,7 @@ static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
  * @tvlv_value: tvlv content
  * @tvlv_value_len: tvlv content length
  *
- * Returns success when processing an OGM or the return value of all called
+ * Return: success when processing an OGM or the return value of all called
  * handler callbacks.
  */
 int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
@@ -1190,8 +1192,8 @@ out:
  * @skb: the buffer containing the packet
  * @header_len: length of the batman header preceding the ethernet header
  *
- * If the packet embedded in the skb is vlan tagged this function returns the
- * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is returned.
+ * Return: VID with the BATADV_VLAN_HAS_TAG flag when the packet embedded in the
+ * skb is vlan tagged. Otherwise BATADV_NO_FLAGS.
  */
 unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
 {
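
The comment above describes the usual VLAN peek: look at the Ethernet header that follows the batman-adv header and, if it carries an 802.1Q tag, return the 12-bit VID together with BATADV_VLAN_HAS_TAG. A simplified sketch that assumes the headers sit in the linear skb area and skips the length validation the real function needs; BATADV_NO_FLAGS and BATADV_VLAN_HAS_TAG are batman-adv's own flag macros:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

/* simplified VID reader; not the in-tree body, just the documented idea */
static unsigned short example_get_vid(struct sk_buff *skb, size_t header_len)
{
	struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
	struct vlan_ethhdr *vhdr;
	unsigned short vid;

	if (ethhdr->h_proto != htons(ETH_P_8021Q))
		return BATADV_NO_FLAGS;

	vhdr = (struct vlan_ethhdr *)ethhdr;
	vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	vid |= BATADV_VLAN_HAS_TAG;

	return vid;
}
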
@@ -1218,7 +1220,7 @@ unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
  * @vid: the VLAN identifier for which the AP isolation attributed as to be
  *  looked up
  *
- * Returns true if AP isolation is on for the VLAN idenfied by vid, false
+ * Return: true if AP isolation is on for the VLAN identified by vid, false
  * otherwise
  */
 bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
index 9dbd9107e7e1333abc996e5b9da54de9c97e480b..a7dc41a2709bd0746f0b27fae109ad832c8883d3 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -273,9 +273,14 @@ static inline void _batadv_dbg(int type __always_unused,
                pr_err("%s: " fmt, _netdev->name, ## arg);              \
        } while (0)
 
-/* returns 1 if they are the same ethernet addr
+/**
+ * batadv_compare_eth - Compare two Ethernet addresses that need not be u16 aligned
+ * @data1: Pointer to a six-byte array containing the Ethernet address
+ * @data2: Pointer to the other six-byte array containing the Ethernet address
  *
  * note: can't use ether_addr_equal() as it requires aligned memory
+ *
+ * Return: 1 if they are the same ethernet addr
  */
 static inline bool batadv_compare_eth(const void *data1, const void *data2)
 {
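
Because ether_addr_equal() requires 16-bit aligned operands, the comparison documented above has to be byte-wise. A minimal illustration of an alignment-agnostic helper (not necessarily the body batman-adv uses, just the idea the note hints at):

#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/types.h>

/* byte-wise comparison works regardless of how either buffer is aligned */
static inline bool example_mac_equal_unaligned(const void *a, const void *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}
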
@@ -287,7 +292,7 @@ static inline bool batadv_compare_eth(const void *data1, const void *data2)
  * @timestamp:         base value to compare with (in jiffies)
  * @timeout:           added to base value before comparing (in milliseconds)
  *
- * Returns true if current time is after timestamp + timeout
+ * Return: true if current time is after timestamp + timeout
  */
 static inline bool batadv_has_timed_out(unsigned long timestamp,
                                        unsigned int timeout)
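
The timeout helper documented above is the standard jiffies pattern: convert the millisecond timeout and compare against the current time with a wrap-safe check. A sketch under that assumption:

#include <linux/jiffies.h>
#include <linux/types.h>

/* true once 'timeout' milliseconds have elapsed since 'timestamp' */
static inline bool example_has_timed_out(unsigned long timestamp,
					 unsigned int timeout)
{
	return time_is_before_jiffies(timestamp + msecs_to_jiffies(timeout));
}
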
@@ -326,7 +331,13 @@ static inline void batadv_add_counter(struct batadv_priv *bat_priv, size_t idx,
 
 #define batadv_inc_counter(b, i) batadv_add_counter(b, i, 1)
 
-/* Sum and return the cpu-local counters for index 'idx' */
+/**
+ * batadv_sum_counter - Sum the cpu-local counters for index 'idx'
+ * @bat_priv: the bat priv with all the soft interface information
+ * @idx: index of counter to sum up
+ *
+ * Return: sum of all cpu-local counters
+ */
 static inline u64 batadv_sum_counter(struct batadv_priv *bat_priv,  size_t idx)
 {
        u64 *counters, sum = 0;
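
Summing one slot of a per-cpu counter array follows the usual for_each_possible_cpu()/per_cpu_ptr() idiom. A generic sketch, assuming the counters were allocated as one u64 array per CPU (as the @idx parameter above suggests):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

/* sum slot 'idx' of a per-cpu u64 counter array across all possible CPUs */
static u64 example_sum_percpu_counter(u64 __percpu *counters, size_t idx)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += *(per_cpu_ptr(counters, cpu) + idx);

	return sum;
}
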
index 75fa5013af724e9bb5e00aa7e98cae74774cc319..155565e0fecce4237eb0ed4304fd99923215c9d1 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2016 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
@@ -55,7 +55,7 @@
  * Collect multicast addresses of the local multicast listeners
  * on the given soft interface, dev, in the given mcast_list.
  *
- * Returns -ENOMEM on memory allocation error or the number of
+ * Return: -ENOMEM on memory allocation error or the number of
  * items added to the mcast_list otherwise.
  */
 static int batadv_mcast_mla_softif_get(struct net_device *dev,
@@ -87,7 +87,7 @@ static int batadv_mcast_mla_softif_get(struct net_device *dev,
  * @mcast_addr: the multicast address to check
  * @mcast_list: the list with multicast addresses to search in
  *
- * Returns true if the given address is already in the given list.
+ * Return: true if the given address is already in the given list.
  * Otherwise returns false.
  */
 static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
@@ -195,8 +195,9 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
  * batadv_mcast_has_bridge - check whether the soft-iface is bridged
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Checks whether there is a bridge on top of our soft interface. Returns
- * true if so, false otherwise.
+ * Checks whether there is a bridge on top of our soft interface.
+ *
+ * Return: true if there is a bridge, false otherwise.
  */
 static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
 {
@@ -218,7 +219,7 @@ static bool batadv_mcast_has_bridge(struct batadv_priv *bat_priv)
  * Updates the own multicast tvlv with our current multicast related settings,
  * capabilities and inabilities.
  *
- * Returns true if the tvlv container is registered afterwards. Otherwise
+ * Return: true if the tvlv container is registered afterwards. Otherwise
  * returns false.
  */
 static bool batadv_mcast_mla_tvlv_update(struct batadv_priv *bat_priv)
@@ -289,8 +290,8 @@ out:
  * Checks whether the given IPv4 packet has the potential to be forwarded with a
  * mode more optimal than classic flooding.
  *
- * If so then returns 0. Otherwise -EINVAL is returned or -ENOMEM in case of
- * memory allocation failure.
+ * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
+ * allocation failure.
  */
 static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
                                             struct sk_buff *skb,
@@ -327,8 +328,7 @@ static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
  * Checks whether the given IPv6 packet has the potential to be forwarded with a
  * mode more optimal than classic flooding.
  *
- * If so then returns 0. Otherwise -EINVAL is returned or -ENOMEM if we are out
- * of memory.
+ * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory
  */
 static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
                                             struct sk_buff *skb,
@@ -366,8 +366,7 @@ static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
  * Checks whether the given multicast ethernet frame has the potential to be
  * forwarded with a mode more optimal than classic flooding.
  *
- * If so then returns 0. Otherwise -EINVAL is returned or -ENOMEM if we are out
- * of memory.
+ * Return: If so then 0. Otherwise -EINVAL, or -ENOMEM if we are out of memory
  */
 static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
                                        struct sk_buff *skb,
@@ -398,7 +397,7 @@ static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: ethernet header of a packet
  *
- * Returns the number of nodes which want all IPv4 multicast traffic if the
+ * Return: the number of nodes which want all IPv4 multicast traffic if the
  * given ethhdr is from an IPv4 packet or the number of nodes which want all
  * IPv6 traffic if it matches an IPv6 packet.
  */
@@ -421,7 +420,7 @@ static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: the ether header containing the multicast destination
  *
- * Returns an orig_node matching the multicast address provided by ethhdr
+ * Return: an orig_node matching the multicast address provided by ethhdr
  * via a translation table lookup. This increases the returned nodes refcount.
  */
 static struct batadv_orig_node *
@@ -436,7 +435,7 @@ batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
  * batadv_mcast_want_forw_ipv4_node_get - get a node with an ipv4 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
+ * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
  * increases its refcount.
  */
 static struct batadv_orig_node *
@@ -463,7 +462,7 @@ batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
  * batadv_mcast_want_forw_ipv6_node_get - get a node with an ipv6 flag
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
+ * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
  * and increases its refcount.
  */
 static struct batadv_orig_node *
@@ -491,7 +490,7 @@ batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @ethhdr: an ethernet header to determine the protocol family from
  *
- * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
+ * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
  * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
  * increases its refcount.
  */
@@ -514,7 +513,7 @@ batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
  * batadv_mcast_want_forw_unsnoop_node_get - get a node with an unsnoopable flag
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Returns an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
+ * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
  * set and increases its refcount.
  */
 static struct batadv_orig_node *
@@ -543,7 +542,7 @@ batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
  * @skb: The multicast packet to check
  * @orig: an originator to be set to forward the skb to
  *
- * Returns the forwarding mode as enum batadv_forw_mode and in case of
+ * Return: the forwarding mode as enum batadv_forw_mode and in case of
  * BATADV_FORW_SINGLE set the orig to the single originator the skb
  * should be forwarded to.
  */
index 8f3cb04b9f13f3e56b360781afc238dc64aeb27a..80bceec55592a7e7347bc09140b177a88bdccf2b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2016 B.A.T.M.A.N. contributors:
  *
  * Linus Lüssing
  *
@@ -23,7 +23,7 @@
 struct sk_buff;
 
 /**
- * batadv_forw_mode - the way a packet should be forwarded as
+ * enum batadv_forw_mode - the way a packet should be forwarded as
  * @BATADV_FORW_ALL: forward the packet to all nodes (currently via classic
  *  flooding)
  * @BATADV_FORW_SINGLE: forward the packet to a single node (currently via the
index cc63b44f0d2e2fdc61b41be26e745dea7567f28c..0b30c15eee5fc7d97fb3531b9991088aa6e64908 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
@@ -64,6 +64,8 @@ static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
 
 /**
  * batadv_nc_init - one-time initialization for network coding
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 int __init batadv_nc_init(void)
 {
@@ -142,6 +144,8 @@ static void batadv_nc_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
 /**
  * batadv_nc_mesh_init - initialise coding hash table and start house keeping
  * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
 {
@@ -251,7 +255,7 @@ static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet)
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_node: the nc node to check
  *
- * Returns true if the entry has to be purged now, false otherwise
+ * Return: true if the entry has to be purged now, false otherwise
  */
 static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
                                       struct batadv_nc_node *nc_node)
@@ -267,7 +271,7 @@ static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
- * Returns true if the entry has to be purged now, false otherwise
+ * Return: true if the entry has to be purged now, false otherwise
  */
 static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
                                              struct batadv_nc_path *nc_path)
@@ -287,7 +291,7 @@ static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @nc_path: the nc path to check
  *
- * Returns true if the entry has to be purged now, false otherwise
+ * Return: true if the entry has to be purged now, false otherwise
  */
 static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
                                                struct batadv_nc_path *nc_path)
@@ -470,7 +474,7 @@ static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
  * @data: data to hash
  * @size: size of the hash table
  *
- * Returns the selected index in the hash table for the given data.
+ * Return: the selected index in the hash table for the given data.
  */
 static u32 batadv_nc_hash_choose(const void *data, u32 size)
 {
@@ -489,7 +493,7 @@ static u32 batadv_nc_hash_choose(const void *data, u32 size)
  * @node: node in the local table
  * @data2: second object to compare the node to
  *
- * Returns 1 if the two entry are the same, 0 otherwise
+ * Return: 1 if the two entry are the same, 0 otherwise
  */
 static int batadv_nc_hash_compare(const struct hlist_node *node,
                                  const void *data2)
@@ -516,7 +520,7 @@ static int batadv_nc_hash_compare(const struct hlist_node *node,
  * @hash: hash table containing the nc path
  * @data: search key
  *
- * Returns the nc_path if found, NULL otherwise.
+ * Return: the nc_path if found, NULL otherwise.
  */
 static struct batadv_nc_path *
 batadv_nc_hash_find(struct batadv_hashtable *hash,
@@ -571,7 +575,7 @@ static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
  * timeout. If so, the packet is no longer kept and the entry deleted from the
  * queue. Has to be called with the appropriate locks.
  *
- * Returns false as soon as the entry in the fifo queue has not been timed out
+ * Return: false as soon as the entry in the fifo queue has not been timed out
  * yet and true otherwise.
  */
 static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
@@ -610,7 +614,7 @@ out:
  * packet is no longer delayed, immediately sent and the entry deleted from the
  * queue. Has to be called with the appropriate locks.
  *
- * Returns false as soon as the entry in the fifo queue has not been timed out
+ * Return: false as soon as the entry in the fifo queue has not been timed out
  * yet and true otherwise.
  */
 static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
@@ -731,7 +735,7 @@ static void batadv_nc_worker(struct work_struct *work)
  * @orig_node: neighboring orig node which may be used as nc candidate
  * @ogm_packet: incoming ogm packet also used for the checks
  *
- * Returns true if:
+ * Return: true if:
  *  1) The OGM must have the most recent sequence number.
  *  2) The TTL must be decremented by one and only one.
  *  3) The OGM must be received from the first hop from orig_node.
@@ -772,7 +776,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
  *  (can be equal to orig_node)
  * @in_coding: traverse incoming or outgoing network coding list
  *
- * Returns the nc_node if found, NULL otherwise.
+ * Return: the nc_node if found, NULL otherwise.
  */
 static struct batadv_nc_node
 *batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
@@ -814,7 +818,7 @@ static struct batadv_nc_node
  *  (can be equal to orig_node)
  * @in_coding: traverse incoming or outgoing network coding list
  *
- * Returns the nc_node if found or created, NULL in case of an error.
+ * Return: the nc_node if found or created, NULL in case of an error.
  */
 static struct batadv_nc_node
 *batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
@@ -932,7 +936,7 @@ out:
  * @src: ethernet source address - first half of the nc path search key
  * @dst: ethernet destination address - second half of the nc path search key
  *
- * Returns pointer to nc_path if the path was found or created, returns NULL
+ * Return: pointer to nc_path if the path was found or created, NULL
  * on error.
  */
 static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
@@ -989,6 +993,8 @@ static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
  * batadv_nc_random_weight_tq - scale the receiver's TQ value to avoid unfair
  *  selection of a receiver with slightly lower TQ than the other
  * @tq: to be weighted tq value
+ *
+ * Return: scaled tq value
  */
 static u8 batadv_nc_random_weight_tq(u8 tq)
 {
@@ -1029,7 +1035,7 @@ static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
  * @nc_packet: structure containing the packet to the skb can be coded with
  * @neigh_node: next hop to forward packet to
  *
- * Returns true if both packets are consumed, false otherwise.
+ * Return: true if both packets are consumed, false otherwise.
  */
 static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb,
@@ -1228,7 +1234,7 @@ out:
  * Since the source encoded the packet we can be certain it has all necessary
  * decode information.
  *
- * Returns true if coding of a decoded packet is allowed.
+ * Return: true if coding of a decoded packet is allowed.
  */
 static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
 {
@@ -1246,7 +1252,7 @@ static bool batadv_nc_skb_coding_possible(struct sk_buff *skb, u8 *dst, u8 *src)
  * @skb: data skb to forward
  * @eth_dst: next hop mac address of skb
  *
- * Returns true if coding of a decoded skb is allowed.
+ * Return: true if coding of a decoded skb is allowed.
  */
 static struct batadv_nc_packet *
 batadv_nc_path_search(struct batadv_priv *bat_priv,
@@ -1314,7 +1320,7 @@ batadv_nc_path_search(struct batadv_priv *bat_priv,
  * @eth_src: source mac address of skb
  * @in_nc_node: pointer to skb next hop's neighbor nc node
  *
- * Returns an nc packet if a suitable coding packet was found, NULL otherwise.
+ * Return: an nc packet if a suitable coding packet was found, NULL otherwise.
  */
 static struct batadv_nc_packet *
 batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
@@ -1397,7 +1403,7 @@ static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
  * next hop that potentially sent a packet which our next hop also received
  * (overheard) and has stored for later decoding.
  *
- * Returns true if the skb was consumed (encoded packet sent) or false otherwise
+ * Return: true if the skb was consumed (encoded packet sent) or false otherwise
  */
 static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
                                     struct batadv_neigh_node *neigh_node,
@@ -1451,7 +1457,7 @@ static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
  * @neigh_node: next hop to forward packet to
  * @packet_id: checksum to identify packet
  *
- * Returns true if the packet was buffered or false in case of an error.
+ * Return: true if the packet was buffered or false in case of an error.
  */
 static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
                                      struct batadv_nc_path *nc_path,
@@ -1485,7 +1491,7 @@ static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
  * @skb: data skb to forward
  * @neigh_node: next hop to forward packet to
  *
- * Returns true if the skb was consumed (encoded packet sent) or false otherwise
+ * Return: true if the skb was consumed (encoded packet sent) or false otherwise
  */
 bool batadv_nc_skb_forward(struct sk_buff *skb,
                           struct batadv_neigh_node *neigh_node)
@@ -1624,7 +1630,7 @@ void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
  * @skb: unicast skb to decode
  * @nc_packet: decode data needed to decode the skb
  *
- * Returns pointer to decoded unicast packet if the packet was decoded or NULL
+ * Return: pointer to decoded unicast packet if the packet was decoded or NULL
  * in case of an error.
  */
 static struct batadv_unicast_packet *
@@ -1718,7 +1724,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
  * @ethhdr: pointer to the ethernet header inside the coded packet
  * @coded: coded packet we try to find decode data for
  *
- * Returns pointer to nc packet if the needed data was found or NULL otherwise.
+ * Return: pointer to nc packet if the needed data was found or NULL otherwise.
  */
 static struct batadv_nc_packet *
 batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
@@ -1781,6 +1787,9 @@ batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
  *  resulting unicast packet
  * @skb: incoming coded packet
  * @recv_if: pointer to interface this packet was received on
+ *
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * otherwise.
  */
 static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
                                       struct batadv_hard_iface *recv_if)
@@ -1865,6 +1874,8 @@ void batadv_nc_mesh_free(struct batadv_priv *bat_priv)
  * batadv_nc_nodes_seq_print_text - print the nc node information
  * @seq: seq file to print on
  * @offset: not used
+ *
+ * Return: always 0
  */
 int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset)
 {
@@ -1927,6 +1938,8 @@ out:
 /**
  * batadv_nc_init_debugfs - create nc folder and related files in debugfs
  * @bat_priv: the bat priv with all the soft interface information
+ *
+ * Return: 0 on success or negative error number in case of failure
  */
 int batadv_nc_init_debugfs(struct batadv_priv *bat_priv)
 {
index 8f6d4ad8778ade1766f026e8bbd520ce37a95451..d6d7fb4ec5d595ae996b983309e6ffc1246338c9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
  *
  * Martin Hundebøll, Jeppe Ledet-Pedersen
  *
index fe578f75c39137c451fcc307e729046bd1d1c3c0..d4a30db0158a25908e4d13c51068c9c02eaf6fbd 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -47,7 +47,13 @@ static struct lock_class_key batadv_orig_hash_lock_class_key;
 
 static void batadv_purge_orig(struct work_struct *work);
 
-/* returns 1 if they are the same originator */
+/**
+ * batadv_compare_orig - comparing function used in the originator hash table
+ * @node: node in the local table
+ * @data2: second object to compare the node to
+ *
+ * Return: 1 if they are the same originator
+ */
 int batadv_compare_orig(const struct hlist_node *node, const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_orig_node,
@@ -61,7 +67,7 @@ int batadv_compare_orig(const struct hlist_node *node, const void *data2)
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
  *
- * Returns the vlan object identified by vid and belonging to orig_node or NULL
+ * Return: the vlan object identified by vid and belonging to orig_node or NULL
  * if it does not exist.
  */
 struct batadv_orig_node_vlan *
@@ -93,7 +99,7 @@ batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
  * @orig_node: the originator serving the VLAN
  * @vid: the VLAN identifier
  *
- * Returns NULL in case of failure or the vlan object identified by vid and
+ * Return: NULL in case of failure or the vlan object identified by vid and
  * belonging to orig_node otherwise. The object is created and added to the list
  * if it does not exist.
  *
@@ -266,7 +272,7 @@ void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
  * @if_outgoing: the interface where the payload packet has been received or
  *  the OGM should be sent to
  *
- * Returns the neighbor which should be router for this orig_node/iface.
+ * Return: the neighbor which should be router for this orig_node/iface.
  *
  * The object is returned with refcounter increased by 1.
  */
@@ -298,7 +304,7 @@ batadv_orig_router_get(struct batadv_orig_node *orig_node,
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
- * Returns the requested orig_ifinfo or NULL if not found.
+ * Return: the requested orig_ifinfo or NULL if not found.
  *
  * The object is returned with refcounter increased by 1.
  */
@@ -330,7 +336,7 @@ batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
  * @orig_node: the orig node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
- * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
+ * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
  * interface otherwise. The object is created and added to the list
  * if it does not exist.
  *
@@ -375,12 +381,12 @@ out:
 
 /**
  * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
- * @neigh_node: the neigh node to be queried
+ * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
  * The object is returned with refcounter increased by 1.
  *
- * Returns the requested neigh_ifinfo or NULL if not found
+ * Return: the requested neigh_ifinfo or NULL if not found
  */
 struct batadv_neigh_ifinfo *
 batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
@@ -408,10 +414,10 @@ batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
 
 /**
  * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
- * @neigh_node: the neigh node to be queried
+ * @neigh: the neigh node to be queried
  * @if_outgoing: the interface for which the ifinfo should be acquired
  *
- * Returns NULL in case of failure or the neigh_ifinfo object for the
+ * Return: NULL in case of failure or the neigh_ifinfo object for the
  * if_outgoing interface otherwise. The object is created and added to the list
  * if it does not exist.
  *
@@ -459,7 +465,8 @@ out:
  *
  * Looks for and possibly returns a neighbour belonging to this originator list
  * which is connected through the provided hard interface.
- * Returns NULL if the neighbour is not found.
+ *
+ * Return: neighbor when found. Otherwise NULL
  */
 static struct batadv_neigh_node *
 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
@@ -492,7 +499,7 @@ batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
  *
- * Returns the hardif neighbour node if found or created or NULL otherwise.
+ * Return: the hardif neighbour node if found or created or NULL otherwise.
  */
 static struct batadv_hardif_neigh_node *
 batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
@@ -540,7 +547,7 @@ out:
  * @hard_iface: the interface this neighbour is connected to
  * @neigh_addr: the interface address of the neighbour to retrieve
  *
- * Returns the hardif neighbour node if found or created or NULL otherwise.
+ * Return: the hardif neighbour node if found or created or NULL otherwise.
  */
 static struct batadv_hardif_neigh_node *
 batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
@@ -562,7 +569,8 @@ batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
  * @neigh_addr: the address of the neighbour
  *
  * Looks for and possibly returns a neighbour belonging to this hard interface.
- * Returns NULL if the neighbour is not found.
+ *
+ * Return: neighbor when found. Otherwise NULL
  */
 struct batadv_hardif_neigh_node *
 batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
@@ -594,7 +602,8 @@ batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
  * @neigh_addr: the mac address of the neighbour interface
  *
  * Allocates a new neigh_node object and initialises all the generic fields.
- * Returns the new object or NULL on failure.
+ *
+ * Return: neighbor when found. Otherwise NULL
  */
 struct batadv_neigh_node *
 batadv_neigh_node_new(struct batadv_orig_node *orig_node,
@@ -656,7 +665,7 @@ out:
  * @seq: neighbour table seq_file struct
  * @offset: not used
  *
- * Always returns 0.
+ * Return: always 0
  */
 int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
 {
@@ -820,7 +829,8 @@ void batadv_originator_free(struct batadv_priv *bat_priv)
  *
  * Creates a new originator object and initialise all the generic fields.
  * The new object is not added to the originator list.
- * Returns the newly created object or NULL on failure.
+ *
+ * Return: the newly created object or NULL on failure.
  */
 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
                                              const u8 *addr)
@@ -937,7 +947,7 @@ batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
- * Returns true if any ifinfo entry was purged, false otherwise.
+ * Return: true if any ifinfo entry was purged, false otherwise.
  */
 static bool
 batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
@@ -989,7 +999,7 @@ batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node which is to be checked
  *
- * Returns true if any neighbor was purged, false otherwise
+ * Return: true if any neighbor was purged, false otherwise
  */
 static bool
 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
@@ -1048,7 +1058,7 @@ batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
  * @orig_node: orig node which is to be checked
  * @if_outgoing: the interface for which the metric should be compared
  *
- * Returns the current best neighbor, with refcount increased.
+ * Return: the current best neighbor, with refcount increased.
  */
 static struct batadv_neigh_node *
 batadv_find_best_neighbor(struct batadv_priv *bat_priv,
@@ -1085,7 +1095,7 @@ batadv_find_best_neighbor(struct batadv_priv *bat_priv,
  * This function checks if the orig_node or substructures of it have become
  * obsolete, and purges this information if that's the case.
  *
- * Returns true if the orig_node is to be removed, false otherwise.
+ * Return: true if the orig_node is to be removed, false otherwise.
  */
 static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
                                   struct batadv_orig_node *orig_node)
@@ -1230,7 +1240,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
  * @seq: debugfs table seq_file struct
  * @offset: not used
  *
- * Returns 0
+ * Return: 0
  */
 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
 {
index cf0730414ed22e0a99a0415202536b13ad2ae479..745b4e4fcdc4d4b316ac8e9ab84c3e56a07a35fc 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 0558e3237e0e7e38a42ff7cfc7f266dc90f69709..e7f915181abaedd51dfda248d7a9d08bf66fecb0 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -158,7 +158,7 @@ enum batadv_tt_client_flags {
 };
 
 /**
- * batadv_vlan_flags - flags for the four MSB of any vlan ID field
+ * enum batadv_vlan_flags - flags for the four MSB of any vlan ID field
  * @BATADV_VLAN_HAS_TAG: whether the field contains a valid vlan tag or not
  */
 enum batadv_vlan_flags {
@@ -209,6 +209,11 @@ struct batadv_bla_claim_dst {
  * @version: batman-adv protocol version, part of the general header
  * @ttl: time to live for this packet, part of the general header
  * @flags: contains routing relevant flags - see enum batadv_iv_flags
+ * @seqno: sequence identification
+ * @orig: address of the source node
+ * @prev_sender: address of the previous sender
+ * @reserved: reserved byte for alignment
+ * @tq: transmission quality
  * @tvlv_len: length of tvlv data following the ogm header
  */
 struct batadv_ogm_packet {
@@ -230,7 +235,7 @@ struct batadv_ogm_packet {
 #define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
 
 /**
- * batadv_icmp_header - common members among all the ICMP packets
+ * struct batadv_icmp_header - common members among all the ICMP packets
  * @packet_type: batman-adv packet type, part of the general header
  * @version: batman-adv protocol version, part of the general header
  * @ttl: time to live for this packet, part of the general header
@@ -256,7 +261,7 @@ struct batadv_icmp_header {
 };
 
 /**
- * batadv_icmp_packet - ICMP packet
+ * struct batadv_icmp_packet - ICMP packet
  * @packet_type: batman-adv packet type, part of the general header
  * @version: batman-adv protocol version, part of the general header
  * @ttl: time to live for this packet, part of the general header
@@ -282,7 +287,7 @@ struct batadv_icmp_packet {
 #define BATADV_RR_LEN 16
 
 /**
- * batadv_icmp_packet_rr - ICMP RouteRecord packet
+ * struct batadv_icmp_packet_rr - ICMP RouteRecord packet
  * @packet_type: batman-adv packet type, part of the general header
  * @version: batman-adv protocol version, part of the general header
  * @ttl: time to live for this packet, part of the general header
@@ -345,6 +350,7 @@ struct batadv_unicast_packet {
  * @u: common unicast packet header
  * @src: address of the source
  * @subtype: packet subtype
+ * @reserved: reserved byte for alignment
  */
 struct batadv_unicast_4addr_packet {
        struct batadv_unicast_packet u;
@@ -413,7 +419,6 @@ struct batadv_bcast_packet {
  * @packet_type: batman-adv packet type, part of the general header
  * @version: batman-adv protocol version, part of the general header
  * @ttl: time to live for this packet, part of the general header
- * @reserved: Align following fields to 2-byte boundaries
  * @first_source: original source of first included packet
  * @first_orig_dest: original destinal of first included packet
  * @first_crc: checksum of first included packet
@@ -495,7 +500,7 @@ struct batadv_tvlv_gateway_data {
  * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container
  * @flags: translation table flags (see batadv_tt_data_flags)
  * @ttvn: translation table version number
- * @vlan_num: number of announced VLANs. In the TVLV this struct is followed by
+ * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by
  *  one batadv_tvlv_tt_vlan_data object per announced vlan
  */
 struct batadv_tvlv_tt_data {
index e4f2646d92463a915b3a52a43b46c6f0736b5e52..f4b60b1fb50edb2d9a063047340eb008b9375244 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -140,9 +140,17 @@ out:
                batadv_neigh_node_free_ref(router);
 }
 
-/* checks whether the host restarted and is in the protection time.
- * returns:
- *  0 if the packet is to be accepted
+/**
+ * batadv_window_protected - checks whether the host restarted and is in the
+ *  protection time.
+ * @bat_priv: the bat priv with all the soft interface information
+ * @seq_num_diff: difference between the current/received sequence number and
+ *  the last sequence number
+ * @last_reset: jiffies timestamp of the last reset, will be updated when reset
+ *  is detected
+ *
+ * Return:
+ *  0 if the packet is to be accepted.
  *  1 if the packet is to be ignored.
  */
 int batadv_window_protected(struct batadv_priv *bat_priv, s32 seq_num_diff,
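
The window protection described in the new kernel-doc is generic: a sequence number far outside the expected window suggests the neighbour rebooted, but the reset is only accepted once a protection period has passed since the previous one. A batman-adv-independent sketch with invented window and timeout constants:

#include <linux/jiffies.h>
#include <linux/types.h>

#define EX_SEQ_WINDOW		64	/* assumed window size */
#define EX_RESET_PROTECT_MS	30000	/* assumed protection period in ms */

/* 0: accept the packet; 1: ignore it (still inside the protection time) */
static int example_window_protected(s32 seq_num_diff, unsigned long *last_reset)
{
	if (seq_num_diff <= -EX_SEQ_WINDOW || seq_num_diff >= EX_SEQ_WINDOW) {
		if (!time_is_before_jiffies(*last_reset +
					    msecs_to_jiffies(EX_RESET_PROTECT_MS)))
			return 1;	/* suspected restart too recent: ignore */

		*last_reset = jiffies;	/* accept and remember the reset */
	}

	return 0;
}
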
@@ -198,7 +206,7 @@ bool batadv_check_management_packet(struct sk_buff *skb,
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: icmp packet to process
  *
- * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
  * otherwise.
  */
 static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
@@ -398,10 +406,11 @@ out:
  * @skb: packet to check
  * @hdr_size: size of header to pull
  *
- * Check for short header and bad addresses in given packet. Returns negative
- * value when check fails and 0 otherwise. The negative value depends on the
- * reason: -ENODATA for bad header, -EBADR for broadcast destination or source,
- * and -EREMOTE for non-local (other host) destination.
+ * Check for short header and bad addresses in given packet.
+ *
+ * Return: negative value when check fails and 0 otherwise. The negative value
+ * depends on the reason: -ENODATA for bad header, -EBADR for broadcast
+ * destination or source, and -EREMOTE for non-local (other host) destination.
  */
 static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
                                       struct sk_buff *skb, int hdr_size)
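
The three error codes in the comment above map onto three straightforward checks. A hedged sketch of that shape; the my_mac callback standing in for the "is this address ours?" test is an invented parameter, not part of the batman-adv API:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* -ENODATA: truncated header, -EBADR: broadcast src/dst, -EREMOTE: not for us */
static int example_check_unicast(struct sk_buff *skb, int hdr_size,
				 bool (*my_mac)(const u8 *addr))
{
	struct ethhdr *ethhdr;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -ENODATA;

	ethhdr = eth_hdr(skb);

	if (is_broadcast_ether_addr(ethhdr->h_source) ||
	    is_broadcast_ether_addr(ethhdr->h_dest))
		return -EBADR;

	if (!my_mac(ethhdr->h_dest))
		return -EREMOTE;

	return 0;
}
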
@@ -435,7 +444,7 @@ static int batadv_check_unicast_packet(struct batadv_priv *bat_priv,
  * @orig_node: the destination node
  * @recv_if: pointer to interface this packet was received on
  *
- * Returns the router which should be used for this orig_node on
+ * Return: the router which should be used for this orig_node on
  * this interface, or NULL if not available.
  */
 struct batadv_neigh_node *
@@ -648,7 +657,7 @@ out:
  * the new corresponding information (originator address where the destination
  * client currently is and its known TTVN)
  *
- * Returns true if the packet header has been updated, false otherwise
+ * Return: true if the packet header has been updated, false otherwise
  */
 static bool
 batadv_reroute_unicast_packet(struct batadv_priv *bat_priv,
@@ -805,7 +814,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
  *
- * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
  * otherwise.
  */
 int batadv_recv_unhandled_unicast_packet(struct sk_buff *skb,
@@ -904,9 +913,8 @@ rx_success:
  * batadv_recv_unicast_tvlv - receive and process unicast tvlv packets
  * @skb: unicast tvlv packet to process
  * @recv_if: pointer to interface this packet was received on
- * @dst_addr: the payload destination
  *
- * Returns NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
+ * Return: NET_RX_SUCCESS if the packet has been consumed or NET_RX_DROP
  * otherwise.
  */
 int batadv_recv_unicast_tvlv(struct sk_buff *skb,
@@ -960,7 +968,7 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
  * the assembled packet will exceed our MTU; 2) Buffer fragment, if we still
  * lack further fragments; 3) Merge fragments, if we have all needed parts.
  *
- * Return NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
+ * Return: NET_RX_DROP if the skb is not consumed, NET_RX_SUCCESS otherwise.
  */
 int batadv_recv_frag_packet(struct sk_buff *skb,
                            struct batadv_hard_iface *recv_if)
index 204bbe4952a6d27848f78fd932b8c077baea8382..c776e9655b9b4f0cb63888729a4132c378905902 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
index 782fa33ec296a85a2869e40fec9dabc6c89da923..d8b03fd604e0f7e6970a44f66615b145b22575e9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -111,7 +111,7 @@ send_skb_err:
  * host, NULL can be passed as recv_if and no interface alternating is
  * attempted.
  *
- * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
+ * Return: NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
  * NET_XMIT_POLICED if the skb is buffered for later transmit.
  */
 int batadv_send_skb_to_orig(struct sk_buff *skb,
@@ -165,7 +165,7 @@ out:
  * @hdr_size: amount of bytes to push at the beginning of the skb
  * @orig_node: the destination node
  *
- * Returns false if the buffer extension was not possible or true otherwise.
+ * Return: false if the buffer extension was not possible or true otherwise.
  */
 static bool
 batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
@@ -196,7 +196,7 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
  * @skb: the skb containing the payload to encapsulate
  * @orig_node: the destination node
  *
- * Returns false if the payload could not be encapsulated or true otherwise.
+ * Return: false if the payload could not be encapsulated or true otherwise.
  */
 static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
                                            struct batadv_orig_node *orig_node)
@@ -211,10 +211,10 @@ static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
  *  unicast 4addr header
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: the skb containing the payload to encapsulate
- * @orig_node: the destination node
+ * @orig: the destination node
  * @packet_subtype: the unicast 4addr packet subtype to use
  *
- * Returns false if the payload could not be encapsulated or true otherwise.
+ * Return: false if the payload could not be encapsulated or true otherwise.
  */
 bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
                                           struct sk_buff *skb,
@@ -265,7 +265,7 @@ out:
  * as packet_type. Then send this frame to the given orig_node and release a
  * reference to this orig_node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
                            struct sk_buff *skb, int packet_type,
@@ -339,7 +339,7 @@ out:
  * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
  * to the according destination node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
                                   struct sk_buff *skb, int packet_type,
@@ -373,7 +373,7 @@ int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
  * Look up the currently selected gateway. Wrap the given skb into a batman-adv
  * unicast header and send this frame to this gateway node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
                           unsigned short vid)
@@ -430,14 +430,19 @@ _batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                           send_time);
 }
 
-/* add a broadcast packet to the queue and setup timers. broadcast packets
- * are sent multiple times to increase probability for being received.
+/**
+ * batadv_add_bcast_packet_to_list - queue broadcast packet for multiple sends
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: broadcast packet to add
+ * @delay: number of jiffies to wait before sending
  *
- * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
- * errors.
+ * add a broadcast packet to the queue and setup timers. broadcast packets
+ * are sent multiple times to increase probability for being received.
  *
  * The skb is not consumed, so the caller should make sure that the
  * skb is freed.
+ *
+ * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY on errors.
  */
 int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
                                    const struct sk_buff *skb,
index 82059f259e4682d52997eb1a0f8692c627569ec9..7ff95cada2e743c6981fe443528133c590771b44 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -69,7 +69,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
  * header via the translation table. Wrap the given skb into a batman-adv
  * unicast header. Then send this frame to the according destination node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
                                         struct sk_buff *skb, u8 *dst_hint,
@@ -92,7 +92,7 @@ static inline int batadv_send_skb_via_tt(struct batadv_priv *bat_priv,
  * unicast-4addr header. Then send this frame to the according destination
  * node.
  *
- * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
+ * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
  */
 static inline int batadv_send_skb_via_tt_4addr(struct batadv_priv *bat_priv,
                                               struct sk_buff *skb,
index ac4d08de5df46abc5c7986b29eb89627fe160fc3..4bf35b8c3d238ef51232276678a439e67734f0b9 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -480,7 +480,7 @@ out:
 /**
  * batadv_softif_vlan_free_ref - decrease the vlan object refcounter and
  *  possibly free it
- * @softif_vlan: the vlan object to release
+ * @vlan: the vlan object to release
  */
 void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
@@ -501,7 +501,7 @@ void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the identifier of the vlan object to retrieve
  *
- * Returns the private data of the vlan matching the vid passed as argument or
+ * Return: the private data of the vlan matching the vid passed as argument or
  * NULL otherwise. The refcounter of the returned object is incremented by 1.
  */
 struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
@@ -530,7 +530,7 @@ struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  *
- * Returns 0 on success, a negative error otherwise.
+ * Return: 0 on success, a negative error otherwise.
  */
 int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 {
@@ -594,12 +594,13 @@ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
 /**
  * batadv_interface_add_vid - ndo_add_vid API implementation
  * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id
  * @vid: identifier of the new vlan
  *
  * Set up all the internal structures for handling the new vlan on top of the
  * mesh interface
  *
- * Returns 0 on success or a negative error code in case of failure.
+ * Return: 0 on success or a negative error code in case of failure.
  */
 static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
                                    unsigned short vid)
@@ -651,12 +652,13 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 /**
  * batadv_interface_kill_vid - ndo_kill_vid API implementation
  * @dev: the netdev of the mesh interface
+ * @proto: protocol of the vlan id
  * @vid: identifier of the deleted vlan
  *
  * Destroy all the internal structures used to handle the vlan identified by vid
  * on top of the mesh interface
  *
- * Returns 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
+ * Return: 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
  * or -ENOENT if the specified vlan id wasn't registered.
  */
 static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
@@ -745,7 +747,7 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
  * batadv_softif_init_late - late stage initialization of soft interface
  * @dev: registered network device to modify
  *
- * Returns error code on failures
+ * Return: error code on failures
  */
 static int batadv_softif_init_late(struct net_device *dev)
 {
@@ -847,7 +849,7 @@ free_bat_counters:
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should become the slave interface
  *
- * Return 0 if successful or error otherwise.
+ * Return: 0 if successful or error otherwise.
  */
 static int batadv_softif_slave_add(struct net_device *dev,
                                   struct net_device *slave_dev)
@@ -872,7 +874,7 @@ out:
  * @dev: batadv_soft_interface used as master interface
  * @slave_dev: net_device which should be removed from the master interface
  *
- * Return 0 if successful or error otherwise.
+ * Return: 0 if successful or error otherwise.
  */
 static int batadv_softif_slave_del(struct net_device *dev,
                                   struct net_device *slave_dev)
index 8e82176f40b1f4f5705f4b213b94f2971f885ac8..d17cfbacf8093fb125ace2965ee8dcaec4c60a2d 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index fe87777fda8a0a0e2074adffd9b833284c370006..964fc5986b2c1eb6521be386264f4702745a87b5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
@@ -64,7 +64,7 @@ static struct batadv_priv *batadv_kobj_to_batpriv(struct kobject *obj)
  * batadv_vlan_kobj_to_batpriv - convert a vlan kobj in the associated batpriv
  * @obj: kobject to convert
  *
- * Returns the associated batadv_priv struct.
+ * Return: the associated batadv_priv struct.
  */
 static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
 {
@@ -82,9 +82,10 @@ static struct batadv_priv *batadv_vlan_kobj_to_batpriv(struct kobject *obj)
 
 /**
  * batadv_kobj_to_vlan - convert a kobj in the associated softif_vlan struct
+ * @bat_priv: the bat priv with all the soft interface information
  * @obj: kobject to convert
  *
- * Returns the associated softif_vlan struct if found, NULL otherwise.
+ * Return: the associated softif_vlan struct if found, NULL otherwise.
  */
 static struct batadv_softif_vlan *
 batadv_kobj_to_vlan(struct batadv_priv *bat_priv, struct kobject *obj)
@@ -491,7 +492,7 @@ static ssize_t batadv_store_gw_bwidth(struct kobject *kobj,
  * @attr: the batman-adv attribute the user is interacting with
  * @buff: the buffer that will contain the data to send back to the user
  *
- * Returns the number of bytes written into 'buff' on success or a negative
+ * Return: the number of bytes written into 'buff' on success or a negative
  * error code in case of failure
  */
 static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
@@ -511,7 +512,7 @@ static ssize_t batadv_show_isolation_mark(struct kobject *kobj,
  * @buff: the buffer containing the user data
  * @count: number of bytes in the buffer
  *
- * Returns 'count' on success or a negative error code in case of failure
+ * Return: 'count' on success or a negative error code in case of failure
  */
 static ssize_t batadv_store_isolation_mark(struct kobject *kobj,
                                           struct attribute *attr, char *buff,
@@ -620,9 +621,7 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
 
 BATADV_ATTR_VLAN_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
 
-/**
- * batadv_vlan_attrs - array of vlan specific sysfs attributes
- */
+/* array of vlan specific sysfs attributes */
 static struct batadv_attribute *batadv_vlan_attrs[] = {
        &batadv_attr_vlan_ap_isolation,
        NULL,
@@ -683,7 +682,7 @@ void batadv_sysfs_del_meshif(struct net_device *dev)
  * @dev: netdev of the mesh interface
  * @vlan: private data of the newly added VLAN interface
  *
- * Returns 0 on success and -ENOMEM if any of the structure allocations fails.
+ * Return: 0 on success and -ENOMEM if any of the structure allocations fails.
  */
 int batadv_sysfs_add_vlan(struct net_device *dev,
                          struct batadv_softif_vlan *vlan)
index 61974428a7af3c9c9494f25f57d52fd2811bc923..c76021b4e1980a75bb8daa366b95d53e653094f4 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner
  *
index cdfc85fa2743c78d4e0e3e269085ec5d6f1fb22f..0dc8a5ca33bf83367862303c17439e319da7864a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
@@ -68,7 +68,15 @@ static void batadv_tt_global_del(struct batadv_priv *bat_priv,
                                 unsigned short vid, const char *message,
                                 bool roaming);
 
-/* returns 1 if they are the same mac addr and vid */
+/**
+ * batadv_compare_tt - check if two TT entries are the same
+ * @node: the list element pointer of the first TT entry
+ * @data2: pointer to the tt_common_entry of the second TT entry
+ *
+ * Compare the MAC address and the VLAN ID of the two TT entries and check if
+ * they are the same TT client.
+ * Return: 1 if the two TT clients are the same, 0 otherwise
+ */
 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
 {
        const void *data1 = container_of(node, struct batadv_tt_common_entry,
@@ -84,7 +92,7 @@ static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
  * @data: pointer to the tt_common_entry object to map
  * @size: the size of the hash table
  *
- * Returns the hash index where the object represented by 'data' should be
+ * Return: the hash index where the object represented by 'data' should be
  * stored at.
  */
 static inline u32 batadv_choose_tt(const void *data, u32 size)
@@ -105,7 +113,7 @@ static inline u32 batadv_choose_tt(const void *data, u32 size)
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
  *
- * Returns a pointer to the tt_common struct belonging to the searched client if
+ * Return: a pointer to the tt_common struct belonging to the searched client if
  * found, NULL otherwise.
  */
 static struct batadv_tt_common_entry *
@@ -150,7 +158,7 @@ batadv_tt_hash_find(struct batadv_hashtable *hash, const u8 *addr,
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
  *
- * Returns a pointer to the corresponding tt_local_entry struct if the client is
+ * Return: a pointer to the corresponding tt_local_entry struct if the client is
  * found, NULL otherwise.
  */
 static struct batadv_tt_local_entry *
@@ -175,7 +183,7 @@ batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
  * @addr: the mac address of the client to look for
  * @vid: VLAN identifier
  *
- * Returns a pointer to the corresponding tt_global_entry struct if the client
+ * Return: a pointer to the corresponding tt_global_entry struct if the client
  * is found, NULL otherwise.
  */
 static struct batadv_tt_global_entry *
@@ -217,11 +225,11 @@ batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
 
 /**
  * batadv_tt_global_hash_count - count the number of orig entries
- * @hash: hash table containing the tt entries
+ * @bat_priv: the bat priv with all the soft interface information
  * @addr: the mac address of the client to count entries for
  * @vid: VLAN identifier
  *
- * Return the number of originators advertising the given address/data
+ * Return: the number of originators advertising the given address/data
  * (excluding ourself).
  */
 int batadv_tt_global_hash_count(struct batadv_priv *bat_priv,
@@ -286,9 +294,9 @@ static void batadv_tt_local_size_dec(struct batadv_priv *bat_priv,
 }
 
 /**
- * batadv_tt_global_size_mod - change the size by v of the local table
- *  identified by vid
- * @bat_priv: the bat priv with all the soft interface information
+ * batadv_tt_global_size_mod - change the size by v of the global table
+ *  for orig_node identified by vid
+ * @orig_node: the originator for which the table has to be modified
  * @vid: the VLAN identifier
  * @v: the amount to sum to the global table size
  */
@@ -435,7 +443,7 @@ unlock:
  * batadv_tt_len - compute length in bytes of given number of tt changes
  * @changes_num: number of tt changes
  *
- * Returns computed length in bytes.
+ * Return: computed length in bytes.
  */
 static int batadv_tt_len(int changes_num)
 {
@@ -446,7 +454,7 @@ static int batadv_tt_len(int changes_num)
  * batadv_tt_entries - compute the number of entries fitting in tt_len bytes
  * @tt_len: available space
  *
- * Returns the number of entries.
+ * Return: the number of entries.
  */
 static u16 batadv_tt_entries(u16 tt_len)
 {
@@ -458,7 +466,7 @@ static u16 batadv_tt_entries(u16 tt_len)
  *  size when transmitted over the air
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Returns local translation table size in bytes.
+ * Return: local translation table size in bytes.
  */
 static int batadv_tt_local_table_transmit_size(struct batadv_priv *bat_priv)
 {
@@ -524,7 +532,7 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
  * @mark: the value contained in the skb->mark field of the received packet (if
  *  any)
  *
- * Returns true if the client was successfully added, false otherwise.
+ * Return: true if the client was successfully added, false otherwise.
  */
 bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
                         unsigned short vid, int ifindex, u32 mark)
@@ -719,12 +727,11 @@ out:
  *  function reserves the amount of space needed to send the entire global TT
  *  table. In case of success the value is updated with the real amount of
  *  reserved bytes
-
  * Allocate the needed amount of memory for the entire TT TVLV and write its
  * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
  * objects, one per active VLAN served by the originator node.
  *
- * Return the size of the allocated buffer or 0 in case of failure.
+ * Return: the size of the allocated buffer or 0 in case of failure.
  */
 static u16
 batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
@@ -798,7 +805,7 @@ out:
  * header made up by one tvlv_tt_data object and a series of tvlv_tt_vlan_data
  * objects, one per active VLAN.
  *
- * Return the size of the allocated buffer or 0 in case of failure.
+ * Return: the size of the allocated buffer or 0 in case of failure.
  */
 static u16
 batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
@@ -1040,7 +1047,7 @@ batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
  * @message: message to append to the log on deletion
  * @roaming: true if the deletion is due to a roaming event
  *
- * Returns the flags assigned to the local entry before being deleted
+ * Return: the flags assigned to the local entry before being deleted
  */
 u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
                           unsigned short vid, const char *message,
@@ -1240,10 +1247,16 @@ static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
        spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 }
 
-/* retrieves the orig_tt_list_entry belonging to orig_node from the
+/**
+ * batadv_tt_global_orig_entry_find - find a TT orig_list_entry
+ * @entry: the TT global entry where the orig_list_entry has to be
+ *  extracted from
+ * @orig_node: the originator for which the orig_list_entry has to be found
+ *
+ * retrieve the orig_tt_list_entry belonging to orig_node from the
  * batadv_tt_global_entry list
  *
- * returns it with an increased refcounter, NULL if not found
+ * Return: it with an increased refcounter, NULL if not found
  */
 static struct batadv_tt_orig_list_entry *
 batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
@@ -1268,8 +1281,15 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
        return orig_entry;
 }
 
-/* find out if an orig_node is already in the list of a tt_global_entry.
- * returns true if found, false otherwise
+/**
+ * batadv_tt_global_entry_has_orig - check if a TT global entry is also handled
+ *  by a given originator
+ * @entry: the TT global entry to check
+ * @orig_node: the originator to search in the list
+ *
+ * find out if an orig_node is already in the list of a tt_global_entry.
+ *
+ * Return: true if found, false otherwise
  */
 static bool
 batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
@@ -1341,7 +1361,7 @@ out:
  *
  * The caller must hold orig_node refcount.
  *
- * Return true if the new entry has been added, false otherwise
+ * Return: true if the new entry has been added, false otherwise
  */
 static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
                                 struct batadv_orig_node *orig_node,
@@ -1499,7 +1519,7 @@ out:
  * @tt_global_entry: global translation table entry to be analyzed
  *
  * This function assumes the caller holds rcu_read_lock().
- * Returns best originator list entry or NULL on errors.
+ * Return: best originator list entry or NULL on errors.
  */
 static struct batadv_tt_orig_list_entry *
 batadv_transtable_best_orig(struct batadv_priv *bat_priv,
@@ -2029,7 +2049,7 @@ _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
  * @addr: mac address of the destination client
  * @vid: VLAN identifier
  *
- * Returns a pointer to the originator that was selected as destination in the
+ * Return: a pointer to the originator that was selected as destination in the
  * mesh for contacting the client 'addr', NULL otherwise.
  * In case of multiple originators serving the same client, the function returns
  * the best one (best in terms of metric towards the destination node).
@@ -2104,7 +2124,7 @@ out:
  * because the XOR operation can combine them all while trying to reduce the
  * noise as much as possible.
  *
- * Returns the checksum of the global table of a given originator.
+ * Return: the checksum of the global table of a given originator.
  */
 static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
                                struct batadv_orig_node *orig_node,
@@ -2181,7 +2201,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
  * For details about the computation, please refer to the documentation for
  * batadv_tt_global_crc().
  *
- * Returns the checksum of the local table
+ * Return: the checksum of the local table
  */
 static u32 batadv_tt_local_crc(struct batadv_priv *bat_priv,
                               unsigned short vid)
@@ -2287,7 +2307,7 @@ static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
  * @bat_priv: the bat priv with all the soft interface information
  * @orig_node: orig node this request is being issued for
  *
- * Returns the pointer to the new tt_req_node struct if no request
+ * Return: the pointer to the new tt_req_node struct if no request
  * has already been issued for this orig_node, NULL otherwise.
  */
 static struct batadv_tt_req_node *
@@ -2322,7 +2342,7 @@ unlock:
  * @entry_ptr: to be checked local tt entry
  * @data_ptr: not used but definition required to satisfy the callback prototype
  *
- * Returns 1 if the entry is a valid, 0 otherwise.
+ * Return: 1 if the entry is valid, 0 otherwise.
  */
 static int batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
 {
@@ -2406,9 +2426,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
  * @orig_node: originator for which the CRCs have to be checked
  * @tt_vlan: pointer to the first tvlv VLAN entry
  * @num_vlan: number of tvlv VLAN entries
- * @create: if true, create VLAN objects if not found
  *
- * Return true if all the received CRCs match the locally stored ones, false
+ * Return: true if all the received CRCs match the locally stored ones, false
  * otherwise
  */
 static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
@@ -2511,6 +2530,8 @@ static void batadv_tt_global_update_crc(struct batadv_priv *bat_priv,
  * @num_vlan: number of tvlv VLAN entries
  * @full_table: ask for the entire translation table if true, while only for the
  *  last TT diff otherwise
+ *
+ * Return: true if the TT Request was sent, false otherwise
  */
 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
                                  struct batadv_orig_node *dst_orig_node,
@@ -2591,7 +2612,7 @@ out:
  * @req_src: mac address of tt request sender
  * @req_dst: mac address of tt request recipient
  *
- * Returns true if tt request reply was sent, false otherwise.
+ * Return: true if tt request reply was sent, false otherwise.
  */
 static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
                                          struct batadv_tvlv_tt_data *tt_data,
@@ -2723,7 +2744,7 @@ out:
  * @tt_data: tt data containing the tt request information
  * @req_src: mac address of tt request sender
  *
- * Returns true if tt request reply was sent, false otherwise.
+ * Return: true if tt request reply was sent, false otherwise.
  */
 static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
                                       struct batadv_tvlv_tt_data *tt_data,
@@ -2841,7 +2862,7 @@ out:
  * @req_src: mac address of tt request sender
  * @req_dst: mac address of tt request recipient
  *
- * Returns true if tt request reply was sent, false otherwise.
+ * Return: true if tt request reply was sent, false otherwise.
  */
 static bool batadv_send_tt_response(struct batadv_priv *bat_priv,
                                    struct batadv_tvlv_tt_data *tt_data,
@@ -2936,7 +2957,7 @@ static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
  *
- * Returns true if the client is served by this node, false otherwise.
+ * Return: true if the client is served by this node, false otherwise.
  */
 bool batadv_is_my_client(struct batadv_priv *bat_priv, const u8 *addr,
                         unsigned short vid)
@@ -3053,11 +3074,16 @@ static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
        spin_unlock_bh(&bat_priv->tt.roam_list_lock);
 }
 
-/* This function checks whether the client already reached the
+/**
+ * batadv_tt_check_roam_count - check if a client has roamed too frequently
+ * @bat_priv: the bat priv with all the soft interface information
+ * @client: mac address of the roaming client
+ *
+ * This function checks whether the client already reached the
  * maximum number of possible roaming phases. In this case the ROAMING_ADV
  * will not be sent.
  *
- * returns true if the ROAMING_ADV can be sent, false otherwise
+ * Return: true if the ROAMING_ADV can be sent, false otherwise
  */
 static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv, u8 *client)
 {
@@ -3369,13 +3395,12 @@ out:
  * batadv_tt_update_orig - update global translation table with new tt
  *  information received via ogms
  * @bat_priv: the bat priv with all the soft interface information
- * @orig: the orig_node of the ogm
- * @tt_vlan: pointer to the first tvlv VLAN entry
+ * @orig_node: the orig_node of the ogm
+ * @tt_buff: pointer to the first tvlv VLAN entry
  * @tt_num_vlan: number of tvlv VLAN entries
  * @tt_change: pointer to the first entry in the TT buffer
  * @tt_num_changes: number of tt changes inside the tt buffer
  * @ttvn: translation table version number of this changeset
- * @tt_crc: crc32 checksum of orig node's translation table
  */
 static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
                                  struct batadv_orig_node *orig_node,
@@ -3457,7 +3482,7 @@ request_table:
  * @addr: the mac address of the client to check
  * @vid: VLAN identifier
  *
- * Returns true if we know that the client has moved from its old originator
+ * Return: true if we know that the client has moved from its old originator
  * to another one. This entry is still kept for consistency purposes and will be
  * deleted later by a DEL or because of timeout
  */
@@ -3483,7 +3508,7 @@ out:
  * @addr: the mac address of the local client to query
  * @vid: VLAN identifier
  *
- * Returns true if the local client is known to be roaming (it is not served by
+ * Return: true if the local client is known to be roaming (it is not served by
  * this node anymore) or not. If yes, the client is still present in the table
  * to keep the latter consistent with the node TTVN
  */
@@ -3612,7 +3637,7 @@ static void batadv_tt_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
  * @tvlv_value: tvlv buffer containing the tt data
  * @tvlv_value_len: tvlv buffer length
  *
- * Returns NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
+ * Return: NET_RX_DROP if the tt tvlv is to be re-routed, NET_RX_SUCCESS
  * otherwise.
  */
 static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
@@ -3693,7 +3718,7 @@ static int batadv_tt_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
  * @tvlv_value: tvlv buffer containing the tt data
  * @tvlv_value_len: tvlv buffer length
  *
- * Returns NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
+ * Return: NET_RX_DROP if the tt roam tvlv is to be re-routed, NET_RX_SUCCESS
  * otherwise.
  */
 static int batadv_roam_tvlv_unicast_handler_v1(struct batadv_priv *bat_priv,
@@ -3739,7 +3764,7 @@ out:
  * batadv_tt_init - initialise the translation table internals
  * @bat_priv: the bat priv with all the soft interface information
  *
- * Return 0 on success or negative error number in case of failure.
+ * Return: 0 on success or negative error number in case of failure.
  */
 int batadv_tt_init(struct batadv_priv *bat_priv)
 {
@@ -3777,7 +3802,7 @@ int batadv_tt_init(struct batadv_priv *bat_priv)
  * @addr: the mac address of the client
  * @vid: the identifier of the VLAN where this client is connected
  *
- * Returns true if the client is marked with the TT_CLIENT_ISOLA flag, false
+ * Return: true if the client is marked with the TT_CLIENT_ISOLA flag, false
  * otherwise
  */
 bool batadv_tt_global_is_isolated(struct batadv_priv *bat_priv,
index abd8e116e5fb0dad0ca4bd74d18ad8e77490d024..7c7e2c006bfe07d48ce81c5bfa7d08b495ce2245 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich, Antonio Quartulli
  *
index 3437b667a2cd670965cc12479c2b2169f7020803..8974bc0dc15cabd44926dba01ad8064d912fcb4a 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
  *
  * Marek Lindner, Simon Wunderlich
  *
@@ -73,7 +73,7 @@ enum batadv_dhcp_recipient {
 #define BATADV_TT_SYNC_MASK    0x00F0
 
 /**
- * struct batadv_hard_iface_bat_iv - per hard interface B.A.T.M.A.N. IV data
+ * struct batadv_hard_iface_bat_iv - per hard-interface B.A.T.M.A.N. IV data
  * @ogm_buff: buffer holding the OGM packet
  * @ogm_buff_len: length of the OGM packet buffer
  * @ogm_seqno: OGM sequence number - used to identify each OGM
@@ -97,8 +97,8 @@ struct batadv_hard_iface_bat_iv {
  *  batman-adv for this interface
  * @soft_iface: the batman-adv interface which uses this network interface
  * @rcu: struct used for freeing in an RCU-safe manner
- * @bat_iv: BATMAN IV specific per hard interface data
- * @cleanup_work: work queue callback item for hard interface deinit
+ * @bat_iv: per hard-interface B.A.T.M.A.N. IV data
+ * @cleanup_work: work queue callback item for hard-interface deinit
  * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
  * @neigh_list: list of unique single hop neighbors via this interface
  * @neigh_list_lock: lock protecting neigh_list
@@ -125,7 +125,7 @@ struct batadv_hard_iface {
 /**
  * struct batadv_orig_ifinfo - originator info per outgoing interface
  * @list: list node for orig_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard interface
+ * @if_outgoing: pointer to outgoing hard-interface
  * @router: router that should be used to reach this originator
  * @last_real_seqno: last and best known sequence number
  * @last_ttl: ttl of last received packet
@@ -202,7 +202,7 @@ struct batadv_orig_node_vlan {
 
 /**
  * struct batadv_orig_bat_iv - B.A.T.M.A.N. IV private orig_node members
- * @bcast_own: set of bitfields (one per hard interface) where each one counts
+ * @bcast_own: set of bitfields (one per hard-interface) where each one counts
  * the number of our OGMs this orig_node rebroadcasted "back" to us  (relative
  * to last_real_seqno). Every bitfield is BATADV_TQ_LOCAL_WINDOW_SIZE bits long.
  * @bcast_own_sum: sum of bcast_own
@@ -346,10 +346,11 @@ struct batadv_gw_node {
 };
 
 /**
- * batadv_hardif_neigh_node - unique neighbor per hard interface
+ * struct batadv_hardif_neigh_node - unique neighbor per hard-interface
  * @list: list node for batadv_hard_iface::neigh_list
  * @addr: the MAC address of the neighboring interface
- * @if_incoming: pointer to incoming hard interface
+ * @if_incoming: pointer to incoming hard-interface
+ * @last_seen: when last packet via this neighbor was received
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in a RCU-safe manner
  */
@@ -369,7 +370,7 @@ struct batadv_hardif_neigh_node {
  * @addr: the MAC address of the neighboring interface
  * @ifinfo_list: list for routing metrics per outgoing interface
  * @ifinfo_lock: lock protecting private ifinfo members and list
- * @if_incoming: pointer to incoming hard interface
+ * @if_incoming: pointer to incoming hard-interface
  * @last_seen: when last packet via this neighbor was received
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
@@ -388,7 +389,7 @@ struct batadv_neigh_node {
 
 /**
  * struct batadv_neigh_ifinfo_bat_iv - neighbor information per outgoing
- *  interface for BATMAN IV
+ *  interface for B.A.T.M.A.N. IV
  * @tq_recv: ring buffer of received TQ values from this neigh node
  * @tq_index: ring buffer index
  * @tq_avg: averaged tq of all tq values in the ring buffer (tq_recv)
@@ -407,7 +408,7 @@ struct batadv_neigh_ifinfo_bat_iv {
 /**
  * struct batadv_neigh_ifinfo - neighbor information per outgoing interface
  * @list: list node for batadv_neigh_node::ifinfo_list
- * @if_outgoing: pointer to outgoing hard interface
+ * @if_outgoing: pointer to outgoing hard-interface
  * @bat_iv: B.A.T.M.A.N. IV private structure
  * @last_ttl: last received ttl from this neigh node
  * @refcount: number of contexts the object is used
@@ -771,6 +772,9 @@ struct batadv_softif_vlan {
  * @orig_interval: OGM broadcast interval in milliseconds
  * @hop_penalty: penalty which will be applied to an OGM's tq-field on every hop
  * @log_level: configured log level (see batadv_dbg_level)
+ * @isolation_mark: the skb->mark value used to match packets for AP isolation
+ * @isolation_mark_mask: bitmask identifying the bits in skb->mark to be used
+ *  for the isolation mark
  * @bcast_seqno: last sent broadcast packet sequence number
  * @bcast_queue_left: number of remaining buffered broadcast packet slots
  * @batman_queue_left: number of remaining OGM packet slots
@@ -783,8 +787,8 @@ struct batadv_softif_vlan {
  * @forw_bat_list_lock: lock protecting forw_bat_list
  * @forw_bcast_list_lock: lock protecting forw_bcast_list
  * @orig_work: work queue callback item for orig node purging
- * @cleanup_work: work queue callback item for soft interface deinit
- * @primary_if: one of the hard interfaces assigned to this mesh interface
+ * @cleanup_work: work queue callback item for soft-interface deinit
+ * @primary_if: one of the hard-interfaces assigned to this mesh interface
  *  becomes the primary interface
  * @bat_algo_ops: routing algorithm used by this mesh interface
  * @softif_vlan_list: a list of softif_vlan structs, one per VLAN created on top
index d040365ba98e7f7cafcbedd20ac3b4c632603414..8a4cc2f7f0db2a277e4281d5be62d76c7446a716 100644 (file)
@@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
        /* check that it's our buffer */
        if (lowpan_is_ipv6(*skb_network_header(skb))) {
+               /* Pull off the 1-byte of 6lowpan header. */
+               skb_pull(skb, 1);
+
                /* Copy the packet so that the IPv6 header is
                 * properly aligned.
                 */
@@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;
+               local_skb->dev = dev;
 
                skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
 
@@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
                if (!local_skb)
                        goto drop;
 
+               local_skb->dev = dev;
+
                ret = iphc_decompress(local_skb, dev, chan);
                if (ret < 0) {
                        kfree_skb(local_skb);
@@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
 
                local_skb->protocol = htons(ETH_P_IPV6);
                local_skb->pkt_type = PACKET_HOST;
-               local_skb->dev = dev;
 
                if (give_skb_to_upper(local_skb, dev)
                                != NET_RX_SUCCESS) {
index 95d1a66ba03aa20095932a1c45f9f76af2cc1393..06c31b9a68b0bce3cc4f28224c3c23864f78e7c9 100644 (file)
@@ -69,6 +69,15 @@ config BT_6LOWPAN
        help
          IPv6 compression over Bluetooth Low Energy.
 
+config BT_LEDS
+       bool "Enable LED triggers"
+       depends on BT
+       depends on LEDS_CLASS
+       select LEDS_TRIGGERS
+       help
+         This option selects a few LED triggers for different
+         Bluetooth events.
+
 config BT_SELFTEST
        bool "Bluetooth self testing support"
        depends on BT && DEBUG_KERNEL
index 2b15ae8c1def06642682c488a9439f0e57feeb13..b3ff12eb9b6dcdaa590bacc997cbe2e0e566783e 100644 (file)
@@ -17,6 +17,7 @@ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \
 
 bluetooth-$(CONFIG_BT_BREDR) += sco.o
 bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o
+bluetooth-$(CONFIG_BT_LEDS) += leds.o
 bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o
 bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o
 
index 47bcef7547967ce544ff0cf5ea7dabecdd080d90..541760fe53d4cf91553fb0b126de340007d453e5 100644 (file)
@@ -40,6 +40,7 @@
 #include "hci_request.h"
 #include "hci_debugfs.h"
 #include "smp.h"
+#include "leds.h"
 
 static void hci_rx_work(struct work_struct *work);
 static void hci_cmd_work(struct work_struct *work);
@@ -1395,6 +1396,7 @@ static int hci_dev_do_open(struct hci_dev *hdev)
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                set_bit(HCI_UP, &hdev->flags);
                hci_sock_dev_event(hdev, HCI_DEV_UP);
+               hci_leds_update_powered(hdev, true);
                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
                    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
@@ -1532,6 +1534,8 @@ int hci_dev_do_close(struct hci_dev *hdev)
                return 0;
        }
 
+       hci_leds_update_powered(hdev, false);
+
        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);
@@ -3067,6 +3071,8 @@ int hci_register_dev(struct hci_dev *hdev)
        if (error < 0)
                goto err_wqueue;
 
+       hci_leds_init(hdev);
+
        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
index 41b5f3813f02f1ed73ffecdc7d685645ded8c265..c78ee2dc93237475eb002ae61a737e0137c387f9 100644 (file)
@@ -688,21 +688,29 @@ static u8 update_white_list(struct hci_request *req)
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
-               struct hci_cp_le_del_from_white_list cp;
+               /* If the device is neither in pend_le_conns nor
+                * pend_le_reports then remove it from the whitelist.
+                */
+               if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
+                                              &b->bdaddr, b->bdaddr_type) &&
+                   !hci_pend_le_action_lookup(&hdev->pend_le_reports,
+                                              &b->bdaddr, b->bdaddr_type)) {
+                       struct hci_cp_le_del_from_white_list cp;
+
+                       cp.bdaddr_type = b->bdaddr_type;
+                       bacpy(&cp.bdaddr, &b->bdaddr);
 
-               if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
-                                             &b->bdaddr, b->bdaddr_type) ||
-                   hci_pend_le_action_lookup(&hdev->pend_le_reports,
-                                             &b->bdaddr, b->bdaddr_type)) {
-                       white_list_entries++;
+                       hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
+                                   sizeof(cp), &cp);
                        continue;
                }
 
-               cp.bdaddr_type = b->bdaddr_type;
-               bacpy(&cp.bdaddr, &b->bdaddr);
+               if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
+                       /* White list can not be used with RPAs */
+                       return 0x00;
+               }
 
-               hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
-                           sizeof(cp), &cp);
+               white_list_entries++;
        }
 
        /* Since all no longer valid white list entries have been
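For readability, the rewritten loop body above can be restated as one per-entry decision. The following is an illustrative sketch only: the wrapper function and its u8 counter parameter are not part of the patch, while the helpers it calls are exactly the ones visible in the hunk, so it assumes it would live next to update_white_list() in hci_request.c.

static bool white_list_keep_entry(struct hci_dev *hdev, struct bdaddr_list *b,
				  struct hci_request *req, u8 *entries)
{
	bool pending;

	pending = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					    &b->bdaddr, b->bdaddr_type) ||
		  hci_pend_le_action_lookup(&hdev->pend_le_reports,
					    &b->bdaddr, b->bdaddr_type);

	if (!pending) {
		struct hci_cp_le_del_from_white_list cp;

		/* stale entry: queue a controller removal and keep going */
		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);
		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
		return true;
	}

	/* still wanted, but RPAs cannot be matched by the controller list */
	if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type))
		return false;

	(*entries)++;
	return true;
}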
index 39a5149f301085d45d37a94361e2b86486d852bd..eb4f5f24cbe36a1d4f524978d52d8558f4dbde80 100644 (file)
@@ -197,10 +197,20 @@ int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
                chan->sport = psm;
                err = 0;
        } else {
-               u16 p;
+               u16 p, start, end, incr;
+
+               if (chan->src_type == BDADDR_BREDR) {
+                       start = L2CAP_PSM_DYN_START;
+                       end = L2CAP_PSM_AUTO_END;
+                       incr = 2;
+               } else {
+                       start = L2CAP_PSM_LE_DYN_START;
+                       end = L2CAP_PSM_LE_DYN_END;
+                       incr = 1;
+               }
 
                err = -EINVAL;
-               for (p = 0x1001; p < 0x1100; p += 2)
+               for (p = start; p <= end; p += incr)
                        if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
                                chan->psm   = cpu_to_le16(p);
                                chan->sport = cpu_to_le16(p);
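The hunk above replaces the hard-coded BR/EDR range with per-link-type start/end/increment values. Below is a standalone sketch of the same selection logic, assuming the conventional Bluetooth ranges (odd BR/EDR dynamic PSMs from 0x1001, LE dynamic PSMs 0x0080-0x00ff); the PSM_* macros are illustrative stand-ins for the kernel's L2CAP_PSM_* constants, and psm_in_use() stands in for __l2cap_global_chan_by_addr().

#include <stdbool.h>
#include <stdint.h>

#define PSM_DYN_START    0x1001	/* BR/EDR dynamic PSMs, odd values */
#define PSM_AUTO_END     0x10ff
#define PSM_LE_DYN_START 0x0080	/* LE dynamic PSMs */
#define PSM_LE_DYN_END   0x00ff

static uint16_t pick_dynamic_psm(bool bredr, bool (*psm_in_use)(uint16_t))
{
	uint16_t p, start, end, incr;

	if (bredr) {
		start = PSM_DYN_START;
		end = PSM_AUTO_END;
		incr = 2;		/* BR/EDR dynamic PSMs must stay odd */
	} else {
		start = PSM_LE_DYN_START;
		end = PSM_LE_DYN_END;
		incr = 1;
	}

	for (p = start; p <= end; p += incr)
		if (!psm_in_use(p))
			return p;	/* first free PSM in the range */

	return 0;			/* exhausted; caller returns -EINVAL */
}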
index 1bb5515270449e8115a0c169ea5b6380594c40a4..e4cae72895a72bebc4c95b430d688e83cfebfada 100644 (file)
@@ -58,7 +58,7 @@ static int l2cap_validate_bredr_psm(u16 psm)
                return -EINVAL;
 
        /* Restrict usage of well-known PSMs */
-       if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE))
+       if (psm < L2CAP_PSM_DYN_START && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;
 
        return 0;
@@ -67,11 +67,11 @@ static int l2cap_validate_bredr_psm(u16 psm)
 static int l2cap_validate_le_psm(u16 psm)
 {
        /* Valid LE_PSM ranges are defined only until 0x00ff */
-       if (psm > 0x00ff)
+       if (psm > L2CAP_PSM_LE_DYN_END)
                return -EINVAL;
 
        /* Restrict fixed, SIG assigned PSM values to CAP_NET_BIND_SERVICE */
-       if (psm <= 0x007f && !capable(CAP_NET_BIND_SERVICE))
+       if (psm < L2CAP_PSM_LE_DYN_START && !capable(CAP_NET_BIND_SERVICE))
                return -EACCES;
 
        return 0;
@@ -125,6 +125,9 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                        goto done;
        }
 
+       bacpy(&chan->src, &la.l2_bdaddr);
+       chan->src_type = la.l2_bdaddr_type;
+
        if (la.l2_cid)
                err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
        else
@@ -156,9 +159,6 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
                break;
        }
 
-       bacpy(&chan->src, &la.l2_bdaddr);
-       chan->src_type = la.l2_bdaddr_type;
-
        if (chan->psm && bdaddr_type_is_le(chan->src_type))
                chan->mode = L2CAP_MODE_LE_FLOWCTL;
 
diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c
new file mode 100644 (file)
index 0000000..8319c84
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2015, Heiner Kallweit <hkallweit1@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+#include "leds.h"
+
+struct hci_basic_led_trigger {
+       struct led_trigger      led_trigger;
+       struct hci_dev          *hdev;
+};
+
+#define to_hci_basic_led_trigger(arg) container_of(arg, \
+                       struct hci_basic_led_trigger, led_trigger)
+
+void hci_leds_update_powered(struct hci_dev *hdev, bool enabled)
+{
+       if (hdev->power_led)
+               led_trigger_event(hdev->power_led,
+                                 enabled ? LED_FULL : LED_OFF);
+}
+
+static void power_activate(struct led_classdev *led_cdev)
+{
+       struct hci_basic_led_trigger *htrig;
+       bool powered;
+
+       htrig = to_hci_basic_led_trigger(led_cdev->trigger);
+       powered = test_bit(HCI_UP, &htrig->hdev->flags);
+
+       led_trigger_event(led_cdev->trigger, powered ? LED_FULL : LED_OFF);
+}
+
+static struct led_trigger *led_allocate_basic(struct hci_dev *hdev,
+                       void (*activate)(struct led_classdev *led_cdev),
+                       const char *name)
+{
+       struct hci_basic_led_trigger *htrig;
+
+       htrig = devm_kzalloc(&hdev->dev, sizeof(*htrig), GFP_KERNEL);
+       if (!htrig)
+               return NULL;
+
+       htrig->hdev = hdev;
+       htrig->led_trigger.activate = activate;
+       htrig->led_trigger.name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+                                                "%s-%s", hdev->name,
+                                                name);
+       if (!htrig->led_trigger.name)
+               goto err_alloc;
+
+       if (devm_led_trigger_register(&hdev->dev, &htrig->led_trigger))
+               goto err_register;
+
+       return &htrig->led_trigger;
+
+err_register:
+       devm_kfree(&hdev->dev, (void *)htrig->led_trigger.name);
+err_alloc:
+       devm_kfree(&hdev->dev, htrig);
+       return NULL;
+}
+
+void hci_leds_init(struct hci_dev *hdev)
+{
+       /* initialize power_led */
+       hdev->power_led = led_allocate_basic(hdev, power_activate, "power");
+}
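The new leds.c only registers a power trigger. As a sketch of how the same helpers could carry further triggers if they were added to the file above: the "rx" trigger name, the hdev->rx_led field, and the hci_leds_rx_event() hook are illustrative assumptions and not part of this patch; led_allocate_basic() and led_trigger_event() are the functions defined above.

/* Illustrative only: assumes a struct led_trigger *rx_led in struct hci_dev */
static void rx_activate(struct led_classdev *led_cdev)
{
	/* a freshly attached LED stays dark until the next RX event */
	led_trigger_event(led_cdev->trigger, LED_OFF);
}

void hci_leds_init_rx(struct hci_dev *hdev)
{
	hdev->rx_led = led_allocate_basic(hdev, rx_activate, "rx");
}

/* callers (e.g. the RX work item) would then pulse it per received packet */
void hci_leds_rx_event(struct hci_dev *hdev)
{
	if (hdev->rx_led)
		led_trigger_event(hdev->rx_led, LED_FULL);
}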
diff --git a/net/bluetooth/leds.h b/net/bluetooth/leds.h
new file mode 100644 (file)
index 0000000..a9c4d6e
--- /dev/null
@@ -0,0 +1,16 @@
+/*
+ * Copyright 2015, Heiner Kallweit <hkallweit1@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if IS_ENABLED(CONFIG_BT_LEDS)
+void hci_leds_update_powered(struct hci_dev *hdev, bool enabled);
+void hci_leds_init(struct hci_dev *hdev);
+#else
+static inline void hci_leds_update_powered(struct hci_dev *hdev,
+                                          bool enabled) {}
+static inline void hci_leds_init(struct hci_dev *hdev) {}
+#endif
index ffed8a1d4f27634866c93d22b4ceb059b956cc91..50976a6481f3cd21e33e5ee75d74dacc06470584 100644 (file)
 */
 
 #include <linux/debugfs.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <crypto/b128ops.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -87,8 +88,8 @@ struct smp_dev {
        u8                      min_key_size;
        u8                      max_key_size;
 
-       struct crypto_blkcipher *tfm_aes;
-       struct crypto_hash      *tfm_cmac;
+       struct crypto_skcipher  *tfm_aes;
+       struct crypto_shash     *tfm_cmac;
 };
 
 struct smp_chan {
@@ -126,8 +127,8 @@ struct smp_chan {
        u8                      dhkey[32];
        u8                      mackey[16];
 
-       struct crypto_blkcipher *tfm_aes;
-       struct crypto_hash      *tfm_cmac;
+       struct crypto_skcipher  *tfm_aes;
+       struct crypto_shash     *tfm_cmac;
 };
 
 /* These debug key values are defined in the SMP section of the core
@@ -165,12 +166,11 @@ static inline void swap_buf(const u8 *src, u8 *dst, size_t len)
  * AES-CMAC, f4, f5, f6, g2 and h6.
  */
 
-static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
+static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m,
                    size_t len, u8 mac[16])
 {
        uint8_t tmp[16], mac_msb[16], msg_msb[CMAC_MSG_MAX];
-       struct hash_desc desc;
-       struct scatterlist sg;
+       SHASH_DESC_ON_STACK(desc, tfm);
        int err;
 
        if (len > CMAC_MSG_MAX)
@@ -181,10 +181,8 @@ static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
                return -EINVAL;
        }
 
-       desc.tfm = tfm;
-       desc.flags = 0;
-
-       crypto_hash_init(&desc);
+       desc->tfm = tfm;
+       desc->flags = 0;
 
        /* Swap key and message from LSB to MSB */
        swap_buf(k, tmp, 16);
@@ -193,23 +191,16 @@ static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
        SMP_DBG("msg (len %zu) %*phN", len, (int) len, m);
        SMP_DBG("key %16phN", k);
 
-       err = crypto_hash_setkey(tfm, tmp, 16);
+       err = crypto_shash_setkey(tfm, tmp, 16);
        if (err) {
                BT_ERR("cipher setkey failed: %d", err);
                return err;
        }
 
-       sg_init_one(&sg, msg_msb, len);
-
-       err = crypto_hash_update(&desc, &sg, len);
-       if (err) {
-               BT_ERR("Hash update error %d", err);
-               return err;
-       }
-
-       err = crypto_hash_final(&desc, mac_msb);
+       err = crypto_shash_digest(desc, msg_msb, len, mac_msb);
+       shash_desc_zero(desc);
        if (err) {
-               BT_ERR("Hash final error %d", err);
+               BT_ERR("Hash computation error %d", err);
                return err;
        }
 
@@ -220,8 +211,8 @@ static int aes_cmac(struct crypto_hash *tfm, const u8 k[16], const u8 *m,
        return 0;
 }
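The aes_cmac() conversion above is the standard synchronous shash pattern: allocate a "cmac(aes)" transform, set the key, and compute the digest in one call through an on-stack descriptor. A minimal self-contained sketch of that pattern, using only calls that appear in the diff (error reporting trimmed):

#include <crypto/hash.h>

static int cmac_aes_digest(const u8 key[16], const u8 *msg, size_t len,
			   u8 out[16])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, 16);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;
		/* one-shot: init + update + final in a single call */
		err = crypto_shash_digest(desc, msg, len, out);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return err;
}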
 
-static int smp_f4(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
-                 const u8 x[16], u8 z, u8 res[16])
+static int smp_f4(struct crypto_shash *tfm_cmac, const u8 u[32],
+                 const u8 v[32], const u8 x[16], u8 z, u8 res[16])
 {
        u8 m[65];
        int err;
@@ -243,7 +234,7 @@ static int smp_f4(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
        return err;
 }
 
-static int smp_f5(struct crypto_hash *tfm_cmac, const u8 w[32],
+static int smp_f5(struct crypto_shash *tfm_cmac, const u8 w[32],
                  const u8 n1[16], const u8 n2[16], const u8 a1[7],
                  const u8 a2[7], u8 mackey[16], u8 ltk[16])
 {
@@ -296,7 +287,7 @@ static int smp_f5(struct crypto_hash *tfm_cmac, const u8 w[32],
        return 0;
 }
 
-static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
+static int smp_f6(struct crypto_shash *tfm_cmac, const u8 w[16],
                  const u8 n1[16], const u8 n2[16], const u8 r[16],
                  const u8 io_cap[3], const u8 a1[7], const u8 a2[7],
                  u8 res[16])
@@ -324,7 +315,7 @@ static int smp_f6(struct crypto_hash *tfm_cmac, const u8 w[16],
        return err;
 }
 
-static int smp_g2(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
+static int smp_g2(struct crypto_shash *tfm_cmac, const u8 u[32], const u8 v[32],
                  const u8 x[16], const u8 y[16], u32 *val)
 {
        u8 m[80], tmp[16];
@@ -350,7 +341,7 @@ static int smp_g2(struct crypto_hash *tfm_cmac, const u8 u[32], const u8 v[32],
        return 0;
 }
 
-static int smp_h6(struct crypto_hash *tfm_cmac, const u8 w[16],
+static int smp_h6(struct crypto_shash *tfm_cmac, const u8 w[16],
                  const u8 key_id[4], u8 res[16])
 {
        int err;
@@ -370,9 +361,9 @@ static int smp_h6(struct crypto_hash *tfm_cmac, const u8 w[16],
  * s1 and ah.
  */
 
-static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
+static int smp_e(struct crypto_skcipher *tfm, const u8 *k, u8 *r)
 {
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        struct scatterlist sg;
        uint8_t tmp[16], data[16];
        int err;
@@ -384,13 +375,10 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
                return -EINVAL;
        }
 
-       desc.tfm = tfm;
-       desc.flags = 0;
-
        /* The most significant octet of key corresponds to k[0] */
        swap_buf(k, tmp, 16);
 
-       err = crypto_blkcipher_setkey(tfm, tmp, 16);
+       err = crypto_skcipher_setkey(tfm, tmp, 16);
        if (err) {
                BT_ERR("cipher setkey failed: %d", err);
                return err;
@@ -401,7 +389,12 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
 
        sg_init_one(&sg, data, 16);
 
-       err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, 16, NULL);
+
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
        if (err)
                BT_ERR("Encrypt data error %d", err);
 
@@ -413,7 +406,7 @@ static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
        return err;
 }
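smp_e() now drives AES-ECB through the skcipher interface with an on-stack request instead of a blkcipher descriptor. A minimal sketch of that pattern for a single 16-byte block, again using only calls visible in the diff:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int aes_ecb_encrypt_block(const u8 key[16], u8 block[16])
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	int err;

	/* mask out async implementations, as the SMP code does */
	tfm = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, 16);
	if (!err) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		sg_init_one(&sg, block, 16);
		skcipher_request_set_tfm(req, tfm);
		/* synchronous use: no completion callback needed */
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, 16, NULL);

		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

	crypto_free_skcipher(tfm);
	return err;
}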
 
-static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
+static int smp_c1(struct crypto_skcipher *tfm_aes, const u8 k[16],
                  const u8 r[16], const u8 preq[7], const u8 pres[7], u8 _iat,
                  const bdaddr_t *ia, u8 _rat, const bdaddr_t *ra, u8 res[16])
 {
@@ -462,7 +455,7 @@ static int smp_c1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
        return err;
 }
 
-static int smp_s1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
+static int smp_s1(struct crypto_skcipher *tfm_aes, const u8 k[16],
                  const u8 r1[16], const u8 r2[16], u8 _r[16])
 {
        int err;
@@ -478,7 +471,7 @@ static int smp_s1(struct crypto_blkcipher *tfm_aes, const u8 k[16],
        return err;
 }
 
-static int smp_ah(struct crypto_blkcipher *tfm, const u8 irk[16],
+static int smp_ah(struct crypto_skcipher *tfm, const u8 irk[16],
                  const u8 r[3], u8 res[3])
 {
        u8 _res[16];
@@ -766,8 +759,8 @@ static void smp_chan_destroy(struct l2cap_conn *conn)
        kzfree(smp->slave_csrk);
        kzfree(smp->link_key);
 
-       crypto_free_blkcipher(smp->tfm_aes);
-       crypto_free_hash(smp->tfm_cmac);
+       crypto_free_skcipher(smp->tfm_aes);
+       crypto_free_shash(smp->tfm_cmac);
 
        /* Ensure that we don't leave any debug key around if debug key
         * support hasn't been explicitly enabled.
@@ -1072,22 +1065,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
                        hcon->dst_type = smp->remote_irk->addr_type;
                        queue_work(hdev->workqueue, &conn->id_addr_update_work);
                }
-
-               /* When receiving an indentity resolving key for
-                * a remote device that does not use a resolvable
-                * private address, just remove the key so that
-                * it is possible to use the controller white
-                * list for scanning.
-                *
-                * Userspace will have been told to not store
-                * this key at this point. So it is safe to
-                * just remove it.
-                */
-               if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
-                       list_del_rcu(&smp->remote_irk->list);
-                       kfree_rcu(smp->remote_irk, rcu);
-                       smp->remote_irk = NULL;
-               }
        }
 
        if (smp->csrk) {
@@ -1382,17 +1359,17 @@ static struct smp_chan *smp_chan_create(struct l2cap_conn *conn)
        if (!smp)
                return NULL;
 
-       smp->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+       smp->tfm_aes = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(smp->tfm_aes)) {
                BT_ERR("Unable to create ECB crypto context");
                kzfree(smp);
                return NULL;
        }
 
-       smp->tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+       smp->tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (IS_ERR(smp->tfm_cmac)) {
                BT_ERR("Unable to create CMAC crypto context");
-               crypto_free_blkcipher(smp->tfm_aes);
+               crypto_free_skcipher(smp->tfm_aes);
                kzfree(smp);
                return NULL;
        }
@@ -3143,8 +3120,8 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
 {
        struct l2cap_chan *chan;
        struct smp_dev *smp;
-       struct crypto_blkcipher *tfm_aes;
-       struct crypto_hash *tfm_cmac;
+       struct crypto_skcipher *tfm_aes;
+       struct crypto_shash *tfm_cmac;
 
        if (cid == L2CAP_CID_SMP_BREDR) {
                smp = NULL;
@@ -3155,17 +3132,17 @@ static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid)
        if (!smp)
                return ERR_PTR(-ENOMEM);
 
-       tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_aes = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_aes)) {
                BT_ERR("Unable to create ECB crypto context");
                kzfree(smp);
                return ERR_CAST(tfm_aes);
        }
 
-       tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0);
        if (IS_ERR(tfm_cmac)) {
                BT_ERR("Unable to create CMAC crypto context");
-               crypto_free_blkcipher(tfm_aes);
+               crypto_free_skcipher(tfm_aes);
                kzfree(smp);
                return ERR_CAST(tfm_cmac);
        }
@@ -3179,8 +3156,8 @@ create_chan:
        chan = l2cap_chan_create();
        if (!chan) {
                if (smp) {
-                       crypto_free_blkcipher(smp->tfm_aes);
-                       crypto_free_hash(smp->tfm_cmac);
+                       crypto_free_skcipher(smp->tfm_aes);
+                       crypto_free_shash(smp->tfm_cmac);
                        kzfree(smp);
                }
                return ERR_PTR(-ENOMEM);
@@ -3226,10 +3203,8 @@ static void smp_del_chan(struct l2cap_chan *chan)
        smp = chan->data;
        if (smp) {
                chan->data = NULL;
-               if (smp->tfm_aes)
-                       crypto_free_blkcipher(smp->tfm_aes);
-               if (smp->tfm_cmac)
-                       crypto_free_hash(smp->tfm_cmac);
+               crypto_free_skcipher(smp->tfm_aes);
+               crypto_free_shash(smp->tfm_cmac);
                kzfree(smp);
        }
 
@@ -3465,7 +3440,7 @@ void smp_unregister(struct hci_dev *hdev)
 
 #if IS_ENABLED(CONFIG_BT_SELFTEST_SMP)
 
-static int __init test_ah(struct crypto_blkcipher *tfm_aes)
+static int __init test_ah(struct crypto_skcipher *tfm_aes)
 {
        const u8 irk[16] = {
                        0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
@@ -3485,7 +3460,7 @@ static int __init test_ah(struct crypto_blkcipher *tfm_aes)
        return 0;
 }
 
-static int __init test_c1(struct crypto_blkcipher *tfm_aes)
+static int __init test_c1(struct crypto_skcipher *tfm_aes)
 {
        const u8 k[16] = {
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3515,7 +3490,7 @@ static int __init test_c1(struct crypto_blkcipher *tfm_aes)
        return 0;
 }
 
-static int __init test_s1(struct crypto_blkcipher *tfm_aes)
+static int __init test_s1(struct crypto_skcipher *tfm_aes)
 {
        const u8 k[16] = {
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3540,7 +3515,7 @@ static int __init test_s1(struct crypto_blkcipher *tfm_aes)
        return 0;
 }
 
-static int __init test_f4(struct crypto_hash *tfm_cmac)
+static int __init test_f4(struct crypto_shash *tfm_cmac)
 {
        const u8 u[32] = {
                        0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
@@ -3572,7 +3547,7 @@ static int __init test_f4(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
-static int __init test_f5(struct crypto_hash *tfm_cmac)
+static int __init test_f5(struct crypto_shash *tfm_cmac)
 {
        const u8 w[32] = {
                        0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86,
@@ -3609,7 +3584,7 @@ static int __init test_f5(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
-static int __init test_f6(struct crypto_hash *tfm_cmac)
+static int __init test_f6(struct crypto_shash *tfm_cmac)
 {
        const u8 w[16] = {
                        0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd,
@@ -3642,7 +3617,7 @@ static int __init test_f6(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
-static int __init test_g2(struct crypto_hash *tfm_cmac)
+static int __init test_g2(struct crypto_shash *tfm_cmac)
 {
        const u8 u[32] = {
                        0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc,
@@ -3674,7 +3649,7 @@ static int __init test_g2(struct crypto_hash *tfm_cmac)
        return 0;
 }
 
-static int __init test_h6(struct crypto_hash *tfm_cmac)
+static int __init test_h6(struct crypto_shash *tfm_cmac)
 {
        const u8 w[16] = {
                        0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34,
@@ -3711,8 +3686,8 @@ static const struct file_operations test_smp_fops = {
        .llseek         = default_llseek,
 };
 
-static int __init run_selftests(struct crypto_blkcipher *tfm_aes,
-                               struct crypto_hash *tfm_cmac)
+static int __init run_selftests(struct crypto_skcipher *tfm_aes,
+                               struct crypto_shash *tfm_cmac)
 {
        ktime_t calltime, delta, rettime;
        unsigned long long duration;
@@ -3789,27 +3764,27 @@ done:
 
 int __init bt_selftest_smp(void)
 {
-       struct crypto_blkcipher *tfm_aes;
-       struct crypto_hash *tfm_cmac;
+       struct crypto_skcipher *tfm_aes;
+       struct crypto_shash *tfm_cmac;
        int err;
 
-       tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_aes = crypto_alloc_skcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_aes)) {
                BT_ERR("Unable to create ECB crypto context");
                return PTR_ERR(tfm_aes);
        }
 
-       tfm_cmac = crypto_alloc_hash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
+       tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm_cmac)) {
                BT_ERR("Unable to create CMAC crypto context");
-               crypto_free_blkcipher(tfm_aes);
+               crypto_free_skcipher(tfm_aes);
                return PTR_ERR(tfm_cmac);
        }
 
        err = run_selftests(tfm_aes, tfm_cmac);
 
-       crypto_free_hash(tfm_cmac);
-       crypto_free_blkcipher(tfm_aes);
+       crypto_free_shash(tfm_cmac);
+       crypto_free_skcipher(tfm_aes);
 
        return err;
 }
index a1abe4936fe15299b7643b8944023050c5f938c1..3addc05b9a16676b07ca13bc6faf0cba62b3acfd 100644 (file)
@@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
        .notifier_call = br_device_event
 };
 
+/* called with RTNL */
 static int br_switchdev_event(struct notifier_block *unused,
                              unsigned long event, void *ptr)
 {
@@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused,
        struct switchdev_notifier_fdb_info *fdb_info;
        int err = NOTIFY_DONE;
 
-       rtnl_lock();
        p = br_port_get_rtnl(dev);
        if (!p)
                goto out;
@@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused,
        }
 
 out:
-       rtnl_unlock();
        return err;
 }
 
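With the rtnl_lock()/rtnl_unlock() pair removed, br_switchdev_event() depends on its caller already holding RTNL, as the added comment records. A generic sketch (not part of the patch) of how a handler can assert that contract rather than take the lock itself:

#include <linux/notifier.h>
#include <linux/rtnetlink.h>

static int example_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	ASSERT_RTNL();		/* warn if the caller does not hold rtnl_lock() */
	/* ... resolve the bridge port and handle the FDB add/del ... */
	return NOTIFY_DONE;
}
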
index 30e105f57f0d9a59e1ce03de8531b90718ac8e29..ac089286526ef8b94f39b869275b1f5ff07834ad 100644 (file)
@@ -41,6 +41,14 @@ fail:
        return -EMSGSIZE;
 }
 
+static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
+{
+       e->state = flags & MDB_PG_FLAGS_PERMANENT;
+       e->flags = 0;
+       if (flags & MDB_PG_FLAGS_OFFLOAD)
+               e->flags |= MDB_FLAGS_OFFLOAD;
+}
+
 static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                            struct net_device *dev)
 {
@@ -85,8 +93,8 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
                                        struct br_mdb_entry e;
                                        memset(&e, 0, sizeof(e));
                                        e.ifindex = port->dev->ifindex;
-                                       e.state = p->state;
                                        e.vid = p->addr.vid;
+                                       __mdb_entry_fill_flags(&e, p->flags);
                                        if (p->addr.proto == htons(ETH_P_IP))
                                                e.addr.u.ip4 = p->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
@@ -209,7 +217,7 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
 }
 
 static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
-                           int type)
+                           int type, struct net_bridge_port_group *pg)
 {
        struct switchdev_obj_port_mdb mdb = {
                .obj = {
@@ -232,10 +240,13 @@ static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
 #endif
 
        mdb.obj.orig_dev = port_dev;
-       if (port_dev && type == RTM_NEWMDB)
-               switchdev_port_obj_add(port_dev, &mdb.obj);
-       else if (port_dev && type == RTM_DELMDB)
+       if (port_dev && type == RTM_NEWMDB) {
+               err = switchdev_port_obj_add(port_dev, &mdb.obj);
+               if (!err && pg)
+                       pg->flags |= MDB_PG_FLAGS_OFFLOAD;
+       } else if (port_dev && type == RTM_DELMDB) {
                switchdev_port_obj_del(port_dev, &mdb.obj);
+       }
 
        skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
        if (!skb)
@@ -253,21 +264,21 @@ errout:
        rtnl_set_sk_err(net, RTNLGRP_MDB, err);
 }
 
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                  struct br_ip *group, int type, u8 state)
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
+                  int type)
 {
        struct br_mdb_entry entry;
 
        memset(&entry, 0, sizeof(entry));
-       entry.ifindex = port->dev->ifindex;
-       entry.addr.proto = group->proto;
-       entry.addr.u.ip4 = group->u.ip4;
+       entry.ifindex = pg->port->dev->ifindex;
+       entry.addr.proto = pg->addr.proto;
+       entry.addr.u.ip4 = pg->addr.u.ip4;
 #if IS_ENABLED(CONFIG_IPV6)
-       entry.addr.u.ip6 = group->u.ip6;
+       entry.addr.u.ip6 = pg->addr.u.ip6;
 #endif
-       entry.state = state;
-       entry.vid = group->vid;
-       __br_mdb_notify(dev, &entry, type);
+       entry.vid = pg->addr.vid;
+       __mdb_entry_fill_flags(&entry, pg->flags);
+       __br_mdb_notify(dev, &entry, type, pg);
 }
 
 static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -412,7 +423,8 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
 }
 
 static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
-                           struct br_ip *group, unsigned char state)
+                           struct br_ip *group, unsigned char state,
+                           struct net_bridge_port_group **pg)
 {
        struct net_bridge_mdb_entry *mp;
        struct net_bridge_port_group *p;
@@ -443,6 +455,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
        if (unlikely(!p))
                return -ENOMEM;
        rcu_assign_pointer(*pp, p);
+       *pg = p;
        if (state == MDB_TEMPORARY)
                mod_timer(&p->timer, now + br->multicast_membership_interval);
 
@@ -450,7 +463,8 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
 }
 
 static int __br_mdb_add(struct net *net, struct net_bridge *br,
-                       struct br_mdb_entry *entry)
+                       struct br_mdb_entry *entry,
+                       struct net_bridge_port_group **pg)
 {
        struct br_ip ip;
        struct net_device *dev;
@@ -479,7 +493,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 #endif
 
        spin_lock_bh(&br->multicast_lock);
-       ret = br_mdb_add_group(br, p, &ip, entry->state);
+       ret = br_mdb_add_group(br, p, &ip, entry->state, pg);
        spin_unlock_bh(&br->multicast_lock);
        return ret;
 }
@@ -487,6 +501,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
 static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
        struct net *net = sock_net(skb->sk);
+       struct net_bridge_port_group *pg;
        struct net_bridge_vlan_group *vg;
        struct net_device *dev, *pdev;
        struct br_mdb_entry *entry;
@@ -516,15 +531,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (br_vlan_enabled(br) && vg && entry->vid == 0) {
                list_for_each_entry(v, &vg->vlan_list, vlist) {
                        entry->vid = v->vid;
-                       err = __br_mdb_add(net, br, entry);
+                       err = __br_mdb_add(net, br, entry, &pg);
                        if (err)
                                break;
-                       __br_mdb_notify(dev, entry, RTM_NEWMDB);
+                       __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
                }
        } else {
-               err = __br_mdb_add(net, br, entry);
+               err = __br_mdb_add(net, br, entry, &pg);
                if (!err)
-                       __br_mdb_notify(dev, entry, RTM_NEWMDB);
+                       __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
        }
 
        return err;
@@ -568,7 +583,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
                if (p->port->state == BR_STATE_DISABLED)
                        goto unlock;
 
-               entry->state = p->state;
+               __mdb_entry_fill_flags(entry, p->flags);
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
@@ -620,12 +635,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
                        entry->vid = v->vid;
                        err = __br_mdb_del(br, entry);
                        if (!err)
-                               __br_mdb_notify(dev, entry, RTM_DELMDB);
+                               __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
                }
        } else {
                err = __br_mdb_del(br, entry);
                if (!err)
-                       __br_mdb_notify(dev, entry, RTM_DELMDB);
+                       __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
        }
 
        return err;
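
The per-port-group state byte becomes a flags bitmask, and __mdb_entry_fill_flags() translates the internal MDB_PG_FLAGS_* bits into the netlink-visible state/flags pair. A stand-alone userspace illustration of that mapping (the MDB_PERMANENT and MDB_FLAGS_OFFLOAD values are taken from the uapi header and should be read as assumptions here):

#include <stdio.h>

#define MDB_PG_FLAGS_PERMANENT	(1 << 0)	/* BIT(0) in the patch */
#define MDB_PG_FLAGS_OFFLOAD	(1 << 1)	/* BIT(1) in the patch */
#define MDB_PERMANENT		1		/* uapi if_bridge.h */
#define MDB_FLAGS_OFFLOAD	(1 << 0)	/* uapi if_bridge.h */

struct mdb_entry { unsigned char state, flags; };

static void fill_flags(struct mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
}

int main(void)
{
	struct mdb_entry e;

	/* a permanent group that a switch driver managed to offload */
	fill_flags(&e, MDB_PG_FLAGS_PERMANENT | MDB_PG_FLAGS_OFFLOAD);
	printf("state=%d flags=%d\n", e.state, e.flags);	/* state=1 flags=1 */
	return 0;
}
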
index 03661d97463c0a185b6c688ee281309682209528..8b6e4249be1b09a37047988424c8c60c9d38290e 100644 (file)
@@ -283,8 +283,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
                rcu_assign_pointer(*pp, p->next);
                hlist_del_init(&p->mglist);
                del_timer(&p->timer);
-               br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
-                             p->state);
+               br_mdb_notify(br->dev, p, RTM_DELMDB);
                call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
                if (!mp->ports && !mp->mglist &&
@@ -304,7 +303,7 @@ static void br_multicast_port_group_expired(unsigned long data)
 
        spin_lock(&br->multicast_lock);
        if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
-           hlist_unhashed(&pg->mglist) || pg->state & MDB_PERMANENT)
+           hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
                goto out;
 
        br_multicast_del_pg(br, pg);
@@ -649,7 +648,7 @@ struct net_bridge_port_group *br_multicast_new_port_group(
                        struct net_bridge_port *port,
                        struct br_ip *group,
                        struct net_bridge_port_group __rcu *next,
-                       unsigned char state)
+                       unsigned char flags)
 {
        struct net_bridge_port_group *p;
 
@@ -659,7 +658,7 @@ struct net_bridge_port_group *br_multicast_new_port_group(
 
        p->addr = *group;
        p->port = port;
-       p->state = state;
+       p->flags = flags;
        rcu_assign_pointer(p->next, next);
        hlist_add_head(&p->mglist, &port->mglist);
        setup_timer(&p->timer, br_multicast_port_group_expired,
@@ -702,11 +701,11 @@ static int br_multicast_add_group(struct net_bridge *br,
                        break;
        }
 
-       p = br_multicast_new_port_group(port, group, *pp, MDB_TEMPORARY);
+       p = br_multicast_new_port_group(port, group, *pp, 0);
        if (unlikely(!p))
                goto err;
        rcu_assign_pointer(*pp, p);
-       br_mdb_notify(br->dev, port, group, RTM_NEWMDB, MDB_TEMPORARY);
+       br_mdb_notify(br->dev, p, RTM_NEWMDB);
 
 found:
        mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -975,7 +974,7 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 
        spin_lock(&br->multicast_lock);
        hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
-               if (pg->state == MDB_TEMPORARY)
+               if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
                        br_multicast_del_pg(br, pg);
 
        if (!hlist_unhashed(&port->rlist)) {
@@ -1453,8 +1452,7 @@ br_multicast_leave_group(struct net_bridge *br,
                        hlist_del_init(&p->mglist);
                        del_timer(&p->timer);
                        call_rcu_bh(&p->rcu, br_multicast_free_pg);
-                       br_mdb_notify(br->dev, port, group, RTM_DELMDB,
-                                     p->state);
+                       br_mdb_notify(br->dev, p, RTM_DELMDB);
 
                        if (!mp->ports && !mp->mglist &&
                            netif_running(br->dev))
index 216018c760187db31e45206225ce8c7594a849d5..302ab0a43725845c5ec7a1ffb30db59bfaf16db5 100644 (file)
@@ -150,6 +150,9 @@ struct net_bridge_fdb_entry
        struct rcu_head                 rcu;
 };
 
+#define MDB_PG_FLAGS_PERMANENT BIT(0)
+#define MDB_PG_FLAGS_OFFLOAD   BIT(1)
+
 struct net_bridge_port_group {
        struct net_bridge_port          *port;
        struct net_bridge_port_group __rcu *next;
@@ -157,7 +160,7 @@ struct net_bridge_port_group {
        struct rcu_head                 rcu;
        struct timer_list               timer;
        struct br_ip                    addr;
-       unsigned char                   state;
+       unsigned char                   flags;
 };
 
 struct net_bridge_mdb_entry
@@ -554,11 +557,11 @@ void br_multicast_free_pg(struct rcu_head *head);
 struct net_bridge_port_group *
 br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
                            struct net_bridge_port_group __rcu *next,
-                           unsigned char state);
+                           unsigned char flags);
 void br_mdb_init(void);
 void br_mdb_uninit(void);
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
-                  struct br_ip *group, int type, u8 state);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
+                  int type);
 void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
                   int type);
 
index 10d87753ed8737329c244b1ae7718e95c8abd118..9e43a315e6622028ba2b61fd8d36ab20305d611b 100644 (file)
@@ -152,7 +152,6 @@ static int process_one_ticket(struct ceph_auth_client *ac,
        void *ticket_buf = NULL;
        void *tp, *tpend;
        void **ptp;
-       struct ceph_timespec new_validity;
        struct ceph_crypto_key new_session_key;
        struct ceph_buffer *new_ticket_blob;
        unsigned long new_expires, new_renew_after;
@@ -193,8 +192,8 @@ static int process_one_ticket(struct ceph_auth_client *ac,
        if (ret)
                goto out;
 
-       ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
-       ceph_decode_timespec(&validity, &new_validity);
+       ceph_decode_timespec(&validity, dp);
+       dp += sizeof(struct ceph_timespec);
        new_expires = get_seconds() + validity.tv_sec;
        new_renew_after = new_expires - (validity.tv_sec / 4);
        dout(" expires=%lu renew_after=%lu\n", new_expires,
@@ -233,10 +232,10 @@ static int process_one_ticket(struct ceph_auth_client *ac,
                ceph_buffer_put(th->ticket_blob);
        th->session_key = new_session_key;
        th->ticket_blob = new_ticket_blob;
-       th->validity = new_validity;
        th->secret_id = new_secret_id;
        th->expires = new_expires;
        th->renew_after = new_renew_after;
+       th->have_key = true;
        dout(" got ticket service %d (%s) secret_id %lld len %d\n",
             type, ceph_entity_type_name(type), th->secret_id,
             (int)th->ticket_blob->vec.iov_len);
@@ -384,6 +383,24 @@ bad:
        return -ERANGE;
 }
 
+static bool need_key(struct ceph_x_ticket_handler *th)
+{
+       if (!th->have_key)
+               return true;
+
+       return get_seconds() >= th->renew_after;
+}
+
+static bool have_key(struct ceph_x_ticket_handler *th)
+{
+       if (th->have_key) {
+               if (get_seconds() >= th->expires)
+                       th->have_key = false;
+       }
+
+       return th->have_key;
+}
+
 static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed)
 {
        int want = ac->want_keys;
@@ -402,20 +419,18 @@ static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed)
                        continue;
 
                th = get_ticket_handler(ac, service);
-
                if (IS_ERR(th)) {
                        *pneed |= service;
                        continue;
                }
 
-               if (get_seconds() >= th->renew_after)
+               if (need_key(th))
                        *pneed |= service;
-               if (get_seconds() >= th->expires)
+               if (!have_key(th))
                        xi->have_keys &= ~service;
        }
 }
 
-
 static int ceph_x_build_request(struct ceph_auth_client *ac,
                                void *buf, void *end)
 {
@@ -667,14 +682,26 @@ static void ceph_x_destroy(struct ceph_auth_client *ac)
        ac->private = NULL;
 }
 
-static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
-                                  int peer_type)
+static void invalidate_ticket(struct ceph_auth_client *ac, int peer_type)
 {
        struct ceph_x_ticket_handler *th;
 
        th = get_ticket_handler(ac, peer_type);
        if (!IS_ERR(th))
-               memset(&th->validity, 0, sizeof(th->validity));
+               th->have_key = false;
+}
+
+static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
+                                        int peer_type)
+{
+       /*
+        * We are to invalidate a service ticket in the hopes of
+        * getting a new, hopefully more valid, one.  But, we won't get
+        * it unless our AUTH ticket is good, so invalidate AUTH ticket
+        * as well, just in case.
+        */
+       invalidate_ticket(ac, peer_type);
+       invalidate_ticket(ac, CEPH_ENTITY_TYPE_AUTH);
 }
 
 static int calcu_signature(struct ceph_x_authorizer *au,
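
The ceph_timespec 'validity' field is replaced by an explicit have_key flag combined with the existing expires/renew_after deadlines. A small userspace mock of the resulting behaviour (illustrative only; get_seconds() is stood in for by a plain 'now' argument):

#include <stdbool.h>
#include <stdio.h>

struct ticket { bool have_key; unsigned long renew_after, expires; };

static bool need_key(struct ticket *th, unsigned long now)
{
	return !th->have_key || now >= th->renew_after;
}

static bool have_key(struct ticket *th, unsigned long now)
{
	if (th->have_key && now >= th->expires)
		th->have_key = false;	/* key just expired */
	return th->have_key;
}

static void show(struct ticket *th, unsigned long now)
{
	bool need = need_key(th, now);
	bool have = have_key(th, now);

	printf("t=%lu: need=%d have=%d\n", now, need, have);
}

int main(void)
{
	struct ticket th = { .have_key = true, .renew_after = 100, .expires = 200 };

	show(&th, 50);		/* fresh: need=0 have=1 */
	show(&th, 150);		/* renewal window: need=1 have=1 */
	show(&th, 250);		/* expired: need=1 have=0 */
	return 0;
}
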
index e8b7c6917d472f69d356252415efcb76a4afd3cf..40b1a3cf7397352e4673becccdbbb083c88f04d8 100644 (file)
@@ -16,7 +16,7 @@ struct ceph_x_ticket_handler {
        unsigned int service;
 
        struct ceph_crypto_key session_key;
-       struct ceph_timespec validity;
+       bool have_key;
 
        u64 secret_id;
        struct ceph_buffer *ticket_blob;
index 393bfb22d5bbafd83c20f5953b98c6709b637452..5fcfb98f309efb5018f84628902cd1711fc36705 100644 (file)
@@ -403,6 +403,7 @@ static int is_out(const struct crush_map *map,
  * @local_retries: localized retries
  * @local_fallback_retries: localized fallback retries
  * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
+ * @stable: stable mode starts rep=0 in the recursive call for all replicas
  * @vary_r: pass r to recursive calls
  * @out2: second output vector for leaf items (if @recurse_to_leaf)
  * @parent_r: r value passed from the parent
@@ -419,6 +420,7 @@ static int crush_choose_firstn(const struct crush_map *map,
                               unsigned int local_fallback_retries,
                               int recurse_to_leaf,
                               unsigned int vary_r,
+                              unsigned int stable,
                               int *out2,
                               int parent_r)
 {
@@ -433,13 +435,13 @@ static int crush_choose_firstn(const struct crush_map *map,
        int collide, reject;
        int count = out_size;
 
-       dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d\n",
+       dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d stable %d\n",
                recurse_to_leaf ? "_LEAF" : "",
                bucket->id, x, outpos, numrep,
                tries, recurse_tries, local_retries, local_fallback_retries,
-               parent_r);
+               parent_r, stable);
 
-       for (rep = outpos; rep < numrep && count > 0 ; rep++) {
+       for (rep = stable ? 0 : outpos; rep < numrep && count > 0 ; rep++) {
                /* keep trying until we get a non-out, non-colliding item */
                ftotal = 0;
                skip_rep = 0;
@@ -512,13 +514,14 @@ static int crush_choose_firstn(const struct crush_map *map,
                                                if (crush_choose_firstn(map,
                                                         map->buckets[-1-item],
                                                         weight, weight_max,
-                                                        x, outpos+1, 0,
+                                                        x, stable ? 1 : outpos+1, 0,
                                                         out2, outpos, count,
                                                         recurse_tries, 0,
                                                         local_retries,
                                                         local_fallback_retries,
                                                         0,
                                                         vary_r,
+                                                        stable,
                                                         NULL,
                                                         sub_r) <= outpos)
                                                        /* didn't get leaf */
@@ -816,6 +819,7 @@ int crush_do_rule(const struct crush_map *map,
        int choose_local_fallback_retries = map->choose_local_fallback_tries;
 
        int vary_r = map->chooseleaf_vary_r;
+       int stable = map->chooseleaf_stable;
 
        if ((__u32)ruleno >= map->max_rules) {
                dprintk(" bad ruleno %d\n", ruleno);
@@ -835,7 +839,8 @@ int crush_do_rule(const struct crush_map *map,
                case CRUSH_RULE_TAKE:
                        if ((curstep->arg1 >= 0 &&
                             curstep->arg1 < map->max_devices) ||
-                           (-1-curstep->arg1 < map->max_buckets &&
+                           (-1-curstep->arg1 >= 0 &&
+                            -1-curstep->arg1 < map->max_buckets &&
                             map->buckets[-1-curstep->arg1])) {
                                w[0] = curstep->arg1;
                                wsize = 1;
@@ -869,6 +874,11 @@ int crush_do_rule(const struct crush_map *map,
                                vary_r = curstep->arg1;
                        break;
 
+               case CRUSH_RULE_SET_CHOOSELEAF_STABLE:
+                       if (curstep->arg1 >= 0)
+                               stable = curstep->arg1;
+                       break;
+
                case CRUSH_RULE_CHOOSELEAF_FIRSTN:
                case CRUSH_RULE_CHOOSE_FIRSTN:
                        firstn = 1;
@@ -888,6 +898,7 @@ int crush_do_rule(const struct crush_map *map,
                        osize = 0;
 
                        for (i = 0; i < wsize; i++) {
+                               int bno;
                                /*
                                 * see CRUSH_N, CRUSH_N_MINUS macros.
                                 * basically, numrep <= 0 means relative to
@@ -900,6 +911,13 @@ int crush_do_rule(const struct crush_map *map,
                                                continue;
                                }
                                j = 0;
+                               /* make sure bucket id is valid */
+                               bno = -1 - w[i];
+                               if (bno < 0 || bno >= map->max_buckets) {
+                                       /* w[i] is probably CRUSH_ITEM_NONE */
+                                       dprintk("  bad w[i] %d\n", w[i]);
+                                       continue;
+                               }
                                if (firstn) {
                                        int recurse_tries;
                                        if (choose_leaf_tries)
@@ -911,7 +929,7 @@ int crush_do_rule(const struct crush_map *map,
                                                recurse_tries = choose_tries;
                                        osize += crush_choose_firstn(
                                                map,
-                                               map->buckets[-1-w[i]],
+                                               map->buckets[bno],
                                                weight, weight_max,
                                                x, numrep,
                                                curstep->arg2,
@@ -923,6 +941,7 @@ int crush_do_rule(const struct crush_map *map,
                                                choose_local_fallback_retries,
                                                recurse_to_leaf,
                                                vary_r,
+                                               stable,
                                                c+osize,
                                                0);
                                } else {
@@ -930,7 +949,7 @@ int crush_do_rule(const struct crush_map *map,
                                                    numrep : (result_max-osize));
                                        crush_choose_indep(
                                                map,
-                                               map->buckets[-1-w[i]],
+                                               map->buckets[bno],
                                                weight, weight_max,
                                                x, out_size, numrep,
                                                curstep->arg2,
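
The new chooseleaf_stable tunable (set via CRUSH_RULE_SET_CHOOSELEAF_STABLE above) makes the recursive chooseleaf pass start every replica at rep = 0 instead of at its output position, so a replica's leaf choice no longer depends on how many replicas were placed before it; the intent, matching the userspace CRUSH tunable of the same name, is that marking a device out remaps only the affected replica rather than reshuffling later ones.
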
index 42e8649c6e79b4950531f0027f2e67820377c066..db2847ac5f122988ce1625153fc219e74a3ddb8d 100644 (file)
@@ -4,7 +4,8 @@
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
-#include <crypto/hash.h>
+#include <crypto/aes.h>
+#include <crypto/skcipher.h>
 #include <linux/key-type.h>
 
 #include <keys/ceph-type.h>
@@ -79,9 +80,9 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
        return 0;
 }
 
-static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
+static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
 {
-       return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
+       return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
 }
 
 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
@@ -162,11 +163,10 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 {
        struct scatterlist sg_in[2], prealloc_sg;
        struct sg_table sg_out;
-       struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
-       struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+       struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int ret;
-       void *iv;
-       int ivsize;
+       char iv[AES_BLOCK_SIZE];
        size_t zero_padding = (0x10 - (src_len & 0x0f));
        char pad[16];
 
@@ -184,10 +184,13 @@ static int ceph_aes_encrypt(const void *key, int key_len,
        if (ret)
                goto out_tfm;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       iv = crypto_blkcipher_crt(tfm)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       memcpy(iv, aes_iv, ivsize);
+       crypto_skcipher_setkey((void *)tfm, key, key_len);
+       memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
+                                  src_len + zero_padding, iv);
 
        /*
        print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
@@ -197,8 +200,8 @@ static int ceph_aes_encrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
-                                    src_len + zero_padding);
+       ret = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
        if (ret < 0) {
                pr_err("ceph_aes_crypt failed %d\n", ret);
                goto out_sg;
@@ -211,7 +214,7 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 out_sg:
        teardown_sgtable(&sg_out);
 out_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
        return ret;
 }
 
@@ -222,11 +225,10 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 {
        struct scatterlist sg_in[3], prealloc_sg;
        struct sg_table sg_out;
-       struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
-       struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
+       struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        int ret;
-       void *iv;
-       int ivsize;
+       char iv[AES_BLOCK_SIZE];
        size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
        char pad[16];
 
@@ -245,10 +247,13 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
        if (ret)
                goto out_tfm;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       iv = crypto_blkcipher_crt(tfm)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       memcpy(iv, aes_iv, ivsize);
+       crypto_skcipher_setkey((void *)tfm, key, key_len);
+       memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
+                                  src1_len + src2_len + zero_padding, iv);
 
        /*
        print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
@@ -260,8 +265,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
        print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
-                                    src1_len + src2_len + zero_padding);
+       ret = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
        if (ret < 0) {
                pr_err("ceph_aes_crypt2 failed %d\n", ret);
                goto out_sg;
@@ -274,7 +279,7 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 out_sg:
        teardown_sgtable(&sg_out);
 out_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
        return ret;
 }
 
@@ -284,11 +289,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 {
        struct sg_table sg_in;
        struct scatterlist sg_out[2], prealloc_sg;
-       struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
-       struct blkcipher_desc desc = { .tfm = tfm };
+       struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        char pad[16];
-       void *iv;
-       int ivsize;
+       char iv[AES_BLOCK_SIZE];
        int ret;
        int last_byte;
 
@@ -302,10 +306,13 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        if (ret)
                goto out_tfm;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       iv = crypto_blkcipher_crt(tfm)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       memcpy(iv, aes_iv, ivsize);
+       crypto_skcipher_setkey((void *)tfm, key, key_len);
+       memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
+                                  src_len, iv);
 
        /*
        print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
@@ -313,7 +320,8 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+       ret = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
                goto out_sg;
@@ -338,7 +346,7 @@ static int ceph_aes_decrypt(const void *key, int key_len,
 out_sg:
        teardown_sgtable(&sg_in);
 out_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
        return ret;
 }
 
@@ -349,11 +357,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 {
        struct sg_table sg_in;
        struct scatterlist sg_out[3], prealloc_sg;
-       struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
-       struct blkcipher_desc desc = { .tfm = tfm };
+       struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
        char pad[16];
-       void *iv;
-       int ivsize;
+       char iv[AES_BLOCK_SIZE];
        int ret;
        int last_byte;
 
@@ -368,10 +375,13 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        if (ret)
                goto out_tfm;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       iv = crypto_blkcipher_crt(tfm)->iv;
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       memcpy(iv, aes_iv, ivsize);
+       crypto_skcipher_setkey((void *)tfm, key, key_len);
+       memcpy(iv, aes_iv, AES_BLOCK_SIZE);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
+                                  src_len, iv);
 
        /*
        print_hex_dump(KERN_ERR, "dec  key: ", DUMP_PREFIX_NONE, 16, 1,
@@ -379,7 +389,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
+       ret = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
                goto out_sg;
@@ -415,7 +426,7 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
 out_sg:
        teardown_sgtable(&sg_in);
 out_tfm:
-       crypto_free_blkcipher(tfm);
+       crypto_free_skcipher(tfm);
        return ret;
 }
 
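The Ceph AES helpers move from crypto_blkcipher, with its per-tfm IV, to crypto_skcipher with an on-stack request and a caller-supplied IV. The calling pattern from the hunks above, condensed into one self-contained sketch (helper name and the single in-place scatterlist are simplifications, not the patch's code):

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static int cbc_aes_encrypt_inplace(const u8 *key, int key_len,
				   void *buf, unsigned int len,
				   const u8 *iv_in)
{
	struct crypto_skcipher *tfm;
	struct scatterlist sg;
	char iv[AES_BLOCK_SIZE];
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_free;

	memcpy(iv, iv_in, AES_BLOCK_SIZE);
	sg_init_one(&sg, buf, len);	/* len must already be block aligned */

	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);
	}

out_free:
	crypto_free_skcipher(tfm);
	return ret;
}
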
index 9981039ef4ffcca29081b83a5e06a81ce6871f20..9cfedf565f5b236b5ede7974466cf62af6ad995c 100644 (file)
@@ -23,9 +23,6 @@
 #include <linux/ceph/pagelist.h>
 #include <linux/export.h>
 
-#define list_entry_next(pos, member)                                   \
-       list_entry(pos->member.next, typeof(*pos), member)
-
 /*
  * Ceph uses the messenger to exchange ceph_msg messages with other
  * hosts in the system.  The messenger provides ordered and reliable
@@ -672,6 +669,8 @@ static void reset_connection(struct ceph_connection *con)
        }
        con->in_seq = 0;
        con->in_seq_acked = 0;
+
+       con->out_skip = 0;
 }
 
 /*
@@ -771,6 +770,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
 
 static void con_out_kvec_reset(struct ceph_connection *con)
 {
+       BUG_ON(con->out_skip);
+
        con->out_kvec_left = 0;
        con->out_kvec_bytes = 0;
        con->out_kvec_cur = &con->out_kvec[0];
@@ -779,9 +780,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
 static void con_out_kvec_add(struct ceph_connection *con,
                                size_t size, void *data)
 {
-       int index;
+       int index = con->out_kvec_left;
 
-       index = con->out_kvec_left;
+       BUG_ON(con->out_skip);
        BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
 
        con->out_kvec[index].iov_len = size;
@@ -790,6 +791,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
        con->out_kvec_bytes += size;
 }
 
+/*
+ * Chop off a kvec from the end.  Return residual number of bytes for
+ * that kvec, i.e. how many bytes would have been written if the kvec
+ * hadn't been nuked.
+ */
+static int con_out_kvec_skip(struct ceph_connection *con)
+{
+       int off = con->out_kvec_cur - con->out_kvec;
+       int skip = 0;
+
+       if (con->out_kvec_bytes > 0) {
+               skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
+               BUG_ON(con->out_kvec_bytes < skip);
+               BUG_ON(!con->out_kvec_left);
+               con->out_kvec_bytes -= skip;
+               con->out_kvec_left--;
+       }
+
+       return skip;
+}
+
 #ifdef CONFIG_BLOCK
 
 /*
@@ -1042,7 +1064,7 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
        /* Move on to the next page */
 
        BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
-       cursor->page = list_entry_next(cursor->page, lru);
+       cursor->page = list_next_entry(cursor->page, lru);
        cursor->last_piece = cursor->resid <= PAGE_SIZE;
 
        return true;
@@ -1166,7 +1188,7 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
        if (!cursor->resid && cursor->total_resid) {
                WARN_ON(!cursor->last_piece);
                BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
-               cursor->data = list_entry_next(cursor->data, links);
+               cursor->data = list_next_entry(cursor->data, links);
                __ceph_msg_data_cursor_init(cursor);
                new_piece = true;
        }
@@ -1197,7 +1219,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
        m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
 
        dout("prepare_write_message_footer %p\n", con);
-       con->out_kvec_is_msg = true;
        con->out_kvec[v].iov_base = &m->footer;
        if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
                if (con->ops->sign_message)
@@ -1225,7 +1246,6 @@ static void prepare_write_message(struct ceph_connection *con)
        u32 crc;
 
        con_out_kvec_reset(con);
-       con->out_kvec_is_msg = true;
        con->out_msg_done = false;
 
        /* Sneak an ack in there first?  If we can get it into the same
@@ -1265,18 +1285,19 @@ static void prepare_write_message(struct ceph_connection *con)
 
        /* tag + hdr + front + middle */
        con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
-       con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
+       con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
        con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
 
        if (m->middle)
                con_out_kvec_add(con, m->middle->vec.iov_len,
                        m->middle->vec.iov_base);
 
-       /* fill in crc (except data pages), footer */
+       /* fill in hdr crc and finalize hdr */
        crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
        con->out_msg->hdr.crc = cpu_to_le32(crc);
-       con->out_msg->footer.flags = 0;
+       memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
 
+       /* fill in front and middle crc, footer */
        crc = crc32c(0, m->front.iov_base, m->front.iov_len);
        con->out_msg->footer.front_crc = cpu_to_le32(crc);
        if (m->middle) {
@@ -1288,6 +1309,7 @@ static void prepare_write_message(struct ceph_connection *con)
        dout("%s front_crc %u middle_crc %u\n", __func__,
             le32_to_cpu(con->out_msg->footer.front_crc),
             le32_to_cpu(con->out_msg->footer.middle_crc));
+       con->out_msg->footer.flags = 0;
 
        /* is there a data payload? */
        con->out_msg->footer.data_crc = 0;
@@ -1492,7 +1514,6 @@ static int write_partial_kvec(struct ceph_connection *con)
                }
        }
        con->out_kvec_left = 0;
-       con->out_kvec_is_msg = false;
        ret = 1;
 out:
        dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
@@ -1584,6 +1605,7 @@ static int write_partial_skip(struct ceph_connection *con)
 {
        int ret;
 
+       dout("%s %p %d left\n", __func__, con, con->out_skip);
        while (con->out_skip > 0) {
                size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
 
@@ -2506,13 +2528,13 @@ more:
 
 more_kvec:
        /* kvec data queued? */
-       if (con->out_skip) {
-               ret = write_partial_skip(con);
+       if (con->out_kvec_left) {
+               ret = write_partial_kvec(con);
                if (ret <= 0)
                        goto out;
        }
-       if (con->out_kvec_left) {
-               ret = write_partial_kvec(con);
+       if (con->out_skip) {
+               ret = write_partial_skip(con);
                if (ret <= 0)
                        goto out;
        }
@@ -2805,13 +2827,17 @@ static bool con_backoff(struct ceph_connection *con)
 
 static void con_fault_finish(struct ceph_connection *con)
 {
+       dout("%s %p\n", __func__, con);
+
        /*
         * in case we faulted due to authentication, invalidate our
         * current tickets so that we can get new ones.
         */
-       if (con->auth_retry && con->ops->invalidate_authorizer) {
-               dout("calling invalidate_authorizer()\n");
-               con->ops->invalidate_authorizer(con);
+       if (con->auth_retry) {
+               dout("auth_retry %d, invalidating\n", con->auth_retry);
+               if (con->ops->invalidate_authorizer)
+                       con->ops->invalidate_authorizer(con);
+               con->auth_retry = 0;
        }
 
        if (con->ops->fault)
@@ -3050,16 +3076,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
                ceph_msg_put(msg);
        }
        if (con->out_msg == msg) {
-               dout("%s %p msg %p - was sending\n", __func__, con, msg);
-               con->out_msg = NULL;
-               if (con->out_kvec_is_msg) {
-                       con->out_skip = con->out_kvec_bytes;
-                       con->out_kvec_is_msg = false;
+               BUG_ON(con->out_skip);
+               /* footer */
+               if (con->out_msg_done) {
+                       con->out_skip += con_out_kvec_skip(con);
+               } else {
+                       BUG_ON(!msg->data_length);
+                       if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
+                               con->out_skip += sizeof(msg->footer);
+                       else
+                               con->out_skip += sizeof(msg->old_footer);
                }
+               /* data, middle, front */
+               if (msg->data_length)
+                       con->out_skip += msg->cursor.total_resid;
+               if (msg->middle)
+                       con->out_skip += con_out_kvec_skip(con);
+               con->out_skip += con_out_kvec_skip(con);
+
+               dout("%s %p msg %p - was sending, will write %d skip %d\n",
+                    __func__, con, msg, con->out_kvec_bytes, con->out_skip);
                msg->hdr.seq = 0;
-
+               con->out_msg = NULL;
                ceph_msg_put(msg);
        }
+
        mutex_unlock(&con->mutex);
 }
 
@@ -3361,9 +3402,7 @@ static void ceph_msg_free(struct ceph_msg *m)
 static void ceph_msg_release(struct kref *kref)
 {
        struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
-       LIST_HEAD(data);
-       struct list_head *links;
-       struct list_head *next;
+       struct ceph_msg_data *data, *next;
 
        dout("%s %p\n", __func__, m);
        WARN_ON(!list_empty(&m->list_head));
@@ -3376,12 +3415,8 @@ static void ceph_msg_release(struct kref *kref)
                m->middle = NULL;
        }
 
-       list_splice_init(&m->data, &data);
-       list_for_each_safe(links, next, &data) {
-               struct ceph_msg_data *data;
-
-               data = list_entry(links, struct ceph_msg_data, links);
-               list_del_init(links);
+       list_for_each_entry_safe(data, next, &m->data, links) {
+               list_del_init(&data->links);
                ceph_msg_data_destroy(data);
        }
        m->data_length = 0;
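
Two stock list idioms replace open-coded variants here: the local list_entry_next() macro gives way to list_next_entry(), and ceph_msg_release() now walks its data items with list_for_each_entry_safe(). A small generic sketch of both (not Ceph-specific):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head links;
	int val;
};

/* list_next_entry() replaces the old local list_entry_next() helper;
 * the caller must know 'it' is not the last element. */
static struct item *peek_next(struct item *it)
{
	return list_next_entry(it, links);
}

/* the _safe variant allows freeing the current entry while iterating */
static void drain(struct list_head *head)
{
	struct item *it, *next;

	list_for_each_entry_safe(it, next, head, links) {
		list_del_init(&it->links);
		kfree(it);
	}
}
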
index edda01626a459efbfdaeebd46fd6a0b13a037879..de85dddc3dc08cfeee81435c4475a6d975c4cd96 100644 (file)
@@ -364,10 +364,6 @@ static bool have_debugfs_info(struct ceph_mon_client *monc)
        return monc->client->have_fsid && monc->auth->global_id > 0;
 }
 
-/*
- * The monitor responds with mount ack indicate mount success.  The
- * included client ticket allows the client to talk to MDSs and OSDs.
- */
 static void ceph_monc_handle_map(struct ceph_mon_client *monc,
                                 struct ceph_msg *msg)
 {
index f8f235930d887adf1df94314b18c719638328df0..3534e12683d3b2b6efa9988faff9e9b3249e6fa6 100644 (file)
@@ -1770,6 +1770,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
        u32 osdmap_epoch;
        int already_completed;
        u32 bytes;
+       u8 decode_redir;
        unsigned int i;
 
        tid = le64_to_cpu(msg->hdr.tid);
@@ -1841,6 +1842,15 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg)
                p += 8 + 4; /* skip replay_version */
                p += 8; /* skip user_version */
 
+               if (le16_to_cpu(msg->hdr.version) >= 7)
+                       ceph_decode_8_safe(&p, end, decode_redir, bad_put);
+               else
+                       decode_redir = 1;
+       } else {
+               decode_redir = 0;
+       }
+
+       if (decode_redir) {
                err = ceph_redirect_decode(&p, end, &redir);
                if (err)
                        goto bad_put;
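
handle_reply() now gates the redirect decode on the message header version using the usual ceph_decode_*_safe pattern: check that enough bytes remain, decode, and jump to an error label otherwise. A hedged stand-alone sketch of that pattern (function name invented; assumes include/linux/ceph/decode.h):

#include <linux/ceph/decode.h>
#include <linux/errno.h>

static int parse_redirect_flag(void **p, void *end, u8 *flag)
{
	u8 v;

	ceph_decode_8_safe(p, end, v, bad);	/* goto bad if fewer than 1 byte left */
	*flag = v;
	return 0;

bad:
	return -ERANGE;
}
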
index 7d8f581d9f1f7987b8d7051160c34f42ad2f5e73..243574c8cf33807fcaf9374530358f1e44080764 100644 (file)
@@ -342,23 +342,32 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
         c->choose_local_tries = ceph_decode_32(p);
         c->choose_local_fallback_tries =  ceph_decode_32(p);
         c->choose_total_tries = ceph_decode_32(p);
-        dout("crush decode tunable choose_local_tries = %d",
+        dout("crush decode tunable choose_local_tries = %d\n",
              c->choose_local_tries);
-        dout("crush decode tunable choose_local_fallback_tries = %d",
+        dout("crush decode tunable choose_local_fallback_tries = %d\n",
              c->choose_local_fallback_tries);
-        dout("crush decode tunable choose_total_tries = %d",
+        dout("crush decode tunable choose_total_tries = %d\n",
              c->choose_total_tries);
 
        ceph_decode_need(p, end, sizeof(u32), done);
        c->chooseleaf_descend_once = ceph_decode_32(p);
-       dout("crush decode tunable chooseleaf_descend_once = %d",
+       dout("crush decode tunable chooseleaf_descend_once = %d\n",
             c->chooseleaf_descend_once);
 
        ceph_decode_need(p, end, sizeof(u8), done);
        c->chooseleaf_vary_r = ceph_decode_8(p);
-       dout("crush decode tunable chooseleaf_vary_r = %d",
+       dout("crush decode tunable chooseleaf_vary_r = %d\n",
             c->chooseleaf_vary_r);
 
+       /* skip straw_calc_version, allowed_bucket_algs */
+       ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
+       *p += sizeof(u8) + sizeof(u32);
+
+       ceph_decode_need(p, end, sizeof(u8), done);
+       c->chooseleaf_stable = ceph_decode_8(p);
+       dout("crush decode tunable chooseleaf_stable = %d\n",
+            c->chooseleaf_stable);
+
 done:
        dout("crush_decode success\n");
        return c;
index cc9e3652cf93a6306e6f614f966e0fe7cf17a10e..f1284835b8c9222ffca96f63cd3351d2fcb54170 100644 (file)
@@ -4154,7 +4154,10 @@ ncls:
                        ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
        } else {
 drop:
-               atomic_long_inc(&skb->dev->rx_dropped);
+               if (!deliver_exact)
+                       atomic_long_inc(&skb->dev->rx_dropped);
+               else
+                       atomic_long_inc(&skb->dev->rx_nohandler);
                kfree_skb(skb);
                /* Jamal, now you will not able to escape explaining
                 * me how you were going to use this. :-)
@@ -4351,6 +4354,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
 
                diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
                diffs |= p->vlan_tci ^ skb->vlan_tci;
+               diffs |= skb_metadata_dst_cmp(p, skb);
                if (maclen == ETH_HLEN)
                        diffs |= compare_ether_header(skb_mac_header(p),
                                                      skb_mac_header(skb));
@@ -4548,10 +4552,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
                break;
 
        case GRO_MERGED_FREE:
-               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+               if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
+                       skb_dst_drop(skb);
                        kmem_cache_free(skbuff_head_cache, skb);
-               else
+               } else {
                        __kfree_skb(skb);
+               }
                break;
 
        case GRO_HELD:
@@ -7250,24 +7256,31 @@ void netdev_run_todo(void)
        }
 }
 
-/* Convert net_device_stats to rtnl_link_stats64.  They have the same
- * fields in the same order, with only the type differing.
+/* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has
+ * all the same fields in the same order as net_device_stats, with only
+ * the type differing, but rtnl_link_stats64 may have additional fields
+ * at the end for newer counters.
  */
 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
                             const struct net_device_stats *netdev_stats)
 {
 #if BITS_PER_LONG == 64
-       BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
+       BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
        memcpy(stats64, netdev_stats, sizeof(*stats64));
+       /* zero out counters that only exist in rtnl_link_stats64 */
+       memset((char *)stats64 + sizeof(*netdev_stats), 0,
+              sizeof(*stats64) - sizeof(*netdev_stats));
 #else
-       size_t i, n = sizeof(*stats64) / sizeof(u64);
+       size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
        const unsigned long *src = (const unsigned long *)netdev_stats;
        u64 *dst = (u64 *)stats64;
 
-       BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
-                    sizeof(*stats64) / sizeof(u64));
+       BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
        for (i = 0; i < n; i++)
                dst[i] = src[i];
+       /* zero out counters that only exist in rtnl_link_stats64 */
+       memset((char *)stats64 + n * sizeof(u64), 0,
+              sizeof(*stats64) - n * sizeof(u64));
 #endif
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);
@@ -7297,6 +7310,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
        }
        storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
        storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
+       storage->rx_nohandler += atomic_long_read(&dev->rx_nohandler);
        return storage;
 }
 EXPORT_SYMBOL(dev_get_stats);
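
netdev_stats_to_stats64() now only assumes that rtnl_link_stats64 is a superset of net_device_stats: the common prefix is copied and the trailing counters (such as the new rx_nohandler) are zeroed. A stand-alone userspace illustration of that copy-then-zero-the-tail technique (the struct names are invented stand-ins):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct old_stats { unsigned long rx_packets, tx_packets; };
struct new_stats64 { uint64_t rx_packets, tx_packets, rx_nohandler; };

static void to_stats64(struct new_stats64 *dst, const struct old_stats *src)
{
	size_t i, n = sizeof(*src) / sizeof(unsigned long);
	const unsigned long *s = (const unsigned long *)src;
	uint64_t *d = (uint64_t *)dst;

	for (i = 0; i < n; i++)
		d[i] = s[i];
	/* counters that exist only in the wider struct are zeroed */
	memset((char *)dst + n * sizeof(uint64_t), 0,
	       sizeof(*dst) - n * sizeof(uint64_t));
}

int main(void)
{
	struct old_stats o = { .rx_packets = 5, .tx_packets = 7 };
	struct new_stats64 n;

	to_stats64(&n, &o);
	printf("%llu %llu %llu\n",
	       (unsigned long long)n.rx_packets,
	       (unsigned long long)n.tx_packets,
	       (unsigned long long)n.rx_nohandler);	/* prints: 5 7 0 */
	return 0;
}
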
index daf04709dd3c695ff5c47f7d30c0f005397db00e..453c803f1c8713da0138041e2b59534aae6b8206 100644 (file)
@@ -632,7 +632,7 @@ static int ethtool_copy_validate_indir(u32 *indir, void __user *useraddr,
        return 0;
 }
 
-u8 netdev_rss_key[NETDEV_RSS_KEY_LEN];
+u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 
 void netdev_rss_key_fill(void *buffer, size_t len)
 {
index d79699c9d1b9eb9f250254e360b2d5a7b4ff6e34..eab81bc80e5cdff993a030b109cbffa133262bf0 100644 (file)
@@ -208,7 +208,6 @@ ip:
        case htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
-               __be32 flow_label;
 
 ipv6:
                iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
@@ -230,8 +229,12 @@ ipv6:
                        key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
                }
 
-               flow_label = ip6_flowlabel(iph);
-               if (flow_label) {
+               if ((dissector_uses_key(flow_dissector,
+                                       FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
+                    (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
+                   ip6_flowlabel(iph)) {
+                       __be32 flow_label = ip6_flowlabel(iph);
+
                        if (dissector_uses_key(flow_dissector,
                                               FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
                                key_tags = skb_flow_dissector_target(flow_dissector,
index b6c8a6629b397134f5ea539adb7ef8ba80625225..da7dbc237a5f4c2f98aee3bf5f95a78f6768707e 100644 (file)
@@ -574,6 +574,7 @@ NETSTAT_ENTRY(tx_heartbeat_errors);
 NETSTAT_ENTRY(tx_window_errors);
 NETSTAT_ENTRY(rx_compressed);
 NETSTAT_ENTRY(tx_compressed);
+NETSTAT_ENTRY(rx_nohandler);
 
 static struct attribute *netstat_attrs[] = {
        &dev_attr_rx_packets.attr,
@@ -599,6 +600,7 @@ static struct attribute *netstat_attrs[] = {
        &dev_attr_tx_window_errors.attr,
        &dev_attr_rx_compressed.attr,
        &dev_attr_tx_compressed.attr,
+       &dev_attr_rx_nohandler.attr,
        NULL
 };
 
index d735e854f916040912fb12930cbc6a7950ace942..20d71358c14392901937b8c85c38164c247b046b 100644 (file)
@@ -804,6 +804,8 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
 
        a->rx_compressed = b->rx_compressed;
        a->tx_compressed = b->tx_compressed;
+
+       a->rx_nohandler = b->rx_nohandler;
 }
 
 static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
index 14596fb3717270d62fa70544b7ec2496de96e1ce..2696aefdc148887138d46f98dc0a678ce2699512 100644 (file)
@@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                *fplp = fpl;
                fpl->count = 0;
                fpl->max = SCM_MAX_FD;
+               fpl->user = NULL;
        }
        fpp = &fpl->fp[fpl->count];
 
@@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
                *fpp++ = file;
                fpl->count++;
        }
+
+       if (!fpl->user)
+               fpl->user = get_uid(current_user());
+
        return num;
 }
 
@@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
                scm->fp = NULL;
                for (i=fpl->count-1; i>=0; i--)
                        fput(fpl->fp[i]);
+               free_uid(fpl->user);
                kfree(fpl);
        }
 }
@@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
                for (i = 0; i < fpl->count; i++)
                        get_file(fpl->fp[i]);
                new_fpl->max = new_fpl->count;
+               new_fpl->user = get_uid(fpl->user);
        }
        return new_fpl;
 }
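
scm_fp_list now pins the sending user with get_uid(), drops that reference in __scm_destroy(), and takes an extra one when the list is duplicated in scm_fp_dup(). A generic sketch of that reference pattern (not the scm code itself; type and helper names invented):

#include <linux/cred.h>
#include <linux/sched.h>

struct fd_list_owner {
	struct user_struct *user;
};

static void owner_attach(struct fd_list_owner *o)
{
	o->user = get_uid(current_user());	/* take a reference on the sender's uid */
}

static void owner_release(struct fd_list_owner *o)
{
	free_uid(o->user);			/* drop it exactly once */
}
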
index b2df375ec9c2173a8132b8efa1c3062f0510284b..5bf88f58bee7405ff65f80487a64339b92a91bcb 100644 (file)
@@ -79,6 +79,8 @@
 
 struct kmem_cache *skbuff_head_cache __read_mostly;
 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
+EXPORT_SYMBOL(sysctl_max_skb_frags);
 
 /**
  *     skb_panic - private function for out-of-line support
index 1df98c55744029de02522e34ed00c3cd2ca03af1..e92b759d906c1bbcad5ff3ecc977d6393df90361 100644 (file)
@@ -93,10 +93,17 @@ static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
  *  @sk2: Socket belonging to the existing reuseport group.
  *  May return ENOMEM and not add socket to group under memory pressure.
  */
-int reuseport_add_sock(struct sock *sk, const struct sock *sk2)
+int reuseport_add_sock(struct sock *sk, struct sock *sk2)
 {
        struct sock_reuseport *reuse;
 
+       if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
+               int err = reuseport_alloc(sk2);
+
+               if (err)
+                       return err;
+       }
+
        spin_lock_bh(&reuseport_lock);
        reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock)),
index 95b6139d710c46825d1e43f825188d81fcb70f60..a6beb7b6ae556dff501413d6661d9c4655502e36 100644 (file)
@@ -26,6 +26,7 @@ static int zero = 0;
 static int one = 1;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
+static int max_skb_frags = MAX_SKB_FRAGS;
 
 static int net_msg_warn;       /* Unused, but still a sysctl */
 
@@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "max_skb_frags",
+               .data           = &sysctl_max_skb_frags,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &one,
+               .extra2         = &max_skb_frags,
+       },
        { }
 };
 
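Once the table is registered, the new knob appears as /proc/sys/net/core/max_skb_frags (e.g. sysctl -w net.core.max_skb_frags=8), and proc_dointvec_minmax() clamps writes to the [1, MAX_SKB_FRAGS] range wired up through the extra1/extra2 pointers above.
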
index c22920525e5d844bd7e4210b233440b883c2681e..775824720b6b57d68460fc8e05915e479f221414 100644 (file)
@@ -353,6 +353,7 @@ config INET_ESP
        select CRYPTO_CBC
        select CRYPTO_SHA1
        select CRYPTO_DES
+       select CRYPTO_ECHAINIV
        ---help---
          Support for IPsec ESP.
 
index 7aea0ccb6be6e463393c737103bf1ad467ee1d39..d07fc076bea0a4bc96f68075fb3bb79b95007e63 100644 (file)
@@ -1394,9 +1394,10 @@ found:
                struct fib_info *fi = fa->fa_info;
                int nhsel, err;
 
-               if ((index >= (1ul << fa->fa_slen)) &&
-                   ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen != KEYLENGTH)))
-                       continue;
+               if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) {
+                       if (index >= (1ul << fa->fa_slen))
+                               continue;
+               }
                if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
                        continue;
                if (fi->fib_dead)
index 46b9c887bede0568ec378d3b809dda8fa463a166..9b17c1792dce6b1f93b7c1a0c4e5cf941220b9c2 100644 (file)
@@ -482,10 +482,6 @@ EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);
 #define AF_INET_FAMILY(fam) true
 #endif
 
-/* Only thing we need from tcp.h */
-extern int sysctl_tcp_synack_retries;
-
-
 /* Decide when to expire the request and when to resend SYN-ACK */
 static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
                                  const int max_retries,
@@ -557,6 +553,7 @@ static void reqsk_timer_handler(unsigned long data)
 {
        struct request_sock *req = (struct request_sock *)data;
        struct sock *sk_listener = req->rsk_listener;
+       struct net *net = sock_net(sk_listener);
        struct inet_connection_sock *icsk = inet_csk(sk_listener);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        int qlen, expire = 0, resend = 0;
@@ -566,7 +563,7 @@ static void reqsk_timer_handler(unsigned long data)
        if (sk_state_load(sk_listener) != TCP_LISTEN)
                goto drop;
 
-       max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
+       max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
        thresh = max_retries;
        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established socket) for first timeout.
index 8bb8e7ad85483234d8a852782515ca11c93af68d..6029157a19ed1632fbf642416c0199829bf6d7ff 100644 (file)
@@ -361,13 +361,20 @@ struct sock *inet_diag_find_one_icsk(struct net *net,
                                 req->id.idiag_dport, req->id.idiag_src[0],
                                 req->id.idiag_sport, req->id.idiag_if);
 #if IS_ENABLED(CONFIG_IPV6)
-       else if (req->sdiag_family == AF_INET6)
-               sk = inet6_lookup(net, hashinfo,
-                                 (struct in6_addr *)req->id.idiag_dst,
-                                 req->id.idiag_dport,
-                                 (struct in6_addr *)req->id.idiag_src,
-                                 req->id.idiag_sport,
-                                 req->id.idiag_if);
+       else if (req->sdiag_family == AF_INET6) {
+               if (ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_dst) &&
+                   ipv6_addr_v4mapped((struct in6_addr *)req->id.idiag_src))
+                       sk = inet_lookup(net, hashinfo, req->id.idiag_dst[3],
+                                        req->id.idiag_dport, req->id.idiag_src[3],
+                                        req->id.idiag_sport, req->id.idiag_if);
+               else
+                       sk = inet6_lookup(net, hashinfo,
+                                         (struct in6_addr *)req->id.idiag_dst,
+                                         req->id.idiag_dport,
+                                         (struct in6_addr *)req->id.idiag_src,
+                                         req->id.idiag_sport,
+                                         req->id.idiag_if);
+       }
 #endif
        else
                return ERR_PTR(-EINVAL);
index 3f00810b7288991f83ba147a65ac4f78e68ae594..187c6fcc3027779ba31d0721eb4a7389addaaf8a 100644 (file)
@@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
        struct ipq *qp;
 
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+       skb_orphan(skb);
 
        /* Lookup (or create) queue header */
        qp = ip_find(net, ip_hdr(skb), user, vif);
index 7c51c4e1661f9aafc63829cf58b3b5ec28ed859e..56fdf4e0dce4ef4cb88e34096b4d06cae7de41c5 100644 (file)
@@ -1240,6 +1240,14 @@ struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
        err = ipgre_newlink(net, dev, tb, NULL);
        if (err < 0)
                goto out;
+
+       /* openvswitch users expect packet sizes to be unrestricted,
+        * so set the largest MTU we can.
+        */
+       err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
+       if (err)
+               goto out;
+
        return dev;
 out:
        free_netdev(dev);
index b1209b63381f6f9ae81cf258ad3724e1b6a6b9d8..d77eb0c3b6842bc5605a882f9ad46c609dcdeb50 100644 (file)
@@ -316,7 +316,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
        const struct iphdr *iph = ip_hdr(skb);
        struct rtable *rt;
 
-       if (sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
+       if (sysctl_ip_early_demux &&
+           !skb_dst(skb) &&
+           !skb->sk &&
+           !ip_is_fragment(iph)) {
                const struct net_protocol *ipprot;
                int protocol = iph->protocol;
 
index c7bd72e9b544848d10a490b010e0a30c4d5e4c21..89e8861e05fcb1d371c371c1784b89499b69d9df 100644 (file)
@@ -943,17 +943,31 @@ done:
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
 
-int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
 {
        struct ip_tunnel *tunnel = netdev_priv(dev);
        int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+       int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
 
-       if (new_mtu < 68 ||
-           new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
+       if (new_mtu < 68)
                return -EINVAL;
+
+       if (new_mtu > max_mtu) {
+               if (strict)
+                       return -EINVAL;
+
+               new_mtu = max_mtu;
+       }
+
        dev->mtu = new_mtu;
        return 0;
 }
+EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
+
+int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+       return __ip_tunnel_change_mtu(dev, new_mtu, true);
+}
 EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
 
 static void ip_tunnel_dev_free(struct net_device *dev)
index 67f7c9de0b16689dd1e7d075882b9477ac5e2bf9..2ed9dd2b5f2f5a6ea3ea52208e5ec1424a501fc5 100644 (file)
@@ -143,7 +143,11 @@ static char dhcp_client_identifier[253] __initdata;
 
 /* Persistent data: */
 
+#ifdef IPCONFIG_DYNAMIC
 static int ic_proto_used;                      /* Protocol used, if any */
+#else
+#define ic_proto_used 0
+#endif
 static __be32 ic_nameservers[CONF_NAMESERVERS_MAX]; /* DNS Server IP addresses */
 static u8 ic_domain[64];               /* DNS (not NIS) domain name */
 
index 6fb869f646bf7a15b2f012cc1982c2a9f3d5935f..a04dee536b8ef8b5f4c1a085563d5d37b6fa118b 100644 (file)
@@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
 {
        int err;
 
-       skb_orphan(skb);
-
        local_bh_disable();
        err = ip_defrag(net, skb, user);
        local_bh_enable();
index 643a86c490208cad3fa1e4098e5bc7c30d03eab8..ba0dcffada3b74cdaf8a4c1bf422704541f6d69f 100644 (file)
@@ -19,8 +19,6 @@
 #include <net/tcp.h>
 #include <net/route.h>
 
-extern int sysctl_tcp_syncookies;
-
 static u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS] __read_mostly;
 
 #define COOKIEBITS 24  /* Upper bits store count */
@@ -307,7 +305,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
        __u8 rcv_wscale;
        struct flowi4 fl4;
 
-       if (!sysctl_tcp_syncookies || !th->ack || th->rst)
+       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
                goto out;
 
        if (tcp_synq_no_recent_overflow(sk))
index 4d367b4139a34fe04cc17dfdebab5dc42a53921a..44bb59824267c74f3aa2aa7067c1d8a6233f1b87 100644 (file)
@@ -291,22 +291,6 @@ static struct ctl_table ipv4_table[] = {
                .extra1         = &ip_ttl_min,
                .extra2         = &ip_ttl_max,
        },
-       {
-               .procname       = "tcp_syn_retries",
-               .data           = &sysctl_tcp_syn_retries,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &tcp_syn_retries_min,
-               .extra2         = &tcp_syn_retries_max
-       },
-       {
-               .procname       = "tcp_synack_retries",
-               .data           = &sysctl_tcp_synack_retries,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_max_orphans",
                .data           = &sysctl_tcp_max_orphans,
@@ -335,37 +319,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
-       {
-               .procname       = "tcp_retries1",
-               .data           = &sysctl_tcp_retries1,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra2         = &tcp_retr1_max
-       },
-       {
-               .procname       = "tcp_retries2",
-               .data           = &sysctl_tcp_retries2,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
-       {
-               .procname       = "tcp_fin_timeout",
-               .data           = &sysctl_tcp_fin_timeout,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-#ifdef CONFIG_SYN_COOKIES
-       {
-               .procname       = "tcp_syncookies",
-               .data           = &sysctl_tcp_syncookies,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
-#endif
        {
                .procname       = "tcp_fastopen",
                .data           = &sysctl_tcp_fastopen,
@@ -459,13 +412,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
-       {
-               .procname       = "tcp_orphan_retries",
-               .data           = &sysctl_tcp_orphan_retries,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_fack",
                .data           = &sysctl_tcp_fack,
@@ -480,13 +426,6 @@ static struct ctl_table ipv4_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-       {
-               .procname       = "tcp_reordering",
-               .data           = &sysctl_tcp_reordering,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
        {
                .procname       = "tcp_max_reordering",
                .data           = &sysctl_tcp_max_reordering,
@@ -516,13 +455,6 @@ static struct ctl_table ipv4_table[] = {
                .proc_handler   = proc_dointvec_minmax,
                .extra1         = &one,
        },
-       {
-               .procname       = "tcp_notsent_lowat",
-               .data           = &sysctl_tcp_notsent_lowat,
-               .maxlen         = sizeof(sysctl_tcp_notsent_lowat),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec,
-       },
        {
                .procname       = "tcp_rmem",
                .data           = &sysctl_tcp_rmem,
@@ -960,6 +892,74 @@ static struct ctl_table ipv4_net_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
        },
+       {
+               .procname       = "tcp_syn_retries",
+               .data           = &init_net.ipv4.sysctl_tcp_syn_retries,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &tcp_syn_retries_min,
+               .extra2         = &tcp_syn_retries_max
+       },
+       {
+               .procname       = "tcp_synack_retries",
+               .data           = &init_net.ipv4.sysctl_tcp_synack_retries,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+#ifdef CONFIG_SYN_COOKIES
+       {
+               .procname       = "tcp_syncookies",
+               .data           = &init_net.ipv4.sysctl_tcp_syncookies,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+#endif
+       {
+               .procname       = "tcp_reordering",
+               .data           = &init_net.ipv4.sysctl_tcp_reordering,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_retries1",
+               .data           = &init_net.ipv4.sysctl_tcp_retries1,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra2         = &tcp_retr1_max
+       },
+       {
+               .procname       = "tcp_retries2",
+               .data           = &init_net.ipv4.sysctl_tcp_retries2,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_orphan_retries",
+               .data           = &init_net.ipv4.sysctl_tcp_orphan_retries,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "tcp_fin_timeout",
+               .data           = &init_net.ipv4.sysctl_tcp_fin_timeout,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_jiffies,
+       },
+       {
+               .procname       = "tcp_notsent_lowat",
+               .data           = &init_net.ipv4.sysctl_tcp_notsent_lowat,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
        { }
 };
 
index fd17eec9352517f68b86fcd08bf5b215bfa6fa46..7d233201763bb2c8404ea952a9abb0b39998e788 100644 (file)
 
 #define pr_fmt(fmt) "TCP: " fmt
 
+#include <crypto/hash.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/swap.h>
 #include <linux/cache.h>
 #include <linux/err.h>
-#include <linux/crypto.h>
 #include <linux/time.h>
 #include <linux/slab.h>
 
 
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
+#include <asm/unaligned.h>
 #include <net/busy_poll.h>
 
-int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
-
 int sysctl_tcp_min_tso_segs __read_mostly = 2;
 
 int sysctl_tcp_autocorking __read_mostly = 1;
@@ -405,7 +404,7 @@ void tcp_init_sock(struct sock *sk)
        tp->mss_cache = TCP_MSS_DEFAULT;
        u64_stats_init(&tp->syncp);
 
-       tp->reordering = sysctl_tcp_reordering;
+       tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
        tcp_enable_early_retrans(tp);
        tcp_assign_congestion_control(sk);
 
@@ -939,7 +938,7 @@ new_segment:
 
                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
-               if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+               if (!can_coalesce && i >= sysctl_max_skb_frags) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
@@ -1212,7 +1211,7 @@ new_segment:
 
                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
-                               if (i == MAX_SKB_FRAGS || !sg) {
+                               if (i == sysctl_max_skb_frags || !sg) {
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                }
@@ -1465,8 +1464,10 @@ static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
 
        while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
                offset = seq - TCP_SKB_CB(skb)->seq;
-               if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+               if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+                       pr_err_once("%s: found a SYN, please report !\n", __func__);
                        offset--;
+               }
                if (offset < skb->len || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) {
                        *off = offset;
                        return skb;
@@ -1656,8 +1657,10 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                                break;
 
                        offset = *seq - TCP_SKB_CB(skb)->seq;
-                       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+                       if (unlikely(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) {
+                               pr_err_once("%s: found a SYN, please report !\n", __func__);
                                offset--;
+                       }
                        if (offset < skb->len)
                                goto found_ok_skb;
                        if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -2325,6 +2328,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct net *net = sock_net(sk);
        int val;
        int err = 0;
 
@@ -2521,7 +2525,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
        case TCP_LINGER2:
                if (val < 0)
                        tp->linger2 = -1;
-               else if (val > sysctl_tcp_fin_timeout / HZ)
+               else if (val > net->ipv4.sysctl_tcp_fin_timeout / HZ)
                        tp->linger2 = 0;
                else
                        tp->linger2 = val * HZ;
@@ -2638,6 +2642,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;
        unsigned int start;
+       u64 rate64;
        u32 rate;
 
        memset(info, 0, sizeof(*info));
@@ -2703,15 +2708,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
        info->tcpi_total_retrans = tp->total_retrans;
 
        rate = READ_ONCE(sk->sk_pacing_rate);
-       info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_pacing_rate);
 
        rate = READ_ONCE(sk->sk_max_pacing_rate);
-       info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
+       rate64 = rate != ~0U ? rate : ~0ULL;
+       put_unaligned(rate64, &info->tcpi_max_pacing_rate);
 
        do {
                start = u64_stats_fetch_begin_irq(&tp->syncp);
-               info->tcpi_bytes_acked = tp->bytes_acked;
-               info->tcpi_bytes_received = tp->bytes_received;
+               put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
+               put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
        } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
        info->tcpi_segs_out = tp->segs_out;
        info->tcpi_segs_in = tp->segs_in;
@@ -2723,6 +2730,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        int val, len;
 
        if (get_user(len, optlen))
@@ -2757,12 +2765,12 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                val = keepalive_probes(tp);
                break;
        case TCP_SYNCNT:
-               val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+               val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                break;
        case TCP_LINGER2:
                val = tp->linger2;
                if (val >= 0)
-                       val = (val ? : sysctl_tcp_fin_timeout) / HZ;
+                       val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
                break;
        case TCP_DEFER_ACCEPT:
                val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
@@ -2939,17 +2947,26 @@ static bool tcp_md5sig_pool_populated = false;
 
 static void __tcp_alloc_md5sig_pool(void)
 {
+       struct crypto_ahash *hash;
        int cpu;
 
+       hash = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR_OR_NULL(hash))
+               return;
+
        for_each_possible_cpu(cpu) {
-               if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
-                       struct crypto_hash *hash;
+               struct ahash_request *req;
 
-                       hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
-                       if (IS_ERR_OR_NULL(hash))
-                               return;
-                       per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
-               }
+               if (per_cpu(tcp_md5sig_pool, cpu).md5_req)
+                       continue;
+
+               req = ahash_request_alloc(hash, GFP_KERNEL);
+               if (!req)
+                       return;
+
+               ahash_request_set_callback(req, 0, NULL, NULL);
+
+               per_cpu(tcp_md5sig_pool, cpu).md5_req = req;
        }
        /* before setting tcp_md5sig_pool_populated, we must commit all writes
         * to memory. See smp_rmb() in tcp_get_md5sig_pool()
@@ -2999,7 +3016,6 @@ int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
 {
        struct scatterlist sg;
        struct tcphdr hdr;
-       int err;
 
        /* We are not allowed to change tcphdr, make a local copy */
        memcpy(&hdr, th, sizeof(hdr));
@@ -3007,8 +3023,8 @@ int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
 
        /* options aren't included in the hash */
        sg_init_one(&sg, &hdr, sizeof(hdr));
-       err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
-       return err;
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(hdr));
+       return crypto_ahash_update(hp->md5_req);
 }
 EXPORT_SYMBOL(tcp_md5_hash_header);
 
@@ -3017,7 +3033,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 {
        struct scatterlist sg;
        const struct tcphdr *tp = tcp_hdr(skb);
-       struct hash_desc *desc = &hp->md5_desc;
+       struct ahash_request *req = hp->md5_req;
        unsigned int i;
        const unsigned int head_data_len = skb_headlen(skb) > header_len ?
                                           skb_headlen(skb) - header_len : 0;
@@ -3027,7 +3043,8 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
        sg_init_table(&sg, 1);
 
        sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
-       if (crypto_hash_update(desc, &sg, head_data_len))
+       ahash_request_set_crypt(req, &sg, NULL, head_data_len);
+       if (crypto_ahash_update(req))
                return 1;
 
        for (i = 0; i < shi->nr_frags; ++i) {
@@ -3037,7 +3054,8 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 
                sg_set_page(&sg, page, skb_frag_size(f),
                            offset_in_page(offset));
-               if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
+               ahash_request_set_crypt(req, &sg, NULL, skb_frag_size(f));
+               if (crypto_ahash_update(req))
                        return 1;
        }
 
@@ -3054,7 +3072,8 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *ke
        struct scatterlist sg;
 
        sg_init_one(&sg, key->key, key->keylen);
-       return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, key->keylen);
+       return crypto_ahash_update(hp->md5_req);
 }
 EXPORT_SYMBOL(tcp_md5_hash_key);
 
index 55be6ac70cff3679cd7a80aa9aaac48ac156a203..279bac6357f4de9c2fa0377929badd4adc03aae8 100644 (file)
@@ -1,3 +1,4 @@
+#include <linux/crypto.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
@@ -124,6 +125,41 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
        return false;
 }
 
+
+/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
+ * queue this additional data / FIN.
+ */
+void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
+               return;
+
+       skb = skb_clone(skb, GFP_ATOMIC);
+       if (!skb)
+               return;
+
+       skb_dst_drop(skb);
+       __skb_pull(skb, tcp_hdrlen(skb));
+       skb_set_owner_r(skb, sk);
+
+       TCP_SKB_CB(skb)->seq++;
+       TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;
+
+       tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       __skb_queue_tail(&sk->sk_receive_queue, skb);
+       tp->syn_data_acked = 1;
+
+       /* u64_stats_update_begin(&tp->syncp) not needed here,
+        * as we certainly are not changing upper 32bit value (0)
+        */
+       tp->bytes_received = skb->len;
+
+       if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+               tcp_fin(sk);
+}
+
 static struct sock *tcp_fastopen_create_child(struct sock *sk,
                                              struct sk_buff *skb,
                                              struct dst_entry *dst,
@@ -132,7 +168,6 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
        struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
-       u32 end_seq;
        bool own_req;
 
        req->num_retrans = 0;
@@ -178,35 +213,11 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
        tcp_init_metrics(child);
        tcp_init_buffer_space(child);
 
-       /* Queue the data carried in the SYN packet.
-        * We used to play tricky games with skb_get().
-        * With lockless listener, it is a dead end.
-        * Do not think about it.
-        *
-        * XXX (TFO) - we honor a zero-payload TFO request for now,
-        * (any reason not to?) but no need to queue the skb since
-        * there is no data. How about SYN+FIN?
-        */
-       end_seq = TCP_SKB_CB(skb)->end_seq;
-       if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
-               struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
-
-               if (likely(skb2)) {
-                       skb_dst_drop(skb2);
-                       __skb_pull(skb2, tcp_hdrlen(skb));
-                       skb_set_owner_r(skb2, child);
-                       __skb_queue_tail(&child->sk_receive_queue, skb2);
-                       tp->syn_data_acked = 1;
-
-                       /* u64_stats_update_begin(&tp->syncp) not needed here,
-                        * as we certainly are not changing upper 32bit value (0)
-                        */
-                       tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
-               } else {
-                       end_seq = TCP_SKB_CB(skb)->seq + 1;
-               }
-       }
-       tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
+       tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+
+       tcp_fastopen_add_skb(child, skb);
+
+       tcp_rsk(req)->rcv_nxt = tp->rcv_nxt;
        /* tcp_conn_request() is sending the SYNACK,
         * and queues the child into listener accept queue.
         */
index 0003d409fec5beec010050ef5ccafd58bf2f1501..5ee6fe0d152dbe8ded87fc7bb1f49d2053124413 100644 (file)
@@ -80,9 +80,7 @@ int sysctl_tcp_timestamps __read_mostly = 1;
 int sysctl_tcp_window_scaling __read_mostly = 1;
 int sysctl_tcp_sack __read_mostly = 1;
 int sysctl_tcp_fack __read_mostly = 1;
-int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
 int sysctl_tcp_max_reordering __read_mostly = 300;
-EXPORT_SYMBOL(sysctl_tcp_reordering);
 int sysctl_tcp_dsack __read_mostly = 1;
 int sysctl_tcp_app_win __read_mostly = 31;
 int sysctl_tcp_adv_win_scale __read_mostly = 1;
@@ -126,6 +124,10 @@ int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
 #define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
 
+#define REXMIT_NONE    0 /* no loss recovery to do */
+#define REXMIT_LOST    1 /* retransmit packets marked lost */
+#define REXMIT_NEW     2 /* FRTO-style transmit of unsent/new packets */
+
 /* Adapt the MSS value used to make delayed ack decision to the
  * real world.
  */
@@ -1210,6 +1212,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
                sacked |= TCPCB_SACKED_ACKED;
                state->flag |= FLAG_DATA_SACKED;
                tp->sacked_out += pcount;
+               tp->delivered += pcount;  /* Out-of-order packets delivered */
 
                fack_count += pcount;
 
@@ -1821,8 +1824,12 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 static void tcp_add_reno_sack(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       u32 prior_sacked = tp->sacked_out;
+
        tp->sacked_out++;
        tcp_check_reno_reordering(sk, 0);
+       if (tp->sacked_out > prior_sacked)
+               tp->delivered++; /* Some out-of-order packet is delivered */
        tcp_verify_left_out(tp);
 }
 
@@ -1834,6 +1841,7 @@ static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 
        if (acked > 0) {
                /* One ACK acked hole. The rest eat duplicate ACKs. */
+               tp->delivered += max_t(int, acked - tp->sacked_out, 1);
                if (acked - 1 >= tp->sacked_out)
                        tp->sacked_out = 0;
                else
@@ -1873,6 +1881,7 @@ void tcp_enter_loss(struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        struct sk_buff *skb;
        bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
        bool is_reneg;                  /* is receiver reneging on SACKs? */
@@ -1923,9 +1932,9 @@ void tcp_enter_loss(struct sock *sk)
         * suggests that the degree of reordering is over-estimated.
         */
        if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-           tp->sacked_out >= sysctl_tcp_reordering)
+           tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
                tp->reordering = min_t(unsigned int, tp->reordering,
-                                      sysctl_tcp_reordering);
+                                      net->ipv4.sysctl_tcp_reordering);
        tcp_set_ca_state(sk, TCP_CA_Loss);
        tp->high_seq = tp->snd_nxt;
        tcp_ecn_queue_cwr(tp);
@@ -2109,6 +2118,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        __u32 packets_out;
+       int tcp_reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
 
        /* Trick#1: The loss is proven. */
        if (tp->lost_out)
@@ -2123,7 +2133,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
         */
        packets_out = tp->packets_out;
        if (packets_out <= tp->reordering &&
-           tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
+           tp->sacked_out >= max_t(__u32, packets_out/2, tcp_reordering) &&
            !tcp_may_send_now(sk)) {
                /* We have nothing to send. This connection is limited
                 * either by receiver window or by application.
@@ -2164,8 +2174,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int cnt, oldcnt;
-       int err;
+       int cnt, oldcnt, lost;
        unsigned int mss;
        /* Use SACK to deduce losses of new sequences sent during recovery */
        const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;
@@ -2205,9 +2214,10 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
                                break;
 
                        mss = tcp_skb_mss(skb);
-                       err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
-                                          mss, GFP_ATOMIC);
-                       if (err < 0)
+                       /* If needed, chop off the prefix to mark as lost. */
+                       lost = (packets - oldcnt) * mss;
+                       if (lost < skb->len &&
+                           tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0)
                                break;
                        cnt = packets;
                }
@@ -2366,8 +2376,6 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
                        tp->snd_ssthresh = tp->prior_ssthresh;
                        tcp_ecn_withdraw_cwr(tp);
                }
-       } else {
-               tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
        }
        tp->snd_cwnd_stamp = tcp_time_stamp;
        tp->undo_marker = 0;
@@ -2469,14 +2477,12 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
        tcp_ecn_queue_cwr(tp);
 }
 
-static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
-                              int fast_rexmit, int flag)
+static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
+                              int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        int sndcnt = 0;
        int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
-       int newly_acked_sacked = prior_unsacked -
-                                (tp->packets_out - tp->sacked_out);
 
        if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd))
                return;
@@ -2494,7 +2500,8 @@ static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
        } else {
                sndcnt = min(delta, newly_acked_sacked);
        }
-       sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+       /* Force a fast retransmit upon entering fast recovery */
+       sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
        tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
 }
 
@@ -2539,7 +2546,7 @@ static void tcp_try_keep_open(struct sock *sk)
        }
 }
 
-static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
+static void tcp_try_to_open(struct sock *sk, int flag)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2553,8 +2560,6 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
-       } else {
-               tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
        }
 }
 
@@ -2664,7 +2669,8 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 /* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
  * recovered or spurious. Otherwise retransmits more on partial ACKs.
  */
-static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
+static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
+                            int *rexmit)
 {
        struct tcp_sock *tp = tcp_sk(sk);
        bool recovered = !before(tp->snd_una, tp->high_seq);
@@ -2686,10 +2692,15 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
                                tp->frto = 0; /* Step 3.a. loss was real */
                } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
                        tp->high_seq = tp->snd_nxt;
-                       __tcp_push_pending_frames(sk, tcp_current_mss(sk),
-                                                 TCP_NAGLE_OFF);
-                       if (after(tp->snd_nxt, tp->high_seq))
-                               return; /* Step 2.b */
+                       /* Step 2.b. Try send new data (but deferred until cwnd
+                        * is updated in tcp_ack()). Otherwise fall back to
+                        * the conventional recovery.
+                        */
+                       if (tcp_send_head(sk) &&
+                           after(tcp_wnd_end(tp), tp->snd_nxt)) {
+                               *rexmit = REXMIT_NEW;
+                               return;
+                       }
                        tp->frto = 0;
                }
        }
@@ -2708,12 +2719,11 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
                else if (flag & FLAG_SND_UNA_ADVANCED)
                        tcp_reset_reno_sack(tp);
        }
-       tcp_xmit_retransmit_queue(sk);
+       *rexmit = REXMIT_LOST;
 }
 
 /* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, const int acked,
-                                const int prior_unsacked, int flag)
+static bool tcp_try_undo_partial(struct sock *sk, const int acked)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2728,10 +2738,8 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
                 * can undo. Otherwise we clock out new packets but do not
                 * mark more packets lost or retransmit more.
                 */
-               if (tp->retrans_out) {
-                       tcp_cwnd_reduction(sk, prior_unsacked, 0, flag);
+               if (tp->retrans_out)
                        return true;
-               }
 
                if (!tcp_any_retrans_done(sk))
                        tp->retrans_stamp = 0;
@@ -2750,21 +2758,21 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked,
  * taking into account both packets sitting in receiver's buffer and
  * packets lost by network.
  *
- * Besides that it does CWND reduction, when packet loss is detected
- * and changes state of machine.
+ * Besides that it updates the congestion state when packet loss or ECN
+ * is detected. But it does not reduce the cwnd, it is done by the
+ * congestion control later.
  *
  * It does _not_ decide what to send, it is made in function
  * tcp_xmit_retransmit_queue().
  */
 static void tcp_fastretrans_alert(struct sock *sk, const int acked,
-                                 const int prior_unsacked,
-                                 bool is_dupack, int flag)
+                                 bool is_dupack, int *ack_flag, int *rexmit)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       int fast_rexmit = 0, flag = *ack_flag;
        bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                    (tcp_fackets_out(tp) > tp->reordering));
-       int fast_rexmit = 0;
 
        if (WARN_ON(!tp->packets_out && tp->sacked_out))
                tp->sacked_out = 0;
@@ -2811,8 +2819,10 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
        /* Use RACK to detect loss */
        if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
-           tcp_rack_mark_lost(sk))
+           tcp_rack_mark_lost(sk)) {
                flag |= FLAG_LOST_RETRANS;
+               *ack_flag |= FLAG_LOST_RETRANS;
+       }
 
        /* E. Process state. */
        switch (icsk->icsk_ca_state) {
@@ -2821,7 +2831,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                        if (tcp_is_reno(tp) && is_dupack)
                                tcp_add_reno_sack(sk);
                } else {
-                       if (tcp_try_undo_partial(sk, acked, prior_unsacked, flag))
+                       if (tcp_try_undo_partial(sk, acked))
                                return;
                        /* Partial ACK arrived. Force fast retransmit. */
                        do_lost = tcp_is_reno(tp) ||
@@ -2833,7 +2843,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                }
                break;
        case TCP_CA_Loss:
-               tcp_process_loss(sk, flag, is_dupack);
+               tcp_process_loss(sk, flag, is_dupack, rexmit);
                if (icsk->icsk_ca_state != TCP_CA_Open &&
                    !(flag & FLAG_LOST_RETRANS))
                        return;
@@ -2850,7 +2860,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
                        tcp_try_undo_dsack(sk);
 
                if (!tcp_time_to_recover(sk, flag)) {
-                       tcp_try_to_open(sk, flag, prior_unsacked);
+                       tcp_try_to_open(sk, flag);
                        return;
                }
 
@@ -2872,8 +2882,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 
        if (do_lost)
                tcp_update_scoreboard(sk, fast_rexmit);
-       tcp_cwnd_reduction(sk, prior_unsacked, fast_rexmit, flag);
-       tcp_xmit_retransmit_queue(sk);
+       *rexmit = REXMIT_LOST;
 }
 
 /* Kathleen Nichols' algorithm for tracking the minimum value of
@@ -3095,7 +3104,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
  * arrived at the other end.
  */
 static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
-                              u32 prior_snd_una,
+                              u32 prior_snd_una, int *acked,
                               struct tcp_sacktag_state *sack)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -3153,10 +3162,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                                flag |= FLAG_ORIG_SACK_ACKED;
                }
 
-               if (sacked & TCPCB_SACKED_ACKED)
+               if (sacked & TCPCB_SACKED_ACKED) {
                        tp->sacked_out -= acked_pcount;
-               else if (tcp_is_sack(tp) && !tcp_skb_spurious_retrans(tp, skb))
-                       tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
+               } else if (tcp_is_sack(tp)) {
+                       tp->delivered += acked_pcount;
+                       if (!tcp_skb_spurious_retrans(tp, skb))
+                               tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
+               }
                if (sacked & TCPCB_LOST)
                        tp->lost_out -= acked_pcount;
 
@@ -3265,6 +3277,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
                }
        }
 #endif
+       *acked = pkts_acked;
        return flag;
 }
 
@@ -3298,21 +3311,36 @@ static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
 /* Decide whether to run the increase function of congestion control. */
 static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 {
-       if (tcp_in_cwnd_reduction(sk))
-               return false;
-
        /* If reordering is high then always grow cwnd whenever data is
         * delivered regardless of its ordering. Otherwise stay conservative
         * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
         * new SACK or ECE mark may first advance cwnd here and later reduce
         * cwnd in tcp_fastretrans_alert() based on more states.
         */
-       if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
+       if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
                return flag & FLAG_FORWARD_PROGRESS;
 
        return flag & FLAG_DATA_ACKED;
 }
 
+/* The "ultimate" congestion control function that aims to replace the rigid
+ * cwnd increase and decrease control (tcp_cong_avoid, tcp_*cwnd_reduction).
+ * It's called toward the end of processing an ACK with precise rate
+ * information. All transmissions or retransmissions are delayed afterwards.
+ */
+static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
+                            int flag)
+{
+       if (tcp_in_cwnd_reduction(sk)) {
+               /* Reduce cwnd if state mandates */
+               tcp_cwnd_reduction(sk, acked_sacked, flag);
+       } else if (tcp_may_raise_cwnd(sk, flag)) {
+               /* Advance cwnd if state allows */
+               tcp_cong_avoid(sk, ack, acked_sacked);
+       }
+       tcp_update_pacing_rate(sk);
+}
+
 /* Check that window update is acceptable.
  * The function assumes that snd_una<=ack<=snd_next.
  */
@@ -3508,6 +3536,27 @@ static inline void tcp_in_ack_event(struct sock *sk, u32 flags)
                icsk->icsk_ca_ops->in_ack_event(sk, flags);
 }
 
+/* Congestion control has updated the cwnd already. So if we're in
+ * loss recovery then now we do any new sends (for FRTO) or
+ * retransmits (for CA_Loss or CA_Recovery) that make sense.
+ */
+static void tcp_xmit_recovery(struct sock *sk, int rexmit)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       if (rexmit == REXMIT_NONE)
+               return;
+
+       if (unlikely(rexmit == 2)) {
+               __tcp_push_pending_frames(sk, tcp_current_mss(sk),
+                                         TCP_NAGLE_OFF);
+               if (after(tp->snd_nxt, tp->high_seq))
+                       return;
+               tp->frto = 0;
+       }
+       tcp_xmit_retransmit_queue(sk);
+}
+
 /* This routine deals with incoming acks, but not outgoing ones. */
 static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 {
@@ -3520,8 +3569,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
        bool is_dupack = false;
        u32 prior_fackets;
        int prior_packets = tp->packets_out;
-       const int prior_unsacked = tp->packets_out - tp->sacked_out;
+       u32 prior_delivered = tp->delivered;
        int acked = 0; /* Number of packets newly acked */
+       int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
 
        sack_state.first_sackt.v64 = 0;
 
@@ -3610,23 +3660,16 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                goto no_queue;
 
        /* See if we can take anything off of the retransmit queue. */
-       acked = tp->packets_out;
-       flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
+       flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
                                    &sack_state);
-       acked -= tp->packets_out;
 
        if (tcp_ack_is_dubious(sk, flag)) {
                is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
-               tcp_fastretrans_alert(sk, acked, prior_unsacked,
-                                     is_dupack, flag);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
        }
        if (tp->tlp_high_seq)
                tcp_process_tlp_ack(sk, ack, flag);
 
-       /* Advance cwnd if state allows */
-       if (tcp_may_raise_cwnd(sk, flag))
-               tcp_cong_avoid(sk, ack, acked);
-
        if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
                struct dst_entry *dst = __sk_dst_get(sk);
                if (dst)
@@ -3635,14 +3678,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
        if (icsk->icsk_pending == ICSK_TIME_RETRANS)
                tcp_schedule_loss_probe(sk);
-       tcp_update_pacing_rate(sk);
+       tcp_cong_control(sk, ack, tp->delivered - prior_delivered, flag);
+       tcp_xmit_recovery(sk, rexmit);
        return 1;
 
 no_queue:
        /* If data was DSACKed, see if we can undo a cwnd reduction. */
        if (flag & FLAG_DSACKING_ACK)
-               tcp_fastretrans_alert(sk, acked, prior_unsacked,
-                                     is_dupack, flag);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
        /* If this ack opens up a zero window, clear backoff.  It was
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
@@ -3665,8 +3708,8 @@ old_ack:
        if (TCP_SKB_CB(skb)->sacked) {
                flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
                                                &sack_state);
-               tcp_fastretrans_alert(sk, acked, prior_unsacked,
-                                     is_dupack, flag);
+               tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+               tcp_xmit_recovery(sk, rexmit);
        }
 
        SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
@@ -3997,7 +4040,7 @@ void tcp_reset(struct sock *sk)
  *
  *     If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
  */
-static void tcp_fin(struct sock *sk)
+void tcp_fin(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -5511,6 +5554,9 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
        tp->syn_data_acked = tp->syn_data;
        if (tp->syn_data_acked)
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+
+       tcp_fastopen_add_skb(sk, synack);
+
        return false;
 }
 
@@ -6117,9 +6163,10 @@ static bool tcp_syn_flood_action(const struct sock *sk,
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        const char *msg = "Dropping request";
        bool want_cookie = false;
+       struct net *net = sock_net(sk);
 
 #ifdef CONFIG_SYN_COOKIES
-       if (sysctl_tcp_syncookies) {
+       if (net->ipv4.sysctl_tcp_syncookies) {
                msg = "Sending cookies";
                want_cookie = true;
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
@@ -6128,7 +6175,7 @@ static bool tcp_syn_flood_action(const struct sock *sk,
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 
        if (!queue->synflood_warned &&
-           sysctl_tcp_syncookies != 2 &&
+           net->ipv4.sysctl_tcp_syncookies != 2 &&
            xchg(&queue->synflood_warned, 1) == 0)
                pr_info("%s: Possible SYN flooding on port %d. %s.  Check SNMP counters.\n",
                        proto, ntohs(tcp_hdr(skb)->dest), msg);
@@ -6161,6 +6208,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
        __u32 isn = TCP_SKB_CB(skb)->tcp_tw_isn;
        struct tcp_options_received tmp_opt;
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        struct sock *fastopen_sk = NULL;
        struct dst_entry *dst = NULL;
        struct request_sock *req;
@@ -6171,7 +6219,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
         * limitations, they conserve resources and peer is
         * evidently real one.
         */
-       if ((sysctl_tcp_syncookies == 2 ||
+       if ((net->ipv4.sysctl_tcp_syncookies == 2 ||
             inet_csk_reqsk_queue_is_full(sk)) && !isn) {
                want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
                if (!want_cookie)
@@ -6237,7 +6285,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
                        }
                }
                /* Kill the following clause, if you dislike this way. */
-               else if (!sysctl_tcp_syncookies &&
+               else if (!net->ipv4.sysctl_tcp_syncookies &&
                         (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
                          (sysctl_max_syn_backlog >> 2)) &&
                         !tcp_peer_is_proven(req, dst, false,
index 5ced3e4013e3c2119f43b1674e69ff18e281e664..e37222e46e0ee6098cfaa86827d848ca2f0a6fb5 100644 (file)
@@ -81,7 +81,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <linux/scatterlist.h>
 
 int sysctl_tcp_tw_reuse __read_mostly;
@@ -311,7 +311,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
 
 
 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 {
        struct request_sock *req = inet_reqsk(sk);
        struct net *net = sock_net(sk);
@@ -323,7 +323,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
 
        if (seq != tcp_rsk(req)->snt_isn) {
                NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
-       } else {
+       } else if (abort) {
                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
@@ -383,7 +383,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        }
        seq = ntohl(th->seq);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
-               return tcp_req_err(sk, seq);
+               return tcp_req_err(sk, seq,
+                                 type == ICMP_PARAMETERPROB ||
+                                 type == ICMP_TIME_EXCEEDED ||
+                                 (type == ICMP_DEST_UNREACH &&
+                                  (code == ICMP_NET_UNREACH ||
+                                   code == ICMP_HOST_UNREACH)));
 
        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
@@ -707,7 +712,8 @@ release_sk1:
    outside socket context is ugly, certainly. What can I do?
  */
 
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct net *net,
+                           struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
@@ -722,7 +728,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
                        ];
        } rep;
        struct ip_reply_arg arg;
-       struct net *net = dev_net(skb_dst(skb)->dev);
 
        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));
@@ -784,7 +789,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
 
-       tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+       tcp_v4_send_ack(sock_net(sk), skb,
+                       tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
@@ -803,8 +809,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
-       tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
-                       tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
+       u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
+                                            tcp_sk(sk)->snd_nxt;
+
+       tcp_v4_send_ack(sock_net(sk), skb, seq,
                        tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
                        tcp_time_stamp,
                        req->ts_recent,
@@ -857,7 +865,6 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
        kfree(inet_rsk(req)->opt);
 }
 
-
 #ifdef CONFIG_TCP_MD5SIG
 /*
  * RFC2385 MD5 checksumming requires a mapping of
@@ -1031,21 +1038,22 @@ static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
        bp->len = cpu_to_be16(nbytes);
 
        sg_init_one(&sg, bp, sizeof(*bp));
-       return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
+       return crypto_ahash_update(hp->md5_req);
 }
 
 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th)
 {
        struct tcp_md5sig_pool *hp;
-       struct hash_desc *desc;
+       struct ahash_request *req;
 
        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
-       desc = &hp->md5_desc;
+       req = hp->md5_req;
 
-       if (crypto_hash_init(desc))
+       if (crypto_ahash_init(req))
                goto clear_hash;
        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
@@ -1053,7 +1061,8 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
-       if (crypto_hash_final(desc, md5_hash))
+       ahash_request_set_crypt(req, NULL, md5_hash, 0);
+       if (crypto_ahash_final(req))
                goto clear_hash;
 
        tcp_put_md5sig_pool();
@@ -1071,7 +1080,7 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                        const struct sk_buff *skb)
 {
        struct tcp_md5sig_pool *hp;
-       struct hash_desc *desc;
+       struct ahash_request *req;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 saddr, daddr;
 
@@ -1087,9 +1096,9 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
-       desc = &hp->md5_desc;
+       req = hp->md5_req;
 
-       if (crypto_hash_init(desc))
+       if (crypto_ahash_init(req))
                goto clear_hash;
 
        if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
@@ -1100,7 +1109,8 @@ int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
-       if (crypto_hash_final(desc, md5_hash))
+       ahash_request_set_crypt(req, NULL, md5_hash, 0);
+       if (crypto_ahash_final(req))
                goto clear_hash;
 
        tcp_put_md5sig_pool();
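
[Note] The two hunks above (and the matching tcp_v6_md5_hash_* hunks later in this section) switch the TCP-MD5 pseudoheader hashing from the legacy crypto_hash_*()/hash_desc interface to ahash requests. For orientation, a minimal kernel-context sketch of that calling convention, error handling trimmed; the helper name and the one-shot structure are illustrative only, while the crypto_*ahash*() / ahash_request_*() calls are the real API the patch adopts:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Illustrative one-shot MD5 digest using the ahash request API. */
static int ahash_md5_digest_sketch(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}
	ahash_request_set_callback(req, 0, NULL, NULL);

	sg_init_one(&sg, data, len);

	err = crypto_ahash_init(req);
	if (!err) {
		/* replaces crypto_hash_update(desc, &sg, len) */
		ahash_request_set_crypt(req, &sg, NULL, len);
		err = crypto_ahash_update(req);
	}
	if (!err) {
		/* replaces crypto_hash_final(desc, out) */
		ahash_request_set_crypt(req, NULL, out, 0);
		err = crypto_ahash_final(req);
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
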
@@ -2385,6 +2395,16 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
        net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
 
+       net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
+       net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
+       net->ipv4.sysctl_tcp_syncookies = 1;
+       net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
+       net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
+       net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
+       net->ipv4.sysctl_tcp_orphan_retries = 0;
+       net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
+       net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
+
        return 0;
 fail:
        tcp_sk_exit(net);
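
[Note] This hunk seeds per-namespace copies of the TCP retry, syncookie, reordering, FIN-timeout and notsent-lowat knobs with the same compile-time defaults the removed globals used; the hunks that follow drop the old file-scope sysctl definitions and read the per-namespace fields through the socket instead. A hedged sketch of the resulting access pattern (the helper name is hypothetical; the field and the `?:` fallback are taken from the hunks below):

#include <net/sock.h>
#include <net/tcp.h>

/* Hypothetical helper: effective SYN-retry limit for a socket, preferring the
 * per-socket override and falling back to the namespace default initialised
 * in tcp_sk_init() above.
 */
static inline int tcp_syn_retries_for(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_syn_retries ? :
	       sock_net(sk)->ipv4.sysctl_tcp_syn_retries;
}
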
index c8cbc2b4b7921fb4f70681e4ac6d945f5499654c..c26241f3057b18d8e1d2aaece7d1e2f7f8399462 100644 (file)
@@ -369,6 +369,7 @@ void tcp_update_metrics(struct sock *sk)
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
@@ -473,7 +474,7 @@ void tcp_update_metrics(struct sock *sk)
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
-                           tp->reordering != sysctl_tcp_reordering)
+                           tp->reordering != net->ipv4.sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
index 75632a92582425db63f1078c223cbd54a91fa0c3..fadd8b978951817f75402af7a12f2b5681b00462 100644 (file)
@@ -27,9 +27,6 @@
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 
-int sysctl_tcp_syncookies __read_mostly = 1;
-EXPORT_SYMBOL(sysctl_tcp_syncookies);
-
 int sysctl_tcp_abort_on_overflow __read_mostly;
 
 struct inet_timewait_death_row tcp_death_row = {
index fda379cd600d4e033333a37301f3cba4eec0ba7b..7d2c7a400456bf036ec6b7a32eaf2657eed94378 100644 (file)
@@ -62,9 +62,6 @@ int sysctl_tcp_tso_win_divisor __read_mostly = 3;
 /* By default, RFC2861 behavior.  */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
 
-unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
-EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);
-
 static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                           int push_one, gfp_t gfp);
 
@@ -3476,6 +3473,7 @@ void tcp_send_probe0(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        unsigned long probe_max;
        int err;
 
@@ -3489,7 +3487,7 @@ void tcp_send_probe0(struct sock *sk)
        }
 
        if (err <= 0) {
-               if (icsk->icsk_backoff < sysctl_tcp_retries2)
+               if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
                        icsk->icsk_backoff++;
                icsk->icsk_probes_out++;
                probe_max = TCP_RTO_MAX;
index a4730a28b220a4f8c9a59db57eb30eb3501dbf43..49bc474f8e35ee50407622ab02867df698bc5117 100644 (file)
 #include <linux/gfp.h>
 #include <net/tcp.h>
 
-int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
-int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
-int sysctl_tcp_retries1 __read_mostly = TCP_RETR1;
-int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
-int sysctl_tcp_orphan_retries __read_mostly;
 int sysctl_tcp_thin_linear_timeouts __read_mostly;
 
 static void tcp_write_err(struct sock *sk)
@@ -82,7 +77,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 /* Calculate maximal number of retries on an orphaned socket. */
 static int tcp_orphan_retries(struct sock *sk, bool alive)
 {
-       int retries = sysctl_tcp_orphan_retries; /* May be zero. */
+       int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
 
        /* We know from an ICMP that something is wrong. */
        if (sk->sk_err_soft && !alive)
@@ -157,6 +152,7 @@ static int tcp_write_timeout(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        int retry_until;
        bool do_reset, syn_set = false;
 
@@ -169,10 +165,10 @@ static int tcp_write_timeout(struct sock *sk)
                                NET_INC_STATS_BH(sock_net(sk),
                                                 LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                }
-               retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
+               retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                syn_set = true;
        } else {
-               if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) {
+               if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) {
                        /* Some middle-boxes may black-hole Fast Open _after_
                         * the handshake. Therefore we conservatively disable
                         * Fast Open on this path on recurring timeouts with
@@ -181,7 +177,7 @@ static int tcp_write_timeout(struct sock *sk)
                        if (tp->syn_data_acked &&
                            tp->bytes_acked <= tp->rx_opt.mss_clamp) {
                                tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
-                               if (icsk->icsk_retransmits == sysctl_tcp_retries1)
+                               if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
                                        NET_INC_STATS_BH(sock_net(sk),
                                                         LINUX_MIB_TCPFASTOPENACTIVEFAIL);
                        }
@@ -191,7 +187,7 @@ static int tcp_write_timeout(struct sock *sk)
                        dst_negative_advice(sk);
                }
 
-               retry_until = sysctl_tcp_retries2;
+               retry_until = net->ipv4.sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
                        const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
 
@@ -305,7 +301,7 @@ static void tcp_probe_timer(struct sock *sk)
                 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
                goto abort;
 
-       max_probes = sysctl_tcp_retries2;
+       max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
                const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
 
@@ -332,7 +328,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        int max_retries = icsk->icsk_syn_retries ? :
-           sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
+           sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
        struct request_sock *req;
 
        req = tcp_sk(sk)->fastopen_rsk;
@@ -360,6 +356,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk)
 void tcp_retransmit_timer(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
+       struct net *net = sock_net(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
 
        if (tp->fastopen_rsk) {
@@ -490,7 +487,7 @@ out_reset_timer:
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX);
-       if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0))
+       if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0, 0))
                __sk_dst_reset(sk);
 
 out:;
index dc45b538e237b908f4d6c6337b4c533a4236735a..be0b21852b138ebc5eed9caf37740cbe1cb1abe0 100644 (file)
@@ -499,6 +499,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
        struct sock *sk, *result;
        struct hlist_nulls_node *node;
        int score, badness, matches = 0, reuseport = 0;
+       bool select_ok = true;
        u32 hash = 0;
 
 begin:
@@ -512,14 +513,18 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               struct sock *sk2;
                                hash = udp_ehashfn(net, daddr, hnum,
                                                   saddr, sport);
-                               sk2 = reuseport_select_sock(sk, hash, skb,
-                                                           sizeof(struct udphdr));
-                               if (sk2) {
-                                       result = sk2;
-                                       goto found;
+                               if (select_ok) {
+                                       struct sock *sk2;
+
+                                       sk2 = reuseport_select_sock(sk, hash, skb,
+                                                       sizeof(struct udphdr));
+                                       if (sk2) {
+                                               result = sk2;
+                                               select_ok = false;
+                                               goto found;
+                                       }
                                }
                                matches = 1;
                        }
@@ -563,6 +568,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
        int score, badness, matches = 0, reuseport = 0;
+       bool select_ok = true;
        u32 hash = 0;
 
        rcu_read_lock();
@@ -601,14 +607,18 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               struct sock *sk2;
                                hash = udp_ehashfn(net, daddr, hnum,
                                                   saddr, sport);
-                               sk2 = reuseport_select_sock(sk, hash, skb,
+                               if (select_ok) {
+                                       struct sock *sk2;
+
+                                       sk2 = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (sk2) {
-                                       result = sk2;
-                                       goto found;
+                                       if (sk2) {
+                                               result = sk2;
+                                               select_ok = false;
+                                               goto found;
+                                       }
                                }
                                matches = 1;
                        }
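
[Note] Both UDP lookup hunks above add a select_ok flag so that reuseport_select_sock() is consulted at most once per lookup: the lockless list walk can be restarted after a failed validation, and on that second pass the code falls back to plain highest-score selection. A deliberately simplified, standalone C sketch of that control flow (all names hypothetical, no kernel types):

#include <stdbool.h>
#include <stddef.h>

struct ex_sock {
	int score;
	bool reuseport;
	struct ex_sock *next;
};

/* Stand-in for reuseport_select_sock(): may or may not pick a group member. */
static struct ex_sock *ex_select_grouped(struct ex_sock *sk)
{
	(void)sk;
	return NULL;
}

/* Stand-in for the post-selection check that can force a restart. */
static bool ex_still_valid(struct ex_sock *sk)
{
	return sk != NULL;
}

struct ex_sock *ex_lookup(struct ex_sock *head)
{
	struct ex_sock *sk, *result;
	bool select_ok = true;	/* grouped selection is attempted only once */
	int badness;

begin:
	result = NULL;
	badness = -1;
	for (sk = head; sk; sk = sk->next) {
		if (sk->score <= badness)
			continue;
		result = sk;
		badness = sk->score;
		if (sk->reuseport && select_ok) {
			struct ex_sock *sk2 = ex_select_grouped(sk);

			if (sk2) {
				result = sk2;
				select_ok = false;
				goto found;
			}
		}
	}
found:
	if (result && !ex_still_valid(result))
		goto begin;	/* restart: grouped selection is now skipped */
	return result;
}
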
index bb7dabe2ebbf8aabc624af02c00dbc2b33ad544a..40c897515ddc44d57c434dcd761c2ddc5e7fad50 100644 (file)
@@ -69,6 +69,7 @@ config INET6_ESP
        select CRYPTO_CBC
        select CRYPTO_SHA1
        select CRYPTO_DES
+       select CRYPTO_ECHAINIV
        ---help---
          Support for IPsec ESP.
 
index 38eeddedfc21baa843b315d720cc94a28b6af605..9efd9ffdc34cf5f39ab8b2548050cfe1e6ed4141 100644 (file)
@@ -3538,6 +3538,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 {
        struct inet6_dev *idev = ifp->idev;
        struct net_device *dev = idev->dev;
+       bool notify = false;
 
        addrconf_join_solict(dev, &ifp->addr);
 
@@ -3583,7 +3584,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
                        /* Because optimistic nodes can use this address,
                         * notify listeners. If DAD fails, RTM_DELADDR is sent.
                         */
-                       ipv6_ifa_notify(RTM_NEWADDR, ifp);
+                       notify = true;
                }
        }
 
@@ -3591,6 +3592,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
 out:
        spin_unlock(&ifp->lock);
        read_unlock_bh(&idev->lock);
+       if (notify)
+               ipv6_ifa_notify(RTM_NEWADDR, ifp);
 }
 
 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
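
[Note] The addrconf hunks above move the RTM_NEWADDR notification out of the region that holds ifp->lock and idev->lock: the decision is recorded in a local notify flag while locked, and ipv6_ifa_notify() runs only after both locks are dropped. A generic sketch of that defer-until-unlocked pattern (hypothetical names, kernel-style locking):

#include <linux/spinlock.h>
#include <linux/types.h>

struct ex_obj {
	spinlock_t lock;
	bool optimistic;
};

/* Stand-in for a notifier that must not run under ex_obj::lock. */
void ex_notify(struct ex_obj *obj);

void ex_update(struct ex_obj *obj)
{
	bool notify = false;

	spin_lock(&obj->lock);
	if (obj->optimistic)
		notify = true;	/* remember the event; don't call out here */
	spin_unlock(&obj->lock);

	if (notify)
		ex_notify(obj);	/* safe: no spinlocks held any more */
}
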
index 517c55b01ba84b55a0004ce9505d14c4a3951cfc..428162155280ca2af782ea9fd9fa26e0d1666d89 100644 (file)
@@ -162,6 +162,9 @@ ipv4_connected:
        fl6.fl6_dport = inet->inet_dport;
        fl6.fl6_sport = inet->inet_sport;
 
+       if (!fl6.flowi6_oif)
+               fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
+
        if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
                fl6.flowi6_oif = np->mcast_oif;
 
index 1f9ebe3cbb4ac042edd0b05754d1fc03cdfe73cd..dc2db4f7b182c4ebc1a8a51487a3d2e893955df9 100644 (file)
@@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
                }
                spin_lock_bh(&ip6_sk_fl_lock);
                for (sflp = &np->ipv6_fl_list;
-                    (sfl = rcu_dereference(*sflp)) != NULL;
+                    (sfl = rcu_dereference_protected(*sflp,
+                                                     lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
                     sflp = &sfl->next) {
                        if (sfl->fl->label == freq.flr_label) {
                                if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
                                        np->flow_label &= ~IPV6_FLOWLABEL_MASK;
-                               *sflp = rcu_dereference(sfl->next);
+                               *sflp = sfl->next;
                                spin_unlock_bh(&ip6_sk_fl_lock);
                                fl_release(sfl->fl);
                                kfree_rcu(sfl, rcu);
index 23de98f976d5b7c846e42e9260acbac3b1632c17..a163102f1803e5f9eb4a60c501bd4adcc27bd62b 100644 (file)
@@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
        struct rt6_info *rt;
 #endif
        int err;
+       int flags = 0;
 
        /* The correct way to handle this would be to do
         * ip6_route_get_saddr, and then ip6_route_output; however,
@@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
                        dst_release(*dst);
                        *dst = NULL;
                }
+
+               if (fl6->flowi6_oif)
+                       flags |= RT6_LOOKUP_F_IFACE;
        }
 
        if (!*dst)
-               *dst = ip6_route_output(net, sk, fl6);
+               *dst = ip6_route_output_flags(net, sk, fl6, flags);
 
        err = (*dst)->error;
        if (err)
index 3c8834bc822d2b64b8d7f4b33a9042dd279c584a..ed446639219c3ae8832a19a1f944e4dd6ddc6302 100644 (file)
@@ -1183,11 +1183,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
        return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
 }
 
-struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
-                                   struct flowi6 *fl6)
+struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
+                                        struct flowi6 *fl6, int flags)
 {
        struct dst_entry *dst;
-       int flags = 0;
        bool any_src;
 
        dst = l3mdev_rt6_dst_by_oif(net, fl6);
@@ -1208,7 +1207,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
 
        return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
 }
-EXPORT_SYMBOL(ip6_route_output);
+EXPORT_SYMBOL_GPL(ip6_route_output_flags);
 
 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
 {
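
[Note] With the route.c hunk above, the lookup-flag computation moves to the caller (ip6_dst_lookup_tail() sets RT6_LOOKUP_F_IFACE when an output interface is pinned) and the exported symbol becomes ip6_route_output_flags(). Presumably the old entry point survives as a thin inline wrapper in the ip6_route header, along these lines (a hedged sketch, not shown in this excerpt):

/* Assumes <net/ip6_route.h> context. */
static inline struct dst_entry *ip6_route_output(struct net *net,
						 const struct sock *sk,
						 struct flowi6 *fl6)
{
	return ip6_route_output_flags(net, sk, fl6, 0);
}
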
index e794ef66a40135f33b5731a1059a6f45d0fc90b7..2066d1c25a11bb18bbe75caf4abe38eeccaa4b61 100644 (file)
@@ -201,14 +201,14 @@ static int ipip6_tunnel_create(struct net_device *dev)
        if ((__force u16)t->parms.i_flags & SIT_ISATAP)
                dev->priv_flags |= IFF_ISATAP;
 
+       dev->rtnl_link_ops = &sit_link_ops;
+
        err = register_netdevice(dev);
        if (err < 0)
                goto out;
 
        ipip6_tunnel_clone_6rd(dev, sitn);
 
-       dev->rtnl_link_ops = &sit_link_ops;
-
        dev_hold(dev);
 
        ipip6_tunnel_link(sitn, t);
index 2906ef20795e4ce2365011128c5eb9bfbcc9bdca..0e393ff7f5d07e7294df6cda18deddad568bddbb 100644 (file)
@@ -148,7 +148,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
        struct dst_entry *dst;
        __u8 rcv_wscale;
 
-       if (!sysctl_tcp_syncookies || !th->ack || th->rst)
+       if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
                goto out;
 
        if (tcp_synq_no_recent_overflow(sk))
index 006396e31cb0dcedf359c5aaa068ab3d28f7501c..ad422babc1f50fdfe12c22f7cb1bcb97ced3f2a7 100644 (file)
@@ -66,7 +66,7 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
 #include <linux/scatterlist.h>
 
 static void    tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
@@ -327,6 +327,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct tcp_sock *tp;
        __u32 seq, snd_una;
        struct sock *sk;
+       bool fatal;
        int err;
 
        sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -345,8 +346,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                return;
        }
        seq = ntohl(th->seq);
+       fatal = icmpv6_err_convert(type, code, &err);
        if (sk->sk_state == TCP_NEW_SYN_RECV)
-               return tcp_req_err(sk, seq);
+               return tcp_req_err(sk, seq, fatal);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -400,7 +402,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                goto out;
        }
 
-       icmpv6_err_convert(type, code, &err);
 
        /* Might be for an request_sock */
        switch (sk->sk_state) {
@@ -540,7 +541,8 @@ static int tcp_v6_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
        bp->len = cpu_to_be32(nbytes);
 
        sg_init_one(&sg, bp, sizeof(*bp));
-       return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
+       ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
+       return crypto_ahash_update(hp->md5_req);
 }
 
 static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
@@ -548,14 +550,14 @@ static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                               const struct tcphdr *th)
 {
        struct tcp_md5sig_pool *hp;
-       struct hash_desc *desc;
+       struct ahash_request *req;
 
        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
-       desc = &hp->md5_desc;
+       req = hp->md5_req;
 
-       if (crypto_hash_init(desc))
+       if (crypto_ahash_init(req))
                goto clear_hash;
        if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
                goto clear_hash;
@@ -563,7 +565,8 @@ static int tcp_v6_md5_hash_hdr(char *md5_hash, struct tcp_md5sig_key *key,
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
-       if (crypto_hash_final(desc, md5_hash))
+       ahash_request_set_crypt(req, NULL, md5_hash, 0);
+       if (crypto_ahash_final(req))
                goto clear_hash;
 
        tcp_put_md5sig_pool();
@@ -583,7 +586,7 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
 {
        const struct in6_addr *saddr, *daddr;
        struct tcp_md5sig_pool *hp;
-       struct hash_desc *desc;
+       struct ahash_request *req;
        const struct tcphdr *th = tcp_hdr(skb);
 
        if (sk) { /* valid for establish/request sockets */
@@ -598,9 +601,9 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
        hp = tcp_get_md5sig_pool();
        if (!hp)
                goto clear_hash_noput;
-       desc = &hp->md5_desc;
+       req = hp->md5_req;
 
-       if (crypto_hash_init(desc))
+       if (crypto_ahash_init(req))
                goto clear_hash;
 
        if (tcp_v6_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
@@ -611,7 +614,8 @@ static int tcp_v6_md5_hash_skb(char *md5_hash,
                goto clear_hash;
        if (tcp_md5_hash_key(hp, key))
                goto clear_hash;
-       if (crypto_hash_final(desc, md5_hash))
+       ahash_request_set_crypt(req, NULL, md5_hash, 0);
+       if (crypto_ahash_final(req))
                goto clear_hash;
 
        tcp_put_md5sig_pool();
index 5d2c2afffe7b2c0790bc00505c518d21eb4b589b..22e28a44e3c88c6e52ff59ea1c7dd8a3e964e701 100644 (file)
@@ -257,6 +257,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
        struct sock *sk, *result;
        struct hlist_nulls_node *node;
        int score, badness, matches = 0, reuseport = 0;
+       bool select_ok = true;
        u32 hash = 0;
 
 begin:
@@ -270,14 +271,18 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               struct sock *sk2;
                                hash = udp6_ehashfn(net, daddr, hnum,
                                                    saddr, sport);
-                               sk2 = reuseport_select_sock(sk, hash, skb,
-                                                           sizeof(struct udphdr));
-                               if (sk2) {
-                                       result = sk2;
-                                       goto found;
+                               if (select_ok) {
+                                       struct sock *sk2;
+
+                                       sk2 = reuseport_select_sock(sk, hash, skb,
+                                                       sizeof(struct udphdr));
+                                       if (sk2) {
+                                               result = sk2;
+                                               select_ok = false;
+                                               goto found;
+                                       }
                                }
                                matches = 1;
                        }
@@ -321,6 +326,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
        int score, badness, matches = 0, reuseport = 0;
+       bool select_ok = true;
        u32 hash = 0;
 
        rcu_read_lock();
@@ -358,14 +364,18 @@ begin:
                        badness = score;
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
-                               struct sock *sk2;
                                hash = udp6_ehashfn(net, daddr, hnum,
                                                    saddr, sport);
-                               sk2 = reuseport_select_sock(sk, hash, skb,
+                               if (select_ok) {
+                                       struct sock *sk2;
+
+                                       sk2 = reuseport_select_sock(sk, hash, skb,
                                                        sizeof(struct udphdr));
-                               if (sk2) {
-                                       result = sk2;
-                                       goto found;
+                                       if (sk2) {
+                                               result = sk2;
+                                               select_ok = false;
+                                               goto found;
+                                       }
                                }
                                matches = 1;
                        }
index 3c4caa60c9265dd95f62d7a3aba84d40cdba0492..5728e76ca6d51adabef2cde3e06b0b9184c2d07e 100644 (file)
@@ -134,11 +134,10 @@ int ircomm_param_request(struct ircomm_tty_cb *self, __u8 pi, int flush)
                return -1;
        }
        skb_put(skb, count);
+       pr_debug("%s(), skb->len=%d\n", __func__, skb->len);
 
        spin_unlock_irqrestore(&self->spinlock, flags);
 
-       pr_debug("%s(), skb->len=%d\n", __func__ , skb->len);
-
        if (flush) {
                /* ircomm_tty_do_softint will take care of the rest */
                schedule_work(&self->tqueue);
index a4237707f79d56097099c2fe8704d686670cc9fe..da126ee6d2183c7509afa0fa4d238845e9fa7f36 100644 (file)
@@ -287,14 +287,14 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
 
        if (filp->f_flags & O_NONBLOCK) {
                /* nonblock mode is set */
-               if (tty->termios.c_cflag & CBAUD)
+               if (C_BAUD(tty))
                        tty_port_raise_dtr_rts(port);
                port->flags |= ASYNC_NORMAL_ACTIVE;
                pr_debug("%s(), O_NONBLOCK requested!\n", __func__);
                return 0;
        }
 
-       if (tty->termios.c_cflag & CLOCAL) {
+       if (C_CLOCAL(tty)) {
                pr_debug("%s(), doing CLOCAL!\n", __func__);
                do_clocal = 1;
        }
@@ -806,7 +806,7 @@ static void ircomm_tty_throttle(struct tty_struct *tty)
                ircomm_tty_send_xchar(tty, STOP_CHAR(tty));
 
        /* Hardware flow control? */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                self->settings.dte &= ~IRCOMM_RTS;
                self->settings.dte |= IRCOMM_DELTA_RTS;
 
@@ -831,12 +831,11 @@ static void ircomm_tty_unthrottle(struct tty_struct *tty)
        IRDA_ASSERT(self->magic == IRCOMM_TTY_MAGIC, return;);
 
        /* Using software flow control? */
-       if (I_IXOFF(tty)) {
+       if (I_IXOFF(tty))
                ircomm_tty_send_xchar(tty, START_CHAR(tty));
-       }
 
        /* Using hardware flow control? */
-       if (tty->termios.c_cflag & CRTSCTS) {
+       if (C_CRTSCTS(tty)) {
                self->settings.dte |= (IRCOMM_RTS|IRCOMM_DELTA_RTS);
 
                ircomm_param_request(self, IRCOMM_DTE, TRUE);
@@ -1268,10 +1267,6 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
                seq_printf(m, "%cASYNC_LOW_LATENCY", sep);
                sep = '|';
        }
-       if (self->port.flags & ASYNC_CLOSING) {
-               seq_printf(m, "%cASYNC_CLOSING", sep);
-               sep = '|';
-       }
        if (self->port.flags & ASYNC_NORMAL_ACTIVE) {
                seq_printf(m, "%cASYNC_NORMAL_ACTIVE", sep);
                sep = '|';
index 75ccdbd0728e18a9f4892504f2814e8e918b8097..d3687aaa23de08a856021a6a7d92930197dda05e 100644 (file)
@@ -158,26 +158,21 @@ void ircomm_tty_set_termios(struct tty_struct *tty,
        ircomm_tty_change_speed(self, tty);
 
        /* Handle transition to B0 status */
-       if ((old_termios->c_cflag & CBAUD) &&
-           !(cflag & CBAUD)) {
+       if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
                self->settings.dte &= ~(IRCOMM_DTR|IRCOMM_RTS);
                ircomm_param_request(self, IRCOMM_DTE, TRUE);
        }
 
        /* Handle transition away from B0 status */
-       if (!(old_termios->c_cflag & CBAUD) &&
-           (cflag & CBAUD)) {
+       if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
                self->settings.dte |= IRCOMM_DTR;
-               if (!(tty->termios.c_cflag & CRTSCTS) ||
-                   !test_bit(TTY_THROTTLED, &tty->flags)) {
+               if (!C_CRTSCTS(tty) || !test_bit(TTY_THROTTLED, &tty->flags))
                        self->settings.dte |= IRCOMM_RTS;
-               }
                ircomm_param_request(self, IRCOMM_DTE, TRUE);
        }
 
        /* Handle turning off CRTSCTS */
-       if ((old_termios->c_cflag & CRTSCTS) &&
-           !(tty->termios.c_cflag & CRTSCTS))
+       if ((old_termios->c_cflag & CRTSCTS) && !C_CRTSCTS(tty))
        {
                tty->hw_stopped = 0;
                ircomm_tty_start(tty);
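
[Note] The irda hunks above replace open-coded termios tests with the tty helper macros. For reference, those helpers come from <linux/tty.h> and expand to the same c_cflag tests they replace, roughly as sketched below (paraphrased, not part of this patch):

/* Approximate shape of the helpers used above (see <linux/tty.h>): */
#define _C_FLAG(tty, flag)	((tty)->termios.c_cflag & (flag))
#define C_BAUD(tty)		_C_FLAG((tty), CBAUD)
#define C_CLOCAL(tty)		_C_FLAG((tty), CLOCAL)
#define C_CRTSCTS(tty)		_C_FLAG((tty), CRTSCTS)
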
index ef50a94d3eb726f75d4202a7a15c7dd1f812a3aa..fc3598a922b071503514af87deb603a154e2b33b 100644 (file)
@@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
        if (!addr || addr->sa_family != AF_IUCV)
                return -EINVAL;
 
+       if (addr_len < sizeof(struct sockaddr_iucv))
+               return -EINVAL;
+
        lock_sock(sk);
        if (sk->sk_state != IUCV_OPEN) {
                err = -EBADFD;
index 10ad4ac1fa0ba8ebd04e793595fab47d326194ce..3a8f881b22f10c983ab354a4393157e090fd6d3a 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -61,16 +62,25 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
 {
        struct ieee80211_local *local = sta->local;
        struct tid_ampdu_rx *tid_rx;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .action = IEEE80211_AMPDU_RX_STOP,
+               .tid = tid,
+               .amsdu = false,
+               .timeout = 0,
+               .ssn = 0,
+       };
 
        lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
        tid_rx = rcu_dereference_protected(sta->ampdu_mlme.tid_rx[tid],
                                        lockdep_is_held(&sta->ampdu_mlme.mtx));
 
-       if (!tid_rx)
+       if (!test_bit(tid, sta->ampdu_mlme.agg_session_valid))
                return;
 
        RCU_INIT_POINTER(sta->ampdu_mlme.tid_rx[tid], NULL);
+       __clear_bit(tid, sta->ampdu_mlme.agg_session_valid);
 
        ht_dbg(sta->sdata,
               "Rx BA session stop requested for %pM tid %u %s reason: %d\n",
@@ -78,8 +88,7 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
               initiator == WLAN_BACK_RECIPIENT ? "recipient" : "inititator",
               (int)reason);
 
-       if (drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_STOP,
-                            &sta->sta, tid, NULL, 0, false))
+       if (drv_ampdu_action(local, sta->sdata, &params))
                sdata_info(sta->sdata,
                           "HW problem - can not stop rx aggregation for %pM tid %d\n",
                           sta->sta.addr, tid);
@@ -89,6 +98,13 @@ void ___ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
                ieee80211_send_delba(sta->sdata, sta->sta.addr,
                                     tid, WLAN_BACK_RECIPIENT, reason);
 
+       /*
+        * return here in case tid_rx is not assigned - which will happen if
+        * IEEE80211_HW_SUPPORTS_REORDERING_BUFFER is set.
+        */
+       if (!tid_rx)
+               return;
+
        del_timer_sync(&tid_rx->session_timer);
 
        /* make sure ieee80211_sta_reorder_release() doesn't re-arm the timer */
@@ -237,6 +253,15 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
 {
        struct ieee80211_local *local = sta->sdata->local;
        struct tid_ampdu_rx *tid_agg_rx;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .action = IEEE80211_AMPDU_RX_START,
+               .tid = tid,
+               .amsdu = false,
+               .timeout = timeout,
+               .ssn = start_seq_num,
+       };
+
        int i, ret = -EOPNOTSUPP;
        u16 status = WLAN_STATUS_REQUEST_DECLINED;
 
@@ -275,11 +300,12 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        /* make sure the size doesn't exceed the maximum supported by the hw */
        if (buf_size > local->hw.max_rx_aggregation_subframes)
                buf_size = local->hw.max_rx_aggregation_subframes;
+       params.buf_size = buf_size;
 
        /* examine state machine */
        mutex_lock(&sta->ampdu_mlme.mtx);
 
-       if (sta->ampdu_mlme.tid_rx[tid]) {
+       if (test_bit(tid, sta->ampdu_mlme.agg_session_valid)) {
                ht_dbg_ratelimited(sta->sdata,
                                   "unexpected AddBA Req from %pM on tid %u\n",
                                   sta->sta.addr, tid);
@@ -290,8 +316,18 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
                                                false);
        }
 
+       if (ieee80211_hw_check(&local->hw, SUPPORTS_REORDERING_BUFFER)) {
+               ret = drv_ampdu_action(local, sta->sdata, &params);
+               ht_dbg(sta->sdata,
+                      "Rx A-MPDU request on %pM tid %d result %d\n",
+                      sta->sta.addr, tid, ret);
+               if (!ret)
+                       status = WLAN_STATUS_SUCCESS;
+               goto end;
+       }
+
        /* prepare A-MPDU MLME for Rx aggregation */
-       tid_agg_rx = kmalloc(sizeof(struct tid_ampdu_rx), GFP_KERNEL);
+       tid_agg_rx = kzalloc(sizeof(*tid_agg_rx), GFP_KERNEL);
        if (!tid_agg_rx)
                goto end;
 
@@ -322,8 +358,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        for (i = 0; i < buf_size; i++)
                __skb_queue_head_init(&tid_agg_rx->reorder_buf[i]);
 
-       ret = drv_ampdu_action(local, sta->sdata, IEEE80211_AMPDU_RX_START,
-                              &sta->sta, tid, &start_seq_num, 0, false);
+       ret = drv_ampdu_action(local, sta->sdata, &params);
        ht_dbg(sta->sdata, "Rx A-MPDU request on %pM tid %d result %d\n",
               sta->sta.addr, tid, ret);
        if (ret) {
@@ -341,6 +376,7 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        tid_agg_rx->timeout = timeout;
        tid_agg_rx->stored_mpdu_num = 0;
        tid_agg_rx->auto_seq = auto_seq;
+       tid_agg_rx->reorder_buf_filtered = 0;
        status = WLAN_STATUS_SUCCESS;
 
        /* activate it for RX */
@@ -352,6 +388,8 @@ void __ieee80211_start_rx_ba_session(struct sta_info *sta,
        }
 
 end:
+       if (status == WLAN_STATUS_SUCCESS)
+               __set_bit(tid, sta->ampdu_mlme.agg_session_valid);
        mutex_unlock(&sta->ampdu_mlme.mtx);
 
 end_no_lock:
index ff757181b0a85c820e1acc53a088e95c78b87bff..4932e9f243a2cb9e156e7e24ac1c098fc9a4b19c 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2007-2010, Intel Corporation
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -295,7 +296,14 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 {
        struct ieee80211_local *local = sta->local;
        struct tid_ampdu_tx *tid_tx;
-       enum ieee80211_ampdu_mlme_action action;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .tid = tid,
+               .buf_size = 0,
+               .amsdu = false,
+               .timeout = 0,
+               .ssn = 0,
+       };
        int ret;
 
        lockdep_assert_held(&sta->ampdu_mlme.mtx);
@@ -304,10 +312,10 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
        case AGG_STOP_DECLINED:
        case AGG_STOP_LOCAL_REQUEST:
        case AGG_STOP_PEER_REQUEST:
-               action = IEEE80211_AMPDU_TX_STOP_CONT;
+               params.action = IEEE80211_AMPDU_TX_STOP_CONT;
                break;
        case AGG_STOP_DESTROY_STA:
-               action = IEEE80211_AMPDU_TX_STOP_FLUSH;
+               params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
                break;
        default:
                WARN_ON_ONCE(1);
@@ -330,9 +338,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                spin_unlock_bh(&sta->lock);
                if (reason != AGG_STOP_DESTROY_STA)
                        return -EALREADY;
-               ret = drv_ampdu_action(local, sta->sdata,
-                                      IEEE80211_AMPDU_TX_STOP_FLUSH_CONT,
-                                      &sta->sta, tid, NULL, 0, false);
+               params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
+               ret = drv_ampdu_action(local, sta->sdata, &params);
                WARN_ON_ONCE(ret);
                return 0;
        }
@@ -381,8 +388,7 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                        WLAN_BACK_INITIATOR;
        tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;
 
-       ret = drv_ampdu_action(local, sta->sdata, action,
-                              &sta->sta, tid, NULL, 0, false);
+       ret = drv_ampdu_action(local, sta->sdata, &params);
 
        /* HW shall not deny going back to legacy */
        if (WARN_ON(ret)) {
@@ -445,7 +451,14 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
        struct tid_ampdu_tx *tid_tx;
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       u16 start_seq_num;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .action = IEEE80211_AMPDU_TX_START,
+               .tid = tid,
+               .buf_size = 0,
+               .amsdu = false,
+               .timeout = 0,
+       };
        int ret;
 
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
@@ -467,10 +480,8 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
         */
        synchronize_net();
 
-       start_seq_num = sta->tid_seq[tid] >> 4;
-
-       ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
-                              &sta->sta, tid, &start_seq_num, 0, false);
+       params.ssn = sta->tid_seq[tid] >> 4;
+       ret = drv_ampdu_action(local, sdata, &params);
        if (ret) {
                ht_dbg(sdata,
                       "BA request denied - HW unavailable for %pM tid %d\n",
@@ -499,7 +510,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 
        /* send AddBA request */
        ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
-                                    tid_tx->dialog_token, start_seq_num,
+                                    tid_tx->dialog_token, params.ssn,
                                     IEEE80211_MAX_AMPDU_BUF,
                                     tid_tx->timeout);
 }
@@ -684,18 +695,24 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
                                         struct sta_info *sta, u16 tid)
 {
        struct tid_ampdu_tx *tid_tx;
+       struct ieee80211_ampdu_params params = {
+               .sta = &sta->sta,
+               .action = IEEE80211_AMPDU_TX_OPERATIONAL,
+               .tid = tid,
+               .timeout = 0,
+               .ssn = 0,
+       };
 
        lockdep_assert_held(&sta->ampdu_mlme.mtx);
 
        tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
+       params.buf_size = tid_tx->buf_size;
+       params.amsdu = tid_tx->amsdu;
 
        ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
               sta->sta.addr, tid);
 
-       drv_ampdu_action(local, sta->sdata,
-                        IEEE80211_AMPDU_TX_OPERATIONAL,
-                        &sta->sta, tid, NULL, tid_tx->buf_size,
-                        tid_tx->amsdu);
+       drv_ampdu_action(local, sta->sdata, &params);
 
        /*
         * synchronize with TX path, while splicing the TX path
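
[Note] The agg-rx.c and agg-tx.c hunks above collapse drv_ampdu_action()'s six arguments into a single struct ieee80211_ampdu_params (sta, action, tid, ssn, buf_size, timeout, amsdu — field names taken from the initializers above). On the driver side the ampdu_action callback takes the same struct; a hedged sketch of what a handler looks like after the conversion, where the example_* helpers are hypothetical driver code:

#include <net/mac80211.h>

/* Hypothetical driver helpers; not part of mac80211. */
static int example_rx_agg_start(struct ieee80211_hw *hw,
				struct ieee80211_sta *sta, u16 tid, u16 ssn);
static int example_rx_agg_stop(struct ieee80211_hw *hw,
			       struct ieee80211_sta *sta, u16 tid);
static int example_tx_agg_oper(struct ieee80211_hw *hw,
			       struct ieee80211_sta *sta, u16 tid,
			       u8 buf_size, bool amsdu);

static int example_ampdu_action(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_ampdu_params *params)
{
	switch (params->action) {
	case IEEE80211_AMPDU_RX_START:
		return example_rx_agg_start(hw, params->sta, params->tid,
					    params->ssn);
	case IEEE80211_AMPDU_RX_STOP:
		return example_rx_agg_stop(hw, params->sta, params->tid);
	case IEEE80211_AMPDU_TX_START:
		ieee80211_start_tx_ba_cb_irqsafe(vif, params->sta->addr,
						 params->tid);
		return 0;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		return example_tx_agg_oper(hw, params->sta, params->tid,
					   params->buf_size, params->amsdu);
	default:
		return -EOPNOTSUPP;
	}
}
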
index 166a29fe6c35f63a5357a01ceeb140f19346f9bd..66d22de93c8df75e7d0b747bf71a45cb3b45f927 100644 (file)
@@ -1131,6 +1131,34 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                sta->sta.max_sp = params->max_sp;
        }
 
+       /* The sender might not have sent the last bit, consider it to be 0 */
+       if (params->ext_capab_len >= 8) {
+               u8 val = (params->ext_capab[7] &
+                         WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB) >> 7;
+
+               /* we did get all the bits, take the MSB as well */
+               if (params->ext_capab_len >= 9) {
+                       u8 val_msb = params->ext_capab[8] &
+                               WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB;
+                       val_msb <<= 1;
+                       val |= val_msb;
+               }
+
+               switch (val) {
+               case 1:
+                       sta->sta.max_amsdu_subframes = 32;
+                       break;
+               case 2:
+                       sta->sta.max_amsdu_subframes = 16;
+                       break;
+               case 3:
+                       sta->sta.max_amsdu_subframes = 8;
+                       break;
+               default:
+                       sta->sta.max_amsdu_subframes = 0;
+               }
+       }
+
        /*
         * cfg80211 validates this (1-2007) and allows setting the AID
         * only when creating a new station entry
@@ -1160,6 +1188,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
                                                  params->ht_capa, sta);
 
+       /* VHT can override some HT caps such as the A-MSDU max length */
        if (params->vht_capa)
                ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
                                                    params->vht_capa, sta);
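
[Note] In the sta_apply_parameters() hunk above, the "Max Number of MSDUs in A-MSDU" field is split across two octets of the Extended Capabilities element: the WLAN_EXT_CAPA8_MAX_MSDU_IN_AMSDU_LSB bit of ext_capab[7] is the LSB, the WLAN_EXT_CAPA9_MAX_MSDU_IN_AMSDU_MSB bit of ext_capab[8] is the MSB, and a missing MSB octet is treated as 0. Writing the switch above out as a table:

	val = (MSB << 1) | LSB

	val 0  ->  max_amsdu_subframes = 0   (presumably: no explicit limit)
	val 1  ->  max_amsdu_subframes = 32
	val 2  ->  max_amsdu_subframes = 16
	val 3  ->  max_amsdu_subframes = 8
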
index 1d1b9b7bdefe74ac851ca6d01554d663fae39541..283981108ca80cb5bb7182b28e626a1a14c09fb1 100644 (file)
@@ -231,7 +231,7 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
                    !(sta->sdata->bss && sta->sdata->bss == sdata->bss))
                        continue;
 
-               if (!sta->uploaded)
+               if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
                        continue;
 
                max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
index 3e24d0ddb51bfaca18d39669e41dba2bb6a91bb5..4ab5c522ceeeee5def93f869fd96618999a2daf5 100644 (file)
@@ -126,6 +126,7 @@ static const char *hw_flag_names[] = {
        FLAG(SUPPORTS_AMSDU_IN_AMPDU),
        FLAG(BEACON_TX_STATUS),
        FLAG(NEEDS_UNIQUE_STA_ADDR),
+       FLAG(SUPPORTS_REORDERING_BUFFER),
 #undef FLAG
 };
 
index ca1fe5576103767c3a98b52e037c0034b949887f..c258f1041d330885d08869e302a5ec255b470b4b 100644 (file)
@@ -284,9 +284,7 @@ int drv_switch_vif_chanctx(struct ieee80211_local *local,
 
 int drv_ampdu_action(struct ieee80211_local *local,
                     struct ieee80211_sub_if_data *sdata,
-                    enum ieee80211_ampdu_mlme_action action,
-                    struct ieee80211_sta *sta, u16 tid,
-                    u16 *ssn, u8 buf_size, bool amsdu)
+                    struct ieee80211_ampdu_params *params)
 {
        int ret = -EOPNOTSUPP;
 
@@ -296,12 +294,10 @@ int drv_ampdu_action(struct ieee80211_local *local,
        if (!check_sdata_in_driver(sdata))
                return -EIO;
 
-       trace_drv_ampdu_action(local, sdata, action, sta, tid,
-                              ssn, buf_size, amsdu);
+       trace_drv_ampdu_action(local, sdata, params);
 
        if (local->ops->ampdu_action)
-               ret = local->ops->ampdu_action(&local->hw, &sdata->vif, action,
-                                              sta, tid, ssn, buf_size, amsdu);
+               ret = local->ops->ampdu_action(&local->hw, &sdata->vif, params);
 
        trace_drv_return_int(local, ret);
 
index 154ce4b13406d5a31bac8797f8069184ca9cd6f5..18b0d65baff000156c8015f76b9ce527a938e95a 100644 (file)
@@ -585,9 +585,7 @@ static inline int drv_tx_last_beacon(struct ieee80211_local *local)
 
 int drv_ampdu_action(struct ieee80211_local *local,
                     struct ieee80211_sub_if_data *sdata,
-                    enum ieee80211_ampdu_mlme_action action,
-                    struct ieee80211_sta *sta, u16 tid,
-                    u16 *ssn, u8 buf_size, bool amsdu);
+                    struct ieee80211_ampdu_params *params);
 
 static inline int drv_get_survey(struct ieee80211_local *local, int idx,
                                struct survey_info *survey)
index 7a76ce639d58d6071681551e916f0125cf376212..f4a52877356349abed96d0a77d6ae75f301181e4 100644 (file)
@@ -230,6 +230,11 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
        /* set Rx highest rate */
        ht_cap.mcs.rx_highest = ht_cap_ie->mcs.rx_highest;
 
+       if (ht_cap.cap & IEEE80211_HT_CAP_MAX_AMSDU)
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_7935;
+       else
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_HT_3839;
+
  apply:
        changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
 
index f7fc0e00497fd73f858833e2cb54ec3058b177fe..fc3238376b39546d025f81ad92240356a77aa48b 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2009, Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1050,9 +1051,8 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
                struct cfg80211_chan_def chandef;
                enum ieee80211_sta_rx_bandwidth bw = sta->sta.bandwidth;
 
-               ieee80211_ht_oper_to_chandef(channel,
-                                            elems->ht_operation,
-                                            &chandef);
+               cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT);
+               ieee80211_chandef_ht_oper(elems->ht_operation, &chandef);
 
                memcpy(&htcap_ie, elems->ht_cap_elem, sizeof(htcap_ie));
                rates_updated |= ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
@@ -1066,9 +1066,8 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
                        struct ieee80211_vht_cap cap_ie;
                        struct ieee80211_sta_vht_cap cap = sta->sta.vht_cap;
 
-                       ieee80211_vht_oper_to_chandef(channel,
-                                                     elems->vht_operation,
-                                                     &chandef);
+                       ieee80211_chandef_vht_oper(elems->vht_operation,
+                                                  &chandef);
                        memcpy(&cap_ie, elems->vht_cap_elem, sizeof(cap_ie));
                        ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
                                                            &cap_ie, sta);
@@ -1485,14 +1484,21 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
 
                sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
 
-               num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
-                                                        &ifibss->chandef,
-                                                        channels,
-                                                        ARRAY_SIZE(channels));
                scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
-               ieee80211_request_ibss_scan(sdata, ifibss->ssid,
-                                           ifibss->ssid_len, channels, num,
-                                           scan_width);
+
+               if (ifibss->fixed_channel) {
+                       num = ieee80211_ibss_setup_scan_channels(local->hw.wiphy,
+                                                                &ifibss->chandef,
+                                                                channels,
+                                                                ARRAY_SIZE(channels));
+                       ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+                                                   ifibss->ssid_len, channels,
+                                                   num, scan_width);
+               } else {
+                       ieee80211_request_ibss_scan(sdata, ifibss->ssid,
+                                                   ifibss->ssid_len, NULL,
+                                                   0, scan_width);
+               }
        } else {
                int interval = IEEE80211_SCAN_INTERVAL;
 
@@ -1733,7 +1739,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
                if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
                        continue;
                sdata->u.ibss.last_scan_completed = jiffies;
-               ieee80211_queue_work(&local->hw, &sdata->work);
        }
        mutex_unlock(&local->iflist_mtx);
 }
index b84f6aa32c0831ce200f286af2ecd947ef0fefde..a49c10361f1c2f58faa922eb5f9866cf43cbf842 100644 (file)
@@ -804,6 +804,7 @@ enum txq_info_flags {
 struct txq_info {
        struct sk_buff_head queue;
        unsigned long flags;
+       unsigned long byte_cnt;
 
        /* keep last! */
        struct ieee80211_txq txq;
@@ -1466,7 +1467,13 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status)
 {
        WARN_ON_ONCE(status->flag & RX_FLAG_MACTIME_START &&
                     status->flag & RX_FLAG_MACTIME_END);
-       return status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END);
+       if (status->flag & (RX_FLAG_MACTIME_START | RX_FLAG_MACTIME_END))
+               return true;
+       /* can't handle HT/VHT preamble yet */
+       if (status->flag & RX_FLAG_MACTIME_PLCP_START &&
+           !(status->flag & (RX_FLAG_HT | RX_FLAG_VHT)))
+               return true;
+       return false;
 }
 
 u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
@@ -1714,6 +1721,8 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta);
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
 void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
+                                struct ieee80211_mgmt *mgmt);
 u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                   struct sta_info *sta, u8 opmode,
                                  enum ieee80211_band band);
@@ -1829,20 +1838,6 @@ static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
        ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
 }
 
-static inline bool ieee80211_rx_reorder_ready(struct sk_buff_head *frames)
-{
-       struct sk_buff *tail = skb_peek_tail(frames);
-       struct ieee80211_rx_status *status;
-
-       if (!tail)
-               return false;
-
-       status = IEEE80211_SKB_RXCB(tail);
-       if (status->flag & RX_FLAG_AMSDU_MORE)
-               return false;
-
-       return true;
-}
 
 extern const int ieee802_1d_to_ac[8];
 
@@ -1986,12 +1981,10 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
 u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
 
 /* channel management */
-void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                 const struct ieee80211_ht_operation *ht_oper,
-                                 struct cfg80211_chan_def *chandef);
-void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                  const struct ieee80211_vht_operation *oper,
-                                  struct cfg80211_chan_def *chandef);
+bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
+                              struct cfg80211_chan_def *chandef);
+bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+                               struct cfg80211_chan_def *chandef);
 u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
 
 int __must_check
index c9e325d2e120c0f9c230dbace71c3c405b9216a3..453b4e7417804105cb136e2ec7f4c57a39392db3 100644 (file)
@@ -977,7 +977,11 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
        if (sdata->vif.txq) {
                struct txq_info *txqi = to_txq_info(sdata->vif.txq);
 
+               spin_lock_bh(&txqi->queue.lock);
                ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
+               txqi->byte_cnt = 0;
+               spin_unlock_bh(&txqi->queue.lock);
+
                atomic_set(&sdata->txqs_len[txqi->txq.ac], 0);
        }
 
@@ -1271,6 +1275,16 @@ static void ieee80211_iface_work(struct work_struct *work)
                                }
                        }
                        mutex_unlock(&local->sta_mtx);
+               } else if (ieee80211_is_action(mgmt->frame_control) &&
+                          mgmt->u.action.category == WLAN_CATEGORY_VHT) {
+                       switch (mgmt->u.action.u.vht_group_notif.action_code) {
+                       case WLAN_VHT_ACTION_GROUPID_MGMT:
+                               ieee80211_process_mu_groups(sdata, mgmt);
+                               break;
+                       default:
+                               WARN_ON(1);
+                               break;
+                       }
                } else if (ieee80211_is_data_qos(mgmt->frame_control)) {
                        struct ieee80211_hdr *hdr = (void *)mgmt;
                        /*
index 6bcf0faa4a89dbf38ee125099ad8c328f378331f..8190bf27ebfff8b0a594b7c029130dc9b1b99957 100644 (file)
@@ -248,6 +248,7 @@ static void ieee80211_restart_work(struct work_struct *work)
 
        /* wait for scan work complete */
        flush_workqueue(local->workqueue);
+       flush_work(&local->sched_scan_stopped_work);
 
        WARN(test_bit(SCAN_HW_SCANNING, &local->scanning),
             "%s called with hardware scan in progress\n", __func__);
@@ -256,6 +257,11 @@ static void ieee80211_restart_work(struct work_struct *work)
        list_for_each_entry(sdata, &local->interfaces, list)
                flush_delayed_work(&sdata->dec_tailroom_needed_wk);
        ieee80211_scan_cancel(local);
+
+       /* make sure any new ROC will consider local->in_reconfig */
+       flush_delayed_work(&local->roc_work);
+       flush_work(&local->hw_roc_done);
+
        ieee80211_reconfig(local);
        rtnl_unlock();
 }
index fa28500f28fd9fd8d452602b0407741589f12e36..d32cefcb63b02ecd4f1af220c559a2a82c75430d 100644 (file)
@@ -91,11 +91,10 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
        if (sdata->vif.bss_conf.basic_rates != basic_rates)
                return false;
 
-       ieee80211_ht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
-                                    ie->ht_operation, &sta_chan_def);
-
-       ieee80211_vht_oper_to_chandef(sdata->vif.bss_conf.chandef.chan,
-                                     ie->vht_operation, &sta_chan_def);
+       cfg80211_chandef_create(&sta_chan_def, sdata->vif.bss_conf.chandef.chan,
+                               NL80211_CHAN_NO_HT);
+       ieee80211_chandef_ht_oper(ie->ht_operation, &sta_chan_def);
+       ieee80211_chandef_vht_oper(ie->vht_operation, &sta_chan_def);
 
        if (!cfg80211_chandef_compatible(&sdata->vif.bss_conf.chandef,
                                         &sta_chan_def))
@@ -1370,17 +1369,6 @@ out:
        sdata_unlock(sdata);
 }
 
-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
-{
-       struct ieee80211_sub_if_data *sdata;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(sdata, &local->interfaces, list)
-               if (ieee80211_vif_is_mesh(&sdata->vif) &&
-                   ieee80211_sdata_running(sdata))
-                       ieee80211_queue_work(&local->hw, &sdata->work);
-       rcu_read_unlock();
-}
 
 void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
 {
index a1596344c3ba1c5fe590767f3c0483310d63121a..87c017a3b1ce8db928a999bec05fb1cf32169b73 100644 (file)
@@ -137,8 +137,6 @@ struct mesh_path {
  * @copy_node: function to copy nodes of the table
  * @size_order: determines size of the table, there will be 2^size_order hash
  *     buckets
- * @mean_chain_len: maximum average length for the hash buckets' list, if it is
- *     reached, the table will grow
  * @known_gates: list of known mesh gates and their mpaths by the station. The
  * gate's mpath may or may not be resolved and active.
  *
@@ -154,7 +152,6 @@ struct mesh_table {
        void (*free_node) (struct hlist_node *p, bool free_leafs);
        int (*copy_node) (struct hlist_node *p, struct mesh_table *newtbl);
        int size_order;
-       int mean_chain_len;
        struct hlist_head *known_gates;
        spinlock_t gates_lock;
 
@@ -362,14 +359,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
        return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
 }
 
-void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
-
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
 void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_stop(void);
 #else
-static inline void
-ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
 static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
 { return false; }
 static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
index dadf8dc6f1cfdceb762cabcf057e2e485ad853e8..2ba7aa56b11c0ec121d7851665c989e1a2b3b2d1 100644 (file)
@@ -55,16 +55,21 @@ int mpp_paths_generation;
 static DEFINE_RWLOCK(pathtbl_resize_lock);
 
 
+static inline struct mesh_table *resize_dereference_paths(
+       struct mesh_table __rcu *table)
+{
+       return rcu_dereference_protected(table,
+                                       lockdep_is_held(&pathtbl_resize_lock));
+}
+
 static inline struct mesh_table *resize_dereference_mesh_paths(void)
 {
-       return rcu_dereference_protected(mesh_paths,
-               lockdep_is_held(&pathtbl_resize_lock));
+       return resize_dereference_paths(mesh_paths);
 }
 
 static inline struct mesh_table *resize_dereference_mpp_paths(void)
 {
-       return rcu_dereference_protected(mpp_paths,
-               lockdep_is_held(&pathtbl_resize_lock));
+       return resize_dereference_paths(mpp_paths);
 }
 
 /*
@@ -160,11 +165,10 @@ static int mesh_table_grow(struct mesh_table *oldtbl,
        int i;
 
        if (atomic_read(&oldtbl->entries)
-                       < oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
+                       < MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
                return -EAGAIN;
 
        newtbl->free_node = oldtbl->free_node;
-       newtbl->mean_chain_len = oldtbl->mean_chain_len;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));
@@ -585,7 +589,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
 
        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
-           tbl->mean_chain_len * (tbl->hash_mask + 1))
+           MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
                grow = 1;
 
        mesh_paths_generation++;
@@ -714,7 +718,7 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
 
        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
-           tbl->mean_chain_len * (tbl->hash_mask + 1))
+           MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
                grow = 1;
 
        spin_unlock(&tbl->hashwlock[hash_idx]);
@@ -835,6 +839,29 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
        rcu_read_unlock();
 }
 
+static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
+                              const u8 *proxy)
+{
+       struct mesh_table *tbl;
+       struct mesh_path *mpp;
+       struct mpath_node *node;
+       int i;
+
+       rcu_read_lock();
+       read_lock_bh(&pathtbl_resize_lock);
+       tbl = resize_dereference_mpp_paths();
+       for_each_mesh_entry(tbl, node, i) {
+               mpp = node->mpath;
+               if (ether_addr_equal(mpp->mpp, proxy)) {
+                       spin_lock(&tbl->hashwlock[i]);
+                       __mesh_path_del(tbl, node);
+                       spin_unlock(&tbl->hashwlock[i]);
+               }
+       }
+       read_unlock_bh(&pathtbl_resize_lock);
+       rcu_read_unlock();
+}
+
 static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
 {
@@ -876,14 +903,17 @@ void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
 }
 
 /**
- * mesh_path_del - delete a mesh path from the table
+ * table_path_del - delete a path from the mesh or mpp table
  *
- * @addr: dst address (ETH_ALEN length)
+ * @tbl: mesh or mpp path table
  * @sdata: local subif
+ * @addr: dst address (ETH_ALEN length)
  *
  * Returns: 0 if successful
  */
-int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+static int table_path_del(struct mesh_table __rcu *rcu_tbl,
+                         struct ieee80211_sub_if_data *sdata,
+                         const u8 *addr)
 {
        struct mesh_table *tbl;
        struct mesh_path *mpath;
@@ -892,8 +922,7 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
        int hash_idx;
        int err = 0;
 
-       read_lock_bh(&pathtbl_resize_lock);
-       tbl = resize_dereference_mesh_paths();
+       tbl = resize_dereference_paths(rcu_tbl);
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];
 
@@ -909,9 +938,50 @@ int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
 
        err = -ENXIO;
 enddel:
-       mesh_paths_generation++;
        spin_unlock(&tbl->hashwlock[hash_idx]);
+       return err;
+}
+
+/**
+ * mesh_path_del - delete a mesh path from the table
+ *
+ * @addr: dst address (ETH_ALEN length)
+ * @sdata: local subif
+ *
+ * Returns: 0 if successful
+ */
+int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+{
+       int err = 0;
+
+       /* flush relevant mpp entries first */
+       mpp_flush_by_proxy(sdata, addr);
+
+       read_lock_bh(&pathtbl_resize_lock);
+       err = table_path_del(mesh_paths, sdata, addr);
+       mesh_paths_generation++;
        read_unlock_bh(&pathtbl_resize_lock);
+
+       return err;
+}
+
+/**
+ * mpp_path_del - delete a mesh proxy path from the table
+ *
+ * @addr: addr address (ETH_ALEN length)
+ * @sdata: local subif
+ *
+ * Returns: 0 if successful
+ */
+static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
+{
+       int err = 0;
+
+       read_lock_bh(&pathtbl_resize_lock);
+       err = table_path_del(mpp_paths, sdata, addr);
+       mpp_paths_generation++;
+       read_unlock_bh(&pathtbl_resize_lock);
+
        return err;
 }
 
@@ -1076,7 +1146,6 @@ int mesh_pathtbl_init(void)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
-       tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                ret = -ENOMEM;
@@ -1092,7 +1161,6 @@ int mesh_pathtbl_init(void)
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
-       tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                ret = -ENOMEM;
@@ -1131,6 +1199,17 @@ void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
                     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->sdata, mpath->dst);
        }
+
+       tbl = rcu_dereference(mpp_paths);
+       for_each_mesh_entry(tbl, node, i) {
+               if (node->mpath->sdata != sdata)
+                       continue;
+               mpath = node->mpath;
+               if ((!(mpath->flags & MESH_PATH_FIXED)) &&
+                   time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
+                       mpp_path_del(mpath->sdata, mpath->dst);
+       }
+
        rcu_read_unlock();
 }
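The hunks in this file drop the per-table mean_chain_len field in favour of the MEAN_CHAIN_LEN constant: a path table is grown once its entry count reaches MEAN_CHAIN_LEN times the number of hash buckets. A standalone model of that growth criterion follows; the MEAN_CHAIN_LEN value and the bucket sizing are illustrative assumptions, not taken from the kernel headers:

#include <stdbool.h>
#include <stdio.h>

/* Assumed value; the real constant lives in net/mac80211/mesh.h. */
#define MEAN_CHAIN_LEN 2

struct path_table {
	unsigned int entries;    /* current number of mesh paths */
	unsigned int hash_mask;  /* buckets - 1; table size is a power of two */
};

/* Grow once the average chain length would exceed MEAN_CHAIN_LEN. */
static bool table_needs_grow(const struct path_table *tbl)
{
	return tbl->entries >= MEAN_CHAIN_LEN * (tbl->hash_mask + 1);
}

int main(void)
{
	struct path_table tbl = { .entries = 0, .hash_mask = 7 }; /* 8 buckets */

	for (tbl.entries = 14; tbl.entries <= 16; tbl.entries++)
		printf("%u entries -> grow: %s\n", tbl.entries,
		       table_needs_grow(&tbl) ? "yes" : "no");
	return 0;
}

Using the constant directly removes a field that every table carried but that never varied, which is the design point of the hunks above.
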
 
index bd3d55eb21d4f8fb5d4cf7a0da75a506ecdf00eb..a07e93c21c9ede90bd94328f1e4aaa51476d58bd 100644 (file)
@@ -976,6 +976,10 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                        mpl_dbg(sdata, "Mesh plink error: no more free plinks\n");
                        goto out;
                }
+
+               /* new matching peer */
+               event = OPN_ACPT;
+               goto out;
        } else {
                if (!test_sta_flag(sta, WLAN_STA_AUTH)) {
                        mpl_dbg(sdata, "Mesh plink: Action frame from non-authed peer\n");
@@ -985,12 +989,6 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata,
                        goto out;
        }
 
-       /* new matching peer */
-       if (!sta) {
-               event = OPN_ACPT;
-               goto out;
-       }
-
        switch (ftype) {
        case WLAN_SP_MESH_PEERING_OPEN:
                if (!matches_local)
index 1c342e2592c4ab8af62871a4e44da0f3029140b0..cbb752ab21720f585a3f2412a3490a552b2786e6 100644 (file)
@@ -196,16 +196,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
 
        /* check 40 MHz support, if we have it */
        if (sta_ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
-               switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
-               case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
-                       chandef->width = NL80211_CHAN_WIDTH_40;
-                       chandef->center_freq1 += 10;
-                       break;
-               case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
-                       chandef->width = NL80211_CHAN_WIDTH_40;
-                       chandef->center_freq1 -= 10;
-                       break;
-               }
+               ieee80211_chandef_ht_oper(ht_oper, chandef);
        } else {
                /* 40 MHz (and 80 MHz) must be supported for VHT */
                ret = IEEE80211_STA_DISABLE_VHT;
@@ -219,35 +210,11 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
                goto out;
        }
 
-       vht_chandef.chan = channel;
-       vht_chandef.center_freq1 =
-               ieee80211_channel_to_frequency(vht_oper->center_freq_seg1_idx,
-                                              channel->band);
-       vht_chandef.center_freq2 = 0;
-
-       switch (vht_oper->chan_width) {
-       case IEEE80211_VHT_CHANWIDTH_USE_HT:
-               vht_chandef.width = chandef->width;
-               vht_chandef.center_freq1 = chandef->center_freq1;
-               break;
-       case IEEE80211_VHT_CHANWIDTH_80MHZ:
-               vht_chandef.width = NL80211_CHAN_WIDTH_80;
-               break;
-       case IEEE80211_VHT_CHANWIDTH_160MHZ:
-               vht_chandef.width = NL80211_CHAN_WIDTH_160;
-               break;
-       case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
-               vht_chandef.width = NL80211_CHAN_WIDTH_80P80;
-               vht_chandef.center_freq2 =
-                       ieee80211_channel_to_frequency(
-                               vht_oper->center_freq_seg2_idx,
-                               channel->band);
-               break;
-       default:
+       vht_chandef = *chandef;
+       if (!ieee80211_chandef_vht_oper(vht_oper, &vht_chandef)) {
                if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
                        sdata_info(sdata,
-                                  "AP VHT operation IE has invalid channel width (%d), disable VHT\n",
-                                  vht_oper->chan_width);
+                                  "AP VHT information is invalid, disable VHT\n");
                ret = IEEE80211_STA_DISABLE_VHT;
                goto out;
        }
@@ -1638,8 +1605,7 @@ void ieee80211_dynamic_ps_timer(unsigned long data)
 
 void ieee80211_dfs_cac_timer_work(struct work_struct *work)
 {
-       struct delayed_work *delayed_work =
-               container_of(work, struct delayed_work, work);
+       struct delayed_work *delayed_work = to_delayed_work(work);
        struct ieee80211_sub_if_data *sdata =
                container_of(delayed_work, struct ieee80211_sub_if_data,
                             dfs_cac_timer_work);
@@ -2079,6 +2045,13 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
        memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
        memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
        memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
+
+       /* reset MU-MIMO ownership and group data */
+       memset(sdata->vif.bss_conf.mu_group.membership, 0,
+              sizeof(sdata->vif.bss_conf.mu_group.membership));
+       memset(sdata->vif.bss_conf.mu_group.position, 0,
+              sizeof(sdata->vif.bss_conf.mu_group.position));
+       changed |= BSS_CHANGED_MU_GROUPS;
        sdata->flags &= ~IEEE80211_SDATA_MU_MIMO_OWNER;
 
        sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
@@ -3571,6 +3544,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
                                elems.ht_cap_elem, elems.ht_operation,
                                elems.vht_operation, bssid, &changed)) {
                mutex_unlock(&local->sta_mtx);
+               sdata_info(sdata,
+                          "failed to follow AP %pM bandwidth change, disconnect\n",
+                          bssid);
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       WLAN_REASON_DEAUTH_LEAVING,
                                       true, deauth_buf);
@@ -3946,11 +3922,9 @@ void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata)
                         * We actually lost the connection ... or did we?
                         * Let's make sure!
                         */
-                       wiphy_debug(local->hw.wiphy,
-                                   "%s: No probe response from AP %pM"
-                                   " after %dms, disconnecting.\n",
-                                   sdata->name,
-                                   bssid, probe_wait_ms);
+                       mlme_dbg(sdata,
+                                "No probe response from AP %pM after %dms, disconnecting.\n",
+                                bssid, probe_wait_ms);
 
                        ieee80211_sta_connection_lost(sdata, bssid,
                                WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, false);
@@ -4005,8 +3979,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
                if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
                        ieee80211_queue_work(&sdata->local->hw,
                                             &sdata->u.mgd.monitor_work);
-               /* and do all the other regular work too */
-               ieee80211_queue_work(&sdata->local->hw, &sdata->work);
        }
 }
 
@@ -4538,6 +4510,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
        if (ifmgd->associated) {
                u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
+               sdata_info(sdata,
+                          "disconnect from AP %pM for new auth to %pM\n",
+                          ifmgd->associated->bssid, req->bss->bssid);
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       WLAN_REASON_UNSPECIFIED,
                                       false, frame_buf);
@@ -4606,6 +4581,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
        if (ifmgd->associated) {
                u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
 
+               sdata_info(sdata,
+                          "disconnect from AP %pM for new assoc to %pM\n",
+                          ifmgd->associated->bssid, req->bss->bssid);
                ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
                                       WLAN_REASON_UNSPECIFIED,
                                       false, frame_buf);
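The first hunk in this file replaces an open-coded translation of the HT operation secondary channel offset with the shared ieee80211_chandef_ht_oper() helper (added in the util.c hunks later in this diff). The removed logic itself is small; a standalone model of it, with hypothetical width/offset enums standing in for the nl80211/ieee80211 definitions:

#include <stdio.h>

/* Hypothetical stand-ins for the nl80211/ieee80211 definitions. */
enum chan_width { WIDTH_20, WIDTH_40 };
enum sec_offset { SEC_NONE, SEC_ABOVE, SEC_BELOW };

struct chandef {
	enum chan_width width;
	int center_freq1;	/* MHz */
};

/* Widen a 20 MHz chandef to 40 MHz based on the secondary channel offset,
 * shifting the center frequency by half a channel (10 MHz) up or down. */
static void apply_ht_sec_offset(struct chandef *c, enum sec_offset off)
{
	switch (off) {
	case SEC_ABOVE:
		c->width = WIDTH_40;
		c->center_freq1 += 10;
		break;
	case SEC_BELOW:
		c->width = WIDTH_40;
		c->center_freq1 -= 10;
		break;
	default:
		break;	/* no secondary channel: stay at 20 MHz */
	}
}

int main(void)
{
	struct chandef c = { WIDTH_20, 2412 };	/* channel 1 */

	apply_ht_sec_offset(&c, SEC_ABOVE);
	printf("width40=%d center=%d MHz\n", c.width == WIDTH_40, c.center_freq1);
	return 0;
}
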
index 8b2f4eaac2ba65f8e25ae47dc316bf288ef8e0e9..55a9c5b94ce128d06ffb3154173fa2ea62a53f9b 100644 (file)
@@ -252,14 +252,11 @@ static bool ieee80211_recalc_sw_work(struct ieee80211_local *local,
 static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
                                         unsigned long start_time)
 {
-       struct ieee80211_local *local = roc->sdata->local;
-
        if (WARN_ON(roc->notified))
                return;
 
        roc->start_time = start_time;
        roc->started = true;
-       roc->hw_begun = true;
 
        if (roc->mgmt_tx_cookie) {
                if (!WARN_ON(!roc->frame)) {
@@ -274,9 +271,6 @@ static void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc,
        }
 
        roc->notified = true;
-
-       if (!local->ops->remain_on_channel)
-               ieee80211_recalc_sw_work(local, start_time);
 }
 
 static void ieee80211_hw_roc_start(struct work_struct *work)
@@ -291,6 +285,7 @@ static void ieee80211_hw_roc_start(struct work_struct *work)
                if (!roc->started)
                        break;
 
+               roc->hw_begun = true;
                ieee80211_handle_roc_started(roc, local->hw_roc_start_time);
        }
 
@@ -413,6 +408,10 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
                return;
        }
 
+       /* defer roc if driver is not started (i.e. during reconfig) */
+       if (local->in_reconfig)
+               return;
+
        roc = list_first_entry(&local->roc_list, struct ieee80211_roc_work,
                               list);
 
@@ -534,8 +533,10 @@ ieee80211_coalesce_hw_started_roc(struct ieee80211_local *local,
         * begin, otherwise they'll both be marked properly by the work
         * struct that runs once the driver notifies us of the beginning
         */
-       if (cur_roc->hw_begun)
+       if (cur_roc->hw_begun) {
+               new_roc->hw_begun = true;
                ieee80211_handle_roc_started(new_roc, now);
+       }
 
        return true;
 }
@@ -658,6 +659,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
                        queued = true;
                        roc->on_channel = tmp->on_channel;
                        ieee80211_handle_roc_started(roc, now);
+                       ieee80211_recalc_sw_work(local, now);
                        break;
                }
 
index 3ece7d1034c81ae8749cada074fbebecbe06d57f..b54f398cda5d0e2d561f2383f1d049a5410fcf68 100644 (file)
@@ -711,7 +711,7 @@ static u32 minstrel_get_expected_throughput(void *priv_sta)
         * computing cur_tp
         */
        tmp_mrs = &mi->r[idx].stats;
-       tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma);
+       tmp_cur_tp = minstrel_get_tp_avg(&mi->r[idx], tmp_mrs->prob_ewma) * 10;
        tmp_cur_tp = tmp_cur_tp * 1200 * 8 / 1024;
 
        return tmp_cur_tp;
index 3928dbd24e257e68627aa977cc54a19aaa996339..702328bcabdc2f49b6b2252f304e039ebe37b9af 100644 (file)
@@ -414,15 +414,16 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
            (max_tp_group != MINSTREL_CCK_GROUP))
                return;
 
+       max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
+       max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
+       max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
+
        if (mrs->prob_ewma > MINSTREL_FRAC(75, 100)) {
                cur_tp_avg = minstrel_ht_get_tp_avg(mi, cur_group, cur_idx,
                                                    mrs->prob_ewma);
                if (cur_tp_avg > tmp_tp_avg)
                        mi->max_prob_rate = index;
 
-               max_gpr_group = mg->max_group_prob_rate / MCS_GROUP_RATES;
-               max_gpr_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
-               max_gpr_prob = mi->groups[max_gpr_group].rates[max_gpr_idx].prob_ewma;
                max_gpr_tp_avg = minstrel_ht_get_tp_avg(mi, max_gpr_group,
                                                        max_gpr_idx,
                                                        max_gpr_prob);
@@ -431,7 +432,7 @@ minstrel_ht_set_best_prob_rate(struct minstrel_ht_sta *mi, u16 index)
        } else {
                if (mrs->prob_ewma > tmp_prob)
                        mi->max_prob_rate = index;
-               if (mrs->prob_ewma > mg->rates[mg->max_group_prob_rate].prob_ewma)
+               if (mrs->prob_ewma > max_gpr_prob)
                        mg->max_group_prob_rate = index;
        }
 }
@@ -1334,7 +1335,8 @@ static u32 minstrel_ht_get_expected_throughput(void *priv_sta)
        prob = mi->groups[i].rates[j].prob_ewma;
 
        /* convert tp_avg from pkt per second in kbps */
-       tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * AVG_PKT_SIZE * 8 / 1024;
+       tp_avg = minstrel_ht_get_tp_avg(mi, i, j, prob) * 10;
+       tp_avg = tp_avg * AVG_PKT_SIZE * 8 / 1024;
 
        return tp_avg;
 }
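Both rate-control hunks above restore a factor of ten when converting the averaged throughput estimate into kbps: the estimate is multiplied by the average packet size in bytes and by 8 bits per byte, then divided by 1024. That the extra * 10 compensates for the scale of the value returned by the *_get_tp_avg() helpers is an assumption read off the patch, not verified against the helpers themselves. A standalone version of the arithmetic, with an illustrative AVG_PKT_SIZE mirroring the 1200 used in the first hunk:

#include <stdio.h>
#include <stdint.h>

#define AVG_PKT_SIZE 1200	/* bytes; illustrative */

/* Convert a scaled packets-per-second estimate into kbit/s.
 * 'tp_avg_scaled' is assumed to be one tenth of the packet rate,
 * hence the initial * 10, as in the hunks above. */
static uint32_t expected_throughput_kbps(uint32_t tp_avg_scaled)
{
	uint32_t pkts_per_sec = tp_avg_scaled * 10;

	return pkts_per_sec * AVG_PKT_SIZE * 8 / 1024;
}

int main(void)
{
	printf("%u kbps\n", expected_throughput_kbps(50));
	return 0;
}
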
index bc081850ac0e5538241bd1cab37b65aeefd2c244..91279576f4a716d00cfd810e5d6db18306520c1c 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -18,6 +19,7 @@
 #include <linux/etherdevice.h>
 #include <linux/rcupdate.h>
 #include <linux/export.h>
+#include <linux/bitops.h>
 #include <net/mac80211.h>
 #include <net/ieee80211_radiotap.h>
 #include <asm/unaligned.h>
@@ -122,7 +124,8 @@ static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len,
        hdr = (void *)(skb->data + rtap_vendor_space);
 
        if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
-                           RX_FLAG_FAILED_PLCP_CRC))
+                           RX_FLAG_FAILED_PLCP_CRC |
+                           RX_FLAG_ONLY_MONITOR))
                return true;
 
        if (unlikely(skb->len < 16 + present_fcs_len + rtap_vendor_space))
@@ -507,7 +510,7 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
                return NULL;
        }
 
-       if (!local->monitors) {
+       if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
                if (should_drop_frame(origskb, present_fcs_len,
                                      rtap_vendor_space)) {
                        dev_kfree_skb(origskb);
@@ -797,6 +800,26 @@ static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
        return RX_CONTINUE;
 }
 
+static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx,
+                                             int index)
+{
+       struct sk_buff_head *frames = &tid_agg_rx->reorder_buf[index];
+       struct sk_buff *tail = skb_peek_tail(frames);
+       struct ieee80211_rx_status *status;
+
+       if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index))
+               return true;
+
+       if (!tail)
+               return false;
+
+       status = IEEE80211_SKB_RXCB(tail);
+       if (status->flag & RX_FLAG_AMSDU_MORE)
+               return false;
+
+       return true;
+}
+
 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
                                            struct tid_ampdu_rx *tid_agg_rx,
                                            int index,
@@ -811,7 +834,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
        if (skb_queue_empty(skb_list))
                goto no_frame;
 
-       if (!ieee80211_rx_reorder_ready(skb_list)) {
+       if (!ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
                __skb_queue_purge(skb_list);
                goto no_frame;
        }
@@ -825,6 +848,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
        }
 
 no_frame:
+       tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
        tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
 }
 
@@ -865,7 +889,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
        /* release the buffer until next missing frame */
        index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
-       if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) &&
+       if (!ieee80211_rx_reorder_ready(tid_agg_rx, index) &&
            tid_agg_rx->stored_mpdu_num) {
                /*
                 * No buffers ready to be released, but check whether any
@@ -874,8 +898,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
                int skipped = 1;
                for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
                     j = (j + 1) % tid_agg_rx->buf_size) {
-                       if (!ieee80211_rx_reorder_ready(
-                                       &tid_agg_rx->reorder_buf[j])) {
+                       if (!ieee80211_rx_reorder_ready(tid_agg_rx, j)) {
                                skipped++;
                                continue;
                        }
@@ -902,8 +925,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
                                 skipped) & IEEE80211_SN_MASK;
                        skipped = 0;
                }
-       } else while (ieee80211_rx_reorder_ready(
-                               &tid_agg_rx->reorder_buf[index])) {
+       } else while (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
                ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
                                                frames);
                index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
@@ -914,8 +936,7 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 
                for (; j != (index - 1) % tid_agg_rx->buf_size;
                     j = (j + 1) % tid_agg_rx->buf_size) {
-                       if (ieee80211_rx_reorder_ready(
-                                       &tid_agg_rx->reorder_buf[j]))
+                       if (ieee80211_rx_reorder_ready(tid_agg_rx, j))
                                break;
                }
 
@@ -986,7 +1007,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
        index = mpdu_seq_num % tid_agg_rx->buf_size;
 
        /* check if we already stored this frame */
-       if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) {
+       if (ieee80211_rx_reorder_ready(tid_agg_rx, index)) {
                dev_kfree_skb(skb);
                goto out;
        }
@@ -1099,6 +1120,9 @@ ieee80211_rx_h_check_dup(struct ieee80211_rx_data *rx)
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 
+       if (status->flag & RX_FLAG_DUP_VALIDATED)
+               return RX_CONTINUE;
+
        /*
         * Drop duplicate 802.11 retransmissions
         * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
@@ -2199,9 +2223,6 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
        skb->dev = dev;
        __skb_queue_head_init(&frame_list);
 
-       if (skb_linearize(skb))
-               return RX_DROP_UNUSABLE;
-
        ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
                                 rx->sdata->vif.type,
                                 rx->local->hw.extra_tx_headroom, true);
@@ -2231,7 +2252,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-       u16 q, hdrlen;
+       u16 ac, q, hdrlen;
 
        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2290,6 +2311,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
                        spin_lock_bh(&mppath->state_lock);
                        if (!ether_addr_equal(mppath->mpp, mpp_addr))
                                memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
+                       mppath->exp_time = jiffies;
                        spin_unlock_bh(&mppath->state_lock);
                }
                rcu_read_unlock();
@@ -2300,7 +2322,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
            ether_addr_equal(sdata->vif.addr, hdr->addr3))
                return RX_CONTINUE;
 
-       q = ieee80211_select_queue_80211(sdata, skb, hdr);
+       ac = ieee80211_select_queue_80211(sdata, skb, hdr);
+       q = sdata->vif.hw_queue[ac];
        if (ieee80211_queue_stopped(&local->hw, q)) {
                IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
                return RX_DROP_MONITOR;
@@ -2738,6 +2761,11 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
                                                    opmode, status->band);
                        goto handled;
                }
+               case WLAN_VHT_ACTION_GROUPID_MGMT: {
+                       if (len < IEEE80211_MIN_ACTION_SIZE + 25)
+                               goto invalid;
+                       goto queue;
+               }
                default:
                        break;
                }
@@ -3275,6 +3303,85 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
        ieee80211_rx_handlers(&rx, &frames);
 }
 
+void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
+                                         u16 ssn, u64 filtered,
+                                         u16 received_mpdus)
+{
+       struct sta_info *sta;
+       struct tid_ampdu_rx *tid_agg_rx;
+       struct sk_buff_head frames;
+       struct ieee80211_rx_data rx = {
+               /* This is OK -- must be QoS data frame */
+               .security_idx = tid,
+               .seqno_idx = tid,
+       };
+       int i, diff;
+
+       if (WARN_ON(!pubsta || tid >= IEEE80211_NUM_TIDS))
+               return;
+
+       __skb_queue_head_init(&frames);
+
+       sta = container_of(pubsta, struct sta_info, sta);
+
+       rx.sta = sta;
+       rx.sdata = sta->sdata;
+       rx.local = sta->local;
+
+       rcu_read_lock();
+       tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
+       if (!tid_agg_rx)
+               goto out;
+
+       spin_lock_bh(&tid_agg_rx->reorder_lock);
+
+       if (received_mpdus >= IEEE80211_SN_MODULO >> 1) {
+               int release;
+
+               /* release all frames in the reorder buffer */
+               release = (tid_agg_rx->head_seq_num + tid_agg_rx->buf_size) %
+                          IEEE80211_SN_MODULO;
+               ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx,
+                                                release, &frames);
+               /* update ssn to match received ssn */
+               tid_agg_rx->head_seq_num = ssn;
+       } else {
+               ieee80211_release_reorder_frames(sta->sdata, tid_agg_rx, ssn,
+                                                &frames);
+       }
+
+       /* handle the case that received ssn is behind the mac ssn.
+        * it can be tid_agg_rx->buf_size behind and still be valid */
+       diff = (tid_agg_rx->head_seq_num - ssn) & IEEE80211_SN_MASK;
+       if (diff >= tid_agg_rx->buf_size) {
+               tid_agg_rx->reorder_buf_filtered = 0;
+               goto release;
+       }
+       filtered = filtered >> diff;
+       ssn += diff;
+
+       /* update bitmap */
+       for (i = 0; i < tid_agg_rx->buf_size; i++) {
+               int index = (ssn + i) % tid_agg_rx->buf_size;
+
+               tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index);
+               if (filtered & BIT_ULL(i))
+                       tid_agg_rx->reorder_buf_filtered |= BIT_ULL(index);
+       }
+
+       /* now process also frames that the filter marking released */
+       ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
+
+release:
+       spin_unlock_bh(&tid_agg_rx->reorder_lock);
+
+       ieee80211_rx_handlers(&rx, &frames);
+
+ out:
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL(ieee80211_mark_rx_ba_filtered_frames);
+
 /* main receive path */
 
 static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
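The rx.c hunks above teach the reorder machinery about frames the hardware filtered out of a block-ack session: a new 64-bit reorder_buf_filtered bitmap marks slots that should count as ready even though no frame was ever stored, and ieee80211_mark_rx_ba_filtered_frames() maps the driver-reported bitmap (relative to its ssn) onto the circular reorder buffer. A standalone model of the bitmap mapping and the readiness test, using plain flags in place of skb queues; the buffer size and the "slot holds a frame" test are simplified assumptions:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SN_MODULO 4096			/* 802.11 sequence number space */
#define SN_MASK   (SN_MODULO - 1)

struct reorder_state {
	unsigned int buf_size;		/* <= 64 so the u64 bitmap suffices */
	unsigned int head_seq_num;	/* next expected sequence number */
	uint64_t filtered;		/* slots released by the filter */
	bool stored[64];		/* stand-in for "slot holds a frame" */
};

/* A slot may be released if it was filtered out by hardware or if it
 * actually holds a stored frame. */
static bool slot_ready(const struct reorder_state *st, unsigned int index)
{
	if (st->filtered & (1ULL << index))
		return true;
	return st->stored[index];
}

/* Map a driver bitmap, reported relative to 'ssn', onto the circular buffer
 * indexed by sequence number modulo buf_size (mirrors the hunk above). */
static void mark_filtered(struct reorder_state *st, unsigned int ssn,
			  uint64_t filtered)
{
	unsigned int diff = (st->head_seq_num - ssn) & SN_MASK;
	unsigned int i;

	if (diff >= st->buf_size) {	/* report too old to be useful */
		st->filtered = 0;
		return;
	}
	filtered >>= diff;
	ssn += diff;

	for (i = 0; i < st->buf_size; i++) {
		unsigned int index = (ssn + i) % st->buf_size;

		if (filtered & (1ULL << i))
			st->filtered |= 1ULL << index;
		else
			st->filtered &= ~(1ULL << index);
	}
}

int main(void)
{
	struct reorder_state st = { .buf_size = 8, .head_seq_num = 100 };

	mark_filtered(&st, 98, 0x3c);	/* frames ssn+2..ssn+5 were filtered */
	printf("slot %u ready: %d\n", 100 % st.buf_size,
	       slot_ready(&st, 100 % st.buf_size));
	return 0;
}
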
index a413e52f7691418116d928f684a0fc27b308c2d6..ae980ce8daff6023bd8e5f113dee6a57b13408cc 100644 (file)
@@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
        bool was_scanning = local->scanning;
        struct cfg80211_scan_request *scan_req;
        struct ieee80211_sub_if_data *scan_sdata;
+       struct ieee80211_sub_if_data *sdata;
 
        lockdep_assert_held(&local->mtx);
 
@@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
 
        ieee80211_mlme_notify_scan_completed(local);
        ieee80211_ibss_notify_scan_completed(local);
-       ieee80211_mesh_notify_scan_completed(local);
+
+       /* Requeue all the work that might have been ignored while
+        * the scan was in progress; if there was none this will
+        * just be a no-op for the particular interface.
+        */
+       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+               if (ieee80211_sdata_running(sdata))
+                       ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+       }
+
        if (was_scanning)
                ieee80211_start_next_roc(local);
 }
@@ -1213,6 +1223,14 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
 
        trace_api_sched_scan_stopped(local);
 
+       /*
+        * this shouldn't really happen, so for simplicity
+        * simply ignore it, and let mac80211 reconfigure
+        * the sched scan later on.
+        */
+       if (local->in_reconfig)
+               return;
+
        schedule_work(&local->sched_scan_stopped_work);
 }
 EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
index 4402ad5b27d1530bf4cc9a0d674c6538f73cb568..d20bab5c146c9d435ee67d6a11eec746db1b1bfa 100644 (file)
@@ -116,6 +116,7 @@ static void __cleanup_single_sta(struct sta_info *sta)
 
                        ieee80211_purge_tx_queue(&local->hw, &txqi->queue);
                        atomic_sub(n, &sdata->txqs_len[txqi->txq.ac]);
+                       txqi->byte_cnt = 0;
                }
        }
 
@@ -498,11 +499,17 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
 {
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct station_info sinfo;
+       struct station_info *sinfo;
        int err = 0;
 
        lockdep_assert_held(&local->sta_mtx);
 
+       sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
+       if (!sinfo) {
+               err = -ENOMEM;
+               goto out_err;
+       }
+
        /* check if STA exists already */
        if (sta_info_get_bss(sdata, sta->sta.addr)) {
                err = -EEXIST;
@@ -530,14 +537,12 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        /* accept BA sessions now */
        clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
 
-       ieee80211_recalc_min_chandef(sdata);
        ieee80211_sta_debugfs_add(sta);
        rate_control_add_sta_debugfs(sta);
 
-       memset(&sinfo, 0, sizeof(sinfo));
-       sinfo.filled = 0;
-       sinfo.generation = local->sta_generation;
-       cfg80211_new_sta(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+       sinfo->generation = local->sta_generation;
+       cfg80211_new_sta(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
+       kfree(sinfo);
 
        sta_dbg(sdata, "Inserted STA %pM\n", sta->sta.addr);
 
@@ -557,6 +562,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        __cleanup_single_sta(sta);
  out_err:
        mutex_unlock(&local->sta_mtx);
+       kfree(sinfo);
        rcu_read_lock();
        return err;
 }
@@ -898,7 +904,7 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
 {
        struct ieee80211_local *local = sta->local;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
-       struct station_info sinfo = {};
+       struct station_info *sinfo;
        int ret;
 
        /*
@@ -936,12 +942,14 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
 
        sta_dbg(sdata, "Removed STA %pM\n", sta->sta.addr);
 
-       sta_set_sinfo(sta, &sinfo);
-       cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, &sinfo, GFP_KERNEL);
+       sinfo = kzalloc(sizeof(*sinfo), GFP_KERNEL);
+       if (sinfo)
+               sta_set_sinfo(sta, sinfo);
+       cfg80211_del_sta_sinfo(sdata->dev, sta->sta.addr, sinfo, GFP_KERNEL);
+       kfree(sinfo);
 
        rate_control_remove_sta_debugfs(sta);
        ieee80211_sta_debugfs_remove(sta);
-       ieee80211_recalc_min_chandef(sdata);
 
        cleanup_single_sta(sta);
 }
@@ -1453,7 +1461,7 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
 
        more_data = ieee80211_sta_ps_more_data(sta, ignored_acs, reason, driver_release_tids);
 
-       if (reason == IEEE80211_FRAME_RELEASE_PSPOLL)
+       if (driver_release_tids && reason == IEEE80211_FRAME_RELEASE_PSPOLL)
                driver_release_tids =
                        BIT(find_highest_prio_tid(driver_release_tids));
 
@@ -1808,14 +1816,17 @@ int sta_info_move_state(struct sta_info *sta,
                        clear_bit(WLAN_STA_AUTH, &sta->_flags);
                break;
        case IEEE80211_STA_AUTH:
-               if (sta->sta_state == IEEE80211_STA_NONE)
+               if (sta->sta_state == IEEE80211_STA_NONE) {
                        set_bit(WLAN_STA_AUTH, &sta->_flags);
-               else if (sta->sta_state == IEEE80211_STA_ASSOC)
+               } else if (sta->sta_state == IEEE80211_STA_ASSOC) {
                        clear_bit(WLAN_STA_ASSOC, &sta->_flags);
+                       ieee80211_recalc_min_chandef(sta->sdata);
+               }
                break;
        case IEEE80211_STA_ASSOC:
                if (sta->sta_state == IEEE80211_STA_AUTH) {
                        set_bit(WLAN_STA_ASSOC, &sta->_flags);
+                       ieee80211_recalc_min_chandef(sta->sdata);
                } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
                        if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
                            (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
index d6051629ed155859819a591960183c0062c230d9..053f5c4fa495ba02cd99c5ddb201ce0ad0f7aeff 100644 (file)
@@ -1,6 +1,7 @@
 /*
  * Copyright 2002-2005, Devicescape Software, Inc.
  * Copyright 2013-2014  Intel Mobile Communications GmbH
+ * Copyright(c) 2015 Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -167,6 +168,8 @@ struct tid_ampdu_tx {
  *
  * @reorder_buf: buffer to reorder incoming aggregated MPDUs. An MPDU may be an
  *     A-MSDU with individually reported subframes.
+ * @reorder_buf_filtered: bitmap indicating where there are filtered frames in
+ *     the reorder buffer that should be ignored when releasing frames
  * @reorder_time: jiffies when skb was added
  * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
  * @reorder_timer: releases expired frames from the reorder buffer.
@@ -194,6 +197,7 @@ struct tid_ampdu_tx {
 struct tid_ampdu_rx {
        struct rcu_head rcu_head;
        spinlock_t reorder_lock;
+       u64 reorder_buf_filtered;
        struct sk_buff_head *reorder_buf;
        unsigned long *reorder_time;
        struct timer_list session_timer;
@@ -212,20 +216,21 @@ struct tid_ampdu_rx {
 /**
  * struct sta_ampdu_mlme - STA aggregation information.
  *
+ * @mtx: mutex to protect all TX data (except non-NULL assignments
+ *     to tid_tx[idx], which are protected by the sta spinlock)
+ *     tid_start_tx is also protected by sta->lock.
  * @tid_rx: aggregation info for Rx per TID -- RCU protected
- * @tid_tx: aggregation info for Tx per TID
- * @tid_start_tx: sessions where start was requested
- * @addba_req_num: number of times addBA request has been sent.
- * @last_addba_req_time: timestamp of the last addBA request.
- * @dialog_token_allocator: dialog token enumerator for each new session;
- * @work: work struct for starting/stopping aggregation
  * @tid_rx_timer_expired: bitmap indicating on which TIDs the
  *     RX timer expired until the work for it runs
  * @tid_rx_stop_requested:  bitmap indicating which BA sessions per TID the
  *     driver requested to close until the work for it runs
- * @mtx: mutex to protect all TX data (except non-NULL assignments
- *     to tid_tx[idx], which are protected by the sta spinlock)
- *     tid_start_tx is also protected by sta->lock.
+ * @agg_session_valid: bitmap indicating which TID has a rx BA session open on
+ * @work: work struct for starting/stopping aggregation
+ * @tid_tx: aggregation info for Tx per TID
+ * @tid_start_tx: sessions where start was requested
+ * @last_addba_req_time: timestamp of the last addBA request.
+ * @addba_req_num: number of times addBA request has been sent.
+ * @dialog_token_allocator: dialog token enumerator for each new session;
  */
 struct sta_ampdu_mlme {
        struct mutex mtx;
@@ -233,6 +238,7 @@ struct sta_ampdu_mlme {
        struct tid_ampdu_rx __rcu *tid_rx[IEEE80211_NUM_TIDS];
        unsigned long tid_rx_timer_expired[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
        unsigned long tid_rx_stop_requested[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
+       unsigned long agg_session_valid[BITS_TO_LONGS(IEEE80211_NUM_TIDS)];
        /* tx */
        struct work_struct work;
        struct tid_ampdu_tx __rcu *tid_tx[IEEE80211_NUM_TIDS];
index 5bad05e9af90fc76201f4bd9a54b931ffd5c61bc..6101deb805a830a80acccbe05499ca4f4fc33988 100644 (file)
@@ -51,6 +51,11 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
        struct ieee80211_hdr *hdr = (void *)skb->data;
        int ac;
 
+       if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) {
+               ieee80211_free_txskb(&local->hw, skb);
+               return;
+       }
+
        /*
         * This skb 'survived' a round-trip through the driver, and
         * hopefully the driver didn't mangle it too badly. However,
index a6b4442776a0519251b1a7650007343a3541181e..2b0a17ee907abff27d2fef4c93e8d2a03eda4c17 100644 (file)
 #define KEY_PR_FMT     " cipher:0x%x, flags=%#x, keyidx=%d, hw_key_idx=%d"
 #define KEY_PR_ARG     __entry->cipher, __entry->flags, __entry->keyidx, __entry->hw_key_idx
 
-
+#define AMPDU_ACTION_ENTRY     __field(enum ieee80211_ampdu_mlme_action,               \
+                                       ieee80211_ampdu_mlme_action)                    \
+                               STA_ENTRY                                               \
+                               __field(u16, tid)                                       \
+                               __field(u16, ssn)                                       \
+                               __field(u8, buf_size)                                   \
+                               __field(bool, amsdu)                                    \
+                               __field(u16, timeout)
+#define AMPDU_ACTION_ASSIGN    STA_NAMED_ASSIGN(params->sta);                          \
+                               __entry->tid = params->tid;                             \
+                               __entry->ssn = params->ssn;                             \
+                               __entry->buf_size = params->buf_size;                   \
+                               __entry->amsdu = params->amsdu;                         \
+                               __entry->timeout = params->timeout;
+#define AMPDU_ACTION_PR_FMT    STA_PR_FMT " tid %d, ssn %d, buf_size %u, amsdu %d, timeout %d"
+#define AMPDU_ACTION_PR_ARG    STA_PR_ARG, __entry->tid, __entry->ssn,                 \
+                               __entry->buf_size, __entry->amsdu, __entry->timeout
 
 /*
  * Tracing for driver callbacks.
@@ -970,38 +986,25 @@ DEFINE_EVENT(local_only_evt, drv_tx_last_beacon,
 TRACE_EVENT(drv_ampdu_action,
        TP_PROTO(struct ieee80211_local *local,
                 struct ieee80211_sub_if_data *sdata,
-                enum ieee80211_ampdu_mlme_action action,
-                struct ieee80211_sta *sta, u16 tid,
-                u16 *ssn, u8 buf_size, bool amsdu),
+                struct ieee80211_ampdu_params *params),
 
-       TP_ARGS(local, sdata, action, sta, tid, ssn, buf_size, amsdu),
+       TP_ARGS(local, sdata, params),
 
        TP_STRUCT__entry(
                LOCAL_ENTRY
-               STA_ENTRY
-               __field(u32, action)
-               __field(u16, tid)
-               __field(u16, ssn)
-               __field(u8, buf_size)
-               __field(bool, amsdu)
                VIF_ENTRY
+               AMPDU_ACTION_ENTRY
        ),
 
        TP_fast_assign(
                LOCAL_ASSIGN;
                VIF_ASSIGN;
-               STA_ASSIGN;
-               __entry->action = action;
-               __entry->tid = tid;
-               __entry->ssn = ssn ? *ssn : 0;
-               __entry->buf_size = buf_size;
-               __entry->amsdu = amsdu;
+               AMPDU_ACTION_ASSIGN;
        ),
 
        TP_printk(
-               LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " action:%d tid:%d buf:%d amsdu:%d",
-               LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->action,
-               __entry->tid, __entry->buf_size, __entry->amsdu
+               LOCAL_PR_FMT VIF_PR_FMT AMPDU_ACTION_PR_FMT,
+               LOCAL_PR_ARG, VIF_PR_ARG, AMPDU_ACTION_PR_ARG
        )
 );
 
index 3311ce0f3d6c2f9c93c075458159935c631d144d..7bb67fa9f4d26ee5acf28b411b8558f4420f40c0 100644 (file)
@@ -710,6 +710,10 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx)
 
        info->control.short_preamble = txrc.short_preamble;
 
+       /* don't ask rate control when rate already injected via radiotap */
+       if (info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)
+               return TX_CONTINUE;
+
        if (tx->sta)
                assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC);
 
@@ -1266,7 +1270,11 @@ static void ieee80211_drv_tx(struct ieee80211_local *local,
        if (atomic_read(&sdata->txqs_len[ac]) >= local->hw.txq_ac_max_pending)
                netif_stop_subqueue(sdata->dev, ac);
 
-       skb_queue_tail(&txqi->queue, skb);
+       spin_lock_bh(&txqi->queue.lock);
+       txqi->byte_cnt += skb->len;
+       __skb_queue_tail(&txqi->queue, skb);
+       spin_unlock_bh(&txqi->queue.lock);
+
        drv_wake_tx_queue(local, txqi);
 
        return;
@@ -1294,6 +1302,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
        if (!skb)
                goto out;
 
+       txqi->byte_cnt -= skb->len;
+
        atomic_dec(&sdata->txqs_len[ac]);
        if (__netif_subqueue_stopped(sdata->dev, ac))
                ieee80211_propagate_queue_wake(local, sdata->vif.hw_queue[ac]);
@@ -1665,15 +1675,24 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata,
        ieee80211_tx(sdata, sta, skb, false);
 }
 
-static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
+static bool ieee80211_parse_tx_radiotap(struct ieee80211_local *local,
+                                       struct sk_buff *skb)
 {
        struct ieee80211_radiotap_iterator iterator;
        struct ieee80211_radiotap_header *rthdr =
                (struct ieee80211_radiotap_header *) skb->data;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_supported_band *sband =
+               local->hw.wiphy->bands[info->band];
        int ret = ieee80211_radiotap_iterator_init(&iterator, rthdr, skb->len,
                                                   NULL);
        u16 txflags;
+       u16 rate = 0;
+       bool rate_found = false;
+       u8 rate_retries = 0;
+       u16 rate_flags = 0;
+       u8 mcs_known, mcs_flags;
+       int i;
 
        info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT |
                       IEEE80211_TX_CTL_DONTFRAG;
@@ -1724,6 +1743,35 @@ static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
                                info->flags |= IEEE80211_TX_CTL_NO_ACK;
                        break;
 
+               case IEEE80211_RADIOTAP_RATE:
+                       rate = *iterator.this_arg;
+                       rate_flags = 0;
+                       rate_found = true;
+                       break;
+
+               case IEEE80211_RADIOTAP_DATA_RETRIES:
+                       rate_retries = *iterator.this_arg;
+                       break;
+
+               case IEEE80211_RADIOTAP_MCS:
+                       mcs_known = iterator.this_arg[0];
+                       mcs_flags = iterator.this_arg[1];
+                       if (!(mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_MCS))
+                               break;
+
+                       rate_found = true;
+                       rate = iterator.this_arg[2];
+                       rate_flags = IEEE80211_TX_RC_MCS;
+
+                       if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_GI &&
+                           mcs_flags & IEEE80211_RADIOTAP_MCS_SGI)
+                               rate_flags |= IEEE80211_TX_RC_SHORT_GI;
+
+                       if (mcs_known & IEEE80211_RADIOTAP_MCS_HAVE_BW &&
+                           mcs_flags & IEEE80211_RADIOTAP_MCS_BW_40)
+                               rate_flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+                       break;
+
                /*
                 * Please update the file
                 * Documentation/networking/mac80211-injection.txt
@@ -1738,6 +1786,32 @@ static bool ieee80211_parse_tx_radiotap(struct sk_buff *skb)
        if (ret != -ENOENT) /* ie, if we didn't simply run out of fields */
                return false;
 
+       if (rate_found) {
+               info->control.flags |= IEEE80211_TX_CTRL_RATE_INJECT;
+
+               for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
+                       info->control.rates[i].idx = -1;
+                       info->control.rates[i].flags = 0;
+                       info->control.rates[i].count = 0;
+               }
+
+               if (rate_flags & IEEE80211_TX_RC_MCS) {
+                       info->control.rates[0].idx = rate;
+               } else {
+                       for (i = 0; i < sband->n_bitrates; i++) {
+                               if (rate * 5 != sband->bitrates[i].bitrate)
+                                       continue;
+
+                               info->control.rates[0].idx = i;
+                               break;
+                       }
+               }
+
+               info->control.rates[0].flags = rate_flags;
+               info->control.rates[0].count = min_t(u8, rate_retries + 1,
+                                                    local->hw.max_rate_tries);
+       }
+
        /*
         * remove the radiotap header
         * iterator->_max_length was sanity-checked against
@@ -1819,7 +1893,7 @@ netdev_tx_t ieee80211_monitor_start_xmit(struct sk_buff *skb,
                      IEEE80211_TX_CTL_INJECTED;
 
        /* process and remove the injection radiotap header */
-       if (!ieee80211_parse_tx_radiotap(skb))
+       if (!ieee80211_parse_tx_radiotap(local, skb))
                goto fail;
 
        rcu_read_lock();
@@ -2099,8 +2173,11 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata,
                                        mpp_lookup = true;
                        }
 
-                       if (mpp_lookup)
+                       if (mpp_lookup) {
                                mppath = mpp_path_lookup(sdata, skb->data);
+                               if (mppath)
+                                       mppath->exp_time = jiffies;
+                       }
 
                        if (mppath && mpath)
                                mesh_path_del(mpath->sdata, mpath->dst);
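The tx.c hunks above let a monitor-injected frame carry its own rate selection via radiotap: a legacy rate arrives in units of 500 kbps and is matched against the band's bitrate table, which mac80211 keeps in units of 100 kbps (hence the rate * 5 comparison), and the retry count is clamped to what the hardware supports. A standalone sketch of that lookup against an illustrative 2.4 GHz bitrate table and an assumed hardware retry limit:

#include <stdio.h>
#include <stdint.h>

/* Bitrates in units of 100 kbps, as mac80211 stores them (2.4 GHz subset). */
static const uint16_t bitrates[] = { 10, 20, 55, 110, 60, 90, 120, 180,
				     240, 360, 480, 540 };
#define N_BITRATES (sizeof(bitrates) / sizeof(bitrates[0]))

#define HW_MAX_RATE_TRIES 7	/* illustrative hardware limit */

/* Radiotap reports legacy rates in units of 500 kbps; find the matching
 * index in the 100 kbps table, or -1 if the rate is not supported. */
static int radiotap_rate_to_index(uint8_t rate_500kbps)
{
	unsigned int i;

	for (i = 0; i < N_BITRATES; i++)
		if ((unsigned int)rate_500kbps * 5 == bitrates[i])
			return (int)i;
	return -1;
}

static uint8_t clamp_retries(uint8_t requested_retries)
{
	uint8_t count = requested_retries + 1;	/* first transmission + retries */

	return count > HW_MAX_RATE_TRIES ? HW_MAX_RATE_TRIES : count;
}

int main(void)
{
	printf("idx=%d count=%u\n",
	       radiotap_rate_to_index(12),	/* 6 Mbps */
	       clamp_retries(3));
	return 0;
}
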
index 3943d4bf289c29b436765d82dd0f241c8b7f5530..74dd4d72b2332281f0ee3e73138b8fec3a53e380 100644 (file)
@@ -4,7 +4,7 @@
  * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
  * Copyright 2007      Johannes Berg <johannes@sipsolutions.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
- * Copyright (C) 2015  Intel Deutschland GmbH
+ * Copyright (C) 2015-2016     Intel Deutschland GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -1928,6 +1928,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                          BSS_CHANGED_IDLE |
                          BSS_CHANGED_TXPOWER;
 
+               if (sdata->flags & IEEE80211_SDATA_MU_MIMO_OWNER)
+                       changed |= BSS_CHANGED_MU_GROUPS;
+
                switch (sdata->vif.type) {
                case NL80211_IFTYPE_STATION:
                        changed |= BSS_CHANGED_ASSOC |
@@ -2043,16 +2046,26 @@ int ieee80211_reconfig(struct ieee80211_local *local)
                 */
                if (sched_scan_req->n_scan_plans > 1 ||
                    __ieee80211_request_sched_scan_start(sched_scan_sdata,
-                                                        sched_scan_req))
+                                                        sched_scan_req)) {
+                       RCU_INIT_POINTER(local->sched_scan_sdata, NULL);
+                       RCU_INIT_POINTER(local->sched_scan_req, NULL);
                        sched_scan_stopped = true;
+               }
        mutex_unlock(&local->mtx);
 
        if (sched_scan_stopped)
                cfg80211_sched_scan_stopped_rtnl(local->hw.wiphy);
 
  wake_up:
-       local->in_reconfig = false;
-       barrier();
+       if (local->in_reconfig) {
+               local->in_reconfig = false;
+               barrier();
+
+               /* Restart deferred ROCs */
+               mutex_lock(&local->mtx);
+               ieee80211_start_next_roc(local);
+               mutex_unlock(&local->mtx);
+       }
 
        if (local->monitors == local->open_count && local->monitors > 0)
                ieee80211_add_virtual_monitor(local);
@@ -2361,10 +2374,23 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
 
        switch (chandef->width) {
        case NL80211_CHAN_WIDTH_160:
-               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+               /*
+                * Convert 160 MHz channel width to new style as interop
+                * workaround.
+                */
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+               vht_oper->center_freq_seg2_idx = vht_oper->center_freq_seg1_idx;
+               if (chandef->chan->center_freq < chandef->center_freq1)
+                       vht_oper->center_freq_seg1_idx -= 8;
+               else
+                       vht_oper->center_freq_seg1_idx += 8;
                break;
        case NL80211_CHAN_WIDTH_80P80:
-               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+               /*
+                * Convert 80+80 MHz channel width to new style as interop
+                * workaround.
+                */
+               vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
                break;
        case NL80211_CHAN_WIDTH_80:
                vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
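The hunk above encodes 160 MHz and 80+80 MHz operation using the newer "two 80 MHz segment" style for interoperability, and the parsing helper added a little further down (ieee80211_chandef_vht_oper) recovers the real width from the two segment indices: a difference of exactly 8 means a contiguous 160 MHz channel, anything larger means 80+80. A standalone model of that decoding rule, with channel-index-to-frequency conversion reduced to the usual 5 MHz spacing (an assumption; the kernel uses ieee80211_channel_to_frequency()):

#include <stdio.h>
#include <stdlib.h>

enum vht_width { W_80, W_160, W_80P80 };

struct vht_chandef {
	enum vht_width width;
	int center_freq1;	/* MHz */
	int center_freq2;	/* MHz, only used for 80+80 */
};

/* Assumed 5 GHz mapping: freq = 5000 + 5 * channel index. */
static int chan_to_freq(int idx)
{
	return 5000 + 5 * idx;
}

/* Decode an "80 MHz" VHT operation element that may really describe
 * 160 MHz or 80+80 MHz via the second segment index (see hunks below). */
static struct vht_chandef decode_vht_80(int seg1_idx, int seg2_idx)
{
	struct vht_chandef c = { W_80, chan_to_freq(seg1_idx), 0 };

	if (seg2_idx) {
		int diff = abs(seg2_idx - seg1_idx);

		if (diff == 8) {
			c.width = W_160;
			c.center_freq1 = chan_to_freq(seg2_idx);
		} else if (diff > 8) {
			c.width = W_80P80;
			c.center_freq2 = chan_to_freq(seg2_idx);
		}
	}
	return c;
}

int main(void)
{
	/* channels 36..64 advertised as one 160 MHz block: seg1=42, seg2=50 */
	struct vht_chandef c = decode_vht_80(42, 50);

	printf("width=%d cf1=%d cf2=%d\n", c.width, c.center_freq1, c.center_freq2);
	return 0;
}
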
@@ -2380,17 +2406,13 @@ u8 *ieee80211_ie_build_vht_oper(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
        return pos + sizeof(struct ieee80211_vht_operation);
 }
 
-void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                 const struct ieee80211_ht_operation *ht_oper,
-                                 struct cfg80211_chan_def *chandef)
+bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
+                              struct cfg80211_chan_def *chandef)
 {
        enum nl80211_channel_type channel_type;
 
-       if (!ht_oper) {
-               cfg80211_chandef_create(chandef, control_chan,
-                                       NL80211_CHAN_NO_HT);
-               return;
-       }
+       if (!ht_oper)
+               return false;
 
        switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
        case IEEE80211_HT_PARAM_CHA_SEC_NONE:
@@ -2404,42 +2426,66 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
                break;
        default:
                channel_type = NL80211_CHAN_NO_HT;
+               return false;
        }
 
-       cfg80211_chandef_create(chandef, control_chan, channel_type);
+       cfg80211_chandef_create(chandef, chandef->chan, channel_type);
+       return true;
 }
 
-void ieee80211_vht_oper_to_chandef(struct ieee80211_channel *control_chan,
-                                  const struct ieee80211_vht_operation *oper,
-                                  struct cfg80211_chan_def *chandef)
+bool ieee80211_chandef_vht_oper(const struct ieee80211_vht_operation *oper,
+                               struct cfg80211_chan_def *chandef)
 {
+       struct cfg80211_chan_def new = *chandef;
+       int cf1, cf2;
+
        if (!oper)
-               return;
+               return false;
 
-       chandef->chan = control_chan;
+       cf1 = ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
+                                            chandef->chan->band);
+       cf2 = ieee80211_channel_to_frequency(oper->center_freq_seg2_idx,
+                                            chandef->chan->band);
 
        switch (oper->chan_width) {
        case IEEE80211_VHT_CHANWIDTH_USE_HT:
                break;
        case IEEE80211_VHT_CHANWIDTH_80MHZ:
-               chandef->width = NL80211_CHAN_WIDTH_80;
+               new.width = NL80211_CHAN_WIDTH_80;
+               new.center_freq1 = cf1;
+               /* If needed, adjust based on the newer interop workaround. */
+               if (oper->center_freq_seg2_idx) {
+                       unsigned int diff;
+
+                       diff = abs(oper->center_freq_seg2_idx -
+                                  oper->center_freq_seg1_idx);
+                       if (diff == 8) {
+                               new.width = NL80211_CHAN_WIDTH_160;
+                               new.center_freq1 = cf2;
+                       } else if (diff > 8) {
+                               new.width = NL80211_CHAN_WIDTH_80P80;
+                               new.center_freq2 = cf2;
+                       }
+               }
                break;
        case IEEE80211_VHT_CHANWIDTH_160MHZ:
-               chandef->width = NL80211_CHAN_WIDTH_160;
+               new.width = NL80211_CHAN_WIDTH_160;
+               new.center_freq1 = cf1;
                break;
        case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
-               chandef->width = NL80211_CHAN_WIDTH_80P80;
+               new.width = NL80211_CHAN_WIDTH_80P80;
+               new.center_freq1 = cf1;
+               new.center_freq2 = cf2;
                break;
        default:
-               break;
+               return false;
        }
 
-       chandef->center_freq1 =
-               ieee80211_channel_to_frequency(oper->center_freq_seg1_idx,
-                                              control_chan->band);
-       chandef->center_freq2 =
-               ieee80211_channel_to_frequency(oper->center_freq_seg2_idx,
-                                              control_chan->band);
+       if (!cfg80211_chandef_valid(&new))
+               return false;
+
+       *chandef = new;
+       return true;
 }
 
 int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
@@ -2662,6 +2708,18 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
                sband = local->hw.wiphy->bands[status->band];
                bitrate = sband->bitrates[status->rate_idx].bitrate;
                ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));
+
+               if (status->flag & RX_FLAG_MACTIME_PLCP_START) {
+                       /* TODO: handle HT/VHT preambles */
+                       if (status->band == IEEE80211_BAND_5GHZ) {
+                               ts += 20 << shift;
+                               mpdu_offset += 2;
+                       } else if (status->flag & RX_FLAG_SHORTPRE) {
+                               ts += 96;
+                       } else {
+                               ts += 192;
+                       }
+               }
        }
 
        rate = cfg80211_calculate_bitrate(&ri);
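The constants added above are preamble durations in microseconds, as I read them (the patch itself does not spell this out): an OFDM preamble plus SIGNAL field takes 16 + 4 = 20 us at full clock, scaled by 'shift' for 10/5 MHz channels, while a 2.4 GHz DSSS/CCK frame spends 72 + 24 = 96 us before the MPDU with a short preamble and 144 + 48 = 192 us with a long one.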
@@ -3347,3 +3405,17 @@ void ieee80211_init_tx_queue(struct ieee80211_sub_if_data *sdata,
                txqi->txq.ac = IEEE80211_AC_BE;
        }
 }
+
+void ieee80211_txq_get_depth(struct ieee80211_txq *txq,
+                            unsigned long *frame_cnt,
+                            unsigned long *byte_cnt)
+{
+       struct txq_info *txqi = to_txq_info(txq);
+
+       if (frame_cnt)
+               *frame_cnt = txqi->queue.qlen;
+
+       if (byte_cnt)
+               *byte_cnt = txqi->byte_cnt;
+}
+EXPORT_SYMBOL(ieee80211_txq_get_depth);
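A minimal sketch of how a driver might consume the new export (the surrounding driver context and schedule_tx_work() are hypothetical; only ieee80211_txq_get_depth() comes from the hunk above):

        /* e.g. inside a driver's wake_tx_queue() handler */
        unsigned long frames, bytes;

        ieee80211_txq_get_depth(txq, &frames, &bytes);
        /* either output pointer may be NULL when only one value is needed */
        if (frames)
                schedule_tx_work(txq);  /* hypothetical driver helper */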
index c38b2f07a919e20dc22363fe80911f5f5a0b004f..341d192cea522c8f25c695083b84ea9bd5013987 100644 (file)
@@ -1,6 +1,9 @@
 /*
  * VHT handling
  *
+ * Portions of this file
+ * Copyright(c) 2015 Intel Deutschland GmbH
+ *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
@@ -278,6 +281,23 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        }
 
        sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
+
+       /* If HT IE reported 3839 bytes only, stay with that size. */
+       if (sta->sta.max_amsdu_len == IEEE80211_MAX_MPDU_LEN_HT_3839)
+               return;
+
+       switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
+       case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
+               break;
+       case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991:
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_7991;
+               break;
+       case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895:
+       default:
+               sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_3895;
+               break;
+       }
 }
 
 enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta)
@@ -425,6 +445,30 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
        return changed;
 }
 
+void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata,
+                                struct ieee80211_mgmt *mgmt)
+{
+       struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+
+       if (!(sdata->flags & IEEE80211_SDATA_MU_MIMO_OWNER))
+               return;
+
+       if (!memcmp(mgmt->u.action.u.vht_group_notif.position,
+                   bss_conf->mu_group.position, WLAN_USER_POSITION_LEN) &&
+           !memcmp(mgmt->u.action.u.vht_group_notif.membership,
+                   bss_conf->mu_group.membership, WLAN_MEMBERSHIP_LEN))
+               return;
+
+       memcpy(bss_conf->mu_group.membership,
+              mgmt->u.action.u.vht_group_notif.membership,
+              WLAN_MEMBERSHIP_LEN);
+       memcpy(bss_conf->mu_group.position,
+              mgmt->u.action.u.vht_group_notif.position,
+              WLAN_USER_POSITION_LEN);
+
+       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_MU_GROUPS);
+}
+
 void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
                                 struct sta_info *sta, u8 opmode,
                                 enum ieee80211_band band)
index a13d02b7cee47401357f1fd7ff5e0dc74becc9ad..6a3e1c2181d3a960febf400594d9c60d3c44e700 100644 (file)
@@ -17,9 +17,9 @@
 #include <linux/err.h>
 #include <linux/bug.h>
 #include <linux/completion.h>
-#include <linux/crypto.h>
 #include <linux/ieee802154.h>
 #include <crypto/aead.h>
+#include <crypto/skcipher.h>
 
 #include "ieee802154_i.h"
 #include "llsec.h"
@@ -144,18 +144,18 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
                        goto err_tfm;
        }
 
-       key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
+       key->tfm0 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(key->tfm0))
                goto err_tfm;
 
-       if (crypto_blkcipher_setkey(key->tfm0, template->key,
-                                   IEEE802154_LLSEC_KEY_SIZE))
+       if (crypto_skcipher_setkey(key->tfm0, template->key,
+                                  IEEE802154_LLSEC_KEY_SIZE))
                goto err_tfm0;
 
        return key;
 
 err_tfm0:
-       crypto_free_blkcipher(key->tfm0);
+       crypto_free_skcipher(key->tfm0);
 err_tfm:
        for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
                if (key->tfm[i])
@@ -175,7 +175,7 @@ static void llsec_key_release(struct kref *ref)
        for (i = 0; i < ARRAY_SIZE(key->tfm); i++)
                crypto_free_aead(key->tfm[i]);
 
-       crypto_free_blkcipher(key->tfm0);
+       crypto_free_skcipher(key->tfm0);
        kzfree(key);
 }
 
@@ -620,15 +620,17 @@ llsec_do_encrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 {
        u8 iv[16];
        struct scatterlist src;
-       struct blkcipher_desc req = {
-               .tfm = key->tfm0,
-               .info = iv,
-               .flags = 0,
-       };
+       SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+       int err;
 
        llsec_geniv(iv, sec->params.hwaddr, &hdr->sec);
        sg_init_one(&src, skb->data, skb->len);
-       return crypto_blkcipher_encrypt_iv(&req, &src, &src, skb->len);
+       skcipher_request_set_tfm(req, key->tfm0);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &src, &src, skb->len, iv);
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
+       return err;
 }
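The blkcipher to skcipher conversion above follows the generic synchronous skcipher calling pattern; a condensed, self-contained sketch of that pattern (the function name, key and IV handling are placeholders, not code from this driver):

        /* assumed includes: <crypto/skcipher.h>, <linux/scatterlist.h>, <linux/err.h> */
        static int demo_ctr_aes_encrypt(u8 *buf, unsigned int buflen,
                                        const u8 *key, unsigned int keylen)
        {
                u8 iv[16] = { 0 };              /* placeholder IV */
                struct scatterlist sg;
                struct crypto_skcipher *tfm;
                int err;

                tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                err = crypto_skcipher_setkey(tfm, key, keylen);
                if (!err) {
                        SKCIPHER_REQUEST_ON_STACK(req, tfm);

                        sg_init_one(&sg, buf, buflen);
                        skcipher_request_set_tfm(req, tfm);
                        skcipher_request_set_callback(req, 0, NULL, NULL);
                        skcipher_request_set_crypt(req, &sg, &sg, buflen, iv);
                        err = crypto_skcipher_encrypt(req);
                        skcipher_request_zero(req);
                }

                crypto_free_skcipher(tfm);
                return err;
        }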
 
 static struct crypto_aead*
@@ -830,11 +832,8 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
        unsigned char *data;
        int datalen;
        struct scatterlist src;
-       struct blkcipher_desc req = {
-               .tfm = key->tfm0,
-               .info = iv,
-               .flags = 0,
-       };
+       SKCIPHER_REQUEST_ON_STACK(req, key->tfm0);
+       int err;
 
        llsec_geniv(iv, dev_addr, &hdr->sec);
        data = skb_mac_header(skb) + skb->mac_len;
@@ -842,7 +841,13 @@ llsec_do_decrypt_unauth(struct sk_buff *skb, const struct mac802154_llsec *sec,
 
        sg_init_one(&src, data, datalen);
 
-       return crypto_blkcipher_decrypt_iv(&req, &src, &src, datalen);
+       skcipher_request_set_tfm(req, key->tfm0);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &src, &src, datalen, iv);
+
+       err = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
+       return err;
 }
 
 static int
index 950578e1d7bebab7636aae278af3964140ec94e1..6f3b658e3279cc536859cbe4f36927eafaa5c19c 100644 (file)
@@ -19,7 +19,6 @@
 
 #include <linux/slab.h>
 #include <linux/hashtable.h>
-#include <linux/crypto.h>
 #include <linux/kref.h>
 #include <linux/spinlock.h>
 #include <net/af_ieee802154.h>
@@ -30,7 +29,7 @@ struct mac802154_llsec_key {
 
        /* one tfm for each authsize (4/8/16) */
        struct crypto_aead *tfm[3];
-       struct crypto_blkcipher *tfm0;
+       struct crypto_skcipher *tfm0;
 
        struct kref ref;
 };
index 43d8c9896fa3e66ea7f76ee1b7934c3129a46fd3..f0f688db6213930ad568b645080eb171bb30f345 100644 (file)
@@ -164,8 +164,6 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb,
        };
        struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-       if (e.cidr == 0)
-               return -EINVAL;
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK;
 
@@ -377,8 +375,6 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb,
        };
        struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 
-       if (e.cidr == 0)
-               return -EINVAL;
        if (adt == IPSET_TEST)
                e.cidr = HOST_MASK;
 
index 0328f725069332d0c84bbb3108192a2a28beeb2a..299edc6add5a6ac2c729e4caa6f9550b1cebe3df 100644 (file)
@@ -605,17 +605,13 @@ static const struct file_operations ip_vs_app_fops = {
 
 int __net_init ip_vs_app_net_init(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
-
        INIT_LIST_HEAD(&ipvs->app_list);
-       proc_create("ip_vs_app", 0, net->proc_net, &ip_vs_app_fops);
+       proc_create("ip_vs_app", 0, ipvs->net->proc_net, &ip_vs_app_fops);
        return 0;
 }
 
 void __net_exit ip_vs_app_net_cleanup(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
-
        unregister_ip_vs_app(ipvs, NULL /* all */);
-       remove_proc_entry("ip_vs_app", net->proc_net);
+       remove_proc_entry("ip_vs_app", ipvs->net->proc_net);
 }
index e7c1b052c2a3ac9cccd54ed1e4558d36bde080c4..404b2a4f4b5be90f630a20ff592e030f1ed4d671 100644 (file)
@@ -1376,8 +1376,6 @@ static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
        struct ip_vs_pe *old_pe;
        struct netns_ipvs *ipvs = svc->ipvs;
 
-       pr_info("%s: enter\n", __func__);
-
        /* Count only IPv4 services for old get/setsockopt interface */
        if (svc->af == AF_INET)
                ipvs->num_services--;
@@ -3947,7 +3945,6 @@ static struct notifier_block ip_vs_dst_notifier = {
 
 int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
        int i, idx;
 
        /* Initialize rs_table */
@@ -3974,9 +3971,9 @@ int __net_init ip_vs_control_net_init(struct netns_ipvs *ipvs)
 
        spin_lock_init(&ipvs->tot_stats.lock);
 
-       proc_create("ip_vs", 0, net->proc_net, &ip_vs_info_fops);
-       proc_create("ip_vs_stats", 0, net->proc_net, &ip_vs_stats_fops);
-       proc_create("ip_vs_stats_percpu", 0, net->proc_net,
+       proc_create("ip_vs", 0, ipvs->net->proc_net, &ip_vs_info_fops);
+       proc_create("ip_vs_stats", 0, ipvs->net->proc_net, &ip_vs_stats_fops);
+       proc_create("ip_vs_stats_percpu", 0, ipvs->net->proc_net,
                    &ip_vs_stats_percpu_fops);
 
        if (ip_vs_control_net_init_sysctl(ipvs))
@@ -3991,13 +3988,11 @@ err:
 
 void __net_exit ip_vs_control_net_cleanup(struct netns_ipvs *ipvs)
 {
-       struct net *net = ipvs->net;
-
        ip_vs_trash_cleanup(ipvs);
        ip_vs_control_net_cleanup_sysctl(ipvs);
-       remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
-       remove_proc_entry("ip_vs_stats", net->proc_net);
-       remove_proc_entry("ip_vs", net->proc_net);
+       remove_proc_entry("ip_vs_stats_percpu", ipvs->net->proc_net);
+       remove_proc_entry("ip_vs_stats", ipvs->net->proc_net);
+       remove_proc_entry("ip_vs", ipvs->net->proc_net);
        free_percpu(ipvs->tot_stats.cpustats);
 }
 
index 1b8d594e493a32f0ea461ade2d2a85387054a78d..c4e9ca016a88ac32e7677253a3b7e8ef30d91f5a 100644 (file)
@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
        const char *dptr;
        int retc;
 
-       ip_vs_fill_iph_skb(p->af, skb, false, &iph);
+       retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
 
        /* Only useful with UDP */
-       if (iph.protocol != IPPROTO_UDP)
+       if (!retc || iph.protocol != IPPROTO_UDP)
                return -EINVAL;
        /* todo: IPv6 fragments:
         *       I think this only should be done for the first fragment. /HS
index 3cb3cb831591ef79515b4bde66d5db825a732afb..58882de06bd74f254033e0f06521f264c18c8d44 100644 (file)
@@ -66,6 +66,21 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
 
+static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
+static __read_mostly bool nf_conntrack_locks_all;
+
+void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
+{
+       spin_lock(lock);
+       while (unlikely(nf_conntrack_locks_all)) {
+               spin_unlock(lock);
+               spin_lock(&nf_conntrack_locks_all_lock);
+               spin_unlock(&nf_conntrack_locks_all_lock);
+               spin_lock(lock);
+       }
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_lock);
+
 static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
 {
        h1 %= CONNTRACK_LOCKS;
@@ -82,12 +97,12 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        if (h1 <= h2) {
-               spin_lock(&nf_conntrack_locks[h1]);
+               nf_conntrack_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
-               spin_lock(&nf_conntrack_locks[h2]);
+               nf_conntrack_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
@@ -102,16 +117,19 @@ static void nf_conntrack_all_lock(void)
 {
        int i;
 
-       for (i = 0; i < CONNTRACK_LOCKS; i++)
-               spin_lock_nested(&nf_conntrack_locks[i], i);
+       spin_lock(&nf_conntrack_locks_all_lock);
+       nf_conntrack_locks_all = true;
+
+       for (i = 0; i < CONNTRACK_LOCKS; i++) {
+               spin_lock(&nf_conntrack_locks[i]);
+               spin_unlock(&nf_conntrack_locks[i]);
+       }
 }
 
 static void nf_conntrack_all_unlock(void)
 {
-       int i;
-
-       for (i = 0; i < CONNTRACK_LOCKS; i++)
-               spin_unlock(&nf_conntrack_locks[i]);
+       nf_conntrack_locks_all = false;
+       spin_unlock(&nf_conntrack_locks_all_lock);
 }
 
 unsigned int nf_conntrack_htable_size __read_mostly;
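A condensed view of the locking protocol these hunks introduce, as a caller would use it ('bucket' is a placeholder index and the comments are my reading of the scheme, not text from the patch):

        /* per-bucket path: take one hash lock, yielding to a global locker */
        nf_conntrack_lock(&nf_conntrack_locks[bucket % CONNTRACK_LOCKS]);
        /* ... walk net->ct.hash[bucket] ... */
        spin_unlock(&nf_conntrack_locks[bucket % CONNTRACK_LOCKS]);

        /* whole-table path: nf_conntrack_all_lock() raises nf_conntrack_locks_all
         * under nf_conntrack_locks_all_lock and then takes/releases every bucket
         * lock so existing holders drain out; nf_conntrack_all_unlock() clears
         * the flag and drops the global lock, releasing any callers spinning in
         * nf_conntrack_lock().
         */
        nf_conntrack_all_lock();
        /* ... operate on the whole table ... */
        nf_conntrack_all_unlock();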
@@ -757,7 +775,7 @@ restart:
        hash = hash_bucket(_hash, net);
        for (; i < net->ct.htable_size; i++) {
                lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS];
-               spin_lock(lockp);
+               nf_conntrack_lock(lockp);
                if (read_seqcount_retry(&net->ct.generation, sequence)) {
                        spin_unlock(lockp);
                        goto restart;
@@ -1382,7 +1400,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data),
        for (; *bucket < net->ct.htable_size; (*bucket)++) {
                lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS];
                local_bh_disable();
-               spin_lock(lockp);
+               nf_conntrack_lock(lockp);
                if (*bucket < net->ct.htable_size) {
                        hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) {
                                if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
index bd9d31537905e4e372da427ac29ce5948f2af34c..3b40ec575cd56a7b73c0b25d9c8decdda979d842 100644 (file)
@@ -425,7 +425,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
        }
        local_bh_disable();
        for (i = 0; i < net->ct.htable_size; i++) {
-               spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+               nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
                if (i < net->ct.htable_size) {
                        hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
                                unhelp(h, me);
index dbb1bb3edb456594184bb211332d1ee799af70ef..355e8552fd5b77682295493a22dc0a03d9338255 100644 (file)
@@ -840,7 +840,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
        for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
                lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS];
-               spin_lock(lockp);
+               nf_conntrack_lock(lockp);
                if (cb->args[0] >= net->ct.htable_size) {
                        spin_unlock(lockp);
                        goto out;
index b6605e0008013972cd94500db5bd626cb9bf3064..5eefe4a355c640cf85c8328740a877c6909880ba 100644 (file)
@@ -224,12 +224,12 @@ static int __init nf_tables_netdev_init(void)
 
        nft_register_chain_type(&nft_filter_chain_netdev);
        ret = register_pernet_subsys(&nf_tables_netdev_net_ops);
-       if (ret < 0)
+       if (ret < 0) {
                nft_unregister_chain_type(&nft_filter_chain_netdev);
-
+               return ret;
+       }
        register_netdevice_notifier(&nf_tables_netdev_notifier);
-
-       return ret;
+       return 0;
 }
 
 static void __exit nf_tables_netdev_exit(void)
index 5d010f27ac018d079058000c11acc66d7086b57a..94837d236ab0e9a2fc3baa5d8fe7688a1b399afc 100644 (file)
@@ -307,12 +307,12 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout)
 
        local_bh_disable();
        for (i = 0; i < net->ct.htable_size; i++) {
-               spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+               nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
                if (i < net->ct.htable_size) {
                        hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
                                untimeout(h, timeout);
                }
                spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
        }
        local_bh_enable();
 }
index 383c17138399aec2a5681580ddd3b6f200f29d67..b78c28ba465ff9c1eed6b6006b6c5e9d9da58072 100644 (file)
@@ -46,16 +46,14 @@ static void nft_byteorder_eval(const struct nft_expr *expr,
                switch (priv->op) {
                case NFT_BYTEORDER_NTOH:
                        for (i = 0; i < priv->len / 8; i++) {
-                               src64 = get_unaligned_be64(&src[i]);
-                               src64 = be64_to_cpu((__force __be64)src64);
+                               src64 = get_unaligned((u64 *)&src[i]);
                                put_unaligned_be64(src64, &dst[i]);
                        }
                        break;
                case NFT_BYTEORDER_HTON:
                        for (i = 0; i < priv->len / 8; i++) {
                                src64 = get_unaligned_be64(&src[i]);
-                               src64 = (__force u64)cpu_to_be64(src64);
-                               put_unaligned_be64(src64, &dst[i]);
+                               put_unaligned(src64, (u64 *)&dst[i]);
                        }
                        break;
                }
index a0eb2161e3efdb3476f692ce242cafaff30e7f18..d4a4619fcebc927acd1bf730e044d29813d096d3 100644 (file)
@@ -127,6 +127,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                               NF_CT_LABELS_MAX_SIZE - size);
                return;
        }
+#endif
        case NFT_CT_BYTES: /* fallthrough */
        case NFT_CT_PKTS: {
                const struct nf_conn_acct *acct = nf_conn_acct_find(ct);
@@ -138,7 +139,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
                memcpy(dest, &count, sizeof(count));
                return;
        }
-#endif
        default:
                break;
        }
index b7c43def0dc69e1cd03db238e0174283d895003a..e118397254af026fd23ed3097ddc78c8ee9fc0c4 100644 (file)
@@ -228,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
 {
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
        u8 nexthdr;
-       __be16 frag_off;
+       __be16 frag_off, oldlen, newlen;
        int tcphoff;
        int ret;
 
@@ -244,7 +244,12 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par)
                return NF_DROP;
        if (ret > 0) {
                ipv6h = ipv6_hdr(skb);
-               ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret);
+               oldlen = ipv6h->payload_len;
+               newlen = htons(ntohs(oldlen) + ret);
+               if (skb->ip_summed == CHECKSUM_COMPLETE)
+                       skb->csum = csum_add(csum_sub(skb->csum, oldlen),
+                                            newlen);
+               ipv6h->payload_len = newlen;
        }
        return XT_CONTINUE;
 }
index 81dc1bb6e0168ed17cd2d60e6dc3a394f0f34e76..f1ffb34e253f4906275d4da99ce26306b94241eb 100644 (file)
@@ -2831,7 +2831,8 @@ static int netlink_dump(struct sock *sk)
         * reasonable static buffer based on the expected largest dump of a
         * single netdev. The outcome is MSG_TRUNC error.
         */
-       skb_reserve(skb, skb_tailroom(skb) - alloc_size);
+       if (!netlink_rx_is_mmaped(sk))
+               skb_reserve(skb, skb_tailroom(skb) - alloc_size);
        netlink_skb_set_owner_r(skb, sk);
 
        len = cb->dump(skb, cb);
index 21d8875673a42229f233ca20af640c53e325a4bb..c468eabd69430b1687d0b68b086e4883afc74bf9 100644 (file)
@@ -171,14 +171,7 @@ static int nci_uart_tty_open(struct tty_struct *tty)
        tty->disc_data = NULL;
        tty->receive_room = 65536;
 
-       /* Flush any pending characters in the driver and line discipline. */
-
-       /* FIXME: why is this needed. Note don't use ldisc_ref here as the
-        * open path is before the ldisc is referencable.
-        */
-
-       if (tty->ldisc->ops->flush_buffer)
-               tty->ldisc->ops->flush_buffer(tty);
+       /* Flush any pending characters in the driver */
        tty_driver_flush_buffer(tty);
 
        return 0;
index 30ab8e127288d4290b5f823a62726179c19b0f5a..1a1fcec8869593a8c99710e2021baa45ebfdedc8 100644 (file)
@@ -132,6 +132,6 @@ static void __exit ovs_geneve_tnl_exit(void)
 module_init(ovs_geneve_tnl_init);
 module_exit(ovs_geneve_tnl_exit);
 
-MODULE_DESCRIPTION("OVS: Geneve swiching port");
+MODULE_DESCRIPTION("OVS: Geneve switching port");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("vport-type-5");
index 1605691d94144aee0fc50ffb17be05eca2b59675..de9cb19efb6a3e2774624c29b2fe5c977ef92fe4 100644 (file)
@@ -91,6 +91,8 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
        struct vxlan_config conf = {
                .no_share = true,
                .flags = VXLAN_F_COLLECT_METADATA,
+               /* Don't restrict the packets that can be sent by MTU */
+               .mtu = IP_MAX_MTU,
        };
 
        if (!options) {
index 992396aa635ce1174f6e62f19ae3f19a550668c6..b7e7851ddc5d079f246e96ffe2372d659376258e 100644 (file)
@@ -1960,6 +1960,64 @@ static unsigned int run_filter(struct sk_buff *skb,
        return res;
 }
 
+static int __packet_rcv_vnet(const struct sk_buff *skb,
+                            struct virtio_net_hdr *vnet_hdr)
+{
+       *vnet_hdr = (const struct virtio_net_hdr) { 0 };
+
+       if (skb_is_gso(skb)) {
+               struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+               /* This is a hint as to how much should be linear. */
+               vnet_hdr->hdr_len =
+                       __cpu_to_virtio16(vio_le(), skb_headlen(skb));
+               vnet_hdr->gso_size =
+                       __cpu_to_virtio16(vio_le(), sinfo->gso_size);
+
+               if (sinfo->gso_type & SKB_GSO_TCPV4)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+               else if (sinfo->gso_type & SKB_GSO_TCPV6)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+               else if (sinfo->gso_type & SKB_GSO_UDP)
+                       vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+               else if (sinfo->gso_type & SKB_GSO_FCOE)
+                       return -EINVAL;
+               else
+                       BUG();
+
+               if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+                       vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+       } else
+               vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               vnet_hdr->csum_start = __cpu_to_virtio16(vio_le(),
+                                 skb_checksum_start_offset(skb));
+               vnet_hdr->csum_offset = __cpu_to_virtio16(vio_le(),
+                                                skb->csum_offset);
+       } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+               vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
+       } /* else everything is zero */
+
+       return 0;
+}
+
+static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
+                          size_t *len)
+{
+       struct virtio_net_hdr vnet_hdr;
+
+       if (*len < sizeof(vnet_hdr))
+               return -EINVAL;
+       *len -= sizeof(vnet_hdr);
+
+       if (__packet_rcv_vnet(skb, &vnet_hdr))
+               return -EINVAL;
+
+       return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
+}
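Reading the offset math here together with the tpacket_rcv hunk further down (an interpretation, not text from the patch), the RX ring frame gains room for the header roughly like this:

        netoff = TPACKET_ALIGN(po->tp_hdrlen + max(maclen, 16)) + po->tp_reserve
                 + sizeof(struct virtio_net_hdr)   /* only when po->has_vnet_hdr */
        macoff = netoff - maclen
        /* __packet_rcv_vnet() then fills the header at
         * h.raw + macoff - sizeof(struct virtio_net_hdr),
         * i.e. immediately before the MAC header within the frame.
         */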
+
 /*
  * This function makes lazy skb cloning in hope that most of packets
  * are discarded by BPF.
@@ -2148,7 +2206,9 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
                unsigned int maclen = skb_network_offset(skb);
                netoff = TPACKET_ALIGN(po->tp_hdrlen +
                                       (maclen < 16 ? 16 : maclen)) +
-                       po->tp_reserve;
+                                      po->tp_reserve;
+               if (po->has_vnet_hdr)
+                       netoff += sizeof(struct virtio_net_hdr);
                macoff = netoff - maclen;
        }
        if (po->tp_version <= TPACKET_V2) {
@@ -2185,7 +2245,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        h.raw = packet_current_rx_frame(po, skb,
                                        TP_STATUS_KERNEL, (macoff+snaplen));
        if (!h.raw)
-               goto ring_is_full;
+               goto drop_n_account;
        if (po->tp_version <= TPACKET_V2) {
                packet_increment_rx_head(po, &po->rx_ring);
        /*
@@ -2204,6 +2264,14 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        }
        spin_unlock(&sk->sk_receive_queue.lock);
 
+       if (po->has_vnet_hdr) {
+               if (__packet_rcv_vnet(skb, h.raw + macoff -
+                                          sizeof(struct virtio_net_hdr))) {
+                       spin_lock(&sk->sk_receive_queue.lock);
+                       goto drop_n_account;
+               }
+       }
+
        skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
 
        if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
@@ -2299,7 +2367,7 @@ drop:
        kfree_skb(skb);
        return 0;
 
-ring_is_full:
+drop_n_account:
        po->stats.stats1.tp_drops++;
        spin_unlock(&sk->sk_receive_queue.lock);
 
@@ -2347,15 +2415,92 @@ static void tpacket_set_protocol(const struct net_device *dev,
        }
 }
 
+static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
+{
+       unsigned short gso_type = 0;
+
+       if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+           (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
+            __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
+             __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
+               vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
+                        __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
+                       __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
+
+       if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
+               return -EINVAL;
+
+       if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+               switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+               case VIRTIO_NET_HDR_GSO_TCPV4:
+                       gso_type = SKB_GSO_TCPV4;
+                       break;
+               case VIRTIO_NET_HDR_GSO_TCPV6:
+                       gso_type = SKB_GSO_TCPV6;
+                       break;
+               case VIRTIO_NET_HDR_GSO_UDP:
+                       gso_type = SKB_GSO_UDP;
+                       break;
+               default:
+                       return -EINVAL;
+               }
+
+               if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+                       gso_type |= SKB_GSO_TCP_ECN;
+
+               if (vnet_hdr->gso_size == 0)
+                       return -EINVAL;
+       }
+
+       vnet_hdr->gso_type = gso_type;  /* changes type, temporary storage */
+       return 0;
+}
+
+static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
+                                struct virtio_net_hdr *vnet_hdr)
+{
+       int n;
+
+       if (*len < sizeof(*vnet_hdr))
+               return -EINVAL;
+       *len -= sizeof(*vnet_hdr);
+
+       n = copy_from_iter(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter);
+       if (n != sizeof(*vnet_hdr))
+               return -EFAULT;
+
+       return __packet_snd_vnet_parse(vnet_hdr, *len);
+}
+
+static int packet_snd_vnet_gso(struct sk_buff *skb,
+                              struct virtio_net_hdr *vnet_hdr)
+{
+       if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+               u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start);
+               u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset);
+
+               if (!skb_partial_csum_set(skb, s, o))
+                       return -EINVAL;
+       }
+
+       skb_shinfo(skb)->gso_size =
+               __virtio16_to_cpu(vio_le(), vnet_hdr->gso_size);
+       skb_shinfo(skb)->gso_type = vnet_hdr->gso_type;
+
+       /* Header must be checked, and gso_segs computed. */
+       skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+       skb_shinfo(skb)->gso_segs = 0;
+       return 0;
+}
+
 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
-               void *frame, struct net_device *dev, int size_max,
-               __be16 proto, unsigned char *addr, int hlen)
+               void *frame, struct net_device *dev, void *data, int tp_len,
+               __be16 proto, unsigned char *addr, int hlen, int copylen)
 {
        union tpacket_uhdr ph;
-       int to_write, offset, len, tp_len, nr_frags, len_max;
+       int to_write, offset, len, nr_frags, len_max;
        struct socket *sock = po->sk.sk_socket;
        struct page *page;
-       void *data;
        int err;
 
        ph.raw = frame;
@@ -2367,51 +2512,9 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
        sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
        skb_shinfo(skb)->destructor_arg = ph.raw;
 
-       switch (po->tp_version) {
-       case TPACKET_V2:
-               tp_len = ph.h2->tp_len;
-               break;
-       default:
-               tp_len = ph.h1->tp_len;
-               break;
-       }
-       if (unlikely(tp_len > size_max)) {
-               pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
-               return -EMSGSIZE;
-       }
-
        skb_reserve(skb, hlen);
        skb_reset_network_header(skb);
 
-       if (unlikely(po->tp_tx_has_off)) {
-               int off_min, off_max, off;
-               off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
-               off_max = po->tx_ring.frame_size - tp_len;
-               if (sock->type == SOCK_DGRAM) {
-                       switch (po->tp_version) {
-                       case TPACKET_V2:
-                               off = ph.h2->tp_net;
-                               break;
-                       default:
-                               off = ph.h1->tp_net;
-                               break;
-                       }
-               } else {
-                       switch (po->tp_version) {
-                       case TPACKET_V2:
-                               off = ph.h2->tp_mac;
-                               break;
-                       default:
-                               off = ph.h1->tp_mac;
-                               break;
-                       }
-               }
-               if (unlikely((off < off_min) || (off_max < off)))
-                       return -EINVAL;
-               data = ph.raw + off;
-       } else {
-               data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
-       }
        to_write = tp_len;
 
        if (sock->type == SOCK_DGRAM) {
@@ -2419,20 +2522,17 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
                                NULL, tp_len);
                if (unlikely(err < 0))
                        return -EINVAL;
-       } else if (dev->hard_header_len) {
-               if (ll_header_truncated(dev, tp_len))
-                       return -EINVAL;
-
+       } else if (copylen) {
                skb_push(skb, dev->hard_header_len);
-               err = skb_store_bits(skb, 0, data,
-                               dev->hard_header_len);
+               skb_put(skb, copylen - dev->hard_header_len);
+               err = skb_store_bits(skb, 0, data, copylen);
                if (unlikely(err))
                        return err;
                if (!skb->protocol)
                        tpacket_set_protocol(dev, skb);
 
-               data += dev->hard_header_len;
-               to_write -= dev->hard_header_len;
+               data += copylen;
+               to_write -= copylen;
        }
 
        offset = offset_in_page(data);
@@ -2469,10 +2569,66 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
        return tp_len;
 }
 
+static int tpacket_parse_header(struct packet_sock *po, void *frame,
+                               int size_max, void **data)
+{
+       union tpacket_uhdr ph;
+       int tp_len, off;
+
+       ph.raw = frame;
+
+       switch (po->tp_version) {
+       case TPACKET_V2:
+               tp_len = ph.h2->tp_len;
+               break;
+       default:
+               tp_len = ph.h1->tp_len;
+               break;
+       }
+       if (unlikely(tp_len > size_max)) {
+               pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
+               return -EMSGSIZE;
+       }
+
+       if (unlikely(po->tp_tx_has_off)) {
+               int off_min, off_max;
+
+               off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
+               off_max = po->tx_ring.frame_size - tp_len;
+               if (po->sk.sk_type == SOCK_DGRAM) {
+                       switch (po->tp_version) {
+                       case TPACKET_V2:
+                               off = ph.h2->tp_net;
+                               break;
+                       default:
+                               off = ph.h1->tp_net;
+                               break;
+                       }
+               } else {
+                       switch (po->tp_version) {
+                       case TPACKET_V2:
+                               off = ph.h2->tp_mac;
+                               break;
+                       default:
+                               off = ph.h1->tp_mac;
+                               break;
+                       }
+               }
+               if (unlikely((off < off_min) || (off_max < off)))
+                       return -EINVAL;
+       } else {
+               off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
+       }
+
+       *data = frame + off;
+       return tp_len;
+}
+
 static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
 {
        struct sk_buff *skb;
        struct net_device *dev;
+       struct virtio_net_hdr *vnet_hdr = NULL;
        __be16 proto;
        int err, reserve = 0;
        void *ph;
@@ -2480,9 +2636,10 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
        int tp_len, size_max;
        unsigned char *addr;
+       void *data;
        int len_sum = 0;
        int status = TP_STATUS_AVAILABLE;
-       int hlen, tlen;
+       int hlen, tlen, copylen = 0;
 
        mutex_lock(&po->pg_vec_lock);
 
@@ -2515,7 +2672,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
        size_max = po->tx_ring.frame_size
                - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
 
-       if (size_max > dev->mtu + reserve + VLAN_HLEN)
+       if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
                size_max = dev->mtu + reserve + VLAN_HLEN;
 
        do {
@@ -2527,11 +2684,36 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                        continue;
                }
 
+               skb = NULL;
+               tp_len = tpacket_parse_header(po, ph, size_max, &data);
+               if (tp_len < 0)
+                       goto tpacket_error;
+
                status = TP_STATUS_SEND_REQUEST;
                hlen = LL_RESERVED_SPACE(dev);
                tlen = dev->needed_tailroom;
+               if (po->has_vnet_hdr) {
+                       vnet_hdr = data;
+                       data += sizeof(*vnet_hdr);
+                       tp_len -= sizeof(*vnet_hdr);
+                       if (tp_len < 0 ||
+                           __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
+                               tp_len = -EINVAL;
+                               goto tpacket_error;
+                       }
+                       copylen = __virtio16_to_cpu(vio_le(),
+                                                   vnet_hdr->hdr_len);
+               }
+               if (dev->hard_header_len) {
+                       if (ll_header_truncated(dev, tp_len)) {
+                               tp_len = -EINVAL;
+                               goto tpacket_error;
+                       }
+                       copylen = max_t(int, copylen, dev->hard_header_len);
+               }
                skb = sock_alloc_send_skb(&po->sk,
-                               hlen + tlen + sizeof(struct sockaddr_ll),
+                               hlen + tlen + sizeof(struct sockaddr_ll) +
+                               (copylen - dev->hard_header_len),
                                !need_wait, &err);
 
                if (unlikely(skb == NULL)) {
@@ -2540,14 +2722,16 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                                err = len_sum;
                        goto out_status;
                }
-               tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
-                                         addr, hlen);
+               tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
+                                         addr, hlen, copylen);
                if (likely(tp_len >= 0) &&
                    tp_len > dev->mtu + reserve &&
+                   !po->has_vnet_hdr &&
                    !packet_extra_vlan_len_allowed(dev, skb))
                        tp_len = -EMSGSIZE;
 
                if (unlikely(tp_len < 0)) {
+tpacket_error:
                        if (po->tp_loss) {
                                __packet_set_status(po, ph,
                                                TP_STATUS_AVAILABLE);
@@ -2561,6 +2745,11 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                        }
                }
 
+               if (po->has_vnet_hdr && packet_snd_vnet_gso(skb, vnet_hdr)) {
+                       tp_len = -EINVAL;
+                       goto tpacket_error;
+               }
+
                packet_pick_tx_queue(dev, skb);
 
                skb->destructor = tpacket_destruct_skb;
@@ -2643,12 +2832,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        struct sockcm_cookie sockc;
        struct virtio_net_hdr vnet_hdr = { 0 };
        int offset = 0;
-       int vnet_hdr_len;
        struct packet_sock *po = pkt_sk(sk);
-       unsigned short gso_type = 0;
        int hlen, tlen;
        int extra_len = 0;
-       ssize_t n;
 
        /*
         *      Get and verify the address.
@@ -2686,53 +2872,9 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        if (sock->type == SOCK_RAW)
                reserve = dev->hard_header_len;
        if (po->has_vnet_hdr) {
-               vnet_hdr_len = sizeof(vnet_hdr);
-
-               err = -EINVAL;
-               if (len < vnet_hdr_len)
-                       goto out_unlock;
-
-               len -= vnet_hdr_len;
-
-               err = -EFAULT;
-               n = copy_from_iter(&vnet_hdr, vnet_hdr_len, &msg->msg_iter);
-               if (n != vnet_hdr_len)
-                       goto out_unlock;
-
-               if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
-                   (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
-                    __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 >
-                     __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len)))
-                       vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(),
-                                __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) +
-                               __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2);
-
-               err = -EINVAL;
-               if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len)
+               err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
+               if (err)
                        goto out_unlock;
-
-               if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
-                       switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
-                       case VIRTIO_NET_HDR_GSO_TCPV4:
-                               gso_type = SKB_GSO_TCPV4;
-                               break;
-                       case VIRTIO_NET_HDR_GSO_TCPV6:
-                               gso_type = SKB_GSO_TCPV6;
-                               break;
-                       case VIRTIO_NET_HDR_GSO_UDP:
-                               gso_type = SKB_GSO_UDP;
-                               break;
-                       default:
-                               goto out_unlock;
-                       }
-
-                       if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
-                               gso_type |= SKB_GSO_TCP_ECN;
-
-                       if (vnet_hdr.gso_size == 0)
-                               goto out_unlock;
-
-               }
        }
 
        if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
@@ -2744,7 +2886,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        }
 
        err = -EMSGSIZE;
-       if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
+       if (!vnet_hdr.gso_type &&
+           (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
                goto out_unlock;
 
        err = -ENOBUFS;
@@ -2775,7 +2918,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
 
        sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
 
-       if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
+       if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
            !packet_extra_vlan_len_allowed(dev, skb)) {
                err = -EMSGSIZE;
                goto out_free;
@@ -2789,24 +2932,10 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
        packet_pick_tx_queue(dev, skb);
 
        if (po->has_vnet_hdr) {
-               if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
-                       u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start);
-                       u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset);
-                       if (!skb_partial_csum_set(skb, s, o)) {
-                               err = -EINVAL;
-                               goto out_free;
-                       }
-               }
-
-               skb_shinfo(skb)->gso_size =
-                       __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size);
-               skb_shinfo(skb)->gso_type = gso_type;
-
-               /* Header must be checked, and gso_segs computed. */
-               skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
-               skb_shinfo(skb)->gso_segs = 0;
-
-               len += vnet_hdr_len;
+               err = packet_snd_vnet_gso(skb, &vnet_hdr);
+               if (err)
+                       goto out_free;
+               len += sizeof(vnet_hdr);
        }
 
        skb_probe_transport_header(skb, reserve);
@@ -3177,51 +3306,10 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
                packet_rcv_has_room(pkt_sk(sk), NULL);
 
        if (pkt_sk(sk)->has_vnet_hdr) {
-               struct virtio_net_hdr vnet_hdr = { 0 };
-
-               err = -EINVAL;
-               vnet_hdr_len = sizeof(vnet_hdr);
-               if (len < vnet_hdr_len)
-                       goto out_free;
-
-               len -= vnet_hdr_len;
-
-               if (skb_is_gso(skb)) {
-                       struct skb_shared_info *sinfo = skb_shinfo(skb);
-
-                       /* This is a hint as to how much should be linear. */
-                       vnet_hdr.hdr_len =
-                               __cpu_to_virtio16(vio_le(), skb_headlen(skb));
-                       vnet_hdr.gso_size =
-                               __cpu_to_virtio16(vio_le(), sinfo->gso_size);
-                       if (sinfo->gso_type & SKB_GSO_TCPV4)
-                               vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
-                       else if (sinfo->gso_type & SKB_GSO_TCPV6)
-                               vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
-                       else if (sinfo->gso_type & SKB_GSO_UDP)
-                               vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
-                       else if (sinfo->gso_type & SKB_GSO_FCOE)
-                               goto out_free;
-                       else
-                               BUG();
-                       if (sinfo->gso_type & SKB_GSO_TCP_ECN)
-                               vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
-               } else
-                       vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
-
-               if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                       vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(),
-                                         skb_checksum_start_offset(skb));
-                       vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(),
-                                                        skb->csum_offset);
-               } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
-                       vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
-               } /* else everything is zero */
-
-               err = memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_len);
-               if (err < 0)
+               err = packet_rcv_vnet(msg, skb, &len);
+               if (err)
                        goto out_free;
+               vnet_hdr_len = sizeof(struct virtio_net_hdr);
        }
 
        /* You lose any data beyond the buffer you gave. If it worries
@@ -3552,8 +3640,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
                }
                if (optlen < len)
                        return -EINVAL;
-               if (pkt_sk(sk)->has_vnet_hdr)
-                       return -EINVAL;
                if (copy_from_user(&req_u.req, optval, len))
                        return -EFAULT;
                return packet_set_ring(sk, &req_u, 0,
index f53bf3b6558b094b6e1b379568620157b4ffae17..48ebccf0fe727f70e9f09d77b053cf4e2d2324ca 100644 (file)
@@ -57,6 +57,8 @@ struct rfkill {
 
        bool                    registered;
        bool                    persistent;
+       bool                    polling_paused;
+       bool                    suspended;
 
        const struct rfkill_ops *ops;
        void                    *data;
@@ -233,29 +235,6 @@ static void rfkill_event(struct rfkill *rfkill)
        rfkill_send_events(rfkill, RFKILL_OP_CHANGE);
 }
 
-static bool __rfkill_set_hw_state(struct rfkill *rfkill,
-                                 bool blocked, bool *change)
-{
-       unsigned long flags;
-       bool prev, any;
-
-       BUG_ON(!rfkill);
-
-       spin_lock_irqsave(&rfkill->lock, flags);
-       prev = !!(rfkill->state & RFKILL_BLOCK_HW);
-       if (blocked)
-               rfkill->state |= RFKILL_BLOCK_HW;
-       else
-               rfkill->state &= ~RFKILL_BLOCK_HW;
-       *change = prev != blocked;
-       any = !!(rfkill->state & RFKILL_BLOCK_ANY);
-       spin_unlock_irqrestore(&rfkill->lock, flags);
-
-       rfkill_led_trigger_event(rfkill);
-
-       return any;
-}
-
 /**
  * rfkill_set_block - wrapper for set_block method
  *
@@ -285,7 +264,7 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
        spin_lock_irqsave(&rfkill->lock, flags);
        prev = rfkill->state & RFKILL_BLOCK_SW;
 
-       if (rfkill->state & RFKILL_BLOCK_SW)
+       if (prev)
                rfkill->state |= RFKILL_BLOCK_SW_PREV;
        else
                rfkill->state &= ~RFKILL_BLOCK_SW_PREV;
@@ -332,8 +311,7 @@ static atomic_t rfkill_input_disabled = ATOMIC_INIT(0);
  * @blocked: the new state
  *
  * This function sets the state of all switches of given type,
- * unless a specific switch is claimed by userspace (in which case,
- * that switch is left alone) or suspended.
+ * unless a specific switch is suspended.
  *
  * Caller must have acquired rfkill_global_mutex.
  */
@@ -480,14 +458,26 @@ bool rfkill_get_global_sw_state(const enum rfkill_type type)
 
 bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
 {
-       bool ret, change;
+       unsigned long flags;
+       bool ret, prev;
 
-       ret = __rfkill_set_hw_state(rfkill, blocked, &change);
+       BUG_ON(!rfkill);
+
+       spin_lock_irqsave(&rfkill->lock, flags);
+       prev = !!(rfkill->state & RFKILL_BLOCK_HW);
+       if (blocked)
+               rfkill->state |= RFKILL_BLOCK_HW;
+       else
+               rfkill->state &= ~RFKILL_BLOCK_HW;
+       ret = !!(rfkill->state & RFKILL_BLOCK_ANY);
+       spin_unlock_irqrestore(&rfkill->lock, flags);
+
+       rfkill_led_trigger_event(rfkill);
 
        if (!rfkill->registered)
                return ret;
 
-       if (change)
+       if (prev != blocked)
                schedule_work(&rfkill->uevent_work);
 
        return ret;
@@ -730,20 +720,12 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
 }
 static DEVICE_ATTR_RW(state);
 
-static ssize_t claim_show(struct device *dev, struct device_attribute *attr,
-                         char *buf)
-{
-       return sprintf(buf, "%d\n", 0);
-}
-static DEVICE_ATTR_RO(claim);
-
 static struct attribute *rfkill_dev_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_type.attr,
        &dev_attr_index.attr,
        &dev_attr_persistent.attr,
        &dev_attr_state.attr,
-       &dev_attr_claim.attr,
        &dev_attr_soft.attr,
        &dev_attr_hard.attr,
        NULL,
@@ -786,6 +768,7 @@ void rfkill_pause_polling(struct rfkill *rfkill)
        if (!rfkill->ops->poll)
                return;
 
+       rfkill->polling_paused = true;
        cancel_delayed_work_sync(&rfkill->poll_work);
 }
 EXPORT_SYMBOL(rfkill_pause_polling);
@@ -797,6 +780,11 @@ void rfkill_resume_polling(struct rfkill *rfkill)
        if (!rfkill->ops->poll)
                return;
 
+       rfkill->polling_paused = false;
+
+       if (rfkill->suspended)
+               return;
+
        queue_delayed_work(system_power_efficient_wq,
                           &rfkill->poll_work, 0);
 }
@@ -807,7 +795,8 @@ static int rfkill_suspend(struct device *dev)
 {
        struct rfkill *rfkill = to_rfkill(dev);
 
-       rfkill_pause_polling(rfkill);
+       rfkill->suspended = true;
+       cancel_delayed_work_sync(&rfkill->poll_work);
 
        return 0;
 }
@@ -817,12 +806,16 @@ static int rfkill_resume(struct device *dev)
        struct rfkill *rfkill = to_rfkill(dev);
        bool cur;
 
+       rfkill->suspended = false;
+
        if (!rfkill->persistent) {
                cur = !!(rfkill->state & RFKILL_BLOCK_SW);
                rfkill_set_block(rfkill, cur);
        }
 
-       rfkill_resume_polling(rfkill);
+       if (rfkill->ops->poll && !rfkill->polling_paused)
+               queue_delayed_work(system_power_efficient_wq,
+                                  &rfkill->poll_work, 0);
 
        return 0;
 }
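The two new flags gate the poll work; a hypothetical helper expressing the combined condition (the patch open-codes these checks rather than adding such a function):

        static bool rfkill_poll_allowed(const struct rfkill *rfkill)
        {
                /* poll only for drivers that implement it, and only while
                 * neither rfkill_pause_polling() nor system suspend has
                 * stopped it
                 */
                return rfkill->ops->poll && !rfkill->polling_paused &&
                       !rfkill->suspended;
        }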
@@ -1095,17 +1088,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
        return res;
 }
 
-static bool rfkill_readable(struct rfkill_data *data)
-{
-       bool r;
-
-       mutex_lock(&data->mtx);
-       r = !list_empty(&data->events);
-       mutex_unlock(&data->mtx);
-
-       return r;
-}
-
 static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                               size_t count, loff_t *pos)
 {
@@ -1122,8 +1104,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
                        goto out;
                }
                mutex_unlock(&data->mtx);
+               /* since we re-check and it just compares pointers,
+                * using !list_empty() without locking isn't a problem
+                */
                ret = wait_event_interruptible(data->read_wait,
-                                              rfkill_readable(data));
+                                              !list_empty(&data->events));
                mutex_lock(&data->mtx);
 
                if (ret)
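
Editor's note: the hunk above drops the locked rfkill_readable() helper and waits directly on !list_empty(). A minimal sketch of that idiom, assuming a hypothetical my_data structure (names are illustrative, not from the driver): the unlocked condition is only a wake-up hint — list_empty() merely compares pointers — and the authoritative check is repeated under the mutex after the wait returns.

	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/wait.h>

	struct my_data {
		struct mutex mtx;
		struct list_head events;	/* guarded by mtx */
		wait_queue_head_t read_wait;	/* woken when events is appended to */
	};

	static int my_wait_for_event(struct my_data *data)
	{
		int ret;

		for (;;) {
			mutex_lock(&data->mtx);
			if (!list_empty(&data->events))
				return 0;	/* caller proceeds with mtx held */
			mutex_unlock(&data->mtx);

			/* Unlocked peek is safe: list_empty() just compares
			 * pointers, and the result is re-checked under mtx. */
			ret = wait_event_interruptible(data->read_wait,
						       !list_empty(&data->events));
			if (ret)
				return ret;	/* interrupted by a signal */
		}
	}
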
index 2934a73a5981ad154888904b67f8082fedac4a3c..71598f5b11b71db20495a095eb167c5e909f8b0e 100644 (file)
@@ -252,7 +252,7 @@ struct rxrpc_connection {
        struct rxrpc_security   *security;      /* applied security module */
        struct key              *key;           /* security for this connection (client) */
        struct key              *server_key;    /* security for this service */
-       struct crypto_blkcipher *cipher;        /* encryption handle */
+       struct crypto_skcipher  *cipher;        /* encryption handle */
        struct rxrpc_crypt      csum_iv;        /* packet checksum base */
        unsigned long           events;
 #define RXRPC_CONN_CHALLENGE   0               /* send challenge packet */
index 3f6571651d32ebf9890ec0e6d28228e62655b71e..3fb492eedeb98e3e7e97496e792f83ad61fa6436 100644 (file)
  *     "afs@CAMBRIDGE.REDHAT.COM>
  */
 
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/skbuff.h>
 #include <linux/key-type.h>
-#include <linux/crypto.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
 #include <net/sock.h>
@@ -824,7 +824,7 @@ static void rxrpc_free_preparse(struct key_preparsed_payload *prep)
  */
 static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
 {
-       struct crypto_blkcipher *ci;
+       struct crypto_skcipher *ci;
 
        _enter("%zu", prep->datalen);
 
@@ -833,13 +833,13 @@ static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
 
        memcpy(&prep->payload.data[2], prep->data, 8);
 
-       ci = crypto_alloc_blkcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC);
+       ci = crypto_alloc_skcipher("pcbc(des)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(ci)) {
                _leave(" = %ld", PTR_ERR(ci));
                return PTR_ERR(ci);
        }
 
-       if (crypto_blkcipher_setkey(ci, prep->data, 8) < 0)
+       if (crypto_skcipher_setkey(ci, prep->data, 8) < 0)
                BUG();
 
        prep->payload.data[0] = ci;
@@ -853,7 +853,7 @@ static int rxrpc_preparse_s(struct key_preparsed_payload *prep)
 static void rxrpc_free_preparse_s(struct key_preparsed_payload *prep)
 {
        if (prep->payload.data[0])
-               crypto_free_blkcipher(prep->payload.data[0]);
+               crypto_free_skcipher(prep->payload.data[0]);
 }
 
 /*
@@ -870,7 +870,7 @@ static void rxrpc_destroy(struct key *key)
 static void rxrpc_destroy_s(struct key *key)
 {
        if (key->payload.data[0]) {
-               crypto_free_blkcipher(key->payload.data[0]);
+               crypto_free_skcipher(key->payload.data[0]);
                key->payload.data[0] = NULL;
        }
 }
index d7a9ab5a9d9ce8c95d60b5230c476e41740db607..0d96b48a64925840cfc2249e928ebdde51ac00a8 100644 (file)
@@ -9,11 +9,11 @@
  * 2 of the License, or (at your option) any later version.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/skbuff.h>
 #include <linux/udp.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <linux/ctype.h>
 #include <linux/slab.h>
@@ -53,7 +53,7 @@ MODULE_LICENSE("GPL");
  * alloc routine, but since we have it to hand, we use it to decrypt RESPONSE
  * packets
  */
-static struct crypto_blkcipher *rxkad_ci;
+static struct crypto_skcipher *rxkad_ci;
 static DEFINE_MUTEX(rxkad_ci_mutex);
 
 /*
@@ -61,7 +61,7 @@ static DEFINE_MUTEX(rxkad_ci_mutex);
  */
 static int rxkad_init_connection_security(struct rxrpc_connection *conn)
 {
-       struct crypto_blkcipher *ci;
+       struct crypto_skcipher *ci;
        struct rxrpc_key_token *token;
        int ret;
 
@@ -70,15 +70,15 @@ static int rxkad_init_connection_security(struct rxrpc_connection *conn)
        token = conn->key->payload.data[0];
        conn->security_ix = token->security_index;
 
-       ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+       ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(ci)) {
                _debug("no cipher");
                ret = PTR_ERR(ci);
                goto error;
        }
 
-       if (crypto_blkcipher_setkey(ci, token->kad->session_key,
-                                   sizeof(token->kad->session_key)) < 0)
+       if (crypto_skcipher_setkey(ci, token->kad->session_key,
+                                  sizeof(token->kad->session_key)) < 0)
                BUG();
 
        switch (conn->security_level) {
@@ -113,7 +113,7 @@ error:
 static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
 {
        struct rxrpc_key_token *token;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
        struct scatterlist sg[2];
        struct rxrpc_crypt iv;
        struct {
@@ -128,10 +128,6 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
        token = conn->key->payload.data[0];
        memcpy(&iv, token->kad->session_key, sizeof(iv));
 
-       desc.tfm = conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
-
        tmpbuf.x[0] = conn->epoch;
        tmpbuf.x[1] = conn->cid;
        tmpbuf.x[2] = 0;
@@ -139,7 +135,13 @@ static void rxkad_prime_packet_security(struct rxrpc_connection *conn)
 
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       skcipher_request_set_tfm(req, conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 
        memcpy(&conn->csum_iv, &tmpbuf.x[2], sizeof(conn->csum_iv));
        ASSERTCMP(conn->csum_iv.n[0], ==, tmpbuf.x[2]);
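
Editor's note: the hunk above is the first instance of a pattern repeated throughout rxkad.c — the blkcipher_desc triplet (tfm/info/flags) becomes an skcipher request built on the stack, configured, run synchronously, and wiped. A condensed sketch of that sequence over a hypothetical buffer (names not from the patch; assumes the tfm was allocated with CRYPTO_ALG_ASYNC in the mask so the call completes synchronously):

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>

	/* Encrypt buf in place with a synchronous skcipher tfm and an IV. */
	static int example_encrypt(struct crypto_skcipher *tfm,
				   void *buf, unsigned int len, u8 *iv)
	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);	/* request lives on the stack */
		struct scatterlist sg;
		int ret;

		sg_init_one(&sg, buf, len);

		skcipher_request_set_tfm(req, tfm);
		/* No completion callback: the operation runs synchronously. */
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		ret = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);		/* scrub request state from the stack */
		return ret;
	}
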
@@ -156,7 +158,7 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
                                    void *sechdr)
 {
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
        struct {
@@ -177,13 +179,16 @@ static int rxkad_secure_packet_auth(const struct rxrpc_call *call,
 
        /* start the encryption afresh */
        memset(&iv, 0, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 
        memcpy(sechdr, &tmpbuf, sizeof(tmpbuf));
 
@@ -203,13 +208,14 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
        struct rxkad_level2_hdr rxkhdr
                __attribute__((aligned(8))); /* must be all on one page */
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
        struct sk_buff *trailer;
        unsigned int len;
        u16 check;
        int nsg;
+       int err;
 
        sp = rxrpc_skb(skb);
 
@@ -223,28 +229,38 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
        /* encrypt from the session key */
        token = call->conn->key->payload.data[0];
        memcpy(&iv, token->kad->session_key, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        sg_init_one(&sg[0], sechdr, sizeof(rxkhdr));
        sg_init_one(&sg[1], &rxkhdr, sizeof(rxkhdr));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(rxkhdr));
+
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(rxkhdr), iv.x);
+
+       crypto_skcipher_encrypt(req);
 
        /* we want to encrypt the skbuff in-place */
        nsg = skb_cow_data(skb, 0, &trailer);
+       err = -ENOMEM;
        if (nsg < 0 || nsg > 16)
-               return -ENOMEM;
+               goto out;
 
        len = data_size + call->conn->size_align - 1;
        len &= ~(call->conn->size_align - 1);
 
        sg_init_table(sg, nsg);
        skb_to_sgvec(skb, sg, 0, len);
-       crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+
+       skcipher_request_set_crypt(req, sg, sg, len, iv.x);
+
+       crypto_skcipher_encrypt(req);
 
        _leave(" = 0");
-       return 0;
+       err = 0;
+
+out:
+       skcipher_request_zero(req);
+       return err;
 }
 
 /*
@@ -256,7 +272,7 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
                                void *sechdr)
 {
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
        struct {
@@ -281,9 +297,6 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
 
        /* continue encrypting from where we left off */
        memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        /* calculate the security checksum */
        x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
@@ -293,7 +306,13 @@ static int rxkad_secure_packet(const struct rxrpc_call *call,
 
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 
        y = ntohl(tmpbuf.x[1]);
        y = (y >> 16) & 0xffff;
@@ -330,7 +349,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
 {
        struct rxkad_level1_hdr sechdr;
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
        struct sk_buff *trailer;
@@ -352,11 +371,13 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
 
        /* start the decryption afresh */
        memset(&iv, 0, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
-       crypto_blkcipher_decrypt_iv(&desc, sg, sg, 8);
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, 8, iv.x);
+
+       crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
 
        /* remove the decrypted packet length */
        if (skb_copy_bits(skb, 0, &sechdr, sizeof(sechdr)) < 0)
@@ -405,7 +426,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
        const struct rxrpc_key_token *token;
        struct rxkad_level2_hdr sechdr;
        struct rxrpc_skb_priv *sp;
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist _sg[4], *sg;
        struct sk_buff *trailer;
@@ -435,11 +456,13 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
        /* decrypt from the session key */
        token = call->conn->key->payload.data[0];
        memcpy(&iv, token->kad->session_key, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
-       crypto_blkcipher_decrypt_iv(&desc, sg, sg, skb->len);
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, skb->len, iv.x);
+
+       crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
        if (sg != _sg)
                kfree(sg);
 
@@ -487,7 +510,7 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
                               struct sk_buff *skb,
                               u32 *_abort_code)
 {
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, call->conn->cipher);
        struct rxrpc_skb_priv *sp;
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
@@ -516,9 +539,6 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
 
        /* continue encrypting from where we left off */
        memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
-       desc.tfm = call->conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        /* validate the security checksum */
        x = htonl(call->channel << (32 - RXRPC_CIDSHIFT));
@@ -528,7 +548,13 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
 
        sg_init_one(&sg[0], &tmpbuf, sizeof(tmpbuf));
        sg_init_one(&sg[1], &tmpbuf, sizeof(tmpbuf));
-       crypto_blkcipher_encrypt_iv(&desc, &sg[0], &sg[1], sizeof(tmpbuf));
+
+       skcipher_request_set_tfm(req, call->conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg[1], &sg[0], sizeof(tmpbuf), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 
        y = ntohl(tmpbuf.x[1]);
        y = (y >> 16) & 0xffff;
@@ -718,18 +744,21 @@ static void rxkad_encrypt_response(struct rxrpc_connection *conn,
                                   struct rxkad_response *resp,
                                   const struct rxkad_key *s2)
 {
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, conn->cipher);
        struct rxrpc_crypt iv;
        struct scatterlist sg[2];
 
        /* continue encrypting from where we left off */
        memcpy(&iv, s2->session_key, sizeof(iv));
-       desc.tfm = conn->cipher;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
-       crypto_blkcipher_encrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
+
+       skcipher_request_set_tfm(req, conn->cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
+
+       crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 }
 
 /*
@@ -822,7 +851,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
                                time_t *_expiry,
                                u32 *_abort_code)
 {
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
        struct rxrpc_crypt iv, key;
        struct scatterlist sg[1];
        struct in_addr addr;
@@ -853,12 +882,21 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
 
        memcpy(&iv, &conn->server_key->payload.data[2], sizeof(iv));
 
-       desc.tfm = conn->server_key->payload.data[0];
-       desc.info = iv.x;
-       desc.flags = 0;
+       req = skcipher_request_alloc(conn->server_key->payload.data[0],
+                                    GFP_NOFS);
+       if (!req) {
+               *_abort_code = RXKADNOAUTH;
+               ret = -ENOMEM;
+               goto error;
+       }
 
        sg_init_one(&sg[0], ticket, ticket_len);
-       crypto_blkcipher_decrypt_iv(&desc, sg, sg, ticket_len);
+
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, ticket_len, iv.x);
+
+       crypto_skcipher_decrypt(req);
+       skcipher_request_free(req);
 
        p = ticket;
        end = p + ticket_len;
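
Editor's note: rxkad_decrypt_ticket() above uses a heap-allocated request (skcipher_request_alloc with GFP_NOFS) rather than the on-stack form, and aborts with RXKADNOAUTH when the allocation fails. A minimal sketch of that variant with hypothetical names and the error handling reduced to the essentials:

	#include <crypto/skcipher.h>
	#include <linux/gfp.h>
	#include <linux/scatterlist.h>

	static int example_decrypt_ticket(struct crypto_skcipher *tfm,
					  void *ticket, unsigned int ticket_len,
					  u8 *iv)
	{
		struct skcipher_request *req;
		struct scatterlist sg;
		int ret;

		req = skcipher_request_alloc(tfm, GFP_NOFS);	/* may fail under memory pressure */
		if (!req)
			return -ENOMEM;

		sg_init_one(&sg, ticket, ticket_len);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, &sg, &sg, ticket_len, iv);

		ret = crypto_skcipher_decrypt(req);
		skcipher_request_free(req);			/* release the request */
		return ret;
	}
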
@@ -966,7 +1004,7 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
                                   struct rxkad_response *resp,
                                   const struct rxrpc_crypt *session_key)
 {
-       struct blkcipher_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, rxkad_ci);
        struct scatterlist sg[2];
        struct rxrpc_crypt iv;
 
@@ -976,17 +1014,21 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
        ASSERT(rxkad_ci != NULL);
 
        mutex_lock(&rxkad_ci_mutex);
-       if (crypto_blkcipher_setkey(rxkad_ci, session_key->x,
-                                   sizeof(*session_key)) < 0)
+       if (crypto_skcipher_setkey(rxkad_ci, session_key->x,
+                                  sizeof(*session_key)) < 0)
                BUG();
 
        memcpy(&iv, session_key, sizeof(iv));
-       desc.tfm = rxkad_ci;
-       desc.info = iv.x;
-       desc.flags = 0;
 
        rxkad_sg_set_buf2(sg, &resp->encrypted, sizeof(resp->encrypted));
-       crypto_blkcipher_decrypt_iv(&desc, sg, sg, sizeof(resp->encrypted));
+
+       skcipher_request_set_tfm(req, rxkad_ci);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, sizeof(resp->encrypted), iv.x);
+
+       crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
+
        mutex_unlock(&rxkad_ci_mutex);
 
        _leave("");
@@ -1115,7 +1157,7 @@ static void rxkad_clear(struct rxrpc_connection *conn)
        _enter("");
 
        if (conn->cipher)
-               crypto_free_blkcipher(conn->cipher);
+               crypto_free_skcipher(conn->cipher);
 }
 
 /*
@@ -1141,7 +1183,7 @@ static __init int rxkad_init(void)
 
        /* pin the cipher we need so that the crypto layer doesn't invoke
         * keventd to go get it */
-       rxkad_ci = crypto_alloc_blkcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
+       rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(rxkad_ci))
                return PTR_ERR(rxkad_ci);
 
@@ -1155,7 +1197,7 @@ static __exit void rxkad_exit(void)
        _enter("");
 
        rxrpc_unregister_security(&rxkad);
-       crypto_free_blkcipher(rxkad_ci);
+       crypto_free_skcipher(rxkad_ci);
 }
 
 module_exit(rxkad_exit);
index f26bdea875c1a8413774e767bb897012d969d2e6..a1cd778240cd7f2b14336c26ba6eec282c1c60b6 100644 (file)
@@ -403,6 +403,8 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
                if (len <= cl->deficit) {
                        cl->deficit -= len;
                        skb = qdisc_dequeue_peeked(cl->qdisc);
+                       if (unlikely(skb == NULL))
+                               goto out;
                        if (cl->qdisc->q.qlen == 0)
                                list_del(&cl->alist);
 
index 1543e39f47c33f6662abd80b5846a845d3760fdc..912eb1685a5d99110c66d0dc4c7b35eac7f37c00 100644 (file)
@@ -27,9 +27,9 @@
  *   Vlad Yasevich     <vladislav.yasevich@hp.com>
  */
 
+#include <crypto/hash.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <linux/scatterlist.h>
 #include <net/sctp/sctp.h>
 #include <net/sctp/auth.h>
@@ -448,7 +448,7 @@ struct sctp_shared_key *sctp_auth_get_shkey(
  */
 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
 {
-       struct crypto_hash *tfm = NULL;
+       struct crypto_shash *tfm = NULL;
        __u16   id;
 
        /* If AUTH extension is disabled, we are done */
@@ -462,9 +462,8 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
                return 0;
 
        /* Allocate the array of pointers to transforms */
-       ep->auth_hmacs = kzalloc(
-                           sizeof(struct crypto_hash *) * SCTP_AUTH_NUM_HMACS,
-                           gfp);
+       ep->auth_hmacs = kzalloc(sizeof(struct crypto_shash *) *
+                                SCTP_AUTH_NUM_HMACS, gfp);
        if (!ep->auth_hmacs)
                return -ENOMEM;
 
@@ -483,8 +482,7 @@ int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp)
                        continue;
 
                /* Allocate the ID */
-               tfm = crypto_alloc_hash(sctp_hmac_list[id].hmac_name, 0,
-                                       CRYPTO_ALG_ASYNC);
+               tfm = crypto_alloc_shash(sctp_hmac_list[id].hmac_name, 0, 0);
                if (IS_ERR(tfm))
                        goto out_err;
 
@@ -500,7 +498,7 @@ out_err:
 }
 
 /* Destroy the hmac tfm array */
-void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[])
+void sctp_auth_destroy_hmacs(struct crypto_shash *auth_hmacs[])
 {
        int i;
 
@@ -508,8 +506,7 @@ void sctp_auth_destroy_hmacs(struct crypto_hash *auth_hmacs[])
                return;
 
        for (i = 0; i < SCTP_AUTH_NUM_HMACS; i++) {
-               if (auth_hmacs[i])
-                       crypto_free_hash(auth_hmacs[i]);
+               crypto_free_shash(auth_hmacs[i]);
        }
        kfree(auth_hmacs);
 }
@@ -709,8 +706,7 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
                              struct sctp_auth_chunk *auth,
                              gfp_t gfp)
 {
-       struct scatterlist sg;
-       struct hash_desc desc;
+       struct crypto_shash *tfm;
        struct sctp_auth_bytes *asoc_key;
        __u16 key_id, hmac_id;
        __u8 *digest;
@@ -742,16 +738,22 @@ void sctp_auth_calculate_hmac(const struct sctp_association *asoc,
 
        /* set up scatter list */
        end = skb_tail_pointer(skb);
-       sg_init_one(&sg, auth, end - (unsigned char *)auth);
 
-       desc.tfm = asoc->ep->auth_hmacs[hmac_id];
-       desc.flags = 0;
+       tfm = asoc->ep->auth_hmacs[hmac_id];
 
        digest = auth->auth_hdr.hmac;
-       if (crypto_hash_setkey(desc.tfm, &asoc_key->data[0], asoc_key->len))
+       if (crypto_shash_setkey(tfm, &asoc_key->data[0], asoc_key->len))
                goto free;
 
-       crypto_hash_digest(&desc, &sg, sg.length, digest);
+       {
+               SHASH_DESC_ON_STACK(desc, tfm);
+
+               desc->tfm = tfm;
+               desc->flags = 0;
+               crypto_shash_digest(desc, (u8 *)auth,
+                                   end - (unsigned char *)auth, digest);
+               shash_desc_zero(desc);
+       }
 
 free:
        if (free_key)
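
Editor's note: the block above is the synchronous-hash counterpart of the skcipher conversions — crypto_hash/hash_desc becomes crypto_shash with a descriptor on the stack, and the scatterlist disappears because shash digests flat buffers. A condensed sketch of computing an HMAC that way (hypothetical names; the real code keys the tfm with the association key and digests the AUTH chunk):

	#include <crypto/hash.h>

	/* digest = HMAC(key, data[0..len)) using a synchronous shash tfm. */
	static int example_hmac(struct crypto_shash *tfm,
				const u8 *key, unsigned int keylen,
				const u8 *data, unsigned int len, u8 *digest)
	{
		int err;

		err = crypto_shash_setkey(tfm, key, keylen);
		if (err)
			return err;

		{
			SHASH_DESC_ON_STACK(desc, tfm);	/* sized for this tfm */

			desc->tfm = tfm;
			desc->flags = 0;		/* no CRYPTO_TFM_REQ_MAY_SLEEP */
			err = crypto_shash_digest(desc, data, len, digest);
			shash_desc_zero(desc);		/* wipe per-request state */
		}
		return err;
	}
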
index 2522a61752916b58b4a8f94d61e4d68068232d1b..9d494e35e7f9f84d70e3b747c4c335a40a38044c 100644 (file)
@@ -42,7 +42,6 @@
 #include <linux/slab.h>
 #include <linux/in.h>
 #include <linux/random.h>      /* get_random_bytes() */
-#include <linux/crypto.h>
 #include <net/sock.h>
 #include <net/ipv6.h>
 #include <net/sctp/sctp.h>
index bf61dfb8e09eea1034c7b9ddd40583752033230c..49d2cc751386f3208c67eed17ec9c3f15b801f50 100644 (file)
@@ -935,15 +935,22 @@ static struct sctp_association *__sctp_lookup_association(
                                        struct sctp_transport **pt)
 {
        struct sctp_transport *t;
+       struct sctp_association *asoc = NULL;
 
+       rcu_read_lock();
        t = sctp_addrs_lookup_transport(net, local, peer);
-       if (!t || t->dead)
-               return NULL;
+       if (!t || !sctp_transport_hold(t))
+               goto out;
 
-       sctp_association_hold(t->asoc);
+       asoc = t->asoc;
+       sctp_association_hold(asoc);
        *pt = t;
 
-       return t->asoc;
+       sctp_transport_put(t);
+
+out:
+       rcu_read_unlock();
+       return asoc;
 }
 
 /* Look up an association. protected by RCU read lock */
@@ -955,9 +962,7 @@ struct sctp_association *sctp_lookup_association(struct net *net,
 {
        struct sctp_association *asoc;
 
-       rcu_read_lock();
        asoc = __sctp_lookup_association(net, laddr, paddr, transportp);
-       rcu_read_unlock();
 
        return asoc;
 }
index 684c5b31563badc1f097434211422f495d356aad..ded7d931a6a5b218c7bbd4481acdea1d2bc29325 100644 (file)
@@ -165,8 +165,6 @@ static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_associa
        list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list,
                        transports) {
                addr = &transport->ipaddr;
-               if (transport->dead)
-                       continue;
 
                af = sctp_get_af_specific(addr->sa.sa_family);
                if (af->cmp_addr(addr, primary)) {
@@ -380,6 +378,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
        }
 
        transport = (struct sctp_transport *)v;
+       if (!sctp_transport_hold(transport))
+               return 0;
        assoc = transport->asoc;
        epb = &assoc->base;
        sk = epb->sk;
@@ -412,6 +412,8 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
                sk->sk_rcvbuf);
        seq_printf(seq, "\n");
 
+       sctp_transport_put(transport);
+
        return 0;
 }
 
@@ -489,12 +491,12 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
        }
 
        tsp = (struct sctp_transport *)v;
+       if (!sctp_transport_hold(tsp))
+               return 0;
        assoc = tsp->asoc;
 
        list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
                                transports) {
-               if (tsp->dead)
-                       continue;
                /*
                 * The remote address (ADDR)
                 */
@@ -544,6 +546,8 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
                seq_printf(seq, "\n");
        }
 
+       sctp_transport_put(tsp);
+
        return 0;
 }
 
index 5d6a03fad3789a12290f5f14c5a7efa69c98f41a..1296e555fe29113e65fd74a3112c7463f224dc25 100644 (file)
@@ -45,6 +45,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/ip.h>
@@ -52,7 +53,6 @@
 #include <linux/net.h>
 #include <linux/inet.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/slab.h>
 #include <net/sock.h>
 
@@ -1606,7 +1606,6 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
 {
        sctp_cookie_param_t *retval;
        struct sctp_signed_cookie *cookie;
-       struct scatterlist sg;
        int headersize, bodysize;
 
        /* Header size is static data prior to the actual cookie, including
@@ -1663,16 +1662,19 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
               ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len);
 
        if (sctp_sk(ep->base.sk)->hmac) {
-               struct hash_desc desc;
+               SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac);
+               int err;
 
                /* Sign the message.  */
-               sg_init_one(&sg, &cookie->c, bodysize);
-               desc.tfm = sctp_sk(ep->base.sk)->hmac;
-               desc.flags = 0;
-
-               if (crypto_hash_setkey(desc.tfm, ep->secret_key,
-                                      sizeof(ep->secret_key)) ||
-                   crypto_hash_digest(&desc, &sg, bodysize, cookie->signature))
+               desc->tfm = sctp_sk(ep->base.sk)->hmac;
+               desc->flags = 0;
+
+               err = crypto_shash_setkey(desc->tfm, ep->secret_key,
+                                         sizeof(ep->secret_key)) ?:
+                     crypto_shash_digest(desc, (u8 *)&cookie->c, bodysize,
+                                         cookie->signature);
+               shash_desc_zero(desc);
+               if (err)
                        goto free_cookie;
        }
 
@@ -1697,12 +1699,10 @@ struct sctp_association *sctp_unpack_cookie(
        struct sctp_cookie *bear_cookie;
        int headersize, bodysize, fixed_size;
        __u8 *digest = ep->digest;
-       struct scatterlist sg;
        unsigned int len;
        sctp_scope_t scope;
        struct sk_buff *skb = chunk->skb;
        ktime_t kt;
-       struct hash_desc desc;
 
        /* Header size is static data prior to the actual cookie, including
         * any padding.
@@ -1733,16 +1733,23 @@ struct sctp_association *sctp_unpack_cookie(
                goto no_hmac;
 
        /* Check the signature.  */
-       sg_init_one(&sg, bear_cookie, bodysize);
-       desc.tfm = sctp_sk(ep->base.sk)->hmac;
-       desc.flags = 0;
-
-       memset(digest, 0x00, SCTP_SIGNATURE_SIZE);
-       if (crypto_hash_setkey(desc.tfm, ep->secret_key,
-                              sizeof(ep->secret_key)) ||
-           crypto_hash_digest(&desc, &sg, bodysize, digest)) {
-               *error = -SCTP_IERROR_NOMEM;
-               goto fail;
+       {
+               SHASH_DESC_ON_STACK(desc, sctp_sk(ep->base.sk)->hmac);
+               int err;
+
+               desc->tfm = sctp_sk(ep->base.sk)->hmac;
+               desc->flags = 0;
+
+               err = crypto_shash_setkey(desc->tfm, ep->secret_key,
+                                         sizeof(ep->secret_key)) ?:
+                     crypto_shash_digest(desc, (u8 *)bear_cookie, bodysize,
+                                         digest);
+               shash_desc_zero(desc);
+
+               if (err) {
+                       *error = -SCTP_IERROR_NOMEM;
+                       goto fail;
+               }
        }
 
        if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) {
index 2e21384697c2a6a5fd045142bcd9c39992d3867f..b5327bb77458f3ae68e0098c45d00307a56ed2c5 100644 (file)
@@ -259,12 +259,6 @@ void sctp_generate_t3_rtx_event(unsigned long peer)
                goto out_unlock;
        }
 
-       /* Is this transport really dead and just waiting around for
-        * the timer to let go of the reference?
-        */
-       if (transport->dead)
-               goto out_unlock;
-
        /* Run through the state machine.  */
        error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX),
@@ -380,12 +374,6 @@ void sctp_generate_heartbeat_event(unsigned long data)
                goto out_unlock;
        }
 
-       /* Is this structure just waiting around for us to actually
-        * get destroyed?
-        */
-       if (transport->dead)
-               goto out_unlock;
-
        error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
                           SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT),
                           asoc->state, asoc->ep, asoc,
index 9bb80ec4c08ff06f6e629078c5a926c3def3ce23..de8eabf03eed9b904afd78e9af6f2ab0b172cd2b 100644 (file)
@@ -52,6 +52,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <crypto/hash.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/wait.h>
@@ -61,7 +62,6 @@
 #include <linux/fcntl.h>
 #include <linux/poll.h>
 #include <linux/init.h>
-#include <linux/crypto.h>
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/compat.h>
@@ -4160,7 +4160,7 @@ static void sctp_destruct_sock(struct sock *sk)
        struct sctp_sock *sp = sctp_sk(sk);
 
        /* Free up the HMAC transform. */
-       crypto_free_hash(sp->hmac);
+       crypto_free_shash(sp->hmac);
 
        inet_sock_destruct(sk);
 }
@@ -5538,6 +5538,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
        struct sctp_hmac_algo_param *hmacs;
        __u16 data_len = 0;
        u32 num_idents;
+       int i;
 
        if (!ep->auth_enable)
                return -EACCES;
@@ -5555,8 +5556,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
                return -EFAULT;
        if (put_user(num_idents, &p->shmac_num_idents))
                return -EFAULT;
-       if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
-               return -EFAULT;
+       for (i = 0; i < num_idents; i++) {
+               __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
+
+               if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
+                       return -EFAULT;
+       }
        return 0;
 }
 
@@ -6299,13 +6304,13 @@ static int sctp_listen_start(struct sock *sk, int backlog)
 {
        struct sctp_sock *sp = sctp_sk(sk);
        struct sctp_endpoint *ep = sp->ep;
-       struct crypto_hash *tfm = NULL;
+       struct crypto_shash *tfm = NULL;
        char alg[32];
 
        /* Allocate HMAC for generating cookie. */
        if (!sp->hmac && sp->sctp_hmac_alg) {
                sprintf(alg, "hmac(%s)", sp->sctp_hmac_alg);
-               tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
+               tfm = crypto_alloc_shash(alg, 0, 0);
                if (IS_ERR(tfm)) {
                        net_info_ratelimited("failed to load transform for %s: %ld\n",
                                             sp->sctp_hmac_alg, PTR_ERR(tfm));
@@ -6636,6 +6641,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
 
                        if (cmsgs->srinfo->sinfo_flags &
                            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+                             SCTP_SACK_IMMEDIATELY |
                              SCTP_ABORT | SCTP_EOF))
                                return -EINVAL;
                        break;
@@ -6659,6 +6665,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
 
                        if (cmsgs->sinfo->snd_flags &
                            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+                             SCTP_SACK_IMMEDIATELY |
                              SCTP_ABORT | SCTP_EOF))
                                return -EINVAL;
                        break;
index aab9e3f29755a58eb7584aa763b4b1a46d2e6608..a431c14044a46f98e9b73f66e28f229690ef3c01 100644 (file)
@@ -132,8 +132,6 @@ fail:
  */
 void sctp_transport_free(struct sctp_transport *transport)
 {
-       transport->dead = 1;
-
        /* Try to delete the heartbeat timer.  */
        if (del_timer(&transport->hb_timer))
                sctp_transport_put(transport);
@@ -169,7 +167,7 @@ static void sctp_transport_destroy_rcu(struct rcu_head *head)
  */
 static void sctp_transport_destroy(struct sctp_transport *transport)
 {
-       if (unlikely(!transport->dead)) {
+       if (unlikely(atomic_read(&transport->refcnt))) {
                WARN(1, "Attempt to destroy undead transport %p!\n", transport);
                return;
        }
@@ -296,9 +294,9 @@ void sctp_transport_route(struct sctp_transport *transport,
 }
 
 /* Hold a reference to a transport.  */
-void sctp_transport_hold(struct sctp_transport *transport)
+int sctp_transport_hold(struct sctp_transport *transport)
 {
-       atomic_inc(&transport->refcnt);
+       return atomic_add_unless(&transport->refcnt, 1, 0);
 }
 
 /* Release a reference to a transport and clean up
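
Editor's note: the SCTP changes above replace the transport->dead flag with reference counting — sctp_transport_hold() now reports whether it actually took a reference, because atomic_add_unless() refuses to bump a count that has already hit zero, and the RCU lookup earlier in this diff only uses a transport when that hold succeeds. A minimal sketch of the idiom with a hypothetical object (not the SCTP structures):

	#include <linux/atomic.h>
	#include <linux/rcupdate.h>

	struct my_obj {
		atomic_t refcnt;	/* reaches 0 once the object is being freed */
	};

	/* Take a reference only if the object is still live (refcnt > 0). */
	static int my_obj_hold(struct my_obj *obj)
	{
		return atomic_add_unless(&obj->refcnt, 1, 0);
	}

	static struct my_obj __rcu *my_current;	/* stand-in for a real lookup table */

	/* Lookup under RCU: a plain atomic_inc() could resurrect an object
	 * whose last reference was just dropped; the conditional hold cannot. */
	static struct my_obj *my_lookup(void)
	{
		struct my_obj *obj;

		rcu_read_lock();
		obj = rcu_dereference(my_current);
		if (obj && !my_obj_hold(obj))
			obj = NULL;		/* found, but already on its way out */
		rcu_read_unlock();
		return obj;			/* caller must drop the reference later */
	}
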
index fee3c15a4b5293bb33a24beb76ea7a904f6a7aab..d94a8e1e9f05b37cdb3bc82a3b419a0b0b8c1bbf 100644 (file)
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/random.h>
@@ -51,7 +52,7 @@
 
 u32
 krb5_encrypt(
-       struct crypto_blkcipher *tfm,
+       struct crypto_skcipher *tfm,
        void * iv,
        void * in,
        void * out,
@@ -60,24 +61,28 @@ krb5_encrypt(
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
-       struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-       if (length % crypto_blkcipher_blocksize(tfm) != 0)
+       if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;
 
-       if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+       if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
-                       crypto_blkcipher_ivsize(tfm));
+                       crypto_skcipher_ivsize(tfm));
                goto out;
        }
 
        if (iv)
-               memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
+               memcpy(local_iv, iv, crypto_skcipher_ivsize(tfm));
 
        memcpy(out, in, length);
        sg_init_one(sg, out, length);
 
-       ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, length, local_iv);
+
+       ret = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
 out:
        dprintk("RPC:       krb5_encrypt returns %d\n", ret);
        return ret;
@@ -85,7 +90,7 @@ out:
 
 u32
 krb5_decrypt(
-     struct crypto_blkcipher *tfm,
+     struct crypto_skcipher *tfm,
      void * iv,
      void * in,
      void * out,
@@ -94,23 +99,27 @@ krb5_decrypt(
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
-       struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
-       if (length % crypto_blkcipher_blocksize(tfm) != 0)
+       if (length % crypto_skcipher_blocksize(tfm) != 0)
                goto out;
 
-       if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
+       if (crypto_skcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
                dprintk("RPC:       gss_k5decrypt: tfm iv size too large %d\n",
-                       crypto_blkcipher_ivsize(tfm));
+                       crypto_skcipher_ivsize(tfm));
                goto out;
        }
        if (iv)
-               memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm));
+               memcpy(local_iv,iv, crypto_skcipher_ivsize(tfm));
 
        memcpy(out, in, length);
        sg_init_one(sg, out, length);
 
-       ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, length, local_iv);
+
+       ret = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
 out:
        dprintk("RPC:       gss_k5decrypt returns %d\n",ret);
        return ret;
@@ -119,9 +128,11 @@ out:
 static int
 checksummer(struct scatterlist *sg, void *data)
 {
-       struct hash_desc *desc = data;
+       struct ahash_request *req = data;
+
+       ahash_request_set_crypt(req, sg, NULL, sg->length);
 
-       return crypto_hash_update(desc, sg, sg->length);
+       return crypto_ahash_update(req);
 }
 
 static int
@@ -152,13 +163,13 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
                       struct xdr_buf *body, int body_offset, u8 *cksumkey,
                       unsigned int usage, struct xdr_netobj *cksumout)
 {
-       struct hash_desc                desc;
        struct scatterlist              sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
        u8 rc4salt[4];
-       struct crypto_hash *md5;
-       struct crypto_hash *hmac_md5;
+       struct crypto_ahash *md5;
+       struct crypto_ahash *hmac_md5;
+       struct ahash_request *req;
 
        if (cksumkey == NULL)
                return GSS_S_FAILURE;
@@ -174,61 +185,79 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
                return GSS_S_FAILURE;
        }
 
-       md5 = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+       md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(md5))
                return GSS_S_FAILURE;
 
-       hmac_md5 = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
-                                    CRYPTO_ALG_ASYNC);
+       hmac_md5 = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0,
+                                     CRYPTO_ALG_ASYNC);
        if (IS_ERR(hmac_md5)) {
-               crypto_free_hash(md5);
+               crypto_free_ahash(md5);
+               return GSS_S_FAILURE;
+       }
+
+       req = ahash_request_alloc(md5, GFP_KERNEL);
+       if (!req) {
+               crypto_free_ahash(hmac_md5);
+               crypto_free_ahash(md5);
                return GSS_S_FAILURE;
        }
 
-       desc.tfm = md5;
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
 
-       err = crypto_hash_init(&desc);
+       err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, rc4salt, 4);
-       err = crypto_hash_update(&desc, sg, 4);
+       ahash_request_set_crypt(req, sg, NULL, 4);
+       err = crypto_ahash_update(req);
        if (err)
                goto out;
 
        sg_init_one(sg, header, hdrlen);
-       err = crypto_hash_update(&desc, sg, hdrlen);
+       ahash_request_set_crypt(req, sg, NULL, hdrlen);
+       err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
-                             checksummer, &desc);
+                             checksummer, req);
        if (err)
                goto out;
-       err = crypto_hash_final(&desc, checksumdata);
+       ahash_request_set_crypt(req, NULL, checksumdata, 0);
+       err = crypto_ahash_final(req);
        if (err)
                goto out;
 
-       desc.tfm = hmac_md5;
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       ahash_request_free(req);
+       req = ahash_request_alloc(hmac_md5, GFP_KERNEL);
+       if (!req) {
+               crypto_free_ahash(hmac_md5);
+               crypto_free_ahash(md5);
+               return GSS_S_FAILURE;
+       }
+
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
 
-       err = crypto_hash_init(&desc);
+       err = crypto_ahash_init(req);
        if (err)
                goto out;
-       err = crypto_hash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
+       err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;
 
-       sg_init_one(sg, checksumdata, crypto_hash_digestsize(md5));
-       err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5),
-                                checksumdata);
+       sg_init_one(sg, checksumdata, crypto_ahash_digestsize(md5));
+       ahash_request_set_crypt(req, sg, checksumdata,
+                               crypto_ahash_digestsize(md5));
+       err = crypto_ahash_digest(req);
        if (err)
                goto out;
 
        memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
        cksumout->len = kctx->gk5e->cksumlength;
 out:
-       crypto_free_hash(md5);
-       crypto_free_hash(hmac_md5);
+       ahash_request_free(req);
+       crypto_free_ahash(md5);
+       crypto_free_ahash(hmac_md5);
        return err ? GSS_S_FAILURE : 0;
 }
 
@@ -242,7 +271,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
              struct xdr_buf *body, int body_offset, u8 *cksumkey,
              unsigned int usage, struct xdr_netobj *cksumout)
 {
-       struct hash_desc                desc;
+       struct crypto_ahash *tfm;
+       struct ahash_request *req;
        struct scatterlist              sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
@@ -259,32 +289,41 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
                return GSS_S_FAILURE;
        }
 
-       desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(desc.tfm))
+       tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
                return GSS_S_FAILURE;
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 
-       checksumlen = crypto_hash_digestsize(desc.tfm);
+       req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               crypto_free_ahash(tfm);
+               return GSS_S_FAILURE;
+       }
+
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
+       checksumlen = crypto_ahash_digestsize(tfm);
 
        if (cksumkey != NULL) {
-               err = crypto_hash_setkey(desc.tfm, cksumkey,
-                                        kctx->gk5e->keylength);
+               err = crypto_ahash_setkey(tfm, cksumkey,
+                                         kctx->gk5e->keylength);
                if (err)
                        goto out;
        }
 
-       err = crypto_hash_init(&desc);
+       err = crypto_ahash_init(req);
        if (err)
                goto out;
        sg_init_one(sg, header, hdrlen);
-       err = crypto_hash_update(&desc, sg, hdrlen);
+       ahash_request_set_crypt(req, sg, NULL, hdrlen);
+       err = crypto_ahash_update(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
-                             checksummer, &desc);
+                             checksummer, req);
        if (err)
                goto out;
-       err = crypto_hash_final(&desc, checksumdata);
+       ahash_request_set_crypt(req, NULL, checksumdata, 0);
+       err = crypto_ahash_final(req);
        if (err)
                goto out;
 
@@ -307,7 +346,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
        }
        cksumout->len = kctx->gk5e->cksumlength;
 out:
-       crypto_free_hash(desc.tfm);
+       ahash_request_free(req);
+       crypto_free_ahash(tfm);
        return err ? GSS_S_FAILURE : 0;
 }
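
Editor's note: make_checksum() above moves to the ahash interface because the data arrives as an xdr_buf walked by the checksummer() callback, and the request object replaces hash_desc as the state threaded through xdr_process_buf(). A stripped-down sketch of the same init/update/final sequence over a flat buffer (hypothetical names, one update in place of the xdr walk; assumes the tfm was allocated with CRYPTO_ALG_ASYNC in the mask so the calls complete synchronously):

	#include <crypto/hash.h>
	#include <linux/gfp.h>
	#include <linux/scatterlist.h>

	static int example_ahash_digest(struct crypto_ahash *tfm,
					u8 *data, unsigned int len, u8 *out)
	{
		struct ahash_request *req;
		struct scatterlist sg;
		int err;

		req = ahash_request_alloc(tfm, GFP_KERNEL);
		if (!req)
			return -ENOMEM;

		/* Synchronous use: allow sleeping, no completion callback. */
		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);

		sg_init_one(&sg, data, len);

		err = crypto_ahash_init(req);
		if (err)
			goto out;
		ahash_request_set_crypt(req, &sg, NULL, len);	/* update: no result yet */
		err = crypto_ahash_update(req);
		if (err)
			goto out;
		ahash_request_set_crypt(req, NULL, out, 0);	/* final: write the digest */
		err = crypto_ahash_final(req);
	out:
		ahash_request_free(req);
		return err;
	}
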
 
@@ -323,7 +363,8 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
                 struct xdr_buf *body, int body_offset, u8 *cksumkey,
                 unsigned int usage, struct xdr_netobj *cksumout)
 {
-       struct hash_desc desc;
+       struct crypto_ahash *tfm;
+       struct ahash_request *req;
        struct scatterlist sg[1];
        int err;
        u8 checksumdata[GSS_KRB5_MAX_CKSUM_LEN];
@@ -340,31 +381,39 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
                return GSS_S_FAILURE;
        }
 
-       desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0,
-                                                       CRYPTO_ALG_ASYNC);
-       if (IS_ERR(desc.tfm))
+       tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm))
                return GSS_S_FAILURE;
-       checksumlen = crypto_hash_digestsize(desc.tfm);
-       desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+       checksumlen = crypto_ahash_digestsize(tfm);
+
+       req = ahash_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               crypto_free_ahash(tfm);
+               return GSS_S_FAILURE;
+       }
 
-       err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength);
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
+
+       err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
        if (err)
                goto out;
 
-       err = crypto_hash_init(&desc);
+       err = crypto_ahash_init(req);
        if (err)
                goto out;
        err = xdr_process_buf(body, body_offset, body->len - body_offset,
-                             checksummer, &desc);
+                             checksummer, req);
        if (err)
                goto out;
        if (header != NULL) {
                sg_init_one(sg, header, hdrlen);
-               err = crypto_hash_update(&desc, sg, hdrlen);
+               ahash_request_set_crypt(req, sg, NULL, hdrlen);
+               err = crypto_ahash_update(req);
                if (err)
                        goto out;
        }
-       err = crypto_hash_final(&desc, checksumdata);
+       ahash_request_set_crypt(req, NULL, checksumdata, 0);
+       err = crypto_ahash_final(req);
        if (err)
                goto out;
 
@@ -381,13 +430,14 @@ make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
                break;
        }
 out:
-       crypto_free_hash(desc.tfm);
+       ahash_request_free(req);
+       crypto_free_ahash(tfm);
        return err ? GSS_S_FAILURE : 0;
 }
 
 struct encryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
        int pos;
        struct xdr_buf *outbuf;
        struct page **pages;
@@ -402,6 +452,7 @@ encryptor(struct scatterlist *sg, void *data)
 {
        struct encryptor_desc *desc = data;
        struct xdr_buf *outbuf = desc->outbuf;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        struct page *in_page;
        int thislen = desc->fraglen + sg->length;
        int fraglen, ret;
@@ -427,7 +478,7 @@ encryptor(struct scatterlist *sg, void *data)
        desc->fraglen += sg->length;
        desc->pos += sg->length;
 
-       fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
+       fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;
 
        if (thislen == 0)
@@ -436,8 +487,10 @@ encryptor(struct scatterlist *sg, void *data)
        sg_mark_end(&desc->infrags[desc->fragno - 1]);
        sg_mark_end(&desc->outfrags[desc->fragno - 1]);
 
-       ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
-                                         desc->infrags, thislen);
+       skcipher_request_set_crypt(desc->req, desc->infrags, desc->outfrags,
+                                  thislen, desc->iv);
+
+       ret = crypto_skcipher_encrypt(desc->req);
        if (ret)
                return ret;
 
@@ -459,18 +512,20 @@ encryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+gss_encrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset, struct page **pages)
 {
        int ret;
        struct encryptor_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
+
+       BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
 
-       BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
 
        memset(desc.iv, 0, sizeof(desc.iv));
-       desc.desc.tfm = tfm;
-       desc.desc.info = desc.iv;
-       desc.desc.flags = 0;
+       desc.req = req;
        desc.pos = offset;
        desc.outbuf = buf;
        desc.pages = pages;
@@ -481,12 +536,13 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
        sg_init_table(desc.outfrags, 4);
 
        ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
+       skcipher_request_zero(req);
        return ret;
 }
 
 struct decryptor_desc {
        u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
-       struct blkcipher_desc desc;
+       struct skcipher_request *req;
        struct scatterlist frags[4];
        int fragno;
        int fraglen;
@@ -497,6 +553,7 @@ decryptor(struct scatterlist *sg, void *data)
 {
        struct decryptor_desc *desc = data;
        int thislen = desc->fraglen + sg->length;
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(desc->req);
        int fraglen, ret;
 
        /* Worst case is 4 fragments: head, end of page 1, start
@@ -507,7 +564,7 @@ decryptor(struct scatterlist *sg, void *data)
        desc->fragno++;
        desc->fraglen += sg->length;
 
-       fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1);
+       fraglen = thislen & (crypto_skcipher_blocksize(tfm) - 1);
        thislen -= fraglen;
 
        if (thislen == 0)
@@ -515,8 +572,10 @@ decryptor(struct scatterlist *sg, void *data)
 
        sg_mark_end(&desc->frags[desc->fragno - 1]);
 
-       ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
-                                         desc->frags, thislen);
+       skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,
+                                  thislen, desc->iv);
+
+       ret = crypto_skcipher_decrypt(desc->req);
        if (ret)
                return ret;
 
@@ -535,24 +594,29 @@ decryptor(struct scatterlist *sg, void *data)
 }
 
 int
-gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
+gss_decrypt_xdr_buf(struct crypto_skcipher *tfm, struct xdr_buf *buf,
                    int offset)
 {
+       int ret;
        struct decryptor_desc desc;
+       SKCIPHER_REQUEST_ON_STACK(req, tfm);
 
        /* XXXJBF: */
-       BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
+       BUG_ON((buf->len - offset) % crypto_skcipher_blocksize(tfm) != 0);
+
+       skcipher_request_set_tfm(req, tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
 
        memset(desc.iv, 0, sizeof(desc.iv));
-       desc.desc.tfm = tfm;
-       desc.desc.info = desc.iv;
-       desc.desc.flags = 0;
+       desc.req = req;
        desc.fragno = 0;
        desc.fraglen = 0;
 
        sg_init_table(desc.frags, 4);
 
-       return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
+       ret = xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
+       skcipher_request_zero(req);
+       return ret;
 }
 
 /*
@@ -594,12 +658,12 @@ xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
 }
 
 static u32
-gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
+gss_krb5_cts_crypt(struct crypto_skcipher *cipher, struct xdr_buf *buf,
                   u32 offset, u8 *iv, struct page **pages, int encrypt)
 {
        u32 ret;
        struct scatterlist sg[1];
-       struct blkcipher_desc desc = { .tfm = cipher, .info = iv };
+       SKCIPHER_REQUEST_ON_STACK(req, cipher);
        u8 data[GSS_KRB5_MAX_BLOCKSIZE * 2];
        struct page **save_pages;
        u32 len = buf->len - offset;
@@ -625,10 +689,16 @@ gss_krb5_cts_crypt(struct crypto_blkcipher *cipher, struct xdr_buf *buf,
 
        sg_init_one(sg, data, len);
 
+       skcipher_request_set_tfm(req, cipher);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, sg, sg, len, iv);
+
        if (encrypt)
-               ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);
+               ret = crypto_skcipher_encrypt(req);
        else
-               ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len);
+               ret = crypto_skcipher_decrypt(req);
+
+       skcipher_request_zero(req);
 
        if (ret)
                goto out;
@@ -647,7 +717,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
        struct xdr_netobj hmac;
        u8 *cksumkey;
        u8 *ecptr;
-       struct crypto_blkcipher *cipher, *aux_cipher;
+       struct crypto_skcipher *cipher, *aux_cipher;
        int blocksize;
        struct page **save_pages;
        int nblocks, nbytes;
@@ -666,7 +736,7 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
                cksumkey = kctx->acceptor_integ;
                usage = KG_USAGE_ACCEPTOR_SEAL;
        }
-       blocksize = crypto_blkcipher_blocksize(cipher);
+       blocksize = crypto_skcipher_blocksize(cipher);
 
        /* hide the gss token header and insert the confounder */
        offset += GSS_KRB5_TOK_HDR_LEN;
@@ -719,20 +789,24 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
        memset(desc.iv, 0, sizeof(desc.iv));
 
        if (cbcbytes) {
+               SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+
                desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
                desc.fragno = 0;
                desc.fraglen = 0;
                desc.pages = pages;
                desc.outbuf = buf;
-               desc.desc.info = desc.iv;
-               desc.desc.flags = 0;
-               desc.desc.tfm = aux_cipher;
+               desc.req = req;
+
+               skcipher_request_set_tfm(req, aux_cipher);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
 
                sg_init_table(desc.infrags, 4);
                sg_init_table(desc.outfrags, 4);
 
                err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
                                      cbcbytes, encryptor, &desc);
+               skcipher_request_zero(req);
                if (err)
                        goto out_err;
        }
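
For context, cbcbytes here is the part of the payload that the plain CBC aux cipher can handle; the remaining one or two blocks go through gss_krb5_cts_crypt() so that ciphertext stealing avoids any padding. The derivation of cbcbytes is outside this hunk, so treat the exact expression below as an assumption about the surrounding code:

        /* assumed shape of the cbcbytes computation; names match the hunk above */
        nblocks = (nbytes + blocksize - 1) / blocksize;
        cbcbytes = 0;
        if (nblocks > 2)
                cbcbytes = (nblocks - 2) * blocksize;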
@@ -763,7 +837,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
        struct xdr_buf subbuf;
        u32 ret = 0;
        u8 *cksum_key;
-       struct crypto_blkcipher *cipher, *aux_cipher;
+       struct crypto_skcipher *cipher, *aux_cipher;
        struct xdr_netobj our_hmac_obj;
        u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
        u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
@@ -782,7 +856,7 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
                cksum_key = kctx->initiator_integ;
                usage = KG_USAGE_INITIATOR_SEAL;
        }
-       blocksize = crypto_blkcipher_blocksize(cipher);
+       blocksize = crypto_skcipher_blocksize(cipher);
 
 
        /* create a segment skipping the header and leaving out the checksum */
@@ -799,15 +873,19 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
        memset(desc.iv, 0, sizeof(desc.iv));
 
        if (cbcbytes) {
+               SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
+
                desc.fragno = 0;
                desc.fraglen = 0;
-               desc.desc.info = desc.iv;
-               desc.desc.flags = 0;
-               desc.desc.tfm = aux_cipher;
+               desc.req = req;
+
+               skcipher_request_set_tfm(req, aux_cipher);
+               skcipher_request_set_callback(req, 0, NULL, NULL);
 
                sg_init_table(desc.frags, 4);
 
                ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
+               skcipher_request_zero(req);
                if (ret)
                        goto out_err;
        }
@@ -850,61 +928,62 @@ out_err:
  * Set the key of the given cipher.
  */
 int
-krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+krb5_rc4_setup_seq_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       unsigned char *cksum)
 {
-       struct crypto_hash *hmac;
-       struct hash_desc desc;
-       struct scatterlist sg[1];
+       struct crypto_shash *hmac;
+       struct shash_desc *desc;
        u8 Kseq[GSS_KRB5_MAX_KEYLEN];
        u32 zeroconstant = 0;
        int err;
 
        dprintk("%s: entered\n", __func__);
 
-       hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }
 
-       desc.tfm = hmac;
-       desc.flags = 0;
+       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       if (!desc) {
+               dprintk("%s: failed to allocate shash descriptor for '%s'\n",
+                       __func__, kctx->gk5e->cksum_name);
+               crypto_free_shash(hmac);
+               return -ENOMEM;
+       }
 
-       err = crypto_hash_init(&desc);
-       if (err)
-               goto out_err;
+       desc->tfm = hmac;
+       desc->flags = 0;
 
        /* Compute intermediate Kseq from session key */
-       err = crypto_hash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, kctx->Ksess, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
-       sg_init_one(sg, &zeroconstant, 4);
-       err = crypto_hash_digest(&desc, sg, 4, Kseq);
+       err = crypto_shash_digest(desc, (u8 *)&zeroconstant, 4, Kseq);
        if (err)
                goto out_err;
 
        /* Compute final Kseq from the checksum and intermediate Kseq */
-       err = crypto_hash_setkey(hmac, Kseq, kctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
-       sg_set_buf(sg, cksum, 8);
-
-       err = crypto_hash_digest(&desc, sg, 8, Kseq);
+       err = crypto_shash_digest(desc, cksum, 8, Kseq);
        if (err)
                goto out_err;
 
-       err = crypto_blkcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
+       err = crypto_skcipher_setkey(cipher, Kseq, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
        err = 0;
 
 out_err:
-       crypto_free_hash(hmac);
+       kzfree(desc);
+       crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
 }
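
The RC4 sequence-key derivation above switches from crypto_hash to crypto_shash: the descriptor is heap-allocated, keyed digests are produced with crypto_shash_digest() on flat buffers (no scatterlists), and the descriptor is wiped with kzfree() afterwards. Note that crypto_shash_digest() keeps per-request state directly behind struct shash_desc, so a descriptor allocation generally needs an extra crypto_shash_descsize(tfm) bytes; the condensed sketch below (function name and buffers invented) sizes it that way:

#include <crypto/hash.h>

/* illustrative keyed one-shot digest, mirroring the shash usage above */
static int example_hmac(const char *alg, const u8 *key, unsigned int keylen,
                        const u8 *data, unsigned int len, u8 *out)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int err;

        tfm = crypto_alloc_shash(alg, 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
        if (!desc) {
                crypto_free_shash(tfm);
                return -ENOMEM;
        }
        desc->tfm = tfm;
        desc->flags = 0;

        err = crypto_shash_setkey(tfm, key, keylen);
        if (!err)
                err = crypto_shash_digest(desc, data, len, out);

        kzfree(desc);                   /* descriptor may hold keyed state */
        crypto_free_shash(tfm);
        return err;
}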
@@ -914,12 +993,11 @@ out_err:
  * Set the key of cipher kctx->enc.
  */
 int
-krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
+krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_skcipher *cipher,
                       s32 seqnum)
 {
-       struct crypto_hash *hmac;
-       struct hash_desc desc;
-       struct scatterlist sg[1];
+       struct crypto_shash *hmac;
+       struct shash_desc *desc;
        u8 Kcrypt[GSS_KRB5_MAX_KEYLEN];
        u8 zeroconstant[4] = {0};
        u8 seqnumarray[4];
@@ -927,35 +1005,38 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
 
        dprintk("%s: entered, seqnum %u\n", __func__, seqnum);
 
-       hmac = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       hmac = crypto_alloc_shash(kctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld, allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), kctx->gk5e->cksum_name);
                return PTR_ERR(hmac);
        }
 
-       desc.tfm = hmac;
-       desc.flags = 0;
+       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       if (!desc) {
+               dprintk("%s: failed to allocate shash descriptor for '%s'\n",
+                       __func__, kctx->gk5e->cksum_name);
+               crypto_free_shash(hmac);
+               return -ENOMEM;
+       }
 
-       err = crypto_hash_init(&desc);
-       if (err)
-               goto out_err;
+       desc->tfm = hmac;
+       desc->flags = 0;
 
        /* Compute intermediate Kcrypt from session key */
        for (i = 0; i < kctx->gk5e->keylength; i++)
                Kcrypt[i] = kctx->Ksess[i] ^ 0xf0;
 
-       err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
-       sg_init_one(sg, zeroconstant, 4);
-       err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+       err = crypto_shash_digest(desc, zeroconstant, 4, Kcrypt);
        if (err)
                goto out_err;
 
        /* Compute final Kcrypt from the seqnum and intermediate Kcrypt */
-       err = crypto_hash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
@@ -964,20 +1045,19 @@ krb5_rc4_setup_enc_key(struct krb5_ctx *kctx, struct crypto_blkcipher *cipher,
        seqnumarray[2] = (unsigned char) ((seqnum >> 8) & 0xff);
        seqnumarray[3] = (unsigned char) ((seqnum >> 0) & 0xff);
 
-       sg_set_buf(sg, seqnumarray, 4);
-
-       err = crypto_hash_digest(&desc, sg, 4, Kcrypt);
+       err = crypto_shash_digest(desc, seqnumarray, 4, Kcrypt);
        if (err)
                goto out_err;
 
-       err = crypto_blkcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
+       err = crypto_skcipher_setkey(cipher, Kcrypt, kctx->gk5e->keylength);
        if (err)
                goto out_err;
 
        err = 0;
 
 out_err:
-       crypto_free_hash(hmac);
+       kzfree(desc);
+       crypto_free_shash(hmac);
        dprintk("%s: returning %d\n", __func__, err);
        return err;
 }
index 234fa8d0fd9bf80cc28c3d88ae94037122ca422c..87013314602634711ad9dbd223823d896a373210 100644 (file)
@@ -54,9 +54,9 @@
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/types.h>
-#include <linux/crypto.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/lcm.h>
@@ -147,7 +147,7 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
        size_t blocksize, keybytes, keylength, n;
        unsigned char *inblockdata, *outblockdata, *rawkey;
        struct xdr_netobj inblock, outblock;
-       struct crypto_blkcipher *cipher;
+       struct crypto_skcipher *cipher;
        u32 ret = EINVAL;
 
        blocksize = gk5e->blocksize;
@@ -157,11 +157,11 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
        if ((inkey->len != keylength) || (outkey->len != keylength))
                goto err_return;
 
-       cipher = crypto_alloc_blkcipher(gk5e->encrypt_name, 0,
-                                       CRYPTO_ALG_ASYNC);
+       cipher = crypto_alloc_skcipher(gk5e->encrypt_name, 0,
+                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(cipher))
                goto err_return;
-       if (crypto_blkcipher_setkey(cipher, inkey->data, inkey->len))
+       if (crypto_skcipher_setkey(cipher, inkey->data, inkey->len))
                goto err_return;
 
        /* allocate and set up buffers */
@@ -238,7 +238,7 @@ err_free_in:
        memset(inblockdata, 0, blocksize);
        kfree(inblockdata);
 err_free_cipher:
-       crypto_free_blkcipher(cipher);
+       crypto_free_skcipher(cipher);
 err_return:
        return ret;
 }
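
Throughout these files the ciphers are allocated with crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC). The third argument is a mask, so CRYPTO_ALG_ASYNC here excludes asynchronous implementations rather than requesting one, which is what lets the callers drive the requests without a completion callback. A minimal allocation/teardown sketch (function name invented, modelled on context_v2_alloc_cipher() below):

#include <crypto/skcipher.h>

static struct crypto_skcipher *example_alloc_cipher(const u8 *key,
                                                    unsigned int keylen)
{
        struct crypto_skcipher *cipher;

        /* mask CRYPTO_ALG_ASYNC: pick a synchronous implementation only */
        cipher = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(cipher))
                return NULL;
        if (crypto_skcipher_setkey(cipher, key, keylen)) {
                crypto_free_skcipher(cipher);
                return NULL;
        }
        return cipher;          /* caller releases it with crypto_free_skcipher() */
}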
index 28db442a0034ad601d1a4cc2f2f82dc16a32a3da..71341ccb989043acd4c6d449a6d89d9db8b98513 100644 (file)
@@ -34,6 +34,8 @@
  *
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -42,7 +44,6 @@
 #include <linux/sunrpc/auth.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/sunrpc/xdr.h>
-#include <linux/crypto.h>
 #include <linux/sunrpc/gss_krb5_enctypes.h>
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
@@ -217,7 +218,7 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
 
 static inline const void *
 get_key(const void *p, const void *end,
-       struct krb5_ctx *ctx, struct crypto_blkcipher **res)
+       struct krb5_ctx *ctx, struct crypto_skcipher **res)
 {
        struct xdr_netobj       key;
        int                     alg;
@@ -245,7 +246,7 @@ get_key(const void *p, const void *end,
        if (IS_ERR(p))
                goto out_err;
 
-       *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
+       *res = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
                                                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(*res)) {
                printk(KERN_WARNING "gss_kerberos_mech: unable to initialize "
@@ -253,7 +254,7 @@ get_key(const void *p, const void *end,
                *res = NULL;
                goto out_err_free_key;
        }
-       if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
+       if (crypto_skcipher_setkey(*res, key.data, key.len)) {
                printk(KERN_WARNING "gss_kerberos_mech: error setting key for "
                        "crypto algorithm %s\n", ctx->gk5e->encrypt_name);
                goto out_err_free_tfm;
@@ -263,7 +264,7 @@ get_key(const void *p, const void *end,
        return p;
 
 out_err_free_tfm:
-       crypto_free_blkcipher(*res);
+       crypto_free_skcipher(*res);
 out_err_free_key:
        kfree(key.data);
        p = ERR_PTR(-EINVAL);
@@ -335,30 +336,30 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
        return 0;
 
 out_err_free_key2:
-       crypto_free_blkcipher(ctx->seq);
+       crypto_free_skcipher(ctx->seq);
 out_err_free_key1:
-       crypto_free_blkcipher(ctx->enc);
+       crypto_free_skcipher(ctx->enc);
 out_err_free_mech:
        kfree(ctx->mech_used.data);
 out_err:
        return PTR_ERR(p);
 }
 
-static struct crypto_blkcipher *
+static struct crypto_skcipher *
 context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key)
 {
-       struct crypto_blkcipher *cp;
+       struct crypto_skcipher *cp;
 
-       cp = crypto_alloc_blkcipher(cname, 0, CRYPTO_ALG_ASYNC);
+       cp = crypto_alloc_skcipher(cname, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(cp)) {
                dprintk("gss_kerberos_mech: unable to initialize "
                        "crypto algorithm %s\n", cname);
                return NULL;
        }
-       if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) {
+       if (crypto_skcipher_setkey(cp, key, ctx->gk5e->keylength)) {
                dprintk("gss_kerberos_mech: error setting key for "
                        "crypto algorithm %s\n", cname);
-               crypto_free_blkcipher(cp);
+               crypto_free_skcipher(cp);
                return NULL;
        }
        return cp;
@@ -412,9 +413,9 @@ context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask)
        return 0;
 
 out_free_enc:
-       crypto_free_blkcipher(ctx->enc);
+       crypto_free_skcipher(ctx->enc);
 out_free_seq:
-       crypto_free_blkcipher(ctx->seq);
+       crypto_free_skcipher(ctx->seq);
 out_err:
        return -EINVAL;
 }
@@ -427,18 +428,17 @@ out_err:
 static int
 context_derive_keys_rc4(struct krb5_ctx *ctx)
 {
-       struct crypto_hash *hmac;
+       struct crypto_shash *hmac;
        char sigkeyconstant[] = "signaturekey";
        int slen = strlen(sigkeyconstant) + 1;  /* include null terminator */
-       struct hash_desc desc;
-       struct scatterlist sg[1];
+       struct shash_desc *desc;
        int err;
 
        dprintk("RPC:       %s: entered\n", __func__);
        /*
         * derive cksum (aka Ksign) key
         */
-       hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
+       hmac = crypto_alloc_shash(ctx->gk5e->cksum_name, 0, 0);
        if (IS_ERR(hmac)) {
                dprintk("%s: error %ld allocating hash '%s'\n",
                        __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name);
@@ -446,37 +446,40 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
                goto out_err;
        }
 
-       err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
+       err = crypto_shash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength);
        if (err)
                goto out_err_free_hmac;
 
-       sg_init_table(sg, 1);
-       sg_set_buf(sg, sigkeyconstant, slen);
 
-       desc.tfm = hmac;
-       desc.flags = 0;
-
-       err = crypto_hash_init(&desc);
-       if (err)
+       desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+       if (!desc) {
+               dprintk("%s: failed to allocate hash descriptor for '%s'\n",
+                       __func__, ctx->gk5e->cksum_name);
+               err = -ENOMEM;
                goto out_err_free_hmac;
+       }
+
+       desc->tfm = hmac;
+       desc->flags = 0;
 
-       err = crypto_hash_digest(&desc, sg, slen, ctx->cksum);
+       err = crypto_shash_digest(desc, sigkeyconstant, slen, ctx->cksum);
+       kzfree(desc);
        if (err)
                goto out_err_free_hmac;
        /*
-        * allocate hash, and blkciphers for data and seqnum encryption
+        * allocate hash, and skciphers for data and seqnum encryption
         */
-       ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
-                                         CRYPTO_ALG_ASYNC);
+       ctx->enc = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
+                                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(ctx->enc)) {
                err = PTR_ERR(ctx->enc);
                goto out_err_free_hmac;
        }
 
-       ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0,
-                                         CRYPTO_ALG_ASYNC);
+       ctx->seq = crypto_alloc_skcipher(ctx->gk5e->encrypt_name, 0,
+                                        CRYPTO_ALG_ASYNC);
        if (IS_ERR(ctx->seq)) {
-               crypto_free_blkcipher(ctx->enc);
+               crypto_free_skcipher(ctx->enc);
                err = PTR_ERR(ctx->seq);
                goto out_err_free_hmac;
        }
@@ -486,7 +489,7 @@ context_derive_keys_rc4(struct krb5_ctx *ctx)
        err = 0;
 
 out_err_free_hmac:
-       crypto_free_hash(hmac);
+       crypto_free_shash(hmac);
 out_err:
        dprintk("RPC:       %s: returning %d\n", __func__, err);
        return err;
@@ -588,7 +591,7 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
                        context_v2_alloc_cipher(ctx, "cbc(aes)",
                                                ctx->acceptor_seal);
                if (ctx->acceptor_enc_aux == NULL) {
-                       crypto_free_blkcipher(ctx->initiator_enc_aux);
+                       crypto_free_skcipher(ctx->initiator_enc_aux);
                        goto out_free_acceptor_enc;
                }
        }
@@ -596,9 +599,9 @@ context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask)
        return 0;
 
 out_free_acceptor_enc:
-       crypto_free_blkcipher(ctx->acceptor_enc);
+       crypto_free_skcipher(ctx->acceptor_enc);
 out_free_initiator_enc:
-       crypto_free_blkcipher(ctx->initiator_enc);
+       crypto_free_skcipher(ctx->initiator_enc);
 out_err:
        return -EINVAL;
 }
@@ -710,12 +713,12 @@ static void
 gss_delete_sec_context_kerberos(void *internal_ctx) {
        struct krb5_ctx *kctx = internal_ctx;
 
-       crypto_free_blkcipher(kctx->seq);
-       crypto_free_blkcipher(kctx->enc);
-       crypto_free_blkcipher(kctx->acceptor_enc);
-       crypto_free_blkcipher(kctx->initiator_enc);
-       crypto_free_blkcipher(kctx->acceptor_enc_aux);
-       crypto_free_blkcipher(kctx->initiator_enc_aux);
+       crypto_free_skcipher(kctx->seq);
+       crypto_free_skcipher(kctx->enc);
+       crypto_free_skcipher(kctx->acceptor_enc);
+       crypto_free_skcipher(kctx->initiator_enc);
+       crypto_free_skcipher(kctx->acceptor_enc_aux);
+       crypto_free_skcipher(kctx->initiator_enc_aux);
        kfree(kctx->mech_used.data);
        kfree(kctx);
 }
index 20d55c793eb657203e40b90779ceb361e2391c32..c8b9082f4a9d67eb4832930c8d62388a18d94b3d 100644 (file)
@@ -31,9 +31,9 @@
  * PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/types.h>
 #include <linux/sunrpc/gss_krb5.h>
-#include <linux/crypto.h>
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY        RPCDBG_AUTH
@@ -43,13 +43,13 @@ static s32
 krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
                      unsigned char *cksum, unsigned char *buf)
 {
-       struct crypto_blkcipher *cipher;
+       struct crypto_skcipher *cipher;
        unsigned char plain[8];
        s32 code;
 
        dprintk("RPC:       %s:\n", __func__);
-       cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                       CRYPTO_ALG_ASYNC);
+       cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);
 
@@ -68,12 +68,12 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
 
        code = krb5_encrypt(cipher, cksum, plain, buf, 8);
 out:
-       crypto_free_blkcipher(cipher);
+       crypto_free_skcipher(cipher);
        return code;
 }
 s32
 krb5_make_seq_num(struct krb5_ctx *kctx,
-               struct crypto_blkcipher *key,
+               struct crypto_skcipher *key,
                int direction,
                u32 seqnum,
                unsigned char *cksum, unsigned char *buf)
@@ -101,13 +101,13 @@ static s32
 krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
                     unsigned char *buf, int *direction, s32 *seqnum)
 {
-       struct crypto_blkcipher *cipher;
+       struct crypto_skcipher *cipher;
        unsigned char plain[8];
        s32 code;
 
        dprintk("RPC:       %s:\n", __func__);
-       cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                       CRYPTO_ALG_ASYNC);
+       cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                      CRYPTO_ALG_ASYNC);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);
 
@@ -130,7 +130,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
        *seqnum = ((plain[0] << 24) | (plain[1] << 16) |
                                        (plain[2] << 8) | (plain[3]));
 out:
-       crypto_free_blkcipher(cipher);
+       crypto_free_skcipher(cipher);
        return code;
 }
 
@@ -142,7 +142,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
 {
        s32 code;
        unsigned char plain[8];
-       struct crypto_blkcipher *key = kctx->seq;
+       struct crypto_skcipher *key = kctx->seq;
 
        dprintk("RPC:       krb5_get_seq_num:\n");
 
index ca7e92a32f84920036c124732d71e029a9f0ce4c..765088e4ad84d073b3587917942b9059717875b3 100644 (file)
  * SUCH DAMAGES.
  */
 
+#include <crypto/skcipher.h>
 #include <linux/types.h>
 #include <linux/jiffies.h>
 #include <linux/sunrpc/gss_krb5.h>
 #include <linux/random.h>
 #include <linux/pagemap.h>
-#include <linux/crypto.h>
 
 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
 # define RPCDBG_FACILITY       RPCDBG_AUTH
@@ -174,7 +174,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
        now = get_seconds();
 
-       blocksize = crypto_blkcipher_blocksize(kctx->enc);
+       blocksize = crypto_skcipher_blocksize(kctx->enc);
        gss_krb5_add_padding(buf, offset, blocksize);
        BUG_ON((buf->len - offset) % blocksize);
        plainlen = conflen + buf->len - offset;
@@ -239,10 +239,10 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
                return GSS_S_FAILURE;
 
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
-               struct crypto_blkcipher *cipher;
+               struct crypto_skcipher *cipher;
                int err;
-               cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                               CRYPTO_ALG_ASYNC);
+               cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                              CRYPTO_ALG_ASYNC);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;
 
@@ -250,7 +250,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
 
                err = gss_encrypt_xdr_buf(cipher, buf,
                                          offset + headlen - conflen, pages);
-               crypto_free_blkcipher(cipher);
+               crypto_free_skcipher(cipher);
                if (err)
                        return GSS_S_FAILURE;
        } else {
@@ -327,18 +327,18 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
                return GSS_S_BAD_SIG;
 
        if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
-               struct crypto_blkcipher *cipher;
+               struct crypto_skcipher *cipher;
                int err;
 
-               cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
-                                               CRYPTO_ALG_ASYNC);
+               cipher = crypto_alloc_skcipher(kctx->gk5e->encrypt_name, 0,
+                                              CRYPTO_ALG_ASYNC);
                if (IS_ERR(cipher))
                        return GSS_S_FAILURE;
 
                krb5_rc4_setup_enc_key(kctx, cipher, seqnum);
 
                err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
-               crypto_free_blkcipher(cipher);
+               crypto_free_skcipher(cipher);
                if (err)
                        return GSS_S_DEFECTIVE_TOKEN;
        } else {
@@ -371,7 +371,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
        /* Copy the data back to the right position.  XXX: Would probably be
         * better to copy and encrypt at the same time. */
 
-       blocksize = crypto_blkcipher_blocksize(kctx->enc);
+       blocksize = crypto_skcipher_blocksize(kctx->enc);
        data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
                                        conflen;
        orig_start = buf->head[0].iov_base + offset;
@@ -473,7 +473,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
        *ptr++ = 0xff;
        be16ptr = (__be16 *)ptr;
 
-       blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
+       blocksize = crypto_skcipher_blocksize(kctx->acceptor_enc);
        *be16ptr++ = 0;
        /* "inner" token header always uses 0 for RRC */
        *be16ptr++ = 0;
index ebc661d3b6e37edb55d7c0a719ed9937a9f27a0d..47f7da58a7f0e93e3ffd632396a5a8db65c7f7c3 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/workqueue.h>
 #include <linux/if_vlan.h>
+#include <linux/rtnetlink.h>
 #include <net/ip_fib.h>
 #include <net/switchdev.h>
 
@@ -567,7 +568,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
 }
 EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
 
-static DEFINE_MUTEX(switchdev_mutex);
 static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
 
 /**
@@ -582,9 +582,9 @@ int register_switchdev_notifier(struct notifier_block *nb)
 {
        int err;
 
-       mutex_lock(&switchdev_mutex);
+       rtnl_lock();
        err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
-       mutex_unlock(&switchdev_mutex);
+       rtnl_unlock();
        return err;
 }
 EXPORT_SYMBOL_GPL(register_switchdev_notifier);
@@ -600,9 +600,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb)
 {
        int err;
 
-       mutex_lock(&switchdev_mutex);
+       rtnl_lock();
        err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
-       mutex_unlock(&switchdev_mutex);
+       rtnl_unlock();
        return err;
 }
 EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
@@ -616,16 +616,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
  *     Call all network notifier blocks. This should be called by driver
  *     when it needs to propagate hardware event.
  *     Return values are same as for atomic_notifier_call_chain().
+ *     rtnl_lock must be held.
  */
 int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
                             struct switchdev_notifier_info *info)
 {
        int err;
 
+       ASSERT_RTNL();
+
        info->dev = dev;
-       mutex_lock(&switchdev_mutex);
        err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
-       mutex_unlock(&switchdev_mutex);
        return err;
 }
 EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
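
The switchdev change drops the private switchdev_mutex and serializes the notifier chain on the RTNL lock instead, so a caller of call_switchdev_notifiers() must already hold rtnl_lock (hence the ASSERT_RTNL above). A sketch of a driver-side caller after this change; the event value and the surrounding function are assumptions, only the locking requirement comes from the hunk:

#include <linux/rtnetlink.h>
#include <net/switchdev.h>

/* illustrative only: notify switchdev listeners from process context */
static void example_notify_fdb(struct net_device *dev,
                               struct switchdev_notifier_info *info)
{
        rtnl_lock();
        call_switchdev_notifiers(SWITCHDEV_FDB_ADD, dev, info);
        rtnl_unlock();
}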
index 0c2944fb9ae0d34ca8e153d9412c5896b47ffacf..6f4a6d9b014989f29ff378d3e84cb60fd809aabe 100644 (file)
@@ -123,7 +123,6 @@ struct tipc_stats {
 struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
-       struct tipc_media_addr *media_addr;
        struct net *net;
 
        /* Management and link supervision data */
@@ -1261,26 +1260,6 @@ drop:
        return rc;
 }
 
-/*
- * Send protocol message to the other endpoint.
- */
-static void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ,
-                                int probe_msg, u32 gap, u32 tolerance,
-                                u32 priority)
-{
-       struct sk_buff *skb = NULL;
-       struct sk_buff_head xmitq;
-
-       __skb_queue_head_init(&xmitq);
-       tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
-                                 tolerance, priority, &xmitq);
-       skb = __skb_dequeue(&xmitq);
-       if (!skb)
-               return;
-       tipc_bearer_xmit_skb(l->net, l->bearer_id, skb, l->media_addr);
-       l->rcv_unacked = 0;
-}
-
 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      u16 rcvgap, int tolerance, int priority,
                                      struct sk_buff_head *xmitq)
@@ -1479,6 +1458,12 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
                        l->tolerance = peers_tol;
 
+               if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
+                                          TIPC_MAX_LINK_PRI)) {
+                       l->priority = peers_prio;
+                       rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
+               }
+
                l->silent_intv_cnt = 0;
                l->stats.recv_states++;
                if (msg_probe(hdr))
@@ -2021,16 +2006,18 @@ msg_full:
        return -EMSGSIZE;
 }
 
-void tipc_link_set_tolerance(struct tipc_link *l, u32 tol)
+void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
+                            struct sk_buff_head *xmitq)
 {
        l->tolerance = tol;
-       tipc_link_proto_xmit(l, STATE_MSG, 0, 0, tol, 0);
+       tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
 }
 
-void tipc_link_set_prio(struct tipc_link *l, u32 prio)
+void tipc_link_set_prio(struct tipc_link *l, u32 prio,
+                       struct sk_buff_head *xmitq)
 {
        l->priority = prio;
-       tipc_link_proto_xmit(l, STATE_MSG, 0, 0, 0, prio);
+       tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
 }
 
 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
index b2ae0f4276afd72c58b0f08101deeef1a39dc772..b4ee9d6e181d2c2b2a17eefa865dae9c7071f543 100644 (file)
@@ -112,8 +112,10 @@ char tipc_link_plane(struct tipc_link *l);
 int tipc_link_prio(struct tipc_link *l);
 int tipc_link_window(struct tipc_link *l);
 unsigned long tipc_link_tolerance(struct tipc_link *l);
-void tipc_link_set_tolerance(struct tipc_link *l, u32 tol);
-void tipc_link_set_prio(struct tipc_link *l, u32 prio);
+void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
+                            struct sk_buff_head *xmitq);
+void tipc_link_set_prio(struct tipc_link *l, u32 prio,
+                       struct sk_buff_head *xmitq);
 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit);
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 window);
 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
index 91fce70291a898cdbfd439d3eedceeb249bb0622..777b979b84634fbd98aa3ed49388c33e0a9472c7 100644 (file)
@@ -418,6 +418,9 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
                                   struct tipc_subscription *s)
 {
        struct sub_seq *sseq = nseq->sseqs;
+       struct tipc_name_seq ns;
+
+       tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
 
        list_add(&s->nameseq_list, &nseq->subscriptions);
 
@@ -425,7 +428,7 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
                return;
 
        while (sseq != &nseq->sseqs[nseq->first_free]) {
-               if (tipc_subscrp_check_overlap(s, sseq->lower, sseq->upper)) {
+               if (tipc_subscrp_check_overlap(&ns, sseq->lower, sseq->upper)) {
                        struct publication *crs;
                        struct name_info *info = sseq->info;
                        int must_report = 1;
@@ -722,9 +725,10 @@ int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
 void tipc_nametbl_subscribe(struct tipc_subscription *s)
 {
        struct tipc_net *tn = net_generic(s->net, tipc_net_id);
-       u32 type = s->seq.type;
+       u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
        int index = hash(type);
        struct name_seq *seq;
+       struct tipc_name_seq ns;
 
        spin_lock_bh(&tn->nametbl_lock);
        seq = nametbl_find_seq(s->net, type);
@@ -735,8 +739,9 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
                tipc_nameseq_subscribe(seq, s);
                spin_unlock_bh(&seq->lock);
        } else {
+               tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
                pr_warn("Failed to create subscription for {%u,%u,%u}\n",
-                       s->seq.type, s->seq.lower, s->seq.upper);
+                       ns.type, ns.lower, ns.upper);
        }
        spin_unlock_bh(&tn->nametbl_lock);
 }
@@ -748,9 +753,10 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
 {
        struct tipc_net *tn = net_generic(s->net, tipc_net_id);
        struct name_seq *seq;
+       u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
 
        spin_lock_bh(&tn->nametbl_lock);
-       seq = nametbl_find_seq(s->net, s->seq.type);
+       seq = nametbl_find_seq(s->net, type);
        if (seq != NULL) {
                spin_lock_bh(&seq->lock);
                list_del_init(&s->nameseq_list);
index fa97d9649a2851f1e6d5c8a1f39f2e4f7603e61b..f8a8255a7182905ffa3af8e83bf5fd716db38400 100644 (file)
@@ -1637,9 +1637,12 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
        char *name;
        struct tipc_link *link;
        struct tipc_node *node;
+       struct sk_buff_head xmitq;
        struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
        struct net *net = sock_net(skb->sk);
 
+       __skb_queue_head_init(&xmitq);
+
        if (!info->attrs[TIPC_NLA_LINK])
                return -EINVAL;
 
@@ -1683,13 +1686,13 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
                        u32 tol;
 
                        tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
-                       tipc_link_set_tolerance(link, tol);
+                       tipc_link_set_tolerance(link, tol, &xmitq);
                }
                if (props[TIPC_NLA_PROP_PRIO]) {
                        u32 prio;
 
                        prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
-                       tipc_link_set_prio(link, prio);
+                       tipc_link_set_prio(link, prio, &xmitq);
                }
                if (props[TIPC_NLA_PROP_WIN]) {
                        u32 win;
@@ -1701,7 +1704,7 @@ int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
 
 out:
        tipc_node_read_unlock(node);
-
+       tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
        return res;
 }
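
With tipc_link_proto_xmit() removed, the link-level setters only build a STATE_MSG into a caller-supplied queue and the caller owns transmission, as tipc_nl_node_set_link() above now does. Condensed to its essentials (lookup and locking omitted, so treat it as a sketch rather than code that compiles outside node.c):

/* setters fill the queue, the caller transmits once at the end */
static void example_apply_link_props(struct net *net, struct tipc_node *node,
                                     struct tipc_link *link, int bearer_id,
                                     u32 tol, u32 prio)
{
        struct sk_buff_head xmitq;

        __skb_queue_head_init(&xmitq);
        tipc_link_set_tolerance(link, tol, &xmitq);     /* queues a STATE_MSG */
        tipc_link_set_prio(link, prio, &xmitq);
        tipc_bearer_xmit(net, bearer_id, &xmitq,
                         &node->links[bearer_id].maddr);
}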
 
index 922e04a43396db1f19fa6f1721a29840fb9b45d8..2446bfbaa309284e9d23dd590650e1576ec6b072 100644 (file)
@@ -571,13 +571,13 @@ static void tipc_work_stop(struct tipc_server *s)
 
 static int tipc_work_start(struct tipc_server *s)
 {
-       s->rcv_wq = alloc_workqueue("tipc_rcv", WQ_UNBOUND, 1);
+       s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
        if (!s->rcv_wq) {
                pr_err("can't start tipc receive workqueue\n");
                return -ENOMEM;
        }
 
-       s->send_wq = alloc_workqueue("tipc_send", WQ_UNBOUND, 1);
+       s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
        if (!s->send_wq) {
                pr_err("can't start tipc send workqueue\n");
                destroy_workqueue(s->rcv_wq);
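
alloc_ordered_workqueue(name, 0) gives an unbound workqueue that executes one work item at a time in queueing order; it expands to roughly the call below, so the old max_active == 1 behaviour is kept while the ordering intent becomes explicit:

        /* approximate expansion of alloc_ordered_workqueue("tipc_rcv", 0) */
        alloc_workqueue("tipc_rcv", WQ_UNBOUND | __WQ_ORDERED, 1);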
index 350cca33ee0a64b08ce6135aa130011263eba136..22963cafd5ede27d59ecb772d1cd5c4257cacb98 100644 (file)
@@ -92,25 +92,42 @@ static void tipc_subscrp_send_event(struct tipc_subscription *sub,
  *
  * Returns 1 if there is overlap, otherwise 0.
  */
-int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
                               u32 found_upper)
 {
-       if (found_lower < sub->seq.lower)
-               found_lower = sub->seq.lower;
-       if (found_upper > sub->seq.upper)
-               found_upper = sub->seq.upper;
+       if (found_lower < seq->lower)
+               found_lower = seq->lower;
+       if (found_upper > seq->upper)
+               found_upper = seq->upper;
        if (found_lower > found_upper)
                return 0;
        return 1;
 }
 
+u32 tipc_subscrp_convert_seq_type(u32 type, int swap)
+{
+       return htohl(type, swap);
+}
+
+void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
+                             struct tipc_name_seq *out)
+{
+       out->type = htohl(in->type, swap);
+       out->lower = htohl(in->lower, swap);
+       out->upper = htohl(in->upper, swap);
+}
+
 void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
                                 u32 found_upper, u32 event, u32 port_ref,
                                 u32 node, int must)
 {
-       if (!tipc_subscrp_check_overlap(sub, found_lower, found_upper))
+       struct tipc_name_seq seq;
+
+       tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
+       if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
                return;
-       if (!must && !(sub->filter & TIPC_SUB_PORTS))
+       if (!must &&
+           !(htohl(sub->evt.s.filter, sub->swap) & TIPC_SUB_PORTS))
                return;
 
        tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
@@ -171,12 +188,14 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
 static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
 {
        struct tipc_subscription *sub, *temp;
+       u32 timeout;
 
        spin_lock_bh(&subscriber->lock);
        /* Destroy any existing subscriptions for subscriber */
        list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
                                 subscrp_list) {
-               if (del_timer(&sub->timer)) {
+               timeout = htohl(sub->evt.s.timeout, sub->swap);
+               if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
                        tipc_subscrp_delete(sub);
                        tipc_subscrb_put(subscriber);
                }
@@ -200,13 +219,16 @@ static void tipc_subscrp_cancel(struct tipc_subscr *s,
                                struct tipc_subscriber *subscriber)
 {
        struct tipc_subscription *sub, *temp;
+       u32 timeout;
 
        spin_lock_bh(&subscriber->lock);
        /* Find first matching subscription, exit if not found */
        list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
                                 subscrp_list) {
                if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
-                       if (del_timer(&sub->timer)) {
+                       timeout = htohl(sub->evt.s.timeout, sub->swap);
+                       if ((timeout == TIPC_WAIT_FOREVER) ||
+                           del_timer(&sub->timer)) {
                                tipc_subscrp_delete(sub);
                                tipc_subscrb_put(subscriber);
                        }
@@ -216,66 +238,67 @@ static void tipc_subscrp_cancel(struct tipc_subscr *s,
        spin_unlock_bh(&subscriber->lock);
 }
 
-static int tipc_subscrp_create(struct net *net, struct tipc_subscr *s,
-                              struct tipc_subscriber *subscriber,
-                              struct tipc_subscription **sub_p)
+static struct tipc_subscription *tipc_subscrp_create(struct net *net,
+                                                    struct tipc_subscr *s,
+                                                    int swap)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
        struct tipc_subscription *sub;
-       int swap;
-
-       /* Determine subscriber's endianness */
-       swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
-
-       /* Detect & process a subscription cancellation request */
-       if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
-               s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
-               tipc_subscrp_cancel(s, subscriber);
-               return 0;
-       }
+       u32 filter = htohl(s->filter, swap);
 
        /* Refuse subscription if global limit exceeded */
        if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
                pr_warn("Subscription rejected, limit reached (%u)\n",
                        TIPC_MAX_SUBSCRIPTIONS);
-               return -EINVAL;
+               return NULL;
        }
 
        /* Allocate subscription object */
        sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
        if (!sub) {
                pr_warn("Subscription rejected, no memory\n");
-               return -ENOMEM;
+               return NULL;
        }
 
        /* Initialize subscription object */
        sub->net = net;
-       sub->seq.type = htohl(s->seq.type, swap);
-       sub->seq.lower = htohl(s->seq.lower, swap);
-       sub->seq.upper = htohl(s->seq.upper, swap);
-       sub->timeout = msecs_to_jiffies(htohl(s->timeout, swap));
-       sub->filter = htohl(s->filter, swap);
-       if ((!(sub->filter & TIPC_SUB_PORTS) ==
-            !(sub->filter & TIPC_SUB_SERVICE)) ||
-           (sub->seq.lower > sub->seq.upper)) {
+       if (((filter & TIPC_SUB_PORTS) && (filter & TIPC_SUB_SERVICE)) ||
+           (htohl(s->seq.lower, swap) > htohl(s->seq.upper, swap))) {
                pr_warn("Subscription rejected, illegal request\n");
                kfree(sub);
-               return -EINVAL;
+               return NULL;
        }
-       spin_lock_bh(&subscriber->lock);
-       list_add(&sub->subscrp_list, &subscriber->subscrp_list);
-       spin_unlock_bh(&subscriber->lock);
-       sub->subscriber = subscriber;
+
        sub->swap = swap;
        memcpy(&sub->evt.s, s, sizeof(*s));
        atomic_inc(&tn->subscription_count);
+       return sub;
+}
+
+static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
+                                  struct tipc_subscriber *subscriber, int swap)
+{
+       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_subscription *sub = NULL;
+       u32 timeout;
+
+       sub = tipc_subscrp_create(net, s, swap);
+       if (!sub)
+               return tipc_conn_terminate(tn->topsrv, subscriber->conid);
+
+       spin_lock_bh(&subscriber->lock);
+       list_add(&sub->subscrp_list, &subscriber->subscrp_list);
+       tipc_subscrb_get(subscriber);
+       sub->subscriber = subscriber;
+       tipc_nametbl_subscribe(sub);
+       spin_unlock_bh(&subscriber->lock);
+
+       timeout = htohl(sub->evt.s.timeout, swap);
+       if (timeout == TIPC_WAIT_FOREVER)
+               return;
+
        setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
-       if (sub->timeout != TIPC_WAIT_FOREVER)
-               sub->timeout += jiffies;
-       if (!mod_timer(&sub->timer, sub->timeout))
-               tipc_subscrb_get(subscriber);
-       *sub_p = sub;
-       return 0;
+       mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
 }
 
 /* Handle one termination request for the subscriber */
@@ -290,14 +313,20 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
                                void *buf, size_t len)
 {
        struct tipc_subscriber *subscriber = usr_data;
-       struct tipc_subscription *sub = NULL;
-       struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_subscr *s = (struct tipc_subscr *)buf;
+       int swap;
+
+       /* Determine subscriber's endianness */
+       swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE |
+                             TIPC_SUB_CANCEL));
+
+       /* Detect & process a subscription cancellation request */
+       if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
+               s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
+               return tipc_subscrp_cancel(s, subscriber);
+       }
 
-       tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
-       if (sub)
-               tipc_nametbl_subscribe(sub);
-       else
-               tipc_conn_terminate(tn->topsrv, subscriber->conid);
+       tipc_subscrp_subscribe(net, s, subscriber, swap);
 }
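
The subscription rework stops caching byte-swapped copies of seq, filter and timeout in struct tipc_subscription and instead converts from the raw wire-format event on demand via htohl(). htohl() itself is not in these hunks; it is presumably still the small conditional byte-swap helper in subscr.c, along the lines of:

        /* assumed shape of the existing helper */
        static u32 htohl(u32 in, int swap)
        {
                return swap ? swab32(in) : in;
        }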
 
 /* Handle one request to establish a new subscriber */
index 92ee18cc5fe6ef5567a4e52a72e66748ed4b395a..be60103082c923c0fd768f52c081af38eb42491b 100644 (file)
@@ -50,21 +50,15 @@ struct tipc_subscriber;
  * @subscriber: pointer to its subscriber
  * @seq: name sequence associated with subscription
  * @net: point to network namespace
- * @timeout: duration of subscription (in ms)
- * @filter: event filtering to be done for subscription
  * @timer: timer governing subscription duration (optional)
  * @nameseq_list: adjacent subscriptions in name sequence's subscription list
  * @subscrp_list: adjacent subscriptions in subscriber's subscription list
- * @server_ref: object reference of server port associated with subscription
  * @swap: indicates if subscriber uses opposite endianness in its messages
  * @evt: template for events generated by subscription
  */
 struct tipc_subscription {
        struct tipc_subscriber *subscriber;
-       struct tipc_name_seq seq;
        struct net *net;
-       unsigned long timeout;
-       u32 filter;
        struct timer_list timer;
        struct list_head nameseq_list;
        struct list_head subscrp_list;
@@ -72,11 +66,14 @@ struct tipc_subscription {
        struct tipc_event evt;
 };
 
-int tipc_subscrp_check_overlap(struct tipc_subscription *sub, u32 found_lower,
+int tipc_subscrp_check_overlap(struct tipc_name_seq *seq, u32 found_lower,
                               u32 found_upper);
 void tipc_subscrp_report_overlap(struct tipc_subscription *sub,
                                 u32 found_lower, u32 found_upper, u32 event,
                                 u32 port_ref, u32 node, int must);
+void tipc_subscrp_convert_seq(struct tipc_name_seq *in, int swap,
+                             struct tipc_name_seq *out);
+u32 tipc_subscrp_convert_seq_type(u32 type, int swap);
 int tipc_topsrv_start(struct net *net);
 void tipc_topsrv_stop(struct net *net);
 
index c5bf5ef2bf89476b6d7ab766b8591e6af2e869fd..a6d6654697779060ecdeeb34c75fcfe7e618f433 100644 (file)
@@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
        UNIXCB(skb).fp = NULL;
 
        for (i = scm->fp->count-1; i >= 0; i--)
-               unix_notinflight(scm->fp->fp[i]);
+               unix_notinflight(scm->fp->user, scm->fp->fp[i]);
 }
 
 static void unix_destruct_scm(struct sk_buff *skb)
@@ -1534,7 +1534,6 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 {
        int i;
        unsigned char max_level = 0;
-       int unix_sock_count = 0;
 
        if (too_many_unix_fds(current))
                return -ETOOMANYREFS;
@@ -1542,11 +1541,9 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
        for (i = scm->fp->count - 1; i >= 0; i--) {
                struct sock *sk = unix_get_socket(scm->fp->fp[i]);
 
-               if (sk) {
-                       unix_sock_count++;
+               if (sk)
                        max_level = max(max_level,
                                        unix_sk(sk)->recursion_level);
-               }
        }
        if (unlikely(max_level > MAX_RECURSION_LEVEL))
                return -ETOOMANYREFS;
@@ -1561,7 +1558,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
                return -ENOMEM;
 
        for (i = scm->fp->count - 1; i >= 0; i--)
-               unix_inflight(scm->fp->fp[i]);
+               unix_inflight(scm->fp->user, scm->fp->fp[i]);
        return max_level;
 }
 
@@ -2339,6 +2336,7 @@ again:
 
                        if (signal_pending(current)) {
                                err = sock_intr_errno(timeo);
+                               scm_destroy(&scm);
                                goto out;
                        }
 
index 8fcdc2283af50c5caecd409b79cae6028ca2c836..6a0d48525fcf9a71f54bb43495b200b300f5341e 100644 (file)
@@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
  * descriptor if it is for an AF_UNIX socket.
  */
 
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
 
@@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
                }
                unix_tot_inflight++;
        }
-       fp->f_cred->user->unix_inflight++;
+       user->unix_inflight++;
        spin_unlock(&unix_gc_lock);
 }
 
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
 {
        struct sock *s = unix_get_socket(fp);
 
@@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
                        list_del_init(&u->link);
                unix_tot_inflight--;
        }
-       fp->f_cred->user->unix_inflight--;
+       user->unix_inflight--;
        spin_unlock(&unix_gc_lock);
 }
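
The af_unix/garbage hunks change the in-flight fd accounting to charge the user_struct captured at send time (scm->fp->user) instead of fp->f_cred->user, the user who originally opened the file, so the uncharge always hits the same user_struct that was charged even when the fd is consumed or garbage-collected in a different process context. The pairing, reduced to two illustrative lines:

        /* attach path (unix_attach_fds): charge the sending user per fd */
        unix_inflight(scm->fp->user, fp);
        /* detach/gc path (unix_detach_fds, unix_gc): uncharge the same user */
        unix_notinflight(scm->fp->user, fp);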
 
index da72ed32f14385c3e218cda196e5b4c3511c896e..6c606120abfed96fe7fe4cdb23beb204e5ea109a 100644 (file)
@@ -50,8 +50,8 @@ config CFG80211_DEVELOPER_WARNINGS
        default n
        help
          This option enables some additional warnings that help
-         cfg80211 developers and driver developers, but that can
-         trigger due to races with userspace.
+         cfg80211 developers and driver developers, but beware that
+         they can also trigger due to races with userspace.
 
          For example, when a driver reports that it was disconnected
          from the AP, but the user disconnects manually at the same
@@ -61,19 +61,6 @@ config CFG80211_DEVELOPER_WARNINGS
          on it (or mac80211).
 
 
-config CFG80211_REG_DEBUG
-       bool "cfg80211 regulatory debugging"
-       depends on CFG80211
-       default n
-       ---help---
-         You can enable this if you want to debug regulatory changes.
-         For more information on cfg80211 regulatory refer to the wireless
-         wiki:
-
-         http://wireless.kernel.org/en/developers/Regulatory
-
-         If unsure, say N.
-
 config CFG80211_CERTIFICATION_ONUS
        bool "cfg80211 certification onus"
        depends on CFG80211 && EXPERT
@@ -123,7 +110,7 @@ config CFG80211_REG_RELAX_NO_IR
         interface which associated to an AP which userspace assumes or confirms
         to be an authorized master, i.e., with radar detection support and DFS
         capabilities. However, note that in order to not create daisy chain
-        scenarios, this relaxation is not allowed in cases that the BSS client
+        scenarios, this relaxation is not allowed in cases where the BSS client
         is associated to P2P GO and in addition the P2P GO instantiated on
         a channel due to this relaxation should not allow connection from
         non P2P clients.
@@ -148,7 +135,7 @@ config CFG80211_DEBUGFS
        depends on CFG80211
        depends on DEBUG_FS
        ---help---
-         You can enable this if you want to debugfs entries for cfg80211.
+         You can enable this if you want debugfs entries for cfg80211.
 
          If unsure, say N.
 
@@ -159,7 +146,7 @@ config CFG80211_INTERNAL_REGDB
        ---help---
          This option generates an internal data structure representing
          the wireless regulatory rules described in net/wireless/db.txt
-         and includes code to query that database.  This is an alternative
+         and includes code to query that database. This is an alternative
          to using CRDA for defining regulatory rules for the kernel.
 
          Using this option requires some parsing of the db.txt at build time,
@@ -172,7 +159,7 @@ config CFG80211_INTERNAL_REGDB
 
          http://wireless.kernel.org/en/developers/Regulatory
 
-         Most distributions have a CRDA package.  So if unsure, say N.
+         Most distributions have a CRDA package. So if unsure, say N.
 
 config CFG80211_CRDA_SUPPORT
        bool "support CRDA" if CFG80211_INTERNAL_REGDB
index b0915515640efed1ff4795103c0572aba48c38f4..9f1c4aa851efdf55ab81044b10ef58b7bd73ecf6 100644 (file)
@@ -352,6 +352,16 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
        WARN_ON(ops->add_station && !ops->del_station);
        WARN_ON(ops->add_mpath && !ops->del_mpath);
        WARN_ON(ops->join_mesh && !ops->leave_mesh);
+       WARN_ON(ops->start_p2p_device && !ops->stop_p2p_device);
+       WARN_ON(ops->start_ap && !ops->stop_ap);
+       WARN_ON(ops->join_ocb && !ops->leave_ocb);
+       WARN_ON(ops->suspend && !ops->resume);
+       WARN_ON(ops->sched_scan_start && !ops->sched_scan_stop);
+       WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
+       WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
+       WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
+       WARN_ON(ops->set_tx_power && !ops->get_tx_power);
+       WARN_ON(ops->set_antenna && !ops->get_antenna);
 
        alloc_size = sizeof(*rdev) + sizeof_priv;
 
@@ -1147,6 +1157,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
                return NOTIFY_DONE;
        }
 
+       wireless_nlevent_flush();
+
        return NOTIFY_OK;
 }
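
The new WARN_ON()s in wiphy_new_nm() make cfg80211 insist that drivers register certain callbacks in pairs; providing one half of a pair without the other now warns at wiphy allocation time. For example (hypothetical driver callbacks, prototypes omitted; only the field names come from the hunk):

/* pairs such as start_ap/stop_ap and suspend/resume must be supplied together */
static const struct cfg80211_ops example_ops = {
        .start_ap = example_start_ap,
        .stop_ap  = example_stop_ap,
        .suspend  = example_suspend,
        .resume   = example_resume,
        /* ... */
};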
 
index 3cd8195392416c1ee7f89884c3e0045ede182d41..71447cf863067ca7be13b7c0b5011a44a3645b49 100644 (file)
@@ -29,7 +29,8 @@
 #include <linux/ieee80211.h>
 #include <net/iw_handler.h>
 
-#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/crc32.h>
 
 #include <net/lib80211.h>
@@ -63,10 +64,10 @@ struct lib80211_tkip_data {
 
        int key_idx;
 
-       struct crypto_blkcipher *rx_tfm_arc4;
-       struct crypto_hash *rx_tfm_michael;
-       struct crypto_blkcipher *tx_tfm_arc4;
-       struct crypto_hash *tx_tfm_michael;
+       struct crypto_skcipher *rx_tfm_arc4;
+       struct crypto_ahash *rx_tfm_michael;
+       struct crypto_skcipher *tx_tfm_arc4;
+       struct crypto_ahash *tx_tfm_michael;
 
        /* scratch buffers for virt_to_page() (crypto API) */
        u8 rx_hdr[16], tx_hdr[16];
@@ -98,29 +99,29 @@ static void *lib80211_tkip_init(int key_idx)
 
        priv->key_idx = key_idx;
 
-       priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-                                               CRYPTO_ALG_ASYNC);
+       priv->tx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_arc4)) {
                priv->tx_tfm_arc4 = NULL;
                goto fail;
        }
 
-       priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-                                                CRYPTO_ALG_ASYNC);
+       priv->tx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm_michael)) {
                priv->tx_tfm_michael = NULL;
                goto fail;
        }
 
-       priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
-                                               CRYPTO_ALG_ASYNC);
+       priv->rx_tfm_arc4 = crypto_alloc_skcipher("ecb(arc4)", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_arc4)) {
                priv->rx_tfm_arc4 = NULL;
                goto fail;
        }
 
-       priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0,
-                                                CRYPTO_ALG_ASYNC);
+       priv->rx_tfm_michael = crypto_alloc_ahash("michael_mic", 0,
+                                                 CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm_michael)) {
                priv->rx_tfm_michael = NULL;
                goto fail;
@@ -130,14 +131,10 @@ static void *lib80211_tkip_init(int key_idx)
 
       fail:
        if (priv) {
-               if (priv->tx_tfm_michael)
-                       crypto_free_hash(priv->tx_tfm_michael);
-               if (priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(priv->tx_tfm_arc4);
-               if (priv->rx_tfm_michael)
-                       crypto_free_hash(priv->rx_tfm_michael);
-               if (priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(priv->rx_tfm_arc4);
+               crypto_free_ahash(priv->tx_tfm_michael);
+               crypto_free_skcipher(priv->tx_tfm_arc4);
+               crypto_free_ahash(priv->rx_tfm_michael);
+               crypto_free_skcipher(priv->rx_tfm_arc4);
                kfree(priv);
        }
 
@@ -148,14 +145,10 @@ static void lib80211_tkip_deinit(void *priv)
 {
        struct lib80211_tkip_data *_priv = priv;
        if (_priv) {
-               if (_priv->tx_tfm_michael)
-                       crypto_free_hash(_priv->tx_tfm_michael);
-               if (_priv->tx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->tx_tfm_arc4);
-               if (_priv->rx_tfm_michael)
-                       crypto_free_hash(_priv->rx_tfm_michael);
-               if (_priv->rx_tfm_arc4)
-                       crypto_free_blkcipher(_priv->rx_tfm_arc4);
+               crypto_free_ahash(_priv->tx_tfm_michael);
+               crypto_free_skcipher(_priv->tx_tfm_arc4);
+               crypto_free_ahash(_priv->rx_tfm_michael);
+               crypto_free_skcipher(_priv->rx_tfm_arc4);
        }
        kfree(priv);
 }
@@ -353,11 +346,12 @@ static int lib80211_tkip_hdr(struct sk_buff *skb, int hdr_len,
 static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct lib80211_tkip_data *tkey = priv;
-       struct blkcipher_desc desc = { .tfm = tkey->tx_tfm_arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, tkey->tx_tfm_arc4);
        int len;
        u8 rc4key[16], *pos, *icv;
        u32 crc;
        struct scatterlist sg;
+       int err;
 
        if (tkey->flags & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) {
                struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -382,9 +376,14 @@ static int lib80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
 
-       crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
+       crypto_skcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
        sg_init_one(&sg, pos, len + 4);
-       return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+       skcipher_request_set_tfm(req, tkey->tx_tfm_arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
+       return err;
 }
 
 /*
@@ -403,7 +402,7 @@ static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n,
 static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct lib80211_tkip_data *tkey = priv;
-       struct blkcipher_desc desc = { .tfm = tkey->rx_tfm_arc4 };
+       SKCIPHER_REQUEST_ON_STACK(req, tkey->rx_tfm_arc4);
        u8 rc4key[16];
        u8 keyidx, *pos;
        u32 iv32;
@@ -413,6 +412,7 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        u32 crc;
        struct scatterlist sg;
        int plen;
+       int err;
 
        hdr = (struct ieee80211_hdr *)skb->data;
 
@@ -465,9 +465,14 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 
        plen = skb->len - hdr_len - 12;
 
-       crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
+       crypto_skcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
        sg_init_one(&sg, pos, plen + 4);
-       if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) {
+       skcipher_request_set_tfm(req, tkey->rx_tfm_arc4);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+       err = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
+       if (err) {
                net_dbg_ratelimited("TKIP: failed to decrypt received packet from %pM\n",
                                    hdr->addr2);
                return -7;
@@ -505,11 +510,12 @@ static int lib80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        return keyidx;
 }
 
-static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
+static int michael_mic(struct crypto_ahash *tfm_michael, u8 * key, u8 * hdr,
                       u8 * data, size_t data_len, u8 * mic)
 {
-       struct hash_desc desc;
+       AHASH_REQUEST_ON_STACK(req, tfm_michael);
        struct scatterlist sg[2];
+       int err;
 
        if (tfm_michael == NULL) {
                pr_warn("%s(): tfm_michael == NULL\n", __func__);
@@ -519,12 +525,15 @@ static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr,
        sg_set_buf(&sg[0], hdr, 16);
        sg_set_buf(&sg[1], data, data_len);
 
-       if (crypto_hash_setkey(tfm_michael, key, 8))
+       if (crypto_ahash_setkey(tfm_michael, key, 8))
                return -1;
 
-       desc.tfm = tfm_michael;
-       desc.flags = 0;
-       return crypto_hash_digest(&desc, sg, data_len + 16, mic);
+       ahash_request_set_tfm(req, tfm_michael);
+       ahash_request_set_callback(req, 0, NULL, NULL);
+       ahash_request_set_crypt(req, sg, mic, data_len + 16);
+       err = crypto_ahash_digest(req);
+       ahash_request_zero(req);
+       return err;
 }
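
The michael_mic() conversion above uses the on-stack request idiom for the ahash API. For reference, a minimal sketch of a keyed one-shot digest with crypto_ahash, assuming a tfm already allocated with crypto_alloc_ahash("michael_mic", 0, CRYPTO_ALG_ASYNC) as in the hunk above; the function and parameter names are illustrative, not part of the patch:

	#include <crypto/hash.h>
	#include <linux/scatterlist.h>

	/* Compute a keyed digest of 'len' bytes from 'sg' into 'out' with one request. */
	static int ahash_digest_sketch(struct crypto_ahash *tfm, const u8 *key,
				       struct scatterlist *sg, unsigned int len, u8 *out)
	{
		AHASH_REQUEST_ON_STACK(req, tfm);
		int err;

		err = crypto_ahash_setkey(tfm, key, 8);	/* Michael MIC keys are 8 bytes */
		if (err)
			return err;

		ahash_request_set_tfm(req, tfm);
		ahash_request_set_callback(req, 0, NULL, NULL);	/* run synchronously */
		ahash_request_set_crypt(req, sg, out, len);
		err = crypto_ahash_digest(req);
		ahash_request_zero(req);	/* wipe request state left on the stack */
		return err;
	}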
 
 static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
@@ -645,10 +654,10 @@ static int lib80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
 {
        struct lib80211_tkip_data *tkey = priv;
        int keyidx;
-       struct crypto_hash *tfm = tkey->tx_tfm_michael;
-       struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4;
-       struct crypto_hash *tfm3 = tkey->rx_tfm_michael;
-       struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4;
+       struct crypto_ahash *tfm = tkey->tx_tfm_michael;
+       struct crypto_skcipher *tfm2 = tkey->tx_tfm_arc4;
+       struct crypto_ahash *tfm3 = tkey->rx_tfm_michael;
+       struct crypto_skcipher *tfm4 = tkey->rx_tfm_arc4;
 
        keyidx = tkey->key_idx;
        memset(tkey, 0, sizeof(*tkey));
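
The recurring pattern in the lib80211 TKIP hunks above (and in the WEP handler that follows) is the replacement of blkcipher_desc with an on-stack skcipher request. A minimal sketch of that pattern for an "ecb(arc4)" tfm and a 16-byte RC4 key; identifiers are illustrative, not part of the patch:

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>

	/* Encrypt 'len' bytes in place with an already-allocated ecb(arc4) tfm. */
	static int arc4_crypt_sketch(struct crypto_skcipher *tfm,
				     const u8 *key, u8 *data, unsigned int len)
	{
		SKCIPHER_REQUEST_ON_STACK(req, tfm);
		struct scatterlist sg;
		int err;

		err = crypto_skcipher_setkey(tfm, key, 16);
		if (err)
			return err;

		sg_init_one(&sg, data, len);
		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);	/* synchronous */
		skcipher_request_set_crypt(req, &sg, &sg, len, NULL);	/* src == dst, no IV */
		err = crypto_skcipher_encrypt(req);
		skcipher_request_zero(req);	/* wipe key material left on the stack */
		return err;
	}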
index 1c292e4ea7b60682d5b1d3adf4bd62548de9ea72..d05f58b0fd04f5f15a0d1acccaae44d2de7411df 100644 (file)
@@ -22,7 +22,7 @@
 
 #include <net/lib80211.h>
 
-#include <linux/crypto.h>
+#include <crypto/skcipher.h>
 #include <linux/crc32.h>
 
 MODULE_AUTHOR("Jouni Malinen");
@@ -35,8 +35,8 @@ struct lib80211_wep_data {
        u8 key[WEP_KEY_LEN + 1];
        u8 key_len;
        u8 key_idx;
-       struct crypto_blkcipher *tx_tfm;
-       struct crypto_blkcipher *rx_tfm;
+       struct crypto_skcipher *tx_tfm;
+       struct crypto_skcipher *rx_tfm;
 };
 
 static void *lib80211_wep_init(int keyidx)
@@ -48,13 +48,13 @@ static void *lib80211_wep_init(int keyidx)
                goto fail;
        priv->key_idx = keyidx;
 
-       priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->tx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->tx_tfm)) {
                priv->tx_tfm = NULL;
                goto fail;
        }
 
-       priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
+       priv->rx_tfm = crypto_alloc_skcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(priv->rx_tfm)) {
                priv->rx_tfm = NULL;
                goto fail;
@@ -66,10 +66,8 @@ static void *lib80211_wep_init(int keyidx)
 
       fail:
        if (priv) {
-               if (priv->tx_tfm)
-                       crypto_free_blkcipher(priv->tx_tfm);
-               if (priv->rx_tfm)
-                       crypto_free_blkcipher(priv->rx_tfm);
+               crypto_free_skcipher(priv->tx_tfm);
+               crypto_free_skcipher(priv->rx_tfm);
                kfree(priv);
        }
        return NULL;
@@ -79,10 +77,8 @@ static void lib80211_wep_deinit(void *priv)
 {
        struct lib80211_wep_data *_priv = priv;
        if (_priv) {
-               if (_priv->tx_tfm)
-                       crypto_free_blkcipher(_priv->tx_tfm);
-               if (_priv->rx_tfm)
-                       crypto_free_blkcipher(_priv->rx_tfm);
+               crypto_free_skcipher(_priv->tx_tfm);
+               crypto_free_skcipher(_priv->rx_tfm);
        }
        kfree(priv);
 }
@@ -133,11 +129,12 @@ static int lib80211_wep_build_iv(struct sk_buff *skb, int hdr_len,
 static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct lib80211_wep_data *wep = priv;
-       struct blkcipher_desc desc = { .tfm = wep->tx_tfm };
+       SKCIPHER_REQUEST_ON_STACK(req, wep->tx_tfm);
        u32 crc, klen, len;
        u8 *pos, *icv;
        struct scatterlist sg;
        u8 key[WEP_KEY_LEN + 3];
+       int err;
 
        /* other checks are in lib80211_wep_build_iv */
        if (skb_tailroom(skb) < 4)
@@ -165,9 +162,14 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;
 
-       crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
+       crypto_skcipher_setkey(wep->tx_tfm, key, klen);
        sg_init_one(&sg, pos, len + 4);
-       return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
+       skcipher_request_set_tfm(req, wep->tx_tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, len + 4, NULL);
+       err = crypto_skcipher_encrypt(req);
+       skcipher_request_zero(req);
+       return err;
 }
 
 /* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
@@ -180,11 +182,12 @@ static int lib80211_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
 static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
 {
        struct lib80211_wep_data *wep = priv;
-       struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
+       SKCIPHER_REQUEST_ON_STACK(req, wep->rx_tfm);
        u32 crc, klen, plen;
        u8 key[WEP_KEY_LEN + 3];
        u8 keyidx, *pos, icv[4];
        struct scatterlist sg;
+       int err;
 
        if (skb->len < hdr_len + 8)
                return -1;
@@ -205,9 +208,14 @@ static int lib80211_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
        /* Apply RC4 to data and compute CRC32 over decrypted data */
        plen = skb->len - hdr_len - 8;
 
-       crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
+       crypto_skcipher_setkey(wep->rx_tfm, key, klen);
        sg_init_one(&sg, pos, plen + 4);
-       if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
+       skcipher_request_set_tfm(req, wep->rx_tfm);
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       skcipher_request_set_crypt(req, &sg, &sg, plen + 4, NULL);
+       err = crypto_skcipher_decrypt(req);
+       skcipher_request_zero(req);
+       if (err)
                return -7;
 
        crc = ~crc32_le(~0, pos, plen);
index fb44fa3bf4efa750298163a15534572c43229b2e..ff328250bc442db6a9e8e5136496098d4270bf6f 100644 (file)
@@ -711,7 +711,7 @@ EXPORT_SYMBOL(cfg80211_rx_mgmt);
 
 void cfg80211_dfs_channels_update_work(struct work_struct *work)
 {
-       struct delayed_work *delayed_work;
+       struct delayed_work *delayed_work = to_delayed_work(work);
        struct cfg80211_registered_device *rdev;
        struct cfg80211_chan_def chandef;
        struct ieee80211_supported_band *sband;
@@ -721,7 +721,6 @@ void cfg80211_dfs_channels_update_work(struct work_struct *work)
        unsigned long timeout, next_time = 0;
        int bandid, i;
 
-       delayed_work = container_of(work, struct delayed_work, work);
        rdev = container_of(delayed_work, struct cfg80211_registered_device,
                            dfs_update_channels_wk);
        wiphy = &rdev->wiphy;
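
to_delayed_work() is the workqueue helper for the container_of() idiom removed here, so the open-coded cast can go away. Its definition in <linux/workqueue.h> is essentially:

	static inline struct delayed_work *to_delayed_work(struct work_struct *work)
	{
		return container_of(work, struct delayed_work, work);
	}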
index d4786f2802aa3c94f0a9bfcba846c98ec5b2b249..268cb493f6a5496bf596d517b6bc62c348e6e9d9 100644 (file)
@@ -401,6 +401,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
        [NL80211_ATTR_SCHED_SCAN_DELAY] = { .type = NLA_U32 },
        [NL80211_ATTR_REG_INDOOR] = { .type = NLA_FLAG },
+       [NL80211_ATTR_PBSS] = { .type = NLA_FLAG },
 };
 
 /* policy for the key attributes */
@@ -3461,6 +3462,10 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
                        return PTR_ERR(params.acl);
        }
 
+       params.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+       if (params.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ])
+               return -EOPNOTSUPP;
+
        wdev_lock(wdev);
        err = rdev_start_ap(rdev, dev, &params);
        if (!err) {
@@ -7980,6 +7985,12 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
                connect.flags |= ASSOC_REQ_USE_RRM;
        }
 
+       connect.pbss = nla_get_flag(info->attrs[NL80211_ATTR_PBSS]);
+       if (connect.pbss && !rdev->wiphy.bands[IEEE80211_BAND_60GHZ]) {
+               kzfree(connkeys);
+               return -EOPNOTSUPP;
+       }
+
        wdev_lock(dev->ieee80211_ptr);
        err = cfg80211_connect(rdev, dev, &connect, connkeys, NULL);
        wdev_unlock(dev->ieee80211_ptr);
index 3b0ce1c484a3dd33f2e2c81edeeadb79a48e2dcc..c5fb317eee68f15a4665a090f7aee16b08ec6765 100644 (file)
 #include "regdb.h"
 #include "nl80211.h"
 
-#ifdef CONFIG_CFG80211_REG_DEBUG
-#define REG_DBG_PRINT(format, args...)                 \
-       printk(KERN_DEBUG pr_fmt(format), ##args)
-#else
-#define REG_DBG_PRINT(args...)
-#endif
-
 /*
  * Grace period we give before making sure all current interfaces reside on
  * channels allowed by the current regulatory domain.
@@ -178,12 +171,10 @@ enum nl80211_dfs_regions reg_get_dfs_region(struct wiphy *wiphy)
        if (wiphy_regd->dfs_region == regd->dfs_region)
                goto out;
 
-       REG_DBG_PRINT("%s: device specific dfs_region "
-                     "(%s) disagrees with cfg80211's "
-                     "central dfs_region (%s)\n",
-                     dev_name(&wiphy->dev),
-                     reg_dfs_region_str(wiphy_regd->dfs_region),
-                     reg_dfs_region_str(regd->dfs_region));
+       pr_debug("%s: device specific dfs_region (%s) disagrees with cfg80211's central dfs_region (%s)\n",
+                dev_name(&wiphy->dev),
+                reg_dfs_region_str(wiphy_regd->dfs_region),
+                reg_dfs_region_str(regd->dfs_region));
 
 out:
        return regd->dfs_region;
@@ -231,20 +222,22 @@ static const struct ieee80211_regdomain world_regdom = {
                /* IEEE 802.11b/g, channels 1..11 */
                REG_RULE(2412-10, 2462+10, 40, 6, 20, 0),
                /* IEEE 802.11b/g, channels 12..13. */
-               REG_RULE(2467-10, 2472+10, 40, 6, 20,
-                       NL80211_RRF_NO_IR),
+               REG_RULE(2467-10, 2472+10, 20, 6, 20,
+                       NL80211_RRF_NO_IR | NL80211_RRF_AUTO_BW),
                /* IEEE 802.11 channel 14 - Only JP enables
                 * this and for 802.11b only */
                REG_RULE(2484-10, 2484+10, 20, 6, 20,
                        NL80211_RRF_NO_IR |
                        NL80211_RRF_NO_OFDM),
                /* IEEE 802.11a, channel 36..48 */
-               REG_RULE(5180-10, 5240+10, 160, 6, 20,
-                        NL80211_RRF_NO_IR),
+               REG_RULE(5180-10, 5240+10, 80, 6, 20,
+                        NL80211_RRF_NO_IR |
+                        NL80211_RRF_AUTO_BW),
 
                /* IEEE 802.11a, channel 52..64 - DFS required */
-               REG_RULE(5260-10, 5320+10, 160, 6, 20,
+               REG_RULE(5260-10, 5320+10, 80, 6, 20,
                        NL80211_RRF_NO_IR |
+                       NL80211_RRF_AUTO_BW |
                        NL80211_RRF_DFS),
 
                /* IEEE 802.11a, channel 100..144 - DFS required */
@@ -541,7 +534,7 @@ static DECLARE_DELAYED_WORK(crda_timeout, crda_timeout_work);
 
 static void crda_timeout_work(struct work_struct *work)
 {
-       REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
+       pr_debug("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
        rtnl_lock();
        reg_crda_timeouts++;
        restore_regulatory_settings(true);
@@ -583,7 +576,7 @@ static int call_crda(const char *alpha2)
 
        if (!is_world_regdom((char *) alpha2))
                pr_debug("Calling CRDA for country: %c%c\n",
-                       alpha2[0], alpha2[1]);
+                        alpha2[0], alpha2[1]);
        else
                pr_debug("Calling CRDA to update world regulatory domain\n");
 
@@ -1130,42 +1123,6 @@ const char *reg_initiator_name(enum nl80211_reg_initiator initiator)
 }
 EXPORT_SYMBOL(reg_initiator_name);
 
-static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
-                                   struct ieee80211_channel *chan,
-                                   const struct ieee80211_reg_rule *reg_rule)
-{
-#ifdef CONFIG_CFG80211_REG_DEBUG
-       const struct ieee80211_power_rule *power_rule;
-       const struct ieee80211_freq_range *freq_range;
-       char max_antenna_gain[32], bw[32];
-
-       power_rule = &reg_rule->power_rule;
-       freq_range = &reg_rule->freq_range;
-
-       if (!power_rule->max_antenna_gain)
-               snprintf(max_antenna_gain, sizeof(max_antenna_gain), "N/A");
-       else
-               snprintf(max_antenna_gain, sizeof(max_antenna_gain), "%d mBi",
-                        power_rule->max_antenna_gain);
-
-       if (reg_rule->flags & NL80211_RRF_AUTO_BW)
-               snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO",
-                        freq_range->max_bandwidth_khz,
-                        reg_get_max_bandwidth(regd, reg_rule));
-       else
-               snprintf(bw, sizeof(bw), "%d KHz",
-                        freq_range->max_bandwidth_khz);
-
-       REG_DBG_PRINT("Updating information on frequency %d MHz with regulatory rule:\n",
-                     chan->center_freq);
-
-       REG_DBG_PRINT("(%d KHz - %d KHz @ %s), (%s, %d mBm)\n",
-                     freq_range->start_freq_khz, freq_range->end_freq_khz,
-                     bw, max_antenna_gain,
-                     power_rule->max_eirp);
-#endif
-}
-
 static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd,
                                          const struct ieee80211_reg_rule *reg_rule,
                                          const struct ieee80211_channel *chan)
@@ -1240,20 +1197,19 @@ static void handle_channel(struct wiphy *wiphy,
                if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
                    request_wiphy && request_wiphy == wiphy &&
                    request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
-                       REG_DBG_PRINT("Disabling freq %d MHz for good\n",
-                                     chan->center_freq);
+                       pr_debug("Disabling freq %d MHz for good\n",
+                                chan->center_freq);
                        chan->orig_flags |= IEEE80211_CHAN_DISABLED;
                        chan->flags = chan->orig_flags;
                } else {
-                       REG_DBG_PRINT("Disabling freq %d MHz\n",
-                                     chan->center_freq);
+                       pr_debug("Disabling freq %d MHz\n",
+                                chan->center_freq);
                        chan->flags |= IEEE80211_CHAN_DISABLED;
                }
                return;
        }
 
        regd = reg_get_regdomain(wiphy);
-       chan_reg_rule_print_dbg(regd, chan, reg_rule);
 
        power_rule = &reg_rule->power_rule;
        bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
@@ -1391,18 +1347,15 @@ static bool ignore_reg_update(struct wiphy *wiphy,
                return true;
 
        if (!lr) {
-               REG_DBG_PRINT("Ignoring regulatory request set by %s "
-                             "since last_request is not set\n",
-                             reg_initiator_name(initiator));
+               pr_debug("Ignoring regulatory request set by %s since last_request is not set\n",
+                        reg_initiator_name(initiator));
                return true;
        }
 
        if (initiator == NL80211_REGDOM_SET_BY_CORE &&
            wiphy->regulatory_flags & REGULATORY_CUSTOM_REG) {
-               REG_DBG_PRINT("Ignoring regulatory request set by %s "
-                             "since the driver uses its own custom "
-                             "regulatory domain\n",
-                             reg_initiator_name(initiator));
+               pr_debug("Ignoring regulatory request set by %s since the driver uses its own custom regulatory domain\n",
+                        reg_initiator_name(initiator));
                return true;
        }
 
@@ -1413,10 +1366,8 @@ static bool ignore_reg_update(struct wiphy *wiphy,
        if (wiphy_strict_alpha2_regd(wiphy) && !wiphy->regd &&
            initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
            !is_world_regdom(lr->alpha2)) {
-               REG_DBG_PRINT("Ignoring regulatory request set by %s "
-                             "since the driver requires its own regulatory "
-                             "domain to be set first\n",
-                             reg_initiator_name(initiator));
+               pr_debug("Ignoring regulatory request set by %s since the driver requires its own regulatory domain to be set first\n",
+                        reg_initiator_name(initiator));
                return true;
        }
 
@@ -1697,7 +1648,7 @@ static void reg_check_chans_work(struct work_struct *work)
 {
        struct cfg80211_registered_device *rdev;
 
-       REG_DBG_PRINT("Verifying active interfaces after reg change\n");
+       pr_debug("Verifying active interfaces after reg change\n");
        rtnl_lock();
 
        list_for_each_entry(rdev, &cfg80211_rdev_list, list)
@@ -1779,8 +1730,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
        }
 
        if (IS_ERR(reg_rule)) {
-               REG_DBG_PRINT("Disabling freq %d MHz as custom regd has no rule that fits it\n",
-                             chan->center_freq);
+               pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n",
+                        chan->center_freq);
                if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
                        chan->flags |= IEEE80211_CHAN_DISABLED;
                } else {
@@ -1790,8 +1741,6 @@ static void handle_channel_custom(struct wiphy *wiphy,
                return;
        }
 
-       chan_reg_rule_print_dbg(regd, chan, reg_rule);
-
        power_rule = &reg_rule->power_rule;
        bw_flags = reg_rule_to_chan_bw_flags(regd, reg_rule, chan);
 
@@ -2522,7 +2471,7 @@ static void restore_alpha2(char *alpha2, bool reset_user)
        if (is_user_regdom_saved()) {
                /* Unless we're asked to ignore it and reset it */
                if (reset_user) {
-                       REG_DBG_PRINT("Restoring regulatory settings including user preference\n");
+                       pr_debug("Restoring regulatory settings including user preference\n");
                        user_alpha2[0] = '9';
                        user_alpha2[1] = '7';
 
@@ -2532,24 +2481,24 @@ static void restore_alpha2(char *alpha2, bool reset_user)
                         * back as they were for a full restore.
                         */
                        if (!is_world_regdom(ieee80211_regdom)) {
-                               REG_DBG_PRINT("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
-                                             ieee80211_regdom[0], ieee80211_regdom[1]);
+                               pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
+                                        ieee80211_regdom[0], ieee80211_regdom[1]);
                                alpha2[0] = ieee80211_regdom[0];
                                alpha2[1] = ieee80211_regdom[1];
                        }
                } else {
-                       REG_DBG_PRINT("Restoring regulatory settings while preserving user preference for: %c%c\n",
-                                     user_alpha2[0], user_alpha2[1]);
+                       pr_debug("Restoring regulatory settings while preserving user preference for: %c%c\n",
+                                user_alpha2[0], user_alpha2[1]);
                        alpha2[0] = user_alpha2[0];
                        alpha2[1] = user_alpha2[1];
                }
        } else if (!is_world_regdom(ieee80211_regdom)) {
-               REG_DBG_PRINT("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
-                             ieee80211_regdom[0], ieee80211_regdom[1]);
+               pr_debug("Keeping preference on module parameter ieee80211_regdom: %c%c\n",
+                        ieee80211_regdom[0], ieee80211_regdom[1]);
                alpha2[0] = ieee80211_regdom[0];
                alpha2[1] = ieee80211_regdom[1];
        } else
-               REG_DBG_PRINT("Restoring regulatory settings\n");
+               pr_debug("Restoring regulatory settings\n");
 }
 
 static void restore_custom_reg_settings(struct wiphy *wiphy)
@@ -2661,14 +2610,14 @@ static void restore_regulatory_settings(bool reset_user)
        list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
        spin_unlock(&reg_requests_lock);
 
-       REG_DBG_PRINT("Kicking the queue\n");
+       pr_debug("Kicking the queue\n");
 
        schedule_work(&reg_work);
 }
 
 void regulatory_hint_disconnect(void)
 {
-       REG_DBG_PRINT("All devices are disconnected, going to restore regulatory settings\n");
+       pr_debug("All devices are disconnected, going to restore regulatory settings\n");
        restore_regulatory_settings(false);
 }
 
@@ -2716,10 +2665,10 @@ int regulatory_hint_found_beacon(struct wiphy *wiphy,
        if (!reg_beacon)
                return -ENOMEM;
 
-       REG_DBG_PRINT("Found new beacon on frequency: %d MHz (Ch %d) on %s\n",
-                     beacon_chan->center_freq,
-                     ieee80211_frequency_to_channel(beacon_chan->center_freq),
-                     wiphy_name(wiphy));
+       pr_debug("Found new beacon on frequency: %d MHz (Ch %d) on %s\n",
+                beacon_chan->center_freq,
+                ieee80211_frequency_to_channel(beacon_chan->center_freq),
+                wiphy_name(wiphy));
 
        memcpy(&reg_beacon->chan, beacon_chan,
               sizeof(struct ieee80211_channel));
@@ -2745,7 +2694,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
        const struct ieee80211_power_rule *power_rule = NULL;
        char bw[32], cac_time[32];
 
-       pr_info("  (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n");
+       pr_debug("  (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp), (dfs_cac_time)\n");
 
        for (i = 0; i < rd->n_reg_rules; i++) {
                reg_rule = &rd->reg_rules[i];
@@ -2772,7 +2721,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
                 * in certain regions
                 */
                if (power_rule->max_antenna_gain)
-                       pr_info("  (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n",
+                       pr_debug("  (%d KHz - %d KHz @ %s), (%d mBi, %d mBm), (%s)\n",
                                freq_range->start_freq_khz,
                                freq_range->end_freq_khz,
                                bw,
@@ -2780,7 +2729,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
                                power_rule->max_eirp,
                                cac_time);
                else
-                       pr_info("  (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n",
+                       pr_debug("  (%d KHz - %d KHz @ %s), (N/A, %d mBm), (%s)\n",
                                freq_range->start_freq_khz,
                                freq_range->end_freq_khz,
                                bw,
@@ -2798,8 +2747,7 @@ bool reg_supported_dfs_region(enum nl80211_dfs_regions dfs_region)
        case NL80211_DFS_JP:
                return true;
        default:
-               REG_DBG_PRINT("Ignoring uknown DFS master region: %d\n",
-                             dfs_region);
+               pr_debug("Ignoring uknown DFS master region: %d\n", dfs_region);
                return false;
        }
 }
@@ -2813,35 +2761,35 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
                        struct cfg80211_registered_device *rdev;
                        rdev = cfg80211_rdev_by_wiphy_idx(lr->wiphy_idx);
                        if (rdev) {
-                               pr_info("Current regulatory domain updated by AP to: %c%c\n",
+                               pr_debug("Current regulatory domain updated by AP to: %c%c\n",
                                        rdev->country_ie_alpha2[0],
                                        rdev->country_ie_alpha2[1]);
                        } else
-                               pr_info("Current regulatory domain intersected:\n");
+                               pr_debug("Current regulatory domain intersected:\n");
                } else
-                       pr_info("Current regulatory domain intersected:\n");
+                       pr_debug("Current regulatory domain intersected:\n");
        } else if (is_world_regdom(rd->alpha2)) {
-               pr_info("World regulatory domain updated:\n");
+               pr_debug("World regulatory domain updated:\n");
        } else {
                if (is_unknown_alpha2(rd->alpha2))
-                       pr_info("Regulatory domain changed to driver built-in settings (unknown country)\n");
+                       pr_debug("Regulatory domain changed to driver built-in settings (unknown country)\n");
                else {
                        if (reg_request_cell_base(lr))
-                               pr_info("Regulatory domain changed to country: %c%c by Cell Station\n",
+                               pr_debug("Regulatory domain changed to country: %c%c by Cell Station\n",
                                        rd->alpha2[0], rd->alpha2[1]);
                        else
-                               pr_info("Regulatory domain changed to country: %c%c\n",
+                               pr_debug("Regulatory domain changed to country: %c%c\n",
                                        rd->alpha2[0], rd->alpha2[1]);
                }
        }
 
-       pr_info(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region));
+       pr_debug(" DFS Master region: %s", reg_dfs_region_str(rd->dfs_region));
        print_rd_rules(rd);
 }
 
 static void print_regdomain_info(const struct ieee80211_regdomain *rd)
 {
-       pr_info("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
+       pr_debug("Regulatory domain: %c%c\n", rd->alpha2[0], rd->alpha2[1]);
        print_rd_rules(rd);
 }
 
@@ -2862,7 +2810,8 @@ static int reg_set_rd_user(const struct ieee80211_regdomain *rd,
                return -EALREADY;
 
        if (!is_valid_rd(rd)) {
-               pr_err("Invalid regulatory domain detected:\n");
+               pr_err("Invalid regulatory domain detected: %c%c\n",
+                      rd->alpha2[0], rd->alpha2[1]);
                print_regdomain_info(rd);
                return -EINVAL;
        }
@@ -2898,7 +2847,8 @@ static int reg_set_rd_driver(const struct ieee80211_regdomain *rd,
                return -EALREADY;
 
        if (!is_valid_rd(rd)) {
-               pr_err("Invalid regulatory domain detected:\n");
+               pr_err("Invalid regulatory domain detected: %c%c\n",
+                      rd->alpha2[0], rd->alpha2[1]);
                print_regdomain_info(rd);
                return -EINVAL;
        }
@@ -2956,7 +2906,8 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
         */
 
        if (!is_valid_rd(rd)) {
-               pr_err("Invalid regulatory domain detected:\n");
+               pr_err("Invalid regulatory domain detected: %c%c\n",
+                      rd->alpha2[0], rd->alpha2[1]);
                print_regdomain_info(rd);
                return -EINVAL;
        }
index 8020b5b094d4c8fba0c0f2431f8af7d8ecc40dca..79bd3a171caa83ed8ae811b744fd271475b0257a 100644 (file)
@@ -264,7 +264,7 @@ static struct cfg80211_bss *cfg80211_get_conn_bss(struct wireless_dev *wdev)
                               wdev->conn->params.bssid,
                               wdev->conn->params.ssid,
                               wdev->conn->params.ssid_len,
-                              IEEE80211_BSS_TYPE_ESS,
+                              wdev->conn_bss_type,
                               IEEE80211_PRIVACY(wdev->conn->params.privacy));
        if (!bss)
                return NULL;
@@ -687,7 +687,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
                WARN_ON_ONCE(!wiphy_to_rdev(wdev->wiphy)->ops->connect);
                bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
                                       wdev->ssid, wdev->ssid_len,
-                                      IEEE80211_BSS_TYPE_ESS,
+                                      wdev->conn_bss_type,
                                       IEEE80211_PRIVACY_ANY);
                if (bss)
                        cfg80211_hold_bss(bss_from_pub(bss));
@@ -846,7 +846,7 @@ void cfg80211_roamed(struct net_device *dev,
 
        bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, wdev->ssid,
                               wdev->ssid_len,
-                              IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+                              wdev->conn_bss_type, IEEE80211_PRIVACY_ANY);
        if (WARN_ON(!bss))
                return;
 
@@ -1017,6 +1017,9 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
        memcpy(wdev->ssid, connect->ssid, connect->ssid_len);
        wdev->ssid_len = connect->ssid_len;
 
+       wdev->conn_bss_type = connect->pbss ? IEEE80211_BSS_TYPE_PBSS :
+                                             IEEE80211_BSS_TYPE_ESS;
+
        if (!rdev->ops->connect)
                err = cfg80211_sme_connect(wdev, connect, prev_bssid);
        else
index 92770427b211f559be4735584d74043ec6301802..6e4eb35551776d497a85d91c1f0fab2ecf25d72b 100644 (file)
@@ -393,9 +393,9 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb)
 }
 EXPORT_SYMBOL(ieee80211_get_hdrlen_from_skb);
 
-unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+static unsigned int __ieee80211_get_mesh_hdrlen(u8 flags)
 {
-       int ae = meshhdr->flags & MESH_FLAGS_AE;
+       int ae = flags & MESH_FLAGS_AE;
        /* 802.11-2012, 8.2.4.7.3 */
        switch (ae) {
        default:
@@ -407,21 +407,31 @@ unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
                return 18;
        }
 }
+
+unsigned int ieee80211_get_mesh_hdrlen(struct ieee80211s_hdr *meshhdr)
+{
+       return __ieee80211_get_mesh_hdrlen(meshhdr->flags);
+}
 EXPORT_SYMBOL(ieee80211_get_mesh_hdrlen);
 
-int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
-                          enum nl80211_iftype iftype)
+static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
+                                   const u8 *addr, enum nl80211_iftype iftype)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
-       u16 hdrlen, ethertype;
-       u8 *payload;
-       u8 dst[ETH_ALEN];
-       u8 src[ETH_ALEN] __aligned(2);
+       struct {
+               u8 hdr[ETH_ALEN] __aligned(2);
+               __be16 proto;
+       } payload;
+       struct ethhdr tmp;
+       u16 hdrlen;
+       u8 mesh_flags = 0;
 
        if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
                return -1;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
+       if (skb->len < hdrlen + 8)
+               return -1;
 
        /* convert IEEE 802.11 header + possible LLC headers into Ethernet
         * header
@@ -432,8 +442,11 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
         *   1     0   BSSID SA    DA    n/a
         *   1     1   RA    TA    DA    SA
         */
-       memcpy(dst, ieee80211_get_DA(hdr), ETH_ALEN);
-       memcpy(src, ieee80211_get_SA(hdr), ETH_ALEN);
+       memcpy(tmp.h_dest, ieee80211_get_DA(hdr), ETH_ALEN);
+       memcpy(tmp.h_source, ieee80211_get_SA(hdr), ETH_ALEN);
+
+       if (iftype == NL80211_IFTYPE_MESH_POINT)
+               skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
 
        switch (hdr->frame_control &
                cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
@@ -450,44 +463,31 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
                             iftype != NL80211_IFTYPE_STATION))
                        return -1;
                if (iftype == NL80211_IFTYPE_MESH_POINT) {
-                       struct ieee80211s_hdr *meshdr =
-                               (struct ieee80211s_hdr *) (skb->data + hdrlen);
-                       /* make sure meshdr->flags is on the linear part */
-                       if (!pskb_may_pull(skb, hdrlen + 1))
-                               return -1;
-                       if (meshdr->flags & MESH_FLAGS_AE_A4)
+                       if (mesh_flags & MESH_FLAGS_AE_A4)
                                return -1;
-                       if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
+                       if (mesh_flags & MESH_FLAGS_AE_A5_A6) {
                                skb_copy_bits(skb, hdrlen +
                                        offsetof(struct ieee80211s_hdr, eaddr1),
-                                       dst, ETH_ALEN);
-                               skb_copy_bits(skb, hdrlen +
-                                       offsetof(struct ieee80211s_hdr, eaddr2),
-                                       src, ETH_ALEN);
+                                       tmp.h_dest, 2 * ETH_ALEN);
                        }
-                       hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+                       hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
                }
                break;
        case cpu_to_le16(IEEE80211_FCTL_FROMDS):
                if ((iftype != NL80211_IFTYPE_STATION &&
                     iftype != NL80211_IFTYPE_P2P_CLIENT &&
                     iftype != NL80211_IFTYPE_MESH_POINT) ||
-                   (is_multicast_ether_addr(dst) &&
-                    ether_addr_equal(src, addr)))
+                   (is_multicast_ether_addr(tmp.h_dest) &&
+                    ether_addr_equal(tmp.h_source, addr)))
                        return -1;
                if (iftype == NL80211_IFTYPE_MESH_POINT) {
-                       struct ieee80211s_hdr *meshdr =
-                               (struct ieee80211s_hdr *) (skb->data + hdrlen);
-                       /* make sure meshdr->flags is on the linear part */
-                       if (!pskb_may_pull(skb, hdrlen + 1))
-                               return -1;
-                       if (meshdr->flags & MESH_FLAGS_AE_A5_A6)
+                       if (mesh_flags & MESH_FLAGS_AE_A5_A6)
                                return -1;
-                       if (meshdr->flags & MESH_FLAGS_AE_A4)
+                       if (mesh_flags & MESH_FLAGS_AE_A4)
                                skb_copy_bits(skb, hdrlen +
                                        offsetof(struct ieee80211s_hdr, eaddr1),
-                                       src, ETH_ALEN);
-                       hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+                                       tmp.h_source, ETH_ALEN);
+                       hdrlen += __ieee80211_get_mesh_hdrlen(mesh_flags);
                }
                break;
        case cpu_to_le16(0):
@@ -498,33 +498,33 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
                break;
        }
 
-       if (!pskb_may_pull(skb, hdrlen + 8))
-               return -1;
-
-       payload = skb->data + hdrlen;
-       ethertype = (payload[6] << 8) | payload[7];
+       skb_copy_bits(skb, hdrlen, &payload, sizeof(payload));
+       tmp.h_proto = payload.proto;
 
-       if (likely((ether_addr_equal(payload, rfc1042_header) &&
-                   ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
-                  ether_addr_equal(payload, bridge_tunnel_header))) {
+       if (likely((ether_addr_equal(payload.hdr, rfc1042_header) &&
+                   tmp.h_proto != htons(ETH_P_AARP) &&
+                   tmp.h_proto != htons(ETH_P_IPX)) ||
+                  ether_addr_equal(payload.hdr, bridge_tunnel_header)))
                /* remove RFC1042 or Bridge-Tunnel encapsulation and
                 * replace EtherType */
-               skb_pull(skb, hdrlen + 6);
-               memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
-               memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
-       } else {
-               struct ethhdr *ehdr;
-               __be16 len;
+               hdrlen += ETH_ALEN + 2;
+       else
+               tmp.h_proto = htons(skb->len);
 
-               skb_pull(skb, hdrlen);
-               len = htons(skb->len);
+       pskb_pull(skb, hdrlen);
+
+       if (!ehdr)
                ehdr = (struct ethhdr *) skb_push(skb, sizeof(struct ethhdr));
-               memcpy(ehdr->h_dest, dst, ETH_ALEN);
-               memcpy(ehdr->h_source, src, ETH_ALEN);
-               ehdr->h_proto = len;
-       }
+       memcpy(ehdr, &tmp, sizeof(tmp));
+
        return 0;
 }
+
+int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
+                          enum nl80211_iftype iftype)
+{
+       return __ieee80211_data_to_8023(skb, NULL, addr, iftype);
+}
 EXPORT_SYMBOL(ieee80211_data_to_8023);
 
 int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
@@ -644,70 +644,75 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
 }
 EXPORT_SYMBOL(ieee80211_data_from_8023);
 
+static struct sk_buff *
+__ieee80211_amsdu_copy(struct sk_buff *skb, unsigned int hlen,
+                      int offset, int len)
+{
+       struct sk_buff *frame;
+
+       if (skb->len - offset < len)
+               return NULL;
+
+       /*
+        * Allocate and reserve two bytes more for payload
+        * alignment since sizeof(struct ethhdr) is 14.
+        */
+       frame = dev_alloc_skb(hlen + sizeof(struct ethhdr) + 2 + len);
+
+       skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
+       skb_copy_bits(skb, offset, skb_put(frame, len), len);
+
+       return frame;
+}
 
 void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
                              const u8 *addr, enum nl80211_iftype iftype,
                              const unsigned int extra_headroom,
                              bool has_80211_header)
 {
+       unsigned int hlen = ALIGN(extra_headroom, 4);
        struct sk_buff *frame = NULL;
        u16 ethertype;
        u8 *payload;
-       const struct ethhdr *eth;
-       int remaining, err;
-       u8 dst[ETH_ALEN], src[ETH_ALEN];
+       int offset = 0, remaining, err;
+       struct ethhdr eth;
+       bool reuse_skb = true;
+       bool last = false;
 
        if (has_80211_header) {
-               err = ieee80211_data_to_8023(skb, addr, iftype);
+               err = __ieee80211_data_to_8023(skb, &eth, addr, iftype);
                if (err)
                        goto out;
-
-               /* skip the wrapping header */
-               eth = (struct ethhdr *) skb_pull(skb, sizeof(struct ethhdr));
-               if (!eth)
-                       goto out;
-       } else {
-               eth = (struct ethhdr *) skb->data;
        }
 
-       while (skb != frame) {
+       while (!last) {
+               unsigned int subframe_len;
+               int len;
                u8 padding;
-               __be16 len = eth->h_proto;
-               unsigned int subframe_len = sizeof(struct ethhdr) + ntohs(len);
-
-               remaining = skb->len;
-               memcpy(dst, eth->h_dest, ETH_ALEN);
-               memcpy(src, eth->h_source, ETH_ALEN);
 
+               skb_copy_bits(skb, offset, &eth, sizeof(eth));
+               len = ntohs(eth.h_proto);
+               subframe_len = sizeof(struct ethhdr) + len;
                padding = (4 - subframe_len) & 0x3;
+
                /* the last MSDU has no padding */
+               remaining = skb->len - offset;
                if (subframe_len > remaining)
                        goto purge;
 
-               skb_pull(skb, sizeof(struct ethhdr));
+               offset += sizeof(struct ethhdr);
                /* reuse skb for the last subframe */
-               if (remaining <= subframe_len + padding)
+               last = remaining <= subframe_len + padding;
+               if (!skb_is_nonlinear(skb) && last) {
+                       skb_pull(skb, offset);
                        frame = skb;
-               else {
-                       unsigned int hlen = ALIGN(extra_headroom, 4);
-                       /*
-                        * Allocate and reserve two bytes more for payload
-                        * alignment since sizeof(struct ethhdr) is 14.
-                        */
-                       frame = dev_alloc_skb(hlen + subframe_len + 2);
+                       reuse_skb = true;
+               } else {
+                       frame = __ieee80211_amsdu_copy(skb, hlen, offset, len);
                        if (!frame)
                                goto purge;
 
-                       skb_reserve(frame, hlen + sizeof(struct ethhdr) + 2);
-                       memcpy(skb_put(frame, ntohs(len)), skb->data,
-                               ntohs(len));
-
-                       eth = (struct ethhdr *)skb_pull(skb, ntohs(len) +
-                                                       padding);
-                       if (!eth) {
-                               dev_kfree_skb(frame);
-                               goto purge;
-                       }
+                       offset += len + padding;
                }
 
                skb_reset_network_header(frame);
@@ -716,24 +721,20 @@ void ieee80211_amsdu_to_8023s(struct sk_buff *skb, struct sk_buff_head *list,
 
                payload = frame->data;
                ethertype = (payload[6] << 8) | payload[7];
-
                if (likely((ether_addr_equal(payload, rfc1042_header) &&
                            ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
                           ether_addr_equal(payload, bridge_tunnel_header))) {
-                       /* remove RFC1042 or Bridge-Tunnel
-                        * encapsulation and replace EtherType */
-                       skb_pull(frame, 6);
-                       memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
-                       memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
-               } else {
-                       memcpy(skb_push(frame, sizeof(__be16)), &len,
-                               sizeof(__be16));
-                       memcpy(skb_push(frame, ETH_ALEN), src, ETH_ALEN);
-                       memcpy(skb_push(frame, ETH_ALEN), dst, ETH_ALEN);
+                       eth.h_proto = htons(ethertype);
+                       skb_pull(frame, ETH_ALEN + 2);
                }
+
+               memcpy(skb_push(frame, sizeof(eth)), &eth, sizeof(eth));
                __skb_queue_tail(list, frame);
        }
 
+       if (!reuse_skb)
+               dev_kfree_skb(skb);
+
        return;
 
  purge:
index c8717c1d082e702f9b071c480e873b408b400daf..b50ee5d622e14d4fb486ce0c533757e958702f54 100644 (file)
@@ -342,6 +342,40 @@ static const int compat_event_type_size[] = {
 
 /* IW event code */
 
+void wireless_nlevent_flush(void)
+{
+       struct sk_buff *skb;
+       struct net *net;
+
+       ASSERT_RTNL();
+
+       for_each_net(net) {
+               while ((skb = skb_dequeue(&net->wext_nlevents)))
+                       rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
+                                   GFP_KERNEL);
+       }
+}
+EXPORT_SYMBOL_GPL(wireless_nlevent_flush);
+
+static int wext_netdev_notifier_call(struct notifier_block *nb,
+                                    unsigned long state, void *ptr)
+{
+       /*
+        * When a netdev changes state in any way, flush all pending messages
+        * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
+        * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
+        * or similar - all of which could otherwise happen due to delays from
+        * schedule_work().
+        */
+       wireless_nlevent_flush();
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block wext_netdev_notifier = {
+       .notifier_call = wext_netdev_notifier_call,
+};
+
 static int __net_init wext_pernet_init(struct net *net)
 {
        skb_queue_head_init(&net->wext_nlevents);
@@ -360,7 +394,12 @@ static struct pernet_operations wext_pernet_ops = {
 
 static int __init wireless_nlevent_init(void)
 {
-       return register_pernet_subsys(&wext_pernet_ops);
+       int err = register_pernet_subsys(&wext_pernet_ops);
+
+       if (err)
+               return err;
+
+       return register_netdevice_notifier(&wext_netdev_notifier);
 }
 
 subsys_initcall(wireless_nlevent_init);
@@ -368,17 +407,8 @@ subsys_initcall(wireless_nlevent_init);
 /* Process events generated by the wireless layer or the driver. */
 static void wireless_nlevent_process(struct work_struct *work)
 {
-       struct sk_buff *skb;
-       struct net *net;
-
        rtnl_lock();
-
-       for_each_net(net) {
-               while ((skb = skb_dequeue(&net->wext_nlevents)))
-                       rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
-                                   GFP_KERNEL);
-       }
-
+       wireless_nlevent_flush();
        rtnl_unlock();
 }
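
The new wext_netdev_notifier above follows the standard netdevice notifier pattern. A self-contained sketch of that pattern; the module boilerplate and names are illustrative, not part of the patch:

	#include <linux/module.h>
	#include <linux/netdevice.h>
	#include <linux/notifier.h>

	static int sketch_netdev_event(struct notifier_block *nb,
				       unsigned long state, void *ptr)
	{
		/* 'state' is NETDEV_UP, NETDEV_DOWN, NETDEV_CHANGE, ...;
		 * netdev_notifier_info_to_dev(ptr) yields the net_device.
		 */
		return NOTIFY_OK;
	}

	static struct notifier_block sketch_nb = {
		.notifier_call = sketch_netdev_event,
	};

	static int __init sketch_init(void)
	{
		return register_netdevice_notifier(&sketch_nb);
	}

	static void __exit sketch_exit(void)
	{
		unregister_netdevice_notifier(&sketch_nb);
	}

	module_init(sketch_init);
	module_exit(sketch_exit);
	MODULE_LICENSE("GPL");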
 
index f07224d8b88f6a2479de02ce944181edcd2576ab..250e567ba3d636c8f9103a7949b7d1b5208b77ee 100644 (file)
@@ -9,6 +9,8 @@
  * any later version.
  */
 
+#include <crypto/hash.h>
+#include <crypto/skcipher.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/pfkeyv2.h>
@@ -782,14 +784,13 @@ void xfrm_probe_algs(void)
        BUG_ON(in_softirq());
 
        for (i = 0; i < aalg_entries(); i++) {
-               status = crypto_has_hash(aalg_list[i].name, 0,
-                                        CRYPTO_ALG_ASYNC);
+               status = crypto_has_ahash(aalg_list[i].name, 0, 0);
                if (aalg_list[i].available != status)
                        aalg_list[i].available = status;
        }
 
        for (i = 0; i < ealg_entries(); i++) {
-               status = crypto_has_ablkcipher(ealg_list[i].name, 0, 0);
+               status = crypto_has_skcipher(ealg_list[i].name, 0, 0);
                if (ealg_list[i].available != status)
                        ealg_list[i].available = status;
        }
index 6299ee95cd11b63112ae5b7875872cb72ca91208..ad466ed3309307c79a7d393359f74a6c558eef1b 100644 (file)
@@ -89,6 +89,100 @@ static void test_hashmap_sanity(int i, void *data)
        close(map_fd);
 }
 
+/* sanity tests for percpu map API */
+static void test_percpu_hashmap_sanity(int task, void *data)
+{
+       long long key, next_key;
+       int expected_key_mask = 0;
+       unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       long long value[nr_cpus];
+       int map_fd, i;
+
+       map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_HASH, sizeof(key),
+                               sizeof(value[0]), 2);
+       if (map_fd < 0) {
+               printf("failed to create hashmap '%s'\n", strerror(errno));
+               exit(1);
+       }
+
+       for (i = 0; i < nr_cpus; i++)
+               value[i] = i + 100;
+       key = 1;
+       /* insert key=1 element */
+       assert(!(expected_key_mask & key));
+       assert(bpf_update_elem(map_fd, &key, value, BPF_ANY) == 0);
+       expected_key_mask |= key;
+
+       /* BPF_NOEXIST means: add new element if it doesn't exist */
+       assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == -1 &&
+              /* key=1 already exists */
+              errno == EEXIST);
+
+       /* -1 is an invalid flag */
+       assert(bpf_update_elem(map_fd, &key, value, -1) == -1 &&
+              errno == EINVAL);
+
+       /* check that key=1 can be found. value could be 0 if the lookup
+        * was run from a different cpu.
+        */
+       value[0] = 1;
+       assert(bpf_lookup_elem(map_fd, &key, value) == 0 && value[0] == 100);
+
+       key = 2;
+       /* check that key=2 is not found */
+       assert(bpf_lookup_elem(map_fd, &key, value) == -1 && errno == ENOENT);
+
+       /* BPF_EXIST means: update existing element */
+       assert(bpf_update_elem(map_fd, &key, value, BPF_EXIST) == -1 &&
+              /* key=2 is not there */
+              errno == ENOENT);
+
+       /* insert key=2 element */
+       assert(!(expected_key_mask & key));
+       assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == 0);
+       expected_key_mask |= key;
+
+       /* key=1 and key=2 were inserted, check that key=0 cannot be inserted
+        * due to max_entries limit
+        */
+       key = 0;
+       assert(bpf_update_elem(map_fd, &key, value, BPF_NOEXIST) == -1 &&
+              errno == E2BIG);
+
+       /* check that key = 0 doesn't exist */
+       assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
+
+       /* iterate over two elements */
+       while (!bpf_get_next_key(map_fd, &key, &next_key)) {
+               assert((expected_key_mask & next_key) == next_key);
+               expected_key_mask &= ~next_key;
+
+               assert(bpf_lookup_elem(map_fd, &next_key, value) == 0);
+               for (i = 0; i < nr_cpus; i++)
+                       assert(value[i] == i + 100);
+
+               key = next_key;
+       }
+       assert(errno == ENOENT);
+
+       /* Update with BPF_EXIST */
+       key = 1;
+       assert(bpf_update_elem(map_fd, &key, value, BPF_EXIST) == 0);
+
+       /* delete both elements */
+       key = 1;
+       assert(bpf_delete_elem(map_fd, &key) == 0);
+       key = 2;
+       assert(bpf_delete_elem(map_fd, &key) == 0);
+       assert(bpf_delete_elem(map_fd, &key) == -1 && errno == ENOENT);
+
+       key = 0;
+       /* check that map is empty */
+       assert(bpf_get_next_key(map_fd, &key, &next_key) == -1 &&
+              errno == ENOENT);
+       close(map_fd);
+}
+
 static void test_arraymap_sanity(int i, void *data)
 {
        int key, next_key, map_fd;
@@ -142,6 +236,94 @@ static void test_arraymap_sanity(int i, void *data)
        close(map_fd);
 }
 
+static void test_percpu_arraymap_many_keys(void)
+{
+       unsigned nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       unsigned nr_keys = 20000;
+       long values[nr_cpus];
+       int key, map_fd, i;
+
+       map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+                               sizeof(values[0]), nr_keys);
+       if (map_fd < 0) {
+               printf("failed to create per-cpu arraymap '%s'\n",
+                      strerror(errno));
+               exit(1);
+       }
+
+       for (i = 0; i < nr_cpus; i++)
+               values[i] = i + 10;
+
+       for (key = 0; key < nr_keys; key++)
+               assert(bpf_update_elem(map_fd, &key, values, BPF_ANY) == 0);
+
+       for (key = 0; key < nr_keys; key++) {
+               for (i = 0; i < nr_cpus; i++)
+                       values[i] = 0;
+               assert(bpf_lookup_elem(map_fd, &key, values) == 0);
+               for (i = 0; i < nr_cpus; i++)
+                       assert(values[i] == i + 10);
+       }
+
+       close(map_fd);
+}
+
+static void test_percpu_arraymap_sanity(int i, void *data)
+{
+       unsigned nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       long values[nr_cpus];
+       int key, next_key, map_fd;
+
+       map_fd = bpf_create_map(BPF_MAP_TYPE_PERCPU_ARRAY, sizeof(key),
+                               sizeof(values[0]), 2);
+       if (map_fd < 0) {
+               printf("failed to create arraymap '%s'\n", strerror(errno));
+               exit(1);
+       }
+
+       for (i = 0; i < nr_cpus; i++)
+               values[i] = i + 100;
+
+       key = 1;
+       /* insert key=1 element */
+       assert(bpf_update_elem(map_fd, &key, values, BPF_ANY) == 0);
+
+       values[0] = 0;
+       assert(bpf_update_elem(map_fd, &key, values, BPF_NOEXIST) == -1 &&
+              errno == EEXIST);
+
+       /* check that key=1 can be found */
+       assert(bpf_lookup_elem(map_fd, &key, values) == 0 && values[0] == 100);
+
+       key = 0;
+       /* check that key=0 is also found and zero initialized */
+       assert(bpf_lookup_elem(map_fd, &key, values) == 0 &&
+              values[0] == 0 && values[nr_cpus - 1] == 0);
+
+
+       /* check that key=2 cannot be inserted due to max_entries limit */
+       key = 2;
+       assert(bpf_update_elem(map_fd, &key, values, BPF_EXIST) == -1 &&
+              errno == E2BIG);
+
+       /* check that key = 2 doesn't exist */
+       assert(bpf_lookup_elem(map_fd, &key, values) == -1 && errno == ENOENT);
+
+       /* iterate over two elements */
+       assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
+              next_key == 0);
+       assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
+              next_key == 1);
+       assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
+              errno == ENOENT);
+
+       /* delete shouldn't succeed */
+       key = 1;
+       assert(bpf_delete_elem(map_fd, &key) == -1 && errno == EINVAL);
+
+       close(map_fd);
+}
+
 #define MAP_SIZE (32 * 1024)
 static void test_map_large(void)
 {
@@ -209,7 +391,9 @@ static void run_parallel(int tasks, void (*fn)(int i, void *data), void *data)
 static void test_map_stress(void)
 {
        run_parallel(100, test_hashmap_sanity, NULL);
+       run_parallel(100, test_percpu_hashmap_sanity, NULL);
        run_parallel(100, test_arraymap_sanity, NULL);
+       run_parallel(100, test_percpu_arraymap_sanity, NULL);
 }
 
 #define TASKS 1024
@@ -282,7 +466,11 @@ static void test_map_parallel(void)
 int main(void)
 {
        test_hashmap_sanity(0, NULL);
+       test_percpu_hashmap_sanity(0, NULL);
        test_arraymap_sanity(0, NULL);
+       test_percpu_arraymap_sanity(0, NULL);
+       test_percpu_arraymap_many_keys();
+
        test_map_large();
        test_map_parallel();
        test_map_stress();
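
The tests above rely on the user-space contract for per-CPU maps: every lookup or update transfers one value slot per possible CPU, so the buffer handed to the syscall wrappers must hold sysconf(_SC_NPROCESSORS_CONF) entries and the reader sums the slots itself. A minimal sketch, assuming the bpf_lookup_elem() wrapper from samples/bpf/libbpf.h and an already created per-CPU map fd:

	/* sketch only: aggregate one per-CPU counter in user space */
	unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
	long per_cpu[nr_cpus], total = 0;
	int key = 0, cpu;

	if (bpf_lookup_elem(map_fd, &key, per_cpu) == 0)
		for (cpu = 0; cpu < nr_cpus; cpu++)
			total += per_cpu[cpu];	/* one slot per possible CPU */
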
index b32367cfbff4aff3020bb9c36c8faf6c981dd0f8..09c1adc27d426ed4adec7408a6fbae9193c39bb0 100644 (file)
@@ -70,7 +70,7 @@ struct hist_key {
 };
 
 struct bpf_map_def SEC("maps") my_hist_map = {
-       .type = BPF_MAP_TYPE_HASH,
+       .type = BPF_MAP_TYPE_PERCPU_HASH,
        .key_size = sizeof(struct hist_key),
        .value_size = sizeof(long),
        .max_entries = 1024,
index cd0241c1447a5fb37d0e9e1633f4a47a457f344f..ab5b19e68acf0c3d53916ce6324f57777cf3acbd 100644 (file)
@@ -37,6 +37,8 @@ struct hist_key {
 static void print_hist_for_pid(int fd, void *task)
 {
        struct hist_key key = {}, next_key;
+       unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       long values[nr_cpus];
        char starstr[MAX_STARS];
        long value;
        long data[MAX_INDEX] = {};
@@ -49,7 +51,10 @@ static void print_hist_for_pid(int fd, void *task)
                        key = next_key;
                        continue;
                }
-               bpf_lookup_elem(fd, &next_key, &value);
+               bpf_lookup_elem(fd, &next_key, values);
+               value = 0;
+               for (i = 0; i < nr_cpus; i++)
+                       value += values[i];
                ind = next_key.index;
                data[ind] = value;
                if (value && ind > max_ind)
index bf337fbb09472cbe32bfbaff2d4313b7cafb58c6..9974c3d7c18b90f5850e87822a158c90e2645b8d 100644 (file)
@@ -20,7 +20,7 @@ struct bpf_map_def SEC("maps") my_map = {
 /* kprobe is NOT a stable ABI. If kernel internals change this bpf+kprobe
  * example will no longer be meaningful
  */
-SEC("kprobe/blk_mq_start_request")
+SEC("kprobe/blk_start_request")
 int bpf_prog1(struct pt_regs *ctx)
 {
        long rq = PT_REGS_PARM1(ctx);
@@ -42,13 +42,13 @@ static unsigned int log2l(unsigned long long n)
 #define SLOTS 100
 
 struct bpf_map_def SEC("maps") lat_map = {
-       .type = BPF_MAP_TYPE_ARRAY,
+       .type = BPF_MAP_TYPE_PERCPU_ARRAY,
        .key_size = sizeof(u32),
        .value_size = sizeof(u64),
        .max_entries = SLOTS,
 };
 
-SEC("kprobe/blk_update_request")
+SEC("kprobe/blk_account_io_completion")
 int bpf_prog2(struct pt_regs *ctx)
 {
        long rq = PT_REGS_PARM1(ctx);
@@ -81,7 +81,7 @@ int bpf_prog2(struct pt_regs *ctx)
 
        value = bpf_map_lookup_elem(&lat_map, &index);
        if (value)
-               __sync_fetch_and_add((long *)value, 1);
+               *value += 1;
 
        return 0;
 }
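
Switching lat_map to BPF_MAP_TYPE_PERCPU_ARRAY is what makes the plain increment above safe: each CPU bumps its own slot, so the earlier __sync_fetch_and_add() is no longer needed. A hedged BPF-side sketch of the same pattern (the map and key names are illustrative, not taken from this file):

	u32 idx = 0;
	u64 *cnt = bpf_map_lookup_elem(&my_percpu_map, &idx);

	if (cnt)
		(*cnt)++;	/* no atomics: this CPU owns its slot */
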
index 0aaa933ab93818df1634ba170e125e65dc6031d5..48716f7f0d8b9eae647a76ba93b07fe76a79f7bd 100644 (file)
 
 static void clear_stats(int fd)
 {
+       unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+       __u64 values[nr_cpus];
        __u32 key;
-       __u64 value = 0;
 
+       memset(values, 0, sizeof(values));
        for (key = 0; key < SLOTS; key++)
-               bpf_update_elem(fd, &key, &value, BPF_ANY);
+               bpf_update_elem(fd, &key, values, BPF_ANY);
 }
 
 const char *color[] = {
@@ -75,15 +77,20 @@ static void print_banner(void)
 
 static void print_hist(int fd)
 {
-       __u32 key;
-       __u64 value;
-       __u64 cnt[SLOTS];
-       __u64 max_cnt = 0;
+       unsigned int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
        __u64 total_events = 0;
+       long values[nr_cpus];
+       __u64 max_cnt = 0;
+       __u64 cnt[SLOTS];
+       __u64 value;
+       __u32 key;
+       int i;
 
        for (key = 0; key < SLOTS; key++) {
+               bpf_lookup_elem(fd, &key, values);
                value = 0;
-               bpf_lookup_elem(fd, &key, &value);
+               for (i = 0; i < nr_cpus; i++)
+                       value += values[i];
                cnt[key] = value;
                total_events += value;
                if (value > max_cnt)
index 1c15717e0d5686972c1130fb718d0bc445c08dad..a1be75d0a5fd3fbf4742e555046896ea6fa6fe65 100644 (file)
@@ -23,8 +23,6 @@ include $(src)/Makefile
 PHONY += __dtbs_install_prep
 __dtbs_install_prep:
 ifeq ("$(dtbinst-root)", "$(obj)")
-       $(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi
-       $(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi
        $(Q)mkdir -p $(INSTALL_DTBS_PATH)
 endif
 
index 0147c91fa549e6ab46b68c47b46425b272c37571..874132b26d23dfec9c5c09afd5d61c78cc562e0f 100755 (executable)
@@ -269,7 +269,8 @@ our $Sparse = qr{
                        __init_refok|
                        __kprobes|
                        __ref|
-                       __rcu
+                       __rcu|
+                       __private
                }x;
 our $InitAttributePrefix = qr{__(?:mem|cpu|dev|net_|)};
 our $InitAttributeData = qr{$InitAttributePrefix(?:initdata\b)};
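
__private is the sparse annotation this checkpatch pattern now recognizes; members marked with it are meant to be touched only through the matching accessor. A hedged sketch, assuming the __private/ACCESS_PRIVATE() definitions from <linux/compiler.h> that accompany this change (struct foo and foo_lock() are illustrative):

	struct foo {
		spinlock_t __private lock;	/* internal to foo.c only */
	};

	static void foo_lock(struct foo *f)
	{
		spin_lock(&ACCESS_PRIVATE(f, lock));	/* casts away __private */
	}
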
diff --git a/scripts/dtc/dtx_diff b/scripts/dtc/dtx_diff
new file mode 100755 (executable)
index 0000000..f116005
--- /dev/null
@@ -0,0 +1,343 @@
+#! /bin/bash
+
+# Copyright (C) 2015 Frank Rowand
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; version 2 of the License.
+
+
+usage() {
+
+       # use spaces instead of tabs in the usage message
+       cat >&2 <<eod
+
+Usage:
+
+   `basename $0` DTx
+        decompile DTx
+
+   `basename $0` DTx_1 DTx_2
+        diff DTx_1 and DTx_2
+
+
+       -f           print full dts in diff (--unified=99999)
+       -h           synonym for --help
+       -help        synonym for --help
+      --help        print this message and exit
+       -s SRCTREE   linux kernel source tree is at path SRCTREE
+                        (default is current directory)
+       -S           linux kernel source tree is at root of current git repo
+       -u           unsorted, do not sort DTx
+
+
+Each DTx is processed by the dtc compiler to produce a sorted dts source
+file.  If DTx is a dts source file then it is pre-processed in the same
+manner as done for the compile of the dts source file in the Linux kernel
+build system ('#include' and '/include/' directives are processed).
+
+If two DTx are provided, the resulting dts source files are diffed.
+
+If DTx is a directory, it is treated as a DT subtree, such as
+  /proc/device-tree.
+
+If DTx contains the binary blob magic value in the first four bytes,
+  it is treated as a binary blob (aka .dtb or FDT).
+
+Otherwise DTx is treated as a dts source file (aka .dts).
+
+   If this script is not run from the root of the Linux source tree,
+   and DTx utilizes '#include' or '/include/', then the path of the
+   Linux source tree can be provided by '-s SRCTREE' or '-S' so that
+   include paths will be set properly.
+
+   The shell variable \${ARCH} must provide the architecture containing
+   the dts source file for include paths to be set properly for '#include'
+   or '/include/' to be processed.
+
+   If DTx_1 and DTx_2 are in different architectures, then this script
+   may not work since \${ARCH} is part of the include path.  Two possible
+   workarounds:
+
+      `basename $0` \\
+          <(ARCH=arch_of_dtx_1 `basename $0` DTx_1) \\
+          <(ARCH=arch_of_dtx_2 `basename $0` DTx_2)
+
+      `basename $0` ARCH=arch_of_dtx_1 DTx_1 >tmp_dtx_1.dts
+      `basename $0` ARCH=arch_of_dtx_2 DTx_2 >tmp_dtx_2.dts
+      `basename $0` tmp_dtx_1.dts tmp_dtx_2.dts
+      rm tmp_dtx_1.dts tmp_dtx_2.dts
+
+   If DTx_1 and DTx_2 are in different directories, then this script will
+   add the path of DTx_1 and DTx_2 to the include paths.  If DTx_2 includes
+   a local file that exists in both the path of DTx_1 and DTx_2 then the
+   file in the path of DTx_1 will incorrectly be included.  Possible
+   workaround:
+
+      `basename $0` DTx_1 >tmp_dtx_1.dts
+      `basename $0` DTx_2 >tmp_dtx_2.dts
+      `basename $0` tmp_dtx_1.dts tmp_dtx_2.dts
+      rm tmp_dtx_1.dts tmp_dtx_2.dts
+
+eod
+}
+
+
+compile_to_dts() {
+
+       dtx="$1"
+
+       if [ -d "${dtx}" ] ; then
+
+               # -----  input is file tree
+
+               if ( ! ${DTC} -I fs ${dtx} ) ; then
+                       exit 3
+               fi
+
+       elif [ -f "${dtx}" ] && [ -r "${dtx}" ] ; then
+
+               magic=`hexdump -n 4 -e '/1 "%02x"' ${dtx}`
+               if [ "${magic}" = "d00dfeed" ] ; then
+
+                       # -----  input is FDT (binary blob)
+
+                       if ( ! ${DTC} -I dtb ${dtx} ) ; then
+                               exit 3
+                       fi
+
+                       return
+
+               fi
+
+               # -----  input is DTS (source)
+
+               if ( cpp ${cpp_flags} -x assembler-with-cpp ${dtx} \
+                       | ${DTC} -I dts ) ; then
+                       return
+               fi
+
+               echo ""                                                      >&2
+               echo "Possible hints to resolve the above error:"            >&2
+               echo "  (hints might not fix the problem)"                   >&2
+
+               hint_given=0
+
+               if [ "${ARCH}" = "" ] ; then
+                       hint_given=1
+                       echo ""                                              >&2
+                       echo "  shell variable \$ARCH not set"               >&2
+               fi
+
+               dtx_arch=`echo "/${dtx}" | sed -e 's|.*/arch/||' -e 's|/.*||'`
+
+               if [ "${dtx_arch}" != ""  -a "${dtx_arch}" != "${ARCH}" ] ; then
+                       hint_given=1
+                       echo ""                                              >&2
+                       echo "  architecture ${dtx_arch} is in file path,"   >&2
+                       echo "  but does not match shell variable \$ARCH"    >&2
+                       echo "  (\$ARCH is '${ARCH}')"                       >&2
+               fi
+
+               if [ ! -d ${srctree}/arch/${ARCH} ] ; then
+                       hint_given=1
+                       echo ""                                              >&2
+                       echo "  ${srctree}/arch/${ARCH}/ does not exist"     >&2
+                       echo "  Is \$ARCH='${ARCH}' correct?"                >&2
+                       echo "  Possible fix: use '-s' option"               >&2
+
+                       git_root=`git rev-parse --show-toplevel 2>/dev/null`
+                       if [ -d ${git_root}/arch/ ] ; then
+                               echo "  Possible fix: use '-S' option"       >&2
+                       fi
+               fi
+
+               if [ $hint_given = 0 ] ; then
+                       echo ""                                              >&2
+                       echo "  No hints available."                         >&2
+               fi
+
+               echo ""                                                      >&2
+
+               exit 3
+
+       else
+               echo ""                                                     >&2
+               echo "ERROR: ${dtx} does not exist or is not readable"      >&2
+               echo ""                                                     >&2
+               exit 2
+       fi
+
+}
+
+
+# -----  start of script
+
+cmd_diff=0
+diff_flags="-u"
+dtx_file_1=""
+dtx_file_2=""
+dtc_sort="-s"
+help=0
+srctree=""
+
+
+while [ $# -gt 0 ] ; do
+
+       case $1 in
+
+       -f )
+               diff_flags="--unified=999999"
+               shift
+               ;;
+
+       -h | -help | --help )
+               help=1
+               shift
+               ;;
+
+       -s )
+               srctree="$2"
+               shift 2
+               ;;
+
+       -S )
+               git_root=`git rev-parse --show-toplevel 2>/dev/null`
+               srctree="${git_root}"
+               shift
+               ;;
+
+       -u )
+               dtc_sort=""
+               shift
+               ;;
+
+       *)
+               if [ "${dtx_file_1}"  = "" ] ; then
+                       dtx_file_1="$1"
+               elif [ "${dtx_file_2}" = "" ] ; then
+                       dtx_file_2="$1"
+               else
+                       echo ""                                             >&2
+                       echo "ERROR: Unexpected parameter: $1"              >&2
+                       echo ""                                             >&2
+                       exit 2
+               fi
+               shift
+               ;;
+
+       esac
+
+done
+
+if [ "${srctree}" = "" ] ; then
+       srctree="."
+fi
+
+if [ "${dtx_file_2}" != "" ]; then
+       cmd_diff=1
+fi
+
+if (( ${help} )) ; then
+       usage
+       exit 1
+fi
+
+# this must follow check for ${help}
+if [ "${dtx_file_1}" = "" ]; then
+       echo ""                                                             >&2
+       echo "ERROR: parameter DTx required"                                >&2
+       echo ""                                                             >&2
+       exit 2
+fi
+
+
+# -----  prefer dtc from linux kernel, allow fallback to dtc in $PATH
+
+if [ "${KBUILD_OUTPUT:0:2}" = ".." ] ; then
+       __KBUILD_OUTPUT="${srctree}/${KBUILD_OUTPUT}"
+elif [ "${KBUILD_OUTPUT}" = "" ] ; then
+       __KBUILD_OUTPUT="."
+else
+       __KBUILD_OUTPUT="${KBUILD_OUTPUT}"
+fi
+
+DTC="${__KBUILD_OUTPUT}/scripts/dtc/dtc"
+
+if [ ! -x ${DTC} ] ; then
+       __DTC="dtc"
+       if ( ! which ${__DTC} >/dev/null ) ; then
+
+               # use spaces instead of tabs in the error message
+               cat >&2 <<eod
+
+ERROR: unable to find a 'dtc' program
+
+   Preferred 'dtc' (built from Linux kernel source tree) was not found or
+   is not executable.
+
+      'dtc' is: ${DTC}
+
+      If it does not exist, create it from the root of the Linux source tree:
+
+         'make scripts'.
+
+      If not at the root of the Linux kernel source tree, -s SRCTREE or -S
+      may need to be specified to find 'dtc'.
+
+      If 'O=\${dir}' is specified in your Linux builds, this script requires
+      'export KBUILD_OUTPUT=\${dir}' or that \${dir}/scripts/dtc be added to
+      \$PATH before running.
+
+      If \${KBUILD_OUTPUT} is a relative path, then '-s SRCTREE', '-S', or
+      running this script from the root of the Linux kernel source tree is
+      required.
+
+   Fallback '${__DTC}' was also not in \${PATH} or is not executable.
+
+eod
+               exit 2
+       fi
+       DTC=${__DTC}
+fi
+
+
+# -----  cpp and dtc flags same as for linux source tree build of .dtb files,
+#        plus directories of the dtx file(s)
+
+dtx_path_1_dtc_include="-i `dirname ${dtx_file_1}`"
+
+dtx_path_2_dtc_include=""
+if (( ${cmd_diff} )) ; then
+       dtx_path_2_dtc_include="-i `dirname ${dtx_file_2}`"
+fi
+
+cpp_flags="\
+       -nostdinc                                  \
+       -I${srctree}/arch/${ARCH}/boot/dts         \
+       -I${srctree}/arch/${ARCH}/boot/dts/include \
+       -I${srctree}/drivers/of/testcase-data      \
+       -undef -D__DTS__"
+
+dtc_flags="\
+       -i ${srctree}/arch/${ARCH}/boot/dts/ \
+       -i ${srctree}/kernel/dts             \
+       ${dtx_path_1_dtc_include}            \
+       ${dtx_path_2_dtc_include}"
+
+DTC="${DTC} ${dtc_flags} -O dts -qq -f ${dtc_sort} -o -"
+
+
+# -----  do the diff or decompile
+
+if (( ${cmd_diff} )) ; then
+
+       diff ${diff_flags} \
+               <(compile_to_dts "${dtx_file_1}") \
+               <(compile_to_dts "${dtx_file_2}")
+
+else
+
+       compile_to_dts "${dtx_file_1}"
+
+fi
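
A couple of hedged invocation examples for the script added above (the board .dts name and paths are only illustrative):

	# decompile a compiled blob back to source form
	ARCH=arm scripts/dtc/dtx_diff arch/arm/boot/dts/sun7i-a20-cubieboard2.dtb

	# diff the source against what a running system actually exposes
	ARCH=arm scripts/dtc/dtx_diff arch/arm/boot/dts/sun7i-a20-cubieboard2.dts \
		/proc/device-tree
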
index d79cba4ce3ebfb24a660da011076deb81a6ff4ad..ebced77deb9c4dc380ab5f58751950adc0ac3d7f 100644 (file)
@@ -96,13 +96,15 @@ savedefconfig: $(obj)/conf
 defconfig: $(obj)/conf
 ifeq ($(KBUILD_DEFCONFIG),)
        $< $(silent) --defconfig $(Kconfig)
-else ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
+else
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG)),)
        @$(kecho) "*** Default configuration is based on '$(KBUILD_DEFCONFIG)'"
        $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$(KBUILD_DEFCONFIG) $(Kconfig)
 else
        @$(kecho) "*** Default configuration is based on target '$(KBUILD_DEFCONFIG)'"
        $(Q)$(MAKE) -f $(srctree)/Makefile $(KBUILD_DEFCONFIG)
 endif
+endif
 
 %_defconfig: $(obj)/conf
        $(Q)$< $(silent) --defconfig=arch/$(SRCARCH)/configs/$@ $(Kconfig)
index 0b7dc2fd7bac0e2985a0b16c023b98cca23325e7..dd243d2abd875b535d006535232821eefad3460b 100644 (file)
@@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
                if (in)
                        goto load;
                sym_add_change_count(1);
-               if (!sym_defconfig_list) {
-                       sym_calc_value(modules_sym);
+               if (!sym_defconfig_list)
                        return 1;
-               }
 
                for_all_defaults(sym_defconfig_list, prop) {
                        if (expr_calc_value(prop->visible.expr) == no ||
@@ -403,7 +401,6 @@ setsym:
        }
        free(line);
        fclose(in);
-       sym_calc_value(modules_sym);
        return 0;
 }
 
@@ -414,8 +411,12 @@ int conf_read(const char *name)
 
        sym_set_change_count(0);
 
-       if (conf_read_simple(name, S_DEF_USER))
+       if (conf_read_simple(name, S_DEF_USER)) {
+               sym_calc_value(modules_sym);
                return 1;
+       }
+
+       sym_calc_value(modules_sym);
 
        for_all_symbols(i, sym) {
                sym_calc_value(sym);
@@ -846,6 +847,7 @@ static int conf_split_config(void)
 
        name = conf_get_autoconfig_name();
        conf_read_simple(name, S_DEF_AUTO);
+       sym_calc_value(modules_sym);
 
        if (chdir("include/config"))
                return 1;
index ba6c34ea5429535cd303b1aa1d6ebd6ae30dd912..8f22654c71b46bcd487ef791ecfc9b50b2dbf6f2 100755 (executable)
@@ -93,9 +93,10 @@ kallsyms()
        local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \
                      ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
 
-       ${NM} -n ${1} | \
-               scripts/kallsyms ${kallsymopt} | \
-               ${CC} ${aflags} -c -o ${2} -x assembler-with-cpp -
+       local afile="`basename ${2} .o`.S"
+
+       ${NM} -n ${1} | scripts/kallsyms ${kallsymopt} > ${afile}
+       ${CC} ${aflags} -c -o ${2} ${afile}
 }
 
 # Create map file with all symbols from ${1}
index e080746e1a6b528a218b416b5344368cd6db7065..48958d3cec9e38473ff3296f7f6bf4d5db41612a 100644 (file)
@@ -594,7 +594,8 @@ static int ignore_undef_symbol(struct elf_info *info, const char *symname)
                if (strncmp(symname, "_restgpr0_", sizeof("_restgpr0_") - 1) == 0 ||
                    strncmp(symname, "_savegpr0_", sizeof("_savegpr0_") - 1) == 0 ||
                    strncmp(symname, "_restvr_", sizeof("_restvr_") - 1) == 0 ||
-                   strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0)
+                   strncmp(symname, "_savevr_", sizeof("_savevr_") - 1) == 0 ||
+                   strcmp(symname, ".TOC.") == 0)
                        return 1;
        /* Do not ignore this symbol */
        return 0;
diff --git a/scripts/prune-kernel b/scripts/prune-kernel
new file mode 100755 (executable)
index 0000000..ab5034e
--- /dev/null
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# because I use CONFIG_LOCALVERSION_AUTO, not the same version again and
+# again, /boot and /lib/modules/ eventually fill up.
+# Dumb script to purge that stuff:
+
+for f in "$@"
+do
+        if rpm -qf "/lib/modules/$f" >/dev/null; then
+                echo "keeping $f (installed from rpm)"
+        elif [ $(uname -r) = "$f" ]; then
+                echo "keeping $f (running kernel) "
+        else
+                echo "removing $f"
+                rm -f "/boot/initramfs-$f.img" "/boot/System.map-$f"
+                rm -f "/boot/vmlinuz-$f"   "/boot/config-$f"
+                rm -rf "/lib/modules/$f"
+                new-kernel-pkg --remove $f
+        fi
+done
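
A hedged usage sketch for the helper above; its arguments are `uname -r`-style version strings, and the ones shown are examples only:

	# drop stale kernels that are neither rpm-owned nor currently running
	scripts/prune-kernel 4.4.0-rc4+ 4.4.0-rc5+
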
index 696ccfa08d103cd29ae56ac38c117bbd7725da06..5adbfc32242f81b0f6396fd7d0656c5aa4eca6ff 100644 (file)
 #include <linux/random.h>
 #include <linux/rcupdate.h>
 #include <linux/scatterlist.h>
-#include <linux/crypto.h>
 #include <linux/ctype.h>
 #include <crypto/hash.h>
 #include <crypto/sha.h>
-#include <crypto/aes.h>
+#include <crypto/skcipher.h>
 
 #include "encrypted.h"
 #include "ecryptfs_format.h"
@@ -85,17 +84,17 @@ static const match_table_t key_tokens = {
 
 static int aes_get_sizes(void)
 {
-       struct crypto_blkcipher *tfm;
+       struct crypto_skcipher *tfm;
 
-       tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+       tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(tfm)) {
                pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
                       PTR_ERR(tfm));
                return PTR_ERR(tfm);
        }
-       ivsize = crypto_blkcipher_ivsize(tfm);
-       blksize = crypto_blkcipher_blocksize(tfm);
-       crypto_free_blkcipher(tfm);
+       ivsize = crypto_skcipher_ivsize(tfm);
+       blksize = crypto_skcipher_blocksize(tfm);
+       crypto_free_skcipher(tfm);
        return 0;
 }
 
@@ -401,28 +400,37 @@ static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
        return ret;
 }
 
-static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
-                              unsigned int key_len, const u8 *iv,
-                              unsigned int ivsize)
+static struct skcipher_request *init_skcipher_req(const u8 *key,
+                                                 unsigned int key_len)
 {
+       struct skcipher_request *req;
+       struct crypto_skcipher *tfm;
        int ret;
 
-       desc->tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
-       if (IS_ERR(desc->tfm)) {
+       tfm = crypto_alloc_skcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(tfm)) {
                pr_err("encrypted_key: failed to load %s transform (%ld)\n",
-                      blkcipher_alg, PTR_ERR(desc->tfm));
-               return PTR_ERR(desc->tfm);
+                      blkcipher_alg, PTR_ERR(tfm));
+               return ERR_CAST(tfm);
        }
-       desc->flags = 0;
 
-       ret = crypto_blkcipher_setkey(desc->tfm, key, key_len);
+       ret = crypto_skcipher_setkey(tfm, key, key_len);
        if (ret < 0) {
                pr_err("encrypted_key: failed to setkey (%d)\n", ret);
-               crypto_free_blkcipher(desc->tfm);
-               return ret;
+               crypto_free_skcipher(tfm);
+               return ERR_PTR(ret);
        }
-       crypto_blkcipher_set_iv(desc->tfm, iv, ivsize);
-       return 0;
+
+       req = skcipher_request_alloc(tfm, GFP_KERNEL);
+       if (!req) {
+               pr_err("encrypted_key: failed to allocate request for %s\n",
+                      blkcipher_alg);
+               crypto_free_skcipher(tfm);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       skcipher_request_set_callback(req, 0, NULL, NULL);
+       return req;
 }
 
 static struct key *request_master_key(struct encrypted_key_payload *epayload,
@@ -467,7 +475,8 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
 {
        struct scatterlist sg_in[2];
        struct scatterlist sg_out[1];
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *tfm;
+       struct skcipher_request *req;
        unsigned int encrypted_datalen;
        unsigned int padlen;
        char pad[16];
@@ -476,9 +485,9 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
        padlen = encrypted_datalen - epayload->decrypted_datalen;
 
-       ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
-                                 epayload->iv, ivsize);
-       if (ret < 0)
+       req = init_skcipher_req(derived_key, derived_keylen);
+       ret = PTR_ERR(req);
+       if (IS_ERR(req))
                goto out;
        dump_decrypted_data(epayload);
 
@@ -491,8 +500,12 @@ static int derived_key_encrypt(struct encrypted_key_payload *epayload,
        sg_init_table(sg_out, 1);
        sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
 
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, encrypted_datalen);
-       crypto_free_blkcipher(desc.tfm);
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
+                                  epayload->iv);
+       ret = crypto_skcipher_encrypt(req);
+       tfm = crypto_skcipher_reqtfm(req);
+       skcipher_request_free(req);
+       crypto_free_skcipher(tfm);
        if (ret < 0)
                pr_err("encrypted_key: failed to encrypt (%d)\n", ret);
        else
@@ -565,15 +578,16 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
 {
        struct scatterlist sg_in[1];
        struct scatterlist sg_out[2];
-       struct blkcipher_desc desc;
+       struct crypto_skcipher *tfm;
+       struct skcipher_request *req;
        unsigned int encrypted_datalen;
        char pad[16];
        int ret;
 
        encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
-       ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
-                                 epayload->iv, ivsize);
-       if (ret < 0)
+       req = init_skcipher_req(derived_key, derived_keylen);
+       ret = PTR_ERR(req);
+       if (IS_ERR(req))
                goto out;
        dump_encrypted_data(epayload, encrypted_datalen);
 
@@ -585,8 +599,12 @@ static int derived_key_decrypt(struct encrypted_key_payload *epayload,
                   epayload->decrypted_datalen);
        sg_set_buf(&sg_out[1], pad, sizeof pad);
 
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, encrypted_datalen);
-       crypto_free_blkcipher(desc.tfm);
+       skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen,
+                                  epayload->iv);
+       ret = crypto_skcipher_decrypt(req);
+       tfm = crypto_skcipher_reqtfm(req);
+       skcipher_request_free(req);
+       crypto_free_skcipher(tfm);
        if (ret < 0)
                goto out;
        dump_decrypted_data(epayload);
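
The conversion above follows the usual one-shot synchronous skcipher calling sequence; a condensed sketch with error handling trimmed ("cbc(aes)" and the key/iv/scatterlist variables are placeholders, not taken from this file):

	struct crypto_skcipher *tfm;
	struct skcipher_request *req;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	crypto_skcipher_setkey(tfm, key, keylen);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
	crypto_skcipher_encrypt(req);		/* synchronous: ASYNC was masked out */
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
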
index 07a87311055c5b50916c87f2117f4c70b0321cfd..09ef276c4bdcaf8e0c419df61d1cd7b07bf5253c 100644 (file)
@@ -430,7 +430,8 @@ static int __key_instantiate_and_link(struct key *key,
 
                        /* and link it into the destination keyring */
                        if (keyring) {
-                               set_bit(KEY_FLAG_KEEP, &key->flags);
+                               if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
+                                       set_bit(KEY_FLAG_KEEP, &key->flags);
 
                                __key_link(key, _edit);
                        }
index ad5cd76ec231cd14f02b2fb15f07a3d8a069972f..3411c33e2a445f28282aa5315e4b17a42a69ad23 100644 (file)
@@ -13,7 +13,7 @@ selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
 
 selinux-$(CONFIG_NETLABEL) += netlabel.o
 
-ccflags-y := -Isecurity/selinux -Isecurity/selinux/include
+ccflags-y := -I$(srctree)/security/selinux -I$(srctree)/security/selinux/include
 
 $(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
 
index f8110cfd80ff64bf05ef428f03dff935e6d704c4..8010bc5391c35827a1e84227c85a875e49da23f4 100644 (file)
@@ -2415,7 +2415,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
 
        tty = get_current_tty();
        if (tty) {
-               spin_lock(&tty_files_lock);
+               spin_lock(&tty->files_lock);
                if (!list_empty(&tty->tty_files)) {
                        struct tty_file_private *file_priv;
 
@@ -2430,7 +2430,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
                        if (file_path_has_perm(cred, file, FILE__READ | FILE__WRITE))
                                drop_tty = 1;
                }
-               spin_unlock(&tty_files_lock);
+               spin_unlock(&tty->files_lock);
                tty_kref_put(tty);
        }
        /* Reset controlling tty. */
index 2bbb41822d8ec8882f8dacbbb4c5f8a1feac59ca..8495b93681906bd39f4065723461cddac2e7d347 100644 (file)
@@ -83,6 +83,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
        { TCPDIAG_GETSOCK,      NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
        { DCCPDIAG_GETSOCK,     NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
        { SOCK_DIAG_BY_FAMILY,  NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+       { SOCK_DESTROY,         NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE },
 };
 
 static struct nlmsg_perm nlmsg_xfrm_perms[] =
index e3e949126a5639c6a4a623750ec35922704c5110..a2a1e24becc6b8f2f6288a8853b02e6dd884becd 100644 (file)
@@ -97,11 +97,11 @@ config SND_PCM_TIMER
        bool "PCM timer interface" if EXPERT
        default y
        help
-         If you disable this option, pcm timer will be inavailable, so
-         those stubs used pcm timer (e.g. dmix, dsnoop & co) may work
+         If you disable this option, pcm timer will be unavailable, so
+         those stubs that use pcm timer (e.g. dmix, dsnoop & co) may work
         incorrectly.
 
-         For some embedded device, we may disable it to reduce memory
+         For some embedded devices, we may disable it to reduce memory
          footprint, about 20KB on x86_64 platform.
 
 config SND_SEQUENCER_OSS
index 18b8dc45bb8f77ea0c2ce16e79305c7872f057bc..7fac3cae8abd0a232a3d7a1c2563999495df6ec2 100644 (file)
 #include <sound/compress_offload.h>
 #include <sound/compress_driver.h>
 
+/* struct snd_compr_codec_caps overflows the ioctl bit size for some
+ * architectures, so we need to disable the relevant ioctls.
+ */
+#if _IOC_SIZEBITS < 14
+#define COMPR_CODEC_CAPS_OVERFLOW
+#endif
+
 /* TODO:
  * - add substream support for multiple devices in case of
  *     SND_DYNAMIC_MINORS is not used
@@ -440,6 +447,7 @@ out:
        return retval;
 }
 
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
 static int
 snd_compr_get_codec_caps(struct snd_compr_stream *stream, unsigned long arg)
 {
@@ -463,6 +471,7 @@ out:
        kfree(caps);
        return retval;
 }
+#endif /* !COMPR_CODEC_CAPS_OVERFLOW */
 
 /* revisit this with snd_pcm_preallocate_xxx */
 static int snd_compr_allocate_buffer(struct snd_compr_stream *stream,
@@ -801,9 +810,11 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
        case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
                retval = snd_compr_get_caps(stream, arg);
                break;
+#ifndef COMPR_CODEC_CAPS_OVERFLOW
        case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
                retval = snd_compr_get_codec_caps(stream, arg);
                break;
+#endif
        case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
                retval = snd_compr_set_params(stream, arg);
                break;
index 0e73d03b30e3f0d712b26f65453346e419caa96d..ebc9fdfe64df618088a8d55efff8b5312af3385d 100644 (file)
@@ -835,7 +835,8 @@ static int choose_rate(struct snd_pcm_substream *substream,
        return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
 }
 
-static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
+static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
+                                    bool trylock)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_pcm_hw_params *params, *sparams;
@@ -849,7 +850,10 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream)
        struct snd_mask sformat_mask;
        struct snd_mask mask;
 
-       if (mutex_lock_interruptible(&runtime->oss.params_lock))
+       if (trylock) {
+               if (!(mutex_trylock(&runtime->oss.params_lock)))
+                       return -EAGAIN;
+       } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
                return -EINTR;
        sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
        params = kmalloc(sizeof(*params), GFP_KERNEL);
@@ -1092,7 +1096,7 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
                if (asubstream == NULL)
                        asubstream = substream;
                if (substream->runtime->oss.params) {
-                       err = snd_pcm_oss_change_params(substream);
+                       err = snd_pcm_oss_change_params(substream, false);
                        if (err < 0)
                                return err;
                }
@@ -1132,7 +1136,7 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
                return 0;
        runtime = substream->runtime;
        if (runtime->oss.params) {
-               err = snd_pcm_oss_change_params(substream);
+               err = snd_pcm_oss_change_params(substream, false);
                if (err < 0)
                        return err;
        }
@@ -2163,7 +2167,7 @@ static int snd_pcm_oss_get_space(struct snd_pcm_oss_file *pcm_oss_file, int stre
        runtime = substream->runtime;
 
        if (runtime->oss.params &&
-           (err = snd_pcm_oss_change_params(substream)) < 0)
+           (err = snd_pcm_oss_change_params(substream, false)) < 0)
                return err;
 
        info.fragsize = runtime->oss.period_bytes;
@@ -2804,7 +2808,12 @@ static int snd_pcm_oss_mmap(struct file *file, struct vm_area_struct *area)
                return -EIO;
        
        if (runtime->oss.params) {
-               if ((err = snd_pcm_oss_change_params(substream)) < 0)
+               /* use mutex_trylock() for params_lock to avoid a deadlock
+                * between mmap_sem and params_lock taken by
+                * copy_from/to_user() in snd_pcm_oss_write/read()
+                */
+               err = snd_pcm_oss_change_params(substream, true);
+               if (err < 0)
                        return err;
        }
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
index ebe8444de6c6ea8f44a5cacfb39b963939d9880d..53dc37357bca9dea5450d818f2bcee9d71577aec 100644 (file)
@@ -565,3 +565,33 @@ unsigned int snd_pcm_rate_mask_intersect(unsigned int rates_a,
        return rates_a & rates_b;
 }
 EXPORT_SYMBOL_GPL(snd_pcm_rate_mask_intersect);
+
+/**
+ * snd_pcm_rate_range_to_bits - converts rate range to SNDRV_PCM_RATE_xxx bit
+ * @rate_min: the minimum sample rate
+ * @rate_max: the maximum sample rate
+ *
+ * This function assumes that every rate within the given range is one of the
+ * pre-defined rates, such as 44100 or 16000.
+ *
+ * Return: The SNDRV_PCM_RATE_xxx flag that corresponds to the given rate range,
+ * or SNDRV_PCM_RATE_KNOT for an unknown range.
+ */
+unsigned int snd_pcm_rate_range_to_bits(unsigned int rate_min,
+       unsigned int rate_max)
+{
+       unsigned int rates = 0;
+       int i;
+
+       for (i = 0; i < snd_pcm_known_rates.count; i++) {
+               if (snd_pcm_known_rates.list[i] >= rate_min
+                       && snd_pcm_known_rates.list[i] <= rate_max)
+                       rates |= 1 << i;
+       }
+
+       if (!rates)
+               rates = SNDRV_PCM_RATE_KNOT;
+
+       return rates;
+}
+EXPORT_SYMBOL_GPL(snd_pcm_rate_range_to_bits);
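
A hedged usage sketch for the new helper, e.g. when filling in a driver's runtime hardware description (the 8000..48000 range is only an example):

	runtime->hw.rate_min = 8000;
	runtime->hw.rate_max = 48000;
	runtime->hw.rates    = snd_pcm_rate_range_to_bits(8000, 48000);
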
index a7759846fbaadff0c9493760ce7b14eff7ca8ad9..795437b1008200cd534f9a46ad0272f3dbc20ca4 100644 (file)
@@ -942,31 +942,36 @@ static long snd_rawmidi_kernel_read1(struct snd_rawmidi_substream *substream,
        unsigned long flags;
        long result = 0, count1;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
+       unsigned long appl_ptr;
 
+       spin_lock_irqsave(&runtime->lock, flags);
        while (count > 0 && runtime->avail) {
                count1 = runtime->buffer_size - runtime->appl_ptr;
                if (count1 > count)
                        count1 = count;
-               spin_lock_irqsave(&runtime->lock, flags);
                if (count1 > (int)runtime->avail)
                        count1 = runtime->avail;
+
+               /* update runtime->appl_ptr before unlocking for userbuf */
+               appl_ptr = runtime->appl_ptr;
+               runtime->appl_ptr += count1;
+               runtime->appl_ptr %= runtime->buffer_size;
+               runtime->avail -= count1;
+
                if (kernelbuf)
-                       memcpy(kernelbuf + result, runtime->buffer + runtime->appl_ptr, count1);
+                       memcpy(kernelbuf + result, runtime->buffer + appl_ptr, count1);
                if (userbuf) {
                        spin_unlock_irqrestore(&runtime->lock, flags);
                        if (copy_to_user(userbuf + result,
-                                        runtime->buffer + runtime->appl_ptr, count1)) {
+                                        runtime->buffer + appl_ptr, count1)) {
                                return result > 0 ? result : -EFAULT;
                        }
                        spin_lock_irqsave(&runtime->lock, flags);
                }
-               runtime->appl_ptr += count1;
-               runtime->appl_ptr %= runtime->buffer_size;
-               runtime->avail -= count1;
-               spin_unlock_irqrestore(&runtime->lock, flags);
                result += count1;
                count -= count1;
        }
+       spin_unlock_irqrestore(&runtime->lock, flags);
        return result;
 }
 
@@ -1055,23 +1060,16 @@ int snd_rawmidi_transmit_empty(struct snd_rawmidi_substream *substream)
 EXPORT_SYMBOL(snd_rawmidi_transmit_empty);
 
 /**
- * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * __snd_rawmidi_transmit_peek - copy data from the internal buffer
  * @substream: the rawmidi substream
  * @buffer: the buffer pointer
  * @count: data size to transfer
  *
- * Copies data from the internal output buffer to the given buffer.
- *
- * Call this in the interrupt handler when the midi output is ready,
- * and call snd_rawmidi_transmit_ack() after the transmission is
- * finished.
- *
- * Return: The size of copied data, or a negative error code on failure.
+ * This is a variant of snd_rawmidi_transmit_peek() without spinlock.
  */
-int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+int __snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
                              unsigned char *buffer, int count)
 {
-       unsigned long flags;
        int result, count1;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
 
@@ -1081,7 +1079,6 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
                return -EINVAL;
        }
        result = 0;
-       spin_lock_irqsave(&runtime->lock, flags);
        if (runtime->avail >= runtime->buffer_size) {
                /* warning: lowlevel layer MUST trigger down the hardware */
                goto __skip;
@@ -1106,25 +1103,47 @@ int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
                }
        }
       __skip:
+       return result;
+}
+EXPORT_SYMBOL(__snd_rawmidi_transmit_peek);
+
+/**
+ * snd_rawmidi_transmit_peek - copy data from the internal buffer
+ * @substream: the rawmidi substream
+ * @buffer: the buffer pointer
+ * @count: data size to transfer
+ *
+ * Copies data from the internal output buffer to the given buffer.
+ *
+ * Call this in the interrupt handler when the midi output is ready,
+ * and call snd_rawmidi_transmit_ack() after the transmission is
+ * finished.
+ *
+ * Return: The size of copied data, or a negative error code on failure.
+ */
+int snd_rawmidi_transmit_peek(struct snd_rawmidi_substream *substream,
+                             unsigned char *buffer, int count)
+{
+       struct snd_rawmidi_runtime *runtime = substream->runtime;
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&runtime->lock, flags);
+       result = __snd_rawmidi_transmit_peek(substream, buffer, count);
        spin_unlock_irqrestore(&runtime->lock, flags);
        return result;
 }
 EXPORT_SYMBOL(snd_rawmidi_transmit_peek);
 
 /**
- * snd_rawmidi_transmit_ack - acknowledge the transmission
+ * __snd_rawmidi_transmit_ack - acknowledge the transmission
  * @substream: the rawmidi substream
  * @count: the transferred count
  *
- * Advances the hardware pointer for the internal output buffer with
- * the given size and updates the condition.
- * Call after the transmission is finished.
- *
- * Return: The advanced size if successful, or a negative error code on failure.
+ * This is a variant of snd_rawmidi_transmit_ack() without spinlock.
  */
-int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+int __snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
 {
-       unsigned long flags;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
 
        if (runtime->buffer == NULL) {
@@ -1132,7 +1151,6 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
                          "snd_rawmidi_transmit_ack: output is not active!!!\n");
                return -EINVAL;
        }
-       spin_lock_irqsave(&runtime->lock, flags);
        snd_BUG_ON(runtime->avail + count > runtime->buffer_size);
        runtime->hw_ptr += count;
        runtime->hw_ptr %= runtime->buffer_size;
@@ -1142,9 +1160,32 @@ int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
                if (runtime->drain || snd_rawmidi_ready(substream))
                        wake_up(&runtime->sleep);
        }
-       spin_unlock_irqrestore(&runtime->lock, flags);
        return count;
 }
+EXPORT_SYMBOL(__snd_rawmidi_transmit_ack);
+
+/**
+ * snd_rawmidi_transmit_ack - acknowledge the transmission
+ * @substream: the rawmidi substream
+ * @count: the transferred count
+ *
+ * Advances the hardware pointer for the internal output buffer with
+ * the given size and updates the condition.
+ * Call after the transmission is finished.
+ *
+ * Return: The advanced size if successful, or a negative error code on failure.
+ */
+int snd_rawmidi_transmit_ack(struct snd_rawmidi_substream *substream, int count)
+{
+       struct snd_rawmidi_runtime *runtime = substream->runtime;
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&runtime->lock, flags);
+       result = __snd_rawmidi_transmit_ack(substream, count);
+       spin_unlock_irqrestore(&runtime->lock, flags);
+       return result;
+}
 EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
 
 /**
@@ -1160,12 +1201,22 @@ EXPORT_SYMBOL(snd_rawmidi_transmit_ack);
 int snd_rawmidi_transmit(struct snd_rawmidi_substream *substream,
                         unsigned char *buffer, int count)
 {
+       struct snd_rawmidi_runtime *runtime = substream->runtime;
+       int result;
+       unsigned long flags;
+
+       spin_lock_irqsave(&runtime->lock, flags);
        if (!substream->opened)
-               return -EBADFD;
-       count = snd_rawmidi_transmit_peek(substream, buffer, count);
-       if (count < 0)
-               return count;
-       return snd_rawmidi_transmit_ack(substream, count);
+               result = -EBADFD;
+       else {
+               count = __snd_rawmidi_transmit_peek(substream, buffer, count);
+               if (count <= 0)
+                       result = count;
+               else
+                       result = __snd_rawmidi_transmit_ack(substream, count);
+       }
+       spin_unlock_irqrestore(&runtime->lock, flags);
+       return result;
 }
 EXPORT_SYMBOL(snd_rawmidi_transmit);
 
@@ -1177,8 +1228,9 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
        unsigned long flags;
        long count1, result;
        struct snd_rawmidi_runtime *runtime = substream->runtime;
+       unsigned long appl_ptr;
 
-       if (snd_BUG_ON(!kernelbuf && !userbuf))
+       if (!kernelbuf && !userbuf)
                return -EINVAL;
        if (snd_BUG_ON(!runtime->buffer))
                return -EINVAL;
@@ -1197,12 +1249,19 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
                        count1 = count;
                if (count1 > (long)runtime->avail)
                        count1 = runtime->avail;
+
+               /* update runtime->appl_ptr before unlocking for userbuf */
+               appl_ptr = runtime->appl_ptr;
+               runtime->appl_ptr += count1;
+               runtime->appl_ptr %= runtime->buffer_size;
+               runtime->avail -= count1;
+
                if (kernelbuf)
-                       memcpy(runtime->buffer + runtime->appl_ptr,
+                       memcpy(runtime->buffer + appl_ptr,
                               kernelbuf + result, count1);
                else if (userbuf) {
                        spin_unlock_irqrestore(&runtime->lock, flags);
-                       if (copy_from_user(runtime->buffer + runtime->appl_ptr,
+                       if (copy_from_user(runtime->buffer + appl_ptr,
                                           userbuf + result, count1)) {
                                spin_lock_irqsave(&runtime->lock, flags);
                                result = result > 0 ? result : -EFAULT;
@@ -1210,9 +1269,6 @@ static long snd_rawmidi_kernel_write1(struct snd_rawmidi_substream *substream,
                        }
                        spin_lock_irqsave(&runtime->lock, flags);
                }
-               runtime->appl_ptr += count1;
-               runtime->appl_ptr %= runtime->buffer_size;
-               runtime->avail -= count1;
                result += count1;
                count -= count1;
        }
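
The locked wrappers above keep the driver-side contract described in the kernel-doc: peek data in the output interrupt handler, push it to the hardware, then acknowledge what was actually sent. A hedged sketch of that pattern (uart_out(), chip and MY_FIFO_ROOM are illustrative):

	unsigned char buf[MY_FIFO_ROOM];
	int n = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));

	if (n > 0) {
		uart_out(chip, buf, n);			/* hand bytes to the device */
		snd_rawmidi_transmit_ack(substream, n);	/* then advance the hw pointer */
	}
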
index b1221b29728e115ad95e50d4638de3e3949bb9da..6779e82b46dd7060bd982137ca75cfcdbbfa989e 100644 (file)
@@ -202,7 +202,7 @@ snd_seq_oss_open(struct file *file, int level)
 
        dp->index = i;
        if (i >= SNDRV_SEQ_OSS_MAX_CLIENTS) {
-               pr_err("ALSA: seq_oss: too many applications\n");
+               pr_debug("ALSA: seq_oss: too many applications\n");
                rc = -ENOMEM;
                goto _error;
        }
index 0f3b38184fe58da2809f9fa4fa9c79a83967c59e..b16dbef041747e1762c3285875a5573ac119b031 100644 (file)
@@ -308,7 +308,7 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
        struct seq_oss_synth *rec;
        struct seq_oss_synthinfo *info;
 
-       if (snd_BUG_ON(dp->max_synthdev >= SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
+       if (snd_BUG_ON(dp->max_synthdev > SNDRV_SEQ_OSS_MAX_SYNTH_DEVS))
                return;
        for (i = 0; i < dp->max_synthdev; i++) {
                info = &dp->synths[i];
index 13cfa815732db759935f2daaf4c997c73016eb8c..58e79e02f2174e29f409cf129030af87d240dfce 100644 (file)
@@ -678,6 +678,9 @@ static int deliver_to_subscribers(struct snd_seq_client *client,
        else
                down_read(&grp->list_mutex);
        list_for_each_entry(subs, &grp->list_head, src_list) {
+               /* both ports ready? */
+               if (atomic_read(&subs->ref_count) != 2)
+                       continue;
                event->dest = subs->info.dest;
                if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
                        /* convert time according to flag with subscription */
index 55170a20ae7237246f5560e14c5b5066bb52ba35..921fb2bd8fadb37adf167d6bf52dd97270f005a4 100644 (file)
@@ -173,10 +173,6 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client,
 }
 
 /* */
-enum group_type {
-       SRC_LIST, DEST_LIST
-};
-
 static int subscribe_port(struct snd_seq_client *client,
                          struct snd_seq_client_port *port,
                          struct snd_seq_port_subs_info *grp,
@@ -203,6 +199,20 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
        return NULL;
 }
 
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                       struct snd_seq_client_port *port,
+                                       struct snd_seq_subscribers *subs,
+                                       bool is_src, bool ack);
+
+static inline struct snd_seq_subscribers *
+get_subscriber(struct list_head *p, bool is_src)
+{
+       if (is_src)
+               return list_entry(p, struct snd_seq_subscribers, src_list);
+       else
+               return list_entry(p, struct snd_seq_subscribers, dest_list);
+}
+
 /*
  * remove all subscribers on the list
  * this is called from port_delete, for each src and dest list.
@@ -210,7 +220,7 @@ static struct snd_seq_client_port *get_client_port(struct snd_seq_addr *addr,
 static void clear_subscriber_list(struct snd_seq_client *client,
                                  struct snd_seq_client_port *port,
                                  struct snd_seq_port_subs_info *grp,
-                                 int grptype)
+                                 int is_src)
 {
        struct list_head *p, *n;
 
@@ -219,15 +229,13 @@ static void clear_subscriber_list(struct snd_seq_client *client,
                struct snd_seq_client *c;
                struct snd_seq_client_port *aport;
 
-               if (grptype == SRC_LIST) {
-                       subs = list_entry(p, struct snd_seq_subscribers, src_list);
+               subs = get_subscriber(p, is_src);
+               if (is_src)
                        aport = get_client_port(&subs->info.dest, &c);
-               } else {
-                       subs = list_entry(p, struct snd_seq_subscribers, dest_list);
+               else
                        aport = get_client_port(&subs->info.sender, &c);
-               }
-               list_del(p);
-               unsubscribe_port(client, port, grp, &subs->info, 0);
+               delete_and_unsubscribe_port(client, port, subs, is_src, false);
+
                if (!aport) {
                        /* looks like the connected port is being deleted.
                         * we decrease the counter, and when both ports are deleted
@@ -235,21 +243,14 @@ static void clear_subscriber_list(struct snd_seq_client *client,
                         */
                        if (atomic_dec_and_test(&subs->ref_count))
                                kfree(subs);
-               } else {
-                       /* ok we got the connected port */
-                       struct snd_seq_port_subs_info *agrp;
-                       agrp = (grptype == SRC_LIST) ? &aport->c_dest : &aport->c_src;
-                       down_write(&agrp->list_mutex);
-                       if (grptype == SRC_LIST)
-                               list_del(&subs->dest_list);
-                       else
-                               list_del(&subs->src_list);
-                       up_write(&agrp->list_mutex);
-                       unsubscribe_port(c, aport, agrp, &subs->info, 1);
-                       kfree(subs);
-                       snd_seq_port_unlock(aport);
-                       snd_seq_client_unlock(c);
+                       continue;
                }
+
+               /* ok we got the connected port */
+               delete_and_unsubscribe_port(c, aport, subs, !is_src, true);
+               kfree(subs);
+               snd_seq_port_unlock(aport);
+               snd_seq_client_unlock(c);
        }
 }
 
@@ -262,8 +263,8 @@ static int port_delete(struct snd_seq_client *client,
        snd_use_lock_sync(&port->use_lock); 
 
        /* clear subscribers info */
-       clear_subscriber_list(client, port, &port->c_src, SRC_LIST);
-       clear_subscriber_list(client, port, &port->c_dest, DEST_LIST);
+       clear_subscriber_list(client, port, &port->c_src, true);
+       clear_subscriber_list(client, port, &port->c_dest, false);
 
        if (port->private_free)
                port->private_free(port->private_data);
@@ -479,85 +480,120 @@ static int match_subs_info(struct snd_seq_port_subscribe *r,
        return 0;
 }
 
-
-/* connect two ports */
-int snd_seq_port_connect(struct snd_seq_client *connector,
-                        struct snd_seq_client *src_client,
-                        struct snd_seq_client_port *src_port,
-                        struct snd_seq_client *dest_client,
-                        struct snd_seq_client_port *dest_port,
-                        struct snd_seq_port_subscribe *info)
+static int check_and_subscribe_port(struct snd_seq_client *client,
+                                   struct snd_seq_client_port *port,
+                                   struct snd_seq_subscribers *subs,
+                                   bool is_src, bool exclusive, bool ack)
 {
-       struct snd_seq_port_subs_info *src = &src_port->c_src;
-       struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
-       struct snd_seq_subscribers *subs, *s;
-       int err, src_called = 0;
-       unsigned long flags;
-       int exclusive;
+       struct snd_seq_port_subs_info *grp;
+       struct list_head *p;
+       struct snd_seq_subscribers *s;
+       int err;
 
-       subs = kzalloc(sizeof(*subs), GFP_KERNEL);
-       if (! subs)
-               return -ENOMEM;
-
-       subs->info = *info;
-       atomic_set(&subs->ref_count, 2);
-
-       down_write(&src->list_mutex);
-       down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
-       exclusive = info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE ? 1 : 0;
+       grp = is_src ? &port->c_src : &port->c_dest;
        err = -EBUSY;
+       down_write(&grp->list_mutex);
        if (exclusive) {
-               if (! list_empty(&src->list_head) || ! list_empty(&dest->list_head))
+               if (!list_empty(&grp->list_head))
                        goto __error;
        } else {
-               if (src->exclusive || dest->exclusive)
+               if (grp->exclusive)
                        goto __error;
                /* check whether already exists */
-               list_for_each_entry(s, &src->list_head, src_list) {
-                       if (match_subs_info(info, &s->info))
-                               goto __error;
-               }
-               list_for_each_entry(s, &dest->list_head, dest_list) {
-                       if (match_subs_info(info, &s->info))
+               list_for_each(p, &grp->list_head) {
+                       s = get_subscriber(p, is_src);
+                       if (match_subs_info(&subs->info, &s->info))
                                goto __error;
                }
        }
 
-       if ((err = subscribe_port(src_client, src_port, src, info,
-                                 connector->number != src_client->number)) < 0)
-               goto __error;
-       src_called = 1;
-
-       if ((err = subscribe_port(dest_client, dest_port, dest, info,
-                                 connector->number != dest_client->number)) < 0)
+       err = subscribe_port(client, port, grp, &subs->info, ack);
+       if (err < 0) {
+               grp->exclusive = 0;
                goto __error;
+       }
 
        /* add to list */
-       write_lock_irqsave(&src->list_lock, flags);
-       // write_lock(&dest->list_lock); // no other lock yet
-       list_add_tail(&subs->src_list, &src->list_head);
-       list_add_tail(&subs->dest_list, &dest->list_head);
-       // write_unlock(&dest->list_lock); // no other lock yet
-       write_unlock_irqrestore(&src->list_lock, flags);
+       write_lock_irq(&grp->list_lock);
+       if (is_src)
+               list_add_tail(&subs->src_list, &grp->list_head);
+       else
+               list_add_tail(&subs->dest_list, &grp->list_head);
+       grp->exclusive = exclusive;
+       atomic_inc(&subs->ref_count);
+       write_unlock_irq(&grp->list_lock);
+       err = 0;
+
+ __error:
+       up_write(&grp->list_mutex);
+       return err;
+}
 
-       src->exclusive = dest->exclusive = exclusive;
+static void delete_and_unsubscribe_port(struct snd_seq_client *client,
+                                       struct snd_seq_client_port *port,
+                                       struct snd_seq_subscribers *subs,
+                                       bool is_src, bool ack)
+{
+       struct snd_seq_port_subs_info *grp;
+
+       grp = is_src ? &port->c_src : &port->c_dest;
+       down_write(&grp->list_mutex);
+       write_lock_irq(&grp->list_lock);
+       if (is_src)
+               list_del(&subs->src_list);
+       else
+               list_del(&subs->dest_list);
+       grp->exclusive = 0;
+       write_unlock_irq(&grp->list_lock);
+       up_write(&grp->list_mutex);
+
+       unsubscribe_port(client, port, grp, &subs->info, ack);
+}
+
+/* connect two ports */
+int snd_seq_port_connect(struct snd_seq_client *connector,
+                        struct snd_seq_client *src_client,
+                        struct snd_seq_client_port *src_port,
+                        struct snd_seq_client *dest_client,
+                        struct snd_seq_client_port *dest_port,
+                        struct snd_seq_port_subscribe *info)
+{
+       struct snd_seq_subscribers *subs;
+       bool exclusive;
+       int err;
+
+       subs = kzalloc(sizeof(*subs), GFP_KERNEL);
+       if (!subs)
+               return -ENOMEM;
+
+       subs->info = *info;
+       atomic_set(&subs->ref_count, 0);
+       INIT_LIST_HEAD(&subs->src_list);
+       INIT_LIST_HEAD(&subs->dest_list);
+
+       exclusive = !!(info->flags & SNDRV_SEQ_PORT_SUBS_EXCLUSIVE);
+
+       err = check_and_subscribe_port(src_client, src_port, subs, true,
+                                      exclusive,
+                                      connector->number != src_client->number);
+       if (err < 0)
+               goto error;
+       err = check_and_subscribe_port(dest_client, dest_port, subs, false,
+                                      exclusive,
+                                      connector->number != dest_client->number);
+       if (err < 0)
+               goto error_dest;
 
-       up_write(&dest->list_mutex);
-       up_write(&src->list_mutex);
        return 0;
 
__error:
-       if (src_called)
-               unsubscribe_port(src_client, src_port, src, info,
-                                connector->number != src_client->number);
error_dest:
+       delete_and_unsubscribe_port(src_client, src_port, subs, true,
+                                   connector->number != src_client->number);
+ error:
        kfree(subs);
-       up_write(&dest->list_mutex);
-       up_write(&src->list_mutex);
        return err;
 }
 
-
 /* remove the connection */
 int snd_seq_port_disconnect(struct snd_seq_client *connector,
                            struct snd_seq_client *src_client,
@@ -567,37 +603,28 @@ int snd_seq_port_disconnect(struct snd_seq_client *connector,
                            struct snd_seq_port_subscribe *info)
 {
        struct snd_seq_port_subs_info *src = &src_port->c_src;
-       struct snd_seq_port_subs_info *dest = &dest_port->c_dest;
        struct snd_seq_subscribers *subs;
        int err = -ENOENT;
-       unsigned long flags;
 
        down_write(&src->list_mutex);
-       down_write_nested(&dest->list_mutex, SINGLE_DEPTH_NESTING);
-
        /* look for the connection */
        list_for_each_entry(subs, &src->list_head, src_list) {
                if (match_subs_info(info, &subs->info)) {
-                       write_lock_irqsave(&src->list_lock, flags);
-                       // write_lock(&dest->list_lock);  // no lock yet
-                       list_del(&subs->src_list);
-                       list_del(&subs->dest_list);
-                       // write_unlock(&dest->list_lock);
-                       write_unlock_irqrestore(&src->list_lock, flags);
-                       src->exclusive = dest->exclusive = 0;
-                       unsubscribe_port(src_client, src_port, src, info,
-                                        connector->number != src_client->number);
-                       unsubscribe_port(dest_client, dest_port, dest, info,
-                                        connector->number != dest_client->number);
-                       kfree(subs);
+                       atomic_dec(&subs->ref_count); /* mark as not ready */
                        err = 0;
                        break;
                }
        }
-
-       up_write(&dest->list_mutex);
        up_write(&src->list_mutex);
-       return err;
+       if (err < 0)
+               return err;
+
+       delete_and_unsubscribe_port(src_client, src_port, subs, true,
+                                   connector->number != src_client->number);
+       delete_and_unsubscribe_port(dest_client, dest_port, subs, false,
+                                   connector->number != dest_client->number);
+       kfree(subs);
+       return 0;
 }
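
The refactor above splits the subscription work into per-group helpers (check_and_subscribe_port / delete_and_unsubscribe_port): each side is handled under its own list_mutex instead of nesting both locks, and the subscribers object carries a ref_count that is raised once per list it joins; disconnect first drops the count under the source lock to mark the entry as not ready, then tears each side down outside that lock. The following is a minimal standalone sketch of that publish/teardown shape, not the kernel code itself: userspace, pthread mutexes in place of the per-group rw_semaphore/rwlock, C11 atomics for the reference count, and all names are hypothetical.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct subscriber {
        atomic_int ref_count;           /* 0 = being built, 2 = on both lists */
        struct subscriber *src_next;    /* link used on the source list */
        struct subscriber *dst_next;    /* link used on the destination list */
};

struct group {
        pthread_mutex_t list_mutex;     /* stands in for grp->list_mutex */
        struct subscriber *head;
};

static struct subscriber **link_of(struct subscriber *s, bool is_src)
{
        return is_src ? &s->src_next : &s->dst_next;
}

/* Publish subs on one group's list and take one reference for that list. */
static void check_and_subscribe(struct group *grp, struct subscriber *subs,
                                bool is_src)
{
        pthread_mutex_lock(&grp->list_mutex);
        *link_of(subs, is_src) = grp->head;
        grp->head = subs;
        atomic_fetch_add(&subs->ref_count, 1);
        pthread_mutex_unlock(&grp->list_mutex);
}

/* Unlink subs from one group's list; the caller frees it afterwards. */
static void delete_and_unsubscribe(struct group *grp, struct subscriber *subs,
                                   bool is_src)
{
        pthread_mutex_lock(&grp->list_mutex);
        struct subscriber **pp = &grp->head;
        while (*pp && *pp != subs)
                pp = link_of(*pp, is_src);
        if (*pp)
                *pp = *link_of(subs, is_src);
        pthread_mutex_unlock(&grp->list_mutex);
}

int main(void)
{
        struct group src = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct group dst = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct subscriber *subs = calloc(1, sizeof(*subs));

        atomic_init(&subs->ref_count, 0);

        /* connect: publish on each side, taking one list lock at a time */
        check_and_subscribe(&src, subs, true);
        check_and_subscribe(&dst, subs, false);

        /* disconnect: mark as not ready, then unlink each side and free */
        atomic_fetch_sub(&subs->ref_count, 1);
        delete_and_unsubscribe(&src, subs, true);
        delete_and_unsubscribe(&dst, subs, false);
        free(subs);
        printf("connected and disconnected without nested list locks\n");
        return 0;
}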
 
 
index 82b220c769c131ecd05fea96fd0692c12d56642f..293104926098f7074001b6f6a4248d2a09ddd360 100644 (file)
@@ -90,6 +90,9 @@ void snd_seq_timer_delete(struct snd_seq_timer **tmr)
 
 void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&tmr->lock, flags);
        /* setup defaults */
        tmr->ppq = 96;          /* 96 PPQ */
        tmr->tempo = 500000;    /* 120 BPM */
@@ -105,21 +108,25 @@ void snd_seq_timer_defaults(struct snd_seq_timer * tmr)
        tmr->preferred_resolution = seq_default_timer_resolution;
 
        tmr->skew = tmr->skew_base = SKEW_BASE;
+       spin_unlock_irqrestore(&tmr->lock, flags);
 }
 
-void snd_seq_timer_reset(struct snd_seq_timer * tmr)
+static void seq_timer_reset(struct snd_seq_timer *tmr)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&tmr->lock, flags);
-
        /* reset time & songposition */
        tmr->cur_time.tv_sec = 0;
        tmr->cur_time.tv_nsec = 0;
 
        tmr->tick.cur_tick = 0;
        tmr->tick.fraction = 0;
+}
+
+void snd_seq_timer_reset(struct snd_seq_timer *tmr)
+{
+       unsigned long flags;
 
+       spin_lock_irqsave(&tmr->lock, flags);
+       seq_timer_reset(tmr);
        spin_unlock_irqrestore(&tmr->lock, flags);
 }
 
@@ -138,8 +145,11 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
        tmr = q->timer;
        if (tmr == NULL)
                return;
-       if (!tmr->running)
+       spin_lock_irqsave(&tmr->lock, flags);
+       if (!tmr->running) {
+               spin_unlock_irqrestore(&tmr->lock, flags);
                return;
+       }
 
        resolution *= ticks;
        if (tmr->skew != tmr->skew_base) {
@@ -148,8 +158,6 @@ static void snd_seq_timer_interrupt(struct snd_timer_instance *timeri,
                        (((resolution & 0xffff) * tmr->skew) >> 16);
        }
 
-       spin_lock_irqsave(&tmr->lock, flags);
-
        /* update timer */
        snd_seq_inc_time_nsec(&tmr->cur_time, resolution);
 
@@ -296,26 +304,30 @@ int snd_seq_timer_open(struct snd_seq_queue *q)
        t->callback = snd_seq_timer_interrupt;
        t->callback_data = q;
        t->flags |= SNDRV_TIMER_IFLG_AUTO;
+       spin_lock_irq(&tmr->lock);
        tmr->timeri = t;
+       spin_unlock_irq(&tmr->lock);
        return 0;
 }
 
 int snd_seq_timer_close(struct snd_seq_queue *q)
 {
        struct snd_seq_timer *tmr;
+       struct snd_timer_instance *t;
        
        tmr = q->timer;
        if (snd_BUG_ON(!tmr))
                return -EINVAL;
-       if (tmr->timeri) {
-               snd_timer_stop(tmr->timeri);
-               snd_timer_close(tmr->timeri);
-               tmr->timeri = NULL;
-       }
+       spin_lock_irq(&tmr->lock);
+       t = tmr->timeri;
+       tmr->timeri = NULL;
+       spin_unlock_irq(&tmr->lock);
+       if (t)
+               snd_timer_close(t);
        return 0;
 }
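
snd_seq_timer_close() above now detaches tmr->timeri under tmr->lock and only afterwards calls snd_timer_close() on the detached pointer, so the potentially sleeping teardown never runs inside the spinlock. A minimal sketch of that detach-then-release shape, assuming a userspace setting with a pthread mutex in place of the spinlock and purely hypothetical names:

#include <pthread.h>
#include <stdlib.h>

struct resource {
        int fd;
};

struct owner {
        pthread_mutex_t lock;
        struct resource *res;    /* other paths read this under lock */
};

/* Detach the pointer under the lock, release the object outside it. */
static void owner_close(struct owner *o)
{
        struct resource *r;

        pthread_mutex_lock(&o->lock);
        r = o->res;              /* take ownership of the pointer */
        o->res = NULL;           /* lock holders now see "already closed" */
        pthread_mutex_unlock(&o->lock);

        if (r)
                free(r);         /* potentially slow teardown, done unlocked */
}

int main(void)
{
        struct owner o = { PTHREAD_MUTEX_INITIALIZER, NULL };

        o.res = malloc(sizeof(*o.res));
        owner_close(&o);         /* frees the resource exactly once */
        owner_close(&o);         /* second call finds NULL and does nothing */
        return 0;
}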
 
-int snd_seq_timer_stop(struct snd_seq_timer * tmr)
+static int seq_timer_stop(struct snd_seq_timer *tmr)
 {
        if (! tmr->timeri)
                return -EINVAL;
@@ -326,6 +338,17 @@ int snd_seq_timer_stop(struct snd_seq_timer * tmr)
        return 0;
 }
 
+int snd_seq_timer_stop(struct snd_seq_timer *tmr)
+{
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&tmr->lock, flags);
+       err = seq_timer_stop(tmr);
+       spin_unlock_irqrestore(&tmr->lock, flags);
+       return err;
+}
+
 static int initialize_timer(struct snd_seq_timer *tmr)
 {
        struct snd_timer *t;
@@ -358,13 +381,13 @@ static int initialize_timer(struct snd_seq_timer *tmr)
        return 0;
 }
 
-int snd_seq_timer_start(struct snd_seq_timer * tmr)
+static int seq_timer_start(struct snd_seq_timer *tmr)
 {
        if (! tmr->timeri)
                return -EINVAL;
        if (tmr->running)
-               snd_seq_timer_stop(tmr);
-       snd_seq_timer_reset(tmr);
+               seq_timer_stop(tmr);
+       seq_timer_reset(tmr);
        if (initialize_timer(tmr) < 0)
                return -EINVAL;
        snd_timer_start(tmr->timeri, tmr->ticks);
@@ -373,14 +396,25 @@ int snd_seq_timer_start(struct snd_seq_timer * tmr)
        return 0;
 }
 
-int snd_seq_timer_continue(struct snd_seq_timer * tmr)
+int snd_seq_timer_start(struct snd_seq_timer *tmr)
+{
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&tmr->lock, flags);
+       err = seq_timer_start(tmr);
+       spin_unlock_irqrestore(&tmr->lock, flags);
+       return err;
+}
+
+static int seq_timer_continue(struct snd_seq_timer *tmr)
 {
        if (! tmr->timeri)
                return -EINVAL;
        if (tmr->running)
                return -EBUSY;
        if (! tmr->initialized) {
-               snd_seq_timer_reset(tmr);
+               seq_timer_reset(tmr);
                if (initialize_timer(tmr) < 0)
                        return -EINVAL;
        }
@@ -390,11 +424,24 @@ int snd_seq_timer_continue(struct snd_seq_timer * tmr)
        return 0;
 }
 
+int snd_seq_timer_continue(struct snd_seq_timer *tmr)
+{
+       unsigned long flags;
+       int err;
+
+       spin_lock_irqsave(&tmr->lock, flags);
+       err = seq_timer_continue(tmr);
+       spin_unlock_irqrestore(&tmr->lock, flags);
+       return err;
+}
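
A recurring shape in the seq_timer hunks above: the public entry points (snd_seq_timer_start/stop/continue/reset) become thin wrappers that take tmr->lock, while the real work moves into static seq_timer_*() helpers that assume the lock is already held, so seq_timer_start() can call seq_timer_stop() and seq_timer_reset() without re-acquiring it. A hedged userspace sketch of that locked-wrapper / unlocked-helper split, with a pthread mutex standing in for spin_lock_irqsave and hypothetical names throughout:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct seq_timer {
        pthread_mutex_t lock;
        bool running;
        long ticks;
};

/* Helpers assume tmr->lock is already held by the caller. */
static int timer_stop_locked(struct seq_timer *tmr)
{
        if (!tmr->running)
                return -EINVAL;
        tmr->running = false;
        return 0;
}

static int timer_start_locked(struct seq_timer *tmr)
{
        if (tmr->running)
                timer_stop_locked(tmr);  /* no double-lock: we already hold it */
        tmr->ticks = 0;
        tmr->running = true;
        return 0;
}

/* Public wrappers only manage the lock. */
int timer_start(struct seq_timer *tmr)
{
        pthread_mutex_lock(&tmr->lock);
        int err = timer_start_locked(tmr);
        pthread_mutex_unlock(&tmr->lock);
        return err;
}

int timer_stop(struct seq_timer *tmr)
{
        pthread_mutex_lock(&tmr->lock);
        int err = timer_stop_locked(tmr);
        pthread_mutex_unlock(&tmr->lock);
        return err;
}

int main(void)
{
        struct seq_timer tmr = { PTHREAD_MUTEX_INITIALIZER, false, 0 };

        printf("start=%d stop=%d\n", timer_start(&tmr), timer_stop(&tmr));
        return 0;
}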
+
 /* return current 'real' time. use timeofday() to get better granularity. */
 snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
 {
        snd_seq_real_time_t cur_time;
+       unsigned long flags;
 
+       spin_lock_irqsave(&tmr->lock, flags);
        cur_time = tmr->cur_time;
        if (tmr->running) { 
                struct timeval tm;
@@ -410,7 +457,7 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
                }
                snd_seq_sanity_real_time(&cur_time);
        }
-                
+       spin_unlock_irqrestore(&tmr->lock, flags);
        return cur_time;        
 }
 
index 3da2d48610b3a91e4b93b0967b1fac5afebe41f7..c82ed3e70506db65adcbd48f6bdd55bd9628633d 100644 (file)
@@ -155,21 +155,26 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
        struct snd_virmidi *vmidi = substream->runtime->private_data;
        int count, res;
        unsigned char buf[32], *pbuf;
+       unsigned long flags;
 
        if (up) {
                vmidi->trigger = 1;
                if (vmidi->seq_mode == SNDRV_VIRMIDI_SEQ_DISPATCH &&
                    !(vmidi->rdev->flags & SNDRV_VIRMIDI_SUBSCRIBE)) {
-                       snd_rawmidi_transmit_ack(substream, substream->runtime->buffer_size - substream->runtime->avail);
-                       return;         /* ignored */
+                       while (snd_rawmidi_transmit(substream, buf,
+                                                   sizeof(buf)) > 0) {
+                               /* ignored */
+                       }
+                       return;
                }
                if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
                        if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
                                return;
                        vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
                }
+               spin_lock_irqsave(&substream->runtime->lock, flags);
                while (1) {
-                       count = snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
+                       count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
                        if (count <= 0)
                                break;
                        pbuf = buf;
@@ -179,16 +184,18 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
                                        snd_midi_event_reset_encode(vmidi->parser);
                                        continue;
                                }
-                               snd_rawmidi_transmit_ack(substream, res);
+                               __snd_rawmidi_transmit_ack(substream, res);
                                pbuf += res;
                                count -= res;
                                if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
                                        if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
-                                               return;
+                                               goto out;
                                        vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
                                }
                        }
                }
+       out:
+               spin_unlock_irqrestore(&substream->runtime->lock, flags);
        } else {
                vmidi->trigger = 0;
        }
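
Two things happen in the virmidi hunk above: the subscriber-less case now drains the rawmidi buffer with repeated snd_rawmidi_transmit() calls instead of acknowledging everything in one shot, and the encode loop switches to the double-underscore peek/ack variants while holding substream->runtime->lock, so the buffer cannot change between the peek and the ack. A small userspace sketch of a peek/consume loop under one lock; the byte_fifo type, its functions, and the lock are hypothetical stand-ins, not the rawmidi API:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

struct byte_fifo {
        pthread_mutex_t lock;
        unsigned char data[256];
        size_t head, used;
};

/* __peek/__ack: callers must already hold fifo->lock. */
static size_t __fifo_peek(struct byte_fifo *f, unsigned char *buf, size_t len)
{
        size_t n = f->used < len ? f->used : len;

        for (size_t i = 0; i < n; i++)
                buf[i] = f->data[(f->head + i) % sizeof(f->data)];
        return n;
}

static void __fifo_ack(struct byte_fifo *f, size_t n)
{
        f->head = (f->head + n) % sizeof(f->data);
        f->used -= n;
}

static void drain_and_process(struct byte_fifo *f)
{
        unsigned char buf[32];
        size_t count;

        pthread_mutex_lock(&f->lock);
        while ((count = __fifo_peek(f, buf, sizeof(buf))) > 0) {
                /* "process" the bytes, then ack exactly what was consumed */
                __fifo_ack(f, count);
        }
        pthread_mutex_unlock(&f->lock);
}

int main(void)
{
        struct byte_fifo f = { .lock = PTHREAD_MUTEX_INITIALIZER };

        memcpy(f.data, "hello", 5);
        f.used = 5;
        drain_and_process(&f);
        printf("left in fifo: %zu\n", f.used);
        return 0;
}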
@@ -254,9 +261,13 @@ static int snd_virmidi_output_open(struct snd_rawmidi_substream *substream)
  */
 static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream)
 {
+       struct snd_virmidi_dev *rdev = substream->rmidi->private_data;
        struct snd_virmidi *vmidi = substream->runtime->private_data;
-       snd_midi_event_free(vmidi->parser);
+
+       write_lock_irq(&rdev->filelist_lock);
        list_del(&vmidi->list);
+       write_unlock_irq(&rdev->filelist_lock);
+       snd_midi_event_free(vmidi->parser);
        substream->runtime->private_data = NULL;
        kfree(vmidi);
        return 0;
index af1f68f7e315334202f191e3f049f044de9495f7..dca817fc78941b5f9109e8117b3fc5adb621562e 100644 (file)
@@ -422,7 +422,7 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
        spin_lock_irqsave(&timer->lock, flags);
        list_for_each_entry(ts, &ti->slave_active_head, active_list)
                if (ts->ccallback)
-                       ts->ccallback(ti, event + 100, &tstamp, resolution);
+                       ts->ccallback(ts, event + 100, &tstamp, resolution);
        spin_unlock_irqrestore(&timer->lock, flags);
 }
 
@@ -451,6 +451,10 @@ static int snd_timer_start_slave(struct snd_timer_instance *timeri)
        unsigned long flags;
 
        spin_lock_irqsave(&slave_active_lock, flags);
+       if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+               spin_unlock_irqrestore(&slave_active_lock, flags);
+               return -EBUSY;
+       }
        timeri->flags |= SNDRV_TIMER_IFLG_RUNNING;
        if (timeri->master && timeri->timer) {
                spin_lock(&timeri->timer->lock);
@@ -475,7 +479,8 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
                return -EINVAL;
        if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
                result = snd_timer_start_slave(timeri);
-               snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+               if (result >= 0)
+                       snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
                return result;
        }
        timer = timeri->timer;
@@ -484,11 +489,18 @@ int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks)
        if (timer->card && timer->card->shutdown)
                return -ENODEV;
        spin_lock_irqsave(&timer->lock, flags);
+       if (timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+                            SNDRV_TIMER_IFLG_START)) {
+               result = -EBUSY;
+               goto unlock;
+       }
        timeri->ticks = timeri->cticks = ticks;
        timeri->pticks = 0;
        result = snd_timer_start1(timer, timeri, ticks);
+ unlock:
        spin_unlock_irqrestore(&timer->lock, flags);
-       snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
+       if (result >= 0)
+               snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_START);
        return result;
 }
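
The snd_timer_start() change above checks the RUNNING/START flags under timer->lock and bails out with -EBUSY instead of re-arming an already started instance, and the START notification is now sent only when the start actually succeeded. A hedged sketch of that guard under stated assumptions (userspace, pthread mutex, hypothetical flag and type names):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define FLG_RUNNING 0x01

struct timer_inst {
        pthread_mutex_t lock;
        unsigned int flags;
};

static void notify_started(struct timer_inst *t)
{
        (void)t;                        /* event delivery would go here */
}

int timer_inst_start(struct timer_inst *t)
{
        int result = 0;

        pthread_mutex_lock(&t->lock);
        if (t->flags & FLG_RUNNING) {
                result = -EBUSY;        /* already armed: refuse, don't re-arm */
                goto unlock;
        }
        t->flags |= FLG_RUNNING;
unlock:
        pthread_mutex_unlock(&t->lock);
        if (result >= 0)
                notify_started(t);      /* notify only on success */
        return result;
}

int main(void)
{
        struct timer_inst t = { PTHREAD_MUTEX_INITIALIZER, 0 };

        printf("first=%d second=%d\n",
               timer_inst_start(&t), timer_inst_start(&t));
        return 0;
}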
 
@@ -502,9 +514,17 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
 
        if (timeri->flags & SNDRV_TIMER_IFLG_SLAVE) {
                spin_lock_irqsave(&slave_active_lock, flags);
+               if (!(timeri->flags & SNDRV_TIMER_IFLG_RUNNING)) {
+                       spin_unlock_irqrestore(&slave_active_lock, flags);
+                       return -EBUSY;
+               }
+               if (timeri->timer)
+                       spin_lock(&timeri->timer->lock);
                timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
                list_del_init(&timeri->ack_list);
                list_del_init(&timeri->active_list);
+               if (timeri->timer)
+                       spin_unlock(&timeri->timer->lock);
                spin_unlock_irqrestore(&slave_active_lock, flags);
                goto __end;
        }
@@ -512,6 +532,11 @@ static int _snd_timer_stop(struct snd_timer_instance *timeri, int event)
        if (!timer)
                return -EINVAL;
        spin_lock_irqsave(&timer->lock, flags);
+       if (!(timeri->flags & (SNDRV_TIMER_IFLG_RUNNING |
+                              SNDRV_TIMER_IFLG_START))) {
+               spin_unlock_irqrestore(&timer->lock, flags);
+               return -EBUSY;
+       }
        list_del_init(&timeri->ack_list);
        list_del_init(&timeri->active_list);
        if (timer->card && timer->card->shutdown) {
@@ -581,10 +606,15 @@ int snd_timer_continue(struct snd_timer_instance *timeri)
        if (timer->card && timer->card->shutdown)
                return -ENODEV;
        spin_lock_irqsave(&timer->lock, flags);
+       if (timeri->flags & SNDRV_TIMER_IFLG_RUNNING) {
+               result = -EBUSY;
+               goto unlock;
+       }
        if (!timeri->cticks)
                timeri->cticks = 1;
        timeri->pticks = 0;
        result = snd_timer_start1(timer, timeri, timer->sticks);
+ unlock:
        spin_unlock_irqrestore(&timer->lock, flags);
        snd_timer_notify1(timeri, SNDRV_TIMER_EVENT_CONTINUE);
        return result;
@@ -718,8 +748,8 @@ void snd_timer_interrupt(struct snd_timer * timer, unsigned long ticks_left)
                        ti->cticks = ti->ticks;
                } else {
                        ti->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
-                       if (--timer->running)
-                               list_del_init(&ti->active_list);
+                       --timer->running;
+                       list_del_init(&ti->active_list);
                }
                if ((timer->hw.flags & SNDRV_TIMER_HW_TASKLET) ||
                    (ti->flags & SNDRV_TIMER_IFLG_FAST))
@@ -1032,11 +1062,21 @@ static int snd_timer_s_stop(struct snd_timer * timer)
        return 0;
 }
 
+static int snd_timer_s_close(struct snd_timer *timer)
+{
+       struct snd_timer_system_private *priv;
+
+       priv = (struct snd_timer_system_private *)timer->private_data;
+       del_timer_sync(&priv->tlist);
+       return 0;
+}
+
 static struct snd_timer_hardware snd_timer_system =
 {
        .flags =        SNDRV_TIMER_HW_FIRST | SNDRV_TIMER_HW_TASKLET,
        .resolution =   1000000000L / HZ,
        .ticks =        10000000L,
+       .close =        snd_timer_s_close,
        .start =        snd_timer_s_start,
        .stop =         snd_timer_s_stop
 };
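
The new snd_timer_s_close() gives the system timer a .close hook that calls del_timer_sync() on priv->tlist, so a still-pending tick cannot fire after the timer has been torn down. Outside the kernel the same idea is "cancel synchronously before freeing"; the sketch below is a hypothetical userspace analogue in which a worker thread stands in for the kernel timer and pthread_join plays the role of the synchronous cancellation.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct soft_timer {
        pthread_t worker;
        _Atomic bool stop;
};

static void *tick_loop(void *arg)
{
        struct soft_timer *t = arg;

        while (!t->stop)
                usleep(1000);            /* stand-in for periodic tick work */
        return NULL;
}

static void soft_timer_open(struct soft_timer *t)
{
        t->stop = false;
        pthread_create(&t->worker, NULL, tick_loop, t);
}

/* "close": request stop and wait until no tick can run anymore. */
static void soft_timer_close(struct soft_timer *t)
{
        t->stop = true;
        pthread_join(t->worker, NULL);   /* analogous to del_timer_sync() */
}

int main(void)
{
        struct soft_timer t;

        soft_timer_open(&t);
        soft_timer_close(&t);
        puts("closed with no tick in flight");
        return 0;
}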
@@ -1893,6 +1933,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 {
        struct snd_timer_user *tu;
        long result = 0, unit;
+       int qhead;
        int err = 0;
 
        tu = file->private_data;
@@ -1904,7 +1945,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
                        if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
                                err = -EAGAIN;
-                               break;
+                               goto _error;
                        }
 
                        set_current_state(TASK_INTERRUPTIBLE);
@@ -1919,42 +1960,37 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
 
                        if (tu->disconnected) {
                                err = -ENODEV;
-                               break;
+                               goto _error;
                        }
                        if (signal_pending(current)) {
                                err = -ERESTARTSYS;
-                               break;
+                               goto _error;
                        }
                }
 
+               qhead = tu->qhead++;
+               tu->qhead %= tu->queue_size;
                spin_unlock_irq(&tu->qlock);
-               if (err < 0)
-                       goto _error;
 
                if (tu->tread) {
-                       if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
-                                        sizeof(struct snd_timer_tread))) {
+                       if (copy_to_user(buffer, &tu->tqueue[qhead],
+                                        sizeof(struct snd_timer_tread)))
                                err = -EFAULT;
-                               goto _error;
-                       }
                } else {
-                       if (copy_to_user(buffer, &tu->queue[tu->qhead++],
-                                        sizeof(struct snd_timer_read))) {
+                       if (copy_to_user(buffer, &tu->queue[qhead],
+                                        sizeof(struct snd_timer_read)))
                                err = -EFAULT;
-                               goto _error;
-                       }
                }
 
-               tu->qhead %= tu->queue_size;
-
-               result += unit;
-               buffer += unit;
-
                spin_lock_irq(&tu->qlock);
                tu->qused--;
+               if (err < 0)
+                       goto _error;
+               result += unit;
+               buffer += unit;
        }
-       spin_unlock_irq(&tu->qlock);
  _error:
+       spin_unlock_irq(&tu->qlock);
        return result > 0 ? result : err;
 }
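
The user-read rework above snapshots and advances tu->qhead while qlock is held, copies that one element out without the lock, and re-takes the lock only to drop tu->qused, so a concurrent enqueue can never observe a half-updated head index. A userspace sketch of the same consume pattern under stated assumptions: a mutex instead of the spinlock, memcpy instead of copy_to_user, and hypothetical queue names.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define QUEUE_SIZE 8

struct event_queue {
        pthread_mutex_t qlock;
        int queue[QUEUE_SIZE];
        int qhead, qused;
};

/* Consume one element; returns 0 on success, -1 if the queue is empty. */
static int queue_read_one(struct event_queue *q, int *out)
{
        int qhead;

        pthread_mutex_lock(&q->qlock);
        if (q->qused == 0) {
                pthread_mutex_unlock(&q->qlock);
                return -1;
        }
        qhead = q->qhead;                       /* snapshot the slot to copy */
        q->qhead = (q->qhead + 1) % QUEUE_SIZE; /* advance head under the lock */
        pthread_mutex_unlock(&q->qlock);

        memcpy(out, &q->queue[qhead], sizeof(*out)); /* copy without the lock */

        pthread_mutex_lock(&q->qlock);
        q->qused--;                             /* slot is free for producers */
        pthread_mutex_unlock(&q->qlock);
        return 0;
}

int main(void)
{
        struct event_queue q = { .qlock = PTHREAD_MUTEX_INITIALIZER };
        int v = 0;

        q.queue[0] = 42;
        q.qused = 1;
        if (queue_read_one(&q, &v) == 0)
                printf("read %d\n", v);
        return 0;
}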
 
index 75b74850c00535ee320f3827e094d9d4da1e30e3..c0f8f613f1f1b5e954ffa82b760c82353dd17c80 100644 (file)
@@ -109,6 +109,9 @@ struct dummy_timer_ops {
        snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
 };
 
+#define get_dummy_ops(substream) \
+       (*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
+
 struct dummy_model {
        const char *name;
        int (*playback_constraints)(struct snd_pcm_runtime *runtime);
@@ -137,7 +140,6 @@ struct snd_dummy {
        int iobox;
        struct snd_kcontrol *cd_volume_ctl;
        struct snd_kcontrol *cd_switch_ctl;
-       const struct dummy_timer_ops *timer_ops;
 };
 
 /*
@@ -231,6 +233,8 @@ static struct dummy_model *dummy_models[] = {
  */
 
 struct dummy_systimer_pcm {
+       /* ops must be the first item */
+       const struct dummy_timer_ops *timer_ops;
        spinlock_t lock;
        struct timer_list timer;
        unsigned long base_time;
@@ -366,6 +370,8 @@ static const struct dummy_timer_ops dummy_systimer_ops = {
  */
 
 struct dummy_hrtimer_pcm {
+       /* ops must be the first item */
+       const struct dummy_timer_ops *timer_ops;
        ktime_t base_time;
        ktime_t period_time;
        atomic_t running;
@@ -492,31 +498,25 @@ static const struct dummy_timer_ops dummy_hrtimer_ops = {
 
 static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 {
-       struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
-               return dummy->timer_ops->start(substream);
+               return get_dummy_ops(substream)->start(substream);
        case SNDRV_PCM_TRIGGER_STOP:
        case SNDRV_PCM_TRIGGER_SUSPEND:
-               return dummy->timer_ops->stop(substream);
+               return get_dummy_ops(substream)->stop(substream);
        }
        return -EINVAL;
 }
 
 static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
 {
-       struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
-       return dummy->timer_ops->prepare(substream);
+       return get_dummy_ops(substream)->prepare(substream);
 }
 
 static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
 {
-       struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
-       return dummy->timer_ops->pointer(substream);
+       return get_dummy_ops(substream)->pointer(substream);
 }
 
 static struct snd_pcm_hardware dummy_pcm_hardware = {
@@ -562,17 +562,19 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
        struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
        struct dummy_model *model = dummy->model;
        struct snd_pcm_runtime *runtime = substream->runtime;
+       const struct dummy_timer_ops *ops;
        int err;
 
-       dummy->timer_ops = &dummy_systimer_ops;
+       ops = &dummy_systimer_ops;
 #ifdef CONFIG_HIGH_RES_TIMERS
        if (hrtimer)
-               dummy->timer_ops = &dummy_hrtimer_ops;
+               ops = &dummy_hrtimer_ops;
 #endif
 
-       err = dummy->timer_ops->create(substream);
+       err = ops->create(substream);
        if (err < 0)
                return err;
+       get_dummy_ops(substream) = ops;
 
        runtime->hw = dummy->pcm_hw;
        if (substream->pcm->device & 1) {
@@ -594,7 +596,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
                        err = model->capture_constraints(substream->runtime);
        }
        if (err < 0) {
-               dummy->timer_ops->free(substream);
+               get_dummy_ops(substream)->free(substream);
                return err;
        }
        return 0;
@@ -602,8 +604,7 @@ static int dummy_pcm_open(struct snd_pcm_substream *substream)
 
 static int dummy_pcm_close(struct snd_pcm_substream *substream)
 {
-       struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-       dummy->timer_ops->free(substream);
+       get_dummy_ops(substream)->free(substream);
        return 0;
 }
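
The dummy-driver change above relies on a small C idiom: both per-substream private structs place `const struct dummy_timer_ops *` as their first member (that is what the "ops must be the first item" comments guarantee), so `runtime->private_data` can be cast to a double pointer and dereferenced without knowing which struct it really is, and `get_dummy_ops(substream) = ops;` works because the macro expands to an lvalue. A standalone sketch of the idiom with hypothetical names; it is an illustration of the cast, not the ALSA code:

#include <stdio.h>

struct timer_ops {
        const char *name;
        int (*start)(void *pcm);
};

/* Two unrelated private structs; the ops pointer MUST stay first. */
struct systimer_pcm {
        const struct timer_ops *ops;    /* first member */
        long period_ns;
};

struct hrtimer_pcm {
        const struct timer_ops *ops;    /* first member */
        int running;
};

/* Recover the ops table from an opaque private_data pointer. */
#define get_ops(private_data) (*(const struct timer_ops **)(private_data))

static int start_sys(void *pcm)
{
        (void)pcm;
        return 1;
}

static const struct timer_ops systimer_ops = { "systimer", start_sys };

int main(void)
{
        struct systimer_pcm sys = { &systimer_ops, 1000000L };
        void *private_data = &sys;  /* what runtime->private_data would hold */

        printf("%s -> %d\n", get_ops(private_data)->name,
               get_ops(private_data)->start(private_data));
        return 0;
}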
 
index 926e5dcbb66a1a3ec523482ceb5e199f8c455b64..5022c9b97ddfc007bc6119ab6a0cb41fafb2c16d 100644 (file)
@@ -47,14 +47,16 @@ static const unsigned int bridgeco_freq_table[] = {
        [6] = 0x07,
 };
 
-static unsigned int
-get_formation_index(unsigned int rate)
+static int
+get_formation_index(unsigned int rate, unsigned int *index)
 {
        unsigned int i;
 
        for (i = 0; i < ARRAY_SIZE(snd_bebob_rate_table); i++) {
-               if (snd_bebob_rate_table[i] == rate)
-                       return i;
+               if (snd_bebob_rate_table[i] == rate) {
+                       *index = i;
+                       return 0;
+               }
        }
        return -EINVAL;
 }
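
get_formation_index() previously returned the table index directly, so -EINVAL could be mistaken for a (bogus) index by the caller; the fix returns 0 or -EINVAL and hands the index back through a pointer, and make_both_connections() now checks that return value. A minimal sketch of the out-parameter pattern, with a hypothetical rate table rather than the bebob one:

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int rate_table[] = { 32000, 44100, 48000, 88200, 96000 };

/* Return 0 and fill *index on success, -EINVAL if the rate is unknown. */
static int get_rate_index(unsigned int rate, unsigned int *index)
{
        for (unsigned int i = 0; i < ARRAY_SIZE(rate_table); i++) {
                if (rate_table[i] == rate) {
                        *index = i;
                        return 0;
                }
        }
        return -EINVAL;
}

int main(void)
{
        unsigned int index;

        if (get_rate_index(44100, &index) < 0)
                return 1;
        printf("44100 Hz is entry %u\n", index);

        /* an unknown rate is now an explicit error, not a bogus index */
        printf("192000 Hz -> %d\n", get_rate_index(192000, &index));
        return 0;
}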
@@ -425,7 +427,9 @@ make_both_connections(struct snd_bebob *bebob, unsigned int rate)
                goto end;
 
        /* confirm params for both streams */
-       index = get_formation_index(rate);
+       err = get_formation_index(rate, &index);
+       if (err < 0)
+               goto end;
        pcm_channels = bebob->tx_stream_formations[index].pcm;
        midi_channels = bebob->tx_stream_formations[index].midi;
        err = amdtp_am824_set_parameters(&bebob->tx_stream, rate,
index 151b09f240f280d14233817ff64ef77d14d3a31b..2461311e695a6b7eb480da4fccd5cf5f26598fff 100644 (file)
@@ -103,16 +103,27 @@ static void set_midi_substream_names(struct snd_dice *dice,
 
 int snd_dice_create_midi(struct snd_dice *dice)
 {
+       __be32 reg;
        struct snd_rawmidi *rmidi;
        struct snd_rawmidi_str *str;
-       unsigned int i, midi_in_ports, midi_out_ports;
+       unsigned int midi_in_ports, midi_out_ports;
        int err;
 
-       midi_in_ports = midi_out_ports = 0;
-       for (i = 0; i < 3; i++) {
-               midi_in_ports = max(dice->tx_midi_ports[i], midi_in_ports);
-               midi_out_ports = max(dice->rx_midi_ports[i], midi_out_ports);
-       }
+       /*
+        * Use the number of MIDI conformant data channels at the current
+        * sampling transfer frequency.
+        */
+       err = snd_dice_transaction_read_tx(dice, TX_NUMBER_MIDI,
+                                          &reg, sizeof(reg));
+       if (err < 0)
+               return err;
+       midi_in_ports = be32_to_cpu(reg);
+
+       err = snd_dice_transaction_read_rx(dice, RX_NUMBER_MIDI,
+                                          &reg, sizeof(reg));
+       if (err < 0)
+               return err;
+       midi_out_ports = be32_to_cpu(reg);
 
        if (midi_in_ports + midi_out_ports == 0)
                return 0;
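
Instead of caching per-mode tables at probe time, the DICE MIDI code above now reads the port counts straight from the TX/RX register blocks; the values arrive as big-endian quadlets and are converted with be32_to_cpu(). A userspace sketch of parsing such a quadlet pair, under the assumption that a fixed byte buffer stands in for the register read and that ntohl() plays the role of be32_to_cpu() (identical behaviour on both endiannesses):

#include <arpa/inet.h>   /* ntohl(), a userspace stand-in for be32_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Pretend this is the result of a two-quadlet register read:
         * quadlet 0 = number of audio channels, quadlet 1 = MIDI ports. */
        const unsigned char raw[8] = { 0, 0, 0, 16, 0, 0, 0, 1 };
        uint32_t reg[2];

        memcpy(reg, raw, sizeof(reg));          /* registers are big-endian */
        uint32_t pcm_channels = ntohl(reg[0]);
        uint32_t midi_ports = ntohl(reg[1]);

        printf("pcm=%u midi=%u\n", pcm_channels, midi_ports);
        return 0;
}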
index 9b3431999fc8b6df8dac3b58c42d6e1604c617e4..a5c9b58655ef2f69141a66e440480ac4f6049bc6 100644 (file)
@@ -9,99 +9,40 @@
 
 #include "dice.h"
 
-static int dice_rate_constraint(struct snd_pcm_hw_params *params,
-                               struct snd_pcm_hw_rule *rule)
-{
-       struct snd_pcm_substream *substream = rule->private;
-       struct snd_dice *dice = substream->private_data;
-
-       const struct snd_interval *c =
-               hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_CHANNELS);
-       struct snd_interval *r =
-               hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
-       struct snd_interval rates = {
-               .min = UINT_MAX, .max = 0, .integer = 1
-       };
-       unsigned int i, rate, mode, *pcm_channels;
-
-       if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-               pcm_channels = dice->tx_channels;
-       else
-               pcm_channels = dice->rx_channels;
-
-       for (i = 0; i < ARRAY_SIZE(snd_dice_rates); ++i) {
-               rate = snd_dice_rates[i];
-               if (snd_dice_stream_get_rate_mode(dice, rate, &mode) < 0)
-                       continue;
-
-               if (!snd_interval_test(c, pcm_channels[mode]))
-                       continue;
-
-               rates.min = min(rates.min, rate);
-               rates.max = max(rates.max, rate);
-       }
-
-       return snd_interval_refine(r, &rates);
-}
-
-static int dice_channels_constraint(struct snd_pcm_hw_params *params,
-                                   struct snd_pcm_hw_rule *rule)
-{
-       struct snd_pcm_substream *substream = rule->private;
-       struct snd_dice *dice = substream->private_data;
-
-       const struct snd_interval *r =
-               hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
-       struct snd_interval *c =
-               hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
-       struct snd_interval channels = {
-               .min = UINT_MAX, .max = 0, .integer = 1
-       };
-       unsigned int i, rate, mode, *pcm_channels;
-
-       if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
-               pcm_channels = dice->tx_channels;
-       else
-               pcm_channels = dice->rx_channels;
-
-       for (i = 0; i < ARRAY_SIZE(snd_dice_rates); ++i) {
-               rate = snd_dice_rates[i];
-               if (snd_dice_stream_get_rate_mode(dice, rate, &mode) < 0)
-                       continue;
-
-               if (!snd_interval_test(r, rate))
-                       continue;
-
-               channels.min = min(channels.min, pcm_channels[mode]);
-               channels.max = max(channels.max, pcm_channels[mode]);
-       }
-
-       return snd_interval_refine(c, &channels);
-}
-
-static void limit_channels_and_rates(struct snd_dice *dice,
-                                    struct snd_pcm_runtime *runtime,
-                                    unsigned int *pcm_channels)
+static int limit_channels_and_rates(struct snd_dice *dice,
+                                   struct snd_pcm_runtime *runtime,
+                                   struct amdtp_stream *stream)
 {
        struct snd_pcm_hardware *hw = &runtime->hw;
-       unsigned int i, rate, mode;
+       unsigned int rate;
+       __be32 reg[2];
+       int err;
 
-       hw->channels_min = UINT_MAX;
-       hw->channels_max = 0;
+       /*
+        * Retrieve the current number of Multi Bit Linear Audio data channels
+        * and limit the hardware parameters to it.
+        */
+       if (stream == &dice->tx_stream) {
+               err = snd_dice_transaction_read_tx(dice, TX_NUMBER_AUDIO,
+                                                  reg, sizeof(reg));
+       } else {
+               err = snd_dice_transaction_read_rx(dice, RX_NUMBER_AUDIO,
+                                                  reg, sizeof(reg));
+       }
+       if (err < 0)
+               return err;
 
-       for (i = 0; i < ARRAY_SIZE(snd_dice_rates); ++i) {
-               rate = snd_dice_rates[i];
-               if (snd_dice_stream_get_rate_mode(dice, rate, &mode) < 0)
-                       continue;
-               hw->rates |= snd_pcm_rate_to_rate_bit(rate);
+       hw->channels_min = hw->channels_max = be32_to_cpu(reg[0]);
 
-               if (pcm_channels[mode] == 0)
-                       continue;
-               hw->channels_min = min(hw->channels_min, pcm_channels[mode]);
-               hw->channels_max = max(hw->channels_max, pcm_channels[mode]);
-       }
+       /* Retrieve the current sampling transfer frequency and limit to it. */
+       err = snd_dice_transaction_get_rate(dice, &rate);
+       if (err < 0)
+               return err;
 
+       hw->rates = snd_pcm_rate_to_rate_bit(rate);
        snd_pcm_limit_hw_rates(runtime);
+
+       return 0;
 }
 
 static void limit_period_and_buffer(struct snd_pcm_hardware *hw)
@@ -122,7 +63,6 @@ static int init_hw_info(struct snd_dice *dice,
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct snd_pcm_hardware *hw = &runtime->hw;
        struct amdtp_stream *stream;
-       unsigned int *pcm_channels;
        int err;
 
        hw->info = SNDRV_PCM_INFO_MMAP |
@@ -135,37 +75,22 @@ static int init_hw_info(struct snd_dice *dice,
        if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
                hw->formats = AM824_IN_PCM_FORMAT_BITS;
                stream = &dice->tx_stream;
-               pcm_channels = dice->tx_channels;
        } else {
                hw->formats = AM824_OUT_PCM_FORMAT_BITS;
                stream = &dice->rx_stream;
-               pcm_channels = dice->rx_channels;
        }
 
-       limit_channels_and_rates(dice, runtime, pcm_channels);
-       limit_period_and_buffer(hw);
-
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
-                                 dice_rate_constraint, substream,
-                                 SNDRV_PCM_HW_PARAM_CHANNELS, -1);
-       if (err < 0)
-               goto end;
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_CHANNELS,
-                                 dice_channels_constraint, substream,
-                                 SNDRV_PCM_HW_PARAM_RATE, -1);
+       err = limit_channels_and_rates(dice, runtime, stream);
        if (err < 0)
-               goto end;
+               return err;
+       limit_period_and_buffer(hw);
 
-       err = amdtp_am824_add_pcm_hw_constraints(stream, runtime);
-end:
-       return err;
+       return amdtp_am824_add_pcm_hw_constraints(stream, runtime);
 }
 
 static int pcm_open(struct snd_pcm_substream *substream)
 {
        struct snd_dice *dice = substream->private_data;
-       unsigned int source, rate;
-       bool internal;
        int err;
 
        err = snd_dice_stream_lock_try(dice);
@@ -176,39 +101,6 @@ static int pcm_open(struct snd_pcm_substream *substream)
        if (err < 0)
                goto err_locked;
 
-       err = snd_dice_transaction_get_clock_source(dice, &source);
-       if (err < 0)
-               goto err_locked;
-       switch (source) {
-       case CLOCK_SOURCE_AES1:
-       case CLOCK_SOURCE_AES2:
-       case CLOCK_SOURCE_AES3:
-       case CLOCK_SOURCE_AES4:
-       case CLOCK_SOURCE_AES_ANY:
-       case CLOCK_SOURCE_ADAT:
-       case CLOCK_SOURCE_TDIF:
-       case CLOCK_SOURCE_WC:
-               internal = false;
-               break;
-       default:
-               internal = true;
-               break;
-       }
-
-       /*
-        * When source of clock is not internal or any PCM streams are running,
-        * available sampling rate is limited at current sampling rate.
-        */
-       if (!internal ||
-           amdtp_stream_pcm_running(&dice->tx_stream) ||
-           amdtp_stream_pcm_running(&dice->rx_stream)) {
-               err = snd_dice_transaction_get_rate(dice, &rate);
-               if (err < 0)
-                       goto err_locked;
-               substream->runtime->hw.rate_min = rate;
-               substream->runtime->hw.rate_max = rate;
-       }
-
        snd_pcm_set_sync(substream);
 end:
        return err;
@@ -402,17 +294,30 @@ int snd_dice_create_pcm(struct snd_dice *dice)
                .page      = snd_pcm_lib_get_vmalloc_page,
                .mmap      = snd_pcm_lib_mmap_vmalloc,
        };
+       __be32 reg;
        struct snd_pcm *pcm;
-       unsigned int i, capture, playback;
+       unsigned int capture = 0, playback = 0;
        int err;
 
-       capture = playback = 0;
-       for (i = 0; i < 3; i++) {
-               if (dice->tx_channels[i] > 0)
-                       capture = 1;
-               if (dice->rx_channels[i] > 0)
-                       playback = 1;
-       }
+       /*
+        * Check whether PCM substreams are required.
+        *
+        * TODO: handle the case in which no PCM substreams are available at a
+        * certain sampling transfer frequency.
+        */
+       err = snd_dice_transaction_read_tx(dice, TX_NUMBER_AUDIO,
+                                          &reg, sizeof(reg));
+       if (err < 0)
+               return err;
+       if (be32_to_cpu(reg) > 0)
+               capture = 1;
+
+       err = snd_dice_transaction_read_rx(dice, RX_NUMBER_AUDIO,
+                                          &reg, sizeof(reg));
+       if (err < 0)
+               return err;
+       if (be32_to_cpu(reg) > 0)
+               playback = 1;
 
        err = snd_pcm_new(dice->card, "DICE", 0, playback, capture, &pcm);
        if (err < 0)
index a6a39f7ef58d8e32cf6e6ff60d45bcbbb23b1e04..e4938b0cddbea818d436906a5fd5615516e74ce5 100644 (file)
@@ -10,6 +10,7 @@
 #include "dice.h"
 
 #define        CALLBACK_TIMEOUT        200
+#define NOTIFICATION_TIMEOUT_MS        (2 * MSEC_PER_SEC)
 
 const unsigned int snd_dice_rates[SND_DICE_RATES_COUNT] = {
        /* mode 0 */
@@ -24,21 +25,33 @@ const unsigned int snd_dice_rates[SND_DICE_RATES_COUNT] = {
        [6] = 192000,
 };
 
-int snd_dice_stream_get_rate_mode(struct snd_dice *dice, unsigned int rate,
-                                 unsigned int *mode)
+/*
+ * This operation has the effect of synchronizing GLOBAL_STATUS/GLOBAL_SAMPLE_RATE
+ * to GLOBAL_STATUS; in particular, just after powering on, they can differ.
+ */
+static int ensure_phase_lock(struct snd_dice *dice)
 {
-       int i;
+       __be32 reg;
+       int err;
 
-       for (i = 0; i < ARRAY_SIZE(snd_dice_rates); i++) {
-               if (!(dice->clock_caps & BIT(i)))
-                       continue;
-               if (snd_dice_rates[i] != rate)
-                       continue;
+       err = snd_dice_transaction_read_global(dice, GLOBAL_CLOCK_SELECT,
+                                              &reg, sizeof(reg));
+       if (err < 0)
+               return err;
 
-               *mode = (i - 1) / 2;
-               return 0;
-       }
-       return -EINVAL;
+       if (completion_done(&dice->clock_accepted))
+               reinit_completion(&dice->clock_accepted);
+
+       err = snd_dice_transaction_write_global(dice, GLOBAL_CLOCK_SELECT,
+                                               &reg, sizeof(reg));
+       if (err < 0)
+               return err;
+
+       if (wait_for_completion_timeout(&dice->clock_accepted,
+                       msecs_to_jiffies(NOTIFICATION_TIMEOUT_MS)) == 0)
+               return -ETIMEDOUT;
+
+       return 0;
 }
 
 static void release_resources(struct snd_dice *dice,
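
ensure_phase_lock() above re-writes the current value of GLOBAL_CLOCK_SELECT and then waits, bounded by NOTIFICATION_TIMEOUT_MS, for the device notification that completes clock_accepted; if nothing arrives it reports -ETIMEDOUT. A hedged userspace sketch of that "kick the hardware, then wait for the ack with a deadline" shape, using a condition variable in place of the kernel completion; the ack_waiter type and function names are hypothetical.

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ack_waiter {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool accepted;
};

/* Called from the "notification" path when the device acks the change. */
void ack_signal(struct ack_waiter *w)
{
        pthread_mutex_lock(&w->lock);
        w->accepted = true;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
}

/* Wait up to timeout_ms for the ack; 0 on success, -ETIMEDOUT otherwise. */
int ack_wait(struct ack_waiter *w, long timeout_ms)
{
        struct timespec deadline;
        int err = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_ms / 1000;
        deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&w->lock);
        while (!w->accepted && err == 0)
                err = pthread_cond_timedwait(&w->cond, &w->lock, &deadline);
        bool ok = w->accepted;
        pthread_mutex_unlock(&w->lock);

        return ok ? 0 : -ETIMEDOUT;
}

int main(void)
{
        struct ack_waiter w = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, false
        };

        ack_signal(&w);                  /* pretend the device answered */
        printf("wait -> %d\n", ack_wait(&w, 2000));
        return 0;
}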
@@ -99,23 +112,27 @@ static int start_stream(struct snd_dice *dice, struct amdtp_stream *stream,
                        unsigned int rate)
 {
        struct fw_iso_resources *resources;
-       unsigned int i, mode, pcm_chs, midi_ports;
+       __be32 reg[2];
+       unsigned int i, pcm_chs, midi_ports;
        bool double_pcm_frames;
        int err;
 
-       err = snd_dice_stream_get_rate_mode(dice, rate, &mode);
-       if (err < 0)
-               goto end;
        if (stream == &dice->tx_stream) {
                resources = &dice->tx_resources;
-               pcm_chs = dice->tx_channels[mode];
-               midi_ports = dice->tx_midi_ports[mode];
+               err = snd_dice_transaction_read_tx(dice, TX_NUMBER_AUDIO,
+                                                  reg, sizeof(reg));
        } else {
                resources = &dice->rx_resources;
-               pcm_chs = dice->rx_channels[mode];
-               midi_ports = dice->rx_midi_ports[mode];
+               err = snd_dice_transaction_read_rx(dice, RX_NUMBER_AUDIO,
+                                                  reg, sizeof(reg));
        }
 
+       if (err < 0)
+               goto end;
+
+       pcm_chs = be32_to_cpu(reg[0]);
+       midi_ports = be32_to_cpu(reg[1]);
+
        /*
         * At 176.4/192.0 kHz, Dice has a quirk to transfer two PCM frames in
         * one data block of AMDTP packet. Thus sampling transfer frequency is
@@ -126,7 +143,7 @@ static int start_stream(struct snd_dice *dice, struct amdtp_stream *stream,
         * For this quirk, blocking mode is required and PCM buffer size should
         * be aligned to SYT_INTERVAL.
         */
-       double_pcm_frames = mode > 1;
+       double_pcm_frames = rate > 96000;
        if (double_pcm_frames) {
                rate /= 2;
                pcm_chs *= 2;
@@ -224,8 +241,10 @@ int snd_dice_stream_start_duplex(struct snd_dice *dice, unsigned int rate)
        }
        if (rate == 0)
                rate = curr_rate;
-       if (rate != curr_rate)
-               stop_stream(dice, master);
+       if (rate != curr_rate) {
+               err = -EINVAL;
+               goto end;
+       }
 
        if (!amdtp_stream_running(master)) {
                stop_stream(dice, slave);
@@ -233,10 +252,10 @@ int snd_dice_stream_start_duplex(struct snd_dice *dice, unsigned int rate)
 
                amdtp_stream_set_sync(sync_mode, master, slave);
 
-               err = snd_dice_transaction_set_rate(dice, rate);
+               err = ensure_phase_lock(dice);
                if (err < 0) {
                        dev_err(&dice->unit->device,
-                               "fail to set sampling rate\n");
+                               "fail to ensure phase lock\n");
                        goto end;
                }
 
index a4ff4e0bc0af1ad143471624df0c00a89eca27c4..76f9f72df2a9e56305a8f60608dfe293716ce27d 100644 (file)
@@ -9,8 +9,6 @@
 
 #include "dice.h"
 
-#define NOTIFICATION_TIMEOUT_MS        (2 * MSEC_PER_SEC)
-
 static u64 get_subaddr(struct snd_dice *dice, enum snd_dice_addr_type type,
                       u64 offset)
 {
@@ -62,54 +60,6 @@ static unsigned int get_clock_info(struct snd_dice *dice, __be32 *info)
                                                info, 4);
 }
 
-static int set_clock_info(struct snd_dice *dice,
-                         unsigned int rate, unsigned int source)
-{
-       unsigned int i;
-       __be32 info;
-       u32 mask;
-       u32 clock;
-       int err;
-
-       err = get_clock_info(dice, &info);
-       if (err < 0)
-               return err;
-
-       clock = be32_to_cpu(info);
-       if (source != UINT_MAX) {
-               mask = CLOCK_SOURCE_MASK;
-               clock &= ~mask;
-               clock |= source;
-       }
-       if (rate != UINT_MAX) {
-               for (i = 0; i < ARRAY_SIZE(snd_dice_rates); i++) {
-                       if (snd_dice_rates[i] == rate)
-                               break;
-               }
-               if (i == ARRAY_SIZE(snd_dice_rates))
-                       return -EINVAL;
-
-               mask = CLOCK_RATE_MASK;
-               clock &= ~mask;
-               clock |= i << CLOCK_RATE_SHIFT;
-       }
-       info = cpu_to_be32(clock);
-
-       if (completion_done(&dice->clock_accepted))
-               reinit_completion(&dice->clock_accepted);
-
-       err = snd_dice_transaction_write_global(dice, GLOBAL_CLOCK_SELECT,
-                                               &info, 4);
-       if (err < 0)
-               return err;
-
-       if (wait_for_completion_timeout(&dice->clock_accepted,
-                       msecs_to_jiffies(NOTIFICATION_TIMEOUT_MS)) == 0)
-               return -ETIMEDOUT;
-
-       return 0;
-}
-
 int snd_dice_transaction_get_clock_source(struct snd_dice *dice,
                                          unsigned int *source)
 {
@@ -143,10 +93,6 @@ int snd_dice_transaction_get_rate(struct snd_dice *dice, unsigned int *rate)
 end:
        return err;
 }
-int snd_dice_transaction_set_rate(struct snd_dice *dice, unsigned int rate)
-{
-       return set_clock_info(dice, rate, UINT_MAX);
-}
 
 int snd_dice_transaction_set_enable(struct snd_dice *dice)
 {
index b91b3739c8106f551472d84ae29ef38ee09dd6bb..f7303a650ac2221ab3f656c8384b57ebe926d6bb 100644 (file)
@@ -57,65 +57,10 @@ static int check_dice_category(struct fw_unit *unit)
        return 0;
 }
 
-static int highest_supported_mode_rate(struct snd_dice *dice,
-                                      unsigned int mode, unsigned int *rate)
-{
-       unsigned int i, m;
-
-       for (i = ARRAY_SIZE(snd_dice_rates); i > 0; i--) {
-               *rate = snd_dice_rates[i - 1];
-               if (snd_dice_stream_get_rate_mode(dice, *rate, &m) < 0)
-                       continue;
-               if (mode == m)
-                       break;
-       }
-       if (i == 0)
-               return -EINVAL;
-
-       return 0;
-}
-
-static int dice_read_mode_params(struct snd_dice *dice, unsigned int mode)
-{
-       __be32 values[2];
-       unsigned int rate;
-       int err;
-
-       if (highest_supported_mode_rate(dice, mode, &rate) < 0) {
-               dice->tx_channels[mode] = 0;
-               dice->tx_midi_ports[mode] = 0;
-               dice->rx_channels[mode] = 0;
-               dice->rx_midi_ports[mode] = 0;
-               return 0;
-       }
-
-       err = snd_dice_transaction_set_rate(dice, rate);
-       if (err < 0)
-               return err;
-
-       err = snd_dice_transaction_read_tx(dice, TX_NUMBER_AUDIO,
-                                          values, sizeof(values));
-       if (err < 0)
-               return err;
-
-       dice->tx_channels[mode]   = be32_to_cpu(values[0]);
-       dice->tx_midi_ports[mode] = be32_to_cpu(values[1]);
-
-       err = snd_dice_transaction_read_rx(dice, RX_NUMBER_AUDIO,
-                                          values, sizeof(values));
-       if (err < 0)
-               return err;
-
-       dice->rx_channels[mode]   = be32_to_cpu(values[0]);
-       dice->rx_midi_ports[mode] = be32_to_cpu(values[1]);
-
-       return 0;
-}
-
-static int dice_read_params(struct snd_dice *dice)
+static int check_clock_caps(struct snd_dice *dice)
 {
        __be32 value;
-       int mode, err;
+       int err;
 
        /* some very old firmwares don't tell about their clock support */
        if (dice->clock_caps > 0) {
@@ -133,12 +78,6 @@ static int dice_read_params(struct snd_dice *dice)
                                   CLOCK_CAP_SOURCE_INTERNAL;
        }
 
-       for (mode = 2; mode >= 0; --mode) {
-               err = dice_read_mode_params(dice, mode);
-               if (err < 0)
-                       return err;
-       }
-
        return 0;
 }
 
@@ -215,7 +154,7 @@ static void do_registration(struct work_struct *work)
        if (err < 0)
                goto error;
 
-       err = dice_read_params(dice);
+       err = check_clock_caps(dice);
        if (err < 0)
                goto error;
 
index 3d5ebebe61ea2238cfbb410a403cc6da5452dc01..423cdba997267fd210ea2f3f3e426b147cef8c4e 100644 (file)
@@ -56,10 +56,6 @@ struct snd_dice {
        unsigned int rsrv_offset;
 
        unsigned int clock_caps;
-       unsigned int tx_channels[3];
-       unsigned int rx_channels[3];
-       unsigned int tx_midi_ports[3];
-       unsigned int rx_midi_ports[3];
 
        struct fw_address_handler notification_handler;
        int owner_generation;
@@ -158,7 +154,6 @@ static inline int snd_dice_transaction_read_sync(struct snd_dice *dice,
 
 int snd_dice_transaction_get_clock_source(struct snd_dice *dice,
                                          unsigned int *source);
-int snd_dice_transaction_set_rate(struct snd_dice *dice, unsigned int rate);
 int snd_dice_transaction_get_rate(struct snd_dice *dice, unsigned int *rate);
 int snd_dice_transaction_set_enable(struct snd_dice *dice);
 void snd_dice_transaction_clear_enable(struct snd_dice *dice);
@@ -169,9 +164,6 @@ void snd_dice_transaction_destroy(struct snd_dice *dice);
 #define SND_DICE_RATES_COUNT   7
 extern const unsigned int snd_dice_rates[SND_DICE_RATES_COUNT];
 
-int snd_dice_stream_get_rate_mode(struct snd_dice *dice,
-                                 unsigned int rate, unsigned int *mode);
-
 int snd_dice_stream_start_duplex(struct snd_dice *dice, unsigned int rate);
 void snd_dice_stream_stop_duplex(struct snd_dice *dice);
 int snd_dice_stream_init_duplex(struct snd_dice *dice);
index b02a5e8cad448c40368b9396dc1c0890c91a3a42..0ac92aba5bc1c9a4c01b1764a6f16a098e3bbae1 100644 (file)
@@ -63,7 +63,7 @@ struct amdtp_dot {
 #define BYTE_PER_SAMPLE (4)
 #define MAGIC_DOT_BYTE (2)
 #define MAGIC_BYTE_OFF(x) (((x) * BYTE_PER_SAMPLE) + MAGIC_DOT_BYTE)
-static const u8 dot_scrt(const u8 idx, const unsigned int off)
+static u8 dot_scrt(const u8 idx, const unsigned int off)
 {
        /*
         * the length of the added pattern only depends on the lower nibble
index 904ce0329fa1ac3d7e3364d66664d6ba60d6b6d8..040a96d1ba8ec1fa1dbfa4521a46e7efb865f435 100644 (file)
@@ -230,6 +230,7 @@ int snd_tscm_transaction_register(struct snd_tscm *tscm)
        return err;
 error:
        fw_core_remove_address_handler(&tscm->async_handler);
+       tscm->async_handler.callback_data = NULL;
        return err;
 }
 
@@ -276,6 +277,9 @@ void snd_tscm_transaction_unregister(struct snd_tscm *tscm)
        __be32 reg;
        unsigned int i;
 
+       if (tscm->async_handler.callback_data == NULL)
+               return;
+
        /* Turn off FireWire LED. */
        reg = cpu_to_be32(0x0000008e);
        snd_fw_transaction(tscm->unit, TCODE_WRITE_QUADLET_REQUEST,
@@ -297,6 +301,8 @@ void snd_tscm_transaction_unregister(struct snd_tscm *tscm)
                           &reg, sizeof(reg), 0);
 
        fw_core_remove_address_handler(&tscm->async_handler);
+       tscm->async_handler.callback_data = NULL;
+
        for (i = 0; i < TSCM_MIDI_OUT_PORT_MAX; i++)
                snd_fw_async_midi_port_destroy(&tscm->out_ports[i]);
 }
index ee0bc183950888ba6ade12bb8441fa81be41ec11..e281c338e562d59e861303789d82c319b9a2ae9c 100644 (file)
@@ -21,7 +21,6 @@ static struct snd_tscm_spec model_specs[] = {
                .pcm_playback_analog_channels = 8,
                .midi_capture_ports = 4,
                .midi_playback_ports = 4,
-               .is_controller = true,
        },
        {
                .name = "FW-1082",
@@ -31,9 +30,16 @@ static struct snd_tscm_spec model_specs[] = {
                .pcm_playback_analog_channels = 2,
                .midi_capture_ports = 2,
                .midi_playback_ports = 2,
-               .is_controller = true,
        },
-       /* FW-1804 may be supported. */
+       {
+               .name = "FW-1804",
+               .has_adat = true,
+               .has_spdif = true,
+               .pcm_capture_analog_channels = 8,
+               .pcm_playback_analog_channels = 2,
+               .midi_capture_ports = 2,
+               .midi_playback_ports = 4,
+       },
 };
 
 static int identify_model(struct snd_tscm *tscm)
index 2d028d2bd3bdcdcd8be15a184e555786726c94d8..30ab77e924f7f68f9b939a230109152722ad9057 100644 (file)
@@ -39,7 +39,6 @@ struct snd_tscm_spec {
        unsigned int pcm_playback_analog_channels;
        unsigned int midi_capture_ports;
        unsigned int midi_playback_ports;
-       bool is_controller;
 };
 
 #define TSCM_MIDI_IN_PORT_MAX  4
@@ -72,9 +71,6 @@ struct snd_tscm {
        struct snd_fw_async_midi_port out_ports[TSCM_MIDI_OUT_PORT_MAX];
        u8 running_status[TSCM_MIDI_OUT_PORT_MAX];
        bool on_sysex[TSCM_MIDI_OUT_PORT_MAX];
-
-       /* For control messages. */
-       struct snd_firewire_tascam_status *status;
 };
 
 #define TSCM_ADDR_BASE                 0xffff00000000ull
index 0216475fc759e23442a9f2ab7775c6966458b2cd..37adcc6cbe6bd852a290cc95ea58cc05863e88ba 100644 (file)
@@ -3,6 +3,7 @@
 config SND_WSS_LIB
         tristate
         select SND_PCM
+       select SND_TIMER
 
 config SND_SB_COMMON
         tristate
@@ -42,6 +43,7 @@ config SND_AD1816A
        select SND_OPL3_LIB
        select SND_MPU401_UART
        select SND_PCM
+       select SND_TIMER
        help
          Say Y here to include support for Analog Devices SoundPort
          AD1816A or compatible sound chips.
@@ -209,6 +211,7 @@ config SND_GUSCLASSIC
        tristate "Gravis UltraSound Classic"
        select SND_RAWMIDI
        select SND_PCM
+       select SND_TIMER
        help
          Say Y here to include support for Gravis UltraSound Classic
          soundcards.
@@ -221,6 +224,7 @@ config SND_GUSEXTREME
        select SND_OPL3_LIB
        select SND_MPU401_UART
        select SND_PCM
+       select SND_TIMER
        help
          Say Y here to include support for Gravis UltraSound Extreme
          soundcards.
index 2153d31fb66312025cb6221afd8476d313261785..4a4705031cb96ce24d0a4db4876e32733a9bd8dc 100644 (file)
@@ -23,17 +23,5 @@ config SND_SGI_HAL2
         help
                 Sound support for the SGI Indy and Indigo2 Workstation.
 
-
-config SND_AU1X00
-       tristate "Au1x00 AC97 Port Driver (DEPRECATED)"
-       depends on MIPS_ALCHEMY
-       select SND_PCM
-       select SND_AC97_CODEC
-       help
-         ALSA Sound driver for the Au1x00's AC97 port.
-
-         Newer drivers for ASoC are available, please do not use
-         this driver as it will be removed in the future.
-
 endif  # SND_MIPS
 
index 861ec0a574b46efd5315878f4666edc87d288271..b977c44330d67c6e86cef15e1c8eecf68f217b57 100644 (file)
@@ -2,11 +2,9 @@
 # Makefile for ALSA
 #
 
-snd-au1x00-objs := au1x00.o
 snd-sgi-o2-objs := sgio2audio.o ad1843.o
 snd-sgi-hal2-objs := hal2.o
 
 # Toplevel Module Dependency
-obj-$(CONFIG_SND_AU1X00) += snd-au1x00.o
 obj-$(CONFIG_SND_SGI_O2) += snd-sgi-o2.o
 obj-$(CONFIG_SND_SGI_HAL2) += snd-sgi-hal2.o
diff --git a/sound/mips/au1x00.c b/sound/mips/au1x00.c
deleted file mode 100644 (file)
index 1e30e84..0000000
+++ /dev/null
@@ -1,734 +0,0 @@
-/*
- * BRIEF MODULE DESCRIPTION
- *  Driver for AMD Au1000 MIPS Processor, AC'97 Sound Port
- *
- * Copyright 2004 Cooper Street Innovations Inc.
- * Author: Charles Eidsness    <charles@cooper-street.com>
- *
- *  This program is free software; you can redistribute  it and/or modify it
- *  under  the terms of  the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the  License, or (at your
- *  option) any later version.
- *
- *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
- *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
- *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
- *  NO  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
- *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- *  NOT LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF
- *  USE, DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- *  ANY THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
- *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *  You should have received a copy of the  GNU General Public License along
- *  with this program; if not, write  to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * History:
- *
- * 2004-09-09 Charles Eidsness -- Original verion -- based on
- *                               sa11xx-uda1341.c ALSA driver and the
- *                               au1000.c OSS driver.
- * 2004-09-09 Matt Porter      -- Added support for ALSA 1.0.6
- *
- */
-
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <sound/core.h>
-#include <sound/initval.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/ac97_codec.h>
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/au1000_dma.h>
-
-MODULE_AUTHOR("Charles Eidsness <charles@cooper-street.com>");
-MODULE_DESCRIPTION("Au1000 AC'97 ALSA Driver");
-MODULE_LICENSE("GPL");
-MODULE_SUPPORTED_DEVICE("{{AMD,Au1000 AC'97}}");
-
-#define PLAYBACK 0
-#define CAPTURE 1
-#define AC97_SLOT_3 0x01
-#define AC97_SLOT_4 0x02
-#define AC97_SLOT_6 0x08
-#define AC97_CMD_IRQ 31
-#define READ 0
-#define WRITE 1
-#define READ_WAIT 2
-#define RW_DONE 3
-
-struct au1000_period
-{
-       u32 start;
-       u32 relative_end;       /*realtive to start of buffer*/
-       struct au1000_period * next;
-};
-
-/*Au1000 AC97 Port Control Reisters*/
-struct au1000_ac97_reg {
-       u32 volatile config;
-       u32 volatile status;
-       u32 volatile data;
-       u32 volatile cmd;
-       u32 volatile cntrl;
-};
-
-struct audio_stream {
-       struct snd_pcm_substream *substream;
-       int dma;
-       spinlock_t dma_lock;
-       struct au1000_period * buffer;
-       unsigned int period_size;
-       unsigned int periods;
-};
-
-struct snd_au1000 {
-       struct snd_card *card;
-       struct au1000_ac97_reg volatile *ac97_ioport;
-
-       struct resource *ac97_res_port;
-       spinlock_t ac97_lock;
-       struct snd_ac97 *ac97;
-
-       struct snd_pcm *pcm;
-       struct audio_stream *stream[2]; /* playback & capture */
-       int dmaid[2];           /* tx(0)/rx(1) DMA ids */
-};
-
-/*--------------------------- Local Functions --------------------------------*/
-static void
-au1000_set_ac97_xmit_slots(struct snd_au1000 *au1000, long xmit_slots)
-{
-       u32 volatile ac97_config;
-
-       spin_lock(&au1000->ac97_lock);
-       ac97_config = au1000->ac97_ioport->config;
-       ac97_config = ac97_config & ~AC97C_XMIT_SLOTS_MASK;
-       ac97_config |= (xmit_slots << AC97C_XMIT_SLOTS_BIT);
-       au1000->ac97_ioport->config = ac97_config;
-       spin_unlock(&au1000->ac97_lock);
-}
-
-static void
-au1000_set_ac97_recv_slots(struct snd_au1000 *au1000, long recv_slots)
-{
-       u32 volatile ac97_config;
-
-       spin_lock(&au1000->ac97_lock);
-       ac97_config = au1000->ac97_ioport->config;
-       ac97_config = ac97_config & ~AC97C_RECV_SLOTS_MASK;
-       ac97_config |= (recv_slots << AC97C_RECV_SLOTS_BIT);
-       au1000->ac97_ioport->config = ac97_config;
-       spin_unlock(&au1000->ac97_lock);
-}
-
-
-static void
-au1000_release_dma_link(struct audio_stream *stream)
-{
-       struct au1000_period * pointer;
-       struct au1000_period * pointer_next;
-
-       stream->period_size = 0;
-       stream->periods = 0;
-       pointer = stream->buffer;
-       if (! pointer)
-               return;
-       do {
-               pointer_next = pointer->next;
-               kfree(pointer);
-               pointer = pointer_next;
-       } while (pointer != stream->buffer);
-       stream->buffer = NULL;
-}
-
-static int
-au1000_setup_dma_link(struct audio_stream *stream, unsigned int period_bytes,
-                     unsigned int periods)
-{
-       struct snd_pcm_substream *substream = stream->substream;
-       struct snd_pcm_runtime *runtime = substream->runtime;
-       struct au1000_period *pointer;
-       unsigned long dma_start;
-       int i;
-
-       dma_start = virt_to_phys(runtime->dma_area);
-
-       if (stream->period_size == period_bytes &&
-           stream->periods == periods)
-               return 0; /* not changed */
-
-       au1000_release_dma_link(stream);
-
-       stream->period_size = period_bytes;
-       stream->periods = periods;
-
-       stream->buffer = kmalloc(sizeof(struct au1000_period), GFP_KERNEL);
-       if (! stream->buffer)
-               return -ENOMEM;
-       pointer = stream->buffer;
-       for (i = 0; i < periods; i++) {
-               pointer->start = (u32)(dma_start + (i * period_bytes));
-               pointer->relative_end = (u32) (((i+1) * period_bytes) - 0x1);
-               if (i < periods - 1) {
-                       pointer->next = kmalloc(sizeof(struct au1000_period), GFP_KERNEL);
-                       if (! pointer->next) {
-                               au1000_release_dma_link(stream);
-                               return -ENOMEM;
-                       }
-                       pointer = pointer->next;
-               }
-       }
-       pointer->next = stream->buffer;
-       return 0;
-}
-
-static void
-au1000_dma_stop(struct audio_stream *stream)
-{
-       if (snd_BUG_ON(!stream->buffer))
-               return;
-       disable_dma(stream->dma);
-}
-
-static void
-au1000_dma_start(struct audio_stream *stream)
-{
-       if (snd_BUG_ON(!stream->buffer))
-               return;
-
-       init_dma(stream->dma);
-       if (get_dma_active_buffer(stream->dma) == 0) {
-               clear_dma_done0(stream->dma);
-               set_dma_addr0(stream->dma, stream->buffer->start);
-               set_dma_count0(stream->dma, stream->period_size >> 1);
-               set_dma_addr1(stream->dma, stream->buffer->next->start);
-               set_dma_count1(stream->dma, stream->period_size >> 1);
-       } else {
-               clear_dma_done1(stream->dma);
-               set_dma_addr1(stream->dma, stream->buffer->start);
-               set_dma_count1(stream->dma, stream->period_size >> 1);
-               set_dma_addr0(stream->dma, stream->buffer->next->start);
-               set_dma_count0(stream->dma, stream->period_size >> 1);
-       }
-       enable_dma_buffers(stream->dma);
-       start_dma(stream->dma);
-}
-
-static irqreturn_t
-au1000_dma_interrupt(int irq, void *dev_id)
-{
-       struct audio_stream *stream = (struct audio_stream *) dev_id;
-       struct snd_pcm_substream *substream = stream->substream;
-
-       spin_lock(&stream->dma_lock);
-       switch (get_dma_buffer_done(stream->dma)) {
-       case DMA_D0:
-               stream->buffer = stream->buffer->next;
-               clear_dma_done0(stream->dma);
-               set_dma_addr0(stream->dma, stream->buffer->next->start);
-               set_dma_count0(stream->dma, stream->period_size >> 1);
-               enable_dma_buffer0(stream->dma);
-               break;
-       case DMA_D1:
-               stream->buffer = stream->buffer->next;
-               clear_dma_done1(stream->dma);
-               set_dma_addr1(stream->dma, stream->buffer->next->start);
-               set_dma_count1(stream->dma, stream->period_size >> 1);
-               enable_dma_buffer1(stream->dma);
-               break;
-       case (DMA_D0 | DMA_D1):
-               printk(KERN_ERR "DMA %d missed interrupt.\n",stream->dma);
-               au1000_dma_stop(stream);
-               au1000_dma_start(stream);
-               break;
-       case (~DMA_D0 & ~DMA_D1):
-               printk(KERN_ERR "DMA %d empty irq.\n",stream->dma);
-       }
-       spin_unlock(&stream->dma_lock);
-       snd_pcm_period_elapsed(substream);
-       return IRQ_HANDLED;
-}
-
-/*-------------------------- PCM Audio Streams -------------------------------*/
-
-static unsigned int rates[] = {8000, 11025, 16000, 22050};
-static struct snd_pcm_hw_constraint_list hw_constraints_rates = {
-       .count  = ARRAY_SIZE(rates),
-       .list   = rates,
-       .mask   = 0,
-};
-
-static struct snd_pcm_hardware snd_au1000_hw =
-{
-       .info                   = (SNDRV_PCM_INFO_INTERLEAVED | \
-                               SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID),
-       .formats                = SNDRV_PCM_FMTBIT_S16_LE,
-       .rates                  = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |
-                               SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050),
-       .rate_min               = 8000,
-       .rate_max               = 22050,
-       .channels_min           = 1,
-       .channels_max           = 2,
-       .buffer_bytes_max       = 128*1024,
-       .period_bytes_min       = 32,
-       .period_bytes_max       = 16*1024,
-       .periods_min            = 8,
-       .periods_max            = 255,
-       .fifo_size              = 16,
-};
-
-static int
-snd_au1000_playback_open(struct snd_pcm_substream *substream)
-{
-       struct snd_au1000 *au1000 = substream->pcm->private_data;
-
-       au1000->stream[PLAYBACK]->substream = substream;
-       au1000->stream[PLAYBACK]->buffer = NULL;
-       substream->private_data = au1000->stream[PLAYBACK];
-       substream->runtime->hw = snd_au1000_hw;
-       return (snd_pcm_hw_constraint_list(substream->runtime, 0,
-               SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates) < 0);
-}
-
-static int
-snd_au1000_capture_open(struct snd_pcm_substream *substream)
-{
-       struct snd_au1000 *au1000 = substream->pcm->private_data;
-
-       au1000->stream[CAPTURE]->substream = substream;
-       au1000->stream[CAPTURE]->buffer = NULL;
-       substream->private_data = au1000->stream[CAPTURE];
-       substream->runtime->hw = snd_au1000_hw;
-       return (snd_pcm_hw_constraint_list(substream->runtime, 0,
-               SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates) < 0);
-}
-
-static int
-snd_au1000_playback_close(struct snd_pcm_substream *substream)
-{
-       struct snd_au1000 *au1000 = substream->pcm->private_data;
-
-       au1000->stream[PLAYBACK]->substream = NULL;
-       return 0;
-}
-
-static int
-snd_au1000_capture_close(struct snd_pcm_substream *substream)
-{
-       struct snd_au1000 *au1000 = substream->pcm->private_data;
-
-       au1000->stream[CAPTURE]->substream = NULL;
-       return 0;
-}
-
-static int
-snd_au1000_hw_params(struct snd_pcm_substream *substream,
-                                       struct snd_pcm_hw_params *hw_params)
-{
-       struct audio_stream *stream = substream->private_data;
-       int err;
-
-       err = snd_pcm_lib_malloc_pages(substream,
-                                      params_buffer_bytes(hw_params));
-       if (err < 0)
-               return err;
-       return au1000_setup_dma_link(stream,
-                                    params_period_bytes(hw_params),
-                                    params_periods(hw_params));
-}
-
-static int
-snd_au1000_hw_free(struct snd_pcm_substream *substream)
-{
-       struct audio_stream *stream = substream->private_data;
-       au1000_release_dma_link(stream);
-       return snd_pcm_lib_free_pages(substream);
-}
-
-static int
-snd_au1000_playback_prepare(struct snd_pcm_substream *substream)
-{
-       struct snd_au1000 *au1000 = substream->pcm->private_data;
-       struct snd_pcm_runtime *runtime = substream->runtime;
-
-       if (runtime->channels == 1)
-               au1000_set_ac97_xmit_slots(au1000, AC97_SLOT_4);
-       else
-               au1000_set_ac97_xmit_slots(au1000, AC97_SLOT_3 | AC97_SLOT_4);
-       snd_ac97_set_rate(au1000->ac97, AC97_PCM_FRONT_DAC_RATE, runtime->rate);
-       return 0;
-}
-
-static int
-snd_au1000_capture_prepare(struct snd_pcm_substream *substream)
-{
-       struct snd_au1000 *au1000 = substream->pcm->private_data;
-       struct snd_pcm_runtime *runtime = substream->runtime;
-
-       if (runtime->channels == 1)
-               au1000_set_ac97_recv_slots(au1000, AC97_SLOT_4);
-       else
-               au1000_set_ac97_recv_slots(au1000, AC97_SLOT_3 | AC97_SLOT_4);
-       snd_ac97_set_rate(au1000->ac97, AC97_PCM_LR_ADC_RATE, runtime->rate);
-       return 0;
-}
-
-static int
-snd_au1000_trigger(struct snd_pcm_substream *substream, int cmd)
-{
-       struct audio_stream *stream = substream->private_data;
-       int err = 0;
-
-       spin_lock(&stream->dma_lock);
-       switch (cmd) {
-       case SNDRV_PCM_TRIGGER_START:
-               au1000_dma_start(stream);
-               break;
-       case SNDRV_PCM_TRIGGER_STOP:
-               au1000_dma_stop(stream);
-               break;
-       default:
-               err = -EINVAL;
-               break;
-       }
-       spin_unlock(&stream->dma_lock);
-       return err;
-}
-
-static snd_pcm_uframes_t
-snd_au1000_pointer(struct snd_pcm_substream *substream)
-{
-       struct audio_stream *stream = substream->private_data;
-       struct snd_pcm_runtime *runtime = substream->runtime;
-       long location;
-
-       spin_lock(&stream->dma_lock);
-       location = get_dma_residue(stream->dma);
-       spin_unlock(&stream->dma_lock);
-       location = stream->buffer->relative_end - location;
-       if (location == -1)
-               location = 0;
-       return bytes_to_frames(runtime,location);
-}
-
-static struct snd_pcm_ops snd_card_au1000_playback_ops = {
-       .open                   = snd_au1000_playback_open,
-       .close                  = snd_au1000_playback_close,
-       .ioctl                  = snd_pcm_lib_ioctl,
-       .hw_params              = snd_au1000_hw_params,
-       .hw_free                = snd_au1000_hw_free,
-       .prepare                = snd_au1000_playback_prepare,
-       .trigger                = snd_au1000_trigger,
-       .pointer                = snd_au1000_pointer,
-};
-
-static struct snd_pcm_ops snd_card_au1000_capture_ops = {
-       .open                   = snd_au1000_capture_open,
-       .close                  = snd_au1000_capture_close,
-       .ioctl                  = snd_pcm_lib_ioctl,
-       .hw_params              = snd_au1000_hw_params,
-       .hw_free                = snd_au1000_hw_free,
-       .prepare                = snd_au1000_capture_prepare,
-       .trigger                = snd_au1000_trigger,
-       .pointer                = snd_au1000_pointer,
-};
-
-static int
-snd_au1000_pcm_new(struct snd_au1000 *au1000)
-{
-       struct snd_pcm *pcm;
-       int err;
-       unsigned long flags;
-
-       if ((err = snd_pcm_new(au1000->card, "AU1000 AC97 PCM", 0, 1, 1, &pcm)) < 0)
-               return err;
-
-       snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
-               snd_dma_continuous_data(GFP_KERNEL), 128*1024, 128*1024);
-
-       snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK,
-               &snd_card_au1000_playback_ops);
-       snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
-               &snd_card_au1000_capture_ops);
-
-       pcm->private_data = au1000;
-       pcm->info_flags = 0;
-       strcpy(pcm->name, "Au1000 AC97 PCM");
-
-       spin_lock_init(&au1000->stream[PLAYBACK]->dma_lock);
-       spin_lock_init(&au1000->stream[CAPTURE]->dma_lock);
-
-       flags = claim_dma_lock();
-       au1000->stream[PLAYBACK]->dma = request_au1000_dma(au1000->dmaid[0],
-                       "AC97 TX", au1000_dma_interrupt, 0,
-                       au1000->stream[PLAYBACK]);
-       if (au1000->stream[PLAYBACK]->dma < 0) {
-               release_dma_lock(flags);
-               return -EBUSY;
-       }
-       au1000->stream[CAPTURE]->dma = request_au1000_dma(au1000->dmaid[1],
-                       "AC97 RX", au1000_dma_interrupt, 0,
-                       au1000->stream[CAPTURE]);
-       if (au1000->stream[CAPTURE]->dma < 0){
-               release_dma_lock(flags);
-               return -EBUSY;
-       }
-       /* enable DMA coherency in read/write DMA channels */
-       set_dma_mode(au1000->stream[PLAYBACK]->dma,
-                    get_dma_mode(au1000->stream[PLAYBACK]->dma) & ~DMA_NC);
-       set_dma_mode(au1000->stream[CAPTURE]->dma,
-                    get_dma_mode(au1000->stream[CAPTURE]->dma) & ~DMA_NC);
-       release_dma_lock(flags);
-       au1000->pcm = pcm;
-       return 0;
-}
-
-
-/*-------------------------- AC97 CODEC Control ------------------------------*/
-
-static unsigned short
-snd_au1000_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
-{
-       struct snd_au1000 *au1000 = ac97->private_data;
-       u32 volatile cmd;
-       u16 volatile data;
-       int             i;
-
-       spin_lock(&au1000->ac97_lock);
-/* would rather use the interrupt than this polling but it works and I can't
-get the interrupt driven case to work efficiently */
-       for (i = 0; i < 0x5000; i++)
-               if (!(au1000->ac97_ioport->status & AC97C_CP))
-                       break;
-       if (i == 0x5000)
-               printk(KERN_ERR "au1000 AC97: AC97 command read timeout\n");
-
-       cmd = (u32) reg & AC97C_INDEX_MASK;
-       cmd |= AC97C_READ;
-       au1000->ac97_ioport->cmd = cmd;
-
-       /* now wait for the data */
-       for (i = 0; i < 0x5000; i++)
-               if (!(au1000->ac97_ioport->status & AC97C_CP))
-                       break;
-       if (i == 0x5000) {
-               printk(KERN_ERR "au1000 AC97: AC97 command read timeout\n");
-               spin_unlock(&au1000->ac97_lock);
-               return 0;
-       }
-
-       data = au1000->ac97_ioport->cmd & 0xffff;
-       spin_unlock(&au1000->ac97_lock);
-
-       return data;
-
-}
-
-
-static void
-snd_au1000_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val)
-{
-       struct snd_au1000 *au1000 = ac97->private_data;
-       u32 cmd;
-       int i;
-
-       spin_lock(&au1000->ac97_lock);
-/* would rather use the interrupt than this polling but it works and I can't
-get the interrupt driven case to work efficiently */
-       for (i = 0; i < 0x5000; i++)
-               if (!(au1000->ac97_ioport->status & AC97C_CP))
-                       break;
-       if (i == 0x5000)
-               printk(KERN_ERR "au1000 AC97: AC97 command write timeout\n");
-
-       cmd = (u32) reg & AC97C_INDEX_MASK;
-       cmd &= ~AC97C_READ;
-       cmd |= ((u32) val << AC97C_WD_BIT);
-       au1000->ac97_ioport->cmd = cmd;
-       spin_unlock(&au1000->ac97_lock);
-}
-
-/*------------------------------ Setup / Destroy ----------------------------*/
-
-static void snd_au1000_free(struct snd_card *card)
-{
-       struct snd_au1000 *au1000 = card->private_data;
-
-       if (au1000->stream[PLAYBACK]) {
-               if (au1000->stream[PLAYBACK]->dma >= 0)
-                       free_au1000_dma(au1000->stream[PLAYBACK]->dma);
-               kfree(au1000->stream[PLAYBACK]);
-       }
-
-       if (au1000->stream[CAPTURE]) {
-               if (au1000->stream[CAPTURE]->dma >= 0)
-                       free_au1000_dma(au1000->stream[CAPTURE]->dma);
-               kfree(au1000->stream[CAPTURE]);
-       }
-
-       if (au1000->ac97_res_port) {
-               /* put internal AC97 block into reset */
-               if (au1000->ac97_ioport) {
-                       au1000->ac97_ioport->cntrl = AC97C_RS;
-                       iounmap(au1000->ac97_ioport);
-                       au1000->ac97_ioport = NULL;
-               }
-               release_and_free_resource(au1000->ac97_res_port);
-               au1000->ac97_res_port = NULL;
-       }
-}
-
-static struct snd_ac97_bus_ops ops = {
-       .write  = snd_au1000_ac97_write,
-       .read   = snd_au1000_ac97_read,
-};
-
-static int au1000_ac97_probe(struct platform_device *pdev)
-{
-       int err;
-       void __iomem *io;
-       struct resource *r;
-       struct snd_card *card;
-       struct snd_au1000 *au1000;
-       struct snd_ac97_bus *pbus;
-       struct snd_ac97_template ac97;
-
-       err = snd_card_new(&pdev->dev, -1, "AC97", THIS_MODULE,
-                          sizeof(struct snd_au1000), &card);
-       if (err < 0)
-               return err;
-
-       au1000 = card->private_data;
-       au1000->card = card;
-       spin_lock_init(&au1000->ac97_lock);
-
-       /* from here on let ALSA call the special freeing function */
-       card->private_free = snd_au1000_free;
-
-       /* TX DMA ID */
-       r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-       if (!r) {
-               err = -ENODEV;
-               snd_printk(KERN_INFO "no TX DMA platform resource!\n");
-               goto out;
-       }
-       au1000->dmaid[0] = r->start;
-
-       /* RX DMA ID */
-       r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
-       if (!r) {
-               err = -ENODEV;
-               snd_printk(KERN_INFO "no RX DMA platform resource!\n");
-               goto out;
-       }
-       au1000->dmaid[1] = r->start;
-
-       au1000->stream[PLAYBACK] = kmalloc(sizeof(struct audio_stream),
-                                          GFP_KERNEL);
-       if (!au1000->stream[PLAYBACK]) {
-               err = -ENOMEM;
-               goto out;
-       }
-       au1000->stream[PLAYBACK]->dma = -1;
-
-       au1000->stream[CAPTURE] = kmalloc(sizeof(struct audio_stream),
-                                         GFP_KERNEL);
-       if (!au1000->stream[CAPTURE]) {
-               err = -ENOMEM;
-               goto out;
-       }
-       au1000->stream[CAPTURE]->dma = -1;
-
-       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!r) {
-               err = -ENODEV;
-               goto out;
-       }
-
-       err = -EBUSY;
-       au1000->ac97_res_port = request_mem_region(r->start, resource_size(r),
-                                                  pdev->name);
-       if (!au1000->ac97_res_port) {
-               snd_printk(KERN_ERR "ALSA AC97: can't grab AC97 port\n");
-               goto out;
-       }
-
-       io = ioremap(r->start, resource_size(r));
-       if (!io)
-               goto out;
-
-       au1000->ac97_ioport = (struct au1000_ac97_reg *)io;
-
-       /* configure pins for AC'97
-       TODO: move to board_setup.c */
-       au_writel(au_readl(SYS_PINFUNC) & ~0x02, SYS_PINFUNC);
-
-       /* Initialise Au1000's AC'97 Control Block */
-       au1000->ac97_ioport->cntrl = AC97C_RS | AC97C_CE;
-       udelay(10);
-       au1000->ac97_ioport->cntrl = AC97C_CE;
-       udelay(10);
-
-       /* Initialise External CODEC -- cold reset */
-       au1000->ac97_ioport->config = AC97C_RESET;
-       udelay(10);
-       au1000->ac97_ioport->config = 0x0;
-       mdelay(5);
-
-       /* Initialise AC97 middle-layer */
-       err = snd_ac97_bus(au1000->card, 0, &ops, au1000, &pbus);
-       if (err < 0)
-               goto out;
-
-       memset(&ac97, 0, sizeof(ac97));
-       ac97.private_data = au1000;
-       err = snd_ac97_mixer(pbus, &ac97, &au1000->ac97);
-       if (err < 0)
-               goto out;
-
-       err = snd_au1000_pcm_new(au1000);
-       if (err < 0)
-               goto out;
-
-       strcpy(card->driver, "Au1000-AC97");
-       strcpy(card->shortname, "AMD Au1000-AC97");
-       sprintf(card->longname, "AMD Au1000--AC97 ALSA Driver");
-
-       err = snd_card_register(card);
-       if (err < 0)
-               goto out;
-
-       printk(KERN_INFO "ALSA AC97: Driver Initialized\n");
-
-       platform_set_drvdata(pdev, card);
-
-       return 0;
-
- out:
-       snd_card_free(card);
-       return err;
-}
-
-static int au1000_ac97_remove(struct platform_device *pdev)
-{
-       return snd_card_free(platform_get_drvdata(pdev));
-}
-
-struct platform_driver au1000_ac97c_driver = {
-       .driver = {
-               .name   = "au1000-ac97c",
-               .owner  = THIS_MODULE,
-       },
-       .probe          = au1000_ac97_probe,
-       .remove         = au1000_ac97_remove,
-};
-
-module_platform_driver(au1000_ac97c_driver);
index 656ce39bddbc531c27d5a15b6d07e899e17f1cd6..8f6594a7d37f7b940ded27f6a8a0395ea8fa1442 100644 (file)
@@ -155,6 +155,7 @@ config SND_AZT3328
        select SND_PCM
        select SND_RAWMIDI
        select SND_AC97_CODEC
+       select SND_TIMER
        depends on ZONE_DMA
        help
          Say Y here to include support for Aztech AZF3328 (PCI168)
@@ -463,6 +464,7 @@ config SND_EMU10K1
        select SND_HWDEP
        select SND_RAWMIDI
        select SND_AC97_CODEC
+       select SND_TIMER
        depends on ZONE_DMA
        help
          Say Y to include support for Sound Blaster PCI 512, Live!,
@@ -889,6 +891,7 @@ config SND_YMFPCI
        select SND_OPL3_LIB
        select SND_MPU401_UART
        select SND_AC97_CODEC
+       select SND_TIMER
        help
          Say Y here to include support for Yamaha PCI audio chips -
          YMF724, YMF724F, YMF740, YMF740C, YMF744, YMF754.
index 28e2f8b42f5e8e4e7f3aaaaa88f45d6834b02591..891453451543102994b7912584f00483b2c472f0 100644 (file)
@@ -1141,6 +1141,14 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
                emu->emu1010.firmware_thread =
                        kthread_create(emu1010_firmware_thread, emu,
                                       "emu1010_firmware");
+               if (IS_ERR(emu->emu1010.firmware_thread)) {
+                       err = PTR_ERR(emu->emu1010.firmware_thread);
+                       emu->emu1010.firmware_thread = NULL;
+                       dev_info(emu->card->dev,
+                                       "emu1010: Creating thread failed\n");
+                       return err;
+               }
+
                wake_up_process(emu->emu1010.firmware_thread);
        }
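
The hunk above adds the missing error check for kthread_create(), which returns an ERR_PTR-encoded pointer rather than NULL on failure. A minimal sketch of that pattern, with hypothetical names (my_dev, my_fw_thread) that are not part of the driver:

/* Sketch: guard kthread_create() with IS_ERR()/PTR_ERR() before waking the
 * thread; the names below are illustrative only.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct my_dev {
	struct task_struct *fw_thread;
};

static int my_fw_thread(void *data)
{
	/* ... firmware upload loop, ends when kthread_should_stop() ... */
	return 0;
}

static int my_start_fw_thread(struct my_dev *dev)
{
	dev->fw_thread = kthread_create(my_fw_thread, dev, "my_fw_thread");
	if (IS_ERR(dev->fw_thread)) {
		int err = PTR_ERR(dev->fw_thread);

		dev->fw_thread = NULL;		/* don't keep the ERR_PTR around */
		return err;
	}
	wake_up_process(dev->fw_thread);	/* kthread_create() leaves it sleeping */
	return 0;
}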
 
index 30c8efe0f80a3a456bbbe09b6ce05abf810a8552..7ca5b89f088a6922e6acd09c12befae864994320 100644 (file)
@@ -4028,9 +4028,9 @@ static void pin_power_callback(struct hda_codec *codec,
                               struct hda_jack_callback *jack,
                               bool on)
 {
-       if (jack && jack->tbl->nid)
+       if (jack && jack->nid)
                sync_power_state_change(codec,
-                                       set_pin_power_jack(codec, jack->tbl->nid, on));
+                                       set_pin_power_jack(codec, jack->nid, on));
 }
 
 /* callback only doing power up -- called at first */
index 256e6cda218f939e6e6d09201c10d39827825ce9..4045dca3d699edd13f06aa9ab7c095948c932a55 100644 (file)
@@ -90,6 +90,8 @@ enum {
 #define NVIDIA_HDA_ENABLE_COHBIT      0x01
 
 /* Defines for Intel SCH HDA snoop control */
+#define INTEL_HDA_CGCTL         0x48
+#define INTEL_HDA_CGCTL_MISCBDCGE        (0x1 << 6)
 #define INTEL_SCH_HDA_DEVC      0x78
 #define INTEL_SCH_HDA_DEVC_NOSNOOP       (0x1<<11)
 
@@ -534,10 +536,21 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
 {
        struct hdac_bus *bus = azx_bus(chip);
        struct pci_dev *pci = chip->pci;
+       u32 val;
 
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
                snd_hdac_set_codec_wakeup(bus, true);
+       if (IS_BROXTON(pci)) {
+               pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+               val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
+               pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+       }
        azx_init_chip(chip, full_reset);
+       if (IS_BROXTON(pci)) {
+               pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
+               val = val | INTEL_HDA_CGCTL_MISCBDCGE;
+               pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
+       }
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
                snd_hdac_set_codec_wakeup(bus, false);
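
The Broxton change above is a read-modify-write of a PCI config register around azx_init_chip(): clear MISCBDCGE before the controller reset, set it again afterwards. A hedged sketch of that helper pattern; the helper name is hypothetical, while the register and bit macros are the ones defined in this hunk:

/* Sketch: toggle one bit in a 32-bit PCI config register; a hypothetical
 * helper wrapping the read-modify-write sequence used above.
 */
#include <linux/pci.h>

static void my_pci_update_bit(struct pci_dev *pci, int reg, u32 bit, bool set)
{
	u32 val;

	pci_read_config_dword(pci, reg, &val);
	if (set)
		val |= bit;
	else
		val &= ~bit;
	pci_write_config_dword(pci, reg, val);
}

/* usage as in hda_intel_init_chip() on Broxton:
 *	my_pci_update_bit(pci, INTEL_HDA_CGCTL, INTEL_HDA_CGCTL_MISCBDCGE, false);
 *	azx_init_chip(chip, full_reset);
 *	my_pci_update_bit(pci, INTEL_HDA_CGCTL, INTEL_HDA_CGCTL_MISCBDCGE, true);
 */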
 
index c945e257d368890bcd3f72037568e566c0ba9221..a33234e04d4f7a1fe311ce7db50f1a56e9e1d7d4 100644 (file)
@@ -259,7 +259,7 @@ snd_hda_jack_detect_enable_callback(struct hda_codec *codec, hda_nid_t nid,
                if (!callback)
                        return ERR_PTR(-ENOMEM);
                callback->func = func;
-               callback->tbl = jack;
+               callback->nid = jack->nid;
                callback->next = jack->callback;
                jack->callback = callback;
        }
index 858708a044f57ef563d79a100c5a92e5f3adaa3d..e9814c0168ea5d77da2922454ed4256d7ad2a30a 100644 (file)
@@ -21,7 +21,7 @@ struct hda_jack_callback;
 typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
 
 struct hda_jack_callback {
-       struct hda_jack_tbl *tbl;
+       hda_nid_t nid;
        hda_jack_callback_fn func;
        unsigned int private_data;      /* arbitrary data */
        struct hda_jack_callback *next;
index 4ef2259f88cae3b1cf328880447dbf528ad42a95..9ceb2bc36e68026bd02dfca8bdca4f9ffab2e849 100644 (file)
@@ -4427,13 +4427,16 @@ static void ca0132_process_dsp_response(struct hda_codec *codec,
 static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
 {
        struct ca0132_spec *spec = codec->spec;
+       struct hda_jack_tbl *tbl;
 
        /* Delay enabling the HP amp, to let the mic-detection
         * state machine run.
         */
        cancel_delayed_work_sync(&spec->unsol_hp_work);
        schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
-       cb->tbl->block_report = 1;
+       tbl = snd_hda_jack_tbl_get(codec, cb->nid);
+       if (tbl)
+               tbl->block_report = 1;
 }
 
 static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
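
The jack-callback changes above replace a cached struct hda_jack_tbl pointer with the bare NID, which call sites resolve on demand via snd_hda_jack_tbl_get() as in the ca0132 hunk; a pointer into the jack table can go stale when the table grows, while the NID stays valid. A generic user-space sketch of that "store an ID, not a pointer" choice (all names illustrative, not kernel APIs):

/* Sketch: entries live in a growable array, so callers keep a stable id and
 * look the entry up when needed instead of caching a pointer that realloc()
 * may invalidate.
 */
#include <stdlib.h>

struct entry {
	unsigned int id;
	int state;
};

struct table {
	struct entry *items;
	size_t used, alloc;
};

static struct entry *table_get(struct table *t, unsigned int id)
{
	for (size_t i = 0; i < t->used; i++)
		if (t->items[i].id == id)
			return &t->items[i];	/* use immediately, do not cache */
	return NULL;
}

static struct entry *table_add(struct table *t, unsigned int id)
{
	if (t->used == t->alloc) {
		size_t n = t->alloc ? t->alloc * 2 : 4;
		struct entry *p = realloc(t->items, n * sizeof(*p));

		if (!p)
			return NULL;
		t->items = p;	/* old pointers into items are now invalid */
		t->alloc = n;
	}
	t->items[t->used].id = id;
	t->items[t->used].state = 0;
	return &t->items[t->used++];
}

struct callback {
	unsigned int id;	/* like hda_jack_callback::nid: survives table growth */
};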
index a12ae8ac091451261a2ba613543677fabedb8cd0..c1c855a6c0af8199d03b04419d16d3494507ddeb 100644 (file)
@@ -614,6 +614,7 @@ enum {
        CS4208_MAC_AUTO,
        CS4208_MBA6,
        CS4208_MBP11,
+       CS4208_MACMINI,
        CS4208_GPIO0,
 };
 
@@ -621,6 +622,7 @@ static const struct hda_model_fixup cs4208_models[] = {
        { .id = CS4208_GPIO0, .name = "gpio0" },
        { .id = CS4208_MBA6, .name = "mba6" },
        { .id = CS4208_MBP11, .name = "mbp11" },
+       { .id = CS4208_MACMINI, .name = "macmini" },
        {}
 };
 
@@ -632,6 +634,7 @@ static const struct snd_pci_quirk cs4208_fixup_tbl[] = {
 /* codec SSID matching */
 static const struct snd_pci_quirk cs4208_mac_fixup_tbl[] = {
        SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11),
+       SND_PCI_QUIRK(0x106b, 0x6c00, "MacMini 7,1", CS4208_MACMINI),
        SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6),
        SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6),
        SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11),
@@ -666,6 +669,24 @@ static void cs4208_fixup_mac(struct hda_codec *codec,
        snd_hda_apply_fixup(codec, action);
 }
 
+/* MacMini 7,1 has the inverted jack detection */
+static void cs4208_fixup_macmini(struct hda_codec *codec,
+                                const struct hda_fixup *fix, int action)
+{
+       static const struct hda_pintbl pincfgs[] = {
+               { 0x18, 0x00ab9150 }, /* mic (audio-in) jack: disable detect */
+               { 0x21, 0x004be140 }, /* SPDIF: disable detect */
+               { }
+       };
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               /* HP pin (0x10) has an inverted detection */
+               codec->inv_jack_detect = 1;
+               /* disable the bogus Mic and SPDIF jack detections */
+               snd_hda_apply_pincfgs(codec, pincfgs);
+       }
+}
+
 static int cs4208_spdif_sw_put(struct snd_kcontrol *kcontrol,
                               struct snd_ctl_elem_value *ucontrol)
 {
@@ -709,6 +730,12 @@ static const struct hda_fixup cs4208_fixups[] = {
                .chained = true,
                .chain_id = CS4208_GPIO0,
        },
+       [CS4208_MACMINI] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = cs4208_fixup_macmini,
+               .chained = true,
+               .chain_id = CS4208_GPIO0,
+       },
        [CS4208_GPIO0] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = cs4208_fixup_gpio0,
index 426a29a1c19bbda5841bd9d4574bf66e83cb8b76..f4443b5fbf6ecaac351dbbdecd76ecd1e1dae1b3 100644 (file)
@@ -75,6 +75,8 @@ struct hdmi_spec_per_cvt {
 
 struct hdmi_spec_per_pin {
        hda_nid_t pin_nid;
+       /* pin idx, different device entries on the same pin use the same idx */
+       int pin_nid_idx;
        int num_mux_nids;
        hda_nid_t mux_nids[HDA_MAX_CONNECTIONS];
        int mux_idx;
@@ -85,7 +87,8 @@ struct hdmi_spec_per_pin {
        struct mutex lock;
        struct delayed_work work;
        struct snd_kcontrol *eld_ctl;
-       struct snd_jack *acomp_jack; /* jack via audio component */
+       struct hdmi_pcm *pcm; /* pointer into spec->pcm_rec[n]; assigned dynamically */
+       int pcm_idx; /* which pcm is attached. -1 means no pcm is attached */
        int repoll_count;
        bool setup; /* the stream has been set up by prepare callback */
        int channels; /* current number of channels */
@@ -130,6 +133,11 @@ struct hdmi_ops {
        int (*chmap_validate)(int ca, int channels, unsigned char *chmap);
 };
 
+struct hdmi_pcm {
+       struct hda_pcm *pcm;
+       struct snd_jack *jack;
+};
+
 struct hdmi_spec {
        int num_cvts;
        struct snd_array cvts; /* struct hdmi_spec_per_cvt */
@@ -137,14 +145,23 @@ struct hdmi_spec {
 
        int num_pins;
        struct snd_array pins; /* struct hdmi_spec_per_pin */
-       struct hda_pcm *pcm_rec[16];
+       struct hdmi_pcm pcm_rec[16];
+       struct mutex pcm_lock;
+       /* pcm_bitmap tracks which PCMs have been assigned to pins */
+       unsigned long pcm_bitmap;
+       int pcm_used;   /* counter of pcm_rec[] */
+       /* bitmap shows whether the pcm is opened in user space
+        * bit 0 means the first playback PCM (PCM3);
+        * bit 1 means the second playback PCM, and so on.
+        */
+       unsigned long pcm_in_use;
        unsigned int channels_max; /* max over all cvts */
 
        struct hdmi_eld temp_eld;
        struct hdmi_ops ops;
 
        bool dyn_pin_out;
-
+       bool dyn_pcm_assign;
        /*
         * Non-generic VIA/NVIDIA specific
         */
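
pcm_bitmap and pcm_in_use above are single unsigned long bitmaps driven with the kernel's set_bit()/clear_bit()/test_bit() helpers elsewhere in this patch. A small user-space sketch of the same slot-allocation bookkeeping, with plain bit operations standing in for those helpers and hypothetical names:

/* Sketch: hand out and reclaim numbered slots in an unsigned long bitmap,
 * the way pcm_bitmap tracks which PCMs are currently bound to pins.
 */
#include <stdio.h>

#define MAX_SLOTS (8 * sizeof(unsigned long))

static int slot_alloc(unsigned long *bitmap, unsigned int preferred)
{
	/* try the preferred slot first, then fall back to any free one */
	if (preferred < MAX_SLOTS && !(*bitmap & (1UL << preferred))) {
		*bitmap |= 1UL << preferred;
		return preferred;
	}
	for (unsigned int i = 0; i < MAX_SLOTS; i++) {
		if (!(*bitmap & (1UL << i))) {
			*bitmap |= 1UL << i;
			return i;
		}
	}
	return -1;	/* comparable to the -EBUSY case in hdmi_find_pcm_slot() */
}

static void slot_free(unsigned long *bitmap, int idx)
{
	if (idx >= 0 && (unsigned int)idx < MAX_SLOTS)
		*bitmap &= ~(1UL << idx);
}

int main(void)
{
	unsigned long bitmap = 0;
	int a = slot_alloc(&bitmap, 2);
	int b = slot_alloc(&bitmap, 2);	/* slot 2 is taken, falls back to 0 */

	printf("a=%d b=%d\n", a, b);	/* prints a=2 b=0 */
	slot_free(&bitmap, a);
	return 0;
}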
@@ -370,7 +387,10 @@ static struct cea_channel_speaker_allocation channel_allocations[] = {
        ((struct hdmi_spec_per_pin *)snd_array_elem(&spec->pins, idx))
 #define get_cvt(spec, idx) \
        ((struct hdmi_spec_per_cvt  *)snd_array_elem(&spec->cvts, idx))
-#define get_pcm_rec(spec, idx) ((spec)->pcm_rec[idx])
+/* obtain hdmi_pcm object assigned to idx */
+#define get_hdmi_pcm(spec, idx)        (&(spec)->pcm_rec[idx])
+/* obtain hda_pcm object assigned to idx */
+#define get_pcm_rec(spec, idx) (get_hdmi_pcm(spec, idx)->pcm)
 
 static int pin_nid_to_pin_index(struct hda_codec *codec, hda_nid_t pin_nid)
 {
@@ -385,20 +405,52 @@ static int pin_nid_to_pin_index(struct hda_codec *codec, hda_nid_t pin_nid)
        return -EINVAL;
 }
 
+static int hinfo_to_pcm_index(struct hda_codec *codec,
+                       struct hda_pcm_stream *hinfo)
+{
+       struct hdmi_spec *spec = codec->spec;
+       int pcm_idx;
+
+       for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++)
+               if (get_pcm_rec(spec, pcm_idx)->stream == hinfo)
+                       return pcm_idx;
+
+       codec_warn(codec, "HDMI: hinfo %p not registered\n", hinfo);
+       return -EINVAL;
+}
+
 static int hinfo_to_pin_index(struct hda_codec *codec,
                              struct hda_pcm_stream *hinfo)
 {
        struct hdmi_spec *spec = codec->spec;
+       struct hdmi_spec_per_pin *per_pin;
        int pin_idx;
 
-       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++)
-               if (get_pcm_rec(spec, pin_idx)->stream == hinfo)
+       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+               per_pin = get_pin(spec, pin_idx);
+               if (per_pin->pcm &&
+                       per_pin->pcm->pcm->stream == hinfo)
                        return pin_idx;
+       }
 
-       codec_warn(codec, "HDMI: hinfo %p not registered\n", hinfo);
+       codec_dbg(codec, "HDMI: hinfo %p not registered\n", hinfo);
        return -EINVAL;
 }
 
+static struct hdmi_spec_per_pin *pcm_idx_to_pin(struct hdmi_spec *spec,
+                                               int pcm_idx)
+{
+       int i;
+       struct hdmi_spec_per_pin *per_pin;
+
+       for (i = 0; i < spec->num_pins; i++) {
+               per_pin = get_pin(spec, i);
+               if (per_pin->pcm_idx == pcm_idx)
+                       return per_pin;
+       }
+       return NULL;
+}
+
 static int cvt_nid_to_cvt_index(struct hda_codec *codec, hda_nid_t cvt_nid)
 {
        struct hdmi_spec *spec = codec->spec;
@@ -448,7 +500,8 @@ static int hdmi_eld_ctl_get(struct snd_kcontrol *kcontrol,
        eld = &per_pin->sink_eld;
 
        mutex_lock(&per_pin->lock);
-       if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
+       if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
+           eld->eld_size > ELD_MAX_SIZE) {
                mutex_unlock(&per_pin->lock);
                snd_BUG();
                return -EINVAL;
@@ -1193,7 +1246,7 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid)
 static void jack_callback(struct hda_codec *codec,
                          struct hda_jack_callback *jack)
 {
-       check_presence_and_report(codec, jack->tbl->nid);
+       check_presence_and_report(codec, jack->nid);
 }
 
 static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
@@ -1337,6 +1390,11 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid,
        return 0;
 }
 
+/* Try to find an available converter
+ * If pin_idx is less than zero, just try to find an available converter.
+ * Otherwise, try to find an available converter and get the cvt mux index
+ * of the pin.
+ */
 static int hdmi_choose_cvt(struct hda_codec *codec,
                        int pin_idx, int *cvt_id, int *mux_id)
 {
@@ -1345,7 +1403,11 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
        struct hdmi_spec_per_cvt *per_cvt = NULL;
        int cvt_idx, mux_idx = 0;
 
-       per_pin = get_pin(spec, pin_idx);
+       /* pin_idx < 0 means no pin will be bound to the converter */
+       if (pin_idx < 0)
+               per_pin = NULL;
+       else
+               per_pin = get_pin(spec, pin_idx);
 
        /* Dynamically assign converter to stream */
        for (cvt_idx = 0; cvt_idx < spec->num_cvts; cvt_idx++) {
@@ -1354,6 +1416,8 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
                /* Must not already be assigned */
                if (per_cvt->assigned)
                        continue;
+               if (per_pin == NULL)
+                       break;
                /* Must be in pin's mux's list of converters */
                for (mux_idx = 0; mux_idx < per_pin->num_mux_nids; mux_idx++)
                        if (per_pin->mux_nids[mux_idx] == per_cvt->cvt_nid)
@@ -1366,9 +1430,10 @@ static int hdmi_choose_cvt(struct hda_codec *codec,
 
        /* No free converters */
        if (cvt_idx == spec->num_cvts)
-               return -ENODEV;
+               return -EBUSY;
 
-       per_pin->mux_idx = mux_idx;
+       if (per_pin != NULL)
+               per_pin->mux_idx = mux_idx;
 
        if (cvt_id)
                *cvt_id = cvt_idx;
@@ -1394,6 +1459,20 @@ static void intel_verify_pin_cvt_connect(struct hda_codec *codec,
                                            mux_idx);
 }
 
+/* get the mux index the pins use for a given converter
+ * (on Intel platforms the converter's mux index is the same for all pins)
+ */
+static int intel_cvt_id_to_mux_idx(struct hdmi_spec *spec,
+                       hda_nid_t cvt_nid)
+{
+       int i;
+
+       for (i = 0; i < spec->num_cvts; i++)
+               if (spec->cvt_nids[i] == cvt_nid)
+                       return i;
+       return -EINVAL;
+}
+
 /* Intel HDMI workaround to fix audio routing issue:
  * For some Intel display codecs, pins share the same connection list.
  * So a conveter can be selected by multiple pins and playback on any of these
@@ -1445,6 +1524,74 @@ static void intel_not_share_assigned_cvt(struct hda_codec *codec,
        }
 }
 
+/* A wrapper around intel_not_share_assigned_cvt() */
+static void intel_not_share_assigned_cvt_nid(struct hda_codec *codec,
+                       hda_nid_t pin_nid, hda_nid_t cvt_nid)
+{
+       int mux_idx;
+       struct hdmi_spec *spec = codec->spec;
+
+       if (!is_haswell_plus(codec) && !is_valleyview_plus(codec))
+               return;
+
+       /* On Intel platforms, the mapping from converter nid to
+        * the pins' mux index is always the same.
+        * A pin nid of 0 means the converter will not be
+        * shared with any pin.
+        */
+       mux_idx = intel_cvt_id_to_mux_idx(spec, cvt_nid);
+       if (mux_idx >= 0)
+               intel_not_share_assigned_cvt(codec, pin_nid, mux_idx);
+}
+
+/* called in hdmi_pcm_open when no pin is assigned to the PCM
+ * in dyn_pcm_assign mode.
+ */
+static int hdmi_pcm_open_no_pin(struct hda_pcm_stream *hinfo,
+                        struct hda_codec *codec,
+                        struct snd_pcm_substream *substream)
+{
+       struct hdmi_spec *spec = codec->spec;
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       int cvt_idx, pcm_idx;
+       struct hdmi_spec_per_cvt *per_cvt = NULL;
+       int err;
+
+       pcm_idx = hinfo_to_pcm_index(codec, hinfo);
+       if (pcm_idx < 0)
+               return -EINVAL;
+
+       err = hdmi_choose_cvt(codec, -1, &cvt_idx, NULL);
+       if (err)
+               return err;
+
+       per_cvt = get_cvt(spec, cvt_idx);
+       per_cvt->assigned = 1;
+       hinfo->nid = per_cvt->cvt_nid;
+
+       intel_not_share_assigned_cvt_nid(codec, 0, per_cvt->cvt_nid);
+
+       set_bit(pcm_idx, &spec->pcm_in_use);
+       /* TODO: assign the spdif ctls */
+
+       /* Initially set the converter's capabilities */
+       hinfo->channels_min = per_cvt->channels_min;
+       hinfo->channels_max = per_cvt->channels_max;
+       hinfo->rates = per_cvt->rates;
+       hinfo->formats = per_cvt->formats;
+       hinfo->maxbps = per_cvt->maxbps;
+
+       /* Store the updated parameters */
+       runtime->hw.channels_min = hinfo->channels_min;
+       runtime->hw.channels_max = hinfo->channels_max;
+       runtime->hw.formats = hinfo->formats;
+       runtime->hw.rates = hinfo->rates;
+
+       snd_pcm_hw_constraint_step(substream->runtime, 0,
+                                  SNDRV_PCM_HW_PARAM_CHANNELS, 2);
+       return 0;
+}
+
 /*
  * HDA PCM callbacks
  */
@@ -1454,26 +1601,47 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
 {
        struct hdmi_spec *spec = codec->spec;
        struct snd_pcm_runtime *runtime = substream->runtime;
-       int pin_idx, cvt_idx, mux_idx = 0;
+       int pin_idx, cvt_idx, pcm_idx, mux_idx = 0;
        struct hdmi_spec_per_pin *per_pin;
        struct hdmi_eld *eld;
        struct hdmi_spec_per_cvt *per_cvt = NULL;
        int err;
 
        /* Validate hinfo */
-       pin_idx = hinfo_to_pin_index(codec, hinfo);
-       if (snd_BUG_ON(pin_idx < 0))
+       pcm_idx = hinfo_to_pcm_index(codec, hinfo);
+       if (pcm_idx < 0)
                return -EINVAL;
-       per_pin = get_pin(spec, pin_idx);
-       eld = &per_pin->sink_eld;
+
+       mutex_lock(&spec->pcm_lock);
+       pin_idx = hinfo_to_pin_index(codec, hinfo);
+       if (!spec->dyn_pcm_assign) {
+               if (snd_BUG_ON(pin_idx < 0)) {
+                       mutex_unlock(&spec->pcm_lock);
+                       return -EINVAL;
+               }
+       } else {
+               /* no pin is assigned to the PCM;
+                * user space (PulseAudio) still needs the open to succeed during probe
+                */
+               if (pin_idx < 0) {
+                       err = hdmi_pcm_open_no_pin(hinfo, codec, substream);
+                       mutex_unlock(&spec->pcm_lock);
+                       return err;
+               }
+       }
 
        err = hdmi_choose_cvt(codec, pin_idx, &cvt_idx, &mux_idx);
-       if (err < 0)
+       if (err < 0) {
+               mutex_unlock(&spec->pcm_lock);
                return err;
+       }
 
        per_cvt = get_cvt(spec, cvt_idx);
        /* Claim converter */
        per_cvt->assigned = 1;
+
+       set_bit(pcm_idx, &spec->pcm_in_use);
+       per_pin = get_pin(spec, pin_idx);
        per_pin->cvt_nid = per_cvt->cvt_nid;
        hinfo->nid = per_cvt->cvt_nid;
 
@@ -1485,7 +1653,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
        if (is_haswell_plus(codec) || is_valleyview_plus(codec))
                intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx);
 
-       snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+       snd_hda_spdif_ctls_assign(codec, pcm_idx, per_cvt->cvt_nid);
 
        /* Initially set the converter's capabilities */
        hinfo->channels_min = per_cvt->channels_min;
@@ -1494,6 +1662,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
        hinfo->formats = per_cvt->formats;
        hinfo->maxbps = per_cvt->maxbps;
 
+       eld = &per_pin->sink_eld;
        /* Restrict capabilities by ELD if this isn't disabled */
        if (!static_hdmi_pcm && eld->eld_valid) {
                snd_hdmi_eld_update_pcm_info(&eld->info, hinfo);
@@ -1501,11 +1670,13 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
                    !hinfo->rates || !hinfo->formats) {
                        per_cvt->assigned = 0;
                        hinfo->nid = 0;
-                       snd_hda_spdif_ctls_unassign(codec, pin_idx);
+                       snd_hda_spdif_ctls_unassign(codec, pcm_idx);
+                       mutex_unlock(&spec->pcm_lock);
                        return -ENODEV;
                }
        }
 
+       mutex_unlock(&spec->pcm_lock);
        /* Store the updated parameters */
        runtime->hw.channels_min = hinfo->channels_min;
        runtime->hw.channels_max = hinfo->channels_max;
@@ -1540,6 +1711,125 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, int pin_idx)
        return 0;
 }
 
+static int hdmi_find_pcm_slot(struct hdmi_spec *spec,
+                               struct hdmi_spec_per_pin *per_pin)
+{
+       int i;
+
+       /* try the preferred PCM first */
+       if (!test_bit(per_pin->pin_nid_idx, &spec->pcm_bitmap))
+               return per_pin->pin_nid_idx;
+
+       /* have a second try; check the "reserved area" over num_pins */
+       for (i = spec->num_pins; i < spec->pcm_used; i++) {
+               if (!test_bit(i, &spec->pcm_bitmap))
+                       return i;
+       }
+
+       /* the last try; check the empty slots in pins */
+       for (i = 0; i < spec->num_pins; i++) {
+               if (!test_bit(i, &spec->pcm_bitmap))
+                       return i;
+       }
+       return -EBUSY;
+}
+
+static void hdmi_attach_hda_pcm(struct hdmi_spec *spec,
+                               struct hdmi_spec_per_pin *per_pin)
+{
+       int idx;
+
+       /* a PCM is already attached to this pin */
+       if (per_pin->pcm)
+               return;
+       idx = hdmi_find_pcm_slot(spec, per_pin);
+       if (idx < 0) /* hdmi_find_pcm_slot() signals failure with -EBUSY */
+               return;
+       per_pin->pcm_idx = idx;
+       per_pin->pcm = get_hdmi_pcm(spec, idx);
+       set_bit(idx, &spec->pcm_bitmap);
+}
+
+static void hdmi_detach_hda_pcm(struct hdmi_spec *spec,
+                               struct hdmi_spec_per_pin *per_pin)
+{
+       int idx;
+
+       /* the PCM is already detached from this pin */
+       if (!per_pin->pcm)
+               return;
+       idx = per_pin->pcm_idx;
+       per_pin->pcm_idx = -1;
+       per_pin->pcm = NULL;
+       if (idx >= 0 && idx < spec->pcm_used)
+               clear_bit(idx, &spec->pcm_bitmap);
+}
+
+static int hdmi_get_pin_cvt_mux(struct hdmi_spec *spec,
+               struct hdmi_spec_per_pin *per_pin, hda_nid_t cvt_nid)
+{
+       int mux_idx;
+
+       for (mux_idx = 0; mux_idx < per_pin->num_mux_nids; mux_idx++)
+               if (per_pin->mux_nids[mux_idx] == cvt_nid)
+                       break;
+       return mux_idx;
+}
+
+static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid);
+
+static void hdmi_pcm_setup_pin(struct hdmi_spec *spec,
+                          struct hdmi_spec_per_pin *per_pin)
+{
+       struct hda_codec *codec = per_pin->codec;
+       struct hda_pcm *pcm;
+       struct hda_pcm_stream *hinfo;
+       struct snd_pcm_substream *substream;
+       int mux_idx;
+       bool non_pcm;
+
+       if (per_pin->pcm_idx >= 0 && per_pin->pcm_idx < spec->pcm_used)
+               pcm = get_pcm_rec(spec, per_pin->pcm_idx);
+       else
+               return;
+       if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use))
+               return;
+
+       /* hdmi audio only uses playback and one substream */
+       hinfo = pcm->stream;
+       substream = pcm->pcm->streams[0].substream;
+
+       per_pin->cvt_nid = hinfo->nid;
+
+       mux_idx = hdmi_get_pin_cvt_mux(spec, per_pin, hinfo->nid);
+       if (mux_idx < per_pin->num_mux_nids)
+               snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+                               AC_VERB_SET_CONNECT_SEL,
+                               mux_idx);
+       snd_hda_spdif_ctls_assign(codec, per_pin->pcm_idx, hinfo->nid);
+
+       non_pcm = check_non_pcm_per_cvt(codec, hinfo->nid);
+       if (substream->runtime)
+               per_pin->channels = substream->runtime->channels;
+       per_pin->setup = true;
+       per_pin->mux_idx = mux_idx;
+
+       hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
+}
+
+static void hdmi_pcm_reset_pin(struct hdmi_spec *spec,
+                          struct hdmi_spec_per_pin *per_pin)
+{
+       if (per_pin->pcm_idx >= 0 && per_pin->pcm_idx < spec->pcm_used)
+               snd_hda_spdif_ctls_unassign(per_pin->codec, per_pin->pcm_idx);
+
+       per_pin->chmap_set = false;
+       memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
+
+       per_pin->setup = false;
+       per_pin->channels = 0;
+}
+
 /* update per_pin ELD from the given new ELD;
  * setup info frame and notification accordingly
  */
@@ -1548,9 +1838,20 @@ static void update_eld(struct hda_codec *codec,
                       struct hdmi_eld *eld)
 {
        struct hdmi_eld *pin_eld = &per_pin->sink_eld;
+       struct hdmi_spec *spec = codec->spec;
        bool old_eld_valid = pin_eld->eld_valid;
        bool eld_changed;
 
+       if (spec->dyn_pcm_assign) {
+               if (eld->eld_valid) {
+                       hdmi_attach_hda_pcm(spec, per_pin);
+                       hdmi_pcm_setup_pin(spec, per_pin);
+               } else {
+                       hdmi_pcm_reset_pin(spec, per_pin);
+                       hdmi_detach_hda_pcm(spec, per_pin);
+               }
+       }
+
        if (eld->eld_valid)
                snd_hdmi_show_eld(codec, &eld->info);
 
@@ -1661,6 +1962,7 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
 {
        struct hdmi_spec *spec = codec->spec;
        struct hdmi_eld *eld = &spec->temp_eld;
+       struct snd_jack *jack = NULL;
        int size;
 
        mutex_lock(&per_pin->lock);
@@ -1684,8 +1986,17 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
                eld->eld_size = 0;
        }
 
+       /* a pcm_idx >= 0 before update_eld() means this is a monitor
+        * disconnect event, so the jack must be fetched before update_eld()
+        */
+       if (per_pin->pcm_idx >= 0)
+               jack = spec->pcm_rec[per_pin->pcm_idx].jack;
        update_eld(codec, per_pin, eld);
-       snd_jack_report(per_pin->acomp_jack,
+       if (jack == NULL && per_pin->pcm_idx >= 0)
+               jack = spec->pcm_rec[per_pin->pcm_idx].jack;
+       if (jack == NULL)
+               goto unlock;
+       snd_jack_report(jack,
                        eld->monitor_present ? SND_JACK_AVOUT : 0);
  unlock:
        mutex_unlock(&per_pin->lock);
@@ -1694,13 +2005,19 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
 static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
 {
        struct hda_codec *codec = per_pin->codec;
+       struct hdmi_spec *spec = codec->spec;
+       int ret;
 
+       mutex_lock(&spec->pcm_lock);
        if (codec_has_acomp(codec)) {
                sync_eld_via_acomp(codec, per_pin);
-               return false; /* don't call snd_hda_jack_report_sync() */
+               ret = false; /* don't call snd_hda_jack_report_sync() */
        } else {
-               return hdmi_present_sense_via_verbs(per_pin, repoll);
+               ret = hdmi_present_sense_via_verbs(per_pin, repoll);
        }
+       mutex_unlock(&spec->pcm_lock);
+
+       return ret;
 }
 
 static void hdmi_repoll_eld(struct work_struct *work)
@@ -1744,6 +2061,13 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
 
        per_pin->pin_nid = pin_nid;
        per_pin->non_pcm = false;
+       if (spec->dyn_pcm_assign)
+               per_pin->pcm_idx = -1;
+       else {
+               per_pin->pcm = get_hdmi_pcm(spec, pin_idx);
+               per_pin->pcm_idx = pin_idx;
+       }
+       per_pin->pin_nid_idx = pin_idx;
 
        err = hdmi_read_pin_conn(codec, pin_idx);
        if (err < 0)
@@ -1850,13 +2174,34 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
 {
        hda_nid_t cvt_nid = hinfo->nid;
        struct hdmi_spec *spec = codec->spec;
-       int pin_idx = hinfo_to_pin_index(codec, hinfo);
-       struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
-       hda_nid_t pin_nid = per_pin->pin_nid;
+       int pin_idx;
+       struct hdmi_spec_per_pin *per_pin;
+       hda_nid_t pin_nid;
        struct snd_pcm_runtime *runtime = substream->runtime;
        bool non_pcm;
        int pinctl;
+       int err;
+
+       mutex_lock(&spec->pcm_lock);
+       pin_idx = hinfo_to_pin_index(codec, hinfo);
+       if (spec->dyn_pcm_assign && pin_idx < 0) {
+               /* with dyn_pcm_assign, a PCM that is not bound to a pin
+                * skips the pin setup; return 0 so that audio playback
+                * can keep running
+                */
+               intel_not_share_assigned_cvt_nid(codec, 0, cvt_nid);
+               snd_hda_codec_setup_stream(codec, cvt_nid,
+                                       stream_tag, 0, format);
+               mutex_unlock(&spec->pcm_lock);
+               return 0;
+       }
 
+       if (snd_BUG_ON(pin_idx < 0)) {
+               mutex_unlock(&spec->pcm_lock);
+               return -EINVAL;
+       }
+       per_pin = get_pin(spec, pin_idx);
+       pin_nid = per_pin->pin_nid;
        if (is_haswell_plus(codec) || is_valleyview_plus(codec)) {
                /* Verify pin:cvt selections to avoid silent audio after S3.
                 * After S3, the audio driver restores pin:cvt selections
@@ -1881,7 +2226,6 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
 
        hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
        mutex_unlock(&per_pin->lock);
-
        if (spec->dyn_pin_out) {
                pinctl = snd_hda_codec_read(codec, pin_nid, 0,
                                            AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
@@ -1890,7 +2234,10 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
                                    pinctl | PIN_OUT);
        }
 
-       return spec->ops.setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+       err = spec->ops.setup_stream(codec, cvt_nid, pin_nid,
+                                stream_tag, format);
+       mutex_unlock(&spec->pcm_lock);
+       return err;
 }
 
 static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
@@ -1906,12 +2253,15 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
                          struct snd_pcm_substream *substream)
 {
        struct hdmi_spec *spec = codec->spec;
-       int cvt_idx, pin_idx;
+       int cvt_idx, pin_idx, pcm_idx;
        struct hdmi_spec_per_cvt *per_cvt;
        struct hdmi_spec_per_pin *per_pin;
        int pinctl;
 
        if (hinfo->nid) {
+               pcm_idx = hinfo_to_pcm_index(codec, hinfo);
+               if (snd_BUG_ON(pcm_idx < 0))
+                       return -EINVAL;
                cvt_idx = cvt_nid_to_cvt_index(codec, hinfo->nid);
                if (snd_BUG_ON(cvt_idx < 0))
                        return -EINVAL;
@@ -1921,9 +2271,19 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
                per_cvt->assigned = 0;
                hinfo->nid = 0;
 
+               mutex_lock(&spec->pcm_lock);
+               snd_hda_spdif_ctls_unassign(codec, pcm_idx);
+               clear_bit(pcm_idx, &spec->pcm_in_use);
                pin_idx = hinfo_to_pin_index(codec, hinfo);
-               if (snd_BUG_ON(pin_idx < 0))
+               if (spec->dyn_pcm_assign && pin_idx < 0) {
+                       mutex_unlock(&spec->pcm_lock);
+                       return 0;
+               }
+
+               if (snd_BUG_ON(pin_idx < 0)) {
+                       mutex_unlock(&spec->pcm_lock);
                        return -EINVAL;
+               }
                per_pin = get_pin(spec, pin_idx);
 
                if (spec->dyn_pin_out) {
@@ -1934,8 +2294,6 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
                                            pinctl & ~PIN_OUT);
                }
 
-               snd_hda_spdif_ctls_unassign(codec, pin_idx);
-
                mutex_lock(&per_pin->lock);
                per_pin->chmap_set = false;
                memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
@@ -1943,6 +2301,7 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
                per_pin->setup = false;
                per_pin->channels = 0;
                mutex_unlock(&per_pin->lock);
+               mutex_unlock(&spec->pcm_lock);
        }
 
        return 0;
@@ -2054,10 +2413,16 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
        struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
        struct hda_codec *codec = info->private_data;
        struct hdmi_spec *spec = codec->spec;
-       int pin_idx = kcontrol->private_value;
-       struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+       int pcm_idx = kcontrol->private_value;
+       struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
        int i;
 
+       if (!per_pin) {
+               for (i = 0; i < spec->channels_max; i++)
+                       ucontrol->value.integer.value[i] = 0;
+               return 0;
+       }
+
        for (i = 0; i < ARRAY_SIZE(per_pin->chmap); i++)
                ucontrol->value.integer.value[i] = per_pin->chmap[i];
        return 0;
@@ -2069,13 +2434,19 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
        struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol);
        struct hda_codec *codec = info->private_data;
        struct hdmi_spec *spec = codec->spec;
-       int pin_idx = kcontrol->private_value;
-       struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+       int pcm_idx = kcontrol->private_value;
+       struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx);
        unsigned int ctl_idx;
        struct snd_pcm_substream *substream;
        unsigned char chmap[8];
        int i, err, ca, prepared = 0;
 
+       /* No monitor is connected in dyn_pcm_assign mode,
+        * so it is invalid to set up the chmap.
+        */
+       if (!per_pin)
+               return 0;
+
        ctl_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
        substream = snd_pcm_chmap_substream(info, ctl_idx);
        if (!substream || !substream->runtime)
@@ -2125,7 +2496,9 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
                info = snd_hda_codec_pcm_new(codec, "HDMI %d", pin_idx);
                if (!info)
                        return -ENOMEM;
-               spec->pcm_rec[pin_idx] = info;
+
+               spec->pcm_rec[pin_idx].pcm = info;
+               spec->pcm_used++;
                info->pcm_type = HDA_PCM_TYPE_HDMI;
                info->own_chmap = true;
 
@@ -2138,15 +2511,16 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
        return 0;
 }
 
-static void free_acomp_jack_priv(struct snd_jack *jack)
+static void free_hdmi_jack_priv(struct snd_jack *jack)
 {
-       struct hdmi_spec_per_pin *per_pin = jack->private_data;
+       struct hdmi_pcm *pcm = jack->private_data;
 
-       per_pin->acomp_jack = NULL;
+       pcm->jack = NULL;
 }
 
-static int add_acomp_jack_kctl(struct hda_codec *codec,
-                              struct hdmi_spec_per_pin *per_pin,
+static int add_hdmi_jack_kctl(struct hda_codec *codec,
+                              struct hdmi_spec *spec,
+                              int pcm_idx,
                               const char *name)
 {
        struct snd_jack *jack;
@@ -2156,57 +2530,91 @@ static int add_acomp_jack_kctl(struct hda_codec *codec,
                           true, false);
        if (err < 0)
                return err;
-       per_pin->acomp_jack = jack;
-       jack->private_data = per_pin;
-       jack->private_free = free_acomp_jack_priv;
+
+       spec->pcm_rec[pcm_idx].jack = jack;
+       jack->private_data = &spec->pcm_rec[pcm_idx];
+       jack->private_free = free_hdmi_jack_priv;
        return 0;
 }
 
-static int generic_hdmi_build_jack(struct hda_codec *codec, int pin_idx)
+static int generic_hdmi_build_jack(struct hda_codec *codec, int pcm_idx)
 {
        char hdmi_str[32] = "HDMI/DP";
        struct hdmi_spec *spec = codec->spec;
-       struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
-       int pcmdev = get_pcm_rec(spec, pin_idx)->device;
+       struct hdmi_spec_per_pin *per_pin;
+       struct hda_jack_tbl *jack;
+       int pcmdev = get_pcm_rec(spec, pcm_idx)->device;
        bool phantom_jack;
+       int ret;
 
        if (pcmdev > 0)
                sprintf(hdmi_str + strlen(hdmi_str), ",pcm=%d", pcmdev);
-       if (codec_has_acomp(codec))
-               return add_acomp_jack_kctl(codec, per_pin, hdmi_str);
+
+       if (spec->dyn_pcm_assign)
+               return add_hdmi_jack_kctl(codec, spec, pcm_idx, hdmi_str);
+
+       /* For !dyn_pcm_assign, we still use hda_jack for compatibility. */
+       /* !dyn_pcm_assign implies non-MST mode, so PCMs and pins are
+        * statically mapped one-to-one,
+        * i.e. pcm_idx equals pin_idx.
+        */
+       per_pin = get_pin(spec, pcm_idx);
        phantom_jack = !is_jack_detectable(codec, per_pin->pin_nid);
        if (phantom_jack)
                strncat(hdmi_str, " Phantom",
                        sizeof(hdmi_str) - strlen(hdmi_str) - 1);
-
-       return snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str,
-                                    phantom_jack);
+       ret = snd_hda_jack_add_kctl(codec, per_pin->pin_nid, hdmi_str,
+                                   phantom_jack);
+       if (ret < 0)
+               return ret;
+       jack = snd_hda_jack_tbl_get(codec, per_pin->pin_nid);
+       if (jack == NULL)
+               return 0;
+       /* assign jack->jack to pcm_rec[].jack to
+        * align with dyn_pcm_assign mode
+        */
+       spec->pcm_rec[pcm_idx].jack = jack->jack;
+       return 0;
 }
 
 static int generic_hdmi_build_controls(struct hda_codec *codec)
 {
        struct hdmi_spec *spec = codec->spec;
        int err;
-       int pin_idx;
+       int pin_idx, pcm_idx;
 
-       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
-               struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
 
-               err = generic_hdmi_build_jack(codec, pin_idx);
+       for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) {
+               err = generic_hdmi_build_jack(codec, pcm_idx);
                if (err < 0)
                        return err;
 
-               err = snd_hda_create_dig_out_ctls(codec,
+               /* create the spdif for each pcm
+                * pin will be bound when monitor is connected
+                */
+               if (spec->dyn_pcm_assign)
+                       err = snd_hda_create_dig_out_ctls(codec,
+                                         0, spec->cvt_nids[0],
+                                         HDA_PCM_TYPE_HDMI);
+               else {
+                       struct hdmi_spec_per_pin *per_pin =
+                               get_pin(spec, pcm_idx);
+                       err = snd_hda_create_dig_out_ctls(codec,
                                                  per_pin->pin_nid,
                                                  per_pin->mux_nids[0],
                                                  HDA_PCM_TYPE_HDMI);
+               }
                if (err < 0)
                        return err;
-               snd_hda_spdif_ctls_unassign(codec, pin_idx);
+               snd_hda_spdif_ctls_unassign(codec, pcm_idx);
+       }
+
+       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+               struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
 
                /* add control for ELD Bytes */
                err = hdmi_create_eld_ctl(codec, pin_idx,
-                                         get_pcm_rec(spec, pin_idx)->device);
+                               get_pcm_rec(spec, pin_idx)->device);
 
                if (err < 0)
                        return err;
@@ -2215,18 +2623,18 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
        }
 
        /* add channel maps */
-       for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+       for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) {
                struct hda_pcm *pcm;
                struct snd_pcm_chmap *chmap;
                struct snd_kcontrol *kctl;
                int i;
 
-               pcm = spec->pcm_rec[pin_idx];
+               pcm = get_pcm_rec(spec, pcm_idx);
                if (!pcm || !pcm->pcm)
                        break;
                err = snd_pcm_add_chmap_ctls(pcm->pcm,
                                             SNDRV_PCM_STREAM_PLAYBACK,
-                                            NULL, 0, pin_idx, &chmap);
+                                            NULL, 0, pcm_idx, &chmap);
                if (err < 0)
                        return err;
                /* override handlers */
@@ -2292,18 +2700,25 @@ static void hdmi_array_free(struct hdmi_spec *spec)
 static void generic_hdmi_free(struct hda_codec *codec)
 {
        struct hdmi_spec *spec = codec->spec;
-       int pin_idx;
+       int pin_idx, pcm_idx;
 
        if (codec_has_acomp(codec))
                snd_hdac_i915_register_notifier(NULL);
 
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
-
                cancel_delayed_work_sync(&per_pin->work);
                eld_proc_free(per_pin);
-               if (per_pin->acomp_jack)
-                       snd_device_free(codec->card, per_pin->acomp_jack);
+       }
+
+       for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) {
+               if (spec->pcm_rec[pcm_idx].jack == NULL)
+                       continue;
+               if (spec->dyn_pcm_assign)
+                       snd_device_free(codec->card,
+                                       spec->pcm_rec[pcm_idx].jack);
+               else
+                       spec->pcm_rec[pcm_idx].jack = NULL;
        }
 
        if (spec->i915_bound)
@@ -2452,6 +2867,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
                return -ENOMEM;
 
        spec->ops = generic_standard_hdmi_ops;
+       mutex_init(&spec->pcm_lock);
        codec->spec = spec;
        hdmi_array_init(spec, 4);
 
@@ -2504,6 +2920,7 @@ static int patch_generic_hdmi(struct hda_codec *codec)
 
        init_channel_allocations();
 
+       WARN_ON(spec->dyn_pcm_assign && !codec_has_acomp(codec));
        return 0;
 }
 
@@ -2526,7 +2943,7 @@ static int simple_playback_build_pcms(struct hda_codec *codec)
        info = snd_hda_codec_pcm_new(codec, "HDMI 0");
        if (!info)
                return -ENOMEM;
-       spec->pcm_rec[0] = info;
+       spec->pcm_rec[0].pcm = info;
        info->pcm_type = HDA_PCM_TYPE_HDMI;
        pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK];
        *pstr = spec->pcm_playback;
@@ -3653,6 +4070,7 @@ HDA_CODEC_ENTRY(0x10de0070, "GPU 70 HDMI/DP",     patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0071, "GPU 71 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de0072, "GPU 72 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de007d, "GPU 7d HDMI/DP",  patch_nvhdmi),
+HDA_CODEC_ENTRY(0x10de0083, "GPU 83 HDMI/DP",  patch_nvhdmi),
 HDA_CODEC_ENTRY(0x10de8001, "MCP73 HDMI",      patch_nvhdmi_2ch),
 HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",   patch_via_hdmi),
 HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP",   patch_via_hdmi),
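
In the patch_hdmi.c hunks above, spec->pcm_rec[] now stores a struct with .pcm and .jack, and
PCM devices are decoupled from pins, so per-PCM lookups such as pcm_idx_to_pin() may find no
pin while no monitor is attached; the chmap get/put handlers therefore bail out instead of
dereferencing a NULL pin. A minimal standalone sketch of that "maybe unbound" pattern follows
(C99, simplified stand-in types and names, not the driver's own structs):

    #include <stdio.h>

    #define MAX_CH 8

    struct per_pin { unsigned char chmap[MAX_CH]; };
    struct hdmi_pcm { struct per_pin *pin; };   /* pin == NULL: no monitor bound */

    /* Copy the bound pin's channel map, or report an empty map when unbound. */
    static void chmap_get(const struct hdmi_pcm *pcm, long val[MAX_CH])
    {
        for (int i = 0; i < MAX_CH; i++)
            val[i] = pcm->pin ? pcm->pin->chmap[i] : 0;
    }

    int main(void)
    {
        struct per_pin pin = { .chmap = { 3, 4 } };
        struct hdmi_pcm bound = { .pin = &pin };
        struct hdmi_pcm unbound = { .pin = NULL };
        long v[MAX_CH];

        chmap_get(&unbound, v);          /* all zeros, like the !per_pin branch */
        chmap_get(&bound, v);            /* picks up the pin's map */
        printf("%ld %ld\n", v[0], v[1]); /* prints: 3 4 */
        return 0;
    }
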
index 33753244f48fe98de3a57709de2793abb74f2ae0..efd4980cffb8a0273228ac7570315f0f1aa1dbf9 100644 (file)
@@ -282,7 +282,7 @@ static void alc_update_knob_master(struct hda_codec *codec,
        uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
        if (!uctl)
                return;
-       val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
+       val = snd_hda_codec_read(codec, jack->nid, 0,
                                 AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
        val &= HDA_AMP_VOLMASK;
        uctl->value.integer.value[0] = val;
@@ -327,6 +327,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0292:
                alc_update_coef_idx(codec, 0x4, 1<<15, 0);
                break;
+       case 0x10ec0225:
        case 0x10ec0233:
        case 0x10ec0255:
        case 0x10ec0256:
@@ -900,6 +901,7 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
        { 0x10ec0899, 0x1028, 0, "ALC3861" },
        { 0x10ec0298, 0x1028, 0, "ALC3266" },
        { 0x10ec0256, 0x1028, 0, "ALC3246" },
+       { 0x10ec0225, 0x1028, 0, "ALC3253" },
        { 0x10ec0670, 0x1025, 0, "ALC669X" },
        { 0x10ec0676, 0x1025, 0, "ALC679X" },
        { 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -1785,7 +1787,6 @@ enum {
        ALC882_FIXUP_NO_PRIMARY_HP,
        ALC887_FIXUP_ASUS_BASS,
        ALC887_FIXUP_BASS_CHMAP,
-       ALC882_FIXUP_DISABLE_AAMIX,
 };
 
 static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1947,8 +1948,6 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
 
 static void alc_fixup_bass_chmap(struct hda_codec *codec,
                                 const struct hda_fixup *fix, int action);
-static void alc_fixup_disable_aamix(struct hda_codec *codec,
-                                   const struct hda_fixup *fix, int action);
 
 static const struct hda_fixup alc882_fixups[] = {
        [ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2186,10 +2185,6 @@ static const struct hda_fixup alc882_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_bass_chmap,
        },
-       [ALC882_FIXUP_DISABLE_AAMIX] = {
-               .type = HDA_FIXUP_FUNC,
-               .v.func = alc_fixup_disable_aamix,
-       },
 };
 
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2228,6 +2223,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
        SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
        SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+       SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
 
        /* All Apple entries are in codec SSIDs */
        SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -2257,7 +2253,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
        SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
        SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
        SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
        SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2651,6 +2646,7 @@ enum {
        ALC269_TYPE_ALC298,
        ALC269_TYPE_ALC255,
        ALC269_TYPE_ALC256,
+       ALC269_TYPE_ALC225,
 };
 
 /*
@@ -2680,6 +2676,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC298:
        case ALC269_TYPE_ALC255:
        case ALC269_TYPE_ALC256:
+       case ALC269_TYPE_ALC225:
                ssids = alc269_ssids;
                break;
        default:
@@ -3658,6 +3655,16 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
                WRITE_COEF(0xb7, 0x802b),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEF(0x4a, 1<<8, 0),
+               UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
+               UPDATE_COEF(0x63, 3<<14, 3<<14),
+               UPDATE_COEF(0x4a, 3<<4, 2<<4),
+               UPDATE_COEF(0x4a, 3<<10, 3<<10),
+               UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+               UPDATE_COEF(0x4a, 3<<10, 0),
+               {}
+       };
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -3682,6 +3689,9 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
        case 0x10ec0668:
                alc_process_coef_fw(codec, coef0668);
                break;
+       case 0x10ec0225:
+               alc_process_coef_fw(codec, coef0225);
+               break;
        }
        codec_dbg(codec, "Headset jack set to unplugged mode.\n");
 }
@@ -3727,6 +3737,13 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
                UPDATE_COEF(0xc3, 0, 1<<12),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14),
+               UPDATE_COEF(0x4a, 3<<4, 2<<4),
+               UPDATE_COEF(0x63, 3<<14, 0),
+               {}
+       };
+
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -3772,6 +3789,12 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
                alc_process_coef_fw(codec, coef0688);
                snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
                break;
+       case 0x10ec0225:
+               alc_update_coef_idx(codec, 0x45, 0x3f<<10, 0x31<<10);
+               snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+               alc_process_coef_fw(codec, coef0225);
+               snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+               break;
        }
        codec_dbg(codec, "Headset jack set to mic-in mode.\n");
 }
@@ -3884,6 +3907,13 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
                WRITE_COEF(0xc3, 0x0000),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
+               UPDATE_COEF(0x49, 1<<8, 1<<8),
+               UPDATE_COEF(0x4a, 7<<6, 7<<6),
+               UPDATE_COEF(0x4a, 3<<4, 3<<4),
+               {}
+       };
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -3912,6 +3942,9 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
        case 0x10ec0668:
                alc_process_coef_fw(codec, coef0688);
                break;
+       case 0x10ec0225:
+               alc_process_coef_fw(codec, coef0225);
+               break;
        }
        codec_dbg(codec, "Headset jack set to iPhone-style headset mode.\n");
 }
@@ -3955,6 +3988,13 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
                WRITE_COEF(0xc3, 0x0000),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10),
+               UPDATE_COEF(0x49, 1<<8, 1<<8),
+               UPDATE_COEF(0x4a, 7<<6, 7<<6),
+               UPDATE_COEF(0x4a, 3<<4, 3<<4),
+               {}
+       };
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -3983,6 +4023,9 @@ static void alc_headset_mode_omtp(struct hda_codec *codec)
        case 0x10ec0668:
                alc_process_coef_fw(codec, coef0688);
                break;
+       case 0x10ec0225:
+               alc_process_coef_fw(codec, coef0225);
+               break;
        }
        codec_dbg(codec, "Headset jack set to Nokia-style headset mode.\n");
 }
@@ -4014,6 +4057,11 @@ static void alc_determine_headset_type(struct hda_codec *codec)
                WRITE_COEF(0xc3, 0x0c00),
                {}
        };
+       static struct coef_fw coef0225[] = {
+               UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+               UPDATE_COEF(0x49, 1<<8, 1<<8),
+               {}
+       };
 
        switch (codec->core.vendor_id) {
        case 0x10ec0255:
@@ -4058,6 +4106,12 @@ static void alc_determine_headset_type(struct hda_codec *codec)
                val = alc_read_coef_idx(codec, 0xbe);
                is_ctia = (val & 0x1c02) == 0x1c02;
                break;
+       case 0x10ec0225:
+               alc_process_coef_fw(codec, coef0225);
+               msleep(800);
+               val = alc_read_coef_idx(codec, 0x46);
+               is_ctia = (val & 0x00f0) == 0x00f0;
+               break;
        }
 
        codec_dbg(codec, "Headset jack detected iPhone-style headset: %s\n",
@@ -5560,6 +5614,9 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC292_FIXUP_TPT440, .name = "tpt440"},
        {}
 };
+#define ALC225_STANDARD_PINS \
+       {0x12, 0xb7a60130}, \
+       {0x21, 0x04211020}
 
 #define ALC256_STANDARD_PINS \
        {0x12, 0x90a60140}, \
@@ -5581,6 +5638,12 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {0x21, 0x03211020}
 
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x14, 0x901701a0}),
+       SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC225_STANDARD_PINS,
+               {0x14, 0x901701b0}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
                {0x14, 0x90170110},
                {0x21, 0x02211020}),
@@ -5906,6 +5969,9 @@ static int patch_alc269(struct hda_codec *codec)
                spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
                break;
+       case 0x10ec0225:
+               spec->codec_variant = ALC269_TYPE_ALC225;
+               break;
        }
 
        if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -6796,6 +6862,7 @@ static int patch_alc680(struct hda_codec *codec)
  */
 static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0235, "ALC233", patch_alc269),
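
In the patch_realtek.c hunks above, the new coef0225[] tables drive ALC225 headset-jack
switching through the driver's WRITE_COEF()/UPDATE_COEF() helpers, where an UPDATE_COEF entry
amounts to a masked read-modify-write of a vendor coefficient register. A standalone sketch of
that masking rule (plain C, no HDA access; update_coef() is a made-up name):

    #include <stdio.h>
    #include <stdint.h>

    /* Effect of one UPDATE_COEF(idx, mask, val) entry on a coefficient value:
     * bits outside 'mask' are kept, bits inside 'mask' are forced to 'val'.
     */
    static uint16_t update_coef(uint16_t old, uint16_t mask, uint16_t val)
    {
        return (uint16_t)((old & ~mask) | (val & mask));
    }

    int main(void)
    {
        uint16_t reg = 0xffff;

        /* e.g. UPDATE_COEF(0x4a, 3<<4, 2<<4): force bits 5:4 to binary 10 */
        reg = update_coef(reg, 3 << 4, 2 << 4);
        printf("0x%04x\n", reg);    /* prints: 0xffef */
        return 0;
    }
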
index 2c7c5eb8b1e9514779b78bc278412803862062e9..37b70f8e878f715968c7496e01eeb493f8a2b7a4 100644 (file)
@@ -493,9 +493,9 @@ static void jack_update_power(struct hda_codec *codec,
        if (!spec->num_pwrs)
                return;
 
-       if (jack && jack->tbl->nid) {
-               stac_toggle_power_map(codec, jack->tbl->nid,
-                                     snd_hda_jack_detect(codec, jack->tbl->nid),
+       if (jack && jack->nid) {
+               stac_toggle_power_map(codec, jack->nid,
+                                     snd_hda_jack_detect(codec, jack->nid),
                                      true);
                return;
        }
index 0095a80a997fc157e97b70d3c12051d65d0ee4ab..a5843fc5ff204ee6efb32becd35e671927903f59 100644 (file)
@@ -34,7 +34,6 @@
 #include "pmac.h"
 #include <sound/pcm_params.h>
 #include <asm/pmac_feature.h>
-#include <asm/pci-bridge.h>
 
 
 /* fixed frequency table for awacs, screamer, burgundy, DACA (44100 max) */
index 3191e0a7d273213515ccd3e049e66c4f6ddc5f67..d1fb035f44db8fd7026d97e1148f0c09eb4d1679 100644 (file)
@@ -635,6 +635,7 @@ static int acp_dma_open(struct snd_pcm_substream *substream)
                                            SNDRV_PCM_HW_PARAM_PERIODS);
        if (ret < 0) {
                dev_err(prtd->platform->dev, "set integer constraint failed\n");
+               kfree(adata);
                return ret;
        }
 
index 3303d5f58082f68b68b535b2bdf7ce8d14cafcc4..1c1f2210387b26e9551959378f6792f18648652e 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/of_address.h>
 #include <linux/slab.h>
 
 #include <sound/core.h>
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 
-/* Clock registers */
-#define BCM2835_CLK_PCMCTL_REG  0x00
-#define BCM2835_CLK_PCMDIV_REG  0x04
-
-/* Clock register settings */
-#define BCM2835_CLK_PASSWD             (0x5a000000)
-#define BCM2835_CLK_PASSWD_MASK        (0xff000000)
-#define BCM2835_CLK_MASH(v)            ((v) << 9)
-#define BCM2835_CLK_FLIP               BIT(8)
-#define BCM2835_CLK_BUSY               BIT(7)
-#define BCM2835_CLK_KILL               BIT(5)
-#define BCM2835_CLK_ENAB               BIT(4)
-#define BCM2835_CLK_SRC(v)             (v)
-
-#define BCM2835_CLK_SHIFT              (12)
-#define BCM2835_CLK_DIVI(v)            ((v) << BCM2835_CLK_SHIFT)
-#define BCM2835_CLK_DIVF(v)            (v)
-#define BCM2835_CLK_DIVF_MASK          (0xFFF)
-
-enum {
-       BCM2835_CLK_MASH_0 = 0,
-       BCM2835_CLK_MASH_1,
-       BCM2835_CLK_MASH_2,
-       BCM2835_CLK_MASH_3,
-};
-
-enum {
-       BCM2835_CLK_SRC_GND = 0,
-       BCM2835_CLK_SRC_OSC,
-       BCM2835_CLK_SRC_DBG0,
-       BCM2835_CLK_SRC_DBG1,
-       BCM2835_CLK_SRC_PLLA,
-       BCM2835_CLK_SRC_PLLC,
-       BCM2835_CLK_SRC_PLLD,
-       BCM2835_CLK_SRC_HDMI,
-};
-
-/* Most clocks are not useable (freq = 0) */
-static const unsigned int bcm2835_clk_freq[BCM2835_CLK_SRC_HDMI+1] = {
-       [BCM2835_CLK_SRC_GND]           = 0,
-       [BCM2835_CLK_SRC_OSC]           = 19200000,
-       [BCM2835_CLK_SRC_DBG0]          = 0,
-       [BCM2835_CLK_SRC_DBG1]          = 0,
-       [BCM2835_CLK_SRC_PLLA]          = 0,
-       [BCM2835_CLK_SRC_PLLC]          = 0,
-       [BCM2835_CLK_SRC_PLLD]          = 500000000,
-       [BCM2835_CLK_SRC_HDMI]          = 0,
-};
-
 /* I2S registers */
 #define BCM2835_I2S_CS_A_REG           0x00
 #define BCM2835_I2S_FIFO_A_REG         0x04
@@ -158,10 +110,6 @@ static const unsigned int bcm2835_clk_freq[BCM2835_CLK_SRC_HDMI+1] = {
 #define BCM2835_I2S_INT_RXR            BIT(1)
 #define BCM2835_I2S_INT_TXW            BIT(0)
 
-/* I2S DMA interface */
-/* FIXME: Needs IOMMU support */
-#define BCM2835_VCMMU_SHIFT            (0x7E000000 - 0x20000000)
-
 /* General device struct */
 struct bcm2835_i2s_dev {
        struct device                           *dev;
@@ -169,21 +117,23 @@ struct bcm2835_i2s_dev {
        unsigned int                            fmt;
        unsigned int                            bclk_ratio;
 
-       struct regmap *i2s_regmap;
-       struct regmap *clk_regmap;
+       struct regmap                           *i2s_regmap;
+       struct clk                              *clk;
+       bool                                    clk_prepared;
 };
 
 static void bcm2835_i2s_start_clock(struct bcm2835_i2s_dev *dev)
 {
-       /* Start the clock if in master mode */
        unsigned int master = dev->fmt & SND_SOC_DAIFMT_MASTER_MASK;
 
+       if (dev->clk_prepared)
+               return;
+
        switch (master) {
        case SND_SOC_DAIFMT_CBS_CFS:
        case SND_SOC_DAIFMT_CBS_CFM:
-               regmap_update_bits(dev->clk_regmap, BCM2835_CLK_PCMCTL_REG,
-                       BCM2835_CLK_PASSWD_MASK | BCM2835_CLK_ENAB,
-                       BCM2835_CLK_PASSWD | BCM2835_CLK_ENAB);
+               clk_prepare_enable(dev->clk);
+               dev->clk_prepared = true;
                break;
        default:
                break;
@@ -192,28 +142,9 @@ static void bcm2835_i2s_start_clock(struct bcm2835_i2s_dev *dev)
 
 static void bcm2835_i2s_stop_clock(struct bcm2835_i2s_dev *dev)
 {
-       uint32_t clkreg;
-       int timeout = 1000;
-
-       /* Stop clock */
-       regmap_update_bits(dev->clk_regmap, BCM2835_CLK_PCMCTL_REG,
-                       BCM2835_CLK_PASSWD_MASK | BCM2835_CLK_ENAB,
-                       BCM2835_CLK_PASSWD);
-
-       /* Wait for the BUSY flag going down */
-       while (--timeout) {
-               regmap_read(dev->clk_regmap, BCM2835_CLK_PCMCTL_REG, &clkreg);
-               if (!(clkreg & BCM2835_CLK_BUSY))
-                       break;
-       }
-
-       if (!timeout) {
-               /* KILL the clock */
-               dev_err(dev->dev, "I2S clock didn't stop. Kill the clock!\n");
-               regmap_update_bits(dev->clk_regmap, BCM2835_CLK_PCMCTL_REG,
-                       BCM2835_CLK_KILL | BCM2835_CLK_PASSWD_MASK,
-                       BCM2835_CLK_KILL | BCM2835_CLK_PASSWD);
-       }
+       if (dev->clk_prepared)
+               clk_disable_unprepare(dev->clk);
+       dev->clk_prepared = false;
 }
 
 static void bcm2835_i2s_clear_fifos(struct bcm2835_i2s_dev *dev,
@@ -223,8 +154,7 @@ static void bcm2835_i2s_clear_fifos(struct bcm2835_i2s_dev *dev,
        uint32_t syncval;
        uint32_t csreg;
        uint32_t i2s_active_state;
-       uint32_t clkreg;
-       uint32_t clk_active_state;
+       bool clk_was_prepared;
        uint32_t off;
        uint32_t clr;
 
@@ -238,15 +168,10 @@ static void bcm2835_i2s_clear_fifos(struct bcm2835_i2s_dev *dev,
        regmap_read(dev->i2s_regmap, BCM2835_I2S_CS_A_REG, &csreg);
        i2s_active_state = csreg & (BCM2835_I2S_RXON | BCM2835_I2S_TXON);
 
-       regmap_read(dev->clk_regmap, BCM2835_CLK_PCMCTL_REG, &clkreg);
-       clk_active_state = clkreg & BCM2835_CLK_ENAB;
-
        /* Start clock if not running */
-       if (!clk_active_state) {
-               regmap_update_bits(dev->clk_regmap, BCM2835_CLK_PCMCTL_REG,
-                       BCM2835_CLK_PASSWD_MASK | BCM2835_CLK_ENAB,
-                       BCM2835_CLK_PASSWD | BCM2835_CLK_ENAB);
-       }
+       clk_was_prepared = dev->clk_prepared;
+       if (!clk_was_prepared)
+               bcm2835_i2s_start_clock(dev);
 
        /* Stop I2S module */
        regmap_update_bits(dev->i2s_regmap, BCM2835_I2S_CS_A_REG, off, 0);
@@ -280,7 +205,7 @@ static void bcm2835_i2s_clear_fifos(struct bcm2835_i2s_dev *dev,
                dev_err(dev->dev, "I2S SYNC error!\n");
 
        /* Stop clock if it was not running before */
-       if (!clk_active_state)
+       if (!clk_was_prepared)
                bcm2835_i2s_stop_clock(dev);
 
        /* Restore I2S state */
@@ -309,19 +234,9 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
                                 struct snd_soc_dai *dai)
 {
        struct bcm2835_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
-
        unsigned int sampling_rate = params_rate(params);
        unsigned int data_length, data_delay, bclk_ratio;
        unsigned int ch1pos, ch2pos, mode, format;
-       unsigned int mash = BCM2835_CLK_MASH_1;
-       unsigned int divi, divf, target_frequency;
-       int clk_src = -1;
-       unsigned int master = dev->fmt & SND_SOC_DAIFMT_MASTER_MASK;
-       bool bit_master =       (master == SND_SOC_DAIFMT_CBS_CFS
-                                       || master == SND_SOC_DAIFMT_CBS_CFM);
-
-       bool frame_master =     (master == SND_SOC_DAIFMT_CBS_CFS
-                                       || master == SND_SOC_DAIFMT_CBM_CFS);
        uint32_t csreg;
 
        /*
@@ -343,11 +258,9 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
                data_length = 16;
-               bclk_ratio = 40;
                break;
        case SNDRV_PCM_FORMAT_S32_LE:
                data_length = 32;
-               bclk_ratio = 80;
                break;
        default:
                return -EINVAL;
@@ -356,69 +269,12 @@ static int bcm2835_i2s_hw_params(struct snd_pcm_substream *substream,
        /* If bclk_ratio already set, use that one. */
        if (dev->bclk_ratio)
                bclk_ratio = dev->bclk_ratio;
+       else
+               /* otherwise calculate a fitting bclk ratio */
+               bclk_ratio = 2 * data_length;
 
-       /*
-        * Clock Settings
-        *
-        * The target frequency of the bit clock is
-        *      sampling rate * frame length
-        *
-        * Integer mode:
-        * Sampling rates that are multiples of 8000 kHz
-        * can be driven by the oscillator of 19.2 MHz
-        * with an integer divider as long as the frame length
-        * is an integer divider of 19200000/8000=2400 as set up above.
-        * This is no longer possible if the sampling rate
-        * is too high (e.g. 192 kHz), because the oscillator is too slow.
-        *
-        * MASH mode:
-        * For all other sampling rates, it is not possible to
-        * have an integer divider. Approximate the clock
-        * with the MASH module that induces a slight frequency
-        * variance. To minimize that it is best to have the fastest
-        * clock here. That is PLLD with 500 MHz.
-        */
-       target_frequency = sampling_rate * bclk_ratio;
-       clk_src = BCM2835_CLK_SRC_OSC;
-       mash = BCM2835_CLK_MASH_0;
-
-       if (bcm2835_clk_freq[clk_src] % target_frequency == 0
-                       && bit_master && frame_master) {
-               divi = bcm2835_clk_freq[clk_src] / target_frequency;
-               divf = 0;
-       } else {
-               uint64_t dividend;
-
-               if (!dev->bclk_ratio) {
-                       /*
-                        * Overwrite bclk_ratio, because the
-                        * above trick is not needed or can
-                        * not be used.
-                        */
-                       bclk_ratio = 2 * data_length;
-               }
-
-               target_frequency = sampling_rate * bclk_ratio;
-
-               clk_src = BCM2835_CLK_SRC_PLLD;
-               mash = BCM2835_CLK_MASH_1;
-
-               dividend = bcm2835_clk_freq[clk_src];
-               dividend <<= BCM2835_CLK_SHIFT;
-               do_div(dividend, target_frequency);
-               divi = dividend >> BCM2835_CLK_SHIFT;
-               divf = dividend & BCM2835_CLK_DIVF_MASK;
-       }
-
-       /* Set clock divider */
-       regmap_write(dev->clk_regmap, BCM2835_CLK_PCMDIV_REG, BCM2835_CLK_PASSWD
-                       | BCM2835_CLK_DIVI(divi)
-                       | BCM2835_CLK_DIVF(divf));
-
-       /* Setup clock, but don't start it yet */
-       regmap_write(dev->clk_regmap, BCM2835_CLK_PCMCTL_REG, BCM2835_CLK_PASSWD
-                       | BCM2835_CLK_MASH(mash)
-                       | BCM2835_CLK_SRC(clk_src));
+       /* set target clock rate */
+       clk_set_rate(dev->clk, sampling_rate * bclk_ratio);
 
        /* Setup the frame format */
        format = BCM2835_I2S_CHEN;
@@ -692,7 +548,7 @@ static const struct snd_soc_dai_ops bcm2835_i2s_dai_ops = {
        .trigger        = bcm2835_i2s_trigger,
        .hw_params      = bcm2835_i2s_hw_params,
        .set_fmt        = bcm2835_i2s_set_dai_fmt,
-       .set_bclk_ratio = bcm2835_i2s_set_dai_bclk_ratio
+       .set_bclk_ratio = bcm2835_i2s_set_dai_bclk_ratio,
 };
 
 static int bcm2835_i2s_dai_probe(struct snd_soc_dai *dai)
@@ -750,34 +606,14 @@ static bool bcm2835_i2s_precious_reg(struct device *dev, unsigned int reg)
        };
 }
 
-static bool bcm2835_clk_volatile_reg(struct device *dev, unsigned int reg)
-{
-       switch (reg) {
-       case BCM2835_CLK_PCMCTL_REG:
-               return true;
-       default:
-               return false;
-       };
-}
-
-static const struct regmap_config bcm2835_regmap_config[] = {
-       {
-               .reg_bits = 32,
-               .reg_stride = 4,
-               .val_bits = 32,
-               .max_register = BCM2835_I2S_GRAY_REG,
-               .precious_reg = bcm2835_i2s_precious_reg,
-               .volatile_reg = bcm2835_i2s_volatile_reg,
-               .cache_type = REGCACHE_RBTREE,
-       },
-       {
-               .reg_bits = 32,
-               .reg_stride = 4,
-               .val_bits = 32,
-               .max_register = BCM2835_CLK_PCMDIV_REG,
-               .volatile_reg = bcm2835_clk_volatile_reg,
-               .cache_type = REGCACHE_RBTREE,
-       },
+static const struct regmap_config bcm2835_regmap_config = {
+       .reg_bits = 32,
+       .reg_stride = 4,
+       .val_bits = 32,
+       .max_register = BCM2835_I2S_GRAY_REG,
+       .precious_reg = bcm2835_i2s_precious_reg,
+       .volatile_reg = bcm2835_i2s_volatile_reg,
+       .cache_type = REGCACHE_RBTREE,
 };
 
 static const struct snd_soc_component_driver bcm2835_i2s_component = {
@@ -787,42 +623,50 @@ static const struct snd_soc_component_driver bcm2835_i2s_component = {
 static int bcm2835_i2s_probe(struct platform_device *pdev)
 {
        struct bcm2835_i2s_dev *dev;
-       int i;
        int ret;
-       struct regmap *regmap[2];
-       struct resource *mem[2];
-
-       /* Request both ioareas */
-       for (i = 0; i <= 1; i++) {
-               void __iomem *base;
-
-               mem[i] = platform_get_resource(pdev, IORESOURCE_MEM, i);
-               base = devm_ioremap_resource(&pdev->dev, mem[i]);
-               if (IS_ERR(base))
-                       return PTR_ERR(base);
-
-               regmap[i] = devm_regmap_init_mmio(&pdev->dev, base,
-                                           &bcm2835_regmap_config[i]);
-               if (IS_ERR(regmap[i]))
-                       return PTR_ERR(regmap[i]);
-       }
+       struct resource *mem;
+       void __iomem *base;
+       const __be32 *addr;
+       dma_addr_t dma_base;
 
        dev = devm_kzalloc(&pdev->dev, sizeof(*dev),
                           GFP_KERNEL);
        if (!dev)
                return -ENOMEM;
 
-       dev->i2s_regmap = regmap[0];
-       dev->clk_regmap = regmap[1];
+       /* get the clock */
+       dev->clk_prepared = false;
+       dev->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(dev->clk)) {
+               dev_err(&pdev->dev, "could not get clk: %ld\n",
+                       PTR_ERR(dev->clk));
+               return PTR_ERR(dev->clk);
+       }
+
+       /* Request ioarea */
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       dev->i2s_regmap = devm_regmap_init_mmio(&pdev->dev, base,
+                               &bcm2835_regmap_config);
+       if (IS_ERR(dev->i2s_regmap))
+               return PTR_ERR(dev->i2s_regmap);
+
+       /* Set the DMA address - we have to parse DT ourselves */
+       addr = of_get_address(pdev->dev.of_node, 0, NULL, NULL);
+       if (!addr) {
+               dev_err(&pdev->dev, "could not get DMA-register address\n");
+               return -EINVAL;
+       }
+       dma_base = be32_to_cpup(addr);
 
-       /* Set the DMA address */
        dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr =
-               (dma_addr_t)mem[0]->start + BCM2835_I2S_FIFO_A_REG
-                                         + BCM2835_VCMMU_SHIFT;
+               dma_base + BCM2835_I2S_FIFO_A_REG;
 
        dev->dma_data[SNDRV_PCM_STREAM_CAPTURE].addr =
-               (dma_addr_t)mem[0]->start + BCM2835_I2S_FIFO_A_REG
-                                         + BCM2835_VCMMU_SHIFT;
+               dma_base + BCM2835_I2S_FIFO_A_REG;
 
        /* Set the bus width */
        dev->dma_data[SNDRV_PCM_STREAM_PLAYBACK].addr_width =
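
In the bcm2835-i2s.c hunks above, the driver stops programming the PCM clock divider registers
itself and instead asks the clock framework for sampling_rate * bclk_ratio via clk_set_rate(),
with bclk_ratio defaulting to twice the sample width when the machine driver has not set one.
A standalone sketch of that target-rate calculation (i2s_bclk_rate() is a made-up helper, not
driver code):

    #include <stdio.h>

    /* Bit-clock rate requested from the clock framework: use the user-set
     * bclk_ratio if present, otherwise 2 * data_length (two channels of
     * data_length bits per frame).
     */
    static unsigned long i2s_bclk_rate(unsigned int sampling_rate,
                                       unsigned int data_length,
                                       unsigned int bclk_ratio)
    {
        if (!bclk_ratio)
            bclk_ratio = 2 * data_length;
        return (unsigned long)sampling_rate * bclk_ratio;
    }

    int main(void)
    {
        printf("%lu\n", i2s_bclk_rate(48000, 16, 0));   /* prints: 1536000 */
        printf("%lu\n", i2s_bclk_rate(44100, 32, 0));   /* prints: 2822400 */
        return 0;
    }
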
index 50693c867e71d06a2b6ccebf0d2f303c1ef9a0f5..4383966c13c4132505a984c51a11a8cb05b6c74d 100644 (file)
@@ -80,6 +80,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_MAX98095 if I2C
        select SND_SOC_MAX98357A if GPIOLIB
        select SND_SOC_MAX98925 if I2C
+       select SND_SOC_MAX98926 if I2C
        select SND_SOC_MAX9850 if I2C
        select SND_SOC_MAX9768 if I2C
        select SND_SOC_MAX9877 if I2C
@@ -87,7 +88,8 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_ML26124 if I2C
        select SND_SOC_NAU8825 if I2C
        select SND_SOC_PCM1681 if I2C
-       select SND_SOC_PCM179X if SPI_MASTER
+       select SND_SOC_PCM179X_I2C if I2C
+       select SND_SOC_PCM179X_SPI if SPI_MASTER
        select SND_SOC_PCM3008
        select SND_SOC_PCM3168A_I2C if I2C
        select SND_SOC_PCM3168A_SPI if SPI_MASTER
@@ -95,6 +97,7 @@ config SND_SOC_ALL_CODECS
        select SND_SOC_PCM512x_SPI if SPI_MASTER
        select SND_SOC_RT286 if I2C
        select SND_SOC_RT298 if I2C
+       select SND_SOC_RT5514 if I2C
        select SND_SOC_RT5616 if I2C
        select SND_SOC_RT5631 if I2C
        select SND_SOC_RT5640 if I2C
@@ -497,6 +500,7 @@ config SND_SOC_ICS43432
 
 config SND_SOC_INNO_RK3036
        tristate "Inno codec driver for RK3036 SoC"
+       select REGMAP_MMIO
 
 config SND_SOC_ISABELLE
         tristate
@@ -519,6 +523,9 @@ config SND_SOC_MAX98357A
 config SND_SOC_MAX98925
        tristate
 
+config SND_SOC_MAX98926
+       tristate
+
 config SND_SOC_MAX9850
        tristate
 
@@ -527,8 +534,23 @@ config SND_SOC_PCM1681
        depends on I2C
 
 config SND_SOC_PCM179X
-       tristate "Texas Instruments PCM179X CODEC"
+       tristate
+
+config SND_SOC_PCM179X_I2C
+       tristate "Texas Instruments PCM179X CODEC (I2C)"
+       depends on I2C
+       select SND_SOC_PCM179X
+       help
+         Enable support for Texas Instruments PCM179x CODEC.
+         Select this if your PCM179x is connected via an I2C bus.
+
+config SND_SOC_PCM179X_SPI
+       tristate "Texas Instruments PCM179X CODEC (SPI)"
        depends on SPI_MASTER
+       select SND_SOC_PCM179X
+       help
+         Enable support for Texas Instruments PCM179x CODEC.
+         Select this if your PCM179x is connected via an SPI bus.
 
 config SND_SOC_PCM3008
        tristate
@@ -565,6 +587,7 @@ config SND_SOC_PCM512x_SPI
 
 config SND_SOC_RL6231
        tristate
+       default y if SND_SOC_RT5514=y
        default y if SND_SOC_RT5616=y
        default y if SND_SOC_RT5640=y
        default y if SND_SOC_RT5645=y
@@ -572,6 +595,7 @@ config SND_SOC_RL6231
        default y if SND_SOC_RT5659=y
        default y if SND_SOC_RT5670=y
        default y if SND_SOC_RT5677=y
+       default m if SND_SOC_RT5514=m
        default m if SND_SOC_RT5616=m
        default m if SND_SOC_RT5640=m
        default m if SND_SOC_RT5645=m
@@ -595,6 +619,9 @@ config SND_SOC_RT298
        tristate
        depends on I2C
 
+config SND_SOC_RT5514
+       tristate
+
 config SND_SOC_RT5616
        tristate
 
index d44f7d347183cbe9a0aca29826b9c6477c52d6c4..cd05e702c89552420c21cec4f82ea77dabe5c52a 100644 (file)
@@ -75,12 +75,15 @@ snd-soc-max98090-objs := max98090.o
 snd-soc-max98095-objs := max98095.o
 snd-soc-max98357a-objs := max98357a.o
 snd-soc-max98925-objs := max98925.o
+snd-soc-max98926-objs := max98926.o
 snd-soc-max9850-objs := max9850.o
 snd-soc-mc13783-objs := mc13783.o
 snd-soc-ml26124-objs := ml26124.o
 snd-soc-nau8825-objs := nau8825.o
 snd-soc-pcm1681-objs := pcm1681.o
 snd-soc-pcm179x-codec-objs := pcm179x.o
+snd-soc-pcm179x-i2c-objs := pcm179x-i2c.o
+snd-soc-pcm179x-spi-objs := pcm179x-spi.o
 snd-soc-pcm3008-objs := pcm3008.o
 snd-soc-pcm3168a-objs := pcm3168a.o
 snd-soc-pcm3168a-i2c-objs := pcm3168a-i2c.o
@@ -92,6 +95,7 @@ snd-soc-rl6231-objs := rl6231.o
 snd-soc-rl6347a-objs := rl6347a.o
 snd-soc-rt286-objs := rt286.o
 snd-soc-rt298-objs := rt298.o
+snd-soc-rt5514-objs := rt5514.o
 snd-soc-rt5616-objs := rt5616.o
 snd-soc-rt5631-objs := rt5631.o
 snd-soc-rt5640-objs := rt5640.o
@@ -279,12 +283,15 @@ obj-$(CONFIG_SND_SOC_MAX98090)    += snd-soc-max98090.o
 obj-$(CONFIG_SND_SOC_MAX98095) += snd-soc-max98095.o
 obj-$(CONFIG_SND_SOC_MAX98357A)        += snd-soc-max98357a.o
 obj-$(CONFIG_SND_SOC_MAX98925) += snd-soc-max98925.o
+obj-$(CONFIG_SND_SOC_MAX98926) += snd-soc-max98926.o
 obj-$(CONFIG_SND_SOC_MAX9850)  += snd-soc-max9850.o
 obj-$(CONFIG_SND_SOC_MC13783)  += snd-soc-mc13783.o
 obj-$(CONFIG_SND_SOC_ML26124)  += snd-soc-ml26124.o
 obj-$(CONFIG_SND_SOC_NAU8825)   += snd-soc-nau8825.o
 obj-$(CONFIG_SND_SOC_PCM1681)  += snd-soc-pcm1681.o
 obj-$(CONFIG_SND_SOC_PCM179X)  += snd-soc-pcm179x-codec.o
+obj-$(CONFIG_SND_SOC_PCM179X_I2C)      += snd-soc-pcm179x-i2c.o
+obj-$(CONFIG_SND_SOC_PCM179X_SPI)      += snd-soc-pcm179x-spi.o
 obj-$(CONFIG_SND_SOC_PCM3008)  += snd-soc-pcm3008.o
 obj-$(CONFIG_SND_SOC_PCM3168A) += snd-soc-pcm3168a.o
 obj-$(CONFIG_SND_SOC_PCM3168A_I2C)     += snd-soc-pcm3168a-i2c.o
@@ -296,6 +303,7 @@ obj-$(CONFIG_SND_SOC_RL6231)        += snd-soc-rl6231.o
 obj-$(CONFIG_SND_SOC_RL6347A)  += snd-soc-rl6347a.o
 obj-$(CONFIG_SND_SOC_RT286)    += snd-soc-rt286.o
 obj-$(CONFIG_SND_SOC_RT298)    += snd-soc-rt298.o
+obj-$(CONFIG_SND_SOC_RT5514)   += snd-soc-rt5514.o
 obj-$(CONFIG_SND_SOC_RT5616)   += snd-soc-rt5616.o
 obj-$(CONFIG_SND_SOC_RT5631)   += snd-soc-rt5631.o
 obj-$(CONFIG_SND_SOC_RT5640)   += snd-soc-rt5640.o
index affb192238a403b88dc38f3e76e0c54440779ea0..b6820a1f16c7dc051336716e124e06f1a854c500 100644 (file)
@@ -2134,7 +2134,6 @@ static int ab8500_codec_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
                        "%s: ERROR: Unsupporter master mask 0x%x\n",
                        __func__, fmt & SND_SOC_DAIFMT_MASTER_MASK);
                return -EINVAL;
-               break;
        }
 
        snd_soc_update_bits(codec, AB8500_DIGIFCONF3, mask, val);
index 2f12477e539ebe36f90af0218ee83031b0ac178d..e7136b1956a3f80c11a725614c205a5f39869240 100644 (file)
@@ -456,13 +456,17 @@ static int adau1761_set_bias_level(struct snd_soc_codec *codec,
        case SND_SOC_BIAS_PREPARE:
                break;
        case SND_SOC_BIAS_STANDBY:
+               regcache_cache_only(adau->regmap, false);
                regmap_update_bits(adau->regmap, ADAU17X1_CLOCK_CONTROL,
                        ADAU17X1_CLOCK_CONTROL_SYSCLK_EN,
                        ADAU17X1_CLOCK_CONTROL_SYSCLK_EN);
+               if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF)
+                       regcache_sync(adau->regmap);
                break;
        case SND_SOC_BIAS_OFF:
                regmap_update_bits(adau->regmap, ADAU17X1_CLOCK_CONTROL,
                        ADAU17X1_CLOCK_CONTROL_SYSCLK_EN, 0);
+               regcache_cache_only(adau->regmap, true);
                break;
 
        }
@@ -783,6 +787,10 @@ int adau1761_probe(struct device *dev, struct regmap *regmap,
        if (ret)
                return ret;
 
+       /* Enable cache-only mode, as writes could otherwise be lost before
+        * the bias level reaches standby and the core clock is enabled */
+       regcache_cache_only(regmap, true);
+
        return snd_soc_register_codec(dev, &adau1761_codec_driver, dai_drv, 1);
 }
 EXPORT_SYMBOL_GPL(adau1761_probe);
index 33143fe1de0bdeaa0c6b04bc9bfd159b49bf658b..913cfa8b03e6db19ce7a9a50efc34c319dd62137 100644 (file)
@@ -1398,29 +1398,6 @@ static const int arizona_48k_bclk_rates[] = {
        24576000,
 };
 
-static const unsigned int arizona_48k_rates[] = {
-       12000,
-       24000,
-       48000,
-       96000,
-       192000,
-       384000,
-       768000,
-       4000,
-       8000,
-       16000,
-       32000,
-       64000,
-       128000,
-       256000,
-       512000,
-};
-
-static const struct snd_pcm_hw_constraint_list arizona_48k_constraint = {
-       .count  = ARRAY_SIZE(arizona_48k_rates),
-       .list   = arizona_48k_rates,
-};
-
 static const int arizona_44k1_bclk_rates[] = {
        -1,
        44100,
@@ -1443,22 +1420,7 @@ static const int arizona_44k1_bclk_rates[] = {
        22579200,
 };
 
-static const unsigned int arizona_44k1_rates[] = {
-       11025,
-       22050,
-       44100,
-       88200,
-       176400,
-       352800,
-       705600,
-};
-
-static const struct snd_pcm_hw_constraint_list arizona_44k1_constraint = {
-       .count  = ARRAY_SIZE(arizona_44k1_rates),
-       .list   = arizona_44k1_rates,
-};
-
-static int arizona_sr_vals[] = {
+static const unsigned int arizona_sr_vals[] = {
        0,
        12000,
        24000,
@@ -1485,13 +1447,21 @@ static int arizona_sr_vals[] = {
        512000,
 };
 
+#define ARIZONA_48K_RATE_MASK  0x0F003E
+#define ARIZONA_44K1_RATE_MASK 0x003E00
+#define ARIZONA_RATE_MASK      (ARIZONA_48K_RATE_MASK | ARIZONA_44K1_RATE_MASK)
+
+static const struct snd_pcm_hw_constraint_list arizona_constraint = {
+       .count  = ARRAY_SIZE(arizona_sr_vals),
+       .list   = arizona_sr_vals,
+};
+
 static int arizona_startup(struct snd_pcm_substream *substream,
                           struct snd_soc_dai *dai)
 {
        struct snd_soc_codec *codec = dai->codec;
        struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
        struct arizona_dai_priv *dai_priv = &priv->dai[dai->id - 1];
-       const struct snd_pcm_hw_constraint_list *constraint;
        unsigned int base_rate;
 
        if (!substream->runtime)
@@ -1509,16 +1479,15 @@ static int arizona_startup(struct snd_pcm_substream *substream,
        }
 
        if (base_rate == 0)
-               return 0;
-
-       if (base_rate % 8000)
-               constraint = &arizona_44k1_constraint;
+               dai_priv->constraint.mask = ARIZONA_RATE_MASK;
+       else if (base_rate % 8000)
+               dai_priv->constraint.mask = ARIZONA_44K1_RATE_MASK;
        else
-               constraint = &arizona_48k_constraint;
+               dai_priv->constraint.mask = ARIZONA_48K_RATE_MASK;
 
        return snd_pcm_hw_constraint_list(substream->runtime, 0,
                                          SNDRV_PCM_HW_PARAM_RATE,
-                                         constraint);
+                                         &dai_priv->constraint);
 }
 
 static void arizona_wm5102_set_dac_comp(struct snd_soc_codec *codec,
@@ -1911,6 +1880,7 @@ int arizona_init_dai(struct arizona_priv *priv, int id)
        struct arizona_dai_priv *dai_priv = &priv->dai[id];
 
        dai_priv->clk = ARIZONA_CLK_SYSCLK;
+       dai_priv->constraint = arizona_constraint;
 
        return 0;
 }
@@ -1929,6 +1899,25 @@ static struct {
        { 1000000, 13500000, 0,  1 },
 };
 
+static const unsigned int pseudo_fref_max[ARIZONA_FLL_MAX_FRATIO] = {
+       13500000,
+        6144000,
+        6144000,
+        3072000,
+        3072000,
+        2822400,
+        2822400,
+        1536000,
+        1536000,
+        1536000,
+        1536000,
+        1536000,
+        1536000,
+        1536000,
+        1536000,
+         768000,
+};
+
 static struct {
        unsigned int min;
        unsigned int max;
@@ -2042,16 +2031,32 @@ static int arizona_calc_fratio(struct arizona_fll *fll,
        /* Adjust FRATIO/refdiv to avoid integer mode if possible */
        refdiv = cfg->refdiv;
 
+       arizona_fll_dbg(fll, "pseudo: initial ratio=%u fref=%u refdiv=%u\n",
+                       init_ratio, Fref, refdiv);
+
        while (div <= ARIZONA_FLL_MAX_REFDIV) {
                for (ratio = init_ratio; ratio <= ARIZONA_FLL_MAX_FRATIO;
                     ratio++) {
                        if ((ARIZONA_FLL_VCO_CORNER / 2) /
-                           (fll->vco_mult * ratio) < Fref)
+                           (fll->vco_mult * ratio) < Fref) {
+                               arizona_fll_dbg(fll, "pseudo: hit VCO corner\n");
                                break;
+                       }
+
+                       if (Fref > pseudo_fref_max[ratio - 1]) {
+                               arizona_fll_dbg(fll,
+                                       "pseudo: exceeded max fref(%u) for ratio=%u\n",
+                                       pseudo_fref_max[ratio - 1],
+                                       ratio);
+                               break;
+                       }
 
                        if (target % (ratio * Fref)) {
                                cfg->refdiv = refdiv;
                                cfg->fratio = ratio - 1;
+                               arizona_fll_dbg(fll,
+                                       "pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
+                                       Fref, refdiv, div, ratio);
                                return ratio;
                        }
                }
@@ -2060,6 +2065,9 @@ static int arizona_calc_fratio(struct arizona_fll *fll,
                        if (target % (ratio * Fref)) {
                                cfg->refdiv = refdiv;
                                cfg->fratio = ratio - 1;
+                               arizona_fll_dbg(fll,
+                                       "pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
+                                       Fref, refdiv, div, ratio);
                                return ratio;
                        }
                }
@@ -2068,6 +2076,9 @@ static int arizona_calc_fratio(struct arizona_fll *fll,
                Fref /= 2;
                refdiv++;
                init_ratio = arizona_find_fratio(Fref, NULL);
+               arizona_fll_dbg(fll,
+                               "pseudo: change fref=%u refdiv=%d(%d) ratio=%u\n",
+                               Fref, refdiv, div, init_ratio);
        }
 
        arizona_fll_warn(fll, "Falling back to integer mode operation\n");
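
In the arizona.c hunks above, the new pseudo_fref_max[] table caps the reference frequency
allowed for each FRATIO while arizona_calc_fratio() searches for a fractional ("pseudo") FLL
configuration; ratios whose cap is exceeded are skipped before falling back to integer mode.
A standalone sketch of that per-ratio bound check, with the table values copied from the hunk
(pseudo_ratio_ok() is a made-up name, and the table size of 16 is inferred from the hunk):

    #include <stdbool.h>
    #include <stdio.h>

    #define FLL_MAX_FRATIO 16

    /* Per-ratio maximum reference frequency, indexed by (ratio - 1). */
    static const unsigned int pseudo_fref_max[FLL_MAX_FRATIO] = {
        13500000, 6144000, 6144000, 3072000, 3072000, 2822400, 2822400,
         1536000, 1536000, 1536000, 1536000, 1536000, 1536000, 1536000,
         1536000,  768000,
    };

    /* true if this (fref, ratio) pair may be used in pseudo-fractional mode */
    static bool pseudo_ratio_ok(unsigned int fref, unsigned int ratio)
    {
        if (ratio < 1 || ratio > FLL_MAX_FRATIO)
            return false;
        return fref <= pseudo_fref_max[ratio - 1];
    }

    int main(void)
    {
        printf("%d\n", pseudo_ratio_ok(12288000, 1));   /* 1: under the 13.5 MHz cap */
        printf("%d\n", pseudo_ratio_ok(12288000, 2));   /* 0: over the 6.144 MHz cap */
        return 0;
    }
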
index 8b6adb5419bb896482139426167752a22aa4df57..1ea8e4ecf8d41bbf9d3a792f4d88cf66383052a4 100644 (file)
@@ -57,7 +57,7 @@
 #define ARIZONA_CLK_98MHZ  5
 #define ARIZONA_CLK_147MHZ 6
 
-#define ARIZONA_MAX_DAI  8
+#define ARIZONA_MAX_DAI  10
 #define ARIZONA_MAX_ADSP 4
 
 #define ARIZONA_DVFS_SR1_RQ    0x001
@@ -68,6 +68,8 @@ struct wm_adsp;
 
 struct arizona_dai_priv {
        int clk;
+
+       struct snd_pcm_hw_constraint_list constraint;
 };
 
 struct arizona_priv {
index d562e1b9a5d163bb22e990280b8f2e396e584846..1179101b2b055a639e711049dc389d34e999a011 100644 (file)
@@ -44,6 +44,7 @@ struct cs42xx8_priv {
 
        bool slave_mode;
        unsigned long sysclk;
+       u32 tx_channels;
 };
 
 /* -127.5dB to 0dB with step of 0.5dB */
@@ -257,6 +258,9 @@ static int cs42xx8_hw_params(struct snd_pcm_substream *substream,
        u32 ratio = cs42xx8->sysclk / params_rate(params);
        u32 i, fm, val, mask;
 
+       if (tx)
+               cs42xx8->tx_channels = params_channels(params);
+
        for (i = 0; i < ARRAY_SIZE(cs42xx8_ratios); i++) {
                if (cs42xx8_ratios[i].ratio == ratio)
                        break;
@@ -283,9 +287,11 @@ static int cs42xx8_digital_mute(struct snd_soc_dai *dai, int mute)
 {
        struct snd_soc_codec *codec = dai->codec;
        struct cs42xx8_priv *cs42xx8 = snd_soc_codec_get_drvdata(codec);
+       u8 dac_unmute = cs42xx8->tx_channels ?
+                       ~((0x1 << cs42xx8->tx_channels) - 1) : 0;
 
-       regmap_update_bits(cs42xx8->regmap, CS42XX8_DACMUTE,
-                          CS42XX8_DACMUTE_ALL, mute ? CS42XX8_DACMUTE_ALL : 0);
+       regmap_write(cs42xx8->regmap, CS42XX8_DACMUTE,
+                    mute ? CS42XX8_DACMUTE_ALL : dac_unmute);
 
        return 0;
 }
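
In the cs42xx8.c hunk above, digital unmute no longer clears the whole DACMUTE register
unconditionally: only the DACs backing the channel count recorded at hw_params time are
unmuted. A standalone sketch of that mask computation (dacmute_value() is a made-up name; the
"mute all" register value is assumed to be 0xff, one bit per DAC):

    #include <stdio.h>
    #include <stdint.h>

    /* Value written to the DAC mute register: set a bit to mute that DAC.
     * On unmute with N active channels, clear bits 0..N-1 and keep the rest
     * muted; with no recorded channel count, unmute everything.
     */
    static uint8_t dacmute_value(int mute, unsigned int tx_channels)
    {
        uint8_t all_muted = 0xff;
        uint8_t unmute = tx_channels ? (uint8_t)~((1u << tx_channels) - 1) : 0;

        return mute ? all_muted : unmute;
    }

    int main(void)
    {
        printf("0x%02x\n", dacmute_value(0, 2));    /* 0xfc: DAC1/2 unmuted */
        printf("0x%02x\n", dacmute_value(0, 8));    /* 0x00: all unmuted */
        printf("0x%02x\n", dacmute_value(1, 4));    /* 0xff: all muted */
        return 0;
    }
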
index dc5ae7f7a1bd77f64e480a0346877358670ffac9..576087bda330ce840bdb4f90777ee7c9f1d580d9 100644 (file)
@@ -57,6 +57,25 @@ static const struct wm_adsp_region *cs47l24_dsp_regions[] = {
        cs47l24_dsp3_regions,
 };
 
+static int cs47l24_adsp_power_ev(struct snd_soc_dapm_widget *w,
+                                struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+       unsigned int v;
+       int ret;
+
+       ret = regmap_read(arizona->regmap, ARIZONA_SYSTEM_CLOCK_1, &v);
+       if (ret != 0) {
+               dev_err(codec->dev, "Failed to read SYSCLK state: %d\n", ret);
+               return ret;
+       }
+
+       v = (v & ARIZONA_SYSCLK_FREQ_MASK) >> ARIZONA_SYSCLK_FREQ_SHIFT;
+
+       return wm_adsp2_early_event(w, kcontrol, event, v);
+}
+
 static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
 static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
 static DECLARE_TLV_DB_SCALE(noise_tlv, -13200, 600, 0);
@@ -405,8 +424,8 @@ SND_SOC_DAPM_PGA("ASRC2L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2L_ENA_SHIFT, 0,
 SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0,
                 NULL, 0),
 
-WM_ADSP2("DSP2", 1),
-WM_ADSP2("DSP3", 2),
+WM_ADSP2("DSP2", 1, cs47l24_adsp_power_ev),
+WM_ADSP2("DSP3", 2, cs47l24_adsp_power_ev),
 
 SND_SOC_DAPM_PGA("ISRC1INT1", ARIZONA_ISRC_1_CTRL_3,
                 ARIZONA_ISRC1_INT0_ENA_SHIFT, 0, NULL, 0),
@@ -779,6 +798,9 @@ static const struct snd_soc_dapm_route cs47l24_dapm_routes[] = {
        { "AIF2 Capture", NULL, "SYSCLK" },
        { "AIF3 Capture", NULL, "SYSCLK" },
 
+       { "Voice Control DSP", NULL, "DSP3" },
+       { "Voice Control DSP", NULL, "SYSCLK" },
+
        { "IN1L PGA", NULL, "IN1L" },
        { "IN1R PGA", NULL, "IN1R" },
 
@@ -901,7 +923,7 @@ static int cs47l24_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
        }
 }
 
-#define CS47L24_RATES SNDRV_PCM_RATE_8000_192000
+#define CS47L24_RATES SNDRV_PCM_RATE_KNOT
 
 #define CS47L24_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
                         SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
@@ -973,12 +995,68 @@ static struct snd_soc_dai_driver cs47l24_dai[] = {
                .symmetric_rates = 1,
                .symmetric_samplebits = 1,
        },
+       {
+               .name = "cs47l24-cpu-voicectrl",
+               .capture = {
+                       .stream_name = "Voice Control CPU",
+                       .channels_min = 1,
+                       .channels_max = 1,
+                       .rates = CS47L24_RATES,
+                       .formats = CS47L24_FORMATS,
+               },
+               .compress_new = snd_soc_new_compress,
+       },
+       {
+               .name = "cs47l24-dsp-voicectrl",
+               .capture = {
+                       .stream_name = "Voice Control DSP",
+                       .channels_min = 1,
+                       .channels_max = 1,
+                       .rates = CS47L24_RATES,
+                       .formats = CS47L24_FORMATS,
+               },
+       },
 };
 
+static int cs47l24_open(struct snd_compr_stream *stream)
+{
+       struct snd_soc_pcm_runtime *rtd = stream->private_data;
+       struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(rtd->codec);
+       struct arizona *arizona = priv->core.arizona;
+       int n_adsp;
+
+       if (strcmp(rtd->codec_dai->name, "cs47l24-dsp-voicectrl") == 0) {
+               n_adsp = 2;
+       } else {
+               dev_err(arizona->dev,
+                       "No suitable compressed stream for DAI '%s'\n",
+                       rtd->codec_dai->name);
+               return -EINVAL;
+       }
+
+       return wm_adsp_compr_open(&priv->core.adsp[n_adsp], stream);
+}
+
+static irqreturn_t cs47l24_adsp2_irq(int irq, void *data)
+{
+       struct cs47l24_priv *priv = data;
+       struct arizona *arizona = priv->core.arizona;
+       int ret;
+
+       ret = wm_adsp_compr_handle_irq(&priv->core.adsp[2]);
+       if (ret == -ENODEV) {
+               dev_err(arizona->dev, "Spurious compressed data IRQ\n");
+               return IRQ_NONE;
+       }
+
+       return IRQ_HANDLED;
+}
+
 static int cs47l24_codec_probe(struct snd_soc_codec *codec)
 {
        struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
        struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = priv->core.arizona;
        int ret;
 
        priv->core.arizona->dapm = dapm;
@@ -987,6 +1065,14 @@ static int cs47l24_codec_probe(struct snd_soc_codec *codec)
        arizona_init_gpio(codec);
        arizona_init_mono(codec);
 
+       ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
+                                 "ADSP2 Compressed IRQ", cs47l24_adsp2_irq,
+                                 priv);
+       if (ret != 0) {
+               dev_err(codec->dev, "Failed to request DSP IRQ: %d\n", ret);
+               return ret;
+       }
+
        ret = wm_adsp2_codec_probe(&priv->core.adsp[1], codec);
        if (ret)
                goto err_adsp2_codec_probe;
@@ -1014,13 +1100,14 @@ err_adsp2_codec_probe:
 static int cs47l24_codec_remove(struct snd_soc_codec *codec)
 {
        struct cs47l24_priv *priv = snd_soc_codec_get_drvdata(codec);
-
+       struct arizona *arizona = priv->core.arizona;
 
        wm_adsp2_codec_remove(&priv->core.adsp[1], codec);
        wm_adsp2_codec_remove(&priv->core.adsp[2], codec);
 
        priv->core.arizona->dapm = NULL;
 
+       arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
        return 0;
 }
 
@@ -1057,6 +1144,19 @@ static struct snd_soc_codec_driver soc_codec_dev_cs47l24 = {
        .num_dapm_routes = ARRAY_SIZE(cs47l24_dapm_routes),
 };
 
+static struct snd_compr_ops cs47l24_compr_ops = {
+       .open = cs47l24_open,
+       .free = wm_adsp_compr_free,
+       .set_params = wm_adsp_compr_set_params,
+       .get_caps = wm_adsp_compr_get_caps,
+       .trigger = wm_adsp_compr_trigger,
+       .pointer = wm_adsp_compr_pointer,
+       .copy = wm_adsp_compr_copy,
+};
+
+static struct snd_soc_platform_driver cs47l24_compr_platform = {
+       .compr_ops = &cs47l24_compr_ops,
+};
 static int cs47l24_probe(struct platform_device *pdev)
 {
        struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
@@ -1120,12 +1220,25 @@ static int cs47l24_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        pm_runtime_idle(&pdev->dev);
 
-       return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_cs47l24,
+       ret = snd_soc_register_platform(&pdev->dev, &cs47l24_compr_platform);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
+               return ret;
+       }
+       ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_cs47l24,
                                      cs47l24_dai, ARRAY_SIZE(cs47l24_dai));
+
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
+               snd_soc_unregister_platform(&pdev->dev);
+       }
+
+       return ret;
 }
 
 static int cs47l24_remove(struct platform_device *pdev)
 {
+       snd_soc_unregister_platform(&pdev->dev);
        snd_soc_unregister_codec(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
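The cs47l24 probe/remove changes above register the compressed-audio platform before the codec and unwind it again if codec registration fails (and on remove), so probe never exits half-registered. A minimal sketch of that ordering with stand-in functions rather than the ASoC APIs; the simulated codec failure is illustrative only:

/*
 * Illustrative only: register A, then B; a failure in B undoes A
 * before returning, mirroring cs47l24_probe()/cs47l24_remove().
 */
#include <stdio.h>

static int register_platform(void)
{
	puts("platform registered");
	return 0;
}

static int register_codec(void)
{
	puts("codec registration attempted");
	return -1;	/* simulated failure for the sketch */
}

static void unregister_platform(void)
{
	puts("platform unregistered");
}

static int probe(void)
{
	int ret = register_platform();

	if (ret < 0)
		return ret;

	ret = register_codec();
	if (ret < 0) {
		/* codec failed: undo the platform step before bailing out */
		unregister_platform();
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("probe() -> %d\n", probe());
	return 0;
}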
 
diff --git a/sound/soc/codecs/max98926.c b/sound/soc/codecs/max98926.c
new file mode 100644 (file)
index 0000000..66884eb
--- /dev/null
@@ -0,0 +1,604 @@
+/*
+ * max98926.c -- ALSA SoC MAX98926 driver
+ * Copyright 2013-15 Maxim Integrated Products
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include "max98926.h"
+
+static const char * const max98926_boost_voltage_txt[] = {
+       "8.5V", "8.25V", "8.0V", "7.75V", "7.5V", "7.25V", "7.0V", "6.75V",
+       "6.5V", "6.5V", "6.5V", "6.5V", "6.5V", "6.5V", "6.5V", "6.5V"
+};
+
+static const char * const max98926_boost_current_txt[] = {
+       "0.6", "0.8", "1.0", "1.2", "1.4", "1.6", "1.8", "2.0",
+       "2.2", "2.4", "2.6", "2.8", "3.2", "3.6", "4.0", "4.4"
+};
+
+static const char *const max98926_dai_txt[] = {
+       "Left", "Right", "LeftRight", "LeftRightDiv2",
+};
+
+static const char *const max98926_pdm_ch_text[] = {
+       "Current", "Voltage",
+};
+
+static const char *const max98926_hpf_cutoff_txt[] = {
+       "Disable", "DC Block", "100Hz",
+       "200Hz", "400Hz", "800Hz",
+};
+
+static struct reg_default max98926_reg[] = {
+       { 0x0B, 0x00 }, /* IRQ Enable0 */
+       { 0x0C, 0x00 }, /* IRQ Enable1 */
+       { 0x0D, 0x00 }, /* IRQ Enable2 */
+       { 0x0E, 0x00 }, /* IRQ Clear0 */
+       { 0x0F, 0x00 }, /* IRQ Clear1 */
+       { 0x10, 0x00 }, /* IRQ Clear2 */
+       { 0x11, 0xC0 }, /* Map0 */
+       { 0x12, 0x00 }, /* Map1 */
+       { 0x13, 0x00 }, /* Map2 */
+       { 0x14, 0xF0 }, /* Map3 */
+       { 0x15, 0x00 }, /* Map4 */
+       { 0x16, 0xAB }, /* Map5 */
+       { 0x17, 0x89 }, /* Map6 */
+       { 0x18, 0x00 }, /* Map7 */
+       { 0x19, 0x00 }, /* Map8 */
+       { 0x1A, 0x04 }, /* DAI Clock Mode 1 */
+       { 0x1B, 0x00 }, /* DAI Clock Mode 2 */
+       { 0x1C, 0x00 }, /* DAI Clock Divider Denominator MSBs */
+       { 0x1D, 0x00 }, /* DAI Clock Divider Denominator LSBs */
+       { 0x1E, 0xF0 }, /* DAI Clock Divider Numerator MSBs */
+       { 0x1F, 0x00 }, /* DAI Clock Divider Numerator LSBs */
+       { 0x20, 0x50 }, /* Format */
+       { 0x21, 0x00 }, /* TDM Slot Select */
+       { 0x22, 0x00 }, /* DOUT Configuration VMON */
+       { 0x23, 0x00 }, /* DOUT Configuration IMON */
+       { 0x24, 0x00 }, /* DOUT Configuration VBAT */
+       { 0x25, 0x00 }, /* DOUT Configuration VBST */
+       { 0x26, 0x00 }, /* DOUT Configuration FLAG */
+       { 0x27, 0xFF }, /* DOUT HiZ Configuration 1 */
+       { 0x28, 0xFF }, /* DOUT HiZ Configuration 2 */
+       { 0x29, 0xFF }, /* DOUT HiZ Configuration 3 */
+       { 0x2A, 0xFF }, /* DOUT HiZ Configuration 4 */
+       { 0x2B, 0x02 }, /* DOUT Drive Strength */
+       { 0x2C, 0x90 }, /* Filters */
+       { 0x2D, 0x00 }, /* Gain */
+       { 0x2E, 0x02 }, /* Gain Ramping */
+       { 0x2F, 0x00 }, /* Speaker Amplifier */
+       { 0x30, 0x0A }, /* Threshold */
+       { 0x31, 0x00 }, /* ALC Attack */
+       { 0x32, 0x80 }, /* ALC Atten and Release */
+       { 0x33, 0x00 }, /* ALC Infinite Hold Release */
+       { 0x34, 0x92 }, /* ALC Configuration */
+       { 0x35, 0x01 }, /* Boost Converter */
+       { 0x36, 0x00 }, /* Block Enable */
+       { 0x37, 0x00 }, /* Configuration */
+       { 0x38, 0x00 }, /* Global Enable */
+       { 0x3A, 0x00 }, /* Boost Limiter */
+};
+
+static const struct soc_enum max98926_voltage_enum[] = {
+       SOC_ENUM_SINGLE(MAX98926_DAI_CLK_DIV_N_LSBS, 0,
+               ARRAY_SIZE(max98926_pdm_ch_text),
+               max98926_pdm_ch_text),
+};
+
+static const struct snd_kcontrol_new max98926_voltage_control =
+       SOC_DAPM_ENUM("Route", max98926_voltage_enum);
+
+static const struct soc_enum max98926_current_enum[] = {
+       SOC_ENUM_SINGLE(MAX98926_DAI_CLK_DIV_N_LSBS,
+               MAX98926_PDM_SOURCE_1_SHIFT,
+               ARRAY_SIZE(max98926_pdm_ch_text),
+               max98926_pdm_ch_text),
+};
+
+static const struct snd_kcontrol_new max98926_current_control =
+       SOC_DAPM_ENUM("Route", max98926_current_enum);
+
+static const struct snd_kcontrol_new max98926_mixer_controls[] = {
+       SOC_DAPM_SINGLE("PCM Single Switch", MAX98926_SPK_AMP,
+               MAX98926_INSELECT_MODE_SHIFT, 0, 0),
+       SOC_DAPM_SINGLE("PDM Single Switch", MAX98926_SPK_AMP,
+               MAX98926_INSELECT_MODE_SHIFT, 1, 0),
+};
+
+static const struct snd_kcontrol_new max98926_dai_controls[] = {
+       SOC_DAPM_SINGLE("Left", MAX98926_GAIN,
+               MAX98926_DAC_IN_SEL_SHIFT, 0, 0),
+       SOC_DAPM_SINGLE("Right", MAX98926_GAIN,
+               MAX98926_DAC_IN_SEL_SHIFT, 1, 0),
+       SOC_DAPM_SINGLE("LeftRight", MAX98926_GAIN,
+               MAX98926_DAC_IN_SEL_SHIFT, 2, 0),
+       SOC_DAPM_SINGLE("(Left+Right)/2 Switch", MAX98926_GAIN,
+               MAX98926_DAC_IN_SEL_SHIFT, 3, 0),
+};
+
+static const struct snd_soc_dapm_widget max98926_dapm_widgets[] = {
+       SND_SOC_DAPM_AIF_IN("DAI_OUT", "HiFi Playback", 0,
+               SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_DAC("Amp Enable", NULL, MAX98926_BLOCK_ENABLE,
+               MAX98926_SPK_EN_SHIFT, 0),
+       SND_SOC_DAPM_SUPPLY("Global Enable", MAX98926_GLOBAL_ENABLE,
+               MAX98926_EN_SHIFT, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("VI Enable", MAX98926_BLOCK_ENABLE,
+               MAX98926_ADC_IMON_EN_WIDTH |
+               MAX98926_ADC_VMON_EN_SHIFT,
+               0, NULL, 0),
+       SND_SOC_DAPM_PGA("BST Enable", MAX98926_BLOCK_ENABLE,
+               MAX98926_BST_EN_SHIFT, 0, NULL, 0),
+       SND_SOC_DAPM_OUTPUT("BE_OUT"),
+       SND_SOC_DAPM_MIXER("PCM Sel", MAX98926_SPK_AMP,
+               MAX98926_INSELECT_MODE_SHIFT, 0,
+               &max98926_mixer_controls[0],
+               ARRAY_SIZE(max98926_mixer_controls)),
+       SND_SOC_DAPM_MIXER("DAI Sel",
+               MAX98926_GAIN, MAX98926_DAC_IN_SEL_SHIFT, 0,
+               &max98926_dai_controls[0],
+               ARRAY_SIZE(max98926_dai_controls)),
+       SND_SOC_DAPM_MUX("PDM CH1 Source",
+               MAX98926_DAI_CLK_DIV_N_LSBS,
+               MAX98926_PDM_CURRENT_SHIFT,
+               0, &max98926_current_control),
+       SND_SOC_DAPM_MUX("PDM CH0 Source",
+               MAX98926_DAI_CLK_DIV_N_LSBS,
+               MAX98926_PDM_VOLTAGE_SHIFT,
+               0, &max98926_voltage_control),
+};
+
+static const struct snd_soc_dapm_route max98926_audio_map[] = {
+       {"VI Enable", NULL, "DAI_OUT"},
+       {"DAI Sel", "Left", "VI Enable"},
+       {"DAI Sel", "Right", "VI Enable"},
+       {"DAI Sel", "LeftRight", "VI Enable"},
+       {"DAI Sel", "LeftRightDiv2", "VI Enable"},
+       {"PCM Sel", "PCM", "DAI Sel"},
+
+       {"PDM CH1 Source", "Current", "DAI_OUT"},
+       {"PDM CH1 Source", "Voltage", "DAI_OUT"},
+       {"PDM CH0 Source", "Current", "DAI_OUT"},
+       {"PDM CH0 Source", "Voltage", "DAI_OUT"},
+       {"PCM Sel", "Analog", "PDM CH1 Source"},
+       {"PCM Sel", "Analog", "PDM CH0 Source"},
+       {"Amp Enable", NULL, "PCM Sel"},
+
+       {"BST Enable", NULL, "Amp Enable"},
+       {"BE_OUT", NULL, "BST Enable"},
+};
+
+static bool max98926_volatile_register(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case MAX98926_VBAT_DATA:
+       case MAX98926_VBST_DATA:
+       case MAX98926_LIVE_STATUS0:
+       case MAX98926_LIVE_STATUS1:
+       case MAX98926_LIVE_STATUS2:
+       case MAX98926_STATE0:
+       case MAX98926_STATE1:
+       case MAX98926_STATE2:
+       case MAX98926_FLAG0:
+       case MAX98926_FLAG1:
+       case MAX98926_FLAG2:
+       case MAX98926_VERSION:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static bool max98926_readable_register(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case MAX98926_IRQ_CLEAR0:
+       case MAX98926_IRQ_CLEAR1:
+       case MAX98926_IRQ_CLEAR2:
+       case MAX98926_ALC_HOLD_RLS:
+               return false;
+       default:
+               return true;
+       }
+};
+
+DECLARE_TLV_DB_SCALE(max98926_spk_tlv, -600, 100, 0);
+DECLARE_TLV_DB_RANGE(max98926_current_tlv,
+       0, 11, TLV_DB_SCALE_ITEM(20, 20, 0),
+       12, 15, TLV_DB_SCALE_ITEM(320, 40, 0),
+);
+
+static SOC_ENUM_SINGLE_DECL(max98926_dac_hpf_cutoff,
+               MAX98926_FILTERS, MAX98926_DAC_HPF_SHIFT,
+               max98926_hpf_cutoff_txt);
+
+static SOC_ENUM_SINGLE_DECL(max98926_boost_voltage,
+               MAX98926_CONFIGURATION, MAX98926_BST_VOUT_SHIFT,
+               max98926_boost_voltage_txt);
+
+static const struct snd_kcontrol_new max98926_snd_controls[] = {
+       SOC_SINGLE_TLV("Speaker Volume", MAX98926_GAIN,
+               MAX98926_SPK_GAIN_SHIFT,
+               (1<<MAX98926_SPK_GAIN_WIDTH)-1, 0,
+               max98926_spk_tlv),
+       SOC_SINGLE("Ramp Switch", MAX98926_GAIN_RAMPING,
+               MAX98926_SPK_RMP_EN_SHIFT, 1, 0),
+       SOC_SINGLE("ZCD Switch", MAX98926_GAIN_RAMPING,
+               MAX98926_SPK_ZCD_EN_SHIFT, 1, 0),
+       SOC_SINGLE("ALC Switch", MAX98926_THRESHOLD,
+               MAX98926_ALC_EN_SHIFT, 1, 0),
+       SOC_SINGLE("ALC Threshold", MAX98926_THRESHOLD,
+               MAX98926_ALC_TH_SHIFT,
+               (1<<MAX98926_ALC_TH_WIDTH)-1, 0),
+       SOC_ENUM("Boost Output Voltage", max98926_boost_voltage),
+       SOC_SINGLE_TLV("Boost Current Limit", MAX98926_BOOST_LIMITER,
+               MAX98926_BST_ILIM_SHIFT,
+               (1<<MAX98926_BST_ILIM_SHIFT)-1, 0,
+               max98926_current_tlv),
+       SOC_ENUM("DAC HPF Cutoff", max98926_dac_hpf_cutoff),
+       SOC_DOUBLE("PDM Channel One", MAX98926_DAI_CLK_DIV_N_LSBS,
+               MAX98926_PDM_CHANNEL_1_SHIFT,
+               MAX98926_PDM_CHANNEL_1_HIZ, 1, 0),
+       SOC_DOUBLE("PDM Channel Zero", MAX98926_DAI_CLK_DIV_N_LSBS,
+               MAX98926_PDM_CHANNEL_0_SHIFT,
+               MAX98926_PDM_CHANNEL_0_HIZ, 1, 0),
+};
+
+static const struct {
+       int rate;
+       int  sr;
+} rate_table[] = {
+       {
+               .rate = 8000,
+               .sr = 0,
+       },
+       {
+               .rate = 11025,
+               .sr = 1,
+       },
+       {
+               .rate = 12000,
+               .sr = 2,
+       },
+       {
+               .rate = 16000,
+               .sr = 3,
+       },
+       {
+               .rate = 22050,
+               .sr = 4,
+       },
+       {
+               .rate = 24000,
+               .sr = 5,
+       },
+       {
+               .rate = 32000,
+               .sr = 6,
+       },
+       {
+               .rate = 44100,
+               .sr = 7,
+       },
+       {
+               .rate = 48000,
+               .sr = 8,
+       },
+};
+
+static void max98926_set_sense_data(struct max98926_priv *max98926)
+{
+       regmap_update_bits(max98926->regmap,
+               MAX98926_DOUT_CFG_VMON,
+               MAX98926_DAI_VMON_EN_MASK,
+               MAX98926_DAI_VMON_EN_MASK);
+       regmap_update_bits(max98926->regmap,
+               MAX98926_DOUT_CFG_IMON,
+               MAX98926_DAI_IMON_EN_MASK,
+               MAX98926_DAI_IMON_EN_MASK);
+
+       if (!max98926->interleave_mode) {
+               /* set VMON slots */
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_DOUT_CFG_VMON,
+                       MAX98926_DAI_VMON_SLOT_MASK,
+                       max98926->v_slot);
+               /* set IMON slots */
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_DOUT_CFG_IMON,
+                       MAX98926_DAI_IMON_SLOT_MASK,
+                       max98926->i_slot);
+       } else {
+               /* enable interleave mode */
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_FORMAT,
+                       MAX98926_DAI_INTERLEAVE_MASK,
+                       MAX98926_DAI_INTERLEAVE_MASK);
+               /* set interleave slots */
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_DOUT_CFG_VBAT,
+                       MAX98926_DAI_INTERLEAVE_SLOT_MASK,
+                       max98926->v_slot);
+       }
+}
+
+static int max98926_dai_set_fmt(struct snd_soc_dai *codec_dai,
+               unsigned int fmt)
+{
+       struct snd_soc_codec *codec = codec_dai->codec;
+       struct max98926_priv *max98926 = snd_soc_codec_get_drvdata(codec);
+       unsigned int invert = 0;
+
+       dev_dbg(codec->dev, "%s: fmt 0x%08X\n", __func__, fmt);
+
+       switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+       case SND_SOC_DAIFMT_CBS_CFS:
+               max98926_set_sense_data(max98926);
+               break;
+       default:
+               dev_err(codec->dev, "DAI clock mode unsupported");
+               return -EINVAL;
+       }
+
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               break;
+       case SND_SOC_DAIFMT_NB_IF:
+               invert = MAX98926_DAI_WCI_MASK;
+               break;
+       case SND_SOC_DAIFMT_IB_NF:
+               invert = MAX98926_DAI_BCI_MASK;
+               break;
+       case SND_SOC_DAIFMT_IB_IF:
+               invert = MAX98926_DAI_BCI_MASK | MAX98926_DAI_WCI_MASK;
+               break;
+       default:
+               dev_err(codec->dev, "DAI invert mode unsupported");
+               return -EINVAL;
+       }
+
+       regmap_write(max98926->regmap,
+                       MAX98926_FORMAT, MAX98926_DAI_DLY_MASK);
+       regmap_update_bits(max98926->regmap, MAX98926_FORMAT,
+                       MAX98926_DAI_BCI_MASK, invert);
+       return 0;
+}
+
+static int max98926_dai_hw_params(struct snd_pcm_substream *substream,
+               struct snd_pcm_hw_params *params,
+               struct snd_soc_dai *dai)
+{
+       int dai_sr = -EINVAL;
+       int rate = params_rate(params), i;
+       struct snd_soc_codec *codec = dai->codec;
+       struct max98926_priv *max98926 = snd_soc_codec_get_drvdata(codec);
+       /* BCLK/LRCLK ratio calculation */
+       int blr_clk_ratio = params_channels(params) * max98926->ch_size;
+
+       switch (params_format(params)) {
+       case SNDRV_PCM_FORMAT_S16_LE:
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_FORMAT,
+                       MAX98926_DAI_CHANSZ_MASK,
+                       MAX98926_DAI_CHANSZ_16);
+               max98926->ch_size = 16;
+               break;
+       case SNDRV_PCM_FORMAT_S24_LE:
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_FORMAT,
+                       MAX98926_DAI_CHANSZ_MASK,
+                       MAX98926_DAI_CHANSZ_24);
+               max98926->ch_size = 24;
+               break;
+       case SNDRV_PCM_FORMAT_S32_LE:
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_FORMAT,
+                       MAX98926_DAI_CHANSZ_MASK,
+                       MAX98926_DAI_CHANSZ_32);
+               max98926->ch_size = 32;
+               break;
+       default:
+               dev_dbg(codec->dev, "format unsupported %d",
+                       params_format(params));
+               return -EINVAL;
+       }
+
+       switch (blr_clk_ratio) {
+       case 32:
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_DAI_CLK_MODE2,
+                       MAX98926_DAI_BSEL_MASK,
+                       MAX98926_DAI_BSEL_32);
+               break;
+       case 48:
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_DAI_CLK_MODE2,
+                       MAX98926_DAI_BSEL_MASK,
+                       MAX98926_DAI_BSEL_48);
+               break;
+       case 64:
+               regmap_update_bits(max98926->regmap,
+                       MAX98926_DAI_CLK_MODE2,
+                       MAX98926_DAI_BSEL_MASK,
+                       MAX98926_DAI_BSEL_64);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* find the closest rate */
+       for (i = 0; i < ARRAY_SIZE(rate_table); i++) {
+               if (rate_table[i].rate >= rate) {
+                       dai_sr = rate_table[i].sr;
+                       break;
+               }
+       }
+       if (dai_sr < 0)
+               return -EINVAL;
+
+       /* set DAI_SR to correct LRCLK frequency */
+       regmap_update_bits(max98926->regmap,
+               MAX98926_DAI_CLK_MODE2,
+               MAX98926_DAI_SR_MASK, dai_sr << MAX98926_DAI_SR_SHIFT);
+       return 0;
+}
+
+#define MAX98926_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+               SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_ops max98926_dai_ops = {
+       .set_fmt = max98926_dai_set_fmt,
+       .hw_params = max98926_dai_hw_params,
+};
+
+static struct snd_soc_dai_driver max98926_dai[] = {
+{
+       .name = "max98926-aif1",
+       .playback = {
+               .stream_name = "HiFi Playback",
+               .channels_min = 1,
+               .channels_max = 2,
+               .rates = SNDRV_PCM_RATE_8000_48000,
+               .formats = MAX98926_FORMATS,
+       },
+       .capture = {
+               .stream_name = "HiFi Capture",
+               .channels_min = 1,
+               .channels_max = 2,
+               .rates = SNDRV_PCM_RATE_8000_48000,
+               .formats = MAX98926_FORMATS,
+       },
+       .ops = &max98926_dai_ops,
+}
+};
+
+static int max98926_probe(struct snd_soc_codec *codec)
+{
+       struct max98926_priv *max98926 = snd_soc_codec_get_drvdata(codec);
+
+       max98926->codec = codec;
+       codec->control_data = max98926->regmap;
+       /* Hi-Z all the slots */
+       regmap_write(max98926->regmap, MAX98926_DOUT_HIZ_CFG4, 0xF0);
+       return 0;
+}
+
+static struct snd_soc_codec_driver soc_codec_dev_max98926 = {
+       .probe  = max98926_probe,
+       .controls = max98926_snd_controls,
+       .num_controls = ARRAY_SIZE(max98926_snd_controls),
+       .dapm_routes = max98926_audio_map,
+       .num_dapm_routes = ARRAY_SIZE(max98926_audio_map),
+       .dapm_widgets = max98926_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(max98926_dapm_widgets),
+};
+
+static struct regmap_config max98926_regmap = {
+       .reg_bits       = 8,
+       .val_bits       = 8,
+       .max_register   = MAX98926_VERSION,
+       .reg_defaults   = max98926_reg,
+       .num_reg_defaults = ARRAY_SIZE(max98926_reg),
+       .volatile_reg   = max98926_volatile_register,
+       .readable_reg   = max98926_readable_register,
+       .cache_type             = REGCACHE_RBTREE,
+};
+
+static int max98926_i2c_probe(struct i2c_client *i2c,
+               const struct i2c_device_id *id)
+{
+       int ret, reg;
+       u32 value;
+       struct max98926_priv *max98926;
+
+       max98926 = devm_kzalloc(&i2c->dev,
+                       sizeof(*max98926), GFP_KERNEL);
+       if (!max98926)
+               return -ENOMEM;
+
+       i2c_set_clientdata(i2c, max98926);
+       max98926->regmap = devm_regmap_init_i2c(i2c, &max98926_regmap);
+       if (IS_ERR(max98926->regmap)) {
+               ret = PTR_ERR(max98926->regmap);
+               dev_err(&i2c->dev,
+                               "Failed to allocate regmap: %d\n", ret);
+               goto err_out;
+       }
+       if (of_property_read_bool(i2c->dev.of_node, "interleave-mode"))
+               max98926->interleave_mode = true;
+
+       if (!of_property_read_u32(i2c->dev.of_node, "vmon-slot-no", &value)) {
+               if (value > MAX98926_DAI_VMON_SLOT_1E_1F) {
+                       dev_err(&i2c->dev, "vmon slot number is wrong:\n");
+                       return -EINVAL;
+               }
+               max98926->v_slot = value;
+       }
+       if (!of_property_read_u32(i2c->dev.of_node, "imon-slot-no", &value)) {
+               if (value > MAX98926_DAI_IMON_SLOT_1E_1F) {
+                       dev_err(&i2c->dev, "imon slot number is wrong:\n");
+                       return -EINVAL;
+               }
+               max98926->i_slot = value;
+       }
+       ret = regmap_read(max98926->regmap,
+                       MAX98926_VERSION, &reg);
+       if (ret < 0) {
+               dev_err(&i2c->dev, "Failed to read: %x\n", reg);
+               return ret;
+       }
+
+       ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_max98926,
+                       max98926_dai, ARRAY_SIZE(max98926_dai));
+       if (ret < 0)
+               dev_err(&i2c->dev,
+                               "Failed to register codec: %d\n", ret);
+       dev_info(&i2c->dev, "device version: %x\n", reg);
+err_out:
+       return ret;
+}
+
+static int max98926_i2c_remove(struct i2c_client *client)
+{
+       snd_soc_unregister_codec(&client->dev);
+       return 0;
+}
+
+static const struct i2c_device_id max98926_i2c_id[] = {
+       { "max98926", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, max98926_i2c_id);
+
+static const struct of_device_id max98926_of_match[] = {
+       { .compatible = "maxim,max98926", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, max98926_of_match);
+
+static struct i2c_driver max98926_i2c_driver = {
+       .driver = {
+               .name = "max98926",
+               .of_match_table = of_match_ptr(max98926_of_match),
+               .pm = NULL,
+       },
+       .probe  = max98926_i2c_probe,
+       .remove = max98926_i2c_remove,
+       .id_table = max98926_i2c_id,
+};
+
+module_i2c_driver(max98926_i2c_driver)
+MODULE_DESCRIPTION("ALSA SoC MAX98926 driver");
+MODULE_AUTHOR("Anish kumar <anish.kumar@maximintegrated.com>");
+MODULE_LICENSE("GPL");
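max98926_dai_hw_params() above picks the DAI_SR field by scanning rate_table in ascending order for the first entry whose rate is at or above the requested rate, and errors out when nothing matches. A stand-alone sketch of that lookup; the table values mirror the driver, while the command-line handling and the -1 error value are illustrative:

/*
 * Illustrative only: first-fit sample-rate lookup as used for the
 * MAX98926 DAI_SR register field.
 */
#include <stdio.h>
#include <stdlib.h>

static const struct { int rate; int sr; } rate_table[] = {
	{  8000, 0 }, { 11025, 1 }, { 12000, 2 }, { 16000, 3 }, { 22050, 4 },
	{ 24000, 5 }, { 32000, 6 }, { 44100, 7 }, { 48000, 8 },
};

static int lookup_sr(int rate)
{
	size_t i;

	for (i = 0; i < sizeof(rate_table) / sizeof(rate_table[0]); i++)
		if (rate_table[i].rate >= rate)
			return rate_table[i].sr;
	return -1;	/* no table entry covers the requested rate */
}

int main(int argc, char **argv)
{
	int rate = argc > 1 ? atoi(argv[1]) : 44100;

	printf("rate %d -> DAI_SR %d\n", rate, lookup_sr(rate));
	return 0;
}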
diff --git a/sound/soc/codecs/max98926.h b/sound/soc/codecs/max98926.h
new file mode 100644 (file)
index 0000000..9d7ab6d
--- /dev/null
@@ -0,0 +1,848 @@
+/*
+ * max98926.h -- MAX98926 ALSA SoC Audio driver
+ * Copyright 2013-2015 Maxim Integrated Products
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _MAX98926_H
+#define _MAX98926_H
+
+#define MAX98926_CHIP_VERSION   0x40
+#define MAX98926_CHIP_VERSION1  0x50
+
+#define MAX98926_VBAT_DATA          0x00
+#define MAX98926_VBST_DATA          0x01
+#define MAX98926_LIVE_STATUS0       0x02
+#define MAX98926_LIVE_STATUS1       0x03
+#define MAX98926_LIVE_STATUS2       0x04
+#define MAX98926_STATE0         0x05
+#define MAX98926_STATE1         0x06
+#define MAX98926_STATE2         0x07
+#define MAX98926_FLAG0          0x08
+#define MAX98926_FLAG1          0x09
+#define MAX98926_FLAG2          0x0A
+#define MAX98926_IRQ_ENABLE0        0x0B
+#define MAX98926_IRQ_ENABLE1        0x0C
+#define MAX98926_IRQ_ENABLE2        0x0D
+#define MAX98926_IRQ_CLEAR0     0x0E
+#define MAX98926_IRQ_CLEAR1     0x0F
+#define MAX98926_IRQ_CLEAR2     0x10
+#define MAX98926_MAP0           0x11
+#define MAX98926_MAP1           0x12
+#define MAX98926_MAP2           0x13
+#define MAX98926_MAP3           0x14
+#define MAX98926_MAP4           0x15
+#define MAX98926_MAP5           0x16
+#define MAX98926_MAP6           0x17
+#define MAX98926_MAP7           0x18
+#define MAX98926_MAP8           0x19
+#define MAX98926_DAI_CLK_MODE1      0x1A
+#define MAX98926_DAI_CLK_MODE2      0x1B
+#define MAX98926_DAI_CLK_DIV_M_MSBS 0x1C
+#define MAX98926_DAI_CLK_DIV_M_LSBS 0x1D
+#define MAX98926_DAI_CLK_DIV_N_MSBS 0x1E
+#define MAX98926_DAI_CLK_DIV_N_LSBS 0x1F
+#define MAX98926_FORMAT         0x20
+#define MAX98926_TDM_SLOT_SELECT        0x21
+#define MAX98926_DOUT_CFG_VMON      0x22
+#define MAX98926_DOUT_CFG_IMON      0x23
+#define MAX98926_DOUT_CFG_VBAT      0x24
+#define MAX98926_DOUT_CFG_VBST      0x25
+#define MAX98926_DOUT_CFG_FLAG      0x26
+#define MAX98926_DOUT_HIZ_CFG1      0x27
+#define MAX98926_DOUT_HIZ_CFG2      0x28
+#define MAX98926_DOUT_HIZ_CFG3      0x29
+#define MAX98926_DOUT_HIZ_CFG4      0x2A
+#define MAX98926_DOUT_DRV_STRENGTH      0x2B
+#define MAX98926_FILTERS            0x2C
+#define MAX98926_GAIN           0x2D
+#define MAX98926_GAIN_RAMPING       0x2E
+#define MAX98926_SPK_AMP            0x2F
+#define MAX98926_THRESHOLD          0x30
+#define MAX98926_ALC_ATTACK     0x31
+#define MAX98926_ALC_ATTEN_RLS      0x32
+#define MAX98926_ALC_HOLD_RLS       0x33
+#define MAX98926_ALC_CONFIGURATION      0x34
+#define MAX98926_BOOST_CONVERTER        0x35
+#define MAX98926_BLOCK_ENABLE       0x36
+#define MAX98926_CONFIGURATION      0x37
+#define MAX98926_GLOBAL_ENABLE      0x38
+#define MAX98926_BOOST_LIMITER      0x3A
+#define MAX98926_VERSION            0xFF
+
+#define MAX98926_REG_CNT               (MAX98926_R03A_BOOST_LIMITER+1)
+
+#define MAX98926_PDM_CURRENT_MASK (1<<7)
+#define MAX98926_PDM_CURRENT_SHIFT 7
+#define MAX98926_PDM_VOLTAGE_MASK (1<<3)
+#define MAX98926_PDM_VOLTAGE_SHIFT 3
+#define MAX98926_PDM_CHANNEL_0_MASK (1<<2)
+#define MAX98926_PDM_CHANNEL_0_SHIFT 2
+#define MAX98926_PDM_CHANNEL_1_MASK (1<<6)
+#define MAX98926_PDM_CHANNEL_1_SHIFT 6
+#define MAX98926_PDM_CHANNEL_1_HIZ 5
+#define MAX98926_PDM_CHANNEL_0_HIZ 1
+#define MAX98926_PDM_SOURCE_0_SHIFT 0
+#define MAX98926_PDM_SOURCE_0_MASK (1<<0)
+#define MAX98926_PDM_SOURCE_1_MASK (1<<4)
+#define MAX98926_PDM_SOURCE_1_SHIFT 4
+
+/* MAX98926 Register Bit Fields */
+
+/* MAX98926_R002_LIVE_STATUS0 */
+#define MAX98926_THERMWARN_STATUS_MASK          (1<<3)
+#define MAX98926_THERMWARN_STATUS_SHIFT         3
+#define MAX98926_THERMWARN_STATUS_WIDTH         1
+#define MAX98926_THERMSHDN_STATUS_MASK          (1<<1)
+#define MAX98926_THERMSHDN_STATUS_SHIFT         1
+#define MAX98926_THERMSHDN_STATUS_WIDTH         1
+
+/* MAX98926_R003_LIVE_STATUS1 */
+#define MAX98926_SPKCURNT_STATUS_MASK               (1<<5)
+#define MAX98926_SPKCURNT_STATUS_SHIFT          5
+#define MAX98926_SPKCURNT_STATUS_WIDTH          1
+#define MAX98926_WATCHFAIL_STATUS_MASK          (1<<4)
+#define MAX98926_WATCHFAIL_STATUS_SHIFT         4
+#define MAX98926_WATCHFAIL_STATUS_WIDTH         1
+#define MAX98926_ALCINFH_STATUS_MASK                (1<<3)
+#define MAX98926_ALCINFH_STATUS_SHIFT               3
+#define MAX98926_ALCINFH_STATUS_WIDTH               1
+#define MAX98926_ALCACT_STATUS_MASK             (1<<2)
+#define MAX98926_ALCACT_STATUS_SHIFT                2
+#define MAX98926_ALCACT_STATUS_WIDTH                1
+#define MAX98926_ALCMUT_STATUS_MASK             (1<<1)
+#define MAX98926_ALCMUT_STATUS_SHIFT                1
+#define MAX98926_ALCMUT_STATUS_WIDTH                1
+#define MAX98926_ACLP_STATUS_MASK                   (1<<0)
+#define MAX98926_ACLP_STATUS_SHIFT              0
+#define MAX98926_ACLP_STATUS_WIDTH              1
+
+/* MAX98926_R004_LIVE_STATUS2 */
+#define MAX98926_SLOTOVRN_STATUS_MASK               (1<<6)
+#define MAX98926_SLOTOVRN_STATUS_SHIFT          6
+#define MAX98926_SLOTOVRN_STATUS_WIDTH          1
+#define MAX98926_INVALSLOT_STATUS_MASK          (1<<5)
+#define MAX98926_INVALSLOT_STATUS_SHIFT         5
+#define MAX98926_INVALSLOT_STATUS_WIDTH         1
+#define MAX98926_SLOTCNFLT_STATUS_MASK          (1<<4)
+#define MAX98926_SLOTCNFLT_STATUS_SHIFT         4
+#define MAX98926_SLOTCNFLT_STATUS_WIDTH         1
+#define MAX98926_VBSTOVFL_STATUS_MASK               (1<<3)
+#define MAX98926_VBSTOVFL_STATUS_SHIFT          3
+#define MAX98926_VBSTOVFL_STATUS_WIDTH          1
+#define MAX98926_VBATOVFL_STATUS_MASK               (1<<2)
+#define MAX98926_VBATOVFL_STATUS_SHIFT          2
+#define MAX98926_VBATOVFL_STATUS_WIDTH          1
+#define MAX98926_IMONOVFL_STATUS_MASK               (1<<1)
+#define MAX98926_IMONOVFL_STATUS_SHIFT          1
+#define MAX98926_IMONOVFL_STATUS_WIDTH          1
+#define MAX98926_VMONOVFL_STATUS_MASK               (1<<0)
+#define MAX98926_VMONOVFL_STATUS_SHIFT          0
+#define MAX98926_VMONOVFL_STATUS_WIDTH          1
+
+/* MAX98926_R005_STATE0 */
+#define MAX98926_THERMWARN_END_STATE_MASK           (1<<3)
+#define MAX98926_THERMWARN_END_STATE_SHIFT      3
+#define MAX98926_THERMWARN_END_STATE_WIDTH      1
+#define MAX98926_THERMWARN_BGN_STATE_MASK           (1<<2)
+#define MAX98926_THERMWARN_BGN_STATE_SHIFT      1
+#define MAX98926_THERMWARN_BGN_STATE_WIDTH      1
+#define MAX98926_THERMSHDN_END_STATE_MASK           (1<<1)
+#define MAX98926_THERMSHDN_END_STATE_SHIFT      1
+#define MAX98926_THERMSHDN_END_STATE_WIDTH      1
+#define MAX98926_THERMSHDN_BGN_STATE_MASK           (1<<0)
+#define MAX98926_THERMSHDN_BGN_STATE_SHIFT      0
+#define MAX98926_THERMSHDN_BGN_STATE_WIDTH      1
+
+/* MAX98926_R006_STATE1 */
+#define MAX98926_SPRCURNT_STATE_MASK                (1<<5)
+#define MAX98926_SPRCURNT_STATE_SHIFT               5
+#define MAX98926_SPRCURNT_STATE_WIDTH               1
+#define MAX98926_WATCHFAIL_STATE_MASK               (1<<4)
+#define MAX98926_WATCHFAIL_STATE_SHIFT          4
+#define MAX98926_WATCHFAIL_STATE_WIDTH          1
+#define MAX98926_ALCINFH_STATE_MASK             (1<<3)
+#define MAX98926_ALCINFH_STATE_SHIFT                3
+#define MAX98926_ALCINFH_STATE_WIDTH                1
+#define MAX98926_ALCACT_STATE_MASK              (1<<2)
+#define MAX98926_ALCACT_STATE_SHIFT             2
+#define MAX98926_ALCACT_STATE_WIDTH             1
+#define MAX98926_ALCMUT_STATE_MASK              (1<<1)
+#define MAX98926_ALCMUT_STATE_SHIFT             1
+#define MAX98926_ALCMUT_STATE_WIDTH             1
+#define MAX98926_ALCP_STATE_MASK                    (1<<0)
+#define MAX98926_ALCP_STATE_SHIFT                   0
+#define MAX98926_ALCP_STATE_WIDTH                   1
+
+/* MAX98926_R007_STATE2 */
+#define MAX98926_SLOTOVRN_STATE_MASK                (1<<6)
+#define MAX98926_SLOTOVRN_STATE_SHIFT               6
+#define MAX98926_SLOTOVRN_STATE_WIDTH               1
+#define MAX98926_INVALSLOT_STATE_MASK               (1<<5)
+#define MAX98926_INVALSLOT_STATE_SHIFT          5
+#define MAX98926_INVALSLOT_STATE_WIDTH          1
+#define MAX98926_SLOTCNFLT_STATE_MASK               (1<<4)
+#define MAX98926_SLOTCNFLT_STATE_SHIFT          4
+#define MAX98926_SLOTCNFLT_STATE_WIDTH          1
+#define MAX98926_VBSTOVFL_STATE_MASK                (1<<3)
+#define MAX98926_VBSTOVFL_STATE_SHIFT               3
+#define MAX98926_VBSTOVFL_STATE_WIDTH               1
+#define MAX98926_VBATOVFL_STATE_MASK                (1<<2)
+#define MAX98926_VBATOVFL_STATE_SHIFT               2
+#define MAX98926_VBATOVFL_STATE_WIDTH               1
+#define MAX98926_IMONOVFL_STATE_MASK                (1<<1)
+#define MAX98926_IMONOVFL_STATE_SHIFT               1
+#define MAX98926_IMONOVFL_STATE_WIDTH               1
+#define MAX98926_VMONOVFL_STATE_MASK                (1<<0)
+#define MAX98926_VMONOVFL_STATE_SHIFT               0
+#define MAX98926_VMONOVFL_STATE_WIDTH               1
+
+/* MAX98926_R008_FLAG0 */
+#define MAX98926_THERMWARN_END_FLAG_MASK            (1<<3)
+#define MAX98926_THERMWARN_END_FLAG_SHIFT           3
+#define MAX98926_THERMWARN_END_FLAG_WIDTH           1
+#define MAX98926_THERMWARN_BGN_FLAG_MASK            (1<<2)
+#define MAX98926_THERMWARN_BGN_FLAG_SHIFT           2
+#define MAX98926_THERMWARN_BGN_FLAG_WIDTH           1
+#define MAX98926_THERMSHDN_END_FLAG_MASK            (1<<1)
+#define MAX98926_THERMSHDN_END_FLAG_SHIFT           1
+#define MAX98926_THERMSHDN_END_FLAG_WIDTH           1
+#define MAX98926_THERMSHDN_BGN_FLAG_MASK            (1<<0)
+#define MAX98926_THERMSHDN_BGN_FLAG_SHIFT           0
+#define MAX98926_THERMSHDN_BGN_FLAG_WIDTH           1
+
+/* MAX98926_R009_FLAG1 */
+#define MAX98926_SPKCURNT_FLAG_MASK             (1<<5)
+#define MAX98926_SPKCURNT_FLAG_SHIFT                5
+#define MAX98926_SPKCURNT_FLAG_WIDTH                1
+#define MAX98926_WATCHFAIL_FLAG_MASK                (1<<4)
+#define MAX98926_WATCHFAIL_FLAG_SHIFT               4
+#define MAX98926_WATCHFAIL_FLAG_WIDTH               1
+#define MAX98926_ALCINFH_FLAG_MASK              (1<<3)
+#define MAX98926_ALCINFH_FLAG_SHIFT             3
+#define MAX98926_ALCINFH_FLAG_WIDTH             1
+#define MAX98926_ALCACT_FLAG_MASK                   (1<<2)
+#define MAX98926_ALCACT_FLAG_SHIFT              2
+#define MAX98926_ALCACT_FLAG_WIDTH              1
+#define MAX98926_ALCMUT_FLAG_MASK                   (1<<1)
+#define MAX98926_ALCMUT_FLAG_SHIFT              1
+#define MAX98926_ALCMUT_FLAG_WIDTH              1
+#define MAX98926_ALCP_FLAG_MASK                 (1<<0)
+#define MAX98926_ALCP_FLAG_SHIFT                    0
+#define MAX98926_ALCP_FLAG_WIDTH                    1
+
+/* MAX98926_R00A_FLAG2 */
+#define MAX98926_SLOTOVRN_FLAG_MASK             (1<<6)
+#define MAX98926_SLOTOVRN_FLAG_SHIFT                6
+#define MAX98926_SLOTOVRN_FLAG_WIDTH                1
+#define MAX98926_INVALSLOT_FLAG_MASK                (1<<5)
+#define MAX98926_INVALSLOT_FLAG_SHIFT               5
+#define MAX98926_INVALSLOT_FLAG_WIDTH               1
+#define MAX98926_SLOTCNFLT_FLAG_MASK                (1<<4)
+#define MAX98926_SLOTCNFLT_FLAG_SHIFT               4
+#define MAX98926_SLOTCNFLT_FLAG_WIDTH               1
+#define MAX98926_VBSTOVFL_FLAG_MASK             (1<<3)
+#define MAX98926_VBSTOVFL_FLAG_SHIFT                3
+#define MAX98926_VBSTOVFL_FLAG_WIDTH                1
+#define MAX98926_VBATOVFL_FLAG_MASK             (1<<2)
+#define MAX98926_VBATOVFL_FLAG_SHIFT                2
+#define MAX98926_VBATOVFL_FLAG_WIDTH                1
+#define MAX98926_IMONOVFL_FLAG_MASK             (1<<1)
+#define MAX98926_IMONOVFL_FLAG_SHIFT                1
+#define MAX98926_IMONOVFL_FLAG_WIDTH                1
+#define MAX98926_VMONOVFL_FLAG_MASK             (1<<0)
+#define MAX98926_VMONOVFL_FLAG_SHIFT                0
+#define MAX98926_VMONOVFL_FLAG_WIDTH                1
+
+/* MAX98926_R00B_IRQ_ENABLE0 */
+#define MAX98926_THERMWARN_END_EN_MASK          (1<<3)
+#define MAX98926_THERMWARN_END_EN_SHIFT         3
+#define MAX98926_THERMWARN_END_EN_WIDTH         1
+#define MAX98926_THERMWARN_BGN_EN_MASK          (1<<2)
+#define MAX98926_THERMWARN_BGN_EN_SHIFT         2
+#define MAX98926_THERMWARN_BGN_EN_WIDTH         1
+#define MAX98926_THERMSHDN_END_EN_MASK          (1<<1)
+#define MAX98926_THERMSHDN_END_EN_SHIFT         1
+#define MAX98926_THERMSHDN_END_EN_WIDTH         1
+#define MAX98926_THERMSHDN_BGN_EN_MASK          (1<<0)
+#define MAX98926_THERMSHDN_BGN_EN_SHIFT         0
+#define MAX98926_THERMSHDN_BGN_EN_WIDTH         1
+
+/* MAX98926_R00C_IRQ_ENABLE1 */
+#define MAX98926_SPKCURNT_EN_MASK       (1<<5)
+#define MAX98926_SPKCURNT_EN_SHIFT  5
+#define MAX98926_SPKCURNT_EN_WIDTH  1
+#define MAX98926_WATCHFAIL_EN_MASK  (1<<4)
+#define MAX98926_WATCHFAIL_EN_SHIFT 4
+#define MAX98926_WATCHFAIL_EN_WIDTH 1
+#define MAX98926_ALCINFH_EN_MASK        (1<<3)
+#define MAX98926_ALCINFH_EN_SHIFT       3
+#define MAX98926_ALCINFH_EN_WIDTH       1
+#define MAX98926_ALCACT_EN_MASK     (1<<2)
+#define MAX98926_ALCACT_EN_SHIFT        2
+#define MAX98926_ALCACT_EN_WIDTH        1
+#define MAX98926_ALCMUT_EN_MASK     (1<<1)
+#define MAX98926_ALCMUT_EN_SHIFT        1
+#define MAX98926_ALCMUT_EN_WIDTH        1
+#define MAX98926_ALCP_EN_MASK           (1<<0)
+#define MAX98926_ALCP_EN_SHIFT      0
+#define MAX98926_ALCP_EN_WIDTH      1
+
+/* MAX98926_R00D_IRQ_ENABLE2 */
+#define MAX98926_SLOTOVRN_EN_MASK       (1<<6)
+#define MAX98926_SLOTOVRN_EN_SHIFT  6
+#define MAX98926_SLOTOVRN_EN_WIDTH  1
+#define MAX98926_INVALSLOT_EN_MASK  (1<<5)
+#define MAX98926_INVALSLOT_EN_SHIFT 5
+#define MAX98926_INVALSLOT_EN_WIDTH 1
+#define MAX98926_SLOTCNFLT_EN_MASK  (1<<4)
+#define MAX98926_SLOTCNFLT_EN_SHIFT 4
+#define MAX98926_SLOTCNFLT_EN_WIDTH 1
+#define MAX98926_VBSTOVFL_EN_MASK       (1<<3)
+#define MAX98926_VBSTOVFL_EN_SHIFT  3
+#define MAX98926_VBSTOVFL_EN_WIDTH  1
+#define MAX98926_VBATOVFL_EN_MASK       (1<<2)
+#define MAX98926_VBATOVFL_EN_SHIFT  2
+#define MAX98926_VBATOVFL_EN_WIDTH  1
+#define MAX98926_IMONOVFL_EN_MASK       (1<<1)
+#define MAX98926_IMONOVFL_EN_SHIFT  1
+#define MAX98926_IMONOVFL_EN_WIDTH  1
+#define MAX98926_VMONOVFL_EN_MASK       (1<<0)
+#define MAX98926_VMONOVFL_EN_SHIFT  0
+#define MAX98926_VMONOVFL_EN_WIDTH  1
+
+/* MAX98926_R00E_IRQ_CLEAR0 */
+#define MAX98926_THERMWARN_END_CLR_MASK         (1<<3)
+#define MAX98926_THERMWARN_END_CLR_SHIFT            3
+#define MAX98926_THERMWARN_END_CLR_WIDTH            1
+#define MAX98926_THERMWARN_BGN_CLR_MASK         (1<<2)
+#define MAX98926_THERMWARN_BGN_CLR_SHIFT            2
+#define MAX98926_THERMWARN_BGN_CLR_WIDTH            1
+#define MAX98926_THERMSHDN_END_CLR_MASK         (1<<1)
+#define MAX98926_THERMSHDN_END_CLR_SHIFT            1
+#define MAX98926_THERMSHDN_END_CLR_WIDTH            1
+#define MAX98926_THERMSHDN_BGN_CLR_MASK         (1<<0)
+#define MAX98926_THERMSHDN_BGN_CLR_SHIFT            0
+#define MAX98926_THERMSHDN_BGN_CLR_WIDTH            1
+
+/* MAX98926_R00F_IRQ_CLEAR1 */
+#define MAX98926_SPKCURNT_CLR_MASK      (1<<5)
+#define MAX98926_SPKCURNT_CLR_SHIFT     5
+#define MAX98926_SPKCURNT_CLR_WIDTH     1
+#define MAX98926_WATCHFAIL_CLR_MASK     (1<<4)
+#define MAX98926_WATCHFAIL_CLR_SHIFT        4
+#define MAX98926_WATCHFAIL_CLR_WIDTH        1
+#define MAX98926_ALCINFH_CLR_MASK           (1<<3)
+#define MAX98926_ALCINFH_CLR_SHIFT      3
+#define MAX98926_ALCINFH_CLR_WIDTH      1
+#define MAX98926_ALCACT_CLR_MASK            (1<<2)
+#define MAX98926_ALCACT_CLR_SHIFT           2
+#define MAX98926_ALCACT_CLR_WIDTH           1
+#define MAX98926_ALCMUT_CLR_MASK            (1<<1)
+#define MAX98926_ALCMUT_CLR_SHIFT           1
+#define MAX98926_ALCMUT_CLR_WIDTH           1
+#define MAX98926_ALCP_CLR_MASK          (1<<0)
+#define MAX98926_ALCP_CLR_SHIFT         0
+#define MAX98926_ALCP_CLR_WIDTH         1
+
+/* MAX98926_R010_IRQ_CLEAR2 */
+#define MAX98926_SLOTOVRN_CLR_MASK      (1<<6)
+#define MAX98926_SLOTOVRN_CLR_SHIFT     6
+#define MAX98926_SLOTOVRN_CLR_WIDTH     1
+#define MAX98926_INVALSLOT_CLR_MASK     (1<<5)
+#define MAX98926_INVALSLOT_CLR_SHIFT        5
+#define MAX98926_INVALSLOT_CLR_WIDTH        1
+#define MAX98926_SLOTCNFLT_CLR_MASK     (1<<4)
+#define MAX98926_SLOTCNFLT_CLR_SHIFT        4
+#define MAX98926_SLOTCNFLT_CLR_WIDTH        1
+#define MAX98926_VBSTOVFL_CLR_MASK      (1<<3)
+#define MAX98926_VBSTOVFL_CLR_SHIFT     3
+#define MAX98926_VBSTOVFL_CLR_WIDTH     1
+#define MAX98926_VBATOVFL_CLR_MASK      (1<<2)
+#define MAX98926_VBATOVFL_CLR_SHIFT     2
+#define MAX98926_VBATOVFL_CLR_WIDTH     1
+#define MAX98926_IMONOVFL_CLR_MASK      (1<<1)
+#define MAX98926_IMONOVFL_CLR_SHIFT     1
+#define MAX98926_IMONOVFL_CLR_WIDTH     1
+#define MAX98926_VMONOVFL_CLR_MASK          (1<<0)
+#define MAX98926_VMONOVFL_CLR_SHIFT         0
+#define MAX98926_VMONOVFL_CLR_WIDTH         1
+
+/* MAX98926_R011_MAP0 */
+#define MAX98926_ER_THERMWARN_EN_MASK               (1<<7)
+#define MAX98926_ER_THERMWARN_EN_SHIFT          7
+#define MAX98926_ER_THERMWARN_EN_WIDTH          1
+#define MAX98926_ER_THERMWARN_MAP_MASK          (0x07<<4)
+#define MAX98926_ER_THERMWARN_MAP_SHIFT         4
+#define MAX98926_ER_THERMWARN_MAP_WIDTH         3
+
+/* MAX98926_R012_MAP1 */
+#define MAX98926_ER_ALCMUT_EN_MASK      (1<<7)
+#define MAX98926_ER_ALCMUT_EN_SHIFT     7
+#define MAX98926_ER_ALCMUT_EN_WIDTH     1
+#define MAX98926_ER_ALCMUT_MAP_MASK     (0x07<<4)
+#define MAX98926_ER_ALCMUT_MAP_SHIFT        4
+#define MAX98926_ER_ALCMUT_MAP_WIDTH        3
+#define MAX98926_ER_ALCP_EN_MASK            (1<<3)
+#define MAX98926_ER_ALCP_EN_SHIFT           3
+#define MAX98926_ER_ALCP_EN_WIDTH           1
+#define MAX98926_ER_ALCP_MAP_MASK           (0x07<<0)
+#define MAX98926_ER_ALCP_MAP_SHIFT      0
+#define MAX98926_ER_ALCP_MAP_WIDTH      3
+
+/* MAX98926_R013_MAP2 */
+#define MAX98926_ER_ALCINFH_EN_MASK     (1<<7)
+#define MAX98926_ER_ALCINFH_EN_SHIFT        7
+#define MAX98926_ER_ALCINFH_EN_WIDTH        1
+#define MAX98926_ER_ALCINFH_MAP_MASK        (0x07<<4)
+#define MAX98926_ER_ALCINFH_MAP_SHIFT       4
+#define MAX98926_ER_ALCINFH_MAP_WIDTH       3
+#define MAX98926_ER_ALCACT_EN_MASK      (1<<3)
+#define MAX98926_ER_ALCACT_EN_SHIFT     3
+#define MAX98926_ER_ALCACT_EN_WIDTH     1
+#define MAX98926_ER_ALCACT_MAP_MASK     (0x07<<0)
+#define MAX98926_ER_ALCACT_MAP_SHIFT        0
+#define MAX98926_ER_ALCACT_MAP_WIDTH        3
+
+/* MAX98926_R014_MAP3 */
+#define MAX98926_ER_SPKCURNT_EN_MASK            (1<<7)
+#define MAX98926_ER_SPKCURNT_EN_SHIFT           7
+#define MAX98926_ER_SPKCURNT_EN_WIDTH           1
+#define MAX98926_ER_SPKCURNT_MAP_MASK           (0x07<<4)
+#define MAX98926_ER_SPKCURNT_MAP_SHIFT          4
+#define MAX98926_ER_SPKCURNT_MAP_WIDTH          3
+
+/* MAX98926_R015_MAP4 */
+/* RESERVED */
+
+/* MAX98926_R016_MAP5 */
+#define MAX98926_ER_IMONOVFL_EN_MASK            (1<<7)
+#define MAX98926_ER_IMONOVFL_EN_SHIFT           7
+#define MAX98926_ER_IMONOVFL_EN_WIDTH           1
+#define MAX98926_ER_IMONOVFL_MAP_MASK           (0x07<<4)
+#define MAX98926_ER_IMONOVFL_MAP_SHIFT          4
+#define MAX98926_ER_IMONOVFL_MAP_WIDTH          3
+#define MAX98926_ER_VMONOVFL_EN_MASK            (1<<3)
+#define MAX98926_ER_VMONOVFL_EN_SHIFT           3
+#define MAX98926_ER_VMONOVFL_EN_WIDTH           1
+#define MAX98926_ER_VMONOVFL_MAP_MASK           (0x07<<0)
+#define MAX98926_ER_VMONOVFL_MAP_SHIFT          0
+#define MAX98926_ER_VMONOVFL_MAP_WIDTH          3
+
+/* MAX98926_R017_MAP6 */
+#define MAX98926_ER_VBSTOVFL_EN_MASK            (1<<7)
+#define MAX98926_ER_VBSTOVFL_EN_SHIFT           7
+#define MAX98926_ER_VBSTOVFL_EN_WIDTH           1
+#define MAX98926_ER_VBSTOVFL_MAP_MASK           (0x07<<4)
+#define MAX98926_ER_VBSTOVFL_MAP_SHIFT          4
+#define MAX98926_ER_VBSTOVFL_MAP_WIDTH          3
+#define MAX98926_ER_VBATOVFL_EN_MASK            (1<<3)
+#define MAX98926_ER_VBATOVFL_EN_SHIFT           3
+#define MAX98926_ER_VBATOVFL_EN_WIDTH           1
+#define MAX98926_ER_VBATOVFL_MAP_MASK           (0x07<<0)
+#define MAX98926_ER_VBATOVFL_MAP_SHIFT          0
+#define MAX98926_ER_VBATOVFL_MAP_WIDTH          3
+
+/* MAX98926_R018_MAP7 */
+#define MAX98926_ER_INVALSLOT_EN_MASK               (1<<7)
+#define MAX98926_ER_INVALSLOT_EN_SHIFT          7
+#define MAX98926_ER_INVALSLOT_EN_WIDTH          1
+#define MAX98926_ER_INVALSLOT_MAP_MASK          (0x07<<4)
+#define MAX98926_ER_INVALSLOT_MAP_SHIFT         4
+#define MAX98926_ER_INVALSLOT_MAP_WIDTH         3
+#define MAX98926_ER_SLOTCNFLT_EN_MASK               (1<<3)
+#define MAX98926_ER_SLOTCNFLT_EN_SHIFT          3
+#define MAX98926_ER_SLOTCNFLT_EN_WIDTH          1
+#define MAX98926_ER_SLOTCNFLT_MAP_MASK          (0x07<<0)
+#define MAX98926_ER_SLOTCNFLT_MAP_SHIFT         0
+#define MAX98926_ER_SLOTCNFLT_MAP_WIDTH         3
+
+/* MAX98926_R019_MAP8 */
+#define MAX98926_ER_SLOTOVRN_EN_MASK    (1<<3)
+#define MAX98926_ER_SLOTOVRN_EN_SHIFT   3
+#define MAX98926_ER_SLOTOVRN_EN_WIDTH   1
+#define MAX98926_ER_SLOTOVRN_MAP_MASK   (0x07<<0)
+#define MAX98926_ER_SLOTOVRN_MAP_SHIFT  0
+#define MAX98926_ER_SLOTOVRN_MAP_WIDTH  3
+
+/* MAX98926_R01A_DAI_CLK_MODE1 */
+#define MAX98926_DAI_CLK_SOURCE_MASK    (1<<6)
+#define MAX98926_DAI_CLK_SOURCE_SHIFT   6
+#define MAX98926_DAI_CLK_SOURCE_WIDTH   1
+#define MAX98926_MDLL_MULT_MASK     (0x0F<<0)
+#define MAX98926_MDLL_MULT_SHIFT        0
+#define MAX98926_MDLL_MULT_WIDTH        4
+
+#define MAX98926_MDLL_MULT_MCLKx8       6
+#define MAX98926_MDLL_MULT_MCLKx16  8
+
+/* MAX98926_R01B_DAI_CLK_MODE2 */
+#define MAX98926_DAI_SR_MASK            (0x0F<<4)
+#define MAX98926_DAI_SR_SHIFT           4
+#define MAX98926_DAI_SR_WIDTH           4
+#define MAX98926_DAI_MAS_MASK           (1<<3)
+#define MAX98926_DAI_MAS_SHIFT          3
+#define MAX98926_DAI_MAS_WIDTH          1
+#define MAX98926_DAI_BSEL_MASK          (0x07<<0)
+#define MAX98926_DAI_BSEL_SHIFT         0
+#define MAX98926_DAI_BSEL_WIDTH         3
+
+#define MAX98926_DAI_BSEL_32 (0 << MAX98926_DAI_BSEL_SHIFT)
+#define MAX98926_DAI_BSEL_48 (1 << MAX98926_DAI_BSEL_SHIFT)
+#define MAX98926_DAI_BSEL_64 (2 << MAX98926_DAI_BSEL_SHIFT)
+#define MAX98926_DAI_BSEL_256 (6 << MAX98926_DAI_BSEL_SHIFT)
+
+/* MAX98926_R01C_DAI_CLK_DIV_M_MSBS */
+#define MAX98926_DAI_M_MSBS_MASK        (0xFF<<0)
+#define MAX98926_DAI_M_MSBS_SHIFT       0
+#define MAX98926_DAI_M_MSBS_WIDTH       8
+
+/* MAX98926_R01D_DAI_CLK_DIV_M_LSBS */
+#define MAX98926_DAI_M_LSBS_MASK        (0xFF<<0)
+#define MAX98926_DAI_M_LSBS_SHIFT       0
+#define MAX98926_DAI_M_LSBS_WIDTH       8
+
+/* MAX98926_R01E_DAI_CLK_DIV_N_MSBS */
+#define MAX98926_DAI_N_MSBS_MASK        (0x7F<<0)
+#define MAX98926_DAI_N_MSBS_SHIFT       0
+#define MAX98926_DAI_N_MSBS_WIDTH       7
+
+/* MAX98926_R01F_DAI_CLK_DIV_N_LSBS */
+#define MAX98926_DAI_N_LSBS_MASK        (0xFF<<0)
+#define MAX98926_DAI_N_LSBS_SHIFT       0
+#define MAX98926_DAI_N_LSBS_WIDTH       8
+
+/* MAX98926_R020_FORMAT */
+#define MAX98926_DAI_CHANSZ_MASK    (0x03<<6)
+#define MAX98926_DAI_CHANSZ_SHIFT   6
+#define MAX98926_DAI_CHANSZ_WIDTH   2
+#define MAX98926_DAI_INTERLEAVE_MASK        (1<<5)
+#define MAX98926_DAI_INTERLEAVE_SHIFT       5
+#define MAX98926_DAI_INTERLEAVE_WIDTH       1
+#define MAX98926_DAI_EXTBCLK_HIZ_MASK       (1<<4)
+#define MAX98926_DAI_EXTBCLK_HIZ_SHIFT      4
+#define MAX98926_DAI_EXTBCLK_HIZ_WIDTH      1
+#define MAX98926_DAI_WCI_MASK           (1<<3)
+#define MAX98926_DAI_WCI_SHIFT      3
+#define MAX98926_DAI_WCI_WIDTH      1
+#define MAX98926_DAI_BCI_MASK           (1<<2)
+#define MAX98926_DAI_BCI_SHIFT      2
+#define MAX98926_DAI_BCI_WIDTH      1
+#define MAX98926_DAI_DLY_MASK           (1<<1)
+#define MAX98926_DAI_DLY_SHIFT      1
+#define MAX98926_DAI_DLY_WIDTH      1
+#define MAX98926_DAI_TDM_MASK           (1<<0)
+#define MAX98926_DAI_TDM_SHIFT      0
+#define MAX98926_DAI_TDM_WIDTH      1
+
+#define MAX98926_DAI_CHANSZ_16 (1 << MAX98926_DAI_CHANSZ_SHIFT)
+#define MAX98926_DAI_CHANSZ_24 (2 << MAX98926_DAI_CHANSZ_SHIFT)
+#define MAX98926_DAI_CHANSZ_32 (3 << MAX98926_DAI_CHANSZ_SHIFT)
+
+/* MAX98926_R021_TDM_SLOT_SELECT */
+#define MAX98926_DAI_DO_EN_MASK     (1<<7)
+#define MAX98926_DAI_DO_EN_SHIFT        7
+#define MAX98926_DAI_DO_EN_WIDTH        1
+#define MAX98926_DAI_DIN_EN_MASK        (1<<6)
+#define MAX98926_DAI_DIN_EN_SHIFT       6
+#define MAX98926_DAI_DIN_EN_WIDTH       1
+#define MAX98926_DAI_INR_SOURCE_MASK    (0x07<<3)
+#define MAX98926_DAI_INR_SOURCE_SHIFT   3
+#define MAX98926_DAI_INR_SOURCE_WIDTH   3
+#define MAX98926_DAI_INL_SOURCE_MASK    (0x07<<0)
+#define MAX98926_DAI_INL_SOURCE_SHIFT   0
+#define MAX98926_DAI_INL_SOURCE_WIDTH   3
+
+/* MAX98926_R022_DOUT_CFG_VMON */
+#define MAX98926_DAI_VMON_EN_MASK       (1<<5)
+#define MAX98926_DAI_VMON_EN_SHIFT  5
+#define MAX98926_DAI_VMON_EN_WIDTH  1
+#define MAX98926_DAI_VMON_SLOT_MASK (0x1F<<0)
+#define MAX98926_DAI_VMON_SLOT_SHIFT    0
+#define MAX98926_DAI_VMON_SLOT_WIDTH    5
+
+#define MAX98926_DAI_VMON_SLOT_00_01 (0 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_01_02 (1 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_02_03 (2 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_03_04 (3 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_04_05 (4 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_05_06 (5 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_06_07 (6 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_07_08 (7 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_08_09 (8 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_09_0A (9 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_0A_0B (10 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_0B_0C (11 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_0C_0D (12 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_0D_0E (13 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_0E_0F (14 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_0F_10 (15 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_10_11 (16 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_11_12 (17 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_12_13 (18 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_13_14 (19 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_14_15 (20 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_15_16 (21 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_16_17 (22 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_17_18 (23 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_18_19 (24 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_19_1A (25 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_1A_1B (26 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_1B_1C (27 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_1C_1D (28 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_1D_1E (29 << MAX98926_DAI_VMON_SLOT_SHIFT)
+#define MAX98926_DAI_VMON_SLOT_1E_1F (30 << MAX98926_DAI_VMON_SLOT_SHIFT)
+
+/* MAX98926_R023_DOUT_CFG_IMON */
+#define MAX98926_DAI_IMON_EN_MASK       (1<<5)
+#define MAX98926_DAI_IMON_EN_SHIFT  5
+#define MAX98926_DAI_IMON_EN_WIDTH  1
+#define MAX98926_DAI_IMON_SLOT_MASK (0x1F<<0)
+#define MAX98926_DAI_IMON_SLOT_SHIFT    0
+#define MAX98926_DAI_IMON_SLOT_WIDTH    5
+
+#define MAX98926_DAI_IMON_SLOT_00_01 (0 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_01_02 (1 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_02_03 (2 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_03_04 (3 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_04_05 (4 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_05_06 (5 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_06_07 (6 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_07_08 (7 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_08_09 (8 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_09_0A (9 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_0A_0B (10 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_0B_0C (11 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_0C_0D (12 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_0D_0E (13 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_0E_0F (14 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_0F_10 (15 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_10_11 (16 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_11_12 (17 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_12_13 (18 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_13_14 (19 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_14_15 (20 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_15_16 (21 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_16_17 (22 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_17_18 (23 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_18_19 (24 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_19_1A (25 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_1A_1B (26 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_1B_1C (27 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_1C_1D (28 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_1D_1E (29 << MAX98926_DAI_IMON_SLOT_SHIFT)
+#define MAX98926_DAI_IMON_SLOT_1E_1F (30 << MAX98926_DAI_IMON_SLOT_SHIFT)
+
+/* MAX98926_R024_DOUT_CFG_VBAT */
+#define MAX98926_DAI_INTERLEAVE_SLOT_MASK       (0x1F<<0)
+#define MAX98926_DAI_INTERLEAVE_SLOT_SHIFT      0
+#define MAX98926_DAI_INTERLEAVE_SLOT_WIDTH      5
+
+/* MAX98926_R025_DOUT_CFG_VBST */
+#define MAX98926_DAI_VBST_EN_MASK               (1<<5)
+#define MAX98926_DAI_VBST_EN_SHIFT          5
+#define MAX98926_DAI_VBST_EN_WIDTH          1
+#define MAX98926_DAI_VBST_SLOT_MASK         (0x1F<<0)
+#define MAX98926_DAI_VBST_SLOT_SHIFT            0
+#define MAX98926_DAI_VBST_SLOT_WIDTH            5
+
+/* MAX98926_R026_DOUT_CFG_FLAG */
+#define MAX98926_DAI_FLAG_EN_MASK               (1<<5)
+#define MAX98926_DAI_FLAG_EN_SHIFT          5
+#define MAX98926_DAI_FLAG_EN_WIDTH          1
+#define MAX98926_DAI_FLAG_SLOT_MASK         (0x1F<<0)
+#define MAX98926_DAI_FLAG_SLOT_SHIFT            0
+#define MAX98926_DAI_FLAG_SLOT_WIDTH            5
+
+/* MAX98926_R027_DOUT_HIZ_CFG1 */
+#define MAX98926_DAI_SLOT_HIZ_CFG1_MASK         (0xFF<<0)
+#define MAX98926_DAI_SLOT_HIZ_CFG1_SHIFT            0
+#define MAX98926_DAI_SLOT_HIZ_CFG1_WIDTH            8
+
+/* MAX98926_R028_DOUT_HIZ_CFG2 */
+#define MAX98926_DAI_SLOT_HIZ_CFG2_MASK         (0xFF<<0)
+#define MAX98926_DAI_SLOT_HIZ_CFG2_SHIFT            0
+#define MAX98926_DAI_SLOT_HIZ_CFG2_WIDTH            8
+
+/* MAX98926_R029_DOUT_HIZ_CFG3 */
+#define MAX98926_DAI_SLOT_HIZ_CFG3_MASK         (0xFF<<0)
+#define MAX98926_DAI_SLOT_HIZ_CFG3_SHIFT            0
+#define MAX98926_DAI_SLOT_HIZ_CFG3_WIDTH            8
+
+/* MAX98926_R02A_DOUT_HIZ_CFG4 */
+#define MAX98926_DAI_SLOT_HIZ_CFG4_MASK         (0xFF<<0)
+#define MAX98926_DAI_SLOT_HIZ_CFG4_SHIFT            0
+#define MAX98926_DAI_SLOT_HIZ_CFG4_WIDTH            8
+
+/* MAX98926_R02B_DOUT_DRV_STRENGTH */
+#define MAX98926_DAI_OUT_DRIVE_MASK             (0x03<<0)
+#define MAX98926_DAI_OUT_DRIVE_SHIFT                0
+#define MAX98926_DAI_OUT_DRIVE_WIDTH                2
+
+/* MAX98926_R02C_FILTERS */
+#define MAX98926_ADC_DITHER_EN_MASK             (1<<7)
+#define MAX98926_ADC_DITHER_EN_SHIFT                7
+#define MAX98926_ADC_DITHER_EN_WIDTH                1
+#define MAX98926_IV_DCB_EN_MASK                 (1<<6)
+#define MAX98926_IV_DCB_EN_SHIFT                    6
+#define MAX98926_IV_DCB_EN_WIDTH                    1
+#define MAX98926_DAC_DITHER_EN_MASK             (1<<4)
+#define MAX98926_DAC_DITHER_EN_SHIFT                4
+#define MAX98926_DAC_DITHER_EN_WIDTH                1
+#define MAX98926_DAC_FILTER_MODE_MASK               (1<<3)
+#define MAX98926_DAC_FILTER_MODE_SHIFT          3
+#define MAX98926_DAC_FILTER_MODE_WIDTH          1
+#define MAX98926_DAC_HPF_MASK               (0x07<<0)
+#define MAX98926_DAC_HPF_SHIFT                  0
+#define MAX98926_DAC_HPF_WIDTH                  3
+#define MAX98926_DAC_HPF_DISABLE        (0 << MAX98926_DAC_HPF_SHIFT)
+#define MAX98926_DAC_HPF_DC_BLOCK       (1 << MAX98926_DAC_HPF_SHIFT)
+#define MAX98926_DAC_HPF_EN_100     (2 << MAX98926_DAC_HPF_SHIFT)
+#define MAX98926_DAC_HPF_EN_200     (3 << MAX98926_DAC_HPF_SHIFT)
+#define MAX98926_DAC_HPF_EN_400     (4 << MAX98926_DAC_HPF_SHIFT)
+#define MAX98926_DAC_HPF_EN_800     (5 << MAX98926_DAC_HPF_SHIFT)
+
+/* MAX98926_R02D_GAIN */
+#define MAX98926_DAC_IN_SEL_MASK    (0x03<<5)
+#define MAX98926_DAC_IN_SEL_SHIFT   5
+#define MAX98926_DAC_IN_SEL_WIDTH   2
+#define MAX98926_SPK_GAIN_MASK      (0x1F<<0)
+#define MAX98926_SPK_GAIN_SHIFT     0
+#define MAX98926_SPK_GAIN_WIDTH     5
+
+#define MAX98926_DAC_IN_SEL_LEFT_DAI (0 << MAX98926_DAC_IN_SEL_SHIFT)
+#define MAX98926_DAC_IN_SEL_RIGHT_DAI (1 << MAX98926_DAC_IN_SEL_SHIFT)
+#define MAX98926_DAC_IN_SEL_SUMMED_DAI (2 << MAX98926_DAC_IN_SEL_SHIFT)
+#define MAX98926_DAC_IN_SEL_DIV2_SUMMED_DAI (3 << MAX98926_DAC_IN_SEL_SHIFT)
+
+/* MAX98926_R02E_GAIN_RAMPING */
+#define MAX98926_SPK_RMP_EN_MASK        (1<<1)
+#define MAX98926_SPK_RMP_EN_SHIFT       1
+#define MAX98926_SPK_RMP_EN_WIDTH       1
+#define MAX98926_SPK_ZCD_EN_MASK        (1<<0)
+#define MAX98926_SPK_ZCD_EN_SHIFT       0
+#define MAX98926_SPK_ZCD_EN_WIDTH       1
+
+/* MAX98926_R02F_SPK_AMP */
+#define MAX98926_SPK_MODE_MASK      (1<<0)
+#define MAX98926_SPK_MODE_SHIFT     0
+#define MAX98926_SPK_MODE_WIDTH     1
+#define MAX98926_INSELECT_MODE_MASK (1<<1)
+#define MAX98926_INSELECT_MODE_SHIFT    1
+#define MAX98926_INSELECT_MODE_WIDTH    1
+
+/* MAX98926_R030_THRESHOLD */
+#define MAX98926_ALC_EN_MASK            (1<<5)
+#define MAX98926_ALC_EN_SHIFT           5
+#define MAX98926_ALC_EN_WIDTH           1
+#define MAX98926_ALC_TH_MASK            (0x1F<<0)
+#define MAX98926_ALC_TH_SHIFT           0
+#define MAX98926_ALC_TH_WIDTH           5
+
+/* MAX98926_R031_ALC_ATTACK */
+#define MAX98926_ALC_ATK_STEP_MASK  (0x0F<<4)
+#define MAX98926_ALC_ATK_STEP_SHIFT 4
+#define MAX98926_ALC_ATK_STEP_WIDTH 4
+#define MAX98926_ALC_ATK_RATE_MASK  (0x7<<0)
+#define MAX98926_ALC_ATK_RATE_SHIFT 0
+#define MAX98926_ALC_ATK_RATE_WIDTH 3
+
+/* MAX98926_R032_ALC_ATTEN_RLS */
+#define MAX98926_ALC_MAX_ATTEN_MASK (0x0F<<4)
+#define MAX98926_ALC_MAX_ATTEN_SHIFT    4
+#define MAX98926_ALC_MAX_ATTEN_WIDTH    4
+#define MAX98926_ALC_RLS_RATE_MASK  (0x7<<0)
+#define MAX98926_ALC_RLS_RATE_SHIFT 0
+#define MAX98926_ALC_RLS_RATE_WIDTH 3
+
+/* MAX98926_R033_ALC_HOLD_RLS */
+#define MAX98926_ALC_RLS_TGR_MASK       (1<<0)
+#define MAX98926_ALC_RLS_TGR_SHIFT  0
+#define MAX98926_ALC_RLS_TGR_WIDTH  1
+
+/* MAX98926_R034_ALC_CONFIGURATION */
+#define MAX98926_ALC_MUTE_EN_MASK       (1<<7)
+#define MAX98926_ALC_MUTE_EN_SHIFT  7
+#define MAX98926_ALC_MUTE_EN_WIDTH  1
+#define MAX98926_ALC_MUTE_DLY_MASK  (0x07<<4)
+#define MAX98926_ALC_MUTE_DLY_SHIFT 4
+#define MAX98926_ALC_MUTE_DLY_WIDTH 3
+#define MAX98926_ALC_RLS_DBT_MASK       (0x07<<0)
+#define MAX98926_ALC_RLS_DBT_SHIFT  0
+#define MAX98926_ALC_RLS_DBT_WIDTH  3
+
+/* MAX98926_R035_BOOST_CONVERTER */
+#define MAX98926_BST_SYNC_MASK      (1<<7)
+#define MAX98926_BST_SYNC_SHIFT     7
+#define MAX98926_BST_SYNC_WIDTH     1
+#define MAX98926_BST_PHASE_MASK     (0x03<<4)
+#define MAX98926_BST_PHASE_SHIFT        4
+#define MAX98926_BST_PHASE_WIDTH        2
+#define MAX98926_BST_SKIP_MODE_MASK (0x03<<0)
+#define MAX98926_BST_SKIP_MODE_SHIFT    0
+#define MAX98926_BST_SKIP_MODE_WIDTH    2
+
+/* MAX98926_R036_BLOCK_ENABLE */
+#define MAX98926_BST_EN_MASK            (1<<7)
+#define MAX98926_BST_EN_SHIFT           7
+#define MAX98926_BST_EN_WIDTH           1
+#define MAX98926_WATCH_EN_MASK      (1<<6)
+#define MAX98926_WATCH_EN_SHIFT     6
+#define MAX98926_WATCH_EN_WIDTH     1
+#define MAX98926_CLKMON_EN_MASK     (1<<5)
+#define MAX98926_CLKMON_EN_SHIFT        5
+#define MAX98926_CLKMON_EN_WIDTH        1
+#define MAX98926_SPK_EN_MASK            (1<<4)
+#define MAX98926_SPK_EN_SHIFT           4
+#define MAX98926_SPK_EN_WIDTH           1
+#define MAX98926_ADC_VBST_EN_MASK       (1<<3)
+#define MAX98926_ADC_VBST_EN_SHIFT  3
+#define MAX98926_ADC_VBST_EN_WIDTH  1
+#define MAX98926_ADC_VBAT_EN_MASK       (1<<2)
+#define MAX98926_ADC_VBAT_EN_SHIFT  2
+#define MAX98926_ADC_VBAT_EN_WIDTH  1
+#define MAX98926_ADC_IMON_EN_MASK       (1<<1)
+#define MAX98926_ADC_IMON_EN_SHIFT  1
+#define MAX98926_ADC_IMON_EN_WIDTH  1
+#define MAX98926_ADC_VMON_EN_MASK       (1<<0)
+#define MAX98926_ADC_VMON_EN_SHIFT  0
+#define MAX98926_ADC_VMON_EN_WIDTH  1
+
+/* MAX98926_R037_CONFIGURATION */
+#define MAX98926_BST_VOUT_MASK      (0x0F<<4)
+#define MAX98926_BST_VOUT_SHIFT     4
+#define MAX98926_BST_VOUT_WIDTH     4
+#define MAX98926_THERMWARN_LEVEL_MASK   (0x03<<2)
+#define MAX98926_THERMWARN_LEVEL_SHIFT          2
+#define MAX98926_THERMWARN_LEVEL_WIDTH          2
+#define MAX98926_WATCH_TIME_MASK            (0x03<<0)
+#define MAX98926_WATCH_TIME_SHIFT           0
+#define MAX98926_WATCH_TIME_WIDTH           2
+
+/* MAX98926_R038_GLOBAL_ENABLE */
+#define MAX98926_EN_MASK            (1<<7)
+#define MAX98926_EN_SHIFT           7
+#define MAX98926_EN_WIDTH           1
+
+/* MAX98926_R03A_BOOST_LIMITER */
+#define MAX98926_BST_ILIM_MASK  (0xF<<4)
+#define MAX98926_BST_ILIM_SHIFT 4
+#define MAX98926_BST_ILIM_WIDTH 4
+
+/* MAX98926_R0FF_VERSION */
+#define MAX98926_REV_ID_MASK    (0xFF<<0)
+#define MAX98926_REV_ID_SHIFT   0
+#define MAX98926_REV_ID_WIDTH   8
+
+struct max98926_priv {
+       struct regmap *regmap;
+       struct snd_soc_codec *codec;
+       unsigned int sysclk;
+       unsigned int v_slot;
+       unsigned int i_slot;
+       unsigned int ch_size;
+       unsigned int interleave_mode;
+};
+#endif
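
The _MASK/_SHIFT/slot macros above are laid out for regmap field updates. A minimal sketch of how a caller would combine them, assuming the register index macro MAX98926_R02D_GAIN defined earlier in this header and the driver's regmap handle; the 0x14 gain code is purely illustrative:

	/* route the summed DAI data to the DAC and pick a speaker gain code */
	regmap_update_bits(max98926->regmap, MAX98926_R02D_GAIN,
			   MAX98926_DAC_IN_SEL_MASK | MAX98926_SPK_GAIN_MASK,
			   MAX98926_DAC_IN_SEL_SUMMED_DAI |
			   (0x14 << MAX98926_SPK_GAIN_SHIFT));
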
diff --git a/sound/soc/codecs/pcm179x-i2c.c b/sound/soc/codecs/pcm179x-i2c.c
new file mode 100644 (file)
index 0000000..4118106
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * PCM179X ASoC I2C driver
+ *
+ * Copyright (c) Teenage Engineering AB 2016
+ *
+ *     Jacob Siverskog <jacob@teenage.engineering>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+
+#include "pcm179x.h"
+
+static int pcm179x_i2c_probe(struct i2c_client *client,
+                             const struct i2c_device_id *id)
+{
+       struct regmap *regmap;
+       int ret;
+
+       regmap = devm_regmap_init_i2c(client, &pcm179x_regmap_config);
+       if (IS_ERR(regmap)) {
+               ret = PTR_ERR(regmap);
+               dev_err(&client->dev, "Failed to allocate regmap: %d\n", ret);
+               return ret;
+       }
+
+       return pcm179x_common_init(&client->dev, regmap);
+}
+
+static int pcm179x_i2c_remove(struct i2c_client *client)
+{
+       return pcm179x_common_exit(&client->dev);
+}
+
+static const struct of_device_id pcm179x_of_match[] = {
+       { .compatible = "ti,pcm1792a", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, pcm179x_of_match);
+
+static const struct i2c_device_id pcm179x_i2c_ids[] = {
+       { "pcm179x", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, pcm179x_i2c_ids);
+
+static struct i2c_driver pcm179x_i2c_driver = {
+       .driver = {
+               .name   = "pcm179x",
+               .of_match_table = of_match_ptr(pcm179x_of_match),
+       },
+       .id_table       = pcm179x_i2c_ids,
+       .probe          = pcm179x_i2c_probe,
+       .remove         = pcm179x_i2c_remove,
+};
+
+module_i2c_driver(pcm179x_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC PCM179X I2C driver");
+MODULE_AUTHOR("Jacob Siverskog <jacob@teenage.engineering>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/pcm179x-spi.c b/sound/soc/codecs/pcm179x-spi.c
new file mode 100644 (file)
index 0000000..da924d4
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * PCM179X ASoC SPI driver
+ *
+ * Copyright (c) Amarula Solutions B.V. 2013
+ *
+ *     Michael Trimarchi <michael@amarulasolutions.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+
+#include "pcm179x.h"
+
+static int pcm179x_spi_probe(struct spi_device *spi)
+{
+       struct regmap *regmap;
+       int ret;
+
+       regmap = devm_regmap_init_spi(spi, &pcm179x_regmap_config);
+       if (IS_ERR(regmap)) {
+               ret = PTR_ERR(regmap);
+               dev_err(&spi->dev, "Failed to allocate regmap: %d\n", ret);
+               return ret;
+       }
+
+       return pcm179x_common_init(&spi->dev, regmap);
+}
+
+static int pcm179x_spi_remove(struct spi_device *spi)
+{
+       return pcm179x_common_exit(&spi->dev);
+}
+
+static const struct of_device_id pcm179x_of_match[] = {
+       { .compatible = "ti,pcm1792a", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, pcm179x_of_match);
+
+static const struct spi_device_id pcm179x_spi_ids[] = {
+       { "pcm179x", 0 },
+       { },
+};
+MODULE_DEVICE_TABLE(spi, pcm179x_spi_ids);
+
+static struct spi_driver pcm179x_spi_driver = {
+       .driver = {
+               .name = "pcm179x",
+               .of_match_table = of_match_ptr(pcm179x_of_match),
+       },
+       .id_table = pcm179x_spi_ids,
+       .probe = pcm179x_spi_probe,
+       .remove = pcm179x_spi_remove,
+};
+
+module_spi_driver(pcm179x_spi_driver);
+
+MODULE_DESCRIPTION("ASoC PCM179X SPI driver");
+MODULE_AUTHOR("Michael Trimarchi <michael@amarulasolutions.com>");
+MODULE_LICENSE("GPL");
index a56c7b767d900cf8298d6f9f095b86740a74ca10..06a66579ca6d34964044c0ac297333a805b2a3c3 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
-#include <linux/spi/spi.h>
 
 #include <sound/core.h>
 #include <sound/pcm.h>
@@ -29,7 +28,6 @@
 #include <sound/soc.h>
 #include <sound/tlv.h>
 #include <linux/of.h>
-#include <linux/of_device.h>
 
 #include "pcm179x.h"
 
@@ -189,18 +187,14 @@ static struct snd_soc_dai_driver pcm179x_dai = {
                .stream_name = "Playback",
                .channels_min = 2,
                .channels_max = 2,
-               .rates = PCM1792A_RATES,
+               .rates = SNDRV_PCM_RATE_CONTINUOUS,
+               .rate_min = 10000,
+               .rate_max = 200000,
                .formats = PCM1792A_FORMATS, },
        .ops = &pcm179x_dai_ops,
 };
 
-static const struct of_device_id pcm179x_of_match[] = {
-       { .compatible = "ti,pcm1792a", },
-       { }
-};
-MODULE_DEVICE_TABLE(of, pcm179x_of_match);
-
-static const struct regmap_config pcm179x_regmap = {
+const struct regmap_config pcm179x_regmap_config = {
        .reg_bits               = 8,
        .val_bits               = 8,
        .max_register           = 23,
@@ -209,6 +203,7 @@ static const struct regmap_config pcm179x_regmap = {
        .writeable_reg          = pcm179x_writeable_reg,
        .readable_reg           = pcm179x_accessible_reg,
 };
+EXPORT_SYMBOL_GPL(pcm179x_regmap_config);
 
 static struct snd_soc_codec_driver soc_codec_dev_pcm179x = {
        .controls               = pcm179x_controls,
@@ -219,52 +214,29 @@ static struct snd_soc_codec_driver soc_codec_dev_pcm179x = {
        .num_dapm_routes        = ARRAY_SIZE(pcm179x_dapm_routes),
 };
 
-static int pcm179x_spi_probe(struct spi_device *spi)
+int pcm179x_common_init(struct device *dev, struct regmap *regmap)
 {
        struct pcm179x_private *pcm179x;
-       int ret;
 
-       pcm179x = devm_kzalloc(&spi->dev, sizeof(struct pcm179x_private),
+       pcm179x = devm_kzalloc(dev, sizeof(struct pcm179x_private),
                                GFP_KERNEL);
        if (!pcm179x)
                return -ENOMEM;
 
-       spi_set_drvdata(spi, pcm179x);
-
-       pcm179x->regmap = devm_regmap_init_spi(spi, &pcm179x_regmap);
-       if (IS_ERR(pcm179x->regmap)) {
-               ret = PTR_ERR(pcm179x->regmap);
-               dev_err(&spi->dev, "Failed to register regmap: %d\n", ret);
-               return ret;
-       }
+       pcm179x->regmap = regmap;
+       dev_set_drvdata(dev, pcm179x);
 
-       return snd_soc_register_codec(&spi->dev,
+       return snd_soc_register_codec(dev,
                        &soc_codec_dev_pcm179x, &pcm179x_dai, 1);
 }
+EXPORT_SYMBOL_GPL(pcm179x_common_init);
 
-static int pcm179x_spi_remove(struct spi_device *spi)
+int pcm179x_common_exit(struct device *dev)
 {
-       snd_soc_unregister_codec(&spi->dev);
+       snd_soc_unregister_codec(dev);
        return 0;
 }
-
-static const struct spi_device_id pcm179x_spi_ids[] = {
-       { "pcm179x", 0 },
-       { },
-};
-MODULE_DEVICE_TABLE(spi, pcm179x_spi_ids);
-
-static struct spi_driver pcm179x_codec_driver = {
-       .driver = {
-               .name = "pcm179x",
-               .of_match_table = of_match_ptr(pcm179x_of_match),
-       },
-       .id_table = pcm179x_spi_ids,
-       .probe = pcm179x_spi_probe,
-       .remove = pcm179x_spi_remove,
-};
-
-module_spi_driver(pcm179x_codec_driver);
+EXPORT_SYMBOL_GPL(pcm179x_common_exit);
 
 MODULE_DESCRIPTION("ASoC PCM179X driver");
 MODULE_AUTHOR("Michael Trimarchi <michael@amarulasolutions.com>");
index c6fdc062a497ef080ee3e4225ba90328e1c390cc..11e331268aae27c2523a9e12dfb4c44ce3967dd0 100644 (file)
 #ifndef __PCM179X_H__
 #define __PCM179X_H__
 
-#define PCM1792A_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_8000_48000 | \
-                       SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
-                       SNDRV_PCM_RATE_192000)
-
 #define PCM1792A_FORMATS (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | \
                          SNDRV_PCM_FMTBIT_S16_LE)
 
+extern const struct regmap_config pcm179x_regmap_config;
+
+int pcm179x_common_init(struct device *dev, struct regmap *regmap);
+int pcm179x_common_exit(struct device *dev);
+
 #endif
index 44b268aa4dd88e0cee6ac4c4a60663d51df0d1ab..0c7248ab6a374498c01980d40d82a4f03843180c 100644 (file)
@@ -299,10 +299,15 @@ static int pcm3168a_set_dai_sysclk(struct snd_soc_dai *dai,
                                  int clk_id, unsigned int freq, int dir)
 {
        struct pcm3168a_priv *pcm3168a = snd_soc_codec_get_drvdata(dai->codec);
+       int ret;
 
        if (freq > PCM1368A_MAX_SYSCLK)
                return -EINVAL;
 
+       ret = clk_set_rate(pcm3168a->scki, freq);
+       if (ret)
+               return ret;
+
        pcm3168a->sysclk = freq;
 
        return 0;
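
The added clk_set_rate() call forwards the requested SCKI frequency to the clock framework before caching it. A machine driver would typically request the rate from its hw_params hook, roughly as below; the 512*fs ratio and the rtd plumbing are illustrative assumptions, not taken from this patch:

	static int board_hw_params(struct snd_pcm_substream *substream,
				   struct snd_pcm_hw_params *params)
	{
		struct snd_soc_pcm_runtime *rtd = substream->private_data;
		unsigned int rate = params_rate(params) * 512;

		/* the codec's set_sysclk now propagates this rate via clk_set_rate() */
		return snd_soc_dai_set_sysclk(rtd->codec_dai, 0, rate,
					      SND_SOC_CLOCK_IN);
	}
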
index bc08f0c5a5f69fe4a184ac86e611e4df56eabb5c..1bd31644a782eac26042fccf47bfbc65e380b63f 100644 (file)
@@ -266,6 +266,8 @@ static int rt286_jack_detect(struct rt286_priv *rt286, bool *hp, bool *mic)
                } else {
                        *mic = false;
                        regmap_write(rt286->regmap, RT286_SET_MIC1, 0x20);
+                       regmap_update_bits(rt286->regmap,
+                               RT286_CBJ_CTRL1, 0x0400, 0x0000);
                }
        } else {
                regmap_read(rt286->regmap, RT286_GET_HP_SENSE, &buf);
@@ -470,24 +472,6 @@ static int rt286_set_dmic1_event(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
-static int rt286_vref_event(struct snd_soc_dapm_widget *w,
-                            struct snd_kcontrol *kcontrol, int event)
-{
-       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
-       switch (event) {
-       case SND_SOC_DAPM_PRE_PMU:
-               snd_soc_update_bits(codec,
-                       RT286_CBJ_CTRL1, 0x0400, 0x0000);
-               mdelay(50);
-               break;
-       default:
-               return 0;
-       }
-
-       return 0;
-}
-
 static int rt286_ldo2_event(struct snd_soc_dapm_widget *w,
                             struct snd_kcontrol *kcontrol, int event)
 {
@@ -536,7 +520,7 @@ static const struct snd_soc_dapm_widget rt286_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY_S("HV", 1, RT286_POWER_CTRL1,
                12, 1, NULL, 0),
        SND_SOC_DAPM_SUPPLY("VREF", RT286_POWER_CTRL1,
-               0, 1, rt286_vref_event, SND_SOC_DAPM_PRE_PMU),
+               0, 1, NULL, 0),
        SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT286_POWER_CTRL2,
                2, 0, NULL, 0),
        SND_SOC_DAPM_SUPPLY_S("LDO2", 2, RT286_POWER_CTRL1,
@@ -910,8 +894,6 @@ static int rt286_set_bias_level(struct snd_soc_codec *codec,
 
        case SND_SOC_BIAS_ON:
                mdelay(10);
-               snd_soc_update_bits(codec,
-                       RT286_CBJ_CTRL1, 0x0400, 0x0400);
                snd_soc_update_bits(codec,
                        RT286_DC_GAIN, 0x200, 0x0);
 
@@ -920,8 +902,6 @@ static int rt286_set_bias_level(struct snd_soc_codec *codec,
        case SND_SOC_BIAS_STANDBY:
                snd_soc_write(codec,
                        RT286_SET_AUDIO_POWER, AC_PWRST_D3);
-               snd_soc_update_bits(codec,
-                       RT286_CBJ_CTRL1, 0x0400, 0x0000);
                break;
 
        default:
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c
new file mode 100644 (file)
index 0000000..879bf60
--- /dev/null
@@ -0,0 +1,982 @@
+/*
+ * rt5514.c  --  RT5514 ALSA SoC audio codec driver
+ *
+ * Copyright 2015 Realtek Semiconductor Corp.
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include "rl6231.h"
+#include "rt5514.h"
+
+static const struct reg_sequence rt5514_i2c_patch[] = {
+       {0x1800101c, 0x00000000},
+       {0x18001100, 0x0000031f},
+       {0x18001104, 0x00000007},
+       {0x18001108, 0x00000000},
+       {0x1800110c, 0x00000000},
+       {0x18001110, 0x00000000},
+       {0x18001114, 0x00000001},
+       {0x18001118, 0x00000000},
+       {0x18002f08, 0x00000006},
+       {0x18002f00, 0x00055149},
+       {0x18002f00, 0x0005514b},
+       {0x18002f00, 0x00055149},
+       {0xfafafafa, 0x00000001},
+       {0x18002f10, 0x00000001},
+       {0x18002f10, 0x00000000},
+       {0x18002f10, 0x00000001},
+       {0xfafafafa, 0x00000001},
+       {0x18002000, 0x000010ec},
+       {0xfafafafa, 0x00000000},
+};
+
+static const struct reg_sequence rt5514_patch[] = {
+       {RT5514_DIG_IO_CTRL,            0x00000040},
+       {RT5514_CLK_CTRL1,              0x38020041},
+       {RT5514_SRC_CTRL,               0x44000eee},
+       {RT5514_ANA_CTRL_LDO10,         0x00028604},
+       {RT5514_ANA_CTRL_ADCFED,        0x00000800},
+};
+
+static const struct reg_default rt5514_reg[] = {
+       {RT5514_RESET,                  0x00000000},
+       {RT5514_PWR_ANA1,               0x00808880},
+       {RT5514_PWR_ANA2,               0x00220000},
+       {RT5514_I2S_CTRL1,              0x00000330},
+       {RT5514_I2S_CTRL2,              0x20000000},
+       {RT5514_VAD_CTRL6,              0xc00007d2},
+       {RT5514_EXT_VAD_CTRL,           0x80000080},
+       {RT5514_DIG_IO_CTRL,            0x00000040},
+       {RT5514_PAD_CTRL1,              0x00804000},
+       {RT5514_DMIC_DATA_CTRL,         0x00000005},
+       {RT5514_DIG_SOURCE_CTRL,        0x00000002},
+       {RT5514_SRC_CTRL,               0x44000eee},
+       {RT5514_DOWNFILTER2_CTRL1,      0x0000882f},
+       {RT5514_PLL_SOURCE_CTRL,        0x00000004},
+       {RT5514_CLK_CTRL1,              0x38020041},
+       {RT5514_CLK_CTRL2,              0x00000000},
+       {RT5514_PLL3_CALIB_CTRL1,       0x00400200},
+       {RT5514_PLL3_CALIB_CTRL5,       0x40220012},
+       {RT5514_DELAY_BUF_CTRL1,        0x7fff006a},
+       {RT5514_DELAY_BUF_CTRL3,        0x00000000},
+       {RT5514_DOWNFILTER0_CTRL1,      0x00020c2f},
+       {RT5514_DOWNFILTER0_CTRL2,      0x00020c2f},
+       {RT5514_DOWNFILTER0_CTRL3,      0x00000362},
+       {RT5514_DOWNFILTER1_CTRL1,      0x00020c2f},
+       {RT5514_DOWNFILTER1_CTRL2,      0x00020c2f},
+       {RT5514_DOWNFILTER1_CTRL3,      0x00000362},
+       {RT5514_ANA_CTRL_LDO10,         0x00028604},
+       {RT5514_ANA_CTRL_LDO18_16,      0x02000345},
+       {RT5514_ANA_CTRL_ADC12,         0x0000a2a8},
+       {RT5514_ANA_CTRL_ADC21,         0x00001180},
+       {RT5514_ANA_CTRL_ADC22,         0x0000aaa8},
+       {RT5514_ANA_CTRL_ADC23,         0x00151427},
+       {RT5514_ANA_CTRL_MICBST,        0x00002000},
+       {RT5514_ANA_CTRL_ADCFED,        0x00000800},
+       {RT5514_ANA_CTRL_INBUF,         0x00000143},
+       {RT5514_ANA_CTRL_VREF,          0x00008d50},
+       {RT5514_ANA_CTRL_PLL3,          0x0000000e},
+       {RT5514_ANA_CTRL_PLL1_1,        0x00000000},
+       {RT5514_ANA_CTRL_PLL1_2,        0x00030220},
+       {RT5514_DMIC_LP_CTRL,           0x00000000},
+       {RT5514_MISC_CTRL_DSP,          0x00000000},
+       {RT5514_DSP_CTRL1,              0x00055149},
+       {RT5514_DSP_CTRL3,              0x00000006},
+       {RT5514_DSP_CTRL4,              0x00000001},
+       {RT5514_VENDOR_ID1,             0x00000001},
+       {RT5514_VENDOR_ID2,             0x10ec5514},
+};
+
+static bool rt5514_volatile_register(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case RT5514_VENDOR_ID1:
+       case RT5514_VENDOR_ID2:
+               return true;
+
+       default:
+               return false;
+       }
+}
+
+static bool rt5514_readable_register(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case RT5514_RESET:
+       case RT5514_PWR_ANA1:
+       case RT5514_PWR_ANA2:
+       case RT5514_I2S_CTRL1:
+       case RT5514_I2S_CTRL2:
+       case RT5514_VAD_CTRL6:
+       case RT5514_EXT_VAD_CTRL:
+       case RT5514_DIG_IO_CTRL:
+       case RT5514_PAD_CTRL1:
+       case RT5514_DMIC_DATA_CTRL:
+       case RT5514_DIG_SOURCE_CTRL:
+       case RT5514_SRC_CTRL:
+       case RT5514_DOWNFILTER2_CTRL1:
+       case RT5514_PLL_SOURCE_CTRL:
+       case RT5514_CLK_CTRL1:
+       case RT5514_CLK_CTRL2:
+       case RT5514_PLL3_CALIB_CTRL1:
+       case RT5514_PLL3_CALIB_CTRL5:
+       case RT5514_DELAY_BUF_CTRL1:
+       case RT5514_DELAY_BUF_CTRL3:
+       case RT5514_DOWNFILTER0_CTRL1:
+       case RT5514_DOWNFILTER0_CTRL2:
+       case RT5514_DOWNFILTER0_CTRL3:
+       case RT5514_DOWNFILTER1_CTRL1:
+       case RT5514_DOWNFILTER1_CTRL2:
+       case RT5514_DOWNFILTER1_CTRL3:
+       case RT5514_ANA_CTRL_LDO10:
+       case RT5514_ANA_CTRL_LDO18_16:
+       case RT5514_ANA_CTRL_ADC12:
+       case RT5514_ANA_CTRL_ADC21:
+       case RT5514_ANA_CTRL_ADC22:
+       case RT5514_ANA_CTRL_ADC23:
+       case RT5514_ANA_CTRL_MICBST:
+       case RT5514_ANA_CTRL_ADCFED:
+       case RT5514_ANA_CTRL_INBUF:
+       case RT5514_ANA_CTRL_VREF:
+       case RT5514_ANA_CTRL_PLL3:
+       case RT5514_ANA_CTRL_PLL1_1:
+       case RT5514_ANA_CTRL_PLL1_2:
+       case RT5514_DMIC_LP_CTRL:
+       case RT5514_MISC_CTRL_DSP:
+       case RT5514_DSP_CTRL1:
+       case RT5514_DSP_CTRL3:
+       case RT5514_DSP_CTRL4:
+       case RT5514_VENDOR_ID1:
+       case RT5514_VENDOR_ID2:
+               return true;
+
+       default:
+               return false;
+       }
+}
+
+static bool rt5514_i2c_readable_register(struct device *dev,
+       unsigned int reg)
+{
+       switch (reg) {
+       case RT5514_DSP_MAPPING | RT5514_RESET:
+       case RT5514_DSP_MAPPING | RT5514_PWR_ANA1:
+       case RT5514_DSP_MAPPING | RT5514_PWR_ANA2:
+       case RT5514_DSP_MAPPING | RT5514_I2S_CTRL1:
+       case RT5514_DSP_MAPPING | RT5514_I2S_CTRL2:
+       case RT5514_DSP_MAPPING | RT5514_VAD_CTRL6:
+       case RT5514_DSP_MAPPING | RT5514_EXT_VAD_CTRL:
+       case RT5514_DSP_MAPPING | RT5514_DIG_IO_CTRL:
+       case RT5514_DSP_MAPPING | RT5514_PAD_CTRL1:
+       case RT5514_DSP_MAPPING | RT5514_DMIC_DATA_CTRL:
+       case RT5514_DSP_MAPPING | RT5514_DIG_SOURCE_CTRL:
+       case RT5514_DSP_MAPPING | RT5514_SRC_CTRL:
+       case RT5514_DSP_MAPPING | RT5514_DOWNFILTER2_CTRL1:
+       case RT5514_DSP_MAPPING | RT5514_PLL_SOURCE_CTRL:
+       case RT5514_DSP_MAPPING | RT5514_CLK_CTRL1:
+       case RT5514_DSP_MAPPING | RT5514_CLK_CTRL2:
+       case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL1:
+       case RT5514_DSP_MAPPING | RT5514_PLL3_CALIB_CTRL5:
+       case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL1:
+       case RT5514_DSP_MAPPING | RT5514_DELAY_BUF_CTRL3:
+       case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL1:
+       case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL2:
+       case RT5514_DSP_MAPPING | RT5514_DOWNFILTER0_CTRL3:
+       case RT5514_DSP_MAPPING | RT5514_DOWNFILTER1_CTRL1:
+       case RT5514_DSP_MAPPING | RT5514_DOWNFILTER1_CTRL2:
+       case RT5514_DSP_MAPPING | RT5514_DOWNFILTER1_CTRL3:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_LDO10:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_LDO18_16:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_ADC12:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_ADC21:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_ADC22:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_ADC23:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_MICBST:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_ADCFED:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_INBUF:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_VREF:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_PLL3:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_PLL1_1:
+       case RT5514_DSP_MAPPING | RT5514_ANA_CTRL_PLL1_2:
+       case RT5514_DSP_MAPPING | RT5514_DMIC_LP_CTRL:
+       case RT5514_DSP_MAPPING | RT5514_MISC_CTRL_DSP:
+       case RT5514_DSP_MAPPING | RT5514_DSP_CTRL1:
+       case RT5514_DSP_MAPPING | RT5514_DSP_CTRL3:
+       case RT5514_DSP_MAPPING | RT5514_DSP_CTRL4:
+       case RT5514_DSP_MAPPING | RT5514_VENDOR_ID1:
+       case RT5514_DSP_MAPPING | RT5514_VENDOR_ID2:
+               return true;
+
+       default:
+               return false;
+       }
+}
+
+/* {-3, 0, +3, +4.5, +7.5, +9.5, +12, +14, +17} dB */
+static const DECLARE_TLV_DB_RANGE(bst_tlv,
+       0, 2, TLV_DB_SCALE_ITEM(-300, 300, 0),
+       3, 3, TLV_DB_SCALE_ITEM(450, 0, 0),
+       4, 4, TLV_DB_SCALE_ITEM(750, 0, 0),
+       5, 5, TLV_DB_SCALE_ITEM(950, 0, 0),
+       6, 6, TLV_DB_SCALE_ITEM(1200, 0, 0),
+       7, 7, TLV_DB_SCALE_ITEM(1400, 0, 0),
+       8, 8, TLV_DB_SCALE_ITEM(1700, 0, 0)
+);
+
+static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
+
+static const struct snd_kcontrol_new rt5514_snd_controls[] = {
+       SOC_DOUBLE_TLV("MIC Boost Volume", RT5514_ANA_CTRL_MICBST,
+               RT5514_SEL_BSTL_SFT, RT5514_SEL_BSTR_SFT, 8, 0, bst_tlv),
+       SOC_DOUBLE_R_TLV("ADC1 Capture Volume", RT5514_DOWNFILTER0_CTRL1,
+               RT5514_DOWNFILTER0_CTRL2, RT5514_AD_GAIN_SFT, 127, 0,
+               adc_vol_tlv),
+       SOC_DOUBLE_R_TLV("ADC2 Capture Volume", RT5514_DOWNFILTER1_CTRL1,
+               RT5514_DOWNFILTER1_CTRL2, RT5514_AD_GAIN_SFT, 127, 0,
+               adc_vol_tlv),
+};
+
+/* ADC Mixer*/
+static const struct snd_kcontrol_new rt5514_sto1_adc_l_mix[] = {
+       SOC_DAPM_SINGLE("DMIC Switch", RT5514_DOWNFILTER0_CTRL1,
+               RT5514_AD_DMIC_MIX_BIT, 1, 1),
+       SOC_DAPM_SINGLE("ADC Switch", RT5514_DOWNFILTER0_CTRL1,
+               RT5514_AD_AD_MIX_BIT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5514_sto1_adc_r_mix[] = {
+       SOC_DAPM_SINGLE("DMIC Switch", RT5514_DOWNFILTER0_CTRL2,
+               RT5514_AD_DMIC_MIX_BIT, 1, 1),
+       SOC_DAPM_SINGLE("ADC Switch", RT5514_DOWNFILTER0_CTRL2,
+               RT5514_AD_AD_MIX_BIT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5514_sto2_adc_l_mix[] = {
+       SOC_DAPM_SINGLE("DMIC Switch", RT5514_DOWNFILTER1_CTRL1,
+               RT5514_AD_DMIC_MIX_BIT, 1, 1),
+       SOC_DAPM_SINGLE("ADC Switch", RT5514_DOWNFILTER1_CTRL1,
+               RT5514_AD_AD_MIX_BIT, 1, 1),
+};
+
+static const struct snd_kcontrol_new rt5514_sto2_adc_r_mix[] = {
+       SOC_DAPM_SINGLE("DMIC Switch", RT5514_DOWNFILTER1_CTRL2,
+               RT5514_AD_DMIC_MIX_BIT, 1, 1),
+       SOC_DAPM_SINGLE("ADC Switch", RT5514_DOWNFILTER1_CTRL2,
+               RT5514_AD_AD_MIX_BIT, 1, 1),
+};
+
+/* DMIC Source */
+static const char * const rt5514_dmic_src[] = {
+       "DMIC1", "DMIC2"
+};
+
+static SOC_ENUM_SINGLE_DECL(
+       rt5514_stereo1_dmic_enum, RT5514_DIG_SOURCE_CTRL,
+       RT5514_AD0_DMIC_INPUT_SEL_SFT, rt5514_dmic_src);
+
+static const struct snd_kcontrol_new rt5514_sto1_dmic_mux =
+       SOC_DAPM_ENUM("Stereo1 DMIC Source", rt5514_stereo1_dmic_enum);
+
+static SOC_ENUM_SINGLE_DECL(
+       rt5514_stereo2_dmic_enum, RT5514_DIG_SOURCE_CTRL,
+       RT5514_AD1_DMIC_INPUT_SEL_SFT, rt5514_dmic_src);
+
+static const struct snd_kcontrol_new rt5514_sto2_dmic_mux =
+       SOC_DAPM_ENUM("Stereo2 DMIC Source", rt5514_stereo2_dmic_enum);
+
+/**
+ * rt5514_calc_dmic_clk - Calculate the frequency divider parameter of the DMIC.
+ *
+ * @codec: the codec instance, used only for diagnostics.
+ * @rate: base clock rate in Hz.
+ *
+ * Choose the divider parameter that gives the highest possible DMIC frequency
+ * in the 1 MHz - 3.072 MHz range; returns -EINVAL if @rate is out of range.
+ */
+static int rt5514_calc_dmic_clk(struct snd_soc_codec *codec, int rate)
+{
+       int div[] = {2, 3, 4, 8, 12, 16, 24, 32};
+       int i;
+
+       if (rate < 1000000 * div[0]) {
+               pr_warn("Base clock rate %d is too low\n", rate);
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ARRAY_SIZE(div); i++) {
+               /* find divider that gives DMIC frequency below 3.072MHz */
+               if (3072000 * div[i] >= rate)
+                       return i;
+       }
+
+       dev_warn(codec->dev, "Base clock rate %d is too high\n", rate);
+       return -EINVAL;
+}
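+
+/*
+ * For example (numbers are illustrative): with a 24.576 MHz system clock the
+ * first divider satisfying 3.072 MHz * div >= rate is div[3] = 8, so index 3
+ * is returned and the DMIC clock becomes 24.576 MHz / 8 = 3.072 MHz.
+ */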
+
+static int rt5514_set_dmic_clk(struct snd_soc_dapm_widget *w,
+       struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+       int idx;
+
+       idx = rt5514_calc_dmic_clk(codec, rt5514->sysclk);
+       if (idx < 0)
+               dev_err(codec->dev, "Failed to set DMIC clock\n");
+       else
+               regmap_update_bits(rt5514->regmap, RT5514_CLK_CTRL1,
+                       RT5514_CLK_DMIC_OUT_SEL_MASK,
+                       idx << RT5514_CLK_DMIC_OUT_SEL_SFT);
+
+       return idx;
+}
+
+static int rt5514_is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
+                        struct snd_soc_dapm_widget *sink)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
+       struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+
+       if (rt5514->sysclk_src == RT5514_SCLK_S_PLL1)
+               return 1;
+       else
+               return 0;
+}
+
+static const struct snd_soc_dapm_widget rt5514_dapm_widgets[] = {
+       /* Input Lines */
+       SND_SOC_DAPM_INPUT("DMIC1L"),
+       SND_SOC_DAPM_INPUT("DMIC1R"),
+       SND_SOC_DAPM_INPUT("DMIC2L"),
+       SND_SOC_DAPM_INPUT("DMIC2R"),
+
+       SND_SOC_DAPM_INPUT("AMICL"),
+       SND_SOC_DAPM_INPUT("AMICR"),
+
+       SND_SOC_DAPM_PGA("DMIC1", SND_SOC_NOPM, 0, 0, NULL, 0),
+       SND_SOC_DAPM_PGA("DMIC2", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+       SND_SOC_DAPM_SUPPLY("DMIC CLK", SND_SOC_NOPM, 0, 0,
+               rt5514_set_dmic_clk, SND_SOC_DAPM_PRE_PMU),
+
+       SND_SOC_DAPM_SUPPLY("ADC CLK", RT5514_CLK_CTRL1,
+               RT5514_CLK_AD_ANA1_EN_BIT, 0, NULL, 0),
+
+       SND_SOC_DAPM_SUPPLY("LDO18 IN", RT5514_PWR_ANA1,
+               RT5514_POW_LDO18_IN_BIT, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("LDO18 ADC", RT5514_PWR_ANA1,
+               RT5514_POW_LDO18_ADC_BIT, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("LDO21", RT5514_PWR_ANA1, RT5514_POW_LDO21_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("BG LDO18 IN", RT5514_PWR_ANA1,
+               RT5514_POW_BG_LDO18_IN_BIT, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("BG LDO21", RT5514_PWR_ANA1,
+               RT5514_POW_BG_LDO21_BIT, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("BG MBIAS", RT5514_PWR_ANA2,
+               RT5514_POW_BG_MBIAS_BIT, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("MBIAS", RT5514_PWR_ANA2, RT5514_POW_MBIAS_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("VREF2", RT5514_PWR_ANA2, RT5514_POW_VREF2_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("VREF1", RT5514_PWR_ANA2, RT5514_POW_VREF1_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("ADC Power", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+       SND_SOC_DAPM_SUPPLY("LDO16L", RT5514_PWR_ANA2, RT5514_POWL_LDO16_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("ADC1L", RT5514_PWR_ANA2, RT5514_POW_ADC1_L_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("BSTL2", RT5514_PWR_ANA2, RT5514_POW2_BSTL_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("BSTL", RT5514_PWR_ANA2, RT5514_POW_BSTL_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("ADCFEDL", RT5514_PWR_ANA2, RT5514_POW_ADCFEDL_BIT,
+               0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("ADCL Power", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+       SND_SOC_DAPM_SUPPLY("LDO16R", RT5514_PWR_ANA2, RT5514_POWR_LDO16_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("ADC1R", RT5514_PWR_ANA2, RT5514_POW_ADC1_R_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("BSTR2", RT5514_PWR_ANA2, RT5514_POW2_BSTR_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("BSTR", RT5514_PWR_ANA2, RT5514_POW_BSTR_BIT, 0,
+               NULL, 0),
+       SND_SOC_DAPM_SUPPLY("ADCFEDR", RT5514_PWR_ANA2, RT5514_POW_ADCFEDR_BIT,
+               0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("ADCR Power", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+       SND_SOC_DAPM_SUPPLY("PLL1 LDO ENABLE", RT5514_ANA_CTRL_PLL1_2,
+               RT5514_EN_LDO_PLL1_BIT, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("PLL1 LDO", RT5514_PWR_ANA2,
+               RT5514_POW_PLL1_LDO_BIT, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("PLL1", RT5514_PWR_ANA2, RT5514_POW_PLL1_BIT, 0,
+               NULL, 0),
+
+       /* ADC Mux */
+       SND_SOC_DAPM_MUX("Stereo1 DMIC Mux", SND_SOC_NOPM, 0, 0,
+                               &rt5514_sto1_dmic_mux),
+       SND_SOC_DAPM_MUX("Stereo2 DMIC Mux", SND_SOC_NOPM, 0, 0,
+                               &rt5514_sto2_dmic_mux),
+
+       /* ADC Mixer */
+       SND_SOC_DAPM_SUPPLY("adc stereo1 filter", RT5514_CLK_CTRL1,
+               RT5514_CLK_AD0_EN_BIT, 0, NULL, 0),
+       SND_SOC_DAPM_SUPPLY("adc stereo2 filter", RT5514_CLK_CTRL1,
+               RT5514_CLK_AD1_EN_BIT, 0, NULL, 0),
+
+       SND_SOC_DAPM_MIXER("Sto1 ADC MIXL", SND_SOC_NOPM, 0, 0,
+               rt5514_sto1_adc_l_mix, ARRAY_SIZE(rt5514_sto1_adc_l_mix)),
+       SND_SOC_DAPM_MIXER("Sto1 ADC MIXR", SND_SOC_NOPM, 0, 0,
+               rt5514_sto1_adc_r_mix, ARRAY_SIZE(rt5514_sto1_adc_r_mix)),
+       SND_SOC_DAPM_MIXER("Sto2 ADC MIXL", SND_SOC_NOPM, 0, 0,
+               rt5514_sto2_adc_l_mix, ARRAY_SIZE(rt5514_sto2_adc_l_mix)),
+       SND_SOC_DAPM_MIXER("Sto2 ADC MIXR", SND_SOC_NOPM, 0, 0,
+               rt5514_sto2_adc_r_mix, ARRAY_SIZE(rt5514_sto2_adc_r_mix)),
+
+       SND_SOC_DAPM_ADC("Stereo1 ADC MIXL", NULL, RT5514_DOWNFILTER0_CTRL1,
+               RT5514_AD_AD_MUTE_BIT, 1),
+       SND_SOC_DAPM_ADC("Stereo1 ADC MIXR", NULL, RT5514_DOWNFILTER0_CTRL2,
+               RT5514_AD_AD_MUTE_BIT, 1),
+       SND_SOC_DAPM_ADC("Stereo2 ADC MIXL", NULL, RT5514_DOWNFILTER1_CTRL1,
+               RT5514_AD_AD_MUTE_BIT, 1),
+       SND_SOC_DAPM_ADC("Stereo2 ADC MIXR", NULL, RT5514_DOWNFILTER1_CTRL2,
+               RT5514_AD_AD_MUTE_BIT, 1),
+
+       /* ADC PGA */
+       SND_SOC_DAPM_PGA("Stereo1 ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+       SND_SOC_DAPM_PGA("Stereo2 ADC MIX", SND_SOC_NOPM, 0, 0, NULL, 0),
+
+       /* Audio Interface */
+       SND_SOC_DAPM_AIF_OUT("AIF1TX", "AIF1 Capture", 0, SND_SOC_NOPM, 0, 0),
+};
+
+static const struct snd_soc_dapm_route rt5514_dapm_routes[] = {
+       { "DMIC1", NULL, "DMIC1L" },
+       { "DMIC1", NULL, "DMIC1R" },
+       { "DMIC2", NULL, "DMIC2L" },
+       { "DMIC2", NULL, "DMIC2R" },
+
+       { "DMIC1L", NULL, "DMIC CLK" },
+       { "DMIC1R", NULL, "DMIC CLK" },
+       { "DMIC2L", NULL, "DMIC CLK" },
+       { "DMIC2R", NULL, "DMIC CLK" },
+
+       { "Stereo1 DMIC Mux", "DMIC1", "DMIC1" },
+       { "Stereo1 DMIC Mux", "DMIC2", "DMIC2" },
+
+       { "Sto1 ADC MIXL", "DMIC Switch", "Stereo1 DMIC Mux" },
+       { "Sto1 ADC MIXL", "ADC Switch", "AMICL" },
+       { "Sto1 ADC MIXR", "DMIC Switch", "Stereo1 DMIC Mux" },
+       { "Sto1 ADC MIXR", "ADC Switch", "AMICR" },
+
+       { "ADC Power", NULL, "LDO18 IN" },
+       { "ADC Power", NULL, "LDO18 ADC" },
+       { "ADC Power", NULL, "LDO21" },
+       { "ADC Power", NULL, "BG LDO18 IN" },
+       { "ADC Power", NULL, "BG LDO21" },
+       { "ADC Power", NULL, "BG MBIAS" },
+       { "ADC Power", NULL, "MBIAS" },
+       { "ADC Power", NULL, "VREF2" },
+       { "ADC Power", NULL, "VREF1" },
+
+       { "ADCL Power", NULL, "LDO16L" },
+       { "ADCL Power", NULL, "ADC1L" },
+       { "ADCL Power", NULL, "BSTL2" },
+       { "ADCL Power", NULL, "BSTL" },
+       { "ADCL Power", NULL, "ADCFEDL" },
+
+       { "ADCR Power", NULL, "LDO16R" },
+       { "ADCR Power", NULL, "ADC1R" },
+       { "ADCR Power", NULL, "BSTR2" },
+       { "ADCR Power", NULL, "BSTR" },
+       { "ADCR Power", NULL, "ADCFEDR" },
+
+       { "AMICL", NULL, "ADC CLK" },
+       { "AMICL", NULL, "ADC Power" },
+       { "AMICL", NULL, "ADCL Power" },
+       { "AMICR", NULL, "ADC CLK" },
+       { "AMICR", NULL, "ADC Power" },
+       { "AMICR", NULL, "ADCR Power" },
+
+       { "PLL1 LDO", NULL, "PLL1 LDO ENABLE" },
+       { "PLL1", NULL, "PLL1 LDO" },
+
+       { "Stereo1 ADC MIXL", NULL, "Sto1 ADC MIXL" },
+       { "Stereo1 ADC MIXR", NULL, "Sto1 ADC MIXR" },
+
+       { "Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXL" },
+       { "Stereo1 ADC MIX", NULL, "Stereo1 ADC MIXR" },
+       { "Stereo1 ADC MIX", NULL, "adc stereo1 filter" },
+       { "adc stereo1 filter", NULL, "PLL1", rt5514_is_sys_clk_from_pll },
+
+       { "Stereo2 DMIC Mux", "DMIC1", "DMIC1" },
+       { "Stereo2 DMIC Mux", "DMIC2", "DMIC2" },
+
+       { "Sto2 ADC MIXL", "DMIC Switch", "Stereo2 DMIC Mux" },
+       { "Sto2 ADC MIXL", "ADC Switch", "AMICL" },
+       { "Sto2 ADC MIXR", "DMIC Switch", "Stereo2 DMIC Mux" },
+       { "Sto2 ADC MIXR", "ADC Switch", "AMICR" },
+
+       { "Stereo2 ADC MIXL", NULL, "Sto2 ADC MIXL" },
+       { "Stereo2 ADC MIXR", NULL, "Sto2 ADC MIXR" },
+
+       { "Stereo2 ADC MIX", NULL, "Stereo2 ADC MIXL" },
+       { "Stereo2 ADC MIX", NULL, "Stereo2 ADC MIXR" },
+       { "Stereo2 ADC MIX", NULL, "adc stereo2 filter" },
+       { "adc stereo2 filter", NULL, "PLL1", rt5514_is_sys_clk_from_pll },
+
+       { "AIF1TX", NULL, "Stereo1 ADC MIX"},
+       { "AIF1TX", NULL, "Stereo2 ADC MIX"},
+};
+
+static int rt5514_hw_params(struct snd_pcm_substream *substream,
+       struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+       int pre_div, bclk_ms, frame_size;
+       unsigned int val_len = 0;
+
+       rt5514->lrck = params_rate(params);
+       pre_div = rl6231_get_clk_info(rt5514->sysclk, rt5514->lrck);
+       if (pre_div < 0) {
+               dev_err(codec->dev, "Unsupported clock setting\n");
+               return -EINVAL;
+       }
+
+       frame_size = snd_soc_params_to_frame_size(params);
+       if (frame_size < 0) {
+               dev_err(codec->dev, "Unsupported frame size: %d\n", frame_size);
+               return -EINVAL;
+       }
+
+       bclk_ms = frame_size > 32;
+       rt5514->bclk = rt5514->lrck * (32 << bclk_ms);
+
+       dev_dbg(dai->dev, "bclk is %dHz and lrck is %dHz\n",
+               rt5514->bclk, rt5514->lrck);
+       dev_dbg(dai->dev, "bclk_ms is %d and pre_div is %d for iis %d\n",
+                               bclk_ms, pre_div, dai->id);
+
+       switch (params_format(params)) {
+       case SNDRV_PCM_FORMAT_S16_LE:
+               break;
+       case SNDRV_PCM_FORMAT_S20_3LE:
+               val_len = RT5514_I2S_DL_20;
+               break;
+       case SNDRV_PCM_FORMAT_S24_LE:
+               val_len = RT5514_I2S_DL_24;
+               break;
+       case SNDRV_PCM_FORMAT_S8:
+               val_len = RT5514_I2S_DL_8;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       regmap_update_bits(rt5514->regmap, RT5514_I2S_CTRL1, RT5514_I2S_DL_MASK,
+               val_len);
+       regmap_update_bits(rt5514->regmap, RT5514_CLK_CTRL2,
+               RT5514_CLK_SYS_DIV_OUT_MASK | RT5514_SEL_ADC_OSR_MASK,
+               pre_div << RT5514_CLK_SYS_DIV_OUT_SFT |
+               pre_div << RT5514_SEL_ADC_OSR_SFT);
+
+       return 0;
+}
+
+static int rt5514_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+       unsigned int reg_val = 0;
+
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               break;
+
+       case SND_SOC_DAIFMT_NB_IF:
+               reg_val |= RT5514_I2S_LR_INV;
+               break;
+
+       case SND_SOC_DAIFMT_IB_NF:
+               reg_val |= RT5514_I2S_BP_INV;
+               break;
+
+       case SND_SOC_DAIFMT_IB_IF:
+               reg_val |= RT5514_I2S_BP_INV | RT5514_I2S_LR_INV;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               break;
+
+       case SND_SOC_DAIFMT_LEFT_J:
+               reg_val |= RT5514_I2S_DF_LEFT;
+               break;
+
+       case SND_SOC_DAIFMT_DSP_A:
+               reg_val |= RT5514_I2S_DF_PCM_A;
+               break;
+
+       case SND_SOC_DAIFMT_DSP_B:
+               reg_val |= RT5514_I2S_DF_PCM_B;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       regmap_update_bits(rt5514->regmap, RT5514_I2S_CTRL1,
+               RT5514_I2S_DF_MASK | RT5514_I2S_BP_MASK | RT5514_I2S_LR_MASK,
+               reg_val);
+
+       return 0;
+}
+
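+/*
+ * Typical machine-driver usage (illustrative, not part of this patch):
+ *	snd_soc_dai_set_sysclk(codec_dai, RT5514_SCLK_S_MCLK, 24576000,
+ *			       SND_SOC_CLOCK_IN);
+ */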
+static int rt5514_set_dai_sysclk(struct snd_soc_dai *dai,
+               int clk_id, unsigned int freq, int dir)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+       unsigned int reg_val = 0;
+
+       if (freq == rt5514->sysclk && clk_id == rt5514->sysclk_src)
+               return 0;
+
+       switch (clk_id) {
+       case RT5514_SCLK_S_MCLK:
+               reg_val |= RT5514_CLK_SYS_PRE_SEL_MCLK;
+               break;
+
+       case RT5514_SCLK_S_PLL1:
+               reg_val |= RT5514_CLK_SYS_PRE_SEL_PLL;
+               break;
+
+       default:
+               dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
+               return -EINVAL;
+       }
+
+       regmap_update_bits(rt5514->regmap, RT5514_CLK_CTRL2,
+               RT5514_CLK_SYS_PRE_SEL_MASK, reg_val);
+
+       rt5514->sysclk = freq;
+       rt5514->sysclk_src = clk_id;
+
+       dev_dbg(dai->dev, "Sysclk is %dHz and clock id is %d\n", freq, clk_id);
+
+       return 0;
+}
+
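+/*
+ * Illustrative call from a machine driver, deriving the system clock from the
+ * bit clock (the rates are examples only):
+ *	snd_soc_dai_set_pll(codec_dai, 0, RT5514_PLL1_S_BCLK,
+ *			    3072000, 24576000);
+ * followed by selecting RT5514_SCLK_S_PLL1 with snd_soc_dai_set_sysclk().
+ */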
+static int rt5514_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
+                       unsigned int freq_in, unsigned int freq_out)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+       struct rl6231_pll_code pll_code;
+       int ret;
+
+       if (!freq_in || !freq_out) {
+               dev_dbg(codec->dev, "PLL disabled\n");
+
+               rt5514->pll_in = 0;
+               rt5514->pll_out = 0;
+               regmap_update_bits(rt5514->regmap, RT5514_CLK_CTRL2,
+                       RT5514_CLK_SYS_PRE_SEL_MASK,
+                       RT5514_CLK_SYS_PRE_SEL_MCLK);
+
+               return 0;
+       }
+
+       if (source == rt5514->pll_src && freq_in == rt5514->pll_in &&
+           freq_out == rt5514->pll_out)
+               return 0;
+
+       switch (source) {
+       case RT5514_PLL1_S_MCLK:
+               regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL,
+                       RT5514_PLL_1_SEL_MASK, RT5514_PLL_1_SEL_MCLK);
+               break;
+
+       case RT5514_PLL1_S_BCLK:
+               regmap_update_bits(rt5514->regmap, RT5514_PLL_SOURCE_CTRL,
+                       RT5514_PLL_1_SEL_MASK, RT5514_PLL_1_SEL_SCLK);
+               break;
+
+       default:
+               dev_err(codec->dev, "Unknown PLL source %d\n", source);
+               return -EINVAL;
+       }
+
+       ret = rl6231_pll_calc(freq_in, freq_out, &pll_code);
+       if (ret < 0) {
+               dev_err(codec->dev, "Unsupported input clock %d\n", freq_in);
+               return ret;
+       }
+
+       dev_dbg(codec->dev, "bypass=%d m=%d n=%d k=%d\n",
+               pll_code.m_bp, (pll_code.m_bp ? 0 : pll_code.m_code),
+               pll_code.n_code, pll_code.k_code);
+
+       regmap_write(rt5514->regmap, RT5514_ANA_CTRL_PLL1_1,
+               pll_code.k_code << RT5514_PLL_K_SFT |
+               pll_code.n_code << RT5514_PLL_N_SFT |
+               (pll_code.m_bp ? 0 : pll_code.m_code) << RT5514_PLL_M_SFT);
+       regmap_update_bits(rt5514->regmap, RT5514_ANA_CTRL_PLL1_2,
+               RT5514_PLL_M_BP, pll_code.m_bp << RT5514_PLL_M_BP_SFT);
+
+       rt5514->pll_in = freq_in;
+       rt5514->pll_out = freq_out;
+       rt5514->pll_src = source;
+
+       return 0;
+}
+
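+/*
+ * Illustrative TDM setup from a machine driver: four 32-bit slots with the
+ * codec transmitting on all of them (masks and width are examples only):
+ *	snd_soc_dai_set_tdm_slot(codec_dai, 0xf, 0x0, 4, 32);
+ */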
+static int rt5514_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask,
+                       unsigned int rx_mask, int slots, int slot_width)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+       unsigned int val = 0;
+
+       if (rx_mask || tx_mask)
+               val |= RT5514_TDM_MODE;
+
+       if (slots == 4)
+               val |= RT5514_TDMSLOT_SEL_RX_4CH | RT5514_TDMSLOT_SEL_TX_4CH;
+
+       switch (slot_width) {
+       case 20:
+               val |= RT5514_CH_LEN_RX_20 | RT5514_CH_LEN_TX_20;
+               break;
+
+       case 24:
+               val |= RT5514_CH_LEN_RX_24 | RT5514_CH_LEN_TX_24;
+               break;
+
+       case 32:
+               val |= RT5514_CH_LEN_RX_32 | RT5514_CH_LEN_TX_32;
+               break;
+
+       case 16:
+       default:
+               break;
+       }
+
+       regmap_update_bits(rt5514->regmap, RT5514_I2S_CTRL1, RT5514_TDM_MODE |
+               RT5514_TDMSLOT_SEL_RX_MASK | RT5514_TDMSLOT_SEL_TX_MASK |
+               RT5514_CH_LEN_RX_MASK | RT5514_CH_LEN_TX_MASK, val);
+
+       return 0;
+}
+
+static int rt5514_probe(struct snd_soc_codec *codec)
+{
+       struct rt5514_priv *rt5514 = snd_soc_codec_get_drvdata(codec);
+
+       rt5514->codec = codec;
+
+       return 0;
+}
+
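+/*
+ * The reg_read/reg_write helpers below expose the 16-bit register map on top
+ * of the raw 32-bit I2C map by OR-ing in RT5514_DSP_MAPPING; for example, a
+ * regmap_read() of RT5514_VENDOR_ID2 (0x2ff4) goes out on the bus as address
+ * 0x18002ff4.
+ */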
+static int rt5514_i2c_read(void *context, unsigned int reg, unsigned int *val)
+{
+       struct i2c_client *client = context;
+       struct rt5514_priv *rt5514 = i2c_get_clientdata(client);
+
+       regmap_read(rt5514->i2c_regmap, reg | RT5514_DSP_MAPPING, val);
+
+       return 0;
+}
+
+static int rt5514_i2c_write(void *context, unsigned int reg, unsigned int val)
+{
+       struct i2c_client *client = context;
+       struct rt5514_priv *rt5514 = i2c_get_clientdata(client);
+
+       regmap_write(rt5514->i2c_regmap, reg | RT5514_DSP_MAPPING, val);
+
+       return 0;
+}
+
+#define RT5514_STEREO_RATES SNDRV_PCM_RATE_8000_192000
+#define RT5514_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
+                       SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
+
+static struct snd_soc_dai_ops rt5514_aif_dai_ops = {
+       .hw_params = rt5514_hw_params,
+       .set_fmt = rt5514_set_dai_fmt,
+       .set_sysclk = rt5514_set_dai_sysclk,
+       .set_pll = rt5514_set_dai_pll,
+       .set_tdm_slot = rt5514_set_tdm_slot,
+};
+
+static struct snd_soc_dai_driver rt5514_dai[] = {
+       {
+               .name = "rt5514-aif1",
+               .id = 0,
+               .capture = {
+                       .stream_name = "AIF1 Capture",
+                       .channels_min = 1,
+                       .channels_max = 4,
+                       .rates = RT5514_STEREO_RATES,
+                       .formats = RT5514_FORMATS,
+               },
+               .ops = &rt5514_aif_dai_ops,
+       }
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_rt5514 = {
+       .probe = rt5514_probe,
+       .idle_bias_off = true,
+       .controls = rt5514_snd_controls,
+       .num_controls = ARRAY_SIZE(rt5514_snd_controls),
+       .dapm_widgets = rt5514_dapm_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(rt5514_dapm_widgets),
+       .dapm_routes = rt5514_dapm_routes,
+       .num_dapm_routes = ARRAY_SIZE(rt5514_dapm_routes),
+};
+
+static const struct regmap_config rt5514_i2c_regmap = {
+       .name = "i2c",
+       .reg_bits = 32,
+       .val_bits = 32,
+
+       .max_register = RT5514_DSP_MAPPING | RT5514_VENDOR_ID2,
+       .readable_reg = rt5514_i2c_readable_register,
+
+       .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_config rt5514_regmap = {
+       .reg_bits = 16,
+       .val_bits = 32,
+
+       .max_register = RT5514_VENDOR_ID2,
+       .volatile_reg = rt5514_volatile_register,
+       .readable_reg = rt5514_readable_register,
+       .reg_read = rt5514_i2c_read,
+       .reg_write = rt5514_i2c_write,
+
+       .cache_type = REGCACHE_RBTREE,
+       .reg_defaults = rt5514_reg,
+       .num_reg_defaults = ARRAY_SIZE(rt5514_reg),
+       .use_single_rw = true,
+};
+
+static const struct i2c_device_id rt5514_i2c_id[] = {
+       { "rt5514", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, rt5514_i2c_id);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id rt5514_of_match[] = {
+       { .compatible = "realtek,rt5514", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, rt5514_of_match);
+#endif
+
+static int rt5514_i2c_probe(struct i2c_client *i2c,
+                   const struct i2c_device_id *id)
+{
+       struct rt5514_priv *rt5514;
+       int ret;
+       unsigned int val;
+
+       rt5514 = devm_kzalloc(&i2c->dev, sizeof(struct rt5514_priv),
+                               GFP_KERNEL);
+       if (rt5514 == NULL)
+               return -ENOMEM;
+
+       i2c_set_clientdata(i2c, rt5514);
+
+       rt5514->i2c_regmap = devm_regmap_init_i2c(i2c, &rt5514_i2c_regmap);
+       if (IS_ERR(rt5514->i2c_regmap)) {
+               ret = PTR_ERR(rt5514->i2c_regmap);
+               dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+                       ret);
+               return ret;
+       }
+
+       rt5514->regmap = devm_regmap_init(&i2c->dev, NULL, i2c, &rt5514_regmap);
+       if (IS_ERR(rt5514->regmap)) {
+               ret = PTR_ERR(rt5514->regmap);
+               dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+                       ret);
+               return ret;
+       }
+
+       regmap_read(rt5514->regmap, RT5514_VENDOR_ID2, &val);
+       if (val != RT5514_DEVICE_ID) {
+               dev_err(&i2c->dev,
+                       "Device with ID register %x is not rt5514\n", val);
+               return -ENODEV;
+       }
+
+       ret = regmap_register_patch(rt5514->i2c_regmap, rt5514_i2c_patch,
+                                   ARRAY_SIZE(rt5514_i2c_patch));
+       if (ret != 0)
+               dev_warn(&i2c->dev, "Failed to apply i2c_regmap patch: %d\n",
+                       ret);
+
+       ret = regmap_register_patch(rt5514->regmap, rt5514_patch,
+                                   ARRAY_SIZE(rt5514_patch));
+       if (ret != 0)
+               dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
+
+       return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5514,
+                       rt5514_dai, ARRAY_SIZE(rt5514_dai));
+}
+
+static int rt5514_i2c_remove(struct i2c_client *i2c)
+{
+       snd_soc_unregister_codec(&i2c->dev);
+
+       return 0;
+}
+
+static struct i2c_driver rt5514_i2c_driver = {
+       .driver = {
+               .name = "rt5514",
+               .of_match_table = of_match_ptr(rt5514_of_match),
+       },
+       .probe = rt5514_i2c_probe,
+       .remove   = rt5514_i2c_remove,
+       .id_table = rt5514_i2c_id,
+};
+module_i2c_driver(rt5514_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC RT5514 driver");
+MODULE_AUTHOR("Oder Chiou <oder_chiou@realtek.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h
new file mode 100644 (file)
index 0000000..6ad8a61
--- /dev/null
@@ -0,0 +1,252 @@
+/*
+ * rt5514.h  --  RT5514 ALSA SoC audio driver
+ *
+ * Copyright 2015 Realtek Microelectronics
+ * Author: Oder Chiou <oder_chiou@realtek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __RT5514_H__
+#define __RT5514_H__
+
+#define RT5514_DEVICE_ID                       0x10ec5514
+
+#define RT5514_RESET                           0x2000
+#define RT5514_PWR_ANA1                                0x2004
+#define RT5514_PWR_ANA2                                0x2008
+#define RT5514_I2S_CTRL1                       0x2010
+#define RT5514_I2S_CTRL2                       0x2014
+#define RT5514_VAD_CTRL6                       0x2030
+#define RT5514_EXT_VAD_CTRL                    0x206c
+#define RT5514_DIG_IO_CTRL                     0x2070
+#define RT5514_PAD_CTRL1                       0x2080
+#define RT5514_DMIC_DATA_CTRL                  0x20a0
+#define RT5514_DIG_SOURCE_CTRL                 0x20a4
+#define RT5514_SRC_CTRL                                0x20ac
+#define RT5514_DOWNFILTER2_CTRL1               0x20d0
+#define RT5514_PLL_SOURCE_CTRL                 0x2100
+#define RT5514_CLK_CTRL1                       0x2104
+#define RT5514_CLK_CTRL2                       0x2108
+#define RT5514_PLL3_CALIB_CTRL1                        0x2110
+#define RT5514_PLL3_CALIB_CTRL5                        0x2124
+#define RT5514_DELAY_BUF_CTRL1                 0x2140
+#define RT5514_DELAY_BUF_CTRL3                 0x2148
+#define RT5514_DOWNFILTER0_CTRL1               0x2190
+#define RT5514_DOWNFILTER0_CTRL2               0x2194
+#define RT5514_DOWNFILTER0_CTRL3               0x2198
+#define RT5514_DOWNFILTER1_CTRL1               0x21a0
+#define RT5514_DOWNFILTER1_CTRL2               0x21a4
+#define RT5514_DOWNFILTER1_CTRL3               0x21a8
+#define RT5514_ANA_CTRL_LDO10                  0x2200
+#define RT5514_ANA_CTRL_LDO18_16               0x2204
+#define RT5514_ANA_CTRL_ADC12                  0x2210
+#define RT5514_ANA_CTRL_ADC21                  0x2214
+#define RT5514_ANA_CTRL_ADC22                  0x2218
+#define RT5514_ANA_CTRL_ADC23                  0x221c
+#define RT5514_ANA_CTRL_MICBST                 0x2220
+#define RT5514_ANA_CTRL_ADCFED                 0x2224
+#define RT5514_ANA_CTRL_INBUF                  0x2228
+#define RT5514_ANA_CTRL_VREF                   0x222c
+#define RT5514_ANA_CTRL_PLL3                   0x2240
+#define RT5514_ANA_CTRL_PLL1_1                 0x2260
+#define RT5514_ANA_CTRL_PLL1_2                 0x2264
+#define RT5514_DMIC_LP_CTRL                    0x2e00
+#define RT5514_MISC_CTRL_DSP                   0x2e04
+#define RT5514_DSP_CTRL1                       0x2f00
+#define RT5514_DSP_CTRL3                       0x2f08
+#define RT5514_DSP_CTRL4                       0x2f10
+#define RT5514_VENDOR_ID1                      0x2ff0
+#define RT5514_VENDOR_ID2                      0x2ff4
+
+#define RT5514_DSP_MAPPING                     0x18000000
+
+/* RT5514_PWR_ANA1 (0x2004) */
+#define RT5514_POW_LDO18_IN                    (0x1 << 5)
+#define RT5514_POW_LDO18_IN_BIT                        5
+#define RT5514_POW_LDO18_ADC                   (0x1 << 4)
+#define RT5514_POW_LDO18_ADC_BIT               4
+#define RT5514_POW_LDO21                       (0x1 << 3)
+#define RT5514_POW_LDO21_BIT                   3
+#define RT5514_POW_BG_LDO18_IN                 (0x1 << 2)
+#define RT5514_POW_BG_LDO18_IN_BIT             2
+#define RT5514_POW_BG_LDO21                    (0x1 << 1)
+#define RT5514_POW_BG_LDO21_BIT                        1
+
+/* RT5514_PWR_ANA2 (0x2008) */
+#define RT5514_POW_PLL1                                (0x1 << 18)
+#define RT5514_POW_PLL1_BIT                    18
+#define RT5514_POW_PLL1_LDO                    (0x1 << 16)
+#define RT5514_POW_PLL1_LDO_BIT                        16
+#define RT5514_POW_BG_MBIAS                    (0x1 << 15)
+#define RT5514_POW_BG_MBIAS_BIT                        15
+#define RT5514_POW_MBIAS                       (0x1 << 14)
+#define RT5514_POW_MBIAS_BIT                   14
+#define RT5514_POW_VREF2                       (0x1 << 13)
+#define RT5514_POW_VREF2_BIT                   13
+#define RT5514_POW_VREF1                       (0x1 << 12)
+#define RT5514_POW_VREF1_BIT                   12
+#define RT5514_POWR_LDO16                      (0x1 << 11)
+#define RT5514_POWR_LDO16_BIT                  11
+#define RT5514_POWL_LDO16                      (0x1 << 10)
+#define RT5514_POWL_LDO16_BIT                  10
+#define RT5514_POW_ADC2                                (0x1 << 9)
+#define RT5514_POW_ADC2_BIT                    9
+#define RT5514_POW_INPUT_BUF                   (0x1 << 8)
+#define RT5514_POW_INPUT_BUF_BIT               8
+#define RT5514_POW_ADC1_R                      (0x1 << 7)
+#define RT5514_POW_ADC1_R_BIT                  7
+#define RT5514_POW_ADC1_L                      (0x1 << 6)
+#define RT5514_POW_ADC1_L_BIT                  6
+#define RT5514_POW2_BSTR                       (0x1 << 5)
+#define RT5514_POW2_BSTR_BIT                   5
+#define RT5514_POW2_BSTL                       (0x1 << 4)
+#define RT5514_POW2_BSTL_BIT                   4
+#define RT5514_POW_BSTR                                (0x1 << 3)
+#define RT5514_POW_BSTR_BIT                    3
+#define RT5514_POW_BSTL                                (0x1 << 2)
+#define RT5514_POW_BSTL_BIT                    2
+#define RT5514_POW_ADCFEDR                     (0x1 << 1)
+#define RT5514_POW_ADCFEDR_BIT                 1
+#define RT5514_POW_ADCFEDL                     (0x1 << 0)
+#define RT5514_POW_ADCFEDL_BIT                 0
+
+/* RT5514_I2S_CTRL1 (0x2010) */
+#define RT5514_TDM_MODE                                (0x1 << 28)
+#define RT5514_TDM_MODE_SFT                    28
+#define RT5514_I2S_LR_MASK                     (0x1 << 26)
+#define RT5514_I2S_LR_SFT                      26
+#define RT5514_I2S_LR_NOR                      (0x0 << 26)
+#define RT5514_I2S_LR_INV                      (0x1 << 26)
+#define RT5514_I2S_BP_MASK                     (0x1 << 25)
+#define RT5514_I2S_BP_SFT                      25
+#define RT5514_I2S_BP_NOR                      (0x0 << 25)
+#define RT5514_I2S_BP_INV                      (0x1 << 25)
+#define RT5514_I2S_DF_MASK                     (0x7 << 16)
+#define RT5514_I2S_DF_SFT                      16
+#define RT5514_I2S_DF_I2S                      (0x0 << 16)
+#define RT5514_I2S_DF_LEFT                     (0x1 << 16)
+#define RT5514_I2S_DF_PCM_A                    (0x2 << 16)
+#define RT5514_I2S_DF_PCM_B                    (0x3 << 16)
+#define RT5514_TDMSLOT_SEL_RX_MASK             (0x3 << 10)
+#define RT5514_TDMSLOT_SEL_RX_SFT              10
+#define RT5514_TDMSLOT_SEL_RX_4CH              (0x1 << 10)
+#define RT5514_CH_LEN_RX_MASK                  (0x3 << 8)
+#define RT5514_CH_LEN_RX_SFT                   8
+#define RT5514_CH_LEN_RX_16                    (0x0 << 8)
+#define RT5514_CH_LEN_RX_20                    (0x1 << 8)
+#define RT5514_CH_LEN_RX_24                    (0x2 << 8)
+#define RT5514_CH_LEN_RX_32                    (0x3 << 8)
+#define RT5514_TDMSLOT_SEL_TX_MASK             (0x3 << 6)
+#define RT5514_TDMSLOT_SEL_TX_SFT              6
+#define RT5514_TDMSLOT_SEL_TX_4CH              (0x1 << 6)
+#define RT5514_CH_LEN_TX_MASK                  (0x3 << 4)
+#define RT5514_CH_LEN_TX_SFT                   4
+#define RT5514_CH_LEN_TX_16                    (0x0 << 4)
+#define RT5514_CH_LEN_TX_20                    (0x1 << 4)
+#define RT5514_CH_LEN_TX_24                    (0x2 << 4)
+#define RT5514_CH_LEN_TX_32                    (0x3 << 4)
+#define RT5514_I2S_DL_MASK                     (0x3 << 0)
+#define RT5514_I2S_DL_SFT                      0
+#define RT5514_I2S_DL_16                       (0x0 << 0)
+#define RT5514_I2S_DL_20                       (0x1 << 0)
+#define RT5514_I2S_DL_24                       (0x2 << 0)
+#define RT5514_I2S_DL_8                                (0x3 << 0)
+
+/* RT5514_DIG_SOURCE_CTRL (0x20a4) */
+#define RT5514_AD1_DMIC_INPUT_SEL              (0x1 << 1)
+#define RT5514_AD1_DMIC_INPUT_SEL_SFT          1
+#define RT5514_AD0_DMIC_INPUT_SEL              (0x1 << 0)
+#define RT5514_AD0_DMIC_INPUT_SEL_SFT          0
+
+/* RT5514_PLL_SOURCE_CTRL (0x2100) */
+#define RT5514_PLL_1_SEL_MASK                  (0x7 << 12)
+#define RT5514_PLL_1_SEL_SFT                   12
+#define RT5514_PLL_1_SEL_SCLK                  (0x3 << 12)
+#define RT5514_PLL_1_SEL_MCLK                  (0x4 << 12)
+
+/* RT5514_CLK_CTRL1 (0x2104) */
+#define RT5514_CLK_AD_ANA1_EN                  (0x1 << 31)
+#define RT5514_CLK_AD_ANA1_EN_BIT              31
+#define RT5514_CLK_AD1_EN                      (0x1 << 24)
+#define RT5514_CLK_AD1_EN_BIT                  24
+#define RT5514_CLK_AD0_EN                      (0x1 << 23)
+#define RT5514_CLK_AD0_EN_BIT                  23
+#define RT5514_CLK_DMIC_OUT_SEL_MASK           (0x7 << 8)
+#define RT5514_CLK_DMIC_OUT_SEL_SFT            8
+
+/* RT5514_CLK_CTRL2 (0x2108) */
+#define RT5514_CLK_SYS_DIV_OUT_MASK            (0x7 << 8)
+#define RT5514_CLK_SYS_DIV_OUT_SFT             8
+#define RT5514_SEL_ADC_OSR_MASK                        (0x7 << 4)
+#define RT5514_SEL_ADC_OSR_SFT                 4
+#define RT5514_CLK_SYS_PRE_SEL_MASK            (0x3 << 0)
+#define RT5514_CLK_SYS_PRE_SEL_SFT             0
+#define RT5514_CLK_SYS_PRE_SEL_MCLK            (0x2 << 0)
+#define RT5514_CLK_SYS_PRE_SEL_PLL             (0x3 << 0)
+
+/*  RT5514_DOWNFILTER_CTRL (0x2190 0x2194 0x21a0 0x21a4) */
+#define RT5514_AD_DMIC_MIX                     (0x1 << 11)
+#define RT5514_AD_DMIC_MIX_BIT                 11
+#define RT5514_AD_AD_MIX                       (0x1 << 10)
+#define RT5514_AD_AD_MIX_BIT                   10
+#define RT5514_AD_AD_MUTE                      (0x1 << 7)
+#define RT5514_AD_AD_MUTE_BIT                  7
+#define RT5514_AD_GAIN_MASK                    (0x7f << 0)
+#define RT5514_AD_GAIN_SFT                     0
+
+/*  RT5514_ANA_CTRL_MICBST (0x2220) */
+#define RT5514_SEL_BSTL_MASK                   (0xf << 4)
+#define RT5514_SEL_BSTL_SFT                    4
+#define RT5514_SEL_BSTR_MASK                   (0xf << 0)
+#define RT5514_SEL_BSTR_SFT                    0
+
+/*  RT5514_ANA_CTRL_PLL1_1 (0x2260) */
+#define RT5514_PLL_K_MAX                       0x1f
+#define RT5514_PLL_K_MASK                      (RT5514_PLL_K_MAX << 16)
+#define RT5514_PLL_K_SFT                       16
+#define RT5514_PLL_N_MAX                       0x1ff
+#define RT5514_PLL_N_MASK                      (RT5514_PLL_N_MAX << 7)
+#define RT5514_PLL_N_SFT                       7
+#define RT5514_PLL_M_MAX                       0xf
+#define RT5514_PLL_M_MASK                      (RT5514_PLL_M_MAX << 0)
+#define RT5514_PLL_M_SFT                       0
+
+/*  RT5514_ANA_CTRL_PLL1_2 (0x2264) */
+#define RT5514_PLL_M_BP                                (0x1 << 2)
+#define RT5514_PLL_M_BP_SFT                    2
+#define RT5514_PLL_K_BP                                (0x1 << 1)
+#define RT5514_PLL_K_BP_SFT                    1
+#define RT5514_EN_LDO_PLL1                     (0x1 << 0)
+#define RT5514_EN_LDO_PLL1_BIT                 0
+
+#define RT5514_PLL_INP_MAX                     40000000
+#define RT5514_PLL_INP_MIN                     256000
+
+/* System Clock Source */
+enum {
+       RT5514_SCLK_S_MCLK,
+       RT5514_SCLK_S_PLL1,
+};
+
+/* PLL1 Source */
+enum {
+       RT5514_PLL1_S_MCLK,
+       RT5514_PLL1_S_BCLK,
+};
+
+struct rt5514_priv {
+       struct snd_soc_codec *codec;
+       struct regmap *i2c_regmap, *regmap;
+       int sysclk;
+       int sysclk_src;
+       int lrck;
+       int bclk;
+       int pll_src;
+       int pll_in;
+       int pll_out;
+};
+
+#endif /* __RT5514_H__ */
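
Each register field in this header comes as a *_MASK/*_SFT pair plus pre-shifted value constants, which maps directly onto regmap_update_bits(). A minimal usage sketch, assuming a driver that already owns the high-level regmap (the function name is illustrative):

#include <linux/regmap.h>
#include "rt5514.h"

/* Select MCLK as the PLL1 reference and the PLL output as the system
 * clock pre-source: the mask picks the field, the value constants in
 * the header are already shifted into place. */
static void rt5514_clock_example(struct regmap *regmap)
{
        regmap_update_bits(regmap, RT5514_PLL_SOURCE_CTRL,
                           RT5514_PLL_1_SEL_MASK, RT5514_PLL_1_SEL_MCLK);

        regmap_update_bits(regmap, RT5514_CLK_CTRL2,
                           RT5514_CLK_SYS_PRE_SEL_MASK,
                           RT5514_CLK_SYS_PRE_SEL_PLL);
}
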
index 1c10d8ed39d2f342a47f610c7be36a25b1ef8a51..3704d0716c04212143a056cde0e78cc50e2ac5c4 100644
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/init.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/i2c.h>
@@ -53,6 +54,7 @@ static const struct reg_sequence init_list[] = {
        {RT5616_PR_BASE + 0x21, 0x4040},
        {RT5616_PR_BASE + 0x23, 0x0004},
 };
+
 #define RT5616_INIT_REG_LEN ARRAY_SIZE(init_list)
 
 static const struct reg_default rt5616_reg[] = {
@@ -143,6 +145,7 @@ struct rt5616_priv {
        struct snd_soc_codec *codec;
        struct delayed_work patch_work;
        struct regmap *regmap;
+       struct clk *mclk;
 
        int sysclk;
        int sysclk_src;
@@ -162,9 +165,8 @@ static bool rt5616_volatile_register(struct device *dev, unsigned int reg)
 
        for (i = 0; i < ARRAY_SIZE(rt5616_ranges); i++) {
                if (reg >= rt5616_ranges[i].range_min &&
-                       reg <= rt5616_ranges[i].range_max) {
+                   reg <= rt5616_ranges[i].range_max)
                        return true;
-               }
        }
 
        switch (reg) {
@@ -190,9 +192,8 @@ static bool rt5616_readable_register(struct device *dev, unsigned int reg)
 
        for (i = 0; i < ARRAY_SIZE(rt5616_ranges); i++) {
                if (reg >= rt5616_ranges[i].range_min &&
-                       reg <= rt5616_ranges[i].range_max) {
+                   reg <= rt5616_ranges[i].range_max)
                        return true;
-               }
        }
 
        switch (reg) {
@@ -307,45 +308,45 @@ static unsigned int bst_tlv[] = {
 static const struct snd_kcontrol_new rt5616_snd_controls[] = {
        /* Headphone Output Volume */
        SOC_DOUBLE("HP Playback Switch", RT5616_HP_VOL,
-               RT5616_L_MUTE_SFT, RT5616_R_MUTE_SFT, 1, 1),
+                  RT5616_L_MUTE_SFT, RT5616_R_MUTE_SFT, 1, 1),
        SOC_DOUBLE_TLV("HP Playback Volume", RT5616_HP_VOL,
-               RT5616_L_VOL_SFT, RT5616_R_VOL_SFT, 39, 1, out_vol_tlv),
+                      RT5616_L_VOL_SFT, RT5616_R_VOL_SFT, 39, 1, out_vol_tlv),
        /* OUTPUT Control */
        SOC_DOUBLE("OUT Playback Switch", RT5616_LOUT_CTRL1,
-               RT5616_L_MUTE_SFT, RT5616_R_MUTE_SFT, 1, 1),
+                  RT5616_L_MUTE_SFT, RT5616_R_MUTE_SFT, 1, 1),
        SOC_DOUBLE("OUT Channel Switch", RT5616_LOUT_CTRL1,
-               RT5616_VOL_L_SFT, RT5616_VOL_R_SFT, 1, 1),
+                  RT5616_VOL_L_SFT, RT5616_VOL_R_SFT, 1, 1),
        SOC_DOUBLE_TLV("OUT Playback Volume", RT5616_LOUT_CTRL1,
-               RT5616_L_VOL_SFT, RT5616_R_VOL_SFT, 39, 1, out_vol_tlv),
+                      RT5616_L_VOL_SFT, RT5616_R_VOL_SFT, 39, 1, out_vol_tlv),
 
        /* DAC Digital Volume */
        SOC_DOUBLE_TLV("DAC1 Playback Volume", RT5616_DAC1_DIG_VOL,
-                       RT5616_L_VOL_SFT, RT5616_R_VOL_SFT,
-                       175, 0, dac_vol_tlv),
+                      RT5616_L_VOL_SFT, RT5616_R_VOL_SFT,
+                      175, 0, dac_vol_tlv),
        /* IN1/IN2 Control */
        SOC_SINGLE_TLV("IN1 Boost Volume", RT5616_IN1_IN2,
-               RT5616_BST_SFT1, 8, 0, bst_tlv),
+                      RT5616_BST_SFT1, 8, 0, bst_tlv),
        SOC_SINGLE_TLV("IN2 Boost Volume", RT5616_IN1_IN2,
-               RT5616_BST_SFT2, 8, 0, bst_tlv),
+                      RT5616_BST_SFT2, 8, 0, bst_tlv),
        /* INL/INR Volume Control */
        SOC_DOUBLE_TLV("IN Capture Volume", RT5616_INL1_INR1_VOL,
-                       RT5616_INL_VOL_SFT, RT5616_INR_VOL_SFT,
-                       31, 1, in_vol_tlv),
+                      RT5616_INL_VOL_SFT, RT5616_INR_VOL_SFT,
+                      31, 1, in_vol_tlv),
        /* ADC Digital Volume Control */
        SOC_DOUBLE("ADC Capture Switch", RT5616_ADC_DIG_VOL,
-               RT5616_L_MUTE_SFT, RT5616_R_MUTE_SFT, 1, 1),
+                  RT5616_L_MUTE_SFT, RT5616_R_MUTE_SFT, 1, 1),
        SOC_DOUBLE_TLV("ADC Capture Volume", RT5616_ADC_DIG_VOL,
-                       RT5616_L_VOL_SFT, RT5616_R_VOL_SFT,
-                       127, 0, adc_vol_tlv),
+                      RT5616_L_VOL_SFT, RT5616_R_VOL_SFT,
+                      127, 0, adc_vol_tlv),
 
        /* ADC Boost Volume Control */
        SOC_DOUBLE_TLV("ADC Boost Volume", RT5616_ADC_BST_VOL,
-                       RT5616_ADC_L_BST_SFT, RT5616_ADC_R_BST_SFT,
-                       3, 0, adc_bst_tlv),
+                      RT5616_ADC_L_BST_SFT, RT5616_ADC_R_BST_SFT,
+                      3, 0, adc_bst_tlv),
 };
 
 static int is_sys_clk_from_pll(struct snd_soc_dapm_widget *source,
-                        struct snd_soc_dapm_widget *sink)
+                              struct snd_soc_dapm_widget *sink)
 {
        unsigned int val;
 
@@ -462,20 +463,20 @@ static const struct snd_kcontrol_new rt5616_lout_mix[] = {
 };
 
 static int rt5616_adc_event(struct snd_soc_dapm_widget *w,
-       struct snd_kcontrol *kcontrol, int event)
+                           struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                snd_soc_update_bits(codec, RT5616_ADC_DIG_VOL,
-                               RT5616_L_MUTE | RT5616_R_MUTE, 0);
+                                   RT5616_L_MUTE | RT5616_R_MUTE, 0);
                break;
 
        case SND_SOC_DAPM_POST_PMD:
                snd_soc_update_bits(codec, RT5616_ADC_DIG_VOL,
-                               RT5616_L_MUTE | RT5616_R_MUTE,
-                               RT5616_L_MUTE | RT5616_R_MUTE);
+                                   RT5616_L_MUTE | RT5616_R_MUTE,
+                                   RT5616_L_MUTE | RT5616_R_MUTE);
                break;
 
        default:
@@ -486,7 +487,7 @@ static int rt5616_adc_event(struct snd_soc_dapm_widget *w,
 }
 
 static int rt5616_charge_pump_event(struct snd_soc_dapm_widget *w,
-                          struct snd_kcontrol *kcontrol, int event)
+                                   struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 
@@ -494,54 +495,55 @@ static int rt5616_charge_pump_event(struct snd_soc_dapm_widget *w,
        case SND_SOC_DAPM_POST_PMU:
                /* depop parameters */
                snd_soc_update_bits(codec, RT5616_DEPOP_M2,
-                       RT5616_DEPOP_MASK, RT5616_DEPOP_MAN);
+                                   RT5616_DEPOP_MASK, RT5616_DEPOP_MAN);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_HP_CP_MASK | RT5616_HP_SG_MASK |
-                       RT5616_HP_CB_MASK, RT5616_HP_CP_PU |
-                       RT5616_HP_SG_DIS | RT5616_HP_CB_PU);
+                                   RT5616_HP_CP_MASK | RT5616_HP_SG_MASK |
+                                   RT5616_HP_CB_MASK, RT5616_HP_CP_PU |
+                                   RT5616_HP_SG_DIS | RT5616_HP_CB_PU);
                snd_soc_write(codec, RT5616_PR_BASE +
-                       RT5616_HP_DCC_INT1, 0x9f00);
+                             RT5616_HP_DCC_INT1, 0x9f00);
                /* headphone amp power on */
                snd_soc_update_bits(codec, RT5616_PWR_ANLG1,
-                       RT5616_PWR_FV1 | RT5616_PWR_FV2, 0);
+                                   RT5616_PWR_FV1 | RT5616_PWR_FV2, 0);
                snd_soc_update_bits(codec, RT5616_PWR_VOL,
-                       RT5616_PWR_HV_L | RT5616_PWR_HV_R,
-                       RT5616_PWR_HV_L | RT5616_PWR_HV_R);
+                                   RT5616_PWR_HV_L | RT5616_PWR_HV_R,
+                                   RT5616_PWR_HV_L | RT5616_PWR_HV_R);
                snd_soc_update_bits(codec, RT5616_PWR_ANLG1,
-                       RT5616_PWR_HP_L | RT5616_PWR_HP_R |
-                       RT5616_PWR_HA, RT5616_PWR_HP_L |
-                       RT5616_PWR_HP_R | RT5616_PWR_HA);
+                                   RT5616_PWR_HP_L | RT5616_PWR_HP_R |
+                                   RT5616_PWR_HA, RT5616_PWR_HP_L |
+                                   RT5616_PWR_HP_R | RT5616_PWR_HA);
                msleep(50);
                snd_soc_update_bits(codec, RT5616_PWR_ANLG1,
-                       RT5616_PWR_FV1 | RT5616_PWR_FV2,
-                       RT5616_PWR_FV1 | RT5616_PWR_FV2);
+                                   RT5616_PWR_FV1 | RT5616_PWR_FV2,
+                                   RT5616_PWR_FV1 | RT5616_PWR_FV2);
 
                snd_soc_update_bits(codec, RT5616_CHARGE_PUMP,
-                       RT5616_PM_HP_MASK, RT5616_PM_HP_HV);
+                                   RT5616_PM_HP_MASK, RT5616_PM_HP_HV);
                snd_soc_update_bits(codec, RT5616_PR_BASE +
-                       RT5616_CHOP_DAC_ADC, 0x0200, 0x0200);
+                                   RT5616_CHOP_DAC_ADC, 0x0200, 0x0200);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_HP_CO_MASK | RT5616_HP_SG_MASK,
-                       RT5616_HP_CO_EN | RT5616_HP_SG_EN);
+                                   RT5616_HP_CO_MASK | RT5616_HP_SG_MASK,
+                                   RT5616_HP_CO_EN | RT5616_HP_SG_EN);
                break;
        case SND_SOC_DAPM_PRE_PMD:
                snd_soc_update_bits(codec, RT5616_PR_BASE +
-                       RT5616_CHOP_DAC_ADC, 0x0200, 0x0);
+                                   RT5616_CHOP_DAC_ADC, 0x0200, 0x0);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_HP_SG_MASK | RT5616_HP_L_SMT_MASK |
-                       RT5616_HP_R_SMT_MASK, RT5616_HP_SG_DIS |
-                       RT5616_HP_L_SMT_DIS | RT5616_HP_R_SMT_DIS);
+                                   RT5616_HP_SG_MASK | RT5616_HP_L_SMT_MASK |
+                                   RT5616_HP_R_SMT_MASK, RT5616_HP_SG_DIS |
+                                   RT5616_HP_L_SMT_DIS | RT5616_HP_R_SMT_DIS);
                /* headphone amp power down */
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_SMT_TRIG_MASK | RT5616_HP_CD_PD_MASK |
-                       RT5616_HP_CO_MASK | RT5616_HP_CP_MASK |
-                       RT5616_HP_SG_MASK | RT5616_HP_CB_MASK,
-                       RT5616_SMT_TRIG_DIS | RT5616_HP_CD_PD_EN |
-                       RT5616_HP_CO_DIS | RT5616_HP_CP_PD |
-                       RT5616_HP_SG_EN | RT5616_HP_CB_PD);
+                                   RT5616_SMT_TRIG_MASK |
+                                   RT5616_HP_CD_PD_MASK | RT5616_HP_CO_MASK |
+                                   RT5616_HP_CP_MASK | RT5616_HP_SG_MASK |
+                                   RT5616_HP_CB_MASK,
+                                   RT5616_SMT_TRIG_DIS | RT5616_HP_CD_PD_EN |
+                                   RT5616_HP_CO_DIS | RT5616_HP_CP_PD |
+                                   RT5616_HP_SG_EN | RT5616_HP_CB_PD);
                snd_soc_update_bits(codec, RT5616_PWR_ANLG1,
-                       RT5616_PWR_HP_L | RT5616_PWR_HP_R |
-                       RT5616_PWR_HA, 0);
+                                   RT5616_PWR_HP_L | RT5616_PWR_HP_R |
+                                   RT5616_PWR_HA, 0);
                break;
        default:
                return 0;
@@ -551,7 +553,7 @@ static int rt5616_charge_pump_event(struct snd_soc_dapm_widget *w,
 }
 
 static int rt5616_hp_event(struct snd_soc_dapm_widget *w,
-       struct snd_kcontrol *kcontrol, int event)
+                          struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 
@@ -559,57 +561,57 @@ static int rt5616_hp_event(struct snd_soc_dapm_widget *w,
        case SND_SOC_DAPM_POST_PMU:
                /* headphone unmute sequence */
                snd_soc_update_bits(codec, RT5616_DEPOP_M3,
-                       RT5616_CP_FQ1_MASK | RT5616_CP_FQ2_MASK |
-                       RT5616_CP_FQ3_MASK,
-                       (RT5616_CP_FQ_192_KHZ << RT5616_CP_FQ1_SFT) |
-                       (RT5616_CP_FQ_12_KHZ << RT5616_CP_FQ2_SFT) |
-                       (RT5616_CP_FQ_192_KHZ << RT5616_CP_FQ3_SFT));
+                                   RT5616_CP_FQ1_MASK | RT5616_CP_FQ2_MASK |
+                                   RT5616_CP_FQ3_MASK,
+                                   RT5616_CP_FQ_192_KHZ << RT5616_CP_FQ1_SFT |
+                                   RT5616_CP_FQ_12_KHZ << RT5616_CP_FQ2_SFT |
+                                   RT5616_CP_FQ_192_KHZ << RT5616_CP_FQ3_SFT);
                snd_soc_write(codec, RT5616_PR_BASE +
-                       RT5616_MAMP_INT_REG2, 0xfc00);
+                             RT5616_MAMP_INT_REG2, 0xfc00);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_SMT_TRIG_MASK, RT5616_SMT_TRIG_EN);
+                                   RT5616_SMT_TRIG_MASK, RT5616_SMT_TRIG_EN);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_RSTN_MASK, RT5616_RSTN_EN);
+                                   RT5616_RSTN_MASK, RT5616_RSTN_EN);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_RSTN_MASK | RT5616_HP_L_SMT_MASK |
-                       RT5616_HP_R_SMT_MASK, RT5616_RSTN_DIS |
-                       RT5616_HP_L_SMT_EN | RT5616_HP_R_SMT_EN);
+                                   RT5616_RSTN_MASK | RT5616_HP_L_SMT_MASK |
+                                   RT5616_HP_R_SMT_MASK, RT5616_RSTN_DIS |
+                                   RT5616_HP_L_SMT_EN | RT5616_HP_R_SMT_EN);
                snd_soc_update_bits(codec, RT5616_HP_VOL,
-                       RT5616_L_MUTE | RT5616_R_MUTE, 0);
+                                   RT5616_L_MUTE | RT5616_R_MUTE, 0);
                msleep(100);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_HP_SG_MASK | RT5616_HP_L_SMT_MASK |
-                       RT5616_HP_R_SMT_MASK, RT5616_HP_SG_DIS |
-                       RT5616_HP_L_SMT_DIS | RT5616_HP_R_SMT_DIS);
+                                   RT5616_HP_SG_MASK | RT5616_HP_L_SMT_MASK |
+                                   RT5616_HP_R_SMT_MASK, RT5616_HP_SG_DIS |
+                                   RT5616_HP_L_SMT_DIS | RT5616_HP_R_SMT_DIS);
                msleep(20);
                snd_soc_update_bits(codec, RT5616_HP_CALIB_AMP_DET,
-                       RT5616_HPD_PS_MASK, RT5616_HPD_PS_EN);
+                                   RT5616_HPD_PS_MASK, RT5616_HPD_PS_EN);
                break;
 
        case SND_SOC_DAPM_PRE_PMD:
                /* headphone mute sequence */
                snd_soc_update_bits(codec, RT5616_DEPOP_M3,
-                       RT5616_CP_FQ1_MASK | RT5616_CP_FQ2_MASK |
-                       RT5616_CP_FQ3_MASK,
-                       (RT5616_CP_FQ_96_KHZ << RT5616_CP_FQ1_SFT) |
-                       (RT5616_CP_FQ_12_KHZ << RT5616_CP_FQ2_SFT) |
-                       (RT5616_CP_FQ_96_KHZ << RT5616_CP_FQ3_SFT));
+                                   RT5616_CP_FQ1_MASK | RT5616_CP_FQ2_MASK |
+                                   RT5616_CP_FQ3_MASK,
+                                   RT5616_CP_FQ_96_KHZ << RT5616_CP_FQ1_SFT |
+                                   RT5616_CP_FQ_12_KHZ << RT5616_CP_FQ2_SFT |
+                                   RT5616_CP_FQ_96_KHZ << RT5616_CP_FQ3_SFT);
                snd_soc_write(codec, RT5616_PR_BASE +
-                       RT5616_MAMP_INT_REG2, 0xfc00);
+                             RT5616_MAMP_INT_REG2, 0xfc00);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_HP_SG_MASK, RT5616_HP_SG_EN);
+                                   RT5616_HP_SG_MASK, RT5616_HP_SG_EN);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_RSTP_MASK, RT5616_RSTP_EN);
+                                   RT5616_RSTP_MASK, RT5616_RSTP_EN);
                snd_soc_update_bits(codec, RT5616_DEPOP_M1,
-                       RT5616_RSTP_MASK | RT5616_HP_L_SMT_MASK |
-                       RT5616_HP_R_SMT_MASK, RT5616_RSTP_DIS |
-                       RT5616_HP_L_SMT_EN | RT5616_HP_R_SMT_EN);
+                                   RT5616_RSTP_MASK | RT5616_HP_L_SMT_MASK |
+                                   RT5616_HP_R_SMT_MASK, RT5616_RSTP_DIS |
+                                   RT5616_HP_L_SMT_EN | RT5616_HP_R_SMT_EN);
                snd_soc_update_bits(codec, RT5616_HP_CALIB_AMP_DET,
-                       RT5616_HPD_PS_MASK, RT5616_HPD_PS_DIS);
+                                   RT5616_HPD_PS_MASK, RT5616_HPD_PS_DIS);
                msleep(90);
                snd_soc_update_bits(codec, RT5616_HP_VOL,
-                       RT5616_L_MUTE | RT5616_R_MUTE,
-                       RT5616_L_MUTE | RT5616_R_MUTE);
+                                   RT5616_L_MUTE | RT5616_R_MUTE,
+                                   RT5616_L_MUTE | RT5616_R_MUTE);
                msleep(30);
                break;
 
@@ -621,24 +623,24 @@ static int rt5616_hp_event(struct snd_soc_dapm_widget *w,
 }
 
 static int rt5616_lout_event(struct snd_soc_dapm_widget *w,
-       struct snd_kcontrol *kcontrol, int event)
+                            struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                snd_soc_update_bits(codec, RT5616_PWR_ANLG1,
-                       RT5616_PWR_LM, RT5616_PWR_LM);
+                                   RT5616_PWR_LM, RT5616_PWR_LM);
                snd_soc_update_bits(codec, RT5616_LOUT_CTRL1,
-                       RT5616_L_MUTE | RT5616_R_MUTE, 0);
+                                   RT5616_L_MUTE | RT5616_R_MUTE, 0);
                break;
 
        case SND_SOC_DAPM_PRE_PMD:
                snd_soc_update_bits(codec, RT5616_LOUT_CTRL1,
-                       RT5616_L_MUTE | RT5616_R_MUTE,
-                       RT5616_L_MUTE | RT5616_R_MUTE);
+                                   RT5616_L_MUTE | RT5616_R_MUTE,
+                                   RT5616_L_MUTE | RT5616_R_MUTE);
                snd_soc_update_bits(codec, RT5616_PWR_ANLG1,
-                       RT5616_PWR_LM, 0);
+                                   RT5616_PWR_LM, 0);
                break;
 
        default:
@@ -649,19 +651,19 @@ static int rt5616_lout_event(struct snd_soc_dapm_widget *w,
 }
 
 static int rt5616_bst1_event(struct snd_soc_dapm_widget *w,
-       struct snd_kcontrol *kcontrol, int event)
+                            struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                snd_soc_update_bits(codec, RT5616_PWR_ANLG2,
-                       RT5616_PWR_BST1_OP2, RT5616_PWR_BST1_OP2);
+                                   RT5616_PWR_BST1_OP2, RT5616_PWR_BST1_OP2);
                break;
 
        case SND_SOC_DAPM_PRE_PMD:
                snd_soc_update_bits(codec, RT5616_PWR_ANLG2,
-                       RT5616_PWR_BST1_OP2, 0);
+                                   RT5616_PWR_BST1_OP2, 0);
                break;
 
        default:
@@ -672,19 +674,19 @@ static int rt5616_bst1_event(struct snd_soc_dapm_widget *w,
 }
 
 static int rt5616_bst2_event(struct snd_soc_dapm_widget *w,
-       struct snd_kcontrol *kcontrol, int event)
+                            struct snd_kcontrol *kcontrol, int event)
 {
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 
        switch (event) {
        case SND_SOC_DAPM_POST_PMU:
                snd_soc_update_bits(codec, RT5616_PWR_ANLG2,
-                       RT5616_PWR_BST2_OP2, RT5616_PWR_BST2_OP2);
+                                   RT5616_PWR_BST2_OP2, RT5616_PWR_BST2_OP2);
                break;
 
        case SND_SOC_DAPM_PRE_PMD:
                snd_soc_update_bits(codec, RT5616_PWR_ANLG2,
-                       RT5616_PWR_BST2_OP2, 0);
+                                   RT5616_PWR_BST2_OP2, 0);
                break;
 
        default:
@@ -696,13 +698,13 @@ static int rt5616_bst2_event(struct snd_soc_dapm_widget *w,
 
 static const struct snd_soc_dapm_widget rt5616_dapm_widgets[] = {
        SND_SOC_DAPM_SUPPLY("PLL1", RT5616_PWR_ANLG2,
-                       RT5616_PWR_PLL_BIT, 0, NULL, 0),
+                           RT5616_PWR_PLL_BIT, 0, NULL, 0),
        /* Input Side */
        /* micbias */
        SND_SOC_DAPM_SUPPLY("LDO", RT5616_PWR_ANLG1,
-                       RT5616_PWR_LDO_BIT, 0, NULL, 0),
+                           RT5616_PWR_LDO_BIT, 0, NULL, 0),
        SND_SOC_DAPM_SUPPLY("micbias1", RT5616_PWR_ANLG2,
-                       RT5616_PWR_MB1_BIT, 0, NULL, 0),
+                           RT5616_PWR_MB1_BIT, 0, NULL, 0),
 
        /* Input Lines */
        SND_SOC_DAPM_INPUT("MIC1"),
@@ -714,45 +716,47 @@ static const struct snd_soc_dapm_widget rt5616_dapm_widgets[] = {
 
        /* Boost */
        SND_SOC_DAPM_PGA_E("BST1", RT5616_PWR_ANLG2,
-               RT5616_PWR_BST1_BIT, 0, NULL, 0, rt5616_bst1_event,
-               SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+                          RT5616_PWR_BST1_BIT, 0, NULL, 0, rt5616_bst1_event,
+                          SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_PGA_E("BST2", RT5616_PWR_ANLG2,
-               RT5616_PWR_BST2_BIT, 0, NULL, 0, rt5616_bst2_event,
-               SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+                          RT5616_PWR_BST2_BIT, 0, NULL, 0, rt5616_bst2_event,
+                          SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
        /* Input Volume */
        SND_SOC_DAPM_PGA("INL1 VOL", RT5616_PWR_VOL,
-               RT5616_PWR_IN1_L_BIT, 0, NULL, 0),
+                        RT5616_PWR_IN1_L_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("INR1 VOL", RT5616_PWR_VOL,
-               RT5616_PWR_IN1_R_BIT, 0, NULL, 0),
+                        RT5616_PWR_IN1_R_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("INL2 VOL", RT5616_PWR_VOL,
-               RT5616_PWR_IN2_L_BIT, 0, NULL, 0),
+                        RT5616_PWR_IN2_L_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("INR2 VOL", RT5616_PWR_VOL,
-               RT5616_PWR_IN2_R_BIT, 0, NULL, 0),
+                        RT5616_PWR_IN2_R_BIT, 0, NULL, 0),
 
        /* REC Mixer */
        SND_SOC_DAPM_MIXER("RECMIXL", RT5616_PWR_MIXER, RT5616_PWR_RM_L_BIT, 0,
-                       rt5616_rec_l_mix, ARRAY_SIZE(rt5616_rec_l_mix)),
+                          rt5616_rec_l_mix, ARRAY_SIZE(rt5616_rec_l_mix)),
        SND_SOC_DAPM_MIXER("RECMIXR", RT5616_PWR_MIXER, RT5616_PWR_RM_R_BIT, 0,
-                       rt5616_rec_r_mix, ARRAY_SIZE(rt5616_rec_r_mix)),
+                          rt5616_rec_r_mix, ARRAY_SIZE(rt5616_rec_r_mix)),
        /* ADCs */
        SND_SOC_DAPM_ADC_E("ADC L", NULL, RT5616_PWR_DIG1,
-               RT5616_PWR_ADC_L_BIT, 0, rt5616_adc_event,
-               SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
+                          RT5616_PWR_ADC_L_BIT, 0, rt5616_adc_event,
+                          SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_ADC_E("ADC R", NULL, RT5616_PWR_DIG1,
-               RT5616_PWR_ADC_R_BIT, 0, rt5616_adc_event,
-               SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
+                          RT5616_PWR_ADC_R_BIT, 0, rt5616_adc_event,
+                          SND_SOC_DAPM_POST_PMD | SND_SOC_DAPM_POST_PMU),
 
        /* ADC Mixer */
        SND_SOC_DAPM_SUPPLY("stereo1 filter", RT5616_PWR_DIG2,
-               RT5616_PWR_ADC_STO1_F_BIT, 0, NULL, 0),
+                           RT5616_PWR_ADC_STO1_F_BIT, 0, NULL, 0),
        SND_SOC_DAPM_MIXER("Stereo1 ADC MIXL", SND_SOC_NOPM, 0, 0,
-               rt5616_sto1_adc_l_mix, ARRAY_SIZE(rt5616_sto1_adc_l_mix)),
+                          rt5616_sto1_adc_l_mix,
+                          ARRAY_SIZE(rt5616_sto1_adc_l_mix)),
        SND_SOC_DAPM_MIXER("Stereo1 ADC MIXR", SND_SOC_NOPM, 0, 0,
-               rt5616_sto1_adc_r_mix, ARRAY_SIZE(rt5616_sto1_adc_r_mix)),
+                          rt5616_sto1_adc_r_mix,
+                          ARRAY_SIZE(rt5616_sto1_adc_r_mix)),
 
        /* Digital Interface */
        SND_SOC_DAPM_SUPPLY("I2S1", RT5616_PWR_DIG1,
-               RT5616_PWR_I2S1_BIT, 0, NULL, 0),
+                           RT5616_PWR_I2S1_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("IF1 DAC", SND_SOC_NOPM, 0, 0, NULL, 0),
        SND_SOC_DAPM_PGA("IF1 DAC1 L", SND_SOC_NOPM, 0, 0, NULL, 0),
        SND_SOC_DAPM_PGA("IF1 DAC1 R", SND_SOC_NOPM, 0, 0, NULL, 0),
@@ -770,68 +774,70 @@ static const struct snd_soc_dapm_widget rt5616_dapm_widgets[] = {
        /* Output Side */
        /* DAC mixer before sound effect  */
        SND_SOC_DAPM_MIXER("DAC MIXL", SND_SOC_NOPM, 0, 0,
-               rt5616_dac_l_mix, ARRAY_SIZE(rt5616_dac_l_mix)),
+                          rt5616_dac_l_mix, ARRAY_SIZE(rt5616_dac_l_mix)),
        SND_SOC_DAPM_MIXER("DAC MIXR", SND_SOC_NOPM, 0, 0,
-               rt5616_dac_r_mix, ARRAY_SIZE(rt5616_dac_r_mix)),
+                          rt5616_dac_r_mix, ARRAY_SIZE(rt5616_dac_r_mix)),
 
        SND_SOC_DAPM_SUPPLY("Stero1 DAC Power", RT5616_PWR_DIG2,
-                       RT5616_PWR_DAC_STO1_F_BIT, 0, NULL, 0),
+                           RT5616_PWR_DAC_STO1_F_BIT, 0, NULL, 0),
 
        /* DAC Mixer */
        SND_SOC_DAPM_MIXER("Stereo DAC MIXL", SND_SOC_NOPM, 0, 0,
-               rt5616_sto_dac_l_mix, ARRAY_SIZE(rt5616_sto_dac_l_mix)),
+                          rt5616_sto_dac_l_mix,
+                          ARRAY_SIZE(rt5616_sto_dac_l_mix)),
        SND_SOC_DAPM_MIXER("Stereo DAC MIXR", SND_SOC_NOPM, 0, 0,
-               rt5616_sto_dac_r_mix, ARRAY_SIZE(rt5616_sto_dac_r_mix)),
+                          rt5616_sto_dac_r_mix,
+                          ARRAY_SIZE(rt5616_sto_dac_r_mix)),
 
        /* DACs */
        SND_SOC_DAPM_DAC("DAC L1", NULL, RT5616_PWR_DIG1,
-                       RT5616_PWR_DAC_L1_BIT, 0),
+                        RT5616_PWR_DAC_L1_BIT, 0),
        SND_SOC_DAPM_DAC("DAC R1", NULL, RT5616_PWR_DIG1,
-                       RT5616_PWR_DAC_R1_BIT, 0),
+                        RT5616_PWR_DAC_R1_BIT, 0),
        /* OUT Mixer */
        SND_SOC_DAPM_MIXER("OUT MIXL", RT5616_PWR_MIXER, RT5616_PWR_OM_L_BIT,
-               0, rt5616_out_l_mix, ARRAY_SIZE(rt5616_out_l_mix)),
+                          0, rt5616_out_l_mix, ARRAY_SIZE(rt5616_out_l_mix)),
        SND_SOC_DAPM_MIXER("OUT MIXR", RT5616_PWR_MIXER, RT5616_PWR_OM_R_BIT,
-               0, rt5616_out_r_mix, ARRAY_SIZE(rt5616_out_r_mix)),
+                          0, rt5616_out_r_mix, ARRAY_SIZE(rt5616_out_r_mix)),
        /* Output Volume */
        SND_SOC_DAPM_PGA("OUTVOL L", RT5616_PWR_VOL,
-               RT5616_PWR_OV_L_BIT, 0, NULL, 0),
+                        RT5616_PWR_OV_L_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("OUTVOL R", RT5616_PWR_VOL,
-               RT5616_PWR_OV_R_BIT, 0, NULL, 0),
+                        RT5616_PWR_OV_R_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("HPOVOL L", RT5616_PWR_VOL,
-               RT5616_PWR_HV_L_BIT, 0, NULL, 0),
+                        RT5616_PWR_HV_L_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("HPOVOL R", RT5616_PWR_VOL,
-               RT5616_PWR_HV_R_BIT, 0, NULL, 0),
+                        RT5616_PWR_HV_R_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("DAC 1", SND_SOC_NOPM,
-               0, 0, NULL, 0),
+                        0, 0, NULL, 0),
        SND_SOC_DAPM_PGA("DAC 2", SND_SOC_NOPM,
-               0, 0, NULL, 0),
+                        0, 0, NULL, 0),
        SND_SOC_DAPM_PGA("HPOVOL", SND_SOC_NOPM,
-               0, 0, NULL, 0),
+                        0, 0, NULL, 0),
        SND_SOC_DAPM_PGA("INL1", RT5616_PWR_VOL,
-               RT5616_PWR_IN1_L_BIT, 0, NULL, 0),
+                        RT5616_PWR_IN1_L_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("INR1", RT5616_PWR_VOL,
-               RT5616_PWR_IN1_R_BIT, 0, NULL, 0),
+                        RT5616_PWR_IN1_R_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("INL2", RT5616_PWR_VOL,
-               RT5616_PWR_IN2_L_BIT, 0, NULL, 0),
+                        RT5616_PWR_IN2_L_BIT, 0, NULL, 0),
        SND_SOC_DAPM_PGA("INR2", RT5616_PWR_VOL,
-               RT5616_PWR_IN2_R_BIT, 0, NULL, 0),
+                        RT5616_PWR_IN2_R_BIT, 0, NULL, 0),
        /* HPO/LOUT/Mono Mixer */
        SND_SOC_DAPM_MIXER("HPO MIX", SND_SOC_NOPM, 0, 0,
-               rt5616_hpo_mix, ARRAY_SIZE(rt5616_hpo_mix)),
+                          rt5616_hpo_mix, ARRAY_SIZE(rt5616_hpo_mix)),
        SND_SOC_DAPM_MIXER("LOUT MIX", SND_SOC_NOPM, 0, 0,
-               rt5616_lout_mix, ARRAY_SIZE(rt5616_lout_mix)),
+                          rt5616_lout_mix, ARRAY_SIZE(rt5616_lout_mix)),
 
        SND_SOC_DAPM_PGA_S("HP amp", 1, SND_SOC_NOPM, 0, 0,
-               rt5616_hp_event, SND_SOC_DAPM_PRE_PMD |
-               SND_SOC_DAPM_POST_PMU),
+                          rt5616_hp_event, SND_SOC_DAPM_PRE_PMD |
+                          SND_SOC_DAPM_POST_PMU),
        SND_SOC_DAPM_PGA_S("LOUT amp", 1, SND_SOC_NOPM, 0, 0,
-               rt5616_lout_event, SND_SOC_DAPM_PRE_PMD |
-               SND_SOC_DAPM_POST_PMU),
+                          rt5616_lout_event, SND_SOC_DAPM_PRE_PMD |
+                          SND_SOC_DAPM_POST_PMU),
 
        SND_SOC_DAPM_SUPPLY_S("Charge Pump", 1, SND_SOC_NOPM, 0, 0,
-               rt5616_charge_pump_event, SND_SOC_DAPM_POST_PMU |
-               SND_SOC_DAPM_PRE_PMD),
+                             rt5616_charge_pump_event, SND_SOC_DAPM_POST_PMU |
+                             SND_SOC_DAPM_PRE_PMD),
 
        /* Output Lines */
        SND_SOC_DAPM_OUTPUT("HPOL"),
@@ -950,7 +956,8 @@ static const struct snd_soc_dapm_route rt5616_dapm_routes[] = {
 };
 
 static int rt5616_hw_params(struct snd_pcm_substream *substream,
-       struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
+                           struct snd_pcm_hw_params *params,
+                           struct snd_soc_dai *dai)
 {
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct snd_soc_codec *codec = rtd->codec;
@@ -977,7 +984,7 @@ static int rt5616_hw_params(struct snd_pcm_substream *substream,
        dev_dbg(dai->dev, "bclk is %dHz and lrck is %dHz\n",
                rt5616->bclk[dai->id], rt5616->lrck[dai->id]);
        dev_dbg(dai->dev, "bclk_ms is %d and pre_div is %d for iis %d\n",
-                               bclk_ms, pre_div, dai->id);
+               bclk_ms, pre_div, dai->id);
 
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
@@ -998,10 +1005,9 @@ static int rt5616_hw_params(struct snd_pcm_substream *substream,
        mask_clk = RT5616_I2S_PD1_MASK;
        val_clk = pre_div << RT5616_I2S_PD1_SFT;
        snd_soc_update_bits(codec, RT5616_I2S1_SDP,
-               RT5616_I2S_DL_MASK, val_len);
+                           RT5616_I2S_DL_MASK, val_len);
        snd_soc_update_bits(codec, RT5616_ADDA_CLK1, mask_clk, val_clk);
 
-
        return 0;
 }
 
@@ -1050,15 +1056,14 @@ static int rt5616_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        }
 
        snd_soc_update_bits(codec, RT5616_I2S1_SDP,
-               RT5616_I2S_MS_MASK | RT5616_I2S_BP_MASK |
-               RT5616_I2S_DF_MASK, reg_val);
-
+                           RT5616_I2S_MS_MASK | RT5616_I2S_BP_MASK |
+                           RT5616_I2S_DF_MASK, reg_val);
 
        return 0;
 }
 
 static int rt5616_set_dai_sysclk(struct snd_soc_dai *dai,
-               int clk_id, unsigned int freq, int dir)
+                                int clk_id, unsigned int freq, int dir)
 {
        struct snd_soc_codec *codec = dai->codec;
        struct rt5616_priv *rt5616 = snd_soc_codec_get_drvdata(codec);
@@ -1078,8 +1083,9 @@ static int rt5616_set_dai_sysclk(struct snd_soc_dai *dai,
                dev_err(codec->dev, "Invalid clock id (%d)\n", clk_id);
                return -EINVAL;
        }
+
        snd_soc_update_bits(codec, RT5616_GLB_CLK,
-               RT5616_SCLK_SRC_MASK, reg_val);
+                           RT5616_SCLK_SRC_MASK, reg_val);
        rt5616->sysclk = freq;
        rt5616->sysclk_src = clk_id;
 
@@ -1089,7 +1095,7 @@ static int rt5616_set_dai_sysclk(struct snd_soc_dai *dai,
 }
 
 static int rt5616_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
-                       unsigned int freq_in, unsigned int freq_out)
+                             unsigned int freq_in, unsigned int freq_out)
 {
        struct snd_soc_codec *codec = dai->codec;
        struct rt5616_priv *rt5616 = snd_soc_codec_get_drvdata(codec);
@@ -1106,19 +1112,22 @@ static int rt5616_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
                rt5616->pll_in = 0;
                rt5616->pll_out = 0;
                snd_soc_update_bits(codec, RT5616_GLB_CLK,
-                       RT5616_SCLK_SRC_MASK, RT5616_SCLK_SRC_MCLK);
+                                   RT5616_SCLK_SRC_MASK,
+                                   RT5616_SCLK_SRC_MCLK);
                return 0;
        }
 
        switch (source) {
        case RT5616_PLL1_S_MCLK:
                snd_soc_update_bits(codec, RT5616_GLB_CLK,
-                       RT5616_PLL1_SRC_MASK, RT5616_PLL1_SRC_MCLK);
+                                   RT5616_PLL1_SRC_MASK,
+                                   RT5616_PLL1_SRC_MCLK);
                break;
        case RT5616_PLL1_S_BCLK1:
        case RT5616_PLL1_S_BCLK2:
                snd_soc_update_bits(codec, RT5616_GLB_CLK,
-                       RT5616_PLL1_SRC_MASK, RT5616_PLL1_SRC_BCLK1);
+                                   RT5616_PLL1_SRC_MASK,
+                                   RT5616_PLL1_SRC_BCLK1);
                break;
        default:
                dev_err(codec->dev, "Unknown PLL source %d\n", source);
@@ -1136,10 +1145,11 @@ static int rt5616_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
                pll_code.n_code, pll_code.k_code);
 
        snd_soc_write(codec, RT5616_PLL_CTRL1,
-               pll_code.n_code << RT5616_PLL_N_SFT | pll_code.k_code);
+                     pll_code.n_code << RT5616_PLL_N_SFT | pll_code.k_code);
        snd_soc_write(codec, RT5616_PLL_CTRL2,
-               (pll_code.m_bp ? 0 : pll_code.m_code) << RT5616_PLL_M_SFT |
-               pll_code.m_bp << RT5616_PLL_M_BP_SFT);
+                     (pll_code.m_bp ? 0 : pll_code.m_code) <<
+                     RT5616_PLL_M_SFT |
+                     pll_code.m_bp << RT5616_PLL_M_BP_SFT);
 
        rt5616->pll_in = freq_in;
        rt5616->pll_out = freq_out;
@@ -1149,22 +1159,50 @@ static int rt5616_set_dai_pll(struct snd_soc_dai *dai, int pll_id, int source,
 }
 
 static int rt5616_set_bias_level(struct snd_soc_codec *codec,
-                       enum snd_soc_bias_level level)
+                                enum snd_soc_bias_level level)
 {
+       struct rt5616_priv *rt5616 = snd_soc_codec_get_drvdata(codec);
+       int ret;
+
        switch (level) {
+
+       case SND_SOC_BIAS_ON:
+               break;
+
+       case SND_SOC_BIAS_PREPARE:
+               /*
+                * SND_SOC_BIAS_PREPARE is called while preparing for a
+                * transition to ON or away from ON. If current bias_level
+                * is SND_SOC_BIAS_ON, then it is preparing for a transition
+                * away from ON. Disable the clock in that case, otherwise
+                * enable it.
+                */
+               if (IS_ERR(rt5616->mclk))
+                       break;
+
+               if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_ON) {
+                       clk_disable_unprepare(rt5616->mclk);
+               } else {
+                       ret = clk_prepare_enable(rt5616->mclk);
+                       if (ret)
+                               return ret;
+               }
+               break;
+
        case SND_SOC_BIAS_STANDBY:
                if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
                        snd_soc_update_bits(codec, RT5616_PWR_ANLG1,
-                               RT5616_PWR_VREF1 | RT5616_PWR_MB |
-                               RT5616_PWR_BG | RT5616_PWR_VREF2,
-                               RT5616_PWR_VREF1 | RT5616_PWR_MB |
-                               RT5616_PWR_BG | RT5616_PWR_VREF2);
+                                           RT5616_PWR_VREF1 | RT5616_PWR_MB |
+                                           RT5616_PWR_BG | RT5616_PWR_VREF2,
+                                           RT5616_PWR_VREF1 | RT5616_PWR_MB |
+                                           RT5616_PWR_BG | RT5616_PWR_VREF2);
                        mdelay(10);
                        snd_soc_update_bits(codec, RT5616_PWR_ANLG1,
-                               RT5616_PWR_FV1 | RT5616_PWR_FV2,
-                               RT5616_PWR_FV1 | RT5616_PWR_FV2);
+                                           RT5616_PWR_FV1 | RT5616_PWR_FV2,
+                                           RT5616_PWR_FV1 | RT5616_PWR_FV2);
                        snd_soc_update_bits(codec, RT5616_D_MISC,
-                               RT5616_D_GATE_EN, RT5616_D_GATE_EN);
+                                           RT5616_D_GATE_EN,
+                                           RT5616_D_GATE_EN);
                }
                break;
 
@@ -1189,6 +1227,11 @@ static int rt5616_probe(struct snd_soc_codec *codec)
 {
        struct rt5616_priv *rt5616 = snd_soc_codec_get_drvdata(codec);
 
+       /* Check if MCLK provided */
+       rt5616->mclk = devm_clk_get(codec->dev, "mclk");
+       if (PTR_ERR(rt5616->mclk) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+
        rt5616->codec = codec;
 
        return 0;
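
Together with the SND_SOC_BIAS_PREPARE handling added above, this keeps MCLK strictly optional: only -EPROBE_DEFER (clock provider not ready yet) aborts the probe, while any other error pointer simply means no MCLK is wired up, so the later clk_prepare_enable()/clk_disable_unprepare() calls are skipped via IS_ERR(). The same idiom, condensed into a stand-alone sketch with an illustrative function name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_enable_optional_mclk(struct device *dev)
{
        struct clk *mclk = devm_clk_get(dev, "mclk");

        if (PTR_ERR(mclk) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* provider not probed yet */

        if (IS_ERR(mclk))
                return 0;               /* no MCLK: run from BCLK via PLL instead */

        return clk_prepare_enable(mclk);
}
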
@@ -1218,11 +1261,10 @@ static int rt5616_resume(struct snd_soc_codec *codec)
 #define rt5616_resume NULL
 #endif
 
-#define RT5616_STEREO_RATES SNDRV_PCM_RATE_8000_96000
+#define RT5616_STEREO_RATES SNDRV_PCM_RATE_8000_192000
 #define RT5616_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
                        SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S8)
 
-
 struct snd_soc_dai_ops rt5616_aif_dai_ops = {
        .hw_params = rt5616_hw_params,
        .set_fmt = rt5616_set_dai_fmt,
@@ -1296,15 +1338,15 @@ MODULE_DEVICE_TABLE(of, rt5616_of_match);
 #endif
 
 static int rt5616_i2c_probe(struct i2c_client *i2c,
-                   const struct i2c_device_id *id)
+                           const struct i2c_device_id *id)
 {
        struct rt5616_priv *rt5616;
        unsigned int val;
        int ret;
 
        rt5616 = devm_kzalloc(&i2c->dev, sizeof(struct rt5616_priv),
-                               GFP_KERNEL);
-       if (rt5616 == NULL)
+                             GFP_KERNEL);
+       if (!rt5616)
                return -ENOMEM;
 
        i2c_set_clientdata(i2c, rt5616);
@@ -1326,14 +1368,14 @@ static int rt5616_i2c_probe(struct i2c_client *i2c,
        }
        regmap_write(rt5616->regmap, RT5616_RESET, 0);
        regmap_update_bits(rt5616->regmap, RT5616_PWR_ANLG1,
-               RT5616_PWR_VREF1 | RT5616_PWR_MB |
-               RT5616_PWR_BG | RT5616_PWR_VREF2,
-               RT5616_PWR_VREF1 | RT5616_PWR_MB |
-               RT5616_PWR_BG | RT5616_PWR_VREF2);
+                          RT5616_PWR_VREF1 | RT5616_PWR_MB |
+                          RT5616_PWR_BG | RT5616_PWR_VREF2,
+                          RT5616_PWR_VREF1 | RT5616_PWR_MB |
+                          RT5616_PWR_BG | RT5616_PWR_VREF2);
        mdelay(10);
        regmap_update_bits(rt5616->regmap, RT5616_PWR_ANLG1,
-               RT5616_PWR_FV1 | RT5616_PWR_FV2,
-               RT5616_PWR_FV1 | RT5616_PWR_FV2);
+                          RT5616_PWR_FV1 | RT5616_PWR_FV2,
+                          RT5616_PWR_FV1 | RT5616_PWR_FV2);
 
        ret = regmap_register_patch(rt5616->regmap, init_list,
                                    ARRAY_SIZE(init_list));
@@ -1341,11 +1383,10 @@ static int rt5616_i2c_probe(struct i2c_client *i2c,
                dev_warn(&i2c->dev, "Failed to apply regmap patch: %d\n", ret);
 
        regmap_update_bits(rt5616->regmap, RT5616_PWR_ANLG1,
-               RT5616_PWR_LDO_DVO_MASK, RT5616_PWR_LDO_DVO_1_2V);
+                          RT5616_PWR_LDO_DVO_MASK, RT5616_PWR_LDO_DVO_1_2V);
 
        return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5616,
-                       rt5616_dai, ARRAY_SIZE(rt5616_dai));
-
+                                     rt5616_dai, ARRAY_SIZE(rt5616_dai));
 }
 
 static int rt5616_i2c_remove(struct i2c_client *i2c)
@@ -1361,7 +1402,6 @@ static void rt5616_i2c_shutdown(struct i2c_client *client)
 
        regmap_write(rt5616->regmap, RT5616_HP_VOL, 0xc8c8);
        regmap_write(rt5616->regmap, RT5616_LOUT_CTRL1, 0xc8c8);
-
 }
 
 static struct i2c_driver rt5616_i2c_driver = {
index c61d38b585fb06de6b6ca0f41370c3b78c10113c..93e8c9017633f97ed264cd9e54c4aaab606d54f3 100644
@@ -776,7 +776,7 @@ static const struct snd_kcontrol_new rt5645_snd_controls[] = {
 
        /* IN1/IN2 Control */
        SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
-               RT5645_BST_SFT1, 8, 0, bst_tlv),
+               RT5645_BST_SFT1, 12, 0, bst_tlv),
        SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
                RT5645_BST_SFT2, 8, 0, bst_tlv),
 
index 820d8fa62b5e5682fcdf52c823fa70f2f8b27631..1b30914c2d916ddd70db50b6e6203eeb0ef486e3 100644
@@ -3985,7 +3985,6 @@ static int rt5659_i2c_probe(struct i2c_client *i2c,
        if (rt5659 == NULL)
                return -ENOMEM;
 
-       rt5659->i2c = i2c;
        i2c_set_clientdata(i2c, rt5659);
 
        if (pdata)
@@ -4157,24 +4156,17 @@ static int rt5659_i2c_probe(struct i2c_client *i2c,
 
        INIT_DELAYED_WORK(&rt5659->jack_detect_work, rt5659_jack_detect_work);
 
-       if (rt5659->i2c->irq) {
-               ret = request_threaded_irq(rt5659->i2c->irq, NULL, rt5659_irq,
-                       IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+       if (i2c->irq) {
+               ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+                       rt5659_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
                        | IRQF_ONESHOT, "rt5659", rt5659);
                if (ret)
                        dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
 
        }
 
-       ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659,
+       return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659,
                        rt5659_dai, ARRAY_SIZE(rt5659_dai));
-
-       if (ret) {
-               if (rt5659->i2c->irq)
-                       free_irq(rt5659->i2c->irq, rt5659);
-       }
-
-       return 0;
 }
 
 static int rt5659_i2c_remove(struct i2c_client *i2c)
@@ -4184,31 +4176,36 @@ static int rt5659_i2c_remove(struct i2c_client *i2c)
        return 0;
 }
 
-void rt5659_i2c_shutdown(struct i2c_client *client)
+static void rt5659_i2c_shutdown(struct i2c_client *client)
 {
        struct rt5659_priv *rt5659 = i2c_get_clientdata(client);
 
        regmap_write(rt5659->regmap, RT5659_RESET, 0);
 }
 
+#ifdef CONFIG_OF
 static const struct of_device_id rt5659_of_match[] = {
        { .compatible = "realtek,rt5658", },
        { .compatible = "realtek,rt5659", },
-       {},
+       { },
 };
+MODULE_DEVICE_TABLE(of, rt5659_of_match);
+#endif
 
+#ifdef CONFIG_ACPI
 static struct acpi_device_id rt5659_acpi_match[] = {
-               { "10EC5658", 0},
-               { "10EC5659", 0},
-               { },
+       { "10EC5658", 0, },
+       { "10EC5659", 0, },
+       { },
 };
 MODULE_DEVICE_TABLE(acpi, rt5659_acpi_match);
+#endif
 
 struct i2c_driver rt5659_i2c_driver = {
        .driver = {
                .name = "rt5659",
                .owner = THIS_MODULE,
-               .of_match_table = rt5659_of_match,
+               .of_match_table = of_match_ptr(rt5659_of_match),
                .acpi_match_table = ACPI_PTR(rt5659_acpi_match),
        },
        .probe = rt5659_i2c_probe,
index 8f07ee903eaadf29a769f71b9c219cf9ac5bb3b6..d31c9e5bcec8adf93731532b1ef036da70ac959c 100644
@@ -1792,7 +1792,6 @@ struct rt5659_priv {
        struct snd_soc_codec *codec;
        struct rt5659_platform_data pdata;
        struct regmap *regmap;
-       struct i2c_client *i2c;
        struct gpio_desc *gpiod_ldo1_en;
        struct gpio_desc *gpiod_reset;
        struct snd_soc_jack *hs_jack;
index 21ca3a5e9f6603299f15a9bd5da3b64fa93e0f95..d374c18d4db7f9939fbdc3a5891bbdcb4cbe9e76 100644
@@ -31,7 +31,10 @@ static int sigmadsp_write_i2c(void *control_data,
 
        kfree(buf);
 
-       return ret;
+       if (ret < 0)
+               return ret;
+
+       return 0;
 }
 
 static int sigmadsp_read_i2c(void *control_data,
index e619d5651b09bbd6401cdea6734acf0a4a6f17dc..080c78e88e102199cb9b8923f0556ce084ad22d8 100644
@@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
        regcache_cache_only(ssm4567->regmap, !enable);
 
        if (enable) {
+               ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
+                       0x00);
+               if (ret)
+                       return ret;
+
                ret = regmap_update_bits(ssm4567->regmap,
                        SSM4567_REG_POWER_CTRL,
                        SSM4567_POWER_SPWDN, 0x00);
index 64637d1cf4e5673f17145b13d7c12681bed21d48..a8b3e3f701f964bdd3b54869a655d9cbfec74264 100644
@@ -619,7 +619,7 @@ static int wm5102_adsp_power_ev(struct snd_soc_dapm_widget *w,
 {
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
        struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
-       unsigned int v;
+       unsigned int v = 0;
        int ret;
 
        switch (event) {
@@ -654,7 +654,7 @@ static int wm5102_adsp_power_ev(struct snd_soc_dapm_widget *w,
                break;
        }
 
-       return wm_adsp2_early_event(w, kcontrol, event);
+       return wm_adsp2_early_event(w, kcontrol, event, v);
 }
 
 static int wm5102_out_comp_coeff_get(struct snd_kcontrol *kcontrol,
@@ -1408,7 +1408,7 @@ ARIZONA_MUX_WIDGETS(ISRC2DEC2, "ISRC2DEC2"),
 ARIZONA_MUX_WIDGETS(ISRC2INT1, "ISRC2INT1"),
 ARIZONA_MUX_WIDGETS(ISRC2INT2, "ISRC2INT2"),
 
-WM_ADSP2_E("DSP1", 0, wm5102_adsp_power_ev),
+WM_ADSP2("DSP1", 0, wm5102_adsp_power_ev),
 
 SND_SOC_DAPM_OUTPUT("HPOUT1L"),
 SND_SOC_DAPM_OUTPUT("HPOUT1R"),
@@ -1599,6 +1599,9 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
        { "Slim2 Capture", NULL, "SYSCLK" },
        { "Slim3 Capture", NULL, "SYSCLK" },
 
+       { "Audio Trace DSP", NULL, "DSP1" },
+       { "Audio Trace DSP", NULL, "SYSCLK" },
+
        { "IN1L PGA", NULL, "IN1L" },
        { "IN1R PGA", NULL, "IN1R" },
 
@@ -1735,7 +1738,7 @@ static int wm5102_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
        }
 }
 
-#define WM5102_RATES SNDRV_PCM_RATE_8000_192000
+#define WM5102_RATES SNDRV_PCM_RATE_KNOT
 
 #define WM5102_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
                        SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
@@ -1864,14 +1867,67 @@ static struct snd_soc_dai_driver wm5102_dai[] = {
                 },
                .ops = &arizona_simple_dai_ops,
        },
+       {
+               .name = "wm5102-cpu-trace",
+               .capture = {
+                       .stream_name = "Audio Trace CPU",
+                       .channels_min = 1,
+                       .channels_max = 6,
+                       .rates = WM5102_RATES,
+                       .formats = WM5102_FORMATS,
+               },
+               .compress_new = snd_soc_new_compress,
+       },
+       {
+               .name = "wm5102-dsp-trace",
+               .capture = {
+                       .stream_name = "Audio Trace DSP",
+                       .channels_min = 1,
+                       .channels_max = 4,
+                       .rates = WM5102_RATES,
+                       .formats = WM5102_FORMATS,
+               },
+       },
 };
 
+static int wm5102_open(struct snd_compr_stream *stream)
+{
+       struct snd_soc_pcm_runtime *rtd = stream->private_data;
+       struct wm5102_priv *priv = snd_soc_codec_get_drvdata(rtd->codec);
+
+       return wm_adsp_compr_open(&priv->core.adsp[0], stream);
+}
+
+static irqreturn_t wm5102_adsp2_irq(int irq, void *data)
+{
+       struct wm5102_priv *priv = data;
+       struct arizona *arizona = priv->core.arizona;
+       int ret;
+
+       ret = wm_adsp_compr_handle_irq(&priv->core.adsp[0]);
+       if (ret == -ENODEV) {
+               dev_err(arizona->dev, "Spurious compressed data IRQ\n");
+               return IRQ_NONE;
+       }
+
+       return IRQ_HANDLED;
+}
+
 static int wm5102_codec_probe(struct snd_soc_codec *codec)
 {
        struct snd_soc_dapm_context *dapm = snd_soc_codec_get_dapm(codec);
        struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+       struct arizona *arizona = priv->core.arizona;
        int ret;
 
+       ret = arizona_request_irq(arizona, ARIZONA_IRQ_DSP_IRQ1,
+                                 "ADSP2 Compressed IRQ", wm5102_adsp2_irq,
+                                 priv);
+       if (ret != 0) {
+               dev_err(codec->dev, "Failed to request DSP IRQ: %d\n", ret);
+               return ret;
+       }
+
        ret = wm_adsp2_codec_probe(&priv->core.adsp[0], codec);
        if (ret)
                return ret;
@@ -1946,6 +2002,20 @@ static struct snd_soc_codec_driver soc_codec_dev_wm5102 = {
        .num_dapm_routes = ARRAY_SIZE(wm5102_dapm_routes),
 };
 
+static struct snd_compr_ops wm5102_compr_ops = {
+       .open = wm5102_open,
+       .free = wm_adsp_compr_free,
+       .set_params = wm_adsp_compr_set_params,
+       .get_caps = wm_adsp_compr_get_caps,
+       .trigger = wm_adsp_compr_trigger,
+       .pointer = wm_adsp_compr_pointer,
+       .copy = wm_adsp_compr_copy,
+};
+
+static struct snd_soc_platform_driver wm5102_compr_platform = {
+       .compr_ops = &wm5102_compr_ops,
+};
+
 static int wm5102_probe(struct platform_device *pdev)
 {
        struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
@@ -2005,12 +2075,25 @@ static int wm5102_probe(struct platform_device *pdev)
        pm_runtime_enable(&pdev->dev);
        pm_runtime_idle(&pdev->dev);
 
-       return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5102,
+       ret = snd_soc_register_platform(&pdev->dev, &wm5102_compr_platform);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
+               return ret;
+       }
+
+       ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5102,
                                      wm5102_dai, ARRAY_SIZE(wm5102_dai));
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to register codec: %d\n", ret);
+               snd_soc_unregister_platform(&pdev->dev);
+       }
+
+       return ret;
 }
 
 static int wm5102_remove(struct platform_device *pdev)
 {
+       snd_soc_unregister_platform(&pdev->dev);
        snd_soc_unregister_codec(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
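
Reduced to its shape (hypothetical "foo" component, error prints omitted), the wm5102 probe/remove changes above register the compressed-audio platform before the codec, unwind the platform if codec registration fails, and unregister both in remove():

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = snd_soc_register_platform(&pdev->dev, &foo_compr_platform);
	if (ret < 0)
		return ret;

	ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_foo,
				     foo_dai, ARRAY_SIZE(foo_dai));
	if (ret < 0)
		snd_soc_unregister_platform(&pdev->dev);	/* unwind */

	return ret;
}

static int foo_remove(struct platform_device *pdev)
{
	snd_soc_unregister_platform(&pdev->dev);
	snd_soc_unregister_codec(&pdev->dev);
	return 0;
}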
 
index 6088d30962a953cab19ee561ee288cca142dfccf..83ba70fe16e69488a473a68bcc97d4cc2408874f 100644 (file)
@@ -191,6 +191,25 @@ static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
        return 0;
 }
 
+static int wm5110_adsp_power_ev(struct snd_soc_dapm_widget *w,
+                               struct snd_kcontrol *kcontrol, int event)
+{
+       struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+       struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+       unsigned int v;
+       int ret;
+
+       ret = regmap_read(arizona->regmap, ARIZONA_SYSTEM_CLOCK_1, &v);
+       if (ret != 0) {
+               dev_err(codec->dev, "Failed to read SYSCLK state: %d\n", ret);
+               return ret;
+       }
+
+       v = (v & ARIZONA_SYSCLK_FREQ_MASK) >> ARIZONA_SYSCLK_FREQ_SHIFT;
+
+       return wm_adsp2_early_event(w, kcontrol, event, v);
+}
+
 static const struct reg_sequence wm5110_no_dre_left_enable[] = {
        { 0x3024, 0xE410 },
        { 0x3025, 0x0056 },
@@ -1179,10 +1198,10 @@ SND_SOC_DAPM_PGA("ASRC2L", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2L_ENA_SHIFT, 0,
 SND_SOC_DAPM_PGA("ASRC2R", ARIZONA_ASRC_ENABLE, ARIZONA_ASRC2R_ENA_SHIFT, 0,
                 NULL, 0),
 
-WM_ADSP2("DSP1", 0),
-WM_ADSP2("DSP2", 1),
-WM_ADSP2("DSP3", 2),
-WM_ADSP2("DSP4", 3),
+WM_ADSP2("DSP1", 0, wm5110_adsp_power_ev),
+WM_ADSP2("DSP2", 1, wm5110_adsp_power_ev),
+WM_ADSP2("DSP3", 2, wm5110_adsp_power_ev),
+WM_ADSP2("DSP4", 3, wm5110_adsp_power_ev),
 
 SND_SOC_DAPM_PGA("ISRC1INT1", ARIZONA_ISRC_1_CTRL_3,
                 ARIZONA_ISRC1_INT0_ENA_SHIFT, 0, NULL, 0),
@@ -1809,6 +1828,9 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
        { "Voice Control DSP", NULL, "DSP3" },
        { "Voice Control DSP", NULL, "SYSCLK" },
 
+       { "Audio Trace DSP", NULL, "DSP1" },
+       { "Audio Trace DSP", NULL, "SYSCLK" },
+
        { "IN1L PGA", NULL, "IN1L" },
        { "IN1R PGA", NULL, "IN1R" },
 
@@ -2002,7 +2024,7 @@ static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
        }
 }
 
-#define WM5110_RATES SNDRV_PCM_RATE_8000_192000
+#define WM5110_RATES SNDRV_PCM_RATE_KNOT
 
 #define WM5110_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
                        SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
@@ -2152,6 +2174,27 @@ static struct snd_soc_dai_driver wm5110_dai[] = {
                        .formats = WM5110_FORMATS,
                },
        },
+       {
+               .name = "wm5110-cpu-trace",
+               .capture = {
+                       .stream_name = "Audio Trace CPU",
+                       .channels_min = 1,
+                       .channels_max = 6,
+                       .rates = WM5110_RATES,
+                       .formats = WM5110_FORMATS,
+               },
+               .compress_new = snd_soc_new_compress,
+       },
+       {
+               .name = "wm5110-dsp-trace",
+               .capture = {
+                       .stream_name = "Audio Trace DSP",
+                       .channels_min = 1,
+                       .channels_max = 6,
+                       .rates = WM5110_RATES,
+                       .formats = WM5110_FORMATS,
+               },
+       },
 };
 
 static int wm5110_open(struct snd_compr_stream *stream)
@@ -2163,6 +2206,8 @@ static int wm5110_open(struct snd_compr_stream *stream)
 
        if (strcmp(rtd->codec_dai->name, "wm5110-dsp-voicectrl") == 0) {
                n_adsp = 2;
+       } else if (strcmp(rtd->codec_dai->name, "wm5110-dsp-trace") == 0) {
+               n_adsp = 0;
        } else {
                dev_err(arizona->dev,
                        "No suitable compressed stream for DAI '%s'\n",
@@ -2175,12 +2220,21 @@ static int wm5110_open(struct snd_compr_stream *stream)
 
 static irqreturn_t wm5110_adsp2_irq(int irq, void *data)
 {
-       struct wm5110_priv *florida = data;
-       int ret;
+       struct wm5110_priv *priv = data;
+       struct arizona *arizona = priv->core.arizona;
+       int serviced = 0;
+       int i, ret;
 
-       ret = wm_adsp_compr_handle_irq(&florida->core.adsp[2]);
-       if (ret == -ENODEV)
+       for (i = 0; i < WM5110_NUM_ADSP; ++i) {
+               ret = wm_adsp_compr_handle_irq(&priv->core.adsp[i]);
+               if (ret != -ENODEV)
+                       serviced++;
+       }
+
+       if (!serviced) {
+               dev_err(arizona->dev, "Spurious compressed data IRQ\n");
                return IRQ_NONE;
+       }
 
        return IRQ_HANDLED;
 }
@@ -2366,7 +2420,7 @@ static int wm5110_probe(struct platform_device *pdev)
        ret = snd_soc_register_platform(&pdev->dev, &wm5110_compr_platform);
        if (ret < 0) {
                dev_err(&pdev->dev, "Failed to register platform: %d\n", ret);
-               goto error;
+               return ret;
        }
 
        ret = snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm5110,
@@ -2376,12 +2430,12 @@ static int wm5110_probe(struct platform_device *pdev)
                snd_soc_unregister_platform(&pdev->dev);
        }
 
-error:
        return ret;
 }
 
 static int wm5110_remove(struct platform_device *pdev)
 {
+       snd_soc_unregister_platform(&pdev->dev);
        snd_soc_unregister_codec(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
 
index ff237726775a16a4e66b8b104aaa878d9f187dab..d7f444f874604d7d8a6df4a05aa634fe5448fdfd 100644 (file)
@@ -240,13 +240,13 @@ SOC_DOUBLE_R("Capture Volume ZC Switch", WM8960_LINVOL, WM8960_RINVOL,
 SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
        7, 1, 1),
 
-SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
+SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume",
               WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
-SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
+SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume",
               WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv),
-SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume",
+SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
               WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv),
-SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume",
+SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
               WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv),
 SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume",
                WM8960_RINPATH, 4, 3, 0, micboost_tlv),
@@ -643,29 +643,31 @@ static int wm8960_configure_clocking(struct snd_soc_codec *codec)
                return -EINVAL;
        }
 
-       /* check if the sysclk frequency is available. */
-       for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
-               if (sysclk_divs[i] == -1)
-                       continue;
-               sysclk = freq_out / sysclk_divs[i];
-               for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
-                       if (sysclk == dac_divs[j] * lrclk) {
+       if (wm8960->clk_id != WM8960_SYSCLK_PLL) {
+               /* check if the sysclk frequency is available. */
+               for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+                       if (sysclk_divs[i] == -1)
+                               continue;
+                       sysclk = freq_out / sysclk_divs[i];
+                       for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+                               if (sysclk != dac_divs[j] * lrclk)
+                                       continue;
                                for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k)
                                        if (sysclk == bclk * bclk_divs[k] / 10)
                                                break;
                                if (k != ARRAY_SIZE(bclk_divs))
                                        break;
                        }
+                       if (j != ARRAY_SIZE(dac_divs))
+                               break;
                }
-               if (j != ARRAY_SIZE(dac_divs))
-                       break;
-       }
 
-       if (i != ARRAY_SIZE(sysclk_divs)) {
-               goto configure_clock;
-       } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
-               dev_err(codec->dev, "failed to configure clock\n");
-               return -EINVAL;
+               if (i != ARRAY_SIZE(sysclk_divs)) {
+                       goto configure_clock;
+               } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
+                       dev_err(codec->dev, "failed to configure clock\n");
+                       return -EINVAL;
+               }
        }
        /* get an available pll out frequency and set pll */
        for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
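
A worked pass through the matching loops above may help; the numbers are illustrative only and not taken from the driver's actual divider tables (the /10 in the comparison implies bclk_divs stores ten times the divider):

/*
 * freq_out = 11289600 Hz, lrclk = 44100 Hz, bclk = 64 * fs = 2822400 Hz
 *
 *   sysclk_divs[i] = 1   ->  sysclk = 11289600 / 1     = 11289600
 *   dac_divs[j]    = 256 ->  256 * 44100               = 11289600  (match)
 *   bclk_divs[k]   = 40  ->  2822400 * 40 / 10         = 11289600  (match)
 *
 * All three loops terminate early (i, j and k all short of ARRAY_SIZE), so
 * execution jumps to configure_clock without touching the PLL.  With the
 * new outer check, the whole search is skipped when clk_id is
 * WM8960_SYSCLK_PLL and the PLL path below is taken directly.
 */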
index c284c7b6db8bfa028846936e82a624de8c1a95af..dc8c3b1ebb6fe9a98b3468daa5cc6d9e06f6ae6a 100644 (file)
 
 #include "wm8974.h"
 
+struct wm8974_priv {
+       unsigned int mclk;
+       unsigned int fs;
+};
+
 static const struct reg_default wm8974_reg_defaults[] = {
        {  0, 0x0000 }, {  1, 0x0000 }, {  2, 0x0000 }, {  3, 0x0000 },
        {  4, 0x0050 }, {  5, 0x0000 }, {  6, 0x0140 }, {  7, 0x0000 },
@@ -379,6 +384,79 @@ static int wm8974_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
        return 0;
 }
 
+static unsigned int wm8974_get_mclkdiv(unsigned int f_in, unsigned int f_out,
+                                      int *mclkdiv)
+{
+       unsigned int ratio = 2 * f_in / f_out;
+
+       if (ratio <= 2) {
+               *mclkdiv = WM8974_MCLKDIV_1;
+               ratio = 2;
+       } else if (ratio == 3) {
+               *mclkdiv = WM8974_MCLKDIV_1_5;
+       } else if (ratio == 4) {
+               *mclkdiv = WM8974_MCLKDIV_2;
+       } else if (ratio <= 6) {
+               *mclkdiv = WM8974_MCLKDIV_3;
+               ratio = 6;
+       } else if (ratio <= 8) {
+               *mclkdiv = WM8974_MCLKDIV_4;
+               ratio = 8;
+       } else if (ratio <= 12) {
+               *mclkdiv = WM8974_MCLKDIV_6;
+               ratio = 12;
+       } else if (ratio <= 16) {
+               *mclkdiv = WM8974_MCLKDIV_8;
+               ratio = 16;
+       } else {
+               *mclkdiv = WM8974_MCLKDIV_12;
+               ratio = 24;
+       }
+
+       return f_out * ratio / 2;
+}
+
+static int wm8974_update_clocks(struct snd_soc_dai *dai)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct wm8974_priv *priv = snd_soc_codec_get_drvdata(codec);
+       unsigned int fs256;
+       unsigned int fpll = 0;
+       unsigned int f;
+       int mclkdiv;
+
+       if (!priv->mclk || !priv->fs)
+               return 0;
+
+       fs256 = 256 * priv->fs;
+
+       f = wm8974_get_mclkdiv(priv->mclk, fs256, &mclkdiv);
+
+       if (f != priv->mclk) {
+               /* The PLL performs best around 90MHz */
+               fpll = wm8974_get_mclkdiv(22500000, fs256, &mclkdiv);
+       }
+
+       wm8974_set_dai_pll(dai, 0, 0, priv->mclk, fpll);
+       wm8974_set_dai_clkdiv(dai, WM8974_MCLKDIV, mclkdiv);
+
+       return 0;
+}
+
+static int wm8974_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
+                                unsigned int freq, int dir)
+{
+       struct snd_soc_codec *codec = dai->codec;
+       struct wm8974_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+       if (dir != SND_SOC_CLOCK_IN)
+               return -EINVAL;
+
+       priv->mclk = freq;
+
+       return wm8974_update_clocks(dai);
+}
+
 static int wm8974_set_dai_fmt(struct snd_soc_dai *codec_dai,
                unsigned int fmt)
 {
@@ -441,8 +519,15 @@ static int wm8974_pcm_hw_params(struct snd_pcm_substream *substream,
                                struct snd_soc_dai *dai)
 {
        struct snd_soc_codec *codec = dai->codec;
+       struct wm8974_priv *priv = snd_soc_codec_get_drvdata(codec);
        u16 iface = snd_soc_read(codec, WM8974_IFACE) & 0x19f;
        u16 adn = snd_soc_read(codec, WM8974_ADD) & 0x1f1;
+       int err;
+
+       priv->fs = params_rate(params);
+       err = wm8974_update_clocks(dai);
+       if (err)
+               return err;
 
        /* bit size */
        switch (params_width(params)) {
@@ -547,6 +632,7 @@ static const struct snd_soc_dai_ops wm8974_ops = {
        .set_fmt = wm8974_set_dai_fmt,
        .set_clkdiv = wm8974_set_dai_clkdiv,
        .set_pll = wm8974_set_dai_pll,
+       .set_sysclk = wm8974_set_dai_sysclk,
 };
 
 static struct snd_soc_dai_driver wm8974_dai = {
@@ -606,9 +692,16 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8974 = {
 static int wm8974_i2c_probe(struct i2c_client *i2c,
                            const struct i2c_device_id *id)
 {
+       struct wm8974_priv *priv;
        struct regmap *regmap;
        int ret;
 
+       priv = devm_kzalloc(&i2c->dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       i2c_set_clientdata(i2c, priv);
+
        regmap = devm_regmap_init_i2c(i2c, &wm8974_regmap);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);
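
The MCLK divider helper added above is easiest to read with numbers plugged in; both cases below are illustrative arithmetic only, and the set_pll call is assumed to treat a zero output frequency as "leave the PLL off", as is usual for these drivers:

/*
 * Case 1: mclk = 24576000 Hz, fs = 48000 Hz  ->  fs256 = 12288000 Hz
 *   ratio = 2 * 24576000 / 12288000 = 4  ->  WM8974_MCLKDIV_2
 *   returns 12288000 * 4 / 2 = 24576000 == mclk, so fpll stays 0 and the
 *   MCLK is divided straight down to 256 * fs.
 *
 * Case 2: mclk = 12000000 Hz, fs = 48000 Hz  ->  fs256 = 12288000 Hz
 *   ratio = 2 * 12000000 / 12288000 = 1 (<= 2)  ->  WM8974_MCLKDIV_1
 *   returns 12288000 != mclk, so the PLL path is used:
 *     wm8974_get_mclkdiv(22500000, 12288000, ...) gives ratio 3
 *     ->  WM8974_MCLKDIV_1_5, fpll = 12288000 * 3 / 2 = 18432000
 *   wm8974_set_dai_pll(dai, 0, 0, 12000000, 18432000) then generates
 *   18.432 MHz, and MCLKDIV 1.5 brings it back to 12288000 = 256 * fs.
 */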
index b4dba3a02abafd73044724146601fade748637b4..52d766efe14f0d318844d43bc19df2dcf16bee0d 100644 (file)
@@ -943,7 +943,7 @@ static int wm8997_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
        }
 }
 
-#define WM8997_RATES SNDRV_PCM_RATE_8000_192000
+#define WM8997_RATES SNDRV_PCM_RATE_KNOT
 
 #define WM8997_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
                        SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
index 7719bc509e50b182a0c63b3280154bac4de277fe..012396074a8a6da52d416d5edf10cc092bed16e5 100644 (file)
@@ -1170,7 +1170,7 @@ static const struct snd_soc_dapm_route wm8998_dapm_routes[] = {
        { "DRC1 Signal Activity", NULL, "DRC1R" },
 };
 
-#define WM8998_RATES SNDRV_PCM_RATE_8000_192000
+#define WM8998_RATES SNDRV_PCM_RATE_KNOT
 
 #define WM8998_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
                        SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
index 33806d487b8ae00711dddd4ec400393dbff59b1f..ed90e12b40a624b5066577b383e19db44d2f9c40 100644 (file)
@@ -32,9 +32,6 @@
 #include <sound/initval.h>
 #include <sound/tlv.h>
 
-#include <linux/mfd/arizona/registers.h>
-
-#include "arizona.h"
 #include "wm_adsp.h"
 
 #define adsp_crit(_dsp, fmt, ...) \
@@ -295,6 +292,8 @@ struct wm_adsp_compr {
 
        u32 *raw_buf;
        unsigned int copied_total;
+
+       unsigned int sample_rate;
 };
 
 #define WM_ADSP_DATA_WORD_SIZE         3
@@ -328,7 +327,7 @@ struct wm_adsp_buffer_region_def {
        unsigned int size_offset;
 };
 
-static struct wm_adsp_buffer_region_def ez2control_regions[] = {
+static const struct wm_adsp_buffer_region_def default_regions[] = {
        {
                .mem_type = WMFW_ADSP2_XM,
                .base_offset = HOST_BUFFER_FIELD(X_buf_base),
@@ -350,10 +349,10 @@ struct wm_adsp_fw_caps {
        u32 id;
        struct snd_codec_desc desc;
        int num_regions;
-       struct wm_adsp_buffer_region_def *region_defs;
+       const struct wm_adsp_buffer_region_def *region_defs;
 };
 
-static const struct wm_adsp_fw_caps ez2control_caps[] = {
+static const struct wm_adsp_fw_caps ctrl_caps[] = {
        {
                .id = SND_AUDIOCODEC_BESPOKE,
                .desc = {
@@ -362,8 +361,26 @@ static const struct wm_adsp_fw_caps ez2control_caps[] = {
                        .num_sample_rates = 1,
                        .formats = SNDRV_PCM_FMTBIT_S16_LE,
                },
-               .num_regions = ARRAY_SIZE(ez2control_regions),
-               .region_defs = ez2control_regions,
+               .num_regions = ARRAY_SIZE(default_regions),
+               .region_defs = default_regions,
+       },
+};
+
+static const struct wm_adsp_fw_caps trace_caps[] = {
+       {
+               .id = SND_AUDIOCODEC_BESPOKE,
+               .desc = {
+                       .max_ch = 8,
+                       .sample_rates = {
+                               4000, 8000, 11025, 12000, 16000, 22050,
+                               24000, 32000, 44100, 48000, 64000, 88200,
+                               96000, 176400, 192000
+                       },
+                       .num_sample_rates = 15,
+                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+               },
+               .num_regions = ARRAY_SIZE(default_regions),
+               .region_defs = default_regions,
        },
 };
 
@@ -382,11 +399,16 @@ static const struct {
        [WM_ADSP_FW_CTRL] =     {
                .file = "ctrl",
                .compr_direction = SND_COMPRESS_CAPTURE,
-               .num_caps = ARRAY_SIZE(ez2control_caps),
-               .caps = ez2control_caps,
+               .num_caps = ARRAY_SIZE(ctrl_caps),
+               .caps = ctrl_caps,
        },
        [WM_ADSP_FW_ASR] =      { .file = "asr" },
-       [WM_ADSP_FW_TRACE] =    { .file = "trace" },
+       [WM_ADSP_FW_TRACE] =    {
+               .file = "trace",
+               .compr_direction = SND_COMPRESS_CAPTURE,
+               .num_caps = ARRAY_SIZE(trace_caps),
+               .caps = trace_caps,
+       },
        [WM_ADSP_FW_SPK_PROT] = { .file = "spk-prot" },
        [WM_ADSP_FW_MISC] =     { .file = "misc" },
 };
@@ -2123,30 +2145,9 @@ static void wm_adsp2_boot_work(struct work_struct *work)
                                           struct wm_adsp,
                                           boot_work);
        int ret;
-       unsigned int val;
 
        mutex_lock(&dsp->pwr_lock);
 
-       /*
-        * For simplicity set the DSP clock rate to be the
-        * SYSCLK rate rather than making it configurable.
-        */
-       ret = regmap_read(dsp->regmap, ARIZONA_SYSTEM_CLOCK_1, &val);
-       if (ret != 0) {
-               adsp_err(dsp, "Failed to read SYSCLK state: %d\n", ret);
-               goto err_mutex;
-       }
-       val = (val & ARIZONA_SYSCLK_FREQ_MASK)
-               >> ARIZONA_SYSCLK_FREQ_SHIFT;
-
-       ret = regmap_update_bits_async(dsp->regmap,
-                                      dsp->base + ADSP2_CLOCKING,
-                                      ADSP2_CLK_SEL_MASK, val);
-       if (ret != 0) {
-               adsp_err(dsp, "Failed to set clock rate: %d\n", ret);
-               goto err_mutex;
-       }
-
        ret = wm_adsp2_ena(dsp);
        if (ret != 0)
                goto err_mutex;
@@ -2186,8 +2187,21 @@ err_mutex:
        mutex_unlock(&dsp->pwr_lock);
 }
 
+static void wm_adsp2_set_dspclk(struct wm_adsp *dsp, unsigned int freq)
+{
+       int ret;
+
+       ret = regmap_update_bits_async(dsp->regmap,
+                                      dsp->base + ADSP2_CLOCKING,
+                                      ADSP2_CLK_SEL_MASK,
+                                      freq << ADSP2_CLK_SEL_SHIFT);
+       if (ret != 0)
+               adsp_err(dsp, "Failed to set clock rate: %d\n", ret);
+}
+
 int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
-                  struct snd_kcontrol *kcontrol, int event)
+                        struct snd_kcontrol *kcontrol, int event,
+                        unsigned int freq)
 {
        struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
        struct wm_adsp *dsps = snd_soc_codec_get_drvdata(codec);
@@ -2197,6 +2211,7 @@ int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
 
        switch (event) {
        case SND_SOC_DAPM_PRE_PMU:
+               wm_adsp2_set_dspclk(dsp, freq);
                queue_work(system_unbound_wq, &dsp->boot_work);
                break;
        default:
@@ -2471,6 +2486,8 @@ int wm_adsp_compr_set_params(struct snd_compr_stream *stream,
        if (!compr->raw_buf)
                return -ENOMEM;
 
+       compr->sample_rate = params->codec.sample_rate;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(wm_adsp_compr_set_params);
@@ -2810,7 +2827,6 @@ int wm_adsp_compr_handle_irq(struct wm_adsp *dsp)
        mutex_lock(&dsp->pwr_lock);
 
        if (!buf) {
-               adsp_err(dsp, "Spurious buffer IRQ\n");
                ret = -ENODEV;
                goto out;
        }
@@ -2911,6 +2927,7 @@ int wm_adsp_compr_pointer(struct snd_compr_stream *stream,
 
        tstamp->copied_total = compr->copied_total;
        tstamp->copied_total += buf->avail * WM_ADSP_DATA_WORD_SIZE;
+       tstamp->sampling_rate = compr->sample_rate;
 
 out:
        mutex_unlock(&dsp->pwr_lock);
index 1a928ec547411b6d38bcec2718d2dcc09d1f6cac..b61cb57e600fb6447798e5cf9684c51f641d094e 100644 (file)
@@ -80,7 +80,7 @@ struct wm_adsp {
        SND_SOC_DAPM_PGA_E(wname, SND_SOC_NOPM, num, 0, NULL, 0, \
                wm_adsp1_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD)
 
-#define WM_ADSP2_E(wname, num, event_fn) \
+#define WM_ADSP2(wname, num, event_fn) \
 {      .id = snd_soc_dapm_dai_link, .name = wname " Preloader", \
        .reg = SND_SOC_NOPM, .shift = num, .event = event_fn, \
        .event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }, \
@@ -88,9 +88,6 @@ struct wm_adsp {
        .reg = SND_SOC_NOPM, .shift = num, .event = wm_adsp2_event, \
        .event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD }
 
-#define WM_ADSP2(wname, num) \
-       WM_ADSP2_E(wname, num, wm_adsp2_early_event)
-
 extern const struct snd_kcontrol_new wm_adsp_fw_controls[];
 
 int wm_adsp1_init(struct wm_adsp *dsp);
@@ -100,7 +97,8 @@ int wm_adsp2_codec_remove(struct wm_adsp *dsp, struct snd_soc_codec *codec);
 int wm_adsp1_event(struct snd_soc_dapm_widget *w,
                   struct snd_kcontrol *kcontrol, int event);
 int wm_adsp2_early_event(struct snd_soc_dapm_widget *w,
-                        struct snd_kcontrol *kcontrol, int event);
+                        struct snd_kcontrol *kcontrol, int event,
+                        unsigned int freq);
 int wm_adsp2_event(struct snd_soc_dapm_widget *w,
                   struct snd_kcontrol *kcontrol, int event);
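
With the SYSCLK read moved out of wm_adsp2_boot_work(), every user of the renamed WM_ADSP2() widget macro now supplies an event callback that passes the current SYSCLK divider field down to the core. A reduced sketch of the calling convention, mirroring the wm5110 hunk earlier in this diff (hypothetical "foo" codec):

static int foo_adsp_power_ev(struct snd_soc_dapm_widget *w,
			     struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
	struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
	unsigned int v;
	int ret;

	ret = regmap_read(arizona->regmap, ARIZONA_SYSTEM_CLOCK_1, &v);
	if (ret != 0)
		return ret;

	v = (v & ARIZONA_SYSCLK_FREQ_MASK) >> ARIZONA_SYSCLK_FREQ_SHIFT;

	/* freq is applied to ADSP2_CLOCKING on SND_SOC_DAPM_PRE_PMU */
	return wm_adsp2_early_event(w, kcontrol, event, v);
}

/* in the DAPM widget table: */
WM_ADSP2("DSP1", 0, foo_adsp_power_ev),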
 
index 2ccb8bccc9d4cc4a79c6a94abe4dfb1566872737..3cf46afb353f5019c6c0ee6a3c96fcb48b26b729 100644 (file)
@@ -1517,6 +1517,8 @@ static int mcasp_reparent_fck(struct platform_device *pdev)
        if (!parent_name)
                return 0;
 
+       dev_warn(&pdev->dev, "Update the bindings to use assigned-clocks!\n");
+
        gfclk = clk_get(&pdev->dev, "fck");
        if (IS_ERR(gfclk)) {
                dev_err(&pdev->dev, "failed to get fck\n");
index ce664c239be32fda4d269aea1722bb14631e3b22..bff258d7bcea1f380403df5b9380ac7c9aabf835 100644 (file)
@@ -645,6 +645,8 @@ static int dw_i2s_probe(struct platform_device *pdev)
 
        dev->dev = &pdev->dev;
 
+       dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
+       dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
        if (pdata) {
                dev->capability = pdata->cap;
                clk_id = NULL;
@@ -652,9 +654,6 @@ static int dw_i2s_probe(struct platform_device *pdev)
                if (dev->quirks & DW_I2S_QUIRK_COMP_REG_OFFSET) {
                        dev->i2s_reg_comp1 = pdata->i2s_reg_comp1;
                        dev->i2s_reg_comp2 = pdata->i2s_reg_comp2;
-               } else {
-                       dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
-                       dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
                }
                ret = dw_configure_dai_by_pd(dev, dw_i2s_dai, res, pdata);
        } else {
index 14dfdee05fd5ae842bebdd0092e47bd4bc88f605..35aabf9dc503b8f4ac78bba4df86343585aaf8fb 100644 (file)
@@ -292,8 +292,8 @@ config SND_SOC_FSL_ASOC_CARD
        select SND_SOC_FSL_SSI
        help
         ALSA SoC Audio support with ASRC feature for Freescale SoCs that have
-        ESAI/SAI/SSI and connect with external CODECs such as WM8962, CS42888
-        and SGTL5000.
+        ESAI/SAI/SSI and connect with external CODECs such as WM8962, CS42888,
+        CS4271, CS4272 and SGTL5000.
         Say Y if you want to add support for Freescale Generic ASoC Sound Card.
 
 endif # SND_IMX_SOC
index 562b3bd22d9ac1f86de0a7d08fe64d8145445db8..dffd549a0e2abdc05d0b218669d34ec13f136ee6 100644 (file)
@@ -28,6 +28,8 @@
 #include "../codecs/wm8962.h"
 #include "../codecs/wm8960.h"
 
+#define CS427x_SYSCLK_MCLK 0
+
 #define RX 0
 #define TX 1
 
@@ -99,19 +101,26 @@ struct fsl_asoc_card_priv {
 /**
  * This dapm route map exists for DPCM link only.
  * The other routes shall go through Device Tree.
+ *
+ * Note: keep all ASRC routes in the second half
+ *      to drop them easily for non-ASRC cases.
  */
 static const struct snd_soc_dapm_route audio_map[] = {
-       {"CPU-Playback",  NULL, "ASRC-Playback"},
+       /* 1st half -- Normal DAPM routes */
        {"Playback",  NULL, "CPU-Playback"},
-       {"ASRC-Capture",  NULL, "CPU-Capture"},
        {"CPU-Capture",  NULL, "Capture"},
+       /* 2nd half -- ASRC DAPM routes */
+       {"CPU-Playback",  NULL, "ASRC-Playback"},
+       {"ASRC-Capture",  NULL, "CPU-Capture"},
 };
 
 static const struct snd_soc_dapm_route audio_map_ac97[] = {
-       {"AC97 Playback",  NULL, "ASRC-Playback"},
+       /* 1st half -- Normal DAPM routes */
        {"Playback",  NULL, "AC97 Playback"},
-       {"ASRC-Capture",  NULL, "AC97 Capture"},
        {"AC97 Capture",  NULL, "Capture"},
+       /* 2nd half -- ASRC DAPM routes */
+       {"AC97 Playback",  NULL, "ASRC-Playback"},
+       {"ASRC-Capture",  NULL, "AC97 Capture"},
 };
 
 /* Add all possible widgets into here without being redundant */
@@ -528,6 +537,10 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
                priv->cpu_priv.sysclk_dir[RX] = SND_SOC_CLOCK_OUT;
                priv->cpu_priv.slot_width = 32;
                priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
+       } else if (of_device_is_compatible(np, "fsl,imx-audio-cs427x")) {
+               codec_dai_name = "cs4271-hifi";
+               priv->codec_priv.mclk_id = CS427x_SYSCLK_MCLK;
+               priv->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM;
        } else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) {
                codec_dai_name = "sgtl5000";
                priv->codec_priv.mclk_id = SGTL5000_SYSCLK;
@@ -593,6 +606,10 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
        priv->card.dapm_widgets = fsl_asoc_card_dapm_widgets;
        priv->card.num_dapm_widgets = ARRAY_SIZE(fsl_asoc_card_dapm_widgets);
 
+       /* Drop the second half of DAPM routes -- ASRC */
+       if (!asrc_pdev)
+               priv->card.num_dapm_routes /= 2;
+
        memcpy(priv->dai_link, fsl_asoc_card_dai,
               sizeof(struct snd_soc_dai_link) * ARRAY_SIZE(priv->dai_link));
 
@@ -681,6 +698,7 @@ fail:
 static const struct of_device_id fsl_asoc_card_dt_ids[] = {
        { .compatible = "fsl,imx-audio-ac97", },
        { .compatible = "fsl,imx-audio-cs42888", },
+       { .compatible = "fsl,imx-audio-cs427x", },
        { .compatible = "fsl,imx-audio-sgtl5000", },
        { .compatible = "fsl,imx-audio-wm8962", },
        { .compatible = "fsl,imx-audio-wm8960", },
index fef264d27fd3378d726a77209647ccc8098ec6a0..0754df771e3b4c8a945c29e8c3c99f5c23403719 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/of_address.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/time.h>
 #include <sound/core.h>
 #include <sound/dmaengine_pcm.h>
 #include <sound/pcm_params.h>
@@ -919,7 +920,7 @@ static int fsl_sai_resume(struct device *dev)
        regcache_cache_only(sai->regmap, false);
        regmap_write(sai->regmap, FSL_SAI_TCSR, FSL_SAI_CSR_SR);
        regmap_write(sai->regmap, FSL_SAI_RCSR, FSL_SAI_CSR_SR);
-       msleep(1);
+       usleep_range(1000, 2000);
        regmap_write(sai->regmap, FSL_SAI_TCSR, 0);
        regmap_write(sai->regmap, FSL_SAI_RCSR, 0);
        return regcache_sync(sai->regmap);
index 40dfd8a3648408a2cc76bb6e4c4c3872c8e28ac3..ed8de1035cda159d0d186f2cded0fb7a97fbceb4 100644 (file)
@@ -112,20 +112,6 @@ struct fsl_ssi_rxtx_reg_val {
        struct fsl_ssi_reg_val tx;
 };
 
-static const struct reg_default fsl_ssi_reg_defaults[] = {
-       {CCSR_SSI_SCR,     0x00000000},
-       {CCSR_SSI_SIER,    0x00003003},
-       {CCSR_SSI_STCR,    0x00000200},
-       {CCSR_SSI_SRCR,    0x00000200},
-       {CCSR_SSI_STCCR,   0x00040000},
-       {CCSR_SSI_SRCCR,   0x00040000},
-       {CCSR_SSI_SACNT,   0x00000000},
-       {CCSR_SSI_STMSK,   0x00000000},
-       {CCSR_SSI_SRMSK,   0x00000000},
-       {CCSR_SSI_SACCEN,  0x00000000},
-       {CCSR_SSI_SACCDIS, 0x00000000},
-};
-
 static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
@@ -190,8 +176,7 @@ static const struct regmap_config fsl_ssi_regconfig = {
        .val_bits = 32,
        .reg_stride = 4,
        .val_format_endian = REGMAP_ENDIAN_NATIVE,
-       .reg_defaults = fsl_ssi_reg_defaults,
-       .num_reg_defaults = ARRAY_SIZE(fsl_ssi_reg_defaults),
+       .num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1,
        .readable_reg = fsl_ssi_readable_reg,
        .volatile_reg = fsl_ssi_volatile_reg,
        .precious_reg = fsl_ssi_precious_reg,
@@ -201,6 +186,7 @@ static const struct regmap_config fsl_ssi_regconfig = {
 
 struct fsl_ssi_soc_data {
        bool imx;
+       bool imx21regs; /* imx21-class SSI - no SACC{ST,EN,DIS} regs */
        bool offline_config;
        u32 sisr_write_mask;
 };
@@ -303,6 +289,7 @@ static struct fsl_ssi_soc_data fsl_ssi_mpc8610 = {
 
 static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
        .imx = true,
+       .imx21regs = true,
        .offline_config = true,
        .sisr_write_mask = 0,
 };
@@ -586,8 +573,12 @@ static void fsl_ssi_setup_ac97(struct fsl_ssi_private *ssi_private)
         */
        regmap_write(regs, CCSR_SSI_SACNT,
                        CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
-       regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
-       regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
+
+       /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
+       if (!ssi_private->soc->imx21regs) {
+               regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
+               regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
+       }
 
        /*
         * Enable SSI, Transmit and Receive. AC97 has to communicate with the
@@ -1397,6 +1388,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
        struct resource *res;
        void __iomem *iomem;
        char name[64];
+       struct regmap_config regconfig = fsl_ssi_regconfig;
 
        of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
        if (!of_id || !of_id->data)
@@ -1444,15 +1436,25 @@ static int fsl_ssi_probe(struct platform_device *pdev)
                return PTR_ERR(iomem);
        ssi_private->ssi_phys = res->start;
 
+       if (ssi_private->soc->imx21regs) {
+               /*
+                * According to the datasheet, imx21-class SSIs
+                * don't have SACC{ST,EN,DIS} regs.
+                */
+               regconfig.max_register = CCSR_SSI_SRMSK;
+               regconfig.num_reg_defaults_raw =
+                       CCSR_SSI_SRMSK / sizeof(uint32_t) + 1;
+       }
+
        ret = of_property_match_string(np, "clock-names", "ipg");
        if (ret < 0) {
                ssi_private->has_ipg_clk_name = false;
                ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
-                       &fsl_ssi_regconfig);
+                       &regconfig);
        } else {
                ssi_private->has_ipg_clk_name = true;
                ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
-                       "ipg", iomem, &fsl_ssi_regconfig);
+                       "ipg", iomem, &regconfig);
        }
        if (IS_ERR(ssi_private->regs)) {
                dev_err(&pdev->dev, "Failed to init register map\n");
index a407e833c612523e90c07a732e9cc472b64252ee..fb896b2c9ba32a058eb097d97e7bd8732667ca47 100644 (file)
@@ -72,8 +72,6 @@ static int imx_spdif_audio_probe(struct platform_device *pdev)
                goto end;
        }
 
-       platform_set_drvdata(pdev, data);
-
 end:
        of_node_put(spdif_np);
 
index 0bab76051fd830dd0bc0a3d993341ad8cd759afe..243700cc29e618417d2e3561f6c554031229d531 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #include <linux/delay.h>
+#include <linux/time.h>
 
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -127,7 +128,7 @@ static void psc_ac97_cold_reset(struct snd_ac97 *ac97)
 
        mutex_unlock(&psc_dma->mutex);
 
-       msleep(1);
+       usleep_range(1000, 2000);
        psc_ac97_warm_reset(ac97);
 }
 
index 1ded8811598ef4b05333e7b8e2204f72f0b8e142..2389ab47e25f68265c720ba3f4c4b52e5c321e59 100644 (file)
@@ -99,7 +99,7 @@ static int asoc_simple_card_hw_params(struct snd_pcm_substream *substream,
                if (ret && ret != -ENOTSUPP)
                        goto err;
        }
-
+       return 0;
 err:
        return ret;
 }
index 803f95e40679def6a185c3131f838b210914b6c2..7d7c872c280dbd62b0b1b66209e1e1cfe75791ee 100644 (file)
@@ -30,11 +30,15 @@ config SND_SST_IPC_ACPI
 config SND_SOC_INTEL_SST
        tristate
        select SND_SOC_INTEL_SST_ACPI if ACPI
+       select SND_SOC_INTEL_SST_MATCH if ACPI
        depends on (X86 || COMPILE_TEST)
 
 config SND_SOC_INTEL_SST_ACPI
        tristate
 
+config SND_SOC_INTEL_SST_MATCH
+       tristate
+
 config SND_SOC_INTEL_HASWELL
        tristate
 
@@ -57,7 +61,7 @@ config SND_SOC_INTEL_HASWELL_MACH
 config SND_SOC_INTEL_BYT_RT5640_MACH
        tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
        depends on X86_INTEL_LPSS && I2C
-       depends on DW_DMAC_CORE=y && (SND_SOC_INTEL_BYTCR_RT5640_MACH = n)
+       depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
        select SND_SOC_INTEL_SST
        select SND_SOC_INTEL_BAYTRAIL
        select SND_SOC_RT5640
@@ -69,7 +73,7 @@ config SND_SOC_INTEL_BYT_RT5640_MACH
 config SND_SOC_INTEL_BYT_MAX98090_MACH
        tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
        depends on X86_INTEL_LPSS && I2C
-       depends on DW_DMAC_CORE=y
+       depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
        select SND_SOC_INTEL_SST
        select SND_SOC_INTEL_BAYTRAIL
        select SND_SOC_MAX98090
@@ -97,6 +101,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH
        select SND_SOC_RT5640
        select SND_SST_MFLD_PLATFORM
        select SND_SST_IPC_ACPI
+       select SND_SOC_INTEL_SST_MATCH if ACPI
        help
           This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
           platforms with RT5640 audio codec.
@@ -109,6 +114,7 @@ config SND_SOC_INTEL_BYTCR_RT5651_MACH
        select SND_SOC_RT5651
        select SND_SST_MFLD_PLATFORM
        select SND_SST_IPC_ACPI
+       select SND_SOC_INTEL_SST_MATCH if ACPI
        help
           This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
           platforms with RT5651 audio codec.
@@ -121,6 +127,7 @@ config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
         select SND_SOC_RT5670
         select SND_SST_MFLD_PLATFORM
         select SND_SST_IPC_ACPI
+       select SND_SOC_INTEL_SST_MATCH if ACPI
         help
           This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
           platforms with RT5672 audio codec.
@@ -133,6 +140,7 @@ config SND_SOC_INTEL_CHT_BSW_RT5645_MACH
        select SND_SOC_RT5645
        select SND_SST_MFLD_PLATFORM
        select SND_SST_IPC_ACPI
+       select SND_SOC_INTEL_SST_MATCH if ACPI
        help
          This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
          platforms with RT5645/5650 audio codec.
@@ -145,6 +153,7 @@ config SND_SOC_INTEL_CHT_BSW_MAX98090_TI_MACH
        select SND_SOC_TS3A227E
        select SND_SST_MFLD_PLATFORM
        select SND_SST_IPC_ACPI
+       select SND_SOC_INTEL_SST_MATCH if ACPI
        help
       This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
      platforms with MAX98090 audio codec; it can also support the TI jack chip as an aux device.
index 55c33dc76ce44e2bd3d24de843ff552bb9bfaa30..52ed434cbca6a9e0a08ebc64bb8ee84111f479d9 100644 (file)
@@ -528,6 +528,7 @@ static struct snd_soc_dai_driver sst_platform_dai[] = {
        .ops = &sst_compr_dai_ops,
        .playback = {
                .stream_name = "Compress Playback",
+               .channels_min = 1,
        },
 },
 /* BE CPU  Dais */
index 90588d6e64fc7869a4a4a210b0f188a01e7907b8..e609f089593a935984c101cb6e78fe7a0966e314 100644 (file)
@@ -287,33 +287,20 @@ static struct snd_soc_card snd_soc_card_cht = {
        .num_controls = ARRAY_SIZE(cht_mc_controls),
 };
 
-static acpi_status snd_acpi_codec_match(acpi_handle handle, u32 level,
-                                               void *context, void **ret)
-{
-       *(bool *)context = true;
-       return AE_OK;
-}
-
 static int snd_cht_mc_probe(struct platform_device *pdev)
 {
        int ret_val = 0;
-       bool found = false;
        struct cht_mc_private *drv;
 
        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
        if (!drv)
                return -ENOMEM;
 
-       if (ACPI_SUCCESS(acpi_get_devices(
-                                       "104C227E",
-                                       snd_acpi_codec_match,
-                                       &found, NULL)) && found) {
-               drv->ts3a227e_present = true;
-       } else {
+       drv->ts3a227e_present = acpi_dev_present("104C227E");
+       if (!drv->ts3a227e_present) {
                /* no need to probe the TI jack detection chip */
                snd_soc_card_cht.aux_dev = NULL;
                snd_soc_card_cht.num_aux_devs = 0;
-               drv->ts3a227e_present = false;
        }
 
        /* register the soc card */
index 2d3afddb0a2e8c83fef54b36eed42d7ff6b69878..e6cf800ab231f671261807a242fb8e73d37de2bf 100644 (file)
@@ -333,20 +333,12 @@ static struct cht_acpi_card snd_soc_cards[] = {
        {"10EC5650", CODEC_TYPE_RT5650, &snd_soc_card_chtrt5650},
 };
 
-static acpi_status snd_acpi_codec_match(acpi_handle handle, u32 level,
-                                      void *context, void **ret)
-{
-       *(bool *)context = true;
-       return AE_OK;
-}
-
 static int snd_cht_mc_probe(struct platform_device *pdev)
 {
        int ret_val = 0;
        int i;
        struct cht_mc_private *drv;
        struct snd_soc_card *card = snd_soc_cards[0].soc_card;
-       bool found = false;
        char codec_name[16];
 
        drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_ATOMIC);
@@ -354,10 +346,7 @@ static int snd_cht_mc_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        for (i = 0; i < ARRAY_SIZE(snd_soc_cards); i++) {
-               if (ACPI_SUCCESS(acpi_get_devices(
-                                               snd_soc_cards[i].codec_id,
-                                               snd_acpi_codec_match,
-                                               &found, NULL)) && found) {
+               if (acpi_dev_present(snd_soc_cards[i].codec_id)) {
                        dev_dbg(&pdev->dev,
                                "found codec %s\n", snd_soc_cards[i].codec_id);
                        card = snd_soc_cards[i].soc_card;
index 7396ddb427d8f95a3491d144f597fcb9f2e382d3..2cbcbe4126611d0d1660d86279329da5bec504b1 100644 (file)
@@ -212,7 +212,10 @@ static int skylake_dmic_fixup(struct snd_soc_pcm_runtime *rtd,
 {
        struct snd_interval *channels = hw_param_interval(params,
                                                SNDRV_PCM_HW_PARAM_CHANNELS);
-       channels->min = channels->max = 4;
+       if (params_channels(params) == 2)
+               channels->min = channels->max = 2;
+       else
+               channels->min = channels->max = 4;
 
        return 0;
 }
index 668fdeee195e2f0343047c41b0b5210d5bdc4c07..fbbb25c2ceed2949bd909f4b2e2e152154493227 100644 (file)
@@ -1,13 +1,10 @@
 snd-soc-sst-dsp-objs := sst-dsp.o
-ifneq ($(CONFIG_SND_SST_IPC_ACPI),)
-snd-soc-sst-acpi-objs := sst-match-acpi.o
-else
-snd-soc-sst-acpi-objs := sst-acpi.o sst-match-acpi.o
-endif
-
+snd-soc-sst-acpi-objs := sst-acpi.o
+snd-soc-sst-match-objs := sst-match-acpi.o
 snd-soc-sst-ipc-objs := sst-ipc.o
 
 snd-soc-sst-dsp-$(CONFIG_DW_DMAC_CORE) += sst-firmware.o
 
 obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
 obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
+obj-$(CONFIG_SND_SOC_INTEL_SST_MATCH) += snd-soc-sst-match.o
index 7a85c576dad33575efed4ca9eabb1ccf64970383..2c5eda14d51070947accc019b5d212e9adf0e6e3 100644 (file)
@@ -215,6 +215,7 @@ static struct sst_acpi_desc sst_acpi_broadwell_desc = {
        .dma_size = SST_LPT_DSP_DMA_SIZE,
 };
 
+#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI)
 static struct sst_acpi_mach baytrail_machines[] = {
        { "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
        { "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
@@ -231,11 +232,14 @@ static struct sst_acpi_desc sst_acpi_baytrail_desc = {
        .sst_id = SST_DEV_ID_BYT,
        .resindex_dma_base = -1,
 };
+#endif
 
 static const struct acpi_device_id sst_acpi_match[] = {
        { "INT33C8", (unsigned long)&sst_acpi_haswell_desc },
        { "INT3438", (unsigned long)&sst_acpi_broadwell_desc },
+#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI)
        { "80860F28", (unsigned long)&sst_acpi_baytrail_desc },
+#endif
        { }
 };
 MODULE_DEVICE_TABLE(acpi, sst_acpi_match);
index dd077e116d259b6b60f509f2b3cc10b020a2cb12..0b8ee04c593375006290e4ae8cc7d606bcb58264 100644 (file)
  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  * more details.
  */
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
 
 #include "sst-acpi.h"
 
-static acpi_status sst_acpi_mach_match(acpi_handle handle, u32 level,
-                                      void *context, void **ret)
-{
-       *(bool *)context = true;
-       return AE_OK;
-}
-
 struct sst_acpi_mach *sst_acpi_find_machine(struct sst_acpi_mach *machines)
 {
        struct sst_acpi_mach *mach;
-       bool found = false;
 
        for (mach = machines; mach->id[0]; mach++)
-               if (ACPI_SUCCESS(acpi_get_devices(mach->id,
-                                                 sst_acpi_mach_match,
-                                                 &found, NULL)) && found)
+               if (acpi_dev_present(mach->id))
                        return mach;
 
        return NULL;
 }
 EXPORT_SYMBOL_GPL(sst_acpi_find_machine);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
index de6dac496a0d8b611d4662864597e58ee20330f3..737934cc620d6dddaaab1bc4d22d7c105e7b1049 100644 (file)
@@ -238,9 +238,8 @@ static void skl_copy_copier_caps(struct skl_module_cfg *mconfig,
  * Calculate the gatewat settings required for copier module, type of
  * gateway and index of gateway to use
  */
-static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
-                       struct skl_module_cfg *mconfig,
-                       struct skl_cpr_cfg *cpr_mconfig)
+static u32 skl_get_node_id(struct skl_sst *ctx,
+                       struct skl_module_cfg *mconfig)
 {
        union skl_connector_node_id node_id = {0};
        union skl_ssp_dma_node ssp_node  = {0};
@@ -289,13 +288,24 @@ static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
                break;
 
        default:
-               cpr_mconfig->gtw_cfg.node_id = SKL_NON_GATEWAY_CPR_NODE_ID;
+               node_id.val = 0xFFFFFFFF;
+               break;
+       }
+
+       return node_id.val;
+}
+
+static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
+                       struct skl_module_cfg *mconfig,
+                       struct skl_cpr_cfg *cpr_mconfig)
+{
+       cpr_mconfig->gtw_cfg.node_id = skl_get_node_id(ctx, mconfig);
+
+       if (cpr_mconfig->gtw_cfg.node_id == SKL_NON_GATEWAY_CPR_NODE_ID) {
                cpr_mconfig->cpr_feature_mask = 0;
                return;
        }
 
-       cpr_mconfig->gtw_cfg.node_id = node_id.val;
-
        if (SKL_CONN_SOURCE == mconfig->hw_conn_type)
                cpr_mconfig->gtw_cfg.dma_buffer_size = 2 * mconfig->obs;
        else
@@ -307,6 +317,46 @@ static void skl_setup_cpr_gateway_cfg(struct skl_sst *ctx,
        skl_copy_copier_caps(mconfig, cpr_mconfig);
 }
 
+#define DMA_CONTROL_ID 5
+
+int skl_dsp_set_dma_control(struct skl_sst *ctx, struct skl_module_cfg *mconfig)
+{
+       struct skl_dma_control *dma_ctrl;
+       struct skl_i2s_config_blob config_blob;
+       struct skl_ipc_large_config_msg msg = {0};
+       int err = 0;
+
+
+       /*
+        * If the blob size is the same as the capability size, then no DMA
+        * control data is present, so return.
+        */
+       if (mconfig->formats_config.caps_size == sizeof(config_blob))
+               return 0;
+
+       msg.large_param_id = DMA_CONTROL_ID;
+       msg.param_data_size = sizeof(struct skl_dma_control) +
+                               mconfig->formats_config.caps_size;
+
+       dma_ctrl = kzalloc(msg.param_data_size, GFP_KERNEL);
+       if (dma_ctrl == NULL)
+               return -ENOMEM;
+
+       dma_ctrl->node_id = skl_get_node_id(ctx, mconfig);
+
+       /* size in dwords */
+       dma_ctrl->config_length = sizeof(config_blob) / 4;
+
+       memcpy(dma_ctrl->config_data, mconfig->formats_config.caps,
+                               mconfig->formats_config.caps_size);
+
+       err = skl_ipc_set_large_config(&ctx->ipc, &msg, (u32 *)dma_ctrl);
+
+       kfree(dma_ctrl);
+
+       return err;
+}
+
 static void skl_setup_out_format(struct skl_sst *ctx,
                        struct skl_module_cfg *mconfig,
                        struct skl_audio_data_format *out_fmt)
@@ -688,14 +738,14 @@ int skl_unbind_modules(struct skl_sst *ctx,
        /* get src queue index */
        src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
        if (src_index < 0)
-               return -EINVAL;
+               return 0;
 
        msg.src_queue = src_index;
 
        /* get dst queue index */
        dst_index  = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
        if (dst_index < 0)
-               return -EINVAL;
+               return 0;
 
        msg.dst_queue = dst_index;
 
@@ -747,7 +797,7 @@ int skl_bind_modules(struct skl_sst *ctx,
 
        skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
 
-       if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
+       if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
                dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
                return 0;
 
index f3553258091a2bd7a78f2485aaee7c46fc667758..092450ac1c786d5a17b21b6f1db8b70c07d03cc3 100644 (file)
@@ -206,6 +206,23 @@ static int skl_get_format(struct snd_pcm_substream *substream,
        return format_val;
 }
 
+static int skl_be_prepare(struct snd_pcm_substream *substream,
+               struct snd_soc_dai *dai)
+{
+       struct skl *skl = get_skl_ctx(dai->dev);
+       struct skl_sst *ctx = skl->skl_sst;
+       struct skl_module_cfg *mconfig;
+
+       if ((dai->playback_active > 1) || (dai->capture_active > 1))
+               return 0;
+
+       mconfig = skl_tplg_be_get_cpr_module(dai, substream->stream);
+       if (mconfig == NULL)
+               return -EINVAL;
+
+       return skl_dsp_set_dma_control(ctx, mconfig);
+}
+
 static int skl_pcm_prepare(struct snd_pcm_substream *substream,
                struct snd_soc_dai *dai)
 {
@@ -588,6 +605,7 @@ static struct snd_soc_dai_ops skl_dmic_dai_ops = {
 
 static struct snd_soc_dai_ops skl_be_ssp_dai_ops = {
        .hw_params = skl_be_hw_params,
+       .prepare = skl_be_prepare,
 };
 
 static struct snd_soc_dai_ops skl_link_dai_ops = {
@@ -864,6 +882,9 @@ static int skl_get_delay_from_lpib(struct hdac_ext_bus *ebus,
                        delay += hstream->bufsize;
        }
 
+       if (hstream->bufsize == delay)
+               delay = 0;
+
        if (delay >= hstream->period_bytes) {
                dev_info(bus->dev,
                         "Unstable LPIB (%d >= %d); disabling LPIB delay counting\n",
index 4624556f486de34c396a327a0d279a57a1fdbf96..216d7a8e56803a97ce89acb62b823fc839fe96e1 100644 (file)
@@ -54,12 +54,9 @@ static int is_skl_dsp_widget_type(struct snd_soc_dapm_widget *w)
 
 /*
  * Each pipeline needs memory to be allocated. Check if we have free memory
- * from available pool. Then only add this to pool
- * This is freed when pipe is deleted
- * Note: DSP does actual memory management we only keep track for complete
- * pool
+ * from available pool.
  */
-static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
+static bool skl_is_pipe_mem_avail(struct skl *skl,
                                struct skl_module_cfg *mconfig)
 {
        struct skl_sst *ctx = skl->skl_sst;
@@ -74,10 +71,20 @@ static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
                                "exceeds ppl memory available %d mem %d\n",
                                skl->resource.max_mem, skl->resource.mem);
                return false;
+       } else {
+               return true;
        }
+}
 
+/*
+ * Add the memory to the pool. This is freed when the pipe is deleted.
+ * Note: the DSP does the actual memory management; we only keep track of the
+ * complete pool.
+ */
+static void skl_tplg_alloc_pipe_mem(struct skl *skl,
+                               struct skl_module_cfg *mconfig)
+{
        skl->resource.mem += mconfig->pipe->memory_pages;
-       return true;
 }
 
 /*
@@ -85,10 +92,10 @@ static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
  * quantified in MCPS (Million Clocks Per Second) required for module/pipe
  *
  * Each pipeline needs mcps to be allocated. Check if we have mcps for this
- * pipe. This adds the mcps to driver counter
- * This is removed on pipeline delete
+ * pipe.
  */
-static bool skl_tplg_alloc_pipe_mcps(struct skl *skl,
+
+static bool skl_is_pipe_mcps_avail(struct skl *skl,
                                struct skl_module_cfg *mconfig)
 {
        struct skl_sst *ctx = skl->skl_sst;
@@ -98,13 +105,18 @@ static bool skl_tplg_alloc_pipe_mcps(struct skl *skl,
                        "%s: module_id %d instance %d\n", __func__,
                        mconfig->id.module_id, mconfig->id.instance_id);
                dev_err(ctx->dev,
-                       "exceeds ppl memory available %d > mem %d\n",
+                       "exceeds ppl mcps available %d > mcps %d\n",
                        skl->resource.max_mcps, skl->resource.mcps);
                return false;
+       } else {
+               return true;
        }
+}
 
+static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
+                               struct skl_module_cfg *mconfig)
+{
        skl->resource.mcps += mconfig->mcps;
-       return true;
 }
 
 /*
@@ -248,6 +260,64 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
                                multiplier;
 }
 
+static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
+                                               struct skl_sst *ctx)
+{
+       struct skl_module_cfg *m_cfg = w->priv;
+       int link_type, dir;
+       u32 ch, s_freq, s_fmt;
+       struct nhlt_specific_cfg *cfg;
+       struct skl *skl = get_skl_ctx(ctx->dev);
+
+       /* check if we already have blob */
+       if (m_cfg->formats_config.caps_size > 0)
+               return 0;
+
+       switch (m_cfg->dev_type) {
+       case SKL_DEVICE_DMIC:
+               link_type = NHLT_LINK_DMIC;
+               dir = 1;
+               s_freq = m_cfg->in_fmt[0].s_freq;
+               s_fmt = m_cfg->in_fmt[0].bit_depth;
+               ch = m_cfg->in_fmt[0].channels;
+               break;
+
+       case SKL_DEVICE_I2S:
+               link_type = NHLT_LINK_SSP;
+               if (m_cfg->hw_conn_type == SKL_CONN_SOURCE) {
+                       dir = 1;
+                       s_freq = m_cfg->in_fmt[0].s_freq;
+                       s_fmt = m_cfg->in_fmt[0].bit_depth;
+                       ch = m_cfg->in_fmt[0].channels;
+               } else {
+                       dir = 0;
+                       s_freq = m_cfg->out_fmt[0].s_freq;
+                       s_fmt = m_cfg->out_fmt[0].bit_depth;
+                       ch = m_cfg->out_fmt[0].channels;
+               }
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       /* update the blob based on virtual bus_id and default params */
+       cfg = skl_get_ep_blob(skl, m_cfg->vbus_id, link_type,
+                                       s_fmt, ch, s_freq, dir);
+       if (cfg) {
+               m_cfg->formats_config.caps_size = cfg->size;
+               m_cfg->formats_config.caps = (u32 *) &cfg->caps;
+       } else {
+               dev_err(ctx->dev, "Blob NULL for id %x type %d dirn %d\n",
+                                       m_cfg->vbus_id, link_type, dir);
+               dev_err(ctx->dev, "PCM: ch %d, freq %d, fmt %d\n",
+                                       ch, s_freq, s_fmt);
+               return -EIO;
+       }
+
+       return 0;
+}
+
 static void skl_tplg_update_module_params(struct snd_soc_dapm_widget *w,
                                                        struct skl_sst *ctx)
 {
@@ -411,7 +481,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
                mconfig = w->priv;
 
                /* check resource available */
-               if (!skl_tplg_alloc_pipe_mcps(skl, mconfig))
+               if (!skl_is_pipe_mcps_avail(skl, mconfig))
                        return -ENOMEM;
 
                if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
@@ -421,6 +491,9 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
                                return ret;
                }
 
+               /* if the BE blob is NULL, fill it with default values */
+               skl_tplg_update_be_blob(w, ctx);
+
                /*
                 * apply fix/conversion to module params based on
                 * FE/BE params
@@ -435,6 +508,7 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
                ret = skl_tplg_set_module_params(w, ctx);
                if (ret < 0)
                        return ret;
+               skl_tplg_alloc_pipe_mcps(skl, mconfig);
        }
 
        return 0;
@@ -477,10 +551,10 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
        struct skl_sst *ctx = skl->skl_sst;
 
        /* check resource available */
-       if (!skl_tplg_alloc_pipe_mcps(skl, mconfig))
+       if (!skl_is_pipe_mcps_avail(skl, mconfig))
                return -EBUSY;
 
-       if (!skl_tplg_alloc_pipe_mem(skl, mconfig))
+       if (!skl_is_pipe_mem_avail(skl, mconfig))
                return -ENOMEM;
 
        /*
@@ -526,11 +600,75 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
                src_module = dst_module;
        }
 
+       skl_tplg_alloc_pipe_mem(skl, mconfig);
+       skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
+       return 0;
+}
+
+/*
+ * Some modules require params to be set after the module is bound to
+ * all of its connected pins.
+ *
+ * The module provider sets the set_param flag for such modules, and we
+ * send the params after binding.
+ */
+static int skl_tplg_set_module_bind_params(struct snd_soc_dapm_widget *w,
+                       struct skl_module_cfg *mcfg, struct skl_sst *ctx)
+{
+       int i, ret;
+       struct skl_module_cfg *mconfig = w->priv;
+       const struct snd_kcontrol_new *k;
+       struct soc_bytes_ext *sb;
+       struct skl_algo_data *bc;
+       struct skl_specific_cfg *sp_cfg;
+
+       /*
+        * Check that all out/in pins are in the bind state;
+        * only then set the module params.
+        */
+       for (i = 0; i < mcfg->max_out_queue; i++) {
+               if (mcfg->m_out_pin[i].pin_state != SKL_PIN_BIND_DONE)
+                       return 0;
+       }
+
+       for (i = 0; i < mcfg->max_in_queue; i++) {
+               if (mcfg->m_in_pin[i].pin_state != SKL_PIN_BIND_DONE)
+                       return 0;
+       }
+
+       if (mconfig->formats_config.caps_size > 0 &&
+               mconfig->formats_config.set_params == SKL_PARAM_BIND) {
+               sp_cfg = &mconfig->formats_config;
+               ret = skl_set_module_params(ctx, sp_cfg->caps,
+                                       sp_cfg->caps_size,
+                                       sp_cfg->param_id, mconfig);
+               if (ret < 0)
+                       return ret;
+       }
+
+       for (i = 0; i < w->num_kcontrols; i++) {
+               k = &w->kcontrol_news[i];
+               if (k->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
+                       sb = (void *) k->private_value;
+                       bc = (struct skl_algo_data *)sb->dobj.private;
+
+                       if (bc->set_params == SKL_PARAM_BIND) {
+                               ret = skl_set_module_params(ctx,
+                                               (u32 *)bc->params, bc->max,
+                                               bc->param_id, mconfig);
+                               if (ret < 0)
+                                       return ret;
+                       }
+               }
+       }
+
        return 0;
 }
 
 static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
                                struct skl *skl,
+                               struct snd_soc_dapm_widget *src_w,
                                struct skl_module_cfg *src_mconfig)
 {
        struct snd_soc_dapm_path *p;
@@ -547,6 +685,10 @@ static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
                dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
 
                next_sink = p->sink;
+
+               if (!is_skl_dsp_widget_type(p->sink))
+                       return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
+
                /*
                 * here we will check widgets in sink pipelines, so that
                 * can be any widgets type and we are only interested if
@@ -558,11 +700,19 @@ static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
                        sink = p->sink;
                        sink_mconfig = sink->priv;
 
+                       if (src_mconfig->m_state == SKL_MODULE_UNINIT ||
+                               sink_mconfig->m_state == SKL_MODULE_UNINIT)
+                               continue;
+
                        /* Bind source to sink, mixin is always source */
                        ret = skl_bind_modules(ctx, src_mconfig, sink_mconfig);
                        if (ret)
                                return ret;
 
+                       /* set module params after bind */
+                       skl_tplg_set_module_bind_params(src_w, src_mconfig, ctx);
+                       skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
+
                        /* Start sinks pipe first */
                        if (sink_mconfig->pipe->state != SKL_PIPE_STARTED) {
                                if (sink_mconfig->pipe->conn_type !=
@@ -576,7 +726,7 @@ static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
        }
 
        if (!sink)
-               return skl_tplg_bind_sinks(next_sink, skl, src_mconfig);
+               return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
 
        return 0;
 }
@@ -605,7 +755,7 @@ static int skl_tplg_pga_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
         * if sink is not started, start sink pipe first, then start
         * this pipe
         */
-       ret = skl_tplg_bind_sinks(w, skl, src_mconfig);
+       ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
        if (ret)
                return ret;
 
@@ -693,6 +843,10 @@ static int skl_tplg_mixer_dapm_post_pmu_event(struct snd_soc_dapm_widget *w,
                if (ret)
                        return ret;
 
+               /* set module params after bind */
+               skl_tplg_set_module_bind_params(source, src_mconfig, ctx);
+               skl_tplg_set_module_bind_params(sink, sink_mconfig, ctx);
+
                if (sink_mconfig->pipe->conn_type != SKL_PIPE_CONN_TYPE_FE)
                        ret = skl_run_pipe(ctx, sink_mconfig->pipe);
        }
@@ -773,10 +927,7 @@ static int skl_tplg_mixer_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
                        continue;
                }
 
-               ret = skl_unbind_modules(ctx, src_module, dst_module);
-               if (ret < 0)
-                       return ret;
-
+               skl_unbind_modules(ctx, src_module, dst_module);
                src_module = dst_module;
        }
 
@@ -814,9 +965,6 @@ static int skl_tplg_pga_dapm_post_pmd_event(struct snd_soc_dapm_widget *w,
                         * This is a connecter and if path is found that means
                         * unbind between source and sink has not happened yet
                         */
-                       ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
-                       if (ret < 0)
-                               return ret;
                        ret = skl_unbind_modules(ctx, src_mconfig,
                                                        sink_mconfig);
                }
@@ -842,6 +990,12 @@ static int skl_tplg_vmixer_event(struct snd_soc_dapm_widget *w,
        case SND_SOC_DAPM_PRE_PMU:
                return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
 
+       case SND_SOC_DAPM_POST_PMU:
+               return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
+
+       case SND_SOC_DAPM_PRE_PMD:
+               return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
+
        case SND_SOC_DAPM_POST_PMD:
                return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
        }
@@ -916,6 +1070,13 @@ static int skl_tplg_tlv_control_get(struct snd_kcontrol *kcontrol,
                skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
                                      bc->max, bc->param_id, mconfig);
 
+       /* decrement size for TLV header */
+       size -= 2 * sizeof(u32);
+
+       /* check size as we don't want to send kernel data */
+       if (size > bc->max)
+               size = bc->max;
+
        if (bc->params) {
                if (copy_to_user(data, &bc->param_id, sizeof(u32)))
                        return -EFAULT;
@@ -1063,6 +1224,66 @@ skl_tplg_fe_get_cpr_module(struct snd_soc_dai *dai, int stream)
        return NULL;
 }
 
+static struct skl_module_cfg *skl_get_mconfig_pb_cpr(
+               struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
+{
+       struct snd_soc_dapm_path *p;
+       struct skl_module_cfg *mconfig = NULL;
+
+       snd_soc_dapm_widget_for_each_source_path(w, p) {
+               if (w->endpoints[SND_SOC_DAPM_DIR_OUT] > 0) {
+                       if (p->connect &&
+                                   (p->sink->id == snd_soc_dapm_aif_out) &&
+                                   p->source->priv) {
+                               mconfig = p->source->priv;
+                               return mconfig;
+                       }
+                       mconfig = skl_get_mconfig_pb_cpr(dai, p->source);
+                       if (mconfig)
+                               return mconfig;
+               }
+       }
+       return mconfig;
+}
+
+static struct skl_module_cfg *skl_get_mconfig_cap_cpr(
+               struct snd_soc_dai *dai, struct snd_soc_dapm_widget *w)
+{
+       struct snd_soc_dapm_path *p;
+       struct skl_module_cfg *mconfig = NULL;
+
+       snd_soc_dapm_widget_for_each_sink_path(w, p) {
+               if (w->endpoints[SND_SOC_DAPM_DIR_IN] > 0) {
+                       if (p->connect &&
+                                   (p->source->id == snd_soc_dapm_aif_in) &&
+                                   p->sink->priv) {
+                               mconfig = p->sink->priv;
+                               return mconfig;
+                       }
+                       mconfig = skl_get_mconfig_cap_cpr(dai, p->sink);
+                       if (mconfig)
+                               return mconfig;
+               }
+       }
+       return mconfig;
+}
+
+struct skl_module_cfg *
+skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai, int stream)
+{
+       struct snd_soc_dapm_widget *w;
+       struct skl_module_cfg *mconfig;
+
+       if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               w = dai->playback_widget;
+               mconfig = skl_get_mconfig_pb_cpr(dai, w);
+       } else {
+               w = dai->capture_widget;
+               mconfig = skl_get_mconfig_cap_cpr(dai, w);
+       }
+       return mconfig;
+}
+
 static u8 skl_tplg_be_link_type(int dev_type)
 {
        int ret;
@@ -1510,6 +1731,7 @@ int skl_tplg_init(struct snd_soc_platform *platform, struct hdac_ext_bus *ebus)
                                        &skl_tplg_ops, fw, 0);
        if (ret < 0) {
                dev_err(bus->dev, "tplg component load failed%d\n", ret);
+               release_firmware(fw);
                return -EINVAL;
        }
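The skl-topology.c hunks above split each skl_tplg_alloc_pipe_*() helper into a pure availability check (skl_is_pipe_mcps_avail()/skl_is_pipe_mem_avail()) and a separate bookkeeping step that only runs once the pipe has actually been created. A minimal userspace sketch of that check-then-claim pattern; the names (resource_pool, pipe_request, pool_mcps_*) are hypothetical and this is not the driver code itself:

#include <stdbool.h>
#include <stdio.h>

struct resource_pool {
	int max_mcps;
	int used_mcps;
};

struct pipe_request {
	int mcps;
};

/* pure check: no side effects, safe to call before any firmware work */
static bool pool_mcps_avail(const struct resource_pool *pool,
			    const struct pipe_request *req)
{
	return pool->used_mcps + req->mcps <= pool->max_mcps;
}

/* claim: only called after the pipe was successfully set up */
static void pool_mcps_claim(struct resource_pool *pool,
			    const struct pipe_request *req)
{
	pool->used_mcps += req->mcps;
}

int main(void)
{
	struct resource_pool pool = { .max_mcps = 100, .used_mcps = 0 };
	struct pipe_request req = { .mcps = 40 };

	if (!pool_mcps_avail(&pool, &req))
		return 1;		/* the driver returns -EBUSY/-ENOMEM here */

	/* ... create the pipe, load and configure its modules ... */

	pool_mcps_claim(&pool, &req);	/* bookkeeping only once setup succeeded */
	printf("used %d of %d MCPS\n", pool.used_mcps, pool.max_mcps);
	return 0;
}

Keeping the check side-effect free is what lets the DAPM event handlers bail out early without having to undo a half-done allocation.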
 
index 9aa2a2b6598a3ede5dff38f0570c5b16e30bcb70..de3c401284d9df312b13d8a1549f274a08f5a754 100644 (file)
@@ -113,6 +113,29 @@ struct skl_cpr_gtw_cfg {
        u32 config_data[1];
 } __packed;
 
+struct skl_i2s_config_blob {
+       u32 gateway_attrib;
+       u32 tdm_ts_group[8];
+       u32 ssc0;
+       u32 ssc1;
+       u32 sscto;
+       u32 sspsp;
+       u32 sstsa;
+       u32 ssrsa;
+       u32 ssc2;
+       u32 sspsp2;
+       u32 ssc3;
+       u32 ssioc;
+       u32 mdivc;
+       u32 mdivr;
+} __packed;
+
+struct skl_dma_control {
+       u32 node_id;
+       u32 config_length;
+       u32 config_data[1];
+} __packed;
+
 struct skl_cpr_cfg {
        struct skl_base_cfg base_cfg;
        struct skl_audio_data_format out_fmt;
@@ -313,6 +336,8 @@ static inline struct skl *get_skl_ctx(struct device *dev)
 
 int skl_tplg_be_update_params(struct snd_soc_dai *dai,
        struct skl_pipe_params *params);
+int skl_dsp_set_dma_control(struct skl_sst *ctx,
+               struct skl_module_cfg *mconfig);
 void skl_tplg_set_be_dmic_config(struct snd_soc_dai *dai,
        struct skl_pipe_params *params, int stream);
 int skl_tplg_init(struct snd_soc_platform *platform,
@@ -345,5 +370,7 @@ int skl_set_module_params(struct skl_sst *ctx, u32 *params, int size,
 int skl_get_module_params(struct skl_sst *ctx, u32 *params, int size,
                          u32 param_id, struct skl_module_cfg *mcfg);
 
+struct skl_module_cfg *skl_tplg_be_get_cpr_module(struct snd_soc_dai *dai,
+                                                               int stream);
 enum skl_bitdepth skl_get_bit_depth(int params);
 #endif
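struct skl_dma_control above ends in config_data[1], the pre-C99 idiom for a variable-length trailing payload: config_length words of data follow the fixed header in one allocation. A hedged sketch of how such a blob is typically sized and filled; dma_control_msg and dma_control_alloc() are hypothetical names, and the real IPC layout is dictated by the DSP firmware, not by this example:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct dma_control_msg {
	uint32_t node_id;
	uint32_t config_length;		/* number of uint32_t words that follow */
	uint32_t config_data[1];	/* trailing variable-length payload */
};

static struct dma_control_msg *dma_control_alloc(uint32_t node_id,
						 const uint32_t *cfg,
						 uint32_t words)
{
	/* one config_data word is already counted in sizeof(the struct) */
	size_t size = sizeof(struct dma_control_msg) +
		      (words ? (words - 1) * sizeof(uint32_t) : 0);
	struct dma_control_msg *msg = calloc(1, size);

	if (!msg)
		return NULL;

	msg->node_id = node_id;
	msg->config_length = words;
	memcpy(msg->config_data, cfg, words * sizeof(uint32_t));
	return msg;
}

int main(void)
{
	uint32_t cfg[3] = { 0x1, 0x2, 0x3 };
	struct dma_control_msg *msg = dma_control_alloc(7, cfg, 3);

	if (!msg)
		return 1;
	free(msg);
	return 0;
}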
index c9ae010b3cc8dc1df5a359a67987a44186e588cc..1db88a63ac1787f6862d62b53a141481dbb60cb8 100644 (file)
@@ -144,7 +144,8 @@ enum module_pin_type {
 enum skl_module_param_type {
        SKL_PARAM_DEFAULT = 0,
        SKL_PARAM_INIT,
-       SKL_PARAM_SET
+       SKL_PARAM_SET,
+       SKL_PARAM_BIND
 };
 
 struct skl_dfw_module_pin {
index 443a15de94b5fbc3813f126da74e4bb3781f6e15..06f4b2c134233524b9d871756e9120fb9d2afab5 100644 (file)
@@ -614,8 +614,6 @@ static int skl_probe(struct pci_dev *pci,
                goto out_unregister;
 
        /*configure PM */
-       pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY);
-       pm_runtime_use_autosuspend(bus->dev);
        pm_runtime_put_noidle(bus->dev);
        pm_runtime_allow(bus->dev);
 
@@ -636,6 +634,31 @@ out_free:
        return err;
 }
 
+static void skl_shutdown(struct pci_dev *pci)
+{
+       struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
+       struct hdac_bus *bus = ebus_to_hbus(ebus);
+       struct hdac_stream *s;
+       struct hdac_ext_stream *stream;
+       struct skl *skl;
+
+       if (ebus == NULL)
+               return;
+
+       skl = ebus_to_skl(ebus);
+
+       if (skl->init_failed)
+               return;
+
+       snd_hdac_ext_stop_streams(ebus);
+       list_for_each_entry(s, &bus->stream_list, list) {
+               stream = stream_to_hdac_ext_stream(s);
+               snd_hdac_ext_stream_decouple(ebus, stream, false);
+       }
+
+       snd_hdac_bus_stop_chip(bus);
+}
+
 static void skl_remove(struct pci_dev *pci)
 {
        struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
@@ -679,6 +702,7 @@ static struct pci_driver skl_driver = {
        .id_table = skl_ids,
        .probe = skl_probe,
        .remove = skl_remove,
+       .shutdown = skl_shutdown,
        .driver = {
                .pm = &skl_pm,
        },
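The new skl_shutdown() handler above bails out before touching the controller when the bus was never set up (ebus == NULL) or when probing failed part-way (skl->init_failed). A small sketch of that defensive early-return shape; struct card and card_shutdown() are hypothetical stand-ins, not the driver types:

#include <stdbool.h>
#include <stdio.h>

struct card {
	bool init_failed;
};

static void stop_hardware(struct card *card)
{
	(void)card;
	printf("streams stopped, controller halted\n");
}

/* shutdown can run after a failed or incomplete probe: check before touching hw */
static void card_shutdown(struct card *card)
{
	if (!card)			/* drvdata was never set */
		return;
	if (card->init_failed)		/* probe gave up part-way through */
		return;

	stop_hardware(card);
}

int main(void)
{
	struct card ok = { .init_failed = false };
	struct card bad = { .init_failed = true };

	card_shutdown(NULL);	/* no-op */
	card_shutdown(&bad);	/* no-op */
	card_shutdown(&ok);	/* actually stops the hardware */
	return 0;
}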
index 15c04e2eae34a010d90abd645c8cdfb3bf07df41..9769676753878b833513d0e863dab776bdaae635 100644 (file)
@@ -9,7 +9,7 @@ config SND_SOC_MEDIATEK
 
 config SND_SOC_MT8173_MAX98090
        tristate "ASoC Audio driver for MT8173 with MAX98090 codec"
-       depends on SND_SOC_MEDIATEK
+       depends on SND_SOC_MEDIATEK && I2C
        select SND_SOC_MAX98090
        help
          This adds ASoC driver for Mediatek MT8173 boards
@@ -19,7 +19,7 @@ config SND_SOC_MT8173_MAX98090
 
 config SND_SOC_MT8173_RT5650_RT5676
        tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs"
-       depends on SND_SOC_MEDIATEK
+       depends on SND_SOC_MEDIATEK && I2C
        select SND_SOC_RT5645
        select SND_SOC_RT5677
        help
index 9b1af1a7087480c95f931a3b9e020d0f33ac8e46..f341f623e887f2d2d169e33ef89903a6705c186c 100644 (file)
@@ -87,6 +87,7 @@ struct mtk_afe_memif_data {
        int irq_en_shift;
        int irq_fs_shift;
        int irq_clr_shift;
+       int msb_shift;
 };
 
 struct mtk_afe_memif {
index 08af9f5dc4ab2117bb81501b0381930f9cf483b5..689c51f23d9fad43dced68ac0bc7e9b7222740a6 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/dma-mapping.h>
 #include <linux/pm_runtime.h>
 #include <sound/soc.h>
 #include "mtk-afe-common.h"
@@ -35,6 +36,7 @@
 #define AFE_I2S_CON1           0x0034
 #define AFE_I2S_CON2           0x0038
 #define AFE_CONN_24BIT         0x006c
+#define AFE_MEMIF_MSB          0x00cc
 
 #define AFE_CONN1              0x0024
 #define AFE_CONN2              0x0028
@@ -592,6 +594,7 @@ static int mtk_afe_dais_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct mtk_afe *afe = snd_soc_platform_get_drvdata(rtd->platform);
        struct mtk_afe_memif *memif = &afe->memif[rtd->cpu_dai->id];
+       int msb_at_bit33 = 0;
        int ret;
 
        dev_dbg(afe->dev,
@@ -603,7 +606,8 @@ static int mtk_afe_dais_hw_params(struct snd_pcm_substream *substream,
        if (ret < 0)
                return ret;
 
-       memif->phys_buf_addr = substream->runtime->dma_addr;
+       msb_at_bit33 = upper_32_bits(substream->runtime->dma_addr) ? 1 : 0;
+       memif->phys_buf_addr = lower_32_bits(substream->runtime->dma_addr);
        memif->buffer_size = substream->runtime->dma_bytes;
 
        /* start */
@@ -614,6 +618,11 @@ static int mtk_afe_dais_hw_params(struct snd_pcm_substream *substream,
                     memif->data->reg_ofs_base + AFE_BASE_END_OFFSET,
                     memif->phys_buf_addr + memif->buffer_size - 1);
 
+       /* program the 33rd address bit (MSB) for this memif */
+       regmap_update_bits(afe->regmap, AFE_MEMIF_MSB,
+                          1 << memif->data->msb_shift,
+                          msb_at_bit33 << memif->data->msb_shift);
+
        /* set channel */
        if (memif->data->mono_shift >= 0) {
                unsigned int mono = (params_channels(params) == 1) ? 1 : 0;
@@ -978,6 +987,7 @@ static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = {
                .irq_en_shift = 0,
                .irq_fs_shift = 4,
                .irq_clr_shift = 0,
+               .msb_shift = 0,
        }, {
                .name = "DL2",
                .id = MTK_AFE_MEMIF_DL2,
@@ -991,6 +1001,7 @@ static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = {
                .irq_en_shift = 2,
                .irq_fs_shift = 16,
                .irq_clr_shift = 2,
+               .msb_shift = 1,
        }, {
                .name = "VUL",
                .id = MTK_AFE_MEMIF_VUL,
@@ -1004,6 +1015,7 @@ static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = {
                .irq_en_shift = 1,
                .irq_fs_shift = 8,
                .irq_clr_shift = 1,
+               .msb_shift = 6,
        }, {
                .name = "DAI",
                .id = MTK_AFE_MEMIF_DAI,
@@ -1017,6 +1029,7 @@ static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = {
                .irq_en_shift = 3,
                .irq_fs_shift = 20,
                .irq_clr_shift = 3,
+               .msb_shift = 5,
        }, {
                .name = "AWB",
                .id = MTK_AFE_MEMIF_AWB,
@@ -1030,6 +1043,7 @@ static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = {
                .irq_en_shift = 14,
                .irq_fs_shift = 24,
                .irq_clr_shift = 6,
+               .msb_shift = 3,
        }, {
                .name = "MOD_DAI",
                .id = MTK_AFE_MEMIF_MOD_DAI,
@@ -1043,6 +1057,7 @@ static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = {
                .irq_en_shift = 3,
                .irq_fs_shift = 20,
                .irq_clr_shift = 3,
+               .msb_shift = 4,
        }, {
                .name = "HDMI",
                .id = MTK_AFE_MEMIF_HDMI,
@@ -1056,6 +1071,7 @@ static const struct mtk_afe_memif_data memif_data[MTK_AFE_MEMIF_NUM] = {
                .irq_en_shift = 12,
                .irq_fs_shift = -1,
                .irq_clr_shift = 4,
+               .msb_shift = 8,
        },
 };
 
@@ -1189,6 +1205,10 @@ static int mtk_afe_pcm_dev_probe(struct platform_device *pdev)
        struct mtk_afe *afe;
        struct resource *res;
 
+       ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(33));
+       if (ret)
+               return ret;
+
        afe = devm_kzalloc(&pdev->dev, sizeof(*afe), GFP_KERNEL);
        if (!afe)
                return -ENOMEM;
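The mtk-afe-pcm.c change programs a 33-bit DMA buffer address: the low 32 bits go into the base/end registers and the 33rd bit is written through each memif's msb_shift bit of AFE_MEMIF_MSB, after raising the DMA mask to DMA_BIT_MASK(33). A standalone sketch of the same address split; the driver uses the kernel's upper_32_bits()/lower_32_bits() helpers, plain shifts are shown here, and split_dma_addr() is a hypothetical name:

#include <stdint.h>
#include <stdio.h>

/* split a 33-bit-capable DMA address into a 32-bit base and an MSB flag */
static void split_dma_addr(uint64_t dma_addr, uint32_t *base,
			   unsigned int *msb_at_bit33)
{
	*base = (uint32_t)(dma_addr & 0xffffffffu);
	*msb_at_bit33 = (dma_addr >> 32) ? 1 : 0;
}

int main(void)
{
	uint64_t addr = 0x1ffff0000ull;	/* needs the 33rd address bit */
	uint32_t base;
	unsigned int msb;

	split_dma_addr(addr, &base, &msb);
	printf("base=0x%08x msb_at_bit33=%u\n", (unsigned int)base, msb);

	/*
	 * The driver then writes, per memif:
	 *   regmap_update_bits(regmap, AFE_MEMIF_MSB,
	 *                      1 << msb_shift, msb << msb_shift);
	 */
	return 0;
}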
index c866ade28ad0a6a0005a9ec97e09c05bfa63a783..13631003cb7c6796d49f5278834eb5924aea294c 100644 (file)
@@ -381,9 +381,19 @@ static int mxs_saif_startup(struct snd_pcm_substream *substream,
        __raw_writel(BM_SAIF_CTRL_CLKGATE,
                saif->base + SAIF_CTRL + MXS_CLR_ADDR);
 
+       clk_prepare(saif->clk);
+
        return 0;
 }
 
+static void mxs_saif_shutdown(struct snd_pcm_substream *substream,
+                             struct snd_soc_dai *cpu_dai)
+{
+       struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
+
+       clk_unprepare(saif->clk);
+}
+
 /*
  * Should only be called when port is inactive.
  * although can be called multiple times by upper layers.
@@ -408,7 +418,7 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
        }
 
        stat = __raw_readl(saif->base + SAIF_STAT);
-       if (stat & BM_SAIF_STAT_BUSY) {
+       if (!saif->mclk_in_use && (stat & BM_SAIF_STAT_BUSY)) {
                dev_err(cpu_dai->dev, "error: busy\n");
                return -EBUSY;
        }
@@ -424,8 +434,6 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream,
                return ret;
        }
 
-       /* prepare clk in hw_param, enable in trigger */
-       clk_prepare(saif->clk);
        if (saif != master_saif) {
                /*
                * Set an initial clock rate for the saif internal logic to work
@@ -611,6 +619,7 @@ static int mxs_saif_trigger(struct snd_pcm_substream *substream, int cmd,
 
 static const struct snd_soc_dai_ops mxs_saif_dai_ops = {
        .startup = mxs_saif_startup,
+       .shutdown = mxs_saif_shutdown,
        .trigger = mxs_saif_trigger,
        .prepare = mxs_saif_prepare,
        .hw_params = mxs_saif_hw_params,
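The mxs-saif change moves clk_prepare() out of hw_params(), which may run several times per stream, into the startup()/shutdown() pair, so every prepare gets exactly one matching unprepare. A small sketch of that pairing discipline; fake_clk and the *_fake helpers are hypothetical stand-ins for the clock framework:

#include <assert.h>
#include <stdio.h>

struct fake_clk {
	int prepare_count;
};

static void clk_prepare_fake(struct fake_clk *clk)   { clk->prepare_count++; }
static void clk_unprepare_fake(struct fake_clk *clk) { clk->prepare_count--; }

/* startup/shutdown run exactly once per stream open/close */
static void stream_startup(struct fake_clk *clk)  { clk_prepare_fake(clk); }
static void stream_shutdown(struct fake_clk *clk) { clk_unprepare_fake(clk); }

/* hw_params may be called repeatedly; it must not touch the prepare count */
static void stream_hw_params(struct fake_clk *clk) { (void)clk; }

int main(void)
{
	struct fake_clk clk = { 0 };

	stream_startup(&clk);
	stream_hw_params(&clk);
	stream_hw_params(&clk);		/* params renegotiated: still balanced */
	stream_shutdown(&clk);

	assert(clk.prepare_count == 0);
	printf("prepare/unprepare balanced: %d\n", clk.prepare_count);
	return 0;
}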
index 79688aa1941a5c4168c2c52924c79722ce6172c7..4aeb8e1a7160b812a6ab119cfec657b9f55b16cb 100644 (file)
@@ -440,18 +440,18 @@ static irqreturn_t lpass_platform_lpaif_irq(int irq, void *data)
 }
 
 static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream,
-               struct snd_soc_pcm_runtime *soc_runtime)
+               struct snd_soc_pcm_runtime *rt)
 {
        struct snd_dma_buffer *buf = &substream->dma_buffer;
        size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
 
        buf->dev.type = SNDRV_DMA_TYPE_DEV;
-       buf->dev.dev = soc_runtime->dev;
+       buf->dev.dev = rt->platform->dev;
        buf->private_data = NULL;
-       buf->area = dma_alloc_coherent(soc_runtime->dev, size, &buf->addr,
+       buf->area = dma_alloc_coherent(rt->platform->dev, size, &buf->addr,
                        GFP_KERNEL);
        if (!buf->area) {
-               dev_err(soc_runtime->dev, "%s: Could not allocate DMA buffer\n",
+               dev_err(rt->platform->dev, "%s: Could not allocate DMA buffer\n",
                                __func__);
                return -ENOMEM;
        }
@@ -461,12 +461,12 @@ static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream,
 }
 
 static void lpass_platform_free_buffer(struct snd_pcm_substream *substream,
-               struct snd_soc_pcm_runtime *soc_runtime)
+               struct snd_soc_pcm_runtime *rt)
 {
        struct snd_dma_buffer *buf = &substream->dma_buffer;
 
        if (buf->area) {
-               dma_free_coherent(soc_runtime->dev, buf->bytes, buf->area,
+               dma_free_coherent(rt->dev, buf->bytes, buf->area,
                                buf->addr);
        }
        buf->area = NULL;
@@ -499,9 +499,6 @@ static int lpass_platform_pcm_new(struct snd_soc_pcm_runtime *soc_runtime)
 
        snd_soc_pcm_set_drvdata(soc_runtime, data);
 
-       soc_runtime->dev->coherent_dma_mask = DMA_BIT_MASK(32);
-       soc_runtime->dev->dma_mask = &soc_runtime->dev->coherent_dma_mask;
-
        ret = lpass_platform_alloc_buffer(substream, soc_runtime);
        if (ret)
                return ret;
index df65c5b494b17aad482c7821586b6ed6832bf9f6..b6ab3fc5789e89a92b937e1b2c95a1c6fa2c0063 100644 (file)
@@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
 #endif
 
 int s3c_i2sv2_register_component(struct device *dev, int id,
-                          struct snd_soc_component_driver *cmp_drv,
+                          const struct snd_soc_component_driver *cmp_drv,
                           struct snd_soc_dai_driver *dai_drv)
 {
        struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
index 90abab364b495fd18d11ed421a37bc123e3fa45e..d0684145ed1fd50967c1ce27075af3636aec15aa 100644 (file)
@@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
  * soc core.
  */
 extern int s3c_i2sv2_register_component(struct device *dev, int id,
-                                       struct snd_soc_component_driver *cmp_drv,
+                                       const struct snd_soc_component_driver *cmp_drv,
                                        struct snd_soc_dai_driver *dai_drv);
 
 #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
index 6d3ef366d53692c222b311662a83feb6a5b6d8e4..d74e1ccc0f8f736828ec8172a633700419cc3f7e 100644 (file)
@@ -518,13 +518,8 @@ int rsnd_adg_probe(struct rsnd_priv *priv)
                return -ENOMEM;
        }
 
-       /*
-        * ADG is special module.
-        * Use ADG mod without rsnd_mod_init() to make debug easy
-        * for rsnd_write/rsnd_read
-        */
-       adg->mod.ops = &adg_ops;
-       adg->mod.priv = priv;
+       rsnd_mod_init(priv, &adg->mod, &adg_ops,
+                     NULL, NULL, 0, 0);
 
        rsnd_adg_get_clkin(priv, adg);
        rsnd_adg_get_clkout(priv, adg);
index cd1f064e63c4344af914e2629b92e0d326f47376..abb5eaac854a9b9c47027e278cd5204c18aceace 100644 (file)
@@ -29,7 +29,6 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
 {
        struct rsnd_mod *dvc = rsnd_io_to_mod_dvc(io);
        struct rsnd_mod *mix = rsnd_io_to_mod_mix(io);
-       struct rsnd_mod *src = rsnd_io_to_mod_src(io);
        struct device *dev = rsnd_priv_to_dev(priv);
        u32 data;
 
@@ -38,6 +37,8 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
 
        if (mix) {
                struct rsnd_dai *rdai;
+               struct rsnd_mod *src;
+               struct rsnd_dai_stream *tio;
                int i;
                u32 path[] = {
                        [0] = 0,
@@ -55,16 +56,20 @@ static int rsnd_cmd_init(struct rsnd_mod *mod,
                 */
                data = 0;
                for_each_rsnd_dai(rdai, priv, i) {
-                       io = &rdai->playback;
-                       if (mix == rsnd_io_to_mod_mix(io))
+                       tio = &rdai->playback;
+                       src = rsnd_io_to_mod_src(tio);
+                       if (mix == rsnd_io_to_mod_mix(tio))
                                data |= path[rsnd_mod_id(src)];
 
-                       io = &rdai->capture;
-                       if (mix == rsnd_io_to_mod_mix(io))
+                       tio = &rdai->capture;
+                       src = rsnd_io_to_mod_src(tio);
+                       if (mix == rsnd_io_to_mod_mix(tio))
                                data |= path[rsnd_mod_id(src)];
                }
 
        } else {
+               struct rsnd_mod *src = rsnd_io_to_mod_src(io);
+
                u32 path[] = {
                        [0] = 0x30000,
                        [1] = 0x30001,
@@ -152,7 +157,8 @@ int rsnd_cmd_probe(struct rsnd_priv *priv)
 
        for_each_rsnd_cmd(cmd, priv, i) {
                ret = rsnd_mod_init(priv, rsnd_mod_get(cmd),
-                                   &rsnd_cmd_ops, NULL, RSND_MOD_CMD, i);
+                                   &rsnd_cmd_ops, NULL,
+                                   rsnd_mod_get_status, RSND_MOD_CMD, i);
                if (ret)
                        return ret;
        }
index 02b4b085b8d77f57398a13c35214a49445d1ed77..5227aad43e38547f3f1eae83a8bf843283bea39d 100644 (file)
@@ -138,12 +138,22 @@ struct dma_chan *rsnd_mod_dma_req(struct rsnd_dai_stream *io,
        return mod->ops->dma_req(io, mod);
 }
 
+u32 *rsnd_mod_get_status(struct rsnd_dai_stream *io,
+                        struct rsnd_mod *mod,
+                        enum rsnd_mod_type type)
+{
+       return &mod->status;
+}
+
 int rsnd_mod_init(struct rsnd_priv *priv,
                  struct rsnd_mod *mod,
-                  struct rsnd_mod_ops *ops,
-                  struct clk *clk,
-                  enum rsnd_mod_type type,
-                  int id)
+                 struct rsnd_mod_ops *ops,
+                 struct clk *clk,
+                 u32* (*get_status)(struct rsnd_dai_stream *io,
+                                    struct rsnd_mod *mod,
+                                    enum rsnd_mod_type type),
+                 enum rsnd_mod_type type,
+                 int id)
 {
        int ret = clk_prepare(clk);
 
@@ -155,6 +165,7 @@ int rsnd_mod_init(struct rsnd_priv *priv,
        mod->type       = type;
        mod->clk        = clk;
        mod->priv       = priv;
+       mod->get_status = get_status;
 
        return ret;
 }
@@ -163,6 +174,7 @@ void rsnd_mod_quit(struct rsnd_mod *mod)
 {
        if (mod->clk)
                clk_unprepare(mod->clk);
+       mod->clk = NULL;
 }
 
 void rsnd_mod_interrupt(struct rsnd_mod *mod,
@@ -324,31 +336,73 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);         \
        struct rsnd_mod *mod = (io)->mod[idx];                  \
        struct device *dev = rsnd_priv_to_dev(priv);            \
-       u32 *status = (io)->mod_status + idx;                   \
+       u32 *status = mod->get_status(io, mod, idx);                    \
        u32 mask = 0xF << __rsnd_mod_shift_##func;                      \
        u8 val  = (*status >> __rsnd_mod_shift_##func) & 0xF;           \
        u8 add  = ((val + __rsnd_mod_add_##func) & 0xF);                \
        int ret = 0;                                                    \
        int call = (val == __rsnd_mod_call_##func) && (mod)->ops->func; \
-       *status = (*status & ~mask) +                                   \
-               (add << __rsnd_mod_shift_##func);                       \
+       if (add == 0xF)                                                 \
+               call = 0;                                               \
+       else                                                            \
+               *status = (*status & ~mask) +                           \
+                       (add << __rsnd_mod_shift_##func);               \
        dev_dbg(dev, "%s[%d]\t0x%08x %s\n",                             \
                rsnd_mod_name(mod), rsnd_mod_id(mod),                   \
                *status, call ? #func : "");                            \
        if (call)                                                       \
                ret = (mod)->ops->func(mod, io, param);                 \
+       if (ret)                                                        \
+               dev_dbg(dev, "%s[%d] : rsnd_mod_call error %d\n",       \
+                       rsnd_mod_name(mod), rsnd_mod_id(mod), ret);     \
        ret;                                                            \
 })
 
+static enum rsnd_mod_type rsnd_mod_sequence[][RSND_MOD_MAX] = {
+       {
+               /* CAPTURE */
+               RSND_MOD_AUDMAPP,
+               RSND_MOD_AUDMA,
+               RSND_MOD_DVC,
+               RSND_MOD_MIX,
+               RSND_MOD_CTU,
+               RSND_MOD_CMD,
+               RSND_MOD_SRC,
+               RSND_MOD_SSIU,
+               RSND_MOD_SSIM3,
+               RSND_MOD_SSIM2,
+               RSND_MOD_SSIM1,
+               RSND_MOD_SSIP,
+               RSND_MOD_SSI,
+       }, {
+               /* PLAYBACK */
+               RSND_MOD_AUDMAPP,
+               RSND_MOD_AUDMA,
+               RSND_MOD_SSIM3,
+               RSND_MOD_SSIM2,
+               RSND_MOD_SSIM1,
+               RSND_MOD_SSIP,
+               RSND_MOD_SSI,
+               RSND_MOD_SSIU,
+               RSND_MOD_DVC,
+               RSND_MOD_MIX,
+               RSND_MOD_CTU,
+               RSND_MOD_CMD,
+               RSND_MOD_SRC,
+       },
+};
+
 #define rsnd_dai_call(fn, io, param...)                                \
 ({                                                             \
        struct rsnd_mod *mod;                                   \
+       int type, is_play = rsnd_io_is_play(io);                \
        int ret = 0, i;                                         \
        for (i = 0; i < RSND_MOD_MAX; i++) {                    \
-               mod = (io)->mod[i];                             \
+               type = rsnd_mod_sequence[is_play][i];           \
+               mod = (io)->mod[type];                          \
                if (!mod)                                       \
                        continue;                               \
-               ret |= rsnd_mod_call(i, io, fn, param);         \
+               ret |= rsnd_mod_call(type, io, fn, param);      \
        }                                                       \
        ret;                                                    \
 })
@@ -363,6 +417,9 @@ int rsnd_dai_connect(struct rsnd_mod *mod,
        if (!mod)
                return -EIO;
 
+       if (io->mod[type] == mod)
+               return 0;
+
        if (io->mod[type])
                return -EINVAL;
 
@@ -511,9 +568,16 @@ static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
                ret = rsnd_dai_call(start, io, priv);
                if (ret < 0)
                        goto dai_trigger_end;
+
+               ret = rsnd_dai_call(irq, io, priv, 1);
+               if (ret < 0)
+                       goto dai_trigger_end;
+
                break;
        case SNDRV_PCM_TRIGGER_STOP:
-               ret = rsnd_dai_call(stop, io, priv);
+               ret = rsnd_dai_call(irq, io, priv, 0);
+
+               ret |= rsnd_dai_call(stop, io, priv);
 
                ret |= rsnd_dai_call(quit, io, priv);
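The core.c hunks above replace the per-io mod_status[] array with a per-mod get_status() callback and make rsnd_dai_call() walk the modules in a fixed, direction-dependent order (rsnd_mod_sequence) rather than array order. A compact sketch of that dispatch shape; the module types, the ordering and the ops here are hypothetical, and the real macro additionally encodes call-state nibbles in the status word:

#include <stdint.h>
#include <stdio.h>

enum mod_type { MOD_DMA, MOD_SRC, MOD_SSI, MOD_MAX };

struct mod {
	const char *name;
	uint32_t status;
	uint32_t *(*get_status)(struct mod *mod);
	int (*start)(struct mod *mod);
};

static uint32_t *default_get_status(struct mod *mod) { return &mod->status; }

static int generic_start(struct mod *mod)
{
	printf("start %s\n", mod->name);
	return 0;
}

/* playback and capture bring the same modules up in different orders */
static const enum mod_type sequence[2][MOD_MAX] = {
	{ MOD_DMA, MOD_SRC, MOD_SSI },	/* capture  */
	{ MOD_SSI, MOD_SRC, MOD_DMA },	/* playback */
};

static int dai_call_start(struct mod *mods[MOD_MAX], int is_play)
{
	int ret = 0;

	for (int i = 0; i < MOD_MAX; i++) {
		struct mod *mod = mods[sequence[is_play][i]];

		if (!mod)
			continue;
		*mod->get_status(mod) |= 1;	/* mark as started */
		ret |= mod->start(mod);
	}
	return ret;
}

int main(void)
{
	struct mod ssi = { "ssi", 0, default_get_status, generic_start };
	struct mod src = { "src", 0, default_get_status, generic_start };
	struct mod dma = { "dma", 0, default_get_status, generic_start };
	struct mod *mods[MOD_MAX] = {
		[MOD_DMA] = &dma, [MOD_SRC] = &src, [MOD_SSI] = &ssi,
	};

	return dai_call_start(mods, 1);	/* playback order: ssi, src, dma */
}

Routing the status lookup through a callback gives modules a way to share a status word where that makes sense (note the new parent_ssi_status field added to rsnd_dai_stream in rsnd.h below) instead of each being tied to a fixed per-io slot.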
 
index d53a225d19e9b724b2b326a32f5746c822e3c4b8..a10d0f7b73fa82ff51596ec0f4bef1aba90edef1 100644 (file)
@@ -24,11 +24,17 @@ struct rsnd_ctu {
             i++)
 
 #define rsnd_ctu_get(priv, id) ((struct rsnd_ctu *)(priv->ctu) + id)
-#define rsnd_ctu_initialize_lock(mod)  __rsnd_ctu_initialize_lock(mod, 1)
-#define rsnd_ctu_initialize_unlock(mod)        __rsnd_ctu_initialize_lock(mod, 0)
-static void __rsnd_ctu_initialize_lock(struct rsnd_mod *mod, u32 enable)
+
+static void rsnd_ctu_activation(struct rsnd_mod *mod)
+{
+       rsnd_mod_write(mod, CTU_SWRSR, 0);
+       rsnd_mod_write(mod, CTU_SWRSR, 1);
+}
+
+static void rsnd_ctu_halt(struct rsnd_mod *mod)
 {
-       rsnd_mod_write(mod, CTU_CTUIR, enable);
+       rsnd_mod_write(mod, CTU_CTUIR, 1);
+       rsnd_mod_write(mod, CTU_SWRSR, 0);
 }
 
 static int rsnd_ctu_probe_(struct rsnd_mod *mod,
@@ -38,17 +44,63 @@ static int rsnd_ctu_probe_(struct rsnd_mod *mod,
        return rsnd_cmd_attach(io, rsnd_mod_id(mod) / 4);
 }
 
+static void rsnd_ctu_value_init(struct rsnd_dai_stream *io,
+                              struct rsnd_mod *mod)
+{
+       rsnd_mod_write(mod, CTU_CTUIR, 1);
+
+       rsnd_mod_write(mod, CTU_ADINR, rsnd_get_adinr_chan(mod, io));
+
+       rsnd_mod_write(mod, CTU_CPMDR, 0);
+       rsnd_mod_write(mod, CTU_SCMDR, 0);
+       rsnd_mod_write(mod, CTU_SV00R, 0);
+       rsnd_mod_write(mod, CTU_SV01R, 0);
+       rsnd_mod_write(mod, CTU_SV02R, 0);
+       rsnd_mod_write(mod, CTU_SV03R, 0);
+       rsnd_mod_write(mod, CTU_SV04R, 0);
+       rsnd_mod_write(mod, CTU_SV05R, 0);
+       rsnd_mod_write(mod, CTU_SV06R, 0);
+       rsnd_mod_write(mod, CTU_SV07R, 0);
+
+       rsnd_mod_write(mod, CTU_SV10R, 0);
+       rsnd_mod_write(mod, CTU_SV11R, 0);
+       rsnd_mod_write(mod, CTU_SV12R, 0);
+       rsnd_mod_write(mod, CTU_SV13R, 0);
+       rsnd_mod_write(mod, CTU_SV14R, 0);
+       rsnd_mod_write(mod, CTU_SV15R, 0);
+       rsnd_mod_write(mod, CTU_SV16R, 0);
+       rsnd_mod_write(mod, CTU_SV17R, 0);
+
+       rsnd_mod_write(mod, CTU_SV20R, 0);
+       rsnd_mod_write(mod, CTU_SV21R, 0);
+       rsnd_mod_write(mod, CTU_SV22R, 0);
+       rsnd_mod_write(mod, CTU_SV23R, 0);
+       rsnd_mod_write(mod, CTU_SV24R, 0);
+       rsnd_mod_write(mod, CTU_SV25R, 0);
+       rsnd_mod_write(mod, CTU_SV26R, 0);
+       rsnd_mod_write(mod, CTU_SV27R, 0);
+
+       rsnd_mod_write(mod, CTU_SV30R, 0);
+       rsnd_mod_write(mod, CTU_SV31R, 0);
+       rsnd_mod_write(mod, CTU_SV32R, 0);
+       rsnd_mod_write(mod, CTU_SV33R, 0);
+       rsnd_mod_write(mod, CTU_SV34R, 0);
+       rsnd_mod_write(mod, CTU_SV35R, 0);
+       rsnd_mod_write(mod, CTU_SV36R, 0);
+       rsnd_mod_write(mod, CTU_SV37R, 0);
+
+       rsnd_mod_write(mod, CTU_CTUIR, 0);
+}
+
 static int rsnd_ctu_init(struct rsnd_mod *mod,
                         struct rsnd_dai_stream *io,
                         struct rsnd_priv *priv)
 {
        rsnd_mod_power_on(mod);
 
-       rsnd_ctu_initialize_lock(mod);
+       rsnd_ctu_activation(mod);
 
-       rsnd_mod_write(mod, CTU_ADINR, rsnd_get_adinr_chan(mod, io));
-
-       rsnd_ctu_initialize_unlock(mod);
+       rsnd_ctu_value_init(io, mod);
 
        return 0;
 }
@@ -57,6 +109,8 @@ static int rsnd_ctu_quit(struct rsnd_mod *mod,
                         struct rsnd_dai_stream *io,
                         struct rsnd_priv *priv)
 {
+       rsnd_ctu_halt(mod);
+
        rsnd_mod_power_off(mod);
 
        return 0;
@@ -129,7 +183,7 @@ int rsnd_ctu_probe(struct rsnd_priv *priv)
                }
 
                ret = rsnd_mod_init(priv, rsnd_mod_get(ctu), &rsnd_ctu_ops,
-                                   clk, RSND_MOD_CTU, i);
+                                   clk, rsnd_mod_get_status, RSND_MOD_CTU, i);
                if (ret)
                        goto rsnd_ctu_probe_done;
 
index 418e6fdd06a338b1aac03d585e1a353bd21bfc26..7658e8fd7bdce5032df9210b5662048a88ebf329 100644 (file)
@@ -622,15 +622,13 @@ static void rsnd_dma_of_path(struct rsnd_mod *this,
        }
 }
 
-struct rsnd_mod *rsnd_dma_attach(struct rsnd_dai_stream *io,
-                                struct rsnd_mod *mod, int id)
+int rsnd_dma_attach(struct rsnd_dai_stream *io, struct rsnd_mod *mod,
+                   struct rsnd_mod **dma_mod, int id)
 {
-       struct rsnd_mod *dma_mod;
        struct rsnd_mod *mod_from = NULL;
        struct rsnd_mod *mod_to = NULL;
        struct rsnd_priv *priv = rsnd_io_to_priv(io);
        struct rsnd_dma_ctrl *dmac = rsnd_priv_to_dmac(priv);
-       struct rsnd_dma *dma;
        struct device *dev = rsnd_priv_to_dev(priv);
        struct rsnd_mod_ops *ops;
        enum rsnd_mod_type type;
@@ -646,17 +644,10 @@ struct rsnd_mod *rsnd_dma_attach(struct rsnd_dai_stream *io,
         *      rsnd_rdai_continuance_probe()
         */
        if (!dmac)
-               return ERR_PTR(-EAGAIN);
-
-       dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
-       if (!dma)
-               return ERR_PTR(-ENOMEM);
+               return -EAGAIN;
 
        rsnd_dma_of_path(mod, io, is_play, &mod_from, &mod_to);
 
-       dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
-       dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
-
        /* for Gen2 */
        if (mod_from && mod_to) {
                ops     = &rsnd_dmapp_ops;
@@ -678,27 +669,38 @@ struct rsnd_mod *rsnd_dma_attach(struct rsnd_dai_stream *io,
                type    = RSND_MOD_AUDMA;
        }
 
-       dma_mod = rsnd_mod_get(dma);
+       if (!(*dma_mod)) {
+               struct rsnd_dma *dma;
 
-       ret = rsnd_mod_init(priv, dma_mod,
-                           ops, NULL, type, dma_id);
-       if (ret < 0)
-               return ERR_PTR(ret);
+               dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+               if (!dma)
+                       return -ENOMEM;
 
-       dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
-               rsnd_mod_name(dma_mod), rsnd_mod_id(dma_mod),
-               rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
-               rsnd_mod_name(mod_to),   rsnd_mod_id(mod_to));
+               *dma_mod = rsnd_mod_get(dma);
 
-       ret = attach(io, dma, id, mod_from, mod_to);
-       if (ret < 0)
-               return ERR_PTR(ret);
+               dma->src_addr = rsnd_dma_addr(io, mod_from, is_play, 1);
+               dma->dst_addr = rsnd_dma_addr(io, mod_to,   is_play, 0);
+
+               ret = rsnd_mod_init(priv, *dma_mod, ops, NULL,
+                                   rsnd_mod_get_status, type, dma_id);
+               if (ret < 0)
+                       return ret;
 
-       ret = rsnd_dai_connect(dma_mod, io, type);
+               dev_dbg(dev, "%s[%d] %s[%d] -> %s[%d]\n",
+                       rsnd_mod_name(*dma_mod), rsnd_mod_id(*dma_mod),
+                       rsnd_mod_name(mod_from), rsnd_mod_id(mod_from),
+                       rsnd_mod_name(mod_to),   rsnd_mod_id(mod_to));
+
+               ret = attach(io, dma, id, mod_from, mod_to);
+               if (ret < 0)
+                       return ret;
+       }
+
+       ret = rsnd_dai_connect(*dma_mod, io, type);
        if (ret < 0)
-               return ERR_PTR(ret);
+               return ret;
 
-       return rsnd_mod_get(dma);
+       return 0;
 }
 
 int rsnd_dma_probe(struct rsnd_priv *priv)
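rsnd_dma_attach() now takes struct rsnd_mod **dma_mod and only allocates and initializes the DMA mod on the first call; a later re-attach reuses the existing object and just reconnects it to the stream. A minimal sketch of that allocate-on-first-use-through-a-double-pointer shape; dma_ctx and dma_attach() here are hypothetical:

#include <stdio.h>
#include <stdlib.h>

struct dma_ctx {
	int id;
	int connected;
};

/* attach: allocate *dma only the first time, reconnect every time */
static int dma_attach(struct dma_ctx **dma, int id)
{
	if (!*dma) {
		*dma = calloc(1, sizeof(**dma));
		if (!*dma)
			return -1;	/* -ENOMEM in the driver */
		(*dma)->id = id;
		printf("allocated dma %d\n", id);
	}

	(*dma)->connected = 1;		/* the connection is redone on every attach */
	return 0;
}

int main(void)
{
	struct dma_ctx *dma = NULL;

	if (dma_attach(&dma, 0))	/* first probe: allocates */
		return 1;
	dma->connected = 0;		/* pretend the path was torn down */
	dma_attach(&dma, 0);		/* re-attach: reuses the same object */

	free(dma);
	return 0;
}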
index d45ffe496397f84b4f68adc3377699f2e94e2308..302c193f674d26fe6e5e4e23d00944525310c58c 100644 (file)
@@ -373,7 +373,7 @@ int rsnd_dvc_probe(struct rsnd_priv *priv)
                }
 
                ret = rsnd_mod_init(priv, rsnd_mod_get(dvc), &rsnd_dvc_ops,
-                             clk, RSND_MOD_DVC, i);
+                                   clk, rsnd_mod_get_status, RSND_MOD_DVC, i);
                if (ret)
                        goto rsnd_dvc_probe_done;
 
index ea24247eba73c2fffc8bfdb4c56f2c6d7c9268c5..271d29adac68d2b3b010f588378c00a70a08d589 100644 (file)
@@ -260,8 +260,43 @@ static int rsnd_gen2_probe(struct rsnd_priv *priv)
                RSND_GEN_M_REG(SRC_SRCCR,       0x224,  0x40),
                RSND_GEN_M_REG(SRC_BSDSR,       0x22c,  0x40),
                RSND_GEN_M_REG(SRC_BSISR,       0x238,  0x40),
+               RSND_GEN_M_REG(CTU_SWRSR,       0x500,  0x100),
                RSND_GEN_M_REG(CTU_CTUIR,       0x504,  0x100),
                RSND_GEN_M_REG(CTU_ADINR,       0x508,  0x100),
+               RSND_GEN_M_REG(CTU_CPMDR,       0x510,  0x100),
+               RSND_GEN_M_REG(CTU_SCMDR,       0x514,  0x100),
+               RSND_GEN_M_REG(CTU_SV00R,       0x518,  0x100),
+               RSND_GEN_M_REG(CTU_SV01R,       0x51c,  0x100),
+               RSND_GEN_M_REG(CTU_SV02R,       0x520,  0x100),
+               RSND_GEN_M_REG(CTU_SV03R,       0x524,  0x100),
+               RSND_GEN_M_REG(CTU_SV04R,       0x528,  0x100),
+               RSND_GEN_M_REG(CTU_SV05R,       0x52c,  0x100),
+               RSND_GEN_M_REG(CTU_SV06R,       0x530,  0x100),
+               RSND_GEN_M_REG(CTU_SV07R,       0x534,  0x100),
+               RSND_GEN_M_REG(CTU_SV10R,       0x538,  0x100),
+               RSND_GEN_M_REG(CTU_SV11R,       0x53c,  0x100),
+               RSND_GEN_M_REG(CTU_SV12R,       0x540,  0x100),
+               RSND_GEN_M_REG(CTU_SV13R,       0x544,  0x100),
+               RSND_GEN_M_REG(CTU_SV14R,       0x548,  0x100),
+               RSND_GEN_M_REG(CTU_SV15R,       0x54c,  0x100),
+               RSND_GEN_M_REG(CTU_SV16R,       0x550,  0x100),
+               RSND_GEN_M_REG(CTU_SV17R,       0x554,  0x100),
+               RSND_GEN_M_REG(CTU_SV20R,       0x558,  0x100),
+               RSND_GEN_M_REG(CTU_SV21R,       0x55c,  0x100),
+               RSND_GEN_M_REG(CTU_SV22R,       0x560,  0x100),
+               RSND_GEN_M_REG(CTU_SV23R,       0x564,  0x100),
+               RSND_GEN_M_REG(CTU_SV24R,       0x568,  0x100),
+               RSND_GEN_M_REG(CTU_SV25R,       0x56c,  0x100),
+               RSND_GEN_M_REG(CTU_SV26R,       0x570,  0x100),
+               RSND_GEN_M_REG(CTU_SV27R,       0x574,  0x100),
+               RSND_GEN_M_REG(CTU_SV30R,       0x578,  0x100),
+               RSND_GEN_M_REG(CTU_SV31R,       0x57c,  0x100),
+               RSND_GEN_M_REG(CTU_SV32R,       0x580,  0x100),
+               RSND_GEN_M_REG(CTU_SV33R,       0x584,  0x100),
+               RSND_GEN_M_REG(CTU_SV34R,       0x588,  0x100),
+               RSND_GEN_M_REG(CTU_SV35R,       0x58c,  0x100),
+               RSND_GEN_M_REG(CTU_SV36R,       0x590,  0x100),
+               RSND_GEN_M_REG(CTU_SV37R,       0x594,  0x100),
                RSND_GEN_M_REG(MIX_SWRSR,       0xd00,  0x40),
                RSND_GEN_M_REG(MIX_MIXIR,       0xd04,  0x40),
                RSND_GEN_M_REG(MIX_ADINR,       0xd08,  0x40),
index 65542b6a89e9e3452b312369035983c98b70077d..e0e337ad4206a7a114f7ab3d4caca96ccf923e5d 100644 (file)
@@ -172,7 +172,7 @@ int rsnd_mix_probe(struct rsnd_priv *priv)
                }
 
                ret = rsnd_mod_init(priv, rsnd_mod_get(mix), &rsnd_mix_ops,
-                                   clk, RSND_MOD_MIX, i);
+                                   clk, rsnd_mod_get_status, RSND_MOD_MIX, i);
                if (ret)
                        goto rsnd_mix_probe_done;
 
index 317dd793149a7feb5279e2f665385a564c594469..bbaf89b6de8af100453a1cb12b08bb5b68b4fc47 100644 (file)
@@ -86,8 +86,43 @@ enum rsnd_reg {
        RSND_REG_CMD_BUSIF_DALIGN,      /* Gen2 only */
        RSND_REG_CMD_ROUTE_SLCT,
        RSND_REG_CMDOUT_TIMSEL,         /* Gen2 only */
+       RSND_REG_CTU_SWRSR,
        RSND_REG_CTU_CTUIR,
        RSND_REG_CTU_ADINR,
+       RSND_REG_CTU_CPMDR,
+       RSND_REG_CTU_SCMDR,
+       RSND_REG_CTU_SV00R,
+       RSND_REG_CTU_SV01R,
+       RSND_REG_CTU_SV02R,
+       RSND_REG_CTU_SV03R,
+       RSND_REG_CTU_SV04R,
+       RSND_REG_CTU_SV05R,
+       RSND_REG_CTU_SV06R,
+       RSND_REG_CTU_SV07R,
+       RSND_REG_CTU_SV10R,
+       RSND_REG_CTU_SV11R,
+       RSND_REG_CTU_SV12R,
+       RSND_REG_CTU_SV13R,
+       RSND_REG_CTU_SV14R,
+       RSND_REG_CTU_SV15R,
+       RSND_REG_CTU_SV16R,
+       RSND_REG_CTU_SV17R,
+       RSND_REG_CTU_SV20R,
+       RSND_REG_CTU_SV21R,
+       RSND_REG_CTU_SV22R,
+       RSND_REG_CTU_SV23R,
+       RSND_REG_CTU_SV24R,
+       RSND_REG_CTU_SV25R,
+       RSND_REG_CTU_SV26R,
+       RSND_REG_CTU_SV27R,
+       RSND_REG_CTU_SV30R,
+       RSND_REG_CTU_SV31R,
+       RSND_REG_CTU_SV32R,
+       RSND_REG_CTU_SV33R,
+       RSND_REG_CTU_SV34R,
+       RSND_REG_CTU_SV35R,
+       RSND_REG_CTU_SV36R,
+       RSND_REG_CTU_SV37R,
        RSND_REG_MIX_SWRSR,
        RSND_REG_MIX_MIXIR,
        RSND_REG_MIX_ADINR,
@@ -166,8 +201,8 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io);
 /*
  *     R-Car DMA
  */
-struct rsnd_mod *rsnd_dma_attach(struct rsnd_dai_stream *io,
-                              struct rsnd_mod *mod, int id);
+int rsnd_dma_attach(struct rsnd_dai_stream *io,
+                   struct rsnd_mod *mod, struct rsnd_mod **dma_mod, int id);
 int rsnd_dma_probe(struct rsnd_priv *priv);
 struct dma_chan *rsnd_dma_request_channel(struct device_node *of_node,
                                          struct rsnd_mod *mod, char *name);
@@ -214,6 +249,9 @@ struct rsnd_mod_ops {
        int (*stop)(struct rsnd_mod *mod,
                    struct rsnd_dai_stream *io,
                    struct rsnd_priv *priv);
+       int (*irq)(struct rsnd_mod *mod,
+                  struct rsnd_dai_stream *io,
+                  struct rsnd_priv *priv, int enable);
        int (*pcm_new)(struct rsnd_mod *mod,
                       struct rsnd_dai_stream *io,
                       struct snd_soc_pcm_runtime *rtd);
@@ -233,47 +271,54 @@ struct rsnd_mod {
        struct rsnd_mod_ops *ops;
        struct rsnd_priv *priv;
        struct clk *clk;
+       u32 *(*get_status)(struct rsnd_dai_stream *io,
+                          struct rsnd_mod *mod,
+                          enum rsnd_mod_type type);
+       u32 status;
 };
 /*
  * status
  *
- * 0xH0000CBA
+ * 0xH0000CB0
  *
- * A   0: probe        1: remove
  * B   0: init         1: quit
  * C   0: start        1: stop
  *
  * H is always called (see __rsnd_mod_call)
+ * H   0: probe        1: remove
  * H   0: pcm_new
  * H   0: fallback
  * H   0: hw_params
  */
-#define __rsnd_mod_shift_probe         0
-#define __rsnd_mod_shift_remove                0
 #define __rsnd_mod_shift_init          4
 #define __rsnd_mod_shift_quit          4
 #define __rsnd_mod_shift_start         8
 #define __rsnd_mod_shift_stop          8
+#define __rsnd_mod_shift_probe         28 /* always called */
+#define __rsnd_mod_shift_remove                28 /* always called */
+#define __rsnd_mod_shift_irq           28 /* always called */
 #define __rsnd_mod_shift_pcm_new       28 /* always called */
 #define __rsnd_mod_shift_fallback      28 /* always called */
 #define __rsnd_mod_shift_hw_params     28 /* always called */
 
-#define __rsnd_mod_add_probe            1
-#define __rsnd_mod_add_remove          -1
+#define __rsnd_mod_add_probe           0
+#define __rsnd_mod_add_remove          0
 #define __rsnd_mod_add_init             1
 #define __rsnd_mod_add_quit            -1
 #define __rsnd_mod_add_start            1
 #define __rsnd_mod_add_stop            -1
+#define __rsnd_mod_add_irq             0
 #define __rsnd_mod_add_pcm_new         0
 #define __rsnd_mod_add_fallback                0
 #define __rsnd_mod_add_hw_params       0
 
 #define __rsnd_mod_call_probe          0
-#define __rsnd_mod_call_remove         1
+#define __rsnd_mod_call_remove         0
 #define __rsnd_mod_call_init           0
 #define __rsnd_mod_call_quit           1
 #define __rsnd_mod_call_start          0
 #define __rsnd_mod_call_stop           1
+#define __rsnd_mod_call_irq            0
 #define __rsnd_mod_call_pcm_new                0
 #define __rsnd_mod_call_fallback       0
 #define __rsnd_mod_call_hw_params      0
@@ -286,10 +331,13 @@ struct rsnd_mod {
 
 int rsnd_mod_init(struct rsnd_priv *priv,
                  struct rsnd_mod *mod,
-                  struct rsnd_mod_ops *ops,
-                  struct clk *clk,
-                  enum rsnd_mod_type type,
-                  int id);
+                 struct rsnd_mod_ops *ops,
+                 struct clk *clk,
+                 u32* (*get_status)(struct rsnd_dai_stream *io,
+                                    struct rsnd_mod *mod,
+                                    enum rsnd_mod_type type),
+                 enum rsnd_mod_type type,
+                 int id);
 void rsnd_mod_quit(struct rsnd_mod *mod);
 char *rsnd_mod_name(struct rsnd_mod *mod);
 struct dma_chan *rsnd_mod_dma_req(struct rsnd_dai_stream *io,
@@ -297,6 +345,10 @@ struct dma_chan *rsnd_mod_dma_req(struct rsnd_dai_stream *io,
 void rsnd_mod_interrupt(struct rsnd_mod *mod,
                        void (*callback)(struct rsnd_mod *mod,
                                         struct rsnd_dai_stream *io));
+u32 *rsnd_mod_get_status(struct rsnd_dai_stream *io,
+                        struct rsnd_mod *mod,
+                        enum rsnd_mod_type type);
+
 void rsnd_parse_connect_common(struct rsnd_dai *rdai,
                struct rsnd_mod* (*mod_get)(struct rsnd_priv *priv, int id),
                struct device_node *node,
@@ -319,7 +371,7 @@ struct rsnd_dai_stream {
        struct rsnd_mod *mod[RSND_MOD_MAX];
        struct rsnd_dai_path_info *info; /* rcar_snd.h */
        struct rsnd_dai *rdai;
-       u32 mod_status[RSND_MOD_MAX];
+       u32 parent_ssi_status;
        int byte_pos;
        int period_pos;
        int byte_per_period;
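The status comment block above repacks the per-mod status word: each balanced callback pair (init/quit, start/stop) shares a 4-bit counter at a fixed shift, the always-called callbacks sit at shift 28 with an add of 0, and rsnd_mod_call() now skips a call whose counter update would wrap to 0xF (for example a stop with no matching start). A sketch of just that counter logic; the shift value and names are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define SHIFT_START	8	/* start/stop share one 4-bit counter */
#define ADD_START	(+1)
#define ADD_STOP	(-1)
#define CALL_START	0	/* call start only when the counter is 0 */
#define CALL_STOP	1	/* call stop only when the counter is 1 */

static int mod_call(uint32_t *status, int shift, int add, int expect,
		    const char *name)
{
	uint32_t mask = (uint32_t)0xF << shift;
	uint8_t val = (*status >> shift) & 0xF;
	uint8_t next = (val + add) & 0xF;
	int call = (val == expect);

	if (next == 0xF)		/* would underflow: e.g. stop before start */
		call = 0;
	else
		*status = (*status & ~mask) + ((uint32_t)next << shift);

	printf("%s: %s (status=0x%08x)\n", name,
	       call ? "called" : "skipped", (unsigned int)*status);
	return call;
}

int main(void)
{
	uint32_t status = 0;

	mod_call(&status, SHIFT_START, ADD_STOP, CALL_STOP, "stop");	/* skipped */
	mod_call(&status, SHIFT_START, ADD_START, CALL_START, "start");	/* called  */
	mod_call(&status, SHIFT_START, ADD_STOP, CALL_STOP, "stop");	/* called  */
	return 0;
}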
index 5eda056d9f20ed8ef3137da612c63ae9d2cba067..dc1621acc21137697802b089d164baca8feda181 100644 (file)
@@ -25,7 +25,6 @@ struct rsnd_src {
        struct rsnd_kctrl_cfg_s sen;  /* sync convert enable */
        struct rsnd_kctrl_cfg_s sync; /* sync convert */
        u32 convert_rate; /* sampling rate convert */
-       int err;
        int irq;
 };
 
@@ -272,9 +271,10 @@ static void rsnd_src_set_convert_rate(struct rsnd_dai_stream *io,
                rsnd_adg_set_convert_timing_gen2(mod, io);
 }
 
-#define rsnd_src_irq_enable(mod)  rsnd_src_irq_ctrol(mod, 1)
-#define rsnd_src_irq_disable(mod) rsnd_src_irq_ctrol(mod, 0)
-static void rsnd_src_irq_ctrol(struct rsnd_mod *mod, int enable)
+static int rsnd_src_irq(struct rsnd_mod *mod,
+                       struct rsnd_dai_stream *io,
+                       struct rsnd_priv *priv,
+                       int enable)
 {
        struct rsnd_src *src = rsnd_mod_to_src(mod);
        u32 sys_int_val, int_val, sys_int_mask;
@@ -306,6 +306,8 @@ static void rsnd_src_irq_ctrol(struct rsnd_mod *mod, int enable)
        rsnd_mod_write(mod, SRC_INT_ENABLE0, int_val);
        rsnd_mod_bset(mod, SCU_SYS_INT_EN0, sys_int_mask, sys_int_val);
        rsnd_mod_bset(mod, SCU_SYS_INT_EN1, sys_int_mask, sys_int_val);
+
+       return 0;
 }
 
 static void rsnd_src_status_clear(struct rsnd_mod *mod)
@@ -316,7 +318,7 @@ static void rsnd_src_status_clear(struct rsnd_mod *mod)
        rsnd_mod_bset(mod, SCU_SYS_STATUS1, val, val);
 }
 
-static bool rsnd_src_record_error(struct rsnd_mod *mod)
+static bool rsnd_src_error_occurred(struct rsnd_mod *mod)
 {
        struct rsnd_src *src = rsnd_mod_to_src(mod);
        u32 val0, val1;
@@ -333,12 +335,8 @@ static bool rsnd_src_record_error(struct rsnd_mod *mod)
                val0 = val0 & 0xffff;
 
        if ((rsnd_mod_read(mod, SCU_SYS_STATUS0) & val0) ||
-           (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val1)) {
-               struct rsnd_src *src = rsnd_mod_to_src(mod);
-
-               src->err++;
+           (rsnd_mod_read(mod, SCU_SYS_STATUS1) & val1))
                ret = true;
-       }
 
        return ret;
 }
@@ -367,11 +365,7 @@ static int rsnd_src_stop(struct rsnd_mod *mod,
                         struct rsnd_dai_stream *io,
                         struct rsnd_priv *priv)
 {
-       /*
-        * stop SRC output only
-        * see rsnd_src_quit
-        */
-       rsnd_mod_write(mod, SRC_CTRL, 0x01);
+       rsnd_mod_write(mod, SRC_CTRL, 0);
 
        return 0;
 }
@@ -390,10 +384,6 @@ static int rsnd_src_init(struct rsnd_mod *mod,
 
        rsnd_src_status_clear(mod);
 
-       rsnd_src_irq_enable(mod);
-
-       src->err = 0;
-
        /* reset sync convert_rate */
        src->sync.val = 0;
 
@@ -405,21 +395,11 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
                         struct rsnd_priv *priv)
 {
        struct rsnd_src *src = rsnd_mod_to_src(mod);
-       struct device *dev = rsnd_priv_to_dev(priv);
-
-       rsnd_src_irq_disable(mod);
-
-       /* stop both out/in */
-       rsnd_mod_write(mod, SRC_CTRL, 0);
 
        rsnd_src_halt(mod);
 
        rsnd_mod_power_off(mod);
 
-       if (src->err)
-               dev_warn(dev, "%s[%d] under/over flow err = %d\n",
-                        rsnd_mod_name(mod), rsnd_mod_id(mod), src->err);
-
        src->convert_rate = 0;
 
        /* reset sync convert_rate */
@@ -432,8 +412,7 @@ static void __rsnd_src_interrupt(struct rsnd_mod *mod,
                                 struct rsnd_dai_stream *io)
 {
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
-       struct rsnd_src *src = rsnd_mod_to_src(mod);
-       struct device *dev = rsnd_priv_to_dev(priv);
+       bool stop = false;
 
        spin_lock(&priv->lock);
 
@@ -441,26 +420,16 @@ static void __rsnd_src_interrupt(struct rsnd_mod *mod,
        if (!rsnd_io_is_working(io))
                goto rsnd_src_interrupt_out;
 
-       if (rsnd_src_record_error(mod)) {
-
-               dev_dbg(dev, "%s[%d] restart\n",
-                       rsnd_mod_name(mod), rsnd_mod_id(mod));
-
-               rsnd_src_stop(mod, io, priv);
-               rsnd_src_start(mod, io, priv);
-       }
-
-       if (src->err > 1024) {
-               rsnd_src_irq_disable(mod);
-
-               dev_warn(dev, "no more %s[%d] restart\n",
-                        rsnd_mod_name(mod), rsnd_mod_id(mod));
-       }
+       if (rsnd_src_error_occurred(mod))
+               stop = true;
 
        rsnd_src_status_clear(mod);
 rsnd_src_interrupt_out:
 
        spin_unlock(&priv->lock);
+
+       if (stop)
+               snd_pcm_stop_xrun(io->substream);
 }
 
 static irqreturn_t rsnd_src_interrupt(int irq, void *data)
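The reworked interrupt path above no longer tries to restart the SRC from the handler: it only records, while holding priv->lock, that an under/overflow occurred, and reports the xrun via snd_pcm_stop_xrun() after the lock has been dropped. A minimal userspace sketch of that decide-under-the-lock, act-after-unlocking pattern; handle_irq() and report_xrun() are invented stand-ins, not driver code:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool error_pending;      /* set elsewhere when an under/overflow happens */

    /* Stand-in for snd_pcm_stop_xrun(); must not be called with the lock held. */
    static void report_xrun(void)
    {
        printf("xrun reported\n");
    }

    static void handle_irq(void)
    {
        bool stop = false;

        pthread_mutex_lock(&lock);
        if (error_pending) {        /* decide while the state is protected */
            error_pending = false;
            stop = true;
        }
        pthread_mutex_unlock(&lock);

        if (stop)                   /* act only after dropping the lock */
            report_xrun();
    }

    int main(void)
    {
        error_pending = true;
        handle_irq();
        return 0;
    }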
@@ -485,7 +454,7 @@ static int rsnd_src_probe_(struct rsnd_mod *mod,
                /*
                 * IRQ is not supported on non-DT
                 * see
-                *      rsnd_src_irq_enable()
+                *      rsnd_src_irq()
                 */
                ret = devm_request_irq(dev, irq,
                                       rsnd_src_interrupt,
@@ -495,9 +464,7 @@ static int rsnd_src_probe_(struct rsnd_mod *mod,
                        return ret;
        }
 
-       src->dma = rsnd_dma_attach(io, mod, 0);
-       if (IS_ERR(src->dma))
-               return PTR_ERR(src->dma);
+       ret = rsnd_dma_attach(io, mod, &src->dma, 0);
 
        return ret;
 }
@@ -557,6 +524,7 @@ static struct rsnd_mod_ops rsnd_src_ops = {
        .quit   = rsnd_src_quit,
        .start  = rsnd_src_start,
        .stop   = rsnd_src_stop,
+       .irq    = rsnd_src_irq,
        .hw_params = rsnd_src_hw_params,
        .pcm_new = rsnd_src_pcm_new,
 };
@@ -622,7 +590,8 @@ int rsnd_src_probe(struct rsnd_priv *priv)
                }
 
                ret = rsnd_mod_init(priv, rsnd_mod_get(src),
-                                   &rsnd_src_ops, clk, RSND_MOD_SRC, i);
+                                   &rsnd_src_ops, clk, rsnd_mod_get_status,
+                                   RSND_MOD_SRC, i);
                if (ret)
                        goto rsnd_src_probe_done;
 
index 7ee89da4dd5fc3a36b62147acd95fc6b4724661b..592505a4bc136967585f1e7d9e1674633028a42c 100644 (file)
@@ -64,7 +64,6 @@
 #define SSI_NAME "ssi"
 
 struct rsnd_ssi {
-       struct rsnd_ssi *parent;
        struct rsnd_mod mod;
        struct rsnd_mod *dma;
 
@@ -75,7 +74,6 @@ struct rsnd_ssi {
        u32 wsr;
        int chan;
        int rate;
-       int err;
        int irq;
        unsigned int usrcnt;
 };
@@ -96,7 +94,8 @@ struct rsnd_ssi {
 #define rsnd_mod_to_ssi(_mod) container_of((_mod), struct rsnd_ssi, mod)
 #define rsnd_ssi_mode_flags(p) ((p)->flags)
 #define rsnd_ssi_is_parent(ssi, io) ((ssi) == rsnd_io_to_mod_ssip(io))
-#define rsnd_ssi_is_multi_slave(ssi, io) ((mod) != rsnd_io_to_mod_ssi(io))
+#define rsnd_ssi_is_multi_slave(mod, io) \
+       (rsnd_ssi_multi_slaves(io) & (1 << rsnd_mod_id(mod)))
 
 int rsnd_ssi_use_busif(struct rsnd_dai_stream *io)
 {
@@ -144,30 +143,24 @@ static void rsnd_ssi_status_check(struct rsnd_mod *mod,
        dev_warn(dev, "status check failed\n");
 }
 
-static int rsnd_ssi_irq_enable(struct rsnd_mod *ssi_mod)
+static int rsnd_ssi_irq(struct rsnd_mod *mod,
+                       struct rsnd_dai_stream *io,
+                       struct rsnd_priv *priv,
+                       int enable)
 {
-       struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       u32 val = 0;
 
        if (rsnd_is_gen1(priv))
                return 0;
 
-       /* enable SSI interrupt if Gen2 */
-       rsnd_mod_write(ssi_mod, SSI_INT_ENABLE,
-                      rsnd_ssi_is_dma_mode(ssi_mod) ?
-                      0x0e000000 : 0x0f000000);
-
-       return 0;
-}
-
-static int rsnd_ssi_irq_disable(struct rsnd_mod *ssi_mod)
-{
-       struct rsnd_priv *priv = rsnd_mod_to_priv(ssi_mod);
-
-       if (rsnd_is_gen1(priv))
+       if (ssi->usrcnt != 1)
                return 0;
 
-       /* disable SSI interrupt if Gen2 */
-       rsnd_mod_write(ssi_mod, SSI_INT_ENABLE, 0x00000000);
+       if (enable)
+               val = rsnd_ssi_is_dma_mode(mod) ? 0x0e000000 : 0x0f000000;
+
+       rsnd_mod_write(mod, SSI_INT_ENABLE, val);
 
        return 0;
 }
@@ -175,9 +168,6 @@ static int rsnd_ssi_irq_disable(struct rsnd_mod *ssi_mod)
 u32 rsnd_ssi_multi_slaves(struct rsnd_dai_stream *io)
 {
        struct rsnd_mod *mod;
-       struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
-       struct rsnd_priv *priv = rsnd_io_to_priv(io);
-       struct device *dev = rsnd_priv_to_dev(priv);
        enum rsnd_mod_type types[] = {
                RSND_MOD_SSIM1,
                RSND_MOD_SSIM2,
@@ -185,16 +175,6 @@ u32 rsnd_ssi_multi_slaves(struct rsnd_dai_stream *io)
        };
        int i, mask;
 
-       switch (runtime->channels) {
-       case 2: /* Multi channel is not needed for Stereo */
-               return 0;
-       case 6:
-               break;
-       default:
-               dev_err(dev, "unsupported channel\n");
-               return 0;
-       }
-
        mask = 0;
        for (i = 0; i < ARRAY_SIZE(types); i++) {
                mod = rsnd_io_to_mod(io, types[i]);
@@ -386,13 +366,9 @@ static int rsnd_ssi_init(struct rsnd_mod *mod,
        if (ret < 0)
                return ret;
 
-       ssi->err        = -1; /* ignore 1st error */
-
        /* clear error status */
        rsnd_ssi_status_clear(mod);
 
-       rsnd_ssi_irq_enable(mod);
-
        return 0;
 }
 
@@ -409,17 +385,8 @@ static int rsnd_ssi_quit(struct rsnd_mod *mod,
                return -EIO;
        }
 
-       if (!rsnd_ssi_is_parent(mod, io)) {
-               if (ssi->err > 0)
-                       dev_warn(dev, "%s[%d] under/over flow err = %d\n",
-                                rsnd_mod_name(mod), rsnd_mod_id(mod),
-                                ssi->err);
-
+       if (!rsnd_ssi_is_parent(mod, io))
                ssi->cr_own     = 0;
-               ssi->err        = 0;
-
-               rsnd_ssi_irq_disable(mod);
-       }
 
        rsnd_ssi_master_clk_stop(ssi, io);
 
@@ -456,21 +423,9 @@ static int rsnd_ssi_hw_params(struct rsnd_mod *mod,
        return 0;
 }
 
-static u32 rsnd_ssi_record_error(struct rsnd_ssi *ssi)
-{
-       struct rsnd_mod *mod = rsnd_mod_get(ssi);
-       u32 status = rsnd_ssi_status_get(mod);
-
-       /* under/over flow error */
-       if (status & (UIRQ | OIRQ))
-               ssi->err++;
-
-       return status;
-}
-
-static int __rsnd_ssi_start(struct rsnd_mod *mod,
-                           struct rsnd_dai_stream *io,
-                           struct rsnd_priv *priv)
+static int rsnd_ssi_start(struct rsnd_mod *mod,
+                         struct rsnd_dai_stream *io,
+                         struct rsnd_priv *priv)
 {
        struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
        u32 cr;
@@ -492,25 +447,21 @@ static int __rsnd_ssi_start(struct rsnd_mod *mod,
        return 0;
 }
 
-static int rsnd_ssi_start(struct rsnd_mod *mod,
-                         struct rsnd_dai_stream *io,
-                         struct rsnd_priv *priv)
+static int rsnd_ssi_stop(struct rsnd_mod *mod,
+                        struct rsnd_dai_stream *io,
+                        struct rsnd_priv *priv)
 {
+       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+       u32 cr;
+
        /*
-        * no limit to start
+        * don't stop if not last user
         * see also
-        *      rsnd_ssi_stop
+        *      rsnd_ssi_start
         *      rsnd_ssi_interrupt
         */
-       return __rsnd_ssi_start(mod, io, priv);
-}
-
-static int __rsnd_ssi_stop(struct rsnd_mod *mod,
-                          struct rsnd_dai_stream *io,
-                          struct rsnd_priv *priv)
-{
-       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-       u32 cr;
+       if (ssi->usrcnt > 1)
+               return 0;
 
        /*
         * disable all IRQ,
@@ -532,33 +483,14 @@ static int __rsnd_ssi_stop(struct rsnd_mod *mod,
        return 0;
 }
 
-static int rsnd_ssi_stop(struct rsnd_mod *mod,
-                        struct rsnd_dai_stream *io,
-                        struct rsnd_priv *priv)
-{
-       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
-
-       /*
-        * don't stop if not last user
-        * see also
-        *      rsnd_ssi_start
-        *      rsnd_ssi_interrupt
-        */
-       if (ssi->usrcnt > 1)
-               return 0;
-
-       return __rsnd_ssi_stop(mod, io, priv);
-}
-
 static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
                                 struct rsnd_dai_stream *io)
 {
-       struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
        struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
-       struct device *dev = rsnd_priv_to_dev(priv);
        int is_dma = rsnd_ssi_is_dma_mode(mod);
        u32 status;
        bool elapsed = false;
+       bool stop = false;
 
        spin_lock(&priv->lock);
 
@@ -566,7 +498,7 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
        if (!rsnd_io_is_working(io))
                goto rsnd_ssi_interrupt_out;
 
-       status = rsnd_ssi_record_error(ssi);
+       status = rsnd_ssi_status_get(mod);
 
        /* PIO only */
        if (!is_dma && (status & DIRQ)) {
@@ -588,23 +520,8 @@ static void __rsnd_ssi_interrupt(struct rsnd_mod *mod,
        }
 
        /* DMA only */
-       if (is_dma && (status & (UIRQ | OIRQ))) {
-               /*
-                * restart SSI
-                */
-               dev_dbg(dev, "%s[%d] restart\n",
-                       rsnd_mod_name(mod), rsnd_mod_id(mod));
-
-               __rsnd_ssi_stop(mod, io, priv);
-               __rsnd_ssi_start(mod, io, priv);
-       }
-
-       if (ssi->err > 1024) {
-               rsnd_ssi_irq_disable(mod);
-
-               dev_warn(dev, "no more %s[%d] restart\n",
-                        rsnd_mod_name(mod), rsnd_mod_id(mod));
-       }
+       if (is_dma && (status & (UIRQ | OIRQ)))
+               stop = true;
 
        rsnd_ssi_status_clear(mod);
 rsnd_ssi_interrupt_out:
@@ -612,6 +529,10 @@ rsnd_ssi_interrupt_out:
 
        if (elapsed)
                rsnd_dai_period_elapsed(io);
+
+       if (stop)
+               snd_pcm_stop_xrun(io->substream);
+
 }
 
 static irqreturn_t rsnd_ssi_interrupt(int irq, void *data)
@@ -683,6 +604,7 @@ static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
        .quit   = rsnd_ssi_quit,
        .start  = rsnd_ssi_start,
        .stop   = rsnd_ssi_stop,
+       .irq    = rsnd_ssi_irq,
        .hw_params = rsnd_ssi_hw_params,
 };
 
@@ -705,9 +627,8 @@ static int rsnd_ssi_dma_probe(struct rsnd_mod *mod,
        if (ret)
                return ret;
 
-       ssi->dma = rsnd_dma_attach(io, mod, dma_id);
-       if (IS_ERR(ssi->dma))
-               return PTR_ERR(ssi->dma);
+       /* SSI probe might be called many times in MUX multi path */
+       ret = rsnd_dma_attach(io, mod, &ssi->dma, dma_id);
 
        return ret;
 }
@@ -858,6 +779,41 @@ int __rsnd_ssi_is_pin_sharing(struct rsnd_mod *mod)
        return !!(rsnd_ssi_mode_flags(ssi) & RSND_SSI_CLK_PIN_SHARE);
 }
 
+static u32 *rsnd_ssi_get_status(struct rsnd_dai_stream *io,
+                               struct rsnd_mod *mod,
+                               enum rsnd_mod_type type)
+{
+       /*
+        * SSIP (= SSI parent) needs special handling, otherwise the
+        * 2nd SSI might not start. See also rsnd_mod_call().
+        *
+        * We can't keep the parent SSI status on the SSI itself, because we
+        * don't know how many SSIs request the parent SSI. Thus, it is located
+        * on "io" now.
+        * ex) trouble case
+        *      Playback: SSI0
+        *      Capture : SSI1 (needs SSI0)
+        *
+        * 1) start Capture  -> SSI0/SSI1 are started.
+        * 2) start Playback -> SSI0 doesn't work, because it is already
+        *                      marked as "started" on 1)
+        *
+        * OTOH, using each mod's own status is good for the MUX case:
+        * it doesn't need to start again on the 2nd start.
+        * ex)
+        *      IO-0: SRC0 -> CTU1 -+-> MUX -> DVC -> SSIU -> SSI0
+        *                          |
+        *      IO-1: SRC1 -> CTU2 -+
+        *
+        * 1) start IO-0 ->     start SSI0
+        * 2) start IO-1 ->     SSI0 doesn't need to start, because it is
+        *                      already started on 1)
+        */
+       if (type == RSND_MOD_SSIP)
+               return &io->parent_ssi_status;
+
+       return rsnd_mod_get_status(io, mod, type);
+}
+
 int rsnd_ssi_probe(struct rsnd_priv *priv)
 {
        struct device_node *node;
@@ -920,7 +876,7 @@ int rsnd_ssi_probe(struct rsnd_priv *priv)
                        ops = &rsnd_ssi_dma_ops;
 
                ret = rsnd_mod_init(priv, rsnd_mod_get(ssi), ops, clk,
-                                   RSND_MOD_SSI, i);
+                                   rsnd_ssi_get_status, RSND_MOD_SSI, i);
                if (ret)
                        goto rsnd_ssi_probe_done;
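The comment above motivates rsnd_ssi_get_status(): parent-SSI usage is tracked per stream (io->parent_ssi_status), while every other use keeps the module's own status word. The following is a standalone toy model of that idea; all names except parent_ssi_status are invented and none of the real rsnd_mod_call() machinery is reproduced:

    #include <stdio.h>
    #include <stdint.h>

    #define STARTED (1 << 0)

    struct io {
        uint32_t parent_ssi_status;   /* per-stream status for the parent SSI */
    };

    struct mod {
        const char *name;
        uint32_t status;              /* per-module status (good for the MUX case) */
    };

    /* Returns the status word that should gate start/stop for this particular use. */
    static uint32_t *get_status(struct io *io, struct mod *mod, int is_parent_use)
    {
        return is_parent_use ? &io->parent_ssi_status : &mod->status;
    }

    static void call_start(struct io *io, struct mod *mod, int is_parent_use)
    {
        uint32_t *status = get_status(io, mod, is_parent_use);

        if (*status & STARTED)        /* already started for this particular use */
            return;
        *status |= STARTED;
        printf("start %s (%s use)\n", mod->name,
               is_parent_use ? "parent" : "direct");
    }

    int main(void)
    {
        struct mod ssi0 = { "SSI0", 0 };
        struct io capture = { 0 }, playback = { 0 };

        /* 1) Capture needs SSI0 as the parent of SSI1: tracked on capture's io. */
        call_start(&capture, &ssi0, 1);

        /* 2) Playback then uses SSI0 directly: its own status is still clear,
         *    so it still gets started, which is the point of the scheme. */
        call_start(&playback, &ssi0, 0);
        return 0;
    }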
 
index 06d72828e5bcdc210ceb1234880abd4d7a50186b..11e55889b40105c24e9a41b7e5eb3d64d7e0cd20 100644 (file)
@@ -206,7 +206,8 @@ int rsnd_ssiu_probe(struct rsnd_priv *priv)
 
        for_each_rsnd_ssiu(ssiu, priv, i) {
                ret = rsnd_mod_init(priv, rsnd_mod_get(ssiu),
-                                   ops, NULL, RSND_MOD_SSIU, i);
+                                   ops, NULL, rsnd_mod_get_status,
+                                   RSND_MOD_SSIU, i);
                if (ret)
                        return ret;
        }
index 5a2812fa8946071014d54f9c18ae8311be56fe4e..0d37079879002d472c538b69ccc3321c0d7772ad 100644 (file)
@@ -310,7 +310,7 @@ struct dapm_kcontrol_data {
 };
 
 static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
-       struct snd_kcontrol *kcontrol)
+       struct snd_kcontrol *kcontrol, const char *ctrl_name)
 {
        struct dapm_kcontrol_data *data;
        struct soc_mixer_control *mc;
@@ -333,7 +333,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
                if (mc->autodisable) {
                        struct snd_soc_dapm_widget template;
 
-                       name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name,
+                       name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
                                         "Autodisable");
                        if (!name) {
                                ret = -ENOMEM;
@@ -371,7 +371,7 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
                if (e->autodisable) {
                        struct snd_soc_dapm_widget template;
 
-                       name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name,
+                       name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
                                         "Autodisable");
                        if (!name) {
                                ret = -ENOMEM;
@@ -871,7 +871,7 @@ static int dapm_create_or_share_kcontrol(struct snd_soc_dapm_widget *w,
 
                kcontrol->private_free = dapm_kcontrol_free;
 
-               ret = dapm_kcontrol_data_alloc(w, kcontrol);
+               ret = dapm_kcontrol_data_alloc(w, kcontrol, name);
                if (ret) {
                        snd_ctl_free_one(kcontrol);
                        goto exit_free;
index e898b427be7ee34961451b2d8b177848415436ed..aa99dac31b3bff3ce6bb510b4b07bba637363371 100644 (file)
@@ -1810,7 +1810,8 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
                    (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
-                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
                        continue;
 
                dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
@@ -1866,18 +1867,6 @@ int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
                if (!snd_soc_dpcm_be_can_update(fe, be, stream))
                        continue;
 
-               /* only allow hw_params() if no connected FEs are running */
-               if (!snd_soc_dpcm_can_be_params(fe, be, stream))
-                       continue;
-
-               if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
-                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
-                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE))
-                       continue;
-
-               dev_dbg(be->dev, "ASoC: hw_params BE %s\n",
-                       dpcm->fe->dai_link->name);
-
                /* copy params for each dpcm */
                memcpy(&dpcm->hw_params, &fe->dpcm[stream].hw_params,
                                sizeof(struct snd_pcm_hw_params));
@@ -1894,6 +1883,18 @@ int dpcm_be_dai_hw_params(struct snd_soc_pcm_runtime *fe, int stream)
                        }
                }
 
+               /* only allow hw_params() if no connected FEs are running */
+               if (!snd_soc_dpcm_can_be_params(fe, be, stream))
+                       continue;
+
+               if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN) &&
+                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
+                   (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE))
+                       continue;
+
+               dev_dbg(be->dev, "ASoC: hw_params BE %s\n",
+                       dpcm->fe->dai_link->name);
+
                ret = soc_pcm_hw_params(be_substream, &dpcm->hw_params);
                if (ret < 0) {
                        dev_err(dpcm->be->dev,
index d75deba5617db05dc02407b6c669d61ee1695a5e..dfcd38647606445d7dab130178c7ca4f765ff33c 100644 (file)
@@ -22,6 +22,7 @@ config SND_SUN_AMD7930
 config SND_SUN_CS4231
        tristate "Sun CS4231"
        select SND_PCM
+       select SND_TIMER
        help
          Say Y here to include support for CS4231 sound device on Sun.
 
index 1f09d9591276f94a9601e42e1687899b58368e03..3fc63583a5372dbec8d09e7ba7cf7399abb54cd1 100644 (file)
@@ -82,6 +82,7 @@ static int pid[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS-1)] = -1 };
 static int device_setup[SNDRV_CARDS]; /* device parameter for this card */
 static bool ignore_ctl_error;
 static bool autoclock = true;
+static char *quirk_alias[SNDRV_CARDS];
 
 module_param_array(index, int, NULL, 0444);
 MODULE_PARM_DESC(index, "Index value for the USB audio adapter.");
@@ -100,6 +101,8 @@ MODULE_PARM_DESC(ignore_ctl_error,
                 "Ignore errors from USB controller for mixer interfaces.");
 module_param(autoclock, bool, 0444);
 MODULE_PARM_DESC(autoclock, "Enable auto-clock selection for UAC2 devices (default: yes).");
+module_param_array(quirk_alias, charp, NULL, 0444);
+MODULE_PARM_DESC(quirk_alias, "Quirk aliases, e.g. 0123abcd:5678beef.");
 
 /*
  * we keep the snd_usb_audio_t instances by ourselves for merging
@@ -171,8 +174,9 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int
        if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
             altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
            altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
-               int err = snd_usbmidi_create(chip->card, iface,
-                                            &chip->midi_list, NULL);
+               int err = __snd_usbmidi_create(chip->card, iface,
+                                            &chip->midi_list, NULL,
+                                            chip->usb_id);
                if (err < 0) {
                        dev_err(&dev->dev,
                                "%u:%d: cannot create sequencer device\n",
@@ -311,6 +315,7 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip)
                snd_usb_endpoint_free(ep);
 
        mutex_destroy(&chip->mutex);
+       dev_set_drvdata(&chip->dev->dev, NULL);
        kfree(chip);
        return 0;
 }
@@ -455,6 +460,48 @@ static int snd_usb_audio_create(struct usb_interface *intf,
        return 0;
 }
 
+/* look for a matching quirk alias id */
+static bool get_alias_id(struct usb_device *dev, unsigned int *id)
+{
+       int i;
+       unsigned int src, dst;
+
+       for (i = 0; i < ARRAY_SIZE(quirk_alias); i++) {
+               if (!quirk_alias[i] ||
+                   sscanf(quirk_alias[i], "%x:%x", &src, &dst) != 2 ||
+                   src != *id)
+                       continue;
+               dev_info(&dev->dev,
+                        "device (%04x:%04x): applying quirk alias %04x:%04x\n",
+                        USB_ID_VENDOR(*id), USB_ID_PRODUCT(*id),
+                        USB_ID_VENDOR(dst), USB_ID_PRODUCT(dst));
+               *id = dst;
+               return true;
+       }
+
+       return false;
+}
+
+static struct usb_device_id usb_audio_ids[]; /* defined below */
+
+/* look for the corresponding quirk */
+static const struct snd_usb_audio_quirk *
+get_alias_quirk(struct usb_device *dev, unsigned int id)
+{
+       const struct usb_device_id *p;
+
+       for (p = usb_audio_ids; p->match_flags; p++) {
+               /* FIXME: this checks only vendor:product pair in the list */
+               if ((p->match_flags & USB_DEVICE_ID_MATCH_DEVICE) ==
+                   USB_DEVICE_ID_MATCH_DEVICE &&
+                   p->idVendor == USB_ID_VENDOR(id) &&
+                   p->idProduct == USB_ID_PRODUCT(id))
+                       return (const struct snd_usb_audio_quirk *)p->driver_info;
+       }
+
+       return NULL;
+}
+
 /*
  * probe the active usb device
  *
@@ -481,10 +528,12 @@ static int usb_audio_probe(struct usb_interface *intf,
        ifnum = get_iface_desc(alts)->bInterfaceNumber;
        id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
                    le16_to_cpu(dev->descriptor.idProduct));
+       if (get_alias_id(dev, &id))
+               quirk = get_alias_quirk(dev, id);
        if (quirk && quirk->ifnum >= 0 && ifnum != quirk->ifnum)
                return -ENXIO;
 
-       err = snd_usb_apply_boot_quirk(dev, intf, quirk);
+       err = snd_usb_apply_boot_quirk(dev, intf, quirk, id);
        if (err < 0)
                return err;
 
@@ -503,6 +552,7 @@ static int usb_audio_probe(struct usb_interface *intf,
                                goto __error;
                        }
                        chip = usb_chip[i];
+                       dev_set_drvdata(&dev->dev, chip);
                        atomic_inc(&chip->active); /* avoid autopm */
                        break;
                }
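Each quirk_alias entry is a pair of packed IDs, 'srcid:dstid', and get_alias_id() above substitutes dstid when srcid matches the probed device. A small self-contained sketch of that parsing, assuming the usual vendor-in-the-upper-16-bits packing implied by the USB_ID_VENDOR/USB_ID_PRODUCT uses above; the helper names here are invented:

    #include <stdbool.h>
    #include <stdio.h>

    /* Assumed packing: vendor in bits 31..16, product in bits 15..0. */
    #define ID(vendor, product)  (((unsigned int)(vendor) << 16) | (product))
    #define ID_VENDOR(id)        ((unsigned int)(id) >> 16)
    #define ID_PRODUCT(id)       ((unsigned int)(id) & 0xffff)

    /* Parse "srcid:dstid" and, if srcid matches, replace *id with dstid. */
    static bool apply_alias(const char *alias, unsigned int *id)
    {
        unsigned int src, dst;

        if (sscanf(alias, "%x:%x", &src, &dst) != 2 || src != *id)
            return false;
        *id = dst;
        return true;
    }

    int main(void)
    {
        unsigned int id = ID(0x0123, 0xabcd);

        if (apply_alias("0123abcd:5678beef", &id))
            printf("aliased to %04x:%04x\n", ID_VENDOR(id), ID_PRODUCT(id));
        return 0;
    }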
index cc39f63299ef0a1fe58572ab23139f46725f9587..b79875ebec1e1df73b45749941c73edcf3b3aaf6 100644 (file)
@@ -2320,10 +2320,11 @@ EXPORT_SYMBOL(snd_usbmidi_resume);
 /*
  * Creates and registers everything needed for a MIDI streaming interface.
  */
-int snd_usbmidi_create(struct snd_card *card,
-                      struct usb_interface *iface,
-                      struct list_head *midi_list,
-                      const struct snd_usb_audio_quirk *quirk)
+int __snd_usbmidi_create(struct snd_card *card,
+                        struct usb_interface *iface,
+                        struct list_head *midi_list,
+                        const struct snd_usb_audio_quirk *quirk,
+                        unsigned int usb_id)
 {
        struct snd_usb_midi *umidi;
        struct snd_usb_midi_endpoint_info endpoints[MIDI_MAX_ENDPOINTS];
@@ -2341,8 +2342,10 @@ int snd_usbmidi_create(struct snd_card *card,
        spin_lock_init(&umidi->disc_lock);
        init_rwsem(&umidi->disc_rwsem);
        mutex_init(&umidi->mutex);
-       umidi->usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
+       if (!usb_id)
+               usb_id = USB_ID(le16_to_cpu(umidi->dev->descriptor.idVendor),
                               le16_to_cpu(umidi->dev->descriptor.idProduct));
+       umidi->usb_id = usb_id;
        setup_timer(&umidi->error_timer, snd_usbmidi_error_timer,
                    (unsigned long)umidi);
 
@@ -2464,4 +2467,4 @@ int snd_usbmidi_create(struct snd_card *card,
        list_add_tail(&umidi->list, midi_list);
        return 0;
 }
-EXPORT_SYMBOL(snd_usbmidi_create);
+EXPORT_SYMBOL(__snd_usbmidi_create);
index ad8a3211f8e7b284342058592f796b6ff5e0a279..5e25a3fd6c1d8647eff2850732963a1ec7f225b3 100644 (file)
@@ -39,10 +39,20 @@ struct snd_usb_midi_endpoint_info {
 
 /* for QUIRK_MIDI_AKAI, data is NULL */
 
-int snd_usbmidi_create(struct snd_card *card,
+int __snd_usbmidi_create(struct snd_card *card,
+                        struct usb_interface *iface,
+                        struct list_head *midi_list,
+                        const struct snd_usb_audio_quirk *quirk,
+                        unsigned int usb_id);
+
+static inline int snd_usbmidi_create(struct snd_card *card,
                       struct usb_interface *iface,
                       struct list_head *midi_list,
-                      const struct snd_usb_audio_quirk *quirk);
+                      const struct snd_usb_audio_quirk *quirk)
+{
+       return __snd_usbmidi_create(card, iface, midi_list, quirk, 0);
+}
+
 void snd_usbmidi_input_stop(struct list_head *p);
 void snd_usbmidi_input_start(struct list_head *p);
 void snd_usbmidi_disconnect(struct list_head *p);
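The header keeps the old snd_usbmidi_create() signature as a static inline wrapper that forwards a usb_id of 0 (meaning "derive it from the device") to the new __snd_usbmidi_create(). A generic sketch of that API-extension pattern, with invented names rather than the real MIDI code:

    #include <stdio.h>

    /* New, extended entry point: extra argument with a 0 = "use the default" meaning. */
    static int __do_create(const char *name, unsigned int id)
    {
        if (!id)
            id = 0x1234;    /* derive the default, much as the real code derives
                             * the id from the USB device descriptor */
        printf("create %s with id %#x\n", name, id);
        return 0;
    }

    /* Old signature preserved as a thin wrapper, so existing callers don't change. */
    static inline int do_create(const char *name)
    {
        return __do_create(name, 0);
    }

    int main(void)
    {
        do_create("legacy caller");              /* gets the derived default id */
        __do_create("quirk caller", 0x5678beef); /* passes an explicit alias id */
        return 0;
    }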
index 23ea6d800c4c5283adc27d88fd26e74feaf066e0..2585c175b54e982edb050782b66b0f74ce458cf1 100644 (file)
@@ -446,8 +446,9 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip,
                const struct snd_usb_audio_quirk *quirk =
                        chip->usb_id == USB_ID(0x0582, 0x002b)
                        ? &ua700_quirk : &uaxx_quirk;
-               return snd_usbmidi_create(chip->card, iface,
-                                         &chip->midi_list, quirk);
+               return __snd_usbmidi_create(chip->card, iface,
+                                         &chip->midi_list, quirk,
+                                         chip->usb_id);
        }
 
        if (altsd->bNumEndpoints != 1)
@@ -974,11 +975,9 @@ int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip,
 
 int snd_usb_apply_boot_quirk(struct usb_device *dev,
                             struct usb_interface *intf,
-                            const struct snd_usb_audio_quirk *quirk)
+                            const struct snd_usb_audio_quirk *quirk,
+                            unsigned int id)
 {
-       u32 id = USB_ID(le16_to_cpu(dev->descriptor.idVendor),
-                       le16_to_cpu(dev->descriptor.idProduct));
-
        switch (id) {
        case USB_ID(0x041e, 0x3000):
                /* SB Extigy needs special boot-up sequence */
@@ -1121,6 +1120,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        switch (chip->usb_id) {
        case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
        case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+       case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
        case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
        case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
@@ -1182,7 +1182,7 @@ void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep)
         * "Playback Design" products send bogus feedback data at the start
         * of the stream. Ignore them.
         */
-       if ((le16_to_cpu(ep->chip->dev->descriptor.idVendor) == 0x23ba) &&
+       if (USB_ID_VENDOR(ep->chip->usb_id) == 0x23ba &&
            ep->type == SND_USB_ENDPOINT_TYPE_SYNC)
                ep->skip_packets = 4;
 
@@ -1201,39 +1201,58 @@ void snd_usb_endpoint_start_quirk(struct snd_usb_endpoint *ep)
 
 void snd_usb_set_interface_quirk(struct usb_device *dev)
 {
+       struct snd_usb_audio *chip = dev_get_drvdata(&dev->dev);
+
+       if (!chip)
+               return;
        /*
         * "Playback Design" products need a 50ms delay after setting the
         * USB interface.
         */
-       if (le16_to_cpu(dev->descriptor.idVendor) == 0x23ba)
+       switch (USB_ID_VENDOR(chip->usb_id)) {
+       case 0x23ba: /* Playback Design */
+       case 0x0644: /* TEAC Corp. */
                mdelay(50);
+               break;
+       }
 }
 
+/* quirk applied after snd_usb_ctl_msg(); not applied during boot quirks */
 void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
                           __u8 request, __u8 requesttype, __u16 value,
                           __u16 index, void *data, __u16 size)
 {
+       struct snd_usb_audio *chip = dev_get_drvdata(&dev->dev);
+
+       if (!chip)
+               return;
        /*
         * "Playback Design" products need a 20ms delay after each
         * class compliant request
         */
-       if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) &&
+       if (USB_ID_VENDOR(chip->usb_id) == 0x23ba &&
+           (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
+               mdelay(20);
+
+       /*
+        * "TEAC Corp." products need a 20ms delay after each
+        * class compliant request
+        */
+       if (USB_ID_VENDOR(chip->usb_id) == 0x0644 &&
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                mdelay(20);
 
        /* Marantz/Denon devices with USB DAC functionality need a delay
         * after each class compliant request
         */
-       if (is_marantz_denon_dac(USB_ID(le16_to_cpu(dev->descriptor.idVendor),
-                                       le16_to_cpu(dev->descriptor.idProduct)))
+       if (is_marantz_denon_dac(chip->usb_id)
            && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                mdelay(20);
 
        /* Zoom R16/24 needs a tiny delay here, otherwise requests like
         * get/set frequency return as failed despite actually succeeding.
         */
-       if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1686) &&
-           (le16_to_cpu(dev->descriptor.idProduct) == 0x00dd) &&
+       if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
                mdelay(1);
 }
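snd_usb_set_interface_quirk() and snd_usb_ctl_msg_quirk() now look the chip up via dev_get_drvdata() and bail out when it is not yet set, which is how the "not applied during boot quirks" note above is enforced; usb_audio_probe() publishes the pointer with dev_set_drvdata() and snd_usb_audio_free() clears it again. A toy sketch of that store/lookup/guard pattern with invented stand-ins for struct device and the drvdata helpers:

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-ins for struct device and dev_{set,get}_drvdata(). */
    struct device {
        void *driver_data;
    };

    struct chip {
        unsigned int usb_id;
    };

    static void set_drvdata(struct device *dev, void *data) { dev->driver_data = data; }
    static void *get_drvdata(struct device *dev)            { return dev->driver_data; }

    /* Mirrors the shape of snd_usb_ctl_msg_quirk(): do nothing before probe
     * has published the chip pointer (i.e. during boot quirks). */
    static void ctl_msg_quirk(struct device *dev)
    {
        struct chip *chip = get_drvdata(dev);

        if (!chip)
            return;                 /* boot-quirk phase: no chip yet */
        printf("quirk for %08x\n", chip->usb_id);
    }

    int main(void)
    {
        struct device dev = { NULL };
        struct chip chip = { 0x23ba0003 };

        ctl_msg_quirk(&dev);        /* skipped: drvdata not set yet */
        set_drvdata(&dev, &chip);   /* what usb_audio_probe() now does */
        ctl_msg_quirk(&dev);        /* applied */
        return 0;
    }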
@@ -1250,7 +1269,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                                        unsigned int sample_bytes)
 {
        /* Playback Designs */
-       if (le16_to_cpu(chip->dev->descriptor.idVendor) == 0x23ba) {
+       if (USB_ID_VENDOR(chip->usb_id) == 0x23ba) {
                switch (fp->altsetting) {
                case 1:
                        fp->dsd_dop = true;
@@ -1269,7 +1288,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
        case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
        case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
-       case USB_ID(0x22d8, 0x0416): /* OPPO HA-1*/
+       case USB_ID(0x22d9, 0x0416): /* OPPO HA-1 */
                if (fp->altsetting == 2)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
@@ -1278,6 +1297,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
        case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
        case USB_ID(0x20b1, 0x3023): /* Aune X1S 32BIT/384 DSD DAC */
+       case USB_ID(0x2616, 0x0106): /* PS Audio NuWave DAC */
                if (fp->altsetting == 3)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
index 2cd71ed1201f93ea8e6b54e1d487a84751e09885..192ff5ce94521b35ae09e6adc624f12239f48003 100644 (file)
@@ -16,7 +16,8 @@ int snd_usb_apply_interface_quirk(struct snd_usb_audio *chip,
 
 int snd_usb_apply_boot_quirk(struct usb_device *dev,
                             struct usb_interface *intf,
-                            const struct snd_usb_audio_quirk *quirk);
+                            const struct snd_usb_audio_quirk *quirk,
+                            unsigned int usb_id);
 
 void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
                              struct audioformat *fmt);
index 02db3cdff20ff7653c8ca0db21d28ef7b508a9eb..6b7707270aa3b19791c8b6248f90ecddeabc1fdd 100644 (file)
@@ -27,7 +27,7 @@ endef
 #   the rule that uses them - an example for that is the 'bionic'
 #   feature check. ]
 #
-FEATURE_TESTS ?=                       \
+FEATURE_TESTS_BASIC :=                 \
        backtrace                       \
        dwarf                           \
        fortify-source                  \
@@ -46,6 +46,7 @@ FEATURE_TESTS ?=                      \
        libpython                       \
        libpython-version               \
        libslang                        \
+       libcrypto                       \
        libunwind                       \
        pthread-attr-setaffinity-np     \
        stackprotector-all              \
@@ -56,6 +57,25 @@ FEATURE_TESTS ?=                     \
        get_cpuid                       \
        bpf
 
+# FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
+# of all feature tests
+FEATURE_TESTS_EXTRA :=                 \
+       bionic                          \
+       compile-32                      \
+       compile-x32                     \
+       cplus-demangle                  \
+       hello                           \
+       libbabeltrace                   \
+       liberty                         \
+       liberty-z                       \
+       libunwind-debug-frame
+
+FEATURE_TESTS ?= $(FEATURE_TESTS_BASIC)
+
+ifeq ($(FEATURE_TESTS),all)
+  FEATURE_TESTS := $(FEATURE_TESTS_BASIC) $(FEATURE_TESTS_EXTRA)
+endif
+
 FEATURE_DISPLAY ?=                     \
        dwarf                           \
        glibc                           \
@@ -68,6 +88,7 @@ FEATURE_DISPLAY ?=                    \
        libperl                         \
        libpython                       \
        libslang                        \
+       libcrypto                       \
        libunwind                       \
        libdw-dwarf-unwind              \
        zlib                            \
@@ -100,6 +121,14 @@ ifeq ($(feature-all), 1)
   # test-all.c passed - just set all the core feature flags to 1:
   #
   $(foreach feat,$(FEATURE_TESTS),$(call feature_set,$(feat)))
+  #
+  # test-all.c does not include these tests, so we need to
+  # run them individually here to get proper feature values
+  #
+  $(call feature_check,compile-32)
+  $(call feature_check,compile-x32)
+  $(call feature_check,bionic)
+  $(call feature_check,libbabeltrace)
 else
   $(foreach feat,$(FEATURE_TESTS),$(call feature_check,$(feat)))
 endif
index bf8f0352264dcc8a5a1286bc4add2ca9de481357..c5f4c417428d7099fbe4f487a179b0663f478611 100644 (file)
@@ -23,6 +23,7 @@ FILES=                                        \
        test-libpython.bin              \
        test-libpython-version.bin      \
        test-libslang.bin               \
+       test-libcrypto.bin              \
        test-libunwind.bin              \
        test-libunwind-debug-frame.bin  \
        test-pthread-attr-setaffinity-np.bin    \
@@ -105,6 +106,9 @@ $(OUTPUT)test-libaudit.bin:
 $(OUTPUT)test-libslang.bin:
        $(BUILD) -I/usr/include/slang -lslang
 
+$(OUTPUT)test-libcrypto.bin:
+       $(BUILD) -lcrypto
+
 $(OUTPUT)test-gtk2.bin:
        $(BUILD) $(shell $(PKG_CONFIG) --libs --cflags gtk+-2.0 2>/dev/null)
 
index 81025cade45fa9a3fcc6f8f0aacc8f403e6d9211..e499a36c1e4a9e21e9c355309b53a7dc5901664a 100644 (file)
 # include "test-bpf.c"
 #undef main
 
+#define main main_test_libcrypto
+# include "test-libcrypto.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
        main_test_libpython();
@@ -158,6 +162,7 @@ int main(int argc, char *argv[])
        main_test_lzma();
        main_test_get_cpuid();
        main_test_bpf();
+       main_test_libcrypto();
 
        return 0;
 }
index 31dbf45bf99c5996ca0ee62cba186d912b08e666..c54e6551ae4c54cd6e9f418e6e8ff97280444efd 100644 (file)
@@ -1,4 +1,6 @@
+#include <stdio.h>
 int main(void)
 {
+       printf("Hello World!\n");
        return 0;
 }
diff --git a/tools/build/feature/test-libcrypto.c b/tools/build/feature/test-libcrypto.c
new file mode 100644 (file)
index 0000000..bd79dc7
--- /dev/null
@@ -0,0 +1,17 @@
+#include <openssl/sha.h>
+#include <openssl/md5.h>
+
+int main(void)
+{
+       MD5_CTX context;
+       unsigned char md[MD5_DIGEST_LENGTH + SHA_DIGEST_LENGTH];
+       unsigned char dat[] = "12345";
+
+       MD5_Init(&context);
+       MD5_Update(&context, &dat[0], sizeof(dat));
+       MD5_Final(&md[0], &context);
+
+       SHA1(&dat[0], sizeof(dat), &md[0]);
+
+       return 0;
+}
index 8334a5a9d5d7f55e97144e1d7462c52deec02e02..7e543c3102d4118a09717bb6fb4c0f779532ae91 100644 (file)
@@ -201,6 +201,7 @@ struct bpf_object {
                        Elf_Data *data;
                } *reloc;
                int nr_reloc;
+               int maps_shndx;
        } efile;
        /*
         * All loaded bpf_object is linked in a list, which is
@@ -350,6 +351,7 @@ static struct bpf_object *bpf_object__new(const char *path,
         */
        obj->efile.obj_buf = obj_buf;
        obj->efile.obj_buf_sz = obj_buf_sz;
+       obj->efile.maps_shndx = -1;
 
        obj->loaded = false;
 
@@ -529,12 +531,12 @@ bpf_object__init_maps(struct bpf_object *obj, void *data,
 }
 
 static int
-bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
+bpf_object__init_maps_name(struct bpf_object *obj)
 {
        int i;
        Elf_Data *symbols = obj->efile.symbols;
 
-       if (!symbols || maps_shndx < 0)
+       if (!symbols || obj->efile.maps_shndx < 0)
                return -EINVAL;
 
        for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
@@ -544,7 +546,7 @@ bpf_object__init_maps_name(struct bpf_object *obj, int maps_shndx)
 
                if (!gelf_getsym(symbols, i, &sym))
                        continue;
-               if (sym.st_shndx != maps_shndx)
+               if (sym.st_shndx != obj->efile.maps_shndx)
                        continue;
 
                map_name = elf_strptr(obj->efile.elf,
@@ -572,7 +574,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
        Elf *elf = obj->efile.elf;
        GElf_Ehdr *ep = &obj->efile.ehdr;
        Elf_Scn *scn = NULL;
-       int idx = 0, err = 0, maps_shndx = -1;
+       int idx = 0, err = 0;
 
        /* Elf is corrupted/truncated, avoid calling elf_strptr. */
        if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
@@ -625,7 +627,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
                else if (strcmp(name, "maps") == 0) {
                        err = bpf_object__init_maps(obj, data->d_buf,
                                                    data->d_size);
-                       maps_shndx = idx;
+                       obj->efile.maps_shndx = idx;
                } else if (sh.sh_type == SHT_SYMTAB) {
                        if (obj->efile.symbols) {
                                pr_warning("bpf: multiple SYMTAB in %s\n",
@@ -674,8 +676,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
                pr_warning("Corrupted ELF file: index of strtab invalid\n");
                return LIBBPF_ERRNO__FORMAT;
        }
-       if (maps_shndx >= 0)
-               err = bpf_object__init_maps_name(obj, maps_shndx);
+       if (obj->efile.maps_shndx >= 0)
+               err = bpf_object__init_maps_name(obj);
 out:
        return err;
 }
@@ -697,7 +699,8 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
 static int
 bpf_program__collect_reloc(struct bpf_program *prog,
                           size_t nr_maps, GElf_Shdr *shdr,
-                          Elf_Data *data, Elf_Data *symbols)
+                          Elf_Data *data, Elf_Data *symbols,
+                          int maps_shndx)
 {
        int i, nrels;
 
@@ -724,9 +727,6 @@ bpf_program__collect_reloc(struct bpf_program *prog,
                        return -LIBBPF_ERRNO__FORMAT;
                }
 
-               insn_idx = rel.r_offset / sizeof(struct bpf_insn);
-               pr_debug("relocation: insn_idx=%u\n", insn_idx);
-
                if (!gelf_getsym(symbols,
                                 GELF_R_SYM(rel.r_info),
                                 &sym)) {
@@ -735,6 +735,15 @@ bpf_program__collect_reloc(struct bpf_program *prog,
                        return -LIBBPF_ERRNO__FORMAT;
                }
 
+               if (sym.st_shndx != maps_shndx) {
+                       pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
+                                  prog->section_name, sym.st_shndx);
+                       return -LIBBPF_ERRNO__RELOC;
+               }
+
+               insn_idx = rel.r_offset / sizeof(struct bpf_insn);
+               pr_debug("relocation: insn_idx=%u\n", insn_idx);
+
                if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
                        pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
                                   insn_idx, insns[insn_idx].code);
@@ -863,7 +872,8 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
 
                err = bpf_program__collect_reloc(prog, nr_maps,
                                                 shdr, data,
-                                                obj->efile.symbols);
+                                                obj->efile.symbols,
+                                                obj->efile.maps_shndx);
                if (err)
                        return err;
        }
index 9be663340f0a4c48e9ec51c996b13c3a8b596fdc..d1c89cc06f5f6710471c42dc5b2a475a54ee7658 100644 (file)
@@ -11,11 +11,6 @@ static __thread struct task_struct current_obj;
 bool debug_locks = true;
 bool debug_locks_silent;
 
-__attribute__((constructor)) static void liblockdep_init(void)
-{
-       lockdep_init();
-}
-
 __attribute__((destructor)) static void liblockdep_exit(void)
 {
        debug_check_no_locks_held();
index a60c14b9662aefd6f17b47dec534bce2f53d9f03..6e66277ec4375c84693b25756fdf8336faf79fcf 100644 (file)
@@ -44,7 +44,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 void lock_release(struct lockdep_map *lock, int nested,
                        unsigned long ip);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void lockdep_init(void);
 
 #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
        { .name = (_name), .key = (void *)(_key), }
index 21cdf869a01b8b7facc084c4dc66cf73afa163bb..52844847569c99b878c031da8f25a13d1c0b5838 100644 (file)
@@ -439,7 +439,5 @@ __attribute__((constructor)) static void init_preload(void)
        ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
 #endif
 
-       lockdep_init();
-
        __init_state = done;
 }
index b9ca1e304158da86f36dafad33e7de0a52e01567..c7158bfb16496992ab805c856516eeb9e3c612b4 100644 (file)
@@ -62,7 +62,7 @@ Given a $HOME/.perfconfig like this:
                medium = green, default
                normal = lightgray, default
                selected = white, lightgray
-               code = blue, default
+               jump_arrows = blue, default
                addr = magenta, default
                root = white, blue
 
@@ -98,6 +98,347 @@ Given a $HOME/.perfconfig like this:
                order = caller
                sort-key = function
 
+Variables
+~~~~~~~~~
+
+colors.*::
+       The variables for customizing the colors used in the output for the
+       'report', 'top' and 'annotate' in the TUI. They should specify the
+       foreground and background colors, separated by a comma, for example:
+
+               medium = green, lightgray
+
+       If you want to use the color configured for your terminal, just leave it
+       as 'default', for example:
+
+               medium = default, lightgray
+
+       Available colors:
+       red, yellow, green, cyan, gray, black, blue,
+       white, default, magenta, lightgray
+
+       colors.top::
+               'top' means an overhead percentage of more than 5%,
+               and the values of this variable specify the colors used
+               for such entries. The basic values are foreground-color
+               'red' and background-color 'default'.
+       colors.medium::
+               'medium' means an overhead percentage of more than 0.5%.
+               Default values are 'green' and 'default'.
+       colors.normal::
+               'normal' means the remaining overhead percentages, i.e.
+               those not covered by 'top', 'medium' or 'selected'.
+               Default values are 'lightgray' and 'default'.
+       colors.selected::
+               This selects the colors for the current entry in a list of entries
+               from sub-commands (top, report, annotate).
+               Default values are 'black' and 'lightgray'.
+       colors.jump_arrows::
+               Colors for jump arrows on assembly code listings
+               such as 'jns', 'jmp', 'jne', etc.
+               Default values are 'blue', 'default'.
+       colors.addr::
+               This selects colors for addresses from 'annotate'.
+               Default values are 'magenta', 'default'.
+       colors.root::
+               Colors for headers in the output of sub-commands (top, report).
+               Default values are 'white', 'blue'.
+
+tui.*, gtk.*::
+       Subcommands that can be configured here are 'top', 'report' and 'annotate'.
+       These values are booleans, for example:
+
+       [tui]
+               top = true
+
+       will make the TUI the default for the 'top' subcommand. These are only
+       available if the required libs were detected at tool build time.
+
+buildid.*::
+       buildid.dir::
+               Each executable and shared library in modern distributions comes with a
+               content-based identifier that, if available, will be inserted in the
+               'perf.data' file header so that, at analysis time, whatever is needed
+               for symbol resolution, code annotation, etc. can be found.
+
+               The recording tools also store a hard link or copy in a per-user
+               directory, $HOME/.debug/, of binaries, shared libraries, /proc/kallsyms
+               and /proc/kcore files to be used at analysis time.
+
+               The buildid.dir variable can be used to either change this directory
+               cache location, or to disable it altogether. If you want to disable it,
+               set buildid.dir to /dev/null. The default is $HOME/.debug.
+
+annotate.*::
+       These options only work in the TUI.
+       They control how addresses, jump arrows and source code are shown
+       in the assembly code listing of a specific program.
+
+       annotate.hide_src_code::
+               If the analyzed program has source code available, this option
+               controls whether 'annotate' interleaves it with the assembly listing.
+               For example, consider the following four lines of a program.
+               If this option is 'true', they are printed without the
+               source code, as below.
+
+               │        push   %rbp
+               │        mov    %rsp,%rbp
+               │        sub    $0x10,%rsp
+               │        mov    (%rdi),%rdx
+
+               But if this option is 'false', the source code of that part
+               is also printed, as below. Default is 'false'.
+
+               │      struct rb_node *rb_next(const struct rb_node *node)
+               │      {
+               │        push   %rbp
+               │        mov    %rsp,%rbp
+               │        sub    $0x10,%rsp
+               │              struct rb_node *parent;
+               │
+               │              if (RB_EMPTY_NODE(node))
+               │        mov    (%rdi),%rdx
+               │              return n;
+
+        annotate.use_offset::
+               Offsets relative to the first address of the loaded function
+               can be used. Instead of the original addresses of the assembly
+               code, addresses with the base address subtracted are printed.
+               For example, if the base address is 0xffffffff81624d50 as below,
+
+               ffffffff81624d50 <load0>
+
+               an address in the assembly code is shown as an absolute address, as below,
+
+               ffffffff816250b8:│  mov    0x8(%r14),%rdi
+
+               but if use_offset is 'true', the offset from the base address is
+               printed instead. Default is 'true'. This option only applies to the TUI.
+
+                            368:│  mov    0x8(%r14),%rdi
+
+       annotate.jump_arrows::
+               There can be jump instructions in the assembly code.
+               Depending on the boolean value of jump_arrows, arrows
+               representing where an instruction jumps to are printed
+               or not, as below.
+
+               │     ┌──jmp    1333
+               │     │  xchg   %ax,%ax
+               │1330:│  mov    %r15,%r10
+               │1333:└─→cmp    %r15,%r14
+
+               If jump_arrows is 'false', the arrows aren't printed, as below.
+               Default is 'false'.
+
+               │      ↓ jmp    1333
+               │        xchg   %ax,%ax
+               │1330:   mov    %r15,%r10
+               │1333:   cmp    %r15,%r14
+
+        annotate.show_linenr::
+               When showing source code, if this option is 'true',
+               line numbers are printed as below.
+
+               │1628         if (type & PERF_SAMPLE_IDENTIFIER) {
+               │     ↓ jne    508
+               │1628                 data->id = *array;
+               │1629                 array++;
+               │1630         }
+
+               However, if this option is 'false', they aren't printed, as below.
+               Default is 'false'.
+
+               │             if (type & PERF_SAMPLE_IDENTIFIER) {
+               │     ↓ jne    508
+               │                     data->id = *array;
+               │                     array++;
+               │             }
+
+        annotate.show_nr_jumps::
+               Let's see a part of assembly code.
+
+               │1382:   movb   $0x1,-0x270(%rbp)
+
+               If this option is used, the number of branches jumping to that address is printed, as below.
+               Default is 'false'.
+
+               │1 1382:   movb   $0x1,-0x270(%rbp)
+
+        annotate.show_total_period::
+               To compare two records on an instruction basis, this option
+               displays the total number of samples that belong to a line
+               of assembly code. If this option is 'true', total periods are
+               printed instead of percent values, as below.
+
+                 302 │      mov    %eax,%eax
+
+               But if this option is 'false', percent values for overhead are printed instead, as below.
+               Default is 'false'.
+
+               99.93 │      mov    %eax,%eax
+
+hist.*::
+       hist.percentage::
+               This option controls how the overhead of filtered entries is
+               calculated - that means the value of this option is effective only
+               if there's a filter (by comm, dso or symbol name). Consider the
+               following example:
+
+                      Overhead  Symbols
+                      ........  .......
+                       33.33%     foo
+                       33.33%     bar
+                       33.33%     baz
+
+              This is the original overhead, and we'll filter out the first 'foo'
+              entry. The value of 'relative' would increase the overhead of 'bar'
+              and 'baz' to 50.00% for each, while 'absolute' would show their
+              current overhead (33.33%).
+
+ui.*::
+       ui.show-headers::
+               This option controls display of column headers (like 'Overhead' and 'Symbol')
+               in 'report' and 'top'. If this option is false, they are hidden.
+               This option only applies to the TUI.
+
+call-graph.*::
+       When the sub-commands 'top' and 'report' are used with -g/--children,
+       these options control the call-graph behavior.
+
+       call-graph.record-mode::
+               The record-mode can be 'fp' (frame pointer), 'dwarf' or 'lbr'.
+               The value 'dwarf' is effective only if perf detects the needed
+               library (libunwind or a recent version of libdw).
+               'lbr' only works on CPUs that support it.
+
+       call-graph.dump-size::
+               The size of the stack to dump for post-unwinding. Default is 8192 (bytes).
+               When record-mode is 'dwarf', the default size will be used if this is omitted.
+
+       call-graph.print-type::
+               The print-types can be graph (graph absolute), fractal (graph relative),
+               flat and folded. This option controls how the overhead of each callchain
+               entry is shown. Consider the following example.
+
+                Overhead  Symbols
+                ........  .......
+                  40.00%  foo
+                          |
+                          ---foo
+                             |
+                             |--50.00%--bar
+                             |          main
+                             |
+                              --50.00%--baz
+                                        main
+
+               This output is in 'fractal' format. 'foo' came from 'bar' and 'baz'
+               exactly half and half, so 'fractal' shows 50.00% for each
+               (meaning that it treats the total overhead of 'foo' as 100%).
+
+               'graph' uses the absolute overhead value of 'foo' as the total, so each
+               of the 'bar' and 'baz' callchains will have 20.00% of overhead.
+               If 'flat' is used, call chains are shown linearly in a single column.
+               'folded' means call chains are displayed on one line, separated by semicolons.
+
+       call-graph.order::
+               This option controls print order of callchains. The default is
+               'callee' which means callee is printed at top and then followed by its
+               caller and so on. The 'caller' prints it in reverse order.
+
+               If this option is not set and report.children or top.children is
+               set to true (or the equivalent command line option is given),
+               the default value of this option is changed to 'caller' for the
+               execution of 'perf report' or 'perf top'. Other commands will
+               still default to 'callee'.
+
+       call-graph.sort-key::
+               Callchains are merged if they contain the same information.
+               The sort-key option determines how callchains are compared.
+               The value of 'sort-key' can be 'function' or 'address'.
+               The default is 'function'.
+
+       call-graph.threshold::
+               When there are many callchains, tons of lines would be printed. So perf
+               omits small callchains whose overhead is under a certain threshold, and
+               this option controls that threshold. Default is 0.5 (%). How the overhead
+               is calculated depends on call-graph.print-type.
+
+       call-graph.print-limit::
+               This is a maximum number of lines of callchain printed for a single
+               histogram entry. Default is 0 which means no limitation.
+
+report.*::
+       report.percent-limit::
+               This one is mostly the same as call-graph.threshold but works for
+               histogram entries. Entries with an overhead lower than this
+               percentage are not printed. Default is '0'. If percent-limit
+               is '10', only entries with more than 10% overhead are
+               printed.
+
+       report.queue-size::
+               This option sets up the maximum allocation size of the internal
+               event queue for ordering events. Default is 0, meaning no limit.
+
+       report.children::
+               'Children' means functions called from another function.
+               If this option is true, 'perf report' accumulates the callchains of children
+               and shows the (accumulated) total overhead as well as the 'Self' overhead.
+               Please refer to the 'perf report' manual. The default is 'true'.
+
+       report.group::
+               This option shows event group information together.
+               Example output with this turned on; note that there is one column
+               per event in the group, ref-cycles and cycles:
+
+               # group: {ref-cycles,cycles}
+               # ========
+               #
+               # Samples: 7K of event 'anon group { ref-cycles, cycles }'
+               # Event count (approx.): 6876107743
+               #
+               #         Overhead  Command      Shared Object               Symbol
+               # ................  .......  .................  ...................
+               #
+                   99.84%  99.76%  noploop  noploop            [.] main
+                    0.07%   0.00%  noploop  ld-2.15.so         [.] strcmp
+                    0.03%   0.00%  noploop  [kernel.kallsyms]  [k] timerqueue_del
+
+top.*::
+       top.children::
+               Same as 'report.children'. If it is enabled, the output of the 'top'
+               command has a 'Children' overhead column as well as a 'Self' overhead
+               column by default.
+               The default is 'true'.
+
+man.*::
+       man.viewer::
+               This option selects the tool used to view manual pages when the 'help'
+               subcommand is invoked. Supported tools are 'man', 'woman'
+               (with emacs client) and 'konqueror'. Default is 'man'.
+
+               A new man viewer tool can also be added using 'man.<tool>.cmd',
+               or a different path can be used via the 'man.<tool>.path' config option.
+
+pager.*::
+       pager.<subcommand>::
+               When the subcommand is run on stdio, this value determines whether a
+               pager is used or not. Default is 'unspecified'.
+
+kmem.*::
+       kmem.default::
+               This option decides which allocator is to be analyzed if neither
+               '--slab' nor '--page' option is used. Default is 'slab'.
+
+record.*::
+       record.build-id::
+               This option can be 'cache', 'no-cache' or 'skip'.
+               'cache' post-processes the data and saves/updates the binaries in
+               the build-id cache (in ~/.debug). This is the default.
+               'no-cache' does not update the build-id cache.
+               'skip' skips post-processing and does not update the cache.
+
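       Likewise, a hedged sketch of the remaining sections above in ~/.perfconfig form
       (values are illustrative only; 'pager.report' is just one possible <subcommand> entry):

               [report]
                       percent-limit = 10
                       queue-size = 0
                       children = true
                       group = true
               [top]
                       children = true
               [man]
                       viewer = man
               [pager]
                       report = true
               [kmem]
                       default = slab
               [record]
                       build-id = cache
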
 SEE ALSO
 --------
 linkperf:perf[1]
index 0b1cedeef895369c9c60aa0a1e6cbb718c65c509..87b2588d1cbdee6060a304be4f79015cb5889171 100644 (file)
@@ -53,6 +53,13 @@ include::itrace.txt[]
 --strip::
        Use with --itrace to strip out non-synthesized events.
 
+-j::
+--jit::
+       Process jitdump files by injecting the mmap records corresponding to jitted
+       functions. This option also generates ELF images for each jitted function
+       found in the jitdump files captured in the input perf.data file. Use this option
+       if you are monitoring an environment that uses JIT runtimes, such as Java, DART or V8.
+
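       As a usage sketch (paths and the 'MyApp' class name are hypothetical; the agent
       comes from the tools/perf/jvmti library added elsewhere in this series), the
       expected flow is roughly:

               # record with CLOCK_MONOTONIC (-k 1), loading the JVMTI agent so a jitdump is produced
               perf record -k 1 -o perf.data -- java -agentpath:/path/to/libjvmti.so MyApp
               # inject the jitted mmaps and generate per-function ELF images
               perf inject --jit -i perf.data -o perf.data.jitted
               # report on the augmented file
               perf report -i perf.data.jitted
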
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
index 8a301f6afb37ad855351fcd5fba1644d12dc2529..89cab84e92fdad5ed705e2c810d24ed45d329364 100644 (file)
@@ -117,6 +117,22 @@ OPTIONS
        And default sort keys are changed to comm, dso_from, symbol_from, dso_to
        and symbol_to, see '--branch-stack'.
 
+       If the --mem-mode option is used, the following sort keys are also available
+       (incompatible with --branch-stack):
+       symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline.
+
+       - symbol_daddr: name of data symbol being executed on at the time of the sample
+       - dso_daddr: name of library or module containing the data being executed
+       on at the time of the sample
+       - locked: whether the bus was locked at the time of the sample
+       - tlb: type of tlb access for the data at the time of the sample
+       - mem: type of memory access for the data at the time of the sample
+       - snoop: type of snoop (if any) for the data at the time of the sample
+       - dcacheline: the cacheline the data address is on at the time of the sample
+
+       And the default sort keys are changed to local_weight, mem, sym, dso,
+       symbol_daddr, dso_daddr, snoop, tlb, locked, see '--mem-mode'.
+
        If the data file has tracepoint event(s), following (dynamic) sort keys
        are also available:
        trace, trace_fields, [<event>.]<field>[/raw]
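
       A brief illustrative sequence (not part of this patch) exercising these memory
       sort keys could be:

               perf mem record -- ./workload        # './workload' is a placeholder command
               perf report --mem-mode --sort=mem,snoop,tlb,dcacheline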
@@ -151,22 +167,6 @@ OPTIONS
        By default, every sort keys not specified in -F will be appended
        automatically.
 
-       If --mem-mode option is used, following sort keys are also available
-       (incompatible with --branch-stack):
-       symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline.
-
-       - symbol_daddr: name of data symbol being executed on at the time of sample
-       - dso_daddr: name of library or module containing the data being executed
-       on at the time of sample
-       - locked: whether the bus was locked at the time of sample
-       - tlb: type of tlb access for the data at the time of sample
-       - mem: type of memory access for the data at the time of sample
-       - snoop: type of snoop (if any) for the data at the time of sample
-       - dcacheline: the cacheline the data address is on at the time of sample
-
-       And default sort keys are changed to local_weight, mem, sym, dso,
-       symbol_daddr, dso_daddr, snoop, tlb, locked, see '--mem-mode'.
-
 -p::
 --parent=<regex>::
         A regex filter to identify parent. The parent is a caller of this
@@ -351,7 +351,10 @@ OPTIONS
 
 --percent-limit::
        Do not show entries which have an overhead under that percent.
-       (Default: 0).
+       (Default: 0).  Note that this option also sets the percent limit (threshold)
+       of callchains.  However, the default value of the callchain threshold is
+       different from the default value for hist entries.  Please see the
+       --call-graph option for details.
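
       For example (illustrative only), the following hides both histogram entries and
       callchains below 5%:

               perf report --percent-limit 5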
 
 --percentage::
        Determine how to display the overhead percentage of filtered entries.
index 767ea2436e1cd841a762ee8cdb039ba80a7120b3..1d8d5bc4cd2de6601f926696d5c172b30fa735d4 100644 (file)
@@ -5,7 +5,7 @@
        medium = green, lightgray
        normal = black, lightgray
        selected = lightgray, magenta
-       code = blue, lightgray
+       jump_arrows = blue, lightgray
        addr = magenta, lightgray
 
 [tui]
index dcd9a70c7193b43b5791058b01a3773da6e0d870..32a64e6190288e5110cfa6de78138aceb955e7af 100644 (file)
@@ -68,6 +68,20 @@ all tags TAGS:
        $(print_msg)
        $(make)
 
+ifdef MAKECMDGOALS
+has_clean := 0
+ifneq ($(filter clean,$(MAKECMDGOALS)),)
+  has_clean := 1
+endif # clean
+
+ifeq ($(has_clean),1)
+  rest := $(filter-out clean,$(MAKECMDGOALS))
+  ifneq ($(rest),)
+$(rest): clean
+  endif # rest
+endif # has_clean
+endif # MAKECMDGOALS
+
 #
 # The clean target is not really parallel, don't print the jobs info:
 #
@@ -75,10 +89,17 @@ clean:
        $(make)
 
 #
-# The build-test target is not really parallel, don't print the jobs info:
+# The build-test target is not really parallel, don't print the jobs info,
+# it also uses only the tests/make targets that don't pollute the source
+# repository, i.e. those that use O= or build the tarpkg outside the source
+# repo directories.
+#
+# For a full test, use:
+#
+# make -C tools/perf -f tests/make
 #
 build-test:
-       @$(MAKE) SHUF=1 -f tests/make --no-print-directory
+       @$(MAKE) SHUF=1 -f tests/make REUSE_FEATURES_DUMP=1 MK=Makefile SET_PARALLEL=1 --no-print-directory tarpkg out
 
 #
 # All other targets get passed through:
index 0a22407e1d7d8abb092ff3d24b6829a27c234bbe..d404117810a7fd0f3ae024e23cc81fb13e91326c 100644 (file)
@@ -58,6 +58,9 @@ include config/utilities.mak
 #
 # Define NO_LIBBIONIC if you do not want bionic support
 #
+# Define NO_LIBCRYPTO if you do not want libcrypto (openssl) support
+# used for generating build-ids for ELFs generated by jitdump.
+#
 # Define NO_LIBDW_DWARF_UNWIND if you do not want libdw support
 # for dwarf backtrace post unwind.
 #
@@ -77,6 +80,9 @@ include config/utilities.mak
 # Define NO_AUXTRACE if you do not want AUX area tracing support
 #
 # Define NO_LIBBPF if you do not want BPF support
+#
+# Define FEATURES_DUMP to provide features detection dump file
+# and bypass the feature detection
 
 # As per kernel Makefile, avoid funny character set dependencies
 unexport LC_ALL
@@ -162,10 +168,28 @@ ifeq ($(filter-out $(NON_CONFIG_TARGETS),$(MAKECMDGOALS)),)
 endif
 endif
 
+# Set FEATURE_TESTS to 'all' so all possible feature checkers are executed.
+# Without this setting the output feature dump file misses some features, for
+# example, liberty. Select all checkers so we won't get an incomplete feature
+# dump file.
 ifeq ($(config),1)
+ifdef MAKECMDGOALS
+ifeq ($(filter feature-dump,$(MAKECMDGOALS)),feature-dump)
+FEATURE_TESTS := all
+endif
+endif
 include config/Makefile
 endif
 
+# FEATURE_DUMP_EXPORT holds the location of the actual
+# FEATURE-DUMP file used to bypass feature detection
+# (for bpf or any other subproject)
+ifeq ($(FEATURES_DUMP),)
+FEATURE_DUMP_EXPORT := $(realpath $(OUTPUT)FEATURE-DUMP)
+else
+FEATURE_DUMP_EXPORT := $(FEATURES_DUMP)
+endif
+
 export prefix bindir sharedir sysconfdir DESTDIR
 
 # sparse is architecture-neutral, which means that we need to tell it
@@ -436,7 +460,7 @@ $(LIBAPI)-clean:
        $(Q)$(MAKE) -C $(LIB_DIR) O=$(OUTPUT) clean >/dev/null
 
 $(LIBBPF): fixdep FORCE
-       $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(realpath $(OUTPUT)FEATURE-DUMP)
+       $(Q)$(MAKE) -C $(BPF_DIR) O=$(OUTPUT) $(OUTPUT)libbpf.a FEATURES_DUMP=$(FEATURE_DUMP_EXPORT)
 
 $(LIBBPF)-clean:
        $(call QUIET_CLEAN, libbpf)
@@ -606,10 +630,21 @@ clean: $(LIBTRACEEVENT)-clean $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean
        $(call QUIET_CLEAN, core-progs) $(RM) $(ALL_PROGRAMS) perf perf-read-vdso32 perf-read-vdsox32
        $(call QUIET_CLEAN, core-gen)   $(RM)  *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)FEATURE-DUMP $(OUTPUT)util/*-bison* $(OUTPUT)util/*-flex* \
                $(OUTPUT)util/intel-pt-decoder/inat-tables.c $(OUTPUT)fixdep \
-               $(OUTPUT)tests/llvm-src-{base,kbuild,prologue}.c
+               $(OUTPUT)tests/llvm-src-{base,kbuild,prologue,relocation}.c
        $(QUIET_SUBDIR0)Documentation $(QUIET_SUBDIR1) clean
        $(python-clean)
 
+#
+# Copy FEATURE-DUMP into $(FEATURE_DUMP_COPY) if it is defined,
+# with no further action.
+feature-dump:
+ifdef FEATURE_DUMP_COPY
+       @cp $(OUTPUT)FEATURE-DUMP $(FEATURE_DUMP_COPY)
+       @echo "FEATURE-DUMP file copied into $(FEATURE_DUMP_COPY)"
+else
+       @echo "FEATURE-DUMP file available in $(OUTPUT)FEATURE-DUMP"
+endif
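
An illustrative use of the new target (the /tmp path is only an example): generate a
feature dump once, then reuse it to bypass feature detection on later builds:

        make -C tools/perf feature-dump FEATURE_DUMP_COPY=/tmp/perf-features
        make -C tools/perf FEATURES_DUMP=/tmp/perf-features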
+
 #
 # Trick: if ../../.git does not exist - we are building out of tree for example,
 # then force version regeneration:
index 7fbca175099ec917ad69b8025c8249ee6c52a6a4..9f9cea3478fdda25aede1cfbf865b107c88a262c 100644 (file)
@@ -1,3 +1,5 @@
 ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 endif
+
+HAVE_KVM_STAT_SUPPORT := 1
index 7b8b0d1a1b626065e0b414f42a7a998723e0e57f..c8fe2074d2177f0709f0e1d54f63cd77f84f2233 100644 (file)
@@ -1,5 +1,6 @@
 libperf-y += header.o
 libperf-y += sym-handling.o
+libperf-y += kvm-stat.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/book3s_hcalls.h b/tools/perf/arch/powerpc/util/book3s_hcalls.h
new file mode 100644 (file)
index 0000000..0dd6b7f
--- /dev/null
@@ -0,0 +1,123 @@
+#ifndef ARCH_PERF_BOOK3S_HV_HCALLS_H
+#define ARCH_PERF_BOOK3S_HV_HCALLS_H
+
+/*
+ * PowerPC HCALL codes : hcall code to name mapping
+ */
+#define kvm_trace_symbol_hcall \
+       {0x4, "H_REMOVE"},                                      \
+       {0x8, "H_ENTER"},                                       \
+       {0xc, "H_READ"},                                        \
+       {0x10, "H_CLEAR_MOD"},                                  \
+       {0x14, "H_CLEAR_REF"},                                  \
+       {0x18, "H_PROTECT"},                                    \
+       {0x1c, "H_GET_TCE"},                                    \
+       {0x20, "H_PUT_TCE"},                                    \
+       {0x24, "H_SET_SPRG0"},                                  \
+       {0x28, "H_SET_DABR"},                                   \
+       {0x2c, "H_PAGE_INIT"},                                  \
+       {0x30, "H_SET_ASR"},                                    \
+       {0x34, "H_ASR_ON"},                                     \
+       {0x38, "H_ASR_OFF"},                                    \
+       {0x3c, "H_LOGICAL_CI_LOAD"},                            \
+       {0x40, "H_LOGICAL_CI_STORE"},                           \
+       {0x44, "H_LOGICAL_CACHE_LOAD"},                         \
+       {0x48, "H_LOGICAL_CACHE_STORE"},                        \
+       {0x4c, "H_LOGICAL_ICBI"},                               \
+       {0x50, "H_LOGICAL_DCBF"},                               \
+       {0x54, "H_GET_TERM_CHAR"},                              \
+       {0x58, "H_PUT_TERM_CHAR"},                              \
+       {0x5c, "H_REAL_TO_LOGICAL"},                            \
+       {0x60, "H_HYPERVISOR_DATA"},                            \
+       {0x64, "H_EOI"},                                        \
+       {0x68, "H_CPPR"},                                       \
+       {0x6c, "H_IPI"},                                        \
+       {0x70, "H_IPOLL"},                                      \
+       {0x74, "H_XIRR"},                                       \
+       {0x78, "H_MIGRATE_DMA"},                                \
+       {0x7c, "H_PERFMON"},                                    \
+       {0xdc, "H_REGISTER_VPA"},                               \
+       {0xe0, "H_CEDE"},                                       \
+       {0xe4, "H_CONFER"},                                     \
+       {0xe8, "H_PROD"},                                       \
+       {0xec, "H_GET_PPP"},                                    \
+       {0xf0, "H_SET_PPP"},                                    \
+       {0xf4, "H_PURR"},                                       \
+       {0xf8, "H_PIC"},                                        \
+       {0xfc, "H_REG_CRQ"},                                    \
+       {0x100, "H_FREE_CRQ"},                                  \
+       {0x104, "H_VIO_SIGNAL"},                                \
+       {0x108, "H_SEND_CRQ"},                                  \
+       {0x110, "H_COPY_RDMA"},                                 \
+       {0x114, "H_REGISTER_LOGICAL_LAN"},                      \
+       {0x118, "H_FREE_LOGICAL_LAN"},                          \
+       {0x11c, "H_ADD_LOGICAL_LAN_BUFFER"},                    \
+       {0x120, "H_SEND_LOGICAL_LAN"},                          \
+       {0x124, "H_BULK_REMOVE"},                               \
+       {0x130, "H_MULTICAST_CTRL"},                            \
+       {0x134, "H_SET_XDABR"},                                 \
+       {0x138, "H_STUFF_TCE"},                                 \
+       {0x13c, "H_PUT_TCE_INDIRECT"},                          \
+       {0x14c, "H_CHANGE_LOGICAL_LAN_MAC"},                    \
+       {0x150, "H_VTERM_PARTNER_INFO"},                        \
+       {0x154, "H_REGISTER_VTERM"},                            \
+       {0x158, "H_FREE_VTERM"},                                \
+       {0x15c, "H_RESET_EVENTS"},                              \
+       {0x160, "H_ALLOC_RESOURCE"},                            \
+       {0x164, "H_FREE_RESOURCE"},                             \
+       {0x168, "H_MODIFY_QP"},                                 \
+       {0x16c, "H_QUERY_QP"},                                  \
+       {0x170, "H_REREGISTER_PMR"},                            \
+       {0x174, "H_REGISTER_SMR"},                              \
+       {0x178, "H_QUERY_MR"},                                  \
+       {0x17c, "H_QUERY_MW"},                                  \
+       {0x180, "H_QUERY_HCA"},                                 \
+       {0x184, "H_QUERY_PORT"},                                \
+       {0x188, "H_MODIFY_PORT"},                               \
+       {0x18c, "H_DEFINE_AQP1"},                               \
+       {0x190, "H_GET_TRACE_BUFFER"},                          \
+       {0x194, "H_DEFINE_AQP0"},                               \
+       {0x198, "H_RESIZE_MR"},                                 \
+       {0x19c, "H_ATTACH_MCQP"},                               \
+       {0x1a0, "H_DETACH_MCQP"},                               \
+       {0x1a4, "H_CREATE_RPT"},                                \
+       {0x1a8, "H_REMOVE_RPT"},                                \
+       {0x1ac, "H_REGISTER_RPAGES"},                           \
+       {0x1b0, "H_DISABLE_AND_GETC"},                          \
+       {0x1b4, "H_ERROR_DATA"},                                \
+       {0x1b8, "H_GET_HCA_INFO"},                              \
+       {0x1bc, "H_GET_PERF_COUNT"},                            \
+       {0x1c0, "H_MANAGE_TRACE"},                              \
+       {0x1d4, "H_FREE_LOGICAL_LAN_BUFFER"},                   \
+       {0x1d8, "H_POLL_PENDING"},                              \
+       {0x1e4, "H_QUERY_INT_STATE"},                           \
+       {0x244, "H_ILLAN_ATTRIBUTES"},                          \
+       {0x250, "H_MODIFY_HEA_QP"},                             \
+       {0x254, "H_QUERY_HEA_QP"},                              \
+       {0x258, "H_QUERY_HEA"},                                 \
+       {0x25c, "H_QUERY_HEA_PORT"},                            \
+       {0x260, "H_MODIFY_HEA_PORT"},                           \
+       {0x264, "H_REG_BCMC"},                                  \
+       {0x268, "H_DEREG_BCMC"},                                \
+       {0x26c, "H_REGISTER_HEA_RPAGES"},                       \
+       {0x270, "H_DISABLE_AND_GET_HEA"},                       \
+       {0x274, "H_GET_HEA_INFO"},                              \
+       {0x278, "H_ALLOC_HEA_RESOURCE"},                        \
+       {0x284, "H_ADD_CONN"},                                  \
+       {0x288, "H_DEL_CONN"},                                  \
+       {0x298, "H_JOIN"},                                      \
+       {0x2a4, "H_VASI_STATE"},                                \
+       {0x2b0, "H_ENABLE_CRQ"},                                \
+       {0x2b8, "H_GET_EM_PARMS"},                              \
+       {0x2d0, "H_SET_MPP"},                                   \
+       {0x2d4, "H_GET_MPP"},                                   \
+       {0x2ec, "H_HOME_NODE_ASSOCIATIVITY"},                   \
+       {0x2f4, "H_BEST_ENERGY"},                               \
+       {0x2fc, "H_XIRR_X"},                                    \
+       {0x300, "H_RANDOM"},                                    \
+       {0x304, "H_COP"},                                       \
+       {0x314, "H_GET_MPP_X"},                                 \
+       {0x31c, "H_SET_MODE"},                                  \
+       {0xf000, "H_RTAS"}                                      \
+
+#endif
diff --git a/tools/perf/arch/powerpc/util/book3s_hv_exits.h b/tools/perf/arch/powerpc/util/book3s_hv_exits.h
new file mode 100644 (file)
index 0000000..e68ba2d
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef ARCH_PERF_BOOK3S_HV_EXITS_H
+#define ARCH_PERF_BOOK3S_HV_EXITS_H
+
+/*
+ * PowerPC Interrupt vectors : exit code to name mapping
+ */
+
+#define kvm_trace_symbol_exit \
+       {0x0,   "RETURN_TO_HOST"}, \
+       {0x100, "SYSTEM_RESET"}, \
+       {0x200, "MACHINE_CHECK"}, \
+       {0x300, "DATA_STORAGE"}, \
+       {0x380, "DATA_SEGMENT"}, \
+       {0x400, "INST_STORAGE"}, \
+       {0x480, "INST_SEGMENT"}, \
+       {0x500, "EXTERNAL"}, \
+       {0x501, "EXTERNAL_LEVEL"}, \
+       {0x502, "EXTERNAL_HV"}, \
+       {0x600, "ALIGNMENT"}, \
+       {0x700, "PROGRAM"}, \
+       {0x800, "FP_UNAVAIL"}, \
+       {0x900, "DECREMENTER"}, \
+       {0x980, "HV_DECREMENTER"}, \
+       {0xc00, "SYSCALL"}, \
+       {0xd00, "TRACE"}, \
+       {0xe00, "H_DATA_STORAGE"}, \
+       {0xe20, "H_INST_STORAGE"}, \
+       {0xe40, "H_EMUL_ASSIST"}, \
+       {0xf00, "PERFMON"}, \
+       {0xf20, "ALTIVEC"}, \
+       {0xf40, "VSX"}
+
+#endif
diff --git a/tools/perf/arch/powerpc/util/kvm-stat.c b/tools/perf/arch/powerpc/util/kvm-stat.c
new file mode 100644 (file)
index 0000000..74eee30
--- /dev/null
@@ -0,0 +1,170 @@
+#include "util/kvm-stat.h"
+#include "util/parse-events.h"
+#include "util/debug.h"
+
+#include "book3s_hv_exits.h"
+#include "book3s_hcalls.h"
+
+#define NR_TPS 4
+
+const char *vcpu_id_str = "vcpu_id";
+const int decode_str_len = 40;
+const char *kvm_entry_trace = "kvm_hv:kvm_guest_enter";
+const char *kvm_exit_trace = "kvm_hv:kvm_guest_exit";
+
+define_exit_reasons_table(hv_exit_reasons, kvm_trace_symbol_exit);
+define_exit_reasons_table(hcall_reasons, kvm_trace_symbol_hcall);
+
+/* Tracepoints specific to ppc_book3s_hv */
+const char *ppc_book3s_hv_kvm_tp[] = {
+       "kvm_hv:kvm_guest_enter",
+       "kvm_hv:kvm_guest_exit",
+       "kvm_hv:kvm_hcall_enter",
+       "kvm_hv:kvm_hcall_exit",
+       NULL,
+};
+
+/* 1 extra placeholder for NULL */
+const char *kvm_events_tp[NR_TPS + 1];
+const char *kvm_exit_reason;
+
+static void hcall_event_get_key(struct perf_evsel *evsel,
+                               struct perf_sample *sample,
+                               struct event_key *key)
+{
+       key->info = 0;
+       key->key = perf_evsel__intval(evsel, sample, "req");
+}
+
+static const char *get_hcall_exit_reason(u64 exit_code)
+{
+       struct exit_reasons_table *tbl = hcall_reasons;
+
+       while (tbl->reason != NULL) {
+               if (tbl->exit_code == exit_code)
+                       return tbl->reason;
+               tbl++;
+       }
+
+       pr_debug("Unknown hcall code: %lld\n",
+              (unsigned long long)exit_code);
+       return "UNKNOWN";
+}
+
+static bool hcall_event_end(struct perf_evsel *evsel,
+                           struct perf_sample *sample __maybe_unused,
+                           struct event_key *key __maybe_unused)
+{
+       return (!strcmp(evsel->name, kvm_events_tp[3]));
+}
+
+static bool hcall_event_begin(struct perf_evsel *evsel,
+                             struct perf_sample *sample, struct event_key *key)
+{
+       if (!strcmp(evsel->name, kvm_events_tp[2])) {
+               hcall_event_get_key(evsel, sample, key);
+               return true;
+       }
+
+       return false;
+}
+static void hcall_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+                                  struct event_key *key,
+                                  char *decode)
+{
+       const char *hcall_reason = get_hcall_exit_reason(key->key);
+
+       scnprintf(decode, decode_str_len, "%s", hcall_reason);
+}
+
+static struct kvm_events_ops hcall_events = {
+       .is_begin_event = hcall_event_begin,
+       .is_end_event = hcall_event_end,
+       .decode_key = hcall_event_decode_key,
+       .name = "HCALL-EVENT",
+};
+
+static struct kvm_events_ops exit_events = {
+       .is_begin_event = exit_event_begin,
+       .is_end_event = exit_event_end,
+       .decode_key = exit_event_decode_key,
+       .name = "VM-EXIT"
+};
+
+struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+       { .name = "vmexit", .ops = &exit_events },
+       { .name = "hcall", .ops = &hcall_events },
+       { NULL, NULL },
+};
+
+const char * const kvm_skip_events[] = {
+       NULL,
+};
+
+
+static int is_tracepoint_available(const char *str, struct perf_evlist *evlist)
+{
+       struct parse_events_error err;
+       int ret;
+
+       err.str = NULL;
+       ret = parse_events(evlist, str, &err);
+       if (err.str)
+               pr_err("%s : %s\n", str, err.str);
+       return ret;
+}
+
+static int ppc__setup_book3s_hv(struct perf_kvm_stat *kvm,
+                               struct perf_evlist *evlist)
+{
+       const char **events_ptr;
+       int i, nr_tp = 0, err = -1;
+
+       /* Check for book3s_hv tracepoints */
+       for (events_ptr = ppc_book3s_hv_kvm_tp; *events_ptr; events_ptr++) {
+               err = is_tracepoint_available(*events_ptr, evlist);
+               if (err)
+                       return -1;
+               nr_tp++;
+       }
+
+       for (i = 0; i < nr_tp; i++)
+               kvm_events_tp[i] = ppc_book3s_hv_kvm_tp[i];
+
+       kvm_events_tp[i] = NULL;
+       kvm_exit_reason = "trap";
+       kvm->exit_reasons = hv_exit_reasons;
+       kvm->exit_reasons_isa = "HV";
+
+       return 0;
+}
+
+/* Wrapper to setup kvm tracepoints */
+static int ppc__setup_kvm_tp(struct perf_kvm_stat *kvm)
+{
+       struct perf_evlist *evlist = perf_evlist__new();
+
+       if (evlist == NULL)
+               return -ENOMEM;
+
+       /* Right now, only supported on book3s_hv */
+       return ppc__setup_book3s_hv(kvm, evlist);
+}
+
+int setup_kvm_events_tp(struct perf_kvm_stat *kvm)
+{
+       return ppc__setup_kvm_tp(kvm);
+}
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid __maybe_unused)
+{
+       int ret;
+
+       ret = ppc__setup_kvm_tp(kvm);
+       if (ret) {
+               kvm->exit_reasons = NULL;
+               kvm->exit_reasons_isa = NULL;
+       }
+
+       return ret;
+}
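
With this arch support in place, the usual perf kvm stat workflow should apply on a
powerpc book3s_hv host; a hedged example (options as on other architectures, with
'vmexit' and 'hcall' being the event groups registered above):

        perf kvm stat record -a sleep 10
        perf kvm stat report --event=vmexit
        perf kvm stat report --event=hcall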
index a5dbc07ec9dc70900ed4a593f5979d419061fd35..ed57df2e6d687d89ccf95d9aa03c263411de7fe9 100644 (file)
@@ -10,7 +10,7 @@
  */
 
 #include "../../util/kvm-stat.h"
-#include <asm/kvm_perf.h>
+#include <asm/sie.h>
 
 define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
 define_exit_reasons_table(sie_icpt_insn_codes, icpt_insn_codes);
@@ -18,6 +18,12 @@ define_exit_reasons_table(sie_sigp_order_codes, sigp_order_codes);
 define_exit_reasons_table(sie_diagnose_codes, diagnose_codes);
 define_exit_reasons_table(sie_icpt_prog_codes, icpt_prog_codes);
 
+const char *vcpu_id_str = "id";
+const int decode_str_len = 40;
+const char *kvm_exit_reason = "icptcode";
+const char *kvm_entry_trace = "kvm:kvm_s390_sie_enter";
+const char *kvm_exit_trace = "kvm:kvm_s390_sie_exit";
+
 static void event_icpt_insn_get_key(struct perf_evsel *evsel,
                                    struct perf_sample *sample,
                                    struct event_key *key)
@@ -73,7 +79,7 @@ static struct kvm_events_ops exit_events = {
        .name = "VM-EXIT"
 };
 
-const char * const kvm_events_tp[] = {
+const char *kvm_events_tp[] = {
        "kvm:kvm_s390_sie_enter",
        "kvm:kvm_s390_sie_exit",
        "kvm:kvm_s390_intercept_instruction",
index 3e89ba825f6bf3b3f8b15b9110564c63283998b4..7f064eb371589aa887e112cddb54145d6268141b 100644 (file)
@@ -17,7 +17,7 @@ static pid_t spawn(void)
        if (pid)
                return pid;
 
-       while(1);
+       while(1)
                sleep(5);
        return 0;
 }
index 8d8150f1cf9bcf426b4c29a19170221db680a980..d66f9ad4df2ea5da6eca22cae351a842d1143508 100644 (file)
@@ -60,7 +60,9 @@ struct branch {
        u64 misc;
 };
 
-static size_t intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+static size_t
+intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+                        struct perf_evlist *evlist __maybe_unused)
 {
        return INTEL_BTS_AUXTRACE_PRIV_SIZE;
 }
index f05daacc9e7810117cfe25415a75990b3aa89f63..6f7d453b0e324032cdf84986698508c63b59d5b6 100644 (file)
@@ -273,7 +273,9 @@ intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
        return attr;
 }
 
-static size_t intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused)
+static size_t
+intel_pt_info_priv_size(struct auxtrace_record *itr __maybe_unused,
+                       struct perf_evlist *evlist __maybe_unused)
 {
        return INTEL_PT_AUXTRACE_PRIV_SIZE;
 }
index 14e4e668fad733c1802908047ae524318aa586de..b63d4be655a24a678f69d6deea3b4ee87283b938 100644 (file)
@@ -1,5 +1,7 @@
 #include "../../util/kvm-stat.h"
-#include <asm/kvm_perf.h>
+#include <asm/svm.h>
+#include <asm/vmx.h>
+#include <asm/kvm.h>
 
 define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
 define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
@@ -11,6 +13,12 @@ static struct kvm_events_ops exit_events = {
        .name = "VM-EXIT"
 };
 
+const char *vcpu_id_str = "vcpu_id";
+const int decode_str_len = 20;
+const char *kvm_exit_reason = "exit_reason";
+const char *kvm_entry_trace = "kvm:kvm_entry";
+const char *kvm_exit_trace = "kvm:kvm_exit";
+
 /*
  * For the mmio events, we treat:
  * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
@@ -65,7 +73,7 @@ static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
                                  struct event_key *key,
                                  char *decode)
 {
-       scnprintf(decode, DECODE_STR_LEN, "%#lx:%s",
+       scnprintf(decode, decode_str_len, "%#lx:%s",
                  (unsigned long)key->key,
                  key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
 }
@@ -109,7 +117,7 @@ static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
                                    struct event_key *key,
                                    char *decode)
 {
-       scnprintf(decode, DECODE_STR_LEN, "%#llx:%s",
+       scnprintf(decode, decode_str_len, "%#llx:%s",
                  (unsigned long long)key->key,
                  key->info ? "POUT" : "PIN");
 }
@@ -121,7 +129,7 @@ static struct kvm_events_ops ioport_events = {
        .name = "IO Port Access"
 };
 
-const char * const kvm_events_tp[] = {
+const char *kvm_events_tp[] = {
        "kvm:kvm_entry",
        "kvm:kvm_exit",
        "kvm:kvm_mmio",
index cc5c1267c738da038cfb6b824f79854fab41a629..cfe366375c4b89bb3642f5531d04b418a8d76e56 100644 (file)
@@ -245,7 +245,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
                        hists__collapse_resort(hists, NULL);
                        /* Don't sort callchain */
                        perf_evsel__reset_sample_bit(pos, CALLCHAIN);
-                       hists__output_resort(hists, NULL);
+                       perf_evsel__output_resort(pos, NULL);
 
                        if (symbol_conf.event_group &&
                            !perf_evsel__is_group_leader(pos))
index d93bff7fc0e407d6518a6ce0e2ee0dd4b65bf910..632efc6b79a07e20c4b25f9757bcd9c089050270 100644 (file)
@@ -38,19 +38,7 @@ static int build_id_cache__kcore_buildid(const char *proc_dir, char *sbuildid)
 
 static int build_id_cache__kcore_dir(char *dir, size_t sz)
 {
-       struct timeval tv;
-       struct tm tm;
-       char dt[32];
-
-       if (gettimeofday(&tv, NULL) || !localtime_r(&tv.tv_sec, &tm))
-               return -1;
-
-       if (!strftime(dt, sizeof(dt), "%Y%m%d%H%M%S", &tm))
-               return -1;
-
-       scnprintf(dir, sz, "%s%02u", dt, (unsigned)tv.tv_usec / 10000);
-
-       return 0;
+       return fetch_current_timestamp(dir, sz);
 }
 
 static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
index 0022e02ed31a7034b9286884ab87c4c131e41fa3..b38445f08c2fb0b06e7b6236f3096ff91892c156 100644 (file)
@@ -17,6 +17,7 @@
 #include "util/build-id.h"
 #include "util/data.h"
 #include "util/auxtrace.h"
+#include "util/jit.h"
 
 #include <subcmd/parse-options.h>
 
@@ -29,6 +30,7 @@ struct perf_inject {
        bool                    sched_stat;
        bool                    have_auxtrace;
        bool                    strip;
+       bool                    jit_mode;
        const char              *input_name;
        struct perf_data_file   output;
        u64                     bytes_written;
@@ -71,6 +73,15 @@ static int perf_event__repipe_oe_synth(struct perf_tool *tool,
        return perf_event__repipe_synth(tool, event);
 }
 
+#ifdef HAVE_LIBELF_SUPPORT
+static int perf_event__drop_oe(struct perf_tool *tool __maybe_unused,
+                              union perf_event *event __maybe_unused,
+                              struct ordered_events *oe __maybe_unused)
+{
+       return 0;
+}
+#endif
+
 static int perf_event__repipe_op2_synth(struct perf_tool *tool,
                                        union perf_event *event,
                                        struct perf_session *session
@@ -234,6 +245,27 @@ static int perf_event__repipe_mmap(struct perf_tool *tool,
        return err;
 }
 
+#ifdef HAVE_LIBELF_SUPPORT
+static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
+                                      union perf_event *event,
+                                      struct perf_sample *sample,
+                                      struct machine *machine)
+{
+       struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+       u64 n = 0;
+
+       /*
+        * if jit marker, then inject jit mmaps and generate ELF images
+        */
+       if (!jit_process(inject->session, &inject->output, machine,
+                        event->mmap.filename, sample->pid, &n)) {
+               inject->bytes_written += n;
+               return 0;
+       }
+       return perf_event__repipe_mmap(tool, event, sample, machine);
+}
+#endif
+
 static int perf_event__repipe_mmap2(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct perf_sample *sample,
@@ -247,6 +279,27 @@ static int perf_event__repipe_mmap2(struct perf_tool *tool,
        return err;
 }
 
+#ifdef HAVE_LIBELF_SUPPORT
+static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
+                                       union perf_event *event,
+                                       struct perf_sample *sample,
+                                       struct machine *machine)
+{
+       struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
+       u64 n = 0;
+
+       /*
+        * if jit marker, then inject jit mmaps and generate ELF images
+        */
+       if (!jit_process(inject->session, &inject->output, machine,
+                         event->mmap2.filename, sample->pid, &n)) {
+               inject->bytes_written += n;
+               return 0;
+       }
+       return perf_event__repipe_mmap2(tool, event, sample, machine);
+}
+#endif
+
 static int perf_event__repipe_fork(struct perf_tool *tool,
                                   union perf_event *event,
                                   struct perf_sample *sample,
@@ -664,6 +717,23 @@ static int __cmd_inject(struct perf_inject *inject)
        return ret;
 }
 
+#ifdef HAVE_LIBELF_SUPPORT
+static int
+jit_validate_events(struct perf_session *session)
+{
+       struct perf_evsel *evsel;
+
+       /*
+        * check that all events use CLOCK_MONOTONIC
+        */
+       evlist__for_each(session->evlist, evsel) {
+               if (evsel->attr.use_clockid == 0 || evsel->attr.clockid != CLOCK_MONOTONIC)
+                       return -1;
+       }
+       return 0;
+}
+#endif
+
 int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        struct perf_inject inject = {
@@ -703,7 +773,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
        };
        int ret;
 
-       const struct option options[] = {
+       struct option options[] = {
                OPT_BOOLEAN('b', "build-ids", &inject.build_ids,
                            "Inject build-ids into the output stream"),
                OPT_STRING('i', "input", &inject.input_name, "file",
@@ -713,6 +783,7 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
                OPT_BOOLEAN('s', "sched-stat", &inject.sched_stat,
                            "Merge sched-stat and sched-switch for getting events "
                            "where and how long tasks slept"),
+               OPT_BOOLEAN('j', "jit", &inject.jit_mode, "merge jitdump files into perf.data file"),
                OPT_INCR('v', "verbose", &verbose,
                         "be more verbose (show build ids, etc)"),
                OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
@@ -729,7 +800,9 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
                "perf inject [<options>]",
                NULL
        };
-
+#ifndef HAVE_LIBELF_SUPPORT
+       set_option_nobuild(options, 'j', "jit", "NO_LIBELF=1", true);
+#endif
        argc = parse_options(argc, argv, options, inject_usage, 0);
 
        /*
@@ -755,6 +828,36 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
        if (inject.session == NULL)
                return -1;
 
+       if (inject.build_ids) {
+               /*
+                * to make sure the mmap records are ordered correctly
+                * Use ordered events so that mmap records are processed in the
+                * correct order, which matters especially for mmaps coming from
+                * jitted code. We cannot generate the buildid hit list and
+                * inject the jit mmaps at the same time for now.
+               inject.tool.ordered_events = true;
+               inject.tool.ordering_requires_timestamps = true;
+       }
+#ifdef HAVE_LIBELF_SUPPORT
+       if (inject.jit_mode) {
+               /*
+                * validate that events use the correct clockid
+                */
+               if (jit_validate_events(inject.session)) {
+                       fprintf(stderr, "error, jitted code must be sampled with perf record -k 1\n");
+                       return -1;
+               }
+               inject.tool.mmap2          = perf_event__jit_repipe_mmap2;
+               inject.tool.mmap           = perf_event__jit_repipe_mmap;
+               inject.tool.ordered_events = true;
+               inject.tool.ordering_requires_timestamps = true;
+               /*
+                * JIT MMAP injection injects all MMAP events in one go, so it
+                * does not obey finished_round semantics.
+                */
+               inject.tool.finished_round = perf_event__drop_oe;
+       }
+#endif
        ret = symbol__init(&inject.session->header.env);
        if (ret < 0)
                goto out_delete;
index 4418d9214872150648a719dd07fc14a29c913e9b..bff666458b28e24dccac682d0f28b6708a1a7c83 100644 (file)
@@ -30,7 +30,6 @@
 #include <math.h>
 
 #ifdef HAVE_KVM_STAT_SUPPORT
-#include <asm/kvm_perf.h>
 #include "util/kvm-stat.h"
 
 void exit_event_get_key(struct perf_evsel *evsel,
@@ -38,12 +37,12 @@ void exit_event_get_key(struct perf_evsel *evsel,
                        struct event_key *key)
 {
        key->info = 0;
-       key->key = perf_evsel__intval(evsel, sample, KVM_EXIT_REASON);
+       key->key = perf_evsel__intval(evsel, sample, kvm_exit_reason);
 }
 
 bool kvm_exit_event(struct perf_evsel *evsel)
 {
-       return !strcmp(evsel->name, KVM_EXIT_TRACE);
+       return !strcmp(evsel->name, kvm_exit_trace);
 }
 
 bool exit_event_begin(struct perf_evsel *evsel,
@@ -59,7 +58,7 @@ bool exit_event_begin(struct perf_evsel *evsel,
 
 bool kvm_entry_event(struct perf_evsel *evsel)
 {
-       return !strcmp(evsel->name, KVM_ENTRY_TRACE);
+       return !strcmp(evsel->name, kvm_entry_trace);
 }
 
 bool exit_event_end(struct perf_evsel *evsel,
@@ -91,7 +90,7 @@ void exit_event_decode_key(struct perf_kvm_stat *kvm,
        const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
                                                  key->key);
 
-       scnprintf(decode, DECODE_STR_LEN, "%s", exit_reason);
+       scnprintf(decode, decode_str_len, "%s", exit_reason);
 }
 
 static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
@@ -357,7 +356,7 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
        time_diff = sample->time - time_begin;
 
        if (kvm->duration && time_diff > kvm->duration) {
-               char decode[DECODE_STR_LEN];
+               char decode[decode_str_len];
 
                kvm->events_ops->decode_key(kvm, &event->key, decode);
                if (!skip_event(decode)) {
@@ -385,7 +384,8 @@ struct vcpu_event_record *per_vcpu_record(struct thread *thread,
                        return NULL;
                }
 
-               vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, VCPU_ID);
+               vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample,
+                                                         vcpu_id_str);
                thread__set_priv(thread, vcpu_record);
        }
 
@@ -574,7 +574,7 @@ static void show_timeofday(void)
 
 static void print_result(struct perf_kvm_stat *kvm)
 {
-       char decode[DECODE_STR_LEN];
+       char decode[decode_str_len];
        struct kvm_event *event;
        int vcpu = kvm->trace_vcpu;
 
@@ -585,7 +585,7 @@ static void print_result(struct perf_kvm_stat *kvm)
 
        pr_info("\n\n");
        print_vcpu_info(kvm);
-       pr_info("%*s ", DECODE_STR_LEN, kvm->events_ops->name);
+       pr_info("%*s ", decode_str_len, kvm->events_ops->name);
        pr_info("%10s ", "Samples");
        pr_info("%9s ", "Samples%");
 
@@ -604,7 +604,7 @@ static void print_result(struct perf_kvm_stat *kvm)
                min = get_event_min(event, vcpu);
 
                kvm->events_ops->decode_key(kvm, &event->key, decode);
-               pr_info("%*s ", DECODE_STR_LEN, decode);
+               pr_info("%*s ", decode_str_len, decode);
                pr_info("%10llu ", (unsigned long long)ecount);
                pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
                pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
@@ -1132,6 +1132,11 @@ exit:
                _p;                     \
        })
 
+int __weak setup_kvm_events_tp(struct perf_kvm_stat *kvm __maybe_unused)
+{
+       return 0;
+}
+
 static int
 kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
 {
@@ -1148,7 +1153,14 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
                NULL
        };
        const char * const *events_tp;
+       int ret;
+
        events_tp_size = 0;
+       ret = setup_kvm_events_tp(kvm);
+       if (ret < 0) {
+               pr_err("Unable to setup the kvm tracepoints\n");
+               return ret;
+       }
 
        for (events_tp = kvm_events_tp; *events_tp; events_tp++)
                events_tp_size++;
@@ -1377,6 +1389,12 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
        /*
         * generate the event list
         */
+       err = setup_kvm_events_tp(kvm);
+       if (err < 0) {
+               pr_err("Unable to setup the kvm tracepoints\n");
+               return err;
+       }
+
        kvm->evlist = kvm_live_event_list();
        if (kvm->evlist == NULL) {
                err = -1;
index 319712a4e02b73de7359fc2ed772bed1d9cd424f..0ee0d5cd31a71a833ecd199d60d0d9ba60024bab 100644 (file)
@@ -49,7 +49,9 @@ struct record {
        const char              *progname;
        int                     realtime_prio;
        bool                    no_buildid;
+       bool                    no_buildid_set;
        bool                    no_buildid_cache;
+       bool                    no_buildid_cache_set;
        bool                    buildid_all;
        unsigned long long      samples;
 };
@@ -1097,10 +1099,12 @@ struct option __record_options[] = {
        OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
        OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
                    "don't sample"),
-       OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
-                   "do not update the buildid cache"),
-       OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
-                   "do not collect buildids in perf.data"),
+       OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
+                       &record.no_buildid_cache_set,
+                       "do not update the buildid cache"),
+       OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
+                       &record.no_buildid_set,
+                       "do not collect buildids in perf.data"),
        OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
                     "monitor event in cgroup name only",
                     parse_cgroups),
index 2bf537f190a026952bd63126e9c37def827e9914..1eab50ac1ef601e04192dadb76e74894c727795a 100644 (file)
@@ -75,7 +75,10 @@ static int report__config(const char *var, const char *value, void *cb)
                return 0;
        }
        if (!strcmp(var, "report.percent-limit")) {
-               rep->min_percent = strtof(value, NULL);
+               double pcnt = strtof(value, NULL);
+
+               rep->min_percent = pcnt;
+               callchain_param.min_percent = pcnt;
                return 0;
        }
        if (!strcmp(var, "report.children")) {
@@ -504,7 +507,7 @@ static void report__output_resort(struct report *rep)
        ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
 
        evlist__for_each(rep->session->evlist, pos)
-               hists__output_resort(evsel__hists(pos), &prog);
+               perf_evsel__output_resort(pos, &prog);
 
        ui_progress__finish();
 }
@@ -633,8 +636,10 @@ parse_percent_limit(const struct option *opt, const char *str,
                    int unset __maybe_unused)
 {
        struct report *rep = opt->value;
+       double pcnt = strtof(str, NULL);
 
-       rep->min_percent = strtof(str, NULL);
+       rep->min_percent = pcnt;
+       callchain_param.min_percent = pcnt;
        return 0;
 }
 
@@ -907,15 +912,6 @@ repeat:
                symbol_conf.cumulate_callchain = false;
        }
 
-       if (setup_sorting(session->evlist) < 0) {
-               if (sort_order)
-                       parse_options_usage(report_usage, options, "s", 1);
-               if (field_order)
-                       parse_options_usage(sort_order ? NULL : report_usage,
-                                           options, "F", 1);
-               goto error;
-       }
-
        /* Force tty output for header output and per-thread stat. */
        if (report.header || report.header_only || report.show_threads)
                use_browser = 0;
@@ -925,6 +921,15 @@ repeat:
        else
                use_browser = 0;
 
+       if (setup_sorting(session->evlist) < 0) {
+               if (sort_order)
+                       parse_options_usage(report_usage, options, "s", 1);
+               if (field_order)
+                       parse_options_usage(sort_order ? NULL : report_usage,
+                                           options, "F", 1);
+               goto error;
+       }
+
        if (report.header || report.header_only) {
                perf_session__fprintf_info(session, stdout,
                                           report.show_full_info);
index bf01cbb0ef2369f2fc904809b86b70a823bdb604..a75de3940b976499614ed7dae56a7cfc89e89c53 100644 (file)
@@ -252,7 +252,8 @@ static void perf_top__print_sym_table(struct perf_top *top)
        char bf[160];
        int printed = 0;
        const int win_width = top->winsize.ws_col - 1;
-       struct hists *hists = evsel__hists(top->sym_evsel);
+       struct perf_evsel *evsel = top->sym_evsel;
+       struct hists *hists = evsel__hists(evsel);
 
        puts(CONSOLE_CLEAR);
 
@@ -288,7 +289,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
        }
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists, NULL);
+       perf_evsel__output_resort(evsel, NULL);
 
        hists__output_recalc_col_len(hists, top->print_entries - printed);
        putchar('\n');
@@ -540,6 +541,7 @@ static bool perf_top__handle_keypress(struct perf_top *top, int c)
 static void perf_top__sort_new_samples(void *arg)
 {
        struct perf_top *t = arg;
+       struct perf_evsel *evsel = t->sym_evsel;
        struct hists *hists;
 
        perf_top__reset_sample_counters(t);
@@ -547,7 +549,7 @@ static void perf_top__sort_new_samples(void *arg)
        if (t->evlist->selected != NULL)
                t->sym_evsel = t->evlist->selected;
 
-       hists = evsel__hists(t->sym_evsel);
+       hists = evsel__hists(evsel);
 
        if (t->evlist->enabled) {
                if (t->zero) {
@@ -559,7 +561,7 @@ static void perf_top__sort_new_samples(void *arg)
        }
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists, NULL);
+       perf_evsel__output_resort(evsel, NULL);
 }
 
 static void *display_thread_tui(void *arg)
@@ -1243,6 +1245,13 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
        /* display thread wants entries to be collapsed in a different tree */
        sort__need_collapse = 1;
 
+       if (top.use_stdio)
+               use_browser = 0;
+       else if (top.use_tui)
+               use_browser = 1;
+
+       setup_browser(false);
+
        if (setup_sorting(top.evlist) < 0) {
                if (sort_order)
                        parse_options_usage(top_usage, options, "s", 1);
@@ -1252,13 +1261,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
                goto out_delete_evlist;
        }
 
-       if (top.use_stdio)
-               use_browser = 0;
-       else if (top.use_tui)
-               use_browser = 1;
-
-       setup_browser(false);
-
        status = target__validate(target);
        if (status) {
                target__strerror(target, status, errbuf, BUFSIZ);
index e5959c136a1907ee9905f044ae2e7786cb47d075..f7aeaf303f5a0c072dc91ccd595725fb36986567 100644 (file)
@@ -61,50 +61,45 @@ endif
 
 ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
-else
-  #
-  # For linking with debug library, run like:
-  #
-  #   make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
-  #
-  ifdef LIBUNWIND_DIR
-    LIBUNWIND_CFLAGS  = -I$(LIBUNWIND_DIR)/include
-    LIBUNWIND_LDFLAGS = -L$(LIBUNWIND_DIR)/lib
-  endif
-  LIBUNWIND_LDFLAGS += $(LIBUNWIND_LIBS)
-
-  # Set per-feature check compilation flags
-  FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
-  FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS)
-  FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
-  FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS)
 endif
+#
+# For linking with debug library, run like:
+#
+#   make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/
+#
+ifdef LIBUNWIND_DIR
+  LIBUNWIND_CFLAGS  = -I$(LIBUNWIND_DIR)/include
+  LIBUNWIND_LDFLAGS = -L$(LIBUNWIND_DIR)/lib
+endif
+LIBUNWIND_LDFLAGS += $(LIBUNWIND_LIBS)
+
+# Set per-feature check compilation flags
+FEATURE_CHECK_CFLAGS-libunwind = $(LIBUNWIND_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libunwind = $(LIBUNWIND_LDFLAGS)
+FEATURE_CHECK_CFLAGS-libunwind-debug-frame = $(LIBUNWIND_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libunwind-debug-frame = $(LIBUNWIND_LDFLAGS)
 
 ifeq ($(NO_PERF_REGS),0)
   CFLAGS += -DHAVE_PERF_REGS_SUPPORT
 endif
 
-ifndef NO_LIBELF
-  # for linking with debug library, run like:
-  # make DEBUG=1 LIBDW_DIR=/opt/libdw/
-  ifdef LIBDW_DIR
-    LIBDW_CFLAGS  := -I$(LIBDW_DIR)/include
-    LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
-  endif
-  FEATURE_CHECK_CFLAGS-libdw-dwarf-unwind := $(LIBDW_CFLAGS)
-  FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) -ldw
+# for linking with debug library, run like:
+# make DEBUG=1 LIBDW_DIR=/opt/libdw/
+ifdef LIBDW_DIR
+  LIBDW_CFLAGS  := -I$(LIBDW_DIR)/include
+  LIBDW_LDFLAGS := -L$(LIBDW_DIR)/lib
 endif
+FEATURE_CHECK_CFLAGS-libdw-dwarf-unwind := $(LIBDW_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libdw-dwarf-unwind := $(LIBDW_LDFLAGS) -ldw
 
-ifdef LIBBABELTRACE
-  # for linking with debug library, run like:
-  # make DEBUG=1 LIBBABELTRACE_DIR=/opt/libbabeltrace/
-  ifdef LIBBABELTRACE_DIR
-    LIBBABELTRACE_CFLAGS  := -I$(LIBBABELTRACE_DIR)/include
-    LIBBABELTRACE_LDFLAGS := -L$(LIBBABELTRACE_DIR)/lib
-  endif
-  FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
-  FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
+# for linking with debug library, run like:
+# make DEBUG=1 LIBBABELTRACE_DIR=/opt/libbabeltrace/
+ifdef LIBBABELTRACE_DIR
+  LIBBABELTRACE_CFLAGS  := -I$(LIBBABELTRACE_DIR)/include
+  LIBBABELTRACE_LDFLAGS := -L$(LIBBABELTRACE_DIR)/lib
 endif
+FEATURE_CHECK_CFLAGS-libbabeltrace := $(LIBBABELTRACE_CFLAGS)
+FEATURE_CHECK_LDFLAGS-libbabeltrace := $(LIBBABELTRACE_LDFLAGS) -lbabeltrace-ctf
 
 FEATURE_CHECK_CFLAGS-bpf = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
 # include ARCH specific config
@@ -145,28 +140,26 @@ ifdef PARSER_DEBUG
   $(call detected_var,PARSER_DEBUG_FLEX)
 endif
 
-ifndef NO_LIBPYTHON
-  # Try different combinations to accommodate systems that only have
-  # python[2][-config] in weird combinations but always preferring
-  # python2 and python2-config as per pep-0394. If we catch a
-  # python[-config] in version 3, the version check will kill it.
-  PYTHON2 := $(if $(call get-executable,python2),python2,python)
-  override PYTHON := $(call get-executable-or-default,PYTHON,$(PYTHON2))
-  PYTHON2_CONFIG := \
-    $(if $(call get-executable,$(PYTHON)-config),$(PYTHON)-config,python-config)
-  override PYTHON_CONFIG := \
-    $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON2_CONFIG))
+# Try different combinations to accommodate systems that only have
+# python[2][-config] in weird combinations but always preferring
+# python2 and python2-config as per pep-0394. If we catch a
+# python[-config] in version 3, the version check will kill it.
+PYTHON2 := $(if $(call get-executable,python2),python2,python)
+override PYTHON := $(call get-executable-or-default,PYTHON,$(PYTHON2))
+PYTHON2_CONFIG := \
+  $(if $(call get-executable,$(PYTHON)-config),$(PYTHON)-config,python-config)
+override PYTHON_CONFIG := \
+  $(call get-executable-or-default,PYTHON_CONFIG,$(PYTHON2_CONFIG))
 
-  PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
+PYTHON_CONFIG_SQ := $(call shell-sq,$(PYTHON_CONFIG))
 
-  PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
-  PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
+PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
+PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
 
-  FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
-  FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
-  FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
-  FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
-endif
+FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS)
+FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
+FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
+FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
 
 CFLAGS += -fno-omit-frame-pointer
 CFLAGS += -ggdb3
@@ -181,7 +174,11 @@ LDFLAGS += -Wl,-z,noexecstack
 
 EXTLIBS = -lpthread -lrt -lm -ldl
 
+ifeq ($(FEATURES_DUMP),)
 include $(srctree)/tools/build/Makefile.feature
+else
+include $(FEATURES_DUMP)
+endif
 
 ifeq ($(feature-stackprotector-all), 1)
   CFLAGS += -fstack-protector-all
@@ -407,6 +404,17 @@ ifndef NO_LIBAUDIT
   endif
 endif
 
+ifndef NO_LIBCRYPTO
+  ifneq ($(feature-libcrypto), 1)
+    msg := $(warning No libcrypto.h found, disables jitted code injection, please install libssl-devel or libssl-dev);
+    NO_LIBCRYPTO := 1
+  else
+    CFLAGS += -DHAVE_LIBCRYPTO_SUPPORT
+    EXTLIBS += -lcrypto
+    $(call detected,CONFIG_CRYPTO)
+  endif
+endif
+
 ifdef NO_NEWT
   NO_SLANG=1
 endif
diff --git a/tools/perf/jvmti/Makefile b/tools/perf/jvmti/Makefile
new file mode 100644 (file)
index 0000000..5968f83
--- /dev/null
@@ -0,0 +1,76 @@
+ARCH=$(shell uname -m)
+
+ifeq ($(ARCH), x86_64)
+JARCH=amd64
+endif
+ifeq ($(ARCH), armv7l)
+JARCH=armhf
+endif
+ifeq ($(ARCH), armv6l)
+JARCH=armhf
+endif
+ifeq ($(ARCH), aarch64)
+JARCH=aarch64
+endif
+ifeq ($(ARCH), ppc64)
+JARCH=powerpc
+endif
+ifeq ($(ARCH), ppc64le)
+JARCH=powerpc
+endif
+
+DESTDIR=/usr/local
+
+VERSION=1
+REVISION=0
+AGE=0
+
+LN=ln -sf
+RM=rm
+
+SLIBJVMTI=libjvmti.so.$(VERSION).$(REVISION).$(AGE)
+VLIBJVMTI=libjvmti.so.$(VERSION)
+SLDFLAGS=-shared -Wl,-soname -Wl,$(VLIBJVMTI)
+SOLIBEXT=so
+
+# The following works at least on Fedora 23; on other distros you may
+# need the commented-out update-java-alternatives line below instead.
+JDIR=$(shell alternatives --display java | tail -1 | cut -d' ' -f 5 | sed 's%/jre/bin/java.%%g')
+#JDIR=$(shell /usr/sbin/update-java-alternatives -l | head -1 | cut -d ' ' -f 3)
+# -lrt required in 32-bit mode for clock_gettime()
+LIBS=-lelf -lrt
+INCDIR=-I $(JDIR)/include -I $(JDIR)/include/linux
+
+TARGETS=$(SLIBJVMTI)
+
+SRCS=libjvmti.c jvmti_agent.c
+OBJS=$(SRCS:.c=.o)
+SOBJS=$(OBJS:.o=.lo)
+OPT=-O2 -g -Werror -Wall
+
+CFLAGS=$(INCDIR) $(OPT)
+
+all: $(TARGETS)
+
+.c.o:
+       $(CC) $(CFLAGS) -c $*.c
+.c.lo:
+       $(CC) -fPIC -DPIC $(CFLAGS) -c $*.c -o $*.lo
+
+$(OBJS) $(SOBJS): Makefile jvmti_agent.h ../util/jitdump.h
+
+$(SLIBJVMTI):  $(SOBJS)
+       $(CC) $(CFLAGS) $(SLDFLAGS)  -o $@ $(SOBJS) $(LIBS)
+       $(LN) $@ libjvmti.$(SOLIBEXT)
+
+clean:
+       $(RM) -f *.o *.so.* *.so *.lo
+
+install:
+       -mkdir -p $(DESTDIR)/lib
+       install -m 755 $(SLIBJVMTI) $(DESTDIR)/lib/
+       (cd $(DESTDIR)/lib; $(LN) $(SLIBJVMTI) $(VLIBJVMTI))
+       (cd $(DESTDIR)/lib; $(LN) $(SLIBJVMTI) libjvmti.$(SOLIBEXT))
+       ldconfig
+
+.SUFFIXES: .c .S .o .lo
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
new file mode 100644 (file)
index 0000000..6461e02
--- /dev/null
@@ -0,0 +1,465 @@
+/*
+ * jvmti_agent.c: JVMTI agent interface
+ *
+ * Adapted from the Oprofile code in opagent.c:
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Copyright 2007 OProfile authors
+ * Jens Wilke
+ * Daniel Hansel
+ * Copyright IBM Corporation 2007
+ */
+#include <sys/types.h>
+#include <sys/stat.h> /* for mkdir() */
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/mman.h>
+#include <syscall.h> /* for gettid() */
+#include <err.h>
+
+#include "jvmti_agent.h"
+#include "../util/jitdump.h"
+
+#define JIT_LANG "java"
+
+static char jit_path[PATH_MAX];
+static void *marker_addr;
+
+/*
+ * padding buffer
+ */
+static const char pad_bytes[7];
+
+static inline pid_t gettid(void)
+{
+       return (pid_t)syscall(__NR_gettid);
+}
+
+static int get_e_machine(struct jitheader *hdr)
+{
+       ssize_t sret;
+       char id[16];
+       int fd, ret = -1;
+       int m = -1;
+       struct {
+               uint16_t e_type;
+               uint16_t e_machine;
+       } info;
+
+       fd = open("/proc/self/exe", O_RDONLY);
+       if (fd == -1)
+               return -1;
+
+       sret = read(fd, id, sizeof(id));
+       if (sret != sizeof(id))
+               goto error;
+
+       /* check ELF signature */
+       if (id[0] != 0x7f || id[1] != 'E' || id[2] != 'L' || id[3] != 'F')
+               goto error;
+
+       sret = read(fd, &info, sizeof(info));
+       if (sret != sizeof(info))
+               goto error;
+
+       m = info.e_machine;
+       if (m < 0)
+               m = 0; /* ELF EM_NONE */
+
+       hdr->elf_mach = m;
+       ret = 0;
+error:
+       close(fd);
+       return ret;
+}
+
+#define NSEC_PER_SEC   1000000000
+static int perf_clk_id = CLOCK_MONOTONIC;
+
+static inline uint64_t
+timespec_to_ns(const struct timespec *ts)
+{
+        return ((uint64_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
+}
+
+static inline uint64_t
+perf_get_timestamp(void)
+{
+       struct timespec ts;
+       int ret;
+
+       ret = clock_gettime(perf_clk_id, &ts);
+       if (ret)
+               return 0;
+
+       return timespec_to_ns(&ts);
+}
+
+static int
+debug_cache_init(void)
+{
+       char str[32];
+       char *base, *p;
+       struct tm tm;
+       time_t t;
+       int ret;
+
+       time(&t);
+       localtime_r(&t, &tm);
+
+       base = getenv("JITDUMPDIR");
+       if (!base)
+               base = getenv("HOME");
+       if (!base)
+               base = ".";
+
+       strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
+
+       snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base);
+
+       ret = mkdir(jit_path, 0755);
+       if (ret == -1) {
+               if (errno != EEXIST) {
+                       warn("jvmti: cannot create jit cache dir %s", jit_path);
+                       return -1;
+               }
+       }
+
+       snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base);
+       ret = mkdir(jit_path, 0755);
+       if (ret == -1) {
+               if (errno != EEXIST) {
+                       warn("cannot create jit cache dir %s", jit_path);
+                       return -1;
+               }
+       }
+
+       snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str);
+
+       p = mkdtemp(jit_path);
+       if (p != jit_path) {
+               warn("cannot create jit cache dir %s", jit_path);
+               return -1;
+       }
+
+       return 0;
+}
+
+static int
+perf_open_marker_file(int fd)
+{
+       long pgsz;
+
+       pgsz = sysconf(_SC_PAGESIZE);
+       if (pgsz == -1)
+               return -1;
+
+       /*
+        * We mmap the jitdump file to create an MMAP RECORD in the perf.data
+        * file. The mmap is captured either live (perf record running when we
+        * mmap) or in deferred mode via /proc/PID/maps. The MMAP record is
+        * used as a marker of a jitdump file carrying more meta data about
+        * the jitted code; perf report/annotate detect this special filename
+        * and process the jitdump file.
+        *
+        * The mapping must be PROT_EXEC to ensure it is captured by perf
+        * record even when the -d option is not used.
+        */
+       marker_addr = mmap(NULL, pgsz, PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
+       return (marker_addr == MAP_FAILED) ? -1 : 0;
+}
+
+static void
+perf_close_marker_file(void)
+{
+       long pgsz;
+
+       if (!marker_addr)
+               return;
+
+       pgsz = sysconf(_SC_PAGESIZE);
+       if (pgsz == -1)
+               return;
+
+       munmap(marker_addr, pgsz);
+}
+
+void *jvmti_open(void)
+{
+       int pad_cnt;
+       char dump_path[PATH_MAX];
+       struct jitheader header;
+       int fd;
+       FILE *fp;
+
+       /*
+        * check if clockid is supported
+        */
+       if (!perf_get_timestamp())
+               warnx("jvmti: kernel does not support %d clock id", perf_clk_id);
+
+       memset(&header, 0, sizeof(header));
+
+       debug_cache_init();
+
+       /*
+        * jitdump file name
+        */
+       snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+
+       fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
+       if (fd == -1)
+               return NULL;
+
+       /*
+        * create perf.data marker for the jitdump file
+        */
+       if (perf_open_marker_file(fd)) {
+               warnx("jvmti: failed to create marker file");
+               return NULL;
+       }
+
+       fp = fdopen(fd, "w+");
+       if (!fp) {
+               warn("jvmti: cannot create %s", dump_path);
+               close(fd);
+               return NULL;
+       }
+
+       warnx("jvmti: jitdump in %s", dump_path);
+
+       if (get_e_machine(&header)) {
+               warn("get_e_machine failed\n");
+               goto error;
+       }
+
+       header.magic      = JITHEADER_MAGIC;
+       header.version    = JITHEADER_VERSION;
+       header.total_size = sizeof(header);
+       header.pid        = getpid();
+
+       /* calculate amount of padding '\0' */
+       pad_cnt = PADDING_8ALIGNED(header.total_size);
+       header.total_size += pad_cnt;
+
+       header.timestamp = perf_get_timestamp();
+
+       if (!fwrite(&header, sizeof(header), 1, fp)) {
+               warn("jvmti: cannot write dumpfile header");
+               goto error;
+       }
+
+       /* write padding '\0' if necessary */
+       if (pad_cnt && !fwrite(pad_bytes, pad_cnt, 1, fp)) {
+               warn("jvmti: cannot write dumpfile header padding");
+               goto error;
+       }
+
+       return fp;
+error:
+       fclose(fp);
+       return NULL;
+}
+
+int
+jvmti_close(void *agent)
+{
+       struct jr_code_close rec;
+       FILE *fp = agent;
+
+       if (!fp) {
+               warnx("jvmti: invalid fd in close_agent");
+               return -1;
+       }
+
+       rec.p.id = JIT_CODE_CLOSE;
+       rec.p.total_size = sizeof(rec);
+
+       rec.p.timestamp = perf_get_timestamp();
+
+       if (!fwrite(&rec, sizeof(rec), 1, fp))
+               return -1;
+
+       fclose(fp);
+
+       fp = NULL;
+
+       perf_close_marker_file();
+
+       return 0;
+}
+
+int
+jvmti_write_code(void *agent, char const *sym,
+       uint64_t vma, void const *code, unsigned int const size)
+{
+       static int code_generation = 1;
+       struct jr_code_load rec;
+       size_t sym_len;
+       size_t padding_count;
+       FILE *fp = agent;
+       int ret = -1;
+
+       /* don't care about 0 length function, no samples */
+       if (size == 0)
+               return 0;
+
+       if (!fp) {
+               warnx("jvmti: invalid fd in write_native_code");
+               return -1;
+       }
+
+       sym_len = strlen(sym) + 1;
+
+       rec.p.id           = JIT_CODE_LOAD;
+       rec.p.total_size   = sizeof(rec) + sym_len;
+       padding_count      = PADDING_8ALIGNED(rec.p.total_size);
+       rec.p.total_size  += padding_count;
+       rec.p.timestamp    = perf_get_timestamp();
+
+       rec.code_size  = size;
+       rec.vma        = vma;
+       rec.code_addr  = vma;
+       rec.pid        = getpid();
+       rec.tid        = gettid();
+
+       if (code)
+               rec.p.total_size += size;
+
+       /*
+        * If the JVM is multi-threaded, multiple concurrent calls to the
+        * agent may be possible, so protect file writes
+        */
+       flockfile(fp);
+
+       /*
+        * get code index inside lock to avoid race condition
+        */
+       rec.code_index = code_generation++;
+
+       ret = fwrite_unlocked(&rec, sizeof(rec), 1, fp);
+       fwrite_unlocked(sym, sym_len, 1, fp);
+
+       if (padding_count)
+               fwrite_unlocked(pad_bytes, padding_count, 1, fp);
+
+       if (code)
+               fwrite_unlocked(code, size, 1, fp);
+
+       funlockfile(fp);
+
+       ret = 0;
+
+       return ret;
+}
+
+int
+jvmti_write_debug_info(void *agent, uint64_t code, const char *file,
+                      jvmti_line_info_t *li, int nr_lines)
+{
+       struct jr_code_debug_info rec;
+       size_t sret, len, size, flen;
+       size_t padding_count;
+       uint64_t addr;
+       const char *fn = file;
+       FILE *fp = agent;
+       int i;
+
+       /*
+        * no entry to write
+        */
+       if (!nr_lines)
+               return 0;
+
+       if (!fp) {
+               warnx("jvmti: invalid fd in write_debug_info");
+               return -1;
+       }
+
+       flen = strlen(file) + 1;
+
+       rec.p.id        = JIT_CODE_DEBUG_INFO;
+       size            = sizeof(rec);
+       rec.p.timestamp = perf_get_timestamp();
+       rec.code_addr   = (uint64_t)(uintptr_t)code;
+       rec.nr_entry    = nr_lines;
+
+       /*
+        * on disk source line info layout:
+        * uint64_t : addr
+        * int      : line number
+        * int      : column discriminator
+        * file[]   : source file name
+        * padding  : pad to multiple of 8 bytes
+        */
+       size += nr_lines * sizeof(struct debug_entry);
+       size += flen * nr_lines;
+       /*
+        * pad to 8 bytes
+        */
+       padding_count = PADDING_8ALIGNED(size);
+
+       rec.p.total_size = size + padding_count;
+
+       /*
+        * If the JVM is multi-threaded, multiple concurrent calls to the
+        * agent may be possible, so protect file writes
+        */
+       flockfile(fp);
+
+       sret = fwrite_unlocked(&rec, sizeof(rec), 1, fp);
+       if (sret != 1)
+               goto error;
+
+       for (i = 0; i < nr_lines; i++) {
+
+               addr = (uint64_t)li[i].pc;
+               len  = sizeof(addr);
+               sret = fwrite_unlocked(&addr, len, 1, fp);
+               if (sret != 1)
+                       goto error;
+
+               len  = sizeof(li[0].line_number);
+               sret = fwrite_unlocked(&li[i].line_number, len, 1, fp);
+               if (sret != 1)
+                       goto error;
+
+               len  = sizeof(li[0].discrim);
+               sret = fwrite_unlocked(&li[i].discrim, len, 1, fp);
+               if (sret != 1)
+                       goto error;
+
+               sret = fwrite_unlocked(fn, flen, 1, fp);
+               if (sret != 1)
+                       goto error;
+       }
+       if (padding_count) {
+               sret = fwrite_unlocked(pad_bytes, padding_count, 1, fp);
+               if (sret != 1)
+                       goto error;
+       }
+
+       funlockfile(fp);
+       return 0;
+error:
+       funlockfile(fp);
+       return -1;
+}
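
Every record the agent writes above (the file header, JIT_CODE_LOAD and JIT_CODE_DEBUG_INFO) has its total_size rounded up to an 8-byte boundary, with the gap filled from pad_bytes. PADDING_8ALIGNED() is defined in tools/perf/util/jitdump.h, which is not shown in this hunk; the sketch below uses PAD_TO_8(), an assumed equivalent, purely to illustrate the arithmetic:

/*
 * Standalone sketch of the jitdump record padding. PAD_TO_8() is an
 * assumed equivalent of PADDING_8ALIGNED() from util/jitdump.h.
 */
#include <stdio.h>

#define PAD_TO_8(sz) ((8 - ((sz) & 7)) & 7)

int main(void)
{
	unsigned int sizes[] = { 40, 41, 47, 48 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned int pad = PAD_TO_8(sizes[i]);

		/* a record of this size is followed by 'pad' zero bytes */
		printf("record size %u -> pad %u -> on-disk %u\n",
		       sizes[i], pad, sizes[i] + pad);
	}
	return 0;
}

Keeping every record 8-byte aligned lets a consumer of the dump seek from one record header straight to the next without re-parsing the payload byte by byte.
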
diff --git a/tools/perf/jvmti/jvmti_agent.h b/tools/perf/jvmti/jvmti_agent.h
new file mode 100644 (file)
index 0000000..bedf5d0
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef __JVMTI_AGENT_H__
+#define __JVMTI_AGENT_H__
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <jvmti.h>
+
+#define __unused __attribute__((unused))
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+typedef struct {
+       unsigned long   pc;
+       int             line_number;
+       int             discrim; /* discriminator -- 0 for now */
+} jvmti_line_info_t;
+
+void *jvmti_open(void);
+int   jvmti_close(void *agent);
+int   jvmti_write_code(void *agent, char const *symbol_name,
+                      uint64_t vma, void const *code,
+                      const unsigned int code_size);
+
+int   jvmti_write_debug_info(void *agent,
+                            uint64_t code,
+                            const char *file,
+                            jvmti_line_info_t *li,
+                            int nr_lines);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __JVMTI_AGENT_H__ */
diff --git a/tools/perf/jvmti/libjvmti.c b/tools/perf/jvmti/libjvmti.c
new file mode 100644 (file)
index 0000000..ac12e4b
--- /dev/null
@@ -0,0 +1,304 @@
+#include <sys/types.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <err.h>
+#include <jvmti.h>
+#include <jvmticmlr.h>
+#include <limits.h>
+
+#include "jvmti_agent.h"
+
+static int has_line_numbers;
+void *jvmti_agent;
+
+static jvmtiError
+do_get_line_numbers(jvmtiEnv *jvmti, void *pc, jmethodID m, jint bci,
+                   jvmti_line_info_t *tab, jint *nr)
+{
+       jint i, lines = 0;
+       jint nr_lines = 0;
+       jvmtiLineNumberEntry *loc_tab = NULL;
+       jvmtiError ret;
+
+       ret = (*jvmti)->GetLineNumberTable(jvmti, m, &nr_lines, &loc_tab);
+       if (ret != JVMTI_ERROR_NONE)
+               return ret;
+
+       for (i = 0; i < nr_lines; i++) {
+               if (loc_tab[i].start_location < bci) {
+                       tab[lines].pc = (unsigned long)pc;
+                       tab[lines].line_number = loc_tab[i].line_number;
+                       tab[lines].discrim = 0; /* not yet used */
+                       lines++;
+               } else {
+                       break;
+               }
+       }
+       (*jvmti)->Deallocate(jvmti, (unsigned char *)loc_tab);
+       *nr = lines;
+       return JVMTI_ERROR_NONE;
+}
+
+static jvmtiError
+get_line_numbers(jvmtiEnv *jvmti, const void *compile_info, jvmti_line_info_t **tab, int *nr_lines)
+{
+       const jvmtiCompiledMethodLoadRecordHeader *hdr;
+       jvmtiCompiledMethodLoadInlineRecord *rec;
+       jvmtiLineNumberEntry *lne = NULL;
+       PCStackInfo *c;
+       jint nr, ret;
+       int nr_total = 0;
+       int i, lines_total = 0;
+
+       if (!(tab && nr_lines))
+               return JVMTI_ERROR_NULL_POINTER;
+
+       /*
+        * Phase 1 -- get the number of lines necessary
+        */
+       for (hdr = compile_info; hdr != NULL; hdr = hdr->next) {
+               if (hdr->kind == JVMTI_CMLR_INLINE_INFO) {
+                       rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
+                       for (i = 0; i < rec->numpcs; i++) {
+                               c = rec->pcinfo + i;
+                               nr = 0;
+                               /*
+                                * unfortunately, need a tab to get the number of lines!
+                                */
+                               ret = (*jvmti)->GetLineNumberTable(jvmti, c->methods[0], &nr, &lne);
+                               if (ret == JVMTI_ERROR_NONE) {
+                                       /* free what was allocated for nothing */
+                                       (*jvmti)->Deallocate(jvmti, (unsigned char *)lne);
+                                       nr_total += (int)nr;
+                               }
+                       }
+               }
+       }
+
+       if (nr_total == 0)
+               return JVMTI_ERROR_NOT_FOUND;
+
+       /*
+        * Phase 2 -- allocate big enough line table
+        */
+       *tab = malloc(nr_total * sizeof(**tab));
+       if (!*tab)
+               return JVMTI_ERROR_OUT_OF_MEMORY;
+
+       for (hdr = compile_info; hdr != NULL; hdr = hdr->next) {
+               if (hdr->kind == JVMTI_CMLR_INLINE_INFO) {
+                       rec = (jvmtiCompiledMethodLoadInlineRecord *)hdr;
+                       for (i = 0; i < rec->numpcs; i++) {
+                               c = rec->pcinfo + i;
+                               nr = 0;
+                               ret = do_get_line_numbers(jvmti, c->pc,
+                                                         c->methods[0],
+                                                         c->bcis[0],
+                                                         *tab + lines_total,
+                                                         &nr);
+                               if (ret == JVMTI_ERROR_NONE)
+                                       lines_total += nr;
+                       }
+               }
+       }
+       *nr_lines = lines_total;
+       return JVMTI_ERROR_NONE;
+}
+
+static void JNICALL
+compiled_method_load_cb(jvmtiEnv *jvmti,
+                       jmethodID method,
+                       jint code_size,
+                       void const *code_addr,
+                       jint map_length,
+                       jvmtiAddrLocationMap const *map,
+                       const void *compile_info)
+{
+       jvmti_line_info_t *line_tab = NULL;
+       jclass decl_class;
+       char *class_sign = NULL;
+       char *func_name = NULL;
+       char *func_sign = NULL;
+       char *file_name = NULL;
+       char fn[PATH_MAX];
+       uint64_t addr = (uint64_t)(uintptr_t)code_addr;
+       jvmtiError ret;
+       int nr_lines = 0; /* in line_tab[] */
+       size_t len;
+
+       ret = (*jvmti)->GetMethodDeclaringClass(jvmti, method,
+                                               &decl_class);
+       if (ret != JVMTI_ERROR_NONE) {
+               warnx("jvmti: cannot get declaring class");
+               return;
+       }
+
+       if (has_line_numbers && map && map_length) {
+               ret = get_line_numbers(jvmti, compile_info, &line_tab, &nr_lines);
+               if (ret != JVMTI_ERROR_NONE) {
+                       warnx("jvmti: cannot get line table for method");
+                       nr_lines = 0;
+               }
+       }
+
+       ret = (*jvmti)->GetSourceFileName(jvmti, decl_class, &file_name);
+       if (ret != JVMTI_ERROR_NONE) {
+               warnx("jvmti: cannot get source filename ret=%d", ret);
+               goto error;
+       }
+
+       ret = (*jvmti)->GetClassSignature(jvmti, decl_class,
+                                         &class_sign, NULL);
+       if (ret != JVMTI_ERROR_NONE) {
+               warnx("jvmti: GetClassSignature failed");
+               goto error;
+       }
+
+       ret = (*jvmti)->GetMethodName(jvmti, method, &func_name,
+                                     &func_sign, NULL);
+       if (ret != JVMTI_ERROR_NONE) {
+               warnx("jvmti: GetMethodName failed");
+               goto error;
+       }
+
+       /*
+        * Assume the path name mirrors the class hierarchy; this is common practice with Java programs
+        */
+       if (*class_sign == 'L') {
+               int j, i = 0;
+               char *p = strrchr(class_sign, '/');
+               if (p) {
+                       /* drop the 'L' prefix and copy up to the final '/' */
+                       for (i = 0; i < (p - class_sign); i++)
+                               fn[i] = class_sign[i+1];
+               }
+               /*
+                * append file name, we use loops and not string ops to avoid modifying
+                * class_sign which is used later for the symbol name
+                */
+               for (j = 0; i < (PATH_MAX - 1) && file_name && j < strlen(file_name); j++, i++)
+                       fn[i] = file_name[j];
+               fn[i] = '\0';
+       } else {
+               /* fallback case */
+               strcpy(fn, file_name);
+       }
+       /*
+        * write source line info record if we have it
+        */
+       if (jvmti_write_debug_info(jvmti_agent, addr, fn, line_tab, nr_lines))
+               warnx("jvmti: write_debug_info() failed");
+
+       len = strlen(func_name) + strlen(class_sign) + strlen(func_sign) + 2;
+       {
+               char str[len];
+               snprintf(str, len, "%s%s%s", class_sign, func_name, func_sign);
+
+               if (jvmti_write_code(jvmti_agent, str, addr, code_addr, code_size))
+                       warnx("jvmti: write_code() failed");
+       }
+error:
+       (*jvmti)->Deallocate(jvmti, (unsigned char *)func_name);
+       (*jvmti)->Deallocate(jvmti, (unsigned char *)func_sign);
+       (*jvmti)->Deallocate(jvmti, (unsigned char *)class_sign);
+       (*jvmti)->Deallocate(jvmti, (unsigned char *)file_name);
+       free(line_tab);
+}
+
+static void JNICALL
+code_generated_cb(jvmtiEnv *jvmti,
+                 char const *name,
+                 void const *code_addr,
+                 jint code_size)
+{
+       uint64_t addr = (uint64_t)(unsigned long)code_addr;
+       int ret;
+
+       ret = jvmti_write_code(jvmti_agent, name, addr, code_addr, code_size);
+       if (ret)
+               warnx("jvmti: write_code() failed for code_generated");
+}
+
+JNIEXPORT jint JNICALL
+Agent_OnLoad(JavaVM *jvm, char *options, void *reserved __unused)
+{
+       jvmtiEventCallbacks cb;
+       jvmtiCapabilities caps1;
+       jvmtiJlocationFormat format;
+       jvmtiEnv *jvmti = NULL;
+       jint ret;
+
+       jvmti_agent = jvmti_open();
+       if (!jvmti_agent) {
+               warnx("jvmti: open_agent failed");
+               return -1;
+       }
+
+       /*
+        * Request a JVMTI interface version 1 environment
+        */
+       ret = (*jvm)->GetEnv(jvm, (void *)&jvmti, JVMTI_VERSION_1);
+       if (ret != JNI_OK) {
+               warnx("jvmti: jvmti version 1 not supported");
+               return -1;
+       }
+
+       /*
+        * acquire method_load capability, we require it
+        * request line numbers (optional)
+        */
+       memset(&caps1, 0, sizeof(caps1));
+       caps1.can_generate_compiled_method_load_events = 1;
+
+       ret = (*jvmti)->AddCapabilities(jvmti, &caps1);
+       if (ret != JVMTI_ERROR_NONE) {
+               warnx("jvmti: acquire compiled_method capability failed");
+               return -1;
+       }
+       ret = (*jvmti)->GetJLocationFormat(jvmti, &format);
+       if (ret == JVMTI_ERROR_NONE && format == JVMTI_JLOCATION_JVMBCI) {
+               memset(&caps1, 0, sizeof(caps1));
+               caps1.can_get_line_numbers = 1;
+               caps1.can_get_source_file_name = 1;
+               ret = (*jvmti)->AddCapabilities(jvmti, &caps1);
+               if (ret == JVMTI_ERROR_NONE)
+                       has_line_numbers = 1;
+       }
+
+       memset(&cb, 0, sizeof(cb));
+
+       cb.CompiledMethodLoad   = compiled_method_load_cb;
+       cb.DynamicCodeGenerated = code_generated_cb;
+
+       ret = (*jvmti)->SetEventCallbacks(jvmti, &cb, sizeof(cb));
+       if (ret != JVMTI_ERROR_NONE) {
+               warnx("jvmti: cannot set event callbacks");
+               return -1;
+       }
+
+       ret = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
+                       JVMTI_EVENT_COMPILED_METHOD_LOAD, NULL);
+       if (ret != JVMTI_ERROR_NONE) {
+               warnx("jvmti: setnotification failed for method_load");
+               return -1;
+       }
+
+       ret = (*jvmti)->SetEventNotificationMode(jvmti, JVMTI_ENABLE,
+                       JVMTI_EVENT_DYNAMIC_CODE_GENERATED, NULL);
+       if (ret != JVMTI_ERROR_NONE) {
+               warnx("jvmti: setnotification failed on code_generated");
+               return -1;
+       }
+       return 0;
+}
+
+JNIEXPORT void JNICALL
+Agent_OnUnload(JavaVM *jvm __unused)
+{
+       int ret;
+
+       ret = jvmti_close(jvmti_agent);
+       if (ret)
+               errx(1, "Error: op_close_agent()");
+}
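
compiled_method_load_cb() above rebuilds a source path from the JVMTI class signature (of the form "Ljava/util/HashMap;") plus the file name returned by GetSourceFileName(), and records the symbol as class signature + method name + method signature. The standalone sketch below replays only the path construction; build_path() is a hypothetical helper written for illustration, not part of the agent:

/*
 * Sketch of the source-path construction done in compiled_method_load_cb():
 * drop the leading 'L' of the class signature, keep everything up to the
 * last '/', then append the source file name. (build_path() is hypothetical.)
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>

static void build_path(char *fn, const char *class_sign, const char *file_name)
{
	size_t i = 0;

	if (class_sign && *class_sign == 'L') {
		const char *p = strrchr(class_sign, '/');

		if (p) {
			/* "Ljava/util/HashMap;" -> "java/util/" */
			for (i = 0; i < (size_t)(p - class_sign) && i < PATH_MAX - 1; i++)
				fn[i] = class_sign[i + 1];
		}
	}
	snprintf(fn + i, PATH_MAX - i, "%s", file_name ? file_name : "");
}

int main(void)
{
	char fn[PATH_MAX];

	build_path(fn, "Ljava/util/HashMap;", "HashMap.java");
	printf("%s\n", fn);	/* prints: java/util/HashMap.java */
	return 0;
}

The resulting path is what jvmti_write_debug_info() stores next to each address/line pair, so the jitdump consumer can map addresses back to Java source lines.
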
index bf016c439fbd10a3cada60253c98d60ce441aa98..8cc30e731c739495f3eb61a0da9a76246ad35600 100644 (file)
@@ -1,3 +1,4 @@
 llvm-src-base.c
 llvm-src-kbuild.c
 llvm-src-prologue.c
+llvm-src-relocation.c
index 614899b88b377e07f9615d618ba7045f66051bea..1ba628ed049adbafc27c7b8900ecb838165a2aa7 100644 (file)
@@ -31,7 +31,7 @@ perf-y += sample-parsing.o
 perf-y += parse-no-sample-id-all.o
 perf-y += kmod-path.o
 perf-y += thread-map.o
-perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o
+perf-y += llvm.o llvm-src-base.o llvm-src-kbuild.o llvm-src-prologue.o llvm-src-relocation.o
 perf-y += bpf.o
 perf-y += topology.o
 perf-y += cpumap.o
@@ -59,6 +59,13 @@ $(OUTPUT)tests/llvm-src-prologue.c: tests/bpf-script-test-prologue.c tests/Build
        $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
        $(Q)echo ';' >> $@
 
+$(OUTPUT)tests/llvm-src-relocation.c: tests/bpf-script-test-relocation.c tests/Build
+       $(call rule_mkdir)
+       $(Q)echo '#include <tests/llvm.h>' > $@
+       $(Q)echo 'const char test_llvm__bpf_test_relocation[] =' >> $@
+       $(Q)sed -e 's/"/\\"/g' -e 's/\(.*\)/"\1\\n"/g' $< >> $@
+       $(Q)echo ';' >> $@
+
 ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
 endif
index fb80c9eb6a95b67947b23adaeca69b5e57bac704..1d1bb489b4e82aede11386549831c82860d76c00 100644 (file)
 
 static int fd1;
 static int fd2;
+static int fd3;
 static int overflows;
+static int overflows_2;
+
+volatile long the_var;
+
+
+/*
+ * Use ASM to ensure watchpoint and breakpoint can be triggered
+ * by the same instruction.
+ */
+#if defined (__x86_64__)
+extern void __test_function(volatile long *ptr);
+asm (
+       ".globl __test_function\n"
+       "__test_function:\n"
+       "incq (%rdi)\n"
+       "ret\n");
+#elif defined (__aarch64__)
+extern void __test_function(volatile long *ptr);
+asm (
+       ".globl __test_function\n"
+       "__test_function:\n"
+       "str x30, [x0]\n"
+       "ret\n");
+
+#else
+static void __test_function(volatile long *ptr)
+{
+       *ptr = 0x1234;
+}
+#endif
 
 __attribute__ ((noinline))
 static int test_function(void)
 {
+       __test_function(&the_var);
+       the_var++;
        return time(NULL);
 }
 
+static void sig_handler_2(int signum __maybe_unused,
+                         siginfo_t *oh __maybe_unused,
+                         void *uc __maybe_unused)
+{
+       overflows_2++;
+       if (overflows_2 > 10) {
+               ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
+               ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
+               ioctl(fd3, PERF_EVENT_IOC_DISABLE, 0);
+       }
+}
+
 static void sig_handler(int signum __maybe_unused,
                        siginfo_t *oh __maybe_unused,
                        void *uc __maybe_unused)
@@ -54,10 +99,11 @@ static void sig_handler(int signum __maybe_unused,
                 */
                ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
                ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
+               ioctl(fd3, PERF_EVENT_IOC_DISABLE, 0);
        }
 }
 
-static int bp_event(void *fn, int setup_signal)
+static int __event(bool is_x, void *addr, int signal)
 {
        struct perf_event_attr pe;
        int fd;
@@ -67,8 +113,8 @@ static int bp_event(void *fn, int setup_signal)
        pe.size = sizeof(struct perf_event_attr);
 
        pe.config = 0;
-       pe.bp_type = HW_BREAKPOINT_X;
-       pe.bp_addr = (unsigned long) fn;
+       pe.bp_type = is_x ? HW_BREAKPOINT_X : HW_BREAKPOINT_W;
+       pe.bp_addr = (unsigned long) addr;
        pe.bp_len = sizeof(long);
 
        pe.sample_period = 1;
@@ -86,17 +132,25 @@ static int bp_event(void *fn, int setup_signal)
                return TEST_FAIL;
        }
 
-       if (setup_signal) {
-               fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
-               fcntl(fd, F_SETSIG, SIGIO);
-               fcntl(fd, F_SETOWN, getpid());
-       }
+       fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
+       fcntl(fd, F_SETSIG, signal);
+       fcntl(fd, F_SETOWN, getpid());
 
        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 
        return fd;
 }
 
+static int bp_event(void *addr, int signal)
+{
+       return __event(true, addr, signal);
+}
+
+static int wp_event(void *addr, int signal)
+{
+       return __event(false, addr, signal);
+}
+
 static long long bp_count(int fd)
 {
        long long count;
@@ -114,7 +168,7 @@ static long long bp_count(int fd)
 int test__bp_signal(int subtest __maybe_unused)
 {
        struct sigaction sa;
-       long long count1, count2;
+       long long count1, count2, count3;
 
        /* setup SIGIO signal handler */
        memset(&sa, 0, sizeof(struct sigaction));
@@ -126,21 +180,52 @@ int test__bp_signal(int subtest __maybe_unused)
                return TEST_FAIL;
        }
 
+       sa.sa_sigaction = (void *) sig_handler_2;
+       if (sigaction(SIGUSR1, &sa, NULL) < 0) {
+               pr_debug("failed setting up signal handler 2\n");
+               return TEST_FAIL;
+       }
+
        /*
         * We create following events:
         *
-        * fd1 - breakpoint event on test_function with SIGIO
+        * fd1 - breakpoint event on __test_function with SIGIO
         *       signal configured. We should get signal
         *       notification each time the breakpoint is hit
         *
-        * fd2 - breakpoint event on sig_handler without SIGIO
+        * fd2 - breakpoint event on sig_handler with SIGUSR1
+        *       configured. We should get SIGUSR1 each time when
+        *       breakpoint is hit
+        *
+        * fd3 - watchpoint event on __test_function with SIGIO
         *       configured.
         *
         * Following processing should happen:
-        *   - execute test_function
-        *   - fd1 event breakpoint hit -> count1 == 1
-        *   - SIGIO is delivered       -> overflows == 1
-        *   - fd2 event breakpoint hit -> count2 == 1
+        *   Exec:               Action:                       Result:
+        *   incq (%rdi)       - fd1 event breakpoint hit   -> count1 == 1
+        *                     - SIGIO is delivered
+        *   sig_handler       - fd2 event breakpoint hit   -> count2 == 1
+        *                     - SIGUSR1 is delivered
+        *   sig_handler_2                                  -> overflows_2 == 1  (nested signal)
+        *   sys_rt_sigreturn  - return from sig_handler_2
+        *   overflows++                                    -> overflows = 1
+        *   sys_rt_sigreturn  - return from sig_handler
+        *   incq (%rdi)       - fd3 event watchpoint hit   -> count3 == 1       (wp and bp in one insn)
+        *                     - SIGIO is delivered
+        *   sig_handler       - fd2 event breakpoint hit   -> count2 == 2
+        *                     - SIGUSR1 is delivered
+        *   sig_handler_2                                  -> overflows_2 == 2  (nested signal)
+        *   sys_rt_sigreturn  - return from sig_handler_2
+        *   overflows++                                    -> overflows = 2
+        *   sys_rt_sigreturn  - return from sig_handler
+        *   the_var++         - fd3 event watchpoint hit   -> count3 == 2       (standalone watchpoint)
+        *                     - SIGIO is delivered
+        *   sig_handler       - fd2 event breakpoint hit   -> count2 == 3
+        *                     - SIGUSR1 is delivered
+        *   sig_handler_2                                  -> overflows_2 == 3  (nested signal)
+        *   sys_rt_sigreturn  - return from sig_handler_2
+        *   overflows++                                    -> overflows == 3
+        *   sys_rt_sigreturn  - return from sig_handler
         *
         * The test case check following error conditions:
         * - we get stuck in signal handler because of debug
@@ -152,11 +237,13 @@ int test__bp_signal(int subtest __maybe_unused)
         *
         */
 
-       fd1 = bp_event(test_function, 1);
-       fd2 = bp_event(sig_handler, 0);
+       fd1 = bp_event(__test_function, SIGIO);
+       fd2 = bp_event(sig_handler, SIGUSR1);
+       fd3 = wp_event((void *)&the_var, SIGIO);
 
        ioctl(fd1, PERF_EVENT_IOC_ENABLE, 0);
        ioctl(fd2, PERF_EVENT_IOC_ENABLE, 0);
+       ioctl(fd3, PERF_EVENT_IOC_ENABLE, 0);
 
        /*
         * Kick off the test by triggering 'fd1'
@@ -166,15 +253,18 @@ int test__bp_signal(int subtest __maybe_unused)
 
        ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);
        ioctl(fd2, PERF_EVENT_IOC_DISABLE, 0);
+       ioctl(fd3, PERF_EVENT_IOC_DISABLE, 0);
 
        count1 = bp_count(fd1);
        count2 = bp_count(fd2);
+       count3 = bp_count(fd3);
 
        close(fd1);
        close(fd2);
+       close(fd3);
 
-       pr_debug("count1 %lld, count2 %lld, overflow %d\n",
-                count1, count2, overflows);
+       pr_debug("count1 %lld, count2 %lld, count3 %lld, overflow %d, overflows_2 %d\n",
+                count1, count2, count3, overflows, overflows_2);
 
        if (count1 != 1) {
                if (count1 == 11)
@@ -183,12 +273,18 @@ int test__bp_signal(int subtest __maybe_unused)
                        pr_debug("failed: wrong count for bp1%lld\n", count1);
        }
 
-       if (overflows != 1)
+       if (overflows != 3)
                pr_debug("failed: wrong overflow hit\n");
 
-       if (count2 != 1)
+       if (overflows_2 != 3)
+               pr_debug("failed: wrong overflow_2 hit\n");
+
+       if (count2 != 3)
                pr_debug("failed: wrong count for bp2\n");
 
-       return count1 == 1 && overflows == 1 && count2 == 1 ?
+       if (count3 != 2)
+               pr_debug("failed: wrong count for bp3\n");
+
+       return count1 == 1 && overflows == 3 && count2 == 3 && overflows_2 == 3 && count3 == 2 ?
                TEST_OK : TEST_FAIL;
 }
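
The reworked test opens two breakpoints and one write watchpoint through __event(), which fills a perf_event_attr and then goes through perf's own perf_event_open wrapper in code outside this hunk. The sketch below opens the fd3-style watchpoint on the_var with the raw syscall instead; the attr fields not visible in the hunk (type, exclude_kernel, exclude_hv) are assumptions about what a working standalone setup needs:

/*
 * Standalone sketch: open a write watchpoint on 'the_var' via the raw
 * perf_event_open syscall. Field choices beyond the hunk (type, exclude_*)
 * are assumptions for a self-contained example.
 */
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static volatile long the_var;

int main(void)
{
	struct perf_event_attr pe;
	int fd;

	memset(&pe, 0, sizeof(pe));
	pe.type           = PERF_TYPE_BREAKPOINT;
	pe.size           = sizeof(pe);
	pe.bp_type        = HW_BREAKPOINT_W;	/* fire on writes, like fd3 */
	pe.bp_addr        = (unsigned long)&the_var;
	pe.bp_len         = sizeof(long);
	pe.sample_period  = 1;
	pe.disabled       = 1;
	pe.exclude_kernel = 1;
	pe.exclude_hv     = 1;

	/* pid 0 = self, cpu -1 = any, no group leader, no flags */
	fd = syscall(__NR_perf_event_open, &pe, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	the_var++;		/* would count once the event is enabled */
	close(fd);
	return 0;
}

In the test itself, the fcntl(F_SETFL/F_SETSIG/F_SETOWN) calls then turn every overflow into a SIGIO or SIGUSR1 delivery, which is what drives the overflows and overflows_2 counters checked at the end.
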
diff --git a/tools/perf/tests/bpf-script-test-relocation.c b/tools/perf/tests/bpf-script-test-relocation.c
new file mode 100644 (file)
index 0000000..93af774
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * bpf-script-test-relocation.c
+ * Test BPF loader checking relocation
+ */
+#ifndef LINUX_VERSION_CODE
+# error Need LINUX_VERSION_CODE
+# error Example: for 4.2 kernel, put 'clang-opt="-DLINUX_VERSION_CODE=0x40200" into llvm section of ~/.perfconfig'
+#endif
+#define BPF_ANY 0
+#define BPF_MAP_TYPE_ARRAY 2
+#define BPF_FUNC_map_lookup_elem 1
+#define BPF_FUNC_map_update_elem 2
+
+static void *(*bpf_map_lookup_elem)(void *map, void *key) =
+       (void *) BPF_FUNC_map_lookup_elem;
+static void *(*bpf_map_update_elem)(void *map, void *key, void *value, int flags) =
+       (void *) BPF_FUNC_map_update_elem;
+
+struct bpf_map_def {
+       unsigned int type;
+       unsigned int key_size;
+       unsigned int value_size;
+       unsigned int max_entries;
+};
+
+#define SEC(NAME) __attribute__((section(NAME), used))
+struct bpf_map_def SEC("maps") my_table = {
+       .type = BPF_MAP_TYPE_ARRAY,
+       .key_size = sizeof(int),
+       .value_size = sizeof(int),
+       .max_entries = 1,
+};
+
+int this_is_a_global_val;
+
+SEC("func=sys_write")
+int bpf_func__sys_write(void *ctx)
+{
+       int key = 0;
+       int value = 0;
+
+       /*
+        * Incorrect relocation. Should not allow this program to be
+        * loaded into the kernel.
+        */
+       bpf_map_update_elem(&this_is_a_global_val, &key, &value, 0);
+       return 0;
+}
+char _license[] SEC("license") = "GPL";
+int _version SEC("version") = LINUX_VERSION_CODE;
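
The program above is broken on purpose: the map helper call is relocated against an ordinary global (this_is_a_global_val), and the test expects libbpf to refuse to load the object. For contrast, and reusing the SEC() macro, helper pointers, and my_table map defined in the same source, a correct body would look roughly like this (a sketch, not the upstream test):

SEC("func=sys_write")
int bpf_func__sys_write(void *ctx)
{
	int key = 0;
	int value = 0;

	/* sketch: relocating against the map in the "maps" section is accepted */
	bpf_map_update_elem(&my_table, &key, &value, BPF_ANY);
	return 0;
}
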
index 33689a0cf821e5b9608e54b981e3280ee580890c..4aed5cb4ac2d393e53ffe031c6deff748ba8636b 100644 (file)
@@ -1,7 +1,11 @@
 #include <stdio.h>
 #include <sys/epoll.h>
+#include <util/util.h>
 #include <util/bpf-loader.h>
 #include <util/evlist.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <bpf/bpf.h>
 #include "tests.h"
 #include "llvm.h"
 #include "debug.h"
@@ -71,6 +75,15 @@ static struct {
                (NR_ITERS + 1) / 4,
        },
 #endif
+       {
+               LLVM_TESTCASE_BPF_RELOCATION,
+               "Test BPF relocation checker",
+               "[bpf_relocation_test]",
+               "fix 'perf test LLVM' first",
+               "libbpf error when dealing with relocation",
+               NULL,
+               0,
+       },
 };
 
 static int do_test(struct bpf_object *obj, int (*func)(void),
@@ -190,7 +203,7 @@ static int __test__bpf(int idx)
 
        ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
                                       bpf_testcase_table[idx].prog_id,
-                                      true);
+                                      true, NULL);
        if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
                pr_debug("Unable to get BPF object, %s\n",
                         bpf_testcase_table[idx].msg_compile_fail);
@@ -202,14 +215,21 @@ static int __test__bpf(int idx)
 
        obj = prepare_bpf(obj_buf, obj_buf_sz,
                          bpf_testcase_table[idx].name);
-       if (!obj) {
+       if ((!!bpf_testcase_table[idx].target_func) != (!!obj)) {
+               if (!obj)
+                       pr_debug("Fail to load BPF object: %s\n",
+                                bpf_testcase_table[idx].msg_load_fail);
+               else
+                       pr_debug("Success unexpectedly: %s\n",
+                                bpf_testcase_table[idx].msg_load_fail);
                ret = TEST_FAIL;
                goto out;
        }
 
-       ret = do_test(obj,
-                     bpf_testcase_table[idx].target_func,
-                     bpf_testcase_table[idx].expect_result);
+       if (obj)
+               ret = do_test(obj,
+                             bpf_testcase_table[idx].target_func,
+                             bpf_testcase_table[idx].expect_result);
 out:
        bpf__clear();
        return ret;
@@ -227,6 +247,36 @@ const char *test__bpf_subtest_get_desc(int i)
        return bpf_testcase_table[i].desc;
 }
 
+static int check_env(void)
+{
+       int err;
+       unsigned int kver_int;
+       char license[] = "GPL";
+
+       struct bpf_insn insns[] = {
+               BPF_MOV64_IMM(BPF_REG_0, 1),
+               BPF_EXIT_INSN(),
+       };
+
+       err = fetch_kernel_version(&kver_int, NULL, 0);
+       if (err) {
+               pr_debug("Unable to get kernel version\n");
+               return err;
+       }
+
+       err = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
+                              sizeof(insns) / sizeof(insns[0]),
+                              license, kver_int, NULL, 0);
+       if (err < 0) {
+               pr_err("Missing basic BPF support, skip this test: %s\n",
+                      strerror(errno));
+               return err;
+       }
+       close(err);
+
+       return 0;
+}
+
 int test__bpf(int i)
 {
        int err;
@@ -239,6 +289,9 @@ int test__bpf(int i)
                return TEST_SKIP;
        }
 
+       if (check_env())
+               return TEST_SKIP;
+
        err = __test__bpf(i);
        return err;
 }
index 5e6a86e50fb97aae648c16ce627157e8377ebb02..ecf136c385d5ff2f21111e813a9ce1e30c03365a 100644 (file)
@@ -191,7 +191,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
         * function since TEST_ASSERT_VAL() returns in case of failure.
         */
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists, NULL);
+       perf_evsel__output_resort(hists_to_evsel(hists), NULL);
 
        if (verbose > 2) {
                pr_info("use callchain: %d, cumulate callchain: %d\n",
index 351a42463444a3df9f80daea3848499b8d39659a..34b945a55d4d2864cc561f36275191f57f84210a 100644 (file)
@@ -145,7 +145,7 @@ int test__hists_filter(int subtest __maybe_unused)
                struct hists *hists = evsel__hists(evsel);
 
                hists__collapse_resort(hists, NULL);
-               hists__output_resort(hists, NULL);
+               perf_evsel__output_resort(evsel, NULL);
 
                if (verbose > 2) {
                        pr_info("Normal histogram\n");
index b231265148d89a28e39387d423711c643351eb70..23cce67c7e48902b86c00cc6e56df22ef16386e4 100644 (file)
@@ -156,7 +156,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists, NULL);
+       perf_evsel__output_resort(evsel, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -256,7 +256,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists, NULL);
+       perf_evsel__output_resort(evsel, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -310,7 +310,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists, NULL);
+       perf_evsel__output_resort(evsel, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -388,7 +388,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists, NULL);
+       perf_evsel__output_resort(evsel, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -491,7 +491,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
                goto out;
 
        hists__collapse_resort(hists, NULL);
-       hists__output_resort(hists, NULL);
+       perf_evsel__output_resort(evsel, NULL);
 
        if (verbose > 2) {
                pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
index 06f45c1d42561df030e35d55078d70d910e79998..70edcdfa5672434950a9bba66150d0e0db7a6a4e 100644 (file)
@@ -35,6 +35,7 @@ static int test__bpf_parsing(void *obj_buf __maybe_unused,
 static struct {
        const char *source;
        const char *desc;
+       bool should_load_fail;
 } bpf_source_table[__LLVM_TESTCASE_MAX] = {
        [LLVM_TESTCASE_BASE] = {
                .source = test_llvm__bpf_base_prog,
@@ -48,14 +49,19 @@ static struct {
                .source = test_llvm__bpf_test_prologue_prog,
                .desc = "Compile source for BPF prologue generation test",
        },
+       [LLVM_TESTCASE_BPF_RELOCATION] = {
+               .source = test_llvm__bpf_test_relocation,
+               .desc = "Compile source for BPF relocation test",
+               .should_load_fail = true,
+       },
 };
 
-
 int
 test_llvm__fetch_bpf_obj(void **p_obj_buf,
                         size_t *p_obj_buf_sz,
                         enum test_llvm__testcase idx,
-                        bool force)
+                        bool force,
+                        bool *should_load_fail)
 {
        const char *source;
        const char *desc;
@@ -68,6 +74,8 @@ test_llvm__fetch_bpf_obj(void **p_obj_buf,
 
        source = bpf_source_table[idx].source;
        desc = bpf_source_table[idx].desc;
+       if (should_load_fail)
+               *should_load_fail = bpf_source_table[idx].should_load_fail;
 
        perf_config(perf_config_cb, NULL);
 
@@ -136,14 +144,15 @@ int test__llvm(int subtest)
        int ret;
        void *obj_buf = NULL;
        size_t obj_buf_sz = 0;
+       bool should_load_fail = false;
 
        if ((subtest < 0) || (subtest >= __LLVM_TESTCASE_MAX))
                return TEST_FAIL;
 
        ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
-                                      subtest, false);
+                                      subtest, false, &should_load_fail);
 
-       if (ret == TEST_OK) {
+       if (ret == TEST_OK && !should_load_fail) {
                ret = test__bpf_parsing(obj_buf, obj_buf_sz);
                if (ret != TEST_OK) {
                        pr_debug("Failed to parse test case '%s'\n",
index 5150b4d6ef50afe357f5f86300f4d39d28bec658..0eaa604be99defec6dcf3b09ee9a75eb348096fb 100644 (file)
@@ -7,14 +7,17 @@
 extern const char test_llvm__bpf_base_prog[];
 extern const char test_llvm__bpf_test_kbuild_prog[];
 extern const char test_llvm__bpf_test_prologue_prog[];
+extern const char test_llvm__bpf_test_relocation[];
 
 enum test_llvm__testcase {
        LLVM_TESTCASE_BASE,
        LLVM_TESTCASE_KBUILD,
        LLVM_TESTCASE_BPF_PROLOGUE,
+       LLVM_TESTCASE_BPF_RELOCATION,
        __LLVM_TESTCASE_MAX,
 };
 
 int test_llvm__fetch_bpf_obj(void **p_obj_buf, size_t *p_obj_buf_sz,
-                            enum test_llvm__testcase index, bool force);
+                            enum test_llvm__testcase index, bool force,
+                            bool *should_load_fail);
 #endif
index df38decc48c32df72e9dfa86ef3f7e7b035322e7..cac15d93aea656f96ad449cba1f9529e42afcbec 100644 (file)
@@ -5,7 +5,7 @@ ifeq ($(MAKECMDGOALS),)
 # no target specified, trigger the whole suite
 all:
        @echo "Testing Makefile";      $(MAKE) -sf tests/make MK=Makefile
-       @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf
+       @echo "Testing Makefile.perf"; $(MAKE) -sf tests/make MK=Makefile.perf SET_PARALLEL=1 SET_O=1
 else
 # run only specific test over 'Makefile'
 %:
@@ -13,6 +13,27 @@ else
 endif
 else
 PERF := .
+PERF_O := $(PERF)
+O_OPT :=
+FULL_O := $(shell readlink -f $(PERF_O) || echo $(PERF_O))
+
+ifneq ($(O),)
+  FULL_O := $(shell readlink -f $(O) || echo $(O))
+  PERF_O := $(FULL_O)
+  ifeq ($(SET_O),1)
+    O_OPT := 'O=$(FULL_O)'
+  endif
+  K_O_OPT := 'O=$(FULL_O)'
+endif
+
+PARALLEL_OPT=
+ifeq ($(SET_PARALLEL),1)
+  cores := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
+  ifeq ($(cores),0)
+    cores := 1
+  endif
+  PARALLEL_OPT="-j$(cores)"
+endif
 
 # As per kernel Makefile, avoid funny character set dependencies
 unexport LC_ALL
@@ -59,6 +80,7 @@ make_no_libaudit    := NO_LIBAUDIT=1
 make_no_libbionic   := NO_LIBBIONIC=1
 make_no_auxtrace    := NO_AUXTRACE=1
 make_no_libbpf     := NO_LIBBPF=1
+make_no_libcrypto   := NO_LIBCRYPTO=1
 make_tags           := tags
 make_cscope         := cscope
 make_help           := help
@@ -82,6 +104,7 @@ make_minimal        := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
 make_minimal        += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
 make_minimal        += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
 make_minimal        += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1 NO_LIBBPF=1
+make_minimal        += NO_LIBCRYPTO=1
 
 # $(run) contains all available tests
 run := make_pure
@@ -90,6 +113,9 @@ run := make_pure
 # disable features detection
 ifeq ($(MK),Makefile)
 run += make_clean_all
+MAKE_F := $(MAKE)
+else
+MAKE_F := $(MAKE) -f $(MK)
 endif
 run += make_python_perf_so
 run += make_debug
@@ -156,11 +182,11 @@ test_make_doc    := $(test_ok)
 test_make_help_O := $(test_ok)
 test_make_doc_O  := $(test_ok)
 
-test_make_python_perf_so := test -f $(PERF)/python/perf.so
+test_make_python_perf_so := test -f $(PERF_O)/python/perf.so
 
-test_make_perf_o           := test -f $(PERF)/perf.o
-test_make_util_map_o       := test -f $(PERF)/util/map.o
-test_make_util_pmu_bison_o := test -f $(PERF)/util/pmu-bison.o
+test_make_perf_o           := test -f $(PERF_O)/perf.o
+test_make_util_map_o       := test -f $(PERF_O)/util/map.o
+test_make_util_pmu_bison_o := test -f $(PERF_O)/util/pmu-bison.o
 
 define test_dest_files
   for file in $(1); do                         \
@@ -227,7 +253,7 @@ test_make_perf_o_O            := test -f $$TMP_O/perf.o
 test_make_util_map_o_O        := test -f $$TMP_O/util/map.o
 test_make_util_pmu_bison_o_O := test -f $$TMP_O/util/pmu-bison.o
 
-test_default = test -x $(PERF)/perf
+test_default = test -x $(PERF_O)/perf
 test = $(if $(test_$1),$(test_$1),$(test_default))
 
 test_default_O = test -x $$TMP_O/perf
@@ -240,6 +266,8 @@ run := $(shell shuf -e $(run))
 run_O := $(shell shuf -e $(run_O))
 endif
 
+max_width := $(shell echo $(run_O) | sed 's/ /\n/g' | wc -L)
+
 ifdef DEBUG
 d := $(info run   $(run))
 d := $(info run_O $(run_O))
@@ -247,13 +275,13 @@ endif
 
 MAKEFLAGS := --no-print-directory
 
-clean := @(cd $(PERF); make -s -f $(MK) clean >/dev/null)
+clean := @(cd $(PERF); $(MAKE_F) -s $(O_OPT) clean >/dev/null)
 
 $(run):
        $(call clean)
        @TMP_DEST=$$(mktemp -d); \
-       cmd="cd $(PERF) && make -f $(MK) DESTDIR=$$TMP_DEST $($@)"; \
-       echo "- $@: $$cmd" && echo $$cmd > $@ && \
+       cmd="cd $(PERF) && $(MAKE_F) $($@) $(PARALLEL_OPT) $(O_OPT) DESTDIR=$$TMP_DEST"; \
+       printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
        ( eval $$cmd ) >> $@ 2>&1; \
        echo "  test: $(call test,$@)" >> $@ 2>&1; \
        $(call test,$@) && \
@@ -263,8 +291,8 @@ $(run_O):
        $(call clean)
        @TMP_O=$$(mktemp -d); \
        TMP_DEST=$$(mktemp -d); \
-       cmd="cd $(PERF) && make -f $(MK) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
-       echo "- $@: $$cmd" && echo $$cmd > $@ && \
+       cmd="cd $(PERF) && $(MAKE_F) $($(patsubst %_O,%,$@)) $(PARALLEL_OPT) O=$$TMP_O DESTDIR=$$TMP_DEST"; \
+       printf "%*.*s: %s\n" $(max_width) $(max_width) "$@" "$$cmd" && echo $$cmd > $@ && \
        ( eval $$cmd ) >> $@ 2>&1 && \
        echo "  test: $(call test_O,$@)" >> $@ 2>&1; \
        $(call test_O,$@) && \
@@ -276,23 +304,60 @@ tarpkg:
        ( eval $$cmd ) >> $@ 2>&1 && \
        rm -f $@
 
+KERNEL_O := ../..
+ifneq ($(O),)
+  KERNEL_O := $(O)
+endif
+
 make_kernelsrc:
-       @echo "- make -C <kernelsrc> tools/perf"
+       @echo "- make -C <kernelsrc> $(PARALLEL_OPT) $(K_O_OPT) tools/perf"
        $(call clean); \
-       (make -C ../.. tools/perf) > $@ 2>&1 && \
-       test -x perf && rm -f $@ || (cat $@ ; false)
+       (make -C ../.. $(PARALLEL_OPT) $(K_O_OPT) tools/perf) > $@ 2>&1 && \
+       test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
 
 make_kernelsrc_tools:
-       @echo "- make -C <kernelsrc>/tools perf"
+       @echo "- make -C <kernelsrc>/tools $(PARALLEL_OPT) $(K_O_OPT) perf"
        $(call clean); \
-       (make -C ../../tools perf) > $@ 2>&1 && \
-       test -x perf && rm -f $@ || (cat $@ ; false)
+       (make -C ../../tools $(PARALLEL_OPT) $(K_O_OPT) perf) > $@ 2>&1 && \
+       test -x $(KERNEL_O)/tools/perf/perf && rm -f $@ || (cat $@ ; false)
+
+FEATURES_DUMP_FILE := $(FULL_O)/BUILD_TEST_FEATURE_DUMP
+FEATURES_DUMP_FILE_STATIC := $(FULL_O)/BUILD_TEST_FEATURE_DUMP_STATIC
 
 all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools
        @echo OK
+       @rm -f $(FEATURES_DUMP_FILE) $(FEATURES_DUMP_FILE_STATIC)
 
 out: $(run_O)
        @echo OK
+       @rm -f $(FEATURES_DUMP_FILE) $(FEATURES_DUMP_FILE_STATIC)
+
+ifeq ($(REUSE_FEATURES_DUMP),1)
+$(FEATURES_DUMP_FILE):
+       $(call clean)
+       @cmd="cd $(PERF) && make FEATURE_DUMP_COPY=$@ $(O_OPT) feature-dump"; \
+       echo "- $@: $$cmd" && echo $$cmd && \
+       ( eval $$cmd ) > /dev/null 2>&1
+
+$(FEATURES_DUMP_FILE_STATIC):
+       $(call clean)
+       @cmd="cd $(PERF) && make FEATURE_DUMP_COPY=$@ $(O_OPT) LDFLAGS='-static' feature-dump"; \
+       echo "- $@: $$cmd" && echo $$cmd && \
+       ( eval $$cmd ) > /dev/null 2>&1
+
+# Add feature dump dependency for run/run_O targets
+$(foreach t,$(run) $(run_O),$(eval \
+       $(t): $(if $(findstring make_static,$(t)),\
+               $(FEATURES_DUMP_FILE_STATIC),\
+               $(FEATURES_DUMP_FILE))))
+
+# Append 'FEATURES_DUMP=' option to all test cases. For example:
+# make_no_libbpf: NO_LIBBPF=1  --> NO_LIBBPF=1 FEATURES_DUMP=/a/b/BUILD_TEST_FEATURE_DUMP
+# make_static: LDFLAGS=-static --> LDFLAGS=-static FEATURES_DUMP=/a/b/BUILD_TEST_FEATURE_DUMP_STATIC
+$(foreach t,$(run),$(if $(findstring make_static,$(t)),\
+                       $(eval $(t) := $($(t)) FEATURES_DUMP=$(FEATURES_DUMP_FILE_STATIC)),\
+                       $(eval $(t) := $($(t)) FEATURES_DUMP=$(FEATURES_DUMP_FILE))))
+endif
 
 .PHONY: all $(run) $(run_O) tarpkg clean make_kernelsrc make_kernelsrc_tools
 endif # ifndef MK
index f0bfc9e8fd9f617d69c0b3cb36fe6c210ebbd6d5..630b0b409b973f87ba00312e5e4e3d8fdf829f32 100644 (file)
@@ -110,7 +110,6 @@ int test__vmlinux_matches_kallsyms(int subtest __maybe_unused)
         */
        for (nd = rb_first(&vmlinux_map->dso->symbols[type]); nd; nd = rb_next(nd)) {
                struct symbol *pair, *first_pair;
-               bool backwards = true;
 
                sym  = rb_entry(nd, struct symbol, rb_node);
 
@@ -151,27 +150,14 @@ next_pair:
                                continue;
 
                        } else {
-                               struct rb_node *nnd;
-detour:
-                               nnd = backwards ? rb_prev(&pair->rb_node) :
-                                                 rb_next(&pair->rb_node);
-                               if (nnd) {
-                                       struct symbol *next = rb_entry(nnd, struct symbol, rb_node);
-
-                                       if (UM(next->start) == mem_start) {
-                                               pair = next;
+                               pair = machine__find_kernel_symbol_by_name(&kallsyms, type, sym->name, NULL, NULL);
+                               if (pair) {
+                                       if (UM(pair->start) == mem_start)
                                                goto next_pair;
-                                       }
-                               }
 
-                               if (backwards) {
-                                       backwards = false;
-                                       pair = first_pair;
-                                       goto detour;
+                                       pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
+                                                mem_start, sym->name, pair->name);
                                }
-
-                               pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
-                                        mem_start, sym->name, pair->name);
                        }
                } else
                        pr_debug("%#" PRIx64 ": %s not on kallsyms\n",
index d37202121689a017b0790d552e088d695716c527..af68a9d488bfce964c84e67cf5394e7e13daab29 100644 (file)
@@ -531,8 +531,8 @@ static struct ui_browser_colorset {
                .bg       = "yellow",
        },
        {
-               .colorset = HE_COLORSET_CODE,
-               .name     = "code",
+               .colorset = HE_COLORSET_JUMP_ARROWS,
+               .name     = "jump_arrows",
                .fg       = "blue",
                .bg       = "default",
        },
index 01781de59532ce9c9fd1ff7f8c7fdf97d45fa105..be3b70eb5fca6e402830570c4111f10258702c04 100644 (file)
@@ -7,7 +7,7 @@
 #define HE_COLORSET_MEDIUM     51
 #define HE_COLORSET_NORMAL     52
 #define HE_COLORSET_SELECTED   53
-#define HE_COLORSET_CODE       54
+#define HE_COLORSET_JUMP_ARROWS        54
 #define HE_COLORSET_ADDR       55
 #define HE_COLORSET_ROOT       56
 
index d4d7cc27252f1184bf2d678b6460f138099efd28..4fc208e82c6fc7b28d99af147d1c75738bc338e6 100644 (file)
@@ -284,7 +284,7 @@ static void annotate_browser__draw_current_jump(struct ui_browser *browser)
                to = (u64)btarget->idx;
        }
 
-       ui_browser__set_color(browser, HE_COLORSET_CODE);
+       ui_browser__set_color(browser, HE_COLORSET_JUMP_ARROWS);
        __ui_browser__line_arrow(browser, pcnt_width + 2 + ab->addr_width,
                                 from, to);
 }
@@ -755,11 +755,11 @@ static int annotate_browser__run(struct annotate_browser *browser,
                                nd = browser->curr_hot;
                        break;
                case K_UNTAB:
-                       if (nd != NULL)
+                       if (nd != NULL) {
                                nd = rb_next(nd);
                                if (nd == NULL)
                                        nd = rb_first(&browser->entries);
-                       else
+                       } else
                                nd = browser->curr_hot;
                        break;
                case K_F1:
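The K_UNTAB hunk above adds braces so that the else arm pairs with the outer "if (nd != NULL)" rather than with the inner "if (nd == NULL)". A minimal, self-contained illustration of that dangling-else pitfall (messages are invented, this is not perf code):

#include <stdio.h>

int main(void)
{
        int have_node = 0, at_end = 0;

        /* Broken shape: the else silently binds to "if (at_end)",
         * so nothing runs at all when have_node is 0. */
        if (have_node)
                if (at_end)
                        puts("wrap around to the first entry");
                else
                        puts("fall back to the current hot entry");

        /* Fixed shape, as in the patch: braces make the intent explicit. */
        if (have_node) {
                if (at_end)
                        puts("wrap around to the first entry");
        } else {
                puts("fall back to the current hot entry");
        }
        return 0;
}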
index 08c09ad755d2d2f8dd55be06b258e16fb2c8335f..a5a5390476acead8acd48575ad404a5198856073 100644 (file)
@@ -657,9 +657,24 @@ static int hist_browser__show_callchain_list(struct hist_browser *browser,
        return 1;
 }
 
+static bool check_percent_display(struct rb_node *node, u64 parent_total)
+{
+       struct callchain_node *child;
+
+       if (node == NULL)
+               return false;
+
+       if (rb_next(node))
+               return true;
+
+       child = rb_entry(node, struct callchain_node, rb_node);
+       return callchain_cumul_hits(child) != parent_total;
+}
+
 static int hist_browser__show_callchain_flat(struct hist_browser *browser,
                                             struct rb_root *root,
                                             unsigned short row, u64 total,
+                                            u64 parent_total,
                                             print_callchain_entry_fn print,
                                             struct callchain_print_arg *arg,
                                             check_output_full_fn is_output_full)
@@ -669,7 +684,7 @@ static int hist_browser__show_callchain_flat(struct hist_browser *browser,
        bool need_percent;
 
        node = rb_first(root);
-       need_percent = node && rb_next(node);
+       need_percent = check_percent_display(node, parent_total);
 
        while (node) {
                struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
@@ -763,6 +778,7 @@ static char *hist_browser__folded_callchain_str(struct hist_browser *browser,
 static int hist_browser__show_callchain_folded(struct hist_browser *browser,
                                               struct rb_root *root,
                                               unsigned short row, u64 total,
+                                              u64 parent_total,
                                               print_callchain_entry_fn print,
                                               struct callchain_print_arg *arg,
                                               check_output_full_fn is_output_full)
@@ -772,7 +788,7 @@ static int hist_browser__show_callchain_folded(struct hist_browser *browser,
        bool need_percent;
 
        node = rb_first(root);
-       need_percent = node && rb_next(node);
+       need_percent = check_percent_display(node, parent_total);
 
        while (node) {
                struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
@@ -844,20 +860,24 @@ next:
        return row - first_row;
 }
 
-static int hist_browser__show_callchain(struct hist_browser *browser,
+static int hist_browser__show_callchain_graph(struct hist_browser *browser,
                                        struct rb_root *root, int level,
                                        unsigned short row, u64 total,
+                                       u64 parent_total,
                                        print_callchain_entry_fn print,
                                        struct callchain_print_arg *arg,
                                        check_output_full_fn is_output_full)
 {
        struct rb_node *node;
        int first_row = row, offset = level * LEVEL_OFFSET_STEP;
-       u64 new_total;
        bool need_percent;
+       u64 percent_total = total;
+
+       if (callchain_param.mode == CHAIN_GRAPH_REL)
+               percent_total = parent_total;
 
        node = rb_first(root);
-       need_percent = node && rb_next(node);
+       need_percent = check_percent_display(node, parent_total);
 
        while (node) {
                struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
@@ -878,7 +898,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
                        folded_sign = callchain_list__folded(chain);
 
                        row += hist_browser__show_callchain_list(browser, child,
-                                                       chain, row, total,
+                                                       chain, row, percent_total,
                                                        was_first && need_percent,
                                                        offset + extra_offset,
                                                        print, arg);
@@ -893,13 +913,9 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
                if (folded_sign == '-') {
                        const int new_level = level + (extra_offset ? 2 : 1);
 
-                       if (callchain_param.mode == CHAIN_GRAPH_REL)
-                               new_total = child->children_hit;
-                       else
-                               new_total = total;
-
-                       row += hist_browser__show_callchain(browser, &child->rb_root,
-                                                           new_level, row, new_total,
+                       row += hist_browser__show_callchain_graph(browser, &child->rb_root,
+                                                           new_level, row, total,
+                                                           child->children_hit,
                                                            print, arg, is_output_full);
                }
                if (is_output_full(browser, row))
@@ -910,6 +926,45 @@ out:
        return row - first_row;
 }
 
+static int hist_browser__show_callchain(struct hist_browser *browser,
+                                       struct hist_entry *entry, int level,
+                                       unsigned short row,
+                                       print_callchain_entry_fn print,
+                                       struct callchain_print_arg *arg,
+                                       check_output_full_fn is_output_full)
+{
+       u64 total = hists__total_period(entry->hists);
+       u64 parent_total;
+       int printed;
+
+       if (symbol_conf.cumulate_callchain)
+               parent_total = entry->stat_acc->period;
+       else
+               parent_total = entry->stat.period;
+
+       if (callchain_param.mode == CHAIN_FLAT) {
+               printed = hist_browser__show_callchain_flat(browser,
+                                               &entry->sorted_chain, row,
+                                               total, parent_total, print, arg,
+                                               is_output_full);
+       } else if (callchain_param.mode == CHAIN_FOLDED) {
+               printed = hist_browser__show_callchain_folded(browser,
+                                               &entry->sorted_chain, row,
+                                               total, parent_total, print, arg,
+                                               is_output_full);
+       } else {
+               printed = hist_browser__show_callchain_graph(browser,
+                                               &entry->sorted_chain, level, row,
+                                               total, parent_total, print, arg,
+                                               is_output_full);
+       }
+
+       if (arg->is_current_entry)
+               browser->he_selection = entry;
+
+       return printed;
+}
+
 struct hpp_arg {
        struct ui_browser *b;
        char folded_sign;
@@ -1040,7 +1095,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
 
                hist_browser__gotorc(browser, row, 0);
 
-               perf_hpp__for_each_format(fmt) {
+               hists__for_each_format(browser->hists, fmt) {
                        if (perf_hpp__should_skip(fmt, entry->hists) ||
                            column++ < browser->b.horiz_scroll)
                                continue;
@@ -1084,38 +1139,14 @@ static int hist_browser__show_entry(struct hist_browser *browser,
                --row_offset;
 
        if (folded_sign == '-' && row != browser->b.rows) {
-               u64 total = hists__total_period(entry->hists);
                struct callchain_print_arg arg = {
                        .row_offset = row_offset,
                        .is_current_entry = current_entry,
                };
 
-               if (callchain_param.mode == CHAIN_GRAPH_REL) {
-                       if (symbol_conf.cumulate_callchain)
-                               total = entry->stat_acc->period;
-                       else
-                               total = entry->stat.period;
-               }
-
-               if (callchain_param.mode == CHAIN_FLAT) {
-                       printed += hist_browser__show_callchain_flat(browser,
-                                       &entry->sorted_chain, row, total,
-                                       hist_browser__show_callchain_entry, &arg,
-                                       hist_browser__check_output_full);
-               } else if (callchain_param.mode == CHAIN_FOLDED) {
-                       printed += hist_browser__show_callchain_folded(browser,
-                                       &entry->sorted_chain, row, total,
-                                       hist_browser__show_callchain_entry, &arg,
-                                       hist_browser__check_output_full);
-               } else {
-                       printed += hist_browser__show_callchain(browser,
-                                       &entry->sorted_chain, 1, row, total,
+               printed += hist_browser__show_callchain(browser, entry, 1, row,
                                        hist_browser__show_callchain_entry, &arg,
                                        hist_browser__check_output_full);
-               }
-
-               if (arg.is_current_entry)
-                       browser->he_selection = entry;
        }
 
        return printed;
@@ -1144,7 +1175,7 @@ static int hists_browser__scnprintf_headers(struct hist_browser *browser, char *
                        return ret;
        }
 
-       perf_hpp__for_each_format(fmt) {
+       hists__for_each_format(browser->hists, fmt) {
                if (perf_hpp__should_skip(fmt, hists)  || column++ < browser->b.horiz_scroll)
                        continue;
 
@@ -1380,15 +1411,11 @@ do_offset:
 static int hist_browser__fprintf_callchain(struct hist_browser *browser,
                                           struct hist_entry *he, FILE *fp)
 {
-       u64 total = hists__total_period(he->hists);
        struct callchain_print_arg arg  = {
                .fp = fp,
        };
 
-       if (symbol_conf.cumulate_callchain)
-               total = he->stat_acc->period;
-
-       hist_browser__show_callchain(browser, &he->sorted_chain, 1, 0, total,
+       hist_browser__show_callchain(browser, he, 1, 0,
                                     hist_browser__fprintf_callchain_entry, &arg,
                                     hist_browser__check_dump_full);
        return arg.printed;
@@ -1414,7 +1441,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser,
        if (symbol_conf.use_callchain)
                printed += fprintf(fp, "%c ", folded_sign);
 
-       perf_hpp__for_each_format(fmt) {
+       hists__for_each_format(browser->hists, fmt) {
                if (perf_hpp__should_skip(fmt, he->hists))
                        continue;
 
@@ -1782,7 +1809,7 @@ static int
 add_thread_opt(struct hist_browser *browser, struct popup_action *act,
               char **optstr, struct thread *thread)
 {
-       if (thread == NULL)
+       if (!sort__has_thread || thread == NULL)
                return 0;
 
        if (asprintf(optstr, "Zoom %s %s(%d) thread",
@@ -1825,7 +1852,7 @@ static int
 add_dso_opt(struct hist_browser *browser, struct popup_action *act,
            char **optstr, struct map *map)
 {
-       if (map == NULL)
+       if (!sort__has_dso || map == NULL)
                return 0;
 
        if (asprintf(optstr, "Zoom %s %s DSO",
@@ -1850,7 +1877,7 @@ static int
 add_map_opt(struct hist_browser *browser __maybe_unused,
            struct popup_action *act, char **optstr, struct map *map)
 {
-       if (map == NULL)
+       if (!sort__has_dso || map == NULL)
                return 0;
 
        if (asprintf(optstr, "Browse map details") < 0)
@@ -1971,7 +1998,7 @@ static int
 add_socket_opt(struct hist_browser *browser, struct popup_action *act,
               char **optstr, int socket_id)
 {
-       if (socket_id < 0)
+       if (!sort__has_socket || socket_id < 0)
                return 0;
 
        if (asprintf(optstr, "Zoom %s Processor Socket %d",
@@ -2002,6 +2029,42 @@ static void hist_browser__update_nr_entries(struct hist_browser *hb)
        hb->nr_non_filtered_entries = nr_entries;
 }
 
+static void hist_browser__update_percent_limit(struct hist_browser *hb,
+                                              double percent)
+{
+       struct hist_entry *he;
+       struct rb_node *nd = rb_first(&hb->hists->entries);
+       u64 total = hists__total_period(hb->hists);
+       u64 min_callchain_hits = total * (percent / 100);
+
+       hb->min_pcnt = callchain_param.min_percent = percent;
+
+       if (!symbol_conf.use_callchain)
+               return;
+
+       while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) {
+               he = rb_entry(nd, struct hist_entry, rb_node);
+
+               if (callchain_param.mode == CHAIN_GRAPH_REL) {
+                       total = he->stat.period;
+
+                       if (symbol_conf.cumulate_callchain)
+                               total = he->stat_acc->period;
+
+                       min_callchain_hits = total * (percent / 100);
+               }
+
+               callchain_param.sort(&he->sorted_chain, he->callchain,
+                                    min_callchain_hits, &callchain_param);
+
+               /* force to re-evaluate folding state of callchains */
+               he->init_have_children = false;
+               hist_entry__set_folding(he, false);
+
+               nd = rb_next(nd);
+       }
+}
+
 static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                    const char *helpline,
                                    bool left_exits,
@@ -2037,6 +2100,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        "E             Expand all callchains\n"                         \
        "F             Toggle percentage of filtered entries\n"         \
        "H             Display column headers\n"                        \
+       "L             Change percent limit\n"                          \
        "m             Display context menu\n"                          \
        "S             Zoom into current Processor Socket\n"            \
 
@@ -2077,7 +2141,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        memset(options, 0, sizeof(options));
        memset(actions, 0, sizeof(actions));
 
-       perf_hpp__for_each_format(fmt) {
+       hists__for_each_format(browser->hists, fmt) {
                perf_hpp__reset_width(fmt, hists);
                /*
                 * This is done just once, and activates the horizontal scrolling
@@ -2192,6 +2256,24 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                top->zero = !top->zero;
                        }
                        continue;
+               case 'L':
+                       if (ui_browser__input_window("Percent Limit",
+                                       "Please enter the percent below which entries will be hidden.",
+                                       buf, "ENTER: OK, ESC: Cancel",
+                                       delay_secs * 2) == K_ENTER) {
+                               char *end;
+                               double new_percent = strtod(buf, &end);
+
+                               if (new_percent < 0 || new_percent > 100) {
+                                       ui_browser__warning(&browser->b, delay_secs * 2,
+                                               "Invalid percent: %.2f", new_percent);
+                                       continue;
+                               }
+
+                               hist_browser__update_percent_limit(browser, new_percent);
+                               hist_browser__reset(browser);
+                       }
+                       continue;
                case K_F1:
                case 'h':
                case '?':
@@ -2263,10 +2345,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                        continue;
                }
 
-               if (!sort__has_sym)
-                       goto add_exit_option;
-
-               if (browser->selection == NULL)
+               if (!sort__has_sym || browser->selection == NULL)
                        goto skip_annotation;
 
                if (sort__mode == SORT_MODE__BRANCH) {
@@ -2306,11 +2385,16 @@ skip_annotation:
                                             &options[nr_options],
                                             socked_id);
                /* perf script support */
+               if (!is_report_browser(hbt))
+                       goto skip_scripting;
+
                if (browser->he_selection) {
-                       nr_options += add_script_opt(browser,
-                                                    &actions[nr_options],
-                                                    &options[nr_options],
-                                                    thread, NULL);
+                       if (sort__has_thread && thread) {
+                               nr_options += add_script_opt(browser,
+                                                            &actions[nr_options],
+                                                            &options[nr_options],
+                                                            thread, NULL);
+                       }
                        /*
                         * Note that browser->selection != NULL
                         * when browser->he_selection is not NULL,
@@ -2320,16 +2404,18 @@ skip_annotation:
                         *
                         * See hist_browser__show_entry.
                         */
-                       nr_options += add_script_opt(browser,
-                                                    &actions[nr_options],
-                                                    &options[nr_options],
-                                                    NULL, browser->selection->sym);
+                       if (sort__has_sym && browser->selection->sym) {
+                               nr_options += add_script_opt(browser,
+                                                            &actions[nr_options],
+                                                            &options[nr_options],
+                                                            NULL, browser->selection->sym);
+                       }
                }
                nr_options += add_script_opt(browser, &actions[nr_options],
                                             &options[nr_options], NULL, NULL);
                nr_options += add_switch_opt(browser, &actions[nr_options],
                                             &options[nr_options]);
-add_exit_option:
+skip_scripting:
                nr_options += add_exit_opt(browser, &actions[nr_options],
                                           &options[nr_options]);
 
index 0f8dcfdfb10f3856abefc7f252631e8937bacf6b..32cc38a5b57fc736893501d7eb07c6c8a7d3b8c4 100644 (file)
@@ -306,7 +306,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
 
        nr_cols = 0;
 
-       perf_hpp__for_each_format(fmt)
+       hists__for_each_format(hists, fmt)
                col_types[nr_cols++] = G_TYPE_STRING;
 
        store = gtk_tree_store_newv(nr_cols, col_types);
@@ -317,7 +317,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
 
        col_idx = 0;
 
-       perf_hpp__for_each_format(fmt) {
+       hists__for_each_format(hists, fmt) {
                if (perf_hpp__should_skip(fmt, hists))
                        continue;
 
@@ -367,7 +367,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
 
                col_idx = 0;
 
-               perf_hpp__for_each_format(fmt) {
+               hists__for_each_format(hists, fmt) {
                        if (perf_hpp__should_skip(fmt, h->hists))
                                continue;
 
index bf2a66e254eac35c63a0dd44e83628f34e4dc2f9..1ba4117d9c2de0d2f6326c1bc160c4d6171efae0 100644 (file)
@@ -371,7 +371,20 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
        return 0;
 }
 
-#define HPP__COLOR_PRINT_FNS(_name, _fn)               \
+static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
+{
+       return a->header == hpp__header_fn;
+}
+
+static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+       if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
+               return false;
+
+       return a->idx == b->idx;
+}
+
+#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)         \
        {                                               \
                .name   = _name,                        \
                .header = hpp__header_fn,               \
@@ -381,9 +394,11 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
                .cmp    = hpp__nop_cmp,                 \
                .collapse = hpp__nop_cmp,               \
                .sort   = hpp__sort_ ## _fn,            \
+               .idx    = PERF_HPP__ ## _idx,           \
+               .equal  = hpp__equal,                   \
        }
 
-#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn)           \
+#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)     \
        {                                               \
                .name   = _name,                        \
                .header = hpp__header_fn,               \
@@ -393,9 +408,11 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
                .cmp    = hpp__nop_cmp,                 \
                .collapse = hpp__nop_cmp,               \
                .sort   = hpp__sort_ ## _fn,            \
+               .idx    = PERF_HPP__ ## _idx,           \
+               .equal  = hpp__equal,                   \
        }
 
-#define HPP__PRINT_FNS(_name, _fn)                     \
+#define HPP__PRINT_FNS(_name, _fn, _idx)               \
        {                                               \
                .name   = _name,                        \
                .header = hpp__header_fn,               \
@@ -404,22 +421,25 @@ static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
                .cmp    = hpp__nop_cmp,                 \
                .collapse = hpp__nop_cmp,               \
                .sort   = hpp__sort_ ## _fn,            \
+               .idx    = PERF_HPP__ ## _idx,           \
+               .equal  = hpp__equal,                   \
        }
 
 struct perf_hpp_fmt perf_hpp__format[] = {
-       HPP__COLOR_PRINT_FNS("Overhead", overhead),
-       HPP__COLOR_PRINT_FNS("sys", overhead_sys),
-       HPP__COLOR_PRINT_FNS("usr", overhead_us),
-       HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys),
-       HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us),
-       HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc),
-       HPP__PRINT_FNS("Samples", samples),
-       HPP__PRINT_FNS("Period", period)
+       HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
+       HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
+       HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
+       HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
+       HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
+       HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
+       HPP__PRINT_FNS("Samples", samples, SAMPLES),
+       HPP__PRINT_FNS("Period", period, PERIOD)
 };
 
-LIST_HEAD(perf_hpp__list);
-LIST_HEAD(perf_hpp__sort_list);
-
+struct perf_hpp_list perf_hpp_list = {
+       .fields = LIST_HEAD_INIT(perf_hpp_list.fields),
+       .sorts  = LIST_HEAD_INIT(perf_hpp_list.sorts),
+};
 
 #undef HPP__COLOR_PRINT_FNS
 #undef HPP__COLOR_ACC_PRINT_FNS
@@ -485,63 +505,60 @@ void perf_hpp__init(void)
                hpp_dimension__add_output(PERF_HPP__PERIOD);
 }
 
-void perf_hpp__column_register(struct perf_hpp_fmt *format)
-{
-       list_add_tail(&format->list, &perf_hpp__list);
-}
-
-void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
-{
-       list_del(&format->list);
-}
-
-void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
+void perf_hpp_list__column_register(struct perf_hpp_list *list,
+                                   struct perf_hpp_fmt *format)
 {
-       list_add_tail(&format->sort_list, &perf_hpp__sort_list);
+       list_add_tail(&format->list, &list->fields);
 }
 
-void perf_hpp__column_enable(unsigned col)
+void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
+                                       struct perf_hpp_fmt *format)
 {
-       BUG_ON(col >= PERF_HPP__MAX_INDEX);
-       perf_hpp__column_register(&perf_hpp__format[col]);
+       list_add_tail(&format->sort_list, &list->sorts);
 }
 
-void perf_hpp__column_disable(unsigned col)
+void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
 {
-       BUG_ON(col >= PERF_HPP__MAX_INDEX);
-       perf_hpp__column_unregister(&perf_hpp__format[col]);
+       list_del(&format->list);
 }
 
 void perf_hpp__cancel_cumulate(void)
 {
+       struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;
+
        if (is_strict_order(field_order))
                return;
 
-       perf_hpp__column_disable(PERF_HPP__OVERHEAD_ACC);
-       perf_hpp__format[PERF_HPP__OVERHEAD].name = "Overhead";
+       ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
+       acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];
+
+       perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
+               if (acc->equal(acc, fmt)) {
+                       perf_hpp__column_unregister(fmt);
+                       continue;
+               }
+
+               if (ovh->equal(ovh, fmt))
+                       fmt->name = "Overhead";
+       }
+}
+
+static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+       return a->equal && a->equal(a, b);
 }
 
-void perf_hpp__setup_output_field(void)
+void perf_hpp__setup_output_field(struct perf_hpp_list *list)
 {
        struct perf_hpp_fmt *fmt;
 
        /* append sort keys to output field */
-       perf_hpp__for_each_sort_list(fmt) {
-               if (!list_empty(&fmt->list))
-                       continue;
-
-               /*
-                * sort entry fields are dynamically created,
-                * so they can share a same sort key even though
-                * the list is empty.
-                */
-               if (perf_hpp__is_sort_entry(fmt)) {
-                       struct perf_hpp_fmt *pos;
+       perf_hpp_list__for_each_sort_list(list, fmt) {
+               struct perf_hpp_fmt *pos;
 
-                       perf_hpp__for_each_format(pos) {
-                               if (perf_hpp__same_sort_entry(pos, fmt))
-                                       goto next;
-                       }
+               perf_hpp_list__for_each_format(list, pos) {
+                       if (fmt_equal(fmt, pos))
+                               goto next;
                }
 
                perf_hpp__column_register(fmt);
@@ -550,27 +567,17 @@ next:
        }
 }
 
-void perf_hpp__append_sort_keys(void)
+void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
 {
        struct perf_hpp_fmt *fmt;
 
        /* append output fields to sort keys */
-       perf_hpp__for_each_format(fmt) {
-               if (!list_empty(&fmt->sort_list))
-                       continue;
+       perf_hpp_list__for_each_format(list, fmt) {
+               struct perf_hpp_fmt *pos;
 
-               /*
-                * sort entry fields are dynamically created,
-                * so they can share a same sort key even though
-                * the list is empty.
-                */
-               if (perf_hpp__is_sort_entry(fmt)) {
-                       struct perf_hpp_fmt *pos;
-
-                       perf_hpp__for_each_sort_list(pos) {
-                               if (perf_hpp__same_sort_entry(pos, fmt))
-                                       goto next;
-                       }
+               perf_hpp_list__for_each_sort_list(list, pos) {
+                       if (fmt_equal(fmt, pos))
+                               goto next;
                }
 
                perf_hpp__register_sort_field(fmt);
@@ -579,20 +586,29 @@ next:
        }
 }
 
-void perf_hpp__reset_output_field(void)
+
+static void fmt_free(struct perf_hpp_fmt *fmt)
+{
+       if (fmt->free)
+               fmt->free(fmt);
+}
+
+void perf_hpp__reset_output_field(struct perf_hpp_list *list)
 {
        struct perf_hpp_fmt *fmt, *tmp;
 
        /* reset output fields */
-       perf_hpp__for_each_format_safe(fmt, tmp) {
+       perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
                list_del_init(&fmt->list);
                list_del_init(&fmt->sort_list);
+               fmt_free(fmt);
        }
 
        /* reset sort keys */
-       perf_hpp__for_each_sort_list_safe(fmt, tmp) {
+       perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
                list_del_init(&fmt->list);
                list_del_init(&fmt->sort_list);
+               fmt_free(fmt);
        }
 }
 
@@ -606,7 +622,7 @@ unsigned int hists__sort_list_width(struct hists *hists)
        bool first = true;
        struct perf_hpp dummy_hpp;
 
-       perf_hpp__for_each_format(fmt) {
+       hists__for_each_format(hists, fmt) {
                if (perf_hpp__should_skip(fmt, hists))
                        continue;
 
@@ -626,20 +642,12 @@ unsigned int hists__sort_list_width(struct hists *hists)
 
 void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
 {
-       int idx;
-
        if (perf_hpp__is_sort_entry(fmt))
                return perf_hpp__reset_sort_width(fmt, hists);
 
-       for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) {
-               if (fmt == &perf_hpp__format[idx])
-                       break;
-       }
-
-       if (idx == PERF_HPP__MAX_INDEX)
-               return;
+       BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);
 
-       switch (idx) {
+       switch (fmt->idx) {
        case PERF_HPP__OVERHEAD:
        case PERF_HPP__OVERHEAD_SYS:
        case PERF_HPP__OVERHEAD_US:
@@ -667,7 +675,7 @@ void perf_hpp__set_user_width(const char *width_list_str)
        struct perf_hpp_fmt *fmt;
        const char *ptr = width_list_str;
 
-       perf_hpp__for_each_format(fmt) {
+       perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                char *p;
 
                int len = strtol(ptr, &p, 10);
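perf_hpp__setup_output_field() and perf_hpp__append_sort_keys() above now rely on the per-format ->equal callback to skip entries that are already present when copying between the output and sort lists. A simplified, self-contained sketch of that duplicate check, using arrays as stand-ins for the real perf_hpp_list structures:

#include <stdio.h>

struct fmt {
        const char *name;
        int idx;
};

static int fmt_equal(const struct fmt *a, const struct fmt *b)
{
        return a->idx == b->idx;
}

int main(void)
{
        struct fmt output[8] = { { "Overhead", 0 }, { "Samples", 6 } };
        int nr_output = 2;
        struct fmt sort_keys[] = { { "Overhead", 0 }, { "Period", 7 } };
        size_t i;
        int j, dup;

        /* append sort keys to the output columns, skipping duplicates */
        for (i = 0; i < sizeof(sort_keys) / sizeof(sort_keys[0]); i++) {
                dup = 0;
                for (j = 0; j < nr_output; j++)
                        if (fmt_equal(&sort_keys[i], &output[j]))
                                dup = 1;
                if (!dup)
                        output[nr_output++] = sort_keys[i];
        }

        for (j = 0; j < nr_output; j++)
                printf("column %d: %s\n", j, output[j].name);
        return 0;
}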
index 387110d50b002557d99e723605407c3db990d71a..1a6e8f7f38c4652c6fb0480e56af5dd7abbc8ac6 100644 (file)
@@ -165,8 +165,28 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
        return ret;
 }
 
+/*
+ * If there is a single callchain root, don't bother printing
+ * its percentage (100 % in fractal mode, and the same percentage
+ * as the hist in graph mode). This also avoids one level of column.
+ *
+ * However, when a percent limit is applied, a single callchain
+ * node can have a different (non-100% in fractal mode) percentage.
+ */
+static bool need_percent_display(struct rb_node *node, u64 parent_samples)
+{
+       struct callchain_node *cnode;
+
+       if (rb_next(node))
+               return true;
+
+       cnode = rb_entry(node, struct callchain_node, rb_node);
+       return callchain_cumul_hits(cnode) != parent_samples;
+}
+
 static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
-                                      u64 total_samples, int left_margin)
+                                      u64 total_samples, u64 parent_samples,
+                                      int left_margin)
 {
        struct callchain_node *cnode;
        struct callchain_list *chain;
@@ -177,13 +197,8 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
        int ret = 0;
        char bf[1024];
 
-       /*
-        * If have one single callchain root, don't bother printing
-        * its percentage (100 % in fractal mode and the same percentage
-        * than the hist in graph mode). This also avoid one level of column.
-        */
        node = rb_first(root);
-       if (node && !rb_next(node)) {
+       if (node && !need_percent_display(node, parent_samples)) {
                cnode = rb_entry(node, struct callchain_node, rb_node);
                list_for_each_entry(chain, &cnode->val, list) {
                        /*
@@ -213,9 +228,15 @@ static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
                root = &cnode->rb_root;
        }
 
+       if (callchain_param.mode == CHAIN_GRAPH_REL)
+               total_samples = parent_samples;
+
        ret += __callchain__fprintf_graph(fp, root, total_samples,
                                          1, 1, left_margin);
-       ret += fprintf(fp, "\n");
+       if (ret) {
+               /* do not add a blank line if it printed nothing */
+               ret += fprintf(fp, "\n");
+       }
 
        return ret;
 }
@@ -323,16 +344,19 @@ static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
                                            u64 total_samples, int left_margin,
                                            FILE *fp)
 {
+       u64 parent_samples = he->stat.period;
+
+       if (symbol_conf.cumulate_callchain)
+               parent_samples = he->stat_acc->period;
+
        switch (callchain_param.mode) {
        case CHAIN_GRAPH_REL:
-               return callchain__fprintf_graph(fp, &he->sorted_chain,
-                                               symbol_conf.cumulate_callchain ?
-                                               he->stat_acc->period : he->stat.period,
-                                               left_margin);
+               return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
+                                               parent_samples, left_margin);
                break;
        case CHAIN_GRAPH_ABS:
                return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
-                                               left_margin);
+                                               parent_samples, left_margin);
                break;
        case CHAIN_FLAT:
                return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
@@ -349,30 +373,6 @@ static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
        return 0;
 }
 
-static size_t hist_entry__callchain_fprintf(struct hist_entry *he,
-                                           struct hists *hists,
-                                           FILE *fp)
-{
-       int left_margin = 0;
-       u64 total_period = hists->stats.total_period;
-
-       if (field_order == NULL && (sort_order == NULL ||
-                                   !prefixcmp(sort_order, "comm"))) {
-               struct perf_hpp_fmt *fmt;
-
-               perf_hpp__for_each_format(fmt) {
-                       if (!perf_hpp__is_sort_entry(fmt))
-                               continue;
-
-                       /* must be 'comm' sort entry */
-                       left_margin = fmt->width(fmt, NULL, hists_to_evsel(hists));
-                       left_margin -= thread__comm_len(he->thread);
-                       break;
-               }
-       }
-       return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
-}
-
 static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
 {
        const char *sep = symbol_conf.field_sep;
@@ -384,7 +384,7 @@ static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
        if (symbol_conf.exclude_other && !he->parent)
                return 0;
 
-       perf_hpp__for_each_format(fmt) {
+       hists__for_each_format(he->hists, fmt) {
                if (perf_hpp__should_skip(fmt, he->hists))
                        continue;
 
@@ -418,6 +418,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
                .buf            = bf,
                .size           = size,
        };
+       u64 total_period = hists->stats.total_period;
 
        if (size == 0 || size > bfsz)
                size = hpp.size = bfsz;
@@ -427,7 +428,7 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
        ret = fprintf(fp, "%s\n", bf);
 
        if (symbol_conf.use_callchain)
-               ret += hist_entry__callchain_fprintf(he, hists, fp);
+               ret += hist_entry_callchain__fprintf(he, total_period, 0, fp);
 
        return ret;
 }
@@ -452,7 +453,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
 
        init_rem_hits();
 
-       perf_hpp__for_each_format(fmt)
+       hists__for_each_format(hists, fmt)
                perf_hpp__reset_width(fmt, hists);
 
        if (symbol_conf.col_width_list_str)
@@ -463,7 +464,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
 
        fprintf(fp, "# ");
 
-       perf_hpp__for_each_format(fmt) {
+       hists__for_each_format(hists, fmt) {
                if (perf_hpp__should_skip(fmt, hists))
                        continue;
 
@@ -487,7 +488,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
 
        fprintf(fp, "# ");
 
-       perf_hpp__for_each_format(fmt) {
+       hists__for_each_format(hists, fmt) {
                unsigned int i;
 
                if (perf_hpp__should_skip(fmt, hists))
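callchain__fprintf_graph() now receives parent_samples and, in CHAIN_GRAPH_REL mode, uses it instead of the session total as the percentage denominator. A standalone arithmetic sketch of the difference, with invented sample counts:

#include <stdio.h>

int main(void)
{
        double total_samples  = 1000000;   /* whole session */
        double parent_samples = 50000;     /* this hist entry (cumulated with --children) */
        double child_hits     = 25000;     /* one callchain branch */

        printf("CHAIN_GRAPH_ABS: %.2f%%\n", 100.0 * child_hits / total_samples);
        printf("CHAIN_GRAPH_REL: %.2f%%\n", 100.0 * child_hits / parent_samples);
        return 0;
}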
index 5eec53a3f4ac7dbedb54776f746705acfa4fa2b4..a34752d284880459a13d6b29290270efd530a9fd 100644 (file)
@@ -105,8 +105,14 @@ libperf-y += scripting-engines/
 
 libperf-$(CONFIG_ZLIB) += zlib.o
 libperf-$(CONFIG_LZMA) += lzma.o
+libperf-y += demangle-java.o
+libperf-$(CONFIG_LIBELF) += jitdump.o
+libperf-$(CONFIG_LIBELF) += genelf.o
+libperf-$(CONFIG_LIBELF) += genelf_debug.o
 
 CFLAGS_config.o   += -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))"
+# avoid compiler warnings in 32-bit mode
+CFLAGS_genelf_debug.o  += -Wno-packed
 
 $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
        $(call rule_mkdir)
index 360fda01f3b0d17369254d03fe16daf24f30c698..ec164fe70718df1480b02733d8701c7ab2b74297 100644 (file)
@@ -478,10 +478,11 @@ void auxtrace_heap__pop(struct auxtrace_heap *heap)
                         heap_array[last].ordinal);
 }
 
-size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
+size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
+                                      struct perf_evlist *evlist)
 {
        if (itr)
-               return itr->info_priv_size(itr);
+               return itr->info_priv_size(itr, evlist);
        return 0;
 }
 
@@ -852,7 +853,7 @@ int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
        int err;
 
        pr_debug2("Synthesizing auxtrace information\n");
-       priv_size = auxtrace_record__info_priv_size(itr);
+       priv_size = auxtrace_record__info_priv_size(itr, session->evlist);
        ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
        if (!ev)
                return -ENOMEM;
index b86f90db1352a6c8635e3ea5d02aa3c21bccc323..e5a8e2d4f2af4e9b717be7ce27c587f569983027 100644 (file)
@@ -293,7 +293,8 @@ struct auxtrace_record {
        int (*recording_options)(struct auxtrace_record *itr,
                                 struct perf_evlist *evlist,
                                 struct record_opts *opts);
-       size_t (*info_priv_size)(struct auxtrace_record *itr);
+       size_t (*info_priv_size)(struct auxtrace_record *itr,
+                                struct perf_evlist *evlist);
        int (*info_fill)(struct auxtrace_record *itr,
                         struct perf_session *session,
                         struct auxtrace_info_event *auxtrace_info,
@@ -429,7 +430,8 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
 int auxtrace_record__options(struct auxtrace_record *itr,
                             struct perf_evlist *evlist,
                             struct record_opts *opts);
-size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr);
+size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr,
+                                      struct perf_evlist *evlist);
 int auxtrace_record__info_fill(struct auxtrace_record *itr,
                               struct perf_session *session,
                               struct auxtrace_info_event *auxtrace_info,
index 6a7e273a514a642b30a477c3119696dc7fa09975..b28100ee1732a5b4fd8c5ee8db327fb54f5b167b 100644 (file)
@@ -211,6 +211,7 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
        dsos__for_each_with_build_id(pos, &machine->dsos.head) {
                const char *name;
                size_t name_len;
+               bool in_kernel = false;
 
                if (!pos->hit)
                        continue;
@@ -227,8 +228,11 @@ static int machine__write_buildid_table(struct machine *machine, int fd)
                        name_len = pos->long_name_len + 1;
                }
 
+               in_kernel = pos->kernel ||
+                               is_kernel_module(name,
+                                       PERF_RECORD_MISC_CPUMODE_UNKNOWN);
                err = write_buildid(name, name_len, pos->build_id, machine->pid,
-                                   pos->kernel ? kmisc : umisc, fd);
+                                   in_kernel ? kmisc : umisc, fd);
                if (err)
                        break;
        }
index fa935093a599429011fa214c24645fd0230e3b3a..9bcf2bed3a6d1b7369ee4deee7f38e9c4abab06d 100644 (file)
@@ -8,6 +8,10 @@
 #include <linux/bitmap.h>
 #include "asm/bug.h"
 
+static int max_cpu_num;
+static int max_node_num;
+static int *cpunode_map;
+
 static struct cpu_map *cpu_map__default_new(void)
 {
        struct cpu_map *cpus;
@@ -486,6 +490,32 @@ out:
                pr_err("Failed to read max nodes, using default of %d\n", max_node_num);
 }
 
+int cpu__max_node(void)
+{
+       if (unlikely(!max_node_num))
+               set_max_node_num();
+
+       return max_node_num;
+}
+
+int cpu__max_cpu(void)
+{
+       if (unlikely(!max_cpu_num))
+               set_max_cpu_num();
+
+       return max_cpu_num;
+}
+
+int cpu__get_node(int cpu)
+{
+       if (unlikely(cpunode_map == NULL)) {
+               pr_debug("cpu_map not initialized\n");
+               return -1;
+       }
+
+       return cpunode_map[cpu];
+}
+
 static int init_cpunode_map(void)
 {
        int i;
index 71c41b9efabb3b38dd17a7374413985dfd944f77..81a2562aaa2b02261b88c960997238dc9e0925ab 100644 (file)
@@ -57,37 +57,11 @@ static inline bool cpu_map__empty(const struct cpu_map *map)
        return map ? map->map[0] == -1 : true;
 }
 
-int max_cpu_num;
-int max_node_num;
-int *cpunode_map;
-
 int cpu__setup_cpunode_map(void);
 
-static inline int cpu__max_node(void)
-{
-       if (unlikely(!max_node_num))
-               pr_debug("cpu_map not initialized\n");
-
-       return max_node_num;
-}
-
-static inline int cpu__max_cpu(void)
-{
-       if (unlikely(!max_cpu_num))
-               pr_debug("cpu_map not initialized\n");
-
-       return max_cpu_num;
-}
-
-static inline int cpu__get_node(int cpu)
-{
-       if (unlikely(cpunode_map == NULL)) {
-               pr_debug("cpu_map not initialized\n");
-               return -1;
-       }
-
-       return cpunode_map[cpu];
-}
+int cpu__max_node(void);
+int cpu__max_cpu(void);
+int cpu__get_node(int cpu);
 
 int cpu_map__build_map(struct cpu_map *cpus, struct cpu_map **res,
                       int (*f)(struct cpu_map *map, int cpu, void *data),
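The cpu__max_cpu(), cpu__max_node() and cpu__get_node() accessors move out of the header and now lazily initialise their backing variables on first use instead of merely warning when they are unset. A standalone sketch of that lazy-init pattern; the sysconf() fallback is an assumption made so the sketch runs anywhere and is not how perf derives the value:

#include <stdio.h>
#include <unistd.h>

static int max_cpu_num;         /* 0 means "not initialised yet" */

static void set_max_cpu_num(void)
{
        long n = sysconf(_SC_NPROCESSORS_CONF);

        max_cpu_num = (n > 0) ? (int)n : 4096;   /* arbitrary fallback */
}

static int cpu__max_cpu(void)
{
        if (!max_cpu_num)
                set_max_cpu_num();

        return max_cpu_num;
}

int main(void)
{
        printf("max cpu number: %d\n", cpu__max_cpu());
        return 0;
}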
diff --git a/tools/perf/util/demangle-java.c b/tools/perf/util/demangle-java.c
new file mode 100644 (file)
index 0000000..3e6062a
--- /dev/null
@@ -0,0 +1,199 @@
+#include <sys/types.h>
+#include <stdio.h>
+#include <string.h>
+#include "util.h"
+#include "debug.h"
+#include "symbol.h"
+
+#include "demangle-java.h"
+
+enum {
+       MODE_PREFIX = 0,
+       MODE_CLASS  = 1,
+       MODE_FUNC   = 2,
+       MODE_TYPE   = 3,
+       MODE_CTYPE  = 3, /* class arg */
+};
+
+#define BASE_ENT(c, n) [c - 'A']=n
+static const char *base_types['Z' - 'A' + 1] = {
+       BASE_ENT('B', "byte" ),
+       BASE_ENT('C', "char" ),
+       BASE_ENT('D', "double" ),
+       BASE_ENT('F', "float" ),
+       BASE_ENT('I', "int" ),
+       BASE_ENT('J', "long" ),
+       BASE_ENT('S', "short" ),
+       BASE_ENT('Z', "bool" ),
+};
+
+/*
+ * Demangle a Java symbol between the str and end positions, storing
+ * up to maxlen characters into buf. The parser starts in mode.
+ *
+ * Use MODE_PREFIX to process entire prototype till end position
+ * Use MODE_TYPE to process return type if str starts on return type char
+ *
+ *  Return:
+ *     success: buf
+ *     error  : NULL
+ */
+static char *
+__demangle_java_sym(const char *str, const char *end, char *buf, int maxlen, int mode)
+{
+       int rlen = 0;
+       int array = 0;
+       int narg = 0;
+       const char *q;
+
+       if (!end)
+               end = str + strlen(str);
+
+       for (q = str; q != end; q++) {
+
+               if (rlen == (maxlen - 1))
+                       break;
+
+               switch (*q) {
+               case 'L':
+                       if (mode == MODE_PREFIX || mode == MODE_CTYPE) {
+                               if (mode == MODE_CTYPE) {
+                                       if (narg)
+                                               rlen += scnprintf(buf + rlen, maxlen - rlen, ", ");
+                                       narg++;
+                               }
+                               rlen += scnprintf(buf + rlen, maxlen - rlen, "class ");
+                               if (mode == MODE_PREFIX)
+                                       mode = MODE_CLASS;
+                       } else
+                               buf[rlen++] = *q;
+                       break;
+               case 'B':
+               case 'C':
+               case 'D':
+               case 'F':
+               case 'I':
+               case 'J':
+               case 'S':
+               case 'Z':
+                       if (mode == MODE_TYPE) {
+                               if (narg)
+                                       rlen += scnprintf(buf + rlen, maxlen - rlen, ", ");
+                               rlen += scnprintf(buf + rlen, maxlen - rlen, "%s", base_types[*q - 'A']);
+                               while (array--)
+                                       rlen += scnprintf(buf + rlen, maxlen - rlen, "[]");
+                               array = 0;
+                               narg++;
+                       } else
+                               buf[rlen++] = *q;
+                       break;
+               case 'V':
+                       if (mode == MODE_TYPE) {
+                               rlen += scnprintf(buf + rlen, maxlen - rlen, "void");
+                               while (array--)
+                                       rlen += scnprintf(buf + rlen, maxlen - rlen, "[]");
+                               array = 0;
+                       } else
+                               buf[rlen++] = *q;
+                       break;
+               case '[':
+                       if (mode != MODE_TYPE)
+                               goto error;
+                       array++;
+                       break;
+               case '(':
+                       if (mode != MODE_FUNC)
+                               goto error;
+                       buf[rlen++] = *q;
+                       mode = MODE_TYPE;
+                       break;
+               case ')':
+                       if (mode != MODE_TYPE)
+                               goto error;
+                       buf[rlen++] = *q;
+                       narg = 0;
+                       break;
+               case ';':
+                       if (mode != MODE_CLASS && mode != MODE_CTYPE)
+                               goto error;
+                       /* safe because at least one other char to process */
+                       if (isalpha(*(q + 1)))
+                               rlen += scnprintf(buf + rlen, maxlen - rlen, ".");
+                       if (mode == MODE_CLASS)
+                               mode = MODE_FUNC;
+                       else if (mode == MODE_CTYPE)
+                               mode = MODE_TYPE;
+                       break;
+               case '/':
+                       if (mode != MODE_CLASS && mode != MODE_CTYPE)
+                               goto error;
+                       rlen += scnprintf(buf + rlen, maxlen - rlen, ".");
+                       break;
+               default :
+                       buf[rlen++] = *q;
+               }
+       }
+       buf[rlen] = '\0';
+       return buf;
+error:
+       return NULL;
+}
+
+/*
+ * Demangle Java function signature (openJDK, not GCJ)
+ * input:
+ *     str: string to parse. String is not modified
+ *    flags: combination of JAVA_DEMANGLE_* flags to modify demangling
+ * return:
+ *     if input can be demangled, then a newly allocated string is returned.
+ *     if input cannot be demangled, then NULL is returned
+ *
+ * Note: caller is responsible for freeing demangled string
+ */
+char *
+java_demangle_sym(const char *str, int flags)
+{
+       char *buf, *ptr;
+       char *p;
+       size_t len, l1 = 0;
+
+       if (!str)
+               return NULL;
+
+       /* find start of return type */
+       p = strrchr(str, ')');
+       if (!p)
+               return NULL;
+
+       /*
+        * expansion factor estimated to 3x
+        */
+       len = strlen(str) * 3 + 1;
+       buf = malloc(len);
+       if (!buf)
+               return NULL;
+
+       buf[0] = '\0';
+       if (!(flags & JAVA_DEMANGLE_NORET)) {
+               /*
+                * get return type first
+                */
+               ptr = __demangle_java_sym(p + 1, NULL, buf, len, MODE_TYPE);
+               if (!ptr)
+                       goto error;
+
+               /* add space between return type and function prototype */
+               l1 = strlen(buf);
+               buf[l1++] = ' ';
+       }
+
+       /* process function up to return type */
+       ptr = __demangle_java_sym(str, p + 1, buf + l1, len - l1, MODE_PREFIX);
+       if (!ptr)
+               goto error;
+
+       return buf;
+error:
+       free(buf);
+       return NULL;
+}
diff --git a/tools/perf/util/demangle-java.h b/tools/perf/util/demangle-java.h
new file mode 100644 (file)
index 0000000..a981c1f
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef __PERF_DEMANGLE_JAVA
+#define __PERF_DEMANGLE_JAVA 1
+/*
+ * demangle function flags
+ */
+#define JAVA_DEMANGLE_NORET    0x1 /* do not process return type */
+
+char * java_demangle_sym(const char *str, int flags);
+
+#endif /* __PERF_DEMANGLE_JAVA */
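A possible driver for the new demangler, assuming it is compiled and linked together with demangle-java.c; the sample mangled symbol and the expected output are illustrative only:

#include <stdio.h>
#include <stdlib.h>
#include "demangle-java.h"

int main(void)
{
        char *s = java_demangle_sym("Ljava/lang/String;indexOf(I)I", 0);

        if (s) {
                /* should print something like:
                 *   int class java.lang.String.indexOf(int)
                 */
                printf("%s\n", s);
                free(s);
        }
        return 0;
}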
index e8e9a9dbf5e395a20d589c80b253c2d869b927c1..8e6395439ca0830cefaaa5b6dbe905ae2af93011 100644 (file)
@@ -52,6 +52,11 @@ int dso__read_binary_type_filename(const struct dso *dso,
                        debuglink--;
                if (*debuglink == '/')
                        debuglink++;
+
+               ret = -1;
+               if (!is_regular_file(filename))
+                       break;
+
                ret = filename__read_debuglink(filename, debuglink,
                                               size - (debuglink - filename));
                }
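The debuglink path is now only consulted for regular files. A standalone sketch of a stat()-based check along those lines; the real is_regular_file() helper lives elsewhere in perf's util code, so this version is only illustrative:

#include <stdio.h>
#include <stdbool.h>
#include <sys/stat.h>

static bool is_regular_file(const char *file)
{
        struct stat st;

        if (stat(file, &st))
                return false;

        return S_ISREG(st.st_mode);
}

int main(void)
{
        const char *path = "/etc/hostname";    /* arbitrary example path */

        printf("%s: %s\n", path,
               is_regular_file(path) ? "regular file" : "not a regular file");
        return 0;
}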
index 85155e91b61ba9b70feaf867b109270df498b9b8..7bad5c3fa7b7175862f5e3bdf38ffca0e1b14ee1 100644 (file)
@@ -282,7 +282,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                strcpy(execname, "");
 
                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
-               n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
+               n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
                       &event->mmap2.start, &event->mmap2.len, prot,
                       &event->mmap2.pgoff, &event->mmap2.maj,
                       &event->mmap2.min,
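The format string above switches from "%s" to "%[^\n]" so that map file names containing spaces are captured whole. A self-contained comparison using an invented /proc/<pid>/maps line:

#include <stdio.h>

int main(void)
{
        const char line[] =
                "00400000-0040c000 r-xp 00000000 fd:01 41038  /opt/my app/bin/cat";
        char with_s[256], with_class[256];

        /* "%s" stops at the first space inside the path... */
        sscanf(line, "%*x-%*x %*s %*x %*x:%*x %*u %255s", with_s);
        /* ...while "%[^\n]" keeps everything up to the end of the line. */
        sscanf(line, "%*x-%*x %*s %*x %*x:%*x %*u %255[^\n]", with_class);

        printf("%%s      -> '%s'\n", with_s);
        printf("%%[^\\n] -> '%s'\n", with_class);
        return 0;
}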
index cdbaf9b51e428ad4537a38ded24ee07bec54e90c..467808680ee41f641892dc9e4f08bb864a1118e0 100644 (file)
@@ -2362,12 +2362,15 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
        case EPERM:
        case EACCES:
                return scnprintf(msg, size,
-                "You may not have permission to collect %sstats.\n"
-                "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
-                " -1 - Not paranoid at all\n"
-                "  0 - Disallow raw tracepoint access for unpriv\n"
-                "  1 - Disallow cpu events for unpriv\n"
-                "  2 - Disallow kernel profiling for unpriv",
+                "You may not have permission to collect %sstats.\n\n"
+                "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
+                "which controls use of the performance events system by\n"
+                "unprivileged users (without CAP_SYS_ADMIN).\n\n"
+                "The default value is 1:\n\n"
+                "  -1: Allow use of (almost) all events by all users\n"
+                ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
+                ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
+                ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
                                 target->system_wide ? "system-wide " : "");
        case ENOENT:
                return scnprintf(msg, size, "The %s event is not supported.",
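The reworked EPERM/EACCES message points users at /proc/sys/kernel/perf_event_paranoid. A tiny standalone reader for that sysctl, purely illustrative and not part of perf:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        int level;

        if (!f) {
                perror("perf_event_paranoid");
                return 1;
        }
        if (fscanf(f, "%d", &level) == 1)
                printf("perf_event_paranoid = %d\n", level);
        fclose(f);
        return 0;
}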
diff --git a/tools/perf/util/genelf.c b/tools/perf/util/genelf.c
new file mode 100644 (file)
index 0000000..c1ef805
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+ * genelf.c
+ * Copyright (C) 2014, Google, Inc
+ *
+ * Contributed by:
+ *     Stephane Eranian <eranian@gmail.com>
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <stddef.h>
+#include <libelf.h>
+#include <string.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <err.h>
+#include <dwarf.h>
+
+#include "perf.h"
+#include "genelf.h"
+#include "../util/jitdump.h"
+
+#define JVMTI
+
+#define BUILD_ID_URANDOM /* different uuid for each run */
+
+#ifdef HAVE_LIBCRYPTO
+
+#define BUILD_ID_MD5
+#undef BUILD_ID_SHA    /* does not seem to work well when linked with Java */
+#undef BUILD_ID_URANDOM /* different uuid for each run */
+
+#ifdef BUILD_ID_SHA
+#include <openssl/sha.h>
+#endif
+
+#ifdef BUILD_ID_MD5
+#include <openssl/md5.h>
+#endif
+#endif
+
+
+typedef struct {
+  unsigned int namesz;  /* Size of entry's owner string */
+  unsigned int descsz;  /* Size of the note descriptor */
+  unsigned int type;    /* Interpretation of the descriptor */
+  char         name[0]; /* Start of the name+desc data */
+} Elf_Note;
+
+struct options {
+       char *output;
+       int fd;
+};
+
+static char shd_string_table[] = {
+       0,
+       '.', 't', 'e', 'x', 't', 0,                     /*  1 */
+       '.', 's', 'h', 's', 't', 'r', 't', 'a', 'b', 0, /*  7 */
+       '.', 's', 'y', 'm', 't', 'a', 'b', 0,           /* 17 */
+       '.', 's', 't', 'r', 't', 'a', 'b', 0,           /* 25 */
+       '.', 'n', 'o', 't', 'e', '.', 'g', 'n', 'u', '.', 'b', 'u', 'i', 'l', 'd', '-', 'i', 'd', 0, /* 33 */
+       '.', 'd', 'e', 'b', 'u', 'g', '_', 'l', 'i', 'n', 'e', 0, /* 52 */
+       '.', 'd', 'e', 'b', 'u', 'g', '_', 'i', 'n', 'f', 'o', 0, /* 64 */
+       '.', 'd', 'e', 'b', 'u', 'g', '_', 'a', 'b', 'b', 'r', 'e', 'v', 0, /* 76 */
+};
+
+static struct buildid_note {
+       Elf_Note desc;          /* descsz: size of build-id, must be multiple of 4 */
+       char     name[4];       /* GNU\0 */
+       char     build_id[20];
+} bnote;
+
+static Elf_Sym symtab[]={
+       /* symbol 0 MUST be the undefined symbol */
+       { .st_name  = 0, /* index in sym_string table */
+         .st_info  = ELF_ST_TYPE(STT_NOTYPE),
+         .st_shndx = 0, /* for now */
+         .st_value = 0x0,
+         .st_other = ELF_ST_VIS(STV_DEFAULT),
+         .st_size  = 0,
+       },
+       { .st_name  = 1, /* index in sym_string table */
+         .st_info  = ELF_ST_BIND(STB_LOCAL) | ELF_ST_TYPE(STT_FUNC),
+         .st_shndx = 1,
+         .st_value = 0, /* for now */
+         .st_other = ELF_ST_VIS(STV_DEFAULT),
+         .st_size  = 0, /* for now */
+       }
+};
+
+#ifdef BUILD_ID_URANDOM
+static void
+gen_build_id(struct buildid_note *note,
+            unsigned long load_addr __maybe_unused,
+            const void *code __maybe_unused,
+            size_t csize __maybe_unused)
+{
+       int fd;
+       size_t sz = sizeof(note->build_id);
+       ssize_t sret;
+
+       fd = open("/dev/urandom", O_RDONLY);
+       if (fd == -1)
+               err(1, "cannot access /dev/urandom for buildid");
+
+       sret = read(fd, note->build_id, sz);
+
+       close(fd);
+
+       if (sret != (ssize_t)sz)
+               memset(note->build_id, 0, sz);
+}
+#endif
+
+#ifdef BUILD_ID_SHA
+static void
+gen_build_id(struct buildid_note *note,
+            unsigned long load_addr __maybe_unused,
+            const void *code,
+            size_t csize)
+{
+       if (sizeof(note->build_id) < SHA_DIGEST_LENGTH)
+               errx(1, "build_id too small for SHA1");
+
+       SHA1(code, csize, (unsigned char *)note->build_id);
+}
+#endif
+
+#ifdef BUILD_ID_MD5
+static void
+gen_build_id(struct buildid_note *note, unsigned long load_addr, const void *code, size_t csize)
+{
+       MD5_CTX context;
+
+       if (sizeof(note->build_id) < 16)
+               errx(1, "build_id too small for MD5");
+
+       MD5_Init(&context);
+       MD5_Update(&context, &load_addr, sizeof(load_addr));
+       MD5_Update(&context, code, csize);
+       MD5_Final((unsigned char *)note->build_id, &context);
+}
+#endif
+
+/*
+ * fd: file descriptor open for writing for the output file
+ * load_addr: code load address (could be zero, just used for buildid)
+ * sym: function name (for native code - used as the symbol)
+ * code: the native code
+ * csize: the code size in bytes
+ */
+int
+jit_write_elf(int fd, uint64_t load_addr, const char *sym,
+             const void *code, int csize,
+             void *debug, int nr_debug_entries)
+{
+       Elf *e;
+       Elf_Data *d;
+       Elf_Scn *scn;
+       Elf_Ehdr *ehdr;
+       Elf_Shdr *shdr;
+       char *strsym = NULL;
+       int symlen;
+       int retval = -1;
+
+       if (elf_version(EV_CURRENT) == EV_NONE) {
+               warnx("ELF initialization failed");
+               return -1;
+       }
+
+       e = elf_begin(fd, ELF_C_WRITE, NULL);
+       if (!e) {
+               warnx("elf_begin failed");
+               goto error;
+       }
+
+       /*
+        * setup ELF header
+        */
+       ehdr = elf_newehdr(e);
+       if (!ehdr) {
+               warnx("cannot get ehdr");
+               goto error;
+       }
+
+       ehdr->e_ident[EI_DATA] = GEN_ELF_ENDIAN;
+       ehdr->e_ident[EI_CLASS] = GEN_ELF_CLASS;
+       ehdr->e_machine = GEN_ELF_ARCH;
+       ehdr->e_type = ET_DYN;
+       ehdr->e_entry = GEN_ELF_TEXT_OFFSET;
+       ehdr->e_version = EV_CURRENT;
+       ehdr->e_shstrndx = 2; /* shdr index for section name */
+
+       /*
+        * setup text section
+        */
+       scn = elf_newscn(e);
+       if (!scn) {
+               warnx("cannot create section");
+               goto error;
+       }
+
+       d = elf_newdata(scn);
+       if (!d) {
+               warnx("cannot get new data");
+               goto error;
+       }
+
+       d->d_align = 16;
+       d->d_off = 0LL;
+       d->d_buf = (void *)code;
+       d->d_type = ELF_T_BYTE;
+       d->d_size = csize;
+       d->d_version = EV_CURRENT;
+
+       shdr = elf_getshdr(scn);
+       if (!shdr) {
+               warnx("cannot get section header");
+               goto error;
+       }
+
+       shdr->sh_name = 1;
+       shdr->sh_type = SHT_PROGBITS;
+       shdr->sh_addr = GEN_ELF_TEXT_OFFSET;
+       shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+       shdr->sh_entsize = 0;
+
+       /*
+        * setup section headers string table
+        */
+       scn = elf_newscn(e);
+       if (!scn) {
+               warnx("cannot create section");
+               goto error;
+       }
+
+       d = elf_newdata(scn);
+       if (!d) {
+               warnx("cannot get new data");
+               goto error;
+       }
+
+       d->d_align = 1;
+       d->d_off = 0LL;
+       d->d_buf = shd_string_table;
+       d->d_type = ELF_T_BYTE;
+       d->d_size = sizeof(shd_string_table);
+       d->d_version = EV_CURRENT;
+
+       shdr = elf_getshdr(scn);
+       if (!shdr) {
+               warnx("cannot get section header");
+               goto error;
+       }
+
+       shdr->sh_name = 7; /* offset of '.shstrtab' in shd_string_table */
+       shdr->sh_type = SHT_STRTAB;
+       shdr->sh_flags = 0;
+       shdr->sh_entsize = 0;
+
+       /*
+        * setup symtab section
+        */
+       symtab[1].st_size  = csize;
+       symtab[1].st_value = GEN_ELF_TEXT_OFFSET;
+
+       scn = elf_newscn(e);
+       if (!scn) {
+               warnx("cannot create section");
+               goto error;
+       }
+
+       d = elf_newdata(scn);
+       if (!d) {
+               warnx("cannot get new data");
+               goto error;
+       }
+
+       d->d_align = 8;
+       d->d_off = 0LL;
+       d->d_buf = symtab;
+       d->d_type = ELF_T_SYM;
+       d->d_size = sizeof(symtab);
+       d->d_version = EV_CURRENT;
+
+       shdr = elf_getshdr(scn);
+       if (!shdr) {
+               warnx("cannot get section header");
+               goto error;
+       }
+
+       shdr->sh_name = 17; /* offset of '.symtab' in shd_string_table */
+       shdr->sh_type = SHT_SYMTAB;
+       shdr->sh_flags = 0;
+       shdr->sh_entsize = sizeof(Elf_Sym);
+       shdr->sh_link = 4; /* index of .strtab section */
+
+       /*
+        * setup symbols string table
+        * 2 = one NUL byte for the empty first entry + the NUL terminating the symbol name
+        */
+       symlen = 2 + strlen(sym);
+       strsym = calloc(1, symlen);
+       if (!strsym) {
+               warnx("cannot allocate strsym");
+               goto error;
+       }
+       strcpy(strsym + 1, sym);
+
+       scn = elf_newscn(e);
+       if (!scn) {
+               warnx("cannot create section");
+               goto error;
+       }
+
+       d = elf_newdata(scn);
+       if (!d) {
+               warnx("cannot get new data");
+               goto error;
+       }
+
+       d->d_align = 1;
+       d->d_off = 0LL;
+       d->d_buf = strsym;
+       d->d_type = ELF_T_BYTE;
+       d->d_size = symlen;
+       d->d_version = EV_CURRENT;
+
+       shdr = elf_getshdr(scn);
+       if (!shdr) {
+               warnx("cannot get section header");
+               goto error;
+       }
+
+       shdr->sh_name = 25; /* offset in shd_string_table */
+       shdr->sh_type = SHT_STRTAB;
+       shdr->sh_flags = 0;
+       shdr->sh_entsize = 0;
+
+       /*
+        * setup build-id section
+        */
+       scn = elf_newscn(e);
+       if (!scn) {
+               warnx("cannot create section");
+               goto error;
+       }
+
+       d = elf_newdata(scn);
+       if (!d) {
+               warnx("cannot get new data");
+               goto error;
+       }
+
+       /*
+        * build-id generation
+        */
+       gen_build_id(&bnote, load_addr, code, csize);
+       bnote.desc.namesz = sizeof(bnote.name); /* must include 0 termination */
+       bnote.desc.descsz = sizeof(bnote.build_id);
+       bnote.desc.type   = NT_GNU_BUILD_ID;
+       strcpy(bnote.name, "GNU");
+
+       d->d_align = 4;
+       d->d_off = 0LL;
+       d->d_buf = &bnote;
+       d->d_type = ELF_T_BYTE;
+       d->d_size = sizeof(bnote);
+       d->d_version = EV_CURRENT;
+
+       shdr = elf_getshdr(scn);
+       if (!shdr) {
+               warnx("cannot get section header");
+               goto error;
+       }
+
+       shdr->sh_name = 33; /* offset in shd_string_table */
+       shdr->sh_type = SHT_NOTE;
+       shdr->sh_addr = 0x0;
+       shdr->sh_flags = SHF_ALLOC;
+       shdr->sh_size = sizeof(bnote);
+       shdr->sh_entsize = 0;
+
+       if (debug && nr_debug_entries) {
+               retval = jit_add_debug_info(e, load_addr, debug, nr_debug_entries);
+               if (retval)
+                       goto error;
+       } else {
+               if (elf_update(e, ELF_C_WRITE) < 0) {
+                       warnx("elf_update 4 failed");
+                       goto error;
+               }
+       }
+
+       retval = 0;
+error:
+       (void)elf_end(e);
+
+       free(strsym);
+
+
+       return retval;
+}
+
+#ifndef JVMTI
+
+static unsigned char x86_code[] = {
+    0xBB, 0x2A, 0x00, 0x00, 0x00, /* movl $42, %ebx */
+    0xB8, 0x01, 0x00, 0x00, 0x00, /* movl $1, %eax */
+    0xCD, 0x80            /* int $0x80 */
+};
+
+static struct options options;
+
+int main(int argc, char **argv)
+{
+       int c, fd, ret;
+
+       while ((c = getopt(argc, argv, "o:h")) != -1) {
+               switch (c) {
+               case 'o':
+                       options.output = optarg;
+                       break;
+               case 'h':
+                       printf("Usage: genelf -o output_file [-h]\n");
+                       return 0;
+               default:
+                       errx(1, "unknown option");
+               }
+       }
+
+       fd = open(options.output, O_CREAT|O_TRUNC|O_RDWR, 0666);
+       if (fd == -1)
+               err(1, "cannot create file %s", options.output);
+
+       ret = jit_write_elf(fd, 0, "main", x86_code, sizeof(x86_code), NULL, 0);
+       close(fd);
+
+       if (ret != 0)
+               unlink(options.output);
+
+       return ret;
+}
+#endif
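
For every section it emits, jit_write_elf() above repeats the same libelf write pattern: elf_newscn(), elf_newdata(), fill in the Elf_Data, fetch the header via elf_getshdr() (elf32/elf64 behind the macro), and a final elf_update(). A minimal self-contained sketch of that pattern follows; it is not perf code, the output file name, section layout and string-table offsets are invented for the example, and most error checks are trimmed. Build with -lelf.

    /* sketch.c - minimal libelf writer following the genelf.c section pattern */
    #include <elf.h>
    #include <err.h>
    #include <fcntl.h>
    #include <libelf.h>
    #include <unistd.h>

    int main(void)
    {
            static const char shstrtab[] = "\0.text\0.shstrtab";
            static const unsigned char code[] = { 0xc3 };   /* x86 "ret" */
            Elf64_Ehdr *ehdr;
            Elf64_Shdr *shdr;
            Elf_Data *d;
            Elf_Scn *scn;
            Elf *e;
            int fd;

            if (elf_version(EV_CURRENT) == EV_NONE)
                    errx(1, "libelf init failed");

            fd = open("sketch.o", O_CREAT | O_TRUNC | O_RDWR, 0644);
            if (fd < 0)
                    err(1, "open");

            e = elf_begin(fd, ELF_C_WRITE, NULL);
            ehdr = elf64_newehdr(e);
            ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
            ehdr->e_machine = EM_X86_64;
            ehdr->e_type = ET_REL;
            ehdr->e_version = EV_CURRENT;
            ehdr->e_shstrndx = 2;           /* .shstrtab is section 2 */

            /* section 1: .text carrying the code bytes */
            scn = elf_newscn(e);
            d = elf_newdata(scn);
            d->d_buf = (void *)code;
            d->d_size = sizeof(code);
            d->d_type = ELF_T_BYTE;
            d->d_align = 16;
            d->d_version = EV_CURRENT;
            shdr = elf64_getshdr(scn);
            shdr->sh_name = 1;              /* ".text" in shstrtab */
            shdr->sh_type = SHT_PROGBITS;
            shdr->sh_flags = SHF_ALLOC | SHF_EXECINSTR;

            /* section 2: the section-name string table itself */
            scn = elf_newscn(e);
            d = elf_newdata(scn);
            d->d_buf = (void *)shstrtab;
            d->d_size = sizeof(shstrtab);
            d->d_type = ELF_T_BYTE;
            d->d_align = 1;
            d->d_version = EV_CURRENT;
            shdr = elf64_getshdr(scn);
            shdr->sh_name = 7;              /* ".shstrtab" in shstrtab */
            shdr->sh_type = SHT_STRTAB;

            if (elf_update(e, ELF_C_WRITE) < 0)
                    errx(1, "elf_update: %s", elf_errmsg(-1));

            elf_end(e);
            close(fd);
            return 0;
    }
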
diff --git a/tools/perf/util/genelf.h b/tools/perf/util/genelf.h
new file mode 100644 (file)
index 0000000..45bf9c6
--- /dev/null
@@ -0,0 +1,67 @@
+#ifndef __GENELF_H__
+#define __GENELF_H__
+
+/* genelf.c */
+extern int jit_write_elf(int fd, uint64_t code_addr, const char *sym,
+                        const void *code, int csize,
+                        void *debug, int nr_debug_entries);
+/* genelf_debug.c */
+extern int jit_add_debug_info(Elf *e, uint64_t code_addr,
+                             void *debug, int nr_debug_entries);
+
+#if   defined(__arm__)
+#define GEN_ELF_ARCH   EM_ARM
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS  ELFCLASS32
+#elif defined(__aarch64__)
+#define GEN_ELF_ARCH   EM_AARCH64
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS  ELFCLASS64
+#elif defined(__x86_64__)
+#define GEN_ELF_ARCH   EM_X86_64
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS  ELFCLASS64
+#elif defined(__i386__)
+#define GEN_ELF_ARCH   EM_386
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS  ELFCLASS32
+#elif defined(__ppcle__)
+#define GEN_ELF_ARCH   EM_PPC
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS  ELFCLASS64
+#elif defined(__powerpc__)
+#define GEN_ELF_ARCH   EM_PPC64
+#define GEN_ELF_ENDIAN ELFDATA2MSB
+#define GEN_ELF_CLASS  ELFCLASS64
+#elif defined(__powerpcle__)
+#define GEN_ELF_ARCH   EM_PPC64
+#define GEN_ELF_ENDIAN ELFDATA2LSB
+#define GEN_ELF_CLASS  ELFCLASS64
+#else
+#error "unsupported architecture"
+#endif
+
+#if GEN_ELF_CLASS == ELFCLASS64
+#define elf_newehdr    elf64_newehdr
+#define elf_getshdr    elf64_getshdr
+#define Elf_Ehdr       Elf64_Ehdr
+#define Elf_Shdr       Elf64_Shdr
+#define Elf_Sym                Elf64_Sym
+#define ELF_ST_TYPE(a) ELF64_ST_TYPE(a)
+#define ELF_ST_BIND(a) ELF64_ST_BIND(a)
+#define ELF_ST_VIS(a)  ELF64_ST_VISIBILITY(a)
+#else
+#define elf_newehdr    elf32_newehdr
+#define elf_getshdr    elf32_getshdr
+#define Elf_Ehdr       Elf32_Ehdr
+#define Elf_Shdr       Elf32_Shdr
+#define Elf_Sym                Elf32_Sym
+#define ELF_ST_TYPE(a) ELF32_ST_TYPE(a)
+#define ELF_ST_BIND(a) ELF32_ST_BIND(a)
+#define ELF_ST_VIS(a)  ELF32_ST_VISIBILITY(a)
+#endif
+
+/* The .text section is directly after the ELF header */
+#define GEN_ELF_TEXT_OFFSET sizeof(Elf_Ehdr)
+
+#endif
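
The per-arch blocks above only select the ELF machine, byte order and class; the derived constant the jitdump path relies on later is GEN_ELF_TEXT_OFFSET, i.e. the jitted code is placed directly behind the ELF header. A trivial standalone check of what that expands to on an ELFCLASS64 target (not perf code):

    #include <elf.h>
    #include <stdio.h>

    int main(void)
    {
            /* On ELFCLASS64 targets GEN_ELF_TEXT_OFFSET is sizeof(Elf64_Ehdr),
             * i.e. 64 bytes; the mmap2 records emitted later use it as pgoff. */
            printf("text offset on ELFCLASS64: %zu bytes\n", sizeof(Elf64_Ehdr));
            return 0;
    }
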
diff --git a/tools/perf/util/genelf_debug.c b/tools/perf/util/genelf_debug.c
new file mode 100644 (file)
index 0000000..5980f7d
--- /dev/null
@@ -0,0 +1,610 @@
+/*
+ * genelf_debug.c
+ * Copyright (C) 2015, Google, Inc
+ *
+ * Contributed by:
+ *     Stephane Eranian <eranian@google.com>
+ *
+ * Released under the GPL v2.
+ *
+ * based on GPLv2 source code from Oprofile
+ * @remark Copyright 2007 OProfile authors
+ * @author Philippe Elie
+ */
+#include <sys/types.h>
+#include <stdio.h>
+#include <getopt.h>
+#include <stddef.h>
+#include <libelf.h>
+#include <string.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <err.h>
+#include <dwarf.h>
+
+#include "perf.h"
+#include "genelf.h"
+#include "../util/jitdump.h"
+
+#define BUFFER_EXT_DFL_SIZE    (4 * 1024)
+
+typedef uint32_t uword;
+typedef uint16_t uhalf;
+typedef int32_t  sword;
+typedef int16_t  shalf;
+typedef uint8_t  ubyte;
+typedef int8_t   sbyte;
+
+struct buffer_ext {
+       size_t cur_pos;
+       size_t max_sz;
+       void *data;
+};
+
+static void
+buffer_ext_dump(struct buffer_ext *be, const char *msg)
+{
+       size_t i;
+       warnx("DUMP for %s", msg);
+       for (i = 0 ; i < be->cur_pos; i++)
+               warnx("%4zu 0x%02x", i, (((char *)be->data)[i]) & 0xff);
+}
+
+static inline int
+buffer_ext_add(struct buffer_ext *be, void *addr, size_t sz)
+{
+       void *tmp;
+       size_t be_sz = be->max_sz;
+
+retry:
+       if ((be->cur_pos + sz) < be_sz) {
+               memcpy(be->data + be->cur_pos, addr, sz);
+               be->cur_pos += sz;
+               return 0;
+       }
+
+       if (!be_sz)
+               be_sz = BUFFER_EXT_DFL_SIZE;
+       else
+               be_sz <<= 1;
+
+       tmp = realloc(be->data, be_sz);
+       if (!tmp)
+               return -1;
+
+       be->data   = tmp;
+       be->max_sz = be_sz;
+
+       goto retry;
+}
+
+static void
+buffer_ext_init(struct buffer_ext *be)
+{
+       be->data = NULL;
+       be->cur_pos = 0;
+       be->max_sz = 0;
+}
+
+static inline size_t
+buffer_ext_size(struct buffer_ext *be)
+{
+       return be->cur_pos;
+}
+
+static inline void *
+buffer_ext_addr(struct buffer_ext *be)
+{
+       return be->data;
+}
+
+struct debug_line_header {
+       // Not counting this field
+       uword total_length;
+       // version number (2 currently)
+       uhalf version;
+       // relative offset from next field to
+       // program statement
+       uword prolog_length;
+       ubyte minimum_instruction_length;
+       ubyte default_is_stmt;
+       // line_base - see DWARF 2 specs
+       sbyte line_base;
+       // line_range - see DWARF 2 specs
+       ubyte line_range;
+       // number of opcode + 1
+       ubyte opcode_base;
+       /* followed by the array of standard opcode argument counts:
+        * ubyte[opcode_base - 1]
+        */
+       /* then follow the search directories: zero-terminated strings,
+        * terminated by an empty string.
+        */
+       /* then follows an array of { filename, LEB128, LEB128, LEB128 }; the
+        * first LEB128 is the directory index (0 means current directory),
+        * then mtime and file size; the last entry is followed by an empty
+        * string.
+        */
+       /* follow the first program statement */
+} __attribute__((packed));
+
+/* The DWARF 2 spec talks only about one possible compilation unit header,
+ * while binutils can handle two flavours of DWARF 2, 32 and 64 bits; this is
+ * not related to the target arch, a 32-bit ELF can hold more than 4 GB of
+ * debug information. For now we handle only the DWARF 2 32-bit comp unit.
+ * It'll only become a problem if we generate more than 4 GB of debug
+ * information.
+ */
+struct compilation_unit_header {
+       uword total_length;
+       uhalf version;
+       uword debug_abbrev_offset;
+       ubyte pointer_size;
+} __attribute__((packed));
+
+#define DW_LNS_num_opcode (DW_LNS_set_isa + 1)
+
+/* field filled at run time are marked with -1 */
+static struct debug_line_header const default_debug_line_header = {
+       .total_length = -1,
+       .version = 2,
+       .prolog_length = -1,
+       .minimum_instruction_length = 1,        /* could be better when min instruction size != 1 */
+       .default_is_stmt = 1,   /* we don't track basic blocks */
+       .line_base = -5,        /* sensible value for line base ... */
+       .line_range = -14,     /* ... and line range are guessed statically */
+       .opcode_base = DW_LNS_num_opcode
+};
+
+static ubyte standard_opcode_length[] =
+{
+       0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1
+};
+#if 0
+{
+       [DW_LNS_advance_pc]   = 1,
+       [DW_LNS_advance_line] = 1,
+       [DW_LNS_set_file] =  1,
+       [DW_LNS_set_column] = 1,
+       [DW_LNS_fixed_advance_pc] = 1,
+       [DW_LNS_set_isa] = 1,
+};
+#endif
+
+/* field filled at run time are marked with -1 */
+static struct compilation_unit_header default_comp_unit_header = {
+       .total_length = -1,
+       .version = 2,
+       .debug_abbrev_offset = 0,     /* we reuse the same abbrev entries for all comp unit */
+       .pointer_size = sizeof(void *)
+};
+
+static void emit_uword(struct buffer_ext *be, uword data)
+{
+       buffer_ext_add(be, &data, sizeof(uword));
+}
+
+static void emit_string(struct buffer_ext *be, const char *s)
+{
+       buffer_ext_add(be, (void *)s, strlen(s) + 1);
+}
+
+static void emit_unsigned_LEB128(struct buffer_ext *be,
+                                unsigned long data)
+{
+       do {
+               ubyte cur = data & 0x7F;
+               data >>= 7;
+               if (data)
+                       cur |= 0x80;
+               buffer_ext_add(be, &cur, 1);
+       } while (data);
+}
+
+static void emit_signed_LEB128(struct buffer_ext *be, long data)
+{
+       int more = 1;
+       int negative = data < 0;
+       int size = sizeof(long) * CHAR_BIT;
+       while (more) {
+               ubyte cur = data & 0x7F;
+               data >>= 7;
+               if (negative)
+                       data |= - (1 << (size - 7));
+               if ((data == 0 && !(cur & 0x40)) ||
+                   (data == -1l && (cur & 0x40)))
+                       more = 0;
+               else
+                       cur |= 0x80;
+               buffer_ext_add(be, &cur, 1);
+       }
+}
+
+static void emit_extended_opcode(struct buffer_ext *be, ubyte opcode,
+                                void *data, size_t data_len)
+{
+       buffer_ext_add(be, (char *)"", 1);
+
+       emit_unsigned_LEB128(be, data_len + 1);
+
+       buffer_ext_add(be, &opcode, 1);
+       buffer_ext_add(be, data, data_len);
+}
+
+static void emit_opcode(struct buffer_ext *be, ubyte opcode)
+{
+       buffer_ext_add(be, &opcode, 1);
+}
+
+static void emit_opcode_signed(struct buffer_ext  *be,
+                              ubyte opcode, long data)
+{
+       buffer_ext_add(be, &opcode, 1);
+       emit_signed_LEB128(be, data);
+}
+
+static void emit_opcode_unsigned(struct buffer_ext *be, ubyte opcode,
+                                unsigned long data)
+{
+       buffer_ext_add(be, &opcode, 1);
+       emit_unsigned_LEB128(be, data);
+}
+
+static void emit_advance_pc(struct buffer_ext *be, unsigned long delta_pc)
+{
+       emit_opcode_unsigned(be, DW_LNS_advance_pc, delta_pc);
+}
+
+static void emit_advance_lineno(struct buffer_ext  *be, long delta_lineno)
+{
+       emit_opcode_signed(be, DW_LNS_advance_line, delta_lineno);
+}
+
+static void emit_lne_end_of_sequence(struct buffer_ext *be)
+{
+       emit_extended_opcode(be, DW_LNE_end_sequence, NULL, 0);
+}
+
+static void emit_set_file(struct buffer_ext *be, unsigned long idx)
+{
+       emit_opcode_unsigned(be, DW_LNS_set_file, idx);
+}
+
+static void emit_lne_define_filename(struct buffer_ext *be,
+                                    const char *filename)
+{
+       buffer_ext_add(be, (void *)"", 1);
+
+       /* LNE length: sub-opcode byte + filename + zero termination + 3 LEB128 bytes for: dir index, mtime, filesize */
+       emit_unsigned_LEB128(be, strlen(filename) + 5);
+       emit_opcode(be, DW_LNE_define_file);
+       emit_string(be, filename);
+       /* directory index 0=do not know */
+        emit_unsigned_LEB128(be, 0);
+       /* last modification date on file 0=do not know */
+        emit_unsigned_LEB128(be, 0);
+       /* filesize 0=do not know */
+        emit_unsigned_LEB128(be, 0);
+}
+
+static void emit_lne_set_address(struct buffer_ext *be,
+                                void *address)
+{
+       emit_extended_opcode(be, DW_LNE_set_address, &address, sizeof(unsigned long));
+}
+
+static ubyte get_special_opcode(struct debug_entry *ent,
+                               unsigned int last_line,
+                               unsigned long last_vma)
+{
+       unsigned int temp;
+       unsigned long delta_addr;
+
+       /*
+        * delta from line_base
+        */
+       temp = (ent->lineno - last_line) - default_debug_line_header.line_base;
+
+       if (temp >= default_debug_line_header.line_range)
+               return 0;
+
+       /*
+        * delta of addresses
+        */
+       delta_addr = (ent->addr - last_vma) / default_debug_line_header.minimum_instruction_length;
+
+       /* This is not sufficient to ensure the opcode will be in [0-255] but
+        * sufficient to ensure when summing with the delta lineno we will
+        * not overflow the unsigned long opcode */
+
+       if (delta_addr <= 256 / default_debug_line_header.line_range) {
+               unsigned long opcode = temp +
+                       (delta_addr * default_debug_line_header.line_range) +
+                       default_debug_line_header.opcode_base;
+
+               return opcode <= 255 ? opcode : 0;
+       }
+       return 0;
+}
+
+static void emit_lineno_info(struct buffer_ext *be,
+                            struct debug_entry *ent, size_t nr_entry,
+                            unsigned long code_addr)
+{
+       size_t i;
+
+       /*
+        * Machine state at start of a statement program
+        * address = 0
+        * file    = 1
+        * line    = 1
+        * column  = 0
+        * is_stmt = default_is_stmt as given in the debug_line_header
+        * basic block = 0
+        * end sequence = 0
+        */
+
+       /* start state of the state machine we take care of */
+       unsigned long last_vma = code_addr;
+       char const  *cur_filename = NULL;
+       unsigned long cur_file_idx = 0;
+       int last_line = 1;
+
+       emit_lne_set_address(be, (void *)code_addr);
+
+       for (i = 0; i < nr_entry; i++, ent = debug_entry_next(ent)) {
+               int need_copy = 0;
+               ubyte special_opcode;
+
+               /*
+                * check if filename changed, if so add it
+                */
+               if (!cur_filename || strcmp(cur_filename, ent->name)) {
+                       emit_lne_define_filename(be, ent->name);
+                       cur_filename = ent->name;
+                       emit_set_file(be, ++cur_file_idx);
+                       need_copy = 1;
+               }
+
+               special_opcode = get_special_opcode(ent, last_line, last_vma);
+               if (special_opcode != 0) {
+                       last_line = ent->lineno;
+                       last_vma  = ent->addr;
+                       emit_opcode(be, special_opcode);
+               } else {
+                       /*
+                        * lines differ, emit line delta
+                        */
+                       if (last_line != ent->lineno) {
+                               emit_advance_lineno(be, ent->lineno - last_line);
+                               last_line = ent->lineno;
+                               need_copy = 1;
+                       }
+                       /*
+                        * addresses differ, emit address delta
+                        */
+                       if (last_vma != ent->addr) {
+                               emit_advance_pc(be, ent->addr - last_vma);
+                               last_vma = ent->addr;
+                               need_copy = 1;
+                       }
+                       /*
+                        * add new row to matrix
+                        */
+                       if (need_copy)
+                               emit_opcode(be, DW_LNS_copy);
+               }
+       }
+}
+
+static void add_debug_line(struct buffer_ext *be,
+       struct debug_entry *ent, size_t nr_entry,
+       unsigned long code_addr)
+{
+       struct debug_line_header * dbg_header;
+       size_t old_size;
+
+       old_size = buffer_ext_size(be);
+
+       buffer_ext_add(be, (void *)&default_debug_line_header,
+                sizeof(default_debug_line_header));
+
+       buffer_ext_add(be, &standard_opcode_length,  sizeof(standard_opcode_length));
+
+       // empty directory entry
+       buffer_ext_add(be, (void *)"", 1);
+
+       // empty filename directory
+       buffer_ext_add(be, (void *)"", 1);
+
+       dbg_header = buffer_ext_addr(be) + old_size;
+       dbg_header->prolog_length = (buffer_ext_size(be) - old_size) -
+               offsetof(struct debug_line_header, minimum_instruction_length);
+
+       emit_lineno_info(be, ent, nr_entry, code_addr);
+
+       emit_lne_end_of_sequence(be);
+
+       dbg_header = buffer_ext_addr(be) + old_size;
+       dbg_header->total_length = (buffer_ext_size(be) - old_size) -
+               offsetof(struct debug_line_header, version);
+}
+
+static void
+add_debug_abbrev(struct buffer_ext *be)
+{
+        emit_unsigned_LEB128(be, 1);
+        emit_unsigned_LEB128(be, DW_TAG_compile_unit);
+        emit_unsigned_LEB128(be, DW_CHILDREN_yes);
+        emit_unsigned_LEB128(be, DW_AT_stmt_list);
+        emit_unsigned_LEB128(be, DW_FORM_data4);
+        emit_unsigned_LEB128(be, 0);
+        emit_unsigned_LEB128(be, 0);
+        emit_unsigned_LEB128(be, 0);
+}
+
+static void
+add_compilation_unit(struct buffer_ext *be,
+                    size_t offset_debug_line)
+{
+       struct compilation_unit_header *comp_unit_header;
+       size_t old_size = buffer_ext_size(be);
+
+       buffer_ext_add(be, &default_comp_unit_header,
+                      sizeof(default_comp_unit_header));
+
+       emit_unsigned_LEB128(be, 1);
+       emit_uword(be, offset_debug_line);
+
+       comp_unit_header = buffer_ext_addr(be) + old_size;
+       comp_unit_header->total_length = (buffer_ext_size(be) - old_size) -
+               offsetof(struct compilation_unit_header, version);
+}
+
+static int
+jit_process_debug_info(uint64_t code_addr,
+                      void *debug, int nr_debug_entries,
+                      struct buffer_ext *dl,
+                      struct buffer_ext *da,
+                      struct buffer_ext *di)
+{
+       struct debug_entry *ent = debug;
+       int i;
+
+       for (i = 0; i < nr_debug_entries; i++) {
+               ent->addr = ent->addr - code_addr;
+               ent = debug_entry_next(ent);
+       }
+       add_compilation_unit(di, buffer_ext_size(dl));
+       add_debug_line(dl, debug, nr_debug_entries, 0);
+       add_debug_abbrev(da);
+       if (0) buffer_ext_dump(da, "abbrev");
+
+       return 0;
+}
+
+int
+jit_add_debug_info(Elf *e, uint64_t code_addr, void *debug, int nr_debug_entries)
+{
+       Elf_Data *d;
+       Elf_Scn *scn;
+       Elf_Shdr *shdr;
+       struct buffer_ext dl, di, da;
+       int ret;
+
+       buffer_ext_init(&dl);
+       buffer_ext_init(&di);
+       buffer_ext_init(&da);
+
+       ret = jit_process_debug_info(code_addr, debug, nr_debug_entries, &dl, &da, &di);
+       if (ret)
+               return -1;
+       /*
+        * setup .debug_line section
+        */
+       scn = elf_newscn(e);
+       if (!scn) {
+               warnx("cannot create section");
+               return -1;
+       }
+
+       d = elf_newdata(scn);
+       if (!d) {
+               warnx("cannot get new data");
+               return -1;
+       }
+
+       d->d_align = 1;
+       d->d_off = 0LL;
+       d->d_buf = buffer_ext_addr(&dl);
+       d->d_type = ELF_T_BYTE;
+       d->d_size = buffer_ext_size(&dl);
+       d->d_version = EV_CURRENT;
+
+       shdr = elf_getshdr(scn);
+       if (!shdr) {
+               warnx("cannot get section header");
+               return -1;
+       }
+
+       shdr->sh_name = 52; /* .debug_line */
+       shdr->sh_type = SHT_PROGBITS;
+       shdr->sh_addr = 0; /* must be zero or == sh_offset -> dynamic object */
+       shdr->sh_flags = 0;
+       shdr->sh_entsize = 0;
+
+       /*
+        * setup .debug_info section
+        */
+       scn = elf_newscn(e);
+       if (!scn) {
+               warnx("cannot create section");
+               return -1;
+       }
+
+       d = elf_newdata(scn);
+       if (!d) {
+               warnx("cannot get new data");
+               return -1;
+       }
+
+       d->d_align = 1;
+       d->d_off = 0LL;
+       d->d_buf = buffer_ext_addr(&di);
+       d->d_type = ELF_T_BYTE;
+       d->d_size = buffer_ext_size(&di);
+       d->d_version = EV_CURRENT;
+
+       shdr = elf_getshdr(scn);
+       if (!shdr) {
+               warnx("cannot get section header");
+               return -1;
+       }
+
+       shdr->sh_name = 64; /* .debug_info */
+       shdr->sh_type = SHT_PROGBITS;
+       shdr->sh_addr = 0; /* must be zero or == sh_offset -> dynamic object */
+       shdr->sh_flags = 0;
+       shdr->sh_entsize = 0;
+
+       /*
+        * setup .debug_abbrev section
+        */
+       scn = elf_newscn(e);
+       if (!scn) {
+               warnx("cannot create section");
+               return -1;
+       }
+
+       d = elf_newdata(scn);
+       if (!d) {
+               warnx("cannot get new data");
+               return -1;
+       }
+
+       d->d_align = 1;
+       d->d_off = 0LL;
+       d->d_buf = buffer_ext_addr(&da);
+       d->d_type = ELF_T_BYTE;
+       d->d_size = buffer_ext_size(&da);
+       d->d_version = EV_CURRENT;
+
+       shdr = elf_getshdr(scn);
+       if (!shdr) {
+               warnx("cannot get section header");
+               return -1;
+       }
+
+       shdr->sh_name = 76; /* .debug_abbrev */
+       shdr->sh_type = SHT_PROGBITS;
+       shdr->sh_addr = 0; /* must be zero or == sh_offset -> dynamic object */
+       shdr->sh_flags = 0;
+       shdr->sh_entsize = 0;
+
+       /*
+        * now we update the ELF image with all the sections
+        */
+       if (elf_update(e, ELF_C_WRITE) < 0) {
+               warnx("elf_update debug failed");
+               return -1;
+       }
+       return 0;
+}
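
emit_unsigned_LEB128() and emit_signed_LEB128() above implement the standard DWARF variable-length integer encodings used throughout the line-number program. The following self-contained sketch (not perf code) reproduces both encodings and checks them against the classic values from the DWARF spec; unlike the in-tree version, it simply assumes an arithmetic right shift for the signed case:

    #include <stdint.h>
    #include <stdio.h>

    /* unsigned LEB128: 7 data bits per byte, high bit set while more follow */
    static int uleb128(unsigned long v, uint8_t *out)
    {
            int n = 0;

            do {
                    uint8_t b = v & 0x7f;

                    v >>= 7;
                    if (v)
                            b |= 0x80;
                    out[n++] = b;
            } while (v);
            return n;
    }

    /* signed LEB128: stop once the remaining value is all sign bits */
    static int sleb128(long v, uint8_t *out)
    {
            int more = 1, n = 0;

            while (more) {
                    uint8_t b = v & 0x7f;

                    v >>= 7;        /* assumes arithmetic shift for negatives */
                    if ((v == 0 && !(b & 0x40)) || (v == -1 && (b & 0x40)))
                            more = 0;
                    else
                            b |= 0x80;
                    out[n++] = b;
            }
            return n;
    }

    int main(void)
    {
            uint8_t buf[16];
            int i, n;

            n = uleb128(624485, buf);       /* DWARF spec example: e5 8e 26 */
            for (i = 0; i < n; i++)
                    printf("%02x ", buf[i]);
            printf("\n");

            n = sleb128(-2, buf);           /* encodes as a single 0x7e byte */
            for (i = 0; i < n; i++)
                    printf("%02x ", buf[i]);
            printf("\n");
            return 0;
    }
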
index c226303e3da045743fe676c7c2ffc0ff13eac674..12f2d794dc28d6bb812a90d85df60f32eb0d7523 100644 (file)
@@ -131,6 +131,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
+                       hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
+                                          symlen);
                }
 
                if (h->mem_info->iaddr.sym) {
@@ -430,8 +432,12 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
                cmp = hist_entry__cmp(he, entry);
 
                if (!cmp) {
-                       if (sample_self)
+                       if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
+                               hists->stats.total_period += period;
+                               if (!he->filtered)
+                                       hists->stats.total_non_filtered_period += period;
+                       }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);
 
@@ -464,7 +470,10 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
        if (!he)
                return NULL;
 
-       hists->nr_entries++;
+       if (sample_self)
+               hists__inc_stats(hists, he);
+       else
+               hists->nr_entries++;
 
        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
@@ -949,10 +958,11 @@ out:
 int64_t
 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
 {
+       struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;
 
-       perf_hpp__for_each_sort_list(fmt) {
+       hists__for_each_sort_list(hists, fmt) {
                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
@@ -964,10 +974,11 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
 int64_t
 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
 {
+       struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;
 
-       perf_hpp__for_each_sort_list(fmt) {
+       hists__for_each_sort_list(hists, fmt) {
                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
@@ -1108,10 +1119,11 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
 
 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
 {
+       struct hists *hists = a->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;
 
-       perf_hpp__for_each_sort_list(fmt) {
+       hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__should_skip(fmt, a->hists))
                        continue;
 
@@ -1161,9 +1173,18 @@ static void __hists__insert_output_entry(struct rb_root *entries,
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
 
-       if (use_callchain)
+       if (use_callchain) {
+               if (callchain_param.mode == CHAIN_GRAPH_REL) {
+                       u64 total = he->stat.period;
+
+                       if (symbol_conf.cumulate_callchain)
+                               total = he->stat_acc->period;
+
+                       min_callchain_hits = total * (callchain_param.min_percent / 100);
+               }
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                      min_callchain_hits, &callchain_param);
+       }
 
        while (*p != NULL) {
                parent = *p;
@@ -1179,21 +1200,15 @@ static void __hists__insert_output_entry(struct rb_root *entries,
        rb_insert_color(&he->rb_node, entries);
 }
 
-void hists__output_resort(struct hists *hists, struct ui_progress *prog)
+static void output_resort(struct hists *hists, struct ui_progress *prog,
+                         bool use_callchain)
 {
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;
-       struct perf_evsel *evsel = hists_to_evsel(hists);
-       bool use_callchain;
 
-       if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
-               use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
-       else
-               use_callchain = symbol_conf.use_callchain;
-
-       min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
+       min_callchain_hits = hists__total_period(hists) * (callchain_param.min_percent / 100);
 
        if (sort__need_collapse)
                root = &hists->entries_collapsed;
@@ -1221,6 +1236,23 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog)
        }
 }
 
+void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+{
+       bool use_callchain;
+
+       if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
+               use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
+       else
+               use_callchain = symbol_conf.use_callchain;
+
+       output_resort(evsel__hists(evsel), prog, use_callchain);
+}
+
+void hists__output_resort(struct hists *hists, struct ui_progress *prog)
+{
+       output_resort(hists, prog, symbol_conf.use_callchain);
+}
+
 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
 {
@@ -1252,28 +1284,6 @@ static bool hists__filter_entry_by_dso(struct hists *hists,
        return false;
 }
 
-void hists__filter_by_dso(struct hists *hists)
-{
-       struct rb_node *nd;
-
-       hists->stats.nr_non_filtered_samples = 0;
-
-       hists__reset_filter_stats(hists);
-       hists__reset_col_len(hists);
-
-       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
-               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-
-               if (symbol_conf.exclude_other && !h->parent)
-                       continue;
-
-               if (hists__filter_entry_by_dso(hists, h))
-                       continue;
-
-               hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
-       }
-}
-
 static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
 {
@@ -1286,25 +1296,6 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
        return false;
 }
 
-void hists__filter_by_thread(struct hists *hists)
-{
-       struct rb_node *nd;
-
-       hists->stats.nr_non_filtered_samples = 0;
-
-       hists__reset_filter_stats(hists);
-       hists__reset_col_len(hists);
-
-       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
-               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-
-               if (hists__filter_entry_by_thread(hists, h))
-                       continue;
-
-               hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
-       }
-}
-
 static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
 {
@@ -1318,25 +1309,6 @@ static bool hists__filter_entry_by_symbol(struct hists *hists,
        return false;
 }
 
-void hists__filter_by_symbol(struct hists *hists)
-{
-       struct rb_node *nd;
-
-       hists->stats.nr_non_filtered_samples = 0;
-
-       hists__reset_filter_stats(hists);
-       hists__reset_col_len(hists);
-
-       for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
-               struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-
-               if (hists__filter_entry_by_symbol(hists, h))
-                       continue;
-
-               hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
-       }
-}
-
 static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he)
 {
@@ -1349,7 +1321,9 @@ static bool hists__filter_entry_by_socket(struct hists *hists,
        return false;
 }
 
-void hists__filter_by_socket(struct hists *hists)
+typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
+
+static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
 {
        struct rb_node *nd;
 
@@ -1361,13 +1335,37 @@ void hists__filter_by_socket(struct hists *hists)
        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
 
-               if (hists__filter_entry_by_socket(hists, h))
+               if (filter(hists, h))
                        continue;
 
-               hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
+               hists__remove_entry_filter(hists, h, type);
        }
 }
 
+void hists__filter_by_thread(struct hists *hists)
+{
+       hists__filter_by_type(hists, HIST_FILTER__THREAD,
+                             hists__filter_entry_by_thread);
+}
+
+void hists__filter_by_dso(struct hists *hists)
+{
+       hists__filter_by_type(hists, HIST_FILTER__DSO,
+                             hists__filter_entry_by_dso);
+}
+
+void hists__filter_by_symbol(struct hists *hists)
+{
+       hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
+                             hists__filter_entry_by_symbol);
+}
+
+void hists__filter_by_socket(struct hists *hists)
+{
+       hists__filter_by_type(hists, HIST_FILTER__SOCKET,
+                             hists__filter_entry_by_socket);
+}
+
 void events_stats__inc(struct events_stats *stats, u32 type)
 {
        ++stats->nr_events[0];
@@ -1583,7 +1581,7 @@ int perf_hist_config(const char *var, const char *value)
        return 0;
 }
 
-int __hists__init(struct hists *hists)
+int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
 {
        memset(hists, 0, sizeof(*hists));
        hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
@@ -1592,6 +1590,7 @@ int __hists__init(struct hists *hists)
        hists->entries = RB_ROOT;
        pthread_mutex_init(&hists->lock, NULL);
        hists->socket_filter = -1;
+       hists->hpp_list = hpp_list;
        return 0;
 }
 
@@ -1628,7 +1627,7 @@ static int hists_evsel__init(struct perf_evsel *evsel)
 {
        struct hists *hists = evsel__hists(evsel);
 
-       __hists__init(hists);
+       __hists__init(hists, &perf_hpp_list);
        return 0;
 }
 
@@ -1647,3 +1646,9 @@ int hists__init(void)
 
        return err;
 }
+
+void perf_hpp_list__init(struct perf_hpp_list *list)
+{
+       INIT_LIST_HEAD(&list->fields);
+       INIT_LIST_HEAD(&list->sorts);
+}
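
The hists.c change above folds four nearly identical filter loops into one walker, hists__filter_by_type(), parameterised by a filter_fn_t predicate. A stripped-down self-contained sketch of the same shape (plain array instead of the rbtree, entry fields and values invented for the example, not perf code):

    #include <stdbool.h>
    #include <stdio.h>

    struct entry { int thread, dso; };

    typedef bool (*filter_fn_t)(const struct entry *e);

    static bool filter_by_thread(const struct entry *e) { return e->thread != 1; }
    static bool filter_by_dso(const struct entry *e)    { return e->dso != 42; }

    /* one generic walker instead of one copy of the loop per filter type */
    static void filter_entries(struct entry *entries, int n, filter_fn_t filter)
    {
            for (int i = 0; i < n; i++) {
                    if (filter(&entries[i]))
                            printf("entry %d filtered out\n", i);
            }
    }

    int main(void)
    {
            struct entry entries[] = { { 1, 42 }, { 2, 42 }, { 1, 7 } };

            filter_entries(entries, 3, filter_by_thread);
            filter_entries(entries, 3, filter_by_dso);
            return 0;
    }
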
index d4ec4822a1038611a7269aa0df2eb37171a647a6..1c7544a8fe1ab9eacbb4c3c2590919c897e027db 100644 (file)
@@ -75,6 +75,7 @@ struct hists {
        u64                     event_stream;
        u16                     col_len[HISTC_NR_COLS];
        int                     socket_filter;
+       struct perf_hpp_list    *hpp_list;
 };
 
 struct hist_entry_iter;
@@ -128,6 +129,7 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
                              struct hists *hists);
 void hist_entry__delete(struct hist_entry *he);
 
+void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog);
 void hists__output_resort(struct hists *hists, struct ui_progress *prog);
 void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
 
@@ -185,7 +187,7 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
 }
 
 int hists__init(void);
-int __hists__init(struct hists *hists);
+int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list);
 
 struct rb_root *hists__get_rotate_entries_in(struct hists *hists);
 bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
@@ -214,28 +216,56 @@ struct perf_hpp_fmt {
                            struct hist_entry *a, struct hist_entry *b);
        int64_t (*sort)(struct perf_hpp_fmt *fmt,
                        struct hist_entry *a, struct hist_entry *b);
+       bool (*equal)(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
+       void (*free)(struct perf_hpp_fmt *fmt);
 
        struct list_head list;
        struct list_head sort_list;
        bool elide;
        int len;
        int user_len;
+       int idx;
 };
 
-extern struct list_head perf_hpp__list;
-extern struct list_head perf_hpp__sort_list;
+struct perf_hpp_list {
+       struct list_head fields;
+       struct list_head sorts;
+};
+
+extern struct perf_hpp_list perf_hpp_list;
+
+void perf_hpp_list__column_register(struct perf_hpp_list *list,
+                                   struct perf_hpp_fmt *format);
+void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
+                                       struct perf_hpp_fmt *format);
+
+static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
+{
+       perf_hpp_list__column_register(&perf_hpp_list, format);
+}
 
-#define perf_hpp__for_each_format(format) \
-       list_for_each_entry(format, &perf_hpp__list, list)
+static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
+{
+       perf_hpp_list__register_sort_field(&perf_hpp_list, format);
+}
+
+#define perf_hpp_list__for_each_format(_list, format) \
+       list_for_each_entry(format, &(_list)->fields, list)
 
-#define perf_hpp__for_each_format_safe(format, tmp)    \
-       list_for_each_entry_safe(format, tmp, &perf_hpp__list, list)
+#define perf_hpp_list__for_each_format_safe(_list, format, tmp)        \
+       list_for_each_entry_safe(format, tmp, &(_list)->fields, list)
 
-#define perf_hpp__for_each_sort_list(format) \
-       list_for_each_entry(format, &perf_hpp__sort_list, sort_list)
+#define perf_hpp_list__for_each_sort_list(_list, format) \
+       list_for_each_entry(format, &(_list)->sorts, sort_list)
 
-#define perf_hpp__for_each_sort_list_safe(format, tmp) \
-       list_for_each_entry_safe(format, tmp, &perf_hpp__sort_list, sort_list)
+#define perf_hpp_list__for_each_sort_list_safe(_list, format, tmp)     \
+       list_for_each_entry_safe(format, tmp, &(_list)->sorts, sort_list)
+
+#define hists__for_each_format(hists, format) \
+       perf_hpp_list__for_each_format((hists)->hpp_list, format)
+
+#define hists__for_each_sort_list(hists, format) \
+       perf_hpp_list__for_each_sort_list((hists)->hpp_list, format)
 
 extern struct perf_hpp_fmt perf_hpp__format[];
 
@@ -254,19 +284,14 @@ enum {
 };
 
 void perf_hpp__init(void);
-void perf_hpp__column_register(struct perf_hpp_fmt *format);
 void perf_hpp__column_unregister(struct perf_hpp_fmt *format);
-void perf_hpp__column_enable(unsigned col);
-void perf_hpp__column_disable(unsigned col);
 void perf_hpp__cancel_cumulate(void);
+void perf_hpp__setup_output_field(struct perf_hpp_list *list);
+void perf_hpp__reset_output_field(struct perf_hpp_list *list);
+void perf_hpp__append_sort_keys(struct perf_hpp_list *list);
 
-void perf_hpp__register_sort_field(struct perf_hpp_fmt *format);
-void perf_hpp__setup_output_field(void);
-void perf_hpp__reset_output_field(void);
-void perf_hpp__append_sort_keys(void);
 
 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format);
-bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b);
 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *format);
 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists);
 
@@ -381,4 +406,6 @@ int parse_filter_percentage(const struct option *opt __maybe_unused,
                            const char *arg, int unset __maybe_unused);
 int perf_hist_config(const char *var, const char *value);
 
+void perf_hpp_list__init(struct perf_hpp_list *list);
+
 #endif /* __PERF_HIST_H */
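
hist.h now attaches a struct perf_hpp_list to each struct hists (the new hpp_list member), so the hists__for_each_*() macros walk per-hists format and sort lists rather than the two former globals. A toy self-contained sketch of that ownership shift (a singly linked list stands in for list_head; all names are invented for the example, not perf code):

    #include <stdio.h>

    struct fmt {
            const char *name;
            struct fmt *next;       /* stand-in for the kernel list_head */
    };

    struct hpp_list {
            struct fmt *fields;     /* output columns */
            struct fmt *sorts;      /* sort keys */
    };

    struct toy_hists {
            struct hpp_list *hpp_list;      /* per-hists list, like hists->hpp_list */
    };

    #define toy_hists__for_each_format(hists, pos) \
            for (pos = (hists)->hpp_list->fields; pos; pos = pos->next)

    int main(void)
    {
            struct fmt sym = { "symbol", NULL };
            struct fmt over = { "overhead", &sym };
            struct hpp_list list = { .fields = &over, .sorts = &over };
            struct toy_hists hists = { .hpp_list = &list };
            struct fmt *f;

            toy_hists__for_each_format(&hists, f)
                    printf("column: %s\n", f->name);
            return 0;
    }
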
index 81a2eb77ba7ff56f7558328437fa32e8345051cd..05d815851be19bd40e00672c913a83ad2003175a 100644 (file)
@@ -2068,6 +2068,15 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
                err = -ENOMEM;
                goto err_free_queues;
        }
+
+       /*
+        * Since this thread will not be kept in any rbtree nor in a
+        * list, initialize its list node so that at thread__put() the
+        * current thread lifetime assumption is kept and we don't
+        * segfault at list_del_init().
+        */
+       INIT_LIST_HEAD(&pt->unknown_thread->node);
+
        err = thread__set_comm(pt->unknown_thread, "unknown", 0);
        if (err)
                goto err_delete_thread;
diff --git a/tools/perf/util/jit.h b/tools/perf/util/jit.h
new file mode 100644 (file)
index 0000000..a1e99da
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef __JIT_H__
+#define __JIT_H__
+
+#include <data.h>
+
+extern int jit_process(struct perf_session *session,
+                      struct perf_data_file *output,
+                      struct machine *machine,
+                      char *filename,
+                      pid_t pid,
+                      u64 *nbytes);
+
+extern int jit_inject_record(const char *filename);
+
+#endif /* __JIT_H__ */
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
new file mode 100644 (file)
index 0000000..99fa5ee
--- /dev/null
@@ -0,0 +1,672 @@
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <byteswap.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+
+#include "util.h"
+#include "event.h"
+#include "debug.h"
+#include "evlist.h"
+#include "symbol.h"
+#include "strlist.h"
+#include <elf.h>
+
+#include "session.h"
+#include "jit.h"
+#include "jitdump.h"
+#include "genelf.h"
+#include "../builtin.h"
+
+struct jit_buf_desc {
+       struct perf_data_file *output;
+       struct perf_session *session;
+       struct machine *machine;
+       union jr_entry   *entry;
+       void             *buf;
+       uint64_t         sample_type;
+       size_t           bufsize;
+       FILE             *in;
+       bool             needs_bswap; /* handles cross-endianness */
+       void             *debug_data;
+       size_t           nr_debug_entries;
+       uint32_t         code_load_count;
+       u64              bytes_written;
+       struct rb_root   code_root;
+       char             dir[PATH_MAX];
+};
+
+struct debug_line_info {
+       unsigned long vma;
+       unsigned int lineno;
+       /* The filename format is unspecified, absolute path, relative etc. */
+       char const filename[0];
+};
+
+struct jit_tool {
+       struct perf_tool tool;
+       struct perf_data_file   output;
+       struct perf_data_file   input;
+       u64 bytes_written;
+};
+
+#define hmax(a, b) ((a) > (b) ? (a) : (b))
+#define get_jit_tool(t) (container_of(tool, struct jit_tool, tool))
+
+static int
+jit_emit_elf(char *filename,
+            const char *sym,
+            uint64_t code_addr,
+            const void *code,
+            int csize,
+            void *debug,
+            int nr_debug_entries)
+{
+       int ret, fd;
+
+       if (verbose > 0)
+               fprintf(stderr, "write ELF image %s\n", filename);
+
+       fd = open(filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
+       if (fd == -1) {
+               pr_warning("cannot create jit ELF %s: %s\n", filename, strerror(errno));
+               return -1;
+       }
+
+        ret = jit_write_elf(fd, code_addr, sym, (const void *)code, csize, debug, nr_debug_entries);
+
+        close(fd);
+
+        if (ret)
+                unlink(filename);
+
+       return ret;
+}
+
+static void
+jit_close(struct jit_buf_desc *jd)
+{
+       if (!(jd && jd->in))
+               return;
+       funlockfile(jd->in);
+       fclose(jd->in);
+       jd->in = NULL;
+}
+
+static int
+jit_open(struct jit_buf_desc *jd, const char *name)
+{
+       struct jitheader header;
+       struct jr_prefix *prefix;
+       ssize_t bs, bsz = 0;
+       void *n, *buf = NULL;
+       int ret, retval = -1;
+
+       jd->in = fopen(name, "r");
+       if (!jd->in)
+               return -1;
+
+       bsz = hmax(sizeof(header), sizeof(*prefix));
+
+       buf = malloc(bsz);
+       if (!buf)
+               goto error;
+
+       /*
+        * protect from writer modifying the file while we are reading it
+        */
+       flockfile(jd->in);
+
+       ret = fread(buf, sizeof(header), 1, jd->in);
+       if (ret != 1)
+               goto error;
+
+       memcpy(&header, buf, sizeof(header));
+
+       if (header.magic != JITHEADER_MAGIC) {
+               if (header.magic != JITHEADER_MAGIC_SW)
+                       goto error;
+               jd->needs_bswap = true;
+       }
+
+       if (jd->needs_bswap) {
+               header.version    = bswap_32(header.version);
+               header.total_size = bswap_32(header.total_size);
+               header.pid        = bswap_32(header.pid);
+               header.elf_mach   = bswap_32(header.elf_mach);
+               header.timestamp  = bswap_64(header.timestamp);
+               header.flags      = bswap_64(header.flags);
+       }
+
+       if (verbose > 2)
+               pr_debug("version=%u\nhdr.size=%u\nts=0x%llx\npid=%d\nelf_mach=%d\n",
+                       header.version,
+                       header.total_size,
+                       (unsigned long long)header.timestamp,
+                       header.pid,
+                       header.elf_mach);
+
+       if (header.flags & JITDUMP_FLAGS_RESERVED) {
+               pr_err("jitdump file contains invalid or unsupported flags 0x%llx\n",
+                      (unsigned long long)header.flags & JITDUMP_FLAGS_RESERVED);
+               goto error;
+       }
+
+       bs = header.total_size - sizeof(header);
+
+       if (bs > bsz) {
+               n = realloc(buf, bs);
+               if (!n)
+                       goto error;
+               buf = n;
+               bsz = bs;
+               /* read the extra header bytes we do not know about */
+               ret = fread(buf, bs, 1, jd->in);
+               if (ret != 1)
+                       goto error;
+       }
+       /*
+        * keep dirname for generating files and mmap records
+        */
+       strcpy(jd->dir, name);
+       dirname(jd->dir);
+
+       return 0;
+error:
+       funlockfile(jd->in);
+       fclose(jd->in);
+       return retval;
+}
+
+static union jr_entry *
+jit_get_next_entry(struct jit_buf_desc *jd)
+{
+       struct jr_prefix *prefix;
+       union jr_entry *jr;
+       void *addr;
+       size_t bs, size;
+       int id, ret;
+
+       if (!(jd && jd->in))
+               return NULL;
+
+       if (jd->buf == NULL) {
+               size_t sz = getpagesize();
+               if (sz < sizeof(*prefix))
+                       sz = sizeof(*prefix);
+
+               jd->buf = malloc(sz);
+               if (jd->buf == NULL)
+                       return NULL;
+
+               jd->bufsize = sz;
+       }
+
+       prefix = jd->buf;
+
+       /*
+        * file is still locked at this point
+        */
+       ret = fread(prefix, sizeof(*prefix), 1, jd->in);
+       if (ret  != 1)
+               return NULL;
+
+       if (jd->needs_bswap) {
+               prefix->id         = bswap_32(prefix->id);
+               prefix->total_size = bswap_32(prefix->total_size);
+               prefix->timestamp  = bswap_64(prefix->timestamp);
+       }
+       id   = prefix->id;
+       size = prefix->total_size;
+
+       bs = (size_t)size;
+       if (bs < sizeof(*prefix))
+               return NULL;
+
+       if (id >= JIT_CODE_MAX) {
+               pr_warning("next_entry: unknown prefix %d, skipping\n", id);
+               return NULL;
+       }
+       if (bs > jd->bufsize) {
+               void *n;
+               n = realloc(jd->buf, bs);
+               if (!n)
+                       return NULL;
+               jd->buf = n;
+               jd->bufsize = bs;
+       }
+
+       addr = ((void *)jd->buf) + sizeof(*prefix);
+
+       ret = fread(addr, bs - sizeof(*prefix), 1, jd->in);
+       if (ret != 1)
+               return NULL;
+
+       jr = (union jr_entry *)jd->buf;
+
+       switch(id) {
+       case JIT_CODE_DEBUG_INFO:
+               if (jd->needs_bswap) {
+                       uint64_t n;
+                       jr->info.code_addr = bswap_64(jr->info.code_addr);
+                       jr->info.nr_entry  = bswap_64(jr->info.nr_entry);
+                       for (n = 0 ; n < jr->info.nr_entry; n++) {
+                               jr->info.entries[n].addr    = bswap_64(jr->info.entries[n].addr);
+                               jr->info.entries[n].lineno  = bswap_32(jr->info.entries[n].lineno);
+                               jr->info.entries[n].discrim = bswap_32(jr->info.entries[n].discrim);
+                       }
+               }
+               break;
+       case JIT_CODE_CLOSE:
+               break;
+       case JIT_CODE_LOAD:
+               if (jd->needs_bswap) {
+                       jr->load.pid       = bswap_32(jr->load.pid);
+                       jr->load.tid       = bswap_32(jr->load.tid);
+                       jr->load.vma       = bswap_64(jr->load.vma);
+                       jr->load.code_addr = bswap_64(jr->load.code_addr);
+                       jr->load.code_size = bswap_64(jr->load.code_size);
+                       jr->load.code_index = bswap_64(jr->load.code_index);
+               }
+               jd->code_load_count++;
+               break;
+       case JIT_CODE_MOVE:
+               if (jd->needs_bswap) {
+                       jr->move.pid           = bswap_32(jr->move.pid);
+                       jr->move.tid           = bswap_32(jr->move.tid);
+                       jr->move.vma           = bswap_64(jr->move.vma);
+                       jr->move.old_code_addr = bswap_64(jr->move.old_code_addr);
+                       jr->move.new_code_addr = bswap_64(jr->move.new_code_addr);
+                       jr->move.code_size     = bswap_64(jr->move.code_size);
+                       jr->move.code_index    = bswap_64(jr->move.code_index);
+               }
+               break;
+       case JIT_CODE_MAX:
+       default:
+               return NULL;
+       }
+       return jr;
+}
+
+static int
+jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
+{
+       ssize_t size;
+
+       size = perf_data_file__write(jd->output, event, event->header.size);
+       if (size < 0)
+               return -1;
+
+       jd->bytes_written += size;
+       return 0;
+}
+
+static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
+{
+       struct perf_sample sample;
+       union perf_event *event;
+       struct perf_tool *tool = jd->session->tool;
+       uint64_t code, addr;
+       uintptr_t uaddr;
+       char *filename;
+       struct stat st;
+       size_t size;
+       u16 idr_size;
+       const char *sym;
+       uint32_t count;
+       int ret, csize;
+       pid_t pid, tid;
+       struct {
+               u32 pid, tid;
+               u64 time;
+       } *id;
+
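+       /*
+        * JIT_CODE_LOAD record layout: fixed fields, then the function
+        * name string, with the native code at the end of the record.
+        */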
+       pid   = jr->load.pid;
+       tid   = jr->load.tid;
+       csize = jr->load.code_size;
+       addr  = jr->load.code_addr;
+       sym   = (void *)((unsigned long)jr + sizeof(jr->load));
+       code  = (unsigned long)jr + jr->load.p.total_size - csize;
+       count = jr->load.code_index;
+       idr_size = jd->machine->id_hdr_size;
+
+       event = calloc(1, sizeof(*event) + idr_size);
+       if (!event)
+               return -1;
+
+       filename = event->mmap2.filename;
+       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+                       jd->dir,
+                       pid,
+                       count);
+
+       size++; /* for \0 */
+
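+       /* pad the filename so the overall mmap2 record size stays u64 aligned */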
+       size = PERF_ALIGN(size, sizeof(u64));
+       uaddr = (uintptr_t)code;
+       ret = jit_emit_elf(filename, sym, addr, (const void *)uaddr, csize, jd->debug_data, jd->nr_debug_entries);
+
+       if (jd->debug_data && jd->nr_debug_entries) {
+               free(jd->debug_data);
+               jd->debug_data = NULL;
+               jd->nr_debug_entries = 0;
+       }
+
+       if (ret) {
+               free(event);
+               return -1;
+       }
+       if (stat(filename, &st))
+               memset(&st, 0, sizeof(st));
+
+       event->mmap2.header.type = PERF_RECORD_MMAP2;
+       event->mmap2.header.misc = PERF_RECORD_MISC_USER;
+       event->mmap2.header.size = (sizeof(event->mmap2) -
+                       (sizeof(event->mmap2.filename) - size) + idr_size);
+
+       event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
+       event->mmap2.start = addr;
+       event->mmap2.len   = csize;
+       event->mmap2.pid   = pid;
+       event->mmap2.tid   = tid;
+       event->mmap2.ino   = st.st_ino;
+       event->mmap2.maj   = major(st.st_dev);
+       event->mmap2.min   = minor(st.st_dev);
+       event->mmap2.prot  = st.st_mode;
+       event->mmap2.flags = MAP_SHARED;
+       event->mmap2.ino_generation = 1;
+
+       id = (void *)((unsigned long)event + event->mmap2.header.size - idr_size);
+       if (jd->sample_type & PERF_SAMPLE_TID) {
+               id->pid  = pid;
+               id->tid  = tid;
+       }
+       if (jd->sample_type & PERF_SAMPLE_TIME)
+               id->time = jr->load.p.timestamp;
+
+       /*
+        * create pseudo sample to induce dso hit increment
+        * use first address as sample address
+        */
+       memset(&sample, 0, sizeof(sample));
+       sample.pid  = pid;
+       sample.tid  = tid;
+       sample.time = id->time;
+       sample.ip   = addr;
+
+       ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
+       if (ret)
+               return ret;
+
+       ret = jit_inject_event(jd, event);
+       /*
+        * mark dso as use to generate buildid in the header
+        */
+       if (!ret)
+               build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);
+
+       return ret;
+}
+
+static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
+{
+       struct perf_sample sample;
+       union perf_event *event;
+       struct perf_tool *tool = jd->session->tool;
+       char *filename;
+       size_t size;
+       struct stat st;
+       u16 idr_size;
+       int ret;
+       pid_t pid, tid;
+       struct {
+               u32 pid, tid;
+               u64 time;
+       } *id;
+
+       pid = jr->move.pid;
+       tid = jr->move.tid;
+       idr_size = jd->machine->id_hdr_size;
+
+       /*
+        * +16 to account for sample_id_all (hack)
+        */
+       event = calloc(1, sizeof(*event) + 16);
+       if (!event)
+               return -1;
+
+       filename = event->mmap2.filename;
+       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+                jd->dir,
+                pid,
+                jr->move.code_index);
+
+       size++; /* for \0 */
+
+       if (stat(filename, &st))
+               memset(&st, 0, sizeof(st));
+
+       size = PERF_ALIGN(size, sizeof(u64));
+
+       event->mmap2.header.type = PERF_RECORD_MMAP2;
+       event->mmap2.header.misc = PERF_RECORD_MISC_USER;
+       event->mmap2.header.size = (sizeof(event->mmap2) -
+                       (sizeof(event->mmap2.filename) - size) + idr_size);
+       event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
+       event->mmap2.start = jr->move.new_code_addr;
+       event->mmap2.len   = jr->move.code_size;
+       event->mmap2.pid   = pid;
+       event->mmap2.tid   = tid;
+       event->mmap2.ino   = st.st_ino;
+       event->mmap2.maj   = major(st.st_dev);
+       event->mmap2.min   = minor(st.st_dev);
+       event->mmap2.prot  = st.st_mode;
+       event->mmap2.flags = MAP_SHARED;
+       event->mmap2.ino_generation = 1;
+
+       id = (void *)((unsigned long)event + event->mmap2.header.size - idr_size);
+       if (jd->sample_type & PERF_SAMPLE_TID) {
+               id->pid  = pid;
+               id->tid  = tid;
+       }
+       if (jd->sample_type & PERF_SAMPLE_TIME)
+               id->time = jr->move.p.timestamp;
+
+       /*
+        * create pseudo sample to induce dso hit increment
+        * use first address as sample address
+        */
+       memset(&sample, 0, sizeof(sample));
+       sample.pid  = pid;
+       sample.tid  = tid;
+       sample.time = id->time;
+       sample.ip   = jr->move.new_code_addr;
+
+       ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
+       if (ret)
+               return ret;
+
+       ret = jit_inject_event(jd, event);
+       if (!ret)
+               build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);
+
+       return ret;
+}
+
+static int jit_repipe_debug_info(struct jit_buf_desc *jd, union jr_entry *jr)
+{
+       void *data;
+       size_t sz;
+
+       if (!(jd && jr))
+               return -1;
+
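+       /* copy everything past the fixed fields: the entries and their file name strings */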
+       sz  = jr->prefix.total_size - sizeof(jr->info);
+       data = malloc(sz);
+       if (!data)
+               return -1;
+
+       memcpy(data, &jr->info.entries, sz);
+
+       jd->debug_data       = data;
+
+       /*
+        * we must use nr_entry instead of size here because
+        * we cannot distinguish actual entry from padding otherwise
+        */
+       jd->nr_debug_entries = jr->info.nr_entry;
+
+       return 0;
+}
+
+static int
+jit_process_dump(struct jit_buf_desc *jd)
+{
+       union jr_entry *jr;
+       int ret = 0;
+
+       while ((jr = jit_get_next_entry(jd))) {
+               switch (jr->prefix.id) {
+               case JIT_CODE_LOAD:
+                       ret = jit_repipe_code_load(jd, jr);
+                       break;
+               case JIT_CODE_MOVE:
+                       ret = jit_repipe_code_move(jd, jr);
+                       break;
+               case JIT_CODE_DEBUG_INFO:
+                       ret = jit_repipe_debug_info(jd, jr);
+                       break;
+               default:
+                       ret = 0;
+                       continue;
+               }
+       }
+       return ret;
+}
+
+static int
+jit_inject(struct jit_buf_desc *jd, char *path)
+{
+       int ret;
+
+       if (verbose > 0)
+               fprintf(stderr, "injecting: %s\n", path);
+
+       ret = jit_open(jd, path);
+       if (ret)
+               return -1;
+
+       ret = jit_process_dump(jd);
+
+       jit_close(jd);
+
+       if (verbose > 0)
+               fprintf(stderr, "injected: %s (%d)\n", path, ret);
+
+       return 0;
+}
+
+/*
+ * The file name must match the pattern .../jit-XXXX.dump,
+ * where XXXX is the PID of the process that did the mmap(),
+ * as captured in the RECORD_MMAP record.
+ */
+static int
+jit_detect(char *mmap_name, pid_t pid)
+{
+       char *p;
+       char *end = NULL;
+       pid_t pid2;
+
+       if (verbose > 2)
+               fprintf(stderr, "jit marker trying : %s\n", mmap_name);
+       /*
+        * get file name
+        */
+       p = strrchr(mmap_name, '/');
+       if (!p)
+               return -1;
+
+       /*
+        * match prefix
+        */
+       if (strncmp(p, "/jit-", 5))
+               return -1;
+
+       /*
+        * skip prefix
+        */
+       p += 5;
+
+       /*
+        * must be followed by a pid
+        */
+       if (!isdigit(*p))
+               return -1;
+
+       pid2 = (int)strtol(p, &end, 10);
+       if (!end)
+               return -1;
+
+       /*
+        * pid does not match mmap pid
+        * pid==0 in system-wide mode (synthesized)
+        */
+       if (pid && pid2 != pid)
+               return -1;
+       /*
+        * validate suffix
+        */
+       if (strcmp(end, ".dump"))
+               return -1;
+
+       if (verbose > 0)
+               fprintf(stderr, "jit marker found: %s\n", mmap_name);
+
+       return 0;
+}
+
+int
+jit_process(struct perf_session *session,
+           struct perf_data_file *output,
+           struct machine *machine,
+           char *filename,
+           pid_t pid,
+           u64 *nbytes)
+{
+       struct perf_evsel *first;
+       struct jit_buf_desc jd;
+       int ret;
+
+       /*
+        * first, detect marker mmap (i.e., the jitdump mmap)
+        */
+       if (jit_detect(filename, pid))
+               return -1;
+
+       memset(&jd, 0, sizeof(jd));
+
+       jd.session = session;
+       jd.output  = output;
+       jd.machine = machine;
+
+       /*
+        * track sample_type to compute id_all layout
+        * perf sets the same sample type to all events as of now
+        */
+       first = perf_evlist__first(session->evlist);
+       jd.sample_type = first->attr.sample_type;
+
+       *nbytes = 0;
+
+       ret = jit_inject(&jd, filename);
+       if (!ret)
+               *nbytes = jd.bytes_written;
+
+       return ret;
+}
diff --git a/tools/perf/util/jitdump.h b/tools/perf/util/jitdump.h
new file mode 100644 (file)
index 0000000..b66c1f5
--- /dev/null
@@ -0,0 +1,124 @@
+/*
+ * jitdump.h: jitted code info encapsulation file format
+ *
+ * Adapted from OProfile GPLv2 support jitdump.h:
+ * Copyright 2007 OProfile authors
+ * Jens Wilke
+ * Daniel Hansel
+ * Copyright IBM Corporation 2007
+ */
+#ifndef JITDUMP_H
+#define JITDUMP_H
+
+#include <sys/time.h>
+#include <time.h>
+#include <stdint.h>
+#include <string.h>    /* strlen(), used by debug_entry_next() */
+
+/* JiTD */
+#define JITHEADER_MAGIC                0x4A695444
+#define JITHEADER_MAGIC_SW     0x4454694A
+
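+/* number of bytes needed to pad x up to the next multiple of 8 */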
+#define PADDING_8ALIGNED(x) ((((x) + 7) & 7) ^ 7)
+
+#define JITHEADER_VERSION 1
+
+enum jitdump_flags_bits {
+       JITDUMP_FLAGS_MAX_BIT,
+};
+
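+/* every flag bit at or above JITDUMP_FLAGS_MAX_BIT is reserved */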
+#define JITDUMP_FLAGS_RESERVED (JITDUMP_FLAGS_MAX_BIT < 64 ? \
+                               (~((1ULL << JITDUMP_FLAGS_MAX_BIT) - 1)) : 0)
+
+struct jitheader {
+       uint32_t magic;         /* characters "jItD" */
+       uint32_t version;       /* header version */
+       uint32_t total_size;    /* total size of header */
+       uint32_t elf_mach;      /* elf mach target */
+       uint32_t pad1;          /* reserved */
+       uint32_t pid;           /* JIT process id */
+       uint64_t timestamp;     /* timestamp */
+       uint64_t flags;         /* flags */
+};
+
+enum jit_record_type {
+       JIT_CODE_LOAD           = 0,
+       JIT_CODE_MOVE           = 1,
+       JIT_CODE_DEBUG_INFO     = 2,
+       JIT_CODE_CLOSE          = 3,
+
+       JIT_CODE_MAX,
+};
+
+/* record prefix (mandatory in each record) */
+struct jr_prefix {
+       uint32_t id;
+       uint32_t total_size;
+       uint64_t timestamp;
+};
+
+struct jr_code_load {
+       struct jr_prefix p;
+
+       uint32_t pid;
+       uint32_t tid;
+       uint64_t vma;
+       uint64_t code_addr;
+       uint64_t code_size;
+       uint64_t code_index;
+};
+
+struct jr_code_close {
+       struct jr_prefix p;
+};
+
+struct jr_code_move {
+       struct jr_prefix p;
+
+       uint32_t pid;
+       uint32_t tid;
+       uint64_t vma;
+       uint64_t old_code_addr;
+       uint64_t new_code_addr;
+       uint64_t code_size;
+       uint64_t code_index;
+};
+
+struct debug_entry {
+       uint64_t addr;
+       int lineno;         /* source line number starting at 1 */
+       int discrim;        /* column discriminator, 0 is default */
+       const char name[0]; /* null terminated filename, \xff\0 if same as previous entry */
+};
+
+struct jr_code_debug_info {
+       struct jr_prefix p;
+
+       uint64_t code_addr;
+       uint64_t nr_entry;
+       struct debug_entry entries[0];
+};
+
+union jr_entry {
+       struct jr_code_debug_info info;
+       struct jr_code_close close;
+       struct jr_code_load load;
+       struct jr_code_move move;
+       struct jr_prefix prefix;
+};
+
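+/* debug entries are variable sized: fixed fields followed by a NUL-terminated file name */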
+static inline struct debug_entry *
+debug_entry_next(struct debug_entry *ent)
+{
+       void *a = ent + 1;
+       size_t l = strlen(ent->name) + 1;
+       return a + l;
+}
+
+static inline char *
+debug_entry_file(struct debug_entry *ent)
+{
+       void *a = ent + 1;
+       return a;
+}
+
+#endif /* !JITDUMP_H */
index ae825d4ec110fcfe6a06ef5e4daf50b03a214a87..d01e73592f6e34347f0c26531b58aa6ce1d57d52 100644 (file)
@@ -122,6 +122,7 @@ void exit_event_decode_key(struct perf_kvm_stat *kvm,
 
 bool kvm_exit_event(struct perf_evsel *evsel);
 bool kvm_entry_event(struct perf_evsel *evsel);
+int setup_kvm_events_tp(struct perf_kvm_stat *kvm);
 
 #define define_exit_reasons_table(name, symbols)       \
        static struct exit_reasons_table name[] = {     \
@@ -133,8 +134,13 @@ bool kvm_entry_event(struct perf_evsel *evsel);
  */
 int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid);
 
-extern const char * const kvm_events_tp[];
+extern const char *kvm_events_tp[];
 extern struct kvm_reg_events_ops kvm_reg_events_ops[];
 extern const char * const kvm_skip_events[];
+extern const char *vcpu_id_str;
+extern const int decode_str_len;
+extern const char *kvm_exit_reason;
+extern const char *kvm_entry_trace;
+extern const char *kvm_exit_trace;
 
 #endif /* __PERF_KVM_STAT_H */
index 2c2b443df5ba796c43ee12c6c0f2061b5d7c7b80..1a3e45baf97fb575c02afe49707727aff0f628ec 100644 (file)
@@ -179,6 +179,16 @@ struct symbol *machine__find_kernel_symbol(struct machine *machine,
                                       mapp, filter);
 }
 
+static inline
+struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
+                                                  enum map_type type, const char *name,
+                                                  struct map **mapp,
+                                                  symbol_filter_t filter)
+{
+       return map_groups__find_symbol_by_name(&machine->kmaps, type, name,
+                                              mapp, filter);
+}
+
 static inline
 struct symbol *machine__find_kernel_function(struct machine *machine, u64 addr,
                                             struct map **mapp,
index 4f7b0efdde2fa0c0c9f7cab32c6a20809ea47a4f..813d9b272c813b0dd749470f6209aa3a0a0607b4 100644 (file)
@@ -399,6 +399,9 @@ static void tracepoint_error(struct parse_events_error *e, int err,
 {
        char help[BUFSIZ];
 
+       if (!e)
+               return;
+
        /*
         * We get error directly from syscall errno ( > 0),
         * or from encoded pointer's error ( < 0).
index b597bcc8fc781f4fa2c631044bddaab447b756e9..41a9c875e49281450637de5d1240382c4a3ee953 100644 (file)
@@ -153,7 +153,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
        if (fd == -1)
                return -1;
 
-               sret = read(fd, alias->unit, UNIT_MAX_LEN);
+       sret = read(fd, alias->unit, UNIT_MAX_LEN);
        if (sret < 0)
                goto error;
 
index 2be10fb27172727fe15d5f3822ff194a5ad96d95..4ce5c5e18f48cd43777d2eb9c77aea35944440b8 100644 (file)
@@ -686,8 +686,9 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
                pf->fb_ops = NULL;
 #if _ELFUTILS_PREREQ(0, 142)
        } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
-                  pf->cfi != NULL) {
-               if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
+                  (pf->cfi_eh != NULL || pf->cfi_dbg != NULL)) {
+               if ((dwarf_cfi_addrframe(pf->cfi_eh, pf->addr, &frame) != 0 &&
+                    (dwarf_cfi_addrframe(pf->cfi_dbg, pf->addr, &frame) != 0)) ||
                    dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
                        pr_warning("Failed to get call frame on 0x%jx\n",
                                   (uintmax_t)pf->addr);
@@ -1015,8 +1016,7 @@ static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
        return DWARF_CB_OK;
 }
 
-/* Find probe points from debuginfo */
-static int debuginfo__find_probes(struct debuginfo *dbg,
+static int debuginfo__find_probe_location(struct debuginfo *dbg,
                                  struct probe_finder *pf)
 {
        struct perf_probe_point *pp = &pf->pev->point;
@@ -1025,27 +1025,6 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
        Dwarf_Die *diep;
        int ret = 0;
 
-#if _ELFUTILS_PREREQ(0, 142)
-       Elf *elf;
-       GElf_Ehdr ehdr;
-       GElf_Shdr shdr;
-
-       /* Get the call frame information from this dwarf */
-       elf = dwarf_getelf(dbg->dbg);
-       if (elf == NULL)
-               return -EINVAL;
-
-       if (gelf_getehdr(elf, &ehdr) == NULL)
-               return -EINVAL;
-
-       if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
-           shdr.sh_type == SHT_PROGBITS) {
-               pf->cfi = dwarf_getcfi_elf(elf);
-       } else {
-               pf->cfi = dwarf_getcfi(dbg->dbg);
-       }
-#endif
-
        off = 0;
        pf->lcache = intlist__new(NULL);
        if (!pf->lcache)
@@ -1108,6 +1087,39 @@ found:
        return ret;
 }
 
+/* Find probe points from debuginfo */
+static int debuginfo__find_probes(struct debuginfo *dbg,
+                                 struct probe_finder *pf)
+{
+       int ret = 0;
+
+#if _ELFUTILS_PREREQ(0, 142)
+       Elf *elf;
+       GElf_Ehdr ehdr;
+       GElf_Shdr shdr;
+
+       if (pf->cfi_eh || pf->cfi_dbg)
+               return debuginfo__find_probe_location(dbg, pf);
+
+       /* Get the call frame information from this dwarf */
+       elf = dwarf_getelf(dbg->dbg);
+       if (elf == NULL)
+               return -EINVAL;
+
+       if (gelf_getehdr(elf, &ehdr) == NULL)
+               return -EINVAL;
+
+       if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
+           shdr.sh_type == SHT_PROGBITS)
+               pf->cfi_eh = dwarf_getcfi_elf(elf);
+
+       pf->cfi_dbg = dwarf_getcfi(dbg->dbg);
+#endif
+
+       ret = debuginfo__find_probe_location(dbg, pf);
+       return ret;
+}
+
 struct local_vars_finder {
        struct probe_finder *pf;
        struct perf_probe_arg *args;
index bed82716e1b44960a0ebc435d0ba1e94ed30730d..0aec7704e39540b51a449cdf395b6a16ab682fba 100644 (file)
@@ -76,7 +76,10 @@ struct probe_finder {
 
        /* For variable searching */
 #if _ELFUTILS_PREREQ(0, 142)
-       Dwarf_CFI               *cfi;           /* Call Frame Information */
+       /* Call Frame Information from .eh_frame */
+       Dwarf_CFI               *cfi_eh;
+       /* Call Frame Information from .debug_frame */
+       Dwarf_CFI               *cfi_dbg;
 #endif
        Dwarf_Op                *fb_ops;        /* Frame base attribute */
        struct perf_probe_arg   *pvar;          /* Current target variable */
index d5636ba94b20f722d3f521614e8fae08b620fad4..40b7a0d0905b8d7f01972c6e38e225f108b15133 100644 (file)
@@ -1149,7 +1149,7 @@ static struct machine *machines__find_for_cpumode(struct machines *machines,
 
                machine = machines__find(machines, pid);
                if (!machine)
-                       machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
+                       machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
                return machine;
        }
 
index ec722346e6ffb8dd6531e94576bb15b548a1487b..de620f7f40f4ad01e30e4385865efd1c3797022a 100644 (file)
@@ -25,6 +25,7 @@ int           sort__has_parent = 0;
 int            sort__has_sym = 0;
 int            sort__has_dso = 0;
 int            sort__has_socket = 0;
+int            sort__has_thread = 0;
 enum sort_mode sort__mode = SORT_MODE__NORMAL;
 
 
@@ -1440,20 +1441,6 @@ struct hpp_sort_entry {
        struct sort_entry *se;
 };
 
-bool perf_hpp__same_sort_entry(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
-{
-       struct hpp_sort_entry *hse_a;
-       struct hpp_sort_entry *hse_b;
-
-       if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
-               return false;
-
-       hse_a = container_of(a, struct hpp_sort_entry, hpp);
-       hse_b = container_of(b, struct hpp_sort_entry, hpp);
-
-       return hse_a->se == hse_b->se;
-}
-
 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
 {
        struct hpp_sort_entry *hse;
@@ -1539,6 +1526,33 @@ static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
        return sort_fn(a, b);
 }
 
+bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
+{
+       return format->header == __sort__hpp_header;
+}
+
+static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
+{
+       struct hpp_sort_entry *hse_a;
+       struct hpp_sort_entry *hse_b;
+
+       if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
+               return false;
+
+       hse_a = container_of(a, struct hpp_sort_entry, hpp);
+       hse_b = container_of(b, struct hpp_sort_entry, hpp);
+
+       return hse_a->se == hse_b->se;
+}
+
+static void hse_free(struct perf_hpp_fmt *fmt)
+{
+       struct hpp_sort_entry *hse;
+
+       hse = container_of(fmt, struct hpp_sort_entry, hpp);
+       free(hse);
+}
+
 static struct hpp_sort_entry *
 __sort_dimension__alloc_hpp(struct sort_dimension *sd)
 {
@@ -1560,6 +1574,8 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
        hse->hpp.cmp = __sort__hpp_cmp;
        hse->hpp.collapse = __sort__hpp_collapse;
        hse->hpp.sort = __sort__hpp_sort;
+       hse->hpp.equal = __sort__hpp_equal;
+       hse->hpp.free = hse_free;
 
        INIT_LIST_HEAD(&hse->hpp.list);
        INIT_LIST_HEAD(&hse->hpp.sort_list);
@@ -1570,9 +1586,23 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
        return hse;
 }
 
-bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
+static void hpp_free(struct perf_hpp_fmt *fmt)
 {
-       return format->header == __sort__hpp_header;
+       free(fmt);
+}
+
+static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
+{
+       struct perf_hpp_fmt *fmt;
+
+       fmt = memdup(hd->fmt, sizeof(*fmt));
+       if (fmt) {
+               INIT_LIST_HEAD(&fmt->list);
+               INIT_LIST_HEAD(&fmt->sort_list);
+               fmt->free = hpp_free;
+       }
+
+       return fmt;
 }
 
 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
@@ -1586,14 +1616,15 @@ static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
        return 0;
 }
 
-static int __sort_dimension__add_hpp_output(struct sort_dimension *sd)
+static int __sort_dimension__add_hpp_output(struct perf_hpp_list *list,
+                                           struct sort_dimension *sd)
 {
        struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
 
        if (hse == NULL)
                return -1;
 
-       perf_hpp__column_register(&hse->hpp);
+       perf_hpp_list__column_register(list, &hse->hpp);
        return 0;
 }
 
@@ -1803,6 +1834,14 @@ bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
        return fmt->cmp == __sort__hde_cmp;
 }
 
+static void hde_free(struct perf_hpp_fmt *fmt)
+{
+       struct hpp_dynamic_entry *hde;
+
+       hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+       free(hde);
+}
+
 static struct hpp_dynamic_entry *
 __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
 {
@@ -1827,6 +1866,7 @@ __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
        hde->hpp.cmp = __sort__hde_cmp;
        hde->hpp.collapse = __sort__hde_cmp;
        hde->hpp.sort = __sort__hde_cmp;
+       hde->hpp.free = hde_free;
 
        INIT_LIST_HEAD(&hde->hpp.list);
        INIT_LIST_HEAD(&hde->hpp.sort_list);
@@ -2064,40 +2104,54 @@ static int __sort_dimension__add(struct sort_dimension *sd)
 
 static int __hpp_dimension__add(struct hpp_dimension *hd)
 {
-       if (!hd->taken) {
-               hd->taken = 1;
+       struct perf_hpp_fmt *fmt;
 
-               perf_hpp__register_sort_field(hd->fmt);
-       }
+       if (hd->taken)
+               return 0;
+
+       fmt = __hpp_dimension__alloc_hpp(hd);
+       if (!fmt)
+               return -1;
+
+       hd->taken = 1;
+       perf_hpp__register_sort_field(fmt);
        return 0;
 }
 
-static int __sort_dimension__add_output(struct sort_dimension *sd)
+static int __sort_dimension__add_output(struct perf_hpp_list *list,
+                                       struct sort_dimension *sd)
 {
        if (sd->taken)
                return 0;
 
-       if (__sort_dimension__add_hpp_output(sd) < 0)
+       if (__sort_dimension__add_hpp_output(list, sd) < 0)
                return -1;
 
        sd->taken = 1;
        return 0;
 }
 
-static int __hpp_dimension__add_output(struct hpp_dimension *hd)
+static int __hpp_dimension__add_output(struct perf_hpp_list *list,
+                                      struct hpp_dimension *hd)
 {
-       if (!hd->taken) {
-               hd->taken = 1;
+       struct perf_hpp_fmt *fmt;
 
-               perf_hpp__column_register(hd->fmt);
-       }
+       if (hd->taken)
+               return 0;
+
+       fmt = __hpp_dimension__alloc_hpp(hd);
+       if (!fmt)
+               return -1;
+
+       hd->taken = 1;
+       perf_hpp_list__column_register(list, fmt);
        return 0;
 }
 
 int hpp_dimension__add_output(unsigned col)
 {
        BUG_ON(col >= PERF_HPP__MAX_INDEX);
-       return __hpp_dimension__add_output(&hpp_sort_dimensions[col]);
+       return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
 }
 
 static int sort_dimension__add(const char *tok,
@@ -2136,6 +2190,8 @@ static int sort_dimension__add(const char *tok,
                        sort__has_dso = 1;
                } else if (sd->entry == &sort_socket) {
                        sort__has_socket = 1;
+               } else if (sd->entry == &sort_thread) {
+                       sort__has_thread = 1;
                }
 
                return __sort_dimension__add(sd);
@@ -2188,6 +2244,26 @@ static int sort_dimension__add(const char *tok,
        return -ESRCH;
 }
 
+static int setup_sort_list(char *str, struct perf_evlist *evlist)
+{
+       char *tmp, *tok;
+       int ret = 0;
+
+       for (tok = strtok_r(str, ", ", &tmp);
+                       tok; tok = strtok_r(NULL, ", ", &tmp)) {
+               ret = sort_dimension__add(tok, evlist);
+               if (ret == -EINVAL) {
+                       error("Invalid --sort key: `%s'", tok);
+                       break;
+               } else if (ret == -ESRCH) {
+                       error("Unknown --sort key: `%s'", tok);
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 static const char *get_default_sort_order(struct perf_evlist *evlist)
 {
        const char *default_sort_orders[] = {
@@ -2282,7 +2358,7 @@ static char *setup_overhead(char *keys)
 
 static int __setup_sorting(struct perf_evlist *evlist)
 {
-       char *tmp, *tok, *str;
+       char *str;
        const char *sort_keys;
        int ret = 0;
 
@@ -2320,17 +2396,7 @@ static int __setup_sorting(struct perf_evlist *evlist)
                }
        }
 
-       for (tok = strtok_r(str, ", ", &tmp);
-                       tok; tok = strtok_r(NULL, ", ", &tmp)) {
-               ret = sort_dimension__add(tok, evlist);
-               if (ret == -EINVAL) {
-                       error("Invalid --sort key: `%s'", tok);
-                       break;
-               } else if (ret == -ESRCH) {
-                       error("Unknown --sort key: `%s'", tok);
-                       break;
-               }
-       }
+       ret = setup_sort_list(str, evlist);
 
        free(str);
        return ret;
@@ -2341,7 +2407,7 @@ void perf_hpp__set_elide(int idx, bool elide)
        struct perf_hpp_fmt *fmt;
        struct hpp_sort_entry *hse;
 
-       perf_hpp__for_each_format(fmt) {
+       perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;
 
@@ -2401,7 +2467,7 @@ void sort__setup_elide(FILE *output)
        struct perf_hpp_fmt *fmt;
        struct hpp_sort_entry *hse;
 
-       perf_hpp__for_each_format(fmt) {
+       perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;
 
@@ -2413,7 +2479,7 @@ void sort__setup_elide(FILE *output)
         * It makes no sense to elide all of sort entries.
         * Just revert them to show up again.
         */
-       perf_hpp__for_each_format(fmt) {
+       perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;
 
@@ -2421,7 +2487,7 @@ void sort__setup_elide(FILE *output)
                        return;
        }
 
-       perf_hpp__for_each_format(fmt) {
+       perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
                if (!perf_hpp__is_sort_entry(fmt))
                        continue;
 
@@ -2429,7 +2495,7 @@ void sort__setup_elide(FILE *output)
        }
 }
 
-static int output_field_add(char *tok)
+static int output_field_add(struct perf_hpp_list *list, char *tok)
 {
        unsigned int i;
 
@@ -2439,7 +2505,7 @@ static int output_field_add(char *tok)
                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;
 
-               return __sort_dimension__add_output(sd);
+               return __sort_dimension__add_output(list, sd);
        }
 
        for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
@@ -2448,7 +2514,7 @@ static int output_field_add(char *tok)
                if (strncasecmp(tok, hd->name, strlen(tok)))
                        continue;
 
-               return __hpp_dimension__add_output(hd);
+               return __hpp_dimension__add_output(list, hd);
        }
 
        for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
@@ -2457,7 +2523,7 @@ static int output_field_add(char *tok)
                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;
 
-               return __sort_dimension__add_output(sd);
+               return __sort_dimension__add_output(list, sd);
        }
 
        for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
@@ -2466,12 +2532,32 @@ static int output_field_add(char *tok)
                if (strncasecmp(tok, sd->name, strlen(tok)))
                        continue;
 
-               return __sort_dimension__add_output(sd);
+               return __sort_dimension__add_output(list, sd);
        }
 
        return -ESRCH;
 }
 
+static int setup_output_list(struct perf_hpp_list *list, char *str)
+{
+       char *tmp, *tok;
+       int ret = 0;
+
+       for (tok = strtok_r(str, ", ", &tmp);
+                       tok; tok = strtok_r(NULL, ", ", &tmp)) {
+               ret = output_field_add(list, tok);
+               if (ret == -EINVAL) {
+                       error("Invalid --fields key: `%s'", tok);
+                       break;
+               } else if (ret == -ESRCH) {
+                       error("Unknown --fields key: `%s'", tok);
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 static void reset_dimensions(void)
 {
        unsigned int i;
@@ -2496,7 +2582,7 @@ bool is_strict_order(const char *order)
 
 static int __setup_output_field(void)
 {
-       char *tmp, *tok, *str, *strp;
+       char *str, *strp;
        int ret = -EINVAL;
 
        if (field_order == NULL)
@@ -2516,17 +2602,7 @@ static int __setup_output_field(void)
                goto out;
        }
 
-       for (tok = strtok_r(strp, ", ", &tmp);
-                       tok; tok = strtok_r(NULL, ", ", &tmp)) {
-               ret = output_field_add(tok);
-               if (ret == -EINVAL) {
-                       error("Invalid --fields key: `%s'", tok);
-                       break;
-               } else if (ret == -ESRCH) {
-                       error("Unknown --fields key: `%s'", tok);
-                       break;
-               }
-       }
+       ret = setup_output_list(&perf_hpp_list, strp);
 
 out:
        free(str);
@@ -2560,9 +2636,9 @@ int setup_sorting(struct perf_evlist *evlist)
                return err;
 
        /* copy sort keys to output fields */
-       perf_hpp__setup_output_field();
+       perf_hpp__setup_output_field(&perf_hpp_list);
        /* and then copy output fields to sort keys */
-       perf_hpp__append_sort_keys();
+       perf_hpp__append_sort_keys(&perf_hpp_list);
 
        return 0;
 }
@@ -2578,5 +2654,5 @@ void reset_output_field(void)
        sort_order = NULL;
 
        reset_dimensions();
-       perf_hpp__reset_output_field();
+       perf_hpp__reset_output_field(&perf_hpp_list);
 }
index 687bbb1244281ba65a99c3cb78f5bcb8a1eaa41b..89a1273fd2da9545c7044547c5c2c2890ab7b387 100644 (file)
@@ -32,9 +32,11 @@ extern const char default_sort_order[];
 extern regex_t ignore_callees_regex;
 extern int have_ignore_callees;
 extern int sort__need_collapse;
+extern int sort__has_dso;
 extern int sort__has_parent;
 extern int sort__has_sym;
 extern int sort__has_socket;
+extern int sort__has_thread;
 extern enum sort_mode sort__mode;
 extern struct sort_entry sort_comm;
 extern struct sort_entry sort_dso;
index 2f901d15e06370d26d579869d351811f4d31f940..4d9b481cf3b6edbb7d6161cbd238709241dedc5b 100644 (file)
@@ -97,7 +97,7 @@ void perf_stat_evsel_id_init(struct perf_evsel *evsel)
        }
 }
 
-void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
+static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
 {
        int i;
        struct perf_stat_evsel *ps = evsel->priv;
@@ -108,7 +108,7 @@ void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
        perf_stat_evsel_id_init(evsel);
 }
 
-int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
+static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
 {
        evsel->priv = zalloc(sizeof(struct perf_stat_evsel));
        if (evsel->priv == NULL)
@@ -117,13 +117,13 @@ int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
        return 0;
 }
 
-void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
+static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
 {
        zfree(&evsel->priv);
 }
 
-int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
-                                     int ncpus, int nthreads)
+static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
+                                            int ncpus, int nthreads)
 {
        struct perf_counts *counts;
 
@@ -134,13 +134,13 @@ int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
        return counts ? 0 : -ENOMEM;
 }
 
-void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
+static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
 {
        perf_counts__delete(evsel->prev_raw_counts);
        evsel->prev_raw_counts = NULL;
 }
 
-int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
+static int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw)
 {
        int ncpus = perf_evsel__nr_cpus(evsel);
        int nthreads = thread_map__nr(evsel->threads);
@@ -310,7 +310,16 @@ int perf_stat_process_counter(struct perf_stat_config *config,
        int i, ret;
 
        aggr->val = aggr->ena = aggr->run = 0;
-       init_stats(ps->res_stats);
+
+       /*
+        * We calculate counter's data every interval,
+        * and the display code shows ps->res_stats
+        * avg value. We need to zero the stats for
+        * interval mode, otherwise overall avg running
+        * averages will be shown for each interval.
+        */
+       if (config->interval)
+               init_stats(ps->res_stats);
 
        if (counter->per_pkg)
                zero_per_pkg(counter);
index 086f4e128d6351f0e0de862c2ebcc44801cbc2f8..2af63c9cb59f55e06e8bc95ca7e0ace2cbfa619d 100644 (file)
@@ -74,16 +74,6 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
 void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
                                   double avg, int cpu, enum aggr_mode aggr);
 
-void perf_evsel__reset_stat_priv(struct perf_evsel *evsel);
-int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel);
-void perf_evsel__free_stat_priv(struct perf_evsel *evsel);
-
-int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel,
-                                     int ncpus, int nthreads);
-void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel);
-
-int perf_evsel__alloc_stats(struct perf_evsel *evsel, bool alloc_raw);
-
 int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw);
 void perf_evlist__free_stats(struct perf_evlist *evlist);
 void perf_evlist__reset_stats(struct perf_evlist *evlist);
index 562b8ebeae5b2414b6bac867c928cb22ee9a5f13..b1dd68f358fcd8e7390b5929ed736a357c7853c1 100644 (file)
@@ -6,6 +6,7 @@
 #include <inttypes.h>
 
 #include "symbol.h"
+#include "demangle-java.h"
 #include "machine.h"
 #include "vdso.h"
 #include <symbol/kallsyms.h>
@@ -1077,6 +1078,8 @@ new_symbol:
                                demangle_flags = DMGL_PARAMS | DMGL_ANSI;
 
                        demangled = bfd_demangle(NULL, elf_name, demangle_flags);
+                       if (demangled == NULL)
+                               demangled = java_demangle_sym(elf_name, JAVA_DEMANGLE_NORET);
                        if (demangled != NULL)
                                elf_name = demangled;
                }
index 3b2de6eb33763b0ab1a23195529fda8d6367eb1d..90cedfa30e43aed4533483d89be652426306df52 100644 (file)
@@ -1466,7 +1466,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
         * Read the build id if possible. This is required for
         * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
         */
-       if (filename__read_build_id(dso->name, build_id, BUILD_ID_SIZE) > 0)
+       if (is_regular_file(name) &&
+           filename__read_build_id(dso->long_name, build_id, BUILD_ID_SIZE) > 0)
                dso__set_build_id(dso, build_id);
 
        /*
@@ -1487,6 +1488,9 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
                                                   root_dir, name, PATH_MAX))
                        continue;
 
+               if (!is_regular_file(name))
+                       continue;
+
                /* Name is now the name of the next image to try */
                if (symsrc__init(ss, dso, name, symtab_type) < 0)
                        continue;
index ead9509835d23e2aee29b9f3548613973a5937fb..b9e2843cfbe77b83a43f165d37491ebc74782781 100644 (file)
@@ -691,3 +691,30 @@ out:
 
        return tip;
 }
+
+bool is_regular_file(const char *file)
+{
+       struct stat st;
+
+       if (stat(file, &st))
+               return false;
+
+       return S_ISREG(st.st_mode);
+}
+
+int fetch_current_timestamp(char *buf, size_t sz)
+{
+       struct timeval tv;
+       struct tm tm;
+       char dt[32];
+
+       if (gettimeofday(&tv, NULL) || !localtime_r(&tv.tv_sec, &tm))
+               return -1;
+
+       if (!strftime(dt, sizeof(dt), "%Y%m%d%H%M%S", &tm))
+               return -1;
+
+       scnprintf(buf, sz, "%s%02u", dt, (unsigned)tv.tv_usec / 10000);
+
+       return 0;
+}
index fe915e616f9b65388e15be963d0ef5cf58c325fd..a8615816a00da5b277ea1ab50a7fbaafc3a5ffaa 100644 (file)
@@ -343,5 +343,7 @@ int fetch_kernel_version(unsigned int *puint,
 #define KVER_PARAM(x)  KVER_VERSION(x), KVER_PATCHLEVEL(x), KVER_SUBLEVEL(x)
 
 const char *perf_tip(const char *dirpath);
+bool is_regular_file(const char *file);
+int fetch_current_timestamp(char *buf, size_t sz);
 
 #endif /* GIT_COMPAT_UTIL_H */
index 7ec7df9e7fc731ab7e118425b5bd01acaab349d5..0c1a7e65bb815a692f9b20cd97b337c181699308 100644 (file)
@@ -113,7 +113,7 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res,
 }
 EXPORT_SYMBOL(__wrap_devm_memremap_pages);
 
-pfn_t __wrap_phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
+pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
 {
        struct nfit_test_resource *nfit_res = get_nfit_res(addr);
 
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
new file mode 100755 (executable)
index 0000000..f79b0e9
--- /dev/null
@@ -0,0 +1,121 @@
+#!/bin/bash
+#
+# Analyze a given results directory for rcuperf performance measurements,
+# looking for ftrace data.  Exits with 0 if data was found, analyzed, and
+# printed.  Intended to be invoked from kvm-recheck-rcuperf.sh after
+# argument checking.
+#
+# Usage: kvm-recheck-rcuperf-ftrace.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2016
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+. tools/testing/selftests/rcutorture/bin/functions.sh
+
+if test "`grep -c 'rcu_exp_grace_period.*start' < $i/console.log`" -lt 100
+then
+       exit 10
+fi
+
+sed -e 's/^\[[^]]*]//' < $i/console.log |
+grep 'us : rcu_exp_grace_period' |
+sed -e 's/us : / : /' |
+tr -d '\015' |
+awk '
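+# Pair each expedited-GP "start" event with its matching "end" event
+# (same task and sequence number) to compute per-grace-period latency.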
+$8 == "start" {
+       if (starttask != "")
+               nlost++;
+       starttask = $1;
+       starttime = $3;
+       startseq = $7;
+}
+
+$8 == "end" {
+       if (starttask == $1 && startseq == $7) {
+               curgpdur = $3 - starttime;
+               gptimes[++n] = curgpdur;
+               gptaskcnt[starttask]++;
+               sum += curgpdur;
+               if (curgpdur > 1000)
+                       print "Long GP " starttime "us to " $3 "us (" curgpdur "us)";
+               starttask = "";
+       } else {
+               # Lost a message or some such, reset.
+               starttask = "";
+               nlost++;
+       }
+}
+
+$8 == "done" {
+       piggybackcnt[$1]++;
+}
+
+END {
+       newNR = asort(gptimes);
+       if (newNR <= 0) {
+               print "No ftrace records found???"
+               exit 10;
+       }
+       pct50 = int(newNR * 50 / 100);
+       if (pct50 < 1)
+               pct50 = 1;
+       pct90 = int(newNR * 90 / 100);
+       if (pct90 < 1)
+               pct90 = 1;
+       pct99 = int(newNR * 99 / 100);
+       if (pct99 < 1)
+               pct99 = 1;
+       div = 10 ** int(log(gptimes[pct90]) / log(10) + .5) / 100;
+       print "Histogram bucket size: " div;
+       last = gptimes[1] - 10;
+       count = 0;
+       for (i = 1; i <= newNR; i++) {
+               current = div * int(gptimes[i] / div);
+               if (last == current) {
+                       count++;
+               } else {
+                       if (count > 0)
+                               print last, count;
+                       count = 1;
+                       last = current;
+               }
+       }
+       if (count > 0)
+               print last, count;
+       print "Distribution of grace periods across tasks:";
+       for (i in gptaskcnt) {
+               print "\t" i, gptaskcnt[i];
+               nbatches += gptaskcnt[i];
+       }
+       ngps = nbatches;
+       print "Distribution of piggybacking across tasks:";
+       for (i in piggybackcnt) {
+               print "\t" i, piggybackcnt[i];
+               ngps += piggybackcnt[i];
+       }
+       print "Average grace-period duration: " sum / newNR " microseconds";
+       print "Minimum grace-period duration: " gptimes[1];
+       print "50th percentile grace-period duration: " gptimes[pct50];
+       print "90th percentile grace-period duration: " gptimes[pct90];
+       print "99th percentile grace-period duration: " gptimes[pct99];
+       print "Maximum grace-period duration: " gptimes[newNR];
+       print "Grace periods: " ngps + 0 " Batches: " nbatches + 0 " Ratio: " ngps / nbatches " Lost: " nlost + 0;
+       print "Computed from ftrace data.";
+}'
+exit 0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
new file mode 100755 (executable)
index 0000000..8f3121a
--- /dev/null
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# Analyze a given results directory for rcuperf performance measurements.
+#
+# Usage: kvm-recheck-rcuperf.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2016
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+if test -d $i
+then
+       :
+else
+       echo Unreadable results directory: $i
+       exit 1
+fi
+PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
+. tools/testing/selftests/rcutorture/bin/functions.sh
+
+if kvm-recheck-rcuperf-ftrace.sh $i
+then
+       # ftrace data was successfully analyzed, call it good!
+       exit 0
+fi
+
+configfile=`echo $i | sed -e 's/^.*\///'`
+
+sed -e 's/^\[[^]]*]//' < $i/console.log |
+awk '
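+# Collect the writer-duration samples reported by rcuperf, converted
+# to microseconds, along with the overall grace-period/batch counts.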
+/-perf: .* gps: .* batches:/ {
+       ngps = $9;
+       nbatches = $11;
+}
+
+/-perf: .*writer-duration/ {
+       gptimes[++n] = $5 / 1000.;
+       sum += $5 / 1000.;
+}
+
+END {
+       newNR = asort(gptimes);
+       if (newNR <= 0) {
+               print "No rcuperf records found???"
+               exit;
+       }
+       pct50 = int(newNR * 50 / 100);
+       if (pct50 < 1)
+               pct50 = 1;
+       pct90 = int(newNR * 90 / 100);
+       if (pct90 < 1)
+               pct90 = 1;
+       pct99 = int(newNR * 99 / 100);
+       if (pct99 < 1)
+               pct99 = 1;
+       div = 10 ** int(log(gptimes[pct90]) / log(10) + .5) / 100;
+       print "Histogram bucket size: " div;
+       last = gptimes[1] - 10;
+       count = 0;
+       for (i = 1; i <= newNR; i++) {
+               current = div * int(gptimes[i] / div);
+               if (last == current) {
+                       count++;
+               } else {
+                       if (count > 0)
+                               print last, count;
+                       count = 1;
+                       last = current;
+               }
+       }
+       if (count > 0)
+               print last, count;
+       print "Average grace-period duration: " sum / newNR " microseconds";
+       print "Minimum grace-period duration: " gptimes[1];
+       print "50th percentile grace-period duration: " gptimes[pct50];
+       print "90th percentile grace-period duration: " gptimes[pct90];
+       print "99th percentile grace-period duration: " gptimes[pct99];
+       print "Maximum grace-period duration: " gptimes[newNR];
+       print "Grace periods: " ngps + 0 " Batches: " nbatches + 0 " Ratio: " ngps / nbatches;
+       print "Computed from rcuperf printk output.";
+}'
index d86bdd6b6cc2df3148adf7bacff4c6a014edc3cc..f659346d335854328fd1af1e662c21179b4b305c 100755 (executable)
@@ -48,7 +48,10 @@ do
                                cat $i/Make.oldconfig.err
                        fi
                        parse-build.sh $i/Make.out $configfile
-                       parse-torture.sh $i/console.log $configfile
+                       if test "$TORTURE_SUITE" != rcuperf
+                       then
+                               parse-torture.sh $i/console.log $configfile
+                       fi
                        parse-console.sh $i/console.log $configfile
                        if test -r $i/Warnings
                        then
index 4a431767f77a0215096d3cc5421439a857175783..c33cb582b3dcbf78ade31fc012e4fb0300c435e1 100755 (executable)
@@ -156,7 +156,7 @@ do
                shift
                ;;
        --torture)
-               checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--'
+               checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\|rcuperf\)$' '^--'
                TORTURE_SUITE=$2
                shift
                ;;
index 844787a0d7bed618511a022f09700f8738018c11..5eb49b7f864c7cfd0ffa84b1f9975ae66d74f14c 100755 (executable)
@@ -33,7 +33,7 @@ if grep -Pq '\x00' < $file
 then
        print_warning Console output contains nul bytes, old qemu still running?
 fi
-egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|Stall ended before state dump start' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $1.diags
+egrep 'Badness|WARNING:|Warn|BUG|===========|Call Trace:|Oops:|detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state' < $file | grep -v 'ODEBUG: ' | grep -v 'Warning: unable to open an initial console' > $1.diags
 if test -s $1.diags
 then
        print_warning Assertion failure in $file $title
@@ -64,10 +64,12 @@ then
        then
                summary="$summary  lockdep: $n_badness"
        fi
-       n_stalls=`egrep -c 'detected stalls on CPUs/tasks:|Stall ended before state dump start' $1`
+       n_stalls=`egrep -c 'detected stalls on CPUs/tasks:|self-detected stall on CPU|Stall ended before state dump start|\?\?\? Writer stall state' $1`
        if test "$n_stalls" -ne 0
        then
                summary="$summary  Stalls: $n_stalls"
        fi
        print_warning Summary: $summary
+else
+       rm $1.diags
 fi
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST b/tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST
new file mode 100644 (file)
index 0000000..c9f56cf
--- /dev/null
@@ -0,0 +1 @@
+TREE
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon b/tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon
new file mode 100644 (file)
index 0000000..a09816b
--- /dev/null
@@ -0,0 +1,2 @@
+CONFIG_RCU_PERF_TEST=y
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/TREE b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE
new file mode 100644 (file)
index 0000000..a312f67
--- /dev/null
@@ -0,0 +1,20 @@
+CONFIG_SMP=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_LOCKING=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TRACE=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
new file mode 100644 (file)
index 0000000..34f2a1b
--- /dev/null
@@ -0,0 +1,52 @@
+#!/bin/bash
+#
+# Torture-suite-dependent shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2015
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# rcuperf_param_nreaders bootparam-string
+#
+# Adds nreaders rcuperf module parameter if not already specified.
+rcuperf_param_nreaders () {
+       if ! echo "$1" | grep -q "rcuperf.nreaders"
+       then
+               echo rcuperf.nreaders=-1
+       fi
+}
+
+# rcuperf_param_nwriters bootparam-string
+#
+# Adds nwriters rcuperf module parameter if not already specified.
+rcuperf_param_nwriters () {
+       if ! echo "$1" | grep -q "rcuperf.nwriters"
+       then
+               echo rcuperf.nwriters=-1
+       fi
+}
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+       echo $1 `rcuperf_param_nreaders "$1"` \
+               `rcuperf_param_nwriters "$1"` \
+               rcuperf.perf_runnable=1 \
+               rcuperf.shutdown=1 \
+               rcuperf.verbose=1
+}
index 72cacf5383dd92015a8f30ece6fa8239c7666e23..2b361b8303955b6fa91e9844586ae84843203056 100644 (file)
@@ -153,7 +153,7 @@ int main(void)
 
                alarmcount = 0;
                if (timer_create(alarm_clock_id, &se, &tm1) == -1) {
-                       printf("timer_create failled, %s unspported?\n",
+                       printf("timer_create failed, %s unsupported?\n",
                                        clockstring(alarm_clock_id));
                        break;
                }
index e86d937cc22c04cb36aecfe4fb4af0c5deed3825..60fe3c569bd90f9cbfdc9664720c598e9430e8a3 100644 (file)
@@ -45,7 +45,17 @@ static inline int ksft_exit_fail(void)
 }
 #endif
 
-#define NSEC_PER_SEC 1000000000L
+#define NSEC_PER_SEC 1000000000LL
+#define USEC_PER_SEC 1000000LL
+
+#define ADJ_SETOFFSET 0x0100
+
+#include <sys/syscall.h>
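+/* invoke the syscall directly rather than relying on a libc wrapper */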
+static int clock_adjtime(clockid_t id, struct timex *tx)
+{
+       return syscall(__NR_clock_adjtime, id, tx);
+}
+
 
 /* clear NTP time_status & time_state */
 int clear_time_state(void)
@@ -193,10 +203,137 @@ out:
 }
 
 
+int set_offset(long long offset, int use_nano)
+{
+       struct timex tmx = {};
+       int ret;
+
+       tmx.modes = ADJ_SETOFFSET;
+       if (use_nano) {
+               tmx.modes |= ADJ_NANO;
+
+               tmx.time.tv_sec = offset / NSEC_PER_SEC;
+               tmx.time.tv_usec = offset % NSEC_PER_SEC;
+
+               if (offset < 0 && tmx.time.tv_usec) {
+                       tmx.time.tv_sec -= 1;
+                       tmx.time.tv_usec += NSEC_PER_SEC;
+               }
+       } else {
+               tmx.time.tv_sec = offset / USEC_PER_SEC;
+               tmx.time.tv_usec = offset % USEC_PER_SEC;
+
+               if (offset < 0 && tmx.time.tv_usec) {
+                       tmx.time.tv_sec -= 1;
+                       tmx.time.tv_usec += USEC_PER_SEC;
+               }
+       }
+
+       ret = clock_adjtime(CLOCK_REALTIME, &tmx);
+       if (ret < 0) {
+               printf("(sec: %ld  usec: %ld) ", tmx.time.tv_sec, tmx.time.tv_usec);
+               printf("[FAIL]\n");
+               return -1;
+       }
+       return 0;
+}
+
+int set_bad_offset(long sec, long usec, int use_nano)
+{
+       struct timex tmx = {};
+       int ret;
+
+       tmx.modes = ADJ_SETOFFSET;
+       if (use_nano)
+               tmx.modes |= ADJ_NANO;
+
+       tmx.time.tv_sec = sec;
+       tmx.time.tv_usec = usec;
+       ret = clock_adjtime(CLOCK_REALTIME, &tmx);
+       if (ret >= 0) {
+               printf("Invalid (sec: %ld  usec: %ld) did not fail! ", tmx.time.tv_sec, tmx.time.tv_usec);
+               printf("[FAIL]\n");
+               return -1;
+       }
+       return 0;
+}
+
+int validate_set_offset(void)
+{
+       printf("Testing ADJ_SETOFFSET... ");
+
+       /* Test valid values */
+       if (set_offset(NSEC_PER_SEC - 1, 1))
+               return -1;
+
+       if (set_offset(-NSEC_PER_SEC + 1, 1))
+               return -1;
+
+       if (set_offset(-NSEC_PER_SEC - 1, 1))
+               return -1;
+
+       if (set_offset(5 * NSEC_PER_SEC, 1))
+               return -1;
+
+       if (set_offset(-5 * NSEC_PER_SEC, 1))
+               return -1;
+
+       if (set_offset(5 * NSEC_PER_SEC + NSEC_PER_SEC / 2, 1))
+               return -1;
+
+       if (set_offset(-5 * NSEC_PER_SEC - NSEC_PER_SEC / 2, 1))
+               return -1;
+
+       if (set_offset(USEC_PER_SEC - 1, 0))
+               return -1;
+
+       if (set_offset(-USEC_PER_SEC + 1, 0))
+               return -1;
+
+       if (set_offset(-USEC_PER_SEC - 1, 0))
+               return -1;
+
+       if (set_offset(5 * USEC_PER_SEC, 0))
+               return -1;
+
+       if (set_offset(-5 * USEC_PER_SEC, 0))
+               return -1;
+
+       if (set_offset(5 * USEC_PER_SEC + USEC_PER_SEC / 2, 0))
+               return -1;
+
+       if (set_offset(-5 * USEC_PER_SEC - USEC_PER_SEC / 2, 0))
+               return -1;
+
+       /* Test invalid values */
+       if (set_bad_offset(0, -1, 1))
+               return -1;
+       if (set_bad_offset(0, -1, 0))
+               return -1;
+       if (set_bad_offset(0, 2 * NSEC_PER_SEC, 1))
+               return -1;
+       if (set_bad_offset(0, 2 * USEC_PER_SEC, 0))
+               return -1;
+       if (set_bad_offset(0, NSEC_PER_SEC, 1))
+               return -1;
+       if (set_bad_offset(0, USEC_PER_SEC, 0))
+               return -1;
+       if (set_bad_offset(0, -NSEC_PER_SEC, 1))
+               return -1;
+       if (set_bad_offset(0, -USEC_PER_SEC, 0))
+               return -1;
+
+       printf("[OK]\n");
+       return 0;
+}
+
 int main(int argc, char **argv)
 {
        if (validate_freq())
                return ksft_exit_fail();
 
+       if (validate_set_offset())
+               return ksft_exit_fail();
+
        return ksft_exit_pass();
 }
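The set_offset() helper above splits a signed offset into whole seconds plus a sub-second remainder and, for negative offsets, borrows one second so that the sub-second field stays non-negative, which is the form ADJ_SETOFFSET expects. A minimal standalone sketch of that normalization for the microsecond case (split_offset() is a hypothetical name, not part of the selftest):

#include <stdio.h>

#define USEC_PER_SEC 1000000LL

/* Split a signed microsecond offset into seconds plus a non-negative
 * microsecond remainder, mirroring the normalization in set_offset(). */
static void split_offset(long long offset, long long *sec, long long *usec)
{
	*sec = offset / USEC_PER_SEC;
	*usec = offset % USEC_PER_SEC;
	if (offset < 0 && *usec) {
		*sec -= 1;              /* borrow one second... */
		*usec += USEC_PER_SEC;  /* ...so usec lands in [0, 999999] */
	}
}

int main(void)
{
	long long sec, usec;

	split_offset(-1500000, &sec, &usec);       /* -1.5 seconds */
	printf("sec=%lld usec=%lld\n", sec, usec); /* sec=-2 usec=500000 */
	return 0;
}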
index d0c473f6585058053d83ea2e6a3eee46321668a1..df4f767f48dafffb615f66769391c447c0e64499 100644 (file)
@@ -4,15 +4,17 @@ include ../lib.mk
 
 .PHONY: all all_32 all_64 warn_32bit_failure clean
 
-TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall
+TARGETS_C_BOTHBITS := single_step_syscall sysret_ss_attrs syscall_nt ptrace_syscall \
+                       check_initial_reg_state
 TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault sigreturn test_syscall_vdso unwind_vdso \
                        test_FCMOV test_FCOMI test_FISTTP \
                        ldt_gdt \
                        vdso_restorer
 
 TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
+TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
 BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
-BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64)
+BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64)
 
 CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
 
@@ -40,7 +42,7 @@ clean:
 $(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c
        $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl -lm
 
-$(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c
+$(TARGETS_C_64BIT_ALL:%=%_64): %_64: %.c
        $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
 # x86_64 users should be encouraged to install 32-bit libraries
@@ -65,3 +67,9 @@ endif
 sysret_ss_attrs_64: thunks.S
 ptrace_syscall_32: raw_syscall_helper_32.S
 test_syscall_vdso_32: thunks_32.S
+
+# check_initial_reg_state is special: it needs a custom entry, and it
+# needs to be static so that its interpreter doesn't destroy its initial
+# state.
+check_initial_reg_state_32: CFLAGS += -Wl,-ereal_start -static
+check_initial_reg_state_64: CFLAGS += -Wl,-ereal_start -static
diff --git a/tools/testing/selftests/x86/check_initial_reg_state.c b/tools/testing/selftests/x86/check_initial_reg_state.c
new file mode 100644 (file)
index 0000000..6aaed9b
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * check_initial_reg_state.c - check that execve sets the correct state
+ * Copyright (c) 2014-2016 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdio.h>
+
+unsigned long ax, bx, cx, dx, si, di, bp, sp, flags;
+unsigned long r8, r9, r10, r11, r12, r13, r14, r15;
+
+asm (
+       ".pushsection .text\n\t"
+       ".type real_start, @function\n\t"
+       ".global real_start\n\t"
+       "real_start:\n\t"
+#ifdef __x86_64__
+       "mov %rax, ax\n\t"
+       "mov %rbx, bx\n\t"
+       "mov %rcx, cx\n\t"
+       "mov %rdx, dx\n\t"
+       "mov %rsi, si\n\t"
+       "mov %rdi, di\n\t"
+       "mov %rbp, bp\n\t"
+       "mov %rsp, sp\n\t"
+       "mov %r8, r8\n\t"
+       "mov %r9, r9\n\t"
+       "mov %r10, r10\n\t"
+       "mov %r11, r11\n\t"
+       "mov %r12, r12\n\t"
+       "mov %r13, r13\n\t"
+       "mov %r14, r14\n\t"
+       "mov %r15, r15\n\t"
+       "pushfq\n\t"
+       "popq flags\n\t"
+#else
+       "mov %eax, ax\n\t"
+       "mov %ebx, bx\n\t"
+       "mov %ecx, cx\n\t"
+       "mov %edx, dx\n\t"
+       "mov %esi, si\n\t"
+       "mov %edi, di\n\t"
+       "mov %ebp, bp\n\t"
+       "mov %esp, sp\n\t"
+       "pushfl\n\t"
+       "popl flags\n\t"
+#endif
+       "jmp _start\n\t"
+       ".size real_start, . - real_start\n\t"
+       ".popsection");
+
+int main()
+{
+       int nerrs = 0;
+
+       if (sp == 0) {
+               printf("[FAIL]\tTest was built incorrectly\n");
+               return 1;
+       }
+
+       if (ax || bx || cx || dx || si || di || bp
+#ifdef __x86_64__
+           || r8 || r9 || r10 || r11 || r12 || r13 || r14 || r15
+#endif
+               ) {
+               printf("[FAIL]\tAll GPRs except SP should be 0\n");
+#define SHOW(x) printf("\t" #x " = 0x%lx\n", x);
+               SHOW(ax);
+               SHOW(bx);
+               SHOW(cx);
+               SHOW(dx);
+               SHOW(si);
+               SHOW(di);
+               SHOW(bp);
+               SHOW(sp);
+#ifdef __x86_64__
+               SHOW(r8);
+               SHOW(r9);
+               SHOW(r10);
+               SHOW(r11);
+               SHOW(r12);
+               SHOW(r13);
+               SHOW(r14);
+               SHOW(r15);
+#endif
+               nerrs++;
+       } else {
+               printf("[OK]\tAll GPRs except SP are 0\n");
+       }
+
+       if (flags != 0x202) {
+               printf("[FAIL]\tFLAGS is 0x%lx, but it should be 0x202\n", flags);
+               nerrs++;
+       } else {
+               printf("[OK]\tFLAGS is 0x202\n");
+       }
+
+       return nerrs ? 1 : 0;
+}
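The expected FLAGS value 0x202 is just the architecturally reserved always-one bit 1 plus IF (bit 9, interrupts enabled); every other flag is expected to be clear right after execve(). A small decode sketch (the macro names echo the kernel's processor-flags definitions but are defined locally here, so this stands alone):

#include <stdio.h>

#define X86_EFLAGS_FIXED	0x0002UL	/* bit 1: reserved, always set */
#define X86_EFLAGS_IF		0x0200UL	/* bit 9: interrupts enabled */

int main(void)
{
	unsigned long flags = 0x202;

	/* 0x202 == X86_EFLAGS_FIXED | X86_EFLAGS_IF, nothing else */
	printf("fixed: %d IF: %d other bits: 0x%lx\n",
	       !!(flags & X86_EFLAGS_FIXED),
	       !!(flags & X86_EFLAGS_IF),
	       flags & ~(X86_EFLAGS_FIXED | X86_EFLAGS_IF));
	return 0;
}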
index 26b7926bda8849a8b931fff6750a3ed8217dae89..ba34f9e96efd6bd760870150dd9cb6ba0eacbd69 100644 (file)
@@ -1,15 +1,19 @@
 #if defined(__i386__) || defined(__x86_64__)
 #define barrier() asm volatile("" ::: "memory")
-#define mb() __sync_synchronize()
-
-#define smp_mb()       mb()
-# define dma_rmb()     barrier()
-# define dma_wmb()     barrier()
-# define smp_rmb()     barrier()
-# define smp_wmb()     barrier()
+#define virt_mb() __sync_synchronize()
+#define virt_rmb() barrier()
+#define virt_wmb() barrier()
+/* Atomic store should be enough, but gcc generates worse code in that case. */
+#define virt_store_mb(var, value)  do { \
+       typeof(var) virt_store_mb_value = (value); \
+       __atomic_exchange(&(var), &virt_store_mb_value, &virt_store_mb_value, \
+                         __ATOMIC_SEQ_CST); \
+       barrier(); \
+} while (0)
 /* Weak barriers should be used. If not - it's a bug */
-# define rmb() abort()
-# define wmb() abort()
+# define mb() abort()
+# define rmb() abort()
+# define wmb() abort()
 #else
 #error Please fill in barrier macros
 #endif
diff --git a/tools/virtio/linux/compiler.h b/tools/virtio/linux/compiler.h
new file mode 100644 (file)
index 0000000..845960e
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef LINUX_COMPILER_H
+#define LINUX_COMPILER_H
+
+#define WRITE_ONCE(var, val) \
+       (*((volatile typeof(val) *)(&(var))) = (val))
+
+#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))
+
+#endif
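These one-liners mirror the kernel's READ_ONCE()/WRITE_ONCE(): routing the access through a volatile-qualified pointer stops the compiler from caching, tearing, or hoisting it, though they say nothing about CPU ordering. A minimal sketch of the intended use in a polling loop (the ready flag and producer() are made up for illustration, and the macros are repeated so the sketch is self-contained):

#include <pthread.h>
#include <stdio.h>

#define WRITE_ONCE(var, val) \
	(*((volatile typeof(val) *)(&(var))) = (val))
#define READ_ONCE(var) (*((volatile typeof(var) *)(&(var))))

static int ready;

static void *producer(void *arg)
{
	WRITE_ONCE(ready, 1);	/* volatile store cannot be elided */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	/* Without READ_ONCE() the compiler could hoist the load and spin forever. */
	while (!READ_ONCE(ready))
		;
	pthread_join(t, NULL);
	printf("flag observed\n");
	return 0;
}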
index 4db7d5691ba71b33d2bda8c85d634fe3781ae6d6..0338499482159883bb3e30452d5afc0f77af577e 100644 (file)
@@ -8,6 +8,7 @@
 #include <assert.h>
 #include <stdarg.h>
 
+#include <linux/compiler.h>
 #include <linux/types.h>
 #include <linux/printk.h>
 #include <linux/bug.h>
diff --git a/tools/virtio/ringtest/Makefile b/tools/virtio/ringtest/Makefile
new file mode 100644 (file)
index 0000000..feaa64a
--- /dev/null
@@ -0,0 +1,22 @@
+all:
+
+all: ring virtio_ring_0_9 virtio_ring_poll
+
+CFLAGS += -Wall
+CFLAGS += -pthread -O2 -ggdb
+LDFLAGS += -pthread -O2 -ggdb
+
+main.o: main.c main.h
+ring.o: ring.c main.h
+virtio_ring_0_9.o: virtio_ring_0_9.c main.h
+virtio_ring_poll.o: virtio_ring_poll.c virtio_ring_0_9.c main.h
+ring: ring.o main.o
+virtio_ring_0_9: virtio_ring_0_9.o main.o
+virtio_ring_poll: virtio_ring_poll.o main.o
+clean:
+       -rm main.o
+       -rm ring.o ring
+       -rm virtio_ring_0_9.o virtio_ring_0_9
+       -rm virtio_ring_poll.o virtio_ring_poll
+
+.PHONY: all clean
diff --git a/tools/virtio/ringtest/README b/tools/virtio/ringtest/README
new file mode 100644 (file)
index 0000000..34e94c4
--- /dev/null
@@ -0,0 +1,2 @@
+Partial implementation of various ring layouts, useful to tune virtio design.
+Uses shared memory heavily.
diff --git a/tools/virtio/ringtest/main.c b/tools/virtio/ringtest/main.c
new file mode 100644 (file)
index 0000000..3a5ff43
--- /dev/null
@@ -0,0 +1,366 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Command line processing and common functions for ring benchmarking.
+ */
+#define _GNU_SOURCE
+#include <getopt.h>
+#include <pthread.h>
+#include <assert.h>
+#include <sched.h>
+#include "main.h"
+#include <sys/eventfd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <limits.h>
+
+int runcycles = 10000000;
+int max_outstanding = INT_MAX;
+int batch = 1;
+
+bool do_sleep = false;
+bool do_relax = false;
+bool do_exit = true;
+
+unsigned ring_size = 256;
+
+static int kickfd = -1;
+static int callfd = -1;
+
+void notify(int fd)
+{
+       unsigned long long v = 1;
+       int r;
+
+       vmexit();
+       r = write(fd, &v, sizeof v);
+       assert(r == sizeof v);
+       vmentry();
+}
+
+void wait_for_notify(int fd)
+{
+       unsigned long long v = 1;
+       int r;
+
+       vmexit();
+       r = read(fd, &v, sizeof v);
+       assert(r == sizeof v);
+       vmentry();
+}
+
+void kick(void)
+{
+       notify(kickfd);
+}
+
+void wait_for_kick(void)
+{
+       wait_for_notify(kickfd);
+}
+
+void call(void)
+{
+       notify(callfd);
+}
+
+void wait_for_call(void)
+{
+       wait_for_notify(callfd);
+}
+
+void set_affinity(const char *arg)
+{
+       cpu_set_t cpuset;
+       int ret;
+       pthread_t self;
+       long int cpu;
+       char *endptr;
+
+       if (!arg)
+               return;
+
+       cpu = strtol(arg, &endptr, 0);
+       assert(!*endptr);
+
+       assert(cpu >= 0 && cpu < CPU_SETSIZE);
+
+       self = pthread_self();
+       CPU_ZERO(&cpuset);
+       CPU_SET(cpu, &cpuset);
+
+       ret = pthread_setaffinity_np(self, sizeof(cpu_set_t), &cpuset);
+       assert(!ret);
+}
+
+static void run_guest(void)
+{
+       int completed_before;
+       int completed = 0;
+       int started = 0;
+       int bufs = runcycles;
+       int spurious = 0;
+       int r;
+       unsigned len;
+       void *buf;
+       int tokick = batch;
+
+       for (;;) {
+               if (do_sleep)
+                       disable_call();
+               completed_before = completed;
+               do {
+                       if (started < bufs &&
+                           started - completed < max_outstanding) {
+                               r = add_inbuf(0, NULL, "Hello, world!");
+                               if (__builtin_expect(r == 0, true)) {
+                                       ++started;
+                                       if (!--tokick) {
+                                               tokick = batch;
+                                               if (do_sleep)
+                                                       kick_available();
+                                       }
+
+                               }
+                       } else
+                               r = -1;
+
+                       /* Flush out completed bufs if any */
+                       if (get_buf(&len, &buf)) {
+                               ++completed;
+                               if (__builtin_expect(completed == bufs, false))
+                                       return;
+                               r = 0;
+                       }
+               } while (r == 0);
+               if (completed == completed_before)
+                       ++spurious;
+               assert(completed <= bufs);
+               assert(started <= bufs);
+               if (do_sleep) {
+                       if (enable_call())
+                               wait_for_call();
+               } else {
+                       poll_used();
+               }
+       }
+}
+
+static void run_host(void)
+{
+       int completed_before;
+       int completed = 0;
+       int spurious = 0;
+       int bufs = runcycles;
+       unsigned len;
+       void *buf;
+
+       for (;;) {
+               if (do_sleep) {
+                       if (enable_kick())
+                               wait_for_kick();
+               } else {
+                       poll_avail();
+               }
+               if (do_sleep)
+                       disable_kick();
+               completed_before = completed;
+               while (__builtin_expect(use_buf(&len, &buf), true)) {
+                       if (do_sleep)
+                               call_used();
+                       ++completed;
+                       if (__builtin_expect(completed == bufs, false))
+                               return;
+               }
+               if (completed == completed_before)
+                       ++spurious;
+               assert(completed <= bufs);
+               if (completed == bufs)
+                       break;
+       }
+}
+
+void *start_guest(void *arg)
+{
+       set_affinity(arg);
+       run_guest();
+       pthread_exit(NULL);
+}
+
+void *start_host(void *arg)
+{
+       set_affinity(arg);
+       run_host();
+       pthread_exit(NULL);
+}
+
+static const char optstring[] = "";
+static const struct option longopts[] = {
+       {
+               .name = "help",
+               .has_arg = no_argument,
+               .val = 'h',
+       },
+       {
+               .name = "host-affinity",
+               .has_arg = required_argument,
+               .val = 'H',
+       },
+       {
+               .name = "guest-affinity",
+               .has_arg = required_argument,
+               .val = 'G',
+       },
+       {
+               .name = "ring-size",
+               .has_arg = required_argument,
+               .val = 'R',
+       },
+       {
+               .name = "run-cycles",
+               .has_arg = required_argument,
+               .val = 'C',
+       },
+       {
+               .name = "outstanding",
+               .has_arg = required_argument,
+               .val = 'o',
+       },
+       {
+               .name = "batch",
+               .has_arg = required_argument,
+               .val = 'b',
+       },
+       {
+               .name = "sleep",
+               .has_arg = no_argument,
+               .val = 's',
+       },
+       {
+               .name = "relax",
+               .has_arg = no_argument,
+               .val = 'x',
+       },
+       {
+               .name = "exit",
+               .has_arg = no_argument,
+               .val = 'e',
+       },
+       {
+       }
+};
+
+static void help(void)
+{
+       fprintf(stderr, "Usage: <test> [--help]"
+               " [--host-affinity H]"
+               " [--guest-affinity G]"
+               " [--ring-size R (default: %d)]"
+               " [--run-cycles C (default: %d)]"
+               " [--batch b]"
+               " [--outstanding o]"
+               " [--sleep]"
+               " [--relax]"
+               " [--exit]"
+               "\n",
+               ring_size,
+               runcycles);
+}
+
+int main(int argc, char **argv)
+{
+       int ret;
+       pthread_t host, guest;
+       void *tret;
+       char *host_arg = NULL;
+       char *guest_arg = NULL;
+       char *endptr;
+       long int c;
+
+       kickfd = eventfd(0, 0);
+       assert(kickfd >= 0);
+       callfd = eventfd(0, 0);
+       assert(callfd >= 0);
+
+       for (;;) {
+               int o = getopt_long(argc, argv, optstring, longopts, NULL);
+               switch (o) {
+               case -1:
+                       goto done;
+               case '?':
+                       help();
+                       exit(2);
+               case 'H':
+                       host_arg = optarg;
+                       break;
+               case 'G':
+                       guest_arg = optarg;
+                       break;
+               case 'R':
+                       ring_size = strtol(optarg, &endptr, 0);
+                       assert(ring_size && !(ring_size & (ring_size - 1)));
+                       assert(!*endptr);
+                       break;
+               case 'C':
+                       c = strtol(optarg, &endptr, 0);
+                       assert(!*endptr);
+                       assert(c > 0 && c < INT_MAX);
+                       runcycles = c;
+                       break;
+               case 'o':
+                       c = strtol(optarg, &endptr, 0);
+                       assert(!*endptr);
+                       assert(c > 0 && c < INT_MAX);
+                       max_outstanding = c;
+                       break;
+               case 'b':
+                       c = strtol(optarg, &endptr, 0);
+                       assert(!*endptr);
+                       assert(c > 0 && c < INT_MAX);
+                       batch = c;
+                       break;
+               case 's':
+                       do_sleep = true;
+                       break;
+               case 'x':
+                       do_relax = true;
+                       break;
+               case 'e':
+                       do_exit = true;
+                       break;
+               default:
+                       help();
+                       exit(4);
+                       break;
+               }
+       }
+
+       /* does nothing here, used to make sure all smp APIs compile */
+       smp_acquire();
+       smp_release();
+       smp_mb();
+done:
+
+       if (batch > max_outstanding)
+               batch = max_outstanding;
+
+       if (optind < argc) {
+               help();
+               exit(4);
+       }
+       alloc_ring();
+
+       ret = pthread_create(&host, NULL, start_host, host_arg);
+       assert(!ret);
+       ret = pthread_create(&guest, NULL, start_guest, guest_arg);
+       assert(!ret);
+
+       ret = pthread_join(guest, &tret);
+       assert(!ret);
+       ret = pthread_join(host, &tret);
+       assert(!ret);
+       return 0;
+}
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
new file mode 100644 (file)
index 0000000..16917ac
--- /dev/null
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Common macros and functions for ring benchmarking.
+ */
+#ifndef MAIN_H
+#define MAIN_H
+
+#include <stdbool.h>
+
+extern bool do_exit;
+
+#if defined(__x86_64__) || defined(__i386__)
+#include "x86intrin.h"
+
+static inline void wait_cycles(unsigned long long cycles)
+{
+       unsigned long long t;
+
+       t = __rdtsc();
+       while (__rdtsc() - t < cycles) {}
+}
+
+#define VMEXIT_CYCLES 500
+#define VMENTRY_CYCLES 500
+
+#else
+static inline void wait_cycles(unsigned long long cycles)
+{
+       _Exit(5);
+}
+#define VMEXIT_CYCLES 0
+#define VMENTRY_CYCLES 0
+#endif
+
+static inline void vmexit(void)
+{
+       if (!do_exit)
+               return;
+
+       wait_cycles(VMEXIT_CYCLES);
+}
+static inline void vmentry(void)
+{
+       if (!do_exit)
+               return;
+
+       wait_cycles(VMENTRY_CYCLES);
+}
+
+/* implemented by ring */
+void alloc_ring(void);
+/* guest side */
+int add_inbuf(unsigned, void *, void *);
+void *get_buf(unsigned *, void **);
+void disable_call();
+bool enable_call();
+void kick_available();
+void poll_used();
+/* host side */
+void disable_kick();
+bool enable_kick();
+bool use_buf(unsigned *, void **);
+void call_used();
+void poll_avail();
+
+/* implemented by main */
+extern bool do_sleep;
+void kick(void);
+void wait_for_kick(void);
+void call(void);
+void wait_for_call(void);
+
+extern unsigned ring_size;
+
+/* Compiler barrier - similar to what Linux uses */
+#define barrier() asm volatile("" ::: "memory")
+
+/* Is there a portable way to do this? */
+#if defined(__x86_64__) || defined(__i386__)
+#define cpu_relax() asm ("rep; nop" ::: "memory")
+#else
+#define cpu_relax() assert(0)
+#endif
+
+extern bool do_relax;
+
+static inline void busy_wait(void)
+{
+       if (do_relax)
+               cpu_relax();
+       else
+               /* prevent compiler from removing busy loops */
+               barrier();
+}
+
+/*
+ * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
+ * with other __ATOMIC_SEQ_CST calls.
+ */
+#define smp_mb() __sync_synchronize()
+
+/*
+ * This abuses the atomic builtins for thread fences, and
+ * adds a compiler barrier.
+ */
+#define smp_release() do { \
+    barrier(); \
+    __atomic_thread_fence(__ATOMIC_RELEASE); \
+} while (0)
+
+#define smp_acquire() do { \
+    __atomic_thread_fence(__ATOMIC_ACQUIRE); \
+    barrier(); \
+} while (0)
+
+#endif
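smp_release()/smp_acquire() above are the pairing the ring code labels Barrier A and Barrier B: the producer orders its payload writes before the store that publishes them, and the consumer orders the load that observes the publication before its payload reads. A minimal sketch of that pairing with a made-up payload/flag pair (the macros are copied from main.h so the sketch stands alone; the spin uses barrier() only to force the flag to be reloaded):

#include <assert.h>
#include <pthread.h>

#define barrier() asm volatile("" ::: "memory")
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)
#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)

static int payload;
static int flag;

static void *producer(void *arg)
{
	payload = 42;
	smp_release();		/* Barrier A: payload visible before the flag */
	flag = 1;
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	while (!flag)		/* spin until published */
		barrier();	/* force the flag to be reloaded */
	smp_acquire();		/* Barrier B: flag read ordered before payload read */
	assert(payload == 42);
	pthread_join(t, NULL);
	return 0;
}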
diff --git a/tools/virtio/ringtest/ring.c b/tools/virtio/ringtest/ring.c
new file mode 100644 (file)
index 0000000..c25c8d2
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Simple descriptor-based ring. virtio 0.9 compatible event index is used for
+ * signalling, unconditionally.
+ */
+#define _GNU_SOURCE
+#include "main.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+/* Next - Where next entry will be written.
+ * Prev - "Next" value when event triggered previously.
+ * Event - Peer requested event after writing this entry.
+ */
+static inline bool need_event(unsigned short event,
+                             unsigned short next,
+                             unsigned short prev)
+{
+       return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
+}
+
+/* Design:
+ * Guest adds descriptors with unique index values and DESC_HW in flags.
+ * Host overwrites used descriptors with correct len, index, and DESC_HW clear.
+ * Flags are always set last.
+ */
+#define DESC_HW 0x1
+
+struct desc {
+       unsigned short flags;
+       unsigned short index;
+       unsigned len;
+       unsigned long long addr;
+};
+
+/* how much padding is needed to avoid false cache sharing */
+#define HOST_GUEST_PADDING 0x80
+
+/* Mostly read */
+struct event {
+       unsigned short kick_index;
+       unsigned char reserved0[HOST_GUEST_PADDING - 2];
+       unsigned short call_index;
+       unsigned char reserved1[HOST_GUEST_PADDING - 2];
+};
+
+struct data {
+       void *buf; /* descriptor is writeable, we can't get buf from there */
+       void *data;
+} *data;
+
+struct desc *ring;
+struct event *event;
+
+struct guest {
+       unsigned avail_idx;
+       unsigned last_used_idx;
+       unsigned num_free;
+       unsigned kicked_avail_idx;
+       unsigned char reserved[HOST_GUEST_PADDING - 12];
+} guest;
+
+struct host {
+       /* we do not need to track last avail index
+        * unless we have more than one in flight.
+        */
+       unsigned used_idx;
+       unsigned called_used_idx;
+       unsigned char reserved[HOST_GUEST_PADDING - 4];
+} host;
+
+/* implemented by ring */
+void alloc_ring(void)
+{
+       int ret;
+       int i;
+
+       ret = posix_memalign((void **)&ring, 0x1000, ring_size * sizeof *ring);
+       if (ret) {
+               perror("Unable to allocate ring buffer.\n");
+               exit(3);
+       }
+       event = malloc(sizeof *event);
+       if (!event) {
+               perror("Unable to allocate event buffer.\n");
+               exit(3);
+       }
+       memset(event, 0, sizeof *event);
+       guest.avail_idx = 0;
+       guest.kicked_avail_idx = -1;
+       guest.last_used_idx = 0;
+       host.used_idx = 0;
+       host.called_used_idx = -1;
+       for (i = 0; i < ring_size; ++i) {
+               struct desc desc = {
+                       .index = i,
+               };
+               ring[i] = desc;
+       }
+       guest.num_free = ring_size;
+       data = malloc(ring_size * sizeof *data);
+       if (!data) {
+               perror("Unable to allocate data buffer.\n");
+               exit(3);
+       }
+       memset(data, 0, ring_size * sizeof *data);
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+       unsigned head, index;
+
+       if (!guest.num_free)
+               return -1;
+
+       guest.num_free--;
+       head = (ring_size - 1) & (guest.avail_idx++);
+
+       /* Start with a write. On MESI architectures this helps
+        * avoid a shared state with a consumer that is polling this descriptor.
+        */
+       ring[head].addr = (unsigned long)(void*)buf;
+       ring[head].len = len;
+       /* read below might bypass write above. That is OK because it's just an
+        * optimization. If this happens, we will get the cache line in a
+        * shared state which is unfortunate, but probably not worth it to
+        * add an explicit full barrier to avoid this.
+        */
+       barrier();
+       index = ring[head].index;
+       data[index].buf = buf;
+       data[index].data = datap;
+       /* Barrier A (for pairing) */
+       smp_release();
+       ring[head].flags = DESC_HW;
+
+       return 0;
+}
+
+void *get_buf(unsigned *lenp, void **bufp)
+{
+       unsigned head = (ring_size - 1) & guest.last_used_idx;
+       unsigned index;
+       void *datap;
+
+       if (ring[head].flags & DESC_HW)
+               return NULL;
+       /* Barrier B (for pairing) */
+       smp_acquire();
+       *lenp = ring[head].len;
+       index = ring[head].index & (ring_size - 1);
+       datap = data[index].data;
+       *bufp = data[index].buf;
+       data[index].buf = NULL;
+       data[index].data = NULL;
+       guest.num_free++;
+       guest.last_used_idx++;
+       return datap;
+}
+
+void poll_used(void)
+{
+       unsigned head = (ring_size - 1) & guest.last_used_idx;
+
+       while (ring[head].flags & DESC_HW)
+               busy_wait();
+}
+
+void disable_call()
+{
+       /* Doing nothing to disable calls might cause
+        * extra interrupts, but reduces the number of cache misses.
+        */
+}
+
+bool enable_call()
+{
+       unsigned head = (ring_size - 1) & guest.last_used_idx;
+
+       event->call_index = guest.last_used_idx;
+       /* Flush call index write */
+       /* Barrier D (for pairing) */
+       smp_mb();
+       return ring[head].flags & DESC_HW;
+}
+
+void kick_available(void)
+{
+       /* Flush in previous flags write */
+       /* Barrier C (for pairing) */
+       smp_mb();
+       if (!need_event(event->kick_index,
+                       guest.avail_idx,
+                       guest.kicked_avail_idx))
+               return;
+
+       guest.kicked_avail_idx = guest.avail_idx;
+       kick();
+}
+
+/* host side */
+void disable_kick()
+{
+       /* Doing nothing to disable kicks might cause
+        * extra interrupts, but reduces the number of cache misses.
+        */
+}
+
+bool enable_kick()
+{
+       unsigned head = (ring_size - 1) & host.used_idx;
+
+       event->kick_index = host.used_idx;
+       /* Barrier C (for pairing) */
+       smp_mb();
+       return !(ring[head].flags & DESC_HW);
+}
+
+void poll_avail(void)
+{
+       unsigned head = (ring_size - 1) & host.used_idx;
+
+       while (!(ring[head].flags & DESC_HW))
+               busy_wait();
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+       unsigned head = (ring_size - 1) & host.used_idx;
+
+       if (!(ring[head].flags & DESC_HW))
+               return false;
+
+       /* make sure length read below is not speculated */
+       /* Barrier A (for pairing) */
+       smp_acquire();
+
+       /* simple in-order completion: we don't need
+        * to touch index at all. This also means we
+        * can just modify the descriptor in-place.
+        */
+       ring[head].len--;
+       /* Make sure len is valid before flags.
+        * Note: an alternative is to write len and flags in one access -
+        * possible on 64 bit architectures but wmb is free on Intel anyway
+        * so I have no way to test whether it's a gain.
+        */
+       /* Barrier B (for pairing) */
+       smp_release();
+       ring[head].flags = 0;
+       host.used_idx++;
+       return true;
+}
+
+void call_used(void)
+{
+       /* Flush in previous flags write */
+       /* Barrier D (for pairing) */
+       smp_mb();
+       if (!need_event(event->call_index,
+                       host.used_idx,
+                       host.called_used_idx))
+               return;
+
+       host.called_used_idx = host.used_idx;
+       call();
+}
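need_event() above is the virtio event-index test: a notification is needed only when the peer's requested event index falls in the window of entries written since the last notification, and the unsigned 16-bit subtraction makes the comparison wrap-safe. A small worked check of that arithmetic (a standalone copy of the helper, with made-up index values):

#include <assert.h>
#include <stdbool.h>

/* Standalone copy of the ring.c helper, for illustration only. */
static bool need_event(unsigned short event, unsigned short next,
		       unsigned short prev)
{
	return (unsigned short)(next - event - 1) < (unsigned short)(next - prev);
}

int main(void)
{
	/* Entries 4..6 were written since the last notification at prev=4,
	 * and the peer asked to be notified once entry 5 was written. */
	assert(need_event(5, 7, 4));

	/* Event index 9 has not been written yet: no notification needed. */
	assert(!need_event(9, 7, 4));

	/* The unsigned math also behaves across 16-bit wrap-around. */
	assert(need_event(0xfffe, 2, 0xfffc));
	return 0;
}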
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
new file mode 100755 (executable)
index 0000000..52b0f71
--- /dev/null
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#use last CPU for host. Why not the first?
+#many devices tend to use cpu0 by default so
+#it tends to be busier
+HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1)
+
+#run command on all cpus
+for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n);
+do
+       #Don't run guest and host on same CPU
+       #It actually works ok if using signalling
+       if
+               (echo "$@" | grep -e "--sleep" > /dev/null) || \
+                       test $HOST_AFFINITY '!=' $cpu
+       then
+               echo "GUEST AFFINITY $cpu"
+               "$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
+       fi
+done
+echo "NO GUEST AFFINITY"
+"$@" --host-affinity $HOST_AFFINITY
+echo "NO AFFINITY"
+"$@"
diff --git a/tools/virtio/ringtest/virtio_ring_0_9.c b/tools/virtio/ringtest/virtio_ring_0_9.c
new file mode 100644 (file)
index 0000000..47c9a1a
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2016 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Partial implementation of virtio 0.9. event index is used for signalling,
+ * unconditionally. Design roughly follows linux kernel implementation in order
+ * to be able to judge its performance.
+ */
+#define _GNU_SOURCE
+#include "main.h"
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+#include <string.h>
+#include <linux/virtio_ring.h>
+
+struct data {
+       void *data;
+} *data;
+
+struct vring ring;
+
+/* enabling the below activates experimental ring polling code
+ * (which skips index reads on consumer in favor of looking at
+ * high bits of ring id ^ 0x8000).
+ */
+/* #ifdef RING_POLL */
+
+/* how much padding is needed to avoid false cache sharing */
+#define HOST_GUEST_PADDING 0x80
+
+struct guest {
+       unsigned short avail_idx;
+       unsigned short last_used_idx;
+       unsigned short num_free;
+       unsigned short kicked_avail_idx;
+       unsigned short free_head;
+       unsigned char reserved[HOST_GUEST_PADDING - 10];
+} guest;
+
+struct host {
+       /* we do not need to track last avail index
+        * unless we have more than one in flight.
+        */
+       unsigned short used_idx;
+       unsigned short called_used_idx;
+       unsigned char reserved[HOST_GUEST_PADDING - 4];
+} host;
+
+/* implemented by ring */
+void alloc_ring(void)
+{
+       int ret;
+       int i;
+       void *p;
+
+       ret = posix_memalign(&p, 0x1000, vring_size(ring_size, 0x1000));
+       if (ret) {
+               perror("Unable to allocate ring buffer.\n");
+               exit(3);
+       }
+       memset(p, 0, vring_size(ring_size, 0x1000));
+       vring_init(&ring, ring_size, p, 0x1000);
+
+       guest.avail_idx = 0;
+       guest.kicked_avail_idx = -1;
+       guest.last_used_idx = 0;
+       /* Put everything in free lists. */
+       guest.free_head = 0;
+       for (i = 0; i < ring_size - 1; i++)
+               ring.desc[i].next = i + 1;
+       host.used_idx = 0;
+       host.called_used_idx = -1;
+       guest.num_free = ring_size;
+       data = malloc(ring_size * sizeof *data);
+       if (!data) {
+               perror("Unable to allocate data buffer.\n");
+               exit(3);
+       }
+       memset(data, 0, ring_size * sizeof *data);
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+       unsigned head, avail;
+       struct vring_desc *desc;
+
+       if (!guest.num_free)
+               return -1;
+
+       head = guest.free_head;
+       guest.num_free--;
+
+       desc = ring.desc;
+       desc[head].flags = VRING_DESC_F_NEXT;
+       desc[head].addr = (unsigned long)(void *)buf;
+       desc[head].len = len;
+       /* We do it like this to simulate the way
+        * we'd have to flip it if we had multiple
+        * descriptors.
+        */
+       desc[head].flags &= ~VRING_DESC_F_NEXT;
+       guest.free_head = desc[head].next;
+
+       data[head].data = datap;
+
+#ifdef RING_POLL
+       /* Barrier A (for pairing) */
+       smp_release();
+       avail = guest.avail_idx++;
+       ring.avail->ring[avail & (ring_size - 1)] =
+               (head | (avail & ~(ring_size - 1))) ^ 0x8000;
+#else
+       avail = (ring_size - 1) & (guest.avail_idx++);
+       ring.avail->ring[avail] = head;
+       /* Barrier A (for pairing) */
+       smp_release();
+#endif
+       ring.avail->idx = guest.avail_idx;
+       return 0;
+}
+
+void *get_buf(unsigned *lenp, void **bufp)
+{
+       unsigned head;
+       unsigned index;
+       void *datap;
+
+#ifdef RING_POLL
+       head = (ring_size - 1) & guest.last_used_idx;
+       index = ring.used->ring[head].id;
+       if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
+               return NULL;
+       /* Barrier B (for pairing) */
+       smp_acquire();
+       index &= ring_size - 1;
+#else
+       if (ring.used->idx == guest.last_used_idx)
+               return NULL;
+       /* Barrier B (for pairing) */
+       smp_acquire();
+       head = (ring_size - 1) & guest.last_used_idx;
+       index = ring.used->ring[head].id;
+#endif
+       *lenp = ring.used->ring[head].len;
+       datap = data[index].data;
+       *bufp = (void*)(unsigned long)ring.desc[index].addr;
+       data[index].data = NULL;
+       ring.desc[index].next = guest.free_head;
+       guest.free_head = index;
+       guest.num_free++;
+       guest.last_used_idx++;
+       return datap;
+}
+
+void poll_used(void)
+{
+#ifdef RING_POLL
+       unsigned head = (ring_size - 1) & guest.last_used_idx;
+
+       for (;;) {
+               unsigned index = ring.used->ring[head].id;
+
+               if ((index ^ guest.last_used_idx ^ 0x8000) & ~(ring_size - 1))
+                       busy_wait();
+               else
+                       break;
+       }
+#else
+       unsigned head = guest.last_used_idx;
+
+       while (ring.used->idx == head)
+               busy_wait();
+#endif
+}
+
+void disable_call()
+{
+       /* Doing nothing to disable calls might cause
+        * extra interrupts, but reduces the number of cache misses.
+        */
+}
+
+bool enable_call()
+{
+       unsigned short last_used_idx;
+
+       vring_used_event(&ring) = (last_used_idx = guest.last_used_idx);
+       /* Flush call index write */
+       /* Barrier D (for pairing) */
+       smp_mb();
+#ifdef RING_POLL
+       {
+               unsigned short head = last_used_idx & (ring_size - 1);
+               unsigned index = ring.used->ring[head].id;
+
+               return (index ^ last_used_idx ^ 0x8000) & ~(ring_size - 1);
+       }
+#else
+       return ring.used->idx == last_used_idx;
+#endif
+}
+
+void kick_available(void)
+{
+       /* Flush in previous flags write */
+       /* Barrier C (for pairing) */
+       smp_mb();
+       if (!vring_need_event(vring_avail_event(&ring),
+                             guest.avail_idx,
+                             guest.kicked_avail_idx))
+               return;
+
+       guest.kicked_avail_idx = guest.avail_idx;
+       kick();
+}
+
+/* host side */
+void disable_kick()
+{
+       /* Doing nothing to disable kicks might cause
+        * extra interrupts, but reduces the number of cache misses.
+        */
+}
+
+bool enable_kick()
+{
+       unsigned head = host.used_idx;
+
+       vring_avail_event(&ring) = head;
+       /* Barrier C (for pairing) */
+       smp_mb();
+#ifdef RING_POLL
+       {
+               unsigned index = ring.avail->ring[head & (ring_size - 1)];
+
+               return (index ^ head ^ 0x8000) & ~(ring_size - 1);
+       }
+#else
+       return head == ring.avail->idx;
+#endif
+}
+
+void poll_avail(void)
+{
+       unsigned head = host.used_idx;
+#ifdef RING_POLL
+       for (;;) {
+               unsigned index = ring.avail->ring[head & (ring_size - 1)];
+               if ((index ^ head ^ 0x8000) & ~(ring_size - 1))
+                       busy_wait();
+               else
+                       break;
+       }
+#else
+       while (ring.avail->idx == head)
+               busy_wait();
+#endif
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+       unsigned used_idx = host.used_idx;
+       struct vring_desc *desc;
+       unsigned head;
+
+#ifdef RING_POLL
+       head = ring.avail->ring[used_idx & (ring_size - 1)];
+       if ((used_idx ^ head ^ 0x8000) & ~(ring_size - 1))
+               return false;
+       /* Barrier A (for pairing) */
+       smp_acquire();
+
+       used_idx &= ring_size - 1;
+       desc = &ring.desc[head & (ring_size - 1)];
+#else
+       if (used_idx == ring.avail->idx)
+               return false;
+
+       /* Barrier A (for pairing) */
+       smp_acquire();
+
+       used_idx &= ring_size - 1;
+       head = ring.avail->ring[used_idx];
+       desc = &ring.desc[head];
+#endif
+
+       *lenp = desc->len;
+       *bufp = (void *)(unsigned long)desc->addr;
+
+       /* now update used ring */
+       ring.used->ring[used_idx].id = head;
+       ring.used->ring[used_idx].len = desc->len - 1;
+       /* Barrier B (for pairing) */
+       smp_release();
+       host.used_idx++;
+       ring.used->idx = host.used_idx;
+
+       return true;
+}
+
+void call_used(void)
+{
+       /* Flush in previous flags write */
+       /* Barrier D (for pairing) */
+       smp_mb();
+       if (!vring_need_event(vring_used_event(&ring),
+                             host.used_idx,
+                             host.called_used_idx))
+               return;
+
+       host.called_used_idx = host.used_idx;
+       call();
+}
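In the RING_POLL variant, the producer stores the descriptor head in the low bits of each avail-ring entry and folds the wrap bits of the avail index, XORed with 0x8000, into the high bits, so the consumer can detect a fresh entry from the entry alone instead of reading the published index; the 0x8000 flip also makes the zero-initialized ring look stale on the very first lap. A minimal sketch of that encode/ownership check (standalone, with a hypothetical ring size of 256):

#include <assert.h>
#include <stdbool.h>

#define RING_SIZE 256u	/* must be a power of two, as in the benchmark */

/* Producer: descriptor head in the low bits, wrap bits of avail_idx
 * (flipped by 0x8000) in the high bits. */
static unsigned short encode(unsigned short head, unsigned short avail_idx)
{
	return (head | (avail_idx & ~(RING_SIZE - 1))) ^ 0x8000;
}

/* Consumer: the entry is fresh iff its high bits match last_idx ^ 0x8000. */
static bool is_fresh(unsigned short entry, unsigned short last_idx)
{
	return !((entry ^ last_idx ^ 0x8000) & ~(RING_SIZE - 1));
}

int main(void)
{
	unsigned short avail_idx = 3 * RING_SIZE + 7;	/* third lap, slot 7 */
	unsigned short entry = encode(7, avail_idx);

	assert(is_fresh(entry, avail_idx));		 /* same lap: consume it */
	assert(!is_fresh(entry, avail_idx + RING_SIZE)); /* one lap later: stale */
	return 0;
}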
diff --git a/tools/virtio/ringtest/virtio_ring_poll.c b/tools/virtio/ringtest/virtio_ring_poll.c
new file mode 100644 (file)
index 0000000..84fc2c5
--- /dev/null
@@ -0,0 +1,2 @@
+#define RING_POLL 1
+#include "virtio_ring_0_9.c"
index 69bca185c471d1dec971f02403ae9fd60851f5bd..97c58153f923ede13f6392b12105609aa151d948 100644 (file)
@@ -48,7 +48,7 @@ static bool timer_is_armed(struct arch_timer_cpu *timer)
 static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
 {
        timer->armed = true;
-       hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
+       hrtimer_start(&timer->timer, ktime_add_ns(ktime_get_raw(), ns),
                      HRTIMER_MODE_ABS);
 }
 
@@ -308,7 +308,7 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
        INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
-       hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       hrtimer_init(&timer->timer, CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
        timer->timer.function = kvm_timer_expire;
 }